From 8b3ee8115998bce35a724fa1cc075a08d3d73d05 Mon Sep 17 00:00:00 2001
From: Shane
Date: Fri, 20 Jun 2025 14:36:50 -0400
Subject: [PATCH 01/19] Initial addition of sqisign.

Signed-off-by: Shane
---
 .CMake/alg_support.cmake | 39 +
 CMakeLists.txt | 3 +
 .../copy_from_upstream/copy_from_upstream.yml | 30 +
 .../patches/sqisign_fp.patch | 108 +
 src/CMakeLists.txt | 4 +
 src/oqsconfig.h.cmake | 8 +
 src/sig/sig.c | 47 +-
 src/sig/sig.h | 11 +-
 src/sig/sqisign/CMakeLists.txt | 62 +
 src/sig/sqisign/sig_sqisign.h | 47 +
 src/sig/sqisign/sig_sqisign_lvl1.c | 108 +
 src/sig/sqisign/sig_sqisign_lvl3.c | 108 +
 src/sig/sqisign/sig_sqisign_lvl5.c | 108 +
 .../LICENSE | 202 +
 .../the-sqisign_sqisign_lvl1_broadwell/NOTICE | 21 +
 .../the-sqisign_sqisign_lvl1_broadwell/aes.h | 29 +
 .../aes_ni.c | 258 +
 .../aes_ni.h | 85 +
 .../the-sqisign_sqisign_lvl1_broadwell/api.c | 33 +
 .../the-sqisign_sqisign_lvl1_broadwell/api.h | 32 +
 .../asm_preamble.h | 22 +
 .../basis.c | 416 ++
 .../bench.h | 126 +
 .../bench_macos.h | 143 +
 .../biextension.c | 770 +++
 .../biextension.h | 82 +
 .../common.c | 88 +
 .../ctr_drbg.c | 201 +
 .../ctr_drbg.h | 78 +
 .../the-sqisign_sqisign_lvl1_broadwell/defs.h | 63 +
 .../dim2id2iso.c | 1172 +++++
 .../e0_basis.c | 55 +
 .../e0_basis.h | 3 +
 .../the-sqisign_sqisign_lvl1_broadwell/ec.c | 665 +++
 .../the-sqisign_sqisign_lvl1_broadwell/ec.h | 668 +++
 .../ec_jac.c | 335 ++
 .../ec_params.c | 4 +
 .../ec_params.h | 12 +
 .../encode_signature.c | 208 +
 .../encode_verification.c | 220 +
 .../encoded_sizes.h | 11 +
 .../endomorphism_action.c | 3336 ++++++++++++
 .../endomorphism_action.h | 31 +
 .../fips202.c | 876 ++++
 .../fips202.h | 171 +
 .../the-sqisign_sqisign_lvl1_broadwell/fp.c | 95 +
 .../the-sqisign_sqisign_lvl1_broadwell/fp.h | 135 +
 .../the-sqisign_sqisign_lvl1_broadwell/fp2.c | 188 +
 .../the-sqisign_sqisign_lvl1_broadwell/fp2.h | 41 +
 .../the-sqisign_sqisign_lvl1_broadwell/fp2x.h | 162 +
 .../fp_asm.S | 466 ++
 .../fp_constants.h | 17 +
 .../gf5248.c | 767 +++
 .../gf5248.h | 912 ++++
 .../the-sqisign_sqisign_lvl1_broadwell/hd.c | 93 +
 .../the-sqisign_sqisign_lvl1_broadwell/hd.h | 435 ++
 .../hd_splitting_transforms.c | 143 +
 .../hd_splitting_transforms.h | 18 +
 .../id2iso.c | 338 ++
 .../id2iso.h | 280 +
 .../intbig.h | 303 ++
 .../the-sqisign_sqisign_lvl1_broadwell/isog.h | 28 +
 .../isog_chains.c | 241 +
 .../keygen.c | 64 +
 .../lvlx.cmake | 12 +
 .../the-sqisign_sqisign_lvl1_broadwell/mem.c | 23 +
 .../the-sqisign_sqisign_lvl1_broadwell/mem.h | 24 +
 .../mini-gmp-extra.c | 73 +
 .../mini-gmp-extra.h | 19 +
 .../mini-gmp.c | 4671 +++++++++++++++++
 .../mini-gmp.h | 311 ++
 .../the-sqisign_sqisign_lvl1_broadwell/mp.h | 88 +
 .../quaternion.h | 708 +++
 .../quaternion_constants.h | 6 +
 .../quaternion_data.c | 3176 +++++++++++
 .../quaternion_data.h | 12 +
 .../randombytes_arm64crypto.h | 27 +
 .../randombytes_ctrdrbg_aesni.c | 87 +
 .../randombytes_system.c | 431 ++
 .../the-sqisign_sqisign_lvl1_broadwell/rng.h | 43 +
 .../the-sqisign_sqisign_lvl1_broadwell/sig.h | 85 +
 .../the-sqisign_sqisign_lvl1_broadwell/sign.c | 634 +++
 .../signature.h | 97 +
 .../sqisign.c | 146 +
 .../sqisign_namespace.h | 1071 ++++
 .../sqisign_parameters.txt | 3 +
 .../theta_isogenies.c | 1283 +++++
 .../theta_isogenies.h | 18 +
 .../theta_structure.c | 78 +
 .../theta_structure.h | 135 +
 .../tools.c | 75 +
 .../tools.h | 49 +
 .../torsion_constants.c | 43 +
 .../torsion_constants.h | 6 +
 .../tutil.h | 36 +
 .../vaes256_key_expansion.S | 122 +
 .../verification.h | 123 +
 .../verify.c | 309 ++
 .../xeval.c | 64 +
 .../xisog.c | 61 +
.../the-sqisign_sqisign_lvl1_ref/COPYING.LGPL | 165 + .../the-sqisign_sqisign_lvl1_ref/LICENSE | 202 + .../the-sqisign_sqisign_lvl1_ref/NOTICE | 21 + .../the-sqisign_sqisign_lvl1_ref/aes.h | 29 + .../the-sqisign_sqisign_lvl1_ref/aes_c.c | 783 +++ .../the-sqisign_sqisign_lvl1_ref/algebra.c | 280 + .../the-sqisign_sqisign_lvl1_ref/api.c | 33 + .../the-sqisign_sqisign_lvl1_ref/api.h | 32 + .../the-sqisign_sqisign_lvl1_ref/basis.c | 416 ++ .../the-sqisign_sqisign_lvl1_ref/bench.h | 126 + .../bench_macos.h | 143 + .../biextension.c | 770 +++ .../biextension.h | 82 + .../the-sqisign_sqisign_lvl1_ref/common.c | 88 + .../the-sqisign_sqisign_lvl1_ref/dim2.c | 132 + .../the-sqisign_sqisign_lvl1_ref/dim2id2iso.c | 1172 +++++ .../the-sqisign_sqisign_lvl1_ref/dim4.c | 470 ++ .../the-sqisign_sqisign_lvl1_ref/dpe.h | 743 +++ .../the-sqisign_sqisign_lvl1_ref/e0_basis.c | 55 + .../the-sqisign_sqisign_lvl1_ref/e0_basis.h | 3 + .../sqisign/the-sqisign_sqisign_lvl1_ref/ec.c | 665 +++ .../sqisign/the-sqisign_sqisign_lvl1_ref/ec.h | 668 +++ .../the-sqisign_sqisign_lvl1_ref/ec_jac.c | 335 ++ .../the-sqisign_sqisign_lvl1_ref/ec_params.c | 4 + .../the-sqisign_sqisign_lvl1_ref/ec_params.h | 12 + .../encode_signature.c | 208 + .../encode_verification.c | 220 + .../encoded_sizes.h | 11 + .../endomorphism_action.c | 3336 ++++++++++++ .../endomorphism_action.h | 31 + .../the-sqisign_sqisign_lvl1_ref/finit.c | 122 + .../the-sqisign_sqisign_lvl1_ref/fips202.c | 876 ++++ .../the-sqisign_sqisign_lvl1_ref/fips202.h | 171 + .../sqisign/the-sqisign_sqisign_lvl1_ref/fp.c | 15 + .../sqisign/the-sqisign_sqisign_lvl1_ref/fp.h | 48 + .../the-sqisign_sqisign_lvl1_ref/fp2.c | 328 ++ .../the-sqisign_sqisign_lvl1_ref/fp2.h | 41 + .../fp_constants.h | 17 + .../fp_p5248_32.c | 945 ++++ .../fp_p5248_64.c | 794 +++ .../sqisign/the-sqisign_sqisign_lvl1_ref/hd.c | 93 + .../sqisign/the-sqisign_sqisign_lvl1_ref/hd.h | 435 ++ .../hd_splitting_transforms.c | 143 + .../hd_splitting_transforms.h | 18 + .../the-sqisign_sqisign_lvl1_ref/hnf.c | 210 + .../hnf_internal.c | 182 + .../hnf_internal.h | 94 + .../ibz_division.c | 12 + .../the-sqisign_sqisign_lvl1_ref/id2iso.c | 338 ++ .../the-sqisign_sqisign_lvl1_ref/id2iso.h | 280 + .../the-sqisign_sqisign_lvl1_ref/ideal.c | 323 ++ .../the-sqisign_sqisign_lvl1_ref/intbig.c | 791 +++ .../the-sqisign_sqisign_lvl1_ref/intbig.h | 303 ++ .../intbig_internal.h | 123 + .../the-sqisign_sqisign_lvl1_ref/integers.c | 116 + .../the-sqisign_sqisign_lvl1_ref/internal.h | 812 +++ .../the-sqisign_sqisign_lvl1_ref/isog.h | 28 + .../isog_chains.c | 241 + .../the-sqisign_sqisign_lvl1_ref/keygen.c | 64 + .../sqisign/the-sqisign_sqisign_lvl1_ref/l2.c | 190 + .../the-sqisign_sqisign_lvl1_ref/lat_ball.c | 139 + .../the-sqisign_sqisign_lvl1_ref/lattice.c | 328 ++ .../lll_applications.c | 127 + .../lll_internals.h | 238 + .../the-sqisign_sqisign_lvl1_ref/lvlx.cmake | 12 + .../the-sqisign_sqisign_lvl1_ref/mem.c | 23 + .../the-sqisign_sqisign_lvl1_ref/mem.h | 24 + .../mini-gmp-extra.c | 73 + .../mini-gmp-extra.h | 19 + .../the-sqisign_sqisign_lvl1_ref/mini-gmp.c | 4671 +++++++++++++++++ .../the-sqisign_sqisign_lvl1_ref/mini-gmp.h | 311 ++ .../sqisign/the-sqisign_sqisign_lvl1_ref/mp.c | 357 ++ .../sqisign/the-sqisign_sqisign_lvl1_ref/mp.h | 88 + .../the-sqisign_sqisign_lvl1_ref/normeq.c | 369 ++ .../the-sqisign_sqisign_lvl1_ref/printer.c | 132 + .../the-sqisign_sqisign_lvl1_ref/quaternion.h | 708 +++ .../quaternion_constants.h | 6 + .../quaternion_data.c | 3176 +++++++++++ .../quaternion_data.h | 12 + .../randombytes_ctrdrbg.c | 
161 + .../randombytes_system.c | 431 ++ .../the-sqisign_sqisign_lvl1_ref/rationals.c | 233 + .../the-sqisign_sqisign_lvl1_ref/rng.h | 43 + .../the-sqisign_sqisign_lvl1_ref/sig.h | 85 + .../the-sqisign_sqisign_lvl1_ref/sign.c | 634 +++ .../the-sqisign_sqisign_lvl1_ref/signature.h | 97 + .../the-sqisign_sqisign_lvl1_ref/sqisign.c | 146 + .../sqisign_namespace.h | 1071 ++++ .../sqisign_parameters.txt | 3 + .../theta_isogenies.c | 1283 +++++ .../theta_isogenies.h | 18 + .../theta_structure.c | 78 + .../theta_structure.h | 135 + .../the-sqisign_sqisign_lvl1_ref/tools.c | 75 + .../the-sqisign_sqisign_lvl1_ref/tools.h | 49 + .../torsion_constants.c | 43 + .../torsion_constants.h | 6 + .../the-sqisign_sqisign_lvl1_ref/tutil.h | 36 + .../verification.h | 123 + .../the-sqisign_sqisign_lvl1_ref/verify.c | 309 ++ .../the-sqisign_sqisign_lvl1_ref/xeval.c | 64 + .../the-sqisign_sqisign_lvl1_ref/xisog.c | 61 + .../COPYING.LGPL | 165 + .../LICENSE | 202 + .../the-sqisign_sqisign_lvl3_broadwell/NOTICE | 21 + .../the-sqisign_sqisign_lvl3_broadwell/aes.h | 29 + .../aes_ni.c | 258 + .../aes_ni.h | 85 + .../the-sqisign_sqisign_lvl3_broadwell/api.c | 32 + .../the-sqisign_sqisign_lvl3_broadwell/api.h | 32 + .../asm_preamble.h | 22 + .../basis.c | 416 ++ .../bench.h | 126 + .../bench_macos.h | 143 + .../biextension.c | 770 +++ .../biextension.h | 82 + .../common.c | 88 + .../ctr_drbg.c | 201 + .../ctr_drbg.h | 78 + .../the-sqisign_sqisign_lvl3_broadwell/defs.h | 63 + .../dim2id2iso.c | 1172 +++++ .../e0_basis.c | 55 + .../e0_basis.h | 3 + .../the-sqisign_sqisign_lvl3_broadwell/ec.c | 665 +++ .../the-sqisign_sqisign_lvl3_broadwell/ec.h | 668 +++ .../ec_jac.c | 335 ++ .../ec_params.c | 4 + .../ec_params.h | 12 + .../encode_signature.c | 208 + .../encode_verification.c | 220 + .../encoded_sizes.h | 11 + .../endomorphism_action.c | 3812 ++++++++++++++ .../endomorphism_action.h | 31 + .../fips202.c | 876 ++++ .../fips202.h | 171 + .../the-sqisign_sqisign_lvl3_broadwell/fp.c | 108 + .../the-sqisign_sqisign_lvl3_broadwell/fp.h | 135 + .../the-sqisign_sqisign_lvl3_broadwell/fp2.c | 188 + .../the-sqisign_sqisign_lvl3_broadwell/fp2.h | 45 + .../the-sqisign_sqisign_lvl3_broadwell/fp2x.h | 162 + .../fp_asm.S | 825 +++ .../fp_constants.h | 17 + .../gf65376.c | 792 +++ .../gf65376.h | 1121 ++++ .../the-sqisign_sqisign_lvl3_broadwell/hd.c | 93 + .../the-sqisign_sqisign_lvl3_broadwell/hd.h | 435 ++ .../hd_splitting_transforms.c | 143 + .../hd_splitting_transforms.h | 18 + .../id2iso.c | 338 ++ .../id2iso.h | 280 + .../intbig.h | 303 ++ .../the-sqisign_sqisign_lvl3_broadwell/isog.h | 28 + .../isog_chains.c | 241 + .../keygen.c | 64 + .../lvlx.cmake | 12 + .../the-sqisign_sqisign_lvl3_broadwell/mem.c | 23 + .../the-sqisign_sqisign_lvl3_broadwell/mem.h | 24 + .../mini-gmp-extra.c | 73 + .../mini-gmp-extra.h | 19 + .../mini-gmp.c | 4671 +++++++++++++++++ .../mini-gmp.h | 311 ++ .../the-sqisign_sqisign_lvl3_broadwell/mp.h | 88 + .../quaternion.h | 708 +++ .../quaternion_constants.h | 6 + .../quaternion_data.c | 3626 +++++++++++++ .../quaternion_data.h | 12 + .../randombytes_arm64crypto.h | 27 + .../randombytes_ctrdrbg_aesni.c | 87 + .../randombytes_system.c | 431 ++ .../the-sqisign_sqisign_lvl3_broadwell/rng.h | 43 + .../the-sqisign_sqisign_lvl3_broadwell/sig.h | 85 + .../the-sqisign_sqisign_lvl3_broadwell/sign.c | 634 +++ .../signature.h | 97 + .../sqisign.c | 146 + .../sqisign_namespace.h | 1071 ++++ .../sqisign_parameters.txt | 3 + .../theta_isogenies.c | 1283 +++++ .../theta_isogenies.h | 18 + .../theta_structure.c | 78 + 
.../theta_structure.h | 135 + .../tools.c | 75 + .../tools.h | 49 + .../torsion_constants.c | 43 + .../torsion_constants.h | 6 + .../tutil.h | 36 + .../vaes256_key_expansion.S | 122 + .../verification.h | 123 + .../verify.c | 309 ++ .../xeval.c | 64 + .../xisog.c | 61 + .../the-sqisign_sqisign_lvl3_ref/COPYING.LGPL | 165 + .../the-sqisign_sqisign_lvl3_ref/LICENSE | 202 + .../the-sqisign_sqisign_lvl3_ref/NOTICE | 21 + .../the-sqisign_sqisign_lvl3_ref/aes.h | 29 + .../the-sqisign_sqisign_lvl3_ref/aes_c.c | 783 +++ .../the-sqisign_sqisign_lvl3_ref/algebra.c | 280 + .../the-sqisign_sqisign_lvl3_ref/api.c | 32 + .../the-sqisign_sqisign_lvl3_ref/api.h | 32 + .../the-sqisign_sqisign_lvl3_ref/basis.c | 416 ++ .../the-sqisign_sqisign_lvl3_ref/bench.h | 126 + .../bench_macos.h | 143 + .../biextension.c | 770 +++ .../biextension.h | 82 + .../the-sqisign_sqisign_lvl3_ref/common.c | 88 + .../the-sqisign_sqisign_lvl3_ref/dim2.c | 132 + .../the-sqisign_sqisign_lvl3_ref/dim2id2iso.c | 1172 +++++ .../the-sqisign_sqisign_lvl3_ref/dim4.c | 470 ++ .../the-sqisign_sqisign_lvl3_ref/dpe.h | 743 +++ .../the-sqisign_sqisign_lvl3_ref/e0_basis.c | 55 + .../the-sqisign_sqisign_lvl3_ref/e0_basis.h | 3 + .../sqisign/the-sqisign_sqisign_lvl3_ref/ec.c | 665 +++ .../sqisign/the-sqisign_sqisign_lvl3_ref/ec.h | 668 +++ .../the-sqisign_sqisign_lvl3_ref/ec_jac.c | 335 ++ .../the-sqisign_sqisign_lvl3_ref/ec_params.c | 4 + .../the-sqisign_sqisign_lvl3_ref/ec_params.h | 12 + .../encode_signature.c | 208 + .../encode_verification.c | 220 + .../encoded_sizes.h | 11 + .../endomorphism_action.c | 3812 ++++++++++++++ .../endomorphism_action.h | 31 + .../the-sqisign_sqisign_lvl3_ref/finit.c | 122 + .../the-sqisign_sqisign_lvl3_ref/fips202.c | 876 ++++ .../the-sqisign_sqisign_lvl3_ref/fips202.h | 171 + .../sqisign/the-sqisign_sqisign_lvl3_ref/fp.c | 15 + .../sqisign/the-sqisign_sqisign_lvl3_ref/fp.h | 48 + .../the-sqisign_sqisign_lvl3_ref/fp2.c | 328 ++ .../the-sqisign_sqisign_lvl3_ref/fp2.h | 41 + .../fp_constants.h | 17 + .../fp_p65376_32.c | 1234 +++++ .../fp_p65376_64.c | 875 +++ .../sqisign/the-sqisign_sqisign_lvl3_ref/hd.c | 93 + .../sqisign/the-sqisign_sqisign_lvl3_ref/hd.h | 435 ++ .../hd_splitting_transforms.c | 143 + .../hd_splitting_transforms.h | 18 + .../the-sqisign_sqisign_lvl3_ref/hnf.c | 210 + .../hnf_internal.c | 182 + .../hnf_internal.h | 94 + .../ibz_division.c | 12 + .../the-sqisign_sqisign_lvl3_ref/id2iso.c | 338 ++ .../the-sqisign_sqisign_lvl3_ref/id2iso.h | 280 + .../the-sqisign_sqisign_lvl3_ref/ideal.c | 323 ++ .../the-sqisign_sqisign_lvl3_ref/intbig.c | 791 +++ .../the-sqisign_sqisign_lvl3_ref/intbig.h | 303 ++ .../intbig_internal.h | 123 + .../the-sqisign_sqisign_lvl3_ref/integers.c | 116 + .../the-sqisign_sqisign_lvl3_ref/internal.h | 812 +++ .../the-sqisign_sqisign_lvl3_ref/isog.h | 28 + .../isog_chains.c | 241 + .../the-sqisign_sqisign_lvl3_ref/keygen.c | 64 + .../sqisign/the-sqisign_sqisign_lvl3_ref/l2.c | 190 + .../the-sqisign_sqisign_lvl3_ref/lat_ball.c | 139 + .../the-sqisign_sqisign_lvl3_ref/lattice.c | 328 ++ .../lll_applications.c | 127 + .../lll_internals.h | 238 + .../the-sqisign_sqisign_lvl3_ref/lvlx.cmake | 12 + .../the-sqisign_sqisign_lvl3_ref/mem.c | 23 + .../the-sqisign_sqisign_lvl3_ref/mem.h | 24 + .../mini-gmp-extra.c | 73 + .../mini-gmp-extra.h | 19 + .../the-sqisign_sqisign_lvl3_ref/mini-gmp.c | 4671 +++++++++++++++++ .../the-sqisign_sqisign_lvl3_ref/mini-gmp.h | 311 ++ .../sqisign/the-sqisign_sqisign_lvl3_ref/mp.c | 357 ++ .../sqisign/the-sqisign_sqisign_lvl3_ref/mp.h | 88 + 
.../the-sqisign_sqisign_lvl3_ref/normeq.c | 369 ++ .../the-sqisign_sqisign_lvl3_ref/printer.c | 132 + .../the-sqisign_sqisign_lvl3_ref/quaternion.h | 708 +++ .../quaternion_constants.h | 6 + .../quaternion_data.c | 3626 +++++++++++++ .../quaternion_data.h | 12 + .../randombytes_ctrdrbg.c | 161 + .../randombytes_system.c | 431 ++ .../the-sqisign_sqisign_lvl3_ref/rationals.c | 233 + .../the-sqisign_sqisign_lvl3_ref/rng.h | 43 + .../the-sqisign_sqisign_lvl3_ref/sig.h | 85 + .../the-sqisign_sqisign_lvl3_ref/sign.c | 634 +++ .../the-sqisign_sqisign_lvl3_ref/signature.h | 97 + .../the-sqisign_sqisign_lvl3_ref/sqisign.c | 146 + .../sqisign_namespace.h | 1071 ++++ .../sqisign_parameters.txt | 3 + .../theta_isogenies.c | 1283 +++++ .../theta_isogenies.h | 18 + .../theta_structure.c | 78 + .../theta_structure.h | 135 + .../the-sqisign_sqisign_lvl3_ref/tools.c | 75 + .../the-sqisign_sqisign_lvl3_ref/tools.h | 49 + .../torsion_constants.c | 43 + .../torsion_constants.h | 6 + .../the-sqisign_sqisign_lvl3_ref/tutil.h | 36 + .../verification.h | 123 + .../the-sqisign_sqisign_lvl3_ref/verify.c | 309 ++ .../the-sqisign_sqisign_lvl3_ref/xeval.c | 64 + .../the-sqisign_sqisign_lvl3_ref/xisog.c | 61 + .../COPYING.LGPL | 165 + .../LICENSE | 202 + .../the-sqisign_sqisign_lvl5_broadwell/NOTICE | 21 + .../the-sqisign_sqisign_lvl5_broadwell/aes.h | 29 + .../aes_ni.c | 258 + .../aes_ni.h | 85 + .../the-sqisign_sqisign_lvl5_broadwell/api.c | 32 + .../the-sqisign_sqisign_lvl5_broadwell/api.h | 32 + .../asm_preamble.h | 22 + .../basis.c | 416 ++ .../bench.h | 126 + .../bench_macos.h | 143 + .../biextension.c | 770 +++ .../biextension.h | 82 + .../common.c | 88 + .../ctr_drbg.c | 201 + .../ctr_drbg.h | 78 + .../the-sqisign_sqisign_lvl5_broadwell/defs.h | 63 + .../dim2id2iso.c | 1172 +++++ .../e0_basis.c | 55 + .../e0_basis.h | 3 + .../the-sqisign_sqisign_lvl5_broadwell/ec.c | 665 +++ .../the-sqisign_sqisign_lvl5_broadwell/ec.h | 668 +++ .../ec_jac.c | 335 ++ .../ec_params.c | 4 + .../ec_params.h | 12 + .../encode_signature.c | 208 + .../encode_verification.c | 220 + .../encoded_sizes.h | 11 + .../endomorphism_action.c | 3336 ++++++++++++ .../endomorphism_action.h | 31 + .../fips202.c | 876 ++++ .../fips202.h | 171 + .../the-sqisign_sqisign_lvl5_broadwell/fp.c | 112 + .../the-sqisign_sqisign_lvl5_broadwell/fp.h | 136 + .../the-sqisign_sqisign_lvl5_broadwell/fp2.c | 188 + .../the-sqisign_sqisign_lvl5_broadwell/fp2.h | 49 + .../the-sqisign_sqisign_lvl5_broadwell/fp2x.h | 162 + .../fp_asm.S | 784 +++ .../fp_constants.h | 17 + .../gf27500.c | 839 +++ .../gf27500.h | 1409 +++++ .../the-sqisign_sqisign_lvl5_broadwell/hd.c | 93 + .../the-sqisign_sqisign_lvl5_broadwell/hd.h | 435 ++ .../hd_splitting_transforms.c | 143 + .../hd_splitting_transforms.h | 18 + .../id2iso.c | 338 ++ .../id2iso.h | 280 + .../intbig.h | 303 ++ .../the-sqisign_sqisign_lvl5_broadwell/isog.h | 28 + .../isog_chains.c | 241 + .../keygen.c | 64 + .../lvlx.cmake | 12 + .../the-sqisign_sqisign_lvl5_broadwell/mem.c | 23 + .../the-sqisign_sqisign_lvl5_broadwell/mem.h | 24 + .../mini-gmp-extra.c | 73 + .../mini-gmp-extra.h | 19 + .../mini-gmp.c | 4671 +++++++++++++++++ .../mini-gmp.h | 311 ++ .../the-sqisign_sqisign_lvl5_broadwell/mp.h | 88 + .../quaternion.h | 708 +++ .../quaternion_constants.h | 6 + .../quaternion_data.c | 3176 +++++++++++ .../quaternion_data.h | 12 + .../randombytes_arm64crypto.h | 27 + .../randombytes_ctrdrbg_aesni.c | 87 + .../randombytes_system.c | 431 ++ .../the-sqisign_sqisign_lvl5_broadwell/rng.h | 43 + 
.../the-sqisign_sqisign_lvl5_broadwell/sig.h | 85 + .../the-sqisign_sqisign_lvl5_broadwell/sign.c | 634 +++ .../signature.h | 97 + .../sqisign.c | 146 + .../sqisign_namespace.h | 1071 ++++ .../sqisign_parameters.txt | 3 + .../theta_isogenies.c | 1283 +++++ .../theta_isogenies.h | 18 + .../theta_structure.c | 78 + .../theta_structure.h | 135 + .../tools.c | 75 + .../tools.h | 49 + .../torsion_constants.c | 43 + .../torsion_constants.h | 6 + .../tutil.h | 36 + .../vaes256_key_expansion.S | 122 + .../verification.h | 123 + .../verify.c | 309 ++ .../xeval.c | 64 + .../xisog.c | 61 + .../the-sqisign_sqisign_lvl5_ref/COPYING.LGPL | 165 + .../the-sqisign_sqisign_lvl5_ref/LICENSE | 202 + .../the-sqisign_sqisign_lvl5_ref/NOTICE | 21 + .../the-sqisign_sqisign_lvl5_ref/aes.h | 29 + .../the-sqisign_sqisign_lvl5_ref/aes_c.c | 783 +++ .../the-sqisign_sqisign_lvl5_ref/algebra.c | 280 + .../the-sqisign_sqisign_lvl5_ref/api.c | 32 + .../the-sqisign_sqisign_lvl5_ref/api.h | 32 + .../the-sqisign_sqisign_lvl5_ref/basis.c | 416 ++ .../the-sqisign_sqisign_lvl5_ref/bench.h | 126 + .../bench_macos.h | 143 + .../biextension.c | 770 +++ .../biextension.h | 82 + .../the-sqisign_sqisign_lvl5_ref/common.c | 88 + .../the-sqisign_sqisign_lvl5_ref/dim2.c | 132 + .../the-sqisign_sqisign_lvl5_ref/dim2id2iso.c | 1172 +++++ .../the-sqisign_sqisign_lvl5_ref/dim4.c | 470 ++ .../the-sqisign_sqisign_lvl5_ref/dpe.h | 743 +++ .../the-sqisign_sqisign_lvl5_ref/e0_basis.c | 55 + .../the-sqisign_sqisign_lvl5_ref/e0_basis.h | 3 + .../sqisign/the-sqisign_sqisign_lvl5_ref/ec.c | 665 +++ .../sqisign/the-sqisign_sqisign_lvl5_ref/ec.h | 668 +++ .../the-sqisign_sqisign_lvl5_ref/ec_jac.c | 335 ++ .../the-sqisign_sqisign_lvl5_ref/ec_params.c | 4 + .../the-sqisign_sqisign_lvl5_ref/ec_params.h | 12 + .../encode_signature.c | 208 + .../encode_verification.c | 220 + .../encoded_sizes.h | 11 + .../endomorphism_action.c | 3336 ++++++++++++ .../endomorphism_action.h | 31 + .../the-sqisign_sqisign_lvl5_ref/finit.c | 122 + .../the-sqisign_sqisign_lvl5_ref/fips202.c | 876 ++++ .../the-sqisign_sqisign_lvl5_ref/fips202.h | 171 + .../sqisign/the-sqisign_sqisign_lvl5_ref/fp.c | 15 + .../sqisign/the-sqisign_sqisign_lvl5_ref/fp.h | 48 + .../the-sqisign_sqisign_lvl5_ref/fp2.c | 328 ++ .../the-sqisign_sqisign_lvl5_ref/fp2.h | 41 + .../fp_constants.h | 17 + .../fp_p27500_32.c | 1517 ++++++ .../fp_p27500_64.c | 973 ++++ .../sqisign/the-sqisign_sqisign_lvl5_ref/hd.c | 93 + .../sqisign/the-sqisign_sqisign_lvl5_ref/hd.h | 435 ++ .../hd_splitting_transforms.c | 143 + .../hd_splitting_transforms.h | 18 + .../the-sqisign_sqisign_lvl5_ref/hnf.c | 210 + .../hnf_internal.c | 182 + .../hnf_internal.h | 94 + .../ibz_division.c | 12 + .../the-sqisign_sqisign_lvl5_ref/id2iso.c | 338 ++ .../the-sqisign_sqisign_lvl5_ref/id2iso.h | 280 + .../the-sqisign_sqisign_lvl5_ref/ideal.c | 323 ++ .../the-sqisign_sqisign_lvl5_ref/intbig.c | 791 +++ .../the-sqisign_sqisign_lvl5_ref/intbig.h | 303 ++ .../intbig_internal.h | 123 + .../the-sqisign_sqisign_lvl5_ref/integers.c | 116 + .../the-sqisign_sqisign_lvl5_ref/internal.h | 812 +++ .../the-sqisign_sqisign_lvl5_ref/isog.h | 28 + .../isog_chains.c | 241 + .../the-sqisign_sqisign_lvl5_ref/keygen.c | 64 + .../sqisign/the-sqisign_sqisign_lvl5_ref/l2.c | 190 + .../the-sqisign_sqisign_lvl5_ref/lat_ball.c | 139 + .../the-sqisign_sqisign_lvl5_ref/lattice.c | 328 ++ .../lll_applications.c | 127 + .../lll_internals.h | 238 + .../the-sqisign_sqisign_lvl5_ref/lvlx.cmake | 12 + .../the-sqisign_sqisign_lvl5_ref/mem.c | 23 + 
.../the-sqisign_sqisign_lvl5_ref/mem.h | 24 + .../mini-gmp-extra.c | 73 + .../mini-gmp-extra.h | 19 + .../the-sqisign_sqisign_lvl5_ref/mini-gmp.c | 4671 +++++++++++++++++ .../the-sqisign_sqisign_lvl5_ref/mini-gmp.h | 311 ++ .../sqisign/the-sqisign_sqisign_lvl5_ref/mp.c | 357 ++ .../sqisign/the-sqisign_sqisign_lvl5_ref/mp.h | 88 + .../the-sqisign_sqisign_lvl5_ref/normeq.c | 369 ++ .../the-sqisign_sqisign_lvl5_ref/printer.c | 132 + .../the-sqisign_sqisign_lvl5_ref/quaternion.h | 708 +++ .../quaternion_constants.h | 6 + .../quaternion_data.c | 3176 +++++++++++ .../quaternion_data.h | 12 + .../randombytes_ctrdrbg.c | 161 + .../randombytes_system.c | 431 ++ .../the-sqisign_sqisign_lvl5_ref/rationals.c | 233 + .../the-sqisign_sqisign_lvl5_ref/rng.h | 43 + .../the-sqisign_sqisign_lvl5_ref/sig.h | 85 + .../the-sqisign_sqisign_lvl5_ref/sign.c | 634 +++ .../the-sqisign_sqisign_lvl5_ref/signature.h | 97 + .../the-sqisign_sqisign_lvl5_ref/sqisign.c | 146 + .../sqisign_namespace.h | 1071 ++++ .../sqisign_parameters.txt | 3 + .../theta_isogenies.c | 1283 +++++ .../theta_isogenies.h | 18 + .../theta_structure.c | 78 + .../theta_structure.h | 135 + .../the-sqisign_sqisign_lvl5_ref/tools.c | 75 + .../the-sqisign_sqisign_lvl5_ref/tools.h | 49 + .../torsion_constants.c | 43 + .../torsion_constants.h | 6 + .../the-sqisign_sqisign_lvl5_ref/tutil.h | 36 + .../verification.h | 123 + .../the-sqisign_sqisign_lvl5_ref/verify.c | 309 ++ .../the-sqisign_sqisign_lvl5_ref/xeval.c | 64 + .../the-sqisign_sqisign_lvl5_ref/xisog.c | 61 + tests/KATs/sig/kats.json | 9 + tests/kat_sig.c | 30 + 584 files changed, 199570 insertions(+), 2 deletions(-) create mode 100644 scripts/copy_from_upstream/patches/sqisign_fp.patch create mode 100644 src/sig/sqisign/CMakeLists.txt create mode 100644 src/sig/sqisign/sig_sqisign.h create mode 100644 src/sig/sqisign/sig_sqisign_lvl1.c create mode 100644 src/sig/sqisign/sig_sqisign_lvl3.c create mode 100644 src/sig/sqisign/sig_sqisign_lvl5.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/LICENSE create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/NOTICE create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes_ni.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes_ni.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/api.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/api.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/asm_preamble.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/basis.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/bench.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/bench_macos.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/biextension.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/biextension.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/common.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/defs.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/e0_basis.c create mode 100644 
src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/e0_basis.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec_jac.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec_params.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec_params.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/encode_signature.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/encode_verification.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/encoded_sizes.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fips202.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fips202.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp2.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp2.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp2x.h create mode 100755 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp_asm.S create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp_constants.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/gf5248.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/gf5248.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/id2iso.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/id2iso.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/intbig.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/isog.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/isog_chains.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/keygen.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lvlx.cmake create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mem.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mem.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mp.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion_constants.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion_data.c create mode 100644 
src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion_data.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_arm64crypto.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_ctrdrbg_aesni.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_system.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/rng.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sig.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sign.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/signature.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign_namespace.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign_parameters.txt create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/theta_structure.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/theta_structure.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/tools.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/tools.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/torsion_constants.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/torsion_constants.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/tutil.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/vaes256_key_expansion.S create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/verification.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/verify.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/xeval.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/xisog.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/COPYING.LGPL create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/LICENSE create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/NOTICE create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/aes.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/aes_c.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/algebra.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/api.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/api.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/basis.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/bench.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/bench_macos.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/biextension.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/biextension.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/common.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim2.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim2id2iso.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim4.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dpe.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/e0_basis.c create mode 100644 
src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/e0_basis.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec_jac.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec_params.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec_params.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/encode_signature.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/encode_verification.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/encoded_sizes.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/endomorphism_action.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/endomorphism_action.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/finit.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fips202.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fips202.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp2.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp2.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_constants.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf_internal.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf_internal.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ibz_division.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/id2iso.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/id2iso.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ideal.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/intbig.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/intbig.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/intbig_internal.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/integers.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/internal.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/isog.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/isog_chains.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/keygen.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/l2.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lat_ball.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lattice.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lll_applications.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lll_internals.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lvlx.cmake create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mem.c create mode 
100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mem.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mp.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mp.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/normeq.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/printer.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion_constants.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion_data.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion_data.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/randombytes_ctrdrbg.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/randombytes_system.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/rationals.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/rng.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sig.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sign.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/signature.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign_namespace.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign_parameters.txt create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/theta_isogenies.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/theta_isogenies.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/theta_structure.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/theta_structure.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/tools.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/tools.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/torsion_constants.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/torsion_constants.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/tutil.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/verification.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/verify.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/xeval.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/xisog.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/COPYING.LGPL create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/LICENSE create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/NOTICE create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes_ni.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes_ni.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/api.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/api.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/asm_preamble.h create mode 100644 
src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/basis.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/bench.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/bench_macos.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/biextension.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/biextension.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/common.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/defs.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/e0_basis.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/e0_basis.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec_jac.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec_params.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec_params.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/encode_signature.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/encode_verification.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/encoded_sizes.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fips202.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fips202.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp2.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp2.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp2x.h create mode 100755 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp_asm.S create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp_constants.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/gf65376.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/gf65376.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/id2iso.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/id2iso.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/intbig.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/isog.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/isog_chains.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/keygen.c create mode 100644 
src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lvlx.cmake create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mem.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mem.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mp.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion_constants.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion_data.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion_data.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_arm64crypto.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_ctrdrbg_aesni.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_system.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/rng.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sig.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sign.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/signature.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign_namespace.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign_parameters.txt create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/theta_structure.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/theta_structure.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/tools.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/tools.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/torsion_constants.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/torsion_constants.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/tutil.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/vaes256_key_expansion.S create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/verification.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/verify.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/xeval.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/xisog.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/COPYING.LGPL create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/LICENSE create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/NOTICE create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/aes.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/aes_c.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/algebra.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/api.c create mode 100644 
src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/api.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/basis.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/bench.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/bench_macos.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/biextension.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/biextension.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/common.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim2.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim2id2iso.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim4.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dpe.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/e0_basis.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/e0_basis.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec_jac.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec_params.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec_params.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/encode_signature.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/encode_verification.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/encoded_sizes.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/endomorphism_action.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/endomorphism_action.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/finit.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fips202.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fips202.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp2.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp2.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_constants.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf_internal.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf_internal.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ibz_division.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/id2iso.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/id2iso.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ideal.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/intbig.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/intbig.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/intbig_internal.h create mode 100644 
src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/integers.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/internal.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/isog.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/isog_chains.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/keygen.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/l2.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lat_ball.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lattice.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lll_applications.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lll_internals.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lvlx.cmake create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mem.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mem.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mp.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mp.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/normeq.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/printer.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion_constants.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion_data.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion_data.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/randombytes_ctrdrbg.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/randombytes_system.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/rationals.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/rng.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sig.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sign.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/signature.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign_namespace.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign_parameters.txt create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/theta_isogenies.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/theta_isogenies.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/theta_structure.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/theta_structure.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/tools.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/tools.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/torsion_constants.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/torsion_constants.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/tutil.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/verification.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/verify.c create mode 100644 
src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/xeval.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/xisog.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/COPYING.LGPL create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/LICENSE create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/NOTICE create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes_ni.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes_ni.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/api.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/api.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/asm_preamble.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/basis.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/bench.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/bench_macos.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/biextension.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/biextension.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/common.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/defs.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/e0_basis.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/e0_basis.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec_jac.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec_params.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec_params.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/encode_signature.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/encode_verification.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/encoded_sizes.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fips202.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fips202.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp2.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp2.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp2x.h create mode 100755 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp_asm.S create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp_constants.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/gf27500.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/gf27500.h create mode 100644 
src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/id2iso.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/id2iso.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/intbig.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/isog.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/isog_chains.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/keygen.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lvlx.cmake create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mem.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mem.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mp.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion_constants.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion_data.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion_data.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_arm64crypto.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_ctrdrbg_aesni.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_system.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/rng.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sig.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sign.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/signature.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign_namespace.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign_parameters.txt create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/theta_structure.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/theta_structure.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/tools.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/tools.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/torsion_constants.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/torsion_constants.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/tutil.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/vaes256_key_expansion.S create mode 100644 
src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/verification.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/verify.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/xeval.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/xisog.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/COPYING.LGPL create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/LICENSE create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/NOTICE create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/aes.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/aes_c.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/algebra.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/api.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/api.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/basis.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/bench.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/bench_macos.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/biextension.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/biextension.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/common.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim2.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim2id2iso.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim4.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dpe.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/e0_basis.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/e0_basis.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec_jac.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec_params.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec_params.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/encode_signature.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/encode_verification.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/encoded_sizes.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/endomorphism_action.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/endomorphism_action.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/finit.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fips202.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fips202.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp2.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp2.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_constants.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.c create mode 100644 
src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf_internal.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf_internal.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ibz_division.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/id2iso.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/id2iso.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ideal.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/intbig.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/intbig.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/intbig_internal.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/integers.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/internal.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/isog.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/isog_chains.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/keygen.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/l2.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lat_ball.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lattice.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lll_applications.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lll_internals.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lvlx.cmake create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mem.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mem.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mp.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mp.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/normeq.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/printer.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion_constants.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion_data.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion_data.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/randombytes_ctrdrbg.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/randombytes_system.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/rationals.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/rng.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sig.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sign.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/signature.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign_namespace.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign_parameters.txt create mode 100644 
src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/theta_isogenies.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/theta_isogenies.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/theta_structure.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/theta_structure.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/tools.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/tools.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/torsion_constants.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/torsion_constants.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/tutil.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/verification.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/verify.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/xeval.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/xisog.c diff --git a/.CMake/alg_support.cmake b/.CMake/alg_support.cmake index 06fcb095ef..83f94ae8d0 100644 --- a/.CMake/alg_support.cmake +++ b/.CMake/alg_support.cmake @@ -222,8 +222,28 @@ cmake_dependent_option(OQS_ENABLE_SIG_snova_SNOVA_37_8_4 "" ON "OQS_ENABLE_SIG_S cmake_dependent_option(OQS_ENABLE_SIG_snova_SNOVA_24_5_5 "" ON "OQS_ENABLE_SIG_SNOVA" OFF) cmake_dependent_option(OQS_ENABLE_SIG_snova_SNOVA_60_10_4 "" ON "OQS_ENABLE_SIG_SNOVA" OFF) cmake_dependent_option(OQS_ENABLE_SIG_snova_SNOVA_29_6_5 "" ON "OQS_ENABLE_SIG_SNOVA" OFF) + +option(OQS_ENABLE_SIG_SQISIGN "Enable sqisign algorithm family" ON) +cmake_dependent_option(OQS_ENABLE_SIG_sqisign_lvl1 "" ON "OQS_ENABLE_SIG_SQISIGN" OFF) +cmake_dependent_option(OQS_ENABLE_SIG_sqisign_lvl3 "" ON "OQS_ENABLE_SIG_SQISIGN" OFF) +cmake_dependent_option(OQS_ENABLE_SIG_sqisign_lvl5 "" ON "OQS_ENABLE_SIG_SQISIGN" OFF) ##### OQS_COPY_FROM_UPSTREAM_FRAGMENT_ADD_ENABLE_BY_ALG_END + +# TODO: This may not be the right place for this block; alternatively, restrict SQIsign to 64-bit systems only. +if(CMAKE_SIZEOF_VOID_P MATCHES "4") + # TODO: The broadwell builds should also be disabled here. + add_compile_definitions(RADIX_32) + add_compile_definitions(GMP_LIMB_BITS=32) + message(STATUS "SQIsign using 32-bit arithmetic") +else() + add_compile_definitions(RADIX_64) + # This is potentially an issue for a 64-bit system without uint128_t support.
+ add_compile_definitions(HAVE_UINT128) + add_compile_definitions(GMP_LIMB_BITS=64) + message(STATUS "SQIsign using 64-bit arithmetic") +endif() + ##### OQS_COPY_FROM_LIBJADE_FRAGMENT_ADD_ENABLE_BY_ALG_START if ((OQS_LIBJADE_BUILD STREQUAL "ON")) @@ -1010,6 +1030,25 @@ if(OQS_DIST_ARM64_V8_BUILD OR (OQS_USE_ARM_NEON_INSTRUCTIONS AND OQS_USE_ARM_NEO endif() endif() + +if(CMAKE_SYSTEM_NAME MATCHES "Darwin|Linux") +if(OQS_DIST_X86_64_BUILD OR (OQS_USE_AVX2_INSTRUCTIONS)) + cmake_dependent_option(OQS_ENABLE_SIG_sqisign_lvl1_broadwell "" ON "OQS_ENABLE_SIG_sqisign_lvl1" OFF) +endif() +endif() + +if(CMAKE_SYSTEM_NAME MATCHES "Darwin|Linux") +if(OQS_DIST_X86_64_BUILD OR (OQS_USE_AVX2_INSTRUCTIONS)) + cmake_dependent_option(OQS_ENABLE_SIG_sqisign_lvl3_broadwell "" ON "OQS_ENABLE_SIG_sqisign_lvl3" OFF) +endif() +endif() + +if(CMAKE_SYSTEM_NAME MATCHES "Darwin|Linux") +if(OQS_DIST_X86_64_BUILD OR (OQS_USE_AVX2_INSTRUCTIONS)) + cmake_dependent_option(OQS_ENABLE_SIG_sqisign_lvl5_broadwell "" ON "OQS_ENABLE_SIG_sqisign_lvl5" OFF) +endif() +endif() + ##### OQS_COPY_FROM_UPSTREAM_FRAGMENT_ADD_ENABLE_BY_ALG_CONDITIONAL_END ##### OQS_COPY_FROM_LIBJADE_FRAGMENT_ADD_ENABLE_BY_ALG_CONDITIONAL_START diff --git a/CMakeLists.txt b/CMakeLists.txt index 024ec92300..f231e797ac 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -270,6 +270,9 @@ endif() if(OQS_ENABLE_SIG_SNOVA) set(PUBLIC_HEADERS ${PUBLIC_HEADERS} ${PROJECT_SOURCE_DIR}/src/sig/snova/sig_snova.h) endif() +if(OQS_ENABLE_SIG_SQISIGN) + set(PUBLIC_HEADERS ${PUBLIC_HEADERS} ${PROJECT_SOURCE_DIR}/src/sig/sqisign/sig_sqisign.h) +endif() ##### OQS_COPY_FROM_UPSTREAM_FRAGMENT_INCLUDE_HEADERS_END if(OQS_ENABLE_SIG_STFL_XMSS) set(PUBLIC_HEADERS ${PUBLIC_HEADERS} ${PROJECT_SOURCE_DIR}/src/sig_stfl/xmss/sig_stfl_xmss.h) diff --git a/scripts/copy_from_upstream/copy_from_upstream.yml b/scripts/copy_from_upstream/copy_from_upstream.yml index 4f419524e3..af05743bfc 100644 --- a/scripts/copy_from_upstream/copy_from_upstream.yml +++ b/scripts/copy_from_upstream/copy_from_upstream.yml @@ -91,6 +91,16 @@ upstreams: git_commit: 1c3ca6f4f7286c0bde98d7d6f222cf63b9d52bff sig_scheme_path: '.' sig_meta_path: 'liboqs/META/{pretty_name_full}_META.yml' + + - + name: the-sqisign + git_url: https://github.com/shane-digi/the-sqisign.git + git_branch: develop_oqs + git_commit: 6ae72148fd136c19e1d3f4ba493a96012071bb89 + sig_scheme_path: '.'
+ sig_meta_path: 'META/{pqclean_scheme}.yml' + patches: [sqisign_fp.patch] + kems: - name: classic_mceliece @@ -593,3 +603,23 @@ sigs: pqclean_scheme: SNOVA_29_6_5 pretty_name_full: SNOVA_29_6_5 signed_msg_order: sig_then_msg + - + name: sqisign + default_implementation: ref + upstream_location: the-sqisign + schemes: + - + scheme: "lvl1" + pqclean_scheme: sqisign_lvl1 + pretty_name_full: SQIsign-lvl1 + signed_msg_order: sig_then_msg + - + scheme: "lvl3" + pqclean_scheme: sqisign_lvl3 + pretty_name_full: SQIsign-lvl3 + signed_msg_order: sig_then_msg + - + scheme: "lvl5" + pqclean_scheme: sqisign_lvl5 + pretty_name_full: SQIsign-lvl5 + signed_msg_order: sig_then_msg diff --git a/scripts/copy_from_upstream/patches/sqisign_fp.patch b/scripts/copy_from_upstream/patches/sqisign_fp.patch new file mode 100644 index 0000000000..d704fedc60 --- /dev/null +++ b/scripts/copy_from_upstream/patches/sqisign_fp.patch @@ -0,0 +1,108 @@ +diff --git a/src/gf/ref/lvl1/fp_p5248_32.c b/src/gf/ref/lvl1/fp_p5248_32.c +index a52add3..62e5491 100644 +--- a/src/gf/ref/lvl1/fp_p5248_32.c ++++ b/src/gf/ref/lvl1/fp_p5248_32.c +@@ -1,6 +1,7 @@ + // clang-format off + // Command line : python monty.py 32 + // 0x4ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ++#ifdef RADIX_32 + + #include + #include +@@ -940,3 +941,5 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) + fp_add(d, d, &a); + } + } ++ ++#endif /* RADIX_32 */ +diff --git a/src/gf/ref/lvl1/fp_p5248_64.c b/src/gf/ref/lvl1/fp_p5248_64.c +index cde28dd..57c2131 100644 +--- a/src/gf/ref/lvl1/fp_p5248_64.c ++++ b/src/gf/ref/lvl1/fp_p5248_64.c +@@ -1,6 +1,7 @@ + // clang-format off + // Command line : python monty.py 64 + // 0x4ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ++#ifdef RADIX_64 + + #include + #include +@@ -789,3 +790,5 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) + fp_add(d, d, &a); + } + } ++ ++#endif /* RADIX_64 */ +diff --git a/src/gf/ref/lvl3/fp_p65376_32.c b/src/gf/ref/lvl3/fp_p65376_32.c +index 1483461..2aaad84 100644 +--- a/src/gf/ref/lvl3/fp_p65376_32.c ++++ b/src/gf/ref/lvl3/fp_p65376_32.c +@@ -1,6 +1,7 @@ + // clang-format off + // Command line : python monty.py 32 + // 0x40ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ++#ifdef RADIX_32 + + #include + #include +@@ -1229,3 +1230,5 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) + fp_add(d, d, &a); + } + } ++ ++#endif +diff --git a/src/gf/ref/lvl3/fp_p65376_64.c b/src/gf/ref/lvl3/fp_p65376_64.c +index 539cde5..9ac5fc5 100644 +--- a/src/gf/ref/lvl3/fp_p65376_64.c ++++ b/src/gf/ref/lvl3/fp_p65376_64.c +@@ -1,6 +1,7 @@ + // clang-format off + // Command line : python monty.py 64 + // 0x40ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ++#ifdef RADIX_64 + + #include + #include +@@ -870,3 +871,5 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) + fp_add(d, d, &a); + } + } ++ ++#endif +diff --git a/src/gf/ref/lvl5/fp_p27500_32.c b/src/gf/ref/lvl5/fp_p27500_32.c +index ecf5ea7..f002495 100644 +--- a/src/gf/ref/lvl5/fp_p27500_32.c ++++ b/src/gf/ref/lvl5/fp_p27500_32.c +@@ -1,6 +1,7 @@ + // clang-format off + // Command line : python monty.py 32 + // 0x1afffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ++#ifdef RADIX_32 + + #include + #include +@@ -1512,3 +1513,5 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) + fp_add(d, d, &a); + } + } ++ ++#endif +diff 
--git a/src/gf/ref/lvl5/fp_p27500_64.c b/src/gf/ref/lvl5/fp_p27500_64.c +index 33bb75e..c187e87 100644 +--- a/src/gf/ref/lvl5/fp_p27500_64.c ++++ b/src/gf/ref/lvl5/fp_p27500_64.c +@@ -1,6 +1,7 @@ + // clang-format off + // Command line : python monty.py 64 + // 0x1afffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ++#ifdef RADIX_64 + + #include + #include +@@ -968,3 +969,5 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) + fp_add(d, d, &a); + } + } ++ ++#endif diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index ec059c7627..f743596496 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -71,6 +71,10 @@ if(OQS_ENABLE_SIG_SNOVA) add_subdirectory(sig/snova) set(SIG_OBJS ${SIG_OBJS} ${SNOVA_OBJS}) endif() +if(OQS_ENABLE_SIG_SQISIGN) + add_subdirectory(sig/sqisign) + set(SIG_OBJS ${SIG_OBJS} ${SQISIGN_OBJS}) +endif() ##### OQS_COPY_FROM_UPSTREAM_FRAGMENT_ADD_ALG_OBJECTS_END if(OQS_ENABLE_SIG_STFL_XMSS) diff --git a/src/oqsconfig.h.cmake b/src/oqsconfig.h.cmake index 5d02314c2b..f0a83941e1 100644 --- a/src/oqsconfig.h.cmake +++ b/src/oqsconfig.h.cmake @@ -330,6 +330,14 @@ #cmakedefine OQS_ENABLE_SIG_snova_SNOVA_29_6_5 1 #cmakedefine OQS_ENABLE_SIG_snova_SNOVA_29_6_5_avx2 1 #cmakedefine OQS_ENABLE_SIG_snova_SNOVA_29_6_5_neon 1 + +#cmakedefine OQS_ENABLE_SIG_SQISIGN 1 +#cmakedefine OQS_ENABLE_SIG_sqisign_lvl1 1 +#cmakedefine OQS_ENABLE_SIG_sqisign_lvl1_broadwell 1 +#cmakedefine OQS_ENABLE_SIG_sqisign_lvl3 1 +#cmakedefine OQS_ENABLE_SIG_sqisign_lvl3_broadwell 1 +#cmakedefine OQS_ENABLE_SIG_sqisign_lvl5 1 +#cmakedefine OQS_ENABLE_SIG_sqisign_lvl5_broadwell 1 ///// OQS_COPY_FROM_UPSTREAM_FRAGMENT_ADD_ALG_ENABLE_DEFINES_END ///// OQS_COPY_FROM_LIBJADE_FRAGMENT_ADD_ALG_ENABLE_DEFINES_START diff --git a/src/sig/sig.c b/src/sig/sig.c index 029931d7bd..d06336e5d3 100644 --- a/src/sig/sig.c +++ b/src/sig/sig.c @@ -82,7 +82,10 @@ OQS_API const char *OQS_SIG_alg_identifier(size_t i) { OQS_SIG_alg_snova_SNOVA_37_8_4, OQS_SIG_alg_snova_SNOVA_24_5_5, OQS_SIG_alg_snova_SNOVA_60_10_4, - OQS_SIG_alg_snova_SNOVA_29_6_5,///// OQS_COPY_FROM_UPSTREAM_FRAGMENT_ALG_IDENTIFIER_END + OQS_SIG_alg_snova_SNOVA_29_6_5, + OQS_SIG_alg_sqisign_lvl1, + OQS_SIG_alg_sqisign_lvl3, + OQS_SIG_alg_sqisign_lvl5,///// OQS_COPY_FROM_UPSTREAM_FRAGMENT_ALG_IDENTIFIER_END }; if (i >= OQS_SIG_algs_length) { return NULL; @@ -576,6 +579,27 @@ OQS_API int OQS_SIG_alg_is_enabled(const char *method_name) { #else return 0; #endif + + } else if (0 == strcasecmp(method_name, OQS_SIG_alg_sqisign_lvl1)) { +#ifdef OQS_ENABLE_SIG_sqisign_lvl1 + return 1; +#else + return 0; +#endif + + } else if (0 == strcasecmp(method_name, OQS_SIG_alg_sqisign_lvl3)) { +#ifdef OQS_ENABLE_SIG_sqisign_lvl3 + return 1; +#else + return 0; +#endif + + } else if (0 == strcasecmp(method_name, OQS_SIG_alg_sqisign_lvl5)) { +#ifdef OQS_ENABLE_SIG_sqisign_lvl5 + return 1; +#else + return 0; +#endif ///// OQS_COPY_FROM_UPSTREAM_FRAGMENT_ENABLED_CASE_END } else { return 0; @@ -1063,6 +1087,27 @@ OQS_API OQS_SIG *OQS_SIG_new(const char *method_name) { #else return NULL; #endif + + } else if (0 == strcasecmp(method_name, OQS_SIG_alg_sqisign_lvl1)) { +#ifdef OQS_ENABLE_SIG_sqisign_lvl1 + return OQS_SIG_sqisign_lvl1_new(); +#else + return NULL; +#endif + + } else if (0 == strcasecmp(method_name, OQS_SIG_alg_sqisign_lvl3)) { +#ifdef OQS_ENABLE_SIG_sqisign_lvl3 + return OQS_SIG_sqisign_lvl3_new(); +#else + return NULL; +#endif + + } else if (0 == strcasecmp(method_name, 
OQS_SIG_alg_sqisign_lvl5)) { +#ifdef OQS_ENABLE_SIG_sqisign_lvl5 + return OQS_SIG_sqisign_lvl5_new(); +#else + return NULL; +#endif ///// OQS_COPY_FROM_UPSTREAM_FRAGMENT_NEW_CASE_END // EDIT-WHEN-ADDING-SIG } else { diff --git a/src/sig/sig.h b/src/sig/sig.h index d77787fa9d..5bb546a910 100644 --- a/src/sig/sig.h +++ b/src/sig/sig.h @@ -168,12 +168,18 @@ extern "C" { #define OQS_SIG_alg_snova_SNOVA_60_10_4 "SNOVA_60_10_4" /** Algorithm identifier for SNOVA_29_6_5 */ #define OQS_SIG_alg_snova_SNOVA_29_6_5 "SNOVA_29_6_5" +/** Algorithm identifier for SQIsign-lvl1 */ +#define OQS_SIG_alg_sqisign_lvl1 "SQIsign-lvl1" +/** Algorithm identifier for SQIsign-lvl3 */ +#define OQS_SIG_alg_sqisign_lvl3 "SQIsign-lvl3" +/** Algorithm identifier for SQIsign-lvl5 */ +#define OQS_SIG_alg_sqisign_lvl5 "SQIsign-lvl5" ///// OQS_COPY_FROM_UPSTREAM_FRAGMENT_ALG_IDENTIFIER_END // EDIT-WHEN-ADDING-SIG ///// OQS_COPY_FROM_UPSTREAM_FRAGMENT_ALGS_LENGTH_START /** Number of algorithm identifiers above. */ -#define OQS_SIG_algs_length 68 +#define OQS_SIG_algs_length 71 ///// OQS_COPY_FROM_UPSTREAM_FRAGMENT_ALGS_LENGTH_END /** @@ -445,6 +451,9 @@ OQS_API bool OQS_SIG_supports_ctx_str(const char *alg_name); #ifdef OQS_ENABLE_SIG_SNOVA #include #endif /* OQS_ENABLE_SIG_SNOVA */ +#ifdef OQS_ENABLE_SIG_SQISIGN +#include +#endif /* OQS_ENABLE_SIG_SQISIGN */ ///// OQS_COPY_FROM_UPSTREAM_FRAGMENT_INCLUDE_END // EDIT-WHEN-ADDING-SIG diff --git a/src/sig/sqisign/CMakeLists.txt b/src/sig/sqisign/CMakeLists.txt new file mode 100644 index 0000000000..592ab95cab --- /dev/null +++ b/src/sig/sqisign/CMakeLists.txt @@ -0,0 +1,62 @@ +# SPDX-License-Identifier: MIT + +# This file was generated by +# scripts/copy_from_upstream/copy_from_upstream.py + +set(_SQISIGN_OBJS "") + +if(OQS_ENABLE_SIG_sqisign_lvl1) + add_library(sqisign_lvl1_ref OBJECT sig_sqisign_lvl1.c the-sqisign_sqisign_lvl1_ref/aes_c.c the-sqisign_sqisign_lvl1_ref/algebra.c the-sqisign_sqisign_lvl1_ref/api.c the-sqisign_sqisign_lvl1_ref/basis.c the-sqisign_sqisign_lvl1_ref/biextension.c the-sqisign_sqisign_lvl1_ref/common.c the-sqisign_sqisign_lvl1_ref/dim2.c the-sqisign_sqisign_lvl1_ref/dim2id2iso.c the-sqisign_sqisign_lvl1_ref/dim4.c the-sqisign_sqisign_lvl1_ref/e0_basis.c the-sqisign_sqisign_lvl1_ref/ec.c the-sqisign_sqisign_lvl1_ref/ec_jac.c the-sqisign_sqisign_lvl1_ref/ec_params.c the-sqisign_sqisign_lvl1_ref/encode_signature.c the-sqisign_sqisign_lvl1_ref/encode_verification.c the-sqisign_sqisign_lvl1_ref/endomorphism_action.c the-sqisign_sqisign_lvl1_ref/finit.c the-sqisign_sqisign_lvl1_ref/fips202.c the-sqisign_sqisign_lvl1_ref/fp.c the-sqisign_sqisign_lvl1_ref/fp2.c the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c the-sqisign_sqisign_lvl1_ref/hd.c the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl1_ref/hnf.c the-sqisign_sqisign_lvl1_ref/hnf_internal.c the-sqisign_sqisign_lvl1_ref/ibz_division.c the-sqisign_sqisign_lvl1_ref/id2iso.c the-sqisign_sqisign_lvl1_ref/ideal.c the-sqisign_sqisign_lvl1_ref/intbig.c the-sqisign_sqisign_lvl1_ref/integers.c the-sqisign_sqisign_lvl1_ref/isog_chains.c the-sqisign_sqisign_lvl1_ref/keygen.c the-sqisign_sqisign_lvl1_ref/l2.c the-sqisign_sqisign_lvl1_ref/lat_ball.c the-sqisign_sqisign_lvl1_ref/lattice.c the-sqisign_sqisign_lvl1_ref/lll_applications.c the-sqisign_sqisign_lvl1_ref/mem.c the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl1_ref/mini-gmp.c the-sqisign_sqisign_lvl1_ref/mp.c the-sqisign_sqisign_lvl1_ref/normeq.c 
the-sqisign_sqisign_lvl1_ref/printer.c the-sqisign_sqisign_lvl1_ref/quaternion_data.c the-sqisign_sqisign_lvl1_ref/randombytes_ctrdrbg.c the-sqisign_sqisign_lvl1_ref/randombytes_system.c the-sqisign_sqisign_lvl1_ref/rationals.c the-sqisign_sqisign_lvl1_ref/sign.c the-sqisign_sqisign_lvl1_ref/sqisign.c the-sqisign_sqisign_lvl1_ref/theta_isogenies.c the-sqisign_sqisign_lvl1_ref/theta_structure.c the-sqisign_sqisign_lvl1_ref/tools.c the-sqisign_sqisign_lvl1_ref/torsion_constants.c the-sqisign_sqisign_lvl1_ref/verify.c the-sqisign_sqisign_lvl1_ref/xeval.c the-sqisign_sqisign_lvl1_ref/xisog.c) + target_compile_options(sqisign_lvl1_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl1 -DRANDOMBYTES_SYSTEM=ON) + target_include_directories(sqisign_lvl1_ref PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl1_ref) + target_include_directories(sqisign_lvl1_ref PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) + target_compile_options(sqisign_lvl1_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl1 -DRANDOMBYTES_SYSTEM=ON) + set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) +endif() + +if(OQS_ENABLE_SIG_sqisign_lvl1_broadwell) + add_library(sqisign_lvl1_broadwell OBJECT the-sqisign_sqisign_lvl1_broadwell/aes_ni.c the-sqisign_sqisign_lvl1_broadwell/api.c the-sqisign_sqisign_lvl1_broadwell/basis.c the-sqisign_sqisign_lvl1_broadwell/biextension.c the-sqisign_sqisign_lvl1_broadwell/common.c the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.c the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl1_broadwell/e0_basis.c the-sqisign_sqisign_lvl1_broadwell/ec.c the-sqisign_sqisign_lvl1_broadwell/ec_jac.c the-sqisign_sqisign_lvl1_broadwell/ec_params.c the-sqisign_sqisign_lvl1_broadwell/encode_signature.c the-sqisign_sqisign_lvl1_broadwell/encode_verification.c the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl1_broadwell/fips202.c the-sqisign_sqisign_lvl1_broadwell/fp.c the-sqisign_sqisign_lvl1_broadwell/fp2.c the-sqisign_sqisign_lvl1_broadwell/fp_asm.S the-sqisign_sqisign_lvl1_broadwell/gf5248.c the-sqisign_sqisign_lvl1_broadwell/hd.c the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl1_broadwell/id2iso.c the-sqisign_sqisign_lvl1_broadwell/isog_chains.c the-sqisign_sqisign_lvl1_broadwell/keygen.c the-sqisign_sqisign_lvl1_broadwell/mem.c the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl1_broadwell/mini-gmp.c the-sqisign_sqisign_lvl1_broadwell/quaternion_data.c the-sqisign_sqisign_lvl1_broadwell/randombytes_ctrdrbg_aesni.c the-sqisign_sqisign_lvl1_broadwell/randombytes_system.c the-sqisign_sqisign_lvl1_broadwell/sign.c the-sqisign_sqisign_lvl1_broadwell/sqisign.c the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl1_broadwell/theta_structure.c the-sqisign_sqisign_lvl1_broadwell/tools.c the-sqisign_sqisign_lvl1_broadwell/torsion_constants.c the-sqisign_sqisign_lvl1_broadwell/vaes256_key_expansion.S the-sqisign_sqisign_lvl1_broadwell/verify.c the-sqisign_sqisign_lvl1_broadwell/xeval.c the-sqisign_sqisign_lvl1_broadwell/xisog.c) + target_include_directories(sqisign_lvl1_broadwell PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl1_broadwell) + target_include_directories(sqisign_lvl1_broadwell PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) + target_compile_options(sqisign_lvl1_broadwell PRIVATE -mavx2) + target_compile_options(sqisign_lvl1_broadwell PUBLIC -DMINI_GMP=ON 
-DSQISIGN_BUILD_TYPE_BROADWELL=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl1 -DRANDOMBYTES_SYSTEM=ON -DSQISIGN_GF_IMPL_BROADWELL) + set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) +endif() + +if(OQS_ENABLE_SIG_sqisign_lvl3) + add_library(sqisign_lvl3_ref OBJECT sig_sqisign_lvl3.c the-sqisign_sqisign_lvl3_ref/aes_c.c the-sqisign_sqisign_lvl3_ref/algebra.c the-sqisign_sqisign_lvl3_ref/api.c the-sqisign_sqisign_lvl3_ref/basis.c the-sqisign_sqisign_lvl3_ref/biextension.c the-sqisign_sqisign_lvl3_ref/common.c the-sqisign_sqisign_lvl3_ref/dim2.c the-sqisign_sqisign_lvl3_ref/dim2id2iso.c the-sqisign_sqisign_lvl3_ref/dim4.c the-sqisign_sqisign_lvl3_ref/e0_basis.c the-sqisign_sqisign_lvl3_ref/ec.c the-sqisign_sqisign_lvl3_ref/ec_jac.c the-sqisign_sqisign_lvl3_ref/ec_params.c the-sqisign_sqisign_lvl3_ref/encode_signature.c the-sqisign_sqisign_lvl3_ref/encode_verification.c the-sqisign_sqisign_lvl3_ref/endomorphism_action.c the-sqisign_sqisign_lvl3_ref/finit.c the-sqisign_sqisign_lvl3_ref/fips202.c the-sqisign_sqisign_lvl3_ref/fp.c the-sqisign_sqisign_lvl3_ref/fp2.c the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c the-sqisign_sqisign_lvl3_ref/hd.c the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl3_ref/hnf.c the-sqisign_sqisign_lvl3_ref/hnf_internal.c the-sqisign_sqisign_lvl3_ref/ibz_division.c the-sqisign_sqisign_lvl3_ref/id2iso.c the-sqisign_sqisign_lvl3_ref/ideal.c the-sqisign_sqisign_lvl3_ref/intbig.c the-sqisign_sqisign_lvl3_ref/integers.c the-sqisign_sqisign_lvl3_ref/isog_chains.c the-sqisign_sqisign_lvl3_ref/keygen.c the-sqisign_sqisign_lvl3_ref/l2.c the-sqisign_sqisign_lvl3_ref/lat_ball.c the-sqisign_sqisign_lvl3_ref/lattice.c the-sqisign_sqisign_lvl3_ref/lll_applications.c the-sqisign_sqisign_lvl3_ref/mem.c the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl3_ref/mini-gmp.c the-sqisign_sqisign_lvl3_ref/mp.c the-sqisign_sqisign_lvl3_ref/normeq.c the-sqisign_sqisign_lvl3_ref/printer.c the-sqisign_sqisign_lvl3_ref/quaternion_data.c the-sqisign_sqisign_lvl3_ref/randombytes_ctrdrbg.c the-sqisign_sqisign_lvl3_ref/randombytes_system.c the-sqisign_sqisign_lvl3_ref/rationals.c the-sqisign_sqisign_lvl3_ref/sign.c the-sqisign_sqisign_lvl3_ref/sqisign.c the-sqisign_sqisign_lvl3_ref/theta_isogenies.c the-sqisign_sqisign_lvl3_ref/theta_structure.c the-sqisign_sqisign_lvl3_ref/tools.c the-sqisign_sqisign_lvl3_ref/torsion_constants.c the-sqisign_sqisign_lvl3_ref/verify.c the-sqisign_sqisign_lvl3_ref/xeval.c the-sqisign_sqisign_lvl3_ref/xisog.c) + target_compile_options(sqisign_lvl3_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl3 -DRANDOMBYTES_SYSTEM=ON) + target_include_directories(sqisign_lvl3_ref PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl3_ref) + target_include_directories(sqisign_lvl3_ref PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) + target_compile_options(sqisign_lvl3_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl3 -DRANDOMBYTES_SYSTEM=ON) + set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) +endif() + +if(OQS_ENABLE_SIG_sqisign_lvl3_broadwell) + add_library(sqisign_lvl3_broadwell OBJECT the-sqisign_sqisign_lvl3_broadwell/aes_ni.c the-sqisign_sqisign_lvl3_broadwell/api.c the-sqisign_sqisign_lvl3_broadwell/basis.c the-sqisign_sqisign_lvl3_broadwell/biextension.c the-sqisign_sqisign_lvl3_broadwell/common.c the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.c the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c 
the-sqisign_sqisign_lvl3_broadwell/e0_basis.c the-sqisign_sqisign_lvl3_broadwell/ec.c the-sqisign_sqisign_lvl3_broadwell/ec_jac.c the-sqisign_sqisign_lvl3_broadwell/ec_params.c the-sqisign_sqisign_lvl3_broadwell/encode_signature.c the-sqisign_sqisign_lvl3_broadwell/encode_verification.c the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl3_broadwell/fips202.c the-sqisign_sqisign_lvl3_broadwell/fp.c the-sqisign_sqisign_lvl3_broadwell/fp2.c the-sqisign_sqisign_lvl3_broadwell/fp_asm.S the-sqisign_sqisign_lvl3_broadwell/gf65376.c the-sqisign_sqisign_lvl3_broadwell/hd.c the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl3_broadwell/id2iso.c the-sqisign_sqisign_lvl3_broadwell/isog_chains.c the-sqisign_sqisign_lvl3_broadwell/keygen.c the-sqisign_sqisign_lvl3_broadwell/mem.c the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl3_broadwell/mini-gmp.c the-sqisign_sqisign_lvl3_broadwell/quaternion_data.c the-sqisign_sqisign_lvl3_broadwell/randombytes_ctrdrbg_aesni.c the-sqisign_sqisign_lvl3_broadwell/randombytes_system.c the-sqisign_sqisign_lvl3_broadwell/sign.c the-sqisign_sqisign_lvl3_broadwell/sqisign.c the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl3_broadwell/theta_structure.c the-sqisign_sqisign_lvl3_broadwell/tools.c the-sqisign_sqisign_lvl3_broadwell/torsion_constants.c the-sqisign_sqisign_lvl3_broadwell/vaes256_key_expansion.S the-sqisign_sqisign_lvl3_broadwell/verify.c the-sqisign_sqisign_lvl3_broadwell/xeval.c the-sqisign_sqisign_lvl3_broadwell/xisog.c) + target_include_directories(sqisign_lvl3_broadwell PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl3_broadwell) + target_include_directories(sqisign_lvl3_broadwell PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) + target_compile_options(sqisign_lvl3_broadwell PRIVATE -mavx2) + target_compile_options(sqisign_lvl3_broadwell PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_BROADWELL=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl3 -DRANDOMBYTES_SYSTEM=ON -DSQISIGN_GF_IMPL_BROADWELL) + set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) +endif() + +if(OQS_ENABLE_SIG_sqisign_lvl5) + add_library(sqisign_lvl5_ref OBJECT sig_sqisign_lvl5.c the-sqisign_sqisign_lvl5_ref/aes_c.c the-sqisign_sqisign_lvl5_ref/algebra.c the-sqisign_sqisign_lvl5_ref/api.c the-sqisign_sqisign_lvl5_ref/basis.c the-sqisign_sqisign_lvl5_ref/biextension.c the-sqisign_sqisign_lvl5_ref/common.c the-sqisign_sqisign_lvl5_ref/dim2.c the-sqisign_sqisign_lvl5_ref/dim2id2iso.c the-sqisign_sqisign_lvl5_ref/dim4.c the-sqisign_sqisign_lvl5_ref/e0_basis.c the-sqisign_sqisign_lvl5_ref/ec.c the-sqisign_sqisign_lvl5_ref/ec_jac.c the-sqisign_sqisign_lvl5_ref/ec_params.c the-sqisign_sqisign_lvl5_ref/encode_signature.c the-sqisign_sqisign_lvl5_ref/encode_verification.c the-sqisign_sqisign_lvl5_ref/endomorphism_action.c the-sqisign_sqisign_lvl5_ref/finit.c the-sqisign_sqisign_lvl5_ref/fips202.c the-sqisign_sqisign_lvl5_ref/fp.c the-sqisign_sqisign_lvl5_ref/fp2.c the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c the-sqisign_sqisign_lvl5_ref/hd.c the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl5_ref/hnf.c the-sqisign_sqisign_lvl5_ref/hnf_internal.c the-sqisign_sqisign_lvl5_ref/ibz_division.c the-sqisign_sqisign_lvl5_ref/id2iso.c the-sqisign_sqisign_lvl5_ref/ideal.c the-sqisign_sqisign_lvl5_ref/intbig.c the-sqisign_sqisign_lvl5_ref/integers.c the-sqisign_sqisign_lvl5_ref/isog_chains.c the-sqisign_sqisign_lvl5_ref/keygen.c 
the-sqisign_sqisign_lvl5_ref/l2.c the-sqisign_sqisign_lvl5_ref/lat_ball.c the-sqisign_sqisign_lvl5_ref/lattice.c the-sqisign_sqisign_lvl5_ref/lll_applications.c the-sqisign_sqisign_lvl5_ref/mem.c the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl5_ref/mini-gmp.c the-sqisign_sqisign_lvl5_ref/mp.c the-sqisign_sqisign_lvl5_ref/normeq.c the-sqisign_sqisign_lvl5_ref/printer.c the-sqisign_sqisign_lvl5_ref/quaternion_data.c the-sqisign_sqisign_lvl5_ref/randombytes_ctrdrbg.c the-sqisign_sqisign_lvl5_ref/randombytes_system.c the-sqisign_sqisign_lvl5_ref/rationals.c the-sqisign_sqisign_lvl5_ref/sign.c the-sqisign_sqisign_lvl5_ref/sqisign.c the-sqisign_sqisign_lvl5_ref/theta_isogenies.c the-sqisign_sqisign_lvl5_ref/theta_structure.c the-sqisign_sqisign_lvl5_ref/tools.c the-sqisign_sqisign_lvl5_ref/torsion_constants.c the-sqisign_sqisign_lvl5_ref/verify.c the-sqisign_sqisign_lvl5_ref/xeval.c the-sqisign_sqisign_lvl5_ref/xisog.c) + target_compile_options(sqisign_lvl5_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl5 -DRANDOMBYTES_SYSTEM=ON) + target_include_directories(sqisign_lvl5_ref PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl5_ref) + target_include_directories(sqisign_lvl5_ref PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) + target_compile_options(sqisign_lvl5_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl5 -DRANDOMBYTES_SYSTEM=ON) + set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) +endif() + +if(OQS_ENABLE_SIG_sqisign_lvl5_broadwell) + add_library(sqisign_lvl5_broadwell OBJECT the-sqisign_sqisign_lvl5_broadwell/aes_ni.c the-sqisign_sqisign_lvl5_broadwell/api.c the-sqisign_sqisign_lvl5_broadwell/basis.c the-sqisign_sqisign_lvl5_broadwell/biextension.c the-sqisign_sqisign_lvl5_broadwell/common.c the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.c the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl5_broadwell/e0_basis.c the-sqisign_sqisign_lvl5_broadwell/ec.c the-sqisign_sqisign_lvl5_broadwell/ec_jac.c the-sqisign_sqisign_lvl5_broadwell/ec_params.c the-sqisign_sqisign_lvl5_broadwell/encode_signature.c the-sqisign_sqisign_lvl5_broadwell/encode_verification.c the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl5_broadwell/fips202.c the-sqisign_sqisign_lvl5_broadwell/fp.c the-sqisign_sqisign_lvl5_broadwell/fp2.c the-sqisign_sqisign_lvl5_broadwell/fp_asm.S the-sqisign_sqisign_lvl5_broadwell/gf27500.c the-sqisign_sqisign_lvl5_broadwell/hd.c the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl5_broadwell/id2iso.c the-sqisign_sqisign_lvl5_broadwell/isog_chains.c the-sqisign_sqisign_lvl5_broadwell/keygen.c the-sqisign_sqisign_lvl5_broadwell/mem.c the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl5_broadwell/mini-gmp.c the-sqisign_sqisign_lvl5_broadwell/quaternion_data.c the-sqisign_sqisign_lvl5_broadwell/randombytes_ctrdrbg_aesni.c the-sqisign_sqisign_lvl5_broadwell/randombytes_system.c the-sqisign_sqisign_lvl5_broadwell/sign.c the-sqisign_sqisign_lvl5_broadwell/sqisign.c the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl5_broadwell/theta_structure.c the-sqisign_sqisign_lvl5_broadwell/tools.c the-sqisign_sqisign_lvl5_broadwell/torsion_constants.c the-sqisign_sqisign_lvl5_broadwell/vaes256_key_expansion.S the-sqisign_sqisign_lvl5_broadwell/verify.c the-sqisign_sqisign_lvl5_broadwell/xeval.c the-sqisign_sqisign_lvl5_broadwell/xisog.c) + 
target_include_directories(sqisign_lvl5_broadwell PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl5_broadwell) + target_include_directories(sqisign_lvl5_broadwell PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) + target_compile_options(sqisign_lvl5_broadwell PRIVATE -mavx2) + target_compile_options(sqisign_lvl5_broadwell PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_BROADWELL=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl5 -DRANDOMBYTES_SYSTEM=ON -DSQISIGN_GF_IMPL_BROADWELL) + set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) +endif() + +set(SQISIGN_OBJS ${_SQISIGN_OBJS} PARENT_SCOPE) diff --git a/src/sig/sqisign/sig_sqisign.h b/src/sig/sqisign/sig_sqisign.h new file mode 100644 index 0000000000..4c18410a33 --- /dev/null +++ b/src/sig/sqisign/sig_sqisign.h @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: MIT + +#ifndef OQS_SIG_SQISIGN_H +#define OQS_SIG_SQISIGN_H + +#include + +#if defined(OQS_ENABLE_SIG_sqisign_lvl1) +#define OQS_SIG_sqisign_lvl1_length_public_key 65 +#define OQS_SIG_sqisign_lvl1_length_secret_key 353 +#define OQS_SIG_sqisign_lvl1_length_signature 148 + +OQS_SIG *OQS_SIG_sqisign_lvl1_new(void); +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl1_keypair(uint8_t *public_key, uint8_t *secret_key); +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl1_sign(uint8_t *signature, size_t *signature_len, const uint8_t *message, size_t message_len, const uint8_t *secret_key); +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl1_verify(const uint8_t *message, size_t message_len, const uint8_t *signature, size_t signature_len, const uint8_t *public_key); +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl1_sign_with_ctx_str(uint8_t *signature, size_t *signature_len, const uint8_t *message, size_t message_len, const uint8_t *ctx, size_t ctxlen, const uint8_t *secret_key); +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl1_verify_with_ctx_str(const uint8_t *message, size_t message_len, const uint8_t *signature, size_t signature_len, const uint8_t *ctx, size_t ctxlen, const uint8_t *public_key); +#endif + +#if defined(OQS_ENABLE_SIG_sqisign_lvl3) +#define OQS_SIG_sqisign_lvl3_length_public_key 97 +#define OQS_SIG_sqisign_lvl3_length_secret_key 529 +#define OQS_SIG_sqisign_lvl3_length_signature 224 + +OQS_SIG *OQS_SIG_sqisign_lvl3_new(void); +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl3_keypair(uint8_t *public_key, uint8_t *secret_key); +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl3_sign(uint8_t *signature, size_t *signature_len, const uint8_t *message, size_t message_len, const uint8_t *secret_key); +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl3_verify(const uint8_t *message, size_t message_len, const uint8_t *signature, size_t signature_len, const uint8_t *public_key); +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl3_sign_with_ctx_str(uint8_t *signature, size_t *signature_len, const uint8_t *message, size_t message_len, const uint8_t *ctx, size_t ctxlen, const uint8_t *secret_key); +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl3_verify_with_ctx_str(const uint8_t *message, size_t message_len, const uint8_t *signature, size_t signature_len, const uint8_t *ctx, size_t ctxlen, const uint8_t *public_key); +#endif + +#if defined(OQS_ENABLE_SIG_sqisign_lvl5) +#define OQS_SIG_sqisign_lvl5_length_public_key 129 +#define OQS_SIG_sqisign_lvl5_length_secret_key 701 +#define OQS_SIG_sqisign_lvl5_length_signature 292 + +OQS_SIG *OQS_SIG_sqisign_lvl5_new(void); +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl5_keypair(uint8_t *public_key, uint8_t *secret_key); +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl5_sign(uint8_t *signature, size_t *signature_len, const uint8_t *message, size_t message_len, const uint8_t 
*secret_key); +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl5_verify(const uint8_t *message, size_t message_len, const uint8_t *signature, size_t signature_len, const uint8_t *public_key); +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl5_sign_with_ctx_str(uint8_t *signature, size_t *signature_len, const uint8_t *message, size_t message_len, const uint8_t *ctx, size_t ctxlen, const uint8_t *secret_key); +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl5_verify_with_ctx_str(const uint8_t *message, size_t message_len, const uint8_t *signature, size_t signature_len, const uint8_t *ctx, size_t ctxlen, const uint8_t *public_key); +#endif + +#endif diff --git a/src/sig/sqisign/sig_sqisign_lvl1.c b/src/sig/sqisign/sig_sqisign_lvl1.c new file mode 100644 index 0000000000..26055e952c --- /dev/null +++ b/src/sig/sqisign/sig_sqisign_lvl1.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: MIT + +#include + +#include + +#if defined(OQS_ENABLE_SIG_sqisign_lvl1) +OQS_SIG *OQS_SIG_sqisign_lvl1_new(void) { + + OQS_SIG *sig = OQS_MEM_malloc(sizeof(OQS_SIG)); + if (sig == NULL) { + return NULL; + } + sig->method_name = OQS_SIG_alg_sqisign_lvl1; + sig->alg_version = "round2"; + + sig->claimed_nist_level = 1; + sig->euf_cma = true; + sig->suf_cma = false; + sig->sig_with_ctx_support = false; + + sig->length_public_key = OQS_SIG_sqisign_lvl1_length_public_key; + sig->length_secret_key = OQS_SIG_sqisign_lvl1_length_secret_key; + sig->length_signature = OQS_SIG_sqisign_lvl1_length_signature; + + sig->keypair = OQS_SIG_sqisign_lvl1_keypair; + sig->sign = OQS_SIG_sqisign_lvl1_sign; + sig->verify = OQS_SIG_sqisign_lvl1_verify; + sig->sign_with_ctx_str = OQS_SIG_sqisign_lvl1_sign_with_ctx_str; + sig->verify_with_ctx_str = OQS_SIG_sqisign_lvl1_verify_with_ctx_str; + + return sig; +} + +extern int sqisign_lvl1_ref_sqisign_keypair(uint8_t *pk, uint8_t *sk); +extern int sqisign_lvl1_ref_sqisign_sign_signature(uint8_t *sig, size_t *siglen, const uint8_t *m, size_t mlen, const uint8_t *sk); +extern int sqisign_lvl1_ref_sqisign_verify_signature(const uint8_t *sig, size_t siglen, const uint8_t *m, size_t mlen, const uint8_t *pk); + +#if defined(OQS_ENABLE_SIG_sqisign_lvl1_broadwell) +extern int sqisign_lvl1_broadwell_sqisign_keypair(uint8_t *pk, uint8_t *sk); +extern int sqisign_lvl1_broadwell_sqisign_sign_signature(uint8_t *sig, size_t *siglen, const uint8_t *m, size_t mlen, const uint8_t *sk); +extern int sqisign_lvl1_broadwell_sqisign_verify_signature(const uint8_t *sig, size_t siglen, const uint8_t *m, size_t mlen, const uint8_t *pk); +#endif + +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl1_keypair(uint8_t *public_key, uint8_t *secret_key) { +#if defined(OQS_ENABLE_SIG_sqisign_lvl1_broadwell) +#if defined(OQS_DIST_BUILD) + if (OQS_CPU_has_extension(OQS_CPU_EXT_AVX2)) { +#endif /* OQS_DIST_BUILD */ + return (OQS_STATUS) sqisign_lvl1_broadwell_sqisign_keypair(public_key, secret_key); +#if defined(OQS_DIST_BUILD) + } else { + return (OQS_STATUS) sqisign_lvl1_ref_sqisign_keypair(public_key, secret_key); + } +#endif /* OQS_DIST_BUILD */ +#else + return (OQS_STATUS) sqisign_lvl1_ref_sqisign_keypair(public_key, secret_key); +#endif +} + +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl1_sign(uint8_t *signature, size_t *signature_len, const uint8_t *message, size_t message_len, const uint8_t *secret_key) { +#if defined(OQS_ENABLE_SIG_sqisign_lvl1_broadwell) +#if defined(OQS_DIST_BUILD) + if (OQS_CPU_has_extension(OQS_CPU_EXT_AVX2)) { +#endif /* OQS_DIST_BUILD */ + return (OQS_STATUS) sqisign_lvl1_broadwell_sqisign_sign_signature(signature, signature_len, message, 
message_len, secret_key); +#if defined(OQS_DIST_BUILD) + } else { + return (OQS_STATUS) sqisign_lvl1_ref_sqisign_sign_signature(signature, signature_len, message, message_len, secret_key); + } +#endif /* OQS_DIST_BUILD */ +#else + return (OQS_STATUS) sqisign_lvl1_ref_sqisign_sign_signature(signature, signature_len, message, message_len, secret_key); +#endif +} + +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl1_verify(const uint8_t *message, size_t message_len, const uint8_t *signature, size_t signature_len, const uint8_t *public_key) { +#if defined(OQS_ENABLE_SIG_sqisign_lvl1_broadwell) +#if defined(OQS_DIST_BUILD) + if (OQS_CPU_has_extension(OQS_CPU_EXT_AVX2)) { +#endif /* OQS_DIST_BUILD */ + return (OQS_STATUS) sqisign_lvl1_broadwell_sqisign_verify_signature(signature, signature_len, message, message_len, public_key); +#if defined(OQS_DIST_BUILD) + } else { + return (OQS_STATUS) sqisign_lvl1_ref_sqisign_verify_signature(signature, signature_len, message, message_len, public_key); + } +#endif /* OQS_DIST_BUILD */ +#else + return (OQS_STATUS) sqisign_lvl1_ref_sqisign_verify_signature(signature, signature_len, message, message_len, public_key); +#endif +} + +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl1_sign_with_ctx_str(uint8_t *signature, size_t *signature_len, const uint8_t *message, size_t message_len, const uint8_t *ctx_str, size_t ctx_str_len, const uint8_t *secret_key) { + if (ctx_str == NULL && ctx_str_len == 0) { + return OQS_SIG_sqisign_lvl1_sign(signature, signature_len, message, message_len, secret_key); + } else { + return OQS_ERROR; + } +} + +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl1_verify_with_ctx_str(const uint8_t *message, size_t message_len, const uint8_t *signature, size_t signature_len, const uint8_t *ctx_str, size_t ctx_str_len, const uint8_t *public_key) { + if (ctx_str == NULL && ctx_str_len == 0) { + return OQS_SIG_sqisign_lvl1_verify(message, message_len, signature, signature_len, public_key); + } else { + return OQS_ERROR; + } +} +#endif diff --git a/src/sig/sqisign/sig_sqisign_lvl3.c b/src/sig/sqisign/sig_sqisign_lvl3.c new file mode 100644 index 0000000000..3278449c25 --- /dev/null +++ b/src/sig/sqisign/sig_sqisign_lvl3.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: MIT + +#include + +#include + +#if defined(OQS_ENABLE_SIG_sqisign_lvl3) +OQS_SIG *OQS_SIG_sqisign_lvl3_new(void) { + + OQS_SIG *sig = OQS_MEM_malloc(sizeof(OQS_SIG)); + if (sig == NULL) { + return NULL; + } + sig->method_name = OQS_SIG_alg_sqisign_lvl3; + sig->alg_version = "round2"; + + sig->claimed_nist_level = 3; + sig->euf_cma = true; + sig->suf_cma = false; + sig->sig_with_ctx_support = false; + + sig->length_public_key = OQS_SIG_sqisign_lvl3_length_public_key; + sig->length_secret_key = OQS_SIG_sqisign_lvl3_length_secret_key; + sig->length_signature = OQS_SIG_sqisign_lvl3_length_signature; + + sig->keypair = OQS_SIG_sqisign_lvl3_keypair; + sig->sign = OQS_SIG_sqisign_lvl3_sign; + sig->verify = OQS_SIG_sqisign_lvl3_verify; + sig->sign_with_ctx_str = OQS_SIG_sqisign_lvl3_sign_with_ctx_str; + sig->verify_with_ctx_str = OQS_SIG_sqisign_lvl3_verify_with_ctx_str; + + return sig; +} + +extern int sqisign_lvl3_ref_sqisign_keypair(uint8_t *pk, uint8_t *sk); +extern int sqisign_lvl3_ref_sqisign_sign_signature(uint8_t *sig, size_t *siglen, const uint8_t *m, size_t mlen, const uint8_t *sk); +extern int sqisign_lvl3_ref_sqisign_verify_signature(const uint8_t *sig, size_t siglen, const uint8_t *m, size_t mlen, const uint8_t *pk); + +#if defined(OQS_ENABLE_SIG_sqisign_lvl3_broadwell) +extern int 
sqisign_lvl3_broadwell_sqisign_keypair(uint8_t *pk, uint8_t *sk); +extern int sqisign_lvl3_broadwell_sqisign_sign_signature(uint8_t *sig, size_t *siglen, const uint8_t *m, size_t mlen, const uint8_t *sk); +extern int sqisign_lvl3_broadwell_sqisign_verify_signature(const uint8_t *sig, size_t siglen, const uint8_t *m, size_t mlen, const uint8_t *pk); +#endif + +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl3_keypair(uint8_t *public_key, uint8_t *secret_key) { +#if defined(OQS_ENABLE_SIG_sqisign_lvl3_broadwell) +#if defined(OQS_DIST_BUILD) + if (OQS_CPU_has_extension(OQS_CPU_EXT_AVX2)) { +#endif /* OQS_DIST_BUILD */ + return (OQS_STATUS) sqisign_lvl3_broadwell_sqisign_keypair(public_key, secret_key); +#if defined(OQS_DIST_BUILD) + } else { + return (OQS_STATUS) sqisign_lvl3_ref_sqisign_keypair(public_key, secret_key); + } +#endif /* OQS_DIST_BUILD */ +#else + return (OQS_STATUS) sqisign_lvl3_ref_sqisign_keypair(public_key, secret_key); +#endif +} + +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl3_sign(uint8_t *signature, size_t *signature_len, const uint8_t *message, size_t message_len, const uint8_t *secret_key) { +#if defined(OQS_ENABLE_SIG_sqisign_lvl3_broadwell) +#if defined(OQS_DIST_BUILD) + if (OQS_CPU_has_extension(OQS_CPU_EXT_AVX2)) { +#endif /* OQS_DIST_BUILD */ + return (OQS_STATUS) sqisign_lvl3_broadwell_sqisign_sign_signature(signature, signature_len, message, message_len, secret_key); +#if defined(OQS_DIST_BUILD) + } else { + return (OQS_STATUS) sqisign_lvl3_ref_sqisign_sign_signature(signature, signature_len, message, message_len, secret_key); + } +#endif /* OQS_DIST_BUILD */ +#else + return (OQS_STATUS) sqisign_lvl3_ref_sqisign_sign_signature(signature, signature_len, message, message_len, secret_key); +#endif +} + +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl3_verify(const uint8_t *message, size_t message_len, const uint8_t *signature, size_t signature_len, const uint8_t *public_key) { +#if defined(OQS_ENABLE_SIG_sqisign_lvl3_broadwell) +#if defined(OQS_DIST_BUILD) + if (OQS_CPU_has_extension(OQS_CPU_EXT_AVX2)) { +#endif /* OQS_DIST_BUILD */ + return (OQS_STATUS) sqisign_lvl3_broadwell_sqisign_verify_signature(signature, signature_len, message, message_len, public_key); +#if defined(OQS_DIST_BUILD) + } else { + return (OQS_STATUS) sqisign_lvl3_ref_sqisign_verify_signature(signature, signature_len, message, message_len, public_key); + } +#endif /* OQS_DIST_BUILD */ +#else + return (OQS_STATUS) sqisign_lvl3_ref_sqisign_verify_signature(signature, signature_len, message, message_len, public_key); +#endif +} + +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl3_sign_with_ctx_str(uint8_t *signature, size_t *signature_len, const uint8_t *message, size_t message_len, const uint8_t *ctx_str, size_t ctx_str_len, const uint8_t *secret_key) { + if (ctx_str == NULL && ctx_str_len == 0) { + return OQS_SIG_sqisign_lvl3_sign(signature, signature_len, message, message_len, secret_key); + } else { + return OQS_ERROR; + } +} + +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl3_verify_with_ctx_str(const uint8_t *message, size_t message_len, const uint8_t *signature, size_t signature_len, const uint8_t *ctx_str, size_t ctx_str_len, const uint8_t *public_key) { + if (ctx_str == NULL && ctx_str_len == 0) { + return OQS_SIG_sqisign_lvl3_verify(message, message_len, signature, signature_len, public_key); + } else { + return OQS_ERROR; + } +} +#endif diff --git a/src/sig/sqisign/sig_sqisign_lvl5.c b/src/sig/sqisign/sig_sqisign_lvl5.c new file mode 100644 index 0000000000..8c7f3d730d --- /dev/null +++ b/src/sig/sqisign/sig_sqisign_lvl5.c @@ 
-0,0 +1,108 @@ +// SPDX-License-Identifier: MIT + +#include + +#include + +#if defined(OQS_ENABLE_SIG_sqisign_lvl5) +OQS_SIG *OQS_SIG_sqisign_lvl5_new(void) { + + OQS_SIG *sig = OQS_MEM_malloc(sizeof(OQS_SIG)); + if (sig == NULL) { + return NULL; + } + sig->method_name = OQS_SIG_alg_sqisign_lvl5; + sig->alg_version = "round2"; + + sig->claimed_nist_level = 5; + sig->euf_cma = true; + sig->suf_cma = false; + sig->sig_with_ctx_support = false; + + sig->length_public_key = OQS_SIG_sqisign_lvl5_length_public_key; + sig->length_secret_key = OQS_SIG_sqisign_lvl5_length_secret_key; + sig->length_signature = OQS_SIG_sqisign_lvl5_length_signature; + + sig->keypair = OQS_SIG_sqisign_lvl5_keypair; + sig->sign = OQS_SIG_sqisign_lvl5_sign; + sig->verify = OQS_SIG_sqisign_lvl5_verify; + sig->sign_with_ctx_str = OQS_SIG_sqisign_lvl5_sign_with_ctx_str; + sig->verify_with_ctx_str = OQS_SIG_sqisign_lvl5_verify_with_ctx_str; + + return sig; +} + +extern int sqisign_lvl5_ref_sqisign_keypair(uint8_t *pk, uint8_t *sk); +extern int sqisign_lvl5_ref_sqisign_sign_signature(uint8_t *sig, size_t *siglen, const uint8_t *m, size_t mlen, const uint8_t *sk); +extern int sqisign_lvl5_ref_sqisign_verify_signature(const uint8_t *sig, size_t siglen, const uint8_t *m, size_t mlen, const uint8_t *pk); + +#if defined(OQS_ENABLE_SIG_sqisign_lvl5_broadwell) +extern int sqisign_lvl5_broadwell_sqisign_keypair(uint8_t *pk, uint8_t *sk); +extern int sqisign_lvl5_broadwell_sqisign_sign_signature(uint8_t *sig, size_t *siglen, const uint8_t *m, size_t mlen, const uint8_t *sk); +extern int sqisign_lvl5_broadwell_sqisign_verify_signature(const uint8_t *sig, size_t siglen, const uint8_t *m, size_t mlen, const uint8_t *pk); +#endif + +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl5_keypair(uint8_t *public_key, uint8_t *secret_key) { +#if defined(OQS_ENABLE_SIG_sqisign_lvl5_broadwell) +#if defined(OQS_DIST_BUILD) + if (OQS_CPU_has_extension(OQS_CPU_EXT_AVX2)) { +#endif /* OQS_DIST_BUILD */ + return (OQS_STATUS) sqisign_lvl5_broadwell_sqisign_keypair(public_key, secret_key); +#if defined(OQS_DIST_BUILD) + } else { + return (OQS_STATUS) sqisign_lvl5_ref_sqisign_keypair(public_key, secret_key); + } +#endif /* OQS_DIST_BUILD */ +#else + return (OQS_STATUS) sqisign_lvl5_ref_sqisign_keypair(public_key, secret_key); +#endif +} + +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl5_sign(uint8_t *signature, size_t *signature_len, const uint8_t *message, size_t message_len, const uint8_t *secret_key) { +#if defined(OQS_ENABLE_SIG_sqisign_lvl5_broadwell) +#if defined(OQS_DIST_BUILD) + if (OQS_CPU_has_extension(OQS_CPU_EXT_AVX2)) { +#endif /* OQS_DIST_BUILD */ + return (OQS_STATUS) sqisign_lvl5_broadwell_sqisign_sign_signature(signature, signature_len, message, message_len, secret_key); +#if defined(OQS_DIST_BUILD) + } else { + return (OQS_STATUS) sqisign_lvl5_ref_sqisign_sign_signature(signature, signature_len, message, message_len, secret_key); + } +#endif /* OQS_DIST_BUILD */ +#else + return (OQS_STATUS) sqisign_lvl5_ref_sqisign_sign_signature(signature, signature_len, message, message_len, secret_key); +#endif +} + +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl5_verify(const uint8_t *message, size_t message_len, const uint8_t *signature, size_t signature_len, const uint8_t *public_key) { +#if defined(OQS_ENABLE_SIG_sqisign_lvl5_broadwell) +#if defined(OQS_DIST_BUILD) + if (OQS_CPU_has_extension(OQS_CPU_EXT_AVX2)) { +#endif /* OQS_DIST_BUILD */ + return (OQS_STATUS) sqisign_lvl5_broadwell_sqisign_verify_signature(signature, signature_len, message, message_len, public_key); 
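The per-level wrappers added in sig_sqisign_lvl1.c, sig_sqisign_lvl3.c and sig_sqisign_lvl5.c are reached only through liboqs's generic OQS_SIG interface; the AVX2/reference dispatch shown above stays internal to them. The sketch below is illustrative rather than part of the patch: it assumes the existing liboqs public API (OQS_SIG_new, OQS_SIG_keypair, OQS_SIG_sign, OQS_SIG_verify, OQS_SIG_free and the OQS_MEM_* helpers), which this patch leaves unchanged, plus the SQIsign-lvl1 identifier defined in sig.h above. Error handling is kept minimal.

    #include <stdint.h>
    #include <stdio.h>

    #include <oqs/oqs.h>

    int main(void) {
        /* Returns NULL if SQIsign-lvl1 was disabled at build time. */
        OQS_SIG *sig = OQS_SIG_new(OQS_SIG_alg_sqisign_lvl1);
        if (sig == NULL) {
            return 1;
        }

        uint8_t *pk = OQS_MEM_malloc(sig->length_public_key);
        uint8_t *sk = OQS_MEM_malloc(sig->length_secret_key);
        uint8_t *sm = OQS_MEM_malloc(sig->length_signature);
        size_t sm_len = 0;
        const uint8_t msg[] = "toy message";
        OQS_STATUS rc = OQS_ERROR;

        if (pk != NULL && sk != NULL && sm != NULL) {
            rc = OQS_SIG_keypair(sig, pk, sk);            /* -> OQS_SIG_sqisign_lvl1_keypair */
            if (rc == OQS_SUCCESS) {
                rc = OQS_SIG_sign(sig, sm, &sm_len, msg, sizeof(msg), sk);
            }
            if (rc == OQS_SUCCESS) {
                rc = OQS_SIG_verify(sig, msg, sizeof(msg), sm, sm_len, pk);
            }
        }
        printf("SQIsign-lvl1 sign/verify round trip: %s\n", rc == OQS_SUCCESS ? "ok" : "failed");

        OQS_MEM_secure_free(sk, sig->length_secret_key);  /* zeroizes the secret key */
        OQS_MEM_insecure_free(pk);
        OQS_MEM_insecure_free(sm);
        OQS_SIG_free(sig);
        return (rc == OQS_SUCCESS) ? 0 : 1;
    }

When liboqs is built with OQS_DIST_BUILD, the same binary takes the broadwell code path at runtime whenever AVX2 is available and otherwise falls back to the reference implementation, exactly as the wrapper functions in this patch implement it.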
+#if defined(OQS_DIST_BUILD) + } else { + return (OQS_STATUS) sqisign_lvl5_ref_sqisign_verify_signature(signature, signature_len, message, message_len, public_key); + } +#endif /* OQS_DIST_BUILD */ +#else + return (OQS_STATUS) sqisign_lvl5_ref_sqisign_verify_signature(signature, signature_len, message, message_len, public_key); +#endif +} + +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl5_sign_with_ctx_str(uint8_t *signature, size_t *signature_len, const uint8_t *message, size_t message_len, const uint8_t *ctx_str, size_t ctx_str_len, const uint8_t *secret_key) { + if (ctx_str == NULL && ctx_str_len == 0) { + return OQS_SIG_sqisign_lvl5_sign(signature, signature_len, message, message_len, secret_key); + } else { + return OQS_ERROR; + } +} + +OQS_API OQS_STATUS OQS_SIG_sqisign_lvl5_verify_with_ctx_str(const uint8_t *message, size_t message_len, const uint8_t *signature, size_t signature_len, const uint8_t *ctx_str, size_t ctx_str_len, const uint8_t *public_key) { + if (ctx_str == NULL && ctx_str_len == 0) { + return OQS_SIG_sqisign_lvl5_verify(message, message_len, signature, signature_len, public_key); + } else { + return OQS_ERROR; + } +} +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/LICENSE b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/LICENSE new file mode 100644 index 0000000000..8f71f43fee --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/NOTICE b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/NOTICE new file mode 100644 index 0000000000..6eccf392fa --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/NOTICE @@ -0,0 +1,21 @@ +Copyright 2023-2025 the SQIsign team. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +The DPE Library is (C) 2004-2024 Patrick Pelissier, Paul Zimmermann, +LORIA/INRIA, and licensed under the GNU Lesser General Public License, +version 3. You may obtain a copy of the License at + + https://www.gnu.org/licenses/lgpl-3.0.en.html + +or in the file COPYING.LGPL. diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes.h new file mode 100644 index 0000000000..e35ec3705b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes.h @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef AES_H +#define AES_H + +#include +#include + +void AES_256_ECB(const uint8_t *input, const uint8_t *key, uint8_t *output); +#define AES_ECB_encrypt AES_256_ECB + +#ifdef ENABLE_AESNI +int AES_128_CTR_NI(unsigned char *output, + size_t outputByteLen, + const unsigned char *input, + size_t inputByteLen); +int AES_128_CTR_4R_NI(unsigned char *output, + size_t outputByteLen, + const unsigned char *input, + size_t inputByteLen); +#define AES_128_CTR AES_128_CTR_NI +#else +int AES_128_CTR(unsigned char *output, + size_t outputByteLen, + const unsigned char *input, + size_t inputByteLen); +#endif + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes_ni.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes_ni.c new file mode 100644 index 0000000000..dc778fc9b6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes_ni.c @@ -0,0 +1,258 @@ +/*************************************************************************** +* This implementation is a modified version of the code, +* written by Nir Drucker and Shay Gueron +* AWS Cryptographic Algorithms Group +* (ndrucker@amazon.com, gueron@amazon.com) +* +* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +* +* Licensed under the Apache License, Version 2.0 (the "License"). +* You may not use this file except in compliance with the License. +* A copy of the License is located at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* or in the "license" file accompanying this file. This file is distributed +* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +* express or implied. See the License for the specific language governing +* permissions and limitations under the License. +* The license is detailed in the file LICENSE.txt, and applies to this file. 
+* ***************************************************************************/ + +#include "aes_ni.h" +#include + +#include +#include + +#define AESENC(m, key) _mm_aesenc_si128(m, key) +#define AESENCLAST(m, key) _mm_aesenclast_si128(m, key) +#define XOR(a, b) _mm_xor_si128(a, b) +#define ADD32(a, b) _mm_add_epi32(a, b) +#define SHUF8(a, mask) _mm_shuffle_epi8(a, mask) + +#define ZERO256 _mm256_zeroall + +#define BSWAP_MASK 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f + +#ifdef VAES256 +#define VAESENC(a, key) _mm256_aesenc_epi128(a, key) +#define VAESENCLAST(a, key) _mm256_aesenclast_epi128(a, key) +#define EXTRACT128(a, imm) _mm256_extracti128_si256(a, imm) +#define XOR256(a, b) _mm256_xor_si256(a,b) +#define ADD32_256(a, b) _mm256_add_epi32(a,b) +#define SHUF8_256(a, mask) _mm256_shuffle_epi8(a, mask) +#endif + +#ifdef VAES512 +#define VAESENC(a, key) _mm512_aesenc_epi128(a, key) +#define VAESENCLAST(a, key) _mm512_aesenclast_epi128(a, key) +#define EXTRACT128(a, imm) _mm512_extracti64x2_epi64(a, imm) +#define XOR512(a, b) _mm512_xor_si512(a,b) +#define ADD32_512(a, b) _mm512_add_epi32(a,b) +#define SHUF8_512(a, mask) _mm512_shuffle_epi8(a, mask) +#endif + +_INLINE_ __m128i load_m128i(IN const uint8_t *ctr) +{ + return _mm_set_epi8(ctr[0], ctr[1], ctr[2], ctr[3], + ctr[4], ctr[5], ctr[6], ctr[7], + ctr[8], ctr[9], ctr[10], ctr[11], + ctr[12], ctr[13], ctr[14], ctr[15]); +} + +_INLINE_ __m128i loadr_m128i(IN const uint8_t *ctr) +{ + return _mm_setr_epi8(ctr[0], ctr[1], ctr[2], ctr[3], + ctr[4], ctr[5], ctr[6], ctr[7], + ctr[8], ctr[9], ctr[10], ctr[11], + ctr[12], ctr[13], ctr[14], ctr[15]); +} + +void aes256_enc(OUT uint8_t *ct, + IN const uint8_t *pt, + IN const aes256_ks_t *ks) { + uint32_t i = 0; + __m128i block = loadr_m128i(pt); + + block = XOR(block, ks->keys[0]); + for (i = 1; i < AES256_ROUNDS; i++) { + block = AESENC(block, ks->keys[i]); + } + block = AESENCLAST(block, ks->keys[AES256_ROUNDS]); + + _mm_storeu_si128((void*)ct, block); + + // Delete secrets from registers if any. + ZERO256(); +} + +void aes256_ctr_enc(OUT uint8_t *ct, + IN const uint8_t *ctr, + IN const uint32_t num_blocks, + IN const aes256_ks_t *ks) +{ + __m128i ctr_block = load_m128i(ctr); + + const __m128i bswap_mask = _mm_set_epi32(BSWAP_MASK); + const __m128i one = _mm_set_epi32(0,0,0,1); + + __m128i block = SHUF8(ctr_block, bswap_mask); + + for (uint32_t bidx = 0; bidx < num_blocks; bidx++) + { + block = XOR(block, ks->keys[0]); + for (uint32_t i = 1; i < AES256_ROUNDS; i++) { + block = AESENC(block, ks->keys[i]); + } + block = AESENCLAST(block, ks->keys[AES256_ROUNDS]); + + //We use memcpy to avoid align casting. + _mm_storeu_si128((void*)&ct[16*bidx], block); + + ctr_block = ADD32(ctr_block, one); + block = SHUF8(ctr_block, bswap_mask); + } + + // Delete secrets from registers if any. + ZERO256(); +} + +#ifdef VAES256 +_INLINE_ void load_ks(OUT __m256i ks256[AES256_ROUNDS + 1], + IN const aes256_ks_t *ks) +{ + for(uint32_t i = 0; i < AES256_ROUNDS + 1; i++) + { + ks256[i] = _mm256_broadcastsi128_si256(ks->keys[i]); + } +} + +// NIST 800-90A Table 3, Section 10.2.1 (no derivation function) states that +// max_number_of_bits_per_request is min((2^ctr_len - 4) x block_len, 2^19) <= 2^19 +// Therefore the maximal number of blocks (16 bytes) is 2^19/128 = 2^19/2^7 = 2^12 < 2^32 +// Here num_blocks is assumed to be less then 2^32. +// It is the caller responsiblity to ensure it. 
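+// aes256_ctr_enc256 processes two counter blocks per iteration using 256-bit
+// VAES registers; a remaining single block (when num_blocks is odd) is
+// handled by falling back to the one-block-at-a-time aes256_ctr_enc().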
+void aes256_ctr_enc256(OUT uint8_t *ct, + IN const uint8_t *ctr, + IN const uint32_t num_blocks, + IN const aes256_ks_t *ks) +{ + const uint64_t num_par_blocks = num_blocks/2; + const uint64_t blocks_rem = num_blocks - (2*(num_par_blocks)); + + __m256i ks256[AES256_ROUNDS + 1]; + load_ks(ks256, ks); + + __m128i single_block = load_m128i(ctr); + __m256i ctr_blocks = _mm256_broadcastsi128_si256(single_block); + + // Preparing the masks + const __m256i bswap_mask = _mm256_set_epi32(BSWAP_MASK, BSWAP_MASK); + const __m256i two = _mm256_set_epi32(0,0,0,2,0,0,0,2); + const __m256i init = _mm256_set_epi32(0,0,0,1,0,0,0,0); + + // Initialize two parallel counters + ctr_blocks = ADD32_256(ctr_blocks, init); + __m256i p = SHUF8_256(ctr_blocks, bswap_mask); + + for (uint32_t block_idx = 0; block_idx < num_par_blocks; block_idx++) + { + p = XOR256(p, ks256[0]); + for (uint32_t i = 1; i < AES256_ROUNDS; i++) + { + p = VAESENC(p, ks256[i]); + } + p = VAESENCLAST(p, ks256[AES256_ROUNDS]); + + // We use memcpy to avoid align casting. + _mm256_storeu_si256((__m256i *)&ct[PAR_AES_BLOCK_SIZE * block_idx], p); + + // Increase the two counters in parallel + ctr_blocks = ADD32_256(ctr_blocks, two); + p = SHUF8_256(ctr_blocks, bswap_mask); + } + + if(0 != blocks_rem) + { + single_block = EXTRACT128(p, 0); + aes256_ctr_enc(&ct[PAR_AES_BLOCK_SIZE * num_par_blocks], + (const uint8_t*)&single_block, blocks_rem, ks); + } + + // Delete secrets from registers if any. + ZERO256(); +} + +#endif //VAES256 + +#ifdef VAES512 + +_INLINE_ void load_ks(OUT __m512i ks512[AES256_ROUNDS + 1], + IN const aes256_ks_t *ks) +{ + for(uint32_t i = 0; i < AES256_ROUNDS + 1; i++) + { + ks512[i] = _mm512_broadcast_i32x4(ks->keys[i]); + } +} + +// NIST 800-90A Table 3, Section 10.2.1 (no derivation function) states that +// max_number_of_bits_per_request is min((2^ctr_len - 4) x block_len, 2^19) <= 2^19 +// Therefore the maximal number of blocks (16 bytes) is 2^19/128 = 2^19/2^7 = 2^12 < 2^32 +// Here num_blocks is assumed to be less then 2^32. +// It is the caller responsiblity to ensure it. +void aes256_ctr_enc512(OUT uint8_t *ct, + IN const uint8_t *ctr, + IN const uint32_t num_blocks, + IN const aes256_ks_t *ks) +{ + const uint64_t num_par_blocks = num_blocks/4; + const uint64_t blocks_rem = num_blocks - (4*(num_par_blocks)); + + __m512i ks512[AES256_ROUNDS + 1]; + load_ks(ks512, ks); + + __m128i single_block = load_m128i(ctr); + __m512i ctr_blocks = _mm512_broadcast_i32x4(single_block); + + // Preparing the masks + const __m512i bswap_mask = _mm512_set_epi32(BSWAP_MASK, BSWAP_MASK, + BSWAP_MASK, BSWAP_MASK); + const __m512i four = _mm512_set_epi32(0,0,0,4,0,0,0,4,0,0,0,4,0,0,0,4); + const __m512i init = _mm512_set_epi32(0,0,0,3,0,0,0,2,0,0,0,1,0,0,0,0); + + // Initialize four parallel counters + ctr_blocks = ADD32_512(ctr_blocks, init); + __m512i p = SHUF8_512(ctr_blocks, bswap_mask); + + for (uint32_t block_idx = 0; block_idx < num_par_blocks; block_idx++) + { + p = XOR512(p, ks512[0]); + for (uint32_t i = 1; i < AES256_ROUNDS; i++) + { + p = VAESENC(p, ks512[i]); + } + p = VAESENCLAST(p, ks512[AES256_ROUNDS]); + + + // We use memcpy to avoid align casting. 
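+        // (an unaligned 512-bit store is used here, so no 64-byte alignment of ct is required)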
+ _mm512_storeu_si512(&ct[PAR_AES_BLOCK_SIZE * block_idx], p); + + // Increase the four counters in parallel + ctr_blocks = ADD32_512(ctr_blocks, four); + p = SHUF8_512(ctr_blocks, bswap_mask); + } + + if(0 != blocks_rem) + { + single_block = EXTRACT128(p, 0); + aes256_ctr_enc(&ct[PAR_AES_BLOCK_SIZE * num_par_blocks], + (const uint8_t*)&single_block, blocks_rem, ks); + } + + // Delete secrets from registers if any. + ZERO256(); +} + +#endif //VAES512 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes_ni.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes_ni.h new file mode 100644 index 0000000000..3d2b21ecf5 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes_ni.h @@ -0,0 +1,85 @@ +/*************************************************************************** +* Written by Nir Drucker and Shay Gueron +* AWS Cryptographic Algorithms Group +* (ndrucker@amazon.com, gueron@amazon.com) +* +* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +* +* Licensed under the Apache License, Version 2.0 (the "License"). +* You may not use this file except in compliance with the License. +* A copy of the License is located at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* or in the "license" file accompanying this file. This file is distributed +* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +* express or implied. See the License for the specific language governing +* permissions and limitations under the License. +* The license is detailed in the file LICENSE.txt, and applies to this file. +* ***************************************************************************/ + +#pragma once + +#include +#include +#include "defs.h" + +#define MAX_AES_INVOKATION (MASK(32)) + +#define AES256_KEY_SIZE (32ULL) +#define AES256_KEY_BITS (AES256_KEY_SIZE * 8) +#define AES_BLOCK_SIZE (16ULL) +#define AES256_ROUNDS (14ULL) + +#ifdef VAES256 +#define PAR_AES_BLOCK_SIZE (AES_BLOCK_SIZE*2) +#elif defined(VAES512) +#define PAR_AES_BLOCK_SIZE (AES_BLOCK_SIZE*4) +#endif + +typedef ALIGN(16) struct aes256_key_s { + uint8_t raw[AES256_KEY_SIZE]; +} aes256_key_t; + +typedef ALIGN(16) struct aes256_ks_s { + __m128i keys[AES256_ROUNDS + 1]; +} aes256_ks_t; + +// The ks parameter must be 16 bytes aligned! +EXTERNC void aes256_key_expansion(OUT aes256_ks_t *ks, + IN const aes256_key_t *key); + +// Encrypt one 128-bit block ct = E(pt,ks) +void aes256_enc(OUT uint8_t *ct, + IN const uint8_t *pt, + IN const aes256_ks_t *ks); + +// Encrypt num_blocks 128-bit blocks +// ct[15:0] = E(pt[15:0],ks) +// ct[31:16] = E(pt[15:0] + 1,ks) +// ... +// ct[16*num_blocks - 1:16*(num_blocks-1)] = E(pt[15:0] + num_blocks,ks) +void aes256_ctr_enc(OUT uint8_t *ct, + IN const uint8_t *pt, + IN const uint32_t num_blocks, + IN const aes256_ks_t *ks); + +// Encrypt num_blocks 128-bit blocks using VAES (AVX-2) +// ct[15:0] = E(pt[15:0],ks) +// ct[31:16] = E(pt[15:0] + 1,ks) +// ... +// ct[16*num_blocks - 1:16*(num_blocks-1)] = E(pt[15:0] + num_blocks,ks) +void aes256_ctr_enc256(OUT uint8_t *ct, + IN const uint8_t *ctr, + IN const uint32_t num_blocks, + IN const aes256_ks_t *ks); + +// Encrypt num_blocks 128-bit blocks using VAES (AVX512) +// ct[15:0] = E(pt[15:0],ks) +// ct[31:16] = E(pt[15:0] + 1,ks) +// ... 
+// ct[16*num_blocks - 1:16*(num_blocks-1)] = E(pt[15:0] + num_blocks,ks) +void aes256_ctr_enc512(OUT uint8_t *ct, + IN const uint8_t *ctr, + IN const uint32_t num_blocks, + IN const aes256_ks_t *ks); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/api.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/api.c new file mode 100644 index 0000000000..baccd590b5 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/api.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: Apache-2.0 + +#include +#include + +#if defined(ENABLE_SIGN) + +SQISIGN_API +int +crypto_sign_keypair(unsigned char *pk, unsigned char *sk) +{ + + return sqisign_keypair(pk, sk); +} + +SQISIGN_API +int +crypto_sign(unsigned char *sm, unsigned long long *smlen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *sk) +{ + return sqisign_sign(sm, smlen, m, mlen, sk); +} +#endif + +SQISIGN_API +int +crypto_sign_open(unsigned char *m, unsigned long long *mlen, + const unsigned char *sm, unsigned long long smlen, + const unsigned char *pk) +{ + return sqisign_open(m, mlen, sm, smlen, pk); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/api.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/api.h new file mode 100644 index 0000000000..93a39842fc --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/api.h @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef api_h +#define api_h + +#include + +#define CRYPTO_SECRETKEYBYTES 353 +#define CRYPTO_PUBLICKEYBYTES 65 +#define CRYPTO_BYTES 148 + +#define CRYPTO_ALGNAME "SQIsign_lvl1" + +#if defined(ENABLE_SIGN) +SQISIGN_API +int +crypto_sign_keypair(unsigned char *pk, unsigned char *sk); + +SQISIGN_API +int +crypto_sign(unsigned char *sm, unsigned long long *smlen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *sk); +#endif + +SQISIGN_API +int +crypto_sign_open(unsigned char *m, unsigned long long *mlen, + const unsigned char *sm, unsigned long long smlen, + const unsigned char *pk); + +#endif /* api_h */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/asm_preamble.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/asm_preamble.h new file mode 100644 index 0000000000..3ef7927e9c --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/asm_preamble.h @@ -0,0 +1,22 @@ +#ifdef __APPLE__ +#define CAT(A, B) _CAT(A, B) +#define _CAT(A, B) A##B +#undef fp_add +#undef fp_sub +#undef fp_mul +#undef fp_sqr +#undef fp2_mul_c0 +#undef fp2_mul_c1 +#undef fp2_sq_c0 +#undef fp2_sq_c1 +#define p2 CAT(_, p2) +#define p CAT(_, p) +#define fp_add CAT(_, SQISIGN_NAMESPACE(fp_add)) +#define fp_sub CAT(_, SQISIGN_NAMESPACE(fp_sub)) +#define fp_mul CAT(_, SQISIGN_NAMESPACE(fp_mul)) +#define fp_sqr CAT(_, SQISIGN_NAMESPACE(fp_sqr)) +#define fp2_mul_c0 CAT(_, SQISIGN_NAMESPACE(fp2_mul_c0)) +#define fp2_mul_c1 CAT(_, SQISIGN_NAMESPACE(fp2_mul_c1)) +#define fp2_sq_c0 CAT(_, SQISIGN_NAMESPACE(fp2_sq_c0)) +#define fp2_sq_c1 CAT(_, SQISIGN_NAMESPACE(fp2_sq_c1)) +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/basis.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/basis.c new file mode 100644 index 0000000000..94cb7fcacb --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/basis.c @@ -0,0 +1,416 @@ +#include "ec.h" +#include "fp2.h" +#include "e0_basis.h" +#include + +uint32_t +ec_recover_y(fp2_t *y, const fp2_t *Px, const ec_curve_t *curve) +{ // Recover y-coordinate of a point on the Montgomery curve y^2 = x^3 
+ Ax^2 + x + fp2_t t0; + + fp2_sqr(&t0, Px); + fp2_mul(y, &t0, &curve->A); // Ax^2 + fp2_add(y, y, Px); // Ax^2 + x + fp2_mul(&t0, &t0, Px); + fp2_add(y, y, &t0); // x^3 + Ax^2 + x + // This is required, because we do not yet know that our curves are + // supersingular so our points live on the twist with B = 1. + return fp2_sqrt_verify(y); +} + +static void +difference_point(ec_point_t *PQ, const ec_point_t *P, const ec_point_t *Q, const ec_curve_t *curve) +{ + // Given P,Q in projective x-only, computes a deterministic choice for (P-Q) + // Based on Proposition 3 of https://eprint.iacr.org/2017/518.pdf + + fp2_t Bxx, Bxz, Bzz, t0, t1; + + fp2_mul(&t0, &P->x, &Q->x); + fp2_mul(&t1, &P->z, &Q->z); + fp2_sub(&Bxx, &t0, &t1); + fp2_sqr(&Bxx, &Bxx); + fp2_mul(&Bxx, &Bxx, &curve->C); // C*(P.x*Q.x-P.z*Q.z)^2 + fp2_add(&Bxz, &t0, &t1); + fp2_mul(&t0, &P->x, &Q->z); + fp2_mul(&t1, &P->z, &Q->x); + fp2_add(&Bzz, &t0, &t1); + fp2_mul(&Bxz, &Bxz, &Bzz); // (P.x*Q.x+P.z*Q.z)(P.x*Q.z+P.z*Q.x) + fp2_sub(&Bzz, &t0, &t1); + fp2_sqr(&Bzz, &Bzz); + fp2_mul(&Bzz, &Bzz, &curve->C); // C*(P.x*Q.z-P.z*Q.x)^2 + fp2_mul(&Bxz, &Bxz, &curve->C); // C*(P.x*Q.x+P.z*Q.z)(P.x*Q.z+P.z*Q.x) + fp2_mul(&t0, &t0, &t1); + fp2_mul(&t0, &t0, &curve->A); + fp2_add(&t0, &t0, &t0); + fp2_add(&Bxz, &Bxz, &t0); // C*(P.x*Q.x+P.z*Q.z)(P.x*Q.z+P.z*Q.x) + 2*A*P.x*Q.z*P.z*Q.x + + // To ensure that the denominator is a fourth power in Fp, we normalize by + // C*C_bar^2*(P.z)_bar^2*(Q.z)_bar^2 + fp_copy(&t0.re, &curve->C.re); + fp_neg(&t0.im, &curve->C.im); + fp2_sqr(&t0, &t0); + fp2_mul(&t0, &t0, &curve->C); + fp_copy(&t1.re, &P->z.re); + fp_neg(&t1.im, &P->z.im); + fp2_sqr(&t1, &t1); + fp2_mul(&t0, &t0, &t1); + fp_copy(&t1.re, &Q->z.re); + fp_neg(&t1.im, &Q->z.im); + fp2_sqr(&t1, &t1); + fp2_mul(&t0, &t0, &t1); + fp2_mul(&Bxx, &Bxx, &t0); + fp2_mul(&Bxz, &Bxz, &t0); + fp2_mul(&Bzz, &Bzz, &t0); + + // Solving quadratic equation + fp2_sqr(&t0, &Bxz); + fp2_mul(&t1, &Bxx, &Bzz); + fp2_sub(&t0, &t0, &t1); + // No need to check if t0 is square, as per the entangled basis algorithm. + fp2_sqrt(&t0); + fp2_add(&PQ->x, &Bxz, &t0); + fp2_copy(&PQ->z, &Bzz); +} + +// Lifts a basis x(P), x(Q), x(P-Q) assuming the curve has (A/C : 1) and the point +// P = (X/Z : 1). 
For generic implementation see lift_basis() +uint32_t +lift_basis_normalized(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E) +{ + assert(fp2_is_one(&B->P.z)); + assert(fp2_is_one(&E->C)); + + fp2_copy(&P->x, &B->P.x); + fp2_copy(&Q->x, &B->Q.x); + fp2_copy(&Q->z, &B->Q.z); + fp2_set_one(&P->z); + uint32_t ret = ec_recover_y(&P->y, &P->x, E); + + // Algorithm of Okeya-Sakurai to recover y.Q in the montgomery model + fp2_t v1, v2, v3, v4; + fp2_mul(&v1, &P->x, &Q->z); + fp2_add(&v2, &Q->x, &v1); + fp2_sub(&v3, &Q->x, &v1); + fp2_sqr(&v3, &v3); + fp2_mul(&v3, &v3, &B->PmQ.x); + fp2_add(&v1, &E->A, &E->A); + fp2_mul(&v1, &v1, &Q->z); + fp2_add(&v2, &v2, &v1); + fp2_mul(&v4, &P->x, &Q->x); + fp2_add(&v4, &v4, &Q->z); + fp2_mul(&v2, &v2, &v4); + fp2_mul(&v1, &v1, &Q->z); + fp2_sub(&v2, &v2, &v1); + fp2_mul(&v2, &v2, &B->PmQ.z); + fp2_sub(&Q->y, &v3, &v2); + fp2_add(&v1, &P->y, &P->y); + fp2_mul(&v1, &v1, &Q->z); + fp2_mul(&v1, &v1, &B->PmQ.z); + fp2_mul(&Q->x, &Q->x, &v1); + fp2_mul(&Q->z, &Q->z, &v1); + + // Transforming to a jacobian coordinate + fp2_sqr(&v1, &Q->z); + fp2_mul(&Q->y, &Q->y, &v1); + fp2_mul(&Q->x, &Q->x, &Q->z); + return ret; +} + +uint32_t +lift_basis(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E) +{ + // Normalise the curve E such that (A : C) is (A/C : 1) + // and the point x(P) = (X/Z : 1). + fp2_t inverses[2]; + fp2_copy(&inverses[0], &B->P.z); + fp2_copy(&inverses[1], &E->C); + + fp2_batched_inv(inverses, 2); + fp2_set_one(&B->P.z); + fp2_set_one(&E->C); + + fp2_mul(&B->P.x, &B->P.x, &inverses[0]); + fp2_mul(&E->A, &E->A, &inverses[1]); + + // Lift the basis to Jacobian points P, Q + return lift_basis_normalized(P, Q, B, E); +} + +// Given an x-coordinate, determines if this is a valid +// point on the curve. Assumes C=1. +static uint32_t +is_on_curve(const fp2_t *x, const ec_curve_t *curve) +{ + assert(fp2_is_one(&curve->C)); + fp2_t t0; + + fp2_add(&t0, x, &curve->A); // x + (A/C) + fp2_mul(&t0, &t0, x); // x^2 + (A/C)*x + fp2_add_one(&t0, &t0); // x^2 + (A/C)*x + 1 + fp2_mul(&t0, &t0, x); // x^3 + (A/C)*x^2 + x + + return fp2_is_square(&t0); +} + +// Helper function which given a point of order k*2^n with n maximal +// and k odd, computes a point of order 2^f +static inline void +clear_cofactor_for_maximal_even_order(ec_point_t *P, ec_curve_t *curve, int f) +{ + // clear out the odd cofactor to get a point of order 2^n + ec_mul(P, p_cofactor_for_2f, P_COFACTOR_FOR_2F_BITLENGTH, P, curve); + + // clear the power of two to get a point of order 2^f + for (int i = 0; i < TORSION_EVEN_POWER - f; i++) { + xDBL_A24(P, P, &curve->A24, curve->is_A24_computed_and_normalized); + } +} + +// Helper function which finds an NQR -1 / (1 + i*b) for entangled basis generation +static uint8_t +find_nqr_factor(fp2_t *x, ec_curve_t *curve, const uint8_t start) +{ + // factor = -1/(1 + i*b) for b in Fp will be NQR whenever 1 + b^2 is NQR + // in Fp, so we find one of these and then invert (1 + i*b). We store b + // as a u8 hint to save time in verification. + + // We return the hint as a u8, but use (uint16_t)n to give 2^16 - 1 + // to make failure cryptographically negligible, with a fallback when + // n > 128 is required. 
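+    // The loop below searches increasing b until 1 + b^2 is a non-residue in Fp
+    // and x = -A/(1 + i*b) lies on the curve; the on-curve check is done on
+    // A^2*(z - 1) - z^2 so that (1 + i*b) is only inverted for the final candidate.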
+ uint8_t hint; + uint32_t found = 0; + uint16_t n = start; + + bool qr_b = 1; + fp_t b, tmp; + fp2_t z, t0, t1; + + do { + while (qr_b) { + // find b with 1 + b^2 a non-quadratic residue + fp_set_small(&tmp, (uint32_t)n * n + 1); + qr_b = fp_is_square(&tmp); + n++; // keeps track of b = n - 1 + } + + // for Px := -A/(1 + i*b) to be on the curve + // is equivalent to A^2*(z-1) - z^2 NQR for z = 1 + i*b + // thus prevents unnecessary inversion pre-check + + // t0 = z - 1 = i*b + // t1 = z = 1 + i*b + fp_set_small(&b, (uint32_t)n - 1); + fp2_set_zero(&t0); + fp2_set_one(&z); + fp_copy(&z.im, &b); + fp_copy(&t0.im, &b); + + // A^2*(z-1) - z^2 + fp2_sqr(&t1, &curve->A); + fp2_mul(&t0, &t0, &t1); // A^2 * (z - 1) + fp2_sqr(&t1, &z); + fp2_sub(&t0, &t0, &t1); // A^2 * (z - 1) - z^2 + found = !fp2_is_square(&t0); + + qr_b = 1; + } while (!found); + + // set Px to -A/(1 + i*b) + fp2_copy(x, &z); + fp2_inv(x); + fp2_mul(x, x, &curve->A); + fp2_neg(x, x); + + /* + * With very low probability n will not fit in 7 bits. + * We set hint = 0 which signals failure and the need + * to generate a value on the fly during verification + */ + hint = n <= 128 ? n - 1 : 0; + + return hint; +} + +// Helper function which finds a point x(P) = n * A +static uint8_t +find_nA_x_coord(fp2_t *x, ec_curve_t *curve, const uint8_t start) +{ + assert(!fp2_is_square(&curve->A)); // Only to be called when A is a NQR + + // when A is NQR we allow x(P) to be a multiple n*A of A + uint8_t n = start; + if (n == 1) { + fp2_copy(x, &curve->A); + } else { + fp2_mul_small(x, &curve->A, n); + } + + while (!is_on_curve(x, curve)) { + fp2_add(x, x, &curve->A); + n++; + } + + /* + * With very low probability (1/2^128), n will not fit in 7 bits. + * In this case, we set hint = 0 which signals failure and the need + * to generate a value on the fly during verification + */ + uint8_t hint = n < 128 ? 
n : 0;
+    return hint;
+}
+
+// The entangled basis generation does not allow A = 0
+// so we simply return the one we have already precomputed
+static void
+ec_basis_E0_2f(ec_basis_t *PQ2, ec_curve_t *curve, int f)
+{
+    assert(fp2_is_zero(&curve->A));
+    ec_point_t P, Q;
+
+    // Set P, Q to precomputed (X : 1) values
+    fp2_copy(&P.x, &BASIS_E0_PX);
+    fp2_copy(&Q.x, &BASIS_E0_QX);
+    fp2_set_one(&P.z);
+    fp2_set_one(&Q.z);
+
+    // clear the power of two to get a point of order 2^f
+    for (int i = 0; i < TORSION_EVEN_POWER - f; i++) {
+        xDBL_E0(&P, &P);
+        xDBL_E0(&Q, &Q);
+    }
+
+    // Set P, Q in the basis and compute x(P - Q)
+    copy_point(&PQ2->P, &P);
+    copy_point(&PQ2->Q, &Q);
+    difference_point(&PQ2->PmQ, &P, &Q, curve);
+}
+
+// Computes a basis E[2^f] = <P, Q> where the point Q is above (0 : 0)
+// and stores hints as an array for faster recomputation at a later point
+uint8_t
+ec_curve_to_basis_2f_to_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f)
+{
+    // Normalise (A/C : 1) and ((A + 2)/4 : 1)
+    ec_normalize_curve_and_A24(curve);
+
+    if (fp2_is_zero(&curve->A)) {
+        ec_basis_E0_2f(PQ2, curve, f);
+        return 0;
+    }
+
+    uint8_t hint;
+    bool hint_A = fp2_is_square(&curve->A);
+
+    // Compute the points P, Q
+    ec_point_t P, Q;
+
+    if (!hint_A) {
+        // when A is NQR we allow x(P) to be a multiple n*A of A
+        hint = find_nA_x_coord(&P.x, curve, 1);
+    } else {
+        // when A is QR we instead have to find (1 + b^2) a NQR
+        // such that x(P) = -A / (1 + i*b)
+        hint = find_nqr_factor(&P.x, curve, 1);
+    }
+
+    fp2_set_one(&P.z);
+    fp2_add(&Q.x, &curve->A, &P.x);
+    fp2_neg(&Q.x, &Q.x);
+    fp2_set_one(&Q.z);
+
+    // clear out the odd cofactor to get a point of order 2^f
+    clear_cofactor_for_maximal_even_order(&P, curve, f);
+    clear_cofactor_for_maximal_even_order(&Q, curve, f);
+
+    // compute PmQ, set PmQ to Q to ensure Q above (0,0)
+    difference_point(&PQ2->Q, &P, &Q, curve);
+    copy_point(&PQ2->P, &P);
+    copy_point(&PQ2->PmQ, &Q);
+
+    // Finally, we compress hint_A and hint into a single byte.
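+    // The packed byte is decoded again by ec_curve_to_basis_2f_from_hint() below.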
+ // We choose to set the LSB of hint to hint_A + assert(hint < 128); // We expect hint to be 7-bits in size + return (hint << 1) | hint_A; +} + +// Computes a basis E[2^f] = where the point Q is above (0 : 0) +// given the hints as an array for faster basis computation +int +ec_curve_to_basis_2f_from_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f, const uint8_t hint) +{ + // Normalise (A/C : 1) and ((A + 2)/4 : 1) + ec_normalize_curve_and_A24(curve); + + if (fp2_is_zero(&curve->A)) { + ec_basis_E0_2f(PQ2, curve, f); + return 1; + } + + // The LSB of hint encodes whether A is a QR + // The remaining 7-bits are used to find a valid x(P) + bool hint_A = hint & 1; + uint8_t hint_P = hint >> 1; + + // Compute the points P, Q + ec_point_t P, Q; + + if (!hint_P) { + // When hint_P = 0 it means we did not find a point in 128 attempts + // this is very rare and we almost never expect to need this fallback + // In either case, we can start with b = 128 to skip testing the known + // values which will not work + if (!hint_A) { + find_nA_x_coord(&P.x, curve, 128); + } else { + find_nqr_factor(&P.x, curve, 128); + } + } else { + // Otherwise we use the hint to directly find x(P) based on hint_A + if (!hint_A) { + // when A is NQR, we have found n such that x(P) = n*A + fp2_mul_small(&P.x, &curve->A, hint_P); + } else { + // when A is QR we have found b such that (1 + b^2) is a NQR in + // Fp, so we must compute x(P) = -A / (1 + i*b) + fp_set_one(&P.x.re); + fp_set_small(&P.x.im, hint_P); + fp2_inv(&P.x); + fp2_mul(&P.x, &P.x, &curve->A); + fp2_neg(&P.x, &P.x); + } + } + fp2_set_one(&P.z); + +#ifndef NDEBUG + int passed = 1; + passed = is_on_curve(&P.x, curve); + passed &= !fp2_is_square(&P.x); + + if (!passed) + return 0; +#endif + + // set xQ to -xP - A + fp2_add(&Q.x, &curve->A, &P.x); + fp2_neg(&Q.x, &Q.x); + fp2_set_one(&Q.z); + + // clear out the odd cofactor to get a point of order 2^f + clear_cofactor_for_maximal_even_order(&P, curve, f); + clear_cofactor_for_maximal_even_order(&Q, curve, f); + + // compute PmQ, set PmQ to Q to ensure Q above (0,0) + difference_point(&PQ2->Q, &P, &Q, curve); + copy_point(&PQ2->P, &P); + copy_point(&PQ2->PmQ, &Q); + +#ifndef NDEBUG + passed &= test_basis_order_twof(PQ2, curve, f); + + if (!passed) + return 0; +#endif + + return 1; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/bench.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/bench.h new file mode 100644 index 0000000000..c253825828 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/bench.h @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: Apache-2.0 +#ifndef BENCH_H__ +#define BENCH_H__ + +#include +#include +#include +#include +#include +#if defined(__APPLE__) +#include "bench_macos.h" +#endif + +#if defined(TARGET_ARM) || defined(TARGET_S390X) || defined(NO_CYCLE_COUNTER) +#define BENCH_UNIT0 "nanoseconds" +#define BENCH_UNIT3 "microseconds" +#define BENCH_UNIT6 "milliseconds" +#define BENCH_UNIT9 "seconds" +#else +#define BENCH_UNIT0 "cycles" +#define BENCH_UNIT3 "kilocycles" +#define BENCH_UNIT6 "megacycles" +#define BENCH_UNIT9 "gigacycles" +#endif + +static inline void +cpucycles_init(void) { +#if defined(__APPLE__) && defined(TARGET_ARM64) + macos_init_rdtsc(); +#endif +} + +static inline uint64_t +cpucycles(void) +{ +#if defined(TARGET_AMD64) || defined(TARGET_X86) + uint32_t hi, lo; + + asm volatile("rdtsc" : "=a"(lo), "=d"(hi)); + return ((uint64_t)lo) | ((uint64_t)hi << 32); +#elif defined(TARGET_S390X) + uint64_t tod; + asm volatile("stckf %0\n" : 
"=Q"(tod) : : "cc"); + return (tod * 1000 / 4096); +#elif defined(TARGET_ARM64) && !defined(NO_CYCLE_COUNTER) +#if defined(__APPLE__) + return macos_rdtsc(); +#else + uint64_t cycles; + asm volatile("mrs %0, PMCCNTR_EL0" : "=r"(cycles)); + return cycles; +#endif // __APPLE__ +#else + struct timespec time; + clock_gettime(CLOCK_REALTIME, &time); + return (uint64_t)time.tv_sec * 1000000000 + time.tv_nsec; +#endif +} + +static inline int +CMPFUNC(const void *a, const void *b) +{ + uint64_t aa = *(uint64_t *)a, bb = *(uint64_t *)b; + + if (aa > bb) + return +1; + if (aa < bb) + return -1; + return 0; +} + +static inline uint32_t +ISQRT(uint64_t x) +{ + uint32_t r = 0; + for (ssize_t i = 31; i >= 0; --i) { + uint32_t s = r + (1 << i); + if ((uint64_t)s * s <= x) + r = s; + } + return r; +} + +static inline double +_TRUNC(uint64_t x) +{ + return x / 1000 / 1000.; +} +#define _FMT ".3lf" +#define _UNIT BENCH_UNIT6 + +#define BENCH_CODE_1(RUNS) \ + { \ + const size_t count = (RUNS); \ + if (!count) \ + abort(); \ + uint64_t cycles, cycles1, cycles2; \ + uint64_t cycles_list[count]; \ + cycles = 0; \ + for (size_t i = 0; i < count; ++i) { \ + cycles1 = cpucycles(); + +#define BENCH_CODE_2(name) \ + cycles2 = cpucycles(); \ + cycles_list[i] = cycles2 - cycles1; \ + cycles += cycles2 - cycles1; \ + } \ + qsort(cycles_list, count, sizeof(uint64_t), CMPFUNC); \ + uint64_t variance = 0; \ + for (size_t i = 0; i < count; ++i) { \ + int64_t off = cycles_list[i] - cycles / count; \ + variance += off * off; \ + } \ + variance /= count; \ + printf(" %-10s", name); \ + printf(" | average %9" _FMT " | stddev %9" _FMT, \ + _TRUNC(cycles / count), \ + _TRUNC(ISQRT(variance))); \ + printf(" | median %9" _FMT " | min %9" _FMT " | max %9" _FMT, \ + _TRUNC(cycles_list[count / 2]), \ + _TRUNC(cycles_list[0]), \ + _TRUNC(cycles_list[count - 1])); \ + printf(" (%s)\n", _UNIT); \ + } + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/bench_macos.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/bench_macos.h new file mode 100644 index 0000000000..0494fc85e9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/bench_macos.h @@ -0,0 +1,143 @@ +// WARNING: must be run as root on an M1 device +// WARNING: fragile, uses private apple APIs +// currently no command line interface, see variables at top of main + +/* +no warranty; use at your own risk - i believe this code needs +some minor changes to work on some later hardware and/or software revisions, +which is unsurprising given the use of undocumented, private APIs. +------------------------------------------------------------------------------ +This code is available under 2 licenses -- choose whichever you prefer. +------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2020 Dougall Johnson +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ + +/* + Based on https://github.com/travisdowns/robsize + Henry Wong + http://blog.stuffedcow.net/2013/05/measuring-rob-capacity/ + 2014-10-14 +*/ + +#include +#include +#include +#include + +#define KPERF_LIST \ + /* ret, name, params */ \ + F(int, kpc_force_all_ctrs_set, int) \ + F(int, kpc_set_counting, uint32_t) \ + F(int, kpc_set_thread_counting, uint32_t) \ + F(int, kpc_set_config, uint32_t, void *) \ + F(int, kpc_get_thread_counters, int, unsigned int, void *) + +#define F(ret, name, ...) \ + typedef ret name##proc(__VA_ARGS__); \ + static name##proc *name; +KPERF_LIST +#undef F + +#define CFGWORD_EL0A64EN_MASK (0x20000) + +#define CPMU_CORE_CYCLE 0x02 + +#define KPC_CLASS_FIXED (0) +#define KPC_CLASS_CONFIGURABLE (1) + +#define COUNTERS_COUNT 10 +#define KPC_MASK ((1u << KPC_CLASS_CONFIGURABLE) | (1u << KPC_CLASS_FIXED)) +static uint64_t g_config[COUNTERS_COUNT]; +static uint64_t g_counters[COUNTERS_COUNT]; + +static void +macos_configure_rdtsc() +{ + if (kpc_force_all_ctrs_set(1)) { + printf("kpc_force_all_ctrs_set failed\n"); + return; + } + + if (kpc_set_config(KPC_MASK, g_config)) { + printf("kpc_set_config failed\n"); + return; + } + + if (kpc_set_counting(KPC_MASK)) { + printf("kpc_set_counting failed\n"); + return; + } + + if (kpc_set_thread_counting(KPC_MASK)) { + printf("kpc_set_thread_counting failed\n"); + return; + } +} + +static void +macos_init_rdtsc() +{ + void *kperf = + dlopen("/System/Library/PrivateFrameworks/kperf.framework/Versions/A/kperf", RTLD_LAZY); + if (!kperf) { + printf("kperf = %p\n", kperf); + return; + } +#define F(ret, name, ...) 
\ + name = (name##proc *)(intptr_t)(dlsym(kperf, #name)); \ + if (!name) { \ + printf("%s = %p\n", #name, (void *)(intptr_t)name); \ + return; \ + } + KPERF_LIST +#undef F + + g_config[0] = CPMU_CORE_CYCLE | CFGWORD_EL0A64EN_MASK; + + macos_configure_rdtsc(); +} + +static uint64_t +macos_rdtsc(void) +{ + if (kpc_get_thread_counters(0, COUNTERS_COUNT, g_counters)) { + printf("kpc_get_thread_counters failed\n"); + return 1; + } + return g_counters[2]; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/biextension.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/biextension.c new file mode 100644 index 0000000000..1df7ab938b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/biextension.c @@ -0,0 +1,770 @@ +#include +#include +#include +#include + +/* + * We implement the biextension arithmetic by using the cubical torsor + * representation. For now only implement the 2^e-ladder. + * + * Warning: cubicalADD is off by a factor x4 with respect to the correct + * cubical arithmetic. This does not affect the Weil pairing or the Tate + * pairing over F_{p^2} (due to the final exponentiation), but would give + * the wrong result if we compute the Tate pairing over F_p. + */ + +// this would be exactly like xADD if PQ was 'antinormalised' as (1,z) +// Cost: 3M + 2S + 3a + 3s +// Note: if needed, cubicalDBL is simply xDBL_A24 normalized and +// costs 3M + 2S + 2a + 2s + +static void +cubicalADD(ec_point_t *R, const ec_point_t *P, const ec_point_t *Q, const fp2_t *ixPQ) +{ + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_add(&t2, &Q->x, &Q->z); + fp2_sub(&t3, &Q->x, &Q->z); + fp2_mul(&t0, &t0, &t3); + fp2_mul(&t1, &t1, &t2); + fp2_add(&t2, &t0, &t1); + fp2_sub(&t3, &t0, &t1); + fp2_sqr(&R->z, &t3); + fp2_sqr(&t2, &t2); + fp2_mul(&R->x, ixPQ, &t2); +} + +// Given cubical reps of P, Q and x(P - Q) = (1 : ixPQ) +// compute P + Q, [2]Q +// Cost: 6M + 4S + 4a + 4s +static void +cubicalDBLADD(ec_point_t *PpQ, + ec_point_t *QQ, + const ec_point_t *P, + const ec_point_t *Q, + const fp2_t *ixPQ, + const ec_point_t *A24) +{ + // A24 = (A+2C/4C: 1) + assert(fp2_is_one(&A24->z)); + + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_add(&PpQ->x, &Q->x, &Q->z); + fp2_sub(&t3, &Q->x, &Q->z); + fp2_sqr(&t2, &PpQ->x); + fp2_sqr(&QQ->z, &t3); + fp2_mul(&t0, &t0, &t3); + fp2_mul(&t1, &t1, &PpQ->x); + fp2_add(&PpQ->x, &t0, &t1); + fp2_sub(&t3, &t0, &t1); + fp2_sqr(&PpQ->z, &t3); + fp2_sqr(&PpQ->x, &PpQ->x); + fp2_mul(&PpQ->x, ixPQ, &PpQ->x); + fp2_sub(&t3, &t2, &QQ->z); + fp2_mul(&QQ->x, &t2, &QQ->z); + fp2_mul(&t0, &t3, &A24->x); + fp2_add(&t0, &t0, &QQ->z); + fp2_mul(&QQ->z, &t0, &t3); +} + +// iterative biextension doubling +static void +biext_ladder_2e(uint32_t e, + ec_point_t *PnQ, + ec_point_t *nQ, + const ec_point_t *PQ, + const ec_point_t *Q, + const fp2_t *ixP, + const ec_point_t *A24) +{ + copy_point(PnQ, PQ); + copy_point(nQ, Q); + for (uint32_t i = 0; i < e; i++) { + cubicalDBLADD(PnQ, nQ, PnQ, nQ, ixP, A24); + } +} + +// Compute the monodromy ratio X/Z above as a (X:Z) point to avoid a division +// We implicitly use (1,0) as a cubical point above 0_E +static void +point_ratio(ec_point_t *R, const ec_point_t *PnQ, const ec_point_t *nQ, const ec_point_t *P) +{ + // Sanity tests + assert(ec_is_zero(nQ)); + assert(ec_is_equal(PnQ, P)); + + fp2_mul(&R->x, &nQ->x, &P->x); + fp2_copy(&R->z, &PnQ->x); +} + +// Compute the cubical translation of P by a point of 2-torsion T +static void 
+translate(ec_point_t *P, const ec_point_t *T)
+{
+    // When we translate, the following three things can happen:
+    // T = (A : 0) then the translation of P should be P
+    // T = (0 : B) then the translation of P = (X : Z) should be (Z : X)
+    // Otherwise T = (A : B) and P translates to (AX - BZ : BX - AZ)
+    // We compute this in constant time by computing the generic case
+    // and then using constant time swaps.
+    fp2_t PX_new, PZ_new;
+
+    {
+        fp2_t t0, t1;
+
+        // PX_new = AX - BZ
+        fp2_mul(&t0, &T->x, &P->x);
+        fp2_mul(&t1, &T->z, &P->z);
+        fp2_sub(&PX_new, &t0, &t1);
+
+        // PZ_new = BX - AZ
+        fp2_mul(&t0, &T->z, &P->x);
+        fp2_mul(&t1, &T->x, &P->z);
+        fp2_sub(&PZ_new, &t0, &t1);
+    }
+
+    // When we have A zero we should return (Z : X)
+    uint32_t TA_is_zero = fp2_is_zero(&T->x);
+    fp2_select(&PX_new, &PX_new, &P->z, TA_is_zero);
+    fp2_select(&PZ_new, &PZ_new, &P->x, TA_is_zero);
+
+    // When we have B zero we should return (X : Z)
+    uint32_t TB_is_zero = fp2_is_zero(&T->z);
+    fp2_select(&PX_new, &PX_new, &P->x, TB_is_zero);
+    fp2_select(&PZ_new, &PZ_new, &P->z, TB_is_zero);
+
+    // Set the point to the desired result
+    fp2_copy(&P->x, &PX_new);
+    fp2_copy(&P->z, &PZ_new);
+}
+
+// Compute the biextension monodromy g_P,Q^{2^g} (in level 1) via the
+// cubical arithmetic of P+2^e Q.
+// The suffix _i means that we are given 1/x(P) as a parameter. Warning: to
+// get a meaningful result when using the monodromy to compute pairings, we
+// need P, Q, PQ, A24 to be normalised (this is not strictly necessary, but
+// care needs to be taken when they are not normalised. We only handle the
+// normalised case for now)
+static void
+monodromy_i(ec_point_t *R, const pairing_params_t *pairing_data, bool swap_PQ)
+{
+    fp2_t ixP;
+    ec_point_t P, Q, PnQ, nQ;
+
+    // When we compute the Weil pairing we need both P + [2^e]Q and
+    // Q + [2^e]P, which we can do easily with biext_ladder_2e() below;
+    // we use a bool to decide whether to use Q, ixP or P, ixQ in the
+    // ladder and P or Q in translation.
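+    // The ladder below computes P + [2^(e-1)]Q and [2^(e-1)]Q (a 2-torsion
+    // point when Q has order 2^e); both are translated by this 2-torsion
+    // point before taking the ratio that gives the monodromy.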
+ if (!swap_PQ) { + copy_point(&P, &pairing_data->P); + copy_point(&Q, &pairing_data->Q); + fp2_copy(&ixP, &pairing_data->ixP); + } else { + copy_point(&P, &pairing_data->Q); + copy_point(&Q, &pairing_data->P); + fp2_copy(&ixP, &pairing_data->ixQ); + } + + // Compute the biextension ladder P + [2^e]Q + biext_ladder_2e(pairing_data->e - 1, &PnQ, &nQ, &pairing_data->PQ, &Q, &ixP, &pairing_data->A24); + translate(&PnQ, &nQ); + translate(&nQ, &nQ); + point_ratio(R, &PnQ, &nQ, &P); +} + +// Normalize the points and also store 1/x(P), 1/x(Q) +static void +cubical_normalization(pairing_params_t *pairing_data, const ec_point_t *P, const ec_point_t *Q) +{ + fp2_t t[4]; + fp2_copy(&t[0], &P->x); + fp2_copy(&t[1], &P->z); + fp2_copy(&t[2], &Q->x); + fp2_copy(&t[3], &Q->z); + fp2_batched_inv(t, 4); + + // Store PZ / PX and QZ / QX + fp2_mul(&pairing_data->ixP, &P->z, &t[0]); + fp2_mul(&pairing_data->ixQ, &Q->z, &t[2]); + + // Store x(P), x(Q) normalised to (X/Z : 1) + fp2_mul(&pairing_data->P.x, &P->x, &t[1]); + fp2_mul(&pairing_data->Q.x, &Q->x, &t[3]); + fp2_set_one(&pairing_data->P.z); + fp2_set_one(&pairing_data->Q.z); +} + +// Weil pairing, PQ should be P+Q in (X:Z) coordinates +// We assume the points are normalised correctly +static void +weil_n(fp2_t *r, const pairing_params_t *pairing_data) +{ + ec_point_t R0, R1; + monodromy_i(&R0, pairing_data, true); + monodromy_i(&R1, pairing_data, false); + + fp2_mul(r, &R0.x, &R1.z); + fp2_inv(r); + fp2_mul(r, r, &R0.z); + fp2_mul(r, r, &R1.x); +} + +// Weil pairing, PQ should be P+Q in (X:Z) coordinates +// Normalise the points and call the code above +// The code will crash (division by 0) if either P or Q is (0:1) +void +weil(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E) +{ + pairing_params_t pairing_data; + // Construct the structure for the Weil pairing + // Set (PX/PZ : 1), (QX : QZ : 1), PZ/PX and QZ/QX + pairing_data.e = e; + cubical_normalization(&pairing_data, P, Q); + copy_point(&pairing_data.PQ, PQ); + + // Ensure the input curve has A24 normalised and store + // in a struct + ec_curve_normalize_A24(E); + copy_point(&pairing_data.A24, &E->A24); + + // Compute the Weil pairing e_(2^n)(P, Q) + weil_n(r, &pairing_data); +} + +// two helper functions for reducing the tate pairing +// clear_cofac clears (p + 1) // 2^f for an Fp2 value +void +clear_cofac(fp2_t *r, const fp2_t *a) +{ + digit_t exp = *p_cofactor_for_2f; + exp >>= 1; + + fp2_t x; + fp2_copy(&x, a); + fp2_copy(r, a); + + // removes cofac + while (exp > 0) { + fp2_sqr(r, r); + if (exp & 1) { + fp2_mul(r, r, &x); + } + exp >>= 1; + } +} + +// applies frobenius a + ib --> a - ib to an fp2 element +void +fp2_frob(fp2_t *out, const fp2_t *in) +{ + fp_copy(&(out->re), &(in->re)); + fp_neg(&(out->im), &(in->im)); +} + +// reduced Tate pairing, normalizes the points, assumes PQ is P+Q in (X:Z) +// coordinates. 
Computes 1/x(P) and 1/x(Q) for efficient cubical ladder +void +reduced_tate(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E) +{ + uint32_t e_full = TORSION_EVEN_POWER; + uint32_t e_diff = e_full - e; + ec_point_t R; + pairing_params_t pairing_data; + + // Construct the structure for the Weil pairing + // Set (PX/PZ : 1), (QX : QZ : 1), PZ/PX and QZ/QX + pairing_data.e = e; + cubical_normalization(&pairing_data, P, Q); + copy_point(&pairing_data.PQ, PQ); + + // Ensure the input curve has A24 normalised and store + // in a struct + ec_curve_normalize_A24(E); + copy_point(&pairing_data.A24, &E->A24); + + monodromy_i(&R, &pairing_data, true); + + // we get unreduced tate as R.X, R.Z + // reduced tate is -(R.Z/R.X)^((p^2 - 1) div 2^f) + // we reuse R.X and R.Z to split reduction step ^(p-1) into frobenius and ^-1 + fp2_t frob, tmp; + fp2_copy(&tmp, &R.x); + fp2_frob(&frob, &R.x); + fp2_mul(&R.x, &R.z, &frob); + fp2_frob(&frob, &R.z); + fp2_mul(&R.z, &tmp, &frob); + fp2_inv(&R.x); + fp2_mul(r, &R.x, &R.z); + + clear_cofac(r, r); + // clear remaining 2^e_diff + for (uint32_t j = 0; j < e_diff; j++) { + fp2_sqr(r, r); + } +} + +// Functions to compute discrete logs by computing the Weil pairing of points +// followed by computing the dlog in Fp^2 +// (If we work with full order points, it would be faster to use the Tate +// pairings rather than the Weil pairings; this is not implemented yet) + +// recursive dlog function +static bool +fp2_dlog_2e_rec(digit_t *a, long len, fp2_t *pows_f, fp2_t *pows_g, long stacklen) +{ + if (len == 0) { + // *a = 0; + for (int i = 0; i < NWORDS_ORDER; i++) { + a[i] = 0; + } + return true; + } else if (len == 1) { + if (fp2_is_one(&pows_f[stacklen - 1])) { + // a = 0; + for (int i = 0; i < NWORDS_ORDER; i++) { + a[i] = 0; + } + for (int i = 0; i < stacklen - 1; ++i) { + fp2_sqr(&pows_g[i], &pows_g[i]); // new_g = g^2 + } + return true; + } else if (fp2_is_equal(&pows_f[stacklen - 1], &pows_g[stacklen - 1])) { + // a = 1; + a[0] = 1; + for (int i = 1; i < NWORDS_ORDER; i++) { + a[i] = 0; + } + for (int i = 0; i < stacklen - 1; ++i) { + fp2_mul(&pows_f[i], &pows_f[i], &pows_g[i]); // new_f = f*g + fp2_sqr(&pows_g[i], &pows_g[i]); // new_g = g^2 + } + return true; + } else { + return false; + } + } else { + long right = (double)len * 0.5; + long left = len - right; + pows_f[stacklen] = pows_f[stacklen - 1]; + pows_g[stacklen] = pows_g[stacklen - 1]; + for (int i = 0; i < left; i++) { + fp2_sqr(&pows_f[stacklen], &pows_f[stacklen]); + fp2_sqr(&pows_g[stacklen], &pows_g[stacklen]); + } + // uint32_t dlp1 = 0, dlp2 = 0; + digit_t dlp1[NWORDS_ORDER], dlp2[NWORDS_ORDER]; + bool ok; + ok = fp2_dlog_2e_rec(dlp1, right, pows_f, pows_g, stacklen + 1); + if (!ok) + return false; + ok = fp2_dlog_2e_rec(dlp2, left, pows_f, pows_g, stacklen); + if (!ok) + return false; + // a = dlp1 + 2^right * dlp2 + multiple_mp_shiftl(dlp2, right, NWORDS_ORDER); + mp_add(a, dlp2, dlp1, NWORDS_ORDER); + + return true; + } +} + +// compute DLP: compute scal such that f = g^scal with f, 1/g as input +static bool +fp2_dlog_2e(digit_t *scal, const fp2_t *f, const fp2_t *g_inverse, int e) +{ + long log, len = e; + for (log = 0; len > 1; len >>= 1) + log++; + log += 1; + + fp2_t pows_f[log], pows_g[log]; + pows_f[0] = *f; + pows_g[0] = *g_inverse; + + for (int i = 0; i < NWORDS_ORDER; i++) { + scal[i] = 0; + } + + bool ok = fp2_dlog_2e_rec(scal, e, pows_f, pows_g, 1); + assert(ok); + + return ok; +} + +// Normalize the bases (P, Q), (R, S) and store 
their inverse +// and additionally normalise the curve to (A/C : 1) +static void +cubical_normalization_dlog(pairing_dlog_params_t *pairing_dlog_data, ec_curve_t *curve) +{ + fp2_t t[11]; + ec_basis_t *PQ = &pairing_dlog_data->PQ; + ec_basis_t *RS = &pairing_dlog_data->RS; + fp2_copy(&t[0], &PQ->P.x); + fp2_copy(&t[1], &PQ->P.z); + fp2_copy(&t[2], &PQ->Q.x); + fp2_copy(&t[3], &PQ->Q.z); + fp2_copy(&t[4], &PQ->PmQ.x); + fp2_copy(&t[5], &PQ->PmQ.z); + fp2_copy(&t[6], &RS->P.x); + fp2_copy(&t[7], &RS->P.z); + fp2_copy(&t[8], &RS->Q.x); + fp2_copy(&t[9], &RS->Q.z); + fp2_copy(&t[10], &curve->C); + + fp2_batched_inv(t, 11); + + fp2_mul(&pairing_dlog_data->ixP, &PQ->P.z, &t[0]); + fp2_mul(&PQ->P.x, &PQ->P.x, &t[1]); + fp2_set_one(&PQ->P.z); + + fp2_mul(&pairing_dlog_data->ixQ, &PQ->Q.z, &t[2]); + fp2_mul(&PQ->Q.x, &PQ->Q.x, &t[3]); + fp2_set_one(&PQ->Q.z); + + fp2_mul(&PQ->PmQ.x, &PQ->PmQ.x, &t[5]); + fp2_set_one(&PQ->PmQ.z); + + fp2_mul(&pairing_dlog_data->ixR, &RS->P.z, &t[6]); + fp2_mul(&RS->P.x, &RS->P.x, &t[7]); + fp2_set_one(&RS->P.z); + + fp2_mul(&pairing_dlog_data->ixS, &RS->Q.z, &t[8]); + fp2_mul(&RS->Q.x, &RS->Q.x, &t[9]); + fp2_set_one(&RS->Q.z); + + fp2_mul(&curve->A, &curve->A, &t[10]); + fp2_set_one(&curve->C); +} + +// Given two bases and basis = compute +// x(P - R), x(P - S), x(R - Q), x(S - Q) +static void +compute_difference_points(pairing_dlog_params_t *pairing_dlog_data, ec_curve_t *curve) +{ + jac_point_t xyP, xyQ, xyR, xyS, temp; + + // lifting the two basis points, assumes that x(P) and x(R) + // and the curve itself are normalised to (X : 1) + lift_basis_normalized(&xyP, &xyQ, &pairing_dlog_data->PQ, curve); + lift_basis_normalized(&xyR, &xyS, &pairing_dlog_data->RS, curve); + + // computation of the differences + // x(P - R) + jac_neg(&temp, &xyR); + ADD(&temp, &temp, &xyP, curve); + jac_to_xz(&pairing_dlog_data->diff.PmR, &temp); + + // x(P - S) + jac_neg(&temp, &xyS); + ADD(&temp, &temp, &xyP, curve); + jac_to_xz(&pairing_dlog_data->diff.PmS, &temp); + + // x(R - Q) + jac_neg(&temp, &xyQ); + ADD(&temp, &temp, &xyR, curve); + jac_to_xz(&pairing_dlog_data->diff.RmQ, &temp); + + // x(S - Q) + jac_neg(&temp, &xyQ); + ADD(&temp, &temp, &xyS, curve); + jac_to_xz(&pairing_dlog_data->diff.SmQ, &temp); +} + +// Inline all the Weil pairing computations needed for ec_dlog_2_weil +static void +weil_dlog(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, pairing_dlog_params_t *pairing_dlog_data) +{ + + ec_point_t nP, nQ, nR, nS, nPQ, PnQ, nPR, PnR, nPS, PnS, nRQ, RnQ, nSQ, SnQ; + + copy_point(&nP, &pairing_dlog_data->PQ.P); + copy_point(&nQ, &pairing_dlog_data->PQ.Q); + copy_point(&nR, &pairing_dlog_data->RS.P); + copy_point(&nS, &pairing_dlog_data->RS.Q); + copy_point(&nPQ, &pairing_dlog_data->PQ.PmQ); + copy_point(&PnQ, &pairing_dlog_data->PQ.PmQ); + copy_point(&nPR, &pairing_dlog_data->diff.PmR); + copy_point(&nPS, &pairing_dlog_data->diff.PmS); + copy_point(&PnR, &pairing_dlog_data->diff.PmR); + copy_point(&PnS, &pairing_dlog_data->diff.PmS); + copy_point(&nRQ, &pairing_dlog_data->diff.RmQ); + copy_point(&nSQ, &pairing_dlog_data->diff.SmQ); + copy_point(&RnQ, &pairing_dlog_data->diff.RmQ); + copy_point(&SnQ, &pairing_dlog_data->diff.SmQ); + + for (uint32_t i = 0; i < pairing_dlog_data->e - 1; i++) { + cubicalADD(&nPQ, &nPQ, &nP, &pairing_dlog_data->ixQ); + cubicalADD(&nPR, &nPR, &nP, &pairing_dlog_data->ixR); + cubicalDBLADD(&nPS, &nP, &nPS, &nP, &pairing_dlog_data->ixS, &pairing_dlog_data->A24); + + cubicalADD(&PnQ, &PnQ, &nQ, &pairing_dlog_data->ixP); + cubicalADD(&RnQ, 
&RnQ, &nQ, &pairing_dlog_data->ixR); + cubicalDBLADD(&SnQ, &nQ, &SnQ, &nQ, &pairing_dlog_data->ixS, &pairing_dlog_data->A24); + + cubicalADD(&PnR, &PnR, &nR, &pairing_dlog_data->ixP); + cubicalDBLADD(&nRQ, &nR, &nRQ, &nR, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + + cubicalADD(&PnS, &PnS, &nS, &pairing_dlog_data->ixP); + cubicalDBLADD(&nSQ, &nS, &nSQ, &nS, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + } + + // weil(&w0,e,&PQ->P,&PQ->Q,&PQ->PmQ,&A24); + translate(&nPQ, &nP); + translate(&nPR, &nP); + translate(&nPS, &nP); + translate(&PnQ, &nQ); + translate(&RnQ, &nQ); + translate(&SnQ, &nQ); + translate(&PnR, &nR); + translate(&nRQ, &nR); + translate(&PnS, &nS); + translate(&nSQ, &nS); + + translate(&nP, &nP); + translate(&nQ, &nQ); + translate(&nR, &nR); + translate(&nS, &nS); + + // computation of the reference weil pairing + ec_point_t T0, T1; + fp2_t w1[5], w2[5]; + + // e(P, Q) = w0 + point_ratio(&T0, &nPQ, &nP, &pairing_dlog_data->PQ.Q); + point_ratio(&T1, &PnQ, &nQ, &pairing_dlog_data->PQ.P); + // For the first element we need it's inverse for + // fp2_dlog_2e so we swap w1 and w2 here to save inversions + fp2_mul(&w2[0], &T0.x, &T1.z); + fp2_mul(&w1[0], &T1.x, &T0.z); + + // e(P,R) = w0^r2 + point_ratio(&T0, &nPR, &nP, &pairing_dlog_data->RS.P); + point_ratio(&T1, &PnR, &nR, &pairing_dlog_data->PQ.P); + fp2_mul(&w1[1], &T0.x, &T1.z); + fp2_mul(&w2[1], &T1.x, &T0.z); + + // e(R,Q) = w0^r1 + point_ratio(&T0, &nRQ, &nR, &pairing_dlog_data->PQ.Q); + point_ratio(&T1, &RnQ, &nQ, &pairing_dlog_data->RS.P); + fp2_mul(&w1[2], &T0.x, &T1.z); + fp2_mul(&w2[2], &T1.x, &T0.z); + + // e(P,S) = w0^s2 + point_ratio(&T0, &nPS, &nP, &pairing_dlog_data->RS.Q); + point_ratio(&T1, &PnS, &nS, &pairing_dlog_data->PQ.P); + fp2_mul(&w1[3], &T0.x, &T1.z); + fp2_mul(&w2[3], &T1.x, &T0.z); + + // e(S,Q) = w0^s1 + point_ratio(&T0, &nSQ, &nS, &pairing_dlog_data->PQ.Q); + point_ratio(&T1, &SnQ, &nQ, &pairing_dlog_data->RS.Q); + fp2_mul(&w1[4], &T0.x, &T1.z); + fp2_mul(&w2[4], &T1.x, &T0.z); + + fp2_batched_inv(w1, 5); + for (int i = 0; i < 5; i++) { + fp2_mul(&w1[i], &w1[i], &w2[i]); + } + + fp2_dlog_2e(r2, &w1[1], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(r1, &w1[2], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s2, &w1[3], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s1, &w1[4], &w1[0], pairing_dlog_data->e); +} + +void +ec_dlog_2_weil(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e) +{ + assert(test_point_order_twof(&PQ->Q, curve, e)); + + // precomputing the correct curve data + ec_curve_normalize_A24(curve); + + pairing_dlog_params_t pairing_dlog_data; + pairing_dlog_data.e = e; + pairing_dlog_data.PQ = *PQ; + pairing_dlog_data.RS = *RS; + pairing_dlog_data.A24 = curve->A24; + + cubical_normalization_dlog(&pairing_dlog_data, curve); + compute_difference_points(&pairing_dlog_data, curve); + + weil_dlog(r1, r2, s1, s2, &pairing_dlog_data); + +#ifndef NDEBUG + ec_point_t test; + ec_biscalar_mul(&test, r1, r2, e, PQ, curve); + // R = [r1]P + [r2]Q + assert(ec_is_equal(&test, &RS->P)); + ec_biscalar_mul(&test, s1, s2, e, PQ, curve); + // S = [s1]P + [s2]Q + assert(ec_is_equal(&test, &RS->Q)); +#endif +} + +// Inline all the Tate pairing computations needed for ec_dlog_2_weil +// including reduction, assumes a bases PQ of full E[2^e_full] torsion +// and a bases RS of smaller E[2^e] torsion +static void +tate_dlog_partial(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, pairing_dlog_params_t *pairing_dlog_data) 
+{ + + uint32_t e_full = TORSION_EVEN_POWER; + uint32_t e_diff = e_full - pairing_dlog_data->e; + + ec_point_t nP, nQ, nR, nS, nPQ, PnR, PnS, nRQ, nSQ; + + copy_point(&nP, &pairing_dlog_data->PQ.P); + copy_point(&nQ, &pairing_dlog_data->PQ.Q); + copy_point(&nR, &pairing_dlog_data->RS.P); + copy_point(&nS, &pairing_dlog_data->RS.Q); + copy_point(&nPQ, &pairing_dlog_data->PQ.PmQ); + copy_point(&PnR, &pairing_dlog_data->diff.PmR); + copy_point(&PnS, &pairing_dlog_data->diff.PmS); + copy_point(&nRQ, &pairing_dlog_data->diff.RmQ); + copy_point(&nSQ, &pairing_dlog_data->diff.SmQ); + + for (uint32_t i = 0; i < e_full - 1; i++) { + cubicalDBLADD(&nPQ, &nP, &nPQ, &nP, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + } + + for (uint32_t i = 0; i < pairing_dlog_data->e - 1; i++) { + cubicalADD(&PnR, &PnR, &nR, &pairing_dlog_data->ixP); + cubicalDBLADD(&nRQ, &nR, &nRQ, &nR, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + + cubicalADD(&PnS, &PnS, &nS, &pairing_dlog_data->ixP); + cubicalDBLADD(&nSQ, &nS, &nSQ, &nS, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + } + + translate(&nPQ, &nP); + translate(&PnR, &nR); + translate(&nRQ, &nR); + translate(&PnS, &nS); + translate(&nSQ, &nS); + + translate(&nP, &nP); + translate(&nQ, &nQ); + translate(&nR, &nR); + translate(&nS, &nS); + + // computation of the reference Tate pairing + ec_point_t T0; + fp2_t w1[5], w2[5]; + + // t(P, Q)^(2^e_diff) = w0 + point_ratio(&T0, &nPQ, &nP, &pairing_dlog_data->PQ.Q); + fp2_copy(&w1[0], &T0.x); + fp2_copy(&w2[0], &T0.z); + + // t(R,P) = w0^r2 + point_ratio(&T0, &PnR, &nR, &pairing_dlog_data->PQ.P); + fp2_copy(&w1[1], &T0.x); + fp2_copy(&w2[1], &T0.z); + + // t(R,Q) = w0^r1 + point_ratio(&T0, &nRQ, &nR, &pairing_dlog_data->PQ.Q); + fp2_copy(&w2[2], &T0.x); + fp2_copy(&w1[2], &T0.z); + + // t(S,P) = w0^s2 + point_ratio(&T0, &PnS, &nS, &pairing_dlog_data->PQ.P); + fp2_copy(&w1[3], &T0.x); + fp2_copy(&w2[3], &T0.z); + + // t(S,Q) = w0^s1 + point_ratio(&T0, &nSQ, &nS, &pairing_dlog_data->PQ.Q); + fp2_copy(&w2[4], &T0.x); + fp2_copy(&w1[4], &T0.z); + + // batched reduction using projective representation + for (int i = 0; i < 5; i++) { + fp2_t frob, tmp; + fp2_copy(&tmp, &w1[i]); + // inline frobenius for ^p + // multiply by inverse to get ^(p-1) + fp2_frob(&frob, &w1[i]); + fp2_mul(&w1[i], &w2[i], &frob); + + // repeat for denom + fp2_frob(&frob, &w2[i]); + fp2_mul(&w2[i], &tmp, &frob); + } + + // batched normalization + fp2_batched_inv(w2, 5); + for (int i = 0; i < 5; i++) { + fp2_mul(&w1[i], &w1[i], &w2[i]); + } + + for (int i = 0; i < 5; i++) { + clear_cofac(&w1[i], &w1[i]); + + // removes 2^e_diff + for (uint32_t j = 0; j < e_diff; j++) { + fp2_sqr(&w1[i], &w1[i]); + } + } + + fp2_dlog_2e(r2, &w1[1], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(r1, &w1[2], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s2, &w1[3], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s1, &w1[4], &w1[0], pairing_dlog_data->e); +} + +void +ec_dlog_2_tate(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + const ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e) +{ + // assume PQ is a full torsion basis + // returns a, b, c, d such that R = [a]P + [b]Q, S = [c]P + [d]Q + +#ifndef NDEBUG + int e_full = TORSION_EVEN_POWER; + int e_diff = e_full - e; +#endif + assert(test_basis_order_twof(PQ, curve, e_full)); + + // precomputing the correct curve data + ec_curve_normalize_A24(curve); + + pairing_dlog_params_t pairing_dlog_data; + pairing_dlog_data.e = e; + pairing_dlog_data.PQ = *PQ; + pairing_dlog_data.RS = *RS; + 
pairing_dlog_data.A24 = curve->A24; + + cubical_normalization_dlog(&pairing_dlog_data, curve); + compute_difference_points(&pairing_dlog_data, curve); + tate_dlog_partial(r1, r2, s1, s2, &pairing_dlog_data); + +#ifndef NDEBUG + ec_point_t test; + ec_biscalar_mul(&test, r1, r2, e, PQ, curve); + ec_dbl_iter(&test, e_diff, &test, curve); + // R = [r1]P + [r2]Q + assert(ec_is_equal(&test, &RS->P)); + + ec_biscalar_mul(&test, s1, s2, e, PQ, curve); + ec_dbl_iter(&test, e_diff, &test, curve); + // S = [s1]P + [s2]Q + assert(ec_is_equal(&test, &RS->Q)); +#endif +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/biextension.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/biextension.h new file mode 100644 index 0000000000..1a50fcc738 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/biextension.h @@ -0,0 +1,82 @@ +#ifndef _BIEXT_H_ +#define _BIEXT_H_ + +#include +#include + +typedef struct pairing_params +{ + uint32_t e; // Points have order 2^e + ec_point_t P; // x(P) + ec_point_t Q; // x(Q) + ec_point_t PQ; // x(P-Q) = (PQX/PQZ : 1) + fp2_t ixP; // PZ/PX + fp2_t ixQ; // QZ/QX + ec_point_t A24; // ((A+2)/4 : 1) +} pairing_params_t; + +// For two bases and store: +// x(P - R), x(P - S), x(R - Q), x(S - Q) +typedef struct pairing_dlog_diff_points +{ + ec_point_t PmR; // x(P - R) + ec_point_t PmS; // x(P - S) + ec_point_t RmQ; // x(R - Q) + ec_point_t SmQ; // x(S - Q) +} pairing_dlog_diff_points_t; + +typedef struct pairing_dlog_params +{ + uint32_t e; // Points have order 2^e + ec_basis_t PQ; // x(P), x(Q), x(P-Q) + ec_basis_t RS; // x(R), x(S), x(R-S) + pairing_dlog_diff_points_t diff; // x(P - R), x(P - S), x(R - Q), x(S - Q) + fp2_t ixP; // PZ/PX + fp2_t ixQ; // QZ/QX + fp2_t ixR; // RZ/RX + fp2_t ixS; // SZ/SX + ec_point_t A24; // ((A+2)/4 : 1) +} pairing_dlog_params_t; + +// Computes e = e_{2^e}(P, Q) using biextension ladder +void weil(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E); + +// Computes (reduced) z = t_{2^e}(P, Q) using biextension ladder +void reduced_tate(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E); + +// Given two bases and computes scalars +// such that R = [r1]P + [r2]Q, S = [s1]P + [s2]Q +void ec_dlog_2_weil(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e); + +// Given two bases and +// where is a basis for E[2^f] +// the full 2-torsion, and a basis +// for smaller torsion E[2^e] +// computes scalars r1, r2, s1, s2 +// such that R = [r1]P + [r2]Q, S = [s1]P + [s2]Q +void ec_dlog_2_tate(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + const ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e); + +void ec_dlog_2_tate_to_full(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + ec_basis_t *PQ, + ec_basis_t *RS, + ec_curve_t *curve, + int e); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/common.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/common.c new file mode 100644 index 0000000000..d393e9cb11 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/common.c @@ -0,0 +1,88 @@ +#include +#include +#include +#include +#include +#include + +void +public_key_init(public_key_t *pk) +{ + ec_curve_init(&pk->curve); +} + +void +public_key_finalize(public_key_t *pk) +{ +} + +// compute the challenge as the hash of the message and the commitment curve and public key 
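+// Descriptive note (added documentation, based on the code below): the j-invariants
+// of the public-key curve and the commitment curve are encoded and absorbed into
+// SHAKE256 together with the message; the output is then fed back through SHAKE256
+// for HASH_ITERATIONS rounds in total, and the final squeeze is truncated with the
+// masking steps below to the bit length required for the challenge scalar.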
+void +hash_to_challenge(scalar_t *scalar, + const public_key_t *pk, + const ec_curve_t *com_curve, + const unsigned char *message, + size_t length) +{ + unsigned char buf[2 * FP2_ENCODED_BYTES]; + { + fp2_t j1, j2; + ec_j_inv(&j1, &pk->curve); + ec_j_inv(&j2, com_curve); + fp2_encode(buf, &j1); + fp2_encode(buf + FP2_ENCODED_BYTES, &j2); + } + + { + // The type scalar_t represents an element of GF(p), which is about + // 2*lambda bits, where lambda = 128, 192 or 256, according to the + // security level. Thus, the variable scalar should have enough memory + // for the values produced by SHAKE256 in the intermediate iterations. + + shake256incctx ctx; + + size_t hash_bytes = ((2 * SECURITY_BITS) + 7) / 8; + size_t limbs = (hash_bytes + sizeof(digit_t) - 1) / sizeof(digit_t); + size_t bits = (2 * SECURITY_BITS) % RADIX; + digit_t mask = ((digit_t)-1) >> ((RADIX - bits) % RADIX); +#ifdef TARGET_BIG_ENDIAN + mask = BSWAP_DIGIT(mask); +#endif + + shake256_inc_init(&ctx); + shake256_inc_absorb(&ctx, buf, 2 * FP2_ENCODED_BYTES); + shake256_inc_absorb(&ctx, message, length); + shake256_inc_finalize(&ctx); + shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + (*scalar)[limbs - 1] &= mask; + for (int i = 2; i < HASH_ITERATIONS; i++) { + shake256_inc_init(&ctx); + shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); + shake256_inc_finalize(&ctx); + shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + (*scalar)[limbs - 1] &= mask; + } + shake256_inc_init(&ctx); + shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); + shake256_inc_finalize(&ctx); + + hash_bytes = ((TORSION_EVEN_POWER - SQIsign_response_length) + 7) / 8; + limbs = (hash_bytes + sizeof(digit_t) - 1) / sizeof(digit_t); + bits = (TORSION_EVEN_POWER - SQIsign_response_length) % RADIX; + mask = ((digit_t)-1) >> ((RADIX - bits) % RADIX); +#ifdef TARGET_BIG_ENDIAN + mask = BSWAP_DIGIT(mask); +#endif + + memset(*scalar, 0, NWORDS_ORDER * sizeof(digit_t)); + shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + (*scalar)[limbs - 1] &= mask; + +#ifdef TARGET_BIG_ENDIAN + for (int i = 0; i < NWORDS_ORDER; i++) + (*scalar)[i] = BSWAP_DIGIT((*scalar)[i]); +#endif + + mp_mod_2exp(*scalar, SECURITY_BITS, NWORDS_ORDER); + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.c new file mode 100644 index 0000000000..983ba49adf --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.c @@ -0,0 +1,201 @@ +/* Copyright (c) 2017, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ + +/*************************************************************************** + * Small modification by Nir Drucker and Shay Gueron + * AWS Cryptographic Algorithms Group + * (ndrucker@amazon.com, gueron@amazon.com) + * include: + * 1) Use memcpy/memset instead of OPENSSL_memcpy/memset + * 2) Include aes.h as the underlying aes code + * 3) Modifying the drbg structure + * ***************************************************************************/ + +#include "ctr_drbg.h" +#include + + +// Section references in this file refer to SP 800-90Ar1: +// http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-90Ar1.pdf + +int CTR_DRBG_init(CTR_DRBG_STATE *drbg, + const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], + const uint8_t *personalization, size_t personalization_len) { + // Section 10.2.1.3.1 + if (personalization_len > CTR_DRBG_ENTROPY_LEN) { + return 0; + } + + uint8_t seed_material[CTR_DRBG_ENTROPY_LEN]; + memcpy(seed_material, entropy, CTR_DRBG_ENTROPY_LEN); + + for (size_t i = 0; i < personalization_len; i++) { + seed_material[i] ^= personalization[i]; + } + + // Section 10.2.1.2 + // kInitMask is the result of encrypting blocks with big-endian value 1, 2 + // and 3 with the all-zero AES-256 key. + static const uint8_t kInitMask[CTR_DRBG_ENTROPY_LEN] = { + 0x53, 0x0f, 0x8a, 0xfb, 0xc7, 0x45, 0x36, 0xb9, 0xa9, 0x63, 0xb4, 0xf1, + 0xc4, 0xcb, 0x73, 0x8b, 0xce, 0xa7, 0x40, 0x3d, 0x4d, 0x60, 0x6b, 0x6e, + 0x07, 0x4e, 0xc5, 0xd3, 0xba, 0xf3, 0x9d, 0x18, 0x72, 0x60, 0x03, 0xca, + 0x37, 0xa6, 0x2a, 0x74, 0xd1, 0xa2, 0xf5, 0x8e, 0x75, 0x06, 0x35, 0x8e, + }; + + for (size_t i = 0; i < sizeof(kInitMask); i++) { + seed_material[i] ^= kInitMask[i]; + } + + aes256_key_t key; + memcpy(key.raw, seed_material, 32); + memcpy(drbg->counter.bytes, seed_material + 32, 16); + + aes256_key_expansion(&drbg->ks, &key); + drbg->reseed_counter = 1; + + return 1; +} + +// ctr_inc adds |n| to the last four bytes of |drbg->counter|, treated as a +// big-endian number. +static void ctr32_add(CTR_DRBG_STATE *drbg, uint32_t n) { + drbg->counter.words[3] = + CRYPTO_bswap4(CRYPTO_bswap4(drbg->counter.words[3]) + n); +} + +static int ctr_drbg_update(CTR_DRBG_STATE *drbg, const uint8_t *data, + size_t data_len) { + // Per section 10.2.1.2, |data_len| must be |CTR_DRBG_ENTROPY_LEN|. Here, we + // allow shorter inputs and right-pad them with zeros. This is equivalent to + // the specified algorithm but saves a copy in |CTR_DRBG_generate|. 
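+  //
+  // Added note: per CTR_DRBG_Update in SP 800-90Ar1, the code below generates
+  // |CTR_DRBG_ENTROPY_LEN| bytes of keystream with the current key and counter,
+  // XORs in the (implicitly zero-padded) |data|, and installs the result as the
+  // new AES-256 key (first 32 bytes) and counter (last 16 bytes).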
+ if (data_len > CTR_DRBG_ENTROPY_LEN) { + return 0; + } + + uint8_t temp[CTR_DRBG_ENTROPY_LEN]; + for (size_t i = 0; i < CTR_DRBG_ENTROPY_LEN; i += AES_BLOCK_SIZE) { + ctr32_add(drbg, 1); + aes256_enc(temp + i, drbg->counter.bytes, &drbg->ks); + } + + for (size_t i = 0; i < data_len; i++) { + temp[i] ^= data[i]; + } + + aes256_key_t key; + memcpy(key.raw, temp, 32); + memcpy(drbg->counter.bytes, temp + 32, 16); + aes256_key_expansion(&drbg->ks, &key); + + return 1; +} + +int CTR_DRBG_reseed(CTR_DRBG_STATE *drbg, + const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], + const uint8_t *additional_data, + size_t additional_data_len) { + // Section 10.2.1.4 + uint8_t entropy_copy[CTR_DRBG_ENTROPY_LEN]; + + if (additional_data_len > 0) { + if (additional_data_len > CTR_DRBG_ENTROPY_LEN) { + return 0; + } + + memcpy(entropy_copy, entropy, CTR_DRBG_ENTROPY_LEN); + for (size_t i = 0; i < additional_data_len; i++) { + entropy_copy[i] ^= additional_data[i]; + } + + entropy = entropy_copy; + } + + if (!ctr_drbg_update(drbg, entropy, CTR_DRBG_ENTROPY_LEN)) { + return 0; + } + + drbg->reseed_counter = 1; + + return 1; +} + +int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out, size_t out_len, + const uint8_t *additional_data, + size_t additional_data_len) { + if (additional_data_len != 0 && + !ctr_drbg_update(drbg, additional_data, additional_data_len)) { + return 0; + } + + // kChunkSize is used to interact better with the cache. Since the AES-CTR + // code assumes that it's encrypting rather than just writing keystream, the + // buffer has to be zeroed first. Without chunking, large reads would zero + // the whole buffer, flushing the L1 cache, and then do another pass (missing + // the cache every time) to “encrypt” it. The code can avoid this by + // chunking. + static const size_t kChunkSize = 8 * 1024; + + while (out_len >= AES_BLOCK_SIZE) { + size_t todo = kChunkSize; + if (todo > out_len) { + todo = out_len; + } + + todo &= ~(AES_BLOCK_SIZE - 1); + + const size_t num_blocks = todo / AES_BLOCK_SIZE; + if (1) { + memset(out, 0, todo); + ctr32_add(drbg, 1); +#ifdef VAES512 + aes256_ctr_enc512(out, drbg->counter.bytes, num_blocks, &drbg->ks); +#elif defined(VAES256) + aes256_ctr_enc256(out, drbg->counter.bytes, num_blocks, &drbg->ks); +#else + aes256_ctr_enc(out, drbg->counter.bytes, num_blocks, &drbg->ks); +#endif + ctr32_add(drbg, num_blocks - 1); + } else { + for (size_t i = 0; i < todo; i += AES_BLOCK_SIZE) { + ctr32_add(drbg, 1); + aes256_enc(&out[i], drbg->counter.bytes, &drbg->ks); + } + } + + out += todo; + out_len -= todo; + } + + if (out_len > 0) { + uint8_t block[AES_BLOCK_SIZE]; + ctr32_add(drbg, 1); + aes256_enc(block, drbg->counter.bytes, &drbg->ks); + + memcpy(out, block, out_len); + } + + // Right-padding |additional_data| in step 2.2 is handled implicitly by + // |ctr_drbg_update|, to save a copy. + if (!ctr_drbg_update(drbg, additional_data, additional_data_len)) { + return 0; + } + + drbg->reseed_counter++; + return 1; +} + +void CTR_DRBG_clear(CTR_DRBG_STATE *drbg) { + secure_clean((uint8_t *)drbg, sizeof(CTR_DRBG_STATE)); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.h new file mode 100644 index 0000000000..2d1b1f3f0c --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.h @@ -0,0 +1,78 @@ +/* Copyright (c) 2017, Google Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +/*************************************************************************** +* Small modification by Nir Drucker and Shay Gueron +* AWS Cryptographic Algorithms Group +* (ndrucker@amazon.com, gueron@amazon.com) +* include: +* 1) Use memcpy/memset instead of OPENSSL_memcpy/memset +* 2) Include aes.h as the underlying aes code +* 3) Modifying the drbg structure +* ***************************************************************************/ + +#pragma once + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "aes_ni.h" + +// CTR_DRBG_STATE contains the state of a CTR_DRBG based on AES-256. See SP +// 800-90Ar1. +typedef struct { + aes256_ks_t ks; + union { + uint8_t bytes[16]; + uint32_t words[4]; + } counter; + uint64_t reseed_counter; +} CTR_DRBG_STATE; + +// See SP 800-90Ar1, table 3. +#define CTR_DRBG_ENTROPY_LEN 48 + +// CTR_DRBG_init initialises |*drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of +// entropy in |entropy| and, optionally, a personalization string up to +// |CTR_DRBG_ENTROPY_LEN| bytes in length. It returns one on success and zero +// on error. +int CTR_DRBG_init(CTR_DRBG_STATE *drbg, + const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], + const uint8_t *personalization, + size_t personalization_len); + +// CTR_DRBG_reseed reseeds |drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of entropy +// in |entropy| and, optionally, up to |CTR_DRBG_ENTROPY_LEN| bytes of +// additional data. It returns one on success or zero on error. +int CTR_DRBG_reseed(CTR_DRBG_STATE *drbg, + const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], + const uint8_t *additional_data, + size_t additional_data_len); + +// CTR_DRBG_generate processes to up |CTR_DRBG_ENTROPY_LEN| bytes of additional +// data (if any) and then writes |out_len| random bytes to |out|. It returns one on success or +// zero on error. +int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out, + size_t out_len, + const uint8_t *additional_data, + size_t additional_data_len); + +// CTR_DRBG_clear zeroises the state of |drbg|. +void CTR_DRBG_clear(CTR_DRBG_STATE *drbg); + + +#if defined(__cplusplus) +} // extern C +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/defs.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/defs.h new file mode 100644 index 0000000000..09bb8b5eba --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/defs.h @@ -0,0 +1,63 @@ +/*************************************************************************** +* Written by Nir Drucker and Shay Gueron +* AWS Cryptographic Algorithms Group +* (ndrucker@amazon.com, gueron@amazon.com) +* +* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +* +* Licensed under the Apache License, Version 2.0 (the "License"). +* You may not use this file except in compliance with the License. 
+* A copy of the License is located at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* or in the "license" file accompanying this file. This file is distributed +* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +* express or implied. See the License for the specific language governing +* permissions and limitations under the License. +* The license is detailed in the file LICENSE.txt, and applies to this file. +* ***************************************************************************/ + +#pragma once + +#include + +#ifdef __cplusplus + #define EXTERNC extern "C" +#else + #define EXTERNC +#endif + +// For code clarity. +#define IN +#define OUT + +#define ALIGN(n) __attribute__((aligned(n))) +#define _INLINE_ static inline + +typedef enum +{ + SUCCESS=0, + ERROR=1 +} status_t; + +#define SUCCESS 0 +#define ERROR 1 +#define GUARD(func) {if(SUCCESS != func) {return ERROR;}} + +#if defined(__GNUC__) && __GNUC__ >= 2 +static inline uint32_t CRYPTO_bswap4(uint32_t x) { + return __builtin_bswap32(x); +} +#endif + +_INLINE_ void secure_clean(OUT uint8_t *p, IN const uint32_t len) +{ +#ifdef _WIN32 + SecureZeroMemory(p, len); +#else + typedef void *(*memset_t)(void *, int, size_t); + static volatile memset_t memset_func = memset; + memset_func(p, 0, len); +#endif +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c new file mode 100644 index 0000000000..171473d481 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c @@ -0,0 +1,1172 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int +_fixed_degree_isogeny_impl(quat_left_ideal_t *lideal, + const ibz_t *u, + bool small, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP, + const int index_alternate_order) +{ + + // var declaration + int ret; + ibz_t two_pow, tmp; + quat_alg_elem_t theta; + + ec_curve_t E0; + copy_curve(&E0, &CURVES_WITH_ENDOMORPHISMS[index_alternate_order].curve); + ec_curve_normalize_A24(&E0); + + unsigned length; + + int u_bitsize = ibz_bitsize(u); + + // deciding the power of 2 of the dim2 isogeny we use for this + // the smaller the faster, but if it set too low there is a risk that + // RepresentInteger will fail + if (!small) { + // in that case, we just set it to be the biggest value possible + length = TORSION_EVEN_POWER - HD_extra_torsion; + } else { + length = ibz_bitsize(&QUATALG_PINFTY.p) + QUAT_repres_bound_input - u_bitsize; + assert(u_bitsize < (int)length); + assert(length < TORSION_EVEN_POWER - HD_extra_torsion); + } + assert(length); + + // var init + ibz_init(&two_pow); + ibz_init(&tmp); + quat_alg_elem_init(&theta); + + ibz_pow(&two_pow, &ibz_const_two, length); + ibz_copy(&tmp, u); + assert(ibz_cmp(&two_pow, &tmp) > 0); + assert(!ibz_is_even(&tmp)); + + // computing the endomorphism theta of norm u * (2^(length) - u) + ibz_sub(&tmp, &two_pow, &tmp); + ibz_mul(&tmp, &tmp, u); + assert(!ibz_is_even(&tmp)); + + // setting-up the quat_represent_integer_params + quat_represent_integer_params_t ri_params; + ri_params.primality_test_iterations = QUAT_represent_integer_params.primality_test_iterations; + + quat_p_extremal_maximal_order_t order_hnf; + quat_alg_elem_init(&order_hnf.z); + quat_alg_elem_copy(&order_hnf.z, &EXTREMAL_ORDERS[index_alternate_order].z); + quat_alg_elem_init(&order_hnf.t); + quat_alg_elem_copy(&order_hnf.t, &EXTREMAL_ORDERS[index_alternate_order].t); + 
quat_lattice_init(&order_hnf.order); + ibz_copy(&order_hnf.order.denom, &EXTREMAL_ORDERS[index_alternate_order].order.denom); + ibz_mat_4x4_copy(&order_hnf.order.basis, &EXTREMAL_ORDERS[index_alternate_order].order.basis); + order_hnf.q = EXTREMAL_ORDERS[index_alternate_order].q; + ri_params.order = &order_hnf; + ri_params.algebra = &QUATALG_PINFTY; + +#ifndef NDEBUG + assert(quat_lattice_contains(NULL, &ri_params.order->order, &ri_params.order->z)); + assert(quat_lattice_contains(NULL, &ri_params.order->order, &ri_params.order->t)); +#endif + + ret = quat_represent_integer(&theta, &tmp, 1, &ri_params); + + assert(!ibz_is_even(&tmp)); + + if (!ret) { + printf("represent integer failed for the alternate order number %d and for " + "a target of " + "size %d for a u of size %d with length = " + "%u \n", + index_alternate_order, + ibz_bitsize(&tmp), + ibz_bitsize(u), + length); + goto cleanup; + } + quat_lideal_create(lideal, &theta, u, &order_hnf.order, &QUATALG_PINFTY); + + quat_alg_elem_finalize(&order_hnf.z); + quat_alg_elem_finalize(&order_hnf.t); + quat_lattice_finalize(&order_hnf.order); + +#ifndef NDEBUG + ibz_t test_norm, test_denom; + ibz_init(&test_denom); + ibz_init(&test_norm); + quat_alg_norm(&test_norm, &test_denom, &theta, &QUATALG_PINFTY); + assert(ibz_is_one(&test_denom)); + assert(ibz_cmp(&test_norm, &tmp) == 0); + assert(!ibz_is_even(&tmp)); + assert(quat_lattice_contains(NULL, &EXTREMAL_ORDERS[index_alternate_order].order, &theta)); + ibz_finalize(&test_norm); + ibz_finalize(&test_denom); +#endif + + ec_basis_t B0_two; + // copying the basis + copy_basis(&B0_two, &CURVES_WITH_ENDOMORPHISMS[index_alternate_order].basis_even); + assert(test_basis_order_twof(&B0_two, &E0, TORSION_EVEN_POWER)); + ec_dbl_iter_basis(&B0_two, TORSION_EVEN_POWER - length - HD_extra_torsion, &B0_two, &E0); + + assert(test_basis_order_twof(&B0_two, &E0, length + HD_extra_torsion)); + + // now we set-up the kernel + theta_couple_point_t T1; + theta_couple_point_t T2, T1m2; + + copy_point(&T1.P1, &B0_two.P); + copy_point(&T2.P1, &B0_two.Q); + copy_point(&T1m2.P1, &B0_two.PmQ); + + // multiplication of theta by (u)^-1 mod 2^(length+2) + ibz_mul(&two_pow, &two_pow, &ibz_const_two); + ibz_mul(&two_pow, &two_pow, &ibz_const_two); + ibz_copy(&tmp, u); + ibz_invmod(&tmp, &tmp, &two_pow); + assert(!ibz_is_even(&tmp)); + + ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); + ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); + ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); + ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + + // applying theta to the basis + ec_basis_t B0_two_theta; + copy_basis(&B0_two_theta, &B0_two); + endomorphism_application_even_basis(&B0_two_theta, index_alternate_order, &E0, &theta, length + HD_extra_torsion); + + // Ensure the basis we're using has the expected order + assert(test_basis_order_twof(&B0_two_theta, &E0, length + HD_extra_torsion)); + + // Set-up the domain E0 x E0 + theta_couple_curve_t E00; + E00.E1 = E0; + E00.E2 = E0; + + // Set-up the kernel from the bases + theta_kernel_couple_points_t dim_two_ker; + copy_bases_to_kernel(&dim_two_ker, &B0_two, &B0_two_theta); + + ret = theta_chain_compute_and_eval(length, &E00, &dim_two_ker, true, E34, P12, numP); + if (!ret) + goto cleanup; + + assert(length); + ret = (int)length; + +cleanup: + // var finalize + ibz_finalize(&two_pow); + ibz_finalize(&tmp); + quat_alg_elem_finalize(&theta); + + return ret; +} + +int +fixed_degree_isogeny_and_eval(quat_left_ideal_t *lideal, + const ibz_t *u, + bool small, + theta_couple_curve_t 
*E34, + theta_couple_point_t *P12, + size_t numP, + const int index_alternate_order) +{ + return _fixed_degree_isogeny_impl(lideal, u, small, E34, P12, numP, index_alternate_order); +} + +// takes the output of LLL and apply some small treatment on the basis +// reordering vectors and switching some signs if needed to make it in a nicer +// shape +static void +post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, const ibz_t *norm, bool is_special_order) +{ + // if the left order is the special one, then we apply some additional post + // treatment + if (is_special_order) { + // reordering the basis if needed + if (ibz_cmp(&(*gram)[0][0], &(*gram)[2][2]) == 0) { + for (int i = 0; i < 4; i++) { + ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + } + ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); + ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); + ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); + ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); + ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); + } else if (ibz_cmp(&(*gram)[0][0], &(*gram)[3][3]) == 0) { + for (int i = 0; i < 4; i++) { + ibz_swap(&(*reduced)[i][1], &(*reduced)[i][3]); + } + ibz_swap(&(*gram)[0][3], &(*gram)[0][1]); + ibz_swap(&(*gram)[3][0], &(*gram)[1][0]); + ibz_swap(&(*gram)[2][3], &(*gram)[2][1]); + ibz_swap(&(*gram)[3][2], &(*gram)[1][2]); + ibz_swap(&(*gram)[3][3], &(*gram)[1][1]); + } else if (ibz_cmp(&(*gram)[1][1], &(*gram)[3][3]) == 0) { + // in this case it seems that we need to swap the second and third + // element, and then recompute entirely the second element from the first + // first we swap the second and third element + for (int i = 0; i < 4; i++) { + ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + } + ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); + ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); + ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); + ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); + ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); + } + + // adjusting the sign if needed + if (ibz_cmp(&(*reduced)[0][0], &(*reduced)[1][1]) != 0) { + for (int i = 0; i < 4; i++) { + ibz_neg(&(*reduced)[i][1], &(*reduced)[i][1]); + ibz_neg(&(*gram)[i][1], &(*gram)[i][1]); + ibz_neg(&(*gram)[1][i], &(*gram)[1][i]); + } + } + if (ibz_cmp(&(*reduced)[0][2], &(*reduced)[1][3]) != 0) { + for (int i = 0; i < 4; i++) { + ibz_neg(&(*reduced)[i][3], &(*reduced)[i][3]); + ibz_neg(&(*gram)[i][3], &(*gram)[i][3]); + ibz_neg(&(*gram)[3][i], &(*gram)[3][i]); + } + // assert(ibz_cmp(&(*reduced)[0][2],&(*reduced)[1][3])==0); + } + } +} + +// enumerate all vectors in an hypercube of norm m for the infinity norm +// with respect to a basis whose gram matrix is given by gram +// Returns an int `count`, the number of vectors found with the desired +// properties +static int +enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t *gram, const ibz_t *adjusted_norm) +{ + + ibz_t remain, norm; + ibz_vec_4_t point; + + ibz_init(&remain); + ibz_init(&norm); + ibz_vec_4_init(&point); + + assert(m > 0); + + int count = 0; + int dim = 2 * m + 1; + int dim2 = dim * dim; + int dim3 = dim2 * dim; + + // if the basis is of the form alpha, i*alpha, beta, i*beta + // we can remove some values due to symmetry of the basis that + bool need_remove_symmetry = + (ibz_cmp(&(*gram)[0][0], &(*gram)[1][1]) == 0 && ibz_cmp(&(*gram)[3][3], &(*gram)[2][2]) == 0); + + int check1, check2, check3; + + // Enumerate over points in a hypercube with coordinates (x, y, z, w) + for (int x = -m; x <= 0; x++) { // We only check non-positive x-values + for (int y = -m; y < m + 1; y++) { + 
// Once x = 0 we only consider non-positive y values + if (x == 0 && y > 0) { + break; + } + for (int z = -m; z < m + 1; z++) { + // If x and y are both zero, we only consider non-positive z values + if (x == 0 && y == 0 && z > 0) { + break; + } + for (int w = -m; w < m + 1; w++) { + // If x, y, z are all zero, we only consider negative w values + if (x == 0 && y == 0 && z == 0 && w >= 0) { + break; + } + + // Now for each candidate (x, y, z, w) we need to check a number of + // conditions We have already filtered for symmetry with several break + // statements, but there are more checks. + + // 1. We do not allow all (x, y, z, w) to be multiples of 2 + // 2. We do not allow all (x, y, z, w) to be multiples of 3 + // 3. We do not want elements of the same norm, so we quotient out the + // action + // of a group of order four generated by i for a basis expected to + // be of the form: [gamma, i gamma, beta, i beta ]. + + // Ensure that not all values are even + if (!((x | y | z | w) & 1)) { + continue; + } + // Ensure that not all values are multiples of three + if (x % 3 == 0 && y % 3 == 0 && z % 3 == 0 && w % 3 == 0) { + continue; + } + + check1 = (m + w) + dim * (m + z) + dim2 * (m + y) + dim3 * (m + x); + check2 = (m - z) + dim * (m + w) + dim2 * (m - x) + dim3 * (m + y); + check3 = (m + z) + dim * (m - w) + dim2 * (m + x) + dim3 * (m - y); + + // either the basis does not have symmetry and we are good, + // or there is a special symmetry that we can exploit + // and we ensure that we don't record the same norm in the list + if (!need_remove_symmetry || (check1 <= check2 && check1 <= check3)) { + // Set the point as a vector (x, y, z, w) + ibz_set(&point[0], x); + ibz_set(&point[1], y); + ibz_set(&point[2], z); + ibz_set(&point[3], w); + + // Evaluate this through the gram matrix and divide out by the + // adjusted_norm + quat_qf_eval(&norm, gram, &point); + ibz_div(&norm, &remain, &norm, adjusted_norm); + assert(ibz_is_zero(&remain)); + + if (ibz_mod_ui(&norm, 2) == 1) { + ibz_set(&vecs[count][0], x); + ibz_set(&vecs[count][1], y); + ibz_set(&vecs[count][2], z); + ibz_set(&vecs[count][3], w); + ibz_copy(&norms[count], &norm); + count++; + } + } + } + } + } + } + + ibz_finalize(&remain); + ibz_finalize(&norm); + ibz_vec_4_finalize(&point); + + return count - 1; +} + +// enumerate through the two list given in input to find to integer d1,d2 such +// that there exists u,v with u d1 + v d2 = target the bool is diagonal +// indicates if the two lists are the same +static int +find_uv_from_lists(ibz_t *au, + ibz_t *bu, + ibz_t *av, + ibz_t *bv, + ibz_t *u, + ibz_t *v, + int *index_sol1, + int *index_sol2, + const ibz_t *target, + const ibz_t *small_norms1, + const ibz_t *small_norms2, + const ibz_t *quotients, + const int index1, + const int index2, + const int is_diagonal, + const int number_sum_square) +{ + + ibz_t n, remain, adjusted_norm; + ibz_init(&n); + ibz_init(&remain); + ibz_init(&adjusted_norm); + + int found = 0; + int cmp; + ibz_copy(&n, target); + + // enumerating through the list + for (int i1 = 0; i1 < index1; i1++) { + ibz_mod(&adjusted_norm, &n, &small_norms1[i1]); + int starting_index2; + if (is_diagonal) { + starting_index2 = i1; + } else { + starting_index2 = 0; + } + for (int i2 = starting_index2; i2 < index2; i2++) { + // u = target / d1 mod d2 + if (!ibz_invmod(&remain, &small_norms2[i2], &small_norms1[i1])) { + continue; + } + ibz_mul(v, &remain, &adjusted_norm); + ibz_mod(v, v, &small_norms1[i1]); + cmp = ibz_cmp(v, "ients[i2]); + while (!found && cmp < 0) { + if 
(number_sum_square > 0) { + found = ibz_cornacchia_prime(av, bv, &ibz_const_one, v); + } else if (number_sum_square == 0) { + found = 1; + } + if (found) { + ibz_mul(&remain, v, &small_norms2[i2]); + ibz_copy(au, &n); + ibz_sub(u, au, &remain); + assert(ibz_cmp(u, &ibz_const_zero) > 0); + ibz_div(u, &remain, u, &small_norms1[i1]); + assert(ibz_is_zero(&remain)); + // we want to remove weird cases where u,v have big power of two + found = found && (ibz_get(u) != 0 && ibz_get(v) != 0); + if (number_sum_square == 2) { + found = ibz_cornacchia_prime(au, bu, &ibz_const_one, u); + } + } + if (!found) { + ibz_add(v, v, &small_norms1[i1]); + cmp = ibz_cmp(v, "ients[i2]); + } + } + + if (found) { + // copying the indices + *index_sol1 = i1; + *index_sol2 = i2; + break; + } + } + if (found) { + break; + } + } + + ibz_finalize(&n); + ibz_finalize(&remain); + ibz_finalize(&adjusted_norm); + + return found; +} + +struct vec_and_norm +{ + ibz_vec_4_t vec; + ibz_t norm; + int idx; +}; + +static int +compare_vec_by_norm(const void *_first, const void *_second) +{ + const struct vec_and_norm *first = _first, *second = _second; + int res = ibz_cmp(&first->norm, &second->norm); + if (res != 0) + return res; + else + return first->idx - second->idx; +} + +// use several special curves +// we assume that the first one is always j=1728 +int +find_uv(ibz_t *u, + ibz_t *v, + quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *d1, + ibz_t *d2, + int *index_alternate_order_1, + int *index_alternate_order_2, + const ibz_t *target, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo, + int num_alternate_order) + +{ + + // variable declaration & init + ibz_vec_4_t vec; + ibz_t n; + ibz_t au, bu, av, bv; + ibz_t norm_d; + ibz_t remain; + ibz_init(&au); + ibz_init(&bu); + ibz_init(&av); + ibz_init(&bv); + ibz_init(&norm_d); + ibz_init(&n); + ibz_vec_4_init(&vec); + ibz_init(&remain); + + ibz_copy(&n, target); + + ibz_t adjusted_norm[num_alternate_order + 1]; + ibz_mat_4x4_t gram[num_alternate_order + 1], reduced[num_alternate_order + 1]; + quat_left_ideal_t ideal[num_alternate_order + 1]; + + for (int i = 0; i < num_alternate_order + 1; i++) { + ibz_init(&adjusted_norm[i]); + ibz_mat_4x4_init(&gram[i]); + ibz_mat_4x4_init(&reduced[i]); + quat_left_ideal_init(&ideal[i]); + } + + // first we reduce the ideal given in input + quat_lideal_copy(&ideal[0], lideal); + quat_lideal_reduce_basis(&reduced[0], &gram[0], &ideal[0], Bpoo); + + ibz_mat_4x4_copy(&ideal[0].lattice.basis, &reduced[0]); + ibz_set(&adjusted_norm[0], 1); + ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); + ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); + post_LLL_basis_treatment(&gram[0], &reduced[0], &ideal[0].norm, true); + + // for efficient lattice reduction, we replace ideal[0] by the equivalent + // ideal of smallest norm + quat_left_ideal_t reduced_id; + quat_left_ideal_init(&reduced_id); + quat_lideal_copy(&reduced_id, &ideal[0]); + quat_alg_elem_t delta; + // delta will be the element of smallest norm + quat_alg_elem_init(&delta); + ibz_set(&delta.coord[0], 1); + ibz_set(&delta.coord[1], 0); + ibz_set(&delta.coord[2], 0); + ibz_set(&delta.coord[3], 0); + ibz_copy(&delta.denom, &reduced_id.lattice.denom); + ibz_mat_4x4_eval(&delta.coord, &reduced[0], &delta.coord); + assert(quat_lattice_contains(NULL, &reduced_id.lattice, &delta)); + + // reduced_id = ideal[0] * \overline{delta}/n(ideal[0]) + quat_alg_conj(&delta, &delta); + ibz_mul(&delta.denom, &delta.denom, &ideal[0].norm); + 
quat_lattice_alg_elem_mul(&reduced_id.lattice, &reduced_id.lattice, &delta, Bpoo); + ibz_copy(&reduced_id.norm, &gram[0][0][0]); + ibz_div(&reduced_id.norm, &remain, &reduced_id.norm, &adjusted_norm[0]); + assert(ibz_cmp(&remain, &ibz_const_zero) == 0); + + // and conj_ideal is the conjugate of reduced_id + // init the right order; + quat_lattice_t right_order; + quat_lattice_init(&right_order); + // computing the conjugate + quat_left_ideal_t conj_ideal; + quat_left_ideal_init(&conj_ideal); + quat_lideal_conjugate_without_hnf(&conj_ideal, &right_order, &reduced_id, Bpoo); + + // computing all the other connecting ideals and reducing them + for (int i = 1; i < num_alternate_order + 1; i++) { + quat_lideal_lideal_mul_reduced(&ideal[i], &gram[i], &conj_ideal, &ALTERNATE_CONNECTING_IDEALS[i - 1], Bpoo); + ibz_mat_4x4_copy(&reduced[i], &ideal[i].lattice.basis); + ibz_set(&adjusted_norm[i], 1); + ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom); + ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom); + post_LLL_basis_treatment(&gram[i], &reduced[i], &ideal[i].norm, false); + } + + // enumerating small vectors + + // global parameters for the enumeration + int m = FINDUV_box_size; + int m4 = FINDUV_cube_size; + + ibz_vec_4_t small_vecs[num_alternate_order + 1][m4]; + ibz_t small_norms[num_alternate_order + 1][m4]; + ibz_vec_4_t alternate_small_vecs[num_alternate_order + 1][m4]; + ibz_t alternate_small_norms[num_alternate_order + 1][m4]; + ibz_t quotients[num_alternate_order + 1][m4]; + int indices[num_alternate_order + 1]; + + for (int j = 0; j < num_alternate_order + 1; j++) { + for (int i = 0; i < m4; i++) { + ibz_init(&small_norms[j][i]); + ibz_vec_4_init(&small_vecs[j][i]); + ibz_init(&alternate_small_norms[j][i]); + ibz_init("ients[j][i]); + ibz_vec_4_init(&alternate_small_vecs[j][i]); + } + // enumeration in the hypercube of norm m + indices[j] = enumerate_hypercube(small_vecs[j], small_norms[j], m, &gram[j], &adjusted_norm[j]); + + // sorting the list + { + struct vec_and_norm small_vecs_and_norms[indices[j]]; + for (int i = 0; i < indices[j]; ++i) { + memcpy(&small_vecs_and_norms[i].vec, &small_vecs[j][i], sizeof(ibz_vec_4_t)); + memcpy(&small_vecs_and_norms[i].norm, &small_norms[j][i], sizeof(ibz_t)); + small_vecs_and_norms[i].idx = i; + } + qsort(small_vecs_and_norms, indices[j], sizeof(*small_vecs_and_norms), compare_vec_by_norm); + for (int i = 0; i < indices[j]; ++i) { + memcpy(&small_vecs[j][i], &small_vecs_and_norms[i].vec, sizeof(ibz_vec_4_t)); + memcpy(&small_norms[j][i], &small_vecs_and_norms[i].norm, sizeof(ibz_t)); + } +#ifndef NDEBUG + for (int i = 1; i < indices[j]; ++i) + assert(ibz_cmp(&small_norms[j][i - 1], &small_norms[j][i]) <= 0); +#endif + } + + for (int i = 0; i < indices[j]; i++) { + ibz_div("ients[j][i], &remain, &n, &small_norms[j][i]); + } + } + + int found = 0; + int i1; + int i2; + for (int j1 = 0; j1 < num_alternate_order + 1; j1++) { + for (int j2 = j1; j2 < num_alternate_order + 1; j2++) { + // in this case, there are some small adjustements to make + int is_diago = (j1 == j2); + found = find_uv_from_lists(&au, + &bu, + &av, + &bv, + u, + v, + &i1, + &i2, + target, + small_norms[j1], + small_norms[j2], + quotients[j2], + indices[j1], + indices[j2], + is_diago, + 0); + // } + + if (found) { + // recording the solutions that we found + ibz_copy(&beta1->denom, &ideal[j1].lattice.denom); + ibz_copy(&beta2->denom, &ideal[j2].lattice.denom); + ibz_copy(d1, &small_norms[j1][i1]); + ibz_copy(d2, &small_norms[j2][i2]); + 
ibz_mat_4x4_eval(&beta1->coord, &reduced[j1], &small_vecs[j1][i1]); + ibz_mat_4x4_eval(&beta2->coord, &reduced[j2], &small_vecs[j2][i2]); + assert(quat_lattice_contains(NULL, &ideal[j1].lattice, beta1)); + assert(quat_lattice_contains(NULL, &ideal[j2].lattice, beta2)); + if (j1 != 0 || j2 != 0) { + ibz_div(&delta.denom, &remain, &delta.denom, &lideal->norm); + assert(ibz_cmp(&remain, &ibz_const_zero) == 0); + ibz_mul(&delta.denom, &delta.denom, &conj_ideal.norm); + } + if (j1 != 0) { + // we send back beta1 to the original ideal + quat_alg_mul(beta1, &delta, beta1, Bpoo); + quat_alg_normalize(beta1); + } + if (j2 != 0) { + // we send back beta2 to the original ideal + quat_alg_mul(beta2, &delta, beta2, Bpoo); + quat_alg_normalize(beta2); + } + + // if the selected element belong to an alternate order, we conjugate it + if (j1 != 0) { + quat_alg_conj(beta1, beta1); + } + if (j2 != 0) { + quat_alg_conj(beta2, beta2); + } + +#ifndef NDEBUG + quat_alg_norm(&remain, &norm_d, beta1, &QUATALG_PINFTY); + assert(ibz_is_one(&norm_d)); + ibz_mul(&n, d1, &ideal->norm); + if (j1 > 0) { + ibz_mul(&n, &n, &ALTERNATE_CONNECTING_IDEALS[j1 - 1].norm); + } + assert(ibz_cmp(&n, &remain) == 0); + quat_alg_norm(&remain, &norm_d, beta2, &QUATALG_PINFTY); + assert(ibz_is_one(&norm_d)); + ibz_mul(&n, d2, &ideal->norm); + if (j2 > 0) { + ibz_mul(&n, &n, &ALTERNATE_CONNECTING_IDEALS[j2 - 1].norm); + } + assert(ibz_cmp(&n, &remain) == 0); + assert(quat_lattice_contains(NULL, &ideal->lattice, beta1)); + assert(quat_lattice_contains(NULL, &ideal->lattice, beta2)); + + quat_left_ideal_t ideal_test; + quat_lattice_t ro; + quat_left_ideal_init(&ideal_test); + quat_lattice_init(&ro); + if (j1 > 0) { + quat_lideal_copy(&ideal_test, &ALTERNATE_CONNECTING_IDEALS[j1 - 1]); + quat_lideal_conjugate_without_hnf(&ideal_test, &ro, &ideal_test, Bpoo); + quat_lideal_lideal_mul_reduced(&ideal_test, &gram[0], &ideal_test, ideal, Bpoo); + assert(quat_lattice_contains(NULL, &ideal_test.lattice, beta1)); + } + if (j2 > 0) { + quat_lideal_copy(&ideal_test, &ALTERNATE_CONNECTING_IDEALS[j2 - 1]); + quat_lideal_conjugate_without_hnf(&ideal_test, &ro, &ideal_test, Bpoo); + quat_lideal_lideal_mul_reduced(&ideal_test, &gram[0], &ideal_test, ideal, Bpoo); + assert(quat_lattice_contains(NULL, &ideal_test.lattice, beta2)); + } + + quat_lattice_finalize(&ro); + quat_left_ideal_finalize(&ideal_test); +#endif + + *index_alternate_order_1 = j1; + *index_alternate_order_2 = j2; + break; + } + } + if (found) { + break; + } + } + + for (int j = 0; j < num_alternate_order + 1; j++) { + for (int i = 0; i < m4; i++) { + ibz_finalize(&small_norms[j][i]); + ibz_vec_4_finalize(&small_vecs[j][i]); + ibz_finalize(&alternate_small_norms[j][i]); + ibz_finalize("ients[j][i]); + ibz_vec_4_finalize(&alternate_small_vecs[j][i]); + } + } + + // var finalize + for (int i = 0; i < num_alternate_order + 1; i++) { + ibz_mat_4x4_finalize(&gram[i]); + ibz_mat_4x4_finalize(&reduced[i]); + quat_left_ideal_finalize(&ideal[i]); + ibz_finalize(&adjusted_norm[i]); + } + + ibz_finalize(&n); + ibz_vec_4_finalize(&vec); + ibz_finalize(&au); + ibz_finalize(&bu); + ibz_finalize(&av); + ibz_finalize(&bv); + ibz_finalize(&remain); + ibz_finalize(&norm_d); + quat_lattice_finalize(&right_order); + quat_left_ideal_finalize(&conj_ideal); + quat_left_ideal_finalize(&reduced_id); + quat_alg_elem_finalize(&delta); + + return found; +} + +int +dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *u, + ibz_t *v, + ibz_t *d1, + ibz_t *d2, + ec_curve_t 
*codomain, + ec_basis_t *basis, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo) +{ + ibz_t target, tmp, two_pow; + ; + quat_alg_elem_t theta; + + ibz_t norm_d; + ibz_init(&norm_d); + ibz_t test1, test2; + ibz_init(&test1); + ibz_init(&test2); + + ibz_init(&target); + ibz_init(&tmp); + ibz_init(&two_pow); + int exp = TORSION_EVEN_POWER; + quat_alg_elem_init(&theta); + + // first, we find u,v,d1,d2,beta1,beta2 + // such that u*d1 + v*d2 = 2^TORSION_EVEN_POWER and there are ideals of + // norm d1,d2 equivalent to ideal beta1 and beta2 are elements of norm nd1, + // nd2 where n=n(lideal) + int ret; + int index_order1 = 0, index_order2 = 0; +#ifndef NDEBUG + unsigned int Fu_length, Fv_length; +#endif + ret = find_uv(u, + v, + beta1, + beta2, + d1, + d2, + &index_order1, + &index_order2, + &TORSION_PLUS_2POWER, + lideal, + Bpoo, + NUM_ALTERNATE_EXTREMAL_ORDERS); + if (!ret) { + goto cleanup; + } + + assert(ibz_is_odd(d1) && ibz_is_odd(d2)); + // compute the valuation of the GCD of u,v + ibz_gcd(&tmp, u, v); + assert(ibz_cmp(&tmp, &ibz_const_zero) != 0); + int exp_gcd = ibz_two_adic(&tmp); + exp = TORSION_EVEN_POWER - exp_gcd; + // removing the power of 2 from u and v + ibz_div(u, &test1, u, &tmp); + assert(ibz_cmp(&test1, &ibz_const_zero) == 0); + ibz_div(v, &test1, v, &tmp); + assert(ibz_cmp(&test1, &ibz_const_zero) == 0); + +#ifndef NDEBUG + // checking that ud1+vd2 = 2^exp + ibz_t pow_check, tmp_check; + ibz_init(&pow_check); + ibz_init(&tmp_check); + ibz_pow(&pow_check, &ibz_const_two, exp); + ibz_mul(&tmp_check, d1, u); + ibz_sub(&pow_check, &pow_check, &tmp_check); + ibz_mul(&tmp_check, v, d2); + ibz_sub(&pow_check, &pow_check, &tmp_check); + assert(ibz_cmp(&pow_check, &ibz_const_zero) == 0); + ibz_finalize(&tmp_check); + ibz_finalize(&pow_check); +#endif + + // now we compute the dimension 2 isogeny + // F : Eu x Ev -> E x E' + // where we have phi_u : Eu -> E_index_order1 and phi_v : Ev -> E_index_order2 + // if we have phi1 : E_index_order_1 -> E of degree d1 + // and phi2 : E_index_order_2 -> E of degree d2 + // we can define theta = phi2 o hat{phi1} + // and the kernel of F is given by + // ( [ud1](P), phiv o theta o hat{phiu} (P)),( [ud1](Q), phiv o theta o + // hat{phiu} (Q)) where P,Q is a basis of E0[2e] + + // now we set-up the kernel + // ec_curve_t E0 = CURVE_E0; + ec_curve_t E1; + copy_curve(&E1, &CURVES_WITH_ENDOMORPHISMS[index_order1].curve); + ec_curve_t E2; + copy_curve(&E2, &CURVES_WITH_ENDOMORPHISMS[index_order2].curve); + ec_basis_t bas1, bas2; + theta_couple_curve_t E01; + theta_kernel_couple_points_t ker; + + ec_basis_t bas_u; + copy_basis(&bas1, &CURVES_WITH_ENDOMORPHISMS[index_order1].basis_even); + copy_basis(&bas2, &CURVES_WITH_ENDOMORPHISMS[index_order2].basis_even); + + // we start by computing theta = beta2 \hat{beta1}/n + ibz_set(&theta.denom, 1); + quat_alg_conj(&theta, beta1); + quat_alg_mul(&theta, beta2, &theta, &QUATALG_PINFTY); + ibz_mul(&theta.denom, &theta.denom, &lideal->norm); + + // now we perform the actual computation + quat_left_ideal_t idealu, idealv; + quat_left_ideal_init(&idealu); + quat_left_ideal_init(&idealv); + theta_couple_curve_t Fu_codomain, Fv_codomain; + theta_couple_point_t pushed_points[3]; + theta_couple_point_t *const V1 = pushed_points + 0, *const V2 = pushed_points + 1, *const V1m2 = pushed_points + 2; + theta_couple_point_t P, Q, PmQ; + + copy_point(&P.P1, &bas1.P); + copy_point(&PmQ.P1, &bas1.PmQ); + copy_point(&Q.P1, &bas1.Q); + // Set points to zero + ec_point_init(&P.P2); + ec_point_init(&Q.P2); + 
ec_point_init(&PmQ.P2); + + pushed_points[0] = P; + pushed_points[1] = Q; + pushed_points[2] = PmQ; + // we perform the computation of phiu with a fixed degree isogeny + ret = fixed_degree_isogeny_and_eval( + &idealu, u, true, &Fu_codomain, pushed_points, sizeof(pushed_points) / sizeof(*pushed_points), index_order1); + + if (!ret) { + goto cleanup; + } + assert(test_point_order_twof(&V1->P1, &Fu_codomain.E1, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&V1->P2, &Fu_codomain.E2, TORSION_EVEN_POWER)); + +#ifndef NDEBUG + Fu_length = (unsigned int)ret; + // presumably the correct curve is the first one, we check this + fp2_t w0a, w1a, w2a; + ec_curve_t E1_tmp, Fu_codomain_E1_tmp, Fu_codomain_E2_tmp; + copy_curve(&E1_tmp, &E1); + copy_curve(&Fu_codomain_E1_tmp, &Fu_codomain.E1); + copy_curve(&Fu_codomain_E2_tmp, &Fu_codomain.E2); + weil(&w0a, TORSION_EVEN_POWER, &bas1.P, &bas1.Q, &bas1.PmQ, &E1_tmp); + weil(&w1a, TORSION_EVEN_POWER, &V1->P1, &V2->P1, &V1m2->P1, &Fu_codomain_E1_tmp); + weil(&w2a, TORSION_EVEN_POWER, &V1->P2, &V2->P2, &V1m2->P2, &Fu_codomain_E2_tmp); + ibz_pow(&two_pow, &ibz_const_two, Fu_length); + ibz_sub(&two_pow, &two_pow, u); + + // now we are checking that the weil pairings are equal to the correct value + digit_t digit_u[NWORDS_ORDER] = { 0 }; + ibz_to_digit_array(digit_u, u); + fp2_t test_powa; + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + + assert(fp2_is_equal(&test_powa, &w1a)); + ibz_to_digit_array(digit_u, &two_pow); + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + assert(fp2_is_equal(&test_powa, &w2a)); +#endif + + // copying the basis images + copy_point(&bas_u.P, &V1->P1); + copy_point(&bas_u.Q, &V2->P1); + copy_point(&bas_u.PmQ, &V1m2->P1); + + // copying the points to the first part of the kernel + copy_point(&ker.T1.P1, &bas_u.P); + copy_point(&ker.T2.P1, &bas_u.Q); + copy_point(&ker.T1m2.P1, &bas_u.PmQ); + copy_curve(&E01.E1, &Fu_codomain.E1); + + copy_point(&P.P1, &bas2.P); + copy_point(&PmQ.P1, &bas2.PmQ); + copy_point(&Q.P1, &bas2.Q); + pushed_points[0] = P; + pushed_points[1] = Q; + pushed_points[2] = PmQ; + + // computation of phiv + ret = fixed_degree_isogeny_and_eval( + &idealv, v, true, &Fv_codomain, pushed_points, sizeof(pushed_points) / sizeof(*pushed_points), index_order2); + if (!ret) { + goto cleanup; + } + + assert(test_point_order_twof(&V1->P1, &Fv_codomain.E1, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&V1->P2, &Fv_codomain.E2, TORSION_EVEN_POWER)); + +#ifndef NDEBUG + Fv_length = (unsigned int)ret; + ec_curve_t E2_tmp, Fv_codomain_E1_tmp, Fv_codomain_E2_tmp; + copy_curve(&E2_tmp, &E2); + copy_curve(&Fv_codomain_E1_tmp, &Fv_codomain.E1); + copy_curve(&Fv_codomain_E2_tmp, &Fv_codomain.E2); + // presumably the correct curve is the first one, we check this + weil(&w0a, TORSION_EVEN_POWER, &bas2.P, &bas2.Q, &bas2.PmQ, &E2_tmp); + weil(&w1a, TORSION_EVEN_POWER, &V1->P1, &V2->P1, &V1m2->P1, &Fv_codomain_E1_tmp); + weil(&w2a, TORSION_EVEN_POWER, &V1->P2, &V2->P2, &V1m2->P2, &Fv_codomain_E2_tmp); + if (Fv_length == 0) { + ibz_set(&tmp, 1); + ibz_set(&two_pow, 1); + } else { + ibz_pow(&two_pow, &ibz_const_two, Fv_length); + ibz_sub(&two_pow, &two_pow, v); + } + + // now we are checking that one of the two is equal to the correct value + ibz_to_digit_array(digit_u, v); + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + assert(fp2_is_equal(&test_powa, &w1a)); + ibz_to_digit_array(digit_u, &two_pow); + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + assert(fp2_is_equal(&test_powa, 
&w2a)); + +#endif + + copy_point(&bas2.P, &V1->P1); + copy_point(&bas2.Q, &V2->P1); + copy_point(&bas2.PmQ, &V1m2->P1); + + // multiplying theta by 1 / (d1 * n(connecting_ideal2)) + ibz_pow(&two_pow, &ibz_const_two, TORSION_EVEN_POWER); + ibz_copy(&tmp, d1); + if (index_order2 > 0) { + ibz_mul(&tmp, &tmp, &ALTERNATE_CONNECTING_IDEALS[index_order2 - 1].norm); + } + ibz_invmod(&tmp, &tmp, &two_pow); + + ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); + ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); + ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); + ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + + // applying theta + endomorphism_application_even_basis(&bas2, 0, &Fv_codomain.E1, &theta, TORSION_EVEN_POWER); + + assert(test_basis_order_twof(&bas2, &Fv_codomain.E1, TORSION_EVEN_POWER)); + + // copying points to the second part of the kernel + copy_point(&ker.T1.P2, &bas2.P); + copy_point(&ker.T2.P2, &bas2.Q); + copy_point(&ker.T1m2.P2, &bas2.PmQ); + copy_curve(&E01.E2, &Fv_codomain.E1); + + // copying the points to the first part of the kernel + quat_left_ideal_finalize(&idealu); + quat_left_ideal_finalize(&idealv); + + double_couple_point_iter(&ker.T1, TORSION_EVEN_POWER - exp, &ker.T1, &E01); + double_couple_point_iter(&ker.T2, TORSION_EVEN_POWER - exp, &ker.T2, &E01); + double_couple_point_iter(&ker.T1m2, TORSION_EVEN_POWER - exp, &ker.T1m2, &E01); + + assert(test_point_order_twof(&ker.T1.P1, &E01.E1, exp)); + assert(test_point_order_twof(&ker.T1m2.P2, &E01.E2, exp)); + + assert(ibz_is_odd(u)); + + // now we evaluate the basis points through the isogeny + assert(test_basis_order_twof(&bas_u, &E01.E1, TORSION_EVEN_POWER)); + + // evaluating the basis through the isogeny of degree u*d1 + copy_point(&pushed_points[0].P1, &bas_u.P); + copy_point(&pushed_points[2].P1, &bas_u.PmQ); + copy_point(&pushed_points[1].P1, &bas_u.Q); + // Set points to zero + ec_point_init(&pushed_points[0].P2); + ec_point_init(&pushed_points[1].P2); + ec_point_init(&pushed_points[2].P2); + + theta_couple_curve_t theta_codomain; + + ret = theta_chain_compute_and_eval_randomized( + exp, &E01, &ker, false, &theta_codomain, pushed_points, sizeof(pushed_points) / sizeof(*pushed_points)); + if (!ret) { + goto cleanup; + } + + theta_couple_point_t T1, T2, T1m2; + T1 = pushed_points[0]; + T2 = pushed_points[1]; + T1m2 = pushed_points[2]; + + assert(test_point_order_twof(&T1.P2, &theta_codomain.E2, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&T1.P1, &theta_codomain.E1, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&T1m2.P2, &theta_codomain.E2, TORSION_EVEN_POWER)); + + copy_point(&basis->P, &T1.P1); + copy_point(&basis->Q, &T2.P1); + copy_point(&basis->PmQ, &T1m2.P1); + copy_curve(codomain, &theta_codomain.E1); + + // using weil pairing to verify that we selected the correct curve + fp2_t w0, w1; + // ec_curve_t E0 = CURVE_E0; + // ec_basis_t bas0 = BASIS_EVEN; + weil(&w0, TORSION_EVEN_POWER, &bas1.P, &bas1.Q, &bas1.PmQ, &E1); + weil(&w1, TORSION_EVEN_POWER, &basis->P, &basis->Q, &basis->PmQ, codomain); + + digit_t digit_d[NWORDS_ORDER] = { 0 }; + ibz_mul(&tmp, d1, u); + ibz_mul(&tmp, &tmp, u); + ibz_mod(&tmp, &tmp, &TORSION_PLUS_2POWER); + ibz_to_digit_array(digit_d, &tmp); + fp2_t test_pow; + fp2_pow_vartime(&test_pow, &w0, digit_d, NWORDS_ORDER); + + // then we have selected the wrong one + if (!fp2_is_equal(&w1, &test_pow)) { + copy_point(&basis->P, &T1.P2); + copy_point(&basis->Q, &T2.P2); + copy_point(&basis->PmQ, &T1m2.P2); + copy_curve(codomain, &theta_codomain.E2); + +// verifying that the other 
one is the good one +#ifndef NDEBUG + ec_curve_t codomain_tmp; + copy_curve(&codomain_tmp, codomain); + weil(&w1, TORSION_EVEN_POWER, &basis->P, &basis->Q, &basis->PmQ, &codomain_tmp); + fp2_pow_vartime(&test_pow, &w0, digit_d, NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w1)); +#endif + } + + // now we apply M / (u * d1) where M is the matrix corresponding to the + // endomorphism beta1 = phi o dual(phi1) we multiply beta1 by the inverse of + // (u*d1) mod 2^TORSION_EVEN_POWER + ibz_mul(&tmp, u, d1); + if (index_order1 != 0) { + ibz_mul(&tmp, &tmp, &CONNECTING_IDEALS[index_order1].norm); + } + ibz_invmod(&tmp, &tmp, &TORSION_PLUS_2POWER); + ibz_mul(&beta1->coord[0], &beta1->coord[0], &tmp); + ibz_mul(&beta1->coord[1], &beta1->coord[1], &tmp); + ibz_mul(&beta1->coord[2], &beta1->coord[2], &tmp); + ibz_mul(&beta1->coord[3], &beta1->coord[3], &tmp); + + endomorphism_application_even_basis(basis, 0, codomain, beta1, TORSION_EVEN_POWER); + +#ifndef NDEBUG + { + ec_curve_t E0 = CURVE_E0; + ec_curve_t codomain_tmp; + ec_basis_t bas0 = CURVES_WITH_ENDOMORPHISMS[0].basis_even; + copy_curve(&codomain_tmp, codomain); + copy_curve(&E1_tmp, &E1); + copy_curve(&E2_tmp, &E2); + weil(&w0a, TORSION_EVEN_POWER, &bas0.P, &bas0.Q, &bas0.PmQ, &E0); + weil(&w1a, TORSION_EVEN_POWER, &basis->P, &basis->Q, &basis->PmQ, &codomain_tmp); + digit_t tmp_d[2 * NWORDS_ORDER] = { 0 }; + if (index_order1 != 0) { + copy_basis(&bas1, &CURVES_WITH_ENDOMORPHISMS[index_order1].basis_even); + weil(&w0, TORSION_EVEN_POWER, &bas1.P, &bas1.Q, &bas1.PmQ, &E1_tmp); + ibz_to_digit_array(tmp_d, &CONNECTING_IDEALS[index_order1].norm); + fp2_pow_vartime(&test_pow, &w0a, tmp_d, 2 * NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w0)); + } + if (index_order2 != 0) { + copy_basis(&bas2, &CURVES_WITH_ENDOMORPHISMS[index_order2].basis_even); + weil(&w0, TORSION_EVEN_POWER, &bas2.P, &bas2.Q, &bas2.PmQ, &E2_tmp); + ibz_to_digit_array(tmp_d, &CONNECTING_IDEALS[index_order2].norm); + fp2_pow_vartime(&test_pow, &w0a, tmp_d, 2 * NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w0)); + } + ibz_to_digit_array(tmp_d, &lideal->norm); + fp2_pow_vartime(&test_pow, &w0a, tmp_d, 2 * NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w1a)); + } +#endif + +cleanup: + ibz_finalize(&norm_d); + ibz_finalize(&test1); + ibz_finalize(&test2); + ibz_finalize(&target); + ibz_finalize(&tmp); + ibz_finalize(&two_pow); + quat_alg_elem_finalize(&theta); + return ret; +} + +int +dim2id2iso_arbitrary_isogeny_evaluation(ec_basis_t *basis, ec_curve_t *codomain, const quat_left_ideal_t *lideal) +{ + int ret; + + quat_alg_elem_t beta1, beta2; + ibz_t u, v, d1, d2; + + quat_alg_elem_init(&beta1); + quat_alg_elem_init(&beta2); + + ibz_init(&u); + ibz_init(&v); + ibz_init(&d1); + ibz_init(&d2); + + ret = dim2id2iso_ideal_to_isogeny_clapotis( + &beta1, &beta2, &u, &v, &d1, &d2, codomain, basis, lideal, &QUATALG_PINFTY); + + quat_alg_elem_finalize(&beta1); + quat_alg_elem_finalize(&beta2); + + ibz_finalize(&u); + ibz_finalize(&v); + ibz_finalize(&d1); + ibz_finalize(&d2); + + return ret; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/e0_basis.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/e0_basis.c new file mode 100644 index 0000000000..5be2b8e57e --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/e0_basis.c @@ -0,0 +1,55 @@ +#include +const fp2_t BASIS_E0_PX = { +#if 0 +#elif RADIX == 16 +{0x107, 0xc, 0x1890, 0xf2a, 0x52b, 0xb68, 0x152d, 0xa4c, 0x1054, 0x642, 0x36a, 0x6f8, 0x7ad, 0x146c, 0x1d66, 0x1b67, 0x236, 0x10d, 0x1933, 
0x3} +#elif RADIX == 32 +{0x3020e, 0xb795624, 0x5ab6829, 0x1514995, 0x1b5190a, 0x187ad37c, 0x19facd46, 0x8688db6, 0x3c998} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x52b795624001810, 0x8c8505452654b56d, 0xf59a8d87ad37c0da, 0x24e4cc21a236db3} +#else +{0x5bcab12000c08, 0x452654b56d052, 0x26f81b5190a0a, 0x36cfd66a361eb, 0x12726610d11b} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1f87, 0x83e, 0x32e, 0xe58, 0xd9d, 0x1416, 0x752, 0x13b4, 0x1efa, 0xe62, 0x12f5, 0x1907, 0x1814, 0x1ddd, 0x1aa6, 0x1420, 0x2cd, 0x1431, 0x1be2, 0x7} +#elif RADIX == 32 +{0x120fbf0f, 0x1d72c0cb, 0xa54166c, 0x1bea7687, 0x197ab98b, 0x1b814c83, 0x8354ddd, 0x188b368, 0x2df15} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xcd9d72c0cb907df8, 0x5cc5efa9da1d4a82, 0x6a9bbbb814c83cbd, 0x26ef8a8622cda10} +#else +{0x6b96065c83efc, 0x29da1d4a82cd9, 0x190797ab98bdf, 0x6841aa6eeee05, 0x1377c5431166} +#endif +#endif +}; +const fp2_t BASIS_E0_QX = { +#if 0 +#elif RADIX == 16 +{0x5ff, 0x1783, 0xadc, 0x775, 0xad4, 0x593, 0xb4c, 0x21e, 0x1cb2, 0x13d8, 0x179f, 0x680, 0x1a9c, 0x1824, 0x118e, 0x13d9, 0x24, 0x1956, 0x1dd2, 0x9} +#elif RADIX == 32 +{0x5e0cbff, 0x143baab7, 0x9859356, 0x12c843cb, 0xbcfcf63, 0x9a9c340, 0x16631d82, 0xab00927, 0x4ee96} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6ad43baab72f065f, 0xe7b1cb210f2d30b2, 0xc63b049a9c3405e7, 0x4ff74b2ac0249ec} +#else +{0x21dd55b97832f, 0x210f2d30b26ad, 0x680bcfcf6396, 0x27b318ec126a7, 0x4ffba5956012} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1c7f, 0x1117, 0xa4, 0x1164, 0x6e, 0x1e63, 0x1b7b, 0x1305, 0x424, 0x131a, 0x1b61, 0xae3, 0x17b1, 0xe5e, 0x1848, 0x1e81, 0x14a5, 0x1cb5, 0x1d87, 0x8} +#elif RADIX == 32 +{0x445f8ff, 0xe8b2029, 0xf7e6303, 0x109260bb, 0x1db0cc68, 0x1d7b1571, 0x7090e5, 0x5ad297d, 0x3ec3f} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x606e8b2029222fc7, 0x6634424982edefcc, 0xe121cbd7b1571ed8, 0x4f761f96b4a5f40} +#else +{0x74590149117e3, 0x4982edefcc606, 0x2ae3db0cc6884, 0x7d0384872f5ec, 0x4fbb0fcb5a52} +#endif +#endif +}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/e0_basis.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/e0_basis.h new file mode 100644 index 0000000000..05cafb8462 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/e0_basis.h @@ -0,0 +1,3 @@ +#include +extern const fp2_t BASIS_E0_PX; +extern const fp2_t BASIS_E0_QX; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec.c new file mode 100644 index 0000000000..be4e4e55b1 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec.c @@ -0,0 +1,665 @@ +#include +#include +#include +#include + +void +ec_point_init(ec_point_t *P) +{ // Initialize point as identity element (1:0) + fp2_set_one(&(P->x)); + fp2_set_zero(&(P->z)); +} + +void +ec_curve_init(ec_curve_t *E) +{ // Initialize the curve struct + // Initialize the constants + fp2_set_zero(&(E->A)); + fp2_set_one(&(E->C)); + + // Initialize the point (A+2 : 4C) + ec_point_init(&(E->A24)); + + // Set the bool to be false by default + E->is_A24_computed_and_normalized = false; +} + +void +select_point(ec_point_t *Q, const ec_point_t *P1, const ec_point_t *P2, const digit_t option) +{ // Select points in constant time + // If option = 0 then Q <- P1, else if option = 0xFF...FF then Q <- P2 + fp2_select(&(Q->x), &(P1->x), &(P2->x), option); + fp2_select(&(Q->z), &(P1->z), &(P2->z), option); +} + +void +cswap_points(ec_point_t *P, ec_point_t *Q, const 
digit_t option) +{ // Swap points in constant time + // If option = 0 then P <- P and Q <- Q, else if option = 0xFF...FF then P <- Q and Q <- P + fp2_cswap(&(P->x), &(Q->x), option); + fp2_cswap(&(P->z), &(Q->z), option); +} + +void +ec_normalize_point(ec_point_t *P) +{ + fp2_inv(&P->z); + fp2_mul(&P->x, &P->x, &P->z); + fp2_set_one(&(P->z)); +} + +void +ec_normalize_curve(ec_curve_t *E) +{ + fp2_inv(&E->C); + fp2_mul(&E->A, &E->A, &E->C); + fp2_set_one(&E->C); +} + +void +ec_curve_normalize_A24(ec_curve_t *E) +{ + if (!E->is_A24_computed_and_normalized) { + AC_to_A24(&E->A24, E); + ec_normalize_point(&E->A24); + E->is_A24_computed_and_normalized = true; + } + assert(fp2_is_one(&E->A24.z)); +} + +void +ec_normalize_curve_and_A24(ec_curve_t *E) +{ // Neither the curve or A24 are guaranteed to be normalized. + // First we normalize (A/C : 1) and conditionally compute + if (!fp2_is_one(&E->C)) { + ec_normalize_curve(E); + } + + if (!E->is_A24_computed_and_normalized) { + // Now compute A24 = ((A + 2) / 4 : 1) + fp2_add_one(&E->A24.x, &E->A); // re(A24.x) = re(A) + 1 + fp2_add_one(&E->A24.x, &E->A24.x); // re(A24.x) = re(A) + 2 + fp_copy(&E->A24.x.im, &E->A.im); // im(A24.x) = im(A) + + fp2_half(&E->A24.x, &E->A24.x); // (A + 2) / 2 + fp2_half(&E->A24.x, &E->A24.x); // (A + 2) / 4 + fp2_set_one(&E->A24.z); + + E->is_A24_computed_and_normalized = true; + } +} + +uint32_t +ec_is_zero(const ec_point_t *P) +{ + return fp2_is_zero(&P->z); +} + +uint32_t +ec_has_zero_coordinate(const ec_point_t *P) +{ + return fp2_is_zero(&P->x) | fp2_is_zero(&P->z); +} + +uint32_t +ec_is_equal(const ec_point_t *P, const ec_point_t *Q) +{ // Evaluate if two points in Montgomery coordinates (X:Z) are equal + // Returns 0xFFFFFFFF (true) if P=Q, 0 (false) otherwise + fp2_t t0, t1; + + // Check if P, Q are the points at infinity + uint32_t l_zero = ec_is_zero(P); + uint32_t r_zero = ec_is_zero(Q); + + // Check if PX * QZ = QX * PZ + fp2_mul(&t0, &P->x, &Q->z); + fp2_mul(&t1, &P->z, &Q->x); + uint32_t lr_equal = fp2_is_equal(&t0, &t1); + + // Points are equal if + // - Both are zero, or + // - neither are zero AND PX * QZ = QX * PZ + return (l_zero & r_zero) | (~l_zero & ~r_zero * lr_equal); +} + +uint32_t +ec_is_two_torsion(const ec_point_t *P, const ec_curve_t *E) +{ + if (ec_is_zero(P)) + return 0; + + uint32_t x_is_zero, tmp_is_zero; + fp2_t t0, t1, t2; + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + fp2_add(&t1, &t0, &t1); + fp2_mul(&t2, &t2, &E->A); + fp2_mul(&t1, &t1, &E->C); + fp2_add(&t1, &t1, &t1); + fp2_add(&t0, &t1, &t2); // 4 (CX^2+CZ^2+AXZ) + + x_is_zero = fp2_is_zero(&P->x); + tmp_is_zero = fp2_is_zero(&t0); + + // two torsion if x or x^2 + Ax + 1 is zero + return x_is_zero | tmp_is_zero; +} + +uint32_t +ec_is_four_torsion(const ec_point_t *P, const ec_curve_t *E) +{ + ec_point_t test; + xDBL_A24(&test, P, &E->A24, E->is_A24_computed_and_normalized); + return ec_is_two_torsion(&test, E); +} + +uint32_t +ec_is_basis_four_torsion(const ec_basis_t *B, const ec_curve_t *E) +{ // Check if basis points (P, Q) form a full 2^t-basis + ec_point_t P2, Q2; + xDBL_A24(&P2, &B->P, &E->A24, E->is_A24_computed_and_normalized); + xDBL_A24(&Q2, &B->Q, &E->A24, E->is_A24_computed_and_normalized); + return (ec_is_two_torsion(&P2, E) & ec_is_two_torsion(&Q2, E) & ~ec_is_equal(&P2, &Q2)); +} + +int +ec_curve_verify_A(const fp2_t *A) +{ // Verify the Montgomery coefficient A is valid (A^2-4 \ne 0) + // Return 1 if curve is valid, 0 otherwise + 
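    // A^2 - 4 = (A - 2)(A + 2), so it is enough to check that A differs from 2 and -2: +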
fp2_t t; + fp2_set_one(&t); + fp_add(&t.re, &t.re, &t.re); // t=2 + if (fp2_is_equal(A, &t)) + return 0; + fp_neg(&t.re, &t.re); // t=-2 + if (fp2_is_equal(A, &t)) + return 0; + return 1; +} + +int +ec_curve_init_from_A(ec_curve_t *E, const fp2_t *A) +{ // Initialize the curve from the A coefficient and check it is valid + // Return 1 if curve is valid, 0 otherwise + ec_curve_init(E); + fp2_copy(&E->A, A); // Set A + return ec_curve_verify_A(A); +} + +void +ec_j_inv(fp2_t *j_inv, const ec_curve_t *curve) +{ // j-invariant computation for Montgommery coefficient A2=(A+2C:4C) + fp2_t t0, t1; + + fp2_sqr(&t1, &curve->C); + fp2_sqr(j_inv, &curve->A); + fp2_add(&t0, &t1, &t1); + fp2_sub(&t0, j_inv, &t0); + fp2_sub(&t0, &t0, &t1); + fp2_sub(j_inv, &t0, &t1); + fp2_sqr(&t1, &t1); + fp2_mul(j_inv, j_inv, &t1); + fp2_add(&t0, &t0, &t0); + fp2_add(&t0, &t0, &t0); + fp2_sqr(&t1, &t0); + fp2_mul(&t0, &t0, &t1); + fp2_add(&t0, &t0, &t0); + fp2_add(&t0, &t0, &t0); + fp2_inv(j_inv); + fp2_mul(j_inv, &t0, j_inv); +} + +void +xDBL_E0(ec_point_t *Q, const ec_point_t *P) +{ // Doubling of a Montgomery point in projective coordinates (X:Z) on the curve E0 with (A:C) = (0:1). + // Input: projective Montgomery x-coordinates P = (XP:ZP), where xP=XP/ZP, and Montgomery curve constants (A:C) = (0:1). + // Output: projective Montgomery x-coordinates Q <- 2*P = (XQ:ZQ) such that x(2P)=XQ/ZQ. + fp2_t t0, t1, t2; + + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + fp2_add(&t1, &t1, &t1); + fp2_mul(&Q->x, &t0, &t1); + fp2_add(&Q->z, &t1, &t2); + fp2_mul(&Q->z, &Q->z, &t2); +} + +void +xDBL(ec_point_t *Q, const ec_point_t *P, const ec_point_t *AC) +{ // Doubling of a Montgomery point in projective coordinates (X:Z). Computation of coefficient values A+2C and 4C + // on-the-fly. + // Input: projective Montgomery x-coordinates P = (XP:ZP), where xP=XP/ZP, and Montgomery curve constants (A:C). + // Output: projective Montgomery x-coordinates Q <- 2*P = (XQ:ZQ) such that x(2P)=XQ/ZQ. + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + fp2_add(&t3, &AC->z, &AC->z); + fp2_mul(&t1, &t1, &t3); + fp2_add(&t1, &t1, &t1); + fp2_mul(&Q->x, &t0, &t1); + fp2_add(&t0, &t3, &AC->x); + fp2_mul(&t0, &t0, &t2); + fp2_add(&t0, &t0, &t1); + fp2_mul(&Q->z, &t0, &t2); +} + +void +xDBL_A24(ec_point_t *Q, const ec_point_t *P, const ec_point_t *A24, const bool A24_normalized) +{ // Doubling of a Montgomery point in projective coordinates (X:Z). + // Input: projective Montgomery x-coordinates P = (XP:ZP), where xP=XP/ZP, and + // the Montgomery curve constants A24 = (A+2C:4C) (or A24 = (A+2C/4C:1) if normalized). + // Output: projective Montgomery x-coordinates Q <- 2*P = (XQ:ZQ) such that x(2P)=XQ/ZQ. + fp2_t t0, t1, t2; + + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + if (!A24_normalized) + fp2_mul(&t1, &t1, &A24->z); + fp2_mul(&Q->x, &t0, &t1); + fp2_mul(&t0, &t2, &A24->x); + fp2_add(&t0, &t0, &t1); + fp2_mul(&Q->z, &t0, &t2); +} + +void +xADD(ec_point_t *R, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ) +{ // Differential addition of Montgomery points in projective coordinates (X:Z). + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, and difference + // PQ=P-Q=(XPQ:ZPQ). 
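+    // These are the usual x-only differential addition formulas: they require x(P-Q) as input, and they
+    // break down when an input has a zero coordinate (callers guard this with ec_has_zero_coordinate).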
+ // Output: projective Montgomery point R <- P+Q = (XR:ZR) such that x(P+Q)=XR/ZR. + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_add(&t2, &Q->x, &Q->z); + fp2_sub(&t3, &Q->x, &Q->z); + fp2_mul(&t0, &t0, &t3); + fp2_mul(&t1, &t1, &t2); + fp2_add(&t2, &t0, &t1); + fp2_sub(&t3, &t0, &t1); + fp2_sqr(&t2, &t2); + fp2_sqr(&t3, &t3); + fp2_mul(&t2, &PQ->z, &t2); + fp2_mul(&R->z, &PQ->x, &t3); + fp2_copy(&R->x, &t2); +} + +void +xDBLADD(ec_point_t *R, + ec_point_t *S, + const ec_point_t *P, + const ec_point_t *Q, + const ec_point_t *PQ, + const ec_point_t *A24, + const bool A24_normalized) +{ // Simultaneous doubling and differential addition. + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, the difference + // PQ=P-Q=(XPQ:ZPQ), and the Montgomery curve constants A24 = (A+2C:4C) (or A24 = (A+2C/4C:1) if normalized). + // Output: projective Montgomery points R <- 2*P = (XR:ZR) such that x(2P)=XR/ZR, and S <- P+Q = (XS:ZS) such that = + // x(Q+P)=XS/ZS. + fp2_t t0, t1, t2; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&R->x, &t0); + fp2_sub(&t2, &Q->x, &Q->z); + fp2_add(&S->x, &Q->x, &Q->z); + fp2_mul(&t0, &t0, &t2); + fp2_sqr(&R->z, &t1); + fp2_mul(&t1, &t1, &S->x); + fp2_sub(&t2, &R->x, &R->z); + if (!A24_normalized) + fp2_mul(&R->z, &R->z, &A24->z); + fp2_mul(&R->x, &R->x, &R->z); + fp2_mul(&S->x, &A24->x, &t2); + fp2_sub(&S->z, &t0, &t1); + fp2_add(&R->z, &R->z, &S->x); + fp2_add(&S->x, &t0, &t1); + fp2_mul(&R->z, &R->z, &t2); + fp2_sqr(&S->z, &S->z); + fp2_sqr(&S->x, &S->x); + fp2_mul(&S->z, &S->z, &PQ->x); + fp2_mul(&S->x, &S->x, &PQ->z); +} + +void +xMUL(ec_point_t *Q, const ec_point_t *P, const digit_t *k, const int kbits, const ec_curve_t *curve) +{ // The Montgomery ladder + // Input: projective Montgomery point P=(XP:ZP) such that xP=XP/ZP, a scalar k of bitlength kbits, and + // the Montgomery curve constants (A:C) (or A24 = (A+2C/4C:1) if normalized). + // Output: projective Montgomery points Q <- k*P = (XQ:ZQ) such that x(k*P)=XQ/ZQ. + ec_point_t R0, R1, A24; + digit_t mask; + unsigned int bit, prevbit = 0, swap; + + if (!curve->is_A24_computed_and_normalized) { + // Computation of A24=(A+2C:4C) + fp2_add(&A24.x, &curve->C, &curve->C); + fp2_add(&A24.z, &A24.x, &A24.x); + fp2_add(&A24.x, &A24.x, &curve->A); + } else { + fp2_copy(&A24.x, &curve->A24.x); + fp2_copy(&A24.z, &curve->A24.z); + // Assert A24 has been normalised + assert(fp2_is_one(&A24.z)); + } + + // R0 <- (1:0), R1 <- P + ec_point_init(&R0); + fp2_copy(&R1.x, &P->x); + fp2_copy(&R1.z, &P->z); + + // Main loop + for (int i = kbits - 1; i >= 0; i--) { + bit = (k[i >> LOG2RADIX] >> (i & (RADIX - 1))) & 1; + swap = bit ^ prevbit; + prevbit = bit; + mask = 0 - (digit_t)swap; + + cswap_points(&R0, &R1, mask); + xDBLADD(&R0, &R1, &R0, &R1, P, &A24, true); + } + swap = 0 ^ prevbit; + mask = 0 - (digit_t)swap; + cswap_points(&R0, &R1, mask); + + fp2_copy(&Q->x, &R0.x); + fp2_copy(&Q->z, &R0.z); +} + +int +xDBLMUL(ec_point_t *S, + const ec_point_t *P, + const digit_t *k, + const ec_point_t *Q, + const digit_t *l, + const ec_point_t *PQ, + const int kbits, + const ec_curve_t *curve) +{ // The Montgomery biladder + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, scalars k and l of + // bitlength kbits, the difference PQ=P-Q=(XPQ:ZPQ), and the Montgomery curve constants (A:C). 
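+    // The scalars are first made odd and jointly recoded; the main loop then updates three accumulators
+    // R[0], R[1], R[2] using one doubling and two differential additions per scalar bit.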
+ // Output: projective Montgomery point S <- k*P + l*Q = (XS:ZS) such that x(k*P + l*Q)=XS/ZS. + + int i, A_is_zero; + digit_t evens, mevens, bitk0, bitl0, maskk, maskl, temp, bs1_ip1, bs2_ip1, bs1_i, bs2_i, h; + digit_t sigma[2] = { 0 }, pre_sigma = 0; + digit_t k_t[NWORDS_ORDER], l_t[NWORDS_ORDER], one[NWORDS_ORDER] = { 0 }, r[2 * BITS] = { 0 }; + ec_point_t DIFF1a, DIFF1b, DIFF2a, DIFF2b, R[3] = { 0 }, T[3]; + + // differential additions formulas are invalid in this case + if (ec_has_zero_coordinate(P) | ec_has_zero_coordinate(Q) | ec_has_zero_coordinate(PQ)) + return 0; + + // Derive sigma according to parity + bitk0 = (k[0] & 1); + bitl0 = (l[0] & 1); + maskk = 0 - bitk0; // Parity masks: 0 if even, otherwise 1...1 + maskl = 0 - bitl0; + sigma[0] = (bitk0 ^ 1); + sigma[1] = (bitl0 ^ 1); + evens = sigma[0] + sigma[1]; // Count number of even scalars + mevens = 0 - (evens & 1); // Mask mevens <- 0 if # even of scalars = 0 or 2, otherwise mevens = 1...1 + + // If k and l are both even or both odd, pick sigma = (0,1) + sigma[0] = (sigma[0] & mevens); + sigma[1] = (sigma[1] & mevens) | (1 & ~mevens); + + // Convert even scalars to odd + one[0] = 1; + mp_sub(k_t, k, one, NWORDS_ORDER); + mp_sub(l_t, l, one, NWORDS_ORDER); + select_ct(k_t, k_t, k, maskk, NWORDS_ORDER); + select_ct(l_t, l_t, l, maskl, NWORDS_ORDER); + + // Scalar recoding + for (i = 0; i < kbits; i++) { + // If sigma[0] = 1 swap k_t and l_t + maskk = 0 - (sigma[0] ^ pre_sigma); + swap_ct(k_t, l_t, maskk, NWORDS_ORDER); + + if (i == kbits - 1) { + bs1_ip1 = 0; + bs2_ip1 = 0; + } else { + bs1_ip1 = mp_shiftr(k_t, 1, NWORDS_ORDER); + bs2_ip1 = mp_shiftr(l_t, 1, NWORDS_ORDER); + } + bs1_i = k_t[0] & 1; + bs2_i = l_t[0] & 1; + + r[2 * i] = bs1_i ^ bs1_ip1; + r[2 * i + 1] = bs2_i ^ bs2_ip1; + + // Revert sigma if second bit, r_(2i+1), is 1 + pre_sigma = sigma[0]; + maskk = 0 - r[2 * i + 1]; + select_ct(&temp, &sigma[0], &sigma[1], maskk, 1); + select_ct(&sigma[1], &sigma[1], &sigma[0], maskk, 1); + sigma[0] = temp; + } + + // Point initialization + ec_point_init(&R[0]); + maskk = 0 - sigma[0]; + select_point(&R[1], P, Q, maskk); + select_point(&R[2], Q, P, maskk); + + fp2_copy(&DIFF1a.x, &R[1].x); + fp2_copy(&DIFF1a.z, &R[1].z); + fp2_copy(&DIFF1b.x, &R[2].x); + fp2_copy(&DIFF1b.z, &R[2].z); + + // Initialize DIFF2a <- P+Q, DIFF2b <- P-Q + xADD(&R[2], &R[1], &R[2], PQ); + if (ec_has_zero_coordinate(&R[2])) + return 0; // non valid formulas + + fp2_copy(&DIFF2a.x, &R[2].x); + fp2_copy(&DIFF2a.z, &R[2].z); + fp2_copy(&DIFF2b.x, &PQ->x); + fp2_copy(&DIFF2b.z, &PQ->z); + + A_is_zero = fp2_is_zero(&curve->A); + + // Main loop + for (i = kbits - 1; i >= 0; i--) { + h = r[2 * i] + r[2 * i + 1]; // in {0, 1, 2} + maskk = 0 - (h & 1); + select_point(&T[0], &R[0], &R[1], maskk); + maskk = 0 - (h >> 1); + select_point(&T[0], &T[0], &R[2], maskk); + if (A_is_zero) { + xDBL_E0(&T[0], &T[0]); + } else { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(&T[0], &T[0], &curve->A24, true); + } + + maskk = 0 - r[2 * i + 1]; // in {0, 1} + select_point(&T[1], &R[0], &R[1], maskk); + select_point(&T[2], &R[1], &R[2], maskk); + + cswap_points(&DIFF1a, &DIFF1b, maskk); + xADD(&T[1], &T[1], &T[2], &DIFF1a); + xADD(&T[2], &R[0], &R[2], &DIFF2a); + + // If hw (mod 2) = 1 then swap DIFF2a and DIFF2b + maskk = 0 - (h & 1); + cswap_points(&DIFF2a, &DIFF2b, maskk); + + // R <- T + copy_point(&R[0], &T[0]); + copy_point(&R[1], &T[1]); + copy_point(&R[2], &T[2]); + } + + // Output R[evens] + select_point(S, &R[0], &R[1], mevens); + + maskk = 0 - (bitk0 & bitl0); + 
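    // If both scalars were originally odd, the recoded ladder leaves the result in R[2] instead: +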
select_point(S, S, &R[2], maskk); + return 1; +} + +int +ec_ladder3pt(ec_point_t *R, + const digit_t *m, + const ec_point_t *P, + const ec_point_t *Q, + const ec_point_t *PQ, + const ec_curve_t *E) +{ // The 3-point Montgomery ladder + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, a scalar k of + // bitlength kbits, the difference PQ=P-Q=(XPQ:ZPQ), and the Montgomery curve constants A24 = (A+2C/4C:1). + // Output: projective Montgomery point R <- P + m*Q = (XR:ZR) such that x(P + m*Q)=XR/ZR. + assert(E->is_A24_computed_and_normalized); + if (!fp2_is_one(&E->A24.z)) { + return 0; + } + // Formulas are not valid in that case + if (ec_has_zero_coordinate(PQ)) { + return 0; + } + + ec_point_t X0, X1, X2; + copy_point(&X0, Q); + copy_point(&X1, P); + copy_point(&X2, PQ); + + int i, j; + digit_t t; + for (i = 0; i < NWORDS_ORDER; i++) { + t = 1; + for (j = 0; j < RADIX; j++) { + cswap_points(&X1, &X2, -((t & m[i]) == 0)); + xDBLADD(&X0, &X1, &X0, &X1, &X2, &E->A24, true); + cswap_points(&X1, &X2, -((t & m[i]) == 0)); + t <<= 1; + }; + }; + copy_point(R, &X1); + return 1; +} + +// WRAPPERS to export + +void +ec_dbl(ec_point_t *res, const ec_point_t *P, const ec_curve_t *curve) +{ + // If A24 = ((A+2)/4 : 1) we save multiplications + if (curve->is_A24_computed_and_normalized) { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(res, P, &curve->A24, true); + } else { + // Otherwise we compute A24 on the fly for doubling + xDBL(res, P, (const ec_point_t *)curve); + } +} + +void +ec_dbl_iter(ec_point_t *res, int n, const ec_point_t *P, ec_curve_t *curve) +{ + if (n == 0) { + copy_point(res, P); + return; + } + + // When the chain is long enough, we should normalise A24 + if (n > 50) { + ec_curve_normalize_A24(curve); + } + + // When A24 is normalized we can save some multiplications + if (curve->is_A24_computed_and_normalized) { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(res, P, &curve->A24, true); + for (int i = 0; i < n - 1; i++) { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(res, res, &curve->A24, true); + } + } else { + // Otherwise we do normal doubling + xDBL(res, P, (const ec_point_t *)curve); + for (int i = 0; i < n - 1; i++) { + xDBL(res, res, (const ec_point_t *)curve); + } + } +} + +void +ec_dbl_iter_basis(ec_basis_t *res, int n, const ec_basis_t *B, ec_curve_t *curve) +{ + ec_dbl_iter(&res->P, n, &B->P, curve); + ec_dbl_iter(&res->Q, n, &B->Q, curve); + ec_dbl_iter(&res->PmQ, n, &B->PmQ, curve); +} + +void +ec_mul(ec_point_t *res, const digit_t *scalar, const int kbits, const ec_point_t *P, ec_curve_t *curve) +{ + // For large scalars it's worth normalising anyway + if (kbits > 50) { + ec_curve_normalize_A24(curve); + } + + // When A24 is computed and normalized we save some Fp2 multiplications + xMUL(res, P, scalar, kbits, curve); +} + +int +ec_biscalar_mul(ec_point_t *res, + const digit_t *scalarP, + const digit_t *scalarQ, + const int kbits, + const ec_basis_t *PQ, + const ec_curve_t *curve) +{ + if (fp2_is_zero(&PQ->PmQ.z)) + return 0; + + /* Differential additions behave badly when PmQ = (0:1), so we need to + * treat this case specifically. 
Since we assume P, Q are a basis, this + * can happen only if kbits==1 */ + if (kbits == 1) { + // Sanity check: our basis should be given by 2-torsion points + if (!ec_is_two_torsion(&PQ->P, curve) || !ec_is_two_torsion(&PQ->Q, curve) || + !ec_is_two_torsion(&PQ->PmQ, curve)) + return 0; + digit_t bP, bQ; + bP = (scalarP[0] & 1); + bQ = (scalarQ[0] & 1); + if (bP == 0 && bQ == 0) + ec_point_init(res); //(1: 0) + else if (bP == 1 && bQ == 0) + copy_point(res, &PQ->P); + else if (bP == 0 && bQ == 1) + copy_point(res, &PQ->Q); + else if (bP == 1 && bQ == 1) + copy_point(res, &PQ->PmQ); + else // should never happen + assert(0); + return 1; + } else { + ec_curve_t E; + copy_curve(&E, curve); + + if (!fp2_is_zero(&curve->A)) { // If A is not zero normalize + ec_curve_normalize_A24(&E); + } + return xDBLMUL(res, &PQ->P, scalarP, &PQ->Q, scalarQ, &PQ->PmQ, kbits, (const ec_curve_t *)&E); + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec.h new file mode 100644 index 0000000000..ee2be38060 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec.h @@ -0,0 +1,668 @@ +/** @file + * + * @authors Luca De Feo, Francisco RH + * + * @brief Elliptic curve stuff + */ + +#ifndef EC_H +#define EC_H +#include +#include +#include +#include +#include + +/** @defgroup ec Elliptic curves + * @{ + */ + +/** @defgroup ec_t Data structures + * @{ + */ + +/** @brief Projective point on the Kummer line E/pm 1 in Montgomery coordinates + * + * @typedef ec_point_t + * + * @struct ec_point_t + * + * A projective point in (X:Z) or (X:Y:Z) coordinates (tbd). + */ +typedef struct ec_point_t +{ + fp2_t x; + fp2_t z; +} ec_point_t; + +/** @brief Projective point in Montgomery coordinates + * + * @typedef jac_point_t + * + * @struct jac_point_t + * + * A projective point in (X:Y:Z) coordinates + */ +typedef struct jac_point_t +{ + fp2_t x; + fp2_t y; + fp2_t z; +} jac_point_t; + +/** @brief Addition components + * + * @typedef add_components_t + * + * @struct add_components_t + * + * 3 components u,v,w that define the (X:Z) coordinates of both + * addition and substraction of two distinct points with + * P+Q =(u-v:w) and P-Q = (u+v=w) + */ +typedef struct add_components_t +{ + fp2_t u; + fp2_t v; + fp2_t w; +} add_components_t; + +/** @brief A basis of a torsion subgroup + * + * @typedef ec_basis_t + * + * @struct ec_basis_t + * + * A pair of points (or a triplet, tbd) forming a basis of a torsion subgroup. 
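+ *
+ * The basis is stored x-only: x(P), x(Q) and x(P-Q). The extra difference point PmQ is what the
+ * differential addition formulas (xADD and the ladders) require.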
+ */ +typedef struct ec_basis_t +{ + ec_point_t P; + ec_point_t Q; + ec_point_t PmQ; +} ec_basis_t; + +/** @brief An elliptic curve + * + * @typedef ec_curve_t + * + * @struct ec_curve_t + * + * An elliptic curve in projective Montgomery form + */ +typedef struct ec_curve_t +{ + fp2_t A; + fp2_t C; ///< cannot be 0 + ec_point_t A24; // the point (A+2 : 4C) + bool is_A24_computed_and_normalized; // says if A24 has been computed and normalized +} ec_curve_t; + +/** @brief An isogeny of degree a power of 2 + * + * @typedef ec_isog_even_t + * + * @struct ec_isog_even_t + */ +typedef struct ec_isog_even_t +{ + ec_curve_t curve; ///< The domain curve + ec_point_t kernel; ///< A kernel generator + unsigned length; ///< The length as a 2-isogeny walk +} ec_isog_even_t; + +/** @brief Isomorphism of Montgomery curves + * + * @typedef ec_isom_t + * + * @struct ec_isom_t + * + * The isomorphism is given by the map maps (X:Z) ↦ ( (Nx X + Nz Z) : (D Z) ) + */ +typedef struct ec_isom_t +{ + fp2_t Nx; + fp2_t Nz; + fp2_t D; +} ec_isom_t; + +// end ec_t +/** @} + */ + +/** @defgroup ec_curve_t Curves and isomorphisms + * @{ + */ + +// Initalisation for curves and points +void ec_curve_init(ec_curve_t *E); +void ec_point_init(ec_point_t *P); + +/** + * @brief Verify that a Montgomery coefficient is valid + * + * @param A an fp2_t + * + * @return 0 if curve is invalid, 1 otherwise + */ +int ec_curve_verify_A(const fp2_t *A); + +/** + * @brief Initialize an elliptic curve from a coefficient + * + * @param A an fp2_t + * @param E the elliptic curve to initialize + * + * @return 0 if curve is invalid, 1 otherwise + */ +int ec_curve_init_from_A(ec_curve_t *E, const fp2_t *A); + +// Copying points, bases and curves +static inline void +copy_point(ec_point_t *P, const ec_point_t *Q) +{ + fp2_copy(&P->x, &Q->x); + fp2_copy(&P->z, &Q->z); +} + +static inline void +copy_basis(ec_basis_t *B1, const ec_basis_t *B0) +{ + copy_point(&B1->P, &B0->P); + copy_point(&B1->Q, &B0->Q); + copy_point(&B1->PmQ, &B0->PmQ); +} + +static inline void +copy_curve(ec_curve_t *E1, const ec_curve_t *E2) +{ + fp2_copy(&(E1->A), &(E2->A)); + fp2_copy(&(E1->C), &(E2->C)); + E1->is_A24_computed_and_normalized = E2->is_A24_computed_and_normalized; + copy_point(&E1->A24, &E2->A24); +} + +// Functions for working with the A24 point and normalisation + +/** + * @brief Reduce (A : C) to (A/C : 1) in place + * + * @param E a curve + */ +void ec_normalize_curve(ec_curve_t *E); + +/** + * @brief Reduce (A + 2 : 4C) to ((A+2)/4C : 1) in place + * + * @param E a curve + */ +void ec_curve_normalize_A24(ec_curve_t *E); + +/** + * @brief Normalise both (A : C) and (A + 2 : 4C) as above, in place + * + * @param E a curve + */ +void ec_normalize_curve_and_A24(ec_curve_t *E); + +/** + * @brief Given a curve E, compute (A+2 : 4C) + * + * @param A24 the value (A+2 : 4C) to return into + * @param E a curve + */ +static inline void +AC_to_A24(ec_point_t *A24, const ec_curve_t *E) +{ + // Maybe we already have this computed + if (E->is_A24_computed_and_normalized) { + copy_point(A24, &E->A24); + return; + } + + // A24 = (A+2C : 4C) + fp2_add(&A24->z, &E->C, &E->C); + fp2_add(&A24->x, &E->A, &A24->z); + fp2_add(&A24->z, &A24->z, &A24->z); +} + +/** + * @brief Given a curve the point (A+2 : 4C) compute the curve coefficients (A : C) + * + * @param E a curve to compute + * @param A24 the value (A+2 : 4C) + */ +static inline void +A24_to_AC(ec_curve_t *E, const ec_point_t *A24) +{ + // (A:C) = ((A+2C)*2-4C : 4C) + fp2_add(&E->A, &A24->x, &A24->x); + fp2_sub(&E->A, 
&E->A, &A24->z); + fp2_add(&E->A, &E->A, &E->A); + fp2_copy(&E->C, &A24->z); +} + +/** + * @brief j-invariant. + * + * @param j_inv computed j-invariant + * @param curve input curve + */ +void ec_j_inv(fp2_t *j_inv, const ec_curve_t *curve); + +/** + * @brief Isomorphism of elliptic curves + * Takes as input two isomorphic Kummer lines in Montgomery form, and outputs an isomorphism between + * them + * + * @param isom computed isomorphism + * @param from domain curve + * @param to image curve + * @return 0xFFFFFFFF if there was an error during the computation, zero otherwise + */ +uint32_t ec_isomorphism(ec_isom_t *isom, const ec_curve_t *from, const ec_curve_t *to); + +/** + * @brief In-place evaluation of an isomorphism + * + * @param P a point + * @param isom an isomorphism + */ +void ec_iso_eval(ec_point_t *P, ec_isom_t *isom); + +/** @} + */ +/** @defgroup ec_point_t Point operations + * @{ + */ + +/** + * @brief Point equality + * + * @param P a point + * @param Q a point + * @return 0xFFFFFFFF if equal, zero otherwise + */ +uint32_t ec_is_equal(const ec_point_t *P, const ec_point_t *Q); + +/** + * @brief Point at infinity test + * + * @param P a point + * @return 0xFFFFFFFF if point at infinity, zero otherwise + */ +uint32_t ec_is_zero(const ec_point_t *P); + +/** + * @brief Two torsion test + * + * @param P a point + * @param E the elliptic curve + * @return 0xFFFFFFFF if P is 2-torsion but not zero, zero otherwise + */ +uint32_t ec_is_two_torsion(const ec_point_t *P, const ec_curve_t *E); + +/** + * @brief Four torsion test + * + * @param P a point + * @param E the elliptic curve + * @return 0xFFFFFFFF if P has order exactly 4, zero otherwise + */ +uint32_t ec_is_four_torsion(const ec_point_t *P, const ec_curve_t *E); + +/** + * @brief Reduce Z-coordinate of point in place + * + * @param P a point + */ +void ec_normalize_point(ec_point_t *P); + +void xDBL_E0(ec_point_t *Q, const ec_point_t *P); +void xADD(ec_point_t *R, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ); +void xDBL_A24(ec_point_t *Q, const ec_point_t *P, const ec_point_t *A24, const bool A24_normalized); + +/** + * @brief Point doubling + * + * @param res computed double of P + * @param P a point + * @param curve an elliptic curve + */ +void ec_dbl(ec_point_t *res, const ec_point_t *P, const ec_curve_t *curve); + +/** + * @brief Point iterated doubling + * + * @param res computed [2^n]P + * @param P a point + * @param n the number of doublings + * @param curve the curve on which P lies + */ +void ec_dbl_iter(ec_point_t *res, int n, const ec_point_t *P, ec_curve_t *curve); + +/** + * @brief Iterated doubling for a basis P, Q, PmQ + * + * @param res the computed iterated double of basis B + * @param n the number of doublings + * @param B the basis to double + * @param curve the parent curve of the basis + */ +void ec_dbl_iter_basis(ec_basis_t *res, int n, const ec_basis_t *B, ec_curve_t *curve); + +/** + * @brief Point multiplication + * + * @param res computed scalar * P + * @param curve the curve + * @param scalar an unsigned multi-precision integer + * @param P a point + * @param kbits number of bits of the scalar + */ +void ec_mul(ec_point_t *res, const digit_t *scalar, const int kbits, const ec_point_t *P, ec_curve_t *curve); + +/** + * @brief Combination P+m*Q + * + * @param R computed P + m * Q + * @param curve the curve + * @param m an unsigned multi-precision integer + * @param P a point + * @param Q a point + * @param PQ the difference P-Q + * @return 0 if there was an error, 1 otherwise + */ +int
ec_ladder3pt(ec_point_t *R, + const digit_t *m, + const ec_point_t *P, + const ec_point_t *Q, + const ec_point_t *PQ, + const ec_curve_t *curve); + +/** + * @brief Linear combination of points of a basis + * + * @param res computed scalarP * P + scalarQ * Q + * @param scalarP an unsigned multi-precision integer + * @param scalarQ an unsigned multi-precision integer + * @param kbits number of bits of the scalars, or n for points of order 2^n + * @param PQ a torsion basis consisting of points P and Q + * @param curve the curve + * + * @return 0 if there was an error, 1 otherwise + */ +int ec_biscalar_mul(ec_point_t *res, + const digit_t *scalarP, + const digit_t *scalarQ, + const int kbits, + const ec_basis_t *PQ, + const ec_curve_t *curve); + +// end point computations +/** + * @} + */ + +/** @defgroup ec_dlog_t Torsion basis computations + * @{ + */ + +/** + * @brief Generate a 2^f-torsion basis from a Montgomery curve along with a hint + * + * @param PQ2 an ec_basis_t + * @param curve an ec_curve_t + * @param f an integer + * + * @return A hint + * + * The algorithm is deterministic + */ +uint8_t ec_curve_to_basis_2f_to_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f); + +/** + * @brief Generate a 2^f-torsion basis from a Montgomery curve and a given hint + * + * @param PQ2 an ec_basis_t + * @param curve an ec_curve_t + * @param f an integer + * @param hint the hint + * + * @return 1 if the basis is valid, 0 otherwise + * + * The algorithm is deterministic + */ +int ec_curve_to_basis_2f_from_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f, const uint8_t hint); +/** // end basis computations + * @} + */ + +/** @defgroup ec_isog_t Isogenies + * @{ + */ + +/** + * @brief Evaluate isogeny of even degree on list of points. + * Returns 0 if successful and -1 if kernel has the wrong order or includes (0:1). + * + * @param image computed image curve + * @param phi isogeny + * @param points a list of points to evaluate the isogeny on, modified in place + * @param len_points length of the list of points + * + * @return 0 if there was no error, 0xFFFFFFFF otherwise + */ +uint32_t ec_eval_even(ec_curve_t *image, ec_isog_even_t *phi, ec_point_t *points, unsigned len_points); + +/** + * @brief Multiplicative strategy for a short isogeny chain. Returns 1 if successful and -1 + * if kernel has the wrong order or includes (0:1) when special=false. + * + * @param curve domain curve, to be overwritten by the codomain curve. + * @param kernel a kernel generator of order 2^len + * @param len the length of the 2-isogeny chain + * @param points a list of points to evaluate the isogeny on, modified in place + * @param len_points length of the list of points + * @param special if true, allow isogenies with (0:1) in the kernel + * + * @return 0 if there was no error, 0xFFFFFFFF otherwise + */ +uint32_t ec_eval_small_chain(ec_curve_t *curve, + const ec_point_t *kernel, + int len, + ec_point_t *points, + unsigned len_points, + bool special); + +/** + * @brief Recover Y-coordinate from X-coordinate and curve coefficients.
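+ * On the Montgomery model used in this module the recovered y is a square root of
+ * x^3 + (A/C)*x^2 + x, so the return value also signals whether such a root exists.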
+ * + * @param y: a y-coordinate + * @param Px: a x-coordinate + * @param curve: the elliptic curve + * + * @return 0xFFFFFFFF if the point was on the curve, 0 otherwise + */ +uint32_t ec_recover_y(fp2_t *y, const fp2_t *Px, const ec_curve_t *curve); + +// Jacobian point init and copying +void jac_init(jac_point_t *P); +void copy_jac_point(jac_point_t *P, const jac_point_t *Q); + +/** + * @brief Test if two Jacobian points are equal + * + * @param P: a point + * @param Q: a point + * + * @return 0xFFFFFFFF if they are equal, 0 otherwise + */ +uint32_t jac_is_equal(const jac_point_t *P, const jac_point_t *Q); + +// Convert from Jacobian to x-only (just drop the Y-coordinate) +void jac_to_xz(ec_point_t *P, const jac_point_t *xyP); +// Convert from Jacobian coordinates in Montgomery model to Weierstrass +void jac_to_ws(jac_point_t *P, fp2_t *t, fp2_t *ao3, const jac_point_t *Q, const ec_curve_t *curve); +void jac_from_ws(jac_point_t *Q, const jac_point_t *P, const fp2_t *ao3, const ec_curve_t *curve); + +// Jacobian arithmetic +void jac_neg(jac_point_t *Q, const jac_point_t *P); +void ADD(jac_point_t *R, const jac_point_t *P, const jac_point_t *Q, const ec_curve_t *AC); +void DBL(jac_point_t *Q, const jac_point_t *P, const ec_curve_t *AC); +void DBLW(jac_point_t *Q, fp2_t *u, const jac_point_t *P, const fp2_t *t); +void jac_to_xz_add_components(add_components_t *uvw, const jac_point_t *P, const jac_point_t *Q, const ec_curve_t *AC); + +/** + * @brief Given a basis in x-only, lift to a pair of Jacobian points + * + * @param P: a point + * @param Q: a point + * @param B: a basis + * @param E: an elliptic curve + * + * @return 0xFFFFFFFF if there was no error, 0 otherwise + * + * + * Lifts a basis x(P), x(Q), x(P-Q) assuming the curve has (A/C : 1) and + * the point P = (X/Z : 1). 
For generic implementation see lift_basis() + */ +uint32_t lift_basis_normalized(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E); + +/** + * @brief Given a basis in x-only, lift to a pair of Jacobian points + * + * @param P: a point + * @param Q: a point + * @param B: a basis + * @param E: an elliptic curve + * + * @return 0xFFFFFFFF if there was no error, 0 otherwise + */ +uint32_t lift_basis(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E); + +/** + * @brief Check if basis points (P, Q) form a full 4-basis + * + * @param B: a basis + * @param E: an elliptic curve + * + * @return 0xFFFFFFFF if they form a basis, 0 otherwise + */ +uint32_t ec_is_basis_four_torsion(const ec_basis_t *B, const ec_curve_t *E); + +/* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Test functions for printing and order checking, only used in debug mode + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + */ + +/** + * @brief Check if a point (X : Z) has order exactly 2^t + * + * @param P: a point + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_point_order_twof(const ec_point_t *P, const ec_curve_t *E, int t) +{ + ec_point_t test; + ec_curve_t curve; + test = *P; + copy_curve(&curve, E); + + if (ec_is_zero(&test)) + return 0; + // Scale point by 2^(t-1) + ec_dbl_iter(&test, t - 1, &test, &curve); + // If it's zero now, it doesnt have order 2^t + if (ec_is_zero(&test)) + return 0; + // Ensure [2^t] P = 0 + ec_dbl(&test, &test, &curve); + return ec_is_zero(&test); +} + +/** + * @brief Check if basis points (P, Q, PmQ) all have order exactly 2^t + * + * @param B: a basis + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_basis_order_twof(const ec_basis_t *B, const ec_curve_t *E, int t) +{ + int check_P = test_point_order_twof(&B->P, E, t); + int check_Q = test_point_order_twof(&B->Q, E, t); + int check_PmQ = test_point_order_twof(&B->PmQ, E, t); + + return check_P & check_Q & check_PmQ; +} + +/** + * @brief Check if a Jacobian point (X : Y : Z) has order exactly 2^f + * + * @param P: a point + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_jac_order_twof(const jac_point_t *P, const ec_curve_t *E, int t) +{ + jac_point_t test; + test = *P; + if (fp2_is_zero(&test.z)) + return 0; + for (int i = 0; i < t - 1; i++) { + DBL(&test, &test, E); + } + if (fp2_is_zero(&test.z)) + return 0; + DBL(&test, &test, E); + return (fp2_is_zero(&test.z)); +} + +// Prints the x-coordinate of the point (X : 1) +static void +ec_point_print(const char *name, ec_point_t P) +{ + fp2_t a; + if (fp2_is_zero(&P.z)) { + printf("%s = INF\n", name); + } else { + fp2_copy(&a, &P.z); + fp2_inv(&a); + fp2_mul(&a, &a, &P.x); + fp2_print(name, &a); + } +} + +// Prints the Montgomery coefficient A +static void +ec_curve_print(const char *name, ec_curve_t E) +{ + fp2_t a; + fp2_copy(&a, &E.C); + fp2_inv(&a); + fp2_mul(&a, &a, &E.A); + fp2_print(name, &a); +} + +#endif +// end isogeny computations +/** + * @} + */ + +// end ec +/** + * @} + */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec_jac.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec_jac.c new file mode 100644 index 0000000000..20ca68c9b2 --- /dev/null +++ 
b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec_jac.c @@ -0,0 +1,335 @@ +#include +#include + +void +jac_init(jac_point_t *P) +{ // Initialize Montgomery in Jacobian coordinates as identity element (0:1:0) + fp2_set_zero(&P->x); + fp2_set_one(&P->y); + fp2_set_zero(&P->z); +} + +uint32_t +jac_is_equal(const jac_point_t *P, const jac_point_t *Q) +{ // Evaluate if two points in Jacobian coordinates (X:Y:Z) are equal + // Returns 1 (true) if P=Q, 0 (false) otherwise + fp2_t t0, t1, t2, t3; + + fp2_sqr(&t0, &Q->z); + fp2_mul(&t2, &P->x, &t0); // x1*z2^2 + fp2_sqr(&t1, &P->z); + fp2_mul(&t3, &Q->x, &t1); // x2*z1^2 + fp2_sub(&t2, &t2, &t3); + + fp2_mul(&t0, &t0, &Q->z); + fp2_mul(&t0, &P->y, &t0); // y1*z2^3 + fp2_mul(&t1, &t1, &P->z); + fp2_mul(&t1, &Q->y, &t1); // y2*z1^3 + fp2_sub(&t0, &t0, &t1); + + return fp2_is_zero(&t0) & fp2_is_zero(&t2); +} + +void +jac_to_xz(ec_point_t *P, const jac_point_t *xyP) +{ + fp2_copy(&P->x, &xyP->x); + fp2_copy(&P->z, &xyP->z); + fp2_sqr(&P->z, &P->z); + + // If xyP = (0:1:0), we currently have P=(0 : 0) but we want to set P=(1:0) + uint32_t c1, c2; + fp2_t one; + fp2_set_one(&one); + + c1 = fp2_is_zero(&P->x); + c2 = fp2_is_zero(&P->z); + fp2_select(&P->x, &P->x, &one, c1 & c2); +} + +void +jac_to_ws(jac_point_t *Q, fp2_t *t, fp2_t *ao3, const jac_point_t *P, const ec_curve_t *curve) +{ + // Cost of 3M + 2S when A != 0. + fp_t one; + fp2_t a; + /* a = 1 - A^2/3, U = X + (A*Z^2)/3, V = Y, W = Z, T = a*Z^4*/ + fp_set_one(&one); + if (!fp2_is_zero(&(curve->A))) { + fp_div3(&(ao3->re), &(curve->A.re)); + fp_div3(&(ao3->im), &(curve->A.im)); + fp2_sqr(t, &P->z); + fp2_mul(&Q->x, ao3, t); + fp2_add(&Q->x, &Q->x, &P->x); + fp2_sqr(t, t); + fp2_mul(&a, ao3, &(curve->A)); + fp_sub(&(a.re), &one, &(a.re)); + fp_neg(&(a.im), &(a.im)); + fp2_mul(t, t, &a); + } else { + fp2_copy(&Q->x, &P->x); + fp2_sqr(t, &P->z); + fp2_sqr(t, t); + } + fp2_copy(&Q->y, &P->y); + fp2_copy(&Q->z, &P->z); +} + +void +jac_from_ws(jac_point_t *Q, const jac_point_t *P, const fp2_t *ao3, const ec_curve_t *curve) +{ + // Cost of 1M + 1S when A != 0. + fp2_t t; + /* X = U - (A*W^2)/3, Y = V, Z = W. */ + if (!fp2_is_zero(&(curve->A))) { + fp2_sqr(&t, &P->z); + fp2_mul(&t, &t, ao3); + fp2_sub(&Q->x, &P->x, &t); + } + fp2_copy(&Q->y, &P->y); + fp2_copy(&Q->z, &P->z); +} + +void +copy_jac_point(jac_point_t *P, const jac_point_t *Q) +{ + fp2_copy(&(P->x), &(Q->x)); + fp2_copy(&(P->y), &(Q->y)); + fp2_copy(&(P->z), &(Q->z)); +} + +void +jac_neg(jac_point_t *Q, const jac_point_t *P) +{ + fp2_copy(&Q->x, &P->x); + fp2_neg(&Q->y, &P->y); + fp2_copy(&Q->z, &P->z); +} + +void +DBL(jac_point_t *Q, const jac_point_t *P, const ec_curve_t *AC) +{ // Cost of 6M + 6S. 
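+    // (operation counts are in fp2 arithmetic: M = multiplication, S = squaring)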
+ // Doubling on a Montgomery curve, representation in Jacobian coordinates (X:Y:Z) corresponding to + // (X/Z^2,Y/Z^3) This version receives the coefficient value A + fp2_t t0, t1, t2, t3; + + uint32_t flag = fp2_is_zero(&P->x) & fp2_is_zero(&P->z); + + fp2_sqr(&t0, &P->x); // t0 = x1^2 + fp2_add(&t1, &t0, &t0); + fp2_add(&t0, &t0, &t1); // t0 = 3x1^2 + fp2_sqr(&t1, &P->z); // t1 = z1^2 + fp2_mul(&t2, &P->x, &AC->A); + fp2_add(&t2, &t2, &t2); // t2 = 2Ax1 + fp2_add(&t2, &t1, &t2); // t2 = 2Ax1+z1^2 + fp2_mul(&t2, &t1, &t2); // t2 = z1^2(2Ax1+z1^2) + fp2_add(&t2, &t0, &t2); // t2 = alpha = 3x1^2 + z1^2(2Ax1+z1^2) + fp2_mul(&Q->z, &P->y, &P->z); + fp2_add(&Q->z, &Q->z, &Q->z); // z2 = 2y1z1 + fp2_sqr(&t0, &Q->z); + fp2_mul(&t0, &t0, &AC->A); // t0 = 4Ay1^2z1^2 + fp2_sqr(&t1, &P->y); + fp2_add(&t1, &t1, &t1); // t1 = 2y1^2 + fp2_add(&t3, &P->x, &P->x); // t3 = 2x1 + fp2_mul(&t3, &t1, &t3); // t3 = 4x1y1^2 + fp2_sqr(&Q->x, &t2); // x2 = alpha^2 + fp2_sub(&Q->x, &Q->x, &t0); // x2 = alpha^2 - 4Ay1^2z1^2 + fp2_sub(&Q->x, &Q->x, &t3); + fp2_sub(&Q->x, &Q->x, &t3); // x2 = alpha^2 - 4Ay1^2z1^2 - 8x1y1^2 + fp2_sub(&Q->y, &t3, &Q->x); // y2 = 4x1y1^2 - x2 + fp2_mul(&Q->y, &Q->y, &t2); // y2 = alpha(4x1y1^2 - x2) + fp2_sqr(&t1, &t1); // t1 = 4y1^4 + fp2_sub(&Q->y, &Q->y, &t1); + fp2_sub(&Q->y, &Q->y, &t1); // y2 = alpha(4x1y1^2 - x2) - 8y1^4 + + fp2_select(&Q->x, &Q->x, &P->x, -flag); + fp2_select(&Q->z, &Q->z, &P->z, -flag); +} + +void +DBLW(jac_point_t *Q, fp2_t *u, const jac_point_t *P, const fp2_t *t) +{ // Cost of 3M + 5S. + // Doubling on a Weierstrass curve, representation in modified Jacobian coordinates + // (X:Y:Z:T=a*Z^4) corresponding to (X/Z^2,Y/Z^3), where a is the curve coefficient. + // Formula from https://hyperelliptic.org/EFD/g1p/auto-shortw-modified.html + + uint32_t flag = fp2_is_zero(&P->x) & fp2_is_zero(&P->z); + + fp2_t xx, c, cc, r, s, m; + // XX = X^2 + fp2_sqr(&xx, &P->x); + // A = 2*Y^2 + fp2_sqr(&c, &P->y); + fp2_add(&c, &c, &c); + // AA = A^2 + fp2_sqr(&cc, &c); + // R = 2*AA + fp2_add(&r, &cc, &cc); + // S = (X+A)^2-XX-AA + fp2_add(&s, &P->x, &c); + fp2_sqr(&s, &s); + fp2_sub(&s, &s, &xx); + fp2_sub(&s, &s, &cc); + // M = 3*XX+T1 + fp2_add(&m, &xx, &xx); + fp2_add(&m, &m, &xx); + fp2_add(&m, &m, t); + // X3 = M^2-2*S + fp2_sqr(&Q->x, &m); + fp2_sub(&Q->x, &Q->x, &s); + fp2_sub(&Q->x, &Q->x, &s); + // Z3 = 2*Y*Z + fp2_mul(&Q->z, &P->y, &P->z); + fp2_add(&Q->z, &Q->z, &Q->z); + // Y3 = M*(S-X3)-R + fp2_sub(&Q->y, &s, &Q->x); + fp2_mul(&Q->y, &Q->y, &m); + fp2_sub(&Q->y, &Q->y, &r); + // T3 = 2*R*T1 + fp2_mul(u, t, &r); + fp2_add(u, u, u); + + fp2_select(&Q->x, &Q->x, &P->x, -flag); + fp2_select(&Q->z, &Q->z, &P->z, -flag); +} + +void +select_jac_point(jac_point_t *Q, const jac_point_t *P1, const jac_point_t *P2, const digit_t option) +{ // Select points + // If option = 0 then Q <- P1, else if option = 0xFF...FF then Q <- P2 + fp2_select(&(Q->x), &(P1->x), &(P2->x), option); + fp2_select(&(Q->y), &(P1->y), &(P2->y), option); + fp2_select(&(Q->z), &(P1->z), &(P2->z), option); +} + +void +ADD(jac_point_t *R, const jac_point_t *P, const jac_point_t *Q, const ec_curve_t *AC) +{ + // Addition on a Montgomery curve, representation in Jacobian coordinates (X:Y:Z) corresponding + // to (x,y) = (X/Z^2,Y/Z^3) This version receives the coefficient value A + // + // Complete routine, to handle all edge cases: + // if ZP == 0: # P == inf + // return Q + // if ZQ == 0: # Q == inf + // return P + // dy <- YQ*ZP**3 - YP*ZQ**3 + // dx <- XQ*ZP**2 - XP*ZQ**2 + // if dx == 0: # x1 == x2 + 
// if dy == 0: # ... and y1 == y2: doubling case + // dy <- ZP*ZQ * (3*XP^2 + ZP^2 * (2*A*XP + ZP^2)) + // dx <- 2*YP*ZP + // else: # ... but y1 != y2, thus P = -Q + // return inf + // XR <- dy**2 - dx**2 * (A*ZP^2*ZQ^2 + XP*ZQ^2 + XQ*ZP^2) + // YR <- dy * (XP*ZQ^2 * dx^2 - XR) - YP*ZQ^3 * dx^3 + // ZR <- dx * ZP * ZQ + + // Constant time processing: + // - The case for P == 0 or Q == 0 is handled at the end with conditional select + // - dy and dx are computed for both the normal and doubling cases, we switch when + // dx == dy == 0 for the normal case. + // - If we have that P = -Q then dx = 0 and so ZR will be zero, giving us the point + // at infinity for "free". + // + // These current formula are expensive and I'm probably missing some tricks... + // Thought I'd get the ball rolling. + // Cost 17M + 6S + 13a + fp2_t t0, t1, t2, t3, u1, u2, v1, dx, dy; + + /* If P is zero or Q is zero we will conditionally swap before returning. */ + uint32_t ctl1 = fp2_is_zero(&P->z); + uint32_t ctl2 = fp2_is_zero(&Q->z); + + /* Precompute some values */ + fp2_sqr(&t0, &P->z); // t0 = z1^2 + fp2_sqr(&t1, &Q->z); // t1 = z2^2 + + /* Compute dy and dx for ordinary case */ + fp2_mul(&v1, &t1, &Q->z); // v1 = z2^3 + fp2_mul(&t2, &t0, &P->z); // t2 = z1^3 + fp2_mul(&v1, &v1, &P->y); // v1 = y1z2^3 + fp2_mul(&t2, &t2, &Q->y); // t2 = y2z1^3 + fp2_sub(&dy, &t2, &v1); // dy = y2z1^3 - y1z2^3 + fp2_mul(&u2, &t0, &Q->x); // u2 = x2z1^2 + fp2_mul(&u1, &t1, &P->x); // u1 = x1z2^2 + fp2_sub(&dx, &u2, &u1); // dx = x2z1^2 - x1z2^2 + + /* Compute dy and dx for doubling case */ + fp2_add(&t1, &P->y, &P->y); // dx_dbl = t1 = 2y1 + fp2_add(&t2, &AC->A, &AC->A); // t2 = 2A + fp2_mul(&t2, &t2, &P->x); // t2 = 2Ax1 + fp2_add(&t2, &t2, &t0); // t2 = 2Ax1 + z1^2 + fp2_mul(&t2, &t2, &t0); // t2 = z1^2 * (2Ax1 + z1^2) + fp2_sqr(&t0, &P->x); // t0 = x1^2 + fp2_add(&t2, &t2, &t0); // t2 = x1^2 + z1^2 * (2Ax1 + z1^2) + fp2_add(&t2, &t2, &t0); // t2 = 2*x1^2 + z1^2 * (2Ax1 + z1^2) + fp2_add(&t2, &t2, &t0); // t2 = 3*x1^2 + z1^2 * (2Ax1 + z1^2) + fp2_mul(&t2, &t2, &Q->z); // dy_dbl = t2 = z2 * (3*x1^2 + z1^2 * (2Ax1 + z1^2)) + + /* If dx is zero and dy is zero swap with double variables */ + uint32_t ctl = fp2_is_zero(&dx) & fp2_is_zero(&dy); + fp2_select(&dx, &dx, &t1, ctl); + fp2_select(&dy, &dy, &t2, ctl); + + /* Some more precomputations */ + fp2_mul(&t0, &P->z, &Q->z); // t0 = z1z2 + fp2_sqr(&t1, &t0); // t1 = z1z2^2 + fp2_sqr(&t2, &dx); // t2 = dx^2 + fp2_sqr(&t3, &dy); // t3 = dy^2 + + /* Compute x3 = dy**2 - dx**2 * (A*ZP^2*ZQ^2 + XP*ZQ^2 + XQ*ZP^2) */ + fp2_mul(&R->x, &AC->A, &t1); // x3 = A*(z1z2)^2 + fp2_add(&R->x, &R->x, &u1); // x3 = A*(z1z2)^2 + u1 + fp2_add(&R->x, &R->x, &u2); // x3 = A*(z1z2)^2 + u1 + u2 + fp2_mul(&R->x, &R->x, &t2); // x3 = dx^2 * (A*(z1z2)^2 + u1 + u2) + fp2_sub(&R->x, &t3, &R->x); // x3 = dy^2 - dx^2 * (A*(z1z2)^2 + u1 + u2) + + /* Compute y3 = dy * (XP*ZQ^2 * dx^2 - XR) - YP*ZQ^3 * dx^3*/ + fp2_mul(&R->y, &u1, &t2); // y3 = u1 * dx^2 + fp2_sub(&R->y, &R->y, &R->x); // y3 = u1 * dx^2 - x3 + fp2_mul(&R->y, &R->y, &dy); // y3 = dy * (u1 * dx^2 - x3) + fp2_mul(&t3, &t2, &dx); // t3 = dx^3 + fp2_mul(&t3, &t3, &v1); // t3 = v1 * dx^3 + fp2_sub(&R->y, &R->y, &t3); // y3 = dy * (u1 * dx^2 - x3) - v1 * dx^3 + + /* Compute z3 = dx * z1 * z2 */ + fp2_mul(&R->z, &dx, &t0); + + /* Finally, we need to set R = P is Q.Z = 0 and R = Q if P.Z = 0 */ + select_jac_point(R, R, Q, ctl1); + select_jac_point(R, R, P, ctl2); +} + +void +jac_to_xz_add_components(add_components_t *add_comp, const jac_point_t *P, const 
jac_point_t *Q, const ec_curve_t *AC) +{ + // Take P and Q in E distinct, two jac_point_t, return three components u,v and w in Fp2 such + // that the xz coordinates of P+Q are (u-v:w) and of P-Q are (u+v:w) + + fp2_t t0, t1, t2, t3, t4, t5, t6; + + fp2_sqr(&t0, &P->z); // t0 = z1^2 + fp2_sqr(&t1, &Q->z); // t1 = z2^2 + fp2_mul(&t2, &P->x, &t1); // t2 = x1z2^2 + fp2_mul(&t3, &t0, &Q->x); // t3 = z1^2x2 + fp2_mul(&t4, &P->y, &Q->z); // t4 = y1z2 + fp2_mul(&t4, &t4, &t1); // t4 = y1z2^3 + fp2_mul(&t5, &P->z, &Q->y); // t5 = z1y2 + fp2_mul(&t5, &t5, &t0); // t5 = z1^3y2 + fp2_mul(&t0, &t0, &t1); // t0 = (z1z2)^2 + fp2_mul(&t6, &t4, &t5); // t6 = (z1z_2)^3y1y2 + fp2_add(&add_comp->v, &t6, &t6); // v = 2(z1z_2)^3y1y2 + fp2_sqr(&t4, &t4); // t4 = y1^2z2^6 + fp2_sqr(&t5, &t5); // t5 = z1^6y_2^2 + fp2_add(&t4, &t4, &t5); // t4 = z1^6y_2^2 + y1^2z2^6 + fp2_add(&t5, &t2, &t3); // t5 = x1z2^2 +z_1^2x2 + fp2_add(&t6, &t3, &t3); // t6 = 2z_1^2x2 + fp2_sub(&t6, &t5, &t6); // t6 = lambda = x1z2^2 - z_1^2x2 + fp2_sqr(&t6, &t6); // t6 = lambda^2 = (x1z2^2 - z_1^2x2)^2 + fp2_mul(&t1, &AC->A, &t0); // t1 = A*(z1z2)^2 + fp2_add(&t1, &t5, &t1); // t1 = gamma =A*(z1z2)^2 + x1z2^2 +z_1^2x2 + fp2_mul(&t1, &t1, &t6); // t1 = gamma*lambda^2 + fp2_sub(&add_comp->u, &t4, &t1); // u = z1^6y_2^2 + y1^2z2^6 - gamma*lambda^2 + fp2_mul(&add_comp->w, &t6, &t0); // w = (z1z2)^2(lambda)^2 +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec_params.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec_params.c new file mode 100644 index 0000000000..5011f102e1 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec_params.c @@ -0,0 +1,4 @@ +#include +// p+1 divided by the power of 2 +const digit_t p_cofactor_for_2f[1] = {5}; + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec_params.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec_params.h new file mode 100644 index 0000000000..e02ac1d146 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec_params.h @@ -0,0 +1,12 @@ +#ifndef EC_PARAMS_H +#define EC_PARAMS_H + +#include + +#define TORSION_EVEN_POWER 248 + +// p+1 divided by the power of 2 +extern const digit_t p_cofactor_for_2f[1]; +#define P_COFACTOR_FOR_2F_BITLENGTH 3 + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/encode_signature.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/encode_signature.c new file mode 100644 index 0000000000..112c695941 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/encode_signature.c @@ -0,0 +1,208 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +typedef unsigned char byte_t; + +// digits + +static void +encode_digits(byte_t *enc, const digit_t *x, size_t nbytes) +{ +#ifdef TARGET_BIG_ENDIAN + const size_t ndigits = nbytes / sizeof(digit_t); + const size_t rem = nbytes % sizeof(digit_t); + + for (size_t i = 0; i < ndigits; i++) + ((digit_t *)enc)[i] = BSWAP_DIGIT(x[i]); + if (rem) { + digit_t ld = BSWAP_DIGIT(x[ndigits]); + memcpy(enc + ndigits * sizeof(digit_t), (byte_t *)&ld, rem); + } +#else + memcpy(enc, (const byte_t *)x, nbytes); +#endif +} + +static void +decode_digits(digit_t *x, const byte_t *enc, size_t nbytes, size_t ndigits) +{ + assert(nbytes <= ndigits * sizeof(digit_t)); + memcpy((byte_t *)x, enc, nbytes); + memset((byte_t *)x + nbytes, 0, ndigits * sizeof(digit_t) - nbytes); + +#ifdef TARGET_BIG_ENDIAN + for (size_t i = 0; i < ndigits; i++) + x[i] = BSWAP_DIGIT(x[i]); +#endif +} + +// ibz_t + +static 
byte_t * +ibz_to_bytes(byte_t *enc, const ibz_t *x, size_t nbytes, bool sgn) +{ +#ifndef NDEBUG + { + // make sure there is enough space + ibz_t abs, bnd; + ibz_init(&bnd); + ibz_init(&abs); + ibz_pow(&bnd, &ibz_const_two, 8 * nbytes - sgn); + ibz_abs(&abs, x); + assert(ibz_cmp(&abs, &bnd) < 0); + ibz_finalize(&bnd); + ibz_finalize(&abs); + } +#endif + const size_t digits = (nbytes + sizeof(digit_t) - 1) / sizeof(digit_t); + digit_t d[digits]; + memset(d, 0, sizeof(d)); + if (ibz_cmp(x, &ibz_const_zero) >= 0) { + // non-negative, straightforward. + ibz_to_digits(d, x); + } else { + assert(sgn); + // negative; use two's complement. + ibz_t tmp; + ibz_init(&tmp); + ibz_neg(&tmp, x); + ibz_sub(&tmp, &tmp, &ibz_const_one); + ibz_to_digits(d, &tmp); + for (size_t i = 0; i < digits; ++i) + d[i] = ~d[i]; +#ifndef NDEBUG + { + // make sure the result is correct + ibz_t chk; + ibz_init(&chk); + ibz_copy_digit_array(&tmp, d); + ibz_sub(&tmp, &tmp, x); + ibz_pow(&chk, &ibz_const_two, 8 * sizeof(d)); + assert(!ibz_cmp(&tmp, &chk)); + ibz_finalize(&chk); + } +#endif + ibz_finalize(&tmp); + } + encode_digits(enc, d, nbytes); + return enc + nbytes; +} + +static const byte_t * +ibz_from_bytes(ibz_t *x, const byte_t *enc, size_t nbytes, bool sgn) +{ + assert(nbytes > 0); + const size_t ndigits = (nbytes + sizeof(digit_t) - 1) / sizeof(digit_t); + assert(ndigits > 0); + digit_t d[ndigits]; + memset(d, 0, sizeof(d)); + decode_digits(d, enc, nbytes, ndigits); + if (sgn && enc[nbytes - 1] >> 7) { + // negative, decode two's complement + const size_t s = sizeof(digit_t) - 1 - (sizeof(d) - nbytes); + assert(s < sizeof(digit_t)); + d[ndigits - 1] |= ((digit_t)-1) >> 8 * s << 8 * s; + for (size_t i = 0; i < ndigits; ++i) + d[i] = ~d[i]; + ibz_copy_digits(x, d, ndigits); + ibz_add(x, x, &ibz_const_one); + ibz_neg(x, x); + } else { + // non-negative + ibz_copy_digits(x, d, ndigits); + } + return enc + nbytes; +} + +// public API + +void +secret_key_to_bytes(byte_t *enc, const secret_key_t *sk, const public_key_t *pk) +{ +#ifndef NDEBUG + byte_t *const start = enc; +#endif + + enc = public_key_to_bytes(enc, pk); + +#ifndef NDEBUG + { + fp2_t lhs, rhs; + fp2_mul(&lhs, &sk->curve.A, &pk->curve.C); + fp2_mul(&rhs, &sk->curve.C, &pk->curve.A); + assert(fp2_is_equal(&lhs, &rhs)); + } +#endif + + enc = ibz_to_bytes(enc, &sk->secret_ideal.norm, FP_ENCODED_BYTES, false); + { + quat_alg_elem_t gen; + quat_alg_elem_init(&gen); + int ret UNUSED = quat_lideal_generator(&gen, &sk->secret_ideal, &QUATALG_PINFTY); + assert(ret); + // we skip encoding the denominator since it won't change the generated ideal +#ifndef NDEBUG + { + // let's make sure that the denominator is indeed coprime to the norm of the ideal + ibz_t gcd; + ibz_init(&gcd); + ibz_gcd(&gcd, &gen.denom, &sk->secret_ideal.norm); + assert(!ibz_cmp(&gcd, &ibz_const_one)); + ibz_finalize(&gcd); + } +#endif + enc = ibz_to_bytes(enc, &gen.coord[0], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord[1], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord[2], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord[3], FP_ENCODED_BYTES, true); + quat_alg_elem_finalize(&gen); + } + + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][1], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][1], TORSION_2POWER_BYTES, false); + + assert(enc - 
start == SECRETKEY_BYTES); +} + +void +secret_key_from_bytes(secret_key_t *sk, public_key_t *pk, const byte_t *enc) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + + enc = public_key_from_bytes(pk, enc); + + { + ibz_t norm; + ibz_init(&norm); + quat_alg_elem_t gen; + quat_alg_elem_init(&gen); + enc = ibz_from_bytes(&norm, enc, FP_ENCODED_BYTES, false); + enc = ibz_from_bytes(&gen.coord[0], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord[1], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord[2], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord[3], enc, FP_ENCODED_BYTES, true); + quat_lideal_create(&sk->secret_ideal, &gen, &norm, &MAXORD_O0, &QUATALG_PINFTY); + ibz_finalize(&norm); + quat_alg_elem_finalize(&gen); + } + + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][1], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][1], enc, TORSION_2POWER_BYTES, false); + + assert(enc - start == SECRETKEY_BYTES); + + sk->curve = pk->curve; + ec_curve_to_basis_2f_from_hint(&sk->canonical_basis, &sk->curve, TORSION_EVEN_POWER, pk->hint_pk); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/encode_verification.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/encode_verification.c new file mode 100644 index 0000000000..fecdb9c259 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/encode_verification.c @@ -0,0 +1,220 @@ +#include +#include +#include +#include +#include +#include + +typedef unsigned char byte_t; + +// digits + +static void +encode_digits(byte_t *enc, const digit_t *x, size_t nbytes) +{ +#ifdef TARGET_BIG_ENDIAN + const size_t ndigits = nbytes / sizeof(digit_t); + const size_t rem = nbytes % sizeof(digit_t); + + for (size_t i = 0; i < ndigits; i++) + ((digit_t *)enc)[i] = BSWAP_DIGIT(x[i]); + if (rem) { + digit_t ld = BSWAP_DIGIT(x[ndigits]); + memcpy(enc + ndigits * sizeof(digit_t), (byte_t *)&ld, rem); + } +#else + memcpy(enc, (const byte_t *)x, nbytes); +#endif +} + +static void +decode_digits(digit_t *x, const byte_t *enc, size_t nbytes, size_t ndigits) +{ + assert(nbytes <= ndigits * sizeof(digit_t)); + memcpy((byte_t *)x, enc, nbytes); + memset((byte_t *)x + nbytes, 0, ndigits * sizeof(digit_t) - nbytes); + +#ifdef TARGET_BIG_ENDIAN + for (size_t i = 0; i < ndigits; i++) + x[i] = BSWAP_DIGIT(x[i]); +#endif +} + +// fp2_t + +static byte_t * +fp2_to_bytes(byte_t *enc, const fp2_t *x) +{ + fp2_encode(enc, x); + return enc + FP2_ENCODED_BYTES; +} + +static const byte_t * +fp2_from_bytes(fp2_t *x, const byte_t *enc) +{ + fp2_decode(x, enc); + return enc + FP2_ENCODED_BYTES; +} + +// curves and points + +static byte_t * +proj_to_bytes(byte_t *enc, const fp2_t *x, const fp2_t *z) +{ + assert(!fp2_is_zero(z)); + fp2_t tmp = *z; + fp2_inv(&tmp); +#ifndef NDEBUG + { + fp2_t chk; + fp2_mul(&chk, z, &tmp); + fp2_t one; + fp2_set_one(&one); + assert(fp2_is_equal(&chk, &one)); + } +#endif + fp2_mul(&tmp, x, &tmp); + enc = fp2_to_bytes(enc, &tmp); + return enc; +} + +static const byte_t * +proj_from_bytes(fp2_t *x, fp2_t *z, const byte_t *enc) +{ + enc = fp2_from_bytes(x, enc); + fp2_set_one(z); + return enc; +} + +static byte_t * +ec_curve_to_bytes(byte_t *enc, const ec_curve_t *curve) +{ + return proj_to_bytes(enc, &curve->A, &curve->C); +} + +static const byte_t * 
+ec_curve_from_bytes(ec_curve_t *curve, const byte_t *enc) +{ + memset(curve, 0, sizeof(*curve)); + return proj_from_bytes(&curve->A, &curve->C, enc); +} + +static byte_t * +ec_point_to_bytes(byte_t *enc, const ec_point_t *point) +{ + return proj_to_bytes(enc, &point->x, &point->z); +} + +static const byte_t * +ec_point_from_bytes(ec_point_t *point, const byte_t *enc) +{ + return proj_from_bytes(&point->x, &point->z, enc); +} + +static byte_t * +ec_basis_to_bytes(byte_t *enc, const ec_basis_t *basis) +{ + enc = ec_point_to_bytes(enc, &basis->P); + enc = ec_point_to_bytes(enc, &basis->Q); + enc = ec_point_to_bytes(enc, &basis->PmQ); + return enc; +} + +static const byte_t * +ec_basis_from_bytes(ec_basis_t *basis, const byte_t *enc) +{ + enc = ec_point_from_bytes(&basis->P, enc); + enc = ec_point_from_bytes(&basis->Q, enc); + enc = ec_point_from_bytes(&basis->PmQ, enc); + return enc; +} + +// public API + +byte_t * +public_key_to_bytes(byte_t *enc, const public_key_t *pk) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + enc = ec_curve_to_bytes(enc, &pk->curve); + *enc++ = pk->hint_pk; + assert(enc - start == PUBLICKEY_BYTES); + return enc; +} + +const byte_t * +public_key_from_bytes(public_key_t *pk, const byte_t *enc) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + enc = ec_curve_from_bytes(&pk->curve, enc); + pk->hint_pk = *enc++; + assert(enc - start == PUBLICKEY_BYTES); + return enc; +} + +void +signature_to_bytes(byte_t *enc, const signature_t *sig) +{ +#ifndef NDEBUG + byte_t *const start = enc; +#endif + + enc = fp2_to_bytes(enc, &sig->E_aux_A); + + *enc++ = sig->backtracking; + *enc++ = sig->two_resp_length; + + size_t nbytes = (SQIsign_response_length + 9) / 8; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[0][0], nbytes); + enc += nbytes; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[0][1], nbytes); + enc += nbytes; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[1][0], nbytes); + enc += nbytes; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[1][1], nbytes); + enc += nbytes; + + nbytes = SECURITY_BITS / 8; + encode_digits(enc, sig->chall_coeff, nbytes); + enc += nbytes; + + *enc++ = sig->hint_aux; + *enc++ = sig->hint_chall; + + assert(enc - start == SIGNATURE_BYTES); +} + +void +signature_from_bytes(signature_t *sig, const byte_t *enc) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + + enc = fp2_from_bytes(&sig->E_aux_A, enc); + + sig->backtracking = *enc++; + sig->two_resp_length = *enc++; + + size_t nbytes = (SQIsign_response_length + 9) / 8; + decode_digits(sig->mat_Bchall_can_to_B_chall[0][0], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + decode_digits(sig->mat_Bchall_can_to_B_chall[0][1], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + decode_digits(sig->mat_Bchall_can_to_B_chall[1][0], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + decode_digits(sig->mat_Bchall_can_to_B_chall[1][1], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + + nbytes = SECURITY_BITS / 8; + decode_digits(sig->chall_coeff, enc, nbytes, NWORDS_ORDER); + enc += nbytes; + + sig->hint_aux = *enc++; + sig->hint_chall = *enc++; + + assert(enc - start == SIGNATURE_BYTES); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/encoded_sizes.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/encoded_sizes.h new file mode 100644 index 0000000000..02f8642967 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/encoded_sizes.h @@ -0,0 +1,11 @@ +#define SECURITY_BITS 128 +#define SQIsign_response_length 126 +#define 
HASH_ITERATIONS 64 +#define FP_ENCODED_BYTES 32 +#define FP2_ENCODED_BYTES 64 +#define EC_CURVE_ENCODED_BYTES 64 +#define EC_POINT_ENCODED_BYTES 64 +#define EC_BASIS_ENCODED_BYTES 192 +#define PUBLICKEY_BYTES 65 +#define SECRETKEY_BYTES 353 +#define SIGNATURE_BYTES 148 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c new file mode 100644 index 0000000000..abeddc30a7 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c @@ -0,0 +1,3336 @@ +#include +#include +#include +const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x199, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6} +#elif RADIX == 32 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x19, 0x0, 0x0, 0x300000000000000} +#else +{0xc, 0x0, 0x0, 0x0, 0x400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x107, 0xc, 0x1890, 0xf2a, 0x52b, 0xb68, 0x152d, 0xa4c, 0x1054, 0x642, 0x36a, 
0x6f8, 0x7ad, 0x146c, 0x1d66, 0x1b67, 0x236, 0x10d, 0x1933, 0x3} +#elif RADIX == 32 +{0x3020e, 0xb795624, 0x5ab6829, 0x1514995, 0x1b5190a, 0x187ad37c, 0x19facd46, 0x8688db6, 0x3c998} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x52b795624001810, 0x8c8505452654b56d, 0xf59a8d87ad37c0da, 0x24e4cc21a236db3} +#else +{0x5bcab12000c08, 0x452654b56d052, 0x26f81b5190a0a, 0x36cfd66a361eb, 0x12726610d11b} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1f87, 0x83e, 0x32e, 0xe58, 0xd9d, 0x1416, 0x752, 0x13b4, 0x1efa, 0xe62, 0x12f5, 0x1907, 0x1814, 0x1ddd, 0x1aa6, 0x1420, 0x2cd, 0x1431, 0x1be2, 0x7} +#elif RADIX == 32 +{0x120fbf0f, 0x1d72c0cb, 0xa54166c, 0x1bea7687, 0x197ab98b, 0x1b814c83, 0x8354ddd, 0x188b368, 0x2df15} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xcd9d72c0cb907df8, 0x5cc5efa9da1d4a82, 0x6a9bbbb814c83cbd, 0x26ef8a8622cda10} +#else +{0x6b96065c83efc, 0x29da1d4a82cd9, 0x190797ab98bdf, 0x6841aa6eeee05, 0x1377c5431166} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x5ff, 0x1783, 0xadc, 0x775, 0xad4, 0x593, 0xb4c, 0x21e, 0x1cb2, 0x13d8, 0x179f, 0x680, 0x1a9c, 0x1824, 0x118e, 0x13d9, 0x24, 0x1956, 0x1dd2, 0x9} +#elif RADIX == 32 +{0x5e0cbff, 0x143baab7, 0x9859356, 0x12c843cb, 0xbcfcf63, 0x9a9c340, 0x16631d82, 0xab00927, 0x4ee96} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6ad43baab72f065f, 0xe7b1cb210f2d30b2, 0xc63b049a9c3405e7, 0x4ff74b2ac0249ec} +#else +{0x21dd55b97832f, 0x210f2d30b26ad, 0x680bcfcf6396, 0x27b318ec126a7, 0x4ffba5956012} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1c7f, 0x1117, 0xa4, 0x1164, 0x6e, 0x1e63, 0x1b7b, 0x1305, 0x424, 0x131a, 0x1b61, 0xae3, 0x17b1, 0xe5e, 0x1848, 0x1e81, 0x14a5, 0x1cb5, 0x1d87, 0x8} +#elif RADIX == 32 +{0x445f8ff, 0xe8b2029, 0xf7e6303, 0x109260bb, 0x1db0cc68, 0x1d7b1571, 0x7090e5, 0x5ad297d, 0x3ec3f} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x606e8b2029222fc7, 0x6634424982edefcc, 0xe121cbd7b1571ed8, 0x4f761f96b4a5f40} +#else +{0x74590149117e3, 0x4982edefcc606, 0x2ae3db0cc6884, 0x7d0384872f5ec, 0x4fbb0fcb5a52} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x342, 0xfb7, 0xed, 0x1d80, 0x17f1, 0x4a2, 
0x1c26, 0xb96, 0x1367, 0x3dc, 0x1624, 0x1f2a, 0x5e, 0x1cab, 0x27, 0x1e89, 0x1293, 0x1e24, 0x417, 0x5} +#elif RADIX == 32 +{0xbedc685, 0x11ec003b, 0x4c4a2bf, 0xd9d72dc, 0xb120f72, 0x1605ef95, 0x2404fca, 0x1124a4fd, 0x20bf} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x57f1ec003b5f6e34, 0x7b93675cb709894, 0x809f95605ef95589, 0xc905fc49293f44} +#else +{0xf6001dafb71a, 0x75cb70989457f, 0x5f2ab120f726c, 0x7d12027e55817, 0x6482fe24949} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xf3c, 0x1d21, 0xd78, 0xe8e, 0x1f3c, 0x11b, 0x12c, 0x1851, 0x19b1, 0xd9, 0xf3f, 0x759, 0xf47, 0x1e88, 0x56e, 0x8ef, 0x116e, 0x1fa1, 0x1199, 0x0} +#elif RADIX == 32 +{0x7485e78, 0x1c74735e, 0x5811bf9, 0x6c70a21, 0x179f8367, 0x10f473ac, 0x1bcadde8, 0x1d0c5b91, 0x8ccf} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x7f3c74735e3a42f3, 0xc1b39b1c2884b023, 0x95bbd10f473acbcf, 0x3c4667f4316e477} +#else +{0x63a39af1d2179, 0x1c2884b0237f3, 0x675979f836736, 0x11de56ef443d1, 0x462333fa18b7} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x56db,0x1b54,0xbda2,0xc5d3,0xdd06,0x861d,0x9780,0x7475,0x33d1,0x41af,0x34b2,0x7f9d,0x7f8c,0xaa8c,0xb471,0xca}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1b5456db,0xc5d3bda2,0x861ddd06,0x74759780,0x41af33d1,0x7f9d34b2,0xaa8c7f8c,0xcab471}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc5d3bda21b5456db,0x74759780861ddd06,0x7f9d34b241af33d1,0xcab471aa8c7f8c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7d7a,0x48b,0x7d32,0x7bfb,0x9bd3,0x63d8,0x9182,0xa955,0x3e1,0x344,0x6861,0x76bf,0x5cd0,0xeeb4,0x4ae3,0x57}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x48b7d7a,0x7bfb7d32,0x63d89bd3,0xa9559182,0x34403e1,0x76bf6861,0xeeb45cd0,0x574ae3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7bfb7d32048b7d7a,0xa955918263d89bd3,0x76bf6861034403e1,0x574ae3eeb45cd0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x444f,0x3698,0xd649,0x856f,0x41db,0x498f,0xafdf,0x189c,0xcb5b,0xe50b,0xbff,0xf7e0,0x47f9,0xa88b,0x35da,0x15}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3698444f,0x856fd649,0x498f41db,0x189cafdf,0xe50bcb5b,0xf7e00bff,0xa88b47f9,0x1535da}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x856fd6493698444f,0x189cafdf498f41db,0xf7e00bffe50bcb5b,0x1535daa88b47f9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xa925,0xe4ab,0x425d,0x3a2c,0x22f9,0x79e2,0x687f,0x8b8a,0xcc2e,0xbe50,0xcb4d,0x8062,0x8073,0x5573,0x4b8e,0x35}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe4aba925,0x3a2c425d,0x79e222f9,0x8b8a687f,0xbe50cc2e,0x8062cb4d,0x55738073,0x354b8e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3a2c425de4aba925,0x8b8a687f79e222f9,0x8062cb4dbe50cc2e,0x354b8e55738073}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xabf,0x5490,0xd5fd,0x36ba,0xda0f,0x4a59,0x4eea,0xd1,0xa3f0,0xa7ae,0x6f6,0x9146,0x5004,0xcde6,0xa2d2,0x7d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x54900abf,0x36bad5fd,0x4a59da0f,0xd14eea,0xa7aea3f0,0x914606f6,0xcde65004,0x7da2d2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x36bad5fd54900abf,0xd14eea4a59da0f,0x914606f6a7aea3f0,0x7da2d2cde65004}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8680,0xb787,0xbde3,0x611d,0xa95f,0x8b68,0xc9ec,0x819,0x2361,0xf73e,0x5e31,0xbd7b,0x2b45,0x40d7,0x2400,0x68}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb7878680,0x611dbde3,0x8b68a95f,0x819c9ec,0xf73e2361,0xbd7b5e31,0x40d72b45,0x682400}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x611dbde3b7878680,0x819c9ec8b68a95f,0xbd7b5e31f73e2361,0x68240040d72b45}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4277,0x6d20,0x9e12,0x1f0c,0x977f,0xf854,0x9d1c,0x563f,0xdb,0xc2ed,0xaf54,0xe829,0x4fb,0xd83,0x7be8,0xca}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6d204277,0x1f0c9e12,0xf854977f,0x563f9d1c,0xc2ed00db,0xe829af54,0xd8304fb,0xca7be8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1f0c9e126d204277,0x563f9d1cf854977f,0xe829af54c2ed00db,0xca7be80d8304fb}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf541,0xab6f,0x2a02,0xc945,0x25f0,0xb5a6,0xb115,0xff2e,0x5c0f,0x5851,0xf909,0x6eb9,0xaffb,0x3219,0x5d2d,0x82}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xab6ff541,0xc9452a02,0xb5a625f0,0xff2eb115,0x58515c0f,0x6eb9f909,0x3219affb,0x825d2d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc9452a02ab6ff541,0xff2eb115b5a625f0,0x6eb9f90958515c0f,0x825d2d3219affb}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x56db,0x1b54,0xbda2,0xc5d3,0xdd06,0x861d,0x9780,0x7475,0x33d1,0x41af,0x34b2,0x7f9d,0x7f8c,0xaa8c,0xb471,0xca}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1b5456db,0xc5d3bda2,0x861ddd06,0x74759780,0x41af33d1,0x7f9d34b2,0xaa8c7f8c,0xcab471}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc5d3bda21b5456db,0x74759780861ddd06,0x7f9d34b241af33d1,0xcab471aa8c7f8c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7d7a,0x48b,0x7d32,0x7bfb,0x9bd3,0x63d8,0x9182,0xa955,0x3e1,0x344,0x6861,0x76bf,0x5cd0,0xeeb4,0x4ae3,0x57}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x48b7d7a,0x7bfb7d32,0x63d89bd3,0xa9559182,0x34403e1,0x76bf6861,0xeeb45cd0,0x574ae3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7bfb7d32048b7d7a,0xa955918263d89bd3,0x76bf6861034403e1,0x574ae3eeb45cd0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x444f,0x3698,0xd649,0x856f,0x41db,0x498f,0xafdf,0x189c,0xcb5b,0xe50b,0xbff,0xf7e0,0x47f9,0xa88b,0x35da,0x15}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3698444f,0x856fd649,0x498f41db,0x189cafdf,0xe50bcb5b,0xf7e00bff,0xa88b47f9,0x1535da}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x856fd6493698444f,0x189cafdf498f41db,0xf7e00bffe50bcb5b,0x1535daa88b47f9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa925,0xe4ab,0x425d,0x3a2c,0x22f9,0x79e2,0x687f,0x8b8a,0xcc2e,0xbe50,0xcb4d,0x8062,0x8073,0x5573,0x4b8e,0x35}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe4aba925,0x3a2c425d,0x79e222f9,0x8b8a687f,0xbe50cc2e,0x8062cb4d,0x55738073,0x354b8e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x3a2c425de4aba925,0x8b8a687f79e222f9,0x8062cb4dbe50cc2e,0x354b8e55738073}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x30cd,0xb7f2,0x49cf,0xfe47,0xdb8a,0x683b,0x7335,0xbaa3,0xebe0,0x74ae,0x9dd4,0x8871,0x67c8,0x3c39,0x2ba2,0x24}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb7f230cd,0xfe4749cf,0x683bdb8a,0xbaa37335,0x74aeebe0,0x88719dd4,0x3c3967c8,0x242ba2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfe4749cfb7f230cd,0xbaa37335683bdb8a,0x88719dd474aeebe0,0x242ba23c3967c8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81fd,0xde09,0x9d8a,0x6e8c,0xa299,0x77a0,0xadb7,0x58b7,0x13a1,0x7d41,0x6349,0x1a1d,0xc40b,0x17c5,0xb772,0xdf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xde0981fd,0x6e8c9d8a,0x77a0a299,0x58b7adb7,0x7d4113a1,0x1a1d6349,0x17c5c40b,0xdfb772}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6e8c9d8ade0981fd,0x58b7adb777a0a299,0x1a1d63497d4113a1,0xdfb77217c5c40b}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4363,0xd1dc,0x3a2d,0x523e,0xecad,0x20f1,0x267e,0x376e,0x661b,0x53fc,0xddaa,0xf004,0x267a,0x5b07,0xd8e1,0x6f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd1dc4363,0x523e3a2d,0x20f1ecad,0x376e267e,0x53fc661b,0xf004ddaa,0x5b07267a,0x6fd8e1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x523e3a2dd1dc4363,0x376e267e20f1ecad,0xf004ddaa53fc661b,0x6fd8e15b07267a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcf33,0x480d,0xb630,0x1b8,0x2475,0x97c4,0x8cca,0x455c,0x141f,0x8b51,0x622b,0x778e,0x9837,0xc3c6,0xd45d,0xdb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x480dcf33,0x1b8b630,0x97c42475,0x455c8cca,0x8b51141f,0x778e622b,0xc3c69837,0xdbd45d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1b8b630480dcf33,0x455c8cca97c42475,0x778e622b8b51141f,0xdbd45dc3c69837}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x19e1, 0x1d98, 0x1de9, 0x1dfc, 0x922, 0x1fb8, 0x476, 0xd05, 0xc85, 0x1788, 0x1967, 0x155d, 0x1f93, 0x629, 0x188f, 0x119, 0x1f6f, 0x241, 0x1378, 0x0} +#elif RADIX == 32 +{0xf6633c2, 0x2efe77a, 0xedfb849, 0x1215a0a4, 0x1cb3de21, 0x13f93aae, 0x6711e62, 0x120fdbc2, 0x9bc0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x922efe77a7b319e, 0xef10c8568291dbf7, 0xe23cc53f93aaee59, 0x54de0483f6f08c} +#else +{0x177f3bd3d98cf, 0x568291dbf7092, 0x755dcb3de2190, 0x423388f314fe4, 0x2a6f0241fb7} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x811, 0xf66, 0x77a, 0x177f, 0x248, 0x17ee, 0x91d, 0xb41, 0x321, 0x1de2, 0xe59, 0x1d57, 0xfe4, 0x198a, 0xe23, 0x1846, 0xfdb, 0x90, 0x14de, 0x8} +#elif RADIX == 32 +{0x13d99023, 0x8bbf9de, 0x3b7ee12, 0xc856829, 0x172cf788, 0x14fe4eab, 0x119c4798, 0x483f6f0, 0x3a6f0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xc248bbf9de9ecc81, 0x7bc43215a0a476fd, 0x388f314fe4eabb96, 0x95378120fdbc23} +#else +{0x45dfcef4f6640, 0x15a0a476fdc24, 0x1d5772cf78864, 0x708ce23cc53f9, 0x2ca9bc0907ed} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x869, 0x197b, 0xcdb, 0x1d89, 0xf9b, 0x1d79, 0x18ec, 0xafe, 0x1d41, 0x77, 0x9d4, 0x1a3f, 0x2b, 0x46d, 0x173e, 0xedd, 0x172, 0x1c77, 0x8a6, 0x8} +#elif RADIX == 32 +{0x1e5ed0d3, 0x1bec4b36, 0x1d9d797c, 0x15055fd8, 0x14ea01df, 0x1a02bd1f, 0x176e7c46, 0x3b85c9d, 0x34537} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x2f9bec4b36f2f686, 0xefd4157f63b3af, 0xdcf88da02bd1fa75, 0x31229b8ee17276e} +#else +{0x5f6259b797b43, 0x157f63b3af2f9, 0x7a3f4ea01dfa8, 0x1dbb73e23680a, 0x18914dc770b9} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x124b, 0xed4, 0x1706, 0x32d, 0x1541, 0x11b8, 0x2b0, 0xbe4, 0x1ee8, 0x1a3c, 0x16e3, 0x1d25, 0x19bb, 0xb63, 0x1fc1, 0x5fa, 0xf03, 0xfa, 0x1ec, 0x9} +#elif RADIX == 32 +{0x13b52497, 0x1196dc1, 0x1611b8aa, 0x1ba17c82, 0x1b71e8f3, 0x79bbe92, 0x1ebf82b6, 0x7d3c0cb, 0x40f60} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x1541196dc19da924, 0xf479ee85f20ac237, 0x7f056c79bbe92db8, 0x3b87b01f4f032fd} +#else +{0x8cb6e0ced492, 0x5f20ac237154, 0x7d25b71e8f3dd, 0x4bf5fc15b1e6e, 0x1dc3d80fa781} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1e71, 0xd67, 0x13da, 0x19eb, 0x137a, 0x1d27, 0x1ba7, 0x1996, 0x755, 0xe3d, 0x1139, 0x1764, 0x18ac, 0x1020, 0x3c4, 0x150e, 0x1ffd, 0x14fe, 0xa16, 0x6} +#elif RADIX == 32 +{0x1359fce3, 0x1acf5cf6, 0x14fd279b, 0x1d5732db, 0x89cb8f4, 0x18acbb2, 0x3878902, 0x7f7ff6a, 0x150b5} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf37acf5cf69acfe7, 0x5c7a755ccb6e9fa4, 0xf120418acbb244e, 0x8285a9fdffda87} +#else +{0x567ae7b4d67f3, 0x5ccb6e9fa4f37, 0x176489cb8f4ea, 0x6a1c3c481062b, 0x2c142d4feffe} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x13ec, 0x10a3, 0x1e69, 0x106f, 0x619, 0x1cb5, 0x9aa, 0x362, 0x53a, 0x1af5, 0x1bae, 0x60a, 0x2a4, 0x448, 0x3d0, 0x535, 0xeb1, 0x1a6e, 0x978, 0x5} +#elif RADIX == 32 +{0xc28e7d9, 0x19837f9a, 0x155cb530, 0x14e86c49, 0xdd76bd4, 0x102a4305, 0xd47a044, 0x1373ac4a, 0x4bc6} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xa619837f9a61473e, 0xb5ea53a1b126ab96, 0x8f408902a43056eb, 0x3ea5e34dceb129a} +#else +{0x4c1bfcd30a39f, 0x21b126ab96a61, 0x60add76bd4a7, 0x4a6a3d02240a9, 0x1f52f1a6e758} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x77a, 0x201, 0x168d, 0x8fe, 0x780, 0x1ccb, 0x52b, 0x1c83, 0x18dd, 0xcef, 0x11f5, 0x1446, 0x301, 0xb63, 0xe3f, 0x1b72, 0x1, 0x1da9, 0x1281, 0x8} +#elif RADIX == 32 +{0x8804ef5, 0x47f5a3, 0x57ccb3c, 0x3779065, 0x8fab3bf, 0x6301a23, 0x1c9c7eb6, 0xd480076, 0x3940f} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x678047f5a3440277, 0x59df8dde4194af99, 0x38fd6c6301a2347d, 0x364a07b52001db9} +#else +{0x23fad1a2013b, 0x5e4194af99678, 0x34468fab3bf1b, 0x76e4e3f5b18c0, 0x432503da9000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1, 0xb39, 0x969, 0x1324, 0xbe6, 0x86e, 0x1021, 0x29a, 0x1ff0, 0xd23, 0x7d5, 0x72a, 0x1e33, 0x1fd9, 0x10af, 0x15bc, 0x1d56, 0x928, 0x1d49, 0x0} +#elif RADIX == 32 +{0xace4002, 0x699225a, 0x4286e5f, 0x1fc05350, 0x3eab48f, 0x13e33395, 0xf215ffd, 0x94755ab, 0xea4a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xcbe699225a567200, 0x5a47ff014d40850d, 0x42bffb3e333951f5, 0x57525251d56ade} +#else +{0x34c912d2b3900, 0x14d40850dcbe, 0x672a3eab48ffe, 0x2b790affecf8c, 0x2ba92928eab} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xef13,0xa8dc,0x8ceb,0xe405,0xe2f5,0xfda5,0x28ac,0x3bbe,0x41e5,0xee91,0xb0ff,0x5f5c,0x1920,0x1e33,0xef67,0x95}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa8dcef13,0xe4058ceb,0xfda5e2f5,0x3bbe28ac,0xee9141e5,0x5f5cb0ff,0x1e331920,0x95ef67}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe4058ceba8dcef13,0x3bbe28acfda5e2f5,0x5f5cb0ffee9141e5,0x95ef671e331920}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6b6e,0x9e93,0xfbce,0xb1b6,0x80bb,0x14b8,0x20ae,0x6bcd,0xb7f4,0xfeff,0xc4a7,0xceb3,0xd874,0x65bf,0xe003,0xe9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9e936b6e,0xb1b6fbce,0x14b880bb,0x6bcd20ae,0xfeffb7f4,0xceb3c4a7,0x65bfd874,0xe9e003}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb1b6fbce9e936b6e,0x6bcd20ae14b880bb,0xceb3c4a7feffb7f4,0xe9e00365bfd874}}} +#endif +}, { +#if 0 
+#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x47ff,0xc988,0x46b6,0x5236,0x9694,0xec04,0xd563,0x7d56,0x6833,0xc48f,0x8b0a,0xe195,0xb64e,0xe957,0x58b2,0xdb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc98847ff,0x523646b6,0xec049694,0x7d56d563,0xc48f6833,0xe1958b0a,0xe957b64e,0xdb58b2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x523646b6c98847ff,0x7d56d563ec049694,0xe1958b0ac48f6833,0xdb58b2e957b64e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x10ed,0x5723,0x7314,0x1bfa,0x1d0a,0x25a,0xd753,0xc441,0xbe1a,0x116e,0x4f00,0xa0a3,0xe6df,0xe1cc,0x1098,0x6a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x572310ed,0x1bfa7314,0x25a1d0a,0xc441d753,0x116ebe1a,0xa0a34f00,0xe1cce6df,0x6a1098}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1bfa7314572310ed,0xc441d753025a1d0a,0xa0a34f00116ebe1a,0x6a1098e1cce6df}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8d79,0x38f8,0xf94c,0xe776,0x2bdf,0x2d2e,0x4242,0x8677,0xddf0,0x1736,0xa2e3,0x8ee7,0x52ac,0x4bb1,0xbb55,0xa4}}} +#elif GMP_LIMB_BITS == 32 
+{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x38f88d79,0xe776f94c,0x2d2e2bdf,0x86774242,0x1736ddf0,0x8ee7a2e3,0x4bb152ac,0xa4bb55}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe776f94c38f88d79,0x867742422d2e2bdf,0x8ee7a2e31736ddf0,0xa4bb554bb152ac}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6774,0xe280,0xc0b8,0xd49d,0x3b88,0x2577,0xc53f,0x7a5d,0x3032,0x4cfb,0xd6b2,0x3ed5,0x27b8,0x584c,0x85b1,0xfc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe2806774,0xd49dc0b8,0x25773b88,0x7a5dc53f,0x4cfb3032,0x3ed5d6b2,0x584c27b8,0xfc85b1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd49dc0b8e2806774,0x7a5dc53f25773b88,0x3ed5d6b24cfb3032,0xfc85b1584c27b8}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc139,0x25cf,0xd25b,0xadb9,0xbd39,0xaa20,0x8867,0x4e7a,0x8b24,0xa81f,0x412a,0xacfc,0xee2d,0xab0c,0x1d50,0x20}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x25cfc139,0xadb9d25b,0xaa20bd39,0x4e7a8867,0xa81f8b24,0xacfc412a,0xab0cee2d,0x201d50}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xadb9d25b25cfc139,0x4e7a8867aa20bd39,0xacfc412aa81f8b24,0x201d50ab0cee2d}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7287,0xc707,0x6b3,0x1889,0xd420,0xd2d1,0xbdbd,0x7988,0x220f,0xe8c9,0x5d1c,0x7118,0xad53,0xb44e,0x44aa,0x5b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc7077287,0x188906b3,0xd2d1d420,0x7988bdbd,0xe8c9220f,0x71185d1c,0xb44ead53,0x5b44aa}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x188906b3c7077287,0x7988bdbdd2d1d420,0x71185d1ce8c9220f,0x5b44aab44ead53}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xef13,0xa8dc,0x8ceb,0xe405,0xe2f5,0xfda5,0x28ac,0x3bbe,0x41e5,0xee91,0xb0ff,0x5f5c,0x1920,0x1e33,0xef67,0x95}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa8dcef13,0xe4058ceb,0xfda5e2f5,0x3bbe28ac,0xee9141e5,0x5f5cb0ff,0x1e331920,0x95ef67}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe4058ceba8dcef13,0x3bbe28acfda5e2f5,0x5f5cb0ffee9141e5,0x95ef671e331920}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6b6e,0x9e93,0xfbce,0xb1b6,0x80bb,0x14b8,0x20ae,0x6bcd,0xb7f4,0xfeff,0xc4a7,0xceb3,0xd874,0x65bf,0xe003,0xe9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9e936b6e,0xb1b6fbce,0x14b880bb,0x6bcd20ae,0xfeffb7f4,0xceb3c4a7,0x65bfd874,0xe9e003}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb1b6fbce9e936b6e,0x6bcd20ae14b880bb,0xceb3c4a7feffb7f4,0xe9e00365bfd874}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x47ff,0xc988,0x46b6,0x5236,0x9694,0xec04,0xd563,0x7d56,0x6833,0xc48f,0x8b0a,0xe195,0xb64e,0xe957,0x58b2,0xdb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc98847ff,0x523646b6,0xec049694,0x7d56d563,0xc48f6833,0xe1958b0a,0xe957b64e,0xdb58b2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = 
(mp_limb_t[]) {0x523646b6c98847ff,0x7d56d563ec049694,0xe1958b0ac48f6833,0xdb58b2e957b64e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x10ed,0x5723,0x7314,0x1bfa,0x1d0a,0x25a,0xd753,0xc441,0xbe1a,0x116e,0x4f00,0xa0a3,0xe6df,0xe1cc,0x1098,0x6a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x572310ed,0x1bfa7314,0x25a1d0a,0xc441d753,0x116ebe1a,0xa0a34f00,0xe1cce6df,0x6a1098}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1bfa7314572310ed,0xc441d753025a1d0a,0xa0a34f00116ebe1a,0x6a1098e1cce6df}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7029,0x8b30,0x7529,0x9941,0x2be8,0x7b3f,0xe3d7,0x4553,0x7065,0x7bef,0xb49c,0xc80b,0xfa3e,0x950c,0x1ece,0x18}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8b307029,0x99417529,0x7b3f2be8,0x4553e3d7,0x7bef7065,0xc80bb49c,0x950cfa3e,0x181ece}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x994175298b307029,0x4553e3d77b3f2be8,0xc80bb49c7bef7065,0x181ece950cfa3e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xb399,0x92ce,0x85e8,0x7c82,0x86eb,0xb186,0x8924,0x64f1,0xd93,0x5e9a,0x3165,0x4196,0x5e79,0x158,0x55d5,0x31}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x92ceb399,0x7c8285e8,0xb18686eb,0x64f18924,0x5e9a0d93,0x41963165,0x1585e79,0x3155d5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7c8285e892ceb399,0x64f18924b18686eb,0x419631655e9a0d93,0x3155d501585e79}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xda47,0x29f8,0x7209,0xaa0c,0xfc22,0x39c9,0x6e19,0x517c,0xc94e,0xcfa4,0x20fc,0x1edc,0xe0d0,0x396d,0x85f0,0xdf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x29f8da47,0xaa0c7209,0x39c9fc22,0x517c6e19,0xcfa4c94e,0x1edc20fc,0x396de0d0,0xdf85f0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaa0c720929f8da47,0x517c6e1939c9fc22,0x1edc20fccfa4c94e,0xdf85f0396de0d0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8fd7,0x74cf,0x8ad6,0x66be,0xd417,0x84c0,0x1c28,0xbaac,0x8f9a,0x8410,0x4b63,0x37f4,0x5c1,0x6af3,0xe131,0xe7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x74cf8fd7,0x66be8ad6,0x84c0d417,0xbaac1c28,0x84108f9a,0x37f44b63,0x6af305c1,0xe7e131}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x66be8ad674cf8fd7,0xbaac1c2884c0d417,0x37f44b6384108f9a,0xe7e1316af305c1}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x13f5, 0x8b7, 0x1873, 0x144a, 0x1a89, 0x13f9, 0xd49, 0x6f2, 0x3bb, 0x102, 0x1ecb, 0x180b, 0x4d5, 0x608, 0x119, 0x5e6, 0x751, 0x961, 0xd37, 0x5} +#elif RADIX == 32 +{0x1a2de7eb, 0x9a2561c, 0x933f9d4, 0xeecde4d, 0x1f658408, 0x104d5c05, 0x19823260, 0xb09d44b, 0x69ba} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3a89a2561cd16f3f, 0xc2043bb37935267f, 0x464c104d5c05fb2, 0x1bb4dd2c27512f3} +#else +{0x4d12b0e68b79f, 0x337935267f3a8, 0x380bf65840877, 0x4bcc119304135, 0x35da6e9613a8} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x1e96, 0x1a2d, 0x161c, 0xd12, 0xea2, 0xcfe, 0x1352, 0x19bc, 0x10ee, 0x1840, 0x1fb2, 0xe02, 0x135, 0x982, 0x1046, 0x979, 0x9d4, 0x1a58, 0x1b4d, 0x9} +#elif RADIX == 32 +{0x68b7d2d, 0x2689587, 0xa4cfe75, 0x3bb3793, 0xfd96102, 0x4135701, 0x1e608c98, 0x12c27512, 0x4da6e} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xcea2689587345be9, 0xb0810eecde4d499f, 
0xc1193041357017ec, 0x22ed374b09d44bc} +#else +{0x1344ac39a2df4, 0x6cde4d499fcea, 0x2e02fd961021d, 0x12f30464c104d, 0x39769ba584ea} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0xa82, 0x1d2d, 0x15b8, 0x404, 0x1a32, 0xaf9, 0xa86, 0xddf, 0x14bf, 0x100c, 0xc42, 0xa89, 0x1df, 0x82f, 0x1f07, 0x782, 0x664, 0x1ba5, 0x5d7, 0x2} +#elif RADIX == 32 +{0x74b5504, 0x1220256e, 0x10caf9d1, 0x12fdbbea, 0x16214032, 0x1e1df544, 0xbe0e82, 0x1d29990f, 0x22ebe} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3a3220256e3a5aa8, 0xa0194bf6efaa195f, 0x7c1d05e1df544b10, 0xb175f74a6643c1} +#else +{0x11012b71d2d54, 0x76efaa195f3a3, 0x6a89621403297, 0xf05f07417877, 0x58bafba5332} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x5a1, 0x46a, 0x17ab, 0x1cfa, 0x547, 0x1b9c, 0xda5, 0x141e, 0x216, 0x1f49, 0xaca, 0x15a1, 0xfe0, 0x1afb, 0x1a47, 0x133d, 0x1887, 0x590, 0xbc2, 0x1} +#elif RADIX == 32 +{0x191a8b42, 0x7e7d5ea, 0x14bb9c2a, 0x85a83cd, 0x15657d24, 0x16fe0ad0, 0xf748faf, 0xc8621e6, 0x15e11} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x8547e7d5eac8d45a, 0xbe92216a0f369773, 0xe91f5f6fe0ad0ab2, 0x5af08b2188799e} +#else +{0x3f3eaf5646a2d, 0x6a0f369773854, 0x15a15657d2442, 0x667ba47d7dbf8, 0x2d784590c43} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1311, 0x910, 0x413, 0x1d16, 0x14f7, 0x19c9, 0x14d3, 0x1504, 0x776, 0x1c2c, 0x15b0, 0xc6e, 0x36b, 0x1777, 0x1ed2, 0xb34, 0x1281, 0x1281, 0xd0f, 0x4} +#elif RADIX == 32 +{0x1a442622, 0x17e8b104, 0x1a79c9a7, 0x1ddaa094, 0xad870b0, 0xe36b637, 0xd3da577, 0x140ca056, 0x4687c} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x34f7e8b104d22131, 0x3858776a82534f39, 0x7b4aeee36b63756c, 0x7343e50328159a} +#else +{0x3f45882691098, 0x6a82534f3934f, 0x6c6ead870b0ee, 0x5669ed2bbb8da, 0x2b9a1f281940} +#endif +#endif +, +#if 0 +#elif RADIX == 
16 +{0x12d2, 0x6d8, 0x1e2c, 0x6f9, 0x5e8, 0x4e5, 0x32c, 0x58d, 0x1bda, 0x16f9, 0x8b5, 0x3c0, 0x10c, 0xb18, 0x450, 0x834, 0x3b7, 0x8d7, 0x15bf, 0x0} +#elif RADIX == 32 +{0x1b625a4, 0x837cf8b, 0x584e52f, 0xf68b1a3, 0x45adbe7, 0x1010c1e0, 0xd08a0b1, 0x6b8edd0, 0xadfa} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xa5e837cf8b0db12d, 0x6df3bda2c68cb09c, 0x114163010c1e022d, 0xa56fd1ae3b741a} +#else +{0x41be7c586d896, 0x22c68cb09ca5e, 0x3c045adbe77b, 0x506845058c043, 0x2d2b7e8d71db} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x5f, 0x444, 0x49e, 0xae7, 0x248, 0x1a37, 0x9b6, 0xc28, 0x464, 0x19b7, 0x1560, 0xd7a, 0x2e3, 0x81a, 0x6f5, 0x5f9, 0x1818, 0x164c, 0x1713, 0x7} +#elif RADIX == 32 +{0x111100bf, 0x8573927, 0x16da3712, 0x11918509, 0xab066dc, 0x142e36bd, 0x1e4dea81, 0x1266060b, 0x2b89d} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xe248573927888805, 0x336e46461426db46, 0x9bd50342e36bd558, 0x4edc4ec998182fc} +#else +{0x42b9c93c44402, 0x461426db46e24, 0x6d7aab066dc8c, 0xbf26f540d0b8, 0x4f6e2764cc0c} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x19b1, 0x1912, 0x1eb, 0x1cbc, 0x210, 0x17cf, 0x1b9e, 0x754, 0x38c, 0x816, 0x1431, 0x79a, 0xa57, 0x15ff, 0x756, 0xa60, 0x1064, 0x162f, 0x1e5e, 0x0} +#elif RADIX == 32 +{0x1e44b362, 0x10e5e07a, 0x13d7cf10, 0xe30ea9b, 0xa18a058, 0x1ea573cd, 0x180ead5f, 0x117c1914, 0xf2f5} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xe210e5e07af2259b, 0x502c38c3aa6e7af9, 0x1d5abfea573cd50c, 0x5797ac5f064530} +#else +{0x72f03d7912cd, 0x43aa6e7af9e21, 0x679aa18a05871, 0x14c0756affa95, 0x2abcbd62f832} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5ff1,0xa594,0x52b3,0xe75d,0xdd09,0xd267,0x7d25,0xd976,0xbc5,0xc1a8,0x9aae,0x10bf,0xe894,0x8de3,0xae84,0x70}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5945ff1,0xe75d52b3,0xd267dd09,0xd9767d25,0xc1a80bc5,0x10bf9aae,0x8de3e894,0x70ae84}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe75d52b3a5945ff1,0xd9767d25d267dd09,0x10bf9aaec1a80bc5,0x70ae848de3e894}}} 
+#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x11e6,0xb9e0,0x96c0,0xa6d7,0xee81,0x4b6,0x2f44,0xf4c5,0x4597,0xe75d,0x5b93,0xebb6,0x59c6,0xc08e,0x3084,0x16}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb9e011e6,0xa6d796c0,0x4b6ee81,0xf4c52f44,0xe75d4597,0xebb65b93,0xc08e59c6,0x163084}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa6d796c0b9e011e6,0xf4c52f4404b6ee81,0xebb65b93e75d4597,0x163084c08e59c6}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf41d,0x3463,0x6031,0x479f,0x6fe7,0x159f,0x1e6b,0x404e,0xe302,0x88a8,0xc1f7,0xad84,0x8b50,0x3175,0x3d66,0xab}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3463f41d,0x479f6031,0x159f6fe7,0x404e1e6b,0x88a8e302,0xad84c1f7,0x31758b50,0xab3d66}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x479f60313463f41d,0x404e1e6b159f6fe7,0xad84c1f788a8e302,0xab3d6631758b50}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa00f,0x5a6b,0xad4c,0x18a2,0x22f6,0x2d98,0x82da,0x2689,0xf43a,0x3e57,0x6551,0xef40,0x176b,0x721c,0x517b,0x8f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a6ba00f,0x18a2ad4c,0x2d9822f6,0x268982da,0x3e57f43a,0xef406551,0x721c176b,0x8f517b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x18a2ad4c5a6ba00f,0x268982da2d9822f6,0xef4065513e57f43a,0x8f517b721c176b}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +#elif 
GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xffc3,0x1fbe,0xc7ef,0x56c4,0x2834,0xfa5c,0x36aa,0x1ced,0x9076,0xa31d,0x8890,0xe52,0x87d2,0xef68,0x98bc,0xc2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1fbeffc3,0x56c4c7ef,0xfa5c2834,0x1ced36aa,0xa31d9076,0xe528890,0xef6887d2,0xc298bc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x56c4c7ef1fbeffc3,0x1ced36aafa5c2834,0xe528890a31d9076,0xc298bcef6887d2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4098,0xd740,0xb5c6,0x8109,0x299,0x3a8c,0x81c2,0xc0d0,0xe848,0x9243,0x8996,0x656a,0x8c87,0x6c99,0xb9f5,0x4c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd7404098,0x8109b5c6,0x3a8c0299,0xc0d081c2,0x9243e848,0x656a8996,0x6c998c87,0x4cb9f5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8109b5c6d7404098,0xc0d081c23a8c0299,0x656a89969243e848,0x4cb9f56c998c87}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x712b,0xfeed,0x55b5,0xc5fe,0xe867,0x77a9,0x1775,0x7814,0x4780,0x73b1,0x86b1,0x3973,0x797a,0x7f0b,0x1fa,0xb0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfeed712b,0xc5fe55b5,0x77a9e867,0x78141775,0x73b14780,0x397386b1,0x7f0b797a,0xb001fa}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc5fe55b5feed712b,0x7814177577a9e867,0x397386b173b14780,0xb001fa7f0b797a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3d,0xe041,0x3810,0xa93b,0xd7cb,0x5a3,0xc955,0xe312,0x6f89,0x5ce2,0x776f,0xf1ad,0x782d,0x1097,0x6743,0x3d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe041003d,0xa93b3810,0x5a3d7cb,0xe312c955,0x5ce26f89,0xf1ad776f,0x1097782d,0x3d6743}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa93b3810e041003d,0xe312c95505a3d7cb,0xf1ad776f5ce26f89,0x3d67431097782d}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5ff1,0xa594,0x52b3,0xe75d,0xdd09,0xd267,0x7d25,0xd976,0xbc5,0xc1a8,0x9aae,0x10bf,0xe894,0x8de3,0xae84,0x70}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5945ff1,0xe75d52b3,0xd267dd09,0xd9767d25,0xc1a80bc5,0x10bf9aae,0x8de3e894,0x70ae84}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe75d52b3a5945ff1,0xd9767d25d267dd09,0x10bf9aaec1a80bc5,0x70ae848de3e894}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x11e6,0xb9e0,0x96c0,0xa6d7,0xee81,0x4b6,0x2f44,0xf4c5,0x4597,0xe75d,0x5b93,0xebb6,0x59c6,0xc08e,0x3084,0x16}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb9e011e6,0xa6d796c0,0x4b6ee81,0xf4c52f44,0xe75d4597,0xebb65b93,0xc08e59c6,0x163084}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = 
(mp_limb_t[]) {0xa6d796c0b9e011e6,0xf4c52f4404b6ee81,0xebb65b93e75d4597,0x163084c08e59c6}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf41d,0x3463,0x6031,0x479f,0x6fe7,0x159f,0x1e6b,0x404e,0xe302,0x88a8,0xc1f7,0xad84,0x8b50,0x3175,0x3d66,0xab}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3463f41d,0x479f6031,0x159f6fe7,0x404e1e6b,0x88a8e302,0xad84c1f7,0x31758b50,0xab3d66}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x479f60313463f41d,0x404e1e6b159f6fe7,0xad84c1f788a8e302,0xab3d6631758b50}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa00f,0x5a6b,0xad4c,0x18a2,0x22f6,0x2d98,0x82da,0x2689,0xf43a,0x3e57,0x6551,0xef40,0x176b,0x721c,0x517b,0x8f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a6ba00f,0x18a2ad4c,0x2d9822f6,0x268982da,0x3e57f43a,0xef406551,0x721c176b,0x8f517b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x18a2ad4c5a6ba00f,0x268982da2d9822f6,0xef4065513e57f43a,0x8f517b721c176b}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x5d2b,0x1bd6,0xcc3f,0x7e74,0x4fea,0xfba0,0x9f84,0xd6d4,0x42a1,0x88d1,0x68b1,0x4f4e,0x13ec,0xa60c,0xb13b,0x2e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1bd65d2b,0x7e74cc3f,0xfba04fea,0xd6d49f84,0x88d142a1,0x4f4e68b1,0xa60c13ec,0x2eb13b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7e74cc3f1bd65d2b,0xd6d49f84fba04fea,0x4f4e68b188d142a1,0x2eb13ba60c13ec}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7b4f,0x9448,0xaa16,0x649a,0xe4b4,0x3bc2,0xd3fd,0x8df1,0x931e,0x4078,0x8caa,0xe896,0xdeec,0xbed5,0x166e,0x7c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x94487b4f,0x649aaa16,0x3bc2e4b4,0x8df1d3fd,0x4078931e,0xe8968caa,0xbed5deec,0x7c166e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x649aaa1694487b4f,0x8df1d3fd3bc2e4b4,0xe8968caa4078931e,0x7c166ebed5deec}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x101d,0x51aa,0xd32d,0x2b40,0x7ba,0xc5f8,0x257a,0xb323,0x9bde,0x20c5,0xdc8f,0x2c3d,0x4e7b,0x54a6,0x17b9,0x99}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x51aa101d,0x2b40d32d,0xc5f807ba,0xb323257a,0x20c59bde,0x2c3ddc8f,0x54a64e7b,0x9917b9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2b40d32d51aa101d,0xb323257ac5f807ba,0x2c3ddc8f20c59bde,0x9917b954a64e7b}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa2d5,0xe429,0x33c0,0x818b,0xb015,0x45f,0x607b,0x292b,0xbd5e,0x772e,0x974e,0xb0b1,0xec13,0x59f3,0x4ec4,0xd1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe429a2d5,0x818b33c0,0x45fb015,0x292b607b,0x772ebd5e,0xb0b1974e,0x59f3ec13,0xd14ec4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x818b33c0e429a2d5,0x292b607b045fb015,0xb0b1974e772ebd5e,0xd14ec459f3ec13}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0xa72, 0x186f, 0x81c, 0x105c, 0x151, 0x1451, 0x1b96, 0x4d1, 0x12be, 0x3bf, 0x996, 0x1130, 0x1460, 0x1ed8, 0xc2a, 0x1c23, 0x1ee, 0x3f4, 0xbe2, 0x9} +#elif RADIX == 32 +{0x61bd4e5, 0x1182e207, 0x12d4510a, 0xaf89a3b, 0x4cb0efe, 0x11460898, 0x8d855ed, 0x1fa07bb8, 0x45f10} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x215182e20730dea7, 0x877f2be268ee5a8a, 0xb0abdb1460898265, 0xeaf887e81eee11} +#else +{0xc17103986f53, 0x6268ee5a8a215, 0x11304cb0efe57, 0x3846c2af6c518, 0x2f57c43f40f7} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x1c36, 0x61b, 0x207, 0xc17, 0x854, 0x1514, 0xee5, 0x1134, 0x1caf, 0x10ef, 0x265, 0x44c, 0x518, 0x17b6, 0x1b0a, 0x1708, 0x7b, 0x10fd, 0xaf8, 0x3} +#elif RADIX == 32 +{0x1986f86c, 0x1460b881, 0x1cb51442, 0x12be268e, 0x132c3bf, 0xc518226, 0x236157b, 0x7e81eee, 0x357c4} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x885460b881cc37c3, 0x61dfcaf89a3b96a2, 0x6c2af6c518226099, 0x1fabe21fa07bb84} +#else +{0x2305c40e61be1, 0x789a3b96a2885, 0x44c132c3bf95, 0x6e11b0abdb146, 0x37d5f10fd03d} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x1c07, 0x15d6, 0x526, 0xde7, 0x149b, 0x719, 0x1786, 0x1272, 0x18b, 0x1bac, 0xf74, 0x1588, 0xe6f, 0x24c, 0x1204, 0x1e9d, 0x13bb, 0x1ccb, 0x78d, 0x9} +#elif RADIX == 32 +{0x1575b80f, 0x1b6f3949, 0x10c719a4, 0x62e4e57, 0x7ba6eb0, 0x18e6fac4, 0x7640824, 0x65ceefd, 0x43c6f} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x349b6f3949abadc0, 0x375818b9395e18e3, 0xc810498e6fac43dd, 0x279e379973bbf4e} +#else +{0x5b79ca4d5d6e0, 0x39395e18e3349, 0x75887ba6eb031, 0x7d3b20412639b, 0x13cf1bccb9dd} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xddf, 0x238, 0xe4b, 0x1958, 0xe6e, 0x1059, 0x133, 0x1e11, 0x5ae, 0x2ab, 0x1044, 0xdd, 0xe9d, 0x1aa8, 0x15e2, 0xc9b, 0xaa6, 0x3c8, 0x10ac, 0x0} +#elif RADIX == 32 +{0x188e1bbe, 0xecac392, 0x6705973, 0x16bbc221, 0x18220aac, 0x10e9d06e, 0x6ebc5aa, 0x1e42a999, 0x8560} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x2e6ecac392c470dd, 0x5565aef0884ce0b, 0xd78b550e9d06ec11, 0x4b42b0790aa664d} +#else +{0x76561c962386e, 0x6f0884ce0b2e6, 0x20dd8220aacb5, 0x19375e2d543a7, 0x4da1583c8553} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 
+{0x192, 0x1c6d, 0x18a4, 0x152, 0x1aa9, 0xec4, 0x1be8, 0x1209, 0x7f, 0x797, 0x1295, 0x1433, 0x1a75, 0x15a, 0x1d64, 0x146c, 0x12df, 0x10af, 0x188f, 0x1} +#elif RADIX == 32 +{0x71b4324, 0x90a9629, 0x1d0ec4d5, 0x1fe413b, 0x194a9e5c, 0x15a75a19, 0x1b3ac815, 0x57cb7e8, 0x1c47c} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x9aa90a962938da19, 0x4f2e07f904efa1d8, 0x75902b5a75a19ca5, 0xae23e15f2dfa36} +#else +{0x4854b149c6d0c, 0x7904efa1d89aa, 0x343394a9e5c0f, 0x68d9d640ad69d, 0x2d711f0af96f} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x129c, 0xe1d, 0x1bd3, 0xf2a, 0x937, 0xf81, 0xa47, 0x186b, 0x1bbe, 0x1c6d, 0x1edd, 0x1b51, 0xa10, 0x167a, 0x1f0b, 0x374, 0x720, 0x1547, 0x726, 0x1} +#elif RADIX == 32 +{0x1b876538, 0x177956f4, 0x8ef8149, 0xefb0d6a, 0x1f6ef1b7, 0x14a10da8, 0x1d3e1767, 0xa39c806, 0x13935} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x29377956f4dc3b29, 0x78dbbbec35a91df0, 0x7c2ecf4a10da8fb7, 0x3c9c9aa8e7201ba} +#else +{0x3bcab7a6e1d94, 0x6c35a91df0293, 0x1b51f6ef1b777, 0x6e9f0bb3d284, 0x464e4d547390} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x12cc, 0x495, 0x1a14, 0x1db0, 0xb66, 0x76a, 0x1a77, 0xaf6, 0x1656, 0x1ad7, 0xb35, 0x4b1, 0xffa, 0x37b, 0xabf, 0xa5c, 0xdc9, 0x1a74, 0x11c9, 0x8} +#elif RADIX == 32 +{0x1256599, 0x6ed8685, 0xee76a5b, 0x19595eda, 0x159aeb5e, 0x16ffa258, 0x17157e37, 0x13a37254, 0x38e4e} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x4b66ed8685092b2c, 0x75af65657b69dced, 0x2afc6f6ffa258acd, 0x4047274e8dc952e} +#else +{0x376c342849596, 0x657b69dced4b6, 0x44b159aeb5eca, 0x54b8abf1bdbfe, 0x202393a746e4} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1379, 0x125e, 0x1c56, 0x1811, 0x144, 0x2a8, 0xbb3, 0x2ca, 0x6d2, 0x565, 0x91e, 0x1280, 0x1b4f, 0x51a, 0x1eb7, 0x35a, 0x14fe, 0x1b59, 0x182e, 0x2} +#elif RADIX == 32 +{0x1497a6f2, 0x4c08f15, 0x1662a80a, 0x1b48594b, 0x48f1594, 0x15b4f940, 0x16bd6e51, 0x1acd3f86, 0x2c176} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x144c08f15a4bd37, 0x8aca6d21652ecc55, 0x7adca35b4f940247, 0x2e60bb6b34fe1ad} +#else +{0x260478ad25e9b, 0x21652ecc55014, 0x728048f1594da, 0x6b5eb728d6d3, 0x3f305db59a7f} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} 
+#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x323f,0x7ed2,0x4455,0x415c,0x5876,0x4282,0x76ef,0xcc11,0xbdab,0x1142,0x4729,0x3405,0x1155,0x1779,0x7c1f,0xc5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7ed2323f,0x415c4455,0x42825876,0xcc1176ef,0x1142bdab,0x34054729,0x17791155,0xc57c1f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x415c44557ed2323f,0xcc1176ef42825876,0x340547291142bdab,0xc57c1f17791155}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc9ce,0xa958,0x4fac,0x8a69,0x31e1,0x9997,0x1a17,0x8c19,0xd118,0x68c7,0xc0eb,0x8113,0x62fd,0xf8c8,0xc94e,0x3f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa958c9ce,0x8a694fac,0x999731e1,0x8c191a17,0x68c7d118,0x8113c0eb,0xf8c862fd,0x3fc94e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8a694faca958c9ce,0x8c191a17999731e1,0x8113c0eb68c7d118,0x3fc94ef8c862fd}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x41cb,0xed47,0x8ea0,0x127c,0x69d5,0xf74c,0xce8c,0x3826,0x3da2,0x6bf3,0x56b,0xe695,0xc45c,0x84ac,0x5817,0xd0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xed4741cb,0x127c8ea0,0xf74c69d5,0x3826ce8c,0x6bf33da2,0xe695056b,0x84acc45c,0xd05817}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x127c8ea0ed4741cb,0x3826ce8cf74c69d5,0xe695056b6bf33da2,0xd0581784acc45c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcdc1,0x812d,0xbbaa,0xbea3,0xa789,0xbd7d,0x8910,0x33ee,0x4254,0xeebd,0xb8d6,0xcbfa,0xeeaa,0xe886,0x83e0,0x3a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x812dcdc1,0xbea3bbaa,0xbd7da789,0x33ee8910,0xeebd4254,0xcbfab8d6,0xe886eeaa,0x3a83e0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbea3bbaa812dcdc1,0x33ee8910bd7da789,0xcbfab8d6eebd4254,0x3a83e0e886eeaa}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5286,0x497a,0x2196,0xaa2d,0xa98a,0x7b17,0xa6e9,0x6da0,0x73f5,0xe349,0x83ae,0x7c6e,0x31,0xee2e,0x6931,0x32}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x497a5286,0xaa2d2196,0x7b17a98a,0x6da0a6e9,0xe34973f5,0x7c6e83ae,0xee2e0031,0x326931}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaa2d2196497a5286,0x6da0a6e97b17a98a,0x7c6e83aee34973f5,0x326931ee2e0031}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc8dc,0x8642,0xe46d,0x6a64,0x4c13,0x90e7,0xd82b,0x9b58,0x64b4,0x7381,0x5218,0x99b4,0x5926,0x5d78,0x95e2,0xb7}}} 
+#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8642c8dc,0x6a64e46d,0x90e74c13,0x9b58d82b,0x738164b4,0x99b45218,0x5d785926,0xb795e2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6a64e46d8642c8dc,0x9b58d82b90e74c13,0x99b45218738164b4,0xb795e25d785926}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7363,0xbe7a,0xc901,0xb6e0,0x6a56,0x779d,0xbc42,0xd659,0x3476,0x3868,0x12f4,0x923a,0x6fa8,0x5412,0xd5f9,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbe7a7363,0xb6e0c901,0x779d6a56,0xd659bc42,0x38683476,0x923a12f4,0x54126fa8,0x3d5f9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6e0c901be7a7363,0xd659bc42779d6a56,0x923a12f438683476,0x3d5f954126fa8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xedb4,0x4fd4,0x5c14,0x14b,0xf702,0xd6be,0x9c11,0x4bb,0x9f10,0xde25,0xb159,0x5085,0xb0a9,0x6f42,0xc4d3,0x1d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4fd4edb4,0x14b5c14,0xd6bef702,0x4bb9c11,0xde259f10,0x5085b159,0x6f42b0a9,0x1dc4d3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x14b5c144fd4edb4,0x4bb9c11d6bef702,0x5085b159de259f10,0x1dc4d36f42b0a9}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe873,0x4974,0xc7ed,0x6b01,0xaffb,0xf3d4,0xc641,0x20d6,0xca22,0x2d69,0x9f01,0x451e,0xfa05,0xef65,0xb43b,0xde}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4974e873,0x6b01c7ed,0xf3d4affb,0x20d6c641,0x2d69ca22,0x451e9f01,0xef65fa05,0xdeb43b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6b01c7ed4974e873,0x20d6c641f3d4affb,0x451e9f012d69ca22,0xdeb43bef65fa05}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8c9d,0x4185,0x36fe,0x491f,0x95a9,0x8862,0x43bd,0x29a6,0xcb89,0xc797,0xed0b,0x6dc5,0x9057,0xabed,0x2a06,0xfc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x41858c9d,0x491f36fe,0x886295a9,0x29a643bd,0xc797cb89,0x6dc5ed0b,0xabed9057,0xfc2a06}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x491f36fe41858c9d,0x29a643bd886295a9,0x6dc5ed0bc797cb89,0xfc2a06abed9057}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x323f,0x7ed2,0x4455,0x415c,0x5876,0x4282,0x76ef,0xcc11,0xbdab,0x1142,0x4729,0x3405,0x1155,0x1779,0x7c1f,0xc5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7ed2323f,0x415c4455,0x42825876,0xcc1176ef,0x1142bdab,0x34054729,0x17791155,0xc57c1f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 
4, ._mp_d = (mp_limb_t[]) {0x415c44557ed2323f,0xcc1176ef42825876,0x340547291142bdab,0xc57c1f17791155}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc9ce,0xa958,0x4fac,0x8a69,0x31e1,0x9997,0x1a17,0x8c19,0xd118,0x68c7,0xc0eb,0x8113,0x62fd,0xf8c8,0xc94e,0x3f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa958c9ce,0x8a694fac,0x999731e1,0x8c191a17,0x68c7d118,0x8113c0eb,0xf8c862fd,0x3fc94e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8a694faca958c9ce,0x8c191a17999731e1,0x8113c0eb68c7d118,0x3fc94ef8c862fd}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x41cb,0xed47,0x8ea0,0x127c,0x69d5,0xf74c,0xce8c,0x3826,0x3da2,0x6bf3,0x56b,0xe695,0xc45c,0x84ac,0x5817,0xd0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xed4741cb,0x127c8ea0,0xf74c69d5,0x3826ce8c,0x6bf33da2,0xe695056b,0x84acc45c,0xd05817}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x127c8ea0ed4741cb,0x3826ce8cf74c69d5,0xe695056b6bf33da2,0xd0581784acc45c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcdc1,0x812d,0xbbaa,0xbea3,0xa789,0xbd7d,0x8910,0x33ee,0x4254,0xeebd,0xb8d6,0xcbfa,0xeeaa,0xe886,0x83e0,0x3a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x812dcdc1,0xbea3bbaa,0xbd7da789,0x33ee8910,0xeebd4254,0xcbfab8d6,0xe886eeaa,0x3a83e0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbea3bbaa812dcdc1,0x33ee8910bd7da789,0xcbfab8d6eebd4254,0x3a83e0e886eeaa}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2943,0x24bd,0x90cb,0x5516,0xd4c5,0xbd8b,0x5374,0xb6d0,0xb9fa,0x71a4,0x41d7,0xbe37,0x18,0xf717,0x3498,0x99}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x24bd2943,0x551690cb,0xbd8bd4c5,0xb6d05374,0x71a4b9fa,0xbe3741d7,0xf7170018,0x993498}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x551690cb24bd2943,0xb6d05374bd8bd4c5,0xbe3741d771a4b9fa,0x993498f7170018}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x646e,0xc321,0x7236,0xb532,0xa609,0xc873,0x6c15,0x4dac,0xb25a,0x39c0,0x290c,0x4cda,0x2c93,0x2ebc,0xcaf1,0xdb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc321646e,0xb5327236,0xc873a609,0x4dac6c15,0x39c0b25a,0x4cda290c,0x2ebc2c93,0xdbcaf1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb5327236c321646e,0x4dac6c15c873a609,0x4cda290c39c0b25a,0xdbcaf12ebc2c93}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca5b,0x1036,0x34a6,0x490c,0xc0ed,0x771b,0x1590,0x1c17,0x4855,0x977e,0x8054,0xdb98,0xb26f,0x1175,0x7722,0xfe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1036ca5b,0x490c34a6,0x771bc0ed,0x1c171590,0x977e4855,0xdb988054,0x1175b26f,0xfe7722}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x490c34a61036ca5b,0x1c171590771bc0ed,0xdb988054977e4855,0xfe77221175b26f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf543,0x821c,0xae0a,0xb0cb,0x642d,0x5a80,0xd2bf,0x2340,0xc8f,0xe1ce,0x4e38,0xdace,0x3445,0x807e,0x9bc4,0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x821cf543,0xb0cbae0a,0x5a80642d,0x2340d2bf,0xe1ce0c8f,0xdace4e38,0x807e3445,0x59bc4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb0cbae0a821cf543,0x2340d2bf5a80642d,0xdace4e38e1ce0c8f,0x59bc4807e3445}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6e85,0xc3dc,0xfd4,0x39a7,0x5158,0x777b,0xb83,0xb0fe,0x55de,0x45b3,0x103f,0x53dc,0x27e2,0xb6cb,0x2b18,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc3dc6e85,0x39a70fd4,0x777b5158,0xb0fe0b83,0x45b355de,0x53dc103f,0xb6cb27e2,0x12b18}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x39a70fd4c3dc6e85,0xb0fe0b83777b5158,0x53dc103f45b355de,0x12b18b6cb27e2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x35a5,0xefc9,0xcb59,0xb6f3,0x3f12,0x88e4,0xea6f,0xe3e8,0xb7aa,0x6881,0x7fab,0x2467,0x4d90,0xee8a,0x88dd,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xefc935a5,0xb6f3cb59,0x88e43f12,0xe3e8ea6f,0x6881b7aa,0x24677fab,0xee8a4d90,0x188dd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f3cb59efc935a5,0xe3e8ea6f88e43f12,0x24677fab6881b7aa,0x188ddee8a4d90}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x102e, 0x4c4, 0x1586, 0x1184, 0xa12, 0x9ce, 0x1866, 0x433, 0x5ef, 0x82a, 0x13a5, 0xbc6, 0x591, 0x175b, 0x10bc, 0xfa5, 0x109d, 0x8d4, 0x1325, 0x9} +#elif RADIX == 32 +{0x1131205d, 0x128c2561, 0xcc9ce50, 0x17bc8678, 0x9d2a0a8, 0x165915e3, 0x9617975, 0x6a4275f, 0x4992a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xca128c2561898902, 0x50545ef219e19939, 0xc2f2eb65915e34e9, 0x4acc951a909d7d2} +#else +{0x14612b0c4c481, 0x7219e19939ca1, 0x2bc69d2a0a8bd, 0x5f4b0bcbad964, 0x25664a8d484e} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { 
+#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x5a5, 0x1131, 0x561, 0x1461, 0x1284, 0x1273, 0x1e19, 0x190c, 0x117b, 0xa0a, 0x14e9, 0xaf1, 0x1964, 0x5d6, 0xc2f, 0xbe9, 0x427, 0xa35, 0xcc9, 0x3} +#elif RADIX == 32 +{0xc4c4b4a, 0x4a30958, 0x3327394, 0x5ef219e, 0x1a74a82a, 0xd964578, 0x1a585e5d, 0x11a909d7, 0x3664a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x7284a3095862625a, 0x541517bc8678664e, 0xb0bcbad964578d3a, 0x1ab32546a4275f4} +#else +{0x25184ac31312d, 0x3c8678664e728, 0xaf1a74a82a2f, 0x57d2c2f2eb659, 0xd5992a35213} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x1b4a, 0xf6a, 0xadd, 0x302, 0x196b, 0x366, 0x1399, 0xe83, 0x1540, 0xcd, 0x169d, 0x1007, 0xfe6, 0x1fd2, 0xebb, 0x808, 0x1725, 0x1c1e, 0x1009, 0x8} +#elif RADIX == 32 +{0xbdab695, 0xb1812b7, 0x132366cb, 0x1501d073, 0x1b4e8336, 0x4fe6803, 0x21d77fd, 0xf5c950, 0x3804f} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xd96b1812b75ed5b4, 0x419b540741ce646c, 0x3aeffa4fe6803da7, 0x36402783d725404} +#else +{0x58c095baf6ada, 0x741ce646cd96, 0x5007b4e8336a8, 0x5010ebbfe93f9, 0x1b2013c1eb92} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x122a, 0x94e, 0x1927, 0x1701, 0x58e, 0x79, 0x134e, 0xecc, 0xa0f, 0x7be, 0xc39, 0xfb2, 0x1df0, 0x79a, 0x154a, 0x1a4a, 0x23f, 0x3de, 0x1be1, 0x9} +#elif RADIX == 32 +{0x1a53a455, 0xeb80e49, 0x9c0792c, 0x83dd993, 0x61c9ef9, 0x15df07d9, 0x12aa9479, 0x1ef08ff4, 0x4df08} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x258eb80e49d29d22, 0x4f7ca0f7664d380f, 0x5528f35df07d930e, 0x36ef847bc23fd25} +#else +{0x75c0724e94e91, 0x77664d380f258, 0xfb261c9ef941, 0x749554a3cd77c, 0x1b77c23de11f} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1943, 0x2e1, 0x677, 0x614, 0x19e, 0x11e6, 0xde2, 0x104d, 0x551, 0x1455, 0x1d7e, 0xdd, 0x15e0, 0x14c5, 0xeeb, 0x14b5, 0x168f, 0x1a03, 0xa9d, 0x4} +#elif RADIX == 32 +{0x18b87286, 0x1e30a19d, 0x1c51e60c, 0x154609ad, 0x1ebf5154, 0xb5e006e, 0xd5dd74c, 0x101da3e9, 0x454ee} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xc19e30a19dc5c394, 0xa8aa551826b78a3c, 0xbbae98b5e006ef5f, 0x112a7740768fa5a} +#else +{0x71850cee2e1ca, 0x1826b78a3cc19, 0xddebf5154aa, 0x696aeeba62d78, 0x8953ba03b47} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x512, 0xda9, 0x31a, 0x1711, 0x1b65, 0x9f0, 0xe54, 0x1d4a, 0xe1c, 0xc90, 0x1837, 0x1728, 0x15fa, 0xa40, 0xf21, 0x1b43, 0x1716, 0x1277, 0x11a8, 0x9} +#elif RADIX == 32 +{0x136a4a25, 0x5b888c6, 0xa89f0db, 0x1873a94e, 0xc1bb241, 0x15fab94, 0x10de42a4, 0x13bdc5b6, 0x48d44} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x1b65b888c69b5251, 0xd920e1cea539513e, 0xbc854815fab9460d, 0xec6a24ef716da1} +#else +{0x2dc44634da928, 0x4ea539513e1b6, 0x5728c1bb241c3, 0x3686f2152057e, 0x2f6351277b8b} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x822, 0x1a13, 0x11d, 0x10e0, 0x2b9, 0x1d20, 0x19f9, 0x1dc2, 0x1770, 0x135e, 0x1c13, 0x1cba, 0x14df, 0x5c8, 0x1f31, 0x215, 0x16ed, 0x1f7a, 0xc6c, 0x5} +#elif RADIX == 32 +{0xe84d045, 0x19870047, 0x1f3d2015, 0x1dc3b859, 0xe09cd7a, 0x114dfe5d, 0x57e625c, 0x1bd5bb44, 0x6367} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x2b9870047742682, 0xe6bd770ee167e7a4, 0xfcc4b914dfe5d704, 0xcb1b3ef56ed10a} +#else +{0x4c38023ba1341, 0xee167e7a402b, 0x7cbae09cd7aee, 0x442bf312e4537, 0x658d9f7ab76} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x2cc, 0xd50, 0xeda, 0x1c3c, 0x8a6, 0x1659, 0xffb, 0x1cee, 0x1f14, 0x17fe, 0x1860, 0x427, 0x132c, 0x5c0, 0xb9f, 0x143d, 0x639, 0x19f0, 0x1551, 0x7} +#elif RADIX == 32 +{0x13540599, 0x6e1e3b6, 0x1f765945, 0x1c539dcf, 0x1c305ffb, 0x132c213, 0xf573e5c, 0xf818e68, 0x2aa8e} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x28a6e1e3b69aa02c, 0x2ffdf14e773feecb, 0xae7cb8132c213e18, 0x3fd5473e0639a1e} +#else +{0x370f1db4d5016, 0x4e773feecb28a, 0x427c305ffbe2, 0x687ab9f2e04cb, 0x1feaa39f031c} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX 
== 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x900d,0xd052,0xb453,0x206a,0xe61d,0x31f2,0xc579,0xfb21,0xc870,0x2bb,0xf38f,0xf9c1,0x83aa,0x47f1,0x58d1,0xeb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd052900d,0x206ab453,0x31f2e61d,0xfb21c579,0x2bbc870,0xf9c1f38f,0x47f183aa,0xeb58d1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x206ab453d052900d,0xfb21c57931f2e61d,0xf9c1f38f02bbc870,0xeb58d147f183aa}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x727e,0xa5e8,0xebc3,0x9e04,0xf1eb,0x38d7,0x68e0,0x8ea9,0x8f77,0x8331,0x48eb,0x82c0,0xb0a3,0x3583,0xf221,0x54}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5e8727e,0x9e04ebc3,0x38d7f1eb,0x8ea968e0,0x83318f77,0x82c048eb,0x3583b0a3,0x54f221}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9e04ebc3a5e8727e,0x8ea968e038d7f1eb,0x82c048eb83318f77,0x54f2213583b0a3}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdba9,0x49e4,0xc9b3,0x34e9,0x7ab9,0xd876,0xcae0,0xcfb0,0x6177,0x26b3,0x2c98,0x1e30,0x4a38,0x53cc,0x3bdc,0x71}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x49e4dba9,0x34e9c9b3,0xd8767ab9,0xcfb0cae0,0x26b36177,0x1e302c98,0x53cc4a38,0x713bdc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x34e9c9b349e4dba9,0xcfb0cae0d8767ab9,0x1e302c9826b36177,0x713bdc53cc4a38}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6ff3,0x2fad,0x4bac,0xdf95,0x19e2,0xce0d,0x3a86,0x4de,0x378f,0xfd44,0xc70,0x63e,0x7c55,0xb80e,0xa72e,0x14}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2fad6ff3,0xdf954bac,0xce0d19e2,0x4de3a86,0xfd44378f,0x63e0c70,0xb80e7c55,0x14a72e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdf954bac2fad6ff3,0x4de3a86ce0d19e2,0x63e0c70fd44378f,0x14a72eb80e7c55}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}} +#elif GMP_LIMB_BITS == 32 
+{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2417,0x1b00,0xcfe,0x8960,0x662e,0x42d2,0xc00f,0x222c,0x7671,0x278b,0x863f,0xbcac,0xdb9c,0x6e5e,0x4c5a,0x1b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1b002417,0x89600cfe,0x42d2662e,0x222cc00f,0x278b7671,0xbcac863f,0x6e5edb9c,0x1b4c5a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x89600cfe1b002417,0x222cc00f42d2662e,0xbcac863f278b7671,0x1b4c5a6e5edb9c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x21e8,0xd92b,0x5a2d,0xef86,0xf492,0x1483,0x8ae0,0x6b37,0x7f78,0x7b90,0x69c5,0xf4ec,0x2fb9,0x1660,0x8296,0xf8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd92b21e8,0xef865a2d,0x1483f492,0x6b378ae0,0x7b907f78,0xf4ec69c5,0x16602fb9,0xf88296}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xef865a2dd92b21e8,0x6b378ae01483f492,0xf4ec69c57b907f78,0xf8829616602fb9}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x38ff,0x5dc5,0x9aea,0xbc0e,0xbea5,0x775d,0x447b,0xc311,0xf01c,0xb63a,0x15fd,0x162a,0xab76,0x9def,0x2a0d,0xc5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5dc538ff,0xbc0e9aea,0x775dbea5,0xc311447b,0xb63af01c,0x162a15fd,0x9defab76,0xc52a0d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbc0e9aea5dc538ff,0xc311447b775dbea5,0x162a15fdb63af01c,0xc52a0d9defab76}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdbe9,0xe4ff,0xf301,0x769f,0x99d1,0xbd2d,0x3ff0,0xddd3,0x898e,0xd874,0x79c0,0x4353,0x2463,0x91a1,0xb3a5,0xe4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe4ffdbe9,0x769ff301,0xbd2d99d1,0xddd33ff0,0xd874898e,0x435379c0,0x91a12463,0xe4b3a5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x769ff301e4ffdbe9,0xddd33ff0bd2d99d1,0x435379c0d874898e,0xe4b3a591a12463}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x900d,0xd052,0xb453,0x206a,0xe61d,0x31f2,0xc579,0xfb21,0xc870,0x2bb,0xf38f,0xf9c1,0x83aa,0x47f1,0x58d1,0xeb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd052900d,0x206ab453,0x31f2e61d,0xfb21c579,0x2bbc870,0xf9c1f38f,0x47f183aa,0xeb58d1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x206ab453d052900d,0xfb21c57931f2e61d,0xf9c1f38f02bbc870,0xeb58d147f183aa}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x727e,0xa5e8,0xebc3,0x9e04,0xf1eb,0x38d7,0x68e0,0x8ea9,0x8f77,0x8331,0x48eb,0x82c0,0xb0a3,0x3583,0xf221,0x54}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5e8727e,0x9e04ebc3,0x38d7f1eb,0x8ea968e0,0x83318f77,0x82c048eb,0x3583b0a3,0x54f221}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9e04ebc3a5e8727e,0x8ea968e038d7f1eb,0x82c048eb83318f77,0x54f2213583b0a3}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdba9,0x49e4,0xc9b3,0x34e9,0x7ab9,0xd876,0xcae0,0xcfb0,0x6177,0x26b3,0x2c98,0x1e30,0x4a38,0x53cc,0x3bdc,0x71}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x49e4dba9,0x34e9c9b3,0xd8767ab9,0xcfb0cae0,0x26b36177,0x1e302c98,0x53cc4a38,0x713bdc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x34e9c9b349e4dba9,0xcfb0cae0d8767ab9,0x1e302c9826b36177,0x713bdc53cc4a38}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6ff3,0x2fad,0x4bac,0xdf95,0x19e2,0xce0d,0x3a86,0x4de,0x378f,0xfd44,0xc70,0x63e,0x7c55,0xb80e,0xa72e,0x14}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2fad6ff3,0xdf954bac,0xce0d19e2,0x4de3a86,0xfd44378f,0x63e0c70,0xb80e7c55,0x14a72e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdf954bac2fad6ff3,0x4de3a86ce0d19e2,0x63e0c70fd44378f,0x14a72eb80e7c55}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x20f0,0x2693,0xacbf,0x731a,0xb0f3,0xd8ce,0x1bcd,0xf836,0x8469,0x44d5,0xd604,0xd3aa,0x4aa8,0xcdc3,0x9086,0x3f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x269320f0,0x731aacbf,0xd8ceb0f3,0xf8361bcd,0x44d58469,0xd3aad604,0xcdc34aa8,0x3f9086}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x731aacbf269320f0,0xf8361bcdd8ceb0f3,0xd3aad60444d58469,0x3f9086cdc34aa8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcc11,0xe55a,0x932f,0x9534,0x2895,0xaf43,0x2956,0x614f,0x4e84,0xe4b2,0x60c6,0x255,0xbb14,0xd70d,0xc61e,0x13}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe55acc11,0x9534932f,0xaf432895,0x614f2956,0xe4b24e84,0x25560c6,0xd70dbb14,0x13c61e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9534932fe55acc11,0x614f2956af432895,0x25560c6e4b24e84,0x13c61ed70dbb14}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x28d6,0x450d,0xd24f,0x54e4,0x6e67,0x81d,0x9b71,0xadbe,0x1088,0x6148,0x4ebf,0x4b68,0x829e,0x65c8,0xe1a6,0xe5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450d28d6,0x54e4d24f,0x81d6e67,0xadbe9b71,0x61481088,0x4b684ebf,0x65c8829e,0xe5e1a6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x54e4d24f450d28d6,0xadbe9b71081d6e67,0x4b684ebf61481088,0xe5e1a665c8829e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf10,0xd96c,0x5340,0x8ce5,0x4f0c,0x2731,0xe432,0x7c9,0x7b96,0xbb2a,0x29fb,0x2c55,0xb557,0x323c,0x6f79,0xc0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd96cdf10,0x8ce55340,0x27314f0c,0x7c9e432,0xbb2a7b96,0x2c5529fb,0x323cb557,0xc06f79}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8ce55340d96cdf10,0x7c9e43227314f0c,0x2c5529fbbb2a7b96,0xc06f79323cb557}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x6b9, 0xd4c, 0x1d8d, 0x1f99, 0x1be4, 0x1f53, 0x5c1, 0x1937, 0x9c, 0xe68, 0x61e, 0x14e8, 0x352, 0x3d6, 0x174, 0x133, 0x18a6, 0x1b19, 0x94b, 0x9} +#elif RADIX == 32 +{0xb530d73, 0x4fccf63, 0x183f53df, 0x27326e5, 0x30f39a0, 0xc352a74, 0xcc2e83d, 0x18ce2982, 0x44a5e} +#elif RADIX == 64 +#if 
defined(SQISIGN_GF_IMPL_BROADWELL) +{0x7be4fccf635a986b, 0x9cd009cc9b9707ea, 0x85d07ac352a74187, 0x31a52f6338a6099} +#else +{0x27e67b1ad4c35, 0x4c9b9707ea7be, 0x54e830f39a013, 0x2661741eb0d4, 0x40d297b19c53} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x348, 0xb53, 0xf63, 0x7e6, 0x1ef9, 0xfd4, 0x1970, 0x64d, 0x27, 0x139a, 0x187, 0x153a, 0x10d4, 0xf5, 0x185d, 0x104c, 0xe29, 0x1ec6, 0x1a52, 0x0} +#elif RADIX == 32 +{0x1ad4c690, 0x193f33d8, 0xe0fd4f7, 0x9cc9b9, 0xc3ce68, 0xb0d4a9d, 0x1330ba0f, 0x16338a60, 0xd297} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x9ef93f33d8d6a634, 0xe734027326e5c1fa, 0x61741eb0d4a9d061, 0x28694bd8ce29826} +#else +{0x49f99ec6b531a, 0x7326e5c1fa9ef, 0x153a0c3ce6804, 0x609985d07ac35, 0x1434a5ec6714} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x18af, 0xb6e, 0x124d, 0xa49, 0xa8c, 0x11f5, 0xea9, 0x298, 0xa55, 0x1738, 0xb61, 0x2b9, 0x8a, 0x167a, 0x17e6, 0x2b0, 0x1290, 0x16ad, 0x1505, 0x2} +#elif RADIX == 32 +{0xadbb15e, 0xc524c93, 0x1531f554, 0x954530e, 0x15b0dce1, 0x1408a15c, 0xc2fcd67, 0x156ca405, 0x2a82d} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xaa8c524c9356dd8a, 0x6e70a5514c3aa63e, 0x5f9acf408a15cad8, 0x4c5416d5b290158} +#else +{0x6292649ab6ec5, 0x514c3aa63eaa8, 0x42b95b0dce14a, 0x5617e6b3d022, 0x262a0b6ad948} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1390, 0x1895, 0x9b7, 0xa5a, 0x1030, 0x16c1, 0xd21, 0x1053, 0x327, 0x1a4c, 0x1a22, 0x11e4, 0x16ba, 0x13a1, 0x1dbc, 0x1aac, 0x148c, 0x5c8, 0x15d2, 
0x0} +#elif RADIX == 32 +{0x1e256720, 0x1052d26d, 0x436c181, 0xc9e0a6d, 0xd116930, 0x36ba8f2, 0xb3b793a, 0xe452335, 0xae91} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x303052d26df12b39, 0xb498327829b486d8, 0x76f27436ba8f2688, 0x5748b9148cd56} +#else +{0x296936f8959c, 0x7829b486d8303, 0x51e4d11693064, 0x3559dbc9d0dae, 0x282ba45c8a46} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1be6, 0x11b3, 0x14ba, 0xf43, 0x1bd1, 0x215, 0x1e9a, 0x137a, 0x7b2, 0x15, 0x126, 0x148, 0x1c2b, 0x1b70, 0xf1c, 0x1e48, 0x1259, 0x188a, 0x1e44, 0x7} +#elif RADIX == 32 +{0x146cf7cd, 0x117a1d2e, 0x134215de, 0x1eca6f5e, 0x930054, 0x1c2b0a4, 0x121e39b7, 0x454967c, 0x2f226} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xbbd17a1d2ea367be, 0x802a7b29bd7a6842, 0x3c736e1c2b0a4049, 0x21f913115259f24} +#else +{0xbd0e9751b3df, 0x29bd7a6842bbd, 0x61480930054f6, 0x7c90f1cdb870a, 0x10fc8988a92c} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x4c5, 0x37e, 0xafa, 0x1b90, 0x13d, 0x8d3, 0xaa7, 0x489, 0x1d4a, 0x17bc, 0x168, 0x37f, 0x1ed6, 0x666, 0x1889, 0x1a4e, 0xa57, 0xeb7, 0xd37, 0x7} +#elif RADIX == 32 +{0x10df898b, 0x1ddc82be, 0x14e8d309, 0x1528912a, 0x10b45ef3, 0xded61bf, 0x13b11266, 0x15ba95f4, 0x269bb} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x613ddc82be86fc4c, 0x2f79d4a244aa9d1a, 0x6224ccded61bf85a, 0x1cb4ddd6ea57d27} +#else +{0x6ee415f437e26, 0x2244aa9d1a613, 0x437f0b45ef3a9, 0x749d8893337b5, 0xe5a6eeb752b} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x447, 0x1b87, 0x1cf0, 0x155, 0xb1, 0x804, 0x97a, 0x64a, 0x886, 0x3a3, 0x126f, 0x1553, 0x74d, 0xde9, 0x941, 0x39c, 0x8f, 0x1bbb, 0xf3, 0x1} +#elif RADIX == 32 +{0x6e1c88e, 0x110aaf3c, 0xf480405, 0x218c949, 0x19378e8d, 0x1274daa9, 0x71282de, 0x1dd823c7, 0x1079e} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x80b10aaf3c370e44, 0xc74688632525e900, 0x2505bd274daa9c9b, 0x2383cf77608f1ce} +#else +{0x85579e1b8722, 0x632525e90080b, 0x35539378e8d10, 0x47389416f49d3, 0x11c1e7bbb047} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xf9d, 0x552, 0x797, 0x19fc, 0x166, 0x7a8, 0x1ee5, 0xc77, 0x1ee7, 0x15ef, 0x340, 0x10df, 0x1d5f, 0x170, 0xf2, 0x123, 
0x1bb1, 0xd23, 0x3fc, 0x6} +#elif RADIX == 32 +{0x19549f3b, 0x6cfe1e5, 0x1ca7a80b, 0x1b9d8efe, 0x11a057bf, 0x1d5f86f, 0x8c1e417, 0x91eec42, 0x11fe3} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x166cfe1e5caa4f9, 0x2bdfee763bfb94f5, 0x83c82e1d5f86f8d0, 0x440ff1a47bb1091} +#else +{0x367f0f2e5527c, 0x763bfb94f5016, 0x70df1a057bfdc, 0x42460f20b8757, 0x4a07f8d23dd8} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27cb,0x4931,0x13e0,0xcd75,0x6846,0x3de7,0x5a91,0x9ff9,0xa270,0xa6d6,0x26ec,0xb972,0xb44,0xe4b8,0x52fc,0x3f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x493127cb,0xcd7513e0,0x3de76846,0x9ff95a91,0xa6d6a270,0xb97226ec,0xe4b80b44,0x3f52fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcd7513e0493127cb,0x9ff95a913de76846,0xb97226eca6d6a270,0x3f52fce4b80b44}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2d7e,0x5a38,0xca74,0x1d16,0x2547,0x1674,0x8c29,0xafc2,0x2349,0x4856,0x2c73,0x7957,0x67e1,0x3c3e,0x4d3,0xad}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a382d7e,0x1d16ca74,0x16742547,0xafc28c29,0x48562349,0x79572c73,0x3c3e67e1,0xad04d3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1d16ca745a382d7e,0xafc28c2916742547,0x79572c7348562349,0xad04d33c3e67e1}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8d7,0x27fb,0x321e,0xcf15,0xfbd3,0x6f8e,0x5fbd,0x7ed7,0xf394,0x58d6,0x5937,0xb73c,0xbfb,0xe027,0x64ac,0x22}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x27fbd8d7,0xcf15321e,0x6f8efbd3,0x7ed75fbd,0x58d6f394,0xb73c5937,0xe0270bfb,0x2264ac}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcf15321e27fbd8d7,0x7ed75fbd6f8efbd3,0xb73c593758d6f394,0x2264ace0270bfb}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd835,0xb6ce,0xec1f,0x328a,0x97b9,0xc218,0xa56e,0x6006,0x5d8f,0x5929,0xd913,0x468d,0xf4bb,0x1b47,0xad03,0xc0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb6ced835,0x328aec1f,0xc21897b9,0x6006a56e,0x59295d8f,0x468dd913,0x1b47f4bb,0xc0ad03}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x328aec1fb6ced835,0x6006a56ec21897b9,0x468dd91359295d8f,0xc0ad031b47f4bb}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +#elif GMP_LIMB_BITS == 32 
+{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5286,0x497a,0x2196,0xaa2d,0xa98a,0x7b17,0xa6e9,0x6da0,0x73f5,0xe349,0x83ae,0x7c6e,0x31,0xee2e,0x6931,0x32}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x497a5286,0xaa2d2196,0x7b17a98a,0x6da0a6e9,0xe34973f5,0x7c6e83ae,0xee2e0031,0x326931}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaa2d2196497a5286,0x6da0a6e97b17a98a,0x7c6e83aee34973f5,0x326931ee2e0031}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc8dc,0x8642,0xe46d,0x6a64,0x4c13,0x90e7,0xd82b,0x9b58,0x64b4,0x7381,0x5218,0x99b4,0x5926,0x5d78,0x95e2,0xb7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8642c8dc,0x6a64e46d,0x90e74c13,0x9b58d82b,0x738164b4,0x99b45218,0x5d785926,0xb795e2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6a64e46d8642c8dc,0x9b58d82b90e74c13,0x99b45218738164b4,0xb795e25d785926}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x94df,0x6dc7,0xcd7f,0xebb2,0xb290,0x811d,0x2825,0xc88,0xd514,0x959a,0x7d64,0xc8c3,0x16a9,0x106a,0x1eea,0x32}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6dc794df,0xebb2cd7f,0x811db290,0xc882825,0x959ad514,0xc8c37d64,0x106a16a9,0x321eea}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xebb2cd7f6dc794df,0xc882825811db290,0xc8c37d64959ad514,0x321eea106a16a9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe08c,0xe778,0x1464,0x19fe,0xef25,0x1d24,0xa98f,0x4af0,0x70d3,0x8e4d,0x2b82,0x95ea,0x3277,0xc267,0x1695,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe778e08c,0x19fe1464,0x1d24ef25,0x4af0a98f,0x8e4d70d3,0x95ea2b82,0xc2673277,0xf1695}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x19fe1464e778e08c,0x4af0a98f1d24ef25,0x95ea2b828e4d70d3,0xf1695c2673277}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1df,0xb6e1,0xe2a4,0x4bc9,0xdc85,0x6365,0x3fca,0x9a38,0xee2,0xed03,0xca7f,0x1984,0xe709,0x1efe,0xc173,0x8b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb6e1f1df,0x4bc9e2a4,0x6365dc85,0x9a383fca,0xed030ee2,0x1984ca7f,0x1efee709,0x8bc173}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x4bc9e2a4b6e1f1df,0x9a383fca6365dc85,0x1984ca7fed030ee2,0x8bc1731efee709}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6b21,0x9238,0x3280,0x144d,0x4d6f,0x7ee2,0xd7da,0xf377,0x2aeb,0x6a65,0x829b,0x373c,0xe956,0xef95,0xe115,0xcd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x92386b21,0x144d3280,0x7ee24d6f,0xf377d7da,0x6a652aeb,0x373c829b,0xef95e956,0xcde115}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x144d328092386b21,0xf377d7da7ee24d6f,0x373c829b6a652aeb,0xcde115ef95e956}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27cb,0x4931,0x13e0,0xcd75,0x6846,0x3de7,0x5a91,0x9ff9,0xa270,0xa6d6,0x26ec,0xb972,0xb44,0xe4b8,0x52fc,0x3f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x493127cb,0xcd7513e0,0x3de76846,0x9ff95a91,0xa6d6a270,0xb97226ec,0xe4b80b44,0x3f52fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcd7513e0493127cb,0x9ff95a913de76846,0xb97226eca6d6a270,0x3f52fce4b80b44}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2d7e,0x5a38,0xca74,0x1d16,0x2547,0x1674,0x8c29,0xafc2,0x2349,0x4856,0x2c73,0x7957,0x67e1,0x3c3e,0x4d3,0xad}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a382d7e,0x1d16ca74,0x16742547,0xafc28c29,0x48562349,0x79572c73,0x3c3e67e1,0xad04d3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1d16ca745a382d7e,0xafc28c2916742547,0x79572c7348562349,0xad04d33c3e67e1}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8d7,0x27fb,0x321e,0xcf15,0xfbd3,0x6f8e,0x5fbd,0x7ed7,0xf394,0x58d6,0x5937,0xb73c,0xbfb,0xe027,0x64ac,0x22}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x27fbd8d7,0xcf15321e,0x6f8efbd3,0x7ed75fbd,0x58d6f394,0xb73c5937,0xe0270bfb,0x2264ac}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcf15321e27fbd8d7,0x7ed75fbd6f8efbd3,0xb73c593758d6f394,0x2264ace0270bfb}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd835,0xb6ce,0xec1f,0x328a,0x97b9,0xc218,0xa56e,0x6006,0x5d8f,0x5929,0xd913,0x468d,0xf4bb,0x1b47,0xad03,0xc0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb6ced835,0x328aec1f,0xc21897b9,0x6006a56e,0x59295d8f,0x468dd913,0x1b47f4bb,0xc0ad03}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x328aec1fb6ced835,0x6006a56ec21897b9,0x468dd91359295d8f,0xc0ad031b47f4bb}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x2943,0x24bd,0x90cb,0x5516,0xd4c5,0xbd8b,0x5374,0xb6d0,0xb9fa,0x71a4,0x41d7,0xbe37,0x18,0xf717,0x3498,0x99}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x24bd2943,0x551690cb,0xbd8bd4c5,0xb6d05374,0x71a4b9fa,0xbe3741d7,0xf7170018,0x993498}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x551690cb24bd2943,0xb6d05374bd8bd4c5,0xbe3741d771a4b9fa,0x993498f7170018}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x646e,0xc321,0x7236,0xb532,0xa609,0xc873,0x6c15,0x4dac,0xb25a,0x39c0,0x290c,0x4cda,0x2c93,0x2ebc,0xcaf1,0xdb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc321646e,0xb5327236,0xc873a609,0x4dac6c15,0x39c0b25a,0x4cda290c,0x2ebc2c93,0xdbcaf1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb5327236c321646e,0x4dac6c15c873a609,0x4cda290c39c0b25a,0xdbcaf12ebc2c93}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd647,0xf187,0x9a31,0x1ee,0x193b,0xeec2,0xbfed,0x9418,0x15b6,0xe9a,0x4c74,0xae85,0x3ebe,0x2677,0x3f12,0x42}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf187d647,0x1ee9a31,0xeec2193b,0x9418bfed,0xe9a15b6,0xae854c74,0x26773ebe,0x423f12}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1ee9a31f187d647,0x9418bfedeec2193b,0xae854c740e9a15b6,0x423f1226773ebe}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x68ff,0x99be,0x416c,0x7bbf,0xd44f,0x609f,0x7682,0xa8ff,0xa6bb,0xec03,0x8e77,0xc076,0x7873,0x9676,0xa152,0xf5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x99be68ff,0x7bbf416c,0x609fd44f,0xa8ff7682,0xec03a6bb,0xc0768e77,0x96767873,0xf5a152}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7bbf416c99be68ff,0xa8ff7682609fd44f,0xc0768e77ec03a6bb,0xf5a15296767873}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3739,0xf7da,0xbd23,0xa38e,0x8cf9,0x7690,0x6b0e,0x1a7,0x77f0,0xa2bd,0x5ac7,0x5101,0x3aae,0xa922,0x2d3a,0x95}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf7da3739,0xa38ebd23,0x76908cf9,0x1a76b0e,0xa2bd77f0,0x51015ac7,0xa9223aae,0x952d3a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa38ebd23f7da3739,0x1a76b0e76908cf9,0x51015ac7a2bd77f0,0x952d3aa9223aae}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x29b9,0xe78,0x65ce,0xfe11,0xe6c4,0x113d,0x4012,0x6be7,0xea49,0xf165,0xb38b,0x517a,0xc141,0xd988,0xc0ed,0xbd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0xe7829b9,0xfe1165ce,0x113de6c4,0x6be74012,0xf165ea49,0x517ab38b,0xd988c141,0xbdc0ed}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfe1165ce0e7829b9,0x6be74012113de6c4,0x517ab38bf165ea49,0xbdc0edd988c141}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x1068, 0xf2c, 0x1ada, 0x1f58, 0x1343, 0x5cc, 0x90, 0x1bba, 0x50b, 0x1a35, 0x35f, 0x1388, 0x5ce, 0xc4f, 0x1462, 0x191f, 0x665, 0x843, 0x1cb1, 0x1} +#elif RADIX == 32 +{0x13cb20d0, 0x3fac6b6, 0x1205cc9a, 0x142f7740, 0x1afe8d4, 0x1e5ce9c4, 0x7e8c4c4, 0x2199972, 0x1e58a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x9343fac6b69e5906, 0xf46a50bddd0240b9, 0xd18989e5ce9c40d7, 0x28f2c5086665c8f} +#else +{0x1fd635b4f2c83, 0x3ddd0240b9934, 0x53881afe8d4a1, 0x723f462627973, 0x147962843332} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x5b3, 0x13cb, 0x6b6, 0x1fd6, 0x4d0, 0x173, 0x1024, 0x1eee, 0x942, 0x1e8d, 0xd7, 0x14e2, 0x1973, 0x1313, 0x1d18, 0xe47, 0x1999, 0xa10, 0xf2c, 0x6} +#elif RADIX == 32 +{0x14f2cb67, 0x10feb1ad, 0x4817326, 0x50bddd0, 0x6bfa35, 0x7973a71, 0x11fa3131, 0x1086665c, 0x17962} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x64d0feb1ada7965b, 0xfd1a942f7740902e, 0xf462627973a71035, 0x123cb1421999723} +#else +{0x7f58d6d3cb2d, 0x2f7740902e64d, 0x74e206bfa3528, 0x5c8fd18989e5c, 0x311e58a10ccc} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x14ba, 0xa50, 0x219, 0x1ca8, 0x1858, 0xe67, 0x1b19, 0xb09, 0x17fa, 0x89f, 0x10d7, 0x1a55, 0x14de, 
0x1f37, 0x12f0, 0x1247, 0x1aa6, 0x109f, 0x493, 0x6} +#elif RADIX == 32 +{0xa942975, 0x18e54086, 0x32e67c2, 0x1fe9613b, 0x186ba27e, 0xf4ded2a, 0x11e5e1f3, 0x4fea9a4, 0x1249c} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf858e5408654a14b, 0xd13f7fa584ec65cc, 0xcbc3e6f4ded2ac35, 0x35124e13faa6923} +#else +{0x472a0432a50a5, 0x2584ec65ccf85, 0x5a5586ba27eff, 0x248f2f0f9bd37, 0x42892709fd53} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1ba, 0xab8, 0x1ded, 0xdc9, 0xf40, 0xaa3, 0x169, 0x53c, 0x2, 0x848, 0x9a6, 0xbad, 0xb7e, 0x15dc, 0x87, 0x1cf3, 0x1791, 0x1af2, 0x1cdf, 0x7} +#elif RADIX == 32 +{0xaae0375, 0x6e4f7b, 0xd2aa37a, 0x8a781, 0x14d32120, 0x18b7e5d6, 0x1cc10f5d, 0x1795e479, 0x2e6fe} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6f406e4f7b55701b, 0x909000229e05a554, 0x821ebb8b7e5d6a69, 0x35f37f5e5791e79} +#else +{0x3727bdaab80d, 0x229e05a5546f4, 0x4bad4d3212000, 0x79e6087aee2df, 0x42f9bfaf2bc8} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x5b, 0xad0, 0x69, 0x1038, 0x18d2, 0x180d, 0x1871, 0x46b, 0x26b, 0x1ef2, 0xe46, 0x72d, 0xc0d, 0x15a4, 0x6d7, 0x221, 0x1611, 0x1a89, 0xd3f, 0x8} +#elif RADIX == 32 +{0xab400b7, 0x1281c01a, 0xe380dc6, 0x9ac8d78, 0x17237bc8, 0x8c0d396, 0x84daf5a, 0x144d8444, 0x369fe} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xb8d281c01a55a005, 0xbde426b235e1c701, 0x9b5eb48c0d396b91, 0x3b34ff513611110} +#else +{0x140e00d2ad002, 0x3235e1c701b8d, 0x272d7237bc84d, 0x44426d7ad2303, 0x459a7fa89b08} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1131, 0xac7, 0xa16, 0x918, 0x5d8, 0x1e64, 0x3e5, 0x142c, 0x1f89, 0x1cb7, 0xf96, 0x370, 0x4da, 0xf45, 0x1aa5, 0x1872, 0x1fc, 0xd83, 0x1145, 0x6} +#elif RADIX == 32 +{0x12b1e263, 0x1848c285, 0x1cbe642e, 0x1e268583, 0x7cb72df, 0xa4da1b8, 0x1cb54af4, 0xc187f30, 0x18a2b} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x85d848c285958f13, 0xb96ff89a160f97cc, 0x6a95e8a4da1b83e5, 0x84515b061fcc39} +#else +{0x4246142cac789, 0x1a160f97cc85d, 0x43707cb72dff1, 0x30e5aa57a2936, 0x2c228ad830fe} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x7a4, 0x388, 0xd00, 0x66c, 0x1a9a, 0xabc, 0x97b, 0xadc, 0xaab, 
0x1601, 0x287, 0xb2a, 0x1ab7, 0x1803, 0x1d06, 0x81c, 0x890, 0x11e0, 0x1e19, 0x0} +#elif RADIX == 32 +{0xe20f48, 0x1a336340, 0xf6abcd4, 0xaad5b89, 0x143d805, 0x7ab7595, 0x73a0d80, 0xf022410, 0xf0cc} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x9a9a33634007107a, 0xec02aab56e25ed57, 0x741b007ab75950a1, 0x1478663c089040e} +#else +{0x519b1a003883d, 0x356e25ed579a9, 0x6b2a143d80555, 0x1039d06c01ead, 0xa3c331e0448} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1e68, 0xcde, 0x29, 0x1777, 0x1ef8, 0x1a1c, 0x204, 0x148, 0x14ba, 0x1c39, 0x175, 0x1263, 0x4de, 0x1032, 0x1649, 0x5a4, 0xad, 0xcfb, 0x870, 0x3} +#elif RADIX == 32 +{0xb37bcd0, 0x18bbb80a, 0x9a1cf7, 0x12e82902, 0x10baf0e6, 0x44de931, 0x92c9303, 0x7d82b4b, 0x34383} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x9ef8bbb80a59bde6, 0x78734ba0a4081343, 0x59260644de93185d, 0x29a1c19f60ad2d2} +#else +{0x45ddc052cdef3, 0x20a40813439ef, 0x52630baf0e697, 0x4b49649819137, 0x14d0e0cfb056} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7177,0xb186,0x73de,0xc572,0x9802,0xc0ee,0x7031,0xfe17,0xbc2e,0x41c5,0xe2a7,0xed41,0x1cbf,0x9ff9,0xf5bc,0x1e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb1867177,0xc57273de,0xc0ee9802,0xfe177031,0x41c5bc2e,0xed41e2a7,0x9ff91cbf,0x1ef5bc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc57273deb1867177,0xfe177031c0ee9802,0xed41e2a741c5bc2e,0x1ef5bc9ff91cbf}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x726a,0xbae3,0x32b6,0x75c2,0xe003,0x6e79,0xd172,0x382a,0x8a51,0x7962,0xa563,0x6a39,0x9cdd,0xc910,0xf6f0,0xa0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbae3726a,0x75c232b6,0x6e79e003,0x382ad172,0x79628a51,0x6a39a563,0xc9109cdd,0xa0f6f0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x75c232b6bae3726a,0x382ad1726e79e003,0x6a39a56379628a51,0xa0f6f0c9109cdd}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x246b,0x9fa9,0x54e6,0xbd87,0x15b5,0x1c70,0x6470,0x25fa,0x3f5c,0x8940,0xa6e9,0x7eb5,0x6109,0x54a1,0xa8df,0x16}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9fa9246b,0xbd8754e6,0x1c7015b5,0x25fa6470,0x89403f5c,0x7eb5a6e9,0x54a16109,0x16a8df}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbd8754e69fa9246b,0x25fa64701c7015b5,0x7eb5a6e989403f5c,0x16a8df54a16109}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x8e89,0x4e79,0x8c21,0x3a8d,0x67fd,0x3f11,0x8fce,0x1e8,0x43d1,0xbe3a,0x1d58,0x12be,0xe340,0x6006,0xa43,0xe1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4e798e89,0x3a8d8c21,0x3f1167fd,0x1e88fce,0xbe3a43d1,0x12be1d58,0x6006e340,0xe10a43}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3a8d8c214e798e89,0x1e88fce3f1167fd,0x12be1d58be3a43d1,0xe10a436006e340}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1975,0x2b02,0x86c,0x9cbe,0x7576,0xb1c3,0xd9a7,0x737e,0x4de1,0xa245,0x7652,0xf9bf,0x4bf8,0xdc2c,0xeaa1,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2b021975,0x9cbe086c,0xb1c37576,0x737ed9a7,0xa2454de1,0xf9bf7652,0xdc2c4bf8,0x8eaa1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9cbe086c2b021975,0x737ed9a7b1c37576,0xf9bf7652a2454de1,0x8eaa1dc2c4bf8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee88,0x46bc,0x7177,0x337c,0x92b6,0x40dc,0xb657,0x3366,0x6c8a,0x2b98,0x40eb,0x1146,0xe116,0xb00a,0xa22f,0xe3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0x46bcee88,0x337c7177,0x40dc92b6,0x3366b657,0x2b986c8a,0x114640eb,0xb00ae116,0xe3a22f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x337c717746bcee88,0x3366b65740dc92b6,0x114640eb2b986c8a,0xe3a22fb00ae116}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf28d,0x64d3,0xe248,0x40b9,0x5141,0x82bb,0x82ea,0xcf35,0xfaf0,0x3,0xd71f,0x6e88,0x7ac9,0xf4c9,0x6b9e,0xcc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x64d3f28d,0x40b9e248,0x82bb5141,0xcf3582ea,0x3faf0,0x6e88d71f,0xf4c97ac9,0xcc6b9e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x40b9e24864d3f28d,0xcf3582ea82bb5141,0x6e88d71f0003faf0,0xcc6b9ef4c97ac9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe68b,0xd4fd,0xf793,0x6341,0x8a89,0x4e3c,0x2658,0x8c81,0xb21e,0x5dba,0x89ad,0x640,0xb407,0x23d3,0x155e,0xf7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd4fde68b,0x6341f793,0x4e3c8a89,0x8c812658,0x5dbab21e,0x64089ad,0x23d3b407,0xf7155e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6341f793d4fde68b,0x8c8126584e3c8a89,0x64089ad5dbab21e,0xf7155e23d3b407}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7177,0xb186,0x73de,0xc572,0x9802,0xc0ee,0x7031,0xfe17,0xbc2e,0x41c5,0xe2a7,0xed41,0x1cbf,0x9ff9,0xf5bc,0x1e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb1867177,0xc57273de,0xc0ee9802,0xfe177031,0x41c5bc2e,0xed41e2a7,0x9ff91cbf,0x1ef5bc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc57273deb1867177,0xfe177031c0ee9802,0xed41e2a741c5bc2e,0x1ef5bc9ff91cbf}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x726a,0xbae3,0x32b6,0x75c2,0xe003,0x6e79,0xd172,0x382a,0x8a51,0x7962,0xa563,0x6a39,0x9cdd,0xc910,0xf6f0,0xa0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbae3726a,0x75c232b6,0x6e79e003,0x382ad172,0x79628a51,0x6a39a563,0xc9109cdd,0xa0f6f0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x75c232b6bae3726a,0x382ad1726e79e003,0x6a39a56379628a51,0xa0f6f0c9109cdd}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x246b,0x9fa9,0x54e6,0xbd87,0x15b5,0x1c70,0x6470,0x25fa,0x3f5c,0x8940,0xa6e9,0x7eb5,0x6109,0x54a1,0xa8df,0x16}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9fa9246b,0xbd8754e6,0x1c7015b5,0x25fa6470,0x89403f5c,0x7eb5a6e9,0x54a16109,0x16a8df}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbd8754e69fa9246b,0x25fa64701c7015b5,0x7eb5a6e989403f5c,0x16a8df54a16109}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8e89,0x4e79,0x8c21,0x3a8d,0x67fd,0x3f11,0x8fce,0x1e8,0x43d1,0xbe3a,0x1d58,0x12be,0xe340,0x6006,0xa43,0xe1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4e798e89,0x3a8d8c21,0x3f1167fd,0x1e88fce,0xbe3a43d1,0x12be1d58,0x6006e340,0xe10a43}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x3a8d8c214e798e89,0x1e88fce3f1167fd,0x12be1d58be3a43d1,0xe10a436006e340}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x84a0,0x8ad1,0xbcc4,0xc440,0x94e1,0x46ea,0x15c6,0x784e,0x190,0xd26f,0x630,0x2bee,0x74b1,0x93ce,0xe061,0x3c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8ad184a0,0xc440bcc4,0x46ea94e1,0x784e15c6,0xd26f0190,0x2bee0630,0x93ce74b1,0x3ce061}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc440bcc48ad184a0,0x784e15c646ea94e1,0x2bee0630d26f0190,0x3ce06193ce74b1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1e2b,0xdafe,0xfa45,0xa69b,0xb77e,0xf670,0x927d,0xa0f9,0xccb5,0xc897,0x9607,0x5f22,0x47bf,0x867,0xf781,0xd9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdafe1e2b,0xa69bfa45,0xf670b77e,0xa0f9927d,0xc897ccb5,0x5f229607,0x86747bf,0xd9f781}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa69bfa45dafe1e2b,0xa0f9927df670b77e,0x5f229607c897ccb5,0xd9f781086747bf}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x2aa2,0xbd3f,0x2ad,0x19bd,0xe6f0,0x3b95,0x3fff,0xd17e,0xf3a6,0x7888,0xda46,0x3b21,0xcc57,0x5301,0x3e50,0xc4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbd3f2aa2,0x19bd02ad,0x3b95e6f0,0xd17e3fff,0x7888f3a6,0x3b21da46,0x5301cc57,0xc43e50}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x19bd02adbd3f2aa2,0xd17e3fff3b95e6f0,0x3b21da467888f3a6,0xc43e505301cc57}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7b60,0x752e,0x433b,0x3bbf,0x6b1e,0xb915,0xea39,0x87b1,0xfe6f,0x2d90,0xf9cf,0xd411,0x8b4e,0x6c31,0x1f9e,0xc3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x752e7b60,0x3bbf433b,0xb9156b1e,0x87b1ea39,0x2d90fe6f,0xd411f9cf,0x6c318b4e,0xc31f9e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3bbf433b752e7b60,0x87b1ea39b9156b1e,0xd411f9cf2d90fe6f,0xc31f9e6c318b4e}}} +#endif +}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.h new file mode 100644 index 0000000000..1cc782a5bd --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.h @@ -0,0 +1,31 @@ +#ifndef ENDOMORPHISM_ACTION_H +#define ENDOMORPHISM_ACTION_H +#include +#include +#include +/** Type for precomputed endomorphism rings applied to precomputed torsion bases. + * + * Precomputed by the precompute scripts. + * + * @typedef curve_with_endomorphism_ring_t + * + * @struct curve_with_endomorphism_ring + **/ +typedef struct curve_with_endomorphism_ring { + ec_curve_t curve; + ec_basis_t basis_even; + ibz_mat_2x2_t action_i, action_j, action_k; + ibz_mat_2x2_t action_gen2, action_gen3, action_gen4; +} curve_with_endomorphism_ring_t; +#define CURVE_E0 (CURVES_WITH_ENDOMORPHISMS->curve) +#define BASIS_EVEN (CURVES_WITH_ENDOMORPHISMS->basis_even) +#define ACTION_I (CURVES_WITH_ENDOMORPHISMS->action_i) +#define ACTION_J (CURVES_WITH_ENDOMORPHISMS->action_j) +#define ACTION_K (CURVES_WITH_ENDOMORPHISMS->action_k) +#define ACTION_GEN2 (CURVES_WITH_ENDOMORPHISMS->action_gen2) +#define ACTION_GEN3 (CURVES_WITH_ENDOMORPHISMS->action_gen3) +#define ACTION_GEN4 (CURVES_WITH_ENDOMORPHISMS->action_gen4) +#define NUM_ALTERNATE_STARTING_CURVES 6 +#define ALTERNATE_STARTING_CURVES (CURVES_WITH_ENDOMORPHISMS+1) +extern const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7]; +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fips202.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fips202.c new file mode 100644 index 0000000000..f2992d8c7f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fips202.c @@ -0,0 +1,876 @@ +// SPDX-License-Identifier: PD and Apache-2.0 + +/* FIPS202 implementation based on code from PQClean, + * which is in turn based based on the public domain implementation in + * crypto_hash/keccakc512/simple/ from http://bench.cr.yp.to/supercop.html + * by Ronny Van Keer + * and the public domain "TweetFips202" implementation + * from https://twitter.com/tweetfips202 + * by Gilles Van Assche, Daniel J. 
Bernstein, and Peter Schwabe */ + +#include +#include +#include +#include + +#include "fips202.h" + +#define NROUNDS 24 +#define ROL(a, offset) (((a) << (offset)) ^ ((a) >> (64 - (offset)))) + +/************************************************* + * Name: load64 + * + * Description: Load 8 bytes into uint64_t in little-endian order + * + * Arguments: - const uint8_t *x: pointer to input byte array + * + * Returns the loaded 64-bit unsigned integer + **************************************************/ +static uint64_t load64(const uint8_t *x) { + uint64_t r = 0; + for (size_t i = 0; i < 8; ++i) { + r |= (uint64_t)x[i] << 8 * i; + } + + return r; +} + +/************************************************* + * Name: store64 + * + * Description: Store a 64-bit integer to a byte array in little-endian order + * + * Arguments: - uint8_t *x: pointer to the output byte array + * - uint64_t u: input 64-bit unsigned integer + **************************************************/ +static void store64(uint8_t *x, uint64_t u) { + for (size_t i = 0; i < 8; ++i) { + x[i] = (uint8_t) (u >> 8 * i); + } +} + +/* Keccak round constants */ +static const uint64_t KeccakF_RoundConstants[NROUNDS] = { + 0x0000000000000001ULL, 0x0000000000008082ULL, + 0x800000000000808aULL, 0x8000000080008000ULL, + 0x000000000000808bULL, 0x0000000080000001ULL, + 0x8000000080008081ULL, 0x8000000000008009ULL, + 0x000000000000008aULL, 0x0000000000000088ULL, + 0x0000000080008009ULL, 0x000000008000000aULL, + 0x000000008000808bULL, 0x800000000000008bULL, + 0x8000000000008089ULL, 0x8000000000008003ULL, + 0x8000000000008002ULL, 0x8000000000000080ULL, + 0x000000000000800aULL, 0x800000008000000aULL, + 0x8000000080008081ULL, 0x8000000000008080ULL, + 0x0000000080000001ULL, 0x8000000080008008ULL +}; + +/************************************************* + * Name: KeccakF1600_StatePermute + * + * Description: The Keccak F1600 Permutation + * + * Arguments: - uint64_t *state: pointer to input/output Keccak state + **************************************************/ +static void KeccakF1600_StatePermute(uint64_t *state) { + int round; + + uint64_t Aba, Abe, Abi, Abo, Abu; + uint64_t Aga, Age, Agi, Ago, Agu; + uint64_t Aka, Ake, Aki, Ako, Aku; + uint64_t Ama, Ame, Ami, Amo, Amu; + uint64_t Asa, Ase, Asi, Aso, Asu; + uint64_t BCa, BCe, BCi, BCo, BCu; + uint64_t Da, De, Di, Do, Du; + uint64_t Eba, Ebe, Ebi, Ebo, Ebu; + uint64_t Ega, Ege, Egi, Ego, Egu; + uint64_t Eka, Eke, Eki, Eko, Eku; + uint64_t Ema, Eme, Emi, Emo, Emu; + uint64_t Esa, Ese, Esi, Eso, Esu; + + // copyFromState(A, state) + Aba = state[0]; + Abe = state[1]; + Abi = state[2]; + Abo = state[3]; + Abu = state[4]; + Aga = state[5]; + Age = state[6]; + Agi = state[7]; + Ago = state[8]; + Agu = state[9]; + Aka = state[10]; + Ake = state[11]; + Aki = state[12]; + Ako = state[13]; + Aku = state[14]; + Ama = state[15]; + Ame = state[16]; + Ami = state[17]; + Amo = state[18]; + Amu = state[19]; + Asa = state[20]; + Ase = state[21]; + Asi = state[22]; + Aso = state[23]; + Asu = state[24]; + + for (round = 0; round < NROUNDS; round += 2) { + // prepareTheta + BCa = Aba ^ Aga ^ Aka ^ Ama ^ Asa; + BCe = Abe ^ Age ^ Ake ^ Ame ^ Ase; + BCi = Abi ^ Agi ^ Aki ^ Ami ^ Asi; + BCo = Abo ^ Ago ^ Ako ^ Amo ^ Aso; + BCu = Abu ^ Agu ^ Aku ^ Amu ^ Asu; + + // thetaRhoPiChiIotaPrepareTheta(round , A, E) + Da = BCu ^ ROL(BCe, 1); + De = BCa ^ ROL(BCi, 1); + Di = BCe ^ ROL(BCo, 1); + Do = BCi ^ ROL(BCu, 1); + Du = BCo ^ ROL(BCa, 1); + + Aba ^= Da; + BCa = Aba; + Age ^= De; + BCe = ROL(Age, 44); + Aki ^= Di; + BCi = 
ROL(Aki, 43); + Amo ^= Do; + BCo = ROL(Amo, 21); + Asu ^= Du; + BCu = ROL(Asu, 14); + Eba = BCa ^ ((~BCe) & BCi); + Eba ^= KeccakF_RoundConstants[round]; + Ebe = BCe ^ ((~BCi) & BCo); + Ebi = BCi ^ ((~BCo) & BCu); + Ebo = BCo ^ ((~BCu) & BCa); + Ebu = BCu ^ ((~BCa) & BCe); + + Abo ^= Do; + BCa = ROL(Abo, 28); + Agu ^= Du; + BCe = ROL(Agu, 20); + Aka ^= Da; + BCi = ROL(Aka, 3); + Ame ^= De; + BCo = ROL(Ame, 45); + Asi ^= Di; + BCu = ROL(Asi, 61); + Ega = BCa ^ ((~BCe) & BCi); + Ege = BCe ^ ((~BCi) & BCo); + Egi = BCi ^ ((~BCo) & BCu); + Ego = BCo ^ ((~BCu) & BCa); + Egu = BCu ^ ((~BCa) & BCe); + + Abe ^= De; + BCa = ROL(Abe, 1); + Agi ^= Di; + BCe = ROL(Agi, 6); + Ako ^= Do; + BCi = ROL(Ako, 25); + Amu ^= Du; + BCo = ROL(Amu, 8); + Asa ^= Da; + BCu = ROL(Asa, 18); + Eka = BCa ^ ((~BCe) & BCi); + Eke = BCe ^ ((~BCi) & BCo); + Eki = BCi ^ ((~BCo) & BCu); + Eko = BCo ^ ((~BCu) & BCa); + Eku = BCu ^ ((~BCa) & BCe); + + Abu ^= Du; + BCa = ROL(Abu, 27); + Aga ^= Da; + BCe = ROL(Aga, 36); + Ake ^= De; + BCi = ROL(Ake, 10); + Ami ^= Di; + BCo = ROL(Ami, 15); + Aso ^= Do; + BCu = ROL(Aso, 56); + Ema = BCa ^ ((~BCe) & BCi); + Eme = BCe ^ ((~BCi) & BCo); + Emi = BCi ^ ((~BCo) & BCu); + Emo = BCo ^ ((~BCu) & BCa); + Emu = BCu ^ ((~BCa) & BCe); + + Abi ^= Di; + BCa = ROL(Abi, 62); + Ago ^= Do; + BCe = ROL(Ago, 55); + Aku ^= Du; + BCi = ROL(Aku, 39); + Ama ^= Da; + BCo = ROL(Ama, 41); + Ase ^= De; + BCu = ROL(Ase, 2); + Esa = BCa ^ ((~BCe) & BCi); + Ese = BCe ^ ((~BCi) & BCo); + Esi = BCi ^ ((~BCo) & BCu); + Eso = BCo ^ ((~BCu) & BCa); + Esu = BCu ^ ((~BCa) & BCe); + + // prepareTheta + BCa = Eba ^ Ega ^ Eka ^ Ema ^ Esa; + BCe = Ebe ^ Ege ^ Eke ^ Eme ^ Ese; + BCi = Ebi ^ Egi ^ Eki ^ Emi ^ Esi; + BCo = Ebo ^ Ego ^ Eko ^ Emo ^ Eso; + BCu = Ebu ^ Egu ^ Eku ^ Emu ^ Esu; + + // thetaRhoPiChiIotaPrepareTheta(round+1, E, A) + Da = BCu ^ ROL(BCe, 1); + De = BCa ^ ROL(BCi, 1); + Di = BCe ^ ROL(BCo, 1); + Do = BCi ^ ROL(BCu, 1); + Du = BCo ^ ROL(BCa, 1); + + Eba ^= Da; + BCa = Eba; + Ege ^= De; + BCe = ROL(Ege, 44); + Eki ^= Di; + BCi = ROL(Eki, 43); + Emo ^= Do; + BCo = ROL(Emo, 21); + Esu ^= Du; + BCu = ROL(Esu, 14); + Aba = BCa ^ ((~BCe) & BCi); + Aba ^= KeccakF_RoundConstants[round + 1]; + Abe = BCe ^ ((~BCi) & BCo); + Abi = BCi ^ ((~BCo) & BCu); + Abo = BCo ^ ((~BCu) & BCa); + Abu = BCu ^ ((~BCa) & BCe); + + Ebo ^= Do; + BCa = ROL(Ebo, 28); + Egu ^= Du; + BCe = ROL(Egu, 20); + Eka ^= Da; + BCi = ROL(Eka, 3); + Eme ^= De; + BCo = ROL(Eme, 45); + Esi ^= Di; + BCu = ROL(Esi, 61); + Aga = BCa ^ ((~BCe) & BCi); + Age = BCe ^ ((~BCi) & BCo); + Agi = BCi ^ ((~BCo) & BCu); + Ago = BCo ^ ((~BCu) & BCa); + Agu = BCu ^ ((~BCa) & BCe); + + Ebe ^= De; + BCa = ROL(Ebe, 1); + Egi ^= Di; + BCe = ROL(Egi, 6); + Eko ^= Do; + BCi = ROL(Eko, 25); + Emu ^= Du; + BCo = ROL(Emu, 8); + Esa ^= Da; + BCu = ROL(Esa, 18); + Aka = BCa ^ ((~BCe) & BCi); + Ake = BCe ^ ((~BCi) & BCo); + Aki = BCi ^ ((~BCo) & BCu); + Ako = BCo ^ ((~BCu) & BCa); + Aku = BCu ^ ((~BCa) & BCe); + + Ebu ^= Du; + BCa = ROL(Ebu, 27); + Ega ^= Da; + BCe = ROL(Ega, 36); + Eke ^= De; + BCi = ROL(Eke, 10); + Emi ^= Di; + BCo = ROL(Emi, 15); + Eso ^= Do; + BCu = ROL(Eso, 56); + Ama = BCa ^ ((~BCe) & BCi); + Ame = BCe ^ ((~BCi) & BCo); + Ami = BCi ^ ((~BCo) & BCu); + Amo = BCo ^ ((~BCu) & BCa); + Amu = BCu ^ ((~BCa) & BCe); + + Ebi ^= Di; + BCa = ROL(Ebi, 62); + Ego ^= Do; + BCe = ROL(Ego, 55); + Eku ^= Du; + BCi = ROL(Eku, 39); + Ema ^= Da; + BCo = ROL(Ema, 41); + Ese ^= De; + BCu = ROL(Ese, 2); + Asa = BCa ^ ((~BCe) & BCi); + Ase = BCe ^ ((~BCi) & BCo); + Asi = BCi ^ 
((~BCo) & BCu); + Aso = BCo ^ ((~BCu) & BCa); + Asu = BCu ^ ((~BCa) & BCe); + } + + // copyToState(state, A) + state[0] = Aba; + state[1] = Abe; + state[2] = Abi; + state[3] = Abo; + state[4] = Abu; + state[5] = Aga; + state[6] = Age; + state[7] = Agi; + state[8] = Ago; + state[9] = Agu; + state[10] = Aka; + state[11] = Ake; + state[12] = Aki; + state[13] = Ako; + state[14] = Aku; + state[15] = Ama; + state[16] = Ame; + state[17] = Ami; + state[18] = Amo; + state[19] = Amu; + state[20] = Asa; + state[21] = Ase; + state[22] = Asi; + state[23] = Aso; + state[24] = Asu; +} + +/************************************************* + * Name: keccak_absorb + * + * Description: Absorb step of Keccak; + * non-incremental, starts by zeroeing the state. + * + * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + * - const uint8_t *m: pointer to input to be absorbed into s + * - size_t mlen: length of input in bytes + * - uint8_t p: domain-separation byte for different + * Keccak-derived functions + **************************************************/ +static void keccak_absorb(uint64_t *s, uint32_t r, const uint8_t *m, + size_t mlen, uint8_t p) { + size_t i; + uint8_t t[200]; + + /* Zero state */ + for (i = 0; i < 25; ++i) { + s[i] = 0; + } + + while (mlen >= r) { + for (i = 0; i < r / 8; ++i) { + s[i] ^= load64(m + 8 * i); + } + + KeccakF1600_StatePermute(s); + mlen -= r; + m += r; + } + + for (i = 0; i < r; ++i) { + t[i] = 0; + } + for (i = 0; i < mlen; ++i) { + t[i] = m[i]; + } + t[i] = p; + t[r - 1] |= 128; + for (i = 0; i < r / 8; ++i) { + s[i] ^= load64(t + 8 * i); + } +} + +/************************************************* + * Name: keccak_squeezeblocks + * + * Description: Squeeze step of Keccak. Squeezes full blocks of r bytes each. + * Modifies the state. Can be called multiple times to keep + * squeezing, i.e., is incremental. + * + * Arguments: - uint8_t *h: pointer to output blocks + * - size_t nblocks: number of blocks to be + * squeezed (written to h) + * - uint64_t *s: pointer to input/output Keccak state + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + **************************************************/ +static void keccak_squeezeblocks(uint8_t *h, size_t nblocks, + uint64_t *s, uint32_t r) { + while (nblocks > 0) { + KeccakF1600_StatePermute(s); + for (size_t i = 0; i < (r >> 3); i++) { + store64(h + 8 * i, s[i]); + } + h += r; + nblocks--; + } +} + +/************************************************* + * Name: keccak_inc_init + * + * Description: Initializes the incremental Keccak state to zero. + * + * Arguments: - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. + **************************************************/ +static void keccak_inc_init(uint64_t *s_inc) { + size_t i; + + for (i = 0; i < 25; ++i) { + s_inc[i] = 0; + } + s_inc[25] = 0; +} + +/************************************************* + * Name: keccak_inc_absorb + * + * Description: Incremental keccak absorb + * Preceded by keccak_inc_init, succeeded by keccak_inc_finalize + * + * Arguments: - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. 
+ * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + * - const uint8_t *m: pointer to input to be absorbed into s + * - size_t mlen: length of input in bytes + **************************************************/ +static void keccak_inc_absorb(uint64_t *s_inc, uint32_t r, const uint8_t *m, + size_t mlen) { + size_t i; + + /* Recall that s_inc[25] is the non-absorbed bytes xored into the state */ + while (mlen + s_inc[25] >= r) { + for (i = 0; i < r - (uint32_t)s_inc[25]; i++) { + /* Take the i'th byte from message + xor with the s_inc[25] + i'th byte of the state; little-endian */ + s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); + } + mlen -= (size_t)(r - s_inc[25]); + m += r - s_inc[25]; + s_inc[25] = 0; + + KeccakF1600_StatePermute(s_inc); + } + + for (i = 0; i < mlen; i++) { + s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); + } + s_inc[25] += mlen; +} + +/************************************************* + * Name: keccak_inc_finalize + * + * Description: Finalizes Keccak absorb phase, prepares for squeezing + * + * Arguments: - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + * - uint8_t p: domain-separation byte for different + * Keccak-derived functions + **************************************************/ +static void keccak_inc_finalize(uint64_t *s_inc, uint32_t r, uint8_t p) { + /* After keccak_inc_absorb, we are guaranteed that s_inc[25] < r, + so we can always use one more byte for p in the current state. */ + s_inc[s_inc[25] >> 3] ^= (uint64_t)p << (8 * (s_inc[25] & 0x07)); + s_inc[(r - 1) >> 3] ^= (uint64_t)128 << (8 * ((r - 1) & 0x07)); + s_inc[25] = 0; +} + +/************************************************* + * Name: keccak_inc_squeeze + * + * Description: Incremental Keccak squeeze; can be called on byte-level + * + * Arguments: - uint8_t *h: pointer to output bytes + * - size_t outlen: number of bytes to be squeezed + * - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + **************************************************/ +static void keccak_inc_squeeze(uint8_t *h, size_t outlen, + uint64_t *s_inc, uint32_t r) { + size_t i; + + /* First consume any bytes we still have sitting around */ + for (i = 0; i < outlen && i < s_inc[25]; i++) { + /* There are s_inc[25] bytes left, so r - s_inc[25] is the first + available byte. We consume from there, i.e., up to r. 
*/ + h[i] = (uint8_t)(s_inc[(r - s_inc[25] + i) >> 3] >> (8 * ((r - s_inc[25] + i) & 0x07))); + } + h += i; + outlen -= i; + s_inc[25] -= i; + + /* Then squeeze the remaining necessary blocks */ + while (outlen > 0) { + KeccakF1600_StatePermute(s_inc); + + for (i = 0; i < outlen && i < r; i++) { + h[i] = (uint8_t)(s_inc[i >> 3] >> (8 * (i & 0x07))); + } + h += i; + outlen -= i; + s_inc[25] = r - i; + } +} + +void shake128_inc_init(shake128incctx *state) { + keccak_inc_init(state->ctx); +} + +void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHAKE128_RATE, input, inlen); +} + +void shake128_inc_finalize(shake128incctx *state) { + keccak_inc_finalize(state->ctx, SHAKE128_RATE, 0x1F); +} + +void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state) { + keccak_inc_squeeze(output, outlen, state->ctx, SHAKE128_RATE); +} + +void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void shake128_inc_ctx_release(shake128incctx *state) { + (void)state; +} + +void shake256_inc_init(shake256incctx *state) { + keccak_inc_init(state->ctx); +} + +void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHAKE256_RATE, input, inlen); +} + +void shake256_inc_finalize(shake256incctx *state) { + keccak_inc_finalize(state->ctx, SHAKE256_RATE, 0x1F); +} + +void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state) { + keccak_inc_squeeze(output, outlen, state->ctx, SHAKE256_RATE); +} + +void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void shake256_inc_ctx_release(shake256incctx *state) { + (void)state; +} + + +/************************************************* + * Name: shake128_absorb + * + * Description: Absorb step of the SHAKE128 XOF. + * non-incremental, starts by zeroeing the state. + * + * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state + * - const uint8_t *input: pointer to input to be absorbed + * into s + * - size_t inlen: length of input in bytes + **************************************************/ +void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen) { + keccak_absorb(state->ctx, SHAKE128_RATE, input, inlen, 0x1F); +} + +/************************************************* + * Name: shake128_squeezeblocks + * + * Description: Squeeze step of SHAKE128 XOF. Squeezes full blocks of + * SHAKE128_RATE bytes each. Modifies the state. Can be called + * multiple times to keep squeezing, i.e., is incremental. + * + * Arguments: - uint8_t *output: pointer to output blocks + * - size_t nblocks: number of blocks to be squeezed + * (written to output) + * - shake128ctx *state: pointer to input/output Keccak state + **************************************************/ +void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state) { + keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE128_RATE); +} + +void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); +} + +/** Release the allocated state. Call only once. */ +void shake128_ctx_release(shake128ctx *state) { + (void)state; +} + +/************************************************* + * Name: shake256_absorb + * + * Description: Absorb step of the SHAKE256 XOF. 
+ * non-incremental, starts by zeroeing the state. + * + * Arguments: - shake256ctx *state: pointer to (uninitialized) output Keccak state + * - const uint8_t *input: pointer to input to be absorbed + * into s + * - size_t inlen: length of input in bytes + **************************************************/ +void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen) { + keccak_absorb(state->ctx, SHAKE256_RATE, input, inlen, 0x1F); +} + +/************************************************* + * Name: shake256_squeezeblocks + * + * Description: Squeeze step of SHAKE256 XOF. Squeezes full blocks of + * SHAKE256_RATE bytes each. Modifies the state. Can be called + * multiple times to keep squeezing, i.e., is incremental. + * + * Arguments: - uint8_t *output: pointer to output blocks + * - size_t nblocks: number of blocks to be squeezed + * (written to output) + * - shake256ctx *state: pointer to input/output Keccak state + **************************************************/ +void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state) { + keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE256_RATE); +} + +void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); +} + +/** Release the allocated state. Call only once. */ +void shake256_ctx_release(shake256ctx *state) { + (void)state; +} + +/************************************************* + * Name: shake128 + * + * Description: SHAKE128 XOF with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - size_t outlen: requested output length in bytes + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void shake128(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen) { + size_t nblocks = outlen / SHAKE128_RATE; + uint8_t t[SHAKE128_RATE]; + shake128ctx s; + + shake128_absorb(&s, input, inlen); + shake128_squeezeblocks(output, nblocks, &s); + + output += nblocks * SHAKE128_RATE; + outlen -= nblocks * SHAKE128_RATE; + + if (outlen) { + shake128_squeezeblocks(t, 1, &s); + for (size_t i = 0; i < outlen; ++i) { + output[i] = t[i]; + } + } + shake128_ctx_release(&s); +} + +/************************************************* + * Name: shake256 + * + * Description: SHAKE256 XOF with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - size_t outlen: requested output length in bytes + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void shake256(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen) { + size_t nblocks = outlen / SHAKE256_RATE; + uint8_t t[SHAKE256_RATE]; + shake256ctx s; + + shake256_absorb(&s, input, inlen); + shake256_squeezeblocks(output, nblocks, &s); + + output += nblocks * SHAKE256_RATE; + outlen -= nblocks * SHAKE256_RATE; + + if (outlen) { + shake256_squeezeblocks(t, 1, &s); + for (size_t i = 0; i < outlen; ++i) { + output[i] = t[i]; + } + } + shake256_ctx_release(&s); +} + +void sha3_256_inc_init(sha3_256incctx *state) { + keccak_inc_init(state->ctx); +} + +void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void sha3_256_inc_ctx_release(sha3_256incctx *state) { + (void)state; +} + +void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen) { + 
keccak_inc_absorb(state->ctx, SHA3_256_RATE, input, inlen); +} + +void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state) { + uint8_t t[SHA3_256_RATE]; + keccak_inc_finalize(state->ctx, SHA3_256_RATE, 0x06); + + keccak_squeezeblocks(t, 1, state->ctx, SHA3_256_RATE); + + sha3_256_inc_ctx_release(state); + + for (size_t i = 0; i < 32; i++) { + output[i] = t[i]; + } +} + +/************************************************* + * Name: sha3_256 + * + * Description: SHA3-256 with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen) { + uint64_t s[25]; + uint8_t t[SHA3_256_RATE]; + + /* Absorb input */ + keccak_absorb(s, SHA3_256_RATE, input, inlen, 0x06); + + /* Squeeze output */ + keccak_squeezeblocks(t, 1, s, SHA3_256_RATE); + + for (size_t i = 0; i < 32; i++) { + output[i] = t[i]; + } +} + +void sha3_384_inc_init(sha3_384incctx *state) { + keccak_inc_init(state->ctx); +} + +void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHA3_384_RATE, input, inlen); +} + +void sha3_384_inc_ctx_release(sha3_384incctx *state) { + (void)state; +} + +void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state) { + uint8_t t[SHA3_384_RATE]; + keccak_inc_finalize(state->ctx, SHA3_384_RATE, 0x06); + + keccak_squeezeblocks(t, 1, state->ctx, SHA3_384_RATE); + + sha3_384_inc_ctx_release(state); + + for (size_t i = 0; i < 48; i++) { + output[i] = t[i]; + } +} + +/************************************************* + * Name: sha3_384 + * + * Description: SHA3-256 with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen) { + uint64_t s[25]; + uint8_t t[SHA3_384_RATE]; + + /* Absorb input */ + keccak_absorb(s, SHA3_384_RATE, input, inlen, 0x06); + + /* Squeeze output */ + keccak_squeezeblocks(t, 1, s, SHA3_384_RATE); + + for (size_t i = 0; i < 48; i++) { + output[i] = t[i]; + } +} + +void sha3_512_inc_init(sha3_512incctx *state) { + keccak_inc_init(state->ctx); +} + +void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHA3_512_RATE, input, inlen); +} + +void sha3_512_inc_ctx_release(sha3_512incctx *state) { + (void)state; +} + +void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state) { + uint8_t t[SHA3_512_RATE]; + keccak_inc_finalize(state->ctx, SHA3_512_RATE, 0x06); + + keccak_squeezeblocks(t, 1, state->ctx, SHA3_512_RATE); + + sha3_512_inc_ctx_release(state); + + for (size_t i = 0; i < 64; i++) { + output[i] = t[i]; + } +} + +/************************************************* + * Name: sha3_512 + * + * Description: SHA3-512 with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + 
**************************************************/ +void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen) { + uint64_t s[25]; + uint8_t t[SHA3_512_RATE]; + + /* Absorb input */ + keccak_absorb(s, SHA3_512_RATE, input, inlen, 0x06); + + /* Squeeze output */ + keccak_squeezeblocks(t, 1, s, SHA3_512_RATE); + + for (size_t i = 0; i < 64; i++) { + output[i] = t[i]; + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fips202.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fips202.h new file mode 100644 index 0000000000..c29ebd8f9d --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fips202.h @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef FIPS202_H +#define FIPS202_H + +#include +#include + +#define SHAKE128_RATE 168 +#define SHAKE256_RATE 136 +#define SHA3_256_RATE 136 +#define SHA3_384_RATE 104 +#define SHA3_512_RATE 72 + +#define PQC_SHAKEINCCTX_U64WORDS 26 +#define PQC_SHAKECTX_U64WORDS 25 + +#define PQC_SHAKEINCCTX_BYTES (sizeof(uint64_t) * 26) +#define PQC_SHAKECTX_BYTES (sizeof(uint64_t) * 25) + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} shake128incctx; + +// Context for non-incremental API +typedef struct { + uint64_t ctx[PQC_SHAKECTX_U64WORDS]; +} shake128ctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} shake256incctx; + +// Context for non-incremental API +typedef struct { + uint64_t ctx[PQC_SHAKECTX_U64WORDS]; +} shake256ctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} sha3_256incctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} sha3_384incctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} sha3_512incctx; + +/* Initialize the state and absorb the provided input. + * + * This function does not support being called multiple times + * with the same state. + */ +void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen); +/* Squeeze output out of the sponge. + * + * Supports being called multiple times + */ +void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state); +/* Free the state */ +void shake128_ctx_release(shake128ctx *state); +/* Copy the state. */ +void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src); + +/* Initialize incremental hashing API */ +void shake128_inc_init(shake128incctx *state); +/* Absorb more information into the XOF. + * + * Can be called multiple times. + */ +void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen); +/* Finalize the XOF for squeezing */ +void shake128_inc_finalize(shake128incctx *state); +/* Squeeze output out of the sponge. + * + * Supports being called multiple times + */ +void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state); +/* Copy the context of the SHAKE128 XOF */ +void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src); +/* Free the context of the SHAKE128 XOF */ +void shake128_inc_ctx_release(shake128incctx *state); + +/* Initialize the state and absorb the provided input. + * + * This function does not support being called multiple times + * with the same state. + */ +void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen); +/* Squeeze output out of the sponge. 
+ * + * Supports being called multiple times + */ +void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state); +/* Free the context held by this XOF */ +void shake256_ctx_release(shake256ctx *state); +/* Copy the context held by this XOF */ +void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src); + +/* Initialize incremental hashing API */ +void shake256_inc_init(shake256incctx *state); +void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen); +/* Prepares for squeeze phase */ +void shake256_inc_finalize(shake256incctx *state); +/* Squeeze output out of the sponge. + * + * Supports being called multiple times + */ +void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state); +/* Copy the state */ +void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src); +/* Free the state */ +void shake256_inc_ctx_release(shake256incctx *state); + +/* One-stop SHAKE128 call */ +void shake128(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen); + +/* One-stop SHAKE256 call */ +void shake256(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen); + +/* Initialize the incremental hashing state */ +void sha3_256_inc_init(sha3_256incctx *state); +/* Absorb blocks into SHA3 */ +void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen); +/* Obtain the output of the function and free `state` */ +void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state); +/* Copy the context */ +void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src); +/* Release the state, don't use if `_finalize` has been used */ +void sha3_256_inc_ctx_release(sha3_256incctx *state); + +void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen); + +/* Initialize the incremental hashing state */ +void sha3_384_inc_init(sha3_384incctx *state); +/* Absorb blocks into SHA3 */ +void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, size_t inlen); +/* Obtain the output of the function and free `state` */ +void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state); +/* Copy the context */ +void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src); +/* Release the state, don't use if `_finalize` has been used */ +void sha3_384_inc_ctx_release(sha3_384incctx *state); + +/* One-stop SHA3-384 shop */ +void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen); + +/* Initialize the incremental hashing state */ +void sha3_512_inc_init(sha3_512incctx *state); +/* Absorb blocks into SHA3 */ +void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen); +/* Obtain the output of the function and free `state` */ +void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state); +/* Copy the context */ +void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src); +/* Release the state, don't use if `_finalize` has been used */ +void sha3_512_inc_ctx_release(sha3_512incctx *state); + +/* One-stop SHA3-512 shop */ +void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp.c new file mode 100644 index 0000000000..f7c7456498 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp.c @@ -0,0 +1,95 @@ +#include +#include "fp.h" + +const digit_t p[NWORDS_FIELD] = { 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 
0x04ffffffffffffff }; +const digit_t p2[NWORDS_FIELD] = { 0xfffffffffffffffe, 0xffffffffffffffff, 0xffffffffffffffff, 0x09ffffffffffffff }; + +void +fp_sqrt(fp_t *x) +{ + (void)gf5248_sqrt(x, x); +} + +uint32_t +fp_is_square(const fp_t *a) +{ + // ls is (0, 1, -1) and we want fp_is_square + // to return 0xFF..FF when ls is 1 or 0 and 0x00..00 otherwise + int32_t ls = gf5248_legendre(a); + return ~(uint32_t)(ls >> 1); +} + +void +fp_inv(fp_t *x) +{ + (void)gf5248_invert(x, x); +} + +void +fp_exp3div4(fp_t *a) +{ + // + // We optimise this by using the shape of the prime + // to avoid almost all multiplications: + // + // We write: + // (p - 3) / 4 = (5*2^248 - 4) / 4 + // = 5*2^246 - 1 + // = 5*(2^246 - 1) + 4 + // Then we first compute: + // a246 = a**(2^246 - 1) + // Then from this we get the desired result as: + // a**((p-3)/4) = a246**5 * a**4 + // We can compute this with 12 multiplications and 247 squares. + fp_t z4, t3, t6, tmp; + // Compute a**3 and a**4 + fp_sqr(&z4, a); + fp_mul(&tmp, a, &z4); + fp_sqr(&z4, &z4); + // Compute a**(2^3 - 1) = a**7 + fp_mul(&t3, &tmp, &z4); + // Compute a**(2^6 - 1) + fp_sqr(&t6, &t3); + for (int i = 1; i < 3; i++) + fp_sqr(&t6, &t6); + fp_mul(&t6, &t6, &t3); + // Compute a**(2^12 - 1) + fp_sqr(a, &t6); + for (int i = 1; i < 6; i++) + fp_sqr(a, a); + fp_mul(a, a, &t6); + // Compute a**(2^15 - 1) + for (int i = 0; i < 3; i++) + fp_sqr(a, a); + fp_mul(a, a, &t3); + // Compute a**(2^30 - 1) + fp_sqr(&tmp, a); + for (int i = 1; i < 15; i++) + fp_sqr(&tmp, &tmp); + fp_mul(a, a, &tmp); + // Compute a**(2^60 - 1) + fp_sqr(&tmp, a); + for (int i = 1; i < 30; i++) + fp_sqr(&tmp, &tmp); + fp_mul(a, a, &tmp); + // Compute a**(2^120 - 1) + fp_sqr(&tmp, a); + for (int i = 1; i < 60; i++) + fp_sqr(&tmp, &tmp); + fp_mul(a, a, &tmp); + // Compute a**(2^123 - 1) + for (int i = 0; i < 3; i++) + fp_sqr(a, a); + fp_mul(a, a, &t3); + // Compute a**(2^246 - 1) + fp_sqr(&tmp, a); + for (int i = 1; i < 123; i++) + fp_sqr(&tmp, &tmp); + fp_mul(a, a, &tmp); + // Compute a**(5*(2^246 - 1)) + fp_sqr(&tmp, a); + fp_sqr(&tmp, &tmp); + fp_mul(a, a, &tmp); + // Compute a**(5*(2^246 - 1) + 4) + fp_mul(a, a, &z4); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp.h new file mode 100644 index 0000000000..3210a041c8 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp.h @@ -0,0 +1,135 @@ +#ifndef FP_H +#define FP_H + +// Include statements +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gf5248.h" + +// Type for elements of GF(p) +#define fp_t gf5248 + +// Operations in fp +static inline void +fp_neg(fp_t *d, const fp_t *a) +{ + gf5248_neg(d, a); +} + +void fp_add(fp_t *out, const fp_t *a, const fp_t *b); // implemented in fp_asm.S +void fp_sub(fp_t *out, const fp_t *a, const fp_t *b); // implemented in fp_asm.S +void fp_sqr(fp_t *out, const fp_t *a); // implemented in fp_asm.S +void fp_mul(fp_t *out, const fp_t *a, const fp_t *b); // implemented in fp_asm.S + +static inline void +fp_mul_small(fp_t *d, const fp_t *a, uint32_t n) +{ + gf5248_mul_small(d, a, n); +} + +static inline void +fp_half(fp_t *d, const fp_t *a) +{ + gf5248_half(d, a); +} +// #define fp_half gf5248_half + +static inline void +fp_div3(fp_t *d, const fp_t *a) +{ + gf5248_div3(d, a); +} +// #define fp_div3 gf5248_div3 + +// Constant time selection and swapping +static inline void +fp_select(fp_t *d, const fp_t *a0, const fp_t *a1, uint32_t ctl) +{ + gf5248_select(d, a0, a1, 
ctl); +} +// #define fp_select gf5248_select +static inline void +fp_cswap(fp_t *a, fp_t *b, uint32_t ctl) +{ + gf5248_cswap(a, b, ctl); +} +// #define fp_cswap gf5248_cswap + +// Comparisons for fp elements +static inline uint32_t +fp_is_zero(const fp_t *a) +{ + return gf5248_iszero(a); +} +// #define fp_is_zero gf5248_iszero + +static inline uint32_t +fp_is_equal(const fp_t *a, const fp_t *b) +{ + return gf5248_equals(a, b); +} +// #define fp_is_equal gf5248_equals + +// Set a uint32 to an Fp value +static inline void +fp_set_small(fp_t *d, uint32_t x) +{ + gf5248_set_small(d, x); +} +// #define fp_set_small gf5248_set_small + +// Encoding and decoding of bytes +static inline void +fp_encode(void *dst, const fp_t *a) +{ + gf5248_encode(dst, a); +} +// #define fp_encode gf5248_encode +static inline uint32_t +fp_decode(fp_t *d, const void *src) +{ + return gf5248_decode(d, src); +} +// #define fp_decode gf5248_decode +static inline void +fp_decode_reduce(fp_t *d, const void *src, size_t len) +{ + gf5248_decode_reduce(d, src, len); +} +// #define fp_decode_reduce gf5248_decode_reduce + +// These functions are essentially useless because we can just +// use = for the shallow copies we need, but they're here for +// now until we do a larger refactoring +static inline void +fp_copy(fp_t *out, const fp_t *a) +{ + memcpy(out, a, sizeof(fp_t)); +} + +static inline void +fp_set_zero(fp_t *a) +{ + memcpy(a, &ZERO, sizeof(fp_t)); +} + +static inline void +fp_set_one(fp_t *a) +{ + memcpy(a, &ONE, sizeof(fp_t)); +} + +// Functions defined in low level code but with different API +void fp_inv(fp_t *a); +void fp_sqrt(fp_t *a); +void fp_exp3div4(fp_t *a); +uint32_t fp_is_square(const fp_t *a); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp2.c new file mode 100644 index 0000000000..3269f6c66f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp2.c @@ -0,0 +1,188 @@ +#include "fp2.h" +#include +#include + +/* Arithmetic modulo X^2 + 1 */ + +void +fp2_encode(void *dst, const fp2_t *a) +{ + uint8_t *buf = dst; + fp_encode(buf, &(a->re)); + fp_encode(buf + FP_ENCODED_BYTES, &(a->im)); +} + +uint32_t +fp2_decode(fp2_t *d, const void *src) +{ + const uint8_t *buf = src; + uint32_t re, im; + + re = fp_decode(&(d->re), buf); + im = fp_decode(&(d->im), buf + FP_ENCODED_BYTES); + return re & im; +} + +void +fp2_inv(fp2_t *x) +{ + fp_t t0, t1; + + fp_sqr(&t0, &(x->re)); + fp_sqr(&t1, &(x->im)); + fp_add(&t0, &t0, &t1); + fp_inv(&t0); + fp_mul(&(x->re), &(x->re), &t0); + fp_mul(&(x->im), &(x->im), &t0); + fp_neg(&(x->im), &(x->im)); +} + +void +fp2_batched_inv(fp2_t *x, int len) +{ + fp2_t t1[len], t2[len]; + fp2_t inverse; + + // x = x0,...,xn + // t1 = x0, x0*x1, ... ,x0 * x1 * ... * xn + t1[0] = x[0]; + for (int i = 1; i < len; i++) { + fp2_mul(&t1[i], &t1[i - 1], &x[i]); + } + + // inverse = 1/ (x0 * x1 * ... * xn) + inverse = t1[len - 1]; + fp2_inv(&inverse); + t2[0] = inverse; + + // t2 = 1/ (x0 * x1 * ... * xn), 1/ (x0 * x1 * ... * x(n-1)) , ... 
, 1/xO + for (int i = 1; i < len; i++) { + fp2_mul(&t2[i], &t2[i - 1], &x[len - i]); + } + + x[0] = t2[len - 1]; + for (int i = 1; i < len; i++) { + fp2_mul(&x[i], &t1[i - 1], &t2[len - i - 1]); + } +} + +uint32_t +fp2_is_square(const fp2_t *x) +{ + fp_t t0, t1; + + fp_sqr(&t0, &(x->re)); + fp_sqr(&t1, &(x->im)); + fp_add(&t0, &t0, &t1); + + return fp_is_square(&t0); +} + +void +fp2_sqrt(fp2_t *a) +{ + fp_t x0, x1, t0, t1; + + /* From "Optimized One-Dimensional SQIsign Verification on Intel and + * Cortex-M4" by Aardal et al: https://eprint.iacr.org/2024/1563 */ + + // x0 = \delta = sqrt(a0^2 + a1^2). + fp_sqr(&x0, &(a->re)); + fp_sqr(&x1, &(a->im)); + fp_add(&x0, &x0, &x1); + fp_sqrt(&x0); + // If a1 = 0, there is a risk of \delta = -a0, which makes x0 = 0 below. + // In that case, we restore the value \delta = a0. + fp_select(&x0, &x0, &(a->re), fp_is_zero(&(a->im))); + // x0 = \delta + a0, t0 = 2 * x0. + fp_add(&x0, &x0, &(a->re)); + fp_add(&t0, &x0, &x0); + // x1 = t0^(p-3)/4. + fp_copy(&x1, &t0); + fp_exp3div4(&x1); + // x0 = x0 * x1, x1 = x1 * a1, t1 = (2x0)^2. + fp_mul(&x0, &x0, &x1); + fp_mul(&x1, &x1, &(a->im)); + fp_add(&t1, &x0, &x0); + fp_sqr(&t1, &t1); + // If t1 = t0, return x0 + x1*i, otherwise x1 - x0*i. + fp_sub(&t0, &t0, &t1); + uint32_t f = fp_is_zero(&t0); + fp_neg(&t1, &x0); + fp_copy(&t0, &x1); + fp_select(&t0, &t0, &x0, f); + fp_select(&t1, &t1, &x1, f); + + // Check if t0 is zero + uint32_t t0_is_zero = fp_is_zero(&t0); + // Check whether t0, t1 are odd + // Note: we encode to ensure canonical representation + uint8_t tmp_bytes[FP_ENCODED_BYTES]; + fp_encode(tmp_bytes, &t0); + uint32_t t0_is_odd = -((uint32_t)tmp_bytes[0] & 1); + fp_encode(tmp_bytes, &t1); + uint32_t t1_is_odd = -((uint32_t)tmp_bytes[0] & 1); + // We negate the output if: + // t0 is odd, or + // t0 is zero and t1 is odd + uint32_t negate_output = t0_is_odd | (t0_is_zero & t1_is_odd); + fp_neg(&x0, &t0); + fp_select(&(a->re), &t0, &x0, negate_output); + fp_neg(&x0, &t1); + fp_select(&(a->im), &t1, &x0, negate_output); +} + +uint32_t +fp2_sqrt_verify(fp2_t *a) +{ + fp2_t t0, t1; + + fp2_copy(&t0, a); + fp2_sqrt(a); + fp2_sqr(&t1, a); + + return (fp2_is_equal(&t0, &t1)); +} + +// exponentiation +void +fp2_pow_vartime(fp2_t *out, const fp2_t *x, const digit_t *exp, const int size) +{ + fp2_t acc; + digit_t bit; + + fp2_copy(&acc, x); + fp2_set_one(out); + + // Iterate over each word of exp + for (int j = 0; j < size; j++) { + // Iterate over each bit of the word + for (int i = 0; i < RADIX; i++) { + bit = (exp[j] >> i) & 1; + if (bit == 1) { + fp2_mul(out, out, &acc); + } + fp2_sqr(&acc, &acc); + } + } +} + +void +fp2_print(const char *name, const fp2_t *a) +{ + printf("%s0x", name); + + uint8_t buf[FP_ENCODED_BYTES]; + fp_encode(&buf, &a->re); // Encoding ensures canonical rep + for (int i = 0; i < FP_ENCODED_BYTES; i++) { + printf("%02x", buf[FP_ENCODED_BYTES - i - 1]); + } + + printf(" + i*0x"); + + fp_encode(&buf, &a->im); + for (int i = 0; i < FP_ENCODED_BYTES; i++) { + printf("%02x", buf[FP_ENCODED_BYTES - i - 1]); + } + printf("\n"); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp2.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp2.h new file mode 100644 index 0000000000..5f84fdf646 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp2.h @@ -0,0 +1,41 @@ +#ifndef FP2_H +#define FP2_H + +#define NO_FP2X_MUL +#define NO_FP2X_SQR + +#include + +extern void fp2_sq_c0(fp2_t *out, const fp2_t *in); +extern void fp2_sq_c1(fp_t *out, const fp2_t *in); + 
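+// The matching multiplication helpers; like the squaring helpers above, they are implemented in fp_asm.S.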
+extern void fp2_mul_c0(fp_t *out, const fp2_t *in0, const fp2_t *in1); +extern void fp2_mul_c1(fp_t *out, const fp2_t *in0, const fp2_t *in1); + +static inline void +fp2_mul(fp2_t *x, const fp2_t *y, const fp2_t *z) +{ + fp_t t; + + fp2_mul_c0(&t, y, z); // c0 = a0*b0 - a1*b1 + fp2_mul_c1(&x->im, y, z); // c1 = a0*b1 + a1*b0 + x->re.arr[0] = t.arr[0]; + x->re.arr[1] = t.arr[1]; + x->re.arr[2] = t.arr[2]; + x->re.arr[3] = t.arr[3]; +} + +static inline void +fp2_sqr(fp2_t *x, const fp2_t *y) +{ + fp2_t t; + + fp2_sq_c0(&t, y); // c0 = (a0+a1)(a0-a1) + fp2_sq_c1(&x->im, y); // c1 = 2a0*a1 + x->re.arr[0] = t.re.arr[0]; + x->re.arr[1] = t.re.arr[1]; + x->re.arr[2] = t.re.arr[2]; + x->re.arr[3] = t.re.arr[3]; +} + +#endif \ No newline at end of file diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp2x.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp2x.h new file mode 100644 index 0000000000..44cf103bf2 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp2x.h @@ -0,0 +1,162 @@ +#ifndef FP2X_H +#define FP2X_H + +#include +#include "fp.h" +#include + +// Structure for representing elements in GF(p^2) +typedef struct fp2_t +{ + fp_t re, im; +} fp2_t; + +static inline void +fp2_set_small(fp2_t *x, const uint32_t val) +{ + fp_set_small(&(x->re), val); + fp_set_zero(&(x->im)); +} + +static inline void +fp2_mul_small(fp2_t *x, const fp2_t *y, uint32_t n) +{ + fp_mul_small(&x->re, &y->re, n); + fp_mul_small(&x->im, &y->im, n); +} + +static inline void +fp2_set_zero(fp2_t *x) +{ + fp_set_zero(&(x->re)); + fp_set_zero(&(x->im)); +} + +static inline void +fp2_set_one(fp2_t *x) +{ + fp_set_one(&(x->re)); + fp_set_zero(&(x->im)); +} + +static inline uint32_t +fp2_is_equal(const fp2_t *a, const fp2_t *b) +{ // Compare two GF(p^2) elements in constant time + // Returns 1 (true) if a=b, 0 (false) otherwise + + return fp_is_equal(&(a->re), &(b->re)) & fp_is_equal(&(a->im), &(b->im)); +} + +static inline uint32_t +fp2_is_zero(const fp2_t *a) +{ // Is a GF(p^2) element zero? + // Returns 1 (true) if a=0, 0 (false) otherwise + + return fp_is_zero(&(a->re)) & fp_is_zero(&(a->im)); +} + +static inline uint32_t +fp2_is_one(const fp2_t *a) +{ // Is a GF(p^2) element one? 
+ // Returns 1 (true) if a=0, 0 (false) otherwise + return fp_is_equal(&(a->re), &ONE) & fp_is_zero(&(a->im)); +} + +static inline void +fp2_half(fp2_t *x, const fp2_t *y) +{ + fp_half(&(x->re), &(y->re)); + fp_half(&(x->im), &(y->im)); +} + +static inline void +fp2_add(fp2_t *x, const fp2_t *y, const fp2_t *z) +{ + fp_add(&(x->re), &(y->re), &(z->re)); + fp_add(&(x->im), &(y->im), &(z->im)); +} + +static inline void +fp2_add_one(fp2_t *x, const fp2_t *y) +{ + fp_add(&x->re, &y->re, &ONE); + fp_copy(&x->im, &y->im); +} + +static inline void +fp2_sub(fp2_t *x, const fp2_t *y, const fp2_t *z) +{ + fp_sub(&(x->re), &(y->re), &(z->re)); + fp_sub(&(x->im), &(y->im), &(z->im)); +} + +static inline void +fp2_neg(fp2_t *x, const fp2_t *y) +{ + fp_neg(&(x->re), &(y->re)); + fp_neg(&(x->im), &(y->im)); +} + +#ifndef NO_FP2X_MUL +static inline void +fp2_mul(fp2_t *x, const fp2_t *y, const fp2_t *z) +{ + fp_t t0, t1; + + fp_add(&t0, &(y->re), &(y->im)); + fp_add(&t1, &(z->re), &(z->im)); + fp_mul(&t0, &t0, &t1); + fp_mul(&t1, &(y->im), &(z->im)); + fp_mul(&(x->re), &(y->re), &(z->re)); + fp_sub(&(x->im), &t0, &t1); + fp_sub(&(x->im), &(x->im), &(x->re)); + fp_sub(&(x->re), &(x->re), &t1); +} +#endif + +#ifndef NO_FP2X_SQR +static inline void +fp2_sqr(fp2_t *x, const fp2_t *y) +{ + fp_t sum, diff; + + fp_add(&sum, &(y->re), &(y->im)); + fp_sub(&diff, &(y->re), &(y->im)); + fp_mul(&(x->im), &(y->re), &(y->im)); + fp_add(&(x->im), &(x->im), &(x->im)); + fp_mul(&(x->re), &sum, &diff); +} +#endif + +static inline void +fp2_select(fp2_t *d, const fp2_t *a0, const fp2_t *a1, uint32_t ctl) +{ + fp_select(&(d->re), &(a0->re), &(a1->re), ctl); + fp_select(&(d->im), &(a0->im), &(a1->im), ctl); +} + +static inline void +fp2_cswap(fp2_t *a, fp2_t *b, uint32_t ctl) +{ + fp_cswap(&(a->re), &(b->re), ctl); + fp_cswap(&(a->im), &(b->im), ctl); +} + +static inline void +fp2_copy(fp2_t *x, const fp2_t *y) +{ + *x = *y; +} + +// New functions +void fp2_encode(void *dst, const fp2_t *a); +uint32_t fp2_decode(fp2_t *d, const void *src); +void fp2_inv(fp2_t *x); +uint32_t fp2_is_square(const fp2_t *x); +void fp2_sqrt(fp2_t *x); +uint32_t fp2_sqrt_verify(fp2_t *a); +void fp2_batched_inv(fp2_t *x, int len); +void fp2_pow_vartime(fp2_t *out, const fp2_t *x, const uint64_t *exp, const int size); +void fp2_print(const char *name, const fp2_t *a); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp_asm.S b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp_asm.S new file mode 100755 index 0000000000..6da7ff7c9f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp_asm.S @@ -0,0 +1,466 @@ +#include +.intel_syntax noprefix + +.set pbytes,32 +.set plimbs,4 + +#ifdef __APPLE__ +.section __TEXT,__const +#else +.section .rodata +#endif +p_plus_1: .quad 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0500000000000000 + +#if defined(__linux__) && defined(__ELF__) +.section .note.GNU-stack,"",@progbits +#endif + +#include + +.text +.p2align 4,,15 + +.global fp_add +fp_add: + xor rax, rax + mov r8, [rsi] + mov r9, [rsi+8] + mov r10, [rsi+16] + mov r11, [rsi+24] + add r8, [rdx] + adc r9, [rdx+8] + adc r10, [rdx+16] + adc r11, [rdx+24] + mov rax, r11 + shr rax, 59 + neg rax + mov rdx, [rip+p+24] + and rdx, rax + sub r8, rax + sbb r9, rax + sbb r10, rax + sbb r11, rdx + + mov rax, r11 + shr rax, 59 + neg rax + mov rdx, [rip+p+24] + and rdx, rax + sub r8, rax + sbb r9, rax + sbb r10, rax + sbb r11, rdx + mov [rdi], r8 + mov [rdi+8], r9 + mov [rdi+16], r10 + mov [rdi+24], r11 + ret + 
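+//***********************************************************************
+// Field subtraction in GF(p)
+// Operation: c = a - b mod p
+// Inputs: a stored in [rsi], b stored in [rdx]
+// Output: c stored in [rdi]
+//***********************************************************************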
+.global fp_sub +fp_sub: + xor rax, rax + mov r8, [rsi] + mov r9, [rsi+8] + mov r10, [rsi+16] + mov r11, [rsi+24] + sub r8, [rdx] + sbb r9, [rdx+8] + sbb r10, [rdx+16] + sbb r11, [rdx+24] + sbb rax, 0 + + mov rdx, [rip+p+24] + and rdx, rax + add r8, rax + adc r9, rax + adc r10, rax + adc r11, rdx + + mov rax, r11 + sar rax, 59 + mov rdx, [rip+p+24] + and rdx, rax + add r8, rax + adc r9, rax + adc r10, rax + adc r11, rdx + + mov [rdi], r8 + mov [rdi+8], r9 + mov [rdi+16], r10 + mov [rdi+24], r11 + ret + +///////////////////////////////////////////////////////////////// MACROS +// z = a x bi + z +// Inputs: base memory pointer M1 (a), +// bi pre-stored in rdx, +// accumulator z in [Z0:Z4] +// Output: [Z0:Z4] +// Temps: regs T0:T1 +///////////////////////////////////////////////////////////////// +.macro MULADD64x256 M1, Z0, Z1, Z2, Z3, Z4, T0, T1, C + mulx \T0, \T1, \M1 // A0*B0 + xor \C, \C + adox \Z0, \T1 + adox \Z1, \T0 + mulx \T0, \T1, 8\M1 // A0*B1 + adcx \Z1, \T1 + adox \Z2, \T0 + mulx \T0, \T1, 16\M1 // A0*B2 + adcx \Z2, \T1 + adox \Z3, \T0 + mulx \T0, \T1, 24\M1 // A0*B3 + adcx \Z3, \T1 + adox \Z4, \T0 + adc \Z4, 0 +.endm + +.macro MULADD64x64 M1, Z0, Z1, Z2, Z3, T0, T1 + mulx \T0, \T1, \M1 // A0*B0 + xor rax, rax + adox \Z2, \T1 + adox \Z3, \T0 +.endm + +//*********************************************************************** +// Multiplication in GF(p^2), non-complex part +// Operation: c [rdi] = a0 x b0 - a1 x b1 +// Inputs: a = [a1, a0] stored in [rsi] +// b = [b1, b0] stored in [rdx] +// Output: c stored in [rdi] +//*********************************************************************** +.global fp2_mul_c0 +fp2_mul_c0: + push r12 + push r13 + push r14 + mov rcx, rdx + + // [rdi0:3] <- 2p - b1 + mov r8, [rip+p2] + mov r9, [rip+p2+8] + mov r10, r9 + mov r11, [rip+p2+24] + mov rax, [rcx+32] + mov rdx, [rcx+40] + sub r8, rax + sbb r9, rdx + mov rax, [rcx+48] + mov rdx, [rcx+56] + sbb r10, rax + sbb r11, rdx + mov [rdi], r8 + mov [rdi+8], r9 + mov [rdi+16], r10 + mov [rdi+24], r11 + + // [r8:r12] <- z = a0 x b00 - a1 x b10 + mov rdx, [rcx] + mulx r9, r8, [rsi] + xor rax, rax + mulx r10, r11, [rsi+8] + adox r9, r11 + mulx r11, r12, [rsi+16] + adox r10, r12 + mulx r12, r13, [rsi+24] + adox r11, r13 + adox r12, rax + + mov rdx, [rdi] + MULADD64x256 [rsi+32], r8, r9, r10, r11, r12, r13, r14, rax + // [r9:r12] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r8 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+24], r9, r10, r11, r12, r13, r14 + + // [r9:r12, r8] <- z = a0 x b01 - a1 x b11 + z + mov rdx, [rcx+8] + MULADD64x256 [rsi], r9, r10, r11, r12, r8, r13, r14, r8 + mov rdx, [rdi+8] + MULADD64x256 [rsi+32], r9, r10, r11, r12, r8, r13, r14, rax + // [r10:r12, r8] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r9 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+24], r10, r11, r12, r8, r13, r14 + + // [r10:r12, r8:r9] <- z = a0 x b02 - a1 x b12 + z + mov rdx, [rcx+16] + MULADD64x256 [rsi], r10, r11, r12, r8, r9, r13, r14, r9 + mov rdx, [rdi+16] + MULADD64x256 [rsi+32], r10, r11, r12, r8, r9, r13, r14, rax + // [r11:r12, r8:r9] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r10 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+24], r11, r12, r8, r9, r13, r14 + + // [r11:r12, r8:r10] <- z = a0 x b03 - a1 x b13 + z + mov rdx, [rcx+24] + MULADD64x256 [rsi], r11, r12, r8, r9, r10, r13, r14, r10 + mov rdx, [rdi+24] + MULADD64x256 [rsi+32], r11, r12, r8, r9, r10, r13, r14, rax + // [r12, r8:r10] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r11 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+24], r12, r8, r9, r10, r13, r14 + + mov [rdi], r12 
+ mov [rdi+8], r8 + mov [rdi+16], r9 + mov [rdi+24], r10 + pop r14 + pop r13 + pop r12 + ret + +//*********************************************************************** +// Multiplication in GF(p^2), complex part +// Operation: c [rdi] = a0 x b1 + a1 x b0 +// Inputs: a = [a1, a0] stored in [rsi] +// b = [b1, b0] stored in [rdx] +// Output: c stored in [rdi] +//*********************************************************************** +.global fp2_mul_c1 +fp2_mul_c1: + push r12 + push r13 + push r14 + mov rcx, rdx + + // [r8:r12] <- z = a0 x b10 + a1 x b00 + mov rdx, [rcx+32] + mulx r9, r8, [rsi] + xor rax, rax + mulx r10, r11, [rsi+8] + adox r9, r11 + mulx r11, r12, [rsi+16] + adox r10, r12 + mulx r12, r13, [rsi+24] + adox r11, r13 + adox r12, rax + + mov rdx, [rcx] + MULADD64x256 [rsi+32], r8, r9, r10, r11, r12, r13, r14, rax + // [r9:r12] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r8 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+24], r9, r10, r11, r12, r13, r14 + + // [r9:r12, r8] <- z = a0 x b01 - a1 x b11 + z + mov rdx, [rcx+40] + MULADD64x256 [rsi], r9, r10, r11, r12, r8, r13, r14, r8 + mov rdx, [rcx+8] + MULADD64x256 [rsi+32], r9, r10, r11, r12, r8, r13, r14, rax + // [r10:r12, r8] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r9 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+24], r10, r11, r12, r8, r13, r14 + + // [r10:r12, r8:r9] <- z = a0 x b02 - a1 x b12 + z + mov rdx, [rcx+48] + MULADD64x256 [rsi], r10, r11, r12, r8, r9, r13, r14, r9 + mov rdx, [rcx+16] + MULADD64x256 [rsi+32], r10, r11, r12, r8, r9, r13, r14, rax + // [r11:r12, r8:r9] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r10 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+24], r11, r12, r8, r9, r13, r14 + + // [r11:r12, r8:r10] <- z = a0 x b03 - a1 x b13 + z + mov rdx, [rcx+56] + MULADD64x256 [rsi], r11, r12, r8, r9, r10, r13, r14, r10 + mov rdx, [rcx+24] + MULADD64x256 [rsi+32], r11, r12, r8, r9, r10, r13, r14, rax + // [r12, r8:r10] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r11 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+24], r12, r8, r9, r10, r13, r14 + + mov [rdi], r12 + mov [rdi+8], r8 + mov [rdi+16], r9 + mov [rdi+24], r10 + pop r14 + pop r13 + pop r12 + ret + +///////////////////////////////////////////////////////////////// MACRO +// z = a x b (mod p) +// Inputs: base memory pointers M0 (a), M1 (b) +// bi pre-stored in rdx, +// accumulator z in [Z0:Z4], pre-stores a0 x b +// Output: [Z0:Z4] +// Temps: regs T0:T1 +///////////////////////////////////////////////////////////////// +.macro FPMUL256x256 M0, M1, Z0, Z1, Z2, Z3, Z4, T0, T1 + // [Z1:Z4] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, \Z0 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+24], \Z1, \Z2, \Z3, \Z4, \T0, \T1 + + // [Z1:Z4, Z0] <- z = a01 x a1 + z + mov rdx, 8\M0 + MULADD64x256 \M1, \Z1, \Z2, \Z3, \Z4, \Z0, \T0, \T1, \Z0 + // [Z2:Z4, Z0] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, \Z1 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+24], \Z2, \Z3, \Z4, \Z0, \T0, \T1 + + // [Z2:Z4, Z0:Z1] <- z = a02 x a1 + z + mov rdx, 16\M0 + MULADD64x256 \M1, \Z2, \Z3, \Z4, \Z0, \Z1, \T0, \T1, \Z1 + // [Z3:Z4, Z0:Z1] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, \Z2 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+24], \Z3, \Z4, \Z0, \Z1, \T0, \T1 + + // [Z3:Z4, Z0:Z2] <- z = a03 x a1 + z + mov rdx, 24\M0 + MULADD64x256 \M1, \Z3, \Z4, \Z0, \Z1, \Z2, \T0, \T1, \Z2 + // [Z4, Z0:Z2] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, \Z3 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+24], \Z4, \Z0, \Z1, \Z2, \T0, \T1 +.endm + +//*********************************************************************** +// Squaring in GF(p^2), non-complex 
part +// Operation: c [rdi] = (a0+a1) x (a0-a1) +// Inputs: a = [a1, a0] stored in [rsi] +// Output: c stored in [rdi] +//*********************************************************************** +.global fp2_sq_c0 +fp2_sq_c0: + push r12 + push r13 + + // a0 + a1 + mov rdx, [rsi] + mov r9, [rsi+8] + mov r10, [rsi+16] + mov r11, [rsi+24] + add rdx, [rsi+32] + adc r9, [rsi+40] + adc r10, [rsi+48] + adc r11, [rsi+56] + mov [rdi], rdx + mov [rdi+8], r9 + mov [rdi+16], r10 + mov [rdi+24], r11 + + // a0 - a1 + 2p + mov r8, [rsi] + mov r10, [rsi+8] + mov r12, [rsi+16] + mov r13, [rsi+24] + sub r8, [rsi+32] + sbb r10, [rsi+40] + sbb r12, [rsi+48] + sbb r13, [rsi+56] + mov rax, [rip+p2] + add r8, rax + mov rax, [rip+p2+8] + adc r10, rax + adc r12, rax + adc r13, [rip+p2+24] + mov [rdi+32], r8 + mov [rdi+40], r10 + mov [rdi+48], r12 + mov [rdi+56], r13 + + // [r8:r12] <- z = a00 x a1 + mulx r9, r8, r8 + xor rax, rax + mulx r10, r11, r10 + adox r9, r11 + mulx r11, r12, r12 + adox r10, r12 + mulx r12, r13, r13 + adox r11, r13 + adox r12, rax + + FPMUL256x256 [rdi], [rdi+32], r8, r9, r10, r11, r12, r13, rcx + + mov [rdi], r12 + mov [rdi+8], r8 + mov [rdi+16], r9 + mov [rdi+24], r10 + pop r13 + pop r12 + ret + +//*********************************************************************** +// Squaring in GF(p^2), complex part +// Operation: c [rdi] = 2a0 x a1 +// Inputs: a = [a1, a0] stored in [reg_p1] +// Output: c stored in [rdi] +//*********************************************************************** +.global fp2_sq_c1 +fp2_sq_c1: + push r12 + push r13 + + mov rdx, [rsi] + mov r9, [rsi+8] + mov r10, [rsi+16] + mov r11, [rsi+24] + add rdx, rdx + adc r9, r9 + adc r10, r10 + adc r11, r11 + sub rsp, 32 + mov [rsp+8], r9 + mov [rsp+16], r10 + mov [rsp+24], r11 + + // [r8:r12] <- z = a00 x a1 + mulx r9, r8, [rsi+32] + xor rax, rax + mulx r10, r11, [rsi+40] + adox r9, r11 + mulx r11, r12, [rsi+48] + adox r10, r12 + mulx r12, r13, [rsi+56] + adox r11, r13 + adox r12, rax + + FPMUL256x256 [rsp], [rsi+32], r8, r9, r10, r11, r12, r13, rcx + add rsp, 32 + + mov [rdi], r12 + mov [rdi+8], r8 + mov [rdi+16], r9 + mov [rdi+24], r10 + pop r13 + pop r12 + ret + +//*********************************************************************** +// Field multiplication in GF(p) +// Operation: c = a x b mod p +// Inputs: a stored in [rsi], b stored in [rdx] +// Output: c stored in [rdi] +//*********************************************************************** +.global fp_mul +fp_mul: + push r12 + push r13 + push r14 + mov rcx, rdx + + // [r8:r12] <- z = a x b0 + mov rdx, [rcx] + mulx r9, r8, [rsi] + xor rax, rax + mulx r10, r11, [rsi+8] + adox r9, r11 + mulx r11, r12, [rsi+16] + adox r10, r12 + mulx r12, r13, [rsi+24] + adox r11, r13 + adox r12, rax + + FPMUL256x256 [rcx], [rsi], r8, r9, r10, r11, r12, r13, r14 + + mov [rdi], r12 + mov [rdi+8], r8 + mov [rdi+16], r9 + mov [rdi+24], r10 + pop r14 + pop r13 + pop r12 + ret + +.global fp_sqr +fp_sqr: + mov rdx, rsi + jmp fp_mul diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp_constants.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp_constants.h new file mode 100644 index 0000000000..c770b78f58 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp_constants.h @@ -0,0 +1,17 @@ +#if RADIX == 32 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +#define NWORDS_FIELD 8 +#else +#define NWORDS_FIELD 9 +#endif +#define NWORDS_ORDER 8 +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +#define NWORDS_FIELD 4 +#else +#define NWORDS_FIELD 5 +#endif 
+#define NWORDS_ORDER 4 +#endif +#define BITS 256 +#define LOG2P 8 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/gf5248.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/gf5248.c new file mode 100644 index 0000000000..1d4a41dae0 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/gf5248.c @@ -0,0 +1,767 @@ +/* + * This code is derived from discussions with Thomas Pornin + */ + +#include "gf5248.h" + +// see gf5248.h +const gf5248 ZERO = { 0, 0, 0, 0 }; + +// see gf5248.h +const gf5248 ONE = { 0x0000000000000033, 0x0000000000000000, 0x0000000000000000, 0x0100000000000000 }; + +// see gf5248.h +const gf5248 gf5248_MINUS_ONE = { 0xFFFFFFFFFFFFFFCC, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x03FFFFFFFFFFFFFF }; + +// Montgomery representation of 2^256. +static const gf5248 R2 = { 0x3333333333333d70, 0x3333333333333333, 0x3333333333333333, 0x0333333333333333 }; + +// The modulus itself (this is also a valid representation of zero). +static const gf5248 MODULUS = { 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x04FFFFFFFFFFFFFF }; + +// 1/2^244 (in Montgomery representation). +static const gf5248 INVT244 = { 0x0000000000001000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 }; + +static const gf5248 PM1O3 = { 0xaaaaaaaaaaaaaaaa, 0xaaaaaaaaaaaaaaaa, 0xaaaaaaaaaaaaaaaa, 0x01aaaaaaaaaaaaaa }; + +// Normalize value *a into *d. +static inline void +inner_gf5248_normalize(gf5248 *d, const gf5248 *a) +{ + uint64_t d0, d1, d2, d3, m; + unsigned char cc; + + // Subtract q. + cc = inner_gf5248_sbb(0, a->v0, 0xFFFFFFFFFFFFFFFF, &d0); + cc = inner_gf5248_sbb(cc, a->v1, 0xFFFFFFFFFFFFFFFF, &d1); + cc = inner_gf5248_sbb(cc, a->v2, 0xFFFFFFFFFFFFFFFF, &d2); + cc = inner_gf5248_sbb(cc, a->v3, 0x04FFFFFFFFFFFFFF, &d3); + + // Add back q if the result is negative. + (void)inner_gf5248_sbb(cc, 0, 0, &m); + cc = inner_gf5248_adc(0, d0, m, &d0); + cc = inner_gf5248_adc(cc, d1, m, &d1); + cc = inner_gf5248_adc(cc, d2, m, &d2); + (void)inner_gf5248_adc(cc, d3, m & 0x04FFFFFFFFFFFFFF, &d3); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; +} + +// Expand the most significant bit of x into a full-width 64-bit word +// (0x0000000000000000 or 0xFFFFFFFFFFFFFFFF). +static inline uint64_t +sgnw(uint64_t x) +{ + return (uint64_t)(*(int64_t *)&x >> 63); +} + +// d <- u*f + v*g (in the field) +// Coefficients f and g are provided as unsigned integers, but they +// really are signed values which must be less than 2^62 (in absolute value). +static void +gf5248_lin(gf5248 *d, const gf5248 *u, const gf5248 *v, uint64_t f, uint64_t g) +{ + // f <- abs(f), keeping the sign in sf, and negating u accordingly + uint64_t sf = sgnw(f); + f = (f ^ sf) - sf; + gf5248 tu; + gf5248_neg(&tu, u); + gf5248_select(&tu, u, &tu, (uint32_t)sf); + + // g <- abs(g), keeping the sign in sg, and negating v accordingly + uint64_t sg = sgnw(g); + g = (g ^ sg) - sg; + gf5248 tv; + gf5248_neg(&tv, v); + gf5248_select(&tv, v, &tv, (uint32_t)sg); + + // Linear combination over plain integers. + uint64_t d0, d1, d2, d3, t; + inner_gf5248_umul_x2(d0, t, tu.v0, f, tv.v0, g); + inner_gf5248_umul_x2_add(d1, t, tu.v1, f, tv.v1, g, t); + inner_gf5248_umul_x2_add(d2, t, tu.v2, f, tv.v2, g, t); + inner_gf5248_umul_x2_add(d3, t, tu.v3, f, tv.v3, g, t); + + // Reduction: split into low part (248 bits) and high part + // (71 bits, since t can be up to 63 bits). 
If the high + // part is h, then: + // h*2^248 = (h mod 5)*2^248 + floor(h/5) mod q + uint64_t h0 = (d3 >> 56) | (t << 8); + uint64_t h1 = t >> 56; + d3 &= 0x00FFFFFFFFFFFFFF; + uint64_t z0, z1, quo0, rem0, quo1, rem1; + inner_gf5248_umul(z0, z1, h0, 0xCCCCCCCCCCCCCCCD); + (void)z0; + quo0 = z1 >> 2; + rem0 = h0 - (5 * quo0); + quo1 = (h1 * 0xCD) >> 10; + rem1 = h1 - (5 * quo1); + + // h = rem0 + 5*quo0 + (rem1 + 5*quo1)*2^64 + // = rem0 + rem1 + 5*(quo0 + quo1*2^64 + rem1*((2^64 - 1)/5)) + // We add rem0 and rem1 modulo 5, with an extra carry that + // goes into the folded part (multiple of 5). + uint64_t e, f0, f1; + unsigned char cc; + cc = inner_gf5248_adc(0, rem0 + 0xFFFFFFFFFFFFFFFA, rem1, &e); + cc = inner_gf5248_adc(cc, quo0, rem1 * 0x3333333333333333, &f0); + (void)inner_gf5248_adc(cc, quo1, 0, &f1); + e -= 0xFFFFFFFFFFFFFFFA; + + // Now we only have to add e*2^248 + f0:f1 to the low part. + cc = inner_gf5248_adc(0, d0, f0, &d0); + cc = inner_gf5248_adc(cc, d1, f1, &d1); + cc = inner_gf5248_adc(cc, d2, 0, &d2); + (void)inner_gf5248_adc(cc, d3, e << 56, &d3); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; +} + +// d <- abs(floor((a*f + b*g) / 2^31)) +// Coefficients f and g are provided as unsigned integer, but they really +// are signed values, which MUST be at most 2^31 in absolute value. +// The computation is performed over the integers, not modulo q. The low +// 31 bits are dropped (in practice, callers provided appropriate coefficients +// f and g such that a*f + b*g is a multiple of 2^31. +// +// If a*f + b*g is negative, then the absolute value is computed, and the +// function returns 0xFFFFFFFFFFFFFFFF; otherwise, the function returns +// 0x0000000000000000. +static uint64_t +lindiv31abs(gf5248 *d, const gf5248 *a, const gf5248 *b, uint64_t f, uint64_t g) +{ + // f <- abs(f), keeping the sign in sf + uint64_t sf = sgnw(f); + f = (f ^ sf) - sf; + + // g <- abs(g), keeping the sign in sg + uint64_t sg = sgnw(g); + g = (g ^ sg) - sg; + + // Apply the signs of f and g to the source operands. + uint64_t a0, a1, a2, a3, a4; + uint64_t b0, b1, b2, b3, b4; + unsigned char cc; + + cc = inner_gf5248_sbb(0, a->v0 ^ sf, sf, &a0); + cc = inner_gf5248_sbb(cc, a->v1 ^ sf, sf, &a1); + cc = inner_gf5248_sbb(cc, a->v2 ^ sf, sf, &a2); + cc = inner_gf5248_sbb(cc, a->v3 ^ sf, sf, &a3); + (void)inner_gf5248_sbb(cc, 0, 0, &a4); + + cc = inner_gf5248_sbb(0, b->v0 ^ sg, sg, &b0); + cc = inner_gf5248_sbb(cc, b->v1 ^ sg, sg, &b1); + cc = inner_gf5248_sbb(cc, b->v2 ^ sg, sg, &b2); + cc = inner_gf5248_sbb(cc, b->v3 ^ sg, sg, &b3); + (void)inner_gf5248_sbb(cc, 0, 0, &b4); + + // Compute a*f + b*g into d0:d1:d2:d3:d4. Since f and g are at + // most 2^31, we can add two 128-bit products with no overflow. + // Note: a4 and b4 are both in {0, -1}. + uint64_t d0, d1, d2, d3, d4, t; + inner_gf5248_umul_x2(d0, t, a0, f, b0, g); + inner_gf5248_umul_x2_add(d1, t, a1, f, b1, g, t); + inner_gf5248_umul_x2_add(d2, t, a2, f, b2, g, t); + inner_gf5248_umul_x2_add(d3, t, a3, f, b3, g, t); + d4 = t - (a4 & f) - (b4 & g); + + // Right-shift the value by 31 bits. + d0 = (d0 >> 31) | (d1 << 33); + d1 = (d1 >> 31) | (d2 << 33); + d2 = (d2 >> 31) | (d3 << 33); + d3 = (d3 >> 31) | (d4 << 33); + + // If the result is negative, negate it. 
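+    // (Conditional two's-complement negation: XOR each limb with the sign mask of d4, then subtract the mask with borrow propagation.)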
+ t = sgnw(d4); + cc = inner_gf5248_sbb(0, d0 ^ t, t, &d0); + cc = inner_gf5248_sbb(cc, d1 ^ t, t, &d1); + cc = inner_gf5248_sbb(cc, d2 ^ t, t, &d2); + (void)inner_gf5248_sbb(cc, d3 ^ t, t, &d3); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + return t; +} + +// lzcnt(x) returns the number of leading bits of value 0 in x. It supports +// x == 0 (in which case the function returns 64). +#if defined __LZCNT__ +static inline uint64_t +lzcnt(uint64_t x) +{ + return _lzcnt_u64(x); +} +#else +static inline uint64_t +lzcnt(uint64_t x) +{ + uint64_t m, s; + m = sgnw((x >> 32) - 1); + s = m & 32; + x = (x >> 32) ^ (m & (x ^ (x >> 32))); + m = sgnw((x >> 16) - 1); + s |= m & 16; + x = (x >> 16) ^ (m & (x ^ (x >> 16))); + m = sgnw((x >> 8) - 1); + s |= m & 8; + x = (x >> 8) ^ (m & (x ^ (x >> 8))); + m = sgnw((x >> 4) - 1); + s |= m & 4; + x = (x >> 4) ^ (m & (x ^ (x >> 4))); + m = sgnw((x >> 2) - 1); + s |= m & 2; + x = (x >> 2) ^ (m & (x ^ (x >> 2))); + + // At this point, x fits on 2 bits. Count of extra zeros: + // x = 0 -> 2 + // x = 1 -> 1 + // x = 2 -> 0 + // x = 3 -> 0 + s += (2 - x) & ((x - 3) >> 2); + return s; +} +#endif + +// see gf5248.h +uint32_t +gf5248_div(gf5248 *d, const gf5248 *x, const gf5248 *y) +{ + // Extended binary GCD: + // + // a <- y + // b <- q (modulus) + // u <- x (self) + // v <- 0 + // + // Value a is normalized (in the 0..q-1 range). Values a and b are + // then considered as (signed) integers. Values u and v are field + // elements. + // + // Invariants: + // a*x = y*u mod q + // b*x = y*v mod q + // b is always odd + // + // At each step: + // if a is even, then: + // a <- a/2, u <- u/2 mod q + // else: + // if a < b: + // (a, u, b, v) <- (b, v, a, u) + // a <- (a-b)/2, u <- (u-v)/2 mod q + // + // What we implement below is the optimized version of this + // algorithm, as described in https://eprint.iacr.org/2020/972 + + gf5248 a, b, u, v; + uint64_t xa, xb, f0, g0, f1, g1; + uint32_t r; + + r = ~gf5248_iszero(y); + inner_gf5248_normalize(&a, y); + b = MODULUS; + u = *x; + v = ZERO; + + // Generic loop does 15*31 = 465 inner iterations. + for (int i = 0; i < 15; i++) { + // Get approximations of a and b over 64 bits: + // - If len(a) <= 64 and len(b) <= 64, then we just use + // their values (low limbs). + // - Otherwise, with n = max(len(a), len(b)), we use: + // (a mod 2^31) + 2^31*floor(a / 2^(n - 33)) + // (b mod 2^31) + 2^31*floor(b / 2^(n - 33)) + uint64_t m3 = a.v3 | b.v3; + uint64_t m2 = a.v2 | b.v2; + uint64_t m1 = a.v1 | b.v1; + uint64_t tnz3 = sgnw(m3 | -m3); + uint64_t tnz2 = sgnw(m2 | -m2) & ~tnz3; + uint64_t tnz1 = sgnw(m1 | -m1) & ~tnz3 & ~tnz2; + uint64_t tnzm = (m3 & tnz3) | (m2 & tnz2) | (m1 & tnz1); + uint64_t tnza = (a.v3 & tnz3) | (a.v2 & tnz2) | (a.v1 & tnz1); + uint64_t tnzb = (b.v3 & tnz3) | (b.v2 & tnz2) | (b.v1 & tnz1); + uint64_t snza = (a.v2 & tnz3) | (a.v1 & tnz2) | (a.v0 & tnz1); + uint64_t snzb = (b.v2 & tnz3) | (b.v1 & tnz2) | (b.v0 & tnz1); + + // If both len(a) <= 64 and len(b) <= 64, then: + // tnzm = 0 + // tnza = 0, snza = 0, tnzb = 0, snzb = 0 + // Otherwise: + // tnzm != 0 + // tnza contains the top non-zero limb of a + // snza contains the limb right below tnza + // tnzb contains the top non-zero limb of a + // snzb contains the limb right below tnzb + // + // We count the number of leading zero bits in tnzm: + // - If s <= 31, then the top 31 bits can be extracted from + // tnza and tnzb alone. + // - If 32 <= s <= 63, then we need some bits from snza and + // snzb as well. 
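+        // (Below, sm is an all-ones mask exactly when s >= 32; it swaps in the snz* bits and reduces s by 32.)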
+ int64_t s = lzcnt(tnzm); + uint64_t sm = (uint64_t)((31 - s) >> 63); + tnza ^= sm & (tnza ^ ((tnza << 32) | (snza >> 32))); + tnzb ^= sm & (tnzb ^ ((tnzb << 32) | (snzb >> 32))); + s -= 32 & sm; + tnza <<= s; + tnzb <<= s; + + // At this point: + // - If len(a) <= 64 and len(b) <= 64, then: + // tnza = 0 + // tnzb = 0 + // tnz1 = tnz2 = tnz3 = 0 + // we want to use the entire low words of a and b + // - Otherwise, we want to use the top 33 bits of tnza and + // tnzb, and the low 31 bits of the low words of a and b. + uint64_t tzx = ~(tnz1 | tnz2 | tnz3); + tnza |= a.v0 & tzx; + tnzb |= b.v0 & tzx; + xa = (a.v0 & 0x7FFFFFFF) | (tnza & 0xFFFFFFFF80000000); + xb = (b.v0 & 0x7FFFFFFF) | (tnzb & 0xFFFFFFFF80000000); + + // Compute the 31 inner iterations on xa and xb. + uint64_t fg0 = (uint64_t)1; + uint64_t fg1 = (uint64_t)1 << 32; + for (int j = 0; j < 31; j++) { + uint64_t a_odd, swap, t0, t1, t2; + unsigned char cc; + a_odd = -(xa & 1); + cc = inner_gf5248_sbb(0, xa, xb, &t0); + (void)inner_gf5248_sbb(cc, 0, 0, &swap); + swap &= a_odd; + t1 = swap & (xa ^ xb); + xa ^= t1; + xb ^= t1; + t2 = swap & (fg0 ^ fg1); + fg0 ^= t2; + fg1 ^= t2; + xa -= a_odd & xb; + fg0 -= a_odd & fg1; + xa >>= 1; + fg1 <<= 1; + } + fg0 += 0x7FFFFFFF7FFFFFFF; + fg1 += 0x7FFFFFFF7FFFFFFF; + f0 = (fg0 & 0xFFFFFFFF) - (uint64_t)0x7FFFFFFF; + g0 = (fg0 >> 32) - (uint64_t)0x7FFFFFFF; + f1 = (fg1 & 0xFFFFFFFF) - (uint64_t)0x7FFFFFFF; + g1 = (fg1 >> 32) - (uint64_t)0x7FFFFFFF; + + // Propagate updates to a, b, u and v. + gf5248 na, nb, nu, nv; + uint64_t nega = lindiv31abs(&na, &a, &b, f0, g0); + uint64_t negb = lindiv31abs(&nb, &a, &b, f1, g1); + f0 = (f0 ^ nega) - nega; + g0 = (g0 ^ nega) - nega; + f1 = (f1 ^ negb) - negb; + g1 = (g1 ^ negb) - negb; + gf5248_lin(&nu, &u, &v, f0, g0); + gf5248_lin(&nv, &u, &v, f1, g1); + a = na; + b = nb; + u = nu; + v = nv; + } + + // If y is invertible, then the final GCD is 1, and + // len(a) + len(b) <= 37, so we can end the computation with + // the low words directly. We only need 35 iterations to reach + // the point where b = 1. + // + // If y is zero, then v is unchanged (hence zero) and none of + // the subsequent iterations will change it either, so we get + // 0 on output, which is what we want. + xa = a.v0; + xb = b.v0; + f0 = 1; + g0 = 0; + f1 = 0; + g1 = 1; + for (int j = 0; j < 35; j++) { + uint64_t a_odd, swap, t0, t1, t2, t3; + unsigned char cc; + a_odd = -(xa & 1); + cc = inner_gf5248_sbb(0, xa, xb, &t0); + (void)inner_gf5248_sbb(cc, 0, 0, &swap); + swap &= a_odd; + t1 = swap & (xa ^ xb); + xa ^= t1; + xb ^= t1; + t2 = swap & (f0 ^ f1); + f0 ^= t2; + f1 ^= t2; + t3 = swap & (g0 ^ g1); + g0 ^= t3; + g1 ^= t3; + xa -= a_odd & xb; + f0 -= a_odd & f1; + g0 -= a_odd & g1; + xa >>= 1; + f1 <<= 1; + g1 <<= 1; + } + gf5248_lin(d, &u, &v, f1, g1); + + // At the point: + // - Numerator and denominator were both in Montgomery representation, + // but the two factors R canceled each other. + // - We have injected 31*15+35 = 500 extra factors of 2, hence we + // must divide the result by 2^500. + // - However, we also want to obtain the result in Montgomery + // representation, i.e. multiply by 2^256. We thus want to + // divide the current result by 2^(500 - 256) = 2^244. + // - We do this division by using a Montgomery multiplication with + // the Montgomery representation of 1/2^244, i.e. the integer + // 2^256/2^244 = 4096. 
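+    // (INVT244, defined near the top of this file, has limbs { 0x1000, 0, 0, 0 }, i.e. exactly this integer 4096.)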
+ gf5248_mul(d, d, &INVT244); + return r; +} + +// see gf5248.h +uint32_t +gf5248_invert(gf5248 *d, const gf5248 *a) +{ + return gf5248_div(d, &ONE, a); +} + +// see gf5248.h +int32_t +gf5248_legendre(const gf5248 *x) +{ + // Same algorithm as the binary GCD in gf5248_div(), with + // a few differences: + // - We do not keep track of the Bézout coefficients u and v. + // - In each inner iteration we adjust the running symbol value, + // which uses the low 3 bits of the values. + // - Since we need two extra bits of look-ahead, we can only run + // 29 inner iterations, and then need an extra recomputation + // for the last 2. + + gf5248 a, b; + uint64_t xa, xb, f0, g0, f1, g1, ls; + + inner_gf5248_normalize(&a, x); + b = MODULUS; + ls = 0; // running symbol information in bit 1. + + // Outer loop + for (int i = 0; i < 15; i++) { + // Get approximations of a and b over 64 bits. + uint64_t m3 = a.v3 | b.v3; + uint64_t m2 = a.v2 | b.v2; + uint64_t m1 = a.v1 | b.v1; + uint64_t tnz3 = sgnw(m3 | -m3); + uint64_t tnz2 = sgnw(m2 | -m2) & ~tnz3; + uint64_t tnz1 = sgnw(m1 | -m1) & ~tnz3 & ~tnz2; + uint64_t tnzm = (m3 & tnz3) | (m2 & tnz2) | (m1 & tnz1); + uint64_t tnza = (a.v3 & tnz3) | (a.v2 & tnz2) | (a.v1 & tnz1); + uint64_t tnzb = (b.v3 & tnz3) | (b.v2 & tnz2) | (b.v1 & tnz1); + uint64_t snza = (a.v2 & tnz3) | (a.v1 & tnz2) | (a.v0 & tnz1); + uint64_t snzb = (b.v2 & tnz3) | (b.v1 & tnz2) | (b.v0 & tnz1); + + int64_t s = lzcnt(tnzm); + uint64_t sm = (uint64_t)((31 - s) >> 63); + tnza ^= sm & (tnza ^ ((tnza << 32) | (snza >> 32))); + tnzb ^= sm & (tnzb ^ ((tnzb << 32) | (snzb >> 32))); + s -= 32 & sm; + tnza <<= s; + tnzb <<= s; + + uint64_t tzx = ~(tnz1 | tnz2 | tnz3); + tnza |= a.v0 & tzx; + tnzb |= b.v0 & tzx; + xa = (a.v0 & 0x7FFFFFFF) | (tnza & 0xFFFFFFFF80000000); + xb = (b.v0 & 0x7FFFFFFF) | (tnzb & 0xFFFFFFFF80000000); + + // First 290 inner iterations. + uint64_t fg0 = (uint64_t)1; + uint64_t fg1 = (uint64_t)1 << 32; + for (int j = 0; j < 29; j++) { + uint64_t a_odd, swap, t0, t1, t2; + unsigned char cc; + a_odd = -(xa & 1); + cc = inner_gf5248_sbb(0, xa, xb, &t0); + (void)inner_gf5248_sbb(cc, 0, 0, &swap); + swap &= a_odd; + ls ^= swap & xa & xb; + t1 = swap & (xa ^ xb); + xa ^= t1; + xb ^= t1; + t2 = swap & (fg0 ^ fg1); + fg0 ^= t2; + fg1 ^= t2; + xa -= a_odd & xb; + fg0 -= a_odd & fg1; + xa >>= 1; + fg1 <<= 1; + ls ^= (xb + 2) >> 1; + } + + // Compute the updated a and b (low words only) to get + // enough bits for the next two iterations. + uint64_t fg0z = fg0 + 0x7FFFFFFF7FFFFFFF; + uint64_t fg1z = fg1 + 0x7FFFFFFF7FFFFFFF; + f0 = (fg0z & 0xFFFFFFFF) - (uint64_t)0x7FFFFFFF; + g0 = (fg0z >> 32) - (uint64_t)0x7FFFFFFF; + f1 = (fg1z & 0xFFFFFFFF) - (uint64_t)0x7FFFFFFF; + g1 = (fg1z >> 32) - (uint64_t)0x7FFFFFFF; + uint64_t a0 = (a.v0 * f0 + b.v0 * g0) >> 29; + uint64_t b0 = (a.v0 * f1 + b.v0 * g1) >> 29; + for (int j = 0; j < 2; j++) { + uint64_t a_odd, swap, t0, t1, t2, t3; + unsigned char cc; + a_odd = -(xa & 1); + cc = inner_gf5248_sbb(0, xa, xb, &t0); + (void)inner_gf5248_sbb(cc, 0, 0, &swap); + swap &= a_odd; + ls ^= swap & a0 & b0; + t1 = swap & (xa ^ xb); + xa ^= t1; + xb ^= t1; + t2 = swap & (fg0 ^ fg1); + fg0 ^= t2; + fg1 ^= t2; + t3 = swap & (a0 ^ b0); + a0 ^= t3; + b0 ^= t3; + xa -= a_odd & xb; + fg0 -= a_odd & fg1; + a0 -= a_odd & b0; + xa >>= 1; + fg1 <<= 1; + a0 >>= 1; + ls ^= (b0 + 2) >> 1; + } + + // Propagate updates to a and b. 
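+ // (Unlike in gf5248_div(), only a and b are propagated here: the
+ // Legendre symbol needs neither of the Bézout coefficients u and v,
+ // only the running symbol information accumulated in ls.)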
+ fg0 += 0x7FFFFFFF7FFFFFFF; + fg1 += 0x7FFFFFFF7FFFFFFF; + f0 = (fg0 & 0xFFFFFFFF) - (uint64_t)0x7FFFFFFF; + g0 = (fg0 >> 32) - (uint64_t)0x7FFFFFFF; + f1 = (fg1 & 0xFFFFFFFF) - (uint64_t)0x7FFFFFFF; + g1 = (fg1 >> 32) - (uint64_t)0x7FFFFFFF; + gf5248 na, nb; + uint64_t nega = lindiv31abs(&na, &a, &b, f0, g0); + (void)lindiv31abs(&nb, &a, &b, f1, g1); + ls ^= nega & nb.v0; + a = na; + b = nb; + } + + // Final iterations: values are at most 37 bits now. We do not + // need to keep track of update coefficients. Just like the GCD, + // we need only 35 iterations, because after 35 iterations, + // value a is 0 or 1, and b is 1, and no further modification to + // the Legendre symbol may happen. + xa = a.v0; + xb = b.v0; + for (int j = 0; j < 35; j++) { + uint64_t a_odd, swap, t0, t1; + unsigned char cc; + a_odd = -(xa & 1); + cc = inner_gf5248_sbb(0, xa, xb, &t0); + (void)inner_gf5248_sbb(cc, 0, 0, &swap); + swap &= a_odd; + ls ^= swap & xa & xb; + t1 = swap & (xa ^ xb); + xa ^= t1; + xb ^= t1; + xa -= a_odd & xb; + xa >>= 1; + ls ^= (xb + 2) >> 1; + } + + // At this point, if the source value was not zero, then the low + // bit of ls contains the QR status (0 = square, 1 = non-square), + // which we need to convert to the expected value (+1 or -1). + // If y == 0, then we return 0, per the API. + uint32_t r = 1 - ((uint32_t)ls & 2); + r &= ~gf5248_iszero(x); + return *(int32_t *)&r; +} + +// see gf5248.h +uint32_t +gf5248_sqrt(gf5248 *d, const gf5248 *a) +{ + // Candidate root is a^((q+1)/4), with (q+1)/4 = 5*2^246 + gf5248 y; + gf5248_xsquare(&y, a, 2); + gf5248_mul(&y, &y, a); + gf5248_xsquare(&y, &y, 246); + + // Normalize y and negate if necessary, to set the low bit to 0. + // The low bit check must be on the normal representation, + // not the Montgomery representation. + gf5248 yn; + inner_gf5248_montgomery_reduce(&yn, &y); + uint32_t ctl = -((uint32_t)yn.v0 & 1); + gf5248_neg(&yn, &y); + gf5248_select(&y, &y, &yn, ctl); + + // Check whether the candidate is indeed a square root. + gf5248_square(&yn, &y); + uint32_t r = gf5248_equals(&yn, a); + *d = y; + return r; +} + +// Little-endian encoding of a 64-bit integer. +static inline void +enc64le(void *dst, uint64_t x) +{ + uint8_t *buf = dst; + buf[0] = (uint8_t)x; + buf[1] = (uint8_t)(x >> 8); + buf[2] = (uint8_t)(x >> 16); + buf[3] = (uint8_t)(x >> 24); + buf[4] = (uint8_t)(x >> 32); + buf[5] = (uint8_t)(x >> 40); + buf[6] = (uint8_t)(x >> 48); + buf[7] = (uint8_t)(x >> 56); +} + +// Little-endian decoding of a 64-bit integer. 
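+// For example, the byte sequence EF CD AB 89 67 45 23 01 decodes to
+// 0x0123456789ABCDEF; enc64le() above is the exact inverse operation.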
+static inline uint64_t +dec64le(const void *src) +{ + const uint8_t *buf = src; + return (uint64_t)buf[0] | ((uint64_t)buf[1] << 8) | ((uint64_t)buf[2] << 16) | ((uint64_t)buf[3] << 24) | + ((uint64_t)buf[4] << 32) | ((uint64_t)buf[5] << 40) | ((uint64_t)buf[6] << 48) | ((uint64_t)buf[7] << 56); +} + +// see gf5248.h +void +gf5248_encode(void *dst, const gf5248 *a) +{ + uint8_t *buf = dst; + gf5248 x; + + inner_gf5248_montgomery_reduce(&x, a); + enc64le(buf, x.v0); + enc64le(buf + 8, x.v1); + enc64le(buf + 16, x.v2); + enc64le(buf + 24, x.v3); +} + +// see gf5248.h +uint32_t +gf5248_decode(gf5248 *d, const void *src) +{ + const uint8_t *buf = src; + uint64_t d0, d1, d2, d3, t; + unsigned char cc; + + d0 = dec64le(buf); + d1 = dec64le(buf + 8); + d2 = dec64le(buf + 16); + d3 = dec64le(buf + 24); + cc = inner_gf5248_sbb(0, d0, MODULUS.v0, &t); + cc = inner_gf5248_sbb(cc, d1, MODULUS.v1, &t); + cc = inner_gf5248_sbb(cc, d2, MODULUS.v2, &t); + cc = inner_gf5248_sbb(cc, d3, MODULUS.v3, &t); + (void)inner_gf5248_sbb(cc, 0, 0, &t); + + // If the value was not canonical then t = 0; otherwise, t = -1. + d->v0 = d0 & t; + d->v1 = d1 & t; + d->v2 = d2 & t; + d->v3 = d3 & t; + + // Convert to Montgomery representation. + gf5248_mul(d, d, &R2); + + return (uint32_t)t; +} + +// see gf5248.h +void +gf5248_decode_reduce(gf5248 *d, const void *src, size_t len) +{ + const uint8_t *buf = src; + + *d = ZERO; + if (len == 0) { + return; + } + + if ((len & 31) != 0) { + // Input size is not a multiple of 32, we decode a partial + // block, which is already less than 2^248. + uint8_t tmp[32]; + size_t k; + + k = len & ~(size_t)31; + memcpy(tmp, buf + k, len - k); + memset(tmp + len - k, 0, (sizeof tmp) - (len - k)); + d->v0 = dec64le(&tmp[0]); + d->v1 = dec64le(&tmp[8]); + d->v2 = dec64le(&tmp[16]); + d->v3 = dec64le(&tmp[24]); + len = k; + } else { + // Input size is a multiple of 32, we decode a full block, + // and a reduction is needed. + len -= 32; + uint64_t d0 = dec64le(buf + len); + uint64_t d1 = dec64le(buf + len + 8); + uint64_t d2 = dec64le(buf + len + 16); + uint64_t d3 = dec64le(buf + len + 24); + inner_gf5248_partial_reduce(d, d0, d1, d2, d3); + } + + // Process all remaining blocks, in descending address order. + while (len > 0) { + gf5248_mul(d, d, &R2); + len -= 32; + uint64_t t0 = dec64le(buf + len); + uint64_t t1 = dec64le(buf + len + 8); + uint64_t t2 = dec64le(buf + len + 16); + uint64_t t3 = dec64le(buf + len + 24); + gf5248 t; + inner_gf5248_partial_reduce(&t, t0, t1, t2, t3); + gf5248_add(d, d, &t); + } + + // Final conversion to Montgomery representation. 
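+ // (gf5248_mul() is a Montgomery multiplication, so multiplying by
+ // R2, presumably the constant 2^512 mod q, turns the plain value
+ // accumulated in d into its Montgomery representation d*2^256 mod q.)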
+ gf5248_mul(d, d, &R2); +} + +void +gf5248_div3(gf5248 *d, const gf5248 *a) +{ + const digit_t MAGIC = 0xAAAAAAAAAAAAAAAB; // 3^-1 mod 2^64 + uint64_t c0, c1, f0, f1; + gf5248 t; + + inner_gf5248_umul(f0, f1, a->arr[3], MAGIC); + t.arr[3] = f1 >> 1; + c1 = a->arr[3] - 3 * t.arr[3]; + + for (int32_t i = 2; i >= 0; i--) { + c0 = c1; + inner_gf5248_umul(f0, f1, a->arr[i], MAGIC); + t.arr[i] = f1 >> 1; + c1 = c0 + a->arr[i] - 3 * t.arr[i]; + t.arr[i] += c0 * ((MAGIC - 1) >> 1); + f0 = ((c1 >> 1) & c1); /* c1 == 3 */ + f1 = ((c1 >> 2) & !(c1 & 0x11)); /* c1 == 4 */ + f0 |= f1; + t.arr[i] += f0; + c1 = c1 - 3 * f0; + } + *d = t; + gf5248_sub(&t, d, &PM1O3); + gf5248_select(d, d, &t, -((c1 & 1) | (c1 >> 1))); // c1 >= 1 + gf5248_sub(&t, d, &PM1O3); + gf5248_select(d, d, &t, -(c1 == 2)); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/gf5248.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/gf5248.h new file mode 100644 index 0000000000..f1d21b45c6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/gf5248.h @@ -0,0 +1,912 @@ +/* + * This code is derived from discussions with Thomas Pornin + */ + +#ifndef gf5248_h__ +#define gf5248_h__ + +#ifdef __cplusplus +extern "C" +{ +#endif + +#include +#include +#include +#include + + typedef uint64_t digit_t; // Datatype for representing field elements + + /* + * A gf5248 instance represents an integer modulo q. + * This is a structure; it can be copied with a simple assignment, and + * passed around as a value (though exchanging pointers is possibly more + * efficient). + * The contents are opaque. No calling code should make any assumption + * about the contents. + */ + + typedef union + { + // Contents are opaque. + // Implementation note: this encodes the value in Montgomery + // representation, with R = 2^256, and partially reduced (value + // is less than 2^251, but not necessarily less than q). + struct + { + uint64_t v0; + uint64_t v1; + uint64_t v2; + uint64_t v3; + }; + digit_t arr[4]; + } gf5248; + + /* + * Constant zero (in the field). + */ + extern const gf5248 ZERO; + + /* + * Constant one (in the field). + */ + extern const gf5248 ONE; + + /* + * Constant -1 (in the field). + */ + extern const gf5248 gf5248_MINUS_ONE; + + /* + * API RULES: + * ========== + * + * Elementary operations on field elements are implemented by functions + * which take as parameter pointers to the operands. The first parameter + * is the pointer to the destination. Thus: + * gf5248 a = ...; + * gf5248 b = ...; + * gf5248 d; + * gf5248_sub(&d, &a, &b) + * sets field element d to a - b (implicitly modulo q). + * + * Operands may be used several times: it is always valid to use as + * output a gf5248 structure which is also used as input. + * + * Boolean values are represented by 32-bit integer (uint32_t) which have + * value exactly 0xFFFFFFFF (for "true") or 0x00000000 (for "false"). This + * convention minimizes the risk that a "smart" compiler breaks the + * constant-time property of the code through unfortunated optimizations. + * When a function expects such a Boolean, the caller MUST take care never + * to provide any value other than 0x00000000 or 0xFFFFFFFF. + * + * Values are encoded into exactly 32 bytes: value x modulo q is mapped to + * its unique integer representant in the [0..q-1] range, which is then + * encoded over 32 bytes with little-endian convention. 
Encoding is canonical + * and checked: when decoding (with gf5248_decode()), the input value is + * verified to be in the [0..q-1] range; for an out-of-range value, + * gf5248_decode() fills the output structure with zero, and returns + * 0x00000000. + * + * For most operations, the implementation is an inline function, defined + * below; the compiler can thus efficiently include it in the calling code. + * A few expensive operations (e.g. divisions) use non-inline functions, + * declared below but defined in gf5248.c + * + * All functions and macro whose name starts with "inner_gf5248_" are + * internal to this implementation and visible here only in order to + * support the API inline functions; they MUST NOT be used directly. + */ + +#if (defined _MSC_VER && defined _M_X64) || (defined __x86_64__ && (defined __GNUC__ || defined __clang__)) +#include +#define inner_gf5248_adc(cc, a, b, d) _addcarry_u64(cc, a, b, (unsigned long long *)(void *)d) +#define inner_gf5248_sbb(cc, a, b, d) _subborrow_u64(cc, a, b, (unsigned long long *)(void *)d) +#else +static inline unsigned char +inner_gf5248_adc(unsigned char cc, uint64_t a, uint64_t b, uint64_t *d) +{ + unsigned __int128 t = (unsigned __int128)a + (unsigned __int128)b + cc; + *d = (uint64_t)t; + return (unsigned char)(t >> 64); +} +static inline unsigned char +inner_gf5248_sbb(unsigned char cc, uint64_t a, uint64_t b, uint64_t *d) +{ + unsigned __int128 t = (unsigned __int128)a - (unsigned __int128)b - cc; + *d = (uint64_t)t; + return (unsigned char)(-(uint64_t)(t >> 64)); +} +#endif + +#if defined _MSC_VER +#define inner_gf5248_umul(lo, hi, x, y) \ + do { \ + uint64_t umul_hi; \ + (lo) = _umul128((x), (y), &umul_hi); \ + (hi) = umul_hi; \ + } while (0) +#define inner_gf5248_umul_add(lo, hi, x, y, z) \ + do { \ + uint64_t umul_lo, umul_hi; \ + umul_lo = _umul128((x), (y), &umul_hi); \ + unsigned char umul_cc; \ + umul_cc = inner_gf5248_adc(0, umul_lo, (z), &umul_lo); \ + (void)inner_gf5248_adc(umul_cc, umul_hi, 0, &umul_hi); \ + (lo) = umul_lo; \ + (hi) = umul_hi; \ + } while (0) +#define inner_gf5248_umul_x2(lo, hi, x1, y1, x2, y2) \ + do { \ + uint64_t umul_lo, umul_hi; \ + umul_lo = _umul128((x1), (y1), &umul_hi); \ + uint64_t umul_lo2, umul_hi2; \ + umul_lo2 = _umul128((x2), (y2), &umul_hi2); \ + unsigned char umul_cc; \ + umul_cc = inner_gf5248_adc(0, umul_lo, umul_lo2, &umul_lo); \ + (void)inner_gf5248_adc(umul_cc, umul_hi, umul_hi2, &umul_hi); \ + (lo) = umul_lo; \ + (hi) = umul_hi; \ + } while (0) +#define inner_gf5248_umul_x2_add(lo, hi, x1, y1, x2, y2, z) \ + do { \ + uint64_t umul_lo, umul_hi; \ + umul_lo = _umul128((x1), (y1), &umul_hi); \ + uint64_t umul_lo2, umul_hi2; \ + umul_lo2 = _umul128((x2), (y2), &umul_hi2); \ + unsigned char umul_cc; \ + umul_cc = inner_gf5248_adc(0, umul_lo, umul_lo2, &umul_lo); \ + (void)inner_gf5248_adc(umul_cc, umul_hi, umul_hi2, &umul_hi); \ + umul_cc = inner_gf5248_adc(0, umul_lo, (z), &umul_lo); \ + (void)inner_gf5248_adc(umul_cc, umul_hi, 0, &umul_hi); \ + (lo) = umul_lo; \ + (hi) = umul_hi; \ + } while (0) +#else +#define inner_gf5248_umul(lo, hi, x, y) \ + do { \ + unsigned __int128 umul_tmp; \ + umul_tmp = (unsigned __int128)(x) * (unsigned __int128)(y); \ + (lo) = (uint64_t)umul_tmp; \ + (hi) = (uint64_t)(umul_tmp >> 64); \ + } while (0) +#define inner_gf5248_umul_add(lo, hi, x, y, z) \ + do { \ + unsigned __int128 umul_tmp; \ + umul_tmp = (unsigned __int128)(x) * (unsigned __int128)(y) + (unsigned __int128)(uint64_t)(z); \ + (lo) = (uint64_t)umul_tmp; \ + (hi) = (uint64_t)(umul_tmp >> 
64); \ + } while (0) +#define inner_gf5248_umul_x2(lo, hi, x1, y1, x2, y2) \ + do { \ + unsigned __int128 umul_tmp; \ + umul_tmp = \ + (unsigned __int128)(x1) * (unsigned __int128)(y1) + (unsigned __int128)(x2) * (unsigned __int128)(y2); \ + (lo) = (uint64_t)umul_tmp; \ + (hi) = (uint64_t)(umul_tmp >> 64); \ + } while (0) +#define inner_gf5248_umul_x2_add(lo, hi, x1, y1, x2, y2, z) \ + do { \ + unsigned __int128 umul_tmp; \ + umul_tmp = (unsigned __int128)(x1) * (unsigned __int128)(y1) + \ + (unsigned __int128)(x2) * (unsigned __int128)(y2) + (unsigned __int128)(uint64_t)(z); \ + (lo) = (uint64_t)umul_tmp; \ + (hi) = (uint64_t)(umul_tmp >> 64); \ + } while (0) +#endif + + /* + * d <- a + b + */ + static inline void + gf5248_add(gf5248 *d, const gf5248 *a, const gf5248 *b) + { + uint64_t d0, d1, d2, d3, f; + unsigned char cc; + + // Raw addition. + cc = inner_gf5248_adc(0, a->v0, b->v0, &d0); + cc = inner_gf5248_adc(cc, a->v1, b->v1, &d1); + cc = inner_gf5248_adc(cc, a->v2, b->v2, &d2); + (void)inner_gf5248_adc(cc, a->v3, b->v3, &d3); + + // Sum is up to 2^252 - 2. Subtract q if the value is not lower + // than 2^251 (we subtract q by adding -q). + f = d3 >> 59; + cc = inner_gf5248_adc(0, d0, f, &d0); + cc = inner_gf5248_adc(cc, d1, 0, &d1); + cc = inner_gf5248_adc(cc, d2, 0, &d2); + (void)inner_gf5248_adc(cc, d3, ((uint64_t)0xFB << 56) & -f, &d3); + + // One subtraction of q might not be enough. + f = d3 >> 59; + cc = inner_gf5248_adc(0, d0, f, &d0); + cc = inner_gf5248_adc(cc, d1, 0, &d1); + cc = inner_gf5248_adc(cc, d2, 0, &d2); + (void)inner_gf5248_adc(cc, d3, ((uint64_t)0xFB << 56) & -f, &d3); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + } + + /* + * d <- a - b + */ + static inline void + gf5248_sub(gf5248 *d, const gf5248 *a, const gf5248 *b) + { + uint64_t d0, d1, d2, d3, m, f; + unsigned char cc; + + // Raw subtraction. + cc = inner_gf5248_sbb(0, a->v0, b->v0, &d0); + cc = inner_gf5248_sbb(cc, a->v1, b->v1, &d1); + cc = inner_gf5248_sbb(cc, a->v2, b->v2, &d2); + cc = inner_gf5248_sbb(cc, a->v3, b->v3, &d3); + + // Add 2*q if the result is negative. + (void)inner_gf5248_sbb(cc, 0, 0, &m); + cc = inner_gf5248_sbb(0, d0, m & 2, &d0); + cc = inner_gf5248_sbb(cc, d1, 0, &d1); + cc = inner_gf5248_sbb(cc, d2, 0, &d2); + (void)inner_gf5248_sbb(cc, d3, ((uint64_t)0xF6 << 56) & m, &d3); + + // We might have overdone it; subtract q if necessary. + f = d3 >> 59; + cc = inner_gf5248_adc(0, d0, f, &d0); + cc = inner_gf5248_adc(cc, d1, 0, &d1); + cc = inner_gf5248_adc(cc, d2, 0, &d2); + (void)inner_gf5248_adc(cc, d3, ((uint64_t)0xFB << 56) & -f, &d3); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + } + + /* + * d <- -a + */ + static inline void + gf5248_neg(gf5248 *d, const gf5248 *a) + { + uint64_t d0, d1, d2, d3, f; + unsigned char cc; + + // 2*q - a + cc = inner_gf5248_sbb(0, (uint64_t)0xFFFFFFFFFFFFFFFE, a->v0, &d0); + cc = inner_gf5248_sbb(cc, (uint64_t)0xFFFFFFFFFFFFFFFF, a->v1, &d1); + cc = inner_gf5248_sbb(cc, (uint64_t)0xFFFFFFFFFFFFFFFF, a->v2, &d2); + (void)inner_gf5248_sbb(cc, (uint64_t)0x09FFFFFFFFFFFFFF, a->v3, &d3); + + // Subtract q if the value is not lower than 2^251. + f = d3 >> 59; + cc = inner_gf5248_adc(0, d0, f, &d0); + cc = inner_gf5248_adc(cc, d1, 0, &d1); + cc = inner_gf5248_adc(cc, d2, 0, &d2); + (void)inner_gf5248_adc(cc, d3, ((uint64_t)0xFB << 56) & -f, &d3); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + } + + /* + * If ctl == 0x00000000, then *a0 is copied into *d. + * If ctl == 0xFFFFFFFF, then *a1 is copied into *d. 
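+ * (The selection below is branchless: ctl is sign-extended into a
+ * 64-bit mask and the copy is done with XOR/AND masking, so it runs
+ * in constant time regardless of ctl.)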
+ * ctl MUST be either 0x00000000 or 0xFFFFFFFF. + */ + static inline void + gf5248_select(gf5248 *d, const gf5248 *a0, const gf5248 *a1, uint32_t ctl) + { + uint64_t cw = (uint64_t)*(int32_t *)&ctl; + d->v0 = a0->v0 ^ (cw & (a0->v0 ^ a1->v0)); + d->v1 = a0->v1 ^ (cw & (a0->v1 ^ a1->v1)); + d->v2 = a0->v2 ^ (cw & (a0->v2 ^ a1->v2)); + d->v3 = a0->v3 ^ (cw & (a0->v3 ^ a1->v3)); + } + + /* + * If ctl == 0x00000000, then *a and *b are unchanged. + * If ctl == 0xFFFFFFFF, then the contents of *a and *b are swapped. + * ctl MUST be either 0x00000000 or 0xFFFFFFFF. + */ + static inline void + gf5248_cswap(gf5248 *a, gf5248 *b, uint32_t ctl) + { + uint64_t cw = (uint64_t)*(int32_t *)&ctl; + uint64_t t; + t = cw & (a->v0 ^ b->v0); + a->v0 ^= t; + b->v0 ^= t; + t = cw & (a->v1 ^ b->v1); + a->v1 ^= t; + b->v1 ^= t; + t = cw & (a->v2 ^ b->v2); + a->v2 ^= t; + b->v2 ^= t; + t = cw & (a->v3 ^ b->v3); + a->v3 ^= t; + b->v3 ^= t; + } + + /* + * d <- a/2 + */ + static inline void + gf5248_half(gf5248 *d, const gf5248 *a) + { + uint64_t d0, d1, d2, d3; + + d0 = (a->v0 >> 1) | (a->v1 << 63); + d1 = (a->v1 >> 1) | (a->v2 << 63); + d2 = (a->v2 >> 1) | (a->v3 << 63); + d3 = a->v3 >> 1; + d3 += ((uint64_t)5 << 55) & -(a->v0 & 1); + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + } + + // Inner function: 256-bit to 251-bit reduction + static inline void + inner_gf5248_partial_reduce(gf5248 *d, uint64_t a0, uint64_t a1, uint64_t a2, uint64_t a3) + { + uint64_t d0, d1, d2, d3, h, quo, rem; + unsigned char cc; + + // Split value in high (8 bits) and low (248 bits) parts. + h = a3 >> 56; + a3 &= 0x00FFFFFFFFFFFFFF; + + // 5*2^248 = 1 mod q; hence, we add floor(h/5) + (h mod 5)*2^248 + // to the low part. + quo = (h * 0xCD) >> 10; + rem = h - (5 * quo); + cc = inner_gf5248_adc(0, a0, quo, &d0); + cc = inner_gf5248_adc(cc, a1, 0, &d1); + cc = inner_gf5248_adc(cc, a2, 0, &d2); + (void)inner_gf5248_adc(cc, a3, rem << 56, &d3); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + } + + /* + * d <- 2*a + */ + static inline void + gf5248_mul2(gf5248 *d, const gf5248 *a) + { + gf5248_add(d, a, a); + } + + /* + * d <- 4*a + */ + static inline void + gf5248_mul4(gf5248 *d, const gf5248 *a) + { + uint64_t d0, d1, d2, d3; + d0 = a->v0 << 2; + d1 = (a->v0 >> 62) | (a->v1 << 2); + d2 = (a->v1 >> 62) | (a->v2 << 2); + d3 = (a->v2 >> 62) | (a->v3 << 2); + inner_gf5248_partial_reduce(d, d0, d1, d2, d3); + } + + /* + * d <- 8*a + */ + static inline void + gf5248_mul8(gf5248 *d, const gf5248 *a) + { + uint64_t d0, d1, d2, d3; + d0 = a->v0 << 3; + d1 = (a->v0 >> 61) | (a->v1 << 3); + d2 = (a->v1 >> 61) | (a->v2 << 3); + d3 = (a->v2 >> 61) | (a->v3 << 3); + inner_gf5248_partial_reduce(d, d0, d1, d2, d3); + } + + /* + * d <- 16*a + */ + static inline void + gf5248_mul16(gf5248 *d, const gf5248 *a) + { + uint64_t d0, d1, d2, d3; + d0 = a->v0 << 4; + d1 = (a->v0 >> 60) | (a->v1 << 4); + d2 = (a->v1 >> 60) | (a->v2 << 4); + d3 = (a->v2 >> 60) | (a->v3 << 4); + inner_gf5248_partial_reduce(d, d0, d1, d2, d3); + } + + /* + * d <- 32*a + */ + static inline void + gf5248_mul32(gf5248 *d, const gf5248 *a) + { + uint64_t d0, d1, d2, d3; + d0 = a->v0 << 5; + d1 = (a->v0 >> 59) | (a->v1 << 5); + d2 = (a->v1 >> 59) | (a->v2 << 5); + d3 = (a->v2 >> 59) | (a->v3 << 5); + inner_gf5248_partial_reduce(d, d0, d1, d2, d3); + } + + /* + * d <- a*x + * (multiplication by a 32-bit integer) + */ + static inline void + gf5248_mul_small(gf5248 *d, const gf5248 *a, uint32_t x) + { + uint64_t d0, d1, d2, d3, d4, lo, hi, b, h, quo, rem; + unsigned 
char cc; + + // Product over the integers. Top output word (d4) is at most 27 bits. + b = (uint64_t)x; + inner_gf5248_umul(d0, d1, a->v0, b); + inner_gf5248_umul(d2, d3, a->v2, b); + inner_gf5248_umul(lo, hi, a->v1, b); + cc = inner_gf5248_adc(0, d1, lo, &d1); + cc = inner_gf5248_adc(cc, d2, hi, &d2); + inner_gf5248_umul(lo, d4, a->v3, b); + cc = inner_gf5248_adc(cc, d3, lo, &d3); + (void)inner_gf5248_adc(cc, d4, 0, &d4); + + // Extract low 248-bit part, and the high part (at most 35 bits). + h = (d4 << 8) | (d3 >> 56); + d3 &= 0x00FFFFFFFFFFFFFF; + + // Fold h by adding floor(h/5) + (h mod 5)*2^248 to the low part. + inner_gf5248_umul(lo, hi, h, 0xCCCCCCCCCCCCCCCD); + quo = hi >> 2; + rem = h - (5 * quo); + cc = inner_gf5248_adc(cc, d0, quo, &d0); + cc = inner_gf5248_adc(cc, d1, 0, &d1); + cc = inner_gf5248_adc(cc, d2, 0, &d2); + (void)inner_gf5248_adc(cc, d3, rem << 56, &d3); + + // Max value is now 5*2^248 + 6871947672 + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + } + + /* + * d <- x + * Input value x (32-bit integer) is converted to field element x mod q. + */ + static inline void + gf5248_set_small(gf5248 *d, uint32_t x) + { + // We want Montgomery representation, i.e. x*2^256 mod q. + // We set h = x*2^8; then: + // x*2^256 = h*2^248 + // = (h mod 5)*2^248 + floor(h/5)*5*2^248 + // = (h mod 5)*2^248 + floor(h/5) mod q + // by using the fact that 5*2^248 = 1 mod q. + uint64_t h, lo, hi, quo, rem; + + h = (uint64_t)x << 8; + inner_gf5248_umul(lo, hi, h, 0xCCCCCCCCCCCCCCCD); + (void)lo; + quo = hi >> 2; + rem = h - (5 * quo); + d->v0 = quo; + d->v1 = 0; + d->v2 = 0; + d->v3 = rem << 56; + } + + // Inner function: d <- a/2^256, with normalization to [0..q-1]. + static inline void + inner_gf5248_montgomery_reduce(gf5248 *d, const gf5248 *a) + { + uint64_t x0, x1, x2, x3, f0, f1, f2, f3; + uint64_t g0, g1, g2, g3, g4, g5, g6, g7; + uint64_t d0, d1, d2, d3; + uint64_t hi, t, w; + unsigned char cc; + + // Let m = -1/q mod 2^256 = 5*2^248 + 1 + // For input x, we compute f = x*m mod 2^256, then + // h = x + f*q, which is a multiple of 2^256. The output + // is then h/2^256. + // Since x < 2^256, we have: + // h <= 2^256 - 1 + (2^256 - 1)*q + // h <= q*2^256 + 2^256 - q - 1 + // Since h = 0 mod 2^256, this implies that h <= q*2^256. + // The output h/2^256 is therefore between 0 and q (inclusive). + + x0 = a->v0; + x1 = a->v1; + x2 = a->v2; + x3 = a->v3; + + // f = x*(-1/q) mod 2^256 + f0 = x0; + f1 = x1; + f2 = x2; + f3 = x3 + ((x0 * 5) << 56); + + // g = f*q + inner_gf5248_umul(g3, hi, f0, (uint64_t)5 << 56); + inner_gf5248_umul_add(g4, hi, f1, (uint64_t)5 << 56, hi); + inner_gf5248_umul_add(g5, hi, f2, (uint64_t)5 << 56, hi); + inner_gf5248_umul_add(g6, g7, f3, (uint64_t)5 << 56, hi); + cc = inner_gf5248_sbb(0, 0, f0, &g0); + cc = inner_gf5248_sbb(cc, 0, f1, &g1); + cc = inner_gf5248_sbb(cc, 0, f2, &g2); + cc = inner_gf5248_sbb(cc, g3, f3, &g3); + cc = inner_gf5248_sbb(cc, g4, 0, &g4); + cc = inner_gf5248_sbb(cc, g5, 0, &g5); + cc = inner_gf5248_sbb(cc, g6, 0, &g6); + (void)inner_gf5248_sbb(cc, g7, 0, &g7); + + // h = x + f*q (we drop the low 256 bits). + cc = inner_gf5248_adc(0, g0, x0, &x0); + cc = inner_gf5248_adc(cc, g1, x1, &x1); + cc = inner_gf5248_adc(cc, g2, x2, &x2); + cc = inner_gf5248_adc(cc, g3, x3, &x3); + cc = inner_gf5248_adc(cc, g4, 0, &d0); + cc = inner_gf5248_adc(cc, g5, 0, &d1); + cc = inner_gf5248_adc(cc, g6, 0, &d2); + (void)inner_gf5248_adc(cc, g7, 0, &d3); + + // Normalize: if h = q, replace it with zero. 
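+ // (t is all-ones exactly when (d0,d1,d2,d3) equals q, whose limbs
+ // are 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF
+ // and 0x04FFFFFFFFFFFFFF; adding 1 to t then carries out only in
+ // that case, and w becomes the all-zero mask that clears the result.)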
+ t = d0 & d1 & d2 & (d3 ^ ~(uint64_t)0x04FFFFFFFFFFFFFF); + cc = inner_gf5248_adc(0, t, 1, &t); + (void)inner_gf5248_sbb(cc, 0, 0, &w); + w = ~w; + d->v0 = d0 & w; + d->v1 = d1 & w; + d->v2 = d2 & w; + d->v3 = d3 & w; + } + + /* + * d <- a*b + */ + static inline void + gf5248_mul(gf5248 *d, const gf5248 *a, const gf5248 *b) + { + uint64_t e0, e1, e2, e3, e4, e5, e6, e7; + uint64_t f0, f1, f2, f3, lo, hi, lo2, hi2; + uint64_t g0, g1, g2, g3, g4, g5, g6, g7; + unsigned char cc; + + // Multiplication over integers. + inner_gf5248_umul(e0, e1, a->v0, b->v0); + inner_gf5248_umul(e2, e3, a->v1, b->v1); + inner_gf5248_umul(e4, e5, a->v2, b->v2); + inner_gf5248_umul(e6, e7, a->v3, b->v3); + + inner_gf5248_umul(lo, hi, a->v0, b->v1); + cc = inner_gf5248_adc(0, e1, lo, &e1); + cc = inner_gf5248_adc(cc, e2, hi, &e2); + inner_gf5248_umul(lo, hi, a->v0, b->v3); + cc = inner_gf5248_adc(cc, e3, lo, &e3); + cc = inner_gf5248_adc(cc, e4, hi, &e4); + inner_gf5248_umul(lo, hi, a->v2, b->v3); + cc = inner_gf5248_adc(cc, e5, lo, &e5); + cc = inner_gf5248_adc(cc, e6, hi, &e6); + (void)inner_gf5248_adc(cc, e7, 0, &e7); + + inner_gf5248_umul(lo, hi, a->v1, b->v0); + cc = inner_gf5248_adc(0, e1, lo, &e1); + cc = inner_gf5248_adc(cc, e2, hi, &e2); + inner_gf5248_umul(lo, hi, a->v3, b->v0); + cc = inner_gf5248_adc(cc, e3, lo, &e3); + cc = inner_gf5248_adc(cc, e4, hi, &e4); + inner_gf5248_umul(lo, hi, a->v3, b->v2); + cc = inner_gf5248_adc(cc, e5, lo, &e5); + cc = inner_gf5248_adc(cc, e6, hi, &e6); + (void)inner_gf5248_adc(cc, e7, 0, &e7); + + inner_gf5248_umul(lo, hi, a->v0, b->v2); + cc = inner_gf5248_adc(0, e2, lo, &e2); + cc = inner_gf5248_adc(cc, e3, hi, &e3); + inner_gf5248_umul(lo, hi, a->v1, b->v3); + cc = inner_gf5248_adc(cc, e4, lo, &e4); + cc = inner_gf5248_adc(cc, e5, hi, &e5); + cc = inner_gf5248_adc(cc, e6, 0, &e6); + (void)inner_gf5248_adc(cc, e7, 0, &e7); + + inner_gf5248_umul(lo, hi, a->v2, b->v0); + cc = inner_gf5248_adc(0, e2, lo, &e2); + cc = inner_gf5248_adc(cc, e3, hi, &e3); + inner_gf5248_umul(lo, hi, a->v3, b->v1); + cc = inner_gf5248_adc(cc, e4, lo, &e4); + cc = inner_gf5248_adc(cc, e5, hi, &e5); + cc = inner_gf5248_adc(cc, e6, 0, &e6); + (void)inner_gf5248_adc(cc, e7, 0, &e7); + + inner_gf5248_umul(lo, hi, a->v1, b->v2); + inner_gf5248_umul(lo2, hi2, a->v2, b->v1); + cc = inner_gf5248_adc(0, lo, lo2, &lo); + cc = inner_gf5248_adc(cc, hi, hi2, &hi); + (void)inner_gf5248_adc(cc, 0, 0, &hi2); + cc = inner_gf5248_adc(0, e3, lo, &e3); + cc = inner_gf5248_adc(cc, e4, hi, &e4); + cc = inner_gf5248_adc(cc, e5, hi2, &e5); + cc = inner_gf5248_adc(cc, e6, 0, &e6); + (void)inner_gf5248_adc(cc, e7, 0, &e7); + + // Montgomery reduction. + // + // Low part is lo(e) = e0..e3 (256 bits). + // Let m = -1/q mod 2^256; we add (lo(e)*m mod 2^256)*q to the + // high part g = e4..e7 (246 bits). + // + // We have m = 5*2^248 + 1. + f0 = e0; + f1 = e1; + f2 = e2; + f3 = e3 + ((e0 * 5) << 56); + + // g = f*q + inner_gf5248_umul(g3, hi, f0, (uint64_t)5 << 56); + inner_gf5248_umul_add(g4, hi, f1, (uint64_t)5 << 56, hi); + inner_gf5248_umul_add(g5, hi, f2, (uint64_t)5 << 56, hi); + inner_gf5248_umul_add(g6, g7, f3, (uint64_t)5 << 56, hi); + cc = inner_gf5248_sbb(0, 0, f0, &g0); + cc = inner_gf5248_sbb(cc, 0, f1, &g1); + cc = inner_gf5248_sbb(cc, 0, f2, &g2); + cc = inner_gf5248_sbb(cc, g3, f3, &g3); + cc = inner_gf5248_sbb(cc, g4, 0, &g4); + cc = inner_gf5248_sbb(cc, g5, 0, &g5); + cc = inner_gf5248_sbb(cc, g6, 0, &g6); + (void)inner_gf5248_sbb(cc, g7, 0, &g7); + + // Add g = f*q to e0..e7. 
+ // Since e0..e7 < 2^502 and f < 2^256, we know that the result + // is less than 2^502 + 2^256*5*2^248, which is less than 6*2^504. + // This is also a multiple of 2^256. We divide by 2^256 by simply + // dropping the low 256 bits (which are all equal to zero), and + // the result is less than 6*2^248, which is already in our + // acceptable value range. + cc = inner_gf5248_adc(0, g0, e0, &e0); + cc = inner_gf5248_adc(cc, g1, e1, &e1); + cc = inner_gf5248_adc(cc, g2, e2, &e2); + cc = inner_gf5248_adc(cc, g3, e3, &e3); + cc = inner_gf5248_adc(cc, g4, e4, &e4); + cc = inner_gf5248_adc(cc, g5, e5, &e5); + cc = inner_gf5248_adc(cc, g6, e6, &e6); + (void)inner_gf5248_adc(cc, g7, e7, &e7); + + d->v0 = e4; + d->v1 = e5; + d->v2 = e6; + d->v3 = e7; + } + + /* + * d <- a^2 + */ + static inline void + gf5248_square(gf5248 *d, const gf5248 *a) + { + uint64_t e0, e1, e2, e3, e4, e5, e6, e7; + uint64_t f0, f1, f2, f3, lo, hi; + uint64_t g0, g1, g2, g3, g4, g5, g6, g7; + unsigned char cc; + + // Squaring over integers. + inner_gf5248_umul(e1, e2, a->v0, a->v1); + inner_gf5248_umul(e3, e4, a->v0, a->v3); + inner_gf5248_umul(e5, e6, a->v2, a->v3); + inner_gf5248_umul(lo, hi, a->v0, a->v2); + cc = inner_gf5248_adc(0, e2, lo, &e2); + cc = inner_gf5248_adc(cc, e3, hi, &e3); + inner_gf5248_umul(lo, hi, a->v1, a->v3); + cc = inner_gf5248_adc(cc, e4, lo, &e4); + cc = inner_gf5248_adc(cc, e5, hi, &e5); + (void)inner_gf5248_adc(cc, e6, 0, &e6); + inner_gf5248_umul(lo, hi, a->v1, a->v2); + cc = inner_gf5248_adc(0, e3, lo, &e3); + cc = inner_gf5248_adc(cc, e4, hi, &e4); + cc = inner_gf5248_adc(cc, e5, 0, &e5); + (void)inner_gf5248_adc(cc, e6, 0, &e6); + + // There cannot be extra carry here because the partial sum is + // necessarily lower than 2^448 at this point. + + e7 = e6 >> 63; + e6 = (e6 << 1) | (e5 >> 63); + e5 = (e5 << 1) | (e4 >> 63); + e4 = (e4 << 1) | (e3 >> 63); + e3 = (e3 << 1) | (e2 >> 63); + e2 = (e2 << 1) | (e1 >> 63); + e1 = e1 << 1; + + inner_gf5248_umul(e0, hi, a->v0, a->v0); + cc = inner_gf5248_adc(0, e1, hi, &e1); + inner_gf5248_umul(lo, hi, a->v1, a->v1); + cc = inner_gf5248_adc(cc, e2, lo, &e2); + cc = inner_gf5248_adc(cc, e3, hi, &e3); + inner_gf5248_umul(lo, hi, a->v2, a->v2); + cc = inner_gf5248_adc(cc, e4, lo, &e4); + cc = inner_gf5248_adc(cc, e5, hi, &e5); + inner_gf5248_umul(lo, hi, a->v3, a->v3); + cc = inner_gf5248_adc(cc, e6, lo, &e6); + (void)inner_gf5248_adc(cc, e7, hi, &e7); + + // Montgomery reduction. + // + // Low part is lo(e) = e0..e3 (256 bits). + // Let m = -1/q mod 2^256; we add (lo(e)*m mod 2^256)*q to the + // high part g = e4..e7 (246 bits). + // + // We have m = 5*2^248 + 1. + f0 = e0; + f1 = e1; + f2 = e2; + f3 = e3 + ((e0 * 5) << 56); + + // g = f*q + inner_gf5248_umul(g3, hi, f0, (uint64_t)5 << 56); + inner_gf5248_umul_add(g4, hi, f1, (uint64_t)5 << 56, hi); + inner_gf5248_umul_add(g5, hi, f2, (uint64_t)5 << 56, hi); + inner_gf5248_umul_add(g6, g7, f3, (uint64_t)5 << 56, hi); + cc = inner_gf5248_sbb(0, 0, f0, &g0); + cc = inner_gf5248_sbb(cc, 0, f1, &g1); + cc = inner_gf5248_sbb(cc, 0, f2, &g2); + cc = inner_gf5248_sbb(cc, g3, f3, &g3); + cc = inner_gf5248_sbb(cc, g4, 0, &g4); + cc = inner_gf5248_sbb(cc, g5, 0, &g5); + cc = inner_gf5248_sbb(cc, g6, 0, &g6); + (void)inner_gf5248_sbb(cc, g7, 0, &g7); + + // Add g = f*q to e0..e7. + // Since e0..e7 < 2^502 and f < 2^256, we know that the result + // is less than 2^502 + 2^256*5*2^248, which is less than 6*2^504. + // This is also a multiple of 2^256. 
We divide by 2^256 by simply + // dropping the low 256 bits (which are all equal to zero), and + // the result is less than 6*2^248, which is already in our + // acceptable value range. + cc = inner_gf5248_adc(0, g0, e0, &e0); + cc = inner_gf5248_adc(cc, g1, e1, &e1); + cc = inner_gf5248_adc(cc, g2, e2, &e2); + cc = inner_gf5248_adc(cc, g3, e3, &e3); + cc = inner_gf5248_adc(cc, g4, e4, &e4); + cc = inner_gf5248_adc(cc, g5, e5, &e5); + cc = inner_gf5248_adc(cc, g6, e6, &e6); + (void)inner_gf5248_adc(cc, g7, e7, &e7); + + d->v0 = e4; + d->v1 = e5; + d->v2 = e6; + d->v3 = e7; + } + + /* + * d <- a^(2^n) + * This computes n successive squarings of value a, with result in d. + * n == 0 is a valid input (in that case, *a is copied into *d). + * This function is not constant-time with regard to n: the number of + * successive squarings may be observable through timing-based side channels. + */ + static inline void + gf5248_xsquare(gf5248 *d, const gf5248 *a, unsigned n) + { + if (n == 0) { + *d = *a; + return; + } + gf5248_square(d, a); + while (n-- > 1) { + gf5248_square(d, d); + } + } + + /* + * Returns 0xFFFFFFFF if *a is zero; otherwise, 0x00000000 is returned. + */ + static inline uint32_t + gf5248_iszero(const gf5248 *a) + { + uint64_t a0, a1, a2, a3, t0, t1, r; + + // Zero can be represented by 0 or by q. + a0 = a->v0; + a1 = a->v1; + a2 = a->v2; + a3 = a->v3; + t0 = a0 | a1 | a2 | a3; + t1 = ~a0 | ~a1 | ~a2 | (a3 ^ 0x04FFFFFFFFFFFFFF); + + // Top bit of r is 0 if and only if one of t0 or t1 is zero. + r = (t0 | -t0) & (t1 | -t1); + return (uint32_t)(r >> 63) - 1; + } + + /* + * Returns 0xFFFFFFFF if *a and *b represent the same field element; + * otherwise, 0x00000000 is returned. + */ + static inline uint32_t + gf5248_equals(const gf5248 *a, const gf5248 *b) + { + gf5248 d; + gf5248_sub(&d, a, b); + return gf5248_iszero(&d); + } + + /* + * d <- 1/a + * If *a is not zero, then the inverse is well-defined and written into *d, + * and the function returns 0xFFFFFFFF. If *a is zero, then this function + * sets *d to zero and returns 0x00000000. + */ + uint32_t gf5248_invert(gf5248 *d, const gf5248 *a); + + /* + * d <- a/b + * If *b is not zero, then this functions writes a/b into *d, and returns + * 0xFFFFFFFF. If *b is zero, then this function sets *d to zero (regardless + * of the value of *a) and returns 0x00000000. + */ + uint32_t gf5248_div(gf5248 *d, const gf5248 *a, const gf5248 *b); + + /* + * d <- a/3 + * Divides by 3 in the field by implementing the algorithm proposed in + * "Efficient Multiplication in Finite Field Extensions of Degree 5" + * by El Mrabet, Guillevic and Ionica at ASIACRYPT 2011. + */ + void gf5248_div3(gf5248 *out, const gf5248 *a); + + /* + * Get the Legendre symbol of *a (0 for zero, +1 for a non-zero square, + * -1 for a non-square). + */ + int32_t gf5248_legendre(const gf5248 *a); + + /* + * If *a is a square, then this function sets *d to a square root of a, + * and returns 0xFFFFFFFF. If *a is not a square, then this function + * sets *d to a square root of -a, and returns 0x00000000. + * In all cases, the value written into *d is such that the least significant + * bit of its integer representation (in [0..q-1]) is zero. + */ + uint32_t gf5248_sqrt(gf5248 *d, const gf5248 *a); + + /* + * Encode field element *a into buffer dst (exactly 32 bytes are written). + */ + void gf5248_encode(void *dst, const gf5248 *a); + + /* + * Decode source buffer src (exactly 32 bytes) into a field element *d. 
+ * If the source value is not a valid canonical encoding, then *d is zero + * and the function returns 0x00000000; otherwise, the function returns + * 0xFFFFFFFF. + */ + uint32_t gf5248_decode(gf5248 *d, const void *src); + + /* + * Interpret the source buffer (of size len bytes) as an unsigned integer + * (little-endian convention) and reduce it modulo q, yielding a field + * element which is written into *d. Since reduction is applied, this + * function cannot fail. + */ + void gf5248_decode_reduce(gf5248 *d, const void *src, size_t len); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd.c new file mode 100644 index 0000000000..0424108019 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd.c @@ -0,0 +1,93 @@ +#include +#include + +void +double_couple_point(theta_couple_point_t *out, const theta_couple_point_t *in, const theta_couple_curve_t *E1E2) +{ + ec_dbl(&out->P1, &in->P1, &E1E2->E1); + ec_dbl(&out->P2, &in->P2, &E1E2->E2); +} + +void +double_couple_point_iter(theta_couple_point_t *out, + unsigned n, + const theta_couple_point_t *in, + const theta_couple_curve_t *E1E2) +{ + if (n == 0) { + memmove(out, in, sizeof(theta_couple_point_t)); + } else { + double_couple_point(out, in, E1E2); + for (unsigned i = 0; i < n - 1; i++) { + double_couple_point(out, out, E1E2); + } + } +} + +void +add_couple_jac_points(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *T1, + const theta_couple_jac_point_t *T2, + const theta_couple_curve_t *E1E2) +{ + ADD(&out->P1, &T1->P1, &T2->P1, &E1E2->E1); + ADD(&out->P2, &T1->P2, &T2->P2, &E1E2->E2); +} + +void +double_couple_jac_point(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2) +{ + DBL(&out->P1, &in->P1, &E1E2->E1); + DBL(&out->P2, &in->P2, &E1E2->E2); +} + +void +double_couple_jac_point_iter(theta_couple_jac_point_t *out, + unsigned n, + const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2) +{ + if (n == 0) { + *out = *in; + } else if (n == 1) { + double_couple_jac_point(out, in, E1E2); + } else { + fp2_t a1, a2, t1, t2; + + jac_to_ws(&out->P1, &t1, &a1, &in->P1, &E1E2->E1); + jac_to_ws(&out->P2, &t2, &a2, &in->P2, &E1E2->E2); + + DBLW(&out->P1, &t1, &out->P1, &t1); + DBLW(&out->P2, &t2, &out->P2, &t2); + for (unsigned i = 0; i < n - 1; i++) { + DBLW(&out->P1, &t1, &out->P1, &t1); + DBLW(&out->P2, &t2, &out->P2, &t2); + } + + jac_from_ws(&out->P1, &out->P1, &a1, &E1E2->E1); + jac_from_ws(&out->P2, &out->P2, &a2, &E1E2->E2); + } +} + +void +couple_jac_to_xz(theta_couple_point_t *P, const theta_couple_jac_point_t *xyP) +{ + jac_to_xz(&P->P1, &xyP->P1); + jac_to_xz(&P->P2, &xyP->P2); +} + +void +copy_bases_to_kernel(theta_kernel_couple_points_t *ker, const ec_basis_t *B1, const ec_basis_t *B2) +{ + // Copy the basis on E1 to (P, _) on T1, T2 and T1 - T2 + copy_point(&ker->T1.P1, &B1->P); + copy_point(&ker->T2.P1, &B1->Q); + copy_point(&ker->T1m2.P1, &B1->PmQ); + + // Copy the basis on E2 to (_, P) on T1, T2 and T1 - T2 + copy_point(&ker->T1.P2, &B2->P); + copy_point(&ker->T2.P2, &B2->Q); + copy_point(&ker->T1m2.P2, &B2->PmQ); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd.h new file mode 100644 index 0000000000..2b16e23834 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd.h @@ -0,0 +1,435 @@ +/** @file + * + * @authors Antonin 
Leroux + * + * @brief The HD-isogenies algorithm required by the signature + * + */ + +#ifndef HD_H +#define HD_H + +#include +#include +#include + +/** @defgroup hd_module Abelian surfaces and their isogenies + * @{ + */ + +#define HD_extra_torsion 2 + +/** @defgroup hd_struct Data structures for dimension 2 + * @{ + */ + +/** @brief Type for couple point with XZ coordinates + * @typedef theta_couple_point_t + * + * @struct theta_couple_point + * + * Structure for the couple point on an elliptic product + * using XZ coordinates + */ +typedef struct theta_couple_point +{ + ec_point_t P1; + ec_point_t P2; +} theta_couple_point_t; + +/** @brief Type for three couple points T1, T2, T1-T2 with XZ coordinates + * @typedef theta_kernel_couple_points_t + * + * @struct theta_kernel_couple_points + * + * Structure for a triple of theta couple points T1, T2 and T1 - T2 + */ +typedef struct theta_kernel_couple_points +{ + theta_couple_point_t T1; + theta_couple_point_t T2; + theta_couple_point_t T1m2; +} theta_kernel_couple_points_t; + +/** @brief Type for couple point with XYZ coordinates + * @typedef theta_couple_jac_point_t + * + * @struct theta_couple_jac_point + * + * Structure for the couple point on an elliptic product + * using XYZ coordinates + */ +typedef struct theta_couple_jac_point +{ + jac_point_t P1; + jac_point_t P2; +} theta_couple_jac_point_t; + +/** @brief Type for couple curve * + * @typedef theta_couple_curve_t + * + * @struct theta_couple_curve + * + * the theta_couple_curve structure + */ +typedef struct theta_couple_curve +{ + ec_curve_t E1; + ec_curve_t E2; +} theta_couple_curve_t; + +/** @brief Type for a product E1 x E2 with corresponding bases + * @typedef theta_couple_curve_with_basis_t + * + * @struct theta_couple_curve_with_basis + * + * tType for a product E1 x E2 with corresponding bases Ei[2^n] + */ +typedef struct theta_couple_curve_with_basis +{ + ec_curve_t E1; + ec_curve_t E2; + ec_basis_t B1; + ec_basis_t B2; +} theta_couple_curve_with_basis_t; + +/** @brief Type for theta point * + * @typedef theta_point_t + * + * @struct theta_point + * + * the theta_point structure used + */ +typedef struct theta_point +{ + fp2_t x; + fp2_t y; + fp2_t z; + fp2_t t; +} theta_point_t; + +/** @brief Type for theta point with repeating components + * @typedef theta_point_compact_t + * + * @struct theta_point_compact + * + * the theta_point structure used for points with repeated components + */ +typedef struct theta_point_compact +{ + fp2_t x; + fp2_t y; +} theta_point_compact_t; + +/** @brief Type for theta structure * + * @typedef theta_structure_t + * + * @struct theta_structure + * + * the theta_structure structure used + */ +typedef struct theta_structure +{ + theta_point_t null_point; + bool precomputation; + + // Eight precomputed values used for doubling and + // (2,2)-isogenies. + fp2_t XYZ0; + fp2_t YZT0; + fp2_t XZT0; + fp2_t XYT0; + + fp2_t xyz0; + fp2_t yzt0; + fp2_t xzt0; + fp2_t xyt0; +} theta_structure_t; + +/** @brief A 2x2 matrix used for action by translation + * @typedef translation_matrix_t + * + * @struct translation_matrix + * + * Structure to hold 4 fp2_t elements representing a 2x2 matrix used when computing + * a compatible theta structure during gluing. 
+ */ +typedef struct translation_matrix +{ + fp2_t g00; + fp2_t g01; + fp2_t g10; + fp2_t g11; +} translation_matrix_t; + +/** @brief A 4x4 matrix used for basis changes + * @typedef basis_change_matrix_t + * + * @struct basis_change_matrix + * + * Structure to hold 16 elements representing a 4x4 matrix used for changing + * the basis of a theta point. + */ +typedef struct basis_change_matrix +{ + fp2_t m[4][4]; +} basis_change_matrix_t; + +/** @brief Type for gluing (2,2) theta isogeny * + * @typedef theta_gluing_t + * + * @struct theta_gluing + * + * the theta_gluing structure + */ +typedef struct theta_gluing +{ + + theta_couple_curve_t domain; + theta_couple_jac_point_t xyK1_8; + theta_point_compact_t imageK1_8; + basis_change_matrix_t M; + theta_point_t precomputation; + theta_point_t codomain; + +} theta_gluing_t; + +/** @brief Type for standard (2,2) theta isogeny * + * @typedef theta_isogeny_t + * + * @struct theta_isogeny + * + * the theta_isogeny structure + */ +typedef struct theta_isogeny +{ + theta_point_t T1_8; + theta_point_t T2_8; + bool hadamard_bool_1; + bool hadamard_bool_2; + theta_structure_t domain; + theta_point_t precomputation; + theta_structure_t codomain; +} theta_isogeny_t; + +/** @brief Type for splitting isomorphism * + * @typedef theta_splitting_t + * + * @struct theta_splitting + * + * the theta_splitting structure + */ +typedef struct theta_splitting +{ + basis_change_matrix_t M; + theta_structure_t B; + +} theta_splitting_t; + +// end of hd_struct +/** + * @} + */ + +/** @defgroup hd_functions Functions for dimension 2 + * @{ + */ + +/** + * @brief Compute the double of the theta couple point in on the elliptic product E12 + * + * @param out Output: the theta_couple_point + * @param in the theta couple point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1,P2) + * out = [2] (P1,P2) + * + */ +void double_couple_point(theta_couple_point_t *out, const theta_couple_point_t *in, const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the iterated double of the theta couple point in on the elliptic product E12 + * + * @param out Output: the theta_couple_point + * @param n : the number of iteration + * @param E1E2 an elliptic product + * @param in the theta couple point in the elliptic product + * in = (P1,P2) + * out = [2^n] (P1,P2) + * + */ +void double_couple_point_iter(theta_couple_point_t *out, + unsigned n, + const theta_couple_point_t *in, + const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the addition of two points in (X : Y : Z) coordinates on the elliptic product E12 + * + * @param out Output: the theta_couple_jac_point + * @param T1 the theta couple jac point in the elliptic product + * @param T2 the theta couple jac point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1, P2), (Q1, Q2) + * out = (P1 + Q1, P2 + Q2) + * + **/ +void add_couple_jac_points(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *T1, + const theta_couple_jac_point_t *T2, + const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the double of the theta couple point in on the elliptic product E12 + * + * @param out Output: the theta_couple_point + * @param in the theta couple point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1,P2) + * out = [2] (P1,P2) + * + */ +void double_couple_jac_point(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the iterated double of the theta couple jac point in 
on the elliptic product E12 + * + * @param out Output: the theta_couple_jac_point + * @param n : the number of iteration + * @param in the theta couple jac point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1,P2) + * out = [2^n] (P1,P2) + * + */ +void double_couple_jac_point_iter(theta_couple_jac_point_t *out, + unsigned n, + const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2); + +/** + * @brief A forgetful function which returns (X : Z) points given a pair of (X : Y : Z) points + * + * @param P Output: the theta_couple_point + * @param xyP : the theta_couple_jac_point + **/ +void couple_jac_to_xz(theta_couple_point_t *P, const theta_couple_jac_point_t *xyP); + +/** + * @brief Compute a (2,2) isogeny chain in dimension 2 between elliptic + * products in the theta_model and evaluate at a list of points of the form + * (P1,0) or (0,P2). Returns 0 if the codomain fails to split (or there is + * an error during the computation) and 1 otherwise. + * + * @param n : the length of the isogeny chain + * @param E12 an elliptic curve product + * @param ker T1, T2 and T1-T2. couple points on E12[2^(n+2)] + * @param extra_torsion boolean indicating if we give the points in E12[2^n] or + * E12[2^(n+HD_extra_torsion)] + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @returns 1 on success 0 on failure + * + */ +int theta_chain_compute_and_eval(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP); + +/** + * @brief Compute a (2,2) isogeny chain in dimension 2 between elliptic + * products in the theta_model and evaluate at a list of points of the form + * (P1,0) or (0,P2). Returns 0 if the codomain fails to split (or there is + * an error during the computation) and 1 otherwise. + * Compared to theta_chain_compute_and_eval, it does extra isotropy + * checks on the kernel. + * + * @param n : the length of the isogeny chain + * @param E12 an elliptic curve product + * @param ker T1, T2 and T1-T2. couple points on E12[2^(n+2)] + * @param extra_torsion boolean indicating if we give the points in E12[2^n] or + * E12[2^(n+HD_extra_torsion)] + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @returns 1 on success 0 on failure + * + */ +int theta_chain_compute_and_eval_verify(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP); + +/** + * @brief Compute a (2,2) isogeny chain in dimension 2 between elliptic + * products in the theta_model and evaluate at a list of points of the form + * (P1,0) or (0,P2). Returns 0 if the codomain fails to split (or there is + * an error during the computation) and 1 otherwise. + * Compared to theta_chain_compute_and_eval, it selects a random Montgomery + * model of the codomain. + * + * @param n : the length of the isogeny chain + * @param E12 an elliptic curve product + * @param ker T1, T2 and T1-T2. 
couple points on E12[2^(n+2)] + * @param extra_torsion boolean indicating if we give the points in E12[2^n] or + * E12[2^(n+HD_extra_torsion)] + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @returns 1 on success, 0 on failure + * + */ +int theta_chain_compute_and_eval_randomized(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP); + +/** + * @brief Given a bases B1 on E1 and B2 on E2 copies this to create a kernel + * on E1 x E2 as couple points T1, T2 and T1 - T2 + * + * @param ker Output: a kernel for dim_two_isogenies (T1, T2, T1-T2) + * @param B1 Input basis on E1 + * @param B2 Input basis on E2 + **/ +void copy_bases_to_kernel(theta_kernel_couple_points_t *ker, const ec_basis_t *B1, const ec_basis_t *B2); + +/** + * @brief Given a couple of points (P1, P2) on a couple of curves (E1, E2) + * this function tests if both points are of order exactly 2^t + * + * @param T: couple point (P1, P2) + * @param E: a couple of curves (E1, E2) + * @param t: an integer + * @returns 0xFFFFFFFF on success, 0 on failure + */ +static int +test_couple_point_order_twof(const theta_couple_point_t *T, const theta_couple_curve_t *E, int t) +{ + int check_P1 = test_point_order_twof(&T->P1, &E->E1, t); + int check_P2 = test_point_order_twof(&T->P2, &E->E2, t); + + return check_P1 & check_P2; +} + +// end of hd_functions +/** + * @} + */ +// end of hd_module +/** + * @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.c new file mode 100644 index 0000000000..6332d21f8e --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.c @@ -0,0 +1,143 @@ +#include + +#define FP2_ZERO 0 +#define FP2_ONE 1 +#define FP2_I 2 +#define FP2_MINUS_ONE 3 +#define FP2_MINUS_I 4 + +const int EVEN_INDEX[10][2] = {{0, 0}, {0, 1}, {0, 2}, {0, 3}, {1, 0}, {1, 2}, {2, 0}, {2, 1}, {3, 0}, {3, 3}}; +const int CHI_EVAL[4][4] = {{1, 1, 1, 1}, {1, -1, 1, -1}, {1, 1, -1, -1}, {1, -1, -1, 1}}; +const fp2_t FP2_CONSTANTS[5] = {{ +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x1ccc, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x7} +#elif RADIX == 32 +{0x1ffff999, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x2ffff} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xffffffffffffffcc, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff} +#else +{0x7ffffffffffe6, 0x7ffffffffffff, 0x7ffffffffffff, 0x7ffffffffffff, 0x1fffffffffff} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1ccc, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x7} +#elif RADIX == 32 +{0x1ffff999, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x2ffff} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xffffffffffffffcc, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff} +#else +{0x7ffffffffffe6, 0x7ffffffffffff, 0x7ffffffffffff, 0x7ffffffffffff, 0x1fffffffffff} +#endif +#endif +}}; +const precomp_basis_change_matrix_t SPLITTING_TRANSFORMS[10] = {{{{FP2_ONE, FP2_I, FP2_ONE, FP2_I}, {FP2_ONE, FP2_MINUS_I, FP2_MINUS_ONE, FP2_I}, {FP2_ONE, FP2_I, FP2_MINUS_ONE, FP2_MINUS_I}, {FP2_MINUS_ONE, FP2_I, FP2_MINUS_ONE, FP2_I}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_MINUS_ONE, FP2_ZERO, FP2_ZERO}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_MINUS_ONE, FP2_ZERO}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, 
FP2_MINUS_ONE}, {FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_ONE}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_ONE}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}, {FP2_MINUS_ONE, FP2_ONE, FP2_ONE, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}}}}; +const precomp_basis_change_matrix_t NORMALIZATION_TRANSFORMS[6] = {{{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}}}, {{{FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}}}, {{{FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_ONE}, {FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}}}, {{{FP2_MINUS_ONE, FP2_I, FP2_I, FP2_ONE}, {FP2_I, FP2_MINUS_ONE, FP2_ONE, FP2_I}, {FP2_I, FP2_ONE, FP2_MINUS_ONE, FP2_I}, {FP2_ONE, FP2_I, FP2_I, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_I, FP2_I, FP2_MINUS_ONE}, {FP2_I, FP2_ONE, FP2_MINUS_ONE, FP2_I}, {FP2_I, FP2_MINUS_ONE, FP2_ONE, FP2_I}, {FP2_MINUS_ONE, FP2_I, FP2_I, FP2_ONE}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.h new file mode 100644 index 0000000000..b3147a42a9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.h @@ -0,0 +1,18 @@ +#ifndef HD_SPLITTING_H +#define HD_SPLITTING_H + +#include +#include + +typedef struct precomp_basis_change_matrix { + uint8_t m[4][4]; +} precomp_basis_change_matrix_t; + +extern const int EVEN_INDEX[10][2]; +extern const int CHI_EVAL[4][4]; +extern const fp2_t FP2_CONSTANTS[5]; +extern const precomp_basis_change_matrix_t SPLITTING_TRANSFORMS[10]; +extern const precomp_basis_change_matrix_t NORMALIZATION_TRANSFORMS[6]; + +#endif + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/id2iso.c new file mode 100644 index 0000000000..0743974345 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/id2iso.c @@ -0,0 +1,338 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Scalar multiplication [x]P + [y]Q where x and y are stored +// inside an ibz_vec_2_t [x, y] and P, Q \in E[2^f] +void +ec_biscalar_mul_ibz_vec(ec_point_t *res, + const ibz_vec_2_t *scalar_vec, + const int f, + const ec_basis_t *PQ, + const ec_curve_t *curve) +{ + digit_t scalars[2][NWORDS_ORDER]; + 
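+    // Convert the two arbitrary-precision scalars into fixed-size digit arrays,
+    // then delegate to the x-only double-scalar multiplication on the basis (P, Q).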
ibz_to_digit_array(scalars[0], &(*scalar_vec)[0]); + ibz_to_digit_array(scalars[1], &(*scalar_vec)[1]); + ec_biscalar_mul(res, scalars[0], scalars[1], f, PQ, curve); +} + +// Given an ideal, computes the scalars s0, s1 which determine the kernel generator +// of the equivalent isogeny +void +id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *vec, const quat_left_ideal_t *lideal) +{ + ibz_t tmp; + ibz_init(&tmp); + + ibz_mat_2x2_t mat; + ibz_mat_2x2_init(&mat); + + // construct the matrix of the dual of alpha on the 2^f-torsion + { + quat_alg_elem_t alpha; + quat_alg_elem_init(&alpha); + + int lideal_generator_ok UNUSED = quat_lideal_generator(&alpha, lideal, &QUATALG_PINFTY); + assert(lideal_generator_ok); + quat_alg_conj(&alpha, &alpha); + + ibz_vec_4_t coeffs; + ibz_vec_4_init(&coeffs); + quat_change_to_O0_basis(&coeffs, &alpha); + + for (unsigned i = 0; i < 2; ++i) { + ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + for (unsigned j = 0; j < 2; ++j) { + ibz_mul(&tmp, &ACTION_GEN2[i][j], &coeffs[1]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN3[i][j], &coeffs[2]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN4[i][j], &coeffs[3]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + } + } + + ibz_vec_4_finalize(&coeffs); + quat_alg_elem_finalize(&alpha); + } + + // find the kernel of alpha modulo the norm of the ideal + { + const ibz_t *const norm = &lideal->norm; + + ibz_mod(&(*vec)[0], &mat[0][0], norm); + ibz_mod(&(*vec)[1], &mat[1][0], norm); + ibz_gcd(&tmp, &(*vec)[0], &(*vec)[1]); + if (ibz_is_even(&tmp)) { + ibz_mod(&(*vec)[0], &mat[0][1], norm); + ibz_mod(&(*vec)[1], &mat[1][1], norm); + } +#ifndef NDEBUG + ibz_gcd(&tmp, &(*vec)[0], norm); + ibz_gcd(&tmp, &(*vec)[1], &tmp); + assert(!ibz_cmp(&tmp, &ibz_const_one)); +#endif + } + + ibz_mat_2x2_finalize(&mat); + ibz_finalize(&tmp); +} + +// helper function to apply a matrix to a basis of E[2^f] +// works in place +int +matrix_application_even_basis(ec_basis_t *bas, const ec_curve_t *E, ibz_mat_2x2_t *mat, int f) +{ + digit_t scalars[2][NWORDS_ORDER] = { 0 }; + int ret; + + ibz_t tmp, pow_two; + ibz_init(&tmp); + ibz_init(&pow_two); + ibz_pow(&pow_two, &ibz_const_two, f); + + ec_basis_t tmp_bas; + copy_basis(&tmp_bas, bas); + + // reduction mod 2f + ibz_mod(&(*mat)[0][0], &(*mat)[0][0], &pow_two); + ibz_mod(&(*mat)[0][1], &(*mat)[0][1], &pow_two); + ibz_mod(&(*mat)[1][0], &(*mat)[1][0], &pow_two); + ibz_mod(&(*mat)[1][1], &(*mat)[1][1], &pow_two); + + // For a matrix [[a, c], [b, d]] we compute: + // + // first basis element R = [a]P + [b]Q + ibz_to_digit_array(scalars[0], &(*mat)[0][0]); + ibz_to_digit_array(scalars[1], &(*mat)[1][0]); + ec_biscalar_mul(&bas->P, scalars[0], scalars[1], f, &tmp_bas, E); + + // second basis element S = [c]P + [d]Q + ibz_to_digit_array(scalars[0], &(*mat)[0][1]); + ibz_to_digit_array(scalars[1], &(*mat)[1][1]); + ec_biscalar_mul(&bas->Q, scalars[0], scalars[1], f, &tmp_bas, E); + + // Their difference R - S = [a - c]P + [b - d]Q + ibz_sub(&tmp, &(*mat)[0][0], &(*mat)[0][1]); + ibz_mod(&tmp, &tmp, &pow_two); + ibz_to_digit_array(scalars[0], &tmp); + ibz_sub(&tmp, &(*mat)[1][0], &(*mat)[1][1]); + ibz_mod(&tmp, &tmp, &pow_two); + ibz_to_digit_array(scalars[1], &tmp); + ret = ec_biscalar_mul(&bas->PmQ, scalars[0], scalars[1], f, &tmp_bas, E); + + ibz_finalize(&tmp); + ibz_finalize(&pow_two); + + return ret; +} + +// helper function to apply some endomorphism of E0 on the precomputed basis of E[2^f] +// works in place +void +endomorphism_application_even_basis(ec_basis_t 
*bas, + const int index_alternate_curve, + const ec_curve_t *E, + const quat_alg_elem_t *theta, + int f) +{ + ibz_t tmp; + ibz_init(&tmp); + ibz_vec_4_t coeffs; + ibz_vec_4_init(&coeffs); + ibz_mat_2x2_t mat; + ibz_mat_2x2_init(&mat); + + ibz_t content; + ibz_init(&content); + + // decomposing theta on the basis + quat_alg_make_primitive(&coeffs, &content, theta, &EXTREMAL_ORDERS[index_alternate_curve].order); + assert(ibz_is_odd(&content)); + + ibz_set(&mat[0][0], 0); + ibz_set(&mat[0][1], 0); + ibz_set(&mat[1][0], 0); + ibz_set(&mat[1][1], 0); + + // computing the matrix + + for (unsigned i = 0; i < 2; ++i) { + ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + for (unsigned j = 0; j < 2; ++j) { + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen2[i][j], &coeffs[1]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen3[i][j], &coeffs[2]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen4[i][j], &coeffs[3]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&mat[i][j], &mat[i][j], &content); + } + } + + // and now we apply it + matrix_application_even_basis(bas, E, &mat, f); + + ibz_vec_4_finalize(&coeffs); + ibz_mat_2x2_finalize(&mat); + ibz_finalize(&content); + + ibz_finalize(&tmp); +} + +// compute the ideal whose kernel is generated by vec2[0]*BO[0] + vec2[1]*B0[1] where B0 is the +// canonical basis of E0 +void +id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t *vec2, int f) +{ + + // algorithm: apply endomorphisms 1 and j+(1+k)/2 to the kernel point, + // the result should form a basis of the respective torsion subgroup. + // then apply i to the kernel point and decompose over said basis. + // hence we have an equation a*P + b*[j+(1+k)/2]P == [i]P, which will + // easily reveal an endomorphism that kills P. + + ibz_t two_pow; + ibz_init(&two_pow); + + ibz_vec_2_t vec; + ibz_vec_2_init(&vec); + + if (f == TORSION_EVEN_POWER) { + ibz_copy(&two_pow, &TORSION_PLUS_2POWER); + } else { + ibz_pow(&two_pow, &ibz_const_two, f); + } + + { + ibz_mat_2x2_t mat; + ibz_mat_2x2_init(&mat); + + ibz_copy(&mat[0][0], &(*vec2)[0]); + ibz_copy(&mat[1][0], &(*vec2)[1]); + + ibz_mat_2x2_eval(&vec, &ACTION_J, vec2); + ibz_copy(&mat[0][1], &vec[0]); + ibz_copy(&mat[1][1], &vec[1]); + + ibz_mat_2x2_eval(&vec, &ACTION_GEN4, vec2); + ibz_add(&mat[0][1], &mat[0][1], &vec[0]); + ibz_add(&mat[1][1], &mat[1][1], &vec[1]); + + ibz_mod(&mat[0][1], &mat[0][1], &two_pow); + ibz_mod(&mat[1][1], &mat[1][1], &two_pow); + + ibz_mat_2x2_t inv; + ibz_mat_2x2_init(&inv); + { + int inv_ok UNUSED = ibz_mat_2x2_inv_mod(&inv, &mat, &two_pow); + assert(inv_ok); + } + ibz_mat_2x2_finalize(&mat); + + ibz_mat_2x2_eval(&vec, &ACTION_I, vec2); + ibz_mat_2x2_eval(&vec, &inv, &vec); + + ibz_mat_2x2_finalize(&inv); + } + + // final result: a - i + b*(j+(1+k)/2) + quat_alg_elem_t gen; + quat_alg_elem_init(&gen); + ibz_set(&gen.denom, 2); + ibz_add(&gen.coord[0], &vec[0], &vec[0]); + ibz_set(&gen.coord[1], -2); + ibz_add(&gen.coord[2], &vec[1], &vec[1]); + ibz_copy(&gen.coord[3], &vec[1]); + ibz_add(&gen.coord[0], &gen.coord[0], &vec[1]); + ibz_vec_2_finalize(&vec); + + quat_lideal_create(lideal, &gen, &two_pow, &MAXORD_O0, &QUATALG_PINFTY); + + assert(0 == ibz_cmp(&lideal->norm, &two_pow)); + + quat_alg_elem_finalize(&gen); + ibz_finalize(&two_pow); +} + +// finds mat such that: +// (mat*v).B2 = v.B1 +// where "." 
is the dot product, defined as (v1,v2).(P,Q) = v1*P + v2*Q +// mat encodes the coordinates of the points of B1 in the basis B2 +// specifically requires B1 or B2 to be "full" w.r.t to the 2^n torsion, so that we use tate +// full = 0 assumes B2 is "full" so the easier case. +// if we want to switch the role of B2 and B1, we invert the matrix, e.g. set full = 1 +static void +_change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, + const ec_basis_t *B1, + const ec_basis_t *B2, + ec_curve_t *E, + int f, + bool invert) +{ + digit_t x1[NWORDS_ORDER] = { 0 }, x2[NWORDS_ORDER] = { 0 }, x3[NWORDS_ORDER] = { 0 }, x4[NWORDS_ORDER] = { 0 }; + +#ifndef NDEBUG + int e_full = TORSION_EVEN_POWER; + int e_diff = e_full - f; +#endif + + // Ensure the input basis has points of order 2^f + if (invert) { + assert(test_basis_order_twof(B1, E, e_full)); + ec_dlog_2_tate(x1, x2, x3, x4, B1, B2, E, f); + mp_invert_matrix(x1, x2, x3, x4, f, NWORDS_ORDER); + } else { + assert(test_basis_order_twof(B2, E, e_full)); + ec_dlog_2_tate(x1, x2, x3, x4, B2, B1, E, f); + } + +#ifndef NDEBUG + { + if (invert) { + ec_point_t test, test2; + ec_biscalar_mul(&test, x1, x2, f, B2, E); + ec_dbl_iter(&test2, e_diff, &B1->P, E); + assert(ec_is_equal(&test, &test2)); + + ec_biscalar_mul(&test, x3, x4, f, B2, E); + ec_dbl_iter(&test2, e_diff, &B1->Q, E); + assert(ec_is_equal(&test, &test2)); + } else { + ec_point_t test; + ec_biscalar_mul(&test, x1, x2, f, B2, E); + ec_dbl_iter(&test, e_diff, &test, E); + assert(ec_is_equal(&test, &(B1->P))); + + ec_biscalar_mul(&test, x3, x4, f, B2, E); + ec_dbl_iter(&test, e_diff, &test, E); + assert(ec_is_equal(&test, &(B1->Q))); + } + } +#endif + + // Copy the results into the matrix + ibz_copy_digit_array(&((*mat)[0][0]), x1); + ibz_copy_digit_array(&((*mat)[1][0]), x2); + ibz_copy_digit_array(&((*mat)[0][1]), x3); + ibz_copy_digit_array(&((*mat)[1][1]), x4); +} + +void +change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, const ec_basis_t *B1, const ec_basis_t *B2, ec_curve_t *E, int f) +{ + _change_of_basis_matrix_tate(mat, B1, B2, E, f, false); +} + +void +change_of_basis_matrix_tate_invert(ibz_mat_2x2_t *mat, const ec_basis_t *B1, const ec_basis_t *B2, ec_curve_t *E, int f) +{ + _change_of_basis_matrix_tate(mat, B1, B2, E, f, true); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/id2iso.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/id2iso.h new file mode 100644 index 0000000000..1b4eaae3c5 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/id2iso.h @@ -0,0 +1,280 @@ +/** @file + * + * @authors Antonin Leroux + * + * @brief The id2iso algorithms + */ + +#ifndef ID2ISO_H +#define ID2ISO_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/** @defgroup id2iso_id2iso Ideal to isogeny conversion + * @{ + */ +static const quat_represent_integer_params_t QUAT_represent_integer_params = { + .algebra = &QUATALG_PINFTY, /// The level-specific quaternion algebra + .order = &(EXTREMAL_ORDERS[0]), // The special extremal order O0 + .primality_test_iterations = QUAT_primality_num_iter // precompted bound on the iteration number in primality tests +}; + +/*************************** Functions *****************************/ + +/** @defgroup id2iso_others Other functions needed for id2iso + * @{ + */ + +/** + * @brief Scalar multiplication [x]P + [y]Q where x and y are stored inside an + * ibz_vec_2_t [x, y] and P, Q in E[2^f] + * + * @param res Output: the point R = [x]P + [y]Q + * @param scalar_vec: a vector of ibz type elements (x, y) 
+ * @param f: an integer such that P, Q are in E[2^f] + * @param PQ: an x-only basis x(P), x(Q) and x(P-Q) + * @param curve: the curve E the points P, Q, R are defined on + * + */ +void ec_biscalar_mul_ibz_vec(ec_point_t *res, + const ibz_vec_2_t *scalar_vec, + const int f, + const ec_basis_t *PQ, + const ec_curve_t *curve); + +/** + * @brief Translating an ideal of norm 2^f dividing p²-1 into the corresponding + * kernel coefficients + * + * @param ker_dlog Output : two coefficients indicating the decomposition of the + * kernel over the canonical basis of E0[2^f] + * @param lideal_input : O0-ideal corresponding to the ideal to be translated of + * norm 2^f + * + */ +void id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *ker_dlog, const quat_left_ideal_t *lideal_input); + +/** + * @brief Applies some 2x2 matrix on a basis of E[2^TORSION_EVEN_POWER] + * + * @param P the basis + * @param E the curve + * @param mat the matrix + * @param f TORSION_EVEN_POWER + * @returns 1 if success, 0 if error + * + * helper function, works in place + * + */ +int matrix_application_even_basis(ec_basis_t *P, const ec_curve_t *E, ibz_mat_2x2_t *mat, int f); + +/** + * @brief Applies some endomorphism of an alternate curve to E[f] + * + * @param P the basis + * @param index_alternate_curve index of the alternate order in the list of precomputed extremal + * orders + * @param E the curve (E is not required to be the alternate curve in question since in the end we + * only apply a matrix) + * @param theta the endomorphism + * @param f TORSION_EVEN_POWER + * + * helper function, works in place + * + */ +void endomorphism_application_even_basis(ec_basis_t *P, + const int index_alternate_curve, + const ec_curve_t *E, + const quat_alg_elem_t *theta, + int f); + +/** + * @brief Translating a kernel on the curve E0, represented as a vector with + * respect to the precomputed 2^f-torsion basis, into the corresponding O0-ideal + * + * @param lideal Output : the output O0-ideal + * @param f : exponent definining the norm of the ideal to compute + * @param vec2 : length-2 vector giving the 2-power part of the kernel with + * respect to the precomputed 2^f basis + * + */ +void id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t *vec2, int f); + +/** + * @brief Change of basis matrix for full basis B2 + * Finds mat such that: + * (mat*v).B2 = v.B1 + * where "." is the dot product, defined as (v1,v2).(P,Q) = v1*P + v2*Q + * + * @param mat the computed change of basis matrix + * @param B1 the source basis for E[2^f] + * @param B2 the target basis for E[2^e] + * @param E the elliptic curve + * @param f 2^f is the order of the points of the input basis + * + * mat encodes the coordinates of the points of B1 in the basis B2 + */ +void change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, const ec_basis_t *B1, const ec_basis_t *B2, ec_curve_t *E, int f); + +/** + * @brief Change of basis matrix for full basis B2 + * Finds mat such that: + * (mat*v).B1 = [2^e-f]*v.B2 + * where "." 
is the dot product, defined as (v1,v2).(P,Q) = v1*P + v2*Q + * + * @param mat the computed change of basis matrix + * @param B1 the source basis for E[2^e] + * @param B2 the target basis for E[2^f] + * @param E the elliptic curve + * @param f 2^f is the order of the points of the input basis + * + * mat encodes the coordinates of the points of B1 in the basis B2, by + * applying change_of_basis_matrix_tate and inverting the outcome + */ +void change_of_basis_matrix_tate_invert(ibz_mat_2x2_t *mat, + const ec_basis_t *B1, + const ec_basis_t *B2, + ec_curve_t *E, + int f); + +/** @} + */ + +/** @defgroup id2iso_arbitrary Arbitrary isogeny evaluation + * @{ + */ +/** + * @brief Function to find elements u, v, d1, d2, beta1, beta2 for the ideal to isogeny + * + * @param u Output: integer + * @param v Output: integer + * @param beta1 Output: quaternion element + * @param beta2 Output: quaternion element + * @param d1 Output: integer + * @param d2 Output: integer + * @param index_alternate_order_1 Output: small integer (index of an alternate order) + * @param index_alternate_order_2 Output: small integer (index of an alternate order) + * @param target : integer, target norm + * @param lideal : O0-ideal defining the search space + * @param Bpoo : quaternion algebra + * @param num_alternate_order number of alternate order we consider + * @returns 1 if the computation succeeds, 0 otherwise + * + * Let us write ti = index_alternate_order_i, + * we look for u,v,beta1,beta2,d1,d2,t1,t2 + * such that u d1 + v d2 = target + * and where di = norm(betai)/norm(Ii), where the ideal Ii is equal to overbar{Ji} * lideal and + * betai is in Ii where Ji is a connecting ideal between the maximal order O0 and O_ti t1,t2 must be + * contained between 0 and num_alternate_order This corresponds to the function SuitableIdeals in + * the spec + */ +int find_uv(ibz_t *u, + ibz_t *v, + quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *d1, + ibz_t *d2, + int *index_alternate_order_1, + int *index_alternate_order_2, + const ibz_t *target, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo, + int num_alternate_order); + +/** + * @brief Computes an arbitrary isogeny of fixed degree starting from E0 + * and evaluates it a list of points of the form (P1,0) or (0,P2). 
+ * + * @param lideal Output : an ideal of norm u + * @param u : integer + * @param small : bit indicating if we the value of u is "small" meaning that we + expect it to be + * around sqrt{p}, in that case we use a length slightly above + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny + (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @param index_alternate_order : index of the special extremal order to be used (in the list of + these orders) + * @returns the length of the chain if the computation succeeded, zero upon + failure + * + * F is an isogeny encoding an isogeny [adjust]*phi : E0 -> Eu of degree u + * note that the codomain of F can be either Eu x Eu' or Eu' x Eu for some curve + Eu' + */ +int fixed_degree_isogeny_and_eval(quat_left_ideal_t *lideal, + const ibz_t *u, + bool small, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP, + const int index_alternate_order); + +/** + * @brief Translating an ideal into a representation of the corresponding + * isogeny + * + * @param beta1 Output: quaternion element + * @param beta2 Output: quaternion element + * @param u Output: integer + * @param v Output: integer + * @param d1 Output: integer + * @param d2 Output: integer + * @param codomain the codomain of the isogeny corresponding to lideal + * @param basis Output : evaluation of the canonical basis of E0 through the + * ideal corresponding to lideal + * @param lideal : O0 - ideal in input + * @param Bpoo : the quaternion algebra + * @returns 1 if the computation succeeded, 0 otherwise + * + * Compute the codomain and image on the basis of E0 of the isogeny + * E0 -> codomain corresponding to lideal + * + * There is some integer e >= 0 such that + * 2^e * u, 2^e * v,beta1, beta2, d1, d2 are the output of find_uv + * on input target = 2^TORSION_PLUS_EVEN_POWER and lideal + * + * codomain and basis are computed with the help of a dimension 2 isogeny + * of degree 2^TORSION_PLUS_EVEN_POWER - e using a Kani diagram + * + */ +int dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *u, + ibz_t *v, + ibz_t *d1, + ibz_t *d2, + ec_curve_t *codomain, + ec_basis_t *basis, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo); + +/** + * @brief Translating an ideal into a representation of the corresponding + * isogeny + * + * @param basis Output : evaluation of the canonical basis of E0 through the + * ideal corresponding to lideal + * @param lideal : ideal in input + * @param codomain + * @returns 1 if the computation succeeds, 0 otherwise + * + * This is a wrapper around the ideal to isogeny clapotis function + */ +int dim2id2iso_arbitrary_isogeny_evaluation(ec_basis_t *basis, ec_curve_t *codomain, const quat_left_ideal_t *lideal); + +/** @} + */ + +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/intbig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/intbig.h new file mode 100644 index 0000000000..a0c2c02477 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/intbig.h @@ -0,0 +1,303 @@ +/** @file + * + * @authors Luca De Feo, Sina Schaeffler + * + * @brief Declarations for big integers in the reference implementation + */ + +#ifndef INTBIG_H +#define INTBIG_H + +#include +#if defined(MINI_GMP) +#include +#include +#else +#include +#endif +#include +#include + +/** @ingroup quat_quat + * @defgroup ibz_all Signed big integers (gmp-based) + * @{ + */ + 
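+/* Usage note: an ibz_t must be initialized with ibz_init() before its first use
+ * and released with ibz_finalize(); the arithmetic helpers declared below
+ * (ibz_add, ibz_mul, ibz_mod, ...) write their result into their first argument. */
+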
+/** @defgroup ibz_t Precise number types + * @{ + */ + +/** @brief Type for signed long integers + * + * @typedef ibz_t + * + * For integers of arbitrary size, used by intbig module, using gmp + */ +typedef mpz_t ibz_t; + +/** @} + */ + +/** @defgroup ibz_c Constants + * @{ + */ + +/** + * Constant zero + */ +extern const ibz_t ibz_const_zero; + +/** + * Constant one + */ +extern const ibz_t ibz_const_one; + +/** + * Constant two + */ +extern const ibz_t ibz_const_two; + +/** + * Constant three + */ +extern const ibz_t ibz_const_three; + +/** @} + */ + +/** @defgroup ibz_finit Constructors and Destructors + * @{ + */ + +void ibz_init(ibz_t *x); +void ibz_finalize(ibz_t *x); + +/** @} + */ + +/** @defgroup ibz_za Basic integer arithmetic + * @{ + */ + +/** @brief sum=a+b + */ +void ibz_add(ibz_t *sum, const ibz_t *a, const ibz_t *b); + +/** @brief diff=a-b + */ +void ibz_sub(ibz_t *diff, const ibz_t *a, const ibz_t *b); + +/** @brief prod=a*b + */ +void ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b); + +/** @brief neg=-a + */ +void ibz_neg(ibz_t *neg, const ibz_t *a); + +/** @brief abs=|a| + */ +void ibz_abs(ibz_t *abs, const ibz_t *a); + +/** @brief Euclidean division of a by b + * + * Computes quotient, remainder so that remainder+quotient*b = a where 0<=|remainder|<|b| + * The quotient is rounded towards zero. + */ +void ibz_div(ibz_t *quotient, ibz_t *remainder, const ibz_t *a, const ibz_t *b); + +/** @brief Euclidean division of a by 2^exp + * + * Computes a right shift of abs(a) by exp bits, then sets sign(quotient) to sign(a). + * + * Division and rounding is as in ibz_div. + */ +void ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp); + +/** @brief Two adic valuation computation + * + * Computes the position of the first 1 in the binary representation of the integer given in input + * + * When this number is a power of two this gives the two adic valuation of the integer + */ +int ibz_two_adic(ibz_t *pow); + +/** @brief r = a mod b + * + * Assumes valid inputs + * The sign of the divisor is ignored, the result is always non-negative + */ +void ibz_mod(ibz_t *r, const ibz_t *a, const ibz_t *b); + +unsigned long int ibz_mod_ui(const mpz_t *n, unsigned long int d); + +/** @brief Test if a = 0 mod b + */ +int ibz_divides(const ibz_t *a, const ibz_t *b); + +/** @brief pow=x^e + * + * Assumes valid inputs, The case 0^0 yields 1. 
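+ * For example, with ibz_t values r and x, ibz_pow(&r, &x, 3) sets r = x^3.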
+ */ +void ibz_pow(ibz_t *pow, const ibz_t *x, uint32_t e); + +/** @brief pow=(x^e) mod m + * + * Assumes valid inputs + */ +void ibz_pow_mod(ibz_t *pow, const ibz_t *x, const ibz_t *e, const ibz_t *m); + +/** @brief Compare a and b + * + * @returns a positive value if a > b, zero if a = b, and a negative value if a < b + */ +int ibz_cmp(const ibz_t *a, const ibz_t *b); + +/** @brief Test if x is 0 + * + * @returns 1 if x=0, 0 otherwise + */ +int ibz_is_zero(const ibz_t *x); + +/** @brief Test if x is 1 + * + * @returns 1 if x=1, 0 otherwise + */ +int ibz_is_one(const ibz_t *x); + +/** @brief Compare x to y + * + * @returns 0 if x=y, positive if x>y, negative if x= 0 and target must hold sufficient elements to hold ibz + * + * @param target Target digit_t array + * @param ibz ibz source ibz_t element + */ +void ibz_to_digits(digit_t *target, const ibz_t *ibz); +#define ibz_to_digit_array(T, I) \ + do { \ + memset((T), 0, sizeof(T)); \ + ibz_to_digits((T), (I)); \ + } while (0) + +/** @brief get int32_t equal to the lowest bits of i + * + * Should not be used to get the value of i if its bitsize is close to 32 bit + * It can however be used on any i to get an int32_t of the same parity as i (and same value modulo + * 4) + * + * @param i Input integer + */ +int32_t ibz_get(const ibz_t *i); + +/** @brief generate random value in [a, b] + * assumed that a >= 0 and b >= 0 and a < b + * @returns 1 on success, 0 on failiure + */ +int ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b); + +/** @brief generate random value in [-m, m] + * assumed that m > 0 and bitlength of m < 32 bit + * @returns 1 on success, 0 on failiure + */ +int ibz_rand_interval_minm_m(ibz_t *rand, int32_t m); + +/** @brief Bitsize of a. + * + * @returns Bitsize of a. + * + */ +int ibz_bitsize(const ibz_t *a); + +/** @brief Size of a in given base. + * + * @returns Size of a in given base. 
+ * + */ +int ibz_size_in_base(const ibz_t *a, int base); + +/** @} + */ + +/** @defgroup ibz_n Number theory functions + * @{ + */ + +/** + * @brief Greatest common divisor + * + * @param gcd Output: Set to the gcd of a and b + * @param a + * @param b + */ +void ibz_gcd(ibz_t *gcd, const ibz_t *a, const ibz_t *b); + +/** + * @brief Modular inverse + * + * @param inv Output: Set to the integer in [0,mod[ such that a*inv = 1 mod (mod) if it exists + * @param a + * @param mod + * @returns 1 if inverse exists and was computed, 0 otherwise + */ +int ibz_invmod(ibz_t *inv, const ibz_t *a, const ibz_t *mod); + +/** + * @brief Floor of Integer square root + * + * @param sqrt Output: Set to the floor of an integer square root + * @param a number of which a floor of an integer square root is searched + */ +void ibz_sqrt_floor(ibz_t *sqrt, const ibz_t *a); + +/** @} + */ + +// end of ibz_all +/** @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/isog.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/isog.h new file mode 100644 index 0000000000..b251ca3cdc --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/isog.h @@ -0,0 +1,28 @@ +#ifndef _ISOG_H_ +#define _ISOG_H_ +#include +#include + +/* KPS structure for isogenies of degree 2 or 4 */ +typedef struct +{ + ec_point_t K; +} ec_kps2_t; +typedef struct +{ + ec_point_t K[3]; +} ec_kps4_t; + +void xisog_2(ec_kps2_t *kps, ec_point_t *B, const ec_point_t P); // degree-2 isogeny construction +void xisog_2_singular(ec_kps2_t *kps, ec_point_t *B24, ec_point_t A24); + +void xisog_4(ec_kps4_t *kps, ec_point_t *B, const ec_point_t P); // degree-4 isogeny construction +void xisog_4_singular(ec_kps4_t *kps, ec_point_t *B24, const ec_point_t P, ec_point_t A24); + +void xeval_2(ec_point_t *R, ec_point_t *const Q, const int lenQ, const ec_kps2_t *kps); +void xeval_2_singular(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps2_t *kps); + +void xeval_4(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps4_t *kps); +void xeval_4_singular(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_point_t P, const ec_kps4_t *kps); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/isog_chains.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/isog_chains.c new file mode 100644 index 0000000000..abc9808057 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/isog_chains.c @@ -0,0 +1,241 @@ +#include "isog.h" +#include + +// since we use degree 4 isogeny steps, we need to handle the odd case with care +static uint32_t +ec_eval_even_strategy(ec_curve_t *curve, + ec_point_t *points, + unsigned len_points, + const ec_point_t *kernel, + const int isog_len) +{ + ec_curve_normalize_A24(curve); + ec_point_t A24; + copy_point(&A24, &curve->A24); + + int space = 1; + for (int i = 1; i < isog_len; i *= 2) + ++space; + + // Stack of remaining kernel points and their associated orders + ec_point_t splits[space]; + uint16_t todo[space]; + splits[0] = *kernel; + todo[0] = isog_len; + + int current = 0; // Pointer to current top of stack + + // Chain of 4-isogenies + for (int j = 0; j < isog_len / 2; ++j) { + assert(current >= 0); + assert(todo[current] >= 1); + // Get the next point of order 4 + while (todo[current] != 2) { + assert(todo[current] >= 3); + // A new split will be added + ++current; + assert(current < space); + // We set the seed of the new split to be computed and saved + copy_point(&splits[current], &splits[current - 1]); + // if we copied from 
the very first element, then we perform one additional doubling + unsigned num_dbls = todo[current - 1] / 4 * 2 + todo[current - 1] % 2; + todo[current] = todo[current - 1] - num_dbls; + while (num_dbls--) + xDBL_A24(&splits[current], &splits[current], &A24, false); + } + + if (j == 0) { + assert(fp2_is_one(&A24.z)); + if (!ec_is_four_torsion(&splits[current], curve)) + return -1; + + ec_point_t T; + xDBL_A24(&T, &splits[current], &A24, false); + if (fp2_is_zero(&T.x)) + return -1; // special isogenies not allowed + } else { + assert(todo[current] == 2); +#ifndef NDEBUG + if (fp2_is_zero(&splits[current].z)) + debug_print("splitting point z coordinate is unexpectedly zero"); + + ec_point_t test; + xDBL_A24(&test, &splits[current], &A24, false); + if (fp2_is_zero(&test.z)) + debug_print("z coordinate is unexpectedly zero before doubling"); + xDBL_A24(&test, &test, &A24, false); + if (!fp2_is_zero(&test.z)) + debug_print("z coordinate is unexpectedly not zero after doubling"); +#endif + } + + // Evaluate 4-isogeny + ec_kps4_t kps4; + xisog_4(&kps4, &A24, splits[current]); + xeval_4(splits, splits, current, &kps4); + for (int i = 0; i < current; ++i) + todo[i] -= 2; + xeval_4(points, points, len_points, &kps4); + + --current; + } + assert(isog_len % 2 ? !current : current == -1); + + // Final 2-isogeny + if (isog_len % 2) { +#ifndef NDEBUG + if (fp2_is_zero(&splits[0].z)) + debug_print("splitting point z coordinate is unexpectedly zero"); + ec_point_t test; + copy_point(&test, &splits[0]); + xDBL_A24(&test, &test, &A24, false); + if (!fp2_is_zero(&test.z)) + debug_print("z coordinate is unexpectedly not zero after doubling"); +#endif + + // We need to check the order of this point in case there were no 4-isogenies + if (isog_len == 1 && !ec_is_two_torsion(&splits[0], curve)) + return -1; + if (fp2_is_zero(&splits[0].x)) { + // special isogenies not allowed + // this case can only happen if isog_len == 1; otherwise the + // previous 4-isogenies we computed ensure that $T=(0:1)$ is put + // as the kernel of the dual isogeny + return -1; + } + + ec_kps2_t kps2; + xisog_2(&kps2, &A24, splits[0]); + xeval_2(points, points, len_points, &kps2); + } + + // Output curve in the form (A:C) + A24_to_AC(curve, &A24); + + curve->is_A24_computed_and_normalized = false; + + return 0; +} + +uint32_t +ec_eval_even(ec_curve_t *image, ec_isog_even_t *phi, ec_point_t *points, unsigned len_points) +{ + copy_curve(image, &phi->curve); + return ec_eval_even_strategy(image, points, len_points, &phi->kernel, phi->length); +} + +// naive implementation +uint32_t +ec_eval_small_chain(ec_curve_t *curve, + const ec_point_t *kernel, + int len, + ec_point_t *points, + unsigned len_points, + bool special) // do we allow special isogenies? 
+{ + + ec_point_t A24; + AC_to_A24(&A24, curve); + + ec_kps2_t kps; + ec_point_t small_K, big_K; + copy_point(&big_K, kernel); + + for (int i = 0; i < len; i++) { + copy_point(&small_K, &big_K); + // small_K = big_K; + for (int j = 0; j < len - i - 1; j++) { + xDBL_A24(&small_K, &small_K, &A24, false); + } + // Check the order of the point before the first isogeny step + if (i == 0 && !ec_is_two_torsion(&small_K, curve)) + return (uint32_t)-1; + // Perform isogeny step + if (fp2_is_zero(&small_K.x)) { + if (special) { + ec_point_t B24; + xisog_2_singular(&kps, &B24, A24); + xeval_2_singular(&big_K, &big_K, 1, &kps); + xeval_2_singular(points, points, len_points, &kps); + copy_point(&A24, &B24); + } else { + return (uint32_t)-1; + } + } else { + xisog_2(&kps, &A24, small_K); + xeval_2(&big_K, &big_K, 1, &kps); + xeval_2(points, points, len_points, &kps); + } + } + A24_to_AC(curve, &A24); + + curve->is_A24_computed_and_normalized = false; + return 0; +} + +uint32_t +ec_isomorphism(ec_isom_t *isom, const ec_curve_t *from, const ec_curve_t *to) +{ + fp2_t t0, t1, t2, t3, t4; + + fp2_mul(&t0, &from->A, &from->C); + fp2_mul(&t1, &to->A, &to->C); + + fp2_mul(&t2, &t1, &to->C); // toA*toC^2 + fp2_add(&t3, &t2, &t2); + fp2_add(&t3, &t3, &t3); + fp2_add(&t3, &t3, &t3); + fp2_add(&t2, &t2, &t3); // 9*toA*toC^2 + fp2_sqr(&t3, &to->A); + fp2_mul(&t3, &t3, &to->A); // toA^3 + fp2_add(&t3, &t3, &t3); + fp2_sub(&isom->Nx, &t3, &t2); // 2*toA^3-9*toA*toC^2 + fp2_mul(&t2, &t0, &from->A); // fromA^2*fromC + fp2_sqr(&t3, &from->C); + fp2_mul(&t3, &t3, &from->C); // fromC^3 + fp2_add(&t4, &t3, &t3); + fp2_add(&t3, &t4, &t3); // 3*fromC^3 + fp2_sub(&t3, &t3, &t2); // 3*fromC^3-fromA^2*fromC + fp2_mul(&isom->Nx, &isom->Nx, &t3); // lambda_x = (2*toA^3-9*toA*toC^2)*(3*fromC^3-fromA^2*fromC) + + fp2_mul(&t2, &t0, &from->C); // fromA*fromC^2 + fp2_add(&t3, &t2, &t2); + fp2_add(&t3, &t3, &t3); + fp2_add(&t3, &t3, &t3); + fp2_add(&t2, &t2, &t3); // 9*fromA*fromC^2 + fp2_sqr(&t3, &from->A); + fp2_mul(&t3, &t3, &from->A); // fromA^3 + fp2_add(&t3, &t3, &t3); + fp2_sub(&isom->D, &t3, &t2); // 2*fromA^3-9*fromA*fromC^2 + fp2_mul(&t2, &t1, &to->A); // toA^2*toC + fp2_sqr(&t3, &to->C); + fp2_mul(&t3, &t3, &to->C); // toC^3 + fp2_add(&t4, &t3, &t3); + fp2_add(&t3, &t4, &t3); // 3*toC^3 + fp2_sub(&t3, &t3, &t2); // 3*toC^3-toA^2*toC + fp2_mul(&isom->D, &isom->D, &t3); // lambda_z = (2*fromA^3-9*fromA*fromC^2)*(3*toC^3-toA^2*toC) + + // Mont -> SW -> SW -> Mont + fp2_mul(&t0, &to->C, &from->A); + fp2_mul(&t0, &t0, &isom->Nx); // lambda_x*toC*fromA + fp2_mul(&t1, &from->C, &to->A); + fp2_mul(&t1, &t1, &isom->D); // lambda_z*fromC*toA + fp2_sub(&isom->Nz, &t0, &t1); // lambda_x*toC*fromA - lambda_z*fromC*toA + fp2_mul(&t0, &from->C, &to->C); + fp2_add(&t1, &t0, &t0); + fp2_add(&t0, &t0, &t1); // 3*fromC*toC + fp2_mul(&isom->D, &isom->D, &t0); // 3*lambda_z*fromC*toC + fp2_mul(&isom->Nx, &isom->Nx, &t0); // 3*lambda_x*fromC*toC + + return (fp2_is_zero(&isom->Nx) | fp2_is_zero(&isom->D)); +} + +void +ec_iso_eval(ec_point_t *P, ec_isom_t *isom) +{ + fp2_t tmp; + fp2_mul(&P->x, &P->x, &isom->Nx); + fp2_mul(&tmp, &P->z, &isom->Nz); + fp2_add(&P->x, &P->x, &tmp); + fp2_mul(&P->z, &P->z, &isom->D); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/keygen.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/keygen.c new file mode 100644 index 0000000000..c1c206c99d --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/keygen.c @@ -0,0 +1,64 @@ +#include +#include +#include +#include +#include + +void 
+secret_key_init(secret_key_t *sk) +{ + quat_left_ideal_init(&(sk->secret_ideal)); + ibz_mat_2x2_init(&(sk->mat_BAcan_to_BA0_two)); + ec_curve_init(&sk->curve); +} + +void +secret_key_finalize(secret_key_t *sk) +{ + quat_left_ideal_finalize(&(sk->secret_ideal)); + ibz_mat_2x2_finalize(&(sk->mat_BAcan_to_BA0_two)); +} + +int +protocols_keygen(public_key_t *pk, secret_key_t *sk) +{ + int found = 0; + ec_basis_t B_0_two; + + // iterating until a solution has been found + while (!found) { + + found = quat_sampling_random_ideal_O0_given_norm( + &sk->secret_ideal, &SEC_DEGREE, 1, &QUAT_represent_integer_params, NULL); + + // replacing the secret key ideal by a shorter equivalent one for efficiency + found = found && quat_lideal_prime_norm_reduced_equivalent( + &sk->secret_ideal, &QUATALG_PINFTY, QUAT_primality_num_iter, QUAT_equiv_bound_coeff); + + // ideal to isogeny clapotis + + found = found && dim2id2iso_arbitrary_isogeny_evaluation(&B_0_two, &sk->curve, &sk->secret_ideal); + } + + // Assert the isogeny was found and images have the correct order + assert(test_basis_order_twof(&B_0_two, &sk->curve, TORSION_EVEN_POWER)); + + // Compute a deterministic basis with a hint to speed up verification + pk->hint_pk = ec_curve_to_basis_2f_to_hint(&sk->canonical_basis, &sk->curve, TORSION_EVEN_POWER); + + // Assert the deterministic basis we computed has the correct order + assert(test_basis_order_twof(&sk->canonical_basis, &sk->curve, TORSION_EVEN_POWER)); + + // Compute the 2x2 matrix basis change from the canonical basis to the evaluation of our secret + // isogeny + change_of_basis_matrix_tate( + &sk->mat_BAcan_to_BA0_two, &sk->canonical_basis, &B_0_two, &sk->curve, TORSION_EVEN_POWER); + + // Set the public key from the codomain curve + copy_curve(&pk->curve, &sk->curve); + pk->curve.is_A24_computed_and_normalized = false; // We don't send any precomputation + + assert(fp2_is_one(&pk->curve.C) == 0xFFFFFFFF); + + return found; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lvlx.cmake b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lvlx.cmake new file mode 100644 index 0000000000..3ab2d2dc90 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lvlx.cmake @@ -0,0 +1,12 @@ +set(SOURCE_FILES_GF_${SVARIANT_UPPER}_BROADWELL + ${SOURCE_FILES_GF_SPECIFIC} + fp.c + ${LVLX_DIR}/fp2.c +) + +add_library(${LIB_GF_${SVARIANT_UPPER}} STATIC ${SOURCE_FILES_GF_${SVARIANT_UPPER}_BROADWELL}) +target_include_directories(${LIB_GF_${SVARIANT_UPPER}} PRIVATE ${INC_COMMON} ${PROJECT_SOURCE_DIR}/src/precomp/ref/${SVARIANT_LOWER}/include ${INC_GF} ${INC_GF_${SVARIANT_UPPER}} include ${INC_PUBLIC}) +target_compile_options(${LIB_GF_${SVARIANT_UPPER}} PRIVATE ${C_OPT_FLAGS}) +target_compile_definitions(${LIB_GF_${SVARIANT_UPPER}} PUBLIC SQISIGN_VARIANT=${SVARIANT_LOWER}) + +add_subdirectory(test) diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mem.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mem.c new file mode 100644 index 0000000000..4956beda50 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mem.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: Apache-2.0 + +#include +#include +#include + +void +sqisign_secure_free(void *mem, size_t size) +{ + if (mem) { + typedef void *(*memset_t)(void *, int, size_t); + static volatile memset_t memset_func = memset; + memset_func(mem, 0, size); + free(mem); + } +} +void +sqisign_secure_clear(void *mem, size_t size) +{ + typedef void *(*memset_t)(void *, int, size_t); + static volatile memset_t 
memset_func = memset; + memset_func(mem, 0, size); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mem.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mem.h new file mode 100644 index 0000000000..ab8f6c6481 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mem.h @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef MEM_H +#define MEM_H +#include +#include + +/** + * Clears and frees allocated memory. + * + * @param[out] mem Memory to be cleared and freed. + * @param size Size of memory to be cleared and freed. + */ +void sqisign_secure_free(void *mem, size_t size); + +/** + * Clears memory. + * + * @param[out] mem Memory to be cleared. + * @param size Size of memory to be cleared. + */ +void sqisign_secure_clear(void *mem, size_t size); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.c new file mode 100644 index 0000000000..396d505aec --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.c @@ -0,0 +1,73 @@ +#include +#include +#if defined(MINI_GMP) +#include "mini-gmp.h" +#else +// This configuration is used only for testing +#include +#endif +#include + +// Exported for testing +int +mini_mpz_legendre(const mpz_t a, const mpz_t p) +{ + int res = 0; + mpz_t e; + mpz_init_set(e, p); + mpz_sub_ui(e, e, 1); + mpz_fdiv_q_2exp(e, e, 1); + mpz_powm(e, a, e, p); + + if (mpz_cmp_ui(e, 1) <= 0) { + res = mpz_get_si(e); + } else { + res = -1; + } + mpz_clear(e); + return res; +} + +#if defined(MINI_GMP) +int +mpz_legendre(const mpz_t a, const mpz_t p) +{ + return mini_mpz_legendre(a, p); +} +#endif + +// Exported for testing +double +mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op) +{ + double ret; + int tmp_exp; + mpz_t tmp; + + // Handle the case where op is 0 + if (mpz_cmp_ui(op, 0) == 0) { + *exp = 0; + return 0.0; + } + + *exp = mpz_sizeinbase(op, 2); + + mpz_init_set(tmp, op); + + if (*exp > DBL_MAX_EXP) { + mpz_fdiv_q_2exp(tmp, tmp, *exp - DBL_MAX_EXP); + } + + ret = frexp(mpz_get_d(tmp), &tmp_exp); + mpz_clear(tmp); + + return ret; +} + +#if defined(MINI_GMP) +double +mpz_get_d_2exp(signed long int *exp, const mpz_t op) +{ + return mini_mpz_get_d_2exp(exp, op); +} +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.h new file mode 100644 index 0000000000..0113cfdfe6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.h @@ -0,0 +1,19 @@ +#ifndef MINI_GMP_EXTRA_H +#define MINI_GMP_EXTRA_H + +#if defined MINI_GMP +#include "mini-gmp.h" + +typedef long mp_exp_t; + +int mpz_legendre(const mpz_t a, const mpz_t p); +double mpz_get_d_2exp(signed long int *exp, const mpz_t op); +#else +// This configuration is used only for testing +#include +#endif + +int mini_mpz_legendre(const mpz_t a, const mpz_t p); +double mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp.c new file mode 100644 index 0000000000..3830ab2031 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp.c @@ -0,0 +1,4671 @@ +/* Note: The code from mini-gmp is modifed from the original by + commenting out the definition of GMP_LIMB_BITS */ + +/* + mini-gmp, a minimalistic implementation of a GNU GMP subset. 
+ + Contributed to the GNU project by Niels Möller + Additional functionalities and improvements by Marco Bodrato. + +Copyright 1991-1997, 1999-2022 Free Software Foundation, Inc. + +This file is part of the GNU MP Library. + +The GNU MP Library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 2 of the License, or (at your option) any + later version. + +or both in parallel, as here. + +The GNU MP Library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the GNU MP Library. If not, +see https://www.gnu.org/licenses/. */ + +/* NOTE: All functions in this file which are not declared in + mini-gmp.h are internal, and are not intended to be compatible + with GMP or with future versions of mini-gmp. */ + +/* Much of the material copied from GMP files, including: gmp-impl.h, + longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c, + mpn/generic/lshift.c, mpn/generic/mul_1.c, + mpn/generic/mul_basecase.c, mpn/generic/rshift.c, + mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c, + mpn/generic/submul_1.c. */ + +#include +#include +#include +#include +#include +#include + +#include "mini-gmp.h" + +#if !defined(MINI_GMP_DONT_USE_FLOAT_H) +#include +#endif + + +/* Macros */ +/* Removed from here as it is passed as a compiler command-line definition */ +/* #define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT) */ + +#define GMP_LIMB_MAX ((mp_limb_t) ~ (mp_limb_t) 0) +#define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1)) + +#define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2)) +#define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1) + +#define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT) +#define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1)) + +#define GMP_ABS(x) ((x) >= 0 ? (x) : -(x)) +#define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1)) + +#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b)) +#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b)) + +#define GMP_CMP(a,b) (((a) > (b)) - ((a) < (b))) + +#if defined(DBL_MANT_DIG) && FLT_RADIX == 2 +#define GMP_DBL_MANT_BITS DBL_MANT_DIG +#else +#define GMP_DBL_MANT_BITS (53) +#endif + +/* Return non-zero if xp,xsize and yp,ysize overlap. + If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no + overlap. If both these are false, there's an overlap. 
*/ +#define GMP_MPN_OVERLAP_P(xp, xsize, yp, ysize) \ + ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp)) + +#define gmp_assert_nocarry(x) do { \ + mp_limb_t __cy = (x); \ + assert (__cy == 0); \ + (void) (__cy); \ + } while (0) + +#define gmp_clz(count, x) do { \ + mp_limb_t __clz_x = (x); \ + unsigned __clz_c = 0; \ + int LOCAL_SHIFT_BITS = 8; \ + if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) \ + for (; \ + (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \ + __clz_c += 8) \ + { __clz_x <<= LOCAL_SHIFT_BITS; } \ + for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++) \ + __clz_x <<= 1; \ + (count) = __clz_c; \ + } while (0) + +#define gmp_ctz(count, x) do { \ + mp_limb_t __ctz_x = (x); \ + unsigned __ctz_c = 0; \ + gmp_clz (__ctz_c, __ctz_x & - __ctz_x); \ + (count) = GMP_LIMB_BITS - 1 - __ctz_c; \ + } while (0) + +#define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \ + do { \ + mp_limb_t __x; \ + __x = (al) + (bl); \ + (sh) = (ah) + (bh) + (__x < (al)); \ + (sl) = __x; \ + } while (0) + +#define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \ + do { \ + mp_limb_t __x; \ + __x = (al) - (bl); \ + (sh) = (ah) - (bh) - ((al) < (bl)); \ + (sl) = __x; \ + } while (0) + +#define gmp_umul_ppmm(w1, w0, u, v) \ + do { \ + int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; \ + if (sizeof(unsigned int) * CHAR_BIT >= 2 * GMP_LIMB_BITS) \ + { \ + unsigned int __ww = (unsigned int) (u) * (v); \ + w0 = (mp_limb_t) __ww; \ + w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ + } \ + else if (GMP_ULONG_BITS >= 2 * GMP_LIMB_BITS) \ + { \ + unsigned long int __ww = (unsigned long int) (u) * (v); \ + w0 = (mp_limb_t) __ww; \ + w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ + } \ + else { \ + mp_limb_t __x0, __x1, __x2, __x3; \ + unsigned __ul, __vl, __uh, __vh; \ + mp_limb_t __u = (u), __v = (v); \ + assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); \ + \ + __ul = __u & GMP_LLIMB_MASK; \ + __uh = __u >> (GMP_LIMB_BITS / 2); \ + __vl = __v & GMP_LLIMB_MASK; \ + __vh = __v >> (GMP_LIMB_BITS / 2); \ + \ + __x0 = (mp_limb_t) __ul * __vl; \ + __x1 = (mp_limb_t) __ul * __vh; \ + __x2 = (mp_limb_t) __uh * __vl; \ + __x3 = (mp_limb_t) __uh * __vh; \ + \ + __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */ \ + __x1 += __x2; /* but this indeed can */ \ + if (__x1 < __x2) /* did we get it? */ \ + __x3 += GMP_HLIMB_BIT; /* yes, add it in the proper pos. */ \ + \ + (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \ + (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \ + } \ + } while (0) + +/* If mp_limb_t is of size smaller than int, plain u*v implies + automatic promotion to *signed* int, and then multiply may overflow + and cause undefined behavior. Explicitly cast to unsigned int for + that case. */ +#define gmp_umullo_limb(u, v) \ + ((sizeof(mp_limb_t) >= sizeof(int)) ? 
(u)*(v) : (unsigned int)(u) * (v)) + +#define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di) \ + do { \ + mp_limb_t _qh, _ql, _r, _mask; \ + gmp_umul_ppmm (_qh, _ql, (nh), (di)); \ + gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \ + _r = (nl) - gmp_umullo_limb (_qh, (d)); \ + _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \ + _qh += _mask; \ + _r += _mask & (d); \ + if (_r >= (d)) \ + { \ + _r -= (d); \ + _qh++; \ + } \ + \ + (r) = _r; \ + (q) = _qh; \ + } while (0) + +#define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \ + do { \ + mp_limb_t _q0, _t1, _t0, _mask; \ + gmp_umul_ppmm ((q), _q0, (n2), (dinv)); \ + gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \ + \ + /* Compute the two most significant limbs of n - q'd */ \ + (r1) = (n1) - gmp_umullo_limb ((d1), (q)); \ + gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \ + gmp_umul_ppmm (_t1, _t0, (d0), (q)); \ + gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \ + (q)++; \ + \ + /* Conditionally adjust q and the remainders */ \ + _mask = - (mp_limb_t) ((r1) >= _q0); \ + (q) += _mask; \ + gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \ + if ((r1) >= (d1)) \ + { \ + if ((r1) > (d1) || (r0) >= (d0)) \ + { \ + (q)++; \ + gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \ + } \ + } \ + } while (0) + +/* Swap macros. */ +#define MP_LIMB_T_SWAP(x, y) \ + do { \ + mp_limb_t __mp_limb_t_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_limb_t_swap__tmp; \ + } while (0) +#define MP_SIZE_T_SWAP(x, y) \ + do { \ + mp_size_t __mp_size_t_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_size_t_swap__tmp; \ + } while (0) +#define MP_BITCNT_T_SWAP(x,y) \ + do { \ + mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_bitcnt_t_swap__tmp; \ + } while (0) +#define MP_PTR_SWAP(x, y) \ + do { \ + mp_ptr __mp_ptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_ptr_swap__tmp; \ + } while (0) +#define MP_SRCPTR_SWAP(x, y) \ + do { \ + mp_srcptr __mp_srcptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_srcptr_swap__tmp; \ + } while (0) + +#define MPN_PTR_SWAP(xp,xs, yp,ys) \ + do { \ + MP_PTR_SWAP (xp, yp); \ + MP_SIZE_T_SWAP (xs, ys); \ + } while(0) +#define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \ + do { \ + MP_SRCPTR_SWAP (xp, yp); \ + MP_SIZE_T_SWAP (xs, ys); \ + } while(0) + +#define MPZ_PTR_SWAP(x, y) \ + do { \ + mpz_ptr __mpz_ptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mpz_ptr_swap__tmp; \ + } while (0) +#define MPZ_SRCPTR_SWAP(x, y) \ + do { \ + mpz_srcptr __mpz_srcptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mpz_srcptr_swap__tmp; \ + } while (0) + +const int mp_bits_per_limb = GMP_LIMB_BITS; + + +/* Memory allocation and other helper functions. 
*/ +static void +gmp_die (const char *msg) +{ + fprintf (stderr, "%s\n", msg); + abort(); +} + +static void * +gmp_default_alloc (size_t size) +{ + void *p; + + assert (size > 0); + + p = malloc (size); + if (!p) + gmp_die("gmp_default_alloc: Virtual memory exhausted."); + + return p; +} + +static void * +gmp_default_realloc (void *old, size_t unused_old_size, size_t new_size) +{ + void * p; + + p = realloc (old, new_size); + + if (!p) + gmp_die("gmp_default_realloc: Virtual memory exhausted."); + + return p; +} + +static void +gmp_default_free (void *p, size_t unused_size) +{ + free (p); +} + +static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc; +static void * (*gmp_reallocate_func) (void *, size_t, size_t) = gmp_default_realloc; +static void (*gmp_free_func) (void *, size_t) = gmp_default_free; + +void +mp_get_memory_functions (void *(**alloc_func) (size_t), + void *(**realloc_func) (void *, size_t, size_t), + void (**free_func) (void *, size_t)) +{ + if (alloc_func) + *alloc_func = gmp_allocate_func; + + if (realloc_func) + *realloc_func = gmp_reallocate_func; + + if (free_func) + *free_func = gmp_free_func; +} + +void +mp_set_memory_functions (void *(*alloc_func) (size_t), + void *(*realloc_func) (void *, size_t, size_t), + void (*free_func) (void *, size_t)) +{ + if (!alloc_func) + alloc_func = gmp_default_alloc; + if (!realloc_func) + realloc_func = gmp_default_realloc; + if (!free_func) + free_func = gmp_default_free; + + gmp_allocate_func = alloc_func; + gmp_reallocate_func = realloc_func; + gmp_free_func = free_func; +} + +#define gmp_alloc(size) ((*gmp_allocate_func)((size))) +#define gmp_free(p, size) ((*gmp_free_func) ((p), (size))) +#define gmp_realloc(ptr, old_size, size) ((*gmp_reallocate_func)(ptr, old_size, size)) + +static mp_ptr +gmp_alloc_limbs (mp_size_t size) +{ + return (mp_ptr) gmp_alloc (size * sizeof (mp_limb_t)); +} + +static mp_ptr +gmp_realloc_limbs (mp_ptr old, mp_size_t old_size, mp_size_t size) +{ + assert (size > 0); + return (mp_ptr) gmp_realloc (old, old_size * sizeof (mp_limb_t), size * sizeof (mp_limb_t)); +} + +static void +gmp_free_limbs (mp_ptr old, mp_size_t size) +{ + gmp_free (old, size * sizeof (mp_limb_t)); +} + + +/* MPN interface */ + +void +mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n) +{ + mp_size_t i; + for (i = 0; i < n; i++) + d[i] = s[i]; +} + +void +mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n) +{ + while (--n >= 0) + d[n] = s[n]; +} + +int +mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + while (--n >= 0) + { + if (ap[n] != bp[n]) + return ap[n] > bp[n] ? 1 : -1; + } + return 0; +} + +static int +mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) +{ + if (an != bn) + return an < bn ? 
-1 : 1; + else + return mpn_cmp (ap, bp, an); +} + +static mp_size_t +mpn_normalized_size (mp_srcptr xp, mp_size_t n) +{ + while (n > 0 && xp[n-1] == 0) + --n; + return n; +} + +int +mpn_zero_p(mp_srcptr rp, mp_size_t n) +{ + return mpn_normalized_size (rp, n) == 0; +} + +void +mpn_zero (mp_ptr rp, mp_size_t n) +{ + while (--n >= 0) + rp[n] = 0; +} + +mp_limb_t +mpn_add_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) +{ + mp_size_t i; + + assert (n > 0); + i = 0; + do + { + mp_limb_t r = ap[i] + b; + /* Carry out */ + b = (r < b); + rp[i] = r; + } + while (++i < n); + + return b; +} + +mp_limb_t +mpn_add_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + mp_size_t i; + mp_limb_t cy; + + for (i = 0, cy = 0; i < n; i++) + { + mp_limb_t a, b, r; + a = ap[i]; b = bp[i]; + r = a + cy; + cy = (r < cy); + r += b; + cy += (r < b); + rp[i] = r; + } + return cy; +} + +mp_limb_t +mpn_add (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) +{ + mp_limb_t cy; + + assert (an >= bn); + + cy = mpn_add_n (rp, ap, bp, bn); + if (an > bn) + cy = mpn_add_1 (rp + bn, ap + bn, an - bn, cy); + return cy; +} + +mp_limb_t +mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) +{ + mp_size_t i; + + assert (n > 0); + + i = 0; + do + { + mp_limb_t a = ap[i]; + /* Carry out */ + mp_limb_t cy = a < b; + rp[i] = a - b; + b = cy; + } + while (++i < n); + + return b; +} + +mp_limb_t +mpn_sub_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + mp_size_t i; + mp_limb_t cy; + + for (i = 0, cy = 0; i < n; i++) + { + mp_limb_t a, b; + a = ap[i]; b = bp[i]; + b += cy; + cy = (b < cy); + cy += (a < b); + rp[i] = a - b; + } + return cy; +} + +mp_limb_t +mpn_sub (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) +{ + mp_limb_t cy; + + assert (an >= bn); + + cy = mpn_sub_n (rp, ap, bp, bn); + if (an > bn) + cy = mpn_sub_1 (rp + bn, ap + bn, an - bn, cy); + return cy; +} + +mp_limb_t +mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) +{ + mp_limb_t ul, cl, hpl, lpl; + + assert (n >= 1); + + cl = 0; + do + { + ul = *up++; + gmp_umul_ppmm (hpl, lpl, ul, vl); + + lpl += cl; + cl = (lpl < cl) + hpl; + + *rp++ = lpl; + } + while (--n != 0); + + return cl; +} + +mp_limb_t +mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) +{ + mp_limb_t ul, cl, hpl, lpl, rl; + + assert (n >= 1); + + cl = 0; + do + { + ul = *up++; + gmp_umul_ppmm (hpl, lpl, ul, vl); + + lpl += cl; + cl = (lpl < cl) + hpl; + + rl = *rp; + lpl = rl + lpl; + cl += lpl < rl; + *rp++ = lpl; + } + while (--n != 0); + + return cl; +} + +mp_limb_t +mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) +{ + mp_limb_t ul, cl, hpl, lpl, rl; + + assert (n >= 1); + + cl = 0; + do + { + ul = *up++; + gmp_umul_ppmm (hpl, lpl, ul, vl); + + lpl += cl; + cl = (lpl < cl) + hpl; + + rl = *rp; + lpl = rl - lpl; + cl += lpl > rl; + *rp++ = lpl; + } + while (--n != 0); + + return cl; +} + +mp_limb_t +mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn) +{ + assert (un >= vn); + assert (vn >= 1); + assert (!GMP_MPN_OVERLAP_P(rp, un + vn, up, un)); + assert (!GMP_MPN_OVERLAP_P(rp, un + vn, vp, vn)); + + /* We first multiply by the low order limb. This result can be + stored, not added, to rp. We also avoid a loop for zeroing this + way. */ + + rp[un] = mpn_mul_1 (rp, up, un, vp[0]); + + /* Now accumulate the product of up[] and the next higher limb from + vp[]. 
*/ + + while (--vn >= 1) + { + rp += 1, vp += 1; + rp[un] = mpn_addmul_1 (rp, up, un, vp[0]); + } + return rp[un]; +} + +void +mpn_mul_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + mpn_mul (rp, ap, n, bp, n); +} + +void +mpn_sqr (mp_ptr rp, mp_srcptr ap, mp_size_t n) +{ + mpn_mul (rp, ap, n, ap, n); +} + +mp_limb_t +mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) +{ + mp_limb_t high_limb, low_limb; + unsigned int tnc; + mp_limb_t retval; + + assert (n >= 1); + assert (cnt >= 1); + assert (cnt < GMP_LIMB_BITS); + + up += n; + rp += n; + + tnc = GMP_LIMB_BITS - cnt; + low_limb = *--up; + retval = low_limb >> tnc; + high_limb = (low_limb << cnt); + + while (--n != 0) + { + low_limb = *--up; + *--rp = high_limb | (low_limb >> tnc); + high_limb = (low_limb << cnt); + } + *--rp = high_limb; + + return retval; +} + +mp_limb_t +mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) +{ + mp_limb_t high_limb, low_limb; + unsigned int tnc; + mp_limb_t retval; + + assert (n >= 1); + assert (cnt >= 1); + assert (cnt < GMP_LIMB_BITS); + + tnc = GMP_LIMB_BITS - cnt; + high_limb = *up++; + retval = (high_limb << tnc); + low_limb = high_limb >> cnt; + + while (--n != 0) + { + high_limb = *up++; + *rp++ = low_limb | (high_limb << tnc); + low_limb = high_limb >> cnt; + } + *rp = low_limb; + + return retval; +} + +static mp_bitcnt_t +mpn_common_scan (mp_limb_t limb, mp_size_t i, mp_srcptr up, mp_size_t un, + mp_limb_t ux) +{ + unsigned cnt; + + assert (ux == 0 || ux == GMP_LIMB_MAX); + assert (0 <= i && i <= un ); + + while (limb == 0) + { + i++; + if (i == un) + return (ux == 0 ? ~(mp_bitcnt_t) 0 : un * GMP_LIMB_BITS); + limb = ux ^ up[i]; + } + gmp_ctz (cnt, limb); + return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt; +} + +mp_bitcnt_t +mpn_scan1 (mp_srcptr ptr, mp_bitcnt_t bit) +{ + mp_size_t i; + i = bit / GMP_LIMB_BITS; + + return mpn_common_scan ( ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), + i, ptr, i, 0); +} + +mp_bitcnt_t +mpn_scan0 (mp_srcptr ptr, mp_bitcnt_t bit) +{ + mp_size_t i; + i = bit / GMP_LIMB_BITS; + + return mpn_common_scan (~ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), + i, ptr, i, GMP_LIMB_MAX); +} + +void +mpn_com (mp_ptr rp, mp_srcptr up, mp_size_t n) +{ + while (--n >= 0) + *rp++ = ~ *up++; +} + +mp_limb_t +mpn_neg (mp_ptr rp, mp_srcptr up, mp_size_t n) +{ + while (*up == 0) + { + *rp = 0; + if (!--n) + return 0; + ++up; ++rp; + } + *rp = - *up; + mpn_com (++rp, ++up, --n); + return 1; +} + + +/* MPN division interface. */ + +/* The 3/2 inverse is defined as + + m = floor( (B^3-1) / (B u1 + u0)) - B +*/ +mp_limb_t +mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0) +{ + mp_limb_t r, m; + + { + mp_limb_t p, ql; + unsigned ul, uh, qh; + + assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); + /* For notation, let b denote the half-limb base, so that B = b^2. + Split u1 = b uh + ul. */ + ul = u1 & GMP_LLIMB_MASK; + uh = u1 >> (GMP_LIMB_BITS / 2); + + /* Approximation of the high half of quotient. Differs from the 2/1 + inverse of the half limb uh, since we have already subtracted + u0. */ + qh = (u1 ^ GMP_LIMB_MAX) / uh; + + /* Adjust to get a half-limb 3/2 inverse, i.e., we want + + qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u + = floor( (b (~u) + b-1) / u), + + and the remainder + + r = b (~u) + b-1 - qh (b uh + ul) + = b (~u - qh uh) + b-1 - qh ul + + Subtraction of qh ul may underflow, which implies adjustments. + But by normalization, 2 u >= B > qh ul, so we need to adjust by + at most 2. 
+ */ + + r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK; + + p = (mp_limb_t) qh * ul; + /* Adjustment steps taken from udiv_qrnnd_c */ + if (r < p) + { + qh--; + r += u1; + if (r >= u1) /* i.e. we didn't get carry when adding to r */ + if (r < p) + { + qh--; + r += u1; + } + } + r -= p; + + /* Low half of the quotient is + + ql = floor ( (b r + b-1) / u1). + + This is a 3/2 division (on half-limbs), for which qh is a + suitable inverse. */ + + p = (r >> (GMP_LIMB_BITS / 2)) * qh + r; + /* Unlike full-limb 3/2, we can add 1 without overflow. For this to + work, it is essential that ql is a full mp_limb_t. */ + ql = (p >> (GMP_LIMB_BITS / 2)) + 1; + + /* By the 3/2 trick, we don't need the high half limb. */ + r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1; + + if (r >= (GMP_LIMB_MAX & (p << (GMP_LIMB_BITS / 2)))) + { + ql--; + r += u1; + } + m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql; + if (r >= u1) + { + m++; + r -= u1; + } + } + + /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a + 3/2 inverse. */ + if (u0 > 0) + { + mp_limb_t th, tl; + r = ~r; + r += u0; + if (r < u0) + { + m--; + if (r >= u1) + { + m--; + r -= u1; + } + r -= u1; + } + gmp_umul_ppmm (th, tl, u0, m); + r += th; + if (r < th) + { + m--; + m -= ((r > u1) | ((r == u1) & (tl > u0))); + } + } + + return m; +} + +struct gmp_div_inverse +{ + /* Normalization shift count. */ + unsigned shift; + /* Normalized divisor (d0 unused for mpn_div_qr_1) */ + mp_limb_t d1, d0; + /* Inverse, for 2/1 or 3/2. */ + mp_limb_t di; +}; + +static void +mpn_div_qr_1_invert (struct gmp_div_inverse *inv, mp_limb_t d) +{ + unsigned shift; + + assert (d > 0); + gmp_clz (shift, d); + inv->shift = shift; + inv->d1 = d << shift; + inv->di = mpn_invert_limb (inv->d1); +} + +static void +mpn_div_qr_2_invert (struct gmp_div_inverse *inv, + mp_limb_t d1, mp_limb_t d0) +{ + unsigned shift; + + assert (d1 > 0); + gmp_clz (shift, d1); + inv->shift = shift; + if (shift > 0) + { + d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); + d0 <<= shift; + } + inv->d1 = d1; + inv->d0 = d0; + inv->di = mpn_invert_3by2 (d1, d0); +} + +static void +mpn_div_qr_invert (struct gmp_div_inverse *inv, + mp_srcptr dp, mp_size_t dn) +{ + assert (dn > 0); + + if (dn == 1) + mpn_div_qr_1_invert (inv, dp[0]); + else if (dn == 2) + mpn_div_qr_2_invert (inv, dp[1], dp[0]); + else + { + unsigned shift; + mp_limb_t d1, d0; + + d1 = dp[dn-1]; + d0 = dp[dn-2]; + assert (d1 > 0); + gmp_clz (shift, d1); + inv->shift = shift; + if (shift > 0) + { + d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); + d0 = (d0 << shift) | (dp[dn-3] >> (GMP_LIMB_BITS - shift)); + } + inv->d1 = d1; + inv->d0 = d0; + inv->di = mpn_invert_3by2 (d1, d0); + } +} + +/* Not matching current public gmp interface, rather corresponding to + the sbpi1_div_* functions. */ +static mp_limb_t +mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn, + const struct gmp_div_inverse *inv) +{ + mp_limb_t d, di; + mp_limb_t r; + mp_ptr tp = NULL; + mp_size_t tn = 0; + + if (inv->shift > 0) + { + /* Shift, reusing qp area if possible. In-place shift if qp == np. 
*/ + tp = qp; + if (!tp) + { + tn = nn; + tp = gmp_alloc_limbs (tn); + } + r = mpn_lshift (tp, np, nn, inv->shift); + np = tp; + } + else + r = 0; + + d = inv->d1; + di = inv->di; + while (--nn >= 0) + { + mp_limb_t q; + + gmp_udiv_qrnnd_preinv (q, r, r, np[nn], d, di); + if (qp) + qp[nn] = q; + } + if (tn) + gmp_free_limbs (tp, tn); + + return r >> inv->shift; +} + +static void +mpn_div_qr_2_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, + const struct gmp_div_inverse *inv) +{ + unsigned shift; + mp_size_t i; + mp_limb_t d1, d0, di, r1, r0; + + assert (nn >= 2); + shift = inv->shift; + d1 = inv->d1; + d0 = inv->d0; + di = inv->di; + + if (shift > 0) + r1 = mpn_lshift (np, np, nn, shift); + else + r1 = 0; + + r0 = np[nn - 1]; + + i = nn - 2; + do + { + mp_limb_t n0, q; + n0 = np[i]; + gmp_udiv_qr_3by2 (q, r1, r0, r1, r0, n0, d1, d0, di); + + if (qp) + qp[i] = q; + } + while (--i >= 0); + + if (shift > 0) + { + assert ((r0 & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - shift))) == 0); + r0 = (r0 >> shift) | (r1 << (GMP_LIMB_BITS - shift)); + r1 >>= shift; + } + + np[1] = r1; + np[0] = r0; +} + +static void +mpn_div_qr_pi1 (mp_ptr qp, + mp_ptr np, mp_size_t nn, mp_limb_t n1, + mp_srcptr dp, mp_size_t dn, + mp_limb_t dinv) +{ + mp_size_t i; + + mp_limb_t d1, d0; + mp_limb_t cy, cy1; + mp_limb_t q; + + assert (dn > 2); + assert (nn >= dn); + + d1 = dp[dn - 1]; + d0 = dp[dn - 2]; + + assert ((d1 & GMP_LIMB_HIGHBIT) != 0); + /* Iteration variable is the index of the q limb. + * + * We divide + * by + */ + + i = nn - dn; + do + { + mp_limb_t n0 = np[dn-1+i]; + + if (n1 == d1 && n0 == d0) + { + q = GMP_LIMB_MAX; + mpn_submul_1 (np+i, dp, dn, q); + n1 = np[dn-1+i]; /* update n1, last loop's value will now be invalid */ + } + else + { + gmp_udiv_qr_3by2 (q, n1, n0, n1, n0, np[dn-2+i], d1, d0, dinv); + + cy = mpn_submul_1 (np + i, dp, dn-2, q); + + cy1 = n0 < cy; + n0 = n0 - cy; + cy = n1 < cy1; + n1 = n1 - cy1; + np[dn-2+i] = n0; + + if (cy != 0) + { + n1 += d1 + mpn_add_n (np + i, np + i, dp, dn - 1); + q--; + } + } + + if (qp) + qp[i] = q; + } + while (--i >= 0); + + np[dn - 1] = n1; +} + +static void +mpn_div_qr_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, + mp_srcptr dp, mp_size_t dn, + const struct gmp_div_inverse *inv) +{ + assert (dn > 0); + assert (nn >= dn); + + if (dn == 1) + np[0] = mpn_div_qr_1_preinv (qp, np, nn, inv); + else if (dn == 2) + mpn_div_qr_2_preinv (qp, np, nn, inv); + else + { + mp_limb_t nh; + unsigned shift; + + assert (inv->d1 == dp[dn-1]); + assert (inv->d0 == dp[dn-2]); + assert ((inv->d1 & GMP_LIMB_HIGHBIT) != 0); + + shift = inv->shift; + if (shift > 0) + nh = mpn_lshift (np, np, nn, shift); + else + nh = 0; + + mpn_div_qr_pi1 (qp, np, nn, nh, dp, dn, inv->di); + + if (shift > 0) + gmp_assert_nocarry (mpn_rshift (np, np, dn, shift)); + } +} + +static void +mpn_div_qr (mp_ptr qp, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn) +{ + struct gmp_div_inverse inv; + mp_ptr tp = NULL; + + assert (dn > 0); + assert (nn >= dn); + + mpn_div_qr_invert (&inv, dp, dn); + if (dn > 2 && inv.shift > 0) + { + tp = gmp_alloc_limbs (dn); + gmp_assert_nocarry (mpn_lshift (tp, dp, dn, inv.shift)); + dp = tp; + } + mpn_div_qr_preinv (qp, np, nn, dp, dn, &inv); + if (tp) + gmp_free_limbs (tp, dn); +} + + +/* MPN base conversion. 
*/ +static unsigned +mpn_base_power_of_two_p (unsigned b) +{ + switch (b) + { + case 2: return 1; + case 4: return 2; + case 8: return 3; + case 16: return 4; + case 32: return 5; + case 64: return 6; + case 128: return 7; + case 256: return 8; + default: return 0; + } +} + +struct mpn_base_info +{ + /* bb is the largest power of the base which fits in one limb, and + exp is the corresponding exponent. */ + unsigned exp; + mp_limb_t bb; +}; + +static void +mpn_get_base_info (struct mpn_base_info *info, mp_limb_t b) +{ + mp_limb_t m; + mp_limb_t p; + unsigned exp; + + m = GMP_LIMB_MAX / b; + for (exp = 1, p = b; p <= m; exp++) + p *= b; + + info->exp = exp; + info->bb = p; +} + +static mp_bitcnt_t +mpn_limb_size_in_base_2 (mp_limb_t u) +{ + unsigned shift; + + assert (u > 0); + gmp_clz (shift, u); + return GMP_LIMB_BITS - shift; +} + +static size_t +mpn_get_str_bits (unsigned char *sp, unsigned bits, mp_srcptr up, mp_size_t un) +{ + unsigned char mask; + size_t sn, j; + mp_size_t i; + unsigned shift; + + sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]) + + bits - 1) / bits; + + mask = (1U << bits) - 1; + + for (i = 0, j = sn, shift = 0; j-- > 0;) + { + unsigned char digit = up[i] >> shift; + + shift += bits; + + if (shift >= GMP_LIMB_BITS && ++i < un) + { + shift -= GMP_LIMB_BITS; + digit |= up[i] << (bits - shift); + } + sp[j] = digit & mask; + } + return sn; +} + +/* We generate digits from the least significant end, and reverse at + the end. */ +static size_t +mpn_limb_get_str (unsigned char *sp, mp_limb_t w, + const struct gmp_div_inverse *binv) +{ + mp_size_t i; + for (i = 0; w > 0; i++) + { + mp_limb_t h, l, r; + + h = w >> (GMP_LIMB_BITS - binv->shift); + l = w << binv->shift; + + gmp_udiv_qrnnd_preinv (w, r, h, l, binv->d1, binv->di); + assert ((r & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - binv->shift))) == 0); + r >>= binv->shift; + + sp[i] = r; + } + return i; +} + +static size_t +mpn_get_str_other (unsigned char *sp, + int base, const struct mpn_base_info *info, + mp_ptr up, mp_size_t un) +{ + struct gmp_div_inverse binv; + size_t sn; + size_t i; + + mpn_div_qr_1_invert (&binv, base); + + sn = 0; + + if (un > 1) + { + struct gmp_div_inverse bbinv; + mpn_div_qr_1_invert (&bbinv, info->bb); + + do + { + mp_limb_t w; + size_t done; + w = mpn_div_qr_1_preinv (up, up, un, &bbinv); + un -= (up[un-1] == 0); + done = mpn_limb_get_str (sp + sn, w, &binv); + + for (sn += done; done < info->exp; done++) + sp[sn++] = 0; + } + while (un > 1); + } + sn += mpn_limb_get_str (sp + sn, up[0], &binv); + + /* Reverse order */ + for (i = 0; 2*i + 1 < sn; i++) + { + unsigned char t = sp[i]; + sp[i] = sp[sn - i - 1]; + sp[sn - i - 1] = t; + } + + return sn; +} + +size_t +mpn_get_str (unsigned char *sp, int base, mp_ptr up, mp_size_t un) +{ + unsigned bits; + + assert (un > 0); + assert (up[un-1] > 0); + + bits = mpn_base_power_of_two_p (base); + if (bits) + return mpn_get_str_bits (sp, bits, up, un); + else + { + struct mpn_base_info info; + + mpn_get_base_info (&info, base); + return mpn_get_str_other (sp, base, &info, up, un); + } +} + +static mp_size_t +mpn_set_str_bits (mp_ptr rp, const unsigned char *sp, size_t sn, + unsigned bits) +{ + mp_size_t rn; + mp_limb_t limb; + unsigned shift; + + for (limb = 0, rn = 0, shift = 0; sn-- > 0; ) + { + limb |= (mp_limb_t) sp[sn] << shift; + shift += bits; + if (shift >= GMP_LIMB_BITS) + { + shift -= GMP_LIMB_BITS; + rp[rn++] = limb; + /* Next line is correct also if shift == 0, + bits == 8, and mp_limb_t == unsigned char. 
*/ + limb = (unsigned int) sp[sn] >> (bits - shift); + } + } + if (limb != 0) + rp[rn++] = limb; + else + rn = mpn_normalized_size (rp, rn); + return rn; +} + +/* Result is usually normalized, except for all-zero input, in which + case a single zero limb is written at *RP, and 1 is returned. */ +static mp_size_t +mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn, + mp_limb_t b, const struct mpn_base_info *info) +{ + mp_size_t rn; + mp_limb_t w; + unsigned k; + size_t j; + + assert (sn > 0); + + k = 1 + (sn - 1) % info->exp; + + j = 0; + w = sp[j++]; + while (--k != 0) + w = w * b + sp[j++]; + + rp[0] = w; + + for (rn = 1; j < sn;) + { + mp_limb_t cy; + + w = sp[j++]; + for (k = 1; k < info->exp; k++) + w = w * b + sp[j++]; + + cy = mpn_mul_1 (rp, rp, rn, info->bb); + cy += mpn_add_1 (rp, rp, rn, w); + if (cy > 0) + rp[rn++] = cy; + } + assert (j == sn); + + return rn; +} + +mp_size_t +mpn_set_str (mp_ptr rp, const unsigned char *sp, size_t sn, int base) +{ + unsigned bits; + + if (sn == 0) + return 0; + + bits = mpn_base_power_of_two_p (base); + if (bits) + return mpn_set_str_bits (rp, sp, sn, bits); + else + { + struct mpn_base_info info; + + mpn_get_base_info (&info, base); + return mpn_set_str_other (rp, sp, sn, base, &info); + } +} + + +/* MPZ interface */ +void +mpz_init (mpz_t r) +{ + static const mp_limb_t dummy_limb = GMP_LIMB_MAX & 0xc1a0; + + r->_mp_alloc = 0; + r->_mp_size = 0; + r->_mp_d = (mp_ptr) &dummy_limb; +} + +/* The utility of this function is a bit limited, since many functions + assigns the result variable using mpz_swap. */ +void +mpz_init2 (mpz_t r, mp_bitcnt_t bits) +{ + mp_size_t rn; + + bits -= (bits != 0); /* Round down, except if 0 */ + rn = 1 + bits / GMP_LIMB_BITS; + + r->_mp_alloc = rn; + r->_mp_size = 0; + r->_mp_d = gmp_alloc_limbs (rn); +} + +void +mpz_clear (mpz_t r) +{ + if (r->_mp_alloc) + gmp_free_limbs (r->_mp_d, r->_mp_alloc); +} + +static mp_ptr +mpz_realloc (mpz_t r, mp_size_t size) +{ + size = GMP_MAX (size, 1); + + if (r->_mp_alloc) + r->_mp_d = gmp_realloc_limbs (r->_mp_d, r->_mp_alloc, size); + else + r->_mp_d = gmp_alloc_limbs (size); + r->_mp_alloc = size; + + if (GMP_ABS (r->_mp_size) > size) + r->_mp_size = 0; + + return r->_mp_d; +} + +/* Realloc for an mpz_t WHAT if it has less than NEEDED limbs. */ +#define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc \ + ? mpz_realloc(z,n) \ + : (z)->_mp_d) + +/* MPZ assignment and basic conversions. 
*/ +void +mpz_set_si (mpz_t r, signed long int x) +{ + if (x >= 0) + mpz_set_ui (r, x); + else /* (x < 0) */ + if (GMP_LIMB_BITS < GMP_ULONG_BITS) + { + mpz_set_ui (r, GMP_NEG_CAST (unsigned long int, x)); + mpz_neg (r, r); + } + else + { + r->_mp_size = -1; + MPZ_REALLOC (r, 1)[0] = GMP_NEG_CAST (unsigned long int, x); + } +} + +void +mpz_set_ui (mpz_t r, unsigned long int x) +{ + if (x > 0) + { + r->_mp_size = 1; + MPZ_REALLOC (r, 1)[0] = x; + if (GMP_LIMB_BITS < GMP_ULONG_BITS) + { + int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; + while (x >>= LOCAL_GMP_LIMB_BITS) + { + ++ r->_mp_size; + MPZ_REALLOC (r, r->_mp_size)[r->_mp_size - 1] = x; + } + } + } + else + r->_mp_size = 0; +} + +void +mpz_set (mpz_t r, const mpz_t x) +{ + /* Allow the NOP r == x */ + if (r != x) + { + mp_size_t n; + mp_ptr rp; + + n = GMP_ABS (x->_mp_size); + rp = MPZ_REALLOC (r, n); + + mpn_copyi (rp, x->_mp_d, n); + r->_mp_size = x->_mp_size; + } +} + +void +mpz_init_set_si (mpz_t r, signed long int x) +{ + mpz_init (r); + mpz_set_si (r, x); +} + +void +mpz_init_set_ui (mpz_t r, unsigned long int x) +{ + mpz_init (r); + mpz_set_ui (r, x); +} + +void +mpz_init_set (mpz_t r, const mpz_t x) +{ + mpz_init (r); + mpz_set (r, x); +} + +int +mpz_fits_slong_p (const mpz_t u) +{ + return mpz_cmp_si (u, LONG_MAX) <= 0 && mpz_cmp_si (u, LONG_MIN) >= 0; +} + +static int +mpn_absfits_ulong_p (mp_srcptr up, mp_size_t un) +{ + int ulongsize = GMP_ULONG_BITS / GMP_LIMB_BITS; + mp_limb_t ulongrem = 0; + + if (GMP_ULONG_BITS % GMP_LIMB_BITS != 0) + ulongrem = (mp_limb_t) (ULONG_MAX >> GMP_LIMB_BITS * ulongsize) + 1; + + return un <= ulongsize || (up[ulongsize] < ulongrem && un == ulongsize + 1); +} + +int +mpz_fits_ulong_p (const mpz_t u) +{ + mp_size_t us = u->_mp_size; + + return us >= 0 && mpn_absfits_ulong_p (u->_mp_d, us); +} + +int +mpz_fits_sint_p (const mpz_t u) +{ + return mpz_cmp_si (u, INT_MAX) <= 0 && mpz_cmp_si (u, INT_MIN) >= 0; +} + +int +mpz_fits_uint_p (const mpz_t u) +{ + return u->_mp_size >= 0 && mpz_cmpabs_ui (u, UINT_MAX) <= 0; +} + +int +mpz_fits_sshort_p (const mpz_t u) +{ + return mpz_cmp_si (u, SHRT_MAX) <= 0 && mpz_cmp_si (u, SHRT_MIN) >= 0; +} + +int +mpz_fits_ushort_p (const mpz_t u) +{ + return u->_mp_size >= 0 && mpz_cmpabs_ui (u, USHRT_MAX) <= 0; +} + +long int +mpz_get_si (const mpz_t u) +{ + unsigned long r = mpz_get_ui (u); + unsigned long c = -LONG_MAX - LONG_MIN; + + if (u->_mp_size < 0) + /* This expression is necessary to properly handle -LONG_MIN */ + return -(long) c - (long) ((r - c) & LONG_MAX); + else + return (long) (r & LONG_MAX); +} + +unsigned long int +mpz_get_ui (const mpz_t u) +{ + if (GMP_LIMB_BITS < GMP_ULONG_BITS) + { + int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; + unsigned long r = 0; + mp_size_t n = GMP_ABS (u->_mp_size); + n = GMP_MIN (n, 1 + (mp_size_t) (GMP_ULONG_BITS - 1) / GMP_LIMB_BITS); + while (--n >= 0) + r = (r << LOCAL_GMP_LIMB_BITS) + u->_mp_d[n]; + return r; + } + + return u->_mp_size == 0 ? 
0 : u->_mp_d[0]; +} + +size_t +mpz_size (const mpz_t u) +{ + return GMP_ABS (u->_mp_size); +} + +mp_limb_t +mpz_getlimbn (const mpz_t u, mp_size_t n) +{ + if (n >= 0 && n < GMP_ABS (u->_mp_size)) + return u->_mp_d[n]; + else + return 0; +} + +void +mpz_realloc2 (mpz_t x, mp_bitcnt_t n) +{ + mpz_realloc (x, 1 + (n - (n != 0)) / GMP_LIMB_BITS); +} + +mp_srcptr +mpz_limbs_read (mpz_srcptr x) +{ + return x->_mp_d; +} + +mp_ptr +mpz_limbs_modify (mpz_t x, mp_size_t n) +{ + assert (n > 0); + return MPZ_REALLOC (x, n); +} + +mp_ptr +mpz_limbs_write (mpz_t x, mp_size_t n) +{ + return mpz_limbs_modify (x, n); +} + +void +mpz_limbs_finish (mpz_t x, mp_size_t xs) +{ + mp_size_t xn; + xn = mpn_normalized_size (x->_mp_d, GMP_ABS (xs)); + x->_mp_size = xs < 0 ? -xn : xn; +} + +static mpz_srcptr +mpz_roinit_normal_n (mpz_t x, mp_srcptr xp, mp_size_t xs) +{ + x->_mp_alloc = 0; + x->_mp_d = (mp_ptr) xp; + x->_mp_size = xs; + return x; +} + +mpz_srcptr +mpz_roinit_n (mpz_t x, mp_srcptr xp, mp_size_t xs) +{ + mpz_roinit_normal_n (x, xp, xs); + mpz_limbs_finish (x, xs); + return x; +} + + +/* Conversions and comparison to double. */ +void +mpz_set_d (mpz_t r, double x) +{ + int sign; + mp_ptr rp; + mp_size_t rn, i; + double B; + double Bi; + mp_limb_t f; + + /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is + zero or infinity. */ + if (x != x || x == x * 0.5) + { + r->_mp_size = 0; + return; + } + + sign = x < 0.0 ; + if (sign) + x = - x; + + if (x < 1.0) + { + r->_mp_size = 0; + return; + } + B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); + Bi = 1.0 / B; + for (rn = 1; x >= B; rn++) + x *= Bi; + + rp = MPZ_REALLOC (r, rn); + + f = (mp_limb_t) x; + x -= f; + assert (x < 1.0); + i = rn-1; + rp[i] = f; + while (--i >= 0) + { + x = B * x; + f = (mp_limb_t) x; + x -= f; + assert (x < 1.0); + rp[i] = f; + } + + r->_mp_size = sign ? - rn : rn; +} + +void +mpz_init_set_d (mpz_t r, double x) +{ + mpz_init (r); + mpz_set_d (r, x); +} + +double +mpz_get_d (const mpz_t u) +{ + int m; + mp_limb_t l; + mp_size_t un; + double x; + double B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); + + un = GMP_ABS (u->_mp_size); + + if (un == 0) + return 0.0; + + l = u->_mp_d[--un]; + gmp_clz (m, l); + m = m + GMP_DBL_MANT_BITS - GMP_LIMB_BITS; + if (m < 0) + l &= GMP_LIMB_MAX << -m; + + for (x = l; --un >= 0;) + { + x = B*x; + if (m > 0) { + l = u->_mp_d[un]; + m -= GMP_LIMB_BITS; + if (m < 0) + l &= GMP_LIMB_MAX << -m; + x += l; + } + } + + if (u->_mp_size < 0) + x = -x; + + return x; +} + +int +mpz_cmpabs_d (const mpz_t x, double d) +{ + mp_size_t xn; + double B, Bi; + mp_size_t i; + + xn = x->_mp_size; + d = GMP_ABS (d); + + if (xn != 0) + { + xn = GMP_ABS (xn); + + B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); + Bi = 1.0 / B; + + /* Scale d so it can be compared with the top limb. */ + for (i = 1; i < xn; i++) + d *= Bi; + + if (d >= B) + return -1; + + /* Compare floor(d) to top limb, subtract and cancel when equal. */ + for (i = xn; i-- > 0;) + { + mp_limb_t f, xl; + + f = (mp_limb_t) d; + xl = x->_mp_d[i]; + if (xl > f) + return 1; + else if (xl < f) + return -1; + d = B * (d - f); + } + } + return - (d > 0.0); +} + +int +mpz_cmp_d (const mpz_t x, double d) +{ + if (x->_mp_size < 0) + { + if (d >= 0.0) + return -1; + else + return -mpz_cmpabs_d (x, d); + } + else + { + if (d < 0.0) + return 1; + else + return mpz_cmpabs_d (x, d); + } +} + + +/* MPZ comparisons and the like. 
*/ +int +mpz_sgn (const mpz_t u) +{ + return GMP_CMP (u->_mp_size, 0); +} + +int +mpz_cmp_si (const mpz_t u, long v) +{ + mp_size_t usize = u->_mp_size; + + if (v >= 0) + return mpz_cmp_ui (u, v); + else if (usize >= 0) + return 1; + else + return - mpz_cmpabs_ui (u, GMP_NEG_CAST (unsigned long int, v)); +} + +int +mpz_cmp_ui (const mpz_t u, unsigned long v) +{ + mp_size_t usize = u->_mp_size; + + if (usize < 0) + return -1; + else + return mpz_cmpabs_ui (u, v); +} + +int +mpz_cmp (const mpz_t a, const mpz_t b) +{ + mp_size_t asize = a->_mp_size; + mp_size_t bsize = b->_mp_size; + + if (asize != bsize) + return (asize < bsize) ? -1 : 1; + else if (asize >= 0) + return mpn_cmp (a->_mp_d, b->_mp_d, asize); + else + return mpn_cmp (b->_mp_d, a->_mp_d, -asize); +} + +int +mpz_cmpabs_ui (const mpz_t u, unsigned long v) +{ + mp_size_t un = GMP_ABS (u->_mp_size); + + if (! mpn_absfits_ulong_p (u->_mp_d, un)) + return 1; + else + { + unsigned long uu = mpz_get_ui (u); + return GMP_CMP(uu, v); + } +} + +int +mpz_cmpabs (const mpz_t u, const mpz_t v) +{ + return mpn_cmp4 (u->_mp_d, GMP_ABS (u->_mp_size), + v->_mp_d, GMP_ABS (v->_mp_size)); +} + +void +mpz_abs (mpz_t r, const mpz_t u) +{ + mpz_set (r, u); + r->_mp_size = GMP_ABS (r->_mp_size); +} + +void +mpz_neg (mpz_t r, const mpz_t u) +{ + mpz_set (r, u); + r->_mp_size = -r->_mp_size; +} + +void +mpz_swap (mpz_t u, mpz_t v) +{ + MP_SIZE_T_SWAP (u->_mp_alloc, v->_mp_alloc); + MPN_PTR_SWAP (u->_mp_d, u->_mp_size, v->_mp_d, v->_mp_size); +} + + +/* MPZ addition and subtraction */ + + +void +mpz_add_ui (mpz_t r, const mpz_t a, unsigned long b) +{ + mpz_t bb; + mpz_init_set_ui (bb, b); + mpz_add (r, a, bb); + mpz_clear (bb); +} + +void +mpz_sub_ui (mpz_t r, const mpz_t a, unsigned long b) +{ + mpz_ui_sub (r, b, a); + mpz_neg (r, r); +} + +void +mpz_ui_sub (mpz_t r, unsigned long a, const mpz_t b) +{ + mpz_neg (r, b); + mpz_add_ui (r, r, a); +} + +static mp_size_t +mpz_abs_add (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t an = GMP_ABS (a->_mp_size); + mp_size_t bn = GMP_ABS (b->_mp_size); + mp_ptr rp; + mp_limb_t cy; + + if (an < bn) + { + MPZ_SRCPTR_SWAP (a, b); + MP_SIZE_T_SWAP (an, bn); + } + + rp = MPZ_REALLOC (r, an + 1); + cy = mpn_add (rp, a->_mp_d, an, b->_mp_d, bn); + + rp[an] = cy; + + return an + cy; +} + +static mp_size_t +mpz_abs_sub (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t an = GMP_ABS (a->_mp_size); + mp_size_t bn = GMP_ABS (b->_mp_size); + int cmp; + mp_ptr rp; + + cmp = mpn_cmp4 (a->_mp_d, an, b->_mp_d, bn); + if (cmp > 0) + { + rp = MPZ_REALLOC (r, an); + gmp_assert_nocarry (mpn_sub (rp, a->_mp_d, an, b->_mp_d, bn)); + return mpn_normalized_size (rp, an); + } + else if (cmp < 0) + { + rp = MPZ_REALLOC (r, bn); + gmp_assert_nocarry (mpn_sub (rp, b->_mp_d, bn, a->_mp_d, an)); + return -mpn_normalized_size (rp, bn); + } + else + return 0; +} + +void +mpz_add (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t rn; + + if ( (a->_mp_size ^ b->_mp_size) >= 0) + rn = mpz_abs_add (r, a, b); + else + rn = mpz_abs_sub (r, a, b); + + r->_mp_size = a->_mp_size >= 0 ? rn : - rn; +} + +void +mpz_sub (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t rn; + + if ( (a->_mp_size ^ b->_mp_size) >= 0) + rn = mpz_abs_sub (r, a, b); + else + rn = mpz_abs_add (r, a, b); + + r->_mp_size = a->_mp_size >= 0 ? 
rn : - rn; +} + + +/* MPZ multiplication */ +void +mpz_mul_si (mpz_t r, const mpz_t u, long int v) +{ + if (v < 0) + { + mpz_mul_ui (r, u, GMP_NEG_CAST (unsigned long int, v)); + mpz_neg (r, r); + } + else + mpz_mul_ui (r, u, v); +} + +void +mpz_mul_ui (mpz_t r, const mpz_t u, unsigned long int v) +{ + mpz_t vv; + mpz_init_set_ui (vv, v); + mpz_mul (r, u, vv); + mpz_clear (vv); + return; +} + +void +mpz_mul (mpz_t r, const mpz_t u, const mpz_t v) +{ + int sign; + mp_size_t un, vn, rn; + mpz_t t; + mp_ptr tp; + + un = u->_mp_size; + vn = v->_mp_size; + + if (un == 0 || vn == 0) + { + r->_mp_size = 0; + return; + } + + sign = (un ^ vn) < 0; + + un = GMP_ABS (un); + vn = GMP_ABS (vn); + + mpz_init2 (t, (un + vn) * GMP_LIMB_BITS); + + tp = t->_mp_d; + if (un >= vn) + mpn_mul (tp, u->_mp_d, un, v->_mp_d, vn); + else + mpn_mul (tp, v->_mp_d, vn, u->_mp_d, un); + + rn = un + vn; + rn -= tp[rn-1] == 0; + + t->_mp_size = sign ? - rn : rn; + mpz_swap (r, t); + mpz_clear (t); +} + +void +mpz_mul_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bits) +{ + mp_size_t un, rn; + mp_size_t limbs; + unsigned shift; + mp_ptr rp; + + un = GMP_ABS (u->_mp_size); + if (un == 0) + { + r->_mp_size = 0; + return; + } + + limbs = bits / GMP_LIMB_BITS; + shift = bits % GMP_LIMB_BITS; + + rn = un + limbs + (shift > 0); + rp = MPZ_REALLOC (r, rn); + if (shift > 0) + { + mp_limb_t cy = mpn_lshift (rp + limbs, u->_mp_d, un, shift); + rp[rn-1] = cy; + rn -= (cy == 0); + } + else + mpn_copyd (rp + limbs, u->_mp_d, un); + + mpn_zero (rp, limbs); + + r->_mp_size = (u->_mp_size < 0) ? - rn : rn; +} + +void +mpz_addmul_ui (mpz_t r, const mpz_t u, unsigned long int v) +{ + mpz_t t; + mpz_init_set_ui (t, v); + mpz_mul (t, u, t); + mpz_add (r, r, t); + mpz_clear (t); +} + +void +mpz_submul_ui (mpz_t r, const mpz_t u, unsigned long int v) +{ + mpz_t t; + mpz_init_set_ui (t, v); + mpz_mul (t, u, t); + mpz_sub (r, r, t); + mpz_clear (t); +} + +void +mpz_addmul (mpz_t r, const mpz_t u, const mpz_t v) +{ + mpz_t t; + mpz_init (t); + mpz_mul (t, u, v); + mpz_add (r, r, t); + mpz_clear (t); +} + +void +mpz_submul (mpz_t r, const mpz_t u, const mpz_t v) +{ + mpz_t t; + mpz_init (t); + mpz_mul (t, u, v); + mpz_sub (r, r, t); + mpz_clear (t); +} + + +/* MPZ division */ +enum mpz_div_round_mode { GMP_DIV_FLOOR, GMP_DIV_CEIL, GMP_DIV_TRUNC }; + +/* Allows q or r to be zero. Returns 1 iff remainder is non-zero. */ +static int +mpz_div_qr (mpz_t q, mpz_t r, + const mpz_t n, const mpz_t d, enum mpz_div_round_mode mode) +{ + mp_size_t ns, ds, nn, dn, qs; + ns = n->_mp_size; + ds = d->_mp_size; + + if (ds == 0) + gmp_die("mpz_div_qr: Divide by zero."); + + if (ns == 0) + { + if (q) + q->_mp_size = 0; + if (r) + r->_mp_size = 0; + return 0; + } + + nn = GMP_ABS (ns); + dn = GMP_ABS (ds); + + qs = ds ^ ns; + + if (nn < dn) + { + if (mode == GMP_DIV_CEIL && qs >= 0) + { + /* q = 1, r = n - d */ + if (r) + mpz_sub (r, n, d); + if (q) + mpz_set_ui (q, 1); + } + else if (mode == GMP_DIV_FLOOR && qs < 0) + { + /* q = -1, r = n + d */ + if (r) + mpz_add (r, n, d); + if (q) + mpz_set_si (q, -1); + } + else + { + /* q = 0, r = d */ + if (r) + mpz_set (r, n); + if (q) + q->_mp_size = 0; + } + return 1; + } + else + { + mp_ptr np, qp; + mp_size_t qn, rn; + mpz_t tq, tr; + + mpz_init_set (tr, n); + np = tr->_mp_d; + + qn = nn - dn + 1; + + if (q) + { + mpz_init2 (tq, qn * GMP_LIMB_BITS); + qp = tq->_mp_d; + } + else + qp = NULL; + + mpn_div_qr (qp, np, nn, d->_mp_d, dn); + + if (qp) + { + qn -= (qp[qn-1] == 0); + + tq->_mp_size = qs < 0 ? 
-qn : qn; + } + rn = mpn_normalized_size (np, dn); + tr->_mp_size = ns < 0 ? - rn : rn; + + if (mode == GMP_DIV_FLOOR && qs < 0 && rn != 0) + { + if (q) + mpz_sub_ui (tq, tq, 1); + if (r) + mpz_add (tr, tr, d); + } + else if (mode == GMP_DIV_CEIL && qs >= 0 && rn != 0) + { + if (q) + mpz_add_ui (tq, tq, 1); + if (r) + mpz_sub (tr, tr, d); + } + + if (q) + { + mpz_swap (tq, q); + mpz_clear (tq); + } + if (r) + mpz_swap (tr, r); + + mpz_clear (tr); + + return rn != 0; + } +} + +void +mpz_cdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, r, n, d, GMP_DIV_CEIL); +} + +void +mpz_fdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, r, n, d, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, r, n, d, GMP_DIV_TRUNC); +} + +void +mpz_cdiv_q (mpz_t q, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, NULL, n, d, GMP_DIV_CEIL); +} + +void +mpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, NULL, n, d, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_q (mpz_t q, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC); +} + +void +mpz_cdiv_r (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL); +} + +void +mpz_fdiv_r (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_r (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, GMP_DIV_TRUNC); +} + +void +mpz_mod (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, d->_mp_size >= 0 ? GMP_DIV_FLOOR : GMP_DIV_CEIL); +} + +static void +mpz_div_q_2exp (mpz_t q, const mpz_t u, mp_bitcnt_t bit_index, + enum mpz_div_round_mode mode) +{ + mp_size_t un, qn; + mp_size_t limb_cnt; + mp_ptr qp; + int adjust; + + un = u->_mp_size; + if (un == 0) + { + q->_mp_size = 0; + return; + } + limb_cnt = bit_index / GMP_LIMB_BITS; + qn = GMP_ABS (un) - limb_cnt; + bit_index %= GMP_LIMB_BITS; + + if (mode == ((un > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* un != 0 here. */ + /* Note: Below, the final indexing at limb_cnt is valid because at + that point we have qn > 0. */ + adjust = (qn <= 0 + || !mpn_zero_p (u->_mp_d, limb_cnt) + || (u->_mp_d[limb_cnt] + & (((mp_limb_t) 1 << bit_index) - 1))); + else + adjust = 0; + + if (qn <= 0) + qn = 0; + else + { + qp = MPZ_REALLOC (q, qn); + + if (bit_index != 0) + { + mpn_rshift (qp, u->_mp_d + limb_cnt, qn, bit_index); + qn -= qp[qn - 1] == 0; + } + else + { + mpn_copyi (qp, u->_mp_d + limb_cnt, qn); + } + } + + q->_mp_size = qn; + + if (adjust) + mpz_add_ui (q, q, 1); + if (un < 0) + mpz_neg (q, q); +} + +static void +mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index, + enum mpz_div_round_mode mode) +{ + mp_size_t us, un, rn; + mp_ptr rp; + mp_limb_t mask; + + us = u->_mp_size; + if (us == 0 || bit_index == 0) + { + r->_mp_size = 0; + return; + } + rn = (bit_index + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; + assert (rn > 0); + + rp = MPZ_REALLOC (r, rn); + un = GMP_ABS (us); + + mask = GMP_LIMB_MAX >> (rn * GMP_LIMB_BITS - bit_index); + + if (rn > un) + { + /* Quotient (with truncation) is zero, and remainder is + non-zero */ + if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ + { + /* Have to negate and sign extend. */ + mp_size_t i; + + gmp_assert_nocarry (! 
mpn_neg (rp, u->_mp_d, un)); + for (i = un; i < rn - 1; i++) + rp[i] = GMP_LIMB_MAX; + + rp[rn-1] = mask; + us = -us; + } + else + { + /* Just copy */ + if (r != u) + mpn_copyi (rp, u->_mp_d, un); + + rn = un; + } + } + else + { + if (r != u) + mpn_copyi (rp, u->_mp_d, rn - 1); + + rp[rn-1] = u->_mp_d[rn-1] & mask; + + if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ + { + /* If r != 0, compute 2^{bit_count} - r. */ + mpn_neg (rp, rp, rn); + + rp[rn-1] &= mask; + + /* us is not used for anything else, so we can modify it + here to indicate flipped sign. */ + us = -us; + } + } + rn = mpn_normalized_size (rp, rn); + r->_mp_size = us < 0 ? -rn : rn; +} + +void +mpz_cdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_q_2exp (r, u, cnt, GMP_DIV_CEIL); +} + +void +mpz_fdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_q_2exp (r, u, cnt, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_q_2exp (r, u, cnt, GMP_DIV_TRUNC); +} + +void +mpz_cdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_r_2exp (r, u, cnt, GMP_DIV_CEIL); +} + +void +mpz_fdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_r_2exp (r, u, cnt, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_r_2exp (r, u, cnt, GMP_DIV_TRUNC); +} + +void +mpz_divexact (mpz_t q, const mpz_t n, const mpz_t d) +{ + gmp_assert_nocarry (mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC)); +} + +int +mpz_divisible_p (const mpz_t n, const mpz_t d) +{ + return mpz_div_qr (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; +} + +int +mpz_congruent_p (const mpz_t a, const mpz_t b, const mpz_t m) +{ + mpz_t t; + int res; + + /* a == b (mod 0) iff a == b */ + if (mpz_sgn (m) == 0) + return (mpz_cmp (a, b) == 0); + + mpz_init (t); + mpz_sub (t, a, b); + res = mpz_divisible_p (t, m); + mpz_clear (t); + + return res; +} + +static unsigned long +mpz_div_qr_ui (mpz_t q, mpz_t r, + const mpz_t n, unsigned long d, enum mpz_div_round_mode mode) +{ + unsigned long ret; + mpz_t rr, dd; + + mpz_init (rr); + mpz_init_set_ui (dd, d); + mpz_div_qr (q, rr, n, dd, mode); + mpz_clear (dd); + ret = mpz_get_ui (rr); + + if (r) + mpz_swap (r, rr); + mpz_clear (rr); + + return ret; +} + +unsigned long +mpz_cdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, r, n, d, GMP_DIV_CEIL); +} + +unsigned long +mpz_fdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, r, n, d, GMP_DIV_FLOOR); +} + +unsigned long +mpz_tdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, r, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_cdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_CEIL); +} + +unsigned long +mpz_fdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_FLOOR); +} + +unsigned long +mpz_tdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_cdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_CEIL); +} +unsigned long +mpz_fdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); +} +unsigned long +mpz_tdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_cdiv_ui 
(const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_CEIL); +} + +unsigned long +mpz_fdiv_ui (const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_FLOOR); +} + +unsigned long +mpz_tdiv_ui (const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_mod_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); +} + +void +mpz_divexact_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + gmp_assert_nocarry (mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC)); +} + +int +mpz_divisible_ui_p (const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; +} + + +/* GCD */ +static mp_limb_t +mpn_gcd_11 (mp_limb_t u, mp_limb_t v) +{ + unsigned shift; + + assert ( (u | v) > 0); + + if (u == 0) + return v; + else if (v == 0) + return u; + + gmp_ctz (shift, u | v); + + u >>= shift; + v >>= shift; + + if ( (u & 1) == 0) + MP_LIMB_T_SWAP (u, v); + + while ( (v & 1) == 0) + v >>= 1; + + while (u != v) + { + if (u > v) + { + u -= v; + do + u >>= 1; + while ( (u & 1) == 0); + } + else + { + v -= u; + do + v >>= 1; + while ( (v & 1) == 0); + } + } + return u << shift; +} + +mp_size_t +mpn_gcd (mp_ptr rp, mp_ptr up, mp_size_t un, mp_ptr vp, mp_size_t vn) +{ + assert (un >= vn); + assert (vn > 0); + assert (!GMP_MPN_OVERLAP_P (up, un, vp, vn)); + assert (vp[vn-1] > 0); + assert ((up[0] | vp[0]) & 1); + + if (un > vn) + mpn_div_qr (NULL, up, un, vp, vn); + + un = mpn_normalized_size (up, vn); + if (un == 0) + { + mpn_copyi (rp, vp, vn); + return vn; + } + + if (!(vp[0] & 1)) + MPN_PTR_SWAP (up, un, vp, vn); + + while (un > 1 || vn > 1) + { + int shift; + assert (vp[0] & 1); + + while (up[0] == 0) + { + up++; + un--; + } + gmp_ctz (shift, up[0]); + if (shift > 0) + { + gmp_assert_nocarry (mpn_rshift(up, up, un, shift)); + un -= (up[un-1] == 0); + } + + if (un < vn) + MPN_PTR_SWAP (up, un, vp, vn); + else if (un == vn) + { + int c = mpn_cmp (up, vp, un); + if (c == 0) + { + mpn_copyi (rp, up, un); + return un; + } + else if (c < 0) + MP_PTR_SWAP (up, vp); + } + + gmp_assert_nocarry (mpn_sub (up, up, un, vp, vn)); + un = mpn_normalized_size (up, un); + } + rp[0] = mpn_gcd_11 (up[0], vp[0]); + return 1; +} + +unsigned long +mpz_gcd_ui (mpz_t g, const mpz_t u, unsigned long v) +{ + mpz_t t; + mpz_init_set_ui(t, v); + mpz_gcd (t, u, t); + if (v > 0) + v = mpz_get_ui (t); + + if (g) + mpz_swap (t, g); + + mpz_clear (t); + + return v; +} + +static mp_bitcnt_t +mpz_make_odd (mpz_t r) +{ + mp_bitcnt_t shift; + + assert (r->_mp_size > 0); + /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ + shift = mpn_scan1 (r->_mp_d, 0); + mpz_tdiv_q_2exp (r, r, shift); + + return shift; +} + +void +mpz_gcd (mpz_t g, const mpz_t u, const mpz_t v) +{ + mpz_t tu, tv; + mp_bitcnt_t uz, vz, gz; + + if (u->_mp_size == 0) + { + mpz_abs (g, v); + return; + } + if (v->_mp_size == 0) + { + mpz_abs (g, u); + return; + } + + mpz_init (tu); + mpz_init (tv); + + mpz_abs (tu, u); + uz = mpz_make_odd (tu); + mpz_abs (tv, v); + vz = mpz_make_odd (tv); + gz = GMP_MIN (uz, vz); + + if (tu->_mp_size < tv->_mp_size) + mpz_swap (tu, tv); + + tu->_mp_size = mpn_gcd (tu->_mp_d, tu->_mp_d, tu->_mp_size, tv->_mp_d, tv->_mp_size); + mpz_mul_2exp (g, tu, gz); + + mpz_clear (tu); + mpz_clear (tv); +} + +void +mpz_gcdext (mpz_t g, mpz_t s, mpz_t t, const mpz_t u, const mpz_t v) +{ + mpz_t tu, tv, s0, s1, t0, t1; + mp_bitcnt_t uz, vz, 
gz; + mp_bitcnt_t power; + int cmp; + + if (u->_mp_size == 0) + { + /* g = 0 u + sgn(v) v */ + signed long sign = mpz_sgn (v); + mpz_abs (g, v); + if (s) + s->_mp_size = 0; + if (t) + mpz_set_si (t, sign); + return; + } + + if (v->_mp_size == 0) + { + /* g = sgn(u) u + 0 v */ + signed long sign = mpz_sgn (u); + mpz_abs (g, u); + if (s) + mpz_set_si (s, sign); + if (t) + t->_mp_size = 0; + return; + } + + mpz_init (tu); + mpz_init (tv); + mpz_init (s0); + mpz_init (s1); + mpz_init (t0); + mpz_init (t1); + + mpz_abs (tu, u); + uz = mpz_make_odd (tu); + mpz_abs (tv, v); + vz = mpz_make_odd (tv); + gz = GMP_MIN (uz, vz); + + uz -= gz; + vz -= gz; + + /* Cofactors corresponding to odd gcd. gz handled later. */ + if (tu->_mp_size < tv->_mp_size) + { + mpz_swap (tu, tv); + MPZ_SRCPTR_SWAP (u, v); + MPZ_PTR_SWAP (s, t); + MP_BITCNT_T_SWAP (uz, vz); + } + + /* Maintain + * + * u = t0 tu + t1 tv + * v = s0 tu + s1 tv + * + * where u and v denote the inputs with common factors of two + * eliminated, and det (s0, t0; s1, t1) = 2^p. Then + * + * 2^p tu = s1 u - t1 v + * 2^p tv = -s0 u + t0 v + */ + + /* After initial division, tu = q tv + tu', we have + * + * u = 2^uz (tu' + q tv) + * v = 2^vz tv + * + * or + * + * t0 = 2^uz, t1 = 2^uz q + * s0 = 0, s1 = 2^vz + */ + + mpz_tdiv_qr (t1, tu, tu, tv); + mpz_mul_2exp (t1, t1, uz); + + mpz_setbit (s1, vz); + power = uz + vz; + + if (tu->_mp_size > 0) + { + mp_bitcnt_t shift; + shift = mpz_make_odd (tu); + mpz_setbit (t0, uz + shift); + power += shift; + + for (;;) + { + int c; + c = mpz_cmp (tu, tv); + if (c == 0) + break; + + if (c < 0) + { + /* tv = tv' + tu + * + * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv' + * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */ + + mpz_sub (tv, tv, tu); + mpz_add (t0, t0, t1); + mpz_add (s0, s0, s1); + + shift = mpz_make_odd (tv); + mpz_mul_2exp (t1, t1, shift); + mpz_mul_2exp (s1, s1, shift); + } + else + { + mpz_sub (tu, tu, tv); + mpz_add (t1, t0, t1); + mpz_add (s1, s0, s1); + + shift = mpz_make_odd (tu); + mpz_mul_2exp (t0, t0, shift); + mpz_mul_2exp (s0, s0, shift); + } + power += shift; + } + } + else + mpz_setbit (t0, uz); + + /* Now tv = odd part of gcd, and -s0 and t0 are corresponding + cofactors. */ + + mpz_mul_2exp (tv, tv, gz); + mpz_neg (s0, s0); + + /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To + adjust cofactors, we need u / g and v / g */ + + mpz_divexact (s1, v, tv); + mpz_abs (s1, s1); + mpz_divexact (t1, u, tv); + mpz_abs (t1, t1); + + while (power-- > 0) + { + /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */ + if (mpz_odd_p (s0) || mpz_odd_p (t0)) + { + mpz_sub (s0, s0, s1); + mpz_add (t0, t0, t1); + } + assert (mpz_even_p (t0) && mpz_even_p (s0)); + mpz_tdiv_q_2exp (s0, s0, 1); + mpz_tdiv_q_2exp (t0, t0, 1); + } + + /* Choose small cofactors (they should generally satify + + |s| < |u| / 2g and |t| < |v| / 2g, + + with some documented exceptions). Always choose the smallest s, + if there are two choices for s with same absolute value, choose + the one with smallest corresponding t (this asymmetric condition + is needed to prefer s = 0, |t| = 1 when g = |a| = |b|). 
*/ + mpz_add (s1, s0, s1); + mpz_sub (t1, t0, t1); + cmp = mpz_cmpabs (s0, s1); + if (cmp > 0 || (cmp == 0 && mpz_cmpabs (t0, t1) > 0)) + { + mpz_swap (s0, s1); + mpz_swap (t0, t1); + } + if (u->_mp_size < 0) + mpz_neg (s0, s0); + if (v->_mp_size < 0) + mpz_neg (t0, t0); + + mpz_swap (g, tv); + if (s) + mpz_swap (s, s0); + if (t) + mpz_swap (t, t0); + + mpz_clear (tu); + mpz_clear (tv); + mpz_clear (s0); + mpz_clear (s1); + mpz_clear (t0); + mpz_clear (t1); +} + +void +mpz_lcm (mpz_t r, const mpz_t u, const mpz_t v) +{ + mpz_t g; + + if (u->_mp_size == 0 || v->_mp_size == 0) + { + r->_mp_size = 0; + return; + } + + mpz_init (g); + + mpz_gcd (g, u, v); + mpz_divexact (g, u, g); + mpz_mul (r, g, v); + + mpz_clear (g); + mpz_abs (r, r); +} + +void +mpz_lcm_ui (mpz_t r, const mpz_t u, unsigned long v) +{ + if (v == 0 || u->_mp_size == 0) + { + r->_mp_size = 0; + return; + } + + v /= mpz_gcd_ui (NULL, u, v); + mpz_mul_ui (r, u, v); + + mpz_abs (r, r); +} + +int +mpz_invert (mpz_t r, const mpz_t u, const mpz_t m) +{ + mpz_t g, tr; + int invertible; + + if (u->_mp_size == 0 || mpz_cmpabs_ui (m, 1) <= 0) + return 0; + + mpz_init (g); + mpz_init (tr); + + mpz_gcdext (g, tr, NULL, u, m); + invertible = (mpz_cmp_ui (g, 1) == 0); + + if (invertible) + { + if (tr->_mp_size < 0) + { + if (m->_mp_size >= 0) + mpz_add (tr, tr, m); + else + mpz_sub (tr, tr, m); + } + mpz_swap (r, tr); + } + + mpz_clear (g); + mpz_clear (tr); + return invertible; +} + + +/* Higher level operations (sqrt, pow and root) */ + +void +mpz_pow_ui (mpz_t r, const mpz_t b, unsigned long e) +{ + unsigned long bit; + mpz_t tr; + mpz_init_set_ui (tr, 1); + + bit = GMP_ULONG_HIGHBIT; + do + { + mpz_mul (tr, tr, tr); + if (e & bit) + mpz_mul (tr, tr, b); + bit >>= 1; + } + while (bit > 0); + + mpz_swap (r, tr); + mpz_clear (tr); +} + +void +mpz_ui_pow_ui (mpz_t r, unsigned long blimb, unsigned long e) +{ + mpz_t b; + + mpz_init_set_ui (b, blimb); + mpz_pow_ui (r, b, e); + mpz_clear (b); +} + +void +mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m) +{ + mpz_t tr; + mpz_t base; + mp_size_t en, mn; + mp_srcptr mp; + struct gmp_div_inverse minv; + unsigned shift; + mp_ptr tp = NULL; + + en = GMP_ABS (e->_mp_size); + mn = GMP_ABS (m->_mp_size); + if (mn == 0) + gmp_die ("mpz_powm: Zero modulo."); + + if (en == 0) + { + mpz_set_ui (r, mpz_cmpabs_ui (m, 1)); + return; + } + + mp = m->_mp_d; + mpn_div_qr_invert (&minv, mp, mn); + shift = minv.shift; + + if (shift > 0) + { + /* To avoid shifts, we do all our reductions, except the final + one, using a *normalized* m. */ + minv.shift = 0; + + tp = gmp_alloc_limbs (mn); + gmp_assert_nocarry (mpn_lshift (tp, mp, mn, shift)); + mp = tp; + } + + mpz_init (base); + + if (e->_mp_size < 0) + { + if (!mpz_invert (base, b, m)) + gmp_die ("mpz_powm: Negative exponent and non-invertible base."); + } + else + { + mp_size_t bn; + mpz_abs (base, b); + + bn = base->_mp_size; + if (bn >= mn) + { + mpn_div_qr_preinv (NULL, base->_mp_d, base->_mp_size, mp, mn, &minv); + bn = mn; + } + + /* We have reduced the absolute value. Now take care of the + sign. Note that we get zero represented non-canonically as + m. 
*/ + if (b->_mp_size < 0) + { + mp_ptr bp = MPZ_REALLOC (base, mn); + gmp_assert_nocarry (mpn_sub (bp, mp, mn, bp, bn)); + bn = mn; + } + base->_mp_size = mpn_normalized_size (base->_mp_d, bn); + } + mpz_init_set_ui (tr, 1); + + while (--en >= 0) + { + mp_limb_t w = e->_mp_d[en]; + mp_limb_t bit; + + bit = GMP_LIMB_HIGHBIT; + do + { + mpz_mul (tr, tr, tr); + if (w & bit) + mpz_mul (tr, tr, base); + if (tr->_mp_size > mn) + { + mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); + tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); + } + bit >>= 1; + } + while (bit > 0); + } + + /* Final reduction */ + if (tr->_mp_size >= mn) + { + minv.shift = shift; + mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); + tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); + } + if (tp) + gmp_free_limbs (tp, mn); + + mpz_swap (r, tr); + mpz_clear (tr); + mpz_clear (base); +} + +void +mpz_powm_ui (mpz_t r, const mpz_t b, unsigned long elimb, const mpz_t m) +{ + mpz_t e; + + mpz_init_set_ui (e, elimb); + mpz_powm (r, b, e, m); + mpz_clear (e); +} + +/* x=trunc(y^(1/z)), r=y-x^z */ +void +mpz_rootrem (mpz_t x, mpz_t r, const mpz_t y, unsigned long z) +{ + int sgn; + mp_bitcnt_t bc; + mpz_t t, u; + + sgn = y->_mp_size < 0; + if ((~z & sgn) != 0) + gmp_die ("mpz_rootrem: Negative argument, with even root."); + if (z == 0) + gmp_die ("mpz_rootrem: Zeroth root."); + + if (mpz_cmpabs_ui (y, 1) <= 0) { + if (x) + mpz_set (x, y); + if (r) + r->_mp_size = 0; + return; + } + + mpz_init (u); + mpz_init (t); + bc = (mpz_sizeinbase (y, 2) - 1) / z + 1; + mpz_setbit (t, bc); + + if (z == 2) /* simplify sqrt loop: z-1 == 1 */ + do { + mpz_swap (u, t); /* u = x */ + mpz_tdiv_q (t, y, u); /* t = y/x */ + mpz_add (t, t, u); /* t = y/x + x */ + mpz_tdiv_q_2exp (t, t, 1); /* x'= (y/x + x)/2 */ + } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ + else /* z != 2 */ { + mpz_t v; + + mpz_init (v); + if (sgn) + mpz_neg (t, t); + + do { + mpz_swap (u, t); /* u = x */ + mpz_pow_ui (t, u, z - 1); /* t = x^(z-1) */ + mpz_tdiv_q (t, y, t); /* t = y/x^(z-1) */ + mpz_mul_ui (v, u, z - 1); /* v = x*(z-1) */ + mpz_add (t, t, v); /* t = y/x^(z-1) + x*(z-1) */ + mpz_tdiv_q_ui (t, t, z); /* x'=(y/x^(z-1) + x*(z-1))/z */ + } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ + + mpz_clear (v); + } + + if (r) { + mpz_pow_ui (t, u, z); + mpz_sub (r, y, t); + } + if (x) + mpz_swap (x, u); + mpz_clear (u); + mpz_clear (t); +} + +int +mpz_root (mpz_t x, const mpz_t y, unsigned long z) +{ + int res; + mpz_t r; + + mpz_init (r); + mpz_rootrem (x, r, y, z); + res = r->_mp_size == 0; + mpz_clear (r); + + return res; +} + +/* Compute s = floor(sqrt(u)) and r = u - s^2. 
Allows r == NULL */ +void +mpz_sqrtrem (mpz_t s, mpz_t r, const mpz_t u) +{ + mpz_rootrem (s, r, u, 2); +} + +void +mpz_sqrt (mpz_t s, const mpz_t u) +{ + mpz_rootrem (s, NULL, u, 2); +} + +int +mpz_perfect_square_p (const mpz_t u) +{ + if (u->_mp_size <= 0) + return (u->_mp_size == 0); + else + return mpz_root (NULL, u, 2); +} + +int +mpn_perfect_square_p (mp_srcptr p, mp_size_t n) +{ + mpz_t t; + + assert (n > 0); + assert (p [n-1] != 0); + return mpz_root (NULL, mpz_roinit_normal_n (t, p, n), 2); +} + +mp_size_t +mpn_sqrtrem (mp_ptr sp, mp_ptr rp, mp_srcptr p, mp_size_t n) +{ + mpz_t s, r, u; + mp_size_t res; + + assert (n > 0); + assert (p [n-1] != 0); + + mpz_init (r); + mpz_init (s); + mpz_rootrem (s, r, mpz_roinit_normal_n (u, p, n), 2); + + assert (s->_mp_size == (n+1)/2); + mpn_copyd (sp, s->_mp_d, s->_mp_size); + mpz_clear (s); + res = r->_mp_size; + if (rp) + mpn_copyd (rp, r->_mp_d, res); + mpz_clear (r); + return res; +} + +/* Combinatorics */ + +void +mpz_mfac_uiui (mpz_t x, unsigned long n, unsigned long m) +{ + mpz_set_ui (x, n + (n == 0)); + if (m + 1 < 2) return; + while (n > m + 1) + mpz_mul_ui (x, x, n -= m); +} + +void +mpz_2fac_ui (mpz_t x, unsigned long n) +{ + mpz_mfac_uiui (x, n, 2); +} + +void +mpz_fac_ui (mpz_t x, unsigned long n) +{ + mpz_mfac_uiui (x, n, 1); +} + +void +mpz_bin_uiui (mpz_t r, unsigned long n, unsigned long k) +{ + mpz_t t; + + mpz_set_ui (r, k <= n); + + if (k > (n >> 1)) + k = (k <= n) ? n - k : 0; + + mpz_init (t); + mpz_fac_ui (t, k); + + for (; k > 0; --k) + mpz_mul_ui (r, r, n--); + + mpz_divexact (r, r, t); + mpz_clear (t); +} + + +/* Primality testing */ + +/* Computes Kronecker (a/b) with odd b, a!=0 and GCD(a,b) = 1 */ +/* Adapted from JACOBI_BASE_METHOD==4 in mpn/generic/jacbase.c */ +static int +gmp_jacobi_coprime (mp_limb_t a, mp_limb_t b) +{ + int c, bit = 0; + + assert (b & 1); + assert (a != 0); + /* assert (mpn_gcd_11 (a, b) == 1); */ + + /* Below, we represent a and b shifted right so that the least + significant one bit is implicit. */ + b >>= 1; + + gmp_ctz(c, a); + a >>= 1; + + for (;;) + { + a >>= c; + /* (2/b) = -1 if b = 3 or 5 mod 8 */ + bit ^= c & (b ^ (b >> 1)); + if (a < b) + { + if (a == 0) + return bit & 1 ? -1 : 1; + bit ^= a & b; + a = b - a; + b -= a; + } + else + { + a -= b; + assert (a != 0); + } + + gmp_ctz(c, a); + ++c; + } +} + +static void +gmp_lucas_step_k_2k (mpz_t V, mpz_t Qk, const mpz_t n) +{ + mpz_mod (Qk, Qk, n); + /* V_{2k} <- V_k ^ 2 - 2Q^k */ + mpz_mul (V, V, V); + mpz_submul_ui (V, Qk, 2); + mpz_tdiv_r (V, V, n); + /* Q^{2k} = (Q^k)^2 */ + mpz_mul (Qk, Qk, Qk); +} + +/* Computes V_k, Q^k (mod n) for the Lucas' sequence */ +/* with P=1, Q=Q; k = (n>>b0)|1. */ +/* Requires an odd n > 4; b0 > 0; -2*Q must not overflow a long */ +/* Returns (U_k == 0) and sets V=V_k and Qk=Q^k. 
*/ +static int +gmp_lucas_mod (mpz_t V, mpz_t Qk, long Q, + mp_bitcnt_t b0, const mpz_t n) +{ + mp_bitcnt_t bs; + mpz_t U; + int res; + + assert (b0 > 0); + assert (Q <= - (LONG_MIN / 2)); + assert (Q >= - (LONG_MAX / 2)); + assert (mpz_cmp_ui (n, 4) > 0); + assert (mpz_odd_p (n)); + + mpz_init_set_ui (U, 1); /* U1 = 1 */ + mpz_set_ui (V, 1); /* V1 = 1 */ + mpz_set_si (Qk, Q); + + for (bs = mpz_sizeinbase (n, 2) - 1; --bs >= b0;) + { + /* U_{2k} <- U_k * V_k */ + mpz_mul (U, U, V); + /* V_{2k} <- V_k ^ 2 - 2Q^k */ + /* Q^{2k} = (Q^k)^2 */ + gmp_lucas_step_k_2k (V, Qk, n); + + /* A step k->k+1 is performed if the bit in $n$ is 1 */ + /* mpz_tstbit(n,bs) or the bit is 0 in $n$ but */ + /* should be 1 in $n+1$ (bs == b0) */ + if (b0 == bs || mpz_tstbit (n, bs)) + { + /* Q^{k+1} <- Q^k * Q */ + mpz_mul_si (Qk, Qk, Q); + /* U_{k+1} <- (U_k + V_k) / 2 */ + mpz_swap (U, V); /* Keep in V the old value of U_k */ + mpz_add (U, U, V); + /* We have to compute U/2, so we need an even value, */ + /* equivalent (mod n) */ + if (mpz_odd_p (U)) + mpz_add (U, U, n); + mpz_tdiv_q_2exp (U, U, 1); + /* V_{k+1} <-(D*U_k + V_k) / 2 = + U_{k+1} + (D-1)/2*U_k = U_{k+1} - 2Q*U_k */ + mpz_mul_si (V, V, -2*Q); + mpz_add (V, U, V); + mpz_tdiv_r (V, V, n); + } + mpz_tdiv_r (U, U, n); + } + + res = U->_mp_size == 0; + mpz_clear (U); + return res; +} + +/* Performs strong Lucas' test on x, with parameters suggested */ +/* for the BPSW test. Qk is only passed to recycle a variable. */ +/* Requires GCD (x,6) = 1.*/ +static int +gmp_stronglucas (const mpz_t x, mpz_t Qk) +{ + mp_bitcnt_t b0; + mpz_t V, n; + mp_limb_t maxD, D; /* The absolute value is stored. */ + long Q; + mp_limb_t tl; + + /* Test on the absolute value. */ + mpz_roinit_normal_n (n, x->_mp_d, GMP_ABS (x->_mp_size)); + + assert (mpz_odd_p (n)); + /* assert (mpz_gcd_ui (NULL, n, 6) == 1); */ + if (mpz_root (Qk, n, 2)) + return 0; /* A square is composite. */ + + /* Check Ds up to square root (in case, n is prime) + or avoid overflows */ + maxD = (Qk->_mp_size == 1) ? Qk->_mp_d [0] - 1 : GMP_LIMB_MAX; + + D = 3; + /* Search a D such that (D/n) = -1 in the sequence 5,-7,9,-11,.. */ + /* For those Ds we have (D/n) = (n/|D|) */ + do + { + if (D >= maxD) + return 1 + (D != GMP_LIMB_MAX); /* (1 + ! ~ D) */ + D += 2; + tl = mpz_tdiv_ui (n, D); + if (tl == 0) + return 0; + } + while (gmp_jacobi_coprime (tl, D) == 1); + + mpz_init (V); + + /* n-(D/n) = n+1 = d*2^{b0}, with d = (n>>b0) | 1 */ + b0 = mpn_common_scan (~ n->_mp_d[0], 0, n->_mp_d, n->_mp_size, GMP_LIMB_MAX); + /* b0 = mpz_scan0 (n, 0); */ + + /* D= P^2 - 4Q; P = 1; Q = (1-D)/4 */ + Q = (D & 2) ? (long) (D >> 2) + 1 : -(long) (D >> 2); + + if (! gmp_lucas_mod (V, Qk, Q, b0, n)) /* If Ud != 0 */ + while (V->_mp_size != 0 && --b0 != 0) /* while Vk != 0 */ + /* V <- V ^ 2 - 2Q^k */ + /* Q^{2k} = (Q^k)^2 */ + gmp_lucas_step_k_2k (V, Qk, n); + + mpz_clear (V); + return (b0 != 0); +} + +static int +gmp_millerrabin (const mpz_t n, const mpz_t nm1, mpz_t y, + const mpz_t q, mp_bitcnt_t k) +{ + assert (k > 0); + + /* Caller must initialize y to the base. */ + mpz_powm (y, y, q, n); + + if (mpz_cmp_ui (y, 1) == 0 || mpz_cmp (y, nm1) == 0) + return 1; + + while (--k > 0) + { + mpz_powm_ui (y, y, 2, n); + if (mpz_cmp (y, nm1) == 0) + return 1; + } + return 0; +} + +/* This product is 0xc0cfd797, and fits in 32 bits. 
*/ +#define GMP_PRIME_PRODUCT \ + (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL) + +/* Bit (p+1)/2 is set, for each odd prime <= 61 */ +#define GMP_PRIME_MASK 0xc96996dcUL + +int +mpz_probab_prime_p (const mpz_t n, int reps) +{ + mpz_t nm1; + mpz_t q; + mpz_t y; + mp_bitcnt_t k; + int is_prime; + int j; + + /* Note that we use the absolute value of n only, for compatibility + with the real GMP. */ + if (mpz_even_p (n)) + return (mpz_cmpabs_ui (n, 2) == 0) ? 2 : 0; + + /* Above test excludes n == 0 */ + assert (n->_mp_size != 0); + + if (mpz_cmpabs_ui (n, 64) < 0) + return (GMP_PRIME_MASK >> (n->_mp_d[0] >> 1)) & 2; + + if (mpz_gcd_ui (NULL, n, GMP_PRIME_PRODUCT) != 1) + return 0; + + /* All prime factors are >= 31. */ + if (mpz_cmpabs_ui (n, 31*31) < 0) + return 2; + + mpz_init (nm1); + mpz_init (q); + + /* Find q and k, where q is odd and n = 1 + 2**k * q. */ + mpz_abs (nm1, n); + nm1->_mp_d[0] -= 1; + /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ + k = mpn_scan1 (nm1->_mp_d, 0); + mpz_tdiv_q_2exp (q, nm1, k); + + /* BPSW test */ + mpz_init_set_ui (y, 2); + is_prime = gmp_millerrabin (n, nm1, y, q, k) && gmp_stronglucas (n, y); + reps -= 24; /* skip the first 24 repetitions */ + + /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] = + j^2 + j + 41 using Euler's polynomial. We potentially stop early, + if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps > + 30 (a[30] == 971 > 31*31 == 961). */ + + for (j = 0; is_prime & (j < reps); j++) + { + mpz_set_ui (y, (unsigned long) j*j+j+41); + if (mpz_cmp (y, nm1) >= 0) + { + /* Don't try any further bases. This "early" break does not affect + the result for any reasonable reps value (<=5000 was tested) */ + assert (j >= 30); + break; + } + is_prime = gmp_millerrabin (n, nm1, y, q, k); + } + mpz_clear (nm1); + mpz_clear (q); + mpz_clear (y); + + return is_prime; +} + + +/* Logical operations and bit manipulation. */ + +/* Numbers are treated as if represented in two's complement (and + infinitely sign extended). For a negative values we get the two's + complement from -x = ~x + 1, where ~ is bitwise complement. + Negation transforms + + xxxx10...0 + + into + + yyyy10...0 + + where yyyy is the bitwise complement of xxxx. So least significant + bits, up to and including the first one bit, are unchanged, and + the more significant bits are all complemented. + + To change a bit from zero to one in a negative number, subtract the + corresponding power of two from the absolute value. This can never + underflow. To change a bit from one to zero, add the corresponding + power of two, and this might overflow. E.g., if x = -001111, the + two's complement is 110001. Clearing the least significant bit, we + get two's complement 110000, and -010000. */ + +int +mpz_tstbit (const mpz_t d, mp_bitcnt_t bit_index) +{ + mp_size_t limb_index; + unsigned shift; + mp_size_t ds; + mp_size_t dn; + mp_limb_t w; + int bit; + + ds = d->_mp_size; + dn = GMP_ABS (ds); + limb_index = bit_index / GMP_LIMB_BITS; + if (limb_index >= dn) + return ds < 0; + + shift = bit_index % GMP_LIMB_BITS; + w = d->_mp_d[limb_index]; + bit = (w >> shift) & 1; + + if (ds < 0) + { + /* d < 0. Check if any of the bits below is set: If so, our bit + must be complemented. 
*/ + if (shift > 0 && (mp_limb_t) (w << (GMP_LIMB_BITS - shift)) > 0) + return bit ^ 1; + while (--limb_index >= 0) + if (d->_mp_d[limb_index] > 0) + return bit ^ 1; + } + return bit; +} + +static void +mpz_abs_add_bit (mpz_t d, mp_bitcnt_t bit_index) +{ + mp_size_t dn, limb_index; + mp_limb_t bit; + mp_ptr dp; + + dn = GMP_ABS (d->_mp_size); + + limb_index = bit_index / GMP_LIMB_BITS; + bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); + + if (limb_index >= dn) + { + mp_size_t i; + /* The bit should be set outside of the end of the number. + We have to increase the size of the number. */ + dp = MPZ_REALLOC (d, limb_index + 1); + + dp[limb_index] = bit; + for (i = dn; i < limb_index; i++) + dp[i] = 0; + dn = limb_index + 1; + } + else + { + mp_limb_t cy; + + dp = d->_mp_d; + + cy = mpn_add_1 (dp + limb_index, dp + limb_index, dn - limb_index, bit); + if (cy > 0) + { + dp = MPZ_REALLOC (d, dn + 1); + dp[dn++] = cy; + } + } + + d->_mp_size = (d->_mp_size < 0) ? - dn : dn; +} + +static void +mpz_abs_sub_bit (mpz_t d, mp_bitcnt_t bit_index) +{ + mp_size_t dn, limb_index; + mp_ptr dp; + mp_limb_t bit; + + dn = GMP_ABS (d->_mp_size); + dp = d->_mp_d; + + limb_index = bit_index / GMP_LIMB_BITS; + bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); + + assert (limb_index < dn); + + gmp_assert_nocarry (mpn_sub_1 (dp + limb_index, dp + limb_index, + dn - limb_index, bit)); + dn = mpn_normalized_size (dp, dn); + d->_mp_size = (d->_mp_size < 0) ? - dn : dn; +} + +void +mpz_setbit (mpz_t d, mp_bitcnt_t bit_index) +{ + if (!mpz_tstbit (d, bit_index)) + { + if (d->_mp_size >= 0) + mpz_abs_add_bit (d, bit_index); + else + mpz_abs_sub_bit (d, bit_index); + } +} + +void +mpz_clrbit (mpz_t d, mp_bitcnt_t bit_index) +{ + if (mpz_tstbit (d, bit_index)) + { + if (d->_mp_size >= 0) + mpz_abs_sub_bit (d, bit_index); + else + mpz_abs_add_bit (d, bit_index); + } +} + +void +mpz_combit (mpz_t d, mp_bitcnt_t bit_index) +{ + if (mpz_tstbit (d, bit_index) ^ (d->_mp_size < 0)) + mpz_abs_sub_bit (d, bit_index); + else + mpz_abs_add_bit (d, bit_index); +} + +void +mpz_com (mpz_t r, const mpz_t u) +{ + mpz_add_ui (r, u, 1); + mpz_neg (r, r); +} + +void +mpz_and (mpz_t r, const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, rn, i; + mp_ptr up, vp, rp; + + mp_limb_t ux, vx, rx; + mp_limb_t uc, vc, rc; + mp_limb_t ul, vl, rl; + + un = GMP_ABS (u->_mp_size); + vn = GMP_ABS (v->_mp_size); + if (un < vn) + { + MPZ_SRCPTR_SWAP (u, v); + MP_SIZE_T_SWAP (un, vn); + } + if (vn == 0) + { + r->_mp_size = 0; + return; + } + + uc = u->_mp_size < 0; + vc = v->_mp_size < 0; + rc = uc & vc; + + ux = -uc; + vx = -vc; + rx = -rc; + + /* If the smaller input is positive, higher limbs don't matter. */ + rn = vx ? un : vn; + + rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); + + up = u->_mp_d; + vp = v->_mp_d; + + i = 0; + do + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + vl = (vp[i] ^ vx) + vc; + vc = vl < vc; + + rl = ( (ul & vl) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + while (++i < vn); + assert (vc == 0); + + for (; i < rn; i++) + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + rl = ( (ul & vx) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + if (rc) + rp[rn++] = rc; + else + rn = mpn_normalized_size (rp, rn); + + r->_mp_size = rx ? 
-rn : rn; +} + +void +mpz_ior (mpz_t r, const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, rn, i; + mp_ptr up, vp, rp; + + mp_limb_t ux, vx, rx; + mp_limb_t uc, vc, rc; + mp_limb_t ul, vl, rl; + + un = GMP_ABS (u->_mp_size); + vn = GMP_ABS (v->_mp_size); + if (un < vn) + { + MPZ_SRCPTR_SWAP (u, v); + MP_SIZE_T_SWAP (un, vn); + } + if (vn == 0) + { + mpz_set (r, u); + return; + } + + uc = u->_mp_size < 0; + vc = v->_mp_size < 0; + rc = uc | vc; + + ux = -uc; + vx = -vc; + rx = -rc; + + /* If the smaller input is negative, by sign extension higher limbs + don't matter. */ + rn = vx ? vn : un; + + rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); + + up = u->_mp_d; + vp = v->_mp_d; + + i = 0; + do + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + vl = (vp[i] ^ vx) + vc; + vc = vl < vc; + + rl = ( (ul | vl) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + while (++i < vn); + assert (vc == 0); + + for (; i < rn; i++) + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + rl = ( (ul | vx) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + if (rc) + rp[rn++] = rc; + else + rn = mpn_normalized_size (rp, rn); + + r->_mp_size = rx ? -rn : rn; +} + +void +mpz_xor (mpz_t r, const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, i; + mp_ptr up, vp, rp; + + mp_limb_t ux, vx, rx; + mp_limb_t uc, vc, rc; + mp_limb_t ul, vl, rl; + + un = GMP_ABS (u->_mp_size); + vn = GMP_ABS (v->_mp_size); + if (un < vn) + { + MPZ_SRCPTR_SWAP (u, v); + MP_SIZE_T_SWAP (un, vn); + } + if (vn == 0) + { + mpz_set (r, u); + return; + } + + uc = u->_mp_size < 0; + vc = v->_mp_size < 0; + rc = uc ^ vc; + + ux = -uc; + vx = -vc; + rx = -rc; + + rp = MPZ_REALLOC (r, un + (mp_size_t) rc); + + up = u->_mp_d; + vp = v->_mp_d; + + i = 0; + do + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + vl = (vp[i] ^ vx) + vc; + vc = vl < vc; + + rl = (ul ^ vl ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + while (++i < vn); + assert (vc == 0); + + for (; i < un; i++) + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + rl = (ul ^ ux) + rc; + rc = rl < rc; + rp[i] = rl; + } + if (rc) + rp[un++] = rc; + else + un = mpn_normalized_size (rp, un); + + r->_mp_size = rx ? -un : un; +} + +static unsigned +gmp_popcount_limb (mp_limb_t x) +{ + unsigned c; + + /* Do 16 bits at a time, to avoid limb-sized constants. 
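+     Each pass runs the usual 0x5555/0x3333 parallel bit-counting steps on
+     the low 16 bits of x, adds the result to c, and then shifts x right by
+     16 bits (or clears it entirely when limbs are only 16 bits wide).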
*/ + int LOCAL_SHIFT_BITS = 16; + for (c = 0; x > 0;) + { + unsigned w = x - ((x >> 1) & 0x5555); + w = ((w >> 2) & 0x3333) + (w & 0x3333); + w = (w >> 4) + w; + w = ((w >> 8) & 0x000f) + (w & 0x000f); + c += w; + if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) + x >>= LOCAL_SHIFT_BITS; + else + x = 0; + } + return c; +} + +mp_bitcnt_t +mpn_popcount (mp_srcptr p, mp_size_t n) +{ + mp_size_t i; + mp_bitcnt_t c; + + for (c = 0, i = 0; i < n; i++) + c += gmp_popcount_limb (p[i]); + + return c; +} + +mp_bitcnt_t +mpz_popcount (const mpz_t u) +{ + mp_size_t un; + + un = u->_mp_size; + + if (un < 0) + return ~(mp_bitcnt_t) 0; + + return mpn_popcount (u->_mp_d, un); +} + +mp_bitcnt_t +mpz_hamdist (const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, i; + mp_limb_t uc, vc, ul, vl, comp; + mp_srcptr up, vp; + mp_bitcnt_t c; + + un = u->_mp_size; + vn = v->_mp_size; + + if ( (un ^ vn) < 0) + return ~(mp_bitcnt_t) 0; + + comp = - (uc = vc = (un < 0)); + if (uc) + { + assert (vn < 0); + un = -un; + vn = -vn; + } + + up = u->_mp_d; + vp = v->_mp_d; + + if (un < vn) + MPN_SRCPTR_SWAP (up, un, vp, vn); + + for (i = 0, c = 0; i < vn; i++) + { + ul = (up[i] ^ comp) + uc; + uc = ul < uc; + + vl = (vp[i] ^ comp) + vc; + vc = vl < vc; + + c += gmp_popcount_limb (ul ^ vl); + } + assert (vc == 0); + + for (; i < un; i++) + { + ul = (up[i] ^ comp) + uc; + uc = ul < uc; + + c += gmp_popcount_limb (ul ^ comp); + } + + return c; +} + +mp_bitcnt_t +mpz_scan1 (const mpz_t u, mp_bitcnt_t starting_bit) +{ + mp_ptr up; + mp_size_t us, un, i; + mp_limb_t limb, ux; + + us = u->_mp_size; + un = GMP_ABS (us); + i = starting_bit / GMP_LIMB_BITS; + + /* Past the end there's no 1 bits for u>=0, or an immediate 1 bit + for u<0. Notice this test picks up any u==0 too. */ + if (i >= un) + return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit); + + up = u->_mp_d; + ux = 0; + limb = up[i]; + + if (starting_bit != 0) + { + if (us < 0) + { + ux = mpn_zero_p (up, i); + limb = ~ limb + ux; + ux = - (mp_limb_t) (limb >= ux); + } + + /* Mask to 0 all bits before starting_bit, thus ignoring them. */ + limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); + } + + return mpn_common_scan (limb, i, up, un, ux); +} + +mp_bitcnt_t +mpz_scan0 (const mpz_t u, mp_bitcnt_t starting_bit) +{ + mp_ptr up; + mp_size_t us, un, i; + mp_limb_t limb, ux; + + us = u->_mp_size; + ux = - (mp_limb_t) (us >= 0); + un = GMP_ABS (us); + i = starting_bit / GMP_LIMB_BITS; + + /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for + u<0. Notice this test picks up all cases of u==0 too. */ + if (i >= un) + return (ux ? starting_bit : ~(mp_bitcnt_t) 0); + + up = u->_mp_d; + limb = up[i] ^ ux; + + if (ux == 0) + limb -= mpn_zero_p (up, i); /* limb = ~(~limb + zero_p) */ + + /* Mask all bits before starting_bit, thus ignoring them. */ + limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); + + return mpn_common_scan (limb, i, up, un, ux); +} + + +/* MPZ base conversion. 
*/ + +size_t +mpz_sizeinbase (const mpz_t u, int base) +{ + mp_size_t un, tn; + mp_srcptr up; + mp_ptr tp; + mp_bitcnt_t bits; + struct gmp_div_inverse bi; + size_t ndigits; + + assert (base >= 2); + assert (base <= 62); + + un = GMP_ABS (u->_mp_size); + if (un == 0) + return 1; + + up = u->_mp_d; + + bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]); + switch (base) + { + case 2: + return bits; + case 4: + return (bits + 1) / 2; + case 8: + return (bits + 2) / 3; + case 16: + return (bits + 3) / 4; + case 32: + return (bits + 4) / 5; + /* FIXME: Do something more clever for the common case of base + 10. */ + } + + tp = gmp_alloc_limbs (un); + mpn_copyi (tp, up, un); + mpn_div_qr_1_invert (&bi, base); + + tn = un; + ndigits = 0; + do + { + ndigits++; + mpn_div_qr_1_preinv (tp, tp, tn, &bi); + tn -= (tp[tn-1] == 0); + } + while (tn > 0); + + gmp_free_limbs (tp, un); + return ndigits; +} + +char * +mpz_get_str (char *sp, int base, const mpz_t u) +{ + unsigned bits; + const char *digits; + mp_size_t un; + size_t i, sn, osn; + + digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; + if (base > 1) + { + if (base <= 36) + digits = "0123456789abcdefghijklmnopqrstuvwxyz"; + else if (base > 62) + return NULL; + } + else if (base >= -1) + base = 10; + else + { + base = -base; + if (base > 36) + return NULL; + } + + sn = 1 + mpz_sizeinbase (u, base); + if (!sp) + { + osn = 1 + sn; + sp = (char *) gmp_alloc (osn); + } + else + osn = 0; + un = GMP_ABS (u->_mp_size); + + if (un == 0) + { + sp[0] = '0'; + sn = 1; + goto ret; + } + + i = 0; + + if (u->_mp_size < 0) + sp[i++] = '-'; + + bits = mpn_base_power_of_two_p (base); + + if (bits) + /* Not modified in this case. */ + sn = i + mpn_get_str_bits ((unsigned char *) sp + i, bits, u->_mp_d, un); + else + { + struct mpn_base_info info; + mp_ptr tp; + + mpn_get_base_info (&info, base); + tp = gmp_alloc_limbs (un); + mpn_copyi (tp, u->_mp_d, un); + + sn = i + mpn_get_str_other ((unsigned char *) sp + i, base, &info, tp, un); + gmp_free_limbs (tp, un); + } + + for (; i < sn; i++) + sp[i] = digits[(unsigned char) sp[i]]; + +ret: + sp[sn] = '\0'; + if (osn && osn != sn + 1) + sp = (char*) gmp_realloc (sp, osn, sn + 1); + return sp; +} + +int +mpz_set_str (mpz_t r, const char *sp, int base) +{ + unsigned bits, value_of_a; + mp_size_t rn, alloc; + mp_ptr rp; + size_t dn, sn; + int sign; + unsigned char *dp; + + assert (base == 0 || (base >= 2 && base <= 62)); + + while (isspace( (unsigned char) *sp)) + sp++; + + sign = (*sp == '-'); + sp += sign; + + if (base == 0) + { + if (sp[0] == '0') + { + if (sp[1] == 'x' || sp[1] == 'X') + { + base = 16; + sp += 2; + } + else if (sp[1] == 'b' || sp[1] == 'B') + { + base = 2; + sp += 2; + } + else + base = 8; + } + else + base = 10; + } + + if (!*sp) + { + r->_mp_size = 0; + return -1; + } + sn = strlen(sp); + dp = (unsigned char *) gmp_alloc (sn); + + value_of_a = (base > 36) ? 
36 : 10; + for (dn = 0; *sp; sp++) + { + unsigned digit; + + if (isspace ((unsigned char) *sp)) + continue; + else if (*sp >= '0' && *sp <= '9') + digit = *sp - '0'; + else if (*sp >= 'a' && *sp <= 'z') + digit = *sp - 'a' + value_of_a; + else if (*sp >= 'A' && *sp <= 'Z') + digit = *sp - 'A' + 10; + else + digit = base; /* fail */ + + if (digit >= (unsigned) base) + { + gmp_free (dp, sn); + r->_mp_size = 0; + return -1; + } + + dp[dn++] = digit; + } + + if (!dn) + { + gmp_free (dp, sn); + r->_mp_size = 0; + return -1; + } + bits = mpn_base_power_of_two_p (base); + + if (bits > 0) + { + alloc = (dn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; + rp = MPZ_REALLOC (r, alloc); + rn = mpn_set_str_bits (rp, dp, dn, bits); + } + else + { + struct mpn_base_info info; + mpn_get_base_info (&info, base); + alloc = (dn + info.exp - 1) / info.exp; + rp = MPZ_REALLOC (r, alloc); + rn = mpn_set_str_other (rp, dp, dn, base, &info); + /* Normalization, needed for all-zero input. */ + assert (rn > 0); + rn -= rp[rn-1] == 0; + } + assert (rn <= alloc); + gmp_free (dp, sn); + + r->_mp_size = sign ? - rn : rn; + + return 0; +} + +int +mpz_init_set_str (mpz_t r, const char *sp, int base) +{ + mpz_init (r); + return mpz_set_str (r, sp, base); +} + +size_t +mpz_out_str (FILE *stream, int base, const mpz_t x) +{ + char *str; + size_t len, n; + + str = mpz_get_str (NULL, base, x); + if (!str) + return 0; + len = strlen (str); + n = fwrite (str, 1, len, stream); + gmp_free (str, len + 1); + return n; +} + + +static int +gmp_detect_endian (void) +{ + static const int i = 2; + const unsigned char *p = (const unsigned char *) &i; + return 1 - *p; +} + +/* Import and export. Does not support nails. */ +void +mpz_import (mpz_t r, size_t count, int order, size_t size, int endian, + size_t nails, const void *src) +{ + const unsigned char *p; + ptrdiff_t word_step; + mp_ptr rp; + mp_size_t rn; + + /* The current (partial) limb. */ + mp_limb_t limb; + /* The number of bytes already copied to this limb (starting from + the low end). */ + size_t bytes; + /* The index where the limb should be stored, when completed. */ + mp_size_t i; + + if (nails != 0) + gmp_die ("mpz_import: Nails not supported."); + + assert (order == 1 || order == -1); + assert (endian >= -1 && endian <= 1); + + if (endian == 0) + endian = gmp_detect_endian (); + + p = (unsigned char *) src; + + word_step = (order != endian) ? 2 * size : 0; + + /* Process bytes from the least significant end, so point p at the + least significant word. */ + if (order == 1) + { + p += size * (count - 1); + word_step = - word_step; + } + + /* And at least significant byte of that word. 
*/ + if (endian == 1) + p += (size - 1); + + rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t); + rp = MPZ_REALLOC (r, rn); + + for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step) + { + size_t j; + for (j = 0; j < size; j++, p -= (ptrdiff_t) endian) + { + limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT); + if (bytes == sizeof(mp_limb_t)) + { + rp[i++] = limb; + bytes = 0; + limb = 0; + } + } + } + assert (i + (bytes > 0) == rn); + if (limb != 0) + rp[i++] = limb; + else + i = mpn_normalized_size (rp, i); + + r->_mp_size = i; +} + +void * +mpz_export (void *r, size_t *countp, int order, size_t size, int endian, + size_t nails, const mpz_t u) +{ + size_t count; + mp_size_t un; + + if (nails != 0) + gmp_die ("mpz_export: Nails not supported."); + + assert (order == 1 || order == -1); + assert (endian >= -1 && endian <= 1); + assert (size > 0 || u->_mp_size == 0); + + un = u->_mp_size; + count = 0; + if (un != 0) + { + size_t k; + unsigned char *p; + ptrdiff_t word_step; + /* The current (partial) limb. */ + mp_limb_t limb; + /* The number of bytes left to do in this limb. */ + size_t bytes; + /* The index where the limb was read. */ + mp_size_t i; + + un = GMP_ABS (un); + + /* Count bytes in top limb. */ + limb = u->_mp_d[un-1]; + assert (limb != 0); + + k = (GMP_LIMB_BITS <= CHAR_BIT); + if (!k) + { + do { + int LOCAL_CHAR_BIT = CHAR_BIT; + k++; limb >>= LOCAL_CHAR_BIT; + } while (limb != 0); + } + /* else limb = 0; */ + + count = (k + (un-1) * sizeof (mp_limb_t) + size - 1) / size; + + if (!r) + r = gmp_alloc (count * size); + + if (endian == 0) + endian = gmp_detect_endian (); + + p = (unsigned char *) r; + + word_step = (order != endian) ? 2 * size : 0; + + /* Process bytes from the least significant end, so point p at the + least significant word. */ + if (order == 1) + { + p += size * (count - 1); + word_step = - word_step; + } + + /* And at least significant byte of that word. */ + if (endian == 1) + p += (size - 1); + + for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step) + { + size_t j; + for (j = 0; j < size; ++j, p -= (ptrdiff_t) endian) + { + if (sizeof (mp_limb_t) == 1) + { + if (i < un) + *p = u->_mp_d[i++]; + else + *p = 0; + } + else + { + int LOCAL_CHAR_BIT = CHAR_BIT; + if (bytes == 0) + { + if (i < un) + limb = u->_mp_d[i++]; + bytes = sizeof (mp_limb_t); + } + *p = limb; + limb >>= LOCAL_CHAR_BIT; + bytes--; + } + } + } + assert (i == un); + assert (k == count); + } + + if (countp) + *countp = count; + + return r; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp.h new file mode 100644 index 0000000000..f28cb360ce --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp.h @@ -0,0 +1,311 @@ +/* mini-gmp, a minimalistic implementation of a GNU GMP subset. + +Copyright 2011-2015, 2017, 2019-2021 Free Software Foundation, Inc. + +This file is part of the GNU MP Library. + +The GNU MP Library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 2 of the License, or (at your option) any + later version. + +or both in parallel, as here. 
+ +The GNU MP Library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the GNU MP Library. If not, +see https://www.gnu.org/licenses/. */ + +/* About mini-gmp: This is a minimal implementation of a subset of the + GMP interface. It is intended for inclusion into applications which + have modest bignums needs, as a fallback when the real GMP library + is not installed. + + This file defines the public interface. */ + +#ifndef __MINI_GMP_H__ +#define __MINI_GMP_H__ + +/* For size_t */ +#include + +#if defined (__cplusplus) +extern "C" { +#endif + +void mp_set_memory_functions (void *(*) (size_t), + void *(*) (void *, size_t, size_t), + void (*) (void *, size_t)); + +void mp_get_memory_functions (void *(**) (size_t), + void *(**) (void *, size_t, size_t), + void (**) (void *, size_t)); + +#ifndef MINI_GMP_LIMB_TYPE +#define MINI_GMP_LIMB_TYPE long +#endif + +typedef unsigned MINI_GMP_LIMB_TYPE mp_limb_t; +typedef long mp_size_t; +typedef unsigned long mp_bitcnt_t; + +typedef mp_limb_t *mp_ptr; +typedef const mp_limb_t *mp_srcptr; + +typedef struct +{ + int _mp_alloc; /* Number of *limbs* allocated and pointed + to by the _mp_d field. */ + int _mp_size; /* abs(_mp_size) is the number of limbs the + last field points to. If _mp_size is + negative this is a negative number. */ + mp_limb_t *_mp_d; /* Pointer to the limbs. */ +} __mpz_struct; + +typedef __mpz_struct mpz_t[1]; + +typedef __mpz_struct *mpz_ptr; +typedef const __mpz_struct *mpz_srcptr; + +extern const int mp_bits_per_limb; + +void mpn_copyi (mp_ptr, mp_srcptr, mp_size_t); +void mpn_copyd (mp_ptr, mp_srcptr, mp_size_t); +void mpn_zero (mp_ptr, mp_size_t); + +int mpn_cmp (mp_srcptr, mp_srcptr, mp_size_t); +int mpn_zero_p (mp_srcptr, mp_size_t); + +mp_limb_t mpn_add_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_add_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +mp_limb_t mpn_add (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); + +mp_limb_t mpn_sub_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_sub_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +mp_limb_t mpn_sub (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); + +mp_limb_t mpn_mul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_addmul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_submul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); + +mp_limb_t mpn_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); +void mpn_mul_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +void mpn_sqr (mp_ptr, mp_srcptr, mp_size_t); +int mpn_perfect_square_p (mp_srcptr, mp_size_t); +mp_size_t mpn_sqrtrem (mp_ptr, mp_ptr, mp_srcptr, mp_size_t); +mp_size_t mpn_gcd (mp_ptr, mp_ptr, mp_size_t, mp_ptr, mp_size_t); + +mp_limb_t mpn_lshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); +mp_limb_t mpn_rshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); + +mp_bitcnt_t mpn_scan0 (mp_srcptr, mp_bitcnt_t); +mp_bitcnt_t mpn_scan1 (mp_srcptr, mp_bitcnt_t); + +void mpn_com (mp_ptr, mp_srcptr, mp_size_t); +mp_limb_t mpn_neg (mp_ptr, mp_srcptr, mp_size_t); + +mp_bitcnt_t mpn_popcount (mp_srcptr, mp_size_t); + +mp_limb_t mpn_invert_3by2 (mp_limb_t, mp_limb_t); +#define mpn_invert_limb(x) mpn_invert_3by2 ((x), 0) + +size_t mpn_get_str (unsigned char *, int, mp_ptr, mp_size_t); 
+mp_size_t mpn_set_str (mp_ptr, const unsigned char *, size_t, int); + +void mpz_init (mpz_t); +void mpz_init2 (mpz_t, mp_bitcnt_t); +void mpz_clear (mpz_t); + +#define mpz_odd_p(z) (((z)->_mp_size != 0) & (int) (z)->_mp_d[0]) +#define mpz_even_p(z) (! mpz_odd_p (z)) + +int mpz_sgn (const mpz_t); +int mpz_cmp_si (const mpz_t, long); +int mpz_cmp_ui (const mpz_t, unsigned long); +int mpz_cmp (const mpz_t, const mpz_t); +int mpz_cmpabs_ui (const mpz_t, unsigned long); +int mpz_cmpabs (const mpz_t, const mpz_t); +int mpz_cmp_d (const mpz_t, double); +int mpz_cmpabs_d (const mpz_t, double); + +void mpz_abs (mpz_t, const mpz_t); +void mpz_neg (mpz_t, const mpz_t); +void mpz_swap (mpz_t, mpz_t); + +void mpz_add_ui (mpz_t, const mpz_t, unsigned long); +void mpz_add (mpz_t, const mpz_t, const mpz_t); +void mpz_sub_ui (mpz_t, const mpz_t, unsigned long); +void mpz_ui_sub (mpz_t, unsigned long, const mpz_t); +void mpz_sub (mpz_t, const mpz_t, const mpz_t); + +void mpz_mul_si (mpz_t, const mpz_t, long int); +void mpz_mul_ui (mpz_t, const mpz_t, unsigned long int); +void mpz_mul (mpz_t, const mpz_t, const mpz_t); +void mpz_mul_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_addmul_ui (mpz_t, const mpz_t, unsigned long int); +void mpz_addmul (mpz_t, const mpz_t, const mpz_t); +void mpz_submul_ui (mpz_t, const mpz_t, unsigned long int); +void mpz_submul (mpz_t, const mpz_t, const mpz_t); + +void mpz_cdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_fdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_tdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_cdiv_q (mpz_t, const mpz_t, const mpz_t); +void mpz_fdiv_q (mpz_t, const mpz_t, const mpz_t); +void mpz_tdiv_q (mpz_t, const mpz_t, const mpz_t); +void mpz_cdiv_r (mpz_t, const mpz_t, const mpz_t); +void mpz_fdiv_r (mpz_t, const mpz_t, const mpz_t); +void mpz_tdiv_r (mpz_t, const mpz_t, const mpz_t); + +void mpz_cdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_fdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_tdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_cdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_fdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_tdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); + +void mpz_mod (mpz_t, const mpz_t, const mpz_t); + +void mpz_divexact (mpz_t, const mpz_t, const mpz_t); + +int mpz_divisible_p (const mpz_t, const mpz_t); +int mpz_congruent_p (const mpz_t, const mpz_t, const mpz_t); + +unsigned long mpz_cdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); +unsigned long mpz_fdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); +unsigned long mpz_tdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); +unsigned long mpz_cdiv_q_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_fdiv_q_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_tdiv_q_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_cdiv_r_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_fdiv_r_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_tdiv_r_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_cdiv_ui (const mpz_t, unsigned long); +unsigned long mpz_fdiv_ui (const mpz_t, unsigned long); +unsigned long mpz_tdiv_ui (const mpz_t, unsigned long); + +unsigned long mpz_mod_ui (mpz_t, const mpz_t, unsigned long); + +void mpz_divexact_ui (mpz_t, const mpz_t, unsigned long); + +int mpz_divisible_ui_p (const mpz_t, unsigned long); + +unsigned long mpz_gcd_ui (mpz_t, const mpz_t, unsigned long); +void mpz_gcd (mpz_t, const mpz_t, const mpz_t); +void 
mpz_gcdext (mpz_t, mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_lcm_ui (mpz_t, const mpz_t, unsigned long); +void mpz_lcm (mpz_t, const mpz_t, const mpz_t); +int mpz_invert (mpz_t, const mpz_t, const mpz_t); + +void mpz_sqrtrem (mpz_t, mpz_t, const mpz_t); +void mpz_sqrt (mpz_t, const mpz_t); +int mpz_perfect_square_p (const mpz_t); + +void mpz_pow_ui (mpz_t, const mpz_t, unsigned long); +void mpz_ui_pow_ui (mpz_t, unsigned long, unsigned long); +void mpz_powm (mpz_t, const mpz_t, const mpz_t, const mpz_t); +void mpz_powm_ui (mpz_t, const mpz_t, unsigned long, const mpz_t); + +void mpz_rootrem (mpz_t, mpz_t, const mpz_t, unsigned long); +int mpz_root (mpz_t, const mpz_t, unsigned long); + +void mpz_fac_ui (mpz_t, unsigned long); +void mpz_2fac_ui (mpz_t, unsigned long); +void mpz_mfac_uiui (mpz_t, unsigned long, unsigned long); +void mpz_bin_uiui (mpz_t, unsigned long, unsigned long); + +int mpz_probab_prime_p (const mpz_t, int); + +int mpz_tstbit (const mpz_t, mp_bitcnt_t); +void mpz_setbit (mpz_t, mp_bitcnt_t); +void mpz_clrbit (mpz_t, mp_bitcnt_t); +void mpz_combit (mpz_t, mp_bitcnt_t); + +void mpz_com (mpz_t, const mpz_t); +void mpz_and (mpz_t, const mpz_t, const mpz_t); +void mpz_ior (mpz_t, const mpz_t, const mpz_t); +void mpz_xor (mpz_t, const mpz_t, const mpz_t); + +mp_bitcnt_t mpz_popcount (const mpz_t); +mp_bitcnt_t mpz_hamdist (const mpz_t, const mpz_t); +mp_bitcnt_t mpz_scan0 (const mpz_t, mp_bitcnt_t); +mp_bitcnt_t mpz_scan1 (const mpz_t, mp_bitcnt_t); + +int mpz_fits_slong_p (const mpz_t); +int mpz_fits_ulong_p (const mpz_t); +int mpz_fits_sint_p (const mpz_t); +int mpz_fits_uint_p (const mpz_t); +int mpz_fits_sshort_p (const mpz_t); +int mpz_fits_ushort_p (const mpz_t); +long int mpz_get_si (const mpz_t); +unsigned long int mpz_get_ui (const mpz_t); +double mpz_get_d (const mpz_t); +size_t mpz_size (const mpz_t); +mp_limb_t mpz_getlimbn (const mpz_t, mp_size_t); + +void mpz_realloc2 (mpz_t, mp_bitcnt_t); +mp_srcptr mpz_limbs_read (mpz_srcptr); +mp_ptr mpz_limbs_modify (mpz_t, mp_size_t); +mp_ptr mpz_limbs_write (mpz_t, mp_size_t); +void mpz_limbs_finish (mpz_t, mp_size_t); +mpz_srcptr mpz_roinit_n (mpz_t, mp_srcptr, mp_size_t); + +#define MPZ_ROINIT_N(xp, xs) {{0, (xs),(xp) }} + +void mpz_set_si (mpz_t, signed long int); +void mpz_set_ui (mpz_t, unsigned long int); +void mpz_set (mpz_t, const mpz_t); +void mpz_set_d (mpz_t, double); + +void mpz_init_set_si (mpz_t, signed long int); +void mpz_init_set_ui (mpz_t, unsigned long int); +void mpz_init_set (mpz_t, const mpz_t); +void mpz_init_set_d (mpz_t, double); + +size_t mpz_sizeinbase (const mpz_t, int); +char *mpz_get_str (char *, int, const mpz_t); +int mpz_set_str (mpz_t, const char *, int); +int mpz_init_set_str (mpz_t, const char *, int); + +/* This long list taken from gmp.h. */ +/* For reference, "defined(EOF)" cannot be used here. In g++ 2.95.4, + defines EOF but not FILE. 
*/ +#if defined (FILE) \ + || defined (H_STDIO) \ + || defined (_H_STDIO) /* AIX */ \ + || defined (_STDIO_H) /* glibc, Sun, SCO */ \ + || defined (_STDIO_H_) /* BSD, OSF */ \ + || defined (__STDIO_H) /* Borland */ \ + || defined (__STDIO_H__) /* IRIX */ \ + || defined (_STDIO_INCLUDED) /* HPUX */ \ + || defined (__dj_include_stdio_h_) /* DJGPP */ \ + || defined (_FILE_DEFINED) /* Microsoft */ \ + || defined (__STDIO__) /* Apple MPW MrC */ \ + || defined (_MSL_STDIO_H) /* Metrowerks */ \ + || defined (_STDIO_H_INCLUDED) /* QNX4 */ \ + || defined (_ISO_STDIO_ISO_H) /* Sun C++ */ \ + || defined (__STDIO_LOADED) /* VMS */ \ + || defined (_STDIO) /* HPE NonStop */ \ + || defined (__DEFINED_FILE) /* musl */ +size_t mpz_out_str (FILE *, int, const mpz_t); +#endif + +void mpz_import (mpz_t, size_t, int, size_t, int, size_t, const void *); +void *mpz_export (void *, size_t *, int, size_t, int, size_t, const mpz_t); + +#if defined (__cplusplus) +} +#endif +#endif /* __MINI_GMP_H__ */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mp.h new file mode 100644 index 0000000000..b3733b520d --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mp.h @@ -0,0 +1,88 @@ +#ifndef MP_H +#define MP_H + +#include +#include +#include + +// Functions taken from the GF module + +void mp_add(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords); +digit_t mp_shiftr(digit_t *x, const unsigned int shift, const unsigned int nwords); +void multiple_mp_shiftl(digit_t *x, const unsigned int shift, const unsigned int nwords); +void mp_shiftl(digit_t *x, const unsigned int shift, const unsigned int nwords); +void MUL(digit_t *out, const digit_t a, const digit_t b); + +// Functions taken from the EC module + +void mp_sub(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords); +void select_ct(digit_t *c, const digit_t *a, const digit_t *b, const digit_t mask, const int nwords); +void swap_ct(digit_t *a, digit_t *b, const digit_t option, const int nwords); +int mp_compare(const digit_t *a, const digit_t *b, unsigned int nwords); +bool mp_is_zero(const digit_t *a, unsigned int nwords); +void mp_mul2(digit_t *c, const digit_t *a, const digit_t *b); + +// Further functions for multiprecision arithmetic +void mp_print(const digit_t *a, size_t nwords); +void mp_copy(digit_t *b, const digit_t *a, size_t nwords); +void mp_neg(digit_t *a, unsigned int nwords); +bool mp_is_one(const digit_t *x, unsigned int nwords); +void mp_mul(digit_t *c, const digit_t *a, const digit_t *b, size_t nwords); +void mp_mod_2exp(digit_t *a, unsigned int e, unsigned int nwords); +void mp_inv_2e(digit_t *b, const digit_t *a, int e, unsigned int nwords); +void mp_invert_matrix(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, int e, unsigned int nwords); + +#define mp_is_odd(x, nwords) (((nwords) != 0) & (int)(x)[0]) +#define mp_is_even(x, nwords) (!mp_is_odd(x, nwords)) + +/********************** Constant-time unsigned comparisons ***********************/ + +// The following functions return 1 (TRUE) if condition is true, 0 (FALSE) otherwise +static inline unsigned int +is_digit_nonzero_ct(digit_t x) +{ // Is x != 0? + return (unsigned int)((x | (0 - x)) >> (RADIX - 1)); +} + +static inline unsigned int +is_digit_zero_ct(digit_t x) +{ // Is x = 0? + return (unsigned int)(1 ^ is_digit_nonzero_ct(x)); +} + +static inline unsigned int +is_digit_lessthan_ct(digit_t x, digit_t y) +{ // Is x < y? 
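+    // Branchless evaluation: the top bit of x ^ ((x ^ y) | ((x - y) ^ y))
+    // equals the borrow out of x - y, i.e. it is set exactly when x < y,
+    // so shifting it down by RADIX - 1 yields 0 or 1 without any
+    // data-dependent branch.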
+ return (unsigned int)((x ^ ((x ^ y) | ((x - y) ^ y))) >> (RADIX - 1)); +} + +/********************** Platform-independent macros for digit-size operations + * **********************/ + +// Digit addition with carry +#define ADDC(sumOut, carryOut, addend1, addend2, carryIn) \ + { \ + digit_t tempReg = (addend1) + (digit_t)(carryIn); \ + (sumOut) = (addend2) + tempReg; \ + (carryOut) = (is_digit_lessthan_ct(tempReg, (digit_t)(carryIn)) | is_digit_lessthan_ct((sumOut), tempReg)); \ + } + +// Digit subtraction with borrow +#define SUBC(differenceOut, borrowOut, minuend, subtrahend, borrowIn) \ + { \ + digit_t tempReg = (minuend) - (subtrahend); \ + unsigned int borrowReg = \ + (is_digit_lessthan_ct((minuend), (subtrahend)) | ((borrowIn) & is_digit_zero_ct(tempReg))); \ + (differenceOut) = tempReg - (digit_t)(borrowIn); \ + (borrowOut) = borrowReg; \ + } + +// Shift right with flexible datatype +#define SHIFTR(highIn, lowIn, shift, shiftOut, DigitSize) \ + (shiftOut) = ((lowIn) >> (shift)) ^ ((highIn) << (DigitSize - (shift))); + +// Digit shift left +#define SHIFTL(highIn, lowIn, shift, shiftOut, DigitSize) \ + (shiftOut) = ((highIn) << (shift)) ^ ((lowIn) >> (RADIX - (shift))); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion.h new file mode 100644 index 0000000000..a567657464 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion.h @@ -0,0 +1,708 @@ +/** @file + * + * @authors Luca De Feo, Sina Schaeffler + * + * @brief Declarations for quaternion algebra operations + */ + +#ifndef QUATERNION_H +#define QUATERNION_H + +// #include +#include +#include "intbig.h" +#include + +/** @defgroup quat_quat Quaternion algebra + * @{ + */ + +/** @defgroup quat_vec_t Types for integer vectors and matrices + * @{ + */ + +/** @brief Type for vector of 2 big integers + * + * @typedef ibz_vec_2_t + */ +typedef ibz_t ibz_vec_2_t[2]; + +/** @brief Type for vectors of 4 integers + * + * @typedef ibz_vec_4_t + * + * Represented as a vector of 4 ibz_t (big integer) elements + */ +typedef ibz_t ibz_vec_4_t[4]; + +/** @brief Type for 2 by 2 matrices of integers + * + * @typedef ibz_mat_2x2_t + * + * Represented as a matrix of 2 vectors of 2 ibz_t (big integer) elements + */ +typedef ibz_t ibz_mat_2x2_t[2][2]; + +/** @brief Type for 4 by 4 matrices of integers + * + * @typedef ibz_mat_4x4_t + * + * Represented as a matrix of 4 vectors of 4 ibz_t (big integer) elements + */ +typedef ibz_t ibz_mat_4x4_t[4][4]; +/** + * @} + */ + +/** @defgroup quat_quat_t Types for quaternion algebras + * @{ + */ + +/** @brief Type for quaternion algebras + * + * @typedef quat_alg_t + * + * @struct quat_alg + * + * The quaternion algebra ramified at p = 3 mod 4 and ∞. + */ +typedef struct quat_alg +{ + ibz_t p; ///< Prime number, must be = 3 mod 4. +} quat_alg_t; + +/** @brief Type for quaternion algebra elements + * + * @typedef quat_alg_elem_t + * + * @struct quat_alg_elem + * + * Represented as a array *coord* of 4 ibz_t integers and a common ibz_t denominator *denom*. + * + * The representation is not necessarily normalized, that is, gcd(denom, content(coord)) might not + * be 1. 
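+ * (For example, coord = (2, 4, 6, 8) with denom = 4 and coord = (1, 2, 3, 4) with
+ * denom = 2 describe the same element; only the second representation is normalized.)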
For getting a normalized representation, use the quat_alg_normalize function + * + * The elements are always represented in basis (1,i,j,ij) of the quaternion algebra, with i^2=-1 + * and j^2 = -p + */ +typedef struct quat_alg_elem +{ + ibz_t denom; ///< Denominator by which all coordinates are divided (big integer, must not be 0) + ibz_vec_4_t coord; ///< Numerators of the 4 coordinates of the quaternion algebra element in basis (1,i,j,ij) +} quat_alg_elem_t; + +/** @brief Type for lattices in dimension 4 + * + * @typedef quat_lattice_t + * + * @struct quat_lattice + * + * Represented as a rational (`frac`) times an integreal lattice (`basis`) + * + * The basis is such that its columns divided by its denominator are elements of + * the quaternion algebra, represented in basis (1,i,j,ij) where i^2 = -1, j^2 = -p. + * + * All lattices must have full rank (4) + */ +typedef struct quat_lattice +{ + ibz_t denom; ///< Denominator by which the basis is divided (big integer, must not be 0) + ibz_mat_4x4_t basis; ///< Integer basis of the lattice (its columns divided by denom are + ///< algebra elements in the usual basis) +} quat_lattice_t; + +/** @brief Type for left ideals of maximal orders in quaternion algebras + * + * @typedef quat_left_ideal_t + * + * @struct quat_left_ideal + * + * The basis of the lattice representing it is such that its columns divided by its denominator are + * elements of the quaternion algebra, represented in basis (1,i,j,ij) where i^2 = -1, j^2 = -p. + */ +typedef struct quat_left_ideal +{ + quat_lattice_t lattice; ///< lattice representing the ideal + ibz_t norm; ///< norm of the lattice + const quat_lattice_t *parent_order; ///< should be a maximal order +} quat_left_ideal_t; +/** @} + */ + +/** @brief Type for extremal maximal orders + * + * @typedef quat_p_extremal_maximal_order_t + * + * @struct quat_p_extremal_maximal_order + * + * The basis of the order representing it is in hermite normal form, and its columns divid +ed by its denominator are elements of the quaternion algebra, represented in basis (1,z,t, +tz) where z^2 = -q, t^2 = -p. 
+*/ +typedef struct quat_p_extremal_maximal_order +{ + quat_lattice_t order; ///< the order represented as a lattice + quat_alg_elem_t z; ///< the element of small discriminant + quat_alg_elem_t t; ///< the element of norm p orthogonal to z + uint32_t q; ///< the absolute value of the square of z +} quat_p_extremal_maximal_order_t; + +/** @brief Type for represent integer parameters + * + * @typedef quat_p_extremal_maximal_order_t + * + * @struct quat_p_extremal_maximal_order + * + */ +typedef struct quat_represent_integer_params +{ + int primality_test_iterations; ///< Primality test iterations + const quat_p_extremal_maximal_order_t *order; ///< The standard extremal maximal order + const quat_alg_t *algebra; ///< The quaternion algebra +} quat_represent_integer_params_t; + +/*************************** Functions *****************************/ + +/** @defgroup quat_c Constructors and Destructors + * @{ + */ +void quat_alg_init_set(quat_alg_t *alg, const ibz_t *p); +void quat_alg_finalize(quat_alg_t *alg); + +void quat_alg_elem_init(quat_alg_elem_t *elem); +void quat_alg_elem_finalize(quat_alg_elem_t *elem); + +void ibz_vec_2_init(ibz_vec_2_t *vec); +void ibz_vec_2_finalize(ibz_vec_2_t *vec); + +void ibz_vec_4_init(ibz_vec_4_t *vec); +void ibz_vec_4_finalize(ibz_vec_4_t *vec); + +void ibz_mat_2x2_init(ibz_mat_2x2_t *mat); +void ibz_mat_2x2_finalize(ibz_mat_2x2_t *mat); + +void ibz_mat_4x4_init(ibz_mat_4x4_t *mat); +void ibz_mat_4x4_finalize(ibz_mat_4x4_t *mat); + +void quat_lattice_init(quat_lattice_t *lat); +void quat_lattice_finalize(quat_lattice_t *lat); + +void quat_left_ideal_init(quat_left_ideal_t *lideal); +void quat_left_ideal_finalize(quat_left_ideal_t *lideal); +/** @} + */ + +/** @defgroup quat_printers Print functions for types from the quaternion module + * @{ + */ +void ibz_mat_2x2_print(const ibz_mat_2x2_t *mat); +void ibz_mat_4x4_print(const ibz_mat_4x4_t *mat); +void ibz_vec_2_print(const ibz_vec_2_t *vec); +void ibz_vec_4_print(const ibz_vec_4_t *vec); + +void quat_lattice_print(const quat_lattice_t *lat); +void quat_alg_print(const quat_alg_t *alg); +void quat_alg_elem_print(const quat_alg_elem_t *elem); +void quat_left_ideal_print(const quat_left_ideal_t *lideal); + +/** @} + */ + +/** @defgroup quat_int Integer functions for quaternion algebra + * @{ + */ + +/** @defgroup quat_int_mat Integer matrix and vector functions + * @{ + */ + +/** @brief Copy matrix + * + * @param copy Output: Matrix into which copied will be copied + * @param copied + */ +void ibz_mat_2x2_copy(ibz_mat_2x2_t *copy, const ibz_mat_2x2_t *copied); + +/** + * @brief Inverse of 2x2 integer matrices modulo m + * + * @param inv Output matrix + * @param mat Input matrix + * @param m Integer modulo + * @return 1 if inverse exists 0 otherwise + */ +int ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m); + +/** @brief mat*vec in dimension 2 for integers + * + * @param res Output vector + * @param mat Input vector + * @param vec Input vector + */ +void ibz_mat_2x2_eval(ibz_vec_2_t *res, const ibz_mat_2x2_t *mat, const ibz_vec_2_t *vec); + +/** @brief Copies all values from a 4x4 integer matrix to another one + * + * @param new Output: matrix which will have its entries set to mat's entries + * @param mat Input matrix + */ +void ibz_mat_4x4_copy(ibz_mat_4x4_t *new, + const ibz_mat_4x4_t *mat); // dim4, lattice, test/dim4, ideal + +/** @brief transpose a 4x4 integer matrix + * + * @param transposed Output: is set to the transposition of mat + * @param mat Input matrix + */ 
+void ibz_mat_4x4_transpose(ibz_mat_4x4_t *transposed, const ibz_mat_4x4_t *mat); + +/** @brief a*b for a,b integer 4x4 matrices + * + * Naive implementation + * + * @param res Output: A 4x4 integer matrix + * @param a + * @param b + */ +void ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t *b); + +/** @brief divides all values in matrix by same scalar + * + * @returns 1 if scalar divided all values in mat, 0 otherwise (division is performed in both cases) + * @param quot Output + * @param scalar + * @param mat + */ +int ibz_mat_4x4_scalar_div(ibz_mat_4x4_t *quot, const ibz_t *scalar, const ibz_mat_4x4_t *mat); + +/** + * @brief mat*vec + * + * + * @param res Output: coordinate vector + * @param mat Integer 4x4 matrix + * @param vec Integer vector (coordinate vector) + * + * Multiplies 4x4 integer matrix mat by a 4-integers column vector vec + */ +void ibz_mat_4x4_eval(ibz_vec_4_t *res, const ibz_mat_4x4_t *mat, const ibz_vec_4_t *vec); + +/** + * @brief vec*mat + * + * + * @param res Output: coordinate vector. + * @param vec Integer vector (coordinate vector) + * @param mat Integer 4x4 matrix + * + * Multiplies 4x4 integer matrix mat by a 4-integers row vector vec (on the left) + */ +void ibz_mat_4x4_eval_t(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_mat_4x4_t *mat); + +/** @} + */ + +/** @defgroup quat_integer Higher-level integer functions for quaternion algebra + * @{ + */ + +/** + * @brief Generates a random prime + * + * A number is accepted as prime if it passes a 30-round Miller-Rabin test. + * This function is fairly inefficient and mostly meant for tests. + * + * @returns 1 if a prime is found, 0 otherwise + * @param p Output: The prime (if found) + * @param is3mod4 If 1, the prime is required to be 3 mod 4, if 0 no congruence condition is imposed + * @param bitsize Maximal size of output prime + * @param probability_test_iterations Miller-Rabin iteartions for probabilistic primality testing in + * rejection sampling + */ +int ibz_generate_random_prime(ibz_t *p, int is3mod4, int bitsize, int probability_test_iterations); + +/** + * @brief Find integers x and y such that x^2 + n*y^2 = p + * + * Uses Cornacchia's algorithm, should be used only for prime p + * + * @param x Output + * @param y Output + * @param n first parameter defining the equation + * @param p seond parameter defining the equation, must be prime + * @return 1 if success, 0 otherwise + */ +int ibz_cornacchia_prime(ibz_t *x, ibz_t *y, const ibz_t *n, const ibz_t *p); + +/** @} + */ + +/** @defgroup quat_qf Quadratic form functions + * @{ + */ + +/** + * @brief Quadratic form evaluation + * + * qf and coord must be represented in the same basis. 
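+ * In other words, the value written to res is coord^T * qf * coord; for instance,
+ * evaluating the gram matrix returned by quat_lideal_reduce_basis at a coordinate
+ * vector gives the reduced norm of the corresponding element divided by the norm
+ * of the ideal.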
+ * + * @param res Output: coordinate vector + * @param qf Quadratic form (4x4 integer matrix) + * @param coord Integer vector (coordinate vector) + */ +void quat_qf_eval(ibz_t *res, const ibz_mat_4x4_t *qf, const ibz_vec_4_t *coord); +/** @} + */ + +/** @} + */ + +/** @defgroup quat_quat_f Quaternion algebra functions + * @{ + */ +/** + * @brief Copies an algebra element + * + * @param copy Output: The element into which another one is copied + * @param copied Source element copied into copy + */ +void quat_alg_elem_copy(quat_alg_elem_t *copy, const quat_alg_elem_t *copied); + +void quat_alg_mul(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b, const quat_alg_t *alg); + +/** @brief reduced norm of alg_elem x + * + * @param res_num Output: rational which will contain the numerator of the reduced norm of a + * @param res_denom Output: rational which will contain the denominator of the reduced norm of a (it + * is 1 if the norm is integer) + * @param x Algebra element whose norm is computed + * @param alg The quaternion algebra + */ +void quat_alg_norm(ibz_t *res_num, ibz_t *res_denom, const quat_alg_elem_t *x, const quat_alg_t *alg); + +/** @brief Normalize representation of alg_elem x + * + * @param x Algebra element whose representation will be normalized + * + * Modification of x. + * Sets coord and denom of x so that gcd(denom, content(coord))=1 + * without changing the value of x = (coord0/denom, coord1/denom, coord2/denom, coord3/denom). + */ +void quat_alg_normalize(quat_alg_elem_t *x); + +/** + * @brief Standard involution in a quaternion algebra + * + * @param conj Output: image of x by standard involution of the quaternion algebra alg + * @param x element of alg whose image is searched + */ +void quat_alg_conj(quat_alg_elem_t *conj, const quat_alg_elem_t *x); + +/** + * @brief Given `x` ∈ `order`, factor it into its primitive and impritive parts + * + * Given `x` ∈ `order`, return a coordinate vector `primitive_x` and an integer `content` + * such that `x` = `content` · Λ `primitive_x`, where Λ is the basis of `order` + * and `x` / `content` is primitive in `order`. + * + * @param primitive_x Output: coordinates of a primitive element of `order` (in `order`'s basis) + * @param content Output: content of `x`'s coordinate vector in order's basis + * @param order order of `alg` + * @param x element of order, must be in `order` + */ +void quat_alg_make_primitive(ibz_vec_4_t *primitive_x, + ibz_t *content, + const quat_alg_elem_t *x, + const quat_lattice_t *order); + +// end quat_quat_f +/** @} + */ + +/** @defgroup quat_lat_f Lattice functions + * @{ + */ + +void quat_lattice_intersect(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2); + +/** + * @brief Test whether x ∈ lat. If so, compute its coordinates in lat's basis. + * + * @param coord Output: Set to the coordinates of x in lat. May be NULL. + * @param lat The lattice, not necessarily in HNF but full rank + * @param x An element of the quaternion algebra + * @return true if x ∈ lat + */ +int quat_lattice_contains(ibz_vec_4_t *coord, const quat_lattice_t *lat, const quat_alg_elem_t *x); + +/** + * @brief Conjugate of a lattice with basis not in HNF + * + * @param conj Output: The lattice conjugate to lat. 
ATTENTION: is not under HNF + * @param lat Input lattice + */ +void quat_lattice_conjugate_without_hnf(quat_lattice_t *conj, const quat_lattice_t *lat); + +/** + * @brief Multiply a lattice and an algebra element + * + * The element is multiplied to the right of the lattice + * + * @param prod Output: Lattice lat*elem + * @param lat Input lattice + * @param elem Algebra element + * @param alg The quaternion algebra + */ +void quat_lattice_alg_elem_mul(quat_lattice_t *prod, + const quat_lattice_t *lat, + const quat_alg_elem_t *elem, + const quat_alg_t *alg); // ideal + +/** + * @brief Sample from the intersection of a lattice with a ball + * + * Sample a uniform non-zero vector of norm ≤ `radius` from the lattice. + * + * @param res Output: sampled quaternion from the lattice + * @param lattice Input lattice + * @param alg The quaternion algebra + * @param radius The ball radius (quaternion norm) + * @return 0 if an error occurred (ball too small or RNG error), 1 otherwise + */ +int quat_lattice_sample_from_ball(quat_alg_elem_t *res, + const quat_lattice_t *lattice, + const quat_alg_t *alg, + const ibz_t *radius); + +// end quat_lat_f +/** @} + */ + +/** @defgroup quat_lideal_f Functions for left ideals + * @{ + */ + +/** @defgroup quat_lideal_c Creating left ideals + * @{ + */ + +/** + * @brief Left ideal of order, generated by x and N as order*x+order*N + * + * @param lideal Output: left ideal + * @param alg quaternion algebra + * @param order maximal order of alg whose left ideal is searched + * @param x generating element. Must be non-zero + * @param N generating integer + * + * Creates the left ideal in order generated by the element x and the integer N. + * If x is not divisible (inside the order) by any integer divisor n>1 of N, + * then the norm of the output ideal is N. + * + */ +void quat_lideal_create(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const ibz_t *N, + const quat_lattice_t *order, + const quat_alg_t *alg); + +/** @} + */ + +/** @defgroup quat_lideal_gen Generators of left ideals + * @{ + */ + +/** + * @brief Generator of 'lideal' + * + * @returns 1 if such a generator was found, 0 otherwise + * @param gen Output: non scalar generator of lideal + * @param lideal left ideal + * @param alg the quaternion algebra + * + * Ideal is generated by gen and the ideal's norm + * + * Bound has as default value QUATERNION_lideal_generator_search_bound + */ +int quat_lideal_generator(quat_alg_elem_t *gen, const quat_left_ideal_t *lideal, const quat_alg_t *alg); +/** @} + */ + +/** @defgroup quat_lideal_op Operations on left ideals + * @{ + */ + +/** + * @brief Copies an ideal + * + * @param copy Output: The ideal into which another one is copied + * @param copied Source ideal copied into copy. The parent order is not copied (only the pointer). 
+ */ +void quat_lideal_copy(quat_left_ideal_t *copy, const quat_left_ideal_t *copied); + +/** + * @brief Conjugate of a left ideal (not in HNF) + * + * @param conj Output: Ideal conjugate to lideal, with norm and parent order correctly set, but its + * lattice not in HNF + * @param new_parent_order Output: Will be set to the right order of lideal, and serve as parent + * order for conj (so must have at least the lifetime of conj) + * @param lideal input left ideal (of which conj will be the conjugate) + * @param alg the quaternion algebra + */ +void quat_lideal_conjugate_without_hnf(quat_left_ideal_t *conj, + quat_lattice_t *new_parent_order, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg); + +/** + * @brief Intersection of two left ideals + * + * @param intersection Output: Left ideal which is the intersection of the 2 inputs + * @param lideal1 left ideal + * @param lideal2 left ideal + * @param alg the quaternion algebra + */ +void quat_lideal_inter(quat_left_ideal_t *intersection, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); + +/** + * @brief L2-reduce the basis of the left ideal, without considering its denominator + * + * This function reduce the basis of the lattice of the ideal, but it does completely ignore its + * denominator. So the outputs of this function must still e divided by the appropriate power of + * lideal.lattice.denom. + * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param reduced Output: Lattice defining the ideal, which has its basis in a lll-reduced form. + * Must be divided by lideal.lattice.denom before usage + * @param gram Output: Matrix of the quadratic form given by the norm on the basis of the reduced + * ideal, divided by the norm of the ideal + * @param lideal ideal whose basis will be reduced + * @param alg the quaternion algebra + */ +void quat_lideal_reduce_basis(ibz_mat_4x4_t *reduced, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg); // replaces lideal_lll + +/** + * @brief Multplies two ideals and L2-reduces the lattice of the result + * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param prod Output: The product ideal with its lattice basis being L2-reduced + * @param gram Output: Gram matrix of the reduced norm (as quadratic but not bilinear form) on the + * basis of prod, divided by the norm of prod + * @param lideal1 Ideal at left in the product + * @param lideal2 Ideal at right in the product + * @param alg The quaternion algebra + */ +void quat_lideal_lideal_mul_reduced(quat_left_ideal_t *prod, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); + +/** + * @brief Replaces an ideal by a smaller equivalent one of prime norm + * + * @returns 1 if the computation succeeded and 0 otherwise + * @param lideal In- and Output: Ideal to be replaced + * @param alg The quaternion algebra + * @param primality_num_iter number of repetition for primality testing + * @param equiv_bound_coeff bound on the coefficients for the candidates + */ +int quat_lideal_prime_norm_reduced_equivalent(quat_left_ideal_t *lideal, + const quat_alg_t *alg, + const int primality_num_iter, + const int equiv_bound_coeff); + +/** @} 
+ */ + +// end quat_lideal_f +/** @} + */ + +/** @defgroup quat_normeq Functions specific to special extremal maximal orders + * @{ + */ + +/** + * @brief Representing an integer by the quadratic norm form of a maximal extremal order + * + * @returns 1 if the computation succeeded + * @param gamma Output: a quaternion element + * @param n_gamma Target norm of gamma. n_gamma must be odd. If n_gamma/(p*params.order->q) < + * 2^QUAT_repres_bound_input failure is likely + * @param non_diag If set to 1 (instead of 0) and the order is O0, an additional property is ensured + * @param params Represent integer parameters specifying the algebra, the special extremal order, + * the number of trials for finding gamma and the number of iterations of the primality test. + * Special requirements apply if non-diag is set to 1 + * + * This algorithm finds a primitive quaternion element gamma of n_gamma inside any maximal extremal + * order. Failure is possible. Most efficient for the standard order. + * + * If non-diag is set to 1,this algorithm finds a primitive quaternion element gamma with some + * special properties used in fixed degree isogeny of n_gamma inside any maximal extremal order such + * that params->order->q=1 mod 4. Failure is possible. Most efficient for the standard order. The + * most important property is to avoid diagonal isogenies, meaning that the gamma returned by the + * algorithm must not be contained inside ZZ + 2 O where O is the maximal order params->order When O + * is the special order O0 corresponding to j=1728, we further need to avoid endomorphisms of E0xE0 + * and there is another requirement + * + * If non-diag is set to 1, the number of trials for finding gamma (in params), the number of + * iterations of the primality test and the value of params->order->q is required to be 1 mod 4 + */ +int quat_represent_integer(quat_alg_elem_t *gamma, + const ibz_t *n_gamma, + int non_diag, + const quat_represent_integer_params_t *params); + +/** @brief Basis change to (1,i,(i+j)/2,(1+ij)/2) for elements of O0 + * + * Change the basis in which an element is give from 1,i,j,ij to (1,i,(i+j)/2,(1+ij)/2) the ususal + * basis of the special maximal order O0 Only for elements of O0 + * + * @param vec Output: Coordinates of el in basis (1,i,(i+j)/2,(1+ij)/2) + * @param el Imput: An algebra element in O0 + */ +void quat_change_to_O0_basis(ibz_vec_4_t *vec, const quat_alg_elem_t *el); + +/** + * @brief Random O0-ideal of given norm + * + * Much faster if norm is prime and is_prime is set to 1 + * + * @param lideal Output: O0-ideal of norm norm + * @param norm Norm of the ideal to be found + * @param is_prime Indicates if norm is prime: 1 if it is, 0 otherwise + * @param params Represent Integer parameters from the level-dependent constants + * @param prime_cofactor Prime distinct from the prime p defining the algebra but of similar size + * and coprime to norm. If is_prime is 1, it might be NULL. 
+ * @returns 1 if success, 0 if no ideal found or randomness failed + */ +int quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, + const ibz_t *norm, + int is_prime, + const quat_represent_integer_params_t *params, + const ibz_t *prime_cofactor); +// end quat_normeq +/** @} + */ +// end quat_quat +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion_constants.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion_constants.h new file mode 100644 index 0000000000..5dca7d7cd4 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion_constants.h @@ -0,0 +1,6 @@ +#include +#define QUAT_primality_num_iter 32 +#define QUAT_repres_bound_input 20 +#define QUAT_equiv_bound_coeff 64 +#define FINDUV_box_size 2 +#define FINDUV_cube_size 624 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion_data.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion_data.c new file mode 100644 index 0000000000..baf3da0059 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion_data.c @@ -0,0 +1,3176 @@ +#include +#include +#include +const ibz_t QUAT_prime_cofactor = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x41,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x41,0x0,0x0,0x0,0x0,0x0,0x0,0x8000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x41,0x0,0x0,0x800000000000000}}} +#endif +; +const quat_alg_t QUATALG_PINFTY = { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x4ff}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x4ffffff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0x4ffffffffffffff}}} +#endif +}; +const quat_p_extremal_maximal_order_t EXTREMAL_ORDERS[7] = {{{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = 
(mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) 
{0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 1}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x10000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x1000000000000000}}} +#endif +, {{ +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x10000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x1000000000000000}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x8000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x800000000000000}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x80}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x80000000000000}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x8000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x800000000000000}}} +#endif +, 
+#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x10000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x1000000000000000}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 
+{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 5}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x78d4,0x7b85,0x7a64,0xf5f2,0x29b9,0x3696,0x6101,0xb874}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7b8578d4,0xf5f27a64,0x369629b9,0xb8746101}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xf5f27a647b8578d4,0xb8746101369629b9}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x78d4,0x7b85,0x7a64,0xf5f2,0x29b9,0x3696,0x6101,0xb874}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7b8578d4,0xf5f27a64,0x369629b9,0xb8746101}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xf5f27a647b8578d4,0xb8746101369629b9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbc6a,0x3dc2,0x3d32,0xfaf9,0x14dc,0x9b4b,0x3080,0x5c3a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3dc2bc6a,0xfaf93d32,0x9b4b14dc,0x5c3a3080}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xfaf93d323dc2bc6a,0x5c3a30809b4b14dc}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7d47,0x6fa4,0x2ad5,0x95ad,0x8a4b,0x49be,0x77e7,0xc898,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x6fa47d47,0x95ad2ad5,0x49be8a4b,0xc89877e7,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x95ad2ad56fa47d47,0xc89877e749be8a4b,0x1}}} +#endif +, +#if 0 
+#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3f47,0x7060,0x5e29,0x3e35,0xd950,0x2a1b,0x10ae,0x78dd,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x280}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x70603f47,0x3e355e29,0x2a1bd950,0x78dd10ae,0x0,0x0,0x0,0x2800000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3e355e2970603f47,0x78dd10ae2a1bd950,0x0,0x280000000000000}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbc6a,0x3dc2,0x3d32,0xfaf9,0x14dc,0x9b4b,0x3080,0x5c3a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3dc2bc6a,0xfaf93d32,0x9b4b14dc,0x5c3a3080}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xfaf93d323dc2bc6a,0x5c3a30809b4b14dc}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x3fe7,0x28ee,0x26e8,0xb194,0x6d7a,0xaf58,0xe568,0xd6d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x28ee3fe7,0xb19426e8,0xaf586d7a,0xd6de568}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xb19426e828ee3fe7,0xd6de568af586d7a}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x78d4,0x7b85,0x7a64,0xf5f2,0x29b9,0x3696,0x6101,0xb874}}} 
+#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7b8578d4,0xf5f27a64,0x369629b9,0xb8746101}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xf5f27a647b8578d4,0xb8746101369629b9}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7d47,0x6fa4,0x2ad5,0x95ad,0x8a4b,0x49be,0x77e7,0xc898,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x6fa47d47,0x95ad2ad5,0x49be8a4b,0xc89877e7,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x95ad2ad56fa47d47,0xc89877e749be8a4b,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 17}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, 
._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x307a,0x74c8,0x8082,0xb034,0x4e8a,0xc43a,0x399a,0x9ab}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x74c8307a,0xb0348082,0xc43a4e8a,0x9ab399a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xb034808274c8307a,0x9ab399ac43a4e8a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x954f,0x6bc9,0xca46,0x3d25,0x431b,0x46ed,0x8229,0x4f5,0xe453,0x6eb3,0x4530,0xeb3e,0x5306,0xb3e4,0x306e,0x45}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6bc9954f,0x3d25ca46,0x46ed431b,0x4f58229,0x6eb3e453,0xeb3e4530,0xb3e45306,0x45306e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3d25ca466bc9954f,0x4f5822946ed431b,0xeb3e45306eb3e453,0x45306eb3e45306}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 
16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe7f,0xca3a,0x2454,0xbd31,0xe562,0xcb4c,0x72f0,0x21}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xca3a0e7f,0xbd312454,0xcb4ce562,0x2172f0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbd312454ca3a0e7f,0x2172f0cb4ce562}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x307a,0x74c8,0x8082,0xb034,0x4e8a,0xc43a,0x399a,0x9ab}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x74c8307a,0xb0348082,0xc43a4e8a,0x9ab399a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xb034808274c8307a,0x9ab399ac43a4e8a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 
0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 37}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size 
= 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdd36,0xda6b,0xa943,0xd17a,0xe307,0x564c,0x4b0c,0x44d4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xda6bdd36,0xd17aa943,0x564ce307,0x44d44b0c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd17aa943da6bdd36,0x44d44b0c564ce307}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x3a03,0xc406,0x47c,0xa0a2,0x6dbc,0x1df4,0x796,0x6cee,0xce0c,0xe0c7,0xc7c,0xc7ce,0x7ce0,0xce0c,0xe0c7,0x7c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xc4063a03,0xa0a2047c,0x1df46dbc,0x6cee0796,0xe0c7ce0c,0xc7ce0c7c,0xce0c7ce0,0x7ce0c7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa0a2047cc4063a03,0x6cee07961df46dbc,0xc7ce0c7ce0c7ce0c,0x7ce0c7ce0c7ce0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 
0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x188f,0xa1e2,0x2148,0xd9f8,0x2e79,0x1a07,0xe1b2,0xd6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa1e2188f,0xd9f82148,0x1a072e79,0xd6e1b2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xd9f82148a1e2188f,0xd6e1b21a072e79}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdd36,0xda6b,0xa943,0xd17a,0xe307,0x564c,0x4b0c,0x44d4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xda6bdd36,0xd17aa943,0x564ce307,0x44d44b0c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd17aa943da6bdd36,0x44d44b0c564ce307}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 41}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x96a4,0x25b,0x14f2,0x3800,0x4e7c,0x7958,0xab7f,0x7bbe,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25b96a4,0x380014f2,0x79584e7c,0x7bbeab7f,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x380014f2025b96a4,0x7bbeab7f79584e7c,0x1}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x96a4,0x25b,0x14f2,0x3800,0x4e7c,0x7958,0xab7f,0x7bbe,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25b96a4,0x380014f2,0x79584e7c,0x7bbeab7f,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x380014f2025b96a4,0x7bbeab7f79584e7c,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcb52,0x12d,0xa79,0x1c00,0x273e,0xbcac,0x55bf,0xbddf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x12dcb52,0x1c000a79,0xbcac273e,0xbddf55bf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1c000a79012dcb52,0xbddf55bfbcac273e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x73e3,0x3339,0x19e7,0x4ba1,0x6ebc,0x2702,0xee62,0xdbd0,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x333973e3,0x4ba119e7,0x27026ebc,0xdbd0ee62,0x7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x4ba119e7333973e3,0xdbd0ee6227026ebc,0x7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca33,0x3dd0,0x1d92,0x9f0,0x2f81,0xafe9,0xe395,0x83f7,0xfffc,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x27f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3dd0ca33,0x9f01d92,0xafe92f81,0x83f7e395,0xfffffffc,0xffffffff,0xffffffff,0x27fffff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, 
._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9f01d923dd0ca33,0x83f7e395afe92f81,0xfffffffffffffffc,0x27fffffffffffff}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcb52,0x12d,0xa79,0x1c00,0x273e,0xbcac,0x55bf,0xbddf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x12dcb52,0x1c000a79,0xbcac273e,0xbddf55bf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1c000a79012dcb52,0xbddf55bfbcac273e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xeb73,0xf93c,0x71c0,0x87f5,0x667a,0xcb3c,0xb9cb,0x12fa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf93ceb73,0x87f571c0,0xcb3c667a,0x12fab9cb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x87f571c0f93ceb73,0x12fab9cbcb3c667a}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x96a4,0x25b,0x14f2,0x3800,0x4e7c,0x7958,0xab7f,0x7bbe,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25b96a4,0x380014f2,0x79584e7c,0x7bbeab7f,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x380014f2025b96a4,0x7bbeab7f79584e7c,0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = 
(mp_limb_t[]) {0x73e3,0x3339,0x19e7,0x4ba1,0x6ebc,0x2702,0xee62,0xdbd0,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x333973e3,0x4ba119e7,0x27026ebc,0xdbd0ee62,0x7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x4ba119e7333973e3,0xdbd0ee6227026ebc,0x7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 53}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7ffa,0x55af,0x7b9e,0xe2b9,0xa7af,0x578c,0xf76b,0xc227,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x55af7ffa,0xe2b97b9e,0x578ca7af,0xc227f76b,0xf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe2b97b9e55af7ffa,0xc227f76b578ca7af,0xf}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7ffa,0x55af,0x7b9e,0xe2b9,0xa7af,0x578c,0xf76b,0xc227,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x55af7ffa,0xe2b97b9e,0x578ca7af,0xc227f76b,0xf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe2b97b9e55af7ffa,0xc227f76b578ca7af,0xf}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, 
._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xbffd,0x2ad7,0xbdcf,0xf15c,0x53d7,0xabc6,0xfbb5,0xe113,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2ad7bffd,0xf15cbdcf,0xabc653d7,0xe113fbb5,0x7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xf15cbdcf2ad7bffd,0xe113fbb5abc653d7,0x7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xd16,0xf02b,0x1ce7,0xa2ef,0x54b,0x2c56,0x5963,0x667,0x6f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xf02b0d16,0xa2ef1ce7,0x2c56054b,0x6675963,0x6f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xa2ef1ce7f02b0d16,0x66759632c56054b,0x6f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf0ab,0x9d3b,0x6ea,0x84ac,0x62e5,0xdde9,0x882b,0xd021,0xffe2,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x13ff}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d3bf0ab,0x84ac06ea,0xdde962e5,0xd021882b,0xffffffe2,0xffffffff,0xffffffff,0x13ffffff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x84ac06ea9d3bf0ab,0xd021882bdde962e5,0xffffffffffffffe2,0x13ffffffffffffff}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xbffd,0x2ad7,0xbdcf,0xf15c,0x53d7,0xabc6,0xfbb5,0xe113,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2ad7bffd,0xf15cbdcf,0xabc653d7,0xe113fbb5,0x7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xf15cbdcf2ad7bffd,0xe113fbb5abc653d7,0x7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1f37,0x5c4a,0x13f1,0x770,0x7183,0x5600,0xda31,0x9281}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5c4a1f37,0x77013f1,0x56007183,0x9281da31}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x77013f15c4a1f37,0x9281da3156007183}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7ffa,0x55af,0x7b9e,0xe2b9,0xa7af,0x578c,0xf76b,0xc227,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x55af7ffa,0xe2b97b9e,0x578ca7af,0xc227f76b,0xf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe2b97b9e55af7ffa,0xc227f76b578ca7af,0xf}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xd16,0xf02b,0x1ce7,0xa2ef,0x54b,0x2c56,0x5963,0x667,0x6f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xf02b0d16,0xa2ef1ce7,0x2c56054b,0x6675963,0x6f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xa2ef1ce7f02b0d16,0x66759632c56054b,0x6f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 97}}; +const quat_left_ideal_t CONNECTING_IDEALS[7] = {{{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x0,0x0,0x0,0x0,0x6000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x60000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, 
._mp_d = (mp_limb_t[]) {0x2,0x6000000000000000}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x10000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x1000000000000000}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x0,0x0,0x0,0x0,0x6000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x60000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x2,0x6000000000000000}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x5000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x50000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x5000000000000000}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 
+{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x3000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x30000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x3000000000000000}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5fe,0x8673,0x157b,0x7f90,0xd2c5,0xd00b,0xa646,0x78f4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8673f5fe,0x7f90157b,0xd00bd2c5,0x78f4a646}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x7f90157b8673f5fe,0x78f4a646d00bd2c5}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfee5,0x2b,0xd6d8,0xe65c,0x68a3,0xe72d,0x373d,0x5b1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2bfee5,0xe65cd6d8,0xe72d68a3,0x5b1373d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xe65cd6d8002bfee5,0x5b1373de72d68a3}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5fe,0x8673,0x157b,0x7f90,0xd2c5,0xd00b,0xa646,0x78f4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8673f5fe,0x7f90157b,0xd00bd2c5,0x78f4a646}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x7f90157b8673f5fe,0x78f4a646d00bd2c5}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf719,0x8647,0x3ea3,0x9933,0x6a21,0xe8de,0x6f08,0x7343}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8647f719,0x99333ea3,0xe8de6a21,0x73436f08}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x99333ea38647f719,0x73436f08e8de6a21}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, 
._mp_d = (mp_limb_t[]) {0xfaff,0xc339,0xabd,0xbfc8,0xe962,0x6805,0x5323,0x3c7a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc339faff,0xbfc80abd,0x6805e962,0x3c7a5323}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbfc80abdc339faff,0x3c7a53236805e962}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8597,0x3af7,0xa5a,0xbb29,0x77c0,0xd2d9,0xf561,0x84f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3af78597,0xbb290a5a,0xd2d977c0,0x84ff561}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbb290a5a3af78597,0x84ff561d2d977c0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x604b,0x3c1e,0x9e8c,0x8146,0x18b7,0xb452,0xa68a,0xf44}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3c1e604b,0x81469e8c,0xb45218b7,0xf44a68a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x81469e8c3c1e604b,0xf44a68ab45218b7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 
+{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x519b,0xa90b,0xcdca,0xd5f5,0x757a,0x83dd,0xb354,0xe59}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa90b519b,0xd5f5cdca,0x83dd757a,0xe59b354}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd5f5cdcaa90b519b,0xe59b35483dd757a}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5e07,0xc4e3,0xf746,0x83d,0x5354,0x44c1,0x9c43,0x1f9f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc4e35e07,0x83df746,0x44c15354,0x1f9f9c43}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x83df746c4e35e07,0x1f9f9c4344c15354}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x19f2,0x5594,0xee77,0x52a2,0xf459,0x45c9,0x2187,0xb348}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x559419f2,0x52a2ee77,0x45c9f459,0xb3482187}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x52a2ee77559419f2,0xb348218745c9f459}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdbd3,0x967a,0x8a96,0x1df4,0x7845,0xd70,0x419a,0x222}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x967adbd3,0x1df48a96,0xd707845,0x222419a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1df48a96967adbd3,0x222419a0d707845}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 
+{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x19f2,0x5594,0xee77,0x52a2,0xf459,0x45c9,0x2187,0xb348}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x559419f2,0x52a2ee77,0x45c9f459,0xb3482187}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x52a2ee77559419f2,0xb348218745c9f459}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e1f,0xbf19,0x63e0,0x34ae,0x7c14,0x3859,0xdfed,0xb125}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbf193e1f,0x34ae63e0,0x38597c14,0xb125dfed}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x34ae63e0bf193e1f,0xb125dfed38597c14}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) 
{0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcf9,0xaaca,0x773b,0xa951,0xfa2c,0xa2e4,0x10c3,0x59a4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaaca0cf9,0xa951773b,0xa2e4fa2c,0x59a410c3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xa951773baaca0cf9,0x59a410c3a2e4fa2c}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa3a,0x67cf,0x6ad7,0xd031,0x701,0xebca,0xd852,0x2996}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x67cfaa3a,0xd0316ad7,0xebca0701,0x2996d852}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd0316ad767cfaa3a,0x2996d852ebca0701}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x275,0xd7ab,0xedeb,0xbc67,0xad41,0xaeb5,0xf2e5,0x148e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd7ab0275,0xbc67edeb,0xaeb5ad41,0x148ef2e5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbc67edebd7ab0275,0x148ef2e5aeb5ad41}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa3a,0x67cf,0x6ad7,0xd031,0x701,0xebca,0xd852,0x2996}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x67cfaa3a,0xd0316ad7,0xebca0701,0x2996d852}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd0316ad767cfaa3a,0x2996d852ebca0701}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa7c5,0x9024,0x7ceb,0x13c9,0x59c0,0x3d14,0xe56d,0x1507}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9024a7c5,0x13c97ceb,0x3d1459c0,0x1507e56d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x13c97ceb9024a7c5,0x1507e56d3d1459c0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd51d,0xb3e7,0xb56b,0xe818,0x380,0x75e5,0x6c29,0x14cb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb3e7d51d,0xe818b56b,0x75e50380,0x14cb6c29}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xe818b56bb3e7d51d,0x14cb6c2975e50380}}} +#endif +, &MAXORD_O0}}; +const quat_alg_elem_t CONJUGATING_ELEMENTS[7] = {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x10000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x1000000000000000}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x10000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x1,0x1000000000000000}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf94f,0x85ef,0x90f3,0xcc79,0x98d9,0x1a83,0x8d,0x67e1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x85eff94f,0xcc7990f3,0x1a8398d9,0x67e1008d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xcc7990f385eff94f,0x67e1008d1a8398d9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xf94f,0x85ef,0x90f3,0xcc79,0x98d9,0x1a83,0x8d,0x67e1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x85eff94f,0xcc7990f3,0x1a8398d9,0x67e1008d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) 
{0xcc7990f385eff94f,0x67e1008d1a8398d9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x3}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 
+{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6511,0x45fa,0xa368,0xe869,0x4db2,0x88fc,0x6989,0xbdf3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x45fa6511,0xe869a368,0x88fc4db2,0xbdf36989}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xe869a36845fa6511,0xbdf3698988fc4db2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x6511,0x45fa,0xa368,0xe869,0x4db2,0x88fc,0x6989,0xbdf3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x45fa6511,0xe869a368,0x88fc4db2,0xbdf36989}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xe869a36845fa6511,0xbdf3698988fc4db2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x5}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1e6b,0x5c4a,0x13f1,0x770,0x7183,0x5600,0xda31,0x9281}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5c4a1e6b,0x77013f1,0x56007183,0x9281da31}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x77013f15c4a1e6b,0x9281da3156007183}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x1e6b,0x5c4a,0x13f1,0x770,0x7183,0x5600,0xda31,0x9281}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x5c4a1e6b,0x77013f1,0x56007183,0x9281da31}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x77013f15c4a1e6b,0x9281da3156007183}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x4}}} +#elif GMP_LIMB_BITS == 32 
+{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +#endif +}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion_data.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion_data.h new file mode 100644 index 0000000000..a5eb1106e6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion_data.h @@ -0,0 +1,12 @@ +#include +#define MAXORD_O0 (EXTREMAL_ORDERS->order) +#define STANDARD_EXTREMAL_ORDER (EXTREMAL_ORDERS[0]) +#define NUM_ALTERNATE_EXTREMAL_ORDERS 6 +#define ALTERNATE_EXTREMAL_ORDERS (EXTREMAL_ORDERS+1) +#define ALTERNATE_CONNECTING_IDEALS (CONNECTING_IDEALS+1) +#define ALTERNATE_CONJUGATING_ELEMENTS (CONJUGATING_ELEMENTS+1) +extern const ibz_t QUAT_prime_cofactor; +extern const quat_alg_t QUATALG_PINFTY; +extern const quat_p_extremal_maximal_order_t EXTREMAL_ORDERS[7]; +extern const quat_left_ideal_t CONNECTING_IDEALS[7]; +extern const quat_alg_elem_t CONJUGATING_ELEMENTS[7]; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_arm64crypto.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_arm64crypto.h new file mode 100644 index 0000000000..88c4bf48d0 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_arm64crypto.h @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef RANDOMBYTES_ARM64CRYPTO_H +#define RANDOMBYTES_ARM64CRYPTO_H + +#include + +#define RNG_SUCCESS 0 +#define RNG_BAD_MAXLEN -1 +#define RNG_BAD_OUTBUF -2 +#define RNG_BAD_REQ_LEN -3 + +typedef struct { + unsigned char buffer[16]; + int buffer_pos; + unsigned long length_remaining; + unsigned char key[32]; + unsigned char ctr[16]; +} AES_XOF_struct; + +typedef struct { + unsigned char Key[32]; + unsigned char V[16]; + int reseed_counter; +} AES256_CTR_DRBG_struct; + +#endif /* RANDOMBYTES_ARM64CRYPTO_H */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_ctrdrbg_aesni.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_ctrdrbg_aesni.c new file mode 100644 index 0000000000..3fc67acfb6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_ctrdrbg_aesni.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: Apache-2.0 and Unknown +// +/* +NIST-developed software is provided by NIST as a public service. You may use, +copy, and distribute copies of the software in any medium, provided that you +keep intact this entire notice. You may improve, modify, and create derivative +works of the software or any portion of the software, and you may copy and +distribute such modifications or works. Modified works should carry a notice +stating that you changed the software and should note the date and nature of any +such change. Please explicitly acknowledge the National Institute of Standards +and Technology as the source of the software. + +NIST-developed software is expressly provided "AS IS." NIST MAKES NO WARRANTY OF +ANY KIND, EXPRESS, IMPLIED, IN FACT, OR ARISING BY OPERATION OF LAW, INCLUDING, +WITHOUT LIMITATION, THE IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE, NON-INFRINGEMENT, AND DATA ACCURACY. 
NIST NEITHER REPRESENTS +NOR WARRANTS THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR +ERROR-FREE, OR THAT ANY DEFECTS WILL BE CORRECTED. NIST DOES NOT WARRANT OR MAKE +ANY REPRESENTATIONS REGARDING THE USE OF THE SOFTWARE OR THE RESULTS THEREOF, +INCLUDING BUT NOT LIMITED TO THE CORRECTNESS, ACCURACY, RELIABILITY, OR +USEFULNESS OF THE SOFTWARE. + +You are solely responsible for determining the appropriateness of using and +distributing the software and you assume all risks associated with its use, +including but not limited to the risks and costs of program errors, compliance +with applicable laws, damage to or loss of data, programs or equipment, and the +unavailability or interruption of operation. This software is not intended to be +used in any situation where a failure could cause risk of injury or damage to +property. The software developed by NIST employees is not subject to copyright +protection within the United States. +*/ + +#include + +#include +#include "ctr_drbg.h" + +#ifdef ENABLE_CT_TESTING +#include +#endif + +#define RNG_SUCCESS 0 +#define RNG_BAD_MAXLEN -1 +#define RNG_BAD_OUTBUF -2 +#define RNG_BAD_REQ_LEN -3 + +CTR_DRBG_STATE drbg; + +#ifndef CTRDRBG_TEST_BENCH +static +#endif +void +randombytes_init_aes_ni(unsigned char *entropy_input, + unsigned char *personalization_string, + int security_strength) { + (void)security_strength; // fixed to 256 + CTR_DRBG_init(&drbg, entropy_input, personalization_string, + (personalization_string == NULL) ? 0 : CTR_DRBG_ENTROPY_LEN); +} + +#ifndef CTRDRBG_TEST_BENCH +static +#endif +int +randombytes_aes_ni(unsigned char *x, size_t xlen) { + CTR_DRBG_generate(&drbg, x, xlen, NULL, 0); + return RNG_SUCCESS; +} + +#ifdef RANDOMBYTES_AES_NI +SQISIGN_API +int randombytes(unsigned char *random_array, unsigned long long nbytes) { + int ret = randombytes_aes_ni(random_array, nbytes); +#ifdef ENABLE_CT_TESTING + VALGRIND_MAKE_MEM_UNDEFINED(random_array, ret); +#endif + return ret; +} + +SQISIGN_API +void randombytes_init(unsigned char *entropy_input, + unsigned char *personalization_string, + int security_strength) { + randombytes_init_aes_ni(entropy_input, personalization_string, + security_strength); +} +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_system.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_system.c new file mode 100644 index 0000000000..689c29b242 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_system.c @@ -0,0 +1,431 @@ +// SPDX-License-Identifier: MIT + +/* +The MIT License +Copyright (c) 2017 Daan Sprenkels +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ + +#include + +#ifdef ENABLE_CT_TESTING +#include +#endif + +// In the case that are compiling on linux, we need to define _GNU_SOURCE +// *before* randombytes.h is included. Otherwise SYS_getrandom will not be +// declared. +#if defined(__linux__) || defined(__GNU__) +#define _GNU_SOURCE +#endif /* defined(__linux__) || defined(__GNU__) */ + +#if defined(_WIN32) +/* Windows */ +#include +#include /* CryptAcquireContext, CryptGenRandom */ +#endif /* defined(_WIN32) */ + +/* wasi */ +#if defined(__wasi__) +#include +#endif + +/* kFreeBSD */ +#if defined(__FreeBSD_kernel__) && defined(__GLIBC__) +#define GNU_KFREEBSD +#endif + +#if defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) +/* Linux */ +// We would need to include , but not every target has access +// to the linux headers. We only need RNDGETENTCNT, so we instead inline it. +// RNDGETENTCNT is originally defined in `include/uapi/linux/random.h` in the +// linux repo. +#define RNDGETENTCNT 0x80045200 + +#include +#include +#include +#include +#include +#include +#include +#if (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && \ + ((__GLIBC__ > 2) || (__GLIBC_MINOR__ > 24)) +#define USE_GLIBC +#include +#endif /* (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && ((__GLIBC__ > 2) || \ + (__GLIBC_MINOR__ > 24)) */ +#include +#include +#include +#include + +// We need SSIZE_MAX as the maximum read len from /dev/urandom +#if !defined(SSIZE_MAX) +#define SSIZE_MAX (SIZE_MAX / 2 - 1) +#endif /* defined(SSIZE_MAX) */ + +#endif /* defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) */ + +#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) +/* Dragonfly, FreeBSD, NetBSD, OpenBSD (has arc4random) */ +#include +#if defined(BSD) +#include +#endif +/* GNU/Hurd defines BSD in sys/param.h which causes problems later */ +#if defined(__GNU__) +#undef BSD +#endif +#endif + +#if defined(__EMSCRIPTEN__) +#include +#include +#include +#include +#endif /* defined(__EMSCRIPTEN__) */ + +#if defined(_WIN32) +static int +randombytes_win32_randombytes(void *buf, size_t n) +{ + HCRYPTPROV ctx; + BOOL tmp; + DWORD to_read = 0; + const size_t MAX_DWORD = 0xFFFFFFFF; + + tmp = CryptAcquireContext(&ctx, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT); + if (tmp == FALSE) + return -1; + + while (n > 0) { + to_read = (DWORD)(n < MAX_DWORD ? n : MAX_DWORD); + tmp = CryptGenRandom(ctx, to_read, (BYTE *)buf); + if (tmp == FALSE) + return -1; + buf = ((char *)buf) + to_read; + n -= to_read; + } + + tmp = CryptReleaseContext(ctx, 0); + if (tmp == FALSE) + return -1; + + return 0; +} +#endif /* defined(_WIN32) */ + +#if defined(__wasi__) +static int +randombytes_wasi_randombytes(void *buf, size_t n) +{ + arc4random_buf(buf, n); + return 0; +} +#endif /* defined(__wasi__) */ + +#if (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || defined(SYS_getrandom)) +#if defined(USE_GLIBC) +// getrandom is declared in glibc. 
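+// glibc 2.25 and later declare a getrandom() wrapper in <sys/random.h>,
+// which is exactly what the (__GLIBC__ > 2) || (__GLIBC_MINOR__ > 24) check
+// defining USE_GLIBC above selects, so no syscall shim is needed in this branch.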
+#elif defined(SYS_getrandom) +static ssize_t +getrandom(void *buf, size_t buflen, unsigned int flags) +{ + return syscall(SYS_getrandom, buf, buflen, flags); +} +#endif + +static int +randombytes_linux_randombytes_getrandom(void *buf, size_t n) +{ + /* I have thought about using a separate PRF, seeded by getrandom, but + * it turns out that the performance of getrandom is good enough + * (250 MB/s on my laptop). + */ + size_t offset = 0, chunk; + int ret; + while (n > 0) { + /* getrandom does not allow chunks larger than 33554431 */ + chunk = n <= 33554431 ? n : 33554431; + do { + ret = getrandom((char *)buf + offset, chunk, 0); + } while (ret == -1 && errno == EINTR); + if (ret < 0) + return ret; + offset += ret; + n -= ret; + } + assert(n == 0); + return 0; +} +#endif /* (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || \ + defined(SYS_getrandom)) */ + +#if (defined(__linux__) || defined(GNU_KFREEBSD)) && !defined(SYS_getrandom) + +#if defined(__linux__) +static int +randombytes_linux_read_entropy_ioctl(int device, int *entropy) +{ + return ioctl(device, RNDGETENTCNT, entropy); +} + +static int +randombytes_linux_read_entropy_proc(FILE *stream, int *entropy) +{ + int retcode; + do { + rewind(stream); + retcode = fscanf(stream, "%d", entropy); + } while (retcode != 1 && errno == EINTR); + if (retcode != 1) { + return -1; + } + return 0; +} + +static int +randombytes_linux_wait_for_entropy(int device) +{ + /* We will block on /dev/random, because any increase in the OS' entropy + * level will unblock the request. I use poll here (as does libsodium), + * because we don't *actually* want to read from the device. */ + enum + { + IOCTL, + PROC + } strategy = IOCTL; + const int bits = 128; + struct pollfd pfd; + int fd; + FILE *proc_file; + int retcode, retcode_error = 0; // Used as return codes throughout this function + int entropy = 0; + + /* If the device has enough entropy already, we will want to return early */ + retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); + // printf("errno: %d (%s)\n", errno, strerror(errno)); + if (retcode != 0 && (errno == ENOTTY || errno == ENOSYS)) { + // The ioctl call on /dev/urandom has failed due to a + // - ENOTTY (unsupported action), or + // - ENOSYS (invalid ioctl; this happens on MIPS, see #22). + // + // We will fall back to reading from + // `/proc/sys/kernel/random/entropy_avail`. This less ideal, + // because it allocates a file descriptor, and it may not work + // in a chroot. But at this point it seems we have no better + // options left. 
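+        // /proc/sys/kernel/random/entropy_avail reports the kernel's entropy
+        // estimate in bits, the same unit as the RNDGETENTCNT ioctl, so it can
+        // be compared against `bits` (128) in the polling loop below.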
+ strategy = PROC; + // Open the entropy count file + proc_file = fopen("/proc/sys/kernel/random/entropy_avail", "r"); + if (proc_file == NULL) { + return -1; + } + } else if (retcode != 0) { + // Unrecoverable ioctl error + return -1; + } + if (entropy >= bits) { + return 0; + } + + do { + fd = open("/dev/random", O_RDONLY); + } while (fd == -1 && errno == EINTR); /* EAGAIN will not occur */ + if (fd == -1) { + /* Unrecoverable IO error */ + return -1; + } + + pfd.fd = fd; + pfd.events = POLLIN; + for (;;) { + retcode = poll(&pfd, 1, -1); + if (retcode == -1 && (errno == EINTR || errno == EAGAIN)) { + continue; + } else if (retcode == 1) { + if (strategy == IOCTL) { + retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); + } else if (strategy == PROC) { + retcode = randombytes_linux_read_entropy_proc(proc_file, &entropy); + } else { + return -1; // Unreachable + } + + if (retcode != 0) { + // Unrecoverable I/O error + retcode_error = retcode; + break; + } + if (entropy >= bits) { + break; + } + } else { + // Unreachable: poll() should only return -1 or 1 + retcode_error = -1; + break; + } + } + do { + retcode = close(fd); + } while (retcode == -1 && errno == EINTR); + if (strategy == PROC) { + do { + retcode = fclose(proc_file); + } while (retcode == -1 && errno == EINTR); + } + if (retcode_error != 0) { + return retcode_error; + } + return retcode; +} +#endif /* defined(__linux__) */ + +static int +randombytes_linux_randombytes_urandom(void *buf, size_t n) +{ + int fd; + size_t offset = 0, count; + ssize_t tmp; + do { + fd = open("/dev/urandom", O_RDONLY); + } while (fd == -1 && errno == EINTR); + if (fd == -1) + return -1; +#if defined(__linux__) + if (randombytes_linux_wait_for_entropy(fd) == -1) + return -1; +#endif + + while (n > 0) { + count = n <= SSIZE_MAX ? 
n : SSIZE_MAX; + tmp = read(fd, (char *)buf + offset, count); + if (tmp == -1 && (errno == EAGAIN || errno == EINTR)) { + continue; + } + if (tmp == -1) + return -1; /* Unrecoverable IO error */ + offset += tmp; + n -= tmp; + } + close(fd); + assert(n == 0); + return 0; +} +#endif /* defined(__linux__) && !defined(SYS_getrandom) */ + +#if defined(BSD) +static int +randombytes_bsd_randombytes(void *buf, size_t n) +{ + arc4random_buf(buf, n); + return 0; +} +#endif /* defined(BSD) */ + +#if defined(__EMSCRIPTEN__) +static int +randombytes_js_randombytes_nodejs(void *buf, size_t n) +{ + const int ret = EM_ASM_INT( + { + var crypto; + try { + crypto = require('crypto'); + } catch (error) { + return -2; + } + try { + writeArrayToMemory(crypto.randomBytes($1), $0); + return 0; + } catch (error) { + return -1; + } + }, + buf, + n); + switch (ret) { + case 0: + return 0; + case -1: + errno = EINVAL; + return -1; + case -2: + errno = ENOSYS; + return -1; + } + assert(false); // Unreachable +} +#endif /* defined(__EMSCRIPTEN__) */ + +SQISIGN_API +int +randombytes_select(unsigned char *buf, unsigned long long n) +{ +#if defined(__EMSCRIPTEN__) + return randombytes_js_randombytes_nodejs(buf, n); +#elif defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) +#if defined(USE_GLIBC) + /* Use getrandom system call */ + return randombytes_linux_randombytes_getrandom(buf, n); +#elif defined(SYS_getrandom) + /* Use getrandom system call */ + return randombytes_linux_randombytes_getrandom(buf, n); +#else + /* When we have enough entropy, we can read from /dev/urandom */ + return randombytes_linux_randombytes_urandom(buf, n); +#endif +#elif defined(BSD) + /* Use arc4random system call */ + return randombytes_bsd_randombytes(buf, n); +#elif defined(_WIN32) + /* Use windows API */ + return randombytes_win32_randombytes(buf, n); +#elif defined(__wasi__) + /* Use WASI */ + return randombytes_wasi_randombytes(buf, n); +#else +#error "randombytes(...) is not supported on this platform" +#endif +} + +#ifdef RANDOMBYTES_SYSTEM +SQISIGN_API +int +randombytes(unsigned char *x, unsigned long long xlen) +{ + + int ret = randombytes_select(x, (size_t)xlen); +#ifdef ENABLE_CT_TESTING + VALGRIND_MAKE_MEM_UNDEFINED(x, xlen); +#endif + return ret; +} + +SQISIGN_API +void +randombytes_init(unsigned char *entropy_input, + unsigned char *personalization_string, + int security_strength) +{ + (void)entropy_input; + (void)personalization_string; + (void)security_strength; +} +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/rng.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/rng.h new file mode 100644 index 0000000000..0a9ca0e465 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/rng.h @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef rng_h +#define rng_h + +#include + +/** + * Randombytes initialization. + * Initialization may be needed for some random number generators (e.g. CTR-DRBG). + * + * @param[in] entropy_input 48 bytes entropy input + * @param[in] personalization_string Personalization string + * @param[in] security_strength Security string + */ +SQISIGN_API +void randombytes_init(unsigned char *entropy_input, + unsigned char *personalization_string, + int security_strength); + +/** + * Random byte generation using /dev/urandom. + * The caller is responsible to allocate sufficient memory to hold x. + * + * @param[out] x Memory to hold the random bytes. 
+ * @param[in] xlen Number of random bytes to be generated + * @return int 0 on success, -1 otherwise + */ +SQISIGN_API +int randombytes_select(unsigned char *x, unsigned long long xlen); + +/** + * Random byte generation. + * The caller is responsible to allocate sufficient memory to hold x. + * + * @param[out] x Memory to hold the random bytes. + * @param[in] xlen Number of random bytes to be generated + * @return int 0 on success, -1 otherwise + */ +SQISIGN_API +int randombytes(unsigned char *x, unsigned long long xlen); + +#endif /* rng_h */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sig.h new file mode 100644 index 0000000000..4c33510084 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sig.h @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef SQISIGN_H +#define SQISIGN_H + +#include +#include + +#if defined(ENABLE_SIGN) +/** + * SQIsign keypair generation. + * + * The implementation corresponds to SQIsign.CompactKeyGen() in the SQIsign spec. + * The caller is responsible to allocate sufficient memory to hold pk and sk. + * + * @param[out] pk SQIsign public key + * @param[out] sk SQIsign secret key + * @return int status code + */ +SQISIGN_API +int sqisign_keypair(unsigned char *pk, unsigned char *sk); + +/** + * SQIsign signature generation. + * + * The implementation performs SQIsign.expandSK() + SQIsign.sign() in the SQIsign spec. + * Keys provided is a compacted secret keys. + * The caller is responsible to allocate sufficient memory to hold sm. + * + * @param[out] sm Signature concatenated with message + * @param[out] smlen Pointer to the length of sm + * @param[in] m Message to be signed + * @param[in] mlen Message length + * @param[in] sk Compacted secret key + * @return int status code + */ +SQISIGN_API +int sqisign_sign(unsigned char *sm, + unsigned long long *smlen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *sk); +#endif + +/** + * SQIsign open signature. + * + * The implementation performs SQIsign.verify(). If the signature verification succeeded, the + * original message is stored in m. Keys provided is a compact public key. The caller is responsible + * to allocate sufficient memory to hold m. + * + * @param[out] m Message stored if verification succeeds + * @param[out] mlen Pointer to the length of m + * @param[in] sm Signature concatenated with message + * @param[in] smlen Length of sm + * @param[in] pk Compacted public key + * @return int status code + */ +SQISIGN_API +int sqisign_open(unsigned char *m, + unsigned long long *mlen, + const unsigned char *sm, + unsigned long long smlen, + const unsigned char *pk); + +/** + * SQIsign verify signature. + * + * If the signature verification succeeded, returns 0, otherwise 1. + * + * @param[out] m Message stored if verification succeeds + * @param[out] mlen Pointer to the length of m + * @param[in] sig Signature + * @param[in] siglen Length of sig + * @param[in] pk Compacted public key + * @return int 0 if verification succeeded, 1 otherwise. 
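+ *
+ * Note that, unlike sqisign_open(), m and mlen are inputs here: the message
+ * against which the detached signature sig is checked. A minimal caller-side
+ * sketch (msg, msg_len, sig_buf and pk_buf are placeholder buffers holding a
+ * message, an encoded signature and an encoded public key):
+ *
+ *   if (sqisign_verify(msg, msg_len, sig_buf, SIGNATURE_BYTES, pk_buf) == 0) {
+ *       // signature accepted
+ *   }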
+ */ +SQISIGN_API +int sqisign_verify(const unsigned char *m, + unsigned long long mlen, + const unsigned char *sig, + unsigned long long siglen, + const unsigned char *pk); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sign.c new file mode 100644 index 0000000000..9216bbe4d3 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sign.c @@ -0,0 +1,634 @@ +#include +#include +#include +#include +#include +#include + +// compute the commitment with ideal to isogeny clapotis +// and apply it to the basis of E0 (together with the multiplication by some scalar u) +static bool +commit(ec_curve_t *E_com, ec_basis_t *basis_even_com, quat_left_ideal_t *lideal_com) +{ + + bool found = false; + + found = quat_sampling_random_ideal_O0_given_norm(lideal_com, &COM_DEGREE, 1, &QUAT_represent_integer_params, NULL); + // replacing it with a shorter prime norm equivalent ideal + found = found && quat_lideal_prime_norm_reduced_equivalent( + lideal_com, &QUATALG_PINFTY, QUAT_primality_num_iter, QUAT_equiv_bound_coeff); + // ideal to isogeny clapotis + found = found && dim2id2iso_arbitrary_isogeny_evaluation(basis_even_com, E_com, lideal_com); + return found; +} + +static void +compute_challenge_ideal_signature(quat_left_ideal_t *lideal_chall_two, const signature_t *sig, const secret_key_t *sk) +{ + ibz_vec_2_t vec; + ibz_vec_2_init(&vec); + + // vec is a vector [1, chall_coeff] coefficients encoding the kernel of the challenge + // isogeny as B[0] + chall_coeff*B[1] where B is the canonical basis of the + // 2^TORSION_EVEN_POWER torsion of EA + ibz_set(&vec[0], 1); + ibz_copy_digit_array(&vec[1], sig->chall_coeff); + + // now we compute the ideal associated to the challenge + // for that, we need to find vec such that + // the kernel of the challenge isogeny is generated by vec[0]*B0[0] + vec[1]*B0[1] where B0 + // is the image through the secret key isogeny of the canonical basis E0 + ibz_mat_2x2_eval(&vec, &(sk->mat_BAcan_to_BA0_two), &vec); + + // lideal_chall_two is the pullback of the ideal challenge through the secret key ideal + id2iso_kernel_dlogs_to_ideal_even(lideal_chall_two, &vec, TORSION_EVEN_POWER); + assert(ibz_cmp(&lideal_chall_two->norm, &TORSION_PLUS_2POWER) == 0); + + ibz_vec_2_finalize(&vec); +} + +static void +sample_response(quat_alg_elem_t *x, const quat_lattice_t *lattice, const ibz_t *lattice_content) +{ + ibz_t bound; + ibz_init(&bound); + ibz_pow(&bound, &ibz_const_two, SQIsign_response_length); + ibz_sub(&bound, &bound, &ibz_const_one); + ibz_mul(&bound, &bound, lattice_content); + + int ok UNUSED = quat_lattice_sample_from_ball(x, lattice, &QUATALG_PINFTY, &bound); + assert(ok); + + ibz_finalize(&bound); +} + +static void +compute_response_quat_element(quat_alg_elem_t *resp_quat, + ibz_t *lattice_content, + const secret_key_t *sk, + const quat_left_ideal_t *lideal_chall_two, + const quat_left_ideal_t *lideal_commit) +{ + quat_left_ideal_t lideal_chall_secret; + quat_lattice_t lattice_hom_chall_to_com, lat_commit; + + // Init + quat_left_ideal_init(&lideal_chall_secret); + quat_lattice_init(&lat_commit); + quat_lattice_init(&lattice_hom_chall_to_com); + + // lideal_chall_secret = lideal_secret * lideal_chall_two + quat_lideal_inter(&lideal_chall_secret, lideal_chall_two, &(sk->secret_ideal), &QUATALG_PINFTY); + + // now we compute lideal_com_to_chall which is dual(Icom)* lideal_chall_secret + quat_lattice_conjugate_without_hnf(&lat_commit, &(lideal_commit->lattice)); + 
quat_lattice_intersect(&lattice_hom_chall_to_com, &lideal_chall_secret.lattice, &lat_commit); + + // sampling the smallest response + ibz_mul(lattice_content, &lideal_chall_secret.norm, &lideal_commit->norm); + sample_response(resp_quat, &lattice_hom_chall_to_com, lattice_content); + + // Clean up + quat_left_ideal_finalize(&lideal_chall_secret); + quat_lattice_finalize(&lat_commit); + quat_lattice_finalize(&lattice_hom_chall_to_com); +} + +static void +compute_backtracking_signature(signature_t *sig, quat_alg_elem_t *resp_quat, ibz_t *lattice_content, ibz_t *remain) +{ + uint_fast8_t backtracking; + ibz_t tmp; + ibz_init(&tmp); + + ibz_vec_4_t dummy_coord; + ibz_vec_4_init(&dummy_coord); + + quat_alg_make_primitive(&dummy_coord, &tmp, resp_quat, &MAXORD_O0); + ibz_mul(&resp_quat->denom, &resp_quat->denom, &tmp); + assert(quat_lattice_contains(NULL, &MAXORD_O0, resp_quat)); + + // the backtracking is the common part of the response and the challenge + // its degree is the scalar tmp computed above such that quat_resp is in tmp * O0. + backtracking = ibz_two_adic(&tmp); + sig->backtracking = backtracking; + + ibz_pow(&tmp, &ibz_const_two, backtracking); + ibz_div(lattice_content, remain, lattice_content, &tmp); + + ibz_finalize(&tmp); + ibz_vec_4_finalize(&dummy_coord); +} + +static uint_fast8_t +compute_random_aux_norm_and_helpers(signature_t *sig, + ibz_t *random_aux_norm, + ibz_t *degree_resp_inv, + ibz_t *remain, + const ibz_t *lattice_content, + quat_alg_elem_t *resp_quat, + quat_left_ideal_t *lideal_com_resp, + quat_left_ideal_t *lideal_commit) +{ + uint_fast8_t pow_dim2_deg_resp; + uint_fast8_t exp_diadic_val_full_resp; + + ibz_t tmp, degree_full_resp, degree_odd_resp, norm_d; + + // Init + ibz_init(°ree_full_resp); + ibz_init(°ree_odd_resp); + ibz_init(&norm_d); + ibz_init(&tmp); + + quat_alg_norm(°ree_full_resp, &norm_d, resp_quat, &QUATALG_PINFTY); + + // dividing by n(lideal_com) * n(lideal_secret_chall) + assert(ibz_is_one(&norm_d)); + ibz_div(°ree_full_resp, remain, °ree_full_resp, lattice_content); + assert(ibz_cmp(remain, &ibz_const_zero) == 0); + + // computing the diadic valuation + exp_diadic_val_full_resp = ibz_two_adic(°ree_full_resp); + sig->two_resp_length = exp_diadic_val_full_resp; + + // removing the power of two part + ibz_pow(&tmp, &ibz_const_two, exp_diadic_val_full_resp); + ibz_div(°ree_odd_resp, remain, °ree_full_resp, &tmp); + assert(ibz_cmp(remain, &ibz_const_zero) == 0); +#ifndef NDEBUG + ibz_pow(&tmp, &ibz_const_two, SQIsign_response_length - sig->backtracking); + assert(ibz_cmp(&tmp, °ree_odd_resp) > 0); +#endif + + // creating the ideal + quat_alg_conj(resp_quat, resp_quat); + + // setting the norm + ibz_mul(&tmp, &lideal_commit->norm, °ree_odd_resp); + quat_lideal_create(lideal_com_resp, resp_quat, &tmp, &MAXORD_O0, &QUATALG_PINFTY); + + // now we compute the ideal_aux + // computing the norm + pow_dim2_deg_resp = SQIsign_response_length - exp_diadic_val_full_resp - sig->backtracking; + ibz_pow(remain, &ibz_const_two, pow_dim2_deg_resp); + ibz_sub(random_aux_norm, remain, °ree_odd_resp); + + // multiplying by 2^HD_extra_torsion to account for the fact that + // we use extra torsion above the kernel + for (int i = 0; i < HD_extra_torsion; i++) + ibz_mul(remain, remain, &ibz_const_two); + + ibz_invmod(degree_resp_inv, °ree_odd_resp, remain); + + ibz_finalize(°ree_full_resp); + ibz_finalize(°ree_odd_resp); + ibz_finalize(&norm_d); + ibz_finalize(&tmp); + + return pow_dim2_deg_resp; +} + +static int +evaluate_random_aux_isogeny_signature(ec_curve_t *E_aux, + 
ec_basis_t *B_aux, + const ibz_t *norm, + const quat_left_ideal_t *lideal_com_resp) +{ + quat_left_ideal_t lideal_aux; + quat_left_ideal_t lideal_aux_resp_com; + + // Init + quat_left_ideal_init(&lideal_aux); + quat_left_ideal_init(&lideal_aux_resp_com); + + // sampling the ideal at random + int found = quat_sampling_random_ideal_O0_given_norm( + &lideal_aux, norm, 0, &QUAT_represent_integer_params, &QUAT_prime_cofactor); + + if (found) { + // pushing forward + quat_lideal_inter(&lideal_aux_resp_com, lideal_com_resp, &lideal_aux, &QUATALG_PINFTY); + + // now we evaluate this isogeny on the basis of E0 + found = dim2id2iso_arbitrary_isogeny_evaluation(B_aux, E_aux, &lideal_aux_resp_com); + + // Clean up + quat_left_ideal_finalize(&lideal_aux_resp_com); + quat_left_ideal_finalize(&lideal_aux); + } + + return found; +} + +static int +compute_dim2_isogeny_challenge(theta_couple_curve_with_basis_t *codomain, + theta_couple_curve_with_basis_t *domain, + const ibz_t *degree_resp_inv, + int pow_dim2_deg_resp, + int exp_diadic_val_full_resp, + int reduced_order) +{ + // now, we compute the isogeny Phi : Ecom x Eaux -> Echl' x Eaux' + // where Echl' is 2^exp_diadic_val_full_resp isogenous to Echal + // ker Phi = <(Bcom_can.P,Baux.P),(Bcom_can.Q,Baux.Q)> + + // preparing the domain + theta_couple_curve_t EcomXEaux; + copy_curve(&EcomXEaux.E1, &domain->E1); + copy_curve(&EcomXEaux.E2, &domain->E2); + + // preparing the kernel + theta_kernel_couple_points_t dim_two_ker; + copy_bases_to_kernel(&dim_two_ker, &domain->B1, &domain->B2); + + // dividing by the degree of the response + digit_t scalar[NWORDS_ORDER]; + ibz_to_digit_array(scalar, degree_resp_inv); + ec_mul(&dim_two_ker.T1.P2, scalar, reduced_order, &dim_two_ker.T1.P2, &EcomXEaux.E2); + ec_mul(&dim_two_ker.T2.P2, scalar, reduced_order, &dim_two_ker.T2.P2, &EcomXEaux.E2); + ec_mul(&dim_two_ker.T1m2.P2, scalar, reduced_order, &dim_two_ker.T1m2.P2, &EcomXEaux.E2); + + // and multiplying by 2^exp_diadic... 
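+    // (doubling exp_diadic_val_full_resp times: since reduced_order equals
+    // pow_dim2_deg_resp + HD_extra_torsion + exp_diadic_val_full_resp, this
+    // brings the kernel points down to order 2^(pow_dim2_deg_resp + HD_extra_torsion).)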
+ double_couple_point_iter(&dim_two_ker.T1, exp_diadic_val_full_resp, &dim_two_ker.T1, &EcomXEaux); + double_couple_point_iter(&dim_two_ker.T2, exp_diadic_val_full_resp, &dim_two_ker.T2, &EcomXEaux); + double_couple_point_iter(&dim_two_ker.T1m2, exp_diadic_val_full_resp, &dim_two_ker.T1m2, &EcomXEaux); + + theta_couple_point_t pushed_points[3]; + theta_couple_point_t *const Tev1 = pushed_points + 0, *const Tev2 = pushed_points + 1, + *const Tev1m2 = pushed_points + 2; + + // Set points on the commitment curve + copy_point(&Tev1->P1, &domain->B1.P); + copy_point(&Tev2->P1, &domain->B1.Q); + copy_point(&Tev1m2->P1, &domain->B1.PmQ); + + // Zero points on the aux curve + ec_point_init(&Tev1->P2); + ec_point_init(&Tev2->P2); + ec_point_init(&Tev1m2->P2); + + theta_couple_curve_t codomain_product; + + // computation of the dim2 isogeny + if (!theta_chain_compute_and_eval_randomized(pow_dim2_deg_resp, + &EcomXEaux, + &dim_two_ker, + true, + &codomain_product, + pushed_points, + sizeof(pushed_points) / sizeof(*pushed_points))) + return 0; + + assert(test_couple_point_order_twof(Tev1, &codomain_product, reduced_order)); + + // Set the auxiliary curve + copy_curve(&codomain->E1, &codomain_product.E2); + + // Set the codomain curve from the dim 2 isogeny + // it should always be the first curve + copy_curve(&codomain->E2, &codomain_product.E1); + + // Set the evaluated basis points + copy_point(&codomain->B1.P, &Tev1->P2); + copy_point(&codomain->B1.Q, &Tev2->P2); + copy_point(&codomain->B1.PmQ, &Tev1m2->P2); + + copy_point(&codomain->B2.P, &Tev1->P1); + copy_point(&codomain->B2.Q, &Tev2->P1); + copy_point(&codomain->B2.PmQ, &Tev1m2->P1); + return 1; +} + +static int +compute_small_chain_isogeny_signature(ec_curve_t *E_chall_2, + ec_basis_t *B_chall_2, + const quat_alg_elem_t *resp_quat, + int pow_dim2_deg_resp, + int length) +{ + int ret = 1; + + ibz_t two_pow; + ibz_init(&two_pow); + + ibz_vec_2_t vec_resp_two; + ibz_vec_2_init(&vec_resp_two); + + quat_left_ideal_t lideal_resp_two; + quat_left_ideal_init(&lideal_resp_two); + + // computing the ideal + ibz_pow(&two_pow, &ibz_const_two, length); + + // we compute the generator of the challenge ideal + quat_lideal_create(&lideal_resp_two, resp_quat, &two_pow, &MAXORD_O0, &QUATALG_PINFTY); + + // computing the coefficients of the kernel in terms of the basis of O0 + id2iso_ideal_to_kernel_dlogs_even(&vec_resp_two, &lideal_resp_two); + + ec_point_t points[3]; + copy_point(&points[0], &B_chall_2->P); + copy_point(&points[1], &B_chall_2->Q); + copy_point(&points[2], &B_chall_2->PmQ); + + // getting down to the right order and applying the matrix + ec_dbl_iter_basis(B_chall_2, pow_dim2_deg_resp + HD_extra_torsion, B_chall_2, E_chall_2); + assert(test_basis_order_twof(B_chall_2, E_chall_2, length)); + + ec_point_t ker; + // applying the vector to find the kernel + ec_biscalar_mul_ibz_vec(&ker, &vec_resp_two, length, B_chall_2, E_chall_2); + assert(test_point_order_twof(&ker, E_chall_2, length)); + + // computing the isogeny and pushing the points + if (ec_eval_small_chain(E_chall_2, &ker, length, points, 3, true)) { + ret = 0; + } + + // copying the result + copy_point(&B_chall_2->P, &points[0]); + copy_point(&B_chall_2->Q, &points[1]); + copy_point(&B_chall_2->PmQ, &points[2]); + + ibz_finalize(&two_pow); + ibz_vec_2_finalize(&vec_resp_two); + quat_left_ideal_finalize(&lideal_resp_two); + + return ret; +} + +static int +compute_challenge_codomain_signature(const signature_t *sig, + secret_key_t *sk, + ec_curve_t *E_chall, + const ec_curve_t *E_chall_2, + 
ec_basis_t *B_chall_2) +{ + ec_isog_even_t phi_chall; + ec_basis_t bas_sk; + copy_basis(&bas_sk, &sk->canonical_basis); + + phi_chall.curve = sk->curve; + phi_chall.length = TORSION_EVEN_POWER - sig->backtracking; + assert(test_basis_order_twof(&bas_sk, &sk->curve, TORSION_EVEN_POWER)); + + // Compute the kernel + { + ec_ladder3pt(&phi_chall.kernel, sig->chall_coeff, &bas_sk.P, &bas_sk.Q, &bas_sk.PmQ, &sk->curve); + } + assert(test_point_order_twof(&phi_chall.kernel, &sk->curve, TORSION_EVEN_POWER)); + + // Double kernel to get correct order + ec_dbl_iter(&phi_chall.kernel, sig->backtracking, &phi_chall.kernel, &sk->curve); + + assert(test_point_order_twof(&phi_chall.kernel, E_chall, phi_chall.length)); + + // Compute the codomain from challenge isogeny + if (ec_eval_even(E_chall, &phi_chall, NULL, 0)) + return 0; + +#ifndef NDEBUG + fp2_t j_chall, j_codomain; + ec_j_inv(&j_codomain, E_chall_2); + ec_j_inv(&j_chall, E_chall); + // apparently its always the second one curve + assert(fp2_is_equal(&j_chall, &j_codomain)); +#endif + + // applying the isomorphism from E_chall_2 to E_chall + ec_isom_t isom; + if (ec_isomorphism(&isom, E_chall_2, E_chall)) + return 0; // error due to a corner case with 1/p probability + ec_iso_eval(&B_chall_2->P, &isom); + ec_iso_eval(&B_chall_2->Q, &isom); + ec_iso_eval(&B_chall_2->PmQ, &isom); + + return 1; +} + +static void +set_aux_curve_signature(signature_t *sig, ec_curve_t *E_aux) +{ + ec_normalize_curve(E_aux); + fp2_copy(&sig->E_aux_A, &E_aux->A); +} + +static void +compute_and_set_basis_change_matrix(signature_t *sig, + const ec_basis_t *B_aux_2, + ec_basis_t *B_chall_2, + ec_curve_t *E_aux_2, + ec_curve_t *E_chall, + int f) +{ + // Matrices for change of bases matrices + ibz_mat_2x2_t mat_Baux2_to_Baux2_can, mat_Bchall_can_to_Bchall; + ibz_mat_2x2_init(&mat_Baux2_to_Baux2_can); + ibz_mat_2x2_init(&mat_Bchall_can_to_Bchall); + + // Compute canonical bases + ec_basis_t B_can_chall, B_aux_2_can; + sig->hint_chall = ec_curve_to_basis_2f_to_hint(&B_can_chall, E_chall, TORSION_EVEN_POWER); + sig->hint_aux = ec_curve_to_basis_2f_to_hint(&B_aux_2_can, E_aux_2, TORSION_EVEN_POWER); + +#ifndef NDEBUG + { + // Ensure all points have the desired order + assert(test_basis_order_twof(&B_aux_2_can, E_aux_2, TORSION_EVEN_POWER)); + assert(test_basis_order_twof(B_aux_2, E_aux_2, f)); + fp2_t w0; + weil(&w0, f, &B_aux_2->P, &B_aux_2->Q, &B_aux_2->PmQ, E_aux_2); + } +#endif + + // compute the matrix to go from B_aux_2 to B_aux_2_can + change_of_basis_matrix_tate_invert(&mat_Baux2_to_Baux2_can, &B_aux_2_can, B_aux_2, E_aux_2, f); + + // apply the change of basis to B_chall_2 + matrix_application_even_basis(B_chall_2, E_chall, &mat_Baux2_to_Baux2_can, f); + +#ifndef NDEBUG + { + // Ensure all points have the desired order + assert(test_basis_order_twof(&B_can_chall, E_chall, TORSION_EVEN_POWER)); + } +#endif + + // compute the matrix to go from B_chall_can to B_chall_2 + change_of_basis_matrix_tate(&mat_Bchall_can_to_Bchall, B_chall_2, &B_can_chall, E_chall, f); + + // Assert all values in the matrix are of the expected size for packing + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][1]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][1]) <= SQIsign_response_length + HD_extra_torsion); + + // Set the basis change matrix 
to signature + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][0], &(mat_Bchall_can_to_Bchall[0][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][1], &(mat_Bchall_can_to_Bchall[0][1])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][0], &(mat_Bchall_can_to_Bchall[1][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][1], &(mat_Bchall_can_to_Bchall[1][1])); + + // Finalise the matrices + ibz_mat_2x2_finalize(&mat_Bchall_can_to_Bchall); + ibz_mat_2x2_finalize(&mat_Baux2_to_Baux2_can); +} + +int +protocols_sign(signature_t *sig, const public_key_t *pk, secret_key_t *sk, const unsigned char *m, size_t l) +{ + int ret = 0; + int reduced_order = 0; // work around false positive gcc warning + + uint_fast8_t pow_dim2_deg_resp; + assert(SQIsign_response_length <= (intmax_t)UINT_FAST8_MAX); // otherwise we might need more bits there + + ibz_t remain, lattice_content, random_aux_norm, degree_resp_inv; + ibz_init(&remain); + ibz_init(&lattice_content); + ibz_init(&random_aux_norm); + ibz_init(°ree_resp_inv); + + quat_alg_elem_t resp_quat; + quat_alg_elem_init(&resp_quat); + + quat_left_ideal_t lideal_commit, lideal_com_resp; + quat_left_ideal_init(&lideal_commit); + quat_left_ideal_init(&lideal_com_resp); + + // This structure holds two curves E1 x E2 together with a basis + // Bi of E[2^n] for each of these curves + theta_couple_curve_with_basis_t Ecom_Eaux; + // This structure holds two curves E1 x E2 together with a basis + // Bi of Ei[2^n] + theta_couple_curve_with_basis_t Eaux2_Echall2; + + // This will hold the challenge curve + ec_curve_t E_chall = sk->curve; + + ec_curve_init(&Ecom_Eaux.E1); + ec_curve_init(&Ecom_Eaux.E2); + + while (!ret) { + + // computing the commitment + ret = commit(&Ecom_Eaux.E1, &Ecom_Eaux.B1, &lideal_commit); + + // start again if the commitment generation has failed + if (!ret) { + continue; + } + + // Hash the message to a kernel generator + // i.e. 
a scalar such that ker = P + [s]Q + hash_to_challenge(&sig->chall_coeff, pk, &Ecom_Eaux.E1, m, l); + // Compute the challenge ideal and response quaternion element + { + quat_left_ideal_t lideal_chall_two; + quat_left_ideal_init(&lideal_chall_two); + + // computing the challenge ideal + compute_challenge_ideal_signature(&lideal_chall_two, sig, sk); + compute_response_quat_element(&resp_quat, &lattice_content, sk, &lideal_chall_two, &lideal_commit); + + // Clean up + quat_left_ideal_finalize(&lideal_chall_two); + } + + // computing the amount of backtracking we're making + // and removing it + compute_backtracking_signature(sig, &resp_quat, &lattice_content, &remain); + + // creating lideal_com * lideal_resp + // we first compute the norm of lideal_resp + // norm of the resp_quat + pow_dim2_deg_resp = compute_random_aux_norm_and_helpers(sig, + &random_aux_norm, + °ree_resp_inv, + &remain, + &lattice_content, + &resp_quat, + &lideal_com_resp, + &lideal_commit); + + // notational conventions: + // B0 = canonical basis of E0 + // B_com = image through commitment isogeny (odd degree) of canonical basis of E0 + // B_aux = image through aux_resp_com isogeny (odd degree) of canonical basis of E0 + + if (pow_dim2_deg_resp > 0) { + // Evaluate the random aux ideal on the curve E0 and its basis to find E_aux and B_aux + ret = + evaluate_random_aux_isogeny_signature(&Ecom_Eaux.E2, &Ecom_Eaux.B2, &random_aux_norm, &lideal_com_resp); + + // auxiliary isogeny computation failed we must start again + if (!ret) { + continue; + } + +#ifndef NDEBUG + // testing that the order of the points in the bases is as expected + assert(test_basis_order_twof(&Ecom_Eaux.B1, &Ecom_Eaux.E1, TORSION_EVEN_POWER)); + assert(test_basis_order_twof(&Ecom_Eaux.B2, &Ecom_Eaux.E2, TORSION_EVEN_POWER)); +#endif + + // applying the matrix to compute Baux + // first, we reduce to the relevant order + reduced_order = pow_dim2_deg_resp + HD_extra_torsion + sig->two_resp_length; + ec_dbl_iter_basis(&Ecom_Eaux.B1, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B1, &Ecom_Eaux.E1); + ec_dbl_iter_basis(&Ecom_Eaux.B2, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B2, &Ecom_Eaux.E2); + + // Given all the above data, compute a dim two isogeny with domain + // E_com x E_aux + // and codomain + // E_aux_2 x E_chall_2 (note: E_chall_2 is isomorphic to E_chall) + // and evaluated points stored as bases in + // B_aux_2 on E_aux_2 + // B_chall_2 on E_chall_2 + ret = compute_dim2_isogeny_challenge( + &Eaux2_Echall2, &Ecom_Eaux, °ree_resp_inv, pow_dim2_deg_resp, sig->two_resp_length, reduced_order); + if (!ret) + continue; + } else { + // No 2d isogeny needed, so simulate a "Kani matrix" identity here + copy_curve(&Eaux2_Echall2.E1, &Ecom_Eaux.E1); + copy_curve(&Eaux2_Echall2.E2, &Ecom_Eaux.E1); + + reduced_order = sig->two_resp_length; + ec_dbl_iter_basis(&Eaux2_Echall2.B1, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B1, &Ecom_Eaux.E1); + ec_dbl_iter_basis(&Eaux2_Echall2.B1, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B1, &Ecom_Eaux.E1); + copy_basis(&Eaux2_Echall2.B2, &Eaux2_Echall2.B1); + } + + // computation of the remaining small chain of two isogenies when needed + if (sig->two_resp_length > 0) { + if (!compute_small_chain_isogeny_signature( + &Eaux2_Echall2.E2, &Eaux2_Echall2.B2, &resp_quat, pow_dim2_deg_resp, sig->two_resp_length)) { + assert(0); // this shouldn't fail + } + } + + // computation of the challenge codomain + if (!compute_challenge_codomain_signature(sig, sk, &E_chall, &Eaux2_Echall2.E2, &Eaux2_Echall2.B2)) + assert(0); // 
this shouldn't fail + } + + // Set to the signature the Montgomery A-coefficient of E_aux_2 + set_aux_curve_signature(sig, &Eaux2_Echall2.E1); + + // Set the basis change matrix from canonical bases to the supplied bases + compute_and_set_basis_change_matrix( + sig, &Eaux2_Echall2.B1, &Eaux2_Echall2.B2, &Eaux2_Echall2.E1, &E_chall, reduced_order); + + quat_alg_elem_finalize(&resp_quat); + quat_left_ideal_finalize(&lideal_commit); + quat_left_ideal_finalize(&lideal_com_resp); + + ibz_finalize(&lattice_content); + ibz_finalize(&remain); + ibz_finalize(°ree_resp_inv); + ibz_finalize(&random_aux_norm); + + return ret; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/signature.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/signature.h new file mode 100644 index 0000000000..ba38c360e6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/signature.h @@ -0,0 +1,97 @@ +/** @file + * + * @brief The key generation and signature protocols + */ + +#ifndef SIGNATURE_H +#define SIGNATURE_H + +#include +#include +#include +#include + +/** @defgroup signature SQIsignHD key generation and signature protocols + * @{ + */ +/** @defgroup signature_t Types for SQIsignHD key generation and signature protocols + * @{ + */ + +/** @brief Type for the secret keys + * + * @typedef secret_key_t + * + * @struct secret_key + * + */ +typedef struct secret_key +{ + ec_curve_t curve; /// the public curve, but with little precomputations + quat_left_ideal_t secret_ideal; + ibz_mat_2x2_t mat_BAcan_to_BA0_two; // mat_BA0_to_BAcan*BA0 = BAcan, where BAcan is the + // canonical basis of EA[2^e], and BA0 the image of the + // basis of E0[2^e] through the secret isogeny + ec_basis_t canonical_basis; // the canonical basis of the public key curve +} secret_key_t; + +/** @} + */ + +/*************************** Functions *****************************/ + +void secret_key_init(secret_key_t *sk); +void secret_key_finalize(secret_key_t *sk); + +/** + * @brief Key generation + * + * @param pk Output: will contain the public key + * @param sk Output: will contain the secret key + * @returns 1 if success, 0 otherwise + */ +int protocols_keygen(public_key_t *pk, secret_key_t *sk); + +/** + * @brief Signature computation + * + * @param sig Output: will contain the signature + * @param sk secret key + * @param pk public key + * @param m message + * @param l size + * @returns 1 if success, 0 otherwise + */ +int protocols_sign(signature_t *sig, const public_key_t *pk, secret_key_t *sk, const unsigned char *m, size_t l); + +/*************************** Encoding *****************************/ + +/** @defgroup encoding Encoding and decoding functions + * @{ + */ + +/** + * @brief Encodes a secret key as a byte array + * + * @param enc : Byte array to encode the secret key (including public key) in + * @param sk : Secret key to encode + * @param pk : Public key to encode + */ +void secret_key_to_bytes(unsigned char *enc, const secret_key_t *sk, const public_key_t *pk); + +/** + * @brief Decodes a secret key (and public key) from a byte array + * + * @param sk : Structure to decode the secret key in + * @param pk : Structure to decode the public key in + * @param enc : Byte array to decode + */ +void secret_key_from_bytes(secret_key_t *sk, public_key_t *pk, const unsigned char *enc); + +/** @} + */ + +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign.c new file mode 100644 index 0000000000..7335c38d9a 
--- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign.c @@ -0,0 +1,146 @@ +#include +#include +#include +#include +#if defined(ENABLE_SIGN) +#include +#endif + +#if defined(ENABLE_SIGN) +SQISIGN_API +int +sqisign_keypair(unsigned char *pk, unsigned char *sk) +{ + int ret = 0; + secret_key_t skt; + public_key_t pkt = { 0 }; + secret_key_init(&skt); + + ret = !protocols_keygen(&pkt, &skt); + + secret_key_to_bytes(sk, &skt, &pkt); + public_key_to_bytes(pk, &pkt); + secret_key_finalize(&skt); + return ret; +} + +SQISIGN_API +int +sqisign_sign(unsigned char *sm, + unsigned long long *smlen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *sk) +{ + int ret = 0; + secret_key_t skt; + public_key_t pkt = { 0 }; + signature_t sigt; + secret_key_init(&skt); + secret_key_from_bytes(&skt, &pkt, sk); + + memmove(sm + SIGNATURE_BYTES, m, mlen); + + ret = !protocols_sign(&sigt, &pkt, &skt, sm + SIGNATURE_BYTES, mlen); + if (ret != 0) { + *smlen = 0; + goto err; + } + + signature_to_bytes(sm, &sigt); + *smlen = SIGNATURE_BYTES + mlen; + +err: + secret_key_finalize(&skt); + return ret; +} + +SQISIGN_API +int +sqisign_sign_signature(unsigned char *s, + unsigned long long *slen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *sk) +{ + int ret = 0; + secret_key_t skt; + public_key_t pkt = { 0 }; + signature_t sigt; + secret_key_init(&skt); + secret_key_from_bytes(&skt, &pkt, sk); + + ret = !protocols_sign(&sigt, &pkt, &skt, m, mlen); + if (ret != 0) { + *slen = 0; + goto err; + } + + signature_to_bytes(s, &sigt); + *slen = SIGNATURE_BYTES; + +err: + secret_key_finalize(&skt); + return ret; +} +#endif + +SQISIGN_API +int +sqisign_open(unsigned char *m, + unsigned long long *mlen, + const unsigned char *sm, + unsigned long long smlen, + const unsigned char *pk) +{ + int ret = 0; + public_key_t pkt = { 0 }; + signature_t sigt; + + public_key_from_bytes(&pkt, pk); + signature_from_bytes(&sigt, sm); + + ret = !protocols_verify(&sigt, &pkt, sm + SIGNATURE_BYTES, smlen - SIGNATURE_BYTES); + + if (!ret) { + *mlen = smlen - SIGNATURE_BYTES; + memmove(m, sm + SIGNATURE_BYTES, *mlen); + } else { + *mlen = 0; + memset(m, 0, smlen - SIGNATURE_BYTES); + } + + return ret; +} + +SQISIGN_API +int +sqisign_verify(const unsigned char *m, + unsigned long long mlen, + const unsigned char *sig, + unsigned long long siglen, + const unsigned char *pk) +{ + + int ret = 0; + public_key_t pkt = { 0 }; + signature_t sigt; + + public_key_from_bytes(&pkt, pk); + signature_from_bytes(&sigt, sig); + + ret = !protocols_verify(&sigt, &pkt, m, mlen); + + return ret; +} + +SQISIGN_API +int +sqisign_verify_signature(const unsigned char *sig, + unsigned long long siglen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *pk) +{ + return sqisign_verify(m, mlen, sig, siglen, pk); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign_namespace.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign_namespace.h new file mode 100644 index 0000000000..007d2572b9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign_namespace.h @@ -0,0 +1,1071 @@ + +#ifndef SQISIGN_NAMESPACE_H +#define SQISIGN_NAMESPACE_H + +//#define DISABLE_NAMESPACING + +#if defined(_WIN32) +#define SQISIGN_API __declspec(dllexport) +#else +#define SQISIGN_API __attribute__((visibility("default"))) +#endif + +#define PARAM_JOIN3_(a, b, c) sqisign_##a##_##b##_##c +#define PARAM_JOIN3(a, b, c) PARAM_JOIN3_(a, b, c) +#define 
PARAM_NAME3(end, s) PARAM_JOIN3(SQISIGN_VARIANT, end, s) + +#define PARAM_JOIN2_(a, b) sqisign_##a##_##b +#define PARAM_JOIN2(a, b) PARAM_JOIN2_(a, b) +#define PARAM_NAME2(end, s) PARAM_JOIN2(end, s) + +#ifndef DISABLE_NAMESPACING +#define SQISIGN_NAMESPACE_GENERIC(s) PARAM_NAME2(gen, s) +#else +#define SQISIGN_NAMESPACE_GENERIC(s) s +#endif + +#if defined(SQISIGN_VARIANT) && !defined(DISABLE_NAMESPACING) +#if defined(SQISIGN_BUILD_TYPE_REF) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(ref, s) +#elif defined(SQISIGN_BUILD_TYPE_OPT) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(opt, s) +#elif defined(SQISIGN_BUILD_TYPE_BROADWELL) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(broadwell, s) +#elif defined(SQISIGN_BUILD_TYPE_ARM64CRYPTO) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(arm64crypto, s) +#else +#error "Build type not known" +#endif + +#else +#define SQISIGN_NAMESPACE(s) s +#endif + +// Namespacing symbols exported from algebra.c: +#undef quat_alg_add +#undef quat_alg_conj +#undef quat_alg_coord_mul +#undef quat_alg_elem_copy +#undef quat_alg_elem_copy_ibz +#undef quat_alg_elem_equal +#undef quat_alg_elem_is_zero +#undef quat_alg_elem_mul_by_scalar +#undef quat_alg_elem_set +#undef quat_alg_equal_denom +#undef quat_alg_init_set_ui +#undef quat_alg_make_primitive +#undef quat_alg_mul +#undef quat_alg_norm +#undef quat_alg_normalize +#undef quat_alg_scalar +#undef quat_alg_sub + +#define quat_alg_add SQISIGN_NAMESPACE_GENERIC(quat_alg_add) +#define quat_alg_conj SQISIGN_NAMESPACE_GENERIC(quat_alg_conj) +#define quat_alg_coord_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_coord_mul) +#define quat_alg_elem_copy SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy) +#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy_ibz) +#define quat_alg_elem_equal SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_equal) +#define quat_alg_elem_is_zero SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_is_zero) +#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_mul_by_scalar) +#define quat_alg_elem_set SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_set) +#define quat_alg_equal_denom SQISIGN_NAMESPACE_GENERIC(quat_alg_equal_denom) +#define quat_alg_init_set_ui SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set_ui) +#define quat_alg_make_primitive SQISIGN_NAMESPACE_GENERIC(quat_alg_make_primitive) +#define quat_alg_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_mul) +#define quat_alg_norm SQISIGN_NAMESPACE_GENERIC(quat_alg_norm) +#define quat_alg_normalize SQISIGN_NAMESPACE_GENERIC(quat_alg_normalize) +#define quat_alg_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_scalar) +#define quat_alg_sub SQISIGN_NAMESPACE_GENERIC(quat_alg_sub) + +// Namespacing symbols exported from api.c: +#undef crypto_sign +#undef crypto_sign_keypair +#undef crypto_sign_open + +#define crypto_sign SQISIGN_NAMESPACE(crypto_sign) +#define crypto_sign_keypair SQISIGN_NAMESPACE(crypto_sign_keypair) +#define crypto_sign_open SQISIGN_NAMESPACE(crypto_sign_open) + +// Namespacing symbols exported from basis.c: +#undef ec_curve_to_basis_2f_from_hint +#undef ec_curve_to_basis_2f_to_hint +#undef ec_recover_y +#undef lift_basis +#undef lift_basis_normalized + +#define ec_curve_to_basis_2f_from_hint SQISIGN_NAMESPACE(ec_curve_to_basis_2f_from_hint) +#define ec_curve_to_basis_2f_to_hint SQISIGN_NAMESPACE(ec_curve_to_basis_2f_to_hint) +#define ec_recover_y SQISIGN_NAMESPACE(ec_recover_y) +#define lift_basis SQISIGN_NAMESPACE(lift_basis) +#define lift_basis_normalized SQISIGN_NAMESPACE(lift_basis_normalized) + +// Namespacing symbols exported from biextension.c: +#undef 
clear_cofac +#undef ec_dlog_2_tate +#undef ec_dlog_2_weil +#undef fp2_frob +#undef reduced_tate +#undef weil + +#define clear_cofac SQISIGN_NAMESPACE(clear_cofac) +#define ec_dlog_2_tate SQISIGN_NAMESPACE(ec_dlog_2_tate) +#define ec_dlog_2_weil SQISIGN_NAMESPACE(ec_dlog_2_weil) +#define fp2_frob SQISIGN_NAMESPACE(fp2_frob) +#define reduced_tate SQISIGN_NAMESPACE(reduced_tate) +#define weil SQISIGN_NAMESPACE(weil) + +// Namespacing symbols exported from common.c: +#undef hash_to_challenge +#undef public_key_finalize +#undef public_key_init + +#define hash_to_challenge SQISIGN_NAMESPACE(hash_to_challenge) +#define public_key_finalize SQISIGN_NAMESPACE(public_key_finalize) +#define public_key_init SQISIGN_NAMESPACE(public_key_init) + +// Namespacing symbols exported from dim2.c: +#undef ibz_2x2_mul_mod +#undef ibz_mat_2x2_add +#undef ibz_mat_2x2_copy +#undef ibz_mat_2x2_det_from_ibz +#undef ibz_mat_2x2_eval +#undef ibz_mat_2x2_inv_mod +#undef ibz_mat_2x2_set +#undef ibz_vec_2_set + +#define ibz_2x2_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_2x2_mul_mod) +#define ibz_mat_2x2_add SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_add) +#define ibz_mat_2x2_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_copy) +#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_det_from_ibz) +#define ibz_mat_2x2_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_eval) +#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_inv_mod) +#define ibz_mat_2x2_set SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_set) +#define ibz_vec_2_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_set) + +// Namespacing symbols exported from dim2id2iso.c: +#undef dim2id2iso_arbitrary_isogeny_evaluation +#undef dim2id2iso_ideal_to_isogeny_clapotis +#undef find_uv +#undef fixed_degree_isogeny_and_eval + +#define dim2id2iso_arbitrary_isogeny_evaluation SQISIGN_NAMESPACE(dim2id2iso_arbitrary_isogeny_evaluation) +#define dim2id2iso_ideal_to_isogeny_clapotis SQISIGN_NAMESPACE(dim2id2iso_ideal_to_isogeny_clapotis) +#define find_uv SQISIGN_NAMESPACE(find_uv) +#define fixed_degree_isogeny_and_eval SQISIGN_NAMESPACE(fixed_degree_isogeny_and_eval) + +// Namespacing symbols exported from dim4.c: +#undef ibz_inv_dim4_make_coeff_mpm +#undef ibz_inv_dim4_make_coeff_pmp +#undef ibz_mat_4x4_copy +#undef ibz_mat_4x4_equal +#undef ibz_mat_4x4_eval +#undef ibz_mat_4x4_eval_t +#undef ibz_mat_4x4_gcd +#undef ibz_mat_4x4_identity +#undef ibz_mat_4x4_inv_with_det_as_denom +#undef ibz_mat_4x4_is_identity +#undef ibz_mat_4x4_mul +#undef ibz_mat_4x4_negate +#undef ibz_mat_4x4_scalar_div +#undef ibz_mat_4x4_scalar_mul +#undef ibz_mat_4x4_transpose +#undef ibz_mat_4x4_zero +#undef ibz_vec_4_add +#undef ibz_vec_4_content +#undef ibz_vec_4_copy +#undef ibz_vec_4_copy_ibz +#undef ibz_vec_4_is_zero +#undef ibz_vec_4_linear_combination +#undef ibz_vec_4_negate +#undef ibz_vec_4_scalar_div +#undef ibz_vec_4_scalar_mul +#undef ibz_vec_4_set +#undef ibz_vec_4_sub +#undef quat_qf_eval + +#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_mpm) +#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_pmp) +#define ibz_mat_4x4_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_copy) +#define ibz_mat_4x4_equal SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_equal) +#define ibz_mat_4x4_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval) +#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval_t) +#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_gcd) +#define ibz_mat_4x4_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_identity) 
+#define ibz_mat_4x4_inv_with_det_as_denom SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_inv_with_det_as_denom) +#define ibz_mat_4x4_is_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_identity) +#define ibz_mat_4x4_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_mul) +#define ibz_mat_4x4_negate SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_negate) +#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_div) +#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_mul) +#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_transpose) +#define ibz_mat_4x4_zero SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_zero) +#define ibz_vec_4_add SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_add) +#define ibz_vec_4_content SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_content) +#define ibz_vec_4_copy SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy) +#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_ibz) +#define ibz_vec_4_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_is_zero) +#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination) +#define ibz_vec_4_negate SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_negate) +#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_div) +#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul) +#define ibz_vec_4_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_set) +#define ibz_vec_4_sub SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_sub) +#define quat_qf_eval SQISIGN_NAMESPACE_GENERIC(quat_qf_eval) + +// Namespacing symbols exported from ec.c: +#undef cswap_points +#undef ec_biscalar_mul +#undef ec_curve_init +#undef ec_curve_init_from_A +#undef ec_curve_normalize_A24 +#undef ec_curve_verify_A +#undef ec_dbl +#undef ec_dbl_iter +#undef ec_dbl_iter_basis +#undef ec_has_zero_coordinate +#undef ec_is_basis_four_torsion +#undef ec_is_equal +#undef ec_is_four_torsion +#undef ec_is_two_torsion +#undef ec_is_zero +#undef ec_j_inv +#undef ec_ladder3pt +#undef ec_mul +#undef ec_normalize_curve +#undef ec_normalize_curve_and_A24 +#undef ec_normalize_point +#undef ec_point_init +#undef select_point +#undef xADD +#undef xDBL +#undef xDBLADD +#undef xDBLMUL +#undef xDBL_A24 +#undef xDBL_E0 +#undef xMUL + +#define cswap_points SQISIGN_NAMESPACE(cswap_points) +#define ec_biscalar_mul SQISIGN_NAMESPACE(ec_biscalar_mul) +#define ec_curve_init SQISIGN_NAMESPACE(ec_curve_init) +#define ec_curve_init_from_A SQISIGN_NAMESPACE(ec_curve_init_from_A) +#define ec_curve_normalize_A24 SQISIGN_NAMESPACE(ec_curve_normalize_A24) +#define ec_curve_verify_A SQISIGN_NAMESPACE(ec_curve_verify_A) +#define ec_dbl SQISIGN_NAMESPACE(ec_dbl) +#define ec_dbl_iter SQISIGN_NAMESPACE(ec_dbl_iter) +#define ec_dbl_iter_basis SQISIGN_NAMESPACE(ec_dbl_iter_basis) +#define ec_has_zero_coordinate SQISIGN_NAMESPACE(ec_has_zero_coordinate) +#define ec_is_basis_four_torsion SQISIGN_NAMESPACE(ec_is_basis_four_torsion) +#define ec_is_equal SQISIGN_NAMESPACE(ec_is_equal) +#define ec_is_four_torsion SQISIGN_NAMESPACE(ec_is_four_torsion) +#define ec_is_two_torsion SQISIGN_NAMESPACE(ec_is_two_torsion) +#define ec_is_zero SQISIGN_NAMESPACE(ec_is_zero) +#define ec_j_inv SQISIGN_NAMESPACE(ec_j_inv) +#define ec_ladder3pt SQISIGN_NAMESPACE(ec_ladder3pt) +#define ec_mul SQISIGN_NAMESPACE(ec_mul) +#define ec_normalize_curve SQISIGN_NAMESPACE(ec_normalize_curve) +#define ec_normalize_curve_and_A24 SQISIGN_NAMESPACE(ec_normalize_curve_and_A24) +#define ec_normalize_point SQISIGN_NAMESPACE(ec_normalize_point) +#define ec_point_init SQISIGN_NAMESPACE(ec_point_init) +#define 
select_point SQISIGN_NAMESPACE(select_point) +#define xADD SQISIGN_NAMESPACE(xADD) +#define xDBL SQISIGN_NAMESPACE(xDBL) +#define xDBLADD SQISIGN_NAMESPACE(xDBLADD) +#define xDBLMUL SQISIGN_NAMESPACE(xDBLMUL) +#define xDBL_A24 SQISIGN_NAMESPACE(xDBL_A24) +#define xDBL_E0 SQISIGN_NAMESPACE(xDBL_E0) +#define xMUL SQISIGN_NAMESPACE(xMUL) + +// Namespacing symbols exported from ec_jac.c: +#undef ADD +#undef DBL +#undef DBLW +#undef copy_jac_point +#undef jac_from_ws +#undef jac_init +#undef jac_is_equal +#undef jac_neg +#undef jac_to_ws +#undef jac_to_xz +#undef jac_to_xz_add_components +#undef select_jac_point + +#define ADD SQISIGN_NAMESPACE(ADD) +#define DBL SQISIGN_NAMESPACE(DBL) +#define DBLW SQISIGN_NAMESPACE(DBLW) +#define copy_jac_point SQISIGN_NAMESPACE(copy_jac_point) +#define jac_from_ws SQISIGN_NAMESPACE(jac_from_ws) +#define jac_init SQISIGN_NAMESPACE(jac_init) +#define jac_is_equal SQISIGN_NAMESPACE(jac_is_equal) +#define jac_neg SQISIGN_NAMESPACE(jac_neg) +#define jac_to_ws SQISIGN_NAMESPACE(jac_to_ws) +#define jac_to_xz SQISIGN_NAMESPACE(jac_to_xz) +#define jac_to_xz_add_components SQISIGN_NAMESPACE(jac_to_xz_add_components) +#define select_jac_point SQISIGN_NAMESPACE(select_jac_point) + +// Namespacing symbols exported from encode_signature.c: +#undef secret_key_from_bytes +#undef secret_key_to_bytes + +#define secret_key_from_bytes SQISIGN_NAMESPACE(secret_key_from_bytes) +#define secret_key_to_bytes SQISIGN_NAMESPACE(secret_key_to_bytes) + +// Namespacing symbols exported from encode_verification.c: +#undef public_key_from_bytes +#undef public_key_to_bytes +#undef signature_from_bytes +#undef signature_to_bytes + +#define public_key_from_bytes SQISIGN_NAMESPACE(public_key_from_bytes) +#define public_key_to_bytes SQISIGN_NAMESPACE(public_key_to_bytes) +#define signature_from_bytes SQISIGN_NAMESPACE(signature_from_bytes) +#define signature_to_bytes SQISIGN_NAMESPACE(signature_to_bytes) + +// Namespacing symbols exported from finit.c: +#undef ibz_mat_2x2_finalize +#undef ibz_mat_2x2_init +#undef ibz_mat_4x4_finalize +#undef ibz_mat_4x4_init +#undef ibz_vec_2_finalize +#undef ibz_vec_2_init +#undef ibz_vec_4_finalize +#undef ibz_vec_4_init +#undef quat_alg_elem_finalize +#undef quat_alg_elem_init +#undef quat_alg_finalize +#undef quat_alg_init_set +#undef quat_lattice_finalize +#undef quat_lattice_init +#undef quat_left_ideal_finalize +#undef quat_left_ideal_init + +#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_finalize) +#define ibz_mat_2x2_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_init) +#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_finalize) +#define ibz_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_init) +#define ibz_vec_2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_finalize) +#define ibz_vec_2_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_init) +#define ibz_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_finalize) +#define ibz_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_init) +#define quat_alg_elem_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_finalize) +#define quat_alg_elem_init SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_init) +#define quat_alg_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_finalize) +#define quat_alg_init_set SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set) +#define quat_lattice_finalize SQISIGN_NAMESPACE_GENERIC(quat_lattice_finalize) +#define quat_lattice_init SQISIGN_NAMESPACE_GENERIC(quat_lattice_init) +#define quat_left_ideal_finalize SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_finalize) +#define 
quat_left_ideal_init SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_init) + +// Namespacing symbols exported from fp.c: +#undef fp_select +#undef p +#undef p2 + +#define fp_select SQISIGN_NAMESPACE(fp_select) +#define p SQISIGN_NAMESPACE(p) +#define p2 SQISIGN_NAMESPACE(p2) + +// Namespacing symbols exported from fp.c, fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c: +#undef fp_exp3div4 +#undef fp_inv +#undef fp_is_square +#undef fp_sqrt + +#define fp_exp3div4 SQISIGN_NAMESPACE(fp_exp3div4) +#define fp_inv SQISIGN_NAMESPACE(fp_inv) +#define fp_is_square SQISIGN_NAMESPACE(fp_is_square) +#define fp_sqrt SQISIGN_NAMESPACE(fp_sqrt) + +// Namespacing symbols exported from fp2.c: +#undef fp2_add +#undef fp2_add_one +#undef fp2_batched_inv +#undef fp2_copy +#undef fp2_cswap +#undef fp2_decode +#undef fp2_encode +#undef fp2_half +#undef fp2_inv +#undef fp2_is_equal +#undef fp2_is_one +#undef fp2_is_square +#undef fp2_is_zero +#undef fp2_mul +#undef fp2_mul_small +#undef fp2_neg +#undef fp2_pow_vartime +#undef fp2_print +#undef fp2_select +#undef fp2_set_one +#undef fp2_set_small +#undef fp2_set_zero +#undef fp2_sqr +#undef fp2_sqrt +#undef fp2_sqrt_verify +#undef fp2_sub + +#define fp2_add SQISIGN_NAMESPACE(fp2_add) +#define fp2_add_one SQISIGN_NAMESPACE(fp2_add_one) +#define fp2_batched_inv SQISIGN_NAMESPACE(fp2_batched_inv) +#define fp2_copy SQISIGN_NAMESPACE(fp2_copy) +#define fp2_cswap SQISIGN_NAMESPACE(fp2_cswap) +#define fp2_decode SQISIGN_NAMESPACE(fp2_decode) +#define fp2_encode SQISIGN_NAMESPACE(fp2_encode) +#define fp2_half SQISIGN_NAMESPACE(fp2_half) +#define fp2_inv SQISIGN_NAMESPACE(fp2_inv) +#define fp2_is_equal SQISIGN_NAMESPACE(fp2_is_equal) +#define fp2_is_one SQISIGN_NAMESPACE(fp2_is_one) +#define fp2_is_square SQISIGN_NAMESPACE(fp2_is_square) +#define fp2_is_zero SQISIGN_NAMESPACE(fp2_is_zero) +#define fp2_mul SQISIGN_NAMESPACE(fp2_mul) +#define fp2_mul_small SQISIGN_NAMESPACE(fp2_mul_small) +#define fp2_neg SQISIGN_NAMESPACE(fp2_neg) +#define fp2_pow_vartime SQISIGN_NAMESPACE(fp2_pow_vartime) +#define fp2_print SQISIGN_NAMESPACE(fp2_print) +#define fp2_select SQISIGN_NAMESPACE(fp2_select) +#define fp2_set_one SQISIGN_NAMESPACE(fp2_set_one) +#define fp2_set_small SQISIGN_NAMESPACE(fp2_set_small) +#define fp2_set_zero SQISIGN_NAMESPACE(fp2_set_zero) +#define fp2_sqr SQISIGN_NAMESPACE(fp2_sqr) +#define fp2_sqrt SQISIGN_NAMESPACE(fp2_sqrt) +#define fp2_sqrt_verify SQISIGN_NAMESPACE(fp2_sqrt_verify) +#define fp2_sub SQISIGN_NAMESPACE(fp2_sub) + +// Namespacing symbols exported from fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c: +#undef fp_copy +#undef fp_cswap +#undef fp_decode +#undef fp_decode_reduce +#undef fp_div3 +#undef fp_encode +#undef fp_half +#undef fp_is_equal +#undef fp_is_zero +#undef fp_mul_small +#undef fp_neg +#undef fp_set_one +#undef fp_set_small +#undef fp_set_zero + +#define fp_copy SQISIGN_NAMESPACE(fp_copy) +#define fp_cswap SQISIGN_NAMESPACE(fp_cswap) +#define fp_decode SQISIGN_NAMESPACE(fp_decode) +#define fp_decode_reduce SQISIGN_NAMESPACE(fp_decode_reduce) +#define fp_div3 SQISIGN_NAMESPACE(fp_div3) +#define fp_encode SQISIGN_NAMESPACE(fp_encode) +#define fp_half SQISIGN_NAMESPACE(fp_half) +#define fp_is_equal SQISIGN_NAMESPACE(fp_is_equal) +#define fp_is_zero SQISIGN_NAMESPACE(fp_is_zero) +#define fp_mul_small SQISIGN_NAMESPACE(fp_mul_small) +#define fp_neg SQISIGN_NAMESPACE(fp_neg) +#define fp_set_one SQISIGN_NAMESPACE(fp_set_one) +#define fp_set_small SQISIGN_NAMESPACE(fp_set_small) +#define fp_set_zero SQISIGN_NAMESPACE(fp_set_zero) +#define ONE 
SQISIGN_NAMESPACE(ONE) +#define ZERO SQISIGN_NAMESPACE(ZERO) + +// Namespacing symbols exported from fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c, gf27500.c, gf5248.c, gf65376.c: +#undef fp_add +#undef fp_mul +#undef fp_sqr +#undef fp_sub + +#define fp_add SQISIGN_NAMESPACE(fp_add) +#define fp_mul SQISIGN_NAMESPACE(fp_mul) +#define fp_sqr SQISIGN_NAMESPACE(fp_sqr) +#define fp_sub SQISIGN_NAMESPACE(fp_sub) + +// Namespacing symbols exported from gf27500.c: +#undef gf27500_decode +#undef gf27500_decode_reduce +#undef gf27500_div +#undef gf27500_div3 +#undef gf27500_encode +#undef gf27500_invert +#undef gf27500_legendre +#undef gf27500_sqrt + +#define gf27500_decode SQISIGN_NAMESPACE(gf27500_decode) +#define gf27500_decode_reduce SQISIGN_NAMESPACE(gf27500_decode_reduce) +#define gf27500_div SQISIGN_NAMESPACE(gf27500_div) +#define gf27500_div3 SQISIGN_NAMESPACE(gf27500_div3) +#define gf27500_encode SQISIGN_NAMESPACE(gf27500_encode) +#define gf27500_invert SQISIGN_NAMESPACE(gf27500_invert) +#define gf27500_legendre SQISIGN_NAMESPACE(gf27500_legendre) +#define gf27500_sqrt SQISIGN_NAMESPACE(gf27500_sqrt) + +// Namespacing symbols exported from gf27500.c, gf5248.c, gf65376.c: +#undef fp2_mul_c0 +#undef fp2_mul_c1 +#undef fp2_sq_c0 +#undef fp2_sq_c1 + +#define fp2_mul_c0 SQISIGN_NAMESPACE(fp2_mul_c0) +#define fp2_mul_c1 SQISIGN_NAMESPACE(fp2_mul_c1) +#define fp2_sq_c0 SQISIGN_NAMESPACE(fp2_sq_c0) +#define fp2_sq_c1 SQISIGN_NAMESPACE(fp2_sq_c1) + +// Namespacing symbols exported from gf5248.c: +#undef gf5248_decode +#undef gf5248_decode_reduce +#undef gf5248_div +#undef gf5248_div3 +#undef gf5248_encode +#undef gf5248_invert +#undef gf5248_legendre +#undef gf5248_sqrt + +#define gf5248_decode SQISIGN_NAMESPACE(gf5248_decode) +#define gf5248_decode_reduce SQISIGN_NAMESPACE(gf5248_decode_reduce) +#define gf5248_div SQISIGN_NAMESPACE(gf5248_div) +#define gf5248_div3 SQISIGN_NAMESPACE(gf5248_div3) +#define gf5248_encode SQISIGN_NAMESPACE(gf5248_encode) +#define gf5248_invert SQISIGN_NAMESPACE(gf5248_invert) +#define gf5248_legendre SQISIGN_NAMESPACE(gf5248_legendre) +#define gf5248_sqrt SQISIGN_NAMESPACE(gf5248_sqrt) + +// Namespacing symbols exported from gf65376.c: +#undef gf65376_decode +#undef gf65376_decode_reduce +#undef gf65376_div +#undef gf65376_div3 +#undef gf65376_encode +#undef gf65376_invert +#undef gf65376_legendre +#undef gf65376_sqrt + +#define gf65376_decode SQISIGN_NAMESPACE(gf65376_decode) +#define gf65376_decode_reduce SQISIGN_NAMESPACE(gf65376_decode_reduce) +#define gf65376_div SQISIGN_NAMESPACE(gf65376_div) +#define gf65376_div3 SQISIGN_NAMESPACE(gf65376_div3) +#define gf65376_encode SQISIGN_NAMESPACE(gf65376_encode) +#define gf65376_invert SQISIGN_NAMESPACE(gf65376_invert) +#define gf65376_legendre SQISIGN_NAMESPACE(gf65376_legendre) +#define gf65376_sqrt SQISIGN_NAMESPACE(gf65376_sqrt) + +// Namespacing symbols exported from hd.c: +#undef add_couple_jac_points +#undef copy_bases_to_kernel +#undef couple_jac_to_xz +#undef double_couple_jac_point +#undef double_couple_jac_point_iter +#undef double_couple_point +#undef double_couple_point_iter + +#define add_couple_jac_points SQISIGN_NAMESPACE(add_couple_jac_points) +#define copy_bases_to_kernel SQISIGN_NAMESPACE(copy_bases_to_kernel) +#define couple_jac_to_xz SQISIGN_NAMESPACE(couple_jac_to_xz) +#define double_couple_jac_point SQISIGN_NAMESPACE(double_couple_jac_point) +#define double_couple_jac_point_iter SQISIGN_NAMESPACE(double_couple_jac_point_iter) +#define double_couple_point SQISIGN_NAMESPACE(double_couple_point) +#define 
double_couple_point_iter SQISIGN_NAMESPACE(double_couple_point_iter) + +// Namespacing symbols exported from hnf.c: +#undef ibz_mat_4x4_is_hnf +#undef ibz_mat_4xn_hnf_mod_core +#undef ibz_vec_4_copy_mod +#undef ibz_vec_4_linear_combination_mod +#undef ibz_vec_4_scalar_mul_mod + +#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_hnf) +#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE_GENERIC(ibz_mat_4xn_hnf_mod_core) +#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_mod) +#define ibz_vec_4_linear_combination_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination_mod) +#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul_mod) + +// Namespacing symbols exported from hnf_internal.c: +#undef ibz_centered_mod +#undef ibz_conditional_assign +#undef ibz_mod_not_zero +#undef ibz_xgcd_with_u_not_0 + +#define ibz_centered_mod SQISIGN_NAMESPACE_GENERIC(ibz_centered_mod) +#define ibz_conditional_assign SQISIGN_NAMESPACE_GENERIC(ibz_conditional_assign) +#define ibz_mod_not_zero SQISIGN_NAMESPACE_GENERIC(ibz_mod_not_zero) +#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE_GENERIC(ibz_xgcd_with_u_not_0) + +// Namespacing symbols exported from ibz_division.c: +#undef ibz_xgcd + +#define ibz_xgcd SQISIGN_NAMESPACE_GENERIC(ibz_xgcd) + +// Namespacing symbols exported from id2iso.c: +#undef change_of_basis_matrix_tate +#undef change_of_basis_matrix_tate_invert +#undef ec_biscalar_mul_ibz_vec +#undef endomorphism_application_even_basis +#undef id2iso_ideal_to_kernel_dlogs_even +#undef id2iso_kernel_dlogs_to_ideal_even +#undef matrix_application_even_basis + +#define change_of_basis_matrix_tate SQISIGN_NAMESPACE(change_of_basis_matrix_tate) +#define change_of_basis_matrix_tate_invert SQISIGN_NAMESPACE(change_of_basis_matrix_tate_invert) +#define ec_biscalar_mul_ibz_vec SQISIGN_NAMESPACE(ec_biscalar_mul_ibz_vec) +#define endomorphism_application_even_basis SQISIGN_NAMESPACE(endomorphism_application_even_basis) +#define id2iso_ideal_to_kernel_dlogs_even SQISIGN_NAMESPACE(id2iso_ideal_to_kernel_dlogs_even) +#define id2iso_kernel_dlogs_to_ideal_even SQISIGN_NAMESPACE(id2iso_kernel_dlogs_to_ideal_even) +#define matrix_application_even_basis SQISIGN_NAMESPACE(matrix_application_even_basis) + +// Namespacing symbols exported from ideal.c: +#undef quat_lideal_add +#undef quat_lideal_class_gram +#undef quat_lideal_conjugate_without_hnf +#undef quat_lideal_copy +#undef quat_lideal_create +#undef quat_lideal_create_principal +#undef quat_lideal_equals +#undef quat_lideal_generator +#undef quat_lideal_inter +#undef quat_lideal_inverse_lattice_without_hnf +#undef quat_lideal_mul +#undef quat_lideal_norm +#undef quat_lideal_right_order +#undef quat_lideal_right_transporter +#undef quat_order_discriminant +#undef quat_order_is_maximal + +#define quat_lideal_add SQISIGN_NAMESPACE_GENERIC(quat_lideal_add) +#define quat_lideal_class_gram SQISIGN_NAMESPACE_GENERIC(quat_lideal_class_gram) +#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_conjugate_without_hnf) +#define quat_lideal_copy SQISIGN_NAMESPACE_GENERIC(quat_lideal_copy) +#define quat_lideal_create SQISIGN_NAMESPACE_GENERIC(quat_lideal_create) +#define quat_lideal_create_principal SQISIGN_NAMESPACE_GENERIC(quat_lideal_create_principal) +#define quat_lideal_equals SQISIGN_NAMESPACE_GENERIC(quat_lideal_equals) +#define quat_lideal_generator SQISIGN_NAMESPACE_GENERIC(quat_lideal_generator) +#define quat_lideal_inter SQISIGN_NAMESPACE_GENERIC(quat_lideal_inter) +#define 
quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_inverse_lattice_without_hnf) +#define quat_lideal_mul SQISIGN_NAMESPACE_GENERIC(quat_lideal_mul) +#define quat_lideal_norm SQISIGN_NAMESPACE_GENERIC(quat_lideal_norm) +#define quat_lideal_right_order SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_order) +#define quat_lideal_right_transporter SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_transporter) +#define quat_order_discriminant SQISIGN_NAMESPACE_GENERIC(quat_order_discriminant) +#define quat_order_is_maximal SQISIGN_NAMESPACE_GENERIC(quat_order_is_maximal) + +// Namespacing symbols exported from intbig.c: +#undef ibz_abs +#undef ibz_add +#undef ibz_bitsize +#undef ibz_cmp +#undef ibz_cmp_int32 +#undef ibz_convert_to_str +#undef ibz_copy +#undef ibz_copy_digits +#undef ibz_div +#undef ibz_div_2exp +#undef ibz_div_floor +#undef ibz_divides +#undef ibz_finalize +#undef ibz_gcd +#undef ibz_get +#undef ibz_init +#undef ibz_invmod +#undef ibz_is_even +#undef ibz_is_odd +#undef ibz_is_one +#undef ibz_is_zero +#undef ibz_legendre +#undef ibz_mod +#undef ibz_mod_ui +#undef ibz_mul +#undef ibz_neg +#undef ibz_pow +#undef ibz_pow_mod +#undef ibz_print +#undef ibz_probab_prime +#undef ibz_rand_interval +#undef ibz_rand_interval_bits +#undef ibz_rand_interval_i +#undef ibz_rand_interval_minm_m +#undef ibz_set +#undef ibz_set_from_str +#undef ibz_size_in_base +#undef ibz_sqrt +#undef ibz_sqrt_floor +#undef ibz_sqrt_mod_p +#undef ibz_sub +#undef ibz_swap +#undef ibz_to_digits +#undef ibz_two_adic + +#define ibz_abs SQISIGN_NAMESPACE_GENERIC(ibz_abs) +#define ibz_add SQISIGN_NAMESPACE_GENERIC(ibz_add) +#define ibz_bitsize SQISIGN_NAMESPACE_GENERIC(ibz_bitsize) +#define ibz_cmp SQISIGN_NAMESPACE_GENERIC(ibz_cmp) +#define ibz_cmp_int32 SQISIGN_NAMESPACE_GENERIC(ibz_cmp_int32) +#define ibz_convert_to_str SQISIGN_NAMESPACE_GENERIC(ibz_convert_to_str) +#define ibz_copy SQISIGN_NAMESPACE_GENERIC(ibz_copy) +#define ibz_copy_digits SQISIGN_NAMESPACE_GENERIC(ibz_copy_digits) +#define ibz_div SQISIGN_NAMESPACE_GENERIC(ibz_div) +#define ibz_div_2exp SQISIGN_NAMESPACE_GENERIC(ibz_div_2exp) +#define ibz_div_floor SQISIGN_NAMESPACE_GENERIC(ibz_div_floor) +#define ibz_divides SQISIGN_NAMESPACE_GENERIC(ibz_divides) +#define ibz_finalize SQISIGN_NAMESPACE_GENERIC(ibz_finalize) +#define ibz_gcd SQISIGN_NAMESPACE_GENERIC(ibz_gcd) +#define ibz_get SQISIGN_NAMESPACE_GENERIC(ibz_get) +#define ibz_init SQISIGN_NAMESPACE_GENERIC(ibz_init) +#define ibz_invmod SQISIGN_NAMESPACE_GENERIC(ibz_invmod) +#define ibz_is_even SQISIGN_NAMESPACE_GENERIC(ibz_is_even) +#define ibz_is_odd SQISIGN_NAMESPACE_GENERIC(ibz_is_odd) +#define ibz_is_one SQISIGN_NAMESPACE_GENERIC(ibz_is_one) +#define ibz_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_is_zero) +#define ibz_legendre SQISIGN_NAMESPACE_GENERIC(ibz_legendre) +#define ibz_mod SQISIGN_NAMESPACE_GENERIC(ibz_mod) +#define ibz_mod_ui SQISIGN_NAMESPACE_GENERIC(ibz_mod_ui) +#define ibz_mul SQISIGN_NAMESPACE_GENERIC(ibz_mul) +#define ibz_neg SQISIGN_NAMESPACE_GENERIC(ibz_neg) +#define ibz_pow SQISIGN_NAMESPACE_GENERIC(ibz_pow) +#define ibz_pow_mod SQISIGN_NAMESPACE_GENERIC(ibz_pow_mod) +#define ibz_print SQISIGN_NAMESPACE_GENERIC(ibz_print) +#define ibz_probab_prime SQISIGN_NAMESPACE_GENERIC(ibz_probab_prime) +#define ibz_rand_interval SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval) +#define ibz_rand_interval_bits SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_bits) +#define ibz_rand_interval_i SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_i) +#define ibz_rand_interval_minm_m 
SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_minm_m) +#define ibz_set SQISIGN_NAMESPACE_GENERIC(ibz_set) +#define ibz_set_from_str SQISIGN_NAMESPACE_GENERIC(ibz_set_from_str) +#define ibz_size_in_base SQISIGN_NAMESPACE_GENERIC(ibz_size_in_base) +#define ibz_sqrt SQISIGN_NAMESPACE_GENERIC(ibz_sqrt) +#define ibz_sqrt_floor SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_floor) +#define ibz_sqrt_mod_p SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_mod_p) +#define ibz_sub SQISIGN_NAMESPACE_GENERIC(ibz_sub) +#define ibz_swap SQISIGN_NAMESPACE_GENERIC(ibz_swap) +#define ibz_to_digits SQISIGN_NAMESPACE_GENERIC(ibz_to_digits) +#define ibz_two_adic SQISIGN_NAMESPACE_GENERIC(ibz_two_adic) + +// Namespacing symbols exported from integers.c: +#undef ibz_cornacchia_prime +#undef ibz_generate_random_prime + +#define ibz_cornacchia_prime SQISIGN_NAMESPACE_GENERIC(ibz_cornacchia_prime) +#define ibz_generate_random_prime SQISIGN_NAMESPACE_GENERIC(ibz_generate_random_prime) + +// Namespacing symbols exported from isog_chains.c: +#undef ec_eval_even +#undef ec_eval_small_chain +#undef ec_iso_eval +#undef ec_isomorphism + +#define ec_eval_even SQISIGN_NAMESPACE(ec_eval_even) +#define ec_eval_small_chain SQISIGN_NAMESPACE(ec_eval_small_chain) +#define ec_iso_eval SQISIGN_NAMESPACE(ec_iso_eval) +#define ec_isomorphism SQISIGN_NAMESPACE(ec_isomorphism) + +// Namespacing symbols exported from keygen.c: +#undef protocols_keygen +#undef secret_key_finalize +#undef secret_key_init + +#define protocols_keygen SQISIGN_NAMESPACE(protocols_keygen) +#define secret_key_finalize SQISIGN_NAMESPACE(secret_key_finalize) +#define secret_key_init SQISIGN_NAMESPACE(secret_key_init) + +// Namespacing symbols exported from l2.c: +#undef quat_lattice_lll +#undef quat_lll_core + +#define quat_lattice_lll SQISIGN_NAMESPACE_GENERIC(quat_lattice_lll) +#define quat_lll_core SQISIGN_NAMESPACE_GENERIC(quat_lll_core) + +// Namespacing symbols exported from lat_ball.c: +#undef quat_lattice_bound_parallelogram +#undef quat_lattice_sample_from_ball + +#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE_GENERIC(quat_lattice_bound_parallelogram) +#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE_GENERIC(quat_lattice_sample_from_ball) + +// Namespacing symbols exported from lattice.c: +#undef quat_lattice_add +#undef quat_lattice_alg_elem_mul +#undef quat_lattice_conjugate_without_hnf +#undef quat_lattice_contains +#undef quat_lattice_dual_without_hnf +#undef quat_lattice_equal +#undef quat_lattice_gram +#undef quat_lattice_hnf +#undef quat_lattice_inclusion +#undef quat_lattice_index +#undef quat_lattice_intersect +#undef quat_lattice_mat_alg_coord_mul_without_hnf +#undef quat_lattice_mul +#undef quat_lattice_reduce_denom + +#define quat_lattice_add SQISIGN_NAMESPACE_GENERIC(quat_lattice_add) +#define quat_lattice_alg_elem_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_alg_elem_mul) +#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_conjugate_without_hnf) +#define quat_lattice_contains SQISIGN_NAMESPACE_GENERIC(quat_lattice_contains) +#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_dual_without_hnf) +#define quat_lattice_equal SQISIGN_NAMESPACE_GENERIC(quat_lattice_equal) +#define quat_lattice_gram SQISIGN_NAMESPACE_GENERIC(quat_lattice_gram) +#define quat_lattice_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_hnf) +#define quat_lattice_inclusion SQISIGN_NAMESPACE_GENERIC(quat_lattice_inclusion) +#define quat_lattice_index SQISIGN_NAMESPACE_GENERIC(quat_lattice_index) +#define quat_lattice_intersect 
SQISIGN_NAMESPACE_GENERIC(quat_lattice_intersect) +#define quat_lattice_mat_alg_coord_mul_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_mat_alg_coord_mul_without_hnf) +#define quat_lattice_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_mul) +#define quat_lattice_reduce_denom SQISIGN_NAMESPACE_GENERIC(quat_lattice_reduce_denom) + +// Namespacing symbols exported from lll_applications.c: +#undef quat_lideal_lideal_mul_reduced +#undef quat_lideal_prime_norm_reduced_equivalent +#undef quat_lideal_reduce_basis + +#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE_GENERIC(quat_lideal_lideal_mul_reduced) +#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE_GENERIC(quat_lideal_prime_norm_reduced_equivalent) +#define quat_lideal_reduce_basis SQISIGN_NAMESPACE_GENERIC(quat_lideal_reduce_basis) + +// Namespacing symbols exported from lll_verification.c: +#undef ibq_vec_4_copy_ibz +#undef quat_lll_bilinear +#undef quat_lll_gram_schmidt_transposed_with_ibq +#undef quat_lll_set_ibq_parameters +#undef quat_lll_verify + +#define ibq_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_copy_ibz) +#define quat_lll_bilinear SQISIGN_NAMESPACE_GENERIC(quat_lll_bilinear) +#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE_GENERIC(quat_lll_gram_schmidt_transposed_with_ibq) +#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE_GENERIC(quat_lll_set_ibq_parameters) +#define quat_lll_verify SQISIGN_NAMESPACE_GENERIC(quat_lll_verify) + +// Namespacing symbols exported from mem.c: +#undef sqisign_secure_clear +#undef sqisign_secure_free + +#define sqisign_secure_clear SQISIGN_NAMESPACE_GENERIC(sqisign_secure_clear) +#define sqisign_secure_free SQISIGN_NAMESPACE_GENERIC(sqisign_secure_free) + +// Namespacing symbols exported from mp.c: +#undef MUL +#undef mp_add +#undef mp_compare +#undef mp_copy +#undef mp_inv_2e +#undef mp_invert_matrix +#undef mp_is_one +#undef mp_is_zero +#undef mp_mod_2exp +#undef mp_mul +#undef mp_mul2 +#undef mp_neg +#undef mp_print +#undef mp_shiftl +#undef mp_shiftr +#undef mp_sub +#undef multiple_mp_shiftl +#undef select_ct +#undef swap_ct + +#define MUL SQISIGN_NAMESPACE_GENERIC(MUL) +#define mp_add SQISIGN_NAMESPACE_GENERIC(mp_add) +#define mp_compare SQISIGN_NAMESPACE_GENERIC(mp_compare) +#define mp_copy SQISIGN_NAMESPACE_GENERIC(mp_copy) +#define mp_inv_2e SQISIGN_NAMESPACE_GENERIC(mp_inv_2e) +#define mp_invert_matrix SQISIGN_NAMESPACE_GENERIC(mp_invert_matrix) +#define mp_is_one SQISIGN_NAMESPACE_GENERIC(mp_is_one) +#define mp_is_zero SQISIGN_NAMESPACE_GENERIC(mp_is_zero) +#define mp_mod_2exp SQISIGN_NAMESPACE_GENERIC(mp_mod_2exp) +#define mp_mul SQISIGN_NAMESPACE_GENERIC(mp_mul) +#define mp_mul2 SQISIGN_NAMESPACE_GENERIC(mp_mul2) +#define mp_neg SQISIGN_NAMESPACE_GENERIC(mp_neg) +#define mp_print SQISIGN_NAMESPACE_GENERIC(mp_print) +#define mp_shiftl SQISIGN_NAMESPACE_GENERIC(mp_shiftl) +#define mp_shiftr SQISIGN_NAMESPACE_GENERIC(mp_shiftr) +#define mp_sub SQISIGN_NAMESPACE_GENERIC(mp_sub) +#define multiple_mp_shiftl SQISIGN_NAMESPACE_GENERIC(multiple_mp_shiftl) +#define select_ct SQISIGN_NAMESPACE_GENERIC(select_ct) +#define swap_ct SQISIGN_NAMESPACE_GENERIC(swap_ct) + +// Namespacing symbols exported from normeq.c: +#undef quat_change_to_O0_basis +#undef quat_lattice_O0_set +#undef quat_lattice_O0_set_extremal +#undef quat_order_elem_create +#undef quat_represent_integer +#undef quat_sampling_random_ideal_O0_given_norm + +#define quat_change_to_O0_basis SQISIGN_NAMESPACE_GENERIC(quat_change_to_O0_basis) +#define quat_lattice_O0_set 
SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set) +#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set_extremal) +#define quat_order_elem_create SQISIGN_NAMESPACE_GENERIC(quat_order_elem_create) +#define quat_represent_integer SQISIGN_NAMESPACE_GENERIC(quat_represent_integer) +#define quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE_GENERIC(quat_sampling_random_ideal_O0_given_norm) + +// Namespacing symbols exported from printer.c: +#undef ibz_mat_2x2_print +#undef ibz_mat_4x4_print +#undef ibz_vec_2_print +#undef ibz_vec_4_print +#undef quat_alg_elem_print +#undef quat_alg_print +#undef quat_lattice_print +#undef quat_left_ideal_print + +#define ibz_mat_2x2_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_print) +#define ibz_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_print) +#define ibz_vec_2_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_print) +#define ibz_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_print) +#define quat_alg_elem_print SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_print) +#define quat_alg_print SQISIGN_NAMESPACE_GENERIC(quat_alg_print) +#define quat_lattice_print SQISIGN_NAMESPACE_GENERIC(quat_lattice_print) +#define quat_left_ideal_print SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_print) + +// Namespacing symbols exported from random_input_generation.c: +#undef quat_test_input_random_ideal_generation +#undef quat_test_input_random_ideal_lattice_generation +#undef quat_test_input_random_lattice_generation + +#define quat_test_input_random_ideal_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_generation) +#define quat_test_input_random_ideal_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_lattice_generation) +#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_lattice_generation) + +// Namespacing symbols exported from rationals.c: +#undef ibq_abs +#undef ibq_add +#undef ibq_cmp +#undef ibq_copy +#undef ibq_finalize +#undef ibq_init +#undef ibq_inv +#undef ibq_is_ibz +#undef ibq_is_one +#undef ibq_is_zero +#undef ibq_mat_4x4_finalize +#undef ibq_mat_4x4_init +#undef ibq_mat_4x4_print +#undef ibq_mul +#undef ibq_neg +#undef ibq_reduce +#undef ibq_set +#undef ibq_sub +#undef ibq_to_ibz +#undef ibq_vec_4_finalize +#undef ibq_vec_4_init +#undef ibq_vec_4_print + +#define ibq_abs SQISIGN_NAMESPACE_GENERIC(ibq_abs) +#define ibq_add SQISIGN_NAMESPACE_GENERIC(ibq_add) +#define ibq_cmp SQISIGN_NAMESPACE_GENERIC(ibq_cmp) +#define ibq_copy SQISIGN_NAMESPACE_GENERIC(ibq_copy) +#define ibq_finalize SQISIGN_NAMESPACE_GENERIC(ibq_finalize) +#define ibq_init SQISIGN_NAMESPACE_GENERIC(ibq_init) +#define ibq_inv SQISIGN_NAMESPACE_GENERIC(ibq_inv) +#define ibq_is_ibz SQISIGN_NAMESPACE_GENERIC(ibq_is_ibz) +#define ibq_is_one SQISIGN_NAMESPACE_GENERIC(ibq_is_one) +#define ibq_is_zero SQISIGN_NAMESPACE_GENERIC(ibq_is_zero) +#define ibq_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_finalize) +#define ibq_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_init) +#define ibq_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_print) +#define ibq_mul SQISIGN_NAMESPACE_GENERIC(ibq_mul) +#define ibq_neg SQISIGN_NAMESPACE_GENERIC(ibq_neg) +#define ibq_reduce SQISIGN_NAMESPACE_GENERIC(ibq_reduce) +#define ibq_set SQISIGN_NAMESPACE_GENERIC(ibq_set) +#define ibq_sub SQISIGN_NAMESPACE_GENERIC(ibq_sub) +#define ibq_to_ibz SQISIGN_NAMESPACE_GENERIC(ibq_to_ibz) +#define ibq_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_finalize) +#define ibq_vec_4_init 
SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_init) +#define ibq_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_print) + +// Namespacing symbols exported from sign.c: +#undef protocols_sign + +#define protocols_sign SQISIGN_NAMESPACE(protocols_sign) + +// Namespacing symbols exported from sqisign.c: +#undef sqisign_keypair +#undef sqisign_open +#undef sqisign_sign +#undef sqisign_sign_signature +#undef sqisign_verify +#undef sqisign_verify_signature + +#define sqisign_keypair SQISIGN_NAMESPACE(sqisign_keypair) +#define sqisign_open SQISIGN_NAMESPACE(sqisign_open) +#define sqisign_sign SQISIGN_NAMESPACE(sqisign_sign) +#define sqisign_sign_signature SQISIGN_NAMESPACE(sqisign_sign_signature) +#define sqisign_verify SQISIGN_NAMESPACE(sqisign_verify) +#define sqisign_verify_signature SQISIGN_NAMESPACE(sqisign_verify_signature) + +// Namespacing symbols exported from theta_isogenies.c: +#undef theta_chain_compute_and_eval +#undef theta_chain_compute_and_eval_randomized +#undef theta_chain_compute_and_eval_verify + +#define theta_chain_compute_and_eval SQISIGN_NAMESPACE(theta_chain_compute_and_eval) +#define theta_chain_compute_and_eval_randomized SQISIGN_NAMESPACE(theta_chain_compute_and_eval_randomized) +#define theta_chain_compute_and_eval_verify SQISIGN_NAMESPACE(theta_chain_compute_and_eval_verify) + +// Namespacing symbols exported from theta_structure.c: +#undef double_iter +#undef double_point +#undef is_product_theta_point +#undef theta_precomputation + +#define double_iter SQISIGN_NAMESPACE(double_iter) +#define double_point SQISIGN_NAMESPACE(double_point) +#define is_product_theta_point SQISIGN_NAMESPACE(is_product_theta_point) +#define theta_precomputation SQISIGN_NAMESPACE(theta_precomputation) + +// Namespacing symbols exported from verify.c: +#undef protocols_verify + +#define protocols_verify SQISIGN_NAMESPACE(protocols_verify) + +// Namespacing symbols exported from xeval.c: +#undef xeval_2 +#undef xeval_2_singular +#undef xeval_4 + +#define xeval_2 SQISIGN_NAMESPACE(xeval_2) +#define xeval_2_singular SQISIGN_NAMESPACE(xeval_2_singular) +#define xeval_4 SQISIGN_NAMESPACE(xeval_4) + +// Namespacing symbols exported from xisog.c: +#undef xisog_2 +#undef xisog_2_singular +#undef xisog_4 + +#define xisog_2 SQISIGN_NAMESPACE(xisog_2) +#define xisog_2_singular SQISIGN_NAMESPACE(xisog_2_singular) +#define xisog_4 SQISIGN_NAMESPACE(xisog_4) + +// Namespacing symbols from precomp: +#undef BASIS_E0_PX +#undef BASIS_E0_QX +#undef p_cofactor_for_2f +#undef CURVES_WITH_ENDOMORPHISMS +#undef EVEN_INDEX +#undef CHI_EVAL +#undef FP2_CONSTANTS +#undef SPLITTING_TRANSFORMS +#undef NORMALIZATION_TRANSFORMS +#undef QUAT_prime_cofactor +#undef QUATALG_PINFTY +#undef EXTREMAL_ORDERS +#undef CONNECTING_IDEALS +#undef CONJUGATING_ELEMENTS +#undef TWO_TO_SECURITY_BITS +#undef TORSION_PLUS_2POWER +#undef SEC_DEGREE +#undef COM_DEGREE + +#define BASIS_E0_PX SQISIGN_NAMESPACE(BASIS_E0_PX) +#define BASIS_E0_QX SQISIGN_NAMESPACE(BASIS_E0_QX) +#define p_cofactor_for_2f SQISIGN_NAMESPACE(p_cofactor_for_2f) +#define CURVES_WITH_ENDOMORPHISMS SQISIGN_NAMESPACE(CURVES_WITH_ENDOMORPHISMS) +#define EVEN_INDEX SQISIGN_NAMESPACE(EVEN_INDEX) +#define CHI_EVAL SQISIGN_NAMESPACE(CHI_EVAL) +#define FP2_CONSTANTS SQISIGN_NAMESPACE(FP2_CONSTANTS) +#define SPLITTING_TRANSFORMS SQISIGN_NAMESPACE(SPLITTING_TRANSFORMS) +#define NORMALIZATION_TRANSFORMS SQISIGN_NAMESPACE(NORMALIZATION_TRANSFORMS) +#define QUAT_prime_cofactor SQISIGN_NAMESPACE(QUAT_prime_cofactor) +#define QUATALG_PINFTY SQISIGN_NAMESPACE(QUATALG_PINFTY) +#define 
EXTREMAL_ORDERS SQISIGN_NAMESPACE(EXTREMAL_ORDERS) +#define CONNECTING_IDEALS SQISIGN_NAMESPACE(CONNECTING_IDEALS) +#define CONJUGATING_ELEMENTS SQISIGN_NAMESPACE(CONJUGATING_ELEMENTS) +#define TWO_TO_SECURITY_BITS SQISIGN_NAMESPACE(TWO_TO_SECURITY_BITS) +#define TORSION_PLUS_2POWER SQISIGN_NAMESPACE(TORSION_PLUS_2POWER) +#define SEC_DEGREE SQISIGN_NAMESPACE(SEC_DEGREE) +#define COM_DEGREE SQISIGN_NAMESPACE(COM_DEGREE) + + +#endif + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign_parameters.txt b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign_parameters.txt new file mode 100644 index 0000000000..8a1a26a502 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign_parameters.txt @@ -0,0 +1,3 @@ +lvl = 1 +p = 0x4ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff +num_orders = 7 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.c new file mode 100644 index 0000000000..478a9ab25b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.c @@ -0,0 +1,1283 @@ +#include "theta_isogenies.h" +#include +#include +#include +#include +#include + +// Select a base change matrix in constant time, with M1 a regular +// base change matrix and M2 a precomputed base change matrix +// If option = 0 then M <- M1, else if option = 0xFF...FF then M <- M2 +static inline void +select_base_change_matrix(basis_change_matrix_t *M, + const basis_change_matrix_t *M1, + const precomp_basis_change_matrix_t *M2, + const uint32_t option) +{ + for (int i = 0; i < 4; i++) + for (int j = 0; j < 4; j++) + fp2_select(&M->m[i][j], &M1->m[i][j], &FP2_CONSTANTS[M2->m[i][j]], option); +} + +// Set a regular base change matrix from a precomputed one +static inline void +set_base_change_matrix_from_precomp(basis_change_matrix_t *res, const precomp_basis_change_matrix_t *M) +{ + for (int i = 0; i < 4; i++) + for (int j = 0; j < 4; j++) + res->m[i][j] = FP2_CONSTANTS[M->m[i][j]]; +} + +static inline void +choose_index_theta_point(fp2_t *res, int ind, const theta_point_t *T) +{ + const fp2_t *src = NULL; + switch (ind % 4) { + case 0: + src = &T->x; + break; + case 1: + src = &T->y; + break; + case 2: + src = &T->z; + break; + case 3: + src = &T->t; + break; + default: + assert(0); + } + fp2_copy(res, src); +} + +// same as apply_isomorphism method but more efficient when the t component of P is zero. 
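Editor's note (illustrative sketch, not part of the upstream SQIsign sources): apply_isomorphism_general below is simply the 4x4 basis-change matrix acting on the theta coordinates (x, y, z, t), fully unrolled; when the caller knows that P->t == 0 it passes Pt_not_zero = false and the fourth column of M is skipped, saving four fp2_mul/fp2_add pairs. A loop-based equivalent, using the choose_index_theta_point helper defined above (the name apply_isomorphism_sketch is hypothetical), would look roughly like this:

static void
apply_isomorphism_sketch(theta_point_t *res, const basis_change_matrix_t *M, const theta_point_t *P, bool Pt_not_zero)
{
    theta_point_t tmp;
    fp2_t *rows[4] = { &tmp.x, &tmp.y, &tmp.z, &tmp.t };
    fp2_t coord, prod;

    for (int i = 0; i < 4; i++) {
        fp2_set_zero(rows[i]);
        // res_i = sum_j M[i][j] * P_j, skipping j = 3 when P->t is known to be zero
        for (int j = 0; j < (Pt_not_zero ? 4 : 3); j++) {
            choose_index_theta_point(&coord, j, P);
            fp2_mul(&prod, &coord, &M->m[i][j]);
            fp2_add(rows[i], rows[i], &prod);
        }
    }
    *res = tmp; // accumulate in a temporary so that res may alias P
}

The hand-unrolled version in the patch presumably avoids the inner branch and lets the compiler schedule the fp2 operations more freely; the sketch is only meant to make the structure explicit.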
+static void +apply_isomorphism_general(theta_point_t *res, + const basis_change_matrix_t *M, + const theta_point_t *P, + const bool Pt_not_zero) +{ + fp2_t x1; + theta_point_t temp; + + fp2_mul(&temp.x, &P->x, &M->m[0][0]); + fp2_mul(&x1, &P->y, &M->m[0][1]); + fp2_add(&temp.x, &temp.x, &x1); + fp2_mul(&x1, &P->z, &M->m[0][2]); + fp2_add(&temp.x, &temp.x, &x1); + + fp2_mul(&temp.y, &P->x, &M->m[1][0]); + fp2_mul(&x1, &P->y, &M->m[1][1]); + fp2_add(&temp.y, &temp.y, &x1); + fp2_mul(&x1, &P->z, &M->m[1][2]); + fp2_add(&temp.y, &temp.y, &x1); + + fp2_mul(&temp.z, &P->x, &M->m[2][0]); + fp2_mul(&x1, &P->y, &M->m[2][1]); + fp2_add(&temp.z, &temp.z, &x1); + fp2_mul(&x1, &P->z, &M->m[2][2]); + fp2_add(&temp.z, &temp.z, &x1); + + fp2_mul(&temp.t, &P->x, &M->m[3][0]); + fp2_mul(&x1, &P->y, &M->m[3][1]); + fp2_add(&temp.t, &temp.t, &x1); + fp2_mul(&x1, &P->z, &M->m[3][2]); + fp2_add(&temp.t, &temp.t, &x1); + + if (Pt_not_zero) { + fp2_mul(&x1, &P->t, &M->m[0][3]); + fp2_add(&temp.x, &temp.x, &x1); + + fp2_mul(&x1, &P->t, &M->m[1][3]); + fp2_add(&temp.y, &temp.y, &x1); + + fp2_mul(&x1, &P->t, &M->m[2][3]); + fp2_add(&temp.z, &temp.z, &x1); + + fp2_mul(&x1, &P->t, &M->m[3][3]); + fp2_add(&temp.t, &temp.t, &x1); + } + + fp2_copy(&res->x, &temp.x); + fp2_copy(&res->y, &temp.y); + fp2_copy(&res->z, &temp.z); + fp2_copy(&res->t, &temp.t); +} + +static void +apply_isomorphism(theta_point_t *res, const basis_change_matrix_t *M, const theta_point_t *P) +{ + apply_isomorphism_general(res, M, P, true); +} + +// set res = M1 * M2 with matrix multiplication +static void +base_change_matrix_multiplication(basis_change_matrix_t *res, + const basis_change_matrix_t *M1, + const basis_change_matrix_t *M2) +{ + basis_change_matrix_t tmp; + fp2_t sum, m_ik, m_kj; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + fp2_set_zero(&sum); + for (int k = 0; k < 4; k++) { + m_ik = M1->m[i][k]; + m_kj = M2->m[k][j]; + fp2_mul(&m_ik, &m_ik, &m_kj); + fp2_add(&sum, &sum, &m_ik); + } + tmp.m[i][j] = sum; + } + } + *res = tmp; +} + +// compute the theta_point corresponding to the couple of point T on an elliptic product +static void +base_change(theta_point_t *out, const theta_gluing_t *phi, const theta_couple_point_t *T) +{ + theta_point_t null_point; + + // null_point = (a : b : c : d) + // a = P1.x P2.x, b = P1.x P2.z, c = P1.z P2.x, d = P1.z P2.z + fp2_mul(&null_point.x, &T->P1.x, &T->P2.x); + fp2_mul(&null_point.y, &T->P1.x, &T->P2.z); + fp2_mul(&null_point.z, &T->P2.x, &T->P1.z); + fp2_mul(&null_point.t, &T->P1.z, &T->P2.z); + + // Apply the basis change + apply_isomorphism(out, &phi->M, &null_point); +} + +static void +action_by_translation_z_and_det(fp2_t *z_inv, fp2_t *det_inv, const ec_point_t *P4, const ec_point_t *P2) +{ + // Store the Z-coordinate to invert + fp2_copy(z_inv, &P4->z); + + // Then collect detij = xij wij - uij zij + fp2_t tmp; + fp2_mul(det_inv, &P4->x, &P2->z); + fp2_mul(&tmp, &P4->z, &P2->x); + fp2_sub(det_inv, det_inv, &tmp); +} + +static void +action_by_translation_compute_matrix(translation_matrix_t *G, + const ec_point_t *P4, + const ec_point_t *P2, + const fp2_t *z_inv, + const fp2_t *det_inv) +{ + fp2_t tmp; + + // Gi.g10 = uij xij /detij - xij/zij + fp2_mul(&tmp, &P4->x, z_inv); + fp2_mul(&G->g10, &P4->x, &P2->x); + fp2_mul(&G->g10, &G->g10, det_inv); + fp2_sub(&G->g10, &G->g10, &tmp); + + // Gi.g11 = uij zij * detij + fp2_mul(&G->g11, &P2->x, det_inv); + fp2_mul(&G->g11, &G->g11, &P4->z); + + // Gi.g00 = -Gi.g11 + fp2_neg(&G->g00, &G->g11); + + // Gi.g01 = - wij zij detij + 
fp2_mul(&G->g01, &P2->z, det_inv); + fp2_mul(&G->g01, &G->g01, &P4->z); + fp2_neg(&G->g01, &G->g01); +} + +// Returns 1 if the basis is as expected and 0 otherwise +// We only expect this to fail for malformed signatures, so +// do not require this to run in constant time. +static int +verify_two_torsion(const theta_couple_point_t *K1_2, const theta_couple_point_t *K2_2, const theta_couple_curve_t *E12) +{ + // First check if any point in K1_2 or K2_2 is zero; if any is, then the points did not have + // order 8 when we started gluing + if (ec_is_zero(&K1_2->P1) | ec_is_zero(&K1_2->P2) | ec_is_zero(&K2_2->P1) | ec_is_zero(&K2_2->P2)) { + return 0; + } + + // Now ensure that P1, Q1 and P2, Q2 are independent. For points of order two this means + // that they're not the same + if (ec_is_equal(&K1_2->P1, &K2_2->P1) | ec_is_equal(&K1_2->P2, &K2_2->P2)) { + return 0; + } + + // Finally, double the points to ensure all points have order exactly 2 + theta_couple_point_t O1, O2; + double_couple_point(&O1, K1_2, E12); + double_couple_point(&O2, K2_2, E12); + // If this check fails then the points had order 2*f for some f, and the kernel is malformed. + if (!(ec_is_zero(&O1.P1) & ec_is_zero(&O1.P2) & ec_is_zero(&O2.P1) & ec_is_zero(&O2.P2))) { + return 0; + } + + return 1; +} + +// Computes the action by translation for four points +// (P1, P2) and (Q1, Q2) on E1 x E2 simultaneously to +// save on inversions. +// Returns 0 if any of Pi or Qi does not have order 4 +// and 1 otherwise +static int +action_by_translation(translation_matrix_t *Gi, + const theta_couple_point_t *K1_4, + const theta_couple_point_t *K2_4, + const theta_couple_curve_t *E12) +{ + // Compute points of order 2 from Ki_4 + theta_couple_point_t K1_2, K2_2; + double_couple_point(&K1_2, K1_4, E12); + double_couple_point(&K2_2, K2_4, E12); + + if (!verify_two_torsion(&K1_2, &K2_2, E12)) { + return 0; + } + + // We need to invert four Z coordinates and + // four determinants which we do with batched + // inversion + fp2_t inverses[8]; + action_by_translation_z_and_det(&inverses[0], &inverses[4], &K1_4->P1, &K1_2.P1); + action_by_translation_z_and_det(&inverses[1], &inverses[5], &K1_4->P2, &K1_2.P2); + action_by_translation_z_and_det(&inverses[2], &inverses[6], &K2_4->P1, &K2_2.P1); + action_by_translation_z_and_det(&inverses[3], &inverses[7], &K2_4->P2, &K2_2.P2); + + fp2_batched_inv(inverses, 8); + if (fp2_is_zero(&inverses[0])) + return 0; // something was wrong with our input (which somehow was not caught by + // verify_two_torsion) + + action_by_translation_compute_matrix(&Gi[0], &K1_4->P1, &K1_2.P1, &inverses[0], &inverses[4]); + action_by_translation_compute_matrix(&Gi[1], &K1_4->P2, &K1_2.P2, &inverses[1], &inverses[5]); + action_by_translation_compute_matrix(&Gi[2], &K2_4->P1, &K2_2.P1, &inverses[2], &inverses[6]); + action_by_translation_compute_matrix(&Gi[3], &K2_4->P2, &K2_2.P2, &inverses[3], &inverses[7]); + + return 1; +} + +// Given the appropriate four torsion, computes the +// change of basis needed to obtain the correct theta null +// point.
+// Returns 0 if the order of K1_4 or K2_4 is not 4 +static int +gluing_change_of_basis(basis_change_matrix_t *M, + const theta_couple_point_t *K1_4, + const theta_couple_point_t *K2_4, + const theta_couple_curve_t *E12) +{ + // Compute the four 2x2 matrices for the action by translation + // on the four points: + translation_matrix_t Gi[4]; + if (!action_by_translation(Gi, K1_4, K2_4, E12)) + return 0; + + // Computation of the 4x4 matrix from Mij + // t001, t101 (resp t002, t102) first column of M11 * M21 (resp M12 * M22) + fp2_t t001, t101, t002, t102, tmp; + + fp2_mul(&t001, &Gi[0].g00, &Gi[2].g00); + fp2_mul(&tmp, &Gi[0].g01, &Gi[2].g10); + fp2_add(&t001, &t001, &tmp); + + fp2_mul(&t101, &Gi[0].g10, &Gi[2].g00); + fp2_mul(&tmp, &Gi[0].g11, &Gi[2].g10); + fp2_add(&t101, &t101, &tmp); + + fp2_mul(&t002, &Gi[1].g00, &Gi[3].g00); + fp2_mul(&tmp, &Gi[1].g01, &Gi[3].g10); + fp2_add(&t002, &t002, &tmp); + + fp2_mul(&t102, &Gi[1].g10, &Gi[3].g00); + fp2_mul(&tmp, &Gi[1].g11, &Gi[3].g10); + fp2_add(&t102, &t102, &tmp); + + // trace for the first row + fp2_set_one(&M->m[0][0]); + fp2_mul(&tmp, &t001, &t002); + fp2_add(&M->m[0][0], &M->m[0][0], &tmp); + fp2_mul(&tmp, &Gi[2].g00, &Gi[3].g00); + fp2_add(&M->m[0][0], &M->m[0][0], &tmp); + fp2_mul(&tmp, &Gi[0].g00, &Gi[1].g00); + fp2_add(&M->m[0][0], &M->m[0][0], &tmp); + + fp2_mul(&M->m[0][1], &t001, &t102); + fp2_mul(&tmp, &Gi[2].g00, &Gi[3].g10); + fp2_add(&M->m[0][1], &M->m[0][1], &tmp); + fp2_mul(&tmp, &Gi[0].g00, &Gi[1].g10); + fp2_add(&M->m[0][1], &M->m[0][1], &tmp); + + fp2_mul(&M->m[0][2], &t101, &t002); + fp2_mul(&tmp, &Gi[2].g10, &Gi[3].g00); + fp2_add(&M->m[0][2], &M->m[0][2], &tmp); + fp2_mul(&tmp, &Gi[0].g10, &Gi[1].g00); + fp2_add(&M->m[0][2], &M->m[0][2], &tmp); + + fp2_mul(&M->m[0][3], &t101, &t102); + fp2_mul(&tmp, &Gi[2].g10, &Gi[3].g10); + fp2_add(&M->m[0][3], &M->m[0][3], &tmp); + fp2_mul(&tmp, &Gi[0].g10, &Gi[1].g10); + fp2_add(&M->m[0][3], &M->m[0][3], &tmp); + + // Compute the action of (0,out.K2_4.P2) for the second row + fp2_mul(&tmp, &Gi[3].g01, &M->m[0][1]); + fp2_mul(&M->m[1][0], &Gi[3].g00, &M->m[0][0]); + fp2_add(&M->m[1][0], &M->m[1][0], &tmp); + + fp2_mul(&tmp, &Gi[3].g11, &M->m[0][1]); + fp2_mul(&M->m[1][1], &Gi[3].g10, &M->m[0][0]); + fp2_add(&M->m[1][1], &M->m[1][1], &tmp); + + fp2_mul(&tmp, &Gi[3].g01, &M->m[0][3]); + fp2_mul(&M->m[1][2], &Gi[3].g00, &M->m[0][2]); + fp2_add(&M->m[1][2], &M->m[1][2], &tmp); + + fp2_mul(&tmp, &Gi[3].g11, &M->m[0][3]); + fp2_mul(&M->m[1][3], &Gi[3].g10, &M->m[0][2]); + fp2_add(&M->m[1][3], &M->m[1][3], &tmp); + + // compute the action of (K1_4.P1,0) for the third row + fp2_mul(&tmp, &Gi[0].g01, &M->m[0][2]); + fp2_mul(&M->m[2][0], &Gi[0].g00, &M->m[0][0]); + fp2_add(&M->m[2][0], &M->m[2][0], &tmp); + + fp2_mul(&tmp, &Gi[0].g01, &M->m[0][3]); + fp2_mul(&M->m[2][1], &Gi[0].g00, &M->m[0][1]); + fp2_add(&M->m[2][1], &M->m[2][1], &tmp); + + fp2_mul(&tmp, &Gi[0].g11, &M->m[0][2]); + fp2_mul(&M->m[2][2], &Gi[0].g10, &M->m[0][0]); + fp2_add(&M->m[2][2], &M->m[2][2], &tmp); + + fp2_mul(&tmp, &Gi[0].g11, &M->m[0][3]); + fp2_mul(&M->m[2][3], &Gi[0].g10, &M->m[0][1]); + fp2_add(&M->m[2][3], &M->m[2][3], &tmp); + + // compute the action of (K1_4.P1,K2_4.P2) for the final row + fp2_mul(&tmp, &Gi[0].g01, &M->m[1][2]); + fp2_mul(&M->m[3][0], &Gi[0].g00, &M->m[1][0]); + fp2_add(&M->m[3][0], &M->m[3][0], &tmp); + + fp2_mul(&tmp, &Gi[0].g01, &M->m[1][3]); + fp2_mul(&M->m[3][1], &Gi[0].g00, &M->m[1][1]); + fp2_add(&M->m[3][1], &M->m[3][1], &tmp); + + fp2_mul(&tmp, &Gi[0].g11, &M->m[1][2]); + 
fp2_mul(&M->m[3][2], &Gi[0].g10, &M->m[1][0]); + fp2_add(&M->m[3][2], &M->m[3][2], &tmp); + + fp2_mul(&tmp, &Gi[0].g11, &M->m[1][3]); + fp2_mul(&M->m[3][3], &Gi[0].g10, &M->m[1][1]); + fp2_add(&M->m[3][3], &M->m[3][3], &tmp); + + return 1; +} + +/** + * @brief Compute the gluing isogeny from an elliptic product + * + * @param out Output: the theta_gluing + * @param E12 an elliptic product of curves E1 x E2 + * @param xyK1_8 a couple point in E1[8] x E2[8] + * @param xyK2_8 a couple point in E1[8] x E2[8] + * @param verify when true, add extra sanity checks that the 8-torsion points are coherent with the isogeny + * + * out : E1xE2 -> A of kernel [4](xyK1_8, xyK2_8) + * if the kernel supplied has the incorrect order, or gluing seems malformed, + * returns 0, otherwise returns 1. + */ +static int +gluing_compute(theta_gluing_t *out, + const theta_couple_curve_t *E12, + const theta_couple_jac_point_t *xyK1_8, + const theta_couple_jac_point_t *xyK2_8, + bool verify) +{ + // Ensure that we have been given the eight torsion +#ifndef NDEBUG + { + int check = test_jac_order_twof(&xyK1_8->P1, &E12->E1, 3); + if (!check) + debug_print("xyK1_8->P1 does not have order 8"); + check = test_jac_order_twof(&xyK2_8->P1, &E12->E1, 3); + if (!check) + debug_print("xyK2_8->P1 does not have order 8"); + check = test_jac_order_twof(&xyK1_8->P2, &E12->E2, 3); + if (!check) + debug_print("xyK1_8->P2 does not have order 8"); + check = test_jac_order_twof(&xyK2_8->P2, &E12->E2, 3); + if (!check) + debug_print("xyK2_8->P2 does not have order 8"); + } +#endif + + out->xyK1_8 = *xyK1_8; + out->domain = *E12; + + // Given points in E[8] x E[8] we need the four torsion below + theta_couple_jac_point_t xyK1_4, xyK2_4; + + double_couple_jac_point(&xyK1_4, xyK1_8, E12); + double_couple_jac_point(&xyK2_4, xyK2_8, E12); + + // Convert from (X:Y:Z) coordinates to (X:Z) + theta_couple_point_t K1_8, K2_8; + theta_couple_point_t K1_4, K2_4; + + couple_jac_to_xz(&K1_8, xyK1_8); + couple_jac_to_xz(&K2_8, xyK2_8); + couple_jac_to_xz(&K1_4, &xyK1_4); + couple_jac_to_xz(&K2_4, &xyK2_4); + + // Set the basis change matrix; if we have not been given a valid K[8] for this computation, + // gluing_change_of_basis will detect this and return 0 + if (!gluing_change_of_basis(&out->M, &K1_4, &K2_4, E12)) { + debug_print("gluing failed as kernel does not have correct order"); + return 0; + } + + // apply the base change to the kernel + theta_point_t TT1, TT2; + + base_change(&TT1, out, &K1_8); + base_change(&TT2, out, &K2_8); + + // compute the codomain + to_squared_theta(&TT1, &TT1); + to_squared_theta(&TT2, &TT2); + + // If the kernel is well formed then TT1.t and TT2.t are zero; + // if they are not, we exit early as the signature we are validating + // is probably malformed + if (!(fp2_is_zero(&TT1.t) & fp2_is_zero(&TT2.t))) { + debug_print("gluing failed TT1.t or TT2.t is not zero"); + return 0; + } + // Test that our projective factors are non-zero + if (fp2_is_zero(&TT1.x) | fp2_is_zero(&TT2.x) | fp2_is_zero(&TT1.y) | fp2_is_zero(&TT2.z) | fp2_is_zero(&TT1.z)) + return 0; // invalid input + + // Projective factor: Ax + fp2_mul(&out->codomain.x, &TT1.x, &TT2.x); + fp2_mul(&out->codomain.y, &TT1.y, &TT2.x); + fp2_mul(&out->codomain.z, &TT1.x, &TT2.z); + fp2_set_zero(&out->codomain.t); + // Projective factor: ABCxz + fp2_mul(&out->precomputation.x, &TT1.y, &TT2.z); + fp2_copy(&out->precomputation.y, &out->codomain.z); + fp2_copy(&out->precomputation.z, &out->codomain.y); + fp2_set_zero(&out->precomputation.t); + + // Compute the two components of phi(K1_8) = (x:x:y:y).
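+ // Only the two distinct values are stored (in imageK1_8.x and imageK1_8.y); + // gluing_eval_point below multiplies componentwise by the projective inverse (y : y : x : x).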
+ fp2_mul(&out->imageK1_8.x, &TT1.x, &out->precomputation.x); + fp2_mul(&out->imageK1_8.y, &TT1.z, &out->precomputation.z); + + // If K1_8 and K2_8 are our 8-torsion points, this ensures that the + // 4-torsion points [2]K1_8 and [2]K2_8 are isotropic. + if (verify) { + fp2_t t1, t2; + fp2_mul(&t1, &TT1.y, &out->precomputation.y); + if (!fp2_is_equal(&out->imageK1_8.x, &t1)) + return 0; + fp2_mul(&t1, &TT2.x, &out->precomputation.x); + fp2_mul(&t2, &TT2.z, &out->precomputation.z); + if (!fp2_is_equal(&t2, &t1)) + return 0; + } + + // compute the final codomain + hadamard(&out->codomain, &out->codomain); + return 1; +} + +// sub routine of the gluing eval +static void +gluing_eval_point(theta_point_t *image, const theta_couple_jac_point_t *P, const theta_gluing_t *phi) +{ + theta_point_t T1, T2; + add_components_t add_comp1, add_comp2; + + // Compute the cross addition components of P1+Q1 and P2+Q2 + jac_to_xz_add_components(&add_comp1, &P->P1, &phi->xyK1_8.P1, &phi->domain.E1); + jac_to_xz_add_components(&add_comp2, &P->P2, &phi->xyK1_8.P2, &phi->domain.E2); + + // Compute T1 and T2 derived from the cross addition components. + fp2_mul(&T1.x, &add_comp1.u, &add_comp2.u); // T1x = u1u2 + fp2_mul(&T2.t, &add_comp1.v, &add_comp2.v); // T2t = v1v2 + fp2_add(&T1.x, &T1.x, &T2.t); // T1x = u1u2 + v1v2 + fp2_mul(&T1.y, &add_comp1.u, &add_comp2.w); // T1y = u1w2 + fp2_mul(&T1.z, &add_comp1.w, &add_comp2.u); // T1z = w1u2 + fp2_mul(&T1.t, &add_comp1.w, &add_comp2.w); // T1t = w1w2 + fp2_add(&T2.x, &add_comp1.u, &add_comp1.v); // T2x = (u1+v1) + fp2_add(&T2.y, &add_comp2.u, &add_comp2.v); // T2y = (u2+v2) + fp2_mul(&T2.x, &T2.x, &T2.y); // T2x = (u1+v1)(u2+v2) + fp2_sub(&T2.x, &T2.x, &T1.x); // T1x = v1u2 + u1v2 + fp2_mul(&T2.y, &add_comp1.v, &add_comp2.w); // T2y = v1w2 + fp2_mul(&T2.z, &add_comp1.w, &add_comp2.v); // T2z = w1v2 + fp2_set_zero(&T2.t); // T2t = 0 + + // Apply the basis change and compute their respective square + // theta(P+Q) = M.T1 - M.T2 and theta(P-Q) = M.T1 + M.T2 + apply_isomorphism_general(&T1, &phi->M, &T1, true); + apply_isomorphism_general(&T2, &phi->M, &T2, false); + pointwise_square(&T1, &T1); + pointwise_square(&T2, &T2); + + // the difference between the two is therefore theta(P+Q)theta(P-Q) + // whose hadamard transform is then the product of the dual + // theta_points of phi(P) and phi(Q). + fp2_sub(&T1.x, &T1.x, &T2.x); + fp2_sub(&T1.y, &T1.y, &T2.y); + fp2_sub(&T1.z, &T1.z, &T2.z); + fp2_sub(&T1.t, &T1.t, &T2.t); + hadamard(&T1, &T1); + + // Compute (x, y, z, t) + // As imageK1_8 = (x:x:y:y), its inverse is (y:y:x:x). + fp2_mul(&image->x, &T1.x, &phi->imageK1_8.y); + fp2_mul(&image->y, &T1.y, &phi->imageK1_8.y); + fp2_mul(&image->z, &T1.z, &phi->imageK1_8.x); + fp2_mul(&image->t, &T1.t, &phi->imageK1_8.x); + + hadamard(image, image); +} + +// Same as gluing_eval_point but in the very special case where we already know that the point will +// have a zero coordinate at the place where the zero coordinate of the dual_theta_nullpoint would +// have made the computation difficult +static int +gluing_eval_point_special_case(theta_point_t *image, const theta_couple_point_t *P, const theta_gluing_t *phi) +{ + theta_point_t T; + + // Apply the basis change + base_change(&T, phi, P); + + // Apply the to_squared_theta transform + to_squared_theta(&T, &T); + + // This coordinate should always be 0 in a gluing because D=0. 
+ // If this is not the case, something went very wrong, so reject + if (!fp2_is_zero(&T.t)) + return 0; + + // Compute (x, y, z, t) + fp2_mul(&image->x, &T.x, &phi->precomputation.x); + fp2_mul(&image->y, &T.y, &phi->precomputation.y); + fp2_mul(&image->z, &T.z, &phi->precomputation.z); + fp2_set_zero(&image->t); + + hadamard(image, image); + return 1; +} + +/** + * @brief Evaluate a gluing isogeny from an elliptic product on a basis + * + * @param image1 Output: the theta_point of the image of the first couple of points + * @param image2 Output : the theta point of the image of the second couple of points + * @param xyT1: A pair of points (X : Y : Z) on E1E2 to glue using phi + * @param xyT2: A pair of points (X : Y : Z) on E1E2 to glue using phi + * @param phi : a gluing isogeny E1 x E2 -> A + * + **/ +static void +gluing_eval_basis(theta_point_t *image1, + theta_point_t *image2, + const theta_couple_jac_point_t *xyT1, + const theta_couple_jac_point_t *xyT2, + const theta_gluing_t *phi) +{ + gluing_eval_point(image1, xyT1, phi); + gluing_eval_point(image2, xyT2, phi); +} + +/** + * @brief Compute a (2,2) isogeny in dimension 2 in the theta_model + * + * @param out Output: the theta_isogeny + * @param A a theta null point for the domain + * @param T1_8 a point in A[8] + * @param T2_8 a point in A[8] + * @param hadamard_bool_1 a boolean used for the last two steps of the chain + * @param hadamard_bool_2 a boolean used for the last two steps of the chain + * + * out : A -> B of kernel [4](T1_8,T2_8) + * hadamard_bool_1 controls if the domain is in standard or dual coordinates + * hadamard_bool_2 controls if the codomain is in standard or dual coordinates + * verify: add extra sanity check to ensure our 8-torsion points are coherent with the isogeny + * + */ +static int +theta_isogeny_compute(theta_isogeny_t *out, + const theta_structure_t *A, + const theta_point_t *T1_8, + const theta_point_t *T2_8, + bool hadamard_bool_1, + bool hadamard_bool_2, + bool verify) +{ + out->hadamard_bool_1 = hadamard_bool_1; + out->hadamard_bool_2 = hadamard_bool_2; + out->domain = *A; + out->T1_8 = *T1_8; + out->T2_8 = *T2_8; + out->codomain.precomputation = false; + + theta_point_t TT1, TT2; + + if (hadamard_bool_1) { + hadamard(&TT1, T1_8); + to_squared_theta(&TT1, &TT1); + hadamard(&TT2, T2_8); + to_squared_theta(&TT2, &TT2); + } else { + to_squared_theta(&TT1, T1_8); + to_squared_theta(&TT2, T2_8); + } + + fp2_t t1, t2; + + // Test that our projective factor ABCDxzw is non zero, where + // TT1=(Ax, Bx, Cy, Dy), TT2=(Az, Bw, Cz, Dw) + // But ABCDxzw=0 can only happen if we had an unexpected splitting in + // the isogeny chain. 
+ // In either case reject + // (this is not strictly necessary, we could just return (0:0:0:0)) + if (fp2_is_zero(&TT2.x) | fp2_is_zero(&TT2.y) | fp2_is_zero(&TT2.z) | fp2_is_zero(&TT2.t) | fp2_is_zero(&TT1.x) | + fp2_is_zero(&TT1.y)) + return 0; + + fp2_mul(&t1, &TT1.x, &TT2.y); + fp2_mul(&t2, &TT1.y, &TT2.x); + fp2_mul(&out->codomain.null_point.x, &TT2.x, &t1); + fp2_mul(&out->codomain.null_point.y, &TT2.y, &t2); + fp2_mul(&out->codomain.null_point.z, &TT2.z, &t1); + fp2_mul(&out->codomain.null_point.t, &TT2.t, &t2); + fp2_t t3; + fp2_mul(&t3, &TT2.z, &TT2.t); + fp2_mul(&out->precomputation.x, &t3, &TT1.y); + fp2_mul(&out->precomputation.y, &t3, &TT1.x); + fp2_copy(&out->precomputation.z, &out->codomain.null_point.t); + fp2_copy(&out->precomputation.t, &out->codomain.null_point.z); + + // If T1_8 and T2_8 are our 8-torsion points, this ensures that the + // 4-torsion points 2T1_8 and 2T2_8 are isotropic. + if (verify) { + fp2_mul(&t1, &TT1.x, &out->precomputation.x); + fp2_mul(&t2, &TT1.y, &out->precomputation.y); + if (!fp2_is_equal(&t1, &t2)) + return 0; + fp2_mul(&t1, &TT1.z, &out->precomputation.z); + fp2_mul(&t2, &TT1.t, &out->precomputation.t); + if (!fp2_is_equal(&t1, &t2)) + return 0; + fp2_mul(&t1, &TT2.x, &out->precomputation.x); + fp2_mul(&t2, &TT2.z, &out->precomputation.z); + if (!fp2_is_equal(&t1, &t2)) + return 0; + fp2_mul(&t1, &TT2.y, &out->precomputation.y); + fp2_mul(&t2, &TT2.t, &out->precomputation.t); + if (!fp2_is_equal(&t1, &t2)) + return 0; + } + + if (hadamard_bool_2) { + hadamard(&out->codomain.null_point, &out->codomain.null_point); + } + return 1; +} + +/** + * @brief Compute a (2,2) isogeny when only the 4 torsion above the kernel is known and not the 8 + * torsion + * + * @param out Output: the theta_isogeny + * @param A a theta null point for the domain + * @param T1_4 a point in A[4] + * @param T2_4 a point in A[4] + * @param hadamard_bool_1 a boolean + * @param hadamard_bool_2 a boolean + * + * out : A -> B of kernel [2](T1_4,T2_4) + * hadamard_bool_1 controls if the domain is in standard or dual coordinates + * hadamard_bool_2 controls if the codomain is in standard or dual coordinates + * + */ +static void +theta_isogeny_compute_4(theta_isogeny_t *out, + const theta_structure_t *A, + const theta_point_t *T1_4, + const theta_point_t *T2_4, + bool hadamard_bool_1, + bool hadamard_bool_2) +{ + out->hadamard_bool_1 = hadamard_bool_1; + out->hadamard_bool_2 = hadamard_bool_2; + out->domain = *A; + out->T1_8 = *T1_4; + out->T2_8 = *T2_4; + out->codomain.precomputation = false; + + theta_point_t TT1, TT2; + // we will compute: + // TT1 = (xAB, _ , xCD, _) + // TT2 = (AA,BB,CC,DD) + + // fp2_t xA_inv,zA_inv,tB_inv; + + if (hadamard_bool_1) { + hadamard(&TT1, T1_4); + to_squared_theta(&TT1, &TT1); + + hadamard(&TT2, &A->null_point); + to_squared_theta(&TT2, &TT2); + } else { + to_squared_theta(&TT1, T1_4); + to_squared_theta(&TT2, &A->null_point); + } + + fp2_t sqaabb, sqaacc; + fp2_mul(&sqaabb, &TT2.x, &TT2.y); + fp2_mul(&sqaacc, &TT2.x, &TT2.z); + // No need to check the square roots, only used for signing. 
+ // sqaabb = sqrt(AA*BB) + fp2_sqrt(&sqaabb); + // sqaacc = sqrt(AA*CC) + fp2_sqrt(&sqaacc); + + // we compute out->codomain.null_point = (xAB * sqaacc * AA, xAB *sqaabb *sqaacc, xCD*sqaabb * + // AA) out->precomputation = (xAB * BB * CC *DD , sqaabb * CC * DD * xAB , sqaacc * BB* DD * xAB + // , xCD * sqaabb *sqaacc * BB) + + fp2_mul(&out->codomain.null_point.y, &sqaabb, &sqaacc); + fp2_mul(&out->precomputation.t, &out->codomain.null_point.y, &TT1.z); + fp2_mul(&out->codomain.null_point.y, &out->codomain.null_point.y, + &TT1.x); // done for out->codomain.null_point.y + + fp2_mul(&out->codomain.null_point.t, &TT1.z, &sqaabb); + fp2_mul(&out->codomain.null_point.t, &out->codomain.null_point.t, + &TT2.x); // done for out->codomain.null_point.t + + fp2_mul(&out->codomain.null_point.x, &TT1.x, &TT2.x); + fp2_mul(&out->codomain.null_point.z, &out->codomain.null_point.x, + &TT2.z); // done for out->codomain.null_point.z + fp2_mul(&out->codomain.null_point.x, &out->codomain.null_point.x, + &sqaacc); // done for out->codomain.null_point.x + + fp2_mul(&out->precomputation.x, &TT1.x, &TT2.t); + fp2_mul(&out->precomputation.z, &out->precomputation.x, &TT2.y); + fp2_mul(&out->precomputation.x, &out->precomputation.x, &TT2.z); + fp2_mul(&out->precomputation.y, &out->precomputation.x, &sqaabb); // done for out->precomputation.y + fp2_mul(&out->precomputation.x, &out->precomputation.x, &TT2.y); // done for out->precomputation.x + fp2_mul(&out->precomputation.z, &out->precomputation.z, &sqaacc); // done for out->precomputation.z + fp2_mul(&out->precomputation.t, &out->precomputation.t, &TT2.y); // done for out->precomputation.t + + if (hadamard_bool_2) { + hadamard(&out->codomain.null_point, &out->codomain.null_point); + } +} + +/** + * @brief Compute a (2,2) isogeny when only the kernel is known and not the 8 or 4 torsion above + * + * @param out Output: the theta_isogeny + * @param A a theta null point for the domain + * @param T1_2 a point in A[2] + * @param T2_2 a point in A[2] + * @param hadamard_bool_1 a boolean + * @param boo2 a boolean + * + * out : A -> B of kernel (T1_2,T2_2) + * hadamard_bool_1 controls if the domain is in standard or dual coordinates + * hadamard_bool_2 controls if the codomain is in standard or dual coordinates + * + */ +static void +theta_isogeny_compute_2(theta_isogeny_t *out, + const theta_structure_t *A, + const theta_point_t *T1_2, + const theta_point_t *T2_2, + bool hadamard_bool_1, + bool hadamard_bool_2) +{ + out->hadamard_bool_1 = hadamard_bool_1; + out->hadamard_bool_2 = hadamard_bool_2; + out->domain = *A; + out->T1_8 = *T1_2; + out->T2_8 = *T2_2; + out->codomain.precomputation = false; + + theta_point_t TT2; + // we will compute: + // TT2 = (AA,BB,CC,DD) + + if (hadamard_bool_1) { + hadamard(&TT2, &A->null_point); + to_squared_theta(&TT2, &TT2); + } else { + to_squared_theta(&TT2, &A->null_point); + } + + // we compute out->codomain.null_point = (AA,sqaabb, sqaacc, sqaadd) + // out->precomputation = ( BB * CC *DD , sqaabb * CC * DD , sqaacc * BB* DD , sqaadd * BB * CC) + fp2_copy(&out->codomain.null_point.x, &TT2.x); + fp2_mul(&out->codomain.null_point.y, &TT2.x, &TT2.y); + fp2_mul(&out->codomain.null_point.z, &TT2.x, &TT2.z); + fp2_mul(&out->codomain.null_point.t, &TT2.x, &TT2.t); + // No need to check the square roots, only used for signing. 
+ fp2_sqrt(&out->codomain.null_point.y); + fp2_sqrt(&out->codomain.null_point.z); + fp2_sqrt(&out->codomain.null_point.t); + + fp2_mul(&out->precomputation.x, &TT2.z, &TT2.t); + fp2_mul(&out->precomputation.y, + &out->precomputation.x, + &out->codomain.null_point.y); // done for out->precomputation.y + fp2_mul(&out->precomputation.x, &out->precomputation.x, &TT2.y); // done for out->precomputation.x + fp2_mul(&out->precomputation.z, &TT2.t, &out->codomain.null_point.z); + fp2_mul(&out->precomputation.z, &out->precomputation.z, &TT2.y); // done for out->precomputation.z + fp2_mul(&out->precomputation.t, &TT2.z, &out->codomain.null_point.t); + fp2_mul(&out->precomputation.t, &out->precomputation.t, &TT2.y); // done for out->precomputation.t + + if (hadamard_bool_2) { + hadamard(&out->codomain.null_point, &out->codomain.null_point); + } +} + +static void +theta_isogeny_eval(theta_point_t *out, const theta_isogeny_t *phi, const theta_point_t *P) +{ + if (phi->hadamard_bool_1) { + hadamard(out, P); + to_squared_theta(out, out); + } else { + to_squared_theta(out, P); + } + fp2_mul(&out->x, &out->x, &phi->precomputation.x); + fp2_mul(&out->y, &out->y, &phi->precomputation.y); + fp2_mul(&out->z, &out->z, &phi->precomputation.z); + fp2_mul(&out->t, &out->t, &phi->precomputation.t); + + if (phi->hadamard_bool_2) { + hadamard(out, out); + } +} + +#if defined(ENABLE_SIGN) +// Sample a random secret index in [0, 5] to select one of the 6 normalisation +// matrices for the normalisation of the output of the (2,2)-chain during +// splitting +static unsigned char +sample_random_index(void) +{ + // To avoid bias in reduction we should only consider integers smaller + // than 2^32 which are a multiple of 6, so we only reduce bytes with a + // value in [0, 4294967292-1]. + // We have 4294967292/2^32 = ~99.9999999% chance that the first try is "good". 
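+    // The branch-free reduction below uses 2863311531 = ceil(2^34 / 6): for any 32-bit
+    // seed, (seed * 2863311531) >> 34 equals seed / 6 (the assert below re-checks this),
+    // so subtracting 6 * (seed / 6) leaves exactly seed mod 6 without a division.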
+ unsigned char seed_arr[4]; + uint32_t seed; + + do { + randombytes(seed_arr, 4); + seed = (seed_arr[0] | (seed_arr[1] << 8) | (seed_arr[2] << 16) | (seed_arr[3] << 24)); + } while (seed >= 4294967292U); + + uint32_t secret_index = seed - (((uint64_t)seed * 2863311531U) >> 34) * 6; + assert(secret_index == seed % 6); // ensure the constant time trick above works + return (unsigned char)secret_index; +} +#endif + +static bool +splitting_compute(theta_splitting_t *out, const theta_structure_t *A, int zero_index, bool randomize) + +{ + // init + uint32_t ctl; + uint32_t count = 0; + fp2_t U_cst, t1, t2; + + memset(&out->M, 0, sizeof(basis_change_matrix_t)); + + // enumerate through all indices + for (int i = 0; i < 10; i++) { + fp2_set_zero(&U_cst); + for (int t = 0; t < 4; t++) { + // Iterate through the null point + choose_index_theta_point(&t2, t, &A->null_point); + choose_index_theta_point(&t1, t ^ EVEN_INDEX[i][1], &A->null_point); + + // Compute t1 * t2 + fp2_mul(&t1, &t1, &t2); + // If CHI_EVAL(i,t) is +1 we want ctl to be 0 and + // If CHI_EVAL(i,t) is -1 we want ctl to be 0xFF..FF + ctl = (uint32_t)(CHI_EVAL[EVEN_INDEX[i][0]][t] >> 1); + assert(ctl == 0 || ctl == 0xffffffff); + + fp2_neg(&t2, &t1); + fp2_select(&t1, &t1, &t2, ctl); + + // Then we compute U_cst ± (t1 * t2) + fp2_add(&U_cst, &U_cst, &t1); + } + + // If U_cst is 0 then update the splitting matrix + ctl = fp2_is_zero(&U_cst); + count -= ctl; + select_base_change_matrix(&out->M, &out->M, &SPLITTING_TRANSFORMS[i], ctl); + if (zero_index != -1 && i == zero_index && + !ctl) { // extra checks if we know exactly where the 0 index should be + return 0; + } + } + +#if defined(ENABLE_SIGN) + // Pick a random normalization matrix + if (randomize) { + unsigned char secret_index = sample_random_index(); + basis_change_matrix_t Mrandom; + + set_base_change_matrix_from_precomp(&Mrandom, &NORMALIZATION_TRANSFORMS[0]); + + // Use a constant time selection to pick the index we want + for (unsigned char i = 1; i < 6; i++) { + // When i == secret_index, mask == 0 and 0xFF..FF otherwise + int32_t mask = i - secret_index; + mask = (mask | -mask) >> 31; + select_base_change_matrix(&Mrandom, &Mrandom, &NORMALIZATION_TRANSFORMS[i], ~mask); + } + base_change_matrix_multiplication(&out->M, &Mrandom, &out->M); + } +#else + assert(!randomize); +#endif + + // apply the isomorphism to ensure the null point is compatible with splitting + apply_isomorphism(&out->B.null_point, &out->M, &A->null_point); + + // splitting was successful only if exactly one zero was identified + return count == 1; +} + +static int +theta_product_structure_to_elliptic_product(theta_couple_curve_t *E12, theta_structure_t *A) +{ + fp2_t xx, yy; + + // This should be true from our computations in splitting_compute + // but still check this for sanity + if (!is_product_theta_point(&A->null_point)) + return 0; + + ec_curve_init(&(E12->E1)); + ec_curve_init(&(E12->E2)); + + // A valid elliptic theta null point has no zero coordinate + if (fp2_is_zero(&A->null_point.x) | fp2_is_zero(&A->null_point.y) | fp2_is_zero(&A->null_point.z)) + return 0; + + // xx = x², yy = y² + fp2_sqr(&xx, &A->null_point.x); + fp2_sqr(&yy, &A->null_point.y); + // xx = x^4, yy = y^4 + fp2_sqr(&xx, &xx); + fp2_sqr(&yy, &yy); + + // A2 = -2(x^4+y^4)/(x^4-y^4) + fp2_add(&E12->E2.A, &xx, &yy); + fp2_sub(&E12->E2.C, &xx, &yy); + fp2_add(&E12->E2.A, &E12->E2.A, &E12->E2.A); + fp2_neg(&E12->E2.A, &E12->E2.A); + + // same with x,z + fp2_sqr(&xx, &A->null_point.x); + fp2_sqr(&yy, &A->null_point.z); + fp2_sqr(&xx, 
&xx); + fp2_sqr(&yy, &yy); + + // A1 = -2(x^4+z^4)/(x^4-z^4) + fp2_add(&E12->E1.A, &xx, &yy); + fp2_sub(&E12->E1.C, &xx, &yy); + fp2_add(&E12->E1.A, &E12->E1.A, &E12->E1.A); + fp2_neg(&E12->E1.A, &E12->E1.A); + + if (fp2_is_zero(&E12->E1.C) | fp2_is_zero(&E12->E2.C)) + return 0; + + return 1; +} + +static int +theta_point_to_montgomery_point(theta_couple_point_t *P12, const theta_point_t *P, const theta_structure_t *A) +{ + fp2_t temp; + const fp2_t *x, *z; + + if (!is_product_theta_point(P)) + return 0; + + x = &P->x; + z = &P->y; + if (fp2_is_zero(x) & fp2_is_zero(z)) { + x = &P->z; + z = &P->t; + } + if (fp2_is_zero(x) & fp2_is_zero(z)) { + return 0; // at this point P=(0:0:0:0) so is invalid + } + // P2.X = A.null_point.y * P.x + A.null_point.x * P.y + // P2.Z = - A.null_point.y * P.x + A.null_point.x * P.y + fp2_mul(&P12->P2.x, &A->null_point.y, x); + fp2_mul(&temp, &A->null_point.x, z); + fp2_sub(&P12->P2.z, &temp, &P12->P2.x); + fp2_add(&P12->P2.x, &P12->P2.x, &temp); + + x = &P->x; + z = &P->z; + if (fp2_is_zero(x) & fp2_is_zero(z)) { + x = &P->y; + z = &P->t; + } + // P1.X = A.null_point.z * P.x + A.null_point.x * P.z + // P1.Z = -A.null_point.z * P.x + A.null_point.x * P.z + fp2_mul(&P12->P1.x, &A->null_point.z, x); + fp2_mul(&temp, &A->null_point.x, z); + fp2_sub(&P12->P1.z, &temp, &P12->P1.x); + fp2_add(&P12->P1.x, &P12->P1.x, &temp); + return 1; +} + +static int +_theta_chain_compute_impl(unsigned n, + theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP, + bool verify, + bool randomize) +{ + theta_structure_t theta; + + // lift the basis + theta_couple_jac_point_t xyT1, xyT2; + + ec_basis_t bas1 = { .P = ker->T1.P1, .Q = ker->T2.P1, .PmQ = ker->T1m2.P1 }; + ec_basis_t bas2 = { .P = ker->T1.P2, .Q = ker->T2.P2, .PmQ = ker->T1m2.P2 }; + if (!lift_basis(&xyT1.P1, &xyT2.P1, &bas1, &E12->E1)) + return 0; + if (!lift_basis(&xyT1.P2, &xyT2.P2, &bas2, &E12->E2)) + return 0; + + const unsigned extra = HD_extra_torsion * extra_torsion; + +#ifndef NDEBUG + assert(extra == 0 || extra == 2); // only cases implemented + if (!test_point_order_twof(&bas2.P, &E12->E2, n + extra)) + debug_print("bas2.P does not have correct order"); + + if (!test_jac_order_twof(&xyT2.P2, &E12->E2, n + extra)) + debug_print("xyT2.P2 does not have correct order"); +#endif + + theta_point_t pts[numP ? numP : 1]; + + int space = 1; + for (unsigned i = 1; i < n; i *= 2) + ++space; + + uint16_t todo[space]; + todo[0] = n - 2 + extra; + + int current = 0; + + // kernel points for the gluing isogeny + theta_couple_jac_point_t jacQ1[space], jacQ2[space]; + jacQ1[0] = xyT1; + jacQ2[0] = xyT2; + while (todo[current] != 1) { + assert(todo[current] >= 2); + ++current; + assert(current < space); + // the gluing isogeny is quite a bit more expensive than the others, + // so we adjust the usual splitting rule here a little bit: towards + // the end of the doubling chain it will be cheaper to recompute the + // doublings after evaluation than to push the intermediate points. + const unsigned num_dbls = todo[current - 1] >= 16 ? 
todo[current - 1] / 2 : todo[current - 1] - 1; + assert(num_dbls && num_dbls < todo[current - 1]); + double_couple_jac_point_iter(&jacQ1[current], num_dbls, &jacQ1[current - 1], E12); + double_couple_jac_point_iter(&jacQ2[current], num_dbls, &jacQ2[current - 1], E12); + todo[current] = todo[current - 1] - num_dbls; + } + + // kernel points for the remaining isogeny steps + theta_point_t thetaQ1[space], thetaQ2[space]; + + // the gluing step + theta_gluing_t first_step; + { + assert(todo[current] == 1); + + // compute the gluing isogeny + if (!gluing_compute(&first_step, E12, &jacQ1[current], &jacQ2[current], verify)) + return 0; + + // evaluate + for (unsigned j = 0; j < numP; ++j) { + assert(ec_is_zero(&P12[j].P1) || ec_is_zero(&P12[j].P2)); + if (!gluing_eval_point_special_case(&pts[j], &P12[j], &first_step)) + return 0; + } + + // push kernel points through gluing isogeny + for (int j = 0; j < current; ++j) { + gluing_eval_basis(&thetaQ1[j], &thetaQ2[j], &jacQ1[j], &jacQ2[j], &first_step); + --todo[j]; + } + + --current; + } + + // set-up the theta_structure for the first codomain + theta.null_point = first_step.codomain; + theta.precomputation = 0; + theta_precomputation(&theta); + + theta_isogeny_t step; + + // and now we do the remaining steps + for (unsigned i = 1; current >= 0 && todo[current]; ++i) { + assert(current < space); + while (todo[current] != 1) { + assert(todo[current] >= 2); + ++current; + assert(current < space); + const unsigned num_dbls = todo[current - 1] / 2; + assert(num_dbls && num_dbls < todo[current - 1]); + double_iter(&thetaQ1[current], &theta, &thetaQ1[current - 1], num_dbls); + double_iter(&thetaQ2[current], &theta, &thetaQ2[current - 1], num_dbls); + todo[current] = todo[current - 1] - num_dbls; + } + + // computing the next step + int ret; + if (i == n - 2) // penultimate step + ret = theta_isogeny_compute(&step, &theta, &thetaQ1[current], &thetaQ2[current], 0, 0, verify); + else if (i == n - 1) // ultimate step + ret = theta_isogeny_compute(&step, &theta, &thetaQ1[current], &thetaQ2[current], 1, 0, false); + else + ret = theta_isogeny_compute(&step, &theta, &thetaQ1[current], &thetaQ2[current], 0, 1, verify); + if (!ret) + return 0; + + for (unsigned j = 0; j < numP; ++j) + theta_isogeny_eval(&pts[j], &step, &pts[j]); + + // updating the codomain + theta = step.codomain; + + // pushing the kernel + assert(todo[current] == 1); + for (int j = 0; j < current; ++j) { + theta_isogeny_eval(&thetaQ1[j], &step, &thetaQ1[j]); + theta_isogeny_eval(&thetaQ2[j], &step, &thetaQ2[j]); + assert(todo[j]); + --todo[j]; + } + + --current; + } + + assert(current == -1); + + if (!extra_torsion) { + if (n >= 3) { + // in the last step we've skipped pushing the kernel since current was == 0, let's do it now + theta_isogeny_eval(&thetaQ1[0], &step, &thetaQ1[0]); + theta_isogeny_eval(&thetaQ2[0], &step, &thetaQ2[0]); + } + + // penultimate step + theta_isogeny_compute_4(&step, &theta, &thetaQ1[0], &thetaQ2[0], 0, 0); + for (unsigned j = 0; j < numP; ++j) + theta_isogeny_eval(&pts[j], &step, &pts[j]); + theta = step.codomain; + theta_isogeny_eval(&thetaQ1[0], &step, &thetaQ1[0]); + theta_isogeny_eval(&thetaQ2[0], &step, &thetaQ2[0]); + + // ultimate step + theta_isogeny_compute_2(&step, &theta, &thetaQ1[0], &thetaQ2[0], 1, 0); + for (unsigned j = 0; j < numP; ++j) + theta_isogeny_eval(&pts[j], &step, &pts[j]); + theta = step.codomain; + } + + // final splitting step + theta_splitting_t last_step; + + bool is_split = splitting_compute(&last_step, &theta, extra_torsion ? 
8 : -1, randomize); + + if (!is_split) { + debug_print("kernel did not generate an isogeny between elliptic products"); + return 0; + } + + if (!theta_product_structure_to_elliptic_product(E34, &last_step.B)) + return 0; + + // evaluate + for (size_t j = 0; j < numP; ++j) { + apply_isomorphism(&pts[j], &last_step.M, &pts[j]); + if (!theta_point_to_montgomery_point(&P12[j], &pts[j], &last_step.B)) + return 0; + } + + return 1; +} + +int +theta_chain_compute_and_eval(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP) +{ + return _theta_chain_compute_impl(n, E12, ker, extra_torsion, E34, P12, numP, false, false); +} + +// Like theta_chain_compute_and_eval, adding extra verification checks; +// used in the signature verification +int +theta_chain_compute_and_eval_verify(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP) +{ + return _theta_chain_compute_impl(n, E12, ker, extra_torsion, E34, P12, numP, true, false); +} + +int +theta_chain_compute_and_eval_randomized(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP) +{ + return _theta_chain_compute_impl(n, E12, ker, extra_torsion, E34, P12, numP, false, true); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.h new file mode 100644 index 0000000000..d151811fe7 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.h @@ -0,0 +1,18 @@ +/** @file + * + * @authors Antonin Leroux + * + * @brief the theta isogeny header + */ + +#ifndef THETA_ISOGENY_H +#define THETA_ISOGENY_H + +#include +#include +#include +#include "theta_structure.h" +#include +#include + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/theta_structure.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/theta_structure.c new file mode 100644 index 0000000000..ce97ac61a8 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/theta_structure.c @@ -0,0 +1,78 @@ +#include "theta_structure.h" +#include + +void +theta_precomputation(theta_structure_t *A) +{ + + if (A->precomputation) { + return; + } + + theta_point_t A_dual; + to_squared_theta(&A_dual, &A->null_point); + + fp2_t t1, t2; + fp2_mul(&t1, &A_dual.x, &A_dual.y); + fp2_mul(&t2, &A_dual.z, &A_dual.t); + fp2_mul(&A->XYZ0, &t1, &A_dual.z); + fp2_mul(&A->XYT0, &t1, &A_dual.t); + fp2_mul(&A->YZT0, &t2, &A_dual.y); + fp2_mul(&A->XZT0, &t2, &A_dual.x); + + fp2_mul(&t1, &A->null_point.x, &A->null_point.y); + fp2_mul(&t2, &A->null_point.z, &A->null_point.t); + fp2_mul(&A->xyz0, &t1, &A->null_point.z); + fp2_mul(&A->xyt0, &t1, &A->null_point.t); + fp2_mul(&A->yzt0, &t2, &A->null_point.y); + fp2_mul(&A->xzt0, &t2, &A->null_point.x); + + A->precomputation = true; +} + +void +double_point(theta_point_t *out, theta_structure_t *A, const theta_point_t *in) +{ + to_squared_theta(out, in); + fp2_sqr(&out->x, &out->x); + fp2_sqr(&out->y, &out->y); + fp2_sqr(&out->z, &out->z); + fp2_sqr(&out->t, &out->t); + + if (!A->precomputation) { + theta_precomputation(A); + } + fp2_mul(&out->x, &out->x, &A->YZT0); + fp2_mul(&out->y, &out->y, &A->XZT0); + fp2_mul(&out->z, 
&out->z, &A->XYT0);
+    fp2_mul(&out->t, &out->t, &A->XYZ0);
+
+    hadamard(out, out);
+
+    fp2_mul(&out->x, &out->x, &A->yzt0);
+    fp2_mul(&out->y, &out->y, &A->xzt0);
+    fp2_mul(&out->z, &out->z, &A->xyt0);
+    fp2_mul(&out->t, &out->t, &A->xyz0);
+}
+
+void
+double_iter(theta_point_t *out, theta_structure_t *A, const theta_point_t *in, int exp)
+{
+    if (exp == 0) {
+        *out = *in;
+    } else {
+        double_point(out, A, in);
+        for (int i = 1; i < exp; i++) {
+            double_point(out, A, out);
+        }
+    }
+}
+
+uint32_t
+is_product_theta_point(const theta_point_t *P)
+{
+    fp2_t t1, t2;
+    fp2_mul(&t1, &P->x, &P->t);
+    fp2_mul(&t2, &P->y, &P->z);
+    return fp2_is_equal(&t1, &t2);
+}
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/theta_structure.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/theta_structure.h
new file mode 100644
index 0000000000..fc630b750a
--- /dev/null
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/theta_structure.h
@@ -0,0 +1,135 @@
+/** @file
+ *
+ * @authors Antonin Leroux
+ *
+ * @brief the theta structure header
+ */
+
+#ifndef THETA_STRUCTURE_H
+#define THETA_STRUCTURE_H
+
+#include
+#include
+#include
+
+/** @internal
+ * @ingroup hd_module
+ * @defgroup hd_theta Functions for theta structures
+ * @{
+ */
+
+/**
+ * @brief Perform the hadamard transform on a theta point
+ *
+ * @param out Output: the theta_point
+ * @param in a theta point*
+ * in = (x,y,z,t)
+ * out = (x+y+z+t, x-y+z-t, x+y-z-t, x-y-z+t)
+ *
+ */
+static inline void
+hadamard(theta_point_t *out, const theta_point_t *in)
+{
+    fp2_t t1, t2, t3, t4;
+
+    // t1 = x + y
+    fp2_add(&t1, &in->x, &in->y);
+    // t2 = x - y
+    fp2_sub(&t2, &in->x, &in->y);
+    // t3 = z + t
+    fp2_add(&t3, &in->z, &in->t);
+    // t4 = z - t
+    fp2_sub(&t4, &in->z, &in->t);
+
+    fp2_add(&out->x, &t1, &t3);
+    fp2_add(&out->y, &t2, &t4);
+    fp2_sub(&out->z, &t1, &t3);
+    fp2_sub(&out->t, &t2, &t4);
+}
+
+/**
+ * @brief Square the coordinates of a theta point
+ * @param out Output: the theta_point
+ * @param in a theta point*
+ * in = (x,y,z,t)
+ * out = (x^2, y^2, z^2, t^2)
+ *
+ */
+static inline void
+pointwise_square(theta_point_t *out, const theta_point_t *in)
+{
+    fp2_sqr(&out->x, &in->x);
+    fp2_sqr(&out->y, &in->y);
+    fp2_sqr(&out->z, &in->z);
+    fp2_sqr(&out->t, &in->t);
+}
+
+/**
+ * @brief Square the coordinates and then perform the hadamard transform
+ *
+ * @param out Output: the theta_point
+ * @param in a theta point*
+ * in = (x,y,z,t)
+ * out = (x^2+y^2+z^2+t^2, x^2-y^2+z^2-t^2, x^2+y^2-z^2-t^2, x^2-y^2-z^2+t^2)
+ *
+ */
+static inline void
+to_squared_theta(theta_point_t *out, const theta_point_t *in)
+{
+    pointwise_square(out, in);
+    hadamard(out, out);
+}
+
+/**
+ * @brief Perform the theta structure precomputation
+ *
+ * @param A Output: the theta_structure
+ *
+ * if A.null_point = (x,y,z,t)
+ * if (xx,yy,zz,tt) = to_squared_theta(A.null_point)
+ * Computes y0,z0,t0,Y0,Z0,T0 = x/y,x/z,x/t,XX/YY,XX/ZZ,XX/TT
+ *
+ */
+void theta_precomputation(theta_structure_t *A);
+
+/**
+ * @brief Compute the double of the theta point in on the theta structure A
+ *
+ * @param out Output: the theta_point
+ * @param A a theta structure
+ * @param in a theta point in the theta structure A
+ * in = (x,y,z,t)
+ * out = [2] (x,y,z,t)
+ * /!\ assumes that no coordinate is zero and that the precomputation of A has been done
+ *
+ */
+void double_point(theta_point_t *out, theta_structure_t *A, const theta_point_t *in);
+
+/**
+ * @brief Compute the iterated double of the theta point in on the theta structure A
+ *
+ * @param out Output: the theta_point
+ * @param A a theta structure
+ * @param in a theta point in the theta structure A
+ * @param exp the exponent
+ * in = (x,y,z,t)
+ * out = [2^exp] (x,y,z,t)
+ * /!\ assumes that no coordinate is zero and that the precomputation of A has been done
+ *
+ */
+void double_iter(theta_point_t *out, theta_structure_t *A, const theta_point_t *in, int exp);
+
+/**
+ * @brief Check if a theta point is a product theta point
+ *
+ * @param P a theta point
+ * @return 0xFFFFFFFF if true, zero otherwise
+ */
+uint32_t is_product_theta_point(const theta_point_t *P);
+
+// end hd_theta
+/**
+ * @}
+ */
+
+#endif
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/tools.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/tools.c
new file mode 100644
index 0000000000..242ea08fe2
--- /dev/null
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/tools.c
@@ -0,0 +1,75 @@
+#include
+#include
+
+static clock_t global_timer;
+
+clock_t
+tic(void)
+{
+    global_timer = clock();
+    return global_timer;
+}
+
+float
+tac(void)
+{
+    float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC);
+    return ms;
+}
+
+float
+TAC(const char *str)
+{
+    float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC);
+#ifndef NDEBUG
+    printf("%s [%d ms]\n", str, (int)ms);
+#endif
+    return ms;
+}
+
+float
+toc(const clock_t t)
+{
+    float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC);
+    return ms;
+}
+
+float
+TOC(const clock_t t, const char *str)
+{
+    float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC);
+    printf("%s [%d ms]\n", str, (int)ms);
+    return ms;
+    // printf("%s [%ld]\n",str,clock()-t);
+    // return (float) (clock()-t);
+}
+
+float
+TOC_clock(const clock_t t, const char *str)
+{
+    printf("%s [%ld]\n", str, clock() - t);
+    return (float)(clock() - t);
+}
+
+clock_t
+dclock(const clock_t t)
+{
+    return (clock() - t);
+}
+
+float
+clock_to_time(const clock_t t, const char *str)
+{
+    float ms = (1000.
* (float)(t) / CLOCKS_PER_SEC); + printf("%s [%d ms]\n", str, (int)ms); + return ms; + // printf("%s [%ld]\n",str,t); + // return (float) (t); +} + +float +clock_print(const clock_t t, const char *str) +{ + printf("%s [%ld]\n", str, t); + return (float)(t); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/tools.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/tools.h new file mode 100644 index 0000000000..5a6a505fc1 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/tools.h @@ -0,0 +1,49 @@ + +#ifndef TOOLS_H +#define TOOLS_H + +#include + +// Debug printing: +// https://stackoverflow.com/questions/1644868/define-macro-for-debug-printing-in-c +#ifndef NDEBUG +#define DEBUG_PRINT 1 +#else +#define DEBUG_PRINT 0 +#endif + +#ifndef __FILE_NAME__ +#define __FILE_NAME__ "NA" +#endif + +#ifndef __LINE__ +#define __LINE__ 0 +#endif + +#ifndef __func__ +#define __func__ "NA" +#endif + +#define debug_print(fmt) \ + do { \ + if (DEBUG_PRINT) \ + printf("warning: %s, file %s, line %d, function %s().\n", \ + fmt, \ + __FILE_NAME__, \ + __LINE__, \ + __func__); \ + } while (0) + + +clock_t tic(void); +float tac(void); /* time in ms since last tic */ +float TAC(const char *str); /* same, but prints it with label 'str' */ +float toc(const clock_t t); /* time in ms since t */ +float TOC(const clock_t t, const char *str); /* same, but prints it with label 'str' */ +float TOC_clock(const clock_t t, const char *str); + +clock_t dclock(const clock_t t); // return the clock cycle diff between now and t +float clock_to_time(const clock_t t, + const char *str); // convert the number of clock cycles t to time +float clock_print(const clock_t t, const char *str); +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/torsion_constants.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/torsion_constants.c new file mode 100644 index 0000000000..d7a42bcbe9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/torsion_constants.c @@ -0,0 +1,43 @@ +#include +#include +#include +const ibz_t TWO_TO_SECURITY_BITS = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x1}}} +#endif +; +const ibz_t TORSION_PLUS_2POWER = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x100}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x100000000000000}}} +#endif +; +const ibz_t SEC_DEGREE = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#endif +; +const ibz_t COM_DEGREE = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = 
(mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#endif +; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/torsion_constants.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/torsion_constants.h new file mode 100644 index 0000000000..2756a2715f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/torsion_constants.h @@ -0,0 +1,6 @@ +#include +#define TORSION_2POWER_BYTES 32 +extern const ibz_t TWO_TO_SECURITY_BITS; +extern const ibz_t TORSION_PLUS_2POWER; +extern const ibz_t SEC_DEGREE; +extern const ibz_t COM_DEGREE; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/tutil.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/tutil.h new file mode 100644 index 0000000000..59f162093e --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/tutil.h @@ -0,0 +1,36 @@ +#ifndef TUTIL_H +#define TUTIL_H + +#include +#include + +#if defined(__GNUC__) || defined(__clang__) +#define BSWAP16(i) __builtin_bswap16((i)) +#define BSWAP32(i) __builtin_bswap32((i)) +#define BSWAP64(i) __builtin_bswap64((i)) +#define UNUSED __attribute__((unused)) +#else +#define BSWAP16(i) ((((i) >> 8) & 0xff) | (((i) & 0xff00) << 8)) +#define BSWAP32(i) \ + ((((i) >> 24) & 0xff) | (((i) >> 8) & 0xff00) | (((i) & 0xff00) << 8) | ((i) << 24)) +#define BSWAP64(i) ((BSWAP32((i) >> 32) & 0xffffffff) | (BSWAP32(i) << 32) +#define UNUSED +#endif + +#if defined(RADIX_64) +#define digit_t uint64_t +#define sdigit_t int64_t +#define RADIX 64 +#define LOG2RADIX 6 +#define BSWAP_DIGIT(i) BSWAP64(i) +#elif defined(RADIX_32) +#define digit_t uint32_t +#define sdigit_t int32_t +#define RADIX 32 +#define LOG2RADIX 5 +#define BSWAP_DIGIT(i) BSWAP32(i) +#else +#error "Radix must be 32bit or 64 bit" +#endif + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/vaes256_key_expansion.S b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/vaes256_key_expansion.S new file mode 100644 index 0000000000..2311fa9bc8 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/vaes256_key_expansion.S @@ -0,0 +1,122 @@ +#*************************************************************************** +# This implementation is a modified version of the code, +# written by Nir Drucker and Shay Gueron +# AWS Cryptographic Algorithms Group +# (ndrucker@amazon.com, gueron@amazon.com) +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# The license is detailed in the file LICENSE.txt, and applies to this file. 
+#*************************************************************************** + +.intel_syntax noprefix +.data + +.p2align 4, 0x90 +MASK1: +.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d +CON1: +.long 1,1,1,1 + +.set k256_size, 32 + +#if defined(__linux__) && defined(__ELF__) +.section .note.GNU-stack,"",@progbits +#endif +.text + +################################################################################ +# void aes256_key_expansion(OUT aes256_ks_t* ks, IN const uint8_t* key); +# The output parameter must be 16 bytes aligned! +# +#Linux ABI +#define out rdi +#define in rsi + +#define CON xmm0 +#define MASK_REG xmm1 + +#define IN0 xmm2 +#define IN1 xmm3 + +#define TMP1 xmm4 +#define TMP2 xmm5 + +#define ZERO xmm15 + +.macro ROUND1 in0 in1 + add out, k256_size + vpshufb TMP2, \in1, MASK_REG + aesenclast TMP2, CON + vpslld CON, CON, 1 + vpslldq TMP1, \in0, 4 + vpxor \in0, \in0, TMP1 + vpslldq TMP1, TMP1, 4 + vpxor \in0, \in0, TMP1 + vpslldq TMP1, TMP1, 4 + vpxor \in0, \in0, TMP1 + vpxor \in0, \in0, TMP2 + vmovdqa [out], \in0 + +.endm + +.macro ROUND2 + vpshufd TMP2, IN0, 0xff + aesenclast TMP2, ZERO + vpslldq TMP1, IN1, 4 + vpxor IN1, IN1, TMP1 + vpslldq TMP1, TMP1, 4 + vpxor IN1, IN1, TMP1 + vpslldq TMP1, TMP1, 4 + vpxor IN1, IN1, TMP1 + vpxor IN1, IN1, TMP2 + vmovdqa [out+16], IN1 +.endm + +#ifdef __APPLE__ +#define AES256_KEY_EXPANSION _aes256_key_expansion +#else +#define AES256_KEY_EXPANSION aes256_key_expansion +#endif + +#ifndef __APPLE__ +.type AES256_KEY_EXPANSION,@function +.hidden AES256_KEY_EXPANSION +#endif +.globl AES256_KEY_EXPANSION +AES256_KEY_EXPANSION: + vmovdqu IN0, [in] + vmovdqu IN1, [in+16] + vmovdqa [out], IN0 + vmovdqa [out+16], IN1 + + vmovdqa CON, [rip+CON1] + vmovdqa MASK_REG, [rip+MASK1] + + vpxor ZERO, ZERO, ZERO + + mov ax, 6 +.loop256: + + ROUND1 IN0, IN1 + dec ax + ROUND2 + jne .loop256 + + ROUND1 IN0, IN1 + + ret +#ifndef __APPLE__ +.size AES256_KEY_EXPANSION, .-AES256_KEY_EXPANSION +#endif + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/verification.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/verification.h new file mode 100644 index 0000000000..af674691da --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/verification.h @@ -0,0 +1,123 @@ +/** @file + * + * @brief The verification protocol + */ + +#ifndef VERIFICATION_H +#define VERIFICATION_H + +#include +#include + +/** @defgroup verification SQIsignHD verification protocol + * @{ + */ + +/** @defgroup verification_t Types for SQIsignHD verification protocol + * @{ + */ + +typedef digit_t scalar_t[NWORDS_ORDER]; +typedef scalar_t scalar_mtx_2x2_t[2][2]; + +/** @brief Type for the signature + * + * @typedef signature_t + * + * @struct signature + * + */ +typedef struct signature +{ + fp2_t E_aux_A; // the Montgomery A-coefficient for the auxiliary curve + uint8_t backtracking; + uint8_t two_resp_length; + scalar_mtx_2x2_t mat_Bchall_can_to_B_chall; // the matrix of the desired basis + scalar_t chall_coeff; + uint8_t hint_aux; + uint8_t hint_chall; +} signature_t; + +/** @brief Type for the public keys + * + * @typedef public_key_t + * + * @struct public_key + * + */ +typedef struct public_key +{ + ec_curve_t curve; // the normalized A-coefficient of the Montgomery curve + uint8_t hint_pk; +} public_key_t; + +/** @} + */ + +/*************************** Functions *****************************/ + +void public_key_init(public_key_t *pk); +void public_key_finalize(public_key_t *pk); + +void hash_to_challenge(scalar_t *scalar, + const public_key_t *pk, + 
const ec_curve_t *com_curve, + const unsigned char *message, + size_t length); + +/** + * @brief Verification + * + * @param sig signature + * @param pk public key + * @param m message + * @param l size + * @returns 1 if the signature verifies, 0 otherwise + */ +int protocols_verify(signature_t *sig, const public_key_t *pk, const unsigned char *m, size_t l); + +/*************************** Encoding *****************************/ + +/** @defgroup encoding Encoding and decoding functions + * @{ + */ + +/** + * @brief Encodes a signature as a byte array + * + * @param enc : Byte array to encode the signature in + * @param sig : Signature to encode + */ +void signature_to_bytes(unsigned char *enc, const signature_t *sig); + +/** + * @brief Decodes a signature from a byte array + * + * @param sig : Structure to decode the signature in + * @param enc : Byte array to decode + */ +void signature_from_bytes(signature_t *sig, const unsigned char *enc); + +/** + * @brief Encodes a public key as a byte array + * + * @param enc : Byte array to encode the public key in + * @param pk : Public key to encode + */ +unsigned char *public_key_to_bytes(unsigned char *enc, const public_key_t *pk); + +/** + * @brief Decodes a public key from a byte array + * + * @param pk : Structure to decode the public key in + * @param enc : Byte array to decode + */ +const unsigned char *public_key_from_bytes(public_key_t *pk, const unsigned char *enc); + +/** @} + */ + +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/verify.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/verify.c new file mode 100644 index 0000000000..b5f78ad398 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/verify.c @@ -0,0 +1,309 @@ +#include +#include +#include +#include +#include + +// Check that the basis change matrix elements are canonical +// representatives modulo 2^(SQIsign_response_length + 2). +static int +check_canonical_basis_change_matrix(const signature_t *sig) +{ + // This works as long as all values in sig->mat_Bchall_can_to_B_chall are + // positive integers. 
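+    // Concretely, aux is set to 2^(SQIsign_response_length + HD_extra_torsion - backtracking)
+    // below, and every matrix entry must be strictly smaller than that bound to be accepted.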
+    int ret = 1;
+    scalar_t aux;
+
+    memset(aux, 0, NWORDS_ORDER * sizeof(digit_t));
+    aux[0] = 0x1;
+    multiple_mp_shiftl(aux, SQIsign_response_length + HD_extra_torsion - (int)sig->backtracking, NWORDS_ORDER);
+
+    for (int i = 0; i < 2; i++) {
+        for (int j = 0; j < 2; j++) {
+            if (mp_compare(aux, sig->mat_Bchall_can_to_B_chall[i][j], NWORDS_ORDER) <= 0) {
+                ret = 0;
+            }
+        }
+    }
+
+    return ret;
+}
+
+// Compute the 2^n isogeny from the signature with kernel
+// P + [chall_coeff]Q and store the codomain in E_chall
+static int
+compute_challenge_verify(ec_curve_t *E_chall, const signature_t *sig, const ec_curve_t *Epk, const uint8_t hint_pk)
+{
+    ec_basis_t bas_EA;
+    ec_isog_even_t phi_chall;
+
+    // Set domain and length of 2^n isogeny
+    copy_curve(&phi_chall.curve, Epk);
+    phi_chall.length = TORSION_EVEN_POWER - sig->backtracking;
+
+    // Compute the basis from the supplied hint
+    if (!ec_curve_to_basis_2f_from_hint(&bas_EA, &phi_chall.curve, TORSION_EVEN_POWER, hint_pk)) // canonical
+        return 0;
+
+    // recovering the exact challenge
+    {
+        if (!ec_ladder3pt(&phi_chall.kernel, sig->chall_coeff, &bas_EA.P, &bas_EA.Q, &bas_EA.PmQ, &phi_chall.curve)) {
+            return 0;
+        };
+    }
+
+    // Double the kernel until it has the correct order
+    ec_dbl_iter(&phi_chall.kernel, sig->backtracking, &phi_chall.kernel, &phi_chall.curve);
+
+    // Compute the codomain
+    copy_curve(E_chall, &phi_chall.curve);
+    if (ec_eval_even(E_chall, &phi_chall, NULL, 0))
+        return 0;
+    return 1;
+}
+
+// same as matrix_application_even_basis() in id2iso.c, with some modifications:
+// - this version works with a matrix of scalars (not ibz_t).
+// - reduction modulo 2^f of matrix elements is removed here, because it is
+// assumed that the elements are already canonical representatives modulo
+// 2^f; this is ensured by calling check_canonical_basis_change_matrix() at
+// the beginning of protocols_verify().
+static int
+matrix_scalar_application_even_basis(ec_basis_t *bas, const ec_curve_t *E, scalar_mtx_2x2_t *mat, int f)
+{
+    scalar_t scalar0, scalar1;
+    memset(scalar0, 0, NWORDS_ORDER * sizeof(digit_t));
+    memset(scalar1, 0, NWORDS_ORDER * sizeof(digit_t));
+
+    ec_basis_t tmp_bas;
+    copy_basis(&tmp_bas, bas);
+
+    // For a matrix [[a, c], [b, d]] we compute:
+    //
+    // first basis element R = [a]P + [b]Q
+    if (!ec_biscalar_mul(&bas->P, (*mat)[0][0], (*mat)[1][0], f, &tmp_bas, E))
+        return 0;
+    // second basis element S = [c]P + [d]Q
+    if (!ec_biscalar_mul(&bas->Q, (*mat)[0][1], (*mat)[1][1], f, &tmp_bas, E))
+        return 0;
+    // Their difference R - S = [a - c]P + [b - d]Q
+    mp_sub(scalar0, (*mat)[0][0], (*mat)[0][1], NWORDS_ORDER);
+    mp_mod_2exp(scalar0, f, NWORDS_ORDER);
+    mp_sub(scalar1, (*mat)[1][0], (*mat)[1][1], NWORDS_ORDER);
+    mp_mod_2exp(scalar1, f, NWORDS_ORDER);
+    return ec_biscalar_mul(&bas->PmQ, scalar0, scalar1, f, &tmp_bas, E);
+}
+
+// Compute the bases for the challenge and auxiliary curve from
+// the canonical bases. Challenge basis is reconstructed from the
+// compressed scalars within the challenge.
+static int +challenge_and_aux_basis_verify(ec_basis_t *B_chall_can, + ec_basis_t *B_aux_can, + ec_curve_t *E_chall, + ec_curve_t *E_aux, + signature_t *sig, + const int pow_dim2_deg_resp) +{ + + // recovering the canonical basis as TORSION_EVEN_POWER for consistency with signing + if (!ec_curve_to_basis_2f_from_hint(B_chall_can, E_chall, TORSION_EVEN_POWER, sig->hint_chall)) + return 0; + + // setting to the right order + ec_dbl_iter_basis(B_chall_can, + TORSION_EVEN_POWER - pow_dim2_deg_resp - HD_extra_torsion - sig->two_resp_length, + B_chall_can, + E_chall); + + if (!ec_curve_to_basis_2f_from_hint(B_aux_can, E_aux, TORSION_EVEN_POWER, sig->hint_aux)) + return 0; + + // setting to the right order + ec_dbl_iter_basis(B_aux_can, TORSION_EVEN_POWER - pow_dim2_deg_resp - HD_extra_torsion, B_aux_can, E_aux); + +#ifndef NDEBUG + if (!test_basis_order_twof(B_chall_can, E_chall, HD_extra_torsion + pow_dim2_deg_resp + sig->two_resp_length)) + debug_print("canonical basis has wrong order, expect something to fail"); +#endif + + // applying the change matrix on the basis of E_chall + return matrix_scalar_application_even_basis(B_chall_can, + E_chall, + &sig->mat_Bchall_can_to_B_chall, + pow_dim2_deg_resp + HD_extra_torsion + sig->two_resp_length); +} + +// When two_resp_length is non-zero, we must compute a small 2^n-isogeny +// updating E_chall as the codomain as well as push the basis on E_chall +// through this isogeny +static int +two_response_isogeny_verify(ec_curve_t *E_chall, ec_basis_t *B_chall_can, const signature_t *sig, int pow_dim2_deg_resp) +{ + ec_point_t ker, points[3]; + + // choosing the right point for the small two_isogenies + if (mp_is_even(sig->mat_Bchall_can_to_B_chall[0][0], NWORDS_ORDER) && + mp_is_even(sig->mat_Bchall_can_to_B_chall[1][0], NWORDS_ORDER)) { + copy_point(&ker, &B_chall_can->Q); + } else { + copy_point(&ker, &B_chall_can->P); + } + + copy_point(&points[0], &B_chall_can->P); + copy_point(&points[1], &B_chall_can->Q); + copy_point(&points[2], &B_chall_can->PmQ); + + ec_dbl_iter(&ker, pow_dim2_deg_resp + HD_extra_torsion, &ker, E_chall); + +#ifndef NDEBUG + if (!test_point_order_twof(&ker, E_chall, sig->two_resp_length)) + debug_print("kernel does not have order 2^(two_resp_length"); +#endif + + if (ec_eval_small_chain(E_chall, &ker, sig->two_resp_length, points, 3, false)) { + return 0; + } + +#ifndef NDEBUG + if (!test_point_order_twof(&points[0], E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("points[0] does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); + if (!test_point_order_twof(&points[1], E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("points[1] does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); + if (!test_point_order_twof(&points[2], E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("points[2] does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); +#endif + + copy_point(&B_chall_can->P, &points[0]); + copy_point(&B_chall_can->Q, &points[1]); + copy_point(&B_chall_can->PmQ, &points[2]); + return 1; +} + +// The commitment curve can be recovered from the codomain of the 2D +// isogeny built from the bases computed during verification. 
+static int +compute_commitment_curve_verify(ec_curve_t *E_com, + const ec_basis_t *B_chall_can, + const ec_basis_t *B_aux_can, + const ec_curve_t *E_chall, + const ec_curve_t *E_aux, + int pow_dim2_deg_resp) + +{ +#ifndef NDEBUG + // Check all the points are the correct order + if (!test_basis_order_twof(B_chall_can, E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("B_chall_can does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); + + if (!test_basis_order_twof(B_aux_can, E_aux, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("B_aux_can does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); +#endif + + // now compute the dim2 isogeny from Echall x E_aux -> E_com x E_aux' + // of kernel B_chall_can x B_aux_can + + // first we set-up the kernel + theta_couple_curve_t EchallxEaux; + copy_curve(&EchallxEaux.E1, E_chall); + copy_curve(&EchallxEaux.E2, E_aux); + + theta_kernel_couple_points_t dim_two_ker; + copy_bases_to_kernel(&dim_two_ker, B_chall_can, B_aux_can); + + // computing the isogeny + theta_couple_curve_t codomain; + int codomain_splits; + ec_curve_init(&codomain.E1); + ec_curve_init(&codomain.E2); + // handling the special case where we don't need to perform any dim2 computation + if (pow_dim2_deg_resp == 0) { + codomain_splits = 1; + copy_curve(&codomain.E1, &EchallxEaux.E1); + copy_curve(&codomain.E2, &EchallxEaux.E2); + // We still need to check that E_chall is supersingular + // This assumes that HD_extra_torsion == 2 + if (!ec_is_basis_four_torsion(B_chall_can, E_chall)) { + return 0; + } + } else { + codomain_splits = theta_chain_compute_and_eval_verify( + pow_dim2_deg_resp, &EchallxEaux, &dim_two_ker, true, &codomain, NULL, 0); + } + + // computing the commitment curve + // its always the first one because of our (2^n,2^n)-isogeny formulae + copy_curve(E_com, &codomain.E1); + + return codomain_splits; +} + +// SQIsign verification +int +protocols_verify(signature_t *sig, const public_key_t *pk, const unsigned char *m, size_t l) +{ + int verify; + + if (!check_canonical_basis_change_matrix(sig)) + return 0; + + // Computation of the length of the dim 2 2^n isogeny + int pow_dim2_deg_resp = SQIsign_response_length - (int)sig->two_resp_length - (int)sig->backtracking; + + // basic sanity test: checking that the response is not too long + if (pow_dim2_deg_resp < 0) + return 0; + // The dim 2 isogeny embeds a dim 1 isogeny of odd degree, so it can + // never be of length 2. 
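+    // Hence a response with pow_dim2_deg_resp == 1 (a single (2,2)-step) cannot come from an
+    // honest signer and is rejected outright.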
+    if (pow_dim2_deg_resp == 1)
+        return 0;
+
+    // check the public curve is valid
+    if (!ec_curve_verify_A(&(pk->curve).A))
+        return 0;
+
+    // Set auxiliary curve from the A-coefficient within the signature
+    ec_curve_t E_aux;
+    if (!ec_curve_init_from_A(&E_aux, &sig->E_aux_A))
+        return 0; // invalid curve
+
+    // checking that we are given A-coefficients and no precomputation
+    assert(fp2_is_one(&pk->curve.C) == 0xFFFFFFFF && !pk->curve.is_A24_computed_and_normalized);
+
+    // computation of the challenge
+    ec_curve_t E_chall;
+    if (!compute_challenge_verify(&E_chall, sig, &pk->curve, pk->hint_pk)) {
+        return 0;
+    }
+
+    // Computation of the canonical bases for the challenge and aux curve
+    ec_basis_t B_chall_can, B_aux_can;
+
+    if (!challenge_and_aux_basis_verify(&B_chall_can, &B_aux_can, &E_chall, &E_aux, sig, pow_dim2_deg_resp)) {
+        return 0;
+    }
+
+    // When two_resp_length != 0 we need to compute a second, short 2^r-isogeny
+    if (sig->two_resp_length > 0) {
+        if (!two_response_isogeny_verify(&E_chall, &B_chall_can, sig, pow_dim2_deg_resp)) {
+            return 0;
+        }
+    }
+
+    // We can recover the commitment curve with a 2D isogeny.
+    // If the supplied signature does not yield an isogeny between elliptic
+    // products, it is definitely an invalid signature.
+    ec_curve_t E_com;
+    if (!compute_commitment_curve_verify(&E_com, &B_chall_can, &B_aux_can, &E_chall, &E_aux, pow_dim2_deg_resp))
+        return 0;
+
+    scalar_t chk_chall;
+
+    // recomputing the challenge vector
+    hash_to_challenge(&chk_chall, pk, &E_com, m, l);
+
+    // performing the final check
+    verify = mp_compare(sig->chall_coeff, chk_chall, NWORDS_ORDER) == 0;
+
+    return verify;
+}
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/xeval.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/xeval.c
new file mode 100644
index 0000000000..7fc7170423
--- /dev/null
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/xeval.c
@@ -0,0 +1,64 @@
+#include "isog.h"
+#include "ec.h"
+#include
+
+// -----------------------------------------------------------------------------------------
+// -----------------------------------------------------------------------------------------
+
+// Degree-2 isogeny evaluation with kernel generated by P != (0, 0)
+void
+xeval_2(ec_point_t *R, ec_point_t *const Q, const int lenQ, const ec_kps2_t *kps)
+{
+    fp2_t t0, t1, t2;
+    for (int j = 0; j < lenQ; j++) {
+        fp2_add(&t0, &Q[j].x, &Q[j].z);
+        fp2_sub(&t1, &Q[j].x, &Q[j].z);
+        fp2_mul(&t2, &kps->K.x, &t1);
+        fp2_mul(&t1, &kps->K.z, &t0);
+        fp2_add(&t0, &t2, &t1);
+        fp2_sub(&t1, &t2, &t1);
+        fp2_mul(&R[j].x, &Q[j].x, &t0);
+        fp2_mul(&R[j].z, &Q[j].z, &t1);
+    }
+}
+
+void
+xeval_2_singular(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps2_t *kps)
+{
+    fp2_t t0, t1;
+    for (int i = 0; i < lenQ; i++) {
+        fp2_mul(&t0, &Q[i].x, &Q[i].z);
+        fp2_mul(&t1, &kps->K.x, &Q[i].z);
+        fp2_add(&t1, &t1, &Q[i].x);
+        fp2_mul(&t1, &t1, &Q[i].x);
+        fp2_sqr(&R[i].x, &Q[i].z);
+        fp2_add(&R[i].x, &R[i].x, &t1);
+        fp2_mul(&R[i].z, &t0, &kps->K.z);
+    }
+}
+
+// Degree-4 isogeny evaluation with kernel generated by P such that [2]P != (0, 0)
+void
+xeval_4(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps4_t *kps)
+{
+    const ec_point_t *K = kps->K;
+
+    fp2_t t0, t1;
+
+    for (int i = 0; i < lenQ; i++) {
+        fp2_add(&t0, &Q[i].x, &Q[i].z);
+        fp2_sub(&t1, &Q[i].x, &Q[i].z);
+        fp2_mul(&(R[i].x), &t0, &K[1].x);
+        fp2_mul(&(R[i].z), &t1, &K[2].x);
+        fp2_mul(&t0, &t0, &t1);
+        fp2_mul(&t0, &t0, &K[0].x);
+        fp2_add(&t1, &(R[i].x), &(R[i].z));
+
fp2_sub(&(R[i].z), &(R[i].x), &(R[i].z)); + fp2_sqr(&t1, &t1); + fp2_sqr(&(R[i].z), &(R[i].z)); + fp2_add(&(R[i].x), &t0, &t1); + fp2_sub(&t0, &t0, &(R[i].z)); + fp2_mul(&(R[i].x), &(R[i].x), &t1); + fp2_mul(&(R[i].z), &(R[i].z), &t0); + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/xisog.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/xisog.c new file mode 100644 index 0000000000..7242d29433 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/xisog.c @@ -0,0 +1,61 @@ +#include "isog.h" +#include "ec.h" +#include + +// ------------------------------------------------------------------------- +// ------------------------------------------------------------------------- + +// Degree-2 isogeny with kernel generated by P != (0 ,0) +// Outputs the curve coefficient in the form A24=(A+2C:4C) +void +xisog_2(ec_kps2_t *kps, ec_point_t *B, const ec_point_t P) +{ + fp2_sqr(&B->x, &P.x); + fp2_sqr(&B->z, &P.z); + fp2_sub(&B->x, &B->z, &B->x); + fp2_add(&kps->K.x, &P.x, &P.z); + fp2_sub(&kps->K.z, &P.x, &P.z); +} + +void +xisog_2_singular(ec_kps2_t *kps, ec_point_t *B24, ec_point_t A24) +{ + // No need to check the square root, only used for signing. + fp2_t t0, four; + fp2_set_small(&four, 4); + fp2_add(&t0, &A24.x, &A24.x); + fp2_sub(&t0, &t0, &A24.z); + fp2_add(&t0, &t0, &t0); + fp2_inv(&A24.z); + fp2_mul(&t0, &t0, &A24.z); + fp2_copy(&kps->K.x, &t0); + fp2_add(&B24->x, &t0, &t0); + fp2_sqr(&t0, &t0); + fp2_sub(&t0, &t0, &four); + fp2_sqrt(&t0); + fp2_neg(&kps->K.z, &t0); + fp2_add(&B24->z, &t0, &t0); + fp2_add(&B24->x, &B24->x, &B24->z); + fp2_add(&B24->z, &B24->z, &B24->z); +} + +// Degree-4 isogeny with kernel generated by P such that [2]P != (0 ,0) +// Outputs the curve coefficient in the form A24=(A+2C:4C) +void +xisog_4(ec_kps4_t *kps, ec_point_t *B, const ec_point_t P) +{ + ec_point_t *K = kps->K; + + fp2_sqr(&K[0].x, &P.x); + fp2_sqr(&K[0].z, &P.z); + fp2_add(&K[1].x, &K[0].z, &K[0].x); + fp2_sub(&K[1].z, &K[0].z, &K[0].x); + fp2_mul(&B->x, &K[1].x, &K[1].z); + fp2_sqr(&B->z, &K[0].z); + + // Constants for xeval_4 + fp2_add(&K[2].x, &P.x, &P.z); + fp2_sub(&K[1].x, &P.x, &P.z); + fp2_add(&K[0].x, &K[0].z, &K[0].z); + fp2_add(&K[0].x, &K[0].x, &K[0].x); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/COPYING.LGPL b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/COPYING.LGPL new file mode 100644 index 0000000000..0a041280bd --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/COPYING.LGPL @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. 
+Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. 
+ + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. 
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/LICENSE b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/LICENSE new file mode 100644 index 0000000000..8f71f43fee --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/NOTICE b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/NOTICE new file mode 100644 index 0000000000..6eccf392fa --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/NOTICE @@ -0,0 +1,21 @@ +Copyright 2023-2025 the SQIsign team. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +The DPE Library is (C) 2004-2024 Patrick Pelissier, Paul Zimmermann, +LORIA/INRIA, and licensed under the GNU Lesser General Public License, +version 3. You may obtain a copy of the License at + + https://www.gnu.org/licenses/lgpl-3.0.en.html + +or in the file COPYING.LGPL. diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/aes.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/aes.h new file mode 100644 index 0000000000..e35ec3705b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/aes.h @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef AES_H +#define AES_H + +#include +#include + +void AES_256_ECB(const uint8_t *input, const uint8_t *key, uint8_t *output); +#define AES_ECB_encrypt AES_256_ECB + +#ifdef ENABLE_AESNI +int AES_128_CTR_NI(unsigned char *output, + size_t outputByteLen, + const unsigned char *input, + size_t inputByteLen); +int AES_128_CTR_4R_NI(unsigned char *output, + size_t outputByteLen, + const unsigned char *input, + size_t inputByteLen); +#define AES_128_CTR AES_128_CTR_NI +#else +int AES_128_CTR(unsigned char *output, + size_t outputByteLen, + const unsigned char *input, + size_t inputByteLen); +#endif + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/aes_c.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/aes_c.c new file mode 100644 index 0000000000..5e2d7d6161 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/aes_c.c @@ -0,0 +1,783 @@ +// SPDX-License-Identifier: MIT and Apache-2.0 + +/* + * AES implementation based on code from PQClean, + * which is in turn based on BearSSL (https://bearssl.org/) + * by Thomas Pornin. + * + * + * Copyright (c) 2016 Thomas Pornin + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include + +#define AES128_KEYBYTES 16 +#define AES192_KEYBYTES 24 +#define AES256_KEYBYTES 32 +#define AESCTR_NONCEBYTES 12 +#define AES_BLOCKBYTES 16 + +#define PQC_AES128_STATESIZE 88 +typedef struct +{ + uint64_t sk_exp[PQC_AES128_STATESIZE]; +} aes128ctx; + +#define PQC_AES192_STATESIZE 104 +typedef struct +{ + uint64_t sk_exp[PQC_AES192_STATESIZE]; +} aes192ctx; + +#define PQC_AES256_STATESIZE 120 +typedef struct +{ + uint64_t sk_exp[PQC_AES256_STATESIZE]; +} aes256ctx; + +/** Initializes the context **/ +void aes128_ecb_keyexp(aes128ctx *r, const unsigned char *key); + +void aes128_ctr_keyexp(aes128ctx *r, const unsigned char *key); + +void aes128_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes128ctx *ctx); + +void aes128_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes128ctx *ctx); + +/** Frees the context **/ +void aes128_ctx_release(aes128ctx *r); + +/** Initializes the context **/ +void aes192_ecb_keyexp(aes192ctx *r, const unsigned char *key); + +void aes192_ctr_keyexp(aes192ctx *r, const unsigned char *key); + +void aes192_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes192ctx *ctx); + +void aes192_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes192ctx *ctx); + +void aes192_ctx_release(aes192ctx *r); + +/** Initializes the context **/ +void aes256_ecb_keyexp(aes256ctx *r, const unsigned char *key); + +void aes256_ctr_keyexp(aes256ctx *r, const unsigned char *key); + +void aes256_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes256ctx *ctx); + +void aes256_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes256ctx *ctx); + +/** Frees the context **/ +void aes256_ctx_release(aes256ctx *r); + +static inline uint32_t +br_dec32le(const unsigned char *src) +{ + return (uint32_t)src[0] | ((uint32_t)src[1] << 8) | ((uint32_t)src[2] << 16) | + ((uint32_t)src[3] << 24); +} + +static void +br_range_dec32le(uint32_t *v, size_t num, const unsigned char *src) +{ + while (num-- > 0) { + *v++ = br_dec32le(src); + src += 4; + } +} + +static inline uint32_t +br_swap32(uint32_t x) +{ + x = ((x & (uint32_t)0x00FF00FF) << 8) | ((x >> 8) & (uint32_t)0x00FF00FF); + return (x << 16) | (x >> 16); +} + +static inline void +br_enc32le(unsigned char *dst, uint32_t x) +{ + dst[0] = (unsigned char)x; + dst[1] = (unsigned char)(x >> 8); + dst[2] = (unsigned char)(x >> 16); + dst[3] = (unsigned char)(x >> 24); +} + +static void +br_range_enc32le(unsigned char *dst, const uint32_t *v, size_t num) +{ + while (num-- > 0) { + br_enc32le(dst, *v++); + dst += 4; + } +} + +static void +br_aes_ct64_bitslice_Sbox(uint64_t *q) +{ + /* + * This S-box implementation is a straightforward translation of + * the circuit described by Boyar and Peralta in "A new + * combinational logic minimization technique with applications + * to cryptology" (https://eprint.iacr.org/2009/191.pdf). 
+ * + * Note that variables x* (input) and s* (output) are numbered + * in "reverse" order (x0 is the high bit, x7 is the low bit). + */ + + uint64_t x0, x1, x2, x3, x4, x5, x6, x7; + uint64_t y1, y2, y3, y4, y5, y6, y7, y8, y9; + uint64_t y10, y11, y12, y13, y14, y15, y16, y17, y18, y19; + uint64_t y20, y21; + uint64_t z0, z1, z2, z3, z4, z5, z6, z7, z8, z9; + uint64_t z10, z11, z12, z13, z14, z15, z16, z17; + uint64_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; + uint64_t t10, t11, t12, t13, t14, t15, t16, t17, t18, t19; + uint64_t t20, t21, t22, t23, t24, t25, t26, t27, t28, t29; + uint64_t t30, t31, t32, t33, t34, t35, t36, t37, t38, t39; + uint64_t t40, t41, t42, t43, t44, t45, t46, t47, t48, t49; + uint64_t t50, t51, t52, t53, t54, t55, t56, t57, t58, t59; + uint64_t t60, t61, t62, t63, t64, t65, t66, t67; + uint64_t s0, s1, s2, s3, s4, s5, s6, s7; + + x0 = q[7]; + x1 = q[6]; + x2 = q[5]; + x3 = q[4]; + x4 = q[3]; + x5 = q[2]; + x6 = q[1]; + x7 = q[0]; + + /* + * Top linear transformation. + */ + y14 = x3 ^ x5; + y13 = x0 ^ x6; + y9 = x0 ^ x3; + y8 = x0 ^ x5; + t0 = x1 ^ x2; + y1 = t0 ^ x7; + y4 = y1 ^ x3; + y12 = y13 ^ y14; + y2 = y1 ^ x0; + y5 = y1 ^ x6; + y3 = y5 ^ y8; + t1 = x4 ^ y12; + y15 = t1 ^ x5; + y20 = t1 ^ x1; + y6 = y15 ^ x7; + y10 = y15 ^ t0; + y11 = y20 ^ y9; + y7 = x7 ^ y11; + y17 = y10 ^ y11; + y19 = y10 ^ y8; + y16 = t0 ^ y11; + y21 = y13 ^ y16; + y18 = x0 ^ y16; + + /* + * Non-linear section. + */ + t2 = y12 & y15; + t3 = y3 & y6; + t4 = t3 ^ t2; + t5 = y4 & x7; + t6 = t5 ^ t2; + t7 = y13 & y16; + t8 = y5 & y1; + t9 = t8 ^ t7; + t10 = y2 & y7; + t11 = t10 ^ t7; + t12 = y9 & y11; + t13 = y14 & y17; + t14 = t13 ^ t12; + t15 = y8 & y10; + t16 = t15 ^ t12; + t17 = t4 ^ t14; + t18 = t6 ^ t16; + t19 = t9 ^ t14; + t20 = t11 ^ t16; + t21 = t17 ^ y20; + t22 = t18 ^ y19; + t23 = t19 ^ y21; + t24 = t20 ^ y18; + + t25 = t21 ^ t22; + t26 = t21 & t23; + t27 = t24 ^ t26; + t28 = t25 & t27; + t29 = t28 ^ t22; + t30 = t23 ^ t24; + t31 = t22 ^ t26; + t32 = t31 & t30; + t33 = t32 ^ t24; + t34 = t23 ^ t33; + t35 = t27 ^ t33; + t36 = t24 & t35; + t37 = t36 ^ t34; + t38 = t27 ^ t36; + t39 = t29 & t38; + t40 = t25 ^ t39; + + t41 = t40 ^ t37; + t42 = t29 ^ t33; + t43 = t29 ^ t40; + t44 = t33 ^ t37; + t45 = t42 ^ t41; + z0 = t44 & y15; + z1 = t37 & y6; + z2 = t33 & x7; + z3 = t43 & y16; + z4 = t40 & y1; + z5 = t29 & y7; + z6 = t42 & y11; + z7 = t45 & y17; + z8 = t41 & y10; + z9 = t44 & y12; + z10 = t37 & y3; + z11 = t33 & y4; + z12 = t43 & y13; + z13 = t40 & y5; + z14 = t29 & y2; + z15 = t42 & y9; + z16 = t45 & y14; + z17 = t41 & y8; + + /* + * Bottom linear transformation. 
+ */ + t46 = z15 ^ z16; + t47 = z10 ^ z11; + t48 = z5 ^ z13; + t49 = z9 ^ z10; + t50 = z2 ^ z12; + t51 = z2 ^ z5; + t52 = z7 ^ z8; + t53 = z0 ^ z3; + t54 = z6 ^ z7; + t55 = z16 ^ z17; + t56 = z12 ^ t48; + t57 = t50 ^ t53; + t58 = z4 ^ t46; + t59 = z3 ^ t54; + t60 = t46 ^ t57; + t61 = z14 ^ t57; + t62 = t52 ^ t58; + t63 = t49 ^ t58; + t64 = z4 ^ t59; + t65 = t61 ^ t62; + t66 = z1 ^ t63; + s0 = t59 ^ t63; + s6 = t56 ^ ~t62; + s7 = t48 ^ ~t60; + t67 = t64 ^ t65; + s3 = t53 ^ t66; + s4 = t51 ^ t66; + s5 = t47 ^ t65; + s1 = t64 ^ ~s3; + s2 = t55 ^ ~t67; + + q[7] = s0; + q[6] = s1; + q[5] = s2; + q[4] = s3; + q[3] = s4; + q[2] = s5; + q[1] = s6; + q[0] = s7; +} + +static void +br_aes_ct64_ortho(uint64_t *q) +{ +#define SWAPN(cl, ch, s, x, y) \ + do { \ + uint64_t a, b; \ + a = (x); \ + b = (y); \ + (x) = (a & (uint64_t)(cl)) | ((b & (uint64_t)(cl)) << (s)); \ + (y) = ((a & (uint64_t)(ch)) >> (s)) | (b & (uint64_t)(ch)); \ + } while (0) + +#define SWAP2(x, y) SWAPN(0x5555555555555555, 0xAAAAAAAAAAAAAAAA, 1, x, y) +#define SWAP4(x, y) SWAPN(0x3333333333333333, 0xCCCCCCCCCCCCCCCC, 2, x, y) +#define SWAP8(x, y) SWAPN(0x0F0F0F0F0F0F0F0F, 0xF0F0F0F0F0F0F0F0, 4, x, y) + + SWAP2(q[0], q[1]); + SWAP2(q[2], q[3]); + SWAP2(q[4], q[5]); + SWAP2(q[6], q[7]); + + SWAP4(q[0], q[2]); + SWAP4(q[1], q[3]); + SWAP4(q[4], q[6]); + SWAP4(q[5], q[7]); + + SWAP8(q[0], q[4]); + SWAP8(q[1], q[5]); + SWAP8(q[2], q[6]); + SWAP8(q[3], q[7]); +} + +static void +br_aes_ct64_interleave_in(uint64_t *q0, uint64_t *q1, const uint32_t *w) +{ + uint64_t x0, x1, x2, x3; + + x0 = w[0]; + x1 = w[1]; + x2 = w[2]; + x3 = w[3]; + x0 |= (x0 << 16); + x1 |= (x1 << 16); + x2 |= (x2 << 16); + x3 |= (x3 << 16); + x0 &= (uint64_t)0x0000FFFF0000FFFF; + x1 &= (uint64_t)0x0000FFFF0000FFFF; + x2 &= (uint64_t)0x0000FFFF0000FFFF; + x3 &= (uint64_t)0x0000FFFF0000FFFF; + x0 |= (x0 << 8); + x1 |= (x1 << 8); + x2 |= (x2 << 8); + x3 |= (x3 << 8); + x0 &= (uint64_t)0x00FF00FF00FF00FF; + x1 &= (uint64_t)0x00FF00FF00FF00FF; + x2 &= (uint64_t)0x00FF00FF00FF00FF; + x3 &= (uint64_t)0x00FF00FF00FF00FF; + *q0 = x0 | (x2 << 8); + *q1 = x1 | (x3 << 8); +} + +static void +br_aes_ct64_interleave_out(uint32_t *w, uint64_t q0, uint64_t q1) +{ + uint64_t x0, x1, x2, x3; + + x0 = q0 & (uint64_t)0x00FF00FF00FF00FF; + x1 = q1 & (uint64_t)0x00FF00FF00FF00FF; + x2 = (q0 >> 8) & (uint64_t)0x00FF00FF00FF00FF; + x3 = (q1 >> 8) & (uint64_t)0x00FF00FF00FF00FF; + x0 |= (x0 >> 8); + x1 |= (x1 >> 8); + x2 |= (x2 >> 8); + x3 |= (x3 >> 8); + x0 &= (uint64_t)0x0000FFFF0000FFFF; + x1 &= (uint64_t)0x0000FFFF0000FFFF; + x2 &= (uint64_t)0x0000FFFF0000FFFF; + x3 &= (uint64_t)0x0000FFFF0000FFFF; + w[0] = (uint32_t)x0 | (uint32_t)(x0 >> 16); + w[1] = (uint32_t)x1 | (uint32_t)(x1 >> 16); + w[2] = (uint32_t)x2 | (uint32_t)(x2 >> 16); + w[3] = (uint32_t)x3 | (uint32_t)(x3 >> 16); +} + +static const unsigned char Rcon[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36 }; + +static uint32_t +sub_word(uint32_t x) +{ + uint64_t q[8]; + + memset(q, 0, sizeof q); + q[0] = x; + br_aes_ct64_ortho(q); + br_aes_ct64_bitslice_Sbox(q); + br_aes_ct64_ortho(q); + return (uint32_t)q[0]; +} + +static void +br_aes_ct64_keysched(uint64_t *comp_skey, const unsigned char *key, unsigned int key_len) +{ + unsigned int i, j, k, nk, nkf; + uint32_t tmp; + uint32_t skey[60]; + unsigned nrounds = 10 + ((key_len - 16) >> 2); + + nk = (key_len >> 2); + nkf = ((nrounds + 1) << 2); + br_range_dec32le(skey, (key_len >> 2), key); + tmp = skey[(key_len >> 2) - 1]; + for (i = nk, j = 0, k = 0; i < nkf; i++) { + 
if (j == 0) { + tmp = (tmp << 24) | (tmp >> 8); + tmp = sub_word(tmp) ^ Rcon[k]; + } else if (nk > 6 && j == 4) { + tmp = sub_word(tmp); + } + tmp ^= skey[i - nk]; + skey[i] = tmp; + if (++j == nk) { + j = 0; + k++; + } + } + + for (i = 0, j = 0; i < nkf; i += 4, j += 2) { + uint64_t q[8]; + + br_aes_ct64_interleave_in(&q[0], &q[4], skey + i); + q[1] = q[0]; + q[2] = q[0]; + q[3] = q[0]; + q[5] = q[4]; + q[6] = q[4]; + q[7] = q[4]; + br_aes_ct64_ortho(q); + comp_skey[j + 0] = + (q[0] & (uint64_t)0x1111111111111111) | (q[1] & (uint64_t)0x2222222222222222) | + (q[2] & (uint64_t)0x4444444444444444) | (q[3] & (uint64_t)0x8888888888888888); + comp_skey[j + 1] = + (q[4] & (uint64_t)0x1111111111111111) | (q[5] & (uint64_t)0x2222222222222222) | + (q[6] & (uint64_t)0x4444444444444444) | (q[7] & (uint64_t)0x8888888888888888); + } +} + +static void +br_aes_ct64_skey_expand(uint64_t *skey, const uint64_t *comp_skey, unsigned int nrounds) +{ + unsigned u, v, n; + + n = (nrounds + 1) << 1; + for (u = 0, v = 0; u < n; u++, v += 4) { + uint64_t x0, x1, x2, x3; + + x0 = x1 = x2 = x3 = comp_skey[u]; + x0 &= (uint64_t)0x1111111111111111; + x1 &= (uint64_t)0x2222222222222222; + x2 &= (uint64_t)0x4444444444444444; + x3 &= (uint64_t)0x8888888888888888; + x1 >>= 1; + x2 >>= 2; + x3 >>= 3; + skey[v + 0] = (x0 << 4) - x0; + skey[v + 1] = (x1 << 4) - x1; + skey[v + 2] = (x2 << 4) - x2; + skey[v + 3] = (x3 << 4) - x3; + } +} + +static inline void +add_round_key(uint64_t *q, const uint64_t *sk) +{ + q[0] ^= sk[0]; + q[1] ^= sk[1]; + q[2] ^= sk[2]; + q[3] ^= sk[3]; + q[4] ^= sk[4]; + q[5] ^= sk[5]; + q[6] ^= sk[6]; + q[7] ^= sk[7]; +} + +static inline void +shift_rows(uint64_t *q) +{ + int i; + + for (i = 0; i < 8; i++) { + uint64_t x; + + x = q[i]; + q[i] = + (x & (uint64_t)0x000000000000FFFF) | ((x & (uint64_t)0x00000000FFF00000) >> 4) | + ((x & (uint64_t)0x00000000000F0000) << 12) | ((x & (uint64_t)0x0000FF0000000000) >> 8) | + ((x & (uint64_t)0x000000FF00000000) << 8) | ((x & (uint64_t)0xF000000000000000) >> 12) | + ((x & (uint64_t)0x0FFF000000000000) << 4); + } +} + +static inline uint64_t +rotr32(uint64_t x) +{ + return (x << 32) | (x >> 32); +} + +static inline void +mix_columns(uint64_t *q) +{ + uint64_t q0, q1, q2, q3, q4, q5, q6, q7; + uint64_t r0, r1, r2, r3, r4, r5, r6, r7; + + q0 = q[0]; + q1 = q[1]; + q2 = q[2]; + q3 = q[3]; + q4 = q[4]; + q5 = q[5]; + q6 = q[6]; + q7 = q[7]; + r0 = (q0 >> 16) | (q0 << 48); + r1 = (q1 >> 16) | (q1 << 48); + r2 = (q2 >> 16) | (q2 << 48); + r3 = (q3 >> 16) | (q3 << 48); + r4 = (q4 >> 16) | (q4 << 48); + r5 = (q5 >> 16) | (q5 << 48); + r6 = (q6 >> 16) | (q6 << 48); + r7 = (q7 >> 16) | (q7 << 48); + + q[0] = q7 ^ r7 ^ r0 ^ rotr32(q0 ^ r0); + q[1] = q0 ^ r0 ^ q7 ^ r7 ^ r1 ^ rotr32(q1 ^ r1); + q[2] = q1 ^ r1 ^ r2 ^ rotr32(q2 ^ r2); + q[3] = q2 ^ r2 ^ q7 ^ r7 ^ r3 ^ rotr32(q3 ^ r3); + q[4] = q3 ^ r3 ^ q7 ^ r7 ^ r4 ^ rotr32(q4 ^ r4); + q[5] = q4 ^ r4 ^ r5 ^ rotr32(q5 ^ r5); + q[6] = q5 ^ r5 ^ r6 ^ rotr32(q6 ^ r6); + q[7] = q6 ^ r6 ^ r7 ^ rotr32(q7 ^ r7); +} + +static void +inc4_be(uint32_t *x) +{ + uint32_t t = br_swap32(*x) + 4; + *x = br_swap32(t); +} + +static void +aes_ecb4x(unsigned char out[64], + const uint32_t ivw[16], + const uint64_t *sk_exp, + unsigned int nrounds) +{ + uint32_t w[16]; + uint64_t q[8]; + unsigned int i; + + memcpy(w, ivw, sizeof(w)); + for (i = 0; i < 4; i++) { + br_aes_ct64_interleave_in(&q[i], &q[i + 4], w + (i << 2)); + } + br_aes_ct64_ortho(q); + + add_round_key(q, sk_exp); + for (i = 1; i < nrounds; i++) { + br_aes_ct64_bitslice_Sbox(q); + 
shift_rows(q); + mix_columns(q); + add_round_key(q, sk_exp + (i << 3)); + } + br_aes_ct64_bitslice_Sbox(q); + shift_rows(q); + add_round_key(q, sk_exp + 8 * nrounds); + + br_aes_ct64_ortho(q); + for (i = 0; i < 4; i++) { + br_aes_ct64_interleave_out(w + (i << 2), q[i], q[i + 4]); + } + br_range_enc32le(out, w, 16); +} + +static void +aes_ctr4x(unsigned char out[64], uint32_t ivw[16], const uint64_t *sk_exp, unsigned int nrounds) +{ + aes_ecb4x(out, ivw, sk_exp, nrounds); + + /* Increase counter for next 4 blocks */ + inc4_be(ivw + 3); + inc4_be(ivw + 7); + inc4_be(ivw + 11); + inc4_be(ivw + 15); +} + +static void +aes_ecb(unsigned char *out, + const unsigned char *in, + size_t nblocks, + const uint64_t *rkeys, + unsigned int nrounds) +{ + uint32_t blocks[16]; + unsigned char t[64]; + + while (nblocks >= 4) { + br_range_dec32le(blocks, 16, in); + aes_ecb4x(out, blocks, rkeys, nrounds); + nblocks -= 4; + in += 64; + out += 64; + } + + if (nblocks) { + br_range_dec32le(blocks, nblocks * 4, in); + aes_ecb4x(t, blocks, rkeys, nrounds); + memcpy(out, t, nblocks * 16); + } +} + +static void +aes_ctr(unsigned char *out, + size_t outlen, + const unsigned char *iv, + const uint64_t *rkeys, + unsigned int nrounds) +{ + uint32_t ivw[16]; + size_t i; + uint32_t cc = 0; + + br_range_dec32le(ivw, 3, iv); + memcpy(ivw + 4, ivw, 3 * sizeof(uint32_t)); + memcpy(ivw + 8, ivw, 3 * sizeof(uint32_t)); + memcpy(ivw + 12, ivw, 3 * sizeof(uint32_t)); + ivw[3] = br_swap32(cc); + ivw[7] = br_swap32(cc + 1); + ivw[11] = br_swap32(cc + 2); + ivw[15] = br_swap32(cc + 3); + + while (outlen > 64) { + aes_ctr4x(out, ivw, rkeys, nrounds); + out += 64; + outlen -= 64; + } + if (outlen > 0) { + unsigned char tmp[64]; + aes_ctr4x(tmp, ivw, rkeys, nrounds); + for (i = 0; i < outlen; i++) { + out[i] = tmp[i]; + } + } +} + +void +aes128_ecb_keyexp(aes128ctx *r, const unsigned char *key) +{ + uint64_t skey[22]; + + br_aes_ct64_keysched(skey, key, 16); + br_aes_ct64_skey_expand(r->sk_exp, skey, 10); +} + +void +aes128_ctr_keyexp(aes128ctx *r, const unsigned char *key) +{ + aes128_ecb_keyexp(r, key); +} + +void +aes192_ecb_keyexp(aes192ctx *r, const unsigned char *key) +{ + uint64_t skey[26]; + + br_aes_ct64_keysched(skey, key, 24); + br_aes_ct64_skey_expand(r->sk_exp, skey, 12); +} + +void +aes192_ctr_keyexp(aes192ctx *r, const unsigned char *key) +{ + aes192_ecb_keyexp(r, key); +} + +void +aes256_ecb_keyexp(aes256ctx *r, const unsigned char *key) +{ + uint64_t skey[30]; + + br_aes_ct64_keysched(skey, key, 32); + br_aes_ct64_skey_expand(r->sk_exp, skey, 14); +} + +void +aes256_ctr_keyexp(aes256ctx *r, const unsigned char *key) +{ + aes256_ecb_keyexp(r, key); +} + +void +aes128_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes128ctx *ctx) +{ + aes_ecb(out, in, nblocks, ctx->sk_exp, 10); +} + +void +aes128_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes128ctx *ctx) +{ + aes_ctr(out, outlen, iv, ctx->sk_exp, 10); +} + +void +aes192_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes192ctx *ctx) +{ + aes_ecb(out, in, nblocks, ctx->sk_exp, 12); +} + +void +aes192_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes192ctx *ctx) +{ + aes_ctr(out, outlen, iv, ctx->sk_exp, 12); +} + +void +aes256_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes256ctx *ctx) +{ + aes_ecb(out, in, nblocks, ctx->sk_exp, 14); +} + +void +aes256_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes256ctx *ctx) +{ + 
aes_ctr(out, outlen, iv, ctx->sk_exp, 14); +} + +void +aes128_ctx_release(aes128ctx *r) +{ +} + +void +aes192_ctx_release(aes192ctx *r) +{ +} + +void +aes256_ctx_release(aes256ctx *r) +{ +} + +int +AES_128_CTR(unsigned char *output, + size_t outputByteLen, + const unsigned char *input, + size_t inputByteLen) +{ + aes128ctx ctx; + const unsigned char iv[16] = { 0 }; + + aes128_ctr_keyexp(&ctx, input); + aes128_ctr(output, outputByteLen, iv, &ctx); + aes128_ctx_release(&ctx); + + return (int)outputByteLen; +} + +void +AES_256_ECB(const uint8_t *input, const unsigned char *key, unsigned char *output) +{ + aes256ctx ctx; + + aes256_ecb_keyexp(&ctx, key); + aes256_ecb(output, input, 1, &ctx); + aes256_ctx_release(&ctx); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/algebra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/algebra.c new file mode 100644 index 0000000000..50629f9fec --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/algebra.c @@ -0,0 +1,280 @@ +#include +#include "internal.h" + +// Internal helper functions + +void +quat_alg_init_set_ui(quat_alg_t *alg, unsigned int p) +{ + ibz_t bp; + ibz_init(&bp); + ibz_set(&bp, p); + quat_alg_init_set(alg, &bp); + ibz_finalize(&bp); +} + +void +quat_alg_coord_mul(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b, const quat_alg_t *alg) +{ + ibz_t prod; + ibz_vec_4_t sum; + ibz_init(&prod); + ibz_vec_4_init(&sum); + + ibz_set(&(sum[0]), 0); + ibz_set(&(sum[1]), 0); + ibz_set(&(sum[2]), 0); + ibz_set(&(sum[3]), 0); + + // compute 1 coordinate + ibz_mul(&prod, &((*a)[2]), &((*b)[2])); + ibz_sub(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[3])); + ibz_sub(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&(sum[0]), &(sum[0]), &(alg->p)); + ibz_mul(&prod, &((*a)[0]), &((*b)[0])); + ibz_add(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[1])); + ibz_sub(&(sum[0]), &(sum[0]), &prod); + // compute i coordiante + ibz_mul(&prod, &((*a)[2]), &((*b)[3])); + ibz_add(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[2])); + ibz_sub(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&(sum[1]), &(sum[1]), &(alg->p)); + ibz_mul(&prod, &((*a)[0]), &((*b)[1])); + ibz_add(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[0])); + ibz_add(&(sum[1]), &(sum[1]), &prod); + // compute j coordiante + ibz_mul(&prod, &((*a)[0]), &((*b)[2])); + ibz_add(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &((*a)[2]), &((*b)[0])); + ibz_add(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[3])); + ibz_sub(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[1])); + ibz_add(&(sum[2]), &(sum[2]), &prod); + // compute ij coordiante + ibz_mul(&prod, &((*a)[0]), &((*b)[3])); + ibz_add(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[0])); + ibz_add(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &((*a)[2]), &((*b)[1])); + ibz_sub(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[2])); + ibz_add(&(sum[3]), &(sum[3]), &prod); + + ibz_copy(&((*res)[0]), &(sum[0])); + ibz_copy(&((*res)[1]), &(sum[1])); + ibz_copy(&((*res)[2]), &(sum[2])); + ibz_copy(&((*res)[3]), &(sum[3])); + + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} + +void +quat_alg_equal_denom(quat_alg_elem_t *res_a, quat_alg_elem_t *res_b, const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + ibz_t gcd, r; + ibz_init(&gcd); + ibz_init(&r); + ibz_gcd(&gcd, &(a->denom), &(b->denom)); + // temporarily set res_a.denom to a.denom/gcd, and res_b.denom to b.denom/gcd + 
ibz_div(&(res_a->denom), &r, &(a->denom), &gcd); + ibz_div(&(res_b->denom), &r, &(b->denom), &gcd); + for (int i = 0; i < 4; i++) { + // multiply coordiates by reduced denominators from the other element + ibz_mul(&(res_a->coord[i]), &(a->coord[i]), &(res_b->denom)); + ibz_mul(&(res_b->coord[i]), &(b->coord[i]), &(res_a->denom)); + } + // multiply both reduced denominators + ibz_mul(&(res_a->denom), &(res_a->denom), &(res_b->denom)); + // multiply them by the gcd to get the new common denominator + ibz_mul(&(res_b->denom), &(res_a->denom), &gcd); + ibz_mul(&(res_a->denom), &(res_a->denom), &gcd); + ibz_finalize(&gcd); + ibz_finalize(&r); +} + +// Public Functions + +void +quat_alg_add(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + quat_alg_elem_t res_a, res_b; + quat_alg_elem_init(&res_a); + quat_alg_elem_init(&res_b); + // put both on the same denominator + quat_alg_equal_denom(&res_a, &res_b, a, b); + // then add + ibz_copy(&(res->denom), &(res_a.denom)); + ibz_vec_4_add(&(res->coord), &(res_a.coord), &(res_b.coord)); + quat_alg_elem_finalize(&res_a); + quat_alg_elem_finalize(&res_b); +} + +void +quat_alg_sub(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + quat_alg_elem_t res_a, res_b; + quat_alg_elem_init(&res_a); + quat_alg_elem_init(&res_b); + // put both on the same denominator + quat_alg_equal_denom(&res_a, &res_b, a, b); + // then substract + ibz_copy(&res->denom, &res_a.denom); + ibz_vec_4_sub(&res->coord, &res_a.coord, &res_b.coord); + quat_alg_elem_finalize(&res_a); + quat_alg_elem_finalize(&res_b); +} + +void +quat_alg_mul(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b, const quat_alg_t *alg) +{ + // denominator: product of denominators + ibz_mul(&(res->denom), &(a->denom), &(b->denom)); + quat_alg_coord_mul(&(res->coord), &(a->coord), &(b->coord), alg); +} + +void +quat_alg_norm(ibz_t *res_num, ibz_t *res_denom, const quat_alg_elem_t *a, const quat_alg_t *alg) +{ + ibz_t r, g; + quat_alg_elem_t norm; + ibz_init(&r); + ibz_init(&g); + quat_alg_elem_init(&norm); + + quat_alg_conj(&norm, a); + quat_alg_mul(&norm, a, &norm, alg); + ibz_gcd(&g, &(norm.coord[0]), &(norm.denom)); + ibz_div(res_num, &r, &(norm.coord[0]), &g); + ibz_div(res_denom, &r, &(norm.denom), &g); + ibz_abs(res_denom, res_denom); + ibz_abs(res_num, res_num); + assert(ibz_cmp(res_denom, &ibz_const_zero) > 0); + + quat_alg_elem_finalize(&norm); + ibz_finalize(&r); + ibz_finalize(&g); +} + +void +quat_alg_scalar(quat_alg_elem_t *elem, const ibz_t *numerator, const ibz_t *denominator) +{ + ibz_copy(&(elem->denom), denominator); + ibz_copy(&(elem->coord[0]), numerator); + ibz_set(&(elem->coord[1]), 0); + ibz_set(&(elem->coord[2]), 0); + ibz_set(&(elem->coord[3]), 0); +} + +void +quat_alg_conj(quat_alg_elem_t *conj, const quat_alg_elem_t *x) +{ + ibz_copy(&(conj->denom), &(x->denom)); + ibz_copy(&(conj->coord[0]), &(x->coord[0])); + ibz_neg(&(conj->coord[1]), &(x->coord[1])); + ibz_neg(&(conj->coord[2]), &(x->coord[2])); + ibz_neg(&(conj->coord[3]), &(x->coord[3])); +} + +void +quat_alg_make_primitive(ibz_vec_4_t *primitive_x, ibz_t *content, const quat_alg_elem_t *x, const quat_lattice_t *order) +{ + int ok UNUSED = quat_lattice_contains(primitive_x, order, x); + assert(ok); + ibz_vec_4_content(content, primitive_x); + ibz_t r; + ibz_init(&r); + for (int i = 0; i < 4; i++) { + ibz_div(*primitive_x + i, &r, *primitive_x + i, content); + } + ibz_finalize(&r); +} + +void +quat_alg_normalize(quat_alg_elem_t *x) +{ + ibz_t gcd, sign, 
r; + ibz_init(&gcd); + ibz_init(&sign); + ibz_init(&r); + ibz_vec_4_content(&gcd, &(x->coord)); + ibz_gcd(&gcd, &gcd, &(x->denom)); + ibz_div(&(x->denom), &r, &(x->denom), &gcd); + ibz_vec_4_scalar_div(&(x->coord), &gcd, &(x->coord)); + ibz_set(&sign, 2 * (0 > ibz_cmp(&ibz_const_zero, &(x->denom))) - 1); + ibz_vec_4_scalar_mul(&(x->coord), &sign, &(x->coord)); + ibz_mul(&(x->denom), &sign, &(x->denom)); + ibz_finalize(&gcd); + ibz_finalize(&sign); + ibz_finalize(&r); +} + +int +quat_alg_elem_equal(const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + quat_alg_elem_t diff; + quat_alg_elem_init(&diff); + quat_alg_sub(&diff, a, b); + int res = quat_alg_elem_is_zero(&diff); + quat_alg_elem_finalize(&diff); + return (res); +} + +int +quat_alg_elem_is_zero(const quat_alg_elem_t *x) +{ + int res = ibz_vec_4_is_zero(&(x->coord)); + return (res); +} + +void +quat_alg_elem_set(quat_alg_elem_t *elem, int32_t denom, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) +{ + ibz_set(&(elem->coord[0]), coord0); + ibz_set(&(elem->coord[1]), coord1); + ibz_set(&(elem->coord[2]), coord2); + ibz_set(&(elem->coord[3]), coord3); + + ibz_set(&(elem->denom), denom); +} + +void +quat_alg_elem_copy(quat_alg_elem_t *copy, const quat_alg_elem_t *copied) +{ + ibz_copy(©->denom, &copied->denom); + ibz_copy(©->coord[0], &copied->coord[0]); + ibz_copy(©->coord[1], &copied->coord[1]); + ibz_copy(©->coord[2], &copied->coord[2]); + ibz_copy(©->coord[3], &copied->coord[3]); +} + +// helper functions for lattices +void +quat_alg_elem_copy_ibz(quat_alg_elem_t *elem, + const ibz_t *denom, + const ibz_t *coord0, + const ibz_t *coord1, + const ibz_t *coord2, + const ibz_t *coord3) +{ + ibz_copy(&(elem->coord[0]), coord0); + ibz_copy(&(elem->coord[1]), coord1); + ibz_copy(&(elem->coord[2]), coord2); + ibz_copy(&(elem->coord[3]), coord3); + + ibz_copy(&(elem->denom), denom); +} + +void +quat_alg_elem_mul_by_scalar(quat_alg_elem_t *res, const ibz_t *scalar, const quat_alg_elem_t *elem) +{ + for (int i = 0; i < 4; i++) { + ibz_mul(&(res->coord[i]), &(elem->coord[i]), scalar); + } + ibz_copy(&(res->denom), &(elem->denom)); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/api.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/api.c new file mode 100644 index 0000000000..baccd590b5 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/api.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: Apache-2.0 + +#include +#include + +#if defined(ENABLE_SIGN) + +SQISIGN_API +int +crypto_sign_keypair(unsigned char *pk, unsigned char *sk) +{ + + return sqisign_keypair(pk, sk); +} + +SQISIGN_API +int +crypto_sign(unsigned char *sm, unsigned long long *smlen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *sk) +{ + return sqisign_sign(sm, smlen, m, mlen, sk); +} +#endif + +SQISIGN_API +int +crypto_sign_open(unsigned char *m, unsigned long long *mlen, + const unsigned char *sm, unsigned long long smlen, + const unsigned char *pk) +{ + return sqisign_open(m, mlen, sm, smlen, pk); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/api.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/api.h new file mode 100644 index 0000000000..93a39842fc --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/api.h @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef api_h +#define api_h + +#include + +#define CRYPTO_SECRETKEYBYTES 353 +#define CRYPTO_PUBLICKEYBYTES 65 +#define CRYPTO_BYTES 148 + +#define CRYPTO_ALGNAME "SQIsign_lvl1" + +#if defined(ENABLE_SIGN) +SQISIGN_API +int 
+crypto_sign_keypair(unsigned char *pk, unsigned char *sk); + +SQISIGN_API +int +crypto_sign(unsigned char *sm, unsigned long long *smlen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *sk); +#endif + +SQISIGN_API +int +crypto_sign_open(unsigned char *m, unsigned long long *mlen, + const unsigned char *sm, unsigned long long smlen, + const unsigned char *pk); + +#endif /* api_h */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/basis.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/basis.c new file mode 100644 index 0000000000..94cb7fcacb --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/basis.c @@ -0,0 +1,416 @@ +#include "ec.h" +#include "fp2.h" +#include "e0_basis.h" +#include + +uint32_t +ec_recover_y(fp2_t *y, const fp2_t *Px, const ec_curve_t *curve) +{ // Recover y-coordinate of a point on the Montgomery curve y^2 = x^3 + Ax^2 + x + fp2_t t0; + + fp2_sqr(&t0, Px); + fp2_mul(y, &t0, &curve->A); // Ax^2 + fp2_add(y, y, Px); // Ax^2 + x + fp2_mul(&t0, &t0, Px); + fp2_add(y, y, &t0); // x^3 + Ax^2 + x + // This is required, because we do not yet know that our curves are + // supersingular so our points live on the twist with B = 1. + return fp2_sqrt_verify(y); +} + +static void +difference_point(ec_point_t *PQ, const ec_point_t *P, const ec_point_t *Q, const ec_curve_t *curve) +{ + // Given P,Q in projective x-only, computes a deterministic choice for (P-Q) + // Based on Proposition 3 of https://eprint.iacr.org/2017/518.pdf + + fp2_t Bxx, Bxz, Bzz, t0, t1; + + fp2_mul(&t0, &P->x, &Q->x); + fp2_mul(&t1, &P->z, &Q->z); + fp2_sub(&Bxx, &t0, &t1); + fp2_sqr(&Bxx, &Bxx); + fp2_mul(&Bxx, &Bxx, &curve->C); // C*(P.x*Q.x-P.z*Q.z)^2 + fp2_add(&Bxz, &t0, &t1); + fp2_mul(&t0, &P->x, &Q->z); + fp2_mul(&t1, &P->z, &Q->x); + fp2_add(&Bzz, &t0, &t1); + fp2_mul(&Bxz, &Bxz, &Bzz); // (P.x*Q.x+P.z*Q.z)(P.x*Q.z+P.z*Q.x) + fp2_sub(&Bzz, &t0, &t1); + fp2_sqr(&Bzz, &Bzz); + fp2_mul(&Bzz, &Bzz, &curve->C); // C*(P.x*Q.z-P.z*Q.x)^2 + fp2_mul(&Bxz, &Bxz, &curve->C); // C*(P.x*Q.x+P.z*Q.z)(P.x*Q.z+P.z*Q.x) + fp2_mul(&t0, &t0, &t1); + fp2_mul(&t0, &t0, &curve->A); + fp2_add(&t0, &t0, &t0); + fp2_add(&Bxz, &Bxz, &t0); // C*(P.x*Q.x+P.z*Q.z)(P.x*Q.z+P.z*Q.x) + 2*A*P.x*Q.z*P.z*Q.x + + // To ensure that the denominator is a fourth power in Fp, we normalize by + // C*C_bar^2*(P.z)_bar^2*(Q.z)_bar^2 + fp_copy(&t0.re, &curve->C.re); + fp_neg(&t0.im, &curve->C.im); + fp2_sqr(&t0, &t0); + fp2_mul(&t0, &t0, &curve->C); + fp_copy(&t1.re, &P->z.re); + fp_neg(&t1.im, &P->z.im); + fp2_sqr(&t1, &t1); + fp2_mul(&t0, &t0, &t1); + fp_copy(&t1.re, &Q->z.re); + fp_neg(&t1.im, &Q->z.im); + fp2_sqr(&t1, &t1); + fp2_mul(&t0, &t0, &t1); + fp2_mul(&Bxx, &Bxx, &t0); + fp2_mul(&Bxz, &Bxz, &t0); + fp2_mul(&Bzz, &Bzz, &t0); + + // Solving quadratic equation + fp2_sqr(&t0, &Bxz); + fp2_mul(&t1, &Bxx, &Bzz); + fp2_sub(&t0, &t0, &t1); + // No need to check if t0 is square, as per the entangled basis algorithm. + fp2_sqrt(&t0); + fp2_add(&PQ->x, &Bxz, &t0); + fp2_copy(&PQ->z, &Bzz); +} + +// Lifts a basis x(P), x(Q), x(P-Q) assuming the curve has (A/C : 1) and the point +// P = (X/Z : 1). 
For generic implementation see lift_basis() +uint32_t +lift_basis_normalized(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E) +{ + assert(fp2_is_one(&B->P.z)); + assert(fp2_is_one(&E->C)); + + fp2_copy(&P->x, &B->P.x); + fp2_copy(&Q->x, &B->Q.x); + fp2_copy(&Q->z, &B->Q.z); + fp2_set_one(&P->z); + uint32_t ret = ec_recover_y(&P->y, &P->x, E); + + // Algorithm of Okeya-Sakurai to recover y.Q in the montgomery model + fp2_t v1, v2, v3, v4; + fp2_mul(&v1, &P->x, &Q->z); + fp2_add(&v2, &Q->x, &v1); + fp2_sub(&v3, &Q->x, &v1); + fp2_sqr(&v3, &v3); + fp2_mul(&v3, &v3, &B->PmQ.x); + fp2_add(&v1, &E->A, &E->A); + fp2_mul(&v1, &v1, &Q->z); + fp2_add(&v2, &v2, &v1); + fp2_mul(&v4, &P->x, &Q->x); + fp2_add(&v4, &v4, &Q->z); + fp2_mul(&v2, &v2, &v4); + fp2_mul(&v1, &v1, &Q->z); + fp2_sub(&v2, &v2, &v1); + fp2_mul(&v2, &v2, &B->PmQ.z); + fp2_sub(&Q->y, &v3, &v2); + fp2_add(&v1, &P->y, &P->y); + fp2_mul(&v1, &v1, &Q->z); + fp2_mul(&v1, &v1, &B->PmQ.z); + fp2_mul(&Q->x, &Q->x, &v1); + fp2_mul(&Q->z, &Q->z, &v1); + + // Transforming to a jacobian coordinate + fp2_sqr(&v1, &Q->z); + fp2_mul(&Q->y, &Q->y, &v1); + fp2_mul(&Q->x, &Q->x, &Q->z); + return ret; +} + +uint32_t +lift_basis(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E) +{ + // Normalise the curve E such that (A : C) is (A/C : 1) + // and the point x(P) = (X/Z : 1). + fp2_t inverses[2]; + fp2_copy(&inverses[0], &B->P.z); + fp2_copy(&inverses[1], &E->C); + + fp2_batched_inv(inverses, 2); + fp2_set_one(&B->P.z); + fp2_set_one(&E->C); + + fp2_mul(&B->P.x, &B->P.x, &inverses[0]); + fp2_mul(&E->A, &E->A, &inverses[1]); + + // Lift the basis to Jacobian points P, Q + return lift_basis_normalized(P, Q, B, E); +} + +// Given an x-coordinate, determines if this is a valid +// point on the curve. Assumes C=1. +static uint32_t +is_on_curve(const fp2_t *x, const ec_curve_t *curve) +{ + assert(fp2_is_one(&curve->C)); + fp2_t t0; + + fp2_add(&t0, x, &curve->A); // x + (A/C) + fp2_mul(&t0, &t0, x); // x^2 + (A/C)*x + fp2_add_one(&t0, &t0); // x^2 + (A/C)*x + 1 + fp2_mul(&t0, &t0, x); // x^3 + (A/C)*x^2 + x + + return fp2_is_square(&t0); +} + +// Helper function which given a point of order k*2^n with n maximal +// and k odd, computes a point of order 2^f +static inline void +clear_cofactor_for_maximal_even_order(ec_point_t *P, ec_curve_t *curve, int f) +{ + // clear out the odd cofactor to get a point of order 2^n + ec_mul(P, p_cofactor_for_2f, P_COFACTOR_FOR_2F_BITLENGTH, P, curve); + + // clear the power of two to get a point of order 2^f + for (int i = 0; i < TORSION_EVEN_POWER - f; i++) { + xDBL_A24(P, P, &curve->A24, curve->is_A24_computed_and_normalized); + } +} + +// Helper function which finds an NQR -1 / (1 + i*b) for entangled basis generation +static uint8_t +find_nqr_factor(fp2_t *x, ec_curve_t *curve, const uint8_t start) +{ + // factor = -1/(1 + i*b) for b in Fp will be NQR whenever 1 + b^2 is NQR + // in Fp, so we find one of these and then invert (1 + i*b). We store b + // as a u8 hint to save time in verification. + + // We return the hint as a u8, but use (uint16_t)n to give 2^16 - 1 + // to make failure cryptographically negligible, with a fallback when + // n > 128 is required. 
+ uint8_t hint; + uint32_t found = 0; + uint16_t n = start; + + bool qr_b = 1; + fp_t b, tmp; + fp2_t z, t0, t1; + + do { + while (qr_b) { + // find b with 1 + b^2 a non-quadratic residue + fp_set_small(&tmp, (uint32_t)n * n + 1); + qr_b = fp_is_square(&tmp); + n++; // keeps track of b = n - 1 + } + + // for Px := -A/(1 + i*b) to be on the curve + // is equivalent to A^2*(z-1) - z^2 NQR for z = 1 + i*b + // thus prevents unnecessary inversion pre-check + + // t0 = z - 1 = i*b + // t1 = z = 1 + i*b + fp_set_small(&b, (uint32_t)n - 1); + fp2_set_zero(&t0); + fp2_set_one(&z); + fp_copy(&z.im, &b); + fp_copy(&t0.im, &b); + + // A^2*(z-1) - z^2 + fp2_sqr(&t1, &curve->A); + fp2_mul(&t0, &t0, &t1); // A^2 * (z - 1) + fp2_sqr(&t1, &z); + fp2_sub(&t0, &t0, &t1); // A^2 * (z - 1) - z^2 + found = !fp2_is_square(&t0); + + qr_b = 1; + } while (!found); + + // set Px to -A/(1 + i*b) + fp2_copy(x, &z); + fp2_inv(x); + fp2_mul(x, x, &curve->A); + fp2_neg(x, x); + + /* + * With very low probability n will not fit in 7 bits. + * We set hint = 0 which signals failure and the need + * to generate a value on the fly during verification + */ + hint = n <= 128 ? n - 1 : 0; + + return hint; +} + +// Helper function which finds a point x(P) = n * A +static uint8_t +find_nA_x_coord(fp2_t *x, ec_curve_t *curve, const uint8_t start) +{ + assert(!fp2_is_square(&curve->A)); // Only to be called when A is a NQR + + // when A is NQR we allow x(P) to be a multiple n*A of A + uint8_t n = start; + if (n == 1) { + fp2_copy(x, &curve->A); + } else { + fp2_mul_small(x, &curve->A, n); + } + + while (!is_on_curve(x, curve)) { + fp2_add(x, x, &curve->A); + n++; + } + + /* + * With very low probability (1/2^128), n will not fit in 7 bits. + * In this case, we set hint = 0 which signals failure and the need + * to generate a value on the fly during verification + */ + uint8_t hint = n < 128 ? 
n : 0; + return hint; +} + +// The entangled basis generation does not allow A = 0 +// so we simply return the one we have already precomputed +static void +ec_basis_E0_2f(ec_basis_t *PQ2, ec_curve_t *curve, int f) +{ + assert(fp2_is_zero(&curve->A)); + ec_point_t P, Q; + + // Set P, Q to precomputed (X : 1) values + fp2_copy(&P.x, &BASIS_E0_PX); + fp2_copy(&Q.x, &BASIS_E0_QX); + fp2_set_one(&P.z); + fp2_set_one(&Q.z); + + // clear the power of two to get a point of order 2^f + for (int i = 0; i < TORSION_EVEN_POWER - f; i++) { + xDBL_E0(&P, &P); + xDBL_E0(&Q, &Q); + } + + // Set P, Q in the basis and compute x(P - Q) + copy_point(&PQ2->P, &P); + copy_point(&PQ2->Q, &Q); + difference_point(&PQ2->PmQ, &P, &Q, curve); +} + +// Computes a basis E[2^f] = where the point Q is above (0 : 0) +// and stores hints as an array for faster recomputation at a later point +uint8_t +ec_curve_to_basis_2f_to_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f) +{ + // Normalise (A/C : 1) and ((A + 2)/4 : 1) + ec_normalize_curve_and_A24(curve); + + if (fp2_is_zero(&curve->A)) { + ec_basis_E0_2f(PQ2, curve, f); + return 0; + } + + uint8_t hint; + bool hint_A = fp2_is_square(&curve->A); + + // Compute the points P, Q + ec_point_t P, Q; + + if (!hint_A) { + // when A is NQR we allow x(P) to be a multiple n*A of A + hint = find_nA_x_coord(&P.x, curve, 1); + } else { + // when A is QR we instead have to find (1 + b^2) a NQR + // such that x(P) = -A / (1 + i*b) + hint = find_nqr_factor(&P.x, curve, 1); + } + + fp2_set_one(&P.z); + fp2_add(&Q.x, &curve->A, &P.x); + fp2_neg(&Q.x, &Q.x); + fp2_set_one(&Q.z); + + // clear out the odd cofactor to get a point of order 2^f + clear_cofactor_for_maximal_even_order(&P, curve, f); + clear_cofactor_for_maximal_even_order(&Q, curve, f); + + // compute PmQ, set PmQ to Q to ensure Q above (0,0) + difference_point(&PQ2->Q, &P, &Q, curve); + copy_point(&PQ2->P, &P); + copy_point(&PQ2->PmQ, &Q); + + // Finally, we compress hint_A and hint into a single bytes. 
+ // We choose to set the LSB of hint to hint_A + assert(hint < 128); // We expect hint to be 7-bits in size + return (hint << 1) | hint_A; +} + +// Computes a basis E[2^f] = where the point Q is above (0 : 0) +// given the hints as an array for faster basis computation +int +ec_curve_to_basis_2f_from_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f, const uint8_t hint) +{ + // Normalise (A/C : 1) and ((A + 2)/4 : 1) + ec_normalize_curve_and_A24(curve); + + if (fp2_is_zero(&curve->A)) { + ec_basis_E0_2f(PQ2, curve, f); + return 1; + } + + // The LSB of hint encodes whether A is a QR + // The remaining 7-bits are used to find a valid x(P) + bool hint_A = hint & 1; + uint8_t hint_P = hint >> 1; + + // Compute the points P, Q + ec_point_t P, Q; + + if (!hint_P) { + // When hint_P = 0 it means we did not find a point in 128 attempts + // this is very rare and we almost never expect to need this fallback + // In either case, we can start with b = 128 to skip testing the known + // values which will not work + if (!hint_A) { + find_nA_x_coord(&P.x, curve, 128); + } else { + find_nqr_factor(&P.x, curve, 128); + } + } else { + // Otherwise we use the hint to directly find x(P) based on hint_A + if (!hint_A) { + // when A is NQR, we have found n such that x(P) = n*A + fp2_mul_small(&P.x, &curve->A, hint_P); + } else { + // when A is QR we have found b such that (1 + b^2) is a NQR in + // Fp, so we must compute x(P) = -A / (1 + i*b) + fp_set_one(&P.x.re); + fp_set_small(&P.x.im, hint_P); + fp2_inv(&P.x); + fp2_mul(&P.x, &P.x, &curve->A); + fp2_neg(&P.x, &P.x); + } + } + fp2_set_one(&P.z); + +#ifndef NDEBUG + int passed = 1; + passed = is_on_curve(&P.x, curve); + passed &= !fp2_is_square(&P.x); + + if (!passed) + return 0; +#endif + + // set xQ to -xP - A + fp2_add(&Q.x, &curve->A, &P.x); + fp2_neg(&Q.x, &Q.x); + fp2_set_one(&Q.z); + + // clear out the odd cofactor to get a point of order 2^f + clear_cofactor_for_maximal_even_order(&P, curve, f); + clear_cofactor_for_maximal_even_order(&Q, curve, f); + + // compute PmQ, set PmQ to Q to ensure Q above (0,0) + difference_point(&PQ2->Q, &P, &Q, curve); + copy_point(&PQ2->P, &P); + copy_point(&PQ2->PmQ, &Q); + +#ifndef NDEBUG + passed &= test_basis_order_twof(PQ2, curve, f); + + if (!passed) + return 0; +#endif + + return 1; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/bench.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/bench.h new file mode 100644 index 0000000000..c253825828 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/bench.h @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: Apache-2.0 +#ifndef BENCH_H__ +#define BENCH_H__ + +#include +#include +#include +#include +#include +#if defined(__APPLE__) +#include "bench_macos.h" +#endif + +#if defined(TARGET_ARM) || defined(TARGET_S390X) || defined(NO_CYCLE_COUNTER) +#define BENCH_UNIT0 "nanoseconds" +#define BENCH_UNIT3 "microseconds" +#define BENCH_UNIT6 "milliseconds" +#define BENCH_UNIT9 "seconds" +#else +#define BENCH_UNIT0 "cycles" +#define BENCH_UNIT3 "kilocycles" +#define BENCH_UNIT6 "megacycles" +#define BENCH_UNIT9 "gigacycles" +#endif + +static inline void +cpucycles_init(void) { +#if defined(__APPLE__) && defined(TARGET_ARM64) + macos_init_rdtsc(); +#endif +} + +static inline uint64_t +cpucycles(void) +{ +#if defined(TARGET_AMD64) || defined(TARGET_X86) + uint32_t hi, lo; + + asm volatile("rdtsc" : "=a"(lo), "=d"(hi)); + return ((uint64_t)lo) | ((uint64_t)hi << 32); +#elif defined(TARGET_S390X) + uint64_t tod; + asm volatile("stckf %0\n" : "=Q"(tod) : : "cc"); + 
return (tod * 1000 / 4096); +#elif defined(TARGET_ARM64) && !defined(NO_CYCLE_COUNTER) +#if defined(__APPLE__) + return macos_rdtsc(); +#else + uint64_t cycles; + asm volatile("mrs %0, PMCCNTR_EL0" : "=r"(cycles)); + return cycles; +#endif // __APPLE__ +#else + struct timespec time; + clock_gettime(CLOCK_REALTIME, &time); + return (uint64_t)time.tv_sec * 1000000000 + time.tv_nsec; +#endif +} + +static inline int +CMPFUNC(const void *a, const void *b) +{ + uint64_t aa = *(uint64_t *)a, bb = *(uint64_t *)b; + + if (aa > bb) + return +1; + if (aa < bb) + return -1; + return 0; +} + +static inline uint32_t +ISQRT(uint64_t x) +{ + uint32_t r = 0; + for (ssize_t i = 31; i >= 0; --i) { + uint32_t s = r + (1 << i); + if ((uint64_t)s * s <= x) + r = s; + } + return r; +} + +static inline double +_TRUNC(uint64_t x) +{ + return x / 1000 / 1000.; +} +#define _FMT ".3lf" +#define _UNIT BENCH_UNIT6 + +#define BENCH_CODE_1(RUNS) \ + { \ + const size_t count = (RUNS); \ + if (!count) \ + abort(); \ + uint64_t cycles, cycles1, cycles2; \ + uint64_t cycles_list[count]; \ + cycles = 0; \ + for (size_t i = 0; i < count; ++i) { \ + cycles1 = cpucycles(); + +#define BENCH_CODE_2(name) \ + cycles2 = cpucycles(); \ + cycles_list[i] = cycles2 - cycles1; \ + cycles += cycles2 - cycles1; \ + } \ + qsort(cycles_list, count, sizeof(uint64_t), CMPFUNC); \ + uint64_t variance = 0; \ + for (size_t i = 0; i < count; ++i) { \ + int64_t off = cycles_list[i] - cycles / count; \ + variance += off * off; \ + } \ + variance /= count; \ + printf(" %-10s", name); \ + printf(" | average %9" _FMT " | stddev %9" _FMT, \ + _TRUNC(cycles / count), \ + _TRUNC(ISQRT(variance))); \ + printf(" | median %9" _FMT " | min %9" _FMT " | max %9" _FMT, \ + _TRUNC(cycles_list[count / 2]), \ + _TRUNC(cycles_list[0]), \ + _TRUNC(cycles_list[count - 1])); \ + printf(" (%s)\n", _UNIT); \ + } + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/bench_macos.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/bench_macos.h new file mode 100644 index 0000000000..0494fc85e9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/bench_macos.h @@ -0,0 +1,143 @@ +// WARNING: must be run as root on an M1 device +// WARNING: fragile, uses private apple APIs +// currently no command line interface, see variables at top of main + +/* +no warranty; use at your own risk - i believe this code needs +some minor changes to work on some later hardware and/or software revisions, +which is unsurprising given the use of undocumented, private APIs. +------------------------------------------------------------------------------ +This code is available under 2 licenses -- choose whichever you prefer. +------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2020 Dougall Johnson +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ + +/* + Based on https://github.com/travisdowns/robsize + Henry Wong + http://blog.stuffedcow.net/2013/05/measuring-rob-capacity/ + 2014-10-14 +*/ + +#include +#include +#include +#include + +#define KPERF_LIST \ + /* ret, name, params */ \ + F(int, kpc_force_all_ctrs_set, int) \ + F(int, kpc_set_counting, uint32_t) \ + F(int, kpc_set_thread_counting, uint32_t) \ + F(int, kpc_set_config, uint32_t, void *) \ + F(int, kpc_get_thread_counters, int, unsigned int, void *) + +#define F(ret, name, ...) \ + typedef ret name##proc(__VA_ARGS__); \ + static name##proc *name; +KPERF_LIST +#undef F + +#define CFGWORD_EL0A64EN_MASK (0x20000) + +#define CPMU_CORE_CYCLE 0x02 + +#define KPC_CLASS_FIXED (0) +#define KPC_CLASS_CONFIGURABLE (1) + +#define COUNTERS_COUNT 10 +#define KPC_MASK ((1u << KPC_CLASS_CONFIGURABLE) | (1u << KPC_CLASS_FIXED)) +static uint64_t g_config[COUNTERS_COUNT]; +static uint64_t g_counters[COUNTERS_COUNT]; + +static void +macos_configure_rdtsc() +{ + if (kpc_force_all_ctrs_set(1)) { + printf("kpc_force_all_ctrs_set failed\n"); + return; + } + + if (kpc_set_config(KPC_MASK, g_config)) { + printf("kpc_set_config failed\n"); + return; + } + + if (kpc_set_counting(KPC_MASK)) { + printf("kpc_set_counting failed\n"); + return; + } + + if (kpc_set_thread_counting(KPC_MASK)) { + printf("kpc_set_thread_counting failed\n"); + return; + } +} + +static void +macos_init_rdtsc() +{ + void *kperf = + dlopen("/System/Library/PrivateFrameworks/kperf.framework/Versions/A/kperf", RTLD_LAZY); + if (!kperf) { + printf("kperf = %p\n", kperf); + return; + } +#define F(ret, name, ...) 
\ + name = (name##proc *)(intptr_t)(dlsym(kperf, #name)); \ + if (!name) { \ + printf("%s = %p\n", #name, (void *)(intptr_t)name); \ + return; \ + } + KPERF_LIST +#undef F + + g_config[0] = CPMU_CORE_CYCLE | CFGWORD_EL0A64EN_MASK; + + macos_configure_rdtsc(); +} + +static uint64_t +macos_rdtsc(void) +{ + if (kpc_get_thread_counters(0, COUNTERS_COUNT, g_counters)) { + printf("kpc_get_thread_counters failed\n"); + return 1; + } + return g_counters[2]; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/biextension.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/biextension.c new file mode 100644 index 0000000000..1df7ab938b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/biextension.c @@ -0,0 +1,770 @@ +#include +#include +#include +#include + +/* + * We implement the biextension arithmetic by using the cubical torsor + * representation. For now only implement the 2^e-ladder. + * + * Warning: cubicalADD is off by a factor x4 with respect to the correct + * cubical arithmetic. This does not affect the Weil pairing or the Tate + * pairing over F_{p^2} (due to the final exponentiation), but would give + * the wrong result if we compute the Tate pairing over F_p. + */ + +// this would be exactly like xADD if PQ was 'antinormalised' as (1,z) +// Cost: 3M + 2S + 3a + 3s +// Note: if needed, cubicalDBL is simply xDBL_A24 normalized and +// costs 3M + 2S + 2a + 2s + +static void +cubicalADD(ec_point_t *R, const ec_point_t *P, const ec_point_t *Q, const fp2_t *ixPQ) +{ + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_add(&t2, &Q->x, &Q->z); + fp2_sub(&t3, &Q->x, &Q->z); + fp2_mul(&t0, &t0, &t3); + fp2_mul(&t1, &t1, &t2); + fp2_add(&t2, &t0, &t1); + fp2_sub(&t3, &t0, &t1); + fp2_sqr(&R->z, &t3); + fp2_sqr(&t2, &t2); + fp2_mul(&R->x, ixPQ, &t2); +} + +// Given cubical reps of P, Q and x(P - Q) = (1 : ixPQ) +// compute P + Q, [2]Q +// Cost: 6M + 4S + 4a + 4s +static void +cubicalDBLADD(ec_point_t *PpQ, + ec_point_t *QQ, + const ec_point_t *P, + const ec_point_t *Q, + const fp2_t *ixPQ, + const ec_point_t *A24) +{ + // A24 = (A+2C/4C: 1) + assert(fp2_is_one(&A24->z)); + + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_add(&PpQ->x, &Q->x, &Q->z); + fp2_sub(&t3, &Q->x, &Q->z); + fp2_sqr(&t2, &PpQ->x); + fp2_sqr(&QQ->z, &t3); + fp2_mul(&t0, &t0, &t3); + fp2_mul(&t1, &t1, &PpQ->x); + fp2_add(&PpQ->x, &t0, &t1); + fp2_sub(&t3, &t0, &t1); + fp2_sqr(&PpQ->z, &t3); + fp2_sqr(&PpQ->x, &PpQ->x); + fp2_mul(&PpQ->x, ixPQ, &PpQ->x); + fp2_sub(&t3, &t2, &QQ->z); + fp2_mul(&QQ->x, &t2, &QQ->z); + fp2_mul(&t0, &t3, &A24->x); + fp2_add(&t0, &t0, &QQ->z); + fp2_mul(&QQ->z, &t0, &t3); +} + +// iterative biextension doubling +static void +biext_ladder_2e(uint32_t e, + ec_point_t *PnQ, + ec_point_t *nQ, + const ec_point_t *PQ, + const ec_point_t *Q, + const fp2_t *ixP, + const ec_point_t *A24) +{ + copy_point(PnQ, PQ); + copy_point(nQ, Q); + for (uint32_t i = 0; i < e; i++) { + cubicalDBLADD(PnQ, nQ, PnQ, nQ, ixP, A24); + } +} + +// Compute the monodromy ratio X/Z above as a (X:Z) point to avoid a division +// We implicitly use (1,0) as a cubical point above 0_E +static void +point_ratio(ec_point_t *R, const ec_point_t *PnQ, const ec_point_t *nQ, const ec_point_t *P) +{ + // Sanity tests + assert(ec_is_zero(nQ)); + assert(ec_is_equal(PnQ, P)); + + fp2_mul(&R->x, &nQ->x, &P->x); + fp2_copy(&R->z, &PnQ->x); +} + +// Compute the cubical translation of P by a point of 2-torsion T +static void +translate(ec_point_t *P, 
const ec_point_t *T)
+{
+    // When we translate, the following three things can happen:
+    // T = (A : 0) then the translation of P should be P
+    // T = (0 : B) then the translation of P = (X : Z) should be (Z : X)
+    // Otherwise T = (A : B) and P translates to (AX - BZ : BX - AZ)
+    // We compute this in constant time by computing the generic case
+    // and then using constant time swaps.
+    fp2_t PX_new, PZ_new;
+
+    {
+        fp2_t t0, t1;
+
+        // PX_new = AX - BZ
+        fp2_mul(&t0, &T->x, &P->x);
+        fp2_mul(&t1, &T->z, &P->z);
+        fp2_sub(&PX_new, &t0, &t1);
+
+        // PZ_new = BX - AZ
+        fp2_mul(&t0, &T->z, &P->x);
+        fp2_mul(&t1, &T->x, &P->z);
+        fp2_sub(&PZ_new, &t0, &t1);
+    }
+
+    // When we have A zero we should return (Z : X)
+    uint32_t TA_is_zero = fp2_is_zero(&T->x);
+    fp2_select(&PX_new, &PX_new, &P->z, TA_is_zero);
+    fp2_select(&PZ_new, &PZ_new, &P->x, TA_is_zero);
+
+    // When we have B zero we should return (X : Z)
+    uint32_t TB_is_zero = fp2_is_zero(&T->z);
+    fp2_select(&PX_new, &PX_new, &P->x, TB_is_zero);
+    fp2_select(&PZ_new, &PZ_new, &P->z, TB_is_zero);
+
+    // Set the point to the desired result
+    fp2_copy(&P->x, &PX_new);
+    fp2_copy(&P->z, &PZ_new);
+}
+
+// Compute the biextension monodromy g_P,Q^{2^g} (in level 1) via the
+// cubical arithmetic of P+2^e Q.
+// The suffix _i means that we are given 1/x(P) as a parameter. Warning: to
+// get a meaningful result when using the monodromy to compute pairings, we
+// need P, Q, PQ, A24 to be normalised (this is not strictly necessary, but
+// care needs to be taken when they are not normalised; only the
+// normalised case is handled for now).
+static void
+monodromy_i(ec_point_t *R, const pairing_params_t *pairing_data, bool swap_PQ)
+{
+    fp2_t ixP;
+    ec_point_t P, Q, PnQ, nQ;
+
+    // When we compute the Weil pairing we need both P + [2^e]Q and
+    // Q + [2^e]P, which we can do easily with biext_ladder_2e() below;
+    // we use a bool to decide whether to use Q, ixP or P, ixQ in the
+    // ladder, and P or Q in the translation.
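+
+    // Usage sketch (informal, based on weil_n() further down in this file):
+    // the Weil pairing is recovered as the ratio of the two monodromies,
+    //
+    //     e_{2^e}(P, Q) = monodromy_i(P, Q) / monodromy_i(Q, P),
+    //
+    // so weil_n() calls this function once with swap_PQ = false and once
+    // with swap_PQ = true, then divides the two projective (X : Z) results.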
+ if (!swap_PQ) { + copy_point(&P, &pairing_data->P); + copy_point(&Q, &pairing_data->Q); + fp2_copy(&ixP, &pairing_data->ixP); + } else { + copy_point(&P, &pairing_data->Q); + copy_point(&Q, &pairing_data->P); + fp2_copy(&ixP, &pairing_data->ixQ); + } + + // Compute the biextension ladder P + [2^e]Q + biext_ladder_2e(pairing_data->e - 1, &PnQ, &nQ, &pairing_data->PQ, &Q, &ixP, &pairing_data->A24); + translate(&PnQ, &nQ); + translate(&nQ, &nQ); + point_ratio(R, &PnQ, &nQ, &P); +} + +// Normalize the points and also store 1/x(P), 1/x(Q) +static void +cubical_normalization(pairing_params_t *pairing_data, const ec_point_t *P, const ec_point_t *Q) +{ + fp2_t t[4]; + fp2_copy(&t[0], &P->x); + fp2_copy(&t[1], &P->z); + fp2_copy(&t[2], &Q->x); + fp2_copy(&t[3], &Q->z); + fp2_batched_inv(t, 4); + + // Store PZ / PX and QZ / QX + fp2_mul(&pairing_data->ixP, &P->z, &t[0]); + fp2_mul(&pairing_data->ixQ, &Q->z, &t[2]); + + // Store x(P), x(Q) normalised to (X/Z : 1) + fp2_mul(&pairing_data->P.x, &P->x, &t[1]); + fp2_mul(&pairing_data->Q.x, &Q->x, &t[3]); + fp2_set_one(&pairing_data->P.z); + fp2_set_one(&pairing_data->Q.z); +} + +// Weil pairing, PQ should be P+Q in (X:Z) coordinates +// We assume the points are normalised correctly +static void +weil_n(fp2_t *r, const pairing_params_t *pairing_data) +{ + ec_point_t R0, R1; + monodromy_i(&R0, pairing_data, true); + monodromy_i(&R1, pairing_data, false); + + fp2_mul(r, &R0.x, &R1.z); + fp2_inv(r); + fp2_mul(r, r, &R0.z); + fp2_mul(r, r, &R1.x); +} + +// Weil pairing, PQ should be P+Q in (X:Z) coordinates +// Normalise the points and call the code above +// The code will crash (division by 0) if either P or Q is (0:1) +void +weil(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E) +{ + pairing_params_t pairing_data; + // Construct the structure for the Weil pairing + // Set (PX/PZ : 1), (QX : QZ : 1), PZ/PX and QZ/QX + pairing_data.e = e; + cubical_normalization(&pairing_data, P, Q); + copy_point(&pairing_data.PQ, PQ); + + // Ensure the input curve has A24 normalised and store + // in a struct + ec_curve_normalize_A24(E); + copy_point(&pairing_data.A24, &E->A24); + + // Compute the Weil pairing e_(2^n)(P, Q) + weil_n(r, &pairing_data); +} + +// two helper functions for reducing the tate pairing +// clear_cofac clears (p + 1) // 2^f for an Fp2 value +void +clear_cofac(fp2_t *r, const fp2_t *a) +{ + digit_t exp = *p_cofactor_for_2f; + exp >>= 1; + + fp2_t x; + fp2_copy(&x, a); + fp2_copy(r, a); + + // removes cofac + while (exp > 0) { + fp2_sqr(r, r); + if (exp & 1) { + fp2_mul(r, r, &x); + } + exp >>= 1; + } +} + +// applies frobenius a + ib --> a - ib to an fp2 element +void +fp2_frob(fp2_t *out, const fp2_t *in) +{ + fp_copy(&(out->re), &(in->re)); + fp_neg(&(out->im), &(in->im)); +} + +// reduced Tate pairing, normalizes the points, assumes PQ is P+Q in (X:Z) +// coordinates. 
Computes 1/x(P) and 1/x(Q) for efficient cubical ladder +void +reduced_tate(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E) +{ + uint32_t e_full = TORSION_EVEN_POWER; + uint32_t e_diff = e_full - e; + ec_point_t R; + pairing_params_t pairing_data; + + // Construct the structure for the Weil pairing + // Set (PX/PZ : 1), (QX : QZ : 1), PZ/PX and QZ/QX + pairing_data.e = e; + cubical_normalization(&pairing_data, P, Q); + copy_point(&pairing_data.PQ, PQ); + + // Ensure the input curve has A24 normalised and store + // in a struct + ec_curve_normalize_A24(E); + copy_point(&pairing_data.A24, &E->A24); + + monodromy_i(&R, &pairing_data, true); + + // we get unreduced tate as R.X, R.Z + // reduced tate is -(R.Z/R.X)^((p^2 - 1) div 2^f) + // we reuse R.X and R.Z to split reduction step ^(p-1) into frobenius and ^-1 + fp2_t frob, tmp; + fp2_copy(&tmp, &R.x); + fp2_frob(&frob, &R.x); + fp2_mul(&R.x, &R.z, &frob); + fp2_frob(&frob, &R.z); + fp2_mul(&R.z, &tmp, &frob); + fp2_inv(&R.x); + fp2_mul(r, &R.x, &R.z); + + clear_cofac(r, r); + // clear remaining 2^e_diff + for (uint32_t j = 0; j < e_diff; j++) { + fp2_sqr(r, r); + } +} + +// Functions to compute discrete logs by computing the Weil pairing of points +// followed by computing the dlog in Fp^2 +// (If we work with full order points, it would be faster to use the Tate +// pairings rather than the Weil pairings; this is not implemented yet) + +// recursive dlog function +static bool +fp2_dlog_2e_rec(digit_t *a, long len, fp2_t *pows_f, fp2_t *pows_g, long stacklen) +{ + if (len == 0) { + // *a = 0; + for (int i = 0; i < NWORDS_ORDER; i++) { + a[i] = 0; + } + return true; + } else if (len == 1) { + if (fp2_is_one(&pows_f[stacklen - 1])) { + // a = 0; + for (int i = 0; i < NWORDS_ORDER; i++) { + a[i] = 0; + } + for (int i = 0; i < stacklen - 1; ++i) { + fp2_sqr(&pows_g[i], &pows_g[i]); // new_g = g^2 + } + return true; + } else if (fp2_is_equal(&pows_f[stacklen - 1], &pows_g[stacklen - 1])) { + // a = 1; + a[0] = 1; + for (int i = 1; i < NWORDS_ORDER; i++) { + a[i] = 0; + } + for (int i = 0; i < stacklen - 1; ++i) { + fp2_mul(&pows_f[i], &pows_f[i], &pows_g[i]); // new_f = f*g + fp2_sqr(&pows_g[i], &pows_g[i]); // new_g = g^2 + } + return true; + } else { + return false; + } + } else { + long right = (double)len * 0.5; + long left = len - right; + pows_f[stacklen] = pows_f[stacklen - 1]; + pows_g[stacklen] = pows_g[stacklen - 1]; + for (int i = 0; i < left; i++) { + fp2_sqr(&pows_f[stacklen], &pows_f[stacklen]); + fp2_sqr(&pows_g[stacklen], &pows_g[stacklen]); + } + // uint32_t dlp1 = 0, dlp2 = 0; + digit_t dlp1[NWORDS_ORDER], dlp2[NWORDS_ORDER]; + bool ok; + ok = fp2_dlog_2e_rec(dlp1, right, pows_f, pows_g, stacklen + 1); + if (!ok) + return false; + ok = fp2_dlog_2e_rec(dlp2, left, pows_f, pows_g, stacklen); + if (!ok) + return false; + // a = dlp1 + 2^right * dlp2 + multiple_mp_shiftl(dlp2, right, NWORDS_ORDER); + mp_add(a, dlp2, dlp1, NWORDS_ORDER); + + return true; + } +} + +// compute DLP: compute scal such that f = g^scal with f, 1/g as input +static bool +fp2_dlog_2e(digit_t *scal, const fp2_t *f, const fp2_t *g_inverse, int e) +{ + long log, len = e; + for (log = 0; len > 1; len >>= 1) + log++; + log += 1; + + fp2_t pows_f[log], pows_g[log]; + pows_f[0] = *f; + pows_g[0] = *g_inverse; + + for (int i = 0; i < NWORDS_ORDER; i++) { + scal[i] = 0; + } + + bool ok = fp2_dlog_2e_rec(scal, e, pows_f, pows_g, 1); + assert(ok); + + return ok; +} + +// Normalize the bases (P, Q), (R, S) and store 
their inverse +// and additionally normalise the curve to (A/C : 1) +static void +cubical_normalization_dlog(pairing_dlog_params_t *pairing_dlog_data, ec_curve_t *curve) +{ + fp2_t t[11]; + ec_basis_t *PQ = &pairing_dlog_data->PQ; + ec_basis_t *RS = &pairing_dlog_data->RS; + fp2_copy(&t[0], &PQ->P.x); + fp2_copy(&t[1], &PQ->P.z); + fp2_copy(&t[2], &PQ->Q.x); + fp2_copy(&t[3], &PQ->Q.z); + fp2_copy(&t[4], &PQ->PmQ.x); + fp2_copy(&t[5], &PQ->PmQ.z); + fp2_copy(&t[6], &RS->P.x); + fp2_copy(&t[7], &RS->P.z); + fp2_copy(&t[8], &RS->Q.x); + fp2_copy(&t[9], &RS->Q.z); + fp2_copy(&t[10], &curve->C); + + fp2_batched_inv(t, 11); + + fp2_mul(&pairing_dlog_data->ixP, &PQ->P.z, &t[0]); + fp2_mul(&PQ->P.x, &PQ->P.x, &t[1]); + fp2_set_one(&PQ->P.z); + + fp2_mul(&pairing_dlog_data->ixQ, &PQ->Q.z, &t[2]); + fp2_mul(&PQ->Q.x, &PQ->Q.x, &t[3]); + fp2_set_one(&PQ->Q.z); + + fp2_mul(&PQ->PmQ.x, &PQ->PmQ.x, &t[5]); + fp2_set_one(&PQ->PmQ.z); + + fp2_mul(&pairing_dlog_data->ixR, &RS->P.z, &t[6]); + fp2_mul(&RS->P.x, &RS->P.x, &t[7]); + fp2_set_one(&RS->P.z); + + fp2_mul(&pairing_dlog_data->ixS, &RS->Q.z, &t[8]); + fp2_mul(&RS->Q.x, &RS->Q.x, &t[9]); + fp2_set_one(&RS->Q.z); + + fp2_mul(&curve->A, &curve->A, &t[10]); + fp2_set_one(&curve->C); +} + +// Given two bases and basis = compute +// x(P - R), x(P - S), x(R - Q), x(S - Q) +static void +compute_difference_points(pairing_dlog_params_t *pairing_dlog_data, ec_curve_t *curve) +{ + jac_point_t xyP, xyQ, xyR, xyS, temp; + + // lifting the two basis points, assumes that x(P) and x(R) + // and the curve itself are normalised to (X : 1) + lift_basis_normalized(&xyP, &xyQ, &pairing_dlog_data->PQ, curve); + lift_basis_normalized(&xyR, &xyS, &pairing_dlog_data->RS, curve); + + // computation of the differences + // x(P - R) + jac_neg(&temp, &xyR); + ADD(&temp, &temp, &xyP, curve); + jac_to_xz(&pairing_dlog_data->diff.PmR, &temp); + + // x(P - S) + jac_neg(&temp, &xyS); + ADD(&temp, &temp, &xyP, curve); + jac_to_xz(&pairing_dlog_data->diff.PmS, &temp); + + // x(R - Q) + jac_neg(&temp, &xyQ); + ADD(&temp, &temp, &xyR, curve); + jac_to_xz(&pairing_dlog_data->diff.RmQ, &temp); + + // x(S - Q) + jac_neg(&temp, &xyQ); + ADD(&temp, &temp, &xyS, curve); + jac_to_xz(&pairing_dlog_data->diff.SmQ, &temp); +} + +// Inline all the Weil pairing computations needed for ec_dlog_2_weil +static void +weil_dlog(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, pairing_dlog_params_t *pairing_dlog_data) +{ + + ec_point_t nP, nQ, nR, nS, nPQ, PnQ, nPR, PnR, nPS, PnS, nRQ, RnQ, nSQ, SnQ; + + copy_point(&nP, &pairing_dlog_data->PQ.P); + copy_point(&nQ, &pairing_dlog_data->PQ.Q); + copy_point(&nR, &pairing_dlog_data->RS.P); + copy_point(&nS, &pairing_dlog_data->RS.Q); + copy_point(&nPQ, &pairing_dlog_data->PQ.PmQ); + copy_point(&PnQ, &pairing_dlog_data->PQ.PmQ); + copy_point(&nPR, &pairing_dlog_data->diff.PmR); + copy_point(&nPS, &pairing_dlog_data->diff.PmS); + copy_point(&PnR, &pairing_dlog_data->diff.PmR); + copy_point(&PnS, &pairing_dlog_data->diff.PmS); + copy_point(&nRQ, &pairing_dlog_data->diff.RmQ); + copy_point(&nSQ, &pairing_dlog_data->diff.SmQ); + copy_point(&RnQ, &pairing_dlog_data->diff.RmQ); + copy_point(&SnQ, &pairing_dlog_data->diff.SmQ); + + for (uint32_t i = 0; i < pairing_dlog_data->e - 1; i++) { + cubicalADD(&nPQ, &nPQ, &nP, &pairing_dlog_data->ixQ); + cubicalADD(&nPR, &nPR, &nP, &pairing_dlog_data->ixR); + cubicalDBLADD(&nPS, &nP, &nPS, &nP, &pairing_dlog_data->ixS, &pairing_dlog_data->A24); + + cubicalADD(&PnQ, &PnQ, &nQ, &pairing_dlog_data->ixP); + cubicalADD(&RnQ, 
&RnQ, &nQ, &pairing_dlog_data->ixR); + cubicalDBLADD(&SnQ, &nQ, &SnQ, &nQ, &pairing_dlog_data->ixS, &pairing_dlog_data->A24); + + cubicalADD(&PnR, &PnR, &nR, &pairing_dlog_data->ixP); + cubicalDBLADD(&nRQ, &nR, &nRQ, &nR, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + + cubicalADD(&PnS, &PnS, &nS, &pairing_dlog_data->ixP); + cubicalDBLADD(&nSQ, &nS, &nSQ, &nS, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + } + + // weil(&w0,e,&PQ->P,&PQ->Q,&PQ->PmQ,&A24); + translate(&nPQ, &nP); + translate(&nPR, &nP); + translate(&nPS, &nP); + translate(&PnQ, &nQ); + translate(&RnQ, &nQ); + translate(&SnQ, &nQ); + translate(&PnR, &nR); + translate(&nRQ, &nR); + translate(&PnS, &nS); + translate(&nSQ, &nS); + + translate(&nP, &nP); + translate(&nQ, &nQ); + translate(&nR, &nR); + translate(&nS, &nS); + + // computation of the reference weil pairing + ec_point_t T0, T1; + fp2_t w1[5], w2[5]; + + // e(P, Q) = w0 + point_ratio(&T0, &nPQ, &nP, &pairing_dlog_data->PQ.Q); + point_ratio(&T1, &PnQ, &nQ, &pairing_dlog_data->PQ.P); + // For the first element we need it's inverse for + // fp2_dlog_2e so we swap w1 and w2 here to save inversions + fp2_mul(&w2[0], &T0.x, &T1.z); + fp2_mul(&w1[0], &T1.x, &T0.z); + + // e(P,R) = w0^r2 + point_ratio(&T0, &nPR, &nP, &pairing_dlog_data->RS.P); + point_ratio(&T1, &PnR, &nR, &pairing_dlog_data->PQ.P); + fp2_mul(&w1[1], &T0.x, &T1.z); + fp2_mul(&w2[1], &T1.x, &T0.z); + + // e(R,Q) = w0^r1 + point_ratio(&T0, &nRQ, &nR, &pairing_dlog_data->PQ.Q); + point_ratio(&T1, &RnQ, &nQ, &pairing_dlog_data->RS.P); + fp2_mul(&w1[2], &T0.x, &T1.z); + fp2_mul(&w2[2], &T1.x, &T0.z); + + // e(P,S) = w0^s2 + point_ratio(&T0, &nPS, &nP, &pairing_dlog_data->RS.Q); + point_ratio(&T1, &PnS, &nS, &pairing_dlog_data->PQ.P); + fp2_mul(&w1[3], &T0.x, &T1.z); + fp2_mul(&w2[3], &T1.x, &T0.z); + + // e(S,Q) = w0^s1 + point_ratio(&T0, &nSQ, &nS, &pairing_dlog_data->PQ.Q); + point_ratio(&T1, &SnQ, &nQ, &pairing_dlog_data->RS.Q); + fp2_mul(&w1[4], &T0.x, &T1.z); + fp2_mul(&w2[4], &T1.x, &T0.z); + + fp2_batched_inv(w1, 5); + for (int i = 0; i < 5; i++) { + fp2_mul(&w1[i], &w1[i], &w2[i]); + } + + fp2_dlog_2e(r2, &w1[1], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(r1, &w1[2], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s2, &w1[3], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s1, &w1[4], &w1[0], pairing_dlog_data->e); +} + +void +ec_dlog_2_weil(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e) +{ + assert(test_point_order_twof(&PQ->Q, curve, e)); + + // precomputing the correct curve data + ec_curve_normalize_A24(curve); + + pairing_dlog_params_t pairing_dlog_data; + pairing_dlog_data.e = e; + pairing_dlog_data.PQ = *PQ; + pairing_dlog_data.RS = *RS; + pairing_dlog_data.A24 = curve->A24; + + cubical_normalization_dlog(&pairing_dlog_data, curve); + compute_difference_points(&pairing_dlog_data, curve); + + weil_dlog(r1, r2, s1, s2, &pairing_dlog_data); + +#ifndef NDEBUG + ec_point_t test; + ec_biscalar_mul(&test, r1, r2, e, PQ, curve); + // R = [r1]P + [r2]Q + assert(ec_is_equal(&test, &RS->P)); + ec_biscalar_mul(&test, s1, s2, e, PQ, curve); + // S = [s1]P + [s2]Q + assert(ec_is_equal(&test, &RS->Q)); +#endif +} + +// Inline all the Tate pairing computations needed for ec_dlog_2_weil +// including reduction, assumes a bases PQ of full E[2^e_full] torsion +// and a bases RS of smaller E[2^e] torsion +static void +tate_dlog_partial(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, pairing_dlog_params_t *pairing_dlog_data) 
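+// (Informal summary of the body below, mirroring weil_dlog() above: only the
+// reference pairing t(P, Q) needs a full-length ladder of e_full - 1 steps,
+// whereas the four pairings involving R and S only run for e - 1 steps; the
+// final exponentiations are then done projectively, using Frobenius on the
+// numerator/denominator, a batched inversion, clear_cofac(), and e_diff
+// squarings.)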
+{ + + uint32_t e_full = TORSION_EVEN_POWER; + uint32_t e_diff = e_full - pairing_dlog_data->e; + + ec_point_t nP, nQ, nR, nS, nPQ, PnR, PnS, nRQ, nSQ; + + copy_point(&nP, &pairing_dlog_data->PQ.P); + copy_point(&nQ, &pairing_dlog_data->PQ.Q); + copy_point(&nR, &pairing_dlog_data->RS.P); + copy_point(&nS, &pairing_dlog_data->RS.Q); + copy_point(&nPQ, &pairing_dlog_data->PQ.PmQ); + copy_point(&PnR, &pairing_dlog_data->diff.PmR); + copy_point(&PnS, &pairing_dlog_data->diff.PmS); + copy_point(&nRQ, &pairing_dlog_data->diff.RmQ); + copy_point(&nSQ, &pairing_dlog_data->diff.SmQ); + + for (uint32_t i = 0; i < e_full - 1; i++) { + cubicalDBLADD(&nPQ, &nP, &nPQ, &nP, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + } + + for (uint32_t i = 0; i < pairing_dlog_data->e - 1; i++) { + cubicalADD(&PnR, &PnR, &nR, &pairing_dlog_data->ixP); + cubicalDBLADD(&nRQ, &nR, &nRQ, &nR, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + + cubicalADD(&PnS, &PnS, &nS, &pairing_dlog_data->ixP); + cubicalDBLADD(&nSQ, &nS, &nSQ, &nS, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + } + + translate(&nPQ, &nP); + translate(&PnR, &nR); + translate(&nRQ, &nR); + translate(&PnS, &nS); + translate(&nSQ, &nS); + + translate(&nP, &nP); + translate(&nQ, &nQ); + translate(&nR, &nR); + translate(&nS, &nS); + + // computation of the reference Tate pairing + ec_point_t T0; + fp2_t w1[5], w2[5]; + + // t(P, Q)^(2^e_diff) = w0 + point_ratio(&T0, &nPQ, &nP, &pairing_dlog_data->PQ.Q); + fp2_copy(&w1[0], &T0.x); + fp2_copy(&w2[0], &T0.z); + + // t(R,P) = w0^r2 + point_ratio(&T0, &PnR, &nR, &pairing_dlog_data->PQ.P); + fp2_copy(&w1[1], &T0.x); + fp2_copy(&w2[1], &T0.z); + + // t(R,Q) = w0^r1 + point_ratio(&T0, &nRQ, &nR, &pairing_dlog_data->PQ.Q); + fp2_copy(&w2[2], &T0.x); + fp2_copy(&w1[2], &T0.z); + + // t(S,P) = w0^s2 + point_ratio(&T0, &PnS, &nS, &pairing_dlog_data->PQ.P); + fp2_copy(&w1[3], &T0.x); + fp2_copy(&w2[3], &T0.z); + + // t(S,Q) = w0^s1 + point_ratio(&T0, &nSQ, &nS, &pairing_dlog_data->PQ.Q); + fp2_copy(&w2[4], &T0.x); + fp2_copy(&w1[4], &T0.z); + + // batched reduction using projective representation + for (int i = 0; i < 5; i++) { + fp2_t frob, tmp; + fp2_copy(&tmp, &w1[i]); + // inline frobenius for ^p + // multiply by inverse to get ^(p-1) + fp2_frob(&frob, &w1[i]); + fp2_mul(&w1[i], &w2[i], &frob); + + // repeat for denom + fp2_frob(&frob, &w2[i]); + fp2_mul(&w2[i], &tmp, &frob); + } + + // batched normalization + fp2_batched_inv(w2, 5); + for (int i = 0; i < 5; i++) { + fp2_mul(&w1[i], &w1[i], &w2[i]); + } + + for (int i = 0; i < 5; i++) { + clear_cofac(&w1[i], &w1[i]); + + // removes 2^e_diff + for (uint32_t j = 0; j < e_diff; j++) { + fp2_sqr(&w1[i], &w1[i]); + } + } + + fp2_dlog_2e(r2, &w1[1], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(r1, &w1[2], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s2, &w1[3], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s1, &w1[4], &w1[0], pairing_dlog_data->e); +} + +void +ec_dlog_2_tate(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + const ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e) +{ + // assume PQ is a full torsion basis + // returns a, b, c, d such that R = [a]P + [b]Q, S = [c]P + [d]Q + +#ifndef NDEBUG + int e_full = TORSION_EVEN_POWER; + int e_diff = e_full - e; +#endif + assert(test_basis_order_twof(PQ, curve, e_full)); + + // precomputing the correct curve data + ec_curve_normalize_A24(curve); + + pairing_dlog_params_t pairing_dlog_data; + pairing_dlog_data.e = e; + pairing_dlog_data.PQ = *PQ; + pairing_dlog_data.RS = *RS; + 
pairing_dlog_data.A24 = curve->A24; + + cubical_normalization_dlog(&pairing_dlog_data, curve); + compute_difference_points(&pairing_dlog_data, curve); + tate_dlog_partial(r1, r2, s1, s2, &pairing_dlog_data); + +#ifndef NDEBUG + ec_point_t test; + ec_biscalar_mul(&test, r1, r2, e, PQ, curve); + ec_dbl_iter(&test, e_diff, &test, curve); + // R = [r1]P + [r2]Q + assert(ec_is_equal(&test, &RS->P)); + + ec_biscalar_mul(&test, s1, s2, e, PQ, curve); + ec_dbl_iter(&test, e_diff, &test, curve); + // S = [s1]P + [s2]Q + assert(ec_is_equal(&test, &RS->Q)); +#endif +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/biextension.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/biextension.h new file mode 100644 index 0000000000..1a50fcc738 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/biextension.h @@ -0,0 +1,82 @@ +#ifndef _BIEXT_H_ +#define _BIEXT_H_ + +#include +#include + +typedef struct pairing_params +{ + uint32_t e; // Points have order 2^e + ec_point_t P; // x(P) + ec_point_t Q; // x(Q) + ec_point_t PQ; // x(P-Q) = (PQX/PQZ : 1) + fp2_t ixP; // PZ/PX + fp2_t ixQ; // QZ/QX + ec_point_t A24; // ((A+2)/4 : 1) +} pairing_params_t; + +// For two bases and store: +// x(P - R), x(P - S), x(R - Q), x(S - Q) +typedef struct pairing_dlog_diff_points +{ + ec_point_t PmR; // x(P - R) + ec_point_t PmS; // x(P - S) + ec_point_t RmQ; // x(R - Q) + ec_point_t SmQ; // x(S - Q) +} pairing_dlog_diff_points_t; + +typedef struct pairing_dlog_params +{ + uint32_t e; // Points have order 2^e + ec_basis_t PQ; // x(P), x(Q), x(P-Q) + ec_basis_t RS; // x(R), x(S), x(R-S) + pairing_dlog_diff_points_t diff; // x(P - R), x(P - S), x(R - Q), x(S - Q) + fp2_t ixP; // PZ/PX + fp2_t ixQ; // QZ/QX + fp2_t ixR; // RZ/RX + fp2_t ixS; // SZ/SX + ec_point_t A24; // ((A+2)/4 : 1) +} pairing_dlog_params_t; + +// Computes e = e_{2^e}(P, Q) using biextension ladder +void weil(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E); + +// Computes (reduced) z = t_{2^e}(P, Q) using biextension ladder +void reduced_tate(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E); + +// Given two bases and computes scalars +// such that R = [r1]P + [r2]Q, S = [s1]P + [s2]Q +void ec_dlog_2_weil(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e); + +// Given two bases and +// where is a basis for E[2^f] +// the full 2-torsion, and a basis +// for smaller torsion E[2^e] +// computes scalars r1, r2, s1, s2 +// such that R = [r1]P + [r2]Q, S = [s1]P + [s2]Q +void ec_dlog_2_tate(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + const ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e); + +void ec_dlog_2_tate_to_full(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + ec_basis_t *PQ, + ec_basis_t *RS, + ec_curve_t *curve, + int e); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/common.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/common.c new file mode 100644 index 0000000000..d393e9cb11 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/common.c @@ -0,0 +1,88 @@ +#include +#include +#include +#include +#include +#include + +void +public_key_init(public_key_t *pk) +{ + ec_curve_init(&pk->curve); +} + +void +public_key_finalize(public_key_t *pk) +{ +} + +// compute the challenge as the hash of the message and the commitment curve and public key +void +hash_to_challenge(scalar_t 
*scalar, + const public_key_t *pk, + const ec_curve_t *com_curve, + const unsigned char *message, + size_t length) +{ + unsigned char buf[2 * FP2_ENCODED_BYTES]; + { + fp2_t j1, j2; + ec_j_inv(&j1, &pk->curve); + ec_j_inv(&j2, com_curve); + fp2_encode(buf, &j1); + fp2_encode(buf + FP2_ENCODED_BYTES, &j2); + } + + { + // The type scalar_t represents an element of GF(p), which is about + // 2*lambda bits, where lambda = 128, 192 or 256, according to the + // security level. Thus, the variable scalar should have enough memory + // for the values produced by SHAKE256 in the intermediate iterations. + + shake256incctx ctx; + + size_t hash_bytes = ((2 * SECURITY_BITS) + 7) / 8; + size_t limbs = (hash_bytes + sizeof(digit_t) - 1) / sizeof(digit_t); + size_t bits = (2 * SECURITY_BITS) % RADIX; + digit_t mask = ((digit_t)-1) >> ((RADIX - bits) % RADIX); +#ifdef TARGET_BIG_ENDIAN + mask = BSWAP_DIGIT(mask); +#endif + + shake256_inc_init(&ctx); + shake256_inc_absorb(&ctx, buf, 2 * FP2_ENCODED_BYTES); + shake256_inc_absorb(&ctx, message, length); + shake256_inc_finalize(&ctx); + shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + (*scalar)[limbs - 1] &= mask; + for (int i = 2; i < HASH_ITERATIONS; i++) { + shake256_inc_init(&ctx); + shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); + shake256_inc_finalize(&ctx); + shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + (*scalar)[limbs - 1] &= mask; + } + shake256_inc_init(&ctx); + shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); + shake256_inc_finalize(&ctx); + + hash_bytes = ((TORSION_EVEN_POWER - SQIsign_response_length) + 7) / 8; + limbs = (hash_bytes + sizeof(digit_t) - 1) / sizeof(digit_t); + bits = (TORSION_EVEN_POWER - SQIsign_response_length) % RADIX; + mask = ((digit_t)-1) >> ((RADIX - bits) % RADIX); +#ifdef TARGET_BIG_ENDIAN + mask = BSWAP_DIGIT(mask); +#endif + + memset(*scalar, 0, NWORDS_ORDER * sizeof(digit_t)); + shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + (*scalar)[limbs - 1] &= mask; + +#ifdef TARGET_BIG_ENDIAN + for (int i = 0; i < NWORDS_ORDER; i++) + (*scalar)[i] = BSWAP_DIGIT((*scalar)[i]); +#endif + + mp_mod_2exp(*scalar, SECURITY_BITS, NWORDS_ORDER); + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim2.c new file mode 100644 index 0000000000..b31ae7771a --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim2.c @@ -0,0 +1,132 @@ +#include +#include "internal.h" + +// internal helpers, also for other files +void +ibz_vec_2_set(ibz_vec_2_t *vec, int a0, int a1) +{ + ibz_set(&((*vec)[0]), a0); + ibz_set(&((*vec)[1]), a1); +} +void +ibz_mat_2x2_set(ibz_mat_2x2_t *mat, int a00, int a01, int a10, int a11) +{ + ibz_set(&((*mat)[0][0]), a00); + ibz_set(&((*mat)[0][1]), a01); + ibz_set(&((*mat)[1][0]), a10); + ibz_set(&((*mat)[1][1]), a11); +} + +void +ibz_mat_2x2_copy(ibz_mat_2x2_t *copy, const ibz_mat_2x2_t *copied) +{ + ibz_copy(&((*copy)[0][0]), &((*copied)[0][0])); + ibz_copy(&((*copy)[0][1]), &((*copied)[0][1])); + ibz_copy(&((*copy)[1][0]), &((*copied)[1][0])); + ibz_copy(&((*copy)[1][1]), &((*copied)[1][1])); +} + +void +ibz_mat_2x2_add(ibz_mat_2x2_t *sum, const ibz_mat_2x2_t *a, const ibz_mat_2x2_t *b) +{ + ibz_add(&((*sum)[0][0]), &((*a)[0][0]), &((*b)[0][0])); + ibz_add(&((*sum)[0][1]), &((*a)[0][1]), &((*b)[0][1])); + ibz_add(&((*sum)[1][0]), &((*a)[1][0]), &((*b)[1][0])); + ibz_add(&((*sum)[1][1]), &((*a)[1][1]), &((*b)[1][1])); +} + +void +ibz_mat_2x2_det_from_ibz(ibz_t *det, const ibz_t *a11, 
const ibz_t *a12, const ibz_t *a21, const ibz_t *a22) +{ + ibz_t prod; + ibz_init(&prod); + ibz_mul(&prod, a12, a21); + ibz_mul(det, a11, a22); + ibz_sub(det, det, &prod); + ibz_finalize(&prod); +} + +void +ibz_mat_2x2_eval(ibz_vec_2_t *res, const ibz_mat_2x2_t *mat, const ibz_vec_2_t *vec) +{ + ibz_t prod; + ibz_vec_2_t matvec; + ibz_init(&prod); + ibz_vec_2_init(&matvec); + ibz_mul(&prod, &((*mat)[0][0]), &((*vec)[0])); + ibz_copy(&(matvec[0]), &prod); + ibz_mul(&prod, &((*mat)[0][1]), &((*vec)[1])); + ibz_add(&(matvec[0]), &(matvec[0]), &prod); + ibz_mul(&prod, &((*mat)[1][0]), &((*vec)[0])); + ibz_copy(&(matvec[1]), &prod); + ibz_mul(&prod, &((*mat)[1][1]), &((*vec)[1])); + ibz_add(&(matvec[1]), &(matvec[1]), &prod); + ibz_copy(&((*res)[0]), &(matvec[0])); + ibz_copy(&((*res)[1]), &(matvec[1])); + ibz_finalize(&prod); + ibz_vec_2_finalize(&matvec); +} + +// modular 2x2 operations + +void +ibz_2x2_mul_mod(ibz_mat_2x2_t *prod, const ibz_mat_2x2_t *mat_a, const ibz_mat_2x2_t *mat_b, const ibz_t *m) +{ + ibz_t mul; + ibz_mat_2x2_t sums; + ibz_init(&mul); + ibz_mat_2x2_init(&sums); + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_set(&(sums[i][j]), 0); + } + } + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + for (int k = 0; k < 2; k++) { + ibz_mul(&mul, &((*mat_a)[i][k]), &((*mat_b)[k][j])); + ibz_add(&(sums[i][j]), &(sums[i][j]), &mul); + ibz_mod(&(sums[i][j]), &(sums[i][j]), m); + } + } + } + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_copy(&((*prod)[i][j]), &(sums[i][j])); + } + } + ibz_finalize(&mul); + ibz_mat_2x2_finalize(&sums); +} + +int +ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m) +{ + ibz_t det, prod; + ibz_init(&det); + ibz_init(&prod); + ibz_mul(&det, &((*mat)[0][0]), &((*mat)[1][1])); + ibz_mod(&det, &det, m); + ibz_mul(&prod, &((*mat)[0][1]), &((*mat)[1][0])); + ibz_sub(&det, &det, &prod); + ibz_mod(&det, &det, m); + int res = ibz_invmod(&det, &det, m); + // return 0 matrix if non invertible determinant + ibz_set(&prod, res); + ibz_mul(&det, &det, &prod); + // compute inverse + ibz_copy(&prod, &((*mat)[0][0])); + ibz_copy(&((*inv)[0][0]), &((*mat)[1][1])); + ibz_copy(&((*inv)[1][1]), &prod); + ibz_neg(&((*inv)[1][0]), &((*mat)[1][0])); + ibz_neg(&((*inv)[0][1]), &((*mat)[0][1])); + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_mul(&((*inv)[i][j]), &((*inv)[i][j]), &det); + ibz_mod(&((*inv)[i][j]), &((*inv)[i][j]), m); + } + } + ibz_finalize(&det); + ibz_finalize(&prod); + return (res); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim2id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim2id2iso.c new file mode 100644 index 0000000000..171473d481 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim2id2iso.c @@ -0,0 +1,1172 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int +_fixed_degree_isogeny_impl(quat_left_ideal_t *lideal, + const ibz_t *u, + bool small, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP, + const int index_alternate_order) +{ + + // var declaration + int ret; + ibz_t two_pow, tmp; + quat_alg_elem_t theta; + + ec_curve_t E0; + copy_curve(&E0, &CURVES_WITH_ENDOMORPHISMS[index_alternate_order].curve); + ec_curve_normalize_A24(&E0); + + unsigned length; + + int u_bitsize = ibz_bitsize(u); + + // deciding the power of 2 of the dim2 isogeny we use for this + // the smaller the faster, but if it set too low there is a risk that + // 
RepresentInteger will fail + if (!small) { + // in that case, we just set it to be the biggest value possible + length = TORSION_EVEN_POWER - HD_extra_torsion; + } else { + length = ibz_bitsize(&QUATALG_PINFTY.p) + QUAT_repres_bound_input - u_bitsize; + assert(u_bitsize < (int)length); + assert(length < TORSION_EVEN_POWER - HD_extra_torsion); + } + assert(length); + + // var init + ibz_init(&two_pow); + ibz_init(&tmp); + quat_alg_elem_init(&theta); + + ibz_pow(&two_pow, &ibz_const_two, length); + ibz_copy(&tmp, u); + assert(ibz_cmp(&two_pow, &tmp) > 0); + assert(!ibz_is_even(&tmp)); + + // computing the endomorphism theta of norm u * (2^(length) - u) + ibz_sub(&tmp, &two_pow, &tmp); + ibz_mul(&tmp, &tmp, u); + assert(!ibz_is_even(&tmp)); + + // setting-up the quat_represent_integer_params + quat_represent_integer_params_t ri_params; + ri_params.primality_test_iterations = QUAT_represent_integer_params.primality_test_iterations; + + quat_p_extremal_maximal_order_t order_hnf; + quat_alg_elem_init(&order_hnf.z); + quat_alg_elem_copy(&order_hnf.z, &EXTREMAL_ORDERS[index_alternate_order].z); + quat_alg_elem_init(&order_hnf.t); + quat_alg_elem_copy(&order_hnf.t, &EXTREMAL_ORDERS[index_alternate_order].t); + quat_lattice_init(&order_hnf.order); + ibz_copy(&order_hnf.order.denom, &EXTREMAL_ORDERS[index_alternate_order].order.denom); + ibz_mat_4x4_copy(&order_hnf.order.basis, &EXTREMAL_ORDERS[index_alternate_order].order.basis); + order_hnf.q = EXTREMAL_ORDERS[index_alternate_order].q; + ri_params.order = &order_hnf; + ri_params.algebra = &QUATALG_PINFTY; + +#ifndef NDEBUG + assert(quat_lattice_contains(NULL, &ri_params.order->order, &ri_params.order->z)); + assert(quat_lattice_contains(NULL, &ri_params.order->order, &ri_params.order->t)); +#endif + + ret = quat_represent_integer(&theta, &tmp, 1, &ri_params); + + assert(!ibz_is_even(&tmp)); + + if (!ret) { + printf("represent integer failed for the alternate order number %d and for " + "a target of " + "size %d for a u of size %d with length = " + "%u \n", + index_alternate_order, + ibz_bitsize(&tmp), + ibz_bitsize(u), + length); + goto cleanup; + } + quat_lideal_create(lideal, &theta, u, &order_hnf.order, &QUATALG_PINFTY); + + quat_alg_elem_finalize(&order_hnf.z); + quat_alg_elem_finalize(&order_hnf.t); + quat_lattice_finalize(&order_hnf.order); + +#ifndef NDEBUG + ibz_t test_norm, test_denom; + ibz_init(&test_denom); + ibz_init(&test_norm); + quat_alg_norm(&test_norm, &test_denom, &theta, &QUATALG_PINFTY); + assert(ibz_is_one(&test_denom)); + assert(ibz_cmp(&test_norm, &tmp) == 0); + assert(!ibz_is_even(&tmp)); + assert(quat_lattice_contains(NULL, &EXTREMAL_ORDERS[index_alternate_order].order, &theta)); + ibz_finalize(&test_norm); + ibz_finalize(&test_denom); +#endif + + ec_basis_t B0_two; + // copying the basis + copy_basis(&B0_two, &CURVES_WITH_ENDOMORPHISMS[index_alternate_order].basis_even); + assert(test_basis_order_twof(&B0_two, &E0, TORSION_EVEN_POWER)); + ec_dbl_iter_basis(&B0_two, TORSION_EVEN_POWER - length - HD_extra_torsion, &B0_two, &E0); + + assert(test_basis_order_twof(&B0_two, &E0, length + HD_extra_torsion)); + + // now we set-up the kernel + theta_couple_point_t T1; + theta_couple_point_t T2, T1m2; + + copy_point(&T1.P1, &B0_two.P); + copy_point(&T2.P1, &B0_two.Q); + copy_point(&T1m2.P1, &B0_two.PmQ); + + // multiplication of theta by (u)^-1 mod 2^(length+2) + ibz_mul(&two_pow, &two_pow, &ibz_const_two); + ibz_mul(&two_pow, &two_pow, &ibz_const_two); + ibz_copy(&tmp, u); + ibz_invmod(&tmp, &tmp, &two_pow); + 
assert(!ibz_is_even(&tmp)); + + ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); + ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); + ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); + ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + + // applying theta to the basis + ec_basis_t B0_two_theta; + copy_basis(&B0_two_theta, &B0_two); + endomorphism_application_even_basis(&B0_two_theta, index_alternate_order, &E0, &theta, length + HD_extra_torsion); + + // Ensure the basis we're using has the expected order + assert(test_basis_order_twof(&B0_two_theta, &E0, length + HD_extra_torsion)); + + // Set-up the domain E0 x E0 + theta_couple_curve_t E00; + E00.E1 = E0; + E00.E2 = E0; + + // Set-up the kernel from the bases + theta_kernel_couple_points_t dim_two_ker; + copy_bases_to_kernel(&dim_two_ker, &B0_two, &B0_two_theta); + + ret = theta_chain_compute_and_eval(length, &E00, &dim_two_ker, true, E34, P12, numP); + if (!ret) + goto cleanup; + + assert(length); + ret = (int)length; + +cleanup: + // var finalize + ibz_finalize(&two_pow); + ibz_finalize(&tmp); + quat_alg_elem_finalize(&theta); + + return ret; +} + +int +fixed_degree_isogeny_and_eval(quat_left_ideal_t *lideal, + const ibz_t *u, + bool small, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP, + const int index_alternate_order) +{ + return _fixed_degree_isogeny_impl(lideal, u, small, E34, P12, numP, index_alternate_order); +} + +// takes the output of LLL and apply some small treatment on the basis +// reordering vectors and switching some signs if needed to make it in a nicer +// shape +static void +post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, const ibz_t *norm, bool is_special_order) +{ + // if the left order is the special one, then we apply some additional post + // treatment + if (is_special_order) { + // reordering the basis if needed + if (ibz_cmp(&(*gram)[0][0], &(*gram)[2][2]) == 0) { + for (int i = 0; i < 4; i++) { + ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + } + ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); + ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); + ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); + ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); + ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); + } else if (ibz_cmp(&(*gram)[0][0], &(*gram)[3][3]) == 0) { + for (int i = 0; i < 4; i++) { + ibz_swap(&(*reduced)[i][1], &(*reduced)[i][3]); + } + ibz_swap(&(*gram)[0][3], &(*gram)[0][1]); + ibz_swap(&(*gram)[3][0], &(*gram)[1][0]); + ibz_swap(&(*gram)[2][3], &(*gram)[2][1]); + ibz_swap(&(*gram)[3][2], &(*gram)[1][2]); + ibz_swap(&(*gram)[3][3], &(*gram)[1][1]); + } else if (ibz_cmp(&(*gram)[1][1], &(*gram)[3][3]) == 0) { + // in this case it seems that we need to swap the second and third + // element, and then recompute entirely the second element from the first + // first we swap the second and third element + for (int i = 0; i < 4; i++) { + ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + } + ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); + ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); + ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); + ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); + ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); + } + + // adjusting the sign if needed + if (ibz_cmp(&(*reduced)[0][0], &(*reduced)[1][1]) != 0) { + for (int i = 0; i < 4; i++) { + ibz_neg(&(*reduced)[i][1], &(*reduced)[i][1]); + ibz_neg(&(*gram)[i][1], &(*gram)[i][1]); + ibz_neg(&(*gram)[1][i], &(*gram)[1][i]); + } + } + if (ibz_cmp(&(*reduced)[0][2], &(*reduced)[1][3]) != 0) { + for (int i = 0; i < 4; i++) { + ibz_neg(&(*reduced)[i][3], 
&(*reduced)[i][3]); + ibz_neg(&(*gram)[i][3], &(*gram)[i][3]); + ibz_neg(&(*gram)[3][i], &(*gram)[3][i]); + } + // assert(ibz_cmp(&(*reduced)[0][2],&(*reduced)[1][3])==0); + } + } +} + +// enumerate all vectors in an hypercube of norm m for the infinity norm +// with respect to a basis whose gram matrix is given by gram +// Returns an int `count`, the number of vectors found with the desired +// properties +static int +enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t *gram, const ibz_t *adjusted_norm) +{ + + ibz_t remain, norm; + ibz_vec_4_t point; + + ibz_init(&remain); + ibz_init(&norm); + ibz_vec_4_init(&point); + + assert(m > 0); + + int count = 0; + int dim = 2 * m + 1; + int dim2 = dim * dim; + int dim3 = dim2 * dim; + + // if the basis is of the form alpha, i*alpha, beta, i*beta + // we can remove some values due to symmetry of the basis that + bool need_remove_symmetry = + (ibz_cmp(&(*gram)[0][0], &(*gram)[1][1]) == 0 && ibz_cmp(&(*gram)[3][3], &(*gram)[2][2]) == 0); + + int check1, check2, check3; + + // Enumerate over points in a hypercube with coordinates (x, y, z, w) + for (int x = -m; x <= 0; x++) { // We only check non-positive x-values + for (int y = -m; y < m + 1; y++) { + // Once x = 0 we only consider non-positive y values + if (x == 0 && y > 0) { + break; + } + for (int z = -m; z < m + 1; z++) { + // If x and y are both zero, we only consider non-positive z values + if (x == 0 && y == 0 && z > 0) { + break; + } + for (int w = -m; w < m + 1; w++) { + // If x, y, z are all zero, we only consider negative w values + if (x == 0 && y == 0 && z == 0 && w >= 0) { + break; + } + + // Now for each candidate (x, y, z, w) we need to check a number of + // conditions We have already filtered for symmetry with several break + // statements, but there are more checks. + + // 1. We do not allow all (x, y, z, w) to be multiples of 2 + // 2. We do not allow all (x, y, z, w) to be multiples of 3 + // 3. We do not want elements of the same norm, so we quotient out the + // action + // of a group of order four generated by i for a basis expected to + // be of the form: [gamma, i gamma, beta, i beta ]. 
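+                    // Concretely (an informal reading of the checks below):
+                    // check1 encodes (x, y, z, w) in base dim = 2m + 1, while
+                    // check2 and check3 appear to encode the coordinates of
+                    // i^3 * (x, y, z, w) = (y, -x, w, -z) and of
+                    // i * (x, y, z, w) = (-y, x, -w, z) with respect to a
+                    // basis of the form [gamma, i gamma, beta, i beta];
+                    // keeping only the candidates for which check1 is minimal
+                    // thus keeps a single representative per orbit under
+                    // multiplication by i.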
+ + // Ensure that not all values are even + if (!((x | y | z | w) & 1)) { + continue; + } + // Ensure that not all values are multiples of three + if (x % 3 == 0 && y % 3 == 0 && z % 3 == 0 && w % 3 == 0) { + continue; + } + + check1 = (m + w) + dim * (m + z) + dim2 * (m + y) + dim3 * (m + x); + check2 = (m - z) + dim * (m + w) + dim2 * (m - x) + dim3 * (m + y); + check3 = (m + z) + dim * (m - w) + dim2 * (m + x) + dim3 * (m - y); + + // either the basis does not have symmetry and we are good, + // or there is a special symmetry that we can exploit + // and we ensure that we don't record the same norm in the list + if (!need_remove_symmetry || (check1 <= check2 && check1 <= check3)) { + // Set the point as a vector (x, y, z, w) + ibz_set(&point[0], x); + ibz_set(&point[1], y); + ibz_set(&point[2], z); + ibz_set(&point[3], w); + + // Evaluate this through the gram matrix and divide out by the + // adjusted_norm + quat_qf_eval(&norm, gram, &point); + ibz_div(&norm, &remain, &norm, adjusted_norm); + assert(ibz_is_zero(&remain)); + + if (ibz_mod_ui(&norm, 2) == 1) { + ibz_set(&vecs[count][0], x); + ibz_set(&vecs[count][1], y); + ibz_set(&vecs[count][2], z); + ibz_set(&vecs[count][3], w); + ibz_copy(&norms[count], &norm); + count++; + } + } + } + } + } + } + + ibz_finalize(&remain); + ibz_finalize(&norm); + ibz_vec_4_finalize(&point); + + return count - 1; +} + +// enumerate through the two list given in input to find to integer d1,d2 such +// that there exists u,v with u d1 + v d2 = target the bool is diagonal +// indicates if the two lists are the same +static int +find_uv_from_lists(ibz_t *au, + ibz_t *bu, + ibz_t *av, + ibz_t *bv, + ibz_t *u, + ibz_t *v, + int *index_sol1, + int *index_sol2, + const ibz_t *target, + const ibz_t *small_norms1, + const ibz_t *small_norms2, + const ibz_t *quotients, + const int index1, + const int index2, + const int is_diagonal, + const int number_sum_square) +{ + + ibz_t n, remain, adjusted_norm; + ibz_init(&n); + ibz_init(&remain); + ibz_init(&adjusted_norm); + + int found = 0; + int cmp; + ibz_copy(&n, target); + + // enumerating through the list + for (int i1 = 0; i1 < index1; i1++) { + ibz_mod(&adjusted_norm, &n, &small_norms1[i1]); + int starting_index2; + if (is_diagonal) { + starting_index2 = i1; + } else { + starting_index2 = 0; + } + for (int i2 = starting_index2; i2 < index2; i2++) { + // u = target / d1 mod d2 + if (!ibz_invmod(&remain, &small_norms2[i2], &small_norms1[i1])) { + continue; + } + ibz_mul(v, &remain, &adjusted_norm); + ibz_mod(v, v, &small_norms1[i1]); + cmp = ibz_cmp(v, "ients[i2]); + while (!found && cmp < 0) { + if (number_sum_square > 0) { + found = ibz_cornacchia_prime(av, bv, &ibz_const_one, v); + } else if (number_sum_square == 0) { + found = 1; + } + if (found) { + ibz_mul(&remain, v, &small_norms2[i2]); + ibz_copy(au, &n); + ibz_sub(u, au, &remain); + assert(ibz_cmp(u, &ibz_const_zero) > 0); + ibz_div(u, &remain, u, &small_norms1[i1]); + assert(ibz_is_zero(&remain)); + // we want to remove weird cases where u,v have big power of two + found = found && (ibz_get(u) != 0 && ibz_get(v) != 0); + if (number_sum_square == 2) { + found = ibz_cornacchia_prime(au, bu, &ibz_const_one, u); + } + } + if (!found) { + ibz_add(v, v, &small_norms1[i1]); + cmp = ibz_cmp(v, "ients[i2]); + } + } + + if (found) { + // copying the indices + *index_sol1 = i1; + *index_sol2 = i2; + break; + } + } + if (found) { + break; + } + } + + ibz_finalize(&n); + ibz_finalize(&remain); + ibz_finalize(&adjusted_norm); + + return found; +} + +struct 
vec_and_norm +{ + ibz_vec_4_t vec; + ibz_t norm; + int idx; +}; + +static int +compare_vec_by_norm(const void *_first, const void *_second) +{ + const struct vec_and_norm *first = _first, *second = _second; + int res = ibz_cmp(&first->norm, &second->norm); + if (res != 0) + return res; + else + return first->idx - second->idx; +} + +// use several special curves +// we assume that the first one is always j=1728 +int +find_uv(ibz_t *u, + ibz_t *v, + quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *d1, + ibz_t *d2, + int *index_alternate_order_1, + int *index_alternate_order_2, + const ibz_t *target, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo, + int num_alternate_order) + +{ + + // variable declaration & init + ibz_vec_4_t vec; + ibz_t n; + ibz_t au, bu, av, bv; + ibz_t norm_d; + ibz_t remain; + ibz_init(&au); + ibz_init(&bu); + ibz_init(&av); + ibz_init(&bv); + ibz_init(&norm_d); + ibz_init(&n); + ibz_vec_4_init(&vec); + ibz_init(&remain); + + ibz_copy(&n, target); + + ibz_t adjusted_norm[num_alternate_order + 1]; + ibz_mat_4x4_t gram[num_alternate_order + 1], reduced[num_alternate_order + 1]; + quat_left_ideal_t ideal[num_alternate_order + 1]; + + for (int i = 0; i < num_alternate_order + 1; i++) { + ibz_init(&adjusted_norm[i]); + ibz_mat_4x4_init(&gram[i]); + ibz_mat_4x4_init(&reduced[i]); + quat_left_ideal_init(&ideal[i]); + } + + // first we reduce the ideal given in input + quat_lideal_copy(&ideal[0], lideal); + quat_lideal_reduce_basis(&reduced[0], &gram[0], &ideal[0], Bpoo); + + ibz_mat_4x4_copy(&ideal[0].lattice.basis, &reduced[0]); + ibz_set(&adjusted_norm[0], 1); + ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); + ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); + post_LLL_basis_treatment(&gram[0], &reduced[0], &ideal[0].norm, true); + + // for efficient lattice reduction, we replace ideal[0] by the equivalent + // ideal of smallest norm + quat_left_ideal_t reduced_id; + quat_left_ideal_init(&reduced_id); + quat_lideal_copy(&reduced_id, &ideal[0]); + quat_alg_elem_t delta; + // delta will be the element of smallest norm + quat_alg_elem_init(&delta); + ibz_set(&delta.coord[0], 1); + ibz_set(&delta.coord[1], 0); + ibz_set(&delta.coord[2], 0); + ibz_set(&delta.coord[3], 0); + ibz_copy(&delta.denom, &reduced_id.lattice.denom); + ibz_mat_4x4_eval(&delta.coord, &reduced[0], &delta.coord); + assert(quat_lattice_contains(NULL, &reduced_id.lattice, &delta)); + + // reduced_id = ideal[0] * \overline{delta}/n(ideal[0]) + quat_alg_conj(&delta, &delta); + ibz_mul(&delta.denom, &delta.denom, &ideal[0].norm); + quat_lattice_alg_elem_mul(&reduced_id.lattice, &reduced_id.lattice, &delta, Bpoo); + ibz_copy(&reduced_id.norm, &gram[0][0][0]); + ibz_div(&reduced_id.norm, &remain, &reduced_id.norm, &adjusted_norm[0]); + assert(ibz_cmp(&remain, &ibz_const_zero) == 0); + + // and conj_ideal is the conjugate of reduced_id + // init the right order; + quat_lattice_t right_order; + quat_lattice_init(&right_order); + // computing the conjugate + quat_left_ideal_t conj_ideal; + quat_left_ideal_init(&conj_ideal); + quat_lideal_conjugate_without_hnf(&conj_ideal, &right_order, &reduced_id, Bpoo); + + // computing all the other connecting ideals and reducing them + for (int i = 1; i < num_alternate_order + 1; i++) { + quat_lideal_lideal_mul_reduced(&ideal[i], &gram[i], &conj_ideal, &ALTERNATE_CONNECTING_IDEALS[i - 1], Bpoo); + ibz_mat_4x4_copy(&reduced[i], &ideal[i].lattice.basis); + ibz_set(&adjusted_norm[i], 1); + 
ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom); + ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom); + post_LLL_basis_treatment(&gram[i], &reduced[i], &ideal[i].norm, false); + } + + // enumerating small vectors + + // global parameters for the enumeration + int m = FINDUV_box_size; + int m4 = FINDUV_cube_size; + + ibz_vec_4_t small_vecs[num_alternate_order + 1][m4]; + ibz_t small_norms[num_alternate_order + 1][m4]; + ibz_vec_4_t alternate_small_vecs[num_alternate_order + 1][m4]; + ibz_t alternate_small_norms[num_alternate_order + 1][m4]; + ibz_t quotients[num_alternate_order + 1][m4]; + int indices[num_alternate_order + 1]; + + for (int j = 0; j < num_alternate_order + 1; j++) { + for (int i = 0; i < m4; i++) { + ibz_init(&small_norms[j][i]); + ibz_vec_4_init(&small_vecs[j][i]); + ibz_init(&alternate_small_norms[j][i]); + ibz_init("ients[j][i]); + ibz_vec_4_init(&alternate_small_vecs[j][i]); + } + // enumeration in the hypercube of norm m + indices[j] = enumerate_hypercube(small_vecs[j], small_norms[j], m, &gram[j], &adjusted_norm[j]); + + // sorting the list + { + struct vec_and_norm small_vecs_and_norms[indices[j]]; + for (int i = 0; i < indices[j]; ++i) { + memcpy(&small_vecs_and_norms[i].vec, &small_vecs[j][i], sizeof(ibz_vec_4_t)); + memcpy(&small_vecs_and_norms[i].norm, &small_norms[j][i], sizeof(ibz_t)); + small_vecs_and_norms[i].idx = i; + } + qsort(small_vecs_and_norms, indices[j], sizeof(*small_vecs_and_norms), compare_vec_by_norm); + for (int i = 0; i < indices[j]; ++i) { + memcpy(&small_vecs[j][i], &small_vecs_and_norms[i].vec, sizeof(ibz_vec_4_t)); + memcpy(&small_norms[j][i], &small_vecs_and_norms[i].norm, sizeof(ibz_t)); + } +#ifndef NDEBUG + for (int i = 1; i < indices[j]; ++i) + assert(ibz_cmp(&small_norms[j][i - 1], &small_norms[j][i]) <= 0); +#endif + } + + for (int i = 0; i < indices[j]; i++) { + ibz_div("ients[j][i], &remain, &n, &small_norms[j][i]); + } + } + + int found = 0; + int i1; + int i2; + for (int j1 = 0; j1 < num_alternate_order + 1; j1++) { + for (int j2 = j1; j2 < num_alternate_order + 1; j2++) { + // in this case, there are some small adjustements to make + int is_diago = (j1 == j2); + found = find_uv_from_lists(&au, + &bu, + &av, + &bv, + u, + v, + &i1, + &i2, + target, + small_norms[j1], + small_norms[j2], + quotients[j2], + indices[j1], + indices[j2], + is_diago, + 0); + // } + + if (found) { + // recording the solutions that we found + ibz_copy(&beta1->denom, &ideal[j1].lattice.denom); + ibz_copy(&beta2->denom, &ideal[j2].lattice.denom); + ibz_copy(d1, &small_norms[j1][i1]); + ibz_copy(d2, &small_norms[j2][i2]); + ibz_mat_4x4_eval(&beta1->coord, &reduced[j1], &small_vecs[j1][i1]); + ibz_mat_4x4_eval(&beta2->coord, &reduced[j2], &small_vecs[j2][i2]); + assert(quat_lattice_contains(NULL, &ideal[j1].lattice, beta1)); + assert(quat_lattice_contains(NULL, &ideal[j2].lattice, beta2)); + if (j1 != 0 || j2 != 0) { + ibz_div(&delta.denom, &remain, &delta.denom, &lideal->norm); + assert(ibz_cmp(&remain, &ibz_const_zero) == 0); + ibz_mul(&delta.denom, &delta.denom, &conj_ideal.norm); + } + if (j1 != 0) { + // we send back beta1 to the original ideal + quat_alg_mul(beta1, &delta, beta1, Bpoo); + quat_alg_normalize(beta1); + } + if (j2 != 0) { + // we send back beta2 to the original ideal + quat_alg_mul(beta2, &delta, beta2, Bpoo); + quat_alg_normalize(beta2); + } + + // if the selected element belong to an alternate order, we conjugate it + if (j1 != 0) { + quat_alg_conj(beta1, beta1); + } + if (j2 != 0) { + 
quat_alg_conj(beta2, beta2); + } + +#ifndef NDEBUG + quat_alg_norm(&remain, &norm_d, beta1, &QUATALG_PINFTY); + assert(ibz_is_one(&norm_d)); + ibz_mul(&n, d1, &ideal->norm); + if (j1 > 0) { + ibz_mul(&n, &n, &ALTERNATE_CONNECTING_IDEALS[j1 - 1].norm); + } + assert(ibz_cmp(&n, &remain) == 0); + quat_alg_norm(&remain, &norm_d, beta2, &QUATALG_PINFTY); + assert(ibz_is_one(&norm_d)); + ibz_mul(&n, d2, &ideal->norm); + if (j2 > 0) { + ibz_mul(&n, &n, &ALTERNATE_CONNECTING_IDEALS[j2 - 1].norm); + } + assert(ibz_cmp(&n, &remain) == 0); + assert(quat_lattice_contains(NULL, &ideal->lattice, beta1)); + assert(quat_lattice_contains(NULL, &ideal->lattice, beta2)); + + quat_left_ideal_t ideal_test; + quat_lattice_t ro; + quat_left_ideal_init(&ideal_test); + quat_lattice_init(&ro); + if (j1 > 0) { + quat_lideal_copy(&ideal_test, &ALTERNATE_CONNECTING_IDEALS[j1 - 1]); + quat_lideal_conjugate_without_hnf(&ideal_test, &ro, &ideal_test, Bpoo); + quat_lideal_lideal_mul_reduced(&ideal_test, &gram[0], &ideal_test, ideal, Bpoo); + assert(quat_lattice_contains(NULL, &ideal_test.lattice, beta1)); + } + if (j2 > 0) { + quat_lideal_copy(&ideal_test, &ALTERNATE_CONNECTING_IDEALS[j2 - 1]); + quat_lideal_conjugate_without_hnf(&ideal_test, &ro, &ideal_test, Bpoo); + quat_lideal_lideal_mul_reduced(&ideal_test, &gram[0], &ideal_test, ideal, Bpoo); + assert(quat_lattice_contains(NULL, &ideal_test.lattice, beta2)); + } + + quat_lattice_finalize(&ro); + quat_left_ideal_finalize(&ideal_test); +#endif + + *index_alternate_order_1 = j1; + *index_alternate_order_2 = j2; + break; + } + } + if (found) { + break; + } + } + + for (int j = 0; j < num_alternate_order + 1; j++) { + for (int i = 0; i < m4; i++) { + ibz_finalize(&small_norms[j][i]); + ibz_vec_4_finalize(&small_vecs[j][i]); + ibz_finalize(&alternate_small_norms[j][i]); + ibz_finalize("ients[j][i]); + ibz_vec_4_finalize(&alternate_small_vecs[j][i]); + } + } + + // var finalize + for (int i = 0; i < num_alternate_order + 1; i++) { + ibz_mat_4x4_finalize(&gram[i]); + ibz_mat_4x4_finalize(&reduced[i]); + quat_left_ideal_finalize(&ideal[i]); + ibz_finalize(&adjusted_norm[i]); + } + + ibz_finalize(&n); + ibz_vec_4_finalize(&vec); + ibz_finalize(&au); + ibz_finalize(&bu); + ibz_finalize(&av); + ibz_finalize(&bv); + ibz_finalize(&remain); + ibz_finalize(&norm_d); + quat_lattice_finalize(&right_order); + quat_left_ideal_finalize(&conj_ideal); + quat_left_ideal_finalize(&reduced_id); + quat_alg_elem_finalize(&delta); + + return found; +} + +int +dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *u, + ibz_t *v, + ibz_t *d1, + ibz_t *d2, + ec_curve_t *codomain, + ec_basis_t *basis, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo) +{ + ibz_t target, tmp, two_pow; + ; + quat_alg_elem_t theta; + + ibz_t norm_d; + ibz_init(&norm_d); + ibz_t test1, test2; + ibz_init(&test1); + ibz_init(&test2); + + ibz_init(&target); + ibz_init(&tmp); + ibz_init(&two_pow); + int exp = TORSION_EVEN_POWER; + quat_alg_elem_init(&theta); + + // first, we find u,v,d1,d2,beta1,beta2 + // such that u*d1 + v*d2 = 2^TORSION_EVEN_POWER and there are ideals of + // norm d1,d2 equivalent to ideal beta1 and beta2 are elements of norm nd1, + // nd2 where n=n(lideal) + int ret; + int index_order1 = 0, index_order2 = 0; +#ifndef NDEBUG + unsigned int Fu_length, Fv_length; +#endif + ret = find_uv(u, + v, + beta1, + beta2, + d1, + d2, + &index_order1, + &index_order2, + &TORSION_PLUS_2POWER, + lideal, + Bpoo, + NUM_ALTERNATE_EXTREMAL_ORDERS); + if (!ret) { + goto 
cleanup; + } + + assert(ibz_is_odd(d1) && ibz_is_odd(d2)); + // compute the valuation of the GCD of u,v + ibz_gcd(&tmp, u, v); + assert(ibz_cmp(&tmp, &ibz_const_zero) != 0); + int exp_gcd = ibz_two_adic(&tmp); + exp = TORSION_EVEN_POWER - exp_gcd; + // removing the power of 2 from u and v + ibz_div(u, &test1, u, &tmp); + assert(ibz_cmp(&test1, &ibz_const_zero) == 0); + ibz_div(v, &test1, v, &tmp); + assert(ibz_cmp(&test1, &ibz_const_zero) == 0); + +#ifndef NDEBUG + // checking that ud1+vd2 = 2^exp + ibz_t pow_check, tmp_check; + ibz_init(&pow_check); + ibz_init(&tmp_check); + ibz_pow(&pow_check, &ibz_const_two, exp); + ibz_mul(&tmp_check, d1, u); + ibz_sub(&pow_check, &pow_check, &tmp_check); + ibz_mul(&tmp_check, v, d2); + ibz_sub(&pow_check, &pow_check, &tmp_check); + assert(ibz_cmp(&pow_check, &ibz_const_zero) == 0); + ibz_finalize(&tmp_check); + ibz_finalize(&pow_check); +#endif + + // now we compute the dimension 2 isogeny + // F : Eu x Ev -> E x E' + // where we have phi_u : Eu -> E_index_order1 and phi_v : Ev -> E_index_order2 + // if we have phi1 : E_index_order_1 -> E of degree d1 + // and phi2 : E_index_order_2 -> E of degree d2 + // we can define theta = phi2 o hat{phi1} + // and the kernel of F is given by + // ( [ud1](P), phiv o theta o hat{phiu} (P)),( [ud1](Q), phiv o theta o + // hat{phiu} (Q)) where P,Q is a basis of E0[2e] + + // now we set-up the kernel + // ec_curve_t E0 = CURVE_E0; + ec_curve_t E1; + copy_curve(&E1, &CURVES_WITH_ENDOMORPHISMS[index_order1].curve); + ec_curve_t E2; + copy_curve(&E2, &CURVES_WITH_ENDOMORPHISMS[index_order2].curve); + ec_basis_t bas1, bas2; + theta_couple_curve_t E01; + theta_kernel_couple_points_t ker; + + ec_basis_t bas_u; + copy_basis(&bas1, &CURVES_WITH_ENDOMORPHISMS[index_order1].basis_even); + copy_basis(&bas2, &CURVES_WITH_ENDOMORPHISMS[index_order2].basis_even); + + // we start by computing theta = beta2 \hat{beta1}/n + ibz_set(&theta.denom, 1); + quat_alg_conj(&theta, beta1); + quat_alg_mul(&theta, beta2, &theta, &QUATALG_PINFTY); + ibz_mul(&theta.denom, &theta.denom, &lideal->norm); + + // now we perform the actual computation + quat_left_ideal_t idealu, idealv; + quat_left_ideal_init(&idealu); + quat_left_ideal_init(&idealv); + theta_couple_curve_t Fu_codomain, Fv_codomain; + theta_couple_point_t pushed_points[3]; + theta_couple_point_t *const V1 = pushed_points + 0, *const V2 = pushed_points + 1, *const V1m2 = pushed_points + 2; + theta_couple_point_t P, Q, PmQ; + + copy_point(&P.P1, &bas1.P); + copy_point(&PmQ.P1, &bas1.PmQ); + copy_point(&Q.P1, &bas1.Q); + // Set points to zero + ec_point_init(&P.P2); + ec_point_init(&Q.P2); + ec_point_init(&PmQ.P2); + + pushed_points[0] = P; + pushed_points[1] = Q; + pushed_points[2] = PmQ; + // we perform the computation of phiu with a fixed degree isogeny + ret = fixed_degree_isogeny_and_eval( + &idealu, u, true, &Fu_codomain, pushed_points, sizeof(pushed_points) / sizeof(*pushed_points), index_order1); + + if (!ret) { + goto cleanup; + } + assert(test_point_order_twof(&V1->P1, &Fu_codomain.E1, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&V1->P2, &Fu_codomain.E2, TORSION_EVEN_POWER)); + +#ifndef NDEBUG + Fu_length = (unsigned int)ret; + // presumably the correct curve is the first one, we check this + fp2_t w0a, w1a, w2a; + ec_curve_t E1_tmp, Fu_codomain_E1_tmp, Fu_codomain_E2_tmp; + copy_curve(&E1_tmp, &E1); + copy_curve(&Fu_codomain_E1_tmp, &Fu_codomain.E1); + copy_curve(&Fu_codomain_E2_tmp, &Fu_codomain.E2); + weil(&w0a, TORSION_EVEN_POWER, &bas1.P, &bas1.Q, &bas1.PmQ, 
&E1_tmp); + weil(&w1a, TORSION_EVEN_POWER, &V1->P1, &V2->P1, &V1m2->P1, &Fu_codomain_E1_tmp); + weil(&w2a, TORSION_EVEN_POWER, &V1->P2, &V2->P2, &V1m2->P2, &Fu_codomain_E2_tmp); + ibz_pow(&two_pow, &ibz_const_two, Fu_length); + ibz_sub(&two_pow, &two_pow, u); + + // now we are checking that the weil pairings are equal to the correct value + digit_t digit_u[NWORDS_ORDER] = { 0 }; + ibz_to_digit_array(digit_u, u); + fp2_t test_powa; + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + + assert(fp2_is_equal(&test_powa, &w1a)); + ibz_to_digit_array(digit_u, &two_pow); + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + assert(fp2_is_equal(&test_powa, &w2a)); +#endif + + // copying the basis images + copy_point(&bas_u.P, &V1->P1); + copy_point(&bas_u.Q, &V2->P1); + copy_point(&bas_u.PmQ, &V1m2->P1); + + // copying the points to the first part of the kernel + copy_point(&ker.T1.P1, &bas_u.P); + copy_point(&ker.T2.P1, &bas_u.Q); + copy_point(&ker.T1m2.P1, &bas_u.PmQ); + copy_curve(&E01.E1, &Fu_codomain.E1); + + copy_point(&P.P1, &bas2.P); + copy_point(&PmQ.P1, &bas2.PmQ); + copy_point(&Q.P1, &bas2.Q); + pushed_points[0] = P; + pushed_points[1] = Q; + pushed_points[2] = PmQ; + + // computation of phiv + ret = fixed_degree_isogeny_and_eval( + &idealv, v, true, &Fv_codomain, pushed_points, sizeof(pushed_points) / sizeof(*pushed_points), index_order2); + if (!ret) { + goto cleanup; + } + + assert(test_point_order_twof(&V1->P1, &Fv_codomain.E1, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&V1->P2, &Fv_codomain.E2, TORSION_EVEN_POWER)); + +#ifndef NDEBUG + Fv_length = (unsigned int)ret; + ec_curve_t E2_tmp, Fv_codomain_E1_tmp, Fv_codomain_E2_tmp; + copy_curve(&E2_tmp, &E2); + copy_curve(&Fv_codomain_E1_tmp, &Fv_codomain.E1); + copy_curve(&Fv_codomain_E2_tmp, &Fv_codomain.E2); + // presumably the correct curve is the first one, we check this + weil(&w0a, TORSION_EVEN_POWER, &bas2.P, &bas2.Q, &bas2.PmQ, &E2_tmp); + weil(&w1a, TORSION_EVEN_POWER, &V1->P1, &V2->P1, &V1m2->P1, &Fv_codomain_E1_tmp); + weil(&w2a, TORSION_EVEN_POWER, &V1->P2, &V2->P2, &V1m2->P2, &Fv_codomain_E2_tmp); + if (Fv_length == 0) { + ibz_set(&tmp, 1); + ibz_set(&two_pow, 1); + } else { + ibz_pow(&two_pow, &ibz_const_two, Fv_length); + ibz_sub(&two_pow, &two_pow, v); + } + + // now we are checking that one of the two is equal to the correct value + ibz_to_digit_array(digit_u, v); + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + assert(fp2_is_equal(&test_powa, &w1a)); + ibz_to_digit_array(digit_u, &two_pow); + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + assert(fp2_is_equal(&test_powa, &w2a)); + +#endif + + copy_point(&bas2.P, &V1->P1); + copy_point(&bas2.Q, &V2->P1); + copy_point(&bas2.PmQ, &V1m2->P1); + + // multiplying theta by 1 / (d1 * n(connecting_ideal2)) + ibz_pow(&two_pow, &ibz_const_two, TORSION_EVEN_POWER); + ibz_copy(&tmp, d1); + if (index_order2 > 0) { + ibz_mul(&tmp, &tmp, &ALTERNATE_CONNECTING_IDEALS[index_order2 - 1].norm); + } + ibz_invmod(&tmp, &tmp, &two_pow); + + ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); + ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); + ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); + ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + + // applying theta + endomorphism_application_even_basis(&bas2, 0, &Fv_codomain.E1, &theta, TORSION_EVEN_POWER); + + assert(test_basis_order_twof(&bas2, &Fv_codomain.E1, TORSION_EVEN_POWER)); + + // copying points to the second part of the kernel + copy_point(&ker.T1.P2, &bas2.P); + 
copy_point(&ker.T2.P2, &bas2.Q); + copy_point(&ker.T1m2.P2, &bas2.PmQ); + copy_curve(&E01.E2, &Fv_codomain.E1); + + // copying the points to the first part of the kernel + quat_left_ideal_finalize(&idealu); + quat_left_ideal_finalize(&idealv); + + double_couple_point_iter(&ker.T1, TORSION_EVEN_POWER - exp, &ker.T1, &E01); + double_couple_point_iter(&ker.T2, TORSION_EVEN_POWER - exp, &ker.T2, &E01); + double_couple_point_iter(&ker.T1m2, TORSION_EVEN_POWER - exp, &ker.T1m2, &E01); + + assert(test_point_order_twof(&ker.T1.P1, &E01.E1, exp)); + assert(test_point_order_twof(&ker.T1m2.P2, &E01.E2, exp)); + + assert(ibz_is_odd(u)); + + // now we evaluate the basis points through the isogeny + assert(test_basis_order_twof(&bas_u, &E01.E1, TORSION_EVEN_POWER)); + + // evaluating the basis through the isogeny of degree u*d1 + copy_point(&pushed_points[0].P1, &bas_u.P); + copy_point(&pushed_points[2].P1, &bas_u.PmQ); + copy_point(&pushed_points[1].P1, &bas_u.Q); + // Set points to zero + ec_point_init(&pushed_points[0].P2); + ec_point_init(&pushed_points[1].P2); + ec_point_init(&pushed_points[2].P2); + + theta_couple_curve_t theta_codomain; + + ret = theta_chain_compute_and_eval_randomized( + exp, &E01, &ker, false, &theta_codomain, pushed_points, sizeof(pushed_points) / sizeof(*pushed_points)); + if (!ret) { + goto cleanup; + } + + theta_couple_point_t T1, T2, T1m2; + T1 = pushed_points[0]; + T2 = pushed_points[1]; + T1m2 = pushed_points[2]; + + assert(test_point_order_twof(&T1.P2, &theta_codomain.E2, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&T1.P1, &theta_codomain.E1, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&T1m2.P2, &theta_codomain.E2, TORSION_EVEN_POWER)); + + copy_point(&basis->P, &T1.P1); + copy_point(&basis->Q, &T2.P1); + copy_point(&basis->PmQ, &T1m2.P1); + copy_curve(codomain, &theta_codomain.E1); + + // using weil pairing to verify that we selected the correct curve + fp2_t w0, w1; + // ec_curve_t E0 = CURVE_E0; + // ec_basis_t bas0 = BASIS_EVEN; + weil(&w0, TORSION_EVEN_POWER, &bas1.P, &bas1.Q, &bas1.PmQ, &E1); + weil(&w1, TORSION_EVEN_POWER, &basis->P, &basis->Q, &basis->PmQ, codomain); + + digit_t digit_d[NWORDS_ORDER] = { 0 }; + ibz_mul(&tmp, d1, u); + ibz_mul(&tmp, &tmp, u); + ibz_mod(&tmp, &tmp, &TORSION_PLUS_2POWER); + ibz_to_digit_array(digit_d, &tmp); + fp2_t test_pow; + fp2_pow_vartime(&test_pow, &w0, digit_d, NWORDS_ORDER); + + // then we have selected the wrong one + if (!fp2_is_equal(&w1, &test_pow)) { + copy_point(&basis->P, &T1.P2); + copy_point(&basis->Q, &T2.P2); + copy_point(&basis->PmQ, &T1m2.P2); + copy_curve(codomain, &theta_codomain.E2); + +// verifying that the other one is the good one +#ifndef NDEBUG + ec_curve_t codomain_tmp; + copy_curve(&codomain_tmp, codomain); + weil(&w1, TORSION_EVEN_POWER, &basis->P, &basis->Q, &basis->PmQ, &codomain_tmp); + fp2_pow_vartime(&test_pow, &w0, digit_d, NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w1)); +#endif + } + + // now we apply M / (u * d1) where M is the matrix corresponding to the + // endomorphism beta1 = phi o dual(phi1) we multiply beta1 by the inverse of + // (u*d1) mod 2^TORSION_EVEN_POWER + ibz_mul(&tmp, u, d1); + if (index_order1 != 0) { + ibz_mul(&tmp, &tmp, &CONNECTING_IDEALS[index_order1].norm); + } + ibz_invmod(&tmp, &tmp, &TORSION_PLUS_2POWER); + ibz_mul(&beta1->coord[0], &beta1->coord[0], &tmp); + ibz_mul(&beta1->coord[1], &beta1->coord[1], &tmp); + ibz_mul(&beta1->coord[2], &beta1->coord[2], &tmp); + ibz_mul(&beta1->coord[3], &beta1->coord[3], &tmp); + + 
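+    // Note: u and d1 are odd, and so is the norm of the connecting ideal when index_order1 > 0
+    // (otherwise the inverse modulo 2^TORSION_EVEN_POWER taken above would not exist), so the
+    // scaling of beta1 is exact on the 2^TORSION_EVEN_POWER-torsion. The evaluation below then
+    // produces the image basis whose Weil pairing is checked against n(lideal) in the debug
+    // block that follows.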
endomorphism_application_even_basis(basis, 0, codomain, beta1, TORSION_EVEN_POWER); + +#ifndef NDEBUG + { + ec_curve_t E0 = CURVE_E0; + ec_curve_t codomain_tmp; + ec_basis_t bas0 = CURVES_WITH_ENDOMORPHISMS[0].basis_even; + copy_curve(&codomain_tmp, codomain); + copy_curve(&E1_tmp, &E1); + copy_curve(&E2_tmp, &E2); + weil(&w0a, TORSION_EVEN_POWER, &bas0.P, &bas0.Q, &bas0.PmQ, &E0); + weil(&w1a, TORSION_EVEN_POWER, &basis->P, &basis->Q, &basis->PmQ, &codomain_tmp); + digit_t tmp_d[2 * NWORDS_ORDER] = { 0 }; + if (index_order1 != 0) { + copy_basis(&bas1, &CURVES_WITH_ENDOMORPHISMS[index_order1].basis_even); + weil(&w0, TORSION_EVEN_POWER, &bas1.P, &bas1.Q, &bas1.PmQ, &E1_tmp); + ibz_to_digit_array(tmp_d, &CONNECTING_IDEALS[index_order1].norm); + fp2_pow_vartime(&test_pow, &w0a, tmp_d, 2 * NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w0)); + } + if (index_order2 != 0) { + copy_basis(&bas2, &CURVES_WITH_ENDOMORPHISMS[index_order2].basis_even); + weil(&w0, TORSION_EVEN_POWER, &bas2.P, &bas2.Q, &bas2.PmQ, &E2_tmp); + ibz_to_digit_array(tmp_d, &CONNECTING_IDEALS[index_order2].norm); + fp2_pow_vartime(&test_pow, &w0a, tmp_d, 2 * NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w0)); + } + ibz_to_digit_array(tmp_d, &lideal->norm); + fp2_pow_vartime(&test_pow, &w0a, tmp_d, 2 * NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w1a)); + } +#endif + +cleanup: + ibz_finalize(&norm_d); + ibz_finalize(&test1); + ibz_finalize(&test2); + ibz_finalize(&target); + ibz_finalize(&tmp); + ibz_finalize(&two_pow); + quat_alg_elem_finalize(&theta); + return ret; +} + +int +dim2id2iso_arbitrary_isogeny_evaluation(ec_basis_t *basis, ec_curve_t *codomain, const quat_left_ideal_t *lideal) +{ + int ret; + + quat_alg_elem_t beta1, beta2; + ibz_t u, v, d1, d2; + + quat_alg_elem_init(&beta1); + quat_alg_elem_init(&beta2); + + ibz_init(&u); + ibz_init(&v); + ibz_init(&d1); + ibz_init(&d2); + + ret = dim2id2iso_ideal_to_isogeny_clapotis( + &beta1, &beta2, &u, &v, &d1, &d2, codomain, basis, lideal, &QUATALG_PINFTY); + + quat_alg_elem_finalize(&beta1); + quat_alg_elem_finalize(&beta2); + + ibz_finalize(&u); + ibz_finalize(&v); + ibz_finalize(&d1); + ibz_finalize(&d2); + + return ret; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim4.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim4.c new file mode 100644 index 0000000000..495dc2dcb2 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim4.c @@ -0,0 +1,470 @@ +#include +#include "internal.h" + +// internal helper functions +void +ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t *b) +{ + ibz_mat_4x4_t mat; + ibz_t prod; + ibz_init(&prod); + ibz_mat_4x4_init(&mat); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&(mat[i][j]), 0); + for (int k = 0; k < 4; k++) { + ibz_mul(&prod, &((*a)[i][k]), &((*b)[k][j])); + ibz_add(&(mat[i][j]), &(mat[i][j]), &prod); + } + } + } + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&((*res)[i][j]), &(mat[i][j])); + } + } + ibz_mat_4x4_finalize(&mat); + ibz_finalize(&prod); +} + +// helper functions for lattices +void +ibz_vec_4_set(ibz_vec_4_t *vec, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) +{ + ibz_set(&((*vec)[0]), coord0); + ibz_set(&((*vec)[1]), coord1); + ibz_set(&((*vec)[2]), coord2); + ibz_set(&((*vec)[3]), coord3); +} + +void +ibz_vec_4_copy(ibz_vec_4_t *new, const ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_copy(&((*new)[i]), &((*vec)[i])); + } +} + +void +ibz_vec_4_copy_ibz(ibz_vec_4_t 
*res, const ibz_t *coord0, const ibz_t *coord1, const ibz_t *coord2, const ibz_t *coord3) +{ + ibz_copy(&((*res)[0]), coord0); + ibz_copy(&((*res)[1]), coord1); + ibz_copy(&((*res)[2]), coord2); + ibz_copy(&((*res)[3]), coord3); +} + +void +ibz_vec_4_content(ibz_t *content, const ibz_vec_4_t *v) +{ + ibz_gcd(content, &((*v)[0]), &((*v)[1])); + ibz_gcd(content, &((*v)[2]), content); + ibz_gcd(content, &((*v)[3]), content); +} + +void +ibz_vec_4_negate(ibz_vec_4_t *neg, const ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_neg(&((*neg)[i]), &((*vec)[i])); + } +} + +void +ibz_vec_4_add(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) +{ + ibz_add(&((*res)[0]), &((*a)[0]), &((*b)[0])); + ibz_add(&((*res)[1]), &((*a)[1]), &((*b)[1])); + ibz_add(&((*res)[2]), &((*a)[2]), &((*b)[2])); + ibz_add(&((*res)[3]), &((*a)[3]), &((*b)[3])); +} + +void +ibz_vec_4_sub(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) +{ + ibz_sub(&((*res)[0]), &((*a)[0]), &((*b)[0])); + ibz_sub(&((*res)[1]), &((*a)[1]), &((*b)[1])); + ibz_sub(&((*res)[2]), &((*a)[2]), &((*b)[2])); + ibz_sub(&((*res)[3]), &((*a)[3]), &((*b)[3])); +} + +int +ibz_vec_4_is_zero(const ibz_vec_4_t *x) +{ + int res = 1; + for (int i = 0; i < 4; i++) { + res &= ibz_is_zero(&((*x)[i])); + } + return (res); +} + +void +ibz_vec_4_linear_combination(ibz_vec_4_t *lc, + const ibz_t *coeff_a, + const ibz_vec_4_t *vec_a, + const ibz_t *coeff_b, + const ibz_vec_4_t *vec_b) +{ + ibz_t prod; + ibz_vec_4_t sums; + ibz_vec_4_init(&sums); + ibz_init(&prod); + for (int i = 0; i < 4; i++) { + ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); + ibz_mul(&prod, coeff_b, &((*vec_b)[i])); + ibz_add(&(sums[i]), &(sums[i]), &prod); + } + for (int i = 0; i < 4; i++) { + ibz_copy(&((*lc)[i]), &(sums[i])); + } + ibz_finalize(&prod); + ibz_vec_4_finalize(&sums); +} + +void +ibz_vec_4_scalar_mul(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_mul(&((*prod)[i]), &((*vec)[i]), scalar); + } +} + +int +ibz_vec_4_scalar_div(ibz_vec_4_t *quot, const ibz_t *scalar, const ibz_vec_4_t *vec) +{ + int res = 1; + ibz_t r; + ibz_init(&r); + for (int i = 0; i < 4; i++) { + ibz_div(&((*quot)[i]), &r, &((*vec)[i]), scalar); + res = res && ibz_is_zero(&r); + } + ibz_finalize(&r); + return (res); +} + +void +ibz_mat_4x4_copy(ibz_mat_4x4_t *new, const ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&((*new)[i][j]), &((*mat)[i][j])); + } + } +} + +void +ibz_mat_4x4_negate(ibz_mat_4x4_t *neg, const ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_neg(&((*neg)[i][j]), &((*mat)[i][j])); + } + } +} + +void +ibz_mat_4x4_transpose(ibz_mat_4x4_t *transposed, const ibz_mat_4x4_t *mat) +{ + ibz_mat_4x4_t work; + ibz_mat_4x4_init(&work); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(work[i][j]), &((*mat)[j][i])); + } + } + ibz_mat_4x4_copy(transposed, &work); + ibz_mat_4x4_finalize(&work); +} + +void +ibz_mat_4x4_zero(ibz_mat_4x4_t *zero) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&((*zero)[i][j]), 0); + } + } +} + +void +ibz_mat_4x4_identity(ibz_mat_4x4_t *id) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&((*id)[i][j]), 0); + } + ibz_set(&((*id)[i][i]), 1); + } +} + +int +ibz_mat_4x4_is_identity(const ibz_mat_4x4_t *mat) +{ + int res = 1; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + res = res && 
ibz_is_one(&((*mat)[i][j])) == (i == j); + } + } + return (res); +} + +int +ibz_mat_4x4_equal(const ibz_mat_4x4_t *mat1, const ibz_mat_4x4_t *mat2) +{ + int res = 0; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + res = res | ibz_cmp(&((*mat1)[i][j]), &((*mat2)[i][j])); + } + } + return (!res); +} + +void +ibz_mat_4x4_scalar_mul(ibz_mat_4x4_t *prod, const ibz_t *scalar, const ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_mul(&((*prod)[i][j]), &((*mat)[i][j]), scalar); + } + } +} + +void +ibz_mat_4x4_gcd(ibz_t *gcd, const ibz_mat_4x4_t *mat) +{ + ibz_t d; + ibz_init(&d); + ibz_copy(&d, &((*mat)[0][0])); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_gcd(&d, &d, &((*mat)[i][j])); + } + } + ibz_copy(gcd, &d); + ibz_finalize(&d); +} + +int +ibz_mat_4x4_scalar_div(ibz_mat_4x4_t *quot, const ibz_t *scalar, const ibz_mat_4x4_t *mat) +{ + int res = 1; + ibz_t r; + ibz_init(&r); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_div(&((*quot)[i][j]), &r, &((*mat)[i][j]), scalar); + res = res && ibz_is_zero(&r); + } + } + ibz_finalize(&r); + return (res); +} + +// 4x4 inversion helper functions +void +ibz_inv_dim4_make_coeff_pmp(ibz_t *coeff, + const ibz_t *a1, + const ibz_t *a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2) +{ + ibz_t prod, sum; + ibz_init(&prod); + ibz_init(&sum); + ibz_mul(&sum, a1, a2); + ibz_mul(&prod, b1, b2); + ibz_sub(&sum, &sum, &prod); + ibz_mul(&prod, c1, c2); + ibz_add(coeff, &sum, &prod); + ibz_finalize(&prod); + ibz_finalize(&sum); +} + +void +ibz_inv_dim4_make_coeff_mpm(ibz_t *coeff, + const ibz_t *a1, + const ibz_t *a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2) +{ + ibz_t prod, sum; + ibz_init(&prod); + ibz_init(&sum); + ibz_mul(&sum, b1, b2); + ibz_mul(&prod, a1, a2); + ibz_sub(&sum, &sum, &prod); + ibz_mul(&prod, c1, c2); + ibz_sub(coeff, &sum, &prod); + ibz_finalize(&prod); + ibz_finalize(&sum); +} + +// Method from https://www.geometrictools.com/Documentation/LaplaceExpansionTheorem.pdf 3rd of May +// 2023, 16h15 CEST +int +ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_4x4_t *mat) +{ + ibz_t prod, work_det; + ibz_mat_4x4_t work; + ibz_t s[6]; + ibz_t c[6]; + for (int i = 0; i < 6; i++) { + ibz_init(&(s[i])); + ibz_init(&(c[i])); + } + ibz_mat_4x4_init(&work); + ibz_init(&prod); + ibz_init(&work_det); + + // compute some 2x2 minors, store them in s and c + for (int i = 0; i < 3; i++) { + ibz_mat_2x2_det_from_ibz(&(s[i]), &((*mat)[0][0]), &((*mat)[0][i + 1]), &((*mat)[1][0]), &((*mat)[1][i + 1])); + ibz_mat_2x2_det_from_ibz(&(c[i]), &((*mat)[2][0]), &((*mat)[2][i + 1]), &((*mat)[3][0]), &((*mat)[3][i + 1])); + } + for (int i = 0; i < 2; i++) { + ibz_mat_2x2_det_from_ibz( + &(s[3 + i]), &((*mat)[0][1]), &((*mat)[0][2 + i]), &((*mat)[1][1]), &((*mat)[1][2 + i])); + ibz_mat_2x2_det_from_ibz( + &(c[3 + i]), &((*mat)[2][1]), &((*mat)[2][2 + i]), &((*mat)[3][1]), &((*mat)[3][2 + i])); + } + ibz_mat_2x2_det_from_ibz(&(s[5]), &((*mat)[0][2]), &((*mat)[0][3]), &((*mat)[1][2]), &((*mat)[1][3])); + ibz_mat_2x2_det_from_ibz(&(c[5]), &((*mat)[2][2]), &((*mat)[2][3]), &((*mat)[3][2]), &((*mat)[3][3])); + + // compute det + ibz_set(&work_det, 0); + for (int i = 0; i < 6; i++) { + ibz_mul(&prod, &(s[i]), &(c[5 - i])); + if ((i != 1) && (i != 4)) { + ibz_add(&work_det, &work_det, &prod); + } else { + ibz_sub(&work_det, &work_det, &prod); + } + } + // compute transposed adjugate + 
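+    // The loops below build the adjugate of mat from the cached 2x2 minors: s[] covers rows 0-1
+    // and c[] covers rows 2-3, and each entry work[j][k] is a signed 3x3 minor expanded
+    // (following the Laplace-expansion reference cited above) as a1*a2 -/+ b1*b2 +/- c1*c2 via
+    // ibz_inv_dim4_make_coeff_pmp/_mpm. The result satisfies mat * work = work_det * identity,
+    // so the caller obtains the inverse by carrying work_det as a common denominator.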
for (int j = 0; j < 4; j++) { + for (int k = 0; k < 2; k++) { + if ((k + j + 1) % 2 == 1) { + ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), + &((*mat)[1 - k][(j == 0)]), + &(c[6 - j - (j == 0)]), + &((*mat)[1 - k][2 - (j > 1)]), + &(c[4 - j - (j == 1)]), + &((*mat)[1 - k][3 - (j == 3)]), + &(c[3 - j - (j == 1) - (j == 2)])); + } else { + ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), + &((*mat)[1 - k][(j == 0)]), + &(c[6 - j - (j == 0)]), + &((*mat)[1 - k][2 - (j > 1)]), + &(c[4 - j - (j == 1)]), + &((*mat)[1 - k][3 - (j == 3)]), + &(c[3 - j - (j == 1) - (j == 2)])); + } + } + for (int k = 2; k < 4; k++) { + if ((k + j + 1) % 2 == 1) { + ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), + &((*mat)[3 - (k == 3)][(j == 0)]), + &(s[6 - j - (j == 0)]), + &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(s[4 - j - (j == 1)]), + &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(s[3 - j - (j == 1) - (j == 2)])); + } else { + ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), + &((*mat)[3 - (k == 3)][(j == 0)]), + &(s[6 - j - (j == 0)]), + &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(s[4 - j - (j == 1)]), + &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(s[3 - j - (j == 1) - (j == 2)])); + } + } + } + if (inv != NULL) { + // put transposed adjugate in result, or 0 if no inverse + ibz_set(&prod, !ibz_is_zero(&work_det)); + ibz_mat_4x4_scalar_mul(inv, &prod, &work); + } + // output det + if (det != NULL) + ibz_copy(det, &work_det); + for (int i = 0; i < 6; i++) { + ibz_finalize(&s[i]); + ibz_finalize(&c[i]); + } + ibz_mat_4x4_finalize(&work); + ibz_finalize(&work_det); + ibz_finalize(&prod); + return (!ibz_is_zero(det)); +} + +// matrix evaluation + +void +ibz_mat_4x4_eval(ibz_vec_4_t *res, const ibz_mat_4x4_t *mat, const ibz_vec_4_t *vec) +{ + ibz_vec_4_t sum; + ibz_t prod; + ibz_init(&prod); + ibz_vec_4_init(&sum); + // assume initialization to 0 + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_mul(&prod, &(*mat)[i][j], &(*vec)[j]); + ibz_add(&(sum[i]), &(sum[i]), &prod); + } + } + ibz_vec_4_copy(res, &sum); + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} + +void +ibz_mat_4x4_eval_t(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_mat_4x4_t *mat) +{ + ibz_vec_4_t sum; + ibz_t prod; + ibz_init(&prod); + ibz_vec_4_init(&sum); + // assume initialization to 0 + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_mul(&prod, &(*mat)[j][i], &(*vec)[j]); + ibz_add(&(sum[i]), &(sum[i]), &prod); + } + } + ibz_vec_4_copy(res, &sum); + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} + +// quadratic forms + +void +quat_qf_eval(ibz_t *res, const ibz_mat_4x4_t *qf, const ibz_vec_4_t *coord) +{ + ibz_vec_4_t sum; + ibz_t prod; + ibz_init(&prod); + ibz_vec_4_init(&sum); + ibz_mat_4x4_eval(&sum, qf, coord); + for (int i = 0; i < 4; i++) { + ibz_mul(&prod, &(sum[i]), &(*coord)[i]); + if (i > 0) { + ibz_add(&(sum[0]), &(sum[0]), &prod); + } else { + ibz_copy(&sum[0], &prod); + } + } + ibz_copy(res, &sum[0]); + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dpe.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dpe.h new file mode 100644 index 0000000000..b9a7a35e0b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dpe.h @@ -0,0 +1,743 @@ +/* Copyright (C) 2004-2024 Patrick Pelissier, Paul Zimmermann, LORIA/INRIA. + +This file is part of the DPE Library. 
+ +The DPE Library is free software; you can redistribute it and/or modify +it under the terms of the GNU Lesser General Public License as published by +the Free Software Foundation; either version 3 of the License, or (at your +option) any later version. + +The DPE Library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +License for more details. + +You should have received a copy of the GNU Lesser General Public License +along with the DPE Library; see the file COPYING.LIB. +If not, see <https://www.gnu.org/licenses/>. */ + +#ifndef __DPE +#define __DPE + +#include <stdlib.h> /* For abort */ +#include <stdio.h> /* For fprintf */ +#include <math.h> /* for round, floor, ceil */ +#include <limits.h> + +/* if you change the version, please change it in Makefile too */ +#define DPE_VERSION_MAJOR 1 +#define DPE_VERSION_MINOR 7 + +#if defined(__GNUC__) && (__GNUC__ >= 3) +# define DPE_LIKELY(x) (__builtin_expect(!!(x),1)) +# define DPE_UNLIKELY(x) (__builtin_expect((x),0)) +# define DPE_UNUSED_ATTR __attribute__((unused)) +#else +# define DPE_LIKELY(x) (x) +# define DPE_UNLIKELY(x) (x) +# define DPE_UNUSED_ATTR +#endif + +/* If no user defined mode, define it to double */ +#if !defined(DPE_USE_DOUBLE) && !defined(DPE_USE_LONGDOUBLE) && !defined(DPE_USE_FLOAT128) +# define DPE_USE_DOUBLE +#endif + +#if defined(DPE_USE_DOUBLE) && defined(DPE_USE_LONGDOUBLE) +# error "Either DPE_USE_DOUBLE or DPE_USE_LONGDOUBLE shall be defined." +#elif defined(DPE_USE_DOUBLE) && defined(DPE_USE_FLOAT128) +# error "Either DPE_USE_DOUBLE or DPE_USE_FLOAT128 shall be defined." +#elif defined(DPE_USE_LONGDOUBLE) && defined(DPE_USE_FLOAT128) +# error "Either DPE_USE_LONGDOUBLE or DPE_USE_FLOAT128 shall be defined."
+#endif + +#if (defined(__i386) || defined (__x86_64)) && !defined(DPE_LITTLEENDIAN32) && defined(DPE_USE_DOUBLE) +# define DPE_LITTLEENDIAN32 +#endif + +#if (defined(__STDC_VERSION__) && (__STDC_VERSION__>=199901L)) || defined (__GLIBC__) +# define DPE_DEFINE_ROUND_TRUNC +#endif + +#if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 43 +# define DPE_ISFINITE __builtin_isfinite +#elif defined(isfinite) +# define DPE_ISFINITE isfinite /* new C99 function */ +#else +# define DPE_ISFINITE finite /* obsolete BSD function */ +#endif + +/* DPE_LDEXP(DPE_DOUBLE m, DPEEXP e) return x = m * 2^e */ +/* DPE_FREXP(DPE_DOUBLE x, DPEEXP *e) returns m, e such that x = m * 2^e with + 1/2 <= m < 1 */ +/* DPE_ROUND(DPE_DOUBLE x) returns the nearest integer to x */ +#if defined(DPE_USE_DOUBLE) +# define DPE_DOUBLE double /* mantissa type */ +# define DPE_BITSIZE 53 /* bitsize of DPE_DOUBLE */ +# define DPE_2_POW_BITSIZE 0x1P53 +# if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 40 +# define DPE_LDEXP __builtin_ldexp +# define DPE_FREXP __builtin_frexp +# define DPE_FLOOR __builtin_floor +# define DPE_CEIL __builtin_ceil +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND __builtin_round +# define DPE_TRUNC __builtin_trunc +# endif +# else +# define DPE_LDEXP ldexp +# define DPE_FREXP frexp +# define DPE_FLOOR floor +# define DPE_CEIL ceil +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND round +# define DPE_TRUNC trunc +# endif +# endif + +#elif defined(DPE_USE_LONGDOUBLE) +# define DPE_DOUBLE long double +# define DPE_BITSIZE 64 +# define DPE_2_POW_BITSIZE 0x1P64 +# define DPE_LDEXP ldexpl +# define DPE_FREXP frexpl +# define DPE_FLOOR floorl +# define DPE_CEIL ceill +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND roundl +# define DPE_TRUNC truncl +# endif + +#elif defined(DPE_USE_FLOAT128) +# include "quadmath.h" +# define DPE_DOUBLE __float128 +# define DPE_BITSIZE 113 +# define DPE_2_POW_BITSIZE 0x1P113 +# define DPE_LDEXP ldexpq +# define DPE_FLOOR floorq +# define DPE_CEIL ceilq +# define DPE_FREXP frexpq +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND roundq +# define DPE_TRUNC truncq +# endif + +#else +# error "neither DPE_USE_DOUBLE, nor DPE_USE_LONGDOUBLE, nor DPE_USE_FLOAT128 is defined" +#endif + +/* If no C99, do what we can */ +#ifndef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND(x) ((DPE_DOUBLE) ((long long) ((x) + ((x) >= 0.0 ? 0.5 : -0.5)))) +# define DPE_TRUNC(x) ((DPE_DOUBLE) ((long long) ((x) + 0.0))) +#endif + +#if defined(DPE_USE_LONG) +# define DPE_EXP_T long /* exponent type */ +# define DPE_EXPMIN LONG_MIN /* smallest possible exponent */ +#elif defined(DPE_USE_LONGLONG) +# define DPE_EXP_T long long +# define DPE_EXPMIN LLONG_MIN +#else +# define DPE_EXP_T int /* exponent type */ +# define DPE_EXPMIN INT_MIN /* smallest possible exponent */ +#endif + +#ifdef DPE_LITTLEENDIAN32 +typedef union +{ + double d; +#if INT_MAX == 0x7FFFFFFFL + int i[2]; +#elif LONG_MAX == 0x7FFFFFFFL + long i[2]; +#elif SHRT_MAX == 0x7FFFFFFFL + short i[2]; +#else +# error Cannot find a 32 bits integer type. +#endif +} dpe_double_words; +#endif + +typedef struct +{ + DPE_DOUBLE d; /* significand */ + DPE_EXP_T exp; /* exponent */ +} dpe_struct; + +typedef dpe_struct dpe_t[1]; + +#define DPE_MANT(x) ((x)->d) +#define DPE_EXP(x) ((x)->exp) +#define DPE_SIGN(x) ((DPE_MANT(x) < 0.0) ? 
-1 : (DPE_MANT(x) > 0.0)) + +#define DPE_INLINE static inline + +/* initialize */ +DPE_INLINE void +dpe_init (dpe_t x DPE_UNUSED_ATTR) +{ +} + +/* clear */ +DPE_INLINE void +dpe_clear (dpe_t x DPE_UNUSED_ATTR) +{ +} + +/* set x to y */ +DPE_INLINE void +dpe_set (dpe_t x, dpe_t y) +{ + DPE_MANT(x) = DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y); +} + +/* set x to -y */ +DPE_INLINE void +dpe_neg (dpe_t x, dpe_t y) +{ + DPE_MANT(x) = -DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y); +} + +/* set x to |y| */ +DPE_INLINE void +dpe_abs (dpe_t x, dpe_t y) +{ + DPE_MANT(x) = (DPE_MANT(y) >= 0) ? DPE_MANT(y) : -DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y); +} + +/* set mantissa in [1/2, 1), except for 0 which has minimum exponent */ +/* FIXME: don't inline this function yet ? */ +static void +dpe_normalize (dpe_t x) +{ + if (DPE_UNLIKELY (DPE_MANT(x) == 0.0 || DPE_ISFINITE (DPE_MANT(x)) == 0)) + { + if (DPE_MANT(x) == 0.0) + DPE_EXP(x) = DPE_EXPMIN; + /* otherwise let the exponent of NaN, Inf unchanged */ + } + else + { + DPE_EXP_T e; +#ifdef DPE_LITTLEENDIAN32 /* 32-bit little endian */ + dpe_double_words dw; + dw.d = DPE_MANT(x); + e = (dw.i[1] >> 20) & 0x7FF; /* unbiased exponent, 1022 for m=1/2 */ + DPE_EXP(x) += e - 1022; + dw.i[1] = (dw.i[1] & 0x800FFFFF) | 0x3FE00000; + DPE_MANT(x) = dw.d; +#else /* portable code */ + double m = DPE_MANT(x); + DPE_MANT(x) = DPE_FREXP (m, &e); + DPE_EXP(x) += e; +#endif + } +} + +#if defined(DPE_USE_DOUBLE) +static const double dpe_scale_tab[54] = { + 0x1P0, 0x1P-1, 0x1P-2, 0x1P-3, 0x1P-4, 0x1P-5, 0x1P-6, 0x1P-7, 0x1P-8, + 0x1P-9, 0x1P-10, 0x1P-11, 0x1P-12, 0x1P-13, 0x1P-14, 0x1P-15, 0x1P-16, + 0x1P-17, 0x1P-18, 0x1P-19, 0x1P-20, 0x1P-21, 0x1P-22, 0x1P-23, 0x1P-24, + 0x1P-25, 0x1P-26, 0x1P-27, 0x1P-28, 0x1P-29, 0x1P-30, 0x1P-31, 0x1P-32, + 0x1P-33, 0x1P-34, 0x1P-35, 0x1P-36, 0x1P-37, 0x1P-38, 0x1P-39, 0x1P-40, + 0x1P-41, 0x1P-42, 0x1P-43, 0x1P-44, 0x1P-45, 0x1P-46, 0x1P-47, 0x1P-48, + 0x1P-49, 0x1P-50, 0x1P-51, 0x1P-52, 0x1P-53}; +#endif + +DPE_INLINE DPE_DOUBLE +dpe_scale (DPE_DOUBLE d, int s) +{ + /* -DPE_BITSIZE < s <= 0 and 1/2 <= d < 1 */ +#if defined(DPE_USE_DOUBLE) + return d * dpe_scale_tab [-s]; +#else /* portable code */ + return DPE_LDEXP (d, s); +#endif +} + +/* set x to y */ +DPE_INLINE void +dpe_set_d (dpe_t x, double y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +/* set x to y */ +DPE_INLINE void +dpe_set_ld (dpe_t x, long double y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +/* set x to y */ +DPE_INLINE void +dpe_set_ui (dpe_t x, unsigned long y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +/* set x to y */ +DPE_INLINE void +dpe_set_si (dpe_t x, long y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +DPE_INLINE long +dpe_get_si (dpe_t x) +{ + DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); + return (long) d; +} + +DPE_INLINE unsigned long +dpe_get_ui (dpe_t x) +{ + DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); + return (d < 0.0) ? 
0 : (unsigned long) d; +} + +DPE_INLINE double +dpe_get_d (dpe_t x) +{ + return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); +} + +DPE_INLINE long double +dpe_get_ld (dpe_t x) +{ + return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); +} + +#if defined(__GMP_H__) || defined(__MINI_GMP_H__) +/* set x to y */ +DPE_INLINE void +dpe_set_z (dpe_t x, mpz_t y) +{ + long e; + DPE_MANT(x) = mpz_get_d_2exp (&e, y); + DPE_EXP(x) = (DPE_EXP_T) e; +} + +/* set x to y, rounded to nearest */ +DPE_INLINE void +dpe_get_z (mpz_t x, dpe_t y) +{ + DPE_EXP_T ey = DPE_EXP(y); + if (ey >= DPE_BITSIZE) /* y is an integer */ + { + DPE_DOUBLE d = DPE_MANT(y) * DPE_2_POW_BITSIZE; /* d is an integer */ + mpz_set_d (x, d); /* should be exact */ + mpz_mul_2exp (x, x, (unsigned long) ey - DPE_BITSIZE); + } + else /* DPE_EXP(y) < DPE_BITSIZE */ + { + if (DPE_UNLIKELY (ey < 0)) /* |y| < 1/2 */ + mpz_set_ui (x, 0); + else + { + DPE_DOUBLE d = DPE_LDEXP(DPE_MANT(y), ey); + mpz_set_d (x, (double) DPE_ROUND(d)); + } + } +} + +/* return e and x such that y = x*2^e */ +DPE_INLINE mp_exp_t +dpe_get_z_exp (mpz_t x, dpe_t y) +{ + mpz_set_d (x, DPE_MANT (y) * DPE_2_POW_BITSIZE); + return DPE_EXP(y) - DPE_BITSIZE; +} +#endif + +/* x <- y + z, assuming y and z are normalized, returns x normalized */ +DPE_INLINE void +dpe_add (dpe_t x, dpe_t y, dpe_t z) +{ + if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) + /* |z| < 1/2*ulp(y), thus o(y+z) = y */ + dpe_set (x, y); + else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) + dpe_set (x, z); + else + { + DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ + + if (d >= 0) + { + DPE_MANT(x) = DPE_MANT(y) + dpe_scale (DPE_MANT(z), -d); + DPE_EXP(x) = DPE_EXP(y); + } + else + { + DPE_MANT(x) = DPE_MANT(z) + dpe_scale (DPE_MANT(y), d); + DPE_EXP(x) = DPE_EXP(z); + } + dpe_normalize (x); + } +} + +/* x <- y - z, assuming y and z are normalized, returns x normalized */ +DPE_INLINE void +dpe_sub (dpe_t x, dpe_t y, dpe_t z) +{ + if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) + /* |z| < 1/2*ulp(y), thus o(y-z) = y */ + dpe_set (x, y); + else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) + dpe_neg (x, z); + else + { + DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ + + if (d >= 0) + { + DPE_MANT(x) = DPE_MANT(y) - dpe_scale (DPE_MANT(z), -d); + DPE_EXP(x) = DPE_EXP(y); + } + else + { + DPE_MANT(x) = dpe_scale (DPE_MANT(y), d) - DPE_MANT(z); + DPE_EXP(x) = DPE_EXP(z); + } + dpe_normalize (x); + } +} + +/* x <- y * z, assuming y and z are normalized, returns x normalized */ +DPE_INLINE void +dpe_mul (dpe_t x, dpe_t y, dpe_t z) +{ + DPE_MANT(x) = DPE_MANT(y) * DPE_MANT(z); + DPE_EXP(x) = DPE_EXP(y) + DPE_EXP(z); + dpe_normalize (x); +} + +/* x <- sqrt(y), assuming y is normalized, returns x normalized */ +DPE_INLINE void +dpe_sqrt (dpe_t x, dpe_t y) +{ + DPE_EXP_T ey = DPE_EXP(y); + if (ey % 2) + { + /* since 1/2 <= my < 1, 1/4 <= my/2 < 1 */ + DPE_MANT(x) = sqrt (0.5 * DPE_MANT(y)); + DPE_EXP(x) = (ey + 1) / 2; + } + else + { + DPE_MANT(x) = sqrt (DPE_MANT(y)); + DPE_EXP(x) = ey / 2; + } +} + +/* x <- y / z, assuming y and z are normalized, returns x normalized. + Assumes z is not zero. 
*/ +DPE_INLINE void +dpe_div (dpe_t x, dpe_t y, dpe_t z) +{ + DPE_MANT(x) = DPE_MANT(y) / DPE_MANT(z); + DPE_EXP(x) = DPE_EXP(y) - DPE_EXP(z); + dpe_normalize (x); +} + +/* x <- y * z, assuming y normalized, returns x normalized */ +DPE_INLINE void +dpe_mul_ui (dpe_t x, dpe_t y, unsigned long z) +{ + DPE_MANT(x) = DPE_MANT(y) * (DPE_DOUBLE) z; + DPE_EXP(x) = DPE_EXP(y); + dpe_normalize (x); +} + +/* x <- y / z, assuming y normalized, z non-zero, returns x normalized */ +DPE_INLINE void +dpe_div_ui (dpe_t x, dpe_t y, unsigned long z) +{ + DPE_MANT(x) = DPE_MANT(y) / (DPE_DOUBLE) z; + DPE_EXP(x) = DPE_EXP(y); + dpe_normalize (x); +} + +/* x <- y * 2^e */ +DPE_INLINE void +dpe_mul_2exp (dpe_t x, dpe_t y, unsigned long e) +{ + DPE_MANT(x) = DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y) + (DPE_EXP_T) e; +} + +/* x <- y / 2^e */ +DPE_INLINE void +dpe_div_2exp (dpe_t x, dpe_t y, unsigned long e) +{ + DPE_MANT(x) = DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y) - (DPE_EXP_T) e; +} + +/* return e and x such that y = x*2^e (equality is not guaranteed if the 'long' + type has fewer bits than the significand in dpe_t) */ +DPE_INLINE DPE_EXP_T +dpe_get_si_exp (long *x, dpe_t y) +{ + if (sizeof(long) == 4) /* 32-bit word: long has 31 bits */ + { + *x = (long) (DPE_MANT(y) * 2147483648.0); + return DPE_EXP(y) - 31; + } + else if (sizeof(long) == 8) /* 64-bit word: long has 63 bits */ + { + *x = (long) (DPE_MANT (y) * 9223372036854775808.0); + return DPE_EXP(y) - 63; + } + else + { + fprintf (stderr, "Error, neither 32-bit nor 64-bit word\n"); + exit (1); + } +} + +static DPE_UNUSED_ATTR int dpe_str_prec = 16; +static int dpe_out_str (FILE *s, int base, dpe_t x) DPE_UNUSED_ATTR; + +static int +dpe_out_str (FILE *s, int base, dpe_t x) +{ + DPE_DOUBLE d = DPE_MANT(x); + DPE_EXP_T e2 = DPE_EXP(x); + int e10 = 0; + char sign = ' '; + if (DPE_UNLIKELY (base != 10)) + { + fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); + exit (1); + } + if (d == 0.0) +#ifdef DPE_USE_DOUBLE + return fprintf (s, "%1.*f", dpe_str_prec, d); +#else + return fprintf (s, "%1.*Lf", dpe_str_prec, (long double) d); +#endif + if (d < 0) + { + d = -d; + sign = '-'; + } + if (e2 > 0) + { + while (e2 > 0) + { + e2 --; + d *= 2.0; + if (d >= 10.0) + { + d /= 10.0; + e10 ++; + } + } + } + else /* e2 <= 0 */ + { + while (e2 < 0) + { + e2 ++; + d /= 2.0; + if (d < 1.0) + { + d *= 10.0; + e10 --; + } + } + } +#ifdef DPE_USE_DOUBLE + return fprintf (s, "%c%1.*f*10^%d", sign, dpe_str_prec, d, e10); +#else + return fprintf (s, "%c%1.*Lf*10^%d", sign, dpe_str_prec, (long double) d, e10); +#endif +} + +static size_t dpe_inp_str (dpe_t x, FILE *s, int base) DPE_UNUSED_ATTR; + +static size_t +dpe_inp_str (dpe_t x, FILE *s, int base) +{ + size_t res; + DPE_DOUBLE d; + if (DPE_UNLIKELY (base != 10)) + { + fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); + exit (1); + } +#ifdef DPE_USE_DOUBLE + res = fscanf (s, "%lf", &d); +#elif defined(DPE_USE_LONGDOUBLE) + res = fscanf (s, "%Lf", &d); +#else + { + long double d_ld; + res = fscanf (s, "%Lf", &d_ld); + d = d_ld; + } +#endif + dpe_set_d (x, d); + return res; +} + +DPE_INLINE void +dpe_dump (dpe_t x) +{ + dpe_out_str (stdout, 10, x); + putchar ('\n'); +} + +DPE_INLINE int +dpe_zero_p (dpe_t x) +{ + return DPE_MANT (x) == 0; +} + +/* return a positive value if x > y + a negative value if x < y + and 0 otherwise (x=y). 
*/ +DPE_INLINE int +dpe_cmp (dpe_t x, dpe_t y) +{ + int sx = DPE_SIGN(x); + int d = sx - DPE_SIGN(y); + + if (d != 0) + return d; + else if (DPE_EXP(x) > DPE_EXP(y)) + return (sx > 0) ? 1 : -1; + else if (DPE_EXP(y) > DPE_EXP(x)) + return (sx > 0) ? -1 : 1; + else /* DPE_EXP(x) = DPE_EXP(y) */ + return (DPE_MANT(x) < DPE_MANT(y)) ? -1 : (DPE_MANT(x) > DPE_MANT(y)); +} + +DPE_INLINE int +dpe_cmp_d (dpe_t x, double d) +{ + dpe_t y; + dpe_set_d (y, d); + return dpe_cmp (x, y); +} + +DPE_INLINE int +dpe_cmp_ui (dpe_t x, unsigned long d) +{ + dpe_t y; + dpe_set_ui (y, d); + return dpe_cmp (x, y); +} + +DPE_INLINE int +dpe_cmp_si (dpe_t x, long d) +{ + dpe_t y; + dpe_set_si (y, d); + return dpe_cmp (x, y); +} + +/* set x to integer nearest to y */ +DPE_INLINE void +dpe_round (dpe_t x, dpe_t y) +{ + if (DPE_EXP(y) < 0) /* |y| < 1/2 */ + dpe_set_ui (x, 0); + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set (x, y); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, DPE_ROUND(d)); + } +} + +/* set x to the fractional part of y, defined as y - trunc(y), thus the + fractional part has absolute value in [0, 1), and same sign as y */ +DPE_INLINE void +dpe_frac (dpe_t x, dpe_t y) +{ + /* If |y| is smaller than 1, keep it */ + if (DPE_EXP(y) <= 0) + dpe_set (x, y); + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set_ui (x, 0); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, d - DPE_TRUNC(d)); + } +} + +/* set x to largest integer <= y */ +DPE_INLINE void +dpe_floor (dpe_t x, dpe_t y) +{ + if (DPE_EXP(y) <= 0) /* |y| < 1 */ + { + if (DPE_SIGN(y) >= 0) /* 0 <= y < 1 */ + dpe_set_ui (x, 0); + else /* -1 < y < 0 */ + dpe_set_si (x, -1); + } + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set (x, y); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, DPE_FLOOR(d)); + } +} + +/* set x to smallest integer >= y */ +DPE_INLINE void +dpe_ceil (dpe_t x, dpe_t y) +{ + if (DPE_EXP(y) <= 0) /* |y| < 1 */ + { + if (DPE_SIGN(y) > 0) /* 0 < y < 1 */ + dpe_set_ui (x, 1); + else /* -1 < y <= 0 */ + dpe_set_si (x, 0); + } + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set (x, y); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, DPE_CEIL(d)); + } +} + +DPE_INLINE void +dpe_swap (dpe_t x, dpe_t y) +{ + DPE_EXP_T i = DPE_EXP (x); + DPE_DOUBLE d = DPE_MANT (x); + DPE_EXP (x) = DPE_EXP (y); + DPE_MANT (x) = DPE_MANT (y); + DPE_EXP (y) = i; + DPE_MANT (y) = d; +} + +#endif /* __DPE */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/e0_basis.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/e0_basis.c new file mode 100644 index 0000000000..5be2b8e57e --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/e0_basis.c @@ -0,0 +1,55 @@ +#include +const fp2_t BASIS_E0_PX = { +#if 0 +#elif RADIX == 16 +{0x107, 0xc, 0x1890, 0xf2a, 0x52b, 0xb68, 0x152d, 0xa4c, 0x1054, 0x642, 0x36a, 0x6f8, 0x7ad, 0x146c, 0x1d66, 0x1b67, 0x236, 0x10d, 0x1933, 0x3} +#elif RADIX == 32 +{0x3020e, 0xb795624, 0x5ab6829, 0x1514995, 0x1b5190a, 0x187ad37c, 0x19facd46, 0x8688db6, 0x3c998} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x52b795624001810, 0x8c8505452654b56d, 0xf59a8d87ad37c0da, 0x24e4cc21a236db3} +#else +{0x5bcab12000c08, 0x452654b56d052, 0x26f81b5190a0a, 0x36cfd66a361eb, 0x12726610d11b} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1f87, 0x83e, 0x32e, 0xe58, 0xd9d, 0x1416, 0x752, 0x13b4, 0x1efa, 0xe62, 
0x12f5, 0x1907, 0x1814, 0x1ddd, 0x1aa6, 0x1420, 0x2cd, 0x1431, 0x1be2, 0x7} +#elif RADIX == 32 +{0x120fbf0f, 0x1d72c0cb, 0xa54166c, 0x1bea7687, 0x197ab98b, 0x1b814c83, 0x8354ddd, 0x188b368, 0x2df15} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xcd9d72c0cb907df8, 0x5cc5efa9da1d4a82, 0x6a9bbbb814c83cbd, 0x26ef8a8622cda10} +#else +{0x6b96065c83efc, 0x29da1d4a82cd9, 0x190797ab98bdf, 0x6841aa6eeee05, 0x1377c5431166} +#endif +#endif +}; +const fp2_t BASIS_E0_QX = { +#if 0 +#elif RADIX == 16 +{0x5ff, 0x1783, 0xadc, 0x775, 0xad4, 0x593, 0xb4c, 0x21e, 0x1cb2, 0x13d8, 0x179f, 0x680, 0x1a9c, 0x1824, 0x118e, 0x13d9, 0x24, 0x1956, 0x1dd2, 0x9} +#elif RADIX == 32 +{0x5e0cbff, 0x143baab7, 0x9859356, 0x12c843cb, 0xbcfcf63, 0x9a9c340, 0x16631d82, 0xab00927, 0x4ee96} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6ad43baab72f065f, 0xe7b1cb210f2d30b2, 0xc63b049a9c3405e7, 0x4ff74b2ac0249ec} +#else +{0x21dd55b97832f, 0x210f2d30b26ad, 0x680bcfcf6396, 0x27b318ec126a7, 0x4ffba5956012} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1c7f, 0x1117, 0xa4, 0x1164, 0x6e, 0x1e63, 0x1b7b, 0x1305, 0x424, 0x131a, 0x1b61, 0xae3, 0x17b1, 0xe5e, 0x1848, 0x1e81, 0x14a5, 0x1cb5, 0x1d87, 0x8} +#elif RADIX == 32 +{0x445f8ff, 0xe8b2029, 0xf7e6303, 0x109260bb, 0x1db0cc68, 0x1d7b1571, 0x7090e5, 0x5ad297d, 0x3ec3f} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x606e8b2029222fc7, 0x6634424982edefcc, 0xe121cbd7b1571ed8, 0x4f761f96b4a5f40} +#else +{0x74590149117e3, 0x4982edefcc606, 0x2ae3db0cc6884, 0x7d0384872f5ec, 0x4fbb0fcb5a52} +#endif +#endif +}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/e0_basis.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/e0_basis.h new file mode 100644 index 0000000000..05cafb8462 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/e0_basis.h @@ -0,0 +1,3 @@ +#include +extern const fp2_t BASIS_E0_PX; +extern const fp2_t BASIS_E0_QX; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec.c new file mode 100644 index 0000000000..be4e4e55b1 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec.c @@ -0,0 +1,665 @@ +#include +#include +#include +#include + +void +ec_point_init(ec_point_t *P) +{ // Initialize point as identity element (1:0) + fp2_set_one(&(P->x)); + fp2_set_zero(&(P->z)); +} + +void +ec_curve_init(ec_curve_t *E) +{ // Initialize the curve struct + // Initialize the constants + fp2_set_zero(&(E->A)); + fp2_set_one(&(E->C)); + + // Initialize the point (A+2 : 4C) + ec_point_init(&(E->A24)); + + // Set the bool to be false by default + E->is_A24_computed_and_normalized = false; +} + +void +select_point(ec_point_t *Q, const ec_point_t *P1, const ec_point_t *P2, const digit_t option) +{ // Select points in constant time + // If option = 0 then Q <- P1, else if option = 0xFF...FF then Q <- P2 + fp2_select(&(Q->x), &(P1->x), &(P2->x), option); + fp2_select(&(Q->z), &(P1->z), &(P2->z), option); +} + +void +cswap_points(ec_point_t *P, ec_point_t *Q, const digit_t option) +{ // Swap points in constant time + // If option = 0 then P <- P and Q <- Q, else if option = 0xFF...FF then P <- Q and Q <- P + fp2_cswap(&(P->x), &(Q->x), option); + fp2_cswap(&(P->z), &(Q->z), option); +} + +void +ec_normalize_point(ec_point_t *P) +{ + fp2_inv(&P->z); + fp2_mul(&P->x, &P->x, &P->z); + fp2_set_one(&(P->z)); +} + +void +ec_normalize_curve(ec_curve_t *E) +{ + fp2_inv(&E->C); + fp2_mul(&E->A, &E->A, &E->C); + fp2_set_one(&E->C); +} + +void +ec_curve_normalize_A24(ec_curve_t 
*E) +{ + if (!E->is_A24_computed_and_normalized) { + AC_to_A24(&E->A24, E); + ec_normalize_point(&E->A24); + E->is_A24_computed_and_normalized = true; + } + assert(fp2_is_one(&E->A24.z)); +} + +void +ec_normalize_curve_and_A24(ec_curve_t *E) +{ // Neither the curve or A24 are guaranteed to be normalized. + // First we normalize (A/C : 1) and conditionally compute + if (!fp2_is_one(&E->C)) { + ec_normalize_curve(E); + } + + if (!E->is_A24_computed_and_normalized) { + // Now compute A24 = ((A + 2) / 4 : 1) + fp2_add_one(&E->A24.x, &E->A); // re(A24.x) = re(A) + 1 + fp2_add_one(&E->A24.x, &E->A24.x); // re(A24.x) = re(A) + 2 + fp_copy(&E->A24.x.im, &E->A.im); // im(A24.x) = im(A) + + fp2_half(&E->A24.x, &E->A24.x); // (A + 2) / 2 + fp2_half(&E->A24.x, &E->A24.x); // (A + 2) / 4 + fp2_set_one(&E->A24.z); + + E->is_A24_computed_and_normalized = true; + } +} + +uint32_t +ec_is_zero(const ec_point_t *P) +{ + return fp2_is_zero(&P->z); +} + +uint32_t +ec_has_zero_coordinate(const ec_point_t *P) +{ + return fp2_is_zero(&P->x) | fp2_is_zero(&P->z); +} + +uint32_t +ec_is_equal(const ec_point_t *P, const ec_point_t *Q) +{ // Evaluate if two points in Montgomery coordinates (X:Z) are equal + // Returns 0xFFFFFFFF (true) if P=Q, 0 (false) otherwise + fp2_t t0, t1; + + // Check if P, Q are the points at infinity + uint32_t l_zero = ec_is_zero(P); + uint32_t r_zero = ec_is_zero(Q); + + // Check if PX * QZ = QX * PZ + fp2_mul(&t0, &P->x, &Q->z); + fp2_mul(&t1, &P->z, &Q->x); + uint32_t lr_equal = fp2_is_equal(&t0, &t1); + + // Points are equal if + // - Both are zero, or + // - neither are zero AND PX * QZ = QX * PZ + return (l_zero & r_zero) | (~l_zero & ~r_zero * lr_equal); +} + +uint32_t +ec_is_two_torsion(const ec_point_t *P, const ec_curve_t *E) +{ + if (ec_is_zero(P)) + return 0; + + uint32_t x_is_zero, tmp_is_zero; + fp2_t t0, t1, t2; + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + fp2_add(&t1, &t0, &t1); + fp2_mul(&t2, &t2, &E->A); + fp2_mul(&t1, &t1, &E->C); + fp2_add(&t1, &t1, &t1); + fp2_add(&t0, &t1, &t2); // 4 (CX^2+CZ^2+AXZ) + + x_is_zero = fp2_is_zero(&P->x); + tmp_is_zero = fp2_is_zero(&t0); + + // two torsion if x or x^2 + Ax + 1 is zero + return x_is_zero | tmp_is_zero; +} + +uint32_t +ec_is_four_torsion(const ec_point_t *P, const ec_curve_t *E) +{ + ec_point_t test; + xDBL_A24(&test, P, &E->A24, E->is_A24_computed_and_normalized); + return ec_is_two_torsion(&test, E); +} + +uint32_t +ec_is_basis_four_torsion(const ec_basis_t *B, const ec_curve_t *E) +{ // Check if basis points (P, Q) form a full 2^t-basis + ec_point_t P2, Q2; + xDBL_A24(&P2, &B->P, &E->A24, E->is_A24_computed_and_normalized); + xDBL_A24(&Q2, &B->Q, &E->A24, E->is_A24_computed_and_normalized); + return (ec_is_two_torsion(&P2, E) & ec_is_two_torsion(&Q2, E) & ~ec_is_equal(&P2, &Q2)); +} + +int +ec_curve_verify_A(const fp2_t *A) +{ // Verify the Montgomery coefficient A is valid (A^2-4 \ne 0) + // Return 1 if curve is valid, 0 otherwise + fp2_t t; + fp2_set_one(&t); + fp_add(&t.re, &t.re, &t.re); // t=2 + if (fp2_is_equal(A, &t)) + return 0; + fp_neg(&t.re, &t.re); // t=-2 + if (fp2_is_equal(A, &t)) + return 0; + return 1; +} + +int +ec_curve_init_from_A(ec_curve_t *E, const fp2_t *A) +{ // Initialize the curve from the A coefficient and check it is valid + // Return 1 if curve is valid, 0 otherwise + ec_curve_init(E); + fp2_copy(&E->A, A); // Set A + return ec_curve_verify_A(A); +} + +void +ec_j_inv(fp2_t *j_inv, const ec_curve_t *curve) +{ 
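+    // The value computed below is the standard Montgomery j-invariant
+    //     j(E) = 256 * (A^2 - 3*C^2)^3 / (C^4 * (A^2 - 4*C^2)),
+    // evaluated with a single field inversion: t0 accumulates A^2 - 3C^2 and then the numerator,
+    // while j_inv temporarily holds the denominator before being inverted and multiplied in.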
// j-invariant computation for Montgommery coefficient A2=(A+2C:4C) + fp2_t t0, t1; + + fp2_sqr(&t1, &curve->C); + fp2_sqr(j_inv, &curve->A); + fp2_add(&t0, &t1, &t1); + fp2_sub(&t0, j_inv, &t0); + fp2_sub(&t0, &t0, &t1); + fp2_sub(j_inv, &t0, &t1); + fp2_sqr(&t1, &t1); + fp2_mul(j_inv, j_inv, &t1); + fp2_add(&t0, &t0, &t0); + fp2_add(&t0, &t0, &t0); + fp2_sqr(&t1, &t0); + fp2_mul(&t0, &t0, &t1); + fp2_add(&t0, &t0, &t0); + fp2_add(&t0, &t0, &t0); + fp2_inv(j_inv); + fp2_mul(j_inv, &t0, j_inv); +} + +void +xDBL_E0(ec_point_t *Q, const ec_point_t *P) +{ // Doubling of a Montgomery point in projective coordinates (X:Z) on the curve E0 with (A:C) = (0:1). + // Input: projective Montgomery x-coordinates P = (XP:ZP), where xP=XP/ZP, and Montgomery curve constants (A:C) = (0:1). + // Output: projective Montgomery x-coordinates Q <- 2*P = (XQ:ZQ) such that x(2P)=XQ/ZQ. + fp2_t t0, t1, t2; + + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + fp2_add(&t1, &t1, &t1); + fp2_mul(&Q->x, &t0, &t1); + fp2_add(&Q->z, &t1, &t2); + fp2_mul(&Q->z, &Q->z, &t2); +} + +void +xDBL(ec_point_t *Q, const ec_point_t *P, const ec_point_t *AC) +{ // Doubling of a Montgomery point in projective coordinates (X:Z). Computation of coefficient values A+2C and 4C + // on-the-fly. + // Input: projective Montgomery x-coordinates P = (XP:ZP), where xP=XP/ZP, and Montgomery curve constants (A:C). + // Output: projective Montgomery x-coordinates Q <- 2*P = (XQ:ZQ) such that x(2P)=XQ/ZQ. + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + fp2_add(&t3, &AC->z, &AC->z); + fp2_mul(&t1, &t1, &t3); + fp2_add(&t1, &t1, &t1); + fp2_mul(&Q->x, &t0, &t1); + fp2_add(&t0, &t3, &AC->x); + fp2_mul(&t0, &t0, &t2); + fp2_add(&t0, &t0, &t1); + fp2_mul(&Q->z, &t0, &t2); +} + +void +xDBL_A24(ec_point_t *Q, const ec_point_t *P, const ec_point_t *A24, const bool A24_normalized) +{ // Doubling of a Montgomery point in projective coordinates (X:Z). + // Input: projective Montgomery x-coordinates P = (XP:ZP), where xP=XP/ZP, and + // the Montgomery curve constants A24 = (A+2C:4C) (or A24 = (A+2C/4C:1) if normalized). + // Output: projective Montgomery x-coordinates Q <- 2*P = (XQ:ZQ) such that x(2P)=XQ/ZQ. + fp2_t t0, t1, t2; + + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + if (!A24_normalized) + fp2_mul(&t1, &t1, &A24->z); + fp2_mul(&Q->x, &t0, &t1); + fp2_mul(&t0, &t2, &A24->x); + fp2_add(&t0, &t0, &t1); + fp2_mul(&Q->z, &t0, &t2); +} + +void +xADD(ec_point_t *R, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ) +{ // Differential addition of Montgomery points in projective coordinates (X:Z). + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, and difference + // PQ=P-Q=(XPQ:ZPQ). + // Output: projective Montgomery point R <- P+Q = (XR:ZR) such that x(P+Q)=XR/ZR. 
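+    // Writing U = (XP + ZP)*(XQ - ZQ) and V = (XP - ZP)*(XQ + ZQ), the code below computes
+    //     XR = ZPQ * (U + V)^2 and ZR = XPQ * (U - V)^2,
+    // the usual x-only Montgomery differential addition (4M + 2S, no curve constant needed).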
+ fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_add(&t2, &Q->x, &Q->z); + fp2_sub(&t3, &Q->x, &Q->z); + fp2_mul(&t0, &t0, &t3); + fp2_mul(&t1, &t1, &t2); + fp2_add(&t2, &t0, &t1); + fp2_sub(&t3, &t0, &t1); + fp2_sqr(&t2, &t2); + fp2_sqr(&t3, &t3); + fp2_mul(&t2, &PQ->z, &t2); + fp2_mul(&R->z, &PQ->x, &t3); + fp2_copy(&R->x, &t2); +} + +void +xDBLADD(ec_point_t *R, + ec_point_t *S, + const ec_point_t *P, + const ec_point_t *Q, + const ec_point_t *PQ, + const ec_point_t *A24, + const bool A24_normalized) +{ // Simultaneous doubling and differential addition. + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, the difference + // PQ=P-Q=(XPQ:ZPQ), and the Montgomery curve constants A24 = (A+2C:4C) (or A24 = (A+2C/4C:1) if normalized). + // Output: projective Montgomery points R <- 2*P = (XR:ZR) such that x(2P)=XR/ZR, and S <- P+Q = (XS:ZS) such that = + // x(Q+P)=XS/ZS. + fp2_t t0, t1, t2; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&R->x, &t0); + fp2_sub(&t2, &Q->x, &Q->z); + fp2_add(&S->x, &Q->x, &Q->z); + fp2_mul(&t0, &t0, &t2); + fp2_sqr(&R->z, &t1); + fp2_mul(&t1, &t1, &S->x); + fp2_sub(&t2, &R->x, &R->z); + if (!A24_normalized) + fp2_mul(&R->z, &R->z, &A24->z); + fp2_mul(&R->x, &R->x, &R->z); + fp2_mul(&S->x, &A24->x, &t2); + fp2_sub(&S->z, &t0, &t1); + fp2_add(&R->z, &R->z, &S->x); + fp2_add(&S->x, &t0, &t1); + fp2_mul(&R->z, &R->z, &t2); + fp2_sqr(&S->z, &S->z); + fp2_sqr(&S->x, &S->x); + fp2_mul(&S->z, &S->z, &PQ->x); + fp2_mul(&S->x, &S->x, &PQ->z); +} + +void +xMUL(ec_point_t *Q, const ec_point_t *P, const digit_t *k, const int kbits, const ec_curve_t *curve) +{ // The Montgomery ladder + // Input: projective Montgomery point P=(XP:ZP) such that xP=XP/ZP, a scalar k of bitlength kbits, and + // the Montgomery curve constants (A:C) (or A24 = (A+2C/4C:1) if normalized). + // Output: projective Montgomery points Q <- k*P = (XQ:ZQ) such that x(k*P)=XQ/ZQ. + ec_point_t R0, R1, A24; + digit_t mask; + unsigned int bit, prevbit = 0, swap; + + if (!curve->is_A24_computed_and_normalized) { + // Computation of A24=(A+2C:4C) + fp2_add(&A24.x, &curve->C, &curve->C); + fp2_add(&A24.z, &A24.x, &A24.x); + fp2_add(&A24.x, &A24.x, &curve->A); + } else { + fp2_copy(&A24.x, &curve->A24.x); + fp2_copy(&A24.z, &curve->A24.z); + // Assert A24 has been normalised + assert(fp2_is_one(&A24.z)); + } + + // R0 <- (1:0), R1 <- P + ec_point_init(&R0); + fp2_copy(&R1.x, &P->x); + fp2_copy(&R1.z, &P->z); + + // Main loop + for (int i = kbits - 1; i >= 0; i--) { + bit = (k[i >> LOG2RADIX] >> (i & (RADIX - 1))) & 1; + swap = bit ^ prevbit; + prevbit = bit; + mask = 0 - (digit_t)swap; + + cswap_points(&R0, &R1, mask); + xDBLADD(&R0, &R1, &R0, &R1, P, &A24, true); + } + swap = 0 ^ prevbit; + mask = 0 - (digit_t)swap; + cswap_points(&R0, &R1, mask); + + fp2_copy(&Q->x, &R0.x); + fp2_copy(&Q->z, &R0.z); +} + +int +xDBLMUL(ec_point_t *S, + const ec_point_t *P, + const digit_t *k, + const ec_point_t *Q, + const digit_t *l, + const ec_point_t *PQ, + const int kbits, + const ec_curve_t *curve) +{ // The Montgomery biladder + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, scalars k and l of + // bitlength kbits, the difference PQ=P-Q=(XPQ:ZPQ), and the Montgomery curve constants (A:C). + // Output: projective Montgomery point S <- k*P + l*Q = (XS:ZS) such that x(k*P + l*Q)=XS/ZS. 
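+    // Two-dimensional ladder: both scalars are first made odd and recoded into r[], and three
+    // accumulators R[0..2] are maintained so that the differences needed by the two differential
+    // additions of each iteration are always available, either one of the input points
+    // (DIFF1a/DIFF1b, initially P and Q) or their sum and difference (DIFF2a/DIFF2b, initially
+    // P+Q and P-Q). Each bit pair therefore costs one doubling and two differential additions,
+    // and every scalar-dependent choice is made with constant-time selects and swaps.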
+ + int i, A_is_zero; + digit_t evens, mevens, bitk0, bitl0, maskk, maskl, temp, bs1_ip1, bs2_ip1, bs1_i, bs2_i, h; + digit_t sigma[2] = { 0 }, pre_sigma = 0; + digit_t k_t[NWORDS_ORDER], l_t[NWORDS_ORDER], one[NWORDS_ORDER] = { 0 }, r[2 * BITS] = { 0 }; + ec_point_t DIFF1a, DIFF1b, DIFF2a, DIFF2b, R[3] = { 0 }, T[3]; + + // differential additions formulas are invalid in this case + if (ec_has_zero_coordinate(P) | ec_has_zero_coordinate(Q) | ec_has_zero_coordinate(PQ)) + return 0; + + // Derive sigma according to parity + bitk0 = (k[0] & 1); + bitl0 = (l[0] & 1); + maskk = 0 - bitk0; // Parity masks: 0 if even, otherwise 1...1 + maskl = 0 - bitl0; + sigma[0] = (bitk0 ^ 1); + sigma[1] = (bitl0 ^ 1); + evens = sigma[0] + sigma[1]; // Count number of even scalars + mevens = 0 - (evens & 1); // Mask mevens <- 0 if # even of scalars = 0 or 2, otherwise mevens = 1...1 + + // If k and l are both even or both odd, pick sigma = (0,1) + sigma[0] = (sigma[0] & mevens); + sigma[1] = (sigma[1] & mevens) | (1 & ~mevens); + + // Convert even scalars to odd + one[0] = 1; + mp_sub(k_t, k, one, NWORDS_ORDER); + mp_sub(l_t, l, one, NWORDS_ORDER); + select_ct(k_t, k_t, k, maskk, NWORDS_ORDER); + select_ct(l_t, l_t, l, maskl, NWORDS_ORDER); + + // Scalar recoding + for (i = 0; i < kbits; i++) { + // If sigma[0] = 1 swap k_t and l_t + maskk = 0 - (sigma[0] ^ pre_sigma); + swap_ct(k_t, l_t, maskk, NWORDS_ORDER); + + if (i == kbits - 1) { + bs1_ip1 = 0; + bs2_ip1 = 0; + } else { + bs1_ip1 = mp_shiftr(k_t, 1, NWORDS_ORDER); + bs2_ip1 = mp_shiftr(l_t, 1, NWORDS_ORDER); + } + bs1_i = k_t[0] & 1; + bs2_i = l_t[0] & 1; + + r[2 * i] = bs1_i ^ bs1_ip1; + r[2 * i + 1] = bs2_i ^ bs2_ip1; + + // Revert sigma if second bit, r_(2i+1), is 1 + pre_sigma = sigma[0]; + maskk = 0 - r[2 * i + 1]; + select_ct(&temp, &sigma[0], &sigma[1], maskk, 1); + select_ct(&sigma[1], &sigma[1], &sigma[0], maskk, 1); + sigma[0] = temp; + } + + // Point initialization + ec_point_init(&R[0]); + maskk = 0 - sigma[0]; + select_point(&R[1], P, Q, maskk); + select_point(&R[2], Q, P, maskk); + + fp2_copy(&DIFF1a.x, &R[1].x); + fp2_copy(&DIFF1a.z, &R[1].z); + fp2_copy(&DIFF1b.x, &R[2].x); + fp2_copy(&DIFF1b.z, &R[2].z); + + // Initialize DIFF2a <- P+Q, DIFF2b <- P-Q + xADD(&R[2], &R[1], &R[2], PQ); + if (ec_has_zero_coordinate(&R[2])) + return 0; // non valid formulas + + fp2_copy(&DIFF2a.x, &R[2].x); + fp2_copy(&DIFF2a.z, &R[2].z); + fp2_copy(&DIFF2b.x, &PQ->x); + fp2_copy(&DIFF2b.z, &PQ->z); + + A_is_zero = fp2_is_zero(&curve->A); + + // Main loop + for (i = kbits - 1; i >= 0; i--) { + h = r[2 * i] + r[2 * i + 1]; // in {0, 1, 2} + maskk = 0 - (h & 1); + select_point(&T[0], &R[0], &R[1], maskk); + maskk = 0 - (h >> 1); + select_point(&T[0], &T[0], &R[2], maskk); + if (A_is_zero) { + xDBL_E0(&T[0], &T[0]); + } else { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(&T[0], &T[0], &curve->A24, true); + } + + maskk = 0 - r[2 * i + 1]; // in {0, 1} + select_point(&T[1], &R[0], &R[1], maskk); + select_point(&T[2], &R[1], &R[2], maskk); + + cswap_points(&DIFF1a, &DIFF1b, maskk); + xADD(&T[1], &T[1], &T[2], &DIFF1a); + xADD(&T[2], &R[0], &R[2], &DIFF2a); + + // If hw (mod 2) = 1 then swap DIFF2a and DIFF2b + maskk = 0 - (h & 1); + cswap_points(&DIFF2a, &DIFF2b, maskk); + + // R <- T + copy_point(&R[0], &T[0]); + copy_point(&R[1], &T[1]); + copy_point(&R[2], &T[2]); + } + + // Output R[evens] + select_point(S, &R[0], &R[1], mevens); + + maskk = 0 - (bitk0 & bitl0); + select_point(S, S, &R[2], maskk); + return 1; +} + +int +ec_ladder3pt(ec_point_t *R, + const 
digit_t *m, + const ec_point_t *P, + const ec_point_t *Q, + const ec_point_t *PQ, + const ec_curve_t *E) +{ // The 3-point Montgomery ladder + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, a scalar k of + // bitlength kbits, the difference PQ=P-Q=(XPQ:ZPQ), and the Montgomery curve constants A24 = (A+2C/4C:1). + // Output: projective Montgomery point R <- P + m*Q = (XR:ZR) such that x(P + m*Q)=XR/ZR. + assert(E->is_A24_computed_and_normalized); + if (!fp2_is_one(&E->A24.z)) { + return 0; + } + // Formulas are not valid in that case + if (ec_has_zero_coordinate(PQ)) { + return 0; + } + + ec_point_t X0, X1, X2; + copy_point(&X0, Q); + copy_point(&X1, P); + copy_point(&X2, PQ); + + int i, j; + digit_t t; + for (i = 0; i < NWORDS_ORDER; i++) { + t = 1; + for (j = 0; j < RADIX; j++) { + cswap_points(&X1, &X2, -((t & m[i]) == 0)); + xDBLADD(&X0, &X1, &X0, &X1, &X2, &E->A24, true); + cswap_points(&X1, &X2, -((t & m[i]) == 0)); + t <<= 1; + }; + }; + copy_point(R, &X1); + return 1; +} + +// WRAPPERS to export + +void +ec_dbl(ec_point_t *res, const ec_point_t *P, const ec_curve_t *curve) +{ + // If A24 = ((A+2)/4 : 1) we save multiplications + if (curve->is_A24_computed_and_normalized) { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(res, P, &curve->A24, true); + } else { + // Otherwise we compute A24 on the fly for doubling + xDBL(res, P, (const ec_point_t *)curve); + } +} + +void +ec_dbl_iter(ec_point_t *res, int n, const ec_point_t *P, ec_curve_t *curve) +{ + if (n == 0) { + copy_point(res, P); + return; + } + + // When the chain is long enough, we should normalise A24 + if (n > 50) { + ec_curve_normalize_A24(curve); + } + + // When A24 is normalized we can save some multiplications + if (curve->is_A24_computed_and_normalized) { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(res, P, &curve->A24, true); + for (int i = 0; i < n - 1; i++) { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(res, res, &curve->A24, true); + } + } else { + // Otherwise we do normal doubling + xDBL(res, P, (const ec_point_t *)curve); + for (int i = 0; i < n - 1; i++) { + xDBL(res, res, (const ec_point_t *)curve); + } + } +} + +void +ec_dbl_iter_basis(ec_basis_t *res, int n, const ec_basis_t *B, ec_curve_t *curve) +{ + ec_dbl_iter(&res->P, n, &B->P, curve); + ec_dbl_iter(&res->Q, n, &B->Q, curve); + ec_dbl_iter(&res->PmQ, n, &B->PmQ, curve); +} + +void +ec_mul(ec_point_t *res, const digit_t *scalar, const int kbits, const ec_point_t *P, ec_curve_t *curve) +{ + // For large scalars it's worth normalising anyway + if (kbits > 50) { + ec_curve_normalize_A24(curve); + } + + // When A24 is computed and normalized we save some Fp2 multiplications + xMUL(res, P, scalar, kbits, curve); +} + +int +ec_biscalar_mul(ec_point_t *res, + const digit_t *scalarP, + const digit_t *scalarQ, + const int kbits, + const ec_basis_t *PQ, + const ec_curve_t *curve) +{ + if (fp2_is_zero(&PQ->PmQ.z)) + return 0; + + /* Differential additions behave badly when PmQ = (0:1), so we need to + * treat this case specifically. 
Since we assume P, Q are a basis, this + * can happen only if kbits==1 */ + if (kbits == 1) { + // Sanity check: our basis should be given by 2-torsion points + if (!ec_is_two_torsion(&PQ->P, curve) || !ec_is_two_torsion(&PQ->Q, curve) || + !ec_is_two_torsion(&PQ->PmQ, curve)) + return 0; + digit_t bP, bQ; + bP = (scalarP[0] & 1); + bQ = (scalarQ[0] & 1); + if (bP == 0 && bQ == 0) + ec_point_init(res); //(1: 0) + else if (bP == 1 && bQ == 0) + copy_point(res, &PQ->P); + else if (bP == 0 && bQ == 1) + copy_point(res, &PQ->Q); + else if (bP == 1 && bQ == 1) + copy_point(res, &PQ->PmQ); + else // should never happen + assert(0); + return 1; + } else { + ec_curve_t E; + copy_curve(&E, curve); + + if (!fp2_is_zero(&curve->A)) { // If A is not zero normalize + ec_curve_normalize_A24(&E); + } + return xDBLMUL(res, &PQ->P, scalarP, &PQ->Q, scalarQ, &PQ->PmQ, kbits, (const ec_curve_t *)&E); + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec.h new file mode 100644 index 0000000000..ee2be38060 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec.h @@ -0,0 +1,668 @@ +/** @file + * + * @authors Luca De Feo, Francisco RH + * + * @brief Elliptic curve stuff + */ + +#ifndef EC_H +#define EC_H +#include +#include +#include +#include +#include + +/** @defgroup ec Elliptic curves + * @{ + */ + +/** @defgroup ec_t Data structures + * @{ + */ + +/** @brief Projective point on the Kummer line E/pm 1 in Montgomery coordinates + * + * @typedef ec_point_t + * + * @struct ec_point_t + * + * A projective point in (X:Z) or (X:Y:Z) coordinates (tbd). + */ +typedef struct ec_point_t +{ + fp2_t x; + fp2_t z; +} ec_point_t; + +/** @brief Projective point in Montgomery coordinates + * + * @typedef jac_point_t + * + * @struct jac_point_t + * + * A projective point in (X:Y:Z) coordinates + */ +typedef struct jac_point_t +{ + fp2_t x; + fp2_t y; + fp2_t z; +} jac_point_t; + +/** @brief Addition components + * + * @typedef add_components_t + * + * @struct add_components_t + * + * 3 components u,v,w that define the (X:Z) coordinates of both + * addition and substraction of two distinct points with + * P+Q =(u-v:w) and P-Q = (u+v=w) + */ +typedef struct add_components_t +{ + fp2_t u; + fp2_t v; + fp2_t w; +} add_components_t; + +/** @brief A basis of a torsion subgroup + * + * @typedef ec_basis_t + * + * @struct ec_basis_t + * + * A pair of points (or a triplet, tbd) forming a basis of a torsion subgroup. 
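+ *
+ * The difference point PmQ = P - Q is stored alongside P and Q because the x-only
+ * routines below (xADD, ec_ladder3pt, ec_biscalar_mul) are built on differential
+ * additions and therefore need x(P - Q) as an extra input.
+ *
+ * A minimal usage sketch (variable names are illustrative only): a linear
+ * combination of the basis points can be computed as
+ *
+ *     ec_point_t K;
+ *     if (!ec_biscalar_mul(&K, scalarP, scalarQ, kbits, &basis, &curve))
+ *         ; // handle the degenerate case, see ec_biscalar_mul() below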
+ */ +typedef struct ec_basis_t +{ + ec_point_t P; + ec_point_t Q; + ec_point_t PmQ; +} ec_basis_t; + +/** @brief An elliptic curve + * + * @typedef ec_curve_t + * + * @struct ec_curve_t + * + * An elliptic curve in projective Montgomery form + */ +typedef struct ec_curve_t +{ + fp2_t A; + fp2_t C; ///< cannot be 0 + ec_point_t A24; // the point (A+2 : 4C) + bool is_A24_computed_and_normalized; // says if A24 has been computed and normalized +} ec_curve_t; + +/** @brief An isogeny of degree a power of 2 + * + * @typedef ec_isog_even_t + * + * @struct ec_isog_even_t + */ +typedef struct ec_isog_even_t +{ + ec_curve_t curve; ///< The domain curve + ec_point_t kernel; ///< A kernel generator + unsigned length; ///< The length as a 2-isogeny walk +} ec_isog_even_t; + +/** @brief Isomorphism of Montgomery curves + * + * @typedef ec_isom_t + * + * @struct ec_isom_t + * + * The isomorphism is given by the map maps (X:Z) ↦ ( (Nx X + Nz Z) : (D Z) ) + */ +typedef struct ec_isom_t +{ + fp2_t Nx; + fp2_t Nz; + fp2_t D; +} ec_isom_t; + +// end ec_t +/** @} + */ + +/** @defgroup ec_curve_t Curves and isomorphisms + * @{ + */ + +// Initalisation for curves and points +void ec_curve_init(ec_curve_t *E); +void ec_point_init(ec_point_t *P); + +/** + * @brief Verify that a Montgomery coefficient is valid + * + * @param A an fp2_t + * + * @return 0 if curve is invalid, 1 otherwise + */ +int ec_curve_verify_A(const fp2_t *A); + +/** + * @brief Initialize an elliptic curve from a coefficient + * + * @param A an fp2_t + * @param E the elliptic curve to initialize + * + * @return 0 if curve is invalid, 1 otherwise + */ +int ec_curve_init_from_A(ec_curve_t *E, const fp2_t *A); + +// Copying points, bases and curves +static inline void +copy_point(ec_point_t *P, const ec_point_t *Q) +{ + fp2_copy(&P->x, &Q->x); + fp2_copy(&P->z, &Q->z); +} + +static inline void +copy_basis(ec_basis_t *B1, const ec_basis_t *B0) +{ + copy_point(&B1->P, &B0->P); + copy_point(&B1->Q, &B0->Q); + copy_point(&B1->PmQ, &B0->PmQ); +} + +static inline void +copy_curve(ec_curve_t *E1, const ec_curve_t *E2) +{ + fp2_copy(&(E1->A), &(E2->A)); + fp2_copy(&(E1->C), &(E2->C)); + E1->is_A24_computed_and_normalized = E2->is_A24_computed_and_normalized; + copy_point(&E1->A24, &E2->A24); +} + +// Functions for working with the A24 point and normalisation + +/** + * @brief Reduce (A : C) to (A/C : 1) in place + * + * @param E a curve + */ +void ec_normalize_curve(ec_curve_t *E); + +/** + * @brief Reduce (A + 2 : 4C) to ((A+2)/4C : 1) in place + * + * @param E a curve + */ +void ec_curve_normalize_A24(ec_curve_t *E); + +/** + * @brief Normalise both (A : C) and (A + 2 : 4C) as above, in place + * + * @param E a curve + */ +void ec_normalize_curve_and_A24(ec_curve_t *E); + +/** + * @brief Given a curve E, compute (A+2 : 4C) + * + * @param A24 the value (A+2 : 4C) to return into + * @param E a curve + */ +static inline void +AC_to_A24(ec_point_t *A24, const ec_curve_t *E) +{ + // Maybe we already have this computed + if (E->is_A24_computed_and_normalized) { + copy_point(A24, &E->A24); + return; + } + + // A24 = (A+2C : 4C) + fp2_add(&A24->z, &E->C, &E->C); + fp2_add(&A24->x, &E->A, &A24->z); + fp2_add(&A24->z, &A24->z, &A24->z); +} + +/** + * @brief Given a curve the point (A+2 : 4C) compute the curve coefficients (A : C) + * + * @param E a curve to compute + * @param A24 the value (A+2 : 4C) + */ +static inline void +A24_to_AC(ec_curve_t *E, const ec_point_t *A24) +{ + // (A:C) = ((A+2C)*2-4C : 4C) + fp2_add(&E->A, &A24->x, &A24->x); + fp2_sub(&E->A, 
&E->A, &A24->z);
+    fp2_add(&E->A, &E->A, &E->A);
+    fp2_copy(&E->C, &A24->z);
+}
+
+/**
+ * @brief j-invariant.
+ *
+ * @param j_inv computed j-invariant
+ * @param curve input curve
+ */
+void ec_j_inv(fp2_t *j_inv, const ec_curve_t *curve);
+
+/**
+ * @brief Isomorphism of elliptic curves
+ * Takes as input two isomorphic Kummer lines in Montgomery form, and outputs an isomorphism between
+ * them
+ *
+ * @param isom computed isomorphism
+ * @param from domain curve
+ * @param to image curve
+ * @return 0xFFFFFFFF if there was an error during the computation, zero otherwise
+ */
+uint32_t ec_isomorphism(ec_isom_t *isom, const ec_curve_t *from, const ec_curve_t *to);
+
+/**
+ * @brief In-place evaluation of an isomorphism
+ *
+ * @param P a point
+ * @param isom an isomorphism
+ */
+void ec_iso_eval(ec_point_t *P, ec_isom_t *isom);
+
+/** @}
+ */
+/** @defgroup ec_point_t Point operations
+ * @{
+ */
+
+/**
+ * @brief Point equality
+ *
+ * @param P a point
+ * @param Q a point
+ * @return 0xFFFFFFFF if equal, zero otherwise
+ */
+uint32_t ec_is_equal(const ec_point_t *P, const ec_point_t *Q);
+
+/**
+ * @brief Test for the point at infinity
+ *
+ * @param P a point
+ * @return 0xFFFFFFFF if point at infinity, zero otherwise
+ */
+uint32_t ec_is_zero(const ec_point_t *P);
+
+/**
+ * @brief Two torsion test
+ *
+ * @param P a point
+ * @param E the elliptic curve
+ * @return 0xFFFFFFFF if P is 2-torsion but not zero, zero otherwise
+ */
+uint32_t ec_is_two_torsion(const ec_point_t *P, const ec_curve_t *E);
+
+/**
+ * @brief Four torsion test
+ *
+ * @param P a point
+ * @param E the elliptic curve
+ * @return 0xFFFFFFFF if P is 4-torsion but not zero, zero otherwise
+ */
+uint32_t ec_is_four_torsion(const ec_point_t *P, const ec_curve_t *E);
+
+/**
+ * @brief Reduce Z-coordinate of point in place
+ *
+ * @param P a point
+ */
+void ec_normalize_point(ec_point_t *P);
+
+void xDBL_E0(ec_point_t *Q, const ec_point_t *P);
+void xADD(ec_point_t *R, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ);
+void xDBL_A24(ec_point_t *Q, const ec_point_t *P, const ec_point_t *A24, const bool A24_normalized);
+
+/**
+ * @brief Point doubling
+ *
+ * @param res computed double of P
+ * @param P a point
+ * @param curve an elliptic curve
+ */
+void ec_dbl(ec_point_t *res, const ec_point_t *P, const ec_curve_t *curve);
+
+/**
+ * @brief Point iterated doubling
+ *
+ * @param res computed iterated double of P
+ * @param P a point
+ * @param n the number of doublings
+ * @param curve the curve on which P lies
+ */
+void ec_dbl_iter(ec_point_t *res, int n, const ec_point_t *P, ec_curve_t *curve);
+
+/**
+ * @brief Iterated doubling for a basis P, Q, PmQ
+ *
+ * @param res the computed iterated double of basis B
+ * @param n the number of doublings
+ * @param B the basis to double
+ * @param curve the parent curve of the basis
+ */
+void ec_dbl_iter_basis(ec_basis_t *res, int n, const ec_basis_t *B, ec_curve_t *curve);
+
+/**
+ * @brief Point multiplication
+ *
+ * @param res computed scalar * P
+ * @param curve the curve
+ * @param scalar an unsigned multi-precision integer
+ * @param P a point
+ * @param kbits number of bits of the scalar
+ */
+void ec_mul(ec_point_t *res, const digit_t *scalar, const int kbits, const ec_point_t *P, ec_curve_t *curve);
+
+/**
+ * @brief Combination P+m*Q
+ *
+ * @param R computed P + m * Q
+ * @param curve the curve
+ * @param m an unsigned multi-precision integer
+ * @param P a point
+ * @param Q a point
+ * @param PQ the difference P-Q
+ * @return 0 if there was an error, 1 otherwise
+ */
+int 
ec_ladder3pt(ec_point_t *R,
+             const digit_t *m,
+             const ec_point_t *P,
+             const ec_point_t *Q,
+             const ec_point_t *PQ,
+             const ec_curve_t *curve);
+
+/**
+ * @brief Linear combination of points of a basis
+ *
+ * @param res computed scalarP * P + scalarQ * Q
+ * @param scalarP an unsigned multi-precision integer
+ * @param scalarQ an unsigned multi-precision integer
+ * @param kbits number of bits of the scalars, or n for points of order 2^n
+ * @param PQ a torsion basis consisting of points P and Q
+ * @param curve the curve
+ *
+ * @return 0 if there was an error, 1 otherwise
+ */
+int ec_biscalar_mul(ec_point_t *res,
+                    const digit_t *scalarP,
+                    const digit_t *scalarQ,
+                    const int kbits,
+                    const ec_basis_t *PQ,
+                    const ec_curve_t *curve);
+
+// end point computations
+/**
+ * @}
+ */
+
+/** @defgroup ec_dlog_t Torsion basis computations
+ * @{
+ */
+
+/**
+ * @brief Generate a 2^f-torsion basis from a Montgomery curve along with a hint
+ *
+ * @param PQ2 an ec_basis_t
+ * @param curve an ec_curve_t
+ * @param f an integer
+ *
+ * @return A hint
+ *
+ * The algorithm is deterministic
+ */
+uint8_t ec_curve_to_basis_2f_to_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f);
+
+/**
+ * @brief Generate a 2^f-torsion basis from a Montgomery curve and a given hint
+ *
+ * @param PQ2 an ec_basis_t
+ * @param curve an ec_curve_t
+ * @param f an integer
+ * @param hint the hint
+ *
+ * @return 1 if the basis is valid, 0 otherwise
+ *
+ * The algorithm is deterministic
+ */
+int ec_curve_to_basis_2f_from_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f, const uint8_t hint);
+/** // end basis computations
+ * @}
+ */
+
+/** @defgroup ec_isog_t Isogenies
+ * @{
+ */
+
+/**
+ * @brief Evaluate isogeny of even degree on list of points.
+ * Returns 0 if successful and -1 if kernel has the wrong order or includes (0:1).
+ *
+ * @param image computed image curve
+ * @param phi isogeny
+ * @param points a list of points to evaluate the isogeny on, modified in place
+ * @param len_points length of the list points
+ *
+ * @return 0 if there was no error, 0xFFFFFFFF otherwise
+ */
+uint32_t ec_eval_even(ec_curve_t *image, ec_isog_even_t *phi, ec_point_t *points, unsigned len_points);
+
+/**
+ * @brief Multiplicative strategy for a short isogeny chain. Returns 0 if successful and -1
+ * if kernel has the wrong order or includes (0:1) when special=false.
+ *
+ * @param curve domain curve, to be overwritten by the codomain curve.
+ * @param kernel a kernel generator of order 2^len
+ * @param len the length of the 2-isogeny chain
+ * @param points a list of points to evaluate the isogeny on, modified in place
+ * @param len_points length of the list points
+ * @param special if true, allow isogenies with (0:1) in the kernel
+ *
+ * @return 0 if there was no error, 0xFFFFFFFF otherwise
+ */
+uint32_t ec_eval_small_chain(ec_curve_t *curve,
+                             const ec_point_t *kernel,
+                             int len,
+                             ec_point_t *points,
+                             unsigned len_points,
+                             bool special);
+
+/**
+ * @brief Recover Y-coordinate from X-coordinate and curve coefficients. 
+ * + * @param y: a y-coordinate + * @param Px: a x-coordinate + * @param curve: the elliptic curve + * + * @return 0xFFFFFFFF if the point was on the curve, 0 otherwise + */ +uint32_t ec_recover_y(fp2_t *y, const fp2_t *Px, const ec_curve_t *curve); + +// Jacobian point init and copying +void jac_init(jac_point_t *P); +void copy_jac_point(jac_point_t *P, const jac_point_t *Q); + +/** + * @brief Test if two Jacobian points are equal + * + * @param P: a point + * @param Q: a point + * + * @return 0xFFFFFFFF if they are equal, 0 otherwise + */ +uint32_t jac_is_equal(const jac_point_t *P, const jac_point_t *Q); + +// Convert from Jacobian to x-only (just drop the Y-coordinate) +void jac_to_xz(ec_point_t *P, const jac_point_t *xyP); +// Convert from Jacobian coordinates in Montgomery model to Weierstrass +void jac_to_ws(jac_point_t *P, fp2_t *t, fp2_t *ao3, const jac_point_t *Q, const ec_curve_t *curve); +void jac_from_ws(jac_point_t *Q, const jac_point_t *P, const fp2_t *ao3, const ec_curve_t *curve); + +// Jacobian arithmetic +void jac_neg(jac_point_t *Q, const jac_point_t *P); +void ADD(jac_point_t *R, const jac_point_t *P, const jac_point_t *Q, const ec_curve_t *AC); +void DBL(jac_point_t *Q, const jac_point_t *P, const ec_curve_t *AC); +void DBLW(jac_point_t *Q, fp2_t *u, const jac_point_t *P, const fp2_t *t); +void jac_to_xz_add_components(add_components_t *uvw, const jac_point_t *P, const jac_point_t *Q, const ec_curve_t *AC); + +/** + * @brief Given a basis in x-only, lift to a pair of Jacobian points + * + * @param P: a point + * @param Q: a point + * @param B: a basis + * @param E: an elliptic curve + * + * @return 0xFFFFFFFF if there was no error, 0 otherwise + * + * + * Lifts a basis x(P), x(Q), x(P-Q) assuming the curve has (A/C : 1) and + * the point P = (X/Z : 1). 
For generic implementation see lift_basis() + */ +uint32_t lift_basis_normalized(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E); + +/** + * @brief Given a basis in x-only, lift to a pair of Jacobian points + * + * @param P: a point + * @param Q: a point + * @param B: a basis + * @param E: an elliptic curve + * + * @return 0xFFFFFFFF if there was no error, 0 otherwise + */ +uint32_t lift_basis(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E); + +/** + * @brief Check if basis points (P, Q) form a full 4-basis + * + * @param B: a basis + * @param E: an elliptic curve + * + * @return 0xFFFFFFFF if they form a basis, 0 otherwise + */ +uint32_t ec_is_basis_four_torsion(const ec_basis_t *B, const ec_curve_t *E); + +/* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Test functions for printing and order checking, only used in debug mode + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + */ + +/** + * @brief Check if a point (X : Z) has order exactly 2^t + * + * @param P: a point + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_point_order_twof(const ec_point_t *P, const ec_curve_t *E, int t) +{ + ec_point_t test; + ec_curve_t curve; + test = *P; + copy_curve(&curve, E); + + if (ec_is_zero(&test)) + return 0; + // Scale point by 2^(t-1) + ec_dbl_iter(&test, t - 1, &test, &curve); + // If it's zero now, it doesnt have order 2^t + if (ec_is_zero(&test)) + return 0; + // Ensure [2^t] P = 0 + ec_dbl(&test, &test, &curve); + return ec_is_zero(&test); +} + +/** + * @brief Check if basis points (P, Q, PmQ) all have order exactly 2^t + * + * @param B: a basis + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_basis_order_twof(const ec_basis_t *B, const ec_curve_t *E, int t) +{ + int check_P = test_point_order_twof(&B->P, E, t); + int check_Q = test_point_order_twof(&B->Q, E, t); + int check_PmQ = test_point_order_twof(&B->PmQ, E, t); + + return check_P & check_Q & check_PmQ; +} + +/** + * @brief Check if a Jacobian point (X : Y : Z) has order exactly 2^f + * + * @param P: a point + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_jac_order_twof(const jac_point_t *P, const ec_curve_t *E, int t) +{ + jac_point_t test; + test = *P; + if (fp2_is_zero(&test.z)) + return 0; + for (int i = 0; i < t - 1; i++) { + DBL(&test, &test, E); + } + if (fp2_is_zero(&test.z)) + return 0; + DBL(&test, &test, E); + return (fp2_is_zero(&test.z)); +} + +// Prints the x-coordinate of the point (X : 1) +static void +ec_point_print(const char *name, ec_point_t P) +{ + fp2_t a; + if (fp2_is_zero(&P.z)) { + printf("%s = INF\n", name); + } else { + fp2_copy(&a, &P.z); + fp2_inv(&a); + fp2_mul(&a, &a, &P.x); + fp2_print(name, &a); + } +} + +// Prints the Montgomery coefficient A +static void +ec_curve_print(const char *name, ec_curve_t E) +{ + fp2_t a; + fp2_copy(&a, &E.C); + fp2_inv(&a); + fp2_mul(&a, &a, &E.A); + fp2_print(name, &a); +} + +#endif +// end isogeny computations +/** + * @} + */ + +// end ec +/** + * @} + */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec_jac.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec_jac.c new file mode 100644 index 0000000000..20ca68c9b2 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec_jac.c @@ -0,0 
+1,335 @@ +#include +#include + +void +jac_init(jac_point_t *P) +{ // Initialize Montgomery in Jacobian coordinates as identity element (0:1:0) + fp2_set_zero(&P->x); + fp2_set_one(&P->y); + fp2_set_zero(&P->z); +} + +uint32_t +jac_is_equal(const jac_point_t *P, const jac_point_t *Q) +{ // Evaluate if two points in Jacobian coordinates (X:Y:Z) are equal + // Returns 1 (true) if P=Q, 0 (false) otherwise + fp2_t t0, t1, t2, t3; + + fp2_sqr(&t0, &Q->z); + fp2_mul(&t2, &P->x, &t0); // x1*z2^2 + fp2_sqr(&t1, &P->z); + fp2_mul(&t3, &Q->x, &t1); // x2*z1^2 + fp2_sub(&t2, &t2, &t3); + + fp2_mul(&t0, &t0, &Q->z); + fp2_mul(&t0, &P->y, &t0); // y1*z2^3 + fp2_mul(&t1, &t1, &P->z); + fp2_mul(&t1, &Q->y, &t1); // y2*z1^3 + fp2_sub(&t0, &t0, &t1); + + return fp2_is_zero(&t0) & fp2_is_zero(&t2); +} + +void +jac_to_xz(ec_point_t *P, const jac_point_t *xyP) +{ + fp2_copy(&P->x, &xyP->x); + fp2_copy(&P->z, &xyP->z); + fp2_sqr(&P->z, &P->z); + + // If xyP = (0:1:0), we currently have P=(0 : 0) but we want to set P=(1:0) + uint32_t c1, c2; + fp2_t one; + fp2_set_one(&one); + + c1 = fp2_is_zero(&P->x); + c2 = fp2_is_zero(&P->z); + fp2_select(&P->x, &P->x, &one, c1 & c2); +} + +void +jac_to_ws(jac_point_t *Q, fp2_t *t, fp2_t *ao3, const jac_point_t *P, const ec_curve_t *curve) +{ + // Cost of 3M + 2S when A != 0. + fp_t one; + fp2_t a; + /* a = 1 - A^2/3, U = X + (A*Z^2)/3, V = Y, W = Z, T = a*Z^4*/ + fp_set_one(&one); + if (!fp2_is_zero(&(curve->A))) { + fp_div3(&(ao3->re), &(curve->A.re)); + fp_div3(&(ao3->im), &(curve->A.im)); + fp2_sqr(t, &P->z); + fp2_mul(&Q->x, ao3, t); + fp2_add(&Q->x, &Q->x, &P->x); + fp2_sqr(t, t); + fp2_mul(&a, ao3, &(curve->A)); + fp_sub(&(a.re), &one, &(a.re)); + fp_neg(&(a.im), &(a.im)); + fp2_mul(t, t, &a); + } else { + fp2_copy(&Q->x, &P->x); + fp2_sqr(t, &P->z); + fp2_sqr(t, t); + } + fp2_copy(&Q->y, &P->y); + fp2_copy(&Q->z, &P->z); +} + +void +jac_from_ws(jac_point_t *Q, const jac_point_t *P, const fp2_t *ao3, const ec_curve_t *curve) +{ + // Cost of 1M + 1S when A != 0. + fp2_t t; + /* X = U - (A*W^2)/3, Y = V, Z = W. */ + if (!fp2_is_zero(&(curve->A))) { + fp2_sqr(&t, &P->z); + fp2_mul(&t, &t, ao3); + fp2_sub(&Q->x, &P->x, &t); + } + fp2_copy(&Q->y, &P->y); + fp2_copy(&Q->z, &P->z); +} + +void +copy_jac_point(jac_point_t *P, const jac_point_t *Q) +{ + fp2_copy(&(P->x), &(Q->x)); + fp2_copy(&(P->y), &(Q->y)); + fp2_copy(&(P->z), &(Q->z)); +} + +void +jac_neg(jac_point_t *Q, const jac_point_t *P) +{ + fp2_copy(&Q->x, &P->x); + fp2_neg(&Q->y, &P->y); + fp2_copy(&Q->z, &P->z); +} + +void +DBL(jac_point_t *Q, const jac_point_t *P, const ec_curve_t *AC) +{ // Cost of 6M + 6S. 
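+    // The identity is represented as (0:1:0) (see jac_init); the flag computed below
+    // detects X = Z = 0, and the final fp2_select calls are used to map that case back
+    // to the input X and Z, so doubling the identity keeps it fixed in constant time.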
+ // Doubling on a Montgomery curve, representation in Jacobian coordinates (X:Y:Z) corresponding to + // (X/Z^2,Y/Z^3) This version receives the coefficient value A + fp2_t t0, t1, t2, t3; + + uint32_t flag = fp2_is_zero(&P->x) & fp2_is_zero(&P->z); + + fp2_sqr(&t0, &P->x); // t0 = x1^2 + fp2_add(&t1, &t0, &t0); + fp2_add(&t0, &t0, &t1); // t0 = 3x1^2 + fp2_sqr(&t1, &P->z); // t1 = z1^2 + fp2_mul(&t2, &P->x, &AC->A); + fp2_add(&t2, &t2, &t2); // t2 = 2Ax1 + fp2_add(&t2, &t1, &t2); // t2 = 2Ax1+z1^2 + fp2_mul(&t2, &t1, &t2); // t2 = z1^2(2Ax1+z1^2) + fp2_add(&t2, &t0, &t2); // t2 = alpha = 3x1^2 + z1^2(2Ax1+z1^2) + fp2_mul(&Q->z, &P->y, &P->z); + fp2_add(&Q->z, &Q->z, &Q->z); // z2 = 2y1z1 + fp2_sqr(&t0, &Q->z); + fp2_mul(&t0, &t0, &AC->A); // t0 = 4Ay1^2z1^2 + fp2_sqr(&t1, &P->y); + fp2_add(&t1, &t1, &t1); // t1 = 2y1^2 + fp2_add(&t3, &P->x, &P->x); // t3 = 2x1 + fp2_mul(&t3, &t1, &t3); // t3 = 4x1y1^2 + fp2_sqr(&Q->x, &t2); // x2 = alpha^2 + fp2_sub(&Q->x, &Q->x, &t0); // x2 = alpha^2 - 4Ay1^2z1^2 + fp2_sub(&Q->x, &Q->x, &t3); + fp2_sub(&Q->x, &Q->x, &t3); // x2 = alpha^2 - 4Ay1^2z1^2 - 8x1y1^2 + fp2_sub(&Q->y, &t3, &Q->x); // y2 = 4x1y1^2 - x2 + fp2_mul(&Q->y, &Q->y, &t2); // y2 = alpha(4x1y1^2 - x2) + fp2_sqr(&t1, &t1); // t1 = 4y1^4 + fp2_sub(&Q->y, &Q->y, &t1); + fp2_sub(&Q->y, &Q->y, &t1); // y2 = alpha(4x1y1^2 - x2) - 8y1^4 + + fp2_select(&Q->x, &Q->x, &P->x, -flag); + fp2_select(&Q->z, &Q->z, &P->z, -flag); +} + +void +DBLW(jac_point_t *Q, fp2_t *u, const jac_point_t *P, const fp2_t *t) +{ // Cost of 3M + 5S. + // Doubling on a Weierstrass curve, representation in modified Jacobian coordinates + // (X:Y:Z:T=a*Z^4) corresponding to (X/Z^2,Y/Z^3), where a is the curve coefficient. + // Formula from https://hyperelliptic.org/EFD/g1p/auto-shortw-modified.html + + uint32_t flag = fp2_is_zero(&P->x) & fp2_is_zero(&P->z); + + fp2_t xx, c, cc, r, s, m; + // XX = X^2 + fp2_sqr(&xx, &P->x); + // A = 2*Y^2 + fp2_sqr(&c, &P->y); + fp2_add(&c, &c, &c); + // AA = A^2 + fp2_sqr(&cc, &c); + // R = 2*AA + fp2_add(&r, &cc, &cc); + // S = (X+A)^2-XX-AA + fp2_add(&s, &P->x, &c); + fp2_sqr(&s, &s); + fp2_sub(&s, &s, &xx); + fp2_sub(&s, &s, &cc); + // M = 3*XX+T1 + fp2_add(&m, &xx, &xx); + fp2_add(&m, &m, &xx); + fp2_add(&m, &m, t); + // X3 = M^2-2*S + fp2_sqr(&Q->x, &m); + fp2_sub(&Q->x, &Q->x, &s); + fp2_sub(&Q->x, &Q->x, &s); + // Z3 = 2*Y*Z + fp2_mul(&Q->z, &P->y, &P->z); + fp2_add(&Q->z, &Q->z, &Q->z); + // Y3 = M*(S-X3)-R + fp2_sub(&Q->y, &s, &Q->x); + fp2_mul(&Q->y, &Q->y, &m); + fp2_sub(&Q->y, &Q->y, &r); + // T3 = 2*R*T1 + fp2_mul(u, t, &r); + fp2_add(u, u, u); + + fp2_select(&Q->x, &Q->x, &P->x, -flag); + fp2_select(&Q->z, &Q->z, &P->z, -flag); +} + +void +select_jac_point(jac_point_t *Q, const jac_point_t *P1, const jac_point_t *P2, const digit_t option) +{ // Select points + // If option = 0 then Q <- P1, else if option = 0xFF...FF then Q <- P2 + fp2_select(&(Q->x), &(P1->x), &(P2->x), option); + fp2_select(&(Q->y), &(P1->y), &(P2->y), option); + fp2_select(&(Q->z), &(P1->z), &(P2->z), option); +} + +void +ADD(jac_point_t *R, const jac_point_t *P, const jac_point_t *Q, const ec_curve_t *AC) +{ + // Addition on a Montgomery curve, representation in Jacobian coordinates (X:Y:Z) corresponding + // to (x,y) = (X/Z^2,Y/Z^3) This version receives the coefficient value A + // + // Complete routine, to handle all edge cases: + // if ZP == 0: # P == inf + // return Q + // if ZQ == 0: # Q == inf + // return P + // dy <- YQ*ZP**3 - YP*ZQ**3 + // dx <- XQ*ZP**2 - XP*ZQ**2 + // if dx == 0: # x1 == x2 + 
//        if dy == 0:       # ... and y1 == y2: doubling case
+    //            dy <- ZP*ZQ * (3*XP^2 + ZP^2 * (2*A*XP + ZP^2))
+    //            dx <- 2*YP*ZP
+    //        else:           # ... but y1 != y2, thus P = -Q
+    //            return inf
+    //    XR <- dy**2 - dx**2 * (A*ZP^2*ZQ^2 + XP*ZQ^2 + XQ*ZP^2)
+    //    YR <- dy * (XP*ZQ^2 * dx^2 - XR) - YP*ZQ^3 * dx^3
+    //    ZR <- dx * ZP * ZQ
+
+    // Constant time processing:
+    // - The case for P == 0 or Q == 0 is handled at the end with conditional select
+    // - dy and dx are computed for both the normal and doubling cases, we switch when
+    //   dx == dy == 0 for the normal case.
+    // - If we have that P = -Q then dx = 0 and so ZR will be zero, giving us the point
+    //   at infinity for "free".
+    //
+    // These formulas are relatively expensive; there is probably room to save a few operations.
+    // Cost 17M + 6S + 13a
+    fp2_t t0, t1, t2, t3, u1, u2, v1, dx, dy;
+
+    /* If P is zero or Q is zero we will conditionally swap before returning. */
+    uint32_t ctl1 = fp2_is_zero(&P->z);
+    uint32_t ctl2 = fp2_is_zero(&Q->z);
+
+    /* Precompute some values */
+    fp2_sqr(&t0, &P->z); // t0 = z1^2
+    fp2_sqr(&t1, &Q->z); // t1 = z2^2
+
+    /* Compute dy and dx for ordinary case */
+    fp2_mul(&v1, &t1, &Q->z); // v1 = z2^3
+    fp2_mul(&t2, &t0, &P->z); // t2 = z1^3
+    fp2_mul(&v1, &v1, &P->y); // v1 = y1z2^3
+    fp2_mul(&t2, &t2, &Q->y); // t2 = y2z1^3
+    fp2_sub(&dy, &t2, &v1);   // dy = y2z1^3 - y1z2^3
+    fp2_mul(&u2, &t0, &Q->x); // u2 = x2z1^2
+    fp2_mul(&u1, &t1, &P->x); // u1 = x1z2^2
+    fp2_sub(&dx, &u2, &u1);   // dx = x2z1^2 - x1z2^2
+
+    /* Compute dy and dx for doubling case */
+    fp2_add(&t1, &P->y, &P->y);   // dx_dbl = t1 = 2y1
+    fp2_add(&t2, &AC->A, &AC->A); // t2 = 2A
+    fp2_mul(&t2, &t2, &P->x);     // t2 = 2Ax1
+    fp2_add(&t2, &t2, &t0);       // t2 = 2Ax1 + z1^2
+    fp2_mul(&t2, &t2, &t0);       // t2 = z1^2 * (2Ax1 + z1^2)
+    fp2_sqr(&t0, &P->x);          // t0 = x1^2
+    fp2_add(&t2, &t2, &t0);       // t2 = x1^2 + z1^2 * (2Ax1 + z1^2)
+    fp2_add(&t2, &t2, &t0);       // t2 = 2*x1^2 + z1^2 * (2Ax1 + z1^2)
+    fp2_add(&t2, &t2, &t0);       // t2 = 3*x1^2 + z1^2 * (2Ax1 + z1^2)
+    fp2_mul(&t2, &t2, &Q->z);     // dy_dbl = t2 = z2 * (3*x1^2 + z1^2 * (2Ax1 + z1^2))
+
+    /* If dx is zero and dy is zero swap with double variables */
+    uint32_t ctl = fp2_is_zero(&dx) & fp2_is_zero(&dy);
+    fp2_select(&dx, &dx, &t1, ctl);
+    fp2_select(&dy, &dy, &t2, ctl);
+
+    /* Some more precomputations */
+    fp2_mul(&t0, &P->z, &Q->z); // t0 = z1z2
+    fp2_sqr(&t1, &t0);          // t1 = (z1z2)^2
+    fp2_sqr(&t2, &dx);          // t2 = dx^2
+    fp2_sqr(&t3, &dy);          // t3 = dy^2
+
+    /* Compute x3 = dy**2 - dx**2 * (A*ZP^2*ZQ^2 + XP*ZQ^2 + XQ*ZP^2) */
+    fp2_mul(&R->x, &AC->A, &t1); // x3 = A*(z1z2)^2
+    fp2_add(&R->x, &R->x, &u1);  // x3 = A*(z1z2)^2 + u1
+    fp2_add(&R->x, &R->x, &u2);  // x3 = A*(z1z2)^2 + u1 + u2
+    fp2_mul(&R->x, &R->x, &t2);  // x3 = dx^2 * (A*(z1z2)^2 + u1 + u2)
+    fp2_sub(&R->x, &t3, &R->x);  // x3 = dy^2 - dx^2 * (A*(z1z2)^2 + u1 + u2)
+
+    /* Compute y3 = dy * (XP*ZQ^2 * dx^2 - XR) - YP*ZQ^3 * dx^3 */
+    fp2_mul(&R->y, &u1, &t2);     // y3 = u1 * dx^2
+    fp2_sub(&R->y, &R->y, &R->x); // y3 = u1 * dx^2 - x3
+    fp2_mul(&R->y, &R->y, &dy);   // y3 = dy * (u1 * dx^2 - x3)
+    fp2_mul(&t3, &t2, &dx);       // t3 = dx^3
+    fp2_mul(&t3, &t3, &v1);       // t3 = v1 * dx^3
+    fp2_sub(&R->y, &R->y, &t3);   // y3 = dy * (u1 * dx^2 - x3) - v1 * dx^3
+
+    /* Compute z3 = dx * z1 * z2 */
+    fp2_mul(&R->z, &dx, &t0);
+
+    /* Finally, we need to set R = P if Q.Z = 0 and R = Q if P.Z = 0 */
+    select_jac_point(R, R, Q, ctl1);
+    select_jac_point(R, R, P, ctl2);
+}
+
+void
+jac_to_xz_add_components(add_components_t *add_comp, const jac_point_t *P, const 
jac_point_t *Q, const ec_curve_t *AC) +{ + // Take P and Q in E distinct, two jac_point_t, return three components u,v and w in Fp2 such + // that the xz coordinates of P+Q are (u-v:w) and of P-Q are (u+v:w) + + fp2_t t0, t1, t2, t3, t4, t5, t6; + + fp2_sqr(&t0, &P->z); // t0 = z1^2 + fp2_sqr(&t1, &Q->z); // t1 = z2^2 + fp2_mul(&t2, &P->x, &t1); // t2 = x1z2^2 + fp2_mul(&t3, &t0, &Q->x); // t3 = z1^2x2 + fp2_mul(&t4, &P->y, &Q->z); // t4 = y1z2 + fp2_mul(&t4, &t4, &t1); // t4 = y1z2^3 + fp2_mul(&t5, &P->z, &Q->y); // t5 = z1y2 + fp2_mul(&t5, &t5, &t0); // t5 = z1^3y2 + fp2_mul(&t0, &t0, &t1); // t0 = (z1z2)^2 + fp2_mul(&t6, &t4, &t5); // t6 = (z1z_2)^3y1y2 + fp2_add(&add_comp->v, &t6, &t6); // v = 2(z1z_2)^3y1y2 + fp2_sqr(&t4, &t4); // t4 = y1^2z2^6 + fp2_sqr(&t5, &t5); // t5 = z1^6y_2^2 + fp2_add(&t4, &t4, &t5); // t4 = z1^6y_2^2 + y1^2z2^6 + fp2_add(&t5, &t2, &t3); // t5 = x1z2^2 +z_1^2x2 + fp2_add(&t6, &t3, &t3); // t6 = 2z_1^2x2 + fp2_sub(&t6, &t5, &t6); // t6 = lambda = x1z2^2 - z_1^2x2 + fp2_sqr(&t6, &t6); // t6 = lambda^2 = (x1z2^2 - z_1^2x2)^2 + fp2_mul(&t1, &AC->A, &t0); // t1 = A*(z1z2)^2 + fp2_add(&t1, &t5, &t1); // t1 = gamma =A*(z1z2)^2 + x1z2^2 +z_1^2x2 + fp2_mul(&t1, &t1, &t6); // t1 = gamma*lambda^2 + fp2_sub(&add_comp->u, &t4, &t1); // u = z1^6y_2^2 + y1^2z2^6 - gamma*lambda^2 + fp2_mul(&add_comp->w, &t6, &t0); // w = (z1z2)^2(lambda)^2 +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec_params.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec_params.c new file mode 100644 index 0000000000..5011f102e1 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec_params.c @@ -0,0 +1,4 @@ +#include +// p+1 divided by the power of 2 +const digit_t p_cofactor_for_2f[1] = {5}; + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec_params.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec_params.h new file mode 100644 index 0000000000..e02ac1d146 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec_params.h @@ -0,0 +1,12 @@ +#ifndef EC_PARAMS_H +#define EC_PARAMS_H + +#include + +#define TORSION_EVEN_POWER 248 + +// p+1 divided by the power of 2 +extern const digit_t p_cofactor_for_2f[1]; +#define P_COFACTOR_FOR_2F_BITLENGTH 3 + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/encode_signature.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/encode_signature.c new file mode 100644 index 0000000000..112c695941 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/encode_signature.c @@ -0,0 +1,208 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +typedef unsigned char byte_t; + +// digits + +static void +encode_digits(byte_t *enc, const digit_t *x, size_t nbytes) +{ +#ifdef TARGET_BIG_ENDIAN + const size_t ndigits = nbytes / sizeof(digit_t); + const size_t rem = nbytes % sizeof(digit_t); + + for (size_t i = 0; i < ndigits; i++) + ((digit_t *)enc)[i] = BSWAP_DIGIT(x[i]); + if (rem) { + digit_t ld = BSWAP_DIGIT(x[ndigits]); + memcpy(enc + ndigits * sizeof(digit_t), (byte_t *)&ld, rem); + } +#else + memcpy(enc, (const byte_t *)x, nbytes); +#endif +} + +static void +decode_digits(digit_t *x, const byte_t *enc, size_t nbytes, size_t ndigits) +{ + assert(nbytes <= ndigits * sizeof(digit_t)); + memcpy((byte_t *)x, enc, nbytes); + memset((byte_t *)x + nbytes, 0, ndigits * sizeof(digit_t) - nbytes); + +#ifdef TARGET_BIG_ENDIAN + for (size_t i = 0; i < ndigits; i++) + x[i] = BSWAP_DIGIT(x[i]); +#endif +} + +// ibz_t + +static byte_t * +ibz_to_bytes(byte_t *enc, const ibz_t *x, size_t 
nbytes, bool sgn) +{ +#ifndef NDEBUG + { + // make sure there is enough space + ibz_t abs, bnd; + ibz_init(&bnd); + ibz_init(&abs); + ibz_pow(&bnd, &ibz_const_two, 8 * nbytes - sgn); + ibz_abs(&abs, x); + assert(ibz_cmp(&abs, &bnd) < 0); + ibz_finalize(&bnd); + ibz_finalize(&abs); + } +#endif + const size_t digits = (nbytes + sizeof(digit_t) - 1) / sizeof(digit_t); + digit_t d[digits]; + memset(d, 0, sizeof(d)); + if (ibz_cmp(x, &ibz_const_zero) >= 0) { + // non-negative, straightforward. + ibz_to_digits(d, x); + } else { + assert(sgn); + // negative; use two's complement. + ibz_t tmp; + ibz_init(&tmp); + ibz_neg(&tmp, x); + ibz_sub(&tmp, &tmp, &ibz_const_one); + ibz_to_digits(d, &tmp); + for (size_t i = 0; i < digits; ++i) + d[i] = ~d[i]; +#ifndef NDEBUG + { + // make sure the result is correct + ibz_t chk; + ibz_init(&chk); + ibz_copy_digit_array(&tmp, d); + ibz_sub(&tmp, &tmp, x); + ibz_pow(&chk, &ibz_const_two, 8 * sizeof(d)); + assert(!ibz_cmp(&tmp, &chk)); + ibz_finalize(&chk); + } +#endif + ibz_finalize(&tmp); + } + encode_digits(enc, d, nbytes); + return enc + nbytes; +} + +static const byte_t * +ibz_from_bytes(ibz_t *x, const byte_t *enc, size_t nbytes, bool sgn) +{ + assert(nbytes > 0); + const size_t ndigits = (nbytes + sizeof(digit_t) - 1) / sizeof(digit_t); + assert(ndigits > 0); + digit_t d[ndigits]; + memset(d, 0, sizeof(d)); + decode_digits(d, enc, nbytes, ndigits); + if (sgn && enc[nbytes - 1] >> 7) { + // negative, decode two's complement + const size_t s = sizeof(digit_t) - 1 - (sizeof(d) - nbytes); + assert(s < sizeof(digit_t)); + d[ndigits - 1] |= ((digit_t)-1) >> 8 * s << 8 * s; + for (size_t i = 0; i < ndigits; ++i) + d[i] = ~d[i]; + ibz_copy_digits(x, d, ndigits); + ibz_add(x, x, &ibz_const_one); + ibz_neg(x, x); + } else { + // non-negative + ibz_copy_digits(x, d, ndigits); + } + return enc + nbytes; +} + +// public API + +void +secret_key_to_bytes(byte_t *enc, const secret_key_t *sk, const public_key_t *pk) +{ +#ifndef NDEBUG + byte_t *const start = enc; +#endif + + enc = public_key_to_bytes(enc, pk); + +#ifndef NDEBUG + { + fp2_t lhs, rhs; + fp2_mul(&lhs, &sk->curve.A, &pk->curve.C); + fp2_mul(&rhs, &sk->curve.C, &pk->curve.A); + assert(fp2_is_equal(&lhs, &rhs)); + } +#endif + + enc = ibz_to_bytes(enc, &sk->secret_ideal.norm, FP_ENCODED_BYTES, false); + { + quat_alg_elem_t gen; + quat_alg_elem_init(&gen); + int ret UNUSED = quat_lideal_generator(&gen, &sk->secret_ideal, &QUATALG_PINFTY); + assert(ret); + // we skip encoding the denominator since it won't change the generated ideal +#ifndef NDEBUG + { + // let's make sure that the denominator is indeed coprime to the norm of the ideal + ibz_t gcd; + ibz_init(&gcd); + ibz_gcd(&gcd, &gen.denom, &sk->secret_ideal.norm); + assert(!ibz_cmp(&gcd, &ibz_const_one)); + ibz_finalize(&gcd); + } +#endif + enc = ibz_to_bytes(enc, &gen.coord[0], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord[1], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord[2], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord[3], FP_ENCODED_BYTES, true); + quat_alg_elem_finalize(&gen); + } + + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][1], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][1], TORSION_2POWER_BYTES, false); + + assert(enc - start == SECRETKEY_BYTES); +} + +void 
+secret_key_from_bytes(secret_key_t *sk, public_key_t *pk, const byte_t *enc) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + + enc = public_key_from_bytes(pk, enc); + + { + ibz_t norm; + ibz_init(&norm); + quat_alg_elem_t gen; + quat_alg_elem_init(&gen); + enc = ibz_from_bytes(&norm, enc, FP_ENCODED_BYTES, false); + enc = ibz_from_bytes(&gen.coord[0], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord[1], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord[2], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord[3], enc, FP_ENCODED_BYTES, true); + quat_lideal_create(&sk->secret_ideal, &gen, &norm, &MAXORD_O0, &QUATALG_PINFTY); + ibz_finalize(&norm); + quat_alg_elem_finalize(&gen); + } + + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][1], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][1], enc, TORSION_2POWER_BYTES, false); + + assert(enc - start == SECRETKEY_BYTES); + + sk->curve = pk->curve; + ec_curve_to_basis_2f_from_hint(&sk->canonical_basis, &sk->curve, TORSION_EVEN_POWER, pk->hint_pk); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/encode_verification.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/encode_verification.c new file mode 100644 index 0000000000..fecdb9c259 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/encode_verification.c @@ -0,0 +1,220 @@ +#include +#include +#include +#include +#include +#include + +typedef unsigned char byte_t; + +// digits + +static void +encode_digits(byte_t *enc, const digit_t *x, size_t nbytes) +{ +#ifdef TARGET_BIG_ENDIAN + const size_t ndigits = nbytes / sizeof(digit_t); + const size_t rem = nbytes % sizeof(digit_t); + + for (size_t i = 0; i < ndigits; i++) + ((digit_t *)enc)[i] = BSWAP_DIGIT(x[i]); + if (rem) { + digit_t ld = BSWAP_DIGIT(x[ndigits]); + memcpy(enc + ndigits * sizeof(digit_t), (byte_t *)&ld, rem); + } +#else + memcpy(enc, (const byte_t *)x, nbytes); +#endif +} + +static void +decode_digits(digit_t *x, const byte_t *enc, size_t nbytes, size_t ndigits) +{ + assert(nbytes <= ndigits * sizeof(digit_t)); + memcpy((byte_t *)x, enc, nbytes); + memset((byte_t *)x + nbytes, 0, ndigits * sizeof(digit_t) - nbytes); + +#ifdef TARGET_BIG_ENDIAN + for (size_t i = 0; i < ndigits; i++) + x[i] = BSWAP_DIGIT(x[i]); +#endif +} + +// fp2_t + +static byte_t * +fp2_to_bytes(byte_t *enc, const fp2_t *x) +{ + fp2_encode(enc, x); + return enc + FP2_ENCODED_BYTES; +} + +static const byte_t * +fp2_from_bytes(fp2_t *x, const byte_t *enc) +{ + fp2_decode(x, enc); + return enc + FP2_ENCODED_BYTES; +} + +// curves and points + +static byte_t * +proj_to_bytes(byte_t *enc, const fp2_t *x, const fp2_t *z) +{ + assert(!fp2_is_zero(z)); + fp2_t tmp = *z; + fp2_inv(&tmp); +#ifndef NDEBUG + { + fp2_t chk; + fp2_mul(&chk, z, &tmp); + fp2_t one; + fp2_set_one(&one); + assert(fp2_is_equal(&chk, &one)); + } +#endif + fp2_mul(&tmp, x, &tmp); + enc = fp2_to_bytes(enc, &tmp); + return enc; +} + +static const byte_t * +proj_from_bytes(fp2_t *x, fp2_t *z, const byte_t *enc) +{ + enc = fp2_from_bytes(x, enc); + fp2_set_one(z); + return enc; +} + +static byte_t * +ec_curve_to_bytes(byte_t *enc, const ec_curve_t *curve) +{ + return proj_to_bytes(enc, &curve->A, &curve->C); +} + +static const byte_t * +ec_curve_from_bytes(ec_curve_t *curve, const byte_t *enc) +{ + 
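+    // Zeroing the whole struct also clears the cached A24 point and the
+    // is_A24_computed_and_normalized flag, so a freshly decoded curve does not
+    // carry a stale normalized A24.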
memset(curve, 0, sizeof(*curve)); + return proj_from_bytes(&curve->A, &curve->C, enc); +} + +static byte_t * +ec_point_to_bytes(byte_t *enc, const ec_point_t *point) +{ + return proj_to_bytes(enc, &point->x, &point->z); +} + +static const byte_t * +ec_point_from_bytes(ec_point_t *point, const byte_t *enc) +{ + return proj_from_bytes(&point->x, &point->z, enc); +} + +static byte_t * +ec_basis_to_bytes(byte_t *enc, const ec_basis_t *basis) +{ + enc = ec_point_to_bytes(enc, &basis->P); + enc = ec_point_to_bytes(enc, &basis->Q); + enc = ec_point_to_bytes(enc, &basis->PmQ); + return enc; +} + +static const byte_t * +ec_basis_from_bytes(ec_basis_t *basis, const byte_t *enc) +{ + enc = ec_point_from_bytes(&basis->P, enc); + enc = ec_point_from_bytes(&basis->Q, enc); + enc = ec_point_from_bytes(&basis->PmQ, enc); + return enc; +} + +// public API + +byte_t * +public_key_to_bytes(byte_t *enc, const public_key_t *pk) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + enc = ec_curve_to_bytes(enc, &pk->curve); + *enc++ = pk->hint_pk; + assert(enc - start == PUBLICKEY_BYTES); + return enc; +} + +const byte_t * +public_key_from_bytes(public_key_t *pk, const byte_t *enc) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + enc = ec_curve_from_bytes(&pk->curve, enc); + pk->hint_pk = *enc++; + assert(enc - start == PUBLICKEY_BYTES); + return enc; +} + +void +signature_to_bytes(byte_t *enc, const signature_t *sig) +{ +#ifndef NDEBUG + byte_t *const start = enc; +#endif + + enc = fp2_to_bytes(enc, &sig->E_aux_A); + + *enc++ = sig->backtracking; + *enc++ = sig->two_resp_length; + + size_t nbytes = (SQIsign_response_length + 9) / 8; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[0][0], nbytes); + enc += nbytes; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[0][1], nbytes); + enc += nbytes; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[1][0], nbytes); + enc += nbytes; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[1][1], nbytes); + enc += nbytes; + + nbytes = SECURITY_BITS / 8; + encode_digits(enc, sig->chall_coeff, nbytes); + enc += nbytes; + + *enc++ = sig->hint_aux; + *enc++ = sig->hint_chall; + + assert(enc - start == SIGNATURE_BYTES); +} + +void +signature_from_bytes(signature_t *sig, const byte_t *enc) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + + enc = fp2_from_bytes(&sig->E_aux_A, enc); + + sig->backtracking = *enc++; + sig->two_resp_length = *enc++; + + size_t nbytes = (SQIsign_response_length + 9) / 8; + decode_digits(sig->mat_Bchall_can_to_B_chall[0][0], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + decode_digits(sig->mat_Bchall_can_to_B_chall[0][1], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + decode_digits(sig->mat_Bchall_can_to_B_chall[1][0], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + decode_digits(sig->mat_Bchall_can_to_B_chall[1][1], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + + nbytes = SECURITY_BITS / 8; + decode_digits(sig->chall_coeff, enc, nbytes, NWORDS_ORDER); + enc += nbytes; + + sig->hint_aux = *enc++; + sig->hint_chall = *enc++; + + assert(enc - start == SIGNATURE_BYTES); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/encoded_sizes.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/encoded_sizes.h new file mode 100644 index 0000000000..02f8642967 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/encoded_sizes.h @@ -0,0 +1,11 @@ +#define SECURITY_BITS 128 +#define SQIsign_response_length 126 +#define HASH_ITERATIONS 64 +#define FP_ENCODED_BYTES 32 +#define FP2_ENCODED_BYTES 64 +#define 
EC_CURVE_ENCODED_BYTES 64 +#define EC_POINT_ENCODED_BYTES 64 +#define EC_BASIS_ENCODED_BYTES 192 +#define PUBLICKEY_BYTES 65 +#define SECRETKEY_BYTES 353 +#define SIGNATURE_BYTES 148 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/endomorphism_action.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/endomorphism_action.c new file mode 100644 index 0000000000..abeddc30a7 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/endomorphism_action.c @@ -0,0 +1,3336 @@ +#include +#include +#include +const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x199, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6} +#elif RADIX == 32 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x19, 0x0, 0x0, 0x300000000000000} +#else +{0xc, 0x0, 0x0, 0x0, 0x400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x107, 0xc, 0x1890, 0xf2a, 0x52b, 0xb68, 0x152d, 0xa4c, 0x1054, 0x642, 0x36a, 0x6f8, 0x7ad, 0x146c, 0x1d66, 0x1b67, 0x236, 0x10d, 0x1933, 0x3} +#elif RADIX == 32 +{0x3020e, 0xb795624, 
0x5ab6829, 0x1514995, 0x1b5190a, 0x187ad37c, 0x19facd46, 0x8688db6, 0x3c998} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x52b795624001810, 0x8c8505452654b56d, 0xf59a8d87ad37c0da, 0x24e4cc21a236db3} +#else +{0x5bcab12000c08, 0x452654b56d052, 0x26f81b5190a0a, 0x36cfd66a361eb, 0x12726610d11b} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1f87, 0x83e, 0x32e, 0xe58, 0xd9d, 0x1416, 0x752, 0x13b4, 0x1efa, 0xe62, 0x12f5, 0x1907, 0x1814, 0x1ddd, 0x1aa6, 0x1420, 0x2cd, 0x1431, 0x1be2, 0x7} +#elif RADIX == 32 +{0x120fbf0f, 0x1d72c0cb, 0xa54166c, 0x1bea7687, 0x197ab98b, 0x1b814c83, 0x8354ddd, 0x188b368, 0x2df15} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xcd9d72c0cb907df8, 0x5cc5efa9da1d4a82, 0x6a9bbbb814c83cbd, 0x26ef8a8622cda10} +#else +{0x6b96065c83efc, 0x29da1d4a82cd9, 0x190797ab98bdf, 0x6841aa6eeee05, 0x1377c5431166} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x5ff, 0x1783, 0xadc, 0x775, 0xad4, 0x593, 0xb4c, 0x21e, 0x1cb2, 0x13d8, 0x179f, 0x680, 0x1a9c, 0x1824, 0x118e, 0x13d9, 0x24, 0x1956, 0x1dd2, 0x9} +#elif RADIX == 32 +{0x5e0cbff, 0x143baab7, 0x9859356, 0x12c843cb, 0xbcfcf63, 0x9a9c340, 0x16631d82, 0xab00927, 0x4ee96} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6ad43baab72f065f, 0xe7b1cb210f2d30b2, 0xc63b049a9c3405e7, 0x4ff74b2ac0249ec} +#else +{0x21dd55b97832f, 0x210f2d30b26ad, 0x680bcfcf6396, 0x27b318ec126a7, 0x4ffba5956012} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1c7f, 0x1117, 0xa4, 0x1164, 0x6e, 0x1e63, 0x1b7b, 0x1305, 0x424, 0x131a, 0x1b61, 0xae3, 0x17b1, 0xe5e, 0x1848, 0x1e81, 0x14a5, 0x1cb5, 0x1d87, 0x8} +#elif RADIX == 32 +{0x445f8ff, 0xe8b2029, 0xf7e6303, 0x109260bb, 0x1db0cc68, 0x1d7b1571, 0x7090e5, 0x5ad297d, 0x3ec3f} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x606e8b2029222fc7, 0x6634424982edefcc, 0xe121cbd7b1571ed8, 0x4f761f96b4a5f40} +#else +{0x74590149117e3, 0x4982edefcc606, 0x2ae3db0cc6884, 0x7d0384872f5ec, 0x4fbb0fcb5a52} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x342, 0xfb7, 0xed, 0x1d80, 0x17f1, 0x4a2, 0x1c26, 0xb96, 0x1367, 0x3dc, 0x1624, 0x1f2a, 0x5e, 0x1cab, 0x27, 0x1e89, 0x1293, 0x1e24, 0x417, 0x5} 
+#elif RADIX == 32 +{0xbedc685, 0x11ec003b, 0x4c4a2bf, 0xd9d72dc, 0xb120f72, 0x1605ef95, 0x2404fca, 0x1124a4fd, 0x20bf} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x57f1ec003b5f6e34, 0x7b93675cb709894, 0x809f95605ef95589, 0xc905fc49293f44} +#else +{0xf6001dafb71a, 0x75cb70989457f, 0x5f2ab120f726c, 0x7d12027e55817, 0x6482fe24949} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xf3c, 0x1d21, 0xd78, 0xe8e, 0x1f3c, 0x11b, 0x12c, 0x1851, 0x19b1, 0xd9, 0xf3f, 0x759, 0xf47, 0x1e88, 0x56e, 0x8ef, 0x116e, 0x1fa1, 0x1199, 0x0} +#elif RADIX == 32 +{0x7485e78, 0x1c74735e, 0x5811bf9, 0x6c70a21, 0x179f8367, 0x10f473ac, 0x1bcadde8, 0x1d0c5b91, 0x8ccf} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x7f3c74735e3a42f3, 0xc1b39b1c2884b023, 0x95bbd10f473acbcf, 0x3c4667f4316e477} +#else +{0x63a39af1d2179, 0x1c2884b0237f3, 0x675979f836736, 0x11de56ef443d1, 0x462333fa18b7} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x56db,0x1b54,0xbda2,0xc5d3,0xdd06,0x861d,0x9780,0x7475,0x33d1,0x41af,0x34b2,0x7f9d,0x7f8c,0xaa8c,0xb471,0xca}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1b5456db,0xc5d3bda2,0x861ddd06,0x74759780,0x41af33d1,0x7f9d34b2,0xaa8c7f8c,0xcab471}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc5d3bda21b5456db,0x74759780861ddd06,0x7f9d34b241af33d1,0xcab471aa8c7f8c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7d7a,0x48b,0x7d32,0x7bfb,0x9bd3,0x63d8,0x9182,0xa955,0x3e1,0x344,0x6861,0x76bf,0x5cd0,0xeeb4,0x4ae3,0x57}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x48b7d7a,0x7bfb7d32,0x63d89bd3,0xa9559182,0x34403e1,0x76bf6861,0xeeb45cd0,0x574ae3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7bfb7d32048b7d7a,0xa955918263d89bd3,0x76bf6861034403e1,0x574ae3eeb45cd0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x444f,0x3698,0xd649,0x856f,0x41db,0x498f,0xafdf,0x189c,0xcb5b,0xe50b,0xbff,0xf7e0,0x47f9,0xa88b,0x35da,0x15}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3698444f,0x856fd649,0x498f41db,0x189cafdf,0xe50bcb5b,0xf7e00bff,0xa88b47f9,0x1535da}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x856fd6493698444f,0x189cafdf498f41db,0xf7e00bffe50bcb5b,0x1535daa88b47f9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa925,0xe4ab,0x425d,0x3a2c,0x22f9,0x79e2,0x687f,0x8b8a,0xcc2e,0xbe50,0xcb4d,0x8062,0x8073,0x5573,0x4b8e,0x35}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = 
(mp_limb_t[]) {0xe4aba925,0x3a2c425d,0x79e222f9,0x8b8a687f,0xbe50cc2e,0x8062cb4d,0x55738073,0x354b8e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3a2c425de4aba925,0x8b8a687f79e222f9,0x8062cb4dbe50cc2e,0x354b8e55738073}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xabf,0x5490,0xd5fd,0x36ba,0xda0f,0x4a59,0x4eea,0xd1,0xa3f0,0xa7ae,0x6f6,0x9146,0x5004,0xcde6,0xa2d2,0x7d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x54900abf,0x36bad5fd,0x4a59da0f,0xd14eea,0xa7aea3f0,0x914606f6,0xcde65004,0x7da2d2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x36bad5fd54900abf,0xd14eea4a59da0f,0x914606f6a7aea3f0,0x7da2d2cde65004}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8680,0xb787,0xbde3,0x611d,0xa95f,0x8b68,0xc9ec,0x819,0x2361,0xf73e,0x5e31,0xbd7b,0x2b45,0x40d7,0x2400,0x68}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb7878680,0x611dbde3,0x8b68a95f,0x819c9ec,0xf73e2361,0xbd7b5e31,0x40d72b45,0x682400}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x611dbde3b7878680,0x819c9ec8b68a95f,0xbd7b5e31f73e2361,0x68240040d72b45}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4277,0x6d20,0x9e12,0x1f0c,0x977f,0xf854,0x9d1c,0x563f,0xdb,0xc2ed,0xaf54,0xe829,0x4fb,0xd83,0x7be8,0xca}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6d204277,0x1f0c9e12,0xf854977f,0x563f9d1c,0xc2ed00db,0xe829af54,0xd8304fb,0xca7be8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1f0c9e126d204277,0x563f9d1cf854977f,0xe829af54c2ed00db,0xca7be80d8304fb}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf541,0xab6f,0x2a02,0xc945,0x25f0,0xb5a6,0xb115,0xff2e,0x5c0f,0x5851,0xf909,0x6eb9,0xaffb,0x3219,0x5d2d,0x82}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xab6ff541,0xc9452a02,0xb5a625f0,0xff2eb115,0x58515c0f,0x6eb9f909,0x3219affb,0x825d2d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc9452a02ab6ff541,0xff2eb115b5a625f0,0x6eb9f90958515c0f,0x825d2d3219affb}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x56db,0x1b54,0xbda2,0xc5d3,0xdd06,0x861d,0x9780,0x7475,0x33d1,0x41af,0x34b2,0x7f9d,0x7f8c,0xaa8c,0xb471,0xca}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1b5456db,0xc5d3bda2,0x861ddd06,0x74759780,0x41af33d1,0x7f9d34b2,0xaa8c7f8c,0xcab471}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc5d3bda21b5456db,0x74759780861ddd06,0x7f9d34b241af33d1,0xcab471aa8c7f8c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7d7a,0x48b,0x7d32,0x7bfb,0x9bd3,0x63d8,0x9182,0xa955,0x3e1,0x344,0x6861,0x76bf,0x5cd0,0xeeb4,0x4ae3,0x57}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x48b7d7a,0x7bfb7d32,0x63d89bd3,0xa9559182,0x34403e1,0x76bf6861,0xeeb45cd0,0x574ae3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7bfb7d32048b7d7a,0xa955918263d89bd3,0x76bf6861034403e1,0x574ae3eeb45cd0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x444f,0x3698,0xd649,0x856f,0x41db,0x498f,0xafdf,0x189c,0xcb5b,0xe50b,0xbff,0xf7e0,0x47f9,0xa88b,0x35da,0x15}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3698444f,0x856fd649,0x498f41db,0x189cafdf,0xe50bcb5b,0xf7e00bff,0xa88b47f9,0x1535da}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x856fd6493698444f,0x189cafdf498f41db,0xf7e00bffe50bcb5b,0x1535daa88b47f9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa925,0xe4ab,0x425d,0x3a2c,0x22f9,0x79e2,0x687f,0x8b8a,0xcc2e,0xbe50,0xcb4d,0x8062,0x8073,0x5573,0x4b8e,0x35}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe4aba925,0x3a2c425d,0x79e222f9,0x8b8a687f,0xbe50cc2e,0x8062cb4d,0x55738073,0x354b8e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3a2c425de4aba925,0x8b8a687f79e222f9,0x8062cb4dbe50cc2e,0x354b8e55738073}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x30cd,0xb7f2,0x49cf,0xfe47,0xdb8a,0x683b,0x7335,0xbaa3,0xebe0,0x74ae,0x9dd4,0x8871,0x67c8,0x3c39,0x2ba2,0x24}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb7f230cd,0xfe4749cf,0x683bdb8a,0xbaa37335,0x74aeebe0,0x88719dd4,0x3c3967c8,0x242ba2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfe4749cfb7f230cd,0xbaa37335683bdb8a,0x88719dd474aeebe0,0x242ba23c3967c8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81fd,0xde09,0x9d8a,0x6e8c,0xa299,0x77a0,0xadb7,0x58b7,0x13a1,0x7d41,0x6349,0x1a1d,0xc40b,0x17c5,0xb772,0xdf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xde0981fd,0x6e8c9d8a,0x77a0a299,0x58b7adb7,0x7d4113a1,0x1a1d6349,0x17c5c40b,0xdfb772}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6e8c9d8ade0981fd,0x58b7adb777a0a299,0x1a1d63497d4113a1,0xdfb77217c5c40b}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4363,0xd1dc,0x3a2d,0x523e,0xecad,0x20f1,0x267e,0x376e,0x661b,0x53fc,0xddaa,0xf004,0x267a,0x5b07,0xd8e1,0x6f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd1dc4363,0x523e3a2d,0x20f1ecad,0x376e267e,0x53fc661b,0xf004ddaa,0x5b07267a,0x6fd8e1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x523e3a2dd1dc4363,0x376e267e20f1ecad,0xf004ddaa53fc661b,0x6fd8e15b07267a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcf33,0x480d,0xb630,0x1b8,0x2475,0x97c4,0x8cca,0x455c,0x141f,0x8b51,0x622b,0x778e,0x9837,0xc3c6,0xd45d,0xdb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x480dcf33,0x1b8b630,0x97c42475,0x455c8cca,0x8b51141f,0x778e622b,0xc3c69837,0xdbd45d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1b8b630480dcf33,0x455c8cca97c42475,0x778e622b8b51141f,0xdbd45dc3c69837}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x19e1, 0x1d98, 0x1de9, 0x1dfc, 0x922, 0x1fb8, 0x476, 0xd05, 0xc85, 0x1788, 0x1967, 0x155d, 0x1f93, 0x629, 0x188f, 0x119, 0x1f6f, 0x241, 0x1378, 0x0} +#elif RADIX == 32 +{0xf6633c2, 0x2efe77a, 0xedfb849, 0x1215a0a4, 0x1cb3de21, 0x13f93aae, 0x6711e62, 0x120fdbc2, 0x9bc0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x922efe77a7b319e, 0xef10c8568291dbf7, 0xe23cc53f93aaee59, 0x54de0483f6f08c} +#else +{0x177f3bd3d98cf, 0x568291dbf7092, 0x755dcb3de2190, 0x423388f314fe4, 0x2a6f0241fb7} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x811, 0xf66, 0x77a, 0x177f, 0x248, 0x17ee, 0x91d, 0xb41, 0x321, 0x1de2, 0xe59, 0x1d57, 0xfe4, 0x198a, 0xe23, 0x1846, 0xfdb, 0x90, 0x14de, 0x8} +#elif RADIX == 32 +{0x13d99023, 0x8bbf9de, 0x3b7ee12, 0xc856829, 0x172cf788, 0x14fe4eab, 0x119c4798, 0x483f6f0, 0x3a6f0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xc248bbf9de9ecc81, 0x7bc43215a0a476fd, 0x388f314fe4eabb96, 0x95378120fdbc23} +#else +{0x45dfcef4f6640, 0x15a0a476fdc24, 0x1d5772cf78864, 0x708ce23cc53f9, 0x2ca9bc0907ed} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 
0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x869, 0x197b, 0xcdb, 0x1d89, 0xf9b, 0x1d79, 0x18ec, 0xafe, 0x1d41, 0x77, 0x9d4, 0x1a3f, 0x2b, 0x46d, 0x173e, 0xedd, 0x172, 0x1c77, 0x8a6, 0x8} +#elif RADIX == 32 +{0x1e5ed0d3, 0x1bec4b36, 0x1d9d797c, 0x15055fd8, 0x14ea01df, 0x1a02bd1f, 0x176e7c46, 0x3b85c9d, 0x34537} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x2f9bec4b36f2f686, 0xefd4157f63b3af, 0xdcf88da02bd1fa75, 0x31229b8ee17276e} +#else +{0x5f6259b797b43, 0x157f63b3af2f9, 0x7a3f4ea01dfa8, 0x1dbb73e23680a, 0x18914dc770b9} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x124b, 0xed4, 0x1706, 0x32d, 0x1541, 0x11b8, 0x2b0, 0xbe4, 0x1ee8, 0x1a3c, 0x16e3, 0x1d25, 0x19bb, 0xb63, 0x1fc1, 0x5fa, 0xf03, 0xfa, 0x1ec, 0x9} +#elif RADIX == 32 +{0x13b52497, 0x1196dc1, 0x1611b8aa, 0x1ba17c82, 0x1b71e8f3, 0x79bbe92, 0x1ebf82b6, 0x7d3c0cb, 0x40f60} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x1541196dc19da924, 0xf479ee85f20ac237, 0x7f056c79bbe92db8, 0x3b87b01f4f032fd} +#else +{0x8cb6e0ced492, 0x5f20ac237154, 0x7d25b71e8f3dd, 0x4bf5fc15b1e6e, 0x1dc3d80fa781} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1e71, 0xd67, 0x13da, 0x19eb, 0x137a, 0x1d27, 0x1ba7, 0x1996, 0x755, 0xe3d, 0x1139, 0x1764, 0x18ac, 0x1020, 0x3c4, 0x150e, 0x1ffd, 0x14fe, 0xa16, 0x6} +#elif RADIX == 32 +{0x1359fce3, 0x1acf5cf6, 0x14fd279b, 0x1d5732db, 0x89cb8f4, 0x18acbb2, 0x3878902, 0x7f7ff6a, 0x150b5} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf37acf5cf69acfe7, 0x5c7a755ccb6e9fa4, 0xf120418acbb244e, 0x8285a9fdffda87} +#else +{0x567ae7b4d67f3, 0x5ccb6e9fa4f37, 0x176489cb8f4ea, 0x6a1c3c481062b, 0x2c142d4feffe} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x13ec, 0x10a3, 0x1e69, 0x106f, 0x619, 0x1cb5, 0x9aa, 0x362, 0x53a, 0x1af5, 0x1bae, 0x60a, 0x2a4, 0x448, 0x3d0, 0x535, 0xeb1, 0x1a6e, 0x978, 0x5} +#elif RADIX == 32 +{0xc28e7d9, 0x19837f9a, 0x155cb530, 0x14e86c49, 0xdd76bd4, 0x102a4305, 0xd47a044, 0x1373ac4a, 0x4bc6} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xa619837f9a61473e, 0xb5ea53a1b126ab96, 0x8f408902a43056eb, 0x3ea5e34dceb129a} +#else +{0x4c1bfcd30a39f, 0x21b126ab96a61, 0x60add76bd4a7, 0x4a6a3d02240a9, 0x1f52f1a6e758} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if 
defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x77a, 0x201, 0x168d, 0x8fe, 0x780, 0x1ccb, 0x52b, 0x1c83, 0x18dd, 0xcef, 0x11f5, 0x1446, 0x301, 0xb63, 0xe3f, 0x1b72, 0x1, 0x1da9, 0x1281, 0x8} +#elif RADIX == 32 +{0x8804ef5, 0x47f5a3, 0x57ccb3c, 0x3779065, 0x8fab3bf, 0x6301a23, 0x1c9c7eb6, 0xd480076, 0x3940f} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x678047f5a3440277, 0x59df8dde4194af99, 0x38fd6c6301a2347d, 0x364a07b52001db9} +#else +{0x23fad1a2013b, 0x5e4194af99678, 0x34468fab3bf1b, 0x76e4e3f5b18c0, 0x432503da9000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1, 0xb39, 0x969, 0x1324, 0xbe6, 0x86e, 0x1021, 0x29a, 0x1ff0, 0xd23, 0x7d5, 0x72a, 0x1e33, 0x1fd9, 0x10af, 0x15bc, 0x1d56, 0x928, 0x1d49, 0x0} +#elif RADIX == 32 +{0xace4002, 0x699225a, 0x4286e5f, 0x1fc05350, 0x3eab48f, 0x13e33395, 0xf215ffd, 0x94755ab, 0xea4a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xcbe699225a567200, 0x5a47ff014d40850d, 0x42bffb3e333951f5, 0x57525251d56ade} +#else +{0x34c912d2b3900, 0x14d40850dcbe, 0x672a3eab48ffe, 0x2b790affecf8c, 0x2ba92928eab} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xef13,0xa8dc,0x8ceb,0xe405,0xe2f5,0xfda5,0x28ac,0x3bbe,0x41e5,0xee91,0xb0ff,0x5f5c,0x1920,0x1e33,0xef67,0x95}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa8dcef13,0xe4058ceb,0xfda5e2f5,0x3bbe28ac,0xee9141e5,0x5f5cb0ff,0x1e331920,0x95ef67}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe4058ceba8dcef13,0x3bbe28acfda5e2f5,0x5f5cb0ffee9141e5,0x95ef671e331920}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6b6e,0x9e93,0xfbce,0xb1b6,0x80bb,0x14b8,0x20ae,0x6bcd,0xb7f4,0xfeff,0xc4a7,0xceb3,0xd874,0x65bf,0xe003,0xe9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9e936b6e,0xb1b6fbce,0x14b880bb,0x6bcd20ae,0xfeffb7f4,0xceb3c4a7,0x65bfd874,0xe9e003}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb1b6fbce9e936b6e,0x6bcd20ae14b880bb,0xceb3c4a7feffb7f4,0xe9e00365bfd874}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x47ff,0xc988,0x46b6,0x5236,0x9694,0xec04,0xd563,0x7d56,0x6833,0xc48f,0x8b0a,0xe195,0xb64e,0xe957,0x58b2,0xdb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc98847ff,0x523646b6,0xec049694,0x7d56d563,0xc48f6833,0xe1958b0a,0xe957b64e,0xdb58b2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x523646b6c98847ff,0x7d56d563ec049694,0xe1958b0ac48f6833,0xdb58b2e957b64e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x10ed,0x5723,0x7314,0x1bfa,0x1d0a,0x25a,0xd753,0xc441,0xbe1a,0x116e,0x4f00,0xa0a3,0xe6df,0xe1cc,0x1098,0x6a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x572310ed,0x1bfa7314,0x25a1d0a,0xc441d753,0x116ebe1a,0xa0a34f00,0xe1cce6df,0x6a1098}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1bfa7314572310ed,0xc441d753025a1d0a,0xa0a34f00116ebe1a,0x6a1098e1cce6df}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8d79,0x38f8,0xf94c,0xe776,0x2bdf,0x2d2e,0x4242,0x8677,0xddf0,0x1736,0xa2e3,0x8ee7,0x52ac,0x4bb1,0xbb55,0xa4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0x38f88d79,0xe776f94c,0x2d2e2bdf,0x86774242,0x1736ddf0,0x8ee7a2e3,0x4bb152ac,0xa4bb55}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe776f94c38f88d79,0x867742422d2e2bdf,0x8ee7a2e31736ddf0,0xa4bb554bb152ac}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6774,0xe280,0xc0b8,0xd49d,0x3b88,0x2577,0xc53f,0x7a5d,0x3032,0x4cfb,0xd6b2,0x3ed5,0x27b8,0x584c,0x85b1,0xfc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe2806774,0xd49dc0b8,0x25773b88,0x7a5dc53f,0x4cfb3032,0x3ed5d6b2,0x584c27b8,0xfc85b1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd49dc0b8e2806774,0x7a5dc53f25773b88,0x3ed5d6b24cfb3032,0xfc85b1584c27b8}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc139,0x25cf,0xd25b,0xadb9,0xbd39,0xaa20,0x8867,0x4e7a,0x8b24,0xa81f,0x412a,0xacfc,0xee2d,0xab0c,0x1d50,0x20}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x25cfc139,0xadb9d25b,0xaa20bd39,0x4e7a8867,0xa81f8b24,0xacfc412a,0xab0cee2d,0x201d50}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xadb9d25b25cfc139,0x4e7a8867aa20bd39,0xacfc412aa81f8b24,0x201d50ab0cee2d}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7287,0xc707,0x6b3,0x1889,0xd420,0xd2d1,0xbdbd,0x7988,0x220f,0xe8c9,0x5d1c,0x7118,0xad53,0xb44e,0x44aa,0x5b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc7077287,0x188906b3,0xd2d1d420,0x7988bdbd,0xe8c9220f,0x71185d1c,0xb44ead53,0x5b44aa}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x188906b3c7077287,0x7988bdbdd2d1d420,0x71185d1ce8c9220f,0x5b44aab44ead53}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xef13,0xa8dc,0x8ceb,0xe405,0xe2f5,0xfda5,0x28ac,0x3bbe,0x41e5,0xee91,0xb0ff,0x5f5c,0x1920,0x1e33,0xef67,0x95}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa8dcef13,0xe4058ceb,0xfda5e2f5,0x3bbe28ac,0xee9141e5,0x5f5cb0ff,0x1e331920,0x95ef67}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe4058ceba8dcef13,0x3bbe28acfda5e2f5,0x5f5cb0ffee9141e5,0x95ef671e331920}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6b6e,0x9e93,0xfbce,0xb1b6,0x80bb,0x14b8,0x20ae,0x6bcd,0xb7f4,0xfeff,0xc4a7,0xceb3,0xd874,0x65bf,0xe003,0xe9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9e936b6e,0xb1b6fbce,0x14b880bb,0x6bcd20ae,0xfeffb7f4,0xceb3c4a7,0x65bfd874,0xe9e003}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb1b6fbce9e936b6e,0x6bcd20ae14b880bb,0xceb3c4a7feffb7f4,0xe9e00365bfd874}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x47ff,0xc988,0x46b6,0x5236,0x9694,0xec04,0xd563,0x7d56,0x6833,0xc48f,0x8b0a,0xe195,0xb64e,0xe957,0x58b2,0xdb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc98847ff,0x523646b6,0xec049694,0x7d56d563,0xc48f6833,0xe1958b0a,0xe957b64e,0xdb58b2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x523646b6c98847ff,0x7d56d563ec049694,0xe1958b0ac48f6833,0xdb58b2e957b64e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x10ed,0x5723,0x7314,0x1bfa,0x1d0a,0x25a,0xd753,0xc441,0xbe1a,0x116e,0x4f00,0xa0a3,0xe6df,0xe1cc,0x1098,0x6a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x572310ed,0x1bfa7314,0x25a1d0a,0xc441d753,0x116ebe1a,0xa0a34f00,0xe1cce6df,0x6a1098}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1bfa7314572310ed,0xc441d753025a1d0a,0xa0a34f00116ebe1a,0x6a1098e1cce6df}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7029,0x8b30,0x7529,0x9941,0x2be8,0x7b3f,0xe3d7,0x4553,0x7065,0x7bef,0xb49c,0xc80b,0xfa3e,0x950c,0x1ece,0x18}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8b307029,0x99417529,0x7b3f2be8,0x4553e3d7,0x7bef7065,0xc80bb49c,0x950cfa3e,0x181ece}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x994175298b307029,0x4553e3d77b3f2be8,0xc80bb49c7bef7065,0x181ece950cfa3e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xb399,0x92ce,0x85e8,0x7c82,0x86eb,0xb186,0x8924,0x64f1,0xd93,0x5e9a,0x3165,0x4196,0x5e79,0x158,0x55d5,0x31}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x92ceb399,0x7c8285e8,0xb18686eb,0x64f18924,0x5e9a0d93,0x41963165,0x1585e79,0x3155d5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7c8285e892ceb399,0x64f18924b18686eb,0x419631655e9a0d93,0x3155d501585e79}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xda47,0x29f8,0x7209,0xaa0c,0xfc22,0x39c9,0x6e19,0x517c,0xc94e,0xcfa4,0x20fc,0x1edc,0xe0d0,0x396d,0x85f0,0xdf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x29f8da47,0xaa0c7209,0x39c9fc22,0x517c6e19,0xcfa4c94e,0x1edc20fc,0x396de0d0,0xdf85f0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaa0c720929f8da47,0x517c6e1939c9fc22,0x1edc20fccfa4c94e,0xdf85f0396de0d0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8fd7,0x74cf,0x8ad6,0x66be,0xd417,0x84c0,0x1c28,0xbaac,0x8f9a,0x8410,0x4b63,0x37f4,0x5c1,0x6af3,0xe131,0xe7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x74cf8fd7,0x66be8ad6,0x84c0d417,0xbaac1c28,0x84108f9a,0x37f44b63,0x6af305c1,0xe7e131}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x66be8ad674cf8fd7,0xbaac1c2884c0d417,0x37f44b6384108f9a,0xe7e1316af305c1}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x13f5, 0x8b7, 0x1873, 0x144a, 0x1a89, 0x13f9, 0xd49, 0x6f2, 0x3bb, 0x102, 0x1ecb, 0x180b, 0x4d5, 0x608, 0x119, 0x5e6, 0x751, 0x961, 0xd37, 0x5} +#elif RADIX == 32 +{0x1a2de7eb, 0x9a2561c, 0x933f9d4, 0xeecde4d, 0x1f658408, 0x104d5c05, 0x19823260, 0xb09d44b, 0x69ba} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3a89a2561cd16f3f, 0xc2043bb37935267f, 0x464c104d5c05fb2, 0x1bb4dd2c27512f3} +#else +{0x4d12b0e68b79f, 0x337935267f3a8, 0x380bf65840877, 0x4bcc119304135, 0x35da6e9613a8} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x1e96, 0x1a2d, 0x161c, 0xd12, 0xea2, 0xcfe, 0x1352, 0x19bc, 0x10ee, 0x1840, 0x1fb2, 0xe02, 0x135, 0x982, 0x1046, 0x979, 0x9d4, 0x1a58, 0x1b4d, 0x9} +#elif RADIX == 32 +{0x68b7d2d, 0x2689587, 0xa4cfe75, 0x3bb3793, 0xfd96102, 0x4135701, 0x1e608c98, 0x12c27512, 0x4da6e} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xcea2689587345be9, 0xb0810eecde4d499f, 
0xc1193041357017ec, 0x22ed374b09d44bc} +#else +{0x1344ac39a2df4, 0x6cde4d499fcea, 0x2e02fd961021d, 0x12f30464c104d, 0x39769ba584ea} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0xa82, 0x1d2d, 0x15b8, 0x404, 0x1a32, 0xaf9, 0xa86, 0xddf, 0x14bf, 0x100c, 0xc42, 0xa89, 0x1df, 0x82f, 0x1f07, 0x782, 0x664, 0x1ba5, 0x5d7, 0x2} +#elif RADIX == 32 +{0x74b5504, 0x1220256e, 0x10caf9d1, 0x12fdbbea, 0x16214032, 0x1e1df544, 0xbe0e82, 0x1d29990f, 0x22ebe} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3a3220256e3a5aa8, 0xa0194bf6efaa195f, 0x7c1d05e1df544b10, 0xb175f74a6643c1} +#else +{0x11012b71d2d54, 0x76efaa195f3a3, 0x6a89621403297, 0xf05f07417877, 0x58bafba5332} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x5a1, 0x46a, 0x17ab, 0x1cfa, 0x547, 0x1b9c, 0xda5, 0x141e, 0x216, 0x1f49, 0xaca, 0x15a1, 0xfe0, 0x1afb, 0x1a47, 0x133d, 0x1887, 0x590, 0xbc2, 0x1} +#elif RADIX == 32 +{0x191a8b42, 0x7e7d5ea, 0x14bb9c2a, 0x85a83cd, 0x15657d24, 0x16fe0ad0, 0xf748faf, 0xc8621e6, 0x15e11} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x8547e7d5eac8d45a, 0xbe92216a0f369773, 0xe91f5f6fe0ad0ab2, 0x5af08b2188799e} +#else +{0x3f3eaf5646a2d, 0x6a0f369773854, 0x15a15657d2442, 0x667ba47d7dbf8, 0x2d784590c43} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1311, 0x910, 0x413, 0x1d16, 0x14f7, 0x19c9, 0x14d3, 0x1504, 0x776, 0x1c2c, 0x15b0, 0xc6e, 0x36b, 0x1777, 0x1ed2, 0xb34, 0x1281, 0x1281, 0xd0f, 0x4} +#elif RADIX == 32 +{0x1a442622, 0x17e8b104, 0x1a79c9a7, 0x1ddaa094, 0xad870b0, 0xe36b637, 0xd3da577, 0x140ca056, 0x4687c} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x34f7e8b104d22131, 0x3858776a82534f39, 0x7b4aeee36b63756c, 0x7343e50328159a} +#else +{0x3f45882691098, 0x6a82534f3934f, 0x6c6ead870b0ee, 0x5669ed2bbb8da, 0x2b9a1f281940} +#endif +#endif +, +#if 0 +#elif RADIX == 
16 +{0x12d2, 0x6d8, 0x1e2c, 0x6f9, 0x5e8, 0x4e5, 0x32c, 0x58d, 0x1bda, 0x16f9, 0x8b5, 0x3c0, 0x10c, 0xb18, 0x450, 0x834, 0x3b7, 0x8d7, 0x15bf, 0x0} +#elif RADIX == 32 +{0x1b625a4, 0x837cf8b, 0x584e52f, 0xf68b1a3, 0x45adbe7, 0x1010c1e0, 0xd08a0b1, 0x6b8edd0, 0xadfa} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xa5e837cf8b0db12d, 0x6df3bda2c68cb09c, 0x114163010c1e022d, 0xa56fd1ae3b741a} +#else +{0x41be7c586d896, 0x22c68cb09ca5e, 0x3c045adbe77b, 0x506845058c043, 0x2d2b7e8d71db} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x5f, 0x444, 0x49e, 0xae7, 0x248, 0x1a37, 0x9b6, 0xc28, 0x464, 0x19b7, 0x1560, 0xd7a, 0x2e3, 0x81a, 0x6f5, 0x5f9, 0x1818, 0x164c, 0x1713, 0x7} +#elif RADIX == 32 +{0x111100bf, 0x8573927, 0x16da3712, 0x11918509, 0xab066dc, 0x142e36bd, 0x1e4dea81, 0x1266060b, 0x2b89d} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xe248573927888805, 0x336e46461426db46, 0x9bd50342e36bd558, 0x4edc4ec998182fc} +#else +{0x42b9c93c44402, 0x461426db46e24, 0x6d7aab066dc8c, 0xbf26f540d0b8, 0x4f6e2764cc0c} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x19b1, 0x1912, 0x1eb, 0x1cbc, 0x210, 0x17cf, 0x1b9e, 0x754, 0x38c, 0x816, 0x1431, 0x79a, 0xa57, 0x15ff, 0x756, 0xa60, 0x1064, 0x162f, 0x1e5e, 0x0} +#elif RADIX == 32 +{0x1e44b362, 0x10e5e07a, 0x13d7cf10, 0xe30ea9b, 0xa18a058, 0x1ea573cd, 0x180ead5f, 0x117c1914, 0xf2f5} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xe210e5e07af2259b, 0x502c38c3aa6e7af9, 0x1d5abfea573cd50c, 0x5797ac5f064530} +#else +{0x72f03d7912cd, 0x43aa6e7af9e21, 0x679aa18a05871, 0x14c0756affa95, 0x2abcbd62f832} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5ff1,0xa594,0x52b3,0xe75d,0xdd09,0xd267,0x7d25,0xd976,0xbc5,0xc1a8,0x9aae,0x10bf,0xe894,0x8de3,0xae84,0x70}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5945ff1,0xe75d52b3,0xd267dd09,0xd9767d25,0xc1a80bc5,0x10bf9aae,0x8de3e894,0x70ae84}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe75d52b3a5945ff1,0xd9767d25d267dd09,0x10bf9aaec1a80bc5,0x70ae848de3e894}}} 
+#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x11e6,0xb9e0,0x96c0,0xa6d7,0xee81,0x4b6,0x2f44,0xf4c5,0x4597,0xe75d,0x5b93,0xebb6,0x59c6,0xc08e,0x3084,0x16}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb9e011e6,0xa6d796c0,0x4b6ee81,0xf4c52f44,0xe75d4597,0xebb65b93,0xc08e59c6,0x163084}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa6d796c0b9e011e6,0xf4c52f4404b6ee81,0xebb65b93e75d4597,0x163084c08e59c6}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf41d,0x3463,0x6031,0x479f,0x6fe7,0x159f,0x1e6b,0x404e,0xe302,0x88a8,0xc1f7,0xad84,0x8b50,0x3175,0x3d66,0xab}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3463f41d,0x479f6031,0x159f6fe7,0x404e1e6b,0x88a8e302,0xad84c1f7,0x31758b50,0xab3d66}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x479f60313463f41d,0x404e1e6b159f6fe7,0xad84c1f788a8e302,0xab3d6631758b50}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa00f,0x5a6b,0xad4c,0x18a2,0x22f6,0x2d98,0x82da,0x2689,0xf43a,0x3e57,0x6551,0xef40,0x176b,0x721c,0x517b,0x8f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a6ba00f,0x18a2ad4c,0x2d9822f6,0x268982da,0x3e57f43a,0xef406551,0x721c176b,0x8f517b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x18a2ad4c5a6ba00f,0x268982da2d9822f6,0xef4065513e57f43a,0x8f517b721c176b}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +#elif 
GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xffc3,0x1fbe,0xc7ef,0x56c4,0x2834,0xfa5c,0x36aa,0x1ced,0x9076,0xa31d,0x8890,0xe52,0x87d2,0xef68,0x98bc,0xc2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1fbeffc3,0x56c4c7ef,0xfa5c2834,0x1ced36aa,0xa31d9076,0xe528890,0xef6887d2,0xc298bc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x56c4c7ef1fbeffc3,0x1ced36aafa5c2834,0xe528890a31d9076,0xc298bcef6887d2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4098,0xd740,0xb5c6,0x8109,0x299,0x3a8c,0x81c2,0xc0d0,0xe848,0x9243,0x8996,0x656a,0x8c87,0x6c99,0xb9f5,0x4c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd7404098,0x8109b5c6,0x3a8c0299,0xc0d081c2,0x9243e848,0x656a8996,0x6c998c87,0x4cb9f5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8109b5c6d7404098,0xc0d081c23a8c0299,0x656a89969243e848,0x4cb9f56c998c87}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x712b,0xfeed,0x55b5,0xc5fe,0xe867,0x77a9,0x1775,0x7814,0x4780,0x73b1,0x86b1,0x3973,0x797a,0x7f0b,0x1fa,0xb0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfeed712b,0xc5fe55b5,0x77a9e867,0x78141775,0x73b14780,0x397386b1,0x7f0b797a,0xb001fa}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc5fe55b5feed712b,0x7814177577a9e867,0x397386b173b14780,0xb001fa7f0b797a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3d,0xe041,0x3810,0xa93b,0xd7cb,0x5a3,0xc955,0xe312,0x6f89,0x5ce2,0x776f,0xf1ad,0x782d,0x1097,0x6743,0x3d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe041003d,0xa93b3810,0x5a3d7cb,0xe312c955,0x5ce26f89,0xf1ad776f,0x1097782d,0x3d6743}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa93b3810e041003d,0xe312c95505a3d7cb,0xf1ad776f5ce26f89,0x3d67431097782d}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5ff1,0xa594,0x52b3,0xe75d,0xdd09,0xd267,0x7d25,0xd976,0xbc5,0xc1a8,0x9aae,0x10bf,0xe894,0x8de3,0xae84,0x70}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5945ff1,0xe75d52b3,0xd267dd09,0xd9767d25,0xc1a80bc5,0x10bf9aae,0x8de3e894,0x70ae84}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe75d52b3a5945ff1,0xd9767d25d267dd09,0x10bf9aaec1a80bc5,0x70ae848de3e894}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x11e6,0xb9e0,0x96c0,0xa6d7,0xee81,0x4b6,0x2f44,0xf4c5,0x4597,0xe75d,0x5b93,0xebb6,0x59c6,0xc08e,0x3084,0x16}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb9e011e6,0xa6d796c0,0x4b6ee81,0xf4c52f44,0xe75d4597,0xebb65b93,0xc08e59c6,0x163084}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = 
(mp_limb_t[]) {0xa6d796c0b9e011e6,0xf4c52f4404b6ee81,0xebb65b93e75d4597,0x163084c08e59c6}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf41d,0x3463,0x6031,0x479f,0x6fe7,0x159f,0x1e6b,0x404e,0xe302,0x88a8,0xc1f7,0xad84,0x8b50,0x3175,0x3d66,0xab}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3463f41d,0x479f6031,0x159f6fe7,0x404e1e6b,0x88a8e302,0xad84c1f7,0x31758b50,0xab3d66}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x479f60313463f41d,0x404e1e6b159f6fe7,0xad84c1f788a8e302,0xab3d6631758b50}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa00f,0x5a6b,0xad4c,0x18a2,0x22f6,0x2d98,0x82da,0x2689,0xf43a,0x3e57,0x6551,0xef40,0x176b,0x721c,0x517b,0x8f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a6ba00f,0x18a2ad4c,0x2d9822f6,0x268982da,0x3e57f43a,0xef406551,0x721c176b,0x8f517b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x18a2ad4c5a6ba00f,0x268982da2d9822f6,0xef4065513e57f43a,0x8f517b721c176b}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x5d2b,0x1bd6,0xcc3f,0x7e74,0x4fea,0xfba0,0x9f84,0xd6d4,0x42a1,0x88d1,0x68b1,0x4f4e,0x13ec,0xa60c,0xb13b,0x2e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1bd65d2b,0x7e74cc3f,0xfba04fea,0xd6d49f84,0x88d142a1,0x4f4e68b1,0xa60c13ec,0x2eb13b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7e74cc3f1bd65d2b,0xd6d49f84fba04fea,0x4f4e68b188d142a1,0x2eb13ba60c13ec}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7b4f,0x9448,0xaa16,0x649a,0xe4b4,0x3bc2,0xd3fd,0x8df1,0x931e,0x4078,0x8caa,0xe896,0xdeec,0xbed5,0x166e,0x7c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x94487b4f,0x649aaa16,0x3bc2e4b4,0x8df1d3fd,0x4078931e,0xe8968caa,0xbed5deec,0x7c166e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x649aaa1694487b4f,0x8df1d3fd3bc2e4b4,0xe8968caa4078931e,0x7c166ebed5deec}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x101d,0x51aa,0xd32d,0x2b40,0x7ba,0xc5f8,0x257a,0xb323,0x9bde,0x20c5,0xdc8f,0x2c3d,0x4e7b,0x54a6,0x17b9,0x99}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x51aa101d,0x2b40d32d,0xc5f807ba,0xb323257a,0x20c59bde,0x2c3ddc8f,0x54a64e7b,0x9917b9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2b40d32d51aa101d,0xb323257ac5f807ba,0x2c3ddc8f20c59bde,0x9917b954a64e7b}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa2d5,0xe429,0x33c0,0x818b,0xb015,0x45f,0x607b,0x292b,0xbd5e,0x772e,0x974e,0xb0b1,0xec13,0x59f3,0x4ec4,0xd1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe429a2d5,0x818b33c0,0x45fb015,0x292b607b,0x772ebd5e,0xb0b1974e,0x59f3ec13,0xd14ec4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x818b33c0e429a2d5,0x292b607b045fb015,0xb0b1974e772ebd5e,0xd14ec459f3ec13}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0xa72, 0x186f, 0x81c, 0x105c, 0x151, 0x1451, 0x1b96, 0x4d1, 0x12be, 0x3bf, 0x996, 0x1130, 0x1460, 0x1ed8, 0xc2a, 0x1c23, 0x1ee, 0x3f4, 0xbe2, 0x9} +#elif RADIX == 32 +{0x61bd4e5, 0x1182e207, 0x12d4510a, 0xaf89a3b, 0x4cb0efe, 0x11460898, 0x8d855ed, 0x1fa07bb8, 0x45f10} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x215182e20730dea7, 0x877f2be268ee5a8a, 0xb0abdb1460898265, 0xeaf887e81eee11} +#else +{0xc17103986f53, 0x6268ee5a8a215, 0x11304cb0efe57, 0x3846c2af6c518, 0x2f57c43f40f7} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x1c36, 0x61b, 0x207, 0xc17, 0x854, 0x1514, 0xee5, 0x1134, 0x1caf, 0x10ef, 0x265, 0x44c, 0x518, 0x17b6, 0x1b0a, 0x1708, 0x7b, 0x10fd, 0xaf8, 0x3} +#elif RADIX == 32 +{0x1986f86c, 0x1460b881, 0x1cb51442, 0x12be268e, 0x132c3bf, 0xc518226, 0x236157b, 0x7e81eee, 0x357c4} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x885460b881cc37c3, 0x61dfcaf89a3b96a2, 0x6c2af6c518226099, 0x1fabe21fa07bb84} +#else +{0x2305c40e61be1, 0x789a3b96a2885, 0x44c132c3bf95, 0x6e11b0abdb146, 0x37d5f10fd03d} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x1c07, 0x15d6, 0x526, 0xde7, 0x149b, 0x719, 0x1786, 0x1272, 0x18b, 0x1bac, 0xf74, 0x1588, 0xe6f, 0x24c, 0x1204, 0x1e9d, 0x13bb, 0x1ccb, 0x78d, 0x9} +#elif RADIX == 32 +{0x1575b80f, 0x1b6f3949, 0x10c719a4, 0x62e4e57, 0x7ba6eb0, 0x18e6fac4, 0x7640824, 0x65ceefd, 0x43c6f} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x349b6f3949abadc0, 0x375818b9395e18e3, 0xc810498e6fac43dd, 0x279e379973bbf4e} +#else +{0x5b79ca4d5d6e0, 0x39395e18e3349, 0x75887ba6eb031, 0x7d3b20412639b, 0x13cf1bccb9dd} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xddf, 0x238, 0xe4b, 0x1958, 0xe6e, 0x1059, 0x133, 0x1e11, 0x5ae, 0x2ab, 0x1044, 0xdd, 0xe9d, 0x1aa8, 0x15e2, 0xc9b, 0xaa6, 0x3c8, 0x10ac, 0x0} +#elif RADIX == 32 +{0x188e1bbe, 0xecac392, 0x6705973, 0x16bbc221, 0x18220aac, 0x10e9d06e, 0x6ebc5aa, 0x1e42a999, 0x8560} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x2e6ecac392c470dd, 0x5565aef0884ce0b, 0xd78b550e9d06ec11, 0x4b42b0790aa664d} +#else +{0x76561c962386e, 0x6f0884ce0b2e6, 0x20dd8220aacb5, 0x19375e2d543a7, 0x4da1583c8553} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 
+{0x192, 0x1c6d, 0x18a4, 0x152, 0x1aa9, 0xec4, 0x1be8, 0x1209, 0x7f, 0x797, 0x1295, 0x1433, 0x1a75, 0x15a, 0x1d64, 0x146c, 0x12df, 0x10af, 0x188f, 0x1} +#elif RADIX == 32 +{0x71b4324, 0x90a9629, 0x1d0ec4d5, 0x1fe413b, 0x194a9e5c, 0x15a75a19, 0x1b3ac815, 0x57cb7e8, 0x1c47c} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x9aa90a962938da19, 0x4f2e07f904efa1d8, 0x75902b5a75a19ca5, 0xae23e15f2dfa36} +#else +{0x4854b149c6d0c, 0x7904efa1d89aa, 0x343394a9e5c0f, 0x68d9d640ad69d, 0x2d711f0af96f} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x129c, 0xe1d, 0x1bd3, 0xf2a, 0x937, 0xf81, 0xa47, 0x186b, 0x1bbe, 0x1c6d, 0x1edd, 0x1b51, 0xa10, 0x167a, 0x1f0b, 0x374, 0x720, 0x1547, 0x726, 0x1} +#elif RADIX == 32 +{0x1b876538, 0x177956f4, 0x8ef8149, 0xefb0d6a, 0x1f6ef1b7, 0x14a10da8, 0x1d3e1767, 0xa39c806, 0x13935} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x29377956f4dc3b29, 0x78dbbbec35a91df0, 0x7c2ecf4a10da8fb7, 0x3c9c9aa8e7201ba} +#else +{0x3bcab7a6e1d94, 0x6c35a91df0293, 0x1b51f6ef1b777, 0x6e9f0bb3d284, 0x464e4d547390} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x12cc, 0x495, 0x1a14, 0x1db0, 0xb66, 0x76a, 0x1a77, 0xaf6, 0x1656, 0x1ad7, 0xb35, 0x4b1, 0xffa, 0x37b, 0xabf, 0xa5c, 0xdc9, 0x1a74, 0x11c9, 0x8} +#elif RADIX == 32 +{0x1256599, 0x6ed8685, 0xee76a5b, 0x19595eda, 0x159aeb5e, 0x16ffa258, 0x17157e37, 0x13a37254, 0x38e4e} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x4b66ed8685092b2c, 0x75af65657b69dced, 0x2afc6f6ffa258acd, 0x4047274e8dc952e} +#else +{0x376c342849596, 0x657b69dced4b6, 0x44b159aeb5eca, 0x54b8abf1bdbfe, 0x202393a746e4} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1379, 0x125e, 0x1c56, 0x1811, 0x144, 0x2a8, 0xbb3, 0x2ca, 0x6d2, 0x565, 0x91e, 0x1280, 0x1b4f, 0x51a, 0x1eb7, 0x35a, 0x14fe, 0x1b59, 0x182e, 0x2} +#elif RADIX == 32 +{0x1497a6f2, 0x4c08f15, 0x1662a80a, 0x1b48594b, 0x48f1594, 0x15b4f940, 0x16bd6e51, 0x1acd3f86, 0x2c176} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x144c08f15a4bd37, 0x8aca6d21652ecc55, 0x7adca35b4f940247, 0x2e60bb6b34fe1ad} +#else +{0x260478ad25e9b, 0x21652ecc55014, 0x728048f1594da, 0x6b5eb728d6d3, 0x3f305db59a7f} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} 
+#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x323f,0x7ed2,0x4455,0x415c,0x5876,0x4282,0x76ef,0xcc11,0xbdab,0x1142,0x4729,0x3405,0x1155,0x1779,0x7c1f,0xc5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7ed2323f,0x415c4455,0x42825876,0xcc1176ef,0x1142bdab,0x34054729,0x17791155,0xc57c1f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x415c44557ed2323f,0xcc1176ef42825876,0x340547291142bdab,0xc57c1f17791155}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc9ce,0xa958,0x4fac,0x8a69,0x31e1,0x9997,0x1a17,0x8c19,0xd118,0x68c7,0xc0eb,0x8113,0x62fd,0xf8c8,0xc94e,0x3f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa958c9ce,0x8a694fac,0x999731e1,0x8c191a17,0x68c7d118,0x8113c0eb,0xf8c862fd,0x3fc94e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8a694faca958c9ce,0x8c191a17999731e1,0x8113c0eb68c7d118,0x3fc94ef8c862fd}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x41cb,0xed47,0x8ea0,0x127c,0x69d5,0xf74c,0xce8c,0x3826,0x3da2,0x6bf3,0x56b,0xe695,0xc45c,0x84ac,0x5817,0xd0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xed4741cb,0x127c8ea0,0xf74c69d5,0x3826ce8c,0x6bf33da2,0xe695056b,0x84acc45c,0xd05817}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x127c8ea0ed4741cb,0x3826ce8cf74c69d5,0xe695056b6bf33da2,0xd0581784acc45c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcdc1,0x812d,0xbbaa,0xbea3,0xa789,0xbd7d,0x8910,0x33ee,0x4254,0xeebd,0xb8d6,0xcbfa,0xeeaa,0xe886,0x83e0,0x3a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x812dcdc1,0xbea3bbaa,0xbd7da789,0x33ee8910,0xeebd4254,0xcbfab8d6,0xe886eeaa,0x3a83e0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbea3bbaa812dcdc1,0x33ee8910bd7da789,0xcbfab8d6eebd4254,0x3a83e0e886eeaa}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5286,0x497a,0x2196,0xaa2d,0xa98a,0x7b17,0xa6e9,0x6da0,0x73f5,0xe349,0x83ae,0x7c6e,0x31,0xee2e,0x6931,0x32}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x497a5286,0xaa2d2196,0x7b17a98a,0x6da0a6e9,0xe34973f5,0x7c6e83ae,0xee2e0031,0x326931}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaa2d2196497a5286,0x6da0a6e97b17a98a,0x7c6e83aee34973f5,0x326931ee2e0031}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc8dc,0x8642,0xe46d,0x6a64,0x4c13,0x90e7,0xd82b,0x9b58,0x64b4,0x7381,0x5218,0x99b4,0x5926,0x5d78,0x95e2,0xb7}}} 
+#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8642c8dc,0x6a64e46d,0x90e74c13,0x9b58d82b,0x738164b4,0x99b45218,0x5d785926,0xb795e2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6a64e46d8642c8dc,0x9b58d82b90e74c13,0x99b45218738164b4,0xb795e25d785926}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7363,0xbe7a,0xc901,0xb6e0,0x6a56,0x779d,0xbc42,0xd659,0x3476,0x3868,0x12f4,0x923a,0x6fa8,0x5412,0xd5f9,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbe7a7363,0xb6e0c901,0x779d6a56,0xd659bc42,0x38683476,0x923a12f4,0x54126fa8,0x3d5f9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6e0c901be7a7363,0xd659bc42779d6a56,0x923a12f438683476,0x3d5f954126fa8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xedb4,0x4fd4,0x5c14,0x14b,0xf702,0xd6be,0x9c11,0x4bb,0x9f10,0xde25,0xb159,0x5085,0xb0a9,0x6f42,0xc4d3,0x1d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4fd4edb4,0x14b5c14,0xd6bef702,0x4bb9c11,0xde259f10,0x5085b159,0x6f42b0a9,0x1dc4d3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x14b5c144fd4edb4,0x4bb9c11d6bef702,0x5085b159de259f10,0x1dc4d36f42b0a9}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe873,0x4974,0xc7ed,0x6b01,0xaffb,0xf3d4,0xc641,0x20d6,0xca22,0x2d69,0x9f01,0x451e,0xfa05,0xef65,0xb43b,0xde}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4974e873,0x6b01c7ed,0xf3d4affb,0x20d6c641,0x2d69ca22,0x451e9f01,0xef65fa05,0xdeb43b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6b01c7ed4974e873,0x20d6c641f3d4affb,0x451e9f012d69ca22,0xdeb43bef65fa05}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8c9d,0x4185,0x36fe,0x491f,0x95a9,0x8862,0x43bd,0x29a6,0xcb89,0xc797,0xed0b,0x6dc5,0x9057,0xabed,0x2a06,0xfc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x41858c9d,0x491f36fe,0x886295a9,0x29a643bd,0xc797cb89,0x6dc5ed0b,0xabed9057,0xfc2a06}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x491f36fe41858c9d,0x29a643bd886295a9,0x6dc5ed0bc797cb89,0xfc2a06abed9057}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x323f,0x7ed2,0x4455,0x415c,0x5876,0x4282,0x76ef,0xcc11,0xbdab,0x1142,0x4729,0x3405,0x1155,0x1779,0x7c1f,0xc5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7ed2323f,0x415c4455,0x42825876,0xcc1176ef,0x1142bdab,0x34054729,0x17791155,0xc57c1f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 
4, ._mp_d = (mp_limb_t[]) {0x415c44557ed2323f,0xcc1176ef42825876,0x340547291142bdab,0xc57c1f17791155}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc9ce,0xa958,0x4fac,0x8a69,0x31e1,0x9997,0x1a17,0x8c19,0xd118,0x68c7,0xc0eb,0x8113,0x62fd,0xf8c8,0xc94e,0x3f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa958c9ce,0x8a694fac,0x999731e1,0x8c191a17,0x68c7d118,0x8113c0eb,0xf8c862fd,0x3fc94e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8a694faca958c9ce,0x8c191a17999731e1,0x8113c0eb68c7d118,0x3fc94ef8c862fd}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x41cb,0xed47,0x8ea0,0x127c,0x69d5,0xf74c,0xce8c,0x3826,0x3da2,0x6bf3,0x56b,0xe695,0xc45c,0x84ac,0x5817,0xd0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xed4741cb,0x127c8ea0,0xf74c69d5,0x3826ce8c,0x6bf33da2,0xe695056b,0x84acc45c,0xd05817}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x127c8ea0ed4741cb,0x3826ce8cf74c69d5,0xe695056b6bf33da2,0xd0581784acc45c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcdc1,0x812d,0xbbaa,0xbea3,0xa789,0xbd7d,0x8910,0x33ee,0x4254,0xeebd,0xb8d6,0xcbfa,0xeeaa,0xe886,0x83e0,0x3a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x812dcdc1,0xbea3bbaa,0xbd7da789,0x33ee8910,0xeebd4254,0xcbfab8d6,0xe886eeaa,0x3a83e0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbea3bbaa812dcdc1,0x33ee8910bd7da789,0xcbfab8d6eebd4254,0x3a83e0e886eeaa}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2943,0x24bd,0x90cb,0x5516,0xd4c5,0xbd8b,0x5374,0xb6d0,0xb9fa,0x71a4,0x41d7,0xbe37,0x18,0xf717,0x3498,0x99}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x24bd2943,0x551690cb,0xbd8bd4c5,0xb6d05374,0x71a4b9fa,0xbe3741d7,0xf7170018,0x993498}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x551690cb24bd2943,0xb6d05374bd8bd4c5,0xbe3741d771a4b9fa,0x993498f7170018}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x646e,0xc321,0x7236,0xb532,0xa609,0xc873,0x6c15,0x4dac,0xb25a,0x39c0,0x290c,0x4cda,0x2c93,0x2ebc,0xcaf1,0xdb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc321646e,0xb5327236,0xc873a609,0x4dac6c15,0x39c0b25a,0x4cda290c,0x2ebc2c93,0xdbcaf1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb5327236c321646e,0x4dac6c15c873a609,0x4cda290c39c0b25a,0xdbcaf12ebc2c93}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca5b,0x1036,0x34a6,0x490c,0xc0ed,0x771b,0x1590,0x1c17,0x4855,0x977e,0x8054,0xdb98,0xb26f,0x1175,0x7722,0xfe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1036ca5b,0x490c34a6,0x771bc0ed,0x1c171590,0x977e4855,0xdb988054,0x1175b26f,0xfe7722}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x490c34a61036ca5b,0x1c171590771bc0ed,0xdb988054977e4855,0xfe77221175b26f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf543,0x821c,0xae0a,0xb0cb,0x642d,0x5a80,0xd2bf,0x2340,0xc8f,0xe1ce,0x4e38,0xdace,0x3445,0x807e,0x9bc4,0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x821cf543,0xb0cbae0a,0x5a80642d,0x2340d2bf,0xe1ce0c8f,0xdace4e38,0x807e3445,0x59bc4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb0cbae0a821cf543,0x2340d2bf5a80642d,0xdace4e38e1ce0c8f,0x59bc4807e3445}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6e85,0xc3dc,0xfd4,0x39a7,0x5158,0x777b,0xb83,0xb0fe,0x55de,0x45b3,0x103f,0x53dc,0x27e2,0xb6cb,0x2b18,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc3dc6e85,0x39a70fd4,0x777b5158,0xb0fe0b83,0x45b355de,0x53dc103f,0xb6cb27e2,0x12b18}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x39a70fd4c3dc6e85,0xb0fe0b83777b5158,0x53dc103f45b355de,0x12b18b6cb27e2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x35a5,0xefc9,0xcb59,0xb6f3,0x3f12,0x88e4,0xea6f,0xe3e8,0xb7aa,0x6881,0x7fab,0x2467,0x4d90,0xee8a,0x88dd,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xefc935a5,0xb6f3cb59,0x88e43f12,0xe3e8ea6f,0x6881b7aa,0x24677fab,0xee8a4d90,0x188dd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f3cb59efc935a5,0xe3e8ea6f88e43f12,0x24677fab6881b7aa,0x188ddee8a4d90}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x102e, 0x4c4, 0x1586, 0x1184, 0xa12, 0x9ce, 0x1866, 0x433, 0x5ef, 0x82a, 0x13a5, 0xbc6, 0x591, 0x175b, 0x10bc, 0xfa5, 0x109d, 0x8d4, 0x1325, 0x9} +#elif RADIX == 32 +{0x1131205d, 0x128c2561, 0xcc9ce50, 0x17bc8678, 0x9d2a0a8, 0x165915e3, 0x9617975, 0x6a4275f, 0x4992a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xca128c2561898902, 0x50545ef219e19939, 0xc2f2eb65915e34e9, 0x4acc951a909d7d2} +#else +{0x14612b0c4c481, 0x7219e19939ca1, 0x2bc69d2a0a8bd, 0x5f4b0bcbad964, 0x25664a8d484e} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { 
+#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x5a5, 0x1131, 0x561, 0x1461, 0x1284, 0x1273, 0x1e19, 0x190c, 0x117b, 0xa0a, 0x14e9, 0xaf1, 0x1964, 0x5d6, 0xc2f, 0xbe9, 0x427, 0xa35, 0xcc9, 0x3} +#elif RADIX == 32 +{0xc4c4b4a, 0x4a30958, 0x3327394, 0x5ef219e, 0x1a74a82a, 0xd964578, 0x1a585e5d, 0x11a909d7, 0x3664a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x7284a3095862625a, 0x541517bc8678664e, 0xb0bcbad964578d3a, 0x1ab32546a4275f4} +#else +{0x25184ac31312d, 0x3c8678664e728, 0xaf1a74a82a2f, 0x57d2c2f2eb659, 0xd5992a35213} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x1b4a, 0xf6a, 0xadd, 0x302, 0x196b, 0x366, 0x1399, 0xe83, 0x1540, 0xcd, 0x169d, 0x1007, 0xfe6, 0x1fd2, 0xebb, 0x808, 0x1725, 0x1c1e, 0x1009, 0x8} +#elif RADIX == 32 +{0xbdab695, 0xb1812b7, 0x132366cb, 0x1501d073, 0x1b4e8336, 0x4fe6803, 0x21d77fd, 0xf5c950, 0x3804f} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xd96b1812b75ed5b4, 0x419b540741ce646c, 0x3aeffa4fe6803da7, 0x36402783d725404} +#else +{0x58c095baf6ada, 0x741ce646cd96, 0x5007b4e8336a8, 0x5010ebbfe93f9, 0x1b2013c1eb92} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x122a, 0x94e, 0x1927, 0x1701, 0x58e, 0x79, 0x134e, 0xecc, 0xa0f, 0x7be, 0xc39, 0xfb2, 0x1df0, 0x79a, 0x154a, 0x1a4a, 0x23f, 0x3de, 0x1be1, 0x9} +#elif RADIX == 32 +{0x1a53a455, 0xeb80e49, 0x9c0792c, 0x83dd993, 0x61c9ef9, 0x15df07d9, 0x12aa9479, 0x1ef08ff4, 0x4df08} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x258eb80e49d29d22, 0x4f7ca0f7664d380f, 0x5528f35df07d930e, 0x36ef847bc23fd25} +#else +{0x75c0724e94e91, 0x77664d380f258, 0xfb261c9ef941, 0x749554a3cd77c, 0x1b77c23de11f} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1943, 0x2e1, 0x677, 0x614, 0x19e, 0x11e6, 0xde2, 0x104d, 0x551, 0x1455, 0x1d7e, 0xdd, 0x15e0, 0x14c5, 0xeeb, 0x14b5, 0x168f, 0x1a03, 0xa9d, 0x4} +#elif RADIX == 32 +{0x18b87286, 0x1e30a19d, 0x1c51e60c, 0x154609ad, 0x1ebf5154, 0xb5e006e, 0xd5dd74c, 0x101da3e9, 0x454ee} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xc19e30a19dc5c394, 0xa8aa551826b78a3c, 0xbbae98b5e006ef5f, 0x112a7740768fa5a} +#else +{0x71850cee2e1ca, 0x1826b78a3cc19, 0xddebf5154aa, 0x696aeeba62d78, 0x8953ba03b47} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x512, 0xda9, 0x31a, 0x1711, 0x1b65, 0x9f0, 0xe54, 0x1d4a, 0xe1c, 0xc90, 0x1837, 0x1728, 0x15fa, 0xa40, 0xf21, 0x1b43, 0x1716, 0x1277, 0x11a8, 0x9} +#elif RADIX == 32 +{0x136a4a25, 0x5b888c6, 0xa89f0db, 0x1873a94e, 0xc1bb241, 0x15fab94, 0x10de42a4, 0x13bdc5b6, 0x48d44} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x1b65b888c69b5251, 0xd920e1cea539513e, 0xbc854815fab9460d, 0xec6a24ef716da1} +#else +{0x2dc44634da928, 0x4ea539513e1b6, 0x5728c1bb241c3, 0x3686f2152057e, 0x2f6351277b8b} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x822, 0x1a13, 0x11d, 0x10e0, 0x2b9, 0x1d20, 0x19f9, 0x1dc2, 0x1770, 0x135e, 0x1c13, 0x1cba, 0x14df, 0x5c8, 0x1f31, 0x215, 0x16ed, 0x1f7a, 0xc6c, 0x5} +#elif RADIX == 32 +{0xe84d045, 0x19870047, 0x1f3d2015, 0x1dc3b859, 0xe09cd7a, 0x114dfe5d, 0x57e625c, 0x1bd5bb44, 0x6367} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x2b9870047742682, 0xe6bd770ee167e7a4, 0xfcc4b914dfe5d704, 0xcb1b3ef56ed10a} +#else +{0x4c38023ba1341, 0xee167e7a402b, 0x7cbae09cd7aee, 0x442bf312e4537, 0x658d9f7ab76} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x2cc, 0xd50, 0xeda, 0x1c3c, 0x8a6, 0x1659, 0xffb, 0x1cee, 0x1f14, 0x17fe, 0x1860, 0x427, 0x132c, 0x5c0, 0xb9f, 0x143d, 0x639, 0x19f0, 0x1551, 0x7} +#elif RADIX == 32 +{0x13540599, 0x6e1e3b6, 0x1f765945, 0x1c539dcf, 0x1c305ffb, 0x132c213, 0xf573e5c, 0xf818e68, 0x2aa8e} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x28a6e1e3b69aa02c, 0x2ffdf14e773feecb, 0xae7cb8132c213e18, 0x3fd5473e0639a1e} +#else +{0x370f1db4d5016, 0x4e773feecb28a, 0x427c305ffbe2, 0x687ab9f2e04cb, 0x1feaa39f031c} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX 
== 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x900d,0xd052,0xb453,0x206a,0xe61d,0x31f2,0xc579,0xfb21,0xc870,0x2bb,0xf38f,0xf9c1,0x83aa,0x47f1,0x58d1,0xeb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd052900d,0x206ab453,0x31f2e61d,0xfb21c579,0x2bbc870,0xf9c1f38f,0x47f183aa,0xeb58d1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x206ab453d052900d,0xfb21c57931f2e61d,0xf9c1f38f02bbc870,0xeb58d147f183aa}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x727e,0xa5e8,0xebc3,0x9e04,0xf1eb,0x38d7,0x68e0,0x8ea9,0x8f77,0x8331,0x48eb,0x82c0,0xb0a3,0x3583,0xf221,0x54}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5e8727e,0x9e04ebc3,0x38d7f1eb,0x8ea968e0,0x83318f77,0x82c048eb,0x3583b0a3,0x54f221}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9e04ebc3a5e8727e,0x8ea968e038d7f1eb,0x82c048eb83318f77,0x54f2213583b0a3}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdba9,0x49e4,0xc9b3,0x34e9,0x7ab9,0xd876,0xcae0,0xcfb0,0x6177,0x26b3,0x2c98,0x1e30,0x4a38,0x53cc,0x3bdc,0x71}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x49e4dba9,0x34e9c9b3,0xd8767ab9,0xcfb0cae0,0x26b36177,0x1e302c98,0x53cc4a38,0x713bdc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x34e9c9b349e4dba9,0xcfb0cae0d8767ab9,0x1e302c9826b36177,0x713bdc53cc4a38}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6ff3,0x2fad,0x4bac,0xdf95,0x19e2,0xce0d,0x3a86,0x4de,0x378f,0xfd44,0xc70,0x63e,0x7c55,0xb80e,0xa72e,0x14}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2fad6ff3,0xdf954bac,0xce0d19e2,0x4de3a86,0xfd44378f,0x63e0c70,0xb80e7c55,0x14a72e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdf954bac2fad6ff3,0x4de3a86ce0d19e2,0x63e0c70fd44378f,0x14a72eb80e7c55}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}} +#elif GMP_LIMB_BITS == 32 
+{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2417,0x1b00,0xcfe,0x8960,0x662e,0x42d2,0xc00f,0x222c,0x7671,0x278b,0x863f,0xbcac,0xdb9c,0x6e5e,0x4c5a,0x1b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1b002417,0x89600cfe,0x42d2662e,0x222cc00f,0x278b7671,0xbcac863f,0x6e5edb9c,0x1b4c5a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x89600cfe1b002417,0x222cc00f42d2662e,0xbcac863f278b7671,0x1b4c5a6e5edb9c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x21e8,0xd92b,0x5a2d,0xef86,0xf492,0x1483,0x8ae0,0x6b37,0x7f78,0x7b90,0x69c5,0xf4ec,0x2fb9,0x1660,0x8296,0xf8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd92b21e8,0xef865a2d,0x1483f492,0x6b378ae0,0x7b907f78,0xf4ec69c5,0x16602fb9,0xf88296}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xef865a2dd92b21e8,0x6b378ae01483f492,0xf4ec69c57b907f78,0xf8829616602fb9}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x38ff,0x5dc5,0x9aea,0xbc0e,0xbea5,0x775d,0x447b,0xc311,0xf01c,0xb63a,0x15fd,0x162a,0xab76,0x9def,0x2a0d,0xc5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5dc538ff,0xbc0e9aea,0x775dbea5,0xc311447b,0xb63af01c,0x162a15fd,0x9defab76,0xc52a0d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbc0e9aea5dc538ff,0xc311447b775dbea5,0x162a15fdb63af01c,0xc52a0d9defab76}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdbe9,0xe4ff,0xf301,0x769f,0x99d1,0xbd2d,0x3ff0,0xddd3,0x898e,0xd874,0x79c0,0x4353,0x2463,0x91a1,0xb3a5,0xe4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe4ffdbe9,0x769ff301,0xbd2d99d1,0xddd33ff0,0xd874898e,0x435379c0,0x91a12463,0xe4b3a5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x769ff301e4ffdbe9,0xddd33ff0bd2d99d1,0x435379c0d874898e,0xe4b3a591a12463}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x900d,0xd052,0xb453,0x206a,0xe61d,0x31f2,0xc579,0xfb21,0xc870,0x2bb,0xf38f,0xf9c1,0x83aa,0x47f1,0x58d1,0xeb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd052900d,0x206ab453,0x31f2e61d,0xfb21c579,0x2bbc870,0xf9c1f38f,0x47f183aa,0xeb58d1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x206ab453d052900d,0xfb21c57931f2e61d,0xf9c1f38f02bbc870,0xeb58d147f183aa}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x727e,0xa5e8,0xebc3,0x9e04,0xf1eb,0x38d7,0x68e0,0x8ea9,0x8f77,0x8331,0x48eb,0x82c0,0xb0a3,0x3583,0xf221,0x54}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5e8727e,0x9e04ebc3,0x38d7f1eb,0x8ea968e0,0x83318f77,0x82c048eb,0x3583b0a3,0x54f221}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9e04ebc3a5e8727e,0x8ea968e038d7f1eb,0x82c048eb83318f77,0x54f2213583b0a3}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdba9,0x49e4,0xc9b3,0x34e9,0x7ab9,0xd876,0xcae0,0xcfb0,0x6177,0x26b3,0x2c98,0x1e30,0x4a38,0x53cc,0x3bdc,0x71}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x49e4dba9,0x34e9c9b3,0xd8767ab9,0xcfb0cae0,0x26b36177,0x1e302c98,0x53cc4a38,0x713bdc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x34e9c9b349e4dba9,0xcfb0cae0d8767ab9,0x1e302c9826b36177,0x713bdc53cc4a38}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6ff3,0x2fad,0x4bac,0xdf95,0x19e2,0xce0d,0x3a86,0x4de,0x378f,0xfd44,0xc70,0x63e,0x7c55,0xb80e,0xa72e,0x14}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2fad6ff3,0xdf954bac,0xce0d19e2,0x4de3a86,0xfd44378f,0x63e0c70,0xb80e7c55,0x14a72e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdf954bac2fad6ff3,0x4de3a86ce0d19e2,0x63e0c70fd44378f,0x14a72eb80e7c55}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x20f0,0x2693,0xacbf,0x731a,0xb0f3,0xd8ce,0x1bcd,0xf836,0x8469,0x44d5,0xd604,0xd3aa,0x4aa8,0xcdc3,0x9086,0x3f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x269320f0,0x731aacbf,0xd8ceb0f3,0xf8361bcd,0x44d58469,0xd3aad604,0xcdc34aa8,0x3f9086}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x731aacbf269320f0,0xf8361bcdd8ceb0f3,0xd3aad60444d58469,0x3f9086cdc34aa8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcc11,0xe55a,0x932f,0x9534,0x2895,0xaf43,0x2956,0x614f,0x4e84,0xe4b2,0x60c6,0x255,0xbb14,0xd70d,0xc61e,0x13}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe55acc11,0x9534932f,0xaf432895,0x614f2956,0xe4b24e84,0x25560c6,0xd70dbb14,0x13c61e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9534932fe55acc11,0x614f2956af432895,0x25560c6e4b24e84,0x13c61ed70dbb14}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x28d6,0x450d,0xd24f,0x54e4,0x6e67,0x81d,0x9b71,0xadbe,0x1088,0x6148,0x4ebf,0x4b68,0x829e,0x65c8,0xe1a6,0xe5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450d28d6,0x54e4d24f,0x81d6e67,0xadbe9b71,0x61481088,0x4b684ebf,0x65c8829e,0xe5e1a6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x54e4d24f450d28d6,0xadbe9b71081d6e67,0x4b684ebf61481088,0xe5e1a665c8829e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf10,0xd96c,0x5340,0x8ce5,0x4f0c,0x2731,0xe432,0x7c9,0x7b96,0xbb2a,0x29fb,0x2c55,0xb557,0x323c,0x6f79,0xc0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd96cdf10,0x8ce55340,0x27314f0c,0x7c9e432,0xbb2a7b96,0x2c5529fb,0x323cb557,0xc06f79}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8ce55340d96cdf10,0x7c9e43227314f0c,0x2c5529fbbb2a7b96,0xc06f79323cb557}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x6b9, 0xd4c, 0x1d8d, 0x1f99, 0x1be4, 0x1f53, 0x5c1, 0x1937, 0x9c, 0xe68, 0x61e, 0x14e8, 0x352, 0x3d6, 0x174, 0x133, 0x18a6, 0x1b19, 0x94b, 0x9} +#elif RADIX == 32 +{0xb530d73, 0x4fccf63, 0x183f53df, 0x27326e5, 0x30f39a0, 0xc352a74, 0xcc2e83d, 0x18ce2982, 0x44a5e} +#elif RADIX == 64 +#if 
defined(SQISIGN_GF_IMPL_BROADWELL) +{0x7be4fccf635a986b, 0x9cd009cc9b9707ea, 0x85d07ac352a74187, 0x31a52f6338a6099} +#else +{0x27e67b1ad4c35, 0x4c9b9707ea7be, 0x54e830f39a013, 0x2661741eb0d4, 0x40d297b19c53} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x348, 0xb53, 0xf63, 0x7e6, 0x1ef9, 0xfd4, 0x1970, 0x64d, 0x27, 0x139a, 0x187, 0x153a, 0x10d4, 0xf5, 0x185d, 0x104c, 0xe29, 0x1ec6, 0x1a52, 0x0} +#elif RADIX == 32 +{0x1ad4c690, 0x193f33d8, 0xe0fd4f7, 0x9cc9b9, 0xc3ce68, 0xb0d4a9d, 0x1330ba0f, 0x16338a60, 0xd297} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x9ef93f33d8d6a634, 0xe734027326e5c1fa, 0x61741eb0d4a9d061, 0x28694bd8ce29826} +#else +{0x49f99ec6b531a, 0x7326e5c1fa9ef, 0x153a0c3ce6804, 0x609985d07ac35, 0x1434a5ec6714} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x18af, 0xb6e, 0x124d, 0xa49, 0xa8c, 0x11f5, 0xea9, 0x298, 0xa55, 0x1738, 0xb61, 0x2b9, 0x8a, 0x167a, 0x17e6, 0x2b0, 0x1290, 0x16ad, 0x1505, 0x2} +#elif RADIX == 32 +{0xadbb15e, 0xc524c93, 0x1531f554, 0x954530e, 0x15b0dce1, 0x1408a15c, 0xc2fcd67, 0x156ca405, 0x2a82d} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xaa8c524c9356dd8a, 0x6e70a5514c3aa63e, 0x5f9acf408a15cad8, 0x4c5416d5b290158} +#else +{0x6292649ab6ec5, 0x514c3aa63eaa8, 0x42b95b0dce14a, 0x5617e6b3d022, 0x262a0b6ad948} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1390, 0x1895, 0x9b7, 0xa5a, 0x1030, 0x16c1, 0xd21, 0x1053, 0x327, 0x1a4c, 0x1a22, 0x11e4, 0x16ba, 0x13a1, 0x1dbc, 0x1aac, 0x148c, 0x5c8, 0x15d2, 
0x0} +#elif RADIX == 32 +{0x1e256720, 0x1052d26d, 0x436c181, 0xc9e0a6d, 0xd116930, 0x36ba8f2, 0xb3b793a, 0xe452335, 0xae91} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x303052d26df12b39, 0xb498327829b486d8, 0x76f27436ba8f2688, 0x5748b9148cd56} +#else +{0x296936f8959c, 0x7829b486d8303, 0x51e4d11693064, 0x3559dbc9d0dae, 0x282ba45c8a46} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1be6, 0x11b3, 0x14ba, 0xf43, 0x1bd1, 0x215, 0x1e9a, 0x137a, 0x7b2, 0x15, 0x126, 0x148, 0x1c2b, 0x1b70, 0xf1c, 0x1e48, 0x1259, 0x188a, 0x1e44, 0x7} +#elif RADIX == 32 +{0x146cf7cd, 0x117a1d2e, 0x134215de, 0x1eca6f5e, 0x930054, 0x1c2b0a4, 0x121e39b7, 0x454967c, 0x2f226} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xbbd17a1d2ea367be, 0x802a7b29bd7a6842, 0x3c736e1c2b0a4049, 0x21f913115259f24} +#else +{0xbd0e9751b3df, 0x29bd7a6842bbd, 0x61480930054f6, 0x7c90f1cdb870a, 0x10fc8988a92c} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x4c5, 0x37e, 0xafa, 0x1b90, 0x13d, 0x8d3, 0xaa7, 0x489, 0x1d4a, 0x17bc, 0x168, 0x37f, 0x1ed6, 0x666, 0x1889, 0x1a4e, 0xa57, 0xeb7, 0xd37, 0x7} +#elif RADIX == 32 +{0x10df898b, 0x1ddc82be, 0x14e8d309, 0x1528912a, 0x10b45ef3, 0xded61bf, 0x13b11266, 0x15ba95f4, 0x269bb} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x613ddc82be86fc4c, 0x2f79d4a244aa9d1a, 0x6224ccded61bf85a, 0x1cb4ddd6ea57d27} +#else +{0x6ee415f437e26, 0x2244aa9d1a613, 0x437f0b45ef3a9, 0x749d8893337b5, 0xe5a6eeb752b} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x447, 0x1b87, 0x1cf0, 0x155, 0xb1, 0x804, 0x97a, 0x64a, 0x886, 0x3a3, 0x126f, 0x1553, 0x74d, 0xde9, 0x941, 0x39c, 0x8f, 0x1bbb, 0xf3, 0x1} +#elif RADIX == 32 +{0x6e1c88e, 0x110aaf3c, 0xf480405, 0x218c949, 0x19378e8d, 0x1274daa9, 0x71282de, 0x1dd823c7, 0x1079e} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x80b10aaf3c370e44, 0xc74688632525e900, 0x2505bd274daa9c9b, 0x2383cf77608f1ce} +#else +{0x85579e1b8722, 0x632525e90080b, 0x35539378e8d10, 0x47389416f49d3, 0x11c1e7bbb047} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xf9d, 0x552, 0x797, 0x19fc, 0x166, 0x7a8, 0x1ee5, 0xc77, 0x1ee7, 0x15ef, 0x340, 0x10df, 0x1d5f, 0x170, 0xf2, 0x123, 
0x1bb1, 0xd23, 0x3fc, 0x6} +#elif RADIX == 32 +{0x19549f3b, 0x6cfe1e5, 0x1ca7a80b, 0x1b9d8efe, 0x11a057bf, 0x1d5f86f, 0x8c1e417, 0x91eec42, 0x11fe3} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x166cfe1e5caa4f9, 0x2bdfee763bfb94f5, 0x83c82e1d5f86f8d0, 0x440ff1a47bb1091} +#else +{0x367f0f2e5527c, 0x763bfb94f5016, 0x70df1a057bfdc, 0x42460f20b8757, 0x4a07f8d23dd8} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27cb,0x4931,0x13e0,0xcd75,0x6846,0x3de7,0x5a91,0x9ff9,0xa270,0xa6d6,0x26ec,0xb972,0xb44,0xe4b8,0x52fc,0x3f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x493127cb,0xcd7513e0,0x3de76846,0x9ff95a91,0xa6d6a270,0xb97226ec,0xe4b80b44,0x3f52fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcd7513e0493127cb,0x9ff95a913de76846,0xb97226eca6d6a270,0x3f52fce4b80b44}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2d7e,0x5a38,0xca74,0x1d16,0x2547,0x1674,0x8c29,0xafc2,0x2349,0x4856,0x2c73,0x7957,0x67e1,0x3c3e,0x4d3,0xad}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a382d7e,0x1d16ca74,0x16742547,0xafc28c29,0x48562349,0x79572c73,0x3c3e67e1,0xad04d3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1d16ca745a382d7e,0xafc28c2916742547,0x79572c7348562349,0xad04d33c3e67e1}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8d7,0x27fb,0x321e,0xcf15,0xfbd3,0x6f8e,0x5fbd,0x7ed7,0xf394,0x58d6,0x5937,0xb73c,0xbfb,0xe027,0x64ac,0x22}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x27fbd8d7,0xcf15321e,0x6f8efbd3,0x7ed75fbd,0x58d6f394,0xb73c5937,0xe0270bfb,0x2264ac}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcf15321e27fbd8d7,0x7ed75fbd6f8efbd3,0xb73c593758d6f394,0x2264ace0270bfb}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd835,0xb6ce,0xec1f,0x328a,0x97b9,0xc218,0xa56e,0x6006,0x5d8f,0x5929,0xd913,0x468d,0xf4bb,0x1b47,0xad03,0xc0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb6ced835,0x328aec1f,0xc21897b9,0x6006a56e,0x59295d8f,0x468dd913,0x1b47f4bb,0xc0ad03}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x328aec1fb6ced835,0x6006a56ec21897b9,0x468dd91359295d8f,0xc0ad031b47f4bb}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +#elif GMP_LIMB_BITS == 32 
+{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5286,0x497a,0x2196,0xaa2d,0xa98a,0x7b17,0xa6e9,0x6da0,0x73f5,0xe349,0x83ae,0x7c6e,0x31,0xee2e,0x6931,0x32}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x497a5286,0xaa2d2196,0x7b17a98a,0x6da0a6e9,0xe34973f5,0x7c6e83ae,0xee2e0031,0x326931}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaa2d2196497a5286,0x6da0a6e97b17a98a,0x7c6e83aee34973f5,0x326931ee2e0031}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc8dc,0x8642,0xe46d,0x6a64,0x4c13,0x90e7,0xd82b,0x9b58,0x64b4,0x7381,0x5218,0x99b4,0x5926,0x5d78,0x95e2,0xb7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8642c8dc,0x6a64e46d,0x90e74c13,0x9b58d82b,0x738164b4,0x99b45218,0x5d785926,0xb795e2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6a64e46d8642c8dc,0x9b58d82b90e74c13,0x99b45218738164b4,0xb795e25d785926}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x94df,0x6dc7,0xcd7f,0xebb2,0xb290,0x811d,0x2825,0xc88,0xd514,0x959a,0x7d64,0xc8c3,0x16a9,0x106a,0x1eea,0x32}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6dc794df,0xebb2cd7f,0x811db290,0xc882825,0x959ad514,0xc8c37d64,0x106a16a9,0x321eea}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xebb2cd7f6dc794df,0xc882825811db290,0xc8c37d64959ad514,0x321eea106a16a9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe08c,0xe778,0x1464,0x19fe,0xef25,0x1d24,0xa98f,0x4af0,0x70d3,0x8e4d,0x2b82,0x95ea,0x3277,0xc267,0x1695,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe778e08c,0x19fe1464,0x1d24ef25,0x4af0a98f,0x8e4d70d3,0x95ea2b82,0xc2673277,0xf1695}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x19fe1464e778e08c,0x4af0a98f1d24ef25,0x95ea2b828e4d70d3,0xf1695c2673277}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1df,0xb6e1,0xe2a4,0x4bc9,0xdc85,0x6365,0x3fca,0x9a38,0xee2,0xed03,0xca7f,0x1984,0xe709,0x1efe,0xc173,0x8b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb6e1f1df,0x4bc9e2a4,0x6365dc85,0x9a383fca,0xed030ee2,0x1984ca7f,0x1efee709,0x8bc173}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x4bc9e2a4b6e1f1df,0x9a383fca6365dc85,0x1984ca7fed030ee2,0x8bc1731efee709}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6b21,0x9238,0x3280,0x144d,0x4d6f,0x7ee2,0xd7da,0xf377,0x2aeb,0x6a65,0x829b,0x373c,0xe956,0xef95,0xe115,0xcd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x92386b21,0x144d3280,0x7ee24d6f,0xf377d7da,0x6a652aeb,0x373c829b,0xef95e956,0xcde115}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x144d328092386b21,0xf377d7da7ee24d6f,0x373c829b6a652aeb,0xcde115ef95e956}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27cb,0x4931,0x13e0,0xcd75,0x6846,0x3de7,0x5a91,0x9ff9,0xa270,0xa6d6,0x26ec,0xb972,0xb44,0xe4b8,0x52fc,0x3f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x493127cb,0xcd7513e0,0x3de76846,0x9ff95a91,0xa6d6a270,0xb97226ec,0xe4b80b44,0x3f52fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcd7513e0493127cb,0x9ff95a913de76846,0xb97226eca6d6a270,0x3f52fce4b80b44}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2d7e,0x5a38,0xca74,0x1d16,0x2547,0x1674,0x8c29,0xafc2,0x2349,0x4856,0x2c73,0x7957,0x67e1,0x3c3e,0x4d3,0xad}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a382d7e,0x1d16ca74,0x16742547,0xafc28c29,0x48562349,0x79572c73,0x3c3e67e1,0xad04d3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1d16ca745a382d7e,0xafc28c2916742547,0x79572c7348562349,0xad04d33c3e67e1}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8d7,0x27fb,0x321e,0xcf15,0xfbd3,0x6f8e,0x5fbd,0x7ed7,0xf394,0x58d6,0x5937,0xb73c,0xbfb,0xe027,0x64ac,0x22}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x27fbd8d7,0xcf15321e,0x6f8efbd3,0x7ed75fbd,0x58d6f394,0xb73c5937,0xe0270bfb,0x2264ac}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcf15321e27fbd8d7,0x7ed75fbd6f8efbd3,0xb73c593758d6f394,0x2264ace0270bfb}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd835,0xb6ce,0xec1f,0x328a,0x97b9,0xc218,0xa56e,0x6006,0x5d8f,0x5929,0xd913,0x468d,0xf4bb,0x1b47,0xad03,0xc0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb6ced835,0x328aec1f,0xc21897b9,0x6006a56e,0x59295d8f,0x468dd913,0x1b47f4bb,0xc0ad03}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x328aec1fb6ced835,0x6006a56ec21897b9,0x468dd91359295d8f,0xc0ad031b47f4bb}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x2943,0x24bd,0x90cb,0x5516,0xd4c5,0xbd8b,0x5374,0xb6d0,0xb9fa,0x71a4,0x41d7,0xbe37,0x18,0xf717,0x3498,0x99}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x24bd2943,0x551690cb,0xbd8bd4c5,0xb6d05374,0x71a4b9fa,0xbe3741d7,0xf7170018,0x993498}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x551690cb24bd2943,0xb6d05374bd8bd4c5,0xbe3741d771a4b9fa,0x993498f7170018}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x646e,0xc321,0x7236,0xb532,0xa609,0xc873,0x6c15,0x4dac,0xb25a,0x39c0,0x290c,0x4cda,0x2c93,0x2ebc,0xcaf1,0xdb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc321646e,0xb5327236,0xc873a609,0x4dac6c15,0x39c0b25a,0x4cda290c,0x2ebc2c93,0xdbcaf1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb5327236c321646e,0x4dac6c15c873a609,0x4cda290c39c0b25a,0xdbcaf12ebc2c93}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd647,0xf187,0x9a31,0x1ee,0x193b,0xeec2,0xbfed,0x9418,0x15b6,0xe9a,0x4c74,0xae85,0x3ebe,0x2677,0x3f12,0x42}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf187d647,0x1ee9a31,0xeec2193b,0x9418bfed,0xe9a15b6,0xae854c74,0x26773ebe,0x423f12}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1ee9a31f187d647,0x9418bfedeec2193b,0xae854c740e9a15b6,0x423f1226773ebe}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x68ff,0x99be,0x416c,0x7bbf,0xd44f,0x609f,0x7682,0xa8ff,0xa6bb,0xec03,0x8e77,0xc076,0x7873,0x9676,0xa152,0xf5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x99be68ff,0x7bbf416c,0x609fd44f,0xa8ff7682,0xec03a6bb,0xc0768e77,0x96767873,0xf5a152}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7bbf416c99be68ff,0xa8ff7682609fd44f,0xc0768e77ec03a6bb,0xf5a15296767873}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3739,0xf7da,0xbd23,0xa38e,0x8cf9,0x7690,0x6b0e,0x1a7,0x77f0,0xa2bd,0x5ac7,0x5101,0x3aae,0xa922,0x2d3a,0x95}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf7da3739,0xa38ebd23,0x76908cf9,0x1a76b0e,0xa2bd77f0,0x51015ac7,0xa9223aae,0x952d3a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa38ebd23f7da3739,0x1a76b0e76908cf9,0x51015ac7a2bd77f0,0x952d3aa9223aae}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x29b9,0xe78,0x65ce,0xfe11,0xe6c4,0x113d,0x4012,0x6be7,0xea49,0xf165,0xb38b,0x517a,0xc141,0xd988,0xc0ed,0xbd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0xe7829b9,0xfe1165ce,0x113de6c4,0x6be74012,0xf165ea49,0x517ab38b,0xd988c141,0xbdc0ed}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfe1165ce0e7829b9,0x6be74012113de6c4,0x517ab38bf165ea49,0xbdc0edd988c141}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x1068, 0xf2c, 0x1ada, 0x1f58, 0x1343, 0x5cc, 0x90, 0x1bba, 0x50b, 0x1a35, 0x35f, 0x1388, 0x5ce, 0xc4f, 0x1462, 0x191f, 0x665, 0x843, 0x1cb1, 0x1} +#elif RADIX == 32 +{0x13cb20d0, 0x3fac6b6, 0x1205cc9a, 0x142f7740, 0x1afe8d4, 0x1e5ce9c4, 0x7e8c4c4, 0x2199972, 0x1e58a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x9343fac6b69e5906, 0xf46a50bddd0240b9, 0xd18989e5ce9c40d7, 0x28f2c5086665c8f} +#else +{0x1fd635b4f2c83, 0x3ddd0240b9934, 0x53881afe8d4a1, 0x723f462627973, 0x147962843332} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x5b3, 0x13cb, 0x6b6, 0x1fd6, 0x4d0, 0x173, 0x1024, 0x1eee, 0x942, 0x1e8d, 0xd7, 0x14e2, 0x1973, 0x1313, 0x1d18, 0xe47, 0x1999, 0xa10, 0xf2c, 0x6} +#elif RADIX == 32 +{0x14f2cb67, 0x10feb1ad, 0x4817326, 0x50bddd0, 0x6bfa35, 0x7973a71, 0x11fa3131, 0x1086665c, 0x17962} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x64d0feb1ada7965b, 0xfd1a942f7740902e, 0xf462627973a71035, 0x123cb1421999723} +#else +{0x7f58d6d3cb2d, 0x2f7740902e64d, 0x74e206bfa3528, 0x5c8fd18989e5c, 0x311e58a10ccc} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x14ba, 0xa50, 0x219, 0x1ca8, 0x1858, 0xe67, 0x1b19, 0xb09, 0x17fa, 0x89f, 0x10d7, 0x1a55, 0x14de, 
0x1f37, 0x12f0, 0x1247, 0x1aa6, 0x109f, 0x493, 0x6} +#elif RADIX == 32 +{0xa942975, 0x18e54086, 0x32e67c2, 0x1fe9613b, 0x186ba27e, 0xf4ded2a, 0x11e5e1f3, 0x4fea9a4, 0x1249c} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf858e5408654a14b, 0xd13f7fa584ec65cc, 0xcbc3e6f4ded2ac35, 0x35124e13faa6923} +#else +{0x472a0432a50a5, 0x2584ec65ccf85, 0x5a5586ba27eff, 0x248f2f0f9bd37, 0x42892709fd53} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1ba, 0xab8, 0x1ded, 0xdc9, 0xf40, 0xaa3, 0x169, 0x53c, 0x2, 0x848, 0x9a6, 0xbad, 0xb7e, 0x15dc, 0x87, 0x1cf3, 0x1791, 0x1af2, 0x1cdf, 0x7} +#elif RADIX == 32 +{0xaae0375, 0x6e4f7b, 0xd2aa37a, 0x8a781, 0x14d32120, 0x18b7e5d6, 0x1cc10f5d, 0x1795e479, 0x2e6fe} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6f406e4f7b55701b, 0x909000229e05a554, 0x821ebb8b7e5d6a69, 0x35f37f5e5791e79} +#else +{0x3727bdaab80d, 0x229e05a5546f4, 0x4bad4d3212000, 0x79e6087aee2df, 0x42f9bfaf2bc8} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x5b, 0xad0, 0x69, 0x1038, 0x18d2, 0x180d, 0x1871, 0x46b, 0x26b, 0x1ef2, 0xe46, 0x72d, 0xc0d, 0x15a4, 0x6d7, 0x221, 0x1611, 0x1a89, 0xd3f, 0x8} +#elif RADIX == 32 +{0xab400b7, 0x1281c01a, 0xe380dc6, 0x9ac8d78, 0x17237bc8, 0x8c0d396, 0x84daf5a, 0x144d8444, 0x369fe} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xb8d281c01a55a005, 0xbde426b235e1c701, 0x9b5eb48c0d396b91, 0x3b34ff513611110} +#else +{0x140e00d2ad002, 0x3235e1c701b8d, 0x272d7237bc84d, 0x44426d7ad2303, 0x459a7fa89b08} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1131, 0xac7, 0xa16, 0x918, 0x5d8, 0x1e64, 0x3e5, 0x142c, 0x1f89, 0x1cb7, 0xf96, 0x370, 0x4da, 0xf45, 0x1aa5, 0x1872, 0x1fc, 0xd83, 0x1145, 0x6} +#elif RADIX == 32 +{0x12b1e263, 0x1848c285, 0x1cbe642e, 0x1e268583, 0x7cb72df, 0xa4da1b8, 0x1cb54af4, 0xc187f30, 0x18a2b} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x85d848c285958f13, 0xb96ff89a160f97cc, 0x6a95e8a4da1b83e5, 0x84515b061fcc39} +#else +{0x4246142cac789, 0x1a160f97cc85d, 0x43707cb72dff1, 0x30e5aa57a2936, 0x2c228ad830fe} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x7a4, 0x388, 0xd00, 0x66c, 0x1a9a, 0xabc, 0x97b, 0xadc, 0xaab, 
0x1601, 0x287, 0xb2a, 0x1ab7, 0x1803, 0x1d06, 0x81c, 0x890, 0x11e0, 0x1e19, 0x0} +#elif RADIX == 32 +{0xe20f48, 0x1a336340, 0xf6abcd4, 0xaad5b89, 0x143d805, 0x7ab7595, 0x73a0d80, 0xf022410, 0xf0cc} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x9a9a33634007107a, 0xec02aab56e25ed57, 0x741b007ab75950a1, 0x1478663c089040e} +#else +{0x519b1a003883d, 0x356e25ed579a9, 0x6b2a143d80555, 0x1039d06c01ead, 0xa3c331e0448} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1e68, 0xcde, 0x29, 0x1777, 0x1ef8, 0x1a1c, 0x204, 0x148, 0x14ba, 0x1c39, 0x175, 0x1263, 0x4de, 0x1032, 0x1649, 0x5a4, 0xad, 0xcfb, 0x870, 0x3} +#elif RADIX == 32 +{0xb37bcd0, 0x18bbb80a, 0x9a1cf7, 0x12e82902, 0x10baf0e6, 0x44de931, 0x92c9303, 0x7d82b4b, 0x34383} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x9ef8bbb80a59bde6, 0x78734ba0a4081343, 0x59260644de93185d, 0x29a1c19f60ad2d2} +#else +{0x45ddc052cdef3, 0x20a40813439ef, 0x52630baf0e697, 0x4b49649819137, 0x14d0e0cfb056} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7177,0xb186,0x73de,0xc572,0x9802,0xc0ee,0x7031,0xfe17,0xbc2e,0x41c5,0xe2a7,0xed41,0x1cbf,0x9ff9,0xf5bc,0x1e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb1867177,0xc57273de,0xc0ee9802,0xfe177031,0x41c5bc2e,0xed41e2a7,0x9ff91cbf,0x1ef5bc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc57273deb1867177,0xfe177031c0ee9802,0xed41e2a741c5bc2e,0x1ef5bc9ff91cbf}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x726a,0xbae3,0x32b6,0x75c2,0xe003,0x6e79,0xd172,0x382a,0x8a51,0x7962,0xa563,0x6a39,0x9cdd,0xc910,0xf6f0,0xa0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbae3726a,0x75c232b6,0x6e79e003,0x382ad172,0x79628a51,0x6a39a563,0xc9109cdd,0xa0f6f0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x75c232b6bae3726a,0x382ad1726e79e003,0x6a39a56379628a51,0xa0f6f0c9109cdd}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x246b,0x9fa9,0x54e6,0xbd87,0x15b5,0x1c70,0x6470,0x25fa,0x3f5c,0x8940,0xa6e9,0x7eb5,0x6109,0x54a1,0xa8df,0x16}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9fa9246b,0xbd8754e6,0x1c7015b5,0x25fa6470,0x89403f5c,0x7eb5a6e9,0x54a16109,0x16a8df}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbd8754e69fa9246b,0x25fa64701c7015b5,0x7eb5a6e989403f5c,0x16a8df54a16109}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x8e89,0x4e79,0x8c21,0x3a8d,0x67fd,0x3f11,0x8fce,0x1e8,0x43d1,0xbe3a,0x1d58,0x12be,0xe340,0x6006,0xa43,0xe1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4e798e89,0x3a8d8c21,0x3f1167fd,0x1e88fce,0xbe3a43d1,0x12be1d58,0x6006e340,0xe10a43}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3a8d8c214e798e89,0x1e88fce3f1167fd,0x12be1d58be3a43d1,0xe10a436006e340}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1975,0x2b02,0x86c,0x9cbe,0x7576,0xb1c3,0xd9a7,0x737e,0x4de1,0xa245,0x7652,0xf9bf,0x4bf8,0xdc2c,0xeaa1,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2b021975,0x9cbe086c,0xb1c37576,0x737ed9a7,0xa2454de1,0xf9bf7652,0xdc2c4bf8,0x8eaa1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9cbe086c2b021975,0x737ed9a7b1c37576,0xf9bf7652a2454de1,0x8eaa1dc2c4bf8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee88,0x46bc,0x7177,0x337c,0x92b6,0x40dc,0xb657,0x3366,0x6c8a,0x2b98,0x40eb,0x1146,0xe116,0xb00a,0xa22f,0xe3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0x46bcee88,0x337c7177,0x40dc92b6,0x3366b657,0x2b986c8a,0x114640eb,0xb00ae116,0xe3a22f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x337c717746bcee88,0x3366b65740dc92b6,0x114640eb2b986c8a,0xe3a22fb00ae116}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf28d,0x64d3,0xe248,0x40b9,0x5141,0x82bb,0x82ea,0xcf35,0xfaf0,0x3,0xd71f,0x6e88,0x7ac9,0xf4c9,0x6b9e,0xcc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x64d3f28d,0x40b9e248,0x82bb5141,0xcf3582ea,0x3faf0,0x6e88d71f,0xf4c97ac9,0xcc6b9e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x40b9e24864d3f28d,0xcf3582ea82bb5141,0x6e88d71f0003faf0,0xcc6b9ef4c97ac9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe68b,0xd4fd,0xf793,0x6341,0x8a89,0x4e3c,0x2658,0x8c81,0xb21e,0x5dba,0x89ad,0x640,0xb407,0x23d3,0x155e,0xf7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd4fde68b,0x6341f793,0x4e3c8a89,0x8c812658,0x5dbab21e,0x64089ad,0x23d3b407,0xf7155e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6341f793d4fde68b,0x8c8126584e3c8a89,0x64089ad5dbab21e,0xf7155e23d3b407}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7177,0xb186,0x73de,0xc572,0x9802,0xc0ee,0x7031,0xfe17,0xbc2e,0x41c5,0xe2a7,0xed41,0x1cbf,0x9ff9,0xf5bc,0x1e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb1867177,0xc57273de,0xc0ee9802,0xfe177031,0x41c5bc2e,0xed41e2a7,0x9ff91cbf,0x1ef5bc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc57273deb1867177,0xfe177031c0ee9802,0xed41e2a741c5bc2e,0x1ef5bc9ff91cbf}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x726a,0xbae3,0x32b6,0x75c2,0xe003,0x6e79,0xd172,0x382a,0x8a51,0x7962,0xa563,0x6a39,0x9cdd,0xc910,0xf6f0,0xa0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbae3726a,0x75c232b6,0x6e79e003,0x382ad172,0x79628a51,0x6a39a563,0xc9109cdd,0xa0f6f0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x75c232b6bae3726a,0x382ad1726e79e003,0x6a39a56379628a51,0xa0f6f0c9109cdd}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x246b,0x9fa9,0x54e6,0xbd87,0x15b5,0x1c70,0x6470,0x25fa,0x3f5c,0x8940,0xa6e9,0x7eb5,0x6109,0x54a1,0xa8df,0x16}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9fa9246b,0xbd8754e6,0x1c7015b5,0x25fa6470,0x89403f5c,0x7eb5a6e9,0x54a16109,0x16a8df}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbd8754e69fa9246b,0x25fa64701c7015b5,0x7eb5a6e989403f5c,0x16a8df54a16109}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8e89,0x4e79,0x8c21,0x3a8d,0x67fd,0x3f11,0x8fce,0x1e8,0x43d1,0xbe3a,0x1d58,0x12be,0xe340,0x6006,0xa43,0xe1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4e798e89,0x3a8d8c21,0x3f1167fd,0x1e88fce,0xbe3a43d1,0x12be1d58,0x6006e340,0xe10a43}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x3a8d8c214e798e89,0x1e88fce3f1167fd,0x12be1d58be3a43d1,0xe10a436006e340}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x84a0,0x8ad1,0xbcc4,0xc440,0x94e1,0x46ea,0x15c6,0x784e,0x190,0xd26f,0x630,0x2bee,0x74b1,0x93ce,0xe061,0x3c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8ad184a0,0xc440bcc4,0x46ea94e1,0x784e15c6,0xd26f0190,0x2bee0630,0x93ce74b1,0x3ce061}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc440bcc48ad184a0,0x784e15c646ea94e1,0x2bee0630d26f0190,0x3ce06193ce74b1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1e2b,0xdafe,0xfa45,0xa69b,0xb77e,0xf670,0x927d,0xa0f9,0xccb5,0xc897,0x9607,0x5f22,0x47bf,0x867,0xf781,0xd9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdafe1e2b,0xa69bfa45,0xf670b77e,0xa0f9927d,0xc897ccb5,0x5f229607,0x86747bf,0xd9f781}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa69bfa45dafe1e2b,0xa0f9927df670b77e,0x5f229607c897ccb5,0xd9f781086747bf}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x2aa2,0xbd3f,0x2ad,0x19bd,0xe6f0,0x3b95,0x3fff,0xd17e,0xf3a6,0x7888,0xda46,0x3b21,0xcc57,0x5301,0x3e50,0xc4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbd3f2aa2,0x19bd02ad,0x3b95e6f0,0xd17e3fff,0x7888f3a6,0x3b21da46,0x5301cc57,0xc43e50}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x19bd02adbd3f2aa2,0xd17e3fff3b95e6f0,0x3b21da467888f3a6,0xc43e505301cc57}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7b60,0x752e,0x433b,0x3bbf,0x6b1e,0xb915,0xea39,0x87b1,0xfe6f,0x2d90,0xf9cf,0xd411,0x8b4e,0x6c31,0x1f9e,0xc3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x752e7b60,0x3bbf433b,0xb9156b1e,0x87b1ea39,0x2d90fe6f,0xd411f9cf,0x6c318b4e,0xc31f9e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3bbf433b752e7b60,0x87b1ea39b9156b1e,0xd411f9cf2d90fe6f,0xc31f9e6c318b4e}}} +#endif +}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/endomorphism_action.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/endomorphism_action.h new file mode 100644 index 0000000000..1cc782a5bd --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/endomorphism_action.h @@ -0,0 +1,31 @@ +#ifndef ENDOMORPHISM_ACTION_H +#define ENDOMORPHISM_ACTION_H +#include +#include +#include +/** Type for precomputed endomorphism rings applied to precomputed torsion bases. + * + * Precomputed by the precompute scripts. + * + * @typedef curve_with_endomorphism_ring_t + * + * @struct curve_with_endomorphism_ring + **/ +typedef struct curve_with_endomorphism_ring { + ec_curve_t curve; + ec_basis_t basis_even; + ibz_mat_2x2_t action_i, action_j, action_k; + ibz_mat_2x2_t action_gen2, action_gen3, action_gen4; +} curve_with_endomorphism_ring_t; +#define CURVE_E0 (CURVES_WITH_ENDOMORPHISMS->curve) +#define BASIS_EVEN (CURVES_WITH_ENDOMORPHISMS->basis_even) +#define ACTION_I (CURVES_WITH_ENDOMORPHISMS->action_i) +#define ACTION_J (CURVES_WITH_ENDOMORPHISMS->action_j) +#define ACTION_K (CURVES_WITH_ENDOMORPHISMS->action_k) +#define ACTION_GEN2 (CURVES_WITH_ENDOMORPHISMS->action_gen2) +#define ACTION_GEN3 (CURVES_WITH_ENDOMORPHISMS->action_gen3) +#define ACTION_GEN4 (CURVES_WITH_ENDOMORPHISMS->action_gen4) +#define NUM_ALTERNATE_STARTING_CURVES 6 +#define ALTERNATE_STARTING_CURVES (CURVES_WITH_ENDOMORPHISMS+1) +extern const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7]; +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/finit.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/finit.c new file mode 100644 index 0000000000..b3808edf07 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/finit.c @@ -0,0 +1,122 @@ +#include "internal.h" + +void +quat_alg_init_set(quat_alg_t *alg, const ibz_t *p) +{ + ibz_init(&(*alg).p); + ibz_copy(&(*alg).p, p); +} +void +quat_alg_finalize(quat_alg_t *alg) +{ + ibz_finalize(&(*alg).p); +} + +void +quat_alg_elem_init(quat_alg_elem_t *elem) +{ + ibz_vec_4_init(&(*elem).coord); + ibz_init(&(*elem).denom); + ibz_set(&(*elem).denom, 1); +} +void +quat_alg_elem_finalize(quat_alg_elem_t *elem) +{ + ibz_vec_4_finalize(&(*elem).coord); + ibz_finalize(&(*elem).denom); +} + +void +ibz_vec_2_init(ibz_vec_2_t *vec) +{ + ibz_init(&((*vec)[0])); + ibz_init(&((*vec)[1])); +} + +void +ibz_vec_2_finalize(ibz_vec_2_t *vec) +{ + ibz_finalize(&((*vec)[0])); + ibz_finalize(&((*vec)[1])); +} + +void +ibz_vec_4_init(ibz_vec_4_t *vec) +{ + for (int i = 0; i 
< 4; i++) { + ibz_init(&(*vec)[i]); + } +} +void +ibz_vec_4_finalize(ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_finalize(&(*vec)[i]); + } +} + +void +ibz_mat_2x2_init(ibz_mat_2x2_t *mat) +{ + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_init(&(*mat)[i][j]); + } + } +} +void +ibz_mat_2x2_finalize(ibz_mat_2x2_t *mat) +{ + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_finalize(&(*mat)[i][j]); + } + } +} + +void +ibz_mat_4x4_init(ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_init(&(*mat)[i][j]); + } + } +} +void +ibz_mat_4x4_finalize(ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_finalize(&(*mat)[i][j]); + } + } +} + +void +quat_lattice_init(quat_lattice_t *lat) +{ + ibz_mat_4x4_init(&(*lat).basis); + ibz_init(&(*lat).denom); + ibz_set(&(*lat).denom, 1); +} +void +quat_lattice_finalize(quat_lattice_t *lat) +{ + ibz_finalize(&(*lat).denom); + ibz_mat_4x4_finalize(&(*lat).basis); +} + +void +quat_left_ideal_init(quat_left_ideal_t *lideal) +{ + quat_lattice_init(&(*lideal).lattice); + ibz_init(&(*lideal).norm); + (*lideal).parent_order = NULL; +} +void +quat_left_ideal_finalize(quat_left_ideal_t *lideal) +{ + ibz_finalize(&(*lideal).norm); + quat_lattice_finalize(&(*lideal).lattice); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fips202.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fips202.c new file mode 100644 index 0000000000..f2992d8c7f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fips202.c @@ -0,0 +1,876 @@ +// SPDX-License-Identifier: PD and Apache-2.0 + +/* FIPS202 implementation based on code from PQClean, + * which is in turn based on the public domain implementation in + * crypto_hash/keccakc512/simple/ from http://bench.cr.yp.to/supercop.html + * by Ronny Van Keer + * and the public domain "TweetFips202" implementation + * from https://twitter.com/tweetfips202 + * by Gilles Van Assche, Daniel J.
Bernstein, and Peter Schwabe */ + +#include +#include +#include +#include + +#include "fips202.h" + +#define NROUNDS 24 +#define ROL(a, offset) (((a) << (offset)) ^ ((a) >> (64 - (offset)))) + +/************************************************* + * Name: load64 + * + * Description: Load 8 bytes into uint64_t in little-endian order + * + * Arguments: - const uint8_t *x: pointer to input byte array + * + * Returns the loaded 64-bit unsigned integer + **************************************************/ +static uint64_t load64(const uint8_t *x) { + uint64_t r = 0; + for (size_t i = 0; i < 8; ++i) { + r |= (uint64_t)x[i] << 8 * i; + } + + return r; +} + +/************************************************* + * Name: store64 + * + * Description: Store a 64-bit integer to a byte array in little-endian order + * + * Arguments: - uint8_t *x: pointer to the output byte array + * - uint64_t u: input 64-bit unsigned integer + **************************************************/ +static void store64(uint8_t *x, uint64_t u) { + for (size_t i = 0; i < 8; ++i) { + x[i] = (uint8_t) (u >> 8 * i); + } +} + +/* Keccak round constants */ +static const uint64_t KeccakF_RoundConstants[NROUNDS] = { + 0x0000000000000001ULL, 0x0000000000008082ULL, + 0x800000000000808aULL, 0x8000000080008000ULL, + 0x000000000000808bULL, 0x0000000080000001ULL, + 0x8000000080008081ULL, 0x8000000000008009ULL, + 0x000000000000008aULL, 0x0000000000000088ULL, + 0x0000000080008009ULL, 0x000000008000000aULL, + 0x000000008000808bULL, 0x800000000000008bULL, + 0x8000000000008089ULL, 0x8000000000008003ULL, + 0x8000000000008002ULL, 0x8000000000000080ULL, + 0x000000000000800aULL, 0x800000008000000aULL, + 0x8000000080008081ULL, 0x8000000000008080ULL, + 0x0000000080000001ULL, 0x8000000080008008ULL +}; + +/************************************************* + * Name: KeccakF1600_StatePermute + * + * Description: The Keccak F1600 Permutation + * + * Arguments: - uint64_t *state: pointer to input/output Keccak state + **************************************************/ +static void KeccakF1600_StatePermute(uint64_t *state) { + int round; + + uint64_t Aba, Abe, Abi, Abo, Abu; + uint64_t Aga, Age, Agi, Ago, Agu; + uint64_t Aka, Ake, Aki, Ako, Aku; + uint64_t Ama, Ame, Ami, Amo, Amu; + uint64_t Asa, Ase, Asi, Aso, Asu; + uint64_t BCa, BCe, BCi, BCo, BCu; + uint64_t Da, De, Di, Do, Du; + uint64_t Eba, Ebe, Ebi, Ebo, Ebu; + uint64_t Ega, Ege, Egi, Ego, Egu; + uint64_t Eka, Eke, Eki, Eko, Eku; + uint64_t Ema, Eme, Emi, Emo, Emu; + uint64_t Esa, Ese, Esi, Eso, Esu; + + // copyFromState(A, state) + Aba = state[0]; + Abe = state[1]; + Abi = state[2]; + Abo = state[3]; + Abu = state[4]; + Aga = state[5]; + Age = state[6]; + Agi = state[7]; + Ago = state[8]; + Agu = state[9]; + Aka = state[10]; + Ake = state[11]; + Aki = state[12]; + Ako = state[13]; + Aku = state[14]; + Ama = state[15]; + Ame = state[16]; + Ami = state[17]; + Amo = state[18]; + Amu = state[19]; + Asa = state[20]; + Ase = state[21]; + Asi = state[22]; + Aso = state[23]; + Asu = state[24]; + + for (round = 0; round < NROUNDS; round += 2) { + // prepareTheta + BCa = Aba ^ Aga ^ Aka ^ Ama ^ Asa; + BCe = Abe ^ Age ^ Ake ^ Ame ^ Ase; + BCi = Abi ^ Agi ^ Aki ^ Ami ^ Asi; + BCo = Abo ^ Ago ^ Ako ^ Amo ^ Aso; + BCu = Abu ^ Agu ^ Aku ^ Amu ^ Asu; + + // thetaRhoPiChiIotaPrepareTheta(round , A, E) + Da = BCu ^ ROL(BCe, 1); + De = BCa ^ ROL(BCi, 1); + Di = BCe ^ ROL(BCo, 1); + Do = BCi ^ ROL(BCu, 1); + Du = BCo ^ ROL(BCa, 1); + + Aba ^= Da; + BCa = Aba; + Age ^= De; + BCe = ROL(Age, 44); + Aki ^= Di; + BCi = 
ROL(Aki, 43); + Amo ^= Do; + BCo = ROL(Amo, 21); + Asu ^= Du; + BCu = ROL(Asu, 14); + Eba = BCa ^ ((~BCe) & BCi); + Eba ^= KeccakF_RoundConstants[round]; + Ebe = BCe ^ ((~BCi) & BCo); + Ebi = BCi ^ ((~BCo) & BCu); + Ebo = BCo ^ ((~BCu) & BCa); + Ebu = BCu ^ ((~BCa) & BCe); + + Abo ^= Do; + BCa = ROL(Abo, 28); + Agu ^= Du; + BCe = ROL(Agu, 20); + Aka ^= Da; + BCi = ROL(Aka, 3); + Ame ^= De; + BCo = ROL(Ame, 45); + Asi ^= Di; + BCu = ROL(Asi, 61); + Ega = BCa ^ ((~BCe) & BCi); + Ege = BCe ^ ((~BCi) & BCo); + Egi = BCi ^ ((~BCo) & BCu); + Ego = BCo ^ ((~BCu) & BCa); + Egu = BCu ^ ((~BCa) & BCe); + + Abe ^= De; + BCa = ROL(Abe, 1); + Agi ^= Di; + BCe = ROL(Agi, 6); + Ako ^= Do; + BCi = ROL(Ako, 25); + Amu ^= Du; + BCo = ROL(Amu, 8); + Asa ^= Da; + BCu = ROL(Asa, 18); + Eka = BCa ^ ((~BCe) & BCi); + Eke = BCe ^ ((~BCi) & BCo); + Eki = BCi ^ ((~BCo) & BCu); + Eko = BCo ^ ((~BCu) & BCa); + Eku = BCu ^ ((~BCa) & BCe); + + Abu ^= Du; + BCa = ROL(Abu, 27); + Aga ^= Da; + BCe = ROL(Aga, 36); + Ake ^= De; + BCi = ROL(Ake, 10); + Ami ^= Di; + BCo = ROL(Ami, 15); + Aso ^= Do; + BCu = ROL(Aso, 56); + Ema = BCa ^ ((~BCe) & BCi); + Eme = BCe ^ ((~BCi) & BCo); + Emi = BCi ^ ((~BCo) & BCu); + Emo = BCo ^ ((~BCu) & BCa); + Emu = BCu ^ ((~BCa) & BCe); + + Abi ^= Di; + BCa = ROL(Abi, 62); + Ago ^= Do; + BCe = ROL(Ago, 55); + Aku ^= Du; + BCi = ROL(Aku, 39); + Ama ^= Da; + BCo = ROL(Ama, 41); + Ase ^= De; + BCu = ROL(Ase, 2); + Esa = BCa ^ ((~BCe) & BCi); + Ese = BCe ^ ((~BCi) & BCo); + Esi = BCi ^ ((~BCo) & BCu); + Eso = BCo ^ ((~BCu) & BCa); + Esu = BCu ^ ((~BCa) & BCe); + + // prepareTheta + BCa = Eba ^ Ega ^ Eka ^ Ema ^ Esa; + BCe = Ebe ^ Ege ^ Eke ^ Eme ^ Ese; + BCi = Ebi ^ Egi ^ Eki ^ Emi ^ Esi; + BCo = Ebo ^ Ego ^ Eko ^ Emo ^ Eso; + BCu = Ebu ^ Egu ^ Eku ^ Emu ^ Esu; + + // thetaRhoPiChiIotaPrepareTheta(round+1, E, A) + Da = BCu ^ ROL(BCe, 1); + De = BCa ^ ROL(BCi, 1); + Di = BCe ^ ROL(BCo, 1); + Do = BCi ^ ROL(BCu, 1); + Du = BCo ^ ROL(BCa, 1); + + Eba ^= Da; + BCa = Eba; + Ege ^= De; + BCe = ROL(Ege, 44); + Eki ^= Di; + BCi = ROL(Eki, 43); + Emo ^= Do; + BCo = ROL(Emo, 21); + Esu ^= Du; + BCu = ROL(Esu, 14); + Aba = BCa ^ ((~BCe) & BCi); + Aba ^= KeccakF_RoundConstants[round + 1]; + Abe = BCe ^ ((~BCi) & BCo); + Abi = BCi ^ ((~BCo) & BCu); + Abo = BCo ^ ((~BCu) & BCa); + Abu = BCu ^ ((~BCa) & BCe); + + Ebo ^= Do; + BCa = ROL(Ebo, 28); + Egu ^= Du; + BCe = ROL(Egu, 20); + Eka ^= Da; + BCi = ROL(Eka, 3); + Eme ^= De; + BCo = ROL(Eme, 45); + Esi ^= Di; + BCu = ROL(Esi, 61); + Aga = BCa ^ ((~BCe) & BCi); + Age = BCe ^ ((~BCi) & BCo); + Agi = BCi ^ ((~BCo) & BCu); + Ago = BCo ^ ((~BCu) & BCa); + Agu = BCu ^ ((~BCa) & BCe); + + Ebe ^= De; + BCa = ROL(Ebe, 1); + Egi ^= Di; + BCe = ROL(Egi, 6); + Eko ^= Do; + BCi = ROL(Eko, 25); + Emu ^= Du; + BCo = ROL(Emu, 8); + Esa ^= Da; + BCu = ROL(Esa, 18); + Aka = BCa ^ ((~BCe) & BCi); + Ake = BCe ^ ((~BCi) & BCo); + Aki = BCi ^ ((~BCo) & BCu); + Ako = BCo ^ ((~BCu) & BCa); + Aku = BCu ^ ((~BCa) & BCe); + + Ebu ^= Du; + BCa = ROL(Ebu, 27); + Ega ^= Da; + BCe = ROL(Ega, 36); + Eke ^= De; + BCi = ROL(Eke, 10); + Emi ^= Di; + BCo = ROL(Emi, 15); + Eso ^= Do; + BCu = ROL(Eso, 56); + Ama = BCa ^ ((~BCe) & BCi); + Ame = BCe ^ ((~BCi) & BCo); + Ami = BCi ^ ((~BCo) & BCu); + Amo = BCo ^ ((~BCu) & BCa); + Amu = BCu ^ ((~BCa) & BCe); + + Ebi ^= Di; + BCa = ROL(Ebi, 62); + Ego ^= Do; + BCe = ROL(Ego, 55); + Eku ^= Du; + BCi = ROL(Eku, 39); + Ema ^= Da; + BCo = ROL(Ema, 41); + Ese ^= De; + BCu = ROL(Ese, 2); + Asa = BCa ^ ((~BCe) & BCi); + Ase = BCe ^ ((~BCi) & BCo); + Asi = BCi ^ 
((~BCo) & BCu); + Aso = BCo ^ ((~BCu) & BCa); + Asu = BCu ^ ((~BCa) & BCe); + } + + // copyToState(state, A) + state[0] = Aba; + state[1] = Abe; + state[2] = Abi; + state[3] = Abo; + state[4] = Abu; + state[5] = Aga; + state[6] = Age; + state[7] = Agi; + state[8] = Ago; + state[9] = Agu; + state[10] = Aka; + state[11] = Ake; + state[12] = Aki; + state[13] = Ako; + state[14] = Aku; + state[15] = Ama; + state[16] = Ame; + state[17] = Ami; + state[18] = Amo; + state[19] = Amu; + state[20] = Asa; + state[21] = Ase; + state[22] = Asi; + state[23] = Aso; + state[24] = Asu; +} + +/************************************************* + * Name: keccak_absorb + * + * Description: Absorb step of Keccak; + * non-incremental, starts by zeroeing the state. + * + * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + * - const uint8_t *m: pointer to input to be absorbed into s + * - size_t mlen: length of input in bytes + * - uint8_t p: domain-separation byte for different + * Keccak-derived functions + **************************************************/ +static void keccak_absorb(uint64_t *s, uint32_t r, const uint8_t *m, + size_t mlen, uint8_t p) { + size_t i; + uint8_t t[200]; + + /* Zero state */ + for (i = 0; i < 25; ++i) { + s[i] = 0; + } + + while (mlen >= r) { + for (i = 0; i < r / 8; ++i) { + s[i] ^= load64(m + 8 * i); + } + + KeccakF1600_StatePermute(s); + mlen -= r; + m += r; + } + + for (i = 0; i < r; ++i) { + t[i] = 0; + } + for (i = 0; i < mlen; ++i) { + t[i] = m[i]; + } + t[i] = p; + t[r - 1] |= 128; + for (i = 0; i < r / 8; ++i) { + s[i] ^= load64(t + 8 * i); + } +} + +/************************************************* + * Name: keccak_squeezeblocks + * + * Description: Squeeze step of Keccak. Squeezes full blocks of r bytes each. + * Modifies the state. Can be called multiple times to keep + * squeezing, i.e., is incremental. + * + * Arguments: - uint8_t *h: pointer to output blocks + * - size_t nblocks: number of blocks to be + * squeezed (written to h) + * - uint64_t *s: pointer to input/output Keccak state + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + **************************************************/ +static void keccak_squeezeblocks(uint8_t *h, size_t nblocks, + uint64_t *s, uint32_t r) { + while (nblocks > 0) { + KeccakF1600_StatePermute(s); + for (size_t i = 0; i < (r >> 3); i++) { + store64(h + 8 * i, s[i]); + } + h += r; + nblocks--; + } +} + +/************************************************* + * Name: keccak_inc_init + * + * Description: Initializes the incremental Keccak state to zero. + * + * Arguments: - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. + **************************************************/ +static void keccak_inc_init(uint64_t *s_inc) { + size_t i; + + for (i = 0; i < 25; ++i) { + s_inc[i] = 0; + } + s_inc[25] = 0; +} + +/************************************************* + * Name: keccak_inc_absorb + * + * Description: Incremental keccak absorb + * Preceded by keccak_inc_init, succeeded by keccak_inc_finalize + * + * Arguments: - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. 
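+ * Example (illustrative): with r = 136 (SHAKE256) and a fresh state, absorbing 200 bytes permutes the state once and leaves s_inc[25] = 64 pending bytes.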
+ * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + * - const uint8_t *m: pointer to input to be absorbed into s + * - size_t mlen: length of input in bytes + **************************************************/ +static void keccak_inc_absorb(uint64_t *s_inc, uint32_t r, const uint8_t *m, + size_t mlen) { + size_t i; + + /* Recall that s_inc[25] is the non-absorbed bytes xored into the state */ + while (mlen + s_inc[25] >= r) { + for (i = 0; i < r - (uint32_t)s_inc[25]; i++) { + /* Take the i'th byte from message + xor with the s_inc[25] + i'th byte of the state; little-endian */ + s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); + } + mlen -= (size_t)(r - s_inc[25]); + m += r - s_inc[25]; + s_inc[25] = 0; + + KeccakF1600_StatePermute(s_inc); + } + + for (i = 0; i < mlen; i++) { + s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); + } + s_inc[25] += mlen; +} + +/************************************************* + * Name: keccak_inc_finalize + * + * Description: Finalizes Keccak absorb phase, prepares for squeezing + * + * Arguments: - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + * - uint8_t p: domain-separation byte for different + * Keccak-derived functions + **************************************************/ +static void keccak_inc_finalize(uint64_t *s_inc, uint32_t r, uint8_t p) { + /* After keccak_inc_absorb, we are guaranteed that s_inc[25] < r, + so we can always use one more byte for p in the current state. */ + s_inc[s_inc[25] >> 3] ^= (uint64_t)p << (8 * (s_inc[25] & 0x07)); + s_inc[(r - 1) >> 3] ^= (uint64_t)128 << (8 * ((r - 1) & 0x07)); + s_inc[25] = 0; +} + +/************************************************* + * Name: keccak_inc_squeeze + * + * Description: Incremental Keccak squeeze; can be called on byte-level + * + * Arguments: - uint8_t *h: pointer to output bytes + * - size_t outlen: number of bytes to be squeezed + * - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + **************************************************/ +static void keccak_inc_squeeze(uint8_t *h, size_t outlen, + uint64_t *s_inc, uint32_t r) { + size_t i; + + /* First consume any bytes we still have sitting around */ + for (i = 0; i < outlen && i < s_inc[25]; i++) { + /* There are s_inc[25] bytes left, so r - s_inc[25] is the first + available byte. We consume from there, i.e., up to r. 
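+ (Illustrative: with r = 136 and s_inc[25] = 40, output starts at state byte offset 96 and can run through offset 135 before another permutation is required.)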
*/ + h[i] = (uint8_t)(s_inc[(r - s_inc[25] + i) >> 3] >> (8 * ((r - s_inc[25] + i) & 0x07))); + } + h += i; + outlen -= i; + s_inc[25] -= i; + + /* Then squeeze the remaining necessary blocks */ + while (outlen > 0) { + KeccakF1600_StatePermute(s_inc); + + for (i = 0; i < outlen && i < r; i++) { + h[i] = (uint8_t)(s_inc[i >> 3] >> (8 * (i & 0x07))); + } + h += i; + outlen -= i; + s_inc[25] = r - i; + } +} + +void shake128_inc_init(shake128incctx *state) { + keccak_inc_init(state->ctx); +} + +void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHAKE128_RATE, input, inlen); +} + +void shake128_inc_finalize(shake128incctx *state) { + keccak_inc_finalize(state->ctx, SHAKE128_RATE, 0x1F); +} + +void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state) { + keccak_inc_squeeze(output, outlen, state->ctx, SHAKE128_RATE); +} + +void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void shake128_inc_ctx_release(shake128incctx *state) { + (void)state; +} + +void shake256_inc_init(shake256incctx *state) { + keccak_inc_init(state->ctx); +} + +void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHAKE256_RATE, input, inlen); +} + +void shake256_inc_finalize(shake256incctx *state) { + keccak_inc_finalize(state->ctx, SHAKE256_RATE, 0x1F); +} + +void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state) { + keccak_inc_squeeze(output, outlen, state->ctx, SHAKE256_RATE); +} + +void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void shake256_inc_ctx_release(shake256incctx *state) { + (void)state; +} + + +/************************************************* + * Name: shake128_absorb + * + * Description: Absorb step of the SHAKE128 XOF. + * non-incremental, starts by zeroeing the state. + * + * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state + * - const uint8_t *input: pointer to input to be absorbed + * into s + * - size_t inlen: length of input in bytes + **************************************************/ +void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen) { + keccak_absorb(state->ctx, SHAKE128_RATE, input, inlen, 0x1F); +} + +/************************************************* + * Name: shake128_squeezeblocks + * + * Description: Squeeze step of SHAKE128 XOF. Squeezes full blocks of + * SHAKE128_RATE bytes each. Modifies the state. Can be called + * multiple times to keep squeezing, i.e., is incremental. + * + * Arguments: - uint8_t *output: pointer to output blocks + * - size_t nblocks: number of blocks to be squeezed + * (written to output) + * - shake128ctx *state: pointer to input/output Keccak state + **************************************************/ +void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state) { + keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE128_RATE); +} + +void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); +} + +/** Release the allocated state. Call only once. */ +void shake128_ctx_release(shake128ctx *state) { + (void)state; +} + +/************************************************* + * Name: shake256_absorb + * + * Description: Absorb step of the SHAKE256 XOF. 
+ * non-incremental, starts by zeroeing the state. + * + * Arguments: - shake256ctx *state: pointer to (uninitialized) output Keccak state + * - const uint8_t *input: pointer to input to be absorbed + * into s + * - size_t inlen: length of input in bytes + **************************************************/ +void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen) { + keccak_absorb(state->ctx, SHAKE256_RATE, input, inlen, 0x1F); +} + +/************************************************* + * Name: shake256_squeezeblocks + * + * Description: Squeeze step of SHAKE256 XOF. Squeezes full blocks of + * SHAKE256_RATE bytes each. Modifies the state. Can be called + * multiple times to keep squeezing, i.e., is incremental. + * + * Arguments: - uint8_t *output: pointer to output blocks + * - size_t nblocks: number of blocks to be squeezed + * (written to output) + * - shake256ctx *state: pointer to input/output Keccak state + **************************************************/ +void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state) { + keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE256_RATE); +} + +void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); +} + +/** Release the allocated state. Call only once. */ +void shake256_ctx_release(shake256ctx *state) { + (void)state; +} + +/************************************************* + * Name: shake128 + * + * Description: SHAKE128 XOF with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - size_t outlen: requested output length in bytes + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void shake128(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen) { + size_t nblocks = outlen / SHAKE128_RATE; + uint8_t t[SHAKE128_RATE]; + shake128ctx s; + + shake128_absorb(&s, input, inlen); + shake128_squeezeblocks(output, nblocks, &s); + + output += nblocks * SHAKE128_RATE; + outlen -= nblocks * SHAKE128_RATE; + + if (outlen) { + shake128_squeezeblocks(t, 1, &s); + for (size_t i = 0; i < outlen; ++i) { + output[i] = t[i]; + } + } + shake128_ctx_release(&s); +} + +/************************************************* + * Name: shake256 + * + * Description: SHAKE256 XOF with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - size_t outlen: requested output length in bytes + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void shake256(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen) { + size_t nblocks = outlen / SHAKE256_RATE; + uint8_t t[SHAKE256_RATE]; + shake256ctx s; + + shake256_absorb(&s, input, inlen); + shake256_squeezeblocks(output, nblocks, &s); + + output += nblocks * SHAKE256_RATE; + outlen -= nblocks * SHAKE256_RATE; + + if (outlen) { + shake256_squeezeblocks(t, 1, &s); + for (size_t i = 0; i < outlen; ++i) { + output[i] = t[i]; + } + } + shake256_ctx_release(&s); +} + +void sha3_256_inc_init(sha3_256incctx *state) { + keccak_inc_init(state->ctx); +} + +void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void sha3_256_inc_ctx_release(sha3_256incctx *state) { + (void)state; +} + +void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen) { + 
keccak_inc_absorb(state->ctx, SHA3_256_RATE, input, inlen); +} + +void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state) { + uint8_t t[SHA3_256_RATE]; + keccak_inc_finalize(state->ctx, SHA3_256_RATE, 0x06); + + keccak_squeezeblocks(t, 1, state->ctx, SHA3_256_RATE); + + sha3_256_inc_ctx_release(state); + + for (size_t i = 0; i < 32; i++) { + output[i] = t[i]; + } +} + +/************************************************* + * Name: sha3_256 + * + * Description: SHA3-256 with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen) { + uint64_t s[25]; + uint8_t t[SHA3_256_RATE]; + + /* Absorb input */ + keccak_absorb(s, SHA3_256_RATE, input, inlen, 0x06); + + /* Squeeze output */ + keccak_squeezeblocks(t, 1, s, SHA3_256_RATE); + + for (size_t i = 0; i < 32; i++) { + output[i] = t[i]; + } +} + +void sha3_384_inc_init(sha3_384incctx *state) { + keccak_inc_init(state->ctx); +} + +void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHA3_384_RATE, input, inlen); +} + +void sha3_384_inc_ctx_release(sha3_384incctx *state) { + (void)state; +} + +void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state) { + uint8_t t[SHA3_384_RATE]; + keccak_inc_finalize(state->ctx, SHA3_384_RATE, 0x06); + + keccak_squeezeblocks(t, 1, state->ctx, SHA3_384_RATE); + + sha3_384_inc_ctx_release(state); + + for (size_t i = 0; i < 48; i++) { + output[i] = t[i]; + } +} + +/************************************************* + * Name: sha3_384 + * + * Description: SHA3-384 with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen) { + uint64_t s[25]; + uint8_t t[SHA3_384_RATE]; + + /* Absorb input */ + keccak_absorb(s, SHA3_384_RATE, input, inlen, 0x06); + + /* Squeeze output */ + keccak_squeezeblocks(t, 1, s, SHA3_384_RATE); + + for (size_t i = 0; i < 48; i++) { + output[i] = t[i]; + } +} + +void sha3_512_inc_init(sha3_512incctx *state) { + keccak_inc_init(state->ctx); +} + +void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHA3_512_RATE, input, inlen); +} + +void sha3_512_inc_ctx_release(sha3_512incctx *state) { + (void)state; +} + +void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state) { + uint8_t t[SHA3_512_RATE]; + keccak_inc_finalize(state->ctx, SHA3_512_RATE, 0x06); + + keccak_squeezeblocks(t, 1, state->ctx, SHA3_512_RATE); + + sha3_512_inc_ctx_release(state); + + for (size_t i = 0; i < 64; i++) { + output[i] = t[i]; + } +} + +/************************************************* + * Name: sha3_512 + * + * Description: SHA3-512 with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes +
**************************************************/ +void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen) { + uint64_t s[25]; + uint8_t t[SHA3_512_RATE]; + + /* Absorb input */ + keccak_absorb(s, SHA3_512_RATE, input, inlen, 0x06); + + /* Squeeze output */ + keccak_squeezeblocks(t, 1, s, SHA3_512_RATE); + + for (size_t i = 0; i < 64; i++) { + output[i] = t[i]; + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fips202.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fips202.h new file mode 100644 index 0000000000..c29ebd8f9d --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fips202.h @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef FIPS202_H +#define FIPS202_H + +#include +#include + +#define SHAKE128_RATE 168 +#define SHAKE256_RATE 136 +#define SHA3_256_RATE 136 +#define SHA3_384_RATE 104 +#define SHA3_512_RATE 72 + +#define PQC_SHAKEINCCTX_U64WORDS 26 +#define PQC_SHAKECTX_U64WORDS 25 + +#define PQC_SHAKEINCCTX_BYTES (sizeof(uint64_t) * 26) +#define PQC_SHAKECTX_BYTES (sizeof(uint64_t) * 25) + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} shake128incctx; + +// Context for non-incremental API +typedef struct { + uint64_t ctx[PQC_SHAKECTX_U64WORDS]; +} shake128ctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} shake256incctx; + +// Context for non-incremental API +typedef struct { + uint64_t ctx[PQC_SHAKECTX_U64WORDS]; +} shake256ctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} sha3_256incctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} sha3_384incctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} sha3_512incctx; + +/* Initialize the state and absorb the provided input. + * + * This function does not support being called multiple times + * with the same state. + */ +void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen); +/* Squeeze output out of the sponge. + * + * Supports being called multiple times + */ +void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state); +/* Free the state */ +void shake128_ctx_release(shake128ctx *state); +/* Copy the state. */ +void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src); + +/* Initialize incremental hashing API */ +void shake128_inc_init(shake128incctx *state); +/* Absorb more information into the XOF. + * + * Can be called multiple times. + */ +void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen); +/* Finalize the XOF for squeezing */ +void shake128_inc_finalize(shake128incctx *state); +/* Squeeze output out of the sponge. + * + * Supports being called multiple times + */ +void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state); +/* Copy the context of the SHAKE128 XOF */ +void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src); +/* Free the context of the SHAKE128 XOF */ +void shake128_inc_ctx_release(shake128incctx *state); + +/* Initialize the state and absorb the provided input. + * + * This function does not support being called multiple times + * with the same state. + */ +void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen); +/* Squeeze output out of the sponge. 
+ * + * Supports being called multiple times + */ +void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state); +/* Free the context held by this XOF */ +void shake256_ctx_release(shake256ctx *state); +/* Copy the context held by this XOF */ +void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src); + +/* Initialize incremental hashing API */ +void shake256_inc_init(shake256incctx *state); +void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen); +/* Prepares for squeeze phase */ +void shake256_inc_finalize(shake256incctx *state); +/* Squeeze output out of the sponge. + * + * Supports being called multiple times + */ +void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state); +/* Copy the state */ +void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src); +/* Free the state */ +void shake256_inc_ctx_release(shake256incctx *state); + +/* One-stop SHAKE128 call */ +void shake128(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen); + +/* One-stop SHAKE256 call */ +void shake256(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen); + +/* Initialize the incremental hashing state */ +void sha3_256_inc_init(sha3_256incctx *state); +/* Absorb blocks into SHA3 */ +void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen); +/* Obtain the output of the function and free `state` */ +void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state); +/* Copy the context */ +void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src); +/* Release the state, don't use if `_finalize` has been used */ +void sha3_256_inc_ctx_release(sha3_256incctx *state); + +void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen); + +/* Initialize the incremental hashing state */ +void sha3_384_inc_init(sha3_384incctx *state); +/* Absorb blocks into SHA3 */ +void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, size_t inlen); +/* Obtain the output of the function and free `state` */ +void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state); +/* Copy the context */ +void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src); +/* Release the state, don't use if `_finalize` has been used */ +void sha3_384_inc_ctx_release(sha3_384incctx *state); + +/* One-stop SHA3-384 shop */ +void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen); + +/* Initialize the incremental hashing state */ +void sha3_512_inc_init(sha3_512incctx *state); +/* Absorb blocks into SHA3 */ +void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen); +/* Obtain the output of the function and free `state` */ +void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state); +/* Copy the context */ +void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src); +/* Release the state, don't use if `_finalize` has been used */ +void sha3_512_inc_ctx_release(sha3_512incctx *state); + +/* One-stop SHA3-512 shop */ +void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp.c new file mode 100644 index 0000000000..48e2937f17 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp.c @@ -0,0 +1,15 @@ +#include + +/* + * If ctl == 0x00000000, then *d is set to a0 + * If ctl == 0xFFFFFFFF, then *d is set to a1 + * ctl MUST be either 0x00000000 
or 0xFFFFFFFF. + */ +void +fp_select(fp_t *d, const fp_t *a0, const fp_t *a1, uint32_t ctl) +{ + digit_t cw = (int32_t)ctl; + for (unsigned int i = 0; i < NWORDS_FIELD; i++) { + (*d)[i] = (*a0)[i] ^ (cw & ((*a0)[i] ^ (*a1)[i])); + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp.h new file mode 100644 index 0000000000..1241d5801e --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp.h @@ -0,0 +1,48 @@ +#ifndef FP_H +#define FP_H + +//////////////////////////////////////////////// NOTE: this is placed here for now +#include +#include +#include +#include +#include +#include +#include +#include + +typedef digit_t fp_t[NWORDS_FIELD]; // Datatype for representing field elements + +extern const digit_t ONE[NWORDS_FIELD]; +extern const digit_t ZERO[NWORDS_FIELD]; +// extern const digit_t PM1O3[NWORDS_FIELD]; + +void fp_set_small(fp_t *x, const digit_t val); +void fp_mul_small(fp_t *x, const fp_t *a, const uint32_t val); +void fp_set_zero(fp_t *x); +void fp_set_one(fp_t *x); +uint32_t fp_is_equal(const fp_t *a, const fp_t *b); +uint32_t fp_is_zero(const fp_t *a); +void fp_copy(fp_t *out, const fp_t *a); + +void fp_encode(void *dst, const fp_t *a); +void fp_decode_reduce(fp_t *d, const void *src, size_t len); +uint32_t fp_decode(fp_t *d, const void *src); + +void fp_select(fp_t *d, const fp_t *a0, const fp_t *a1, uint32_t ctl); +void fp_cswap(fp_t *a, fp_t *b, uint32_t ctl); + +void fp_add(fp_t *out, const fp_t *a, const fp_t *b); +void fp_sub(fp_t *out, const fp_t *a, const fp_t *b); +void fp_neg(fp_t *out, const fp_t *a); +void fp_sqr(fp_t *out, const fp_t *a); +void fp_mul(fp_t *out, const fp_t *a, const fp_t *b); + +void fp_inv(fp_t *x); +uint32_t fp_is_square(const fp_t *a); +void fp_sqrt(fp_t *a); +void fp_half(fp_t *out, const fp_t *a); +void fp_exp3div4(fp_t *out, const fp_t *a); +void fp_div3(fp_t *out, const fp_t *a); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp2.c new file mode 100644 index 0000000000..a2589525f9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp2.c @@ -0,0 +1,328 @@ +#include +#include +#include + +/* Arithmetic modulo X^2 + 1 */ + +void +fp2_set_small(fp2_t *x, const digit_t val) +{ + fp_set_small(&(x->re), val); + fp_set_zero(&(x->im)); +} + +void +fp2_mul_small(fp2_t *x, const fp2_t *y, uint32_t n) +{ + fp_mul_small(&x->re, &y->re, n); + fp_mul_small(&x->im, &y->im, n); +} + +void +fp2_set_one(fp2_t *x) +{ + fp_set_one(&(x->re)); + fp_set_zero(&(x->im)); +} + +void +fp2_set_zero(fp2_t *x) +{ + fp_set_zero(&(x->re)); + fp_set_zero(&(x->im)); +} + +// Is a GF(p^2) element zero? +// Returns 0xFF...FF (true) if a=0, 0 (false) otherwise +uint32_t +fp2_is_zero(const fp2_t *a) +{ + return fp_is_zero(&(a->re)) & fp_is_zero(&(a->im)); +} + +// Compare two GF(p^2) elements in constant time +// Returns 0xFF...FF (true) if a=b, 0 (false) otherwise +uint32_t +fp2_is_equal(const fp2_t *a, const fp2_t *b) +{ + return fp_is_equal(&(a->re), &(b->re)) & fp_is_equal(&(a->im), &(b->im)); +} + +// Is a GF(p^2) element one? 
+// Returns 0xFF...FF (true) if a=1, 0 (false) otherwise +uint32_t +fp2_is_one(const fp2_t *a) +{ + return fp_is_equal(&(a->re), &ONE) & fp_is_zero(&(a->im)); +} + +void +fp2_copy(fp2_t *x, const fp2_t *y) +{ + fp_copy(&(x->re), &(y->re)); + fp_copy(&(x->im), &(y->im)); +} + +void +fp2_add(fp2_t *x, const fp2_t *y, const fp2_t *z) +{ + fp_add(&(x->re), &(y->re), &(z->re)); + fp_add(&(x->im), &(y->im), &(z->im)); +} + +void +fp2_add_one(fp2_t *x, const fp2_t *y) +{ + fp_add(&x->re, &y->re, &ONE); + fp_copy(&x->im, &y->im); +} + +void +fp2_sub(fp2_t *x, const fp2_t *y, const fp2_t *z) +{ + fp_sub(&(x->re), &(y->re), &(z->re)); + fp_sub(&(x->im), &(y->im), &(z->im)); +} + +void +fp2_neg(fp2_t *x, const fp2_t *y) +{ + fp_neg(&(x->re), &(y->re)); + fp_neg(&(x->im), &(y->im)); +} + +void +fp2_mul(fp2_t *x, const fp2_t *y, const fp2_t *z) +{ + fp_t t0, t1; + + fp_add(&t0, &(y->re), &(y->im)); + fp_add(&t1, &(z->re), &(z->im)); + fp_mul(&t0, &t0, &t1); + fp_mul(&t1, &(y->im), &(z->im)); + fp_mul(&(x->re), &(y->re), &(z->re)); + fp_sub(&(x->im), &t0, &t1); + fp_sub(&(x->im), &(x->im), &(x->re)); + fp_sub(&(x->re), &(x->re), &t1); +} + +void +fp2_sqr(fp2_t *x, const fp2_t *y) +{ + fp_t sum, diff; + + fp_add(&sum, &(y->re), &(y->im)); + fp_sub(&diff, &(y->re), &(y->im)); + fp_mul(&(x->im), &(y->re), &(y->im)); + fp_add(&(x->im), &(x->im), &(x->im)); + fp_mul(&(x->re), &sum, &diff); +} + +void +fp2_inv(fp2_t *x) +{ + fp_t t0, t1; + + fp_sqr(&t0, &(x->re)); + fp_sqr(&t1, &(x->im)); + fp_add(&t0, &t0, &t1); + fp_inv(&t0); + fp_mul(&(x->re), &(x->re), &t0); + fp_mul(&(x->im), &(x->im), &t0); + fp_neg(&(x->im), &(x->im)); +} + +uint32_t +fp2_is_square(const fp2_t *x) +{ + fp_t t0, t1; + + fp_sqr(&t0, &(x->re)); + fp_sqr(&t1, &(x->im)); + fp_add(&t0, &t0, &t1); + + return fp_is_square(&t0); +} + +void +fp2_sqrt(fp2_t *a) +{ + fp_t x0, x1, t0, t1; + + /* From "Optimized One-Dimensional SQIsign Verification on Intel and + * Cortex-M4" by Aardal et al: https://eprint.iacr.org/2024/1563 */ + + // x0 = \delta = sqrt(a0^2 + a1^2). + fp_sqr(&x0, &(a->re)); + fp_sqr(&x1, &(a->im)); + fp_add(&x0, &x0, &x1); + fp_sqrt(&x0); + // If a1 = 0, there is a risk of \delta = -a0, which makes x0 = 0 below. + // In that case, we restore the value \delta = a0. + fp_select(&x0, &x0, &(a->re), fp_is_zero(&(a->im))); + // x0 = \delta + a0, t0 = 2 * x0. + fp_add(&x0, &x0, &(a->re)); + fp_add(&t0, &x0, &x0); + + // x1 = t0^(p-3)/4 + fp_exp3div4(&x1, &t0); + + // x0 = x0 * x1, x1 = x1 * a1, t1 = (2x0)^2. + fp_mul(&x0, &x0, &x1); + fp_mul(&x1, &x1, &(a->im)); + fp_add(&t1, &x0, &x0); + fp_sqr(&t1, &t1); + // If t1 = t0, return x0 + x1*i, otherwise x1 - x0*i. 
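+ // f is all-ones exactly when t1 == t0; the fp_select calls below then keep (x0, x1), and otherwise switch to (x1, -x0), so the choice is made without branching.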
+ fp_sub(&t0, &t0, &t1); + uint32_t f = fp_is_zero(&t0); + fp_neg(&t1, &x0); + fp_copy(&t0, &x1); + fp_select(&t0, &t0, &x0, f); + fp_select(&t1, &t1, &x1, f); + + // Check if t0 is zero + uint32_t t0_is_zero = fp_is_zero(&t0); + + // Check whether t0, t1 are odd + // Note: we encode to ensure canonical representation + uint8_t tmp_bytes[FP_ENCODED_BYTES]; + fp_encode(tmp_bytes, &t0); + uint32_t t0_is_odd = -((uint32_t)tmp_bytes[0] & 1); + fp_encode(tmp_bytes, &t1); + uint32_t t1_is_odd = -((uint32_t)tmp_bytes[0] & 1); + + // We negate the output if: + // t0 is odd, or + // t0 is zero and t1 is odd + uint32_t negate_output = t0_is_odd | (t0_is_zero & t1_is_odd); + fp_neg(&x0, &t0); + fp_select(&(a->re), &t0, &x0, negate_output); + fp_neg(&x0, &t1); + fp_select(&(a->im), &t1, &x0, negate_output); +} + +uint32_t +fp2_sqrt_verify(fp2_t *a) +{ + fp2_t t0, t1; + + fp2_copy(&t0, a); + fp2_sqrt(a); + fp2_sqr(&t1, a); + + return (fp2_is_equal(&t0, &t1)); +} + +void +fp2_half(fp2_t *x, const fp2_t *y) +{ + fp_half(&(x->re), &(y->re)); + fp_half(&(x->im), &(y->im)); +} + +void +fp2_batched_inv(fp2_t *x, int len) +{ + fp2_t t1[len], t2[len]; + fp2_t inverse; + + // x = x0,...,xn + // t1 = x0, x0*x1, ... ,x0 * x1 * ... * xn + fp2_copy(&t1[0], &x[0]); + for (int i = 1; i < len; i++) { + fp2_mul(&t1[i], &t1[i - 1], &x[i]); + } + + // inverse = 1/ (x0 * x1 * ... * xn) + fp2_copy(&inverse, &t1[len - 1]); + fp2_inv(&inverse); + + fp2_copy(&t2[0], &inverse); + // t2 = 1/ (x0 * x1 * ... * xn), 1/ (x0 * x1 * ... * x(n-1)) , ... , 1/xO + for (int i = 1; i < len; i++) { + fp2_mul(&t2[i], &t2[i - 1], &x[len - i]); + } + + fp2_copy(&x[0], &t2[len - 1]); + + for (int i = 1; i < len; i++) { + fp2_mul(&x[i], &t1[i - 1], &t2[len - i - 1]); + } +} + +// exponentiation using square and multiply +// Warning!! Not constant time! 
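+// (Right-to-left binary method: acc holds x^(2^k) at step k and is multiplied into out whenever the corresponding exponent bit is set; the data-dependent branch on `bit` is what makes this variable time, so it should only be used with non-secret exponents.)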
+void +fp2_pow_vartime(fp2_t *out, const fp2_t *x, const digit_t *exp, const int size) +{ + fp2_t acc; + digit_t bit; + + fp2_copy(&acc, x); + fp2_set_one(out); + + // Iterate over each word of exp + for (int j = 0; j < size; j++) { + // Iterate over each bit of the word + for (int i = 0; i < RADIX; i++) { + bit = (exp[j] >> i) & 1; + if (bit == 1) { + fp2_mul(out, out, &acc); + } + fp2_sqr(&acc, &acc); + } + } +} + +void +fp2_print(const char *name, const fp2_t *a) +{ + printf("%s0x", name); + + uint8_t buf[FP_ENCODED_BYTES]; + fp_encode(&buf, &a->re); // Encoding ensures canonical rep + for (int i = 0; i < FP_ENCODED_BYTES; i++) { + printf("%02x", buf[FP_ENCODED_BYTES - i - 1]); + } + + printf(" + i*0x"); + + fp_encode(&buf, &a->im); + for (int i = 0; i < FP_ENCODED_BYTES; i++) { + printf("%02x", buf[FP_ENCODED_BYTES - i - 1]); + } + printf("\n"); +} + +void +fp2_encode(void *dst, const fp2_t *a) +{ + uint8_t *buf = dst; + fp_encode(buf, &(a->re)); + fp_encode(buf + FP_ENCODED_BYTES, &(a->im)); +} + +uint32_t +fp2_decode(fp2_t *d, const void *src) +{ + const uint8_t *buf = src; + uint32_t re, im; + + re = fp_decode(&(d->re), buf); + im = fp_decode(&(d->im), buf + FP_ENCODED_BYTES); + return re & im; +} + +void +fp2_select(fp2_t *d, const fp2_t *a0, const fp2_t *a1, uint32_t ctl) +{ + fp_select(&(d->re), &(a0->re), &(a1->re), ctl); + fp_select(&(d->im), &(a0->im), &(a1->im), ctl); +} + +void +fp2_cswap(fp2_t *a, fp2_t *b, uint32_t ctl) +{ + fp_cswap(&(a->re), &(b->re), ctl); + fp_cswap(&(a->im), &(b->im), ctl); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp2.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp2.h new file mode 100644 index 0000000000..00e673b7ca --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp2.h @@ -0,0 +1,41 @@ +#ifndef FP2_H +#define FP2_H + +#include +#include "fp.h" +#include + +// Structure for representing elements in GF(p^2) +typedef struct fp2_t +{ + fp_t re, im; +} fp2_t; + +void fp2_set_small(fp2_t *x, const digit_t val); +void fp2_mul_small(fp2_t *x, const fp2_t *y, uint32_t n); +void fp2_set_one(fp2_t *x); +void fp2_set_zero(fp2_t *x); +uint32_t fp2_is_zero(const fp2_t *a); +uint32_t fp2_is_equal(const fp2_t *a, const fp2_t *b); +uint32_t fp2_is_one(const fp2_t *a); +void fp2_copy(fp2_t *x, const fp2_t *y); +void fp2_add(fp2_t *x, const fp2_t *y, const fp2_t *z); +void fp2_add_one(fp2_t *x, const fp2_t *y); +void fp2_sub(fp2_t *x, const fp2_t *y, const fp2_t *z); +void fp2_neg(fp2_t *x, const fp2_t *y); +void fp2_mul(fp2_t *x, const fp2_t *y, const fp2_t *z); +void fp2_sqr(fp2_t *x, const fp2_t *y); +void fp2_inv(fp2_t *x); +uint32_t fp2_is_square(const fp2_t *x); +void fp2_sqrt(fp2_t *x); +uint32_t fp2_sqrt_verify(fp2_t *a); +void fp2_half(fp2_t *x, const fp2_t *y); +void fp2_batched_inv(fp2_t *x, int len); +void fp2_pow_vartime(fp2_t *out, const fp2_t *x, const digit_t *exp, const int size); +void fp2_print(const char *name, const fp2_t *a); +void fp2_encode(void *dst, const fp2_t *a); +uint32_t fp2_decode(fp2_t *d, const void *src); +void fp2_select(fp2_t *d, const fp2_t *a0, const fp2_t *a1, uint32_t ctl); +void fp2_cswap(fp2_t *a, fp2_t *b, uint32_t ctl); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_constants.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_constants.h new file mode 100644 index 0000000000..c770b78f58 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_constants.h @@ -0,0 +1,17 @@ +#if RADIX == 32 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +#define NWORDS_FIELD 
8 +#else +#define NWORDS_FIELD 9 +#endif +#define NWORDS_ORDER 8 +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +#define NWORDS_FIELD 4 +#else +#define NWORDS_FIELD 5 +#endif +#define NWORDS_ORDER 4 +#endif +#define BITS 256 +#define LOG2P 8 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c new file mode 100644 index 0000000000..62e5491dc1 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c @@ -0,0 +1,945 @@ +// clang-format off +// Command line : python monty.py 32 +// 0x4ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff +#ifdef RADIX_32 + +#include +#include + +#define sspint int32_t +#define spint uint32_t +#define udpint uint64_t +#define dpint uint64_t + +#define Wordlength 32 +#define Nlimbs 9 +#define Radix 29 +#define Nbits 251 +#define Nbytes 32 + +#define MONTGOMERY +// propagate carries +inline static spint prop(spint *n) { + int i; + spint mask = ((spint)1 << 29u) - (spint)1; + sspint carry = (sspint)n[0]; + carry >>= 29u; + n[0] &= mask; + for (i = 1; i < 8; i++) { + carry += (sspint)n[i]; + n[i] = (spint)carry & mask; + carry >>= 29u; + } + n[8] += (spint)carry; + return -((n[8] >> 1) >> 30u); +} + +// propagate carries and add p if negative, propagate carries again +inline static int flatten(spint *n) { + spint carry = prop(n); + n[0] -= (spint)1u & carry; + n[8] += ((spint)0x50000u) & carry; + (void)prop(n); + return (int)(carry & 1); +} + +// Montgomery final subtract +static int modfsb(spint *n) { + n[0] += (spint)1u; + n[8] -= (spint)0x50000u; + return flatten(n); +} + +// Modular addition - reduce less than 2p +static void modadd(const spint *a, const spint *b, spint *n) { + spint carry; + n[0] = a[0] + b[0]; + n[1] = a[1] + b[1]; + n[2] = a[2] + b[2]; + n[3] = a[3] + b[3]; + n[4] = a[4] + b[4]; + n[5] = a[5] + b[5]; + n[6] = a[6] + b[6]; + n[7] = a[7] + b[7]; + n[8] = a[8] + b[8]; + n[0] += (spint)2u; + n[8] -= (spint)0xa0000u; + carry = prop(n); + n[0] -= (spint)2u & carry; + n[8] += ((spint)0xa0000u) & carry; + (void)prop(n); +} + +// Modular subtraction - reduce less than 2p +static void modsub(const spint *a, const spint *b, spint *n) { + spint carry; + n[0] = a[0] - b[0]; + n[1] = a[1] - b[1]; + n[2] = a[2] - b[2]; + n[3] = a[3] - b[3]; + n[4] = a[4] - b[4]; + n[5] = a[5] - b[5]; + n[6] = a[6] - b[6]; + n[7] = a[7] - b[7]; + n[8] = a[8] - b[8]; + carry = prop(n); + n[0] -= (spint)2u & carry; + n[8] += ((spint)0xa0000u) & carry; + (void)prop(n); +} + +// Modular negation +static void modneg(const spint *b, spint *n) { + spint carry; + n[0] = (spint)0 - b[0]; + n[1] = (spint)0 - b[1]; + n[2] = (spint)0 - b[2]; + n[3] = (spint)0 - b[3]; + n[4] = (spint)0 - b[4]; + n[5] = (spint)0 - b[5]; + n[6] = (spint)0 - b[6]; + n[7] = (spint)0 - b[7]; + n[8] = (spint)0 - b[8]; + carry = prop(n); + n[0] -= (spint)2u & carry; + n[8] += ((spint)0xa0000u) & carry; + (void)prop(n); +} + +// Overflow limit = 18446744073709551616 +// maximum possible = 2594249331921584137 +// Modular multiplication, c=a*b mod 2p +static void modmul(const spint *a, const spint *b, spint *c) { + dpint t = 0; + spint p8 = 0x50000u; + spint q = ((spint)1 << 29u); // q is unsaturated radix + spint mask = (spint)(q - (spint)1); + t += (dpint)a[0] * b[0]; + spint v0 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[1]; + t += (dpint)a[1] * b[0]; + spint v1 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[2]; + t += (dpint)a[1] * b[1]; + t += (dpint)a[2] * b[0]; + 
spint v2 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[3]; + t += (dpint)a[1] * b[2]; + t += (dpint)a[2] * b[1]; + t += (dpint)a[3] * b[0]; + spint v3 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[4]; + t += (dpint)a[1] * b[3]; + t += (dpint)a[2] * b[2]; + t += (dpint)a[3] * b[1]; + t += (dpint)a[4] * b[0]; + spint v4 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[5]; + t += (dpint)a[1] * b[4]; + t += (dpint)a[2] * b[3]; + t += (dpint)a[3] * b[2]; + t += (dpint)a[4] * b[1]; + t += (dpint)a[5] * b[0]; + spint v5 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[6]; + t += (dpint)a[1] * b[5]; + t += (dpint)a[2] * b[4]; + t += (dpint)a[3] * b[3]; + t += (dpint)a[4] * b[2]; + t += (dpint)a[5] * b[1]; + t += (dpint)a[6] * b[0]; + spint v6 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[7]; + t += (dpint)a[1] * b[6]; + t += (dpint)a[2] * b[5]; + t += (dpint)a[3] * b[4]; + t += (dpint)a[4] * b[3]; + t += (dpint)a[5] * b[2]; + t += (dpint)a[6] * b[1]; + t += (dpint)a[7] * b[0]; + spint v7 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[8]; + t += (dpint)a[1] * b[7]; + t += (dpint)a[2] * b[6]; + t += (dpint)a[3] * b[5]; + t += (dpint)a[4] * b[4]; + t += (dpint)a[5] * b[3]; + t += (dpint)a[6] * b[2]; + t += (dpint)a[7] * b[1]; + t += (dpint)a[8] * b[0]; + t += (dpint)v0 * (dpint)p8; + spint v8 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[1] * b[8]; + t += (dpint)a[2] * b[7]; + t += (dpint)a[3] * b[6]; + t += (dpint)a[4] * b[5]; + t += (dpint)a[5] * b[4]; + t += (dpint)a[6] * b[3]; + t += (dpint)a[7] * b[2]; + t += (dpint)a[8] * b[1]; + t += (dpint)v1 * (dpint)p8; + c[0] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[2] * b[8]; + t += (dpint)a[3] * b[7]; + t += (dpint)a[4] * b[6]; + t += (dpint)a[5] * b[5]; + t += (dpint)a[6] * b[4]; + t += (dpint)a[7] * b[3]; + t += (dpint)a[8] * b[2]; + t += (dpint)v2 * (dpint)p8; + c[1] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[3] * b[8]; + t += (dpint)a[4] * b[7]; + t += (dpint)a[5] * b[6]; + t += (dpint)a[6] * b[5]; + t += (dpint)a[7] * b[4]; + t += (dpint)a[8] * b[3]; + t += (dpint)v3 * (dpint)p8; + c[2] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[4] * b[8]; + t += (dpint)a[5] * b[7]; + t += (dpint)a[6] * b[6]; + t += (dpint)a[7] * b[5]; + t += (dpint)a[8] * b[4]; + t += (dpint)v4 * (dpint)p8; + c[3] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[5] * b[8]; + t += (dpint)a[6] * b[7]; + t += (dpint)a[7] * b[6]; + t += (dpint)a[8] * b[5]; + t += (dpint)v5 * (dpint)p8; + c[4] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[6] * b[8]; + t += (dpint)a[7] * b[7]; + t += (dpint)a[8] * b[6]; + t += (dpint)v6 * (dpint)p8; + c[5] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[7] * b[8]; + t += (dpint)a[8] * b[7]; + t += (dpint)v7 * (dpint)p8; + c[6] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[8] * b[8]; + t += (dpint)v8 * (dpint)p8; + c[7] = ((spint)t & mask); + t >>= 29; + c[8] = (spint)t; +} + +// Modular squaring, c=a*a mod 2p +static void modsqr(const spint *a, spint *c) { + udpint tot; + udpint t = 0; + spint p8 = 0x50000u; + spint q = ((spint)1 << 29u); // q is unsaturated radix + spint mask = (spint)(q - (spint)1); + tot = (udpint)a[0] * a[0]; + t = tot; + spint v0 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[1]; + tot *= 2; + t += tot; + spint v1 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[2]; + tot *= 2; + tot += (udpint)a[1] * a[1]; + t += tot; + spint v2 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[3]; + tot += (udpint)a[1] * a[2]; + tot *= 2; 
+ t += tot; + spint v3 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[4]; + tot += (udpint)a[1] * a[3]; + tot *= 2; + tot += (udpint)a[2] * a[2]; + t += tot; + spint v4 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[5]; + tot += (udpint)a[1] * a[4]; + tot += (udpint)a[2] * a[3]; + tot *= 2; + t += tot; + spint v5 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[6]; + tot += (udpint)a[1] * a[5]; + tot += (udpint)a[2] * a[4]; + tot *= 2; + tot += (udpint)a[3] * a[3]; + t += tot; + spint v6 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[7]; + tot += (udpint)a[1] * a[6]; + tot += (udpint)a[2] * a[5]; + tot += (udpint)a[3] * a[4]; + tot *= 2; + t += tot; + spint v7 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[8]; + tot += (udpint)a[1] * a[7]; + tot += (udpint)a[2] * a[6]; + tot += (udpint)a[3] * a[5]; + tot *= 2; + tot += (udpint)a[4] * a[4]; + t += tot; + t += (udpint)v0 * p8; + spint v8 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[1] * a[8]; + tot += (udpint)a[2] * a[7]; + tot += (udpint)a[3] * a[6]; + tot += (udpint)a[4] * a[5]; + tot *= 2; + t += tot; + t += (udpint)v1 * p8; + c[0] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[2] * a[8]; + tot += (udpint)a[3] * a[7]; + tot += (udpint)a[4] * a[6]; + tot *= 2; + tot += (udpint)a[5] * a[5]; + t += tot; + t += (udpint)v2 * p8; + c[1] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[3] * a[8]; + tot += (udpint)a[4] * a[7]; + tot += (udpint)a[5] * a[6]; + tot *= 2; + t += tot; + t += (udpint)v3 * p8; + c[2] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[4] * a[8]; + tot += (udpint)a[5] * a[7]; + tot *= 2; + tot += (udpint)a[6] * a[6]; + t += tot; + t += (udpint)v4 * p8; + c[3] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[5] * a[8]; + tot += (udpint)a[6] * a[7]; + tot *= 2; + t += tot; + t += (udpint)v5 * p8; + c[4] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[6] * a[8]; + tot *= 2; + tot += (udpint)a[7] * a[7]; + t += tot; + t += (udpint)v6 * p8; + c[5] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[7] * a[8]; + tot *= 2; + t += tot; + t += (udpint)v7 * p8; + c[6] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[8] * a[8]; + t += tot; + t += (udpint)v8 * p8; + c[7] = ((spint)t & mask); + t >>= 29; + c[8] = (spint)t; +} + +// copy +static void modcpy(const spint *a, spint *c) { + int i; + for (i = 0; i < 9; i++) { + c[i] = a[i]; + } +} + +// square n times +static void modnsqr(spint *a, int n) { + int i; + for (i = 0; i < n; i++) { + modsqr(a, a); + } +} + +// Calculate progenitor +static void modpro(const spint *w, spint *z) { + spint x[9]; + spint t0[9]; + spint t1[9]; + spint t2[9]; + spint t3[9]; + spint t4[9]; + modcpy(w, x); + modsqr(x, z); + modmul(x, z, t0); + modsqr(t0, z); + modmul(x, z, z); + modsqr(z, t1); + modsqr(t1, t3); + modsqr(t3, t2); + modcpy(t2, t4); + modnsqr(t4, 3); + modmul(t2, t4, t2); + modcpy(t2, t4); + modnsqr(t4, 6); + modmul(t2, t4, t2); + modcpy(t2, t4); + modnsqr(t4, 2); + modmul(t3, t4, t3); + modnsqr(t3, 13); + modmul(t2, t3, t2); + modcpy(t2, t3); + modnsqr(t3, 27); + modmul(t2, t3, t2); + modmul(z, t2, z); + modcpy(z, t2); + modnsqr(t2, 4); + modmul(t1, t2, t1); + modmul(t0, t1, t0); + modmul(t1, t0, t1); + modmul(t0, t1, t0); + modmul(t1, t0, t2); + modmul(t0, t2, t0); + modmul(t1, t0, t1); + modnsqr(t1, 63); + modmul(t0, t1, t1); + modnsqr(t1, 64); + modmul(t0, t1, t0); + modnsqr(t0, 57); + modmul(z, t0, z); +} + +// calculate inverse, provide progenitor h if available +static void modinv(const spint *x, const spint *h, 
spint *z) { + spint s[9]; + spint t[9]; + if (h == NULL) { + modpro(x, t); + } else { + modcpy(h, t); + } + modcpy(x, s); + modnsqr(t, 2); + modmul(s, t, z); +} + +// Convert m to n-residue form, n=nres(m) +static void nres(const spint *m, spint *n) { + const spint c[9] = {0xcf5c28fu, 0x6666666u, 0x13333333u, + 0x19999999u, 0xcccccccu, 0x6666666u, + 0x13333333u, 0x19999999u, 0x1ccccu}; + modmul(m, c, n); +} + +// Convert n back to normal form, m=redc(n) +static void redc(const spint *n, spint *m) { + int i; + spint c[9]; + c[0] = 1; + for (i = 1; i < 9; i++) { + c[i] = 0; + } + modmul(n, c, m); + (void)modfsb(m); +} + +// is unity? +static int modis1(const spint *a) { + int i; + spint c[9]; + spint c0; + spint d = 0; + redc(a, c); + for (i = 1; i < 9; i++) { + d |= c[i]; + } + c0 = (spint)c[0]; + return ((spint)1 & ((d - (spint)1) >> 29u) & + (((c0 ^ (spint)1) - (spint)1) >> 29u)); +} + +// is zero? +static int modis0(const spint *a) { + int i; + spint c[9]; + spint d = 0; + redc(a, c); + for (i = 0; i < 9; i++) { + d |= c[i]; + } + return ((spint)1 & ((d - (spint)1) >> 29u)); +} + +// set to zero +static void modzer(spint *a) { + int i; + for (i = 0; i < 9; i++) { + a[i] = 0; + } +} + +// set to one +static void modone(spint *a) { + int i; + a[0] = 1; + for (i = 1; i < 9; i++) { + a[i] = 0; + } + nres(a, a); +} + +// set to integer +static void modint(int x, spint *a) { + int i; + a[0] = (spint)x; + for (i = 1; i < 9; i++) { + a[i] = 0; + } + nres(a, a); +} + +// Modular multiplication by an integer, c=a*b mod 2p +static void modmli(const spint *a, int b, spint *c) { + spint t[9]; + modint(b, t); + modmul(a, t, c); +} + +// Test for quadratic residue +static int modqr(const spint *h, const spint *x) { + spint r[9]; + if (h == NULL) { + modpro(x, r); + modsqr(r, r); + } else { + modsqr(h, r); + } + modmul(r, x, r); + return modis1(r) | modis0(x); +} + +// conditional move g to f if d=1 +// strongly recommend inlining be disabled using compiler specific syntax +static void modcmv(int b, const spint *g, volatile spint *f) { + int i; + spint c0, c1, s, t; + spint r = 0x5aa5a55au; + c0 = (1 - b) + r; + c1 = b + r; + for (i = 0; i < 9; i++) { + s = g[i]; + t = f[i]; + f[i] = c0 * t + c1 * s; + f[i] -= r * (t + s); + } +} + +// conditional swap g and f if d=1 +// strongly recommend inlining be disabled using compiler specific syntax +static void modcsw(int b, volatile spint *g, volatile spint *f) { + int i; + spint c0, c1, s, t, w; + spint r = 0x5aa5a55au; + c0 = (1 - b) + r; + c1 = b + r; + for (i = 0; i < 9; i++) { + s = g[i]; + t = f[i]; + w = r * (t + s); + f[i] = c0 * t + c1 * s; + f[i] -= w; + g[i] = c0 * s + c1 * t; + g[i] -= w; + } +} + +// Modular square root, provide progenitor h if available, NULL if not +static void modsqrt(const spint *x, const spint *h, spint *r) { + spint s[9]; + spint y[9]; + if (h == NULL) { + modpro(x, y); + } else { + modcpy(h, y); + } + modmul(y, x, s); + modcpy(s, r); +} + +// shift left by less than a word +static void modshl(unsigned int n, spint *a) { + int i; + a[8] = ((a[8] << n)) | (a[7] >> (29u - n)); + for (i = 7; i > 0; i--) { + a[i] = ((a[i] << n) & (spint)0x1fffffff) | (a[i - 1] >> (29u - n)); + } + a[0] = (a[0] << n) & (spint)0x1fffffff; +} + +// shift right by less than a word. 
Return shifted out part +static int modshr(unsigned int n, spint *a) { + int i; + spint r = a[0] & (((spint)1 << n) - (spint)1); + for (i = 0; i < 8; i++) { + a[i] = (a[i] >> n) | ((a[i + 1] << (29u - n)) & (spint)0x1fffffff); + } + a[8] = a[8] >> n; + return r; +} + +// set a= 2^r +static void mod2r(unsigned int r, spint *a) { + unsigned int n = r / 29u; + unsigned int m = r % 29u; + modzer(a); + if (r >= 32 * 8) + return; + a[n] = 1; + a[n] <<= m; + nres(a, a); +} + +// export to byte array +static void modexp(const spint *a, char *b) { + int i; + spint c[9]; + redc(a, c); + for (i = 31; i >= 0; i--) { + b[i] = c[0] & (spint)0xff; + (void)modshr(8, c); + } +} + +// import from byte array +// returns 1 if in range, else 0 +static int modimp(const char *b, spint *a) { + int i, res; + for (i = 0; i < 9; i++) { + a[i] = 0; + } + for (i = 0; i < 32; i++) { + modshl(8, a); + a[0] += (spint)(unsigned char)b[i]; + } + res = modfsb(a); + nres(a, a); + return res; +} + +// determine sign +static int modsign(const spint *a) { + spint c[9]; + redc(a, c); + return c[0] % 2; +} + +// return true if equal +static int modcmp(const spint *a, const spint *b) { + spint c[9], d[9]; + int i, eq = 1; + redc(a, c); + redc(b, d); + for (i = 0; i < 9; i++) { + eq &= (((c[i] ^ d[i]) - 1) >> 29) & 1; + } + return eq; +} + +// clang-format on +/****************************************************************************** + API functions calling generated code above + ******************************************************************************/ + +#include + +const digit_t ZERO[NWORDS_FIELD] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }; +const digit_t ONE[NWORDS_FIELD] = { 0x00000666, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00020000 }; +// Montgomery representation of 2^-1 +static const digit_t TWO_INV[NWORDS_FIELD] = { 0x00000333, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00010000 }; +// Montgomery representation of 3^-1 +static const digit_t THREE_INV[NWORDS_FIELD] = { + 0x15555777, 0x0aaaaaaa, 0x15555555, 0x0aaaaaaa, 0x15555555, 0x0aaaaaaa, 0x15555555, 0x0aaaaaaa, 0x00025555, +}; +// Montgomery representation of 2^256 +static const digit_t R2[NWORDS_FIELD] = { 0x0667ae14, 0x13333333, 0x19999999, 0x0ccccccc, 0x06666666, + 0x13333333, 0x19999999, 0x0ccccccc, 0x00026666 }; + +void +fp_set_small(fp_t *x, const digit_t val) +{ + modint((int)val, *x); +} + +void +fp_mul_small(fp_t *x, const fp_t *a, const uint32_t val) +{ + modmli(*a, (int)val, *x); +} + +void +fp_set_zero(fp_t *x) +{ + modzer(*x); +} + +void +fp_set_one(fp_t *x) +{ + modone(*x); +} + +uint32_t +fp_is_equal(const fp_t *a, const fp_t *b) +{ + return -(uint32_t)modcmp(*a, *b); +} + +uint32_t +fp_is_zero(const fp_t *a) +{ + return -(uint32_t)modis0(*a); +} + +void +fp_copy(fp_t *out, const fp_t *a) +{ + modcpy(*a, *out); +} + +void +fp_cswap(fp_t *a, fp_t *b, uint32_t ctl) +{ + modcsw((int)(ctl & 0x1), *a, *b); +} + +void +fp_add(fp_t *out, const fp_t *a, const fp_t *b) +{ + modadd(*a, *b, *out); +} + +void +fp_sub(fp_t *out, const fp_t *a, const fp_t *b) +{ + modsub(*a, *b, *out); +} + +void +fp_neg(fp_t *out, const fp_t *a) +{ + modneg(*a, *out); +} + +void +fp_sqr(fp_t *out, const fp_t *a) +{ + modsqr(*a, *out); +} + +void +fp_mul(fp_t *out, const fp_t *a, const fp_t *b) +{ + modmul(*a, *b, *out); +} + +void +fp_inv(fp_t *x) +{ + modinv(*x, NULL, *x); +} + +uint32_t +fp_is_square(const fp_t *a) +{ + return -(uint32_t)modqr(NULL, *a); +} + +void 
+fp_sqrt(fp_t *a) +{ + modsqrt(*a, NULL, *a); +} + +void +fp_half(fp_t *out, const fp_t *a) +{ + modmul(TWO_INV, *a, *out); +} + +void +fp_exp3div4(fp_t *out, const fp_t *a) +{ + modpro(*a, *out); +} + +void +fp_div3(fp_t *out, const fp_t *a) +{ + modmul(THREE_INV, *a, *out); +} + +void +fp_encode(void *dst, const fp_t *a) +{ + // Modified version of modexp() + int i; + spint c[9]; + redc(*a, c); + for (i = 0; i < 32; i++) { + ((char *)dst)[i] = c[0] & (spint)0xff; + (void)modshr(8, c); + } +} + +uint32_t +fp_decode(fp_t *d, const void *src) +{ + // Modified version of modimp() + int i; + spint res; + const unsigned char *b = src; + for (i = 0; i < 9; i++) { + (*d)[i] = 0; + } + for (i = 31; i >= 0; i--) { + modshl(8, *d); + (*d)[0] += (spint)b[i]; + } + res = (spint)-modfsb(*d); + nres(*d, *d); + // If the value was canonical then res = -1; otherwise, res = 0 + for (i = 0; i < 9; i++) { + (*d)[i] &= res; + } + return (uint32_t)res; +} + +static inline unsigned char +add_carry(unsigned char cc, spint a, spint b, spint *d) +{ + udpint t = (udpint)a + (udpint)b + cc; + *d = (spint)t; + return (unsigned char)(t >> Wordlength); +} + +static void +partial_reduce(spint *out, const spint *src) +{ + spint h, l, quo, rem; + unsigned char cc; + + // Split value in high (8 bits) and low (248 bits) parts. + h = src[7] >> 24; + l = src[7] & 0x00FFFFFF; + + // 5*2^248 = 1 mod q; hence, we add floor(h/5) + (h mod 5)*2^248 + // to the low part. + quo = (h * 0xCD) >> 10; + rem = h - (5 * quo); + cc = add_carry(0, src[0], quo, &out[0]); + cc = add_carry(cc, src[1], 0, &out[1]); + cc = add_carry(cc, src[2], 0, &out[2]); + cc = add_carry(cc, src[3], 0, &out[3]); + cc = add_carry(cc, src[4], 0, &out[4]); + cc = add_carry(cc, src[5], 0, &out[5]); + cc = add_carry(cc, src[6], 0, &out[6]); + (void)add_carry(cc, l, rem << 24, &out[7]); +} + +// Little-endian encoding of a 32-bit integer. +static inline void +enc32le(void *dst, uint32_t x) +{ + uint8_t *buf = dst; + buf[0] = (uint8_t)x; + buf[1] = (uint8_t)(x >> 8); + buf[2] = (uint8_t)(x >> 16); + buf[3] = (uint8_t)(x >> 24); +} + +// Little-endian decoding of a 32-bit integer. +static inline uint32_t +dec32le(const void *src) +{ + const uint8_t *buf = src; + return (spint)buf[0] | ((spint)buf[1] << 8) | ((spint)buf[2] << 16) | ((spint)buf[3] << 24); +} + +void +fp_decode_reduce(fp_t *d, const void *src, size_t len) +{ + uint32_t t[8]; // Stores Nbytes * 8 bits + uint8_t tmp[32]; // Nbytes + const uint8_t *b = src; + + fp_set_zero(d); + if (len == 0) { + return; + } + + size_t rem = len % 32; + if (rem != 0) { + // Input size is not a multiple of 32, we decode a partial + // block, which is already less than 2^248. + size_t k = len - rem; + memcpy(tmp, b + k, len - k); + memset(tmp + len - k, 0, (sizeof tmp) - (len - k)); + fp_decode(d, tmp); + len = k; + } + // Process all remaining blocks, in descending address order. 
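+ // Horner-style folding: for each remaining 32-byte block (taken from the highest
+ // address down), the accumulator is first multiplied by 2^256 via a Montgomery
+ // multiplication with R2 (the Montgomery representation of 2^256), then the block,
+ // folded below 2^251 by partial_reduce() using 5*2^248 = 1 modulo p = 5*2^248 - 1,
+ // is decoded and added in.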
+ while (len > 0) { + fp_mul(d, d, &R2); + len -= 32; + t[0] = dec32le(b + len); + t[1] = dec32le(b + len + 4); + t[2] = dec32le(b + len + 8); + t[3] = dec32le(b + len + 12); + t[4] = dec32le(b + len + 16); + t[5] = dec32le(b + len + 20); + t[6] = dec32le(b + len + 24); + t[7] = dec32le(b + len + 28); + partial_reduce(t, t); + enc32le(tmp, t[0]); + enc32le(tmp + 4, t[1]); + enc32le(tmp + 8, t[2]); + enc32le(tmp + 12, t[3]); + enc32le(tmp + 16, t[4]); + enc32le(tmp + 20, t[5]); + enc32le(tmp + 24, t[6]); + enc32le(tmp + 28, t[7]); + fp_t a; + fp_decode(&a, tmp); + fp_add(d, d, &a); + } +} + +#endif /* RADIX_32 */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c new file mode 100644 index 0000000000..57c2131b60 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c @@ -0,0 +1,794 @@ +// clang-format off +// Command line : python monty.py 64 +// 0x4ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff +#ifdef RADIX_64 + +#include +#include + +#define sspint int64_t +#define spint uint64_t +#define udpint __uint128_t +#define dpint __uint128_t + +#define Wordlength 64 +#define Nlimbs 5 +#define Radix 51 +#define Nbits 251 +#define Nbytes 32 + +#define MONTGOMERY +// propagate carries +inline static spint prop(spint *n) { + int i; + spint mask = ((spint)1 << 51u) - (spint)1; + sspint carry = (sspint)n[0]; + carry >>= 51u; + n[0] &= mask; + for (i = 1; i < 4; i++) { + carry += (sspint)n[i]; + n[i] = (spint)carry & mask; + carry >>= 51u; + } + n[4] += (spint)carry; + return -((n[4] >> 1) >> 62u); +} + +// propagate carries and add p if negative, propagate carries again +inline static int flatten(spint *n) { + spint carry = prop(n); + n[0] -= (spint)1u & carry; + n[4] += ((spint)0x500000000000u) & carry; + (void)prop(n); + return (int)(carry & 1); +} + +// Montgomery final subtract +inline static int modfsb(spint *n) { + n[0] += (spint)1u; + n[4] -= (spint)0x500000000000u; + return flatten(n); +} + +// Modular addition - reduce less than 2p +inline static void modadd(const spint *a, const spint *b, spint *n) { + spint carry; + n[0] = a[0] + b[0]; + n[1] = a[1] + b[1]; + n[2] = a[2] + b[2]; + n[3] = a[3] + b[3]; + n[4] = a[4] + b[4]; + n[0] += (spint)2u; + n[4] -= (spint)0xa00000000000u; + carry = prop(n); + n[0] -= (spint)2u & carry; + n[4] += ((spint)0xa00000000000u) & carry; + (void)prop(n); +} + +// Modular subtraction - reduce less than 2p +inline static void modsub(const spint *a, const spint *b, spint *n) { + spint carry; + n[0] = a[0] - b[0]; + n[1] = a[1] - b[1]; + n[2] = a[2] - b[2]; + n[3] = a[3] - b[3]; + n[4] = a[4] - b[4]; + carry = prop(n); + n[0] -= (spint)2u & carry; + n[4] += ((spint)0xa00000000000u) & carry; + (void)prop(n); +} + +// Modular negation +inline static void modneg(const spint *b, spint *n) { + spint carry; + n[0] = (spint)0 - b[0]; + n[1] = (spint)0 - b[1]; + n[2] = (spint)0 - b[2]; + n[3] = (spint)0 - b[3]; + n[4] = (spint)0 - b[4]; + carry = prop(n); + n[0] -= (spint)2u & carry; + n[4] += ((spint)0xa00000000000u) & carry; + (void)prop(n); +} + +// Overflow limit = 340282366920938463463374607431768211456 +// maximum possible = 25551082561965953719787503747077 +// Modular multiplication, c=a*b mod 2p +inline static void modmul(const spint *a, const spint *b, spint *c) { + dpint t = 0; + spint p4 = 0x500000000000u; + spint q = ((spint)1 << 51u); // q is unsaturated radix + spint mask = (spint)(q - (spint)1); + t += (dpint)a[0] * b[0]; + spint v0 = 
((spint)t & mask); + t >>= 51; + t += (dpint)a[0] * b[1]; + t += (dpint)a[1] * b[0]; + spint v1 = ((spint)t & mask); + t >>= 51; + t += (dpint)a[0] * b[2]; + t += (dpint)a[1] * b[1]; + t += (dpint)a[2] * b[0]; + spint v2 = ((spint)t & mask); + t >>= 51; + t += (dpint)a[0] * b[3]; + t += (dpint)a[1] * b[2]; + t += (dpint)a[2] * b[1]; + t += (dpint)a[3] * b[0]; + spint v3 = ((spint)t & mask); + t >>= 51; + t += (dpint)a[0] * b[4]; + t += (dpint)a[1] * b[3]; + t += (dpint)a[2] * b[2]; + t += (dpint)a[3] * b[1]; + t += (dpint)a[4] * b[0]; + t += (dpint)v0 * (dpint)p4; + spint v4 = ((spint)t & mask); + t >>= 51; + t += (dpint)a[1] * b[4]; + t += (dpint)a[2] * b[3]; + t += (dpint)a[3] * b[2]; + t += (dpint)a[4] * b[1]; + t += (dpint)v1 * (dpint)p4; + c[0] = ((spint)t & mask); + t >>= 51; + t += (dpint)a[2] * b[4]; + t += (dpint)a[3] * b[3]; + t += (dpint)a[4] * b[2]; + t += (dpint)v2 * (dpint)p4; + c[1] = ((spint)t & mask); + t >>= 51; + t += (dpint)a[3] * b[4]; + t += (dpint)a[4] * b[3]; + t += (dpint)v3 * (dpint)p4; + c[2] = ((spint)t & mask); + t >>= 51; + t += (dpint)a[4] * b[4]; + t += (dpint)v4 * (dpint)p4; + c[3] = ((spint)t & mask); + t >>= 51; + c[4] = (spint)t; +} + +// Modular squaring, c=a*a mod 2p +inline static void modsqr(const spint *a, spint *c) { + udpint tot; + udpint t = 0; + spint p4 = 0x500000000000u; + spint q = ((spint)1 << 51u); // q is unsaturated radix + spint mask = (spint)(q - (spint)1); + tot = (udpint)a[0] * a[0]; + t = tot; + spint v0 = ((spint)t & mask); + t >>= 51; + tot = (udpint)a[0] * a[1]; + tot *= 2; + t += tot; + spint v1 = ((spint)t & mask); + t >>= 51; + tot = (udpint)a[0] * a[2]; + tot *= 2; + tot += (udpint)a[1] * a[1]; + t += tot; + spint v2 = ((spint)t & mask); + t >>= 51; + tot = (udpint)a[0] * a[3]; + tot += (udpint)a[1] * a[2]; + tot *= 2; + t += tot; + spint v3 = ((spint)t & mask); + t >>= 51; + tot = (udpint)a[0] * a[4]; + tot += (udpint)a[1] * a[3]; + tot *= 2; + tot += (udpint)a[2] * a[2]; + t += tot; + t += (udpint)v0 * p4; + spint v4 = ((spint)t & mask); + t >>= 51; + tot = (udpint)a[1] * a[4]; + tot += (udpint)a[2] * a[3]; + tot *= 2; + t += tot; + t += (udpint)v1 * p4; + c[0] = ((spint)t & mask); + t >>= 51; + tot = (udpint)a[2] * a[4]; + tot *= 2; + tot += (udpint)a[3] * a[3]; + t += tot; + t += (udpint)v2 * p4; + c[1] = ((spint)t & mask); + t >>= 51; + tot = (udpint)a[3] * a[4]; + tot *= 2; + t += tot; + t += (udpint)v3 * p4; + c[2] = ((spint)t & mask); + t >>= 51; + tot = (udpint)a[4] * a[4]; + t += tot; + t += (udpint)v4 * p4; + c[3] = ((spint)t & mask); + t >>= 51; + c[4] = (spint)t; +} + +// copy +inline static void modcpy(const spint *a, spint *c) { + int i; + for (i = 0; i < 5; i++) { + c[i] = a[i]; + } +} + +// square n times +static void modnsqr(spint *a, int n) { + int i; + for (i = 0; i < n; i++) { + modsqr(a, a); + } +} + +// Calculate progenitor +static void modpro(const spint *w, spint *z) { + spint x[5]; + spint t0[5]; + spint t1[5]; + spint t2[5]; + spint t3[5]; + spint t4[5]; + modcpy(w, x); + modsqr(x, z); + modmul(x, z, t0); + modsqr(t0, z); + modmul(x, z, z); + modsqr(z, t1); + modsqr(t1, t3); + modsqr(t3, t2); + modcpy(t2, t4); + modnsqr(t4, 3); + modmul(t2, t4, t2); + modcpy(t2, t4); + modnsqr(t4, 6); + modmul(t2, t4, t2); + modcpy(t2, t4); + modnsqr(t4, 2); + modmul(t3, t4, t3); + modnsqr(t3, 13); + modmul(t2, t3, t2); + modcpy(t2, t3); + modnsqr(t3, 27); + modmul(t2, t3, t2); + modmul(z, t2, z); + modcpy(z, t2); + modnsqr(t2, 4); + modmul(t1, t2, t1); + modmul(t0, t1, t0); + modmul(t1, t0, t1); + modmul(t0, t1, 
t0); + modmul(t1, t0, t2); + modmul(t0, t2, t0); + modmul(t1, t0, t1); + modnsqr(t1, 63); + modmul(t0, t1, t1); + modnsqr(t1, 64); + modmul(t0, t1, t0); + modnsqr(t0, 57); + modmul(z, t0, z); +} + +// calculate inverse, provide progenitor h if available +static void modinv(const spint *x, const spint *h, spint *z) { + spint s[5]; + spint t[5]; + if (h == NULL) { + modpro(x, t); + } else { + modcpy(h, t); + } + modcpy(x, s); + modnsqr(t, 2); + modmul(s, t, z); +} + +// Convert m to n-residue form, n=nres(m) +static void nres(const spint *m, spint *n) { + const spint c[5] = {0x4cccccccccf5cu, 0x1999999999999u, 0x3333333333333u, + 0x6666666666666u, 0xcccccccccccu}; + modmul(m, c, n); +} + +// Convert n back to normal form, m=redc(n) +static void redc(const spint *n, spint *m) { + int i; + spint c[5]; + c[0] = 1; + for (i = 1; i < 5; i++) { + c[i] = 0; + } + modmul(n, c, m); + (void)modfsb(m); +} + +// is unity? +static int modis1(const spint *a) { + int i; + spint c[5]; + spint c0; + spint d = 0; + redc(a, c); + for (i = 1; i < 5; i++) { + d |= c[i]; + } + c0 = (spint)c[0]; + return ((spint)1 & ((d - (spint)1) >> 51u) & + (((c0 ^ (spint)1) - (spint)1) >> 51u)); +} + +// is zero? +static int modis0(const spint *a) { + int i; + spint c[5]; + spint d = 0; + redc(a, c); + for (i = 0; i < 5; i++) { + d |= c[i]; + } + return ((spint)1 & ((d - (spint)1) >> 51u)); +} + +// set to zero +static void modzer(spint *a) { + int i; + for (i = 0; i < 5; i++) { + a[i] = 0; + } +} + +// set to one +static void modone(spint *a) { + int i; + a[0] = 1; + for (i = 1; i < 5; i++) { + a[i] = 0; + } + nres(a, a); +} + +// set to integer +static void modint(int x, spint *a) { + int i; + a[0] = (spint)x; + for (i = 1; i < 5; i++) { + a[i] = 0; + } + nres(a, a); +} + +// Modular multiplication by an integer, c=a*b mod 2p +inline static void modmli(const spint *a, int b, spint *c) { + spint t[5]; + modint(b, t); + modmul(a, t, c); +} + +// Test for quadratic residue +static int modqr(const spint *h, const spint *x) { + spint r[5]; + if (h == NULL) { + modpro(x, r); + modsqr(r, r); + } else { + modsqr(h, r); + } + modmul(r, x, r); + return modis1(r) | modis0(x); +} + +// conditional move g to f if d=1 +// strongly recommend inlining be disabled using compiler specific syntax +static void modcmv(int b, const spint *g, volatile spint *f) { + int i; + spint c0, c1, s, t; + spint r = 0x3cc3c33c5aa5a55au; + c0 = (1 - b) + r; + c1 = b + r; + for (i = 0; i < 5; i++) { + s = g[i]; + t = f[i]; + f[i] = c0 * t + c1 * s; + f[i] -= r * (t + s); + } +} + +// conditional swap g and f if d=1 +// strongly recommend inlining be disabled using compiler specific syntax +static void modcsw(int b, volatile spint *g, volatile spint *f) { + int i; + spint c0, c1, s, t, w; + spint r = 0x3cc3c33c5aa5a55au; + c0 = (1 - b) + r; + c1 = b + r; + for (i = 0; i < 5; i++) { + s = g[i]; + t = f[i]; + w = r * (t + s); + f[i] = c0 * t + c1 * s; + f[i] -= w; + g[i] = c0 * s + c1 * t; + g[i] -= w; + } +} + +// Modular square root, provide progenitor h if available, NULL if not +static void modsqrt(const spint *x, const spint *h, spint *r) { + spint s[5]; + spint y[5]; + if (h == NULL) { + modpro(x, y); + } else { + modcpy(h, y); + } + modmul(y, x, s); + modcpy(s, r); +} + +// shift left by less than a word +static void modshl(unsigned int n, spint *a) { + int i; + a[4] = ((a[4] << n)) | (a[3] >> (51u - n)); + for (i = 3; i > 0; i--) { + a[i] = ((a[i] << n) & (spint)0x7ffffffffffff) | (a[i - 1] >> (51u - n)); + } + a[0] = (a[0] << n) & (spint)0x7ffffffffffff; 
+} + +// shift right by less than a word. Return shifted out part +static int modshr(unsigned int n, spint *a) { + int i; + spint r = a[0] & (((spint)1 << n) - (spint)1); + for (i = 0; i < 4; i++) { + a[i] = (a[i] >> n) | ((a[i + 1] << (51u - n)) & (spint)0x7ffffffffffff); + } + a[4] = a[4] >> n; + return r; +} + +// set a= 2^r +static void mod2r(unsigned int r, spint *a) { + unsigned int n = r / 51u; + unsigned int m = r % 51u; + modzer(a); + if (r >= 32 * 8) + return; + a[n] = 1; + a[n] <<= m; + nres(a, a); +} + +// export to byte array +static void modexp(const spint *a, char *b) { + int i; + spint c[5]; + redc(a, c); + for (i = 31; i >= 0; i--) { + b[i] = c[0] & (spint)0xff; + (void)modshr(8, c); + } +} + +// import from byte array +// returns 1 if in range, else 0 +static int modimp(const char *b, spint *a) { + int i, res; + for (i = 0; i < 5; i++) { + a[i] = 0; + } + for (i = 0; i < 32; i++) { + modshl(8, a); + a[0] += (spint)(unsigned char)b[i]; + } + res = modfsb(a); + nres(a, a); + return res; +} + +// determine sign +static int modsign(const spint *a) { + spint c[5]; + redc(a, c); + return c[0] % 2; +} + +// return true if equal +static int modcmp(const spint *a, const spint *b) { + spint c[5], d[5]; + int i, eq = 1; + redc(a, c); + redc(b, d); + for (i = 0; i < 5; i++) { + eq &= (((c[i] ^ d[i]) - 1) >> 51) & 1; + } + return eq; +} + +// clang-format on +/****************************************************************************** + API functions calling generated code above + ******************************************************************************/ + +#include + +const digit_t ZERO[NWORDS_FIELD] = { 0x0, 0x0, 0x0, 0x0, 0x0 }; +const digit_t ONE[NWORDS_FIELD] = { 0x0000000000000019, + 0x0000000000000000, + 0x0000000000000000, + 0x0000000000000000, + 0x0000300000000000 }; +// Montgomery representation of 2^-1 +static const digit_t TWO_INV[NWORDS_FIELD] = { 0x000000000000000c, + 0x0000000000000000, + 0x0000000000000000, + 0x0000000000000000, + 0x0000400000000000 }; +// Montgomery representation of 3^-1 +static const digit_t THREE_INV[NWORDS_FIELD] = { 0x000555555555555d, + 0x0002aaaaaaaaaaaa, + 0x0005555555555555, + 0x0002aaaaaaaaaaaa, + 0x0000455555555555 }; +// Montgomery representation of 2^256 +static const digit_t R2[NWORDS_FIELD] = { 0x0001999999999eb8, + 0x0003333333333333, + 0x0006666666666666, + 0x0004cccccccccccc, + 0x0000199999999999 }; + +void +fp_set_small(fp_t *x, const digit_t val) +{ + modint((int)val, *x); +} + +void +fp_mul_small(fp_t *x, const fp_t *a, const uint32_t val) +{ + modmli(*a, (int)val, *x); +} + +void +fp_set_zero(fp_t *x) +{ + modzer(*x); +} + +void +fp_set_one(fp_t *x) +{ + modone(*x); +} + +uint32_t +fp_is_equal(const fp_t *a, const fp_t *b) +{ + return -(uint32_t)modcmp(*a, *b); +} + +uint32_t +fp_is_zero(const fp_t *a) +{ + return -(uint32_t)modis0(*a); +} + +void +fp_copy(fp_t *out, const fp_t *a) +{ + modcpy(*a, *out); +} + +void +fp_cswap(fp_t *a, fp_t *b, uint32_t ctl) +{ + modcsw((int)(ctl & 0x1), *a, *b); +} + +void +fp_add(fp_t *out, const fp_t *a, const fp_t *b) +{ + modadd(*a, *b, *out); +} + +void +fp_sub(fp_t *out, const fp_t *a, const fp_t *b) +{ + modsub(*a, *b, *out); +} + +void +fp_neg(fp_t *out, const fp_t *a) +{ + modneg(*a, *out); +} + +void +fp_sqr(fp_t *out, const fp_t *a) +{ + modsqr(*a, *out); +} + +void +fp_mul(fp_t *out, const fp_t *a, const fp_t *b) +{ + modmul(*a, *b, *out); +} + +void +fp_inv(fp_t *x) +{ + modinv(*x, NULL, *x); +} + +uint32_t +fp_is_square(const fp_t *a) +{ + return -(uint32_t)modqr(NULL, *a); 
+} + +void +fp_sqrt(fp_t *a) +{ + modsqrt(*a, NULL, *a); +} + +void +fp_half(fp_t *out, const fp_t *a) +{ + modmul(TWO_INV, *a, *out); +} + +void +fp_exp3div4(fp_t *out, const fp_t *a) +{ + modpro(*a, *out); +} + +void +fp_div3(fp_t *out, const fp_t *a) +{ + modmul(THREE_INV, *a, *out); +} + +void +fp_encode(void *dst, const fp_t *a) +{ + // Modified version of modexp() + int i; + spint c[5]; + redc(*a, c); + for (i = 0; i < 32; i++) { + ((char *)dst)[i] = c[0] & (spint)0xff; + (void)modshr(8, c); + } +} + +uint32_t +fp_decode(fp_t *d, const void *src) +{ + // Modified version of modimp() + int i; + spint res; + const unsigned char *b = src; + for (i = 0; i < 5; i++) { + (*d)[i] = 0; + } + for (i = 31; i >= 0; i--) { + modshl(8, *d); + (*d)[0] += (spint)b[i]; + } + res = (spint)-modfsb(*d); + nres(*d, *d); + // If the value was canonical then res = -1; otherwise, res = 0 + for (i = 0; i < 5; i++) { + (*d)[i] &= res; + } + return (uint32_t)res; +} + +static inline unsigned char +add_carry(unsigned char cc, spint a, spint b, spint *d) +{ + udpint t = (udpint)a + (udpint)b + cc; + *d = (spint)t; + return (unsigned char)(t >> Wordlength); +} + +static void +partial_reduce(spint *out, const spint *src) +{ + spint h, l, quo, rem; + unsigned char cc; + + // Split value in high (8 bits) and low (248 bits) parts. + h = src[3] >> 56; + l = src[3] & 0x00FFFFFFFFFFFFFF; + + // 5*2^248 = 1 mod q; hence, we add floor(h/5) + (h mod 5)*2^248 + // to the low part. + quo = (h * 0xCD) >> 10; + rem = h - (5 * quo); + cc = add_carry(0, src[0], quo, &out[0]); + cc = add_carry(cc, src[1], 0, &out[1]); + cc = add_carry(cc, src[2], 0, &out[2]); + (void)add_carry(cc, l, rem << 56, &out[3]); +} + +// Little-endian encoding of a 64-bit integer. +static inline void +enc64le(void *dst, uint64_t x) +{ + uint8_t *buf = dst; + buf[0] = (uint8_t)x; + buf[1] = (uint8_t)(x >> 8); + buf[2] = (uint8_t)(x >> 16); + buf[3] = (uint8_t)(x >> 24); + buf[4] = (uint8_t)(x >> 32); + buf[5] = (uint8_t)(x >> 40); + buf[6] = (uint8_t)(x >> 48); + buf[7] = (uint8_t)(x >> 56); +} + +// Little-endian decoding of a 64-bit integer. +static inline uint64_t +dec64le(const void *src) +{ + const uint8_t *buf = src; + return (spint)buf[0] | ((spint)buf[1] << 8) | ((spint)buf[2] << 16) | ((spint)buf[3] << 24) | + ((spint)buf[4] << 32) | ((spint)buf[5] << 40) | ((spint)buf[6] << 48) | ((spint)buf[7] << 56); +} + +void +fp_decode_reduce(fp_t *d, const void *src, size_t len) +{ + uint64_t t[4]; // Stores Nbytes * 8 bits + uint8_t tmp[32]; // Nbytes + const uint8_t *b = src; + + fp_set_zero(d); + if (len == 0) { + return; + } + + size_t rem = len % 32; + if (rem != 0) { + // Input size is not a multiple of 32, we decode a partial + // block, which is already less than 2^248. + size_t k = len - rem; + memcpy(tmp, b + k, len - k); + memset(tmp + len - k, 0, (sizeof tmp) - (len - k)); + fp_decode(d, tmp); + len = k; + } + // Process all remaining blocks, in descending address order. 
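+ // Same Horner-style folding as the 32-bit path: scale the accumulator by 2^256
+ // via R2, then add the next partially reduced block. Inside partial_reduce(),
+ // quo = (h * 0xCD) >> 10 equals floor(h / 5) for the 8-bit value h, since
+ // 0xCD / 2^10 = 205/1024 exceeds 1/5 by only 1/5120; rem = h - 5*quo is h mod 5.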
+ while (len > 0) { + fp_mul(d, d, &R2); + len -= 32; + t[0] = dec64le(b + len); + t[1] = dec64le(b + len + 8); + t[2] = dec64le(b + len + 16); + t[3] = dec64le(b + len + 24); + partial_reduce(t, t); + enc64le(tmp, t[0]); + enc64le(tmp + 8, t[1]); + enc64le(tmp + 16, t[2]); + enc64le(tmp + 24, t[3]); + fp_t a; + fp_decode(&a, tmp); + fp_add(d, d, &a); + } +} + +#endif /* RADIX_64 */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd.c new file mode 100644 index 0000000000..0424108019 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd.c @@ -0,0 +1,93 @@ +#include +#include + +void +double_couple_point(theta_couple_point_t *out, const theta_couple_point_t *in, const theta_couple_curve_t *E1E2) +{ + ec_dbl(&out->P1, &in->P1, &E1E2->E1); + ec_dbl(&out->P2, &in->P2, &E1E2->E2); +} + +void +double_couple_point_iter(theta_couple_point_t *out, + unsigned n, + const theta_couple_point_t *in, + const theta_couple_curve_t *E1E2) +{ + if (n == 0) { + memmove(out, in, sizeof(theta_couple_point_t)); + } else { + double_couple_point(out, in, E1E2); + for (unsigned i = 0; i < n - 1; i++) { + double_couple_point(out, out, E1E2); + } + } +} + +void +add_couple_jac_points(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *T1, + const theta_couple_jac_point_t *T2, + const theta_couple_curve_t *E1E2) +{ + ADD(&out->P1, &T1->P1, &T2->P1, &E1E2->E1); + ADD(&out->P2, &T1->P2, &T2->P2, &E1E2->E2); +} + +void +double_couple_jac_point(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2) +{ + DBL(&out->P1, &in->P1, &E1E2->E1); + DBL(&out->P2, &in->P2, &E1E2->E2); +} + +void +double_couple_jac_point_iter(theta_couple_jac_point_t *out, + unsigned n, + const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2) +{ + if (n == 0) { + *out = *in; + } else if (n == 1) { + double_couple_jac_point(out, in, E1E2); + } else { + fp2_t a1, a2, t1, t2; + + jac_to_ws(&out->P1, &t1, &a1, &in->P1, &E1E2->E1); + jac_to_ws(&out->P2, &t2, &a2, &in->P2, &E1E2->E2); + + DBLW(&out->P1, &t1, &out->P1, &t1); + DBLW(&out->P2, &t2, &out->P2, &t2); + for (unsigned i = 0; i < n - 1; i++) { + DBLW(&out->P1, &t1, &out->P1, &t1); + DBLW(&out->P2, &t2, &out->P2, &t2); + } + + jac_from_ws(&out->P1, &out->P1, &a1, &E1E2->E1); + jac_from_ws(&out->P2, &out->P2, &a2, &E1E2->E2); + } +} + +void +couple_jac_to_xz(theta_couple_point_t *P, const theta_couple_jac_point_t *xyP) +{ + jac_to_xz(&P->P1, &xyP->P1); + jac_to_xz(&P->P2, &xyP->P2); +} + +void +copy_bases_to_kernel(theta_kernel_couple_points_t *ker, const ec_basis_t *B1, const ec_basis_t *B2) +{ + // Copy the basis on E1 to (P, _) on T1, T2 and T1 - T2 + copy_point(&ker->T1.P1, &B1->P); + copy_point(&ker->T2.P1, &B1->Q); + copy_point(&ker->T1m2.P1, &B1->PmQ); + + // Copy the basis on E2 to (_, P) on T1, T2 and T1 - T2 + copy_point(&ker->T1.P2, &B2->P); + copy_point(&ker->T2.P2, &B2->Q); + copy_point(&ker->T1m2.P2, &B2->PmQ); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd.h new file mode 100644 index 0000000000..2b16e23834 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd.h @@ -0,0 +1,435 @@ +/** @file + * + * @authors Antonin Leroux + * + * @brief The HD-isogenies algorithm required by the signature + * + */ + +#ifndef HD_H +#define HD_H + +#include +#include +#include + +/** @defgroup hd_module Abelian surfaces and their isogenies + * @{ + */ + +#define 
HD_extra_torsion 2 + +/** @defgroup hd_struct Data structures for dimension 2 + * @{ + */ + +/** @brief Type for couple point with XZ coordinates + * @typedef theta_couple_point_t + * + * @struct theta_couple_point + * + * Structure for the couple point on an elliptic product + * using XZ coordinates + */ +typedef struct theta_couple_point +{ + ec_point_t P1; + ec_point_t P2; +} theta_couple_point_t; + +/** @brief Type for three couple points T1, T2, T1-T2 with XZ coordinates + * @typedef theta_kernel_couple_points_t + * + * @struct theta_kernel_couple_points + * + * Structure for a triple of theta couple points T1, T2 and T1 - T2 + */ +typedef struct theta_kernel_couple_points +{ + theta_couple_point_t T1; + theta_couple_point_t T2; + theta_couple_point_t T1m2; +} theta_kernel_couple_points_t; + +/** @brief Type for couple point with XYZ coordinates + * @typedef theta_couple_jac_point_t + * + * @struct theta_couple_jac_point + * + * Structure for the couple point on an elliptic product + * using XYZ coordinates + */ +typedef struct theta_couple_jac_point +{ + jac_point_t P1; + jac_point_t P2; +} theta_couple_jac_point_t; + +/** @brief Type for couple curve * + * @typedef theta_couple_curve_t + * + * @struct theta_couple_curve + * + * the theta_couple_curve structure + */ +typedef struct theta_couple_curve +{ + ec_curve_t E1; + ec_curve_t E2; +} theta_couple_curve_t; + +/** @brief Type for a product E1 x E2 with corresponding bases + * @typedef theta_couple_curve_with_basis_t + * + * @struct theta_couple_curve_with_basis + * + * tType for a product E1 x E2 with corresponding bases Ei[2^n] + */ +typedef struct theta_couple_curve_with_basis +{ + ec_curve_t E1; + ec_curve_t E2; + ec_basis_t B1; + ec_basis_t B2; +} theta_couple_curve_with_basis_t; + +/** @brief Type for theta point * + * @typedef theta_point_t + * + * @struct theta_point + * + * the theta_point structure used + */ +typedef struct theta_point +{ + fp2_t x; + fp2_t y; + fp2_t z; + fp2_t t; +} theta_point_t; + +/** @brief Type for theta point with repeating components + * @typedef theta_point_compact_t + * + * @struct theta_point_compact + * + * the theta_point structure used for points with repeated components + */ +typedef struct theta_point_compact +{ + fp2_t x; + fp2_t y; +} theta_point_compact_t; + +/** @brief Type for theta structure * + * @typedef theta_structure_t + * + * @struct theta_structure + * + * the theta_structure structure used + */ +typedef struct theta_structure +{ + theta_point_t null_point; + bool precomputation; + + // Eight precomputed values used for doubling and + // (2,2)-isogenies. + fp2_t XYZ0; + fp2_t YZT0; + fp2_t XZT0; + fp2_t XYT0; + + fp2_t xyz0; + fp2_t yzt0; + fp2_t xzt0; + fp2_t xyt0; +} theta_structure_t; + +/** @brief A 2x2 matrix used for action by translation + * @typedef translation_matrix_t + * + * @struct translation_matrix + * + * Structure to hold 4 fp2_t elements representing a 2x2 matrix used when computing + * a compatible theta structure during gluing. + */ +typedef struct translation_matrix +{ + fp2_t g00; + fp2_t g01; + fp2_t g10; + fp2_t g11; +} translation_matrix_t; + +/** @brief A 4x4 matrix used for basis changes + * @typedef basis_change_matrix_t + * + * @struct basis_change_matrix + * + * Structure to hold 16 elements representing a 4x4 matrix used for changing + * the basis of a theta point. 
+ */ +typedef struct basis_change_matrix +{ + fp2_t m[4][4]; +} basis_change_matrix_t; + +/** @brief Type for gluing (2,2) theta isogeny * + * @typedef theta_gluing_t + * + * @struct theta_gluing + * + * the theta_gluing structure + */ +typedef struct theta_gluing +{ + + theta_couple_curve_t domain; + theta_couple_jac_point_t xyK1_8; + theta_point_compact_t imageK1_8; + basis_change_matrix_t M; + theta_point_t precomputation; + theta_point_t codomain; + +} theta_gluing_t; + +/** @brief Type for standard (2,2) theta isogeny * + * @typedef theta_isogeny_t + * + * @struct theta_isogeny + * + * the theta_isogeny structure + */ +typedef struct theta_isogeny +{ + theta_point_t T1_8; + theta_point_t T2_8; + bool hadamard_bool_1; + bool hadamard_bool_2; + theta_structure_t domain; + theta_point_t precomputation; + theta_structure_t codomain; +} theta_isogeny_t; + +/** @brief Type for splitting isomorphism * + * @typedef theta_splitting_t + * + * @struct theta_splitting + * + * the theta_splitting structure + */ +typedef struct theta_splitting +{ + basis_change_matrix_t M; + theta_structure_t B; + +} theta_splitting_t; + +// end of hd_struct +/** + * @} + */ + +/** @defgroup hd_functions Functions for dimension 2 + * @{ + */ + +/** + * @brief Compute the double of the theta couple point in on the elliptic product E12 + * + * @param out Output: the theta_couple_point + * @param in the theta couple point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1,P2) + * out = [2] (P1,P2) + * + */ +void double_couple_point(theta_couple_point_t *out, const theta_couple_point_t *in, const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the iterated double of the theta couple point in on the elliptic product E12 + * + * @param out Output: the theta_couple_point + * @param n : the number of iteration + * @param E1E2 an elliptic product + * @param in the theta couple point in the elliptic product + * in = (P1,P2) + * out = [2^n] (P1,P2) + * + */ +void double_couple_point_iter(theta_couple_point_t *out, + unsigned n, + const theta_couple_point_t *in, + const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the addition of two points in (X : Y : Z) coordinates on the elliptic product E12 + * + * @param out Output: the theta_couple_jac_point + * @param T1 the theta couple jac point in the elliptic product + * @param T2 the theta couple jac point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1, P2), (Q1, Q2) + * out = (P1 + Q1, P2 + Q2) + * + **/ +void add_couple_jac_points(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *T1, + const theta_couple_jac_point_t *T2, + const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the double of the theta couple point in on the elliptic product E12 + * + * @param out Output: the theta_couple_point + * @param in the theta couple point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1,P2) + * out = [2] (P1,P2) + * + */ +void double_couple_jac_point(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the iterated double of the theta couple jac point in on the elliptic product E12 + * + * @param out Output: the theta_couple_jac_point + * @param n : the number of iteration + * @param in the theta couple jac point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1,P2) + * out = [2^n] (P1,P2) + * + */ +void double_couple_jac_point_iter(theta_couple_jac_point_t *out, + unsigned n, + 
const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2); + +/** + * @brief A forgetful function which returns (X : Z) points given a pair of (X : Y : Z) points + * + * @param P Output: the theta_couple_point + * @param xyP : the theta_couple_jac_point + **/ +void couple_jac_to_xz(theta_couple_point_t *P, const theta_couple_jac_point_t *xyP); + +/** + * @brief Compute a (2,2) isogeny chain in dimension 2 between elliptic + * products in the theta_model and evaluate at a list of points of the form + * (P1,0) or (0,P2). Returns 0 if the codomain fails to split (or there is + * an error during the computation) and 1 otherwise. + * + * @param n : the length of the isogeny chain + * @param E12 an elliptic curve product + * @param ker T1, T2 and T1-T2. couple points on E12[2^(n+2)] + * @param extra_torsion boolean indicating if we give the points in E12[2^n] or + * E12[2^(n+HD_extra_torsion)] + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @returns 1 on success 0 on failure + * + */ +int theta_chain_compute_and_eval(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP); + +/** + * @brief Compute a (2,2) isogeny chain in dimension 2 between elliptic + * products in the theta_model and evaluate at a list of points of the form + * (P1,0) or (0,P2). Returns 0 if the codomain fails to split (or there is + * an error during the computation) and 1 otherwise. + * Compared to theta_chain_compute_and_eval, it does extra isotropy + * checks on the kernel. + * + * @param n : the length of the isogeny chain + * @param E12 an elliptic curve product + * @param ker T1, T2 and T1-T2. couple points on E12[2^(n+2)] + * @param extra_torsion boolean indicating if we give the points in E12[2^n] or + * E12[2^(n+HD_extra_torsion)] + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @returns 1 on success 0 on failure + * + */ +int theta_chain_compute_and_eval_verify(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP); + +/** + * @brief Compute a (2,2) isogeny chain in dimension 2 between elliptic + * products in the theta_model and evaluate at a list of points of the form + * (P1,0) or (0,P2). Returns 0 if the codomain fails to split (or there is + * an error during the computation) and 1 otherwise. + * Compared to theta_chain_compute_and_eval, it selects a random Montgomery + * model of the codomain. + * + * @param n : the length of the isogeny chain + * @param E12 an elliptic curve product + * @param ker T1, T2 and T1-T2. 
couple points on E12[2^(n+2)] + * @param extra_torsion boolean indicating if we give the points in E12[2^n] or + * E12[2^(n+HD_extra_torsion)] + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @returns 1 on success, 0 on failure + * + */ +int theta_chain_compute_and_eval_randomized(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP); + +/** + * @brief Given a bases B1 on E1 and B2 on E2 copies this to create a kernel + * on E1 x E2 as couple points T1, T2 and T1 - T2 + * + * @param ker Output: a kernel for dim_two_isogenies (T1, T2, T1-T2) + * @param B1 Input basis on E1 + * @param B2 Input basis on E2 + **/ +void copy_bases_to_kernel(theta_kernel_couple_points_t *ker, const ec_basis_t *B1, const ec_basis_t *B2); + +/** + * @brief Given a couple of points (P1, P2) on a couple of curves (E1, E2) + * this function tests if both points are of order exactly 2^t + * + * @param T: couple point (P1, P2) + * @param E: a couple of curves (E1, E2) + * @param t: an integer + * @returns 0xFFFFFFFF on success, 0 on failure + */ +static int +test_couple_point_order_twof(const theta_couple_point_t *T, const theta_couple_curve_t *E, int t) +{ + int check_P1 = test_point_order_twof(&T->P1, &E->E1, t); + int check_P2 = test_point_order_twof(&T->P2, &E->E2, t); + + return check_P1 & check_P2; +} + +// end of hd_functions +/** + * @} + */ +// end of hd_module +/** + * @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.c new file mode 100644 index 0000000000..6332d21f8e --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.c @@ -0,0 +1,143 @@ +#include + +#define FP2_ZERO 0 +#define FP2_ONE 1 +#define FP2_I 2 +#define FP2_MINUS_ONE 3 +#define FP2_MINUS_I 4 + +const int EVEN_INDEX[10][2] = {{0, 0}, {0, 1}, {0, 2}, {0, 3}, {1, 0}, {1, 2}, {2, 0}, {2, 1}, {3, 0}, {3, 3}}; +const int CHI_EVAL[4][4] = {{1, 1, 1, 1}, {1, -1, 1, -1}, {1, 1, -1, -1}, {1, -1, -1, 1}}; +const fp2_t FP2_CONSTANTS[5] = {{ +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x33, 0x0, 0x0, 0x100000000000000} +#else +{0x19, 0x0, 0x0, 0x0, 0x300000000000} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x1ccc, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x7} +#elif RADIX == 32 +{0x1ffff999, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x2ffff} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xffffffffffffffcc, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff} +#else +{0x7ffffffffffe6, 0x7ffffffffffff, 0x7ffffffffffff, 0x7ffffffffffff, 0x1fffffffffff} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1ccc, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x7} +#elif RADIX == 32 +{0x1ffff999, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x2ffff} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xffffffffffffffcc, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff} +#else +{0x7ffffffffffe6, 0x7ffffffffffff, 0x7ffffffffffff, 0x7ffffffffffff, 0x1fffffffffff} +#endif +#endif +}}; +const precomp_basis_change_matrix_t SPLITTING_TRANSFORMS[10] = {{{{FP2_ONE, FP2_I, FP2_ONE, FP2_I}, {FP2_ONE, FP2_MINUS_I, FP2_MINUS_ONE, FP2_I}, {FP2_ONE, FP2_I, FP2_MINUS_ONE, FP2_MINUS_I}, {FP2_MINUS_ONE, FP2_I, FP2_MINUS_ONE, FP2_I}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_MINUS_ONE, FP2_ZERO, FP2_ZERO}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_MINUS_ONE, FP2_ZERO}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}, 
{FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_ONE}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_ONE}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}, {FP2_MINUS_ONE, FP2_ONE, FP2_ONE, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}}}}; +const precomp_basis_change_matrix_t NORMALIZATION_TRANSFORMS[6] = {{{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}}}, {{{FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}}}, {{{FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_ONE}, {FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}}}, {{{FP2_MINUS_ONE, FP2_I, FP2_I, FP2_ONE}, {FP2_I, FP2_MINUS_ONE, FP2_ONE, FP2_I}, {FP2_I, FP2_ONE, FP2_MINUS_ONE, FP2_I}, {FP2_ONE, FP2_I, FP2_I, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_I, FP2_I, FP2_MINUS_ONE}, {FP2_I, FP2_ONE, FP2_MINUS_ONE, FP2_I}, {FP2_I, FP2_MINUS_ONE, FP2_ONE, FP2_I}, {FP2_MINUS_ONE, FP2_I, FP2_I, FP2_ONE}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.h new file mode 100644 index 0000000000..b3147a42a9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.h @@ -0,0 +1,18 @@ +#ifndef HD_SPLITTING_H +#define HD_SPLITTING_H + +#include +#include + +typedef struct precomp_basis_change_matrix { + uint8_t m[4][4]; +} precomp_basis_change_matrix_t; + +extern const int EVEN_INDEX[10][2]; +extern const int CHI_EVAL[4][4]; +extern const fp2_t FP2_CONSTANTS[5]; +extern const precomp_basis_change_matrix_t SPLITTING_TRANSFORMS[10]; +extern const precomp_basis_change_matrix_t NORMALIZATION_TRANSFORMS[6]; + +#endif + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf.c new file mode 100644 index 0000000000..1fb4c0f139 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf.c @@ -0,0 +1,210 @@ +#include "hnf_internal.h" +#include "internal.h" + +// HNF test function +int +ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat) +{ + int res = 1; + int found; + int ind = 0; + ibz_t zero; + ibz_init(&zero); + // upper triangular + for (int i = 0; i < 4; i++) { + // upper triangular + for (int j = 0; j < i; j++) { + res = res && ibz_is_zero(&((*mat)[i][j])); + } + // find first non 0 element of line + found = 0; + for (int j = i; j < 4; j++) { + if (found) { + // 
all values are positive, and first non-0 is the largest of that line + res = res && (ibz_cmp(&((*mat)[i][j]), &zero) >= 0); + res = res && (ibz_cmp(&((*mat)[i][ind]), &((*mat)[i][j])) > 0); + } else { + if (!ibz_is_zero(&((*mat)[i][j]))) { + found = 1; + ind = j; + // mustbe non-negative + res = res && (ibz_cmp(&((*mat)[i][j]), &zero) > 0); + } + } + } + } + // check that first nom-zero elements ndex per column is strictly increasing + int linestart = -1; + int i = 0; + for (int j = 0; j < 4; j++) { + while ((i < 4) && (ibz_is_zero(&((*mat)[i][j])))) { + i = i + 1; + } + if (i != 4) { + res = res && (linestart < i); + } + i = 0; + } + ibz_finalize(&zero); + return res; +} + +// Untested HNF helpers +// centered mod +void +ibz_vec_4_linear_combination_mod(ibz_vec_4_t *lc, + const ibz_t *coeff_a, + const ibz_vec_4_t *vec_a, + const ibz_t *coeff_b, + const ibz_vec_4_t *vec_b, + const ibz_t *mod) +{ + ibz_t prod, m; + ibz_vec_4_t sums; + ibz_vec_4_init(&sums); + ibz_init(&prod); + ibz_init(&m); + ibz_copy(&m, mod); + for (int i = 0; i < 4; i++) { + ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); + ibz_mul(&prod, coeff_b, &((*vec_b)[i])); + ibz_add(&(sums[i]), &(sums[i]), &prod); + ibz_centered_mod(&(sums[i]), &(sums[i]), &m); + } + for (int i = 0; i < 4; i++) { + ibz_copy(&((*lc)[i]), &(sums[i])); + } + ibz_finalize(&prod); + ibz_finalize(&m); + ibz_vec_4_finalize(&sums); +} + +void +ibz_vec_4_copy_mod(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_t *mod) +{ + ibz_t m; + ibz_init(&m); + ibz_copy(&m, mod); + for (int i = 0; i < 4; i++) { + ibz_centered_mod(&((*res)[i]), &((*vec)[i]), &m); + } + ibz_finalize(&m); +} + +// no need to center this, and not 0 +void +ibz_vec_4_scalar_mul_mod(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec, const ibz_t *mod) +{ + ibz_t m, s; + ibz_init(&m); + ibz_init(&s); + ibz_copy(&s, scalar); + ibz_copy(&m, mod); + for (int i = 0; i < 4; i++) { + ibz_mul(&((*prod)[i]), &((*vec)[i]), &s); + ibz_mod(&((*prod)[i]), &((*prod)[i]), &m); + } + ibz_finalize(&m); + ibz_finalize(&s); +} + +// Algorithm used is the one at number 2.4.8 in Henri Cohen's "A Course in Computational Algebraic +// Number Theory" (Springer Verlag, in series "Graduate texts in Mathematics") from 1993 +// assumes ibz_xgcd outputs u,v which are small in absolute value (as described in the +// book) +void +ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec_4_t *generators, const ibz_t *mod) +{ + int i = 3; + assert(generator_number > 3); + int n = generator_number; + int j = n - 1; + int k = n - 1; + ibz_t b, u, v, d, q, m, coeff_1, coeff_2, r; + ibz_vec_4_t c; + ibz_vec_4_t a[generator_number]; + ibz_vec_4_t w[4]; + ibz_init(&b); + ibz_init(&d); + ibz_init(&u); + ibz_init(&v); + ibz_init(&r); + ibz_init(&m); + ibz_init(&q); + ibz_init(&coeff_1); + ibz_init(&coeff_2); + ibz_vec_4_init(&c); + for (int h = 0; h < n; h++) { + if (h < 4) + ibz_vec_4_init(&(w[h])); + ibz_vec_4_init(&(a[h])); + ibz_copy(&(a[h][0]), &(generators[h][0])); + ibz_copy(&(a[h][1]), &(generators[h][1])); + ibz_copy(&(a[h][2]), &(generators[h][2])); + ibz_copy(&(a[h][3]), &(generators[h][3])); + } + assert(ibz_cmp(mod, &ibz_const_zero) > 0); + ibz_copy(&m, mod); + while (i != -1) { + while (j != 0) { + j = j - 1; + if (!ibz_is_zero(&(a[j][i]))) { + // assumtion that ibz_xgcd outputs u,v which are small in absolute + // value is needed here also, needs u non 0, but v can be 0 if needed + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &(a[j][i])); + ibz_vec_4_linear_combination(&c, &u, 
&(a[k]), &v, &(a[j])); + ibz_div(&coeff_1, &r, &(a[k][i]), &d); + ibz_div(&coeff_2, &r, &(a[j][i]), &d); + ibz_neg(&coeff_2, &coeff_2); + ibz_vec_4_linear_combination_mod( + &(a[j]), &coeff_1, &(a[j]), &coeff_2, &(a[k]), &m); // do lin comb mod m + ibz_vec_4_copy_mod(&(a[k]), &c, &m); // mod m in copy + } + } + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &m); + ibz_vec_4_scalar_mul_mod(&(w[i]), &u, &(a[k]), &m); // mod m in scalar mult + if (ibz_is_zero(&(w[i][i]))) { + ibz_copy(&(w[i][i]), &m); + } + for (int h = i + 1; h < 4; h++) { + ibz_div_floor(&q, &r, &(w[h][i]), &(w[i][i])); + ibz_neg(&q, &q); + ibz_vec_4_linear_combination(&(w[h]), &ibz_const_one, &(w[h]), &q, &(w[i])); + } + ibz_div(&m, &r, &m, &d); + assert(ibz_is_zero(&r)); + if (i != 0) { + k = k - 1; + i = i - 1; + j = k; + if (ibz_is_zero(&(a[k][i]))) + ibz_copy(&(a[k][i]), &m); + + } else { + k = k - 1; + i = i - 1; + j = k; + } + } + for (j = 0; j < 4; j++) { + for (i = 0; i < 4; i++) { + ibz_copy(&((*hnf)[i][j]), &(w[j][i])); + } + } + + ibz_finalize(&b); + ibz_finalize(&d); + ibz_finalize(&u); + ibz_finalize(&v); + ibz_finalize(&r); + ibz_finalize(&q); + ibz_finalize(&coeff_1); + ibz_finalize(&coeff_2); + ibz_finalize(&m); + ibz_vec_4_finalize(&c); + for (int h = 0; h < n; h++) { + if (h < 4) + ibz_vec_4_finalize(&(w[h])); + ibz_vec_4_finalize(&(a[h])); + } +} \ No newline at end of file diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf_internal.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf_internal.c new file mode 100644 index 0000000000..b2db5b54c9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf_internal.c @@ -0,0 +1,182 @@ +#include "hnf_internal.h" +#include "internal.h" + +// Small helper for integers +void +ibz_mod_not_zero(ibz_t *res, const ibz_t *x, const ibz_t *mod) +{ + ibz_t m, t; + ibz_init(&m); + ibz_init(&t); + ibz_mod(&m, x, mod); + ibz_set(&t, ibz_is_zero(&m)); + ibz_mul(&t, &t, mod); + ibz_add(res, &m, &t); + ibz_finalize(&m); + ibz_finalize(&t); +} + +// centered and rather positive then negative +void +ibz_centered_mod(ibz_t *remainder, const ibz_t *a, const ibz_t *mod) +{ + assert(ibz_cmp(mod, &ibz_const_zero) > 0); + ibz_t tmp, d, t; + ibz_init(&tmp); + ibz_init(&d); + ibz_init(&t); + ibz_div_floor(&d, &tmp, mod, &ibz_const_two); + ibz_mod_not_zero(&tmp, a, mod); + ibz_set(&t, ibz_cmp(&tmp, &d) > 0); + ibz_mul(&t, &t, mod); + ibz_sub(remainder, &tmp, &t); + ibz_finalize(&tmp); + ibz_finalize(&d); + ibz_finalize(&t); +} + +// if c, res = x, else res = y +void +ibz_conditional_assign(ibz_t *res, const ibz_t *x, const ibz_t *y, int c) +{ + ibz_t s, t, r; + ibz_init(&r); + ibz_init(&s); + ibz_init(&t); + ibz_set(&s, c != 0); + ibz_sub(&t, &ibz_const_one, &s); + ibz_mul(&r, &s, x); + ibz_mul(res, &t, y); + ibz_add(res, &r, res); + ibz_finalize(&r); + ibz_finalize(&s); + ibz_finalize(&t); +} + +// mpz_gcdext specification specifies unique outputs used here +void +ibz_xgcd_with_u_not_0(ibz_t *d, ibz_t *u, ibz_t *v, const ibz_t *x, const ibz_t *y) +{ + if (ibz_is_zero(x) & ibz_is_zero(y)) { + ibz_set(d, 1); + ibz_set(u, 1); + ibz_set(v, 0); + return; + } + ibz_t q, r, x1, y1; + ibz_init(&q); + ibz_init(&r); + ibz_init(&x1); + ibz_init(&y1); + ibz_copy(&x1, x); + ibz_copy(&y1, y); + + // xgcd + ibz_xgcd(d, u, v, &x1, &y1); + + // make sure u!=0 (v can be 0 if needed) + // following GMP specification, u == 0 implies y|x + if (ibz_is_zero(u)) { + if (!ibz_is_zero(&x1)) { + if (ibz_is_zero(&y1)) { + ibz_set(&y1, 1); + } + ibz_div(&q, &r, &x1, &y1); + 
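+            // since u == 0 implies y1 | x1 (GMP gcdext convention, see comment above), the quotient q is exact;
+            // switching to (u, v) = (1, v - q) below preserves u*x1 + v*y1 == d, because x1 + (v - q)*y1 == v*y1 == d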
assert(ibz_is_zero(&r)); + ibz_sub(v, v, &q); + } + ibz_set(u, 1); + } + if (!ibz_is_zero(&x1)) { + // Make sure ux > 0 (and as small as possible) + assert(ibz_cmp(d, &ibz_const_zero) > 0); + ibz_mul(&r, &x1, &y1); + int neg = ibz_cmp(&r, &ibz_const_zero) < 0; + ibz_mul(&q, &x1, u); + while (ibz_cmp(&q, &ibz_const_zero) <= 0) { + ibz_div(&q, &r, &y1, d); + assert(ibz_is_zero(&r)); + if (neg) { + ibz_neg(&q, &q); + } + ibz_add(u, u, &q); + ibz_div(&q, &r, &x1, d); + assert(ibz_is_zero(&r)); + if (neg) { + ibz_neg(&q, &q); + } + ibz_sub(v, v, &q); + + ibz_mul(&q, &x1, u); + } + } + +#ifndef NDEBUG + int res = 0; + ibz_t sum, prod, test, cmp; + ibz_init(&sum); + ibz_init(&prod); + ibz_init(&cmp); + ibz_init(&test); + // sign correct + res = res | !(ibz_cmp(d, &ibz_const_zero) >= 0); + if (ibz_is_zero(&x1) && ibz_is_zero(&y1)) { + res = res | !(ibz_is_zero(v) && ibz_is_one(u) && ibz_is_one(d)); + } else { + if (!ibz_is_zero(&x1) && !ibz_is_zero(&y1)) { + // GCD divides x + ibz_div(&sum, &prod, &x1, d); + res = res | !ibz_is_zero(&prod); + // Small enough + ibz_mul(&prod, &x1, u); + res = res | !(ibz_cmp(&prod, &ibz_const_zero) > 0); + ibz_mul(&sum, &sum, &y1); + ibz_abs(&sum, &sum); + res = res | !(ibz_cmp(&prod, &sum) <= 0); + + // GCD divides y + ibz_div(&sum, &prod, &y1, d); + res = res | !ibz_is_zero(&prod); + // Small enough + ibz_mul(&prod, &y1, v); + res = res | !(ibz_cmp(&prod, &ibz_const_zero) <= 0); + ibz_mul(&sum, &sum, &x1); + ibz_abs(&sum, &sum); + res = res | !(ibz_cmp(&prod, &sum) < 0); + } else { + // GCD divides x + ibz_div(&sum, &prod, &x1, d); + res = res | !ibz_is_zero(&prod); + // GCD divides y + ibz_div(&sum, &prod, &y1, d); + res = res | !ibz_is_zero(&prod); + if (ibz_is_zero(&x1) && !ibz_is_zero(&y1)) { + ibz_abs(&prod, v); + res = res | !(ibz_is_one(&prod)); + res = res | !(ibz_is_one(u)); + } else { + ibz_abs(&prod, u); + res = res | !(ibz_is_one(&prod)); + res = res | !(ibz_is_zero(v)); + } + } + + // Bezout coeffs + ibz_mul(&sum, &x1, u); + ibz_mul(&prod, &y1, v); + ibz_add(&sum, &sum, &prod); + res = res | !(ibz_cmp(&sum, d) == 0); + } + assert(!res); + ibz_finalize(&sum); + ibz_finalize(&prod); + ibz_finalize(&cmp); + ibz_finalize(&test); + +#endif + + ibz_finalize(&x1); + ibz_finalize(&y1); + ibz_finalize(&q); + ibz_finalize(&r); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf_internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf_internal.h new file mode 100644 index 0000000000..5ecc871bb4 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf_internal.h @@ -0,0 +1,94 @@ +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations for functions internal to the HNF computation and its tests + */ + +#ifndef QUAT_HNF_HELPERS_H +#define QUAT_HNF_HELPERS_H + +#include + +/** @internal + * @ingroup quat_helpers + * @defgroup quat_hnf_helpers Internal functions for the HNF computation and tests + */ + +/** @internal + * @ingroup quat_hnf_helpers + * @defgroup quat_hnf_helpers_ibz Internal renamed GMP functions for the HNF computation + */ + +/** + * @brief GCD and Bézout coefficients u, v such that ua + bv = gcd + * + * @param gcd Output: Set to the gcd of a and b + * @param u Output: integer such that ua+bv=gcd + * @param v Output: Integer such that ua+bv=gcd + * @param a + * @param b + */ +void ibz_xgcd(ibz_t *gcd, + ibz_t *u, + ibz_t *v, + const ibz_t *a, + const ibz_t *b); // integers, dim4, test/integers, test/dim4 + +/** @} + */ + +/** @internal + * @ingroup quat_hnf_helpers + * @defgroup quat_hnf_integer_helpers 
Integer functions internal to the HNF computation and tests + * @{ + */ + +/** @brief x mod mod, with x in [1,mod] + * + * @param res Output: res = x [mod] and 0 < res <= mod + * @param x + * @param mod must be > 0 + */ +void ibz_mod_not_zero(ibz_t *res, const ibz_t *x, const ibz_t *mod); + +/** @brief x mod mod, with x in ]-mod/2,mod/2] + * + * Centered and rather positive than negative. + * + * @param remainder Output: remainder = x [mod] and -mod/2 < remainder <= mod/2 + * @param a + * @param mod must be > 0 + */ +void ibz_centered_mod(ibz_t *remainder, const ibz_t *a, const ibz_t *mod); + +/** @brief if c then x else y + * + * @param res Output: if c, res = x, else res = y + * @param x + * @param y + * @param c condition: must be 0 or 1 + */ +void ibz_conditional_assign(ibz_t *res, const ibz_t *x, const ibz_t *y, int c); + +/** @brief d = gcd(x,y) > 0 and d = ux+vy with u != 0, where u, v are of small absolute value + * + * More precisely: + * If x and y are both non 0, -|xy|/d +#else +#include +#endif + +void +ibz_xgcd(ibz_t *gcd, ibz_t *u, ibz_t *v, const ibz_t *a, const ibz_t *b) +{ + mpz_gcdext(*gcd, *u, *v, *a, *b); +} \ No newline at end of file diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/id2iso.c new file mode 100644 index 0000000000..0743974345 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/id2iso.c @@ -0,0 +1,338 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Scalar multiplication [x]P + [y]Q where x and y are stored +// inside an ibz_vec_2_t [x, y] and P, Q \in E[2^f] +void +ec_biscalar_mul_ibz_vec(ec_point_t *res, + const ibz_vec_2_t *scalar_vec, + const int f, + const ec_basis_t *PQ, + const ec_curve_t *curve) +{ + digit_t scalars[2][NWORDS_ORDER]; + ibz_to_digit_array(scalars[0], &(*scalar_vec)[0]); + ibz_to_digit_array(scalars[1], &(*scalar_vec)[1]); + ec_biscalar_mul(res, scalars[0], scalars[1], f, PQ, curve); +} + +// Given an ideal, computes the scalars s0, s1 which determine the kernel generator +// of the equivalent isogeny +void +id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *vec, const quat_left_ideal_t *lideal) +{ + ibz_t tmp; + ibz_init(&tmp); + + ibz_mat_2x2_t mat; + ibz_mat_2x2_init(&mat); + + // construct the matrix of the dual of alpha on the 2^f-torsion + { + quat_alg_elem_t alpha; + quat_alg_elem_init(&alpha); + + int lideal_generator_ok UNUSED = quat_lideal_generator(&alpha, lideal, &QUATALG_PINFTY); + assert(lideal_generator_ok); + quat_alg_conj(&alpha, &alpha); + + ibz_vec_4_t coeffs; + ibz_vec_4_init(&coeffs); + quat_change_to_O0_basis(&coeffs, &alpha); + + for (unsigned i = 0; i < 2; ++i) { + ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + for (unsigned j = 0; j < 2; ++j) { + ibz_mul(&tmp, &ACTION_GEN2[i][j], &coeffs[1]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN3[i][j], &coeffs[2]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN4[i][j], &coeffs[3]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + } + } + + ibz_vec_4_finalize(&coeffs); + quat_alg_elem_finalize(&alpha); + } + + // find the kernel of alpha modulo the norm of the ideal + { + const ibz_t *const norm = &lideal->norm; + + ibz_mod(&(*vec)[0], &mat[0][0], norm); + ibz_mod(&(*vec)[1], &mat[1][0], norm); + ibz_gcd(&tmp, &(*vec)[0], &(*vec)[1]); + if (ibz_is_even(&tmp)) { + ibz_mod(&(*vec)[0], &mat[0][1], norm); + ibz_mod(&(*vec)[1], &mat[1][1], norm); + } +#ifndef NDEBUG + ibz_gcd(&tmp, &(*vec)[0], norm); + ibz_gcd(&tmp, &(*vec)[1], &tmp); + assert(!ibz_cmp(&tmp, &ibz_const_one)); +#endif + } + + 
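+    // at this point (*vec) holds the kernel dlog coefficients (s0, s1); the debug check above verifies they are primitive modulo the ideal norm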
ibz_mat_2x2_finalize(&mat); + ibz_finalize(&tmp); +} + +// helper function to apply a matrix to a basis of E[2^f] +// works in place +int +matrix_application_even_basis(ec_basis_t *bas, const ec_curve_t *E, ibz_mat_2x2_t *mat, int f) +{ + digit_t scalars[2][NWORDS_ORDER] = { 0 }; + int ret; + + ibz_t tmp, pow_two; + ibz_init(&tmp); + ibz_init(&pow_two); + ibz_pow(&pow_two, &ibz_const_two, f); + + ec_basis_t tmp_bas; + copy_basis(&tmp_bas, bas); + + // reduction mod 2f + ibz_mod(&(*mat)[0][0], &(*mat)[0][0], &pow_two); + ibz_mod(&(*mat)[0][1], &(*mat)[0][1], &pow_two); + ibz_mod(&(*mat)[1][0], &(*mat)[1][0], &pow_two); + ibz_mod(&(*mat)[1][1], &(*mat)[1][1], &pow_two); + + // For a matrix [[a, c], [b, d]] we compute: + // + // first basis element R = [a]P + [b]Q + ibz_to_digit_array(scalars[0], &(*mat)[0][0]); + ibz_to_digit_array(scalars[1], &(*mat)[1][0]); + ec_biscalar_mul(&bas->P, scalars[0], scalars[1], f, &tmp_bas, E); + + // second basis element S = [c]P + [d]Q + ibz_to_digit_array(scalars[0], &(*mat)[0][1]); + ibz_to_digit_array(scalars[1], &(*mat)[1][1]); + ec_biscalar_mul(&bas->Q, scalars[0], scalars[1], f, &tmp_bas, E); + + // Their difference R - S = [a - c]P + [b - d]Q + ibz_sub(&tmp, &(*mat)[0][0], &(*mat)[0][1]); + ibz_mod(&tmp, &tmp, &pow_two); + ibz_to_digit_array(scalars[0], &tmp); + ibz_sub(&tmp, &(*mat)[1][0], &(*mat)[1][1]); + ibz_mod(&tmp, &tmp, &pow_two); + ibz_to_digit_array(scalars[1], &tmp); + ret = ec_biscalar_mul(&bas->PmQ, scalars[0], scalars[1], f, &tmp_bas, E); + + ibz_finalize(&tmp); + ibz_finalize(&pow_two); + + return ret; +} + +// helper function to apply some endomorphism of E0 on the precomputed basis of E[2^f] +// works in place +void +endomorphism_application_even_basis(ec_basis_t *bas, + const int index_alternate_curve, + const ec_curve_t *E, + const quat_alg_elem_t *theta, + int f) +{ + ibz_t tmp; + ibz_init(&tmp); + ibz_vec_4_t coeffs; + ibz_vec_4_init(&coeffs); + ibz_mat_2x2_t mat; + ibz_mat_2x2_init(&mat); + + ibz_t content; + ibz_init(&content); + + // decomposing theta on the basis + quat_alg_make_primitive(&coeffs, &content, theta, &EXTREMAL_ORDERS[index_alternate_curve].order); + assert(ibz_is_odd(&content)); + + ibz_set(&mat[0][0], 0); + ibz_set(&mat[0][1], 0); + ibz_set(&mat[1][0], 0); + ibz_set(&mat[1][1], 0); + + // computing the matrix + + for (unsigned i = 0; i < 2; ++i) { + ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + for (unsigned j = 0; j < 2; ++j) { + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen2[i][j], &coeffs[1]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen3[i][j], &coeffs[2]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen4[i][j], &coeffs[3]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&mat[i][j], &mat[i][j], &content); + } + } + + // and now we apply it + matrix_application_even_basis(bas, E, &mat, f); + + ibz_vec_4_finalize(&coeffs); + ibz_mat_2x2_finalize(&mat); + ibz_finalize(&content); + + ibz_finalize(&tmp); +} + +// compute the ideal whose kernel is generated by vec2[0]*BO[0] + vec2[1]*B0[1] where B0 is the +// canonical basis of E0 +void +id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t *vec2, int f) +{ + + // algorithm: apply endomorphisms 1 and j+(1+k)/2 to the kernel point, + // the result should form a basis of the respective torsion subgroup. 
+ // then apply i to the kernel point and decompose over said basis. + // hence we have an equation a*P + b*[j+(1+k)/2]P == [i]P, which will + // easily reveal an endomorphism that kills P. + + ibz_t two_pow; + ibz_init(&two_pow); + + ibz_vec_2_t vec; + ibz_vec_2_init(&vec); + + if (f == TORSION_EVEN_POWER) { + ibz_copy(&two_pow, &TORSION_PLUS_2POWER); + } else { + ibz_pow(&two_pow, &ibz_const_two, f); + } + + { + ibz_mat_2x2_t mat; + ibz_mat_2x2_init(&mat); + + ibz_copy(&mat[0][0], &(*vec2)[0]); + ibz_copy(&mat[1][0], &(*vec2)[1]); + + ibz_mat_2x2_eval(&vec, &ACTION_J, vec2); + ibz_copy(&mat[0][1], &vec[0]); + ibz_copy(&mat[1][1], &vec[1]); + + ibz_mat_2x2_eval(&vec, &ACTION_GEN4, vec2); + ibz_add(&mat[0][1], &mat[0][1], &vec[0]); + ibz_add(&mat[1][1], &mat[1][1], &vec[1]); + + ibz_mod(&mat[0][1], &mat[0][1], &two_pow); + ibz_mod(&mat[1][1], &mat[1][1], &two_pow); + + ibz_mat_2x2_t inv; + ibz_mat_2x2_init(&inv); + { + int inv_ok UNUSED = ibz_mat_2x2_inv_mod(&inv, &mat, &two_pow); + assert(inv_ok); + } + ibz_mat_2x2_finalize(&mat); + + ibz_mat_2x2_eval(&vec, &ACTION_I, vec2); + ibz_mat_2x2_eval(&vec, &inv, &vec); + + ibz_mat_2x2_finalize(&inv); + } + + // final result: a - i + b*(j+(1+k)/2) + quat_alg_elem_t gen; + quat_alg_elem_init(&gen); + ibz_set(&gen.denom, 2); + ibz_add(&gen.coord[0], &vec[0], &vec[0]); + ibz_set(&gen.coord[1], -2); + ibz_add(&gen.coord[2], &vec[1], &vec[1]); + ibz_copy(&gen.coord[3], &vec[1]); + ibz_add(&gen.coord[0], &gen.coord[0], &vec[1]); + ibz_vec_2_finalize(&vec); + + quat_lideal_create(lideal, &gen, &two_pow, &MAXORD_O0, &QUATALG_PINFTY); + + assert(0 == ibz_cmp(&lideal->norm, &two_pow)); + + quat_alg_elem_finalize(&gen); + ibz_finalize(&two_pow); +} + +// finds mat such that: +// (mat*v).B2 = v.B1 +// where "." is the dot product, defined as (v1,v2).(P,Q) = v1*P + v2*Q +// mat encodes the coordinates of the points of B1 in the basis B2 +// specifically requires B1 or B2 to be "full" w.r.t to the 2^n torsion, so that we use tate +// full = 0 assumes B2 is "full" so the easier case. +// if we want to switch the role of B2 and B1, we invert the matrix, e.g. 
set full = 1 +static void +_change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, + const ec_basis_t *B1, + const ec_basis_t *B2, + ec_curve_t *E, + int f, + bool invert) +{ + digit_t x1[NWORDS_ORDER] = { 0 }, x2[NWORDS_ORDER] = { 0 }, x3[NWORDS_ORDER] = { 0 }, x4[NWORDS_ORDER] = { 0 }; + +#ifndef NDEBUG + int e_full = TORSION_EVEN_POWER; + int e_diff = e_full - f; +#endif + + // Ensure the input basis has points of order 2^f + if (invert) { + assert(test_basis_order_twof(B1, E, e_full)); + ec_dlog_2_tate(x1, x2, x3, x4, B1, B2, E, f); + mp_invert_matrix(x1, x2, x3, x4, f, NWORDS_ORDER); + } else { + assert(test_basis_order_twof(B2, E, e_full)); + ec_dlog_2_tate(x1, x2, x3, x4, B2, B1, E, f); + } + +#ifndef NDEBUG + { + if (invert) { + ec_point_t test, test2; + ec_biscalar_mul(&test, x1, x2, f, B2, E); + ec_dbl_iter(&test2, e_diff, &B1->P, E); + assert(ec_is_equal(&test, &test2)); + + ec_biscalar_mul(&test, x3, x4, f, B2, E); + ec_dbl_iter(&test2, e_diff, &B1->Q, E); + assert(ec_is_equal(&test, &test2)); + } else { + ec_point_t test; + ec_biscalar_mul(&test, x1, x2, f, B2, E); + ec_dbl_iter(&test, e_diff, &test, E); + assert(ec_is_equal(&test, &(B1->P))); + + ec_biscalar_mul(&test, x3, x4, f, B2, E); + ec_dbl_iter(&test, e_diff, &test, E); + assert(ec_is_equal(&test, &(B1->Q))); + } + } +#endif + + // Copy the results into the matrix + ibz_copy_digit_array(&((*mat)[0][0]), x1); + ibz_copy_digit_array(&((*mat)[1][0]), x2); + ibz_copy_digit_array(&((*mat)[0][1]), x3); + ibz_copy_digit_array(&((*mat)[1][1]), x4); +} + +void +change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, const ec_basis_t *B1, const ec_basis_t *B2, ec_curve_t *E, int f) +{ + _change_of_basis_matrix_tate(mat, B1, B2, E, f, false); +} + +void +change_of_basis_matrix_tate_invert(ibz_mat_2x2_t *mat, const ec_basis_t *B1, const ec_basis_t *B2, ec_curve_t *E, int f) +{ + _change_of_basis_matrix_tate(mat, B1, B2, E, f, true); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/id2iso.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/id2iso.h new file mode 100644 index 0000000000..1b4eaae3c5 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/id2iso.h @@ -0,0 +1,280 @@ +/** @file + * + * @authors Antonin Leroux + * + * @brief The id2iso algorithms + */ + +#ifndef ID2ISO_H +#define ID2ISO_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/** @defgroup id2iso_id2iso Ideal to isogeny conversion + * @{ + */ +static const quat_represent_integer_params_t QUAT_represent_integer_params = { + .algebra = &QUATALG_PINFTY, /// The level-specific quaternion algebra + .order = &(EXTREMAL_ORDERS[0]), // The special extremal order O0 + .primality_test_iterations = QUAT_primality_num_iter // precompted bound on the iteration number in primality tests +}; + +/*************************** Functions *****************************/ + +/** @defgroup id2iso_others Other functions needed for id2iso + * @{ + */ + +/** + * @brief Scalar multiplication [x]P + [y]Q where x and y are stored inside an + * ibz_vec_2_t [x, y] and P, Q in E[2^f] + * + * @param res Output: the point R = [x]P + [y]Q + * @param scalar_vec: a vector of ibz type elements (x, y) + * @param f: an integer such that P, Q are in E[2^f] + * @param PQ: an x-only basis x(P), x(Q) and x(P-Q) + * @param curve: the curve E the points P, Q, R are defined on + * + */ +void ec_biscalar_mul_ibz_vec(ec_point_t *res, + const ibz_vec_2_t *scalar_vec, + const int f, + const ec_basis_t *PQ, + const ec_curve_t *curve); + +/** + * @brief Translating an ideal 
of norm 2^f dividing p²-1 into the corresponding + * kernel coefficients + * + * @param ker_dlog Output : two coefficients indicating the decomposition of the + * kernel over the canonical basis of E0[2^f] + * @param lideal_input : O0-ideal corresponding to the ideal to be translated of + * norm 2^f + * + */ +void id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *ker_dlog, const quat_left_ideal_t *lideal_input); + +/** + * @brief Applies some 2x2 matrix on a basis of E[2^TORSION_EVEN_POWER] + * + * @param P the basis + * @param E the curve + * @param mat the matrix + * @param f TORSION_EVEN_POWER + * @returns 1 if success, 0 if error + * + * helper function, works in place + * + */ +int matrix_application_even_basis(ec_basis_t *P, const ec_curve_t *E, ibz_mat_2x2_t *mat, int f); + +/** + * @brief Applies some endomorphism of an alternate curve to E[f] + * + * @param P the basis + * @param index_alternate_curve index of the alternate order in the list of precomputed extremal + * orders + * @param E the curve (E is not required to be the alternate curve in question since in the end we + * only apply a matrix) + * @param theta the endomorphism + * @param f TORSION_EVEN_POWER + * + * helper function, works in place + * + */ +void endomorphism_application_even_basis(ec_basis_t *P, + const int index_alternate_curve, + const ec_curve_t *E, + const quat_alg_elem_t *theta, + int f); + +/** + * @brief Translating a kernel on the curve E0, represented as a vector with + * respect to the precomputed 2^f-torsion basis, into the corresponding O0-ideal + * + * @param lideal Output : the output O0-ideal + * @param f : exponent definining the norm of the ideal to compute + * @param vec2 : length-2 vector giving the 2-power part of the kernel with + * respect to the precomputed 2^f basis + * + */ +void id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t *vec2, int f); + +/** + * @brief Change of basis matrix for full basis B2 + * Finds mat such that: + * (mat*v).B2 = v.B1 + * where "." is the dot product, defined as (v1,v2).(P,Q) = v1*P + v2*Q + * + * @param mat the computed change of basis matrix + * @param B1 the source basis for E[2^f] + * @param B2 the target basis for E[2^e] + * @param E the elliptic curve + * @param f 2^f is the order of the points of the input basis + * + * mat encodes the coordinates of the points of B1 in the basis B2 + */ +void change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, const ec_basis_t *B1, const ec_basis_t *B2, ec_curve_t *E, int f); + +/** + * @brief Change of basis matrix for full basis B2 + * Finds mat such that: + * (mat*v).B1 = [2^e-f]*v.B2 + * where "." 
is the dot product, defined as (v1,v2).(P,Q) = v1*P + v2*Q + * + * @param mat the computed change of basis matrix + * @param B1 the source basis for E[2^e] + * @param B2 the target basis for E[2^f] + * @param E the elliptic curve + * @param f 2^f is the order of the points of the input basis + * + * mat encodes the coordinates of the points of B1 in the basis B2, by + * applying change_of_basis_matrix_tate and inverting the outcome + */ +void change_of_basis_matrix_tate_invert(ibz_mat_2x2_t *mat, + const ec_basis_t *B1, + const ec_basis_t *B2, + ec_curve_t *E, + int f); + +/** @} + */ + +/** @defgroup id2iso_arbitrary Arbitrary isogeny evaluation + * @{ + */ +/** + * @brief Function to find elements u, v, d1, d2, beta1, beta2 for the ideal to isogeny + * + * @param u Output: integer + * @param v Output: integer + * @param beta1 Output: quaternion element + * @param beta2 Output: quaternion element + * @param d1 Output: integer + * @param d2 Output: integer + * @param index_alternate_order_1 Output: small integer (index of an alternate order) + * @param index_alternate_order_2 Output: small integer (index of an alternate order) + * @param target : integer, target norm + * @param lideal : O0-ideal defining the search space + * @param Bpoo : quaternion algebra + * @param num_alternate_order number of alternate order we consider + * @returns 1 if the computation succeeds, 0 otherwise + * + * Let us write ti = index_alternate_order_i, + * we look for u,v,beta1,beta2,d1,d2,t1,t2 + * such that u d1 + v d2 = target + * and where di = norm(betai)/norm(Ii), where the ideal Ii is equal to overbar{Ji} * lideal and + * betai is in Ii where Ji is a connecting ideal between the maximal order O0 and O_ti t1,t2 must be + * contained between 0 and num_alternate_order This corresponds to the function SuitableIdeals in + * the spec + */ +int find_uv(ibz_t *u, + ibz_t *v, + quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *d1, + ibz_t *d2, + int *index_alternate_order_1, + int *index_alternate_order_2, + const ibz_t *target, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo, + int num_alternate_order); + +/** + * @brief Computes an arbitrary isogeny of fixed degree starting from E0 + * and evaluates it a list of points of the form (P1,0) or (0,P2). 
+ * + * @param lideal Output : an ideal of norm u + * @param u : integer + * @param small : bit indicating if we the value of u is "small" meaning that we + expect it to be + * around sqrt{p}, in that case we use a length slightly above + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny + (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @param index_alternate_order : index of the special extremal order to be used (in the list of + these orders) + * @returns the length of the chain if the computation succeeded, zero upon + failure + * + * F is an isogeny encoding an isogeny [adjust]*phi : E0 -> Eu of degree u + * note that the codomain of F can be either Eu x Eu' or Eu' x Eu for some curve + Eu' + */ +int fixed_degree_isogeny_and_eval(quat_left_ideal_t *lideal, + const ibz_t *u, + bool small, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP, + const int index_alternate_order); + +/** + * @brief Translating an ideal into a representation of the corresponding + * isogeny + * + * @param beta1 Output: quaternion element + * @param beta2 Output: quaternion element + * @param u Output: integer + * @param v Output: integer + * @param d1 Output: integer + * @param d2 Output: integer + * @param codomain the codomain of the isogeny corresponding to lideal + * @param basis Output : evaluation of the canonical basis of E0 through the + * ideal corresponding to lideal + * @param lideal : O0 - ideal in input + * @param Bpoo : the quaternion algebra + * @returns 1 if the computation succeeded, 0 otherwise + * + * Compute the codomain and image on the basis of E0 of the isogeny + * E0 -> codomain corresponding to lideal + * + * There is some integer e >= 0 such that + * 2^e * u, 2^e * v,beta1, beta2, d1, d2 are the output of find_uv + * on input target = 2^TORSION_PLUS_EVEN_POWER and lideal + * + * codomain and basis are computed with the help of a dimension 2 isogeny + * of degree 2^TORSION_PLUS_EVEN_POWER - e using a Kani diagram + * + */ +int dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *u, + ibz_t *v, + ibz_t *d1, + ibz_t *d2, + ec_curve_t *codomain, + ec_basis_t *basis, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo); + +/** + * @brief Translating an ideal into a representation of the corresponding + * isogeny + * + * @param basis Output : evaluation of the canonical basis of E0 through the + * ideal corresponding to lideal + * @param lideal : ideal in input + * @param codomain + * @returns 1 if the computation succeeds, 0 otherwise + * + * This is a wrapper around the ideal to isogeny clapotis function + */ +int dim2id2iso_arbitrary_isogeny_evaluation(ec_basis_t *basis, ec_curve_t *codomain, const quat_left_ideal_t *lideal); + +/** @} + */ + +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ideal.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ideal.c new file mode 100644 index 0000000000..9cf863a104 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ideal.c @@ -0,0 +1,323 @@ +#include +#include +#include "internal.h" + +// assumes parent order and lattice correctly set, computes and sets the norm +void +quat_lideal_norm(quat_left_ideal_t *lideal) +{ + quat_lattice_index(&(lideal->norm), &(lideal->lattice), (lideal->parent_order)); + int ok UNUSED = ibz_sqrt(&(lideal->norm), &(lideal->norm)); + assert(ok); +} + +// assumes parent order and lattice correctly set, 
recomputes and verifies its norm +static int +quat_lideal_norm_verify(const quat_left_ideal_t *lideal) +{ + int res; + ibz_t index; + ibz_init(&index); + quat_lattice_index(&index, &(lideal->lattice), (lideal->parent_order)); + ibz_sqrt(&index, &index); + res = (ibz_cmp(&(lideal->norm), &index) == 0); + ibz_finalize(&index); + return (res); +} + +void +quat_lideal_copy(quat_left_ideal_t *copy, const quat_left_ideal_t *copied) +{ + copy->parent_order = copied->parent_order; + ibz_copy(©->norm, &copied->norm); + ibz_copy(©->lattice.denom, &copied->lattice.denom); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(©->lattice.basis[i][j], &copied->lattice.basis[i][j]); + } + } +} + +void +quat_lideal_create_principal(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const quat_lattice_t *order, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal(order, alg)); + assert(quat_lattice_contains(NULL, order, x)); + ibz_t norm_n, norm_d; + ibz_init(&norm_n); + ibz_init(&norm_d); + + // Multiply order on the right by x + quat_lattice_alg_elem_mul(&(lideal->lattice), order, x, alg); + + // Reduce denominator. This conserves HNF + quat_lattice_reduce_denom(&lideal->lattice, &lideal->lattice); + + // Compute norm and check it's integral + quat_alg_norm(&norm_n, &norm_d, x, alg); + assert(ibz_is_one(&norm_d)); + ibz_copy(&lideal->norm, &norm_n); + + // Set order + lideal->parent_order = order; + ibz_finalize(&norm_n); + ibz_finalize(&norm_d); +} + +void +quat_lideal_create(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const ibz_t *N, + const quat_lattice_t *order, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal(order, alg)); + assert(!quat_alg_elem_is_zero(x)); + + quat_lattice_t ON; + quat_lattice_init(&ON); + + // Compute ideal generated by x + quat_lideal_create_principal(lideal, x, order, alg); + + // Compute ideal generated by N (without reducing denominator) + ibz_mat_4x4_scalar_mul(&ON.basis, N, &order->basis); + ibz_copy(&ON.denom, &order->denom); + + // Add lattices (reduces denominators) + quat_lattice_add(&lideal->lattice, &lideal->lattice, &ON); + // Set order + lideal->parent_order = order; + // Compute norm + quat_lideal_norm(lideal); + + quat_lattice_finalize(&ON); +} + +int +quat_lideal_generator(quat_alg_elem_t *gen, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + ibz_t norm_int, norm_n, gcd, r, q, norm_denom; + ibz_vec_4_t vec; + ibz_vec_4_init(&vec); + ibz_init(&norm_denom); + ibz_init(&norm_int); + ibz_init(&norm_n); + ibz_init(&r); + ibz_init(&q); + ibz_init(&gcd); + int a, b, c, d; + int found = 0; + int int_norm = 0; + while (1) { + int_norm++; + for (a = -int_norm; a <= int_norm; a++) { + for (b = -int_norm + abs(a); b <= int_norm - abs(a); b++) { + for (c = -int_norm + abs(a) + abs(b); c <= int_norm - abs(a) - abs(b); c++) { + d = int_norm - abs(a) - abs(b) - abs(c); + ibz_vec_4_set(&vec, a, b, c, d); + ibz_vec_4_content(&gcd, &vec); + if (ibz_is_one(&gcd)) { + ibz_mat_4x4_eval(&(gen->coord), &(lideal->lattice.basis), &vec); + ibz_copy(&(gen->denom), &(lideal->lattice.denom)); + quat_alg_norm(&norm_int, &norm_denom, gen, alg); + assert(ibz_is_one(&norm_denom)); + ibz_div(&q, &r, &norm_int, &(lideal->norm)); + assert(ibz_is_zero(&r)); + ibz_gcd(&gcd, &(lideal->norm), &q); + found = (0 == ibz_cmp(&gcd, &ibz_const_one)); + if (found) + goto fin; + } + } + } + } + } +fin:; + ibz_finalize(&r); + ibz_finalize(&q); + ibz_finalize(&norm_denom); + ibz_finalize(&norm_int); + ibz_finalize(&norm_n); + 
ibz_vec_4_finalize(&vec); + ibz_finalize(&gcd); + return (found); +} + +void +quat_lideal_mul(quat_left_ideal_t *product, + const quat_left_ideal_t *lideal, + const quat_alg_elem_t *alpha, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + ibz_t norm, norm_d; + ibz_init(&norm); + ibz_init(&norm_d); + quat_lattice_alg_elem_mul(&(product->lattice), &(lideal->lattice), alpha, alg); + product->parent_order = lideal->parent_order; + quat_alg_norm(&norm, &norm_d, alpha, alg); + ibz_mul(&(product->norm), &(lideal->norm), &norm); + assert(ibz_divides(&(product->norm), &norm_d)); + ibz_div(&(product->norm), &norm, &(product->norm), &norm_d); + assert(quat_lideal_norm_verify(lideal)); + ibz_finalize(&norm_d); + ibz_finalize(&norm); +} + +void +quat_lideal_add(quat_left_ideal_t *sum, const quat_left_ideal_t *I1, const quat_left_ideal_t *I2, const quat_alg_t *alg) +{ + assert(I1->parent_order == I2->parent_order); + assert(quat_order_is_maximal((I2->parent_order), alg)); + quat_lattice_add(&sum->lattice, &I1->lattice, &I2->lattice); + sum->parent_order = I1->parent_order; + quat_lideal_norm(sum); +} + +void +quat_lideal_inter(quat_left_ideal_t *inter, + const quat_left_ideal_t *I1, + const quat_left_ideal_t *I2, + const quat_alg_t *alg) +{ + assert(I1->parent_order == I2->parent_order); + assert(quat_order_is_maximal((I2->parent_order), alg)); + quat_lattice_intersect(&inter->lattice, &I1->lattice, &I2->lattice); + inter->parent_order = I1->parent_order; + quat_lideal_norm(inter); +} + +int +quat_lideal_equals(const quat_left_ideal_t *I1, const quat_left_ideal_t *I2, const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((I2->parent_order), alg)); + assert(quat_order_is_maximal((I1->parent_order), alg)); + return (I1->parent_order == I2->parent_order) & (ibz_cmp(&I1->norm, &I2->norm) == 0) & + quat_lattice_equal(&I1->lattice, &I2->lattice); +} + +void +quat_lideal_inverse_lattice_without_hnf(quat_lattice_t *inv, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + quat_lattice_conjugate_without_hnf(inv, &(lideal->lattice)); + ibz_mul(&(inv->denom), &(inv->denom), &(lideal->norm)); +} + +// following the implementation of ideal isomorphisms in the code of LearningToSQI's sage +// implementation of SQIsign +void +quat_lideal_right_transporter(quat_lattice_t *trans, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal1->parent_order), alg)); + assert(quat_order_is_maximal((lideal2->parent_order), alg)); + assert(lideal1->parent_order == lideal2->parent_order); + quat_lattice_t inv; + quat_lattice_init(&inv); + quat_lideal_inverse_lattice_without_hnf(&inv, lideal1, alg); + quat_lattice_mul(trans, &inv, &(lideal2->lattice), alg); + quat_lattice_finalize(&inv); +} + +void +quat_lideal_right_order(quat_lattice_t *order, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + quat_lideal_right_transporter(order, lideal, lideal, alg); +} + +void +quat_lideal_class_gram(ibz_mat_4x4_t *G, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + quat_lattice_gram(G, &(lideal->lattice), alg); + + // divide by norm · denominator² + ibz_t divisor, rmd; + ibz_init(&divisor); + ibz_init(&rmd); + + ibz_mul(&divisor, &(lideal->lattice.denom), &(lideal->lattice.denom)); + ibz_mul(&divisor, &divisor, &(lideal->norm)); + + for (int i = 0; i < 4; i++) { + for 
(int j = 0; j <= i; j++) { + ibz_div(&(*G)[i][j], &rmd, &(*G)[i][j], &divisor); + assert(ibz_is_zero(&rmd)); + } + } + for (int i = 0; i < 4; i++) { + for (int j = 0; j <= i - 1; j++) { + ibz_copy(&(*G)[j][i], &(*G)[i][j]); + } + } + + ibz_finalize(&rmd); + ibz_finalize(&divisor); +} + +void +quat_lideal_conjugate_without_hnf(quat_left_ideal_t *conj, + quat_lattice_t *new_parent_order, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg) +{ + quat_lideal_right_order(new_parent_order, lideal, alg); + quat_lattice_conjugate_without_hnf(&(conj->lattice), &(lideal->lattice)); + conj->parent_order = new_parent_order; + ibz_copy(&(conj->norm), &(lideal->norm)); +} + +int +quat_order_discriminant(ibz_t *disc, const quat_lattice_t *order, const quat_alg_t *alg) +{ + int ok = 0; + ibz_t det, sqr, div; + ibz_mat_4x4_t transposed, norm, prod; + ibz_init(&det); + ibz_init(&sqr); + ibz_init(&div); + ibz_mat_4x4_init(&transposed); + ibz_mat_4x4_init(&norm); + ibz_mat_4x4_init(&prod); + ibz_mat_4x4_transpose(&transposed, &(order->basis)); + // multiply gram matrix by 2 because of reduced trace + ibz_mat_4x4_identity(&norm); + ibz_copy(&(norm[2][2]), &(alg->p)); + ibz_copy(&(norm[3][3]), &(alg->p)); + ibz_mat_4x4_scalar_mul(&norm, &ibz_const_two, &norm); + ibz_mat_4x4_mul(&prod, &transposed, &norm); + ibz_mat_4x4_mul(&prod, &prod, &(order->basis)); + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &prod); + ibz_mul(&div, &(order->denom), &(order->denom)); + ibz_mul(&div, &div, &div); + ibz_mul(&div, &div, &div); + ibz_div(&sqr, &div, &det, &div); + ok = ibz_is_zero(&div); + ok = ok & ibz_sqrt(disc, &sqr); + ibz_finalize(&det); + ibz_finalize(&div); + ibz_finalize(&sqr); + ibz_mat_4x4_finalize(&transposed); + ibz_mat_4x4_finalize(&norm); + ibz_mat_4x4_finalize(&prod); + return (ok); +} + +int +quat_order_is_maximal(const quat_lattice_t *order, const quat_alg_t *alg) +{ + int res; + ibz_t disc; + ibz_init(&disc); + quat_order_discriminant(&disc, order, alg); + res = (ibz_cmp(&disc, &(alg->p)) == 0); + ibz_finalize(&disc); + return (res); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/intbig.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/intbig.c new file mode 100644 index 0000000000..b0462dc8b5 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/intbig.c @@ -0,0 +1,791 @@ +#include "intbig_internal.h" +#include +#include +#include +#include +#include +#include + +// #define DEBUG_VERBOSE + +#ifdef DEBUG_VERBOSE +#define DEBUG_STR_PRINTF(x) printf("%s\n", (x)); + +static void +DEBUG_STR_FUN_INT_MP(const char *op, int arg1, const ibz_t *arg2) +{ + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + printf("%s,%x,%s\n", op, arg1, arg2_str); +} + +static void +DEBUG_STR_FUN_3(const char *op, const ibz_t *arg1, const ibz_t *arg2, const ibz_t *arg3) +{ + int arg1_size = ibz_size_in_base(arg1, 16); + char arg1_str[arg1_size + 2]; + ibz_convert_to_str(arg1, arg1_str, 16); + + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + int arg3_size = ibz_size_in_base(arg3, 16); + char arg3_str[arg3_size + 2]; + ibz_convert_to_str(arg3, arg3_str, 16); + + printf("%s,%s,%s,%s\n", op, arg1_str, arg2_str, arg3_str); +} + +static void +DEBUG_STR_FUN_MP2_INT(const char *op, const ibz_t *arg1, const ibz_t *arg2, int arg3) +{ + int arg1_size = ibz_size_in_base(arg1, 16); + char arg1_str[arg1_size + 2]; + ibz_convert_to_str(arg1, arg1_str, 16); + + int 
arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + printf("%s,%s,%s,%x\n", op, arg1_str, arg2_str, arg3); +} + +static void +DEBUG_STR_FUN_INT_MP2(const char *op, int arg1, const ibz_t *arg2, const ibz_t *arg3) +{ + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + int arg3_size = ibz_size_in_base(arg3, 16); + char arg3_str[arg3_size + 2]; + ibz_convert_to_str(arg3, arg3_str, 16); + + if (arg1 >= 0) + printf("%s,%x,%s,%s\n", op, arg1, arg2_str, arg3_str); + else + printf("%s,-%x,%s,%s\n", op, -arg1, arg2_str, arg3_str); +} + +static void +DEBUG_STR_FUN_INT_MP_INT(const char *op, int arg1, const ibz_t *arg2, int arg3) +{ + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + printf("%s,%x,%s,%x\n", op, arg1, arg2_str, arg3); +} + +static void +DEBUG_STR_FUN_4(const char *op, const ibz_t *arg1, const ibz_t *arg2, const ibz_t *arg3, const ibz_t *arg4) +{ + int arg1_size = ibz_size_in_base(arg1, 16); + char arg1_str[arg1_size + 2]; + ibz_convert_to_str(arg1, arg1_str, 16); + + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + int arg3_size = ibz_size_in_base(arg3, 16); + char arg3_str[arg3_size + 2]; + ibz_convert_to_str(arg3, arg3_str, 16); + + int arg4_size = ibz_size_in_base(arg4, 16); + char arg4_str[arg4_size + 2]; + ibz_convert_to_str(arg4, arg4_str, 16); + + printf("%s,%s,%s,%s,%s\n", op, arg1_str, arg2_str, arg3_str, arg4_str); +} +#else +#define DEBUG_STR_PRINTF(x) +#define DEBUG_STR_FUN_INT_MP(op, arg1, arg2) +#define DEBUG_STR_FUN_3(op, arg1, arg2, arg3) +#define DEBUG_STR_FUN_INT_MP2(op, arg1, arg2, arg3) +#define DEBUG_STR_FUN_INT_MP_INT(op, arg1, arg2, arg3) +#define DEBUG_STR_FUN_4(op, arg1, arg2, arg3, arg4) +#endif + +/** @defgroup ibz_t Constants + * @{ + */ + +const __mpz_struct ibz_const_zero[1] = { + { + ._mp_alloc = 0, + ._mp_size = 0, + ._mp_d = (mp_limb_t[]){ 0 }, + } +}; + +const __mpz_struct ibz_const_one[1] = { + { + ._mp_alloc = 0, + ._mp_size = 1, + ._mp_d = (mp_limb_t[]){ 1 }, + } +}; + +const __mpz_struct ibz_const_two[1] = { + { + ._mp_alloc = 0, + ._mp_size = 1, + ._mp_d = (mp_limb_t[]){ 2 }, + } +}; + +const __mpz_struct ibz_const_three[1] = { + { + ._mp_alloc = 0, + ._mp_size = 1, + ._mp_d = (mp_limb_t[]){ 3 }, + } +}; + +void +ibz_init(ibz_t *x) +{ + mpz_init(*x); +} + +void +ibz_finalize(ibz_t *x) +{ + mpz_clear(*x); +} + +void +ibz_add(ibz_t *sum, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_add(*sum, *a, *b); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_add", sum, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_sub(ibz_t *diff, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_sub(*diff, *a, *b); + +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_sub", diff, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_mul(*prod, *a, *b); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_mul", 
prod, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_neg(ibz_t *neg, const ibz_t *a) +{ + mpz_neg(*neg, *a); +} + +void +ibz_abs(ibz_t *abs, const ibz_t *a) +{ + mpz_abs(*abs, *a); +} + +void +ibz_div(ibz_t *quotient, ibz_t *remainder, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_tdiv_qr(*quotient, *remainder, *a, *b); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_4("ibz_div", quotient, remainder, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp; + ibz_init(&a_cp); + ibz_copy(&a_cp, a); +#endif + mpz_tdiv_q_2exp(*quotient, *a, exp); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_MP2_INT("ibz_div_2exp,%Zx,%Zx,%x\n", quotient, &a_cp, exp); + ibz_finalize(&a_cp); +#endif +} + +void +ibz_div_floor(ibz_t *q, ibz_t *r, const ibz_t *n, const ibz_t *d) +{ + mpz_fdiv_qr(*q, *r, *n, *d); +} + +void +ibz_mod(ibz_t *r, const ibz_t *a, const ibz_t *b) +{ + mpz_mod(*r, *a, *b); +} + +unsigned long int +ibz_mod_ui(const mpz_t *n, unsigned long int d) +{ + return mpz_fdiv_ui(*n, d); +} + +int +ibz_divides(const ibz_t *a, const ibz_t *b) +{ + return mpz_divisible_p(*a, *b); +} + +void +ibz_pow(ibz_t *pow, const ibz_t *x, uint32_t e) +{ + mpz_pow_ui(*pow, *x, e); +} + +void +ibz_pow_mod(ibz_t *pow, const ibz_t *x, const ibz_t *e, const ibz_t *m) +{ + mpz_powm(*pow, *x, *e, *m); + DEBUG_STR_FUN_4("ibz_pow_mod", pow, x, e, m); +} + +int +ibz_two_adic(ibz_t *pow) +{ + return mpz_scan1(*pow, 0); +} + +int +ibz_cmp(const ibz_t *a, const ibz_t *b) +{ + int ret = mpz_cmp(*a, *b); + DEBUG_STR_FUN_INT_MP2("ibz_cmp", ret, a, b); + return ret; +} + +int +ibz_is_zero(const ibz_t *x) +{ + int ret = !mpz_cmp_ui(*x, 0); + DEBUG_STR_FUN_INT_MP("ibz_is_zero", ret, x); + return ret; +} + +int +ibz_is_one(const ibz_t *x) +{ + int ret = !mpz_cmp_ui(*x, 1); + DEBUG_STR_FUN_INT_MP("ibz_is_one", ret, x); + return ret; +} + +int +ibz_cmp_int32(const ibz_t *x, int32_t y) +{ + int ret = mpz_cmp_si(*x, (signed long int)y); + DEBUG_STR_FUN_INT_MP_INT("ibz_cmp_int32", ret, x, y); + return ret; +} + +int +ibz_is_even(const ibz_t *x) +{ + int ret = !mpz_tstbit(*x, 0); + DEBUG_STR_FUN_INT_MP("ibz_is_even", ret, x); + return ret; +} + +int +ibz_is_odd(const ibz_t *x) +{ + int ret = mpz_tstbit(*x, 0); + DEBUG_STR_FUN_INT_MP("ibz_is_odd", ret, x); + return ret; +} + +void +ibz_set(ibz_t *i, int32_t x) +{ + mpz_set_si(*i, x); +} + +int +ibz_convert_to_str(const ibz_t *i, char *str, int base) +{ + if (!str || (base != 10 && base != 16)) + return 0; + + mpz_get_str(str, base, *i); + + return 1; +} + +void +ibz_print(const ibz_t *num, int base) +{ + assert(base == 10 || base == 16); + + int num_size = ibz_size_in_base(num, base); + char num_str[num_size + 2]; + ibz_convert_to_str(num, num_str, base); + printf("%s", num_str); +} + +int +ibz_set_from_str(ibz_t *i, const char *str, int base) +{ + return (1 + mpz_set_str(*i, str, base)); +} + +void +ibz_copy(ibz_t *target, const ibz_t *value) +{ + mpz_set(*target, *value); +} + +void +ibz_swap(ibz_t *a, ibz_t *b) +{ + mpz_swap(*a, *b); +} + +int32_t +ibz_get(const ibz_t *i) +{ +#if LONG_MAX == INT32_MAX + return (int32_t)mpz_get_si(*i); +#elif LONG_MAX > INT32_MAX + // Extracts the sign bit and the 31 least significant bits + signed long int t = mpz_get_si(*i); + return (int32_t)((t >> (sizeof(signed long int) * 8 - 32)) & 
INT32_C(0x80000000)) | (t & INT32_C(0x7FFFFFFF)); +#else +#error Unsupported configuration: LONG_MAX must be >= INT32_MAX +#endif +} + +int +ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b) +{ + int randret; + int ret = 1; + mpz_t tmp; + mpz_t bmina; + mpz_init(bmina); + mpz_sub(bmina, *b, *a); + + if (mpz_sgn(bmina) == 0) { + mpz_set(*rand, *a); + mpz_clear(bmina); + return 1; + } + + size_t len_bits = mpz_sizeinbase(bmina, 2); + size_t len_bytes = (len_bits + 7) / 8; + size_t sizeof_limb = sizeof(mp_limb_t); + size_t sizeof_limb_bits = sizeof_limb * 8; + size_t len_limbs = (len_bytes + sizeof_limb - 1) / sizeof_limb; + + mp_limb_t mask = ((mp_limb_t)-1) >> (sizeof_limb_bits - len_bits) % sizeof_limb_bits; + mp_limb_t r[len_limbs]; + +#ifndef NDEBUG + { + for (size_t i = 0; i < len_limbs; ++i) + r[i] = (mp_limb_t)-1; + r[len_limbs - 1] = mask; + mpz_t check; + mpz_roinit_n(check, r, len_limbs); + assert(mpz_cmp(check, bmina) >= 0); // max sampled value >= b - a + mpz_t bmina2; + mpz_init(bmina2); + mpz_add(bmina2, bmina, bmina); + assert(mpz_cmp(check, bmina2) < 0); // max sampled value < 2 * (b - a) + mpz_clear(bmina2); + } +#endif + + do { + randret = randombytes((unsigned char *)r, len_bytes); + if (randret != 0) { + ret = 0; + goto err; + } +#ifdef TARGET_BIG_ENDIAN + for (size_t i = 0; i < len_limbs; ++i) + r[i] = BSWAP_DIGIT(r[i]); +#endif + r[len_limbs - 1] &= mask; + mpz_roinit_n(tmp, r, len_limbs); + if (mpz_cmp(tmp, bmina) <= 0) + break; + } while (1); + + mpz_add(*rand, tmp, *a); +err: + mpz_clear(bmina); + return ret; +} + +int +ibz_rand_interval_i(ibz_t *rand, int32_t a, int32_t b) +{ + uint32_t diff, mask; + int32_t rand32; + + if (!(a >= 0 && b >= 0 && b > a)) { + printf("a = %d b = %d\n", a, b); + } + assert(a >= 0 && b >= 0 && b > a); + + diff = b - a; + + // Create a mask with 1 + ceil(log2(diff)) least significant bits set +#if (defined(__GNUC__) || defined(__clang__)) && INT_MAX == INT32_MAX + mask = (1 << (32 - __builtin_clz((uint32_t)diff))) - 1; +#else + uint32_t diff2 = diff, tmp; + + mask = (diff2 > 0xFFFF) << 4; + diff2 >>= mask; + + tmp = (diff2 > 0xFF) << 3; + diff2 >>= tmp; + mask |= tmp; + + tmp = (diff2 > 0xF) << 2; + diff2 >>= tmp; + mask |= tmp; + + tmp = (diff2 > 0x3) << 1; + diff2 >>= tmp; + mask |= tmp; + + mask |= diff2 >> 1; + + mask = (1 << (mask + 1)) - 1; +#endif + + assert(mask >= diff && mask < 2 * diff); + + // Rejection sampling + do { + randombytes((unsigned char *)&rand32, sizeof(rand32)); + +#ifdef TARGET_BIG_ENDIAN + rand32 = BSWAP32(rand32); +#endif + + rand32 &= mask; + } while (rand32 > (int32_t)diff); + + rand32 += a; + ibz_set(rand, rand32); + + return 1; +} + +int +ibz_rand_interval_minm_m(ibz_t *rand, int32_t m) +{ + int ret = 1; + mpz_t m_big; + + // m_big = 2 * m + mpz_init_set_si(m_big, m); + mpz_add(m_big, m_big, m_big); + + // Sample in [0, 2*m] + ret = ibz_rand_interval(rand, &ibz_const_zero, &m_big); + + // Adjust to range [-m, m] + mpz_sub_ui(*rand, *rand, m); + + mpz_clear(m_big); + + return ret; +} + +int +ibz_rand_interval_bits(ibz_t *rand, uint32_t m) +{ + int ret = 1; + mpz_t tmp; + mpz_t low; + mpz_init_set_ui(tmp, 1); + mpz_mul_2exp(tmp, tmp, m); + mpz_init(low); + mpz_neg(low, tmp); + ret = ibz_rand_interval(rand, &low, &tmp); + mpz_clear(tmp); + mpz_clear(low); + if (ret != 1) + goto err; + mpz_sub_ui(*rand, *rand, (unsigned long int)m); + return ret; +err: + mpz_clear(tmp); + mpz_clear(low); + return ret; +} + +int +ibz_bitsize(const ibz_t *a) +{ + return (int)mpz_sizeinbase(*a, 2); +} + +int 
+ibz_size_in_base(const ibz_t *a, int base) +{ + return (int)mpz_sizeinbase(*a, base); +} + +void +ibz_copy_digits(ibz_t *target, const digit_t *dig, int dig_len) +{ + mpz_import(*target, dig_len, -1, sizeof(digit_t), 0, 0, dig); +} + +void +ibz_to_digits(digit_t *target, const ibz_t *ibz) +{ + // From the GMP documentation: + // "If op is zero then the count returned will be zero and nothing written to rop." + // The next line ensures zero is written to the first limb of target if ibz is zero; + // target is then overwritten by the actual value if it is not. + target[0] = 0; + mpz_export(target, NULL, -1, sizeof(digit_t), 0, 0, *ibz); +} + +int +ibz_probab_prime(const ibz_t *n, int reps) +{ + int ret = mpz_probab_prime_p(*n, reps); + DEBUG_STR_FUN_INT_MP_INT("ibz_probab_prime", ret, n, reps); + return ret; +} + +void +ibz_gcd(ibz_t *gcd, const ibz_t *a, const ibz_t *b) +{ + mpz_gcd(*gcd, *a, *b); +} + +int +ibz_invmod(ibz_t *inv, const ibz_t *a, const ibz_t *mod) +{ + return (mpz_invert(*inv, *a, *mod) ? 1 : 0); +} + +int +ibz_legendre(const ibz_t *a, const ibz_t *p) +{ + return mpz_legendre(*a, *p); +} + +int +ibz_sqrt(ibz_t *sqrt, const ibz_t *a) +{ + if (mpz_perfect_square_p(*a)) { + mpz_sqrt(*sqrt, *a); + return 1; + } else { + return 0; + } +} + +void +ibz_sqrt_floor(ibz_t *sqrt, const ibz_t *a) +{ + mpz_sqrt(*sqrt, *a); +} + +int +ibz_sqrt_mod_p(ibz_t *sqrt, const ibz_t *a, const ibz_t *p) +{ +#ifndef NDEBUG + assert(ibz_probab_prime(p, 100)); +#endif + // Case a = 0 + { + ibz_t test; + ibz_init(&test); + ibz_mod(&test, a, p); + if (ibz_is_zero(&test)) { + ibz_set(sqrt, 0); + } + ibz_finalize(&test); + } +#ifdef DEBUG_VERBOSE + ibz_t a_cp, p_cp; + ibz_init(&a_cp); + ibz_init(&p_cp); + ibz_copy(&a_cp, a); + ibz_copy(&p_cp, p); +#endif + + mpz_t amod, tmp, exp, a4, a2, q, z, qnr, x, y, b, pm1; + mpz_init(amod); + mpz_init(tmp); + mpz_init(exp); + mpz_init(a4); + mpz_init(a2); + mpz_init(q); + mpz_init(z); + mpz_init(qnr); + mpz_init(x); + mpz_init(y); + mpz_init(b); + mpz_init(pm1); + + int ret = 1; + + mpz_mod(amod, *a, *p); + if (mpz_cmp_ui(amod, 0) < 0) { + mpz_add(amod, *p, amod); + } + + if (mpz_legendre(amod, *p) != 1) { + ret = 0; + goto end; + } + + mpz_sub_ui(pm1, *p, 1); + + if (mpz_mod_ui(tmp, *p, 4) == 3) { + // p % 4 == 3 + mpz_add_ui(tmp, *p, 1); + mpz_fdiv_q_2exp(tmp, tmp, 2); + mpz_powm(*sqrt, amod, tmp, *p); + } else if (mpz_mod_ui(tmp, *p, 8) == 5) { + // p % 8 == 5 + mpz_sub_ui(tmp, *p, 1); + mpz_fdiv_q_2exp(tmp, tmp, 2); + mpz_powm(tmp, amod, tmp, *p); // a^{(p-1)/4} mod p + if (!mpz_cmp_ui(tmp, 1)) { + mpz_add_ui(tmp, *p, 3); + mpz_fdiv_q_2exp(tmp, tmp, 3); + mpz_powm(*sqrt, amod, tmp, *p); // a^{(p+3)/8} mod p + } else { + mpz_sub_ui(tmp, *p, 5); + mpz_fdiv_q_2exp(tmp, tmp, 3); // (p - 5) / 8 + mpz_mul_2exp(a4, amod, 2); // 4*a + mpz_powm(tmp, a4, tmp, *p); + + mpz_mul_2exp(a2, amod, 1); + mpz_mul(tmp, a2, tmp); + mpz_mod(*sqrt, tmp, *p); + } + } else { + // p % 8 == 1 -> Shanks-Tonelli + int e = 0; + mpz_sub_ui(q, *p, 1); + while (mpz_tstbit(q, e) == 0) + e++; + mpz_fdiv_q_2exp(q, q, e); + + // 1. find generator - non-quadratic residue + mpz_set_ui(qnr, 2); + while (mpz_legendre(qnr, *p) != -1) + mpz_add_ui(qnr, qnr, 1); + mpz_powm(z, qnr, q, *p); + + // 2. 
Initialize + mpz_set(y, z); + mpz_powm(y, amod, q, *p); // y = a^q mod p + + mpz_add_ui(tmp, q, 1); // tmp = (q + 1) / 2 + mpz_fdiv_q_2exp(tmp, tmp, 1); + + mpz_powm(x, amod, tmp, *p); // x = a^(q + 1)/2 mod p + + mpz_set_ui(exp, 1); + mpz_mul_2exp(exp, exp, e - 2); + + for (int i = 0; i < e; ++i) { + mpz_powm(b, y, exp, *p); + + if (!mpz_cmp(b, pm1)) { + mpz_mul(x, x, z); + mpz_mod(x, x, *p); + + mpz_mul(y, y, z); + mpz_mul(y, y, z); + mpz_mod(y, y, *p); + } + + mpz_powm_ui(z, z, 2, *p); + mpz_fdiv_q_2exp(exp, exp, 1); + } + + mpz_set(*sqrt, x); + } + +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_sqrt_mod_p", sqrt, &a_cp, &p_cp); + ibz_finalize(&a_cp); + ibz_finalize(&p_cp); +#endif + +end: + mpz_clear(amod); + mpz_clear(tmp); + mpz_clear(exp); + mpz_clear(a4); + mpz_clear(a2); + mpz_clear(q); + mpz_clear(z); + mpz_clear(qnr); + mpz_clear(x); + mpz_clear(y); + mpz_clear(b); + mpz_clear(pm1); + + return ret; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/intbig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/intbig.h new file mode 100644 index 0000000000..a0c2c02477 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/intbig.h @@ -0,0 +1,303 @@ +/** @file + * + * @authors Luca De Feo, Sina Schaeffler + * + * @brief Declarations for big integers in the reference implementation + */ + +#ifndef INTBIG_H +#define INTBIG_H + +#include +#if defined(MINI_GMP) +#include +#include +#else +#include +#endif +#include +#include + +/** @ingroup quat_quat + * @defgroup ibz_all Signed big integers (gmp-based) + * @{ + */ + +/** @defgroup ibz_t Precise number types + * @{ + */ + +/** @brief Type for signed long integers + * + * @typedef ibz_t + * + * For integers of arbitrary size, used by intbig module, using gmp + */ +typedef mpz_t ibz_t; + +/** @} + */ + +/** @defgroup ibz_c Constants + * @{ + */ + +/** + * Constant zero + */ +extern const ibz_t ibz_const_zero; + +/** + * Constant one + */ +extern const ibz_t ibz_const_one; + +/** + * Constant two + */ +extern const ibz_t ibz_const_two; + +/** + * Constant three + */ +extern const ibz_t ibz_const_three; + +/** @} + */ + +/** @defgroup ibz_finit Constructors and Destructors + * @{ + */ + +void ibz_init(ibz_t *x); +void ibz_finalize(ibz_t *x); + +/** @} + */ + +/** @defgroup ibz_za Basic integer arithmetic + * @{ + */ + +/** @brief sum=a+b + */ +void ibz_add(ibz_t *sum, const ibz_t *a, const ibz_t *b); + +/** @brief diff=a-b + */ +void ibz_sub(ibz_t *diff, const ibz_t *a, const ibz_t *b); + +/** @brief prod=a*b + */ +void ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b); + +/** @brief neg=-a + */ +void ibz_neg(ibz_t *neg, const ibz_t *a); + +/** @brief abs=|a| + */ +void ibz_abs(ibz_t *abs, const ibz_t *a); + +/** @brief Euclidean division of a by b + * + * Computes quotient, remainder so that remainder+quotient*b = a where 0<=|remainder|<|b| + * The quotient is rounded towards zero. + */ +void ibz_div(ibz_t *quotient, ibz_t *remainder, const ibz_t *a, const ibz_t *b); + +/** @brief Euclidean division of a by 2^exp + * + * Computes a right shift of abs(a) by exp bits, then sets sign(quotient) to sign(a). + * + * Division and rounding is as in ibz_div. 
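+ *
+ * Illustrative example (editorial, not part of the upstream header): for a = -7
+ * and b = 2, ibz_div yields quotient -3 and remainder -1, since the quotient is
+ * rounded towards zero, whereas ibz_div_floor (declared in intbig_internal.h)
+ * yields -4 and 1. Accordingly, ibz_div_2exp with a = -7 and exp = 1 yields -3.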
+ */ +void ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp); + +/** @brief Two adic valuation computation + * + * Computes the position of the first 1 in the binary representation of the integer given in input + * + * When this number is a power of two this gives the two adic valuation of the integer + */ +int ibz_two_adic(ibz_t *pow); + +/** @brief r = a mod b + * + * Assumes valid inputs + * The sign of the divisor is ignored, the result is always non-negative + */ +void ibz_mod(ibz_t *r, const ibz_t *a, const ibz_t *b); + +unsigned long int ibz_mod_ui(const mpz_t *n, unsigned long int d); + +/** @brief Test if a = 0 mod b + */ +int ibz_divides(const ibz_t *a, const ibz_t *b); + +/** @brief pow=x^e + * + * Assumes valid inputs, The case 0^0 yields 1. + */ +void ibz_pow(ibz_t *pow, const ibz_t *x, uint32_t e); + +/** @brief pow=(x^e) mod m + * + * Assumes valid inputs + */ +void ibz_pow_mod(ibz_t *pow, const ibz_t *x, const ibz_t *e, const ibz_t *m); + +/** @brief Compare a and b + * + * @returns a positive value if a > b, zero if a = b, and a negative value if a < b + */ +int ibz_cmp(const ibz_t *a, const ibz_t *b); + +/** @brief Test if x is 0 + * + * @returns 1 if x=0, 0 otherwise + */ +int ibz_is_zero(const ibz_t *x); + +/** @brief Test if x is 1 + * + * @returns 1 if x=1, 0 otherwise + */ +int ibz_is_one(const ibz_t *x); + +/** @brief Compare x to y + * + * @returns 0 if x=y, positive if x>y, negative if x= 0 and target must hold sufficient elements to hold ibz + * + * @param target Target digit_t array + * @param ibz ibz source ibz_t element + */ +void ibz_to_digits(digit_t *target, const ibz_t *ibz); +#define ibz_to_digit_array(T, I) \ + do { \ + memset((T), 0, sizeof(T)); \ + ibz_to_digits((T), (I)); \ + } while (0) + +/** @brief get int32_t equal to the lowest bits of i + * + * Should not be used to get the value of i if its bitsize is close to 32 bit + * It can however be used on any i to get an int32_t of the same parity as i (and same value modulo + * 4) + * + * @param i Input integer + */ +int32_t ibz_get(const ibz_t *i); + +/** @brief generate random value in [a, b] + * assumed that a >= 0 and b >= 0 and a < b + * @returns 1 on success, 0 on failiure + */ +int ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b); + +/** @brief generate random value in [-m, m] + * assumed that m > 0 and bitlength of m < 32 bit + * @returns 1 on success, 0 on failiure + */ +int ibz_rand_interval_minm_m(ibz_t *rand, int32_t m); + +/** @brief Bitsize of a. + * + * @returns Bitsize of a. + * + */ +int ibz_bitsize(const ibz_t *a); + +/** @brief Size of a in given base. + * + * @returns Size of a in given base. 
+ * + */ +int ibz_size_in_base(const ibz_t *a, int base); + +/** @} + */ + +/** @defgroup ibz_n Number theory functions + * @{ + */ + +/** + * @brief Greatest common divisor + * + * @param gcd Output: Set to the gcd of a and b + * @param a + * @param b + */ +void ibz_gcd(ibz_t *gcd, const ibz_t *a, const ibz_t *b); + +/** + * @brief Modular inverse + * + * @param inv Output: Set to the integer in [0,mod[ such that a*inv = 1 mod (mod) if it exists + * @param a + * @param mod + * @returns 1 if inverse exists and was computed, 0 otherwise + */ +int ibz_invmod(ibz_t *inv, const ibz_t *a, const ibz_t *mod); + +/** + * @brief Floor of Integer square root + * + * @param sqrt Output: Set to the floor of an integer square root + * @param a number of which a floor of an integer square root is searched + */ +void ibz_sqrt_floor(ibz_t *sqrt, const ibz_t *a); + +/** @} + */ + +// end of ibz_all +/** @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/intbig_internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/intbig_internal.h new file mode 100644 index 0000000000..de4762a6d3 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/intbig_internal.h @@ -0,0 +1,123 @@ +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations for big integer functions only used in quaternion functions + */ + +#ifndef INTBIG_INTERNAL_H +#define INTBIG_INTERNAL_H + +#include "intbig.h" + +/** @internal + * @ingroup quat_helpers + * @defgroup ibz_helper Internal integer functions (gmp-based) + * @{ + */ + +/********************************************************************/ + +/** @brief Euclidean division of a by b + * + * Computes quotient, remainder so that remainder+quotient*b = a where 0<=|remainder|<|b| + * The quotient is rounded towards minus infinity. + */ +void ibz_div_floor(ibz_t *q, ibz_t *r, const ibz_t *n, const ibz_t *d); + +/** @brief generate random value in [a, b] + * assumed that a >= 0, b >= 0 and a < b + * @returns 1 on success, 0 on failiure + */ +int ibz_rand_interval_i(ibz_t *rand, int32_t a, int32_t b); + +/** @brief generate random value in [-2^m, 2^m] + * assumed that m > 0 and bitlength of m < 32 bit + * @returns 1 on success, 0 on failiure + */ +int ibz_rand_interval_bits(ibz_t *rand, uint32_t m); + +/** @brief set str to a string containing the representation of i in base + * + * Base should be 10 or 16 + * + * str should be an array of length enough to store the representation of in + * in base, which can be obtained by ibz_sizeinbase(i, base) + 2, where the 2 + * is for the sign and the null terminator + * + * Case for base 16 does not matter + * + * @returns 1 if the integer could be converted to a string, 0 otherwise + */ +int ibz_convert_to_str(const ibz_t *i, char *str, int base); + +/** @brief print num in base to stdout + * + * Base should be 10 or 16 + */ +void ibz_print(const ibz_t *num, int base); + +/** @brief set i to integer contained in string when read as number in base + * + * Base should be 10 or 16, and the number should be written without ponctuation or whitespaces + * + * Case for base 16 does not matter + * + * @returns 1 if the string could be converted to an integer, 0 otherwise + */ +int ibz_set_from_str(ibz_t *i, const char *str, int base); + +/** + * @brief Probabilistic primality test + * + * @param n The number to test + * @param reps Number of Miller-Rabin repetitions. 
The more, the slower and the less likely are + * false positives + * @return 1 if probably prime, 0 if certainly not prime, 2 if certainly prime + * + * Using GMP's implementation: + * + * From GMP's documentation: "This function performs some trial divisions, a Baillie-PSW probable + * prime test, then reps-24 Miller-Rabin probabilistic primality tests." + */ +int ibz_probab_prime(const ibz_t *n, int reps); + +/** + * @brief Square root modulo a prime + * + * @returns 1 if square root of a mod p exists and was computed, 0 otherwise + * @param sqrt Output: Set to a square root of a mod p if any exist + * @param a number of which a square root mod p is searched + * @param p assumed prime + */ +int ibz_sqrt_mod_p(ibz_t *sqrt, const ibz_t *a, const ibz_t *p); + +/** + * @brief Integer square root of a perfect square + * + * @returns 1 if an integer square root of a exists and was computed, 0 otherwise + * @param sqrt Output: Set to a integer square root of a if any exist + * @param a number of which an integer square root is searched + */ +int ibz_sqrt(ibz_t *sqrt, const ibz_t *a); + +/** + * @brief Legendre symbol of a mod p + * + * @returns Legendre symbol of a mod p + * @param a + * @param p assumed prime + * + * Uses GMP's implementation + * + * If output is 1, a is a square mod p, if -1, not. If 0, it is divisible by p + */ +int ibz_legendre(const ibz_t *a, const ibz_t *p); + +/** @} + */ + +// end of ibz_all +/** @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/integers.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/integers.c new file mode 100644 index 0000000000..ec7cda05eb --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/integers.c @@ -0,0 +1,116 @@ +#include +#include "internal.h" +#include +#include +#include + +// Random prime generation for tests +int +ibz_generate_random_prime(ibz_t *p, int is3mod4, int bitsize, int probability_test_iterations) +{ + assert(bitsize != 0); + int found = 0; + ibz_t two_pow, two_powp; + + ibz_init(&two_pow); + ibz_init(&two_powp); + ibz_pow(&two_pow, &ibz_const_two, (bitsize - 1) - (0 != is3mod4)); + ibz_pow(&two_powp, &ibz_const_two, bitsize - (0 != is3mod4)); + + int cnt = 0; + while (!found) { + cnt++; + if (cnt % 100000 == 0) { + printf("Random prime generation is still running after %d attempts, this is not " + "normal! 
The expected number of attempts is %d \n", + cnt, + bitsize); + } + ibz_rand_interval(p, &two_pow, &two_powp); + ibz_add(p, p, p); + if (is3mod4) { + ibz_add(p, p, p); + ibz_add(p, &ibz_const_two, p); + } + ibz_add(p, &ibz_const_one, p); + + found = ibz_probab_prime(p, probability_test_iterations); + } + ibz_finalize(&two_pow); + ibz_finalize(&two_powp); + return found; +} + +// solves x^2 + n y^2 == p for positive integers x, y +// assumes that p is prime and -n mod p is a square +int +ibz_cornacchia_prime(ibz_t *x, ibz_t *y, const ibz_t *n, const ibz_t *p) +{ + ibz_t r0, r1, r2, a, prod; + ibz_init(&r0); + ibz_init(&r1); + ibz_init(&r2); + ibz_init(&a); + ibz_init(&prod); + + int res = 0; + + // manage case p = 2 separately + if (!ibz_cmp(p, &ibz_const_two)) { + if (ibz_is_one(n)) { + ibz_set(x, 1); + ibz_set(y, 1); + res = 1; + } + goto done; + } + // manage case p = n separately + if (!ibz_cmp(p, n)) { + ibz_set(x, 0); + ibz_set(y, 1); + res = 1; + goto done; + } + + // test coprimality (should always be ok in our cases) + ibz_gcd(&r2, p, n); + if (!ibz_is_one(&r2)) + goto done; + + // get sqrt of -n mod p + ibz_neg(&r2, n); + if (!ibz_sqrt_mod_p(&r2, &r2, p)) + goto done; + + // run loop + ibz_copy(&prod, p); + ibz_copy(&r1, p); + ibz_copy(&r0, p); + while (ibz_cmp(&prod, p) >= 0) { + ibz_div(&a, &r0, &r2, &r1); + ibz_mul(&prod, &r0, &r0); + ibz_copy(&r2, &r1); + ibz_copy(&r1, &r0); + } + // test if result is solution + ibz_sub(&a, p, &prod); + ibz_div(&a, &r2, &a, n); + if (!ibz_is_zero(&r2)) + goto done; + if (!ibz_sqrt(y, &a)) + goto done; + + ibz_copy(x, &r0); + ibz_mul(&a, y, y); + ibz_mul(&a, &a, n); + ibz_add(&prod, &prod, &a); + res = !ibz_cmp(&prod, p); + +done: + ibz_finalize(&r0); + ibz_finalize(&r1); + ibz_finalize(&r2); + ibz_finalize(&a); + ibz_finalize(&prod); + return res; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/internal.h new file mode 100644 index 0000000000..edbba345f9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/internal.h @@ -0,0 +1,812 @@ +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations for helper functions for quaternion algebra implementation + */ + +#ifndef QUAT_HELPER_H +#define QUAT_HELPER_H + +#include +#include +#include "intbig_internal.h" + +/** @internal + * @ingroup quat_quat + * @defgroup quat_helpers Quaternion module internal functions + * @{ + */ + +/** @internal + * @defgroup quat_alg_helpers Helper functions for the alg library + * @{ + */ + +/** @internal + * @brief helper function for initializing small quaternion algebras. 
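+ *
+ * (Editorial note, stated as an assumption consistent with the rest of this
+ * module: the algebra is set up in the basis 1, i, j, ij with i^2 = -1 and
+ * j^2 = -p. Since p is passed as an unsigned int, this helper is only meant
+ * for the small test primes used in test/lattice, test/ideal and test/algebra.)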
+ */ +void quat_alg_init_set_ui(quat_alg_t *alg, + unsigned int p); // test/lattice, test/ideal, test/algebra + +/** @brief a*b + * + * Multiply two coordinate vectors as elements of the algebra in basis (1,i,j,ij) with i^2 = -1, j^2 + * = -p + * + * @param res Output: Will contain product + * @param a + * @param b + * @param alg The quaternion algebra + */ +void quat_alg_coord_mul(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b, const quat_alg_t *alg); + +/** @brief a=b + * + * Test if a and b represent the same quaternion algebra element + * + * @param a + * @param b + * @returns 1 if a=b, 0 otherwise + */ +int quat_alg_elem_equal(const quat_alg_elem_t *a, const quat_alg_elem_t *b); + +/** @brief Test if x is 0 + * + * @returns 1 if x=0, 0 otherwise + * + * x is 0 iff all coordinates in x->coord are 0 + */ +int quat_alg_elem_is_zero(const quat_alg_elem_t *x); + +/** @brief Compute same denominator form of two quaternion algebra elements + * + * res_a=a and res_b=b (representing the same element) and res_a.denom = res_b.denom + * + * @param res_a + * @param res_b + * @param a + * @param b + */ +void quat_alg_equal_denom(quat_alg_elem_t *res_a, + quat_alg_elem_t *res_b, + const quat_alg_elem_t *a, + const quat_alg_elem_t *b); + +/** @brief Copies the given values into an algebra element, without normalizing it + * + * @param elem Output: algebra element of coordinates [coord0,coord1,coord2,coord3] and denominator + * denom + * @param denom Denominator, must be non zero + * @param coord0 Coordinate on 1 (0th vector of standard algebra basis) + * @param coord1 Coordinate on i (1st vector of standard algebra basis) + * @param coord2 Coordinate on j (2nd vector of standard algebra basis) + * @param coord3 Coordinate on ij (3rd vector of standard algebra basis) + */ +void quat_alg_elem_copy_ibz(quat_alg_elem_t *elem, + const ibz_t *denom, + const ibz_t *coord0, + const ibz_t *coord1, + const ibz_t *coord2, + const ibz_t *coord3); + +/** @brief Sets an algebra element to the given integer values, without normalizing it + * + * @param elem Output: algebra element of coordinates [coord0,coord1,coord2,coord3] and denominator + * denom + * @param denom Denominator, must be non zero + * @param coord0 Coordinate on 1 (0th vector of standard algebra basis) + * @param coord1 Coordinate on i (1st vector of standard algebra basis) + * @param coord2 Coordinate on j (2nd vector of standard algebra basis) + * @param coord3 Coordinate on ij (3rd vector of standard algebra basis) + */ +void quat_alg_elem_set(quat_alg_elem_t *elem, + int32_t denom, + int32_t coord0, + int32_t coord1, + int32_t coord2, + int32_t coord3); + +/** + * @brief Creates algebra element from scalar + * + * Resulting element has 1-coordinate equal to numerator/denominator + * + * @param elem Output: algebra element with numerator/denominator as first coordiante + * (1-coordinate), 0 elsewhere (i,j,ij coordinates) + * @param numerator + * @param denominator Assumed non zero + */ +void quat_alg_scalar(quat_alg_elem_t *elem, const ibz_t *numerator, const ibz_t *denominator); + +/** @brief a+b for algebra elements + * + * @param res Output + * @param a Algebra element + * @param b Algebra element + */ +void quat_alg_add(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b); + +/** @brief a-b for algebra elements + * + * @param res Output + * @param a Algebra element + * @param b Algebra element + */ +void quat_alg_sub(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b); + +/** @brief 
Multiplies algebra element by integer scalar, without normalizing it + * + * @param res Output + * @param scalar Integer + * @param elem Algebra element + */ +void quat_alg_elem_mul_by_scalar(quat_alg_elem_t *res, const ibz_t *scalar, const quat_alg_elem_t *elem); + +/** @} + */ + +/** @internal + * @defgroup quat_dim4_helpers Helper functions for functions for matrices or vectors in dimension 4 + * @{ + */ + +/** @internal + * @defgroup quat_inv_helpers Helper functions for the integer matrix inversion function + * @{ + */ + +/** @brief a1a2+b1b2+c1c2 + * + * @param coeff Output: The coefficien which was computed as a1a2+b1b2-c1c2 + * @param a1 + * @param a2 + * @param b1 + * @param b2 + * @param c1 + * @param c2 + */ +void ibz_inv_dim4_make_coeff_pmp(ibz_t *coeff, + const ibz_t *a1, + const ibz_t *a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2); + +/** @brief -a1a2+b1b2-c1c2 + * + * @param coeff Output: The coefficien which was computed as -a1a2+b1b2-c1c2 + * @param a1 + * @param a2 + * @param b1 + * @param b2 + * @param c1 + * @param c2 + */ +void ibz_inv_dim4_make_coeff_mpm(ibz_t *coeff, + const ibz_t *a1, + const ibz_t *a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2); + +/** @brief Matrix determinant and a matrix inv such that inv/det is the inverse matrix of the input + * + * Implemented following the methof of 2x2 minors explained at Method from + * https://www.geometrictools.com/Documentation/LaplaceExpansionTheorem.pdf (visited on 3rd of May + * 2023, 16h15 CEST) + * + * @returns 1 if the determinant of mat is not 0 and an inverse was computed, 0 otherwise + * @param inv Output: Will contain an integer matrix which, dividet by det, will yield the rational + * inverse of the matrix if it exists, can be NULL + * @param det Output: Will contain the determinant of the input matrix, can be NULL + * @param mat Matrix of which the inverse will be computed + */ +int ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_4x4_t *mat); + +/** @} + */ + +/** @internal + * @defgroup quat_dim4_lat_helpers Helper functions on vectors and matrices used mainly for lattices + * @{ + */ + +/** @brief Copy all values from one vector to another + * + * @param new Output: is set to same values as vec + * @param vec + */ +void ibz_vec_4_copy(ibz_vec_4_t *new, const ibz_vec_4_t *vec); + +/** @brief set res to values coord0,coord1,coord2,coord3 + * + * @param res Output: Will contain vector (coord0,coord1,coord2,coord3) + * @param coord0 + * @param coord1 + * @param coord2 + * @param coord3 + */ +void ibz_vec_4_copy_ibz(ibz_vec_4_t *res, + const ibz_t *coord0, + const ibz_t *coord1, + const ibz_t *coord2, + const ibz_t *coord3); + +/** @brief Set a vector of 4 integers to given values + * + * @param vec Output: is set to given coordinates + * @param coord0 + * @param coord1 + * @param coord2 + * @param coord3 + */ +void ibz_vec_4_set(ibz_vec_4_t *vec, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3); + +/** @brief a+b + * + * Add two integer 4-vectors + * + * @param res Output: Will contain sum + * @param a + * @param b + */ +void ibz_vec_4_add(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b); + +/** @brief a-b + * + * Substract two integer 4-vectors + * + * @param res Output: Will contain difference + * @param a + * @param b + */ +void ibz_vec_4_sub(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b); + +/** @brief x=0 + * + * Test if a vector x has only zero coordinates + * + * 
@returns 0 if x has at least one non-zero coordinates, 1 otherwise + * @param x + */ +int ibz_vec_4_is_zero(const ibz_vec_4_t *x); + +/** @brief Compute the linear combination lc = coeff_a vec_a + coeff_b vec_b + * + * @param lc Output: linear combination lc = coeff_a vec_a + coeff_b vec_b + * @param coeff_a Scalar multiplied to vec_a + * @param vec_a + * @param coeff_b Scalar multiplied to vec_b + * @param vec_b + */ +void ibz_vec_4_linear_combination(ibz_vec_4_t *lc, + const ibz_t *coeff_a, + const ibz_vec_4_t *vec_a, + const ibz_t *coeff_b, + const ibz_vec_4_t *vec_b); + +/** @brief multiplies all values in vector by same scalar + * + * @param prod Output + * @param scalar + * @param vec + */ +void ibz_vec_4_scalar_mul(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec); + +/** @brief divides all values in vector by same scalar + * + * @returns 1 if scalar divided all values in mat, 0 otherwise (division is performed in both cases) + * @param quot Output + * @param scalar + * @param vec + */ +int ibz_vec_4_scalar_div(ibz_vec_4_t *quot, const ibz_t *scalar, const ibz_vec_4_t *vec); + +/** @brief Negation for vectors of 4 integers + * + * @param neg Output: is set to -vec + * @param vec + */ +void ibz_vec_4_negate(ibz_vec_4_t *neg, const ibz_vec_4_t *vec); + +/** + * @brief content of a 4-vector of integers + * + * The content is the GCD of all entries. + * + * @param v A 4-vector of integers + * @param content Output: the resulting gcd + */ +void ibz_vec_4_content(ibz_t *content, const ibz_vec_4_t *v); + +/** @brief -mat for mat a 4x4 integer matrix + * + * @param neg Output: is set to -mat + * @param mat Input matrix + */ +void ibz_mat_4x4_negate(ibz_mat_4x4_t *neg, const ibz_mat_4x4_t *mat); + +/** @brief Set all coefficients of a matrix to zero for 4x4 integer matrices + * + * @param zero + */ +void ibz_mat_4x4_zero(ibz_mat_4x4_t *zero); + +/** @brief Set a matrix to the identity for 4x4 integer matrices + * + * @param id + */ +void ibz_mat_4x4_identity(ibz_mat_4x4_t *id); + +/** @brief Test equality to identity for 4x4 integer matrices + * + * @returns 1 if mat is the identity matrix, 0 otherwise + * @param mat + */ +int ibz_mat_4x4_is_identity(const ibz_mat_4x4_t *mat); + +/** @brief Equality test for 4x4 integer matrices + * + * @returns 1 if equal, 0 otherwise + * @param mat1 + * @param mat2 + */ +int ibz_mat_4x4_equal(const ibz_mat_4x4_t *mat1, const ibz_mat_4x4_t *mat2); + +/** @brief Copies all values from a 4x4 integer matrix to another one + * + * @param new Output: matrix which will have its entries set to mat's entries + * @param mat Input matrix + */ +void ibz_mat_4x4_copy(ibz_mat_4x4_t *new, const ibz_mat_4x4_t *mat); + +/** @brief Matrix by integer multiplication + * + * @param prod Output + * @param scalar + * @param mat + */ +void ibz_mat_4x4_scalar_mul(ibz_mat_4x4_t *prod, const ibz_t *scalar, const ibz_mat_4x4_t *mat); + +/** @brief gcd of all values in matrix + * + * @param gcd Output + * @param mat + */ +void ibz_mat_4x4_gcd(ibz_t *gcd, const ibz_mat_4x4_t *mat); + +/** @brief Verifies whether the 4x4 input matrix is in Hermite Normal Form + * + * @returns 1 if mat is in HNF, 0 otherwise + * @param mat Matrix to be tested + */ +int ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat); + +/** @brief Hermite Normal Form of a matrix of 8 integer vectors, computed using a multiple of its + * determinant as modulo + * + * Algorithm used is the one at number 2.4.8 in Henri Cohen's "A Course in Computational Algebraic + * Number Theory" (Springer Verlag, in series 
"Graduate texts in Mathematics") from 1993 + * + * @param hnf Output: Matrix in Hermite Normal Form generating the same lattice as generators + * @param generators matrix whose colums generate the same lattice than the output + * @param generator_number number of generators given + * @param mod integer, must be a multiple of the volume of the lattice generated by the columns of + * generators + */ +void ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, + int generator_number, + const ibz_vec_4_t *generators, + const ibz_t *mod); + +/** @} + */ +/** @} + */ + +/** @internal + * @defgroup quat_dim2_helpers Helper functions for dimension 2 + * @{ + */ + +/** @brief Set vector coefficients to the given integers + * + * @param vec Output: Vector + * @param a0 + * @param a1 + */ +void ibz_vec_2_set(ibz_vec_2_t *vec, int a0, int a1); // test/dim2 + +/** @brief Set matrix coefficients to the given integers + * + * @param mat Output: Matrix + * @param a00 + * @param a01 + * @param a10 + * @param a11 + */ +void ibz_mat_2x2_set(ibz_mat_2x2_t *mat, int a00, int a01, int a10, int a11); // test/dim2 + +void ibz_mat_2x2_add(ibz_mat_2x2_t *sum, const ibz_mat_2x2_t *a, + const ibz_mat_2x2_t *b); // unused + +/** @brief Determinant of a 2x2 integer matrix given as 4 integers + * + * @param det Output: Determinant of the matrix + * @param a11 matrix coefficient (upper left corner) + * @param a12 matrix coefficient (upper right corner) + * @param a21 matrix coefficient (lower left corner) + * @param a22 matrix coefficient (lower right corner) + */ +void ibz_mat_2x2_det_from_ibz(ibz_t *det, + const ibz_t *a11, + const ibz_t *a12, + const ibz_t *a21, + const ibz_t *a22); // dim4 + +/** + * @brief a*b for 2x2 integer matrices modulo m + * + * @param prod Output matrix + * @param mat_a Input matrix + * @param mat_b Input matrix + * @param m Integer modulo + */ +void ibz_2x2_mul_mod(ibz_mat_2x2_t *prod, + const ibz_mat_2x2_t *mat_a, + const ibz_mat_2x2_t *mat_b, + const ibz_t *m); // test/dim2 +/** @} + */ + +/** @internal + * @defgroup quat_lattice_helper Helper functions for the lattice library (dimension 4) + * @{ + */ + +/** + * @brief Modifies a lattice to put it in hermite normal form + * + * In-place modification of the lattice. + * + * @param lat input lattice + * + * On a correct lattice this function changes nothing (since it is already in HNF), but it can be + * used to put a handmade one in correct form in order to use the other lattice functions. + */ +void quat_lattice_hnf(quat_lattice_t *lat); // lattice, test/lattice, test/algebra, + +/** + * @brief Lattice equality + * + * Lattice bases are assumed to be under HNF, but denominators are free. + * + * @returns 1 if both lattices are equal, 0 otherwise + * @param lat1 + * @param lat2 + */ +int quat_lattice_equal(const quat_lattice_t *lat1, + const quat_lattice_t *lat2); // ideal, lattice, test/lattice, test/ideal + +/** + * @brief Lattice inclusion test + * + * Lattice bases are assumed to be under HNF, but denominators are free. 
+ * + * @returns 1 if sublat is included in overlat, 0 otherwise + * @param sublat Lattice whose inclusion in overlat will be testes + * @param overlat + */ +int quat_lattice_inclusion(const quat_lattice_t *sublat, + const quat_lattice_t *overlat); // test/lattice, test/ideal + +/** @brief Divides basis and denominator of a lattice by their gcd + * + * @param reduced Output + * @param lat Lattice + */ +void quat_lattice_reduce_denom(quat_lattice_t *reduced, + const quat_lattice_t *lat); // lattice, ideal, + +/** @brief a+b for lattices + * + * @param res Output + * @param lat1 Lattice + * @param lat2 Lattice + */ +void quat_lattice_add(quat_lattice_t *res, + const quat_lattice_t *lat1, + const quat_lattice_t *lat2); // ideal, lattice, test/lattice + +/** @brief a*b for lattices + * + * @param res Output + * @param lat1 Lattice + * @param lat2 Lattice + * @param alg The quaternion algebra + */ +void quat_lattice_mul(quat_lattice_t *res, + const quat_lattice_t *lat1, + const quat_lattice_t *lat2, + const quat_alg_t *alg); // ideal, lattie, test/ideal, test/lattice + +/** + * @brief Computes the dual lattice of lat, without putting its basis in HNF + * + * This function returns a lattice not under HNF. For careful internal use only. + * + * Computation method described in https://cseweb.ucsd.edu/classes/sp14/cse206A-a/lec4.pdf consulted + * on 19 of May 2023, 12h40 CEST + * + * @param dual Output: The dual lattice of lat. ATTENTION: is not under HNF. hnf computation must be + * applied before using lattice functions on it + * @param lat lattice, the dual of it will be computed + */ +void quat_lattice_dual_without_hnf(quat_lattice_t *dual, + const quat_lattice_t *lat); // lattice, ideal + +/** + * @brief Multiply all columns of lat with coord (as algebra elements) + * + * The columns and coord are seen as algebra elements in basis 1,i,j,ij, i^2 = -1, j^2 = -p). Coord + * is multiplied to the right of lat. + * + * The output matrix is not under HNF. + * + * @param prod Output: Matrix not under HND whose columns represent the algebra elements obtained as + * L*coord for L column of lat. + * @param lat Matrix whose columns are algebra elements in basis (1,i,j,ij) + * @param coord Integer coordinate algebra element in basis (1,i,j,ij) + * @param alg The quaternion algebra + */ +void quat_lattice_mat_alg_coord_mul_without_hnf(ibz_mat_4x4_t *prod, + const ibz_mat_4x4_t *lat, + const ibz_vec_4_t *coord, + const quat_alg_t *alg); // lattice + +/** @brief The index of sublat into overlat + * + * Assumes inputs are in HNF. + * + * @param index Output + * @param sublat A lattice in HNF, must be sublattice of overlat + * @param overlat A lattice in HNF, must be overlattice of sublat + */ +void quat_lattice_index(ibz_t *index, const quat_lattice_t *sublat, + const quat_lattice_t *overlat); // ideal + +/** @brief Compute the Gram matrix of the quaternion trace bilinear form + * + * Given a lattice of the quaternion algebra, computes the Gram matrix + * of the bilinear form + * + * 〈a,b〉 := [lattice->denom^2] Tr(a·conj(b)) + * + * multiplied by the square of the denominator of the lattice. + * + * This matrix always has integer entries. 
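+ *
+ * (Editorial note: in the basis (1, i, j, ij) with i^2 = -1 and j^2 = -p used by
+ * this module, Tr(a·conj(b)) = 2·(a0·b0 + a1·b1 + p·(a2·b2 + a3·b3)), which is
+ * why the form scaled by the squared denominator is integral as soon as the
+ * scaled basis vectors have integer coordinates.)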
+ * + * @param G Output: Gram matrix of the trace bilinear form on the lattice, multiplied by the square + * of the denominator of the lattice + * @param lattice A lattice + * @param alg The quaternion algebra + */ +void quat_lattice_gram(ibz_mat_4x4_t *G, const quat_lattice_t *lattice, const quat_alg_t *alg); + +/** + * @brief Compute an integer parallelogram containing the ball of + * given radius for the positive definite quadratic form defined by + * the Gram matrix G. + * + * The computed parallelogram is defined by the vectors + * + * (x₁ x₂ x₃ x₄) · U + * + * with x_i ∈ [ -box[i], box[i] ]. + * + * @param box Output: bounds of the parallelogram + * @param U Output: Unimodular transformation defining the parallelogram + * @param G Gram matrix of the quadratic form, must be full rank + * @param radius Radius of the ball, must be non-negative + * @returns 0 if the box only contains the origin, 1 otherwise + */ +int quat_lattice_bound_parallelogram(ibz_vec_4_t *box, ibz_mat_4x4_t *U, const ibz_mat_4x4_t *G, const ibz_t *radius); + +/** @} + */ + +/** @internal + * @defgroup quat_lideal_helper Helper functions for ideals and orders + * @{ + */ +/** @brief Set norm of an ideal given its lattice and parent order + * + * @param lideal In/Output: Ideal which has lattice and parent_order correctly set, but not + * necessarily the norm. Will have norm correctly set too. + */ +void quat_lideal_norm(quat_left_ideal_t *lideal); // ideal + +/** + * @brief Left principal ideal of order, generated by x + * + * @param lideal Output: left ideal + * @param alg quaternion algebra + * @param order maximal order of alg whose left ideal is searched + * @param x generating element + * + * Creates the left ideal in 'order' generated by the element 'x' + */ +void quat_lideal_create_principal(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const quat_lattice_t *order, + const quat_alg_t *alg); // ideal, test/ideal + +/** + * @brief Equality test for left ideals + * + * @returns 1 if both left ideals are equal, 0 otherwise + * @param lideal1 left ideal + * @param lideal2 left ideal + * @param alg the quaternion algebra + */ +int quat_lideal_equals(const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); // test/ideal + +/** + * @brief Sum of two left ideals + * + * @param sum Output: Left ideal which is the sum of the 2 inputs + * @param lideal1 left ideal + * @param lideal2 left ideal + * @param alg the quaternion algebra + */ +void quat_lideal_add(quat_left_ideal_t *sum, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); // Not used outside + +/** + * @brief Left ideal product of left ideal I and element alpha + * + * @param product Output: lideal I*alpha, must have integer norm + * @param lideal left ideal + * @param alpha element multiplied to lideal to get the product ideal + * @param alg the quaternion algebra + * + * I*alpha where I is a left-ideal and alpha an element of the algebra + * + * The resulting ideal must have an integer norm + * + */ +void quat_lideal_mul(quat_left_ideal_t *product, + const quat_left_ideal_t *lideal, + const quat_alg_elem_t *alpha, + const quat_alg_t *alg); // test/ideal + +/** @brief Computes the inverse ideal (for a left ideal of a maximal order) without putting it under + * HNF + * + * This function returns a lattice not under HNF. 
For careful internal use only + * + * Computes the inverse ideal for lideal as conjugate(lideal)/norm(lideal) + * + * @param inv Output: lattice which is lattice representation of the inverse ideal of lideal + * ATTENTION: is not under HNF. hnf computation must be applied before using lattice functions on it + * @param lideal Left ideal of a maximal order in alg + * @param alg The quaternion algebra + */ +void quat_lideal_inverse_lattice_without_hnf(quat_lattice_t *inv, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg); // ideal + +/** @brief Computes the right transporter of two left ideals of the same maximal order + * + * Following the implementation of ideal isomorphisms in the code of LearningToSQI's sage + * implementation of SQIsign. Computes the right transporter of (J:I) as inverse(I)J. + * + * @param trans Output: lattice which is right transporter from lideal1 to lideal2 (lideal2:lideal1) + * @param lideal1 Left ideal of the same maximal order than lideal1 in alg + * @param lideal2 Left ideal of the same maximal order than lideal1 in alg + * @param alg The quaternion algebra + */ +void quat_lideal_right_transporter(quat_lattice_t *trans, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); + +/** + * @brief Right order of a left ideal + * + * @param order Output: right order of the given ideal + * @param lideal left ideal + * @param alg the quaternion algebra + */ +void quat_lideal_right_order(quat_lattice_t *order, const quat_left_ideal_t *lideal, + const quat_alg_t *alg); // ideal + +/** + * @brief Gram matrix of the trace map of the ideal class + * + * Compute the Gram matrix of the bilinear form + * + * 〈a, b〉 := Tr(a·conj(b)) / norm(lideal) + * + * on the basis of the ideal. This matrix has integer entries and its + * integer congruence class only depends on the ideal class. + * + * @param G Output: Gram matrix of the trace map + * @param lideal left ideal + * @param alg the quaternion algebra + */ +void quat_lideal_class_gram(ibz_mat_4x4_t *G, const quat_left_ideal_t *lideal, const quat_alg_t *alg); + +/** @brief Test if order is maximal + * + * Checks if the discriminant of the order equals the prime p defining the quaternion algebra. + * + * It is not verified whether the order is really an order. The output 1 only means that if it is an + * order, then it is maximal. 
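+ *
+ * (Editorial example, assuming the p ≡ 3 (mod 4) setting used by this scheme:
+ * the order O0 = (1, i, (i+j)/2, (1+ij)/2) set by quat_lattice_O0_set below has
+ * reduced discriminant p and therefore passes this test.)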
+ * + * @returns 1 if order is maximal (assuming it is an order), 0 otherwise + * @param order An order of the quaternion algebra (assumes to be an order, this is not tested) + * @param alg The quaternion algebra + */ +int quat_order_is_maximal(const quat_lattice_t *order, + const quat_alg_t *alg); // ideal (only in asserts) + +/** @brief Compute the discriminant of an order as sqrt(det(gram(reduced_norm))) + * + * @param disc: Output: The discriminant sqrt(det(gram(reduced_norm))) + * @param order An order of the quaternion algebra + * @param alg The quaternion algebra + */ +int quat_order_discriminant(ibz_t *disc, const quat_lattice_t *order, + const quat_alg_t *alg); // ideal + +/** @} + */ + +/** @internal + * @ingroup quat_normeq + * @{ + */ + +/** @brief Set lattice to O0 + * + * @param O0 Lattice to be set to (1,i,(i+j)/2,(1+ij)/2) + */ +void quat_lattice_O0_set(quat_lattice_t *O0); + +/** @brief Set p-extremal maximal order to O0 + * + * @param O0 p-extremal order to be set to (1,i,(i+j)/2,(1+ij)/2) + */ +void quat_lattice_O0_set_extremal(quat_p_extremal_maximal_order_t *O0); + +/** + * @brief Create an element of a extremal maximal order from its coefficients + * + * @param elem Output: the quaternion element + * @param order the order + * @param coeffs the vector of 4 ibz coefficients + * @param Bpoo quaternion algebra + * + * elem = x + z*y + z*u + t*z*v + * where coeffs = [x,y,u,v] and t = order.t z = order.z + * + */ +void quat_order_elem_create(quat_alg_elem_t *elem, + const quat_p_extremal_maximal_order_t *order, + const ibz_vec_4_t *coeffs, + const quat_alg_t *Bpoo); // normeq, untested + +/** @} + */ +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/isog.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/isog.h new file mode 100644 index 0000000000..b251ca3cdc --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/isog.h @@ -0,0 +1,28 @@ +#ifndef _ISOG_H_ +#define _ISOG_H_ +#include +#include + +/* KPS structure for isogenies of degree 2 or 4 */ +typedef struct +{ + ec_point_t K; +} ec_kps2_t; +typedef struct +{ + ec_point_t K[3]; +} ec_kps4_t; + +void xisog_2(ec_kps2_t *kps, ec_point_t *B, const ec_point_t P); // degree-2 isogeny construction +void xisog_2_singular(ec_kps2_t *kps, ec_point_t *B24, ec_point_t A24); + +void xisog_4(ec_kps4_t *kps, ec_point_t *B, const ec_point_t P); // degree-4 isogeny construction +void xisog_4_singular(ec_kps4_t *kps, ec_point_t *B24, const ec_point_t P, ec_point_t A24); + +void xeval_2(ec_point_t *R, ec_point_t *const Q, const int lenQ, const ec_kps2_t *kps); +void xeval_2_singular(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps2_t *kps); + +void xeval_4(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps4_t *kps); +void xeval_4_singular(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_point_t P, const ec_kps4_t *kps); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/isog_chains.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/isog_chains.c new file mode 100644 index 0000000000..abc9808057 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/isog_chains.c @@ -0,0 +1,241 @@ +#include "isog.h" +#include + +// since we use degree 4 isogeny steps, we need to handle the odd case with care +static uint32_t +ec_eval_even_strategy(ec_curve_t *curve, + ec_point_t *points, + unsigned len_points, + const ec_point_t *kernel, + const int isog_len) +{ + ec_curve_normalize_A24(curve); + ec_point_t A24; + copy_point(&A24, &curve->A24); + + int 
space = 1; + for (int i = 1; i < isog_len; i *= 2) + ++space; + + // Stack of remaining kernel points and their associated orders + ec_point_t splits[space]; + uint16_t todo[space]; + splits[0] = *kernel; + todo[0] = isog_len; + + int current = 0; // Pointer to current top of stack + + // Chain of 4-isogenies + for (int j = 0; j < isog_len / 2; ++j) { + assert(current >= 0); + assert(todo[current] >= 1); + // Get the next point of order 4 + while (todo[current] != 2) { + assert(todo[current] >= 3); + // A new split will be added + ++current; + assert(current < space); + // We set the seed of the new split to be computed and saved + copy_point(&splits[current], &splits[current - 1]); + // if we copied from the very first element, then we perform one additional doubling + unsigned num_dbls = todo[current - 1] / 4 * 2 + todo[current - 1] % 2; + todo[current] = todo[current - 1] - num_dbls; + while (num_dbls--) + xDBL_A24(&splits[current], &splits[current], &A24, false); + } + + if (j == 0) { + assert(fp2_is_one(&A24.z)); + if (!ec_is_four_torsion(&splits[current], curve)) + return -1; + + ec_point_t T; + xDBL_A24(&T, &splits[current], &A24, false); + if (fp2_is_zero(&T.x)) + return -1; // special isogenies not allowed + } else { + assert(todo[current] == 2); +#ifndef NDEBUG + if (fp2_is_zero(&splits[current].z)) + debug_print("splitting point z coordinate is unexpectedly zero"); + + ec_point_t test; + xDBL_A24(&test, &splits[current], &A24, false); + if (fp2_is_zero(&test.z)) + debug_print("z coordinate is unexpectedly zero before doubling"); + xDBL_A24(&test, &test, &A24, false); + if (!fp2_is_zero(&test.z)) + debug_print("z coordinate is unexpectedly not zero after doubling"); +#endif + } + + // Evaluate 4-isogeny + ec_kps4_t kps4; + xisog_4(&kps4, &A24, splits[current]); + xeval_4(splits, splits, current, &kps4); + for (int i = 0; i < current; ++i) + todo[i] -= 2; + xeval_4(points, points, len_points, &kps4); + + --current; + } + assert(isog_len % 2 ? !current : current == -1); + + // Final 2-isogeny + if (isog_len % 2) { +#ifndef NDEBUG + if (fp2_is_zero(&splits[0].z)) + debug_print("splitting point z coordinate is unexpectedly zero"); + ec_point_t test; + copy_point(&test, &splits[0]); + xDBL_A24(&test, &test, &A24, false); + if (!fp2_is_zero(&test.z)) + debug_print("z coordinate is unexpectedly not zero after doubling"); +#endif + + // We need to check the order of this point in case there were no 4-isogenies + if (isog_len == 1 && !ec_is_two_torsion(&splits[0], curve)) + return -1; + if (fp2_is_zero(&splits[0].x)) { + // special isogenies not allowed + // this case can only happen if isog_len == 1; otherwise the + // previous 4-isogenies we computed ensure that $T=(0:1)$ is put + // as the kernel of the dual isogeny + return -1; + } + + ec_kps2_t kps2; + xisog_2(&kps2, &A24, splits[0]); + xeval_2(points, points, len_points, &kps2); + } + + // Output curve in the form (A:C) + A24_to_AC(curve, &A24); + + curve->is_A24_computed_and_normalized = false; + + return 0; +} + +uint32_t +ec_eval_even(ec_curve_t *image, ec_isog_even_t *phi, ec_point_t *points, unsigned len_points) +{ + copy_curve(image, &phi->curve); + return ec_eval_even_strategy(image, points, len_points, &phi->kernel, phi->length); +} + +// naive implementation +uint32_t +ec_eval_small_chain(ec_curve_t *curve, + const ec_point_t *kernel, + int len, + ec_point_t *points, + unsigned len_points, + bool special) // do we allow special isogenies? 
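+/* Editorial summary (not in upstream): given a kernel point of order 2^len on
+ * curve, this walks the chain one 2-isogeny at a time. At step i the remaining
+ * kernel big_K is doubled down to a point small_K of order 2, the corresponding
+ * 2-isogeny is computed, and big_K together with the len_points auxiliary points
+ * is pushed through it. If the order-2 point is (0 : 1), the isogeny is
+ * "special": it is only evaluated when the special flag is set, otherwise the
+ * function fails by returning (uint32_t)-1. */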
+{ + + ec_point_t A24; + AC_to_A24(&A24, curve); + + ec_kps2_t kps; + ec_point_t small_K, big_K; + copy_point(&big_K, kernel); + + for (int i = 0; i < len; i++) { + copy_point(&small_K, &big_K); + // small_K = big_K; + for (int j = 0; j < len - i - 1; j++) { + xDBL_A24(&small_K, &small_K, &A24, false); + } + // Check the order of the point before the first isogeny step + if (i == 0 && !ec_is_two_torsion(&small_K, curve)) + return (uint32_t)-1; + // Perform isogeny step + if (fp2_is_zero(&small_K.x)) { + if (special) { + ec_point_t B24; + xisog_2_singular(&kps, &B24, A24); + xeval_2_singular(&big_K, &big_K, 1, &kps); + xeval_2_singular(points, points, len_points, &kps); + copy_point(&A24, &B24); + } else { + return (uint32_t)-1; + } + } else { + xisog_2(&kps, &A24, small_K); + xeval_2(&big_K, &big_K, 1, &kps); + xeval_2(points, points, len_points, &kps); + } + } + A24_to_AC(curve, &A24); + + curve->is_A24_computed_and_normalized = false; + return 0; +} + +uint32_t +ec_isomorphism(ec_isom_t *isom, const ec_curve_t *from, const ec_curve_t *to) +{ + fp2_t t0, t1, t2, t3, t4; + + fp2_mul(&t0, &from->A, &from->C); + fp2_mul(&t1, &to->A, &to->C); + + fp2_mul(&t2, &t1, &to->C); // toA*toC^2 + fp2_add(&t3, &t2, &t2); + fp2_add(&t3, &t3, &t3); + fp2_add(&t3, &t3, &t3); + fp2_add(&t2, &t2, &t3); // 9*toA*toC^2 + fp2_sqr(&t3, &to->A); + fp2_mul(&t3, &t3, &to->A); // toA^3 + fp2_add(&t3, &t3, &t3); + fp2_sub(&isom->Nx, &t3, &t2); // 2*toA^3-9*toA*toC^2 + fp2_mul(&t2, &t0, &from->A); // fromA^2*fromC + fp2_sqr(&t3, &from->C); + fp2_mul(&t3, &t3, &from->C); // fromC^3 + fp2_add(&t4, &t3, &t3); + fp2_add(&t3, &t4, &t3); // 3*fromC^3 + fp2_sub(&t3, &t3, &t2); // 3*fromC^3-fromA^2*fromC + fp2_mul(&isom->Nx, &isom->Nx, &t3); // lambda_x = (2*toA^3-9*toA*toC^2)*(3*fromC^3-fromA^2*fromC) + + fp2_mul(&t2, &t0, &from->C); // fromA*fromC^2 + fp2_add(&t3, &t2, &t2); + fp2_add(&t3, &t3, &t3); + fp2_add(&t3, &t3, &t3); + fp2_add(&t2, &t2, &t3); // 9*fromA*fromC^2 + fp2_sqr(&t3, &from->A); + fp2_mul(&t3, &t3, &from->A); // fromA^3 + fp2_add(&t3, &t3, &t3); + fp2_sub(&isom->D, &t3, &t2); // 2*fromA^3-9*fromA*fromC^2 + fp2_mul(&t2, &t1, &to->A); // toA^2*toC + fp2_sqr(&t3, &to->C); + fp2_mul(&t3, &t3, &to->C); // toC^3 + fp2_add(&t4, &t3, &t3); + fp2_add(&t3, &t4, &t3); // 3*toC^3 + fp2_sub(&t3, &t3, &t2); // 3*toC^3-toA^2*toC + fp2_mul(&isom->D, &isom->D, &t3); // lambda_z = (2*fromA^3-9*fromA*fromC^2)*(3*toC^3-toA^2*toC) + + // Mont -> SW -> SW -> Mont + fp2_mul(&t0, &to->C, &from->A); + fp2_mul(&t0, &t0, &isom->Nx); // lambda_x*toC*fromA + fp2_mul(&t1, &from->C, &to->A); + fp2_mul(&t1, &t1, &isom->D); // lambda_z*fromC*toA + fp2_sub(&isom->Nz, &t0, &t1); // lambda_x*toC*fromA - lambda_z*fromC*toA + fp2_mul(&t0, &from->C, &to->C); + fp2_add(&t1, &t0, &t0); + fp2_add(&t0, &t0, &t1); // 3*fromC*toC + fp2_mul(&isom->D, &isom->D, &t0); // 3*lambda_z*fromC*toC + fp2_mul(&isom->Nx, &isom->Nx, &t0); // 3*lambda_x*fromC*toC + + return (fp2_is_zero(&isom->Nx) | fp2_is_zero(&isom->D)); +} + +void +ec_iso_eval(ec_point_t *P, ec_isom_t *isom) +{ + fp2_t tmp; + fp2_mul(&P->x, &P->x, &isom->Nx); + fp2_mul(&tmp, &P->z, &isom->Nz); + fp2_add(&P->x, &P->x, &tmp); + fp2_mul(&P->z, &P->z, &isom->D); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/keygen.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/keygen.c new file mode 100644 index 0000000000..c1c206c99d --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/keygen.c @@ -0,0 +1,64 @@ +#include +#include +#include +#include +#include + +void 
+secret_key_init(secret_key_t *sk) +{ + quat_left_ideal_init(&(sk->secret_ideal)); + ibz_mat_2x2_init(&(sk->mat_BAcan_to_BA0_two)); + ec_curve_init(&sk->curve); +} + +void +secret_key_finalize(secret_key_t *sk) +{ + quat_left_ideal_finalize(&(sk->secret_ideal)); + ibz_mat_2x2_finalize(&(sk->mat_BAcan_to_BA0_two)); +} + +int +protocols_keygen(public_key_t *pk, secret_key_t *sk) +{ + int found = 0; + ec_basis_t B_0_two; + + // iterating until a solution has been found + while (!found) { + + found = quat_sampling_random_ideal_O0_given_norm( + &sk->secret_ideal, &SEC_DEGREE, 1, &QUAT_represent_integer_params, NULL); + + // replacing the secret key ideal by a shorter equivalent one for efficiency + found = found && quat_lideal_prime_norm_reduced_equivalent( + &sk->secret_ideal, &QUATALG_PINFTY, QUAT_primality_num_iter, QUAT_equiv_bound_coeff); + + // ideal to isogeny clapotis + + found = found && dim2id2iso_arbitrary_isogeny_evaluation(&B_0_two, &sk->curve, &sk->secret_ideal); + } + + // Assert the isogeny was found and images have the correct order + assert(test_basis_order_twof(&B_0_two, &sk->curve, TORSION_EVEN_POWER)); + + // Compute a deterministic basis with a hint to speed up verification + pk->hint_pk = ec_curve_to_basis_2f_to_hint(&sk->canonical_basis, &sk->curve, TORSION_EVEN_POWER); + + // Assert the deterministic basis we computed has the correct order + assert(test_basis_order_twof(&sk->canonical_basis, &sk->curve, TORSION_EVEN_POWER)); + + // Compute the 2x2 matrix basis change from the canonical basis to the evaluation of our secret + // isogeny + change_of_basis_matrix_tate( + &sk->mat_BAcan_to_BA0_two, &sk->canonical_basis, &B_0_two, &sk->curve, TORSION_EVEN_POWER); + + // Set the public key from the codomain curve + copy_curve(&pk->curve, &sk->curve); + pk->curve.is_A24_computed_and_normalized = false; // We don't send any precomputation + + assert(fp2_is_one(&pk->curve.C) == 0xFFFFFFFF); + + return found; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/l2.c new file mode 100644 index 0000000000..8c49b21d20 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/l2.c @@ -0,0 +1,190 @@ +#include +#include "lll_internals.h" +#include "internal.h" + +#include "dpe.h" + +// Access entry of symmetric matrix +#define SYM(M, i, j) (i < j ? 
&M[j][i] : &M[i][j]) + +void +quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) +{ + dpe_t dpe_const_one, dpe_const_DELTABAR; + + dpe_init(dpe_const_one); + dpe_set_ui(dpe_const_one, 1); + + dpe_init(dpe_const_DELTABAR); + dpe_set_d(dpe_const_DELTABAR, DELTABAR); + + // fp variables for Gram-Schmidt orthogonalization and Lovasz' conditions + dpe_t r[4][4], u[4][4], lovasz[4]; + for (int i = 0; i < 4; i++) { + dpe_init(lovasz[i]); + for (int j = 0; j <= i; j++) { + dpe_init(r[i][j]); + dpe_init(u[i][j]); + } + } + + // threshold for swaps + dpe_t delta_bar; + dpe_init(delta_bar); + dpe_set_d(delta_bar, DELTABAR); + + // Other work variables + dpe_t Xf, tmpF; + dpe_init(Xf); + dpe_init(tmpF); + ibz_t X, tmpI; + ibz_init(&X); + ibz_init(&tmpI); + + // Main L² loop + dpe_set_z(r[0][0], (*G)[0][0]); + int kappa = 1; + while (kappa < 4) { + // size reduce b_κ + int done = 0; + while (!done) { + // Recompute the κ-th row of the Choleski Factorisation + // Loop invariant: + // r[κ][j] ≈ u[κ][j] ‖b_j*‖² ≈ 〈b_κ, b_j*〉 + for (int j = 0; j <= kappa; j++) { + dpe_set_z(r[kappa][j], (*G)[kappa][j]); + for (int k = 0; k < j; k++) { + dpe_mul(tmpF, r[kappa][k], u[j][k]); + dpe_sub(r[kappa][j], r[kappa][j], tmpF); + } + if (j < kappa) + dpe_div(u[kappa][j], r[kappa][j], r[j][j]); + } + + done = 1; + // size reduce + for (int i = kappa - 1; i >= 0; i--) { + if (dpe_cmp_d(u[kappa][i], ETABAR) > 0 || dpe_cmp_d(u[kappa][i], -ETABAR) < 0) { + done = 0; + dpe_set(Xf, u[kappa][i]); + dpe_round(Xf, Xf); + dpe_get_z(X, Xf); + // Update basis: b_κ ← b_κ - X·b_i + for (int j = 0; j < 4; j++) { + ibz_mul(&tmpI, &X, &(*basis)[j][i]); + ibz_sub(&(*basis)[j][kappa], &(*basis)[j][kappa], &tmpI); + } + // Update lower half of the Gram matrix + // = - 2X + X² = + // - X - X( - X·) + //// 〈b_κ, b_κ〉 ← 〈b_κ, b_κ〉 - X·〈b_κ, b_i〉 + ibz_mul(&tmpI, &X, &(*G)[kappa][i]); + ibz_sub(&(*G)[kappa][kappa], &(*G)[kappa][kappa], &tmpI); + for (int j = 0; j < 4; j++) { // works because i < κ + // 〈b_κ, b_j〉 ← 〈b_κ, b_j〉 - X·〈b_i, b_j〉 + ibz_mul(&tmpI, &X, SYM((*G), i, j)); + ibz_sub(SYM((*G), kappa, j), SYM((*G), kappa, j), &tmpI); + } + // After the loop: + //// 〈b_κ,b_κ〉 ← 〈b_κ,b_κ〉 - X·〈b_κ,b_i〉 - X·(〈b_κ,b_i〉 - X·〈b_i, + /// b_i〉) = 〈b_κ - X·b_i, b_κ - X·b_i〉 + // + // Update u[kappa][j] + for (int j = 0; j < i; j++) { + dpe_mul(tmpF, Xf, u[i][j]); + dpe_sub(u[kappa][j], u[kappa][j], tmpF); + } + } + } + } + + // Check Lovasz' conditions + // lovasz[0] = ‖b_κ‖² + dpe_set_z(lovasz[0], (*G)[kappa][kappa]); + // lovasz[i] = lovasz[i-1] - u[κ][i-1]·r[κ][i-1] + for (int i = 1; i < kappa; i++) { + dpe_mul(tmpF, u[kappa][i - 1], r[kappa][i - 1]); + dpe_sub(lovasz[i], lovasz[i - 1], tmpF); + } + int swap; + for (swap = kappa; swap > 0; swap--) { + dpe_mul(tmpF, delta_bar, r[swap - 1][swap - 1]); + if (dpe_cmp(tmpF, lovasz[swap - 1]) < 0) + break; + } + + // Insert b_κ before b_swap + if (kappa != swap) { + // Insert b_κ before b_swap in the basis and in the lower half Gram matrix + for (int j = kappa; j > swap; j--) { + for (int i = 0; i < 4; i++) { + ibz_swap(&(*basis)[i][j], &(*basis)[i][j - 1]); + if (i == j - 1) + ibz_swap(&(*G)[i][i], &(*G)[j][j]); + else if (i != j) + ibz_swap(SYM((*G), i, j), SYM((*G), i, j - 1)); + } + } + // Copy row u[κ] and r[κ] in swap position, ignore what follows + for (int i = 0; i < swap; i++) { + dpe_set(u[swap][i], u[kappa][i]); + dpe_set(r[swap][i], r[kappa][i]); + } + dpe_set(r[swap][swap], lovasz[swap]); + // swap complete + kappa = swap; + } + + kappa += 1; + } + +#ifndef NDEBUG + // Check 
size-reducedness + for (int i = 0; i < 4; i++) + for (int j = 0; j < i; j++) { + dpe_abs(u[i][j], u[i][j]); + assert(dpe_cmp_d(u[i][j], ETABAR) <= 0); + } + // Check Lovasz' conditions + for (int i = 1; i < 4; i++) { + dpe_mul(tmpF, u[i][i - 1], u[i][i - 1]); + dpe_sub(tmpF, dpe_const_DELTABAR, tmpF); + dpe_mul(tmpF, tmpF, r[i - 1][i - 1]); + assert(dpe_cmp(tmpF, r[i][i]) <= 0); + } +#endif + + // Fill in the upper half of the Gram matrix + for (int i = 0; i < 4; i++) { + for (int j = i + 1; j < 4; j++) + ibz_copy(&(*G)[i][j], &(*G)[j][i]); + } + + // Clearinghouse + ibz_finalize(&X); + ibz_finalize(&tmpI); + dpe_clear(dpe_const_one); + dpe_clear(dpe_const_DELTABAR); + dpe_clear(Xf); + dpe_clear(tmpF); + dpe_clear(delta_bar); + for (int i = 0; i < 4; i++) { + dpe_clear(lovasz[i]); + for (int j = 0; j <= i; j++) { + dpe_clear(r[i][j]); + dpe_clear(u[i][j]); + } + } +} + +int +quat_lattice_lll(ibz_mat_4x4_t *red, const quat_lattice_t *lattice, const quat_alg_t *alg) +{ + ibz_mat_4x4_t G; // Gram Matrix + ibz_mat_4x4_init(&G); + quat_lattice_gram(&G, lattice, alg); + ibz_mat_4x4_copy(red, &lattice->basis); + quat_lll_core(&G, red); + ibz_mat_4x4_finalize(&G); + return 0; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lat_ball.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lat_ball.c new file mode 100644 index 0000000000..c7bbb9682f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lat_ball.c @@ -0,0 +1,139 @@ +#include +#include +#include +#include "internal.h" +#include "lll_internals.h" + +int +quat_lattice_bound_parallelogram(ibz_vec_4_t *box, ibz_mat_4x4_t *U, const ibz_mat_4x4_t *G, const ibz_t *radius) +{ + ibz_t denom, rem; + ibz_init(&denom); + ibz_init(&rem); + ibz_mat_4x4_t dualG; + ibz_mat_4x4_init(&dualG); + +// Compute the Gram matrix of the dual lattice +#ifndef NDEBUG + int inv_check = ibz_mat_4x4_inv_with_det_as_denom(&dualG, &denom, G); + assert(inv_check); +#else + (void)ibz_mat_4x4_inv_with_det_as_denom(&dualG, &denom, G); +#endif + // Initialize the dual lattice basis to the identity matrix + ibz_mat_4x4_identity(U); + // Reduce the dual lattice + quat_lll_core(&dualG, U); + + // Compute the parallelogram's bounds + int trivial = 1; + for (int i = 0; i < 4; i++) { + ibz_mul(&(*box)[i], &dualG[i][i], radius); + ibz_div(&(*box)[i], &rem, &(*box)[i], &denom); + ibz_sqrt_floor(&(*box)[i], &(*box)[i]); + trivial &= ibz_is_zero(&(*box)[i]); + } + + // Compute the transpose transformation matrix +#ifndef NDEBUG + int inv = ibz_mat_4x4_inv_with_det_as_denom(U, &denom, U); +#else + (void)ibz_mat_4x4_inv_with_det_as_denom(U, &denom, U); +#endif + // U is unitary, det(U) = ± 1 + ibz_mat_4x4_scalar_mul(U, &denom, U); +#ifndef NDEBUG + assert(inv); + ibz_abs(&denom, &denom); + assert(ibz_is_one(&denom)); +#endif + + ibz_mat_4x4_finalize(&dualG); + ibz_finalize(&denom); + ibz_finalize(&rem); + return !trivial; +} + +int +quat_lattice_sample_from_ball(quat_alg_elem_t *res, + const quat_lattice_t *lattice, + const quat_alg_t *alg, + const ibz_t *radius) +{ + assert(ibz_cmp(radius, &ibz_const_zero) > 0); + + ibz_vec_4_t box; + ibz_vec_4_init(&box); + ibz_mat_4x4_t U, G; + ibz_mat_4x4_init(&U); + ibz_mat_4x4_init(&G); + ibz_vec_4_t x; + ibz_vec_4_init(&x); + ibz_t rad, tmp; + ibz_init(&rad); + ibz_init(&tmp); + + // Compute the Gram matrix of the lattice + quat_lattice_gram(&G, lattice, alg); + + // Correct ball radius by the denominator + ibz_mul(&rad, radius, &lattice->denom); + ibz_mul(&rad, &rad, &lattice->denom); + // Correct by 2 (Gram matrix corresponds 
to twice the norm) + ibz_mul(&rad, &rad, &ibz_const_two); + + // Compute a bounding parallelogram for the ball, stop if it only + // contains the origin + int ok = quat_lattice_bound_parallelogram(&box, &U, &G, &rad); + if (!ok) + goto err; + + // Rejection sampling from the parallelogram +#ifndef NDEBUG + int cnt = 0; +#endif + do { + // Sample vector + for (int i = 0; i < 4; i++) { + if (ibz_is_zero(&box[i])) { + ibz_copy(&x[i], &ibz_const_zero); + } else { + ibz_add(&tmp, &box[i], &box[i]); + ok &= ibz_rand_interval(&x[i], &ibz_const_zero, &tmp); + ibz_sub(&x[i], &x[i], &box[i]); + if (!ok) + goto err; + } + } + // Map to parallelogram + ibz_mat_4x4_eval_t(&x, &x, &U); + // Evaluate quadratic form + quat_qf_eval(&tmp, &G, &x); +#ifndef NDEBUG + cnt++; + if (cnt % 100 == 0) + printf("Lattice sampling rejected %d times", cnt - 1); +#endif + } while (ibz_is_zero(&tmp) || (ibz_cmp(&tmp, &rad) > 0)); + + // Evaluate linear combination + ibz_mat_4x4_eval(&(res->coord), &(lattice->basis), &x); + ibz_copy(&(res->denom), &(lattice->denom)); + quat_alg_normalize(res); + +#ifndef NDEBUG + // Check norm is smaller than radius + quat_alg_norm(&tmp, &rad, res, alg); + ibz_mul(&rad, &rad, radius); + assert(ibz_cmp(&tmp, &rad) <= 0); +#endif + +err: + ibz_finalize(&rad); + ibz_finalize(&tmp); + ibz_vec_4_finalize(&x); + ibz_mat_4x4_finalize(&U); + ibz_mat_4x4_finalize(&G); + ibz_vec_4_finalize(&box); + return ok; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lattice.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lattice.c new file mode 100644 index 0000000000..c98bae9499 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lattice.c @@ -0,0 +1,328 @@ +#include +#include +#include "internal.h" + +// helper functions +int +quat_lattice_equal(const quat_lattice_t *lat1, const quat_lattice_t *lat2) +{ + int equal = 1; + quat_lattice_t a, b; + quat_lattice_init(&a); + quat_lattice_init(&b); + quat_lattice_reduce_denom(&a, lat1); + quat_lattice_reduce_denom(&b, lat2); + ibz_abs(&(a.denom), &(a.denom)); + ibz_abs(&(b.denom), &(b.denom)); + quat_lattice_hnf(&a); + quat_lattice_hnf(&b); + equal = equal && (ibz_cmp(&(a.denom), &(b.denom)) == 0); + equal = equal && ibz_mat_4x4_equal(&(a.basis), &(b.basis)); + quat_lattice_finalize(&a); + quat_lattice_finalize(&b); + return (equal); +} + +// sublattice test +int +quat_lattice_inclusion(const quat_lattice_t *sublat, const quat_lattice_t *overlat) +{ + int res; + quat_lattice_t sum; + quat_lattice_init(&sum); + quat_lattice_add(&sum, overlat, sublat); + res = quat_lattice_equal(&sum, overlat); + quat_lattice_finalize(&sum); + return (res); +} + +void +quat_lattice_reduce_denom(quat_lattice_t *reduced, const quat_lattice_t *lat) +{ + ibz_t gcd; + ibz_init(&gcd); + ibz_mat_4x4_gcd(&gcd, &(lat->basis)); + ibz_gcd(&gcd, &gcd, &(lat->denom)); + ibz_mat_4x4_scalar_div(&(reduced->basis), &gcd, &(lat->basis)); + ibz_div(&(reduced->denom), &gcd, &(lat->denom), &gcd); + ibz_abs(&(reduced->denom), &(reduced->denom)); + ibz_finalize(&gcd); +} + +void +quat_lattice_conjugate_without_hnf(quat_lattice_t *conj, const quat_lattice_t *lat) +{ + ibz_mat_4x4_copy(&(conj->basis), &(lat->basis)); + ibz_copy(&(conj->denom), &(lat->denom)); + + for (int row = 1; row < 4; ++row) { + for (int col = 0; col < 4; ++col) { + ibz_neg(&(conj->basis[row][col]), &(conj->basis[row][col])); + } + } +} + +// Method described in https://cseweb.ucsd.edu/classes/sp14/cse206A-a/lec4.pdf consulted on 19 of +// May 2023, 12h40 CEST +void 
+quat_lattice_dual_without_hnf(quat_lattice_t *dual, const quat_lattice_t *lat) +{ + ibz_mat_4x4_t inv; + ibz_t det; + ibz_init(&det); + ibz_mat_4x4_init(&inv); + ibz_mat_4x4_inv_with_det_as_denom(&inv, &det, &(lat->basis)); + ibz_mat_4x4_transpose(&inv, &inv); + // dual_denom = det/lat_denom + ibz_mat_4x4_scalar_mul(&(dual->basis), &(lat->denom), &inv); + ibz_copy(&(dual->denom), &det); + + ibz_finalize(&det); + ibz_mat_4x4_finalize(&inv); +} + +void +quat_lattice_add(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2) +{ + ibz_vec_4_t generators[8]; + ibz_mat_4x4_t tmp; + ibz_t det1, det2, detprod; + ibz_init(&det1); + ibz_init(&det2); + ibz_init(&detprod); + for (int i = 0; i < 8; i++) + ibz_vec_4_init(&(generators[i])); + ibz_mat_4x4_init(&tmp); + ibz_mat_4x4_scalar_mul(&tmp, &(lat1->denom), &(lat2->basis)); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(generators[j][i]), &(tmp[i][j])); + } + } + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det1, &tmp); + ibz_mat_4x4_scalar_mul(&tmp, &(lat2->denom), &(lat1->basis)); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(generators[4 + j][i]), &(tmp[i][j])); + } + } + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det2, &tmp); + assert(!ibz_is_zero(&det1)); + assert(!ibz_is_zero(&det2)); + ibz_gcd(&detprod, &det1, &det2); + ibz_mat_4xn_hnf_mod_core(&(res->basis), 8, generators, &detprod); + ibz_mul(&(res->denom), &(lat1->denom), &(lat2->denom)); + quat_lattice_reduce_denom(res, res); + ibz_mat_4x4_finalize(&tmp); + ibz_finalize(&det1); + ibz_finalize(&det2); + ibz_finalize(&detprod); + for (int i = 0; i < 8; i++) + ibz_vec_4_finalize(&(generators[i])); +} + +// method described in https://cseweb.ucsd.edu/classes/sp14/cse206A-a/lec4.pdf consulted on 19 of +// May 2023, 12h40 CEST +void +quat_lattice_intersect(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2) +{ + quat_lattice_t dual1, dual2, dual_res; + quat_lattice_init(&dual1); + quat_lattice_init(&dual2); + quat_lattice_init(&dual_res); + quat_lattice_dual_without_hnf(&dual1, lat1); + + quat_lattice_dual_without_hnf(&dual2, lat2); + quat_lattice_add(&dual_res, &dual1, &dual2); + quat_lattice_dual_without_hnf(res, &dual_res); + quat_lattice_hnf(res); // could be removed if we do not expect HNF any more + quat_lattice_finalize(&dual1); + quat_lattice_finalize(&dual2); + quat_lattice_finalize(&dual_res); +} + +void +quat_lattice_mat_alg_coord_mul_without_hnf(ibz_mat_4x4_t *prod, + const ibz_mat_4x4_t *lat, + const ibz_vec_4_t *coord, + const quat_alg_t *alg) +{ + ibz_vec_4_t p, a; + ibz_vec_4_init(&p); + ibz_vec_4_init(&a); + for (int i = 0; i < 4; i++) { + ibz_vec_4_copy_ibz(&a, &((*lat)[0][i]), &((*lat)[1][i]), &((*lat)[2][i]), &((*lat)[3][i])); + quat_alg_coord_mul(&p, &a, coord, alg); + ibz_copy(&((*prod)[0][i]), &(p[0])); + ibz_copy(&((*prod)[1][i]), &(p[1])); + ibz_copy(&((*prod)[2][i]), &(p[2])); + ibz_copy(&((*prod)[3][i]), &(p[3])); + } + ibz_vec_4_finalize(&p); + ibz_vec_4_finalize(&a); +} + +void +quat_lattice_alg_elem_mul(quat_lattice_t *prod, + const quat_lattice_t *lat, + const quat_alg_elem_t *elem, + const quat_alg_t *alg) +{ + quat_lattice_mat_alg_coord_mul_without_hnf(&(prod->basis), &(lat->basis), &(elem->coord), alg); + ibz_mul(&(prod->denom), &(lat->denom), &(elem->denom)); + quat_lattice_hnf(prod); +} + +void +quat_lattice_mul(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2, const quat_alg_t *alg) +{ + ibz_vec_4_t elem1, elem2, elem_res; + 
ibz_vec_4_t generators[16]; + ibz_mat_4x4_t detmat; + ibz_t det; + quat_lattice_t lat_res; + ibz_init(&det); + ibz_mat_4x4_init(&detmat); + quat_lattice_init(&lat_res); + ibz_vec_4_init(&elem1); + ibz_vec_4_init(&elem2); + ibz_vec_4_init(&elem_res); + for (int i = 0; i < 16; i++) + ibz_vec_4_init(&(generators[i])); + for (int k = 0; k < 4; k++) { + ibz_vec_4_copy_ibz( + &elem1, &(lat1->basis[0][k]), &(lat1->basis[1][k]), &(lat1->basis[2][k]), &(lat1->basis[3][k])); + for (int i = 0; i < 4; i++) { + ibz_vec_4_copy_ibz( + &elem2, &(lat2->basis[0][i]), &(lat2->basis[1][i]), &(lat2->basis[2][i]), &(lat2->basis[3][i])); + quat_alg_coord_mul(&elem_res, &elem1, &elem2, alg); + for (int j = 0; j < 4; j++) { + if (k == 0) + ibz_copy(&(detmat[i][j]), &(elem_res[j])); + ibz_copy(&(generators[4 * k + i][j]), &(elem_res[j])); + } + } + } + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &detmat); + ibz_abs(&det, &det); + ibz_mat_4xn_hnf_mod_core(&(res->basis), 16, generators, &det); + ibz_mul(&(res->denom), &(lat1->denom), &(lat2->denom)); + quat_lattice_reduce_denom(res, res); + ibz_vec_4_finalize(&elem1); + ibz_vec_4_finalize(&elem2); + ibz_vec_4_finalize(&elem_res); + quat_lattice_finalize(&lat_res); + ibz_finalize(&det); + ibz_mat_4x4_finalize(&(detmat)); + for (int i = 0; i < 16; i++) + ibz_vec_4_finalize(&(generators[i])); +} + +// lattice assumed of full rank +int +quat_lattice_contains(ibz_vec_4_t *coord, const quat_lattice_t *lat, const quat_alg_elem_t *x) +{ + int divisible = 0; + ibz_vec_4_t work_coord; + ibz_mat_4x4_t inv; + ibz_t det, prod; + ibz_init(&prod); + ibz_init(&det); + ibz_vec_4_init(&work_coord); + ibz_mat_4x4_init(&inv); + ibz_mat_4x4_inv_with_det_as_denom(&inv, &det, &(lat->basis)); + assert(!ibz_is_zero(&det)); + ibz_mat_4x4_eval(&work_coord, &inv, &(x->coord)); + ibz_vec_4_scalar_mul(&(work_coord), &(lat->denom), &work_coord); + ibz_mul(&prod, &(x->denom), &det); + divisible = ibz_vec_4_scalar_div(&work_coord, &prod, &work_coord); + // copy result + if (divisible && (coord != NULL)) { + for (int i = 0; i < 4; i++) { + ibz_copy(&((*coord)[i]), &(work_coord[i])); + } + } + ibz_finalize(&prod); + ibz_finalize(&det); + ibz_mat_4x4_finalize(&inv); + ibz_vec_4_finalize(&work_coord); + return (divisible); +} + +void +quat_lattice_index(ibz_t *index, const quat_lattice_t *sublat, const quat_lattice_t *overlat) +{ + ibz_t tmp, det; + ibz_init(&tmp); + ibz_init(&det); + + // det = det(sublat->basis) + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &sublat->basis); + // tmp = (overlat->denom)⁴ + ibz_mul(&tmp, &overlat->denom, &overlat->denom); + ibz_mul(&tmp, &tmp, &tmp); + // index = (overlat->denom)⁴ · det(sublat->basis) + ibz_mul(index, &det, &tmp); + // tmp = (sublat->denom)⁴ + ibz_mul(&tmp, &sublat->denom, &sublat->denom); + ibz_mul(&tmp, &tmp, &tmp); + // det = det(overlat->basis) + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &overlat->basis); + // tmp = (sublat->denom)⁴ · det(overlat->basis) + ibz_mul(&tmp, &tmp, &det); + // index = index / tmp + ibz_div(index, &tmp, index, &tmp); + assert(ibz_is_zero(&tmp)); + // index = |index| + ibz_abs(index, index); + + ibz_finalize(&tmp); + ibz_finalize(&det); +} + +void +quat_lattice_hnf(quat_lattice_t *lat) +{ + ibz_t mod; + ibz_vec_4_t generators[4]; + ibz_init(&mod); + ibz_mat_4x4_inv_with_det_as_denom(NULL, &mod, &(lat->basis)); + ibz_abs(&mod, &mod); + for (int i = 0; i < 4; i++) + ibz_vec_4_init(&(generators[i])); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(generators[j][i]), &(lat->basis[i][j])); + } 
+ } + ibz_mat_4xn_hnf_mod_core(&(lat->basis), 4, generators, &mod); + quat_lattice_reduce_denom(lat, lat); + ibz_finalize(&mod); + for (int i = 0; i < 4; i++) + ibz_vec_4_finalize(&(generators[i])); +} + +void +quat_lattice_gram(ibz_mat_4x4_t *G, const quat_lattice_t *lattice, const quat_alg_t *alg) +{ + ibz_t tmp; + ibz_init(&tmp); + for (int i = 0; i < 4; i++) { + for (int j = 0; j <= i; j++) { + ibz_set(&(*G)[i][j], 0); + for (int k = 0; k < 4; k++) { + ibz_mul(&tmp, &(lattice->basis)[k][i], &(lattice->basis)[k][j]); + if (k >= 2) + ibz_mul(&tmp, &tmp, &alg->p); + ibz_add(&(*G)[i][j], &(*G)[i][j], &tmp); + } + ibz_mul(&(*G)[i][j], &(*G)[i][j], &ibz_const_two); + } + } + for (int i = 0; i < 4; i++) { + for (int j = i + 1; j < 4; j++) { + ibz_copy(&(*G)[i][j], &(*G)[j][i]); + } + } + ibz_finalize(&tmp); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lll_applications.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lll_applications.c new file mode 100644 index 0000000000..6c763b8c04 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lll_applications.c @@ -0,0 +1,127 @@ +#include +#include +#include "lll_internals.h" + +void +quat_lideal_reduce_basis(ibz_mat_4x4_t *reduced, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + ibz_t gram_corrector; + ibz_init(&gram_corrector); + ibz_mul(&gram_corrector, &(lideal->lattice.denom), &(lideal->lattice.denom)); + quat_lideal_class_gram(gram, lideal, alg); + ibz_mat_4x4_copy(reduced, &(lideal->lattice.basis)); + quat_lll_core(gram, reduced); + ibz_mat_4x4_scalar_mul(gram, &gram_corrector, gram); + for (int i = 0; i < 4; i++) { + ibz_div_2exp(&((*gram)[i][i]), &((*gram)[i][i]), 1); + for (int j = i + 1; j < 4; j++) { + ibz_set(&((*gram)[i][j]), 0); + } + } + ibz_finalize(&gram_corrector); +} + +void +quat_lideal_lideal_mul_reduced(quat_left_ideal_t *prod, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg) +{ + ibz_mat_4x4_t red; + ibz_mat_4x4_init(&red); + + quat_lattice_mul(&(prod->lattice), &(lideal1->lattice), &(lideal2->lattice), alg); + prod->parent_order = lideal1->parent_order; + quat_lideal_norm(prod); + quat_lideal_reduce_basis(&red, gram, prod, alg); + ibz_mat_4x4_copy(&(prod->lattice.basis), &red); + + ibz_mat_4x4_finalize(&red); +} + +int +quat_lideal_prime_norm_reduced_equivalent(quat_left_ideal_t *lideal, + const quat_alg_t *alg, + const int primality_num_iter, + const int equiv_bound_coeff) +{ + ibz_mat_4x4_t gram, red; + ibz_mat_4x4_init(&gram); + ibz_mat_4x4_init(&red); + + int found = 0; + + // computing the reduced basis + quat_lideal_reduce_basis(&red, &gram, lideal, alg); + + quat_alg_elem_t new_alpha; + quat_alg_elem_init(&new_alpha); + ibz_t tmp, remainder, adjusted_norm; + ibz_init(&tmp); + ibz_init(&remainder); + ibz_init(&adjusted_norm); + + ibz_mul(&adjusted_norm, &lideal->lattice.denom, &lideal->lattice.denom); + + int ctr = 0; + + // equiv_num_iter = (2 * equiv_bound_coeff + 1)^4 + assert(equiv_bound_coeff < (1 << 20)); + int equiv_num_iter = (2 * equiv_bound_coeff + 1); + equiv_num_iter = equiv_num_iter * equiv_num_iter; + equiv_num_iter = equiv_num_iter * equiv_num_iter; + + while (!found && ctr < equiv_num_iter) { + ctr++; + // we select our linear combination at random + ibz_rand_interval_minm_m(&new_alpha.coord[0], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord[1], equiv_bound_coeff); + 
ibz_rand_interval_minm_m(&new_alpha.coord[2], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord[3], equiv_bound_coeff); + + // computation of the norm of the vector sampled + quat_qf_eval(&tmp, &gram, &new_alpha.coord); + + // compute the norm of the equivalent ideal + // can be improved by removing the power of two first and the odd part only if the trial + // division failed (this should always be called on an ideal of norm 2^x * N for some + // big prime N ) + ibz_div(&tmp, &remainder, &tmp, &adjusted_norm); + + // debug : check that the remainder is zero + assert(ibz_is_zero(&remainder)); + + // pseudo-primality test + if (ibz_probab_prime(&tmp, primality_num_iter)) { + + // computes the generator using a matrix multiplication + ibz_mat_4x4_eval(&new_alpha.coord, &red, &new_alpha.coord); + ibz_copy(&new_alpha.denom, &lideal->lattice.denom); + assert(quat_lattice_contains(NULL, &lideal->lattice, &new_alpha)); + + quat_alg_conj(&new_alpha, &new_alpha); + ibz_mul(&new_alpha.denom, &new_alpha.denom, &lideal->norm); + quat_lideal_mul(lideal, lideal, &new_alpha, alg); + assert(ibz_probab_prime(&lideal->norm, primality_num_iter)); + + found = 1; + break; + } + } + assert(found); + + ibz_finalize(&tmp); + ibz_finalize(&remainder); + ibz_finalize(&adjusted_norm); + quat_alg_elem_finalize(&new_alpha); + + ibz_mat_4x4_finalize(&gram); + ibz_mat_4x4_finalize(&red); + + return found; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lll_internals.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lll_internals.h new file mode 100644 index 0000000000..e8d90141ac --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lll_internals.h @@ -0,0 +1,238 @@ +#ifndef LLL_INTERNALS_H +#define LLL_INTERNALS_H + +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations of functions only used for the LLL tets + */ + +#include + +/** @internal + * @ingroup quat_helpers + * @defgroup lll_internal Functions only used for LLL or its tests + * @{ + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_params Parameters used by the L2 implementation (floats) and its tests (ints) + * @{ + */ + +#define DELTABAR 0.995 +#define DELTA_NUM 99 +#define DELTA_DENOM 100 + +#define ETABAR 0.505 +#define EPSILON_NUM 1 +#define EPSILON_DENOM 100 + +#define PREC 64 +/** + * @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup ibq_t Types for rationals + * @{ + */ + +/** @brief Type for fractions of integers + * + * @typedef ibq_t + * + * For fractions of integers of arbitrary size, used by intbig module, using gmp + */ +typedef ibz_t ibq_t[2]; +typedef ibq_t ibq_vec_4_t[4]; +typedef ibq_t ibq_mat_4x4_t[4][4]; + +/**@} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_ibq_c Constructors and Destructors and Printers + * @{ + */ + +void ibq_init(ibq_t *x); +void ibq_finalize(ibq_t *x); + +void ibq_mat_4x4_init(ibq_mat_4x4_t *mat); +void ibq_mat_4x4_finalize(ibq_mat_4x4_t *mat); + +void ibq_vec_4_init(ibq_vec_4_t *vec); +void ibq_vec_4_finalize(ibq_vec_4_t *vec); + +void ibq_mat_4x4_print(const ibq_mat_4x4_t *mat); +void ibq_vec_4_print(const ibq_vec_4_t *vec); + +/** @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_qa Basic fraction arithmetic + * @{ + */ + +/** @brief sum=a+b + */ +void ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b); + +/** @brief diff=a-b + */ +void ibq_sub(ibq_t *diff, const ibq_t *a, const ibq_t *b); + +/** @brief neg=-x + */ +void ibq_neg(ibq_t *neg, const ibq_t *x); + +/** @brief abs=|x| + */ +void 
ibq_abs(ibq_t *abs, const ibq_t *x); + +/** @brief prod=a*b + */ +void ibq_mul(ibq_t *prod, const ibq_t *a, const ibq_t *b); + +/** @brief inv=1/x + * + * @returns 0 if x is 0, 1 if inverse exists and was computed + */ +int ibq_inv(ibq_t *inv, const ibq_t *x); + +/** @brief Compare a and b + * + * @returns a positive value if a > b, zero if a = b, and a negative value if a < b + */ +int ibq_cmp(const ibq_t *a, const ibq_t *b); + +/** @brief Test if x is 0 + * + * @returns 1 if x=0, 0 otherwise + */ +int ibq_is_zero(const ibq_t *x); + +/** @brief Test if x is 1 + * + * @returns 1 if x=1, 0 otherwise + */ +int ibq_is_one(const ibq_t *x); + +/** @brief Set q to a/b if b not 0 + * + * @returns 1 if b not 0 and q is set, 0 otherwise + */ +int ibq_set(ibq_t *q, const ibz_t *a, const ibz_t *b); + +/** @brief Copy value into target + */ +void ibq_copy(ibq_t *target, const ibq_t *value); + +/** @brief Checks if q is an integer + * + * @returns 1 if yes, 0 if not + */ +int ibq_is_ibz(const ibq_t *q); + +/** + * @brief Converts a fraction q to an integer y, if q is an integer. + * + * @returns 1 if z is an integer, 0 if not + */ +int ibq_to_ibz(ibz_t *z, const ibq_t *q); +/** @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup quat_lll_verify_helpers Helper functions for lll verification in dimension 4 + * @{ + */ + +/** @brief Set ibq to parameters delta and eta = 1/2 + epsilon using L2 constants + */ +void quat_lll_set_ibq_parameters(ibq_t *delta, ibq_t *eta); + +/** @brief Set an ibq vector to 4 given integer coefficients + */ +void ibq_vec_4_copy_ibz(ibq_vec_4_t *vec, + const ibz_t *coeff0, + const ibz_t *coeff1, + const ibz_t *coeff2, + const ibz_t *coeff3); // dim4, test/dim4 + +/** @brief Bilinear form vec00*vec10+vec01*vec11+q*vec02*vec12+q*vec03*vec13 for ibz_q + */ +void quat_lll_bilinear(ibq_t *b, const ibq_vec_4_t *vec0, const ibq_vec_4_t *vec1, + const ibz_t *q); // dim4, test/dim4 + +/** @brief Outputs the transposition of the orthogonalised matrix of mat (as fractions) + * + * For the bilinear form vec00*vec10+vec01*vec11+q*vec02*vec12+q*vec03*vec13 + */ +void quat_lll_gram_schmidt_transposed_with_ibq(ibq_mat_4x4_t *orthogonalised_transposed, + const ibz_mat_4x4_t *mat, + const ibz_t *q); // dim4 + +/** @brief Verifies if mat is lll-reduced for parameter coeff and norm defined by q + * + * For the bilinear form vec00*vec10+vec01*vec11+q*vec02*vec12+q*vec03*vec13 + */ +int quat_lll_verify(const ibz_mat_4x4_t *mat, + const ibq_t *delta, + const ibq_t *eta, + const quat_alg_t *alg); // test/lattice, test/dim4 + /** @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_internal_gram Internal LLL function + * @{ + */ + +/** @brief In-place L2 reduction core function + * + * Given a lattice basis represented by the columns of a 4x4 matrix + * and the Gram matrix of its bilinear form, L2-reduces the basis + * in-place and updates the Gram matrix accordingly. 
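+ *
+ * A typical call sequence, sketched here for orientation (it mirrors the
+ * body of quat_lattice_lll declared below; all names are this module's own):
+ *
+ *   ibz_mat_4x4_t G;
+ *   ibz_mat_4x4_init(&G);
+ *   quat_lattice_gram(&G, lattice, alg);     // Gram matrix of the basis
+ *   ibz_mat_4x4_copy(red, &lattice->basis);  // red starts out as the basis
+ *   quat_lll_core(&G, red);                  // red and G are reduced in place
+ *   ibz_mat_4x4_finalize(&G);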
+ * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param G In/Output: Gram matrix of the lattice basis + * @param basis In/Output: lattice basis + */ +void quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis); + +/** + * @brief LLL reduction on 4-dimensional lattice + * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param red Output: LLL reduced basis + * @param lattice In/Output: lattice with 4-dimensional basis + * @param alg The quaternion algebra + */ +int quat_lattice_lll(ibz_mat_4x4_t *red, const quat_lattice_t *lattice, const quat_alg_t *alg); + +/** + * @} + */ + +// end of lll_internal +/** @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lvlx.cmake b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lvlx.cmake new file mode 100644 index 0000000000..9b8c0f9287 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lvlx.cmake @@ -0,0 +1,12 @@ +set(SOURCE_FILES_ID2ISO_GENERIC_REF + ${LVLX_DIR}/id2iso.c + ${LVLX_DIR}/dim2id2iso.c +) + +add_library(${LIB_ID2ISO_${SVARIANT_UPPER}} STATIC ${SOURCE_FILES_ID2ISO_GENERIC_REF}) +target_link_libraries(${LIB_ID2ISO_${SVARIANT_UPPER}} ${LIB_QUATERNION} ${LIB_PRECOMP_${SVARIANT_UPPER}} ${LIB_MP} ${LIB_GF_${SVARIANT_UPPER}} ${LIB_EC_${SVARIANT_UPPER}} ${LIB_HD_${SVARIANT_UPPER}}) +target_include_directories(${LIB_ID2ISO_${SVARIANT_UPPER}} PRIVATE ${INC_PUBLIC} ${INC_PRECOMP_${SVARIANT_UPPER}} ${INC_QUATERNION} ${INC_MP} ${INC_GF} ${INC_GF_${SVARIANT_UPPER}} ${INC_EC} ${INC_HD} ${INC_ID2ISO} ${INC_COMMON}) +target_compile_options(${LIB_ID2ISO_${SVARIANT_UPPER}} PRIVATE ${C_OPT_FLAGS}) +target_compile_definitions(${LIB_ID2ISO_${SVARIANT_UPPER}} PUBLIC SQISIGN_VARIANT=${SVARIANT_LOWER}) + +add_subdirectory(test) diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mem.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mem.c new file mode 100644 index 0000000000..4956beda50 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mem.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: Apache-2.0 + +#include +#include +#include + +void +sqisign_secure_free(void *mem, size_t size) +{ + if (mem) { + typedef void *(*memset_t)(void *, int, size_t); + static volatile memset_t memset_func = memset; + memset_func(mem, 0, size); + free(mem); + } +} +void +sqisign_secure_clear(void *mem, size_t size) +{ + typedef void *(*memset_t)(void *, int, size_t); + static volatile memset_t memset_func = memset; + memset_func(mem, 0, size); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mem.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mem.h new file mode 100644 index 0000000000..ab8f6c6481 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mem.h @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef MEM_H +#define MEM_H +#include +#include + +/** + * Clears and frees allocated memory. + * + * @param[out] mem Memory to be cleared and freed. + * @param size Size of memory to be cleared and freed. + */ +void sqisign_secure_free(void *mem, size_t size); + +/** + * Clears memory. + * + * @param[out] mem Memory to be cleared. + * @param size Size of memory to be cleared. 
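+ *
+ * As implemented in mem.c, the write goes through a volatile function
+ * pointer to memset so the compiler cannot drop the clearing as a dead
+ * store. Illustrative use (buffer name is only an example):
+ * sqisign_secure_clear(buf, sizeof(buf)) on a stack buffer that held
+ * secret material.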
+ */ +void sqisign_secure_clear(void *mem, size_t size); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.c new file mode 100644 index 0000000000..396d505aec --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.c @@ -0,0 +1,73 @@ +#include +#include +#if defined(MINI_GMP) +#include "mini-gmp.h" +#else +// This configuration is used only for testing +#include +#endif +#include + +// Exported for testing +int +mini_mpz_legendre(const mpz_t a, const mpz_t p) +{ + int res = 0; + mpz_t e; + mpz_init_set(e, p); + mpz_sub_ui(e, e, 1); + mpz_fdiv_q_2exp(e, e, 1); + mpz_powm(e, a, e, p); + + if (mpz_cmp_ui(e, 1) <= 0) { + res = mpz_get_si(e); + } else { + res = -1; + } + mpz_clear(e); + return res; +} + +#if defined(MINI_GMP) +int +mpz_legendre(const mpz_t a, const mpz_t p) +{ + return mini_mpz_legendre(a, p); +} +#endif + +// Exported for testing +double +mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op) +{ + double ret; + int tmp_exp; + mpz_t tmp; + + // Handle the case where op is 0 + if (mpz_cmp_ui(op, 0) == 0) { + *exp = 0; + return 0.0; + } + + *exp = mpz_sizeinbase(op, 2); + + mpz_init_set(tmp, op); + + if (*exp > DBL_MAX_EXP) { + mpz_fdiv_q_2exp(tmp, tmp, *exp - DBL_MAX_EXP); + } + + ret = frexp(mpz_get_d(tmp), &tmp_exp); + mpz_clear(tmp); + + return ret; +} + +#if defined(MINI_GMP) +double +mpz_get_d_2exp(signed long int *exp, const mpz_t op) +{ + return mini_mpz_get_d_2exp(exp, op); +} +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.h new file mode 100644 index 0000000000..0113cfdfe6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.h @@ -0,0 +1,19 @@ +#ifndef MINI_GMP_EXTRA_H +#define MINI_GMP_EXTRA_H + +#if defined MINI_GMP +#include "mini-gmp.h" + +typedef long mp_exp_t; + +int mpz_legendre(const mpz_t a, const mpz_t p); +double mpz_get_d_2exp(signed long int *exp, const mpz_t op); +#else +// This configuration is used only for testing +#include +#endif + +int mini_mpz_legendre(const mpz_t a, const mpz_t p); +double mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp.c new file mode 100644 index 0000000000..3830ab2031 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp.c @@ -0,0 +1,4671 @@ +/* Note: The code from mini-gmp is modifed from the original by + commenting out the definition of GMP_LIMB_BITS */ + +/* + mini-gmp, a minimalistic implementation of a GNU GMP subset. + + Contributed to the GNU project by Niels Möller + Additional functionalities and improvements by Marco Bodrato. + +Copyright 1991-1997, 1999-2022 Free Software Foundation, Inc. + +This file is part of the GNU MP Library. + +The GNU MP Library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 2 of the License, or (at your option) any + later version. + +or both in parallel, as here. 
+ +The GNU MP Library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the GNU MP Library. If not, +see https://www.gnu.org/licenses/. */ + +/* NOTE: All functions in this file which are not declared in + mini-gmp.h are internal, and are not intended to be compatible + with GMP or with future versions of mini-gmp. */ + +/* Much of the material copied from GMP files, including: gmp-impl.h, + longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c, + mpn/generic/lshift.c, mpn/generic/mul_1.c, + mpn/generic/mul_basecase.c, mpn/generic/rshift.c, + mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c, + mpn/generic/submul_1.c. */ + +#include +#include +#include +#include +#include +#include + +#include "mini-gmp.h" + +#if !defined(MINI_GMP_DONT_USE_FLOAT_H) +#include +#endif + + +/* Macros */ +/* Removed from here as it is passed as a compiler command-line definition */ +/* #define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT) */ + +#define GMP_LIMB_MAX ((mp_limb_t) ~ (mp_limb_t) 0) +#define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1)) + +#define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2)) +#define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1) + +#define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT) +#define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1)) + +#define GMP_ABS(x) ((x) >= 0 ? (x) : -(x)) +#define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1)) + +#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b)) +#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b)) + +#define GMP_CMP(a,b) (((a) > (b)) - ((a) < (b))) + +#if defined(DBL_MANT_DIG) && FLT_RADIX == 2 +#define GMP_DBL_MANT_BITS DBL_MANT_DIG +#else +#define GMP_DBL_MANT_BITS (53) +#endif + +/* Return non-zero if xp,xsize and yp,ysize overlap. + If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no + overlap. If both these are false, there's an overlap. 
*/ +#define GMP_MPN_OVERLAP_P(xp, xsize, yp, ysize) \ + ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp)) + +#define gmp_assert_nocarry(x) do { \ + mp_limb_t __cy = (x); \ + assert (__cy == 0); \ + (void) (__cy); \ + } while (0) + +#define gmp_clz(count, x) do { \ + mp_limb_t __clz_x = (x); \ + unsigned __clz_c = 0; \ + int LOCAL_SHIFT_BITS = 8; \ + if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) \ + for (; \ + (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \ + __clz_c += 8) \ + { __clz_x <<= LOCAL_SHIFT_BITS; } \ + for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++) \ + __clz_x <<= 1; \ + (count) = __clz_c; \ + } while (0) + +#define gmp_ctz(count, x) do { \ + mp_limb_t __ctz_x = (x); \ + unsigned __ctz_c = 0; \ + gmp_clz (__ctz_c, __ctz_x & - __ctz_x); \ + (count) = GMP_LIMB_BITS - 1 - __ctz_c; \ + } while (0) + +#define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \ + do { \ + mp_limb_t __x; \ + __x = (al) + (bl); \ + (sh) = (ah) + (bh) + (__x < (al)); \ + (sl) = __x; \ + } while (0) + +#define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \ + do { \ + mp_limb_t __x; \ + __x = (al) - (bl); \ + (sh) = (ah) - (bh) - ((al) < (bl)); \ + (sl) = __x; \ + } while (0) + +#define gmp_umul_ppmm(w1, w0, u, v) \ + do { \ + int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; \ + if (sizeof(unsigned int) * CHAR_BIT >= 2 * GMP_LIMB_BITS) \ + { \ + unsigned int __ww = (unsigned int) (u) * (v); \ + w0 = (mp_limb_t) __ww; \ + w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ + } \ + else if (GMP_ULONG_BITS >= 2 * GMP_LIMB_BITS) \ + { \ + unsigned long int __ww = (unsigned long int) (u) * (v); \ + w0 = (mp_limb_t) __ww; \ + w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ + } \ + else { \ + mp_limb_t __x0, __x1, __x2, __x3; \ + unsigned __ul, __vl, __uh, __vh; \ + mp_limb_t __u = (u), __v = (v); \ + assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); \ + \ + __ul = __u & GMP_LLIMB_MASK; \ + __uh = __u >> (GMP_LIMB_BITS / 2); \ + __vl = __v & GMP_LLIMB_MASK; \ + __vh = __v >> (GMP_LIMB_BITS / 2); \ + \ + __x0 = (mp_limb_t) __ul * __vl; \ + __x1 = (mp_limb_t) __ul * __vh; \ + __x2 = (mp_limb_t) __uh * __vl; \ + __x3 = (mp_limb_t) __uh * __vh; \ + \ + __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */ \ + __x1 += __x2; /* but this indeed can */ \ + if (__x1 < __x2) /* did we get it? */ \ + __x3 += GMP_HLIMB_BIT; /* yes, add it in the proper pos. */ \ + \ + (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \ + (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \ + } \ + } while (0) + +/* If mp_limb_t is of size smaller than int, plain u*v implies + automatic promotion to *signed* int, and then multiply may overflow + and cause undefined behavior. Explicitly cast to unsigned int for + that case. */ +#define gmp_umullo_limb(u, v) \ + ((sizeof(mp_limb_t) >= sizeof(int)) ? 
(u)*(v) : (unsigned int)(u) * (v)) + +#define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di) \ + do { \ + mp_limb_t _qh, _ql, _r, _mask; \ + gmp_umul_ppmm (_qh, _ql, (nh), (di)); \ + gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \ + _r = (nl) - gmp_umullo_limb (_qh, (d)); \ + _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \ + _qh += _mask; \ + _r += _mask & (d); \ + if (_r >= (d)) \ + { \ + _r -= (d); \ + _qh++; \ + } \ + \ + (r) = _r; \ + (q) = _qh; \ + } while (0) + +#define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \ + do { \ + mp_limb_t _q0, _t1, _t0, _mask; \ + gmp_umul_ppmm ((q), _q0, (n2), (dinv)); \ + gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \ + \ + /* Compute the two most significant limbs of n - q'd */ \ + (r1) = (n1) - gmp_umullo_limb ((d1), (q)); \ + gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \ + gmp_umul_ppmm (_t1, _t0, (d0), (q)); \ + gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \ + (q)++; \ + \ + /* Conditionally adjust q and the remainders */ \ + _mask = - (mp_limb_t) ((r1) >= _q0); \ + (q) += _mask; \ + gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \ + if ((r1) >= (d1)) \ + { \ + if ((r1) > (d1) || (r0) >= (d0)) \ + { \ + (q)++; \ + gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \ + } \ + } \ + } while (0) + +/* Swap macros. */ +#define MP_LIMB_T_SWAP(x, y) \ + do { \ + mp_limb_t __mp_limb_t_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_limb_t_swap__tmp; \ + } while (0) +#define MP_SIZE_T_SWAP(x, y) \ + do { \ + mp_size_t __mp_size_t_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_size_t_swap__tmp; \ + } while (0) +#define MP_BITCNT_T_SWAP(x,y) \ + do { \ + mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_bitcnt_t_swap__tmp; \ + } while (0) +#define MP_PTR_SWAP(x, y) \ + do { \ + mp_ptr __mp_ptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_ptr_swap__tmp; \ + } while (0) +#define MP_SRCPTR_SWAP(x, y) \ + do { \ + mp_srcptr __mp_srcptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_srcptr_swap__tmp; \ + } while (0) + +#define MPN_PTR_SWAP(xp,xs, yp,ys) \ + do { \ + MP_PTR_SWAP (xp, yp); \ + MP_SIZE_T_SWAP (xs, ys); \ + } while(0) +#define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \ + do { \ + MP_SRCPTR_SWAP (xp, yp); \ + MP_SIZE_T_SWAP (xs, ys); \ + } while(0) + +#define MPZ_PTR_SWAP(x, y) \ + do { \ + mpz_ptr __mpz_ptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mpz_ptr_swap__tmp; \ + } while (0) +#define MPZ_SRCPTR_SWAP(x, y) \ + do { \ + mpz_srcptr __mpz_srcptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mpz_srcptr_swap__tmp; \ + } while (0) + +const int mp_bits_per_limb = GMP_LIMB_BITS; + + +/* Memory allocation and other helper functions. 
*/ +static void +gmp_die (const char *msg) +{ + fprintf (stderr, "%s\n", msg); + abort(); +} + +static void * +gmp_default_alloc (size_t size) +{ + void *p; + + assert (size > 0); + + p = malloc (size); + if (!p) + gmp_die("gmp_default_alloc: Virtual memory exhausted."); + + return p; +} + +static void * +gmp_default_realloc (void *old, size_t unused_old_size, size_t new_size) +{ + void * p; + + p = realloc (old, new_size); + + if (!p) + gmp_die("gmp_default_realloc: Virtual memory exhausted."); + + return p; +} + +static void +gmp_default_free (void *p, size_t unused_size) +{ + free (p); +} + +static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc; +static void * (*gmp_reallocate_func) (void *, size_t, size_t) = gmp_default_realloc; +static void (*gmp_free_func) (void *, size_t) = gmp_default_free; + +void +mp_get_memory_functions (void *(**alloc_func) (size_t), + void *(**realloc_func) (void *, size_t, size_t), + void (**free_func) (void *, size_t)) +{ + if (alloc_func) + *alloc_func = gmp_allocate_func; + + if (realloc_func) + *realloc_func = gmp_reallocate_func; + + if (free_func) + *free_func = gmp_free_func; +} + +void +mp_set_memory_functions (void *(*alloc_func) (size_t), + void *(*realloc_func) (void *, size_t, size_t), + void (*free_func) (void *, size_t)) +{ + if (!alloc_func) + alloc_func = gmp_default_alloc; + if (!realloc_func) + realloc_func = gmp_default_realloc; + if (!free_func) + free_func = gmp_default_free; + + gmp_allocate_func = alloc_func; + gmp_reallocate_func = realloc_func; + gmp_free_func = free_func; +} + +#define gmp_alloc(size) ((*gmp_allocate_func)((size))) +#define gmp_free(p, size) ((*gmp_free_func) ((p), (size))) +#define gmp_realloc(ptr, old_size, size) ((*gmp_reallocate_func)(ptr, old_size, size)) + +static mp_ptr +gmp_alloc_limbs (mp_size_t size) +{ + return (mp_ptr) gmp_alloc (size * sizeof (mp_limb_t)); +} + +static mp_ptr +gmp_realloc_limbs (mp_ptr old, mp_size_t old_size, mp_size_t size) +{ + assert (size > 0); + return (mp_ptr) gmp_realloc (old, old_size * sizeof (mp_limb_t), size * sizeof (mp_limb_t)); +} + +static void +gmp_free_limbs (mp_ptr old, mp_size_t size) +{ + gmp_free (old, size * sizeof (mp_limb_t)); +} + + +/* MPN interface */ + +void +mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n) +{ + mp_size_t i; + for (i = 0; i < n; i++) + d[i] = s[i]; +} + +void +mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n) +{ + while (--n >= 0) + d[n] = s[n]; +} + +int +mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + while (--n >= 0) + { + if (ap[n] != bp[n]) + return ap[n] > bp[n] ? 1 : -1; + } + return 0; +} + +static int +mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) +{ + if (an != bn) + return an < bn ? 
-1 : 1; + else + return mpn_cmp (ap, bp, an); +} + +static mp_size_t +mpn_normalized_size (mp_srcptr xp, mp_size_t n) +{ + while (n > 0 && xp[n-1] == 0) + --n; + return n; +} + +int +mpn_zero_p(mp_srcptr rp, mp_size_t n) +{ + return mpn_normalized_size (rp, n) == 0; +} + +void +mpn_zero (mp_ptr rp, mp_size_t n) +{ + while (--n >= 0) + rp[n] = 0; +} + +mp_limb_t +mpn_add_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) +{ + mp_size_t i; + + assert (n > 0); + i = 0; + do + { + mp_limb_t r = ap[i] + b; + /* Carry out */ + b = (r < b); + rp[i] = r; + } + while (++i < n); + + return b; +} + +mp_limb_t +mpn_add_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + mp_size_t i; + mp_limb_t cy; + + for (i = 0, cy = 0; i < n; i++) + { + mp_limb_t a, b, r; + a = ap[i]; b = bp[i]; + r = a + cy; + cy = (r < cy); + r += b; + cy += (r < b); + rp[i] = r; + } + return cy; +} + +mp_limb_t +mpn_add (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) +{ + mp_limb_t cy; + + assert (an >= bn); + + cy = mpn_add_n (rp, ap, bp, bn); + if (an > bn) + cy = mpn_add_1 (rp + bn, ap + bn, an - bn, cy); + return cy; +} + +mp_limb_t +mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) +{ + mp_size_t i; + + assert (n > 0); + + i = 0; + do + { + mp_limb_t a = ap[i]; + /* Carry out */ + mp_limb_t cy = a < b; + rp[i] = a - b; + b = cy; + } + while (++i < n); + + return b; +} + +mp_limb_t +mpn_sub_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + mp_size_t i; + mp_limb_t cy; + + for (i = 0, cy = 0; i < n; i++) + { + mp_limb_t a, b; + a = ap[i]; b = bp[i]; + b += cy; + cy = (b < cy); + cy += (a < b); + rp[i] = a - b; + } + return cy; +} + +mp_limb_t +mpn_sub (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) +{ + mp_limb_t cy; + + assert (an >= bn); + + cy = mpn_sub_n (rp, ap, bp, bn); + if (an > bn) + cy = mpn_sub_1 (rp + bn, ap + bn, an - bn, cy); + return cy; +} + +mp_limb_t +mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) +{ + mp_limb_t ul, cl, hpl, lpl; + + assert (n >= 1); + + cl = 0; + do + { + ul = *up++; + gmp_umul_ppmm (hpl, lpl, ul, vl); + + lpl += cl; + cl = (lpl < cl) + hpl; + + *rp++ = lpl; + } + while (--n != 0); + + return cl; +} + +mp_limb_t +mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) +{ + mp_limb_t ul, cl, hpl, lpl, rl; + + assert (n >= 1); + + cl = 0; + do + { + ul = *up++; + gmp_umul_ppmm (hpl, lpl, ul, vl); + + lpl += cl; + cl = (lpl < cl) + hpl; + + rl = *rp; + lpl = rl + lpl; + cl += lpl < rl; + *rp++ = lpl; + } + while (--n != 0); + + return cl; +} + +mp_limb_t +mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) +{ + mp_limb_t ul, cl, hpl, lpl, rl; + + assert (n >= 1); + + cl = 0; + do + { + ul = *up++; + gmp_umul_ppmm (hpl, lpl, ul, vl); + + lpl += cl; + cl = (lpl < cl) + hpl; + + rl = *rp; + lpl = rl - lpl; + cl += lpl > rl; + *rp++ = lpl; + } + while (--n != 0); + + return cl; +} + +mp_limb_t +mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn) +{ + assert (un >= vn); + assert (vn >= 1); + assert (!GMP_MPN_OVERLAP_P(rp, un + vn, up, un)); + assert (!GMP_MPN_OVERLAP_P(rp, un + vn, vp, vn)); + + /* We first multiply by the low order limb. This result can be + stored, not added, to rp. We also avoid a loop for zeroing this + way. */ + + rp[un] = mpn_mul_1 (rp, up, un, vp[0]); + + /* Now accumulate the product of up[] and the next higher limb from + vp[]. 
*/ + + while (--vn >= 1) + { + rp += 1, vp += 1; + rp[un] = mpn_addmul_1 (rp, up, un, vp[0]); + } + return rp[un]; +} + +void +mpn_mul_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + mpn_mul (rp, ap, n, bp, n); +} + +void +mpn_sqr (mp_ptr rp, mp_srcptr ap, mp_size_t n) +{ + mpn_mul (rp, ap, n, ap, n); +} + +mp_limb_t +mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) +{ + mp_limb_t high_limb, low_limb; + unsigned int tnc; + mp_limb_t retval; + + assert (n >= 1); + assert (cnt >= 1); + assert (cnt < GMP_LIMB_BITS); + + up += n; + rp += n; + + tnc = GMP_LIMB_BITS - cnt; + low_limb = *--up; + retval = low_limb >> tnc; + high_limb = (low_limb << cnt); + + while (--n != 0) + { + low_limb = *--up; + *--rp = high_limb | (low_limb >> tnc); + high_limb = (low_limb << cnt); + } + *--rp = high_limb; + + return retval; +} + +mp_limb_t +mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) +{ + mp_limb_t high_limb, low_limb; + unsigned int tnc; + mp_limb_t retval; + + assert (n >= 1); + assert (cnt >= 1); + assert (cnt < GMP_LIMB_BITS); + + tnc = GMP_LIMB_BITS - cnt; + high_limb = *up++; + retval = (high_limb << tnc); + low_limb = high_limb >> cnt; + + while (--n != 0) + { + high_limb = *up++; + *rp++ = low_limb | (high_limb << tnc); + low_limb = high_limb >> cnt; + } + *rp = low_limb; + + return retval; +} + +static mp_bitcnt_t +mpn_common_scan (mp_limb_t limb, mp_size_t i, mp_srcptr up, mp_size_t un, + mp_limb_t ux) +{ + unsigned cnt; + + assert (ux == 0 || ux == GMP_LIMB_MAX); + assert (0 <= i && i <= un ); + + while (limb == 0) + { + i++; + if (i == un) + return (ux == 0 ? ~(mp_bitcnt_t) 0 : un * GMP_LIMB_BITS); + limb = ux ^ up[i]; + } + gmp_ctz (cnt, limb); + return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt; +} + +mp_bitcnt_t +mpn_scan1 (mp_srcptr ptr, mp_bitcnt_t bit) +{ + mp_size_t i; + i = bit / GMP_LIMB_BITS; + + return mpn_common_scan ( ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), + i, ptr, i, 0); +} + +mp_bitcnt_t +mpn_scan0 (mp_srcptr ptr, mp_bitcnt_t bit) +{ + mp_size_t i; + i = bit / GMP_LIMB_BITS; + + return mpn_common_scan (~ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), + i, ptr, i, GMP_LIMB_MAX); +} + +void +mpn_com (mp_ptr rp, mp_srcptr up, mp_size_t n) +{ + while (--n >= 0) + *rp++ = ~ *up++; +} + +mp_limb_t +mpn_neg (mp_ptr rp, mp_srcptr up, mp_size_t n) +{ + while (*up == 0) + { + *rp = 0; + if (!--n) + return 0; + ++up; ++rp; + } + *rp = - *up; + mpn_com (++rp, ++up, --n); + return 1; +} + + +/* MPN division interface. */ + +/* The 3/2 inverse is defined as + + m = floor( (B^3-1) / (B u1 + u0)) - B +*/ +mp_limb_t +mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0) +{ + mp_limb_t r, m; + + { + mp_limb_t p, ql; + unsigned ul, uh, qh; + + assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); + /* For notation, let b denote the half-limb base, so that B = b^2. + Split u1 = b uh + ul. */ + ul = u1 & GMP_LLIMB_MASK; + uh = u1 >> (GMP_LIMB_BITS / 2); + + /* Approximation of the high half of quotient. Differs from the 2/1 + inverse of the half limb uh, since we have already subtracted + u0. */ + qh = (u1 ^ GMP_LIMB_MAX) / uh; + + /* Adjust to get a half-limb 3/2 inverse, i.e., we want + + qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u + = floor( (b (~u) + b-1) / u), + + and the remainder + + r = b (~u) + b-1 - qh (b uh + ul) + = b (~u - qh uh) + b-1 - qh ul + + Subtraction of qh ul may underflow, which implies adjustments. + But by normalization, 2 u >= B > qh ul, so we need to adjust by + at most 2. 
+ */ + + r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK; + + p = (mp_limb_t) qh * ul; + /* Adjustment steps taken from udiv_qrnnd_c */ + if (r < p) + { + qh--; + r += u1; + if (r >= u1) /* i.e. we didn't get carry when adding to r */ + if (r < p) + { + qh--; + r += u1; + } + } + r -= p; + + /* Low half of the quotient is + + ql = floor ( (b r + b-1) / u1). + + This is a 3/2 division (on half-limbs), for which qh is a + suitable inverse. */ + + p = (r >> (GMP_LIMB_BITS / 2)) * qh + r; + /* Unlike full-limb 3/2, we can add 1 without overflow. For this to + work, it is essential that ql is a full mp_limb_t. */ + ql = (p >> (GMP_LIMB_BITS / 2)) + 1; + + /* By the 3/2 trick, we don't need the high half limb. */ + r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1; + + if (r >= (GMP_LIMB_MAX & (p << (GMP_LIMB_BITS / 2)))) + { + ql--; + r += u1; + } + m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql; + if (r >= u1) + { + m++; + r -= u1; + } + } + + /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a + 3/2 inverse. */ + if (u0 > 0) + { + mp_limb_t th, tl; + r = ~r; + r += u0; + if (r < u0) + { + m--; + if (r >= u1) + { + m--; + r -= u1; + } + r -= u1; + } + gmp_umul_ppmm (th, tl, u0, m); + r += th; + if (r < th) + { + m--; + m -= ((r > u1) | ((r == u1) & (tl > u0))); + } + } + + return m; +} + +struct gmp_div_inverse +{ + /* Normalization shift count. */ + unsigned shift; + /* Normalized divisor (d0 unused for mpn_div_qr_1) */ + mp_limb_t d1, d0; + /* Inverse, for 2/1 or 3/2. */ + mp_limb_t di; +}; + +static void +mpn_div_qr_1_invert (struct gmp_div_inverse *inv, mp_limb_t d) +{ + unsigned shift; + + assert (d > 0); + gmp_clz (shift, d); + inv->shift = shift; + inv->d1 = d << shift; + inv->di = mpn_invert_limb (inv->d1); +} + +static void +mpn_div_qr_2_invert (struct gmp_div_inverse *inv, + mp_limb_t d1, mp_limb_t d0) +{ + unsigned shift; + + assert (d1 > 0); + gmp_clz (shift, d1); + inv->shift = shift; + if (shift > 0) + { + d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); + d0 <<= shift; + } + inv->d1 = d1; + inv->d0 = d0; + inv->di = mpn_invert_3by2 (d1, d0); +} + +static void +mpn_div_qr_invert (struct gmp_div_inverse *inv, + mp_srcptr dp, mp_size_t dn) +{ + assert (dn > 0); + + if (dn == 1) + mpn_div_qr_1_invert (inv, dp[0]); + else if (dn == 2) + mpn_div_qr_2_invert (inv, dp[1], dp[0]); + else + { + unsigned shift; + mp_limb_t d1, d0; + + d1 = dp[dn-1]; + d0 = dp[dn-2]; + assert (d1 > 0); + gmp_clz (shift, d1); + inv->shift = shift; + if (shift > 0) + { + d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); + d0 = (d0 << shift) | (dp[dn-3] >> (GMP_LIMB_BITS - shift)); + } + inv->d1 = d1; + inv->d0 = d0; + inv->di = mpn_invert_3by2 (d1, d0); + } +} + +/* Not matching current public gmp interface, rather corresponding to + the sbpi1_div_* functions. */ +static mp_limb_t +mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn, + const struct gmp_div_inverse *inv) +{ + mp_limb_t d, di; + mp_limb_t r; + mp_ptr tp = NULL; + mp_size_t tn = 0; + + if (inv->shift > 0) + { + /* Shift, reusing qp area if possible. In-place shift if qp == np. 
*/ + tp = qp; + if (!tp) + { + tn = nn; + tp = gmp_alloc_limbs (tn); + } + r = mpn_lshift (tp, np, nn, inv->shift); + np = tp; + } + else + r = 0; + + d = inv->d1; + di = inv->di; + while (--nn >= 0) + { + mp_limb_t q; + + gmp_udiv_qrnnd_preinv (q, r, r, np[nn], d, di); + if (qp) + qp[nn] = q; + } + if (tn) + gmp_free_limbs (tp, tn); + + return r >> inv->shift; +} + +static void +mpn_div_qr_2_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, + const struct gmp_div_inverse *inv) +{ + unsigned shift; + mp_size_t i; + mp_limb_t d1, d0, di, r1, r0; + + assert (nn >= 2); + shift = inv->shift; + d1 = inv->d1; + d0 = inv->d0; + di = inv->di; + + if (shift > 0) + r1 = mpn_lshift (np, np, nn, shift); + else + r1 = 0; + + r0 = np[nn - 1]; + + i = nn - 2; + do + { + mp_limb_t n0, q; + n0 = np[i]; + gmp_udiv_qr_3by2 (q, r1, r0, r1, r0, n0, d1, d0, di); + + if (qp) + qp[i] = q; + } + while (--i >= 0); + + if (shift > 0) + { + assert ((r0 & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - shift))) == 0); + r0 = (r0 >> shift) | (r1 << (GMP_LIMB_BITS - shift)); + r1 >>= shift; + } + + np[1] = r1; + np[0] = r0; +} + +static void +mpn_div_qr_pi1 (mp_ptr qp, + mp_ptr np, mp_size_t nn, mp_limb_t n1, + mp_srcptr dp, mp_size_t dn, + mp_limb_t dinv) +{ + mp_size_t i; + + mp_limb_t d1, d0; + mp_limb_t cy, cy1; + mp_limb_t q; + + assert (dn > 2); + assert (nn >= dn); + + d1 = dp[dn - 1]; + d0 = dp[dn - 2]; + + assert ((d1 & GMP_LIMB_HIGHBIT) != 0); + /* Iteration variable is the index of the q limb. + * + * We divide + * by + */ + + i = nn - dn; + do + { + mp_limb_t n0 = np[dn-1+i]; + + if (n1 == d1 && n0 == d0) + { + q = GMP_LIMB_MAX; + mpn_submul_1 (np+i, dp, dn, q); + n1 = np[dn-1+i]; /* update n1, last loop's value will now be invalid */ + } + else + { + gmp_udiv_qr_3by2 (q, n1, n0, n1, n0, np[dn-2+i], d1, d0, dinv); + + cy = mpn_submul_1 (np + i, dp, dn-2, q); + + cy1 = n0 < cy; + n0 = n0 - cy; + cy = n1 < cy1; + n1 = n1 - cy1; + np[dn-2+i] = n0; + + if (cy != 0) + { + n1 += d1 + mpn_add_n (np + i, np + i, dp, dn - 1); + q--; + } + } + + if (qp) + qp[i] = q; + } + while (--i >= 0); + + np[dn - 1] = n1; +} + +static void +mpn_div_qr_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, + mp_srcptr dp, mp_size_t dn, + const struct gmp_div_inverse *inv) +{ + assert (dn > 0); + assert (nn >= dn); + + if (dn == 1) + np[0] = mpn_div_qr_1_preinv (qp, np, nn, inv); + else if (dn == 2) + mpn_div_qr_2_preinv (qp, np, nn, inv); + else + { + mp_limb_t nh; + unsigned shift; + + assert (inv->d1 == dp[dn-1]); + assert (inv->d0 == dp[dn-2]); + assert ((inv->d1 & GMP_LIMB_HIGHBIT) != 0); + + shift = inv->shift; + if (shift > 0) + nh = mpn_lshift (np, np, nn, shift); + else + nh = 0; + + mpn_div_qr_pi1 (qp, np, nn, nh, dp, dn, inv->di); + + if (shift > 0) + gmp_assert_nocarry (mpn_rshift (np, np, dn, shift)); + } +} + +static void +mpn_div_qr (mp_ptr qp, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn) +{ + struct gmp_div_inverse inv; + mp_ptr tp = NULL; + + assert (dn > 0); + assert (nn >= dn); + + mpn_div_qr_invert (&inv, dp, dn); + if (dn > 2 && inv.shift > 0) + { + tp = gmp_alloc_limbs (dn); + gmp_assert_nocarry (mpn_lshift (tp, dp, dn, inv.shift)); + dp = tp; + } + mpn_div_qr_preinv (qp, np, nn, dp, dn, &inv); + if (tp) + gmp_free_limbs (tp, dn); +} + + +/* MPN base conversion. 
*/ +static unsigned +mpn_base_power_of_two_p (unsigned b) +{ + switch (b) + { + case 2: return 1; + case 4: return 2; + case 8: return 3; + case 16: return 4; + case 32: return 5; + case 64: return 6; + case 128: return 7; + case 256: return 8; + default: return 0; + } +} + +struct mpn_base_info +{ + /* bb is the largest power of the base which fits in one limb, and + exp is the corresponding exponent. */ + unsigned exp; + mp_limb_t bb; +}; + +static void +mpn_get_base_info (struct mpn_base_info *info, mp_limb_t b) +{ + mp_limb_t m; + mp_limb_t p; + unsigned exp; + + m = GMP_LIMB_MAX / b; + for (exp = 1, p = b; p <= m; exp++) + p *= b; + + info->exp = exp; + info->bb = p; +} + +static mp_bitcnt_t +mpn_limb_size_in_base_2 (mp_limb_t u) +{ + unsigned shift; + + assert (u > 0); + gmp_clz (shift, u); + return GMP_LIMB_BITS - shift; +} + +static size_t +mpn_get_str_bits (unsigned char *sp, unsigned bits, mp_srcptr up, mp_size_t un) +{ + unsigned char mask; + size_t sn, j; + mp_size_t i; + unsigned shift; + + sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]) + + bits - 1) / bits; + + mask = (1U << bits) - 1; + + for (i = 0, j = sn, shift = 0; j-- > 0;) + { + unsigned char digit = up[i] >> shift; + + shift += bits; + + if (shift >= GMP_LIMB_BITS && ++i < un) + { + shift -= GMP_LIMB_BITS; + digit |= up[i] << (bits - shift); + } + sp[j] = digit & mask; + } + return sn; +} + +/* We generate digits from the least significant end, and reverse at + the end. */ +static size_t +mpn_limb_get_str (unsigned char *sp, mp_limb_t w, + const struct gmp_div_inverse *binv) +{ + mp_size_t i; + for (i = 0; w > 0; i++) + { + mp_limb_t h, l, r; + + h = w >> (GMP_LIMB_BITS - binv->shift); + l = w << binv->shift; + + gmp_udiv_qrnnd_preinv (w, r, h, l, binv->d1, binv->di); + assert ((r & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - binv->shift))) == 0); + r >>= binv->shift; + + sp[i] = r; + } + return i; +} + +static size_t +mpn_get_str_other (unsigned char *sp, + int base, const struct mpn_base_info *info, + mp_ptr up, mp_size_t un) +{ + struct gmp_div_inverse binv; + size_t sn; + size_t i; + + mpn_div_qr_1_invert (&binv, base); + + sn = 0; + + if (un > 1) + { + struct gmp_div_inverse bbinv; + mpn_div_qr_1_invert (&bbinv, info->bb); + + do + { + mp_limb_t w; + size_t done; + w = mpn_div_qr_1_preinv (up, up, un, &bbinv); + un -= (up[un-1] == 0); + done = mpn_limb_get_str (sp + sn, w, &binv); + + for (sn += done; done < info->exp; done++) + sp[sn++] = 0; + } + while (un > 1); + } + sn += mpn_limb_get_str (sp + sn, up[0], &binv); + + /* Reverse order */ + for (i = 0; 2*i + 1 < sn; i++) + { + unsigned char t = sp[i]; + sp[i] = sp[sn - i - 1]; + sp[sn - i - 1] = t; + } + + return sn; +} + +size_t +mpn_get_str (unsigned char *sp, int base, mp_ptr up, mp_size_t un) +{ + unsigned bits; + + assert (un > 0); + assert (up[un-1] > 0); + + bits = mpn_base_power_of_two_p (base); + if (bits) + return mpn_get_str_bits (sp, bits, up, un); + else + { + struct mpn_base_info info; + + mpn_get_base_info (&info, base); + return mpn_get_str_other (sp, base, &info, up, un); + } +} + +static mp_size_t +mpn_set_str_bits (mp_ptr rp, const unsigned char *sp, size_t sn, + unsigned bits) +{ + mp_size_t rn; + mp_limb_t limb; + unsigned shift; + + for (limb = 0, rn = 0, shift = 0; sn-- > 0; ) + { + limb |= (mp_limb_t) sp[sn] << shift; + shift += bits; + if (shift >= GMP_LIMB_BITS) + { + shift -= GMP_LIMB_BITS; + rp[rn++] = limb; + /* Next line is correct also if shift == 0, + bits == 8, and mp_limb_t == unsigned char. 
*/ + limb = (unsigned int) sp[sn] >> (bits - shift); + } + } + if (limb != 0) + rp[rn++] = limb; + else + rn = mpn_normalized_size (rp, rn); + return rn; +} + +/* Result is usually normalized, except for all-zero input, in which + case a single zero limb is written at *RP, and 1 is returned. */ +static mp_size_t +mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn, + mp_limb_t b, const struct mpn_base_info *info) +{ + mp_size_t rn; + mp_limb_t w; + unsigned k; + size_t j; + + assert (sn > 0); + + k = 1 + (sn - 1) % info->exp; + + j = 0; + w = sp[j++]; + while (--k != 0) + w = w * b + sp[j++]; + + rp[0] = w; + + for (rn = 1; j < sn;) + { + mp_limb_t cy; + + w = sp[j++]; + for (k = 1; k < info->exp; k++) + w = w * b + sp[j++]; + + cy = mpn_mul_1 (rp, rp, rn, info->bb); + cy += mpn_add_1 (rp, rp, rn, w); + if (cy > 0) + rp[rn++] = cy; + } + assert (j == sn); + + return rn; +} + +mp_size_t +mpn_set_str (mp_ptr rp, const unsigned char *sp, size_t sn, int base) +{ + unsigned bits; + + if (sn == 0) + return 0; + + bits = mpn_base_power_of_two_p (base); + if (bits) + return mpn_set_str_bits (rp, sp, sn, bits); + else + { + struct mpn_base_info info; + + mpn_get_base_info (&info, base); + return mpn_set_str_other (rp, sp, sn, base, &info); + } +} + + +/* MPZ interface */ +void +mpz_init (mpz_t r) +{ + static const mp_limb_t dummy_limb = GMP_LIMB_MAX & 0xc1a0; + + r->_mp_alloc = 0; + r->_mp_size = 0; + r->_mp_d = (mp_ptr) &dummy_limb; +} + +/* The utility of this function is a bit limited, since many functions + assigns the result variable using mpz_swap. */ +void +mpz_init2 (mpz_t r, mp_bitcnt_t bits) +{ + mp_size_t rn; + + bits -= (bits != 0); /* Round down, except if 0 */ + rn = 1 + bits / GMP_LIMB_BITS; + + r->_mp_alloc = rn; + r->_mp_size = 0; + r->_mp_d = gmp_alloc_limbs (rn); +} + +void +mpz_clear (mpz_t r) +{ + if (r->_mp_alloc) + gmp_free_limbs (r->_mp_d, r->_mp_alloc); +} + +static mp_ptr +mpz_realloc (mpz_t r, mp_size_t size) +{ + size = GMP_MAX (size, 1); + + if (r->_mp_alloc) + r->_mp_d = gmp_realloc_limbs (r->_mp_d, r->_mp_alloc, size); + else + r->_mp_d = gmp_alloc_limbs (size); + r->_mp_alloc = size; + + if (GMP_ABS (r->_mp_size) > size) + r->_mp_size = 0; + + return r->_mp_d; +} + +/* Realloc for an mpz_t WHAT if it has less than NEEDED limbs. */ +#define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc \ + ? mpz_realloc(z,n) \ + : (z)->_mp_d) + +/* MPZ assignment and basic conversions. 
*/ +void +mpz_set_si (mpz_t r, signed long int x) +{ + if (x >= 0) + mpz_set_ui (r, x); + else /* (x < 0) */ + if (GMP_LIMB_BITS < GMP_ULONG_BITS) + { + mpz_set_ui (r, GMP_NEG_CAST (unsigned long int, x)); + mpz_neg (r, r); + } + else + { + r->_mp_size = -1; + MPZ_REALLOC (r, 1)[0] = GMP_NEG_CAST (unsigned long int, x); + } +} + +void +mpz_set_ui (mpz_t r, unsigned long int x) +{ + if (x > 0) + { + r->_mp_size = 1; + MPZ_REALLOC (r, 1)[0] = x; + if (GMP_LIMB_BITS < GMP_ULONG_BITS) + { + int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; + while (x >>= LOCAL_GMP_LIMB_BITS) + { + ++ r->_mp_size; + MPZ_REALLOC (r, r->_mp_size)[r->_mp_size - 1] = x; + } + } + } + else + r->_mp_size = 0; +} + +void +mpz_set (mpz_t r, const mpz_t x) +{ + /* Allow the NOP r == x */ + if (r != x) + { + mp_size_t n; + mp_ptr rp; + + n = GMP_ABS (x->_mp_size); + rp = MPZ_REALLOC (r, n); + + mpn_copyi (rp, x->_mp_d, n); + r->_mp_size = x->_mp_size; + } +} + +void +mpz_init_set_si (mpz_t r, signed long int x) +{ + mpz_init (r); + mpz_set_si (r, x); +} + +void +mpz_init_set_ui (mpz_t r, unsigned long int x) +{ + mpz_init (r); + mpz_set_ui (r, x); +} + +void +mpz_init_set (mpz_t r, const mpz_t x) +{ + mpz_init (r); + mpz_set (r, x); +} + +int +mpz_fits_slong_p (const mpz_t u) +{ + return mpz_cmp_si (u, LONG_MAX) <= 0 && mpz_cmp_si (u, LONG_MIN) >= 0; +} + +static int +mpn_absfits_ulong_p (mp_srcptr up, mp_size_t un) +{ + int ulongsize = GMP_ULONG_BITS / GMP_LIMB_BITS; + mp_limb_t ulongrem = 0; + + if (GMP_ULONG_BITS % GMP_LIMB_BITS != 0) + ulongrem = (mp_limb_t) (ULONG_MAX >> GMP_LIMB_BITS * ulongsize) + 1; + + return un <= ulongsize || (up[ulongsize] < ulongrem && un == ulongsize + 1); +} + +int +mpz_fits_ulong_p (const mpz_t u) +{ + mp_size_t us = u->_mp_size; + + return us >= 0 && mpn_absfits_ulong_p (u->_mp_d, us); +} + +int +mpz_fits_sint_p (const mpz_t u) +{ + return mpz_cmp_si (u, INT_MAX) <= 0 && mpz_cmp_si (u, INT_MIN) >= 0; +} + +int +mpz_fits_uint_p (const mpz_t u) +{ + return u->_mp_size >= 0 && mpz_cmpabs_ui (u, UINT_MAX) <= 0; +} + +int +mpz_fits_sshort_p (const mpz_t u) +{ + return mpz_cmp_si (u, SHRT_MAX) <= 0 && mpz_cmp_si (u, SHRT_MIN) >= 0; +} + +int +mpz_fits_ushort_p (const mpz_t u) +{ + return u->_mp_size >= 0 && mpz_cmpabs_ui (u, USHRT_MAX) <= 0; +} + +long int +mpz_get_si (const mpz_t u) +{ + unsigned long r = mpz_get_ui (u); + unsigned long c = -LONG_MAX - LONG_MIN; + + if (u->_mp_size < 0) + /* This expression is necessary to properly handle -LONG_MIN */ + return -(long) c - (long) ((r - c) & LONG_MAX); + else + return (long) (r & LONG_MAX); +} + +unsigned long int +mpz_get_ui (const mpz_t u) +{ + if (GMP_LIMB_BITS < GMP_ULONG_BITS) + { + int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; + unsigned long r = 0; + mp_size_t n = GMP_ABS (u->_mp_size); + n = GMP_MIN (n, 1 + (mp_size_t) (GMP_ULONG_BITS - 1) / GMP_LIMB_BITS); + while (--n >= 0) + r = (r << LOCAL_GMP_LIMB_BITS) + u->_mp_d[n]; + return r; + } + + return u->_mp_size == 0 ? 
0 : u->_mp_d[0]; +} + +size_t +mpz_size (const mpz_t u) +{ + return GMP_ABS (u->_mp_size); +} + +mp_limb_t +mpz_getlimbn (const mpz_t u, mp_size_t n) +{ + if (n >= 0 && n < GMP_ABS (u->_mp_size)) + return u->_mp_d[n]; + else + return 0; +} + +void +mpz_realloc2 (mpz_t x, mp_bitcnt_t n) +{ + mpz_realloc (x, 1 + (n - (n != 0)) / GMP_LIMB_BITS); +} + +mp_srcptr +mpz_limbs_read (mpz_srcptr x) +{ + return x->_mp_d; +} + +mp_ptr +mpz_limbs_modify (mpz_t x, mp_size_t n) +{ + assert (n > 0); + return MPZ_REALLOC (x, n); +} + +mp_ptr +mpz_limbs_write (mpz_t x, mp_size_t n) +{ + return mpz_limbs_modify (x, n); +} + +void +mpz_limbs_finish (mpz_t x, mp_size_t xs) +{ + mp_size_t xn; + xn = mpn_normalized_size (x->_mp_d, GMP_ABS (xs)); + x->_mp_size = xs < 0 ? -xn : xn; +} + +static mpz_srcptr +mpz_roinit_normal_n (mpz_t x, mp_srcptr xp, mp_size_t xs) +{ + x->_mp_alloc = 0; + x->_mp_d = (mp_ptr) xp; + x->_mp_size = xs; + return x; +} + +mpz_srcptr +mpz_roinit_n (mpz_t x, mp_srcptr xp, mp_size_t xs) +{ + mpz_roinit_normal_n (x, xp, xs); + mpz_limbs_finish (x, xs); + return x; +} + + +/* Conversions and comparison to double. */ +void +mpz_set_d (mpz_t r, double x) +{ + int sign; + mp_ptr rp; + mp_size_t rn, i; + double B; + double Bi; + mp_limb_t f; + + /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is + zero or infinity. */ + if (x != x || x == x * 0.5) + { + r->_mp_size = 0; + return; + } + + sign = x < 0.0 ; + if (sign) + x = - x; + + if (x < 1.0) + { + r->_mp_size = 0; + return; + } + B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); + Bi = 1.0 / B; + for (rn = 1; x >= B; rn++) + x *= Bi; + + rp = MPZ_REALLOC (r, rn); + + f = (mp_limb_t) x; + x -= f; + assert (x < 1.0); + i = rn-1; + rp[i] = f; + while (--i >= 0) + { + x = B * x; + f = (mp_limb_t) x; + x -= f; + assert (x < 1.0); + rp[i] = f; + } + + r->_mp_size = sign ? - rn : rn; +} + +void +mpz_init_set_d (mpz_t r, double x) +{ + mpz_init (r); + mpz_set_d (r, x); +} + +double +mpz_get_d (const mpz_t u) +{ + int m; + mp_limb_t l; + mp_size_t un; + double x; + double B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); + + un = GMP_ABS (u->_mp_size); + + if (un == 0) + return 0.0; + + l = u->_mp_d[--un]; + gmp_clz (m, l); + m = m + GMP_DBL_MANT_BITS - GMP_LIMB_BITS; + if (m < 0) + l &= GMP_LIMB_MAX << -m; + + for (x = l; --un >= 0;) + { + x = B*x; + if (m > 0) { + l = u->_mp_d[un]; + m -= GMP_LIMB_BITS; + if (m < 0) + l &= GMP_LIMB_MAX << -m; + x += l; + } + } + + if (u->_mp_size < 0) + x = -x; + + return x; +} + +int +mpz_cmpabs_d (const mpz_t x, double d) +{ + mp_size_t xn; + double B, Bi; + mp_size_t i; + + xn = x->_mp_size; + d = GMP_ABS (d); + + if (xn != 0) + { + xn = GMP_ABS (xn); + + B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); + Bi = 1.0 / B; + + /* Scale d so it can be compared with the top limb. */ + for (i = 1; i < xn; i++) + d *= Bi; + + if (d >= B) + return -1; + + /* Compare floor(d) to top limb, subtract and cancel when equal. */ + for (i = xn; i-- > 0;) + { + mp_limb_t f, xl; + + f = (mp_limb_t) d; + xl = x->_mp_d[i]; + if (xl > f) + return 1; + else if (xl < f) + return -1; + d = B * (d - f); + } + } + return - (d > 0.0); +} + +int +mpz_cmp_d (const mpz_t x, double d) +{ + if (x->_mp_size < 0) + { + if (d >= 0.0) + return -1; + else + return -mpz_cmpabs_d (x, d); + } + else + { + if (d < 0.0) + return 1; + else + return mpz_cmpabs_d (x, d); + } +} + + +/* MPZ comparisons and the like. 
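The double conversions just defined truncate toward zero, and mpz_cmp_d compares the exact integer against the exact value of the double rather than rounding the integer first. A short sketch of the resulting behaviour (assumes mini-gmp.h is available):

#include <stdio.h>
#include "mini-gmp.h"

int main(void)
{
  mpz_t z;

  mpz_init_set_d (z, 2.75);                 /* truncates toward zero: 2 */
  printf ("%lu\n", mpz_get_ui (z));
  mpz_set_d (z, -2.75);                     /* and -2.75 becomes -2 */
  printf ("%ld\n", mpz_get_si (z));

  /* (double)(2^90 - 1) rounds up to 2^90, but mpz_cmp_d compares the
     exact integer against the exact double, so it still sees z < 2^90. */
  mpz_set_ui (z, 1);
  mpz_mul_2exp (z, z, 90);
  mpz_sub_ui (z, z, 1);                     /* z = 2^90 - 1 */
  printf ("%d\n", mpz_cmp_d (z, 1237940039285380274899124224.0));  /* -1 */

  mpz_clear (z);
  return 0;
}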
*/ +int +mpz_sgn (const mpz_t u) +{ + return GMP_CMP (u->_mp_size, 0); +} + +int +mpz_cmp_si (const mpz_t u, long v) +{ + mp_size_t usize = u->_mp_size; + + if (v >= 0) + return mpz_cmp_ui (u, v); + else if (usize >= 0) + return 1; + else + return - mpz_cmpabs_ui (u, GMP_NEG_CAST (unsigned long int, v)); +} + +int +mpz_cmp_ui (const mpz_t u, unsigned long v) +{ + mp_size_t usize = u->_mp_size; + + if (usize < 0) + return -1; + else + return mpz_cmpabs_ui (u, v); +} + +int +mpz_cmp (const mpz_t a, const mpz_t b) +{ + mp_size_t asize = a->_mp_size; + mp_size_t bsize = b->_mp_size; + + if (asize != bsize) + return (asize < bsize) ? -1 : 1; + else if (asize >= 0) + return mpn_cmp (a->_mp_d, b->_mp_d, asize); + else + return mpn_cmp (b->_mp_d, a->_mp_d, -asize); +} + +int +mpz_cmpabs_ui (const mpz_t u, unsigned long v) +{ + mp_size_t un = GMP_ABS (u->_mp_size); + + if (! mpn_absfits_ulong_p (u->_mp_d, un)) + return 1; + else + { + unsigned long uu = mpz_get_ui (u); + return GMP_CMP(uu, v); + } +} + +int +mpz_cmpabs (const mpz_t u, const mpz_t v) +{ + return mpn_cmp4 (u->_mp_d, GMP_ABS (u->_mp_size), + v->_mp_d, GMP_ABS (v->_mp_size)); +} + +void +mpz_abs (mpz_t r, const mpz_t u) +{ + mpz_set (r, u); + r->_mp_size = GMP_ABS (r->_mp_size); +} + +void +mpz_neg (mpz_t r, const mpz_t u) +{ + mpz_set (r, u); + r->_mp_size = -r->_mp_size; +} + +void +mpz_swap (mpz_t u, mpz_t v) +{ + MP_SIZE_T_SWAP (u->_mp_alloc, v->_mp_alloc); + MPN_PTR_SWAP (u->_mp_d, u->_mp_size, v->_mp_d, v->_mp_size); +} + + +/* MPZ addition and subtraction */ + + +void +mpz_add_ui (mpz_t r, const mpz_t a, unsigned long b) +{ + mpz_t bb; + mpz_init_set_ui (bb, b); + mpz_add (r, a, bb); + mpz_clear (bb); +} + +void +mpz_sub_ui (mpz_t r, const mpz_t a, unsigned long b) +{ + mpz_ui_sub (r, b, a); + mpz_neg (r, r); +} + +void +mpz_ui_sub (mpz_t r, unsigned long a, const mpz_t b) +{ + mpz_neg (r, b); + mpz_add_ui (r, r, a); +} + +static mp_size_t +mpz_abs_add (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t an = GMP_ABS (a->_mp_size); + mp_size_t bn = GMP_ABS (b->_mp_size); + mp_ptr rp; + mp_limb_t cy; + + if (an < bn) + { + MPZ_SRCPTR_SWAP (a, b); + MP_SIZE_T_SWAP (an, bn); + } + + rp = MPZ_REALLOC (r, an + 1); + cy = mpn_add (rp, a->_mp_d, an, b->_mp_d, bn); + + rp[an] = cy; + + return an + cy; +} + +static mp_size_t +mpz_abs_sub (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t an = GMP_ABS (a->_mp_size); + mp_size_t bn = GMP_ABS (b->_mp_size); + int cmp; + mp_ptr rp; + + cmp = mpn_cmp4 (a->_mp_d, an, b->_mp_d, bn); + if (cmp > 0) + { + rp = MPZ_REALLOC (r, an); + gmp_assert_nocarry (mpn_sub (rp, a->_mp_d, an, b->_mp_d, bn)); + return mpn_normalized_size (rp, an); + } + else if (cmp < 0) + { + rp = MPZ_REALLOC (r, bn); + gmp_assert_nocarry (mpn_sub (rp, b->_mp_d, bn, a->_mp_d, an)); + return -mpn_normalized_size (rp, bn); + } + else + return 0; +} + +void +mpz_add (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t rn; + + if ( (a->_mp_size ^ b->_mp_size) >= 0) + rn = mpz_abs_add (r, a, b); + else + rn = mpz_abs_sub (r, a, b); + + r->_mp_size = a->_mp_size >= 0 ? rn : - rn; +} + +void +mpz_sub (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t rn; + + if ( (a->_mp_size ^ b->_mp_size) >= 0) + rn = mpz_abs_sub (r, a, b); + else + rn = mpz_abs_add (r, a, b); + + r->_mp_size = a->_mp_size >= 0 ? 
rn : - rn; +} + + +/* MPZ multiplication */ +void +mpz_mul_si (mpz_t r, const mpz_t u, long int v) +{ + if (v < 0) + { + mpz_mul_ui (r, u, GMP_NEG_CAST (unsigned long int, v)); + mpz_neg (r, r); + } + else + mpz_mul_ui (r, u, v); +} + +void +mpz_mul_ui (mpz_t r, const mpz_t u, unsigned long int v) +{ + mpz_t vv; + mpz_init_set_ui (vv, v); + mpz_mul (r, u, vv); + mpz_clear (vv); + return; +} + +void +mpz_mul (mpz_t r, const mpz_t u, const mpz_t v) +{ + int sign; + mp_size_t un, vn, rn; + mpz_t t; + mp_ptr tp; + + un = u->_mp_size; + vn = v->_mp_size; + + if (un == 0 || vn == 0) + { + r->_mp_size = 0; + return; + } + + sign = (un ^ vn) < 0; + + un = GMP_ABS (un); + vn = GMP_ABS (vn); + + mpz_init2 (t, (un + vn) * GMP_LIMB_BITS); + + tp = t->_mp_d; + if (un >= vn) + mpn_mul (tp, u->_mp_d, un, v->_mp_d, vn); + else + mpn_mul (tp, v->_mp_d, vn, u->_mp_d, un); + + rn = un + vn; + rn -= tp[rn-1] == 0; + + t->_mp_size = sign ? - rn : rn; + mpz_swap (r, t); + mpz_clear (t); +} + +void +mpz_mul_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bits) +{ + mp_size_t un, rn; + mp_size_t limbs; + unsigned shift; + mp_ptr rp; + + un = GMP_ABS (u->_mp_size); + if (un == 0) + { + r->_mp_size = 0; + return; + } + + limbs = bits / GMP_LIMB_BITS; + shift = bits % GMP_LIMB_BITS; + + rn = un + limbs + (shift > 0); + rp = MPZ_REALLOC (r, rn); + if (shift > 0) + { + mp_limb_t cy = mpn_lshift (rp + limbs, u->_mp_d, un, shift); + rp[rn-1] = cy; + rn -= (cy == 0); + } + else + mpn_copyd (rp + limbs, u->_mp_d, un); + + mpn_zero (rp, limbs); + + r->_mp_size = (u->_mp_size < 0) ? - rn : rn; +} + +void +mpz_addmul_ui (mpz_t r, const mpz_t u, unsigned long int v) +{ + mpz_t t; + mpz_init_set_ui (t, v); + mpz_mul (t, u, t); + mpz_add (r, r, t); + mpz_clear (t); +} + +void +mpz_submul_ui (mpz_t r, const mpz_t u, unsigned long int v) +{ + mpz_t t; + mpz_init_set_ui (t, v); + mpz_mul (t, u, t); + mpz_sub (r, r, t); + mpz_clear (t); +} + +void +mpz_addmul (mpz_t r, const mpz_t u, const mpz_t v) +{ + mpz_t t; + mpz_init (t); + mpz_mul (t, u, v); + mpz_add (r, r, t); + mpz_clear (t); +} + +void +mpz_submul (mpz_t r, const mpz_t u, const mpz_t v) +{ + mpz_t t; + mpz_init (t); + mpz_mul (t, u, v); + mpz_sub (r, r, t); + mpz_clear (t); +} + + +/* MPZ division */ +enum mpz_div_round_mode { GMP_DIV_FLOOR, GMP_DIV_CEIL, GMP_DIV_TRUNC }; + +/* Allows q or r to be zero. Returns 1 iff remainder is non-zero. */ +static int +mpz_div_qr (mpz_t q, mpz_t r, + const mpz_t n, const mpz_t d, enum mpz_div_round_mode mode) +{ + mp_size_t ns, ds, nn, dn, qs; + ns = n->_mp_size; + ds = d->_mp_size; + + if (ds == 0) + gmp_die("mpz_div_qr: Divide by zero."); + + if (ns == 0) + { + if (q) + q->_mp_size = 0; + if (r) + r->_mp_size = 0; + return 0; + } + + nn = GMP_ABS (ns); + dn = GMP_ABS (ds); + + qs = ds ^ ns; + + if (nn < dn) + { + if (mode == GMP_DIV_CEIL && qs >= 0) + { + /* q = 1, r = n - d */ + if (r) + mpz_sub (r, n, d); + if (q) + mpz_set_ui (q, 1); + } + else if (mode == GMP_DIV_FLOOR && qs < 0) + { + /* q = -1, r = n + d */ + if (r) + mpz_add (r, n, d); + if (q) + mpz_set_si (q, -1); + } + else + { + /* q = 0, r = d */ + if (r) + mpz_set (r, n); + if (q) + q->_mp_size = 0; + } + return 1; + } + else + { + mp_ptr np, qp; + mp_size_t qn, rn; + mpz_t tq, tr; + + mpz_init_set (tr, n); + np = tr->_mp_d; + + qn = nn - dn + 1; + + if (q) + { + mpz_init2 (tq, qn * GMP_LIMB_BITS); + qp = tq->_mp_d; + } + else + qp = NULL; + + mpn_div_qr (qp, np, nn, d->_mp_d, dn); + + if (qp) + { + qn -= (qp[qn-1] == 0); + + tq->_mp_size = qs < 0 ? 
-qn : qn; + } + rn = mpn_normalized_size (np, dn); + tr->_mp_size = ns < 0 ? - rn : rn; + + if (mode == GMP_DIV_FLOOR && qs < 0 && rn != 0) + { + if (q) + mpz_sub_ui (tq, tq, 1); + if (r) + mpz_add (tr, tr, d); + } + else if (mode == GMP_DIV_CEIL && qs >= 0 && rn != 0) + { + if (q) + mpz_add_ui (tq, tq, 1); + if (r) + mpz_sub (tr, tr, d); + } + + if (q) + { + mpz_swap (tq, q); + mpz_clear (tq); + } + if (r) + mpz_swap (tr, r); + + mpz_clear (tr); + + return rn != 0; + } +} + +void +mpz_cdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, r, n, d, GMP_DIV_CEIL); +} + +void +mpz_fdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, r, n, d, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, r, n, d, GMP_DIV_TRUNC); +} + +void +mpz_cdiv_q (mpz_t q, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, NULL, n, d, GMP_DIV_CEIL); +} + +void +mpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, NULL, n, d, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_q (mpz_t q, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC); +} + +void +mpz_cdiv_r (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL); +} + +void +mpz_fdiv_r (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_r (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, GMP_DIV_TRUNC); +} + +void +mpz_mod (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, d->_mp_size >= 0 ? GMP_DIV_FLOOR : GMP_DIV_CEIL); +} + +static void +mpz_div_q_2exp (mpz_t q, const mpz_t u, mp_bitcnt_t bit_index, + enum mpz_div_round_mode mode) +{ + mp_size_t un, qn; + mp_size_t limb_cnt; + mp_ptr qp; + int adjust; + + un = u->_mp_size; + if (un == 0) + { + q->_mp_size = 0; + return; + } + limb_cnt = bit_index / GMP_LIMB_BITS; + qn = GMP_ABS (un) - limb_cnt; + bit_index %= GMP_LIMB_BITS; + + if (mode == ((un > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* un != 0 here. */ + /* Note: Below, the final indexing at limb_cnt is valid because at + that point we have qn > 0. */ + adjust = (qn <= 0 + || !mpn_zero_p (u->_mp_d, limb_cnt) + || (u->_mp_d[limb_cnt] + & (((mp_limb_t) 1 << bit_index) - 1))); + else + adjust = 0; + + if (qn <= 0) + qn = 0; + else + { + qp = MPZ_REALLOC (q, qn); + + if (bit_index != 0) + { + mpn_rshift (qp, u->_mp_d + limb_cnt, qn, bit_index); + qn -= qp[qn - 1] == 0; + } + else + { + mpn_copyi (qp, u->_mp_d + limb_cnt, qn); + } + } + + q->_mp_size = qn; + + if (adjust) + mpz_add_ui (q, q, 1); + if (un < 0) + mpz_neg (q, q); +} + +static void +mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index, + enum mpz_div_round_mode mode) +{ + mp_size_t us, un, rn; + mp_ptr rp; + mp_limb_t mask; + + us = u->_mp_size; + if (us == 0 || bit_index == 0) + { + r->_mp_size = 0; + return; + } + rn = (bit_index + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; + assert (rn > 0); + + rp = MPZ_REALLOC (r, rn); + un = GMP_ABS (us); + + mask = GMP_LIMB_MAX >> (rn * GMP_LIMB_BITS - bit_index); + + if (rn > un) + { + /* Quotient (with truncation) is zero, and remainder is + non-zero */ + if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ + { + /* Have to negate and sign extend. */ + mp_size_t i; + + gmp_assert_nocarry (! 
mpn_neg (rp, u->_mp_d, un)); + for (i = un; i < rn - 1; i++) + rp[i] = GMP_LIMB_MAX; + + rp[rn-1] = mask; + us = -us; + } + else + { + /* Just copy */ + if (r != u) + mpn_copyi (rp, u->_mp_d, un); + + rn = un; + } + } + else + { + if (r != u) + mpn_copyi (rp, u->_mp_d, rn - 1); + + rp[rn-1] = u->_mp_d[rn-1] & mask; + + if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ + { + /* If r != 0, compute 2^{bit_count} - r. */ + mpn_neg (rp, rp, rn); + + rp[rn-1] &= mask; + + /* us is not used for anything else, so we can modify it + here to indicate flipped sign. */ + us = -us; + } + } + rn = mpn_normalized_size (rp, rn); + r->_mp_size = us < 0 ? -rn : rn; +} + +void +mpz_cdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_q_2exp (r, u, cnt, GMP_DIV_CEIL); +} + +void +mpz_fdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_q_2exp (r, u, cnt, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_q_2exp (r, u, cnt, GMP_DIV_TRUNC); +} + +void +mpz_cdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_r_2exp (r, u, cnt, GMP_DIV_CEIL); +} + +void +mpz_fdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_r_2exp (r, u, cnt, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_r_2exp (r, u, cnt, GMP_DIV_TRUNC); +} + +void +mpz_divexact (mpz_t q, const mpz_t n, const mpz_t d) +{ + gmp_assert_nocarry (mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC)); +} + +int +mpz_divisible_p (const mpz_t n, const mpz_t d) +{ + return mpz_div_qr (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; +} + +int +mpz_congruent_p (const mpz_t a, const mpz_t b, const mpz_t m) +{ + mpz_t t; + int res; + + /* a == b (mod 0) iff a == b */ + if (mpz_sgn (m) == 0) + return (mpz_cmp (a, b) == 0); + + mpz_init (t); + mpz_sub (t, a, b); + res = mpz_divisible_p (t, m); + mpz_clear (t); + + return res; +} + +static unsigned long +mpz_div_qr_ui (mpz_t q, mpz_t r, + const mpz_t n, unsigned long d, enum mpz_div_round_mode mode) +{ + unsigned long ret; + mpz_t rr, dd; + + mpz_init (rr); + mpz_init_set_ui (dd, d); + mpz_div_qr (q, rr, n, dd, mode); + mpz_clear (dd); + ret = mpz_get_ui (rr); + + if (r) + mpz_swap (r, rr); + mpz_clear (rr); + + return ret; +} + +unsigned long +mpz_cdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, r, n, d, GMP_DIV_CEIL); +} + +unsigned long +mpz_fdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, r, n, d, GMP_DIV_FLOOR); +} + +unsigned long +mpz_tdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, r, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_cdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_CEIL); +} + +unsigned long +mpz_fdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_FLOOR); +} + +unsigned long +mpz_tdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_cdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_CEIL); +} +unsigned long +mpz_fdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); +} +unsigned long +mpz_tdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_cdiv_ui 
(const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_CEIL); +} + +unsigned long +mpz_fdiv_ui (const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_FLOOR); +} + +unsigned long +mpz_tdiv_ui (const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_mod_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); +} + +void +mpz_divexact_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + gmp_assert_nocarry (mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC)); +} + +int +mpz_divisible_ui_p (const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; +} + + +/* GCD */ +static mp_limb_t +mpn_gcd_11 (mp_limb_t u, mp_limb_t v) +{ + unsigned shift; + + assert ( (u | v) > 0); + + if (u == 0) + return v; + else if (v == 0) + return u; + + gmp_ctz (shift, u | v); + + u >>= shift; + v >>= shift; + + if ( (u & 1) == 0) + MP_LIMB_T_SWAP (u, v); + + while ( (v & 1) == 0) + v >>= 1; + + while (u != v) + { + if (u > v) + { + u -= v; + do + u >>= 1; + while ( (u & 1) == 0); + } + else + { + v -= u; + do + v >>= 1; + while ( (v & 1) == 0); + } + } + return u << shift; +} + +mp_size_t +mpn_gcd (mp_ptr rp, mp_ptr up, mp_size_t un, mp_ptr vp, mp_size_t vn) +{ + assert (un >= vn); + assert (vn > 0); + assert (!GMP_MPN_OVERLAP_P (up, un, vp, vn)); + assert (vp[vn-1] > 0); + assert ((up[0] | vp[0]) & 1); + + if (un > vn) + mpn_div_qr (NULL, up, un, vp, vn); + + un = mpn_normalized_size (up, vn); + if (un == 0) + { + mpn_copyi (rp, vp, vn); + return vn; + } + + if (!(vp[0] & 1)) + MPN_PTR_SWAP (up, un, vp, vn); + + while (un > 1 || vn > 1) + { + int shift; + assert (vp[0] & 1); + + while (up[0] == 0) + { + up++; + un--; + } + gmp_ctz (shift, up[0]); + if (shift > 0) + { + gmp_assert_nocarry (mpn_rshift(up, up, un, shift)); + un -= (up[un-1] == 0); + } + + if (un < vn) + MPN_PTR_SWAP (up, un, vp, vn); + else if (un == vn) + { + int c = mpn_cmp (up, vp, un); + if (c == 0) + { + mpn_copyi (rp, up, un); + return un; + } + else if (c < 0) + MP_PTR_SWAP (up, vp); + } + + gmp_assert_nocarry (mpn_sub (up, up, un, vp, vn)); + un = mpn_normalized_size (up, un); + } + rp[0] = mpn_gcd_11 (up[0], vp[0]); + return 1; +} + +unsigned long +mpz_gcd_ui (mpz_t g, const mpz_t u, unsigned long v) +{ + mpz_t t; + mpz_init_set_ui(t, v); + mpz_gcd (t, u, t); + if (v > 0) + v = mpz_get_ui (t); + + if (g) + mpz_swap (t, g); + + mpz_clear (t); + + return v; +} + +static mp_bitcnt_t +mpz_make_odd (mpz_t r) +{ + mp_bitcnt_t shift; + + assert (r->_mp_size > 0); + /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ + shift = mpn_scan1 (r->_mp_d, 0); + mpz_tdiv_q_2exp (r, r, shift); + + return shift; +} + +void +mpz_gcd (mpz_t g, const mpz_t u, const mpz_t v) +{ + mpz_t tu, tv; + mp_bitcnt_t uz, vz, gz; + + if (u->_mp_size == 0) + { + mpz_abs (g, v); + return; + } + if (v->_mp_size == 0) + { + mpz_abs (g, u); + return; + } + + mpz_init (tu); + mpz_init (tv); + + mpz_abs (tu, u); + uz = mpz_make_odd (tu); + mpz_abs (tv, v); + vz = mpz_make_odd (tv); + gz = GMP_MIN (uz, vz); + + if (tu->_mp_size < tv->_mp_size) + mpz_swap (tu, tv); + + tu->_mp_size = mpn_gcd (tu->_mp_d, tu->_mp_d, tu->_mp_size, tv->_mp_d, tv->_mp_size); + mpz_mul_2exp (g, tu, gz); + + mpz_clear (tu); + mpz_clear (tv); +} + +void +mpz_gcdext (mpz_t g, mpz_t s, mpz_t t, const mpz_t u, const mpz_t v) +{ + mpz_t tu, tv, s0, s1, t0, t1; + mp_bitcnt_t uz, vz, 
gz; + mp_bitcnt_t power; + int cmp; + + if (u->_mp_size == 0) + { + /* g = 0 u + sgn(v) v */ + signed long sign = mpz_sgn (v); + mpz_abs (g, v); + if (s) + s->_mp_size = 0; + if (t) + mpz_set_si (t, sign); + return; + } + + if (v->_mp_size == 0) + { + /* g = sgn(u) u + 0 v */ + signed long sign = mpz_sgn (u); + mpz_abs (g, u); + if (s) + mpz_set_si (s, sign); + if (t) + t->_mp_size = 0; + return; + } + + mpz_init (tu); + mpz_init (tv); + mpz_init (s0); + mpz_init (s1); + mpz_init (t0); + mpz_init (t1); + + mpz_abs (tu, u); + uz = mpz_make_odd (tu); + mpz_abs (tv, v); + vz = mpz_make_odd (tv); + gz = GMP_MIN (uz, vz); + + uz -= gz; + vz -= gz; + + /* Cofactors corresponding to odd gcd. gz handled later. */ + if (tu->_mp_size < tv->_mp_size) + { + mpz_swap (tu, tv); + MPZ_SRCPTR_SWAP (u, v); + MPZ_PTR_SWAP (s, t); + MP_BITCNT_T_SWAP (uz, vz); + } + + /* Maintain + * + * u = t0 tu + t1 tv + * v = s0 tu + s1 tv + * + * where u and v denote the inputs with common factors of two + * eliminated, and det (s0, t0; s1, t1) = 2^p. Then + * + * 2^p tu = s1 u - t1 v + * 2^p tv = -s0 u + t0 v + */ + + /* After initial division, tu = q tv + tu', we have + * + * u = 2^uz (tu' + q tv) + * v = 2^vz tv + * + * or + * + * t0 = 2^uz, t1 = 2^uz q + * s0 = 0, s1 = 2^vz + */ + + mpz_tdiv_qr (t1, tu, tu, tv); + mpz_mul_2exp (t1, t1, uz); + + mpz_setbit (s1, vz); + power = uz + vz; + + if (tu->_mp_size > 0) + { + mp_bitcnt_t shift; + shift = mpz_make_odd (tu); + mpz_setbit (t0, uz + shift); + power += shift; + + for (;;) + { + int c; + c = mpz_cmp (tu, tv); + if (c == 0) + break; + + if (c < 0) + { + /* tv = tv' + tu + * + * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv' + * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */ + + mpz_sub (tv, tv, tu); + mpz_add (t0, t0, t1); + mpz_add (s0, s0, s1); + + shift = mpz_make_odd (tv); + mpz_mul_2exp (t1, t1, shift); + mpz_mul_2exp (s1, s1, shift); + } + else + { + mpz_sub (tu, tu, tv); + mpz_add (t1, t0, t1); + mpz_add (s1, s0, s1); + + shift = mpz_make_odd (tu); + mpz_mul_2exp (t0, t0, shift); + mpz_mul_2exp (s0, s0, shift); + } + power += shift; + } + } + else + mpz_setbit (t0, uz); + + /* Now tv = odd part of gcd, and -s0 and t0 are corresponding + cofactors. */ + + mpz_mul_2exp (tv, tv, gz); + mpz_neg (s0, s0); + + /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To + adjust cofactors, we need u / g and v / g */ + + mpz_divexact (s1, v, tv); + mpz_abs (s1, s1); + mpz_divexact (t1, u, tv); + mpz_abs (t1, t1); + + while (power-- > 0) + { + /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */ + if (mpz_odd_p (s0) || mpz_odd_p (t0)) + { + mpz_sub (s0, s0, s1); + mpz_add (t0, t0, t1); + } + assert (mpz_even_p (t0) && mpz_even_p (s0)); + mpz_tdiv_q_2exp (s0, s0, 1); + mpz_tdiv_q_2exp (t0, t0, 1); + } + + /* Choose small cofactors (they should generally satify + + |s| < |u| / 2g and |t| < |v| / 2g, + + with some documented exceptions). Always choose the smallest s, + if there are two choices for s with same absolute value, choose + the one with smallest corresponding t (this asymmetric condition + is needed to prefer s = 0, |t| = 1 when g = |a| = |b|). 
*/ + mpz_add (s1, s0, s1); + mpz_sub (t1, t0, t1); + cmp = mpz_cmpabs (s0, s1); + if (cmp > 0 || (cmp == 0 && mpz_cmpabs (t0, t1) > 0)) + { + mpz_swap (s0, s1); + mpz_swap (t0, t1); + } + if (u->_mp_size < 0) + mpz_neg (s0, s0); + if (v->_mp_size < 0) + mpz_neg (t0, t0); + + mpz_swap (g, tv); + if (s) + mpz_swap (s, s0); + if (t) + mpz_swap (t, t0); + + mpz_clear (tu); + mpz_clear (tv); + mpz_clear (s0); + mpz_clear (s1); + mpz_clear (t0); + mpz_clear (t1); +} + +void +mpz_lcm (mpz_t r, const mpz_t u, const mpz_t v) +{ + mpz_t g; + + if (u->_mp_size == 0 || v->_mp_size == 0) + { + r->_mp_size = 0; + return; + } + + mpz_init (g); + + mpz_gcd (g, u, v); + mpz_divexact (g, u, g); + mpz_mul (r, g, v); + + mpz_clear (g); + mpz_abs (r, r); +} + +void +mpz_lcm_ui (mpz_t r, const mpz_t u, unsigned long v) +{ + if (v == 0 || u->_mp_size == 0) + { + r->_mp_size = 0; + return; + } + + v /= mpz_gcd_ui (NULL, u, v); + mpz_mul_ui (r, u, v); + + mpz_abs (r, r); +} + +int +mpz_invert (mpz_t r, const mpz_t u, const mpz_t m) +{ + mpz_t g, tr; + int invertible; + + if (u->_mp_size == 0 || mpz_cmpabs_ui (m, 1) <= 0) + return 0; + + mpz_init (g); + mpz_init (tr); + + mpz_gcdext (g, tr, NULL, u, m); + invertible = (mpz_cmp_ui (g, 1) == 0); + + if (invertible) + { + if (tr->_mp_size < 0) + { + if (m->_mp_size >= 0) + mpz_add (tr, tr, m); + else + mpz_sub (tr, tr, m); + } + mpz_swap (r, tr); + } + + mpz_clear (g); + mpz_clear (tr); + return invertible; +} + + +/* Higher level operations (sqrt, pow and root) */ + +void +mpz_pow_ui (mpz_t r, const mpz_t b, unsigned long e) +{ + unsigned long bit; + mpz_t tr; + mpz_init_set_ui (tr, 1); + + bit = GMP_ULONG_HIGHBIT; + do + { + mpz_mul (tr, tr, tr); + if (e & bit) + mpz_mul (tr, tr, b); + bit >>= 1; + } + while (bit > 0); + + mpz_swap (r, tr); + mpz_clear (tr); +} + +void +mpz_ui_pow_ui (mpz_t r, unsigned long blimb, unsigned long e) +{ + mpz_t b; + + mpz_init_set_ui (b, blimb); + mpz_pow_ui (r, b, e); + mpz_clear (b); +} + +void +mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m) +{ + mpz_t tr; + mpz_t base; + mp_size_t en, mn; + mp_srcptr mp; + struct gmp_div_inverse minv; + unsigned shift; + mp_ptr tp = NULL; + + en = GMP_ABS (e->_mp_size); + mn = GMP_ABS (m->_mp_size); + if (mn == 0) + gmp_die ("mpz_powm: Zero modulo."); + + if (en == 0) + { + mpz_set_ui (r, mpz_cmpabs_ui (m, 1)); + return; + } + + mp = m->_mp_d; + mpn_div_qr_invert (&minv, mp, mn); + shift = minv.shift; + + if (shift > 0) + { + /* To avoid shifts, we do all our reductions, except the final + one, using a *normalized* m. */ + minv.shift = 0; + + tp = gmp_alloc_limbs (mn); + gmp_assert_nocarry (mpn_lshift (tp, mp, mn, shift)); + mp = tp; + } + + mpz_init (base); + + if (e->_mp_size < 0) + { + if (!mpz_invert (base, b, m)) + gmp_die ("mpz_powm: Negative exponent and non-invertible base."); + } + else + { + mp_size_t bn; + mpz_abs (base, b); + + bn = base->_mp_size; + if (bn >= mn) + { + mpn_div_qr_preinv (NULL, base->_mp_d, base->_mp_size, mp, mn, &minv); + bn = mn; + } + + /* We have reduced the absolute value. Now take care of the + sign. Note that we get zero represented non-canonically as + m. 
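mpz_invert above is a thin wrapper around mpz_gcdext: the inverse exists exactly when the gcd is 1, and the Bezout coefficient s from g = s*u + t*m, shifted into the range [0, |m|), is the result. A usage sketch of both entry points (assumes mini-gmp.h is on the include path):

#include <stdio.h>
#include "mini-gmp.h"

int main(void)
{
  mpz_t g, s, t, u, m, inv;

  mpz_init (g); mpz_init (s); mpz_init (t); mpz_init (inv);
  mpz_init_set_ui (u, 17);
  mpz_init_set_ui (m, 3120);

  /* Bezout relation g = s*u + t*m; here gcd(17, 3120) = 1. */
  mpz_gcdext (g, s, t, u, m);
  printf ("gcd = %lu\n", mpz_get_ui (g));                   /* 1 */

  /* mpz_invert yields the same coefficient reduced into [0, m). */
  if (mpz_invert (inv, u, m))
    printf ("17^-1 mod 3120 = %lu\n", mpz_get_ui (inv));    /* 2753 */

  mpz_clear (g); mpz_clear (s); mpz_clear (t);
  mpz_clear (u); mpz_clear (m); mpz_clear (inv);
  return 0;
}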
*/ + if (b->_mp_size < 0) + { + mp_ptr bp = MPZ_REALLOC (base, mn); + gmp_assert_nocarry (mpn_sub (bp, mp, mn, bp, bn)); + bn = mn; + } + base->_mp_size = mpn_normalized_size (base->_mp_d, bn); + } + mpz_init_set_ui (tr, 1); + + while (--en >= 0) + { + mp_limb_t w = e->_mp_d[en]; + mp_limb_t bit; + + bit = GMP_LIMB_HIGHBIT; + do + { + mpz_mul (tr, tr, tr); + if (w & bit) + mpz_mul (tr, tr, base); + if (tr->_mp_size > mn) + { + mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); + tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); + } + bit >>= 1; + } + while (bit > 0); + } + + /* Final reduction */ + if (tr->_mp_size >= mn) + { + minv.shift = shift; + mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); + tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); + } + if (tp) + gmp_free_limbs (tp, mn); + + mpz_swap (r, tr); + mpz_clear (tr); + mpz_clear (base); +} + +void +mpz_powm_ui (mpz_t r, const mpz_t b, unsigned long elimb, const mpz_t m) +{ + mpz_t e; + + mpz_init_set_ui (e, elimb); + mpz_powm (r, b, e, m); + mpz_clear (e); +} + +/* x=trunc(y^(1/z)), r=y-x^z */ +void +mpz_rootrem (mpz_t x, mpz_t r, const mpz_t y, unsigned long z) +{ + int sgn; + mp_bitcnt_t bc; + mpz_t t, u; + + sgn = y->_mp_size < 0; + if ((~z & sgn) != 0) + gmp_die ("mpz_rootrem: Negative argument, with even root."); + if (z == 0) + gmp_die ("mpz_rootrem: Zeroth root."); + + if (mpz_cmpabs_ui (y, 1) <= 0) { + if (x) + mpz_set (x, y); + if (r) + r->_mp_size = 0; + return; + } + + mpz_init (u); + mpz_init (t); + bc = (mpz_sizeinbase (y, 2) - 1) / z + 1; + mpz_setbit (t, bc); + + if (z == 2) /* simplify sqrt loop: z-1 == 1 */ + do { + mpz_swap (u, t); /* u = x */ + mpz_tdiv_q (t, y, u); /* t = y/x */ + mpz_add (t, t, u); /* t = y/x + x */ + mpz_tdiv_q_2exp (t, t, 1); /* x'= (y/x + x)/2 */ + } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ + else /* z != 2 */ { + mpz_t v; + + mpz_init (v); + if (sgn) + mpz_neg (t, t); + + do { + mpz_swap (u, t); /* u = x */ + mpz_pow_ui (t, u, z - 1); /* t = x^(z-1) */ + mpz_tdiv_q (t, y, t); /* t = y/x^(z-1) */ + mpz_mul_ui (v, u, z - 1); /* v = x*(z-1) */ + mpz_add (t, t, v); /* t = y/x^(z-1) + x*(z-1) */ + mpz_tdiv_q_ui (t, t, z); /* x'=(y/x^(z-1) + x*(z-1))/z */ + } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ + + mpz_clear (v); + } + + if (r) { + mpz_pow_ui (t, u, z); + mpz_sub (r, y, t); + } + if (x) + mpz_swap (x, u); + mpz_clear (u); + mpz_clear (t); +} + +int +mpz_root (mpz_t x, const mpz_t y, unsigned long z) +{ + int res; + mpz_t r; + + mpz_init (r); + mpz_rootrem (x, r, y, z); + res = r->_mp_size == 0; + mpz_clear (r); + + return res; +} + +/* Compute s = floor(sqrt(u)) and r = u - s^2. 
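mpz_powm above is plain left-to-right binary exponentiation, reducing after each step with the precomputed divisor inverse, and mpz_rootrem is the Newton iteration spelled out in its inline comments. A short usage sketch, illustrative only:

#include <stdio.h>
#include "mini-gmp.h"

int main(void)
{
  mpz_t b, e, m, r, s, rem;

  mpz_init_set_ui (b, 4);
  mpz_init_set_ui (e, 13);
  mpz_init_set_ui (m, 497);
  mpz_init (r);
  mpz_init (s);
  mpz_init (rem);

  mpz_powm (r, b, e, m);                      /* 4^13 mod 497 = 445 */
  printf ("%lu\n", mpz_get_ui (r));

  mpz_set_ui (r, 1000001);
  mpz_rootrem (s, rem, r, 2);                 /* floor(sqrt(1000001)) = 1000, remainder 1 */
  printf ("%lu %lu\n", mpz_get_ui (s), mpz_get_ui (rem));

  mpz_clear (b); mpz_clear (e); mpz_clear (m);
  mpz_clear (r); mpz_clear (s); mpz_clear (rem);
  return 0;
}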
Allows r == NULL */ +void +mpz_sqrtrem (mpz_t s, mpz_t r, const mpz_t u) +{ + mpz_rootrem (s, r, u, 2); +} + +void +mpz_sqrt (mpz_t s, const mpz_t u) +{ + mpz_rootrem (s, NULL, u, 2); +} + +int +mpz_perfect_square_p (const mpz_t u) +{ + if (u->_mp_size <= 0) + return (u->_mp_size == 0); + else + return mpz_root (NULL, u, 2); +} + +int +mpn_perfect_square_p (mp_srcptr p, mp_size_t n) +{ + mpz_t t; + + assert (n > 0); + assert (p [n-1] != 0); + return mpz_root (NULL, mpz_roinit_normal_n (t, p, n), 2); +} + +mp_size_t +mpn_sqrtrem (mp_ptr sp, mp_ptr rp, mp_srcptr p, mp_size_t n) +{ + mpz_t s, r, u; + mp_size_t res; + + assert (n > 0); + assert (p [n-1] != 0); + + mpz_init (r); + mpz_init (s); + mpz_rootrem (s, r, mpz_roinit_normal_n (u, p, n), 2); + + assert (s->_mp_size == (n+1)/2); + mpn_copyd (sp, s->_mp_d, s->_mp_size); + mpz_clear (s); + res = r->_mp_size; + if (rp) + mpn_copyd (rp, r->_mp_d, res); + mpz_clear (r); + return res; +} + +/* Combinatorics */ + +void +mpz_mfac_uiui (mpz_t x, unsigned long n, unsigned long m) +{ + mpz_set_ui (x, n + (n == 0)); + if (m + 1 < 2) return; + while (n > m + 1) + mpz_mul_ui (x, x, n -= m); +} + +void +mpz_2fac_ui (mpz_t x, unsigned long n) +{ + mpz_mfac_uiui (x, n, 2); +} + +void +mpz_fac_ui (mpz_t x, unsigned long n) +{ + mpz_mfac_uiui (x, n, 1); +} + +void +mpz_bin_uiui (mpz_t r, unsigned long n, unsigned long k) +{ + mpz_t t; + + mpz_set_ui (r, k <= n); + + if (k > (n >> 1)) + k = (k <= n) ? n - k : 0; + + mpz_init (t); + mpz_fac_ui (t, k); + + for (; k > 0; --k) + mpz_mul_ui (r, r, n--); + + mpz_divexact (r, r, t); + mpz_clear (t); +} + + +/* Primality testing */ + +/* Computes Kronecker (a/b) with odd b, a!=0 and GCD(a,b) = 1 */ +/* Adapted from JACOBI_BASE_METHOD==4 in mpn/generic/jacbase.c */ +static int +gmp_jacobi_coprime (mp_limb_t a, mp_limb_t b) +{ + int c, bit = 0; + + assert (b & 1); + assert (a != 0); + /* assert (mpn_gcd_11 (a, b) == 1); */ + + /* Below, we represent a and b shifted right so that the least + significant one bit is implicit. */ + b >>= 1; + + gmp_ctz(c, a); + a >>= 1; + + for (;;) + { + a >>= c; + /* (2/b) = -1 if b = 3 or 5 mod 8 */ + bit ^= c & (b ^ (b >> 1)); + if (a < b) + { + if (a == 0) + return bit & 1 ? -1 : 1; + bit ^= a & b; + a = b - a; + b -= a; + } + else + { + a -= b; + assert (a != 0); + } + + gmp_ctz(c, a); + ++c; + } +} + +static void +gmp_lucas_step_k_2k (mpz_t V, mpz_t Qk, const mpz_t n) +{ + mpz_mod (Qk, Qk, n); + /* V_{2k} <- V_k ^ 2 - 2Q^k */ + mpz_mul (V, V, V); + mpz_submul_ui (V, Qk, 2); + mpz_tdiv_r (V, V, n); + /* Q^{2k} = (Q^k)^2 */ + mpz_mul (Qk, Qk, Qk); +} + +/* Computes V_k, Q^k (mod n) for the Lucas' sequence */ +/* with P=1, Q=Q; k = (n>>b0)|1. */ +/* Requires an odd n > 4; b0 > 0; -2*Q must not overflow a long */ +/* Returns (U_k == 0) and sets V=V_k and Qk=Q^k. 
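The combinatorics helpers above build n! by repeated mpz_mul_ui and C(n,k) as a falling product divided by k!, using the symmetry C(n,k) = C(n,n-k) to keep k small. A quick sketch (assumes mini-gmp.h is present):

#include <stdio.h>
#include "mini-gmp.h"

int main(void)
{
  mpz_t f, c;
  char buf[64];

  mpz_init (f);
  mpz_init (c);

  mpz_fac_ui (f, 20);             /* 20! = 2432902008176640000 */
  mpz_bin_uiui (c, 52, 5);        /* C(52, 5) = 2598960 */

  printf ("20!      = %s\n", mpz_get_str (buf, 10, f));
  printf ("C(52, 5) = %lu\n", mpz_get_ui (c));

  mpz_clear (f);
  mpz_clear (c);
  return 0;
}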
*/ +static int +gmp_lucas_mod (mpz_t V, mpz_t Qk, long Q, + mp_bitcnt_t b0, const mpz_t n) +{ + mp_bitcnt_t bs; + mpz_t U; + int res; + + assert (b0 > 0); + assert (Q <= - (LONG_MIN / 2)); + assert (Q >= - (LONG_MAX / 2)); + assert (mpz_cmp_ui (n, 4) > 0); + assert (mpz_odd_p (n)); + + mpz_init_set_ui (U, 1); /* U1 = 1 */ + mpz_set_ui (V, 1); /* V1 = 1 */ + mpz_set_si (Qk, Q); + + for (bs = mpz_sizeinbase (n, 2) - 1; --bs >= b0;) + { + /* U_{2k} <- U_k * V_k */ + mpz_mul (U, U, V); + /* V_{2k} <- V_k ^ 2 - 2Q^k */ + /* Q^{2k} = (Q^k)^2 */ + gmp_lucas_step_k_2k (V, Qk, n); + + /* A step k->k+1 is performed if the bit in $n$ is 1 */ + /* mpz_tstbit(n,bs) or the bit is 0 in $n$ but */ + /* should be 1 in $n+1$ (bs == b0) */ + if (b0 == bs || mpz_tstbit (n, bs)) + { + /* Q^{k+1} <- Q^k * Q */ + mpz_mul_si (Qk, Qk, Q); + /* U_{k+1} <- (U_k + V_k) / 2 */ + mpz_swap (U, V); /* Keep in V the old value of U_k */ + mpz_add (U, U, V); + /* We have to compute U/2, so we need an even value, */ + /* equivalent (mod n) */ + if (mpz_odd_p (U)) + mpz_add (U, U, n); + mpz_tdiv_q_2exp (U, U, 1); + /* V_{k+1} <-(D*U_k + V_k) / 2 = + U_{k+1} + (D-1)/2*U_k = U_{k+1} - 2Q*U_k */ + mpz_mul_si (V, V, -2*Q); + mpz_add (V, U, V); + mpz_tdiv_r (V, V, n); + } + mpz_tdiv_r (U, U, n); + } + + res = U->_mp_size == 0; + mpz_clear (U); + return res; +} + +/* Performs strong Lucas' test on x, with parameters suggested */ +/* for the BPSW test. Qk is only passed to recycle a variable. */ +/* Requires GCD (x,6) = 1.*/ +static int +gmp_stronglucas (const mpz_t x, mpz_t Qk) +{ + mp_bitcnt_t b0; + mpz_t V, n; + mp_limb_t maxD, D; /* The absolute value is stored. */ + long Q; + mp_limb_t tl; + + /* Test on the absolute value. */ + mpz_roinit_normal_n (n, x->_mp_d, GMP_ABS (x->_mp_size)); + + assert (mpz_odd_p (n)); + /* assert (mpz_gcd_ui (NULL, n, 6) == 1); */ + if (mpz_root (Qk, n, 2)) + return 0; /* A square is composite. */ + + /* Check Ds up to square root (in case, n is prime) + or avoid overflows */ + maxD = (Qk->_mp_size == 1) ? Qk->_mp_d [0] - 1 : GMP_LIMB_MAX; + + D = 3; + /* Search a D such that (D/n) = -1 in the sequence 5,-7,9,-11,.. */ + /* For those Ds we have (D/n) = (n/|D|) */ + do + { + if (D >= maxD) + return 1 + (D != GMP_LIMB_MAX); /* (1 + ! ~ D) */ + D += 2; + tl = mpz_tdiv_ui (n, D); + if (tl == 0) + return 0; + } + while (gmp_jacobi_coprime (tl, D) == 1); + + mpz_init (V); + + /* n-(D/n) = n+1 = d*2^{b0}, with d = (n>>b0) | 1 */ + b0 = mpn_common_scan (~ n->_mp_d[0], 0, n->_mp_d, n->_mp_size, GMP_LIMB_MAX); + /* b0 = mpz_scan0 (n, 0); */ + + /* D= P^2 - 4Q; P = 1; Q = (1-D)/4 */ + Q = (D & 2) ? (long) (D >> 2) + 1 : -(long) (D >> 2); + + if (! gmp_lucas_mod (V, Qk, Q, b0, n)) /* If Ud != 0 */ + while (V->_mp_size != 0 && --b0 != 0) /* while Vk != 0 */ + /* V <- V ^ 2 - 2Q^k */ + /* Q^{2k} = (Q^k)^2 */ + gmp_lucas_step_k_2k (V, Qk, n); + + mpz_clear (V); + return (b0 != 0); +} + +static int +gmp_millerrabin (const mpz_t n, const mpz_t nm1, mpz_t y, + const mpz_t q, mp_bitcnt_t k) +{ + assert (k > 0); + + /* Caller must initialize y to the base. */ + mpz_powm (y, y, q, n); + + if (mpz_cmp_ui (y, 1) == 0 || mpz_cmp (y, nm1) == 0) + return 1; + + while (--k > 0) + { + mpz_powm_ui (y, y, 2, n); + if (mpz_cmp (y, nm1) == 0) + return 1; + } + return 0; +} + +/* This product is 0xc0cfd797, and fits in 32 bits. 
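These Miller-Rabin and strong Lucas helpers are combined by mpz_probab_prime_p, defined just below, into a BPSW-style test: one base-2 Miller-Rabin round, one strong Lucas round, then optional extra Miller-Rabin rounds with the deterministic bases j^2 + j + 41. From the caller's side (a sketch; the return value is 2 for definitely prime, 1 for probably prime, 0 for composite):

#include <stdio.h>
#include "mini-gmp.h"

int main(void)
{
  mpz_t n;

  mpz_init_set_str (n, "2305843009213693951", 10);   /* 2^61 - 1, a Mersenne prime */
  printf ("%d\n", mpz_probab_prime_p (n, 25));       /* 1: probably prime */

  mpz_set_ui (n, 3215031751UL);   /* composite, yet a strong pseudoprime to bases 2, 3, 5, 7 */
  printf ("%d\n", mpz_probab_prime_p (n, 25));       /* 0: composite */

  mpz_clear (n);
  return 0;
}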
*/ +#define GMP_PRIME_PRODUCT \ + (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL) + +/* Bit (p+1)/2 is set, for each odd prime <= 61 */ +#define GMP_PRIME_MASK 0xc96996dcUL + +int +mpz_probab_prime_p (const mpz_t n, int reps) +{ + mpz_t nm1; + mpz_t q; + mpz_t y; + mp_bitcnt_t k; + int is_prime; + int j; + + /* Note that we use the absolute value of n only, for compatibility + with the real GMP. */ + if (mpz_even_p (n)) + return (mpz_cmpabs_ui (n, 2) == 0) ? 2 : 0; + + /* Above test excludes n == 0 */ + assert (n->_mp_size != 0); + + if (mpz_cmpabs_ui (n, 64) < 0) + return (GMP_PRIME_MASK >> (n->_mp_d[0] >> 1)) & 2; + + if (mpz_gcd_ui (NULL, n, GMP_PRIME_PRODUCT) != 1) + return 0; + + /* All prime factors are >= 31. */ + if (mpz_cmpabs_ui (n, 31*31) < 0) + return 2; + + mpz_init (nm1); + mpz_init (q); + + /* Find q and k, where q is odd and n = 1 + 2**k * q. */ + mpz_abs (nm1, n); + nm1->_mp_d[0] -= 1; + /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ + k = mpn_scan1 (nm1->_mp_d, 0); + mpz_tdiv_q_2exp (q, nm1, k); + + /* BPSW test */ + mpz_init_set_ui (y, 2); + is_prime = gmp_millerrabin (n, nm1, y, q, k) && gmp_stronglucas (n, y); + reps -= 24; /* skip the first 24 repetitions */ + + /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] = + j^2 + j + 41 using Euler's polynomial. We potentially stop early, + if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps > + 30 (a[30] == 971 > 31*31 == 961). */ + + for (j = 0; is_prime & (j < reps); j++) + { + mpz_set_ui (y, (unsigned long) j*j+j+41); + if (mpz_cmp (y, nm1) >= 0) + { + /* Don't try any further bases. This "early" break does not affect + the result for any reasonable reps value (<=5000 was tested) */ + assert (j >= 30); + break; + } + is_prime = gmp_millerrabin (n, nm1, y, q, k); + } + mpz_clear (nm1); + mpz_clear (q); + mpz_clear (y); + + return is_prime; +} + + +/* Logical operations and bit manipulation. */ + +/* Numbers are treated as if represented in two's complement (and + infinitely sign extended). For a negative values we get the two's + complement from -x = ~x + 1, where ~ is bitwise complement. + Negation transforms + + xxxx10...0 + + into + + yyyy10...0 + + where yyyy is the bitwise complement of xxxx. So least significant + bits, up to and including the first one bit, are unchanged, and + the more significant bits are all complemented. + + To change a bit from zero to one in a negative number, subtract the + corresponding power of two from the absolute value. This can never + underflow. To change a bit from one to zero, add the corresponding + power of two, and this might overflow. E.g., if x = -001111, the + two's complement is 110001. Clearing the least significant bit, we + get two's complement 110000, and -010000. */ + +int +mpz_tstbit (const mpz_t d, mp_bitcnt_t bit_index) +{ + mp_size_t limb_index; + unsigned shift; + mp_size_t ds; + mp_size_t dn; + mp_limb_t w; + int bit; + + ds = d->_mp_size; + dn = GMP_ABS (ds); + limb_index = bit_index / GMP_LIMB_BITS; + if (limb_index >= dn) + return ds < 0; + + shift = bit_index % GMP_LIMB_BITS; + w = d->_mp_d[limb_index]; + bit = (w >> shift) & 1; + + if (ds < 0) + { + /* d < 0. Check if any of the bits below is set: If so, our bit + must be complemented. 
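The two's-complement convention described above is what the bit-manipulation routines implement: a negative value behaves as if infinitely sign-extended, so testing a high bit of a negative number yields 1, and clearing the low bit of -15 gives -16, exactly as in the worked example in the comment. A small demonstration (assumes mini-gmp.h is available):

#include <stdio.h>
#include "mini-gmp.h"

int main(void)
{
  mpz_t x;

  mpz_init_set_si (x, -15);       /* ...110001 in the two's-complement view */

  printf ("bit 0  of -15: %d\n", mpz_tstbit (x, 0));    /* 1 */
  printf ("bit 4  of -15: %d\n", mpz_tstbit (x, 4));    /* 1 */
  printf ("bit 99 of -15: %d\n", mpz_tstbit (x, 99));   /* 1: infinite sign extension */

  mpz_clrbit (x, 0);              /* ...110000, i.e. -16 */
  printf ("after clrbit(0): %ld\n", mpz_get_si (x));    /* -16 */

  mpz_clear (x);
  return 0;
}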
*/ + if (shift > 0 && (mp_limb_t) (w << (GMP_LIMB_BITS - shift)) > 0) + return bit ^ 1; + while (--limb_index >= 0) + if (d->_mp_d[limb_index] > 0) + return bit ^ 1; + } + return bit; +} + +static void +mpz_abs_add_bit (mpz_t d, mp_bitcnt_t bit_index) +{ + mp_size_t dn, limb_index; + mp_limb_t bit; + mp_ptr dp; + + dn = GMP_ABS (d->_mp_size); + + limb_index = bit_index / GMP_LIMB_BITS; + bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); + + if (limb_index >= dn) + { + mp_size_t i; + /* The bit should be set outside of the end of the number. + We have to increase the size of the number. */ + dp = MPZ_REALLOC (d, limb_index + 1); + + dp[limb_index] = bit; + for (i = dn; i < limb_index; i++) + dp[i] = 0; + dn = limb_index + 1; + } + else + { + mp_limb_t cy; + + dp = d->_mp_d; + + cy = mpn_add_1 (dp + limb_index, dp + limb_index, dn - limb_index, bit); + if (cy > 0) + { + dp = MPZ_REALLOC (d, dn + 1); + dp[dn++] = cy; + } + } + + d->_mp_size = (d->_mp_size < 0) ? - dn : dn; +} + +static void +mpz_abs_sub_bit (mpz_t d, mp_bitcnt_t bit_index) +{ + mp_size_t dn, limb_index; + mp_ptr dp; + mp_limb_t bit; + + dn = GMP_ABS (d->_mp_size); + dp = d->_mp_d; + + limb_index = bit_index / GMP_LIMB_BITS; + bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); + + assert (limb_index < dn); + + gmp_assert_nocarry (mpn_sub_1 (dp + limb_index, dp + limb_index, + dn - limb_index, bit)); + dn = mpn_normalized_size (dp, dn); + d->_mp_size = (d->_mp_size < 0) ? - dn : dn; +} + +void +mpz_setbit (mpz_t d, mp_bitcnt_t bit_index) +{ + if (!mpz_tstbit (d, bit_index)) + { + if (d->_mp_size >= 0) + mpz_abs_add_bit (d, bit_index); + else + mpz_abs_sub_bit (d, bit_index); + } +} + +void +mpz_clrbit (mpz_t d, mp_bitcnt_t bit_index) +{ + if (mpz_tstbit (d, bit_index)) + { + if (d->_mp_size >= 0) + mpz_abs_sub_bit (d, bit_index); + else + mpz_abs_add_bit (d, bit_index); + } +} + +void +mpz_combit (mpz_t d, mp_bitcnt_t bit_index) +{ + if (mpz_tstbit (d, bit_index) ^ (d->_mp_size < 0)) + mpz_abs_sub_bit (d, bit_index); + else + mpz_abs_add_bit (d, bit_index); +} + +void +mpz_com (mpz_t r, const mpz_t u) +{ + mpz_add_ui (r, u, 1); + mpz_neg (r, r); +} + +void +mpz_and (mpz_t r, const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, rn, i; + mp_ptr up, vp, rp; + + mp_limb_t ux, vx, rx; + mp_limb_t uc, vc, rc; + mp_limb_t ul, vl, rl; + + un = GMP_ABS (u->_mp_size); + vn = GMP_ABS (v->_mp_size); + if (un < vn) + { + MPZ_SRCPTR_SWAP (u, v); + MP_SIZE_T_SWAP (un, vn); + } + if (vn == 0) + { + r->_mp_size = 0; + return; + } + + uc = u->_mp_size < 0; + vc = v->_mp_size < 0; + rc = uc & vc; + + ux = -uc; + vx = -vc; + rx = -rc; + + /* If the smaller input is positive, higher limbs don't matter. */ + rn = vx ? un : vn; + + rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); + + up = u->_mp_d; + vp = v->_mp_d; + + i = 0; + do + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + vl = (vp[i] ^ vx) + vc; + vc = vl < vc; + + rl = ( (ul & vl) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + while (++i < vn); + assert (vc == 0); + + for (; i < rn; i++) + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + rl = ( (ul & vx) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + if (rc) + rp[rn++] = rc; + else + rn = mpn_normalized_size (rp, rn); + + r->_mp_size = rx ? 
-rn : rn; +} + +void +mpz_ior (mpz_t r, const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, rn, i; + mp_ptr up, vp, rp; + + mp_limb_t ux, vx, rx; + mp_limb_t uc, vc, rc; + mp_limb_t ul, vl, rl; + + un = GMP_ABS (u->_mp_size); + vn = GMP_ABS (v->_mp_size); + if (un < vn) + { + MPZ_SRCPTR_SWAP (u, v); + MP_SIZE_T_SWAP (un, vn); + } + if (vn == 0) + { + mpz_set (r, u); + return; + } + + uc = u->_mp_size < 0; + vc = v->_mp_size < 0; + rc = uc | vc; + + ux = -uc; + vx = -vc; + rx = -rc; + + /* If the smaller input is negative, by sign extension higher limbs + don't matter. */ + rn = vx ? vn : un; + + rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); + + up = u->_mp_d; + vp = v->_mp_d; + + i = 0; + do + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + vl = (vp[i] ^ vx) + vc; + vc = vl < vc; + + rl = ( (ul | vl) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + while (++i < vn); + assert (vc == 0); + + for (; i < rn; i++) + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + rl = ( (ul | vx) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + if (rc) + rp[rn++] = rc; + else + rn = mpn_normalized_size (rp, rn); + + r->_mp_size = rx ? -rn : rn; +} + +void +mpz_xor (mpz_t r, const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, i; + mp_ptr up, vp, rp; + + mp_limb_t ux, vx, rx; + mp_limb_t uc, vc, rc; + mp_limb_t ul, vl, rl; + + un = GMP_ABS (u->_mp_size); + vn = GMP_ABS (v->_mp_size); + if (un < vn) + { + MPZ_SRCPTR_SWAP (u, v); + MP_SIZE_T_SWAP (un, vn); + } + if (vn == 0) + { + mpz_set (r, u); + return; + } + + uc = u->_mp_size < 0; + vc = v->_mp_size < 0; + rc = uc ^ vc; + + ux = -uc; + vx = -vc; + rx = -rc; + + rp = MPZ_REALLOC (r, un + (mp_size_t) rc); + + up = u->_mp_d; + vp = v->_mp_d; + + i = 0; + do + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + vl = (vp[i] ^ vx) + vc; + vc = vl < vc; + + rl = (ul ^ vl ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + while (++i < vn); + assert (vc == 0); + + for (; i < un; i++) + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + rl = (ul ^ ux) + rc; + rc = rl < rc; + rp[i] = rl; + } + if (rc) + rp[un++] = rc; + else + un = mpn_normalized_size (rp, un); + + r->_mp_size = rx ? -un : un; +} + +static unsigned +gmp_popcount_limb (mp_limb_t x) +{ + unsigned c; + + /* Do 16 bits at a time, to avoid limb-sized constants. 
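mpz_and, mpz_ior and mpz_xor above never materialise a two's-complement copy of a negative operand; each limb is complemented on the fly and the +1 of the negation is carried along in uc, vc and rc. The observable results nevertheless match ordinary C bitwise semantics on small values, as in this sketch:

#include <stdio.h>
#include "mini-gmp.h"

int main(void)
{
  mpz_t a, b, r;

  mpz_init_set_si (a, -6);    /* ...11111010 */
  mpz_init_set_ui (b, 12);    /* ...00001100 */
  mpz_init (r);

  mpz_and (r, a, b);
  printf ("-6 & 12 = %ld\n", mpz_get_si (r));   /*   8 */
  mpz_ior (r, a, b);
  printf ("-6 | 12 = %ld\n", mpz_get_si (r));   /*  -2 */
  mpz_xor (r, a, b);
  printf ("-6 ^ 12 = %ld\n", mpz_get_si (r));   /* -10 */

  mpz_clear (a); mpz_clear (b); mpz_clear (r);
  return 0;
}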
*/ + int LOCAL_SHIFT_BITS = 16; + for (c = 0; x > 0;) + { + unsigned w = x - ((x >> 1) & 0x5555); + w = ((w >> 2) & 0x3333) + (w & 0x3333); + w = (w >> 4) + w; + w = ((w >> 8) & 0x000f) + (w & 0x000f); + c += w; + if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) + x >>= LOCAL_SHIFT_BITS; + else + x = 0; + } + return c; +} + +mp_bitcnt_t +mpn_popcount (mp_srcptr p, mp_size_t n) +{ + mp_size_t i; + mp_bitcnt_t c; + + for (c = 0, i = 0; i < n; i++) + c += gmp_popcount_limb (p[i]); + + return c; +} + +mp_bitcnt_t +mpz_popcount (const mpz_t u) +{ + mp_size_t un; + + un = u->_mp_size; + + if (un < 0) + return ~(mp_bitcnt_t) 0; + + return mpn_popcount (u->_mp_d, un); +} + +mp_bitcnt_t +mpz_hamdist (const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, i; + mp_limb_t uc, vc, ul, vl, comp; + mp_srcptr up, vp; + mp_bitcnt_t c; + + un = u->_mp_size; + vn = v->_mp_size; + + if ( (un ^ vn) < 0) + return ~(mp_bitcnt_t) 0; + + comp = - (uc = vc = (un < 0)); + if (uc) + { + assert (vn < 0); + un = -un; + vn = -vn; + } + + up = u->_mp_d; + vp = v->_mp_d; + + if (un < vn) + MPN_SRCPTR_SWAP (up, un, vp, vn); + + for (i = 0, c = 0; i < vn; i++) + { + ul = (up[i] ^ comp) + uc; + uc = ul < uc; + + vl = (vp[i] ^ comp) + vc; + vc = vl < vc; + + c += gmp_popcount_limb (ul ^ vl); + } + assert (vc == 0); + + for (; i < un; i++) + { + ul = (up[i] ^ comp) + uc; + uc = ul < uc; + + c += gmp_popcount_limb (ul ^ comp); + } + + return c; +} + +mp_bitcnt_t +mpz_scan1 (const mpz_t u, mp_bitcnt_t starting_bit) +{ + mp_ptr up; + mp_size_t us, un, i; + mp_limb_t limb, ux; + + us = u->_mp_size; + un = GMP_ABS (us); + i = starting_bit / GMP_LIMB_BITS; + + /* Past the end there's no 1 bits for u>=0, or an immediate 1 bit + for u<0. Notice this test picks up any u==0 too. */ + if (i >= un) + return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit); + + up = u->_mp_d; + ux = 0; + limb = up[i]; + + if (starting_bit != 0) + { + if (us < 0) + { + ux = mpn_zero_p (up, i); + limb = ~ limb + ux; + ux = - (mp_limb_t) (limb >= ux); + } + + /* Mask to 0 all bits before starting_bit, thus ignoring them. */ + limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); + } + + return mpn_common_scan (limb, i, up, un, ux); +} + +mp_bitcnt_t +mpz_scan0 (const mpz_t u, mp_bitcnt_t starting_bit) +{ + mp_ptr up; + mp_size_t us, un, i; + mp_limb_t limb, ux; + + us = u->_mp_size; + ux = - (mp_limb_t) (us >= 0); + un = GMP_ABS (us); + i = starting_bit / GMP_LIMB_BITS; + + /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for + u<0. Notice this test picks up all cases of u==0 too. */ + if (i >= un) + return (ux ? starting_bit : ~(mp_bitcnt_t) 0); + + up = u->_mp_d; + limb = up[i] ^ ux; + + if (ux == 0) + limb -= mpn_zero_p (up, i); /* limb = ~(~limb + zero_p) */ + + /* Mask all bits before starting_bit, thus ignoring them. */ + limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); + + return mpn_common_scan (limb, i, up, un, ux); +} + + +/* MPZ base conversion. 
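mpz_popcount and mpz_hamdist above return ~(mp_bitcnt_t) 0 whenever the two's-complement view has infinitely many one bits (a negative popcount operand, or hamdist operands of opposite sign); mpz_scan1 and mpz_scan0 locate bits from a given starting index. A brief sketch for non-negative inputs, assuming mini-gmp.h is available:

#include <stdio.h>
#include "mini-gmp.h"

int main(void)
{
  mpz_t u, v;

  mpz_init_set_ui (u, 0);
  mpz_setbit (u, 0);
  mpz_setbit (u, 7);
  mpz_setbit (u, 200);            /* u = 2^200 + 2^7 + 1, spans several limbs */
  mpz_init_set_ui (v, 1);

  printf ("popcount(u)   = %lu\n", (unsigned long) mpz_popcount (u));   /* 3 */
  printf ("hamdist(u, v) = %lu\n", (unsigned long) mpz_hamdist (u, v)); /* 2 */
  printf ("scan1(u, 1)   = %lu\n", (unsigned long) mpz_scan1 (u, 1));   /* 7 */
  printf ("scan0(u, 0)   = %lu\n", (unsigned long) mpz_scan0 (u, 0));   /* 1 */

  mpz_clear (u);
  mpz_clear (v);
  return 0;
}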
*/ + +size_t +mpz_sizeinbase (const mpz_t u, int base) +{ + mp_size_t un, tn; + mp_srcptr up; + mp_ptr tp; + mp_bitcnt_t bits; + struct gmp_div_inverse bi; + size_t ndigits; + + assert (base >= 2); + assert (base <= 62); + + un = GMP_ABS (u->_mp_size); + if (un == 0) + return 1; + + up = u->_mp_d; + + bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]); + switch (base) + { + case 2: + return bits; + case 4: + return (bits + 1) / 2; + case 8: + return (bits + 2) / 3; + case 16: + return (bits + 3) / 4; + case 32: + return (bits + 4) / 5; + /* FIXME: Do something more clever for the common case of base + 10. */ + } + + tp = gmp_alloc_limbs (un); + mpn_copyi (tp, up, un); + mpn_div_qr_1_invert (&bi, base); + + tn = un; + ndigits = 0; + do + { + ndigits++; + mpn_div_qr_1_preinv (tp, tp, tn, &bi); + tn -= (tp[tn-1] == 0); + } + while (tn > 0); + + gmp_free_limbs (tp, un); + return ndigits; +} + +char * +mpz_get_str (char *sp, int base, const mpz_t u) +{ + unsigned bits; + const char *digits; + mp_size_t un; + size_t i, sn, osn; + + digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; + if (base > 1) + { + if (base <= 36) + digits = "0123456789abcdefghijklmnopqrstuvwxyz"; + else if (base > 62) + return NULL; + } + else if (base >= -1) + base = 10; + else + { + base = -base; + if (base > 36) + return NULL; + } + + sn = 1 + mpz_sizeinbase (u, base); + if (!sp) + { + osn = 1 + sn; + sp = (char *) gmp_alloc (osn); + } + else + osn = 0; + un = GMP_ABS (u->_mp_size); + + if (un == 0) + { + sp[0] = '0'; + sn = 1; + goto ret; + } + + i = 0; + + if (u->_mp_size < 0) + sp[i++] = '-'; + + bits = mpn_base_power_of_two_p (base); + + if (bits) + /* Not modified in this case. */ + sn = i + mpn_get_str_bits ((unsigned char *) sp + i, bits, u->_mp_d, un); + else + { + struct mpn_base_info info; + mp_ptr tp; + + mpn_get_base_info (&info, base); + tp = gmp_alloc_limbs (un); + mpn_copyi (tp, u->_mp_d, un); + + sn = i + mpn_get_str_other ((unsigned char *) sp + i, base, &info, tp, un); + gmp_free_limbs (tp, un); + } + + for (; i < sn; i++) + sp[i] = digits[(unsigned char) sp[i]]; + +ret: + sp[sn] = '\0'; + if (osn && osn != sn + 1) + sp = (char*) gmp_realloc (sp, osn, sn + 1); + return sp; +} + +int +mpz_set_str (mpz_t r, const char *sp, int base) +{ + unsigned bits, value_of_a; + mp_size_t rn, alloc; + mp_ptr rp; + size_t dn, sn; + int sign; + unsigned char *dp; + + assert (base == 0 || (base >= 2 && base <= 62)); + + while (isspace( (unsigned char) *sp)) + sp++; + + sign = (*sp == '-'); + sp += sign; + + if (base == 0) + { + if (sp[0] == '0') + { + if (sp[1] == 'x' || sp[1] == 'X') + { + base = 16; + sp += 2; + } + else if (sp[1] == 'b' || sp[1] == 'B') + { + base = 2; + sp += 2; + } + else + base = 8; + } + else + base = 10; + } + + if (!*sp) + { + r->_mp_size = 0; + return -1; + } + sn = strlen(sp); + dp = (unsigned char *) gmp_alloc (sn); + + value_of_a = (base > 36) ? 
36 : 10; + for (dn = 0; *sp; sp++) + { + unsigned digit; + + if (isspace ((unsigned char) *sp)) + continue; + else if (*sp >= '0' && *sp <= '9') + digit = *sp - '0'; + else if (*sp >= 'a' && *sp <= 'z') + digit = *sp - 'a' + value_of_a; + else if (*sp >= 'A' && *sp <= 'Z') + digit = *sp - 'A' + 10; + else + digit = base; /* fail */ + + if (digit >= (unsigned) base) + { + gmp_free (dp, sn); + r->_mp_size = 0; + return -1; + } + + dp[dn++] = digit; + } + + if (!dn) + { + gmp_free (dp, sn); + r->_mp_size = 0; + return -1; + } + bits = mpn_base_power_of_two_p (base); + + if (bits > 0) + { + alloc = (dn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; + rp = MPZ_REALLOC (r, alloc); + rn = mpn_set_str_bits (rp, dp, dn, bits); + } + else + { + struct mpn_base_info info; + mpn_get_base_info (&info, base); + alloc = (dn + info.exp - 1) / info.exp; + rp = MPZ_REALLOC (r, alloc); + rn = mpn_set_str_other (rp, dp, dn, base, &info); + /* Normalization, needed for all-zero input. */ + assert (rn > 0); + rn -= rp[rn-1] == 0; + } + assert (rn <= alloc); + gmp_free (dp, sn); + + r->_mp_size = sign ? - rn : rn; + + return 0; +} + +int +mpz_init_set_str (mpz_t r, const char *sp, int base) +{ + mpz_init (r); + return mpz_set_str (r, sp, base); +} + +size_t +mpz_out_str (FILE *stream, int base, const mpz_t x) +{ + char *str; + size_t len, n; + + str = mpz_get_str (NULL, base, x); + if (!str) + return 0; + len = strlen (str); + n = fwrite (str, 1, len, stream); + gmp_free (str, len + 1); + return n; +} + + +static int +gmp_detect_endian (void) +{ + static const int i = 2; + const unsigned char *p = (const unsigned char *) &i; + return 1 - *p; +} + +/* Import and export. Does not support nails. */ +void +mpz_import (mpz_t r, size_t count, int order, size_t size, int endian, + size_t nails, const void *src) +{ + const unsigned char *p; + ptrdiff_t word_step; + mp_ptr rp; + mp_size_t rn; + + /* The current (partial) limb. */ + mp_limb_t limb; + /* The number of bytes already copied to this limb (starting from + the low end). */ + size_t bytes; + /* The index where the limb should be stored, when completed. */ + mp_size_t i; + + if (nails != 0) + gmp_die ("mpz_import: Nails not supported."); + + assert (order == 1 || order == -1); + assert (endian >= -1 && endian <= 1); + + if (endian == 0) + endian = gmp_detect_endian (); + + p = (unsigned char *) src; + + word_step = (order != endian) ? 2 * size : 0; + + /* Process bytes from the least significant end, so point p at the + least significant word. */ + if (order == 1) + { + p += size * (count - 1); + word_step = - word_step; + } + + /* And at least significant byte of that word. 
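mpz_set_str above accepts bases 2 to 62, plus base 0 for automatic 0x/0b/leading-zero prefix detection, and mpz_get_str writes into a caller buffer of at least mpz_sizeinbase(n, base) + 2 bytes (or allocates one when passed NULL). Illustrative usage, assuming mini-gmp.h is on the include path:

#include <stdio.h>
#include "mini-gmp.h"

int main(void)
{
  mpz_t n;
  char buf[128];

  mpz_init (n);
  /* base = 0: the 0x prefix selects hexadecimal input. */
  if (mpz_set_str (n, "0xdeadbeefcafebabe1234567890abcdef", 0) != 0)
    return 1;

  printf ("decimal digits: %zu\n", mpz_sizeinbase (n, 10));
  printf ("dec: %s\n", mpz_get_str (buf, 10, n));
  printf ("hex: %s\n", mpz_get_str (buf, 16, n));

  mpz_clear (n);
  return 0;
}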
*/ + if (endian == 1) + p += (size - 1); + + rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t); + rp = MPZ_REALLOC (r, rn); + + for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step) + { + size_t j; + for (j = 0; j < size; j++, p -= (ptrdiff_t) endian) + { + limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT); + if (bytes == sizeof(mp_limb_t)) + { + rp[i++] = limb; + bytes = 0; + limb = 0; + } + } + } + assert (i + (bytes > 0) == rn); + if (limb != 0) + rp[i++] = limb; + else + i = mpn_normalized_size (rp, i); + + r->_mp_size = i; +} + +void * +mpz_export (void *r, size_t *countp, int order, size_t size, int endian, + size_t nails, const mpz_t u) +{ + size_t count; + mp_size_t un; + + if (nails != 0) + gmp_die ("mpz_export: Nails not supported."); + + assert (order == 1 || order == -1); + assert (endian >= -1 && endian <= 1); + assert (size > 0 || u->_mp_size == 0); + + un = u->_mp_size; + count = 0; + if (un != 0) + { + size_t k; + unsigned char *p; + ptrdiff_t word_step; + /* The current (partial) limb. */ + mp_limb_t limb; + /* The number of bytes left to do in this limb. */ + size_t bytes; + /* The index where the limb was read. */ + mp_size_t i; + + un = GMP_ABS (un); + + /* Count bytes in top limb. */ + limb = u->_mp_d[un-1]; + assert (limb != 0); + + k = (GMP_LIMB_BITS <= CHAR_BIT); + if (!k) + { + do { + int LOCAL_CHAR_BIT = CHAR_BIT; + k++; limb >>= LOCAL_CHAR_BIT; + } while (limb != 0); + } + /* else limb = 0; */ + + count = (k + (un-1) * sizeof (mp_limb_t) + size - 1) / size; + + if (!r) + r = gmp_alloc (count * size); + + if (endian == 0) + endian = gmp_detect_endian (); + + p = (unsigned char *) r; + + word_step = (order != endian) ? 2 * size : 0; + + /* Process bytes from the least significant end, so point p at the + least significant word. */ + if (order == 1) + { + p += size * (count - 1); + word_step = - word_step; + } + + /* And at least significant byte of that word. */ + if (endian == 1) + p += (size - 1); + + for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step) + { + size_t j; + for (j = 0; j < size; ++j, p -= (ptrdiff_t) endian) + { + if (sizeof (mp_limb_t) == 1) + { + if (i < un) + *p = u->_mp_d[i++]; + else + *p = 0; + } + else + { + int LOCAL_CHAR_BIT = CHAR_BIT; + if (bytes == 0) + { + if (i < un) + limb = u->_mp_d[i++]; + bytes = sizeof (mp_limb_t); + } + *p = limb; + limb >>= LOCAL_CHAR_BIT; + bytes--; + } + } + } + assert (i == un); + assert (k == count); + } + + if (countp) + *countp = count; + + return r; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp.h new file mode 100644 index 0000000000..f28cb360ce --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp.h @@ -0,0 +1,311 @@ +/* mini-gmp, a minimalistic implementation of a GNU GMP subset. + +Copyright 2011-2015, 2017, 2019-2021 Free Software Foundation, Inc. + +This file is part of the GNU MP Library. + +The GNU MP Library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 2 of the License, or (at your option) any + later version. + +or both in parallel, as here. 
+ +The GNU MP Library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the GNU MP Library. If not, +see https://www.gnu.org/licenses/. */ + +/* About mini-gmp: This is a minimal implementation of a subset of the + GMP interface. It is intended for inclusion into applications which + have modest bignums needs, as a fallback when the real GMP library + is not installed. + + This file defines the public interface. */ + +#ifndef __MINI_GMP_H__ +#define __MINI_GMP_H__ + +/* For size_t */ +#include + +#if defined (__cplusplus) +extern "C" { +#endif + +void mp_set_memory_functions (void *(*) (size_t), + void *(*) (void *, size_t, size_t), + void (*) (void *, size_t)); + +void mp_get_memory_functions (void *(**) (size_t), + void *(**) (void *, size_t, size_t), + void (**) (void *, size_t)); + +#ifndef MINI_GMP_LIMB_TYPE +#define MINI_GMP_LIMB_TYPE long +#endif + +typedef unsigned MINI_GMP_LIMB_TYPE mp_limb_t; +typedef long mp_size_t; +typedef unsigned long mp_bitcnt_t; + +typedef mp_limb_t *mp_ptr; +typedef const mp_limb_t *mp_srcptr; + +typedef struct +{ + int _mp_alloc; /* Number of *limbs* allocated and pointed + to by the _mp_d field. */ + int _mp_size; /* abs(_mp_size) is the number of limbs the + last field points to. If _mp_size is + negative this is a negative number. */ + mp_limb_t *_mp_d; /* Pointer to the limbs. */ +} __mpz_struct; + +typedef __mpz_struct mpz_t[1]; + +typedef __mpz_struct *mpz_ptr; +typedef const __mpz_struct *mpz_srcptr; + +extern const int mp_bits_per_limb; + +void mpn_copyi (mp_ptr, mp_srcptr, mp_size_t); +void mpn_copyd (mp_ptr, mp_srcptr, mp_size_t); +void mpn_zero (mp_ptr, mp_size_t); + +int mpn_cmp (mp_srcptr, mp_srcptr, mp_size_t); +int mpn_zero_p (mp_srcptr, mp_size_t); + +mp_limb_t mpn_add_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_add_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +mp_limb_t mpn_add (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); + +mp_limb_t mpn_sub_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_sub_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +mp_limb_t mpn_sub (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); + +mp_limb_t mpn_mul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_addmul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_submul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); + +mp_limb_t mpn_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); +void mpn_mul_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +void mpn_sqr (mp_ptr, mp_srcptr, mp_size_t); +int mpn_perfect_square_p (mp_srcptr, mp_size_t); +mp_size_t mpn_sqrtrem (mp_ptr, mp_ptr, mp_srcptr, mp_size_t); +mp_size_t mpn_gcd (mp_ptr, mp_ptr, mp_size_t, mp_ptr, mp_size_t); + +mp_limb_t mpn_lshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); +mp_limb_t mpn_rshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); + +mp_bitcnt_t mpn_scan0 (mp_srcptr, mp_bitcnt_t); +mp_bitcnt_t mpn_scan1 (mp_srcptr, mp_bitcnt_t); + +void mpn_com (mp_ptr, mp_srcptr, mp_size_t); +mp_limb_t mpn_neg (mp_ptr, mp_srcptr, mp_size_t); + +mp_bitcnt_t mpn_popcount (mp_srcptr, mp_size_t); + +mp_limb_t mpn_invert_3by2 (mp_limb_t, mp_limb_t); +#define mpn_invert_limb(x) mpn_invert_3by2 ((x), 0) + +size_t mpn_get_str (unsigned char *, int, mp_ptr, mp_size_t); 
+mp_size_t mpn_set_str (mp_ptr, const unsigned char *, size_t, int); + +void mpz_init (mpz_t); +void mpz_init2 (mpz_t, mp_bitcnt_t); +void mpz_clear (mpz_t); + +#define mpz_odd_p(z) (((z)->_mp_size != 0) & (int) (z)->_mp_d[0]) +#define mpz_even_p(z) (! mpz_odd_p (z)) + +int mpz_sgn (const mpz_t); +int mpz_cmp_si (const mpz_t, long); +int mpz_cmp_ui (const mpz_t, unsigned long); +int mpz_cmp (const mpz_t, const mpz_t); +int mpz_cmpabs_ui (const mpz_t, unsigned long); +int mpz_cmpabs (const mpz_t, const mpz_t); +int mpz_cmp_d (const mpz_t, double); +int mpz_cmpabs_d (const mpz_t, double); + +void mpz_abs (mpz_t, const mpz_t); +void mpz_neg (mpz_t, const mpz_t); +void mpz_swap (mpz_t, mpz_t); + +void mpz_add_ui (mpz_t, const mpz_t, unsigned long); +void mpz_add (mpz_t, const mpz_t, const mpz_t); +void mpz_sub_ui (mpz_t, const mpz_t, unsigned long); +void mpz_ui_sub (mpz_t, unsigned long, const mpz_t); +void mpz_sub (mpz_t, const mpz_t, const mpz_t); + +void mpz_mul_si (mpz_t, const mpz_t, long int); +void mpz_mul_ui (mpz_t, const mpz_t, unsigned long int); +void mpz_mul (mpz_t, const mpz_t, const mpz_t); +void mpz_mul_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_addmul_ui (mpz_t, const mpz_t, unsigned long int); +void mpz_addmul (mpz_t, const mpz_t, const mpz_t); +void mpz_submul_ui (mpz_t, const mpz_t, unsigned long int); +void mpz_submul (mpz_t, const mpz_t, const mpz_t); + +void mpz_cdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_fdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_tdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_cdiv_q (mpz_t, const mpz_t, const mpz_t); +void mpz_fdiv_q (mpz_t, const mpz_t, const mpz_t); +void mpz_tdiv_q (mpz_t, const mpz_t, const mpz_t); +void mpz_cdiv_r (mpz_t, const mpz_t, const mpz_t); +void mpz_fdiv_r (mpz_t, const mpz_t, const mpz_t); +void mpz_tdiv_r (mpz_t, const mpz_t, const mpz_t); + +void mpz_cdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_fdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_tdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_cdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_fdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_tdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); + +void mpz_mod (mpz_t, const mpz_t, const mpz_t); + +void mpz_divexact (mpz_t, const mpz_t, const mpz_t); + +int mpz_divisible_p (const mpz_t, const mpz_t); +int mpz_congruent_p (const mpz_t, const mpz_t, const mpz_t); + +unsigned long mpz_cdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); +unsigned long mpz_fdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); +unsigned long mpz_tdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); +unsigned long mpz_cdiv_q_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_fdiv_q_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_tdiv_q_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_cdiv_r_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_fdiv_r_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_tdiv_r_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_cdiv_ui (const mpz_t, unsigned long); +unsigned long mpz_fdiv_ui (const mpz_t, unsigned long); +unsigned long mpz_tdiv_ui (const mpz_t, unsigned long); + +unsigned long mpz_mod_ui (mpz_t, const mpz_t, unsigned long); + +void mpz_divexact_ui (mpz_t, const mpz_t, unsigned long); + +int mpz_divisible_ui_p (const mpz_t, unsigned long); + +unsigned long mpz_gcd_ui (mpz_t, const mpz_t, unsigned long); +void mpz_gcd (mpz_t, const mpz_t, const mpz_t); +void 
mpz_gcdext (mpz_t, mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_lcm_ui (mpz_t, const mpz_t, unsigned long); +void mpz_lcm (mpz_t, const mpz_t, const mpz_t); +int mpz_invert (mpz_t, const mpz_t, const mpz_t); + +void mpz_sqrtrem (mpz_t, mpz_t, const mpz_t); +void mpz_sqrt (mpz_t, const mpz_t); +int mpz_perfect_square_p (const mpz_t); + +void mpz_pow_ui (mpz_t, const mpz_t, unsigned long); +void mpz_ui_pow_ui (mpz_t, unsigned long, unsigned long); +void mpz_powm (mpz_t, const mpz_t, const mpz_t, const mpz_t); +void mpz_powm_ui (mpz_t, const mpz_t, unsigned long, const mpz_t); + +void mpz_rootrem (mpz_t, mpz_t, const mpz_t, unsigned long); +int mpz_root (mpz_t, const mpz_t, unsigned long); + +void mpz_fac_ui (mpz_t, unsigned long); +void mpz_2fac_ui (mpz_t, unsigned long); +void mpz_mfac_uiui (mpz_t, unsigned long, unsigned long); +void mpz_bin_uiui (mpz_t, unsigned long, unsigned long); + +int mpz_probab_prime_p (const mpz_t, int); + +int mpz_tstbit (const mpz_t, mp_bitcnt_t); +void mpz_setbit (mpz_t, mp_bitcnt_t); +void mpz_clrbit (mpz_t, mp_bitcnt_t); +void mpz_combit (mpz_t, mp_bitcnt_t); + +void mpz_com (mpz_t, const mpz_t); +void mpz_and (mpz_t, const mpz_t, const mpz_t); +void mpz_ior (mpz_t, const mpz_t, const mpz_t); +void mpz_xor (mpz_t, const mpz_t, const mpz_t); + +mp_bitcnt_t mpz_popcount (const mpz_t); +mp_bitcnt_t mpz_hamdist (const mpz_t, const mpz_t); +mp_bitcnt_t mpz_scan0 (const mpz_t, mp_bitcnt_t); +mp_bitcnt_t mpz_scan1 (const mpz_t, mp_bitcnt_t); + +int mpz_fits_slong_p (const mpz_t); +int mpz_fits_ulong_p (const mpz_t); +int mpz_fits_sint_p (const mpz_t); +int mpz_fits_uint_p (const mpz_t); +int mpz_fits_sshort_p (const mpz_t); +int mpz_fits_ushort_p (const mpz_t); +long int mpz_get_si (const mpz_t); +unsigned long int mpz_get_ui (const mpz_t); +double mpz_get_d (const mpz_t); +size_t mpz_size (const mpz_t); +mp_limb_t mpz_getlimbn (const mpz_t, mp_size_t); + +void mpz_realloc2 (mpz_t, mp_bitcnt_t); +mp_srcptr mpz_limbs_read (mpz_srcptr); +mp_ptr mpz_limbs_modify (mpz_t, mp_size_t); +mp_ptr mpz_limbs_write (mpz_t, mp_size_t); +void mpz_limbs_finish (mpz_t, mp_size_t); +mpz_srcptr mpz_roinit_n (mpz_t, mp_srcptr, mp_size_t); + +#define MPZ_ROINIT_N(xp, xs) {{0, (xs),(xp) }} + +void mpz_set_si (mpz_t, signed long int); +void mpz_set_ui (mpz_t, unsigned long int); +void mpz_set (mpz_t, const mpz_t); +void mpz_set_d (mpz_t, double); + +void mpz_init_set_si (mpz_t, signed long int); +void mpz_init_set_ui (mpz_t, unsigned long int); +void mpz_init_set (mpz_t, const mpz_t); +void mpz_init_set_d (mpz_t, double); + +size_t mpz_sizeinbase (const mpz_t, int); +char *mpz_get_str (char *, int, const mpz_t); +int mpz_set_str (mpz_t, const char *, int); +int mpz_init_set_str (mpz_t, const char *, int); + +/* This long list taken from gmp.h. */ +/* For reference, "defined(EOF)" cannot be used here. In g++ 2.95.4, + defines EOF but not FILE. 
*/ +#if defined (FILE) \ + || defined (H_STDIO) \ + || defined (_H_STDIO) /* AIX */ \ + || defined (_STDIO_H) /* glibc, Sun, SCO */ \ + || defined (_STDIO_H_) /* BSD, OSF */ \ + || defined (__STDIO_H) /* Borland */ \ + || defined (__STDIO_H__) /* IRIX */ \ + || defined (_STDIO_INCLUDED) /* HPUX */ \ + || defined (__dj_include_stdio_h_) /* DJGPP */ \ + || defined (_FILE_DEFINED) /* Microsoft */ \ + || defined (__STDIO__) /* Apple MPW MrC */ \ + || defined (_MSL_STDIO_H) /* Metrowerks */ \ + || defined (_STDIO_H_INCLUDED) /* QNX4 */ \ + || defined (_ISO_STDIO_ISO_H) /* Sun C++ */ \ + || defined (__STDIO_LOADED) /* VMS */ \ + || defined (_STDIO) /* HPE NonStop */ \ + || defined (__DEFINED_FILE) /* musl */ +size_t mpz_out_str (FILE *, int, const mpz_t); +#endif + +void mpz_import (mpz_t, size_t, int, size_t, int, size_t, const void *); +void *mpz_export (void *, size_t *, int, size_t, int, size_t, const mpz_t); + +#if defined (__cplusplus) +} +#endif +#endif /* __MINI_GMP_H__ */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mp.c new file mode 100644 index 0000000000..27f4a963db --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mp.c @@ -0,0 +1,357 @@ +#include +#include +#include +#include + +// double-wide multiplication +void +MUL(digit_t *out, const digit_t a, const digit_t b) +{ +#ifdef RADIX_32 + uint64_t r = (uint64_t)a * b; + out[0] = r & 0xFFFFFFFFUL; + out[1] = r >> 32; + +#elif defined(RADIX_64) && defined(_MSC_VER) + uint64_t umul_hi; + out[0] = _umul128(a, b, &umul_hi); + out[1] = umul_hi; + +#elif defined(RADIX_64) && defined(HAVE_UINT128) + unsigned __int128 umul_tmp; + umul_tmp = (unsigned __int128)(a) * (unsigned __int128)(b); + out[0] = (uint64_t)umul_tmp; + out[1] = (uint64_t)(umul_tmp >> 64); + +#else + register digit_t al, ah, bl, bh, temp; + digit_t albl, albh, ahbl, ahbh, res1, res2, res3, carry; + digit_t mask_low = (digit_t)(-1) >> (sizeof(digit_t) * 4), mask_high = (digit_t)(-1) << (sizeof(digit_t) * 4); + al = a & mask_low; // Low part + ah = a >> (sizeof(digit_t) * 4); // High part + bl = b & mask_low; + bh = b >> (sizeof(digit_t) * 4); + + albl = al * bl; + albh = al * bh; + ahbl = ah * bl; + ahbh = ah * bh; + out[0] = albl & mask_low; // out00 + + res1 = albl >> (sizeof(digit_t) * 4); + res2 = ahbl & mask_low; + res3 = albh & mask_low; + temp = res1 + res2 + res3; + carry = temp >> (sizeof(digit_t) * 4); + out[0] ^= temp << (sizeof(digit_t) * 4); // out01 + + res1 = ahbl >> (sizeof(digit_t) * 4); + res2 = albh >> (sizeof(digit_t) * 4); + res3 = ahbh & mask_low; + temp = res1 + res2 + res3 + carry; + out[1] = temp & mask_low; // out10 + carry = temp & mask_high; + out[1] ^= (ahbh & mask_high) + carry; // out11 + +#endif +} + +void +mp_add(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords) +{ // Multiprecision addition + unsigned int i, carry = 0; + + for (i = 0; i < nwords; i++) { + ADDC(c[i], carry, a[i], b[i], carry); + } +} + +digit_t +mp_shiftr(digit_t *x, const unsigned int shift, const unsigned int nwords) +{ // Multiprecision right shift by 1...RADIX-1 + digit_t bit_out = x[0] & 1; + + for (unsigned int i = 0; i < nwords - 1; i++) { + SHIFTR(x[i + 1], x[i], shift, x[i], RADIX); + } + x[nwords - 1] >>= shift; + return bit_out; +} + +void +mp_shiftl(digit_t *x, const unsigned int shift, const unsigned int nwords) +{ // Multiprecision left shift by 1...RADIX-1 + + for (int i = nwords - 1; i > 0; i--) { + SHIFTL(x[i], x[i - 1], shift, x[i], RADIX); + } + x[0] <<= 
shift; +} + +void +multiple_mp_shiftl(digit_t *x, const unsigned int shift, const unsigned int nwords) +{ + int t = shift; + while (t > RADIX - 1) { + mp_shiftl(x, RADIX - 1, nwords); + t = t - (RADIX - 1); + } + mp_shiftl(x, t, nwords); +} + +// The below functions were taken from the EC module + +void +mp_sub(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords) +{ // Multiprecision subtraction, assuming a > b + unsigned int i, borrow = 0; + + for (i = 0; i < nwords; i++) { + SUBC(c[i], borrow, a[i], b[i], borrow); + } +} + +void +select_ct(digit_t *c, const digit_t *a, const digit_t *b, const digit_t mask, const int nwords) +{ // Select c <- a if mask = 0, select c <- b if mask = 1...1 + + for (int i = 0; i < nwords; i++) { + c[i] = ((a[i] ^ b[i]) & mask) ^ a[i]; + } +} + +void +swap_ct(digit_t *a, digit_t *b, const digit_t option, const int nwords) +{ // Swap entries + // If option = 0 then P <- P and Q <- Q, else if option = 0xFF...FF then a <- b and b <- a + digit_t temp; + + for (int i = 0; i < nwords; i++) { + temp = option & (a[i] ^ b[i]); + a[i] = temp ^ a[i]; + b[i] = temp ^ b[i]; + } +} + +int +mp_compare(const digit_t *a, const digit_t *b, unsigned int nwords) +{ // Multiprecision comparison, a=b? : (1) a>b, (0) a=b, (-1) a= 0; i--) { + if (a[i] > b[i]) + return 1; + else if (a[i] < b[i]) + return -1; + } + return 0; +} + +bool +mp_is_zero(const digit_t *a, unsigned int nwords) +{ // Is a multiprecision element zero? + // Returns 1 (true) if a=0, 0 (false) otherwise + digit_t r = 0; + + for (unsigned int i = 0; i < nwords; i++) + r |= a[i] ^ 0; + + return (bool)is_digit_zero_ct(r); +} + +void +mp_mul2(digit_t *c, const digit_t *a, const digit_t *b) +{ // Multiprecision multiplication fixed to two-digit operands + unsigned int carry = 0; + digit_t t0[2], t1[2], t2[2]; + + MUL(t0, a[0], b[0]); + MUL(t1, a[0], b[1]); + ADDC(t0[1], carry, t0[1], t1[0], carry); + ADDC(t1[1], carry, 0, t1[1], carry); + MUL(t2, a[1], b[1]); + ADDC(t2[0], carry, t2[0], t1[1], carry); + ADDC(t2[1], carry, 0, t2[1], carry); + c[0] = t0[0]; + c[1] = t0[1]; + c[2] = t2[0]; + c[3] = t2[1]; +} + +void +mp_print(const digit_t *a, size_t nwords) +{ + printf("0x"); + for (size_t i = 0; i < nwords; i++) { +#ifdef RADIX_32 + printf("%08" PRIx32, a[nwords - i - 1]); // Print each word with 8 hex digits +#elif defined(RADIX_64) + printf("%016" PRIx64, a[nwords - i - 1]); // Print each word with 16 hex digits +#endif + } +} + +void +mp_copy(digit_t *b, const digit_t *a, size_t nwords) +{ + for (size_t i = 0; i < nwords; i++) { + b[i] = a[i]; + } +} + +void +mp_mul(digit_t *c, const digit_t *a, const digit_t *b, size_t nwords) +{ + // Multiprecision multiplication, c = a*b, for nwords-digit inputs, with nwords-digit output + // explicitly does not use the higher half of c, as we do not need in our applications + digit_t carry, UV[2], t[nwords], cc[nwords]; + + for (size_t i = 0; i < nwords; i++) { + cc[i] = 0; + } + + for (size_t i = 0; i < nwords; i++) { + + MUL(t, a[i], b[0]); + + for (size_t j = 1; j < nwords - 1; j++) { + MUL(UV, a[i], b[j]); + ADDC(t[j], carry, t[j], UV[0], 0); + t[j + 1] = UV[1] + carry; + } + + int j = nwords - 1; + MUL(UV, a[i], b[j]); + ADDC(t[j], carry, t[j], UV[0], 0); + + mp_add(&cc[i], &cc[i], t, nwords - i); + } + + mp_copy(c, cc, nwords); +} + +void +mp_mod_2exp(digit_t *a, unsigned int e, unsigned int nwords) +{ // Multiprecision modulo 2^e, with 0 <= a < 2^(e) + unsigned int i, q = e >> LOG2RADIX, r = e & (RADIX - 1); + + if (q < nwords) { + a[q] &= ((digit_t)1 
<< r) - 1; + + for (i = q + 1; i < nwords; i++) { + a[i] = 0; + } + } +} + +void +mp_neg(digit_t *a, unsigned int nwords) +{ // negates a + for (size_t i = 0; i < nwords; i++) { + a[i] ^= -1; + } + + a[0] += 1; +} + +bool +mp_is_one(const digit_t *x, unsigned int nwords) +{ // returns true if x represents 1, and false otherwise + if (x[0] != 1) { + return false; + } + + for (size_t i = 1; i < nwords; i++) { + if (x[i] != 0) { + return false; + } + } + return true; +} + +void +mp_inv_2e(digit_t *b, const digit_t *a, int e, unsigned int nwords) +{ // Inversion modulo 2^e, using Newton's method and Hensel lifting + // we take the first power of 2 larger than e to use + // requires a to be odd, of course + // returns b such that a*b = 1 mod 2^e + assert((a[0] & 1) == 1); + + digit_t x[nwords], y[nwords], aa[nwords], mp_one[nwords], tmp[nwords]; + mp_copy(aa, a, nwords); + + mp_one[0] = 1; + for (unsigned int i = 1; i < nwords; i++) { + mp_one[i] = 0; + } + + int p = 1; + while ((1 << p) < e) { + p++; + } + p -= 2; // using k = 4 for initial inverse + int w = (1 << (p + 2)); + + mp_mod_2exp(aa, w, nwords); + mp_add(x, aa, aa, nwords); + mp_add(x, x, aa, nwords); // should be 3a + x[0] ^= (1 << 1); // so that x equals (3a)^2 xor 2 + mp_mod_2exp(x, w, nwords); // now x*a = 1 mod 2^4, which we lift + + mp_mul(tmp, aa, x, nwords); + mp_neg(tmp, nwords); + mp_add(y, mp_one, tmp, nwords); + + // Hensel lifting for p rounds + for (int i = 0; i < p; i++) { + mp_add(tmp, mp_one, y, nwords); + mp_mul(x, x, tmp, nwords); + mp_mul(y, y, y, nwords); + } + + mp_mod_2exp(x, w, nwords); + mp_copy(b, x, nwords); + + // verify results + mp_mul(x, x, aa, nwords); + mp_mod_2exp(x, w, nwords); + assert(mp_is_one(x, nwords)); +} + +void +mp_invert_matrix(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, int e, unsigned int nwords) +{ + // given a matrix ( ( a, b ), (c, d) ) of values mod 2^e + // returns the inverse matrix gamma ( (d, -b), (-c, a) ) + // where gamma is the inverse of the determinant a*d - b*c + // assumes the matrix is invertible, otherwises, inversion of determinant fails + + int p = 1; + while ((1 << p) < e) { + p++; + } + int w = (1 << (p)); + + digit_t det[nwords], tmp[nwords], resa[nwords], resb[nwords], resc[nwords], resd[nwords]; + mp_mul(tmp, r1, s2, nwords); + mp_mul(det, r2, s1, nwords); + mp_sub(det, tmp, det, nwords); + mp_inv_2e(det, det, e, nwords); + + mp_mul(resa, det, s2, nwords); + mp_mul(resb, det, r2, nwords); + mp_mul(resc, det, s1, nwords); + mp_mul(resd, det, r1, nwords); + + mp_neg(resb, nwords); + mp_neg(resc, nwords); + + mp_mod_2exp(resa, w, nwords); + mp_mod_2exp(resb, w, nwords); + mp_mod_2exp(resc, w, nwords); + mp_mod_2exp(resd, w, nwords); + + mp_copy(r1, resa, nwords); + mp_copy(r2, resb, nwords); + mp_copy(s1, resc, nwords); + mp_copy(s2, resd, nwords); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mp.h new file mode 100644 index 0000000000..b3733b520d --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mp.h @@ -0,0 +1,88 @@ +#ifndef MP_H +#define MP_H + +#include +#include +#include + +// Functions taken from the GF module + +void mp_add(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords); +digit_t mp_shiftr(digit_t *x, const unsigned int shift, const unsigned int nwords); +void multiple_mp_shiftl(digit_t *x, const unsigned int shift, const unsigned int nwords); +void mp_shiftl(digit_t *x, const unsigned int shift, const unsigned int nwords); +void MUL(digit_t 
*out, const digit_t a, const digit_t b); + +// Functions taken from the EC module + +void mp_sub(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords); +void select_ct(digit_t *c, const digit_t *a, const digit_t *b, const digit_t mask, const int nwords); +void swap_ct(digit_t *a, digit_t *b, const digit_t option, const int nwords); +int mp_compare(const digit_t *a, const digit_t *b, unsigned int nwords); +bool mp_is_zero(const digit_t *a, unsigned int nwords); +void mp_mul2(digit_t *c, const digit_t *a, const digit_t *b); + +// Further functions for multiprecision arithmetic +void mp_print(const digit_t *a, size_t nwords); +void mp_copy(digit_t *b, const digit_t *a, size_t nwords); +void mp_neg(digit_t *a, unsigned int nwords); +bool mp_is_one(const digit_t *x, unsigned int nwords); +void mp_mul(digit_t *c, const digit_t *a, const digit_t *b, size_t nwords); +void mp_mod_2exp(digit_t *a, unsigned int e, unsigned int nwords); +void mp_inv_2e(digit_t *b, const digit_t *a, int e, unsigned int nwords); +void mp_invert_matrix(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, int e, unsigned int nwords); + +#define mp_is_odd(x, nwords) (((nwords) != 0) & (int)(x)[0]) +#define mp_is_even(x, nwords) (!mp_is_odd(x, nwords)) + +/********************** Constant-time unsigned comparisons ***********************/ + +// The following functions return 1 (TRUE) if condition is true, 0 (FALSE) otherwise +static inline unsigned int +is_digit_nonzero_ct(digit_t x) +{ // Is x != 0? + return (unsigned int)((x | (0 - x)) >> (RADIX - 1)); +} + +static inline unsigned int +is_digit_zero_ct(digit_t x) +{ // Is x = 0? + return (unsigned int)(1 ^ is_digit_nonzero_ct(x)); +} + +static inline unsigned int +is_digit_lessthan_ct(digit_t x, digit_t y) +{ // Is x < y? 
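+    // Branchless comparison: the expression below computes the borrow bit of the
+    // subtraction x - y (a standard bit-twiddling identity), so the top bit isolated
+    // by the shift is 1 exactly when x < y as unsigned digits.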
+ return (unsigned int)((x ^ ((x ^ y) | ((x - y) ^ y))) >> (RADIX - 1)); +} + +/********************** Platform-independent macros for digit-size operations + * **********************/ + +// Digit addition with carry +#define ADDC(sumOut, carryOut, addend1, addend2, carryIn) \ + { \ + digit_t tempReg = (addend1) + (digit_t)(carryIn); \ + (sumOut) = (addend2) + tempReg; \ + (carryOut) = (is_digit_lessthan_ct(tempReg, (digit_t)(carryIn)) | is_digit_lessthan_ct((sumOut), tempReg)); \ + } + +// Digit subtraction with borrow +#define SUBC(differenceOut, borrowOut, minuend, subtrahend, borrowIn) \ + { \ + digit_t tempReg = (minuend) - (subtrahend); \ + unsigned int borrowReg = \ + (is_digit_lessthan_ct((minuend), (subtrahend)) | ((borrowIn) & is_digit_zero_ct(tempReg))); \ + (differenceOut) = tempReg - (digit_t)(borrowIn); \ + (borrowOut) = borrowReg; \ + } + +// Shift right with flexible datatype +#define SHIFTR(highIn, lowIn, shift, shiftOut, DigitSize) \ + (shiftOut) = ((lowIn) >> (shift)) ^ ((highIn) << (DigitSize - (shift))); + +// Digit shift left +#define SHIFTL(highIn, lowIn, shift, shiftOut, DigitSize) \ + (shiftOut) = ((highIn) << (shift)) ^ ((lowIn) >> (RADIX - (shift))); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/normeq.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/normeq.c new file mode 100644 index 0000000000..8c133dd095 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/normeq.c @@ -0,0 +1,369 @@ +#include +#include "internal.h" + +/** @file + * + * @authors Antonin Leroux + * + * @brief Functions related to norm equation solving or special extremal orders + */ + +void +quat_lattice_O0_set(quat_lattice_t *O0) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&(O0->basis[i][j]), 0); + } + } + ibz_set(&(O0->denom), 2); + ibz_set(&(O0->basis[0][0]), 2); + ibz_set(&(O0->basis[1][1]), 2); + ibz_set(&(O0->basis[2][2]), 1); + ibz_set(&(O0->basis[1][2]), 1); + ibz_set(&(O0->basis[3][3]), 1); + ibz_set(&(O0->basis[0][3]), 1); +} + +void +quat_lattice_O0_set_extremal(quat_p_extremal_maximal_order_t *O0) +{ + ibz_set(&O0->z.coord[1], 1); + ibz_set(&O0->t.coord[2], 1); + ibz_set(&O0->z.denom, 1); + ibz_set(&O0->t.denom, 1); + O0->q = 1; + quat_lattice_O0_set(&(O0->order)); +} + +void +quat_order_elem_create(quat_alg_elem_t *elem, + const quat_p_extremal_maximal_order_t *order, + const ibz_vec_4_t *coeffs, + const quat_alg_t *Bpoo) +{ + + // var dec + quat_alg_elem_t quat_temp; + + // var init + quat_alg_elem_init(&quat_temp); + + // elem = x + quat_alg_scalar(elem, &(*coeffs)[0], &ibz_const_one); + + // quat_temp = i*y + quat_alg_scalar(&quat_temp, &((*coeffs)[1]), &ibz_const_one); + quat_alg_mul(&quat_temp, &order->z, &quat_temp, Bpoo); + + // elem = x + i*y + quat_alg_add(elem, elem, &quat_temp); + + // quat_temp = z * j + quat_alg_scalar(&quat_temp, &(*coeffs)[2], &ibz_const_one); + quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); + + // elem = x + i* + z*j + quat_alg_add(elem, elem, &quat_temp); + + // quat_temp = t * j * i + quat_alg_scalar(&quat_temp, &(*coeffs)[3], &ibz_const_one); + quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); + quat_alg_mul(&quat_temp, &quat_temp, &order->z, Bpoo); + + // elem = x + i*y + j*z + j*i*t + quat_alg_add(elem, elem, &quat_temp); + + quat_alg_elem_finalize(&quat_temp); +} + +int +quat_represent_integer(quat_alg_elem_t *gamma, + const ibz_t *n_gamma, + int non_diag, + const quat_represent_integer_params_t *params) +{ + + if (ibz_is_even(n_gamma)) { + return 0; + } + // var 
dec + int found; + ibz_t cornacchia_target; + ibz_t adjusted_n_gamma, q; + ibz_t bound, sq_bound, temp; + ibz_t test; + ibz_vec_4_t coeffs; // coeffs = [x,y,z,t] + quat_alg_elem_t quat_temp; + + if (non_diag) + assert(params->order->q % 4 == 1); + + // var init + found = 0; + ibz_init(&bound); + ibz_init(&test); + ibz_init(&temp); + ibz_init(&q); + ibz_init(&sq_bound); + ibz_vec_4_init(&coeffs); + quat_alg_elem_init(&quat_temp); + ibz_init(&adjusted_n_gamma); + ibz_init(&cornacchia_target); + + ibz_set(&q, params->order->q); + + // this could be removed in the current state + int standard_order = (params->order->q == 1); + + // adjusting the norm of gamma (multiplying by 4 to find a solution in an order of odd level) + if (non_diag || standard_order) { + ibz_mul(&adjusted_n_gamma, n_gamma, &ibz_const_two); + ibz_mul(&adjusted_n_gamma, &adjusted_n_gamma, &ibz_const_two); + } else { + ibz_copy(&adjusted_n_gamma, n_gamma); + } + // computation of the first bound = sqrt (adjust_n_gamma / p - q) + ibz_div(&sq_bound, &bound, &adjusted_n_gamma, &((params->algebra)->p)); + ibz_set(&temp, params->order->q); + ibz_sub(&sq_bound, &sq_bound, &temp); + ibz_sqrt_floor(&bound, &sq_bound); + + // the size of the search space is roughly n_gamma / (p√q) + ibz_t counter; + ibz_init(&counter); + ibz_mul(&temp, &temp, &((params->algebra)->p)); + ibz_mul(&temp, &temp, &((params->algebra)->p)); + ibz_sqrt_floor(&temp, &temp); + ibz_div(&counter, &temp, &adjusted_n_gamma, &temp); + + // entering the main loop + while (!found && ibz_cmp(&counter, &ibz_const_zero) != 0) { + // decreasing the counter + ibz_sub(&counter, &counter, &ibz_const_one); + + // we start by sampling the first coordinate + ibz_rand_interval(&coeffs[2], &ibz_const_one, &bound); + + // then, we sample the second coordinate + // computing the second bound in temp as sqrt( (adjust_n_gamma - p*coeffs[2]²)/qp ) + ibz_mul(&cornacchia_target, &coeffs[2], &coeffs[2]); + ibz_mul(&temp, &cornacchia_target, &(params->algebra->p)); + ibz_sub(&temp, &adjusted_n_gamma, &temp); + ibz_mul(&sq_bound, &q, &(params->algebra->p)); + ibz_div(&temp, &sq_bound, &temp, &sq_bound); + ibz_sqrt_floor(&temp, &temp); + + if (ibz_cmp(&temp, &ibz_const_zero) == 0) { + continue; + } + // sampling the second value + ibz_rand_interval(&coeffs[3], &ibz_const_one, &temp); + + // compute cornacchia_target = n_gamma - p * (z² + q*t²) + ibz_mul(&temp, &coeffs[3], &coeffs[3]); + ibz_mul(&temp, &q, &temp); + ibz_add(&cornacchia_target, &cornacchia_target, &temp); + ibz_mul(&cornacchia_target, &cornacchia_target, &((params->algebra)->p)); + ibz_sub(&cornacchia_target, &adjusted_n_gamma, &cornacchia_target); + assert(ibz_cmp(&cornacchia_target, &ibz_const_zero) > 0); + + // applying cornacchia + if (ibz_probab_prime(&cornacchia_target, params->primality_test_iterations)) + found = ibz_cornacchia_prime(&(coeffs[0]), &(coeffs[1]), &q, &cornacchia_target); + else + found = 0; + + if (found && non_diag && standard_order) { + // check that we can divide by two at least once + // the treatmeat depends if the basis contains (1+j)/2 or (1+k)/2 + // we must have x = t mod 2 and y = z mod 2 + // if q=1 we can simply swap x and y + if (ibz_is_odd(&coeffs[0]) != ibz_is_odd(&coeffs[3])) { + ibz_swap(&coeffs[1], &coeffs[0]); + } + // we further check that (x-t)/2 = 1 mod 2 and (y-z)/2 = 1 mod 2 to ensure that the + // resulting endomorphism will behave well for dim 2 computations + found = found && ((ibz_get(&coeffs[0]) - ibz_get(&coeffs[3])) % 4 == 2) && + ((ibz_get(&coeffs[1]) - 
ibz_get(&coeffs[2])) % 4 == 2); + } + if (found) { + +#ifndef NDEBUG + ibz_set(&temp, (params->order->q)); + ibz_mul(&temp, &temp, &(coeffs[1])); + ibz_mul(&temp, &temp, &(coeffs[1])); + ibz_mul(&test, &(coeffs[0]), &(coeffs[0])); + ibz_add(&temp, &temp, &test); + assert(0 == ibz_cmp(&temp, &cornacchia_target)); + + ibz_mul(&cornacchia_target, &(coeffs[3]), &(coeffs[3])); + ibz_mul(&cornacchia_target, &cornacchia_target, &(params->algebra->p)); + ibz_mul(&temp, &(coeffs[1]), &(coeffs[1])); + ibz_add(&cornacchia_target, &cornacchia_target, &temp); + ibz_set(&temp, (params->order->q)); + ibz_mul(&cornacchia_target, &cornacchia_target, &temp); + ibz_mul(&temp, &(coeffs[0]), &coeffs[0]); + ibz_add(&cornacchia_target, &cornacchia_target, &temp); + ibz_mul(&temp, &(coeffs[2]), &coeffs[2]); + ibz_mul(&temp, &temp, &(params->algebra->p)); + ibz_add(&cornacchia_target, &cornacchia_target, &temp); + assert(0 == ibz_cmp(&cornacchia_target, &adjusted_n_gamma)); +#endif + // translate x,y,z,t into the quaternion element gamma + quat_order_elem_create(gamma, (params->order), &coeffs, (params->algebra)); +#ifndef NDEBUG + quat_alg_norm(&temp, &(coeffs[0]), gamma, (params->algebra)); + assert(ibz_is_one(&(coeffs[0]))); + assert(0 == ibz_cmp(&temp, &adjusted_n_gamma)); + assert(quat_lattice_contains(NULL, &((params->order)->order), gamma)); +#endif + // making gamma primitive + // coeffs contains the coefficients of primitivized gamma in the basis of order + quat_alg_make_primitive(&coeffs, &temp, gamma, &((params->order)->order)); + + if (non_diag || standard_order) + found = (ibz_cmp(&temp, &ibz_const_two) == 0); + else + found = (ibz_cmp(&temp, &ibz_const_one) == 0); + } + } + + if (found) { + // new gamma + ibz_mat_4x4_eval(&coeffs, &(((params->order)->order).basis), &coeffs); + ibz_copy(&gamma->coord[0], &coeffs[0]); + ibz_copy(&gamma->coord[1], &coeffs[1]); + ibz_copy(&gamma->coord[2], &coeffs[2]); + ibz_copy(&gamma->coord[3], &coeffs[3]); + ibz_copy(&gamma->denom, &(((params->order)->order).denom)); + } + // var finalize + ibz_finalize(&counter); + ibz_finalize(&bound); + ibz_finalize(&temp); + ibz_finalize(&sq_bound); + ibz_vec_4_finalize(&coeffs); + quat_alg_elem_finalize(&quat_temp); + ibz_finalize(&adjusted_n_gamma); + ibz_finalize(&cornacchia_target); + ibz_finalize(&q); + ibz_finalize(&test); + + return found; +} + +int +quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, + const ibz_t *norm, + int is_prime, + const quat_represent_integer_params_t *params, + const ibz_t *prime_cofactor) +{ + + ibz_t n_temp, norm_d; + ibz_t disc; + quat_alg_elem_t gen, gen_rerand; + int found = 0; + ibz_init(&n_temp); + ibz_init(&norm_d); + ibz_init(&disc); + quat_alg_elem_init(&gen); + quat_alg_elem_init(&gen_rerand); + + // when the norm is prime we can be quite efficient + // by avoiding to run represent integer + // the first step is to generate one ideal of the correct norm + if (is_prime) { + + // we find a quaternion element of norm divisible by norm + while (!found) { + // generating a trace-zero element at random + ibz_set(&gen.coord[0], 0); + ibz_sub(&n_temp, norm, &ibz_const_one); + for (int i = 1; i < 4; i++) + ibz_rand_interval(&gen.coord[i], &ibz_const_zero, &n_temp); + + // first, we compute the norm of the gen + quat_alg_norm(&n_temp, &norm_d, &gen, (params->algebra)); + assert(ibz_is_one(&norm_d)); + + // and finally the negation mod norm + ibz_neg(&disc, &n_temp); + ibz_mod(&disc, &disc, norm); + // now we check that -n is a square mod norm + // and if the square root exists we 
compute it + found = ibz_sqrt_mod_p(&gen.coord[0], &disc, norm); + found = found && !quat_alg_elem_is_zero(&gen); + } + } else { + assert(prime_cofactor != NULL); + // if it is not prime or we don't know if it is prime, we may just use represent integer + // and use a precomputed prime as cofactor + assert(!ibz_is_zero(norm)); + ibz_mul(&n_temp, prime_cofactor, norm); + found = quat_represent_integer(&gen, &n_temp, 0, params); + found = found && !quat_alg_elem_is_zero(&gen); + } +#ifndef NDEBUG + if (found) { + // first, we compute the norm of the gen + quat_alg_norm(&n_temp, &norm_d, &gen, (params->algebra)); + assert(ibz_is_one(&norm_d)); + ibz_mod(&n_temp, &n_temp, norm); + assert(ibz_cmp(&n_temp, &ibz_const_zero) == 0); + } +#endif + + // now we just have to rerandomize the class of the ideal generated by gen + found = 0; + while (!found) { + for (int i = 0; i < 4; i++) { + ibz_rand_interval(&gen_rerand.coord[i], &ibz_const_one, norm); + } + quat_alg_norm(&n_temp, &norm_d, &gen_rerand, (params->algebra)); + assert(ibz_is_one(&norm_d)); + ibz_gcd(&disc, &n_temp, norm); + found = ibz_is_one(&disc); + found = found && !quat_alg_elem_is_zero(&gen_rerand); + } + + quat_alg_mul(&gen, &gen, &gen_rerand, (params->algebra)); + // in both cases, whether norm is prime or not prime, + // gen is not divisible by any integer factor of the target norm + // therefore the call below will yield an ideal of the correct norm + quat_lideal_create(lideal, &gen, norm, &((params->order)->order), (params->algebra)); + assert(ibz_cmp(norm, &(lideal->norm)) == 0); + + ibz_finalize(&n_temp); + quat_alg_elem_finalize(&gen); + quat_alg_elem_finalize(&gen_rerand); + ibz_finalize(&norm_d); + ibz_finalize(&disc); + return (found); +} + +void +quat_change_to_O0_basis(ibz_vec_4_t *vec, const quat_alg_elem_t *el) +{ + ibz_t tmp; + ibz_init(&tmp); + ibz_copy(&(*vec)[2], &el->coord[2]); + ibz_add(&(*vec)[2], &(*vec)[2], &(*vec)[2]); // double (not optimal if el->denom is even...) + ibz_copy(&(*vec)[3], &el->coord[3]); // double (not optimal if el->denom is even...) 
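+    // In the O0 basis (1, i, (i+j)/2, (1+ij)/2), an element (a + b*i + c*j + d*ij)/denom
+    // has coordinates ((a - d)/denom, (b - c)/denom, 2c/denom, 2d/denom); the two doublings
+    // and the two subtractions in this function implement exactly this change of basis.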
+ ibz_add(&(*vec)[3], &(*vec)[3], &(*vec)[3]); + ibz_sub(&(*vec)[0], &el->coord[0], &el->coord[3]); + ibz_sub(&(*vec)[1], &el->coord[1], &el->coord[2]); + + assert(ibz_divides(&(*vec)[0], &el->denom)); + assert(ibz_divides(&(*vec)[1], &el->denom)); + assert(ibz_divides(&(*vec)[2], &el->denom)); + assert(ibz_divides(&(*vec)[3], &el->denom)); + + ibz_div(&(*vec)[0], &tmp, &(*vec)[0], &el->denom); + ibz_div(&(*vec)[1], &tmp, &(*vec)[1], &el->denom); + ibz_div(&(*vec)[2], &tmp, &(*vec)[2], &el->denom); + ibz_div(&(*vec)[3], &tmp, &(*vec)[3], &el->denom); + + ibz_finalize(&tmp); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/printer.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/printer.c new file mode 100644 index 0000000000..6d6a3ca9b7 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/printer.c @@ -0,0 +1,132 @@ +#include +#include "internal.h" + +void +ibz_mat_2x2_print(const ibz_mat_2x2_t *mat) +{ + printf("matrix: "); + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_print(&((*mat)[i][j]), 10); + printf(" "); + } + printf("\n "); + } + printf("\n"); +} + +void +ibz_mat_4x4_print(const ibz_mat_4x4_t *mat) +{ + printf("matrix: "); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_print(&((*mat)[i][j]), 10); + printf(" "); + } + printf("\n "); + } + printf("\n"); +} + +void +ibz_vec_2_print(const ibz_vec_2_t *vec) +{ + printf("vector: "); + for (int i = 0; i < 2; i++) { + ibz_print(&((*vec)[i]), 10); + printf(" "); + } + printf("\n\n"); +} + +void +ibz_vec_4_print(const ibz_vec_4_t *vec) +{ + printf("vector: "); + for (int i = 0; i < 4; i++) { + ibz_print(&((*vec)[i]), 10); + printf(" "); + } + printf("\n\n"); +} + +void +quat_lattice_print(const quat_lattice_t *lat) +{ + printf("lattice\n"); + printf("denominator: "); + ibz_print(&(lat->denom), 10); + printf("\n"); + printf("basis: "); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_print(&((lat->basis)[i][j]), 10); + printf(" "); + } + printf("\n "); + } + printf("\n"); +} + +void +quat_alg_print(const quat_alg_t *alg) +{ + printf("quaternion algebra ramified at "); + ibz_print(&(alg->p), 10); + printf(" and infinity\n\n"); +} + +void +quat_alg_elem_print(const quat_alg_elem_t *elem) +{ + printf("denominator: "); + ibz_print(&(elem->denom), 10); + printf("\n"); + printf("coordinates: "); + for (int i = 0; i < 4; i++) { + ibz_print(&((elem->coord)[i]), 10); + printf(" "); + } + printf("\n\n"); +} + +void +quat_left_ideal_print(const quat_left_ideal_t *lideal) +{ + printf("left ideal\n"); + printf("norm: "); + ibz_print(&(lideal->norm), 10); + printf("\n"); + printf("denominator: "); + ibz_print(&(lideal->lattice.denom), 10); + printf("\n"); + printf("basis: "); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_print(&((lideal->lattice.basis)[i][j]), 10); + printf(" "); + } + if (i != 3) { + printf("\n "); + } else { + printf("\n"); + } + } + if ((lideal->parent_order) != NULL) { + printf("parent order denominator: "); + ibz_print(&(lideal->parent_order->denom), 10); + printf("\n"); + printf("parent order basis: "); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_print(&((lideal->parent_order->basis)[i][j]), 10); + printf(" "); + } + printf("\n "); + } + } else { + printf("Parent order not given!\n"); + } + printf("\n"); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion.h new file mode 100644 index 0000000000..a567657464 --- 
/dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion.h @@ -0,0 +1,708 @@ +/** @file + * + * @authors Luca De Feo, Sina Schaeffler + * + * @brief Declarations for quaternion algebra operations + */ + +#ifndef QUATERNION_H +#define QUATERNION_H + +// #include +#include +#include "intbig.h" +#include + +/** @defgroup quat_quat Quaternion algebra + * @{ + */ + +/** @defgroup quat_vec_t Types for integer vectors and matrices + * @{ + */ + +/** @brief Type for vector of 2 big integers + * + * @typedef ibz_vec_2_t + */ +typedef ibz_t ibz_vec_2_t[2]; + +/** @brief Type for vectors of 4 integers + * + * @typedef ibz_vec_4_t + * + * Represented as a vector of 4 ibz_t (big integer) elements + */ +typedef ibz_t ibz_vec_4_t[4]; + +/** @brief Type for 2 by 2 matrices of integers + * + * @typedef ibz_mat_2x2_t + * + * Represented as a matrix of 2 vectors of 2 ibz_t (big integer) elements + */ +typedef ibz_t ibz_mat_2x2_t[2][2]; + +/** @brief Type for 4 by 4 matrices of integers + * + * @typedef ibz_mat_4x4_t + * + * Represented as a matrix of 4 vectors of 4 ibz_t (big integer) elements + */ +typedef ibz_t ibz_mat_4x4_t[4][4]; +/** + * @} + */ + +/** @defgroup quat_quat_t Types for quaternion algebras + * @{ + */ + +/** @brief Type for quaternion algebras + * + * @typedef quat_alg_t + * + * @struct quat_alg + * + * The quaternion algebra ramified at p = 3 mod 4 and ∞. + */ +typedef struct quat_alg +{ + ibz_t p; ///< Prime number, must be = 3 mod 4. +} quat_alg_t; + +/** @brief Type for quaternion algebra elements + * + * @typedef quat_alg_elem_t + * + * @struct quat_alg_elem + * + * Represented as a array *coord* of 4 ibz_t integers and a common ibz_t denominator *denom*. + * + * The representation is not necessarily normalized, that is, gcd(denom, content(coord)) might not + * be 1. For getting a normalized representation, use the quat_alg_normalize function + * + * The elements are always represented in basis (1,i,j,ij) of the quaternion algebra, with i^2=-1 + * and j^2 = -p + */ +typedef struct quat_alg_elem +{ + ibz_t denom; ///< Denominator by which all coordinates are divided (big integer, must not be 0) + ibz_vec_4_t coord; ///< Numerators of the 4 coordinates of the quaternion algebra element in basis (1,i,j,ij) +} quat_alg_elem_t; + +/** @brief Type for lattices in dimension 4 + * + * @typedef quat_lattice_t + * + * @struct quat_lattice + * + * Represented as a rational (`frac`) times an integreal lattice (`basis`) + * + * The basis is such that its columns divided by its denominator are elements of + * the quaternion algebra, represented in basis (1,i,j,ij) where i^2 = -1, j^2 = -p. + * + * All lattices must have full rank (4) + */ +typedef struct quat_lattice +{ + ibz_t denom; ///< Denominator by which the basis is divided (big integer, must not be 0) + ibz_mat_4x4_t basis; ///< Integer basis of the lattice (its columns divided by denom are + ///< algebra elements in the usual basis) +} quat_lattice_t; + +/** @brief Type for left ideals of maximal orders in quaternion algebras + * + * @typedef quat_left_ideal_t + * + * @struct quat_left_ideal + * + * The basis of the lattice representing it is such that its columns divided by its denominator are + * elements of the quaternion algebra, represented in basis (1,i,j,ij) where i^2 = -1, j^2 = -p. 
+ */ +typedef struct quat_left_ideal +{ + quat_lattice_t lattice; ///< lattice representing the ideal + ibz_t norm; ///< norm of the lattice + const quat_lattice_t *parent_order; ///< should be a maximal order +} quat_left_ideal_t; +/** @} + */ + +/** @brief Type for extremal maximal orders + * + * @typedef quat_p_extremal_maximal_order_t + * + * @struct quat_p_extremal_maximal_order + * + * The basis of the order representing it is in hermite normal form, and its columns divid +ed by its denominator are elements of the quaternion algebra, represented in basis (1,z,t, +tz) where z^2 = -q, t^2 = -p. +*/ +typedef struct quat_p_extremal_maximal_order +{ + quat_lattice_t order; ///< the order represented as a lattice + quat_alg_elem_t z; ///< the element of small discriminant + quat_alg_elem_t t; ///< the element of norm p orthogonal to z + uint32_t q; ///< the absolute value of the square of z +} quat_p_extremal_maximal_order_t; + +/** @brief Type for represent integer parameters + * + * @typedef quat_p_extremal_maximal_order_t + * + * @struct quat_p_extremal_maximal_order + * + */ +typedef struct quat_represent_integer_params +{ + int primality_test_iterations; ///< Primality test iterations + const quat_p_extremal_maximal_order_t *order; ///< The standard extremal maximal order + const quat_alg_t *algebra; ///< The quaternion algebra +} quat_represent_integer_params_t; + +/*************************** Functions *****************************/ + +/** @defgroup quat_c Constructors and Destructors + * @{ + */ +void quat_alg_init_set(quat_alg_t *alg, const ibz_t *p); +void quat_alg_finalize(quat_alg_t *alg); + +void quat_alg_elem_init(quat_alg_elem_t *elem); +void quat_alg_elem_finalize(quat_alg_elem_t *elem); + +void ibz_vec_2_init(ibz_vec_2_t *vec); +void ibz_vec_2_finalize(ibz_vec_2_t *vec); + +void ibz_vec_4_init(ibz_vec_4_t *vec); +void ibz_vec_4_finalize(ibz_vec_4_t *vec); + +void ibz_mat_2x2_init(ibz_mat_2x2_t *mat); +void ibz_mat_2x2_finalize(ibz_mat_2x2_t *mat); + +void ibz_mat_4x4_init(ibz_mat_4x4_t *mat); +void ibz_mat_4x4_finalize(ibz_mat_4x4_t *mat); + +void quat_lattice_init(quat_lattice_t *lat); +void quat_lattice_finalize(quat_lattice_t *lat); + +void quat_left_ideal_init(quat_left_ideal_t *lideal); +void quat_left_ideal_finalize(quat_left_ideal_t *lideal); +/** @} + */ + +/** @defgroup quat_printers Print functions for types from the quaternion module + * @{ + */ +void ibz_mat_2x2_print(const ibz_mat_2x2_t *mat); +void ibz_mat_4x4_print(const ibz_mat_4x4_t *mat); +void ibz_vec_2_print(const ibz_vec_2_t *vec); +void ibz_vec_4_print(const ibz_vec_4_t *vec); + +void quat_lattice_print(const quat_lattice_t *lat); +void quat_alg_print(const quat_alg_t *alg); +void quat_alg_elem_print(const quat_alg_elem_t *elem); +void quat_left_ideal_print(const quat_left_ideal_t *lideal); + +/** @} + */ + +/** @defgroup quat_int Integer functions for quaternion algebra + * @{ + */ + +/** @defgroup quat_int_mat Integer matrix and vector functions + * @{ + */ + +/** @brief Copy matrix + * + * @param copy Output: Matrix into which copied will be copied + * @param copied + */ +void ibz_mat_2x2_copy(ibz_mat_2x2_t *copy, const ibz_mat_2x2_t *copied); + +/** + * @brief Inverse of 2x2 integer matrices modulo m + * + * @param inv Output matrix + * @param mat Input matrix + * @param m Integer modulo + * @return 1 if inverse exists 0 otherwise + */ +int ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m); + +/** @brief mat*vec in dimension 2 for integers + * + * @param res Output 
vector
+ * @param mat Input matrix
+ * @param vec Input vector
+ */
+void ibz_mat_2x2_eval(ibz_vec_2_t *res, const ibz_mat_2x2_t *mat, const ibz_vec_2_t *vec);
+
+/** @brief Copies all values from a 4x4 integer matrix to another one
+ *
+ * @param new Output: matrix which will have its entries set to mat's entries
+ * @param mat Input matrix
+ */
+void ibz_mat_4x4_copy(ibz_mat_4x4_t *new,
+                      const ibz_mat_4x4_t *mat); // dim4, lattice, test/dim4, ideal
+
+/** @brief Transpose a 4x4 integer matrix
+ *
+ * @param transposed Output: is set to the transposition of mat
+ * @param mat Input matrix
+ */
+void ibz_mat_4x4_transpose(ibz_mat_4x4_t *transposed, const ibz_mat_4x4_t *mat);
+
+/** @brief a*b for a,b integer 4x4 matrices
+ *
+ * Naive implementation
+ *
+ * @param res Output: A 4x4 integer matrix
+ * @param a Input matrix
+ * @param b Input matrix
+ */
+void ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t *b);
+
+/** @brief Divides all values in a matrix by the same scalar
+ *
+ * @returns 1 if scalar divided all values in mat, 0 otherwise (division is performed in both cases)
+ * @param quot Output
+ * @param scalar Scalar divisor
+ * @param mat Input matrix
+ */
+int ibz_mat_4x4_scalar_div(ibz_mat_4x4_t *quot, const ibz_t *scalar, const ibz_mat_4x4_t *mat);
+
+/**
+ * @brief mat*vec
+ *
+ *
+ * @param res Output: coordinate vector
+ * @param mat Integer 4x4 matrix
+ * @param vec Integer vector (coordinate vector)
+ *
+ * Multiplies the 4x4 integer matrix mat by a column vector vec of 4 integers
+ */
+void ibz_mat_4x4_eval(ibz_vec_4_t *res, const ibz_mat_4x4_t *mat, const ibz_vec_4_t *vec);
+
+/**
+ * @brief vec*mat
+ *
+ *
+ * @param res Output: coordinate vector.
+ * @param vec Integer vector (coordinate vector)
+ * @param mat Integer 4x4 matrix
+ *
+ * Multiplies the 4x4 integer matrix mat by a row vector vec of 4 integers (on the left)
+ */
+void ibz_mat_4x4_eval_t(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_mat_4x4_t *mat);
+
+/** @}
+ */
+
+/** @defgroup quat_integer Higher-level integer functions for quaternion algebra
+ * @{
+ */
+
+/**
+ * @brief Generates a random prime
+ *
+ * A number is accepted as prime if it passes a 30-round Miller-Rabin test.
+ * This function is fairly inefficient and mostly meant for tests.
+ *
+ * @returns 1 if a prime is found, 0 otherwise
+ * @param p Output: The prime (if found)
+ * @param is3mod4 If 1, the prime is required to be 3 mod 4, if 0 no congruence condition is imposed
+ * @param bitsize Maximal size of output prime
+ * @param probability_test_iterations Miller-Rabin iterations for probabilistic primality testing in
+ * rejection sampling
+ */
+int ibz_generate_random_prime(ibz_t *p, int is3mod4, int bitsize, int probability_test_iterations);
+
+/**
+ * @brief Find integers x and y such that x^2 + n*y^2 = p
+ *
+ * Uses Cornacchia's algorithm, should be used only for prime p
+ *
+ * @param x Output
+ * @param y Output
+ * @param n first parameter defining the equation
+ * @param p second parameter defining the equation, must be prime
+ * @return 1 if success, 0 otherwise
+ */
+int ibz_cornacchia_prime(ibz_t *x, ibz_t *y, const ibz_t *n, const ibz_t *p);
+
+/** @}
+ */
+
+/** @defgroup quat_qf Quadratic form functions
+ * @{
+ */
+
+/**
+ * @brief Quadratic form evaluation
+ *
+ * qf and coord must be represented in the same basis.
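+ * The value computed is res = coord^T * qf * coord.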
+ * + * @param res Output: coordinate vector + * @param qf Quadratic form (4x4 integer matrix) + * @param coord Integer vector (coordinate vector) + */ +void quat_qf_eval(ibz_t *res, const ibz_mat_4x4_t *qf, const ibz_vec_4_t *coord); +/** @} + */ + +/** @} + */ + +/** @defgroup quat_quat_f Quaternion algebra functions + * @{ + */ +/** + * @brief Copies an algebra element + * + * @param copy Output: The element into which another one is copied + * @param copied Source element copied into copy + */ +void quat_alg_elem_copy(quat_alg_elem_t *copy, const quat_alg_elem_t *copied); + +void quat_alg_mul(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b, const quat_alg_t *alg); + +/** @brief reduced norm of alg_elem x + * + * @param res_num Output: rational which will contain the numerator of the reduced norm of a + * @param res_denom Output: rational which will contain the denominator of the reduced norm of a (it + * is 1 if the norm is integer) + * @param x Algebra element whose norm is computed + * @param alg The quaternion algebra + */ +void quat_alg_norm(ibz_t *res_num, ibz_t *res_denom, const quat_alg_elem_t *x, const quat_alg_t *alg); + +/** @brief Normalize representation of alg_elem x + * + * @param x Algebra element whose representation will be normalized + * + * Modification of x. + * Sets coord and denom of x so that gcd(denom, content(coord))=1 + * without changing the value of x = (coord0/denom, coord1/denom, coord2/denom, coord3/denom). + */ +void quat_alg_normalize(quat_alg_elem_t *x); + +/** + * @brief Standard involution in a quaternion algebra + * + * @param conj Output: image of x by standard involution of the quaternion algebra alg + * @param x element of alg whose image is searched + */ +void quat_alg_conj(quat_alg_elem_t *conj, const quat_alg_elem_t *x); + +/** + * @brief Given `x` ∈ `order`, factor it into its primitive and impritive parts + * + * Given `x` ∈ `order`, return a coordinate vector `primitive_x` and an integer `content` + * such that `x` = `content` · Λ `primitive_x`, where Λ is the basis of `order` + * and `x` / `content` is primitive in `order`. + * + * @param primitive_x Output: coordinates of a primitive element of `order` (in `order`'s basis) + * @param content Output: content of `x`'s coordinate vector in order's basis + * @param order order of `alg` + * @param x element of order, must be in `order` + */ +void quat_alg_make_primitive(ibz_vec_4_t *primitive_x, + ibz_t *content, + const quat_alg_elem_t *x, + const quat_lattice_t *order); + +// end quat_quat_f +/** @} + */ + +/** @defgroup quat_lat_f Lattice functions + * @{ + */ + +void quat_lattice_intersect(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2); + +/** + * @brief Test whether x ∈ lat. If so, compute its coordinates in lat's basis. + * + * @param coord Output: Set to the coordinates of x in lat. May be NULL. + * @param lat The lattice, not necessarily in HNF but full rank + * @param x An element of the quaternion algebra + * @return true if x ∈ lat + */ +int quat_lattice_contains(ibz_vec_4_t *coord, const quat_lattice_t *lat, const quat_alg_elem_t *x); + +/** + * @brief Conjugate of a lattice with basis not in HNF + * + * @param conj Output: The lattice conjugate to lat. 
ATTENTION: is not under HNF + * @param lat Input lattice + */ +void quat_lattice_conjugate_without_hnf(quat_lattice_t *conj, const quat_lattice_t *lat); + +/** + * @brief Multiply a lattice and an algebra element + * + * The element is multiplied to the right of the lattice + * + * @param prod Output: Lattice lat*elem + * @param lat Input lattice + * @param elem Algebra element + * @param alg The quaternion algebra + */ +void quat_lattice_alg_elem_mul(quat_lattice_t *prod, + const quat_lattice_t *lat, + const quat_alg_elem_t *elem, + const quat_alg_t *alg); // ideal + +/** + * @brief Sample from the intersection of a lattice with a ball + * + * Sample a uniform non-zero vector of norm ≤ `radius` from the lattice. + * + * @param res Output: sampled quaternion from the lattice + * @param lattice Input lattice + * @param alg The quaternion algebra + * @param radius The ball radius (quaternion norm) + * @return 0 if an error occurred (ball too small or RNG error), 1 otherwise + */ +int quat_lattice_sample_from_ball(quat_alg_elem_t *res, + const quat_lattice_t *lattice, + const quat_alg_t *alg, + const ibz_t *radius); + +// end quat_lat_f +/** @} + */ + +/** @defgroup quat_lideal_f Functions for left ideals + * @{ + */ + +/** @defgroup quat_lideal_c Creating left ideals + * @{ + */ + +/** + * @brief Left ideal of order, generated by x and N as order*x+order*N + * + * @param lideal Output: left ideal + * @param alg quaternion algebra + * @param order maximal order of alg whose left ideal is searched + * @param x generating element. Must be non-zero + * @param N generating integer + * + * Creates the left ideal in order generated by the element x and the integer N. + * If x is not divisible (inside the order) by any integer divisor n>1 of N, + * then the norm of the output ideal is N. + * + */ +void quat_lideal_create(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const ibz_t *N, + const quat_lattice_t *order, + const quat_alg_t *alg); + +/** @} + */ + +/** @defgroup quat_lideal_gen Generators of left ideals + * @{ + */ + +/** + * @brief Generator of 'lideal' + * + * @returns 1 if such a generator was found, 0 otherwise + * @param gen Output: non scalar generator of lideal + * @param lideal left ideal + * @param alg the quaternion algebra + * + * Ideal is generated by gen and the ideal's norm + * + * Bound has as default value QUATERNION_lideal_generator_search_bound + */ +int quat_lideal_generator(quat_alg_elem_t *gen, const quat_left_ideal_t *lideal, const quat_alg_t *alg); +/** @} + */ + +/** @defgroup quat_lideal_op Operations on left ideals + * @{ + */ + +/** + * @brief Copies an ideal + * + * @param copy Output: The ideal into which another one is copied + * @param copied Source ideal copied into copy. The parent order is not copied (only the pointer). 
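+ * Since only the pointer is shared, the source ideal's parent order must outlive the copy.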
+ */ +void quat_lideal_copy(quat_left_ideal_t *copy, const quat_left_ideal_t *copied); + +/** + * @brief Conjugate of a left ideal (not in HNF) + * + * @param conj Output: Ideal conjugate to lideal, with norm and parent order correctly set, but its + * lattice not in HNF + * @param new_parent_order Output: Will be set to the right order of lideal, and serve as parent + * order for conj (so must have at least the lifetime of conj) + * @param lideal input left ideal (of which conj will be the conjugate) + * @param alg the quaternion algebra + */ +void quat_lideal_conjugate_without_hnf(quat_left_ideal_t *conj, + quat_lattice_t *new_parent_order, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg); + +/** + * @brief Intersection of two left ideals + * + * @param intersection Output: Left ideal which is the intersection of the 2 inputs + * @param lideal1 left ideal + * @param lideal2 left ideal + * @param alg the quaternion algebra + */ +void quat_lideal_inter(quat_left_ideal_t *intersection, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); + +/** + * @brief L2-reduce the basis of the left ideal, without considering its denominator + * + * This function reduce the basis of the lattice of the ideal, but it does completely ignore its + * denominator. So the outputs of this function must still e divided by the appropriate power of + * lideal.lattice.denom. + * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param reduced Output: Lattice defining the ideal, which has its basis in a lll-reduced form. + * Must be divided by lideal.lattice.denom before usage + * @param gram Output: Matrix of the quadratic form given by the norm on the basis of the reduced + * ideal, divided by the norm of the ideal + * @param lideal ideal whose basis will be reduced + * @param alg the quaternion algebra + */ +void quat_lideal_reduce_basis(ibz_mat_4x4_t *reduced, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg); // replaces lideal_lll + +/** + * @brief Multplies two ideals and L2-reduces the lattice of the result + * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param prod Output: The product ideal with its lattice basis being L2-reduced + * @param gram Output: Gram matrix of the reduced norm (as quadratic but not bilinear form) on the + * basis of prod, divided by the norm of prod + * @param lideal1 Ideal at left in the product + * @param lideal2 Ideal at right in the product + * @param alg The quaternion algebra + */ +void quat_lideal_lideal_mul_reduced(quat_left_ideal_t *prod, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); + +/** + * @brief Replaces an ideal by a smaller equivalent one of prime norm + * + * @returns 1 if the computation succeeded and 0 otherwise + * @param lideal In- and Output: Ideal to be replaced + * @param alg The quaternion algebra + * @param primality_num_iter number of repetition for primality testing + * @param equiv_bound_coeff bound on the coefficients for the candidates + */ +int quat_lideal_prime_norm_reduced_equivalent(quat_left_ideal_t *lideal, + const quat_alg_t *alg, + const int primality_num_iter, + const int equiv_bound_coeff); + +/** @} 
+
+// end quat_lideal_f
+/** @}
+ */
+
+/** @defgroup quat_normeq Functions specific to special extremal maximal orders
+ * @{
+ */
+
+/**
+ * @brief Representing an integer by the quadratic norm form of a maximal extremal order
+ *
+ * @returns 1 if the computation succeeded
+ * @param gamma Output: a quaternion element
+ * @param n_gamma Target norm of gamma. n_gamma must be odd. If n_gamma/(p*params->order->q) <
+ * 2^QUAT_repres_bound_input, failure is likely
+ * @param non_diag If set to 1 (instead of 0) and the order is O0, an additional property is ensured
+ * @param params Represent integer parameters specifying the algebra, the special extremal order,
+ * the number of trials for finding gamma and the number of iterations of the primality test.
+ * Special requirements apply if non_diag is set to 1
+ *
+ * This algorithm finds a primitive quaternion element gamma of norm n_gamma inside any maximal
+ * extremal order. Failure is possible. Most efficient for the standard order.
+ *
+ * If non_diag is set to 1, this algorithm finds a primitive quaternion element gamma of norm
+ * n_gamma, with some special properties used for fixed-degree isogenies, inside any maximal
+ * extremal order such that params->order->q = 1 mod 4. Failure is possible. Most efficient for the
+ * standard order. The most important property is to avoid diagonal isogenies, meaning that the
+ * gamma returned by the algorithm must not be contained inside ZZ + 2 O, where O is the maximal
+ * order params->order. When O is the special order O0 corresponding to j=1728, we further need to
+ * avoid endomorphisms of E0xE0, and there is an additional requirement.
+ *
+ * If non_diag is set to 1, params must provide the number of trials for finding gamma and the
+ * number of iterations of the primality test, and the value of params->order->q is required to be
+ * 1 mod 4
+ */
+int quat_represent_integer(quat_alg_elem_t *gamma,
+                           const ibz_t *n_gamma,
+                           int non_diag,
+                           const quat_represent_integer_params_t *params);
+
+/** @brief Basis change to (1,i,(i+j)/2,(1+ij)/2) for elements of O0
+ *
+ * Change the basis in which an element is given from (1,i,j,ij) to (1,i,(i+j)/2,(1+ij)/2), the
+ * usual basis of the special maximal order O0. Only for elements of O0.
+ *
+ * @param vec Output: Coordinates of el in basis (1,i,(i+j)/2,(1+ij)/2)
+ * @param el Input: An algebra element in O0
+ */
+void quat_change_to_O0_basis(ibz_vec_4_t *vec, const quat_alg_elem_t *el);
+
+/**
+ * @brief Random O0-ideal of given norm
+ *
+ * Much faster if norm is prime and is_prime is set to 1
+ *
+ * @param lideal Output: O0-ideal of norm norm
+ * @param norm Norm of the ideal to be found
+ * @param is_prime Indicates if norm is prime: 1 if it is, 0 otherwise
+ * @param params Represent integer parameters from the level-dependent constants
+ * @param prime_cofactor Prime distinct from the prime p defining the algebra but of similar size
+ * and coprime to norm. If is_prime is 1, it may be NULL.
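+ *
+ * Illustrative call (a sketch added for this write-up, not from the upstream sources; the
+ * argument initialization is elided and the local names are hypothetical):
+ * @code
+ * quat_left_ideal_t lideal;
+ * ibz_t norm;                                    // set to the desired prime norm elsewhere
+ * const quat_represent_integer_params_t *params; // level-dependent constants, set elsewhere
+ * if (quat_sampling_random_ideal_O0_given_norm(&lideal, &norm, 1, params, NULL)) {
+ *     // lideal is now a random O0-ideal of the requested norm
+ * }
+ * @endcode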
+ * @returns 1 if success, 0 if no ideal found or randomness failed + */ +int quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, + const ibz_t *norm, + int is_prime, + const quat_represent_integer_params_t *params, + const ibz_t *prime_cofactor); +// end quat_normeq +/** @} + */ +// end quat_quat +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion_constants.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion_constants.h new file mode 100644 index 0000000000..5dca7d7cd4 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion_constants.h @@ -0,0 +1,6 @@ +#include +#define QUAT_primality_num_iter 32 +#define QUAT_repres_bound_input 20 +#define QUAT_equiv_bound_coeff 64 +#define FINDUV_box_size 2 +#define FINDUV_cube_size 624 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion_data.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion_data.c new file mode 100644 index 0000000000..baf3da0059 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion_data.c @@ -0,0 +1,3176 @@ +#include +#include +#include +const ibz_t QUAT_prime_cofactor = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x41,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x41,0x0,0x0,0x0,0x0,0x0,0x0,0x8000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x41,0x0,0x0,0x800000000000000}}} +#endif +; +const quat_alg_t QUATALG_PINFTY = { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x4ff}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x4ffffff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0x4ffffffffffffff}}} +#endif +}; +const quat_p_extremal_maximal_order_t EXTREMAL_ORDERS[7] = {{{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 
32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc 
= 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 1}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x10000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x1000000000000000}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, 
._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x10000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x1000000000000000}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x8000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x800000000000000}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x80}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x80000000000000}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x8000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x800000000000000}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x10000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x1000000000000000}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 5}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x78d4,0x7b85,0x7a64,0xf5f2,0x29b9,0x3696,0x6101,0xb874}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7b8578d4,0xf5f27a64,0x369629b9,0xb8746101}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xf5f27a647b8578d4,0xb8746101369629b9}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x78d4,0x7b85,0x7a64,0xf5f2,0x29b9,0x3696,0x6101,0xb874}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7b8578d4,0xf5f27a64,0x369629b9,0xb8746101}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xf5f27a647b8578d4,0xb8746101369629b9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbc6a,0x3dc2,0x3d32,0xfaf9,0x14dc,0x9b4b,0x3080,0x5c3a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3dc2bc6a,0xfaf93d32,0x9b4b14dc,0x5c3a3080}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xfaf93d323dc2bc6a,0x5c3a30809b4b14dc}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7d47,0x6fa4,0x2ad5,0x95ad,0x8a4b,0x49be,0x77e7,0xc898,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x6fa47d47,0x95ad2ad5,0x49be8a4b,0xc89877e7,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x95ad2ad56fa47d47,0xc89877e749be8a4b,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 
0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3f47,0x7060,0x5e29,0x3e35,0xd950,0x2a1b,0x10ae,0x78dd,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x280}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x70603f47,0x3e355e29,0x2a1bd950,0x78dd10ae,0x0,0x0,0x0,0x2800000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3e355e2970603f47,0x78dd10ae2a1bd950,0x0,0x280000000000000}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbc6a,0x3dc2,0x3d32,0xfaf9,0x14dc,0x9b4b,0x3080,0x5c3a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3dc2bc6a,0xfaf93d32,0x9b4b14dc,0x5c3a3080}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xfaf93d323dc2bc6a,0x5c3a30809b4b14dc}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x3fe7,0x28ee,0x26e8,0xb194,0x6d7a,0xaf58,0xe568,0xd6d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x28ee3fe7,0xb19426e8,0xaf586d7a,0xd6de568}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xb19426e828ee3fe7,0xd6de568af586d7a}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x78d4,0x7b85,0x7a64,0xf5f2,0x29b9,0x3696,0x6101,0xb874}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 
4, ._mp_d = (mp_limb_t[]) {0x7b8578d4,0xf5f27a64,0x369629b9,0xb8746101}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xf5f27a647b8578d4,0xb8746101369629b9}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7d47,0x6fa4,0x2ad5,0x95ad,0x8a4b,0x49be,0x77e7,0xc898,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x6fa47d47,0x95ad2ad5,0x49be8a4b,0xc89877e7,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x95ad2ad56fa47d47,0xc89877e749be8a4b,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 17}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x307a,0x74c8,0x8082,0xb034,0x4e8a,0xc43a,0x399a,0x9ab}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x74c8307a,0xb0348082,0xc43a4e8a,0x9ab399a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xb034808274c8307a,0x9ab399ac43a4e8a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x954f,0x6bc9,0xca46,0x3d25,0x431b,0x46ed,0x8229,0x4f5,0xe453,0x6eb3,0x4530,0xeb3e,0x5306,0xb3e4,0x306e,0x45}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6bc9954f,0x3d25ca46,0x46ed431b,0x4f58229,0x6eb3e453,0xeb3e4530,0xb3e45306,0x45306e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3d25ca466bc9954f,0x4f5822946ed431b,0xeb3e45306eb3e453,0x45306eb3e45306}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, 
._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe7f,0xca3a,0x2454,0xbd31,0xe562,0xcb4c,0x72f0,0x21}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xca3a0e7f,0xbd312454,0xcb4ce562,0x2172f0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbd312454ca3a0e7f,0x2172f0cb4ce562}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x307a,0x74c8,0x8082,0xb034,0x4e8a,0xc43a,0x399a,0x9ab}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x74c8307a,0xb0348082,0xc43a4e8a,0x9ab399a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xb034808274c8307a,0x9ab399ac43a4e8a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = 
(mp_limb_t[]) {0x4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 37}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdd36,0xda6b,0xa943,0xd17a,0xe307,0x564c,0x4b0c,0x44d4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xda6bdd36,0xd17aa943,0x564ce307,0x44d44b0c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd17aa943da6bdd36,0x44d44b0c564ce307}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x3a03,0xc406,0x47c,0xa0a2,0x6dbc,0x1df4,0x796,0x6cee,0xce0c,0xe0c7,0xc7c,0xc7ce,0x7ce0,0xce0c,0xe0c7,0x7c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xc4063a03,0xa0a2047c,0x1df46dbc,0x6cee0796,0xe0c7ce0c,0xc7ce0c7c,0xce0c7ce0,0x7ce0c7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa0a2047cc4063a03,0x6cee07961df46dbc,0xc7ce0c7ce0c7ce0c,0x7ce0c7ce0c7ce0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x188f,0xa1e2,0x2148,0xd9f8,0x2e79,0x1a07,0xe1b2,0xd6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa1e2188f,0xd9f82148,0x1a072e79,0xd6e1b2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xd9f82148a1e2188f,0xd6e1b21a072e79}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdd36,0xda6b,0xa943,0xd17a,0xe307,0x564c,0x4b0c,0x44d4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xda6bdd36,0xd17aa943,0x564ce307,0x44d44b0c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd17aa943da6bdd36,0x44d44b0c564ce307}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} 
+#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 41}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x96a4,0x25b,0x14f2,0x3800,0x4e7c,0x7958,0xab7f,0x7bbe,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25b96a4,0x380014f2,0x79584e7c,0x7bbeab7f,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x380014f2025b96a4,0x7bbeab7f79584e7c,0x1}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x96a4,0x25b,0x14f2,0x3800,0x4e7c,0x7958,0xab7f,0x7bbe,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25b96a4,0x380014f2,0x79584e7c,0x7bbeab7f,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x380014f2025b96a4,0x7bbeab7f79584e7c,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcb52,0x12d,0xa79,0x1c00,0x273e,0xbcac,0x55bf,0xbddf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x12dcb52,0x1c000a79,0xbcac273e,0xbddf55bf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1c000a79012dcb52,0xbddf55bfbcac273e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x73e3,0x3339,0x19e7,0x4ba1,0x6ebc,0x2702,0xee62,0xdbd0,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x333973e3,0x4ba119e7,0x27026ebc,0xdbd0ee62,0x7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x4ba119e7333973e3,0xdbd0ee6227026ebc,0x7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca33,0x3dd0,0x1d92,0x9f0,0x2f81,0xafe9,0xe395,0x83f7,0xfffc,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x27f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3dd0ca33,0x9f01d92,0xafe92f81,0x83f7e395,0xfffffffc,0xffffffff,0xffffffff,0x27fffff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = 
(mp_limb_t[]) {0x9f01d923dd0ca33,0x83f7e395afe92f81,0xfffffffffffffffc,0x27fffffffffffff}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcb52,0x12d,0xa79,0x1c00,0x273e,0xbcac,0x55bf,0xbddf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x12dcb52,0x1c000a79,0xbcac273e,0xbddf55bf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1c000a79012dcb52,0xbddf55bfbcac273e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xeb73,0xf93c,0x71c0,0x87f5,0x667a,0xcb3c,0xb9cb,0x12fa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf93ceb73,0x87f571c0,0xcb3c667a,0x12fab9cb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x87f571c0f93ceb73,0x12fab9cbcb3c667a}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x96a4,0x25b,0x14f2,0x3800,0x4e7c,0x7958,0xab7f,0x7bbe,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25b96a4,0x380014f2,0x79584e7c,0x7bbeab7f,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x380014f2025b96a4,0x7bbeab7f79584e7c,0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) 
{0x73e3,0x3339,0x19e7,0x4ba1,0x6ebc,0x2702,0xee62,0xdbd0,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x333973e3,0x4ba119e7,0x27026ebc,0xdbd0ee62,0x7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x4ba119e7333973e3,0xdbd0ee6227026ebc,0x7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 53}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7ffa,0x55af,0x7b9e,0xe2b9,0xa7af,0x578c,0xf76b,0xc227,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x55af7ffa,0xe2b97b9e,0x578ca7af,0xc227f76b,0xf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe2b97b9e55af7ffa,0xc227f76b578ca7af,0xf}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7ffa,0x55af,0x7b9e,0xe2b9,0xa7af,0x578c,0xf76b,0xc227,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x55af7ffa,0xe2b97b9e,0x578ca7af,0xc227f76b,0xf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe2b97b9e55af7ffa,0xc227f76b578ca7af,0xf}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xbffd,0x2ad7,0xbdcf,0xf15c,0x53d7,0xabc6,0xfbb5,0xe113,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2ad7bffd,0xf15cbdcf,0xabc653d7,0xe113fbb5,0x7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xf15cbdcf2ad7bffd,0xe113fbb5abc653d7,0x7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xd16,0xf02b,0x1ce7,0xa2ef,0x54b,0x2c56,0x5963,0x667,0x6f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xf02b0d16,0xa2ef1ce7,0x2c56054b,0x6675963,0x6f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xa2ef1ce7f02b0d16,0x66759632c56054b,0x6f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf0ab,0x9d3b,0x6ea,0x84ac,0x62e5,0xdde9,0x882b,0xd021,0xffe2,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x13ff}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d3bf0ab,0x84ac06ea,0xdde962e5,0xd021882b,0xffffffe2,0xffffffff,0xffffffff,0x13ffffff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x84ac06ea9d3bf0ab,0xd021882bdde962e5,0xffffffffffffffe2,0x13ffffffffffffff}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xbffd,0x2ad7,0xbdcf,0xf15c,0x53d7,0xabc6,0xfbb5,0xe113,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2ad7bffd,0xf15cbdcf,0xabc653d7,0xe113fbb5,0x7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xf15cbdcf2ad7bffd,0xe113fbb5abc653d7,0x7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} 
+#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1f37,0x5c4a,0x13f1,0x770,0x7183,0x5600,0xda31,0x9281}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5c4a1f37,0x77013f1,0x56007183,0x9281da31}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x77013f15c4a1f37,0x9281da3156007183}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7ffa,0x55af,0x7b9e,0xe2b9,0xa7af,0x578c,0xf76b,0xc227,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x55af7ffa,0xe2b97b9e,0x578ca7af,0xc227f76b,0xf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe2b97b9e55af7ffa,0xc227f76b578ca7af,0xf}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xd16,0xf02b,0x1ce7,0xa2ef,0x54b,0x2c56,0x5963,0x667,0x6f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xf02b0d16,0xa2ef1ce7,0x2c56054b,0x6675963,0x6f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xa2ef1ce7f02b0d16,0x66759632c56054b,0x6f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 97}}; +const quat_left_ideal_t CONNECTING_IDEALS[7] = {{{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 
+{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x0,0x0,0x0,0x0,0x6000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x60000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) 
{0x2,0x6000000000000000}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x10000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x1000000000000000}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x0,0x0,0x0,0x0,0x6000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x60000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x2,0x6000000000000000}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x5000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x50000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x5000000000000000}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 
0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x3000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x30000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x3000000000000000}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5fe,0x8673,0x157b,0x7f90,0xd2c5,0xd00b,0xa646,0x78f4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8673f5fe,0x7f90157b,0xd00bd2c5,0x78f4a646}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x7f90157b8673f5fe,0x78f4a646d00bd2c5}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfee5,0x2b,0xd6d8,0xe65c,0x68a3,0xe72d,0x373d,0x5b1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2bfee5,0xe65cd6d8,0xe72d68a3,0x5b1373d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xe65cd6d8002bfee5,0x5b1373de72d68a3}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 
+{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5fe,0x8673,0x157b,0x7f90,0xd2c5,0xd00b,0xa646,0x78f4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8673f5fe,0x7f90157b,0xd00bd2c5,0x78f4a646}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x7f90157b8673f5fe,0x78f4a646d00bd2c5}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf719,0x8647,0x3ea3,0x9933,0x6a21,0xe8de,0x6f08,0x7343}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8647f719,0x99333ea3,0xe8de6a21,0x73436f08}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x99333ea38647f719,0x73436f08e8de6a21}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0xfaff,0xc339,0xabd,0xbfc8,0xe962,0x6805,0x5323,0x3c7a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc339faff,0xbfc80abd,0x6805e962,0x3c7a5323}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbfc80abdc339faff,0x3c7a53236805e962}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8597,0x3af7,0xa5a,0xbb29,0x77c0,0xd2d9,0xf561,0x84f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3af78597,0xbb290a5a,0xd2d977c0,0x84ff561}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbb290a5a3af78597,0x84ff561d2d977c0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x604b,0x3c1e,0x9e8c,0x8146,0x18b7,0xb452,0xa68a,0xf44}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3c1e604b,0x81469e8c,0xb45218b7,0xf44a68a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x81469e8c3c1e604b,0xf44a68ab45218b7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} 
+#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x519b,0xa90b,0xcdca,0xd5f5,0x757a,0x83dd,0xb354,0xe59}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa90b519b,0xd5f5cdca,0x83dd757a,0xe59b354}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd5f5cdcaa90b519b,0xe59b35483dd757a}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5e07,0xc4e3,0xf746,0x83d,0x5354,0x44c1,0x9c43,0x1f9f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc4e35e07,0x83df746,0x44c15354,0x1f9f9c43}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x83df746c4e35e07,0x1f9f9c4344c15354}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 
+{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x19f2,0x5594,0xee77,0x52a2,0xf459,0x45c9,0x2187,0xb348}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x559419f2,0x52a2ee77,0x45c9f459,0xb3482187}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x52a2ee77559419f2,0xb348218745c9f459}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdbd3,0x967a,0x8a96,0x1df4,0x7845,0xd70,0x419a,0x222}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x967adbd3,0x1df48a96,0xd707845,0x222419a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1df48a96967adbd3,0x222419a0d707845}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 
0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x19f2,0x5594,0xee77,0x52a2,0xf459,0x45c9,0x2187,0xb348}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x559419f2,0x52a2ee77,0x45c9f459,0xb3482187}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x52a2ee77559419f2,0xb348218745c9f459}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e1f,0xbf19,0x63e0,0x34ae,0x7c14,0x3859,0xdfed,0xb125}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbf193e1f,0x34ae63e0,0x38597c14,0xb125dfed}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x34ae63e0bf193e1f,0xb125dfed38597c14}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcf9,0xaaca,0x773b,0xa951,0xfa2c,0xa2e4,0x10c3,0x59a4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaaca0cf9,0xa951773b,0xa2e4fa2c,0x59a410c3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xa951773baaca0cf9,0x59a410c3a2e4fa2c}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa3a,0x67cf,0x6ad7,0xd031,0x701,0xebca,0xd852,0x2996}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x67cfaa3a,0xd0316ad7,0xebca0701,0x2996d852}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd0316ad767cfaa3a,0x2996d852ebca0701}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x275,0xd7ab,0xedeb,0xbc67,0xad41,0xaeb5,0xf2e5,0x148e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd7ab0275,0xbc67edeb,0xaeb5ad41,0x148ef2e5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbc67edebd7ab0275,0x148ef2e5aeb5ad41}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa3a,0x67cf,0x6ad7,0xd031,0x701,0xebca,0xd852,0x2996}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x67cfaa3a,0xd0316ad7,0xebca0701,0x2996d852}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd0316ad767cfaa3a,0x2996d852ebca0701}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa7c5,0x9024,0x7ceb,0x13c9,0x59c0,0x3d14,0xe56d,0x1507}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9024a7c5,0x13c97ceb,0x3d1459c0,0x1507e56d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x13c97ceb9024a7c5,0x1507e56d3d1459c0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd51d,0xb3e7,0xb56b,0xe818,0x380,0x75e5,0x6c29,0x14cb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb3e7d51d,0xe818b56b,0x75e50380,0x14cb6c29}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xe818b56bb3e7d51d,0x14cb6c2975e50380}}} +#endif +, &MAXORD_O0}}; +const quat_alg_elem_t CONJUGATING_ELEMENTS[7] = {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} 
+#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x10000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x1000000000000000}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x10000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x1,0x1000000000000000}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf94f,0x85ef,0x90f3,0xcc79,0x98d9,0x1a83,0x8d,0x67e1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x85eff94f,0xcc7990f3,0x1a8398d9,0x67e1008d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xcc7990f385eff94f,0x67e1008d1a8398d9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xf94f,0x85ef,0x90f3,0xcc79,0x98d9,0x1a83,0x8d,0x67e1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x85eff94f,0xcc7990f3,0x1a8398d9,0x67e1008d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xcc7990f385eff94f,0x67e1008d1a8398d9}}} +#endif +, +#if 0 
+#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x3}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} 
+#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6511,0x45fa,0xa368,0xe869,0x4db2,0x88fc,0x6989,0xbdf3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x45fa6511,0xe869a368,0x88fc4db2,0xbdf36989}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xe869a36845fa6511,0xbdf3698988fc4db2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x6511,0x45fa,0xa368,0xe869,0x4db2,0x88fc,0x6989,0xbdf3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x45fa6511,0xe869a368,0x88fc4db2,0xbdf36989}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xe869a36845fa6511,0xbdf3698988fc4db2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x5}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1e6b,0x5c4a,0x13f1,0x770,0x7183,0x5600,0xda31,0x9281}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5c4a1e6b,0x77013f1,0x56007183,0x9281da31}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x77013f15c4a1e6b,0x9281da3156007183}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x1e6b,0x5c4a,0x13f1,0x770,0x7183,0x5600,0xda31,0x9281}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x5c4a1e6b,0x77013f1,0x56007183,0x9281da31}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x77013f15c4a1e6b,0x9281da3156007183}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x4}}} 
+#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +#endif +}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion_data.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion_data.h new file mode 100644 index 0000000000..a5eb1106e6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion_data.h @@ -0,0 +1,12 @@ +#include +#define MAXORD_O0 (EXTREMAL_ORDERS->order) +#define STANDARD_EXTREMAL_ORDER (EXTREMAL_ORDERS[0]) +#define NUM_ALTERNATE_EXTREMAL_ORDERS 6 +#define ALTERNATE_EXTREMAL_ORDERS (EXTREMAL_ORDERS+1) +#define ALTERNATE_CONNECTING_IDEALS (CONNECTING_IDEALS+1) +#define ALTERNATE_CONJUGATING_ELEMENTS (CONJUGATING_ELEMENTS+1) +extern const ibz_t QUAT_prime_cofactor; +extern const quat_alg_t QUATALG_PINFTY; +extern const quat_p_extremal_maximal_order_t EXTREMAL_ORDERS[7]; +extern const quat_left_ideal_t CONNECTING_IDEALS[7]; +extern const quat_alg_elem_t CONJUGATING_ELEMENTS[7]; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/randombytes_ctrdrbg.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/randombytes_ctrdrbg.c new file mode 100644 index 0000000000..372cc0de81 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/randombytes_ctrdrbg.c @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: Apache-2.0 and Unknown +// +/* +NIST-developed software is provided by NIST as a public service. You may use, +copy, and distribute copies of the software in any medium, provided that you +keep intact this entire notice. You may improve, modify, and create derivative +works of the software or any portion of the software, and you may copy and +distribute such modifications or works. Modified works should carry a notice +stating that you changed the software and should note the date and nature of any +such change. Please explicitly acknowledge the National Institute of Standards +and Technology as the source of the software. + +NIST-developed software is expressly provided "AS IS." NIST MAKES NO WARRANTY OF +ANY KIND, EXPRESS, IMPLIED, IN FACT, OR ARISING BY OPERATION OF LAW, INCLUDING, +WITHOUT LIMITATION, THE IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE, NON-INFRINGEMENT, AND DATA ACCURACY. NIST NEITHER REPRESENTS +NOR WARRANTS THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR +ERROR-FREE, OR THAT ANY DEFECTS WILL BE CORRECTED. NIST DOES NOT WARRANT OR MAKE +ANY REPRESENTATIONS REGARDING THE USE OF THE SOFTWARE OR THE RESULTS THEREOF, +INCLUDING BUT NOT LIMITED TO THE CORRECTNESS, ACCURACY, RELIABILITY, OR +USEFULNESS OF THE SOFTWARE. + +You are solely responsible for determining the appropriateness of using and +distributing the software and you assume all risks associated with its use, +including but not limited to the risks and costs of program errors, compliance +with applicable laws, damage to or loss of data, programs or equipment, and the +unavailability or interruption of operation. This software is not intended to be +used in any situation where a failure could cause risk of injury or damage to +property. The software developed by NIST employees is not subject to copyright +protection within the United States. 
+*/ + +#include +#include + +#include + +#ifdef ENABLE_CT_TESTING +#include +#endif + +#define RNG_SUCCESS 0 +#define RNG_BAD_MAXLEN -1 +#define RNG_BAD_OUTBUF -2 +#define RNG_BAD_REQ_LEN -3 + +static inline void AES256_ECB(const unsigned char *key, + const unsigned char *ctr, unsigned char *buffer) { + AES_ECB_encrypt(ctr, key, buffer); +} + +typedef struct { + unsigned char Key[32]; + unsigned char V[16]; + int reseed_counter; +} AES256_CTR_DRBG_struct; + +void AES256_CTR_DRBG_Update(const unsigned char *provided_data, + unsigned char *Key, unsigned char *V); + +AES256_CTR_DRBG_struct DRBG_ctx; + +#ifndef CTRDRBG_TEST_BENCH +static +#endif + void + randombytes_init_nist(unsigned char *entropy_input, + unsigned char *personalization_string, + int security_strength) { + unsigned char seed_material[48]; + + (void)security_strength; // Unused parameter + memcpy(seed_material, entropy_input, 48); + if (personalization_string) + for (int i = 0; i < 48; i++) { + seed_material[i] ^= personalization_string[i]; + } + memset(DRBG_ctx.Key, 0x00, 32); + memset(DRBG_ctx.V, 0x00, 16); + AES256_CTR_DRBG_Update(seed_material, DRBG_ctx.Key, DRBG_ctx.V); + DRBG_ctx.reseed_counter = 1; +} + +#ifndef CTRDRBG_TEST_BENCH +static +#endif + int + randombytes_nist(unsigned char *x, size_t xlen) { + unsigned char block[16]; + size_t i = 0; + + while (xlen > 0) { + // increment V + for (int j = 15; j >= 0; j--) { + if (DRBG_ctx.V[j] == 0xff) { + DRBG_ctx.V[j] = 0x00; + } else { + DRBG_ctx.V[j]++; + break; + } + } + AES256_ECB(DRBG_ctx.Key, DRBG_ctx.V, block); + if (xlen > 15) { + memcpy(x + i, block, 16); + i += 16; + xlen -= 16; + } else { + memcpy(x + i, block, xlen); + i += xlen; + xlen = 0; + } + } + AES256_CTR_DRBG_Update(NULL, DRBG_ctx.Key, DRBG_ctx.V); + DRBG_ctx.reseed_counter++; + + return 0; +} + +void AES256_CTR_DRBG_Update(const unsigned char *provided_data, + unsigned char *Key, unsigned char *V) { + unsigned char temp[48]; + + for (int i = 0; i < 3; i++) { + // increment V + for (int j = 15; j >= 0; j--) { + if (V[j] == 0xff) { + V[j] = 0x00; + } else { + V[j]++; + break; + } + } + + AES256_ECB(Key, V, temp + 16 * i); + } + if (provided_data != NULL) + for (int i = 0; i < 48; i++) { + temp[i] ^= provided_data[i]; + } + memcpy(Key, temp, 32); + memcpy(V, temp + 32, 16); +} + +#ifdef RANDOMBYTES_C +SQISIGN_API +int randombytes(unsigned char *random_array, unsigned long long nbytes) { + int ret = randombytes_nist(random_array, nbytes); +#ifdef ENABLE_CT_TESTING + VALGRIND_MAKE_MEM_UNDEFINED(random_array, ret); +#endif + return ret; +} + +SQISIGN_API +void randombytes_init(unsigned char *entropy_input, + unsigned char *personalization_string, + int security_strength) { + randombytes_init_nist(entropy_input, personalization_string, + security_strength); +} +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/randombytes_system.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/randombytes_system.c new file mode 100644 index 0000000000..689c29b242 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/randombytes_system.c @@ -0,0 +1,431 @@ +// SPDX-License-Identifier: MIT + +/* +The MIT License +Copyright (c) 2017 Daan Sprenkels +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom 
the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ + +#include + +#ifdef ENABLE_CT_TESTING +#include +#endif + +// In the case that are compiling on linux, we need to define _GNU_SOURCE +// *before* randombytes.h is included. Otherwise SYS_getrandom will not be +// declared. +#if defined(__linux__) || defined(__GNU__) +#define _GNU_SOURCE +#endif /* defined(__linux__) || defined(__GNU__) */ + +#if defined(_WIN32) +/* Windows */ +#include +#include /* CryptAcquireContext, CryptGenRandom */ +#endif /* defined(_WIN32) */ + +/* wasi */ +#if defined(__wasi__) +#include +#endif + +/* kFreeBSD */ +#if defined(__FreeBSD_kernel__) && defined(__GLIBC__) +#define GNU_KFREEBSD +#endif + +#if defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) +/* Linux */ +// We would need to include , but not every target has access +// to the linux headers. We only need RNDGETENTCNT, so we instead inline it. +// RNDGETENTCNT is originally defined in `include/uapi/linux/random.h` in the +// linux repo. +#define RNDGETENTCNT 0x80045200 + +#include +#include +#include +#include +#include +#include +#include +#if (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && \ + ((__GLIBC__ > 2) || (__GLIBC_MINOR__ > 24)) +#define USE_GLIBC +#include +#endif /* (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && ((__GLIBC__ > 2) || \ + (__GLIBC_MINOR__ > 24)) */ +#include +#include +#include +#include + +// We need SSIZE_MAX as the maximum read len from /dev/urandom +#if !defined(SSIZE_MAX) +#define SSIZE_MAX (SIZE_MAX / 2 - 1) +#endif /* defined(SSIZE_MAX) */ + +#endif /* defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) */ + +#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) +/* Dragonfly, FreeBSD, NetBSD, OpenBSD (has arc4random) */ +#include +#if defined(BSD) +#include +#endif +/* GNU/Hurd defines BSD in sys/param.h which causes problems later */ +#if defined(__GNU__) +#undef BSD +#endif +#endif + +#if defined(__EMSCRIPTEN__) +#include +#include +#include +#include +#endif /* defined(__EMSCRIPTEN__) */ + +#if defined(_WIN32) +static int +randombytes_win32_randombytes(void *buf, size_t n) +{ + HCRYPTPROV ctx; + BOOL tmp; + DWORD to_read = 0; + const size_t MAX_DWORD = 0xFFFFFFFF; + + tmp = CryptAcquireContext(&ctx, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT); + if (tmp == FALSE) + return -1; + + while (n > 0) { + to_read = (DWORD)(n < MAX_DWORD ? 
n : MAX_DWORD); + tmp = CryptGenRandom(ctx, to_read, (BYTE *)buf); + if (tmp == FALSE) + return -1; + buf = ((char *)buf) + to_read; + n -= to_read; + } + + tmp = CryptReleaseContext(ctx, 0); + if (tmp == FALSE) + return -1; + + return 0; +} +#endif /* defined(_WIN32) */ + +#if defined(__wasi__) +static int +randombytes_wasi_randombytes(void *buf, size_t n) +{ + arc4random_buf(buf, n); + return 0; +} +#endif /* defined(__wasi__) */ + +#if (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || defined(SYS_getrandom)) +#if defined(USE_GLIBC) +// getrandom is declared in glibc. +#elif defined(SYS_getrandom) +static ssize_t +getrandom(void *buf, size_t buflen, unsigned int flags) +{ + return syscall(SYS_getrandom, buf, buflen, flags); +} +#endif + +static int +randombytes_linux_randombytes_getrandom(void *buf, size_t n) +{ + /* I have thought about using a separate PRF, seeded by getrandom, but + * it turns out that the performance of getrandom is good enough + * (250 MB/s on my laptop). + */ + size_t offset = 0, chunk; + int ret; + while (n > 0) { + /* getrandom does not allow chunks larger than 33554431 */ + chunk = n <= 33554431 ? n : 33554431; + do { + ret = getrandom((char *)buf + offset, chunk, 0); + } while (ret == -1 && errno == EINTR); + if (ret < 0) + return ret; + offset += ret; + n -= ret; + } + assert(n == 0); + return 0; +} +#endif /* (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || \ + defined(SYS_getrandom)) */ + +#if (defined(__linux__) || defined(GNU_KFREEBSD)) && !defined(SYS_getrandom) + +#if defined(__linux__) +static int +randombytes_linux_read_entropy_ioctl(int device, int *entropy) +{ + return ioctl(device, RNDGETENTCNT, entropy); +} + +static int +randombytes_linux_read_entropy_proc(FILE *stream, int *entropy) +{ + int retcode; + do { + rewind(stream); + retcode = fscanf(stream, "%d", entropy); + } while (retcode != 1 && errno == EINTR); + if (retcode != 1) { + return -1; + } + return 0; +} + +static int +randombytes_linux_wait_for_entropy(int device) +{ + /* We will block on /dev/random, because any increase in the OS' entropy + * level will unblock the request. I use poll here (as does libsodium), + * because we don't *actually* want to read from the device. */ + enum + { + IOCTL, + PROC + } strategy = IOCTL; + const int bits = 128; + struct pollfd pfd; + int fd; + FILE *proc_file; + int retcode, retcode_error = 0; // Used as return codes throughout this function + int entropy = 0; + + /* If the device has enough entropy already, we will want to return early */ + retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); + // printf("errno: %d (%s)\n", errno, strerror(errno)); + if (retcode != 0 && (errno == ENOTTY || errno == ENOSYS)) { + // The ioctl call on /dev/urandom has failed due to a + // - ENOTTY (unsupported action), or + // - ENOSYS (invalid ioctl; this happens on MIPS, see #22). + // + // We will fall back to reading from + // `/proc/sys/kernel/random/entropy_avail`. This less ideal, + // because it allocates a file descriptor, and it may not work + // in a chroot. But at this point it seems we have no better + // options left. 
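+        // Switch to the PROC strategy: the poll loop further down then re-reads the
+        // entropy estimate from /proc/sys/kernel/random/entropy_avail instead of
+        // issuing RNDGETENTCNT ioctls on the descriptor that was passed in.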
+ strategy = PROC; + // Open the entropy count file + proc_file = fopen("/proc/sys/kernel/random/entropy_avail", "r"); + if (proc_file == NULL) { + return -1; + } + } else if (retcode != 0) { + // Unrecoverable ioctl error + return -1; + } + if (entropy >= bits) { + return 0; + } + + do { + fd = open("/dev/random", O_RDONLY); + } while (fd == -1 && errno == EINTR); /* EAGAIN will not occur */ + if (fd == -1) { + /* Unrecoverable IO error */ + return -1; + } + + pfd.fd = fd; + pfd.events = POLLIN; + for (;;) { + retcode = poll(&pfd, 1, -1); + if (retcode == -1 && (errno == EINTR || errno == EAGAIN)) { + continue; + } else if (retcode == 1) { + if (strategy == IOCTL) { + retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); + } else if (strategy == PROC) { + retcode = randombytes_linux_read_entropy_proc(proc_file, &entropy); + } else { + return -1; // Unreachable + } + + if (retcode != 0) { + // Unrecoverable I/O error + retcode_error = retcode; + break; + } + if (entropy >= bits) { + break; + } + } else { + // Unreachable: poll() should only return -1 or 1 + retcode_error = -1; + break; + } + } + do { + retcode = close(fd); + } while (retcode == -1 && errno == EINTR); + if (strategy == PROC) { + do { + retcode = fclose(proc_file); + } while (retcode == -1 && errno == EINTR); + } + if (retcode_error != 0) { + return retcode_error; + } + return retcode; +} +#endif /* defined(__linux__) */ + +static int +randombytes_linux_randombytes_urandom(void *buf, size_t n) +{ + int fd; + size_t offset = 0, count; + ssize_t tmp; + do { + fd = open("/dev/urandom", O_RDONLY); + } while (fd == -1 && errno == EINTR); + if (fd == -1) + return -1; +#if defined(__linux__) + if (randombytes_linux_wait_for_entropy(fd) == -1) + return -1; +#endif + + while (n > 0) { + count = n <= SSIZE_MAX ? 
n : SSIZE_MAX; + tmp = read(fd, (char *)buf + offset, count); + if (tmp == -1 && (errno == EAGAIN || errno == EINTR)) { + continue; + } + if (tmp == -1) + return -1; /* Unrecoverable IO error */ + offset += tmp; + n -= tmp; + } + close(fd); + assert(n == 0); + return 0; +} +#endif /* defined(__linux__) && !defined(SYS_getrandom) */ + +#if defined(BSD) +static int +randombytes_bsd_randombytes(void *buf, size_t n) +{ + arc4random_buf(buf, n); + return 0; +} +#endif /* defined(BSD) */ + +#if defined(__EMSCRIPTEN__) +static int +randombytes_js_randombytes_nodejs(void *buf, size_t n) +{ + const int ret = EM_ASM_INT( + { + var crypto; + try { + crypto = require('crypto'); + } catch (error) { + return -2; + } + try { + writeArrayToMemory(crypto.randomBytes($1), $0); + return 0; + } catch (error) { + return -1; + } + }, + buf, + n); + switch (ret) { + case 0: + return 0; + case -1: + errno = EINVAL; + return -1; + case -2: + errno = ENOSYS; + return -1; + } + assert(false); // Unreachable +} +#endif /* defined(__EMSCRIPTEN__) */ + +SQISIGN_API +int +randombytes_select(unsigned char *buf, unsigned long long n) +{ +#if defined(__EMSCRIPTEN__) + return randombytes_js_randombytes_nodejs(buf, n); +#elif defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) +#if defined(USE_GLIBC) + /* Use getrandom system call */ + return randombytes_linux_randombytes_getrandom(buf, n); +#elif defined(SYS_getrandom) + /* Use getrandom system call */ + return randombytes_linux_randombytes_getrandom(buf, n); +#else + /* When we have enough entropy, we can read from /dev/urandom */ + return randombytes_linux_randombytes_urandom(buf, n); +#endif +#elif defined(BSD) + /* Use arc4random system call */ + return randombytes_bsd_randombytes(buf, n); +#elif defined(_WIN32) + /* Use windows API */ + return randombytes_win32_randombytes(buf, n); +#elif defined(__wasi__) + /* Use WASI */ + return randombytes_wasi_randombytes(buf, n); +#else +#error "randombytes(...) 
is not supported on this platform" +#endif +} + +#ifdef RANDOMBYTES_SYSTEM +SQISIGN_API +int +randombytes(unsigned char *x, unsigned long long xlen) +{ + + int ret = randombytes_select(x, (size_t)xlen); +#ifdef ENABLE_CT_TESTING + VALGRIND_MAKE_MEM_UNDEFINED(x, xlen); +#endif + return ret; +} + +SQISIGN_API +void +randombytes_init(unsigned char *entropy_input, + unsigned char *personalization_string, + int security_strength) +{ + (void)entropy_input; + (void)personalization_string; + (void)security_strength; +} +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/rationals.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/rationals.c new file mode 100644 index 0000000000..0c5387e5e8 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/rationals.c @@ -0,0 +1,233 @@ +#include +#include "internal.h" +#include "lll_internals.h" + +void +ibq_init(ibq_t *x) +{ + ibz_init(&((*x)[0])); + ibz_init(&((*x)[1])); + ibz_set(&((*x)[1]), 1); +} + +void +ibq_finalize(ibq_t *x) +{ + ibz_finalize(&((*x)[0])); + ibz_finalize(&((*x)[1])); +} + +void +ibq_mat_4x4_init(ibq_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibq_init(&(*mat)[i][j]); + } + } +} +void +ibq_mat_4x4_finalize(ibq_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibq_finalize(&(*mat)[i][j]); + } + } +} + +void +ibq_vec_4_init(ibq_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibq_init(&(*vec)[i]); + } +} +void +ibq_vec_4_finalize(ibq_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibq_finalize(&(*vec)[i]); + } +} + +void +ibq_mat_4x4_print(const ibq_mat_4x4_t *mat) +{ + printf("matrix: "); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_print(&((*mat)[i][j][0]), 10); + printf("/"); + ibz_print(&((*mat)[i][j][1]), 10); + printf(" "); + } + printf("\n "); + } + printf("\n"); +} + +void +ibq_vec_4_print(const ibq_vec_4_t *vec) +{ + printf("vector: "); + for (int i = 0; i < 4; i++) { + ibz_print(&((*vec)[i][0]), 10); + printf("/"); + ibz_print(&((*vec)[i][1]), 10); + printf(" "); + } + printf("\n\n"); +} + +void +ibq_reduce(ibq_t *x) +{ + ibz_t gcd, r; + ibz_init(&gcd); + ibz_init(&r); + ibz_gcd(&gcd, &((*x)[0]), &((*x)[1])); + ibz_div(&((*x)[0]), &r, &((*x)[0]), &gcd); + assert(ibz_is_zero(&r)); + ibz_div(&((*x)[1]), &r, &((*x)[1]), &gcd); + assert(ibz_is_zero(&r)); + ibz_finalize(&gcd); + ibz_finalize(&r); +} + +void +ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b) +{ + ibz_t add, prod; + ibz_init(&add); + ibz_init(&prod); + + ibz_mul(&add, &((*a)[0]), &((*b)[1])); + ibz_mul(&prod, &((*b)[0]), &((*a)[1])); + ibz_add(&((*sum)[0]), &add, &prod); + ibz_mul(&((*sum)[1]), &((*a)[1]), &((*b)[1])); + ibz_finalize(&add); + ibz_finalize(&prod); +} + +void +ibq_neg(ibq_t *neg, const ibq_t *x) +{ + ibz_copy(&((*neg)[1]), &((*x)[1])); + ibz_neg(&((*neg)[0]), &((*x)[0])); +} + +void +ibq_sub(ibq_t *diff, const ibq_t *a, const ibq_t *b) +{ + ibq_t neg; + ibq_init(&neg); + ibq_neg(&neg, b); + ibq_add(diff, a, &neg); + ibq_finalize(&neg); +} + +void +ibq_abs(ibq_t *abs, const ibq_t *x) // once +{ + ibq_t neg; + ibq_init(&neg); + ibq_neg(&neg, x); + if (ibq_cmp(x, &neg) < 0) + ibq_copy(abs, &neg); + else + ibq_copy(abs, x); + ibq_finalize(&neg); +} + +void +ibq_mul(ibq_t *prod, const ibq_t *a, const ibq_t *b) +{ + ibz_mul(&((*prod)[0]), &((*a)[0]), &((*b)[0])); + ibz_mul(&((*prod)[1]), &((*a)[1]), &((*b)[1])); +} + +int +ibq_inv(ibq_t *inv, const ibq_t *x) +{ + int res = !ibq_is_zero(x); + if (res) { + ibz_copy(&((*inv)[0]), 
&((*x)[0])); + ibz_copy(&((*inv)[1]), &((*x)[1])); + ibz_swap(&((*inv)[1]), &((*inv)[0])); + } + return (res); +} + +int +ibq_cmp(const ibq_t *a, const ibq_t *b) +{ + ibz_t x, y; + ibz_init(&x); + ibz_init(&y); + ibz_copy(&x, &((*a)[0])); + ibz_copy(&y, &((*b)[0])); + ibz_mul(&y, &y, &((*a)[1])); + ibz_mul(&x, &x, &((*b)[1])); + if (ibz_cmp(&((*a)[1]), &ibz_const_zero) > 0) { + ibz_neg(&y, &y); + ibz_neg(&x, &x); + } + if (ibz_cmp(&((*b)[1]), &ibz_const_zero) > 0) { + ibz_neg(&y, &y); + ibz_neg(&x, &x); + } + int res = ibz_cmp(&x, &y); + ibz_finalize(&x); + ibz_finalize(&y); + return (res); +} + +int +ibq_is_zero(const ibq_t *x) +{ + return ibz_is_zero(&((*x)[0])); +} + +int +ibq_is_one(const ibq_t *x) +{ + return (0 == ibz_cmp(&((*x)[0]), &((*x)[1]))); +} + +int +ibq_set(ibq_t *q, const ibz_t *a, const ibz_t *b) +{ + ibz_copy(&((*q)[0]), a); + ibz_copy(&((*q)[1]), b); + return !ibz_is_zero(b); +} + +void +ibq_copy(ibq_t *target, const ibq_t *value) // once +{ + ibz_copy(&((*target)[0]), &((*value)[0])); + ibz_copy(&((*target)[1]), &((*value)[1])); +} + +int +ibq_is_ibz(const ibq_t *q) +{ + ibz_t r; + ibz_init(&r); + ibz_mod(&r, &((*q)[0]), &((*q)[1])); + int res = ibz_is_zero(&r); + ibz_finalize(&r); + return (res); +} + +int +ibq_to_ibz(ibz_t *z, const ibq_t *q) +{ + ibz_t r; + ibz_init(&r); + ibz_div(z, &r, &((*q)[0]), &((*q)[1])); + int res = ibz_is_zero(&r); + ibz_finalize(&r); + return (res); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/rng.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/rng.h new file mode 100644 index 0000000000..0a9ca0e465 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/rng.h @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef rng_h +#define rng_h + +#include + +/** + * Randombytes initialization. + * Initialization may be needed for some random number generators (e.g. CTR-DRBG). + * + * @param[in] entropy_input 48 bytes entropy input + * @param[in] personalization_string Personalization string + * @param[in] security_strength Security string + */ +SQISIGN_API +void randombytes_init(unsigned char *entropy_input, + unsigned char *personalization_string, + int security_strength); + +/** + * Random byte generation using /dev/urandom. + * The caller is responsible to allocate sufficient memory to hold x. + * + * @param[out] x Memory to hold the random bytes. + * @param[in] xlen Number of random bytes to be generated + * @return int 0 on success, -1 otherwise + */ +SQISIGN_API +int randombytes_select(unsigned char *x, unsigned long long xlen); + +/** + * Random byte generation. + * The caller is responsible to allocate sufficient memory to hold x. + * + * @param[out] x Memory to hold the random bytes. + * @param[in] xlen Number of random bytes to be generated + * @return int 0 on success, -1 otherwise + */ +SQISIGN_API +int randombytes(unsigned char *x, unsigned long long xlen); + +#endif /* rng_h */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sig.h new file mode 100644 index 0000000000..4c33510084 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sig.h @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef SQISIGN_H +#define SQISIGN_H + +#include +#include + +#if defined(ENABLE_SIGN) +/** + * SQIsign keypair generation. + * + * The implementation corresponds to SQIsign.CompactKeyGen() in the SQIsign spec. + * The caller is responsible to allocate sufficient memory to hold pk and sk. 
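+ *
+ * A minimal usage sketch; PUBLICKEY_BYTES and SECRETKEY_BYTES are assumed to be
+ * the encoded key sizes (see encoded_sizes.h):
+ * @code
+ * unsigned char pk[PUBLICKEY_BYTES];
+ * unsigned char sk[SECRETKEY_BYTES];
+ * if (sqisign_keypair(pk, sk) != 0) {
+ *     // key generation failed
+ * }
+ * @endcode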
+ *
+ * @param[out] pk SQIsign public key
+ * @param[out] sk SQIsign secret key
+ * @return int status code
+ */
+SQISIGN_API
+int sqisign_keypair(unsigned char *pk, unsigned char *sk);
+
+/**
+ * SQIsign signature generation.
+ *
+ * The implementation performs SQIsign.expandSK() + SQIsign.sign() in the SQIsign spec.
+ * The key provided is a compacted secret key.
+ * The caller is responsible for allocating sufficient memory to hold sm.
+ *
+ * @param[out] sm Signature concatenated with message
+ * @param[out] smlen Pointer to the length of sm
+ * @param[in] m Message to be signed
+ * @param[in] mlen Message length
+ * @param[in] sk Compacted secret key
+ * @return int status code
+ */
+SQISIGN_API
+int sqisign_sign(unsigned char *sm,
+                 unsigned long long *smlen,
+                 const unsigned char *m,
+                 unsigned long long mlen,
+                 const unsigned char *sk);
+#endif
+
+/**
+ * SQIsign open signature.
+ *
+ * The implementation performs SQIsign.verify(). If the signature verification succeeds, the
+ * original message is stored in m. The key provided is a compacted public key. The caller is
+ * responsible for allocating sufficient memory to hold m.
+ *
+ * @param[out] m Message stored if verification succeeds
+ * @param[out] mlen Pointer to the length of m
+ * @param[in] sm Signature concatenated with message
+ * @param[in] smlen Length of sm
+ * @param[in] pk Compacted public key
+ * @return int status code
+ */
+SQISIGN_API
+int sqisign_open(unsigned char *m,
+                 unsigned long long *mlen,
+                 const unsigned char *sm,
+                 unsigned long long smlen,
+                 const unsigned char *pk);
+
+/**
+ * SQIsign verify signature.
+ *
+ * Returns 0 if the signature verification succeeded, and 1 otherwise.
+ *
+ * @param[in] m Message that was signed
+ * @param[in] mlen Length of m
+ * @param[in] sig Signature
+ * @param[in] siglen Length of sig
+ * @param[in] pk Compacted public key
+ * @return int 0 if verification succeeded, 1 otherwise.
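+ *
+ * A minimal usage sketch, assuming sig holds a detached signature of length siglen
+ * over the message m of length mlen, and pk holds an encoded public key:
+ * @code
+ * if (sqisign_verify(m, mlen, sig, siglen, pk) == 0) {
+ *     // signature is valid
+ * }
+ * @endcode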
+ */ +SQISIGN_API +int sqisign_verify(const unsigned char *m, + unsigned long long mlen, + const unsigned char *sig, + unsigned long long siglen, + const unsigned char *pk); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sign.c new file mode 100644 index 0000000000..9216bbe4d3 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sign.c @@ -0,0 +1,634 @@ +#include +#include +#include +#include +#include +#include + +// compute the commitment with ideal to isogeny clapotis +// and apply it to the basis of E0 (together with the multiplication by some scalar u) +static bool +commit(ec_curve_t *E_com, ec_basis_t *basis_even_com, quat_left_ideal_t *lideal_com) +{ + + bool found = false; + + found = quat_sampling_random_ideal_O0_given_norm(lideal_com, &COM_DEGREE, 1, &QUAT_represent_integer_params, NULL); + // replacing it with a shorter prime norm equivalent ideal + found = found && quat_lideal_prime_norm_reduced_equivalent( + lideal_com, &QUATALG_PINFTY, QUAT_primality_num_iter, QUAT_equiv_bound_coeff); + // ideal to isogeny clapotis + found = found && dim2id2iso_arbitrary_isogeny_evaluation(basis_even_com, E_com, lideal_com); + return found; +} + +static void +compute_challenge_ideal_signature(quat_left_ideal_t *lideal_chall_two, const signature_t *sig, const secret_key_t *sk) +{ + ibz_vec_2_t vec; + ibz_vec_2_init(&vec); + + // vec is a vector [1, chall_coeff] coefficients encoding the kernel of the challenge + // isogeny as B[0] + chall_coeff*B[1] where B is the canonical basis of the + // 2^TORSION_EVEN_POWER torsion of EA + ibz_set(&vec[0], 1); + ibz_copy_digit_array(&vec[1], sig->chall_coeff); + + // now we compute the ideal associated to the challenge + // for that, we need to find vec such that + // the kernel of the challenge isogeny is generated by vec[0]*B0[0] + vec[1]*B0[1] where B0 + // is the image through the secret key isogeny of the canonical basis E0 + ibz_mat_2x2_eval(&vec, &(sk->mat_BAcan_to_BA0_two), &vec); + + // lideal_chall_two is the pullback of the ideal challenge through the secret key ideal + id2iso_kernel_dlogs_to_ideal_even(lideal_chall_two, &vec, TORSION_EVEN_POWER); + assert(ibz_cmp(&lideal_chall_two->norm, &TORSION_PLUS_2POWER) == 0); + + ibz_vec_2_finalize(&vec); +} + +static void +sample_response(quat_alg_elem_t *x, const quat_lattice_t *lattice, const ibz_t *lattice_content) +{ + ibz_t bound; + ibz_init(&bound); + ibz_pow(&bound, &ibz_const_two, SQIsign_response_length); + ibz_sub(&bound, &bound, &ibz_const_one); + ibz_mul(&bound, &bound, lattice_content); + + int ok UNUSED = quat_lattice_sample_from_ball(x, lattice, &QUATALG_PINFTY, &bound); + assert(ok); + + ibz_finalize(&bound); +} + +static void +compute_response_quat_element(quat_alg_elem_t *resp_quat, + ibz_t *lattice_content, + const secret_key_t *sk, + const quat_left_ideal_t *lideal_chall_two, + const quat_left_ideal_t *lideal_commit) +{ + quat_left_ideal_t lideal_chall_secret; + quat_lattice_t lattice_hom_chall_to_com, lat_commit; + + // Init + quat_left_ideal_init(&lideal_chall_secret); + quat_lattice_init(&lat_commit); + quat_lattice_init(&lattice_hom_chall_to_com); + + // lideal_chall_secret = lideal_secret * lideal_chall_two + quat_lideal_inter(&lideal_chall_secret, lideal_chall_two, &(sk->secret_ideal), &QUATALG_PINFTY); + + // now we compute lideal_com_to_chall which is dual(Icom)* lideal_chall_secret + quat_lattice_conjugate_without_hnf(&lat_commit, &(lideal_commit->lattice)); + 
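+    // Intersecting with the conjugated commitment lattice gives the lattice of
+    // dual(Icom) * lideal_chall_secret: a quaternion x sampled from it corresponds to a
+    // full response of degree nrd(x) / (n(lideal_chall_secret) * n(lideal_commit)),
+    // which is the product stored in lattice_content below.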
quat_lattice_intersect(&lattice_hom_chall_to_com, &lideal_chall_secret.lattice, &lat_commit); + + // sampling the smallest response + ibz_mul(lattice_content, &lideal_chall_secret.norm, &lideal_commit->norm); + sample_response(resp_quat, &lattice_hom_chall_to_com, lattice_content); + + // Clean up + quat_left_ideal_finalize(&lideal_chall_secret); + quat_lattice_finalize(&lat_commit); + quat_lattice_finalize(&lattice_hom_chall_to_com); +} + +static void +compute_backtracking_signature(signature_t *sig, quat_alg_elem_t *resp_quat, ibz_t *lattice_content, ibz_t *remain) +{ + uint_fast8_t backtracking; + ibz_t tmp; + ibz_init(&tmp); + + ibz_vec_4_t dummy_coord; + ibz_vec_4_init(&dummy_coord); + + quat_alg_make_primitive(&dummy_coord, &tmp, resp_quat, &MAXORD_O0); + ibz_mul(&resp_quat->denom, &resp_quat->denom, &tmp); + assert(quat_lattice_contains(NULL, &MAXORD_O0, resp_quat)); + + // the backtracking is the common part of the response and the challenge + // its degree is the scalar tmp computed above such that quat_resp is in tmp * O0. + backtracking = ibz_two_adic(&tmp); + sig->backtracking = backtracking; + + ibz_pow(&tmp, &ibz_const_two, backtracking); + ibz_div(lattice_content, remain, lattice_content, &tmp); + + ibz_finalize(&tmp); + ibz_vec_4_finalize(&dummy_coord); +} + +static uint_fast8_t +compute_random_aux_norm_and_helpers(signature_t *sig, + ibz_t *random_aux_norm, + ibz_t *degree_resp_inv, + ibz_t *remain, + const ibz_t *lattice_content, + quat_alg_elem_t *resp_quat, + quat_left_ideal_t *lideal_com_resp, + quat_left_ideal_t *lideal_commit) +{ + uint_fast8_t pow_dim2_deg_resp; + uint_fast8_t exp_diadic_val_full_resp; + + ibz_t tmp, degree_full_resp, degree_odd_resp, norm_d; + + // Init + ibz_init(°ree_full_resp); + ibz_init(°ree_odd_resp); + ibz_init(&norm_d); + ibz_init(&tmp); + + quat_alg_norm(°ree_full_resp, &norm_d, resp_quat, &QUATALG_PINFTY); + + // dividing by n(lideal_com) * n(lideal_secret_chall) + assert(ibz_is_one(&norm_d)); + ibz_div(°ree_full_resp, remain, °ree_full_resp, lattice_content); + assert(ibz_cmp(remain, &ibz_const_zero) == 0); + + // computing the diadic valuation + exp_diadic_val_full_resp = ibz_two_adic(°ree_full_resp); + sig->two_resp_length = exp_diadic_val_full_resp; + + // removing the power of two part + ibz_pow(&tmp, &ibz_const_two, exp_diadic_val_full_resp); + ibz_div(°ree_odd_resp, remain, °ree_full_resp, &tmp); + assert(ibz_cmp(remain, &ibz_const_zero) == 0); +#ifndef NDEBUG + ibz_pow(&tmp, &ibz_const_two, SQIsign_response_length - sig->backtracking); + assert(ibz_cmp(&tmp, °ree_odd_resp) > 0); +#endif + + // creating the ideal + quat_alg_conj(resp_quat, resp_quat); + + // setting the norm + ibz_mul(&tmp, &lideal_commit->norm, °ree_odd_resp); + quat_lideal_create(lideal_com_resp, resp_quat, &tmp, &MAXORD_O0, &QUATALG_PINFTY); + + // now we compute the ideal_aux + // computing the norm + pow_dim2_deg_resp = SQIsign_response_length - exp_diadic_val_full_resp - sig->backtracking; + ibz_pow(remain, &ibz_const_two, pow_dim2_deg_resp); + ibz_sub(random_aux_norm, remain, °ree_odd_resp); + + // multiplying by 2^HD_extra_torsion to account for the fact that + // we use extra torsion above the kernel + for (int i = 0; i < HD_extra_torsion; i++) + ibz_mul(remain, remain, &ibz_const_two); + + ibz_invmod(degree_resp_inv, °ree_odd_resp, remain); + + ibz_finalize(°ree_full_resp); + ibz_finalize(°ree_odd_resp); + ibz_finalize(&norm_d); + ibz_finalize(&tmp); + + return pow_dim2_deg_resp; +} + +static int +evaluate_random_aux_isogeny_signature(ec_curve_t *E_aux, + 
ec_basis_t *B_aux, + const ibz_t *norm, + const quat_left_ideal_t *lideal_com_resp) +{ + quat_left_ideal_t lideal_aux; + quat_left_ideal_t lideal_aux_resp_com; + + // Init + quat_left_ideal_init(&lideal_aux); + quat_left_ideal_init(&lideal_aux_resp_com); + + // sampling the ideal at random + int found = quat_sampling_random_ideal_O0_given_norm( + &lideal_aux, norm, 0, &QUAT_represent_integer_params, &QUAT_prime_cofactor); + + if (found) { + // pushing forward + quat_lideal_inter(&lideal_aux_resp_com, lideal_com_resp, &lideal_aux, &QUATALG_PINFTY); + + // now we evaluate this isogeny on the basis of E0 + found = dim2id2iso_arbitrary_isogeny_evaluation(B_aux, E_aux, &lideal_aux_resp_com); + + // Clean up + quat_left_ideal_finalize(&lideal_aux_resp_com); + quat_left_ideal_finalize(&lideal_aux); + } + + return found; +} + +static int +compute_dim2_isogeny_challenge(theta_couple_curve_with_basis_t *codomain, + theta_couple_curve_with_basis_t *domain, + const ibz_t *degree_resp_inv, + int pow_dim2_deg_resp, + int exp_diadic_val_full_resp, + int reduced_order) +{ + // now, we compute the isogeny Phi : Ecom x Eaux -> Echl' x Eaux' + // where Echl' is 2^exp_diadic_val_full_resp isogenous to Echal + // ker Phi = <(Bcom_can.P,Baux.P),(Bcom_can.Q,Baux.Q)> + + // preparing the domain + theta_couple_curve_t EcomXEaux; + copy_curve(&EcomXEaux.E1, &domain->E1); + copy_curve(&EcomXEaux.E2, &domain->E2); + + // preparing the kernel + theta_kernel_couple_points_t dim_two_ker; + copy_bases_to_kernel(&dim_two_ker, &domain->B1, &domain->B2); + + // dividing by the degree of the response + digit_t scalar[NWORDS_ORDER]; + ibz_to_digit_array(scalar, degree_resp_inv); + ec_mul(&dim_two_ker.T1.P2, scalar, reduced_order, &dim_two_ker.T1.P2, &EcomXEaux.E2); + ec_mul(&dim_two_ker.T2.P2, scalar, reduced_order, &dim_two_ker.T2.P2, &EcomXEaux.E2); + ec_mul(&dim_two_ker.T1m2.P2, scalar, reduced_order, &dim_two_ker.T1m2.P2, &EcomXEaux.E2); + + // and multiplying by 2^exp_diadic... 
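+    // (the three doublings below realize the multiplication by 2^exp_diadic_val_full_resp
+    //  on both components of each couple point)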
+ double_couple_point_iter(&dim_two_ker.T1, exp_diadic_val_full_resp, &dim_two_ker.T1, &EcomXEaux); + double_couple_point_iter(&dim_two_ker.T2, exp_diadic_val_full_resp, &dim_two_ker.T2, &EcomXEaux); + double_couple_point_iter(&dim_two_ker.T1m2, exp_diadic_val_full_resp, &dim_two_ker.T1m2, &EcomXEaux); + + theta_couple_point_t pushed_points[3]; + theta_couple_point_t *const Tev1 = pushed_points + 0, *const Tev2 = pushed_points + 1, + *const Tev1m2 = pushed_points + 2; + + // Set points on the commitment curve + copy_point(&Tev1->P1, &domain->B1.P); + copy_point(&Tev2->P1, &domain->B1.Q); + copy_point(&Tev1m2->P1, &domain->B1.PmQ); + + // Zero points on the aux curve + ec_point_init(&Tev1->P2); + ec_point_init(&Tev2->P2); + ec_point_init(&Tev1m2->P2); + + theta_couple_curve_t codomain_product; + + // computation of the dim2 isogeny + if (!theta_chain_compute_and_eval_randomized(pow_dim2_deg_resp, + &EcomXEaux, + &dim_two_ker, + true, + &codomain_product, + pushed_points, + sizeof(pushed_points) / sizeof(*pushed_points))) + return 0; + + assert(test_couple_point_order_twof(Tev1, &codomain_product, reduced_order)); + + // Set the auxiliary curve + copy_curve(&codomain->E1, &codomain_product.E2); + + // Set the codomain curve from the dim 2 isogeny + // it should always be the first curve + copy_curve(&codomain->E2, &codomain_product.E1); + + // Set the evaluated basis points + copy_point(&codomain->B1.P, &Tev1->P2); + copy_point(&codomain->B1.Q, &Tev2->P2); + copy_point(&codomain->B1.PmQ, &Tev1m2->P2); + + copy_point(&codomain->B2.P, &Tev1->P1); + copy_point(&codomain->B2.Q, &Tev2->P1); + copy_point(&codomain->B2.PmQ, &Tev1m2->P1); + return 1; +} + +static int +compute_small_chain_isogeny_signature(ec_curve_t *E_chall_2, + ec_basis_t *B_chall_2, + const quat_alg_elem_t *resp_quat, + int pow_dim2_deg_resp, + int length) +{ + int ret = 1; + + ibz_t two_pow; + ibz_init(&two_pow); + + ibz_vec_2_t vec_resp_two; + ibz_vec_2_init(&vec_resp_two); + + quat_left_ideal_t lideal_resp_two; + quat_left_ideal_init(&lideal_resp_two); + + // computing the ideal + ibz_pow(&two_pow, &ibz_const_two, length); + + // we compute the generator of the challenge ideal + quat_lideal_create(&lideal_resp_two, resp_quat, &two_pow, &MAXORD_O0, &QUATALG_PINFTY); + + // computing the coefficients of the kernel in terms of the basis of O0 + id2iso_ideal_to_kernel_dlogs_even(&vec_resp_two, &lideal_resp_two); + + ec_point_t points[3]; + copy_point(&points[0], &B_chall_2->P); + copy_point(&points[1], &B_chall_2->Q); + copy_point(&points[2], &B_chall_2->PmQ); + + // getting down to the right order and applying the matrix + ec_dbl_iter_basis(B_chall_2, pow_dim2_deg_resp + HD_extra_torsion, B_chall_2, E_chall_2); + assert(test_basis_order_twof(B_chall_2, E_chall_2, length)); + + ec_point_t ker; + // applying the vector to find the kernel + ec_biscalar_mul_ibz_vec(&ker, &vec_resp_two, length, B_chall_2, E_chall_2); + assert(test_point_order_twof(&ker, E_chall_2, length)); + + // computing the isogeny and pushing the points + if (ec_eval_small_chain(E_chall_2, &ker, length, points, 3, true)) { + ret = 0; + } + + // copying the result + copy_point(&B_chall_2->P, &points[0]); + copy_point(&B_chall_2->Q, &points[1]); + copy_point(&B_chall_2->PmQ, &points[2]); + + ibz_finalize(&two_pow); + ibz_vec_2_finalize(&vec_resp_two); + quat_left_ideal_finalize(&lideal_resp_two); + + return ret; +} + +static int +compute_challenge_codomain_signature(const signature_t *sig, + secret_key_t *sk, + ec_curve_t *E_chall, + const ec_curve_t *E_chall_2, + 
ec_basis_t *B_chall_2) +{ + ec_isog_even_t phi_chall; + ec_basis_t bas_sk; + copy_basis(&bas_sk, &sk->canonical_basis); + + phi_chall.curve = sk->curve; + phi_chall.length = TORSION_EVEN_POWER - sig->backtracking; + assert(test_basis_order_twof(&bas_sk, &sk->curve, TORSION_EVEN_POWER)); + + // Compute the kernel + { + ec_ladder3pt(&phi_chall.kernel, sig->chall_coeff, &bas_sk.P, &bas_sk.Q, &bas_sk.PmQ, &sk->curve); + } + assert(test_point_order_twof(&phi_chall.kernel, &sk->curve, TORSION_EVEN_POWER)); + + // Double kernel to get correct order + ec_dbl_iter(&phi_chall.kernel, sig->backtracking, &phi_chall.kernel, &sk->curve); + + assert(test_point_order_twof(&phi_chall.kernel, E_chall, phi_chall.length)); + + // Compute the codomain from challenge isogeny + if (ec_eval_even(E_chall, &phi_chall, NULL, 0)) + return 0; + +#ifndef NDEBUG + fp2_t j_chall, j_codomain; + ec_j_inv(&j_codomain, E_chall_2); + ec_j_inv(&j_chall, E_chall); + // apparently its always the second one curve + assert(fp2_is_equal(&j_chall, &j_codomain)); +#endif + + // applying the isomorphism from E_chall_2 to E_chall + ec_isom_t isom; + if (ec_isomorphism(&isom, E_chall_2, E_chall)) + return 0; // error due to a corner case with 1/p probability + ec_iso_eval(&B_chall_2->P, &isom); + ec_iso_eval(&B_chall_2->Q, &isom); + ec_iso_eval(&B_chall_2->PmQ, &isom); + + return 1; +} + +static void +set_aux_curve_signature(signature_t *sig, ec_curve_t *E_aux) +{ + ec_normalize_curve(E_aux); + fp2_copy(&sig->E_aux_A, &E_aux->A); +} + +static void +compute_and_set_basis_change_matrix(signature_t *sig, + const ec_basis_t *B_aux_2, + ec_basis_t *B_chall_2, + ec_curve_t *E_aux_2, + ec_curve_t *E_chall, + int f) +{ + // Matrices for change of bases matrices + ibz_mat_2x2_t mat_Baux2_to_Baux2_can, mat_Bchall_can_to_Bchall; + ibz_mat_2x2_init(&mat_Baux2_to_Baux2_can); + ibz_mat_2x2_init(&mat_Bchall_can_to_Bchall); + + // Compute canonical bases + ec_basis_t B_can_chall, B_aux_2_can; + sig->hint_chall = ec_curve_to_basis_2f_to_hint(&B_can_chall, E_chall, TORSION_EVEN_POWER); + sig->hint_aux = ec_curve_to_basis_2f_to_hint(&B_aux_2_can, E_aux_2, TORSION_EVEN_POWER); + +#ifndef NDEBUG + { + // Ensure all points have the desired order + assert(test_basis_order_twof(&B_aux_2_can, E_aux_2, TORSION_EVEN_POWER)); + assert(test_basis_order_twof(B_aux_2, E_aux_2, f)); + fp2_t w0; + weil(&w0, f, &B_aux_2->P, &B_aux_2->Q, &B_aux_2->PmQ, E_aux_2); + } +#endif + + // compute the matrix to go from B_aux_2 to B_aux_2_can + change_of_basis_matrix_tate_invert(&mat_Baux2_to_Baux2_can, &B_aux_2_can, B_aux_2, E_aux_2, f); + + // apply the change of basis to B_chall_2 + matrix_application_even_basis(B_chall_2, E_chall, &mat_Baux2_to_Baux2_can, f); + +#ifndef NDEBUG + { + // Ensure all points have the desired order + assert(test_basis_order_twof(&B_can_chall, E_chall, TORSION_EVEN_POWER)); + } +#endif + + // compute the matrix to go from B_chall_can to B_chall_2 + change_of_basis_matrix_tate(&mat_Bchall_can_to_Bchall, B_chall_2, &B_can_chall, E_chall, f); + + // Assert all values in the matrix are of the expected size for packing + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][1]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][1]) <= SQIsign_response_length + HD_extra_torsion); + + // Set the basis change matrix 
to signature + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][0], &(mat_Bchall_can_to_Bchall[0][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][1], &(mat_Bchall_can_to_Bchall[0][1])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][0], &(mat_Bchall_can_to_Bchall[1][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][1], &(mat_Bchall_can_to_Bchall[1][1])); + + // Finalise the matrices + ibz_mat_2x2_finalize(&mat_Bchall_can_to_Bchall); + ibz_mat_2x2_finalize(&mat_Baux2_to_Baux2_can); +} + +int +protocols_sign(signature_t *sig, const public_key_t *pk, secret_key_t *sk, const unsigned char *m, size_t l) +{ + int ret = 0; + int reduced_order = 0; // work around false positive gcc warning + + uint_fast8_t pow_dim2_deg_resp; + assert(SQIsign_response_length <= (intmax_t)UINT_FAST8_MAX); // otherwise we might need more bits there + + ibz_t remain, lattice_content, random_aux_norm, degree_resp_inv; + ibz_init(&remain); + ibz_init(&lattice_content); + ibz_init(&random_aux_norm); + ibz_init(°ree_resp_inv); + + quat_alg_elem_t resp_quat; + quat_alg_elem_init(&resp_quat); + + quat_left_ideal_t lideal_commit, lideal_com_resp; + quat_left_ideal_init(&lideal_commit); + quat_left_ideal_init(&lideal_com_resp); + + // This structure holds two curves E1 x E2 together with a basis + // Bi of E[2^n] for each of these curves + theta_couple_curve_with_basis_t Ecom_Eaux; + // This structure holds two curves E1 x E2 together with a basis + // Bi of Ei[2^n] + theta_couple_curve_with_basis_t Eaux2_Echall2; + + // This will hold the challenge curve + ec_curve_t E_chall = sk->curve; + + ec_curve_init(&Ecom_Eaux.E1); + ec_curve_init(&Ecom_Eaux.E2); + + while (!ret) { + + // computing the commitment + ret = commit(&Ecom_Eaux.E1, &Ecom_Eaux.B1, &lideal_commit); + + // start again if the commitment generation has failed + if (!ret) { + continue; + } + + // Hash the message to a kernel generator + // i.e. 
a scalar such that ker = P + [s]Q + hash_to_challenge(&sig->chall_coeff, pk, &Ecom_Eaux.E1, m, l); + // Compute the challenge ideal and response quaternion element + { + quat_left_ideal_t lideal_chall_two; + quat_left_ideal_init(&lideal_chall_two); + + // computing the challenge ideal + compute_challenge_ideal_signature(&lideal_chall_two, sig, sk); + compute_response_quat_element(&resp_quat, &lattice_content, sk, &lideal_chall_two, &lideal_commit); + + // Clean up + quat_left_ideal_finalize(&lideal_chall_two); + } + + // computing the amount of backtracking we're making + // and removing it + compute_backtracking_signature(sig, &resp_quat, &lattice_content, &remain); + + // creating lideal_com * lideal_resp + // we first compute the norm of lideal_resp + // norm of the resp_quat + pow_dim2_deg_resp = compute_random_aux_norm_and_helpers(sig, + &random_aux_norm, + °ree_resp_inv, + &remain, + &lattice_content, + &resp_quat, + &lideal_com_resp, + &lideal_commit); + + // notational conventions: + // B0 = canonical basis of E0 + // B_com = image through commitment isogeny (odd degree) of canonical basis of E0 + // B_aux = image through aux_resp_com isogeny (odd degree) of canonical basis of E0 + + if (pow_dim2_deg_resp > 0) { + // Evaluate the random aux ideal on the curve E0 and its basis to find E_aux and B_aux + ret = + evaluate_random_aux_isogeny_signature(&Ecom_Eaux.E2, &Ecom_Eaux.B2, &random_aux_norm, &lideal_com_resp); + + // auxiliary isogeny computation failed we must start again + if (!ret) { + continue; + } + +#ifndef NDEBUG + // testing that the order of the points in the bases is as expected + assert(test_basis_order_twof(&Ecom_Eaux.B1, &Ecom_Eaux.E1, TORSION_EVEN_POWER)); + assert(test_basis_order_twof(&Ecom_Eaux.B2, &Ecom_Eaux.E2, TORSION_EVEN_POWER)); +#endif + + // applying the matrix to compute Baux + // first, we reduce to the relevant order + reduced_order = pow_dim2_deg_resp + HD_extra_torsion + sig->two_resp_length; + ec_dbl_iter_basis(&Ecom_Eaux.B1, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B1, &Ecom_Eaux.E1); + ec_dbl_iter_basis(&Ecom_Eaux.B2, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B2, &Ecom_Eaux.E2); + + // Given all the above data, compute a dim two isogeny with domain + // E_com x E_aux + // and codomain + // E_aux_2 x E_chall_2 (note: E_chall_2 is isomorphic to E_chall) + // and evaluated points stored as bases in + // B_aux_2 on E_aux_2 + // B_chall_2 on E_chall_2 + ret = compute_dim2_isogeny_challenge( + &Eaux2_Echall2, &Ecom_Eaux, °ree_resp_inv, pow_dim2_deg_resp, sig->two_resp_length, reduced_order); + if (!ret) + continue; + } else { + // No 2d isogeny needed, so simulate a "Kani matrix" identity here + copy_curve(&Eaux2_Echall2.E1, &Ecom_Eaux.E1); + copy_curve(&Eaux2_Echall2.E2, &Ecom_Eaux.E1); + + reduced_order = sig->two_resp_length; + ec_dbl_iter_basis(&Eaux2_Echall2.B1, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B1, &Ecom_Eaux.E1); + ec_dbl_iter_basis(&Eaux2_Echall2.B1, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B1, &Ecom_Eaux.E1); + copy_basis(&Eaux2_Echall2.B2, &Eaux2_Echall2.B1); + } + + // computation of the remaining small chain of two isogenies when needed + if (sig->two_resp_length > 0) { + if (!compute_small_chain_isogeny_signature( + &Eaux2_Echall2.E2, &Eaux2_Echall2.B2, &resp_quat, pow_dim2_deg_resp, sig->two_resp_length)) { + assert(0); // this shouldn't fail + } + } + + // computation of the challenge codomain + if (!compute_challenge_codomain_signature(sig, sk, &E_chall, &Eaux2_Echall2.E2, &Eaux2_Echall2.B2)) + assert(0); // 
this shouldn't fail + } + + // Set to the signature the Montgomery A-coefficient of E_aux_2 + set_aux_curve_signature(sig, &Eaux2_Echall2.E1); + + // Set the basis change matrix from canonical bases to the supplied bases + compute_and_set_basis_change_matrix( + sig, &Eaux2_Echall2.B1, &Eaux2_Echall2.B2, &Eaux2_Echall2.E1, &E_chall, reduced_order); + + quat_alg_elem_finalize(&resp_quat); + quat_left_ideal_finalize(&lideal_commit); + quat_left_ideal_finalize(&lideal_com_resp); + + ibz_finalize(&lattice_content); + ibz_finalize(&remain); + ibz_finalize(°ree_resp_inv); + ibz_finalize(&random_aux_norm); + + return ret; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/signature.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/signature.h new file mode 100644 index 0000000000..ba38c360e6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/signature.h @@ -0,0 +1,97 @@ +/** @file + * + * @brief The key generation and signature protocols + */ + +#ifndef SIGNATURE_H +#define SIGNATURE_H + +#include +#include +#include +#include + +/** @defgroup signature SQIsignHD key generation and signature protocols + * @{ + */ +/** @defgroup signature_t Types for SQIsignHD key generation and signature protocols + * @{ + */ + +/** @brief Type for the secret keys + * + * @typedef secret_key_t + * + * @struct secret_key + * + */ +typedef struct secret_key +{ + ec_curve_t curve; /// the public curve, but with little precomputations + quat_left_ideal_t secret_ideal; + ibz_mat_2x2_t mat_BAcan_to_BA0_two; // mat_BA0_to_BAcan*BA0 = BAcan, where BAcan is the + // canonical basis of EA[2^e], and BA0 the image of the + // basis of E0[2^e] through the secret isogeny + ec_basis_t canonical_basis; // the canonical basis of the public key curve +} secret_key_t; + +/** @} + */ + +/*************************** Functions *****************************/ + +void secret_key_init(secret_key_t *sk); +void secret_key_finalize(secret_key_t *sk); + +/** + * @brief Key generation + * + * @param pk Output: will contain the public key + * @param sk Output: will contain the secret key + * @returns 1 if success, 0 otherwise + */ +int protocols_keygen(public_key_t *pk, secret_key_t *sk); + +/** + * @brief Signature computation + * + * @param sig Output: will contain the signature + * @param sk secret key + * @param pk public key + * @param m message + * @param l size + * @returns 1 if success, 0 otherwise + */ +int protocols_sign(signature_t *sig, const public_key_t *pk, secret_key_t *sk, const unsigned char *m, size_t l); + +/*************************** Encoding *****************************/ + +/** @defgroup encoding Encoding and decoding functions + * @{ + */ + +/** + * @brief Encodes a secret key as a byte array + * + * @param enc : Byte array to encode the secret key (including public key) in + * @param sk : Secret key to encode + * @param pk : Public key to encode + */ +void secret_key_to_bytes(unsigned char *enc, const secret_key_t *sk, const public_key_t *pk); + +/** + * @brief Decodes a secret key (and public key) from a byte array + * + * @param sk : Structure to decode the secret key in + * @param pk : Structure to decode the public key in + * @param enc : Byte array to decode + */ +void secret_key_from_bytes(secret_key_t *sk, public_key_t *pk, const unsigned char *enc); + +/** @} + */ + +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign.c new file mode 100644 index 0000000000..7335c38d9a --- /dev/null +++ 
b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign.c @@ -0,0 +1,146 @@ +#include +#include +#include +#include +#if defined(ENABLE_SIGN) +#include +#endif + +#if defined(ENABLE_SIGN) +SQISIGN_API +int +sqisign_keypair(unsigned char *pk, unsigned char *sk) +{ + int ret = 0; + secret_key_t skt; + public_key_t pkt = { 0 }; + secret_key_init(&skt); + + ret = !protocols_keygen(&pkt, &skt); + + secret_key_to_bytes(sk, &skt, &pkt); + public_key_to_bytes(pk, &pkt); + secret_key_finalize(&skt); + return ret; +} + +SQISIGN_API +int +sqisign_sign(unsigned char *sm, + unsigned long long *smlen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *sk) +{ + int ret = 0; + secret_key_t skt; + public_key_t pkt = { 0 }; + signature_t sigt; + secret_key_init(&skt); + secret_key_from_bytes(&skt, &pkt, sk); + + memmove(sm + SIGNATURE_BYTES, m, mlen); + + ret = !protocols_sign(&sigt, &pkt, &skt, sm + SIGNATURE_BYTES, mlen); + if (ret != 0) { + *smlen = 0; + goto err; + } + + signature_to_bytes(sm, &sigt); + *smlen = SIGNATURE_BYTES + mlen; + +err: + secret_key_finalize(&skt); + return ret; +} + +SQISIGN_API +int +sqisign_sign_signature(unsigned char *s, + unsigned long long *slen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *sk) +{ + int ret = 0; + secret_key_t skt; + public_key_t pkt = { 0 }; + signature_t sigt; + secret_key_init(&skt); + secret_key_from_bytes(&skt, &pkt, sk); + + ret = !protocols_sign(&sigt, &pkt, &skt, m, mlen); + if (ret != 0) { + *slen = 0; + goto err; + } + + signature_to_bytes(s, &sigt); + *slen = SIGNATURE_BYTES; + +err: + secret_key_finalize(&skt); + return ret; +} +#endif + +SQISIGN_API +int +sqisign_open(unsigned char *m, + unsigned long long *mlen, + const unsigned char *sm, + unsigned long long smlen, + const unsigned char *pk) +{ + int ret = 0; + public_key_t pkt = { 0 }; + signature_t sigt; + + public_key_from_bytes(&pkt, pk); + signature_from_bytes(&sigt, sm); + + ret = !protocols_verify(&sigt, &pkt, sm + SIGNATURE_BYTES, smlen - SIGNATURE_BYTES); + + if (!ret) { + *mlen = smlen - SIGNATURE_BYTES; + memmove(m, sm + SIGNATURE_BYTES, *mlen); + } else { + *mlen = 0; + memset(m, 0, smlen - SIGNATURE_BYTES); + } + + return ret; +} + +SQISIGN_API +int +sqisign_verify(const unsigned char *m, + unsigned long long mlen, + const unsigned char *sig, + unsigned long long siglen, + const unsigned char *pk) +{ + + int ret = 0; + public_key_t pkt = { 0 }; + signature_t sigt; + + public_key_from_bytes(&pkt, pk); + signature_from_bytes(&sigt, sig); + + ret = !protocols_verify(&sigt, &pkt, m, mlen); + + return ret; +} + +SQISIGN_API +int +sqisign_verify_signature(const unsigned char *sig, + unsigned long long siglen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *pk) +{ + return sqisign_verify(m, mlen, sig, siglen, pk); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign_namespace.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign_namespace.h new file mode 100644 index 0000000000..007d2572b9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign_namespace.h @@ -0,0 +1,1071 @@ + +#ifndef SQISIGN_NAMESPACE_H +#define SQISIGN_NAMESPACE_H + +//#define DISABLE_NAMESPACING + +#if defined(_WIN32) +#define SQISIGN_API __declspec(dllexport) +#else +#define SQISIGN_API __attribute__((visibility("default"))) +#endif + +#define PARAM_JOIN3_(a, b, c) sqisign_##a##_##b##_##c +#define PARAM_JOIN3(a, b, c) PARAM_JOIN3_(a, b, c) +#define PARAM_NAME3(end, s) PARAM_JOIN3(SQISIGN_VARIANT, 
end, s) + +#define PARAM_JOIN2_(a, b) sqisign_##a##_##b +#define PARAM_JOIN2(a, b) PARAM_JOIN2_(a, b) +#define PARAM_NAME2(end, s) PARAM_JOIN2(end, s) + +#ifndef DISABLE_NAMESPACING +#define SQISIGN_NAMESPACE_GENERIC(s) PARAM_NAME2(gen, s) +#else +#define SQISIGN_NAMESPACE_GENERIC(s) s +#endif + +#if defined(SQISIGN_VARIANT) && !defined(DISABLE_NAMESPACING) +#if defined(SQISIGN_BUILD_TYPE_REF) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(ref, s) +#elif defined(SQISIGN_BUILD_TYPE_OPT) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(opt, s) +#elif defined(SQISIGN_BUILD_TYPE_BROADWELL) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(broadwell, s) +#elif defined(SQISIGN_BUILD_TYPE_ARM64CRYPTO) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(arm64crypto, s) +#else +#error "Build type not known" +#endif + +#else +#define SQISIGN_NAMESPACE(s) s +#endif + +// Namespacing symbols exported from algebra.c: +#undef quat_alg_add +#undef quat_alg_conj +#undef quat_alg_coord_mul +#undef quat_alg_elem_copy +#undef quat_alg_elem_copy_ibz +#undef quat_alg_elem_equal +#undef quat_alg_elem_is_zero +#undef quat_alg_elem_mul_by_scalar +#undef quat_alg_elem_set +#undef quat_alg_equal_denom +#undef quat_alg_init_set_ui +#undef quat_alg_make_primitive +#undef quat_alg_mul +#undef quat_alg_norm +#undef quat_alg_normalize +#undef quat_alg_scalar +#undef quat_alg_sub + +#define quat_alg_add SQISIGN_NAMESPACE_GENERIC(quat_alg_add) +#define quat_alg_conj SQISIGN_NAMESPACE_GENERIC(quat_alg_conj) +#define quat_alg_coord_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_coord_mul) +#define quat_alg_elem_copy SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy) +#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy_ibz) +#define quat_alg_elem_equal SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_equal) +#define quat_alg_elem_is_zero SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_is_zero) +#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_mul_by_scalar) +#define quat_alg_elem_set SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_set) +#define quat_alg_equal_denom SQISIGN_NAMESPACE_GENERIC(quat_alg_equal_denom) +#define quat_alg_init_set_ui SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set_ui) +#define quat_alg_make_primitive SQISIGN_NAMESPACE_GENERIC(quat_alg_make_primitive) +#define quat_alg_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_mul) +#define quat_alg_norm SQISIGN_NAMESPACE_GENERIC(quat_alg_norm) +#define quat_alg_normalize SQISIGN_NAMESPACE_GENERIC(quat_alg_normalize) +#define quat_alg_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_scalar) +#define quat_alg_sub SQISIGN_NAMESPACE_GENERIC(quat_alg_sub) + +// Namespacing symbols exported from api.c: +#undef crypto_sign +#undef crypto_sign_keypair +#undef crypto_sign_open + +#define crypto_sign SQISIGN_NAMESPACE(crypto_sign) +#define crypto_sign_keypair SQISIGN_NAMESPACE(crypto_sign_keypair) +#define crypto_sign_open SQISIGN_NAMESPACE(crypto_sign_open) + +// Namespacing symbols exported from basis.c: +#undef ec_curve_to_basis_2f_from_hint +#undef ec_curve_to_basis_2f_to_hint +#undef ec_recover_y +#undef lift_basis +#undef lift_basis_normalized + +#define ec_curve_to_basis_2f_from_hint SQISIGN_NAMESPACE(ec_curve_to_basis_2f_from_hint) +#define ec_curve_to_basis_2f_to_hint SQISIGN_NAMESPACE(ec_curve_to_basis_2f_to_hint) +#define ec_recover_y SQISIGN_NAMESPACE(ec_recover_y) +#define lift_basis SQISIGN_NAMESPACE(lift_basis) +#define lift_basis_normalized SQISIGN_NAMESPACE(lift_basis_normalized) + +// Namespacing symbols exported from biextension.c: +#undef clear_cofac +#undef ec_dlog_2_tate +#undef 
ec_dlog_2_weil +#undef fp2_frob +#undef reduced_tate +#undef weil + +#define clear_cofac SQISIGN_NAMESPACE(clear_cofac) +#define ec_dlog_2_tate SQISIGN_NAMESPACE(ec_dlog_2_tate) +#define ec_dlog_2_weil SQISIGN_NAMESPACE(ec_dlog_2_weil) +#define fp2_frob SQISIGN_NAMESPACE(fp2_frob) +#define reduced_tate SQISIGN_NAMESPACE(reduced_tate) +#define weil SQISIGN_NAMESPACE(weil) + +// Namespacing symbols exported from common.c: +#undef hash_to_challenge +#undef public_key_finalize +#undef public_key_init + +#define hash_to_challenge SQISIGN_NAMESPACE(hash_to_challenge) +#define public_key_finalize SQISIGN_NAMESPACE(public_key_finalize) +#define public_key_init SQISIGN_NAMESPACE(public_key_init) + +// Namespacing symbols exported from dim2.c: +#undef ibz_2x2_mul_mod +#undef ibz_mat_2x2_add +#undef ibz_mat_2x2_copy +#undef ibz_mat_2x2_det_from_ibz +#undef ibz_mat_2x2_eval +#undef ibz_mat_2x2_inv_mod +#undef ibz_mat_2x2_set +#undef ibz_vec_2_set + +#define ibz_2x2_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_2x2_mul_mod) +#define ibz_mat_2x2_add SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_add) +#define ibz_mat_2x2_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_copy) +#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_det_from_ibz) +#define ibz_mat_2x2_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_eval) +#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_inv_mod) +#define ibz_mat_2x2_set SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_set) +#define ibz_vec_2_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_set) + +// Namespacing symbols exported from dim2id2iso.c: +#undef dim2id2iso_arbitrary_isogeny_evaluation +#undef dim2id2iso_ideal_to_isogeny_clapotis +#undef find_uv +#undef fixed_degree_isogeny_and_eval + +#define dim2id2iso_arbitrary_isogeny_evaluation SQISIGN_NAMESPACE(dim2id2iso_arbitrary_isogeny_evaluation) +#define dim2id2iso_ideal_to_isogeny_clapotis SQISIGN_NAMESPACE(dim2id2iso_ideal_to_isogeny_clapotis) +#define find_uv SQISIGN_NAMESPACE(find_uv) +#define fixed_degree_isogeny_and_eval SQISIGN_NAMESPACE(fixed_degree_isogeny_and_eval) + +// Namespacing symbols exported from dim4.c: +#undef ibz_inv_dim4_make_coeff_mpm +#undef ibz_inv_dim4_make_coeff_pmp +#undef ibz_mat_4x4_copy +#undef ibz_mat_4x4_equal +#undef ibz_mat_4x4_eval +#undef ibz_mat_4x4_eval_t +#undef ibz_mat_4x4_gcd +#undef ibz_mat_4x4_identity +#undef ibz_mat_4x4_inv_with_det_as_denom +#undef ibz_mat_4x4_is_identity +#undef ibz_mat_4x4_mul +#undef ibz_mat_4x4_negate +#undef ibz_mat_4x4_scalar_div +#undef ibz_mat_4x4_scalar_mul +#undef ibz_mat_4x4_transpose +#undef ibz_mat_4x4_zero +#undef ibz_vec_4_add +#undef ibz_vec_4_content +#undef ibz_vec_4_copy +#undef ibz_vec_4_copy_ibz +#undef ibz_vec_4_is_zero +#undef ibz_vec_4_linear_combination +#undef ibz_vec_4_negate +#undef ibz_vec_4_scalar_div +#undef ibz_vec_4_scalar_mul +#undef ibz_vec_4_set +#undef ibz_vec_4_sub +#undef quat_qf_eval + +#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_mpm) +#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_pmp) +#define ibz_mat_4x4_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_copy) +#define ibz_mat_4x4_equal SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_equal) +#define ibz_mat_4x4_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval) +#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval_t) +#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_gcd) +#define ibz_mat_4x4_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_identity) +#define ibz_mat_4x4_inv_with_det_as_denom 
SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_inv_with_det_as_denom) +#define ibz_mat_4x4_is_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_identity) +#define ibz_mat_4x4_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_mul) +#define ibz_mat_4x4_negate SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_negate) +#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_div) +#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_mul) +#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_transpose) +#define ibz_mat_4x4_zero SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_zero) +#define ibz_vec_4_add SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_add) +#define ibz_vec_4_content SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_content) +#define ibz_vec_4_copy SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy) +#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_ibz) +#define ibz_vec_4_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_is_zero) +#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination) +#define ibz_vec_4_negate SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_negate) +#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_div) +#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul) +#define ibz_vec_4_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_set) +#define ibz_vec_4_sub SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_sub) +#define quat_qf_eval SQISIGN_NAMESPACE_GENERIC(quat_qf_eval) + +// Namespacing symbols exported from ec.c: +#undef cswap_points +#undef ec_biscalar_mul +#undef ec_curve_init +#undef ec_curve_init_from_A +#undef ec_curve_normalize_A24 +#undef ec_curve_verify_A +#undef ec_dbl +#undef ec_dbl_iter +#undef ec_dbl_iter_basis +#undef ec_has_zero_coordinate +#undef ec_is_basis_four_torsion +#undef ec_is_equal +#undef ec_is_four_torsion +#undef ec_is_two_torsion +#undef ec_is_zero +#undef ec_j_inv +#undef ec_ladder3pt +#undef ec_mul +#undef ec_normalize_curve +#undef ec_normalize_curve_and_A24 +#undef ec_normalize_point +#undef ec_point_init +#undef select_point +#undef xADD +#undef xDBL +#undef xDBLADD +#undef xDBLMUL +#undef xDBL_A24 +#undef xDBL_E0 +#undef xMUL + +#define cswap_points SQISIGN_NAMESPACE(cswap_points) +#define ec_biscalar_mul SQISIGN_NAMESPACE(ec_biscalar_mul) +#define ec_curve_init SQISIGN_NAMESPACE(ec_curve_init) +#define ec_curve_init_from_A SQISIGN_NAMESPACE(ec_curve_init_from_A) +#define ec_curve_normalize_A24 SQISIGN_NAMESPACE(ec_curve_normalize_A24) +#define ec_curve_verify_A SQISIGN_NAMESPACE(ec_curve_verify_A) +#define ec_dbl SQISIGN_NAMESPACE(ec_dbl) +#define ec_dbl_iter SQISIGN_NAMESPACE(ec_dbl_iter) +#define ec_dbl_iter_basis SQISIGN_NAMESPACE(ec_dbl_iter_basis) +#define ec_has_zero_coordinate SQISIGN_NAMESPACE(ec_has_zero_coordinate) +#define ec_is_basis_four_torsion SQISIGN_NAMESPACE(ec_is_basis_four_torsion) +#define ec_is_equal SQISIGN_NAMESPACE(ec_is_equal) +#define ec_is_four_torsion SQISIGN_NAMESPACE(ec_is_four_torsion) +#define ec_is_two_torsion SQISIGN_NAMESPACE(ec_is_two_torsion) +#define ec_is_zero SQISIGN_NAMESPACE(ec_is_zero) +#define ec_j_inv SQISIGN_NAMESPACE(ec_j_inv) +#define ec_ladder3pt SQISIGN_NAMESPACE(ec_ladder3pt) +#define ec_mul SQISIGN_NAMESPACE(ec_mul) +#define ec_normalize_curve SQISIGN_NAMESPACE(ec_normalize_curve) +#define ec_normalize_curve_and_A24 SQISIGN_NAMESPACE(ec_normalize_curve_and_A24) +#define ec_normalize_point SQISIGN_NAMESPACE(ec_normalize_point) +#define ec_point_init SQISIGN_NAMESPACE(ec_point_init) +#define select_point SQISIGN_NAMESPACE(select_point) 
+#define xADD SQISIGN_NAMESPACE(xADD) +#define xDBL SQISIGN_NAMESPACE(xDBL) +#define xDBLADD SQISIGN_NAMESPACE(xDBLADD) +#define xDBLMUL SQISIGN_NAMESPACE(xDBLMUL) +#define xDBL_A24 SQISIGN_NAMESPACE(xDBL_A24) +#define xDBL_E0 SQISIGN_NAMESPACE(xDBL_E0) +#define xMUL SQISIGN_NAMESPACE(xMUL) + +// Namespacing symbols exported from ec_jac.c: +#undef ADD +#undef DBL +#undef DBLW +#undef copy_jac_point +#undef jac_from_ws +#undef jac_init +#undef jac_is_equal +#undef jac_neg +#undef jac_to_ws +#undef jac_to_xz +#undef jac_to_xz_add_components +#undef select_jac_point + +#define ADD SQISIGN_NAMESPACE(ADD) +#define DBL SQISIGN_NAMESPACE(DBL) +#define DBLW SQISIGN_NAMESPACE(DBLW) +#define copy_jac_point SQISIGN_NAMESPACE(copy_jac_point) +#define jac_from_ws SQISIGN_NAMESPACE(jac_from_ws) +#define jac_init SQISIGN_NAMESPACE(jac_init) +#define jac_is_equal SQISIGN_NAMESPACE(jac_is_equal) +#define jac_neg SQISIGN_NAMESPACE(jac_neg) +#define jac_to_ws SQISIGN_NAMESPACE(jac_to_ws) +#define jac_to_xz SQISIGN_NAMESPACE(jac_to_xz) +#define jac_to_xz_add_components SQISIGN_NAMESPACE(jac_to_xz_add_components) +#define select_jac_point SQISIGN_NAMESPACE(select_jac_point) + +// Namespacing symbols exported from encode_signature.c: +#undef secret_key_from_bytes +#undef secret_key_to_bytes + +#define secret_key_from_bytes SQISIGN_NAMESPACE(secret_key_from_bytes) +#define secret_key_to_bytes SQISIGN_NAMESPACE(secret_key_to_bytes) + +// Namespacing symbols exported from encode_verification.c: +#undef public_key_from_bytes +#undef public_key_to_bytes +#undef signature_from_bytes +#undef signature_to_bytes + +#define public_key_from_bytes SQISIGN_NAMESPACE(public_key_from_bytes) +#define public_key_to_bytes SQISIGN_NAMESPACE(public_key_to_bytes) +#define signature_from_bytes SQISIGN_NAMESPACE(signature_from_bytes) +#define signature_to_bytes SQISIGN_NAMESPACE(signature_to_bytes) + +// Namespacing symbols exported from finit.c: +#undef ibz_mat_2x2_finalize +#undef ibz_mat_2x2_init +#undef ibz_mat_4x4_finalize +#undef ibz_mat_4x4_init +#undef ibz_vec_2_finalize +#undef ibz_vec_2_init +#undef ibz_vec_4_finalize +#undef ibz_vec_4_init +#undef quat_alg_elem_finalize +#undef quat_alg_elem_init +#undef quat_alg_finalize +#undef quat_alg_init_set +#undef quat_lattice_finalize +#undef quat_lattice_init +#undef quat_left_ideal_finalize +#undef quat_left_ideal_init + +#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_finalize) +#define ibz_mat_2x2_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_init) +#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_finalize) +#define ibz_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_init) +#define ibz_vec_2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_finalize) +#define ibz_vec_2_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_init) +#define ibz_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_finalize) +#define ibz_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_init) +#define quat_alg_elem_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_finalize) +#define quat_alg_elem_init SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_init) +#define quat_alg_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_finalize) +#define quat_alg_init_set SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set) +#define quat_lattice_finalize SQISIGN_NAMESPACE_GENERIC(quat_lattice_finalize) +#define quat_lattice_init SQISIGN_NAMESPACE_GENERIC(quat_lattice_init) +#define quat_left_ideal_finalize SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_finalize) +#define quat_left_ideal_init 
SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_init) + +// Namespacing symbols exported from fp.c: +#undef fp_select +#undef p +#undef p2 + +#define fp_select SQISIGN_NAMESPACE(fp_select) +#define p SQISIGN_NAMESPACE(p) +#define p2 SQISIGN_NAMESPACE(p2) + +// Namespacing symbols exported from fp.c, fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c: +#undef fp_exp3div4 +#undef fp_inv +#undef fp_is_square +#undef fp_sqrt + +#define fp_exp3div4 SQISIGN_NAMESPACE(fp_exp3div4) +#define fp_inv SQISIGN_NAMESPACE(fp_inv) +#define fp_is_square SQISIGN_NAMESPACE(fp_is_square) +#define fp_sqrt SQISIGN_NAMESPACE(fp_sqrt) + +// Namespacing symbols exported from fp2.c: +#undef fp2_add +#undef fp2_add_one +#undef fp2_batched_inv +#undef fp2_copy +#undef fp2_cswap +#undef fp2_decode +#undef fp2_encode +#undef fp2_half +#undef fp2_inv +#undef fp2_is_equal +#undef fp2_is_one +#undef fp2_is_square +#undef fp2_is_zero +#undef fp2_mul +#undef fp2_mul_small +#undef fp2_neg +#undef fp2_pow_vartime +#undef fp2_print +#undef fp2_select +#undef fp2_set_one +#undef fp2_set_small +#undef fp2_set_zero +#undef fp2_sqr +#undef fp2_sqrt +#undef fp2_sqrt_verify +#undef fp2_sub + +#define fp2_add SQISIGN_NAMESPACE(fp2_add) +#define fp2_add_one SQISIGN_NAMESPACE(fp2_add_one) +#define fp2_batched_inv SQISIGN_NAMESPACE(fp2_batched_inv) +#define fp2_copy SQISIGN_NAMESPACE(fp2_copy) +#define fp2_cswap SQISIGN_NAMESPACE(fp2_cswap) +#define fp2_decode SQISIGN_NAMESPACE(fp2_decode) +#define fp2_encode SQISIGN_NAMESPACE(fp2_encode) +#define fp2_half SQISIGN_NAMESPACE(fp2_half) +#define fp2_inv SQISIGN_NAMESPACE(fp2_inv) +#define fp2_is_equal SQISIGN_NAMESPACE(fp2_is_equal) +#define fp2_is_one SQISIGN_NAMESPACE(fp2_is_one) +#define fp2_is_square SQISIGN_NAMESPACE(fp2_is_square) +#define fp2_is_zero SQISIGN_NAMESPACE(fp2_is_zero) +#define fp2_mul SQISIGN_NAMESPACE(fp2_mul) +#define fp2_mul_small SQISIGN_NAMESPACE(fp2_mul_small) +#define fp2_neg SQISIGN_NAMESPACE(fp2_neg) +#define fp2_pow_vartime SQISIGN_NAMESPACE(fp2_pow_vartime) +#define fp2_print SQISIGN_NAMESPACE(fp2_print) +#define fp2_select SQISIGN_NAMESPACE(fp2_select) +#define fp2_set_one SQISIGN_NAMESPACE(fp2_set_one) +#define fp2_set_small SQISIGN_NAMESPACE(fp2_set_small) +#define fp2_set_zero SQISIGN_NAMESPACE(fp2_set_zero) +#define fp2_sqr SQISIGN_NAMESPACE(fp2_sqr) +#define fp2_sqrt SQISIGN_NAMESPACE(fp2_sqrt) +#define fp2_sqrt_verify SQISIGN_NAMESPACE(fp2_sqrt_verify) +#define fp2_sub SQISIGN_NAMESPACE(fp2_sub) + +// Namespacing symbols exported from fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c: +#undef fp_copy +#undef fp_cswap +#undef fp_decode +#undef fp_decode_reduce +#undef fp_div3 +#undef fp_encode +#undef fp_half +#undef fp_is_equal +#undef fp_is_zero +#undef fp_mul_small +#undef fp_neg +#undef fp_set_one +#undef fp_set_small +#undef fp_set_zero + +#define fp_copy SQISIGN_NAMESPACE(fp_copy) +#define fp_cswap SQISIGN_NAMESPACE(fp_cswap) +#define fp_decode SQISIGN_NAMESPACE(fp_decode) +#define fp_decode_reduce SQISIGN_NAMESPACE(fp_decode_reduce) +#define fp_div3 SQISIGN_NAMESPACE(fp_div3) +#define fp_encode SQISIGN_NAMESPACE(fp_encode) +#define fp_half SQISIGN_NAMESPACE(fp_half) +#define fp_is_equal SQISIGN_NAMESPACE(fp_is_equal) +#define fp_is_zero SQISIGN_NAMESPACE(fp_is_zero) +#define fp_mul_small SQISIGN_NAMESPACE(fp_mul_small) +#define fp_neg SQISIGN_NAMESPACE(fp_neg) +#define fp_set_one SQISIGN_NAMESPACE(fp_set_one) +#define fp_set_small SQISIGN_NAMESPACE(fp_set_small) +#define fp_set_zero SQISIGN_NAMESPACE(fp_set_zero) +#define ONE SQISIGN_NAMESPACE(ONE) +#define 
ZERO SQISIGN_NAMESPACE(ZERO) + +// Namespacing symbols exported from fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c, gf27500.c, gf5248.c, gf65376.c: +#undef fp_add +#undef fp_mul +#undef fp_sqr +#undef fp_sub + +#define fp_add SQISIGN_NAMESPACE(fp_add) +#define fp_mul SQISIGN_NAMESPACE(fp_mul) +#define fp_sqr SQISIGN_NAMESPACE(fp_sqr) +#define fp_sub SQISIGN_NAMESPACE(fp_sub) + +// Namespacing symbols exported from gf27500.c: +#undef gf27500_decode +#undef gf27500_decode_reduce +#undef gf27500_div +#undef gf27500_div3 +#undef gf27500_encode +#undef gf27500_invert +#undef gf27500_legendre +#undef gf27500_sqrt + +#define gf27500_decode SQISIGN_NAMESPACE(gf27500_decode) +#define gf27500_decode_reduce SQISIGN_NAMESPACE(gf27500_decode_reduce) +#define gf27500_div SQISIGN_NAMESPACE(gf27500_div) +#define gf27500_div3 SQISIGN_NAMESPACE(gf27500_div3) +#define gf27500_encode SQISIGN_NAMESPACE(gf27500_encode) +#define gf27500_invert SQISIGN_NAMESPACE(gf27500_invert) +#define gf27500_legendre SQISIGN_NAMESPACE(gf27500_legendre) +#define gf27500_sqrt SQISIGN_NAMESPACE(gf27500_sqrt) + +// Namespacing symbols exported from gf27500.c, gf5248.c, gf65376.c: +#undef fp2_mul_c0 +#undef fp2_mul_c1 +#undef fp2_sq_c0 +#undef fp2_sq_c1 + +#define fp2_mul_c0 SQISIGN_NAMESPACE(fp2_mul_c0) +#define fp2_mul_c1 SQISIGN_NAMESPACE(fp2_mul_c1) +#define fp2_sq_c0 SQISIGN_NAMESPACE(fp2_sq_c0) +#define fp2_sq_c1 SQISIGN_NAMESPACE(fp2_sq_c1) + +// Namespacing symbols exported from gf5248.c: +#undef gf5248_decode +#undef gf5248_decode_reduce +#undef gf5248_div +#undef gf5248_div3 +#undef gf5248_encode +#undef gf5248_invert +#undef gf5248_legendre +#undef gf5248_sqrt + +#define gf5248_decode SQISIGN_NAMESPACE(gf5248_decode) +#define gf5248_decode_reduce SQISIGN_NAMESPACE(gf5248_decode_reduce) +#define gf5248_div SQISIGN_NAMESPACE(gf5248_div) +#define gf5248_div3 SQISIGN_NAMESPACE(gf5248_div3) +#define gf5248_encode SQISIGN_NAMESPACE(gf5248_encode) +#define gf5248_invert SQISIGN_NAMESPACE(gf5248_invert) +#define gf5248_legendre SQISIGN_NAMESPACE(gf5248_legendre) +#define gf5248_sqrt SQISIGN_NAMESPACE(gf5248_sqrt) + +// Namespacing symbols exported from gf65376.c: +#undef gf65376_decode +#undef gf65376_decode_reduce +#undef gf65376_div +#undef gf65376_div3 +#undef gf65376_encode +#undef gf65376_invert +#undef gf65376_legendre +#undef gf65376_sqrt + +#define gf65376_decode SQISIGN_NAMESPACE(gf65376_decode) +#define gf65376_decode_reduce SQISIGN_NAMESPACE(gf65376_decode_reduce) +#define gf65376_div SQISIGN_NAMESPACE(gf65376_div) +#define gf65376_div3 SQISIGN_NAMESPACE(gf65376_div3) +#define gf65376_encode SQISIGN_NAMESPACE(gf65376_encode) +#define gf65376_invert SQISIGN_NAMESPACE(gf65376_invert) +#define gf65376_legendre SQISIGN_NAMESPACE(gf65376_legendre) +#define gf65376_sqrt SQISIGN_NAMESPACE(gf65376_sqrt) + +// Namespacing symbols exported from hd.c: +#undef add_couple_jac_points +#undef copy_bases_to_kernel +#undef couple_jac_to_xz +#undef double_couple_jac_point +#undef double_couple_jac_point_iter +#undef double_couple_point +#undef double_couple_point_iter + +#define add_couple_jac_points SQISIGN_NAMESPACE(add_couple_jac_points) +#define copy_bases_to_kernel SQISIGN_NAMESPACE(copy_bases_to_kernel) +#define couple_jac_to_xz SQISIGN_NAMESPACE(couple_jac_to_xz) +#define double_couple_jac_point SQISIGN_NAMESPACE(double_couple_jac_point) +#define double_couple_jac_point_iter SQISIGN_NAMESPACE(double_couple_jac_point_iter) +#define double_couple_point SQISIGN_NAMESPACE(double_couple_point) +#define double_couple_point_iter 
SQISIGN_NAMESPACE(double_couple_point_iter) + +// Namespacing symbols exported from hnf.c: +#undef ibz_mat_4x4_is_hnf +#undef ibz_mat_4xn_hnf_mod_core +#undef ibz_vec_4_copy_mod +#undef ibz_vec_4_linear_combination_mod +#undef ibz_vec_4_scalar_mul_mod + +#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_hnf) +#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE_GENERIC(ibz_mat_4xn_hnf_mod_core) +#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_mod) +#define ibz_vec_4_linear_combination_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination_mod) +#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul_mod) + +// Namespacing symbols exported from hnf_internal.c: +#undef ibz_centered_mod +#undef ibz_conditional_assign +#undef ibz_mod_not_zero +#undef ibz_xgcd_with_u_not_0 + +#define ibz_centered_mod SQISIGN_NAMESPACE_GENERIC(ibz_centered_mod) +#define ibz_conditional_assign SQISIGN_NAMESPACE_GENERIC(ibz_conditional_assign) +#define ibz_mod_not_zero SQISIGN_NAMESPACE_GENERIC(ibz_mod_not_zero) +#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE_GENERIC(ibz_xgcd_with_u_not_0) + +// Namespacing symbols exported from ibz_division.c: +#undef ibz_xgcd + +#define ibz_xgcd SQISIGN_NAMESPACE_GENERIC(ibz_xgcd) + +// Namespacing symbols exported from id2iso.c: +#undef change_of_basis_matrix_tate +#undef change_of_basis_matrix_tate_invert +#undef ec_biscalar_mul_ibz_vec +#undef endomorphism_application_even_basis +#undef id2iso_ideal_to_kernel_dlogs_even +#undef id2iso_kernel_dlogs_to_ideal_even +#undef matrix_application_even_basis + +#define change_of_basis_matrix_tate SQISIGN_NAMESPACE(change_of_basis_matrix_tate) +#define change_of_basis_matrix_tate_invert SQISIGN_NAMESPACE(change_of_basis_matrix_tate_invert) +#define ec_biscalar_mul_ibz_vec SQISIGN_NAMESPACE(ec_biscalar_mul_ibz_vec) +#define endomorphism_application_even_basis SQISIGN_NAMESPACE(endomorphism_application_even_basis) +#define id2iso_ideal_to_kernel_dlogs_even SQISIGN_NAMESPACE(id2iso_ideal_to_kernel_dlogs_even) +#define id2iso_kernel_dlogs_to_ideal_even SQISIGN_NAMESPACE(id2iso_kernel_dlogs_to_ideal_even) +#define matrix_application_even_basis SQISIGN_NAMESPACE(matrix_application_even_basis) + +// Namespacing symbols exported from ideal.c: +#undef quat_lideal_add +#undef quat_lideal_class_gram +#undef quat_lideal_conjugate_without_hnf +#undef quat_lideal_copy +#undef quat_lideal_create +#undef quat_lideal_create_principal +#undef quat_lideal_equals +#undef quat_lideal_generator +#undef quat_lideal_inter +#undef quat_lideal_inverse_lattice_without_hnf +#undef quat_lideal_mul +#undef quat_lideal_norm +#undef quat_lideal_right_order +#undef quat_lideal_right_transporter +#undef quat_order_discriminant +#undef quat_order_is_maximal + +#define quat_lideal_add SQISIGN_NAMESPACE_GENERIC(quat_lideal_add) +#define quat_lideal_class_gram SQISIGN_NAMESPACE_GENERIC(quat_lideal_class_gram) +#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_conjugate_without_hnf) +#define quat_lideal_copy SQISIGN_NAMESPACE_GENERIC(quat_lideal_copy) +#define quat_lideal_create SQISIGN_NAMESPACE_GENERIC(quat_lideal_create) +#define quat_lideal_create_principal SQISIGN_NAMESPACE_GENERIC(quat_lideal_create_principal) +#define quat_lideal_equals SQISIGN_NAMESPACE_GENERIC(quat_lideal_equals) +#define quat_lideal_generator SQISIGN_NAMESPACE_GENERIC(quat_lideal_generator) +#define quat_lideal_inter SQISIGN_NAMESPACE_GENERIC(quat_lideal_inter) +#define 
quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_inverse_lattice_without_hnf) +#define quat_lideal_mul SQISIGN_NAMESPACE_GENERIC(quat_lideal_mul) +#define quat_lideal_norm SQISIGN_NAMESPACE_GENERIC(quat_lideal_norm) +#define quat_lideal_right_order SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_order) +#define quat_lideal_right_transporter SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_transporter) +#define quat_order_discriminant SQISIGN_NAMESPACE_GENERIC(quat_order_discriminant) +#define quat_order_is_maximal SQISIGN_NAMESPACE_GENERIC(quat_order_is_maximal) + +// Namespacing symbols exported from intbig.c: +#undef ibz_abs +#undef ibz_add +#undef ibz_bitsize +#undef ibz_cmp +#undef ibz_cmp_int32 +#undef ibz_convert_to_str +#undef ibz_copy +#undef ibz_copy_digits +#undef ibz_div +#undef ibz_div_2exp +#undef ibz_div_floor +#undef ibz_divides +#undef ibz_finalize +#undef ibz_gcd +#undef ibz_get +#undef ibz_init +#undef ibz_invmod +#undef ibz_is_even +#undef ibz_is_odd +#undef ibz_is_one +#undef ibz_is_zero +#undef ibz_legendre +#undef ibz_mod +#undef ibz_mod_ui +#undef ibz_mul +#undef ibz_neg +#undef ibz_pow +#undef ibz_pow_mod +#undef ibz_print +#undef ibz_probab_prime +#undef ibz_rand_interval +#undef ibz_rand_interval_bits +#undef ibz_rand_interval_i +#undef ibz_rand_interval_minm_m +#undef ibz_set +#undef ibz_set_from_str +#undef ibz_size_in_base +#undef ibz_sqrt +#undef ibz_sqrt_floor +#undef ibz_sqrt_mod_p +#undef ibz_sub +#undef ibz_swap +#undef ibz_to_digits +#undef ibz_two_adic + +#define ibz_abs SQISIGN_NAMESPACE_GENERIC(ibz_abs) +#define ibz_add SQISIGN_NAMESPACE_GENERIC(ibz_add) +#define ibz_bitsize SQISIGN_NAMESPACE_GENERIC(ibz_bitsize) +#define ibz_cmp SQISIGN_NAMESPACE_GENERIC(ibz_cmp) +#define ibz_cmp_int32 SQISIGN_NAMESPACE_GENERIC(ibz_cmp_int32) +#define ibz_convert_to_str SQISIGN_NAMESPACE_GENERIC(ibz_convert_to_str) +#define ibz_copy SQISIGN_NAMESPACE_GENERIC(ibz_copy) +#define ibz_copy_digits SQISIGN_NAMESPACE_GENERIC(ibz_copy_digits) +#define ibz_div SQISIGN_NAMESPACE_GENERIC(ibz_div) +#define ibz_div_2exp SQISIGN_NAMESPACE_GENERIC(ibz_div_2exp) +#define ibz_div_floor SQISIGN_NAMESPACE_GENERIC(ibz_div_floor) +#define ibz_divides SQISIGN_NAMESPACE_GENERIC(ibz_divides) +#define ibz_finalize SQISIGN_NAMESPACE_GENERIC(ibz_finalize) +#define ibz_gcd SQISIGN_NAMESPACE_GENERIC(ibz_gcd) +#define ibz_get SQISIGN_NAMESPACE_GENERIC(ibz_get) +#define ibz_init SQISIGN_NAMESPACE_GENERIC(ibz_init) +#define ibz_invmod SQISIGN_NAMESPACE_GENERIC(ibz_invmod) +#define ibz_is_even SQISIGN_NAMESPACE_GENERIC(ibz_is_even) +#define ibz_is_odd SQISIGN_NAMESPACE_GENERIC(ibz_is_odd) +#define ibz_is_one SQISIGN_NAMESPACE_GENERIC(ibz_is_one) +#define ibz_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_is_zero) +#define ibz_legendre SQISIGN_NAMESPACE_GENERIC(ibz_legendre) +#define ibz_mod SQISIGN_NAMESPACE_GENERIC(ibz_mod) +#define ibz_mod_ui SQISIGN_NAMESPACE_GENERIC(ibz_mod_ui) +#define ibz_mul SQISIGN_NAMESPACE_GENERIC(ibz_mul) +#define ibz_neg SQISIGN_NAMESPACE_GENERIC(ibz_neg) +#define ibz_pow SQISIGN_NAMESPACE_GENERIC(ibz_pow) +#define ibz_pow_mod SQISIGN_NAMESPACE_GENERIC(ibz_pow_mod) +#define ibz_print SQISIGN_NAMESPACE_GENERIC(ibz_print) +#define ibz_probab_prime SQISIGN_NAMESPACE_GENERIC(ibz_probab_prime) +#define ibz_rand_interval SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval) +#define ibz_rand_interval_bits SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_bits) +#define ibz_rand_interval_i SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_i) +#define ibz_rand_interval_minm_m 
SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_minm_m) +#define ibz_set SQISIGN_NAMESPACE_GENERIC(ibz_set) +#define ibz_set_from_str SQISIGN_NAMESPACE_GENERIC(ibz_set_from_str) +#define ibz_size_in_base SQISIGN_NAMESPACE_GENERIC(ibz_size_in_base) +#define ibz_sqrt SQISIGN_NAMESPACE_GENERIC(ibz_sqrt) +#define ibz_sqrt_floor SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_floor) +#define ibz_sqrt_mod_p SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_mod_p) +#define ibz_sub SQISIGN_NAMESPACE_GENERIC(ibz_sub) +#define ibz_swap SQISIGN_NAMESPACE_GENERIC(ibz_swap) +#define ibz_to_digits SQISIGN_NAMESPACE_GENERIC(ibz_to_digits) +#define ibz_two_adic SQISIGN_NAMESPACE_GENERIC(ibz_two_adic) + +// Namespacing symbols exported from integers.c: +#undef ibz_cornacchia_prime +#undef ibz_generate_random_prime + +#define ibz_cornacchia_prime SQISIGN_NAMESPACE_GENERIC(ibz_cornacchia_prime) +#define ibz_generate_random_prime SQISIGN_NAMESPACE_GENERIC(ibz_generate_random_prime) + +// Namespacing symbols exported from isog_chains.c: +#undef ec_eval_even +#undef ec_eval_small_chain +#undef ec_iso_eval +#undef ec_isomorphism + +#define ec_eval_even SQISIGN_NAMESPACE(ec_eval_even) +#define ec_eval_small_chain SQISIGN_NAMESPACE(ec_eval_small_chain) +#define ec_iso_eval SQISIGN_NAMESPACE(ec_iso_eval) +#define ec_isomorphism SQISIGN_NAMESPACE(ec_isomorphism) + +// Namespacing symbols exported from keygen.c: +#undef protocols_keygen +#undef secret_key_finalize +#undef secret_key_init + +#define protocols_keygen SQISIGN_NAMESPACE(protocols_keygen) +#define secret_key_finalize SQISIGN_NAMESPACE(secret_key_finalize) +#define secret_key_init SQISIGN_NAMESPACE(secret_key_init) + +// Namespacing symbols exported from l2.c: +#undef quat_lattice_lll +#undef quat_lll_core + +#define quat_lattice_lll SQISIGN_NAMESPACE_GENERIC(quat_lattice_lll) +#define quat_lll_core SQISIGN_NAMESPACE_GENERIC(quat_lll_core) + +// Namespacing symbols exported from lat_ball.c: +#undef quat_lattice_bound_parallelogram +#undef quat_lattice_sample_from_ball + +#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE_GENERIC(quat_lattice_bound_parallelogram) +#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE_GENERIC(quat_lattice_sample_from_ball) + +// Namespacing symbols exported from lattice.c: +#undef quat_lattice_add +#undef quat_lattice_alg_elem_mul +#undef quat_lattice_conjugate_without_hnf +#undef quat_lattice_contains +#undef quat_lattice_dual_without_hnf +#undef quat_lattice_equal +#undef quat_lattice_gram +#undef quat_lattice_hnf +#undef quat_lattice_inclusion +#undef quat_lattice_index +#undef quat_lattice_intersect +#undef quat_lattice_mat_alg_coord_mul_without_hnf +#undef quat_lattice_mul +#undef quat_lattice_reduce_denom + +#define quat_lattice_add SQISIGN_NAMESPACE_GENERIC(quat_lattice_add) +#define quat_lattice_alg_elem_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_alg_elem_mul) +#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_conjugate_without_hnf) +#define quat_lattice_contains SQISIGN_NAMESPACE_GENERIC(quat_lattice_contains) +#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_dual_without_hnf) +#define quat_lattice_equal SQISIGN_NAMESPACE_GENERIC(quat_lattice_equal) +#define quat_lattice_gram SQISIGN_NAMESPACE_GENERIC(quat_lattice_gram) +#define quat_lattice_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_hnf) +#define quat_lattice_inclusion SQISIGN_NAMESPACE_GENERIC(quat_lattice_inclusion) +#define quat_lattice_index SQISIGN_NAMESPACE_GENERIC(quat_lattice_index) +#define quat_lattice_intersect 
SQISIGN_NAMESPACE_GENERIC(quat_lattice_intersect) +#define quat_lattice_mat_alg_coord_mul_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_mat_alg_coord_mul_without_hnf) +#define quat_lattice_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_mul) +#define quat_lattice_reduce_denom SQISIGN_NAMESPACE_GENERIC(quat_lattice_reduce_denom) + +// Namespacing symbols exported from lll_applications.c: +#undef quat_lideal_lideal_mul_reduced +#undef quat_lideal_prime_norm_reduced_equivalent +#undef quat_lideal_reduce_basis + +#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE_GENERIC(quat_lideal_lideal_mul_reduced) +#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE_GENERIC(quat_lideal_prime_norm_reduced_equivalent) +#define quat_lideal_reduce_basis SQISIGN_NAMESPACE_GENERIC(quat_lideal_reduce_basis) + +// Namespacing symbols exported from lll_verification.c: +#undef ibq_vec_4_copy_ibz +#undef quat_lll_bilinear +#undef quat_lll_gram_schmidt_transposed_with_ibq +#undef quat_lll_set_ibq_parameters +#undef quat_lll_verify + +#define ibq_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_copy_ibz) +#define quat_lll_bilinear SQISIGN_NAMESPACE_GENERIC(quat_lll_bilinear) +#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE_GENERIC(quat_lll_gram_schmidt_transposed_with_ibq) +#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE_GENERIC(quat_lll_set_ibq_parameters) +#define quat_lll_verify SQISIGN_NAMESPACE_GENERIC(quat_lll_verify) + +// Namespacing symbols exported from mem.c: +#undef sqisign_secure_clear +#undef sqisign_secure_free + +#define sqisign_secure_clear SQISIGN_NAMESPACE_GENERIC(sqisign_secure_clear) +#define sqisign_secure_free SQISIGN_NAMESPACE_GENERIC(sqisign_secure_free) + +// Namespacing symbols exported from mp.c: +#undef MUL +#undef mp_add +#undef mp_compare +#undef mp_copy +#undef mp_inv_2e +#undef mp_invert_matrix +#undef mp_is_one +#undef mp_is_zero +#undef mp_mod_2exp +#undef mp_mul +#undef mp_mul2 +#undef mp_neg +#undef mp_print +#undef mp_shiftl +#undef mp_shiftr +#undef mp_sub +#undef multiple_mp_shiftl +#undef select_ct +#undef swap_ct + +#define MUL SQISIGN_NAMESPACE_GENERIC(MUL) +#define mp_add SQISIGN_NAMESPACE_GENERIC(mp_add) +#define mp_compare SQISIGN_NAMESPACE_GENERIC(mp_compare) +#define mp_copy SQISIGN_NAMESPACE_GENERIC(mp_copy) +#define mp_inv_2e SQISIGN_NAMESPACE_GENERIC(mp_inv_2e) +#define mp_invert_matrix SQISIGN_NAMESPACE_GENERIC(mp_invert_matrix) +#define mp_is_one SQISIGN_NAMESPACE_GENERIC(mp_is_one) +#define mp_is_zero SQISIGN_NAMESPACE_GENERIC(mp_is_zero) +#define mp_mod_2exp SQISIGN_NAMESPACE_GENERIC(mp_mod_2exp) +#define mp_mul SQISIGN_NAMESPACE_GENERIC(mp_mul) +#define mp_mul2 SQISIGN_NAMESPACE_GENERIC(mp_mul2) +#define mp_neg SQISIGN_NAMESPACE_GENERIC(mp_neg) +#define mp_print SQISIGN_NAMESPACE_GENERIC(mp_print) +#define mp_shiftl SQISIGN_NAMESPACE_GENERIC(mp_shiftl) +#define mp_shiftr SQISIGN_NAMESPACE_GENERIC(mp_shiftr) +#define mp_sub SQISIGN_NAMESPACE_GENERIC(mp_sub) +#define multiple_mp_shiftl SQISIGN_NAMESPACE_GENERIC(multiple_mp_shiftl) +#define select_ct SQISIGN_NAMESPACE_GENERIC(select_ct) +#define swap_ct SQISIGN_NAMESPACE_GENERIC(swap_ct) + +// Namespacing symbols exported from normeq.c: +#undef quat_change_to_O0_basis +#undef quat_lattice_O0_set +#undef quat_lattice_O0_set_extremal +#undef quat_order_elem_create +#undef quat_represent_integer +#undef quat_sampling_random_ideal_O0_given_norm + +#define quat_change_to_O0_basis SQISIGN_NAMESPACE_GENERIC(quat_change_to_O0_basis) +#define quat_lattice_O0_set 
SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set) +#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set_extremal) +#define quat_order_elem_create SQISIGN_NAMESPACE_GENERIC(quat_order_elem_create) +#define quat_represent_integer SQISIGN_NAMESPACE_GENERIC(quat_represent_integer) +#define quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE_GENERIC(quat_sampling_random_ideal_O0_given_norm) + +// Namespacing symbols exported from printer.c: +#undef ibz_mat_2x2_print +#undef ibz_mat_4x4_print +#undef ibz_vec_2_print +#undef ibz_vec_4_print +#undef quat_alg_elem_print +#undef quat_alg_print +#undef quat_lattice_print +#undef quat_left_ideal_print + +#define ibz_mat_2x2_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_print) +#define ibz_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_print) +#define ibz_vec_2_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_print) +#define ibz_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_print) +#define quat_alg_elem_print SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_print) +#define quat_alg_print SQISIGN_NAMESPACE_GENERIC(quat_alg_print) +#define quat_lattice_print SQISIGN_NAMESPACE_GENERIC(quat_lattice_print) +#define quat_left_ideal_print SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_print) + +// Namespacing symbols exported from random_input_generation.c: +#undef quat_test_input_random_ideal_generation +#undef quat_test_input_random_ideal_lattice_generation +#undef quat_test_input_random_lattice_generation + +#define quat_test_input_random_ideal_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_generation) +#define quat_test_input_random_ideal_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_lattice_generation) +#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_lattice_generation) + +// Namespacing symbols exported from rationals.c: +#undef ibq_abs +#undef ibq_add +#undef ibq_cmp +#undef ibq_copy +#undef ibq_finalize +#undef ibq_init +#undef ibq_inv +#undef ibq_is_ibz +#undef ibq_is_one +#undef ibq_is_zero +#undef ibq_mat_4x4_finalize +#undef ibq_mat_4x4_init +#undef ibq_mat_4x4_print +#undef ibq_mul +#undef ibq_neg +#undef ibq_reduce +#undef ibq_set +#undef ibq_sub +#undef ibq_to_ibz +#undef ibq_vec_4_finalize +#undef ibq_vec_4_init +#undef ibq_vec_4_print + +#define ibq_abs SQISIGN_NAMESPACE_GENERIC(ibq_abs) +#define ibq_add SQISIGN_NAMESPACE_GENERIC(ibq_add) +#define ibq_cmp SQISIGN_NAMESPACE_GENERIC(ibq_cmp) +#define ibq_copy SQISIGN_NAMESPACE_GENERIC(ibq_copy) +#define ibq_finalize SQISIGN_NAMESPACE_GENERIC(ibq_finalize) +#define ibq_init SQISIGN_NAMESPACE_GENERIC(ibq_init) +#define ibq_inv SQISIGN_NAMESPACE_GENERIC(ibq_inv) +#define ibq_is_ibz SQISIGN_NAMESPACE_GENERIC(ibq_is_ibz) +#define ibq_is_one SQISIGN_NAMESPACE_GENERIC(ibq_is_one) +#define ibq_is_zero SQISIGN_NAMESPACE_GENERIC(ibq_is_zero) +#define ibq_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_finalize) +#define ibq_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_init) +#define ibq_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_print) +#define ibq_mul SQISIGN_NAMESPACE_GENERIC(ibq_mul) +#define ibq_neg SQISIGN_NAMESPACE_GENERIC(ibq_neg) +#define ibq_reduce SQISIGN_NAMESPACE_GENERIC(ibq_reduce) +#define ibq_set SQISIGN_NAMESPACE_GENERIC(ibq_set) +#define ibq_sub SQISIGN_NAMESPACE_GENERIC(ibq_sub) +#define ibq_to_ibz SQISIGN_NAMESPACE_GENERIC(ibq_to_ibz) +#define ibq_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_finalize) +#define ibq_vec_4_init 
SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_init) +#define ibq_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_print) + +// Namespacing symbols exported from sign.c: +#undef protocols_sign + +#define protocols_sign SQISIGN_NAMESPACE(protocols_sign) + +// Namespacing symbols exported from sqisign.c: +#undef sqisign_keypair +#undef sqisign_open +#undef sqisign_sign +#undef sqisign_sign_signature +#undef sqisign_verify +#undef sqisign_verify_signature + +#define sqisign_keypair SQISIGN_NAMESPACE(sqisign_keypair) +#define sqisign_open SQISIGN_NAMESPACE(sqisign_open) +#define sqisign_sign SQISIGN_NAMESPACE(sqisign_sign) +#define sqisign_sign_signature SQISIGN_NAMESPACE(sqisign_sign_signature) +#define sqisign_verify SQISIGN_NAMESPACE(sqisign_verify) +#define sqisign_verify_signature SQISIGN_NAMESPACE(sqisign_verify_signature) + +// Namespacing symbols exported from theta_isogenies.c: +#undef theta_chain_compute_and_eval +#undef theta_chain_compute_and_eval_randomized +#undef theta_chain_compute_and_eval_verify + +#define theta_chain_compute_and_eval SQISIGN_NAMESPACE(theta_chain_compute_and_eval) +#define theta_chain_compute_and_eval_randomized SQISIGN_NAMESPACE(theta_chain_compute_and_eval_randomized) +#define theta_chain_compute_and_eval_verify SQISIGN_NAMESPACE(theta_chain_compute_and_eval_verify) + +// Namespacing symbols exported from theta_structure.c: +#undef double_iter +#undef double_point +#undef is_product_theta_point +#undef theta_precomputation + +#define double_iter SQISIGN_NAMESPACE(double_iter) +#define double_point SQISIGN_NAMESPACE(double_point) +#define is_product_theta_point SQISIGN_NAMESPACE(is_product_theta_point) +#define theta_precomputation SQISIGN_NAMESPACE(theta_precomputation) + +// Namespacing symbols exported from verify.c: +#undef protocols_verify + +#define protocols_verify SQISIGN_NAMESPACE(protocols_verify) + +// Namespacing symbols exported from xeval.c: +#undef xeval_2 +#undef xeval_2_singular +#undef xeval_4 + +#define xeval_2 SQISIGN_NAMESPACE(xeval_2) +#define xeval_2_singular SQISIGN_NAMESPACE(xeval_2_singular) +#define xeval_4 SQISIGN_NAMESPACE(xeval_4) + +// Namespacing symbols exported from xisog.c: +#undef xisog_2 +#undef xisog_2_singular +#undef xisog_4 + +#define xisog_2 SQISIGN_NAMESPACE(xisog_2) +#define xisog_2_singular SQISIGN_NAMESPACE(xisog_2_singular) +#define xisog_4 SQISIGN_NAMESPACE(xisog_4) + +// Namespacing symbols from precomp: +#undef BASIS_E0_PX +#undef BASIS_E0_QX +#undef p_cofactor_for_2f +#undef CURVES_WITH_ENDOMORPHISMS +#undef EVEN_INDEX +#undef CHI_EVAL +#undef FP2_CONSTANTS +#undef SPLITTING_TRANSFORMS +#undef NORMALIZATION_TRANSFORMS +#undef QUAT_prime_cofactor +#undef QUATALG_PINFTY +#undef EXTREMAL_ORDERS +#undef CONNECTING_IDEALS +#undef CONJUGATING_ELEMENTS +#undef TWO_TO_SECURITY_BITS +#undef TORSION_PLUS_2POWER +#undef SEC_DEGREE +#undef COM_DEGREE + +#define BASIS_E0_PX SQISIGN_NAMESPACE(BASIS_E0_PX) +#define BASIS_E0_QX SQISIGN_NAMESPACE(BASIS_E0_QX) +#define p_cofactor_for_2f SQISIGN_NAMESPACE(p_cofactor_for_2f) +#define CURVES_WITH_ENDOMORPHISMS SQISIGN_NAMESPACE(CURVES_WITH_ENDOMORPHISMS) +#define EVEN_INDEX SQISIGN_NAMESPACE(EVEN_INDEX) +#define CHI_EVAL SQISIGN_NAMESPACE(CHI_EVAL) +#define FP2_CONSTANTS SQISIGN_NAMESPACE(FP2_CONSTANTS) +#define SPLITTING_TRANSFORMS SQISIGN_NAMESPACE(SPLITTING_TRANSFORMS) +#define NORMALIZATION_TRANSFORMS SQISIGN_NAMESPACE(NORMALIZATION_TRANSFORMS) +#define QUAT_prime_cofactor SQISIGN_NAMESPACE(QUAT_prime_cofactor) +#define QUATALG_PINFTY SQISIGN_NAMESPACE(QUATALG_PINFTY) +#define 
EXTREMAL_ORDERS SQISIGN_NAMESPACE(EXTREMAL_ORDERS) +#define CONNECTING_IDEALS SQISIGN_NAMESPACE(CONNECTING_IDEALS) +#define CONJUGATING_ELEMENTS SQISIGN_NAMESPACE(CONJUGATING_ELEMENTS) +#define TWO_TO_SECURITY_BITS SQISIGN_NAMESPACE(TWO_TO_SECURITY_BITS) +#define TORSION_PLUS_2POWER SQISIGN_NAMESPACE(TORSION_PLUS_2POWER) +#define SEC_DEGREE SQISIGN_NAMESPACE(SEC_DEGREE) +#define COM_DEGREE SQISIGN_NAMESPACE(COM_DEGREE) + + +#endif + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign_parameters.txt b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign_parameters.txt new file mode 100644 index 0000000000..8a1a26a502 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign_parameters.txt @@ -0,0 +1,3 @@ +lvl = 1 +p = 0x4ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff +num_orders = 7 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/theta_isogenies.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/theta_isogenies.c new file mode 100644 index 0000000000..478a9ab25b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/theta_isogenies.c @@ -0,0 +1,1283 @@ +#include "theta_isogenies.h" +#include +#include +#include +#include +#include + +// Select a base change matrix in constant time, with M1 a regular +// base change matrix and M2 a precomputed base change matrix +// If option = 0 then M <- M1, else if option = 0xFF...FF then M <- M2 +static inline void +select_base_change_matrix(basis_change_matrix_t *M, + const basis_change_matrix_t *M1, + const precomp_basis_change_matrix_t *M2, + const uint32_t option) +{ + for (int i = 0; i < 4; i++) + for (int j = 0; j < 4; j++) + fp2_select(&M->m[i][j], &M1->m[i][j], &FP2_CONSTANTS[M2->m[i][j]], option); +} + +// Set a regular base change matrix from a precomputed one +static inline void +set_base_change_matrix_from_precomp(basis_change_matrix_t *res, const precomp_basis_change_matrix_t *M) +{ + for (int i = 0; i < 4; i++) + for (int j = 0; j < 4; j++) + res->m[i][j] = FP2_CONSTANTS[M->m[i][j]]; +} + +static inline void +choose_index_theta_point(fp2_t *res, int ind, const theta_point_t *T) +{ + const fp2_t *src = NULL; + switch (ind % 4) { + case 0: + src = &T->x; + break; + case 1: + src = &T->y; + break; + case 2: + src = &T->z; + break; + case 3: + src = &T->t; + break; + default: + assert(0); + } + fp2_copy(res, src); +} + +// same as apply_isomorphism method but more efficient when the t component of P is zero. 
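+// The image is simply the matrix-vector product res = M * (P.x, P.y, P.z, P.t)^T over GF(p^2).
+// When Pt_not_zero is false the fourth column of M is skipped entirely, saving four
+// multiplications and four additions; gluing_eval_point uses this for a point whose t
+// coordinate has just been set to zero.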
+static void +apply_isomorphism_general(theta_point_t *res, + const basis_change_matrix_t *M, + const theta_point_t *P, + const bool Pt_not_zero) +{ + fp2_t x1; + theta_point_t temp; + + fp2_mul(&temp.x, &P->x, &M->m[0][0]); + fp2_mul(&x1, &P->y, &M->m[0][1]); + fp2_add(&temp.x, &temp.x, &x1); + fp2_mul(&x1, &P->z, &M->m[0][2]); + fp2_add(&temp.x, &temp.x, &x1); + + fp2_mul(&temp.y, &P->x, &M->m[1][0]); + fp2_mul(&x1, &P->y, &M->m[1][1]); + fp2_add(&temp.y, &temp.y, &x1); + fp2_mul(&x1, &P->z, &M->m[1][2]); + fp2_add(&temp.y, &temp.y, &x1); + + fp2_mul(&temp.z, &P->x, &M->m[2][0]); + fp2_mul(&x1, &P->y, &M->m[2][1]); + fp2_add(&temp.z, &temp.z, &x1); + fp2_mul(&x1, &P->z, &M->m[2][2]); + fp2_add(&temp.z, &temp.z, &x1); + + fp2_mul(&temp.t, &P->x, &M->m[3][0]); + fp2_mul(&x1, &P->y, &M->m[3][1]); + fp2_add(&temp.t, &temp.t, &x1); + fp2_mul(&x1, &P->z, &M->m[3][2]); + fp2_add(&temp.t, &temp.t, &x1); + + if (Pt_not_zero) { + fp2_mul(&x1, &P->t, &M->m[0][3]); + fp2_add(&temp.x, &temp.x, &x1); + + fp2_mul(&x1, &P->t, &M->m[1][3]); + fp2_add(&temp.y, &temp.y, &x1); + + fp2_mul(&x1, &P->t, &M->m[2][3]); + fp2_add(&temp.z, &temp.z, &x1); + + fp2_mul(&x1, &P->t, &M->m[3][3]); + fp2_add(&temp.t, &temp.t, &x1); + } + + fp2_copy(&res->x, &temp.x); + fp2_copy(&res->y, &temp.y); + fp2_copy(&res->z, &temp.z); + fp2_copy(&res->t, &temp.t); +} + +static void +apply_isomorphism(theta_point_t *res, const basis_change_matrix_t *M, const theta_point_t *P) +{ + apply_isomorphism_general(res, M, P, true); +} + +// set res = M1 * M2 with matrix multiplication +static void +base_change_matrix_multiplication(basis_change_matrix_t *res, + const basis_change_matrix_t *M1, + const basis_change_matrix_t *M2) +{ + basis_change_matrix_t tmp; + fp2_t sum, m_ik, m_kj; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + fp2_set_zero(&sum); + for (int k = 0; k < 4; k++) { + m_ik = M1->m[i][k]; + m_kj = M2->m[k][j]; + fp2_mul(&m_ik, &m_ik, &m_kj); + fp2_add(&sum, &sum, &m_ik); + } + tmp.m[i][j] = sum; + } + } + *res = tmp; +} + +// compute the theta_point corresponding to the couple of point T on an elliptic product +static void +base_change(theta_point_t *out, const theta_gluing_t *phi, const theta_couple_point_t *T) +{ + theta_point_t null_point; + + // null_point = (a : b : c : d) + // a = P1.x P2.x, b = P1.x P2.z, c = P1.z P2.x, d = P1.z P2.z + fp2_mul(&null_point.x, &T->P1.x, &T->P2.x); + fp2_mul(&null_point.y, &T->P1.x, &T->P2.z); + fp2_mul(&null_point.z, &T->P2.x, &T->P1.z); + fp2_mul(&null_point.t, &T->P1.z, &T->P2.z); + + // Apply the basis change + apply_isomorphism(out, &phi->M, &null_point); +} + +static void +action_by_translation_z_and_det(fp2_t *z_inv, fp2_t *det_inv, const ec_point_t *P4, const ec_point_t *P2) +{ + // Store the Z-coordinate to invert + fp2_copy(z_inv, &P4->z); + + // Then collect detij = xij wij - uij zij + fp2_t tmp; + fp2_mul(det_inv, &P4->x, &P2->z); + fp2_mul(&tmp, &P4->z, &P2->x); + fp2_sub(det_inv, det_inv, &tmp); +} + +static void +action_by_translation_compute_matrix(translation_matrix_t *G, + const ec_point_t *P4, + const ec_point_t *P2, + const fp2_t *z_inv, + const fp2_t *det_inv) +{ + fp2_t tmp; + + // Gi.g10 = uij xij /detij - xij/zij + fp2_mul(&tmp, &P4->x, z_inv); + fp2_mul(&G->g10, &P4->x, &P2->x); + fp2_mul(&G->g10, &G->g10, det_inv); + fp2_sub(&G->g10, &G->g10, &tmp); + + // Gi.g11 = uij zij * detij + fp2_mul(&G->g11, &P2->x, det_inv); + fp2_mul(&G->g11, &G->g11, &P4->z); + + // Gi.g00 = -Gi.g11 + fp2_neg(&G->g00, &G->g11); + + // Gi.g01 = - wij zij detij + 
fp2_mul(&G->g01, &P2->z, det_inv);
+    fp2_mul(&G->g01, &G->g01, &P4->z);
+    fp2_neg(&G->g01, &G->g01);
+}
+
+// Returns 1 if the basis is as expected and 0 otherwise
+// We only expect this to fail for malformed signatures, so
+// do not require this to run in constant time.
+static int
+verify_two_torsion(const theta_couple_point_t *K1_2, const theta_couple_point_t *K2_2, const theta_couple_curve_t *E12)
+{
+    // First check if any point in K1_2 or K2_2 is zero, if they are then the points did not have
+    // order 8 when we started gluing
+    if (ec_is_zero(&K1_2->P1) | ec_is_zero(&K1_2->P2) | ec_is_zero(&K2_2->P1) | ec_is_zero(&K2_2->P2)) {
+        return 0;
+    }
+
+    // Now ensure that P1, Q1 and P2, Q2 are independent. For points of order two this means
+    // that they're not the same
+    if (ec_is_equal(&K1_2->P1, &K2_2->P1) | ec_is_equal(&K1_2->P2, &K2_2->P2)) {
+        return 0;
+    }
+
+    // Finally, double the points to ensure they all have order exactly 2
+    theta_couple_point_t O1, O2;
+    double_couple_point(&O1, K1_2, E12);
+    double_couple_point(&O2, K2_2, E12);
+    // If this check fails then the points had order strictly greater than 2, and the kernel is malformed.
+    if (!(ec_is_zero(&O1.P1) & ec_is_zero(&O1.P2) & ec_is_zero(&O2.P1) & ec_is_zero(&O2.P2))) {
+        return 0;
+    }
+
+    return 1;
+}
+
+// Computes the action by translation for four points
+// (P1, P2) and (Q1, Q2) on E1 x E2 simultaneously to
+// save on inversions.
+// Returns 0 if any of Pi or Qi does not have order exactly 4
+// and 1 otherwise
+static int
+action_by_translation(translation_matrix_t *Gi,
+                      const theta_couple_point_t *K1_4,
+                      const theta_couple_point_t *K2_4,
+                      const theta_couple_curve_t *E12)
+{
+    // Compute points of order 2 from Ki_4
+    theta_couple_point_t K1_2, K2_2;
+    double_couple_point(&K1_2, K1_4, E12);
+    double_couple_point(&K2_2, K2_4, E12);
+
+    if (!verify_two_torsion(&K1_2, &K2_2, E12)) {
+        return 0;
+    }
+
+    // We need to invert four Z coordinates and
+    // four determinants which we do with batched
+    // inversion
+    fp2_t inverses[8];
+    action_by_translation_z_and_det(&inverses[0], &inverses[4], &K1_4->P1, &K1_2.P1);
+    action_by_translation_z_and_det(&inverses[1], &inverses[5], &K1_4->P2, &K1_2.P2);
+    action_by_translation_z_and_det(&inverses[2], &inverses[6], &K2_4->P1, &K2_2.P1);
+    action_by_translation_z_and_det(&inverses[3], &inverses[7], &K2_4->P2, &K2_2.P2);
+
+    fp2_batched_inv(inverses, 8);
+    if (fp2_is_zero(&inverses[0]))
+        return 0; // something was wrong with our input (which somehow was not caught by
+                  // verify_two_torsion)
+
+    action_by_translation_compute_matrix(&Gi[0], &K1_4->P1, &K1_2.P1, &inverses[0], &inverses[4]);
+    action_by_translation_compute_matrix(&Gi[1], &K1_4->P2, &K1_2.P2, &inverses[1], &inverses[5]);
+    action_by_translation_compute_matrix(&Gi[2], &K2_4->P1, &K2_2.P1, &inverses[2], &inverses[6]);
+    action_by_translation_compute_matrix(&Gi[3], &K2_4->P2, &K2_2.P2, &inverses[3], &inverses[7]);
+
+    return 1;
+}
+
+// Given the appropriate four torsion, computes the
+// change of basis to compute the correct theta null
+// point.
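+// The 4x4 matrix is assembled row by row: the first row is a trace-like combination of the
+// 2x2 translation matrices Gi returned by action_by_translation(), and the remaining rows
+// are obtained by acting on it with (0, K2_4.P2), (K1_4.P1, 0) and (K1_4.P1, K2_4.P2).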
+// Returns 0 if the order of K1_4 or K2_4 is not 4 +static int +gluing_change_of_basis(basis_change_matrix_t *M, + const theta_couple_point_t *K1_4, + const theta_couple_point_t *K2_4, + const theta_couple_curve_t *E12) +{ + // Compute the four 2x2 matrices for the action by translation + // on the four points: + translation_matrix_t Gi[4]; + if (!action_by_translation(Gi, K1_4, K2_4, E12)) + return 0; + + // Computation of the 4x4 matrix from Mij + // t001, t101 (resp t002, t102) first column of M11 * M21 (resp M12 * M22) + fp2_t t001, t101, t002, t102, tmp; + + fp2_mul(&t001, &Gi[0].g00, &Gi[2].g00); + fp2_mul(&tmp, &Gi[0].g01, &Gi[2].g10); + fp2_add(&t001, &t001, &tmp); + + fp2_mul(&t101, &Gi[0].g10, &Gi[2].g00); + fp2_mul(&tmp, &Gi[0].g11, &Gi[2].g10); + fp2_add(&t101, &t101, &tmp); + + fp2_mul(&t002, &Gi[1].g00, &Gi[3].g00); + fp2_mul(&tmp, &Gi[1].g01, &Gi[3].g10); + fp2_add(&t002, &t002, &tmp); + + fp2_mul(&t102, &Gi[1].g10, &Gi[3].g00); + fp2_mul(&tmp, &Gi[1].g11, &Gi[3].g10); + fp2_add(&t102, &t102, &tmp); + + // trace for the first row + fp2_set_one(&M->m[0][0]); + fp2_mul(&tmp, &t001, &t002); + fp2_add(&M->m[0][0], &M->m[0][0], &tmp); + fp2_mul(&tmp, &Gi[2].g00, &Gi[3].g00); + fp2_add(&M->m[0][0], &M->m[0][0], &tmp); + fp2_mul(&tmp, &Gi[0].g00, &Gi[1].g00); + fp2_add(&M->m[0][0], &M->m[0][0], &tmp); + + fp2_mul(&M->m[0][1], &t001, &t102); + fp2_mul(&tmp, &Gi[2].g00, &Gi[3].g10); + fp2_add(&M->m[0][1], &M->m[0][1], &tmp); + fp2_mul(&tmp, &Gi[0].g00, &Gi[1].g10); + fp2_add(&M->m[0][1], &M->m[0][1], &tmp); + + fp2_mul(&M->m[0][2], &t101, &t002); + fp2_mul(&tmp, &Gi[2].g10, &Gi[3].g00); + fp2_add(&M->m[0][2], &M->m[0][2], &tmp); + fp2_mul(&tmp, &Gi[0].g10, &Gi[1].g00); + fp2_add(&M->m[0][2], &M->m[0][2], &tmp); + + fp2_mul(&M->m[0][3], &t101, &t102); + fp2_mul(&tmp, &Gi[2].g10, &Gi[3].g10); + fp2_add(&M->m[0][3], &M->m[0][3], &tmp); + fp2_mul(&tmp, &Gi[0].g10, &Gi[1].g10); + fp2_add(&M->m[0][3], &M->m[0][3], &tmp); + + // Compute the action of (0,out.K2_4.P2) for the second row + fp2_mul(&tmp, &Gi[3].g01, &M->m[0][1]); + fp2_mul(&M->m[1][0], &Gi[3].g00, &M->m[0][0]); + fp2_add(&M->m[1][0], &M->m[1][0], &tmp); + + fp2_mul(&tmp, &Gi[3].g11, &M->m[0][1]); + fp2_mul(&M->m[1][1], &Gi[3].g10, &M->m[0][0]); + fp2_add(&M->m[1][1], &M->m[1][1], &tmp); + + fp2_mul(&tmp, &Gi[3].g01, &M->m[0][3]); + fp2_mul(&M->m[1][2], &Gi[3].g00, &M->m[0][2]); + fp2_add(&M->m[1][2], &M->m[1][2], &tmp); + + fp2_mul(&tmp, &Gi[3].g11, &M->m[0][3]); + fp2_mul(&M->m[1][3], &Gi[3].g10, &M->m[0][2]); + fp2_add(&M->m[1][3], &M->m[1][3], &tmp); + + // compute the action of (K1_4.P1,0) for the third row + fp2_mul(&tmp, &Gi[0].g01, &M->m[0][2]); + fp2_mul(&M->m[2][0], &Gi[0].g00, &M->m[0][0]); + fp2_add(&M->m[2][0], &M->m[2][0], &tmp); + + fp2_mul(&tmp, &Gi[0].g01, &M->m[0][3]); + fp2_mul(&M->m[2][1], &Gi[0].g00, &M->m[0][1]); + fp2_add(&M->m[2][1], &M->m[2][1], &tmp); + + fp2_mul(&tmp, &Gi[0].g11, &M->m[0][2]); + fp2_mul(&M->m[2][2], &Gi[0].g10, &M->m[0][0]); + fp2_add(&M->m[2][2], &M->m[2][2], &tmp); + + fp2_mul(&tmp, &Gi[0].g11, &M->m[0][3]); + fp2_mul(&M->m[2][3], &Gi[0].g10, &M->m[0][1]); + fp2_add(&M->m[2][3], &M->m[2][3], &tmp); + + // compute the action of (K1_4.P1,K2_4.P2) for the final row + fp2_mul(&tmp, &Gi[0].g01, &M->m[1][2]); + fp2_mul(&M->m[3][0], &Gi[0].g00, &M->m[1][0]); + fp2_add(&M->m[3][0], &M->m[3][0], &tmp); + + fp2_mul(&tmp, &Gi[0].g01, &M->m[1][3]); + fp2_mul(&M->m[3][1], &Gi[0].g00, &M->m[1][1]); + fp2_add(&M->m[3][1], &M->m[3][1], &tmp); + + fp2_mul(&tmp, &Gi[0].g11, &M->m[1][2]); + 
fp2_mul(&M->m[3][2], &Gi[0].g10, &M->m[1][0]);
+    fp2_add(&M->m[3][2], &M->m[3][2], &tmp);
+
+    fp2_mul(&tmp, &Gi[0].g11, &M->m[1][3]);
+    fp2_mul(&M->m[3][3], &Gi[0].g10, &M->m[1][1]);
+    fp2_add(&M->m[3][3], &M->m[3][3], &tmp);
+
+    return 1;
+}
+
+/**
+ * @brief Compute the gluing isogeny from an elliptic product
+ *
+ * @param out Output: the theta_gluing
+ * @param E12 an elliptic curve product E1 x E2
+ * @param xyK1_8 a couple point of order 8 on E1 x E2
+ * @param xyK2_8 a couple point of order 8 on E1 x E2
+ * @param verify when true, perform extra sanity checks on the supplied 8-torsion
+ *
+ * out : E1xE2 -> A of kernel [4](xyK1_8, xyK2_8)
+ * if the kernel supplied has the incorrect order, or gluing seems malformed,
+ * returns 0, otherwise returns 1.
+ */
+static int
+gluing_compute(theta_gluing_t *out,
+               const theta_couple_curve_t *E12,
+               const theta_couple_jac_point_t *xyK1_8,
+               const theta_couple_jac_point_t *xyK2_8,
+               bool verify)
+{
+    // Ensure that we have been given the eight torsion
+#ifndef NDEBUG
+    {
+        int check = test_jac_order_twof(&xyK1_8->P1, &E12->E1, 3);
+        if (!check)
+            debug_print("xyK1_8->P1 does not have order 8");
+        check = test_jac_order_twof(&xyK2_8->P1, &E12->E1, 3);
+        if (!check)
+            debug_print("xyK2_8->P1 does not have order 8");
+        check = test_jac_order_twof(&xyK1_8->P2, &E12->E2, 3);
+        if (!check)
+            debug_print("xyK1_8->P2 does not have order 8");
+        check = test_jac_order_twof(&xyK2_8->P2, &E12->E2, 3);
+        if (!check)
+            debug_print("xyK2_8->P2 does not have order 8");
+    }
+#endif
+
+    out->xyK1_8 = *xyK1_8;
+    out->domain = *E12;
+
+    // Given points in E[8] x E[8] we need the four torsion below
+    theta_couple_jac_point_t xyK1_4, xyK2_4;
+
+    double_couple_jac_point(&xyK1_4, xyK1_8, E12);
+    double_couple_jac_point(&xyK2_4, xyK2_8, E12);
+
+    // Convert from (X:Y:Z) coordinates to (X:Z)
+    theta_couple_point_t K1_8, K2_8;
+    theta_couple_point_t K1_4, K2_4;
+
+    couple_jac_to_xz(&K1_8, xyK1_8);
+    couple_jac_to_xz(&K2_8, xyK2_8);
+    couple_jac_to_xz(&K1_4, &xyK1_4);
+    couple_jac_to_xz(&K2_4, &xyK2_4);
+
+    // Set the basis change matrix. If we have not been given a valid K[8] for this computation,
+    // gluing_change_of_basis will detect this and return 0
+    if (!gluing_change_of_basis(&out->M, &K1_4, &K2_4, E12)) {
+        debug_print("gluing failed as kernel does not have correct order");
+        return 0;
+    }
+
+    // apply the base change to the kernel
+    theta_point_t TT1, TT2;
+
+    base_change(&TT1, out, &K1_8);
+    base_change(&TT2, out, &K2_8);
+
+    // compute the codomain
+    to_squared_theta(&TT1, &TT1);
+    to_squared_theta(&TT2, &TT2);
+
+    // If the kernel is well formed then TT1.t and TT2.t are zero;
+    // if they are not, we exit early as the signature we are validating
+    // is probably malformed
+    if (!(fp2_is_zero(&TT1.t) & fp2_is_zero(&TT2.t))) {
+        debug_print("gluing failed TT1.t or TT2.t is not zero");
+        return 0;
+    }
+    // Test our projective factors are non zero
+    if (fp2_is_zero(&TT1.x) | fp2_is_zero(&TT2.x) | fp2_is_zero(&TT1.y) | fp2_is_zero(&TT2.z) | fp2_is_zero(&TT1.z))
+        return 0; // invalid input
+
+    // Projective factor: Ax
+    fp2_mul(&out->codomain.x, &TT1.x, &TT2.x);
+    fp2_mul(&out->codomain.y, &TT1.y, &TT2.x);
+    fp2_mul(&out->codomain.z, &TT1.x, &TT2.z);
+    fp2_set_zero(&out->codomain.t);
+    // Projective factor: ABCxz
+    fp2_mul(&out->precomputation.x, &TT1.y, &TT2.z);
+    fp2_copy(&out->precomputation.y, &out->codomain.z);
+    fp2_copy(&out->precomputation.z, &out->codomain.y);
+    fp2_set_zero(&out->precomputation.t);
+
+    // Compute the two components of phi(K1_8) = (x:x:y:y).
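+    // Only these two values are kept: since the image is (x:x:y:y), the swapped pair (y:y:x:x)
+    // acts as its projective inverse, which gluing_eval_point uses to normalise evaluated points.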
+ fp2_mul(&out->imageK1_8.x, &TT1.x, &out->precomputation.x); + fp2_mul(&out->imageK1_8.y, &TT1.z, &out->precomputation.z); + + // If K1_8 and K2_8 are our 8-torsion points, this ensures that the + // 4-torsion points [2]K1_8 and [2]K2_8 are isotropic. + if (verify) { + fp2_t t1, t2; + fp2_mul(&t1, &TT1.y, &out->precomputation.y); + if (!fp2_is_equal(&out->imageK1_8.x, &t1)) + return 0; + fp2_mul(&t1, &TT2.x, &out->precomputation.x); + fp2_mul(&t2, &TT2.z, &out->precomputation.z); + if (!fp2_is_equal(&t2, &t1)) + return 0; + } + + // compute the final codomain + hadamard(&out->codomain, &out->codomain); + return 1; +} + +// sub routine of the gluing eval +static void +gluing_eval_point(theta_point_t *image, const theta_couple_jac_point_t *P, const theta_gluing_t *phi) +{ + theta_point_t T1, T2; + add_components_t add_comp1, add_comp2; + + // Compute the cross addition components of P1+Q1 and P2+Q2 + jac_to_xz_add_components(&add_comp1, &P->P1, &phi->xyK1_8.P1, &phi->domain.E1); + jac_to_xz_add_components(&add_comp2, &P->P2, &phi->xyK1_8.P2, &phi->domain.E2); + + // Compute T1 and T2 derived from the cross addition components. + fp2_mul(&T1.x, &add_comp1.u, &add_comp2.u); // T1x = u1u2 + fp2_mul(&T2.t, &add_comp1.v, &add_comp2.v); // T2t = v1v2 + fp2_add(&T1.x, &T1.x, &T2.t); // T1x = u1u2 + v1v2 + fp2_mul(&T1.y, &add_comp1.u, &add_comp2.w); // T1y = u1w2 + fp2_mul(&T1.z, &add_comp1.w, &add_comp2.u); // T1z = w1u2 + fp2_mul(&T1.t, &add_comp1.w, &add_comp2.w); // T1t = w1w2 + fp2_add(&T2.x, &add_comp1.u, &add_comp1.v); // T2x = (u1+v1) + fp2_add(&T2.y, &add_comp2.u, &add_comp2.v); // T2y = (u2+v2) + fp2_mul(&T2.x, &T2.x, &T2.y); // T2x = (u1+v1)(u2+v2) + fp2_sub(&T2.x, &T2.x, &T1.x); // T1x = v1u2 + u1v2 + fp2_mul(&T2.y, &add_comp1.v, &add_comp2.w); // T2y = v1w2 + fp2_mul(&T2.z, &add_comp1.w, &add_comp2.v); // T2z = w1v2 + fp2_set_zero(&T2.t); // T2t = 0 + + // Apply the basis change and compute their respective square + // theta(P+Q) = M.T1 - M.T2 and theta(P-Q) = M.T1 + M.T2 + apply_isomorphism_general(&T1, &phi->M, &T1, true); + apply_isomorphism_general(&T2, &phi->M, &T2, false); + pointwise_square(&T1, &T1); + pointwise_square(&T2, &T2); + + // the difference between the two is therefore theta(P+Q)theta(P-Q) + // whose hadamard transform is then the product of the dual + // theta_points of phi(P) and phi(Q). + fp2_sub(&T1.x, &T1.x, &T2.x); + fp2_sub(&T1.y, &T1.y, &T2.y); + fp2_sub(&T1.z, &T1.z, &T2.z); + fp2_sub(&T1.t, &T1.t, &T2.t); + hadamard(&T1, &T1); + + // Compute (x, y, z, t) + // As imageK1_8 = (x:x:y:y), its inverse is (y:y:x:x). + fp2_mul(&image->x, &T1.x, &phi->imageK1_8.y); + fp2_mul(&image->y, &T1.y, &phi->imageK1_8.y); + fp2_mul(&image->z, &T1.z, &phi->imageK1_8.x); + fp2_mul(&image->t, &T1.t, &phi->imageK1_8.x); + + hadamard(image, image); +} + +// Same as gluing_eval_point but in the very special case where we already know that the point will +// have a zero coordinate at the place where the zero coordinate of the dual_theta_nullpoint would +// have made the computation difficult +static int +gluing_eval_point_special_case(theta_point_t *image, const theta_couple_point_t *P, const theta_gluing_t *phi) +{ + theta_point_t T; + + // Apply the basis change + base_change(&T, phi, P); + + // Apply the to_squared_theta transform + to_squared_theta(&T, &T); + + // This coordinate should always be 0 in a gluing because D=0. 
+ // If this is not the case, something went very wrong, so reject + if (!fp2_is_zero(&T.t)) + return 0; + + // Compute (x, y, z, t) + fp2_mul(&image->x, &T.x, &phi->precomputation.x); + fp2_mul(&image->y, &T.y, &phi->precomputation.y); + fp2_mul(&image->z, &T.z, &phi->precomputation.z); + fp2_set_zero(&image->t); + + hadamard(image, image); + return 1; +} + +/** + * @brief Evaluate a gluing isogeny from an elliptic product on a basis + * + * @param image1 Output: the theta_point of the image of the first couple of points + * @param image2 Output : the theta point of the image of the second couple of points + * @param xyT1: A pair of points (X : Y : Z) on E1E2 to glue using phi + * @param xyT2: A pair of points (X : Y : Z) on E1E2 to glue using phi + * @param phi : a gluing isogeny E1 x E2 -> A + * + **/ +static void +gluing_eval_basis(theta_point_t *image1, + theta_point_t *image2, + const theta_couple_jac_point_t *xyT1, + const theta_couple_jac_point_t *xyT2, + const theta_gluing_t *phi) +{ + gluing_eval_point(image1, xyT1, phi); + gluing_eval_point(image2, xyT2, phi); +} + +/** + * @brief Compute a (2,2) isogeny in dimension 2 in the theta_model + * + * @param out Output: the theta_isogeny + * @param A a theta null point for the domain + * @param T1_8 a point in A[8] + * @param T2_8 a point in A[8] + * @param hadamard_bool_1 a boolean used for the last two steps of the chain + * @param hadamard_bool_2 a boolean used for the last two steps of the chain + * + * out : A -> B of kernel [4](T1_8,T2_8) + * hadamard_bool_1 controls if the domain is in standard or dual coordinates + * hadamard_bool_2 controls if the codomain is in standard or dual coordinates + * verify: add extra sanity check to ensure our 8-torsion points are coherent with the isogeny + * + */ +static int +theta_isogeny_compute(theta_isogeny_t *out, + const theta_structure_t *A, + const theta_point_t *T1_8, + const theta_point_t *T2_8, + bool hadamard_bool_1, + bool hadamard_bool_2, + bool verify) +{ + out->hadamard_bool_1 = hadamard_bool_1; + out->hadamard_bool_2 = hadamard_bool_2; + out->domain = *A; + out->T1_8 = *T1_8; + out->T2_8 = *T2_8; + out->codomain.precomputation = false; + + theta_point_t TT1, TT2; + + if (hadamard_bool_1) { + hadamard(&TT1, T1_8); + to_squared_theta(&TT1, &TT1); + hadamard(&TT2, T2_8); + to_squared_theta(&TT2, &TT2); + } else { + to_squared_theta(&TT1, T1_8); + to_squared_theta(&TT2, T2_8); + } + + fp2_t t1, t2; + + // Test that our projective factor ABCDxzw is non zero, where + // TT1=(Ax, Bx, Cy, Dy), TT2=(Az, Bw, Cz, Dw) + // But ABCDxzw=0 can only happen if we had an unexpected splitting in + // the isogeny chain. 
+ // In either case reject + // (this is not strictly necessary, we could just return (0:0:0:0)) + if (fp2_is_zero(&TT2.x) | fp2_is_zero(&TT2.y) | fp2_is_zero(&TT2.z) | fp2_is_zero(&TT2.t) | fp2_is_zero(&TT1.x) | + fp2_is_zero(&TT1.y)) + return 0; + + fp2_mul(&t1, &TT1.x, &TT2.y); + fp2_mul(&t2, &TT1.y, &TT2.x); + fp2_mul(&out->codomain.null_point.x, &TT2.x, &t1); + fp2_mul(&out->codomain.null_point.y, &TT2.y, &t2); + fp2_mul(&out->codomain.null_point.z, &TT2.z, &t1); + fp2_mul(&out->codomain.null_point.t, &TT2.t, &t2); + fp2_t t3; + fp2_mul(&t3, &TT2.z, &TT2.t); + fp2_mul(&out->precomputation.x, &t3, &TT1.y); + fp2_mul(&out->precomputation.y, &t3, &TT1.x); + fp2_copy(&out->precomputation.z, &out->codomain.null_point.t); + fp2_copy(&out->precomputation.t, &out->codomain.null_point.z); + + // If T1_8 and T2_8 are our 8-torsion points, this ensures that the + // 4-torsion points 2T1_8 and 2T2_8 are isotropic. + if (verify) { + fp2_mul(&t1, &TT1.x, &out->precomputation.x); + fp2_mul(&t2, &TT1.y, &out->precomputation.y); + if (!fp2_is_equal(&t1, &t2)) + return 0; + fp2_mul(&t1, &TT1.z, &out->precomputation.z); + fp2_mul(&t2, &TT1.t, &out->precomputation.t); + if (!fp2_is_equal(&t1, &t2)) + return 0; + fp2_mul(&t1, &TT2.x, &out->precomputation.x); + fp2_mul(&t2, &TT2.z, &out->precomputation.z); + if (!fp2_is_equal(&t1, &t2)) + return 0; + fp2_mul(&t1, &TT2.y, &out->precomputation.y); + fp2_mul(&t2, &TT2.t, &out->precomputation.t); + if (!fp2_is_equal(&t1, &t2)) + return 0; + } + + if (hadamard_bool_2) { + hadamard(&out->codomain.null_point, &out->codomain.null_point); + } + return 1; +} + +/** + * @brief Compute a (2,2) isogeny when only the 4 torsion above the kernel is known and not the 8 + * torsion + * + * @param out Output: the theta_isogeny + * @param A a theta null point for the domain + * @param T1_4 a point in A[4] + * @param T2_4 a point in A[4] + * @param hadamard_bool_1 a boolean + * @param hadamard_bool_2 a boolean + * + * out : A -> B of kernel [2](T1_4,T2_4) + * hadamard_bool_1 controls if the domain is in standard or dual coordinates + * hadamard_bool_2 controls if the codomain is in standard or dual coordinates + * + */ +static void +theta_isogeny_compute_4(theta_isogeny_t *out, + const theta_structure_t *A, + const theta_point_t *T1_4, + const theta_point_t *T2_4, + bool hadamard_bool_1, + bool hadamard_bool_2) +{ + out->hadamard_bool_1 = hadamard_bool_1; + out->hadamard_bool_2 = hadamard_bool_2; + out->domain = *A; + out->T1_8 = *T1_4; + out->T2_8 = *T2_4; + out->codomain.precomputation = false; + + theta_point_t TT1, TT2; + // we will compute: + // TT1 = (xAB, _ , xCD, _) + // TT2 = (AA,BB,CC,DD) + + // fp2_t xA_inv,zA_inv,tB_inv; + + if (hadamard_bool_1) { + hadamard(&TT1, T1_4); + to_squared_theta(&TT1, &TT1); + + hadamard(&TT2, &A->null_point); + to_squared_theta(&TT2, &TT2); + } else { + to_squared_theta(&TT1, T1_4); + to_squared_theta(&TT2, &A->null_point); + } + + fp2_t sqaabb, sqaacc; + fp2_mul(&sqaabb, &TT2.x, &TT2.y); + fp2_mul(&sqaacc, &TT2.x, &TT2.z); + // No need to check the square roots, only used for signing. 
+ // sqaabb = sqrt(AA*BB) + fp2_sqrt(&sqaabb); + // sqaacc = sqrt(AA*CC) + fp2_sqrt(&sqaacc); + + // we compute out->codomain.null_point = (xAB * sqaacc * AA, xAB *sqaabb *sqaacc, xCD*sqaabb * + // AA) out->precomputation = (xAB * BB * CC *DD , sqaabb * CC * DD * xAB , sqaacc * BB* DD * xAB + // , xCD * sqaabb *sqaacc * BB) + + fp2_mul(&out->codomain.null_point.y, &sqaabb, &sqaacc); + fp2_mul(&out->precomputation.t, &out->codomain.null_point.y, &TT1.z); + fp2_mul(&out->codomain.null_point.y, &out->codomain.null_point.y, + &TT1.x); // done for out->codomain.null_point.y + + fp2_mul(&out->codomain.null_point.t, &TT1.z, &sqaabb); + fp2_mul(&out->codomain.null_point.t, &out->codomain.null_point.t, + &TT2.x); // done for out->codomain.null_point.t + + fp2_mul(&out->codomain.null_point.x, &TT1.x, &TT2.x); + fp2_mul(&out->codomain.null_point.z, &out->codomain.null_point.x, + &TT2.z); // done for out->codomain.null_point.z + fp2_mul(&out->codomain.null_point.x, &out->codomain.null_point.x, + &sqaacc); // done for out->codomain.null_point.x + + fp2_mul(&out->precomputation.x, &TT1.x, &TT2.t); + fp2_mul(&out->precomputation.z, &out->precomputation.x, &TT2.y); + fp2_mul(&out->precomputation.x, &out->precomputation.x, &TT2.z); + fp2_mul(&out->precomputation.y, &out->precomputation.x, &sqaabb); // done for out->precomputation.y + fp2_mul(&out->precomputation.x, &out->precomputation.x, &TT2.y); // done for out->precomputation.x + fp2_mul(&out->precomputation.z, &out->precomputation.z, &sqaacc); // done for out->precomputation.z + fp2_mul(&out->precomputation.t, &out->precomputation.t, &TT2.y); // done for out->precomputation.t + + if (hadamard_bool_2) { + hadamard(&out->codomain.null_point, &out->codomain.null_point); + } +} + +/** + * @brief Compute a (2,2) isogeny when only the kernel is known and not the 8 or 4 torsion above + * + * @param out Output: the theta_isogeny + * @param A a theta null point for the domain + * @param T1_2 a point in A[2] + * @param T2_2 a point in A[2] + * @param hadamard_bool_1 a boolean + * @param boo2 a boolean + * + * out : A -> B of kernel (T1_2,T2_2) + * hadamard_bool_1 controls if the domain is in standard or dual coordinates + * hadamard_bool_2 controls if the codomain is in standard or dual coordinates + * + */ +static void +theta_isogeny_compute_2(theta_isogeny_t *out, + const theta_structure_t *A, + const theta_point_t *T1_2, + const theta_point_t *T2_2, + bool hadamard_bool_1, + bool hadamard_bool_2) +{ + out->hadamard_bool_1 = hadamard_bool_1; + out->hadamard_bool_2 = hadamard_bool_2; + out->domain = *A; + out->T1_8 = *T1_2; + out->T2_8 = *T2_2; + out->codomain.precomputation = false; + + theta_point_t TT2; + // we will compute: + // TT2 = (AA,BB,CC,DD) + + if (hadamard_bool_1) { + hadamard(&TT2, &A->null_point); + to_squared_theta(&TT2, &TT2); + } else { + to_squared_theta(&TT2, &A->null_point); + } + + // we compute out->codomain.null_point = (AA,sqaabb, sqaacc, sqaadd) + // out->precomputation = ( BB * CC *DD , sqaabb * CC * DD , sqaacc * BB* DD , sqaadd * BB * CC) + fp2_copy(&out->codomain.null_point.x, &TT2.x); + fp2_mul(&out->codomain.null_point.y, &TT2.x, &TT2.y); + fp2_mul(&out->codomain.null_point.z, &TT2.x, &TT2.z); + fp2_mul(&out->codomain.null_point.t, &TT2.x, &TT2.t); + // No need to check the square roots, only used for signing. 
+ fp2_sqrt(&out->codomain.null_point.y); + fp2_sqrt(&out->codomain.null_point.z); + fp2_sqrt(&out->codomain.null_point.t); + + fp2_mul(&out->precomputation.x, &TT2.z, &TT2.t); + fp2_mul(&out->precomputation.y, + &out->precomputation.x, + &out->codomain.null_point.y); // done for out->precomputation.y + fp2_mul(&out->precomputation.x, &out->precomputation.x, &TT2.y); // done for out->precomputation.x + fp2_mul(&out->precomputation.z, &TT2.t, &out->codomain.null_point.z); + fp2_mul(&out->precomputation.z, &out->precomputation.z, &TT2.y); // done for out->precomputation.z + fp2_mul(&out->precomputation.t, &TT2.z, &out->codomain.null_point.t); + fp2_mul(&out->precomputation.t, &out->precomputation.t, &TT2.y); // done for out->precomputation.t + + if (hadamard_bool_2) { + hadamard(&out->codomain.null_point, &out->codomain.null_point); + } +} + +static void +theta_isogeny_eval(theta_point_t *out, const theta_isogeny_t *phi, const theta_point_t *P) +{ + if (phi->hadamard_bool_1) { + hadamard(out, P); + to_squared_theta(out, out); + } else { + to_squared_theta(out, P); + } + fp2_mul(&out->x, &out->x, &phi->precomputation.x); + fp2_mul(&out->y, &out->y, &phi->precomputation.y); + fp2_mul(&out->z, &out->z, &phi->precomputation.z); + fp2_mul(&out->t, &out->t, &phi->precomputation.t); + + if (phi->hadamard_bool_2) { + hadamard(out, out); + } +} + +#if defined(ENABLE_SIGN) +// Sample a random secret index in [0, 5] to select one of the 6 normalisation +// matrices for the normalisation of the output of the (2,2)-chain during +// splitting +static unsigned char +sample_random_index(void) +{ + // To avoid bias in reduction we should only consider integers smaller + // than 2^32 which are a multiple of 6, so we only reduce bytes with a + // value in [0, 4294967292-1]. + // We have 4294967292/2^32 = ~99.9999999% chance that the first try is "good". 
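+ //
+ // Worked example of the constant-time reduction below (informal note):
+ // 2863311531 = 0xAAAAAAAB = (2^33 + 1)/3, so for any 32-bit seed
+ // (seed * 2863311531) >> 34 == seed / 6 exactly, and hence
+ // seed - 6 * ((seed * 2863311531) >> 34) == seed % 6
+ // without a secret-dependent division. For instance, seed = 100 gives
+ // (100 * 2863311531) >> 34 = 286331153100 >> 34 = 16 = 100 / 6, and
+ // 100 - 6 * 16 = 4 = 100 % 6, matching the assert further down.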
+ unsigned char seed_arr[4]; + uint32_t seed; + + do { + randombytes(seed_arr, 4); + seed = (seed_arr[0] | (seed_arr[1] << 8) | (seed_arr[2] << 16) | (seed_arr[3] << 24)); + } while (seed >= 4294967292U); + + uint32_t secret_index = seed - (((uint64_t)seed * 2863311531U) >> 34) * 6; + assert(secret_index == seed % 6); // ensure the constant time trick above works + return (unsigned char)secret_index; +} +#endif + +static bool +splitting_compute(theta_splitting_t *out, const theta_structure_t *A, int zero_index, bool randomize) + +{ + // init + uint32_t ctl; + uint32_t count = 0; + fp2_t U_cst, t1, t2; + + memset(&out->M, 0, sizeof(basis_change_matrix_t)); + + // enumerate through all indices + for (int i = 0; i < 10; i++) { + fp2_set_zero(&U_cst); + for (int t = 0; t < 4; t++) { + // Iterate through the null point + choose_index_theta_point(&t2, t, &A->null_point); + choose_index_theta_point(&t1, t ^ EVEN_INDEX[i][1], &A->null_point); + + // Compute t1 * t2 + fp2_mul(&t1, &t1, &t2); + // If CHI_EVAL(i,t) is +1 we want ctl to be 0 and + // If CHI_EVAL(i,t) is -1 we want ctl to be 0xFF..FF + ctl = (uint32_t)(CHI_EVAL[EVEN_INDEX[i][0]][t] >> 1); + assert(ctl == 0 || ctl == 0xffffffff); + + fp2_neg(&t2, &t1); + fp2_select(&t1, &t1, &t2, ctl); + + // Then we compute U_cst ± (t1 * t2) + fp2_add(&U_cst, &U_cst, &t1); + } + + // If U_cst is 0 then update the splitting matrix + ctl = fp2_is_zero(&U_cst); + count -= ctl; + select_base_change_matrix(&out->M, &out->M, &SPLITTING_TRANSFORMS[i], ctl); + if (zero_index != -1 && i == zero_index && + !ctl) { // extra checks if we know exactly where the 0 index should be + return 0; + } + } + +#if defined(ENABLE_SIGN) + // Pick a random normalization matrix + if (randomize) { + unsigned char secret_index = sample_random_index(); + basis_change_matrix_t Mrandom; + + set_base_change_matrix_from_precomp(&Mrandom, &NORMALIZATION_TRANSFORMS[0]); + + // Use a constant time selection to pick the index we want + for (unsigned char i = 1; i < 6; i++) { + // When i == secret_index, mask == 0 and 0xFF..FF otherwise + int32_t mask = i - secret_index; + mask = (mask | -mask) >> 31; + select_base_change_matrix(&Mrandom, &Mrandom, &NORMALIZATION_TRANSFORMS[i], ~mask); + } + base_change_matrix_multiplication(&out->M, &Mrandom, &out->M); + } +#else + assert(!randomize); +#endif + + // apply the isomorphism to ensure the null point is compatible with splitting + apply_isomorphism(&out->B.null_point, &out->M, &A->null_point); + + // splitting was successful only if exactly one zero was identified + return count == 1; +} + +static int +theta_product_structure_to_elliptic_product(theta_couple_curve_t *E12, theta_structure_t *A) +{ + fp2_t xx, yy; + + // This should be true from our computations in splitting_compute + // but still check this for sanity + if (!is_product_theta_point(&A->null_point)) + return 0; + + ec_curve_init(&(E12->E1)); + ec_curve_init(&(E12->E2)); + + // A valid elliptic theta null point has no zero coordinate + if (fp2_is_zero(&A->null_point.x) | fp2_is_zero(&A->null_point.y) | fp2_is_zero(&A->null_point.z)) + return 0; + + // xx = x², yy = y² + fp2_sqr(&xx, &A->null_point.x); + fp2_sqr(&yy, &A->null_point.y); + // xx = x^4, yy = y^4 + fp2_sqr(&xx, &xx); + fp2_sqr(&yy, &yy); + + // A2 = -2(x^4+y^4)/(x^4-y^4) + fp2_add(&E12->E2.A, &xx, &yy); + fp2_sub(&E12->E2.C, &xx, &yy); + fp2_add(&E12->E2.A, &E12->E2.A, &E12->E2.A); + fp2_neg(&E12->E2.A, &E12->E2.A); + + // same with x,z + fp2_sqr(&xx, &A->null_point.x); + fp2_sqr(&yy, &A->null_point.z); + fp2_sqr(&xx, 
&xx); + fp2_sqr(&yy, &yy); + + // A1 = -2(x^4+z^4)/(x^4-z^4) + fp2_add(&E12->E1.A, &xx, &yy); + fp2_sub(&E12->E1.C, &xx, &yy); + fp2_add(&E12->E1.A, &E12->E1.A, &E12->E1.A); + fp2_neg(&E12->E1.A, &E12->E1.A); + + if (fp2_is_zero(&E12->E1.C) | fp2_is_zero(&E12->E2.C)) + return 0; + + return 1; +} + +static int +theta_point_to_montgomery_point(theta_couple_point_t *P12, const theta_point_t *P, const theta_structure_t *A) +{ + fp2_t temp; + const fp2_t *x, *z; + + if (!is_product_theta_point(P)) + return 0; + + x = &P->x; + z = &P->y; + if (fp2_is_zero(x) & fp2_is_zero(z)) { + x = &P->z; + z = &P->t; + } + if (fp2_is_zero(x) & fp2_is_zero(z)) { + return 0; // at this point P=(0:0:0:0) so is invalid + } + // P2.X = A.null_point.y * P.x + A.null_point.x * P.y + // P2.Z = - A.null_point.y * P.x + A.null_point.x * P.y + fp2_mul(&P12->P2.x, &A->null_point.y, x); + fp2_mul(&temp, &A->null_point.x, z); + fp2_sub(&P12->P2.z, &temp, &P12->P2.x); + fp2_add(&P12->P2.x, &P12->P2.x, &temp); + + x = &P->x; + z = &P->z; + if (fp2_is_zero(x) & fp2_is_zero(z)) { + x = &P->y; + z = &P->t; + } + // P1.X = A.null_point.z * P.x + A.null_point.x * P.z + // P1.Z = -A.null_point.z * P.x + A.null_point.x * P.z + fp2_mul(&P12->P1.x, &A->null_point.z, x); + fp2_mul(&temp, &A->null_point.x, z); + fp2_sub(&P12->P1.z, &temp, &P12->P1.x); + fp2_add(&P12->P1.x, &P12->P1.x, &temp); + return 1; +} + +static int +_theta_chain_compute_impl(unsigned n, + theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP, + bool verify, + bool randomize) +{ + theta_structure_t theta; + + // lift the basis + theta_couple_jac_point_t xyT1, xyT2; + + ec_basis_t bas1 = { .P = ker->T1.P1, .Q = ker->T2.P1, .PmQ = ker->T1m2.P1 }; + ec_basis_t bas2 = { .P = ker->T1.P2, .Q = ker->T2.P2, .PmQ = ker->T1m2.P2 }; + if (!lift_basis(&xyT1.P1, &xyT2.P1, &bas1, &E12->E1)) + return 0; + if (!lift_basis(&xyT1.P2, &xyT2.P2, &bas2, &E12->E2)) + return 0; + + const unsigned extra = HD_extra_torsion * extra_torsion; + +#ifndef NDEBUG + assert(extra == 0 || extra == 2); // only cases implemented + if (!test_point_order_twof(&bas2.P, &E12->E2, n + extra)) + debug_print("bas2.P does not have correct order"); + + if (!test_jac_order_twof(&xyT2.P2, &E12->E2, n + extra)) + debug_print("xyT2.P2 does not have correct order"); +#endif + + theta_point_t pts[numP ? numP : 1]; + + int space = 1; + for (unsigned i = 1; i < n; i *= 2) + ++space; + + uint16_t todo[space]; + todo[0] = n - 2 + extra; + + int current = 0; + + // kernel points for the gluing isogeny + theta_couple_jac_point_t jacQ1[space], jacQ2[space]; + jacQ1[0] = xyT1; + jacQ2[0] = xyT2; + while (todo[current] != 1) { + assert(todo[current] >= 2); + ++current; + assert(current < space); + // the gluing isogeny is quite a bit more expensive than the others, + // so we adjust the usual splitting rule here a little bit: towards + // the end of the doubling chain it will be cheaper to recompute the + // doublings after evaluation than to push the intermediate points. + const unsigned num_dbls = todo[current - 1] >= 16 ? 
todo[current - 1] / 2 : todo[current - 1] - 1; + assert(num_dbls && num_dbls < todo[current - 1]); + double_couple_jac_point_iter(&jacQ1[current], num_dbls, &jacQ1[current - 1], E12); + double_couple_jac_point_iter(&jacQ2[current], num_dbls, &jacQ2[current - 1], E12); + todo[current] = todo[current - 1] - num_dbls; + } + + // kernel points for the remaining isogeny steps + theta_point_t thetaQ1[space], thetaQ2[space]; + + // the gluing step + theta_gluing_t first_step; + { + assert(todo[current] == 1); + + // compute the gluing isogeny + if (!gluing_compute(&first_step, E12, &jacQ1[current], &jacQ2[current], verify)) + return 0; + + // evaluate + for (unsigned j = 0; j < numP; ++j) { + assert(ec_is_zero(&P12[j].P1) || ec_is_zero(&P12[j].P2)); + if (!gluing_eval_point_special_case(&pts[j], &P12[j], &first_step)) + return 0; + } + + // push kernel points through gluing isogeny + for (int j = 0; j < current; ++j) { + gluing_eval_basis(&thetaQ1[j], &thetaQ2[j], &jacQ1[j], &jacQ2[j], &first_step); + --todo[j]; + } + + --current; + } + + // set-up the theta_structure for the first codomain + theta.null_point = first_step.codomain; + theta.precomputation = 0; + theta_precomputation(&theta); + + theta_isogeny_t step; + + // and now we do the remaining steps + for (unsigned i = 1; current >= 0 && todo[current]; ++i) { + assert(current < space); + while (todo[current] != 1) { + assert(todo[current] >= 2); + ++current; + assert(current < space); + const unsigned num_dbls = todo[current - 1] / 2; + assert(num_dbls && num_dbls < todo[current - 1]); + double_iter(&thetaQ1[current], &theta, &thetaQ1[current - 1], num_dbls); + double_iter(&thetaQ2[current], &theta, &thetaQ2[current - 1], num_dbls); + todo[current] = todo[current - 1] - num_dbls; + } + + // computing the next step + int ret; + if (i == n - 2) // penultimate step + ret = theta_isogeny_compute(&step, &theta, &thetaQ1[current], &thetaQ2[current], 0, 0, verify); + else if (i == n - 1) // ultimate step + ret = theta_isogeny_compute(&step, &theta, &thetaQ1[current], &thetaQ2[current], 1, 0, false); + else + ret = theta_isogeny_compute(&step, &theta, &thetaQ1[current], &thetaQ2[current], 0, 1, verify); + if (!ret) + return 0; + + for (unsigned j = 0; j < numP; ++j) + theta_isogeny_eval(&pts[j], &step, &pts[j]); + + // updating the codomain + theta = step.codomain; + + // pushing the kernel + assert(todo[current] == 1); + for (int j = 0; j < current; ++j) { + theta_isogeny_eval(&thetaQ1[j], &step, &thetaQ1[j]); + theta_isogeny_eval(&thetaQ2[j], &step, &thetaQ2[j]); + assert(todo[j]); + --todo[j]; + } + + --current; + } + + assert(current == -1); + + if (!extra_torsion) { + if (n >= 3) { + // in the last step we've skipped pushing the kernel since current was == 0, let's do it now + theta_isogeny_eval(&thetaQ1[0], &step, &thetaQ1[0]); + theta_isogeny_eval(&thetaQ2[0], &step, &thetaQ2[0]); + } + + // penultimate step + theta_isogeny_compute_4(&step, &theta, &thetaQ1[0], &thetaQ2[0], 0, 0); + for (unsigned j = 0; j < numP; ++j) + theta_isogeny_eval(&pts[j], &step, &pts[j]); + theta = step.codomain; + theta_isogeny_eval(&thetaQ1[0], &step, &thetaQ1[0]); + theta_isogeny_eval(&thetaQ2[0], &step, &thetaQ2[0]); + + // ultimate step + theta_isogeny_compute_2(&step, &theta, &thetaQ1[0], &thetaQ2[0], 1, 0); + for (unsigned j = 0; j < numP; ++j) + theta_isogeny_eval(&pts[j], &step, &pts[j]); + theta = step.codomain; + } + + // final splitting step + theta_splitting_t last_step; + + bool is_split = splitting_compute(&last_step, &theta, extra_torsion ? 
8 : -1, randomize); + + if (!is_split) { + debug_print("kernel did not generate an isogeny between elliptic products"); + return 0; + } + + if (!theta_product_structure_to_elliptic_product(E34, &last_step.B)) + return 0; + + // evaluate + for (size_t j = 0; j < numP; ++j) { + apply_isomorphism(&pts[j], &last_step.M, &pts[j]); + if (!theta_point_to_montgomery_point(&P12[j], &pts[j], &last_step.B)) + return 0; + } + + return 1; +} + +int +theta_chain_compute_and_eval(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP) +{ + return _theta_chain_compute_impl(n, E12, ker, extra_torsion, E34, P12, numP, false, false); +} + +// Like theta_chain_compute_and_eval, adding extra verification checks; +// used in the signature verification +int +theta_chain_compute_and_eval_verify(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP) +{ + return _theta_chain_compute_impl(n, E12, ker, extra_torsion, E34, P12, numP, true, false); +} + +int +theta_chain_compute_and_eval_randomized(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP) +{ + return _theta_chain_compute_impl(n, E12, ker, extra_torsion, E34, P12, numP, false, true); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/theta_isogenies.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/theta_isogenies.h new file mode 100644 index 0000000000..d151811fe7 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/theta_isogenies.h @@ -0,0 +1,18 @@ +/** @file + * + * @authors Antonin Leroux + * + * @brief the theta isogeny header + */ + +#ifndef THETA_ISOGENY_H +#define THETA_ISOGENY_H + +#include +#include +#include +#include "theta_structure.h" +#include +#include + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/theta_structure.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/theta_structure.c new file mode 100644 index 0000000000..ce97ac61a8 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/theta_structure.c @@ -0,0 +1,78 @@ +#include "theta_structure.h" +#include + +void +theta_precomputation(theta_structure_t *A) +{ + + if (A->precomputation) { + return; + } + + theta_point_t A_dual; + to_squared_theta(&A_dual, &A->null_point); + + fp2_t t1, t2; + fp2_mul(&t1, &A_dual.x, &A_dual.y); + fp2_mul(&t2, &A_dual.z, &A_dual.t); + fp2_mul(&A->XYZ0, &t1, &A_dual.z); + fp2_mul(&A->XYT0, &t1, &A_dual.t); + fp2_mul(&A->YZT0, &t2, &A_dual.y); + fp2_mul(&A->XZT0, &t2, &A_dual.x); + + fp2_mul(&t1, &A->null_point.x, &A->null_point.y); + fp2_mul(&t2, &A->null_point.z, &A->null_point.t); + fp2_mul(&A->xyz0, &t1, &A->null_point.z); + fp2_mul(&A->xyt0, &t1, &A->null_point.t); + fp2_mul(&A->yzt0, &t2, &A->null_point.y); + fp2_mul(&A->xzt0, &t2, &A->null_point.x); + + A->precomputation = true; +} + +void +double_point(theta_point_t *out, theta_structure_t *A, const theta_point_t *in) +{ + to_squared_theta(out, in); + fp2_sqr(&out->x, &out->x); + fp2_sqr(&out->y, &out->y); + fp2_sqr(&out->z, &out->z); + fp2_sqr(&out->t, &out->t); + + if (!A->precomputation) { + theta_precomputation(A); + } + fp2_mul(&out->x, &out->x, &A->YZT0); + fp2_mul(&out->y, &out->y, &A->XZT0); + fp2_mul(&out->z, &out->z, &A->XYT0); + fp2_mul(&out->t, 
&out->t, &A->XYZ0); + + hadamard(out, out); + + fp2_mul(&out->x, &out->x, &A->yzt0); + fp2_mul(&out->y, &out->y, &A->xzt0); + fp2_mul(&out->z, &out->z, &A->xyt0); + fp2_mul(&out->t, &out->t, &A->xyz0); +} + +void +double_iter(theta_point_t *out, theta_structure_t *A, const theta_point_t *in, int exp) +{ + if (exp == 0) { + *out = *in; + } else { + double_point(out, A, in); + for (int i = 1; i < exp; i++) { + double_point(out, A, out); + } + } +} + +uint32_t +is_product_theta_point(const theta_point_t *P) +{ + fp2_t t1, t2; + fp2_mul(&t1, &P->x, &P->t); + fp2_mul(&t2, &P->y, &P->z); + return fp2_is_equal(&t1, &t2); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/theta_structure.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/theta_structure.h new file mode 100644 index 0000000000..fc630b750a --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/theta_structure.h @@ -0,0 +1,135 @@ +/** @file + * + * @authors Antonin Leroux + * + * @brief the theta structure header + */ + +#ifndef THETA_STRUCTURE_H +#define THETA_STRUCTURE_H + +#include +#include +#include + +/** @internal + * @ingroup hd_module + * @defgroup hd_theta Functions for theta structures + * @{ + */ + +/** + * @brief Perform the hadamard transform on a theta point + * + * @param out Output: the theta_point + * @param in a theta point* + * in = (x,y,z,t) + * out = (x+y+z+t, x-y+z-t, x+y-z-t, x-y-z+t) + * + */ +static inline void +hadamard(theta_point_t *out, const theta_point_t *in) +{ + fp2_t t1, t2, t3, t4; + + // t1 = x + y + fp2_add(&t1, &in->x, &in->y); + // t2 = x - y + fp2_sub(&t2, &in->x, &in->y); + // t3 = z + t + fp2_add(&t3, &in->z, &in->t); + // t4 = z - t + fp2_sub(&t4, &in->z, &in->t); + + fp2_add(&out->x, &t1, &t3); + fp2_add(&out->y, &t2, &t4); + fp2_sub(&out->z, &t1, &t3); + fp2_sub(&out->t, &t2, &t4); +} + +/** + * @brief Square the coordinates of a theta point + * @param out Output: the theta_point + * @param in a theta point* + * in = (x,y,z,t) + * out = (x^2, y^2, z^2, t^2) + * + */ +static inline void +pointwise_square(theta_point_t *out, const theta_point_t *in) +{ + fp2_sqr(&out->x, &in->x); + fp2_sqr(&out->y, &in->y); + fp2_sqr(&out->z, &in->z); + fp2_sqr(&out->t, &in->t); +} + +/** + * @brief Square the coordinates and then perform the hadamard transform + * + * @param out Output: the theta_point + * @param in a theta point* + * in = (x,y,z,t) + * out = (x^2+y^2+z^2+t^2, x^2-y^2+z^2-t^2, x^2+y^2-z^2-t^2, x^2-y^2-z^2+t^2) + * + */ +static inline void +to_squared_theta(theta_point_t *out, const theta_point_t *in) +{ + pointwise_square(out, in); + hadamard(out, out); +} + +/** + * @brief Perform the theta structure precomputation + * + * @param A Output: the theta_structure + * + * if A.null_point = (x,y,z,t) + * if (xx,yy,zz,tt) = to_squared_theta(A.null_point) + * Computes y0,z0,t0,Y0,Z0,T0 = x/y,x/z,x/t,XX/YY,XX/ZZ,XX/TT + * + */ +void theta_precomputation(theta_structure_t *A); + +/** + * @brief Compute the double of the theta point in on the theta struc A + * + * @param out Output: the theta_point + * @param A a theta structure + * @param in a theta point in the theta structure A + * in = (x,y,z,t) + * out = [2] (x,y,z,t) + * /!\ assumes that no coordinates is zero and that the precomputation of A has been done + * + */ +void double_point(theta_point_t *out, theta_structure_t *A, const theta_point_t *in); + +/** + * @brief Compute the iterated double of the theta point in on the theta struc A + * + * @param out Output: the theta_point + * @param A a theta structure + * @param in a 
theta point in the theta structure A + * @param exp the exponent + * in = (x,y,z,t) + * out = [2^2] (x,y,z,t) + * /!\ assumes that no coordinates is zero and that the precomputation of A has been done + * + */ +void double_iter(theta_point_t *out, theta_structure_t *A, const theta_point_t *in, int exp); + +/* + * @brief Check if a theta point is a product theta point + * + * @param P a theta point + * @return 0xFFFFFFFF if true, zero otherwise + */ +uint32_t is_product_theta_point(const theta_point_t *P); + +// end hd_theta +/** + * @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/tools.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/tools.c new file mode 100644 index 0000000000..242ea08fe2 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/tools.c @@ -0,0 +1,75 @@ +#include +#include + +static clock_t global_timer; + +clock_t +tic(void) +{ + global_timer = clock(); + return global_timer; +} + +float +tac(void) +{ + float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); + return ms; +} + +float +TAC(const char *str) +{ + float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); +#ifndef NDEBUG + printf("%s [%d ms]\n", str, (int)ms); +#endif + return ms; +} + +float +toc(const clock_t t) +{ + float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); + return ms; +} + +float +TOC(const clock_t t, const char *str) +{ + float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); + printf("%s [%d ms]\n", str, (int)ms); + return ms; + // printf("%s [%ld]\n",str,clock()-t); + // return (float) (clock()-t); +} + +float +TOC_clock(const clock_t t, const char *str) +{ + printf("%s [%ld]\n", str, clock() - t); + return (float)(clock() - t); +} + +clock_t +dclock(const clock_t t) +{ + return (clock() - t); +} + +float +clock_to_time(const clock_t t, const char *str) +{ + float ms = (1000. 
* (float)(t) / CLOCKS_PER_SEC); + printf("%s [%d ms]\n", str, (int)ms); + return ms; + // printf("%s [%ld]\n",str,t); + // return (float) (t); +} + +float +clock_print(const clock_t t, const char *str) +{ + printf("%s [%ld]\n", str, t); + return (float)(t); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/tools.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/tools.h new file mode 100644 index 0000000000..5a6a505fc1 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/tools.h @@ -0,0 +1,49 @@ + +#ifndef TOOLS_H +#define TOOLS_H + +#include + +// Debug printing: +// https://stackoverflow.com/questions/1644868/define-macro-for-debug-printing-in-c +#ifndef NDEBUG +#define DEBUG_PRINT 1 +#else +#define DEBUG_PRINT 0 +#endif + +#ifndef __FILE_NAME__ +#define __FILE_NAME__ "NA" +#endif + +#ifndef __LINE__ +#define __LINE__ 0 +#endif + +#ifndef __func__ +#define __func__ "NA" +#endif + +#define debug_print(fmt) \ + do { \ + if (DEBUG_PRINT) \ + printf("warning: %s, file %s, line %d, function %s().\n", \ + fmt, \ + __FILE_NAME__, \ + __LINE__, \ + __func__); \ + } while (0) + + +clock_t tic(void); +float tac(void); /* time in ms since last tic */ +float TAC(const char *str); /* same, but prints it with label 'str' */ +float toc(const clock_t t); /* time in ms since t */ +float TOC(const clock_t t, const char *str); /* same, but prints it with label 'str' */ +float TOC_clock(const clock_t t, const char *str); + +clock_t dclock(const clock_t t); // return the clock cycle diff between now and t +float clock_to_time(const clock_t t, + const char *str); // convert the number of clock cycles t to time +float clock_print(const clock_t t, const char *str); +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/torsion_constants.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/torsion_constants.c new file mode 100644 index 0000000000..d7a42bcbe9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/torsion_constants.c @@ -0,0 +1,43 @@ +#include +#include +#include +const ibz_t TWO_TO_SECURITY_BITS = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x1}}} +#endif +; +const ibz_t TORSION_PLUS_2POWER = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x100}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x100000000000000}}} +#endif +; +const ibz_t SEC_DEGREE = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#endif +; +const ibz_t COM_DEGREE = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) 
{0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#endif +; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/torsion_constants.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/torsion_constants.h new file mode 100644 index 0000000000..2756a2715f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/torsion_constants.h @@ -0,0 +1,6 @@ +#include +#define TORSION_2POWER_BYTES 32 +extern const ibz_t TWO_TO_SECURITY_BITS; +extern const ibz_t TORSION_PLUS_2POWER; +extern const ibz_t SEC_DEGREE; +extern const ibz_t COM_DEGREE; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/tutil.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/tutil.h new file mode 100644 index 0000000000..59f162093e --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/tutil.h @@ -0,0 +1,36 @@ +#ifndef TUTIL_H +#define TUTIL_H + +#include +#include + +#if defined(__GNUC__) || defined(__clang__) +#define BSWAP16(i) __builtin_bswap16((i)) +#define BSWAP32(i) __builtin_bswap32((i)) +#define BSWAP64(i) __builtin_bswap64((i)) +#define UNUSED __attribute__((unused)) +#else +#define BSWAP16(i) ((((i) >> 8) & 0xff) | (((i) & 0xff00) << 8)) +#define BSWAP32(i) \ + ((((i) >> 24) & 0xff) | (((i) >> 8) & 0xff00) | (((i) & 0xff00) << 8) | ((i) << 24)) +#define BSWAP64(i) ((BSWAP32((i) >> 32) & 0xffffffff) | (BSWAP32(i) << 32) +#define UNUSED +#endif + +#if defined(RADIX_64) +#define digit_t uint64_t +#define sdigit_t int64_t +#define RADIX 64 +#define LOG2RADIX 6 +#define BSWAP_DIGIT(i) BSWAP64(i) +#elif defined(RADIX_32) +#define digit_t uint32_t +#define sdigit_t int32_t +#define RADIX 32 +#define LOG2RADIX 5 +#define BSWAP_DIGIT(i) BSWAP32(i) +#else +#error "Radix must be 32bit or 64 bit" +#endif + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/verification.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/verification.h new file mode 100644 index 0000000000..af674691da --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/verification.h @@ -0,0 +1,123 @@ +/** @file + * + * @brief The verification protocol + */ + +#ifndef VERIFICATION_H +#define VERIFICATION_H + +#include +#include + +/** @defgroup verification SQIsignHD verification protocol + * @{ + */ + +/** @defgroup verification_t Types for SQIsignHD verification protocol + * @{ + */ + +typedef digit_t scalar_t[NWORDS_ORDER]; +typedef scalar_t scalar_mtx_2x2_t[2][2]; + +/** @brief Type for the signature + * + * @typedef signature_t + * + * @struct signature + * + */ +typedef struct signature +{ + fp2_t E_aux_A; // the Montgomery A-coefficient for the auxiliary curve + uint8_t backtracking; + uint8_t two_resp_length; + scalar_mtx_2x2_t mat_Bchall_can_to_B_chall; // the matrix of the desired basis + scalar_t chall_coeff; + uint8_t hint_aux; + uint8_t hint_chall; +} signature_t; + +/** @brief Type for the public keys + * + * @typedef public_key_t + * + * @struct public_key + * + */ +typedef struct public_key +{ + ec_curve_t curve; // the normalized A-coefficient of the Montgomery curve + uint8_t hint_pk; +} public_key_t; + +/** @} + */ + +/*************************** Functions *****************************/ + +void public_key_init(public_key_t *pk); 
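+/*
+ * Informal usage sketch (illustration only, not upstream documentation).
+ * pk_bytes, sig_bytes, msg and msg_len are hypothetical caller-supplied
+ * buffers and lengths; the functions used are all declared in this header.
+ *
+ *     public_key_t pk;
+ *     signature_t sig;
+ *     public_key_from_bytes(&pk, pk_bytes);    // decode an encoded public key
+ *     signature_from_bytes(&sig, sig_bytes);   // decode an encoded signature
+ *     int valid = protocols_verify(&sig, &pk, msg, msg_len); // 1 = accept, 0 = reject
+ */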
+void public_key_finalize(public_key_t *pk); + +void hash_to_challenge(scalar_t *scalar, + const public_key_t *pk, + const ec_curve_t *com_curve, + const unsigned char *message, + size_t length); + +/** + * @brief Verification + * + * @param sig signature + * @param pk public key + * @param m message + * @param l size + * @returns 1 if the signature verifies, 0 otherwise + */ +int protocols_verify(signature_t *sig, const public_key_t *pk, const unsigned char *m, size_t l); + +/*************************** Encoding *****************************/ + +/** @defgroup encoding Encoding and decoding functions + * @{ + */ + +/** + * @brief Encodes a signature as a byte array + * + * @param enc : Byte array to encode the signature in + * @param sig : Signature to encode + */ +void signature_to_bytes(unsigned char *enc, const signature_t *sig); + +/** + * @brief Decodes a signature from a byte array + * + * @param sig : Structure to decode the signature in + * @param enc : Byte array to decode + */ +void signature_from_bytes(signature_t *sig, const unsigned char *enc); + +/** + * @brief Encodes a public key as a byte array + * + * @param enc : Byte array to encode the public key in + * @param pk : Public key to encode + */ +unsigned char *public_key_to_bytes(unsigned char *enc, const public_key_t *pk); + +/** + * @brief Decodes a public key from a byte array + * + * @param pk : Structure to decode the public key in + * @param enc : Byte array to decode + */ +const unsigned char *public_key_from_bytes(public_key_t *pk, const unsigned char *enc); + +/** @} + */ + +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/verify.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/verify.c new file mode 100644 index 0000000000..b5f78ad398 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/verify.c @@ -0,0 +1,309 @@ +#include +#include +#include +#include +#include + +// Check that the basis change matrix elements are canonical +// representatives modulo 2^(SQIsign_response_length + 2). +static int +check_canonical_basis_change_matrix(const signature_t *sig) +{ + // This works as long as all values in sig->mat_Bchall_can_to_B_chall are + // positive integers. 
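+ // (Informal note.) aux is set below to
+ // 2^(SQIsign_response_length + HD_extra_torsion - backtracking),
+ // and a matrix entry is a canonical representative exactly when it is
+ // strictly smaller than that power of two; the mp_compare(aux, entry) <= 0
+ // branch therefore rejects any entry that is >= aux.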
+ int ret = 1; + scalar_t aux; + + memset(aux, 0, NWORDS_ORDER * sizeof(digit_t)); + aux[0] = 0x1; + multiple_mp_shiftl(aux, SQIsign_response_length + HD_extra_torsion - (int)sig->backtracking, NWORDS_ORDER); + + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + if (mp_compare(aux, sig->mat_Bchall_can_to_B_chall[i][j], NWORDS_ORDER) <= 0) { + ret = 0; + } + } + } + + return ret; +} + +// Compute the 2^n isogeny from the signature with kernel +// P + [chall_coeff]Q and store the codomain in E_chall +static int +compute_challenge_verify(ec_curve_t *E_chall, const signature_t *sig, const ec_curve_t *Epk, const uint8_t hint_pk) +{ + ec_basis_t bas_EA; + ec_isog_even_t phi_chall; + + // Set domain and length of 2^n isogeny + copy_curve(&phi_chall.curve, Epk); + phi_chall.length = TORSION_EVEN_POWER - sig->backtracking; + + // Compute the basis from the supplied hint + if (!ec_curve_to_basis_2f_from_hint(&bas_EA, &phi_chall.curve, TORSION_EVEN_POWER, hint_pk)) // canonical + return 0; + + // recovering the exact challenge + { + if (!ec_ladder3pt(&phi_chall.kernel, sig->chall_coeff, &bas_EA.P, &bas_EA.Q, &bas_EA.PmQ, &phi_chall.curve)) { + return 0; + }; + } + + // Double the kernel until is has the correct order + ec_dbl_iter(&phi_chall.kernel, sig->backtracking, &phi_chall.kernel, &phi_chall.curve); + + // Compute the codomain + copy_curve(E_chall, &phi_chall.curve); + if (ec_eval_even(E_chall, &phi_chall, NULL, 0)) + return 0; + return 1; +} + +// same as matrix_application_even_basis() in id2iso.c, with some modifications: +// - this version works with a matrix of scalars (not ibz_t). +// - reduction modulo 2^f of matrix elements is removed here, because it is +// assumed that the elements are already cannonical representatives modulo +// 2^f; this is ensured by calling check_canonical_basis_change_matrix() at +// the beginning of protocols_verify(). +static int +matrix_scalar_application_even_basis(ec_basis_t *bas, const ec_curve_t *E, scalar_mtx_2x2_t *mat, int f) +{ + scalar_t scalar0, scalar1; + memset(scalar0, 0, NWORDS_ORDER * sizeof(digit_t)); + memset(scalar1, 0, NWORDS_ORDER * sizeof(digit_t)); + + ec_basis_t tmp_bas; + copy_basis(&tmp_bas, bas); + + // For a matrix [[a, c], [b, d]] we compute: + // + // first basis element R = [a]P + [b]Q + if (!ec_biscalar_mul(&bas->P, (*mat)[0][0], (*mat)[1][0], f, &tmp_bas, E)) + return 0; + // second basis element S = [c]P + [d]Q + if (!ec_biscalar_mul(&bas->Q, (*mat)[0][1], (*mat)[1][1], f, &tmp_bas, E)) + return 0; + // Their difference R - S = [a - c]P + [b - d]Q + mp_sub(scalar0, (*mat)[0][0], (*mat)[0][1], NWORDS_ORDER); + mp_mod_2exp(scalar0, f, NWORDS_ORDER); + mp_sub(scalar1, (*mat)[1][0], (*mat)[1][1], NWORDS_ORDER); + mp_mod_2exp(scalar1, f, NWORDS_ORDER); + return ec_biscalar_mul(&bas->PmQ, scalar0, scalar1, f, &tmp_bas, E); +} + +// Compute the bases for the challenge and auxillary curve from +// the canonical bases. Challenge basis is reconstructed from the +// compressed scalars within the challenge. 
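+// (Informal summary.) Both bases are first recovered at full order
+// 2^TORSION_EVEN_POWER from the hints in the signature and then doubled down:
+// the challenge basis to order 2^(pow_dim2_deg_resp + HD_extra_torsion + two_resp_length),
+// with the basis-change matrix from the signature applied to it, and the
+// auxiliary basis to order 2^(pow_dim2_deg_resp + HD_extra_torsion).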
+static int +challenge_and_aux_basis_verify(ec_basis_t *B_chall_can, + ec_basis_t *B_aux_can, + ec_curve_t *E_chall, + ec_curve_t *E_aux, + signature_t *sig, + const int pow_dim2_deg_resp) +{ + + // recovering the canonical basis as TORSION_EVEN_POWER for consistency with signing + if (!ec_curve_to_basis_2f_from_hint(B_chall_can, E_chall, TORSION_EVEN_POWER, sig->hint_chall)) + return 0; + + // setting to the right order + ec_dbl_iter_basis(B_chall_can, + TORSION_EVEN_POWER - pow_dim2_deg_resp - HD_extra_torsion - sig->two_resp_length, + B_chall_can, + E_chall); + + if (!ec_curve_to_basis_2f_from_hint(B_aux_can, E_aux, TORSION_EVEN_POWER, sig->hint_aux)) + return 0; + + // setting to the right order + ec_dbl_iter_basis(B_aux_can, TORSION_EVEN_POWER - pow_dim2_deg_resp - HD_extra_torsion, B_aux_can, E_aux); + +#ifndef NDEBUG + if (!test_basis_order_twof(B_chall_can, E_chall, HD_extra_torsion + pow_dim2_deg_resp + sig->two_resp_length)) + debug_print("canonical basis has wrong order, expect something to fail"); +#endif + + // applying the change matrix on the basis of E_chall + return matrix_scalar_application_even_basis(B_chall_can, + E_chall, + &sig->mat_Bchall_can_to_B_chall, + pow_dim2_deg_resp + HD_extra_torsion + sig->two_resp_length); +} + +// When two_resp_length is non-zero, we must compute a small 2^n-isogeny +// updating E_chall as the codomain as well as push the basis on E_chall +// through this isogeny +static int +two_response_isogeny_verify(ec_curve_t *E_chall, ec_basis_t *B_chall_can, const signature_t *sig, int pow_dim2_deg_resp) +{ + ec_point_t ker, points[3]; + + // choosing the right point for the small two_isogenies + if (mp_is_even(sig->mat_Bchall_can_to_B_chall[0][0], NWORDS_ORDER) && + mp_is_even(sig->mat_Bchall_can_to_B_chall[1][0], NWORDS_ORDER)) { + copy_point(&ker, &B_chall_can->Q); + } else { + copy_point(&ker, &B_chall_can->P); + } + + copy_point(&points[0], &B_chall_can->P); + copy_point(&points[1], &B_chall_can->Q); + copy_point(&points[2], &B_chall_can->PmQ); + + ec_dbl_iter(&ker, pow_dim2_deg_resp + HD_extra_torsion, &ker, E_chall); + +#ifndef NDEBUG + if (!test_point_order_twof(&ker, E_chall, sig->two_resp_length)) + debug_print("kernel does not have order 2^(two_resp_length"); +#endif + + if (ec_eval_small_chain(E_chall, &ker, sig->two_resp_length, points, 3, false)) { + return 0; + } + +#ifndef NDEBUG + if (!test_point_order_twof(&points[0], E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("points[0] does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); + if (!test_point_order_twof(&points[1], E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("points[1] does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); + if (!test_point_order_twof(&points[2], E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("points[2] does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); +#endif + + copy_point(&B_chall_can->P, &points[0]); + copy_point(&B_chall_can->Q, &points[1]); + copy_point(&B_chall_can->PmQ, &points[2]); + return 1; +} + +// The commitment curve can be recovered from the codomain of the 2D +// isogeny built from the bases computed during verification. 
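+// (Informal summary.) The (2^n,2^n)-isogeny is computed from E_chall x E_aux
+// with kernel generated by B_chall_can x B_aux_can; with the (2^n,2^n)-isogeny
+// formulae used here the commitment curve is always the first component E1 of
+// the codomain product.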
+static int +compute_commitment_curve_verify(ec_curve_t *E_com, + const ec_basis_t *B_chall_can, + const ec_basis_t *B_aux_can, + const ec_curve_t *E_chall, + const ec_curve_t *E_aux, + int pow_dim2_deg_resp) + +{ +#ifndef NDEBUG + // Check all the points are the correct order + if (!test_basis_order_twof(B_chall_can, E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("B_chall_can does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); + + if (!test_basis_order_twof(B_aux_can, E_aux, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("B_aux_can does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); +#endif + + // now compute the dim2 isogeny from Echall x E_aux -> E_com x E_aux' + // of kernel B_chall_can x B_aux_can + + // first we set-up the kernel + theta_couple_curve_t EchallxEaux; + copy_curve(&EchallxEaux.E1, E_chall); + copy_curve(&EchallxEaux.E2, E_aux); + + theta_kernel_couple_points_t dim_two_ker; + copy_bases_to_kernel(&dim_two_ker, B_chall_can, B_aux_can); + + // computing the isogeny + theta_couple_curve_t codomain; + int codomain_splits; + ec_curve_init(&codomain.E1); + ec_curve_init(&codomain.E2); + // handling the special case where we don't need to perform any dim2 computation + if (pow_dim2_deg_resp == 0) { + codomain_splits = 1; + copy_curve(&codomain.E1, &EchallxEaux.E1); + copy_curve(&codomain.E2, &EchallxEaux.E2); + // We still need to check that E_chall is supersingular + // This assumes that HD_extra_torsion == 2 + if (!ec_is_basis_four_torsion(B_chall_can, E_chall)) { + return 0; + } + } else { + codomain_splits = theta_chain_compute_and_eval_verify( + pow_dim2_deg_resp, &EchallxEaux, &dim_two_ker, true, &codomain, NULL, 0); + } + + // computing the commitment curve + // its always the first one because of our (2^n,2^n)-isogeny formulae + copy_curve(E_com, &codomain.E1); + + return codomain_splits; +} + +// SQIsign verification +int +protocols_verify(signature_t *sig, const public_key_t *pk, const unsigned char *m, size_t l) +{ + int verify; + + if (!check_canonical_basis_change_matrix(sig)) + return 0; + + // Computation of the length of the dim 2 2^n isogeny + int pow_dim2_deg_resp = SQIsign_response_length - (int)sig->two_resp_length - (int)sig->backtracking; + + // basic sanity test: checking that the response is not too long + if (pow_dim2_deg_resp < 0) + return 0; + // The dim 2 isogeny embeds a dim 1 isogeny of odd degree, so it can + // never be of length 2. 
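+ // (Informal note.) pow_dim2_deg_resp == 0 remains allowed: in that case no
+ // dim 2 chain is computed at all and compute_commitment_curve_verify() falls
+ // back to a direct four-torsion check on E_chall instead.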
+ if (pow_dim2_deg_resp == 1) + return 0; + + // check the public curve is valid + if (!ec_curve_verify_A(&(pk->curve).A)) + return 0; + + // Set auxiliary curve from the A-coefficient within the signature + ec_curve_t E_aux; + if (!ec_curve_init_from_A(&E_aux, &sig->E_aux_A)) + return 0; // invalid curve + + // checking that we are given A-coefficients and no precomputation + assert(fp2_is_one(&pk->curve.C) == 0xFFFFFFFF && !pk->curve.is_A24_computed_and_normalized); + + // computation of the challenge + ec_curve_t E_chall; + if (!compute_challenge_verify(&E_chall, sig, &pk->curve, pk->hint_pk)) { + return 0; + } + + // Computation of the canonical bases for the challenge and aux curve + ec_basis_t B_chall_can, B_aux_can; + + if (!challenge_and_aux_basis_verify(&B_chall_can, &B_aux_can, &E_chall, &E_aux, sig, pow_dim2_deg_resp)) { + return 0; + } + + // When two_resp_length != 0 we need to compute a second, short 2^r-isogeny + if (sig->two_resp_length > 0) { + if (!two_response_isogeny_verify(&E_chall, &B_chall_can, sig, pow_dim2_deg_resp)) { + return 0; + } + } + + // We can recover the commitment curve with a 2D isogeny + // The supplied signature did not compute an isogeny between eliptic products + // and so definitely is an invalid signature. + ec_curve_t E_com; + if (!compute_commitment_curve_verify(&E_com, &B_chall_can, &B_aux_can, &E_chall, &E_aux, pow_dim2_deg_resp)) + return 0; + + scalar_t chk_chall; + + // recomputing the challenge vector + hash_to_challenge(&chk_chall, pk, &E_com, m, l); + + // performing the final check + verify = mp_compare(sig->chall_coeff, chk_chall, NWORDS_ORDER) == 0; + + return verify; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/xeval.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/xeval.c new file mode 100644 index 0000000000..7fc7170423 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/xeval.c @@ -0,0 +1,64 @@ +#include "isog.h" +#include "ec.h" +#include + +// ----------------------------------------------------------------------------------------- +// ----------------------------------------------------------------------------------------- + +// Degree-2 isogeny evaluation with kenerl generated by P != (0, 0) +void +xeval_2(ec_point_t *R, ec_point_t *const Q, const int lenQ, const ec_kps2_t *kps) +{ + fp2_t t0, t1, t2; + for (int j = 0; j < lenQ; j++) { + fp2_add(&t0, &Q[j].x, &Q[j].z); + fp2_sub(&t1, &Q[j].x, &Q[j].z); + fp2_mul(&t2, &kps->K.x, &t1); + fp2_mul(&t1, &kps->K.z, &t0); + fp2_add(&t0, &t2, &t1); + fp2_sub(&t1, &t2, &t1); + fp2_mul(&R[j].x, &Q[j].x, &t0); + fp2_mul(&R[j].z, &Q[j].z, &t1); + } +} + +void +xeval_2_singular(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps2_t *kps) +{ + fp2_t t0, t1; + for (int i = 0; i < lenQ; i++) { + fp2_mul(&t0, &Q[i].x, &Q[i].z); + fp2_mul(&t1, &kps->K.x, &Q[i].z); + fp2_add(&t1, &t1, &Q[i].x); + fp2_mul(&t1, &t1, &Q[i].x); + fp2_sqr(&R[i].x, &Q[i].z); + fp2_add(&R[i].x, &R[i].x, &t1); + fp2_mul(&R[i].z, &t0, &kps->K.z); + } +} + +// Degree-4 isogeny evaluation with kenerl generated by P such that [2]P != (0, 0) +void +xeval_4(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps4_t *kps) +{ + const ec_point_t *K = kps->K; + + fp2_t t0, t1; + + for (int i = 0; i < lenQ; i++) { + fp2_add(&t0, &Q[i].x, &Q[i].z); + fp2_sub(&t1, &Q[i].x, &Q[i].z); + fp2_mul(&(R[i].x), &t0, &K[1].x); + fp2_mul(&(R[i].z), &t1, &K[2].x); + fp2_mul(&t0, &t0, &t1); + fp2_mul(&t0, &t0, &K[0].x); + fp2_add(&t1, &(R[i].x), &(R[i].z)); + fp2_sub(&(R[i].z), &(R[i].x), 
&(R[i].z)); + fp2_sqr(&t1, &t1); + fp2_sqr(&(R[i].z), &(R[i].z)); + fp2_add(&(R[i].x), &t0, &t1); + fp2_sub(&t0, &t0, &(R[i].z)); + fp2_mul(&(R[i].x), &(R[i].x), &t1); + fp2_mul(&(R[i].z), &(R[i].z), &t0); + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/xisog.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/xisog.c new file mode 100644 index 0000000000..7242d29433 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/xisog.c @@ -0,0 +1,61 @@ +#include "isog.h" +#include "ec.h" +#include + +// ------------------------------------------------------------------------- +// ------------------------------------------------------------------------- + +// Degree-2 isogeny with kernel generated by P != (0 ,0) +// Outputs the curve coefficient in the form A24=(A+2C:4C) +void +xisog_2(ec_kps2_t *kps, ec_point_t *B, const ec_point_t P) +{ + fp2_sqr(&B->x, &P.x); + fp2_sqr(&B->z, &P.z); + fp2_sub(&B->x, &B->z, &B->x); + fp2_add(&kps->K.x, &P.x, &P.z); + fp2_sub(&kps->K.z, &P.x, &P.z); +} + +void +xisog_2_singular(ec_kps2_t *kps, ec_point_t *B24, ec_point_t A24) +{ + // No need to check the square root, only used for signing. + fp2_t t0, four; + fp2_set_small(&four, 4); + fp2_add(&t0, &A24.x, &A24.x); + fp2_sub(&t0, &t0, &A24.z); + fp2_add(&t0, &t0, &t0); + fp2_inv(&A24.z); + fp2_mul(&t0, &t0, &A24.z); + fp2_copy(&kps->K.x, &t0); + fp2_add(&B24->x, &t0, &t0); + fp2_sqr(&t0, &t0); + fp2_sub(&t0, &t0, &four); + fp2_sqrt(&t0); + fp2_neg(&kps->K.z, &t0); + fp2_add(&B24->z, &t0, &t0); + fp2_add(&B24->x, &B24->x, &B24->z); + fp2_add(&B24->z, &B24->z, &B24->z); +} + +// Degree-4 isogeny with kernel generated by P such that [2]P != (0 ,0) +// Outputs the curve coefficient in the form A24=(A+2C:4C) +void +xisog_4(ec_kps4_t *kps, ec_point_t *B, const ec_point_t P) +{ + ec_point_t *K = kps->K; + + fp2_sqr(&K[0].x, &P.x); + fp2_sqr(&K[0].z, &P.z); + fp2_add(&K[1].x, &K[0].z, &K[0].x); + fp2_sub(&K[1].z, &K[0].z, &K[0].x); + fp2_mul(&B->x, &K[1].x, &K[1].z); + fp2_sqr(&B->z, &K[0].z); + + // Constants for xeval_4 + fp2_add(&K[2].x, &P.x, &P.z); + fp2_sub(&K[1].x, &P.x, &P.z); + fp2_add(&K[0].x, &K[0].z, &K[0].z); + fp2_add(&K[0].x, &K[0].x, &K[0].x); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/COPYING.LGPL b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/COPYING.LGPL new file mode 100644 index 0000000000..0a041280bd --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/COPYING.LGPL @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. 
+ + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. 
+ + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. 
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/LICENSE b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/LICENSE new file mode 100644 index 0000000000..8f71f43fee --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/NOTICE b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/NOTICE new file mode 100644 index 0000000000..6eccf392fa --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/NOTICE @@ -0,0 +1,21 @@ +Copyright 2023-2025 the SQIsign team. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +The DPE Library is (C) 2004-2024 Patrick Pelissier, Paul Zimmermann, +LORIA/INRIA, and licensed under the GNU Lesser General Public License, +version 3. You may obtain a copy of the License at + + https://www.gnu.org/licenses/lgpl-3.0.en.html + +or in the file COPYING.LGPL. diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes.h new file mode 100644 index 0000000000..e35ec3705b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes.h @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef AES_H +#define AES_H + +#include +#include + +void AES_256_ECB(const uint8_t *input, const uint8_t *key, uint8_t *output); +#define AES_ECB_encrypt AES_256_ECB + +#ifdef ENABLE_AESNI +int AES_128_CTR_NI(unsigned char *output, + size_t outputByteLen, + const unsigned char *input, + size_t inputByteLen); +int AES_128_CTR_4R_NI(unsigned char *output, + size_t outputByteLen, + const unsigned char *input, + size_t inputByteLen); +#define AES_128_CTR AES_128_CTR_NI +#else +int AES_128_CTR(unsigned char *output, + size_t outputByteLen, + const unsigned char *input, + size_t inputByteLen); +#endif + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes_ni.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes_ni.c new file mode 100644 index 0000000000..dc778fc9b6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes_ni.c @@ -0,0 +1,258 @@ +/*************************************************************************** +* This implementation is a modified version of the code, +* written by Nir Drucker and Shay Gueron +* AWS Cryptographic Algorithms Group +* (ndrucker@amazon.com, gueron@amazon.com) +* +* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +* +* Licensed under the Apache License, Version 2.0 (the "License"). +* You may not use this file except in compliance with the License. +* A copy of the License is located at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* or in the "license" file accompanying this file. This file is distributed +* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +* express or implied. 
See the License for the specific language governing +* permissions and limitations under the License. +* The license is detailed in the file LICENSE.txt, and applies to this file. +* ***************************************************************************/ + +#include "aes_ni.h" +#include + +#include +#include + +#define AESENC(m, key) _mm_aesenc_si128(m, key) +#define AESENCLAST(m, key) _mm_aesenclast_si128(m, key) +#define XOR(a, b) _mm_xor_si128(a, b) +#define ADD32(a, b) _mm_add_epi32(a, b) +#define SHUF8(a, mask) _mm_shuffle_epi8(a, mask) + +#define ZERO256 _mm256_zeroall + +#define BSWAP_MASK 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f + +#ifdef VAES256 +#define VAESENC(a, key) _mm256_aesenc_epi128(a, key) +#define VAESENCLAST(a, key) _mm256_aesenclast_epi128(a, key) +#define EXTRACT128(a, imm) _mm256_extracti128_si256(a, imm) +#define XOR256(a, b) _mm256_xor_si256(a,b) +#define ADD32_256(a, b) _mm256_add_epi32(a,b) +#define SHUF8_256(a, mask) _mm256_shuffle_epi8(a, mask) +#endif + +#ifdef VAES512 +#define VAESENC(a, key) _mm512_aesenc_epi128(a, key) +#define VAESENCLAST(a, key) _mm512_aesenclast_epi128(a, key) +#define EXTRACT128(a, imm) _mm512_extracti64x2_epi64(a, imm) +#define XOR512(a, b) _mm512_xor_si512(a,b) +#define ADD32_512(a, b) _mm512_add_epi32(a,b) +#define SHUF8_512(a, mask) _mm512_shuffle_epi8(a, mask) +#endif + +_INLINE_ __m128i load_m128i(IN const uint8_t *ctr) +{ + return _mm_set_epi8(ctr[0], ctr[1], ctr[2], ctr[3], + ctr[4], ctr[5], ctr[6], ctr[7], + ctr[8], ctr[9], ctr[10], ctr[11], + ctr[12], ctr[13], ctr[14], ctr[15]); +} + +_INLINE_ __m128i loadr_m128i(IN const uint8_t *ctr) +{ + return _mm_setr_epi8(ctr[0], ctr[1], ctr[2], ctr[3], + ctr[4], ctr[5], ctr[6], ctr[7], + ctr[8], ctr[9], ctr[10], ctr[11], + ctr[12], ctr[13], ctr[14], ctr[15]); +} + +void aes256_enc(OUT uint8_t *ct, + IN const uint8_t *pt, + IN const aes256_ks_t *ks) { + uint32_t i = 0; + __m128i block = loadr_m128i(pt); + + block = XOR(block, ks->keys[0]); + for (i = 1; i < AES256_ROUNDS; i++) { + block = AESENC(block, ks->keys[i]); + } + block = AESENCLAST(block, ks->keys[AES256_ROUNDS]); + + _mm_storeu_si128((void*)ct, block); + + // Delete secrets from registers if any. + ZERO256(); +} + +void aes256_ctr_enc(OUT uint8_t *ct, + IN const uint8_t *ctr, + IN const uint32_t num_blocks, + IN const aes256_ks_t *ks) +{ + __m128i ctr_block = load_m128i(ctr); + + const __m128i bswap_mask = _mm_set_epi32(BSWAP_MASK); + const __m128i one = _mm_set_epi32(0,0,0,1); + + __m128i block = SHUF8(ctr_block, bswap_mask); + + for (uint32_t bidx = 0; bidx < num_blocks; bidx++) + { + block = XOR(block, ks->keys[0]); + for (uint32_t i = 1; i < AES256_ROUNDS; i++) { + block = AESENC(block, ks->keys[i]); + } + block = AESENCLAST(block, ks->keys[AES256_ROUNDS]); + + //We use memcpy to avoid align casting. + _mm_storeu_si128((void*)&ct[16*bidx], block); + + ctr_block = ADD32(ctr_block, one); + block = SHUF8(ctr_block, bswap_mask); + } + + // Delete secrets from registers if any. + ZERO256(); +} + +#ifdef VAES256 +_INLINE_ void load_ks(OUT __m256i ks256[AES256_ROUNDS + 1], + IN const aes256_ks_t *ks) +{ + for(uint32_t i = 0; i < AES256_ROUNDS + 1; i++) + { + ks256[i] = _mm256_broadcastsi128_si256(ks->keys[i]); + } +} + +// NIST 800-90A Table 3, Section 10.2.1 (no derivation function) states that +// max_number_of_bits_per_request is min((2^ctr_len - 4) x block_len, 2^19) <= 2^19 +// Therefore the maximal number of blocks (16 bytes) is 2^19/128 = 2^19/2^7 = 2^12 < 2^32 +// Here num_blocks is assumed to be less then 2^32. 
+// It is the caller responsiblity to ensure it. +void aes256_ctr_enc256(OUT uint8_t *ct, + IN const uint8_t *ctr, + IN const uint32_t num_blocks, + IN const aes256_ks_t *ks) +{ + const uint64_t num_par_blocks = num_blocks/2; + const uint64_t blocks_rem = num_blocks - (2*(num_par_blocks)); + + __m256i ks256[AES256_ROUNDS + 1]; + load_ks(ks256, ks); + + __m128i single_block = load_m128i(ctr); + __m256i ctr_blocks = _mm256_broadcastsi128_si256(single_block); + + // Preparing the masks + const __m256i bswap_mask = _mm256_set_epi32(BSWAP_MASK, BSWAP_MASK); + const __m256i two = _mm256_set_epi32(0,0,0,2,0,0,0,2); + const __m256i init = _mm256_set_epi32(0,0,0,1,0,0,0,0); + + // Initialize two parallel counters + ctr_blocks = ADD32_256(ctr_blocks, init); + __m256i p = SHUF8_256(ctr_blocks, bswap_mask); + + for (uint32_t block_idx = 0; block_idx < num_par_blocks; block_idx++) + { + p = XOR256(p, ks256[0]); + for (uint32_t i = 1; i < AES256_ROUNDS; i++) + { + p = VAESENC(p, ks256[i]); + } + p = VAESENCLAST(p, ks256[AES256_ROUNDS]); + + // We use memcpy to avoid align casting. + _mm256_storeu_si256((__m256i *)&ct[PAR_AES_BLOCK_SIZE * block_idx], p); + + // Increase the two counters in parallel + ctr_blocks = ADD32_256(ctr_blocks, two); + p = SHUF8_256(ctr_blocks, bswap_mask); + } + + if(0 != blocks_rem) + { + single_block = EXTRACT128(p, 0); + aes256_ctr_enc(&ct[PAR_AES_BLOCK_SIZE * num_par_blocks], + (const uint8_t*)&single_block, blocks_rem, ks); + } + + // Delete secrets from registers if any. + ZERO256(); +} + +#endif //VAES256 + +#ifdef VAES512 + +_INLINE_ void load_ks(OUT __m512i ks512[AES256_ROUNDS + 1], + IN const aes256_ks_t *ks) +{ + for(uint32_t i = 0; i < AES256_ROUNDS + 1; i++) + { + ks512[i] = _mm512_broadcast_i32x4(ks->keys[i]); + } +} + +// NIST 800-90A Table 3, Section 10.2.1 (no derivation function) states that +// max_number_of_bits_per_request is min((2^ctr_len - 4) x block_len, 2^19) <= 2^19 +// Therefore the maximal number of blocks (16 bytes) is 2^19/128 = 2^19/2^7 = 2^12 < 2^32 +// Here num_blocks is assumed to be less then 2^32. +// It is the caller responsiblity to ensure it. +void aes256_ctr_enc512(OUT uint8_t *ct, + IN const uint8_t *ctr, + IN const uint32_t num_blocks, + IN const aes256_ks_t *ks) +{ + const uint64_t num_par_blocks = num_blocks/4; + const uint64_t blocks_rem = num_blocks - (4*(num_par_blocks)); + + __m512i ks512[AES256_ROUNDS + 1]; + load_ks(ks512, ks); + + __m128i single_block = load_m128i(ctr); + __m512i ctr_blocks = _mm512_broadcast_i32x4(single_block); + + // Preparing the masks + const __m512i bswap_mask = _mm512_set_epi32(BSWAP_MASK, BSWAP_MASK, + BSWAP_MASK, BSWAP_MASK); + const __m512i four = _mm512_set_epi32(0,0,0,4,0,0,0,4,0,0,0,4,0,0,0,4); + const __m512i init = _mm512_set_epi32(0,0,0,3,0,0,0,2,0,0,0,1,0,0,0,0); + + // Initialize four parallel counters + ctr_blocks = ADD32_512(ctr_blocks, init); + __m512i p = SHUF8_512(ctr_blocks, bswap_mask); + + for (uint32_t block_idx = 0; block_idx < num_par_blocks; block_idx++) + { + p = XOR512(p, ks512[0]); + for (uint32_t i = 1; i < AES256_ROUNDS; i++) + { + p = VAESENC(p, ks512[i]); + } + p = VAESENCLAST(p, ks512[AES256_ROUNDS]); + + + // We use memcpy to avoid align casting. 
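+ // (Note: the store below is the unaligned intrinsic _mm512_storeu_si512 rather than an
+ // actual memcpy, so no alignment is assumed for the ct buffer.)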
+ _mm512_storeu_si512(&ct[PAR_AES_BLOCK_SIZE * block_idx], p); + + // Increase the four counters in parallel + ctr_blocks = ADD32_512(ctr_blocks, four); + p = SHUF8_512(ctr_blocks, bswap_mask); + } + + if(0 != blocks_rem) + { + single_block = EXTRACT128(p, 0); + aes256_ctr_enc(&ct[PAR_AES_BLOCK_SIZE * num_par_blocks], + (const uint8_t*)&single_block, blocks_rem, ks); + } + + // Delete secrets from registers if any. + ZERO256(); +} + +#endif //VAES512 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes_ni.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes_ni.h new file mode 100644 index 0000000000..3d2b21ecf5 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes_ni.h @@ -0,0 +1,85 @@ +/*************************************************************************** +* Written by Nir Drucker and Shay Gueron +* AWS Cryptographic Algorithms Group +* (ndrucker@amazon.com, gueron@amazon.com) +* +* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +* +* Licensed under the Apache License, Version 2.0 (the "License"). +* You may not use this file except in compliance with the License. +* A copy of the License is located at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* or in the "license" file accompanying this file. This file is distributed +* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +* express or implied. See the License for the specific language governing +* permissions and limitations under the License. +* The license is detailed in the file LICENSE.txt, and applies to this file. +* ***************************************************************************/ + +#pragma once + +#include +#include +#include "defs.h" + +#define MAX_AES_INVOKATION (MASK(32)) + +#define AES256_KEY_SIZE (32ULL) +#define AES256_KEY_BITS (AES256_KEY_SIZE * 8) +#define AES_BLOCK_SIZE (16ULL) +#define AES256_ROUNDS (14ULL) + +#ifdef VAES256 +#define PAR_AES_BLOCK_SIZE (AES_BLOCK_SIZE*2) +#elif defined(VAES512) +#define PAR_AES_BLOCK_SIZE (AES_BLOCK_SIZE*4) +#endif + +typedef ALIGN(16) struct aes256_key_s { + uint8_t raw[AES256_KEY_SIZE]; +} aes256_key_t; + +typedef ALIGN(16) struct aes256_ks_s { + __m128i keys[AES256_ROUNDS + 1]; +} aes256_ks_t; + +// The ks parameter must be 16 bytes aligned! +EXTERNC void aes256_key_expansion(OUT aes256_ks_t *ks, + IN const aes256_key_t *key); + +// Encrypt one 128-bit block ct = E(pt,ks) +void aes256_enc(OUT uint8_t *ct, + IN const uint8_t *pt, + IN const aes256_ks_t *ks); + +// Encrypt num_blocks 128-bit blocks +// ct[15:0] = E(pt[15:0],ks) +// ct[31:16] = E(pt[15:0] + 1,ks) +// ... +// ct[16*num_blocks - 1:16*(num_blocks-1)] = E(pt[15:0] + num_blocks,ks) +void aes256_ctr_enc(OUT uint8_t *ct, + IN const uint8_t *pt, + IN const uint32_t num_blocks, + IN const aes256_ks_t *ks); + +// Encrypt num_blocks 128-bit blocks using VAES (AVX-2) +// ct[15:0] = E(pt[15:0],ks) +// ct[31:16] = E(pt[15:0] + 1,ks) +// ... +// ct[16*num_blocks - 1:16*(num_blocks-1)] = E(pt[15:0] + num_blocks,ks) +void aes256_ctr_enc256(OUT uint8_t *ct, + IN const uint8_t *ctr, + IN const uint32_t num_blocks, + IN const aes256_ks_t *ks); + +// Encrypt num_blocks 128-bit blocks using VAES (AVX512) +// ct[15:0] = E(pt[15:0],ks) +// ct[31:16] = E(pt[15:0] + 1,ks) +// ... 
+// ct[16*num_blocks - 1:16*(num_blocks-1)] = E(pt[15:0] + num_blocks,ks) +void aes256_ctr_enc512(OUT uint8_t *ct, + IN const uint8_t *ctr, + IN const uint32_t num_blocks, + IN const aes256_ks_t *ks); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/api.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/api.c new file mode 100644 index 0000000000..e01f911e87 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/api.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: Apache-2.0 + +#include +#include + +#if defined(ENABLE_SIGN) +SQISIGN_API +int +crypto_sign_keypair(unsigned char *pk, unsigned char *sk) +{ + + return sqisign_keypair(pk, sk); +} + +SQISIGN_API +int +crypto_sign(unsigned char *sm, unsigned long long *smlen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *sk) +{ + return sqisign_sign(sm, smlen, m, mlen, sk); +} +#endif + +SQISIGN_API +int +crypto_sign_open(unsigned char *m, unsigned long long *mlen, + const unsigned char *sm, unsigned long long smlen, + const unsigned char *pk) +{ + return sqisign_open(m, mlen, sm, smlen, pk); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/api.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/api.h new file mode 100644 index 0000000000..8a37d4ba4e --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/api.h @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef api_h +#define api_h + +#include + +#define CRYPTO_SECRETKEYBYTES 529 +#define CRYPTO_PUBLICKEYBYTES 97 +#define CRYPTO_BYTES 224 + +#define CRYPTO_ALGNAME "SQIsign_lvl3" + +#if defined(ENABLE_SIGN) +SQISIGN_API +int +crypto_sign_keypair(unsigned char *pk, unsigned char *sk); + +SQISIGN_API +int +crypto_sign(unsigned char *sm, unsigned long long *smlen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *sk); +#endif + +SQISIGN_API +int +crypto_sign_open(unsigned char *m, unsigned long long *mlen, + const unsigned char *sm, unsigned long long smlen, + const unsigned char *pk); + +#endif /* api_h */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/asm_preamble.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/asm_preamble.h new file mode 100644 index 0000000000..3ef7927e9c --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/asm_preamble.h @@ -0,0 +1,22 @@ +#ifdef __APPLE__ +#define CAT(A, B) _CAT(A, B) +#define _CAT(A, B) A##B +#undef fp_add +#undef fp_sub +#undef fp_mul +#undef fp_sqr +#undef fp2_mul_c0 +#undef fp2_mul_c1 +#undef fp2_sq_c0 +#undef fp2_sq_c1 +#define p2 CAT(_, p2) +#define p CAT(_, p) +#define fp_add CAT(_, SQISIGN_NAMESPACE(fp_add)) +#define fp_sub CAT(_, SQISIGN_NAMESPACE(fp_sub)) +#define fp_mul CAT(_, SQISIGN_NAMESPACE(fp_mul)) +#define fp_sqr CAT(_, SQISIGN_NAMESPACE(fp_sqr)) +#define fp2_mul_c0 CAT(_, SQISIGN_NAMESPACE(fp2_mul_c0)) +#define fp2_mul_c1 CAT(_, SQISIGN_NAMESPACE(fp2_mul_c1)) +#define fp2_sq_c0 CAT(_, SQISIGN_NAMESPACE(fp2_sq_c0)) +#define fp2_sq_c1 CAT(_, SQISIGN_NAMESPACE(fp2_sq_c1)) +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/basis.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/basis.c new file mode 100644 index 0000000000..94cb7fcacb --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/basis.c @@ -0,0 +1,416 @@ +#include "ec.h" +#include "fp2.h" +#include "e0_basis.h" +#include + +uint32_t +ec_recover_y(fp2_t *y, const fp2_t *Px, const ec_curve_t *curve) +{ // Recover y-coordinate of a point on the Montgomery curve y^2 = x^3 + 
Ax^2 + x + fp2_t t0; + + fp2_sqr(&t0, Px); + fp2_mul(y, &t0, &curve->A); // Ax^2 + fp2_add(y, y, Px); // Ax^2 + x + fp2_mul(&t0, &t0, Px); + fp2_add(y, y, &t0); // x^3 + Ax^2 + x + // This is required, because we do not yet know that our curves are + // supersingular so our points live on the twist with B = 1. + return fp2_sqrt_verify(y); +} + +static void +difference_point(ec_point_t *PQ, const ec_point_t *P, const ec_point_t *Q, const ec_curve_t *curve) +{ + // Given P,Q in projective x-only, computes a deterministic choice for (P-Q) + // Based on Proposition 3 of https://eprint.iacr.org/2017/518.pdf + + fp2_t Bxx, Bxz, Bzz, t0, t1; + + fp2_mul(&t0, &P->x, &Q->x); + fp2_mul(&t1, &P->z, &Q->z); + fp2_sub(&Bxx, &t0, &t1); + fp2_sqr(&Bxx, &Bxx); + fp2_mul(&Bxx, &Bxx, &curve->C); // C*(P.x*Q.x-P.z*Q.z)^2 + fp2_add(&Bxz, &t0, &t1); + fp2_mul(&t0, &P->x, &Q->z); + fp2_mul(&t1, &P->z, &Q->x); + fp2_add(&Bzz, &t0, &t1); + fp2_mul(&Bxz, &Bxz, &Bzz); // (P.x*Q.x+P.z*Q.z)(P.x*Q.z+P.z*Q.x) + fp2_sub(&Bzz, &t0, &t1); + fp2_sqr(&Bzz, &Bzz); + fp2_mul(&Bzz, &Bzz, &curve->C); // C*(P.x*Q.z-P.z*Q.x)^2 + fp2_mul(&Bxz, &Bxz, &curve->C); // C*(P.x*Q.x+P.z*Q.z)(P.x*Q.z+P.z*Q.x) + fp2_mul(&t0, &t0, &t1); + fp2_mul(&t0, &t0, &curve->A); + fp2_add(&t0, &t0, &t0); + fp2_add(&Bxz, &Bxz, &t0); // C*(P.x*Q.x+P.z*Q.z)(P.x*Q.z+P.z*Q.x) + 2*A*P.x*Q.z*P.z*Q.x + + // To ensure that the denominator is a fourth power in Fp, we normalize by + // C*C_bar^2*(P.z)_bar^2*(Q.z)_bar^2 + fp_copy(&t0.re, &curve->C.re); + fp_neg(&t0.im, &curve->C.im); + fp2_sqr(&t0, &t0); + fp2_mul(&t0, &t0, &curve->C); + fp_copy(&t1.re, &P->z.re); + fp_neg(&t1.im, &P->z.im); + fp2_sqr(&t1, &t1); + fp2_mul(&t0, &t0, &t1); + fp_copy(&t1.re, &Q->z.re); + fp_neg(&t1.im, &Q->z.im); + fp2_sqr(&t1, &t1); + fp2_mul(&t0, &t0, &t1); + fp2_mul(&Bxx, &Bxx, &t0); + fp2_mul(&Bxz, &Bxz, &t0); + fp2_mul(&Bzz, &Bzz, &t0); + + // Solving quadratic equation + fp2_sqr(&t0, &Bxz); + fp2_mul(&t1, &Bxx, &Bzz); + fp2_sub(&t0, &t0, &t1); + // No need to check if t0 is square, as per the entangled basis algorithm. + fp2_sqrt(&t0); + fp2_add(&PQ->x, &Bxz, &t0); + fp2_copy(&PQ->z, &Bzz); +} + +// Lifts a basis x(P), x(Q), x(P-Q) assuming the curve has (A/C : 1) and the point +// P = (X/Z : 1). 
For generic implementation see lift_basis() +uint32_t +lift_basis_normalized(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E) +{ + assert(fp2_is_one(&B->P.z)); + assert(fp2_is_one(&E->C)); + + fp2_copy(&P->x, &B->P.x); + fp2_copy(&Q->x, &B->Q.x); + fp2_copy(&Q->z, &B->Q.z); + fp2_set_one(&P->z); + uint32_t ret = ec_recover_y(&P->y, &P->x, E); + + // Algorithm of Okeya-Sakurai to recover y.Q in the montgomery model + fp2_t v1, v2, v3, v4; + fp2_mul(&v1, &P->x, &Q->z); + fp2_add(&v2, &Q->x, &v1); + fp2_sub(&v3, &Q->x, &v1); + fp2_sqr(&v3, &v3); + fp2_mul(&v3, &v3, &B->PmQ.x); + fp2_add(&v1, &E->A, &E->A); + fp2_mul(&v1, &v1, &Q->z); + fp2_add(&v2, &v2, &v1); + fp2_mul(&v4, &P->x, &Q->x); + fp2_add(&v4, &v4, &Q->z); + fp2_mul(&v2, &v2, &v4); + fp2_mul(&v1, &v1, &Q->z); + fp2_sub(&v2, &v2, &v1); + fp2_mul(&v2, &v2, &B->PmQ.z); + fp2_sub(&Q->y, &v3, &v2); + fp2_add(&v1, &P->y, &P->y); + fp2_mul(&v1, &v1, &Q->z); + fp2_mul(&v1, &v1, &B->PmQ.z); + fp2_mul(&Q->x, &Q->x, &v1); + fp2_mul(&Q->z, &Q->z, &v1); + + // Transforming to a jacobian coordinate + fp2_sqr(&v1, &Q->z); + fp2_mul(&Q->y, &Q->y, &v1); + fp2_mul(&Q->x, &Q->x, &Q->z); + return ret; +} + +uint32_t +lift_basis(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E) +{ + // Normalise the curve E such that (A : C) is (A/C : 1) + // and the point x(P) = (X/Z : 1). + fp2_t inverses[2]; + fp2_copy(&inverses[0], &B->P.z); + fp2_copy(&inverses[1], &E->C); + + fp2_batched_inv(inverses, 2); + fp2_set_one(&B->P.z); + fp2_set_one(&E->C); + + fp2_mul(&B->P.x, &B->P.x, &inverses[0]); + fp2_mul(&E->A, &E->A, &inverses[1]); + + // Lift the basis to Jacobian points P, Q + return lift_basis_normalized(P, Q, B, E); +} + +// Given an x-coordinate, determines if this is a valid +// point on the curve. Assumes C=1. +static uint32_t +is_on_curve(const fp2_t *x, const ec_curve_t *curve) +{ + assert(fp2_is_one(&curve->C)); + fp2_t t0; + + fp2_add(&t0, x, &curve->A); // x + (A/C) + fp2_mul(&t0, &t0, x); // x^2 + (A/C)*x + fp2_add_one(&t0, &t0); // x^2 + (A/C)*x + 1 + fp2_mul(&t0, &t0, x); // x^3 + (A/C)*x^2 + x + + return fp2_is_square(&t0); +} + +// Helper function which given a point of order k*2^n with n maximal +// and k odd, computes a point of order 2^f +static inline void +clear_cofactor_for_maximal_even_order(ec_point_t *P, ec_curve_t *curve, int f) +{ + // clear out the odd cofactor to get a point of order 2^n + ec_mul(P, p_cofactor_for_2f, P_COFACTOR_FOR_2F_BITLENGTH, P, curve); + + // clear the power of two to get a point of order 2^f + for (int i = 0; i < TORSION_EVEN_POWER - f; i++) { + xDBL_A24(P, P, &curve->A24, curve->is_A24_computed_and_normalized); + } +} + +// Helper function which finds an NQR -1 / (1 + i*b) for entangled basis generation +static uint8_t +find_nqr_factor(fp2_t *x, ec_curve_t *curve, const uint8_t start) +{ + // factor = -1/(1 + i*b) for b in Fp will be NQR whenever 1 + b^2 is NQR + // in Fp, so we find one of these and then invert (1 + i*b). We store b + // as a u8 hint to save time in verification. + + // We return the hint as a u8, but use (uint16_t)n to give 2^16 - 1 + // to make failure cryptographically negligible, with a fallback when + // n > 128 is required. 
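+ //
+ // Why the condition on b works (informal sketch, assuming i^2 = -1 in Fp2):
+ // an element of Fp2 is a square exactly when its norm to Fp is a square in Fp.
+ // The norm of 1 + i*b is 1 + b^2, so the norm of -1/(1 + i*b) is 1/(1 + b^2),
+ // which is a non-square in Fp precisely when 1 + b^2 is.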
+ uint8_t hint; + uint32_t found = 0; + uint16_t n = start; + + bool qr_b = 1; + fp_t b, tmp; + fp2_t z, t0, t1; + + do { + while (qr_b) { + // find b with 1 + b^2 a non-quadratic residue + fp_set_small(&tmp, (uint32_t)n * n + 1); + qr_b = fp_is_square(&tmp); + n++; // keeps track of b = n - 1 + } + + // for Px := -A/(1 + i*b) to be on the curve + // is equivalent to A^2*(z-1) - z^2 NQR for z = 1 + i*b + // thus prevents unnecessary inversion pre-check + + // t0 = z - 1 = i*b + // t1 = z = 1 + i*b + fp_set_small(&b, (uint32_t)n - 1); + fp2_set_zero(&t0); + fp2_set_one(&z); + fp_copy(&z.im, &b); + fp_copy(&t0.im, &b); + + // A^2*(z-1) - z^2 + fp2_sqr(&t1, &curve->A); + fp2_mul(&t0, &t0, &t1); // A^2 * (z - 1) + fp2_sqr(&t1, &z); + fp2_sub(&t0, &t0, &t1); // A^2 * (z - 1) - z^2 + found = !fp2_is_square(&t0); + + qr_b = 1; + } while (!found); + + // set Px to -A/(1 + i*b) + fp2_copy(x, &z); + fp2_inv(x); + fp2_mul(x, x, &curve->A); + fp2_neg(x, x); + + /* + * With very low probability n will not fit in 7 bits. + * We set hint = 0 which signals failure and the need + * to generate a value on the fly during verification + */ + hint = n <= 128 ? n - 1 : 0; + + return hint; +} + +// Helper function which finds a point x(P) = n * A +static uint8_t +find_nA_x_coord(fp2_t *x, ec_curve_t *curve, const uint8_t start) +{ + assert(!fp2_is_square(&curve->A)); // Only to be called when A is a NQR + + // when A is NQR we allow x(P) to be a multiple n*A of A + uint8_t n = start; + if (n == 1) { + fp2_copy(x, &curve->A); + } else { + fp2_mul_small(x, &curve->A, n); + } + + while (!is_on_curve(x, curve)) { + fp2_add(x, x, &curve->A); + n++; + } + + /* + * With very low probability (1/2^128), n will not fit in 7 bits. + * In this case, we set hint = 0 which signals failure and the need + * to generate a value on the fly during verification + */ + uint8_t hint = n < 128 ? 
n : 0; + return hint; +} + +// The entangled basis generation does not allow A = 0 +// so we simply return the one we have already precomputed +static void +ec_basis_E0_2f(ec_basis_t *PQ2, ec_curve_t *curve, int f) +{ + assert(fp2_is_zero(&curve->A)); + ec_point_t P, Q; + + // Set P, Q to precomputed (X : 1) values + fp2_copy(&P.x, &BASIS_E0_PX); + fp2_copy(&Q.x, &BASIS_E0_QX); + fp2_set_one(&P.z); + fp2_set_one(&Q.z); + + // clear the power of two to get a point of order 2^f + for (int i = 0; i < TORSION_EVEN_POWER - f; i++) { + xDBL_E0(&P, &P); + xDBL_E0(&Q, &Q); + } + + // Set P, Q in the basis and compute x(P - Q) + copy_point(&PQ2->P, &P); + copy_point(&PQ2->Q, &Q); + difference_point(&PQ2->PmQ, &P, &Q, curve); +} + +// Computes a basis E[2^f] = where the point Q is above (0 : 0) +// and stores hints as an array for faster recomputation at a later point +uint8_t +ec_curve_to_basis_2f_to_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f) +{ + // Normalise (A/C : 1) and ((A + 2)/4 : 1) + ec_normalize_curve_and_A24(curve); + + if (fp2_is_zero(&curve->A)) { + ec_basis_E0_2f(PQ2, curve, f); + return 0; + } + + uint8_t hint; + bool hint_A = fp2_is_square(&curve->A); + + // Compute the points P, Q + ec_point_t P, Q; + + if (!hint_A) { + // when A is NQR we allow x(P) to be a multiple n*A of A + hint = find_nA_x_coord(&P.x, curve, 1); + } else { + // when A is QR we instead have to find (1 + b^2) a NQR + // such that x(P) = -A / (1 + i*b) + hint = find_nqr_factor(&P.x, curve, 1); + } + + fp2_set_one(&P.z); + fp2_add(&Q.x, &curve->A, &P.x); + fp2_neg(&Q.x, &Q.x); + fp2_set_one(&Q.z); + + // clear out the odd cofactor to get a point of order 2^f + clear_cofactor_for_maximal_even_order(&P, curve, f); + clear_cofactor_for_maximal_even_order(&Q, curve, f); + + // compute PmQ, set PmQ to Q to ensure Q above (0,0) + difference_point(&PQ2->Q, &P, &Q, curve); + copy_point(&PQ2->P, &P); + copy_point(&PQ2->PmQ, &Q); + + // Finally, we compress hint_A and hint into a single bytes. 
+ // We choose to set the LSB of hint to hint_A + assert(hint < 128); // We expect hint to be 7-bits in size + return (hint << 1) | hint_A; +} + +// Computes a basis E[2^f] = where the point Q is above (0 : 0) +// given the hints as an array for faster basis computation +int +ec_curve_to_basis_2f_from_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f, const uint8_t hint) +{ + // Normalise (A/C : 1) and ((A + 2)/4 : 1) + ec_normalize_curve_and_A24(curve); + + if (fp2_is_zero(&curve->A)) { + ec_basis_E0_2f(PQ2, curve, f); + return 1; + } + + // The LSB of hint encodes whether A is a QR + // The remaining 7-bits are used to find a valid x(P) + bool hint_A = hint & 1; + uint8_t hint_P = hint >> 1; + + // Compute the points P, Q + ec_point_t P, Q; + + if (!hint_P) { + // When hint_P = 0 it means we did not find a point in 128 attempts + // this is very rare and we almost never expect to need this fallback + // In either case, we can start with b = 128 to skip testing the known + // values which will not work + if (!hint_A) { + find_nA_x_coord(&P.x, curve, 128); + } else { + find_nqr_factor(&P.x, curve, 128); + } + } else { + // Otherwise we use the hint to directly find x(P) based on hint_A + if (!hint_A) { + // when A is NQR, we have found n such that x(P) = n*A + fp2_mul_small(&P.x, &curve->A, hint_P); + } else { + // when A is QR we have found b such that (1 + b^2) is a NQR in + // Fp, so we must compute x(P) = -A / (1 + i*b) + fp_set_one(&P.x.re); + fp_set_small(&P.x.im, hint_P); + fp2_inv(&P.x); + fp2_mul(&P.x, &P.x, &curve->A); + fp2_neg(&P.x, &P.x); + } + } + fp2_set_one(&P.z); + +#ifndef NDEBUG + int passed = 1; + passed = is_on_curve(&P.x, curve); + passed &= !fp2_is_square(&P.x); + + if (!passed) + return 0; +#endif + + // set xQ to -xP - A + fp2_add(&Q.x, &curve->A, &P.x); + fp2_neg(&Q.x, &Q.x); + fp2_set_one(&Q.z); + + // clear out the odd cofactor to get a point of order 2^f + clear_cofactor_for_maximal_even_order(&P, curve, f); + clear_cofactor_for_maximal_even_order(&Q, curve, f); + + // compute PmQ, set PmQ to Q to ensure Q above (0,0) + difference_point(&PQ2->Q, &P, &Q, curve); + copy_point(&PQ2->P, &P); + copy_point(&PQ2->PmQ, &Q); + +#ifndef NDEBUG + passed &= test_basis_order_twof(PQ2, curve, f); + + if (!passed) + return 0; +#endif + + return 1; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/bench.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/bench.h new file mode 100644 index 0000000000..c253825828 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/bench.h @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: Apache-2.0 +#ifndef BENCH_H__ +#define BENCH_H__ + +#include +#include +#include +#include +#include +#if defined(__APPLE__) +#include "bench_macos.h" +#endif + +#if defined(TARGET_ARM) || defined(TARGET_S390X) || defined(NO_CYCLE_COUNTER) +#define BENCH_UNIT0 "nanoseconds" +#define BENCH_UNIT3 "microseconds" +#define BENCH_UNIT6 "milliseconds" +#define BENCH_UNIT9 "seconds" +#else +#define BENCH_UNIT0 "cycles" +#define BENCH_UNIT3 "kilocycles" +#define BENCH_UNIT6 "megacycles" +#define BENCH_UNIT9 "gigacycles" +#endif + +static inline void +cpucycles_init(void) { +#if defined(__APPLE__) && defined(TARGET_ARM64) + macos_init_rdtsc(); +#endif +} + +static inline uint64_t +cpucycles(void) +{ +#if defined(TARGET_AMD64) || defined(TARGET_X86) + uint32_t hi, lo; + + asm volatile("rdtsc" : "=a"(lo), "=d"(hi)); + return ((uint64_t)lo) | ((uint64_t)hi << 32); +#elif defined(TARGET_S390X) + uint64_t tod; + asm volatile("stckf %0\n" : 
"=Q"(tod) : : "cc"); + return (tod * 1000 / 4096); +#elif defined(TARGET_ARM64) && !defined(NO_CYCLE_COUNTER) +#if defined(__APPLE__) + return macos_rdtsc(); +#else + uint64_t cycles; + asm volatile("mrs %0, PMCCNTR_EL0" : "=r"(cycles)); + return cycles; +#endif // __APPLE__ +#else + struct timespec time; + clock_gettime(CLOCK_REALTIME, &time); + return (uint64_t)time.tv_sec * 1000000000 + time.tv_nsec; +#endif +} + +static inline int +CMPFUNC(const void *a, const void *b) +{ + uint64_t aa = *(uint64_t *)a, bb = *(uint64_t *)b; + + if (aa > bb) + return +1; + if (aa < bb) + return -1; + return 0; +} + +static inline uint32_t +ISQRT(uint64_t x) +{ + uint32_t r = 0; + for (ssize_t i = 31; i >= 0; --i) { + uint32_t s = r + (1 << i); + if ((uint64_t)s * s <= x) + r = s; + } + return r; +} + +static inline double +_TRUNC(uint64_t x) +{ + return x / 1000 / 1000.; +} +#define _FMT ".3lf" +#define _UNIT BENCH_UNIT6 + +#define BENCH_CODE_1(RUNS) \ + { \ + const size_t count = (RUNS); \ + if (!count) \ + abort(); \ + uint64_t cycles, cycles1, cycles2; \ + uint64_t cycles_list[count]; \ + cycles = 0; \ + for (size_t i = 0; i < count; ++i) { \ + cycles1 = cpucycles(); + +#define BENCH_CODE_2(name) \ + cycles2 = cpucycles(); \ + cycles_list[i] = cycles2 - cycles1; \ + cycles += cycles2 - cycles1; \ + } \ + qsort(cycles_list, count, sizeof(uint64_t), CMPFUNC); \ + uint64_t variance = 0; \ + for (size_t i = 0; i < count; ++i) { \ + int64_t off = cycles_list[i] - cycles / count; \ + variance += off * off; \ + } \ + variance /= count; \ + printf(" %-10s", name); \ + printf(" | average %9" _FMT " | stddev %9" _FMT, \ + _TRUNC(cycles / count), \ + _TRUNC(ISQRT(variance))); \ + printf(" | median %9" _FMT " | min %9" _FMT " | max %9" _FMT, \ + _TRUNC(cycles_list[count / 2]), \ + _TRUNC(cycles_list[0]), \ + _TRUNC(cycles_list[count - 1])); \ + printf(" (%s)\n", _UNIT); \ + } + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/bench_macos.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/bench_macos.h new file mode 100644 index 0000000000..0494fc85e9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/bench_macos.h @@ -0,0 +1,143 @@ +// WARNING: must be run as root on an M1 device +// WARNING: fragile, uses private apple APIs +// currently no command line interface, see variables at top of main + +/* +no warranty; use at your own risk - i believe this code needs +some minor changes to work on some later hardware and/or software revisions, +which is unsurprising given the use of undocumented, private APIs. +------------------------------------------------------------------------------ +This code is available under 2 licenses -- choose whichever you prefer. +------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2020 Dougall Johnson +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ + +/* + Based on https://github.com/travisdowns/robsize + Henry Wong + http://blog.stuffedcow.net/2013/05/measuring-rob-capacity/ + 2014-10-14 +*/ + +#include +#include +#include +#include + +#define KPERF_LIST \ + /* ret, name, params */ \ + F(int, kpc_force_all_ctrs_set, int) \ + F(int, kpc_set_counting, uint32_t) \ + F(int, kpc_set_thread_counting, uint32_t) \ + F(int, kpc_set_config, uint32_t, void *) \ + F(int, kpc_get_thread_counters, int, unsigned int, void *) + +#define F(ret, name, ...) \ + typedef ret name##proc(__VA_ARGS__); \ + static name##proc *name; +KPERF_LIST +#undef F + +#define CFGWORD_EL0A64EN_MASK (0x20000) + +#define CPMU_CORE_CYCLE 0x02 + +#define KPC_CLASS_FIXED (0) +#define KPC_CLASS_CONFIGURABLE (1) + +#define COUNTERS_COUNT 10 +#define KPC_MASK ((1u << KPC_CLASS_CONFIGURABLE) | (1u << KPC_CLASS_FIXED)) +static uint64_t g_config[COUNTERS_COUNT]; +static uint64_t g_counters[COUNTERS_COUNT]; + +static void +macos_configure_rdtsc() +{ + if (kpc_force_all_ctrs_set(1)) { + printf("kpc_force_all_ctrs_set failed\n"); + return; + } + + if (kpc_set_config(KPC_MASK, g_config)) { + printf("kpc_set_config failed\n"); + return; + } + + if (kpc_set_counting(KPC_MASK)) { + printf("kpc_set_counting failed\n"); + return; + } + + if (kpc_set_thread_counting(KPC_MASK)) { + printf("kpc_set_thread_counting failed\n"); + return; + } +} + +static void +macos_init_rdtsc() +{ + void *kperf = + dlopen("/System/Library/PrivateFrameworks/kperf.framework/Versions/A/kperf", RTLD_LAZY); + if (!kperf) { + printf("kperf = %p\n", kperf); + return; + } +#define F(ret, name, ...) 
\ + name = (name##proc *)(intptr_t)(dlsym(kperf, #name)); \ + if (!name) { \ + printf("%s = %p\n", #name, (void *)(intptr_t)name); \ + return; \ + } + KPERF_LIST +#undef F + + g_config[0] = CPMU_CORE_CYCLE | CFGWORD_EL0A64EN_MASK; + + macos_configure_rdtsc(); +} + +static uint64_t +macos_rdtsc(void) +{ + if (kpc_get_thread_counters(0, COUNTERS_COUNT, g_counters)) { + printf("kpc_get_thread_counters failed\n"); + return 1; + } + return g_counters[2]; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/biextension.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/biextension.c new file mode 100644 index 0000000000..1df7ab938b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/biextension.c @@ -0,0 +1,770 @@ +#include +#include +#include +#include + +/* + * We implement the biextension arithmetic by using the cubical torsor + * representation. For now only implement the 2^e-ladder. + * + * Warning: cubicalADD is off by a factor x4 with respect to the correct + * cubical arithmetic. This does not affect the Weil pairing or the Tate + * pairing over F_{p^2} (due to the final exponentiation), but would give + * the wrong result if we compute the Tate pairing over F_p. + */ + +// this would be exactly like xADD if PQ was 'antinormalised' as (1,z) +// Cost: 3M + 2S + 3a + 3s +// Note: if needed, cubicalDBL is simply xDBL_A24 normalized and +// costs 3M + 2S + 2a + 2s + +static void +cubicalADD(ec_point_t *R, const ec_point_t *P, const ec_point_t *Q, const fp2_t *ixPQ) +{ + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_add(&t2, &Q->x, &Q->z); + fp2_sub(&t3, &Q->x, &Q->z); + fp2_mul(&t0, &t0, &t3); + fp2_mul(&t1, &t1, &t2); + fp2_add(&t2, &t0, &t1); + fp2_sub(&t3, &t0, &t1); + fp2_sqr(&R->z, &t3); + fp2_sqr(&t2, &t2); + fp2_mul(&R->x, ixPQ, &t2); +} + +// Given cubical reps of P, Q and x(P - Q) = (1 : ixPQ) +// compute P + Q, [2]Q +// Cost: 6M + 4S + 4a + 4s +static void +cubicalDBLADD(ec_point_t *PpQ, + ec_point_t *QQ, + const ec_point_t *P, + const ec_point_t *Q, + const fp2_t *ixPQ, + const ec_point_t *A24) +{ + // A24 = (A+2C/4C: 1) + assert(fp2_is_one(&A24->z)); + + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_add(&PpQ->x, &Q->x, &Q->z); + fp2_sub(&t3, &Q->x, &Q->z); + fp2_sqr(&t2, &PpQ->x); + fp2_sqr(&QQ->z, &t3); + fp2_mul(&t0, &t0, &t3); + fp2_mul(&t1, &t1, &PpQ->x); + fp2_add(&PpQ->x, &t0, &t1); + fp2_sub(&t3, &t0, &t1); + fp2_sqr(&PpQ->z, &t3); + fp2_sqr(&PpQ->x, &PpQ->x); + fp2_mul(&PpQ->x, ixPQ, &PpQ->x); + fp2_sub(&t3, &t2, &QQ->z); + fp2_mul(&QQ->x, &t2, &QQ->z); + fp2_mul(&t0, &t3, &A24->x); + fp2_add(&t0, &t0, &QQ->z); + fp2_mul(&QQ->z, &t0, &t3); +} + +// iterative biextension doubling +static void +biext_ladder_2e(uint32_t e, + ec_point_t *PnQ, + ec_point_t *nQ, + const ec_point_t *PQ, + const ec_point_t *Q, + const fp2_t *ixP, + const ec_point_t *A24) +{ + copy_point(PnQ, PQ); + copy_point(nQ, Q); + for (uint32_t i = 0; i < e; i++) { + cubicalDBLADD(PnQ, nQ, PnQ, nQ, ixP, A24); + } +} + +// Compute the monodromy ratio X/Z above as a (X:Z) point to avoid a division +// We implicitly use (1,0) as a cubical point above 0_E +static void +point_ratio(ec_point_t *R, const ec_point_t *PnQ, const ec_point_t *nQ, const ec_point_t *P) +{ + // Sanity tests + assert(ec_is_zero(nQ)); + assert(ec_is_equal(PnQ, P)); + + fp2_mul(&R->x, &nQ->x, &P->x); + fp2_copy(&R->z, &PnQ->x); +} + +// Compute the cubical translation of P by a point of 2-torsion T +static void 
+translate(ec_point_t *P, const ec_point_t *T) +{ + // When we translate, the following three things can happen: + // T = (A : 0) then the translation of P should be P + // T = (0 : B) then the translation of P = (X : Z) should be (Z : X) + // Otherwise T = (A : B) and P translates to (AX - BZ : BX - AZ) + // We compute this in constant time by computing the generic case + // and then using constant time swaps. + fp2_t PX_new, PZ_new; + + { + fp2_t t0, t1; + + // PX_new = AX - BZ + fp2_mul(&t0, &T->x, &P->x); + fp2_mul(&t1, &T->z, &P->z); + fp2_sub(&PX_new, &t0, &t1); + + // PZ_new = BX - AZ + fp2_mul(&t0, &T->z, &P->x); + fp2_mul(&t1, &T->x, &P->z); + fp2_sub(&PZ_new, &t0, &t1); + } + + // When we have A zero we should return (Z : X) + uint32_t TA_is_zero = fp2_is_zero(&T->x); + fp2_select(&PX_new, &PX_new, &P->z, TA_is_zero); + fp2_select(&PZ_new, &PZ_new, &P->x, TA_is_zero); + + // When we have B zero we should return (X : Z) + uint32_t TB_is_zero = fp2_is_zero(&T->z); + fp2_select(&PX_new, &PX_new, &P->x, TB_is_zero); + fp2_select(&PZ_new, &PZ_new, &P->z, TB_is_zero); + + // Set the point to the desired result + fp2_copy(&P->x, &PX_new); + fp2_copy(&P->z, &PZ_new); +} + +// Compute the biextension monodromy g_P,Q^{2^g} (in level 1) via the +// cubical arithmetic of P+2^e Q. +// The suffix _i means that we are given 1/x(P) as parameter. Warning: to +// get meaningful result when using the monodromy to compute pairings, we +// need P, Q, PQ, A24 to be normalised (this is not strictly necessary, but +// care need to be taken when they are not normalised. Only handle the +// normalised case for now) +static void +monodromy_i(ec_point_t *R, const pairing_params_t *pairing_data, bool swap_PQ) +{ + fp2_t ixP; + ec_point_t P, Q, PnQ, nQ; + + // When we compute the Weil pairing we need both P + [2^e]Q and + // Q + [2^e]P which we can do easily with biext_ladder_2e() below + // we use a bool to decide wether to use Q, ixP or P, ixQ in the + // ladder and P or Q in translation. 
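+ //
+ // Roughly: the ladder below returns cubical representatives of P + [2^(e-1)]Q and
+ // [2^(e-1)]Q (a point of order 2 when Q has full order 2^e); translating both by
+ // that 2-torsion point and taking the ratio in point_ratio() yields the level-1
+ // monodromy consumed by weil_n() and reduced_tate().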
+ if (!swap_PQ) { + copy_point(&P, &pairing_data->P); + copy_point(&Q, &pairing_data->Q); + fp2_copy(&ixP, &pairing_data->ixP); + } else { + copy_point(&P, &pairing_data->Q); + copy_point(&Q, &pairing_data->P); + fp2_copy(&ixP, &pairing_data->ixQ); + } + + // Compute the biextension ladder P + [2^e]Q + biext_ladder_2e(pairing_data->e - 1, &PnQ, &nQ, &pairing_data->PQ, &Q, &ixP, &pairing_data->A24); + translate(&PnQ, &nQ); + translate(&nQ, &nQ); + point_ratio(R, &PnQ, &nQ, &P); +} + +// Normalize the points and also store 1/x(P), 1/x(Q) +static void +cubical_normalization(pairing_params_t *pairing_data, const ec_point_t *P, const ec_point_t *Q) +{ + fp2_t t[4]; + fp2_copy(&t[0], &P->x); + fp2_copy(&t[1], &P->z); + fp2_copy(&t[2], &Q->x); + fp2_copy(&t[3], &Q->z); + fp2_batched_inv(t, 4); + + // Store PZ / PX and QZ / QX + fp2_mul(&pairing_data->ixP, &P->z, &t[0]); + fp2_mul(&pairing_data->ixQ, &Q->z, &t[2]); + + // Store x(P), x(Q) normalised to (X/Z : 1) + fp2_mul(&pairing_data->P.x, &P->x, &t[1]); + fp2_mul(&pairing_data->Q.x, &Q->x, &t[3]); + fp2_set_one(&pairing_data->P.z); + fp2_set_one(&pairing_data->Q.z); +} + +// Weil pairing, PQ should be P+Q in (X:Z) coordinates +// We assume the points are normalised correctly +static void +weil_n(fp2_t *r, const pairing_params_t *pairing_data) +{ + ec_point_t R0, R1; + monodromy_i(&R0, pairing_data, true); + monodromy_i(&R1, pairing_data, false); + + fp2_mul(r, &R0.x, &R1.z); + fp2_inv(r); + fp2_mul(r, r, &R0.z); + fp2_mul(r, r, &R1.x); +} + +// Weil pairing, PQ should be P+Q in (X:Z) coordinates +// Normalise the points and call the code above +// The code will crash (division by 0) if either P or Q is (0:1) +void +weil(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E) +{ + pairing_params_t pairing_data; + // Construct the structure for the Weil pairing + // Set (PX/PZ : 1), (QX : QZ : 1), PZ/PX and QZ/QX + pairing_data.e = e; + cubical_normalization(&pairing_data, P, Q); + copy_point(&pairing_data.PQ, PQ); + + // Ensure the input curve has A24 normalised and store + // in a struct + ec_curve_normalize_A24(E); + copy_point(&pairing_data.A24, &E->A24); + + // Compute the Weil pairing e_(2^n)(P, Q) + weil_n(r, &pairing_data); +} + +// two helper functions for reducing the tate pairing +// clear_cofac clears (p + 1) // 2^f for an Fp2 value +void +clear_cofac(fp2_t *r, const fp2_t *a) +{ + digit_t exp = *p_cofactor_for_2f; + exp >>= 1; + + fp2_t x; + fp2_copy(&x, a); + fp2_copy(r, a); + + // removes cofac + while (exp > 0) { + fp2_sqr(r, r); + if (exp & 1) { + fp2_mul(r, r, &x); + } + exp >>= 1; + } +} + +// applies frobenius a + ib --> a - ib to an fp2 element +void +fp2_frob(fp2_t *out, const fp2_t *in) +{ + fp_copy(&(out->re), &(in->re)); + fp_neg(&(out->im), &(in->im)); +} + +// reduced Tate pairing, normalizes the points, assumes PQ is P+Q in (X:Z) +// coordinates. 
Computes 1/x(P) and 1/x(Q) for efficient cubical ladder +void +reduced_tate(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E) +{ + uint32_t e_full = TORSION_EVEN_POWER; + uint32_t e_diff = e_full - e; + ec_point_t R; + pairing_params_t pairing_data; + + // Construct the structure for the Weil pairing + // Set (PX/PZ : 1), (QX : QZ : 1), PZ/PX and QZ/QX + pairing_data.e = e; + cubical_normalization(&pairing_data, P, Q); + copy_point(&pairing_data.PQ, PQ); + + // Ensure the input curve has A24 normalised and store + // in a struct + ec_curve_normalize_A24(E); + copy_point(&pairing_data.A24, &E->A24); + + monodromy_i(&R, &pairing_data, true); + + // we get unreduced tate as R.X, R.Z + // reduced tate is -(R.Z/R.X)^((p^2 - 1) div 2^f) + // we reuse R.X and R.Z to split reduction step ^(p-1) into frobenius and ^-1 + fp2_t frob, tmp; + fp2_copy(&tmp, &R.x); + fp2_frob(&frob, &R.x); + fp2_mul(&R.x, &R.z, &frob); + fp2_frob(&frob, &R.z); + fp2_mul(&R.z, &tmp, &frob); + fp2_inv(&R.x); + fp2_mul(r, &R.x, &R.z); + + clear_cofac(r, r); + // clear remaining 2^e_diff + for (uint32_t j = 0; j < e_diff; j++) { + fp2_sqr(r, r); + } +} + +// Functions to compute discrete logs by computing the Weil pairing of points +// followed by computing the dlog in Fp^2 +// (If we work with full order points, it would be faster to use the Tate +// pairings rather than the Weil pairings; this is not implemented yet) + +// recursive dlog function +static bool +fp2_dlog_2e_rec(digit_t *a, long len, fp2_t *pows_f, fp2_t *pows_g, long stacklen) +{ + if (len == 0) { + // *a = 0; + for (int i = 0; i < NWORDS_ORDER; i++) { + a[i] = 0; + } + return true; + } else if (len == 1) { + if (fp2_is_one(&pows_f[stacklen - 1])) { + // a = 0; + for (int i = 0; i < NWORDS_ORDER; i++) { + a[i] = 0; + } + for (int i = 0; i < stacklen - 1; ++i) { + fp2_sqr(&pows_g[i], &pows_g[i]); // new_g = g^2 + } + return true; + } else if (fp2_is_equal(&pows_f[stacklen - 1], &pows_g[stacklen - 1])) { + // a = 1; + a[0] = 1; + for (int i = 1; i < NWORDS_ORDER; i++) { + a[i] = 0; + } + for (int i = 0; i < stacklen - 1; ++i) { + fp2_mul(&pows_f[i], &pows_f[i], &pows_g[i]); // new_f = f*g + fp2_sqr(&pows_g[i], &pows_g[i]); // new_g = g^2 + } + return true; + } else { + return false; + } + } else { + long right = (double)len * 0.5; + long left = len - right; + pows_f[stacklen] = pows_f[stacklen - 1]; + pows_g[stacklen] = pows_g[stacklen - 1]; + for (int i = 0; i < left; i++) { + fp2_sqr(&pows_f[stacklen], &pows_f[stacklen]); + fp2_sqr(&pows_g[stacklen], &pows_g[stacklen]); + } + // uint32_t dlp1 = 0, dlp2 = 0; + digit_t dlp1[NWORDS_ORDER], dlp2[NWORDS_ORDER]; + bool ok; + ok = fp2_dlog_2e_rec(dlp1, right, pows_f, pows_g, stacklen + 1); + if (!ok) + return false; + ok = fp2_dlog_2e_rec(dlp2, left, pows_f, pows_g, stacklen); + if (!ok) + return false; + // a = dlp1 + 2^right * dlp2 + multiple_mp_shiftl(dlp2, right, NWORDS_ORDER); + mp_add(a, dlp2, dlp1, NWORDS_ORDER); + + return true; + } +} + +// compute DLP: compute scal such that f = g^scal with f, 1/g as input +static bool +fp2_dlog_2e(digit_t *scal, const fp2_t *f, const fp2_t *g_inverse, int e) +{ + long log, len = e; + for (log = 0; len > 1; len >>= 1) + log++; + log += 1; + + fp2_t pows_f[log], pows_g[log]; + pows_f[0] = *f; + pows_g[0] = *g_inverse; + + for (int i = 0; i < NWORDS_ORDER; i++) { + scal[i] = 0; + } + + bool ok = fp2_dlog_2e_rec(scal, e, pows_f, pows_g, 1); + assert(ok); + + return ok; +} + +// Normalize the bases (P, Q), (R, S) and store 
their inverse +// and additionally normalise the curve to (A/C : 1) +static void +cubical_normalization_dlog(pairing_dlog_params_t *pairing_dlog_data, ec_curve_t *curve) +{ + fp2_t t[11]; + ec_basis_t *PQ = &pairing_dlog_data->PQ; + ec_basis_t *RS = &pairing_dlog_data->RS; + fp2_copy(&t[0], &PQ->P.x); + fp2_copy(&t[1], &PQ->P.z); + fp2_copy(&t[2], &PQ->Q.x); + fp2_copy(&t[3], &PQ->Q.z); + fp2_copy(&t[4], &PQ->PmQ.x); + fp2_copy(&t[5], &PQ->PmQ.z); + fp2_copy(&t[6], &RS->P.x); + fp2_copy(&t[7], &RS->P.z); + fp2_copy(&t[8], &RS->Q.x); + fp2_copy(&t[9], &RS->Q.z); + fp2_copy(&t[10], &curve->C); + + fp2_batched_inv(t, 11); + + fp2_mul(&pairing_dlog_data->ixP, &PQ->P.z, &t[0]); + fp2_mul(&PQ->P.x, &PQ->P.x, &t[1]); + fp2_set_one(&PQ->P.z); + + fp2_mul(&pairing_dlog_data->ixQ, &PQ->Q.z, &t[2]); + fp2_mul(&PQ->Q.x, &PQ->Q.x, &t[3]); + fp2_set_one(&PQ->Q.z); + + fp2_mul(&PQ->PmQ.x, &PQ->PmQ.x, &t[5]); + fp2_set_one(&PQ->PmQ.z); + + fp2_mul(&pairing_dlog_data->ixR, &RS->P.z, &t[6]); + fp2_mul(&RS->P.x, &RS->P.x, &t[7]); + fp2_set_one(&RS->P.z); + + fp2_mul(&pairing_dlog_data->ixS, &RS->Q.z, &t[8]); + fp2_mul(&RS->Q.x, &RS->Q.x, &t[9]); + fp2_set_one(&RS->Q.z); + + fp2_mul(&curve->A, &curve->A, &t[10]); + fp2_set_one(&curve->C); +} + +// Given two bases and basis = compute +// x(P - R), x(P - S), x(R - Q), x(S - Q) +static void +compute_difference_points(pairing_dlog_params_t *pairing_dlog_data, ec_curve_t *curve) +{ + jac_point_t xyP, xyQ, xyR, xyS, temp; + + // lifting the two basis points, assumes that x(P) and x(R) + // and the curve itself are normalised to (X : 1) + lift_basis_normalized(&xyP, &xyQ, &pairing_dlog_data->PQ, curve); + lift_basis_normalized(&xyR, &xyS, &pairing_dlog_data->RS, curve); + + // computation of the differences + // x(P - R) + jac_neg(&temp, &xyR); + ADD(&temp, &temp, &xyP, curve); + jac_to_xz(&pairing_dlog_data->diff.PmR, &temp); + + // x(P - S) + jac_neg(&temp, &xyS); + ADD(&temp, &temp, &xyP, curve); + jac_to_xz(&pairing_dlog_data->diff.PmS, &temp); + + // x(R - Q) + jac_neg(&temp, &xyQ); + ADD(&temp, &temp, &xyR, curve); + jac_to_xz(&pairing_dlog_data->diff.RmQ, &temp); + + // x(S - Q) + jac_neg(&temp, &xyQ); + ADD(&temp, &temp, &xyS, curve); + jac_to_xz(&pairing_dlog_data->diff.SmQ, &temp); +} + +// Inline all the Weil pairing computations needed for ec_dlog_2_weil +static void +weil_dlog(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, pairing_dlog_params_t *pairing_dlog_data) +{ + + ec_point_t nP, nQ, nR, nS, nPQ, PnQ, nPR, PnR, nPS, PnS, nRQ, RnQ, nSQ, SnQ; + + copy_point(&nP, &pairing_dlog_data->PQ.P); + copy_point(&nQ, &pairing_dlog_data->PQ.Q); + copy_point(&nR, &pairing_dlog_data->RS.P); + copy_point(&nS, &pairing_dlog_data->RS.Q); + copy_point(&nPQ, &pairing_dlog_data->PQ.PmQ); + copy_point(&PnQ, &pairing_dlog_data->PQ.PmQ); + copy_point(&nPR, &pairing_dlog_data->diff.PmR); + copy_point(&nPS, &pairing_dlog_data->diff.PmS); + copy_point(&PnR, &pairing_dlog_data->diff.PmR); + copy_point(&PnS, &pairing_dlog_data->diff.PmS); + copy_point(&nRQ, &pairing_dlog_data->diff.RmQ); + copy_point(&nSQ, &pairing_dlog_data->diff.SmQ); + copy_point(&RnQ, &pairing_dlog_data->diff.RmQ); + copy_point(&SnQ, &pairing_dlog_data->diff.SmQ); + + for (uint32_t i = 0; i < pairing_dlog_data->e - 1; i++) { + cubicalADD(&nPQ, &nPQ, &nP, &pairing_dlog_data->ixQ); + cubicalADD(&nPR, &nPR, &nP, &pairing_dlog_data->ixR); + cubicalDBLADD(&nPS, &nP, &nPS, &nP, &pairing_dlog_data->ixS, &pairing_dlog_data->A24); + + cubicalADD(&PnQ, &PnQ, &nQ, &pairing_dlog_data->ixP); + cubicalADD(&RnQ, 
&RnQ, &nQ, &pairing_dlog_data->ixR); + cubicalDBLADD(&SnQ, &nQ, &SnQ, &nQ, &pairing_dlog_data->ixS, &pairing_dlog_data->A24); + + cubicalADD(&PnR, &PnR, &nR, &pairing_dlog_data->ixP); + cubicalDBLADD(&nRQ, &nR, &nRQ, &nR, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + + cubicalADD(&PnS, &PnS, &nS, &pairing_dlog_data->ixP); + cubicalDBLADD(&nSQ, &nS, &nSQ, &nS, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + } + + // weil(&w0,e,&PQ->P,&PQ->Q,&PQ->PmQ,&A24); + translate(&nPQ, &nP); + translate(&nPR, &nP); + translate(&nPS, &nP); + translate(&PnQ, &nQ); + translate(&RnQ, &nQ); + translate(&SnQ, &nQ); + translate(&PnR, &nR); + translate(&nRQ, &nR); + translate(&PnS, &nS); + translate(&nSQ, &nS); + + translate(&nP, &nP); + translate(&nQ, &nQ); + translate(&nR, &nR); + translate(&nS, &nS); + + // computation of the reference weil pairing + ec_point_t T0, T1; + fp2_t w1[5], w2[5]; + + // e(P, Q) = w0 + point_ratio(&T0, &nPQ, &nP, &pairing_dlog_data->PQ.Q); + point_ratio(&T1, &PnQ, &nQ, &pairing_dlog_data->PQ.P); + // For the first element we need it's inverse for + // fp2_dlog_2e so we swap w1 and w2 here to save inversions + fp2_mul(&w2[0], &T0.x, &T1.z); + fp2_mul(&w1[0], &T1.x, &T0.z); + + // e(P,R) = w0^r2 + point_ratio(&T0, &nPR, &nP, &pairing_dlog_data->RS.P); + point_ratio(&T1, &PnR, &nR, &pairing_dlog_data->PQ.P); + fp2_mul(&w1[1], &T0.x, &T1.z); + fp2_mul(&w2[1], &T1.x, &T0.z); + + // e(R,Q) = w0^r1 + point_ratio(&T0, &nRQ, &nR, &pairing_dlog_data->PQ.Q); + point_ratio(&T1, &RnQ, &nQ, &pairing_dlog_data->RS.P); + fp2_mul(&w1[2], &T0.x, &T1.z); + fp2_mul(&w2[2], &T1.x, &T0.z); + + // e(P,S) = w0^s2 + point_ratio(&T0, &nPS, &nP, &pairing_dlog_data->RS.Q); + point_ratio(&T1, &PnS, &nS, &pairing_dlog_data->PQ.P); + fp2_mul(&w1[3], &T0.x, &T1.z); + fp2_mul(&w2[3], &T1.x, &T0.z); + + // e(S,Q) = w0^s1 + point_ratio(&T0, &nSQ, &nS, &pairing_dlog_data->PQ.Q); + point_ratio(&T1, &SnQ, &nQ, &pairing_dlog_data->RS.Q); + fp2_mul(&w1[4], &T0.x, &T1.z); + fp2_mul(&w2[4], &T1.x, &T0.z); + + fp2_batched_inv(w1, 5); + for (int i = 0; i < 5; i++) { + fp2_mul(&w1[i], &w1[i], &w2[i]); + } + + fp2_dlog_2e(r2, &w1[1], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(r1, &w1[2], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s2, &w1[3], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s1, &w1[4], &w1[0], pairing_dlog_data->e); +} + +void +ec_dlog_2_weil(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e) +{ + assert(test_point_order_twof(&PQ->Q, curve, e)); + + // precomputing the correct curve data + ec_curve_normalize_A24(curve); + + pairing_dlog_params_t pairing_dlog_data; + pairing_dlog_data.e = e; + pairing_dlog_data.PQ = *PQ; + pairing_dlog_data.RS = *RS; + pairing_dlog_data.A24 = curve->A24; + + cubical_normalization_dlog(&pairing_dlog_data, curve); + compute_difference_points(&pairing_dlog_data, curve); + + weil_dlog(r1, r2, s1, s2, &pairing_dlog_data); + +#ifndef NDEBUG + ec_point_t test; + ec_biscalar_mul(&test, r1, r2, e, PQ, curve); + // R = [r1]P + [r2]Q + assert(ec_is_equal(&test, &RS->P)); + ec_biscalar_mul(&test, s1, s2, e, PQ, curve); + // S = [s1]P + [s2]Q + assert(ec_is_equal(&test, &RS->Q)); +#endif +} + +// Inline all the Tate pairing computations needed for ec_dlog_2_weil +// including reduction, assumes a bases PQ of full E[2^e_full] torsion +// and a bases RS of smaller E[2^e] torsion +static void +tate_dlog_partial(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, pairing_dlog_params_t *pairing_dlog_data) 
+{ + + uint32_t e_full = TORSION_EVEN_POWER; + uint32_t e_diff = e_full - pairing_dlog_data->e; + + ec_point_t nP, nQ, nR, nS, nPQ, PnR, PnS, nRQ, nSQ; + + copy_point(&nP, &pairing_dlog_data->PQ.P); + copy_point(&nQ, &pairing_dlog_data->PQ.Q); + copy_point(&nR, &pairing_dlog_data->RS.P); + copy_point(&nS, &pairing_dlog_data->RS.Q); + copy_point(&nPQ, &pairing_dlog_data->PQ.PmQ); + copy_point(&PnR, &pairing_dlog_data->diff.PmR); + copy_point(&PnS, &pairing_dlog_data->diff.PmS); + copy_point(&nRQ, &pairing_dlog_data->diff.RmQ); + copy_point(&nSQ, &pairing_dlog_data->diff.SmQ); + + for (uint32_t i = 0; i < e_full - 1; i++) { + cubicalDBLADD(&nPQ, &nP, &nPQ, &nP, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + } + + for (uint32_t i = 0; i < pairing_dlog_data->e - 1; i++) { + cubicalADD(&PnR, &PnR, &nR, &pairing_dlog_data->ixP); + cubicalDBLADD(&nRQ, &nR, &nRQ, &nR, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + + cubicalADD(&PnS, &PnS, &nS, &pairing_dlog_data->ixP); + cubicalDBLADD(&nSQ, &nS, &nSQ, &nS, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + } + + translate(&nPQ, &nP); + translate(&PnR, &nR); + translate(&nRQ, &nR); + translate(&PnS, &nS); + translate(&nSQ, &nS); + + translate(&nP, &nP); + translate(&nQ, &nQ); + translate(&nR, &nR); + translate(&nS, &nS); + + // computation of the reference Tate pairing + ec_point_t T0; + fp2_t w1[5], w2[5]; + + // t(P, Q)^(2^e_diff) = w0 + point_ratio(&T0, &nPQ, &nP, &pairing_dlog_data->PQ.Q); + fp2_copy(&w1[0], &T0.x); + fp2_copy(&w2[0], &T0.z); + + // t(R,P) = w0^r2 + point_ratio(&T0, &PnR, &nR, &pairing_dlog_data->PQ.P); + fp2_copy(&w1[1], &T0.x); + fp2_copy(&w2[1], &T0.z); + + // t(R,Q) = w0^r1 + point_ratio(&T0, &nRQ, &nR, &pairing_dlog_data->PQ.Q); + fp2_copy(&w2[2], &T0.x); + fp2_copy(&w1[2], &T0.z); + + // t(S,P) = w0^s2 + point_ratio(&T0, &PnS, &nS, &pairing_dlog_data->PQ.P); + fp2_copy(&w1[3], &T0.x); + fp2_copy(&w2[3], &T0.z); + + // t(S,Q) = w0^s1 + point_ratio(&T0, &nSQ, &nS, &pairing_dlog_data->PQ.Q); + fp2_copy(&w2[4], &T0.x); + fp2_copy(&w1[4], &T0.z); + + // batched reduction using projective representation + for (int i = 0; i < 5; i++) { + fp2_t frob, tmp; + fp2_copy(&tmp, &w1[i]); + // inline frobenius for ^p + // multiply by inverse to get ^(p-1) + fp2_frob(&frob, &w1[i]); + fp2_mul(&w1[i], &w2[i], &frob); + + // repeat for denom + fp2_frob(&frob, &w2[i]); + fp2_mul(&w2[i], &tmp, &frob); + } + + // batched normalization + fp2_batched_inv(w2, 5); + for (int i = 0; i < 5; i++) { + fp2_mul(&w1[i], &w1[i], &w2[i]); + } + + for (int i = 0; i < 5; i++) { + clear_cofac(&w1[i], &w1[i]); + + // removes 2^e_diff + for (uint32_t j = 0; j < e_diff; j++) { + fp2_sqr(&w1[i], &w1[i]); + } + } + + fp2_dlog_2e(r2, &w1[1], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(r1, &w1[2], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s2, &w1[3], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s1, &w1[4], &w1[0], pairing_dlog_data->e); +} + +void +ec_dlog_2_tate(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + const ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e) +{ + // assume PQ is a full torsion basis + // returns a, b, c, d such that R = [a]P + [b]Q, S = [c]P + [d]Q + +#ifndef NDEBUG + int e_full = TORSION_EVEN_POWER; + int e_diff = e_full - e; +#endif + assert(test_basis_order_twof(PQ, curve, e_full)); + + // precomputing the correct curve data + ec_curve_normalize_A24(curve); + + pairing_dlog_params_t pairing_dlog_data; + pairing_dlog_data.e = e; + pairing_dlog_data.PQ = *PQ; + pairing_dlog_data.RS = *RS; + 
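+    // Note: curve->A24 is already in normalised form at this point, thanks to
+    // the ec_curve_normalize_A24() call above.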
pairing_dlog_data.A24 = curve->A24; + + cubical_normalization_dlog(&pairing_dlog_data, curve); + compute_difference_points(&pairing_dlog_data, curve); + tate_dlog_partial(r1, r2, s1, s2, &pairing_dlog_data); + +#ifndef NDEBUG + ec_point_t test; + ec_biscalar_mul(&test, r1, r2, e, PQ, curve); + ec_dbl_iter(&test, e_diff, &test, curve); + // R = [r1]P + [r2]Q + assert(ec_is_equal(&test, &RS->P)); + + ec_biscalar_mul(&test, s1, s2, e, PQ, curve); + ec_dbl_iter(&test, e_diff, &test, curve); + // S = [s1]P + [s2]Q + assert(ec_is_equal(&test, &RS->Q)); +#endif +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/biextension.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/biextension.h new file mode 100644 index 0000000000..1a50fcc738 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/biextension.h @@ -0,0 +1,82 @@ +#ifndef _BIEXT_H_ +#define _BIEXT_H_ + +#include +#include + +typedef struct pairing_params +{ + uint32_t e; // Points have order 2^e + ec_point_t P; // x(P) + ec_point_t Q; // x(Q) + ec_point_t PQ; // x(P-Q) = (PQX/PQZ : 1) + fp2_t ixP; // PZ/PX + fp2_t ixQ; // QZ/QX + ec_point_t A24; // ((A+2)/4 : 1) +} pairing_params_t; + +// For two bases and store: +// x(P - R), x(P - S), x(R - Q), x(S - Q) +typedef struct pairing_dlog_diff_points +{ + ec_point_t PmR; // x(P - R) + ec_point_t PmS; // x(P - S) + ec_point_t RmQ; // x(R - Q) + ec_point_t SmQ; // x(S - Q) +} pairing_dlog_diff_points_t; + +typedef struct pairing_dlog_params +{ + uint32_t e; // Points have order 2^e + ec_basis_t PQ; // x(P), x(Q), x(P-Q) + ec_basis_t RS; // x(R), x(S), x(R-S) + pairing_dlog_diff_points_t diff; // x(P - R), x(P - S), x(R - Q), x(S - Q) + fp2_t ixP; // PZ/PX + fp2_t ixQ; // QZ/QX + fp2_t ixR; // RZ/RX + fp2_t ixS; // SZ/SX + ec_point_t A24; // ((A+2)/4 : 1) +} pairing_dlog_params_t; + +// Computes e = e_{2^e}(P, Q) using biextension ladder +void weil(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E); + +// Computes (reduced) z = t_{2^e}(P, Q) using biextension ladder +void reduced_tate(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E); + +// Given two bases and computes scalars +// such that R = [r1]P + [r2]Q, S = [s1]P + [s2]Q +void ec_dlog_2_weil(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e); + +// Given two bases and +// where is a basis for E[2^f] +// the full 2-torsion, and a basis +// for smaller torsion E[2^e] +// computes scalars r1, r2, s1, s2 +// such that R = [r1]P + [r2]Q, S = [s1]P + [s2]Q +void ec_dlog_2_tate(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + const ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e); + +void ec_dlog_2_tate_to_full(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + ec_basis_t *PQ, + ec_basis_t *RS, + ec_curve_t *curve, + int e); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/common.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/common.c new file mode 100644 index 0000000000..d393e9cb11 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/common.c @@ -0,0 +1,88 @@ +#include +#include +#include +#include +#include +#include + +void +public_key_init(public_key_t *pk) +{ + ec_curve_init(&pk->curve); +} + +void +public_key_finalize(public_key_t *pk) +{ +} + +// compute the challenge as the hash of the message and the commitment curve and public key 
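+// Note on the structure below: the j-invariants of the public-key and
+// commitment curves are encoded and absorbed into SHAKE256 together with the
+// message; the squeezed output is masked and iteratively re-hashed, and a
+// final masked squeeze is reduced mod 2^SECURITY_BITS to give the challenge
+// scalar.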
+void +hash_to_challenge(scalar_t *scalar, + const public_key_t *pk, + const ec_curve_t *com_curve, + const unsigned char *message, + size_t length) +{ + unsigned char buf[2 * FP2_ENCODED_BYTES]; + { + fp2_t j1, j2; + ec_j_inv(&j1, &pk->curve); + ec_j_inv(&j2, com_curve); + fp2_encode(buf, &j1); + fp2_encode(buf + FP2_ENCODED_BYTES, &j2); + } + + { + // The type scalar_t represents an element of GF(p), which is about + // 2*lambda bits, where lambda = 128, 192 or 256, according to the + // security level. Thus, the variable scalar should have enough memory + // for the values produced by SHAKE256 in the intermediate iterations. + + shake256incctx ctx; + + size_t hash_bytes = ((2 * SECURITY_BITS) + 7) / 8; + size_t limbs = (hash_bytes + sizeof(digit_t) - 1) / sizeof(digit_t); + size_t bits = (2 * SECURITY_BITS) % RADIX; + digit_t mask = ((digit_t)-1) >> ((RADIX - bits) % RADIX); +#ifdef TARGET_BIG_ENDIAN + mask = BSWAP_DIGIT(mask); +#endif + + shake256_inc_init(&ctx); + shake256_inc_absorb(&ctx, buf, 2 * FP2_ENCODED_BYTES); + shake256_inc_absorb(&ctx, message, length); + shake256_inc_finalize(&ctx); + shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + (*scalar)[limbs - 1] &= mask; + for (int i = 2; i < HASH_ITERATIONS; i++) { + shake256_inc_init(&ctx); + shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); + shake256_inc_finalize(&ctx); + shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + (*scalar)[limbs - 1] &= mask; + } + shake256_inc_init(&ctx); + shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); + shake256_inc_finalize(&ctx); + + hash_bytes = ((TORSION_EVEN_POWER - SQIsign_response_length) + 7) / 8; + limbs = (hash_bytes + sizeof(digit_t) - 1) / sizeof(digit_t); + bits = (TORSION_EVEN_POWER - SQIsign_response_length) % RADIX; + mask = ((digit_t)-1) >> ((RADIX - bits) % RADIX); +#ifdef TARGET_BIG_ENDIAN + mask = BSWAP_DIGIT(mask); +#endif + + memset(*scalar, 0, NWORDS_ORDER * sizeof(digit_t)); + shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + (*scalar)[limbs - 1] &= mask; + +#ifdef TARGET_BIG_ENDIAN + for (int i = 0; i < NWORDS_ORDER; i++) + (*scalar)[i] = BSWAP_DIGIT((*scalar)[i]); +#endif + + mp_mod_2exp(*scalar, SECURITY_BITS, NWORDS_ORDER); + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.c new file mode 100644 index 0000000000..983ba49adf --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.c @@ -0,0 +1,201 @@ +/* Copyright (c) 2017, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ + +/*************************************************************************** + * Small modification by Nir Drucker and Shay Gueron + * AWS Cryptographic Algorithms Group + * (ndrucker@amazon.com, gueron@amazon.com) + * include: + * 1) Use memcpy/memset instead of OPENSSL_memcpy/memset + * 2) Include aes.h as the underlying aes code + * 3) Modifying the drbg structure + * ***************************************************************************/ + +#include "ctr_drbg.h" +#include + + +// Section references in this file refer to SP 800-90Ar1: +// http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-90Ar1.pdf + +int CTR_DRBG_init(CTR_DRBG_STATE *drbg, + const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], + const uint8_t *personalization, size_t personalization_len) { + // Section 10.2.1.3.1 + if (personalization_len > CTR_DRBG_ENTROPY_LEN) { + return 0; + } + + uint8_t seed_material[CTR_DRBG_ENTROPY_LEN]; + memcpy(seed_material, entropy, CTR_DRBG_ENTROPY_LEN); + + for (size_t i = 0; i < personalization_len; i++) { + seed_material[i] ^= personalization[i]; + } + + // Section 10.2.1.2 + // kInitMask is the result of encrypting blocks with big-endian value 1, 2 + // and 3 with the all-zero AES-256 key. + static const uint8_t kInitMask[CTR_DRBG_ENTROPY_LEN] = { + 0x53, 0x0f, 0x8a, 0xfb, 0xc7, 0x45, 0x36, 0xb9, 0xa9, 0x63, 0xb4, 0xf1, + 0xc4, 0xcb, 0x73, 0x8b, 0xce, 0xa7, 0x40, 0x3d, 0x4d, 0x60, 0x6b, 0x6e, + 0x07, 0x4e, 0xc5, 0xd3, 0xba, 0xf3, 0x9d, 0x18, 0x72, 0x60, 0x03, 0xca, + 0x37, 0xa6, 0x2a, 0x74, 0xd1, 0xa2, 0xf5, 0x8e, 0x75, 0x06, 0x35, 0x8e, + }; + + for (size_t i = 0; i < sizeof(kInitMask); i++) { + seed_material[i] ^= kInitMask[i]; + } + + aes256_key_t key; + memcpy(key.raw, seed_material, 32); + memcpy(drbg->counter.bytes, seed_material + 32, 16); + + aes256_key_expansion(&drbg->ks, &key); + drbg->reseed_counter = 1; + + return 1; +} + +// ctr_inc adds |n| to the last four bytes of |drbg->counter|, treated as a +// big-endian number. +static void ctr32_add(CTR_DRBG_STATE *drbg, uint32_t n) { + drbg->counter.words[3] = + CRYPTO_bswap4(CRYPTO_bswap4(drbg->counter.words[3]) + n); +} + +static int ctr_drbg_update(CTR_DRBG_STATE *drbg, const uint8_t *data, + size_t data_len) { + // Per section 10.2.1.2, |data_len| must be |CTR_DRBG_ENTROPY_LEN|. Here, we + // allow shorter inputs and right-pad them with zeros. This is equivalent to + // the specified algorithm but saves a copy in |CTR_DRBG_generate|. 
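+  // Note: CTR_DRBG_ENTROPY_LEN is 48 bytes, which matches the new 32-byte
+  // AES-256 key plus the new 16-byte counter installed from |temp| below.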
+ if (data_len > CTR_DRBG_ENTROPY_LEN) { + return 0; + } + + uint8_t temp[CTR_DRBG_ENTROPY_LEN]; + for (size_t i = 0; i < CTR_DRBG_ENTROPY_LEN; i += AES_BLOCK_SIZE) { + ctr32_add(drbg, 1); + aes256_enc(temp + i, drbg->counter.bytes, &drbg->ks); + } + + for (size_t i = 0; i < data_len; i++) { + temp[i] ^= data[i]; + } + + aes256_key_t key; + memcpy(key.raw, temp, 32); + memcpy(drbg->counter.bytes, temp + 32, 16); + aes256_key_expansion(&drbg->ks, &key); + + return 1; +} + +int CTR_DRBG_reseed(CTR_DRBG_STATE *drbg, + const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], + const uint8_t *additional_data, + size_t additional_data_len) { + // Section 10.2.1.4 + uint8_t entropy_copy[CTR_DRBG_ENTROPY_LEN]; + + if (additional_data_len > 0) { + if (additional_data_len > CTR_DRBG_ENTROPY_LEN) { + return 0; + } + + memcpy(entropy_copy, entropy, CTR_DRBG_ENTROPY_LEN); + for (size_t i = 0; i < additional_data_len; i++) { + entropy_copy[i] ^= additional_data[i]; + } + + entropy = entropy_copy; + } + + if (!ctr_drbg_update(drbg, entropy, CTR_DRBG_ENTROPY_LEN)) { + return 0; + } + + drbg->reseed_counter = 1; + + return 1; +} + +int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out, size_t out_len, + const uint8_t *additional_data, + size_t additional_data_len) { + if (additional_data_len != 0 && + !ctr_drbg_update(drbg, additional_data, additional_data_len)) { + return 0; + } + + // kChunkSize is used to interact better with the cache. Since the AES-CTR + // code assumes that it's encrypting rather than just writing keystream, the + // buffer has to be zeroed first. Without chunking, large reads would zero + // the whole buffer, flushing the L1 cache, and then do another pass (missing + // the cache every time) to “encrypt” it. The code can avoid this by + // chunking. + static const size_t kChunkSize = 8 * 1024; + + while (out_len >= AES_BLOCK_SIZE) { + size_t todo = kChunkSize; + if (todo > out_len) { + todo = out_len; + } + + todo &= ~(AES_BLOCK_SIZE - 1); + + const size_t num_blocks = todo / AES_BLOCK_SIZE; + if (1) { + memset(out, 0, todo); + ctr32_add(drbg, 1); +#ifdef VAES512 + aes256_ctr_enc512(out, drbg->counter.bytes, num_blocks, &drbg->ks); +#elif defined(VAES256) + aes256_ctr_enc256(out, drbg->counter.bytes, num_blocks, &drbg->ks); +#else + aes256_ctr_enc(out, drbg->counter.bytes, num_blocks, &drbg->ks); +#endif + ctr32_add(drbg, num_blocks - 1); + } else { + for (size_t i = 0; i < todo; i += AES_BLOCK_SIZE) { + ctr32_add(drbg, 1); + aes256_enc(&out[i], drbg->counter.bytes, &drbg->ks); + } + } + + out += todo; + out_len -= todo; + } + + if (out_len > 0) { + uint8_t block[AES_BLOCK_SIZE]; + ctr32_add(drbg, 1); + aes256_enc(block, drbg->counter.bytes, &drbg->ks); + + memcpy(out, block, out_len); + } + + // Right-padding |additional_data| in step 2.2 is handled implicitly by + // |ctr_drbg_update|, to save a copy. + if (!ctr_drbg_update(drbg, additional_data, additional_data_len)) { + return 0; + } + + drbg->reseed_counter++; + return 1; +} + +void CTR_DRBG_clear(CTR_DRBG_STATE *drbg) { + secure_clean((uint8_t *)drbg, sizeof(CTR_DRBG_STATE)); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.h new file mode 100644 index 0000000000..2d1b1f3f0c --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.h @@ -0,0 +1,78 @@ +/* Copyright (c) 2017, Google Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +/*************************************************************************** +* Small modification by Nir Drucker and Shay Gueron +* AWS Cryptographic Algorithms Group +* (ndrucker@amazon.com, gueron@amazon.com) +* include: +* 1) Use memcpy/memset instead of OPENSSL_memcpy/memset +* 2) Include aes.h as the underlying aes code +* 3) Modifying the drbg structure +* ***************************************************************************/ + +#pragma once + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "aes_ni.h" + +// CTR_DRBG_STATE contains the state of a CTR_DRBG based on AES-256. See SP +// 800-90Ar1. +typedef struct { + aes256_ks_t ks; + union { + uint8_t bytes[16]; + uint32_t words[4]; + } counter; + uint64_t reseed_counter; +} CTR_DRBG_STATE; + +// See SP 800-90Ar1, table 3. +#define CTR_DRBG_ENTROPY_LEN 48 + +// CTR_DRBG_init initialises |*drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of +// entropy in |entropy| and, optionally, a personalization string up to +// |CTR_DRBG_ENTROPY_LEN| bytes in length. It returns one on success and zero +// on error. +int CTR_DRBG_init(CTR_DRBG_STATE *drbg, + const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], + const uint8_t *personalization, + size_t personalization_len); + +// CTR_DRBG_reseed reseeds |drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of entropy +// in |entropy| and, optionally, up to |CTR_DRBG_ENTROPY_LEN| bytes of +// additional data. It returns one on success or zero on error. +int CTR_DRBG_reseed(CTR_DRBG_STATE *drbg, + const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], + const uint8_t *additional_data, + size_t additional_data_len); + +// CTR_DRBG_generate processes to up |CTR_DRBG_ENTROPY_LEN| bytes of additional +// data (if any) and then writes |out_len| random bytes to |out|. It returns one on success or +// zero on error. +int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out, + size_t out_len, + const uint8_t *additional_data, + size_t additional_data_len); + +// CTR_DRBG_clear zeroises the state of |drbg|. +void CTR_DRBG_clear(CTR_DRBG_STATE *drbg); + + +#if defined(__cplusplus) +} // extern C +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/defs.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/defs.h new file mode 100644 index 0000000000..09bb8b5eba --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/defs.h @@ -0,0 +1,63 @@ +/*************************************************************************** +* Written by Nir Drucker and Shay Gueron +* AWS Cryptographic Algorithms Group +* (ndrucker@amazon.com, gueron@amazon.com) +* +* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +* +* Licensed under the Apache License, Version 2.0 (the "License"). +* You may not use this file except in compliance with the License. 
+* A copy of the License is located at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* or in the "license" file accompanying this file. This file is distributed +* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +* express or implied. See the License for the specific language governing +* permissions and limitations under the License. +* The license is detailed in the file LICENSE.txt, and applies to this file. +* ***************************************************************************/ + +#pragma once + +#include + +#ifdef __cplusplus + #define EXTERNC extern "C" +#else + #define EXTERNC +#endif + +// For code clarity. +#define IN +#define OUT + +#define ALIGN(n) __attribute__((aligned(n))) +#define _INLINE_ static inline + +typedef enum +{ + SUCCESS=0, + ERROR=1 +} status_t; + +#define SUCCESS 0 +#define ERROR 1 +#define GUARD(func) {if(SUCCESS != func) {return ERROR;}} + +#if defined(__GNUC__) && __GNUC__ >= 2 +static inline uint32_t CRYPTO_bswap4(uint32_t x) { + return __builtin_bswap32(x); +} +#endif + +_INLINE_ void secure_clean(OUT uint8_t *p, IN const uint32_t len) +{ +#ifdef _WIN32 + SecureZeroMemory(p, len); +#else + typedef void *(*memset_t)(void *, int, size_t); + static volatile memset_t memset_func = memset; + memset_func(p, 0, len); +#endif +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c new file mode 100644 index 0000000000..171473d481 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c @@ -0,0 +1,1172 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int +_fixed_degree_isogeny_impl(quat_left_ideal_t *lideal, + const ibz_t *u, + bool small, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP, + const int index_alternate_order) +{ + + // var declaration + int ret; + ibz_t two_pow, tmp; + quat_alg_elem_t theta; + + ec_curve_t E0; + copy_curve(&E0, &CURVES_WITH_ENDOMORPHISMS[index_alternate_order].curve); + ec_curve_normalize_A24(&E0); + + unsigned length; + + int u_bitsize = ibz_bitsize(u); + + // deciding the power of 2 of the dim2 isogeny we use for this + // the smaller the faster, but if it set too low there is a risk that + // RepresentInteger will fail + if (!small) { + // in that case, we just set it to be the biggest value possible + length = TORSION_EVEN_POWER - HD_extra_torsion; + } else { + length = ibz_bitsize(&QUATALG_PINFTY.p) + QUAT_repres_bound_input - u_bitsize; + assert(u_bitsize < (int)length); + assert(length < TORSION_EVEN_POWER - HD_extra_torsion); + } + assert(length); + + // var init + ibz_init(&two_pow); + ibz_init(&tmp); + quat_alg_elem_init(&theta); + + ibz_pow(&two_pow, &ibz_const_two, length); + ibz_copy(&tmp, u); + assert(ibz_cmp(&two_pow, &tmp) > 0); + assert(!ibz_is_even(&tmp)); + + // computing the endomorphism theta of norm u * (2^(length) - u) + ibz_sub(&tmp, &two_pow, &tmp); + ibz_mul(&tmp, &tmp, u); + assert(!ibz_is_even(&tmp)); + + // setting-up the quat_represent_integer_params + quat_represent_integer_params_t ri_params; + ri_params.primality_test_iterations = QUAT_represent_integer_params.primality_test_iterations; + + quat_p_extremal_maximal_order_t order_hnf; + quat_alg_elem_init(&order_hnf.z); + quat_alg_elem_copy(&order_hnf.z, &EXTREMAL_ORDERS[index_alternate_order].z); + quat_alg_elem_init(&order_hnf.t); + quat_alg_elem_copy(&order_hnf.t, &EXTREMAL_ORDERS[index_alternate_order].t); + 
quat_lattice_init(&order_hnf.order); + ibz_copy(&order_hnf.order.denom, &EXTREMAL_ORDERS[index_alternate_order].order.denom); + ibz_mat_4x4_copy(&order_hnf.order.basis, &EXTREMAL_ORDERS[index_alternate_order].order.basis); + order_hnf.q = EXTREMAL_ORDERS[index_alternate_order].q; + ri_params.order = &order_hnf; + ri_params.algebra = &QUATALG_PINFTY; + +#ifndef NDEBUG + assert(quat_lattice_contains(NULL, &ri_params.order->order, &ri_params.order->z)); + assert(quat_lattice_contains(NULL, &ri_params.order->order, &ri_params.order->t)); +#endif + + ret = quat_represent_integer(&theta, &tmp, 1, &ri_params); + + assert(!ibz_is_even(&tmp)); + + if (!ret) { + printf("represent integer failed for the alternate order number %d and for " + "a target of " + "size %d for a u of size %d with length = " + "%u \n", + index_alternate_order, + ibz_bitsize(&tmp), + ibz_bitsize(u), + length); + goto cleanup; + } + quat_lideal_create(lideal, &theta, u, &order_hnf.order, &QUATALG_PINFTY); + + quat_alg_elem_finalize(&order_hnf.z); + quat_alg_elem_finalize(&order_hnf.t); + quat_lattice_finalize(&order_hnf.order); + +#ifndef NDEBUG + ibz_t test_norm, test_denom; + ibz_init(&test_denom); + ibz_init(&test_norm); + quat_alg_norm(&test_norm, &test_denom, &theta, &QUATALG_PINFTY); + assert(ibz_is_one(&test_denom)); + assert(ibz_cmp(&test_norm, &tmp) == 0); + assert(!ibz_is_even(&tmp)); + assert(quat_lattice_contains(NULL, &EXTREMAL_ORDERS[index_alternate_order].order, &theta)); + ibz_finalize(&test_norm); + ibz_finalize(&test_denom); +#endif + + ec_basis_t B0_two; + // copying the basis + copy_basis(&B0_two, &CURVES_WITH_ENDOMORPHISMS[index_alternate_order].basis_even); + assert(test_basis_order_twof(&B0_two, &E0, TORSION_EVEN_POWER)); + ec_dbl_iter_basis(&B0_two, TORSION_EVEN_POWER - length - HD_extra_torsion, &B0_two, &E0); + + assert(test_basis_order_twof(&B0_two, &E0, length + HD_extra_torsion)); + + // now we set-up the kernel + theta_couple_point_t T1; + theta_couple_point_t T2, T1m2; + + copy_point(&T1.P1, &B0_two.P); + copy_point(&T2.P1, &B0_two.Q); + copy_point(&T1m2.P1, &B0_two.PmQ); + + // multiplication of theta by (u)^-1 mod 2^(length+2) + ibz_mul(&two_pow, &two_pow, &ibz_const_two); + ibz_mul(&two_pow, &two_pow, &ibz_const_two); + ibz_copy(&tmp, u); + ibz_invmod(&tmp, &tmp, &two_pow); + assert(!ibz_is_even(&tmp)); + + ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); + ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); + ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); + ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + + // applying theta to the basis + ec_basis_t B0_two_theta; + copy_basis(&B0_two_theta, &B0_two); + endomorphism_application_even_basis(&B0_two_theta, index_alternate_order, &E0, &theta, length + HD_extra_torsion); + + // Ensure the basis we're using has the expected order + assert(test_basis_order_twof(&B0_two_theta, &E0, length + HD_extra_torsion)); + + // Set-up the domain E0 x E0 + theta_couple_curve_t E00; + E00.E1 = E0; + E00.E2 = E0; + + // Set-up the kernel from the bases + theta_kernel_couple_points_t dim_two_ker; + copy_bases_to_kernel(&dim_two_ker, &B0_two, &B0_two_theta); + + ret = theta_chain_compute_and_eval(length, &E00, &dim_two_ker, true, E34, P12, numP); + if (!ret) + goto cleanup; + + assert(length); + ret = (int)length; + +cleanup: + // var finalize + ibz_finalize(&two_pow); + ibz_finalize(&tmp); + quat_alg_elem_finalize(&theta); + + return ret; +} + +int +fixed_degree_isogeny_and_eval(quat_left_ideal_t *lideal, + const ibz_t *u, + bool small, + theta_couple_curve_t 
*E34, + theta_couple_point_t *P12, + size_t numP, + const int index_alternate_order) +{ + return _fixed_degree_isogeny_impl(lideal, u, small, E34, P12, numP, index_alternate_order); +} + +// takes the output of LLL and apply some small treatment on the basis +// reordering vectors and switching some signs if needed to make it in a nicer +// shape +static void +post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, const ibz_t *norm, bool is_special_order) +{ + // if the left order is the special one, then we apply some additional post + // treatment + if (is_special_order) { + // reordering the basis if needed + if (ibz_cmp(&(*gram)[0][0], &(*gram)[2][2]) == 0) { + for (int i = 0; i < 4; i++) { + ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + } + ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); + ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); + ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); + ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); + ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); + } else if (ibz_cmp(&(*gram)[0][0], &(*gram)[3][3]) == 0) { + for (int i = 0; i < 4; i++) { + ibz_swap(&(*reduced)[i][1], &(*reduced)[i][3]); + } + ibz_swap(&(*gram)[0][3], &(*gram)[0][1]); + ibz_swap(&(*gram)[3][0], &(*gram)[1][0]); + ibz_swap(&(*gram)[2][3], &(*gram)[2][1]); + ibz_swap(&(*gram)[3][2], &(*gram)[1][2]); + ibz_swap(&(*gram)[3][3], &(*gram)[1][1]); + } else if (ibz_cmp(&(*gram)[1][1], &(*gram)[3][3]) == 0) { + // in this case it seems that we need to swap the second and third + // element, and then recompute entirely the second element from the first + // first we swap the second and third element + for (int i = 0; i < 4; i++) { + ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + } + ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); + ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); + ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); + ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); + ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); + } + + // adjusting the sign if needed + if (ibz_cmp(&(*reduced)[0][0], &(*reduced)[1][1]) != 0) { + for (int i = 0; i < 4; i++) { + ibz_neg(&(*reduced)[i][1], &(*reduced)[i][1]); + ibz_neg(&(*gram)[i][1], &(*gram)[i][1]); + ibz_neg(&(*gram)[1][i], &(*gram)[1][i]); + } + } + if (ibz_cmp(&(*reduced)[0][2], &(*reduced)[1][3]) != 0) { + for (int i = 0; i < 4; i++) { + ibz_neg(&(*reduced)[i][3], &(*reduced)[i][3]); + ibz_neg(&(*gram)[i][3], &(*gram)[i][3]); + ibz_neg(&(*gram)[3][i], &(*gram)[3][i]); + } + // assert(ibz_cmp(&(*reduced)[0][2],&(*reduced)[1][3])==0); + } + } +} + +// enumerate all vectors in an hypercube of norm m for the infinity norm +// with respect to a basis whose gram matrix is given by gram +// Returns an int `count`, the number of vectors found with the desired +// properties +static int +enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t *gram, const ibz_t *adjusted_norm) +{ + + ibz_t remain, norm; + ibz_vec_4_t point; + + ibz_init(&remain); + ibz_init(&norm); + ibz_vec_4_init(&point); + + assert(m > 0); + + int count = 0; + int dim = 2 * m + 1; + int dim2 = dim * dim; + int dim3 = dim2 * dim; + + // if the basis is of the form alpha, i*alpha, beta, i*beta + // we can remove some values due to symmetry of the basis that + bool need_remove_symmetry = + (ibz_cmp(&(*gram)[0][0], &(*gram)[1][1]) == 0 && ibz_cmp(&(*gram)[3][3], &(*gram)[2][2]) == 0); + + int check1, check2, check3; + + // Enumerate over points in a hypercube with coordinates (x, y, z, w) + for (int x = -m; x <= 0; x++) { // We only check non-positive x-values + for (int y = -m; y < m + 1; y++) { + 
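+            // Note: restricting to non-positive leading coordinates exploits
+            // the +/- symmetry of the quadratic form (v and -v have the same
+            // norm), so only one representative of each pair is enumerated.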
// Once x = 0 we only consider non-positive y values + if (x == 0 && y > 0) { + break; + } + for (int z = -m; z < m + 1; z++) { + // If x and y are both zero, we only consider non-positive z values + if (x == 0 && y == 0 && z > 0) { + break; + } + for (int w = -m; w < m + 1; w++) { + // If x, y, z are all zero, we only consider negative w values + if (x == 0 && y == 0 && z == 0 && w >= 0) { + break; + } + + // Now for each candidate (x, y, z, w) we need to check a number of + // conditions We have already filtered for symmetry with several break + // statements, but there are more checks. + + // 1. We do not allow all (x, y, z, w) to be multiples of 2 + // 2. We do not allow all (x, y, z, w) to be multiples of 3 + // 3. We do not want elements of the same norm, so we quotient out the + // action + // of a group of order four generated by i for a basis expected to + // be of the form: [gamma, i gamma, beta, i beta ]. + + // Ensure that not all values are even + if (!((x | y | z | w) & 1)) { + continue; + } + // Ensure that not all values are multiples of three + if (x % 3 == 0 && y % 3 == 0 && z % 3 == 0 && w % 3 == 0) { + continue; + } + + check1 = (m + w) + dim * (m + z) + dim2 * (m + y) + dim3 * (m + x); + check2 = (m - z) + dim * (m + w) + dim2 * (m - x) + dim3 * (m + y); + check3 = (m + z) + dim * (m - w) + dim2 * (m + x) + dim3 * (m - y); + + // either the basis does not have symmetry and we are good, + // or there is a special symmetry that we can exploit + // and we ensure that we don't record the same norm in the list + if (!need_remove_symmetry || (check1 <= check2 && check1 <= check3)) { + // Set the point as a vector (x, y, z, w) + ibz_set(&point[0], x); + ibz_set(&point[1], y); + ibz_set(&point[2], z); + ibz_set(&point[3], w); + + // Evaluate this through the gram matrix and divide out by the + // adjusted_norm + quat_qf_eval(&norm, gram, &point); + ibz_div(&norm, &remain, &norm, adjusted_norm); + assert(ibz_is_zero(&remain)); + + if (ibz_mod_ui(&norm, 2) == 1) { + ibz_set(&vecs[count][0], x); + ibz_set(&vecs[count][1], y); + ibz_set(&vecs[count][2], z); + ibz_set(&vecs[count][3], w); + ibz_copy(&norms[count], &norm); + count++; + } + } + } + } + } + } + + ibz_finalize(&remain); + ibz_finalize(&norm); + ibz_vec_4_finalize(&point); + + return count - 1; +} + +// enumerate through the two list given in input to find to integer d1,d2 such +// that there exists u,v with u d1 + v d2 = target the bool is diagonal +// indicates if the two lists are the same +static int +find_uv_from_lists(ibz_t *au, + ibz_t *bu, + ibz_t *av, + ibz_t *bv, + ibz_t *u, + ibz_t *v, + int *index_sol1, + int *index_sol2, + const ibz_t *target, + const ibz_t *small_norms1, + const ibz_t *small_norms2, + const ibz_t *quotients, + const int index1, + const int index2, + const int is_diagonal, + const int number_sum_square) +{ + + ibz_t n, remain, adjusted_norm; + ibz_init(&n); + ibz_init(&remain); + ibz_init(&adjusted_norm); + + int found = 0; + int cmp; + ibz_copy(&n, target); + + // enumerating through the list + for (int i1 = 0; i1 < index1; i1++) { + ibz_mod(&adjusted_norm, &n, &small_norms1[i1]); + int starting_index2; + if (is_diagonal) { + starting_index2 = i1; + } else { + starting_index2 = 0; + } + for (int i2 = starting_index2; i2 < index2; i2++) { + // u = target / d1 mod d2 + if (!ibz_invmod(&remain, &small_norms2[i2], &small_norms1[i1])) { + continue; + } + ibz_mul(v, &remain, &adjusted_norm); + ibz_mod(v, v, &small_norms1[i1]); + cmp = ibz_cmp(v, "ients[i2]); + while (!found && cmp < 0) { + if 
(number_sum_square > 0) { + found = ibz_cornacchia_prime(av, bv, &ibz_const_one, v); + } else if (number_sum_square == 0) { + found = 1; + } + if (found) { + ibz_mul(&remain, v, &small_norms2[i2]); + ibz_copy(au, &n); + ibz_sub(u, au, &remain); + assert(ibz_cmp(u, &ibz_const_zero) > 0); + ibz_div(u, &remain, u, &small_norms1[i1]); + assert(ibz_is_zero(&remain)); + // we want to remove weird cases where u,v have big power of two + found = found && (ibz_get(u) != 0 && ibz_get(v) != 0); + if (number_sum_square == 2) { + found = ibz_cornacchia_prime(au, bu, &ibz_const_one, u); + } + } + if (!found) { + ibz_add(v, v, &small_norms1[i1]); + cmp = ibz_cmp(v, "ients[i2]); + } + } + + if (found) { + // copying the indices + *index_sol1 = i1; + *index_sol2 = i2; + break; + } + } + if (found) { + break; + } + } + + ibz_finalize(&n); + ibz_finalize(&remain); + ibz_finalize(&adjusted_norm); + + return found; +} + +struct vec_and_norm +{ + ibz_vec_4_t vec; + ibz_t norm; + int idx; +}; + +static int +compare_vec_by_norm(const void *_first, const void *_second) +{ + const struct vec_and_norm *first = _first, *second = _second; + int res = ibz_cmp(&first->norm, &second->norm); + if (res != 0) + return res; + else + return first->idx - second->idx; +} + +// use several special curves +// we assume that the first one is always j=1728 +int +find_uv(ibz_t *u, + ibz_t *v, + quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *d1, + ibz_t *d2, + int *index_alternate_order_1, + int *index_alternate_order_2, + const ibz_t *target, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo, + int num_alternate_order) + +{ + + // variable declaration & init + ibz_vec_4_t vec; + ibz_t n; + ibz_t au, bu, av, bv; + ibz_t norm_d; + ibz_t remain; + ibz_init(&au); + ibz_init(&bu); + ibz_init(&av); + ibz_init(&bv); + ibz_init(&norm_d); + ibz_init(&n); + ibz_vec_4_init(&vec); + ibz_init(&remain); + + ibz_copy(&n, target); + + ibz_t adjusted_norm[num_alternate_order + 1]; + ibz_mat_4x4_t gram[num_alternate_order + 1], reduced[num_alternate_order + 1]; + quat_left_ideal_t ideal[num_alternate_order + 1]; + + for (int i = 0; i < num_alternate_order + 1; i++) { + ibz_init(&adjusted_norm[i]); + ibz_mat_4x4_init(&gram[i]); + ibz_mat_4x4_init(&reduced[i]); + quat_left_ideal_init(&ideal[i]); + } + + // first we reduce the ideal given in input + quat_lideal_copy(&ideal[0], lideal); + quat_lideal_reduce_basis(&reduced[0], &gram[0], &ideal[0], Bpoo); + + ibz_mat_4x4_copy(&ideal[0].lattice.basis, &reduced[0]); + ibz_set(&adjusted_norm[0], 1); + ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); + ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); + post_LLL_basis_treatment(&gram[0], &reduced[0], &ideal[0].norm, true); + + // for efficient lattice reduction, we replace ideal[0] by the equivalent + // ideal of smallest norm + quat_left_ideal_t reduced_id; + quat_left_ideal_init(&reduced_id); + quat_lideal_copy(&reduced_id, &ideal[0]); + quat_alg_elem_t delta; + // delta will be the element of smallest norm + quat_alg_elem_init(&delta); + ibz_set(&delta.coord[0], 1); + ibz_set(&delta.coord[1], 0); + ibz_set(&delta.coord[2], 0); + ibz_set(&delta.coord[3], 0); + ibz_copy(&delta.denom, &reduced_id.lattice.denom); + ibz_mat_4x4_eval(&delta.coord, &reduced[0], &delta.coord); + assert(quat_lattice_contains(NULL, &reduced_id.lattice, &delta)); + + // reduced_id = ideal[0] * \overline{delta}/n(ideal[0]) + quat_alg_conj(&delta, &delta); + ibz_mul(&delta.denom, &delta.denom, &ideal[0].norm); + 
quat_lattice_alg_elem_mul(&reduced_id.lattice, &reduced_id.lattice, &delta, Bpoo); + ibz_copy(&reduced_id.norm, &gram[0][0][0]); + ibz_div(&reduced_id.norm, &remain, &reduced_id.norm, &adjusted_norm[0]); + assert(ibz_cmp(&remain, &ibz_const_zero) == 0); + + // and conj_ideal is the conjugate of reduced_id + // init the right order; + quat_lattice_t right_order; + quat_lattice_init(&right_order); + // computing the conjugate + quat_left_ideal_t conj_ideal; + quat_left_ideal_init(&conj_ideal); + quat_lideal_conjugate_without_hnf(&conj_ideal, &right_order, &reduced_id, Bpoo); + + // computing all the other connecting ideals and reducing them + for (int i = 1; i < num_alternate_order + 1; i++) { + quat_lideal_lideal_mul_reduced(&ideal[i], &gram[i], &conj_ideal, &ALTERNATE_CONNECTING_IDEALS[i - 1], Bpoo); + ibz_mat_4x4_copy(&reduced[i], &ideal[i].lattice.basis); + ibz_set(&adjusted_norm[i], 1); + ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom); + ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom); + post_LLL_basis_treatment(&gram[i], &reduced[i], &ideal[i].norm, false); + } + + // enumerating small vectors + + // global parameters for the enumeration + int m = FINDUV_box_size; + int m4 = FINDUV_cube_size; + + ibz_vec_4_t small_vecs[num_alternate_order + 1][m4]; + ibz_t small_norms[num_alternate_order + 1][m4]; + ibz_vec_4_t alternate_small_vecs[num_alternate_order + 1][m4]; + ibz_t alternate_small_norms[num_alternate_order + 1][m4]; + ibz_t quotients[num_alternate_order + 1][m4]; + int indices[num_alternate_order + 1]; + + for (int j = 0; j < num_alternate_order + 1; j++) { + for (int i = 0; i < m4; i++) { + ibz_init(&small_norms[j][i]); + ibz_vec_4_init(&small_vecs[j][i]); + ibz_init(&alternate_small_norms[j][i]); + ibz_init("ients[j][i]); + ibz_vec_4_init(&alternate_small_vecs[j][i]); + } + // enumeration in the hypercube of norm m + indices[j] = enumerate_hypercube(small_vecs[j], small_norms[j], m, &gram[j], &adjusted_norm[j]); + + // sorting the list + { + struct vec_and_norm small_vecs_and_norms[indices[j]]; + for (int i = 0; i < indices[j]; ++i) { + memcpy(&small_vecs_and_norms[i].vec, &small_vecs[j][i], sizeof(ibz_vec_4_t)); + memcpy(&small_vecs_and_norms[i].norm, &small_norms[j][i], sizeof(ibz_t)); + small_vecs_and_norms[i].idx = i; + } + qsort(small_vecs_and_norms, indices[j], sizeof(*small_vecs_and_norms), compare_vec_by_norm); + for (int i = 0; i < indices[j]; ++i) { + memcpy(&small_vecs[j][i], &small_vecs_and_norms[i].vec, sizeof(ibz_vec_4_t)); + memcpy(&small_norms[j][i], &small_vecs_and_norms[i].norm, sizeof(ibz_t)); + } +#ifndef NDEBUG + for (int i = 1; i < indices[j]; ++i) + assert(ibz_cmp(&small_norms[j][i - 1], &small_norms[j][i]) <= 0); +#endif + } + + for (int i = 0; i < indices[j]; i++) { + ibz_div("ients[j][i], &remain, &n, &small_norms[j][i]); + } + } + + int found = 0; + int i1; + int i2; + for (int j1 = 0; j1 < num_alternate_order + 1; j1++) { + for (int j2 = j1; j2 < num_alternate_order + 1; j2++) { + // in this case, there are some small adjustements to make + int is_diago = (j1 == j2); + found = find_uv_from_lists(&au, + &bu, + &av, + &bv, + u, + v, + &i1, + &i2, + target, + small_norms[j1], + small_norms[j2], + quotients[j2], + indices[j1], + indices[j2], + is_diago, + 0); + // } + + if (found) { + // recording the solutions that we found + ibz_copy(&beta1->denom, &ideal[j1].lattice.denom); + ibz_copy(&beta2->denom, &ideal[j2].lattice.denom); + ibz_copy(d1, &small_norms[j1][i1]); + ibz_copy(d2, &small_norms[j2][i2]); + 
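+                // Note: the short coordinate vectors found above are mapped
+                // back to quaternion elements through the LLL-reduced bases of
+                // the corresponding ideals.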
ibz_mat_4x4_eval(&beta1->coord, &reduced[j1], &small_vecs[j1][i1]); + ibz_mat_4x4_eval(&beta2->coord, &reduced[j2], &small_vecs[j2][i2]); + assert(quat_lattice_contains(NULL, &ideal[j1].lattice, beta1)); + assert(quat_lattice_contains(NULL, &ideal[j2].lattice, beta2)); + if (j1 != 0 || j2 != 0) { + ibz_div(&delta.denom, &remain, &delta.denom, &lideal->norm); + assert(ibz_cmp(&remain, &ibz_const_zero) == 0); + ibz_mul(&delta.denom, &delta.denom, &conj_ideal.norm); + } + if (j1 != 0) { + // we send back beta1 to the original ideal + quat_alg_mul(beta1, &delta, beta1, Bpoo); + quat_alg_normalize(beta1); + } + if (j2 != 0) { + // we send back beta2 to the original ideal + quat_alg_mul(beta2, &delta, beta2, Bpoo); + quat_alg_normalize(beta2); + } + + // if the selected element belong to an alternate order, we conjugate it + if (j1 != 0) { + quat_alg_conj(beta1, beta1); + } + if (j2 != 0) { + quat_alg_conj(beta2, beta2); + } + +#ifndef NDEBUG + quat_alg_norm(&remain, &norm_d, beta1, &QUATALG_PINFTY); + assert(ibz_is_one(&norm_d)); + ibz_mul(&n, d1, &ideal->norm); + if (j1 > 0) { + ibz_mul(&n, &n, &ALTERNATE_CONNECTING_IDEALS[j1 - 1].norm); + } + assert(ibz_cmp(&n, &remain) == 0); + quat_alg_norm(&remain, &norm_d, beta2, &QUATALG_PINFTY); + assert(ibz_is_one(&norm_d)); + ibz_mul(&n, d2, &ideal->norm); + if (j2 > 0) { + ibz_mul(&n, &n, &ALTERNATE_CONNECTING_IDEALS[j2 - 1].norm); + } + assert(ibz_cmp(&n, &remain) == 0); + assert(quat_lattice_contains(NULL, &ideal->lattice, beta1)); + assert(quat_lattice_contains(NULL, &ideal->lattice, beta2)); + + quat_left_ideal_t ideal_test; + quat_lattice_t ro; + quat_left_ideal_init(&ideal_test); + quat_lattice_init(&ro); + if (j1 > 0) { + quat_lideal_copy(&ideal_test, &ALTERNATE_CONNECTING_IDEALS[j1 - 1]); + quat_lideal_conjugate_without_hnf(&ideal_test, &ro, &ideal_test, Bpoo); + quat_lideal_lideal_mul_reduced(&ideal_test, &gram[0], &ideal_test, ideal, Bpoo); + assert(quat_lattice_contains(NULL, &ideal_test.lattice, beta1)); + } + if (j2 > 0) { + quat_lideal_copy(&ideal_test, &ALTERNATE_CONNECTING_IDEALS[j2 - 1]); + quat_lideal_conjugate_without_hnf(&ideal_test, &ro, &ideal_test, Bpoo); + quat_lideal_lideal_mul_reduced(&ideal_test, &gram[0], &ideal_test, ideal, Bpoo); + assert(quat_lattice_contains(NULL, &ideal_test.lattice, beta2)); + } + + quat_lattice_finalize(&ro); + quat_left_ideal_finalize(&ideal_test); +#endif + + *index_alternate_order_1 = j1; + *index_alternate_order_2 = j2; + break; + } + } + if (found) { + break; + } + } + + for (int j = 0; j < num_alternate_order + 1; j++) { + for (int i = 0; i < m4; i++) { + ibz_finalize(&small_norms[j][i]); + ibz_vec_4_finalize(&small_vecs[j][i]); + ibz_finalize(&alternate_small_norms[j][i]); + ibz_finalize("ients[j][i]); + ibz_vec_4_finalize(&alternate_small_vecs[j][i]); + } + } + + // var finalize + for (int i = 0; i < num_alternate_order + 1; i++) { + ibz_mat_4x4_finalize(&gram[i]); + ibz_mat_4x4_finalize(&reduced[i]); + quat_left_ideal_finalize(&ideal[i]); + ibz_finalize(&adjusted_norm[i]); + } + + ibz_finalize(&n); + ibz_vec_4_finalize(&vec); + ibz_finalize(&au); + ibz_finalize(&bu); + ibz_finalize(&av); + ibz_finalize(&bv); + ibz_finalize(&remain); + ibz_finalize(&norm_d); + quat_lattice_finalize(&right_order); + quat_left_ideal_finalize(&conj_ideal); + quat_left_ideal_finalize(&reduced_id); + quat_alg_elem_finalize(&delta); + + return found; +} + +int +dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *u, + ibz_t *v, + ibz_t *d1, + ibz_t *d2, + ec_curve_t 
*codomain, + ec_basis_t *basis, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo) +{ + ibz_t target, tmp, two_pow; + ; + quat_alg_elem_t theta; + + ibz_t norm_d; + ibz_init(&norm_d); + ibz_t test1, test2; + ibz_init(&test1); + ibz_init(&test2); + + ibz_init(&target); + ibz_init(&tmp); + ibz_init(&two_pow); + int exp = TORSION_EVEN_POWER; + quat_alg_elem_init(&theta); + + // first, we find u,v,d1,d2,beta1,beta2 + // such that u*d1 + v*d2 = 2^TORSION_EVEN_POWER and there are ideals of + // norm d1,d2 equivalent to ideal beta1 and beta2 are elements of norm nd1, + // nd2 where n=n(lideal) + int ret; + int index_order1 = 0, index_order2 = 0; +#ifndef NDEBUG + unsigned int Fu_length, Fv_length; +#endif + ret = find_uv(u, + v, + beta1, + beta2, + d1, + d2, + &index_order1, + &index_order2, + &TORSION_PLUS_2POWER, + lideal, + Bpoo, + NUM_ALTERNATE_EXTREMAL_ORDERS); + if (!ret) { + goto cleanup; + } + + assert(ibz_is_odd(d1) && ibz_is_odd(d2)); + // compute the valuation of the GCD of u,v + ibz_gcd(&tmp, u, v); + assert(ibz_cmp(&tmp, &ibz_const_zero) != 0); + int exp_gcd = ibz_two_adic(&tmp); + exp = TORSION_EVEN_POWER - exp_gcd; + // removing the power of 2 from u and v + ibz_div(u, &test1, u, &tmp); + assert(ibz_cmp(&test1, &ibz_const_zero) == 0); + ibz_div(v, &test1, v, &tmp); + assert(ibz_cmp(&test1, &ibz_const_zero) == 0); + +#ifndef NDEBUG + // checking that ud1+vd2 = 2^exp + ibz_t pow_check, tmp_check; + ibz_init(&pow_check); + ibz_init(&tmp_check); + ibz_pow(&pow_check, &ibz_const_two, exp); + ibz_mul(&tmp_check, d1, u); + ibz_sub(&pow_check, &pow_check, &tmp_check); + ibz_mul(&tmp_check, v, d2); + ibz_sub(&pow_check, &pow_check, &tmp_check); + assert(ibz_cmp(&pow_check, &ibz_const_zero) == 0); + ibz_finalize(&tmp_check); + ibz_finalize(&pow_check); +#endif + + // now we compute the dimension 2 isogeny + // F : Eu x Ev -> E x E' + // where we have phi_u : Eu -> E_index_order1 and phi_v : Ev -> E_index_order2 + // if we have phi1 : E_index_order_1 -> E of degree d1 + // and phi2 : E_index_order_2 -> E of degree d2 + // we can define theta = phi2 o hat{phi1} + // and the kernel of F is given by + // ( [ud1](P), phiv o theta o hat{phiu} (P)),( [ud1](Q), phiv o theta o + // hat{phiu} (Q)) where P,Q is a basis of E0[2e] + + // now we set-up the kernel + // ec_curve_t E0 = CURVE_E0; + ec_curve_t E1; + copy_curve(&E1, &CURVES_WITH_ENDOMORPHISMS[index_order1].curve); + ec_curve_t E2; + copy_curve(&E2, &CURVES_WITH_ENDOMORPHISMS[index_order2].curve); + ec_basis_t bas1, bas2; + theta_couple_curve_t E01; + theta_kernel_couple_points_t ker; + + ec_basis_t bas_u; + copy_basis(&bas1, &CURVES_WITH_ENDOMORPHISMS[index_order1].basis_even); + copy_basis(&bas2, &CURVES_WITH_ENDOMORPHISMS[index_order2].basis_even); + + // we start by computing theta = beta2 \hat{beta1}/n + ibz_set(&theta.denom, 1); + quat_alg_conj(&theta, beta1); + quat_alg_mul(&theta, beta2, &theta, &QUATALG_PINFTY); + ibz_mul(&theta.denom, &theta.denom, &lideal->norm); + + // now we perform the actual computation + quat_left_ideal_t idealu, idealv; + quat_left_ideal_init(&idealu); + quat_left_ideal_init(&idealv); + theta_couple_curve_t Fu_codomain, Fv_codomain; + theta_couple_point_t pushed_points[3]; + theta_couple_point_t *const V1 = pushed_points + 0, *const V2 = pushed_points + 1, *const V1m2 = pushed_points + 2; + theta_couple_point_t P, Q, PmQ; + + copy_point(&P.P1, &bas1.P); + copy_point(&PmQ.P1, &bas1.PmQ); + copy_point(&Q.P1, &bas1.Q); + // Set points to zero + ec_point_init(&P.P2); + ec_point_init(&Q.P2); + 
ec_point_init(&PmQ.P2); + + pushed_points[0] = P; + pushed_points[1] = Q; + pushed_points[2] = PmQ; + // we perform the computation of phiu with a fixed degree isogeny + ret = fixed_degree_isogeny_and_eval( + &idealu, u, true, &Fu_codomain, pushed_points, sizeof(pushed_points) / sizeof(*pushed_points), index_order1); + + if (!ret) { + goto cleanup; + } + assert(test_point_order_twof(&V1->P1, &Fu_codomain.E1, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&V1->P2, &Fu_codomain.E2, TORSION_EVEN_POWER)); + +#ifndef NDEBUG + Fu_length = (unsigned int)ret; + // presumably the correct curve is the first one, we check this + fp2_t w0a, w1a, w2a; + ec_curve_t E1_tmp, Fu_codomain_E1_tmp, Fu_codomain_E2_tmp; + copy_curve(&E1_tmp, &E1); + copy_curve(&Fu_codomain_E1_tmp, &Fu_codomain.E1); + copy_curve(&Fu_codomain_E2_tmp, &Fu_codomain.E2); + weil(&w0a, TORSION_EVEN_POWER, &bas1.P, &bas1.Q, &bas1.PmQ, &E1_tmp); + weil(&w1a, TORSION_EVEN_POWER, &V1->P1, &V2->P1, &V1m2->P1, &Fu_codomain_E1_tmp); + weil(&w2a, TORSION_EVEN_POWER, &V1->P2, &V2->P2, &V1m2->P2, &Fu_codomain_E2_tmp); + ibz_pow(&two_pow, &ibz_const_two, Fu_length); + ibz_sub(&two_pow, &two_pow, u); + + // now we are checking that the weil pairings are equal to the correct value + digit_t digit_u[NWORDS_ORDER] = { 0 }; + ibz_to_digit_array(digit_u, u); + fp2_t test_powa; + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + + assert(fp2_is_equal(&test_powa, &w1a)); + ibz_to_digit_array(digit_u, &two_pow); + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + assert(fp2_is_equal(&test_powa, &w2a)); +#endif + + // copying the basis images + copy_point(&bas_u.P, &V1->P1); + copy_point(&bas_u.Q, &V2->P1); + copy_point(&bas_u.PmQ, &V1m2->P1); + + // copying the points to the first part of the kernel + copy_point(&ker.T1.P1, &bas_u.P); + copy_point(&ker.T2.P1, &bas_u.Q); + copy_point(&ker.T1m2.P1, &bas_u.PmQ); + copy_curve(&E01.E1, &Fu_codomain.E1); + + copy_point(&P.P1, &bas2.P); + copy_point(&PmQ.P1, &bas2.PmQ); + copy_point(&Q.P1, &bas2.Q); + pushed_points[0] = P; + pushed_points[1] = Q; + pushed_points[2] = PmQ; + + // computation of phiv + ret = fixed_degree_isogeny_and_eval( + &idealv, v, true, &Fv_codomain, pushed_points, sizeof(pushed_points) / sizeof(*pushed_points), index_order2); + if (!ret) { + goto cleanup; + } + + assert(test_point_order_twof(&V1->P1, &Fv_codomain.E1, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&V1->P2, &Fv_codomain.E2, TORSION_EVEN_POWER)); + +#ifndef NDEBUG + Fv_length = (unsigned int)ret; + ec_curve_t E2_tmp, Fv_codomain_E1_tmp, Fv_codomain_E2_tmp; + copy_curve(&E2_tmp, &E2); + copy_curve(&Fv_codomain_E1_tmp, &Fv_codomain.E1); + copy_curve(&Fv_codomain_E2_tmp, &Fv_codomain.E2); + // presumably the correct curve is the first one, we check this + weil(&w0a, TORSION_EVEN_POWER, &bas2.P, &bas2.Q, &bas2.PmQ, &E2_tmp); + weil(&w1a, TORSION_EVEN_POWER, &V1->P1, &V2->P1, &V1m2->P1, &Fv_codomain_E1_tmp); + weil(&w2a, TORSION_EVEN_POWER, &V1->P2, &V2->P2, &V1m2->P2, &Fv_codomain_E2_tmp); + if (Fv_length == 0) { + ibz_set(&tmp, 1); + ibz_set(&two_pow, 1); + } else { + ibz_pow(&two_pow, &ibz_const_two, Fv_length); + ibz_sub(&two_pow, &two_pow, v); + } + + // now we are checking that one of the two is equal to the correct value + ibz_to_digit_array(digit_u, v); + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + assert(fp2_is_equal(&test_powa, &w1a)); + ibz_to_digit_array(digit_u, &two_pow); + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + assert(fp2_is_equal(&test_powa, 
&w2a)); + +#endif + + copy_point(&bas2.P, &V1->P1); + copy_point(&bas2.Q, &V2->P1); + copy_point(&bas2.PmQ, &V1m2->P1); + + // multiplying theta by 1 / (d1 * n(connecting_ideal2)) + ibz_pow(&two_pow, &ibz_const_two, TORSION_EVEN_POWER); + ibz_copy(&tmp, d1); + if (index_order2 > 0) { + ibz_mul(&tmp, &tmp, &ALTERNATE_CONNECTING_IDEALS[index_order2 - 1].norm); + } + ibz_invmod(&tmp, &tmp, &two_pow); + + ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); + ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); + ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); + ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + + // applying theta + endomorphism_application_even_basis(&bas2, 0, &Fv_codomain.E1, &theta, TORSION_EVEN_POWER); + + assert(test_basis_order_twof(&bas2, &Fv_codomain.E1, TORSION_EVEN_POWER)); + + // copying points to the second part of the kernel + copy_point(&ker.T1.P2, &bas2.P); + copy_point(&ker.T2.P2, &bas2.Q); + copy_point(&ker.T1m2.P2, &bas2.PmQ); + copy_curve(&E01.E2, &Fv_codomain.E1); + + // copying the points to the first part of the kernel + quat_left_ideal_finalize(&idealu); + quat_left_ideal_finalize(&idealv); + + double_couple_point_iter(&ker.T1, TORSION_EVEN_POWER - exp, &ker.T1, &E01); + double_couple_point_iter(&ker.T2, TORSION_EVEN_POWER - exp, &ker.T2, &E01); + double_couple_point_iter(&ker.T1m2, TORSION_EVEN_POWER - exp, &ker.T1m2, &E01); + + assert(test_point_order_twof(&ker.T1.P1, &E01.E1, exp)); + assert(test_point_order_twof(&ker.T1m2.P2, &E01.E2, exp)); + + assert(ibz_is_odd(u)); + + // now we evaluate the basis points through the isogeny + assert(test_basis_order_twof(&bas_u, &E01.E1, TORSION_EVEN_POWER)); + + // evaluating the basis through the isogeny of degree u*d1 + copy_point(&pushed_points[0].P1, &bas_u.P); + copy_point(&pushed_points[2].P1, &bas_u.PmQ); + copy_point(&pushed_points[1].P1, &bas_u.Q); + // Set points to zero + ec_point_init(&pushed_points[0].P2); + ec_point_init(&pushed_points[1].P2); + ec_point_init(&pushed_points[2].P2); + + theta_couple_curve_t theta_codomain; + + ret = theta_chain_compute_and_eval_randomized( + exp, &E01, &ker, false, &theta_codomain, pushed_points, sizeof(pushed_points) / sizeof(*pushed_points)); + if (!ret) { + goto cleanup; + } + + theta_couple_point_t T1, T2, T1m2; + T1 = pushed_points[0]; + T2 = pushed_points[1]; + T1m2 = pushed_points[2]; + + assert(test_point_order_twof(&T1.P2, &theta_codomain.E2, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&T1.P1, &theta_codomain.E1, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&T1m2.P2, &theta_codomain.E2, TORSION_EVEN_POWER)); + + copy_point(&basis->P, &T1.P1); + copy_point(&basis->Q, &T2.P1); + copy_point(&basis->PmQ, &T1m2.P1); + copy_curve(codomain, &theta_codomain.E1); + + // using weil pairing to verify that we selected the correct curve + fp2_t w0, w1; + // ec_curve_t E0 = CURVE_E0; + // ec_basis_t bas0 = BASIS_EVEN; + weil(&w0, TORSION_EVEN_POWER, &bas1.P, &bas1.Q, &bas1.PmQ, &E1); + weil(&w1, TORSION_EVEN_POWER, &basis->P, &basis->Q, &basis->PmQ, codomain); + + digit_t digit_d[NWORDS_ORDER] = { 0 }; + ibz_mul(&tmp, d1, u); + ibz_mul(&tmp, &tmp, u); + ibz_mod(&tmp, &tmp, &TORSION_PLUS_2POWER); + ibz_to_digit_array(digit_d, &tmp); + fp2_t test_pow; + fp2_pow_vartime(&test_pow, &w0, digit_d, NWORDS_ORDER); + + // then we have selected the wrong one + if (!fp2_is_equal(&w1, &test_pow)) { + copy_point(&basis->P, &T1.P2); + copy_point(&basis->Q, &T2.P2); + copy_point(&basis->PmQ, &T1m2.P2); + copy_curve(codomain, &theta_codomain.E2); + +// verifying that the other 
one is the good one +#ifndef NDEBUG + ec_curve_t codomain_tmp; + copy_curve(&codomain_tmp, codomain); + weil(&w1, TORSION_EVEN_POWER, &basis->P, &basis->Q, &basis->PmQ, &codomain_tmp); + fp2_pow_vartime(&test_pow, &w0, digit_d, NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w1)); +#endif + } + + // now we apply M / (u * d1) where M is the matrix corresponding to the + // endomorphism beta1 = phi o dual(phi1) we multiply beta1 by the inverse of + // (u*d1) mod 2^TORSION_EVEN_POWER + ibz_mul(&tmp, u, d1); + if (index_order1 != 0) { + ibz_mul(&tmp, &tmp, &CONNECTING_IDEALS[index_order1].norm); + } + ibz_invmod(&tmp, &tmp, &TORSION_PLUS_2POWER); + ibz_mul(&beta1->coord[0], &beta1->coord[0], &tmp); + ibz_mul(&beta1->coord[1], &beta1->coord[1], &tmp); + ibz_mul(&beta1->coord[2], &beta1->coord[2], &tmp); + ibz_mul(&beta1->coord[3], &beta1->coord[3], &tmp); + + endomorphism_application_even_basis(basis, 0, codomain, beta1, TORSION_EVEN_POWER); + +#ifndef NDEBUG + { + ec_curve_t E0 = CURVE_E0; + ec_curve_t codomain_tmp; + ec_basis_t bas0 = CURVES_WITH_ENDOMORPHISMS[0].basis_even; + copy_curve(&codomain_tmp, codomain); + copy_curve(&E1_tmp, &E1); + copy_curve(&E2_tmp, &E2); + weil(&w0a, TORSION_EVEN_POWER, &bas0.P, &bas0.Q, &bas0.PmQ, &E0); + weil(&w1a, TORSION_EVEN_POWER, &basis->P, &basis->Q, &basis->PmQ, &codomain_tmp); + digit_t tmp_d[2 * NWORDS_ORDER] = { 0 }; + if (index_order1 != 0) { + copy_basis(&bas1, &CURVES_WITH_ENDOMORPHISMS[index_order1].basis_even); + weil(&w0, TORSION_EVEN_POWER, &bas1.P, &bas1.Q, &bas1.PmQ, &E1_tmp); + ibz_to_digit_array(tmp_d, &CONNECTING_IDEALS[index_order1].norm); + fp2_pow_vartime(&test_pow, &w0a, tmp_d, 2 * NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w0)); + } + if (index_order2 != 0) { + copy_basis(&bas2, &CURVES_WITH_ENDOMORPHISMS[index_order2].basis_even); + weil(&w0, TORSION_EVEN_POWER, &bas2.P, &bas2.Q, &bas2.PmQ, &E2_tmp); + ibz_to_digit_array(tmp_d, &CONNECTING_IDEALS[index_order2].norm); + fp2_pow_vartime(&test_pow, &w0a, tmp_d, 2 * NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w0)); + } + ibz_to_digit_array(tmp_d, &lideal->norm); + fp2_pow_vartime(&test_pow, &w0a, tmp_d, 2 * NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w1a)); + } +#endif + +cleanup: + ibz_finalize(&norm_d); + ibz_finalize(&test1); + ibz_finalize(&test2); + ibz_finalize(&target); + ibz_finalize(&tmp); + ibz_finalize(&two_pow); + quat_alg_elem_finalize(&theta); + return ret; +} + +int +dim2id2iso_arbitrary_isogeny_evaluation(ec_basis_t *basis, ec_curve_t *codomain, const quat_left_ideal_t *lideal) +{ + int ret; + + quat_alg_elem_t beta1, beta2; + ibz_t u, v, d1, d2; + + quat_alg_elem_init(&beta1); + quat_alg_elem_init(&beta2); + + ibz_init(&u); + ibz_init(&v); + ibz_init(&d1); + ibz_init(&d2); + + ret = dim2id2iso_ideal_to_isogeny_clapotis( + &beta1, &beta2, &u, &v, &d1, &d2, codomain, basis, lideal, &QUATALG_PINFTY); + + quat_alg_elem_finalize(&beta1); + quat_alg_elem_finalize(&beta2); + + ibz_finalize(&u); + ibz_finalize(&v); + ibz_finalize(&d1); + ibz_finalize(&d2); + + return ret; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/e0_basis.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/e0_basis.c new file mode 100644 index 0000000000..1b12a8380f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/e0_basis.c @@ -0,0 +1,55 @@ +#include +const fp2_t BASIS_E0_PX = { +#if 0 +#elif RADIX == 16 +{0x1196, 0x134b, 0xdbd, 0x118d, 0x712, 0x1646, 0x5d7, 0x8eb, 0x431, 0xf5b, 0x161e, 0x13b6, 0x1c07, 0x42, 0x8ba, 0xeec, 0x1a43, 0x545, 0x1cdb, 
0x1659, 0x1614, 0xde, 0x72d, 0x1b80, 0x1706, 0x15a3, 0x894, 0xd4a, 0x1b2f, 0x12} +#elif RADIX == 32 +{0x9a5c65a, 0xa31adbd, 0x7b231c4, 0xc51d65d, 0x1e7ad90, 0x1e76d6, 0x8ba0217, 0xe90ddd8, 0x3cdb2a2, 0xf5852cb, 0x72d06, 0xd1dc1b7, 0xa94894a, 0x14cbd} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x31c4a31adbd9a5c6, 0xe7ad90c51d65d7b2, 0x88ba021701e76d61, 0x2cb3cdb2a2e90ddd, 0xdc1b70072d06f585, 0x16eecbda94894ad1} +#else +{0x94635b7b34b8c, 0x431475975ec8c7, 0x380f3b6b0f3d6c, 0x2e90ddd88ba021, 0x5eb0a59679b654, 0x347706dc01cb41, 0xb7765ed4a44a5} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xa85, 0x10cc, 0x1ef, 0xb0b, 0x1082, 0x5be, 0xd14, 0x1100, 0x1a33, 0x174b, 0x181c, 0x83e, 0x1034, 0x18ba, 0x205, 0x1f39, 0x1e9, 0x1998, 0x130e, 0x801, 0xfeb, 0x698, 0xdf9, 0x6a5, 0x5b6, 0x2c8, 0x1283, 0xad9, 0x960, 0x1e} +#elif RADIX == 32 +{0x8662a17, 0x96161ef, 0x42df420, 0xce200d1, 0x1cba5e8, 0xd107d8, 0x205c5d4, 0x7a7e72, 0x330eccc, 0xc3fad00, 0x4adf934, 0x6416d8d, 0x5b32831, 0x2f581} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf42096161ef8662a, 0xcba5e8ce200d142d, 0x2205c5d40d107d81, 0xd00330eccc07a7e7, 0x16d8d4adf934c3fa, 0x6065815b3283164} +#else +{0x412c2c3df0cc54, 0x2338803450b7d0, 0x206883ec0e5d2f, 0x407a7e72205c5d, 0x187f5a00661d99, 0x5905b6352b7e4d, 0x3032c0ad99418} +#endif +#endif +}; +const fp2_t BASIS_E0_QX = { +#if 0 +#elif RADIX == 16 +{0x16ed, 0x818, 0x127a, 0xcfb, 0x1be6, 0x1b40, 0x1bf1, 0xe75, 0x129c, 0x151, 0x425, 0x142e, 0x1edb, 0x254, 0x5cc, 0x1a5b, 0x1e1d, 0x1e27, 0x1a12, 0x8a8, 0x59e, 0x933, 0x1647, 0x686, 0x19e, 0x1e51, 0x151f, 0x1b6e, 0x1efe, 0xd} +#elif RADIX == 32 +{0x40c5bb5, 0x99f727a, 0x1da06f9, 0x71cebbf, 0x250a8ca, 0xb6e85c4, 0x5cc12a7, 0xf8774b6, 0x1a12f13, 0x9967915, 0xd64749, 0x288678d, 0x6dd51ff, 0x2ebfb} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6f999f727a40c5b, 0x50a8ca71cebbf1da, 0x65cc12a7b6e85c42, 0x9151a12f13f8774b, 0x8678d0d647499967, 0x2e23bfb6dd51ff28} +#else +{0x7333ee4f4818b7, 0x29c73aefc7681b, 0x3db742e2128546, 0x3f8774b65cc12a, 0x332cf22a3425e2, 0x4a219e343591d2, 0x6d1dfdb6ea8ff} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x18a9, 0x1838, 0x1588, 0x1720, 0xf3f, 0x1fcd, 0x44d, 0x1e6b, 0x681, 0x1249, 0x1f8a, 0x5af, 0x1f58, 0x1c12, 0xf21, 0x1887, 0x278, 0x156a, 0xbfe, 0x765, 0x12f7, 0x4da, 0x16ce, 0x7c1, 0x1c04, 0x1773, 0x853, 0xab7, 0xe1d, 0x1a} +#elif RADIX == 32 +{0xc1c62a7, 0xee41588, 0xdfe6bcf, 0x7cd644, 0x8a9249a, 0xd60b5ff, 0xf21e097, 0x9e310e, 0xabfeab5, 0xd4bdcec, 0x836ce26, 0xb9f010f, 0x56e853b, 0x10875} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6bcfee41588c1c62, 0xa9249a07cd644dfe, 0xef21e097d60b5ff8, 0xcecabfeab509e310, 0xf010f836ce26d4bd, 0x2a7787556e853bb9} +#else +{0x1fdc82b11838c5, 0x681f359137f9af, 0x3eb05affc54924, 0x509e310ef21e09, 0x5a97b9d957fd56, 0x6e7c043e0db389, 0x4fbc3aab7429d} +#endif +#endif +}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/e0_basis.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/e0_basis.h new file mode 100644 index 0000000000..05cafb8462 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/e0_basis.h @@ -0,0 +1,3 @@ +#include +extern const fp2_t BASIS_E0_PX; +extern const fp2_t BASIS_E0_QX; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec.c new file mode 100644 index 0000000000..be4e4e55b1 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec.c @@ -0,0 +1,665 @@ +#include +#include +#include 
+#include + +void +ec_point_init(ec_point_t *P) +{ // Initialize point as identity element (1:0) + fp2_set_one(&(P->x)); + fp2_set_zero(&(P->z)); +} + +void +ec_curve_init(ec_curve_t *E) +{ // Initialize the curve struct + // Initialize the constants + fp2_set_zero(&(E->A)); + fp2_set_one(&(E->C)); + + // Initialize the point (A+2 : 4C) + ec_point_init(&(E->A24)); + + // Set the bool to be false by default + E->is_A24_computed_and_normalized = false; +} + +void +select_point(ec_point_t *Q, const ec_point_t *P1, const ec_point_t *P2, const digit_t option) +{ // Select points in constant time + // If option = 0 then Q <- P1, else if option = 0xFF...FF then Q <- P2 + fp2_select(&(Q->x), &(P1->x), &(P2->x), option); + fp2_select(&(Q->z), &(P1->z), &(P2->z), option); +} + +void +cswap_points(ec_point_t *P, ec_point_t *Q, const digit_t option) +{ // Swap points in constant time + // If option = 0 then P <- P and Q <- Q, else if option = 0xFF...FF then P <- Q and Q <- P + fp2_cswap(&(P->x), &(Q->x), option); + fp2_cswap(&(P->z), &(Q->z), option); +} + +void +ec_normalize_point(ec_point_t *P) +{ + fp2_inv(&P->z); + fp2_mul(&P->x, &P->x, &P->z); + fp2_set_one(&(P->z)); +} + +void +ec_normalize_curve(ec_curve_t *E) +{ + fp2_inv(&E->C); + fp2_mul(&E->A, &E->A, &E->C); + fp2_set_one(&E->C); +} + +void +ec_curve_normalize_A24(ec_curve_t *E) +{ + if (!E->is_A24_computed_and_normalized) { + AC_to_A24(&E->A24, E); + ec_normalize_point(&E->A24); + E->is_A24_computed_and_normalized = true; + } + assert(fp2_is_one(&E->A24.z)); +} + +void +ec_normalize_curve_and_A24(ec_curve_t *E) +{ // Neither the curve or A24 are guaranteed to be normalized. + // First we normalize (A/C : 1) and conditionally compute + if (!fp2_is_one(&E->C)) { + ec_normalize_curve(E); + } + + if (!E->is_A24_computed_and_normalized) { + // Now compute A24 = ((A + 2) / 4 : 1) + fp2_add_one(&E->A24.x, &E->A); // re(A24.x) = re(A) + 1 + fp2_add_one(&E->A24.x, &E->A24.x); // re(A24.x) = re(A) + 2 + fp_copy(&E->A24.x.im, &E->A.im); // im(A24.x) = im(A) + + fp2_half(&E->A24.x, &E->A24.x); // (A + 2) / 2 + fp2_half(&E->A24.x, &E->A24.x); // (A + 2) / 4 + fp2_set_one(&E->A24.z); + + E->is_A24_computed_and_normalized = true; + } +} + +uint32_t +ec_is_zero(const ec_point_t *P) +{ + return fp2_is_zero(&P->z); +} + +uint32_t +ec_has_zero_coordinate(const ec_point_t *P) +{ + return fp2_is_zero(&P->x) | fp2_is_zero(&P->z); +} + +uint32_t +ec_is_equal(const ec_point_t *P, const ec_point_t *Q) +{ // Evaluate if two points in Montgomery coordinates (X:Z) are equal + // Returns 0xFFFFFFFF (true) if P=Q, 0 (false) otherwise + fp2_t t0, t1; + + // Check if P, Q are the points at infinity + uint32_t l_zero = ec_is_zero(P); + uint32_t r_zero = ec_is_zero(Q); + + // Check if PX * QZ = QX * PZ + fp2_mul(&t0, &P->x, &Q->z); + fp2_mul(&t1, &P->z, &Q->x); + uint32_t lr_equal = fp2_is_equal(&t0, &t1); + + // Points are equal if + // - Both are zero, or + // - neither are zero AND PX * QZ = QX * PZ + return (l_zero & r_zero) | (~l_zero & ~r_zero * lr_equal); +} + +uint32_t +ec_is_two_torsion(const ec_point_t *P, const ec_curve_t *E) +{ + if (ec_is_zero(P)) + return 0; + + uint32_t x_is_zero, tmp_is_zero; + fp2_t t0, t1, t2; + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + fp2_add(&t1, &t0, &t1); + fp2_mul(&t2, &t2, &E->A); + fp2_mul(&t1, &t1, &E->C); + fp2_add(&t1, &t1, &t1); + fp2_add(&t0, &t1, &t2); // 4 (CX^2+CZ^2+AXZ) + + x_is_zero = fp2_is_zero(&P->x); + tmp_is_zero = 
fp2_is_zero(&t0); + + // two torsion if x or x^2 + Ax + 1 is zero + return x_is_zero | tmp_is_zero; +} + +uint32_t +ec_is_four_torsion(const ec_point_t *P, const ec_curve_t *E) +{ + ec_point_t test; + xDBL_A24(&test, P, &E->A24, E->is_A24_computed_and_normalized); + return ec_is_two_torsion(&test, E); +} + +uint32_t +ec_is_basis_four_torsion(const ec_basis_t *B, const ec_curve_t *E) +{ // Check if basis points (P, Q) form a full 2^t-basis + ec_point_t P2, Q2; + xDBL_A24(&P2, &B->P, &E->A24, E->is_A24_computed_and_normalized); + xDBL_A24(&Q2, &B->Q, &E->A24, E->is_A24_computed_and_normalized); + return (ec_is_two_torsion(&P2, E) & ec_is_two_torsion(&Q2, E) & ~ec_is_equal(&P2, &Q2)); +} + +int +ec_curve_verify_A(const fp2_t *A) +{ // Verify the Montgomery coefficient A is valid (A^2-4 \ne 0) + // Return 1 if curve is valid, 0 otherwise + fp2_t t; + fp2_set_one(&t); + fp_add(&t.re, &t.re, &t.re); // t=2 + if (fp2_is_equal(A, &t)) + return 0; + fp_neg(&t.re, &t.re); // t=-2 + if (fp2_is_equal(A, &t)) + return 0; + return 1; +} + +int +ec_curve_init_from_A(ec_curve_t *E, const fp2_t *A) +{ // Initialize the curve from the A coefficient and check it is valid + // Return 1 if curve is valid, 0 otherwise + ec_curve_init(E); + fp2_copy(&E->A, A); // Set A + return ec_curve_verify_A(A); +} + +void +ec_j_inv(fp2_t *j_inv, const ec_curve_t *curve) +{ // j-invariant computation for Montgommery coefficient A2=(A+2C:4C) + fp2_t t0, t1; + + fp2_sqr(&t1, &curve->C); + fp2_sqr(j_inv, &curve->A); + fp2_add(&t0, &t1, &t1); + fp2_sub(&t0, j_inv, &t0); + fp2_sub(&t0, &t0, &t1); + fp2_sub(j_inv, &t0, &t1); + fp2_sqr(&t1, &t1); + fp2_mul(j_inv, j_inv, &t1); + fp2_add(&t0, &t0, &t0); + fp2_add(&t0, &t0, &t0); + fp2_sqr(&t1, &t0); + fp2_mul(&t0, &t0, &t1); + fp2_add(&t0, &t0, &t0); + fp2_add(&t0, &t0, &t0); + fp2_inv(j_inv); + fp2_mul(j_inv, &t0, j_inv); +} + +void +xDBL_E0(ec_point_t *Q, const ec_point_t *P) +{ // Doubling of a Montgomery point in projective coordinates (X:Z) on the curve E0 with (A:C) = (0:1). + // Input: projective Montgomery x-coordinates P = (XP:ZP), where xP=XP/ZP, and Montgomery curve constants (A:C) = (0:1). + // Output: projective Montgomery x-coordinates Q <- 2*P = (XQ:ZQ) such that x(2P)=XQ/ZQ. + fp2_t t0, t1, t2; + + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + fp2_add(&t1, &t1, &t1); + fp2_mul(&Q->x, &t0, &t1); + fp2_add(&Q->z, &t1, &t2); + fp2_mul(&Q->z, &Q->z, &t2); +} + +void +xDBL(ec_point_t *Q, const ec_point_t *P, const ec_point_t *AC) +{ // Doubling of a Montgomery point in projective coordinates (X:Z). Computation of coefficient values A+2C and 4C + // on-the-fly. + // Input: projective Montgomery x-coordinates P = (XP:ZP), where xP=XP/ZP, and Montgomery curve constants (A:C). + // Output: projective Montgomery x-coordinates Q <- 2*P = (XQ:ZQ) such that x(2P)=XQ/ZQ. + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + fp2_add(&t3, &AC->z, &AC->z); + fp2_mul(&t1, &t1, &t3); + fp2_add(&t1, &t1, &t1); + fp2_mul(&Q->x, &t0, &t1); + fp2_add(&t0, &t3, &AC->x); + fp2_mul(&t0, &t0, &t2); + fp2_add(&t0, &t0, &t1); + fp2_mul(&Q->z, &t0, &t2); +} + +void +xDBL_A24(ec_point_t *Q, const ec_point_t *P, const ec_point_t *A24, const bool A24_normalized) +{ // Doubling of a Montgomery point in projective coordinates (X:Z). 
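// Sketch of the underlying algebra (standard x-only Montgomery doubling): with
// t0 = (X+Z)^2, t1 = (X-Z)^2 and t2 = t0 - t1 = 4XZ as computed below, the
// non-normalized branch returns
//     XQ = 4C * (X+Z)^2 * (X-Z)^2,
//     ZQ = 4XZ * ( (A+2C)*4XZ + 4C*(X-Z)^2 ),
// that is x(2P) = (X^2 - Z^2)^2 / (4XZ * (X^2 + (A/C)*X*Z + Z^2)) after clearing
// the common projective factor; the normalized branch only skips the
// multiplication by A24->z = 1.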
+ // Input: projective Montgomery x-coordinates P = (XP:ZP), where xP=XP/ZP, and + // the Montgomery curve constants A24 = (A+2C:4C) (or A24 = (A+2C/4C:1) if normalized). + // Output: projective Montgomery x-coordinates Q <- 2*P = (XQ:ZQ) such that x(2P)=XQ/ZQ. + fp2_t t0, t1, t2; + + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + if (!A24_normalized) + fp2_mul(&t1, &t1, &A24->z); + fp2_mul(&Q->x, &t0, &t1); + fp2_mul(&t0, &t2, &A24->x); + fp2_add(&t0, &t0, &t1); + fp2_mul(&Q->z, &t0, &t2); +} + +void +xADD(ec_point_t *R, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ) +{ // Differential addition of Montgomery points in projective coordinates (X:Z). + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, and difference + // PQ=P-Q=(XPQ:ZPQ). + // Output: projective Montgomery point R <- P+Q = (XR:ZR) such that x(P+Q)=XR/ZR. + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_add(&t2, &Q->x, &Q->z); + fp2_sub(&t3, &Q->x, &Q->z); + fp2_mul(&t0, &t0, &t3); + fp2_mul(&t1, &t1, &t2); + fp2_add(&t2, &t0, &t1); + fp2_sub(&t3, &t0, &t1); + fp2_sqr(&t2, &t2); + fp2_sqr(&t3, &t3); + fp2_mul(&t2, &PQ->z, &t2); + fp2_mul(&R->z, &PQ->x, &t3); + fp2_copy(&R->x, &t2); +} + +void +xDBLADD(ec_point_t *R, + ec_point_t *S, + const ec_point_t *P, + const ec_point_t *Q, + const ec_point_t *PQ, + const ec_point_t *A24, + const bool A24_normalized) +{ // Simultaneous doubling and differential addition. + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, the difference + // PQ=P-Q=(XPQ:ZPQ), and the Montgomery curve constants A24 = (A+2C:4C) (or A24 = (A+2C/4C:1) if normalized). + // Output: projective Montgomery points R <- 2*P = (XR:ZR) such that x(2P)=XR/ZR, and S <- P+Q = (XS:ZS) such that = + // x(Q+P)=XS/ZS. + fp2_t t0, t1, t2; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&R->x, &t0); + fp2_sub(&t2, &Q->x, &Q->z); + fp2_add(&S->x, &Q->x, &Q->z); + fp2_mul(&t0, &t0, &t2); + fp2_sqr(&R->z, &t1); + fp2_mul(&t1, &t1, &S->x); + fp2_sub(&t2, &R->x, &R->z); + if (!A24_normalized) + fp2_mul(&R->z, &R->z, &A24->z); + fp2_mul(&R->x, &R->x, &R->z); + fp2_mul(&S->x, &A24->x, &t2); + fp2_sub(&S->z, &t0, &t1); + fp2_add(&R->z, &R->z, &S->x); + fp2_add(&S->x, &t0, &t1); + fp2_mul(&R->z, &R->z, &t2); + fp2_sqr(&S->z, &S->z); + fp2_sqr(&S->x, &S->x); + fp2_mul(&S->z, &S->z, &PQ->x); + fp2_mul(&S->x, &S->x, &PQ->z); +} + +void +xMUL(ec_point_t *Q, const ec_point_t *P, const digit_t *k, const int kbits, const ec_curve_t *curve) +{ // The Montgomery ladder + // Input: projective Montgomery point P=(XP:ZP) such that xP=XP/ZP, a scalar k of bitlength kbits, and + // the Montgomery curve constants (A:C) (or A24 = (A+2C/4C:1) if normalized). + // Output: projective Montgomery points Q <- k*P = (XQ:ZQ) such that x(k*P)=XQ/ZQ. 
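// Sketch of the invariant kept by the constant-time loop below (standard Montgomery
// ladder): up to the deferred conditional swap, after processing the top bits
// b_{kbits-1} .. b_i of k the registers logically hold
//     R0 = [t]P and R1 = [t+1]P, where t = (b_{kbits-1} ... b_i)_2,
// so R1 - R0 = P at every step, which is why P itself is passed to xDBLADD as the
// difference point. The swaps are driven by bit transitions (bit ^ prevbit), so the
// sequence of field operations is independent of the scalar k.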
+ ec_point_t R0, R1, A24; + digit_t mask; + unsigned int bit, prevbit = 0, swap; + + if (!curve->is_A24_computed_and_normalized) { + // Computation of A24=(A+2C:4C) + fp2_add(&A24.x, &curve->C, &curve->C); + fp2_add(&A24.z, &A24.x, &A24.x); + fp2_add(&A24.x, &A24.x, &curve->A); + } else { + fp2_copy(&A24.x, &curve->A24.x); + fp2_copy(&A24.z, &curve->A24.z); + // Assert A24 has been normalised + assert(fp2_is_one(&A24.z)); + } + + // R0 <- (1:0), R1 <- P + ec_point_init(&R0); + fp2_copy(&R1.x, &P->x); + fp2_copy(&R1.z, &P->z); + + // Main loop + for (int i = kbits - 1; i >= 0; i--) { + bit = (k[i >> LOG2RADIX] >> (i & (RADIX - 1))) & 1; + swap = bit ^ prevbit; + prevbit = bit; + mask = 0 - (digit_t)swap; + + cswap_points(&R0, &R1, mask); + xDBLADD(&R0, &R1, &R0, &R1, P, &A24, true); + } + swap = 0 ^ prevbit; + mask = 0 - (digit_t)swap; + cswap_points(&R0, &R1, mask); + + fp2_copy(&Q->x, &R0.x); + fp2_copy(&Q->z, &R0.z); +} + +int +xDBLMUL(ec_point_t *S, + const ec_point_t *P, + const digit_t *k, + const ec_point_t *Q, + const digit_t *l, + const ec_point_t *PQ, + const int kbits, + const ec_curve_t *curve) +{ // The Montgomery biladder + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, scalars k and l of + // bitlength kbits, the difference PQ=P-Q=(XPQ:ZPQ), and the Montgomery curve constants (A:C). + // Output: projective Montgomery point S <- k*P + l*Q = (XS:ZS) such that x(k*P + l*Q)=XS/ZS. + + int i, A_is_zero; + digit_t evens, mevens, bitk0, bitl0, maskk, maskl, temp, bs1_ip1, bs2_ip1, bs1_i, bs2_i, h; + digit_t sigma[2] = { 0 }, pre_sigma = 0; + digit_t k_t[NWORDS_ORDER], l_t[NWORDS_ORDER], one[NWORDS_ORDER] = { 0 }, r[2 * BITS] = { 0 }; + ec_point_t DIFF1a, DIFF1b, DIFF2a, DIFF2b, R[3] = { 0 }, T[3]; + + // differential additions formulas are invalid in this case + if (ec_has_zero_coordinate(P) | ec_has_zero_coordinate(Q) | ec_has_zero_coordinate(PQ)) + return 0; + + // Derive sigma according to parity + bitk0 = (k[0] & 1); + bitl0 = (l[0] & 1); + maskk = 0 - bitk0; // Parity masks: 0 if even, otherwise 1...1 + maskl = 0 - bitl0; + sigma[0] = (bitk0 ^ 1); + sigma[1] = (bitl0 ^ 1); + evens = sigma[0] + sigma[1]; // Count number of even scalars + mevens = 0 - (evens & 1); // Mask mevens <- 0 if # even of scalars = 0 or 2, otherwise mevens = 1...1 + + // If k and l are both even or both odd, pick sigma = (0,1) + sigma[0] = (sigma[0] & mevens); + sigma[1] = (sigma[1] & mevens) | (1 & ~mevens); + + // Convert even scalars to odd + one[0] = 1; + mp_sub(k_t, k, one, NWORDS_ORDER); + mp_sub(l_t, l, one, NWORDS_ORDER); + select_ct(k_t, k_t, k, maskk, NWORDS_ORDER); + select_ct(l_t, l_t, l, maskl, NWORDS_ORDER); + + // Scalar recoding + for (i = 0; i < kbits; i++) { + // If sigma[0] = 1 swap k_t and l_t + maskk = 0 - (sigma[0] ^ pre_sigma); + swap_ct(k_t, l_t, maskk, NWORDS_ORDER); + + if (i == kbits - 1) { + bs1_ip1 = 0; + bs2_ip1 = 0; + } else { + bs1_ip1 = mp_shiftr(k_t, 1, NWORDS_ORDER); + bs2_ip1 = mp_shiftr(l_t, 1, NWORDS_ORDER); + } + bs1_i = k_t[0] & 1; + bs2_i = l_t[0] & 1; + + r[2 * i] = bs1_i ^ bs1_ip1; + r[2 * i + 1] = bs2_i ^ bs2_ip1; + + // Revert sigma if second bit, r_(2i+1), is 1 + pre_sigma = sigma[0]; + maskk = 0 - r[2 * i + 1]; + select_ct(&temp, &sigma[0], &sigma[1], maskk, 1); + select_ct(&sigma[1], &sigma[1], &sigma[0], maskk, 1); + sigma[0] = temp; + } + + // Point initialization + ec_point_init(&R[0]); + maskk = 0 - sigma[0]; + select_point(&R[1], P, Q, maskk); + select_point(&R[2], Q, P, maskk); + + fp2_copy(&DIFF1a.x, 
&R[1].x); + fp2_copy(&DIFF1a.z, &R[1].z); + fp2_copy(&DIFF1b.x, &R[2].x); + fp2_copy(&DIFF1b.z, &R[2].z); + + // Initialize DIFF2a <- P+Q, DIFF2b <- P-Q + xADD(&R[2], &R[1], &R[2], PQ); + if (ec_has_zero_coordinate(&R[2])) + return 0; // non valid formulas + + fp2_copy(&DIFF2a.x, &R[2].x); + fp2_copy(&DIFF2a.z, &R[2].z); + fp2_copy(&DIFF2b.x, &PQ->x); + fp2_copy(&DIFF2b.z, &PQ->z); + + A_is_zero = fp2_is_zero(&curve->A); + + // Main loop + for (i = kbits - 1; i >= 0; i--) { + h = r[2 * i] + r[2 * i + 1]; // in {0, 1, 2} + maskk = 0 - (h & 1); + select_point(&T[0], &R[0], &R[1], maskk); + maskk = 0 - (h >> 1); + select_point(&T[0], &T[0], &R[2], maskk); + if (A_is_zero) { + xDBL_E0(&T[0], &T[0]); + } else { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(&T[0], &T[0], &curve->A24, true); + } + + maskk = 0 - r[2 * i + 1]; // in {0, 1} + select_point(&T[1], &R[0], &R[1], maskk); + select_point(&T[2], &R[1], &R[2], maskk); + + cswap_points(&DIFF1a, &DIFF1b, maskk); + xADD(&T[1], &T[1], &T[2], &DIFF1a); + xADD(&T[2], &R[0], &R[2], &DIFF2a); + + // If hw (mod 2) = 1 then swap DIFF2a and DIFF2b + maskk = 0 - (h & 1); + cswap_points(&DIFF2a, &DIFF2b, maskk); + + // R <- T + copy_point(&R[0], &T[0]); + copy_point(&R[1], &T[1]); + copy_point(&R[2], &T[2]); + } + + // Output R[evens] + select_point(S, &R[0], &R[1], mevens); + + maskk = 0 - (bitk0 & bitl0); + select_point(S, S, &R[2], maskk); + return 1; +} + +int +ec_ladder3pt(ec_point_t *R, + const digit_t *m, + const ec_point_t *P, + const ec_point_t *Q, + const ec_point_t *PQ, + const ec_curve_t *E) +{ // The 3-point Montgomery ladder + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, a scalar k of + // bitlength kbits, the difference PQ=P-Q=(XPQ:ZPQ), and the Montgomery curve constants A24 = (A+2C/4C:1). + // Output: projective Montgomery point R <- P + m*Q = (XR:ZR) such that x(P + m*Q)=XR/ZR. 
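// Sketch of the loop invariant (the classical three-point ladder): scanning the
// bits of m from least to most significant, after j steps the registers satisfy
//     X0 = [2^j]Q, X1 = P + (m mod 2^j)*Q, X2 = X0 - X1 (up to sign, which
//     x-only coordinates do not see),
// starting from (X0, X1, X2) = (Q, P, P - Q). When the current bit is 1, X0 is
// added into X1 (otherwise into X2) while X0 itself is doubled, so at the end X1
// holds P + m*Q.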
+ assert(E->is_A24_computed_and_normalized); + if (!fp2_is_one(&E->A24.z)) { + return 0; + } + // Formulas are not valid in that case + if (ec_has_zero_coordinate(PQ)) { + return 0; + } + + ec_point_t X0, X1, X2; + copy_point(&X0, Q); + copy_point(&X1, P); + copy_point(&X2, PQ); + + int i, j; + digit_t t; + for (i = 0; i < NWORDS_ORDER; i++) { + t = 1; + for (j = 0; j < RADIX; j++) { + cswap_points(&X1, &X2, -((t & m[i]) == 0)); + xDBLADD(&X0, &X1, &X0, &X1, &X2, &E->A24, true); + cswap_points(&X1, &X2, -((t & m[i]) == 0)); + t <<= 1; + }; + }; + copy_point(R, &X1); + return 1; +} + +// WRAPPERS to export + +void +ec_dbl(ec_point_t *res, const ec_point_t *P, const ec_curve_t *curve) +{ + // If A24 = ((A+2)/4 : 1) we save multiplications + if (curve->is_A24_computed_and_normalized) { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(res, P, &curve->A24, true); + } else { + // Otherwise we compute A24 on the fly for doubling + xDBL(res, P, (const ec_point_t *)curve); + } +} + +void +ec_dbl_iter(ec_point_t *res, int n, const ec_point_t *P, ec_curve_t *curve) +{ + if (n == 0) { + copy_point(res, P); + return; + } + + // When the chain is long enough, we should normalise A24 + if (n > 50) { + ec_curve_normalize_A24(curve); + } + + // When A24 is normalized we can save some multiplications + if (curve->is_A24_computed_and_normalized) { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(res, P, &curve->A24, true); + for (int i = 0; i < n - 1; i++) { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(res, res, &curve->A24, true); + } + } else { + // Otherwise we do normal doubling + xDBL(res, P, (const ec_point_t *)curve); + for (int i = 0; i < n - 1; i++) { + xDBL(res, res, (const ec_point_t *)curve); + } + } +} + +void +ec_dbl_iter_basis(ec_basis_t *res, int n, const ec_basis_t *B, ec_curve_t *curve) +{ + ec_dbl_iter(&res->P, n, &B->P, curve); + ec_dbl_iter(&res->Q, n, &B->Q, curve); + ec_dbl_iter(&res->PmQ, n, &B->PmQ, curve); +} + +void +ec_mul(ec_point_t *res, const digit_t *scalar, const int kbits, const ec_point_t *P, ec_curve_t *curve) +{ + // For large scalars it's worth normalising anyway + if (kbits > 50) { + ec_curve_normalize_A24(curve); + } + + // When A24 is computed and normalized we save some Fp2 multiplications + xMUL(res, P, scalar, kbits, curve); +} + +int +ec_biscalar_mul(ec_point_t *res, + const digit_t *scalarP, + const digit_t *scalarQ, + const int kbits, + const ec_basis_t *PQ, + const ec_curve_t *curve) +{ + if (fp2_is_zero(&PQ->PmQ.z)) + return 0; + + /* Differential additions behave badly when PmQ = (0:1), so we need to + * treat this case specifically. 
Since we assume P, Q are a basis, this + * can happen only if kbits==1 */ + if (kbits == 1) { + // Sanity check: our basis should be given by 2-torsion points + if (!ec_is_two_torsion(&PQ->P, curve) || !ec_is_two_torsion(&PQ->Q, curve) || + !ec_is_two_torsion(&PQ->PmQ, curve)) + return 0; + digit_t bP, bQ; + bP = (scalarP[0] & 1); + bQ = (scalarQ[0] & 1); + if (bP == 0 && bQ == 0) + ec_point_init(res); //(1: 0) + else if (bP == 1 && bQ == 0) + copy_point(res, &PQ->P); + else if (bP == 0 && bQ == 1) + copy_point(res, &PQ->Q); + else if (bP == 1 && bQ == 1) + copy_point(res, &PQ->PmQ); + else // should never happen + assert(0); + return 1; + } else { + ec_curve_t E; + copy_curve(&E, curve); + + if (!fp2_is_zero(&curve->A)) { // If A is not zero normalize + ec_curve_normalize_A24(&E); + } + return xDBLMUL(res, &PQ->P, scalarP, &PQ->Q, scalarQ, &PQ->PmQ, kbits, (const ec_curve_t *)&E); + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec.h new file mode 100644 index 0000000000..ee2be38060 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec.h @@ -0,0 +1,668 @@ +/** @file + * + * @authors Luca De Feo, Francisco RH + * + * @brief Elliptic curve stuff + */ + +#ifndef EC_H +#define EC_H +#include +#include +#include +#include +#include + +/** @defgroup ec Elliptic curves + * @{ + */ + +/** @defgroup ec_t Data structures + * @{ + */ + +/** @brief Projective point on the Kummer line E/pm 1 in Montgomery coordinates + * + * @typedef ec_point_t + * + * @struct ec_point_t + * + * A projective point in (X:Z) or (X:Y:Z) coordinates (tbd). + */ +typedef struct ec_point_t +{ + fp2_t x; + fp2_t z; +} ec_point_t; + +/** @brief Projective point in Montgomery coordinates + * + * @typedef jac_point_t + * + * @struct jac_point_t + * + * A projective point in (X:Y:Z) coordinates + */ +typedef struct jac_point_t +{ + fp2_t x; + fp2_t y; + fp2_t z; +} jac_point_t; + +/** @brief Addition components + * + * @typedef add_components_t + * + * @struct add_components_t + * + * 3 components u,v,w that define the (X:Z) coordinates of both + * addition and substraction of two distinct points with + * P+Q =(u-v:w) and P-Q = (u+v=w) + */ +typedef struct add_components_t +{ + fp2_t u; + fp2_t v; + fp2_t w; +} add_components_t; + +/** @brief A basis of a torsion subgroup + * + * @typedef ec_basis_t + * + * @struct ec_basis_t + * + * A pair of points (or a triplet, tbd) forming a basis of a torsion subgroup. 
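 *
 * (Background on why the difference point is stored as well: x-only arithmetic
 * cannot tell R from -R, so sums are formed by differential addition, which needs
 * x(P-Q) in addition to x(P) and x(Q); on a Montgomery curve the three values are
 * tied together by the standard identity
 *     x(P+Q) * x(P-Q) * (x(P) - x(Q))^2 = (x(P)*x(Q) - 1)^2 .)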
+ */ +typedef struct ec_basis_t +{ + ec_point_t P; + ec_point_t Q; + ec_point_t PmQ; +} ec_basis_t; + +/** @brief An elliptic curve + * + * @typedef ec_curve_t + * + * @struct ec_curve_t + * + * An elliptic curve in projective Montgomery form + */ +typedef struct ec_curve_t +{ + fp2_t A; + fp2_t C; ///< cannot be 0 + ec_point_t A24; // the point (A+2 : 4C) + bool is_A24_computed_and_normalized; // says if A24 has been computed and normalized +} ec_curve_t; + +/** @brief An isogeny of degree a power of 2 + * + * @typedef ec_isog_even_t + * + * @struct ec_isog_even_t + */ +typedef struct ec_isog_even_t +{ + ec_curve_t curve; ///< The domain curve + ec_point_t kernel; ///< A kernel generator + unsigned length; ///< The length as a 2-isogeny walk +} ec_isog_even_t; + +/** @brief Isomorphism of Montgomery curves + * + * @typedef ec_isom_t + * + * @struct ec_isom_t + * + * The isomorphism is given by the map maps (X:Z) ↦ ( (Nx X + Nz Z) : (D Z) ) + */ +typedef struct ec_isom_t +{ + fp2_t Nx; + fp2_t Nz; + fp2_t D; +} ec_isom_t; + +// end ec_t +/** @} + */ + +/** @defgroup ec_curve_t Curves and isomorphisms + * @{ + */ + +// Initalisation for curves and points +void ec_curve_init(ec_curve_t *E); +void ec_point_init(ec_point_t *P); + +/** + * @brief Verify that a Montgomery coefficient is valid + * + * @param A an fp2_t + * + * @return 0 if curve is invalid, 1 otherwise + */ +int ec_curve_verify_A(const fp2_t *A); + +/** + * @brief Initialize an elliptic curve from a coefficient + * + * @param A an fp2_t + * @param E the elliptic curve to initialize + * + * @return 0 if curve is invalid, 1 otherwise + */ +int ec_curve_init_from_A(ec_curve_t *E, const fp2_t *A); + +// Copying points, bases and curves +static inline void +copy_point(ec_point_t *P, const ec_point_t *Q) +{ + fp2_copy(&P->x, &Q->x); + fp2_copy(&P->z, &Q->z); +} + +static inline void +copy_basis(ec_basis_t *B1, const ec_basis_t *B0) +{ + copy_point(&B1->P, &B0->P); + copy_point(&B1->Q, &B0->Q); + copy_point(&B1->PmQ, &B0->PmQ); +} + +static inline void +copy_curve(ec_curve_t *E1, const ec_curve_t *E2) +{ + fp2_copy(&(E1->A), &(E2->A)); + fp2_copy(&(E1->C), &(E2->C)); + E1->is_A24_computed_and_normalized = E2->is_A24_computed_and_normalized; + copy_point(&E1->A24, &E2->A24); +} + +// Functions for working with the A24 point and normalisation + +/** + * @brief Reduce (A : C) to (A/C : 1) in place + * + * @param E a curve + */ +void ec_normalize_curve(ec_curve_t *E); + +/** + * @brief Reduce (A + 2 : 4C) to ((A+2)/4C : 1) in place + * + * @param E a curve + */ +void ec_curve_normalize_A24(ec_curve_t *E); + +/** + * @brief Normalise both (A : C) and (A + 2 : 4C) as above, in place + * + * @param E a curve + */ +void ec_normalize_curve_and_A24(ec_curve_t *E); + +/** + * @brief Given a curve E, compute (A+2 : 4C) + * + * @param A24 the value (A+2 : 4C) to return into + * @param E a curve + */ +static inline void +AC_to_A24(ec_point_t *A24, const ec_curve_t *E) +{ + // Maybe we already have this computed + if (E->is_A24_computed_and_normalized) { + copy_point(A24, &E->A24); + return; + } + + // A24 = (A+2C : 4C) + fp2_add(&A24->z, &E->C, &E->C); + fp2_add(&A24->x, &E->A, &A24->z); + fp2_add(&A24->z, &A24->z, &A24->z); +} + +/** + * @brief Given a curve the point (A+2 : 4C) compute the curve coefficients (A : C) + * + * @param E a curve to compute + * @param A24 the value (A+2 : 4C) + */ +static inline void +A24_to_AC(ec_curve_t *E, const ec_point_t *A24) +{ + // (A:C) = ((A+2C)*2-4C : 4C) + fp2_add(&E->A, &A24->x, &A24->x); + fp2_sub(&E->A, 
&E->A, &A24->z); + fp2_add(&E->A, &E->A, &E->A); + fp2_copy(&E->C, &A24->z); +} + +/** + * @brief j-invariant. + * + * @param j_inv computed j_invariant + * @param curve input curve + */ +void ec_j_inv(fp2_t *j_inv, const ec_curve_t *curve); + +/** + * @brief Isomorphism of elliptic curve + * Takes as input two isomorphic Kummer lines in Montgomery form, and output an isomorphism between + * them + * + * @param isom computed isomorphism + * @param from domain curve + * @param to image curve + * @return 0xFFFFFFFF if there was an error during the computation, zero otherwise + */ +uint32_t ec_isomorphism(ec_isom_t *isom, const ec_curve_t *from, const ec_curve_t *to); + +/** + * @brief In-place evaluation of an isomorphism + * + * @param P a point + * @param isom an isomorphism + */ +void ec_iso_eval(ec_point_t *P, ec_isom_t *isom); + +/** @} + */ +/** @defgroup ec_point_t Point operations + * @{ + */ + +/** + * @brief Point equality + * + * @param P a point + * @param Q a point + * @return 0xFFFFFFFF if equal, zero otherwise + */ +uint32_t ec_is_equal(const ec_point_t *P, const ec_point_t *Q); + +/** + * @brief Point equality + * + * @param P a point + * @return 0xFFFFFFFF if point at infinity, zero otherwise + */ +uint32_t ec_is_zero(const ec_point_t *P); + +/** + * @brief Two torsion test + * + * @param P a point + * @param E the elliptic curve + * @return 0xFFFFFFFF if P is 2-torsion but not zero, zero otherwise + */ +uint32_t ec_is_two_torsion(const ec_point_t *P, const ec_curve_t *E); + +/** + * @brief Four torsion test + * + * @param P a point + * @param E the elliptic curve + * @return 0xFFFFFFFF if P is 2-torsion but not zero, zero otherwise + */ +uint32_t ec_is_four_torsion(const ec_point_t *P, const ec_curve_t *E); + +/** + * @brief Reduce Z-coordinate of point in place + * + * @param P a point + */ +void ec_normalize_point(ec_point_t *P); + +void xDBL_E0(ec_point_t *Q, const ec_point_t *P); +void xADD(ec_point_t *R, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ); +void xDBL_A24(ec_point_t *Q, const ec_point_t *P, const ec_point_t *A24, const bool A24_normalized); + +/** + * @brief Point doubling + * + * @param res computed double of P + * @param P a point + * @param curve an elliptic curve + */ +void ec_dbl(ec_point_t *res, const ec_point_t *P, const ec_curve_t *curve); + +/** + * @brief Point iterated doubling + * + * @param res computed double of P + * @param P a point + * @param n the number of double + * @param curve the curve on which P lays + */ +void ec_dbl_iter(ec_point_t *res, int n, const ec_point_t *P, ec_curve_t *curve); + +/** + * @brief Iterated doubling for a basis P, Q, PmQ + * + * @param res the computed iterated double of basis B + * @param n the number of doubles + * @param B the basis to double + * @param curve the parent curve of the basis + */ +void ec_dbl_iter_basis(ec_basis_t *res, int n, const ec_basis_t *B, ec_curve_t *curve); + +/** + * @brief Point multiplication + * + * @param res computed scalar * P + * @param curve the curve + * @param scalar an unsigned multi-precision integer + * @param P a point + * @param kbits numer of bits of the scalar + */ +void ec_mul(ec_point_t *res, const digit_t *scalar, const int kbits, const ec_point_t *P, ec_curve_t *curve); + +/** + * @brief Combination P+m*Q + * + * @param R computed P + m * Q + * @param curve the curve + * @param m an unsigned multi-precision integer + * @param P a point + * @param Q a point + * @param PQ the difference P-Q + * @return 0 if there was an error, 1 otherwise + */ +int 
ec_ladder3pt(ec_point_t *R, + const digit_t *m, + const ec_point_t *P, + const ec_point_t *Q, + const ec_point_t *PQ, + const ec_curve_t *curve); + +/** + * @brief Linear combination of points of a basis + * + * @param res computed scalarP * P + scalarQ * Q + * @param scalarP an unsigned multi-precision integer + * @param scalarQ an unsigned multi-precision integer + * @param kbits number of bits of the scalars, or n for points of order 2^n + * @param PQ a torsion basis consisting of points P and Q + * @param curve the curve + * + * @return 0 if there was an error, 1 otherwise + */ +int ec_biscalar_mul(ec_point_t *res, + const digit_t *scalarP, + const digit_t *scalarQ, + const int kbits, + const ec_basis_t *PQ, + const ec_curve_t *curve); + +// end point computations +/** + * @} + */ + +/** @defgroup ec_dlog_t Torsion basis computations + * @{ + */ + +/** + * @brief Generate a 2^f-torsion basis from a Montgomery curve along with a hint + * + * @param PQ2 an ec_basis_t + * @param curve an ec_curve_t + * @param f an integer + * + * @return A hint + * + * The algorithm is deterministc + */ +uint8_t ec_curve_to_basis_2f_to_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f); + +/** + * @brief Generate a 2^f-torsion basis from a Montgomery curve and a given hint + * + * @param PQ2 an ec_basis_t + * @param curve an ec_curve_t + * @param f an integer + * @param hint the hint + * + * @return 1 is the basis is valid, 0 otherwise + * + * The algorithm is deterministc + */ +int ec_curve_to_basis_2f_from_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f, const uint8_t hint); +/** // end basis computations + * @} + */ + +/** @defgroup ec_isog_t Isogenies + * @{ + */ + +/** + * @brief Evaluate isogeny of even degree on list of points. + * Returns 0 if successful and -1 if kernel has the wrong order or includes (0:1). + * + * @param image computed image curve + * @param phi isogeny + * @param points a list of points to evaluate the isogeny on, modified in place + * @param len_points length of the list points + * + * @return 0 if there was no error, 0xFFFFFFFF otherwise + */ +uint32_t ec_eval_even(ec_curve_t *image, ec_isog_even_t *phi, ec_point_t *points, unsigned len_points); + +/** + * @brief Multiplicative strategy for a short isogeny chain. Returns 1 if successfull and -1 + * if kernel has the wrong order or includes (0:1) when special=false. + * + * @param curve domain curve, to be overwritten by the codomain curve. + * @param kernel a kernel generator of order 2^len + * @param len the length of t he 2-isogeny chain + * @param points a list of points to evaluate the isogeny on, modified in place + * @param len_points length of the list points + * @param special if true, allow isogenies with (0:1) in the kernel + * + * @return 0 if there was no error, 0xFFFFFFFF otherwise + */ +uint32_t ec_eval_small_chain(ec_curve_t *curve, + const ec_point_t *kernel, + int len, + ec_point_t *points, + unsigned len_points, + bool special); + +/** + * @brief Recover Y-coordinate from X-coordinate and curve coefficients. 
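 *
 * (Background sketch, assuming the customary choice B = 1 for these Montgomery
 * curves: the missing coordinate satisfies y^2 = x^3 + (A/C)*x^2 + x for the given
 * x, so recovery amounts to a square-root computation in GF(p^2), and the return
 * value reports whether that square root exists.)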
+ * + * @param y: a y-coordinate + * @param Px: a x-coordinate + * @param curve: the elliptic curve + * + * @return 0xFFFFFFFF if the point was on the curve, 0 otherwise + */ +uint32_t ec_recover_y(fp2_t *y, const fp2_t *Px, const ec_curve_t *curve); + +// Jacobian point init and copying +void jac_init(jac_point_t *P); +void copy_jac_point(jac_point_t *P, const jac_point_t *Q); + +/** + * @brief Test if two Jacobian points are equal + * + * @param P: a point + * @param Q: a point + * + * @return 0xFFFFFFFF if they are equal, 0 otherwise + */ +uint32_t jac_is_equal(const jac_point_t *P, const jac_point_t *Q); + +// Convert from Jacobian to x-only (just drop the Y-coordinate) +void jac_to_xz(ec_point_t *P, const jac_point_t *xyP); +// Convert from Jacobian coordinates in Montgomery model to Weierstrass +void jac_to_ws(jac_point_t *P, fp2_t *t, fp2_t *ao3, const jac_point_t *Q, const ec_curve_t *curve); +void jac_from_ws(jac_point_t *Q, const jac_point_t *P, const fp2_t *ao3, const ec_curve_t *curve); + +// Jacobian arithmetic +void jac_neg(jac_point_t *Q, const jac_point_t *P); +void ADD(jac_point_t *R, const jac_point_t *P, const jac_point_t *Q, const ec_curve_t *AC); +void DBL(jac_point_t *Q, const jac_point_t *P, const ec_curve_t *AC); +void DBLW(jac_point_t *Q, fp2_t *u, const jac_point_t *P, const fp2_t *t); +void jac_to_xz_add_components(add_components_t *uvw, const jac_point_t *P, const jac_point_t *Q, const ec_curve_t *AC); + +/** + * @brief Given a basis in x-only, lift to a pair of Jacobian points + * + * @param P: a point + * @param Q: a point + * @param B: a basis + * @param E: an elliptic curve + * + * @return 0xFFFFFFFF if there was no error, 0 otherwise + * + * + * Lifts a basis x(P), x(Q), x(P-Q) assuming the curve has (A/C : 1) and + * the point P = (X/Z : 1). 
For generic implementation see lift_basis() + */ +uint32_t lift_basis_normalized(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E); + +/** + * @brief Given a basis in x-only, lift to a pair of Jacobian points + * + * @param P: a point + * @param Q: a point + * @param B: a basis + * @param E: an elliptic curve + * + * @return 0xFFFFFFFF if there was no error, 0 otherwise + */ +uint32_t lift_basis(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E); + +/** + * @brief Check if basis points (P, Q) form a full 4-basis + * + * @param B: a basis + * @param E: an elliptic curve + * + * @return 0xFFFFFFFF if they form a basis, 0 otherwise + */ +uint32_t ec_is_basis_four_torsion(const ec_basis_t *B, const ec_curve_t *E); + +/* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Test functions for printing and order checking, only used in debug mode + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + */ + +/** + * @brief Check if a point (X : Z) has order exactly 2^t + * + * @param P: a point + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_point_order_twof(const ec_point_t *P, const ec_curve_t *E, int t) +{ + ec_point_t test; + ec_curve_t curve; + test = *P; + copy_curve(&curve, E); + + if (ec_is_zero(&test)) + return 0; + // Scale point by 2^(t-1) + ec_dbl_iter(&test, t - 1, &test, &curve); + // If it's zero now, it doesnt have order 2^t + if (ec_is_zero(&test)) + return 0; + // Ensure [2^t] P = 0 + ec_dbl(&test, &test, &curve); + return ec_is_zero(&test); +} + +/** + * @brief Check if basis points (P, Q, PmQ) all have order exactly 2^t + * + * @param B: a basis + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_basis_order_twof(const ec_basis_t *B, const ec_curve_t *E, int t) +{ + int check_P = test_point_order_twof(&B->P, E, t); + int check_Q = test_point_order_twof(&B->Q, E, t); + int check_PmQ = test_point_order_twof(&B->PmQ, E, t); + + return check_P & check_Q & check_PmQ; +} + +/** + * @brief Check if a Jacobian point (X : Y : Z) has order exactly 2^f + * + * @param P: a point + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_jac_order_twof(const jac_point_t *P, const ec_curve_t *E, int t) +{ + jac_point_t test; + test = *P; + if (fp2_is_zero(&test.z)) + return 0; + for (int i = 0; i < t - 1; i++) { + DBL(&test, &test, E); + } + if (fp2_is_zero(&test.z)) + return 0; + DBL(&test, &test, E); + return (fp2_is_zero(&test.z)); +} + +// Prints the x-coordinate of the point (X : 1) +static void +ec_point_print(const char *name, ec_point_t P) +{ + fp2_t a; + if (fp2_is_zero(&P.z)) { + printf("%s = INF\n", name); + } else { + fp2_copy(&a, &P.z); + fp2_inv(&a); + fp2_mul(&a, &a, &P.x); + fp2_print(name, &a); + } +} + +// Prints the Montgomery coefficient A +static void +ec_curve_print(const char *name, ec_curve_t E) +{ + fp2_t a; + fp2_copy(&a, &E.C); + fp2_inv(&a); + fp2_mul(&a, &a, &E.A); + fp2_print(name, &a); +} + +#endif +// end isogeny computations +/** + * @} + */ + +// end ec +/** + * @} + */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec_jac.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec_jac.c new file mode 100644 index 0000000000..20ca68c9b2 --- /dev/null +++ 
b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec_jac.c @@ -0,0 +1,335 @@ +#include +#include + +void +jac_init(jac_point_t *P) +{ // Initialize Montgomery in Jacobian coordinates as identity element (0:1:0) + fp2_set_zero(&P->x); + fp2_set_one(&P->y); + fp2_set_zero(&P->z); +} + +uint32_t +jac_is_equal(const jac_point_t *P, const jac_point_t *Q) +{ // Evaluate if two points in Jacobian coordinates (X:Y:Z) are equal + // Returns 1 (true) if P=Q, 0 (false) otherwise + fp2_t t0, t1, t2, t3; + + fp2_sqr(&t0, &Q->z); + fp2_mul(&t2, &P->x, &t0); // x1*z2^2 + fp2_sqr(&t1, &P->z); + fp2_mul(&t3, &Q->x, &t1); // x2*z1^2 + fp2_sub(&t2, &t2, &t3); + + fp2_mul(&t0, &t0, &Q->z); + fp2_mul(&t0, &P->y, &t0); // y1*z2^3 + fp2_mul(&t1, &t1, &P->z); + fp2_mul(&t1, &Q->y, &t1); // y2*z1^3 + fp2_sub(&t0, &t0, &t1); + + return fp2_is_zero(&t0) & fp2_is_zero(&t2); +} + +void +jac_to_xz(ec_point_t *P, const jac_point_t *xyP) +{ + fp2_copy(&P->x, &xyP->x); + fp2_copy(&P->z, &xyP->z); + fp2_sqr(&P->z, &P->z); + + // If xyP = (0:1:0), we currently have P=(0 : 0) but we want to set P=(1:0) + uint32_t c1, c2; + fp2_t one; + fp2_set_one(&one); + + c1 = fp2_is_zero(&P->x); + c2 = fp2_is_zero(&P->z); + fp2_select(&P->x, &P->x, &one, c1 & c2); +} + +void +jac_to_ws(jac_point_t *Q, fp2_t *t, fp2_t *ao3, const jac_point_t *P, const ec_curve_t *curve) +{ + // Cost of 3M + 2S when A != 0. + fp_t one; + fp2_t a; + /* a = 1 - A^2/3, U = X + (A*Z^2)/3, V = Y, W = Z, T = a*Z^4*/ + fp_set_one(&one); + if (!fp2_is_zero(&(curve->A))) { + fp_div3(&(ao3->re), &(curve->A.re)); + fp_div3(&(ao3->im), &(curve->A.im)); + fp2_sqr(t, &P->z); + fp2_mul(&Q->x, ao3, t); + fp2_add(&Q->x, &Q->x, &P->x); + fp2_sqr(t, t); + fp2_mul(&a, ao3, &(curve->A)); + fp_sub(&(a.re), &one, &(a.re)); + fp_neg(&(a.im), &(a.im)); + fp2_mul(t, t, &a); + } else { + fp2_copy(&Q->x, &P->x); + fp2_sqr(t, &P->z); + fp2_sqr(t, t); + } + fp2_copy(&Q->y, &P->y); + fp2_copy(&Q->z, &P->z); +} + +void +jac_from_ws(jac_point_t *Q, const jac_point_t *P, const fp2_t *ao3, const ec_curve_t *curve) +{ + // Cost of 1M + 1S when A != 0. + fp2_t t; + /* X = U - (A*W^2)/3, Y = V, Z = W. */ + if (!fp2_is_zero(&(curve->A))) { + fp2_sqr(&t, &P->z); + fp2_mul(&t, &t, ao3); + fp2_sub(&Q->x, &P->x, &t); + } + fp2_copy(&Q->y, &P->y); + fp2_copy(&Q->z, &P->z); +} + +void +copy_jac_point(jac_point_t *P, const jac_point_t *Q) +{ + fp2_copy(&(P->x), &(Q->x)); + fp2_copy(&(P->y), &(Q->y)); + fp2_copy(&(P->z), &(Q->z)); +} + +void +jac_neg(jac_point_t *Q, const jac_point_t *P) +{ + fp2_copy(&Q->x, &P->x); + fp2_neg(&Q->y, &P->y); + fp2_copy(&Q->z, &P->z); +} + +void +DBL(jac_point_t *Q, const jac_point_t *P, const ec_curve_t *AC) +{ // Cost of 6M + 6S. 
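 // Sketch of the affine picture, assuming the usual model y^2 = x^3 + A*x^2 + x
 // with (x, y) = (X/Z^2, Y/Z^3): the tangent slope is
 //     lambda = (3*x^2 + 2*A*x + 1) / (2*y),
 // and x(2P) = lambda^2 - A - 2*x, y(2P) = lambda*(x - x(2P)) - y. The value
 // "alpha" computed below is the numerator of lambda cleared of denominators by
 // Z^4, and the output Z-coordinate 2*Y*Z keeps the computation inversion-free.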
+ // Doubling on a Montgomery curve, representation in Jacobian coordinates (X:Y:Z) corresponding to + // (X/Z^2,Y/Z^3) This version receives the coefficient value A + fp2_t t0, t1, t2, t3; + + uint32_t flag = fp2_is_zero(&P->x) & fp2_is_zero(&P->z); + + fp2_sqr(&t0, &P->x); // t0 = x1^2 + fp2_add(&t1, &t0, &t0); + fp2_add(&t0, &t0, &t1); // t0 = 3x1^2 + fp2_sqr(&t1, &P->z); // t1 = z1^2 + fp2_mul(&t2, &P->x, &AC->A); + fp2_add(&t2, &t2, &t2); // t2 = 2Ax1 + fp2_add(&t2, &t1, &t2); // t2 = 2Ax1+z1^2 + fp2_mul(&t2, &t1, &t2); // t2 = z1^2(2Ax1+z1^2) + fp2_add(&t2, &t0, &t2); // t2 = alpha = 3x1^2 + z1^2(2Ax1+z1^2) + fp2_mul(&Q->z, &P->y, &P->z); + fp2_add(&Q->z, &Q->z, &Q->z); // z2 = 2y1z1 + fp2_sqr(&t0, &Q->z); + fp2_mul(&t0, &t0, &AC->A); // t0 = 4Ay1^2z1^2 + fp2_sqr(&t1, &P->y); + fp2_add(&t1, &t1, &t1); // t1 = 2y1^2 + fp2_add(&t3, &P->x, &P->x); // t3 = 2x1 + fp2_mul(&t3, &t1, &t3); // t3 = 4x1y1^2 + fp2_sqr(&Q->x, &t2); // x2 = alpha^2 + fp2_sub(&Q->x, &Q->x, &t0); // x2 = alpha^2 - 4Ay1^2z1^2 + fp2_sub(&Q->x, &Q->x, &t3); + fp2_sub(&Q->x, &Q->x, &t3); // x2 = alpha^2 - 4Ay1^2z1^2 - 8x1y1^2 + fp2_sub(&Q->y, &t3, &Q->x); // y2 = 4x1y1^2 - x2 + fp2_mul(&Q->y, &Q->y, &t2); // y2 = alpha(4x1y1^2 - x2) + fp2_sqr(&t1, &t1); // t1 = 4y1^4 + fp2_sub(&Q->y, &Q->y, &t1); + fp2_sub(&Q->y, &Q->y, &t1); // y2 = alpha(4x1y1^2 - x2) - 8y1^4 + + fp2_select(&Q->x, &Q->x, &P->x, -flag); + fp2_select(&Q->z, &Q->z, &P->z, -flag); +} + +void +DBLW(jac_point_t *Q, fp2_t *u, const jac_point_t *P, const fp2_t *t) +{ // Cost of 3M + 5S. + // Doubling on a Weierstrass curve, representation in modified Jacobian coordinates + // (X:Y:Z:T=a*Z^4) corresponding to (X/Z^2,Y/Z^3), where a is the curve coefficient. + // Formula from https://hyperelliptic.org/EFD/g1p/auto-shortw-modified.html + + uint32_t flag = fp2_is_zero(&P->x) & fp2_is_zero(&P->z); + + fp2_t xx, c, cc, r, s, m; + // XX = X^2 + fp2_sqr(&xx, &P->x); + // A = 2*Y^2 + fp2_sqr(&c, &P->y); + fp2_add(&c, &c, &c); + // AA = A^2 + fp2_sqr(&cc, &c); + // R = 2*AA + fp2_add(&r, &cc, &cc); + // S = (X+A)^2-XX-AA + fp2_add(&s, &P->x, &c); + fp2_sqr(&s, &s); + fp2_sub(&s, &s, &xx); + fp2_sub(&s, &s, &cc); + // M = 3*XX+T1 + fp2_add(&m, &xx, &xx); + fp2_add(&m, &m, &xx); + fp2_add(&m, &m, t); + // X3 = M^2-2*S + fp2_sqr(&Q->x, &m); + fp2_sub(&Q->x, &Q->x, &s); + fp2_sub(&Q->x, &Q->x, &s); + // Z3 = 2*Y*Z + fp2_mul(&Q->z, &P->y, &P->z); + fp2_add(&Q->z, &Q->z, &Q->z); + // Y3 = M*(S-X3)-R + fp2_sub(&Q->y, &s, &Q->x); + fp2_mul(&Q->y, &Q->y, &m); + fp2_sub(&Q->y, &Q->y, &r); + // T3 = 2*R*T1 + fp2_mul(u, t, &r); + fp2_add(u, u, u); + + fp2_select(&Q->x, &Q->x, &P->x, -flag); + fp2_select(&Q->z, &Q->z, &P->z, -flag); +} + +void +select_jac_point(jac_point_t *Q, const jac_point_t *P1, const jac_point_t *P2, const digit_t option) +{ // Select points + // If option = 0 then Q <- P1, else if option = 0xFF...FF then Q <- P2 + fp2_select(&(Q->x), &(P1->x), &(P2->x), option); + fp2_select(&(Q->y), &(P1->y), &(P2->y), option); + fp2_select(&(Q->z), &(P1->z), &(P2->z), option); +} + +void +ADD(jac_point_t *R, const jac_point_t *P, const jac_point_t *Q, const ec_curve_t *AC) +{ + // Addition on a Montgomery curve, representation in Jacobian coordinates (X:Y:Z) corresponding + // to (x,y) = (X/Z^2,Y/Z^3) This version receives the coefficient value A + // + // Complete routine, to handle all edge cases: + // if ZP == 0: # P == inf + // return Q + // if ZQ == 0: # Q == inf + // return P + // dy <- YQ*ZP**3 - YP*ZQ**3 + // dx <- XQ*ZP**2 - XP*ZQ**2 + // if dx == 0: # x1 == x2 + 
// if dy == 0: # ... and y1 == y2: doubling case + // dy <- ZP*ZQ * (3*XP^2 + ZP^2 * (2*A*XP + ZP^2)) + // dx <- 2*YP*ZP + // else: # ... but y1 != y2, thus P = -Q + // return inf + // XR <- dy**2 - dx**2 * (A*ZP^2*ZQ^2 + XP*ZQ^2 + XQ*ZP^2) + // YR <- dy * (XP*ZQ^2 * dx^2 - XR) - YP*ZQ^3 * dx^3 + // ZR <- dx * ZP * ZQ + + // Constant time processing: + // - The case for P == 0 or Q == 0 is handled at the end with conditional select + // - dy and dx are computed for both the normal and doubling cases, we switch when + // dx == dy == 0 for the normal case. + // - If we have that P = -Q then dx = 0 and so ZR will be zero, giving us the point + // at infinity for "free". + // + // These current formula are expensive and I'm probably missing some tricks... + // Thought I'd get the ball rolling. + // Cost 17M + 6S + 13a + fp2_t t0, t1, t2, t3, u1, u2, v1, dx, dy; + + /* If P is zero or Q is zero we will conditionally swap before returning. */ + uint32_t ctl1 = fp2_is_zero(&P->z); + uint32_t ctl2 = fp2_is_zero(&Q->z); + + /* Precompute some values */ + fp2_sqr(&t0, &P->z); // t0 = z1^2 + fp2_sqr(&t1, &Q->z); // t1 = z2^2 + + /* Compute dy and dx for ordinary case */ + fp2_mul(&v1, &t1, &Q->z); // v1 = z2^3 + fp2_mul(&t2, &t0, &P->z); // t2 = z1^3 + fp2_mul(&v1, &v1, &P->y); // v1 = y1z2^3 + fp2_mul(&t2, &t2, &Q->y); // t2 = y2z1^3 + fp2_sub(&dy, &t2, &v1); // dy = y2z1^3 - y1z2^3 + fp2_mul(&u2, &t0, &Q->x); // u2 = x2z1^2 + fp2_mul(&u1, &t1, &P->x); // u1 = x1z2^2 + fp2_sub(&dx, &u2, &u1); // dx = x2z1^2 - x1z2^2 + + /* Compute dy and dx for doubling case */ + fp2_add(&t1, &P->y, &P->y); // dx_dbl = t1 = 2y1 + fp2_add(&t2, &AC->A, &AC->A); // t2 = 2A + fp2_mul(&t2, &t2, &P->x); // t2 = 2Ax1 + fp2_add(&t2, &t2, &t0); // t2 = 2Ax1 + z1^2 + fp2_mul(&t2, &t2, &t0); // t2 = z1^2 * (2Ax1 + z1^2) + fp2_sqr(&t0, &P->x); // t0 = x1^2 + fp2_add(&t2, &t2, &t0); // t2 = x1^2 + z1^2 * (2Ax1 + z1^2) + fp2_add(&t2, &t2, &t0); // t2 = 2*x1^2 + z1^2 * (2Ax1 + z1^2) + fp2_add(&t2, &t2, &t0); // t2 = 3*x1^2 + z1^2 * (2Ax1 + z1^2) + fp2_mul(&t2, &t2, &Q->z); // dy_dbl = t2 = z2 * (3*x1^2 + z1^2 * (2Ax1 + z1^2)) + + /* If dx is zero and dy is zero swap with double variables */ + uint32_t ctl = fp2_is_zero(&dx) & fp2_is_zero(&dy); + fp2_select(&dx, &dx, &t1, ctl); + fp2_select(&dy, &dy, &t2, ctl); + + /* Some more precomputations */ + fp2_mul(&t0, &P->z, &Q->z); // t0 = z1z2 + fp2_sqr(&t1, &t0); // t1 = z1z2^2 + fp2_sqr(&t2, &dx); // t2 = dx^2 + fp2_sqr(&t3, &dy); // t3 = dy^2 + + /* Compute x3 = dy**2 - dx**2 * (A*ZP^2*ZQ^2 + XP*ZQ^2 + XQ*ZP^2) */ + fp2_mul(&R->x, &AC->A, &t1); // x3 = A*(z1z2)^2 + fp2_add(&R->x, &R->x, &u1); // x3 = A*(z1z2)^2 + u1 + fp2_add(&R->x, &R->x, &u2); // x3 = A*(z1z2)^2 + u1 + u2 + fp2_mul(&R->x, &R->x, &t2); // x3 = dx^2 * (A*(z1z2)^2 + u1 + u2) + fp2_sub(&R->x, &t3, &R->x); // x3 = dy^2 - dx^2 * (A*(z1z2)^2 + u1 + u2) + + /* Compute y3 = dy * (XP*ZQ^2 * dx^2 - XR) - YP*ZQ^3 * dx^3*/ + fp2_mul(&R->y, &u1, &t2); // y3 = u1 * dx^2 + fp2_sub(&R->y, &R->y, &R->x); // y3 = u1 * dx^2 - x3 + fp2_mul(&R->y, &R->y, &dy); // y3 = dy * (u1 * dx^2 - x3) + fp2_mul(&t3, &t2, &dx); // t3 = dx^3 + fp2_mul(&t3, &t3, &v1); // t3 = v1 * dx^3 + fp2_sub(&R->y, &R->y, &t3); // y3 = dy * (u1 * dx^2 - x3) - v1 * dx^3 + + /* Compute z3 = dx * z1 * z2 */ + fp2_mul(&R->z, &dx, &t0); + + /* Finally, we need to set R = P is Q.Z = 0 and R = Q if P.Z = 0 */ + select_jac_point(R, R, Q, ctl1); + select_jac_point(R, R, P, ctl2); +} + +void +jac_to_xz_add_components(add_components_t *add_comp, const jac_point_t *P, const 
+void
+jac_to_xz_add_components(add_components_t *add_comp, const jac_point_t *P, const jac_point_t *Q, const ec_curve_t *AC)
+{
+    // Take two distinct points P and Q on E, given as jac_point_t, and return three components u, v
+    // and w in Fp2 such that the xz coordinates of P+Q are (u-v:w) and those of P-Q are (u+v:w)
+
+    fp2_t t0, t1, t2, t3, t4, t5, t6;
+
+    fp2_sqr(&t0, &P->z); // t0 = z1^2
+    fp2_sqr(&t1, &Q->z); // t1 = z2^2
+    fp2_mul(&t2, &P->x, &t1); // t2 = x1z2^2
+    fp2_mul(&t3, &t0, &Q->x); // t3 = z1^2x2
+    fp2_mul(&t4, &P->y, &Q->z); // t4 = y1z2
+    fp2_mul(&t4, &t4, &t1); // t4 = y1z2^3
+    fp2_mul(&t5, &P->z, &Q->y); // t5 = z1y2
+    fp2_mul(&t5, &t5, &t0); // t5 = z1^3y2
+    fp2_mul(&t0, &t0, &t1); // t0 = (z1z2)^2
+    fp2_mul(&t6, &t4, &t5); // t6 = (z1z2)^3y1y2
+    fp2_add(&add_comp->v, &t6, &t6); // v = 2(z1z2)^3y1y2
+    fp2_sqr(&t4, &t4); // t4 = y1^2z2^6
+    fp2_sqr(&t5, &t5); // t5 = z1^6y2^2
+    fp2_add(&t4, &t4, &t5); // t4 = z1^6y2^2 + y1^2z2^6
+    fp2_add(&t5, &t2, &t3); // t5 = x1z2^2 + z1^2x2
+    fp2_add(&t6, &t3, &t3); // t6 = 2z1^2x2
+    fp2_sub(&t6, &t5, &t6); // t6 = lambda = x1z2^2 - z1^2x2
+    fp2_sqr(&t6, &t6); // t6 = lambda^2 = (x1z2^2 - z1^2x2)^2
+    fp2_mul(&t1, &AC->A, &t0); // t1 = A*(z1z2)^2
+    fp2_add(&t1, &t5, &t1); // t1 = gamma = A*(z1z2)^2 + x1z2^2 + z1^2x2
+    fp2_mul(&t1, &t1, &t6); // t1 = gamma*lambda^2
+    fp2_sub(&add_comp->u, &t4, &t1); // u = z1^6y2^2 + y1^2z2^6 - gamma*lambda^2
+    fp2_mul(&add_comp->w, &t6, &t0); // w = (z1z2)^2 * lambda^2
+}
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec_params.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec_params.c
new file mode 100644
index 0000000000..ae214aabed
--- /dev/null
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec_params.c
@@ -0,0 +1,4 @@
+#include
+// p+1 divided by the power of 2
+const digit_t p_cofactor_for_2f[1] = {65};
+
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec_params.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec_params.h
new file mode 100644
index 0000000000..941abd5452
--- /dev/null
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec_params.h
@@ -0,0 +1,12 @@
+#ifndef EC_PARAMS_H
+#define EC_PARAMS_H
+
+#include
+
+#define TORSION_EVEN_POWER 376
+
+// p+1 divided by the power of 2
+extern const digit_t p_cofactor_for_2f[1];
+#define P_COFACTOR_FOR_2F_BITLENGTH 7
+
+#endif
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/encode_signature.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/encode_signature.c
new file mode 100644
index 0000000000..112c695941
--- /dev/null
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/encode_signature.c
@@ -0,0 +1,208 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+typedef unsigned char byte_t;
+
+// digits
+
+static void
+encode_digits(byte_t *enc, const digit_t *x, size_t nbytes)
+{
+#ifdef TARGET_BIG_ENDIAN
+    const size_t ndigits = nbytes / sizeof(digit_t);
+    const size_t rem = nbytes % sizeof(digit_t);
+
+    for (size_t i = 0; i < ndigits; i++)
+        ((digit_t *)enc)[i] = BSWAP_DIGIT(x[i]);
+    if (rem) {
+        digit_t ld = BSWAP_DIGIT(x[ndigits]);
+        memcpy(enc + ndigits * sizeof(digit_t), (byte_t *)&ld, rem);
+    }
+#else
+    memcpy(enc, (const byte_t *)x, nbytes);
+#endif
+}
+
+static void
+decode_digits(digit_t *x, const byte_t *enc, size_t nbytes, size_t ndigits)
+{
+    assert(nbytes <= ndigits * sizeof(digit_t));
+    memcpy((byte_t *)x, enc, nbytes);
+    memset((byte_t *)x + nbytes, 0, ndigits * sizeof(digit_t) - nbytes);
+
+#ifdef TARGET_BIG_ENDIAN
+    for (size_t i = 0; i < ndigits; i++)
+        x[i] = BSWAP_DIGIT(x[i]);
+#endif
+}
+
+// ibz_t
+
+static
byte_t * +ibz_to_bytes(byte_t *enc, const ibz_t *x, size_t nbytes, bool sgn) +{ +#ifndef NDEBUG + { + // make sure there is enough space + ibz_t abs, bnd; + ibz_init(&bnd); + ibz_init(&abs); + ibz_pow(&bnd, &ibz_const_two, 8 * nbytes - sgn); + ibz_abs(&abs, x); + assert(ibz_cmp(&abs, &bnd) < 0); + ibz_finalize(&bnd); + ibz_finalize(&abs); + } +#endif + const size_t digits = (nbytes + sizeof(digit_t) - 1) / sizeof(digit_t); + digit_t d[digits]; + memset(d, 0, sizeof(d)); + if (ibz_cmp(x, &ibz_const_zero) >= 0) { + // non-negative, straightforward. + ibz_to_digits(d, x); + } else { + assert(sgn); + // negative; use two's complement. + ibz_t tmp; + ibz_init(&tmp); + ibz_neg(&tmp, x); + ibz_sub(&tmp, &tmp, &ibz_const_one); + ibz_to_digits(d, &tmp); + for (size_t i = 0; i < digits; ++i) + d[i] = ~d[i]; +#ifndef NDEBUG + { + // make sure the result is correct + ibz_t chk; + ibz_init(&chk); + ibz_copy_digit_array(&tmp, d); + ibz_sub(&tmp, &tmp, x); + ibz_pow(&chk, &ibz_const_two, 8 * sizeof(d)); + assert(!ibz_cmp(&tmp, &chk)); + ibz_finalize(&chk); + } +#endif + ibz_finalize(&tmp); + } + encode_digits(enc, d, nbytes); + return enc + nbytes; +} + +static const byte_t * +ibz_from_bytes(ibz_t *x, const byte_t *enc, size_t nbytes, bool sgn) +{ + assert(nbytes > 0); + const size_t ndigits = (nbytes + sizeof(digit_t) - 1) / sizeof(digit_t); + assert(ndigits > 0); + digit_t d[ndigits]; + memset(d, 0, sizeof(d)); + decode_digits(d, enc, nbytes, ndigits); + if (sgn && enc[nbytes - 1] >> 7) { + // negative, decode two's complement + const size_t s = sizeof(digit_t) - 1 - (sizeof(d) - nbytes); + assert(s < sizeof(digit_t)); + d[ndigits - 1] |= ((digit_t)-1) >> 8 * s << 8 * s; + for (size_t i = 0; i < ndigits; ++i) + d[i] = ~d[i]; + ibz_copy_digits(x, d, ndigits); + ibz_add(x, x, &ibz_const_one); + ibz_neg(x, x); + } else { + // non-negative + ibz_copy_digits(x, d, ndigits); + } + return enc + nbytes; +} + +// public API + +void +secret_key_to_bytes(byte_t *enc, const secret_key_t *sk, const public_key_t *pk) +{ +#ifndef NDEBUG + byte_t *const start = enc; +#endif + + enc = public_key_to_bytes(enc, pk); + +#ifndef NDEBUG + { + fp2_t lhs, rhs; + fp2_mul(&lhs, &sk->curve.A, &pk->curve.C); + fp2_mul(&rhs, &sk->curve.C, &pk->curve.A); + assert(fp2_is_equal(&lhs, &rhs)); + } +#endif + + enc = ibz_to_bytes(enc, &sk->secret_ideal.norm, FP_ENCODED_BYTES, false); + { + quat_alg_elem_t gen; + quat_alg_elem_init(&gen); + int ret UNUSED = quat_lideal_generator(&gen, &sk->secret_ideal, &QUATALG_PINFTY); + assert(ret); + // we skip encoding the denominator since it won't change the generated ideal +#ifndef NDEBUG + { + // let's make sure that the denominator is indeed coprime to the norm of the ideal + ibz_t gcd; + ibz_init(&gcd); + ibz_gcd(&gcd, &gen.denom, &sk->secret_ideal.norm); + assert(!ibz_cmp(&gcd, &ibz_const_one)); + ibz_finalize(&gcd); + } +#endif + enc = ibz_to_bytes(enc, &gen.coord[0], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord[1], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord[2], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord[3], FP_ENCODED_BYTES, true); + quat_alg_elem_finalize(&gen); + } + + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][1], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][1], TORSION_2POWER_BYTES, false); + + assert(enc - 
start == SECRETKEY_BYTES); +} + +void +secret_key_from_bytes(secret_key_t *sk, public_key_t *pk, const byte_t *enc) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + + enc = public_key_from_bytes(pk, enc); + + { + ibz_t norm; + ibz_init(&norm); + quat_alg_elem_t gen; + quat_alg_elem_init(&gen); + enc = ibz_from_bytes(&norm, enc, FP_ENCODED_BYTES, false); + enc = ibz_from_bytes(&gen.coord[0], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord[1], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord[2], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord[3], enc, FP_ENCODED_BYTES, true); + quat_lideal_create(&sk->secret_ideal, &gen, &norm, &MAXORD_O0, &QUATALG_PINFTY); + ibz_finalize(&norm); + quat_alg_elem_finalize(&gen); + } + + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][1], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][1], enc, TORSION_2POWER_BYTES, false); + + assert(enc - start == SECRETKEY_BYTES); + + sk->curve = pk->curve; + ec_curve_to_basis_2f_from_hint(&sk->canonical_basis, &sk->curve, TORSION_EVEN_POWER, pk->hint_pk); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/encode_verification.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/encode_verification.c new file mode 100644 index 0000000000..fecdb9c259 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/encode_verification.c @@ -0,0 +1,220 @@ +#include +#include +#include +#include +#include +#include + +typedef unsigned char byte_t; + +// digits + +static void +encode_digits(byte_t *enc, const digit_t *x, size_t nbytes) +{ +#ifdef TARGET_BIG_ENDIAN + const size_t ndigits = nbytes / sizeof(digit_t); + const size_t rem = nbytes % sizeof(digit_t); + + for (size_t i = 0; i < ndigits; i++) + ((digit_t *)enc)[i] = BSWAP_DIGIT(x[i]); + if (rem) { + digit_t ld = BSWAP_DIGIT(x[ndigits]); + memcpy(enc + ndigits * sizeof(digit_t), (byte_t *)&ld, rem); + } +#else + memcpy(enc, (const byte_t *)x, nbytes); +#endif +} + +static void +decode_digits(digit_t *x, const byte_t *enc, size_t nbytes, size_t ndigits) +{ + assert(nbytes <= ndigits * sizeof(digit_t)); + memcpy((byte_t *)x, enc, nbytes); + memset((byte_t *)x + nbytes, 0, ndigits * sizeof(digit_t) - nbytes); + +#ifdef TARGET_BIG_ENDIAN + for (size_t i = 0; i < ndigits; i++) + x[i] = BSWAP_DIGIT(x[i]); +#endif +} + +// fp2_t + +static byte_t * +fp2_to_bytes(byte_t *enc, const fp2_t *x) +{ + fp2_encode(enc, x); + return enc + FP2_ENCODED_BYTES; +} + +static const byte_t * +fp2_from_bytes(fp2_t *x, const byte_t *enc) +{ + fp2_decode(x, enc); + return enc + FP2_ENCODED_BYTES; +} + +// curves and points + +static byte_t * +proj_to_bytes(byte_t *enc, const fp2_t *x, const fp2_t *z) +{ + assert(!fp2_is_zero(z)); + fp2_t tmp = *z; + fp2_inv(&tmp); +#ifndef NDEBUG + { + fp2_t chk; + fp2_mul(&chk, z, &tmp); + fp2_t one; + fp2_set_one(&one); + assert(fp2_is_equal(&chk, &one)); + } +#endif + fp2_mul(&tmp, x, &tmp); + enc = fp2_to_bytes(enc, &tmp); + return enc; +} + +static const byte_t * +proj_from_bytes(fp2_t *x, fp2_t *z, const byte_t *enc) +{ + enc = fp2_from_bytes(x, enc); + fp2_set_one(z); + return enc; +} + +static byte_t * +ec_curve_to_bytes(byte_t *enc, const ec_curve_t *curve) +{ + return proj_to_bytes(enc, &curve->A, &curve->C); +} + +static const byte_t * 
+ec_curve_from_bytes(ec_curve_t *curve, const byte_t *enc) +{ + memset(curve, 0, sizeof(*curve)); + return proj_from_bytes(&curve->A, &curve->C, enc); +} + +static byte_t * +ec_point_to_bytes(byte_t *enc, const ec_point_t *point) +{ + return proj_to_bytes(enc, &point->x, &point->z); +} + +static const byte_t * +ec_point_from_bytes(ec_point_t *point, const byte_t *enc) +{ + return proj_from_bytes(&point->x, &point->z, enc); +} + +static byte_t * +ec_basis_to_bytes(byte_t *enc, const ec_basis_t *basis) +{ + enc = ec_point_to_bytes(enc, &basis->P); + enc = ec_point_to_bytes(enc, &basis->Q); + enc = ec_point_to_bytes(enc, &basis->PmQ); + return enc; +} + +static const byte_t * +ec_basis_from_bytes(ec_basis_t *basis, const byte_t *enc) +{ + enc = ec_point_from_bytes(&basis->P, enc); + enc = ec_point_from_bytes(&basis->Q, enc); + enc = ec_point_from_bytes(&basis->PmQ, enc); + return enc; +} + +// public API + +byte_t * +public_key_to_bytes(byte_t *enc, const public_key_t *pk) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + enc = ec_curve_to_bytes(enc, &pk->curve); + *enc++ = pk->hint_pk; + assert(enc - start == PUBLICKEY_BYTES); + return enc; +} + +const byte_t * +public_key_from_bytes(public_key_t *pk, const byte_t *enc) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + enc = ec_curve_from_bytes(&pk->curve, enc); + pk->hint_pk = *enc++; + assert(enc - start == PUBLICKEY_BYTES); + return enc; +} + +void +signature_to_bytes(byte_t *enc, const signature_t *sig) +{ +#ifndef NDEBUG + byte_t *const start = enc; +#endif + + enc = fp2_to_bytes(enc, &sig->E_aux_A); + + *enc++ = sig->backtracking; + *enc++ = sig->two_resp_length; + + size_t nbytes = (SQIsign_response_length + 9) / 8; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[0][0], nbytes); + enc += nbytes; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[0][1], nbytes); + enc += nbytes; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[1][0], nbytes); + enc += nbytes; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[1][1], nbytes); + enc += nbytes; + + nbytes = SECURITY_BITS / 8; + encode_digits(enc, sig->chall_coeff, nbytes); + enc += nbytes; + + *enc++ = sig->hint_aux; + *enc++ = sig->hint_chall; + + assert(enc - start == SIGNATURE_BYTES); +} + +void +signature_from_bytes(signature_t *sig, const byte_t *enc) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + + enc = fp2_from_bytes(&sig->E_aux_A, enc); + + sig->backtracking = *enc++; + sig->two_resp_length = *enc++; + + size_t nbytes = (SQIsign_response_length + 9) / 8; + decode_digits(sig->mat_Bchall_can_to_B_chall[0][0], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + decode_digits(sig->mat_Bchall_can_to_B_chall[0][1], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + decode_digits(sig->mat_Bchall_can_to_B_chall[1][0], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + decode_digits(sig->mat_Bchall_can_to_B_chall[1][1], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + + nbytes = SECURITY_BITS / 8; + decode_digits(sig->chall_coeff, enc, nbytes, NWORDS_ORDER); + enc += nbytes; + + sig->hint_aux = *enc++; + sig->hint_chall = *enc++; + + assert(enc - start == SIGNATURE_BYTES); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/encoded_sizes.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/encoded_sizes.h new file mode 100644 index 0000000000..50a8781bb6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/encoded_sizes.h @@ -0,0 +1,11 @@ +#define SECURITY_BITS 192 +#define SQIsign_response_length 192 +#define 
HASH_ITERATIONS 256 +#define FP_ENCODED_BYTES 48 +#define FP2_ENCODED_BYTES 96 +#define EC_CURVE_ENCODED_BYTES 96 +#define EC_POINT_ENCODED_BYTES 96 +#define EC_BASIS_ENCODED_BYTES 288 +#define PUBLICKEY_BYTES 97 +#define SECRETKEY_BYTES 529 +#define SIGNATURE_BYTES 224 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c new file mode 100644 index 0000000000..8aafeac12b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c @@ -0,0 +1,3812 @@ +#include +#include +#include +const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x7e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1} +#elif RADIX == 32 +{0x1f8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x1, 0x0, 0x0, 0x0, 0x0, 0x3f00000000000000} +#else +{0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x1196, 0x134b, 0xdbd, 0x118d, 0x712, 0x1646, 0x5d7, 0x8eb, 0x431, 0xf5b, 0x161e, 0x13b6, 0x1c07, 0x42, 0x8ba, 0xeec, 0x1a43, 0x545, 0x1cdb, 0x1659, 0x1614, 0xde, 0x72d, 0x1b80, 0x1706, 0x15a3, 0x894, 0xd4a, 0x1b2f, 0x12} +#elif RADIX == 32 +{0x9a5c65a, 0xa31adbd, 0x7b231c4, 0xc51d65d, 0x1e7ad90, 0x1e76d6, 0x8ba0217, 0xe90ddd8, 0x3cdb2a2, 0xf5852cb, 0x72d06, 0xd1dc1b7, 0xa94894a, 0x14cbd} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x31c4a31adbd9a5c6, 0xe7ad90c51d65d7b2, 0x88ba021701e76d61, 0x2cb3cdb2a2e90ddd, 0xdc1b70072d06f585, 0x16eecbda94894ad1} +#else +{0x94635b7b34b8c, 0x431475975ec8c7, 0x380f3b6b0f3d6c, 0x2e90ddd88ba021, 0x5eb0a59679b654, 0x347706dc01cb41, 0xb7765ed4a44a5} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xa85, 0x10cc, 0x1ef, 0xb0b, 0x1082, 0x5be, 0xd14, 0x1100, 0x1a33, 0x174b, 0x181c, 0x83e, 0x1034, 0x18ba, 0x205, 0x1f39, 0x1e9, 0x1998, 0x130e, 0x801, 0xfeb, 0x698, 0xdf9, 0x6a5, 0x5b6, 0x2c8, 0x1283, 0xad9, 0x960, 0x1e} +#elif RADIX == 32 +{0x8662a17, 0x96161ef, 0x42df420, 0xce200d1, 0x1cba5e8, 0xd107d8, 0x205c5d4, 0x7a7e72, 0x330eccc, 0xc3fad00, 0x4adf934, 0x6416d8d, 0x5b32831, 0x2f581} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf42096161ef8662a, 0xcba5e8ce200d142d, 0x2205c5d40d107d81, 0xd00330eccc07a7e7, 0x16d8d4adf934c3fa, 0x6065815b3283164} +#else +{0x412c2c3df0cc54, 0x2338803450b7d0, 0x206883ec0e5d2f, 0x407a7e72205c5d, 0x187f5a00661d99, 0x5905b6352b7e4d, 0x3032c0ad99418} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x16ed, 0x818, 0x127a, 0xcfb, 0x1be6, 0x1b40, 0x1bf1, 0xe75, 0x129c, 0x151, 0x425, 0x142e, 0x1edb, 0x254, 0x5cc, 0x1a5b, 0x1e1d, 0x1e27, 0x1a12, 0x8a8, 0x59e, 0x933, 0x1647, 0x686, 0x19e, 0x1e51, 0x151f, 0x1b6e, 0x1efe, 0xd} +#elif RADIX == 32 +{0x40c5bb5, 0x99f727a, 0x1da06f9, 0x71cebbf, 0x250a8ca, 0xb6e85c4, 0x5cc12a7, 0xf8774b6, 0x1a12f13, 0x9967915, 0xd64749, 0x288678d, 0x6dd51ff, 0x2ebfb} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6f999f727a40c5b, 
0x50a8ca71cebbf1da, 0x65cc12a7b6e85c42, 0x9151a12f13f8774b, 0x8678d0d647499967, 0x2e23bfb6dd51ff28} +#else +{0x7333ee4f4818b7, 0x29c73aefc7681b, 0x3db742e2128546, 0x3f8774b65cc12a, 0x332cf22a3425e2, 0x4a219e343591d2, 0x6d1dfdb6ea8ff} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x18a9, 0x1838, 0x1588, 0x1720, 0xf3f, 0x1fcd, 0x44d, 0x1e6b, 0x681, 0x1249, 0x1f8a, 0x5af, 0x1f58, 0x1c12, 0xf21, 0x1887, 0x278, 0x156a, 0xbfe, 0x765, 0x12f7, 0x4da, 0x16ce, 0x7c1, 0x1c04, 0x1773, 0x853, 0xab7, 0xe1d, 0x1a} +#elif RADIX == 32 +{0xc1c62a7, 0xee41588, 0xdfe6bcf, 0x7cd644, 0x8a9249a, 0xd60b5ff, 0xf21e097, 0x9e310e, 0xabfeab5, 0xd4bdcec, 0x836ce26, 0xb9f010f, 0x56e853b, 0x10875} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6bcfee41588c1c62, 0xa9249a07cd644dfe, 0xef21e097d60b5ff8, 0xcecabfeab509e310, 0xf010f836ce26d4bd, 0x2a7787556e853bb9} +#else +{0x1fdc82b11838c5, 0x681f359137f9af, 0x3eb05affc54924, 0x509e310ef21e09, 0x5a97b9d957fd56, 0x6e7c043e0db389, 0x4fbc3aab7429d} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1e36, 0x1718, 0xced, 0x186e, 0x83d, 0x1a23, 0xf5b, 0x5ca, 0x194d, 0x1bd8, 0xb67, 0x9f7, 0x1806, 0x17ae, 0x508, 0x117f, 0x5cc, 0x1809, 0x14b1, 0x85f, 0xcf0, 0x1b0c, 0x1753, 0x1484, 0xb5f, 0x1d62, 0x808, 0x1cc3, 0x844, 0x9} +#elif RADIX == 32 +{0xb8c78d9, 0x70dcced, 0xbd11a0f, 0x34b94f5, 0x67dec65, 0x193eeb, 0x508bd76, 0x97322fe, 0xf4b1c04, 0x633c10b, 0x9753d8, 0xb12d7e9, 0x986808e, 0x9113} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x1a0f70dccedb8c78, 0x7dec6534b94f5bd1, 0xe508bd760193eeb6, 0x10bf4b1c0497322f, 0x2d7e909753d8633c, 0x3722113986808eb1} +#else +{0x1ee1b99db718f1, 0x14d2e53d6f4468, 0x300c9f75b3ef63, 0x497322fe508bd7, 0xc678217e96380, 0x2c4b5fa425d4f6, 0xb51089cc34047} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1785, 0x1652, 0x4b4, 0x1b37, 0x918, 0x12d, 0x1340, 0x16d3, 0xee, 0xb43, 0x52a, 0x1ff, 0x1e6b, 0x1424, 0x609, 0x1e2c, 0x19bd, 0x18f, 0x174a, 0x134d, 0x6f4, 0xa33, 0x1d5c, 0xa53, 0x73c, 0x361, 0x372, 0x1242, 0x87c, 0x17} +#elif RADIX == 32 +{0xb295e16, 0x366e4b4, 0x96a46, 0xbada734, 0x2a5a183, 0x9ac3fe5, 0x609a127, 0xe6f7c58, 0xb74a0c7, 0x99bd269, 0xa7d5c51, 0xb09cf14, 0x4843721, 0x381f2} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6a46366e4b4b295e, 0xa5a183bada734009, 0x8609a1279ac3fe52, 0x269b74a0c7e6f7c5, 0x9cf14a7d5c5199bd, 0x5ce1f24843721b0} +#else +{0xc6cdc969652bc, 0xeeb69cd0025a9, 0x3cd61ff2952d0c, 0x7e6f7c58609a12, 0x3337a4d36e9418, 0x6c273c529f5714, 0x2e70f92421b90} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x97d1,0x8f9c,0x8477,0x3a,0x39d2,0x66ae,0xabd6,0x13da,0xf153,0xb9e7,0xf8db,0x5f9f,0x36f7,0xfcb2,0xa4f0,0x62b9,0x5c07,0x3694,0x539d,0xe8c5,0x7631,0xf16c,0xc691,0x9a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8f9c97d1,0x3a8477,0x66ae39d2,0x13daabd6,0xb9e7f153,0x5f9ff8db,0xfcb236f7,0x62b9a4f0,0x36945c07,0xe8c5539d,0xf16c7631,0x9ac691}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3a84778f9c97d1,0x13daabd666ae39d2,0x5f9ff8dbb9e7f153,0x62b9a4f0fcb236f7,0xe8c5539d36945c07,0x9ac691f16c7631}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1ac2,0xbac6,0x4a43,0x76df,0xe925,0x4a2d,0x1cf8,0xd32d,0x7867,0x1dc0,0xc02f,0xdf8b,0xf122,0x4f0c,0xe07d,0x4a9e,0xa97,0x7ce2,0x8791,0x3570,0x1749,0x519b,0x34cc,0x66}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbac61ac2,0x76df4a43,0x4a2de925,0xd32d1cf8,0x1dc07867,0xdf8bc02f,0x4f0cf122,0x4a9ee07d,0x7ce20a97,0x35708791,0x519b1749,0x6634cc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x76df4a43bac61ac2,0xd32d1cf84a2de925,0xdf8bc02f1dc07867,0x4a9ee07d4f0cf122,0x357087917ce20a97,0x6634cc519b1749}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xfb0f,0x234,0xa481,0x9c61,0x4bd1,0xcd58,0x3a72,0xe38c,0xbe7b,0xea3,0xf102,0xdc99,0xf180,0xb229,0x5d86,0xef91,0x46c4,0x8831,0xa9d5,0xf66f,0xa451,0x6c02,0x9ebd,0xfc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x234fb0f,0x9c61a481,0xcd584bd1,0xe38c3a72,0xea3be7b,0xdc99f102,0xb229f180,0xef915d86,0x883146c4,0xf66fa9d5,0x6c02a451,0xfc9ebd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c61a4810234fb0f,0xe38c3a72cd584bd1,0xdc99f1020ea3be7b,0xef915d86b229f180,0xf66fa9d5883146c4,0xfc9ebd6c02a451}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x682f,0x7063,0x7b88,0xffc5,0xc62d,0x9951,0x5429,0xec25,0xeac,0x4618,0x724,0xa060,0xc908,0x34d,0x5b0f,0x9d46,0xa3f8,0xc96b,0xac62,0x173a,0x89ce,0xe93,0x396e,0x65}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7063682f,0xffc57b88,0x9951c62d,0xec255429,0x46180eac,0xa0600724,0x34dc908,0x9d465b0f,0xc96ba3f8,0x173aac62,0xe9389ce,0x65396e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xffc57b887063682f,0xec2554299951c62d,0xa060072446180eac,0x9d465b0f034dc908,0x173aac62c96ba3f8,0x65396e0e9389ce}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, 
._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x6f75,0xc742,0x1abb,0xc3b2,0x4bff,0xf015,0x66b,0xc51b,0xacd6,0x30c2,0xf641,0x625b,0x2e88,0xbe5,0x5121,0xbe40,0x8ac2,0x755b,0xb8c9,0x4eb6,0xb07,0x46b6,0x84cf,0x47}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc7426f75,0xc3b21abb,0xf0154bff,0xc51b066b,0x30c2acd6,0x625bf641,0xbe52e88,0xbe405121,0x755b8ac2,0x4eb6b8c9,0x46b60b07,0x4784cf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc3b21abbc7426f75,0xc51b066bf0154bff,0x625bf64130c2acd6,0xbe4051210be52e88,0x4eb6b8c9755b8ac2,0x4784cf46b60b07}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x9db8,0x479b,0xe350,0xae1e,0x4f92,0x6572,0x60a4,0x89ed,0x12f4,0xb88d,0x64b6,0xf9ca,0x26b,0xc086,0x83b8,0xb2c7,0x88a8,0xe99b,0x57b3,0x9017,0xe033,0x9d5d,0x5de6,0x37}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x479b9db8,0xae1ee350,0x65724f92,0x89ed60a4,0xb88d12f4,0xf9ca64b6,0xc086026b,0xb2c783b8,0xe99b88a8,0x901757b3,0x9d5de033,0x375de6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xae1ee350479b9db8,0x89ed60a465724f92,0xf9ca64b6b88d12f4,0xb2c783b8c086026b,0x901757b3e99b88a8,0x375de69d5de033}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x23f7,0x1d02,0x3431,0x354e,0xba31,0x23a4,0xe6c4,0x6a9c,0x64c,0xea8,0x419f,0xe54f,0x3cb9,0xc02d,0x3caf,0xe7a3,0x2d32,0x31d4,0xed80,0x47d9,0x2086,0x69f4,0x80d3,0x25}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1d0223f7,0x354e3431,0x23a4ba31,0x6a9ce6c4,0xea8064c,0xe54f419f,0xc02d3cb9,0xe7a33caf,0x31d42d32,0x47d9ed80,0x69f42086,0x2580d3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x354e34311d0223f7,0x6a9ce6c423a4ba31,0xe54f419f0ea8064c,0xe7a33cafc02d3cb9,0x47d9ed8031d42d32,0x2580d369f42086}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x908b,0x38bd,0xe544,0x3c4d,0xb400,0xfea,0xf994,0x3ae4,0x5329,0xcf3d,0x9be,0x9da4,0xd177,0xf41a,0xaede,0x41bf,0x753d,0x8aa4,0x4736,0xb149,0xf4f8,0xb949,0x7b30,0xb8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x38bd908b,0x3c4de544,0xfeab400,0x3ae4f994,0xcf3d5329,0x9da409be,0xf41ad177,0x41bfaede,0x8aa4753d,0xb1494736,0xb949f4f8,0xb87b30}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3c4de54438bd908b,0x3ae4f9940feab400,0x9da409becf3d5329,0x41bfaedef41ad177,0xb14947368aa4753d,0xb87b30b949f4f8}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x97d1,0x8f9c,0x8477,0x3a,0x39d2,0x66ae,0xabd6,0x13da,0xf153,0xb9e7,0xf8db,0x5f9f,0x36f7,0xfcb2,0xa4f0,0x62b9,0x5c07,0x3694,0x539d,0xe8c5,0x7631,0xf16c,0xc691,0x9a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8f9c97d1,0x3a8477,0x66ae39d2,0x13daabd6,0xb9e7f153,0x5f9ff8db,0xfcb236f7,0x62b9a4f0,0x36945c07,0xe8c5539d,0xf16c7631,0x9ac691}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3a84778f9c97d1,0x13daabd666ae39d2,0x5f9ff8dbb9e7f153,0x62b9a4f0fcb236f7,0xe8c5539d36945c07,0x9ac691f16c7631}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1ac2,0xbac6,0x4a43,0x76df,0xe925,0x4a2d,0x1cf8,0xd32d,0x7867,0x1dc0,0xc02f,0xdf8b,0xf122,0x4f0c,0xe07d,0x4a9e,0xa97,0x7ce2,0x8791,0x3570,0x1749,0x519b,0x34cc,0x66}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbac61ac2,0x76df4a43,0x4a2de925,0xd32d1cf8,0x1dc07867,0xdf8bc02f,0x4f0cf122,0x4a9ee07d,0x7ce20a97,0x35708791,0x519b1749,0x6634cc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x76df4a43bac61ac2,0xd32d1cf84a2de925,0xdf8bc02f1dc07867,0x4a9ee07d4f0cf122,0x357087917ce20a97,0x6634cc519b1749}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xfb0f,0x234,0xa481,0x9c61,0x4bd1,0xcd58,0x3a72,0xe38c,0xbe7b,0xea3,0xf102,0xdc99,0xf180,0xb229,0x5d86,0xef91,0x46c4,0x8831,0xa9d5,0xf66f,0xa451,0x6c02,0x9ebd,0xfc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x234fb0f,0x9c61a481,0xcd584bd1,0xe38c3a72,0xea3be7b,0xdc99f102,0xb229f180,0xef915d86,0x883146c4,0xf66fa9d5,0x6c02a451,0xfc9ebd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c61a4810234fb0f,0xe38c3a72cd584bd1,0xdc99f1020ea3be7b,0xef915d86b229f180,0xf66fa9d5883146c4,0xfc9ebd6c02a451}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x682f,0x7063,0x7b88,0xffc5,0xc62d,0x9951,0x5429,0xec25,0xeac,0x4618,0x724,0xa060,0xc908,0x34d,0x5b0f,0x9d46,0xa3f8,0xc96b,0xac62,0x173a,0x89ce,0xe93,0x396e,0x65}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7063682f,0xffc57b88,0x9951c62d,0xec255429,0x46180eac,0xa0600724,0x34dc908,0x9d465b0f,0xc96ba3f8,0x173aac62,0xe9389ce,0x65396e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xffc57b887063682f,0xec2554299951c62d,0xa060072446180eac,0x9d465b0f034dc908,0x173aac62c96ba3f8,0x65396e0e9389ce}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x83a3,0xab6f,0x4f99,0xe1f6,0xc2e8,0x2b61,0xd921,0xec7a,0x4f14,0x7555,0xf78e,0xe0fd,0xb2bf,0x44b,0xfb09,0x107c,0xf365,0x55f7,0x633,0x9bbe,0x409c,0x9c11,0x25b0,0xf1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xab6f83a3,0xe1f64f99,0x2b61c2e8,0xec7ad921,0x75554f14,0xe0fdf78e,0x44bb2bf,0x107cfb09,0x55f7f365,0x9bbe0633,0x9c11409c,0xf125b0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe1f64f99ab6f83a3,0xec7ad9212b61c2e8,0xe0fdf78e75554f14,0x107cfb09044bb2bf,0x9bbe063355f7f365,0xf125b09c11409c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xdc3d,0x130,0x16ca,0x127f,0x1c5c,0x57d0,0x3ece,0x2e8d,0xc5ae,0xeb26,0x1272,0x6cab,0x79c7,0x7c9,0x321b,0xfeb3,0xc99f,0xb33e,0xefa2,0x62c3,0x7bbe,0x777c,0xc959,0x4e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x130dc3d,0x127f16ca,0x57d01c5c,0x2e8d3ece,0xeb26c5ae,0x6cab1272,0x7c979c7,0xfeb3321b,0xb33ec99f,0x62c3efa2,0x777c7bbe,0x4ec959}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x127f16ca0130dc3d,0x2e8d3ece57d01c5c,0x6cab1272eb26c5ae,0xfeb3321b07c979c7,0x62c3efa2b33ec99f,0x4ec959777c7bbe}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8f83,0xf9b,0xec59,0x68d7,0x8301,0x787e,0x909b,0x2714,0xe264,0x8ea5,0x9950,0x60f4,0x971d,0x392b,0x4d1b,0xeb9a,0xb9fb,0xdd02,0xcbaa,0x1f24,0x626c,0x6afb,0xfc8,0x91}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf9b8f83,0x68d7ec59,0x787e8301,0x2714909b,0x8ea5e264,0x60f49950,0x392b971d,0xeb9a4d1b,0xdd02b9fb,0x1f24cbaa,0x6afb626c,0x910fc8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x68d7ec590f9b8f83,0x2714909b787e8301,0x60f499508ea5e264,0xeb9a4d1b392b971d,0x1f24cbaadd02b9fb,0x910fc86afb626c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x7c5d,0x5490,0xb066,0x1e09,0x3d17,0xd49e,0x26de,0x1385,0xb0eb,0x8aaa,0x871,0x1f02,0x4d40,0xfbb4,0x4f6,0xef83,0xc9a,0xaa08,0xf9cc,0x6441,0xbf63,0x63ee,0xda4f,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x54907c5d,0x1e09b066,0xd49e3d17,0x138526de,0x8aaab0eb,0x1f020871,0xfbb44d40,0xef8304f6,0xaa080c9a,0x6441f9cc,0x63eebf63,0xeda4f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1e09b06654907c5d,0x138526ded49e3d17,0x1f0208718aaab0eb,0xef8304f6fbb44d40,0x6441f9ccaa080c9a,0xeda4f63eebf63}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x194c, 0x43, 0xc70, 0x1c7a, 0xa7a, 0xdf7, 0x1564, 0x1809, 0x8e8, 0x3a5, 0x1399, 0xf0a, 0x914, 0x1a27, 0xb1c, 0xcd0, 0xfc, 0xaa4, 0xd87, 0x1ed2, 0x2c0, 0x8e4, 
0x1b93, 0x1a3f, 0x1d9b, 0x1a00, 0xbce, 0x17d2, 0x1a07, 0xf} +#elif RADIX == 32 +{0x21e531, 0xb8f4c70, 0x46fba9e, 0xa301356, 0x991d2a3, 0x451e153, 0xb1cd13a, 0x3f19a0, 0x4d87552, 0x20b03da, 0x7fb9347, 0x766f4, 0xfa4bced, 0x3d81e} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xba9eb8f4c70021e5, 0x91d2a3a30135646f, 0xb1cd13a451e1539, 0x3da4d8755203f19a, 0x766f47fb934720b0, 0xcae81efa4bced00} +#else +{0x3d71e98e0043ca, 0xe8c04d591beea, 0x5228f0a9cc8e95, 0x203f19a0b1cd13, 0x641607b49b0eaa, 0x401d9bd1fee4d1, 0x65740f7d25e76} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x1ed1, 0x10, 0x131c, 0x171e, 0x1a9e, 0x37d, 0xd59, 0x602, 0xa3a, 0x8e9, 0x14e6, 0x3c2, 0x1a45, 0x689, 0x2c7, 0x334, 0x3f, 0x1aa9, 0x1361, 0x7b4, 0xb0, 0x1a39, 0x1ee4, 0x1e8f, 0x766, 0x1680, 0x12f3, 0x1df4, 0x1e81, 0x4} +#elif RADIX == 32 +{0x87b44, 0xae3d31c, 0x91beea7, 0xe8c04d5, 0xe6474a8, 0x9147854, 0x2c7344e, 0x80fc668, 0x9361d54, 0xc82c0f6, 0x1fee4d1, 0x401d9bd, 0xbe92f3b, 0x27a07} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xeea7ae3d31c0087b, 0x6474a8e8c04d591b, 0x82c7344e9147854e, 0xf69361d5480fc66, 0x1d9bd1fee4d1c82c, 0x116ba07be92f3b40} +#else +{0x4f5c7a638010f6, 0x23a30135646fba, 0x748a3c2a7323a5, 0x480fc6682c7344, 0x390581ed26c3aa, 0x500766f47fb934, 0x8b5d03df4979d} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x187c, 0x10c9, 0xfda, 0x189b, 0x3b, 0xbcd, 0x16ab, 0xabe, 0x102, 0x19b7, 0x288, 0x1c7e, 0x1ee8, 0x452, 0x853, 0x1b5a, 0x1ca8, 0x1129, 0xd16, 0x168a, 0x1414, 0x6ed, 0xc0, 0xda2, 0x19ae, 0x12fe, 0x1813, 0xdd8, 0x102e, 0x1f} +#elif RADIX == 32 +{0x864e1f3, 0xf136fda, 0xb5e680e, 0x957d6a, 0x88cdb84, 0xba38fc2, 0x8532297, 0xf2a36b4, 0x4d16894, 0x6d052d1, 0x440c037, 0x7f66b9b, 0xbb18139, 0x390b9} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x680ef136fda864e1, 0x8cdb840957d6ab5e, 0x48532297ba38fc28, 0x2d14d16894f2a36b, 0x66b9b440c0376d05, 0x3dec0b9bb181397f} +#else +{0x1de26dfb50c9c3, 0x10255f5aad79a0, 0x3dd1c7e14466dc, 0x4f2a36b4853229, 0x6da0a5a29a2d12, 0x5fd9ae6d10300d, 0xeb605cdd8c09c} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1ca3, 0x16ad, 0x12b3, 0x9d7, 0xb37, 0x118b, 0xb22, 0x1662, 0xa8f, 0xd68, 0x6d5, 0x1a1f, 0x1f29, 0x632, 0x1b7e, 0xb6, 0xba7, 0xeca, 0x11ed, 0x13b, 0x18cc, 0x19a2, 0x77, 0x1582, 0x11ff, 0xc5f, 0x7de, 0x4b1, 0x1a7f, 0x18} +#elif RADIX == 32 +{0xb56f28f, 0xd3af2b3, 0x28c5acd, 0x3ecc4b2, 0xd56b42a, 0xca743e6, 0xb7e3197, 0x2e9c16d, 0x71ed765, 0x1633027, 0x4077cd, 0x2fc7feb, 0x9627de6, 0x39fc} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x5acdd3af2b3b56f2, 0x56b42a3ecc4b228c, 0xdb7e3197ca743e6d, 0x2771ed7652e9c16, 0xc7feb04077cd1633, 0x24529fc9627de62f} +#else +{0x1ba75e5676ade5, 0x28fb312c8a316b, 0x3e53a1f36ab5a1, 0x52e9c16db7e319, 0x22c6604ee3daec, 0xbf1ffac101df3, 0x1e94fe4b13ef3} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1f7a, 0x1a13, 0x11f4, 0xaeb, 0x997, 0x12d, 0x315, 0x1d7, 0x2fc, 0x736, 0x927, 0x350, 0x695, 0x14ac, 0x703, 0x1ec7, 0x1567, 0x1527, 0x7ee, 0x1a23, 0x11aa, 0x919, 0x130b, 0x199e, 0x137d, 0x795, 0x4e4, 0x1dc6, 0xa87, 0xd} +#elif RADIX == 32 +{0xd09fde9, 0xd5d71f4, 0x5096a65, 0xf03ae31, 0x2739b0b, 0xa546a09, 0x703a561, 0xd59fd8e, 0x67eea93, 0xcc6ab44, 0x3d30b48, 0xcacdf73, 0xb8c4e43, 0x29a1f} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6a65d5d71f4d09fd, 0x739b0bf03ae31509, 0xe703a561a546a092, 0xb4467eea93d59fd8, 0xcdf733d30b48cc6a, 0x3b52a1fb8c4e43ca} +#else +{0x4babae3e9a13fb, 0x2fc0eb8c5425a9, 0xd2a3504939cd8, 0x3d59fd8e703a56, 0x198d5688cfdd52, 0x72b37dccf4c2d2, 0xd6950fdc62721} +#endif +#endif +, +#if 0 +#elif 
RADIX == 16 +{0xa54, 0x1685, 0x1b20, 0x1632, 0x1047, 0x159e, 0x14a0, 0x94c, 0x3c8, 0x793, 0x3a2, 0x1938, 0x1899, 0x15b7, 0xefa, 0xcc8, 0x12c3, 0x1335, 0x4ef, 0x1e93, 0x1861, 0x1602, 0x1d6c, 0x1ae7, 0x187, 0x18b1, 0x857, 0x8da, 0x12f7, 0xa} +#elif RADIX == 32 +{0xb42a951, 0xec65b20, 0xacf411, 0x212994a, 0xa23c98f, 0x2672703, 0xefaadbe, 0xcb0d990, 0x64ef99a, 0x16187d2, 0xcfd6cb0, 0x58861f5, 0x1b4857c, 0x13bdd} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf411ec65b20b42a9, 0x23c98f212994a0ac, 0xefaadbe2672703a, 0x7d264ef99acb0d99, 0x861f5cfd6cb01618, 0x14a4bdd1b4857c58} +#else +{0x23d8cb64168552, 0x3c84a65282b3d0, 0x71339381d11e4c, 0x2cb0d990efaadb, 0x2c30fa4c9df33, 0x162187d73f5b2c, 0xa525ee8da42be} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1e6b, 0x111, 0x74d, 0xb04, 0x738, 0x178f, 0xdc5, 0x835, 0x724, 0xaf9, 0xf3c, 0x1855, 0x266, 0x1b16, 0x1cf0, 0x1aa3, 0x32f, 0xce, 0x1f26, 0x16ba, 0x1cb6, 0x9b8, 0x12de, 0x1cef, 0x1a72, 0x1d68, 0xa02, 0x1c67, 0xa67, 0x13} +#elif RADIX == 32 +{0x88f9ae, 0x160874d, 0x5bc79ce, 0x9106adc, 0x3c57c9c, 0x99b0aaf, 0xcf0d8b0, 0xcbf547, 0x5f26067, 0xc72dad7, 0xdf2de4d, 0xb469cb9, 0x8cea02e, 0x1899f} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x79ce160874d088f9, 0xc57c9c9106adc5bc, 0x7cf0d8b099b0aaf3, 0xad75f260670cbf54, 0x69cb9df2de4dc72d, 0x2c4699f8cea02eb4} +#else +{0x1c2c10e9a111f3, 0x72441ab716f1e7, 0x4cd85579e2be4, 0x70cbf547cf0d8b, 0x38e5b5aebe4c0c, 0x2d1a72e77cb793, 0x5e34cfc675017} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x12d6, 0x1c7a, 0x9bb, 0x1ce1, 0x1ca, 0xf3f, 0x1036, 0x19a6, 0x1c79, 0x5bf, 0x3, 0x1a92, 0x1d08, 0xeaa, 0x11e8, 0xab1, 0x1ed2, 0x80c, 0x10c9, 0x1517, 0xc18, 0x1513, 0x1dff, 0xc00, 0x16a0, 0x14ce, 0x72d, 0x1a86, 0xd45, 0x19} +#elif RADIX == 32 +{0xe3d4b5b, 0xb9c29bb, 0x679f872, 0xe734d03, 0x32dff1, 0x4235240, 0x1e87557, 0x7b49563, 0xf0c9406, 0x9b062a2, 0x1dffa8, 0x675a818, 0x50c72da, 0x8517} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf872b9c29bbe3d4b, 0x32dff1e734d03679, 0x31e8755742352400, 0x2a2f0c94067b4956, 0x5a81801dffa89b06, 0x172351750c72da67} +#else +{0x657385377c7a96, 0x479cd340d9e7e1, 0x3a11a9200196ff, 0x67b495631e8755, 0x1360c545e19280, 0x19d6a060077fea, 0xb91a8ba86396d} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 
0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8d4f,0xfa2a,0xd4d0,0xce2,0x7436,0x8079,0xbe5f,0x2c18,0x5699,0x3569,0x7ecd,0xe106,0xea0,0x7d91,0xcc95,0x7293,0x5f1d,0xd812,0xb900,0x9858,0xd95a,0xc80e,0x1d4a,0xa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfa2a8d4f,0xce2d4d0,0x80797436,0x2c18be5f,0x35695699,0xe1067ecd,0x7d910ea0,0x7293cc95,0xd8125f1d,0x9858b900,0xc80ed95a,0xa1d4a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xce2d4d0fa2a8d4f,0x2c18be5f80797436,0xe1067ecd35695699,0x7293cc957d910ea0,0x9858b900d8125f1d,0xa1d4ac80ed95a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1eca,0x7027,0xf189,0x93f2,0x1c1f,0x5a6f,0xabbc,0x9e7d,0xa00a,0xc45,0x62a8,0x8db0,0x70cd,0x413f,0x400a,0xffaa,0x2ae6,0xd8a8,0xcb0e,0x69c4,0x7d9a,0x7a25,0xa679,0xad}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x70271eca,0x93f2f189,0x5a6f1c1f,0x9e7dabbc,0xc45a00a,0x8db062a8,0x413f70cd,0xffaa400a,0xd8a82ae6,0x69c4cb0e,0x7a257d9a,0xada679}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x93f2f18970271eca,0x9e7dabbc5a6f1c1f,0x8db062a80c45a00a,0xffaa400a413f70cd,0x69c4cb0ed8a82ae6,0xada6797a257d9a}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8949,0xfec5,0x9be5,0xc657,0xab10,0x2df6,0x3ee,0xc0f7,0x1a64,0x328e,0x3b3,0xb50c,0x650a,0xa23d,0x8ab,0xb103,0xf6e6,0xb27d,0x708a,0xc702,0x5301,0x2f00,0x9991,0x2f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfec58949,0xc6579be5,0x2df6ab10,0xc0f703ee,0x328e1a64,0xb50c03b3,0xa23d650a,0xb10308ab,0xb27df6e6,0xc702708a,0x2f005301,0x2f9991}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc6579be5fec58949,0xc0f703ee2df6ab10,0xb50c03b3328e1a64,0xb10308aba23d650a,0xc702708ab27df6e6,0x2f99912f005301}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x72b1,0x5d5,0x2b2f,0xf31d,0x8bc9,0x7f86,0x41a0,0xd3e7,0xa966,0xca96,0x8132,0x1ef9,0xf15f,0x826e,0x336a,0x8d6c,0xa0e2,0x27ed,0x46ff,0x67a7,0x26a5,0x37f1,0xe2b5,0xf5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5d572b1,0xf31d2b2f,0x7f868bc9,0xd3e741a0,0xca96a966,0x1ef98132,0x826ef15f,0x8d6c336a,0x27eda0e2,0x67a746ff,0x37f126a5,0xf5e2b5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf31d2b2f05d572b1,0xd3e741a07f868bc9,0x1ef98132ca96a966,0x8d6c336a826ef15f,0x67a746ff27eda0e2,0xf5e2b537f126a5}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, 
._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1ebb,0xe120,0x35fc,0x20e3,0xba01,0xff68,0x2ef4,0x62f6,0x5e93,0x94c1,0x3f93,0x804c,0xddc5,0x5b3d,0x1d31,0xf673,0x6e47,0x3d32,0x242c,0x6f7e,0x764b,0x63cb,0xbf4,0xf7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe1201ebb,0x20e335fc,0xff68ba01,0x62f62ef4,0x94c15e93,0x804c3f93,0x5b3dddc5,0xf6731d31,0x3d326e47,0x6f7e242c,0x63cb764b,0xf70bf4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x20e335fce1201ebb,0x62f62ef4ff68ba01,0x804c3f9394c15e93,0xf6731d315b3dddc5,0x6f7e242c3d326e47,0xf70bf463cb764b}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe76c,0x34d0,0x684,0xee5,0x43c6,0x5a38,0x4bd5,0x2867,0xd3c5,0x2ee1,0xf790,0x18bf,0xbb64,0x3924,0x7d25,0xe0bc,0x913a,0x1355,0x50e9,0x7091,0x6724,0x21b2,0xc027,0xaa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x34d0e76c,0xee50684,0x5a3843c6,0x28674bd5,0x2ee1d3c5,0x18bff790,0x3924bb64,0xe0bc7d25,0x1355913a,0x709150e9,0x21b26724,0xaac027}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xee5068434d0e76c,0x28674bd55a3843c6,0x18bff7902ee1d3c5,0xe0bc7d253924bb64,0x709150e91355913a,0xaac02721b26724}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xbd01,0x45bb,0x58bc,0x8007,0xbf5b,0xfd7,0x440b,0x7f9,0x54ed,0xe5db,0x2ba9,0xcd7b,0xfc98,0x1314,0x1470,0x9e9b,0xca3,0x944c,0x73c6,0x4cc9,0xa757,0x45fe,0x8b40,0x46}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x45bbbd01,0x800758bc,0xfd7bf5b,0x7f9440b,0xe5db54ed,0xcd7b2ba9,0x1314fc98,0x9e9b1470,0x944c0ca3,0x4cc973c6,0x45fea757,0x468b40}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x800758bc45bbbd01,0x7f9440b0fd7bf5b,0xcd7b2ba9e5db54ed,0x9e9b14701314fc98,0x4cc973c6944c0ca3,0x468b4045fea757}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe145,0x1edf,0xca03,0xdf1c,0x45fe,0x97,0xd10b,0x9d09,0xa16c,0x6b3e,0xc06c,0x7fb3,0x223a,0xa4c2,0xe2ce,0x98c,0x91b8,0xc2cd,0xdbd3,0x9081,0x89b4,0x9c34,0xf40b,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1edfe145,0xdf1cca03,0x9745fe,0x9d09d10b,0x6b3ea16c,0x7fb3c06c,0xa4c2223a,0x98ce2ce,0xc2cd91b8,0x9081dbd3,0x9c3489b4,0x8f40b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdf1cca031edfe145,0x9d09d10b009745fe,0x7fb3c06c6b3ea16c,0x98ce2cea4c2223a,0x9081dbd3c2cd91b8,0x8f40b9c3489b4}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8d4f,0xfa2a,0xd4d0,0xce2,0x7436,0x8079,0xbe5f,0x2c18,0x5699,0x3569,0x7ecd,0xe106,0xea0,0x7d91,0xcc95,0x7293,0x5f1d,0xd812,0xb900,0x9858,0xd95a,0xc80e,0x1d4a,0xa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfa2a8d4f,0xce2d4d0,0x80797436,0x2c18be5f,0x35695699,0xe1067ecd,0x7d910ea0,0x7293cc95,0xd8125f1d,0x9858b900,0xc80ed95a,0xa1d4a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xce2d4d0fa2a8d4f,0x2c18be5f80797436,0xe1067ecd35695699,0x7293cc957d910ea0,0x9858b900d8125f1d,0xa1d4ac80ed95a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1eca,0x7027,0xf189,0x93f2,0x1c1f,0x5a6f,0xabbc,0x9e7d,0xa00a,0xc45,0x62a8,0x8db0,0x70cd,0x413f,0x400a,0xffaa,0x2ae6,0xd8a8,0xcb0e,0x69c4,0x7d9a,0x7a25,0xa679,0xad}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x70271eca,0x93f2f189,0x5a6f1c1f,0x9e7dabbc,0xc45a00a,0x8db062a8,0x413f70cd,0xffaa400a,0xd8a82ae6,0x69c4cb0e,0x7a257d9a,0xada679}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x93f2f18970271eca,0x9e7dabbc5a6f1c1f,0x8db062a80c45a00a,0xffaa400a413f70cd,0x69c4cb0ed8a82ae6,0xada6797a257d9a}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8949,0xfec5,0x9be5,0xc657,0xab10,0x2df6,0x3ee,0xc0f7,0x1a64,0x328e,0x3b3,0xb50c,0x650a,0xa23d,0x8ab,0xb103,0xf6e6,0xb27d,0x708a,0xc702,0x5301,0x2f00,0x9991,0x2f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xfec58949,0xc6579be5,0x2df6ab10,0xc0f703ee,0x328e1a64,0xb50c03b3,0xa23d650a,0xb10308ab,0xb27df6e6,0xc702708a,0x2f005301,0x2f9991}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc6579be5fec58949,0xc0f703ee2df6ab10,0xb50c03b3328e1a64,0xb10308aba23d650a,0xc702708ab27df6e6,0x2f99912f005301}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x72b1,0x5d5,0x2b2f,0xf31d,0x8bc9,0x7f86,0x41a0,0xd3e7,0xa966,0xca96,0x8132,0x1ef9,0xf15f,0x826e,0x336a,0x8d6c,0xa0e2,0x27ed,0x46ff,0x67a7,0x26a5,0x37f1,0xe2b5,0xf5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5d572b1,0xf31d2b2f,0x7f868bc9,0xd3e741a0,0xca96a966,0x1ef98132,0x826ef15f,0x8d6c336a,0x27eda0e2,0x67a746ff,0x37f126a5,0xf5e2b5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf31d2b2f05d572b1,0xd3e741a07f868bc9,0x1ef98132ca96a966,0x8d6c336a826ef15f,0x67a746ff27eda0e2,0xf5e2b537f126a5}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3e42,0x35b4,0xc315,0x4acc,0x7905,0x734e,0xe57,0x941d,0xcc00,0x9010,0x652,0x5679,0x1e7c,0x69d5,0x77f0,0x5936,0x9815,0xdc49,0xdbae,0x8415,0x2381,0x706d,0x1b55,0x35}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x35b43e42,0x4accc315,0x734e7905,0x941d0e57,0x9010cc00,0x56790652,0x69d51e7c,0x593677f0,0xdc499815,0x8415dbae,0x706d2381,0x351b55}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4accc31535b43e42,0x941d0e57734e7905,0x567906529010cc00,0x593677f069d51e7c,0x8415dbaedc499815,0x351b55706d2381}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x9f23,0x1f88,0x311a,0x8d4e,0x15a2,0x199f,0x997,0x8bcf,0xc7a0,0xc956,0x3de8,0x254b,0x1224,0x1a69,0x604a,0x9cb1,0xa8f7,0xc6ee,0x5903,0x65b8,0xe8a5,0xa271,0x7d6e,0xb3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1f889f23,0x8d4e311a,0x199f15a2,0x8bcf0997,0xc956c7a0,0x254b3de8,0x1a691224,0x9cb1604a,0xc6eea8f7,0x65b85903,0xa271e8a5,0xb37d6e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8d4e311a1f889f23,0x8bcf0997199f15a2,0x254b3de8c956c7a0,0x9cb1604a1a691224,0x65b85903c6eea8f7,0xb37d6ea271e8a5}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xfad4,0x9280,0x39ea,0xba3b,0xb12b,0x1c9c,0x5ffd,0x2c19,0x13bf,0x2145,0xaf34,0x30c1,0x70d8,0x27ea,0x6539,0xb50a,0x3106,0x3638,0x7fad,0xa5d2,0x912a,0xb0e6,0xb4a1,0xfd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9280fad4,0xba3b39ea,0x1c9cb12b,0x2c195ffd,0x214513bf,0x30c1af34,0x27ea70d8,0xb50a6539,0x36383106,0xa5d27fad,0xb0e6912a,0xfdb4a1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xba3b39ea9280fad4,0x2c195ffd1c9cb12b,0x30c1af34214513bf,0xb50a653927ea70d8,0xa5d27fad36383106,0xfdb4a1b0e6912a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc1be,0xca4b,0x3cea,0xb533,0x86fa,0x8cb1,0xf1a8,0x6be2,0x33ff,0x6fef,0xf9ad,0xa986,0xe183,0x962a,0x880f,0xa6c9,0x67ea,0x23b6,0x2451,0x7bea,0xdc7e,0x8f92,0xe4aa,0xca}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xca4bc1be,0xb5333cea,0x8cb186fa,0x6be2f1a8,0x6fef33ff,0xa986f9ad,0x962ae183,0xa6c9880f,0x23b667ea,0x7bea2451,0x8f92dc7e,0xcae4aa}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb5333ceaca4bc1be,0x6be2f1a88cb186fa,0xa986f9ad6fef33ff,0xa6c9880f962ae183,0x7bea245123b667ea,0xcae4aa8f92dc7e}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x9a9, 0x8c7, 0x119d, 0xada, 0x1d98, 0xc97, 0x1a31, 0xdc6, 0x192a, 0xf3c, 0x509, 0xc5b, 0xea7, 0x1eb9, 0x59d, 0x1e3c, 0x114b, 0x88, 0x5a9, 0x1154, 0x18c0, 0x11db, 0x1ba9, 0x3b8, 0x837, 0x18d0, 0x17eb, 0x211, 0x1aa7, 0x11} +#elif RADIX == 32 +{0x463a6a6, 0x15b519d, 0x164bf66, 0xa9b8da3, 0x979e64, 0xa9d8b65, 0x59df5cb, 0x452fc78, 0x85a9044, 0xde3022a, 0x71ba98e, 0x6820dc7, 0x4237ebc, 0xca9c} +#elif 
RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xbf6615b519d463a6, 0x979e64a9b8da3164, 0x859df5cba9d8b650, 0x22a85a9044452fc7, 0x20dc771ba98ede30, 0x2a32a9c4237ebc68} +#else +{0x4c2b6a33a8c74d, 0x12a6e368c592fd, 0x5d4ec5b284bcf3, 0x4452fc7859df5c, 0x5bc604550b5208, 0x1a08371dc6ea63, 0x4d954e211bf5e} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x1ae8, 0xa31, 0x1467, 0x2b6, 0x1f66, 0xb25, 0x168c, 0x1371, 0x64a, 0xbcf, 0x1942, 0x1b16, 0xba9, 0xfae, 0x167, 0x1f8f, 0x452, 0x822, 0x16a, 0x455, 0x1e30, 0xc76, 0x6ea, 0x18ee, 0x20d, 0x1e34, 0xdfa, 0x1884, 0x12a9, 0xd} +#elif RADIX == 32 +{0x518eba1, 0x856d467, 0xc592fd9, 0x2a6e368, 0x425e799, 0xea762d9, 0x1677d72, 0x114bf1e, 0xa16a411, 0xb78c08a, 0xdc6ea63, 0x1a08371, 0x108dfaf, 0x2baa7} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x2fd9856d467518eb, 0x25e7992a6e368c59, 0xe1677d72ea762d94, 0x8aa16a411114bf1, 0x8371dc6ea63b78c, 0x290caa7108dfaf1a} +#else +{0x330ada8cea31d7, 0x64a9b8da3164bf, 0x1753b16ca12f3c, 0x1114bf1e1677d7, 0x76f1811542d482, 0x46820dc771ba98, 0x4465538846fd7} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif 
RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x954, 0x49a, 0xee7, 0x1037, 0x171c, 0x81, 0x448, 0x76f, 0x1615, 0xefe, 0xe70, 0xc54, 0x3d4, 0xc30, 0x1aaf, 0x72c, 0x464, 0x7a7, 0x5b7, 0x1f2a, 0xa98, 0x8db, 0x1689, 0x1cc1, 0x11ae, 0x4bf, 0x1ddc, 0x1f93, 0x1b3e, 0xb} +#elif RADIX == 32 +{0x24d2551, 0x206eee7, 0x8040dc7, 0x54ede44, 0x7077f58, 0xf518a8e, 0xaaf6180, 0x9190e59, 0x45b73d3, 0xdaa63e5, 0x8368946, 0x5fc6bb9, 0xf27ddc2, 0x1dcfb} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xdc7206eee724d25, 0x77f5854ede44804, 0x9aaf6180f518a8e7, 0x3e545b73d39190e5, 0xc6bb98368946daa6, 0x14aecfbf27ddc25f} +#else +{0xe40dddce49a4a, 0x6153b791201037, 0x7a8c547383bfa, 0x39190e59aaf618, 0x5b54c7ca8b6e7a, 0x17f1aee60da251, 0xa5767df93eee1} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xf14, 0xa31, 0x805, 0x19bd, 0x1b37, 0x5d5, 0x1211, 0x9c0, 0x557, 0x6b5, 0x1b2a, 0x775, 0x1a4f, 0x1d9, 0x520, 0x16be, 0x3d, 0x1cae, 0x4ca, 0x1a17, 0x1e64, 0x170b, 0x136, 0x1cd4, 0x150b, 0x1111, 0xf0b, 0x1af9, 0x3ce, 0x1c} +#elif RADIX == 32 +{0x518bc53, 0xf37a805, 0x12eaecd, 0x5d38121, 0x2a35a95, 0x93ceebb, 0x5200ece, 0xf6d7c, 0xe4cae57, 0x5f99342, 0xa8136b8, 0x88d42f9, 0x5f2f0b8, 0x1df3b} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xaecdf37a805518bc, 0xa35a955d3812112e, 0xc5200ece93ceebb2, 0x342e4cae5700f6d7, 0xd42f9a8136b85f99, 0x1530f3b5f2f0b888} +#else +{0x1be6f500aa3178, 0x5574e04844babb, 0x749e775d951ad4, 0x700f6d7c5200ec, 0xbf32685c995ca, 0x22350be6a04dae, 0xa9879daf9785c} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1b6e, 0x5aa, 0x1bd9, 0x1e85, 0x1615, 0x1629, 0xb8b, 0x1066, 0x1532, 0x19ad, 0xe24, 0xcb8, 0x17fc, 0x2ab, 0x1726, 0x1ad5, 0x1c83, 0x1b32, 0x75e, 0x1794, 0x161d, 0x9c4, 0x11b6, 0x1c02, 0x14bb, 0x15d2, 0x10d5, 0x26b, 0x1765, 0x14} +#elif RADIX == 32 +{0x2d56dba, 0x7d0bbd9, 0xbb14d85, 0xca0ccb8, 0x24cd6d4, 0xff1970e, 0x726155d, 0x720f5ab, 0x875ed99, 0x25876f2, 0x51b64e, 0xe952ef8, 0x4d70d5a, 0x23d94} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x4d857d0bbd92d56d, 0x4cd6d4ca0ccb8bb1, 0xb726155dff1970e2, 0x6f2875ed99720f5a, 0x52ef8051b64e2587, 0x2f5dd944d70d5ae9} +#else +{0xafa177b25aadb, 0x5328332e2ec536, 0x6ff8cb871266b6, 0x1720f5ab726155, 0x44b0ede50ebdb3, 0x3a54bbe0146d93, 0x76eeca26b86ad} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x18aa, 0x459, 0x747, 0x401, 0x14be, 0x13ba, 0xafb, 0x1cb4, 0x636, 0xd10, 0x16ec, 0x1e6e, 0x1ee5, 0x1475, 0xf82, 0x1695, 0x1a54, 0xe4e, 0x1856, 0x459, 0x752, 0x1d56, 0x15a7, 0xde2, 0x158c, 0x623, 0x17, 0x10d9, 
0x1156, 0x19} +#elif RADIX == 32 +{0x22ce2ab, 0x8802747, 0xb9dd52f, 0xdb968af, 0xec68818, 0xb97cdd6, 0xf82a3af, 0x6952d2a, 0x3856727, 0xb1d488b, 0xc55a7ea, 0x11d631b, 0x1b20173, 0x955a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xd52f880274722ce2, 0xc68818db968afb9d, 0xaf82a3afb97cdd6e, 0x88b38567276952d2, 0xd631bc55a7eab1d4, 0x2b7455a1b2017311} +#else +{0x5f1004e8e459c5, 0x636e5a2bee7754, 0x7dcbe6eb763440, 0x76952d2af82a3a, 0x563a911670ace4, 0x44758c6f1569fa, 0x57a2ad0d900b9} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1557, 0x1987, 0x65f, 0x1c20, 0x14ef, 0xb3b, 0xbbe, 0x19db, 0xc77, 0x566, 0x9ea, 0xcab, 0xafc, 0x1fda, 0xb44, 0x1fe6, 0x1af3, 0x1829, 0x2ef, 0xc23, 0x83d, 0x82c, 0x1fa8, 0x14b, 0xd6e, 0xde8, 0x260, 0x1019, 0x97a, 0x3} +#elif RADIX == 32 +{0xcc3d55c, 0xf84065f, 0xe59dd3b, 0xdf3b6bb, 0xea2b331, 0xbf19569, 0xb44fed2, 0xebcffcc, 0x62efc14, 0x620f584, 0x97fa841, 0xf435b82, 0x322606, 0x1a5ea} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xdd3bf84065fcc3d5, 0xa2b331df3b6bbe59, 0xcb44fed2bf19569e, 0x58462efc14ebcffc, 0x35b8297fa841620f, 0x17765ea0322606f4} +#else +{0x77f080cbf987aa, 0x477cedaef96774, 0x15f8cab4f51599, 0x4ebcffccb44fed, 0x2c41eb08c5df82, 0x3d0d6e0a5fea10, 0xbbb2f50191303} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xb02, 0xc60, 0x791, 0x1cf7, 0xc15, 0x125a, 0x1697, 0xca1, 0x327, 0x89f, 0xf64, 0xddf, 0xcb7, 0x1977, 0x29f, 0x100a, 0xdac, 0xc8, 0x1e16, 0x1c4e, 0xedf, 0x1ec0, 0x1ac0, 0x1bbd, 0x16ee, 0x106a, 0x35c, 0x11cc, 0xdde, 0x20} +#elif RADIX == 32 +{0x6302c0b, 0x79ee791, 0x792d305, 0x9d94369, 0x6444f8c, 0x2ddbbef, 0x29fcbbb, 0x36b2014, 0xde16064, 0x3b7f89, 0x7bac0f6, 0x355bbb7, 0x39835c8, 0x4077a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xd30579ee7916302c, 0x444f8c9d94369792, 0x429fcbbb2ddbbef6, 0xf89de1606436b201, 0x5bbb77bac0f603b7, 0x30b77a39835c835} +#else +{0xaf3dcf22c6058, 0x327650da5e4b4c, 0x596eddf7b2227c, 0x436b201429fcbb, 0x4076ff13bc2c0c, 0xd56eeddeeb03d, 0x185bbd1cc1ae4} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} 
+#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb089,0xa64c,0xa5a7,0x17cc,0xe580,0xaa34,0x86e8,0x9328,0x2d1e,0x66ce,0x2dbc,0xd6e5,0x68f,0xaf97,0x8c7a,0xc341,0x4b41,0x4e1c,0xe0a3,0x580f,0xe796,0x8f21,0x8ad3,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa64cb089,0x17cca5a7,0xaa34e580,0x932886e8,0x66ce2d1e,0xd6e52dbc,0xaf97068f,0xc3418c7a,0x4e1c4b41,0x580fe0a3,0x8f21e796,0xf8ad3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x17cca5a7a64cb089,0x932886e8aa34e580,0xd6e52dbc66ce2d1e,0xc3418c7aaf97068f,0x580fe0a34e1c4b41,0xf8ad38f21e796}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xebe,0xec57,0xcd8c,0xef29,0xf13a,0xd391,0xa791,0x715c,0xe58e,0x194c,0xf950,0x5a37,0xdb9e,0xdab6,0x82c8,0x5cce,0x3197,0x339a,0xa700,0xad4,0xcd5b,0x91c1,0x5cff,0xb0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xec570ebe,0xef29cd8c,0xd391f13a,0x715ca791,0x194ce58e,0x5a37f950,0xdab6db9e,0x5cce82c8,0x339a3197,0xad4a700,0x91c1cd5b,0xb05cff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xef29cd8cec570ebe,0x715ca791d391f13a,0x5a37f950194ce58e,0x5cce82c8dab6db9e,0xad4a700339a3197,0xb05cff91c1cd5b}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3b4f,0x8fac,0x4b2c,0xc9a0,0x69a2,0xe4fa,0x3480,0xe41b,0x5801,0xd68b,0xa965,0x2f67,0x3134,0x7ac0,0x93a7,0x5352,0x9612,0x200e,0x33e6,0x44f,0xbca8,0x82f1,0xe411,0x4d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8fac3b4f,0xc9a04b2c,0xe4fa69a2,0xe41b3480,0xd68b5801,0x2f67a965,0x7ac03134,0x535293a7,0x200e9612,0x44f33e6,0x82f1bca8,0x4de411}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc9a04b2c8fac3b4f,0xe41b3480e4fa69a2,0x2f67a965d68b5801,0x535293a77ac03134,0x44f33e6200e9612,0x4de41182f1bca8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4f77,0x59b3,0x5a58,0xe833,0x1a7f,0x55cb,0x7917,0x6cd7,0xd2e1,0x9931,0xd243,0x291a,0xf970,0x5068,0x7385,0x3cbe,0xb4be,0xb1e3,0x1f5c,0xa7f0,0x1869,0x70de,0x752c,0xf0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x59b34f77,0xe8335a58,0x55cb1a7f,0x6cd77917,0x9931d2e1,0x291ad243,0x5068f970,0x3cbe7385,0xb1e3b4be,0xa7f01f5c,0x70de1869,0xf0752c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe8335a5859b34f77,0x6cd7791755cb1a7f,0x291ad2439931d2e1,0x3cbe73855068f970,0xa7f01f5cb1e3b4be,0xf0752c70de1869}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 
6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe463,0x3132,0x31,0xb872,0xdbee,0x1045,0x2b88,0x62c5,0xee3c,0xde5c,0xb179,0xa84f,0x18e5,0x355e,0x9a0f,0xbef8,0x783a,0x35b5,0x6d1c,0xaa31,0x3024,0xed81,0xa0f6,0x8a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3132e463,0xb8720031,0x1045dbee,0x62c52b88,0xde5cee3c,0xa84fb179,0x355e18e5,0xbef89a0f,0x35b5783a,0xaa316d1c,0xed813024,0x8aa0f6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb87200313132e463,0x62c52b881045dbee,0xa84fb179de5cee3c,0xbef89a0f355e18e5,0xaa316d1c35b5783a,0x8aa0f6ed813024}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcf24,0xdac2,0xe08b,0xd2f9,0x13a,0xf1f,0x9517,0xfa7c,0xa1c5,0x581e,0x4d0b,0x3e59,0x97cc,0x7506,0xee19,0xa48e,0xb1b0,0x50c2,0xb5a7,0x4b1d,0x2fcd,0xee68,0xab65,0x85}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdac2cf24,0xd2f9e08b,0xf1f013a,0xfa7c9517,0x581ea1c5,0x3e594d0b,0x750697cc,0xa48eee19,0x50c2b1b0,0x4b1db5a7,0xee682fcd,0x85ab65}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0xd2f9e08bdac2cf24,0xfa7c95170f1f013a,0x3e594d0b581ea1c5,0xa48eee19750697cc,0x4b1db5a750c2b1b0,0x85ab65ee682fcd}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8b69,0x7be5,0xdf28,0x9c91,0xf929,0x7c60,0x6c50,0x4f81,0x714a,0x59da,0x2741,0x3c71,0x223a,0x79bf,0x14bd,0xa26f,0xc787,0x606d,0xc74c,0xef81,0xd1c4,0x32a,0x55ff,0x6a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7be58b69,0x9c91df28,0x7c60f929,0x4f816c50,0x59da714a,0x3c712741,0x79bf223a,0xa26f14bd,0x606dc787,0xef81c74c,0x32ad1c4,0x6a55ff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c91df287be58b69,0x4f816c507c60f929,0x3c71274159da714a,0xa26f14bd79bf223a,0xef81c74c606dc787,0x6a55ff032ad1c4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1b9d,0xcecd,0xffce,0x478d,0x2411,0xefba,0xd477,0x9d3a,0x11c3,0x21a3,0x4e86,0x57b0,0xe71a,0xcaa1,0x65f0,0x4107,0x87c5,0xca4a,0x92e3,0x55ce,0xcfdb,0x127e,0x5f09,0x75}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecd1b9d,0x478dffce,0xefba2411,0x9d3ad477,0x21a311c3,0x57b04e86,0xcaa1e71a,0x410765f0,0xca4a87c5,0x55ce92e3,0x127ecfdb,0x755f09}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x478dffcececd1b9d,0x9d3ad477efba2411,0x57b04e8621a311c3,0x410765f0caa1e71a,0x55ce92e3ca4a87c5,0x755f09127ecfdb}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb089,0xa64c,0xa5a7,0x17cc,0xe580,0xaa34,0x86e8,0x9328,0x2d1e,0x66ce,0x2dbc,0xd6e5,0x68f,0xaf97,0x8c7a,0xc341,0x4b41,0x4e1c,0xe0a3,0x580f,0xe796,0x8f21,0x8ad3,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa64cb089,0x17cca5a7,0xaa34e580,0x932886e8,0x66ce2d1e,0xd6e52dbc,0xaf97068f,0xc3418c7a,0x4e1c4b41,0x580fe0a3,0x8f21e796,0xf8ad3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x17cca5a7a64cb089,0x932886e8aa34e580,0xd6e52dbc66ce2d1e,0xc3418c7aaf97068f,0x580fe0a34e1c4b41,0xf8ad38f21e796}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xebe,0xec57,0xcd8c,0xef29,0xf13a,0xd391,0xa791,0x715c,0xe58e,0x194c,0xf950,0x5a37,0xdb9e,0xdab6,0x82c8,0x5cce,0x3197,0x339a,0xa700,0xad4,0xcd5b,0x91c1,0x5cff,0xb0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xec570ebe,0xef29cd8c,0xd391f13a,0x715ca791,0x194ce58e,0x5a37f950,0xdab6db9e,0x5cce82c8,0x339a3197,0xad4a700,0x91c1cd5b,0xb05cff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xef29cd8cec570ebe,0x715ca791d391f13a,0x5a37f950194ce58e,0x5cce82c8dab6db9e,0xad4a700339a3197,0xb05cff91c1cd5b}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3b4f,0x8fac,0x4b2c,0xc9a0,0x69a2,0xe4fa,0x3480,0xe41b,0x5801,0xd68b,0xa965,0x2f67,0x3134,0x7ac0,0x93a7,0x5352,0x9612,0x200e,0x33e6,0x44f,0xbca8,0x82f1,0xe411,0x4d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8fac3b4f,0xc9a04b2c,0xe4fa69a2,0xe41b3480,0xd68b5801,0x2f67a965,0x7ac03134,0x535293a7,0x200e9612,0x44f33e6,0x82f1bca8,0x4de411}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0xc9a04b2c8fac3b4f,0xe41b3480e4fa69a2,0x2f67a965d68b5801,0x535293a77ac03134,0x44f33e6200e9612,0x4de41182f1bca8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4f77,0x59b3,0x5a58,0xe833,0x1a7f,0x55cb,0x7917,0x6cd7,0xd2e1,0x9931,0xd243,0x291a,0xf970,0x5068,0x7385,0x3cbe,0xb4be,0xb1e3,0x1f5c,0xa7f0,0x1869,0x70de,0x752c,0xf0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x59b34f77,0xe8335a58,0x55cb1a7f,0x6cd77917,0x9931d2e1,0x291ad243,0x5068f970,0x3cbe7385,0xb1e3b4be,0xa7f01f5c,0x70de1869,0xf0752c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe8335a5859b34f77,0x6cd7791755cb1a7f,0x291ad2439931d2e1,0x3cbe73855068f970,0xa7f01f5cb1e3b4be,0xf0752c70de1869}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd69f,0xa20a,0x2dbf,0x4897,0x3199,0xde89,0xe5f9,0x293e,0x826b,0xb67a,0x9878,0x508f,0x1cd5,0xbfc7,0xa6dc,0xa78c,0xa5a7,0xf717,0x2bd3,0x9a61,0x7d35,0xb772,0xba39,0x5d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa20ad69f,0x48972dbf,0xde893199,0x293ee5f9,0xb67a826b,0x508f9878,0xbfc71cd5,0xa78ca6dc,0xf717a5a7,0x9a612bd3,0xb7727d35,0x5dba39}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x48972dbfa20ad69f,0x293ee5f9de893199,0x508f9878b67a826b,0xa78ca6dcbfc71cd5,0x9a612bd3f717a5a7,0x5dba39b7727d35}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xeec1,0x1e36,0x61bb,0x9e9f,0xe1d8,0x9166,0x8a8e,0xb5cd,0xc787,0x4281,0xb7db,0xc5fe,0x29b,0x7038,0xad1a,0xdfb3,0x5d88,0xa643,0xce34,0xe9d5,0xfe7,0xc15c,0xb80f,0xbc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1e36eec1,0x9e9f61bb,0x9166e1d8,0xb5cd8a8e,0x4281c787,0xc5feb7db,0x7038029b,0xdfb3ad1a,0xa6435d88,0xe9d5ce34,0xc15c0fe7,0xbcb80f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9e9f61bb1e36eec1,0xb5cd8a8e9166e1d8,0xc5feb7db4281c787,0xdfb3ad1a7038029b,0xe9d5ce34a6435d88,0xbcb80fc15c0fe7}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb7ff,0xc2,0x2b8a,0x5a59,0xd318,0x52ca,0x9b64,0xad19,0x8df,0xc9b8,0x7b28,0x9d09,0xe309,0x9,0xfb09,0xcbb9,0x6a67,0x1137,0x707c,0xaa5,0xcdf5,0x3ffd,0xfb9e,0xb9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc2b7ff,0x5a592b8a,0x52cad318,0xad199b64,0xc9b808df,0x9d097b28,0x9e309,0xcbb9fb09,0x11376a67,0xaa5707c,0x3ffdcdf5,0xb9fb9e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x5a592b8a00c2b7ff,0xad199b6452cad318,0x9d097b28c9b808df,0xcbb9fb090009e309,0xaa5707c11376a67,0xb9fb9e3ffdcdf5}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2961,0x5df5,0xd240,0xb768,0xce66,0x2176,0x1a06,0xd6c1,0x7d94,0x4985,0x6787,0xaf70,0xe32a,0x4038,0x5923,0x5873,0x5a58,0x8e8,0xd42c,0x659e,0x82ca,0x488d,0x45c6,0xa2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5df52961,0xb768d240,0x2176ce66,0xd6c11a06,0x49857d94,0xaf706787,0x4038e32a,0x58735923,0x8e85a58,0x659ed42c,0x488d82ca,0xa245c6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb768d2405df52961,0xd6c11a062176ce66,0xaf70678749857d94,0x587359234038e32a,0x659ed42c08e85a58,0xa245c6488d82ca}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0xccf, 0xad2, 0x1c72, 0x1f31, 0x155, 0xa3, 0x1062, 0xf6e, 0xe60, 0x87b, 0x1891, 0xb45, 0x1de8, 0x7f8, 0x18a6, 0x1e67, 0x988, 0x1887, 0xca7, 0x216, 0x1daa, 0x1aa5, 0xc3a, 0x11d3, 0x1282, 0x55d, 0x15fc, 0x1132, 0x1c5e, 0x8} +#elif RADIX == 32 +{0x569333d, 0x7e63c72, 0x2051855, 0x81edd06, 0x9143db9, 0x7a168b8, 0x8a63fc7, 0xa623ccf, 0xcca7c43, 0x2f6a842, 0xa6c3ad5, 0xaeca0a3, 0x2655fc2, 0x617a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x18557e63c7256933, 0x143db981edd06205, 0xf8a63fc77a168b89, 0x842cca7c43a623cc, 0xca0a3a6c3ad52f6a, 0xf8317a2655fc2ae} +#else +{0x2afcc78e4ad266, 0x6607b741881461, 
0x3bd0b45c48a1ed, 0x3a623ccf8a63fc, 0x25ed5085994f88, 0x2bb2828e9b0eb5, 0x7c18bd132afe1} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x13b1, 0x12b4, 0xf1c, 0xfcc, 0x1855, 0x1028, 0x1418, 0x3db, 0x1b98, 0xa1e, 0xe24, 0x2d1, 0x77a, 0x11fe, 0x1e29, 0x799, 0x1a62, 0x1e21, 0x1329, 0x1085, 0xf6a, 0x16a9, 0x1b0e, 0x1474, 0xca0, 0x157, 0x157f, 0x144c, 0x1317, 0x1b} +#elif RADIX == 32 +{0x95a4ec7, 0x5f98f1c, 0x8814615, 0x607b741, 0x2450f6e, 0xde85a2e, 0xe298ff1, 0xe988f33, 0xb329f10, 0x4bdaa10, 0xe9b0eb5, 0xabb2828, 0x89957f0, 0x19c5e} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x46155f98f1c95a4e, 0x450f6e607b741881, 0x3e298ff1de85a2e2, 0xa10b329f10e988f3, 0xb2828e9b0eb54bda, 0x32a0c5e89957f0ab} +#else +{0x2abf31e392b49d, 0x3981edd0620518, 0xef42d1712287b, 0xe988f33e298ff, 0x297b54216653e2, 0x2aeca0a3a6c3ad, 0x91062f44cabf8} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0xdd8, 0x13bc, 0x17ae, 0x83e, 
0x10c6, 0x1a72, 0x270, 0x84, 0xb92, 0x431, 0x1fdf, 0x9cf, 0x2a9, 0x121d, 0x5d5, 0x1d9f, 0xa48, 0xec9, 0xcfc, 0x6ee, 0x1812, 0x66b, 0xed8, 0xf7, 0x117b, 0x1fb7, 0xc5, 0x1f00, 0x134f, 0x1f} +#elif RADIX == 32 +{0x9de3763, 0x907d7ae, 0xd39431, 0x4810827, 0xdf218ae, 0xaa539ff, 0x5d590e8, 0xa923b3e, 0xccfc764, 0x5e048dd, 0xeeed833, 0xdbc5ec1, 0xe000c5f, 0x39d3f} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x9431907d7ae9de37, 0xf218ae48108270d3, 0xe5d590e8aa539ffd, 0x8ddccfc764a923b3, 0xc5ec1eeed8335e04, 0x195cd3fe000c5fdb} +#else +{0x6320faf5d3bc6e, 0x39204209c34e50, 0x45529cffef90c5, 0x4a923b3e5d590e, 0x6bc091bb99f8ec, 0x76f17b07bbb60c, 0xcae69ff00062f} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xf36, 0x2c8, 0x1ab4, 0x17c1, 0x10be, 0x1a20, 0x1baf, 0x3ce, 0x1088, 0xd75, 0x1e25, 0x10f8, 0x3d2, 0x1b8, 0x9c7, 0x168, 0x44c, 0x372, 0xc50, 0x1d9a, 0x1b99, 0xab9, 0x8af, 0x657, 0xe84, 0xe1d, 0x1675, 0x47, 0x157e, 0xc} +#elif RADIX == 32 +{0x1643cd9, 0xaf83ab4, 0xfd1042f, 0x2079dba, 0x256bac2, 0xf4a1f1e, 0x9c70dc0, 0x11302d0, 0x4c501b9, 0xcee67b3, 0xae8af55, 0xeba10c, 0x8f6757, 0x245f8} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x42faf83ab41643c, 0x56bac22079dbafd1, 0x9c70dc0f4a1f1e2, 0x7b34c501b911302d, 0xba10cae8af55cee6, 0x373d5f808f67570e} +#else +{0x5f5f075682c879, 0x881e76ebf4410, 0x7a50f8f12b5d6, 0x111302d09c70dc, 0x39dccf6698a037, 0x43ae8432ba2bd5, 0xb5eafc047b3ab} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x4b0, 0x31c, 0x92f, 0xf0d, 0xbc1, 0x1e89, 0x4ce, 0x1480, 0xdee, 0x504, 0x970, 0x16c3, 0xcb6, 0xae7, 0x1147, 0x8c, 0xc2a, 0x1ff9, 0x7d8, 0xfe9, 0x1fb1, 0x748, 0x998, 0xb85, 0x1a8e, 0x19c7, 0x5f7, 0x103c, 0x12a4, 0xe} +#elif RADIX == 32 +{0x18e12c1, 0x5e1a92f, 0xef44af0, 0xba9004c, 0x7028237, 0x2dad869, 0x147573b, 0xb0a8119, 0x27d8ffc, 0x47ec5fd, 0xa9983a, 0xe3ea397, 0x785f7c, 0x33a92} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x4af05e1a92f18e12, 0x28237ba9004cef4, 0x9147573b2dad8697, 0x5fd27d8ffcb0a811, 0xea3970a9983a47ec, 0x3134a920785f7ce3} +#else +{0x60bc3525e31c25, 0x5eea40133bd12b, 0x596d6c34b81411, 0x4b0a8119147573, 0x48fd8bfa4fb1ff, 0x38fa8e5c2a660e, 0x85a54903c2fbe} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x15a9, 0x1ae1, 0x1dd2, 0xa61, 0x1259, 0xfad, 0xe49, 0x1f6d, 0xd9a, 0x1371, 0xee7, 0x1179, 0x1bcf, 0x876, 0x3ca, 0xf7c, 0x1192, 0x315, 0x916, 0x1aa5, 0x1ca9, 0x10cb, 0xe32, 0x18b9, 0xf58, 0x1932, 0x1cce, 0x1ba7, 0x1377, 0x6} +#elif RADIX == 32 +{0xd70d6a4, 0x54c3dd2, 0x97d6c96, 0x6bedae4, 0xe79b8b6, 0xf3e2f2e, 0x3ca43b6, 0xc649ef8, 0xa91618a, 0x5f2a754, 0x72e3286, 0x993d631, 0x74fccec, 0x34ddf} +#elif RADIX == 64 +#if 
defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6c9654c3dd2d70d6, 0x79b8b66bedae497d, 0x83ca43b6f3e2f2ee, 0x754a91618ac649ef, 0x3d63172e32865f2a, 0x29d8ddf74fccec99} +#else +{0x2ca987ba5ae1ad, 0x59afb6b925f5b2, 0x379f179773cdc5, 0x2c649ef83ca43b, 0x4be54ea9522c31, 0x264f58c5cb8ca1, 0x4ac6efba7e676} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1f79, 0xcad, 0x18f2, 0x1ba7, 0x1d14, 0x1fc6, 0x197d, 0x522, 0xab, 0x7bd, 0x57b, 0x1fbf, 0x12, 0xb50, 0x425, 0x1aa3, 0x1c8e, 0x11cf, 0x1c1b, 0x1774, 0x3fc, 0x36a, 0x148f, 0x1fd3, 0x608, 0x1711, 0x1142, 0xcfa, 0xd43, 0xd} +#elif RADIX == 32 +{0x656fde5, 0x374f8f2, 0xdfe3745, 0xaca4597, 0x7b3de82, 0x4bf7e5, 0x4255a80, 0xf23b546, 0x9c1b8e7, 0x50ff2ee, 0xa748f1b, 0x889823f, 0x9f5142b, 0x2a50d} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3745374f8f2656fd, 0xb3de82aca4597dfe, 0x64255a8004bf7e57, 0x2ee9c1b8e7f23b54, 0x9823fa748f1b50ff, 0x3a4f50d9f5142b88} +#else +{0xa6e9f1e4cadfb, 0xab29165f7f8dd, 0x25fbf2bd9ef4, 0x7f23b5464255a8, 0x6a1fe5dd38371c, 0x622608fe9d23c6, 0xce7a86cfa8a15} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x14a, 0x1236, 0x839, 0xe2, 0xe2d, 0xe17, 0x1b8f, 0x18dd, 0xb20, 0xeb8, 0x1da9, 0xc53, 0x12e8, 0x146, 0x1b9b, 0x154, 0x1121, 0x1049, 0x105d, 0x631, 0xc9, 0xbe0, 0x8fa, 0xbc0, 0x34b, 0x178a, 0x77b, 0x2a7, 0x105b, 0x15} +#elif RADIX == 32 +{0x91b052a, 0x41c4839, 0xf70bb8b, 0x831bbb8, 0xa975c2c, 0xba18a7d, 0xb9b0a34, 0xc4842a9, 0x305d824, 0x324c6, 0x808fa5f, 0xc50d2d7, 0x54e77bb, 0x2a16c} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xbb8b41c483991b05, 0x975c2c831bbb8f70, 0x9b9b0a34ba18a7da, 0x4c6305d824c4842a, 0xd2d7808fa5f0032, 0xad416c54e77bbc5} +#else +{0x1683890732360a, 0x320c6eee3dc2ee, 0x25d0c53ed4bae1, 0x4c4842a9b9b0a3, 0x6006498c60bb04, 0x71434b5e023e97, 0x56a0b62a73bdd} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4fff,0xe916,0xb451,0xaee,0xc580,0xfa2c,0xb766,0x199a,0xa8a0,0x36f2,0xaa1b,0x47dd,0xb34f,0x26f5,0xb1ba,0xc4c1,0xad4d,0xfcf8,0x21fd,0xf7a,0x2a36,0x4cd2,0x2b08,0x94}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe9164fff,0xaeeb451,0xfa2cc580,0x199ab766,0x36f2a8a0,0x47ddaa1b,0x26f5b34f,0xc4c1b1ba,0xfcf8ad4d,0xf7a21fd,0x4cd22a36,0x942b08}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xaeeb451e9164fff,0x199ab766fa2cc580,0x47ddaa1b36f2a8a0,0xc4c1b1ba26f5b34f,0xf7a21fdfcf8ad4d,0x942b084cd22a36}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf16e,0x5176,0xe09f,0xe2ae,0xa46d,0x6925,0xc9bd,0xbd66,0xdd5c,0x51ce,0xe98d,0x9ad3,0x6815,0x4476,0xbec8,0xa088,0x6799,0x1733,0xf164,0x1d16,0xc914,0x4af3,0x3b5b,0x84}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5176f16e,0xe2aee09f,0x6925a46d,0xbd66c9bd,0x51cedd5c,0x9ad3e98d,0x44766815,0xa088bec8,0x17336799,0x1d16f164,0x4af3c914,0x843b5b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe2aee09f5176f16e,0xbd66c9bd6925a46d,0x9ad3e98d51cedd5c,0xa088bec844766815,0x1d16f16417336799,0x843b5b4af3c914}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x22c1,0x4088,0xbc14,0x9620,0x7d53,0x77ec,0x89,0xda12,0x54df,0x3806,0xb925,0x6092,0xd1b4,0x59e3,0x3419,0x9617,0x9ae6,0x48a9,0xf008,0xb452,0x9756,0x35cb,0x9c5d,0x49}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x408822c1,0x9620bc14,0x77ec7d53,0xda120089,0x380654df,0x6092b925,0x59e3d1b4,0x96173419,0x48a99ae6,0xb452f008,0x35cb9756,0x499c5d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9620bc14408822c1,0xda12008977ec7d53,0x6092b925380654df,0x9617341959e3d1b4,0xb452f00848a99ae6,0x499c5d35cb9756}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb001,0x16e9,0x4bae,0xf511,0x3a7f,0x5d3,0x4899,0xe665,0x575f,0xc90d,0x55e4,0xb822,0x4cb0,0xd90a,0x4e45,0x3b3e,0x52b2,0x307,0xde02,0xf085,0xd5c9,0xb32d,0xd4f7,0x6b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x16e9b001,0xf5114bae,0x5d33a7f,0xe6654899,0xc90d575f,0xb82255e4,0xd90a4cb0,0x3b3e4e45,0x30752b2,0xf085de02,0xb32dd5c9,0x6bd4f7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf5114bae16e9b001,0xe665489905d33a7f,0xb82255e4c90d575f,0x3b3e4e45d90a4cb0,0xf085de02030752b2,0x6bd4f7b32dd5c9}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size 
= 24, ._mp_d = (mp_limb_t[]) {0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xafa5,0x4195,0xbb2d,0xdd24,0xa3ca,0xc678,0xf995,0x2ccb,0x5c3b,0xf9ff,0xd06,0x1f9b,0x926d,0x4e3b,0x2881,0x24f2,0xcf4c,0x8e9a,0xa38d,0x24cb,0xe8f2,0x28a1,0x581c,0xde}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4195afa5,0xdd24bb2d,0xc678a3ca,0x2ccbf995,0xf9ff5c3b,0x1f9b0d06,0x4e3b926d,0x24f22881,0x8e9acf4c,0x24cba38d,0x28a1e8f2,0xde581c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd24bb2d4195afa5,0x2ccbf995c678a3ca,0x1f9b0d06f9ff5c3b,0x24f228814e3b926d,0x24cba38d8e9acf4c,0xde581c28a1e8f2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcd88,0x9cea,0x593c,0xb5a8,0x79c6,0xc07c,0x496f,0xfb85,0x5ac9,0x381c,0xf4f8,0xfa59,0xb7a3,0x5caa,0x24c2,0x67c8,0x31b3,0x7585,0xbe8a,0xb89f,0xa29f,0x6cd5,0xc156,0x25}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9ceacd88,0xb5a8593c,0xc07c79c6,0xfb85496f,0x381c5ac9,0xfa59f4f8,0x5caab7a3,0x67c824c2,0x758531b3,0xb89fbe8a,0x6cd5a29f,0x25c156}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb5a8593c9ceacd88,0xfb85496fc07c79c6,0xfa59f4f8381c5ac9,0x67c824c25caab7a3,0xb89fbe8a758531b3,0x25c1566cd5a29f}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x9627,0xd297,0x9200,0x73de,0xaa89,0xf44f,0x99c7,0x2d45,0xb1eb,0xab2b,0x4168,0x976f,0x1e88,0x7777,0x2f39,0x6648,0xc224,0xd5a1,0xb815,0x861b,0xf76f,0xb476,0x4123,0xbe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd2979627,0x73de9200,0xf44faa89,0x2d4599c7,0xab2bb1eb,0x976f4168,0x77771e88,0x66482f39,0xd5a1c224,0x861bb815,0xb476f76f,0xbe4123}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x73de9200d2979627,0x2d4599c7f44faa89,0x976f4168ab2bb1eb,0x66482f3977771e88,0x861bb815d5a1c224,0xbe4123b476f76f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x505b,0xbe6a,0x44d2,0x22db,0x5c35,0x3987,0x66a,0xd334,0xa3c4,0x600,0xf2f9,0xe064,0x6d92,0xb1c4,0xd77e,0xdb0d,0x30b3,0x7165,0x5c72,0xdb34,0x170d,0xd75e,0xa7e3,0x21}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbe6a505b,0x22db44d2,0x39875c35,0xd334066a,0x600a3c4,0xe064f2f9,0xb1c46d92,0xdb0dd77e,0x716530b3,0xdb345c72,0xd75e170d,0x21a7e3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22db44d2be6a505b,0xd334066a39875c35,0xe064f2f90600a3c4,0xdb0dd77eb1c46d92,0xdb345c72716530b3,0x21a7e3d75e170d}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4fff,0xe916,0xb451,0xaee,0xc580,0xfa2c,0xb766,0x199a,0xa8a0,0x36f2,0xaa1b,0x47dd,0xb34f,0x26f5,0xb1ba,0xc4c1,0xad4d,0xfcf8,0x21fd,0xf7a,0x2a36,0x4cd2,0x2b08,0x94}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe9164fff,0xaeeb451,0xfa2cc580,0x199ab766,0x36f2a8a0,0x47ddaa1b,0x26f5b34f,0xc4c1b1ba,0xfcf8ad4d,0xf7a21fd,0x4cd22a36,0x942b08}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xaeeb451e9164fff,0x199ab766fa2cc580,0x47ddaa1b36f2a8a0,0xc4c1b1ba26f5b34f,0xf7a21fdfcf8ad4d,0x942b084cd22a36}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf16e,0x5176,0xe09f,0xe2ae,0xa46d,0x6925,0xc9bd,0xbd66,0xdd5c,0x51ce,0xe98d,0x9ad3,0x6815,0x4476,0xbec8,0xa088,0x6799,0x1733,0xf164,0x1d16,0xc914,0x4af3,0x3b5b,0x84}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5176f16e,0xe2aee09f,0x6925a46d,0xbd66c9bd,0x51cedd5c,0x9ad3e98d,0x44766815,0xa088bec8,0x17336799,0x1d16f164,0x4af3c914,0x843b5b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe2aee09f5176f16e,0xbd66c9bd6925a46d,0x9ad3e98d51cedd5c,0xa088bec844766815,0x1d16f16417336799,0x843b5b4af3c914}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x22c1,0x4088,0xbc14,0x9620,0x7d53,0x77ec,0x89,0xda12,0x54df,0x3806,0xb925,0x6092,0xd1b4,0x59e3,0x3419,0x9617,0x9ae6,0x48a9,0xf008,0xb452,0x9756,0x35cb,0x9c5d,0x49}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x408822c1,0x9620bc14,0x77ec7d53,0xda120089,0x380654df,0x6092b925,0x59e3d1b4,0x96173419,0x48a99ae6,0xb452f008,0x35cb9756,0x499c5d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9620bc14408822c1,0xda12008977ec7d53,0x6092b925380654df,0x9617341959e3d1b4,0xb452f00848a99ae6,0x499c5d35cb9756}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xb001,0x16e9,0x4bae,0xf511,0x3a7f,0x5d3,0x4899,0xe665,0x575f,0xc90d,0x55e4,0xb822,0x4cb0,0xd90a,0x4e45,0x3b3e,0x52b2,0x307,0xde02,0xf085,0xd5c9,0xb32d,0xd4f7,0x6b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x16e9b001,0xf5114bae,0x5d33a7f,0xe6654899,0xc90d575f,0xb82255e4,0xd90a4cb0,0x3b3e4e45,0x30752b2,0xf085de02,0xb32dd5c9,0x6bd4f7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf5114bae16e9b001,0xe665489905d33a7f,0xb82255e4c90d575f,0x3b3e4e45d90a4cb0,0xf085de02030752b2,0x6bd4f7b32dd5c9}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x43c6,0x55d8,0x682a,0xc215,0x706e,0xac4c,0x5ce,0x1182,0x8b72,0x90e3,0xf04f,0x6a11,0xc345,0x3488,0x45b0,0x5d3f,0x556b,0x9896,0x7b20,0x8d46,0xa9e3,0x7b0c,0xd428,0xba}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x55d843c6,0xc215682a,0xac4c706e,0x118205ce,0x90e38b72,0x6a11f04f,0x3488c345,0x5d3f45b0,0x9896556b,0x8d467b20,0x7b0ca9e3,0xbad428}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc215682a55d843c6,0x118205ceac4c706e,0x6a11f04f90e38b72,0x5d3f45b03488c345,0x8d467b209896556b,0xbad4287b0ca9e3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x91a5,0xf9ad,0x243c,0xedb9,0xc4f5,0xce5f,0xd6d7,0x3592,0x40df,0xdead,0x1489,0xe297,0x55b1,0xee4d,0xda9d,0x9e1f,0x4a5c,0xd99a,0x6c6b,0xa585,0x62fc,0x4383,0xc1ad,0xc0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf9ad91a5,0xedb9243c,0xce5fc4f5,0x3592d6d7,0xdead40df,0xe2971489,0xee4d55b1,0x9e1fda9d,0xd99a4a5c,0xa5856c6b,0x438362fc,0xc0c1ad}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xedb9243cf9ad91a5,0x3592d6d7ce5fc4f5,0xe2971489dead40df,0x9e1fda9dee4d55b1,0xa5856c6bd99a4a5c,0xc0c1ad438362fc}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf454,0x6191,0x2181,0x2fc4,0x66fb,0xc44f,0x7bb6,0x9b1c,0x99f,0xee09,0xb1a3,0xf8f9,0xf234,0x5151,0x595c,0x4e44,0xa80a,0x305c,0x9930,0x25f6,0x8e50,0xb812,0xff4d,0xb8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6191f454,0x2fc42181,0xc44f66fb,0x9b1c7bb6,0xee09099f,0xf8f9b1a3,0x5151f234,0x4e44595c,0x305ca80a,0x25f69930,0xb8128e50,0xb8ff4d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2fc421816191f454,0x9b1c7bb6c44f66fb,0xf8f9b1a3ee09099f,0x4e44595c5151f234,0x25f69930305ca80a,0xb8ff4db8128e50}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xbc3a,0xaa27,0x97d5,0x3dea,0x8f91,0x53b3,0xfa31,0xee7d,0x748d,0x6f1c,0xfb0,0x95ee,0x3cba,0xcb77,0xba4f,0xa2c0,0xaa94,0x6769,0x84df,0x72b9,0x561c,0x84f3,0x2bd7,0x45}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xaa27bc3a,0x3dea97d5,0x53b38f91,0xee7dfa31,0x6f1c748d,0x95ee0fb0,0xcb773cba,0xa2c0ba4f,0x6769aa94,0x72b984df,0x84f3561c,0x452bd7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3dea97d5aa27bc3a,0xee7dfa3153b38f91,0x95ee0fb06f1c748d,0xa2c0ba4fcb773cba,0x72b984df6769aa94,0x452bd784f3561c}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x10c4, 0x1e1a, 0x1b68, 0xa71, 0xa1a, 0x1f60, 0xdda, 0xb59, 0x1bc0, 0x26c, 0x1325, 0x4cf, 0x1375, 0x10ef, 0x1702, 0x1eff, 0x171f, 0x467, 0xc1c, 0xf3b, 0xf74, 0x6fa, 0x177f, 0x911, 0x8bc, 0x16b7, 0x1022, 0xa5f, 0xa55, 0x9} +#elif RADIX == 32 +{0xf0d4311, 0x94e3b68, 0xafb0286, 0x16b2dd, 0x251366f, 0xdd499f3, 0x702877c, 0xdc7fdff, 0x6c1c233, 0xd3dd1e7, 0x2377f37, 0x5ba2f12, 0x4bf022b, 0x9955} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x28694e3b68f0d43, 0x51366f016b2ddafb, 0xf702877cdd499f32, 0x1e76c1c233dc7fdf, 0xa2f122377f37d3dd, 0x45a9554bf022b5b} +#else +{0xd29c76d1e1a86, 0x3c05acb76bec0a, 0x66ea4cf99289b3, 0x3dc7fdff702877, 0x7a7ba3ced83846, 0x56e8bc488ddfcd, 0x22d4aaa5f8115} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x14af, 0x786, 0xeda, 0x129c, 0x286, 0x17d8, 0xb76, 0x2d6, 0x6f0, 0x89b, 0x1cc9, 0x933, 0x1cdd, 0x143b, 0x1dc0, 0x1fbf, 0x1dc7, 0x119, 0x1b07, 0x3ce, 0x13dd, 0x19be, 0xddf, 0x244, 0x1a2f, 0x15ad, 0x1c08, 0xa97, 0xa95, 0x3} +#elif RADIX == 32 +{0x3c352bc, 0xa538eda, 0x6bec0a1, 0xc05acb7, 0xc944d9b, 0x375267c, 0xdc0a1df, 0xf71ff7f, 0xdb0708c, 0xf4f7479, 0x88ddfcd, 0xd6e8bc4, 0x52fc08a, 0x1aa55} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xc0a1a538eda3c352, 0x944d9bc05acb76be, 0xfdc0a1df375267cc, 0x479db0708cf71ff7, 0xe8bc488ddfcdf4f7, 0x2fd6a5552fc08ad6} +#else +{0x434a71db4786a5, 0x6f016b2ddafb02, 0x79ba933e64a26c, 0x4f71ff7fdc0a1d, 0x3e9ee8f3b60e11, 0x35ba2f122377f3, 0x7ab52aa97e045} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0xd3b, 0x1cbd, 0x1177, 0x1087, 0x5d2, 0x1535, 0x1cb5, 0x1372, 0x158a, 0x931, 0x12da, 0x1b9d, 0x44e, 0xa00, 0xb71, 0xe8a, 0x1c57, 0x1a1, 0x5bb, 0x1180, 0x15f0, 0x1ca3, 0x119b, 0x16cc, 0xd3a, 0xaa7, 0xbc3, 0x9fc, 0xb07, 0x1a} +#elif RADIX == 32 +{0xe5eb4ef, 
0xa10f177, 0x5a9a974, 0x2a6e5cb, 0xda498d6, 0x13b73b2, 0xb715001, 0xf15dd14, 0x5bb0d0, 0x1d7c230, 0x9919be5, 0x53b4ead, 0x3f8bc35, 0xfc1d} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xa974a10f177e5eb4, 0xa498d62a6e5cb5a9, 0x4b71500113b73b2d, 0x23005bb0d0f15dd1, 0xb4ead9919be51d7c, 0x3cbec1d3f8bc3553} +#else +{0x69421e2efcbd69, 0x58a9b972d6a6a5, 0x89db9d96d24c6, 0xf15dd14b71500, 0x23af84600b761a, 0x54ed3ab66466f9, 0xe1f60e9fc5e1a} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x186, 0x245, 0xa48, 0x11da, 0x1354, 0x9fc, 0x168f, 0xff7, 0x1f2c, 0x6a2, 0x6fb, 0x980, 0x164f, 0xbb8, 0x49c, 0x1ad1, 0x145f, 0x80a, 0xf93, 0x2d8, 0x1846, 0x43, 0x5a9, 0x3a, 0x72e, 0x1e10, 0x741, 0x783, 0x967, 0x1a} +#elif RADIX == 32 +{0x122861b, 0x23b4a48, 0xf4fe4d5, 0xb1fef68, 0xfb3517c, 0x93d3006, 0x49c5dc5, 0x517f5a2, 0xf93405, 0x1e1185b, 0x745a902, 0x81cb80, 0xf06741f, 0xf59c} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xe4d523b4a4812286, 0xb3517cb1fef68f4f, 0x249c5dc593d3006f, 0x85b0f93405517f5a, 0x1cb80745a9021e11, 0x6ea59cf06741f08} +#else +{0x2a47694902450c, 0x72c7fbda3d3f93, 0x2c9e98037d9a8b, 0x5517f5a249c5dc, 0x43c230b61f2680, 0x42072e01d16a40, 0x3752ce7833a0f} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1064, 0x8a7, 0x7c, 0x1876, 0xf16, 0x3a0, 0x124, 0x637, 0x11bf, 0x223, 0x6d, 0x58e, 0xcde, 0xaf, 0x99c, 0x1c62, 0xdcb, 0xe10, 0x7ba, 0x127f, 0x1a23, 0x69a, 0x7bd, 0x238, 0x455, 0x16ac, 0x1147, 0x12a, 0x14c1, 0x5} +#elif RADIX == 32 +{0x453c190, 0xb0ec07c, 0x41d03c5, 0xfcc6e12, 0x6d111c6, 0x378b1c0, 0x99c057b, 0x372f8c4, 0xe7ba708, 0xd688e4f, 0x707bd34, 0x5611544, 0x255147b, 0x2d304} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3c5b0ec07c453c1, 0xd111c6fcc6e1241d, 0x499c057b378b1c06, 0xe4fe7ba708372f8c, 0x11544707bd34d688, 0x24bd304255147b56} +#else +{0xb61d80f88a783, 0x1bf31b8490740f, 0x59bc58e036888e, 0x372f8c499c057, 0x1ad11c9fcf74e1, 0x55845511c1ef4d, 0x21e98212a8a3d} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xaab, 0x60b, 0x8a0, 0x15d7, 0xbd8, 0x3ab, 0x1641, 0x1771, 0x134a, 0x17a, 0x785, 0x624, 0x1d, 0x1c3d, 0xcb1, 0xb5e, 0x23f, 0xf53, 0x879, 0x5e2, 0x903, 0xaff, 0xf72, 0xa2d, 0x7f4, 0xeb8, 0xd96, 0x1715, 0xffa, 0xa} +#elif RADIX == 32 +{0x305aaad, 0x2bae8a0, 0x11d5af6, 0x2aee364, 0x850bd4d, 0x74c487, 0xcb1e1e8, 0x88fd6bc, 0x48797a9, 0xfa40cbc, 0x5af7257, 0x5c1fd14, 0xe2ad967, 0x12fea} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x5af62bae8a0305aa, 0x50bd4d2aee36411d, 0xccb1e1e8074c4878, 0xcbc48797a988fd6b, 0x1fd145af7257fa40, 0x2bfffeae2ad9675c} +#else +{0x6c575d14060b55, 0x34abb8d904756b, 0x403a6243c285ea, 0x188fd6bccb1e1e, 0x7f48197890f2f5, 
0x5707f4516bdc95, 0x5bfff57156cb3} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x195c, 0x1d55, 0x99f, 0x11f, 0x106b, 0xab1, 0x3e7, 0x1e40, 0xa1e, 0xdf0, 0x1dd4, 0x5cd, 0xfc3, 0x1c99, 0xbfa, 0x1ead, 0x1f6, 0x12fa, 0x1465, 0xad7, 0x1a84, 0x18d8, 0x1b7f, 0x9fe, 0x14b1, 0x13b7, 0x189f, 0x12bc, 0xabc, 0x1f} +#elif RADIX == 32 +{0xeaae573, 0xc23e99f, 0x7558c1a, 0x7bc803e, 0xd46f828, 0xf0cb9bd, 0xbfae4cb, 0x7dbd5a, 0xf46597d, 0xc6a115a, 0xfdb7fc6, 0xdbd2c53, 0x57989f9, 0x37af2} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x8c1ac23e99feaae5, 0x46f8287bc803e755, 0xabfae4cbf0cb9bdd, 0x15af46597d07dbd5, 0xd2c53fdb7fc6c6a1, 0x1d6aaf257989f9db} +#else +{0x35847d33fd55ca, 0x21ef200f9d5630, 0x5f865cdeea37c1, 0x507dbd5abfae4c, 0x58d422b5e8cb2f, 0x76f4b14ff6dff1, 0xeb55792bcc4fc} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x970, 0x18b4, 0xc62, 0xf59, 0xf33, 0x6c0, 0x5ae, 0x86b, 0x1690, 0x17e1, 0x829, 0xab5, 0x169, 0x1115, 0x1b7e, 0x17fa, 0xcae, 0x1b7, 0xc7b, 0xb70, 0x11fc, 0x1417, 0x8b4, 0x1b78, 0x35a, 0x18e, 0x1e46, 0x15f0, 0xf64, 0x15} +#elif RADIX == 32 +{0xc5a25c2, 0xdeb2c62, 0xe3603cc, 0x410d65a, 0x29bf0da, 0x5a556a8, 0xb7e88a8, 0xb2baff5, 0xc7b0db, 0xbc7f16e, 0xf08b4a0, 0xc70d6b6, 0xbe1e460, 0x29d92} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3ccdeb2c62c5a25, 0x9bf0da410d65ae36, 0x5b7e88a85a556a82, 0x16e0c7b0dbb2baff, 0xd6b6f08b4a0bc7f, 0x316bd92be1e460c7} +#else +{0x19bd658c58b44b, 0x69043596b8d80f, 0x42d2ab5414df86, 0x3b2baff5b7e88a, 0x178fe2dc18f61b, 0x31c35adbc22d28, 0x875ec95f0f230} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x807f,0x9ad4,0xa2d5,0xb571,0xe39a,0x9682,0xbc36,0x2fde,0x80a1,0x55a9,0x2092,0x49c9,0x4269,0x7a75,0x7411,0xde67,0x75d6,0x9689,0xaf23,0x7b89,0x819b,0x60e4,0x276f,0x1a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9ad4807f,0xb571a2d5,0x9682e39a,0x2fdebc36,0x55a980a1,0x49c92092,0x7a754269,0xde677411,0x968975d6,0x7b89af23,0x60e4819b,0x1a276f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb571a2d59ad4807f,0x2fdebc369682e39a,0x49c9209255a980a1,0xde6774117a754269,0x7b89af23968975d6,0x1a276f60e4819b}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc876,0x218,0xd2f9,0x49ab,0x3ef7,0x1ab3,0x4dc4,0xf981,0x381a,0xdb9c,0x12c5,0xc5e1,0x5b21,0xc148,0x9d31,0x5369,0x5706,0x2277,0xcf63,0x1c7d,0xe151,0xd481,0xcd50,0x4b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x218c876,0x49abd2f9,0x1ab33ef7,0xf9814dc4,0xdb9c381a,0xc5e112c5,0xc1485b21,0x53699d31,0x22775706,0x1c7dcf63,0xd481e151,0x4bcd50}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x49abd2f90218c876,0xf9814dc41ab33ef7,0xc5e112c5db9c381a,0x53699d31c1485b21,0x1c7dcf6322775706,0x4bcd50d481e151}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2191,0x98ac,0x5708,0x78cc,0x63fa,0xe9c9,0x168,0x4c13,0x19e6,0xe2be,0xd010,0xd629,0xf3ff,0x3b08,0x7c2b,0xed3e,0x356e,0x5d94,0x253b,0xa0f5,0xc652,0x60d4,0x9dac,0x63}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x98ac2191,0x78cc5708,0xe9c963fa,0x4c130168,0xe2be19e6,0xd629d010,0x3b08f3ff,0xed3e7c2b,0x5d94356e,0xa0f5253b,0x60d4c652,0x639dac}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x78cc570898ac2191,0x4c130168e9c963fa,0xd629d010e2be19e6,0xed3e7c2b3b08f3ff,0xa0f5253b5d94356e,0x639dac60d4c652}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7f81,0x652b,0x5d2a,0x4a8e,0x1c65,0x697d,0x43c9,0xd021,0x7f5e,0xaa56,0xdf6d,0xb636,0xbd96,0x858a,0x8bee,0x2198,0x8a29,0x6976,0x50dc,0x8476,0x7e64,0x9f1b,0xd890,0xe5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x652b7f81,0x4a8e5d2a,0x697d1c65,0xd02143c9,0xaa567f5e,0xb636df6d,0x858abd96,0x21988bee,0x69768a29,0x847650dc,0x9f1b7e64,0xe5d890}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4a8e5d2a652b7f81,0xd02143c9697d1c65,0xb636df6daa567f5e,0x21988bee858abd96,0x847650dc69768a29,0xe5d8909f1b7e64}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x313b,0xc18a,0x812a,0x406d,0x472a,0x9fca,0x9f07,0xb030,0x8b7b,0x7924,0x2af6,0x9e99,0x2b81,0x8eb8,0x35ee,0x59c8,0x7655,0x34cc,0x5aaf,0x326,0xe58d,0xf8b7,0x969a,0x6e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc18a313b,0x406d812a,0x9fca472a,0xb0309f07,0x79248b7b,0x9e992af6,0x8eb82b81,0x59c835ee,0x34cc7655,0x3265aaf,0xf8b7e58d,0x6e969a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x406d812ac18a313b,0xb0309f079fca472a,0x9e992af679248b7b,0x59c835ee8eb82b81,0x3265aaf34cc7655,0x6e969af8b7e58d}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x6610,0xfd89,0xb147,0xcf39,0x2b02,0x4ccf,0xed64,0x8470,0xaaf6,0x1891,0x8c78,0xf074,0x8a4c,0xfaed,0xd66c,0xf52b,0xf1c5,0xb0a,0x5cd,0x46f8,0x79a3,0x81de,0x451d,0xd9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfd896610,0xcf39b147,0x4ccf2b02,0x8470ed64,0x1891aaf6,0xf0748c78,0xfaed8a4c,0xf52bd66c,0xb0af1c5,0x46f805cd,0x81de79a3,0xd9451d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xcf39b147fd896610,0x8470ed644ccf2b02,0xf0748c781891aaf6,0xf52bd66cfaed8a4c,0x46f805cd0b0af1c5,0xd9451d81de79a3}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x1869,0x2ce0,0x425c,0x7d0f,0x30c8,0x1c3e,0xd562,0xfb41,0x3951,0xeccc,0x9c8a,0xb265,0x829,0xd879,0x3c42,0x2cbf,0xb1d2,0xd9d3,0xee28,0x7fdf,0xccdd,0x3ad,0xa6d9,0x3b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2ce01869,0x7d0f425c,0x1c3e30c8,0xfb41d562,0xeccc3951,0xb2659c8a,0xd8790829,0x2cbf3c42,0xd9d3b1d2,0x7fdfee28,0x3adccdd,0x3ba6d9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7d0f425c2ce01869,0xfb41d5621c3e30c8,0xb2659c8aeccc3951,0x2cbf3c42d8790829,0x7fdfee28d9d3b1d2,0x3ba6d903adccdd}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcec5,0x3e75,0x7ed5,0xbf92,0xb8d5,0x6035,0x60f8,0x4fcf,0x7484,0x86db,0xd509,0x6166,0xd47e,0x7147,0xca11,0xa637,0x89aa,0xcb33,0xa550,0xfcd9,0x1a72,0x748,0x6965,0x91}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3e75cec5,0xbf927ed5,0x6035b8d5,0x4fcf60f8,0x86db7484,0x6166d509,0x7147d47e,0xa637ca11,0xcb3389aa,0xfcd9a550,0x7481a72,0x916965}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xbf927ed53e75cec5,0x4fcf60f86035b8d5,0x6166d50986db7484,0xa637ca117147d47e,0xfcd9a550cb3389aa,0x91696507481a72}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x807f,0x9ad4,0xa2d5,0xb571,0xe39a,0x9682,0xbc36,0x2fde,0x80a1,0x55a9,0x2092,0x49c9,0x4269,0x7a75,0x7411,0xde67,0x75d6,0x9689,0xaf23,0x7b89,0x819b,0x60e4,0x276f,0x1a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9ad4807f,0xb571a2d5,0x9682e39a,0x2fdebc36,0x55a980a1,0x49c92092,0x7a754269,0xde677411,0x968975d6,0x7b89af23,0x60e4819b,0x1a276f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb571a2d59ad4807f,0x2fdebc369682e39a,0x49c9209255a980a1,0xde6774117a754269,0x7b89af23968975d6,0x1a276f60e4819b}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc876,0x218,0xd2f9,0x49ab,0x3ef7,0x1ab3,0x4dc4,0xf981,0x381a,0xdb9c,0x12c5,0xc5e1,0x5b21,0xc148,0x9d31,0x5369,0x5706,0x2277,0xcf63,0x1c7d,0xe151,0xd481,0xcd50,0x4b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x218c876,0x49abd2f9,0x1ab33ef7,0xf9814dc4,0xdb9c381a,0xc5e112c5,0xc1485b21,0x53699d31,0x22775706,0x1c7dcf63,0xd481e151,0x4bcd50}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x49abd2f90218c876,0xf9814dc41ab33ef7,0xc5e112c5db9c381a,0x53699d31c1485b21,0x1c7dcf6322775706,0x4bcd50d481e151}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2191,0x98ac,0x5708,0x78cc,0x63fa,0xe9c9,0x168,0x4c13,0x19e6,0xe2be,0xd010,0xd629,0xf3ff,0x3b08,0x7c2b,0xed3e,0x356e,0x5d94,0x253b,0xa0f5,0xc652,0x60d4,0x9dac,0x63}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x98ac2191,0x78cc5708,0xe9c963fa,0x4c130168,0xe2be19e6,0xd629d010,0x3b08f3ff,0xed3e7c2b,0x5d94356e,0xa0f5253b,0x60d4c652,0x639dac}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x78cc570898ac2191,0x4c130168e9c963fa,0xd629d010e2be19e6,0xed3e7c2b3b08f3ff,0xa0f5253b5d94356e,0x639dac60d4c652}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x7f81,0x652b,0x5d2a,0x4a8e,0x1c65,0x697d,0x43c9,0xd021,0x7f5e,0xaa56,0xdf6d,0xb636,0xbd96,0x858a,0x8bee,0x2198,0x8a29,0x6976,0x50dc,0x8476,0x7e64,0x9f1b,0xd890,0xe5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x652b7f81,0x4a8e5d2a,0x697d1c65,0xd02143c9,0xaa567f5e,0xb636df6d,0x858abd96,0x21988bee,0x69768a29,0x847650dc,0x9f1b7e64,0xe5d890}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4a8e5d2a652b7f81,0xd02143c9697d1c65,0xb636df6daa567f5e,0x21988bee858abd96,0x847650dc69768a29,0xe5d8909f1b7e64}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x29a3,0x7abe,0x2ef1,0x26a6,0xa5a5,0x54e6,0xf4c8,0xb56f,0x2bae,0x1aae,0xd9ba,0x94ed,0x2df5,0x882c,0xc686,0x6f64,0x29f7,0x850a,0x9eee,0x617c,0x5678,0x3108,0x8ebe,0x86}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7abe29a3,0x26a62ef1,0x54e6a5a5,0xb56ff4c8,0x1aae2bae,0x94edd9ba,0x882c2df5,0x6f64c686,0x850a29f7,0x617c9eee,0x31085678,0x868ebe}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x26a62ef17abe29a3,0xb56ff4c854e6a5a5,0x94edd9ba1aae2bae,0x6f64c686882c2df5,0x617c9eee850a29f7,0x868ebe31085678}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc2a5,0x8ce6,0x3729,0xaa2b,0xb9d2,0xbf43,0xe2be,0xaf25,0x4ffb,0xec8e,0xf85a,0x94c6,0xe027,0x3c64,0xf4ad,0xf63,0x86ba,0xa244,0xde0f,0x2390,0x11e1,0xdd7c,0xcd4c,0x33}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8ce6c2a5,0xaa2b3729,0xbf43b9d2,0xaf25e2be,0xec8e4ffb,0x94c6f85a,0x3c64e027,0xf63f4ad,0xa24486ba,0x2390de0f,0xdd7c11e1,0x33cd4c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xaa2b37298ce6c2a5,0xaf25e2bebf43b9d2,0x94c6f85aec8e4ffb,0xf63f4ad3c64e027,0x2390de0fa24486ba,0x33cd4cdd7c11e1}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1893,0xa4bf,0x1eb8,0x9df0,0x91b1,0x17b0,0xe4ae,0x6ba1,0x35fd,0xd56b,0xc03f,0x82a8,0x99cd,0x30be,0xf3a3,0x181e,0x879b,0x518,0x3e8,0xed0e,0xc0ff,0xe2d6,0xe29c,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa4bf1893,0x9df01eb8,0x17b091b1,0x6ba1e4ae,0xd56b35fd,0x82a8c03f,0x30be99cd,0x181ef3a3,0x518879b,0xed0e03e8,0xe2d6c0ff,0x1e29c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9df01eb8a4bf1893,0x6ba1e4ae17b091b1,0x82a8c03fd56b35fd,0x181ef3a330be99cd,0xed0e03e80518879b,0x1e29ce2d6c0ff}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd65d,0x8541,0xd10e,0xd959,0x5a5a,0xab19,0xb37,0x4a90,0xd451,0xe551,0x2645,0x6b12,0xd20a,0x77d3,0x3979,0x909b,0xd608,0x7af5,0x6111,0x9e83,0xa987,0xcef7,0x7141,0x79}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8541d65d,0xd959d10e,0xab195a5a,0x4a900b37,0xe551d451,0x6b122645,0x77d3d20a,0x909b3979,0x7af5d608,0x9e836111,0xcef7a987,0x797141}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd959d10e8541d65d,0x4a900b37ab195a5a,0x6b122645e551d451,0x909b397977d3d20a,0x9e8361117af5d608,0x797141cef7a987}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x187f, 0x1c7d, 0x163a, 0x54d, 0x197f, 0x1a5d, 0x1833, 0x823, 0x11ae, 0x225, 0xd6d, 0x1627, 0xd6c, 0x1625, 0xa36, 0xf64, 0xcfa, 0x327, 0x188d, 0xfab, 0x56b, 0x91c, 0x1cc5, 0x4fe, 0x2ae, 0x181a, 0x195, 0x1288, 0x10fc, 0x3} +#elif RADIX == 32 +{0xe3ee1fc, 0xca9b63a, 0x3d2ee5f, 0xb904783, 0x6d112c6, 0x5b2c4ed, 0xa36b12b, 0xb3e9ec8, 0x788d193, 0xe15adf5, 0xfdcc548, 0xd0ab89, 0x510195c, 0x1c3f2} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xee5fca9b63ae3ee1, 0xd112c6b9047833d2, 0x8a36b12b5b2c4ed6, 0xdf5788d193b3e9ec, 0xab89fdcc548e15a, 0x40183f2510195c0d} +#else +{0x3f9536c75c7dc3, 0x1ae411e0cf4bb9, 0x5ad96276b68896, 0x3b3e9ec8a36b12, 0x1c2b5beaf11a32, 0x342ae27f73152, 0xfcc1f92880cae} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0xe9d, 0x171f, 0xd8e, 0x1953, 0xe5f, 0x1e97, 0x1e0c, 0x1208, 0xc6b, 0x889, 0x1b5b, 0x589, 0xb5b, 0x1589, 0x28d, 0x13d9, 0x1b3e, 0x8c9, 0x1e23, 0x1bea, 0x15a, 0xa47, 0x1731, 0x113f, 0x10ab, 0xe06, 0x65, 0x4a2, 0x83f, 0x1a} +#elif RADIX == 32 +{0xb8fba77, 0xf2a6d8e, 0xcf4bb97, 0xae411e0, 0x5b444b1, 0xd6cb13b, 0x28dac4a, 0xecfa7b2, 0x5e23464, 0x3856b7d, 0x7f73152, 0x342ae2, 0x9440657, 0xf0fc} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xbb97f2a6d8eb8fba, 0xb444b1ae411e0cf4, 0x228dac4ad6cb13b5, 0xb7d5e23464ecfa7b, 0x42ae27f731523856, 0x1e460fc944065703} +#else +{0x2fe54db1d71f74, 0x46b9047833d2ee, 0x56b6589dada225, 0x4ecfa7b228dac4, 0x470ad6fabc468c, 0x40d0ab89fdcc54, 0xf2307e4a2032b} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x237, 0xee8, 0xd8c, 0xafb, 0x18cd, 0x1ce1, 0x162a, 0x11c9, 0x1bbc, 0x1415, 0x1c35, 0x1d0c, 0x1104, 0x1558, 0x9d, 0xb17, 0x1097, 0x16d2, 0xc02, 0x1573, 0x1c5f, 0x1bec, 0x1a73, 0x1dfe, 0x1923, 0x18d6, 0x221, 0x11ee, 0x1581, 0xb} +#elif RADIX == 32 +{0x77408dd, 
0x55f6d8c, 0xae70e33, 0xf239362, 0x35a0aee, 0x413a19c, 0x9daac4, 0x425d62e, 0x6c02b69, 0x6717eae, 0xfda73df, 0x6b648fb, 0x3dc221c, 0x1c606} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xe3355f6d8c77408, 0x5a0aeef239362ae7, 0xe09daac4413a19c3, 0xeae6c02b69425d62, 0x648fbfda73df6717, 0x38396063dc221c6b} +#else +{0x66abedb18ee811, 0x3bc8e4d8ab9c38, 0x2209d0ce1ad057, 0x1425d62e09daac, 0x6ce2fd5cd8056d, 0x1ad923eff69cf7, 0xbdcb031ee110e} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x16a4, 0x11f0, 0x446, 0x1b2b, 0x129e, 0x1b52, 0x25, 0x18e4, 0x15d7, 0x545, 0x1502, 0x3af, 0x1b45, 0xff3, 0x1423, 0x1574, 0x1c5a, 0xff0, 0x1663, 0x114b, 0xc99, 0x1c89, 0x11f0, 0x15fd, 0x17a1, 0x14dd, 0x17f7, 0x1451, 0x5af, 0x17} +#elif RADIX == 32 +{0x8f85a92, 0xb656446, 0x5da94a7, 0x5f1c802, 0x22a2d7, 0xd1475f5, 0x4237f9e, 0x716aae9, 0x76637f8, 0x4b26629, 0xfb1f0e4, 0x6ede86b, 0x8a37f7a, 0x376be} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x94a7b6564468f85a, 0x22a2d75f1c8025da, 0x94237f9ed1475f50, 0x62976637f8716aae, 0xde86bfb1f0e44b26, 0x25496be8a37f7a6e} +#else +{0x4f6cac88d1f0b5, 0x5d7c7200976a52, 0x768a3afa811516, 0x716aae94237f9, 0x964cc52ecc6ff, 0x1bb7a1afec7c39, 0x264b5f451bfbd} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0xc89, 0x16f8, 0x1bcf, 0x14c7, 0x1c81, 0x1c37, 0x3b1, 0xb00, 0x5e, 0xdb5, 0x920, 0x14db, 0x41, 0x1bd7, 0x159d, 0x1889, 0x1318, 0x95d, 0x13d5, 0x46b, 0x18bd, 0x1bf1, 0x1bf6, 0x1ba2, 0x2d6, 0x1b06, 0x17c1, 0x1a40, 0x1f02, 0x11} +#elif RADIX == 32 +{0xb7c3226, 0x698fbcf, 0x1e1bf20, 0x796003b, 0x206da81, 0x1069b69, 0x59ddeb8, 0xcc63113, 0x73d54ae, 0x8e2f48d, 0x45bf6df, 0x830b5b7, 0x4817c1d, 0xdc0b} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xbf20698fbcfb7c32, 0x6da81796003b1e1, 0x359ddeb81069b692, 0x48d73d54aecc6311, 0xb5b745bf6df8e2f, 0x9b3c0b4817c1d83} +#else +{0x40d31f79f6f864, 0x5e5800ec786fc, 0x40834db49036d4, 0x6cc6311359ddeb, 0x71c5e91ae7aa95, 0x60c2d6dd16fdb7, 0x4d9e05a40be0e} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x8c0, 0x125b, 0x1d1c, 0x8a8, 0x1c41, 0xbb7, 0x15bf, 0x15ec, 0x959, 0x1fc5, 0xc2, 0x2ff, 0x1dd2, 0x1c02, 0x9db, 0x139d, 0x9a, 0x1654, 0xce7, 0xf6d, 0x13e5, 0x19be, 0x1f28, 0x161c, 0xe9f, 0x940, 0x77d, 0x162c, 0x385, 0x4} +#elif RADIX == 32 +{0x92da300, 0x5151d1c, 0xf5dbf10, 0x66bd95b, 0xc2fe2a5, 0x7485fe0, 0x9dbe017, 0x26a73a, 0xace7b2a, 0xf4f95ed, 0x39f28cd, 0xa03a7ec, 0xc5877d4, 0x20e16} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xbf105151d1c92da3, 0x2fe2a566bd95bf5d, 0xa9dbe0177485fe0c, 0x5edace7b2a026a73, 0x3a7ec39f28cdf4f9, 0x20e16c5877d4a0} +#else +{0x20a2a3a3925b46, 0x159af656fd76fc, 0x3ba42ff0617f15, 
0x2026a73a9dbe01, 0x3e9f2bdb59cf65, 0x280e9fb0e7ca33, 0x1070b62c3bea} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0xd30, 0x670, 0x165f, 0x18f8, 0x3fe, 0x11e5, 0x663, 0x270, 0x18cb, 0x42b, 0x11c3, 0xe0a, 0x4fc, 0x18ad, 0xfd0, 0x3fa, 0x1957, 0x1544, 0x941, 0x181e, 0x661, 0x18b9, 0x74a, 0xa70, 0x866, 0x11f8, 0xd20, 0xae3, 0x19b8, 0xb} +#elif RADIX == 32 +{0x33834c1, 0xb1f165f, 0x38f28ff, 0x2c4e066, 0xc3215e3, 0x3f1c151, 0xfd0c569, 0x655c7f4, 0xc941aa2, 0xc998703, 0xe074ac5, 0xfc21994, 0x5c6d208, 0x1d6e1} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x28ffb1f165f33834, 0x3215e32c4e06638f, 0x4fd0c5693f1c151c, 0x703c941aa2655c7f, 0x21994e074ac5c998, 0x311e6e15c6d208fc} +#else +{0x7f63e2cbe67069, 0xcb138198e3ca3, 0x49f8e0a8e190af, 0x2655c7f4fd0c56, 0x39330e07928354, 0x3f08665381d2b1, 0x84f370ae36904} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1f47, 0x9e3, 0x5d, 0xdc6, 0x18a3, 0x1c99, 0x1253, 0x179f, 0x16b, 0x1b87, 0x27a, 0x9f8, 0x1064, 0x9ed, 0xe66, 0x47d, 0x4e9, 0x1805, 0x1349, 0x40, 0x1bbd, 0x7f6, 0x1c57, 0x1f9f, 0x11e9, 0x14cf, 0xe61, 0x1892, 0x833, 0x10} +#elif RADIX == 32 +{0x4f1fd1e, 0xdb8c05d, 0x3e4ce28, 0xaef3f25, 0x7adc385, 0x1913f02, 0xe664f6c, 0x93a48fa, 0x1349c02, 0xb6ef408, 0x3fc573f, 0x67c7a7f, 0x124e61a, 0xcf} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xce28db8c05d4f1fd, 0xadc385aef3f253e4, 0xae664f6c1913f027, 0x4081349c0293a48f, 0xc7a7f3fc573fb6ef, 0x79e0cf124e61a67} +#else +{0x51b7180ba9e3fa, 0x16bbcfc94f9338, 0x60c89f813d6e1c, 0x293a48fae664f6, 0x76dde810269380, 0x19f1e9fcff15cf, 0x3cf067892730d} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x5419,0x8a5a,0x8d5e,0x1367,0xae77,0x3903,0xcf47,0xab31,0xf30c,0x9219,0xed73,0x2055,0xcc84,0x4098,0x7046,0x4d6f,0x5c68,0xe641,0x224,0xd5ed,0x977a,0xdfcd,0x15c8,0xe4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8a5a5419,0x13678d5e,0x3903ae77,0xab31cf47,0x9219f30c,0x2055ed73,0x4098cc84,0x4d6f7046,0xe6415c68,0xd5ed0224,0xdfcd977a,0xe415c8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x13678d5e8a5a5419,0xab31cf473903ae77,0x2055ed739219f30c,0x4d6f70464098cc84,0xd5ed0224e6415c68,0xe415c8dfcd977a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x92ea,0x29a5,0x41ba,0x7e17,0x33c,0xaeb2,0x4b64,0x1b0d,0x4be,0xe57e,0x2d2e,0x1554,0xe7f7,0xea1e,0x4267,0x7d2a,0x17cd,0xd234,0xc388,0x147a,0x43e6,0xaa63,0x83bd,0x4f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x29a592ea,0x7e1741ba,0xaeb2033c,0x1b0d4b64,0xe57e04be,0x15542d2e,0xea1ee7f7,0x7d2a4267,0xd23417cd,0x147ac388,0xaa6343e6,0x4f83bd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7e1741ba29a592ea,0x1b0d4b64aeb2033c,0x15542d2ee57e04be,0x7d2a4267ea1ee7f7,0x147ac388d23417cd,0x4f83bdaa6343e6}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa8b7,0x5b17,0x819c,0x465e,0xa6fb,0xb2a1,0x91c3,0x953,0x486,0x4ed4,0xc367,0x75ba,0xcc01,0x115f,0xe242,0xe751,0x4748,0xbf53,0x4ec0,0x950a,0xe08e,0x7ea2,0xec82,0x7d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5b17a8b7,0x465e819c,0xb2a1a6fb,0x95391c3,0x4ed40486,0x75bac367,0x115fcc01,0xe751e242,0xbf534748,0x950a4ec0,0x7ea2e08e,0x7dec82}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x465e819c5b17a8b7,0x95391c3b2a1a6fb,0x75bac3674ed40486,0xe751e242115fcc01,0x950a4ec0bf534748,0x7dec827ea2e08e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xabe7,0x75a5,0x72a1,0xec98,0x5188,0xc6fc,0x30b8,0x54ce,0xcf3,0x6de6,0x128c,0xdfaa,0x337b,0xbf67,0x8fb9,0xb290,0xa397,0x19be,0xfddb,0x2a12,0x6885,0x2032,0xea37,0x1b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x75a5abe7,0xec9872a1,0xc6fc5188,0x54ce30b8,0x6de60cf3,0xdfaa128c,0xbf67337b,0xb2908fb9,0x19bea397,0x2a12fddb,0x20326885,0x1bea37}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xec9872a175a5abe7,0x54ce30b8c6fc5188,0xdfaa128c6de60cf3,0xb2908fb9bf67337b,0x2a12fddb19bea397,0x1bea3720326885}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x703,0xe86d,0xe89e,0xbcf8,0x675b,0xe250,0x9f65,0xe8ec,0x2c83,0x11ca,0x4751,0x192a,0xf9d8,0xf46a,0xeb89,0x4f40,0x2a2c,0xdcf,0xfff9,0x13f9,0x24e7,0x8348,0xb9af,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe86d0703,0xbcf8e89e,0xe250675b,0xe8ec9f65,0x11ca2c83,0x192a4751,0xf46af9d8,0x4f40eb89,0xdcf2a2c,0x13f9fff9,0x834824e7,0x6b9af}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xbcf8e89ee86d0703,0xe8ec9f65e250675b,0x192a475111ca2c83,0x4f40eb89f46af9d8,0x13f9fff90dcf2a2c,0x6b9af834824e7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e40,0xb548,0xf9c7,0x6598,0x7e33,0x25c6,0x6cbf,0x2ef2,0xa630,0xdd99,0xaef2,0xf320,0x4a2,0x93a7,0x4541,0x2f7c,0xbf45,0x1a7a,0x24f4,0x52a9,0xd3b4,0xa12a,0x9d37,0xb0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb5481e40,0x6598f9c7,0x25c67e33,0x2ef26cbf,0xdd99a630,0xf320aef2,0x93a704a2,0x2f7c4541,0x1a7abf45,0x52a924f4,0xa12ad3b4,0xb09d37}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6598f9c7b5481e40,0x2ef26cbf25c67e33,0xf320aef2dd99a630,0x2f7c454193a704a2,0x52a924f41a7abf45,0xb09d37a12ad3b4}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x1e1,0x2283,0x3774,0x83d4,0xf33f,0x1fc,0x2790,0xde59,0xe89d,0xc942,0x2c1b,0x6574,0x55b1,0x3a3c,0x9f11,0xbb0a,0x6813,0xa69,0xff9d,0xc94c,0xdede,0xce6b,0x18c6,0xa9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x228301e1,0x83d43774,0x1fcf33f,0xde592790,0xc942e89d,0x65742c1b,0x3a3c55b1,0xbb0a9f11,0xa696813,0xc94cff9d,0xce6bdede,0xa918c6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x83d43774228301e1,0xde59279001fcf33f,0x65742c1bc942e89d,0xbb0a9f113a3c55b1,0xc94cff9d0a696813,0xa918c6ce6bdede}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf8fd,0x1792,0x1761,0x4307,0x98a4,0x1daf,0x609a,0x1713,0xd37c,0xee35,0xb8ae,0xe6d5,0x627,0xb95,0x1476,0xb0bf,0xd5d3,0xf230,0x6,0xec06,0xdb18,0x7cb7,0x4650,0xf9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1792f8fd,0x43071761,0x1daf98a4,0x1713609a,0xee35d37c,0xe6d5b8ae,0xb950627,0xb0bf1476,0xf230d5d3,0xec060006,0x7cb7db18,0xf94650}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x430717611792f8fd,0x1713609a1daf98a4,0xe6d5b8aeee35d37c,0xb0bf14760b950627,0xec060006f230d5d3,0xf946507cb7db18}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x5419,0x8a5a,0x8d5e,0x1367,0xae77,0x3903,0xcf47,0xab31,0xf30c,0x9219,0xed73,0x2055,0xcc84,0x4098,0x7046,0x4d6f,0x5c68,0xe641,0x224,0xd5ed,0x977a,0xdfcd,0x15c8,0xe4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8a5a5419,0x13678d5e,0x3903ae77,0xab31cf47,0x9219f30c,0x2055ed73,0x4098cc84,0x4d6f7046,0xe6415c68,0xd5ed0224,0xdfcd977a,0xe415c8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x13678d5e8a5a5419,0xab31cf473903ae77,0x2055ed739219f30c,0x4d6f70464098cc84,0xd5ed0224e6415c68,0xe415c8dfcd977a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x92ea,0x29a5,0x41ba,0x7e17,0x33c,0xaeb2,0x4b64,0x1b0d,0x4be,0xe57e,0x2d2e,0x1554,0xe7f7,0xea1e,0x4267,0x7d2a,0x17cd,0xd234,0xc388,0x147a,0x43e6,0xaa63,0x83bd,0x4f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x29a592ea,0x7e1741ba,0xaeb2033c,0x1b0d4b64,0xe57e04be,0x15542d2e,0xea1ee7f7,0x7d2a4267,0xd23417cd,0x147ac388,0xaa6343e6,0x4f83bd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7e1741ba29a592ea,0x1b0d4b64aeb2033c,0x15542d2ee57e04be,0x7d2a4267ea1ee7f7,0x147ac388d23417cd,0x4f83bdaa6343e6}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa8b7,0x5b17,0x819c,0x465e,0xa6fb,0xb2a1,0x91c3,0x953,0x486,0x4ed4,0xc367,0x75ba,0xcc01,0x115f,0xe242,0xe751,0x4748,0xbf53,0x4ec0,0x950a,0xe08e,0x7ea2,0xec82,0x7d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5b17a8b7,0x465e819c,0xb2a1a6fb,0x95391c3,0x4ed40486,0x75bac367,0x115fcc01,0xe751e242,0xbf534748,0x950a4ec0,0x7ea2e08e,0x7dec82}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x465e819c5b17a8b7,0x95391c3b2a1a6fb,0x75bac3674ed40486,0xe751e242115fcc01,0x950a4ec0bf534748,0x7dec827ea2e08e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xabe7,0x75a5,0x72a1,0xec98,0x5188,0xc6fc,0x30b8,0x54ce,0xcf3,0x6de6,0x128c,0xdfaa,0x337b,0xbf67,0x8fb9,0xb290,0xa397,0x19be,0xfddb,0x2a12,0x6885,0x2032,0xea37,0x1b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x75a5abe7,0xec9872a1,0xc6fc5188,0x54ce30b8,0x6de60cf3,0xdfaa128c,0xbf67337b,0xb2908fb9,0x19bea397,0x2a12fddb,0x20326885,0x1bea37}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xec9872a175a5abe7,0x54ce30b8c6fc5188,0xdfaa128c6de60cf3,0xb2908fb9bf67337b,0x2a12fddb19bea397,0x1bea3720326885}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x2d5d,0x46e9,0x4215,0x63b0,0x8358,0xdc91,0x80aa,0x6970,0x4e7d,0x266d,0xc13a,0xe4ea,0x504e,0xbc38,0xdbaf,0x119b,0xa3cc,0x45d8,0x98db,0x7b90,0x3a5b,0xde6a,0x3676,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x46e92d5d,0x63b04215,0xdc918358,0x697080aa,0x266d4e7d,0xe4eac13a,0xbc38504e,0x119bdbaf,0x45d8a3cc,0x7b9098db,0xde6a3a5b,0x83676}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x63b0421546e92d5d,0x697080aadc918358,0xe4eac13a266d4e7d,0x119bdbafbc38504e,0x7b9098db45d8a3cc,0x83676de6a3a5b}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1db1,0x61ae,0x220b,0xc2e,0xa7ee,0xb16a,0x8697,0xf90c,0x7505,0xced5,0x5cf8,0xb601,0x6235,0x27ad,0x9fdf,0x57d0,0xca2,0xa6d2,0x94db,0xb53a,0x8bd2,0xa3ad,0xfe95,0x92}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x61ae1db1,0xc2e220b,0xb16aa7ee,0xf90c8697,0xced57505,0xb6015cf8,0x27ad6235,0x57d09fdf,0xa6d20ca2,0xb53a94db,0xa3ad8bd2,0x92fe95}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc2e220b61ae1db1,0xf90c8697b16aa7ee,0xb6015cf8ced57505,0x57d09fdf27ad6235,0xb53a94dba6d20ca2,0x92fe95a3ad8bd2}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa809,0xf0cf,0xb393,0xf0ab,0x181a,0xb5bc,0x1833,0xb0ea,0xff0e,0x3088,0xb299,0x4f5c,0x5a20,0x5b86,0xad7b,0x9ffd,0x2216,0x4e4c,0xb8eb,0x989,0x712f,0xa798,0x8e8f,0x45}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf0cfa809,0xf0abb393,0xb5bc181a,0xb0ea1833,0x3088ff0e,0x4f5cb299,0x5b865a20,0x9ffdad7b,0x4e4c2216,0x989b8eb,0xa798712f,0x458e8f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf0abb393f0cfa809,0xb0ea1833b5bc181a,0x4f5cb2993088ff0e,0x9ffdad7b5b865a20,0x989b8eb4e4c2216,0x458e8fa798712f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd2a3,0xb916,0xbdea,0x9c4f,0x7ca7,0x236e,0x7f55,0x968f,0xb182,0xd992,0x3ec5,0x1b15,0xafb1,0x43c7,0x2450,0xee64,0x5c33,0xba27,0x6724,0x846f,0xc5a4,0x2195,0xc989,0xf7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb916d2a3,0x9c4fbdea,0x236e7ca7,0x968f7f55,0xd992b182,0x1b153ec5,0x43c7afb1,0xee642450,0xba275c33,0x846f6724,0x2195c5a4,0xf7c989}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c4fbdeab916d2a3,0x968f7f55236e7ca7,0x1b153ec5d992b182,0xee64245043c7afb1,0x846f6724ba275c33,0xf7c9892195c5a4}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x17bf, 0xbb8, 0x485, 0x1296, 0x1b32, 0xe0b, 0x13f9, 0x15f5, 0x1a2b, 0xff6, 0xf53, 0x70a, 0x36c, 0x40f, 0xa89, 0xca6, 0x37e, 0x1488, 0x1796, 0x1be2, 0x1e0f, 0xf10, 0x1628, 0x1a20, 0x1ee7, 0x1e53, 0x48c, 0x94, 0xd53, 0xd} +#elif RADIX == 32 +{0x5dc5efd, 0xa52c485, 0x9705ecc, 0xaebeb3f, 0x537fb68, 0xdb0e14f, 0xa892078, 0xdf994c, 0x5796a44, 0x8783f7c, 0x4162878, 0x29fb9f4, 0x12848cf, 0x2a54c} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x5ecca52c4855dc5e, 0x37fb68aebeb3f970, 0xca892078db0e14f5, 0xf7c5796a440df994, 0xfb9f441628788783, 0x406754c12848cf29} +#else +{0x194a5890abb8bd, 0x22bafacfe5c17b, 0x46d870a7a9bfdb, 0x40df994ca89207, 0x10f07ef8af2d48, 0x4a7ee7d1058a1e, 0xff3aa60942467} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x66d, 0xaee, 0x1121, 0x14a5, 0x1ecc, 0xb82, 0xcfe, 0x1d7d, 0x168a, 0x1bfd, 0x13d4, 0x1c2, 0x18db, 0x903, 0x12a2, 0x1329, 0xdf, 0x1522, 0x15e5, 0x1ef8, 0x783, 0x3c4, 0x58a, 0x1e88, 0x1fb9, 0x794, 0x123, 0x1825, 0x1754, 0x1c} +#elif RADIX == 32 +{0x57719b7, 0x294b121, 0xe5c17b3, 0x2bafacf, 0xd4dfeda, 0x36c3853, 0x2a2481e, 0x37e653, 0x15e5a91, 0x21e0fdf, 0x1058a1e, 0xca7ee7d, 0x4a1233, 0x22d53} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x17b3294b12157719, 0x4dfeda2bafacfe5c, 0x32a2481e36c3853d, 0xfdf15e5a91037e65, 0x7ee7d1058a1e21e0, 0x2e99d5304a1233ca} +#else +{0x665296242aee33, 0x68aebeb3f9705e, 0x71b61c29ea6ff6, 0x1037e6532a2481, 0x443c1fbe2bcb52, 0x729fb9f4416287, 0x70cea98250919} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x3e9, 0x9f6, 0x1c50, 0x27e, 0xa85, 0x39c, 0xa7b, 0x177c, 0xdfc, 0x77e, 0x1490, 0x11b8, 0xd2b, 0x17dc, 0xd7c, 0x16a0, 0xe21, 0xb86, 0x15bb, 0x844, 0x146c, 0xe51, 0xc6d, 0x143d, 0x1d2b, 0x1715, 0x18bb, 0xdc8, 0x55d, 0x16} +#elif RADIX == 32 +{0x4fb0fa6, 
0x44fdc50, 0xb1ce2a1, 0xf2ef8a7, 0x903bf37, 0x4ae3714, 0xd7cbee3, 0x3886d40, 0x95bb5c3, 0x8d1b108, 0x7ac6d72, 0x8af4ae8, 0xb918bbb, 0x2f575} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xe2a144fdc504fb0f, 0x3bf37f2ef8a7b1c, 0xd7cbee34ae37149, 0x10895bb5c33886d4, 0xf4ae87ac6d728d1b, 0x2a55575b918bbb8a} +#else +{0x4289fb8a09f61f, 0x5fcbbe29ec738a, 0x1a571b8a481df9, 0x33886d40d7cbee, 0x51a362112b76b8, 0x62bd2ba1eb1b5c, 0x4eaabadc8c5dd} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x793, 0x1095, 0x8d0, 0x676, 0x2be, 0x1a9d, 0x6d6, 0x1d0, 0x112a, 0x18e1, 0x1741, 0xc68, 0x156d, 0x113f, 0x181e, 0x201, 0xcd7, 0xbb7, 0xdb, 0x64c, 0x181e, 0x63, 0x965, 0xf2, 0xc95, 0x50d, 0x1ec2, 0x1c03, 0x5b4, 0x1b} +#elif RADIX == 32 +{0x84a9e4f, 0x8cec8d0, 0x6d4e8af, 0xa83a06d, 0x41c70c4, 0x5b58d17, 0x81e89fd, 0xb35c403, 0x80db5db, 0x1e078c9, 0xe496503, 0x86b2541, 0x807ec22, 0x166d3} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xe8af8cec8d084a9e, 0x1c70c4a83a06d6d4, 0x381e89fd5b58d174, 0x8c980db5dbb35c40, 0xb2541e4965031e07, 0x14256d3807ec2286} +#else +{0x5f19d91a10953c, 0x12a0e81b5b53a2, 0x6adac68ba0e386, 0x3b35c40381e89f, 0x63c0f19301b6bb, 0x21ac9507925940, 0xa12b69c03f611} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x71d, 0xf0e, 0x506, 0x1aec, 0x3f6, 0x2c1, 0x17dd, 0x43f, 0x1552, 0x1488, 0x10c3, 0x5ea, 0xfd4, 0x634, 0x1eb1, 0x1711, 0x1424, 0xeb1, 0xfe1, 0xa0a, 0x165f, 0x5c8, 0x1544, 0x1493, 0x329, 0x19ec, 0x1db4, 0x983, 0x790, 0x1d} +#elif RADIX == 32 +{0x7871c77, 0xb5d8506, 0xd1608fd, 0x4887f7d, 0xc3a4455, 0xf50bd50, 0xeb131a3, 0xd092e23, 0x4fe1758, 0x4597d41, 0x275442e, 0xf60ca69, 0x307db4c, 0x26e41} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x8fdb5d85067871c, 0x3a44554887f7dd16, 0x3eb131a3f50bd50c, 0xd414fe1758d092e2, 0xca69275442e4597, 0x1e5de41307db4cf6} +#else +{0x7b6bb0a0cf0e38, 0x55221fdf745823, 0x1fa85ea861d222, 0xd092e23eb131a, 0x48b2fa829fc2eb, 0x3d8329a49d510b, 0xf2ef20983eda6} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x704, 0x1718, 0x1f41, 0x1569, 0x1353, 0x403, 0x8ba, 0xd3b, 0x1e9a, 0xca6, 0x1433, 0xc05, 0x2dd, 0xf7d, 0x12c8, 0x1109, 0x1797, 0x4e2, 0xf77, 0x569, 0xfcf, 0x1dd4, 0x11a4, 0x1354, 0x1563, 0x14b7, 0x6ad, 0xf7e, 0x251, 0xe} +#elif RADIX == 32 +{0xb8c1c11, 0xead3f41, 0xa201cd4, 0x69a768b, 0x336537a, 0xb7580b4, 0x2c87be8, 0x5e5e213, 0x2f77271, 0xa3f3cad, 0xa91a4ee, 0x5bd58e6, 0xefc6ada, 0x2f945} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x1cd4ead3f41b8c1c, 0x36537a69a768ba20, 0x32c87be8b7580b43, 0xcad2f772715e5e21, 0xd58e6a91a4eea3f3, 0x480945efc6ada5b} +#else +{0x29d5a7e8371838, 0x69a69da2e88073, 0x45bac05a19b29b, 0x15e5e2132c87be, 
0x547e795a5eee4e, 0x16f5639aa4693b, 0x2404a2f7e356d} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0xf6, 0x15a2, 0x1cbc, 0x185c, 0x9a1, 0xc2f, 0x1123, 0x11, 0xda7, 0x1628, 0x41, 0x1163, 0x12f7, 0x9aa, 0x1235, 0x1444, 0x1c4a, 0x3b6, 0xfee, 0x96, 0x1ed, 0x1f4d, 0x5ec, 0x1bf2, 0x1bca, 0x151d, 0x58f, 0x293, 0x960, 0x20} +#elif RADIX == 32 +{0xad103db, 0x70b9cbc, 0x3617a68, 0x9c02312, 0x41b1436, 0xbde2c60, 0x2354d54, 0x712a889, 0xcfee1db, 0x687b412, 0xe45ecfa, 0x8eef2b7, 0x52658fa, 0x3f580} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x7a6870b9cbcad103, 0x1b14369c02312361, 0x92354d54bde2c604, 0x412cfee1db712a88, 0xef2b7e45ecfa687b, 0x37da58052658fa8e} +#else +{0x50e1739795a207, 0x5a7008c48d85e9, 0x25ef163020d8a1, 0x3712a8892354d5, 0x4d0f68259fdc3b, 0x23bbcadf917b3e, 0xbad2c02932c7d} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xbc5, 0xa1d, 0xe8a, 0xe9c, 0x1af1, 0x13b5, 0xa68, 0x4a4, 0x135e, 0x171, 0x716, 0x2c2, 0x1c2b, 0x332, 0x349, 0x138c, 0x168b, 0x21c, 0x1629, 0xb97, 0x186, 0x629, 0x6e8, 0x497, 0x128c, 0x19d2, 0xcc1, 0x121, 0x250, 0x1a} +#elif RADIX == 32 +{0x50eaf17, 0x5d38e8a, 0x89daebc, 0x78948a6, 0x160b8cd, 0xac5847, 0x3491997, 0x5a2e718, 0xf62910e, 0x4861972, 0x2e6e831, 0xe94a309, 0x242cc1c, 0xd940} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xaebc5d38e8a50eaf, 0x60b8cd78948a689d, 0x834919970ac58471, 0x972f62910e5a2e71, 0x4a3092e6e8314861, 0x5e4940242cc1ce9} +#else +{0x78ba71d14a1d5e, 0x35e25229a276ba, 0x38562c238b05c6, 0x65a2e718349199, 0x290c32e5ec5221, 0x3a528c24b9ba0c, 0x2f24a0121660e} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xc761,0xa939,0x45c3,0xca85,0x6b48,0xe2f1,0x7d5b,0x8e01,0xb824,0x1b34,0x31da,0x3fa1,0xaae4,0x143a,0xc0b8,0xe32f,0x843,0x2c29,0xd142,0x4e56,0xd55c,0xc504,0x24cd,0xc5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa939c761,0xca8545c3,0xe2f16b48,0x8e017d5b,0x1b34b824,0x3fa131da,0x143aaae4,0xe32fc0b8,0x2c290843,0x4e56d142,0xc504d55c,0xc524cd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xca8545c3a939c761,0x8e017d5be2f16b48,0x3fa131da1b34b824,0xe32fc0b8143aaae4,0x4e56d1422c290843,0xc524cdc504d55c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe87a,0x6127,0x8903,0x2308,0xfbed,0x12dc,0x3209,0xc898,0x715,0x2ab8,0x446f,0x1078,0x2808,0x2493,0x34a9,0xf92b,0x96f4,0xa3a,0xf860,0xd4ad,0xc8a9,0x410d,0x626b,0xe5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6127e87a,0x23088903,0x12dcfbed,0xc8983209,0x2ab80715,0x1078446f,0x24932808,0xf92b34a9,0xa3a96f4,0xd4adf860,0x410dc8a9,0xe5626b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x230889036127e87a,0xc898320912dcfbed,0x1078446f2ab80715,0xf92b34a924932808,0xd4adf8600a3a96f4,0xe5626b410dc8a9}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa06f,0xcd2e,0x736e,0xdfff,0x28f9,0xe694,0x7c39,0xab16,0x21f5,0xbc4,0x4883,0xe8f6,0xcaae,0xf3c3,0xa9f1,0x8d46,0x16e4,0xa8e2,0x5d95,0x3838,0xa6f1,0x34b7,0x94da,0xde}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcd2ea06f,0xdfff736e,0xe69428f9,0xab167c39,0xbc421f5,0xe8f64883,0xf3c3caae,0x8d46a9f1,0xa8e216e4,0x38385d95,0x34b7a6f1,0xde94da}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdfff736ecd2ea06f,0xab167c39e69428f9,0xe8f648830bc421f5,0x8d46a9f1f3c3caae,0x38385d95a8e216e4,0xde94da34b7a6f1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x389f,0x56c6,0xba3c,0x357a,0x94b7,0x1d0e,0x82a4,0x71fe,0x47db,0xe4cb,0xce25,0xc05e,0x551b,0xebc5,0x3f47,0x1cd0,0xf7bc,0xd3d6,0x2ebd,0xb1a9,0x2aa3,0x3afb,0xdb32,0x3a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x56c6389f,0x357aba3c,0x1d0e94b7,0x71fe82a4,0xe4cb47db,0xc05ece25,0xebc5551b,0x1cd03f47,0xd3d6f7bc,0xb1a92ebd,0x3afb2aa3,0x3adb32}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x357aba3c56c6389f,0x71fe82a41d0e94b7,0xc05ece25e4cb47db,0x1cd03f47ebc5551b,0xb1a92ebdd3d6f7bc,0x3adb323afb2aa3}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac65,0x6102,0xe1f0,0x7b39,0x64be,0xff4d,0x8256,0xd11b,0x4645,0x7a89,0x814c,0x66e7,0x77a,0xc4d8,0xe691,0x1f42,0xfdb9,0x547b,0x752,0x18d9,0x9279,0xe604,0xbed4,0xec}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6102ac65,0x7b39e1f0,0xff4d64be,0xd11b8256,0x7a894645,0x66e7814c,0xc4d8077a,0x1f42e691,0x547bfdb9,0x18d90752,0xe6049279,0xecbed4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7b39e1f06102ac65,0xd11b8256ff4d64be,0x66e7814c7a894645,0x1f42e691c4d8077a,0x18d90752547bfdb9,0xecbed4e6049279}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3380,0xe477,0x9e18,0x218d,0xddc6,0x4cc5,0xb33f,0x59e7,0xb291,0xa1a1,0x8f77,0x92a2,0x480e,0x82af,0x40f1,0x5d48,0x83b0,0x4229,0xcb9e,0xff7a,0x2e32,0xa78,0x71fc,0x16}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe4773380,0x218d9e18,0x4cc5ddc6,0x59e7b33f,0xa1a1b291,0x92a28f77,0x82af480e,0x5d4840f1,0x422983b0,0xff7acb9e,0xa782e32,0x1671fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x218d9e18e4773380,0x59e7b33f4cc5ddc6,0x92a28f77a1a1b291,0x5d4840f182af480e,0xff7acb9e422983b0,0x1671fc0a782e32}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xbb17,0xaa62,0x774e,0x2e59,0xe440,0xebce,0x874e,0xbfdb,0x3afd,0xa7ba,0xded2,0x78aa,0x7568,0xcfed,0x5633,0xa1de,0x4c5e,0x5796,0x5727,0xec25,0xac0a,0xce9c,0x3f13,0x98}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xaa62bb17,0x2e59774e,0xebcee440,0xbfdb874e,0xa7ba3afd,0x78aaded2,0xcfed7568,0xa1de5633,0x57964c5e,0xec255727,0xce9cac0a,0x983f13}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2e59774eaa62bb17,0xbfdb874eebcee440,0x78aaded2a7ba3afd,0xa1de5633cfed7568,0xec25572757964c5e,0x983f13ce9cac0a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x539b,0x9efd,0x1e0f,0x84c6,0x9b41,0xb2,0x7da9,0x2ee4,0xb9ba,0x8576,0x7eb3,0x9918,0xf885,0x3b27,0x196e,0xe0bd,0x246,0xab84,0xf8ad,0xe726,0x6d86,0x19fb,0x412b,0x13}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9efd539b,0x84c61e0f,0xb29b41,0x2ee47da9,0x8576b9ba,0x99187eb3,0x3b27f885,0xe0bd196e,0xab840246,0xe726f8ad,0x19fb6d86,0x13412b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x84c61e0f9efd539b,0x2ee47da900b29b41,0x99187eb38576b9ba,0xe0bd196e3b27f885,0xe726f8adab840246,0x13412b19fb6d86}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc761,0xa939,0x45c3,0xca85,0x6b48,0xe2f1,0x7d5b,0x8e01,0xb824,0x1b34,0x31da,0x3fa1,0xaae4,0x143a,0xc0b8,0xe32f,0x843,0x2c29,0xd142,0x4e56,0xd55c,0xc504,0x24cd,0xc5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa939c761,0xca8545c3,0xe2f16b48,0x8e017d5b,0x1b34b824,0x3fa131da,0x143aaae4,0xe32fc0b8,0x2c290843,0x4e56d142,0xc504d55c,0xc524cd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xca8545c3a939c761,0x8e017d5be2f16b48,0x3fa131da1b34b824,0xe32fc0b8143aaae4,0x4e56d1422c290843,0xc524cdc504d55c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe87a,0x6127,0x8903,0x2308,0xfbed,0x12dc,0x3209,0xc898,0x715,0x2ab8,0x446f,0x1078,0x2808,0x2493,0x34a9,0xf92b,0x96f4,0xa3a,0xf860,0xd4ad,0xc8a9,0x410d,0x626b,0xe5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6127e87a,0x23088903,0x12dcfbed,0xc8983209,0x2ab80715,0x1078446f,0x24932808,0xf92b34a9,0xa3a96f4,0xd4adf860,0x410dc8a9,0xe5626b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x230889036127e87a,0xc898320912dcfbed,0x1078446f2ab80715,0xf92b34a924932808,0xd4adf8600a3a96f4,0xe5626b410dc8a9}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa06f,0xcd2e,0x736e,0xdfff,0x28f9,0xe694,0x7c39,0xab16,0x21f5,0xbc4,0x4883,0xe8f6,0xcaae,0xf3c3,0xa9f1,0x8d46,0x16e4,0xa8e2,0x5d95,0x3838,0xa6f1,0x34b7,0x94da,0xde}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcd2ea06f,0xdfff736e,0xe69428f9,0xab167c39,0xbc421f5,0xe8f64883,0xf3c3caae,0x8d46a9f1,0xa8e216e4,0x38385d95,0x34b7a6f1,0xde94da}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdfff736ecd2ea06f,0xab167c39e69428f9,0xe8f648830bc421f5,0x8d46a9f1f3c3caae,0x38385d95a8e216e4,0xde94da34b7a6f1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x389f,0x56c6,0xba3c,0x357a,0x94b7,0x1d0e,0x82a4,0x71fe,0x47db,0xe4cb,0xce25,0xc05e,0x551b,0xebc5,0x3f47,0x1cd0,0xf7bc,0xd3d6,0x2ebd,0xb1a9,0x2aa3,0x3afb,0xdb32,0x3a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x56c6389f,0x357aba3c,0x1d0e94b7,0x71fe82a4,0xe4cb47db,0xc05ece25,0xebc5551b,0x1cd03f47,0xd3d6f7bc,0xb1a92ebd,0x3afb2aa3,0x3adb32}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x357aba3c56c6389f,0x71fe82a41d0e94b7,0xc05ece25e4cb47db,0x1cd03f47ebc5551b,0xb1a92ebdd3d6f7bc,0x3adb323afb2aa3}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xb919,0xcfad,0xeb7f,0x81f8,0x4d97,0xf272,0x4300,0xdd38,0x1b01,0x826,0x1894,0x3e43,0x7310,0xa84,0x4161,0x7c63,0xec4,0x9625,0xe475,0xadc9,0x5a7,0xfa6a,0xb7e3,0x7e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcfadb919,0x81f8eb7f,0xf2724d97,0xdd384300,0x8261b01,0x3e431894,0xa847310,0x7c634161,0x96250ec4,0xadc9e475,0xfa6a05a7,0x7eb7e3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x81f8eb7fcfadb919,0xdd384300f2724d97,0x3e43189408261b01,0x7c6341610a847310,0xadc9e47596250ec4,0x7eb7e3fa6a05a7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e83,0xade4,0x9d21,0x2e51,0x42e5,0xd3,0xac79,0xe0a8,0x32e2,0xfcf2,0xb504,0xc941,0xa0d0,0x8016,0x5485,0x3331,0xabd7,0xc296,0xf76e,0xef5,0xce39,0x8e31,0x165c,0x56}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xade41e83,0x2e519d21,0xd342e5,0xe0a8ac79,0xfcf232e2,0xc941b504,0x8016a0d0,0x33315485,0xc296abd7,0xef5f76e,0x8e31ce39,0x56165c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2e519d21ade41e83,0xe0a8ac7900d342e5,0xc941b504fcf232e2,0x333154858016a0d0,0xef5f76ec296abd7,0x56165c8e31ce39}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xdd11,0x6e27,0xfbdb,0xf5d9,0xd6cb,0x9fef,0xc59a,0x7a4,0xfbd,0x5c3e,0xbc2,0xd091,0x6546,0xc9d0,0x193e,0x93fa,0x776,0x2763,0xdecd,0xbbe3,0xcec1,0x6abf,0x9070,0x66}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6e27dd11,0xf5d9fbdb,0x9fefd6cb,0x7a4c59a,0x5c3e0fbd,0xd0910bc2,0xc9d06546,0x93fa193e,0x27630776,0xbbe3decd,0x6abfcec1,0x669070}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf5d9fbdb6e27dd11,0x7a4c59a9fefd6cb,0xd0910bc25c3e0fbd,0x93fa193ec9d06546,0xbbe3decd27630776,0x6690706abfcec1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x46e7,0x3052,0x1480,0x7e07,0xb268,0xd8d,0xbcff,0x22c7,0xe4fe,0xf7d9,0xe76b,0xc1bc,0x8cef,0xf57b,0xbe9e,0x839c,0xf13b,0x69da,0x1b8a,0x5236,0xfa58,0x595,0x481c,0x81}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x305246e7,0x7e071480,0xd8db268,0x22c7bcff,0xf7d9e4fe,0xc1bce76b,0xf57b8cef,0x839cbe9e,0x69daf13b,0x52361b8a,0x595fa58,0x81481c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7e071480305246e7,0x22c7bcff0d8db268,0xc1bce76bf7d9e4fe,0x839cbe9ef57b8cef,0x52361b8a69daf13b,0x81481c0595fa58}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0xb89, 0xf4d, 0xbd8, 0x18d2, 0x781, 0x1f79, 0xef5, 0xcfd, 0xa7d, 0x121a, 0x59e, 0x18a3, 0x2b, 0x122f, 0x9d5, 0x18aa, 0x105f, 0x1bcd, 0x871, 0x1f9d, 0xae4, 0xc5c, 0x385, 0xbe8, 0x1878, 0xcd8, 0x7a1, 0x7c8, 0x5b4, 0xe} +#elif RADIX == 32 +{0x7a6ae25, 0x71a4bd8, 0x5fbc9e0, 0xf59faef, 0x9e90d29, 0xaf1465, 0x9d59178, 0xc17f154, 0xa871de6, 0xe2b93f3, 0xd038562, 0x6c61e17, 0xf907a16, 0x306d0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xc9e071a4bd87a6ae, 0xe90d29f59faef5fb, 0x49d591780af14659, 0x3f3a871de6c17f15, 0x61e17d038562e2b9, 0x9956d0f907a166c} +#else +{0x40e3497b0f4d5c, 0x27d67ebbd7ef27, 0x40578a32cf4869, 0x6c17f1549d5917, 0x5c5727e750e3bc, 0x1b18785f40e158, 0x4cab687c83d0b} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0xb60, 0x3d3, 0x12f6, 0xe34, 0x9e0, 0xfde, 0xbbd, 0xb3f, 0x129f, 0x1486, 0x1967, 0x1e28, 0x180a, 0xc8b, 0x1275, 0x1e2a, 0xc17, 0xef3, 0xa1c, 0x7e7, 0x2b9, 0xb17, 0xe1, 0x2fa, 0x61e, 0xb36, 0x1e8, 0x1f2, 0x156d, 0xc} +#elif RADIX == 32 +{0x1e9ad81, 0x1c692f6, 0xd7ef278, 0x7d67ebb, 0x67a434a, 0x2bc519, 0x275645e, 0xb05fc55, 0xea1c779, 0xb8ae4fc, 0xf40e158, 0x9b18785, 0x3e41e85, 0x245b4} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf2781c692f61e9ad, 0x7a434a7d67ebbd7e, 0x5275645e02bc5196, 0x4fcea1c779b05fc5, 0x18785f40e158b8ae, 0x20e55b43e41e859b} +#else +{0x7038d25ec3d35b, 0x29f59faef5fbc9, 0x7015e28cb3d21a, 0x1b05fc55275645, 0x1715c9f9d438ef, 0x66c61e17d03856, 0x32ada1f20f42} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x441, 0x1774, 0x1527, 0x106a, 0x577, 0x3fc, 0xf92, 0x12c4, 0x96a, 0x10ea, 0x10f5, 0x11c9, 0x1f8, 0x1407, 0x1bcc, 0x16c4, 0x15c1, 0x790, 0x5bc, 0x1c28, 0xbc6, 0x123c, 0xf19, 0x1d6f, 0x361, 0x1fcd, 0x1dc9, 0x20c, 0x17c6, 0x6} +#elif RADIX == 32 +{0xbba1104, 0xe0d5527, 0x21fe15d, 
0xaa588f9, 0xf587525, 0x7e23930, 0xbcca038, 0x5706d89, 0x5bc3c8, 0xe2f1b85, 0xdef1991, 0xe68d87a, 0x419dc9f, 0x35f18} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xe15de0d5527bba11, 0x587525aa588f921f, 0x9bcca0387e23930f, 0xb8505bc3c85706d8, 0x8d87adef1991e2f1, 0x139f18419dc9fe6} +#else +{0x3bc1aaa4f77422, 0x16a9623e487f85, 0x43f11c987ac3a9, 0x5706d89bcca03, 0x3c5e370a0b7879, 0x79a361eb7bc664, 0x9cf8c20cee4f} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x98a, 0x1bbb, 0x7d8, 0xd84, 0x3fe, 0x90b, 0xfe8, 0x12c3, 0x1e84, 0xde3, 0xbe1, 0x1217, 0x1925, 0x84a, 0xa0e, 0x7cd, 0x1854, 0x768, 0x6e6, 0x1d87, 0xfac, 0x6df, 0x109b, 0x64d, 0x9f2, 0x596, 0x435, 0x1918, 0x1095, 0x0} +#elif RADIX == 32 +{0xddda628, 0x9b087d8, 0x84858ff, 0x12586fe, 0xe16f1fa, 0x49642eb, 0xa0e4256, 0x6150f9a, 0xe6e63b4, 0xfbeb3b0, 0x9b09b36, 0xcb27c8c, 0x2304352, 0x4257} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x58ff9b087d8ddda6, 0x16f1fa12586fe848, 0xaa0e425649642ebe, 0x3b0e6e63b46150f9, 0x27c8c9b09b36fbeb, 0xa2c2572304352cb} +#else +{0x7f3610fb1bbb4c, 0x684961bfa12163, 0x324b2175f0b78f, 0x46150f9aa0e425, 0x5f7d6761cdcc76, 0x32c9f2326c26cd, 0x51612b91821a9} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x17ec, 0x6b9, 0x1dc0, 0x1783, 0x18ee, 0xdd4, 0x1c7f, 0x1fb2, 0x16b0, 0x196e, 0x1e5a, 0x1fda, 0x11f9, 0x117, 0x1c30, 0x1a47, 0x2a2, 0x19e6, 0x1347, 0x2bb, 0x1463, 0x1f37, 0xa64, 0x3c6, 0x1910, 0x2bc, 0xbc0, 0x17e8, 0x1cfd, 0xa} +#elif RADIX == 32 +{0x35cdfb1, 0xaf07dc0, 0xf6ea63b, 0xc3f65c7, 0x5acb75a, 0x7e7fb5e, 0xc3008bc, 0xa8b48f, 0x7347cf3, 0xbd18c57, 0x8ca64f9, 0x5e64407, 0xfd0bc01, 0x163f6} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xa63baf07dc035cdf, 0xacb75ac3f65c7f6e, 0xfc3008bc7e7fb5e5, 0xc577347cf30a8b48, 0x644078ca64f9bd18, 0x2d073f6fd0bc015e} +#else +{0x775e0fb806b9bf, 0x6b0fd971fdba98, 0x63f3fdaf2d65ba, 0x30a8b48fc3008b, 0x37a318aee68f9e, 0x5799101e32993e, 0x6439fb7e85e00} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x440, 0x172e, 0x4f, 0x1e07, 0x15ce, 0x1b55, 0x68e, 0x2c, 0x13bb, 0x1f43, 0x1dda, 0x1fb4, 0xe54, 0x1502, 0x723, 0x7e7, 0x1147, 0x1ba0, 0x3d0, 0xf7c, 0x1754, 0x5fc, 0x1098, 0x16aa, 0x182, 0x1c1d, 0x18e9, 0x13ce, 0xbae, 0x18} +#elif RADIX == 32 +{0xb971102, 0xbc0e04f, 0xedaad73, 0xec05868, 0xdafa1ce, 0x953f69d, 0x723a813, 0x451cfce, 0x83d0dd0, 0xe5d51ef, 0x550982f, 0xe860ad, 0x79d8e9e, 0x40eba} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xad73bc0e04fb9711, 0xafa1ceec05868eda, 0xe723a813953f69dd, 0x1ef83d0dd0451cfc, 0x860ad550982fe5d5, 0xc2eba79d8e9e0e} +#else +{0x67781c09f72e22, 0x3bb0161a3b6ab5, 0x1ca9fb4eed7d0e, 0x451cfce723a81, 0x7cbaa3df07a1ba, 
0x3a182b554260b, 0x6175d3cec74f} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x18c5, 0x1326, 0x1d4d, 0x19eb, 0xea, 0x947, 0x1adf, 0xbf5, 0xafe, 0x1225, 0x18a0, 0xb3a, 0x8e0, 0xaea, 0x17aa, 0x19a5, 0x912, 0x634, 0x15c7, 0x1df7, 0x13cb, 0x1894, 0xeaa, 0xa69, 0x6ca, 0x1b49, 0x26f, 0x1f50, 0xd92, 0x6} +#elif RADIX == 32 +{0x9936314, 0xb3d7d4d, 0xf4a383a, 0xf97ebad, 0xa0912ab, 0x3816758, 0x7aa5752, 0x244b34b, 0xf5c731a, 0xa4f2fbe, 0xd2eaac4, 0xa49b294, 0xea026fd, 0x3364b} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x383ab3d7d4d99363, 0x912abf97ebadf4a, 0xb7aa57523816758a, 0xfbef5c731a244b34, 0x9b294d2eaac4a4f2, 0x54764bea026fda4} +#else +{0x7567afa9b326c6, 0x2fe5faeb7d28e0, 0x11c0b3ac504895, 0x2244b34b7aa575, 0x149e5f7deb8e63, 0x6926ca534baab1, 0x2a3b25f50137e} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x132f, 0x6d5, 0x95b, 0xa68, 0x1814, 0x12d3, 0x1f1e, 0x857, 0x14fa, 0xcf, 0x1f19, 0xe1b, 0x1cf7, 0xa53, 0x1455, 0x5ef, 0x3e2, 0x199c, 0x1162, 0x38d, 0x174b, 0x794, 0xef6, 0xf74, 0x9c, 0x1f55, 0x1c4d, 0x56f, 0x1638, 0x19} +#elif RADIX == 32 +{0x36accbf, 0x14d095b, 0xe969e05, 0xe90aff1, 0x19067d3, 0x3ddc37f, 0x455529f, 0xf88bdf, 0xb162cce, 0xa5d2c71, 0xe8ef63c, 0xaa8271e, 0xadfc4df, 0xa8e0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x9e0514d095b36acc, 0x9067d3e90aff1e96, 0xf455529f3ddc37f1, 0xc71b162cce0f88bd, 0x8271ee8ef63ca5d2, 0x30898e0adfc4dfaa} +#else +{0xa29a12b66d599, 0x4fa42bfc7a5a78, 0x79eee1bf8c833e, 0x60f88bdf455529, 0x14ba58e362c599, 0x6aa09c7ba3bd8f, 0x804c7056fe26f} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xe7,0x36f4,0xe2a7,0x986d,0x214b,0xf464,0x7818,0x49f7,0x5ea0,0x73e7,0x9e17,0x85a0,0xd1cc,0xc33d,0x7833,0xbb28,0xba60,0x69d5,0xd73c,0x879e,0x6732,0x905,0xeefd,0xc8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x36f400e7,0x986de2a7,0xf464214b,0x49f77818,0x73e75ea0,0x85a09e17,0xc33dd1cc,0xbb287833,0x69d5ba60,0x879ed73c,0x9056732,0xc8eefd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x986de2a736f400e7,0x49f77818f464214b,0x85a09e1773e75ea0,0xbb287833c33dd1cc,0x879ed73c69d5ba60,0xc8eefd09056732}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x925e,0xff6f,0xa6dc,0x2eff,0xb2a6,0xe88c,0xab24,0x6829,0x91eb,0x118,0x77ac,0x8506,0x9934,0x5a86,0x5cac,0xfd15,0x5869,0xc232,0xebcf,0xcdef,0x254,0xf9e4,0xcc7b,0x96}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xff6f925e,0x2effa6dc,0xe88cb2a6,0x6829ab24,0x11891eb,0x850677ac,0x5a869934,0xfd155cac,0xc2325869,0xcdefebcf,0xf9e40254,0x96cc7b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2effa6dcff6f925e,0x6829ab24e88cb2a6,0x850677ac011891eb,0xfd155cac5a869934,0xcdefebcfc2325869,0x96cc7bf9e40254}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xae19,0xed4d,0x178c,0x7829,0x7b15,0x4808,0xdab3,0xebc6,0x25cb,0x3298,0xe9bd,0x3d5e,0xe9a4,0x960,0x92e6,0xb3ea,0x33e,0xf491,0x2343,0xbdb9,0xa17d,0x63a5,0x767f,0x41}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xed4dae19,0x7829178c,0x48087b15,0xebc6dab3,0x329825cb,0x3d5ee9bd,0x960e9a4,0xb3ea92e6,0xf491033e,0xbdb92343,0x63a5a17d,0x41767f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7829178ced4dae19,0xebc6dab348087b15,0x3d5ee9bd329825cb,0xb3ea92e60960e9a4,0xbdb92343f491033e,0x41767f63a5a17d}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xff19,0xc90b,0x1d58,0x6792,0xdeb4,0xb9b,0x87e7,0xb608,0xa15f,0x8c18,0x61e8,0x7a5f,0x2e33,0x3cc2,0x87cc,0x44d7,0x459f,0x962a,0x28c3,0x7861,0x98cd,0xf6fa,0x1102,0x37}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc90bff19,0x67921d58,0xb9bdeb4,0xb60887e7,0x8c18a15f,0x7a5f61e8,0x3cc22e33,0x44d787cc,0x962a459f,0x786128c3,0xf6fa98cd,0x371102}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x67921d58c90bff19,0xb60887e70b9bdeb4,0x7a5f61e88c18a15f,0x44d787cc3cc22e33,0x786128c3962a459f,0x371102f6fa98cd}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3d63,0xdad1,0xf501,0xd58f,0x8741,0xd265,0xf8bd,0xb3b9,0xac08,0xfc8b,0x45ab,0xbcdf,0x501,0x9f7,0x10ed,0x102f,0xc6e3,0xdc57,0xf892,0x8db4,0x2c76,0x21ab,0x2bc3,0x8e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdad13d63,0xd58ff501,0xd2658741,0xb3b9f8bd,0xfc8bac08,0xbcdf45ab,0x9f70501,0x102f10ed,0xdc57c6e3,0x8db4f892,0x21ab2c76,0x8e2bc3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd58ff501dad13d63,0xb3b9f8bdd2658741,0xbcdf45abfc8bac08,0x102f10ed09f70501,0x8db4f892dc57c6e3,0x8e2bc321ab2c76}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc998,0x418c,0xa8e4,0x2354,0x622a,0xb76d,0x5487,0xdad9,0x1672,0x522b,0xa00f,0xdfa5,0x296b,0xe17c,0x595e,0x91e1,0xa22d,0xe126,0x904c,0x9288,0x5075,0xc6c5,0x61b0,0xb1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x418cc998,0x2354a8e4,0xb76d622a,0xdad95487,0x522b1672,0xdfa5a00f,0xe17c296b,0x91e1595e,0xe126a22d,0x9288904c,0xc6c55075,0xb161b0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2354a8e4418cc998,0xdad95487b76d622a,0xdfa5a00f522b1672,0x91e1595ee17c296b,0x9288904ce126a22d,0xb161b0c6c55075}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x1271,0x594e,0x16ee,0x35fa,0xaf0e,0x11b2,0x1fca,0x24b7,0xa3e3,0x2bcc,0xc2f0,0x6409,0xf8e1,0x6a8f,0x67e,0xe7ee,0xad00,0x2b9a,0x6813,0x5e0a,0x6dec,0x48f5,0xbd1d,0xb3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x594e1271,0x35fa16ee,0x11b2af0e,0x24b71fca,0x2bcca3e3,0x6409c2f0,0x6a8ff8e1,0xe7ee067e,0x2b9aad00,0x5e0a6813,0x48f56dec,0xb3bd1d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x35fa16ee594e1271,0x24b71fca11b2af0e,0x6409c2f02bcca3e3,0xe7ee067e6a8ff8e1,0x5e0a68132b9aad00,0xb3bd1d48f56dec}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc29d,0x252e,0xafe,0x2a70,0x78be,0x2d9a,0x742,0x4c46,0x53f7,0x374,0xba54,0x4320,0xfafe,0xf608,0xef12,0xefd0,0x391c,0x23a8,0x76d,0x724b,0xd389,0xde54,0xd43c,0x71}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x252ec29d,0x2a700afe,0x2d9a78be,0x4c460742,0x37453f7,0x4320ba54,0xf608fafe,0xefd0ef12,0x23a8391c,0x724b076d,0xde54d389,0x71d43c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2a700afe252ec29d,0x4c4607422d9a78be,0x4320ba54037453f7,0xefd0ef12f608fafe,0x724b076d23a8391c,0x71d43cde54d389}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe7,0x36f4,0xe2a7,0x986d,0x214b,0xf464,0x7818,0x49f7,0x5ea0,0x73e7,0x9e17,0x85a0,0xd1cc,0xc33d,0x7833,0xbb28,0xba60,0x69d5,0xd73c,0x879e,0x6732,0x905,0xeefd,0xc8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x36f400e7,0x986de2a7,0xf464214b,0x49f77818,0x73e75ea0,0x85a09e17,0xc33dd1cc,0xbb287833,0x69d5ba60,0x879ed73c,0x9056732,0xc8eefd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x986de2a736f400e7,0x49f77818f464214b,0x85a09e1773e75ea0,0xbb287833c33dd1cc,0x879ed73c69d5ba60,0xc8eefd09056732}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x925e,0xff6f,0xa6dc,0x2eff,0xb2a6,0xe88c,0xab24,0x6829,0x91eb,0x118,0x77ac,0x8506,0x9934,0x5a86,0x5cac,0xfd15,0x5869,0xc232,0xebcf,0xcdef,0x254,0xf9e4,0xcc7b,0x96}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xff6f925e,0x2effa6dc,0xe88cb2a6,0x6829ab24,0x11891eb,0x850677ac,0x5a869934,0xfd155cac,0xc2325869,0xcdefebcf,0xf9e40254,0x96cc7b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2effa6dcff6f925e,0x6829ab24e88cb2a6,0x850677ac011891eb,0xfd155cac5a869934,0xcdefebcfc2325869,0x96cc7bf9e40254}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xae19,0xed4d,0x178c,0x7829,0x7b15,0x4808,0xdab3,0xebc6,0x25cb,0x3298,0xe9bd,0x3d5e,0xe9a4,0x960,0x92e6,0xb3ea,0x33e,0xf491,0x2343,0xbdb9,0xa17d,0x63a5,0x767f,0x41}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xed4dae19,0x7829178c,0x48087b15,0xebc6dab3,0x329825cb,0x3d5ee9bd,0x960e9a4,0xb3ea92e6,0xf491033e,0xbdb92343,0x63a5a17d,0x41767f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7829178ced4dae19,0xebc6dab348087b15,0x3d5ee9bd329825cb,0xb3ea92e60960e9a4,0xbdb92343f491033e,0x41767f63a5a17d}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xff19,0xc90b,0x1d58,0x6792,0xdeb4,0xb9b,0x87e7,0xb608,0xa15f,0x8c18,0x61e8,0x7a5f,0x2e33,0x3cc2,0x87cc,0x44d7,0x459f,0x962a,0x28c3,0x7861,0x98cd,0xf6fa,0x1102,0x37}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc90bff19,0x67921d58,0xb9bdeb4,0xb60887e7,0x8c18a15f,0x7a5f61e8,0x3cc22e33,0x44d787cc,0x962a459f,0x786128c3,0xf6fa98cd,0x371102}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x67921d58c90bff19,0xb60887e70b9bdeb4,0x7a5f61e88c18a15f,0x44d787cc3cc22e33,0x786128c3962a459f,0x371102f6fa98cd}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xd70d,0x31e4,0xa551,0x7483,0x6f09,0x34d,0x6a80,0x85f,0x6b11,0xe29b,0x188,0x38d2,0x85b,0xa241,0xc423,0xddc8,0x3260,0x1722,0xf3a4,0x7cf7,0x36e8,0x7955,0xeeb9,0xc6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x31e4d70d,0x7483a551,0x34d6f09,0x85f6a80,0xe29b6b11,0x38d20188,0xa241085b,0xddc8c423,0x17223260,0x7cf7f3a4,0x795536e8,0xc6eeb9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7483a55131e4d70d,0x85f6a80034d6f09,0x38d20188e29b6b11,0xddc8c423a241085b,0x7cf7f3a417223260,0xc6eeb9795536e8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x59a9,0x8f53,0xd42f,0xf65b,0x7134,0x4475,0x9543,0x8428,0x4555,0x7d45,0x7bfb,0xe15d,0xe9c2,0x24ec,0xf17f,0x88ea,0x766c,0xbf2d,0x2b42,0x2771,0x5dfc,0xd040,0xfa62,0xc9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8f5359a9,0xf65bd42f,0x44757134,0x84289543,0x7d454555,0xe15d7bfb,0x24ece9c2,0x88eaf17f,0xbf2d766c,0x27712b42,0xd0405dfc,0xc9fa62}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf65bd42f8f5359a9,0x8428954344757134,0xe15d7bfb7d454555,0x88eaf17f24ece9c2,0x27712b42bf2d766c,0xc9fa62d0405dfc}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4b49,0x89b0,0x8c52,0x91ca,0xed1b,0xd527,0x453,0x82d,0xb0eb,0xb6bf,0x3790,0x5816,0x49bb,0xa0a7,0xffc6,0x5530,0x23b9,0x12bb,0x52c4,0x6f51,0x25fd,0x62d,0x723d,0xc6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x89b04b49,0x91ca8c52,0xd527ed1b,0x82d0453,0xb6bfb0eb,0x58163790,0xa0a749bb,0x5530ffc6,0x12bb23b9,0x6f5152c4,0x62d25fd,0xc6723d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x91ca8c5289b04b49,0x82d0453d527ed1b,0x58163790b6bfb0eb,0x5530ffc6a0a749bb,0x6f5152c412bb23b9,0xc6723d062d25fd}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x28f3,0xce1b,0x5aae,0x8b7c,0x90f6,0xfcb2,0x957f,0xf7a0,0x94ee,0x1d64,0xfe77,0xc72d,0xf7a4,0x5dbe,0x3bdc,0x2237,0xcd9f,0xe8dd,0xc5b,0x8308,0xc917,0x86aa,0x1146,0x39}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xce1b28f3,0x8b7c5aae,0xfcb290f6,0xf7a0957f,0x1d6494ee,0xc72dfe77,0x5dbef7a4,0x22373bdc,0xe8ddcd9f,0x83080c5b,0x86aac917,0x391146}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8b7c5aaece1b28f3,0xf7a0957ffcb290f6,0xc72dfe771d6494ee,0x22373bdc5dbef7a4,0x83080c5be8ddcd9f,0x39114686aac917}}} +#endif +}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.h new file mode 100644 index 0000000000..5bb17f554a --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.h @@ -0,0 +1,31 @@ +#ifndef ENDOMORPHISM_ACTION_H +#define ENDOMORPHISM_ACTION_H +#include +#include +#include +/** Type for precomputed endomorphism rings applied to precomputed torsion bases. + * + * Precomputed by the precompute scripts. 
+ * + * @typedef curve_with_endomorphism_ring_t + * + * @struct curve_with_endomorphism_ring + **/ +typedef struct curve_with_endomorphism_ring { + ec_curve_t curve; + ec_basis_t basis_even; + ibz_mat_2x2_t action_i, action_j, action_k; + ibz_mat_2x2_t action_gen2, action_gen3, action_gen4; +} curve_with_endomorphism_ring_t; +#define CURVE_E0 (CURVES_WITH_ENDOMORPHISMS->curve) +#define BASIS_EVEN (CURVES_WITH_ENDOMORPHISMS->basis_even) +#define ACTION_I (CURVES_WITH_ENDOMORPHISMS->action_i) +#define ACTION_J (CURVES_WITH_ENDOMORPHISMS->action_j) +#define ACTION_K (CURVES_WITH_ENDOMORPHISMS->action_k) +#define ACTION_GEN2 (CURVES_WITH_ENDOMORPHISMS->action_gen2) +#define ACTION_GEN3 (CURVES_WITH_ENDOMORPHISMS->action_gen3) +#define ACTION_GEN4 (CURVES_WITH_ENDOMORPHISMS->action_gen4) +#define NUM_ALTERNATE_STARTING_CURVES 7 +#define ALTERNATE_STARTING_CURVES (CURVES_WITH_ENDOMORPHISMS+1) +extern const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8]; +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fips202.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fips202.c new file mode 100644 index 0000000000..f2992d8c7f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fips202.c @@ -0,0 +1,876 @@ +// SPDX-License-Identifier: PD and Apache-2.0 + +/* FIPS202 implementation based on code from PQClean, + * which is in turn based based on the public domain implementation in + * crypto_hash/keccakc512/simple/ from http://bench.cr.yp.to/supercop.html + * by Ronny Van Keer + * and the public domain "TweetFips202" implementation + * from https://twitter.com/tweetfips202 + * by Gilles Van Assche, Daniel J. Bernstein, and Peter Schwabe */ + +#include +#include +#include +#include + +#include "fips202.h" + +#define NROUNDS 24 +#define ROL(a, offset) (((a) << (offset)) ^ ((a) >> (64 - (offset)))) + +/************************************************* + * Name: load64 + * + * Description: Load 8 bytes into uint64_t in little-endian order + * + * Arguments: - const uint8_t *x: pointer to input byte array + * + * Returns the loaded 64-bit unsigned integer + **************************************************/ +static uint64_t load64(const uint8_t *x) { + uint64_t r = 0; + for (size_t i = 0; i < 8; ++i) { + r |= (uint64_t)x[i] << 8 * i; + } + + return r; +} + +/************************************************* + * Name: store64 + * + * Description: Store a 64-bit integer to a byte array in little-endian order + * + * Arguments: - uint8_t *x: pointer to the output byte array + * - uint64_t u: input 64-bit unsigned integer + **************************************************/ +static void store64(uint8_t *x, uint64_t u) { + for (size_t i = 0; i < 8; ++i) { + x[i] = (uint8_t) (u >> 8 * i); + } +} + +/* Keccak round constants */ +static const uint64_t KeccakF_RoundConstants[NROUNDS] = { + 0x0000000000000001ULL, 0x0000000000008082ULL, + 0x800000000000808aULL, 0x8000000080008000ULL, + 0x000000000000808bULL, 0x0000000080000001ULL, + 0x8000000080008081ULL, 0x8000000000008009ULL, + 0x000000000000008aULL, 0x0000000000000088ULL, + 0x0000000080008009ULL, 0x000000008000000aULL, + 0x000000008000808bULL, 0x800000000000008bULL, + 0x8000000000008089ULL, 0x8000000000008003ULL, + 0x8000000000008002ULL, 0x8000000000000080ULL, + 0x000000000000800aULL, 0x800000008000000aULL, + 0x8000000080008081ULL, 0x8000000000008080ULL, + 0x0000000080000001ULL, 0x8000000080008008ULL +}; + +/************************************************* + * Name: KeccakF1600_StatePermute + * + * 
Description: The Keccak F1600 Permutation + * + * Arguments: - uint64_t *state: pointer to input/output Keccak state + **************************************************/ +static void KeccakF1600_StatePermute(uint64_t *state) { + int round; + + uint64_t Aba, Abe, Abi, Abo, Abu; + uint64_t Aga, Age, Agi, Ago, Agu; + uint64_t Aka, Ake, Aki, Ako, Aku; + uint64_t Ama, Ame, Ami, Amo, Amu; + uint64_t Asa, Ase, Asi, Aso, Asu; + uint64_t BCa, BCe, BCi, BCo, BCu; + uint64_t Da, De, Di, Do, Du; + uint64_t Eba, Ebe, Ebi, Ebo, Ebu; + uint64_t Ega, Ege, Egi, Ego, Egu; + uint64_t Eka, Eke, Eki, Eko, Eku; + uint64_t Ema, Eme, Emi, Emo, Emu; + uint64_t Esa, Ese, Esi, Eso, Esu; + + // copyFromState(A, state) + Aba = state[0]; + Abe = state[1]; + Abi = state[2]; + Abo = state[3]; + Abu = state[4]; + Aga = state[5]; + Age = state[6]; + Agi = state[7]; + Ago = state[8]; + Agu = state[9]; + Aka = state[10]; + Ake = state[11]; + Aki = state[12]; + Ako = state[13]; + Aku = state[14]; + Ama = state[15]; + Ame = state[16]; + Ami = state[17]; + Amo = state[18]; + Amu = state[19]; + Asa = state[20]; + Ase = state[21]; + Asi = state[22]; + Aso = state[23]; + Asu = state[24]; + + for (round = 0; round < NROUNDS; round += 2) { + // prepareTheta + BCa = Aba ^ Aga ^ Aka ^ Ama ^ Asa; + BCe = Abe ^ Age ^ Ake ^ Ame ^ Ase; + BCi = Abi ^ Agi ^ Aki ^ Ami ^ Asi; + BCo = Abo ^ Ago ^ Ako ^ Amo ^ Aso; + BCu = Abu ^ Agu ^ Aku ^ Amu ^ Asu; + + // thetaRhoPiChiIotaPrepareTheta(round , A, E) + Da = BCu ^ ROL(BCe, 1); + De = BCa ^ ROL(BCi, 1); + Di = BCe ^ ROL(BCo, 1); + Do = BCi ^ ROL(BCu, 1); + Du = BCo ^ ROL(BCa, 1); + + Aba ^= Da; + BCa = Aba; + Age ^= De; + BCe = ROL(Age, 44); + Aki ^= Di; + BCi = ROL(Aki, 43); + Amo ^= Do; + BCo = ROL(Amo, 21); + Asu ^= Du; + BCu = ROL(Asu, 14); + Eba = BCa ^ ((~BCe) & BCi); + Eba ^= KeccakF_RoundConstants[round]; + Ebe = BCe ^ ((~BCi) & BCo); + Ebi = BCi ^ ((~BCo) & BCu); + Ebo = BCo ^ ((~BCu) & BCa); + Ebu = BCu ^ ((~BCa) & BCe); + + Abo ^= Do; + BCa = ROL(Abo, 28); + Agu ^= Du; + BCe = ROL(Agu, 20); + Aka ^= Da; + BCi = ROL(Aka, 3); + Ame ^= De; + BCo = ROL(Ame, 45); + Asi ^= Di; + BCu = ROL(Asi, 61); + Ega = BCa ^ ((~BCe) & BCi); + Ege = BCe ^ ((~BCi) & BCo); + Egi = BCi ^ ((~BCo) & BCu); + Ego = BCo ^ ((~BCu) & BCa); + Egu = BCu ^ ((~BCa) & BCe); + + Abe ^= De; + BCa = ROL(Abe, 1); + Agi ^= Di; + BCe = ROL(Agi, 6); + Ako ^= Do; + BCi = ROL(Ako, 25); + Amu ^= Du; + BCo = ROL(Amu, 8); + Asa ^= Da; + BCu = ROL(Asa, 18); + Eka = BCa ^ ((~BCe) & BCi); + Eke = BCe ^ ((~BCi) & BCo); + Eki = BCi ^ ((~BCo) & BCu); + Eko = BCo ^ ((~BCu) & BCa); + Eku = BCu ^ ((~BCa) & BCe); + + Abu ^= Du; + BCa = ROL(Abu, 27); + Aga ^= Da; + BCe = ROL(Aga, 36); + Ake ^= De; + BCi = ROL(Ake, 10); + Ami ^= Di; + BCo = ROL(Ami, 15); + Aso ^= Do; + BCu = ROL(Aso, 56); + Ema = BCa ^ ((~BCe) & BCi); + Eme = BCe ^ ((~BCi) & BCo); + Emi = BCi ^ ((~BCo) & BCu); + Emo = BCo ^ ((~BCu) & BCa); + Emu = BCu ^ ((~BCa) & BCe); + + Abi ^= Di; + BCa = ROL(Abi, 62); + Ago ^= Do; + BCe = ROL(Ago, 55); + Aku ^= Du; + BCi = ROL(Aku, 39); + Ama ^= Da; + BCo = ROL(Ama, 41); + Ase ^= De; + BCu = ROL(Ase, 2); + Esa = BCa ^ ((~BCe) & BCi); + Ese = BCe ^ ((~BCi) & BCo); + Esi = BCi ^ ((~BCo) & BCu); + Eso = BCo ^ ((~BCu) & BCa); + Esu = BCu ^ ((~BCa) & BCe); + + // prepareTheta + BCa = Eba ^ Ega ^ Eka ^ Ema ^ Esa; + BCe = Ebe ^ Ege ^ Eke ^ Eme ^ Ese; + BCi = Ebi ^ Egi ^ Eki ^ Emi ^ Esi; + BCo = Ebo ^ Ego ^ Eko ^ Emo ^ Eso; + BCu = Ebu ^ Egu ^ Eku ^ Emu ^ Esu; + + // thetaRhoPiChiIotaPrepareTheta(round+1, E, A) + Da = BCu ^ ROL(BCe, 1); + De = 
BCa ^ ROL(BCi, 1); + Di = BCe ^ ROL(BCo, 1); + Do = BCi ^ ROL(BCu, 1); + Du = BCo ^ ROL(BCa, 1); + + Eba ^= Da; + BCa = Eba; + Ege ^= De; + BCe = ROL(Ege, 44); + Eki ^= Di; + BCi = ROL(Eki, 43); + Emo ^= Do; + BCo = ROL(Emo, 21); + Esu ^= Du; + BCu = ROL(Esu, 14); + Aba = BCa ^ ((~BCe) & BCi); + Aba ^= KeccakF_RoundConstants[round + 1]; + Abe = BCe ^ ((~BCi) & BCo); + Abi = BCi ^ ((~BCo) & BCu); + Abo = BCo ^ ((~BCu) & BCa); + Abu = BCu ^ ((~BCa) & BCe); + + Ebo ^= Do; + BCa = ROL(Ebo, 28); + Egu ^= Du; + BCe = ROL(Egu, 20); + Eka ^= Da; + BCi = ROL(Eka, 3); + Eme ^= De; + BCo = ROL(Eme, 45); + Esi ^= Di; + BCu = ROL(Esi, 61); + Aga = BCa ^ ((~BCe) & BCi); + Age = BCe ^ ((~BCi) & BCo); + Agi = BCi ^ ((~BCo) & BCu); + Ago = BCo ^ ((~BCu) & BCa); + Agu = BCu ^ ((~BCa) & BCe); + + Ebe ^= De; + BCa = ROL(Ebe, 1); + Egi ^= Di; + BCe = ROL(Egi, 6); + Eko ^= Do; + BCi = ROL(Eko, 25); + Emu ^= Du; + BCo = ROL(Emu, 8); + Esa ^= Da; + BCu = ROL(Esa, 18); + Aka = BCa ^ ((~BCe) & BCi); + Ake = BCe ^ ((~BCi) & BCo); + Aki = BCi ^ ((~BCo) & BCu); + Ako = BCo ^ ((~BCu) & BCa); + Aku = BCu ^ ((~BCa) & BCe); + + Ebu ^= Du; + BCa = ROL(Ebu, 27); + Ega ^= Da; + BCe = ROL(Ega, 36); + Eke ^= De; + BCi = ROL(Eke, 10); + Emi ^= Di; + BCo = ROL(Emi, 15); + Eso ^= Do; + BCu = ROL(Eso, 56); + Ama = BCa ^ ((~BCe) & BCi); + Ame = BCe ^ ((~BCi) & BCo); + Ami = BCi ^ ((~BCo) & BCu); + Amo = BCo ^ ((~BCu) & BCa); + Amu = BCu ^ ((~BCa) & BCe); + + Ebi ^= Di; + BCa = ROL(Ebi, 62); + Ego ^= Do; + BCe = ROL(Ego, 55); + Eku ^= Du; + BCi = ROL(Eku, 39); + Ema ^= Da; + BCo = ROL(Ema, 41); + Ese ^= De; + BCu = ROL(Ese, 2); + Asa = BCa ^ ((~BCe) & BCi); + Ase = BCe ^ ((~BCi) & BCo); + Asi = BCi ^ ((~BCo) & BCu); + Aso = BCo ^ ((~BCu) & BCa); + Asu = BCu ^ ((~BCa) & BCe); + } + + // copyToState(state, A) + state[0] = Aba; + state[1] = Abe; + state[2] = Abi; + state[3] = Abo; + state[4] = Abu; + state[5] = Aga; + state[6] = Age; + state[7] = Agi; + state[8] = Ago; + state[9] = Agu; + state[10] = Aka; + state[11] = Ake; + state[12] = Aki; + state[13] = Ako; + state[14] = Aku; + state[15] = Ama; + state[16] = Ame; + state[17] = Ami; + state[18] = Amo; + state[19] = Amu; + state[20] = Asa; + state[21] = Ase; + state[22] = Asi; + state[23] = Aso; + state[24] = Asu; +} + +/************************************************* + * Name: keccak_absorb + * + * Description: Absorb step of Keccak; + * non-incremental, starts by zeroeing the state. + * + * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + * - const uint8_t *m: pointer to input to be absorbed into s + * - size_t mlen: length of input in bytes + * - uint8_t p: domain-separation byte for different + * Keccak-derived functions + **************************************************/ +static void keccak_absorb(uint64_t *s, uint32_t r, const uint8_t *m, + size_t mlen, uint8_t p) { + size_t i; + uint8_t t[200]; + + /* Zero state */ + for (i = 0; i < 25; ++i) { + s[i] = 0; + } + + while (mlen >= r) { + for (i = 0; i < r / 8; ++i) { + s[i] ^= load64(m + 8 * i); + } + + KeccakF1600_StatePermute(s); + mlen -= r; + m += r; + } + + for (i = 0; i < r; ++i) { + t[i] = 0; + } + for (i = 0; i < mlen; ++i) { + t[i] = m[i]; + } + t[i] = p; + t[r - 1] |= 128; + for (i = 0; i < r / 8; ++i) { + s[i] ^= load64(t + 8 * i); + } +} + +/************************************************* + * Name: keccak_squeezeblocks + * + * Description: Squeeze step of Keccak. Squeezes full blocks of r bytes each. + * Modifies the state. 
Can be called multiple times to keep + * squeezing, i.e., is incremental. + * + * Arguments: - uint8_t *h: pointer to output blocks + * - size_t nblocks: number of blocks to be + * squeezed (written to h) + * - uint64_t *s: pointer to input/output Keccak state + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + **************************************************/ +static void keccak_squeezeblocks(uint8_t *h, size_t nblocks, + uint64_t *s, uint32_t r) { + while (nblocks > 0) { + KeccakF1600_StatePermute(s); + for (size_t i = 0; i < (r >> 3); i++) { + store64(h + 8 * i, s[i]); + } + h += r; + nblocks--; + } +} + +/************************************************* + * Name: keccak_inc_init + * + * Description: Initializes the incremental Keccak state to zero. + * + * Arguments: - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. + **************************************************/ +static void keccak_inc_init(uint64_t *s_inc) { + size_t i; + + for (i = 0; i < 25; ++i) { + s_inc[i] = 0; + } + s_inc[25] = 0; +} + +/************************************************* + * Name: keccak_inc_absorb + * + * Description: Incremental keccak absorb + * Preceded by keccak_inc_init, succeeded by keccak_inc_finalize + * + * Arguments: - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + * - const uint8_t *m: pointer to input to be absorbed into s + * - size_t mlen: length of input in bytes + **************************************************/ +static void keccak_inc_absorb(uint64_t *s_inc, uint32_t r, const uint8_t *m, + size_t mlen) { + size_t i; + + /* Recall that s_inc[25] is the non-absorbed bytes xored into the state */ + while (mlen + s_inc[25] >= r) { + for (i = 0; i < r - (uint32_t)s_inc[25]; i++) { + /* Take the i'th byte from message + xor with the s_inc[25] + i'th byte of the state; little-endian */ + s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); + } + mlen -= (size_t)(r - s_inc[25]); + m += r - s_inc[25]; + s_inc[25] = 0; + + KeccakF1600_StatePermute(s_inc); + } + + for (i = 0; i < mlen; i++) { + s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); + } + s_inc[25] += mlen; +} + +/************************************************* + * Name: keccak_inc_finalize + * + * Description: Finalizes Keccak absorb phase, prepares for squeezing + * + * Arguments: - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + * - uint8_t p: domain-separation byte for different + * Keccak-derived functions + **************************************************/ +static void keccak_inc_finalize(uint64_t *s_inc, uint32_t r, uint8_t p) { + /* After keccak_inc_absorb, we are guaranteed that s_inc[25] < r, + so we can always use one more byte for p in the current state. 
*/ + s_inc[s_inc[25] >> 3] ^= (uint64_t)p << (8 * (s_inc[25] & 0x07)); + s_inc[(r - 1) >> 3] ^= (uint64_t)128 << (8 * ((r - 1) & 0x07)); + s_inc[25] = 0; +} + +/************************************************* + * Name: keccak_inc_squeeze + * + * Description: Incremental Keccak squeeze; can be called on byte-level + * + * Arguments: - uint8_t *h: pointer to output bytes + * - size_t outlen: number of bytes to be squeezed + * - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + **************************************************/ +static void keccak_inc_squeeze(uint8_t *h, size_t outlen, + uint64_t *s_inc, uint32_t r) { + size_t i; + + /* First consume any bytes we still have sitting around */ + for (i = 0; i < outlen && i < s_inc[25]; i++) { + /* There are s_inc[25] bytes left, so r - s_inc[25] is the first + available byte. We consume from there, i.e., up to r. */ + h[i] = (uint8_t)(s_inc[(r - s_inc[25] + i) >> 3] >> (8 * ((r - s_inc[25] + i) & 0x07))); + } + h += i; + outlen -= i; + s_inc[25] -= i; + + /* Then squeeze the remaining necessary blocks */ + while (outlen > 0) { + KeccakF1600_StatePermute(s_inc); + + for (i = 0; i < outlen && i < r; i++) { + h[i] = (uint8_t)(s_inc[i >> 3] >> (8 * (i & 0x07))); + } + h += i; + outlen -= i; + s_inc[25] = r - i; + } +} + +void shake128_inc_init(shake128incctx *state) { + keccak_inc_init(state->ctx); +} + +void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHAKE128_RATE, input, inlen); +} + +void shake128_inc_finalize(shake128incctx *state) { + keccak_inc_finalize(state->ctx, SHAKE128_RATE, 0x1F); +} + +void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state) { + keccak_inc_squeeze(output, outlen, state->ctx, SHAKE128_RATE); +} + +void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void shake128_inc_ctx_release(shake128incctx *state) { + (void)state; +} + +void shake256_inc_init(shake256incctx *state) { + keccak_inc_init(state->ctx); +} + +void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHAKE256_RATE, input, inlen); +} + +void shake256_inc_finalize(shake256incctx *state) { + keccak_inc_finalize(state->ctx, SHAKE256_RATE, 0x1F); +} + +void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state) { + keccak_inc_squeeze(output, outlen, state->ctx, SHAKE256_RATE); +} + +void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void shake256_inc_ctx_release(shake256incctx *state) { + (void)state; +} + + +/************************************************* + * Name: shake128_absorb + * + * Description: Absorb step of the SHAKE128 XOF. + * non-incremental, starts by zeroeing the state. 
+ * + * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state + * - const uint8_t *input: pointer to input to be absorbed + * into s + * - size_t inlen: length of input in bytes + **************************************************/ +void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen) { + keccak_absorb(state->ctx, SHAKE128_RATE, input, inlen, 0x1F); +} + +/************************************************* + * Name: shake128_squeezeblocks + * + * Description: Squeeze step of SHAKE128 XOF. Squeezes full blocks of + * SHAKE128_RATE bytes each. Modifies the state. Can be called + * multiple times to keep squeezing, i.e., is incremental. + * + * Arguments: - uint8_t *output: pointer to output blocks + * - size_t nblocks: number of blocks to be squeezed + * (written to output) + * - shake128ctx *state: pointer to input/output Keccak state + **************************************************/ +void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state) { + keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE128_RATE); +} + +void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); +} + +/** Release the allocated state. Call only once. */ +void shake128_ctx_release(shake128ctx *state) { + (void)state; +} + +/************************************************* + * Name: shake256_absorb + * + * Description: Absorb step of the SHAKE256 XOF. + * non-incremental, starts by zeroeing the state. + * + * Arguments: - shake256ctx *state: pointer to (uninitialized) output Keccak state + * - const uint8_t *input: pointer to input to be absorbed + * into s + * - size_t inlen: length of input in bytes + **************************************************/ +void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen) { + keccak_absorb(state->ctx, SHAKE256_RATE, input, inlen, 0x1F); +} + +/************************************************* + * Name: shake256_squeezeblocks + * + * Description: Squeeze step of SHAKE256 XOF. Squeezes full blocks of + * SHAKE256_RATE bytes each. Modifies the state. Can be called + * multiple times to keep squeezing, i.e., is incremental. + * + * Arguments: - uint8_t *output: pointer to output blocks + * - size_t nblocks: number of blocks to be squeezed + * (written to output) + * - shake256ctx *state: pointer to input/output Keccak state + **************************************************/ +void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state) { + keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE256_RATE); +} + +void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); +} + +/** Release the allocated state. Call only once. 
*/ +void shake256_ctx_release(shake256ctx *state) { + (void)state; +} + +/************************************************* + * Name: shake128 + * + * Description: SHAKE128 XOF with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - size_t outlen: requested output length in bytes + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void shake128(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen) { + size_t nblocks = outlen / SHAKE128_RATE; + uint8_t t[SHAKE128_RATE]; + shake128ctx s; + + shake128_absorb(&s, input, inlen); + shake128_squeezeblocks(output, nblocks, &s); + + output += nblocks * SHAKE128_RATE; + outlen -= nblocks * SHAKE128_RATE; + + if (outlen) { + shake128_squeezeblocks(t, 1, &s); + for (size_t i = 0; i < outlen; ++i) { + output[i] = t[i]; + } + } + shake128_ctx_release(&s); +} + +/************************************************* + * Name: shake256 + * + * Description: SHAKE256 XOF with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - size_t outlen: requested output length in bytes + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void shake256(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen) { + size_t nblocks = outlen / SHAKE256_RATE; + uint8_t t[SHAKE256_RATE]; + shake256ctx s; + + shake256_absorb(&s, input, inlen); + shake256_squeezeblocks(output, nblocks, &s); + + output += nblocks * SHAKE256_RATE; + outlen -= nblocks * SHAKE256_RATE; + + if (outlen) { + shake256_squeezeblocks(t, 1, &s); + for (size_t i = 0; i < outlen; ++i) { + output[i] = t[i]; + } + } + shake256_ctx_release(&s); +} + +void sha3_256_inc_init(sha3_256incctx *state) { + keccak_inc_init(state->ctx); +} + +void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void sha3_256_inc_ctx_release(sha3_256incctx *state) { + (void)state; +} + +void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHA3_256_RATE, input, inlen); +} + +void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state) { + uint8_t t[SHA3_256_RATE]; + keccak_inc_finalize(state->ctx, SHA3_256_RATE, 0x06); + + keccak_squeezeblocks(t, 1, state->ctx, SHA3_256_RATE); + + sha3_256_inc_ctx_release(state); + + for (size_t i = 0; i < 32; i++) { + output[i] = t[i]; + } +} + +/************************************************* + * Name: sha3_256 + * + * Description: SHA3-256 with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen) { + uint64_t s[25]; + uint8_t t[SHA3_256_RATE]; + + /* Absorb input */ + keccak_absorb(s, SHA3_256_RATE, input, inlen, 0x06); + + /* Squeeze output */ + keccak_squeezeblocks(t, 1, s, SHA3_256_RATE); + + for (size_t i = 0; i < 32; i++) { + output[i] = t[i]; + } +} + +void sha3_384_inc_init(sha3_384incctx *state) { + keccak_inc_init(state->ctx); +} + +void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, 
size_t inlen) { + keccak_inc_absorb(state->ctx, SHA3_384_RATE, input, inlen); +} + +void sha3_384_inc_ctx_release(sha3_384incctx *state) { + (void)state; +} + +void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state) { + uint8_t t[SHA3_384_RATE]; + keccak_inc_finalize(state->ctx, SHA3_384_RATE, 0x06); + + keccak_squeezeblocks(t, 1, state->ctx, SHA3_384_RATE); + + sha3_384_inc_ctx_release(state); + + for (size_t i = 0; i < 48; i++) { + output[i] = t[i]; + } +} + +/************************************************* + * Name: sha3_384 + * + * Description: SHA3-384 with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen) { + uint64_t s[25]; + uint8_t t[SHA3_384_RATE]; + + /* Absorb input */ + keccak_absorb(s, SHA3_384_RATE, input, inlen, 0x06); + + /* Squeeze output */ + keccak_squeezeblocks(t, 1, s, SHA3_384_RATE); + + for (size_t i = 0; i < 48; i++) { + output[i] = t[i]; + } +} + +void sha3_512_inc_init(sha3_512incctx *state) { + keccak_inc_init(state->ctx); +} + +void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHA3_512_RATE, input, inlen); +} + +void sha3_512_inc_ctx_release(sha3_512incctx *state) { + (void)state; +} + +void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state) { + uint8_t t[SHA3_512_RATE]; + keccak_inc_finalize(state->ctx, SHA3_512_RATE, 0x06); + + keccak_squeezeblocks(t, 1, state->ctx, SHA3_512_RATE); + + sha3_512_inc_ctx_release(state); + + for (size_t i = 0; i < 64; i++) { + output[i] = t[i]; + } +} + +/************************************************* + * Name: sha3_512 + * + * Description: SHA3-512 with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen) { + uint64_t s[25]; + uint8_t t[SHA3_512_RATE]; + + /* Absorb input */ + keccak_absorb(s, SHA3_512_RATE, input, inlen, 0x06); + + /* Squeeze output */ + keccak_squeezeblocks(t, 1, s, SHA3_512_RATE); + + for (size_t i = 0; i < 64; i++) { + output[i] = t[i]; + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fips202.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fips202.h new file mode 100644 index 0000000000..c29ebd8f9d --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fips202.h @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef FIPS202_H +#define FIPS202_H + +#include +#include + +#define SHAKE128_RATE 168 +#define SHAKE256_RATE 136 +#define SHA3_256_RATE 136 +#define SHA3_384_RATE 104 +#define SHA3_512_RATE 72 + +#define PQC_SHAKEINCCTX_U64WORDS 26 +#define PQC_SHAKECTX_U64WORDS 25 + +#define PQC_SHAKEINCCTX_BYTES (sizeof(uint64_t) * 26) +#define PQC_SHAKECTX_BYTES (sizeof(uint64_t) * 25) + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} shake128incctx; + +// Context for non-incremental API +typedef struct { + uint64_t ctx[PQC_SHAKECTX_U64WORDS]; +} shake128ctx; + +// Context for incremental API +typedef struct { + 
uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} shake256incctx; + +// Context for non-incremental API +typedef struct { + uint64_t ctx[PQC_SHAKECTX_U64WORDS]; +} shake256ctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} sha3_256incctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} sha3_384incctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} sha3_512incctx; + +/* Initialize the state and absorb the provided input. + * + * This function does not support being called multiple times + * with the same state. + */ +void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen); +/* Squeeze output out of the sponge. + * + * Supports being called multiple times + */ +void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state); +/* Free the state */ +void shake128_ctx_release(shake128ctx *state); +/* Copy the state. */ +void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src); + +/* Initialize incremental hashing API */ +void shake128_inc_init(shake128incctx *state); +/* Absorb more information into the XOF. + * + * Can be called multiple times. + */ +void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen); +/* Finalize the XOF for squeezing */ +void shake128_inc_finalize(shake128incctx *state); +/* Squeeze output out of the sponge. + * + * Supports being called multiple times + */ +void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state); +/* Copy the context of the SHAKE128 XOF */ +void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src); +/* Free the context of the SHAKE128 XOF */ +void shake128_inc_ctx_release(shake128incctx *state); + +/* Initialize the state and absorb the provided input. + * + * This function does not support being called multiple times + * with the same state. + */ +void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen); +/* Squeeze output out of the sponge. + * + * Supports being called multiple times + */ +void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state); +/* Free the context held by this XOF */ +void shake256_ctx_release(shake256ctx *state); +/* Copy the context held by this XOF */ +void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src); + +/* Initialize incremental hashing API */ +void shake256_inc_init(shake256incctx *state); +void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen); +/* Prepares for squeeze phase */ +void shake256_inc_finalize(shake256incctx *state); +/* Squeeze output out of the sponge. 
+ * + * Supports being called multiple times + */ +void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state); +/* Copy the state */ +void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src); +/* Free the state */ +void shake256_inc_ctx_release(shake256incctx *state); + +/* One-stop SHAKE128 call */ +void shake128(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen); + +/* One-stop SHAKE256 call */ +void shake256(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen); + +/* Initialize the incremental hashing state */ +void sha3_256_inc_init(sha3_256incctx *state); +/* Absorb blocks into SHA3 */ +void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen); +/* Obtain the output of the function and free `state` */ +void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state); +/* Copy the context */ +void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src); +/* Release the state, don't use if `_finalize` has been used */ +void sha3_256_inc_ctx_release(sha3_256incctx *state); + +void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen); + +/* Initialize the incremental hashing state */ +void sha3_384_inc_init(sha3_384incctx *state); +/* Absorb blocks into SHA3 */ +void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, size_t inlen); +/* Obtain the output of the function and free `state` */ +void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state); +/* Copy the context */ +void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src); +/* Release the state, don't use if `_finalize` has been used */ +void sha3_384_inc_ctx_release(sha3_384incctx *state); + +/* One-stop SHA3-384 shop */ +void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen); + +/* Initialize the incremental hashing state */ +void sha3_512_inc_init(sha3_512incctx *state); +/* Absorb blocks into SHA3 */ +void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen); +/* Obtain the output of the function and free `state` */ +void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state); +/* Copy the context */ +void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src); +/* Release the state, don't use if `_finalize` has been used */ +void sha3_512_inc_ctx_release(sha3_512incctx *state); + +/* One-stop SHA3-512 shop */ +void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp.c new file mode 100644 index 0000000000..1ac3a49967 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp.c @@ -0,0 +1,108 @@ +#include +#include "fp.h" + +const digit_t p[NWORDS_FIELD] = { 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0x40ffffffffffffff }; +const digit_t p2[NWORDS_FIELD] = { 0xfffffffffffffffe, 0xffffffffffffffff, 0xffffffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0x81ffffffffffffff }; + +void +fp_sqrt(fp_t *x) +{ + (void)gf65376_sqrt(x, x); +} + +uint32_t +fp_is_square(const fp_t *a) +{ + // ls is (0, 1, -1) and we want fp_is_square + // to return 0xFF..FF when ls is 1 or 0 and 0x00..00 otherwise + int32_t ls = gf65376_legendre(a); + return ~(uint32_t)(ls >> 1); +} + +void +fp_inv(fp_t *x) +{ + (void)gf65376_invert(x, x); +} + +void +fp_exp3div4(fp_t *a) +{ + // + // We optimise this by 
using the shape of the prime + // to avoid almost all multiplications: + // + // We write: + // (p - 3) / 4 = (65*2^376 - 4) / 4 + // = 65*2^374 - 1 + // = 65*(2^374 - 1) + 64 + // Then we first compute: + // a374 = a**(2^374 - 1) + // Then from this we get the desired result as: + // a**((p-3)/4) = a374**65 * a**64 + // We can compute this with 13 multiplications and 383 squares. + fp_t z3, z64, t11, tmp; + // Compute a**3, a**4 and a**64 + fp_sqr(&z64, a); + fp_mul(&z3, a, &z64); + fp_sqr(&z64, &z64); + // Compute t11 = a^3 * a^4 = a**(2^3 - 1) = a**7 + fp_mul(&t11, &z3, &z64); + fp_sqr(&z64, &z64); + fp_sqr(&z64, &z64); + fp_sqr(&z64, &z64); + fp_sqr(&z64, &z64); + // Compute a**(2^4 - 1) = a**15 + fp_sqr(&tmp, &t11); + fp_mul(a, a, &tmp); + // Compute a**(2^8 - 1) + fp_sqr(&tmp, a); + for (int i = 1; i < 4; i++) + fp_sqr(&tmp, &tmp); + fp_mul(a, a, &tmp); + // Compute a**(2^11 - 1) + for (int i = 0; i < 3; i++) + fp_sqr(a, a); + fp_mul(&t11, &t11, a); + // Compute a**(2^22 - 1) + fp_sqr(&tmp, &t11); + for (int i = 1; i < 11; i++) + fp_sqr(&tmp, &tmp); + fp_mul(a, &t11, &tmp); + // Compute a**(2^44 - 1) + fp_sqr(&tmp, a); + for (int i = 1; i < 22; i++) + fp_sqr(&tmp, &tmp); + fp_mul(a, a, &tmp); + // Compute a**(2^88 - 1) + fp_sqr(&tmp, a); + for (int i = 1; i < 44; i++) + fp_sqr(&tmp, &tmp); + fp_mul(a, a, &tmp); + // Compute a**(2^176 - 1)' + fp_sqr(&tmp, a); + for (int i = 1; i < 88; i++) + fp_sqr(&tmp, &tmp); + fp_mul(a, a, &tmp); + // Compute a**(2^187 - 1) + for (int i = 0; i < 11; i++) + fp_sqr(a, a); + fp_mul(a, a, &t11); + // Compute a**(2^374 - 1) + fp_sqr(&tmp, a); + for (int i = 1; i < 187; i++) + fp_sqr(&tmp, &tmp); + fp_mul(a, a, &tmp); + // Compute a**(65*(2^374 - 1)) + fp_sqr(&tmp, a); + fp_sqr(&tmp, &tmp); + fp_sqr(&tmp, &tmp); + fp_sqr(&tmp, &tmp); + fp_sqr(&tmp, &tmp); + fp_sqr(&tmp, &tmp); + fp_mul(a, a, &tmp); + // Compute a**(65*(2^374 - 1) + 64) + fp_mul(a, a, &z64); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp.h new file mode 100644 index 0000000000..04e360fe19 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp.h @@ -0,0 +1,135 @@ +#ifndef FP_H +#define FP_H + +// Include statements +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gf65376.h" + +// Type for elements of GF(p) +#define fp_t gf65376 + +// Operations in fp +static inline void +fp_neg(fp_t *d, const fp_t *a) +{ + gf65376_neg(d, a); +} + +void fp_add(fp_t *out, const fp_t *a, const fp_t *b); // implemented in fp_asm.S +void fp_sub(fp_t *out, const fp_t *a, const fp_t *b); // implemented in fp_asm.S +void fp_sqr(fp_t *out, const fp_t *a); // implemented in fp_asm.S +void fp_mul(fp_t *out, const fp_t *a, const fp_t *b); // implemented in fp_asm.S + +static inline void +fp_mul_small(fp_t *d, const fp_t *a, uint32_t n) +{ + gf65376_mul_small(d, a, n); +} + +static inline void +fp_half(fp_t *d, const fp_t *a) +{ + gf65376_half(d, a); +} +// #define fp_half gf65376_half + +static inline void +fp_div3(fp_t *d, const fp_t *a) +{ + gf65376_div3(d, a); +} +// #define fp_div3 gf65376_div3 + +// Constant time selection and swapping +static inline void +fp_select(fp_t *d, const fp_t *a0, const fp_t *a1, uint32_t ctl) +{ + gf65376_select(d, a0, a1, ctl); +} +// #define fp_select gf65376_select +static inline void +fp_cswap(fp_t *a, fp_t *b, uint32_t ctl) +{ + gf65376_cswap(a, b, ctl); +} +// #define fp_cswap gf65376_cswap + +// Comparisons for fp elements +static inline 
uint32_t +fp_is_zero(const fp_t *a) +{ + return gf65376_iszero(a); +} +// #define fp_is_zero gf65376_iszero + +static inline uint32_t +fp_is_equal(const fp_t *a, const fp_t *b) +{ + return gf65376_equals(a, b); +} +// #define fp_is_equal gf65376_equals + +// Set a uint32 to an Fp value +static inline void +fp_set_small(fp_t *d, uint32_t x) +{ + gf65376_set_small(d, x); +} +// #define fp_set_small gf65376_set_small + +// Encoding and decoding of bytes +static inline void +fp_encode(void *dst, const fp_t *a) +{ + gf65376_encode(dst, a); +} +// #define fp_encode gf65376_encode +static inline uint32_t +fp_decode(fp_t *d, const void *src) +{ + return gf65376_decode(d, src); +} +// #define fp_decode gf65376_decode +static inline void +fp_decode_reduce(fp_t *d, const void *src, size_t len) +{ + gf65376_decode_reduce(d, src, len); +} +// #define fp_decode_reduce gf65376_decode_reduce + +// These functions are essentially useless because we can just +// use = for the shallow copies we need, but they're here for +// now until we do a larger refactoring +static inline void +fp_copy(fp_t *out, const fp_t *a) +{ + memcpy(out, a, sizeof(fp_t)); +} + +static inline void +fp_set_zero(fp_t *a) +{ + memcpy(a, &ZERO, sizeof(fp_t)); +} + +static inline void +fp_set_one(fp_t *a) +{ + memcpy(a, &ONE, sizeof(fp_t)); +} + +// Functions defined in low level code but with different API +void fp_inv(fp_t *a); +void fp_sqrt(fp_t *a); +void fp_exp3div4(fp_t *a); +uint32_t fp_is_square(const fp_t *a); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp2.c new file mode 100644 index 0000000000..3269f6c66f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp2.c @@ -0,0 +1,188 @@ +#include "fp2.h" +#include +#include + +/* Arithmetic modulo X^2 + 1 */ + +void +fp2_encode(void *dst, const fp2_t *a) +{ + uint8_t *buf = dst; + fp_encode(buf, &(a->re)); + fp_encode(buf + FP_ENCODED_BYTES, &(a->im)); +} + +uint32_t +fp2_decode(fp2_t *d, const void *src) +{ + const uint8_t *buf = src; + uint32_t re, im; + + re = fp_decode(&(d->re), buf); + im = fp_decode(&(d->im), buf + FP_ENCODED_BYTES); + return re & im; +} + +void +fp2_inv(fp2_t *x) +{ + fp_t t0, t1; + + fp_sqr(&t0, &(x->re)); + fp_sqr(&t1, &(x->im)); + fp_add(&t0, &t0, &t1); + fp_inv(&t0); + fp_mul(&(x->re), &(x->re), &t0); + fp_mul(&(x->im), &(x->im), &t0); + fp_neg(&(x->im), &(x->im)); +} + +void +fp2_batched_inv(fp2_t *x, int len) +{ + fp2_t t1[len], t2[len]; + fp2_t inverse; + + // x = x0,...,xn + // t1 = x0, x0*x1, ... ,x0 * x1 * ... * xn + t1[0] = x[0]; + for (int i = 1; i < len; i++) { + fp2_mul(&t1[i], &t1[i - 1], &x[i]); + } + + // inverse = 1/ (x0 * x1 * ... * xn) + inverse = t1[len - 1]; + fp2_inv(&inverse); + t2[0] = inverse; + + // t2 = 1/ (x0 * x1 * ... * xn), 1/ (x0 * x1 * ... * x(n-1)) , ... , 1/xO + for (int i = 1; i < len; i++) { + fp2_mul(&t2[i], &t2[i - 1], &x[len - i]); + } + + x[0] = t2[len - 1]; + for (int i = 1; i < len; i++) { + fp2_mul(&x[i], &t1[i - 1], &t2[len - i - 1]); + } +} + +uint32_t +fp2_is_square(const fp2_t *x) +{ + fp_t t0, t1; + + fp_sqr(&t0, &(x->re)); + fp_sqr(&t1, &(x->im)); + fp_add(&t0, &t0, &t1); + + return fp_is_square(&t0); +} + +void +fp2_sqrt(fp2_t *a) +{ + fp_t x0, x1, t0, t1; + + /* From "Optimized One-Dimensional SQIsign Verification on Intel and + * Cortex-M4" by Aardal et al: https://eprint.iacr.org/2024/1563 */ + + // x0 = \delta = sqrt(a0^2 + a1^2). 
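+    // Note on the single-exponentiation trick used in the steps below: for this
+    // field p = 65*2^376 - 1, so p = 3 (mod 4) and, for a nonzero square t in
+    // GF(p), t^((p-3)/4) = t^((p+1)/4)/t = 1/sqrt(t). One call to fp_exp3div4()
+    // therefore yields both x0 = (a0 + \delta)/sqrt(2*(a0 + \delta)) =
+    // sqrt((a0 + \delta)/2) and x1 = a1/sqrt(2*(a0 + \delta)) = a1/(2*x0);
+    // the later check (2*x0)^2 == 2*(a0 + \delta) detects whether
+    // 2*(a0 + \delta) was in fact a square.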
+ fp_sqr(&x0, &(a->re)); + fp_sqr(&x1, &(a->im)); + fp_add(&x0, &x0, &x1); + fp_sqrt(&x0); + // If a1 = 0, there is a risk of \delta = -a0, which makes x0 = 0 below. + // In that case, we restore the value \delta = a0. + fp_select(&x0, &x0, &(a->re), fp_is_zero(&(a->im))); + // x0 = \delta + a0, t0 = 2 * x0. + fp_add(&x0, &x0, &(a->re)); + fp_add(&t0, &x0, &x0); + // x1 = t0^(p-3)/4. + fp_copy(&x1, &t0); + fp_exp3div4(&x1); + // x0 = x0 * x1, x1 = x1 * a1, t1 = (2x0)^2. + fp_mul(&x0, &x0, &x1); + fp_mul(&x1, &x1, &(a->im)); + fp_add(&t1, &x0, &x0); + fp_sqr(&t1, &t1); + // If t1 = t0, return x0 + x1*i, otherwise x1 - x0*i. + fp_sub(&t0, &t0, &t1); + uint32_t f = fp_is_zero(&t0); + fp_neg(&t1, &x0); + fp_copy(&t0, &x1); + fp_select(&t0, &t0, &x0, f); + fp_select(&t1, &t1, &x1, f); + + // Check if t0 is zero + uint32_t t0_is_zero = fp_is_zero(&t0); + // Check whether t0, t1 are odd + // Note: we encode to ensure canonical representation + uint8_t tmp_bytes[FP_ENCODED_BYTES]; + fp_encode(tmp_bytes, &t0); + uint32_t t0_is_odd = -((uint32_t)tmp_bytes[0] & 1); + fp_encode(tmp_bytes, &t1); + uint32_t t1_is_odd = -((uint32_t)tmp_bytes[0] & 1); + // We negate the output if: + // t0 is odd, or + // t0 is zero and t1 is odd + uint32_t negate_output = t0_is_odd | (t0_is_zero & t1_is_odd); + fp_neg(&x0, &t0); + fp_select(&(a->re), &t0, &x0, negate_output); + fp_neg(&x0, &t1); + fp_select(&(a->im), &t1, &x0, negate_output); +} + +uint32_t +fp2_sqrt_verify(fp2_t *a) +{ + fp2_t t0, t1; + + fp2_copy(&t0, a); + fp2_sqrt(a); + fp2_sqr(&t1, a); + + return (fp2_is_equal(&t0, &t1)); +} + +// exponentiation +void +fp2_pow_vartime(fp2_t *out, const fp2_t *x, const digit_t *exp, const int size) +{ + fp2_t acc; + digit_t bit; + + fp2_copy(&acc, x); + fp2_set_one(out); + + // Iterate over each word of exp + for (int j = 0; j < size; j++) { + // Iterate over each bit of the word + for (int i = 0; i < RADIX; i++) { + bit = (exp[j] >> i) & 1; + if (bit == 1) { + fp2_mul(out, out, &acc); + } + fp2_sqr(&acc, &acc); + } + } +} + +void +fp2_print(const char *name, const fp2_t *a) +{ + printf("%s0x", name); + + uint8_t buf[FP_ENCODED_BYTES]; + fp_encode(&buf, &a->re); // Encoding ensures canonical rep + for (int i = 0; i < FP_ENCODED_BYTES; i++) { + printf("%02x", buf[FP_ENCODED_BYTES - i - 1]); + } + + printf(" + i*0x"); + + fp_encode(&buf, &a->im); + for (int i = 0; i < FP_ENCODED_BYTES; i++) { + printf("%02x", buf[FP_ENCODED_BYTES - i - 1]); + } + printf("\n"); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp2.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp2.h new file mode 100644 index 0000000000..81801fa9a9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp2.h @@ -0,0 +1,45 @@ +#ifndef FP2_H +#define FP2_H + +#define NO_FP2X_MUL +#define NO_FP2X_SQR + +#include + +extern void fp2_sq_c0(fp2_t *out, const fp2_t *in); +extern void fp2_sq_c1(fp_t *out, const fp2_t *in); + +extern void fp2_mul_c0(fp_t *out, const fp2_t *in0, const fp2_t *in1); +extern void fp2_mul_c1(fp_t *out, const fp2_t *in0, const fp2_t *in1); + +static inline void +fp2_mul(fp2_t *x, const fp2_t *y, const fp2_t *z) +{ + fp_t t; + + fp2_mul_c0(&t, y, z); // c0 = a0*b0 - a1*b1 + fp2_mul_c1(&x->im, y, z); // c1 = a0*b1 + a1*b0 + x->re.arr[0] = t.arr[0]; + x->re.arr[1] = t.arr[1]; + x->re.arr[2] = t.arr[2]; + x->re.arr[3] = t.arr[3]; + x->re.arr[4] = t.arr[4]; + x->re.arr[5] = t.arr[5]; +} + +static inline void +fp2_sqr(fp2_t *x, const fp2_t *y) +{ + fp2_t t; + + fp2_sq_c0(&t, y); // c0 = (a0+a1)(a0-a1) 
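+    // The c0 result is staged in a temporary, presumably so that x may alias y:
+    // writing it straight into x->re would clobber y->re before fp2_sq_c1
+    // reads it in the aliased case.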
+ fp2_sq_c1(&x->im, y); // c1 = 2a0*a1 + x->re.arr[0] = t.re.arr[0]; + x->re.arr[1] = t.re.arr[1]; + x->re.arr[2] = t.re.arr[2]; + x->re.arr[3] = t.re.arr[3]; + x->re.arr[4] = t.re.arr[4]; + x->re.arr[5] = t.re.arr[5]; +} + +#endif \ No newline at end of file diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp2x.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp2x.h new file mode 100644 index 0000000000..44cf103bf2 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp2x.h @@ -0,0 +1,162 @@ +#ifndef FP2X_H +#define FP2X_H + +#include +#include "fp.h" +#include + +// Structure for representing elements in GF(p^2) +typedef struct fp2_t +{ + fp_t re, im; +} fp2_t; + +static inline void +fp2_set_small(fp2_t *x, const uint32_t val) +{ + fp_set_small(&(x->re), val); + fp_set_zero(&(x->im)); +} + +static inline void +fp2_mul_small(fp2_t *x, const fp2_t *y, uint32_t n) +{ + fp_mul_small(&x->re, &y->re, n); + fp_mul_small(&x->im, &y->im, n); +} + +static inline void +fp2_set_zero(fp2_t *x) +{ + fp_set_zero(&(x->re)); + fp_set_zero(&(x->im)); +} + +static inline void +fp2_set_one(fp2_t *x) +{ + fp_set_one(&(x->re)); + fp_set_zero(&(x->im)); +} + +static inline uint32_t +fp2_is_equal(const fp2_t *a, const fp2_t *b) +{ // Compare two GF(p^2) elements in constant time + // Returns 1 (true) if a=b, 0 (false) otherwise + + return fp_is_equal(&(a->re), &(b->re)) & fp_is_equal(&(a->im), &(b->im)); +} + +static inline uint32_t +fp2_is_zero(const fp2_t *a) +{ // Is a GF(p^2) element zero? + // Returns 1 (true) if a=0, 0 (false) otherwise + + return fp_is_zero(&(a->re)) & fp_is_zero(&(a->im)); +} + +static inline uint32_t +fp2_is_one(const fp2_t *a) +{ // Is a GF(p^2) element one? + // Returns 1 (true) if a=1, 0 (false) otherwise + return fp_is_equal(&(a->re), &ONE) & fp_is_zero(&(a->im)); +} + +static inline void +fp2_half(fp2_t *x, const fp2_t *y) +{ + fp_half(&(x->re), &(y->re)); + fp_half(&(x->im), &(y->im)); +} + +static inline void +fp2_add(fp2_t *x, const fp2_t *y, const fp2_t *z) +{ + fp_add(&(x->re), &(y->re), &(z->re)); + fp_add(&(x->im), &(y->im), &(z->im)); +} + +static inline void +fp2_add_one(fp2_t *x, const fp2_t *y) +{ + fp_add(&x->re, &y->re, &ONE); + fp_copy(&x->im, &y->im); +} + +static inline void +fp2_sub(fp2_t *x, const fp2_t *y, const fp2_t *z) +{ + fp_sub(&(x->re), &(y->re), &(z->re)); + fp_sub(&(x->im), &(y->im), &(z->im)); +} + +static inline void +fp2_neg(fp2_t *x, const fp2_t *y) +{ + fp_neg(&(x->re), &(y->re)); + fp_neg(&(x->im), &(y->im)); +} + +#ifndef NO_FP2X_MUL +static inline void +fp2_mul(fp2_t *x, const fp2_t *y, const fp2_t *z) +{ + fp_t t0, t1; + + fp_add(&t0, &(y->re), &(y->im)); + fp_add(&t1, &(z->re), &(z->im)); + fp_mul(&t0, &t0, &t1); + fp_mul(&t1, &(y->im), &(z->im)); + fp_mul(&(x->re), &(y->re), &(z->re)); + fp_sub(&(x->im), &t0, &t1); + fp_sub(&(x->im), &(x->im), &(x->re)); + fp_sub(&(x->re), &(x->re), &t1); +} +#endif + +#ifndef NO_FP2X_SQR +static inline void +fp2_sqr(fp2_t *x, const fp2_t *y) +{ + fp_t sum, diff; + + fp_add(&sum, &(y->re), &(y->im)); + fp_sub(&diff, &(y->re), &(y->im)); + fp_mul(&(x->im), &(y->re), &(y->im)); + fp_add(&(x->im), &(x->im), &(x->im)); + fp_mul(&(x->re), &sum, &diff); +} +#endif + +static inline void +fp2_select(fp2_t *d, const fp2_t *a0, const fp2_t *a1, uint32_t ctl) +{ + fp_select(&(d->re), &(a0->re), &(a1->re), ctl); + fp_select(&(d->im), &(a0->im), &(a1->im), ctl); +} + +static inline void +fp2_cswap(fp2_t *a, fp2_t *b, uint32_t ctl) +{ + fp_cswap(&(a->re), &(b->re), ctl); + 
fp_cswap(&(a->im), &(b->im), ctl); +} + +static inline void +fp2_copy(fp2_t *x, const fp2_t *y) +{ + *x = *y; +} + +// New functions +void fp2_encode(void *dst, const fp2_t *a); +uint32_t fp2_decode(fp2_t *d, const void *src); +void fp2_inv(fp2_t *x); +uint32_t fp2_is_square(const fp2_t *x); +void fp2_sqrt(fp2_t *x); +uint32_t fp2_sqrt_verify(fp2_t *a); +void fp2_batched_inv(fp2_t *x, int len); +void fp2_pow_vartime(fp2_t *out, const fp2_t *x, const uint64_t *exp, const int size); +void fp2_print(const char *name, const fp2_t *a); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp_asm.S b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp_asm.S new file mode 100755 index 0000000000..45b12dcf5f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp_asm.S @@ -0,0 +1,825 @@ +#include +.intel_syntax noprefix + +.set pbytes,32 +.set plimbs,4 + +#ifdef __APPLE__ +.section __TEXT,__const +#else +.section .rodata +#endif +p_plus_1: .quad 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x4100000000000000 + +#if defined(__linux__) && defined(__ELF__) +.section .note.GNU-stack,"",@progbits +#endif + +#include + +.text +.p2align 4,,15 + +.global fp_add +fp_add: + push r12 + push r13 + xor rax, rax + mov r8, [rsi] + mov r9, [rsi+8] + mov r10, [rsi+16] + mov r11, [rsi+24] + mov r12, [rsi+32] + mov r13, [rsi+40] + add r8, [rdx] + adc r9, [rdx+8] + adc r10, [rdx+16] + adc r11, [rdx+24] + adc r12, [rdx+32] + adc r13, [rdx+40] + mov rax, r13 + sar rax, 63 + mov rdx, [rip+p+40] + and rdx, rax + sub r8, rax + sbb r9, rax + sbb r10, rax + sbb r11, rax + sbb r12, rax + sbb r13, rdx + + mov rax, r13 + sar rax, 63 + mov rdx, [rip+p+40] + and rdx, rax + sub r8, rax + sbb r9, rax + sbb r10, rax + sbb r11, rax + sbb r12, rax + sbb r13, rdx + mov [rdi], r8 + mov [rdi+8], r9 + mov [rdi+16], r10 + mov [rdi+24], r11 + mov [rdi+32], r12 + mov [rdi+40], r13 + pop r13 + pop r12 + ret + +.global fp_sub +fp_sub: + push r12 + push r13 + xor rax, rax + mov r8, [rsi] + mov r9, [rsi+8] + mov r10, [rsi+16] + mov r11, [rsi+24] + mov r12, [rsi+32] + mov r13, [rsi+40] + sub r8, [rdx] + sbb r9, [rdx+8] + sbb r10, [rdx+16] + sbb r11, [rdx+24] + sbb r12, [rdx+32] + sbb r13, [rdx+40] + sbb rax, 0 + + mov rdx, [rip+p+40] + and rdx, rax + add r8, rax + adc r9, rax + adc r10, rax + adc r11, rax + adc r12, rax + adc r13, rdx + + mov rax, r13 + sar rax, 63 + mov rdx, [rip+p+40] + and rdx, rax + add r8, rax + adc r9, rax + adc r10, rax + adc r11, rax + adc r12, rax + adc r13, rdx + + mov [rdi], r8 + mov [rdi+8], r9 + mov [rdi+16], r10 + mov [rdi+24], r11 + mov [rdi+32], r12 + mov [rdi+40], r13 + pop r13 + pop r12 + ret + +///////////////////////////////////////////////////////////////// MACROS +// z = a x bi + z +// Inputs: base memory pointer M1 (a), +// bi pre-stored in rdx, +// accumulator z in [Z0:Z6] +// Output: [Z0:Z6] +// Temps: regs T0:T1 +///////////////////////////////////////////////////////////////// +.macro MULADD64x384 M1, Z0, Z1, Z2, Z3, Z4, Z5, Z6, T0, T1, C + mulx \T0, \T1, \M1 // A0*B0 + xor \C, \C + adox \Z0, \T1 + adox \Z1, \T0 + mulx \T0, \T1, 8\M1 // A0*B1 + adcx \Z1, \T1 + adox \Z2, \T0 + mulx \T0, \T1, 16\M1 // A0*B2 + adcx \Z2, \T1 + adox \Z3, \T0 + mulx \T0, \T1, 24\M1 // A0*B3 + adcx \Z3, \T1 + adox \Z4, \T0 + mulx \T0, \T1, 32\M1 // A0*B4 + adcx \Z4, \T1 + adox \Z5, \T0 + mulx \T0, \T1, 40\M1 // A0*B5 + adcx \Z5, \T1 + adox \Z6, \T0 + adc \Z6, 0 +.endm + +.macro MULADD64x64 M1, Z0, Z1, Z2, Z3, Z4, Z5, T0, T1 + mulx 
\T0, \T1, \M1 // A0*B0 + xor rax, rax + adox \Z4, \T1 + adox \Z5, \T0 +.endm + +//*********************************************************************** +// Multiplication in GF(p^2), non-complex part +// Operation: c [rdi] = a0 x b0 - a1 x b1 +// Inputs: a = [a1, a0] stored in [rsi] +// b = [b1, b0] stored in [rdx] +// Output: c stored in [rdi] +//*********************************************************************** +.global fp2_mul_c0 +fp2_mul_c0: + push r12 + push r13 + push r14 + push r15 + push rbx + mov rcx, rdx + sub rsp, 96 + + // [rdi0:5] <- 2p - b1 + mov r8, [rip+p2] + mov r9, [rip+p2+8] + mov r10, r9 + mov r11, r9 + mov r12, r9 + mov r13, [rip+p2+40] + mov rax, [rcx+48] + mov rdx, [rcx+56] + sub r8, rax + sbb r9, rdx + mov rax, [rcx+64] + mov rdx, [rcx+72] + sbb r10, rax + sbb r11, rdx + mov rax, [rcx+80] + mov rdx, [rcx+88] + sbb r12, rax + sbb r13, rdx + mov [rdi], r8 + mov [rdi+8], r9 + mov [rdi+16], r10 + mov [rdi+24], r11 + mov [rdi+32], r12 + mov [rdi+40], r13 + + // Correcting a to [0,p) + xor rax, rax + mov r8, [rsi+48] + mov r9, [rsi+56] + mov r10, [rsi+64] + mov r11, [rsi+72] + mov r12, [rsi+80] + mov r13, [rsi+88] + mov rbx, [rip+p] + mov rdx, [rip+p+40] + sub r8, rbx + sbb r9, rbx + sbb r10, rbx + sbb r11, rbx + sbb r12, rbx + sbb r13, rdx + sbb rax, 0 + and rdx, rax + add r8, rax + adc r9, rax + adc r10, rax + adc r11, rax + adc r12, rax + adc r13, rdx + mov [rsp+48], r8 + mov [rsp+56], r9 + mov [rsp+64], r10 + mov [rsp+72], r11 + mov [rsp+80], r12 + mov [rsp+88], r13 + + xor rax, rax + mov r8, [rsi] + mov r10, [rsi+8] + mov r12, [rsi+16] + mov r13, [rsi+24] + mov r14, [rsi+32] + mov r15, [rsi+40] + mov rdx, [rip+p+40] + sub r8, rbx + sbb r10, rbx + sbb r12, rbx + sbb r13, rbx + sbb r14, rbx + sbb r15, rdx + sbb rax, 0 + and rdx, rax + add r8, rax + adc r10, rax + adc r12, rax + adc r13, rax + adc r14, rax + adc r15, rdx + mov [rsp], r8 + mov [rsp+8], r10 + mov [rsp+16], r12 + mov [rsp+24], r13 + mov [rsp+32], r14 + mov [rsp+40], r15 + + // [r8:r14] <- z = a0 x b00 - a1 x b10 + mov rdx, [rcx] + mulx r9, r8, r8 + xor rax, rax + mulx r10, r11, r10 + adox r9, r11 + mulx r11, r12, r12 + adox r10, r12 + mulx r12, r13, r13 + adox r11, r13 + mulx r13, r14, r14 + adox r12, r14 + mulx r14, r15, r15 + adox r13, r15 + adox r14, rax + + mov rdx, [rdi] + MULADD64x384 [rsp+48], r8, r9, r10, r11, r12, r13, r14, r15, rbx, rax + // [r9:r14] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r8 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+40], r9, r10, r11, r12, r13, r14, r15, rbx + + // [r9:r14, r8] <- z = a0 x b01 - a1 x b11 + z + mov rdx, [rcx+8] + MULADD64x384 [rsp], r9, r10, r11, r12, r13, r14, r8, r15, rbx, r8 + mov rdx, [rdi+8] + MULADD64x384 [rsp+48], r9, r10, r11, r12, r13, r14, r8, r15, rbx, rax + // [r10:r14, r8] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r9 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+40], r10, r11, r12, r13, r14, r8, r15, rbx + + // [r10:r14, r8:r9] <- z = a0 x b02 - a1 x b12 + z + mov rdx, [rcx+16] + MULADD64x384 [rsp], r10, r11, r12, r13, r14, r8, r9, r15, rbx, r9 + mov rdx, [rdi+16] + MULADD64x384 [rsp+48], r10, r11, r12, r13, r14, r8, r9, r15, rbx, rax + // [r11:r14, r8:r9] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r10 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+40], r11, r12, r13, r14, r8, r9, r15, rbx + + // [r11:r14, r8:r10] <- z = a0 x b03 - a1 x b13 + z + mov rdx, [rcx+24] + MULADD64x384 [rsp], r11, r12, r13, r14, r8, r9, r10, r15, rbx, r10 + mov rdx, [rdi+24] + MULADD64x384 [rsp+48], r11, r12, r13, r14, r8, r9, r10, r15, rbx, rax + // [r14, r8:r10] <- z = (z0 
x p_plus_1 + z)/2^64 + mov rdx, r11 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+40], r12, r13, r14, r8, r9, r10, r15, rbx + + // [r12:r14, r8:r11] <- z = a0 x b04 - a1 x b14 + z + mov rdx, [rcx+32] + MULADD64x384 [rsp], r12, r13, r14, r8, r9, r10, r11, r15, rbx, r11 + mov rdx, [rdi+32] + MULADD64x384 [rsp+48], r12, r13, r14, r8, r9, r10, r11, r15, rbx, rax + // [r14, r8:r11] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r12 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+40], r13, r14, r8, r9, r10, r11, r15, rbx + + // [r13:r14, r8:r12] <- z = a0 x b05 - a1 x b15 + z + mov rdx, [rcx+40] + MULADD64x384 [rsp], r13, r14, r8, r9, r10, r11, r12, r15, rbx, r12 + mov rdx, [rdi+40] + MULADD64x384 [rsp+48], r13, r14, r8, r9, r10, r11, r12, r15, rbx, rax + // [r14, r8:r12] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r13 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+40], r14, r8, r9, r10, r11, r12, r15, rbx + + // Final correction + mov rax, r12 + sar rax, 63 + mov rdx, [rip+p+40] + and rdx, rax + sub r14, rax + sbb r8, rax + sbb r9, rax + sbb r10, rax + sbb r11, rax + sbb r12, rdx + + mov [rdi], r14 + mov [rdi+8], r8 + mov [rdi+16], r9 + mov [rdi+24], r10 + mov [rdi+32], r11 + mov [rdi+40], r12 + add rsp, 96 + pop rbx + pop r15 + pop r14 + pop r13 + pop r12 + ret + +//*********************************************************************** +// Multiplication in GF(p^2), complex part +// Operation: c [rdi] = a0 x b1 + a1 x b0 +// Inputs: a = [a1, a0] stored in [rsi] +// b = [b1, b0] stored in [rdx] +// Output: c stored in [rdi] +//*********************************************************************** +.global fp2_mul_c1 +fp2_mul_c1: + push r12 + push r13 + push r14 + push r15 + push rbx + mov rcx, rdx + sub rsp, 96 + + // Correcting a to [0,p) + xor rax, rax + mov r8, [rsi+48] + mov r9, [rsi+56] + mov r10, [rsi+64] + mov r11, [rsi+72] + mov r12, [rsi+80] + mov r13, [rsi+88] + mov rbx, [rip+p] + mov rdx, [rip+p+40] + sub r8, rbx + sbb r9, rbx + sbb r10, rbx + sbb r11, rbx + sbb r12, rbx + sbb r13, rdx + sbb rax, 0 + and rdx, rax + add r8, rax + adc r9, rax + adc r10, rax + adc r11, rax + adc r12, rax + adc r13, rdx + mov [rsp+48], r8 + mov [rsp+56], r9 + mov [rsp+64], r10 + mov [rsp+72], r11 + mov [rsp+80], r12 + mov [rsp+88], r13 + + xor rax, rax + mov r8, [rsi] + mov r10, [rsi+8] + mov r12, [rsi+16] + mov r13, [rsi+24] + mov r14, [rsi+32] + mov r15, [rsi+40] + mov rdx, [rip+p+40] + sub r8, rbx + sbb r10, rbx + sbb r12, rbx + sbb r13, rbx + sbb r14, rbx + sbb r15, rdx + sbb rax, 0 + and rdx, rax + add r8, rax + adc r10, rax + adc r12, rax + adc r13, rax + adc r14, rax + adc r15, rdx + mov [rsp], r8 + mov [rsp+8], r10 + mov [rsp+16], r12 + mov [rsp+24], r13 + mov [rsp+32], r14 + mov [rsp+40], r15 + + // [r8:r14] <- z = a0 x b10 + a1 x b00 + mov rdx, [rcx+48] + mulx r9, r8, r8 + xor rax, rax + mulx r10, r11, r10 + adox r9, r11 + mulx r11, r12, r12 + adox r10, r12 + mulx r12, r13, r13 + adox r11, r13 + mulx r13, r14, r14 + adox r12, r14 + mulx r14, r15, r15 + adox r13, r15 + adox r14, rax + + mov rdx, [rcx] + MULADD64x384 [rsp+48], r8, r9, r10, r11, r12, r13, r14, r15, rbx, rax + // [r9:r14] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r8 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+40], r9, r10, r11, r12, r13, r14, r15, rbx + + // [r9:r14, r8] <- z = a0 x b01 - a1 x b11 + z + mov rdx, [rcx+56] + MULADD64x384 [rsi], r9, r10, r11, r12, r13, r14, r8, r15, rbx, r8 + mov rdx, [rcx+8] + MULADD64x384 [rsp+48], r9, r10, r11, r12, r13, r14, r8, r15, rbx, rax + // [r10:r14, r8] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r9 // rdx <- z0 
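+    // As in the macro comments, this is one word of Montgomery-style reduction:
+    // the low limb of p is all ones, so p = -1 (mod 2^64) and the quotient digit
+    // is z0 itself; adding z0 x (p+1), where p+1 = 0x41*2^376 has a single
+    // nonzero limb, and dropping the low word yields (z + z0 x p)/2^64 exactly.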
+ MULADD64x64 [rip+p_plus_1+40], r10, r11, r12, r13, r14, r8, r15, rbx + + // [r10:r14, r8:r9] <- z = a0 x b02 - a1 x b12 + z + mov rdx, [rcx+64] + MULADD64x384 [rsp], r10, r11, r12, r13, r14, r8, r9, r15, rbx, r9 + mov rdx, [rcx+16] + MULADD64x384 [rsp+48], r10, r11, r12, r13, r14, r8, r9, r15, rbx, rax + // [r11:r14, r8:r9] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r10 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+40], r11, r12, r13, r14, r8, r9, r15, rbx + + // [r11:r14, r8:r10] <- z = a0 x b03 - a1 x b13 + z + mov rdx, [rcx+72] + MULADD64x384 [rsp], r11, r12, r13, r14, r8, r9, r10, r15, rbx, r10 + mov rdx, [rcx+24] + MULADD64x384 [rsp+48], r11, r12, r13, r14, r8, r9, r10, r15, rbx, rax + // [r14, r8:r10] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r11 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+40], r12, r13, r14, r8, r9, r10, r15, rbx + + // [r12:r14, r8:r11] <- z = a0 x b04 - a1 x b14 + z + mov rdx, [rcx+80] + MULADD64x384 [rsp], r12, r13, r14, r8, r9, r10, r11, r15, rbx, r11 + mov rdx, [rcx+32] + MULADD64x384 [rsp+48], r12, r13, r14, r8, r9, r10, r11, r15, rbx, rax + // [r14, r8:r11] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r12 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+40], r13, r14, r8, r9, r10, r11, r15, rbx + + // [r13:r14, r8:r12] <- z = a0 x b05 - a1 x b15 + z + mov rdx, [rcx+88] + MULADD64x384 [rsp], r13, r14, r8, r9, r10, r11, r12, r15, rbx, r12 + mov rdx, [rcx+40] + MULADD64x384 [rsp+48], r13, r14, r8, r9, r10, r11, r12, r15, rbx, rax + // [r14, r8:r12] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r13 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+40], r14, r8, r9, r10, r11, r12, r15, rbx + + // Final correction + mov rax, r12 + sar rax, 63 + mov rdx, [rip+p+40] + and rdx, rax + sub r14, rax + sbb r8, rax + sbb r9, rax + sbb r10, rax + sbb r11, rax + sbb r12, rdx + + mov [rdi], r14 + mov [rdi+8], r8 + mov [rdi+16], r9 + mov [rdi+24], r10 + mov [rdi+32], r11 + mov [rdi+40], r12 + add rsp, 96 + pop rbx + pop r15 + pop r14 + pop r13 + pop r12 + ret + +///////////////////////////////////////////////////////////////// MACRO +// z = a x b (mod p) +// Inputs: base memory pointers M0 (a), M1 (b) +// bi pre-stored in rdx, +// accumulator z in [Z0:Z6], pre-stores a0 x b +// Output: [Z0:Z6] +// Temps: regs T0:T1 +///////////////////////////////////////////////////////////////// +.macro FPMUL384x384 M0, M1, Z0, Z1, Z2, Z3, Z4, Z5, Z6, T0, T1 + // [Z1:Z6] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, \Z0 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+40], \Z1, \Z2, \Z3, \Z4, \Z5, \Z6, \T0, \T1 + + // [Z1:Z6, Z0] <- z = a01 x a1 + z + mov rdx, 8\M0 + MULADD64x384 \M1, \Z1, \Z2, \Z3, \Z4, \Z5, \Z6, \Z0, \T0, \T1, \Z0 + // [Z2:Z6, Z0] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, \Z1 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+40], \Z2, \Z3, \Z4, \Z5, \Z6, \Z0, \T0, \T1 + + // [Z2:Z6, Z0:Z1] <- z = a02 x a1 + z + mov rdx, 16\M0 + MULADD64x384 \M1, \Z2, \Z3, \Z4, \Z5, \Z6, \Z0, \Z1, \T0, \T1, \Z1 + // [Z3:Z6, Z0:Z1] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, \Z2 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+40], \Z3, \Z4, \Z5, \Z6, \Z0, \Z1, \T0, \T1 + + // [Z3:Z6, Z0:Z2] <- z = a03 x a1 + z + mov rdx, 24\M0 + MULADD64x384 \M1, \Z3, \Z4, \Z5, \Z6, \Z0, \Z1, \Z2, \T0, \T1, \Z2 + // [Z4:Z6, Z0:Z2] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, \Z3 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+40], \Z4, \Z5, \Z6, \Z0, \Z1, \Z2, \T0, \T1 + + // [Z4:Z6, Z0:Z3] <- z = a04 x a1 + z + mov rdx, 32\M0 + MULADD64x384 \M1, \Z4, \Z5, \Z6, \Z0, \Z1, \Z2, \Z3, \T0, \T1, \Z3 + // [Z5:Z6, Z0:Z3] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, \Z4 // rdx <- 
z0 + MULADD64x64 [rip+p_plus_1+40], \Z5, \Z6, \Z0, \Z1, \Z2, \Z3, \T0, \T1 + + // [Z5:Z6, Z0:Z4] <- z = a05 x a1 + z + mov rdx, 40\M0 + MULADD64x384 \M1, \Z5, \Z6, \Z0, \Z1, \Z2, \Z3, \Z4, \T0, \T1, \Z4 + // [Z6, Z0:Z4] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, \Z5 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+40], \Z6, \Z0, \Z1, \Z2, \Z3, \Z4, \T0, \T1 +.endm + +//*********************************************************************** +// Squaring in GF(p^2), non-complex part +// Operation: c [rdi] = (a0+a1) x (a0-a1) +// Inputs: a = [a1, a0] stored in [rsi] +// Output: c stored in [rdi] +//*********************************************************************** +.global fp2_sq_c0 +fp2_sq_c0: + push r12 + push r13 + push r14 + push r15 + + // a0 + a1 + mov rdx, [rsi] + mov r9, [rsi+8] + mov r10, [rsi+16] + mov r11, [rsi+24] + mov r12, [rsi+32] + mov r13, [rsi+40] + add rdx, [rsi+48] + adc r9, [rsi+56] + adc r10, [rsi+64] + adc r11, [rsi+72] + adc r12, [rsi+80] + adc r13, [rsi+88] + mov [rdi], rdx + mov [rdi+8], r9 + mov [rdi+16], r10 + mov [rdi+24], r11 + mov [rdi+32], r12 + mov [rdi+40], r13 + + // a0 - a1 + mov r8, [rsi] + mov r10, [rsi+8] + mov r12, [rsi+16] + mov r13, [rsi+24] + mov r14, [rsi+32] + mov r15, [rsi+40] + xor rax, rax + sub r8, [rsi+48] + sbb r10, [rsi+56] + sbb r12, [rsi+64] + sbb r13, [rsi+72] + sbb r14, [rsi+80] + sbb r15, [rsi+88] + sbb rax, 0 + + mov rcx, [rip+p+40] + and rcx, rax + add r8, rax + adc r10, rax + adc r12, rax + adc r13, rax + adc r14, rax + adc r15, rcx + + mov rax, r15 + sar rax, 63 + mov rcx, [rip+p+40] + and rcx, rax + add r8, rax + adc r10, rax + adc r12, rax + adc r13, rax + adc r14, rax + adc r15, rcx + + mov [rdi+48], r8 + mov [rdi+56], r10 + mov [rdi+64], r12 + mov [rdi+72], r13 + mov [rdi+80], r14 + mov [rdi+88], r15 + + // [r8:r14] <- z = a00 x a1 + mulx r9, r8, r8 + xor rax, rax + mulx r10, r11, r10 + adox r9, r11 + mulx r11, r12, r12 + adox r10, r12 + mulx r12, r13, r13 + adox r11, r13 + mulx r13, r14, r14 + adox r12, r14 + mulx r14, r15, r15 + adox r13, r15 + adox r14, rax + + FPMUL384x384 [rdi], [rdi+48], r8, r9, r10, r11, r12, r13, r14, r15, rcx + + // Final correction + mov rax, r12 + sar rax, 63 + mov rdx, [rip+p+40] + and rdx, rax + sub r14, rax + sbb r8, rax + sbb r9, rax + sbb r10, rax + sbb r11, rax + sbb r12, rdx + + mov [rdi], r14 + mov [rdi+8], r8 + mov [rdi+16], r9 + mov [rdi+24], r10 + mov [rdi+32], r11 + mov [rdi+40], r12 + pop r15 + pop r14 + pop r13 + pop r12 + ret + +//*********************************************************************** +// Squaring in GF(p^2), complex part +// Operation: c [rdi] = 2a0 x a1 +// Inputs: a = [a1, a0] stored in [reg_p1] +// Output: c stored in [rdi] +//*********************************************************************** +.global fp2_sq_c1 +fp2_sq_c1: + push r12 + push r13 + push r14 + push r15 + + mov rdx, [rsi] + mov r9, [rsi+8] + mov r10, [rsi+16] + mov r11, [rsi+24] + mov r12, [rsi+32] + mov r13, [rsi+40] + add rdx, rdx + adc r9, r9 + adc r10, r10 + adc r11, r11 + adc r12, r12 + adc r13, r13 + sub rsp, 48 + mov [rsp+8], r9 + mov [rsp+16], r10 + mov [rsp+24], r11 + mov [rsp+32], r12 + mov [rsp+40], r13 + + // [r8:r12] <- z = a00 x a1 + mulx r9, r8, [rsi+48] + xor rax, rax + mulx r10, r11, [rsi+56] + adox r9, r11 + mulx r11, r12, [rsi+64] + adox r10, r12 + mulx r12, r13, [rsi+72] + adox r11, r13 + mulx r13, r14, [rsi+80] + adox r12, r14 + mulx r14, r15, [rsi+88] + adox r13, r15 + adox r14, rax + + FPMUL384x384 [rsp], [rsi+48], r8, r9, r10, r11, r12, r13, r14, r15, rcx + add rsp, 48 + + // 
Final correction + mov rax, r12 + sar rax, 63 + mov rdx, [rip+p+40] + and rdx, rax + sub r14, rax + sbb r8, rax + sbb r9, rax + sbb r10, rax + sbb r11, rax + sbb r12, rdx + + mov [rdi], r14 + mov [rdi+8], r8 + mov [rdi+16], r9 + mov [rdi+24], r10 + mov [rdi+32], r11 + mov [rdi+40], r12 + pop r15 + pop r14 + pop r13 + pop r12 + ret + +//*********************************************************************** +// Field multiplication in GF(p) +// Operation: c = a x b mod p +// Inputs: a stored in [rsi], b stored in [rdx] +// Output: c stored in [rdi] +//*********************************************************************** +.global fp_mul +fp_mul: + push r12 + push r13 + push r14 + push r15 + push rbx + mov rcx, rdx + + // [r8:r14] <- z = a x b0 + mov rdx, [rcx] + mulx r9, r8, [rsi] + xor rax, rax + mulx r10, r11, [rsi+8] + adox r9, r11 + mulx r11, r12, [rsi+16] + adox r10, r12 + mulx r12, r13, [rsi+24] + adox r11, r13 + mulx r13, r14, [rsi+32] + adox r12, r14 + mulx r14, r15, [rsi+40] + adox r13, r15 + adox r14, rax + + FPMUL384x384 [rcx], [rsi], r8, r9, r10, r11, r12, r13, r14, r15, rbx + + // Final correction + mov rax, r12 + sar rax, 63 + mov rdx, [rip+p+40] + and rdx, rax + sub r14, rax + sbb r8, rax + sbb r9, rax + sbb r10, rax + sbb r11, rax + sbb r12, rdx + + mov [rdi], r14 + mov [rdi+8], r8 + mov [rdi+16], r9 + mov [rdi+24], r10 + mov [rdi+32], r11 + mov [rdi+40], r12 + pop rbx + pop r15 + pop r14 + pop r13 + pop r12 + ret + +.global fp_sqr +fp_sqr: + mov rdx, rsi + jmp fp_mul diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp_constants.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp_constants.h new file mode 100644 index 0000000000..063579ac33 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp_constants.h @@ -0,0 +1,17 @@ +#if RADIX == 32 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +#define NWORDS_FIELD 12 +#else +#define NWORDS_FIELD 14 +#endif +#define NWORDS_ORDER 12 +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +#define NWORDS_FIELD 6 +#else +#define NWORDS_FIELD 7 +#endif +#define NWORDS_ORDER 6 +#endif +#define BITS 384 +#define LOG2P 9 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/gf65376.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/gf65376.c new file mode 100644 index 0000000000..00875b1aa5 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/gf65376.c @@ -0,0 +1,792 @@ +#include "gf65376.h" + +// see gf65376.h +const gf65376 ZERO = { 0, 0, 0, 0, 0, 0 }; + +// see gf65376.h +const gf65376 ONE = { 0x0000000000000003, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x3D00000000000000 }; + +// see gf65376.h +const gf65376 gf65376_MINUS_ONE = { 0xFFFFFFFFFFFFFFFC, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x03FFFFFFFFFFFFFF }; + +// Montgomery representation of 2^256. +static const gf65376 R2 = { 0x3F03F03F03F03F13, 0x03F03F03F03F03F0, 0xF03F03F03F03F03F, + 0x3F03F03F03F03F03, 0x03F03F03F03F03F0, 0x1D3F03F03F03F03F }; + +// The modulus itself (this is also a valid representation of zero). +static const gf65376 MODULUS = { 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x40FFFFFFFFFFFFFF }; + +// 1/2^380 (in Montgomery representation). 
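+// (Illustrative check: a field element x is stored as x*2^384 mod q, so the
+// Montgomery representation of 1/2^380 is simply 2^384/2^380 = 2^4 = 16,
+// i.e. limb v0 = 0x10 and all other limbs zero.)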
+static const gf65376 INVT380 = { 0x0000000000000010, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 }; + +static const gf65376 PM1O3 = { 0xaaaaaaaaaaaaaaaa, 0xaaaaaaaaaaaaaaaa, 0xaaaaaaaaaaaaaaaa, + 0xaaaaaaaaaaaaaaaa, 0xaaaaaaaaaaaaaaaa, 0x15aaaaaaaaaaaaaa }; + +// Expand the most significant bit of x into a full-width 64-bit word +// (0x0000000000000000 or 0xFFFFFFFFFFFFFFFF). +static inline uint64_t +sgnw(uint64_t x) +{ + return (uint64_t)(*(int64_t *)&x >> 63); +} + +// d <- u*f + v*g (in the field) +// Coefficients f and g are provided as unsigned integers, but they +// really are signed values which must be less than 2^62 (in absolute value). +static void +gf65376_lin(gf65376 *d, const gf65376 *u, const gf65376 *v, uint64_t f, uint64_t g) +{ + // f <- abs(f), keeping the sign in sf, and negating u accordingly + uint64_t sf = sgnw(f); + f = (f ^ sf) - sf; + gf65376 tu; + gf65376_neg(&tu, u); + gf65376_select(&tu, u, &tu, (uint32_t)sf); + + // g <- abs(g), keeping the sign in sg, and negating v accordingly + uint64_t sg = sgnw(g); + g = (g ^ sg) - sg; + gf65376 tv; + gf65376_neg(&tv, v); + gf65376_select(&tv, v, &tv, (uint32_t)sg); + + // Linear combination over plain integers. + uint64_t d0, d1, d2, d3, d4, d5, t; + inner_gf65376_umul_x2(d0, t, tu.v0, f, tv.v0, g); + inner_gf65376_umul_x2_add(d1, t, tu.v1, f, tv.v1, g, t); + inner_gf65376_umul_x2_add(d2, t, tu.v2, f, tv.v2, g, t); + inner_gf65376_umul_x2_add(d3, t, tu.v3, f, tv.v3, g, t); + inner_gf65376_umul_x2_add(d4, t, tu.v4, f, tv.v4, g, t); + inner_gf65376_umul_x2_add(d5, t, tu.v5, f, tv.v5, g, t); + + // Reduction: split into low part (376 bits) and high part + // (71 bits, since t can be up to 63 bits). If the high + // part is h, then: + // h*2^376 = (h mod 65)*2^376 + floor(h/65) mod q + uint64_t h0 = (d5 >> 56) | (t << 8); + uint64_t h1 = t >> 56; + d5 &= 0x00FFFFFFFFFFFFFF; + + // NOTE: 0xFC0FC0FC0FC0FC1 = 65^-1 % 2^64 + // NOTE: 0xFC1 = 65^-1 % 2^12 + uint64_t z0, z1, quo0, rem0, quo1, rem1; + inner_gf65376_umul(z0, z1, h0, 0xFC0FC0FC0FC0FC1); + (void)z0; + quo0 = z1 >> 2; + rem0 = h0 - (65 * quo0); + quo1 = (h1 * 0xFC1) >> 18; // Only keep bottom two bits + rem1 = h1 - (65 * quo1); + + // h = rem0 + 65*quo0 + (rem1 + 65*quo1)*2^64 + // = rem0 + rem1 + 65*(quo0 + quo1*2^64 + rem1*((2^64 - 1)/65)) + // We add rem0 and rem1 modulo 65, with an extra carry that + // goes into the folded part (multiple of 65). + uint64_t e, f0, f1; + unsigned char cc; + cc = inner_gf65376_adc(0, rem0 + 0xFFFFFFFFFFFFFFBE, rem1, &e); + cc = inner_gf65376_adc(cc, quo0, rem1 * 0x3F03F03F03F03F0, &f0); + cc = inner_gf65376_adc(cc, quo1, 0, &f1); + assert(cc == 0); + e -= 0xFFFFFFFFFFFFFFBE; + + // Now we only have to add e*2^384 + f0:f1 to the low part. + cc = inner_gf65376_adc(0, d0, f0, &d0); + cc = inner_gf65376_adc(cc, d1, f1, &d1); + cc = inner_gf65376_adc(cc, d2, 0, &d2); + cc = inner_gf65376_adc(cc, d3, 0, &d3); + cc = inner_gf65376_adc(cc, d4, 0, &d4); + (void)inner_gf65376_adc(cc, d5, e << 56, &d5); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + d->v4 = d4; + d->v5 = d5; +} + +// d <- abs(floor((a*f + b*g) / 2^31)) +// Coefficients f and g are provided as unsigned integer, but they really +// are signed values, which MUST be at most 2^31 in absolute value. +// The computation is performed over the integers, not modulo q. The low +// 31 bits are dropped (in practice, callers provided appropriate coefficients +// f and g such that a*f + b*g is a multiple of 2^31. 
+// +// If a*f + b*g is negative, then the absolute value is computed, and the +// function returns 0xFFFFFFFFFFFFFFFF; otherwise, the function returns +// 0x0000000000000000. +static uint64_t +lindiv31abs(gf65376 *d, const gf65376 *a, const gf65376 *b, uint64_t f, uint64_t g) +{ + // f <- abs(f), keeping the sign in sf + uint64_t sf = sgnw(f); + f = (f ^ sf) - sf; + + // g <- abs(g), keeping the sign in sg + uint64_t sg = sgnw(g); + g = (g ^ sg) - sg; + + // Apply the signs of f and g to the source operands. + uint64_t a0, a1, a2, a3, a4, a5, a6; + uint64_t b0, b1, b2, b3, b4, b5, b6; + unsigned char cc; + + cc = inner_gf65376_sbb(0, a->v0 ^ sf, sf, &a0); + cc = inner_gf65376_sbb(cc, a->v1 ^ sf, sf, &a1); + cc = inner_gf65376_sbb(cc, a->v2 ^ sf, sf, &a2); + cc = inner_gf65376_sbb(cc, a->v3 ^ sf, sf, &a3); + cc = inner_gf65376_sbb(cc, a->v4 ^ sf, sf, &a4); + cc = inner_gf65376_sbb(cc, a->v5 ^ sf, sf, &a5); + (void)inner_gf65376_sbb(cc, 0, 0, &a6); + + cc = inner_gf65376_sbb(0, b->v0 ^ sg, sg, &b0); + cc = inner_gf65376_sbb(cc, b->v1 ^ sg, sg, &b1); + cc = inner_gf65376_sbb(cc, b->v2 ^ sg, sg, &b2); + cc = inner_gf65376_sbb(cc, b->v3 ^ sg, sg, &b3); + cc = inner_gf65376_sbb(cc, b->v4 ^ sg, sg, &b4); + cc = inner_gf65376_sbb(cc, b->v5 ^ sg, sg, &b5); + (void)inner_gf65376_sbb(cc, 0, 0, &b6); + + // Compute a*f + b*g into d0:d1:d2:d3:d4. Since f and g are at + // most 2^31, we can add two 128-bit products with no overflow. + // Note: a4 and b4 are both in {0, -1}. + uint64_t d0, d1, d2, d3, d4, d5, d6, t; + inner_gf65376_umul_x2(d0, t, a0, f, b0, g); + inner_gf65376_umul_x2_add(d1, t, a1, f, b1, g, t); + inner_gf65376_umul_x2_add(d2, t, a2, f, b2, g, t); + inner_gf65376_umul_x2_add(d3, t, a3, f, b3, g, t); + inner_gf65376_umul_x2_add(d4, t, a4, f, b4, g, t); + inner_gf65376_umul_x2_add(d5, t, a5, f, b5, g, t); + d6 = t - (a6 & f) - (b6 & g); + + // Right-shift the value by 31 bits. + d0 = (d0 >> 31) | (d1 << 33); + d1 = (d1 >> 31) | (d2 << 33); + d2 = (d2 >> 31) | (d3 << 33); + d3 = (d3 >> 31) | (d4 << 33); + d4 = (d4 >> 31) | (d5 << 33); + d5 = (d5 >> 31) | (d6 << 33); + + // If the result is negative, negate it. + t = sgnw(d6); + cc = inner_gf65376_sbb(0, d0 ^ t, t, &d0); + cc = inner_gf65376_sbb(cc, d1 ^ t, t, &d1); + cc = inner_gf65376_sbb(cc, d2 ^ t, t, &d2); + cc = inner_gf65376_sbb(cc, d3 ^ t, t, &d3); + cc = inner_gf65376_sbb(cc, d4 ^ t, t, &d4); + (void)inner_gf65376_sbb(cc, d5 ^ t, t, &d5); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + d->v4 = d4; + d->v5 = d5; + return t; +} + +// lzcnt(x) returns the number of leading bits of value 0 in x. It supports +// x == 0 (in which case the function returns 64). +#if defined __LZCNT__ +static inline uint64_t +lzcnt(uint64_t x) +{ + return _lzcnt_u64(x); +} +#else +static inline uint64_t +lzcnt(uint64_t x) +{ + uint64_t m, s; + m = sgnw((x >> 32) - 1); + s = m & 32; + x = (x >> 32) ^ (m & (x ^ (x >> 32))); + m = sgnw((x >> 16) - 1); + s |= m & 16; + x = (x >> 16) ^ (m & (x ^ (x >> 16))); + m = sgnw((x >> 8) - 1); + s |= m & 8; + x = (x >> 8) ^ (m & (x ^ (x >> 8))); + m = sgnw((x >> 4) - 1); + s |= m & 4; + x = (x >> 4) ^ (m & (x ^ (x >> 4))); + m = sgnw((x >> 2) - 1); + s |= m & 2; + x = (x >> 2) ^ (m & (x ^ (x >> 2))); + + // At this point, x fits on 2 bits. 
Count of extra zeros: + // x = 0 -> 2 + // x = 1 -> 1 + // x = 2 -> 0 + // x = 3 -> 0 + s += (2 - x) & ((x - 3) >> 2); + return s; +} +#endif + +// see gf65376.h +uint32_t +gf65376_div(gf65376 *d, const gf65376 *x, const gf65376 *y) +{ + // Extended binary GCD: + // + // a <- y + // b <- q (modulus) + // u <- x (self) + // v <- 0 + // + // Value a is normalized (in the 0..q-1 range). Values a and b are + // then considered as (signed) integers. Values u and v are field + // elements. + // + // Invariants: + // a*x = y*u mod q + // b*x = y*v mod q + // b is always odd + // + // At each step: + // if a is even, then: + // a <- a/2, u <- u/2 mod q + // else: + // if a < b: + // (a, u, b, v) <- (b, v, a, u) + // a <- (a-b)/2, u <- (u-v)/2 mod q + // + // What we implement below is the optimized version of this + // algorithm, as described in https://eprint.iacr.org/2020/972 + + gf65376 a, b, u, v; + uint64_t xa, xb, f0, g0, f1, g1; + uint32_t r; + + r = ~gf65376_iszero(y); + inner_gf65376_normalize(&a, y); + b = MODULUS; + u = *x; + v = ZERO; + + // Generic loop does 23*31 = 713 inner iterations. + for (int i = 0; i < 23; i++) { + // Get approximations of a and b over 64 bits: + // - If len(a) <= 64 and len(b) <= 64, then we just use + // their values (low limbs). + // - Otherwise, with n = max(len(a), len(b)), we use: + // (a mod 2^31) + 2^31*floor(a / 2^(n - 33)) + // (b mod 2^31) + 2^31*floor(b / 2^(n - 33)) + uint64_t m5 = a.v5 | b.v5; + uint64_t m4 = a.v4 | b.v4; + uint64_t m3 = a.v3 | b.v3; + uint64_t m2 = a.v2 | b.v2; + uint64_t m1 = a.v1 | b.v1; + uint64_t tnz5 = sgnw(m5 | -m5); + uint64_t tnz4 = sgnw(m4 | -m4) & ~tnz5; + uint64_t tnz3 = sgnw(m3 | -m3) & ~tnz5 & ~tnz4; + uint64_t tnz2 = sgnw(m2 | -m2) & ~tnz5 & ~tnz4 & ~tnz3; + uint64_t tnz1 = sgnw(m1 | -m1) & ~tnz5 & ~tnz4 & ~tnz3 & ~tnz2; + uint64_t tnzm = (m5 & tnz5) | (m4 & tnz4) | (m3 & tnz3) | (m2 & tnz2) | (m1 & tnz1); + uint64_t tnza = (a.v5 & tnz5) | (a.v4 & tnz4) | (a.v3 & tnz3) | (a.v2 & tnz2) | (a.v1 & tnz1); + uint64_t tnzb = (b.v5 & tnz5) | (b.v4 & tnz4) | (b.v3 & tnz3) | (b.v2 & tnz2) | (b.v1 & tnz1); + uint64_t snza = (a.v4 & tnz5) | (a.v3 & tnz4) | (a.v2 & tnz3) | (a.v1 & tnz2) | (a.v0 & tnz1); + uint64_t snzb = (b.v4 & tnz5) | (b.v3 & tnz4) | (b.v2 & tnz3) | (b.v1 & tnz2) | (b.v0 & tnz1); + + // If both len(a) <= 64 and len(b) <= 64, then: + // tnzm = 0 + // tnza = 0, snza = 0, tnzb = 0, snzb = 0 + // Otherwise: + // tnzm != 0 + // tnza contains the top non-zero limb of a + // snza contains the limb right below tnza + // tnzb contains the top non-zero limb of a + // snzb contains the limb right below tnzb + // + // We count the number of leading zero bits in tnzm: + // - If s <= 31, then the top 31 bits can be extracted from + // tnza and tnzb alone. + // - If 32 <= s <= 63, then we need some bits from snza and + // snzb as well. + int64_t s = lzcnt(tnzm); + uint64_t sm = (uint64_t)((31 - s) >> 63); + tnza ^= sm & (tnza ^ ((tnza << 32) | (snza >> 32))); + tnzb ^= sm & (tnzb ^ ((tnzb << 32) | (snzb >> 32))); + s -= 32 & sm; + tnza <<= s; + tnzb <<= s; + + // At this point: + // - If len(a) <= 64 and len(b) <= 64, then: + // tnza = 0 + // tnzb = 0 + // tnz1 = tnz2 = tnz3 = tnz4 = tnz5 = 0 + // we want to use the entire low words of a and b + // - Otherwise, we want to use the top 33 bits of tnza and + // tnzb, and the low 31 bits of the low words of a and b. 
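+        // For intuition, the constant-time loop below performs 31 steps of the
+        // following branchy divstep (illustrative sketch only, not used by the
+        // code and not constant-time; swap() is shorthand for exchanging the
+        // two values):
+        //
+        //     if (xa & 1) {                  // a odd
+        //         if (xa < xb) {             // keep a >= b
+        //             swap(xa, xb);
+        //             swap(fg0, fg1);
+        //         }
+        //         xa -= xb;                  // a <- a - b (now even)
+        //         fg0 -= fg1;                // track the update factors
+        //     }
+        //     xa >>= 1;                      // a <- a/2
+        //     fg1 <<= 1;                     // rescale b's factors instead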
+ uint64_t tzx = ~(tnz1 | tnz2 | tnz3 | tnz4 | tnz5); + tnza |= a.v0 & tzx; + tnzb |= b.v0 & tzx; + xa = (a.v0 & 0x7FFFFFFF) | (tnza & 0xFFFFFFFF80000000); + xb = (b.v0 & 0x7FFFFFFF) | (tnzb & 0xFFFFFFFF80000000); + + // Compute the 31 inner iterations on xa and xb. + uint64_t fg0 = (uint64_t)1; + uint64_t fg1 = (uint64_t)1 << 32; + for (int j = 0; j < 31; j++) { + uint64_t a_odd, swap, t0, t1, t2; + unsigned char cc; + a_odd = -(xa & 1); + cc = inner_gf65376_sbb(0, xa, xb, &t0); + (void)inner_gf65376_sbb(cc, 0, 0, &swap); + swap &= a_odd; + t1 = swap & (xa ^ xb); + xa ^= t1; + xb ^= t1; + t2 = swap & (fg0 ^ fg1); + fg0 ^= t2; + fg1 ^= t2; + xa -= a_odd & xb; + fg0 -= a_odd & fg1; + xa >>= 1; + fg1 <<= 1; + } + fg0 += 0x7FFFFFFF7FFFFFFF; + fg1 += 0x7FFFFFFF7FFFFFFF; + f0 = (fg0 & 0xFFFFFFFF) - (uint64_t)0x7FFFFFFF; + g0 = (fg0 >> 32) - (uint64_t)0x7FFFFFFF; + f1 = (fg1 & 0xFFFFFFFF) - (uint64_t)0x7FFFFFFF; + g1 = (fg1 >> 32) - (uint64_t)0x7FFFFFFF; + + // Propagate updates to a, b, u and v. + gf65376 na, nb, nu, nv; + uint64_t nega = lindiv31abs(&na, &a, &b, f0, g0); + uint64_t negb = lindiv31abs(&nb, &a, &b, f1, g1); + f0 = (f0 ^ nega) - nega; + g0 = (g0 ^ nega) - nega; + f1 = (f1 ^ negb) - negb; + g1 = (g1 ^ negb) - negb; + gf65376_lin(&nu, &u, &v, f0, g0); + gf65376_lin(&nv, &u, &v, f1, g1); + a = na; + b = nb; + u = nu; + v = nv; + } + + // If y is invertible, then the final GCD is 1, and + // len(a) + len(b) <= 53, so we can end the computation with + // the low words directly. We only need 51 iterations to reach + // the point where b = 1. + // + // If y is zero, then v is unchanged (hence zero) and none of + // the subsequent iterations will change it either, so we get + // 0 on output, which is what we want. + xa = a.v0; + xb = b.v0; + f0 = 1; + g0 = 0; + f1 = 0; + g1 = 1; + for (int j = 0; j < 51; j++) { + uint64_t a_odd, swap, t0, t1, t2, t3; + unsigned char cc; + a_odd = -(xa & 1); + cc = inner_gf65376_sbb(0, xa, xb, &t0); + (void)inner_gf65376_sbb(cc, 0, 0, &swap); + swap &= a_odd; + t1 = swap & (xa ^ xb); + xa ^= t1; + xb ^= t1; + t2 = swap & (f0 ^ f1); + f0 ^= t2; + f1 ^= t2; + t3 = swap & (g0 ^ g1); + g0 ^= t3; + g1 ^= t3; + xa -= a_odd & xb; + f0 -= a_odd & f1; + g0 -= a_odd & g1; + xa >>= 1; + f1 <<= 1; + g1 <<= 1; + } + gf65376_lin(d, &u, &v, f1, g1); + + // At the point: + // - Numerator and denominator were both in Montgomery representation, + // but the two factors R canceled each other. + // - We have injected 31*23+51 = 764 extra factors of 2, hence we + // must divide the result by 2^764. + // - However, we also want to obtain the result in Montgomery + // representation, i.e. multiply by 2^256. We thus want to + // divide the current result by 2^(764 - 384) = 2^380. + // - We do this division by using a Montgomery multiplication with + // the Montgomery representation of 1/2^380, i.e. the integer + // 2^384/2^380 = 16. + gf65376_mul(d, d, &INVT380); + return r; +} + +// see gf65376.h +uint32_t +gf65376_invert(gf65376 *d, const gf65376 *a) +{ + return gf65376_div(d, &ONE, a); +} + +// see gf65376.h +int32_t +gf65376_legendre(const gf65376 *x) +{ + // Same algorithm as the binary GCD in gf65376_div(), with + // a few differences: + // - We do not keep track of the Bézout coefficients u and v. + // - In each inner iteration we adjust the running symbol value, + // which uses the low 3 bits of the values. + // - Since we need two extra bits of look-ahead, we can only run + // 29 inner iterations, and then need an extra recomputation + // for the last 2. 
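+    // Background (illustrative, not how the value is computed here): since q
+    // is prime, the Legendre symbol of x equals x^((q-1)/2) mod q, i.e. +1
+    // for a nonzero square and -1 for a non-square.  The loop below reaches
+    // the same result far more cheaply by tracking the symbol through the
+    // binary GCD, using quadratic reciprocity on swaps and the (2|b) rule on
+    // halvings.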
+ + gf65376 a, b; + uint64_t xa, xb, f0, g0, f1, g1, ls; + + inner_gf65376_normalize(&a, x); + b = MODULUS; + ls = 0; // running symbol information in bit 1. + + // Outer loop + for (int i = 0; i < 23; i++) { + // Get approximations of a and b over 64 bits. + uint64_t m5 = a.v5 | b.v5; + uint64_t m4 = a.v4 | b.v4; + uint64_t m3 = a.v3 | b.v3; + uint64_t m2 = a.v2 | b.v2; + uint64_t m1 = a.v1 | b.v1; + uint64_t tnz5 = sgnw(m5 | -m5); + uint64_t tnz4 = sgnw(m4 | -m4) & ~tnz5; + uint64_t tnz3 = sgnw(m3 | -m3) & ~tnz5 & ~tnz4; + uint64_t tnz2 = sgnw(m2 | -m2) & ~tnz5 & ~tnz4 & ~tnz3; + uint64_t tnz1 = sgnw(m1 | -m1) & ~tnz5 & ~tnz4 & ~tnz3 & ~tnz2; + uint64_t tnzm = (m5 & tnz5) | (m4 & tnz4) | (m3 & tnz3) | (m2 & tnz2) | (m1 & tnz1); + uint64_t tnza = (a.v5 & tnz5) | (a.v4 & tnz4) | (a.v3 & tnz3) | (a.v2 & tnz2) | (a.v1 & tnz1); + uint64_t tnzb = (b.v5 & tnz5) | (b.v4 & tnz4) | (b.v3 & tnz3) | (b.v2 & tnz2) | (b.v1 & tnz1); + uint64_t snza = (a.v4 & tnz5) | (a.v3 & tnz4) | (a.v2 & tnz3) | (a.v1 & tnz2) | (a.v0 & tnz1); + uint64_t snzb = (b.v4 & tnz5) | (b.v3 & tnz4) | (b.v2 & tnz3) | (b.v1 & tnz2) | (b.v0 & tnz1); + + int64_t s = lzcnt(tnzm); + uint64_t sm = (uint64_t)((31 - s) >> 63); + tnza ^= sm & (tnza ^ ((tnza << 32) | (snza >> 32))); + tnzb ^= sm & (tnzb ^ ((tnzb << 32) | (snzb >> 32))); + s -= 32 & sm; + tnza <<= s; + tnzb <<= s; + + uint64_t tzx = ~(tnz1 | tnz2 | tnz3 | tnz4 | tnz5); + tnza |= a.v0 & tzx; + tnzb |= b.v0 & tzx; + xa = (a.v0 & 0x7FFFFFFF) | (tnza & 0xFFFFFFFF80000000); + xb = (b.v0 & 0x7FFFFFFF) | (tnzb & 0xFFFFFFFF80000000); + + // First 290 inner iterations. + uint64_t fg0 = (uint64_t)1; + uint64_t fg1 = (uint64_t)1 << 32; + for (int j = 0; j < 29; j++) { + uint64_t a_odd, swap, t0, t1, t2; + unsigned char cc; + a_odd = -(xa & 1); + cc = inner_gf65376_sbb(0, xa, xb, &t0); + (void)inner_gf65376_sbb(cc, 0, 0, &swap); + swap &= a_odd; + ls ^= swap & xa & xb; + t1 = swap & (xa ^ xb); + xa ^= t1; + xb ^= t1; + t2 = swap & (fg0 ^ fg1); + fg0 ^= t2; + fg1 ^= t2; + xa -= a_odd & xb; + fg0 -= a_odd & fg1; + xa >>= 1; + fg1 <<= 1; + ls ^= (xb + 2) >> 1; + } + + // Compute the updated a and b (low words only) to get + // enough bits for the next two iterations. + uint64_t fg0z = fg0 + 0x7FFFFFFF7FFFFFFF; + uint64_t fg1z = fg1 + 0x7FFFFFFF7FFFFFFF; + f0 = (fg0z & 0xFFFFFFFF) - (uint64_t)0x7FFFFFFF; + g0 = (fg0z >> 32) - (uint64_t)0x7FFFFFFF; + f1 = (fg1z & 0xFFFFFFFF) - (uint64_t)0x7FFFFFFF; + g1 = (fg1z >> 32) - (uint64_t)0x7FFFFFFF; + uint64_t a0 = (a.v0 * f0 + b.v0 * g0) >> 29; + uint64_t b0 = (a.v0 * f1 + b.v0 * g1) >> 29; + for (int j = 0; j < 2; j++) { + uint64_t a_odd, swap, t0, t1, t2, t3; + unsigned char cc; + a_odd = -(xa & 1); + cc = inner_gf65376_sbb(0, xa, xb, &t0); + (void)inner_gf65376_sbb(cc, 0, 0, &swap); + swap &= a_odd; + ls ^= swap & a0 & b0; + t1 = swap & (xa ^ xb); + xa ^= t1; + xb ^= t1; + t2 = swap & (fg0 ^ fg1); + fg0 ^= t2; + fg1 ^= t2; + t3 = swap & (a0 ^ b0); + a0 ^= t3; + b0 ^= t3; + xa -= a_odd & xb; + fg0 -= a_odd & fg1; + a0 -= a_odd & b0; + xa >>= 1; + fg1 <<= 1; + a0 >>= 1; + ls ^= (b0 + 2) >> 1; + } + + // Propagate updates to a and b. 
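+        // Unpacking below, worked through on an example (illustrative): the
+        // two signed update factors live in the low and high 32-bit halves of
+        // fg0/fg1.  Adding 0x7FFFFFFF to each half makes both halves
+        // non-negative so they can be read out independently, and the bias is
+        // removed after extraction.  E.g. f0 = -3, g0 = 5 is packed as
+        // 0x00000004FFFFFFFD; adding 0x7FFFFFFF7FFFFFFF gives halves
+        // 0x80000004 and 0x7FFFFFFC, and subtracting 0x7FFFFFFF from each
+        // recovers 5 and -3.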
+ fg0 += 0x7FFFFFFF7FFFFFFF; + fg1 += 0x7FFFFFFF7FFFFFFF; + f0 = (fg0 & 0xFFFFFFFF) - (uint64_t)0x7FFFFFFF; + g0 = (fg0 >> 32) - (uint64_t)0x7FFFFFFF; + f1 = (fg1 & 0xFFFFFFFF) - (uint64_t)0x7FFFFFFF; + g1 = (fg1 >> 32) - (uint64_t)0x7FFFFFFF; + gf65376 na, nb; + uint64_t nega = lindiv31abs(&na, &a, &b, f0, g0); + (void)lindiv31abs(&nb, &a, &b, f1, g1); + ls ^= nega & nb.v0; + a = na; + b = nb; + } + + // Final iterations: values are at most 53 bits now. We do not + // need to keep track of update coefficients. Just like the GCD, + // we need only 51 iterations, because after 51 iterations, + // value a is 0 or 1, and b is 1, and no further modification to + // the Legendre symbol may happen. + xa = a.v0; + xb = b.v0; + for (int j = 0; j < 51; j++) { + uint64_t a_odd, swap, t0, t1; + unsigned char cc; + a_odd = -(xa & 1); + cc = inner_gf65376_sbb(0, xa, xb, &t0); + (void)inner_gf65376_sbb(cc, 0, 0, &swap); + swap &= a_odd; + ls ^= swap & xa & xb; + t1 = swap & (xa ^ xb); + xa ^= t1; + xb ^= t1; + xa -= a_odd & xb; + xa >>= 1; + ls ^= (xb + 2) >> 1; + } + + // At this point, if the source value was not zero, then the low + // bit of ls contains the QR status (0 = square, 1 = non-square), + // which we need to convert to the expected value (+1 or -1). + // If y == 0, then we return 0, per the API. + uint32_t r = 1 - ((uint32_t)ls & 2); + r &= ~gf65376_iszero(x); + return *(int32_t *)&r; +} + +// see gf65376.h +uint32_t +gf65376_sqrt(gf65376 *d, const gf65376 *a) +{ + // Candidate root is a^((q+1)/4), with (q+1)/4 = 65*2^374 + gf65376 y; + gf65376_xsquare(&y, a, 6); + gf65376_mul(&y, &y, a); + gf65376_xsquare(&y, &y, 374); + + // Normalize y and negate if necessary, to set the low bit to 0. + // The low bit check must be on the normal representation, + // not the Montgomery representation. + gf65376 yn; + inner_gf65376_montgomery_reduce(&yn, &y); + uint32_t ctl = -((uint32_t)yn.v0 & 1); + gf65376_neg(&yn, &y); + gf65376_select(&y, &y, &yn, ctl); + + // Check whether the candidate is indeed a square root. + gf65376_square(&yn, &y); + uint32_t r = gf65376_equals(&yn, a); + *d = y; + return r; +} + +// Little-endian encoding of a 64-bit integer. +static inline void +enc64le(void *dst, uint64_t x) +{ + uint8_t *buf = dst; + buf[0] = (uint8_t)x; + buf[1] = (uint8_t)(x >> 8); + buf[2] = (uint8_t)(x >> 16); + buf[3] = (uint8_t)(x >> 24); + buf[4] = (uint8_t)(x >> 32); + buf[5] = (uint8_t)(x >> 40); + buf[6] = (uint8_t)(x >> 48); + buf[7] = (uint8_t)(x >> 56); +} + +// Little-endian decoding of a 64-bit integer. 
+static inline uint64_t +dec64le(const void *src) +{ + const uint8_t *buf = src; + return (uint64_t)buf[0] | ((uint64_t)buf[1] << 8) | ((uint64_t)buf[2] << 16) | ((uint64_t)buf[3] << 24) | + ((uint64_t)buf[4] << 32) | ((uint64_t)buf[5] << 40) | ((uint64_t)buf[6] << 48) | ((uint64_t)buf[7] << 56); +} + +// see gf65376.h +void +gf65376_encode(void *dst, const gf65376 *a) +{ + uint8_t *buf = dst; + gf65376 x; + + inner_gf65376_montgomery_reduce(&x, a); + enc64le(buf, x.v0); + enc64le(buf + 8, x.v1); + enc64le(buf + 16, x.v2); + enc64le(buf + 24, x.v3); + enc64le(buf + 32, x.v4); + enc64le(buf + 40, x.v5); +} + +// see gf65376.h +uint32_t +gf65376_decode(gf65376 *d, const void *src) +{ + const uint8_t *buf = src; + uint64_t d0, d1, d2, d3, d4, d5, t; + unsigned char cc; + + d0 = dec64le(buf); + d1 = dec64le(buf + 8); + d2 = dec64le(buf + 16); + d3 = dec64le(buf + 24); + d4 = dec64le(buf + 32); + d5 = dec64le(buf + 40); + cc = inner_gf65376_sbb(0, d0, MODULUS.v0, &t); + cc = inner_gf65376_sbb(cc, d1, MODULUS.v1, &t); + cc = inner_gf65376_sbb(cc, d2, MODULUS.v2, &t); + cc = inner_gf65376_sbb(cc, d3, MODULUS.v3, &t); + cc = inner_gf65376_sbb(cc, d4, MODULUS.v4, &t); + cc = inner_gf65376_sbb(cc, d5, MODULUS.v5, &t); + + (void)inner_gf65376_sbb(cc, 0, 0, &t); + + // If the value was not canonical then t = 0; otherwise, t = -1. + d->v0 = d0 & t; + d->v1 = d1 & t; + d->v2 = d2 & t; + d->v3 = d3 & t; + d->v4 = d4 & t; + d->v5 = d5 & t; + + // Convert to Montgomery representation. + gf65376_mul(d, d, &R2); + + return (uint32_t)t; +} + +// see gf65376.h +void +gf65376_decode_reduce(gf65376 *d, const void *src, size_t len) +{ + const uint8_t *buf = src; + + *d = ZERO; + if (len == 0) { + return; + } + + size_t rem = len % 48; + if (rem != 0) { + // Input size is not a multiple of 48, we decode a partial + // block, which is already less than 2^383. + uint8_t tmp[48]; + size_t k; + + k = len - rem; + + memcpy(tmp, buf + k, len - k); + memset(tmp + len - k, 0, (sizeof tmp) - (len - k)); + d->v0 = dec64le(&tmp[0]); + d->v1 = dec64le(&tmp[8]); + d->v2 = dec64le(&tmp[16]); + d->v3 = dec64le(&tmp[24]); + d->v4 = dec64le(&tmp[32]); + d->v5 = dec64le(&tmp[40]); + + len = k; + } else { + // Input size is a multiple of 48, we decode a full block, + // and a reduction is needed. + len -= 48; + uint64_t d0 = dec64le(buf + len); + uint64_t d1 = dec64le(buf + len + 8); + uint64_t d2 = dec64le(buf + len + 16); + uint64_t d3 = dec64le(buf + len + 24); + uint64_t d4 = dec64le(buf + len + 32); + uint64_t d5 = dec64le(buf + len + 40); + inner_gf65376_partial_reduce(d, d0, d1, d2, d3, d4, d5); + } + + // Process all remaining blocks, in descending address order. + while (len > 0) { + gf65376_mul(d, d, &R2); + len -= 48; + uint64_t t0 = dec64le(buf + len); + uint64_t t1 = dec64le(buf + len + 8); + uint64_t t2 = dec64le(buf + len + 16); + uint64_t t3 = dec64le(buf + len + 24); + uint64_t t4 = dec64le(buf + len + 32); + uint64_t t5 = dec64le(buf + len + 40); + + gf65376 t; + inner_gf65376_partial_reduce(&t, t0, t1, t2, t3, t4, t5); + gf65376_add(d, d, &t); + } + + // Final conversion to Montgomery representation. 
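+    // Note on the structure above (illustrative): each gf65376_mul(d, d, &R2)
+    // is a Montgomery product that multiplies the running value by 2^384,
+    // i.e. shifts it up by one 48-byte block, so the loop is Horner's rule in
+    // base 2^384 over the blocks.  The product below applies one more factor
+    // of 2^384, which is exactly the conversion of the accumulated plain
+    // residue into Montgomery representation.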
+ gf65376_mul(d, d, &R2); +} + +void +gf65376_div3(gf65376 *d, const gf65376 *a) +{ + const digit_t MAGIC = 0xAAAAAAAAAAAAAAAB; // 3^-1 mod 2^64 + uint64_t c0, c1, f0, f1; + gf65376 t; + + inner_gf65376_umul(f0, f1, a->arr[5], MAGIC); + t.arr[5] = f1 >> 1; + c1 = a->arr[5] - 3 * t.arr[5]; + + for (int32_t i = 4; i >= 0; i--) { + c0 = c1; + inner_gf65376_umul(f0, f1, a->arr[i], MAGIC); + t.arr[i] = f1 >> 1; + c1 = c0 + a->arr[i] - 3 * t.arr[i]; + t.arr[i] += c0 * ((MAGIC - 1) >> 1); + f0 = ((c1 >> 1) & c1); /* c1 == 3 */ + f1 = ((c1 >> 2) & !(c1 & 0x11)); /* c1 == 4 */ + f0 |= f1; + t.arr[i] += f0; + c1 = c1 - 3 * f0; + } + *d = t; + gf65376_sub(&t, d, &PM1O3); + gf65376_select(d, d, &t, -((c1 & 1) | (c1 >> 1))); // c1 >= 1 + gf65376_sub(&t, d, &PM1O3); + gf65376_select(d, d, &t, -(c1 == 2)); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/gf65376.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/gf65376.h new file mode 100644 index 0000000000..2d04245fc1 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/gf65376.h @@ -0,0 +1,1121 @@ +#ifndef gf65376_h__ +#define gf65376_h__ + +#ifdef __cplusplus +extern "C" +{ +#endif +#include +#include +#include +#include +#include +#include + + typedef uint64_t digit_t; // Datatype for representing field elements + + /* + * A gf65376 instance represents an integer modulo q. + * This is a structure; it can be copied with a simple assignment, and + * passed around as a value (though exchanging pointers is possibly more + * efficient). + * The contents are opaque. No calling code should make any assumption + * about the contents. + */ + typedef union + { + // Contents are opaque. + // Implementation note: this encodes the value in Montgomery + // representation, with R = 2^384. Only partial reduction is + // done internally to ensure the value is below 2^383 + struct + { + uint64_t v0; + uint64_t v1; + uint64_t v2; + uint64_t v3; + uint64_t v4; + uint64_t v5; + }; + digit_t arr[6]; + } gf65376; + + /* + * Constant zero (in the field). + */ + extern const gf65376 ZERO; + + /* + * Constant one (in the field). + */ + extern const gf65376 ONE; + + /* + * Constant -1 (in the field). + */ + extern const gf65376 gf65376_MINUS_ONE; + + /* + * API RULES: + * ========== + * + * Elementary operations on field elements are implemented by functions + * which take as parameter pointers to the operands. The first parameter + * is the pointer to the destination. Thus: + * gf65376 a = ...; + * gf65376 b = ...; + * gf65376 d; + * gf65376_sub(&d, &a, &b) + * sets field element d to a - b (implicitly modulo q). + * + * Operands may be used several times: it is always valid to use as + * output a gf65376 structure which is also used as input. + * + * Boolean values are represented by 32-bit integer (uint32_t) which have + * value exactly 0xFFFFFFFF (for "true") or 0x00000000 (for "false"). This + * convention minimizes the risk that a "smart" compiler breaks the + * constant-time property of the code through unfortunated optimizations. + * When a function expects such a Boolean, the caller MUST take care never + * to provide any value other than 0x00000000 or 0xFFFFFFFF. + * + * Values are encoded into exactly 48 bytes: value x modulo q is mapped to + * its unique integer representant in the [0..q-1] range, which is then + * encoded over 48 bytes with little-endian convention. 
Encoding is canonical + * and checked: when decoding (with gf65376_decode()), the input value is + * verified to be in the [0..q-1] range; for an out-of-range value, + * gf65376_decode() fills the output structure with zero, and returns + * 0x00000000. + * + * For most operations, the implementation is an inline function, defined + * below; the compiler can thus efficiently include it in the calling code. + * A few expensive operations (e.g. divisions) use non-inline functions, + * declared below but defined in gf65376.c + * + * All functions and macro whose name starts with "inner_gf65376_" are + * internal to this implementation and visible here only in order to + * support the API inline functions; they MUST NOT be used directly. + */ + +#if (defined _MSC_VER && defined _M_X64) || (defined __x86_64__ && (defined __GNUC__ || defined __clang__)) +#include +#define inner_gf65376_adc(cc, a, b, d) _addcarry_u64(cc, a, b, (unsigned long long *)(void *)d) +#define inner_gf65376_sbb(cc, a, b, d) _subborrow_u64(cc, a, b, (unsigned long long *)(void *)d) +#else +static inline unsigned char +inner_gf65376_adc(unsigned char cc, uint64_t a, uint64_t b, uint64_t *d) +{ + unsigned __int128 t = (unsigned __int128)a + (unsigned __int128)b + cc; + *d = (uint64_t)t; + return (unsigned char)(t >> 64); +} +static inline unsigned char +inner_gf65376_sbb(unsigned char cc, uint64_t a, uint64_t b, uint64_t *d) +{ + unsigned __int128 t = (unsigned __int128)a - (unsigned __int128)b - cc; + *d = (uint64_t)t; + return (unsigned char)(-(uint64_t)(t >> 64)); +} +#endif + +#if defined _MSC_VER +#define inner_gf65376_umul(lo, hi, x, y) \ + do { \ + uint64_t umul_hi; \ + (lo) = _umul128((x), (y), &umul_hi); \ + (hi) = umul_hi; \ + } while (0) +#define inner_gf65376_umul_add(lo, hi, x, y, z) \ + do { \ + uint64_t umul_lo, umul_hi; \ + umul_lo = _umul128((x), (y), &umul_hi); \ + unsigned char umul_cc; \ + umul_cc = inner_gf65376_adc(0, umul_lo, (z), &umul_lo); \ + (void)inner_gf65376_adc(umul_cc, umul_hi, 0, &umul_hi); \ + (lo) = umul_lo; \ + (hi) = umul_hi; \ + } while (0) +#define inner_gf65376_umul_x2(lo, hi, x1, y1, x2, y2) \ + do { \ + uint64_t umul_lo, umul_hi; \ + umul_lo = _umul128((x1), (y1), &umul_hi); \ + uint64_t umul_lo2, umul_hi2; \ + umul_lo2 = _umul128((x2), (y2), &umul_hi2); \ + unsigned char umul_cc; \ + umul_cc = inner_gf65376_adc(0, umul_lo, umul_lo2, &umul_lo); \ + (void)inner_gf65376_adc(umul_cc, umul_hi, umul_hi2, &umul_hi); \ + (lo) = umul_lo; \ + (hi) = umul_hi; \ + } while (0) +#define inner_gf65376_umul_x2_add(lo, hi, x1, y1, x2, y2, z) \ + do { \ + uint64_t umul_lo, umul_hi; \ + umul_lo = _umul128((x1), (y1), &umul_hi); \ + uint64_t umul_lo2, umul_hi2; \ + umul_lo2 = _umul128((x2), (y2), &umul_hi2); \ + unsigned char umul_cc; \ + umul_cc = inner_gf65376_adc(0, umul_lo, umul_lo2, &umul_lo); \ + (void)inner_gf65376_adc(umul_cc, umul_hi, umul_hi2, &umul_hi); \ + umul_cc = inner_gf65376_adc(0, umul_lo, (z), &umul_lo); \ + (void)inner_gf65376_adc(umul_cc, umul_hi, 0, &umul_hi); \ + (lo) = umul_lo; \ + (hi) = umul_hi; \ + } while (0) +#else +#define inner_gf65376_umul(lo, hi, x, y) \ + do { \ + unsigned __int128 umul_tmp; \ + umul_tmp = (unsigned __int128)(x) * (unsigned __int128)(y); \ + (lo) = (uint64_t)umul_tmp; \ + (hi) = (uint64_t)(umul_tmp >> 64); \ + } while (0) +#define inner_gf65376_umul_add(lo, hi, x, y, z) \ + do { \ + unsigned __int128 umul_tmp; \ + umul_tmp = (unsigned __int128)(x) * (unsigned __int128)(y) + (unsigned __int128)(uint64_t)(z); \ + (lo) = (uint64_t)umul_tmp; \ + (hi) = 
(uint64_t)(umul_tmp >> 64); \ + } while (0) +#define inner_gf65376_umul_x2(lo, hi, x1, y1, x2, y2) \ + do { \ + unsigned __int128 umul_tmp; \ + umul_tmp = \ + (unsigned __int128)(x1) * (unsigned __int128)(y1) + (unsigned __int128)(x2) * (unsigned __int128)(y2); \ + (lo) = (uint64_t)umul_tmp; \ + (hi) = (uint64_t)(umul_tmp >> 64); \ + } while (0) +#define inner_gf65376_umul_x2_add(lo, hi, x1, y1, x2, y2, z) \ + do { \ + unsigned __int128 umul_tmp; \ + umul_tmp = (unsigned __int128)(x1) * (unsigned __int128)(y1) + \ + (unsigned __int128)(x2) * (unsigned __int128)(y2) + (unsigned __int128)(uint64_t)(z); \ + (lo) = (uint64_t)umul_tmp; \ + (hi) = (uint64_t)(umul_tmp >> 64); \ + } while (0) +#endif + + /* + * d <- a + b + */ + static inline void + gf65376_add(gf65376 *d, const gf65376 *a, const gf65376 *b) + { + uint64_t d0, d1, d2, d3, d4, d5, f; + unsigned char cc; + + // Raw addition. + cc = inner_gf65376_adc(0, a->v0, b->v0, &d0); + cc = inner_gf65376_adc(cc, a->v1, b->v1, &d1); + cc = inner_gf65376_adc(cc, a->v2, b->v2, &d2); + cc = inner_gf65376_adc(cc, a->v3, b->v3, &d3); + cc = inner_gf65376_adc(cc, a->v4, b->v4, &d4); + (void)inner_gf65376_adc(cc, a->v5, b->v5, &d5); + + // Sum is up to 2^384 - 2. Subtract q if the value is not lower + // than 2^383 (we subtract q by adding -q). + // Note: 0xBF = (-65) % 256, 56 = 376 - 5*64 + f = d5 >> 63; + cc = inner_gf65376_adc(0, d0, f, &d0); + cc = inner_gf65376_adc(cc, d1, 0, &d1); + cc = inner_gf65376_adc(cc, d2, 0, &d2); + cc = inner_gf65376_adc(cc, d3, 0, &d3); + cc = inner_gf65376_adc(cc, d4, 0, &d4); + (void)inner_gf65376_adc(cc, d5, ((uint64_t)0xBF << 56) & -f, &d5); + + // One subtraction of q might not be enough. + f = d5 >> 63; + cc = inner_gf65376_adc(0, d0, f, &d0); + cc = inner_gf65376_adc(cc, d1, 0, &d1); + cc = inner_gf65376_adc(cc, d2, 0, &d2); + cc = inner_gf65376_adc(cc, d3, 0, &d3); + cc = inner_gf65376_adc(cc, d4, 0, &d4); + (void)inner_gf65376_adc(cc, d5, ((uint64_t)0xBF << 56) & -f, &d5); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + d->v4 = d4; + d->v5 = d5; + } + + /* + * d <- a - b + */ + static inline void + gf65376_sub(gf65376 *d, const gf65376 *a, const gf65376 *b) + { + uint64_t d0, d1, d2, d3, d4, d5, m, f; + unsigned char cc; + + // Raw subtraction. + cc = inner_gf65376_sbb(0, a->v0, b->v0, &d0); + cc = inner_gf65376_sbb(cc, a->v1, b->v1, &d1); + cc = inner_gf65376_sbb(cc, a->v2, b->v2, &d2); + cc = inner_gf65376_sbb(cc, a->v3, b->v3, &d3); + cc = inner_gf65376_sbb(cc, a->v4, b->v4, &d4); + cc = inner_gf65376_sbb(cc, a->v5, b->v5, &d5); + + // Add 2*q if the result is negative. + // Note: 0x7E = (-2*65) % 256, 56 = 376 - 5*64 + (void)inner_gf65376_sbb(cc, 0, 0, &m); + cc = inner_gf65376_sbb(0, d0, m & 2, &d0); + cc = inner_gf65376_sbb(cc, d1, 0, &d1); + cc = inner_gf65376_sbb(cc, d2, 0, &d2); + cc = inner_gf65376_sbb(cc, d3, 0, &d3); + cc = inner_gf65376_sbb(cc, d4, 0, &d4); + (void)inner_gf65376_sbb(cc, d5, ((uint64_t)0x7E << 56) & m, &d5); + + // We might have overdone it; subtract q if necessary. 
+ // Note: 0xBF = (-65) % 256, 56 = 376 - 5*64 + f = d5 >> 63; + cc = inner_gf65376_adc(0, d0, f, &d0); + cc = inner_gf65376_adc(cc, d1, 0, &d1); + cc = inner_gf65376_adc(cc, d2, 0, &d2); + cc = inner_gf65376_adc(cc, d3, 0, &d3); + cc = inner_gf65376_adc(cc, d4, 0, &d4); + (void)inner_gf65376_adc(cc, d5, ((uint64_t)0xBF << 56) & -f, &d5); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + d->v4 = d4; + d->v5 = d5; + } + + /* + * d <- -a + */ + static inline void + gf65376_neg(gf65376 *d, const gf65376 *a) + { + uint64_t d0, d1, d2, d3, d4, d5, f; + unsigned char cc; + + // 2*q - a + cc = inner_gf65376_sbb(0, (uint64_t)0xFFFFFFFFFFFFFFFE, a->v0, &d0); + cc = inner_gf65376_sbb(cc, (uint64_t)0xFFFFFFFFFFFFFFFF, a->v1, &d1); + cc = inner_gf65376_sbb(cc, (uint64_t)0xFFFFFFFFFFFFFFFF, a->v2, &d2); + cc = inner_gf65376_sbb(cc, (uint64_t)0xFFFFFFFFFFFFFFFF, a->v3, &d3); + cc = inner_gf65376_sbb(cc, (uint64_t)0xFFFFFFFFFFFFFFFF, a->v4, &d4); + (void)inner_gf65376_sbb(cc, (uint64_t)0x81FFFFFFFFFFFFFF, a->v5, &d5); + + // Subtract q if the value is not lower than 2^251. + f = d5 >> 63; + cc = inner_gf65376_adc(0, d0, f, &d0); + cc = inner_gf65376_adc(cc, d1, 0, &d1); + cc = inner_gf65376_adc(cc, d2, 0, &d2); + cc = inner_gf65376_adc(cc, d3, 0, &d3); + cc = inner_gf65376_adc(cc, d4, 0, &d4); + (void)inner_gf65376_adc(cc, d5, ((uint64_t)0xBF << 56) & -f, &d5); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + d->v4 = d4; + d->v5 = d5; + } + + /* + * If ctl == 0x00000000, then *a0 is copied into *d. + * If ctl == 0xFFFFFFFF, then *a1 is copied into *d. + * ctl MUST be either 0x00000000 or 0xFFFFFFFF. + */ + static inline void + gf65376_select(gf65376 *d, const gf65376 *a0, const gf65376 *a1, uint32_t ctl) + { + uint64_t cw = (uint64_t)*(int32_t *)&ctl; + d->v0 = a0->v0 ^ (cw & (a0->v0 ^ a1->v0)); + d->v1 = a0->v1 ^ (cw & (a0->v1 ^ a1->v1)); + d->v2 = a0->v2 ^ (cw & (a0->v2 ^ a1->v2)); + d->v3 = a0->v3 ^ (cw & (a0->v3 ^ a1->v3)); + d->v4 = a0->v4 ^ (cw & (a0->v4 ^ a1->v4)); + d->v5 = a0->v5 ^ (cw & (a0->v5 ^ a1->v5)); + } + + /* + * If ctl == 0x00000000, then *a and *b are unchanged. + * If ctl == 0xFFFFFFFF, then the contents of *a and *b are swapped. + * ctl MUST be either 0x00000000 or 0xFFFFFFFF. + */ + static inline void + gf65376_cswap(gf65376 *a, gf65376 *b, uint32_t ctl) + { + uint64_t cw = (uint64_t)*(int32_t *)&ctl; + uint64_t t; + t = cw & (a->v0 ^ b->v0); + a->v0 ^= t; + b->v0 ^= t; + t = cw & (a->v1 ^ b->v1); + a->v1 ^= t; + b->v1 ^= t; + t = cw & (a->v2 ^ b->v2); + a->v2 ^= t; + b->v2 ^= t; + t = cw & (a->v3 ^ b->v3); + a->v3 ^= t; + b->v3 ^= t; + t = cw & (a->v4 ^ b->v4); + a->v4 ^= t; + b->v4 ^= t; + t = cw & (a->v5 ^ b->v5); + a->v5 ^= t; + b->v5 ^= t; + } + + /* + * d <- a/2 + */ + static inline void + gf65376_half(gf65376 *d, const gf65376 *a) + { + uint64_t d0, d1, d2, d3, d4, d5; + + d0 = (a->v0 >> 1) | (a->v1 << 63); + d1 = (a->v1 >> 1) | (a->v2 << 63); + d2 = (a->v2 >> 1) | (a->v3 << 63); + d3 = (a->v3 >> 1) | (a->v4 << 63); + d4 = (a->v4 >> 1) | (a->v5 << 63); + d5 = a->v5 >> 1; + d5 += ((uint64_t)65 << 55) & -(a->v0 & 1); + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + d->v4 = d4; + d->v5 = d5; + } + + // Inner function: 384-bit to 383-bit reduction + static inline void + inner_gf65376_partial_reduce(gf65376 *d, + uint64_t a0, + uint64_t a1, + uint64_t a2, + uint64_t a3, + uint64_t a4, + uint64_t a5) + { + uint64_t d0, d1, d2, d3, d4, d5, h, quo, rem; + unsigned char cc; + + // Split value in high (8 bits) and low (376 bits) parts. 
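+        // Worked instance of the fold below (illustrative): e.g. h = 200 =
+        // 3*65 + 5, so h*2^376 contributes 5*2^376 + 3 modulo q.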
+ h = a5 >> 56; + a5 &= 0x00FFFFFFFFFFFFFF; + + // 65*2^376 = 1 mod q; hence, we add floor(h/65) + (h mod 65)*2^376 + // to the low part. + quo = (h * 0xFC1) >> 18; + rem = h - (65 * quo); + cc = inner_gf65376_adc(0, a0, quo, &d0); + cc = inner_gf65376_adc(cc, a1, 0, &d1); + cc = inner_gf65376_adc(cc, a2, 0, &d2); + cc = inner_gf65376_adc(cc, a3, 0, &d3); + cc = inner_gf65376_adc(cc, a4, 0, &d4); + (void)inner_gf65376_adc(cc, a5, rem << 56, &d5); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + d->v4 = d4; + d->v5 = d5; + } + + // Inner function: Normalize value *a into *d. + static inline void + inner_gf65376_normalize(gf65376 *d, const gf65376 *a) + { + uint64_t d0, d1, d2, d3, d4, d5, m; + unsigned char cc; + + // Subtract q. + cc = inner_gf65376_sbb(0, a->v0, 0xFFFFFFFFFFFFFFFF, &d0); + cc = inner_gf65376_sbb(cc, a->v1, 0xFFFFFFFFFFFFFFFF, &d1); + cc = inner_gf65376_sbb(cc, a->v2, 0xFFFFFFFFFFFFFFFF, &d2); + cc = inner_gf65376_sbb(cc, a->v3, 0xFFFFFFFFFFFFFFFF, &d3); + cc = inner_gf65376_sbb(cc, a->v4, 0xFFFFFFFFFFFFFFFF, &d4); + cc = inner_gf65376_sbb(cc, a->v5, 0x40FFFFFFFFFFFFFF, &d5); + + // Add back q if the result is negative. + (void)inner_gf65376_sbb(cc, 0, 0, &m); + cc = inner_gf65376_adc(0, d0, m, &d0); + cc = inner_gf65376_adc(cc, d1, m, &d1); + cc = inner_gf65376_adc(cc, d2, m, &d2); + cc = inner_gf65376_adc(cc, d3, m, &d3); + cc = inner_gf65376_adc(cc, d4, m, &d4); + (void)inner_gf65376_adc(cc, d5, m & 0x40FFFFFFFFFFFFFF, &d5); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + d->v4 = d4; + d->v5 = d5; + } + + /* + * d <- 2*a + */ + static inline void + gf65376_mul2(gf65376 *d, const gf65376 *a) + { + gf65376_add(d, a, a); + } + + /* + * d <- a*x + * (multiplication by a 32-bit integer) + */ + static inline void + gf65376_mul_small(gf65376 *d, const gf65376 *a, uint32_t x) + { + uint64_t d0, d1, d2, d3, d4, d5, d6, lo, hi, b, h, quo, rem; + unsigned char cc; + + // Product over the integers. Top output word (d6) is at most 31 bits. + b = (uint64_t)x; + inner_gf65376_umul(d0, d1, a->v0, b); + inner_gf65376_umul(d2, d3, a->v2, b); + inner_gf65376_umul(d4, d5, a->v4, b); + + inner_gf65376_umul(lo, hi, a->v1, b); + cc = inner_gf65376_adc(0, d1, lo, &d1); + cc = inner_gf65376_adc(cc, d2, hi, &d2); + inner_gf65376_umul(lo, hi, a->v3, b); + cc = inner_gf65376_adc(cc, d3, lo, &d3); + cc = inner_gf65376_adc(cc, d4, hi, &d4); + inner_gf65376_umul(lo, d6, a->v5, b); + cc = inner_gf65376_adc(cc, d5, lo, &d5); + (void)inner_gf65376_adc(cc, d6, 0, &d6); + + // Extract low 248-bit part, and the high part (at most 35 bits). + h = (d6 << 8) | (d5 >> 56); + d5 &= 0x00FFFFFFFFFFFFFF; + + // Fold h by adding floor(h/65) + (h mod 65)*2^376 to the low part. + inner_gf65376_umul(lo, hi, h, 0xFC0FC0FC0FC0FC1); + quo = hi >> 2; + rem = h - (65 * quo); + cc = inner_gf65376_adc(cc, d0, quo, &d0); + cc = inner_gf65376_adc(cc, d1, 0, &d1); + cc = inner_gf65376_adc(cc, d2, 0, &d2); + cc = inner_gf65376_adc(cc, d3, 0, &d3); + cc = inner_gf65376_adc(cc, d4, 0, &d4); + (void)inner_gf65376_adc(cc, d5, rem << 56, &d5); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + d->v4 = d4; + d->v5 = d5; + } + + /* + * d <- x + * Input value x (32-bit integer) is converted to field element x mod q. + */ + static inline void + gf65376_set_small(gf65376 *d, uint32_t x) + { + // We want Montgomery representation, i.e. x*2^384 mod q. 
+ // We set h = x*2^8; then: + // x*2^384 = h*2^376 + // = (h mod 65)*2^376 + floor(h/65)*65*2^376 + // = (h mod 65)*2^376 + floor(h/65) mod q + // by using the fact that 65*2^376 = 1 mod q. + uint64_t h, lo, hi, quo, rem; + + // NOTE: 0xFC0FC0FC0FC0FC1 = 65^(-1) % 2^64 + h = (uint64_t)x << 8; + inner_gf65376_umul(lo, hi, h, 0xFC0FC0FC0FC0FC1); + (void)lo; + quo = hi >> 2; + rem = h - (65 * quo); + + d->v0 = quo; + d->v1 = 0; + d->v2 = 0; + d->v3 = 0; + d->v4 = 0; + d->v5 = rem << 56; + } + + // Inner function: d <- a/2^384, with normalization to [0..q-1]. + static inline void + inner_gf65376_montgomery_reduce(gf65376 *d, const gf65376 *a) + { + uint64_t x0, x1, x2, x3, x4, x5, f0, f1, f2, f3, f4, f5; + uint64_t g0, g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, g11; + uint64_t d0, d1, d2, d3, d4, d5; + uint64_t hi, t, w; + unsigned char cc; + + // Let m = -1/q mod 2^384 = 65*2^376 + 1 + // For input x, we compute f = x*m mod 2^384, then + // h = x + f*q, which is a multiple of 2^384. The output + // is then h/2^384. + // Since x < 2^384, we have: + // h <= 2^384 - 1 + (2^384 - 1)*q + // h <= q*2^384 + 2^384 - q - 1 + // Since h = 0 mod 2^384, this implies that h <= q*2^384. + // The output h/2^384 is therefore between 0 and q (inclusive). + + x0 = a->v0; + x1 = a->v1; + x2 = a->v2; + x3 = a->v3; + x4 = a->v4; + x5 = a->v5; + + // f = x*(-1/q) mod 2^384 + f0 = x0; + f1 = x1; + f2 = x2; + f3 = x3; + f4 = x4; + f5 = x5 + ((x0 * 65) << 56); + + // g = f*q + inner_gf65376_umul(g5, hi, f0, (uint64_t)65 << 56); + inner_gf65376_umul_add(g6, hi, f1, (uint64_t)65 << 56, hi); + inner_gf65376_umul_add(g7, hi, f2, (uint64_t)65 << 56, hi); + inner_gf65376_umul_add(g8, hi, f3, (uint64_t)65 << 56, hi); + inner_gf65376_umul_add(g9, hi, f4, (uint64_t)65 << 56, hi); + inner_gf65376_umul_add(g10, g11, f5, (uint64_t)65 << 56, hi); + + cc = inner_gf65376_sbb(0, 0, f0, &g0); + cc = inner_gf65376_sbb(cc, 0, f1, &g1); + cc = inner_gf65376_sbb(cc, 0, f2, &g2); + cc = inner_gf65376_sbb(cc, 0, f3, &g3); + cc = inner_gf65376_sbb(cc, 0, f4, &g4); + cc = inner_gf65376_sbb(cc, g5, f5, &g5); + cc = inner_gf65376_sbb(cc, g6, 0, &g6); + cc = inner_gf65376_sbb(cc, g7, 0, &g7); + cc = inner_gf65376_sbb(cc, g8, 0, &g8); + cc = inner_gf65376_sbb(cc, g9, 0, &g9); + cc = inner_gf65376_sbb(cc, g10, 0, &g10); + (void)inner_gf65376_sbb(cc, g11, 0, &g11); + + // h = x + f*q (we drop the low 384 bits). + cc = inner_gf65376_adc(0, g0, x0, &x0); + cc = inner_gf65376_adc(cc, g1, x1, &x1); + cc = inner_gf65376_adc(cc, g2, x2, &x2); + cc = inner_gf65376_adc(cc, g3, x3, &x3); + cc = inner_gf65376_adc(cc, g4, x4, &x4); + cc = inner_gf65376_adc(cc, g5, x5, &x5); + cc = inner_gf65376_adc(cc, g6, 0, &d0); + cc = inner_gf65376_adc(cc, g7, 0, &d1); + cc = inner_gf65376_adc(cc, g8, 0, &d2); + cc = inner_gf65376_adc(cc, g9, 0, &d3); + cc = inner_gf65376_adc(cc, g10, 0, &d4); + (void)inner_gf65376_adc(cc, g11, 0, &d5); + + // Normalize: if h = q, replace it with zero. 
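+        // The comparison is branchless (illustrative reading of the lines
+        // below): t is all-ones exactly when (d0..d5) equals q, so the carry
+        // out of t + 1 is turned into a mask that clears every limb in that
+        // single case and leaves the value untouched otherwise.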
+ t = d0 & d1 & d2 & d3 & d4 & (d5 ^ ~(uint64_t)0x40FFFFFFFFFFFFFF); + cc = inner_gf65376_adc(0, t, 1, &t); + (void)inner_gf65376_sbb(cc, 0, 0, &w); + w = ~w; + d->v0 = d0 & w; + d->v1 = d1 & w; + d->v2 = d2 & w; + d->v3 = d3 & w; + d->v4 = d4 & w; + d->v5 = d5 & w; + } + + /* + * d <- a*b + */ + static inline void + gf65376_mul(gf65376 *d, const gf65376 *a, const gf65376 *b) + { + uint64_t e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11; + uint64_t f0, f1, f2, f3, f4, f5, lo, hi, lo2, hi2; + uint64_t g0, g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, g11; + unsigned char cc; + + // Multiplication over integers. + // 6 mul + inner_gf65376_umul(e0, e1, a->v0, b->v0); + inner_gf65376_umul(e2, e3, a->v1, b->v1); + inner_gf65376_umul(e4, e5, a->v2, b->v2); + inner_gf65376_umul(e6, e7, a->v3, b->v3); + inner_gf65376_umul(e8, e9, a->v4, b->v4); + inner_gf65376_umul(e10, e11, a->v5, b->v5); + + // + 5 mul = 11 + inner_gf65376_umul(lo, hi, a->v0, b->v1); + cc = inner_gf65376_adc(0, e1, lo, &e1); + cc = inner_gf65376_adc(cc, e2, hi, &e2); + inner_gf65376_umul(lo, hi, a->v0, b->v3); + cc = inner_gf65376_adc(cc, e3, lo, &e3); + cc = inner_gf65376_adc(cc, e4, hi, &e4); + inner_gf65376_umul(lo, hi, a->v0, b->v5); + cc = inner_gf65376_adc(cc, e5, lo, &e5); + cc = inner_gf65376_adc(cc, e6, hi, &e6); + inner_gf65376_umul(lo, hi, a->v2, b->v5); + cc = inner_gf65376_adc(cc, e7, lo, &e7); + cc = inner_gf65376_adc(cc, e8, hi, &e8); + inner_gf65376_umul(lo, hi, a->v4, b->v5); + cc = inner_gf65376_adc(cc, e9, lo, &e9); + cc = inner_gf65376_adc(cc, e10, hi, &e10); + cc = inner_gf65376_adc(cc, e11, 0, &e11); + assert(cc == 0); + + // + 5 mul = 16 + inner_gf65376_umul(lo, hi, a->v1, b->v0); + cc = inner_gf65376_adc(0, e1, lo, &e1); + cc = inner_gf65376_adc(cc, e2, hi, &e2); + inner_gf65376_umul(lo, hi, a->v3, b->v0); + cc = inner_gf65376_adc(cc, e3, lo, &e3); + cc = inner_gf65376_adc(cc, e4, hi, &e4); + inner_gf65376_umul(lo, hi, a->v5, b->v0); + cc = inner_gf65376_adc(cc, e5, lo, &e5); + cc = inner_gf65376_adc(cc, e6, hi, &e6); + inner_gf65376_umul(lo, hi, a->v5, b->v2); + cc = inner_gf65376_adc(cc, e7, lo, &e7); + cc = inner_gf65376_adc(cc, e8, hi, &e8); + inner_gf65376_umul(lo, hi, a->v5, b->v4); + cc = inner_gf65376_adc(cc, e9, lo, &e9); + cc = inner_gf65376_adc(cc, e10, hi, &e10); + cc = inner_gf65376_adc(cc, e11, 0, &e11); + assert(cc == 0); + + // + 4 mul = 20 + inner_gf65376_umul(lo, hi, a->v0, b->v2); + cc = inner_gf65376_adc(0, e2, lo, &e2); + cc = inner_gf65376_adc(cc, e3, hi, &e3); + inner_gf65376_umul(lo, hi, a->v0, b->v4); + cc = inner_gf65376_adc(cc, e4, lo, &e4); + cc = inner_gf65376_adc(cc, e5, hi, &e5); + inner_gf65376_umul(lo, hi, a->v2, b->v4); + cc = inner_gf65376_adc(cc, e6, lo, &e6); + cc = inner_gf65376_adc(cc, e7, hi, &e7); + inner_gf65376_umul(lo, hi, a->v3, b->v5); + cc = inner_gf65376_adc(cc, e8, lo, &e8); + cc = inner_gf65376_adc(cc, e9, hi, &e9); + cc = inner_gf65376_adc(cc, e10, 0, &e10); + cc = inner_gf65376_adc(cc, e11, 0, &e11); + assert(cc == 0); + + // + 4 mul = 24 + inner_gf65376_umul(lo, hi, a->v2, b->v0); + cc = inner_gf65376_adc(0, e2, lo, &e2); + cc = inner_gf65376_adc(cc, e3, hi, &e3); + inner_gf65376_umul(lo, hi, a->v4, b->v0); + cc = inner_gf65376_adc(cc, e4, lo, &e4); + cc = inner_gf65376_adc(cc, e5, hi, &e5); + inner_gf65376_umul(lo, hi, a->v4, b->v2); + cc = inner_gf65376_adc(cc, e6, lo, &e6); + cc = inner_gf65376_adc(cc, e7, hi, &e7); + inner_gf65376_umul(lo, hi, a->v5, b->v3); + cc = inner_gf65376_adc(cc, e8, lo, &e8); + cc = inner_gf65376_adc(cc, e9, hi, &e9); + cc = 
inner_gf65376_adc(cc, e10, 0, &e10); + cc = inner_gf65376_adc(cc, e11, 0, &e11); + assert(cc == 0); + + // + 3 mul = 27 + inner_gf65376_umul(lo, hi, a->v1, b->v2); + cc = inner_gf65376_adc(cc, e3, lo, &e3); + cc = inner_gf65376_adc(cc, e4, hi, &e4); + inner_gf65376_umul(lo, hi, a->v1, b->v4); + cc = inner_gf65376_adc(cc, e5, lo, &e5); + cc = inner_gf65376_adc(cc, e6, hi, &e6); + inner_gf65376_umul(lo, hi, a->v3, b->v4); + cc = inner_gf65376_adc(cc, e7, lo, &e7); + cc = inner_gf65376_adc(cc, e8, hi, &e8); + cc = inner_gf65376_adc(cc, e9, 0, &e9); + cc = inner_gf65376_adc(cc, e10, 0, &e10); + cc = inner_gf65376_adc(cc, e11, 0, &e11); + assert(cc == 0); + + // + 3 mul = 30 + inner_gf65376_umul(lo, hi, a->v2, b->v1); + cc = inner_gf65376_adc(cc, e3, lo, &e3); + cc = inner_gf65376_adc(cc, e4, hi, &e4); + inner_gf65376_umul(lo, hi, a->v4, b->v1); + cc = inner_gf65376_adc(cc, e5, lo, &e5); + cc = inner_gf65376_adc(cc, e6, hi, &e6); + inner_gf65376_umul(lo, hi, a->v4, b->v3); + cc = inner_gf65376_adc(cc, e7, lo, &e7); + cc = inner_gf65376_adc(cc, e8, hi, &e8); + cc = inner_gf65376_adc(cc, e9, 0, &e9); + cc = inner_gf65376_adc(cc, e10, 0, &e10); + cc = inner_gf65376_adc(cc, e11, 0, &e11); + assert(cc == 0); + + // + 2 mul = 32 + inner_gf65376_umul(lo, hi, a->v1, b->v3); + cc = inner_gf65376_adc(cc, e4, lo, &e4); + cc = inner_gf65376_adc(cc, e5, hi, &e5); + inner_gf65376_umul(lo, hi, a->v1, b->v5); + cc = inner_gf65376_adc(cc, e6, lo, &e6); + cc = inner_gf65376_adc(cc, e7, hi, &e7); + cc = inner_gf65376_adc(cc, e8, 0, &e8); + cc = inner_gf65376_adc(cc, e9, 0, &e9); + cc = inner_gf65376_adc(cc, e10, 0, &e10); + cc = inner_gf65376_adc(cc, e11, 0, &e11); + assert(cc == 0); + + // + 2 mul = 34 + inner_gf65376_umul(lo, hi, a->v3, b->v1); + cc = inner_gf65376_adc(cc, e4, lo, &e4); + cc = inner_gf65376_adc(cc, e5, hi, &e5); + inner_gf65376_umul(lo, hi, a->v5, b->v1); + cc = inner_gf65376_adc(cc, e6, lo, &e6); + cc = inner_gf65376_adc(cc, e7, hi, &e7); + cc = inner_gf65376_adc(cc, e8, 0, &e8); + cc = inner_gf65376_adc(cc, e9, 0, &e9); + cc = inner_gf65376_adc(cc, e10, 0, &e10); + cc = inner_gf65376_adc(cc, e11, 0, &e11); + assert(cc == 0); + + // + 2 mul = 36 + inner_gf65376_umul(lo, hi, a->v2, b->v3); + inner_gf65376_umul(lo2, hi2, a->v3, b->v2); + cc = inner_gf65376_adc(0, lo, lo2, &lo); + cc = inner_gf65376_adc(cc, hi, hi2, &hi); + cc = inner_gf65376_adc(cc, 0, 0, &hi2); + assert(cc == 0); + cc = inner_gf65376_adc(0, e5, lo, &e5); + cc = inner_gf65376_adc(cc, e6, hi, &e6); + cc = inner_gf65376_adc(cc, e7, hi2, &e7); + cc = inner_gf65376_adc(cc, e8, 0, &e8); + cc = inner_gf65376_adc(cc, e9, 0, &e9); + cc = inner_gf65376_adc(cc, e10, 0, &e10); + cc = inner_gf65376_adc(cc, e11, 0, &e11); + assert(cc == 0); + + // Montgomery reduction. + // + // Low part is lo(e) = e0..e5 (384 bits). + // Let m = -1/q mod 2^384; we add (lo(e)*m mod 2^384)*q to the + // high part g = e6..e11 (766 bits). + // + // We have m = 65*2^376 + 1. 
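+        // Illustrative expansion of the next six lines: multiplying lo(e) by
+        // m = 65*2^376 + 1 modulo 2^384 gives lo(e) + 65*lo(e)*2^376, and the
+        // second term only survives in the top 8 bits, so f is just lo(e)
+        // with ((e0 * 65) << 56) added into its sixth limb.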
+ f0 = e0; + f1 = e1; + f2 = e2; + f3 = e3; + f4 = e4; + f5 = e5 + ((e0 * 65) << 56); + + // g = f*q + inner_gf65376_umul(g5, hi, f0, (uint64_t)65 << 56); + inner_gf65376_umul_add(g6, hi, f1, (uint64_t)65 << 56, hi); + inner_gf65376_umul_add(g7, hi, f2, (uint64_t)65 << 56, hi); + inner_gf65376_umul_add(g8, hi, f3, (uint64_t)65 << 56, hi); + inner_gf65376_umul_add(g9, hi, f4, (uint64_t)65 << 56, hi); + inner_gf65376_umul_add(g10, g11, f5, (uint64_t)65 << 56, hi); + + cc = inner_gf65376_sbb(0, 0, f0, &g0); + cc = inner_gf65376_sbb(cc, 0, f1, &g1); + cc = inner_gf65376_sbb(cc, 0, f2, &g2); + cc = inner_gf65376_sbb(cc, 0, f3, &g3); + cc = inner_gf65376_sbb(cc, 0, f4, &g4); + cc = inner_gf65376_sbb(cc, g5, f5, &g5); + cc = inner_gf65376_sbb(cc, g6, 0, &g6); + cc = inner_gf65376_sbb(cc, g7, 0, &g7); + cc = inner_gf65376_sbb(cc, g8, 0, &g8); + cc = inner_gf65376_sbb(cc, g9, 0, &g9); + cc = inner_gf65376_sbb(cc, g10, 0, &g10); + cc = inner_gf65376_sbb(cc, g11, 0, &g11); + assert(cc == 0); + + // Add g = f*q to e0..e11. + // Since e0..e11 < 2^766 and f < 2^384, we know that the result + // is less than 2^766 + 2^384*65*2^376, which is less than 2^768. + // This is also a multiple of 2^384. We divide by 2^384 by simply + // dropping the low 384 bits (which are all equal to zero), and + // the result is less than 2^384 + cc = inner_gf65376_adc(0, g0, e0, &e0); + cc = inner_gf65376_adc(cc, g1, e1, &e1); + cc = inner_gf65376_adc(cc, g2, e2, &e2); + cc = inner_gf65376_adc(cc, g3, e3, &e3); + cc = inner_gf65376_adc(cc, g4, e4, &e4); + cc = inner_gf65376_adc(cc, g5, e5, &e5); + cc = inner_gf65376_adc(cc, g6, e6, &e6); + cc = inner_gf65376_adc(cc, g7, e7, &e7); + cc = inner_gf65376_adc(cc, g8, e8, &e8); + cc = inner_gf65376_adc(cc, g9, e9, &e9); + cc = inner_gf65376_adc(cc, g10, e10, &e10); + cc = inner_gf65376_adc(cc, g11, e11, &e11); + assert(cc == 0); + + // To ensure the result is in the allowable range, we still need to + // do a final reduction to ensure the value is smaller than 2^383 + inner_gf65376_partial_reduce(d, e6, e7, e8, e9, e10, e11); + } + + /* + * d <- a^2 + */ + static inline void + gf65376_square(gf65376 *d, const gf65376 *a) + { + uint64_t e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11; + uint64_t f0, f1, f2, f3, f4, f5, lo, hi; + uint64_t g0, g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, g11; + unsigned char cc; + + // Squaring over integers.
+ // 5 mul + inner_gf65376_umul(e1, e2, a->v0, a->v1); + inner_gf65376_umul(e3, e4, a->v0, a->v3); + inner_gf65376_umul(e5, e6, a->v0, a->v5); + inner_gf65376_umul(e7, e8, a->v2, a->v5); + inner_gf65376_umul(e9, e10, a->v4, a->v5); + + inner_gf65376_umul(lo, hi, a->v0, a->v2); + cc = inner_gf65376_adc(0, e2, lo, &e2); + cc = inner_gf65376_adc(cc, e3, hi, &e3); + inner_gf65376_umul(lo, hi, a->v0, a->v4); + cc = inner_gf65376_adc(cc, e4, lo, &e4); + cc = inner_gf65376_adc(cc, e5, hi, &e5); + inner_gf65376_umul(lo, hi, a->v1, a->v5); + cc = inner_gf65376_adc(cc, e6, lo, &e6); + cc = inner_gf65376_adc(cc, e7, hi, &e7); + inner_gf65376_umul(lo, hi, a->v3, a->v5); + cc = inner_gf65376_adc(cc, e8, lo, &e8); + cc = inner_gf65376_adc(cc, e9, hi, &e9); + (void)inner_gf65376_adc(cc, e10, 0, &e10); + + inner_gf65376_umul(lo, hi, a->v1, a->v2); + cc = inner_gf65376_adc(0, e3, lo, &e3); + cc = inner_gf65376_adc(cc, e4, hi, &e4); + inner_gf65376_umul(lo, hi, a->v1, a->v4); + cc = inner_gf65376_adc(cc, e5, lo, &e5); + cc = inner_gf65376_adc(cc, e6, hi, &e6); + inner_gf65376_umul(lo, hi, a->v3, a->v4); + cc = inner_gf65376_adc(cc, e7, lo, &e7); + cc = inner_gf65376_adc(cc, e8, hi, &e8); + (void)inner_gf65376_adc(cc, e9, 0, &e9); + + inner_gf65376_umul(lo, hi, a->v1, a->v3); + cc = inner_gf65376_adc(0, e4, lo, &e4); + cc = inner_gf65376_adc(cc, e5, hi, &e5); + inner_gf65376_umul(lo, hi, a->v2, a->v4); + cc = inner_gf65376_adc(cc, e6, lo, &e6); + cc = inner_gf65376_adc(cc, e7, hi, &e7); + (void)inner_gf65376_adc(cc, e8, 0, &e8); + + inner_gf65376_umul(lo, hi, a->v2, a->v3); + cc = inner_gf65376_adc(0, e5, lo, &e5); + cc = inner_gf65376_adc(cc, e6, hi, &e6); + (void)inner_gf65376_adc(cc, e7, 0, &e7); + + e11 = e10 >> 63; + e10 = (e10 << 1) | (e9 >> 63); + e9 = (e9 << 1) | (e8 >> 63); + e8 = (e8 << 1) | (e7 >> 63); + e7 = (e7 << 1) | (e6 >> 63); + e6 = (e6 << 1) | (e5 >> 63); + e5 = (e5 << 1) | (e4 >> 63); + e4 = (e4 << 1) | (e3 >> 63); + e3 = (e3 << 1) | (e2 >> 63); + e2 = (e2 << 1) | (e1 >> 63); + e1 = e1 << 1; + + inner_gf65376_umul(e0, hi, a->v0, a->v0); + cc = inner_gf65376_adc(0, e1, hi, &e1); + inner_gf65376_umul(lo, hi, a->v1, a->v1); + cc = inner_gf65376_adc(cc, e2, lo, &e2); + cc = inner_gf65376_adc(cc, e3, hi, &e3); + inner_gf65376_umul(lo, hi, a->v2, a->v2); + cc = inner_gf65376_adc(cc, e4, lo, &e4); + cc = inner_gf65376_adc(cc, e5, hi, &e5); + inner_gf65376_umul(lo, hi, a->v3, a->v3); + cc = inner_gf65376_adc(cc, e6, lo, &e6); + cc = inner_gf65376_adc(cc, e7, hi, &e7); + inner_gf65376_umul(lo, hi, a->v4, a->v4); + cc = inner_gf65376_adc(cc, e8, lo, &e8); + cc = inner_gf65376_adc(cc, e9, hi, &e9); + inner_gf65376_umul(lo, hi, a->v5, a->v5); + cc = inner_gf65376_adc(cc, e10, lo, &e10); + (void)inner_gf65376_adc(cc, e11, hi, &e11); + + // Montgomery reduction. + // + // Low part is lo(e) = e0..e5 (384 bits). + // Let m = -1/q mod 2^384; we add (lo(e)*m mod 2^384)*q to the + // high part g = e6..e11 (766 bits). + // + // We have m = 65*2^376 + 1. 
+ f0 = e0; + f1 = e1; + f2 = e2; + f3 = e3; + f4 = e4; + f5 = e5 + ((e0 * 65) << 56); + + // g = f*q + inner_gf65376_umul(g5, hi, f0, (uint64_t)65 << 56); + inner_gf65376_umul_add(g6, hi, f1, (uint64_t)65 << 56, hi); + inner_gf65376_umul_add(g7, hi, f2, (uint64_t)65 << 56, hi); + inner_gf65376_umul_add(g8, hi, f3, (uint64_t)65 << 56, hi); + inner_gf65376_umul_add(g9, hi, f4, (uint64_t)65 << 56, hi); + inner_gf65376_umul_add(g10, g11, f5, (uint64_t)65 << 56, hi); + + cc = inner_gf65376_sbb(0, 0, f0, &g0); + cc = inner_gf65376_sbb(cc, 0, f1, &g1); + cc = inner_gf65376_sbb(cc, 0, f2, &g2); + cc = inner_gf65376_sbb(cc, 0, f3, &g3); + cc = inner_gf65376_sbb(cc, 0, f4, &g4); + cc = inner_gf65376_sbb(cc, g5, f5, &g5); + cc = inner_gf65376_sbb(cc, g6, 0, &g6); + cc = inner_gf65376_sbb(cc, g7, 0, &g7); + cc = inner_gf65376_sbb(cc, g8, 0, &g8); + cc = inner_gf65376_sbb(cc, g9, 0, &g9); + cc = inner_gf65376_sbb(cc, g10, 0, &g10); + cc = inner_gf65376_sbb(cc, g11, 0, &g11); + assert(cc == 0); + + // Add g = f*q to e0..e11. + // Since e0..e11 < 2^766 and f < 2^384, we know that the result + // is less than 2^766 + 2^384*65*2^376, which is less than 2^768. + // This is also a multiple of 2^384. We divide by 2^384 by simply + // dropping the low 384 bits (which are all equal to zero), and + // the result is less than 2^384 + cc = inner_gf65376_adc(0, g0, e0, &e0); + cc = inner_gf65376_adc(cc, g1, e1, &e1); + cc = inner_gf65376_adc(cc, g2, e2, &e2); + cc = inner_gf65376_adc(cc, g3, e3, &e3); + cc = inner_gf65376_adc(cc, g4, e4, &e4); + cc = inner_gf65376_adc(cc, g5, e5, &e5); + cc = inner_gf65376_adc(cc, g6, e6, &e6); + cc = inner_gf65376_adc(cc, g7, e7, &e7); + cc = inner_gf65376_adc(cc, g8, e8, &e8); + cc = inner_gf65376_adc(cc, g9, e9, &e9); + cc = inner_gf65376_adc(cc, g10, e10, &e10); + cc = inner_gf65376_adc(cc, g11, e11, &e11); + assert(cc == 0); + + // To ensure the result is in the allowable range, we still need to + // do a final reduction to ensure the value is smaller than 2^383 + inner_gf65376_partial_reduce(d, e6, e7, e8, e9, e10, e11); + } + + /* + * d <- a^(2^n) + * This computes n successive squarings of value a, with result in d. + * n == 0 is a valid input (in that case, *a is copied into *d). + * This function is not constant-time with regard to n: the number of + * successive squarings may be observable through timing-based side channels. + */ + static inline void + gf65376_xsquare(gf65376 *d, const gf65376 *a, unsigned n) + { + if (n == 0) { + *d = *a; + return; + } + gf65376_square(d, a); + while (n-- > 1) { + gf65376_square(d, d); + } + } + + /* + * Returns 0xFFFFFFFF if *a is zero; otherwise, 0x00000000 is returned. + */ + static inline uint32_t + gf65376_iszero(const gf65376 *a) + { + uint64_t a0, a1, a2, a3, a4, a5, t0, t1, r; + + // Zero can be represented by 0 or by q. + a0 = a->v0; + a1 = a->v1; + a2 = a->v2; + a3 = a->v3; + a4 = a->v4; + a5 = a->v5; + t0 = a0 | a1 | a2 | a3 | a4 | a5; + t1 = ~a0 | ~a1 | ~a2 | ~a3 | ~a4 | (a5 ^ 0x40FFFFFFFFFFFFFF); + + // Top bit of r is 0 if and only if one of t0 or t1 is zero. + r = (t0 | -t0) & (t1 | -t1); + return (uint32_t)(r >> 63) - 1; + } + + /* + * Returns 0xFFFFFFFF if *a and *b represent the same field element; + * otherwise, 0x00000000 is returned.
+ */ + static inline uint32_t + gf65376_equals(const gf65376 *a, const gf65376 *b) + { + gf65376 d; + gf65376_sub(&d, a, b); + return gf65376_iszero(&d); + } + + /* + * d <- 1/a + * If *a is not zero, then the inverse is well-defined and written into *d, + * and the function returns 0xFFFFFFFF. If *a is zero, then this function + * sets *d to zero and returns 0x00000000. + */ + uint32_t gf65376_invert(gf65376 *d, const gf65376 *a); + + /* + * d <- a/b + * If *b is not zero, then this function writes a/b into *d, and returns + * 0xFFFFFFFF. If *b is zero, then this function sets *d to zero (regardless + * of the value of *a) and returns 0x00000000. + */ + uint32_t gf65376_div(gf65376 *d, const gf65376 *a, const gf65376 *b); + + /* + * d <- a/3 + * Divides by 3 in the field by implementing the algorithm proposed in + * "Efficient Multiplication in Finite Field Extensions of Degree 5" + * by El Mrabet, Guillevic and Ionica at ASIACRYPT 2011. + */ + void gf65376_div3(gf65376 *out, const gf65376 *a); + + /* + * Get the Legendre symbol of *a (0 for zero, +1 for a non-zero square, + * -1 for a non-square). + */ + int32_t gf65376_legendre(const gf65376 *a); + + /* + * If *a is a square, then this function sets *d to a square root of a, + * and returns 0xFFFFFFFF. If *a is not a square, then this function + * sets *d to a square root of -a, and returns 0x00000000. + * In all cases, the value written into *d is such that the least significant + * bit of its integer representation (in [0..q-1]) is zero. + */ + uint32_t gf65376_sqrt(gf65376 *d, const gf65376 *a); + + /* + * Encode field element *a into buffer dst (exactly 48 bytes are written). + */ + void gf65376_encode(void *dst, const gf65376 *a); + + /* + * Decode source buffer src (exactly 48 bytes) into a field element *d. + * If the source value is not a valid canonical encoding, then *d is zero + * and the function returns 0x00000000; otherwise, the function returns + * 0xFFFFFFFF. + */ + uint32_t gf65376_decode(gf65376 *d, const void *src); + + /* + * Interpret the source buffer (of size len bytes) as an unsigned integer + * (little-endian convention) and reduce it modulo q, yielding a field + * element which is written into *d. Since reduction is applied, this + * function cannot fail.
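As an illustrative aside (not part of the upstream sources), a minimal sketch of how this field API fits together. The header name "gf65376.h" and the helper name demo_gf65376_div are assumptions for illustration; the 48-byte canonical encoding matches the encode/decode comments above, and all return values are the usual 0xFFFFFFFF/0x00000000 masks.

#include <stdint.h>
#include "gf65376.h" /* assumed header name for the declarations above */

/* Compute a/b from canonical 48-byte encodings. Returns 0xFFFFFFFF on
 * success, 0x00000000 if either input fails to decode or if b == 0
 * (in which case the documented behaviour yields an all-zero output). */
static uint32_t
demo_gf65376_div(uint8_t out[48], const uint8_t a_bytes[48], const uint8_t b_bytes[48])
{
    gf65376 a, b, r;
    uint32_t ok;

    ok = gf65376_decode(&a, a_bytes);
    ok &= gf65376_decode(&b, b_bytes);
    ok &= gf65376_div(&r, &a, &b); /* r <- a/b, or zero if b == 0 */
    gf65376_encode(out, &r);       /* always writes 48 bytes */
    return ok;
}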
+ */ + void gf65376_decode_reduce(gf65376 *d, const void *src, size_t len); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd.c new file mode 100644 index 0000000000..0424108019 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd.c @@ -0,0 +1,93 @@ +#include +#include + +void +double_couple_point(theta_couple_point_t *out, const theta_couple_point_t *in, const theta_couple_curve_t *E1E2) +{ + ec_dbl(&out->P1, &in->P1, &E1E2->E1); + ec_dbl(&out->P2, &in->P2, &E1E2->E2); +} + +void +double_couple_point_iter(theta_couple_point_t *out, + unsigned n, + const theta_couple_point_t *in, + const theta_couple_curve_t *E1E2) +{ + if (n == 0) { + memmove(out, in, sizeof(theta_couple_point_t)); + } else { + double_couple_point(out, in, E1E2); + for (unsigned i = 0; i < n - 1; i++) { + double_couple_point(out, out, E1E2); + } + } +} + +void +add_couple_jac_points(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *T1, + const theta_couple_jac_point_t *T2, + const theta_couple_curve_t *E1E2) +{ + ADD(&out->P1, &T1->P1, &T2->P1, &E1E2->E1); + ADD(&out->P2, &T1->P2, &T2->P2, &E1E2->E2); +} + +void +double_couple_jac_point(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2) +{ + DBL(&out->P1, &in->P1, &E1E2->E1); + DBL(&out->P2, &in->P2, &E1E2->E2); +} + +void +double_couple_jac_point_iter(theta_couple_jac_point_t *out, + unsigned n, + const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2) +{ + if (n == 0) { + *out = *in; + } else if (n == 1) { + double_couple_jac_point(out, in, E1E2); + } else { + fp2_t a1, a2, t1, t2; + + jac_to_ws(&out->P1, &t1, &a1, &in->P1, &E1E2->E1); + jac_to_ws(&out->P2, &t2, &a2, &in->P2, &E1E2->E2); + + DBLW(&out->P1, &t1, &out->P1, &t1); + DBLW(&out->P2, &t2, &out->P2, &t2); + for (unsigned i = 0; i < n - 1; i++) { + DBLW(&out->P1, &t1, &out->P1, &t1); + DBLW(&out->P2, &t2, &out->P2, &t2); + } + + jac_from_ws(&out->P1, &out->P1, &a1, &E1E2->E1); + jac_from_ws(&out->P2, &out->P2, &a2, &E1E2->E2); + } +} + +void +couple_jac_to_xz(theta_couple_point_t *P, const theta_couple_jac_point_t *xyP) +{ + jac_to_xz(&P->P1, &xyP->P1); + jac_to_xz(&P->P2, &xyP->P2); +} + +void +copy_bases_to_kernel(theta_kernel_couple_points_t *ker, const ec_basis_t *B1, const ec_basis_t *B2) +{ + // Copy the basis on E1 to (P, _) on T1, T2 and T1 - T2 + copy_point(&ker->T1.P1, &B1->P); + copy_point(&ker->T2.P1, &B1->Q); + copy_point(&ker->T1m2.P1, &B1->PmQ); + + // Copy the basis on E2 to (_, P) on T1, T2 and T1 - T2 + copy_point(&ker->T1.P2, &B2->P); + copy_point(&ker->T2.P2, &B2->Q); + copy_point(&ker->T1m2.P2, &B2->PmQ); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd.h new file mode 100644 index 0000000000..2b16e23834 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd.h @@ -0,0 +1,435 @@ +/** @file + * + * @authors Antonin Leroux + * + * @brief The HD-isogenies algorithm required by the signature + * + */ + +#ifndef HD_H +#define HD_H + +#include +#include +#include + +/** @defgroup hd_module Abelian surfaces and their isogenies + * @{ + */ + +#define HD_extra_torsion 2 + +/** @defgroup hd_struct Data structures for dimension 2 + * @{ + */ + +/** @brief Type for couple point with XZ coordinates + * @typedef theta_couple_point_t + * + * @struct theta_couple_point + * + * Structure for 
the couple point on an elliptic product + * using XZ coordinates + */ +typedef struct theta_couple_point +{ + ec_point_t P1; + ec_point_t P2; +} theta_couple_point_t; + +/** @brief Type for three couple points T1, T2, T1-T2 with XZ coordinates + * @typedef theta_kernel_couple_points_t + * + * @struct theta_kernel_couple_points + * + * Structure for a triple of theta couple points T1, T2 and T1 - T2 + */ +typedef struct theta_kernel_couple_points +{ + theta_couple_point_t T1; + theta_couple_point_t T2; + theta_couple_point_t T1m2; +} theta_kernel_couple_points_t; + +/** @brief Type for couple point with XYZ coordinates + * @typedef theta_couple_jac_point_t + * + * @struct theta_couple_jac_point + * + * Structure for the couple point on an elliptic product + * using XYZ coordinates + */ +typedef struct theta_couple_jac_point +{ + jac_point_t P1; + jac_point_t P2; +} theta_couple_jac_point_t; + +/** @brief Type for couple curve * + * @typedef theta_couple_curve_t + * + * @struct theta_couple_curve + * + * the theta_couple_curve structure + */ +typedef struct theta_couple_curve +{ + ec_curve_t E1; + ec_curve_t E2; +} theta_couple_curve_t; + +/** @brief Type for a product E1 x E2 with corresponding bases + * @typedef theta_couple_curve_with_basis_t + * + * @struct theta_couple_curve_with_basis + * + * Type for a product E1 x E2 with corresponding bases Ei[2^n] + */ +typedef struct theta_couple_curve_with_basis +{ + ec_curve_t E1; + ec_curve_t E2; + ec_basis_t B1; + ec_basis_t B2; +} theta_couple_curve_with_basis_t; + +/** @brief Type for theta point * + * @typedef theta_point_t + * + * @struct theta_point + * + * the theta_point structure used + */ +typedef struct theta_point +{ + fp2_t x; + fp2_t y; + fp2_t z; + fp2_t t; +} theta_point_t; + +/** @brief Type for theta point with repeating components + * @typedef theta_point_compact_t + * + * @struct theta_point_compact + * + * the theta_point structure used for points with repeated components + */ +typedef struct theta_point_compact +{ + fp2_t x; + fp2_t y; +} theta_point_compact_t; + +/** @brief Type for theta structure * + * @typedef theta_structure_t + * + * @struct theta_structure + * + * the theta_structure structure used + */ +typedef struct theta_structure +{ + theta_point_t null_point; + bool precomputation; + + // Eight precomputed values used for doubling and + // (2,2)-isogenies. + fp2_t XYZ0; + fp2_t YZT0; + fp2_t XZT0; + fp2_t XYT0; + + fp2_t xyz0; + fp2_t yzt0; + fp2_t xzt0; + fp2_t xyt0; +} theta_structure_t; + +/** @brief A 2x2 matrix used for action by translation + * @typedef translation_matrix_t + * + * @struct translation_matrix + * + * Structure to hold 4 fp2_t elements representing a 2x2 matrix used when computing + * a compatible theta structure during gluing. + */ +typedef struct translation_matrix +{ + fp2_t g00; + fp2_t g01; + fp2_t g10; + fp2_t g11; +} translation_matrix_t; + +/** @brief A 4x4 matrix used for basis changes + * @typedef basis_change_matrix_t + * + * @struct basis_change_matrix + * + * Structure to hold 16 elements representing a 4x4 matrix used for changing + * the basis of a theta point.
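As an illustrative aside (not part of the upstream sources), a small sketch of how these product types are used together. It relies only on the types defined here and on double_couple_point_iter() from hd.c above; the include path and the helper name demo_double_couple are assumptions.

#include <hd.h> /* assumed include path for this header */

/* Push a couple point T = (P1, P2) on E1 x E2 through n doublings.
 * As hd.c shows, this is just componentwise x-only doubling on E1 and E2,
 * and the upstream code itself calls it in place, so out == in is fine. */
static void
demo_double_couple(theta_couple_point_t *T, unsigned n, const theta_couple_curve_t *E1E2)
{
    double_couple_point_iter(T, n, T, E1E2); /* T <- [2^n]T = ([2^n]P1, [2^n]P2) */
}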
+ */ +typedef struct basis_change_matrix +{ + fp2_t m[4][4]; +} basis_change_matrix_t; + +/** @brief Type for gluing (2,2) theta isogeny * + * @typedef theta_gluing_t + * + * @struct theta_gluing + * + * the theta_gluing structure + */ +typedef struct theta_gluing +{ + + theta_couple_curve_t domain; + theta_couple_jac_point_t xyK1_8; + theta_point_compact_t imageK1_8; + basis_change_matrix_t M; + theta_point_t precomputation; + theta_point_t codomain; + +} theta_gluing_t; + +/** @brief Type for standard (2,2) theta isogeny * + * @typedef theta_isogeny_t + * + * @struct theta_isogeny + * + * the theta_isogeny structure + */ +typedef struct theta_isogeny +{ + theta_point_t T1_8; + theta_point_t T2_8; + bool hadamard_bool_1; + bool hadamard_bool_2; + theta_structure_t domain; + theta_point_t precomputation; + theta_structure_t codomain; +} theta_isogeny_t; + +/** @brief Type for splitting isomorphism * + * @typedef theta_splitting_t + * + * @struct theta_splitting + * + * the theta_splitting structure + */ +typedef struct theta_splitting +{ + basis_change_matrix_t M; + theta_structure_t B; + +} theta_splitting_t; + +// end of hd_struct +/** + * @} + */ + +/** @defgroup hd_functions Functions for dimension 2 + * @{ + */ + +/** + * @brief Compute the double of the theta couple point in on the elliptic product E12 + * + * @param out Output: the theta_couple_point + * @param in the theta couple point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1,P2) + * out = [2] (P1,P2) + * + */ +void double_couple_point(theta_couple_point_t *out, const theta_couple_point_t *in, const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the iterated double of the theta couple point in on the elliptic product E12 + * + * @param out Output: the theta_couple_point + * @param n : the number of iteration + * @param E1E2 an elliptic product + * @param in the theta couple point in the elliptic product + * in = (P1,P2) + * out = [2^n] (P1,P2) + * + */ +void double_couple_point_iter(theta_couple_point_t *out, + unsigned n, + const theta_couple_point_t *in, + const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the addition of two points in (X : Y : Z) coordinates on the elliptic product E12 + * + * @param out Output: the theta_couple_jac_point + * @param T1 the theta couple jac point in the elliptic product + * @param T2 the theta couple jac point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1, P2), (Q1, Q2) + * out = (P1 + Q1, P2 + Q2) + * + **/ +void add_couple_jac_points(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *T1, + const theta_couple_jac_point_t *T2, + const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the double of the theta couple point in on the elliptic product E12 + * + * @param out Output: the theta_couple_point + * @param in the theta couple point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1,P2) + * out = [2] (P1,P2) + * + */ +void double_couple_jac_point(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the iterated double of the theta couple jac point in on the elliptic product E12 + * + * @param out Output: the theta_couple_jac_point + * @param n : the number of iteration + * @param in the theta couple jac point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1,P2) + * out = [2^n] (P1,P2) + * + */ +void double_couple_jac_point_iter(theta_couple_jac_point_t *out, + unsigned n, + 
const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2); + +/** + * @brief A forgetful function which returns (X : Z) points given a pair of (X : Y : Z) points + * + * @param P Output: the theta_couple_point + * @param xyP : the theta_couple_jac_point + **/ +void couple_jac_to_xz(theta_couple_point_t *P, const theta_couple_jac_point_t *xyP); + +/** + * @brief Compute a (2,2) isogeny chain in dimension 2 between elliptic + * products in the theta_model and evaluate at a list of points of the form + * (P1,0) or (0,P2). Returns 0 if the codomain fails to split (or there is + * an error during the computation) and 1 otherwise. + * + * @param n : the length of the isogeny chain + * @param E12 an elliptic curve product + * @param ker T1, T2 and T1-T2. couple points on E12[2^(n+2)] + * @param extra_torsion boolean indicating if we give the points in E12[2^n] or + * E12[2^(n+HD_extra_torsion)] + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @returns 1 on success 0 on failure + * + */ +int theta_chain_compute_and_eval(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP); + +/** + * @brief Compute a (2,2) isogeny chain in dimension 2 between elliptic + * products in the theta_model and evaluate at a list of points of the form + * (P1,0) or (0,P2). Returns 0 if the codomain fails to split (or there is + * an error during the computation) and 1 otherwise. + * Compared to theta_chain_compute_and_eval, it does extra isotropy + * checks on the kernel. + * + * @param n : the length of the isogeny chain + * @param E12 an elliptic curve product + * @param ker T1, T2 and T1-T2. couple points on E12[2^(n+2)] + * @param extra_torsion boolean indicating if we give the points in E12[2^n] or + * E12[2^(n+HD_extra_torsion)] + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @returns 1 on success 0 on failure + * + */ +int theta_chain_compute_and_eval_verify(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP); + +/** + * @brief Compute a (2,2) isogeny chain in dimension 2 between elliptic + * products in the theta_model and evaluate at a list of points of the form + * (P1,0) or (0,P2). Returns 0 if the codomain fails to split (or there is + * an error during the computation) and 1 otherwise. + * Compared to theta_chain_compute_and_eval, it selects a random Montgomery + * model of the codomain. + * + * @param n : the length of the isogeny chain + * @param E12 an elliptic curve product + * @param ker T1, T2 and T1-T2. 
couple points on E12[2^(n+2)] + * @param extra_torsion boolean indicating if we give the points in E12[2^n] or + * E12[2^(n+HD_extra_torsion)] + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @returns 1 on success, 0 on failure + * + */ +int theta_chain_compute_and_eval_randomized(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP); + +/** + * @brief Given a bases B1 on E1 and B2 on E2 copies this to create a kernel + * on E1 x E2 as couple points T1, T2 and T1 - T2 + * + * @param ker Output: a kernel for dim_two_isogenies (T1, T2, T1-T2) + * @param B1 Input basis on E1 + * @param B2 Input basis on E2 + **/ +void copy_bases_to_kernel(theta_kernel_couple_points_t *ker, const ec_basis_t *B1, const ec_basis_t *B2); + +/** + * @brief Given a couple of points (P1, P2) on a couple of curves (E1, E2) + * this function tests if both points are of order exactly 2^t + * + * @param T: couple point (P1, P2) + * @param E: a couple of curves (E1, E2) + * @param t: an integer + * @returns 0xFFFFFFFF on success, 0 on failure + */ +static int +test_couple_point_order_twof(const theta_couple_point_t *T, const theta_couple_curve_t *E, int t) +{ + int check_P1 = test_point_order_twof(&T->P1, &E->E1, t); + int check_P2 = test_point_order_twof(&T->P2, &E->E2, t); + + return check_P1 & check_P2; +} + +// end of hd_functions +/** + * @} + */ +// end of hd_module +/** + * @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.c new file mode 100644 index 0000000000..d980d12183 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.c @@ -0,0 +1,143 @@ +#include + +#define FP2_ZERO 0 +#define FP2_ONE 1 +#define FP2_I 2 +#define FP2_MINUS_ONE 3 +#define FP2_MINUS_I 4 + +const int EVEN_INDEX[10][2] = {{0, 0}, {0, 1}, {0, 2}, {0, 3}, {1, 0}, {1, 2}, {2, 0}, {2, 1}, {3, 0}, {3, 3}}; +const int CHI_EVAL[4][4] = {{1, 1, 1, 1}, {1, -1, 1, -1}, {1, 1, -1, -1}, {1, -1, -1, 1}}; +const fp2_t FP2_CONSTANTS[5] = {{ +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if 
defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x1f03, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0xfff, 0x1e} +#elif RADIX == 32 +{0xffffc0f, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0x30fff} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xfffffffffffffffc, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff} +#else +{0x7ffffffffffff8, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x1ffffffffffff} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1f03, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0xfff, 0x1e} +#elif RADIX == 32 +{0xffffc0f, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0x30fff} +#elif RADIX == 64 +#if 
defined(SQISIGN_GF_IMPL_BROADWELL) +{0xfffffffffffffffc, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff} +#else +{0x7ffffffffffff8, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x1ffffffffffff} +#endif +#endif +}}; +const precomp_basis_change_matrix_t SPLITTING_TRANSFORMS[10] = {{{{FP2_ONE, FP2_I, FP2_ONE, FP2_I}, {FP2_ONE, FP2_MINUS_I, FP2_MINUS_ONE, FP2_I}, {FP2_ONE, FP2_I, FP2_MINUS_ONE, FP2_MINUS_I}, {FP2_MINUS_ONE, FP2_I, FP2_MINUS_ONE, FP2_I}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_MINUS_ONE, FP2_ZERO, FP2_ZERO}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_MINUS_ONE, FP2_ZERO}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}, {FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_ONE}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_ONE}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}, {FP2_MINUS_ONE, FP2_ONE, FP2_ONE, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}}}}; +const precomp_basis_change_matrix_t NORMALIZATION_TRANSFORMS[6] = {{{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}}}, {{{FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}}}, {{{FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_ONE}, {FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}}}, {{{FP2_MINUS_ONE, FP2_I, FP2_I, FP2_ONE}, {FP2_I, FP2_MINUS_ONE, FP2_ONE, FP2_I}, {FP2_I, FP2_ONE, FP2_MINUS_ONE, FP2_I}, {FP2_ONE, FP2_I, FP2_I, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_I, FP2_I, FP2_MINUS_ONE}, {FP2_I, FP2_ONE, FP2_MINUS_ONE, FP2_I}, {FP2_I, FP2_MINUS_ONE, FP2_ONE, FP2_I}, {FP2_MINUS_ONE, FP2_I, FP2_I, FP2_ONE}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.h new file mode 100644 index 0000000000..b3147a42a9 --- /dev/null +++ 
b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.h @@ -0,0 +1,18 @@ +#ifndef HD_SPLITTING_H +#define HD_SPLITTING_H + +#include +#include + +typedef struct precomp_basis_change_matrix { + uint8_t m[4][4]; +} precomp_basis_change_matrix_t; + +extern const int EVEN_INDEX[10][2]; +extern const int CHI_EVAL[4][4]; +extern const fp2_t FP2_CONSTANTS[5]; +extern const precomp_basis_change_matrix_t SPLITTING_TRANSFORMS[10]; +extern const precomp_basis_change_matrix_t NORMALIZATION_TRANSFORMS[6]; + +#endif + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/id2iso.c new file mode 100644 index 0000000000..0743974345 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/id2iso.c @@ -0,0 +1,338 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Scalar multiplication [x]P + [y]Q where x and y are stored +// inside an ibz_vec_2_t [x, y] and P, Q \in E[2^f] +void +ec_biscalar_mul_ibz_vec(ec_point_t *res, + const ibz_vec_2_t *scalar_vec, + const int f, + const ec_basis_t *PQ, + const ec_curve_t *curve) +{ + digit_t scalars[2][NWORDS_ORDER]; + ibz_to_digit_array(scalars[0], &(*scalar_vec)[0]); + ibz_to_digit_array(scalars[1], &(*scalar_vec)[1]); + ec_biscalar_mul(res, scalars[0], scalars[1], f, PQ, curve); +} + +// Given an ideal, computes the scalars s0, s1 which determine the kernel generator +// of the equivalent isogeny +void +id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *vec, const quat_left_ideal_t *lideal) +{ + ibz_t tmp; + ibz_init(&tmp); + + ibz_mat_2x2_t mat; + ibz_mat_2x2_init(&mat); + + // construct the matrix of the dual of alpha on the 2^f-torsion + { + quat_alg_elem_t alpha; + quat_alg_elem_init(&alpha); + + int lideal_generator_ok UNUSED = quat_lideal_generator(&alpha, lideal, &QUATALG_PINFTY); + assert(lideal_generator_ok); + quat_alg_conj(&alpha, &alpha); + + ibz_vec_4_t coeffs; + ibz_vec_4_init(&coeffs); + quat_change_to_O0_basis(&coeffs, &alpha); + + for (unsigned i = 0; i < 2; ++i) { + ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + for (unsigned j = 0; j < 2; ++j) { + ibz_mul(&tmp, &ACTION_GEN2[i][j], &coeffs[1]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN3[i][j], &coeffs[2]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN4[i][j], &coeffs[3]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + } + } + + ibz_vec_4_finalize(&coeffs); + quat_alg_elem_finalize(&alpha); + } + + // find the kernel of alpha modulo the norm of the ideal + { + const ibz_t *const norm = &lideal->norm; + + ibz_mod(&(*vec)[0], &mat[0][0], norm); + ibz_mod(&(*vec)[1], &mat[1][0], norm); + ibz_gcd(&tmp, &(*vec)[0], &(*vec)[1]); + if (ibz_is_even(&tmp)) { + ibz_mod(&(*vec)[0], &mat[0][1], norm); + ibz_mod(&(*vec)[1], &mat[1][1], norm); + } +#ifndef NDEBUG + ibz_gcd(&tmp, &(*vec)[0], norm); + ibz_gcd(&tmp, &(*vec)[1], &tmp); + assert(!ibz_cmp(&tmp, &ibz_const_one)); +#endif + } + + ibz_mat_2x2_finalize(&mat); + ibz_finalize(&tmp); +} + +// helper function to apply a matrix to a basis of E[2^f] +// works in place +int +matrix_application_even_basis(ec_basis_t *bas, const ec_curve_t *E, ibz_mat_2x2_t *mat, int f) +{ + digit_t scalars[2][NWORDS_ORDER] = { 0 }; + int ret; + + ibz_t tmp, pow_two; + ibz_init(&tmp); + ibz_init(&pow_two); + ibz_pow(&pow_two, &ibz_const_two, f); + + ec_basis_t tmp_bas; + copy_basis(&tmp_bas, bas); + + // reduction mod 2f + ibz_mod(&(*mat)[0][0], &(*mat)[0][0], &pow_two); + 
ibz_mod(&(*mat)[0][1], &(*mat)[0][1], &pow_two); + ibz_mod(&(*mat)[1][0], &(*mat)[1][0], &pow_two); + ibz_mod(&(*mat)[1][1], &(*mat)[1][1], &pow_two); + + // For a matrix [[a, c], [b, d]] we compute: + // + // first basis element R = [a]P + [b]Q + ibz_to_digit_array(scalars[0], &(*mat)[0][0]); + ibz_to_digit_array(scalars[1], &(*mat)[1][0]); + ec_biscalar_mul(&bas->P, scalars[0], scalars[1], f, &tmp_bas, E); + + // second basis element S = [c]P + [d]Q + ibz_to_digit_array(scalars[0], &(*mat)[0][1]); + ibz_to_digit_array(scalars[1], &(*mat)[1][1]); + ec_biscalar_mul(&bas->Q, scalars[0], scalars[1], f, &tmp_bas, E); + + // Their difference R - S = [a - c]P + [b - d]Q + ibz_sub(&tmp, &(*mat)[0][0], &(*mat)[0][1]); + ibz_mod(&tmp, &tmp, &pow_two); + ibz_to_digit_array(scalars[0], &tmp); + ibz_sub(&tmp, &(*mat)[1][0], &(*mat)[1][1]); + ibz_mod(&tmp, &tmp, &pow_two); + ibz_to_digit_array(scalars[1], &tmp); + ret = ec_biscalar_mul(&bas->PmQ, scalars[0], scalars[1], f, &tmp_bas, E); + + ibz_finalize(&tmp); + ibz_finalize(&pow_two); + + return ret; +} + +// helper function to apply some endomorphism of E0 on the precomputed basis of E[2^f] +// works in place +void +endomorphism_application_even_basis(ec_basis_t *bas, + const int index_alternate_curve, + const ec_curve_t *E, + const quat_alg_elem_t *theta, + int f) +{ + ibz_t tmp; + ibz_init(&tmp); + ibz_vec_4_t coeffs; + ibz_vec_4_init(&coeffs); + ibz_mat_2x2_t mat; + ibz_mat_2x2_init(&mat); + + ibz_t content; + ibz_init(&content); + + // decomposing theta on the basis + quat_alg_make_primitive(&coeffs, &content, theta, &EXTREMAL_ORDERS[index_alternate_curve].order); + assert(ibz_is_odd(&content)); + + ibz_set(&mat[0][0], 0); + ibz_set(&mat[0][1], 0); + ibz_set(&mat[1][0], 0); + ibz_set(&mat[1][1], 0); + + // computing the matrix + + for (unsigned i = 0; i < 2; ++i) { + ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + for (unsigned j = 0; j < 2; ++j) { + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen2[i][j], &coeffs[1]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen3[i][j], &coeffs[2]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen4[i][j], &coeffs[3]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&mat[i][j], &mat[i][j], &content); + } + } + + // and now we apply it + matrix_application_even_basis(bas, E, &mat, f); + + ibz_vec_4_finalize(&coeffs); + ibz_mat_2x2_finalize(&mat); + ibz_finalize(&content); + + ibz_finalize(&tmp); +} + +// compute the ideal whose kernel is generated by vec2[0]*BO[0] + vec2[1]*B0[1] where B0 is the +// canonical basis of E0 +void +id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t *vec2, int f) +{ + + // algorithm: apply endomorphisms 1 and j+(1+k)/2 to the kernel point, + // the result should form a basis of the respective torsion subgroup. + // then apply i to the kernel point and decompose over said basis. + // hence we have an equation a*P + b*[j+(1+k)/2]P == [i]P, which will + // easily reveal an endomorphism that kills P. 
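As an illustrative aside (not written by the patch authors), the equation mentioned in the comment above can be spelled out. Writing theta = j + (1+k)/2 and iota for the endomorphism corresponding to i, the discrete logarithms computed below produce a and b with

\[
aP + b\,\theta(P) \;=\; \iota(P), \qquad\text{hence}\qquad \alpha(P) = 0 \;\text{ for }\; \alpha = a - \iota + b\,\theta,
\]

which is exactly the generator gen = a - i + b*(j+(1+k)/2) constructed at the end of this function; the left O0-ideal built from alpha and 2^f then has norm 2^f, as the final assert checks.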
+ + ibz_t two_pow; + ibz_init(&two_pow); + + ibz_vec_2_t vec; + ibz_vec_2_init(&vec); + + if (f == TORSION_EVEN_POWER) { + ibz_copy(&two_pow, &TORSION_PLUS_2POWER); + } else { + ibz_pow(&two_pow, &ibz_const_two, f); + } + + { + ibz_mat_2x2_t mat; + ibz_mat_2x2_init(&mat); + + ibz_copy(&mat[0][0], &(*vec2)[0]); + ibz_copy(&mat[1][0], &(*vec2)[1]); + + ibz_mat_2x2_eval(&vec, &ACTION_J, vec2); + ibz_copy(&mat[0][1], &vec[0]); + ibz_copy(&mat[1][1], &vec[1]); + + ibz_mat_2x2_eval(&vec, &ACTION_GEN4, vec2); + ibz_add(&mat[0][1], &mat[0][1], &vec[0]); + ibz_add(&mat[1][1], &mat[1][1], &vec[1]); + + ibz_mod(&mat[0][1], &mat[0][1], &two_pow); + ibz_mod(&mat[1][1], &mat[1][1], &two_pow); + + ibz_mat_2x2_t inv; + ibz_mat_2x2_init(&inv); + { + int inv_ok UNUSED = ibz_mat_2x2_inv_mod(&inv, &mat, &two_pow); + assert(inv_ok); + } + ibz_mat_2x2_finalize(&mat); + + ibz_mat_2x2_eval(&vec, &ACTION_I, vec2); + ibz_mat_2x2_eval(&vec, &inv, &vec); + + ibz_mat_2x2_finalize(&inv); + } + + // final result: a - i + b*(j+(1+k)/2) + quat_alg_elem_t gen; + quat_alg_elem_init(&gen); + ibz_set(&gen.denom, 2); + ibz_add(&gen.coord[0], &vec[0], &vec[0]); + ibz_set(&gen.coord[1], -2); + ibz_add(&gen.coord[2], &vec[1], &vec[1]); + ibz_copy(&gen.coord[3], &vec[1]); + ibz_add(&gen.coord[0], &gen.coord[0], &vec[1]); + ibz_vec_2_finalize(&vec); + + quat_lideal_create(lideal, &gen, &two_pow, &MAXORD_O0, &QUATALG_PINFTY); + + assert(0 == ibz_cmp(&lideal->norm, &two_pow)); + + quat_alg_elem_finalize(&gen); + ibz_finalize(&two_pow); +} + +// finds mat such that: +// (mat*v).B2 = v.B1 +// where "." is the dot product, defined as (v1,v2).(P,Q) = v1*P + v2*Q +// mat encodes the coordinates of the points of B1 in the basis B2 +// specifically requires B1 or B2 to be "full" w.r.t to the 2^n torsion, so that we use tate +// full = 0 assumes B2 is "full" so the easier case. +// if we want to switch the role of B2 and B1, we invert the matrix, e.g. 
set full = 1 +static void +_change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, + const ec_basis_t *B1, + const ec_basis_t *B2, + ec_curve_t *E, + int f, + bool invert) +{ + digit_t x1[NWORDS_ORDER] = { 0 }, x2[NWORDS_ORDER] = { 0 }, x3[NWORDS_ORDER] = { 0 }, x4[NWORDS_ORDER] = { 0 }; + +#ifndef NDEBUG + int e_full = TORSION_EVEN_POWER; + int e_diff = e_full - f; +#endif + + // Ensure the input basis has points of order 2^f + if (invert) { + assert(test_basis_order_twof(B1, E, e_full)); + ec_dlog_2_tate(x1, x2, x3, x4, B1, B2, E, f); + mp_invert_matrix(x1, x2, x3, x4, f, NWORDS_ORDER); + } else { + assert(test_basis_order_twof(B2, E, e_full)); + ec_dlog_2_tate(x1, x2, x3, x4, B2, B1, E, f); + } + +#ifndef NDEBUG + { + if (invert) { + ec_point_t test, test2; + ec_biscalar_mul(&test, x1, x2, f, B2, E); + ec_dbl_iter(&test2, e_diff, &B1->P, E); + assert(ec_is_equal(&test, &test2)); + + ec_biscalar_mul(&test, x3, x4, f, B2, E); + ec_dbl_iter(&test2, e_diff, &B1->Q, E); + assert(ec_is_equal(&test, &test2)); + } else { + ec_point_t test; + ec_biscalar_mul(&test, x1, x2, f, B2, E); + ec_dbl_iter(&test, e_diff, &test, E); + assert(ec_is_equal(&test, &(B1->P))); + + ec_biscalar_mul(&test, x3, x4, f, B2, E); + ec_dbl_iter(&test, e_diff, &test, E); + assert(ec_is_equal(&test, &(B1->Q))); + } + } +#endif + + // Copy the results into the matrix + ibz_copy_digit_array(&((*mat)[0][0]), x1); + ibz_copy_digit_array(&((*mat)[1][0]), x2); + ibz_copy_digit_array(&((*mat)[0][1]), x3); + ibz_copy_digit_array(&((*mat)[1][1]), x4); +} + +void +change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, const ec_basis_t *B1, const ec_basis_t *B2, ec_curve_t *E, int f) +{ + _change_of_basis_matrix_tate(mat, B1, B2, E, f, false); +} + +void +change_of_basis_matrix_tate_invert(ibz_mat_2x2_t *mat, const ec_basis_t *B1, const ec_basis_t *B2, ec_curve_t *E, int f) +{ + _change_of_basis_matrix_tate(mat, B1, B2, E, f, true); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/id2iso.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/id2iso.h new file mode 100644 index 0000000000..1b4eaae3c5 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/id2iso.h @@ -0,0 +1,280 @@ +/** @file + * + * @authors Antonin Leroux + * + * @brief The id2iso algorithms + */ + +#ifndef ID2ISO_H +#define ID2ISO_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/** @defgroup id2iso_id2iso Ideal to isogeny conversion + * @{ + */ +static const quat_represent_integer_params_t QUAT_represent_integer_params = { + .algebra = &QUATALG_PINFTY, /// The level-specific quaternion algebra + .order = &(EXTREMAL_ORDERS[0]), // The special extremal order O0 + .primality_test_iterations = QUAT_primality_num_iter // precompted bound on the iteration number in primality tests +}; + +/*************************** Functions *****************************/ + +/** @defgroup id2iso_others Other functions needed for id2iso + * @{ + */ + +/** + * @brief Scalar multiplication [x]P + [y]Q where x and y are stored inside an + * ibz_vec_2_t [x, y] and P, Q in E[2^f] + * + * @param res Output: the point R = [x]P + [y]Q + * @param scalar_vec: a vector of ibz type elements (x, y) + * @param f: an integer such that P, Q are in E[2^f] + * @param PQ: an x-only basis x(P), x(Q) and x(P-Q) + * @param curve: the curve E the points P, Q, R are defined on + * + */ +void ec_biscalar_mul_ibz_vec(ec_point_t *res, + const ibz_vec_2_t *scalar_vec, + const int f, + const ec_basis_t *PQ, + const ec_curve_t *curve); + +/** + * @brief 
Translating an ideal of norm 2^f dividing p²-1 into the corresponding + * kernel coefficients + * + * @param ker_dlog Output : two coefficients indicating the decomposition of the + * kernel over the canonical basis of E0[2^f] + * @param lideal_input : O0-ideal corresponding to the ideal to be translated of + * norm 2^f + * + */ +void id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *ker_dlog, const quat_left_ideal_t *lideal_input); + +/** + * @brief Applies some 2x2 matrix on a basis of E[2^TORSION_EVEN_POWER] + * + * @param P the basis + * @param E the curve + * @param mat the matrix + * @param f TORSION_EVEN_POWER + * @returns 1 if success, 0 if error + * + * helper function, works in place + * + */ +int matrix_application_even_basis(ec_basis_t *P, const ec_curve_t *E, ibz_mat_2x2_t *mat, int f); + +/** + * @brief Applies some endomorphism of an alternate curve to E[f] + * + * @param P the basis + * @param index_alternate_curve index of the alternate order in the list of precomputed extremal + * orders + * @param E the curve (E is not required to be the alternate curve in question since in the end we + * only apply a matrix) + * @param theta the endomorphism + * @param f TORSION_EVEN_POWER + * + * helper function, works in place + * + */ +void endomorphism_application_even_basis(ec_basis_t *P, + const int index_alternate_curve, + const ec_curve_t *E, + const quat_alg_elem_t *theta, + int f); + +/** + * @brief Translating a kernel on the curve E0, represented as a vector with + * respect to the precomputed 2^f-torsion basis, into the corresponding O0-ideal + * + * @param lideal Output : the output O0-ideal + * @param f : exponent definining the norm of the ideal to compute + * @param vec2 : length-2 vector giving the 2-power part of the kernel with + * respect to the precomputed 2^f basis + * + */ +void id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t *vec2, int f); + +/** + * @brief Change of basis matrix for full basis B2 + * Finds mat such that: + * (mat*v).B2 = v.B1 + * where "." is the dot product, defined as (v1,v2).(P,Q) = v1*P + v2*Q + * + * @param mat the computed change of basis matrix + * @param B1 the source basis for E[2^f] + * @param B2 the target basis for E[2^e] + * @param E the elliptic curve + * @param f 2^f is the order of the points of the input basis + * + * mat encodes the coordinates of the points of B1 in the basis B2 + */ +void change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, const ec_basis_t *B1, const ec_basis_t *B2, ec_curve_t *E, int f); + +/** + * @brief Change of basis matrix for full basis B2 + * Finds mat such that: + * (mat*v).B1 = [2^e-f]*v.B2 + * where "." 
is the dot product, defined as (v1,v2).(P,Q) = v1*P + v2*Q + * + * @param mat the computed change of basis matrix + * @param B1 the source basis for E[2^e] + * @param B2 the target basis for E[2^f] + * @param E the elliptic curve + * @param f 2^f is the order of the points of the input basis + * + * mat encodes the coordinates of the points of B1 in the basis B2, by + * applying change_of_basis_matrix_tate and inverting the outcome + */ +void change_of_basis_matrix_tate_invert(ibz_mat_2x2_t *mat, + const ec_basis_t *B1, + const ec_basis_t *B2, + ec_curve_t *E, + int f); + +/** @} + */ + +/** @defgroup id2iso_arbitrary Arbitrary isogeny evaluation + * @{ + */ +/** + * @brief Function to find elements u, v, d1, d2, beta1, beta2 for the ideal to isogeny + * + * @param u Output: integer + * @param v Output: integer + * @param beta1 Output: quaternion element + * @param beta2 Output: quaternion element + * @param d1 Output: integer + * @param d2 Output: integer + * @param index_alternate_order_1 Output: small integer (index of an alternate order) + * @param index_alternate_order_2 Output: small integer (index of an alternate order) + * @param target : integer, target norm + * @param lideal : O0-ideal defining the search space + * @param Bpoo : quaternion algebra + * @param num_alternate_order number of alternate order we consider + * @returns 1 if the computation succeeds, 0 otherwise + * + * Let us write ti = index_alternate_order_i, + * we look for u,v,beta1,beta2,d1,d2,t1,t2 + * such that u d1 + v d2 = target + * and where di = norm(betai)/norm(Ii), where the ideal Ii is equal to overbar{Ji} * lideal and + * betai is in Ii where Ji is a connecting ideal between the maximal order O0 and O_ti t1,t2 must be + * contained between 0 and num_alternate_order This corresponds to the function SuitableIdeals in + * the spec + */ +int find_uv(ibz_t *u, + ibz_t *v, + quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *d1, + ibz_t *d2, + int *index_alternate_order_1, + int *index_alternate_order_2, + const ibz_t *target, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo, + int num_alternate_order); + +/** + * @brief Computes an arbitrary isogeny of fixed degree starting from E0 + * and evaluates it a list of points of the form (P1,0) or (0,P2). 
+ * + * @param lideal Output : an ideal of norm u + * @param u : integer + * @param small : bit indicating if we the value of u is "small" meaning that we + expect it to be + * around sqrt{p}, in that case we use a length slightly above + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny + (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @param index_alternate_order : index of the special extremal order to be used (in the list of + these orders) + * @returns the length of the chain if the computation succeeded, zero upon + failure + * + * F is an isogeny encoding an isogeny [adjust]*phi : E0 -> Eu of degree u + * note that the codomain of F can be either Eu x Eu' or Eu' x Eu for some curve + Eu' + */ +int fixed_degree_isogeny_and_eval(quat_left_ideal_t *lideal, + const ibz_t *u, + bool small, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP, + const int index_alternate_order); + +/** + * @brief Translating an ideal into a representation of the corresponding + * isogeny + * + * @param beta1 Output: quaternion element + * @param beta2 Output: quaternion element + * @param u Output: integer + * @param v Output: integer + * @param d1 Output: integer + * @param d2 Output: integer + * @param codomain the codomain of the isogeny corresponding to lideal + * @param basis Output : evaluation of the canonical basis of E0 through the + * ideal corresponding to lideal + * @param lideal : O0 - ideal in input + * @param Bpoo : the quaternion algebra + * @returns 1 if the computation succeeded, 0 otherwise + * + * Compute the codomain and image on the basis of E0 of the isogeny + * E0 -> codomain corresponding to lideal + * + * There is some integer e >= 0 such that + * 2^e * u, 2^e * v,beta1, beta2, d1, d2 are the output of find_uv + * on input target = 2^TORSION_PLUS_EVEN_POWER and lideal + * + * codomain and basis are computed with the help of a dimension 2 isogeny + * of degree 2^TORSION_PLUS_EVEN_POWER - e using a Kani diagram + * + */ +int dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *u, + ibz_t *v, + ibz_t *d1, + ibz_t *d2, + ec_curve_t *codomain, + ec_basis_t *basis, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo); + +/** + * @brief Translating an ideal into a representation of the corresponding + * isogeny + * + * @param basis Output : evaluation of the canonical basis of E0 through the + * ideal corresponding to lideal + * @param lideal : ideal in input + * @param codomain + * @returns 1 if the computation succeeds, 0 otherwise + * + * This is a wrapper around the ideal to isogeny clapotis function + */ +int dim2id2iso_arbitrary_isogeny_evaluation(ec_basis_t *basis, ec_curve_t *codomain, const quat_left_ideal_t *lideal); + +/** @} + */ + +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/intbig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/intbig.h new file mode 100644 index 0000000000..a0c2c02477 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/intbig.h @@ -0,0 +1,303 @@ +/** @file + * + * @authors Luca De Feo, Sina Schaeffler + * + * @brief Declarations for big integers in the reference implementation + */ + +#ifndef INTBIG_H +#define INTBIG_H + +#include +#if defined(MINI_GMP) +#include +#include +#else +#include +#endif +#include +#include + +/** @ingroup quat_quat + * @defgroup ibz_all Signed big integers (gmp-based) + * @{ + */ + 
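As an illustrative aside (not part of the upstream sources), a minimal usage sketch for the ibz_t API declared below, assuming the header is reachable as intbig.h and that the helper name demo_ibz_div_mod is hypothetical. It exercises the documented conventions: every ibz_t is initialized and finalized, ibz_div rounds the quotient towards zero, and ibz_mod always returns a non-negative remainder. ibz_set is not shown in this excerpt but is used elsewhere in the patch to assign small integers.

#include <intbig.h> /* assumed include path for this header */

static int
demo_ibz_div_mod(void)
{
    ibz_t a, b, q, r;
    ibz_init(&a);
    ibz_init(&b);
    ibz_init(&q);
    ibz_init(&r);

    ibz_set(&a, -7);
    ibz_set(&b, 2);

    ibz_div(&q, &r, &a, &b); /* quotient rounded towards zero: q = -3, r = -1 */
    ibz_mod(&r, &a, &b);     /* non-negative remainder: r = 1 */

    int ok = (ibz_get(&r) == 1);

    ibz_finalize(&a);
    ibz_finalize(&b);
    ibz_finalize(&q);
    ibz_finalize(&r);
    return ok;
}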
+/** @defgroup ibz_t Precise number types + * @{ + */ + +/** @brief Type for signed long integers + * + * @typedef ibz_t + * + * For integers of arbitrary size, used by intbig module, using gmp + */ +typedef mpz_t ibz_t; + +/** @} + */ + +/** @defgroup ibz_c Constants + * @{ + */ + +/** + * Constant zero + */ +extern const ibz_t ibz_const_zero; + +/** + * Constant one + */ +extern const ibz_t ibz_const_one; + +/** + * Constant two + */ +extern const ibz_t ibz_const_two; + +/** + * Constant three + */ +extern const ibz_t ibz_const_three; + +/** @} + */ + +/** @defgroup ibz_finit Constructors and Destructors + * @{ + */ + +void ibz_init(ibz_t *x); +void ibz_finalize(ibz_t *x); + +/** @} + */ + +/** @defgroup ibz_za Basic integer arithmetic + * @{ + */ + +/** @brief sum=a+b + */ +void ibz_add(ibz_t *sum, const ibz_t *a, const ibz_t *b); + +/** @brief diff=a-b + */ +void ibz_sub(ibz_t *diff, const ibz_t *a, const ibz_t *b); + +/** @brief prod=a*b + */ +void ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b); + +/** @brief neg=-a + */ +void ibz_neg(ibz_t *neg, const ibz_t *a); + +/** @brief abs=|a| + */ +void ibz_abs(ibz_t *abs, const ibz_t *a); + +/** @brief Euclidean division of a by b + * + * Computes quotient, remainder so that remainder+quotient*b = a where 0<=|remainder|<|b| + * The quotient is rounded towards zero. + */ +void ibz_div(ibz_t *quotient, ibz_t *remainder, const ibz_t *a, const ibz_t *b); + +/** @brief Euclidean division of a by 2^exp + * + * Computes a right shift of abs(a) by exp bits, then sets sign(quotient) to sign(a). + * + * Division and rounding is as in ibz_div. + */ +void ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp); + +/** @brief Two adic valuation computation + * + * Computes the position of the first 1 in the binary representation of the integer given in input + * + * When this number is a power of two this gives the two adic valuation of the integer + */ +int ibz_two_adic(ibz_t *pow); + +/** @brief r = a mod b + * + * Assumes valid inputs + * The sign of the divisor is ignored, the result is always non-negative + */ +void ibz_mod(ibz_t *r, const ibz_t *a, const ibz_t *b); + +unsigned long int ibz_mod_ui(const mpz_t *n, unsigned long int d); + +/** @brief Test if a = 0 mod b + */ +int ibz_divides(const ibz_t *a, const ibz_t *b); + +/** @brief pow=x^e + * + * Assumes valid inputs, The case 0^0 yields 1. 
+ */ +void ibz_pow(ibz_t *pow, const ibz_t *x, uint32_t e); + +/** @brief pow=(x^e) mod m + * + * Assumes valid inputs + */ +void ibz_pow_mod(ibz_t *pow, const ibz_t *x, const ibz_t *e, const ibz_t *m); + +/** @brief Compare a and b + * + * @returns a positive value if a > b, zero if a = b, and a negative value if a < b + */ +int ibz_cmp(const ibz_t *a, const ibz_t *b); + +/** @brief Test if x is 0 + * + * @returns 1 if x=0, 0 otherwise + */ +int ibz_is_zero(const ibz_t *x); + +/** @brief Test if x is 1 + * + * @returns 1 if x=1, 0 otherwise + */ +int ibz_is_one(const ibz_t *x); + +/** @brief Compare x to y + * + * @returns 0 if x=y, positive if x>y, negative if x= 0 and target must hold sufficient elements to hold ibz + * + * @param target Target digit_t array + * @param ibz ibz source ibz_t element + */ +void ibz_to_digits(digit_t *target, const ibz_t *ibz); +#define ibz_to_digit_array(T, I) \ + do { \ + memset((T), 0, sizeof(T)); \ + ibz_to_digits((T), (I)); \ + } while (0) + +/** @brief get int32_t equal to the lowest bits of i + * + * Should not be used to get the value of i if its bitsize is close to 32 bit + * It can however be used on any i to get an int32_t of the same parity as i (and same value modulo + * 4) + * + * @param i Input integer + */ +int32_t ibz_get(const ibz_t *i); + +/** @brief generate random value in [a, b] + * assumed that a >= 0 and b >= 0 and a < b + * @returns 1 on success, 0 on failiure + */ +int ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b); + +/** @brief generate random value in [-m, m] + * assumed that m > 0 and bitlength of m < 32 bit + * @returns 1 on success, 0 on failiure + */ +int ibz_rand_interval_minm_m(ibz_t *rand, int32_t m); + +/** @brief Bitsize of a. + * + * @returns Bitsize of a. + * + */ +int ibz_bitsize(const ibz_t *a); + +/** @brief Size of a in given base. + * + * @returns Size of a in given base. 
+ * + */ +int ibz_size_in_base(const ibz_t *a, int base); + +/** @} + */ + +/** @defgroup ibz_n Number theory functions + * @{ + */ + +/** + * @brief Greatest common divisor + * + * @param gcd Output: Set to the gcd of a and b + * @param a + * @param b + */ +void ibz_gcd(ibz_t *gcd, const ibz_t *a, const ibz_t *b); + +/** + * @brief Modular inverse + * + * @param inv Output: Set to the integer in [0,mod[ such that a*inv = 1 mod (mod) if it exists + * @param a + * @param mod + * @returns 1 if inverse exists and was computed, 0 otherwise + */ +int ibz_invmod(ibz_t *inv, const ibz_t *a, const ibz_t *mod); + +/** + * @brief Floor of Integer square root + * + * @param sqrt Output: Set to the floor of an integer square root + * @param a number of which a floor of an integer square root is searched + */ +void ibz_sqrt_floor(ibz_t *sqrt, const ibz_t *a); + +/** @} + */ + +// end of ibz_all +/** @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/isog.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/isog.h new file mode 100644 index 0000000000..b251ca3cdc --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/isog.h @@ -0,0 +1,28 @@ +#ifndef _ISOG_H_ +#define _ISOG_H_ +#include +#include + +/* KPS structure for isogenies of degree 2 or 4 */ +typedef struct +{ + ec_point_t K; +} ec_kps2_t; +typedef struct +{ + ec_point_t K[3]; +} ec_kps4_t; + +void xisog_2(ec_kps2_t *kps, ec_point_t *B, const ec_point_t P); // degree-2 isogeny construction +void xisog_2_singular(ec_kps2_t *kps, ec_point_t *B24, ec_point_t A24); + +void xisog_4(ec_kps4_t *kps, ec_point_t *B, const ec_point_t P); // degree-4 isogeny construction +void xisog_4_singular(ec_kps4_t *kps, ec_point_t *B24, const ec_point_t P, ec_point_t A24); + +void xeval_2(ec_point_t *R, ec_point_t *const Q, const int lenQ, const ec_kps2_t *kps); +void xeval_2_singular(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps2_t *kps); + +void xeval_4(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps4_t *kps); +void xeval_4_singular(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_point_t P, const ec_kps4_t *kps); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/isog_chains.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/isog_chains.c new file mode 100644 index 0000000000..abc9808057 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/isog_chains.c @@ -0,0 +1,241 @@ +#include "isog.h" +#include + +// since we use degree 4 isogeny steps, we need to handle the odd case with care +static uint32_t +ec_eval_even_strategy(ec_curve_t *curve, + ec_point_t *points, + unsigned len_points, + const ec_point_t *kernel, + const int isog_len) +{ + ec_curve_normalize_A24(curve); + ec_point_t A24; + copy_point(&A24, &curve->A24); + + int space = 1; + for (int i = 1; i < isog_len; i *= 2) + ++space; + + // Stack of remaining kernel points and their associated orders + ec_point_t splits[space]; + uint16_t todo[space]; + splits[0] = *kernel; + todo[0] = isog_len; + + int current = 0; // Pointer to current top of stack + + // Chain of 4-isogenies + for (int j = 0; j < isog_len / 2; ++j) { + assert(current >= 0); + assert(todo[current] >= 1); + // Get the next point of order 4 + while (todo[current] != 2) { + assert(todo[current] >= 3); + // A new split will be added + ++current; + assert(current < space); + // We set the seed of the new split to be computed and saved + copy_point(&splits[current], &splits[current - 1]); + // if we copied from 
the very first element, then we perform one additional doubling + unsigned num_dbls = todo[current - 1] / 4 * 2 + todo[current - 1] % 2; + todo[current] = todo[current - 1] - num_dbls; + while (num_dbls--) + xDBL_A24(&splits[current], &splits[current], &A24, false); + } + + if (j == 0) { + assert(fp2_is_one(&A24.z)); + if (!ec_is_four_torsion(&splits[current], curve)) + return -1; + + ec_point_t T; + xDBL_A24(&T, &splits[current], &A24, false); + if (fp2_is_zero(&T.x)) + return -1; // special isogenies not allowed + } else { + assert(todo[current] == 2); +#ifndef NDEBUG + if (fp2_is_zero(&splits[current].z)) + debug_print("splitting point z coordinate is unexpectedly zero"); + + ec_point_t test; + xDBL_A24(&test, &splits[current], &A24, false); + if (fp2_is_zero(&test.z)) + debug_print("z coordinate is unexpectedly zero before doubling"); + xDBL_A24(&test, &test, &A24, false); + if (!fp2_is_zero(&test.z)) + debug_print("z coordinate is unexpectedly not zero after doubling"); +#endif + } + + // Evaluate 4-isogeny + ec_kps4_t kps4; + xisog_4(&kps4, &A24, splits[current]); + xeval_4(splits, splits, current, &kps4); + for (int i = 0; i < current; ++i) + todo[i] -= 2; + xeval_4(points, points, len_points, &kps4); + + --current; + } + assert(isog_len % 2 ? !current : current == -1); + + // Final 2-isogeny + if (isog_len % 2) { +#ifndef NDEBUG + if (fp2_is_zero(&splits[0].z)) + debug_print("splitting point z coordinate is unexpectedly zero"); + ec_point_t test; + copy_point(&test, &splits[0]); + xDBL_A24(&test, &test, &A24, false); + if (!fp2_is_zero(&test.z)) + debug_print("z coordinate is unexpectedly not zero after doubling"); +#endif + + // We need to check the order of this point in case there were no 4-isogenies + if (isog_len == 1 && !ec_is_two_torsion(&splits[0], curve)) + return -1; + if (fp2_is_zero(&splits[0].x)) { + // special isogenies not allowed + // this case can only happen if isog_len == 1; otherwise the + // previous 4-isogenies we computed ensure that $T=(0:1)$ is put + // as the kernel of the dual isogeny + return -1; + } + + ec_kps2_t kps2; + xisog_2(&kps2, &A24, splits[0]); + xeval_2(points, points, len_points, &kps2); + } + + // Output curve in the form (A:C) + A24_to_AC(curve, &A24); + + curve->is_A24_computed_and_normalized = false; + + return 0; +} + +uint32_t +ec_eval_even(ec_curve_t *image, ec_isog_even_t *phi, ec_point_t *points, unsigned len_points) +{ + copy_curve(image, &phi->curve); + return ec_eval_even_strategy(image, points, len_points, &phi->kernel, phi->length); +} + +// naive implementation +uint32_t +ec_eval_small_chain(ec_curve_t *curve, + const ec_point_t *kernel, + int len, + ec_point_t *points, + unsigned len_points, + bool special) // do we allow special isogenies? 
+{ + + ec_point_t A24; + AC_to_A24(&A24, curve); + + ec_kps2_t kps; + ec_point_t small_K, big_K; + copy_point(&big_K, kernel); + + for (int i = 0; i < len; i++) { + copy_point(&small_K, &big_K); + // small_K = big_K; + for (int j = 0; j < len - i - 1; j++) { + xDBL_A24(&small_K, &small_K, &A24, false); + } + // Check the order of the point before the first isogeny step + if (i == 0 && !ec_is_two_torsion(&small_K, curve)) + return (uint32_t)-1; + // Perform isogeny step + if (fp2_is_zero(&small_K.x)) { + if (special) { + ec_point_t B24; + xisog_2_singular(&kps, &B24, A24); + xeval_2_singular(&big_K, &big_K, 1, &kps); + xeval_2_singular(points, points, len_points, &kps); + copy_point(&A24, &B24); + } else { + return (uint32_t)-1; + } + } else { + xisog_2(&kps, &A24, small_K); + xeval_2(&big_K, &big_K, 1, &kps); + xeval_2(points, points, len_points, &kps); + } + } + A24_to_AC(curve, &A24); + + curve->is_A24_computed_and_normalized = false; + return 0; +} + +uint32_t +ec_isomorphism(ec_isom_t *isom, const ec_curve_t *from, const ec_curve_t *to) +{ + fp2_t t0, t1, t2, t3, t4; + + fp2_mul(&t0, &from->A, &from->C); + fp2_mul(&t1, &to->A, &to->C); + + fp2_mul(&t2, &t1, &to->C); // toA*toC^2 + fp2_add(&t3, &t2, &t2); + fp2_add(&t3, &t3, &t3); + fp2_add(&t3, &t3, &t3); + fp2_add(&t2, &t2, &t3); // 9*toA*toC^2 + fp2_sqr(&t3, &to->A); + fp2_mul(&t3, &t3, &to->A); // toA^3 + fp2_add(&t3, &t3, &t3); + fp2_sub(&isom->Nx, &t3, &t2); // 2*toA^3-9*toA*toC^2 + fp2_mul(&t2, &t0, &from->A); // fromA^2*fromC + fp2_sqr(&t3, &from->C); + fp2_mul(&t3, &t3, &from->C); // fromC^3 + fp2_add(&t4, &t3, &t3); + fp2_add(&t3, &t4, &t3); // 3*fromC^3 + fp2_sub(&t3, &t3, &t2); // 3*fromC^3-fromA^2*fromC + fp2_mul(&isom->Nx, &isom->Nx, &t3); // lambda_x = (2*toA^3-9*toA*toC^2)*(3*fromC^3-fromA^2*fromC) + + fp2_mul(&t2, &t0, &from->C); // fromA*fromC^2 + fp2_add(&t3, &t2, &t2); + fp2_add(&t3, &t3, &t3); + fp2_add(&t3, &t3, &t3); + fp2_add(&t2, &t2, &t3); // 9*fromA*fromC^2 + fp2_sqr(&t3, &from->A); + fp2_mul(&t3, &t3, &from->A); // fromA^3 + fp2_add(&t3, &t3, &t3); + fp2_sub(&isom->D, &t3, &t2); // 2*fromA^3-9*fromA*fromC^2 + fp2_mul(&t2, &t1, &to->A); // toA^2*toC + fp2_sqr(&t3, &to->C); + fp2_mul(&t3, &t3, &to->C); // toC^3 + fp2_add(&t4, &t3, &t3); + fp2_add(&t3, &t4, &t3); // 3*toC^3 + fp2_sub(&t3, &t3, &t2); // 3*toC^3-toA^2*toC + fp2_mul(&isom->D, &isom->D, &t3); // lambda_z = (2*fromA^3-9*fromA*fromC^2)*(3*toC^3-toA^2*toC) + + // Mont -> SW -> SW -> Mont + fp2_mul(&t0, &to->C, &from->A); + fp2_mul(&t0, &t0, &isom->Nx); // lambda_x*toC*fromA + fp2_mul(&t1, &from->C, &to->A); + fp2_mul(&t1, &t1, &isom->D); // lambda_z*fromC*toA + fp2_sub(&isom->Nz, &t0, &t1); // lambda_x*toC*fromA - lambda_z*fromC*toA + fp2_mul(&t0, &from->C, &to->C); + fp2_add(&t1, &t0, &t0); + fp2_add(&t0, &t0, &t1); // 3*fromC*toC + fp2_mul(&isom->D, &isom->D, &t0); // 3*lambda_z*fromC*toC + fp2_mul(&isom->Nx, &isom->Nx, &t0); // 3*lambda_x*fromC*toC + + return (fp2_is_zero(&isom->Nx) | fp2_is_zero(&isom->D)); +} + +void +ec_iso_eval(ec_point_t *P, ec_isom_t *isom) +{ + fp2_t tmp; + fp2_mul(&P->x, &P->x, &isom->Nx); + fp2_mul(&tmp, &P->z, &isom->Nz); + fp2_add(&P->x, &P->x, &tmp); + fp2_mul(&P->z, &P->z, &isom->D); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/keygen.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/keygen.c new file mode 100644 index 0000000000..c1c206c99d --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/keygen.c @@ -0,0 +1,64 @@ +#include +#include +#include +#include +#include + +void 
+secret_key_init(secret_key_t *sk) +{ + quat_left_ideal_init(&(sk->secret_ideal)); + ibz_mat_2x2_init(&(sk->mat_BAcan_to_BA0_two)); + ec_curve_init(&sk->curve); +} + +void +secret_key_finalize(secret_key_t *sk) +{ + quat_left_ideal_finalize(&(sk->secret_ideal)); + ibz_mat_2x2_finalize(&(sk->mat_BAcan_to_BA0_two)); +} + +int +protocols_keygen(public_key_t *pk, secret_key_t *sk) +{ + int found = 0; + ec_basis_t B_0_two; + + // iterating until a solution has been found + while (!found) { + + found = quat_sampling_random_ideal_O0_given_norm( + &sk->secret_ideal, &SEC_DEGREE, 1, &QUAT_represent_integer_params, NULL); + + // replacing the secret key ideal by a shorter equivalent one for efficiency + found = found && quat_lideal_prime_norm_reduced_equivalent( + &sk->secret_ideal, &QUATALG_PINFTY, QUAT_primality_num_iter, QUAT_equiv_bound_coeff); + + // ideal to isogeny clapotis + + found = found && dim2id2iso_arbitrary_isogeny_evaluation(&B_0_two, &sk->curve, &sk->secret_ideal); + } + + // Assert the isogeny was found and images have the correct order + assert(test_basis_order_twof(&B_0_two, &sk->curve, TORSION_EVEN_POWER)); + + // Compute a deterministic basis with a hint to speed up verification + pk->hint_pk = ec_curve_to_basis_2f_to_hint(&sk->canonical_basis, &sk->curve, TORSION_EVEN_POWER); + + // Assert the deterministic basis we computed has the correct order + assert(test_basis_order_twof(&sk->canonical_basis, &sk->curve, TORSION_EVEN_POWER)); + + // Compute the 2x2 matrix basis change from the canonical basis to the evaluation of our secret + // isogeny + change_of_basis_matrix_tate( + &sk->mat_BAcan_to_BA0_two, &sk->canonical_basis, &B_0_two, &sk->curve, TORSION_EVEN_POWER); + + // Set the public key from the codomain curve + copy_curve(&pk->curve, &sk->curve); + pk->curve.is_A24_computed_and_normalized = false; // We don't send any precomputation + + assert(fp2_is_one(&pk->curve.C) == 0xFFFFFFFF); + + return found; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lvlx.cmake b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lvlx.cmake new file mode 100644 index 0000000000..3ab2d2dc90 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lvlx.cmake @@ -0,0 +1,12 @@ +set(SOURCE_FILES_GF_${SVARIANT_UPPER}_BROADWELL + ${SOURCE_FILES_GF_SPECIFIC} + fp.c + ${LVLX_DIR}/fp2.c +) + +add_library(${LIB_GF_${SVARIANT_UPPER}} STATIC ${SOURCE_FILES_GF_${SVARIANT_UPPER}_BROADWELL}) +target_include_directories(${LIB_GF_${SVARIANT_UPPER}} PRIVATE ${INC_COMMON} ${PROJECT_SOURCE_DIR}/src/precomp/ref/${SVARIANT_LOWER}/include ${INC_GF} ${INC_GF_${SVARIANT_UPPER}} include ${INC_PUBLIC}) +target_compile_options(${LIB_GF_${SVARIANT_UPPER}} PRIVATE ${C_OPT_FLAGS}) +target_compile_definitions(${LIB_GF_${SVARIANT_UPPER}} PUBLIC SQISIGN_VARIANT=${SVARIANT_LOWER}) + +add_subdirectory(test) diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mem.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mem.c new file mode 100644 index 0000000000..4956beda50 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mem.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: Apache-2.0 + +#include +#include +#include + +void +sqisign_secure_free(void *mem, size_t size) +{ + if (mem) { + typedef void *(*memset_t)(void *, int, size_t); + static volatile memset_t memset_func = memset; + memset_func(mem, 0, size); + free(mem); + } +} +void +sqisign_secure_clear(void *mem, size_t size) +{ + typedef void *(*memset_t)(void *, int, size_t); + static volatile memset_t 
memset_func = memset; + memset_func(mem, 0, size); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mem.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mem.h new file mode 100644 index 0000000000..ab8f6c6481 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mem.h @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef MEM_H +#define MEM_H +#include +#include + +/** + * Clears and frees allocated memory. + * + * @param[out] mem Memory to be cleared and freed. + * @param size Size of memory to be cleared and freed. + */ +void sqisign_secure_free(void *mem, size_t size); + +/** + * Clears memory. + * + * @param[out] mem Memory to be cleared. + * @param size Size of memory to be cleared. + */ +void sqisign_secure_clear(void *mem, size_t size); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.c new file mode 100644 index 0000000000..396d505aec --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.c @@ -0,0 +1,73 @@ +#include +#include +#if defined(MINI_GMP) +#include "mini-gmp.h" +#else +// This configuration is used only for testing +#include +#endif +#include + +// Exported for testing +int +mini_mpz_legendre(const mpz_t a, const mpz_t p) +{ + int res = 0; + mpz_t e; + mpz_init_set(e, p); + mpz_sub_ui(e, e, 1); + mpz_fdiv_q_2exp(e, e, 1); + mpz_powm(e, a, e, p); + + if (mpz_cmp_ui(e, 1) <= 0) { + res = mpz_get_si(e); + } else { + res = -1; + } + mpz_clear(e); + return res; +} + +#if defined(MINI_GMP) +int +mpz_legendre(const mpz_t a, const mpz_t p) +{ + return mini_mpz_legendre(a, p); +} +#endif + +// Exported for testing +double +mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op) +{ + double ret; + int tmp_exp; + mpz_t tmp; + + // Handle the case where op is 0 + if (mpz_cmp_ui(op, 0) == 0) { + *exp = 0; + return 0.0; + } + + *exp = mpz_sizeinbase(op, 2); + + mpz_init_set(tmp, op); + + if (*exp > DBL_MAX_EXP) { + mpz_fdiv_q_2exp(tmp, tmp, *exp - DBL_MAX_EXP); + } + + ret = frexp(mpz_get_d(tmp), &tmp_exp); + mpz_clear(tmp); + + return ret; +} + +#if defined(MINI_GMP) +double +mpz_get_d_2exp(signed long int *exp, const mpz_t op) +{ + return mini_mpz_get_d_2exp(exp, op); +} +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.h new file mode 100644 index 0000000000..0113cfdfe6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.h @@ -0,0 +1,19 @@ +#ifndef MINI_GMP_EXTRA_H +#define MINI_GMP_EXTRA_H + +#if defined MINI_GMP +#include "mini-gmp.h" + +typedef long mp_exp_t; + +int mpz_legendre(const mpz_t a, const mpz_t p); +double mpz_get_d_2exp(signed long int *exp, const mpz_t op); +#else +// This configuration is used only for testing +#include +#endif + +int mini_mpz_legendre(const mpz_t a, const mpz_t p); +double mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp.c new file mode 100644 index 0000000000..3830ab2031 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp.c @@ -0,0 +1,4671 @@ +/* Note: The code from mini-gmp is modifed from the original by + commenting out the definition of GMP_LIMB_BITS */ + +/* + mini-gmp, a minimalistic implementation of a GNU GMP subset. 
+ + Contributed to the GNU project by Niels Möller + Additional functionalities and improvements by Marco Bodrato. + +Copyright 1991-1997, 1999-2022 Free Software Foundation, Inc. + +This file is part of the GNU MP Library. + +The GNU MP Library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 2 of the License, or (at your option) any + later version. + +or both in parallel, as here. + +The GNU MP Library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the GNU MP Library. If not, +see https://www.gnu.org/licenses/. */ + +/* NOTE: All functions in this file which are not declared in + mini-gmp.h are internal, and are not intended to be compatible + with GMP or with future versions of mini-gmp. */ + +/* Much of the material copied from GMP files, including: gmp-impl.h, + longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c, + mpn/generic/lshift.c, mpn/generic/mul_1.c, + mpn/generic/mul_basecase.c, mpn/generic/rshift.c, + mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c, + mpn/generic/submul_1.c. */ + +#include +#include +#include +#include +#include +#include + +#include "mini-gmp.h" + +#if !defined(MINI_GMP_DONT_USE_FLOAT_H) +#include +#endif + + +/* Macros */ +/* Removed from here as it is passed as a compiler command-line definition */ +/* #define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT) */ + +#define GMP_LIMB_MAX ((mp_limb_t) ~ (mp_limb_t) 0) +#define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1)) + +#define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2)) +#define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1) + +#define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT) +#define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1)) + +#define GMP_ABS(x) ((x) >= 0 ? (x) : -(x)) +#define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1)) + +#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b)) +#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b)) + +#define GMP_CMP(a,b) (((a) > (b)) - ((a) < (b))) + +#if defined(DBL_MANT_DIG) && FLT_RADIX == 2 +#define GMP_DBL_MANT_BITS DBL_MANT_DIG +#else +#define GMP_DBL_MANT_BITS (53) +#endif + +/* Return non-zero if xp,xsize and yp,ysize overlap. + If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no + overlap. If both these are false, there's an overlap. 
*/ +#define GMP_MPN_OVERLAP_P(xp, xsize, yp, ysize) \ + ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp)) + +#define gmp_assert_nocarry(x) do { \ + mp_limb_t __cy = (x); \ + assert (__cy == 0); \ + (void) (__cy); \ + } while (0) + +#define gmp_clz(count, x) do { \ + mp_limb_t __clz_x = (x); \ + unsigned __clz_c = 0; \ + int LOCAL_SHIFT_BITS = 8; \ + if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) \ + for (; \ + (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \ + __clz_c += 8) \ + { __clz_x <<= LOCAL_SHIFT_BITS; } \ + for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++) \ + __clz_x <<= 1; \ + (count) = __clz_c; \ + } while (0) + +#define gmp_ctz(count, x) do { \ + mp_limb_t __ctz_x = (x); \ + unsigned __ctz_c = 0; \ + gmp_clz (__ctz_c, __ctz_x & - __ctz_x); \ + (count) = GMP_LIMB_BITS - 1 - __ctz_c; \ + } while (0) + +#define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \ + do { \ + mp_limb_t __x; \ + __x = (al) + (bl); \ + (sh) = (ah) + (bh) + (__x < (al)); \ + (sl) = __x; \ + } while (0) + +#define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \ + do { \ + mp_limb_t __x; \ + __x = (al) - (bl); \ + (sh) = (ah) - (bh) - ((al) < (bl)); \ + (sl) = __x; \ + } while (0) + +#define gmp_umul_ppmm(w1, w0, u, v) \ + do { \ + int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; \ + if (sizeof(unsigned int) * CHAR_BIT >= 2 * GMP_LIMB_BITS) \ + { \ + unsigned int __ww = (unsigned int) (u) * (v); \ + w0 = (mp_limb_t) __ww; \ + w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ + } \ + else if (GMP_ULONG_BITS >= 2 * GMP_LIMB_BITS) \ + { \ + unsigned long int __ww = (unsigned long int) (u) * (v); \ + w0 = (mp_limb_t) __ww; \ + w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ + } \ + else { \ + mp_limb_t __x0, __x1, __x2, __x3; \ + unsigned __ul, __vl, __uh, __vh; \ + mp_limb_t __u = (u), __v = (v); \ + assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); \ + \ + __ul = __u & GMP_LLIMB_MASK; \ + __uh = __u >> (GMP_LIMB_BITS / 2); \ + __vl = __v & GMP_LLIMB_MASK; \ + __vh = __v >> (GMP_LIMB_BITS / 2); \ + \ + __x0 = (mp_limb_t) __ul * __vl; \ + __x1 = (mp_limb_t) __ul * __vh; \ + __x2 = (mp_limb_t) __uh * __vl; \ + __x3 = (mp_limb_t) __uh * __vh; \ + \ + __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */ \ + __x1 += __x2; /* but this indeed can */ \ + if (__x1 < __x2) /* did we get it? */ \ + __x3 += GMP_HLIMB_BIT; /* yes, add it in the proper pos. */ \ + \ + (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \ + (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \ + } \ + } while (0) + +/* If mp_limb_t is of size smaller than int, plain u*v implies + automatic promotion to *signed* int, and then multiply may overflow + and cause undefined behavior. Explicitly cast to unsigned int for + that case. */ +#define gmp_umullo_limb(u, v) \ + ((sizeof(mp_limb_t) >= sizeof(int)) ? 
(u)*(v) : (unsigned int)(u) * (v)) + +#define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di) \ + do { \ + mp_limb_t _qh, _ql, _r, _mask; \ + gmp_umul_ppmm (_qh, _ql, (nh), (di)); \ + gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \ + _r = (nl) - gmp_umullo_limb (_qh, (d)); \ + _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \ + _qh += _mask; \ + _r += _mask & (d); \ + if (_r >= (d)) \ + { \ + _r -= (d); \ + _qh++; \ + } \ + \ + (r) = _r; \ + (q) = _qh; \ + } while (0) + +#define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \ + do { \ + mp_limb_t _q0, _t1, _t0, _mask; \ + gmp_umul_ppmm ((q), _q0, (n2), (dinv)); \ + gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \ + \ + /* Compute the two most significant limbs of n - q'd */ \ + (r1) = (n1) - gmp_umullo_limb ((d1), (q)); \ + gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \ + gmp_umul_ppmm (_t1, _t0, (d0), (q)); \ + gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \ + (q)++; \ + \ + /* Conditionally adjust q and the remainders */ \ + _mask = - (mp_limb_t) ((r1) >= _q0); \ + (q) += _mask; \ + gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \ + if ((r1) >= (d1)) \ + { \ + if ((r1) > (d1) || (r0) >= (d0)) \ + { \ + (q)++; \ + gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \ + } \ + } \ + } while (0) + +/* Swap macros. */ +#define MP_LIMB_T_SWAP(x, y) \ + do { \ + mp_limb_t __mp_limb_t_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_limb_t_swap__tmp; \ + } while (0) +#define MP_SIZE_T_SWAP(x, y) \ + do { \ + mp_size_t __mp_size_t_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_size_t_swap__tmp; \ + } while (0) +#define MP_BITCNT_T_SWAP(x,y) \ + do { \ + mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_bitcnt_t_swap__tmp; \ + } while (0) +#define MP_PTR_SWAP(x, y) \ + do { \ + mp_ptr __mp_ptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_ptr_swap__tmp; \ + } while (0) +#define MP_SRCPTR_SWAP(x, y) \ + do { \ + mp_srcptr __mp_srcptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_srcptr_swap__tmp; \ + } while (0) + +#define MPN_PTR_SWAP(xp,xs, yp,ys) \ + do { \ + MP_PTR_SWAP (xp, yp); \ + MP_SIZE_T_SWAP (xs, ys); \ + } while(0) +#define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \ + do { \ + MP_SRCPTR_SWAP (xp, yp); \ + MP_SIZE_T_SWAP (xs, ys); \ + } while(0) + +#define MPZ_PTR_SWAP(x, y) \ + do { \ + mpz_ptr __mpz_ptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mpz_ptr_swap__tmp; \ + } while (0) +#define MPZ_SRCPTR_SWAP(x, y) \ + do { \ + mpz_srcptr __mpz_srcptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mpz_srcptr_swap__tmp; \ + } while (0) + +const int mp_bits_per_limb = GMP_LIMB_BITS; + + +/* Memory allocation and other helper functions. 
*/ +static void +gmp_die (const char *msg) +{ + fprintf (stderr, "%s\n", msg); + abort(); +} + +static void * +gmp_default_alloc (size_t size) +{ + void *p; + + assert (size > 0); + + p = malloc (size); + if (!p) + gmp_die("gmp_default_alloc: Virtual memory exhausted."); + + return p; +} + +static void * +gmp_default_realloc (void *old, size_t unused_old_size, size_t new_size) +{ + void * p; + + p = realloc (old, new_size); + + if (!p) + gmp_die("gmp_default_realloc: Virtual memory exhausted."); + + return p; +} + +static void +gmp_default_free (void *p, size_t unused_size) +{ + free (p); +} + +static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc; +static void * (*gmp_reallocate_func) (void *, size_t, size_t) = gmp_default_realloc; +static void (*gmp_free_func) (void *, size_t) = gmp_default_free; + +void +mp_get_memory_functions (void *(**alloc_func) (size_t), + void *(**realloc_func) (void *, size_t, size_t), + void (**free_func) (void *, size_t)) +{ + if (alloc_func) + *alloc_func = gmp_allocate_func; + + if (realloc_func) + *realloc_func = gmp_reallocate_func; + + if (free_func) + *free_func = gmp_free_func; +} + +void +mp_set_memory_functions (void *(*alloc_func) (size_t), + void *(*realloc_func) (void *, size_t, size_t), + void (*free_func) (void *, size_t)) +{ + if (!alloc_func) + alloc_func = gmp_default_alloc; + if (!realloc_func) + realloc_func = gmp_default_realloc; + if (!free_func) + free_func = gmp_default_free; + + gmp_allocate_func = alloc_func; + gmp_reallocate_func = realloc_func; + gmp_free_func = free_func; +} + +#define gmp_alloc(size) ((*gmp_allocate_func)((size))) +#define gmp_free(p, size) ((*gmp_free_func) ((p), (size))) +#define gmp_realloc(ptr, old_size, size) ((*gmp_reallocate_func)(ptr, old_size, size)) + +static mp_ptr +gmp_alloc_limbs (mp_size_t size) +{ + return (mp_ptr) gmp_alloc (size * sizeof (mp_limb_t)); +} + +static mp_ptr +gmp_realloc_limbs (mp_ptr old, mp_size_t old_size, mp_size_t size) +{ + assert (size > 0); + return (mp_ptr) gmp_realloc (old, old_size * sizeof (mp_limb_t), size * sizeof (mp_limb_t)); +} + +static void +gmp_free_limbs (mp_ptr old, mp_size_t size) +{ + gmp_free (old, size * sizeof (mp_limb_t)); +} + + +/* MPN interface */ + +void +mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n) +{ + mp_size_t i; + for (i = 0; i < n; i++) + d[i] = s[i]; +} + +void +mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n) +{ + while (--n >= 0) + d[n] = s[n]; +} + +int +mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + while (--n >= 0) + { + if (ap[n] != bp[n]) + return ap[n] > bp[n] ? 1 : -1; + } + return 0; +} + +static int +mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) +{ + if (an != bn) + return an < bn ? 
-1 : 1; + else + return mpn_cmp (ap, bp, an); +} + +static mp_size_t +mpn_normalized_size (mp_srcptr xp, mp_size_t n) +{ + while (n > 0 && xp[n-1] == 0) + --n; + return n; +} + +int +mpn_zero_p(mp_srcptr rp, mp_size_t n) +{ + return mpn_normalized_size (rp, n) == 0; +} + +void +mpn_zero (mp_ptr rp, mp_size_t n) +{ + while (--n >= 0) + rp[n] = 0; +} + +mp_limb_t +mpn_add_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) +{ + mp_size_t i; + + assert (n > 0); + i = 0; + do + { + mp_limb_t r = ap[i] + b; + /* Carry out */ + b = (r < b); + rp[i] = r; + } + while (++i < n); + + return b; +} + +mp_limb_t +mpn_add_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + mp_size_t i; + mp_limb_t cy; + + for (i = 0, cy = 0; i < n; i++) + { + mp_limb_t a, b, r; + a = ap[i]; b = bp[i]; + r = a + cy; + cy = (r < cy); + r += b; + cy += (r < b); + rp[i] = r; + } + return cy; +} + +mp_limb_t +mpn_add (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) +{ + mp_limb_t cy; + + assert (an >= bn); + + cy = mpn_add_n (rp, ap, bp, bn); + if (an > bn) + cy = mpn_add_1 (rp + bn, ap + bn, an - bn, cy); + return cy; +} + +mp_limb_t +mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) +{ + mp_size_t i; + + assert (n > 0); + + i = 0; + do + { + mp_limb_t a = ap[i]; + /* Carry out */ + mp_limb_t cy = a < b; + rp[i] = a - b; + b = cy; + } + while (++i < n); + + return b; +} + +mp_limb_t +mpn_sub_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + mp_size_t i; + mp_limb_t cy; + + for (i = 0, cy = 0; i < n; i++) + { + mp_limb_t a, b; + a = ap[i]; b = bp[i]; + b += cy; + cy = (b < cy); + cy += (a < b); + rp[i] = a - b; + } + return cy; +} + +mp_limb_t +mpn_sub (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) +{ + mp_limb_t cy; + + assert (an >= bn); + + cy = mpn_sub_n (rp, ap, bp, bn); + if (an > bn) + cy = mpn_sub_1 (rp + bn, ap + bn, an - bn, cy); + return cy; +} + +mp_limb_t +mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) +{ + mp_limb_t ul, cl, hpl, lpl; + + assert (n >= 1); + + cl = 0; + do + { + ul = *up++; + gmp_umul_ppmm (hpl, lpl, ul, vl); + + lpl += cl; + cl = (lpl < cl) + hpl; + + *rp++ = lpl; + } + while (--n != 0); + + return cl; +} + +mp_limb_t +mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) +{ + mp_limb_t ul, cl, hpl, lpl, rl; + + assert (n >= 1); + + cl = 0; + do + { + ul = *up++; + gmp_umul_ppmm (hpl, lpl, ul, vl); + + lpl += cl; + cl = (lpl < cl) + hpl; + + rl = *rp; + lpl = rl + lpl; + cl += lpl < rl; + *rp++ = lpl; + } + while (--n != 0); + + return cl; +} + +mp_limb_t +mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) +{ + mp_limb_t ul, cl, hpl, lpl, rl; + + assert (n >= 1); + + cl = 0; + do + { + ul = *up++; + gmp_umul_ppmm (hpl, lpl, ul, vl); + + lpl += cl; + cl = (lpl < cl) + hpl; + + rl = *rp; + lpl = rl - lpl; + cl += lpl > rl; + *rp++ = lpl; + } + while (--n != 0); + + return cl; +} + +mp_limb_t +mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn) +{ + assert (un >= vn); + assert (vn >= 1); + assert (!GMP_MPN_OVERLAP_P(rp, un + vn, up, un)); + assert (!GMP_MPN_OVERLAP_P(rp, un + vn, vp, vn)); + + /* We first multiply by the low order limb. This result can be + stored, not added, to rp. We also avoid a loop for zeroing this + way. */ + + rp[un] = mpn_mul_1 (rp, up, un, vp[0]); + + /* Now accumulate the product of up[] and the next higher limb from + vp[]. 
*/ + + while (--vn >= 1) + { + rp += 1, vp += 1; + rp[un] = mpn_addmul_1 (rp, up, un, vp[0]); + } + return rp[un]; +} + +void +mpn_mul_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + mpn_mul (rp, ap, n, bp, n); +} + +void +mpn_sqr (mp_ptr rp, mp_srcptr ap, mp_size_t n) +{ + mpn_mul (rp, ap, n, ap, n); +} + +mp_limb_t +mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) +{ + mp_limb_t high_limb, low_limb; + unsigned int tnc; + mp_limb_t retval; + + assert (n >= 1); + assert (cnt >= 1); + assert (cnt < GMP_LIMB_BITS); + + up += n; + rp += n; + + tnc = GMP_LIMB_BITS - cnt; + low_limb = *--up; + retval = low_limb >> tnc; + high_limb = (low_limb << cnt); + + while (--n != 0) + { + low_limb = *--up; + *--rp = high_limb | (low_limb >> tnc); + high_limb = (low_limb << cnt); + } + *--rp = high_limb; + + return retval; +} + +mp_limb_t +mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) +{ + mp_limb_t high_limb, low_limb; + unsigned int tnc; + mp_limb_t retval; + + assert (n >= 1); + assert (cnt >= 1); + assert (cnt < GMP_LIMB_BITS); + + tnc = GMP_LIMB_BITS - cnt; + high_limb = *up++; + retval = (high_limb << tnc); + low_limb = high_limb >> cnt; + + while (--n != 0) + { + high_limb = *up++; + *rp++ = low_limb | (high_limb << tnc); + low_limb = high_limb >> cnt; + } + *rp = low_limb; + + return retval; +} + +static mp_bitcnt_t +mpn_common_scan (mp_limb_t limb, mp_size_t i, mp_srcptr up, mp_size_t un, + mp_limb_t ux) +{ + unsigned cnt; + + assert (ux == 0 || ux == GMP_LIMB_MAX); + assert (0 <= i && i <= un ); + + while (limb == 0) + { + i++; + if (i == un) + return (ux == 0 ? ~(mp_bitcnt_t) 0 : un * GMP_LIMB_BITS); + limb = ux ^ up[i]; + } + gmp_ctz (cnt, limb); + return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt; +} + +mp_bitcnt_t +mpn_scan1 (mp_srcptr ptr, mp_bitcnt_t bit) +{ + mp_size_t i; + i = bit / GMP_LIMB_BITS; + + return mpn_common_scan ( ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), + i, ptr, i, 0); +} + +mp_bitcnt_t +mpn_scan0 (mp_srcptr ptr, mp_bitcnt_t bit) +{ + mp_size_t i; + i = bit / GMP_LIMB_BITS; + + return mpn_common_scan (~ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), + i, ptr, i, GMP_LIMB_MAX); +} + +void +mpn_com (mp_ptr rp, mp_srcptr up, mp_size_t n) +{ + while (--n >= 0) + *rp++ = ~ *up++; +} + +mp_limb_t +mpn_neg (mp_ptr rp, mp_srcptr up, mp_size_t n) +{ + while (*up == 0) + { + *rp = 0; + if (!--n) + return 0; + ++up; ++rp; + } + *rp = - *up; + mpn_com (++rp, ++up, --n); + return 1; +} + + +/* MPN division interface. */ + +/* The 3/2 inverse is defined as + + m = floor( (B^3-1) / (B u1 + u0)) - B +*/ +mp_limb_t +mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0) +{ + mp_limb_t r, m; + + { + mp_limb_t p, ql; + unsigned ul, uh, qh; + + assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); + /* For notation, let b denote the half-limb base, so that B = b^2. + Split u1 = b uh + ul. */ + ul = u1 & GMP_LLIMB_MASK; + uh = u1 >> (GMP_LIMB_BITS / 2); + + /* Approximation of the high half of quotient. Differs from the 2/1 + inverse of the half limb uh, since we have already subtracted + u0. */ + qh = (u1 ^ GMP_LIMB_MAX) / uh; + + /* Adjust to get a half-limb 3/2 inverse, i.e., we want + + qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u + = floor( (b (~u) + b-1) / u), + + and the remainder + + r = b (~u) + b-1 - qh (b uh + ul) + = b (~u - qh uh) + b-1 - qh ul + + Subtraction of qh ul may underflow, which implies adjustments. + But by normalization, 2 u >= B > qh ul, so we need to adjust by + at most 2. 
+ */ + + r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK; + + p = (mp_limb_t) qh * ul; + /* Adjustment steps taken from udiv_qrnnd_c */ + if (r < p) + { + qh--; + r += u1; + if (r >= u1) /* i.e. we didn't get carry when adding to r */ + if (r < p) + { + qh--; + r += u1; + } + } + r -= p; + + /* Low half of the quotient is + + ql = floor ( (b r + b-1) / u1). + + This is a 3/2 division (on half-limbs), for which qh is a + suitable inverse. */ + + p = (r >> (GMP_LIMB_BITS / 2)) * qh + r; + /* Unlike full-limb 3/2, we can add 1 without overflow. For this to + work, it is essential that ql is a full mp_limb_t. */ + ql = (p >> (GMP_LIMB_BITS / 2)) + 1; + + /* By the 3/2 trick, we don't need the high half limb. */ + r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1; + + if (r >= (GMP_LIMB_MAX & (p << (GMP_LIMB_BITS / 2)))) + { + ql--; + r += u1; + } + m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql; + if (r >= u1) + { + m++; + r -= u1; + } + } + + /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a + 3/2 inverse. */ + if (u0 > 0) + { + mp_limb_t th, tl; + r = ~r; + r += u0; + if (r < u0) + { + m--; + if (r >= u1) + { + m--; + r -= u1; + } + r -= u1; + } + gmp_umul_ppmm (th, tl, u0, m); + r += th; + if (r < th) + { + m--; + m -= ((r > u1) | ((r == u1) & (tl > u0))); + } + } + + return m; +} + +struct gmp_div_inverse +{ + /* Normalization shift count. */ + unsigned shift; + /* Normalized divisor (d0 unused for mpn_div_qr_1) */ + mp_limb_t d1, d0; + /* Inverse, for 2/1 or 3/2. */ + mp_limb_t di; +}; + +static void +mpn_div_qr_1_invert (struct gmp_div_inverse *inv, mp_limb_t d) +{ + unsigned shift; + + assert (d > 0); + gmp_clz (shift, d); + inv->shift = shift; + inv->d1 = d << shift; + inv->di = mpn_invert_limb (inv->d1); +} + +static void +mpn_div_qr_2_invert (struct gmp_div_inverse *inv, + mp_limb_t d1, mp_limb_t d0) +{ + unsigned shift; + + assert (d1 > 0); + gmp_clz (shift, d1); + inv->shift = shift; + if (shift > 0) + { + d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); + d0 <<= shift; + } + inv->d1 = d1; + inv->d0 = d0; + inv->di = mpn_invert_3by2 (d1, d0); +} + +static void +mpn_div_qr_invert (struct gmp_div_inverse *inv, + mp_srcptr dp, mp_size_t dn) +{ + assert (dn > 0); + + if (dn == 1) + mpn_div_qr_1_invert (inv, dp[0]); + else if (dn == 2) + mpn_div_qr_2_invert (inv, dp[1], dp[0]); + else + { + unsigned shift; + mp_limb_t d1, d0; + + d1 = dp[dn-1]; + d0 = dp[dn-2]; + assert (d1 > 0); + gmp_clz (shift, d1); + inv->shift = shift; + if (shift > 0) + { + d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); + d0 = (d0 << shift) | (dp[dn-3] >> (GMP_LIMB_BITS - shift)); + } + inv->d1 = d1; + inv->d0 = d0; + inv->di = mpn_invert_3by2 (d1, d0); + } +} + +/* Not matching current public gmp interface, rather corresponding to + the sbpi1_div_* functions. */ +static mp_limb_t +mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn, + const struct gmp_div_inverse *inv) +{ + mp_limb_t d, di; + mp_limb_t r; + mp_ptr tp = NULL; + mp_size_t tn = 0; + + if (inv->shift > 0) + { + /* Shift, reusing qp area if possible. In-place shift if qp == np. 
*/ + tp = qp; + if (!tp) + { + tn = nn; + tp = gmp_alloc_limbs (tn); + } + r = mpn_lshift (tp, np, nn, inv->shift); + np = tp; + } + else + r = 0; + + d = inv->d1; + di = inv->di; + while (--nn >= 0) + { + mp_limb_t q; + + gmp_udiv_qrnnd_preinv (q, r, r, np[nn], d, di); + if (qp) + qp[nn] = q; + } + if (tn) + gmp_free_limbs (tp, tn); + + return r >> inv->shift; +} + +static void +mpn_div_qr_2_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, + const struct gmp_div_inverse *inv) +{ + unsigned shift; + mp_size_t i; + mp_limb_t d1, d0, di, r1, r0; + + assert (nn >= 2); + shift = inv->shift; + d1 = inv->d1; + d0 = inv->d0; + di = inv->di; + + if (shift > 0) + r1 = mpn_lshift (np, np, nn, shift); + else + r1 = 0; + + r0 = np[nn - 1]; + + i = nn - 2; + do + { + mp_limb_t n0, q; + n0 = np[i]; + gmp_udiv_qr_3by2 (q, r1, r0, r1, r0, n0, d1, d0, di); + + if (qp) + qp[i] = q; + } + while (--i >= 0); + + if (shift > 0) + { + assert ((r0 & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - shift))) == 0); + r0 = (r0 >> shift) | (r1 << (GMP_LIMB_BITS - shift)); + r1 >>= shift; + } + + np[1] = r1; + np[0] = r0; +} + +static void +mpn_div_qr_pi1 (mp_ptr qp, + mp_ptr np, mp_size_t nn, mp_limb_t n1, + mp_srcptr dp, mp_size_t dn, + mp_limb_t dinv) +{ + mp_size_t i; + + mp_limb_t d1, d0; + mp_limb_t cy, cy1; + mp_limb_t q; + + assert (dn > 2); + assert (nn >= dn); + + d1 = dp[dn - 1]; + d0 = dp[dn - 2]; + + assert ((d1 & GMP_LIMB_HIGHBIT) != 0); + /* Iteration variable is the index of the q limb. + * + * We divide + * by + */ + + i = nn - dn; + do + { + mp_limb_t n0 = np[dn-1+i]; + + if (n1 == d1 && n0 == d0) + { + q = GMP_LIMB_MAX; + mpn_submul_1 (np+i, dp, dn, q); + n1 = np[dn-1+i]; /* update n1, last loop's value will now be invalid */ + } + else + { + gmp_udiv_qr_3by2 (q, n1, n0, n1, n0, np[dn-2+i], d1, d0, dinv); + + cy = mpn_submul_1 (np + i, dp, dn-2, q); + + cy1 = n0 < cy; + n0 = n0 - cy; + cy = n1 < cy1; + n1 = n1 - cy1; + np[dn-2+i] = n0; + + if (cy != 0) + { + n1 += d1 + mpn_add_n (np + i, np + i, dp, dn - 1); + q--; + } + } + + if (qp) + qp[i] = q; + } + while (--i >= 0); + + np[dn - 1] = n1; +} + +static void +mpn_div_qr_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, + mp_srcptr dp, mp_size_t dn, + const struct gmp_div_inverse *inv) +{ + assert (dn > 0); + assert (nn >= dn); + + if (dn == 1) + np[0] = mpn_div_qr_1_preinv (qp, np, nn, inv); + else if (dn == 2) + mpn_div_qr_2_preinv (qp, np, nn, inv); + else + { + mp_limb_t nh; + unsigned shift; + + assert (inv->d1 == dp[dn-1]); + assert (inv->d0 == dp[dn-2]); + assert ((inv->d1 & GMP_LIMB_HIGHBIT) != 0); + + shift = inv->shift; + if (shift > 0) + nh = mpn_lshift (np, np, nn, shift); + else + nh = 0; + + mpn_div_qr_pi1 (qp, np, nn, nh, dp, dn, inv->di); + + if (shift > 0) + gmp_assert_nocarry (mpn_rshift (np, np, dn, shift)); + } +} + +static void +mpn_div_qr (mp_ptr qp, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn) +{ + struct gmp_div_inverse inv; + mp_ptr tp = NULL; + + assert (dn > 0); + assert (nn >= dn); + + mpn_div_qr_invert (&inv, dp, dn); + if (dn > 2 && inv.shift > 0) + { + tp = gmp_alloc_limbs (dn); + gmp_assert_nocarry (mpn_lshift (tp, dp, dn, inv.shift)); + dp = tp; + } + mpn_div_qr_preinv (qp, np, nn, dp, dn, &inv); + if (tp) + gmp_free_limbs (tp, dn); +} + + +/* MPN base conversion. 
*/ +static unsigned +mpn_base_power_of_two_p (unsigned b) +{ + switch (b) + { + case 2: return 1; + case 4: return 2; + case 8: return 3; + case 16: return 4; + case 32: return 5; + case 64: return 6; + case 128: return 7; + case 256: return 8; + default: return 0; + } +} + +struct mpn_base_info +{ + /* bb is the largest power of the base which fits in one limb, and + exp is the corresponding exponent. */ + unsigned exp; + mp_limb_t bb; +}; + +static void +mpn_get_base_info (struct mpn_base_info *info, mp_limb_t b) +{ + mp_limb_t m; + mp_limb_t p; + unsigned exp; + + m = GMP_LIMB_MAX / b; + for (exp = 1, p = b; p <= m; exp++) + p *= b; + + info->exp = exp; + info->bb = p; +} + +static mp_bitcnt_t +mpn_limb_size_in_base_2 (mp_limb_t u) +{ + unsigned shift; + + assert (u > 0); + gmp_clz (shift, u); + return GMP_LIMB_BITS - shift; +} + +static size_t +mpn_get_str_bits (unsigned char *sp, unsigned bits, mp_srcptr up, mp_size_t un) +{ + unsigned char mask; + size_t sn, j; + mp_size_t i; + unsigned shift; + + sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]) + + bits - 1) / bits; + + mask = (1U << bits) - 1; + + for (i = 0, j = sn, shift = 0; j-- > 0;) + { + unsigned char digit = up[i] >> shift; + + shift += bits; + + if (shift >= GMP_LIMB_BITS && ++i < un) + { + shift -= GMP_LIMB_BITS; + digit |= up[i] << (bits - shift); + } + sp[j] = digit & mask; + } + return sn; +} + +/* We generate digits from the least significant end, and reverse at + the end. */ +static size_t +mpn_limb_get_str (unsigned char *sp, mp_limb_t w, + const struct gmp_div_inverse *binv) +{ + mp_size_t i; + for (i = 0; w > 0; i++) + { + mp_limb_t h, l, r; + + h = w >> (GMP_LIMB_BITS - binv->shift); + l = w << binv->shift; + + gmp_udiv_qrnnd_preinv (w, r, h, l, binv->d1, binv->di); + assert ((r & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - binv->shift))) == 0); + r >>= binv->shift; + + sp[i] = r; + } + return i; +} + +static size_t +mpn_get_str_other (unsigned char *sp, + int base, const struct mpn_base_info *info, + mp_ptr up, mp_size_t un) +{ + struct gmp_div_inverse binv; + size_t sn; + size_t i; + + mpn_div_qr_1_invert (&binv, base); + + sn = 0; + + if (un > 1) + { + struct gmp_div_inverse bbinv; + mpn_div_qr_1_invert (&bbinv, info->bb); + + do + { + mp_limb_t w; + size_t done; + w = mpn_div_qr_1_preinv (up, up, un, &bbinv); + un -= (up[un-1] == 0); + done = mpn_limb_get_str (sp + sn, w, &binv); + + for (sn += done; done < info->exp; done++) + sp[sn++] = 0; + } + while (un > 1); + } + sn += mpn_limb_get_str (sp + sn, up[0], &binv); + + /* Reverse order */ + for (i = 0; 2*i + 1 < sn; i++) + { + unsigned char t = sp[i]; + sp[i] = sp[sn - i - 1]; + sp[sn - i - 1] = t; + } + + return sn; +} + +size_t +mpn_get_str (unsigned char *sp, int base, mp_ptr up, mp_size_t un) +{ + unsigned bits; + + assert (un > 0); + assert (up[un-1] > 0); + + bits = mpn_base_power_of_two_p (base); + if (bits) + return mpn_get_str_bits (sp, bits, up, un); + else + { + struct mpn_base_info info; + + mpn_get_base_info (&info, base); + return mpn_get_str_other (sp, base, &info, up, un); + } +} + +static mp_size_t +mpn_set_str_bits (mp_ptr rp, const unsigned char *sp, size_t sn, + unsigned bits) +{ + mp_size_t rn; + mp_limb_t limb; + unsigned shift; + + for (limb = 0, rn = 0, shift = 0; sn-- > 0; ) + { + limb |= (mp_limb_t) sp[sn] << shift; + shift += bits; + if (shift >= GMP_LIMB_BITS) + { + shift -= GMP_LIMB_BITS; + rp[rn++] = limb; + /* Next line is correct also if shift == 0, + bits == 8, and mp_limb_t == unsigned char. 
*/ + limb = (unsigned int) sp[sn] >> (bits - shift); + } + } + if (limb != 0) + rp[rn++] = limb; + else + rn = mpn_normalized_size (rp, rn); + return rn; +} + +/* Result is usually normalized, except for all-zero input, in which + case a single zero limb is written at *RP, and 1 is returned. */ +static mp_size_t +mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn, + mp_limb_t b, const struct mpn_base_info *info) +{ + mp_size_t rn; + mp_limb_t w; + unsigned k; + size_t j; + + assert (sn > 0); + + k = 1 + (sn - 1) % info->exp; + + j = 0; + w = sp[j++]; + while (--k != 0) + w = w * b + sp[j++]; + + rp[0] = w; + + for (rn = 1; j < sn;) + { + mp_limb_t cy; + + w = sp[j++]; + for (k = 1; k < info->exp; k++) + w = w * b + sp[j++]; + + cy = mpn_mul_1 (rp, rp, rn, info->bb); + cy += mpn_add_1 (rp, rp, rn, w); + if (cy > 0) + rp[rn++] = cy; + } + assert (j == sn); + + return rn; +} + +mp_size_t +mpn_set_str (mp_ptr rp, const unsigned char *sp, size_t sn, int base) +{ + unsigned bits; + + if (sn == 0) + return 0; + + bits = mpn_base_power_of_two_p (base); + if (bits) + return mpn_set_str_bits (rp, sp, sn, bits); + else + { + struct mpn_base_info info; + + mpn_get_base_info (&info, base); + return mpn_set_str_other (rp, sp, sn, base, &info); + } +} + + +/* MPZ interface */ +void +mpz_init (mpz_t r) +{ + static const mp_limb_t dummy_limb = GMP_LIMB_MAX & 0xc1a0; + + r->_mp_alloc = 0; + r->_mp_size = 0; + r->_mp_d = (mp_ptr) &dummy_limb; +} + +/* The utility of this function is a bit limited, since many functions + assigns the result variable using mpz_swap. */ +void +mpz_init2 (mpz_t r, mp_bitcnt_t bits) +{ + mp_size_t rn; + + bits -= (bits != 0); /* Round down, except if 0 */ + rn = 1 + bits / GMP_LIMB_BITS; + + r->_mp_alloc = rn; + r->_mp_size = 0; + r->_mp_d = gmp_alloc_limbs (rn); +} + +void +mpz_clear (mpz_t r) +{ + if (r->_mp_alloc) + gmp_free_limbs (r->_mp_d, r->_mp_alloc); +} + +static mp_ptr +mpz_realloc (mpz_t r, mp_size_t size) +{ + size = GMP_MAX (size, 1); + + if (r->_mp_alloc) + r->_mp_d = gmp_realloc_limbs (r->_mp_d, r->_mp_alloc, size); + else + r->_mp_d = gmp_alloc_limbs (size); + r->_mp_alloc = size; + + if (GMP_ABS (r->_mp_size) > size) + r->_mp_size = 0; + + return r->_mp_d; +} + +/* Realloc for an mpz_t WHAT if it has less than NEEDED limbs. */ +#define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc \ + ? mpz_realloc(z,n) \ + : (z)->_mp_d) + +/* MPZ assignment and basic conversions. 
*/ +void +mpz_set_si (mpz_t r, signed long int x) +{ + if (x >= 0) + mpz_set_ui (r, x); + else /* (x < 0) */ + if (GMP_LIMB_BITS < GMP_ULONG_BITS) + { + mpz_set_ui (r, GMP_NEG_CAST (unsigned long int, x)); + mpz_neg (r, r); + } + else + { + r->_mp_size = -1; + MPZ_REALLOC (r, 1)[0] = GMP_NEG_CAST (unsigned long int, x); + } +} + +void +mpz_set_ui (mpz_t r, unsigned long int x) +{ + if (x > 0) + { + r->_mp_size = 1; + MPZ_REALLOC (r, 1)[0] = x; + if (GMP_LIMB_BITS < GMP_ULONG_BITS) + { + int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; + while (x >>= LOCAL_GMP_LIMB_BITS) + { + ++ r->_mp_size; + MPZ_REALLOC (r, r->_mp_size)[r->_mp_size - 1] = x; + } + } + } + else + r->_mp_size = 0; +} + +void +mpz_set (mpz_t r, const mpz_t x) +{ + /* Allow the NOP r == x */ + if (r != x) + { + mp_size_t n; + mp_ptr rp; + + n = GMP_ABS (x->_mp_size); + rp = MPZ_REALLOC (r, n); + + mpn_copyi (rp, x->_mp_d, n); + r->_mp_size = x->_mp_size; + } +} + +void +mpz_init_set_si (mpz_t r, signed long int x) +{ + mpz_init (r); + mpz_set_si (r, x); +} + +void +mpz_init_set_ui (mpz_t r, unsigned long int x) +{ + mpz_init (r); + mpz_set_ui (r, x); +} + +void +mpz_init_set (mpz_t r, const mpz_t x) +{ + mpz_init (r); + mpz_set (r, x); +} + +int +mpz_fits_slong_p (const mpz_t u) +{ + return mpz_cmp_si (u, LONG_MAX) <= 0 && mpz_cmp_si (u, LONG_MIN) >= 0; +} + +static int +mpn_absfits_ulong_p (mp_srcptr up, mp_size_t un) +{ + int ulongsize = GMP_ULONG_BITS / GMP_LIMB_BITS; + mp_limb_t ulongrem = 0; + + if (GMP_ULONG_BITS % GMP_LIMB_BITS != 0) + ulongrem = (mp_limb_t) (ULONG_MAX >> GMP_LIMB_BITS * ulongsize) + 1; + + return un <= ulongsize || (up[ulongsize] < ulongrem && un == ulongsize + 1); +} + +int +mpz_fits_ulong_p (const mpz_t u) +{ + mp_size_t us = u->_mp_size; + + return us >= 0 && mpn_absfits_ulong_p (u->_mp_d, us); +} + +int +mpz_fits_sint_p (const mpz_t u) +{ + return mpz_cmp_si (u, INT_MAX) <= 0 && mpz_cmp_si (u, INT_MIN) >= 0; +} + +int +mpz_fits_uint_p (const mpz_t u) +{ + return u->_mp_size >= 0 && mpz_cmpabs_ui (u, UINT_MAX) <= 0; +} + +int +mpz_fits_sshort_p (const mpz_t u) +{ + return mpz_cmp_si (u, SHRT_MAX) <= 0 && mpz_cmp_si (u, SHRT_MIN) >= 0; +} + +int +mpz_fits_ushort_p (const mpz_t u) +{ + return u->_mp_size >= 0 && mpz_cmpabs_ui (u, USHRT_MAX) <= 0; +} + +long int +mpz_get_si (const mpz_t u) +{ + unsigned long r = mpz_get_ui (u); + unsigned long c = -LONG_MAX - LONG_MIN; + + if (u->_mp_size < 0) + /* This expression is necessary to properly handle -LONG_MIN */ + return -(long) c - (long) ((r - c) & LONG_MAX); + else + return (long) (r & LONG_MAX); +} + +unsigned long int +mpz_get_ui (const mpz_t u) +{ + if (GMP_LIMB_BITS < GMP_ULONG_BITS) + { + int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; + unsigned long r = 0; + mp_size_t n = GMP_ABS (u->_mp_size); + n = GMP_MIN (n, 1 + (mp_size_t) (GMP_ULONG_BITS - 1) / GMP_LIMB_BITS); + while (--n >= 0) + r = (r << LOCAL_GMP_LIMB_BITS) + u->_mp_d[n]; + return r; + } + + return u->_mp_size == 0 ? 
0 : u->_mp_d[0]; +} + +size_t +mpz_size (const mpz_t u) +{ + return GMP_ABS (u->_mp_size); +} + +mp_limb_t +mpz_getlimbn (const mpz_t u, mp_size_t n) +{ + if (n >= 0 && n < GMP_ABS (u->_mp_size)) + return u->_mp_d[n]; + else + return 0; +} + +void +mpz_realloc2 (mpz_t x, mp_bitcnt_t n) +{ + mpz_realloc (x, 1 + (n - (n != 0)) / GMP_LIMB_BITS); +} + +mp_srcptr +mpz_limbs_read (mpz_srcptr x) +{ + return x->_mp_d; +} + +mp_ptr +mpz_limbs_modify (mpz_t x, mp_size_t n) +{ + assert (n > 0); + return MPZ_REALLOC (x, n); +} + +mp_ptr +mpz_limbs_write (mpz_t x, mp_size_t n) +{ + return mpz_limbs_modify (x, n); +} + +void +mpz_limbs_finish (mpz_t x, mp_size_t xs) +{ + mp_size_t xn; + xn = mpn_normalized_size (x->_mp_d, GMP_ABS (xs)); + x->_mp_size = xs < 0 ? -xn : xn; +} + +static mpz_srcptr +mpz_roinit_normal_n (mpz_t x, mp_srcptr xp, mp_size_t xs) +{ + x->_mp_alloc = 0; + x->_mp_d = (mp_ptr) xp; + x->_mp_size = xs; + return x; +} + +mpz_srcptr +mpz_roinit_n (mpz_t x, mp_srcptr xp, mp_size_t xs) +{ + mpz_roinit_normal_n (x, xp, xs); + mpz_limbs_finish (x, xs); + return x; +} + + +/* Conversions and comparison to double. */ +void +mpz_set_d (mpz_t r, double x) +{ + int sign; + mp_ptr rp; + mp_size_t rn, i; + double B; + double Bi; + mp_limb_t f; + + /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is + zero or infinity. */ + if (x != x || x == x * 0.5) + { + r->_mp_size = 0; + return; + } + + sign = x < 0.0 ; + if (sign) + x = - x; + + if (x < 1.0) + { + r->_mp_size = 0; + return; + } + B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); + Bi = 1.0 / B; + for (rn = 1; x >= B; rn++) + x *= Bi; + + rp = MPZ_REALLOC (r, rn); + + f = (mp_limb_t) x; + x -= f; + assert (x < 1.0); + i = rn-1; + rp[i] = f; + while (--i >= 0) + { + x = B * x; + f = (mp_limb_t) x; + x -= f; + assert (x < 1.0); + rp[i] = f; + } + + r->_mp_size = sign ? - rn : rn; +} + +void +mpz_init_set_d (mpz_t r, double x) +{ + mpz_init (r); + mpz_set_d (r, x); +} + +double +mpz_get_d (const mpz_t u) +{ + int m; + mp_limb_t l; + mp_size_t un; + double x; + double B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); + + un = GMP_ABS (u->_mp_size); + + if (un == 0) + return 0.0; + + l = u->_mp_d[--un]; + gmp_clz (m, l); + m = m + GMP_DBL_MANT_BITS - GMP_LIMB_BITS; + if (m < 0) + l &= GMP_LIMB_MAX << -m; + + for (x = l; --un >= 0;) + { + x = B*x; + if (m > 0) { + l = u->_mp_d[un]; + m -= GMP_LIMB_BITS; + if (m < 0) + l &= GMP_LIMB_MAX << -m; + x += l; + } + } + + if (u->_mp_size < 0) + x = -x; + + return x; +} + +int +mpz_cmpabs_d (const mpz_t x, double d) +{ + mp_size_t xn; + double B, Bi; + mp_size_t i; + + xn = x->_mp_size; + d = GMP_ABS (d); + + if (xn != 0) + { + xn = GMP_ABS (xn); + + B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); + Bi = 1.0 / B; + + /* Scale d so it can be compared with the top limb. */ + for (i = 1; i < xn; i++) + d *= Bi; + + if (d >= B) + return -1; + + /* Compare floor(d) to top limb, subtract and cancel when equal. */ + for (i = xn; i-- > 0;) + { + mp_limb_t f, xl; + + f = (mp_limb_t) d; + xl = x->_mp_d[i]; + if (xl > f) + return 1; + else if (xl < f) + return -1; + d = B * (d - f); + } + } + return - (d > 0.0); +} + +int +mpz_cmp_d (const mpz_t x, double d) +{ + if (x->_mp_size < 0) + { + if (d >= 0.0) + return -1; + else + return -mpz_cmpabs_d (x, d); + } + else + { + if (d < 0.0) + return 1; + else + return mpz_cmpabs_d (x, d); + } +} + + +/* MPZ comparisons and the like. 
*/ +int +mpz_sgn (const mpz_t u) +{ + return GMP_CMP (u->_mp_size, 0); +} + +int +mpz_cmp_si (const mpz_t u, long v) +{ + mp_size_t usize = u->_mp_size; + + if (v >= 0) + return mpz_cmp_ui (u, v); + else if (usize >= 0) + return 1; + else + return - mpz_cmpabs_ui (u, GMP_NEG_CAST (unsigned long int, v)); +} + +int +mpz_cmp_ui (const mpz_t u, unsigned long v) +{ + mp_size_t usize = u->_mp_size; + + if (usize < 0) + return -1; + else + return mpz_cmpabs_ui (u, v); +} + +int +mpz_cmp (const mpz_t a, const mpz_t b) +{ + mp_size_t asize = a->_mp_size; + mp_size_t bsize = b->_mp_size; + + if (asize != bsize) + return (asize < bsize) ? -1 : 1; + else if (asize >= 0) + return mpn_cmp (a->_mp_d, b->_mp_d, asize); + else + return mpn_cmp (b->_mp_d, a->_mp_d, -asize); +} + +int +mpz_cmpabs_ui (const mpz_t u, unsigned long v) +{ + mp_size_t un = GMP_ABS (u->_mp_size); + + if (! mpn_absfits_ulong_p (u->_mp_d, un)) + return 1; + else + { + unsigned long uu = mpz_get_ui (u); + return GMP_CMP(uu, v); + } +} + +int +mpz_cmpabs (const mpz_t u, const mpz_t v) +{ + return mpn_cmp4 (u->_mp_d, GMP_ABS (u->_mp_size), + v->_mp_d, GMP_ABS (v->_mp_size)); +} + +void +mpz_abs (mpz_t r, const mpz_t u) +{ + mpz_set (r, u); + r->_mp_size = GMP_ABS (r->_mp_size); +} + +void +mpz_neg (mpz_t r, const mpz_t u) +{ + mpz_set (r, u); + r->_mp_size = -r->_mp_size; +} + +void +mpz_swap (mpz_t u, mpz_t v) +{ + MP_SIZE_T_SWAP (u->_mp_alloc, v->_mp_alloc); + MPN_PTR_SWAP (u->_mp_d, u->_mp_size, v->_mp_d, v->_mp_size); +} + + +/* MPZ addition and subtraction */ + + +void +mpz_add_ui (mpz_t r, const mpz_t a, unsigned long b) +{ + mpz_t bb; + mpz_init_set_ui (bb, b); + mpz_add (r, a, bb); + mpz_clear (bb); +} + +void +mpz_sub_ui (mpz_t r, const mpz_t a, unsigned long b) +{ + mpz_ui_sub (r, b, a); + mpz_neg (r, r); +} + +void +mpz_ui_sub (mpz_t r, unsigned long a, const mpz_t b) +{ + mpz_neg (r, b); + mpz_add_ui (r, r, a); +} + +static mp_size_t +mpz_abs_add (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t an = GMP_ABS (a->_mp_size); + mp_size_t bn = GMP_ABS (b->_mp_size); + mp_ptr rp; + mp_limb_t cy; + + if (an < bn) + { + MPZ_SRCPTR_SWAP (a, b); + MP_SIZE_T_SWAP (an, bn); + } + + rp = MPZ_REALLOC (r, an + 1); + cy = mpn_add (rp, a->_mp_d, an, b->_mp_d, bn); + + rp[an] = cy; + + return an + cy; +} + +static mp_size_t +mpz_abs_sub (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t an = GMP_ABS (a->_mp_size); + mp_size_t bn = GMP_ABS (b->_mp_size); + int cmp; + mp_ptr rp; + + cmp = mpn_cmp4 (a->_mp_d, an, b->_mp_d, bn); + if (cmp > 0) + { + rp = MPZ_REALLOC (r, an); + gmp_assert_nocarry (mpn_sub (rp, a->_mp_d, an, b->_mp_d, bn)); + return mpn_normalized_size (rp, an); + } + else if (cmp < 0) + { + rp = MPZ_REALLOC (r, bn); + gmp_assert_nocarry (mpn_sub (rp, b->_mp_d, bn, a->_mp_d, an)); + return -mpn_normalized_size (rp, bn); + } + else + return 0; +} + +void +mpz_add (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t rn; + + if ( (a->_mp_size ^ b->_mp_size) >= 0) + rn = mpz_abs_add (r, a, b); + else + rn = mpz_abs_sub (r, a, b); + + r->_mp_size = a->_mp_size >= 0 ? rn : - rn; +} + +void +mpz_sub (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t rn; + + if ( (a->_mp_size ^ b->_mp_size) >= 0) + rn = mpz_abs_sub (r, a, b); + else + rn = mpz_abs_add (r, a, b); + + r->_mp_size = a->_mp_size >= 0 ? 
rn : - rn; +} + + +/* MPZ multiplication */ +void +mpz_mul_si (mpz_t r, const mpz_t u, long int v) +{ + if (v < 0) + { + mpz_mul_ui (r, u, GMP_NEG_CAST (unsigned long int, v)); + mpz_neg (r, r); + } + else + mpz_mul_ui (r, u, v); +} + +void +mpz_mul_ui (mpz_t r, const mpz_t u, unsigned long int v) +{ + mpz_t vv; + mpz_init_set_ui (vv, v); + mpz_mul (r, u, vv); + mpz_clear (vv); + return; +} + +void +mpz_mul (mpz_t r, const mpz_t u, const mpz_t v) +{ + int sign; + mp_size_t un, vn, rn; + mpz_t t; + mp_ptr tp; + + un = u->_mp_size; + vn = v->_mp_size; + + if (un == 0 || vn == 0) + { + r->_mp_size = 0; + return; + } + + sign = (un ^ vn) < 0; + + un = GMP_ABS (un); + vn = GMP_ABS (vn); + + mpz_init2 (t, (un + vn) * GMP_LIMB_BITS); + + tp = t->_mp_d; + if (un >= vn) + mpn_mul (tp, u->_mp_d, un, v->_mp_d, vn); + else + mpn_mul (tp, v->_mp_d, vn, u->_mp_d, un); + + rn = un + vn; + rn -= tp[rn-1] == 0; + + t->_mp_size = sign ? - rn : rn; + mpz_swap (r, t); + mpz_clear (t); +} + +void +mpz_mul_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bits) +{ + mp_size_t un, rn; + mp_size_t limbs; + unsigned shift; + mp_ptr rp; + + un = GMP_ABS (u->_mp_size); + if (un == 0) + { + r->_mp_size = 0; + return; + } + + limbs = bits / GMP_LIMB_BITS; + shift = bits % GMP_LIMB_BITS; + + rn = un + limbs + (shift > 0); + rp = MPZ_REALLOC (r, rn); + if (shift > 0) + { + mp_limb_t cy = mpn_lshift (rp + limbs, u->_mp_d, un, shift); + rp[rn-1] = cy; + rn -= (cy == 0); + } + else + mpn_copyd (rp + limbs, u->_mp_d, un); + + mpn_zero (rp, limbs); + + r->_mp_size = (u->_mp_size < 0) ? - rn : rn; +} + +void +mpz_addmul_ui (mpz_t r, const mpz_t u, unsigned long int v) +{ + mpz_t t; + mpz_init_set_ui (t, v); + mpz_mul (t, u, t); + mpz_add (r, r, t); + mpz_clear (t); +} + +void +mpz_submul_ui (mpz_t r, const mpz_t u, unsigned long int v) +{ + mpz_t t; + mpz_init_set_ui (t, v); + mpz_mul (t, u, t); + mpz_sub (r, r, t); + mpz_clear (t); +} + +void +mpz_addmul (mpz_t r, const mpz_t u, const mpz_t v) +{ + mpz_t t; + mpz_init (t); + mpz_mul (t, u, v); + mpz_add (r, r, t); + mpz_clear (t); +} + +void +mpz_submul (mpz_t r, const mpz_t u, const mpz_t v) +{ + mpz_t t; + mpz_init (t); + mpz_mul (t, u, v); + mpz_sub (r, r, t); + mpz_clear (t); +} + + +/* MPZ division */ +enum mpz_div_round_mode { GMP_DIV_FLOOR, GMP_DIV_CEIL, GMP_DIV_TRUNC }; + +/* Allows q or r to be zero. Returns 1 iff remainder is non-zero. */ +static int +mpz_div_qr (mpz_t q, mpz_t r, + const mpz_t n, const mpz_t d, enum mpz_div_round_mode mode) +{ + mp_size_t ns, ds, nn, dn, qs; + ns = n->_mp_size; + ds = d->_mp_size; + + if (ds == 0) + gmp_die("mpz_div_qr: Divide by zero."); + + if (ns == 0) + { + if (q) + q->_mp_size = 0; + if (r) + r->_mp_size = 0; + return 0; + } + + nn = GMP_ABS (ns); + dn = GMP_ABS (ds); + + qs = ds ^ ns; + + if (nn < dn) + { + if (mode == GMP_DIV_CEIL && qs >= 0) + { + /* q = 1, r = n - d */ + if (r) + mpz_sub (r, n, d); + if (q) + mpz_set_ui (q, 1); + } + else if (mode == GMP_DIV_FLOOR && qs < 0) + { + /* q = -1, r = n + d */ + if (r) + mpz_add (r, n, d); + if (q) + mpz_set_si (q, -1); + } + else + { + /* q = 0, r = d */ + if (r) + mpz_set (r, n); + if (q) + q->_mp_size = 0; + } + return 1; + } + else + { + mp_ptr np, qp; + mp_size_t qn, rn; + mpz_t tq, tr; + + mpz_init_set (tr, n); + np = tr->_mp_d; + + qn = nn - dn + 1; + + if (q) + { + mpz_init2 (tq, qn * GMP_LIMB_BITS); + qp = tq->_mp_d; + } + else + qp = NULL; + + mpn_div_qr (qp, np, nn, d->_mp_d, dn); + + if (qp) + { + qn -= (qp[qn-1] == 0); + + tq->_mp_size = qs < 0 ? 
-qn : qn; + } + rn = mpn_normalized_size (np, dn); + tr->_mp_size = ns < 0 ? - rn : rn; + + if (mode == GMP_DIV_FLOOR && qs < 0 && rn != 0) + { + if (q) + mpz_sub_ui (tq, tq, 1); + if (r) + mpz_add (tr, tr, d); + } + else if (mode == GMP_DIV_CEIL && qs >= 0 && rn != 0) + { + if (q) + mpz_add_ui (tq, tq, 1); + if (r) + mpz_sub (tr, tr, d); + } + + if (q) + { + mpz_swap (tq, q); + mpz_clear (tq); + } + if (r) + mpz_swap (tr, r); + + mpz_clear (tr); + + return rn != 0; + } +} + +void +mpz_cdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, r, n, d, GMP_DIV_CEIL); +} + +void +mpz_fdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, r, n, d, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, r, n, d, GMP_DIV_TRUNC); +} + +void +mpz_cdiv_q (mpz_t q, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, NULL, n, d, GMP_DIV_CEIL); +} + +void +mpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, NULL, n, d, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_q (mpz_t q, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC); +} + +void +mpz_cdiv_r (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL); +} + +void +mpz_fdiv_r (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_r (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, GMP_DIV_TRUNC); +} + +void +mpz_mod (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, d->_mp_size >= 0 ? GMP_DIV_FLOOR : GMP_DIV_CEIL); +} + +static void +mpz_div_q_2exp (mpz_t q, const mpz_t u, mp_bitcnt_t bit_index, + enum mpz_div_round_mode mode) +{ + mp_size_t un, qn; + mp_size_t limb_cnt; + mp_ptr qp; + int adjust; + + un = u->_mp_size; + if (un == 0) + { + q->_mp_size = 0; + return; + } + limb_cnt = bit_index / GMP_LIMB_BITS; + qn = GMP_ABS (un) - limb_cnt; + bit_index %= GMP_LIMB_BITS; + + if (mode == ((un > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* un != 0 here. */ + /* Note: Below, the final indexing at limb_cnt is valid because at + that point we have qn > 0. */ + adjust = (qn <= 0 + || !mpn_zero_p (u->_mp_d, limb_cnt) + || (u->_mp_d[limb_cnt] + & (((mp_limb_t) 1 << bit_index) - 1))); + else + adjust = 0; + + if (qn <= 0) + qn = 0; + else + { + qp = MPZ_REALLOC (q, qn); + + if (bit_index != 0) + { + mpn_rshift (qp, u->_mp_d + limb_cnt, qn, bit_index); + qn -= qp[qn - 1] == 0; + } + else + { + mpn_copyi (qp, u->_mp_d + limb_cnt, qn); + } + } + + q->_mp_size = qn; + + if (adjust) + mpz_add_ui (q, q, 1); + if (un < 0) + mpz_neg (q, q); +} + +static void +mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index, + enum mpz_div_round_mode mode) +{ + mp_size_t us, un, rn; + mp_ptr rp; + mp_limb_t mask; + + us = u->_mp_size; + if (us == 0 || bit_index == 0) + { + r->_mp_size = 0; + return; + } + rn = (bit_index + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; + assert (rn > 0); + + rp = MPZ_REALLOC (r, rn); + un = GMP_ABS (us); + + mask = GMP_LIMB_MAX >> (rn * GMP_LIMB_BITS - bit_index); + + if (rn > un) + { + /* Quotient (with truncation) is zero, and remainder is + non-zero */ + if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ + { + /* Have to negate and sign extend. */ + mp_size_t i; + + gmp_assert_nocarry (! 
mpn_neg (rp, u->_mp_d, un)); + for (i = un; i < rn - 1; i++) + rp[i] = GMP_LIMB_MAX; + + rp[rn-1] = mask; + us = -us; + } + else + { + /* Just copy */ + if (r != u) + mpn_copyi (rp, u->_mp_d, un); + + rn = un; + } + } + else + { + if (r != u) + mpn_copyi (rp, u->_mp_d, rn - 1); + + rp[rn-1] = u->_mp_d[rn-1] & mask; + + if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ + { + /* If r != 0, compute 2^{bit_count} - r. */ + mpn_neg (rp, rp, rn); + + rp[rn-1] &= mask; + + /* us is not used for anything else, so we can modify it + here to indicate flipped sign. */ + us = -us; + } + } + rn = mpn_normalized_size (rp, rn); + r->_mp_size = us < 0 ? -rn : rn; +} + +void +mpz_cdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_q_2exp (r, u, cnt, GMP_DIV_CEIL); +} + +void +mpz_fdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_q_2exp (r, u, cnt, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_q_2exp (r, u, cnt, GMP_DIV_TRUNC); +} + +void +mpz_cdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_r_2exp (r, u, cnt, GMP_DIV_CEIL); +} + +void +mpz_fdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_r_2exp (r, u, cnt, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_r_2exp (r, u, cnt, GMP_DIV_TRUNC); +} + +void +mpz_divexact (mpz_t q, const mpz_t n, const mpz_t d) +{ + gmp_assert_nocarry (mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC)); +} + +int +mpz_divisible_p (const mpz_t n, const mpz_t d) +{ + return mpz_div_qr (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; +} + +int +mpz_congruent_p (const mpz_t a, const mpz_t b, const mpz_t m) +{ + mpz_t t; + int res; + + /* a == b (mod 0) iff a == b */ + if (mpz_sgn (m) == 0) + return (mpz_cmp (a, b) == 0); + + mpz_init (t); + mpz_sub (t, a, b); + res = mpz_divisible_p (t, m); + mpz_clear (t); + + return res; +} + +static unsigned long +mpz_div_qr_ui (mpz_t q, mpz_t r, + const mpz_t n, unsigned long d, enum mpz_div_round_mode mode) +{ + unsigned long ret; + mpz_t rr, dd; + + mpz_init (rr); + mpz_init_set_ui (dd, d); + mpz_div_qr (q, rr, n, dd, mode); + mpz_clear (dd); + ret = mpz_get_ui (rr); + + if (r) + mpz_swap (r, rr); + mpz_clear (rr); + + return ret; +} + +unsigned long +mpz_cdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, r, n, d, GMP_DIV_CEIL); +} + +unsigned long +mpz_fdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, r, n, d, GMP_DIV_FLOOR); +} + +unsigned long +mpz_tdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, r, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_cdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_CEIL); +} + +unsigned long +mpz_fdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_FLOOR); +} + +unsigned long +mpz_tdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_cdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_CEIL); +} +unsigned long +mpz_fdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); +} +unsigned long +mpz_tdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_cdiv_ui 
(const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_CEIL); +} + +unsigned long +mpz_fdiv_ui (const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_FLOOR); +} + +unsigned long +mpz_tdiv_ui (const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_mod_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); +} + +void +mpz_divexact_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + gmp_assert_nocarry (mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC)); +} + +int +mpz_divisible_ui_p (const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; +} + + +/* GCD */ +static mp_limb_t +mpn_gcd_11 (mp_limb_t u, mp_limb_t v) +{ + unsigned shift; + + assert ( (u | v) > 0); + + if (u == 0) + return v; + else if (v == 0) + return u; + + gmp_ctz (shift, u | v); + + u >>= shift; + v >>= shift; + + if ( (u & 1) == 0) + MP_LIMB_T_SWAP (u, v); + + while ( (v & 1) == 0) + v >>= 1; + + while (u != v) + { + if (u > v) + { + u -= v; + do + u >>= 1; + while ( (u & 1) == 0); + } + else + { + v -= u; + do + v >>= 1; + while ( (v & 1) == 0); + } + } + return u << shift; +} + +mp_size_t +mpn_gcd (mp_ptr rp, mp_ptr up, mp_size_t un, mp_ptr vp, mp_size_t vn) +{ + assert (un >= vn); + assert (vn > 0); + assert (!GMP_MPN_OVERLAP_P (up, un, vp, vn)); + assert (vp[vn-1] > 0); + assert ((up[0] | vp[0]) & 1); + + if (un > vn) + mpn_div_qr (NULL, up, un, vp, vn); + + un = mpn_normalized_size (up, vn); + if (un == 0) + { + mpn_copyi (rp, vp, vn); + return vn; + } + + if (!(vp[0] & 1)) + MPN_PTR_SWAP (up, un, vp, vn); + + while (un > 1 || vn > 1) + { + int shift; + assert (vp[0] & 1); + + while (up[0] == 0) + { + up++; + un--; + } + gmp_ctz (shift, up[0]); + if (shift > 0) + { + gmp_assert_nocarry (mpn_rshift(up, up, un, shift)); + un -= (up[un-1] == 0); + } + + if (un < vn) + MPN_PTR_SWAP (up, un, vp, vn); + else if (un == vn) + { + int c = mpn_cmp (up, vp, un); + if (c == 0) + { + mpn_copyi (rp, up, un); + return un; + } + else if (c < 0) + MP_PTR_SWAP (up, vp); + } + + gmp_assert_nocarry (mpn_sub (up, up, un, vp, vn)); + un = mpn_normalized_size (up, un); + } + rp[0] = mpn_gcd_11 (up[0], vp[0]); + return 1; +} + +unsigned long +mpz_gcd_ui (mpz_t g, const mpz_t u, unsigned long v) +{ + mpz_t t; + mpz_init_set_ui(t, v); + mpz_gcd (t, u, t); + if (v > 0) + v = mpz_get_ui (t); + + if (g) + mpz_swap (t, g); + + mpz_clear (t); + + return v; +} + +static mp_bitcnt_t +mpz_make_odd (mpz_t r) +{ + mp_bitcnt_t shift; + + assert (r->_mp_size > 0); + /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ + shift = mpn_scan1 (r->_mp_d, 0); + mpz_tdiv_q_2exp (r, r, shift); + + return shift; +} + +void +mpz_gcd (mpz_t g, const mpz_t u, const mpz_t v) +{ + mpz_t tu, tv; + mp_bitcnt_t uz, vz, gz; + + if (u->_mp_size == 0) + { + mpz_abs (g, v); + return; + } + if (v->_mp_size == 0) + { + mpz_abs (g, u); + return; + } + + mpz_init (tu); + mpz_init (tv); + + mpz_abs (tu, u); + uz = mpz_make_odd (tu); + mpz_abs (tv, v); + vz = mpz_make_odd (tv); + gz = GMP_MIN (uz, vz); + + if (tu->_mp_size < tv->_mp_size) + mpz_swap (tu, tv); + + tu->_mp_size = mpn_gcd (tu->_mp_d, tu->_mp_d, tu->_mp_size, tv->_mp_d, tv->_mp_size); + mpz_mul_2exp (g, tu, gz); + + mpz_clear (tu); + mpz_clear (tv); +} + +void +mpz_gcdext (mpz_t g, mpz_t s, mpz_t t, const mpz_t u, const mpz_t v) +{ + mpz_t tu, tv, s0, s1, t0, t1; + mp_bitcnt_t uz, vz, 
gz; + mp_bitcnt_t power; + int cmp; + + if (u->_mp_size == 0) + { + /* g = 0 u + sgn(v) v */ + signed long sign = mpz_sgn (v); + mpz_abs (g, v); + if (s) + s->_mp_size = 0; + if (t) + mpz_set_si (t, sign); + return; + } + + if (v->_mp_size == 0) + { + /* g = sgn(u) u + 0 v */ + signed long sign = mpz_sgn (u); + mpz_abs (g, u); + if (s) + mpz_set_si (s, sign); + if (t) + t->_mp_size = 0; + return; + } + + mpz_init (tu); + mpz_init (tv); + mpz_init (s0); + mpz_init (s1); + mpz_init (t0); + mpz_init (t1); + + mpz_abs (tu, u); + uz = mpz_make_odd (tu); + mpz_abs (tv, v); + vz = mpz_make_odd (tv); + gz = GMP_MIN (uz, vz); + + uz -= gz; + vz -= gz; + + /* Cofactors corresponding to odd gcd. gz handled later. */ + if (tu->_mp_size < tv->_mp_size) + { + mpz_swap (tu, tv); + MPZ_SRCPTR_SWAP (u, v); + MPZ_PTR_SWAP (s, t); + MP_BITCNT_T_SWAP (uz, vz); + } + + /* Maintain + * + * u = t0 tu + t1 tv + * v = s0 tu + s1 tv + * + * where u and v denote the inputs with common factors of two + * eliminated, and det (s0, t0; s1, t1) = 2^p. Then + * + * 2^p tu = s1 u - t1 v + * 2^p tv = -s0 u + t0 v + */ + + /* After initial division, tu = q tv + tu', we have + * + * u = 2^uz (tu' + q tv) + * v = 2^vz tv + * + * or + * + * t0 = 2^uz, t1 = 2^uz q + * s0 = 0, s1 = 2^vz + */ + + mpz_tdiv_qr (t1, tu, tu, tv); + mpz_mul_2exp (t1, t1, uz); + + mpz_setbit (s1, vz); + power = uz + vz; + + if (tu->_mp_size > 0) + { + mp_bitcnt_t shift; + shift = mpz_make_odd (tu); + mpz_setbit (t0, uz + shift); + power += shift; + + for (;;) + { + int c; + c = mpz_cmp (tu, tv); + if (c == 0) + break; + + if (c < 0) + { + /* tv = tv' + tu + * + * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv' + * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */ + + mpz_sub (tv, tv, tu); + mpz_add (t0, t0, t1); + mpz_add (s0, s0, s1); + + shift = mpz_make_odd (tv); + mpz_mul_2exp (t1, t1, shift); + mpz_mul_2exp (s1, s1, shift); + } + else + { + mpz_sub (tu, tu, tv); + mpz_add (t1, t0, t1); + mpz_add (s1, s0, s1); + + shift = mpz_make_odd (tu); + mpz_mul_2exp (t0, t0, shift); + mpz_mul_2exp (s0, s0, shift); + } + power += shift; + } + } + else + mpz_setbit (t0, uz); + + /* Now tv = odd part of gcd, and -s0 and t0 are corresponding + cofactors. */ + + mpz_mul_2exp (tv, tv, gz); + mpz_neg (s0, s0); + + /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To + adjust cofactors, we need u / g and v / g */ + + mpz_divexact (s1, v, tv); + mpz_abs (s1, s1); + mpz_divexact (t1, u, tv); + mpz_abs (t1, t1); + + while (power-- > 0) + { + /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */ + if (mpz_odd_p (s0) || mpz_odd_p (t0)) + { + mpz_sub (s0, s0, s1); + mpz_add (t0, t0, t1); + } + assert (mpz_even_p (t0) && mpz_even_p (s0)); + mpz_tdiv_q_2exp (s0, s0, 1); + mpz_tdiv_q_2exp (t0, t0, 1); + } + + /* Choose small cofactors (they should generally satify + + |s| < |u| / 2g and |t| < |v| / 2g, + + with some documented exceptions). Always choose the smallest s, + if there are two choices for s with same absolute value, choose + the one with smallest corresponding t (this asymmetric condition + is needed to prefer s = 0, |t| = 1 when g = |a| = |b|). 
*/ + mpz_add (s1, s0, s1); + mpz_sub (t1, t0, t1); + cmp = mpz_cmpabs (s0, s1); + if (cmp > 0 || (cmp == 0 && mpz_cmpabs (t0, t1) > 0)) + { + mpz_swap (s0, s1); + mpz_swap (t0, t1); + } + if (u->_mp_size < 0) + mpz_neg (s0, s0); + if (v->_mp_size < 0) + mpz_neg (t0, t0); + + mpz_swap (g, tv); + if (s) + mpz_swap (s, s0); + if (t) + mpz_swap (t, t0); + + mpz_clear (tu); + mpz_clear (tv); + mpz_clear (s0); + mpz_clear (s1); + mpz_clear (t0); + mpz_clear (t1); +} + +void +mpz_lcm (mpz_t r, const mpz_t u, const mpz_t v) +{ + mpz_t g; + + if (u->_mp_size == 0 || v->_mp_size == 0) + { + r->_mp_size = 0; + return; + } + + mpz_init (g); + + mpz_gcd (g, u, v); + mpz_divexact (g, u, g); + mpz_mul (r, g, v); + + mpz_clear (g); + mpz_abs (r, r); +} + +void +mpz_lcm_ui (mpz_t r, const mpz_t u, unsigned long v) +{ + if (v == 0 || u->_mp_size == 0) + { + r->_mp_size = 0; + return; + } + + v /= mpz_gcd_ui (NULL, u, v); + mpz_mul_ui (r, u, v); + + mpz_abs (r, r); +} + +int +mpz_invert (mpz_t r, const mpz_t u, const mpz_t m) +{ + mpz_t g, tr; + int invertible; + + if (u->_mp_size == 0 || mpz_cmpabs_ui (m, 1) <= 0) + return 0; + + mpz_init (g); + mpz_init (tr); + + mpz_gcdext (g, tr, NULL, u, m); + invertible = (mpz_cmp_ui (g, 1) == 0); + + if (invertible) + { + if (tr->_mp_size < 0) + { + if (m->_mp_size >= 0) + mpz_add (tr, tr, m); + else + mpz_sub (tr, tr, m); + } + mpz_swap (r, tr); + } + + mpz_clear (g); + mpz_clear (tr); + return invertible; +} + + +/* Higher level operations (sqrt, pow and root) */ + +void +mpz_pow_ui (mpz_t r, const mpz_t b, unsigned long e) +{ + unsigned long bit; + mpz_t tr; + mpz_init_set_ui (tr, 1); + + bit = GMP_ULONG_HIGHBIT; + do + { + mpz_mul (tr, tr, tr); + if (e & bit) + mpz_mul (tr, tr, b); + bit >>= 1; + } + while (bit > 0); + + mpz_swap (r, tr); + mpz_clear (tr); +} + +void +mpz_ui_pow_ui (mpz_t r, unsigned long blimb, unsigned long e) +{ + mpz_t b; + + mpz_init_set_ui (b, blimb); + mpz_pow_ui (r, b, e); + mpz_clear (b); +} + +void +mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m) +{ + mpz_t tr; + mpz_t base; + mp_size_t en, mn; + mp_srcptr mp; + struct gmp_div_inverse minv; + unsigned shift; + mp_ptr tp = NULL; + + en = GMP_ABS (e->_mp_size); + mn = GMP_ABS (m->_mp_size); + if (mn == 0) + gmp_die ("mpz_powm: Zero modulo."); + + if (en == 0) + { + mpz_set_ui (r, mpz_cmpabs_ui (m, 1)); + return; + } + + mp = m->_mp_d; + mpn_div_qr_invert (&minv, mp, mn); + shift = minv.shift; + + if (shift > 0) + { + /* To avoid shifts, we do all our reductions, except the final + one, using a *normalized* m. */ + minv.shift = 0; + + tp = gmp_alloc_limbs (mn); + gmp_assert_nocarry (mpn_lshift (tp, mp, mn, shift)); + mp = tp; + } + + mpz_init (base); + + if (e->_mp_size < 0) + { + if (!mpz_invert (base, b, m)) + gmp_die ("mpz_powm: Negative exponent and non-invertible base."); + } + else + { + mp_size_t bn; + mpz_abs (base, b); + + bn = base->_mp_size; + if (bn >= mn) + { + mpn_div_qr_preinv (NULL, base->_mp_d, base->_mp_size, mp, mn, &minv); + bn = mn; + } + + /* We have reduced the absolute value. Now take care of the + sign. Note that we get zero represented non-canonically as + m. 
*/ + if (b->_mp_size < 0) + { + mp_ptr bp = MPZ_REALLOC (base, mn); + gmp_assert_nocarry (mpn_sub (bp, mp, mn, bp, bn)); + bn = mn; + } + base->_mp_size = mpn_normalized_size (base->_mp_d, bn); + } + mpz_init_set_ui (tr, 1); + + while (--en >= 0) + { + mp_limb_t w = e->_mp_d[en]; + mp_limb_t bit; + + bit = GMP_LIMB_HIGHBIT; + do + { + mpz_mul (tr, tr, tr); + if (w & bit) + mpz_mul (tr, tr, base); + if (tr->_mp_size > mn) + { + mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); + tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); + } + bit >>= 1; + } + while (bit > 0); + } + + /* Final reduction */ + if (tr->_mp_size >= mn) + { + minv.shift = shift; + mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); + tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); + } + if (tp) + gmp_free_limbs (tp, mn); + + mpz_swap (r, tr); + mpz_clear (tr); + mpz_clear (base); +} + +void +mpz_powm_ui (mpz_t r, const mpz_t b, unsigned long elimb, const mpz_t m) +{ + mpz_t e; + + mpz_init_set_ui (e, elimb); + mpz_powm (r, b, e, m); + mpz_clear (e); +} + +/* x=trunc(y^(1/z)), r=y-x^z */ +void +mpz_rootrem (mpz_t x, mpz_t r, const mpz_t y, unsigned long z) +{ + int sgn; + mp_bitcnt_t bc; + mpz_t t, u; + + sgn = y->_mp_size < 0; + if ((~z & sgn) != 0) + gmp_die ("mpz_rootrem: Negative argument, with even root."); + if (z == 0) + gmp_die ("mpz_rootrem: Zeroth root."); + + if (mpz_cmpabs_ui (y, 1) <= 0) { + if (x) + mpz_set (x, y); + if (r) + r->_mp_size = 0; + return; + } + + mpz_init (u); + mpz_init (t); + bc = (mpz_sizeinbase (y, 2) - 1) / z + 1; + mpz_setbit (t, bc); + + if (z == 2) /* simplify sqrt loop: z-1 == 1 */ + do { + mpz_swap (u, t); /* u = x */ + mpz_tdiv_q (t, y, u); /* t = y/x */ + mpz_add (t, t, u); /* t = y/x + x */ + mpz_tdiv_q_2exp (t, t, 1); /* x'= (y/x + x)/2 */ + } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ + else /* z != 2 */ { + mpz_t v; + + mpz_init (v); + if (sgn) + mpz_neg (t, t); + + do { + mpz_swap (u, t); /* u = x */ + mpz_pow_ui (t, u, z - 1); /* t = x^(z-1) */ + mpz_tdiv_q (t, y, t); /* t = y/x^(z-1) */ + mpz_mul_ui (v, u, z - 1); /* v = x*(z-1) */ + mpz_add (t, t, v); /* t = y/x^(z-1) + x*(z-1) */ + mpz_tdiv_q_ui (t, t, z); /* x'=(y/x^(z-1) + x*(z-1))/z */ + } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ + + mpz_clear (v); + } + + if (r) { + mpz_pow_ui (t, u, z); + mpz_sub (r, y, t); + } + if (x) + mpz_swap (x, u); + mpz_clear (u); + mpz_clear (t); +} + +int +mpz_root (mpz_t x, const mpz_t y, unsigned long z) +{ + int res; + mpz_t r; + + mpz_init (r); + mpz_rootrem (x, r, y, z); + res = r->_mp_size == 0; + mpz_clear (r); + + return res; +} + +/* Compute s = floor(sqrt(u)) and r = u - s^2. 
Allows r == NULL */ +void +mpz_sqrtrem (mpz_t s, mpz_t r, const mpz_t u) +{ + mpz_rootrem (s, r, u, 2); +} + +void +mpz_sqrt (mpz_t s, const mpz_t u) +{ + mpz_rootrem (s, NULL, u, 2); +} + +int +mpz_perfect_square_p (const mpz_t u) +{ + if (u->_mp_size <= 0) + return (u->_mp_size == 0); + else + return mpz_root (NULL, u, 2); +} + +int +mpn_perfect_square_p (mp_srcptr p, mp_size_t n) +{ + mpz_t t; + + assert (n > 0); + assert (p [n-1] != 0); + return mpz_root (NULL, mpz_roinit_normal_n (t, p, n), 2); +} + +mp_size_t +mpn_sqrtrem (mp_ptr sp, mp_ptr rp, mp_srcptr p, mp_size_t n) +{ + mpz_t s, r, u; + mp_size_t res; + + assert (n > 0); + assert (p [n-1] != 0); + + mpz_init (r); + mpz_init (s); + mpz_rootrem (s, r, mpz_roinit_normal_n (u, p, n), 2); + + assert (s->_mp_size == (n+1)/2); + mpn_copyd (sp, s->_mp_d, s->_mp_size); + mpz_clear (s); + res = r->_mp_size; + if (rp) + mpn_copyd (rp, r->_mp_d, res); + mpz_clear (r); + return res; +} + +/* Combinatorics */ + +void +mpz_mfac_uiui (mpz_t x, unsigned long n, unsigned long m) +{ + mpz_set_ui (x, n + (n == 0)); + if (m + 1 < 2) return; + while (n > m + 1) + mpz_mul_ui (x, x, n -= m); +} + +void +mpz_2fac_ui (mpz_t x, unsigned long n) +{ + mpz_mfac_uiui (x, n, 2); +} + +void +mpz_fac_ui (mpz_t x, unsigned long n) +{ + mpz_mfac_uiui (x, n, 1); +} + +void +mpz_bin_uiui (mpz_t r, unsigned long n, unsigned long k) +{ + mpz_t t; + + mpz_set_ui (r, k <= n); + + if (k > (n >> 1)) + k = (k <= n) ? n - k : 0; + + mpz_init (t); + mpz_fac_ui (t, k); + + for (; k > 0; --k) + mpz_mul_ui (r, r, n--); + + mpz_divexact (r, r, t); + mpz_clear (t); +} + + +/* Primality testing */ + +/* Computes Kronecker (a/b) with odd b, a!=0 and GCD(a,b) = 1 */ +/* Adapted from JACOBI_BASE_METHOD==4 in mpn/generic/jacbase.c */ +static int +gmp_jacobi_coprime (mp_limb_t a, mp_limb_t b) +{ + int c, bit = 0; + + assert (b & 1); + assert (a != 0); + /* assert (mpn_gcd_11 (a, b) == 1); */ + + /* Below, we represent a and b shifted right so that the least + significant one bit is implicit. */ + b >>= 1; + + gmp_ctz(c, a); + a >>= 1; + + for (;;) + { + a >>= c; + /* (2/b) = -1 if b = 3 or 5 mod 8 */ + bit ^= c & (b ^ (b >> 1)); + if (a < b) + { + if (a == 0) + return bit & 1 ? -1 : 1; + bit ^= a & b; + a = b - a; + b -= a; + } + else + { + a -= b; + assert (a != 0); + } + + gmp_ctz(c, a); + ++c; + } +} + +static void +gmp_lucas_step_k_2k (mpz_t V, mpz_t Qk, const mpz_t n) +{ + mpz_mod (Qk, Qk, n); + /* V_{2k} <- V_k ^ 2 - 2Q^k */ + mpz_mul (V, V, V); + mpz_submul_ui (V, Qk, 2); + mpz_tdiv_r (V, V, n); + /* Q^{2k} = (Q^k)^2 */ + mpz_mul (Qk, Qk, Qk); +} + +/* Computes V_k, Q^k (mod n) for the Lucas' sequence */ +/* with P=1, Q=Q; k = (n>>b0)|1. */ +/* Requires an odd n > 4; b0 > 0; -2*Q must not overflow a long */ +/* Returns (U_k == 0) and sets V=V_k and Qk=Q^k. 
*/ +static int +gmp_lucas_mod (mpz_t V, mpz_t Qk, long Q, + mp_bitcnt_t b0, const mpz_t n) +{ + mp_bitcnt_t bs; + mpz_t U; + int res; + + assert (b0 > 0); + assert (Q <= - (LONG_MIN / 2)); + assert (Q >= - (LONG_MAX / 2)); + assert (mpz_cmp_ui (n, 4) > 0); + assert (mpz_odd_p (n)); + + mpz_init_set_ui (U, 1); /* U1 = 1 */ + mpz_set_ui (V, 1); /* V1 = 1 */ + mpz_set_si (Qk, Q); + + for (bs = mpz_sizeinbase (n, 2) - 1; --bs >= b0;) + { + /* U_{2k} <- U_k * V_k */ + mpz_mul (U, U, V); + /* V_{2k} <- V_k ^ 2 - 2Q^k */ + /* Q^{2k} = (Q^k)^2 */ + gmp_lucas_step_k_2k (V, Qk, n); + + /* A step k->k+1 is performed if the bit in $n$ is 1 */ + /* mpz_tstbit(n,bs) or the bit is 0 in $n$ but */ + /* should be 1 in $n+1$ (bs == b0) */ + if (b0 == bs || mpz_tstbit (n, bs)) + { + /* Q^{k+1} <- Q^k * Q */ + mpz_mul_si (Qk, Qk, Q); + /* U_{k+1} <- (U_k + V_k) / 2 */ + mpz_swap (U, V); /* Keep in V the old value of U_k */ + mpz_add (U, U, V); + /* We have to compute U/2, so we need an even value, */ + /* equivalent (mod n) */ + if (mpz_odd_p (U)) + mpz_add (U, U, n); + mpz_tdiv_q_2exp (U, U, 1); + /* V_{k+1} <-(D*U_k + V_k) / 2 = + U_{k+1} + (D-1)/2*U_k = U_{k+1} - 2Q*U_k */ + mpz_mul_si (V, V, -2*Q); + mpz_add (V, U, V); + mpz_tdiv_r (V, V, n); + } + mpz_tdiv_r (U, U, n); + } + + res = U->_mp_size == 0; + mpz_clear (U); + return res; +} + +/* Performs strong Lucas' test on x, with parameters suggested */ +/* for the BPSW test. Qk is only passed to recycle a variable. */ +/* Requires GCD (x,6) = 1.*/ +static int +gmp_stronglucas (const mpz_t x, mpz_t Qk) +{ + mp_bitcnt_t b0; + mpz_t V, n; + mp_limb_t maxD, D; /* The absolute value is stored. */ + long Q; + mp_limb_t tl; + + /* Test on the absolute value. */ + mpz_roinit_normal_n (n, x->_mp_d, GMP_ABS (x->_mp_size)); + + assert (mpz_odd_p (n)); + /* assert (mpz_gcd_ui (NULL, n, 6) == 1); */ + if (mpz_root (Qk, n, 2)) + return 0; /* A square is composite. */ + + /* Check Ds up to square root (in case, n is prime) + or avoid overflows */ + maxD = (Qk->_mp_size == 1) ? Qk->_mp_d [0] - 1 : GMP_LIMB_MAX; + + D = 3; + /* Search a D such that (D/n) = -1 in the sequence 5,-7,9,-11,.. */ + /* For those Ds we have (D/n) = (n/|D|) */ + do + { + if (D >= maxD) + return 1 + (D != GMP_LIMB_MAX); /* (1 + ! ~ D) */ + D += 2; + tl = mpz_tdiv_ui (n, D); + if (tl == 0) + return 0; + } + while (gmp_jacobi_coprime (tl, D) == 1); + + mpz_init (V); + + /* n-(D/n) = n+1 = d*2^{b0}, with d = (n>>b0) | 1 */ + b0 = mpn_common_scan (~ n->_mp_d[0], 0, n->_mp_d, n->_mp_size, GMP_LIMB_MAX); + /* b0 = mpz_scan0 (n, 0); */ + + /* D= P^2 - 4Q; P = 1; Q = (1-D)/4 */ + Q = (D & 2) ? (long) (D >> 2) + 1 : -(long) (D >> 2); + + if (! gmp_lucas_mod (V, Qk, Q, b0, n)) /* If Ud != 0 */ + while (V->_mp_size != 0 && --b0 != 0) /* while Vk != 0 */ + /* V <- V ^ 2 - 2Q^k */ + /* Q^{2k} = (Q^k)^2 */ + gmp_lucas_step_k_2k (V, Qk, n); + + mpz_clear (V); + return (b0 != 0); +} + +static int +gmp_millerrabin (const mpz_t n, const mpz_t nm1, mpz_t y, + const mpz_t q, mp_bitcnt_t k) +{ + assert (k > 0); + + /* Caller must initialize y to the base. */ + mpz_powm (y, y, q, n); + + if (mpz_cmp_ui (y, 1) == 0 || mpz_cmp (y, nm1) == 0) + return 1; + + while (--k > 0) + { + mpz_powm_ui (y, y, 2, n); + if (mpz_cmp (y, nm1) == 0) + return 1; + } + return 0; +} + +/* This product is 0xc0cfd797, and fits in 32 bits. 
*/ +#define GMP_PRIME_PRODUCT \ + (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL) + +/* Bit (p+1)/2 is set, for each odd prime <= 61 */ +#define GMP_PRIME_MASK 0xc96996dcUL + +int +mpz_probab_prime_p (const mpz_t n, int reps) +{ + mpz_t nm1; + mpz_t q; + mpz_t y; + mp_bitcnt_t k; + int is_prime; + int j; + + /* Note that we use the absolute value of n only, for compatibility + with the real GMP. */ + if (mpz_even_p (n)) + return (mpz_cmpabs_ui (n, 2) == 0) ? 2 : 0; + + /* Above test excludes n == 0 */ + assert (n->_mp_size != 0); + + if (mpz_cmpabs_ui (n, 64) < 0) + return (GMP_PRIME_MASK >> (n->_mp_d[0] >> 1)) & 2; + + if (mpz_gcd_ui (NULL, n, GMP_PRIME_PRODUCT) != 1) + return 0; + + /* All prime factors are >= 31. */ + if (mpz_cmpabs_ui (n, 31*31) < 0) + return 2; + + mpz_init (nm1); + mpz_init (q); + + /* Find q and k, where q is odd and n = 1 + 2**k * q. */ + mpz_abs (nm1, n); + nm1->_mp_d[0] -= 1; + /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ + k = mpn_scan1 (nm1->_mp_d, 0); + mpz_tdiv_q_2exp (q, nm1, k); + + /* BPSW test */ + mpz_init_set_ui (y, 2); + is_prime = gmp_millerrabin (n, nm1, y, q, k) && gmp_stronglucas (n, y); + reps -= 24; /* skip the first 24 repetitions */ + + /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] = + j^2 + j + 41 using Euler's polynomial. We potentially stop early, + if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps > + 30 (a[30] == 971 > 31*31 == 961). */ + + for (j = 0; is_prime & (j < reps); j++) + { + mpz_set_ui (y, (unsigned long) j*j+j+41); + if (mpz_cmp (y, nm1) >= 0) + { + /* Don't try any further bases. This "early" break does not affect + the result for any reasonable reps value (<=5000 was tested) */ + assert (j >= 30); + break; + } + is_prime = gmp_millerrabin (n, nm1, y, q, k); + } + mpz_clear (nm1); + mpz_clear (q); + mpz_clear (y); + + return is_prime; +} + + +/* Logical operations and bit manipulation. */ + +/* Numbers are treated as if represented in two's complement (and + infinitely sign extended). For a negative values we get the two's + complement from -x = ~x + 1, where ~ is bitwise complement. + Negation transforms + + xxxx10...0 + + into + + yyyy10...0 + + where yyyy is the bitwise complement of xxxx. So least significant + bits, up to and including the first one bit, are unchanged, and + the more significant bits are all complemented. + + To change a bit from zero to one in a negative number, subtract the + corresponding power of two from the absolute value. This can never + underflow. To change a bit from one to zero, add the corresponding + power of two, and this might overflow. E.g., if x = -001111, the + two's complement is 110001. Clearing the least significant bit, we + get two's complement 110000, and -010000. */ + +int +mpz_tstbit (const mpz_t d, mp_bitcnt_t bit_index) +{ + mp_size_t limb_index; + unsigned shift; + mp_size_t ds; + mp_size_t dn; + mp_limb_t w; + int bit; + + ds = d->_mp_size; + dn = GMP_ABS (ds); + limb_index = bit_index / GMP_LIMB_BITS; + if (limb_index >= dn) + return ds < 0; + + shift = bit_index % GMP_LIMB_BITS; + w = d->_mp_d[limb_index]; + bit = (w >> shift) & 1; + + if (ds < 0) + { + /* d < 0. Check if any of the bits below is set: If so, our bit + must be complemented. 
*/ + if (shift > 0 && (mp_limb_t) (w << (GMP_LIMB_BITS - shift)) > 0) + return bit ^ 1; + while (--limb_index >= 0) + if (d->_mp_d[limb_index] > 0) + return bit ^ 1; + } + return bit; +} + +static void +mpz_abs_add_bit (mpz_t d, mp_bitcnt_t bit_index) +{ + mp_size_t dn, limb_index; + mp_limb_t bit; + mp_ptr dp; + + dn = GMP_ABS (d->_mp_size); + + limb_index = bit_index / GMP_LIMB_BITS; + bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); + + if (limb_index >= dn) + { + mp_size_t i; + /* The bit should be set outside of the end of the number. + We have to increase the size of the number. */ + dp = MPZ_REALLOC (d, limb_index + 1); + + dp[limb_index] = bit; + for (i = dn; i < limb_index; i++) + dp[i] = 0; + dn = limb_index + 1; + } + else + { + mp_limb_t cy; + + dp = d->_mp_d; + + cy = mpn_add_1 (dp + limb_index, dp + limb_index, dn - limb_index, bit); + if (cy > 0) + { + dp = MPZ_REALLOC (d, dn + 1); + dp[dn++] = cy; + } + } + + d->_mp_size = (d->_mp_size < 0) ? - dn : dn; +} + +static void +mpz_abs_sub_bit (mpz_t d, mp_bitcnt_t bit_index) +{ + mp_size_t dn, limb_index; + mp_ptr dp; + mp_limb_t bit; + + dn = GMP_ABS (d->_mp_size); + dp = d->_mp_d; + + limb_index = bit_index / GMP_LIMB_BITS; + bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); + + assert (limb_index < dn); + + gmp_assert_nocarry (mpn_sub_1 (dp + limb_index, dp + limb_index, + dn - limb_index, bit)); + dn = mpn_normalized_size (dp, dn); + d->_mp_size = (d->_mp_size < 0) ? - dn : dn; +} + +void +mpz_setbit (mpz_t d, mp_bitcnt_t bit_index) +{ + if (!mpz_tstbit (d, bit_index)) + { + if (d->_mp_size >= 0) + mpz_abs_add_bit (d, bit_index); + else + mpz_abs_sub_bit (d, bit_index); + } +} + +void +mpz_clrbit (mpz_t d, mp_bitcnt_t bit_index) +{ + if (mpz_tstbit (d, bit_index)) + { + if (d->_mp_size >= 0) + mpz_abs_sub_bit (d, bit_index); + else + mpz_abs_add_bit (d, bit_index); + } +} + +void +mpz_combit (mpz_t d, mp_bitcnt_t bit_index) +{ + if (mpz_tstbit (d, bit_index) ^ (d->_mp_size < 0)) + mpz_abs_sub_bit (d, bit_index); + else + mpz_abs_add_bit (d, bit_index); +} + +void +mpz_com (mpz_t r, const mpz_t u) +{ + mpz_add_ui (r, u, 1); + mpz_neg (r, r); +} + +void +mpz_and (mpz_t r, const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, rn, i; + mp_ptr up, vp, rp; + + mp_limb_t ux, vx, rx; + mp_limb_t uc, vc, rc; + mp_limb_t ul, vl, rl; + + un = GMP_ABS (u->_mp_size); + vn = GMP_ABS (v->_mp_size); + if (un < vn) + { + MPZ_SRCPTR_SWAP (u, v); + MP_SIZE_T_SWAP (un, vn); + } + if (vn == 0) + { + r->_mp_size = 0; + return; + } + + uc = u->_mp_size < 0; + vc = v->_mp_size < 0; + rc = uc & vc; + + ux = -uc; + vx = -vc; + rx = -rc; + + /* If the smaller input is positive, higher limbs don't matter. */ + rn = vx ? un : vn; + + rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); + + up = u->_mp_d; + vp = v->_mp_d; + + i = 0; + do + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + vl = (vp[i] ^ vx) + vc; + vc = vl < vc; + + rl = ( (ul & vl) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + while (++i < vn); + assert (vc == 0); + + for (; i < rn; i++) + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + rl = ( (ul & vx) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + if (rc) + rp[rn++] = rc; + else + rn = mpn_normalized_size (rp, rn); + + r->_mp_size = rx ? 
-rn : rn; +} + +void +mpz_ior (mpz_t r, const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, rn, i; + mp_ptr up, vp, rp; + + mp_limb_t ux, vx, rx; + mp_limb_t uc, vc, rc; + mp_limb_t ul, vl, rl; + + un = GMP_ABS (u->_mp_size); + vn = GMP_ABS (v->_mp_size); + if (un < vn) + { + MPZ_SRCPTR_SWAP (u, v); + MP_SIZE_T_SWAP (un, vn); + } + if (vn == 0) + { + mpz_set (r, u); + return; + } + + uc = u->_mp_size < 0; + vc = v->_mp_size < 0; + rc = uc | vc; + + ux = -uc; + vx = -vc; + rx = -rc; + + /* If the smaller input is negative, by sign extension higher limbs + don't matter. */ + rn = vx ? vn : un; + + rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); + + up = u->_mp_d; + vp = v->_mp_d; + + i = 0; + do + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + vl = (vp[i] ^ vx) + vc; + vc = vl < vc; + + rl = ( (ul | vl) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + while (++i < vn); + assert (vc == 0); + + for (; i < rn; i++) + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + rl = ( (ul | vx) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + if (rc) + rp[rn++] = rc; + else + rn = mpn_normalized_size (rp, rn); + + r->_mp_size = rx ? -rn : rn; +} + +void +mpz_xor (mpz_t r, const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, i; + mp_ptr up, vp, rp; + + mp_limb_t ux, vx, rx; + mp_limb_t uc, vc, rc; + mp_limb_t ul, vl, rl; + + un = GMP_ABS (u->_mp_size); + vn = GMP_ABS (v->_mp_size); + if (un < vn) + { + MPZ_SRCPTR_SWAP (u, v); + MP_SIZE_T_SWAP (un, vn); + } + if (vn == 0) + { + mpz_set (r, u); + return; + } + + uc = u->_mp_size < 0; + vc = v->_mp_size < 0; + rc = uc ^ vc; + + ux = -uc; + vx = -vc; + rx = -rc; + + rp = MPZ_REALLOC (r, un + (mp_size_t) rc); + + up = u->_mp_d; + vp = v->_mp_d; + + i = 0; + do + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + vl = (vp[i] ^ vx) + vc; + vc = vl < vc; + + rl = (ul ^ vl ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + while (++i < vn); + assert (vc == 0); + + for (; i < un; i++) + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + rl = (ul ^ ux) + rc; + rc = rl < rc; + rp[i] = rl; + } + if (rc) + rp[un++] = rc; + else + un = mpn_normalized_size (rp, un); + + r->_mp_size = rx ? -un : un; +} + +static unsigned +gmp_popcount_limb (mp_limb_t x) +{ + unsigned c; + + /* Do 16 bits at a time, to avoid limb-sized constants. 
*/ + int LOCAL_SHIFT_BITS = 16; + for (c = 0; x > 0;) + { + unsigned w = x - ((x >> 1) & 0x5555); + w = ((w >> 2) & 0x3333) + (w & 0x3333); + w = (w >> 4) + w; + w = ((w >> 8) & 0x000f) + (w & 0x000f); + c += w; + if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) + x >>= LOCAL_SHIFT_BITS; + else + x = 0; + } + return c; +} + +mp_bitcnt_t +mpn_popcount (mp_srcptr p, mp_size_t n) +{ + mp_size_t i; + mp_bitcnt_t c; + + for (c = 0, i = 0; i < n; i++) + c += gmp_popcount_limb (p[i]); + + return c; +} + +mp_bitcnt_t +mpz_popcount (const mpz_t u) +{ + mp_size_t un; + + un = u->_mp_size; + + if (un < 0) + return ~(mp_bitcnt_t) 0; + + return mpn_popcount (u->_mp_d, un); +} + +mp_bitcnt_t +mpz_hamdist (const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, i; + mp_limb_t uc, vc, ul, vl, comp; + mp_srcptr up, vp; + mp_bitcnt_t c; + + un = u->_mp_size; + vn = v->_mp_size; + + if ( (un ^ vn) < 0) + return ~(mp_bitcnt_t) 0; + + comp = - (uc = vc = (un < 0)); + if (uc) + { + assert (vn < 0); + un = -un; + vn = -vn; + } + + up = u->_mp_d; + vp = v->_mp_d; + + if (un < vn) + MPN_SRCPTR_SWAP (up, un, vp, vn); + + for (i = 0, c = 0; i < vn; i++) + { + ul = (up[i] ^ comp) + uc; + uc = ul < uc; + + vl = (vp[i] ^ comp) + vc; + vc = vl < vc; + + c += gmp_popcount_limb (ul ^ vl); + } + assert (vc == 0); + + for (; i < un; i++) + { + ul = (up[i] ^ comp) + uc; + uc = ul < uc; + + c += gmp_popcount_limb (ul ^ comp); + } + + return c; +} + +mp_bitcnt_t +mpz_scan1 (const mpz_t u, mp_bitcnt_t starting_bit) +{ + mp_ptr up; + mp_size_t us, un, i; + mp_limb_t limb, ux; + + us = u->_mp_size; + un = GMP_ABS (us); + i = starting_bit / GMP_LIMB_BITS; + + /* Past the end there's no 1 bits for u>=0, or an immediate 1 bit + for u<0. Notice this test picks up any u==0 too. */ + if (i >= un) + return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit); + + up = u->_mp_d; + ux = 0; + limb = up[i]; + + if (starting_bit != 0) + { + if (us < 0) + { + ux = mpn_zero_p (up, i); + limb = ~ limb + ux; + ux = - (mp_limb_t) (limb >= ux); + } + + /* Mask to 0 all bits before starting_bit, thus ignoring them. */ + limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); + } + + return mpn_common_scan (limb, i, up, un, ux); +} + +mp_bitcnt_t +mpz_scan0 (const mpz_t u, mp_bitcnt_t starting_bit) +{ + mp_ptr up; + mp_size_t us, un, i; + mp_limb_t limb, ux; + + us = u->_mp_size; + ux = - (mp_limb_t) (us >= 0); + un = GMP_ABS (us); + i = starting_bit / GMP_LIMB_BITS; + + /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for + u<0. Notice this test picks up all cases of u==0 too. */ + if (i >= un) + return (ux ? starting_bit : ~(mp_bitcnt_t) 0); + + up = u->_mp_d; + limb = up[i] ^ ux; + + if (ux == 0) + limb -= mpn_zero_p (up, i); /* limb = ~(~limb + zero_p) */ + + /* Mask all bits before starting_bit, thus ignoring them. */ + limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); + + return mpn_common_scan (limb, i, up, un, ux); +} + + +/* MPZ base conversion. 
*/ + +size_t +mpz_sizeinbase (const mpz_t u, int base) +{ + mp_size_t un, tn; + mp_srcptr up; + mp_ptr tp; + mp_bitcnt_t bits; + struct gmp_div_inverse bi; + size_t ndigits; + + assert (base >= 2); + assert (base <= 62); + + un = GMP_ABS (u->_mp_size); + if (un == 0) + return 1; + + up = u->_mp_d; + + bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]); + switch (base) + { + case 2: + return bits; + case 4: + return (bits + 1) / 2; + case 8: + return (bits + 2) / 3; + case 16: + return (bits + 3) / 4; + case 32: + return (bits + 4) / 5; + /* FIXME: Do something more clever for the common case of base + 10. */ + } + + tp = gmp_alloc_limbs (un); + mpn_copyi (tp, up, un); + mpn_div_qr_1_invert (&bi, base); + + tn = un; + ndigits = 0; + do + { + ndigits++; + mpn_div_qr_1_preinv (tp, tp, tn, &bi); + tn -= (tp[tn-1] == 0); + } + while (tn > 0); + + gmp_free_limbs (tp, un); + return ndigits; +} + +char * +mpz_get_str (char *sp, int base, const mpz_t u) +{ + unsigned bits; + const char *digits; + mp_size_t un; + size_t i, sn, osn; + + digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; + if (base > 1) + { + if (base <= 36) + digits = "0123456789abcdefghijklmnopqrstuvwxyz"; + else if (base > 62) + return NULL; + } + else if (base >= -1) + base = 10; + else + { + base = -base; + if (base > 36) + return NULL; + } + + sn = 1 + mpz_sizeinbase (u, base); + if (!sp) + { + osn = 1 + sn; + sp = (char *) gmp_alloc (osn); + } + else + osn = 0; + un = GMP_ABS (u->_mp_size); + + if (un == 0) + { + sp[0] = '0'; + sn = 1; + goto ret; + } + + i = 0; + + if (u->_mp_size < 0) + sp[i++] = '-'; + + bits = mpn_base_power_of_two_p (base); + + if (bits) + /* Not modified in this case. */ + sn = i + mpn_get_str_bits ((unsigned char *) sp + i, bits, u->_mp_d, un); + else + { + struct mpn_base_info info; + mp_ptr tp; + + mpn_get_base_info (&info, base); + tp = gmp_alloc_limbs (un); + mpn_copyi (tp, u->_mp_d, un); + + sn = i + mpn_get_str_other ((unsigned char *) sp + i, base, &info, tp, un); + gmp_free_limbs (tp, un); + } + + for (; i < sn; i++) + sp[i] = digits[(unsigned char) sp[i]]; + +ret: + sp[sn] = '\0'; + if (osn && osn != sn + 1) + sp = (char*) gmp_realloc (sp, osn, sn + 1); + return sp; +} + +int +mpz_set_str (mpz_t r, const char *sp, int base) +{ + unsigned bits, value_of_a; + mp_size_t rn, alloc; + mp_ptr rp; + size_t dn, sn; + int sign; + unsigned char *dp; + + assert (base == 0 || (base >= 2 && base <= 62)); + + while (isspace( (unsigned char) *sp)) + sp++; + + sign = (*sp == '-'); + sp += sign; + + if (base == 0) + { + if (sp[0] == '0') + { + if (sp[1] == 'x' || sp[1] == 'X') + { + base = 16; + sp += 2; + } + else if (sp[1] == 'b' || sp[1] == 'B') + { + base = 2; + sp += 2; + } + else + base = 8; + } + else + base = 10; + } + + if (!*sp) + { + r->_mp_size = 0; + return -1; + } + sn = strlen(sp); + dp = (unsigned char *) gmp_alloc (sn); + + value_of_a = (base > 36) ? 
36 : 10; + for (dn = 0; *sp; sp++) + { + unsigned digit; + + if (isspace ((unsigned char) *sp)) + continue; + else if (*sp >= '0' && *sp <= '9') + digit = *sp - '0'; + else if (*sp >= 'a' && *sp <= 'z') + digit = *sp - 'a' + value_of_a; + else if (*sp >= 'A' && *sp <= 'Z') + digit = *sp - 'A' + 10; + else + digit = base; /* fail */ + + if (digit >= (unsigned) base) + { + gmp_free (dp, sn); + r->_mp_size = 0; + return -1; + } + + dp[dn++] = digit; + } + + if (!dn) + { + gmp_free (dp, sn); + r->_mp_size = 0; + return -1; + } + bits = mpn_base_power_of_two_p (base); + + if (bits > 0) + { + alloc = (dn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; + rp = MPZ_REALLOC (r, alloc); + rn = mpn_set_str_bits (rp, dp, dn, bits); + } + else + { + struct mpn_base_info info; + mpn_get_base_info (&info, base); + alloc = (dn + info.exp - 1) / info.exp; + rp = MPZ_REALLOC (r, alloc); + rn = mpn_set_str_other (rp, dp, dn, base, &info); + /* Normalization, needed for all-zero input. */ + assert (rn > 0); + rn -= rp[rn-1] == 0; + } + assert (rn <= alloc); + gmp_free (dp, sn); + + r->_mp_size = sign ? - rn : rn; + + return 0; +} + +int +mpz_init_set_str (mpz_t r, const char *sp, int base) +{ + mpz_init (r); + return mpz_set_str (r, sp, base); +} + +size_t +mpz_out_str (FILE *stream, int base, const mpz_t x) +{ + char *str; + size_t len, n; + + str = mpz_get_str (NULL, base, x); + if (!str) + return 0; + len = strlen (str); + n = fwrite (str, 1, len, stream); + gmp_free (str, len + 1); + return n; +} + + +static int +gmp_detect_endian (void) +{ + static const int i = 2; + const unsigned char *p = (const unsigned char *) &i; + return 1 - *p; +} + +/* Import and export. Does not support nails. */ +void +mpz_import (mpz_t r, size_t count, int order, size_t size, int endian, + size_t nails, const void *src) +{ + const unsigned char *p; + ptrdiff_t word_step; + mp_ptr rp; + mp_size_t rn; + + /* The current (partial) limb. */ + mp_limb_t limb; + /* The number of bytes already copied to this limb (starting from + the low end). */ + size_t bytes; + /* The index where the limb should be stored, when completed. */ + mp_size_t i; + + if (nails != 0) + gmp_die ("mpz_import: Nails not supported."); + + assert (order == 1 || order == -1); + assert (endian >= -1 && endian <= 1); + + if (endian == 0) + endian = gmp_detect_endian (); + + p = (unsigned char *) src; + + word_step = (order != endian) ? 2 * size : 0; + + /* Process bytes from the least significant end, so point p at the + least significant word. */ + if (order == 1) + { + p += size * (count - 1); + word_step = - word_step; + } + + /* And at least significant byte of that word. 
*/ + if (endian == 1) + p += (size - 1); + + rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t); + rp = MPZ_REALLOC (r, rn); + + for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step) + { + size_t j; + for (j = 0; j < size; j++, p -= (ptrdiff_t) endian) + { + limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT); + if (bytes == sizeof(mp_limb_t)) + { + rp[i++] = limb; + bytes = 0; + limb = 0; + } + } + } + assert (i + (bytes > 0) == rn); + if (limb != 0) + rp[i++] = limb; + else + i = mpn_normalized_size (rp, i); + + r->_mp_size = i; +} + +void * +mpz_export (void *r, size_t *countp, int order, size_t size, int endian, + size_t nails, const mpz_t u) +{ + size_t count; + mp_size_t un; + + if (nails != 0) + gmp_die ("mpz_export: Nails not supported."); + + assert (order == 1 || order == -1); + assert (endian >= -1 && endian <= 1); + assert (size > 0 || u->_mp_size == 0); + + un = u->_mp_size; + count = 0; + if (un != 0) + { + size_t k; + unsigned char *p; + ptrdiff_t word_step; + /* The current (partial) limb. */ + mp_limb_t limb; + /* The number of bytes left to do in this limb. */ + size_t bytes; + /* The index where the limb was read. */ + mp_size_t i; + + un = GMP_ABS (un); + + /* Count bytes in top limb. */ + limb = u->_mp_d[un-1]; + assert (limb != 0); + + k = (GMP_LIMB_BITS <= CHAR_BIT); + if (!k) + { + do { + int LOCAL_CHAR_BIT = CHAR_BIT; + k++; limb >>= LOCAL_CHAR_BIT; + } while (limb != 0); + } + /* else limb = 0; */ + + count = (k + (un-1) * sizeof (mp_limb_t) + size - 1) / size; + + if (!r) + r = gmp_alloc (count * size); + + if (endian == 0) + endian = gmp_detect_endian (); + + p = (unsigned char *) r; + + word_step = (order != endian) ? 2 * size : 0; + + /* Process bytes from the least significant end, so point p at the + least significant word. */ + if (order == 1) + { + p += size * (count - 1); + word_step = - word_step; + } + + /* And at least significant byte of that word. */ + if (endian == 1) + p += (size - 1); + + for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step) + { + size_t j; + for (j = 0; j < size; ++j, p -= (ptrdiff_t) endian) + { + if (sizeof (mp_limb_t) == 1) + { + if (i < un) + *p = u->_mp_d[i++]; + else + *p = 0; + } + else + { + int LOCAL_CHAR_BIT = CHAR_BIT; + if (bytes == 0) + { + if (i < un) + limb = u->_mp_d[i++]; + bytes = sizeof (mp_limb_t); + } + *p = limb; + limb >>= LOCAL_CHAR_BIT; + bytes--; + } + } + } + assert (i == un); + assert (k == count); + } + + if (countp) + *countp = count; + + return r; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp.h new file mode 100644 index 0000000000..f28cb360ce --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp.h @@ -0,0 +1,311 @@ +/* mini-gmp, a minimalistic implementation of a GNU GMP subset. + +Copyright 2011-2015, 2017, 2019-2021 Free Software Foundation, Inc. + +This file is part of the GNU MP Library. + +The GNU MP Library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 2 of the License, or (at your option) any + later version. + +or both in parallel, as here. 
+ +The GNU MP Library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the GNU MP Library. If not, +see https://www.gnu.org/licenses/. */ + +/* About mini-gmp: This is a minimal implementation of a subset of the + GMP interface. It is intended for inclusion into applications which + have modest bignums needs, as a fallback when the real GMP library + is not installed. + + This file defines the public interface. */ + +#ifndef __MINI_GMP_H__ +#define __MINI_GMP_H__ + +/* For size_t */ +#include + +#if defined (__cplusplus) +extern "C" { +#endif + +void mp_set_memory_functions (void *(*) (size_t), + void *(*) (void *, size_t, size_t), + void (*) (void *, size_t)); + +void mp_get_memory_functions (void *(**) (size_t), + void *(**) (void *, size_t, size_t), + void (**) (void *, size_t)); + +#ifndef MINI_GMP_LIMB_TYPE +#define MINI_GMP_LIMB_TYPE long +#endif + +typedef unsigned MINI_GMP_LIMB_TYPE mp_limb_t; +typedef long mp_size_t; +typedef unsigned long mp_bitcnt_t; + +typedef mp_limb_t *mp_ptr; +typedef const mp_limb_t *mp_srcptr; + +typedef struct +{ + int _mp_alloc; /* Number of *limbs* allocated and pointed + to by the _mp_d field. */ + int _mp_size; /* abs(_mp_size) is the number of limbs the + last field points to. If _mp_size is + negative this is a negative number. */ + mp_limb_t *_mp_d; /* Pointer to the limbs. */ +} __mpz_struct; + +typedef __mpz_struct mpz_t[1]; + +typedef __mpz_struct *mpz_ptr; +typedef const __mpz_struct *mpz_srcptr; + +extern const int mp_bits_per_limb; + +void mpn_copyi (mp_ptr, mp_srcptr, mp_size_t); +void mpn_copyd (mp_ptr, mp_srcptr, mp_size_t); +void mpn_zero (mp_ptr, mp_size_t); + +int mpn_cmp (mp_srcptr, mp_srcptr, mp_size_t); +int mpn_zero_p (mp_srcptr, mp_size_t); + +mp_limb_t mpn_add_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_add_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +mp_limb_t mpn_add (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); + +mp_limb_t mpn_sub_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_sub_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +mp_limb_t mpn_sub (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); + +mp_limb_t mpn_mul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_addmul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_submul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); + +mp_limb_t mpn_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); +void mpn_mul_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +void mpn_sqr (mp_ptr, mp_srcptr, mp_size_t); +int mpn_perfect_square_p (mp_srcptr, mp_size_t); +mp_size_t mpn_sqrtrem (mp_ptr, mp_ptr, mp_srcptr, mp_size_t); +mp_size_t mpn_gcd (mp_ptr, mp_ptr, mp_size_t, mp_ptr, mp_size_t); + +mp_limb_t mpn_lshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); +mp_limb_t mpn_rshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); + +mp_bitcnt_t mpn_scan0 (mp_srcptr, mp_bitcnt_t); +mp_bitcnt_t mpn_scan1 (mp_srcptr, mp_bitcnt_t); + +void mpn_com (mp_ptr, mp_srcptr, mp_size_t); +mp_limb_t mpn_neg (mp_ptr, mp_srcptr, mp_size_t); + +mp_bitcnt_t mpn_popcount (mp_srcptr, mp_size_t); + +mp_limb_t mpn_invert_3by2 (mp_limb_t, mp_limb_t); +#define mpn_invert_limb(x) mpn_invert_3by2 ((x), 0) + +size_t mpn_get_str (unsigned char *, int, mp_ptr, mp_size_t); 
+mp_size_t mpn_set_str (mp_ptr, const unsigned char *, size_t, int); + +void mpz_init (mpz_t); +void mpz_init2 (mpz_t, mp_bitcnt_t); +void mpz_clear (mpz_t); + +#define mpz_odd_p(z) (((z)->_mp_size != 0) & (int) (z)->_mp_d[0]) +#define mpz_even_p(z) (! mpz_odd_p (z)) + +int mpz_sgn (const mpz_t); +int mpz_cmp_si (const mpz_t, long); +int mpz_cmp_ui (const mpz_t, unsigned long); +int mpz_cmp (const mpz_t, const mpz_t); +int mpz_cmpabs_ui (const mpz_t, unsigned long); +int mpz_cmpabs (const mpz_t, const mpz_t); +int mpz_cmp_d (const mpz_t, double); +int mpz_cmpabs_d (const mpz_t, double); + +void mpz_abs (mpz_t, const mpz_t); +void mpz_neg (mpz_t, const mpz_t); +void mpz_swap (mpz_t, mpz_t); + +void mpz_add_ui (mpz_t, const mpz_t, unsigned long); +void mpz_add (mpz_t, const mpz_t, const mpz_t); +void mpz_sub_ui (mpz_t, const mpz_t, unsigned long); +void mpz_ui_sub (mpz_t, unsigned long, const mpz_t); +void mpz_sub (mpz_t, const mpz_t, const mpz_t); + +void mpz_mul_si (mpz_t, const mpz_t, long int); +void mpz_mul_ui (mpz_t, const mpz_t, unsigned long int); +void mpz_mul (mpz_t, const mpz_t, const mpz_t); +void mpz_mul_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_addmul_ui (mpz_t, const mpz_t, unsigned long int); +void mpz_addmul (mpz_t, const mpz_t, const mpz_t); +void mpz_submul_ui (mpz_t, const mpz_t, unsigned long int); +void mpz_submul (mpz_t, const mpz_t, const mpz_t); + +void mpz_cdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_fdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_tdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_cdiv_q (mpz_t, const mpz_t, const mpz_t); +void mpz_fdiv_q (mpz_t, const mpz_t, const mpz_t); +void mpz_tdiv_q (mpz_t, const mpz_t, const mpz_t); +void mpz_cdiv_r (mpz_t, const mpz_t, const mpz_t); +void mpz_fdiv_r (mpz_t, const mpz_t, const mpz_t); +void mpz_tdiv_r (mpz_t, const mpz_t, const mpz_t); + +void mpz_cdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_fdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_tdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_cdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_fdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_tdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); + +void mpz_mod (mpz_t, const mpz_t, const mpz_t); + +void mpz_divexact (mpz_t, const mpz_t, const mpz_t); + +int mpz_divisible_p (const mpz_t, const mpz_t); +int mpz_congruent_p (const mpz_t, const mpz_t, const mpz_t); + +unsigned long mpz_cdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); +unsigned long mpz_fdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); +unsigned long mpz_tdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); +unsigned long mpz_cdiv_q_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_fdiv_q_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_tdiv_q_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_cdiv_r_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_fdiv_r_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_tdiv_r_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_cdiv_ui (const mpz_t, unsigned long); +unsigned long mpz_fdiv_ui (const mpz_t, unsigned long); +unsigned long mpz_tdiv_ui (const mpz_t, unsigned long); + +unsigned long mpz_mod_ui (mpz_t, const mpz_t, unsigned long); + +void mpz_divexact_ui (mpz_t, const mpz_t, unsigned long); + +int mpz_divisible_ui_p (const mpz_t, unsigned long); + +unsigned long mpz_gcd_ui (mpz_t, const mpz_t, unsigned long); +void mpz_gcd (mpz_t, const mpz_t, const mpz_t); +void 
mpz_gcdext (mpz_t, mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_lcm_ui (mpz_t, const mpz_t, unsigned long); +void mpz_lcm (mpz_t, const mpz_t, const mpz_t); +int mpz_invert (mpz_t, const mpz_t, const mpz_t); + +void mpz_sqrtrem (mpz_t, mpz_t, const mpz_t); +void mpz_sqrt (mpz_t, const mpz_t); +int mpz_perfect_square_p (const mpz_t); + +void mpz_pow_ui (mpz_t, const mpz_t, unsigned long); +void mpz_ui_pow_ui (mpz_t, unsigned long, unsigned long); +void mpz_powm (mpz_t, const mpz_t, const mpz_t, const mpz_t); +void mpz_powm_ui (mpz_t, const mpz_t, unsigned long, const mpz_t); + +void mpz_rootrem (mpz_t, mpz_t, const mpz_t, unsigned long); +int mpz_root (mpz_t, const mpz_t, unsigned long); + +void mpz_fac_ui (mpz_t, unsigned long); +void mpz_2fac_ui (mpz_t, unsigned long); +void mpz_mfac_uiui (mpz_t, unsigned long, unsigned long); +void mpz_bin_uiui (mpz_t, unsigned long, unsigned long); + +int mpz_probab_prime_p (const mpz_t, int); + +int mpz_tstbit (const mpz_t, mp_bitcnt_t); +void mpz_setbit (mpz_t, mp_bitcnt_t); +void mpz_clrbit (mpz_t, mp_bitcnt_t); +void mpz_combit (mpz_t, mp_bitcnt_t); + +void mpz_com (mpz_t, const mpz_t); +void mpz_and (mpz_t, const mpz_t, const mpz_t); +void mpz_ior (mpz_t, const mpz_t, const mpz_t); +void mpz_xor (mpz_t, const mpz_t, const mpz_t); + +mp_bitcnt_t mpz_popcount (const mpz_t); +mp_bitcnt_t mpz_hamdist (const mpz_t, const mpz_t); +mp_bitcnt_t mpz_scan0 (const mpz_t, mp_bitcnt_t); +mp_bitcnt_t mpz_scan1 (const mpz_t, mp_bitcnt_t); + +int mpz_fits_slong_p (const mpz_t); +int mpz_fits_ulong_p (const mpz_t); +int mpz_fits_sint_p (const mpz_t); +int mpz_fits_uint_p (const mpz_t); +int mpz_fits_sshort_p (const mpz_t); +int mpz_fits_ushort_p (const mpz_t); +long int mpz_get_si (const mpz_t); +unsigned long int mpz_get_ui (const mpz_t); +double mpz_get_d (const mpz_t); +size_t mpz_size (const mpz_t); +mp_limb_t mpz_getlimbn (const mpz_t, mp_size_t); + +void mpz_realloc2 (mpz_t, mp_bitcnt_t); +mp_srcptr mpz_limbs_read (mpz_srcptr); +mp_ptr mpz_limbs_modify (mpz_t, mp_size_t); +mp_ptr mpz_limbs_write (mpz_t, mp_size_t); +void mpz_limbs_finish (mpz_t, mp_size_t); +mpz_srcptr mpz_roinit_n (mpz_t, mp_srcptr, mp_size_t); + +#define MPZ_ROINIT_N(xp, xs) {{0, (xs),(xp) }} + +void mpz_set_si (mpz_t, signed long int); +void mpz_set_ui (mpz_t, unsigned long int); +void mpz_set (mpz_t, const mpz_t); +void mpz_set_d (mpz_t, double); + +void mpz_init_set_si (mpz_t, signed long int); +void mpz_init_set_ui (mpz_t, unsigned long int); +void mpz_init_set (mpz_t, const mpz_t); +void mpz_init_set_d (mpz_t, double); + +size_t mpz_sizeinbase (const mpz_t, int); +char *mpz_get_str (char *, int, const mpz_t); +int mpz_set_str (mpz_t, const char *, int); +int mpz_init_set_str (mpz_t, const char *, int); + +/* This long list taken from gmp.h. */ +/* For reference, "defined(EOF)" cannot be used here. In g++ 2.95.4, + defines EOF but not FILE. 
*/ +#if defined (FILE) \ + || defined (H_STDIO) \ + || defined (_H_STDIO) /* AIX */ \ + || defined (_STDIO_H) /* glibc, Sun, SCO */ \ + || defined (_STDIO_H_) /* BSD, OSF */ \ + || defined (__STDIO_H) /* Borland */ \ + || defined (__STDIO_H__) /* IRIX */ \ + || defined (_STDIO_INCLUDED) /* HPUX */ \ + || defined (__dj_include_stdio_h_) /* DJGPP */ \ + || defined (_FILE_DEFINED) /* Microsoft */ \ + || defined (__STDIO__) /* Apple MPW MrC */ \ + || defined (_MSL_STDIO_H) /* Metrowerks */ \ + || defined (_STDIO_H_INCLUDED) /* QNX4 */ \ + || defined (_ISO_STDIO_ISO_H) /* Sun C++ */ \ + || defined (__STDIO_LOADED) /* VMS */ \ + || defined (_STDIO) /* HPE NonStop */ \ + || defined (__DEFINED_FILE) /* musl */ +size_t mpz_out_str (FILE *, int, const mpz_t); +#endif + +void mpz_import (mpz_t, size_t, int, size_t, int, size_t, const void *); +void *mpz_export (void *, size_t *, int, size_t, int, size_t, const mpz_t); + +#if defined (__cplusplus) +} +#endif +#endif /* __MINI_GMP_H__ */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mp.h new file mode 100644 index 0000000000..b3733b520d --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mp.h @@ -0,0 +1,88 @@ +#ifndef MP_H +#define MP_H + +#include +#include +#include + +// Functions taken from the GF module + +void mp_add(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords); +digit_t mp_shiftr(digit_t *x, const unsigned int shift, const unsigned int nwords); +void multiple_mp_shiftl(digit_t *x, const unsigned int shift, const unsigned int nwords); +void mp_shiftl(digit_t *x, const unsigned int shift, const unsigned int nwords); +void MUL(digit_t *out, const digit_t a, const digit_t b); + +// Functions taken from the EC module + +void mp_sub(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords); +void select_ct(digit_t *c, const digit_t *a, const digit_t *b, const digit_t mask, const int nwords); +void swap_ct(digit_t *a, digit_t *b, const digit_t option, const int nwords); +int mp_compare(const digit_t *a, const digit_t *b, unsigned int nwords); +bool mp_is_zero(const digit_t *a, unsigned int nwords); +void mp_mul2(digit_t *c, const digit_t *a, const digit_t *b); + +// Further functions for multiprecision arithmetic +void mp_print(const digit_t *a, size_t nwords); +void mp_copy(digit_t *b, const digit_t *a, size_t nwords); +void mp_neg(digit_t *a, unsigned int nwords); +bool mp_is_one(const digit_t *x, unsigned int nwords); +void mp_mul(digit_t *c, const digit_t *a, const digit_t *b, size_t nwords); +void mp_mod_2exp(digit_t *a, unsigned int e, unsigned int nwords); +void mp_inv_2e(digit_t *b, const digit_t *a, int e, unsigned int nwords); +void mp_invert_matrix(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, int e, unsigned int nwords); + +#define mp_is_odd(x, nwords) (((nwords) != 0) & (int)(x)[0]) +#define mp_is_even(x, nwords) (!mp_is_odd(x, nwords)) + +/********************** Constant-time unsigned comparisons ***********************/ + +// The following functions return 1 (TRUE) if condition is true, 0 (FALSE) otherwise +static inline unsigned int +is_digit_nonzero_ct(digit_t x) +{ // Is x != 0? + return (unsigned int)((x | (0 - x)) >> (RADIX - 1)); +} + +static inline unsigned int +is_digit_zero_ct(digit_t x) +{ // Is x = 0? + return (unsigned int)(1 ^ is_digit_nonzero_ct(x)); +} + +static inline unsigned int +is_digit_lessthan_ct(digit_t x, digit_t y) +{ // Is x < y? 
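+    // Branchless comparison: the expression below reconstructs the borrow bit of the subtraction x - y (full-subtractor logic) in the top bit and shifts it down, so the result is 1 exactly when x < y, without any secret-dependent branch.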
+ return (unsigned int)((x ^ ((x ^ y) | ((x - y) ^ y))) >> (RADIX - 1)); +} + +/********************** Platform-independent macros for digit-size operations + * **********************/ + +// Digit addition with carry +#define ADDC(sumOut, carryOut, addend1, addend2, carryIn) \ + { \ + digit_t tempReg = (addend1) + (digit_t)(carryIn); \ + (sumOut) = (addend2) + tempReg; \ + (carryOut) = (is_digit_lessthan_ct(tempReg, (digit_t)(carryIn)) | is_digit_lessthan_ct((sumOut), tempReg)); \ + } + +// Digit subtraction with borrow +#define SUBC(differenceOut, borrowOut, minuend, subtrahend, borrowIn) \ + { \ + digit_t tempReg = (minuend) - (subtrahend); \ + unsigned int borrowReg = \ + (is_digit_lessthan_ct((minuend), (subtrahend)) | ((borrowIn) & is_digit_zero_ct(tempReg))); \ + (differenceOut) = tempReg - (digit_t)(borrowIn); \ + (borrowOut) = borrowReg; \ + } + +// Shift right with flexible datatype +#define SHIFTR(highIn, lowIn, shift, shiftOut, DigitSize) \ + (shiftOut) = ((lowIn) >> (shift)) ^ ((highIn) << (DigitSize - (shift))); + +// Digit shift left +#define SHIFTL(highIn, lowIn, shift, shiftOut, DigitSize) \ + (shiftOut) = ((highIn) << (shift)) ^ ((lowIn) >> (RADIX - (shift))); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion.h new file mode 100644 index 0000000000..a567657464 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion.h @@ -0,0 +1,708 @@ +/** @file + * + * @authors Luca De Feo, Sina Schaeffler + * + * @brief Declarations for quaternion algebra operations + */ + +#ifndef QUATERNION_H +#define QUATERNION_H + +// #include +#include +#include "intbig.h" +#include + +/** @defgroup quat_quat Quaternion algebra + * @{ + */ + +/** @defgroup quat_vec_t Types for integer vectors and matrices + * @{ + */ + +/** @brief Type for vector of 2 big integers + * + * @typedef ibz_vec_2_t + */ +typedef ibz_t ibz_vec_2_t[2]; + +/** @brief Type for vectors of 4 integers + * + * @typedef ibz_vec_4_t + * + * Represented as a vector of 4 ibz_t (big integer) elements + */ +typedef ibz_t ibz_vec_4_t[4]; + +/** @brief Type for 2 by 2 matrices of integers + * + * @typedef ibz_mat_2x2_t + * + * Represented as a matrix of 2 vectors of 2 ibz_t (big integer) elements + */ +typedef ibz_t ibz_mat_2x2_t[2][2]; + +/** @brief Type for 4 by 4 matrices of integers + * + * @typedef ibz_mat_4x4_t + * + * Represented as a matrix of 4 vectors of 4 ibz_t (big integer) elements + */ +typedef ibz_t ibz_mat_4x4_t[4][4]; +/** + * @} + */ + +/** @defgroup quat_quat_t Types for quaternion algebras + * @{ + */ + +/** @brief Type for quaternion algebras + * + * @typedef quat_alg_t + * + * @struct quat_alg + * + * The quaternion algebra ramified at p = 3 mod 4 and ∞. + */ +typedef struct quat_alg +{ + ibz_t p; ///< Prime number, must be = 3 mod 4. +} quat_alg_t; + +/** @brief Type for quaternion algebra elements + * + * @typedef quat_alg_elem_t + * + * @struct quat_alg_elem + * + * Represented as a array *coord* of 4 ibz_t integers and a common ibz_t denominator *denom*. + * + * The representation is not necessarily normalized, that is, gcd(denom, content(coord)) might not + * be 1. 
1. For getting a normalized representation, use the quat_alg_normalize function. + * + * The elements are always represented in basis (1,i,j,ij) of the quaternion algebra, with i^2=-1 + * and j^2 = -p + */ +typedef struct quat_alg_elem +{ + ibz_t denom; ///< Denominator by which all coordinates are divided (big integer, must not be 0) + ibz_vec_4_t coord; ///< Numerators of the 4 coordinates of the quaternion algebra element in basis (1,i,j,ij) +} quat_alg_elem_t; + +/** @brief Type for lattices in dimension 4 + * + * @typedef quat_lattice_t + * + * @struct quat_lattice + * + * Represented as a rational (`frac`) times an integral lattice (`basis`) + * + * The basis is such that its columns divided by its denominator are elements of + * the quaternion algebra, represented in basis (1,i,j,ij) where i^2 = -1, j^2 = -p. + * + * All lattices must have full rank (4) + */ +typedef struct quat_lattice +{ + ibz_t denom; ///< Denominator by which the basis is divided (big integer, must not be 0) + ibz_mat_4x4_t basis; ///< Integer basis of the lattice (its columns divided by denom are + ///< algebra elements in the usual basis) +} quat_lattice_t; + +/** @brief Type for left ideals of maximal orders in quaternion algebras + * + * @typedef quat_left_ideal_t + * + * @struct quat_left_ideal + * + * The basis of the lattice representing it is such that its columns divided by its denominator are + * elements of the quaternion algebra, represented in basis (1,i,j,ij) where i^2 = -1, j^2 = -p. + */ +typedef struct quat_left_ideal +{ + quat_lattice_t lattice; ///< lattice representing the ideal + ibz_t norm; ///< norm of the lattice + const quat_lattice_t *parent_order; ///< should be a maximal order +} quat_left_ideal_t; +/** @} + */ + +/** @brief Type for extremal maximal orders + * + * @typedef quat_p_extremal_maximal_order_t + * + * @struct quat_p_extremal_maximal_order + * + * The basis of the order representing it is in Hermite normal form, and its columns divided + * by its denominator are elements of the quaternion algebra, represented in basis (1,z,t,tz) + * where z^2 = -q, t^2 = -p.
+*/ +typedef struct quat_p_extremal_maximal_order +{ + quat_lattice_t order; ///< the order represented as a lattice + quat_alg_elem_t z; ///< the element of small discriminant + quat_alg_elem_t t; ///< the element of norm p orthogonal to z + uint32_t q; ///< the absolute value of the square of z +} quat_p_extremal_maximal_order_t; + +/** @brief Type for represent-integer parameters + * + * @typedef quat_represent_integer_params_t + * + * @struct quat_represent_integer_params + * + */ +typedef struct quat_represent_integer_params +{ + int primality_test_iterations; ///< Primality test iterations + const quat_p_extremal_maximal_order_t *order; ///< The standard extremal maximal order + const quat_alg_t *algebra; ///< The quaternion algebra +} quat_represent_integer_params_t; + +/*************************** Functions *****************************/ + +/** @defgroup quat_c Constructors and Destructors + * @{ + */ +void quat_alg_init_set(quat_alg_t *alg, const ibz_t *p); +void quat_alg_finalize(quat_alg_t *alg); + +void quat_alg_elem_init(quat_alg_elem_t *elem); +void quat_alg_elem_finalize(quat_alg_elem_t *elem); + +void ibz_vec_2_init(ibz_vec_2_t *vec); +void ibz_vec_2_finalize(ibz_vec_2_t *vec); + +void ibz_vec_4_init(ibz_vec_4_t *vec); +void ibz_vec_4_finalize(ibz_vec_4_t *vec); + +void ibz_mat_2x2_init(ibz_mat_2x2_t *mat); +void ibz_mat_2x2_finalize(ibz_mat_2x2_t *mat); + +void ibz_mat_4x4_init(ibz_mat_4x4_t *mat); +void ibz_mat_4x4_finalize(ibz_mat_4x4_t *mat); + +void quat_lattice_init(quat_lattice_t *lat); +void quat_lattice_finalize(quat_lattice_t *lat); + +void quat_left_ideal_init(quat_left_ideal_t *lideal); +void quat_left_ideal_finalize(quat_left_ideal_t *lideal); +/** @} + */ + +/** @defgroup quat_printers Print functions for types from the quaternion module + * @{ + */ +void ibz_mat_2x2_print(const ibz_mat_2x2_t *mat); +void ibz_mat_4x4_print(const ibz_mat_4x4_t *mat); +void ibz_vec_2_print(const ibz_vec_2_t *vec); +void ibz_vec_4_print(const ibz_vec_4_t *vec); + +void quat_lattice_print(const quat_lattice_t *lat); +void quat_alg_print(const quat_alg_t *alg); +void quat_alg_elem_print(const quat_alg_elem_t *elem); +void quat_left_ideal_print(const quat_left_ideal_t *lideal); + +/** @} + */ + +/** @defgroup quat_int Integer functions for quaternion algebra + * @{ + */ + +/** @defgroup quat_int_mat Integer matrix and vector functions + * @{ + */ + +/** @brief Copy matrix + * + * @param copy Output: Matrix into which copied will be copied + * @param copied Input matrix to be copied + */ +void ibz_mat_2x2_copy(ibz_mat_2x2_t *copy, const ibz_mat_2x2_t *copied); + +/** + * @brief Inverse of 2x2 integer matrices modulo m + * + * @param inv Output matrix + * @param mat Input matrix + * @param m Integer modulus + * @return 1 if inverse exists, 0 otherwise + */ +int ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m); + +/** @brief mat*vec in dimension 2 for integers + * + * @param res Output vector + * @param mat Input matrix + * @param vec Input vector + */ +void ibz_mat_2x2_eval(ibz_vec_2_t *res, const ibz_mat_2x2_t *mat, const ibz_vec_2_t *vec); + +/** @brief Copies all values from a 4x4 integer matrix to another one + * + * @param new Output: matrix which will have its entries set to mat's entries + * @param mat Input matrix + */ +void ibz_mat_4x4_copy(ibz_mat_4x4_t *new, + const ibz_mat_4x4_t *mat); // dim4, lattice, test/dim4, ideal + +/** @brief Transpose a 4x4 integer matrix + * + * @param transposed Output: is set to the transposition of mat + * @param mat Input matrix + */
+void ibz_mat_4x4_transpose(ibz_mat_4x4_t *transposed, const ibz_mat_4x4_t *mat); + +/** @brief a*b for a,b integer 4x4 matrices + * + * Naive implementation + * + * @param res Output: A 4x4 integer matrix + * @param a + * @param b + */ +void ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t *b); + +/** @brief Divides all values in a matrix by the same scalar + * + * @returns 1 if scalar divided all values in mat, 0 otherwise (division is performed in both cases) + * @param quot Output + * @param scalar + * @param mat + */ +int ibz_mat_4x4_scalar_div(ibz_mat_4x4_t *quot, const ibz_t *scalar, const ibz_mat_4x4_t *mat); + +/** + * @brief mat*vec + * + * + * @param res Output: coordinate vector + * @param mat Integer 4x4 matrix + * @param vec Integer vector (coordinate vector) + * + * Multiplies 4x4 integer matrix mat by a 4-integers column vector vec + */ +void ibz_mat_4x4_eval(ibz_vec_4_t *res, const ibz_mat_4x4_t *mat, const ibz_vec_4_t *vec); + +/** + * @brief vec*mat + * + * + * @param res Output: coordinate vector. + * @param vec Integer vector (coordinate vector) + * @param mat Integer 4x4 matrix + * + * Multiplies 4x4 integer matrix mat by a 4-integers row vector vec (on the left) + */ +void ibz_mat_4x4_eval_t(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_mat_4x4_t *mat); + +/** @} + */ + +/** @defgroup quat_integer Higher-level integer functions for quaternion algebra + * @{ + */ + +/** + * @brief Generates a random prime + * + * A number is accepted as prime if it passes a 30-round Miller-Rabin test. + * This function is fairly inefficient and mostly meant for tests. + * + * @returns 1 if a prime is found, 0 otherwise + * @param p Output: The prime (if found) + * @param is3mod4 If 1, the prime is required to be 3 mod 4, if 0 no congruence condition is imposed + * @param bitsize Maximal size (in bits) of the output prime + * @param probability_test_iterations Miller-Rabin iterations for probabilistic primality testing in + * rejection sampling + */ +int ibz_generate_random_prime(ibz_t *p, int is3mod4, int bitsize, int probability_test_iterations); + +/** + * @brief Find integers x and y such that x^2 + n*y^2 = p + * + * Uses Cornacchia's algorithm; should be used only for prime p + * + * @param x Output + * @param y Output + * @param n First parameter defining the equation + * @param p Second parameter defining the equation, must be prime + * @return 1 if success, 0 otherwise + */ +int ibz_cornacchia_prime(ibz_t *x, ibz_t *y, const ibz_t *n, const ibz_t *p); + +/** @} + */ + +/** @defgroup quat_qf Quadratic form functions + * @{ + */ + +/** + * @brief Quadratic form evaluation + * + * qf and coord must be represented in the same basis.
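+ * (the evaluation computes res = coord^T * qf * coord, i.e. the value of the form given by the 4x4 Gram matrix qf)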
+ * + * @param res Output: coordinate vector + * @param qf Quadratic form (4x4 integer matrix) + * @param coord Integer vector (coordinate vector) + */ +void quat_qf_eval(ibz_t *res, const ibz_mat_4x4_t *qf, const ibz_vec_4_t *coord); +/** @} + */ + +/** @} + */ + +/** @defgroup quat_quat_f Quaternion algebra functions + * @{ + */ +/** + * @brief Copies an algebra element + * + * @param copy Output: The element into which another one is copied + * @param copied Source element copied into copy + */ +void quat_alg_elem_copy(quat_alg_elem_t *copy, const quat_alg_elem_t *copied); + +void quat_alg_mul(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b, const quat_alg_t *alg); + +/** @brief Reduced norm of alg_elem x + * + * @param res_num Output: rational which will contain the numerator of the reduced norm of x + * @param res_denom Output: rational which will contain the denominator of the reduced norm of x (it + * is 1 if the norm is integer) + * @param x Algebra element whose norm is computed + * @param alg The quaternion algebra + */ +void quat_alg_norm(ibz_t *res_num, ibz_t *res_denom, const quat_alg_elem_t *x, const quat_alg_t *alg); + +/** @brief Normalize representation of alg_elem x + * + * @param x Algebra element whose representation will be normalized + * + * Modification of x. + * Sets coord and denom of x so that gcd(denom, content(coord))=1 + * without changing the value of x = (coord0/denom, coord1/denom, coord2/denom, coord3/denom). + */ +void quat_alg_normalize(quat_alg_elem_t *x); + +/** + * @brief Standard involution in a quaternion algebra + * + * @param conj Output: image of x by standard involution of the quaternion algebra alg + * @param x element of alg whose image is computed + */ +void quat_alg_conj(quat_alg_elem_t *conj, const quat_alg_elem_t *x); + +/** + * @brief Given `x` ∈ `order`, factor it into its primitive and imprimitive parts + * + * Given `x` ∈ `order`, return a coordinate vector `primitive_x` and an integer `content` + * such that `x` = `content` · Λ `primitive_x`, where Λ is the basis of `order` + * and `x` / `content` is primitive in `order`. + * + * @param primitive_x Output: coordinates of a primitive element of `order` (in `order`'s basis) + * @param content Output: content of `x`'s coordinate vector in order's basis + * @param order order of `alg` + * @param x element of order, must be in `order` + */ +void quat_alg_make_primitive(ibz_vec_4_t *primitive_x, + ibz_t *content, + const quat_alg_elem_t *x, + const quat_lattice_t *order); + +// end quat_quat_f +/** @} + */ + +/** @defgroup quat_lat_f Lattice functions + * @{ + */ + +void quat_lattice_intersect(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2); + +/** + * @brief Test whether x ∈ lat. If so, compute its coordinates in lat's basis. + * + * @param coord Output: Set to the coordinates of x in lat. May be NULL. + * @param lat The lattice, not necessarily in HNF but full rank + * @param x An element of the quaternion algebra + * @return true if x ∈ lat + */ +int quat_lattice_contains(ibz_vec_4_t *coord, const quat_lattice_t *lat, const quat_alg_elem_t *x); + +/** + * @brief Conjugate of a lattice with basis not in HNF + * + * @param conj Output: The lattice conjugate to lat.
ATTENTION: is not under HNF + * @param lat Input lattice + */ +void quat_lattice_conjugate_without_hnf(quat_lattice_t *conj, const quat_lattice_t *lat); + +/** + * @brief Multiply a lattice and an algebra element + * + * The element is multiplied to the right of the lattice + * + * @param prod Output: Lattice lat*elem + * @param lat Input lattice + * @param elem Algebra element + * @param alg The quaternion algebra + */ +void quat_lattice_alg_elem_mul(quat_lattice_t *prod, + const quat_lattice_t *lat, + const quat_alg_elem_t *elem, + const quat_alg_t *alg); // ideal + +/** + * @brief Sample from the intersection of a lattice with a ball + * + * Sample a uniform non-zero vector of norm ≤ `radius` from the lattice. + * + * @param res Output: sampled quaternion from the lattice + * @param lattice Input lattice + * @param alg The quaternion algebra + * @param radius The ball radius (quaternion norm) + * @return 0 if an error occurred (ball too small or RNG error), 1 otherwise + */ +int quat_lattice_sample_from_ball(quat_alg_elem_t *res, + const quat_lattice_t *lattice, + const quat_alg_t *alg, + const ibz_t *radius); + +// end quat_lat_f +/** @} + */ + +/** @defgroup quat_lideal_f Functions for left ideals + * @{ + */ + +/** @defgroup quat_lideal_c Creating left ideals + * @{ + */ + +/** + * @brief Left ideal of order, generated by x and N as order*x+order*N + * + * @param lideal Output: left ideal + * @param alg quaternion algebra + * @param order maximal order of alg whose left ideal is searched + * @param x generating element. Must be non-zero + * @param N generating integer + * + * Creates the left ideal in order generated by the element x and the integer N. + * If x is not divisible (inside the order) by any integer divisor n>1 of N, + * then the norm of the output ideal is N. + * + */ +void quat_lideal_create(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const ibz_t *N, + const quat_lattice_t *order, + const quat_alg_t *alg); + +/** @} + */ + +/** @defgroup quat_lideal_gen Generators of left ideals + * @{ + */ + +/** + * @brief Generator of 'lideal' + * + * @returns 1 if such a generator was found, 0 otherwise + * @param gen Output: non scalar generator of lideal + * @param lideal left ideal + * @param alg the quaternion algebra + * + * Ideal is generated by gen and the ideal's norm + * + * Bound has as default value QUATERNION_lideal_generator_search_bound + */ +int quat_lideal_generator(quat_alg_elem_t *gen, const quat_left_ideal_t *lideal, const quat_alg_t *alg); +/** @} + */ + +/** @defgroup quat_lideal_op Operations on left ideals + * @{ + */ + +/** + * @brief Copies an ideal + * + * @param copy Output: The ideal into which another one is copied + * @param copied Source ideal copied into copy. The parent order is not copied (only the pointer). 
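+ * Both ideals therefore share the same parent order object, which must stay valid for as long as the copy is used.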
+ */ +void quat_lideal_copy(quat_left_ideal_t *copy, const quat_left_ideal_t *copied); + +/** + * @brief Conjugate of a left ideal (not in HNF) + * + * @param conj Output: Ideal conjugate to lideal, with norm and parent order correctly set, but its + * lattice not in HNF + * @param new_parent_order Output: Will be set to the right order of lideal, and serve as parent + * order for conj (so must have at least the lifetime of conj) + * @param lideal input left ideal (of which conj will be the conjugate) + * @param alg the quaternion algebra + */ +void quat_lideal_conjugate_without_hnf(quat_left_ideal_t *conj, + quat_lattice_t *new_parent_order, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg); + +/** + * @brief Intersection of two left ideals + * + * @param intersection Output: Left ideal which is the intersection of the 2 inputs + * @param lideal1 left ideal + * @param lideal2 left ideal + * @param alg the quaternion algebra + */ +void quat_lideal_inter(quat_left_ideal_t *intersection, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); + +/** + * @brief L2-reduce the basis of the left ideal, without considering its denominator + * + * This function reduces the basis of the lattice of the ideal, but it completely ignores its + * denominator. So the outputs of this function must still be divided by the appropriate power of + * lideal.lattice.denom. + * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param reduced Output: Lattice defining the ideal, which has its basis in an LLL-reduced form. + * Must be divided by lideal.lattice.denom before usage + * @param gram Output: Matrix of the quadratic form given by the norm on the basis of the reduced + * ideal, divided by the norm of the ideal + * @param lideal ideal whose basis will be reduced + * @param alg the quaternion algebra + */ +void quat_lideal_reduce_basis(ibz_mat_4x4_t *reduced, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg); // replaces lideal_lll + +/** + * @brief Multiplies two ideals and L2-reduces the lattice of the result + * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param prod Output: The product ideal with its lattice basis being L2-reduced + * @param gram Output: Gram matrix of the reduced norm (as quadratic but not bilinear form) on the + * basis of prod, divided by the norm of prod + * @param lideal1 Ideal at left in the product + * @param lideal2 Ideal at right in the product + * @param alg The quaternion algebra + */ +void quat_lideal_lideal_mul_reduced(quat_left_ideal_t *prod, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); + +/** + * @brief Replaces an ideal by a smaller equivalent one of prime norm + * + * @returns 1 if the computation succeeded and 0 otherwise + * @param lideal In- and Output: Ideal to be replaced + * @param alg The quaternion algebra + * @param primality_num_iter number of repetitions for primality testing + * @param equiv_bound_coeff bound on the coefficients for the candidates + */ +int quat_lideal_prime_norm_reduced_equivalent(quat_left_ideal_t *lideal, + const quat_alg_t *alg, + const int primality_num_iter, + const int equiv_bound_coeff); + +/** @}
+ */ + +// end quat_lideal_f +/** @} + */ + +/** @defgroup quat_normeq Functions specific to special extremal maximal orders + * @{ + */ + +/** + * @brief Representing an integer by the quadratic norm form of a maximal extremal order + * + * @returns 1 if the computation succeeded + * @param gamma Output: a quaternion element + * @param n_gamma Target norm of gamma. n_gamma must be odd. If n_gamma/(p*params.order->q) < + * 2^QUAT_repres_bound_input failure is likely + * @param non_diag If set to 1 (instead of 0) and the order is O0, an additional property is ensured + * @param params Represent integer parameters specifying the algebra, the special extremal order, + * the number of trials for finding gamma and the number of iterations of the primality test. + * Special requirements apply if non_diag is set to 1 + * + * This algorithm finds a primitive quaternion element gamma of norm n_gamma inside any maximal extremal + * order. Failure is possible. Most efficient for the standard order. + * + * If non_diag is set to 1, this algorithm finds a primitive quaternion element gamma of norm n_gamma + * with some special properties used in fixed-degree isogeny computations, inside any maximal extremal + * order such that params->order->q = 1 mod 4. Failure is possible. Most efficient for the standard order. + * The most important property is to avoid diagonal isogenies, meaning that the gamma returned by the + * algorithm must not be contained inside ZZ + 2 O where O is the maximal order params->order. When O + * is the special order O0 corresponding to j=1728, we further need to avoid endomorphisms of E0xE0, + * and there is another requirement. + * + * If non_diag is set to 1, the number of trials for finding gamma (in params) and the number of + * iterations of the primality test are subject to special requirements, and the value of + * params->order->q is required to be 1 mod 4 + */ +int quat_represent_integer(quat_alg_elem_t *gamma, + const ibz_t *n_gamma, + int non_diag, + const quat_represent_integer_params_t *params); + +/** @brief Basis change to (1,i,(i+j)/2,(1+ij)/2) for elements of O0 + * + * Change the basis in which an element is given from (1,i,j,ij) to (1,i,(i+j)/2,(1+ij)/2), the usual + * basis of the special maximal order O0. Only for elements of O0 + * + * @param vec Output: Coordinates of el in basis (1,i,(i+j)/2,(1+ij)/2) + * @param el Input: An algebra element in O0 + */ +void quat_change_to_O0_basis(ibz_vec_4_t *vec, const quat_alg_elem_t *el); + +/** + * @brief Random O0-ideal of given norm + * + * Much faster if norm is prime and is_prime is set to 1 + * + * @param lideal Output: O0-ideal of norm norm + * @param norm Norm of the ideal to be found + * @param is_prime Indicates if norm is prime: 1 if it is, 0 otherwise + * @param params Represent integer parameters from the level-dependent constants + * @param prime_cofactor Prime distinct from the prime p defining the algebra but of similar size + * and coprime to norm. If is_prime is 1, it might be NULL.
+ * @returns 1 if success, 0 if no ideal found or randomness failed + */ +int quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, + const ibz_t *norm, + int is_prime, + const quat_represent_integer_params_t *params, + const ibz_t *prime_cofactor); +// end quat_normeq +/** @} + */ +// end quat_quat +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion_constants.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion_constants.h new file mode 100644 index 0000000000..a2f4b52b93 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion_constants.h @@ -0,0 +1,6 @@ +#include +#define QUAT_primality_num_iter 32 +#define QUAT_repres_bound_input 21 +#define QUAT_equiv_bound_coeff 64 +#define FINDUV_box_size 3 +#define FINDUV_cube_size 2400 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion_data.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion_data.c new file mode 100644 index 0000000000..24402255d4 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion_data.c @@ -0,0 +1,3626 @@ +#include +#include +#include +const ibz_t QUAT_prime_cofactor = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x171,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x8000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x171,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x80000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x171,0x0,0x0,0x0,0x0,0x8000000000000000}}} +#endif +; +const quat_alg_t QUATALG_PINFTY = { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x40ff}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x40ffffff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0x40ffffffffffffff}}} +#endif +}; +const quat_p_extremal_maximal_order_t EXTREMAL_ORDERS[8] = {{{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 1}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xafcc,0xe7ed,0x58f3,0x3e59,0x9763,0x88d6,0xf2c5,0x4a6d,0x3afe,0xf44b,0x7d27,0x3c31}}} +#elif 
GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe7edafcc,0x3e5958f3,0x88d69763,0x4a6df2c5,0xf44b3afe,0x3c317d27}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x3e5958f3e7edafcc,0x4a6df2c588d69763,0x3c317d27f44b3afe}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xafcc,0xe7ed,0x58f3,0x3e59,0x9763,0x88d6,0xf2c5,0x4a6d,0x3afe,0xf44b,0x7d27,0x3c31}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe7edafcc,0x3e5958f3,0x88d69763,0x4a6df2c5,0xf44b3afe,0x3c317d27}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x3e5958f3e7edafcc,0x4a6df2c588d69763,0x3c317d27f44b3afe}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd7e6,0xf3f6,0xac79,0x9f2c,0x4bb1,0xc46b,0xf962,0x2536,0x9d7f,0xfa25,0xbe93,0x1e18}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf3f6d7e6,0x9f2cac79,0xc46b4bb1,0x2536f962,0xfa259d7f,0x1e18be93}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x9f2cac79f3f6d7e6,0x2536f962c46b4bb1,0x1e18be93fa259d7f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fb7,0xea91,0xa3ea,0x2a21,0x9cd1,0x26b3,0xde73,0xa2d3,0xcecc,0x20a1,0xc963,0x266b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea912fb7,0x2a21a3ea,0x26b39cd1,0xa2d3de73,0x20a1cecc,0x266bc963}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x2a21a3eaea912fb7,0xa2d3de7326b39cd1,0x266bc96320a1cecc}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e5f,0x4aa8,0x9064,0x8436,0x8fae,0x50ab,0x2fd8,0xdd15,0x617a,0x8343,0x9423,0x3d7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x680}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4aa81e5f,0x84369064,0x50ab8fae,0xdd152fd8,0x8343617a,0x3d79423,0x0,0x0,0x0,0x0,0x0,0x6800000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x843690644aa81e5f,0xdd152fd850ab8fae,0x3d794238343617a,0x0,0x0,0x680000000000000}}} +#endif +}, { 
+#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd7e6,0xf3f6,0xac79,0x9f2c,0x4bb1,0xc46b,0xf962,0x2536,0x9d7f,0xfa25,0xbe93,0x1e18}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf3f6d7e6,0x9f2cac79,0xc46b4bb1,0x2536f962,0xfa259d7f,0x1e18be93}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x9f2cac79f3f6d7e6,0x2536f962c46b4bb1,0x1e18be93fa259d7f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x1e5f,0x4aa8,0x9064,0x8436,0x8fae,0x50ab,0x2fd8,0xdd15,0x617a,0x8343,0x9423,0x3d7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x4aa81e5f,0x84369064,0x50ab8fae,0xdd152fd8,0x8343617a,0x3d79423}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x843690644aa81e5f,0xdd152fd850ab8fae,0x3d794238343617a}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xafcc,0xe7ed,0x58f3,0x3e59,0x9763,0x88d6,0xf2c5,0x4a6d,0x3afe,0xf44b,0x7d27,0x3c31}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe7edafcc,0x3e5958f3,0x88d69763,0x4a6df2c5,0xf44b3afe,0x3c317d27}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x3e5958f3e7edafcc,0x4a6df2c588d69763,0x3c317d27f44b3afe}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fb7,0xea91,0xa3ea,0x2a21,0x9cd1,0x26b3,0xde73,0xa2d3,0xcecc,0x20a1,0xc963,0x266b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea912fb7,0x2a21a3ea,0x26b39cd1,0xa2d3de73,0x20a1cecc,0x266bc963}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x2a21a3eaea912fb7,0xa2d3de7326b39cd1,0x266bc96320a1cecc}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 5}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc40,0xb21f,0x5bc7,0x8dca,0xc2a2,0xca0a,0xc8b1,0xbddd,0xcb7d,0xc9d5,0xa9d,0x3cc0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb21f0c40,0x8dca5bc7,0xca0ac2a2,0xbdddc8b1,0xc9d5cb7d,0x3cc00a9d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x8dca5bc7b21f0c40,0xbdddc8b1ca0ac2a2,0x3cc00a9dc9d5cb7d}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc40,0xb21f,0x5bc7,0x8dca,0xc2a2,0xca0a,0xc8b1,0xbddd,0xcb7d,0xc9d5,0xa9d,0x3cc0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb21f0c40,0x8dca5bc7,0xca0ac2a2,0xbdddc8b1,0xc9d5cb7d,0x3cc00a9d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x8dca5bc7b21f0c40,0xbdddc8b1ca0ac2a2,0x3cc00a9dc9d5cb7d}}} +#endif +, +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8620,0xd90f,0x2de3,0x46e5,0x6151,0xe505,0xe458,0xdeee,0xe5be,0xe4ea,0x54e,0x1e60}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd90f8620,0x46e52de3,0xe5056151,0xdeeee458,0xe4eae5be,0x1e60054e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x46e52de3d90f8620,0xdeeee458e5056151,0x1e60054ee4eae5be}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6801,0x9f6f,0x4e1e,0xbd0b,0x5c5e,0xaa12,0xb4c6,0x3849,0xd6c7,0x2d76,0x3227,0xb106}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9f6f6801,0xbd0b4e1e,0xaa125c5e,0x3849b4c6,0x2d76d6c7,0xb1063227}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbd0b4e1e9f6f6801,0x3849b4c6aa125c5e,0xb10632272d76d6c7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8400,0x4135,0x343c,0xa4cf,0x6603,0xa414,0xc207,0x5ac7,0x921b,0xd084,0x1ed,0x6cf,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x280}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x41358400,0xa4cf343c,0xa4146603,0x5ac7c207,0xd084921b,0x6cf01ed,0x0,0x0,0x0,0x0,0x0,0x2800000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xa4cf343c41358400,0x5ac7c207a4146603,0x6cf01edd084921b,0x0,0x0,0x280000000000000}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8620,0xd90f,0x2de3,0x46e5,0x6151,0xe505,0xe458,0xdeee,0xe5be,0xe4ea,0x54e,0x1e60}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, 
._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd90f8620,0x46e52de3,0xe5056151,0xdeeee458,0xe4eae5be,0x1e60054e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x46e52de3d90f8620,0xdeeee458e5056151,0x1e60054ee4eae5be}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x8400,0x4135,0x343c,0xa4cf,0x6603,0xa414,0xc207,0x5ac7,0x921b,0xd084,0x1ed,0x6cf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x41358400,0xa4cf343c,0xa4146603,0x5ac7c207,0xd084921b,0x6cf01ed}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xa4cf343c41358400,0x5ac7c207a4146603,0x6cf01edd084921b}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc40,0xb21f,0x5bc7,0x8dca,0xc2a2,0xca0a,0xc8b1,0xbddd,0xcb7d,0xc9d5,0xa9d,0x3cc0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb21f0c40,0x8dca5bc7,0xca0ac2a2,0xbdddc8b1,0xc9d5cb7d,0x3cc00a9d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x8dca5bc7b21f0c40,0xbdddc8b1ca0ac2a2,0x3cc00a9dc9d5cb7d}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6801,0x9f6f,0x4e1e,0xbd0b,0x5c5e,0xaa12,0xb4c6,0x3849,0xd6c7,0x2d76,0x3227,0xb106}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9f6f6801,0xbd0b4e1e,0xaa125c5e,0x3849b4c6,0x2d76d6c7,0xb1063227}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbd0b4e1e9f6f6801,0x3849b4c6aa125c5e,0xb10632272d76d6c7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 
0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 13}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x5b40,0x7328,0xdb38,0x5357,0x465b,0x31d8,0x1f3,0x85cf,0x32b9,0xd8dc,0x6f6d,0x3dab,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x73285b40,0x5357db38,0x31d8465b,0x85cf01f3,0xd8dc32b9,0x3dab6f6d,0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5357db3873285b40,0x85cf01f331d8465b,0x3dab6f6dd8dc32b9,0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x5b40,0x7328,0xdb38,0x5357,0x465b,0x31d8,0x1f3,0x85cf,0x32b9,0xd8dc,0x6f6d,0x3dab,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x73285b40,0x5357db38,0x31d8465b,0x85cf01f3,0xd8dc32b9,0x3dab6f6d,0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5357db3873285b40,0x85cf01f331d8465b,0x3dab6f6dd8dc32b9,0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x2da0,0x3994,0xed9c,0xa9ab,0x232d,0x98ec,0x80f9,0xc2e7,0x195c,0xec6e,0xb7b6,0x1ed5,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x39942da0,0xa9abed9c,0x98ec232d,0xc2e780f9,0xec6e195c,0x1ed5b7b6,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa9abed9c39942da0,0xc2e780f998ec232d,0x1ed5b7b6ec6e195c,0x1}}} +#endif +, +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0xb7ef,0x4ddc,0x58cc,0xe284,0xc4a7,0xb9ed,0xdca9,0xc383,0xc3dd,0x5a13,0xd2bc,0x7663,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0x4ddcb7ef,0xe28458cc,0xb9edc4a7,0xc383dca9,0x5a13c3dd,0x7663d2bc,0x3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe28458cc4ddcb7ef,0xc383dca9b9edc4a7,0x7663d2bc5a13c3dd,0x3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xdc07,0x925a,0x605a,0x9489,0x475b,0x7944,0x880f,0x65fa,0xed5a,0x329c,0x13f8,0x78f2,0xfffe,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x207f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x925adc07,0x9489605a,0x7944475b,0x65fa880f,0x329ced5a,0x78f213f8,0xfffffffe,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x207fffff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9489605a925adc07,0x65fa880f7944475b,0x78f213f8329ced5a,0xfffffffffffffffe,0xffffffffffffffff,0x207fffffffffffff}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x2da0,0x3994,0xed9c,0xa9ab,0x232d,0x98ec,0x80f9,0xc2e7,0x195c,0xec6e,0xb7b6,0x1ed5,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x39942da0,0xa9abed9c,0x98ec232d,0xc2e780f9,0xec6e195c,0x1ed5b7b6,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa9abed9c39942da0,0xc2e780f998ec232d,0x1ed5b7b6ec6e195c,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9c07,0x5ca4,0xc660,0xc2e5,0x94d7,0x2b1d,0x3b32,0xa3de,0x67a4,0x2fd3,0xfeab,0x1a11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x5ca49c07,0xc2e5c660,0x2b1d94d7,0xa3de3b32,0x2fd367a4,0x1a11feab}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xc2e5c6605ca49c07,0xa3de3b322b1d94d7,0x1a11feab2fd367a4}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x5b40,0x7328,0xdb38,0x5357,0x465b,0x31d8,0x1f3,0x85cf,0x32b9,0xd8dc,0x6f6d,0x3dab,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x73285b40,0x5357db38,0x31d8465b,0x85cf01f3,0xd8dc32b9,0x3dab6f6d,0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5357db3873285b40,0x85cf01f331d8465b,0x3dab6f6dd8dc32b9,0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0xb7ef,0x4ddc,0x58cc,0xe284,0xc4a7,0xb9ed,0xdca9,0xc383,0xc3dd,0x5a13,0xd2bc,0x7663,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0x4ddcb7ef,0xe28458cc,0xb9edc4a7,0xc383dca9,0x5a13c3dd,0x7663d2bc,0x3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe28458cc4ddcb7ef,0xc383dca9b9edc4a7,0x7663d2bc5a13c3dd,0x3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, 
._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 17}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = 
(mp_limb_t[]) {0xe8e2,0xea6f,0x72f1,0x2e52,0x152a,0xc137,0x5fe4,0xfd0e,0x9736,0x7a1,0xfa3d,0xc6b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xea6fe8e2,0x2e5272f1,0xc137152a,0xfd0e5fe4,0x7a19736,0xc6bfa3d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x2e5272f1ea6fe8e2,0xfd0e5fe4c137152a,0xc6bfa3d07a19736}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x9a15,0x48a0,0x16ae,0xa42,0x3772,0x534a,0x26a7,0x2f5e,0xce7c,0x39eb,0xa365,0x745c,0x6a25,0xa257,0x2576,0x576a,0x76a2,0x6a25,0xa257,0x2576,0x576a,0x76a2,0x6a25,0x657}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x48a09a15,0xa4216ae,0x534a3772,0x2f5e26a7,0x39ebce7c,0x745ca365,0xa2576a25,0x576a2576,0x6a2576a2,0x2576a257,0x76a2576a,0x6576a25}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xa4216ae48a09a15,0x2f5e26a7534a3772,0x745ca36539ebce7c,0x576a2576a2576a25,0x2576a2576a2576a2,0x6576a2576a2576a}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} 
+#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x50e5,0x2533,0xb03b,0x2c45,0xfde,0xaaf1,0xafff,0x8c73,0xebfd,0xfb3,0xc7bc,0x26}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x253350e5,0x2c45b03b,0xaaf10fde,0x8c73afff,0xfb3ebfd,0x26c7bc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x2c45b03b253350e5,0x8c73afffaaf10fde,0x26c7bc0fb3ebfd}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xe8e2,0xea6f,0x72f1,0x2e52,0x152a,0xc137,0x5fe4,0xfd0e,0x9736,0x7a1,0xfa3d,0xc6b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xea6fe8e2,0x2e5272f1,0xc137152a,0xfd0e5fe4,0x7a19736,0xc6bfa3d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x2e5272f1ea6fe8e2,0xfd0e5fe4c137152a,0xc6bfa3d07a19736}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 
64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 41}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb000,0x6067,0x56e7,0x950c,0xb3d,0x28e6,0x1bfb,0xb990,0xeb8,0x7184,0x2273,0x29aa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6067b000,0x950c56e7,0x28e60b3d,0xb9901bfb,0x71840eb8,0x29aa2273}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x950c56e76067b000,0xb9901bfb28e60b3d,0x29aa227371840eb8}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb000,0x6067,0x56e7,0x950c,0xb3d,0x28e6,0x1bfb,0xb990,0xeb8,0x7184,0x2273,0x29aa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6067b000,0x950c56e7,0x28e60b3d,0xb9901bfb,0x71840eb8,0x29aa2273}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x950c56e76067b000,0xb9901bfb28e60b3d,0x29aa227371840eb8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd800,0xb033,0x2b73,0xca86,0x59e,0x9473,0xdfd,0x5cc8,0x75c,0xb8c2,0x1139,0x14d5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb033d800,0xca862b73,0x9473059e,0x5cc80dfd,0xb8c2075c,0x14d51139}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xca862b73b033d800,0x5cc80dfd9473059e,0x14d51139b8c2075c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xffff,0xef7f,0x6120,0xdec9,0x3d80,0xfcb4,0xe8d7,0x2d72,0x4077,0xeecc,0xcd2a,0x4bc9,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xef7fffff,0xdec96120,0xfcb43d80,0x2d72e8d7,0xeecc4077,0x4bc9cd2a,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdec96120ef7fffff,0x2d72e8d7fcb43d80,0x4bc9cd2aeecc4077,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -24, ._mp_d = (mp_limb_t[]) {0x73ba,0x1227,0x9519,0xedfb,0x605b,0xe80,0x1a20,0xf0b2,0xb418,0xa90c,0xb325,0xefd6,0x7e3e,0xf8fc,0xe3f1,0x8fc7,0x3f1f,0xfc7e,0xf1f8,0xc7e3,0x1f8f,0x7e3f,0xf8fc,0x71}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x122773ba,0xedfb9519,0xe80605b,0xf0b21a20,0xa90cb418,0xefd6b325,0xf8fc7e3e,0x8fc7e3f1,0xfc7e3f1f,0xc7e3f1f8,0x7e3f1f8f,0x71f8fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xedfb9519122773ba,0xf0b21a200e80605b,0xefd6b325a90cb418,0x8fc7e3f1f8fc7e3e,0xc7e3f1f8fc7e3f1f,0x71f8fc7e3f1f8f}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd800,0xb033,0x2b73,0xca86,0x59e,0x9473,0xdfd,0x5cc8,0x75c,0xb8c2,0x1139,0x14d5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb033d800,0xca862b73,0x9473059e,0x5cc80dfd,0xb8c2075c,0x14d51139}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xca862b73b033d800,0x5cc80dfd9473059e,0x14d51139b8c2075c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x73ba,0x8a7,0x681e,0x130f,0xeee3,0xd966,0x4ebe,0xf78b,0xba4d,0xfa9,0xc409,0x245}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x8a773ba,0x130f681e,0xd966eee3,0xf78b4ebe,0xfa9ba4d,0x245c409}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x130f681e08a773ba,0xf78b4ebed966eee3,0x245c4090fa9ba4d}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xb000,0x6067,0x56e7,0x950c,0xb3d,0x28e6,0x1bfb,0xb990,0xeb8,0x7184,0x2273,0x29aa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6067b000,0x950c56e7,0x28e60b3d,0xb9901bfb,0x71840eb8,0x29aa2273}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x950c56e76067b000,0xb9901bfb28e60b3d,0x29aa227371840eb8}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xffff,0xef7f,0x6120,0xdec9,0x3d80,0xfcb4,0xe8d7,0x2d72,0x4077,0xeecc,0xcd2a,0x4bc9,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xef7fffff,0xdec96120,0xfcb43d80,0x2d72e8d7,0xeecc4077,0x4bc9cd2a,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdec96120ef7fffff,0x2d72e8d7fcb43d80,0x4bc9cd2aeecc4077,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 73}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4fae,0x9faa,0x2b8a,0x6a69,0x436c,0x633a,0x7892,0x301c,0xec62,0xcb54,0xe41,0xac50}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9faa4fae,0x6a692b8a,0x633a436c,0x301c7892,0xcb54ec62,0xac500e41}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x6a692b8a9faa4fae,0x301c7892633a436c,0xac500e41cb54ec62}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -24, ._mp_d = (mp_limb_t[]) {0x30b3,0xeb66,0x87b7,0x617e,0x27c,0xfa7,0xdcf4,0x90c8,0x7e8b,0x9e3c,0xaf36,0xb7ba,0x5eeb,0xbaf7,0xbdd7,0x75ee,0x7baf,0xebdd,0xf75e,0xd7ba,0xeebd,0xaf75,0xdd7b,0x2eb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xeb6630b3,0x617e87b7,0xfa7027c,0x90c8dcf4,0x9e3c7e8b,0xb7baaf36,0xbaf75eeb,0x75eebdd7,0xebdd7baf,0xd7baf75e,0xaf75eebd,0x2ebdd7b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) 
{0x617e87b7eb6630b3,0x90c8dcf40fa7027c,0xb7baaf369e3c7e8b,0x75eebdd7baf75eeb,0xd7baf75eebdd7baf,0x2ebdd7baf75eebd}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xb5ab,0x986,0x1b92,0x5123,0x4b2a,0x653b,0x4896,0xc0fd,0x579e,0xc06c,0xd20e,0xf7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x986b5ab,0x51231b92,0x653b4b2a,0xc0fd4896,0xc06c579e,0xf7d20e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x51231b920986b5ab,0xc0fd4896653b4b2a,0xf7d20ec06c579e}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4fae,0x9faa,0x2b8a,0x6a69,0x436c,0x633a,0x7892,0x301c,0xec62,0xcb54,0xe41,0xac50}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9faa4fae,0x6a692b8a,0x633a436c,0x301c7892,0xcb54ec62,0xac500e41}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x6a692b8a9faa4fae,0x301c7892633a436c,0xac500e41cb54ec62}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 89}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xa194,0x6df0,0x4b4c,0xf874,0xb43e,0x362a,0x11bb,0x84f2,0xc623,0x61a2,0x7d42,0xe501,0x30}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x6df0a194,0xf8744b4c,0x362ab43e,0x84f211bb,0x61a2c623,0xe5017d42,0x30}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8744b4c6df0a194,0x84f211bb362ab43e,0xe5017d4261a2c623,0x30}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xa194,0x6df0,0x4b4c,0xf874,0xb43e,0x362a,0x11bb,0x84f2,0xc623,0x61a2,0x7d42,0xe501,0x30}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x6df0a194,0xf8744b4c,0x362ab43e,0x84f211bb,0x61a2c623,0xe5017d42,0x30}}} +#elif GMP_LIMB_BITS == 
64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8744b4c6df0a194,0x84f211bb362ab43e,0xe5017d4261a2c623,0x30}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x50ca,0x36f8,0x25a6,0x7c3a,0x5a1f,0x9b15,0x8dd,0xc279,0x6311,0x30d1,0xbea1,0x7280,0x18}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x36f850ca,0x7c3a25a6,0x9b155a1f,0xc27908dd,0x30d16311,0x7280bea1,0x18}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7c3a25a636f850ca,0xc27908dd9b155a1f,0x7280bea130d16311,0x18}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x83f7,0x63ae,0x245e,0x2154,0x883c,0x544b,0x8f96,0x1b2d,0xcc0c,0x8d73,0x7bdd,0x118e,0x1df}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x63ae83f7,0x2154245e,0x544b883c,0x1b2d8f96,0x8d73cc0c,0x118e7bdd,0x1df}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2154245e63ae83f7,0x1b2d8f96544b883c,0x118e7bdd8d73cc0c,0x1df}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -24, ._mp_d = (mp_limb_t[]) {0xbd79,0x489c,0xbd84,0xce46,0x9344,0xb194,0x642a,0x3c5a,0xdb04,0x96f5,0x6e1f,0x4dcb,0xff6e,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x207f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x489cbd79,0xce46bd84,0xb1949344,0x3c5a642a,0x96f5db04,0x4dcb6e1f,0xffffff6e,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x207fffff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xce46bd84489cbd79,0x3c5a642ab1949344,0x4dcb6e1f96f5db04,0xffffffffffffff6e,0xffffffffffffffff,0x207fffffffffffff}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 
+{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x50ca,0x36f8,0x25a6,0x7c3a,0x5a1f,0x9b15,0x8dd,0xc279,0x6311,0x30d1,0xbea1,0x7280,0x18}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x36f850ca,0x7c3a25a6,0x9b155a1f,0xc27908dd,0x30d16311,0x7280bea1,0x18}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7c3a25a636f850ca,0xc27908dd9b155a1f,0x7280bea130d16311,0x18}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0xa1c9,0x3fda,0x577,0x71a8,0xf4d3,0x4269,0xecf2,0x2a5d,0x41b6,0x6e41,0x47e5,0x782c,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0x3fdaa1c9,0x71a80577,0x4269f4d3,0x2a5decf2,0x6e4141b6,0x782c47e5,0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x71a805773fdaa1c9,0x2a5decf24269f4d3,0x782c47e56e4141b6,0x2}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xa194,0x6df0,0x4b4c,0xf874,0xb43e,0x362a,0x11bb,0x84f2,0xc623,0x61a2,0x7d42,0xe501,0x30}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x6df0a194,0xf8744b4c,0x362ab43e,0x84f211bb,0x61a2c623,0xe5017d42,0x30}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8744b4c6df0a194,0x84f211bb362ab43e,0xe5017d4261a2c623,0x30}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x83f7,0x63ae,0x245e,0x2154,0x883c,0x544b,0x8f96,0x1b2d,0xcc0c,0x8d73,0x7bdd,0x118e,0x1df}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x63ae83f7,0x2154245e,0x544b883c,0x1b2d8f96,0x8d73cc0c,0x118e7bdd,0x1df}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x2154245e63ae83f7,0x1b2d8f96544b883c,0x118e7bdd8d73cc0c,0x1df}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 97}}; +const quat_left_ideal_t CONNECTING_IDEALS[8] = {{{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d 
= (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) 
{0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xbf5a,0x9a6f,0xcde1,0x21d4,0x52b1,0xe7a0,0xf3ba,0x78eb,0xc45c,0x787f,0x5c29,0x1c51,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x9a6fbf5a,0x21d4cde1,0xe7a052b1,0x78ebf3ba,0x787fc45c,0x1c515c29,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x21d4cde19a6fbf5a,0x78ebf3bae7a052b1,0x1c515c29787fc45c,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8015,0xfd5c,0xb508,0x1437,0xfa92,0x6222,0x1452,0xa79a,0x6c31,0xd3a9,0xb3c4,0x15c5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfd5c8015,0x1437b508,0x6222fa92,0xa79a1452,0xd3a96c31,0x15c5b3c4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x1437b508fd5c8015,0xa79a14526222fa92,0x15c5b3c4d3a96c31}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xbf5a,0x9a6f,0xcde1,0x21d4,0x52b1,0xe7a0,0xf3ba,0x78eb,0xc45c,0x787f,0x5c29,0x1c51,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x9a6fbf5a,0x21d4cde1,0xe7a052b1,0x78ebf3ba,0x787fc45c,0x1c515c29,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x21d4cde19a6fbf5a,0x78ebf3bae7a052b1,0x1c515c29787fc45c,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x3f45,0x9d13,0x18d8,0xd9d,0x581f,0x857d,0xdf68,0xd151,0x582a,0xa4d6,0xa864,0x68b,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x9d133f45,0xd9d18d8,0x857d581f,0xd151df68,0xa4d6582a,0x68ba864,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0xd9d18d89d133f45,0xd151df68857d581f,0x68ba864a4d6582a,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfad,0xcd37,0x66f0,0x90ea,0x2958,0x73d0,0xf9dd,0x3c75,0xe22e,0xbc3f,0xae14,0x8e28}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xcd37dfad,0x90ea66f0,0x73d02958,0x3c75f9dd,0xbc3fe22e,0x8e28ae14}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x90ea66f0cd37dfad,0x3c75f9dd73d02958,0x8e28ae14bc3fe22e}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x9d7a,0x920e,0xe71,0xc120,0x8fbf,0x607e,0x29f,0xff55,0x7422,0x4796,0xbca4,0x125b,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x920e9d7a,0xc1200e71,0x607e8fbf,0xff55029f,0x47967422,0x125bbca4,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc1200e71920e9d7a,0xff55029f607e8fbf,0x125bbca447967422,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbcbf,0x76ed,0xc538,0xec53,0xeb88,0xb40d,0xa54e,0x14f,0x8bb2,0x300a,0xedb2,0x539}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x76edbcbf,0xec53c538,0xb40deb88,0x14fa54e,0x300a8bb2,0x539edb2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xec53c53876edbcbf,0x14fa54eb40deb88,0x539edb2300a8bb2}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x9d7a,0x920e,0xe71,0xc120,0x8fbf,0x607e,0x29f,0xff55,0x7422,0x4796,0xbca4,0x125b,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x920e9d7a,0xc1200e71,0x607e8fbf,0xff55029f,0x47967422,0x125bbca4,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc1200e71920e9d7a,0xff55029f607e8fbf,0x125bbca447967422,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xe0bb,0x1b20,0x4939,0xd4cc,0xa436,0xac70,0x5d50,0xfe05,0xe870,0x178b,0xcef2,0xd21,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x1b20e0bb,0xd4cc4939,0xac70a436,0xfe055d50,0x178be870,0xd21cef2,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd4cc49391b20e0bb,0xfe055d50ac70a436,0xd21cef2178be870,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif 
GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4ebd,0xc907,0x738,0xe090,0x47df,0xb03f,0x814f,0x7faa,0x3a11,0x23cb,0xde52,0x892d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc9074ebd,0xe0900738,0xb03f47df,0x7faa814f,0x23cb3a11,0x892dde52}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe0900738c9074ebd,0x7faa814fb03f47df,0x892dde5223cb3a11}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xd282,0xcb1f,0x6532,0xe33e,0x153d,0xfd8,0x4275,0x2b62,0xf17d,0xdb04,0x3f12,0xf722,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xcb1fd282,0xe33e6532,0xfd8153d,0x2b624275,0xdb04f17d,0xf7223f12,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe33e6532cb1fd282,0x2b6242750fd8153d,0xf7223f12db04f17d,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, 
+#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x50bf,0xeebf,0xe944,0xea4d,0x76d,0xcbc5,0x4919,0x12b0,0x71f3,0x9e30,0x3304,0x1265}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xeebf50bf,0xea4de944,0xcbc5076d,0x12b04919,0x9e3071f3,0x12653304}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xea4de944eebf50bf,0x12b04919cbc5076d,0x126533049e3071f3}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xd282,0xcb1f,0x6532,0xe33e,0x153d,0xfd8,0x4275,0x2b62,0xf17d,0xdb04,0x3f12,0xf722,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xcb1fd282,0xe33e6532,0xfd8153d,0x2b624275,0xdb04f17d,0xf7223f12,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe33e6532cb1fd282,0x2b6242750fd8153d,0xf7223f12db04f17d,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x81c3,0xdc60,0x7bed,0xf8f0,0xdcf,0x4413,0xf95b,0x18b1,0x7f8a,0x3cd4,0xc0e,0xe4bd,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xdc6081c3,0xf8f07bed,0x44130dcf,0x18b1f95b,0x3cd47f8a,0xe4bd0c0e,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8f07beddc6081c3,0x18b1f95b44130dcf,0xe4bd0c0e3cd47f8a,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, 
{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe941,0x658f,0x3299,0xf19f,0xa9e,0x87ec,0x213a,0x95b1,0x78be,0x6d82,0x1f89,0xfb91}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x658fe941,0xf19f3299,0x87ec0a9e,0x95b1213a,0x6d8278be,0xfb911f89}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xf19f3299658fe941,0x95b1213a87ec0a9e,0xfb911f896d8278be}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x60fb,0xd399,0x887f,0xd263,0xe0e7,0xb202,0x699b,0xea34,0x5a15,0x4b8a,0x6763,0x8e95}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd39960fb,0xd263887f,0xb202e0e7,0xea34699b,0x4b8a5a15,0x8e956763}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xd263887fd39960fb,0xea34699bb202e0e7,0x8e9567634b8a5a15}}} +#endif +}, { 
+#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7edf,0xd82a,0x4c38,0xa9b9,0x663f,0xb4af,0xb83e,0x8f97,0x898d,0x9b3,0x342a,0x1298}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd82a7edf,0xa9b94c38,0xb4af663f,0x8f97b83e,0x9b3898d,0x1298342a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xa9b94c38d82a7edf,0x8f97b83eb4af663f,0x1298342a09b3898d}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 
+{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xe00e,0xd869,0x1a76,0xd8de,0xfe4c,0xabc5,0x99e1,0xf264,0x7d83,0x9c3,0x32ab,0xb60b,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xd869e00e,0xd8de1a76,0xabc5fe4c,0xf26499e1,0x9c37d83,0xb60b32ab,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8de1a76d869e00e,0xf26499e1abc5fe4c,0xb60b32ab09c37d83,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xb00f,0x8bbf,0x19a9,0xd6b,0xf7b,0xcd5c,0x74e7,0xd7e2,0xa419,0x3593,0x56a8,0x8de8,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x8bbfb00f,0xd6b19a9,0xcd5c0f7b,0xd7e274e7,0x3593a419,0x8de856a8,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd6b19a98bbfb00f,0xd7e274e7cd5c0f7b,0x8de856a83593a419,0x1}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xe00e,0xd869,0x1a76,0xd8de,0xfe4c,0xabc5,0x99e1,0xf264,0x7d83,0x9c3,0x32ab,0xb60b,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xd869e00e,0xd8de1a76,0xabc5fe4c,0xf26499e1,0x9c37d83,0xb60b32ab,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8de1a76d869e00e,0xf26499e1abc5fe4c,0xb60b32ab09c37d83,0x1}}} 
+#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fff,0x4caa,0xcd,0xcb73,0xeed1,0xde69,0x24f9,0x1a82,0xd96a,0xd42f,0xdc02,0x2822}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4caa2fff,0xcb7300cd,0xde69eed1,0x1a8224f9,0xd42fd96a,0x2822dc02}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcb7300cd4caa2fff,0x1a8224f9de69eed1,0x2822dc02d42fd96a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf007,0x6c34,0xd3b,0x6c6f,0xff26,0xd5e2,0x4cf0,0xf932,0xbec1,0x84e1,0x9955,0xdb05}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6c34f007,0x6c6f0d3b,0xd5e2ff26,0xf9324cf0,0x84e1bec1,0xdb059955}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d 
= (mp_limb_t[]) {0x6c6f0d3b6c34f007,0xf9324cf0d5e2ff26,0xdb05995584e1bec1}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3a91,0xcd01,0xac55,0x9a52,0x9887,0x118f,0x4dec,0x4245,0xd869,0x1022,0x1d16,0x7ad}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xcd013a91,0x9a52ac55,0x118f9887,0x42454dec,0x1022d869,0x7ad1d16}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x9a52ac55cd013a91,0x42454dec118f9887,0x7ad1d161022d869}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4095,0x6a9f,0x1c86,0xfd81,0xe6a7,0xc52d,0xbb45,0xdbac,0x50ae,0x3a1b,0x87b,0x673a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6a9f4095,0xfd811c86,0xc52de6a7,0xdbacbb45,0x3a1b50ae,0x673a087b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xfd811c866a9f4095,0xdbacbb45c52de6a7,0x673a087b3a1b50ae}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe5ca,0x3b34,0xb04b,0x430f,0xe795,0xa04a,0x8c7d,0xec47,0x77df,0x8e5c,0xb71e,0xd31f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3b34e5ca,0x430fb04b,0xa04ae795,0xec478c7d,0x8e5c77df,0xd31fb71e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = 
(mp_limb_t[]) {0x430fb04b3b34e5ca,0xec478c7da04ae795,0xd31fb71e8e5c77df}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4d27,0x98d5,0x3839,0x83ff,0x48b7,0x4d5b,0xc95b,0xbe45,0x9d44,0x36f3,0x4d57,0x6c26}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x98d54d27,0x83ff3839,0x4d5b48b7,0xbe45c95b,0x36f39d44,0x6c264d57}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x83ff383998d54d27,0xbe45c95b4d5b48b7,0x6c264d5736f39d44}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe5ca,0x3b34,0xb04b,0x430f,0xe795,0xa04a,0x8c7d,0xec47,0x77df,0x8e5c,0xb71e,0xd31f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3b34e5ca,0x430fb04b,0xa04ae795,0xec478c7d,0x8e5c77df,0xd31fb71e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x430fb04b3b34e5ca,0xec478c7da04ae795,0xd31fb71e8e5c77df}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x98a3,0xa25f,0x7811,0xbf10,0x9edd,0x52ef,0xc322,0x2e01,0xda9b,0x5768,0x69c7,0x66f9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xa25f98a3,0xbf107811,0x52ef9edd,0x2e01c322,0x5768da9b,0x66f969c7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbf107811a25f98a3,0x2e01c32252ef9edd,0x66f969c75768da9b}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 
+{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x72e5,0x9d9a,0xd825,0xa187,0x73ca,0xd025,0xc63e,0xf623,0x3bef,0x472e,0xdb8f,0x698f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9d9a72e5,0xa187d825,0xd02573ca,0xf623c63e,0x472e3bef,0x698fdb8f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xa187d8259d9a72e5,0xf623c63ed02573ca,0x698fdb8f472e3bef}}} +#endif +, &MAXORD_O0}}; +const quat_alg_elem_t CONJUGATING_ELEMENTS[8] = {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x8015,0xfd5c,0xb508,0x1437,0xfa92,0x6222,0x1452,0xa79a,0x6c31,0xd3a9,0xb3c4,0x15c5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xfd5c8015,0x1437b508,0x6222fa92,0xa79a1452,0xd3a96c31,0x15c5b3c4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x1437b508fd5c8015,0xa79a14526222fa92,0x15c5b3c4d3a96c31}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x8015,0xfd5c,0xb508,0x1437,0xfa92,0x6222,0x1452,0xa79a,0x6c31,0xd3a9,0xb3c4,0x15c5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xfd5c8015,0x1437b508,0x6222fa92,0xa79a1452,0xd3a96c31,0x15c5b3c4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x1437b508fd5c8015,0xa79a14526222fa92,0x15c5b3c4d3a96c31}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xbcbf,0x76ed,0xc538,0xec53,0xeb88,0xb40d,0xa54e,0x14f,0x8bb2,0x300a,0xedb2,0x539}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x76edbcbf,0xec53c538,0xb40deb88,0x14fa54e,0x300a8bb2,0x539edb2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xec53c53876edbcbf,0x14fa54eb40deb88,0x539edb2300a8bb2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xbcbf,0x76ed,0xc538,0xec53,0xeb88,0xb40d,0xa54e,0x14f,0x8bb2,0x300a,0xedb2,0x539}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x76edbcbf,0xec53c538,0xb40deb88,0x14fa54e,0x300a8bb2,0x539edb2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xec53c53876edbcbf,0x14fa54eb40deb88,0x539edb2300a8bb2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = 
(mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x2341,0xb9df,0x4e77,0xcd8c,0x1cab,0xdb9d,0x8b8e,0x3e12,0x6370,0x7935,0x7217,0x987,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xb9df2341,0xcd8c4e77,0xdb9d1cab,0x3e128b8e,0x79356370,0x9877217,0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcd8c4e77b9df2341,0x3e128b8edb9d1cab,0x987721779356370,0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0x2341,0xb9df,0x4e77,0xcd8c,0x1cab,0xdb9d,0x8b8e,0x3e12,0x6370,0x7935,0x7217,0x987,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0xb9df2341,0xcd8c4e77,0xdb9d1cab,0x3e128b8e,0x79356370,0x9877217,0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xcd8c4e77b9df2341,0x3e128b8edb9d1cab,0x987721779356370,0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, 
._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x2fff,0x4caa,0xcd,0xcb73,0xeed1,0xde69,0x24f9,0x1a82,0xd96a,0xd42f,0xdc02,0x2822}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x4caa2fff,0xcb7300cd,0xde69eed1,0x1a8224f9,0xd42fd96a,0x2822dc02}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xcb7300cd4caa2fff,0x1a8224f9de69eed1,0x2822dc02d42fd96a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fff,0x4caa,0xcd,0xcb73,0xeed1,0xde69,0x24f9,0x1a82,0xd96a,0xd42f,0xdc02,0x2822}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4caa2fff,0xcb7300cd,0xde69eed1,0x1a8224f9,0xd42fd96a,0x2822dc02}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcb7300cd4caa2fff,0x1a8224f9de69eed1,0x2822dc02d42fd96a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) 
{0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x4385,0xf091,0xe8e9,0xfaa3,0x7d60,0x8ab7,0x68b2,0x8a57,0x2754,0xa10c,0x6f20,0x71e4,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xf0914385,0xfaa3e8e9,0x8ab77d60,0x8a5768b2,0xa10c2754,0x71e46f20,0x4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfaa3e8e9f0914385,0x8a5768b28ab77d60,0x71e46f20a10c2754,0x4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x4385,0xf091,0xe8e9,0xfaa3,0x7d60,0x8ab7,0x68b2,0x8a57,0x2754,0xa10c,0x6f20,0x71e4,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xf0914385,0xfaa3e8e9,0x8ab77d60,0x8a5768b2,0xa10c2754,0x71e46f20,0x4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfaa3e8e9f0914385,0x8a5768b28ab77d60,0x71e46f20a10c2754,0x4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +#endif +}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion_data.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion_data.h new file mode 100644 index 0000000000..740da6e507 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion_data.h @@ -0,0 +1,12 @@ +#include +#define MAXORD_O0 (EXTREMAL_ORDERS->order) +#define STANDARD_EXTREMAL_ORDER (EXTREMAL_ORDERS[0]) +#define NUM_ALTERNATE_EXTREMAL_ORDERS 7 +#define ALTERNATE_EXTREMAL_ORDERS (EXTREMAL_ORDERS+1) +#define ALTERNATE_CONNECTING_IDEALS (CONNECTING_IDEALS+1) +#define ALTERNATE_CONJUGATING_ELEMENTS (CONJUGATING_ELEMENTS+1) +extern const ibz_t QUAT_prime_cofactor; +extern const quat_alg_t QUATALG_PINFTY; +extern const quat_p_extremal_maximal_order_t EXTREMAL_ORDERS[8]; +extern const quat_left_ideal_t CONNECTING_IDEALS[8]; +extern const quat_alg_elem_t CONJUGATING_ELEMENTS[8]; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_arm64crypto.h 
b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_arm64crypto.h
new file mode 100644
index 0000000000..88c4bf48d0
--- /dev/null
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_arm64crypto.h
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: Apache-2.0
+
+#ifndef RANDOMBYTES_ARM64CRYPTO_H
+#define RANDOMBYTES_ARM64CRYPTO_H
+
+#include
+
+#define RNG_SUCCESS 0
+#define RNG_BAD_MAXLEN -1
+#define RNG_BAD_OUTBUF -2
+#define RNG_BAD_REQ_LEN -3
+
+typedef struct {
+    unsigned char buffer[16];
+    int buffer_pos;
+    unsigned long length_remaining;
+    unsigned char key[32];
+    unsigned char ctr[16];
+} AES_XOF_struct;
+
+typedef struct {
+    unsigned char Key[32];
+    unsigned char V[16];
+    int reseed_counter;
+} AES256_CTR_DRBG_struct;
+
+#endif /* RANDOMBYTES_ARM64CRYPTO_H */
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_ctrdrbg_aesni.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_ctrdrbg_aesni.c
new file mode 100644
index 0000000000..3fc67acfb6
--- /dev/null
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_ctrdrbg_aesni.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: Apache-2.0 and Unknown
+//
+/*
+NIST-developed software is provided by NIST as a public service. You may use,
+copy, and distribute copies of the software in any medium, provided that you
+keep intact this entire notice. You may improve, modify, and create derivative
+works of the software or any portion of the software, and you may copy and
+distribute such modifications or works. Modified works should carry a notice
+stating that you changed the software and should note the date and nature of any
+such change. Please explicitly acknowledge the National Institute of Standards
+and Technology as the source of the software.
+
+NIST-developed software is expressly provided "AS IS." NIST MAKES NO WARRANTY OF
+ANY KIND, EXPRESS, IMPLIED, IN FACT, OR ARISING BY OPERATION OF LAW, INCLUDING,
+WITHOUT LIMITATION, THE IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE, NON-INFRINGEMENT, AND DATA ACCURACY. NIST NEITHER REPRESENTS
+NOR WARRANTS THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR
+ERROR-FREE, OR THAT ANY DEFECTS WILL BE CORRECTED. NIST DOES NOT WARRANT OR MAKE
+ANY REPRESENTATIONS REGARDING THE USE OF THE SOFTWARE OR THE RESULTS THEREOF,
+INCLUDING BUT NOT LIMITED TO THE CORRECTNESS, ACCURACY, RELIABILITY, OR
+USEFULNESS OF THE SOFTWARE.
+
+You are solely responsible for determining the appropriateness of using and
+distributing the software and you assume all risks associated with its use,
+including but not limited to the risks and costs of program errors, compliance
+with applicable laws, damage to or loss of data, programs or equipment, and the
+unavailability or interruption of operation. This software is not intended to be
+used in any situation where a failure could cause risk of injury or damage to
+property. The software developed by NIST employees is not subject to copyright
+protection within the United States.
+*/
+
+#include
+
+#include
+#include "ctr_drbg.h"
+
+#ifdef ENABLE_CT_TESTING
+#include
+#endif
+
+#define RNG_SUCCESS 0
+#define RNG_BAD_MAXLEN -1
+#define RNG_BAD_OUTBUF -2
+#define RNG_BAD_REQ_LEN -3
+
+CTR_DRBG_STATE drbg;
+
+#ifndef CTRDRBG_TEST_BENCH
+static
+#endif
+void
+randombytes_init_aes_ni(unsigned char *entropy_input,
+ unsigned char *personalization_string,
+ int security_strength) {
+ (void)security_strength; // fixed to 256
+ CTR_DRBG_init(&drbg, entropy_input, personalization_string,
+ (personalization_string == NULL) ? 0 : CTR_DRBG_ENTROPY_LEN);
+}
+
+#ifndef CTRDRBG_TEST_BENCH
+static
+#endif
+int
+randombytes_aes_ni(unsigned char *x, size_t xlen) {
+ CTR_DRBG_generate(&drbg, x, xlen, NULL, 0);
+ return RNG_SUCCESS;
+}
+
+#ifdef RANDOMBYTES_AES_NI
+SQISIGN_API
+int randombytes(unsigned char *random_array, unsigned long long nbytes) {
+ int ret = randombytes_aes_ni(random_array, nbytes);
+#ifdef ENABLE_CT_TESTING
+ VALGRIND_MAKE_MEM_UNDEFINED(random_array, nbytes); // mark the whole output buffer, not the (always-zero) status code
+#endif
+ return ret;
+}
+
+SQISIGN_API
+void randombytes_init(unsigned char *entropy_input,
+ unsigned char *personalization_string,
+ int security_strength) {
+ randombytes_init_aes_ni(entropy_input, personalization_string,
+ security_strength);
+}
+#endif
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_system.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_system.c
new file mode 100644
index 0000000000..689c29b242
--- /dev/null
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_system.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: MIT
+
+/*
+The MIT License
+Copyright (c) 2017 Daan Sprenkels
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/
+
+#include
+
+#ifdef ENABLE_CT_TESTING
+#include
+#endif
+
+// In the case that we are compiling on Linux, we need to define _GNU_SOURCE
+// *before* randombytes.h is included. Otherwise SYS_getrandom will not be
+// declared.
+#if defined(__linux__) || defined(__GNU__)
+#define _GNU_SOURCE
+#endif /* defined(__linux__) || defined(__GNU__) */
+
+#if defined(_WIN32)
+/* Windows */
+#include
+#include /* CryptAcquireContext, CryptGenRandom */
+#endif /* defined(_WIN32) */
+
+/* wasi */
+#if defined(__wasi__)
+#include
+#endif
+
+/* kFreeBSD */
+#if defined(__FreeBSD_kernel__) && defined(__GLIBC__)
+#define GNU_KFREEBSD
+#endif
+
+#if defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD)
+/* Linux */
+// We would need to include , but not every target has access
+// to the linux headers.
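+// (Editorial note, not from the upstream sources: when <linux/random.h> is
+// available, RNDGETENTCNT expands to _IOR('R', 0x00, int), which evaluates to
+// 0x80045200 on x86, x86-64 and aarch64; that is the same constant inlined
+// below.)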
We only need RNDGETENTCNT, so we instead inline it. +// RNDGETENTCNT is originally defined in `include/uapi/linux/random.h` in the +// linux repo. +#define RNDGETENTCNT 0x80045200 + +#include +#include +#include +#include +#include +#include +#include +#if (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && \ + ((__GLIBC__ > 2) || (__GLIBC_MINOR__ > 24)) +#define USE_GLIBC +#include +#endif /* (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && ((__GLIBC__ > 2) || \ + (__GLIBC_MINOR__ > 24)) */ +#include +#include +#include +#include + +// We need SSIZE_MAX as the maximum read len from /dev/urandom +#if !defined(SSIZE_MAX) +#define SSIZE_MAX (SIZE_MAX / 2 - 1) +#endif /* defined(SSIZE_MAX) */ + +#endif /* defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) */ + +#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) +/* Dragonfly, FreeBSD, NetBSD, OpenBSD (has arc4random) */ +#include +#if defined(BSD) +#include +#endif +/* GNU/Hurd defines BSD in sys/param.h which causes problems later */ +#if defined(__GNU__) +#undef BSD +#endif +#endif + +#if defined(__EMSCRIPTEN__) +#include +#include +#include +#include +#endif /* defined(__EMSCRIPTEN__) */ + +#if defined(_WIN32) +static int +randombytes_win32_randombytes(void *buf, size_t n) +{ + HCRYPTPROV ctx; + BOOL tmp; + DWORD to_read = 0; + const size_t MAX_DWORD = 0xFFFFFFFF; + + tmp = CryptAcquireContext(&ctx, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT); + if (tmp == FALSE) + return -1; + + while (n > 0) { + to_read = (DWORD)(n < MAX_DWORD ? n : MAX_DWORD); + tmp = CryptGenRandom(ctx, to_read, (BYTE *)buf); + if (tmp == FALSE) + return -1; + buf = ((char *)buf) + to_read; + n -= to_read; + } + + tmp = CryptReleaseContext(ctx, 0); + if (tmp == FALSE) + return -1; + + return 0; +} +#endif /* defined(_WIN32) */ + +#if defined(__wasi__) +static int +randombytes_wasi_randombytes(void *buf, size_t n) +{ + arc4random_buf(buf, n); + return 0; +} +#endif /* defined(__wasi__) */ + +#if (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || defined(SYS_getrandom)) +#if defined(USE_GLIBC) +// getrandom is declared in glibc. +#elif defined(SYS_getrandom) +static ssize_t +getrandom(void *buf, size_t buflen, unsigned int flags) +{ + return syscall(SYS_getrandom, buf, buflen, flags); +} +#endif + +static int +randombytes_linux_randombytes_getrandom(void *buf, size_t n) +{ + /* I have thought about using a separate PRF, seeded by getrandom, but + * it turns out that the performance of getrandom is good enough + * (250 MB/s on my laptop). + */ + size_t offset = 0, chunk; + int ret; + while (n > 0) { + /* getrandom does not allow chunks larger than 33554431 */ + chunk = n <= 33554431 ? 
n : 33554431;
+ do {
+ ret = getrandom((char *)buf + offset, chunk, 0);
+ } while (ret == -1 && errno == EINTR);
+ if (ret < 0)
+ return ret;
+ offset += ret;
+ n -= ret;
+ }
+ assert(n == 0);
+ return 0;
+}
+#endif /* (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || \ defined(SYS_getrandom)) */
+
+#if (defined(__linux__) || defined(GNU_KFREEBSD)) && !defined(SYS_getrandom)
+
+#if defined(__linux__)
+static int
+randombytes_linux_read_entropy_ioctl(int device, int *entropy)
+{
+ return ioctl(device, RNDGETENTCNT, entropy);
+}
+
+static int
+randombytes_linux_read_entropy_proc(FILE *stream, int *entropy)
+{
+ int retcode;
+ do {
+ rewind(stream);
+ retcode = fscanf(stream, "%d", entropy);
+ } while (retcode != 1 && errno == EINTR);
+ if (retcode != 1) {
+ return -1;
+ }
+ return 0;
+}
+
+static int
+randombytes_linux_wait_for_entropy(int device)
+{
+ /* We will block on /dev/random, because any increase in the OS' entropy
+ * level will unblock the request. I use poll here (as does libsodium),
+ * because we don't *actually* want to read from the device. */
+ enum
+ {
+ IOCTL,
+ PROC
+ } strategy = IOCTL;
+ const int bits = 128;
+ struct pollfd pfd;
+ int fd;
+ FILE *proc_file;
+ int retcode, retcode_error = 0; // Used as return codes throughout this function
+ int entropy = 0;
+
+ /* If the device has enough entropy already, we will want to return early */
+ retcode = randombytes_linux_read_entropy_ioctl(device, &entropy);
+ // printf("errno: %d (%s)\n", errno, strerror(errno));
+ if (retcode != 0 && (errno == ENOTTY || errno == ENOSYS)) {
+ // The ioctl call on /dev/urandom has failed due to a
+ // - ENOTTY (unsupported action), or
+ // - ENOSYS (invalid ioctl; this happens on MIPS, see #22).
+ //
+ // We will fall back to reading from
+ // `/proc/sys/kernel/random/entropy_avail`. This is less ideal,
+ // because it allocates a file descriptor, and it may not work
+ // in a chroot. But at this point it seems we have no better
+ // options left.
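+ // (Editorial note, not from the upstream sources:
+ // /proc/sys/kernel/random/entropy_avail reports the same estimate, in bits,
+ // that the RNDGETENTCNT ioctl returns, so the `entropy >= bits` check below
+ // works unchanged for either strategy.)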
+ strategy = PROC; + // Open the entropy count file + proc_file = fopen("/proc/sys/kernel/random/entropy_avail", "r"); + if (proc_file == NULL) { + return -1; + } + } else if (retcode != 0) { + // Unrecoverable ioctl error + return -1; + } + if (entropy >= bits) { + return 0; + } + + do { + fd = open("/dev/random", O_RDONLY); + } while (fd == -1 && errno == EINTR); /* EAGAIN will not occur */ + if (fd == -1) { + /* Unrecoverable IO error */ + return -1; + } + + pfd.fd = fd; + pfd.events = POLLIN; + for (;;) { + retcode = poll(&pfd, 1, -1); + if (retcode == -1 && (errno == EINTR || errno == EAGAIN)) { + continue; + } else if (retcode == 1) { + if (strategy == IOCTL) { + retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); + } else if (strategy == PROC) { + retcode = randombytes_linux_read_entropy_proc(proc_file, &entropy); + } else { + return -1; // Unreachable + } + + if (retcode != 0) { + // Unrecoverable I/O error + retcode_error = retcode; + break; + } + if (entropy >= bits) { + break; + } + } else { + // Unreachable: poll() should only return -1 or 1 + retcode_error = -1; + break; + } + } + do { + retcode = close(fd); + } while (retcode == -1 && errno == EINTR); + if (strategy == PROC) { + do { + retcode = fclose(proc_file); + } while (retcode == -1 && errno == EINTR); + } + if (retcode_error != 0) { + return retcode_error; + } + return retcode; +} +#endif /* defined(__linux__) */ + +static int +randombytes_linux_randombytes_urandom(void *buf, size_t n) +{ + int fd; + size_t offset = 0, count; + ssize_t tmp; + do { + fd = open("/dev/urandom", O_RDONLY); + } while (fd == -1 && errno == EINTR); + if (fd == -1) + return -1; +#if defined(__linux__) + if (randombytes_linux_wait_for_entropy(fd) == -1) + return -1; +#endif + + while (n > 0) { + count = n <= SSIZE_MAX ? 
n : SSIZE_MAX;
+ tmp = read(fd, (char *)buf + offset, count);
+ if (tmp == -1 && (errno == EAGAIN || errno == EINTR)) {
+ continue;
+ }
+ if (tmp == -1)
+ return -1; /* Unrecoverable IO error */
+ offset += tmp;
+ n -= tmp;
+ }
+ close(fd);
+ assert(n == 0);
+ return 0;
+}
+#endif /* defined(__linux__) && !defined(SYS_getrandom) */
+
+#if defined(BSD)
+static int
+randombytes_bsd_randombytes(void *buf, size_t n)
+{
+ arc4random_buf(buf, n);
+ return 0;
+}
+#endif /* defined(BSD) */
+
+#if defined(__EMSCRIPTEN__)
+static int
+randombytes_js_randombytes_nodejs(void *buf, size_t n)
+{
+ const int ret = EM_ASM_INT(
+ {
+ var crypto;
+ try {
+ crypto = require('crypto');
+ } catch (error) {
+ return -2;
+ }
+ try {
+ writeArrayToMemory(crypto.randomBytes($1), $0);
+ return 0;
+ } catch (error) {
+ return -1;
+ }
+ },
+ buf,
+ n);
+ switch (ret) {
+ case 0:
+ return 0;
+ case -1:
+ errno = EINVAL;
+ return -1;
+ case -2:
+ errno = ENOSYS;
+ return -1;
+ }
+ assert(false); // Unreachable
+}
+#endif /* defined(__EMSCRIPTEN__) */
+
+SQISIGN_API
+int
+randombytes_select(unsigned char *buf, unsigned long long n)
+{
+#if defined(__EMSCRIPTEN__)
+ return randombytes_js_randombytes_nodejs(buf, n);
+#elif defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD)
+#if defined(USE_GLIBC)
+ /* Use getrandom system call */
+ return randombytes_linux_randombytes_getrandom(buf, n);
+#elif defined(SYS_getrandom)
+ /* Use getrandom system call */
+ return randombytes_linux_randombytes_getrandom(buf, n);
+#else
+ /* When we have enough entropy, we can read from /dev/urandom */
+ return randombytes_linux_randombytes_urandom(buf, n);
+#endif
+#elif defined(BSD)
+ /* Use arc4random system call */
+ return randombytes_bsd_randombytes(buf, n);
+#elif defined(_WIN32)
+ /* Use windows API */
+ return randombytes_win32_randombytes(buf, n);
+#elif defined(__wasi__)
+ /* Use WASI */
+ return randombytes_wasi_randombytes(buf, n);
+#else
+#error "randombytes(...) is not supported on this platform"
+#endif
+}
+
+#ifdef RANDOMBYTES_SYSTEM
+SQISIGN_API
+int
+randombytes(unsigned char *x, unsigned long long xlen)
+{
+
+ int ret = randombytes_select(x, (size_t)xlen);
+#ifdef ENABLE_CT_TESTING
+ VALGRIND_MAKE_MEM_UNDEFINED(x, xlen);
+#endif
+ return ret;
+}
+
+SQISIGN_API
+void
+randombytes_init(unsigned char *entropy_input,
+ unsigned char *personalization_string,
+ int security_strength)
+{
+ (void)entropy_input;
+ (void)personalization_string;
+ (void)security_strength;
+}
+#endif
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/rng.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/rng.h
new file mode 100644
index 0000000000..0a9ca0e465
--- /dev/null
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/rng.h
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: Apache-2.0
+
+#ifndef rng_h
+#define rng_h
+
+#include
+
+/**
+ * Randombytes initialization.
+ * Initialization may be needed for some random number generators (e.g. CTR-DRBG).
+ *
+ * @param[in] entropy_input 48 bytes of entropy input
+ * @param[in] personalization_string Personalization string
+ * @param[in] security_strength Security strength
+ */
+SQISIGN_API
+void randombytes_init(unsigned char *entropy_input,
+ unsigned char *personalization_string,
+ int security_strength);
+
+/**
+ * Random byte generation using the platform's system RNG (e.g. getrandom or /dev/urandom).
+ * The caller is responsible for allocating sufficient memory to hold x.
+ *
+ * @param[out] x Memory to hold the random bytes.
+ * @param[in] xlen Number of random bytes to be generated
+ * @return int 0 on success, -1 otherwise
+ */
+SQISIGN_API
+int randombytes_select(unsigned char *x, unsigned long long xlen);
+
+/**
+ * Random byte generation.
+ * The caller is responsible for allocating sufficient memory to hold x.
+ *
+ * @param[out] x Memory to hold the random bytes.
+ * @param[in] xlen Number of random bytes to be generated
+ * @return int 0 on success, -1 otherwise
+ */
+SQISIGN_API
+int randombytes(unsigned char *x, unsigned long long xlen);
+
+#endif /* rng_h */
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sig.h
new file mode 100644
index 0000000000..4c33510084
--- /dev/null
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sig.h
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: Apache-2.0
+
+#ifndef SQISIGN_H
+#define SQISIGN_H
+
+#include
+#include
+
+#if defined(ENABLE_SIGN)
+/**
+ * SQIsign keypair generation.
+ *
+ * The implementation corresponds to SQIsign.CompactKeyGen() in the SQIsign spec.
+ * The caller is responsible for allocating sufficient memory to hold pk and sk.
+ *
+ * @param[out] pk SQIsign public key
+ * @param[out] sk SQIsign secret key
+ * @return int status code
+ */
+SQISIGN_API
+int sqisign_keypair(unsigned char *pk, unsigned char *sk);
+
+/**
+ * SQIsign signature generation.
+ *
+ * The implementation performs SQIsign.expandSK() + SQIsign.sign() in the SQIsign spec.
+ * The key provided is a compacted secret key.
+ * The caller is responsible for allocating sufficient memory to hold sm.
+ *
+ * @param[out] sm Signature concatenated with message
+ * @param[out] smlen Pointer to the length of sm
+ * @param[in] m Message to be signed
+ * @param[in] mlen Message length
+ * @param[in] sk Compacted secret key
+ * @return int status code
+ */
+SQISIGN_API
+int sqisign_sign(unsigned char *sm,
+ unsigned long long *smlen,
+ const unsigned char *m,
+ unsigned long long mlen,
+ const unsigned char *sk);
+#endif
+
+/**
+ * SQIsign open signature.
+ *
+ * The implementation performs SQIsign.verify(). If the signature verification succeeded, the
+ * original message is stored in m. The key provided is a compacted public key. The caller is
+ * responsible for allocating sufficient memory to hold m.
+ *
+ * @param[out] m Message stored if verification succeeds
+ * @param[out] mlen Pointer to the length of m
+ * @param[in] sm Signature concatenated with message
+ * @param[in] smlen Length of sm
+ * @param[in] pk Compacted public key
+ * @return int status code
+ */
+SQISIGN_API
+int sqisign_open(unsigned char *m,
+ unsigned long long *mlen,
+ const unsigned char *sm,
+ unsigned long long smlen,
+ const unsigned char *pk);
+
+/**
+ * SQIsign verify signature.
+ *
+ * Returns 0 if the signature verification succeeded, otherwise 1.
+ *
+ * @param[in] m Message that was signed
+ * @param[in] mlen Length of m
+ * @param[in] sig Signature
+ * @param[in] siglen Length of sig
+ * @param[in] pk Compacted public key
+ * @return int 0 if verification succeeded, 1 otherwise.
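+ *
+ * Illustrative call (editorial sketch, not taken from the upstream sources; the
+ * message and detached-signature buffers and their lengths are assumed to have
+ * been produced elsewhere by the caller):
+ *
+ *   if (sqisign_verify(msg, msg_len, sig_buf, sig_len, pk) == 0) {
+ *       // signature accepted
+ *   }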
+ */ +SQISIGN_API +int sqisign_verify(const unsigned char *m, + unsigned long long mlen, + const unsigned char *sig, + unsigned long long siglen, + const unsigned char *pk); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sign.c new file mode 100644 index 0000000000..9216bbe4d3 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sign.c @@ -0,0 +1,634 @@ +#include +#include +#include +#include +#include +#include + +// compute the commitment with ideal to isogeny clapotis +// and apply it to the basis of E0 (together with the multiplication by some scalar u) +static bool +commit(ec_curve_t *E_com, ec_basis_t *basis_even_com, quat_left_ideal_t *lideal_com) +{ + + bool found = false; + + found = quat_sampling_random_ideal_O0_given_norm(lideal_com, &COM_DEGREE, 1, &QUAT_represent_integer_params, NULL); + // replacing it with a shorter prime norm equivalent ideal + found = found && quat_lideal_prime_norm_reduced_equivalent( + lideal_com, &QUATALG_PINFTY, QUAT_primality_num_iter, QUAT_equiv_bound_coeff); + // ideal to isogeny clapotis + found = found && dim2id2iso_arbitrary_isogeny_evaluation(basis_even_com, E_com, lideal_com); + return found; +} + +static void +compute_challenge_ideal_signature(quat_left_ideal_t *lideal_chall_two, const signature_t *sig, const secret_key_t *sk) +{ + ibz_vec_2_t vec; + ibz_vec_2_init(&vec); + + // vec is a vector [1, chall_coeff] coefficients encoding the kernel of the challenge + // isogeny as B[0] + chall_coeff*B[1] where B is the canonical basis of the + // 2^TORSION_EVEN_POWER torsion of EA + ibz_set(&vec[0], 1); + ibz_copy_digit_array(&vec[1], sig->chall_coeff); + + // now we compute the ideal associated to the challenge + // for that, we need to find vec such that + // the kernel of the challenge isogeny is generated by vec[0]*B0[0] + vec[1]*B0[1] where B0 + // is the image through the secret key isogeny of the canonical basis E0 + ibz_mat_2x2_eval(&vec, &(sk->mat_BAcan_to_BA0_two), &vec); + + // lideal_chall_two is the pullback of the ideal challenge through the secret key ideal + id2iso_kernel_dlogs_to_ideal_even(lideal_chall_two, &vec, TORSION_EVEN_POWER); + assert(ibz_cmp(&lideal_chall_two->norm, &TORSION_PLUS_2POWER) == 0); + + ibz_vec_2_finalize(&vec); +} + +static void +sample_response(quat_alg_elem_t *x, const quat_lattice_t *lattice, const ibz_t *lattice_content) +{ + ibz_t bound; + ibz_init(&bound); + ibz_pow(&bound, &ibz_const_two, SQIsign_response_length); + ibz_sub(&bound, &bound, &ibz_const_one); + ibz_mul(&bound, &bound, lattice_content); + + int ok UNUSED = quat_lattice_sample_from_ball(x, lattice, &QUATALG_PINFTY, &bound); + assert(ok); + + ibz_finalize(&bound); +} + +static void +compute_response_quat_element(quat_alg_elem_t *resp_quat, + ibz_t *lattice_content, + const secret_key_t *sk, + const quat_left_ideal_t *lideal_chall_two, + const quat_left_ideal_t *lideal_commit) +{ + quat_left_ideal_t lideal_chall_secret; + quat_lattice_t lattice_hom_chall_to_com, lat_commit; + + // Init + quat_left_ideal_init(&lideal_chall_secret); + quat_lattice_init(&lat_commit); + quat_lattice_init(&lattice_hom_chall_to_com); + + // lideal_chall_secret = lideal_secret * lideal_chall_two + quat_lideal_inter(&lideal_chall_secret, lideal_chall_two, &(sk->secret_ideal), &QUATALG_PINFTY); + + // now we compute lideal_com_to_chall which is dual(Icom)* lideal_chall_secret + quat_lattice_conjugate_without_hnf(&lat_commit, &(lideal_commit->lattice)); + 
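+ // (Editorial note, not from the upstream sources: the intersection computed
+ // next contains exactly the quaternions x with x in lideal_chall_secret and
+ // conj(x) in lideal_commit; sample_response() below then draws a short such
+ // element as the response.)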
quat_lattice_intersect(&lattice_hom_chall_to_com, &lideal_chall_secret.lattice, &lat_commit); + + // sampling the smallest response + ibz_mul(lattice_content, &lideal_chall_secret.norm, &lideal_commit->norm); + sample_response(resp_quat, &lattice_hom_chall_to_com, lattice_content); + + // Clean up + quat_left_ideal_finalize(&lideal_chall_secret); + quat_lattice_finalize(&lat_commit); + quat_lattice_finalize(&lattice_hom_chall_to_com); +} + +static void +compute_backtracking_signature(signature_t *sig, quat_alg_elem_t *resp_quat, ibz_t *lattice_content, ibz_t *remain) +{ + uint_fast8_t backtracking; + ibz_t tmp; + ibz_init(&tmp); + + ibz_vec_4_t dummy_coord; + ibz_vec_4_init(&dummy_coord); + + quat_alg_make_primitive(&dummy_coord, &tmp, resp_quat, &MAXORD_O0); + ibz_mul(&resp_quat->denom, &resp_quat->denom, &tmp); + assert(quat_lattice_contains(NULL, &MAXORD_O0, resp_quat)); + + // the backtracking is the common part of the response and the challenge + // its degree is the scalar tmp computed above such that quat_resp is in tmp * O0. + backtracking = ibz_two_adic(&tmp); + sig->backtracking = backtracking; + + ibz_pow(&tmp, &ibz_const_two, backtracking); + ibz_div(lattice_content, remain, lattice_content, &tmp); + + ibz_finalize(&tmp); + ibz_vec_4_finalize(&dummy_coord); +} + +static uint_fast8_t +compute_random_aux_norm_and_helpers(signature_t *sig, + ibz_t *random_aux_norm, + ibz_t *degree_resp_inv, + ibz_t *remain, + const ibz_t *lattice_content, + quat_alg_elem_t *resp_quat, + quat_left_ideal_t *lideal_com_resp, + quat_left_ideal_t *lideal_commit) +{ + uint_fast8_t pow_dim2_deg_resp; + uint_fast8_t exp_diadic_val_full_resp; + + ibz_t tmp, degree_full_resp, degree_odd_resp, norm_d; + + // Init + ibz_init(°ree_full_resp); + ibz_init(°ree_odd_resp); + ibz_init(&norm_d); + ibz_init(&tmp); + + quat_alg_norm(°ree_full_resp, &norm_d, resp_quat, &QUATALG_PINFTY); + + // dividing by n(lideal_com) * n(lideal_secret_chall) + assert(ibz_is_one(&norm_d)); + ibz_div(°ree_full_resp, remain, °ree_full_resp, lattice_content); + assert(ibz_cmp(remain, &ibz_const_zero) == 0); + + // computing the diadic valuation + exp_diadic_val_full_resp = ibz_two_adic(°ree_full_resp); + sig->two_resp_length = exp_diadic_val_full_resp; + + // removing the power of two part + ibz_pow(&tmp, &ibz_const_two, exp_diadic_val_full_resp); + ibz_div(°ree_odd_resp, remain, °ree_full_resp, &tmp); + assert(ibz_cmp(remain, &ibz_const_zero) == 0); +#ifndef NDEBUG + ibz_pow(&tmp, &ibz_const_two, SQIsign_response_length - sig->backtracking); + assert(ibz_cmp(&tmp, °ree_odd_resp) > 0); +#endif + + // creating the ideal + quat_alg_conj(resp_quat, resp_quat); + + // setting the norm + ibz_mul(&tmp, &lideal_commit->norm, °ree_odd_resp); + quat_lideal_create(lideal_com_resp, resp_quat, &tmp, &MAXORD_O0, &QUATALG_PINFTY); + + // now we compute the ideal_aux + // computing the norm + pow_dim2_deg_resp = SQIsign_response_length - exp_diadic_val_full_resp - sig->backtracking; + ibz_pow(remain, &ibz_const_two, pow_dim2_deg_resp); + ibz_sub(random_aux_norm, remain, °ree_odd_resp); + + // multiplying by 2^HD_extra_torsion to account for the fact that + // we use extra torsion above the kernel + for (int i = 0; i < HD_extra_torsion; i++) + ibz_mul(remain, remain, &ibz_const_two); + + ibz_invmod(degree_resp_inv, °ree_odd_resp, remain); + + ibz_finalize(°ree_full_resp); + ibz_finalize(°ree_odd_resp); + ibz_finalize(&norm_d); + ibz_finalize(&tmp); + + return pow_dim2_deg_resp; +} + +static int +evaluate_random_aux_isogeny_signature(ec_curve_t *E_aux, + 
ec_basis_t *B_aux, + const ibz_t *norm, + const quat_left_ideal_t *lideal_com_resp) +{ + quat_left_ideal_t lideal_aux; + quat_left_ideal_t lideal_aux_resp_com; + + // Init + quat_left_ideal_init(&lideal_aux); + quat_left_ideal_init(&lideal_aux_resp_com); + + // sampling the ideal at random + int found = quat_sampling_random_ideal_O0_given_norm( + &lideal_aux, norm, 0, &QUAT_represent_integer_params, &QUAT_prime_cofactor); + + if (found) { + // pushing forward + quat_lideal_inter(&lideal_aux_resp_com, lideal_com_resp, &lideal_aux, &QUATALG_PINFTY); + + // now we evaluate this isogeny on the basis of E0 + found = dim2id2iso_arbitrary_isogeny_evaluation(B_aux, E_aux, &lideal_aux_resp_com); + + // Clean up + quat_left_ideal_finalize(&lideal_aux_resp_com); + quat_left_ideal_finalize(&lideal_aux); + } + + return found; +} + +static int +compute_dim2_isogeny_challenge(theta_couple_curve_with_basis_t *codomain, + theta_couple_curve_with_basis_t *domain, + const ibz_t *degree_resp_inv, + int pow_dim2_deg_resp, + int exp_diadic_val_full_resp, + int reduced_order) +{ + // now, we compute the isogeny Phi : Ecom x Eaux -> Echl' x Eaux' + // where Echl' is 2^exp_diadic_val_full_resp isogenous to Echal + // ker Phi = <(Bcom_can.P,Baux.P),(Bcom_can.Q,Baux.Q)> + + // preparing the domain + theta_couple_curve_t EcomXEaux; + copy_curve(&EcomXEaux.E1, &domain->E1); + copy_curve(&EcomXEaux.E2, &domain->E2); + + // preparing the kernel + theta_kernel_couple_points_t dim_two_ker; + copy_bases_to_kernel(&dim_two_ker, &domain->B1, &domain->B2); + + // dividing by the degree of the response + digit_t scalar[NWORDS_ORDER]; + ibz_to_digit_array(scalar, degree_resp_inv); + ec_mul(&dim_two_ker.T1.P2, scalar, reduced_order, &dim_two_ker.T1.P2, &EcomXEaux.E2); + ec_mul(&dim_two_ker.T2.P2, scalar, reduced_order, &dim_two_ker.T2.P2, &EcomXEaux.E2); + ec_mul(&dim_two_ker.T1m2.P2, scalar, reduced_order, &dim_two_ker.T1m2.P2, &EcomXEaux.E2); + + // and multiplying by 2^exp_diadic... 
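+ // (Editorial note, not from the upstream sources: each kernel couple point is
+ // doubled exp_diadic_val_full_resp times here; the 2-power part of the
+ // response that is dropped this way is handled separately afterwards by the
+ // small 2-isogeny chain in compute_small_chain_isogeny_signature().)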
+ double_couple_point_iter(&dim_two_ker.T1, exp_diadic_val_full_resp, &dim_two_ker.T1, &EcomXEaux); + double_couple_point_iter(&dim_two_ker.T2, exp_diadic_val_full_resp, &dim_two_ker.T2, &EcomXEaux); + double_couple_point_iter(&dim_two_ker.T1m2, exp_diadic_val_full_resp, &dim_two_ker.T1m2, &EcomXEaux); + + theta_couple_point_t pushed_points[3]; + theta_couple_point_t *const Tev1 = pushed_points + 0, *const Tev2 = pushed_points + 1, + *const Tev1m2 = pushed_points + 2; + + // Set points on the commitment curve + copy_point(&Tev1->P1, &domain->B1.P); + copy_point(&Tev2->P1, &domain->B1.Q); + copy_point(&Tev1m2->P1, &domain->B1.PmQ); + + // Zero points on the aux curve + ec_point_init(&Tev1->P2); + ec_point_init(&Tev2->P2); + ec_point_init(&Tev1m2->P2); + + theta_couple_curve_t codomain_product; + + // computation of the dim2 isogeny + if (!theta_chain_compute_and_eval_randomized(pow_dim2_deg_resp, + &EcomXEaux, + &dim_two_ker, + true, + &codomain_product, + pushed_points, + sizeof(pushed_points) / sizeof(*pushed_points))) + return 0; + + assert(test_couple_point_order_twof(Tev1, &codomain_product, reduced_order)); + + // Set the auxiliary curve + copy_curve(&codomain->E1, &codomain_product.E2); + + // Set the codomain curve from the dim 2 isogeny + // it should always be the first curve + copy_curve(&codomain->E2, &codomain_product.E1); + + // Set the evaluated basis points + copy_point(&codomain->B1.P, &Tev1->P2); + copy_point(&codomain->B1.Q, &Tev2->P2); + copy_point(&codomain->B1.PmQ, &Tev1m2->P2); + + copy_point(&codomain->B2.P, &Tev1->P1); + copy_point(&codomain->B2.Q, &Tev2->P1); + copy_point(&codomain->B2.PmQ, &Tev1m2->P1); + return 1; +} + +static int +compute_small_chain_isogeny_signature(ec_curve_t *E_chall_2, + ec_basis_t *B_chall_2, + const quat_alg_elem_t *resp_quat, + int pow_dim2_deg_resp, + int length) +{ + int ret = 1; + + ibz_t two_pow; + ibz_init(&two_pow); + + ibz_vec_2_t vec_resp_two; + ibz_vec_2_init(&vec_resp_two); + + quat_left_ideal_t lideal_resp_two; + quat_left_ideal_init(&lideal_resp_two); + + // computing the ideal + ibz_pow(&two_pow, &ibz_const_two, length); + + // we compute the generator of the challenge ideal + quat_lideal_create(&lideal_resp_two, resp_quat, &two_pow, &MAXORD_O0, &QUATALG_PINFTY); + + // computing the coefficients of the kernel in terms of the basis of O0 + id2iso_ideal_to_kernel_dlogs_even(&vec_resp_two, &lideal_resp_two); + + ec_point_t points[3]; + copy_point(&points[0], &B_chall_2->P); + copy_point(&points[1], &B_chall_2->Q); + copy_point(&points[2], &B_chall_2->PmQ); + + // getting down to the right order and applying the matrix + ec_dbl_iter_basis(B_chall_2, pow_dim2_deg_resp + HD_extra_torsion, B_chall_2, E_chall_2); + assert(test_basis_order_twof(B_chall_2, E_chall_2, length)); + + ec_point_t ker; + // applying the vector to find the kernel + ec_biscalar_mul_ibz_vec(&ker, &vec_resp_two, length, B_chall_2, E_chall_2); + assert(test_point_order_twof(&ker, E_chall_2, length)); + + // computing the isogeny and pushing the points + if (ec_eval_small_chain(E_chall_2, &ker, length, points, 3, true)) { + ret = 0; + } + + // copying the result + copy_point(&B_chall_2->P, &points[0]); + copy_point(&B_chall_2->Q, &points[1]); + copy_point(&B_chall_2->PmQ, &points[2]); + + ibz_finalize(&two_pow); + ibz_vec_2_finalize(&vec_resp_two); + quat_left_ideal_finalize(&lideal_resp_two); + + return ret; +} + +static int +compute_challenge_codomain_signature(const signature_t *sig, + secret_key_t *sk, + ec_curve_t *E_chall, + const ec_curve_t *E_chall_2, + 
ec_basis_t *B_chall_2) +{ + ec_isog_even_t phi_chall; + ec_basis_t bas_sk; + copy_basis(&bas_sk, &sk->canonical_basis); + + phi_chall.curve = sk->curve; + phi_chall.length = TORSION_EVEN_POWER - sig->backtracking; + assert(test_basis_order_twof(&bas_sk, &sk->curve, TORSION_EVEN_POWER)); + + // Compute the kernel + { + ec_ladder3pt(&phi_chall.kernel, sig->chall_coeff, &bas_sk.P, &bas_sk.Q, &bas_sk.PmQ, &sk->curve); + } + assert(test_point_order_twof(&phi_chall.kernel, &sk->curve, TORSION_EVEN_POWER)); + + // Double kernel to get correct order + ec_dbl_iter(&phi_chall.kernel, sig->backtracking, &phi_chall.kernel, &sk->curve); + + assert(test_point_order_twof(&phi_chall.kernel, E_chall, phi_chall.length)); + + // Compute the codomain from challenge isogeny + if (ec_eval_even(E_chall, &phi_chall, NULL, 0)) + return 0; + +#ifndef NDEBUG + fp2_t j_chall, j_codomain; + ec_j_inv(&j_codomain, E_chall_2); + ec_j_inv(&j_chall, E_chall); + // apparently its always the second one curve + assert(fp2_is_equal(&j_chall, &j_codomain)); +#endif + + // applying the isomorphism from E_chall_2 to E_chall + ec_isom_t isom; + if (ec_isomorphism(&isom, E_chall_2, E_chall)) + return 0; // error due to a corner case with 1/p probability + ec_iso_eval(&B_chall_2->P, &isom); + ec_iso_eval(&B_chall_2->Q, &isom); + ec_iso_eval(&B_chall_2->PmQ, &isom); + + return 1; +} + +static void +set_aux_curve_signature(signature_t *sig, ec_curve_t *E_aux) +{ + ec_normalize_curve(E_aux); + fp2_copy(&sig->E_aux_A, &E_aux->A); +} + +static void +compute_and_set_basis_change_matrix(signature_t *sig, + const ec_basis_t *B_aux_2, + ec_basis_t *B_chall_2, + ec_curve_t *E_aux_2, + ec_curve_t *E_chall, + int f) +{ + // Matrices for change of bases matrices + ibz_mat_2x2_t mat_Baux2_to_Baux2_can, mat_Bchall_can_to_Bchall; + ibz_mat_2x2_init(&mat_Baux2_to_Baux2_can); + ibz_mat_2x2_init(&mat_Bchall_can_to_Bchall); + + // Compute canonical bases + ec_basis_t B_can_chall, B_aux_2_can; + sig->hint_chall = ec_curve_to_basis_2f_to_hint(&B_can_chall, E_chall, TORSION_EVEN_POWER); + sig->hint_aux = ec_curve_to_basis_2f_to_hint(&B_aux_2_can, E_aux_2, TORSION_EVEN_POWER); + +#ifndef NDEBUG + { + // Ensure all points have the desired order + assert(test_basis_order_twof(&B_aux_2_can, E_aux_2, TORSION_EVEN_POWER)); + assert(test_basis_order_twof(B_aux_2, E_aux_2, f)); + fp2_t w0; + weil(&w0, f, &B_aux_2->P, &B_aux_2->Q, &B_aux_2->PmQ, E_aux_2); + } +#endif + + // compute the matrix to go from B_aux_2 to B_aux_2_can + change_of_basis_matrix_tate_invert(&mat_Baux2_to_Baux2_can, &B_aux_2_can, B_aux_2, E_aux_2, f); + + // apply the change of basis to B_chall_2 + matrix_application_even_basis(B_chall_2, E_chall, &mat_Baux2_to_Baux2_can, f); + +#ifndef NDEBUG + { + // Ensure all points have the desired order + assert(test_basis_order_twof(&B_can_chall, E_chall, TORSION_EVEN_POWER)); + } +#endif + + // compute the matrix to go from B_chall_can to B_chall_2 + change_of_basis_matrix_tate(&mat_Bchall_can_to_Bchall, B_chall_2, &B_can_chall, E_chall, f); + + // Assert all values in the matrix are of the expected size for packing + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][1]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][1]) <= SQIsign_response_length + HD_extra_torsion); + + // Set the basis change matrix 
to signature + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][0], &(mat_Bchall_can_to_Bchall[0][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][1], &(mat_Bchall_can_to_Bchall[0][1])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][0], &(mat_Bchall_can_to_Bchall[1][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][1], &(mat_Bchall_can_to_Bchall[1][1])); + + // Finalise the matrices + ibz_mat_2x2_finalize(&mat_Bchall_can_to_Bchall); + ibz_mat_2x2_finalize(&mat_Baux2_to_Baux2_can); +} + +int +protocols_sign(signature_t *sig, const public_key_t *pk, secret_key_t *sk, const unsigned char *m, size_t l) +{ + int ret = 0; + int reduced_order = 0; // work around false positive gcc warning + + uint_fast8_t pow_dim2_deg_resp; + assert(SQIsign_response_length <= (intmax_t)UINT_FAST8_MAX); // otherwise we might need more bits there + + ibz_t remain, lattice_content, random_aux_norm, degree_resp_inv; + ibz_init(&remain); + ibz_init(&lattice_content); + ibz_init(&random_aux_norm); + ibz_init(°ree_resp_inv); + + quat_alg_elem_t resp_quat; + quat_alg_elem_init(&resp_quat); + + quat_left_ideal_t lideal_commit, lideal_com_resp; + quat_left_ideal_init(&lideal_commit); + quat_left_ideal_init(&lideal_com_resp); + + // This structure holds two curves E1 x E2 together with a basis + // Bi of E[2^n] for each of these curves + theta_couple_curve_with_basis_t Ecom_Eaux; + // This structure holds two curves E1 x E2 together with a basis + // Bi of Ei[2^n] + theta_couple_curve_with_basis_t Eaux2_Echall2; + + // This will hold the challenge curve + ec_curve_t E_chall = sk->curve; + + ec_curve_init(&Ecom_Eaux.E1); + ec_curve_init(&Ecom_Eaux.E2); + + while (!ret) { + + // computing the commitment + ret = commit(&Ecom_Eaux.E1, &Ecom_Eaux.B1, &lideal_commit); + + // start again if the commitment generation has failed + if (!ret) { + continue; + } + + // Hash the message to a kernel generator + // i.e. 
a scalar such that ker = P + [s]Q + hash_to_challenge(&sig->chall_coeff, pk, &Ecom_Eaux.E1, m, l); + // Compute the challenge ideal and response quaternion element + { + quat_left_ideal_t lideal_chall_two; + quat_left_ideal_init(&lideal_chall_two); + + // computing the challenge ideal + compute_challenge_ideal_signature(&lideal_chall_two, sig, sk); + compute_response_quat_element(&resp_quat, &lattice_content, sk, &lideal_chall_two, &lideal_commit); + + // Clean up + quat_left_ideal_finalize(&lideal_chall_two); + } + + // computing the amount of backtracking we're making + // and removing it + compute_backtracking_signature(sig, &resp_quat, &lattice_content, &remain); + + // creating lideal_com * lideal_resp + // we first compute the norm of lideal_resp + // norm of the resp_quat + pow_dim2_deg_resp = compute_random_aux_norm_and_helpers(sig, + &random_aux_norm, + °ree_resp_inv, + &remain, + &lattice_content, + &resp_quat, + &lideal_com_resp, + &lideal_commit); + + // notational conventions: + // B0 = canonical basis of E0 + // B_com = image through commitment isogeny (odd degree) of canonical basis of E0 + // B_aux = image through aux_resp_com isogeny (odd degree) of canonical basis of E0 + + if (pow_dim2_deg_resp > 0) { + // Evaluate the random aux ideal on the curve E0 and its basis to find E_aux and B_aux + ret = + evaluate_random_aux_isogeny_signature(&Ecom_Eaux.E2, &Ecom_Eaux.B2, &random_aux_norm, &lideal_com_resp); + + // auxiliary isogeny computation failed we must start again + if (!ret) { + continue; + } + +#ifndef NDEBUG + // testing that the order of the points in the bases is as expected + assert(test_basis_order_twof(&Ecom_Eaux.B1, &Ecom_Eaux.E1, TORSION_EVEN_POWER)); + assert(test_basis_order_twof(&Ecom_Eaux.B2, &Ecom_Eaux.E2, TORSION_EVEN_POWER)); +#endif + + // applying the matrix to compute Baux + // first, we reduce to the relevant order + reduced_order = pow_dim2_deg_resp + HD_extra_torsion + sig->two_resp_length; + ec_dbl_iter_basis(&Ecom_Eaux.B1, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B1, &Ecom_Eaux.E1); + ec_dbl_iter_basis(&Ecom_Eaux.B2, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B2, &Ecom_Eaux.E2); + + // Given all the above data, compute a dim two isogeny with domain + // E_com x E_aux + // and codomain + // E_aux_2 x E_chall_2 (note: E_chall_2 is isomorphic to E_chall) + // and evaluated points stored as bases in + // B_aux_2 on E_aux_2 + // B_chall_2 on E_chall_2 + ret = compute_dim2_isogeny_challenge( + &Eaux2_Echall2, &Ecom_Eaux, °ree_resp_inv, pow_dim2_deg_resp, sig->two_resp_length, reduced_order); + if (!ret) + continue; + } else { + // No 2d isogeny needed, so simulate a "Kani matrix" identity here + copy_curve(&Eaux2_Echall2.E1, &Ecom_Eaux.E1); + copy_curve(&Eaux2_Echall2.E2, &Ecom_Eaux.E1); + + reduced_order = sig->two_resp_length; + ec_dbl_iter_basis(&Eaux2_Echall2.B1, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B1, &Ecom_Eaux.E1); + ec_dbl_iter_basis(&Eaux2_Echall2.B1, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B1, &Ecom_Eaux.E1); + copy_basis(&Eaux2_Echall2.B2, &Eaux2_Echall2.B1); + } + + // computation of the remaining small chain of two isogenies when needed + if (sig->two_resp_length > 0) { + if (!compute_small_chain_isogeny_signature( + &Eaux2_Echall2.E2, &Eaux2_Echall2.B2, &resp_quat, pow_dim2_deg_resp, sig->two_resp_length)) { + assert(0); // this shouldn't fail + } + } + + // computation of the challenge codomain + if (!compute_challenge_codomain_signature(sig, sk, &E_chall, &Eaux2_Echall2.E2, &Eaux2_Echall2.B2)) + assert(0); // 
this shouldn't fail + } + + // Set to the signature the Montgomery A-coefficient of E_aux_2 + set_aux_curve_signature(sig, &Eaux2_Echall2.E1); + + // Set the basis change matrix from canonical bases to the supplied bases + compute_and_set_basis_change_matrix( + sig, &Eaux2_Echall2.B1, &Eaux2_Echall2.B2, &Eaux2_Echall2.E1, &E_chall, reduced_order); + + quat_alg_elem_finalize(&resp_quat); + quat_left_ideal_finalize(&lideal_commit); + quat_left_ideal_finalize(&lideal_com_resp); + + ibz_finalize(&lattice_content); + ibz_finalize(&remain); + ibz_finalize(°ree_resp_inv); + ibz_finalize(&random_aux_norm); + + return ret; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/signature.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/signature.h new file mode 100644 index 0000000000..ba38c360e6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/signature.h @@ -0,0 +1,97 @@ +/** @file + * + * @brief The key generation and signature protocols + */ + +#ifndef SIGNATURE_H +#define SIGNATURE_H + +#include +#include +#include +#include + +/** @defgroup signature SQIsignHD key generation and signature protocols + * @{ + */ +/** @defgroup signature_t Types for SQIsignHD key generation and signature protocols + * @{ + */ + +/** @brief Type for the secret keys + * + * @typedef secret_key_t + * + * @struct secret_key + * + */ +typedef struct secret_key +{ + ec_curve_t curve; /// the public curve, but with little precomputations + quat_left_ideal_t secret_ideal; + ibz_mat_2x2_t mat_BAcan_to_BA0_two; // mat_BA0_to_BAcan*BA0 = BAcan, where BAcan is the + // canonical basis of EA[2^e], and BA0 the image of the + // basis of E0[2^e] through the secret isogeny + ec_basis_t canonical_basis; // the canonical basis of the public key curve +} secret_key_t; + +/** @} + */ + +/*************************** Functions *****************************/ + +void secret_key_init(secret_key_t *sk); +void secret_key_finalize(secret_key_t *sk); + +/** + * @brief Key generation + * + * @param pk Output: will contain the public key + * @param sk Output: will contain the secret key + * @returns 1 if success, 0 otherwise + */ +int protocols_keygen(public_key_t *pk, secret_key_t *sk); + +/** + * @brief Signature computation + * + * @param sig Output: will contain the signature + * @param sk secret key + * @param pk public key + * @param m message + * @param l size + * @returns 1 if success, 0 otherwise + */ +int protocols_sign(signature_t *sig, const public_key_t *pk, secret_key_t *sk, const unsigned char *m, size_t l); + +/*************************** Encoding *****************************/ + +/** @defgroup encoding Encoding and decoding functions + * @{ + */ + +/** + * @brief Encodes a secret key as a byte array + * + * @param enc : Byte array to encode the secret key (including public key) in + * @param sk : Secret key to encode + * @param pk : Public key to encode + */ +void secret_key_to_bytes(unsigned char *enc, const secret_key_t *sk, const public_key_t *pk); + +/** + * @brief Decodes a secret key (and public key) from a byte array + * + * @param sk : Structure to decode the secret key in + * @param pk : Structure to decode the public key in + * @param enc : Byte array to decode + */ +void secret_key_from_bytes(secret_key_t *sk, public_key_t *pk, const unsigned char *enc); + +/** @} + */ + +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign.c new file mode 100644 index 0000000000..7335c38d9a 
--- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign.c @@ -0,0 +1,146 @@ +#include +#include +#include +#include +#if defined(ENABLE_SIGN) +#include +#endif + +#if defined(ENABLE_SIGN) +SQISIGN_API +int +sqisign_keypair(unsigned char *pk, unsigned char *sk) +{ + int ret = 0; + secret_key_t skt; + public_key_t pkt = { 0 }; + secret_key_init(&skt); + + ret = !protocols_keygen(&pkt, &skt); + + secret_key_to_bytes(sk, &skt, &pkt); + public_key_to_bytes(pk, &pkt); + secret_key_finalize(&skt); + return ret; +} + +SQISIGN_API +int +sqisign_sign(unsigned char *sm, + unsigned long long *smlen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *sk) +{ + int ret = 0; + secret_key_t skt; + public_key_t pkt = { 0 }; + signature_t sigt; + secret_key_init(&skt); + secret_key_from_bytes(&skt, &pkt, sk); + + memmove(sm + SIGNATURE_BYTES, m, mlen); + + ret = !protocols_sign(&sigt, &pkt, &skt, sm + SIGNATURE_BYTES, mlen); + if (ret != 0) { + *smlen = 0; + goto err; + } + + signature_to_bytes(sm, &sigt); + *smlen = SIGNATURE_BYTES + mlen; + +err: + secret_key_finalize(&skt); + return ret; +} + +SQISIGN_API +int +sqisign_sign_signature(unsigned char *s, + unsigned long long *slen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *sk) +{ + int ret = 0; + secret_key_t skt; + public_key_t pkt = { 0 }; + signature_t sigt; + secret_key_init(&skt); + secret_key_from_bytes(&skt, &pkt, sk); + + ret = !protocols_sign(&sigt, &pkt, &skt, m, mlen); + if (ret != 0) { + *slen = 0; + goto err; + } + + signature_to_bytes(s, &sigt); + *slen = SIGNATURE_BYTES; + +err: + secret_key_finalize(&skt); + return ret; +} +#endif + +SQISIGN_API +int +sqisign_open(unsigned char *m, + unsigned long long *mlen, + const unsigned char *sm, + unsigned long long smlen, + const unsigned char *pk) +{ + int ret = 0; + public_key_t pkt = { 0 }; + signature_t sigt; + + public_key_from_bytes(&pkt, pk); + signature_from_bytes(&sigt, sm); + + ret = !protocols_verify(&sigt, &pkt, sm + SIGNATURE_BYTES, smlen - SIGNATURE_BYTES); + + if (!ret) { + *mlen = smlen - SIGNATURE_BYTES; + memmove(m, sm + SIGNATURE_BYTES, *mlen); + } else { + *mlen = 0; + memset(m, 0, smlen - SIGNATURE_BYTES); + } + + return ret; +} + +SQISIGN_API +int +sqisign_verify(const unsigned char *m, + unsigned long long mlen, + const unsigned char *sig, + unsigned long long siglen, + const unsigned char *pk) +{ + + int ret = 0; + public_key_t pkt = { 0 }; + signature_t sigt; + + public_key_from_bytes(&pkt, pk); + signature_from_bytes(&sigt, sig); + + ret = !protocols_verify(&sigt, &pkt, m, mlen); + + return ret; +} + +SQISIGN_API +int +sqisign_verify_signature(const unsigned char *sig, + unsigned long long siglen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *pk) +{ + return sqisign_verify(m, mlen, sig, siglen, pk); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign_namespace.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign_namespace.h new file mode 100644 index 0000000000..007d2572b9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign_namespace.h @@ -0,0 +1,1071 @@ + +#ifndef SQISIGN_NAMESPACE_H +#define SQISIGN_NAMESPACE_H + +//#define DISABLE_NAMESPACING + +#if defined(_WIN32) +#define SQISIGN_API __declspec(dllexport) +#else +#define SQISIGN_API __attribute__((visibility("default"))) +#endif + +#define PARAM_JOIN3_(a, b, c) sqisign_##a##_##b##_##c +#define PARAM_JOIN3(a, b, c) PARAM_JOIN3_(a, b, c) +#define 
PARAM_NAME3(end, s) PARAM_JOIN3(SQISIGN_VARIANT, end, s) + +#define PARAM_JOIN2_(a, b) sqisign_##a##_##b +#define PARAM_JOIN2(a, b) PARAM_JOIN2_(a, b) +#define PARAM_NAME2(end, s) PARAM_JOIN2(end, s) + +#ifndef DISABLE_NAMESPACING +#define SQISIGN_NAMESPACE_GENERIC(s) PARAM_NAME2(gen, s) +#else +#define SQISIGN_NAMESPACE_GENERIC(s) s +#endif + +#if defined(SQISIGN_VARIANT) && !defined(DISABLE_NAMESPACING) +#if defined(SQISIGN_BUILD_TYPE_REF) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(ref, s) +#elif defined(SQISIGN_BUILD_TYPE_OPT) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(opt, s) +#elif defined(SQISIGN_BUILD_TYPE_BROADWELL) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(broadwell, s) +#elif defined(SQISIGN_BUILD_TYPE_ARM64CRYPTO) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(arm64crypto, s) +#else +#error "Build type not known" +#endif + +#else +#define SQISIGN_NAMESPACE(s) s +#endif + +// Namespacing symbols exported from algebra.c: +#undef quat_alg_add +#undef quat_alg_conj +#undef quat_alg_coord_mul +#undef quat_alg_elem_copy +#undef quat_alg_elem_copy_ibz +#undef quat_alg_elem_equal +#undef quat_alg_elem_is_zero +#undef quat_alg_elem_mul_by_scalar +#undef quat_alg_elem_set +#undef quat_alg_equal_denom +#undef quat_alg_init_set_ui +#undef quat_alg_make_primitive +#undef quat_alg_mul +#undef quat_alg_norm +#undef quat_alg_normalize +#undef quat_alg_scalar +#undef quat_alg_sub + +#define quat_alg_add SQISIGN_NAMESPACE_GENERIC(quat_alg_add) +#define quat_alg_conj SQISIGN_NAMESPACE_GENERIC(quat_alg_conj) +#define quat_alg_coord_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_coord_mul) +#define quat_alg_elem_copy SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy) +#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy_ibz) +#define quat_alg_elem_equal SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_equal) +#define quat_alg_elem_is_zero SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_is_zero) +#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_mul_by_scalar) +#define quat_alg_elem_set SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_set) +#define quat_alg_equal_denom SQISIGN_NAMESPACE_GENERIC(quat_alg_equal_denom) +#define quat_alg_init_set_ui SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set_ui) +#define quat_alg_make_primitive SQISIGN_NAMESPACE_GENERIC(quat_alg_make_primitive) +#define quat_alg_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_mul) +#define quat_alg_norm SQISIGN_NAMESPACE_GENERIC(quat_alg_norm) +#define quat_alg_normalize SQISIGN_NAMESPACE_GENERIC(quat_alg_normalize) +#define quat_alg_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_scalar) +#define quat_alg_sub SQISIGN_NAMESPACE_GENERIC(quat_alg_sub) + +// Namespacing symbols exported from api.c: +#undef crypto_sign +#undef crypto_sign_keypair +#undef crypto_sign_open + +#define crypto_sign SQISIGN_NAMESPACE(crypto_sign) +#define crypto_sign_keypair SQISIGN_NAMESPACE(crypto_sign_keypair) +#define crypto_sign_open SQISIGN_NAMESPACE(crypto_sign_open) + +// Namespacing symbols exported from basis.c: +#undef ec_curve_to_basis_2f_from_hint +#undef ec_curve_to_basis_2f_to_hint +#undef ec_recover_y +#undef lift_basis +#undef lift_basis_normalized + +#define ec_curve_to_basis_2f_from_hint SQISIGN_NAMESPACE(ec_curve_to_basis_2f_from_hint) +#define ec_curve_to_basis_2f_to_hint SQISIGN_NAMESPACE(ec_curve_to_basis_2f_to_hint) +#define ec_recover_y SQISIGN_NAMESPACE(ec_recover_y) +#define lift_basis SQISIGN_NAMESPACE(lift_basis) +#define lift_basis_normalized SQISIGN_NAMESPACE(lift_basis_normalized) + +// Namespacing symbols exported from biextension.c: +#undef 
clear_cofac +#undef ec_dlog_2_tate +#undef ec_dlog_2_weil +#undef fp2_frob +#undef reduced_tate +#undef weil + +#define clear_cofac SQISIGN_NAMESPACE(clear_cofac) +#define ec_dlog_2_tate SQISIGN_NAMESPACE(ec_dlog_2_tate) +#define ec_dlog_2_weil SQISIGN_NAMESPACE(ec_dlog_2_weil) +#define fp2_frob SQISIGN_NAMESPACE(fp2_frob) +#define reduced_tate SQISIGN_NAMESPACE(reduced_tate) +#define weil SQISIGN_NAMESPACE(weil) + +// Namespacing symbols exported from common.c: +#undef hash_to_challenge +#undef public_key_finalize +#undef public_key_init + +#define hash_to_challenge SQISIGN_NAMESPACE(hash_to_challenge) +#define public_key_finalize SQISIGN_NAMESPACE(public_key_finalize) +#define public_key_init SQISIGN_NAMESPACE(public_key_init) + +// Namespacing symbols exported from dim2.c: +#undef ibz_2x2_mul_mod +#undef ibz_mat_2x2_add +#undef ibz_mat_2x2_copy +#undef ibz_mat_2x2_det_from_ibz +#undef ibz_mat_2x2_eval +#undef ibz_mat_2x2_inv_mod +#undef ibz_mat_2x2_set +#undef ibz_vec_2_set + +#define ibz_2x2_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_2x2_mul_mod) +#define ibz_mat_2x2_add SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_add) +#define ibz_mat_2x2_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_copy) +#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_det_from_ibz) +#define ibz_mat_2x2_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_eval) +#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_inv_mod) +#define ibz_mat_2x2_set SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_set) +#define ibz_vec_2_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_set) + +// Namespacing symbols exported from dim2id2iso.c: +#undef dim2id2iso_arbitrary_isogeny_evaluation +#undef dim2id2iso_ideal_to_isogeny_clapotis +#undef find_uv +#undef fixed_degree_isogeny_and_eval + +#define dim2id2iso_arbitrary_isogeny_evaluation SQISIGN_NAMESPACE(dim2id2iso_arbitrary_isogeny_evaluation) +#define dim2id2iso_ideal_to_isogeny_clapotis SQISIGN_NAMESPACE(dim2id2iso_ideal_to_isogeny_clapotis) +#define find_uv SQISIGN_NAMESPACE(find_uv) +#define fixed_degree_isogeny_and_eval SQISIGN_NAMESPACE(fixed_degree_isogeny_and_eval) + +// Namespacing symbols exported from dim4.c: +#undef ibz_inv_dim4_make_coeff_mpm +#undef ibz_inv_dim4_make_coeff_pmp +#undef ibz_mat_4x4_copy +#undef ibz_mat_4x4_equal +#undef ibz_mat_4x4_eval +#undef ibz_mat_4x4_eval_t +#undef ibz_mat_4x4_gcd +#undef ibz_mat_4x4_identity +#undef ibz_mat_4x4_inv_with_det_as_denom +#undef ibz_mat_4x4_is_identity +#undef ibz_mat_4x4_mul +#undef ibz_mat_4x4_negate +#undef ibz_mat_4x4_scalar_div +#undef ibz_mat_4x4_scalar_mul +#undef ibz_mat_4x4_transpose +#undef ibz_mat_4x4_zero +#undef ibz_vec_4_add +#undef ibz_vec_4_content +#undef ibz_vec_4_copy +#undef ibz_vec_4_copy_ibz +#undef ibz_vec_4_is_zero +#undef ibz_vec_4_linear_combination +#undef ibz_vec_4_negate +#undef ibz_vec_4_scalar_div +#undef ibz_vec_4_scalar_mul +#undef ibz_vec_4_set +#undef ibz_vec_4_sub +#undef quat_qf_eval + +#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_mpm) +#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_pmp) +#define ibz_mat_4x4_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_copy) +#define ibz_mat_4x4_equal SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_equal) +#define ibz_mat_4x4_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval) +#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval_t) +#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_gcd) +#define ibz_mat_4x4_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_identity) 
+#define ibz_mat_4x4_inv_with_det_as_denom SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_inv_with_det_as_denom) +#define ibz_mat_4x4_is_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_identity) +#define ibz_mat_4x4_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_mul) +#define ibz_mat_4x4_negate SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_negate) +#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_div) +#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_mul) +#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_transpose) +#define ibz_mat_4x4_zero SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_zero) +#define ibz_vec_4_add SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_add) +#define ibz_vec_4_content SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_content) +#define ibz_vec_4_copy SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy) +#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_ibz) +#define ibz_vec_4_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_is_zero) +#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination) +#define ibz_vec_4_negate SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_negate) +#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_div) +#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul) +#define ibz_vec_4_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_set) +#define ibz_vec_4_sub SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_sub) +#define quat_qf_eval SQISIGN_NAMESPACE_GENERIC(quat_qf_eval) + +// Namespacing symbols exported from ec.c: +#undef cswap_points +#undef ec_biscalar_mul +#undef ec_curve_init +#undef ec_curve_init_from_A +#undef ec_curve_normalize_A24 +#undef ec_curve_verify_A +#undef ec_dbl +#undef ec_dbl_iter +#undef ec_dbl_iter_basis +#undef ec_has_zero_coordinate +#undef ec_is_basis_four_torsion +#undef ec_is_equal +#undef ec_is_four_torsion +#undef ec_is_two_torsion +#undef ec_is_zero +#undef ec_j_inv +#undef ec_ladder3pt +#undef ec_mul +#undef ec_normalize_curve +#undef ec_normalize_curve_and_A24 +#undef ec_normalize_point +#undef ec_point_init +#undef select_point +#undef xADD +#undef xDBL +#undef xDBLADD +#undef xDBLMUL +#undef xDBL_A24 +#undef xDBL_E0 +#undef xMUL + +#define cswap_points SQISIGN_NAMESPACE(cswap_points) +#define ec_biscalar_mul SQISIGN_NAMESPACE(ec_biscalar_mul) +#define ec_curve_init SQISIGN_NAMESPACE(ec_curve_init) +#define ec_curve_init_from_A SQISIGN_NAMESPACE(ec_curve_init_from_A) +#define ec_curve_normalize_A24 SQISIGN_NAMESPACE(ec_curve_normalize_A24) +#define ec_curve_verify_A SQISIGN_NAMESPACE(ec_curve_verify_A) +#define ec_dbl SQISIGN_NAMESPACE(ec_dbl) +#define ec_dbl_iter SQISIGN_NAMESPACE(ec_dbl_iter) +#define ec_dbl_iter_basis SQISIGN_NAMESPACE(ec_dbl_iter_basis) +#define ec_has_zero_coordinate SQISIGN_NAMESPACE(ec_has_zero_coordinate) +#define ec_is_basis_four_torsion SQISIGN_NAMESPACE(ec_is_basis_four_torsion) +#define ec_is_equal SQISIGN_NAMESPACE(ec_is_equal) +#define ec_is_four_torsion SQISIGN_NAMESPACE(ec_is_four_torsion) +#define ec_is_two_torsion SQISIGN_NAMESPACE(ec_is_two_torsion) +#define ec_is_zero SQISIGN_NAMESPACE(ec_is_zero) +#define ec_j_inv SQISIGN_NAMESPACE(ec_j_inv) +#define ec_ladder3pt SQISIGN_NAMESPACE(ec_ladder3pt) +#define ec_mul SQISIGN_NAMESPACE(ec_mul) +#define ec_normalize_curve SQISIGN_NAMESPACE(ec_normalize_curve) +#define ec_normalize_curve_and_A24 SQISIGN_NAMESPACE(ec_normalize_curve_and_A24) +#define ec_normalize_point SQISIGN_NAMESPACE(ec_normalize_point) +#define ec_point_init SQISIGN_NAMESPACE(ec_point_init) +#define 
select_point SQISIGN_NAMESPACE(select_point) +#define xADD SQISIGN_NAMESPACE(xADD) +#define xDBL SQISIGN_NAMESPACE(xDBL) +#define xDBLADD SQISIGN_NAMESPACE(xDBLADD) +#define xDBLMUL SQISIGN_NAMESPACE(xDBLMUL) +#define xDBL_A24 SQISIGN_NAMESPACE(xDBL_A24) +#define xDBL_E0 SQISIGN_NAMESPACE(xDBL_E0) +#define xMUL SQISIGN_NAMESPACE(xMUL) + +// Namespacing symbols exported from ec_jac.c: +#undef ADD +#undef DBL +#undef DBLW +#undef copy_jac_point +#undef jac_from_ws +#undef jac_init +#undef jac_is_equal +#undef jac_neg +#undef jac_to_ws +#undef jac_to_xz +#undef jac_to_xz_add_components +#undef select_jac_point + +#define ADD SQISIGN_NAMESPACE(ADD) +#define DBL SQISIGN_NAMESPACE(DBL) +#define DBLW SQISIGN_NAMESPACE(DBLW) +#define copy_jac_point SQISIGN_NAMESPACE(copy_jac_point) +#define jac_from_ws SQISIGN_NAMESPACE(jac_from_ws) +#define jac_init SQISIGN_NAMESPACE(jac_init) +#define jac_is_equal SQISIGN_NAMESPACE(jac_is_equal) +#define jac_neg SQISIGN_NAMESPACE(jac_neg) +#define jac_to_ws SQISIGN_NAMESPACE(jac_to_ws) +#define jac_to_xz SQISIGN_NAMESPACE(jac_to_xz) +#define jac_to_xz_add_components SQISIGN_NAMESPACE(jac_to_xz_add_components) +#define select_jac_point SQISIGN_NAMESPACE(select_jac_point) + +// Namespacing symbols exported from encode_signature.c: +#undef secret_key_from_bytes +#undef secret_key_to_bytes + +#define secret_key_from_bytes SQISIGN_NAMESPACE(secret_key_from_bytes) +#define secret_key_to_bytes SQISIGN_NAMESPACE(secret_key_to_bytes) + +// Namespacing symbols exported from encode_verification.c: +#undef public_key_from_bytes +#undef public_key_to_bytes +#undef signature_from_bytes +#undef signature_to_bytes + +#define public_key_from_bytes SQISIGN_NAMESPACE(public_key_from_bytes) +#define public_key_to_bytes SQISIGN_NAMESPACE(public_key_to_bytes) +#define signature_from_bytes SQISIGN_NAMESPACE(signature_from_bytes) +#define signature_to_bytes SQISIGN_NAMESPACE(signature_to_bytes) + +// Namespacing symbols exported from finit.c: +#undef ibz_mat_2x2_finalize +#undef ibz_mat_2x2_init +#undef ibz_mat_4x4_finalize +#undef ibz_mat_4x4_init +#undef ibz_vec_2_finalize +#undef ibz_vec_2_init +#undef ibz_vec_4_finalize +#undef ibz_vec_4_init +#undef quat_alg_elem_finalize +#undef quat_alg_elem_init +#undef quat_alg_finalize +#undef quat_alg_init_set +#undef quat_lattice_finalize +#undef quat_lattice_init +#undef quat_left_ideal_finalize +#undef quat_left_ideal_init + +#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_finalize) +#define ibz_mat_2x2_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_init) +#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_finalize) +#define ibz_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_init) +#define ibz_vec_2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_finalize) +#define ibz_vec_2_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_init) +#define ibz_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_finalize) +#define ibz_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_init) +#define quat_alg_elem_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_finalize) +#define quat_alg_elem_init SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_init) +#define quat_alg_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_finalize) +#define quat_alg_init_set SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set) +#define quat_lattice_finalize SQISIGN_NAMESPACE_GENERIC(quat_lattice_finalize) +#define quat_lattice_init SQISIGN_NAMESPACE_GENERIC(quat_lattice_init) +#define quat_left_ideal_finalize SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_finalize) +#define 
quat_left_ideal_init SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_init) + +// Namespacing symbols exported from fp.c: +#undef fp_select +#undef p +#undef p2 + +#define fp_select SQISIGN_NAMESPACE(fp_select) +#define p SQISIGN_NAMESPACE(p) +#define p2 SQISIGN_NAMESPACE(p2) + +// Namespacing symbols exported from fp.c, fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c: +#undef fp_exp3div4 +#undef fp_inv +#undef fp_is_square +#undef fp_sqrt + +#define fp_exp3div4 SQISIGN_NAMESPACE(fp_exp3div4) +#define fp_inv SQISIGN_NAMESPACE(fp_inv) +#define fp_is_square SQISIGN_NAMESPACE(fp_is_square) +#define fp_sqrt SQISIGN_NAMESPACE(fp_sqrt) + +// Namespacing symbols exported from fp2.c: +#undef fp2_add +#undef fp2_add_one +#undef fp2_batched_inv +#undef fp2_copy +#undef fp2_cswap +#undef fp2_decode +#undef fp2_encode +#undef fp2_half +#undef fp2_inv +#undef fp2_is_equal +#undef fp2_is_one +#undef fp2_is_square +#undef fp2_is_zero +#undef fp2_mul +#undef fp2_mul_small +#undef fp2_neg +#undef fp2_pow_vartime +#undef fp2_print +#undef fp2_select +#undef fp2_set_one +#undef fp2_set_small +#undef fp2_set_zero +#undef fp2_sqr +#undef fp2_sqrt +#undef fp2_sqrt_verify +#undef fp2_sub + +#define fp2_add SQISIGN_NAMESPACE(fp2_add) +#define fp2_add_one SQISIGN_NAMESPACE(fp2_add_one) +#define fp2_batched_inv SQISIGN_NAMESPACE(fp2_batched_inv) +#define fp2_copy SQISIGN_NAMESPACE(fp2_copy) +#define fp2_cswap SQISIGN_NAMESPACE(fp2_cswap) +#define fp2_decode SQISIGN_NAMESPACE(fp2_decode) +#define fp2_encode SQISIGN_NAMESPACE(fp2_encode) +#define fp2_half SQISIGN_NAMESPACE(fp2_half) +#define fp2_inv SQISIGN_NAMESPACE(fp2_inv) +#define fp2_is_equal SQISIGN_NAMESPACE(fp2_is_equal) +#define fp2_is_one SQISIGN_NAMESPACE(fp2_is_one) +#define fp2_is_square SQISIGN_NAMESPACE(fp2_is_square) +#define fp2_is_zero SQISIGN_NAMESPACE(fp2_is_zero) +#define fp2_mul SQISIGN_NAMESPACE(fp2_mul) +#define fp2_mul_small SQISIGN_NAMESPACE(fp2_mul_small) +#define fp2_neg SQISIGN_NAMESPACE(fp2_neg) +#define fp2_pow_vartime SQISIGN_NAMESPACE(fp2_pow_vartime) +#define fp2_print SQISIGN_NAMESPACE(fp2_print) +#define fp2_select SQISIGN_NAMESPACE(fp2_select) +#define fp2_set_one SQISIGN_NAMESPACE(fp2_set_one) +#define fp2_set_small SQISIGN_NAMESPACE(fp2_set_small) +#define fp2_set_zero SQISIGN_NAMESPACE(fp2_set_zero) +#define fp2_sqr SQISIGN_NAMESPACE(fp2_sqr) +#define fp2_sqrt SQISIGN_NAMESPACE(fp2_sqrt) +#define fp2_sqrt_verify SQISIGN_NAMESPACE(fp2_sqrt_verify) +#define fp2_sub SQISIGN_NAMESPACE(fp2_sub) + +// Namespacing symbols exported from fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c: +#undef fp_copy +#undef fp_cswap +#undef fp_decode +#undef fp_decode_reduce +#undef fp_div3 +#undef fp_encode +#undef fp_half +#undef fp_is_equal +#undef fp_is_zero +#undef fp_mul_small +#undef fp_neg +#undef fp_set_one +#undef fp_set_small +#undef fp_set_zero + +#define fp_copy SQISIGN_NAMESPACE(fp_copy) +#define fp_cswap SQISIGN_NAMESPACE(fp_cswap) +#define fp_decode SQISIGN_NAMESPACE(fp_decode) +#define fp_decode_reduce SQISIGN_NAMESPACE(fp_decode_reduce) +#define fp_div3 SQISIGN_NAMESPACE(fp_div3) +#define fp_encode SQISIGN_NAMESPACE(fp_encode) +#define fp_half SQISIGN_NAMESPACE(fp_half) +#define fp_is_equal SQISIGN_NAMESPACE(fp_is_equal) +#define fp_is_zero SQISIGN_NAMESPACE(fp_is_zero) +#define fp_mul_small SQISIGN_NAMESPACE(fp_mul_small) +#define fp_neg SQISIGN_NAMESPACE(fp_neg) +#define fp_set_one SQISIGN_NAMESPACE(fp_set_one) +#define fp_set_small SQISIGN_NAMESPACE(fp_set_small) +#define fp_set_zero SQISIGN_NAMESPACE(fp_set_zero) +#define ONE 
SQISIGN_NAMESPACE(ONE) +#define ZERO SQISIGN_NAMESPACE(ZERO) + +// Namespacing symbols exported from fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c, gf27500.c, gf5248.c, gf65376.c: +#undef fp_add +#undef fp_mul +#undef fp_sqr +#undef fp_sub + +#define fp_add SQISIGN_NAMESPACE(fp_add) +#define fp_mul SQISIGN_NAMESPACE(fp_mul) +#define fp_sqr SQISIGN_NAMESPACE(fp_sqr) +#define fp_sub SQISIGN_NAMESPACE(fp_sub) + +// Namespacing symbols exported from gf27500.c: +#undef gf27500_decode +#undef gf27500_decode_reduce +#undef gf27500_div +#undef gf27500_div3 +#undef gf27500_encode +#undef gf27500_invert +#undef gf27500_legendre +#undef gf27500_sqrt + +#define gf27500_decode SQISIGN_NAMESPACE(gf27500_decode) +#define gf27500_decode_reduce SQISIGN_NAMESPACE(gf27500_decode_reduce) +#define gf27500_div SQISIGN_NAMESPACE(gf27500_div) +#define gf27500_div3 SQISIGN_NAMESPACE(gf27500_div3) +#define gf27500_encode SQISIGN_NAMESPACE(gf27500_encode) +#define gf27500_invert SQISIGN_NAMESPACE(gf27500_invert) +#define gf27500_legendre SQISIGN_NAMESPACE(gf27500_legendre) +#define gf27500_sqrt SQISIGN_NAMESPACE(gf27500_sqrt) + +// Namespacing symbols exported from gf27500.c, gf5248.c, gf65376.c: +#undef fp2_mul_c0 +#undef fp2_mul_c1 +#undef fp2_sq_c0 +#undef fp2_sq_c1 + +#define fp2_mul_c0 SQISIGN_NAMESPACE(fp2_mul_c0) +#define fp2_mul_c1 SQISIGN_NAMESPACE(fp2_mul_c1) +#define fp2_sq_c0 SQISIGN_NAMESPACE(fp2_sq_c0) +#define fp2_sq_c1 SQISIGN_NAMESPACE(fp2_sq_c1) + +// Namespacing symbols exported from gf5248.c: +#undef gf5248_decode +#undef gf5248_decode_reduce +#undef gf5248_div +#undef gf5248_div3 +#undef gf5248_encode +#undef gf5248_invert +#undef gf5248_legendre +#undef gf5248_sqrt + +#define gf5248_decode SQISIGN_NAMESPACE(gf5248_decode) +#define gf5248_decode_reduce SQISIGN_NAMESPACE(gf5248_decode_reduce) +#define gf5248_div SQISIGN_NAMESPACE(gf5248_div) +#define gf5248_div3 SQISIGN_NAMESPACE(gf5248_div3) +#define gf5248_encode SQISIGN_NAMESPACE(gf5248_encode) +#define gf5248_invert SQISIGN_NAMESPACE(gf5248_invert) +#define gf5248_legendre SQISIGN_NAMESPACE(gf5248_legendre) +#define gf5248_sqrt SQISIGN_NAMESPACE(gf5248_sqrt) + +// Namespacing symbols exported from gf65376.c: +#undef gf65376_decode +#undef gf65376_decode_reduce +#undef gf65376_div +#undef gf65376_div3 +#undef gf65376_encode +#undef gf65376_invert +#undef gf65376_legendre +#undef gf65376_sqrt + +#define gf65376_decode SQISIGN_NAMESPACE(gf65376_decode) +#define gf65376_decode_reduce SQISIGN_NAMESPACE(gf65376_decode_reduce) +#define gf65376_div SQISIGN_NAMESPACE(gf65376_div) +#define gf65376_div3 SQISIGN_NAMESPACE(gf65376_div3) +#define gf65376_encode SQISIGN_NAMESPACE(gf65376_encode) +#define gf65376_invert SQISIGN_NAMESPACE(gf65376_invert) +#define gf65376_legendre SQISIGN_NAMESPACE(gf65376_legendre) +#define gf65376_sqrt SQISIGN_NAMESPACE(gf65376_sqrt) + +// Namespacing symbols exported from hd.c: +#undef add_couple_jac_points +#undef copy_bases_to_kernel +#undef couple_jac_to_xz +#undef double_couple_jac_point +#undef double_couple_jac_point_iter +#undef double_couple_point +#undef double_couple_point_iter + +#define add_couple_jac_points SQISIGN_NAMESPACE(add_couple_jac_points) +#define copy_bases_to_kernel SQISIGN_NAMESPACE(copy_bases_to_kernel) +#define couple_jac_to_xz SQISIGN_NAMESPACE(couple_jac_to_xz) +#define double_couple_jac_point SQISIGN_NAMESPACE(double_couple_jac_point) +#define double_couple_jac_point_iter SQISIGN_NAMESPACE(double_couple_jac_point_iter) +#define double_couple_point SQISIGN_NAMESPACE(double_couple_point) +#define 
double_couple_point_iter SQISIGN_NAMESPACE(double_couple_point_iter) + +// Namespacing symbols exported from hnf.c: +#undef ibz_mat_4x4_is_hnf +#undef ibz_mat_4xn_hnf_mod_core +#undef ibz_vec_4_copy_mod +#undef ibz_vec_4_linear_combination_mod +#undef ibz_vec_4_scalar_mul_mod + +#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_hnf) +#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE_GENERIC(ibz_mat_4xn_hnf_mod_core) +#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_mod) +#define ibz_vec_4_linear_combination_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination_mod) +#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul_mod) + +// Namespacing symbols exported from hnf_internal.c: +#undef ibz_centered_mod +#undef ibz_conditional_assign +#undef ibz_mod_not_zero +#undef ibz_xgcd_with_u_not_0 + +#define ibz_centered_mod SQISIGN_NAMESPACE_GENERIC(ibz_centered_mod) +#define ibz_conditional_assign SQISIGN_NAMESPACE_GENERIC(ibz_conditional_assign) +#define ibz_mod_not_zero SQISIGN_NAMESPACE_GENERIC(ibz_mod_not_zero) +#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE_GENERIC(ibz_xgcd_with_u_not_0) + +// Namespacing symbols exported from ibz_division.c: +#undef ibz_xgcd + +#define ibz_xgcd SQISIGN_NAMESPACE_GENERIC(ibz_xgcd) + +// Namespacing symbols exported from id2iso.c: +#undef change_of_basis_matrix_tate +#undef change_of_basis_matrix_tate_invert +#undef ec_biscalar_mul_ibz_vec +#undef endomorphism_application_even_basis +#undef id2iso_ideal_to_kernel_dlogs_even +#undef id2iso_kernel_dlogs_to_ideal_even +#undef matrix_application_even_basis + +#define change_of_basis_matrix_tate SQISIGN_NAMESPACE(change_of_basis_matrix_tate) +#define change_of_basis_matrix_tate_invert SQISIGN_NAMESPACE(change_of_basis_matrix_tate_invert) +#define ec_biscalar_mul_ibz_vec SQISIGN_NAMESPACE(ec_biscalar_mul_ibz_vec) +#define endomorphism_application_even_basis SQISIGN_NAMESPACE(endomorphism_application_even_basis) +#define id2iso_ideal_to_kernel_dlogs_even SQISIGN_NAMESPACE(id2iso_ideal_to_kernel_dlogs_even) +#define id2iso_kernel_dlogs_to_ideal_even SQISIGN_NAMESPACE(id2iso_kernel_dlogs_to_ideal_even) +#define matrix_application_even_basis SQISIGN_NAMESPACE(matrix_application_even_basis) + +// Namespacing symbols exported from ideal.c: +#undef quat_lideal_add +#undef quat_lideal_class_gram +#undef quat_lideal_conjugate_without_hnf +#undef quat_lideal_copy +#undef quat_lideal_create +#undef quat_lideal_create_principal +#undef quat_lideal_equals +#undef quat_lideal_generator +#undef quat_lideal_inter +#undef quat_lideal_inverse_lattice_without_hnf +#undef quat_lideal_mul +#undef quat_lideal_norm +#undef quat_lideal_right_order +#undef quat_lideal_right_transporter +#undef quat_order_discriminant +#undef quat_order_is_maximal + +#define quat_lideal_add SQISIGN_NAMESPACE_GENERIC(quat_lideal_add) +#define quat_lideal_class_gram SQISIGN_NAMESPACE_GENERIC(quat_lideal_class_gram) +#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_conjugate_without_hnf) +#define quat_lideal_copy SQISIGN_NAMESPACE_GENERIC(quat_lideal_copy) +#define quat_lideal_create SQISIGN_NAMESPACE_GENERIC(quat_lideal_create) +#define quat_lideal_create_principal SQISIGN_NAMESPACE_GENERIC(quat_lideal_create_principal) +#define quat_lideal_equals SQISIGN_NAMESPACE_GENERIC(quat_lideal_equals) +#define quat_lideal_generator SQISIGN_NAMESPACE_GENERIC(quat_lideal_generator) +#define quat_lideal_inter SQISIGN_NAMESPACE_GENERIC(quat_lideal_inter) +#define 
quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_inverse_lattice_without_hnf) +#define quat_lideal_mul SQISIGN_NAMESPACE_GENERIC(quat_lideal_mul) +#define quat_lideal_norm SQISIGN_NAMESPACE_GENERIC(quat_lideal_norm) +#define quat_lideal_right_order SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_order) +#define quat_lideal_right_transporter SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_transporter) +#define quat_order_discriminant SQISIGN_NAMESPACE_GENERIC(quat_order_discriminant) +#define quat_order_is_maximal SQISIGN_NAMESPACE_GENERIC(quat_order_is_maximal) + +// Namespacing symbols exported from intbig.c: +#undef ibz_abs +#undef ibz_add +#undef ibz_bitsize +#undef ibz_cmp +#undef ibz_cmp_int32 +#undef ibz_convert_to_str +#undef ibz_copy +#undef ibz_copy_digits +#undef ibz_div +#undef ibz_div_2exp +#undef ibz_div_floor +#undef ibz_divides +#undef ibz_finalize +#undef ibz_gcd +#undef ibz_get +#undef ibz_init +#undef ibz_invmod +#undef ibz_is_even +#undef ibz_is_odd +#undef ibz_is_one +#undef ibz_is_zero +#undef ibz_legendre +#undef ibz_mod +#undef ibz_mod_ui +#undef ibz_mul +#undef ibz_neg +#undef ibz_pow +#undef ibz_pow_mod +#undef ibz_print +#undef ibz_probab_prime +#undef ibz_rand_interval +#undef ibz_rand_interval_bits +#undef ibz_rand_interval_i +#undef ibz_rand_interval_minm_m +#undef ibz_set +#undef ibz_set_from_str +#undef ibz_size_in_base +#undef ibz_sqrt +#undef ibz_sqrt_floor +#undef ibz_sqrt_mod_p +#undef ibz_sub +#undef ibz_swap +#undef ibz_to_digits +#undef ibz_two_adic + +#define ibz_abs SQISIGN_NAMESPACE_GENERIC(ibz_abs) +#define ibz_add SQISIGN_NAMESPACE_GENERIC(ibz_add) +#define ibz_bitsize SQISIGN_NAMESPACE_GENERIC(ibz_bitsize) +#define ibz_cmp SQISIGN_NAMESPACE_GENERIC(ibz_cmp) +#define ibz_cmp_int32 SQISIGN_NAMESPACE_GENERIC(ibz_cmp_int32) +#define ibz_convert_to_str SQISIGN_NAMESPACE_GENERIC(ibz_convert_to_str) +#define ibz_copy SQISIGN_NAMESPACE_GENERIC(ibz_copy) +#define ibz_copy_digits SQISIGN_NAMESPACE_GENERIC(ibz_copy_digits) +#define ibz_div SQISIGN_NAMESPACE_GENERIC(ibz_div) +#define ibz_div_2exp SQISIGN_NAMESPACE_GENERIC(ibz_div_2exp) +#define ibz_div_floor SQISIGN_NAMESPACE_GENERIC(ibz_div_floor) +#define ibz_divides SQISIGN_NAMESPACE_GENERIC(ibz_divides) +#define ibz_finalize SQISIGN_NAMESPACE_GENERIC(ibz_finalize) +#define ibz_gcd SQISIGN_NAMESPACE_GENERIC(ibz_gcd) +#define ibz_get SQISIGN_NAMESPACE_GENERIC(ibz_get) +#define ibz_init SQISIGN_NAMESPACE_GENERIC(ibz_init) +#define ibz_invmod SQISIGN_NAMESPACE_GENERIC(ibz_invmod) +#define ibz_is_even SQISIGN_NAMESPACE_GENERIC(ibz_is_even) +#define ibz_is_odd SQISIGN_NAMESPACE_GENERIC(ibz_is_odd) +#define ibz_is_one SQISIGN_NAMESPACE_GENERIC(ibz_is_one) +#define ibz_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_is_zero) +#define ibz_legendre SQISIGN_NAMESPACE_GENERIC(ibz_legendre) +#define ibz_mod SQISIGN_NAMESPACE_GENERIC(ibz_mod) +#define ibz_mod_ui SQISIGN_NAMESPACE_GENERIC(ibz_mod_ui) +#define ibz_mul SQISIGN_NAMESPACE_GENERIC(ibz_mul) +#define ibz_neg SQISIGN_NAMESPACE_GENERIC(ibz_neg) +#define ibz_pow SQISIGN_NAMESPACE_GENERIC(ibz_pow) +#define ibz_pow_mod SQISIGN_NAMESPACE_GENERIC(ibz_pow_mod) +#define ibz_print SQISIGN_NAMESPACE_GENERIC(ibz_print) +#define ibz_probab_prime SQISIGN_NAMESPACE_GENERIC(ibz_probab_prime) +#define ibz_rand_interval SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval) +#define ibz_rand_interval_bits SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_bits) +#define ibz_rand_interval_i SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_i) +#define ibz_rand_interval_minm_m 
SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_minm_m) +#define ibz_set SQISIGN_NAMESPACE_GENERIC(ibz_set) +#define ibz_set_from_str SQISIGN_NAMESPACE_GENERIC(ibz_set_from_str) +#define ibz_size_in_base SQISIGN_NAMESPACE_GENERIC(ibz_size_in_base) +#define ibz_sqrt SQISIGN_NAMESPACE_GENERIC(ibz_sqrt) +#define ibz_sqrt_floor SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_floor) +#define ibz_sqrt_mod_p SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_mod_p) +#define ibz_sub SQISIGN_NAMESPACE_GENERIC(ibz_sub) +#define ibz_swap SQISIGN_NAMESPACE_GENERIC(ibz_swap) +#define ibz_to_digits SQISIGN_NAMESPACE_GENERIC(ibz_to_digits) +#define ibz_two_adic SQISIGN_NAMESPACE_GENERIC(ibz_two_adic) + +// Namespacing symbols exported from integers.c: +#undef ibz_cornacchia_prime +#undef ibz_generate_random_prime + +#define ibz_cornacchia_prime SQISIGN_NAMESPACE_GENERIC(ibz_cornacchia_prime) +#define ibz_generate_random_prime SQISIGN_NAMESPACE_GENERIC(ibz_generate_random_prime) + +// Namespacing symbols exported from isog_chains.c: +#undef ec_eval_even +#undef ec_eval_small_chain +#undef ec_iso_eval +#undef ec_isomorphism + +#define ec_eval_even SQISIGN_NAMESPACE(ec_eval_even) +#define ec_eval_small_chain SQISIGN_NAMESPACE(ec_eval_small_chain) +#define ec_iso_eval SQISIGN_NAMESPACE(ec_iso_eval) +#define ec_isomorphism SQISIGN_NAMESPACE(ec_isomorphism) + +// Namespacing symbols exported from keygen.c: +#undef protocols_keygen +#undef secret_key_finalize +#undef secret_key_init + +#define protocols_keygen SQISIGN_NAMESPACE(protocols_keygen) +#define secret_key_finalize SQISIGN_NAMESPACE(secret_key_finalize) +#define secret_key_init SQISIGN_NAMESPACE(secret_key_init) + +// Namespacing symbols exported from l2.c: +#undef quat_lattice_lll +#undef quat_lll_core + +#define quat_lattice_lll SQISIGN_NAMESPACE_GENERIC(quat_lattice_lll) +#define quat_lll_core SQISIGN_NAMESPACE_GENERIC(quat_lll_core) + +// Namespacing symbols exported from lat_ball.c: +#undef quat_lattice_bound_parallelogram +#undef quat_lattice_sample_from_ball + +#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE_GENERIC(quat_lattice_bound_parallelogram) +#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE_GENERIC(quat_lattice_sample_from_ball) + +// Namespacing symbols exported from lattice.c: +#undef quat_lattice_add +#undef quat_lattice_alg_elem_mul +#undef quat_lattice_conjugate_without_hnf +#undef quat_lattice_contains +#undef quat_lattice_dual_without_hnf +#undef quat_lattice_equal +#undef quat_lattice_gram +#undef quat_lattice_hnf +#undef quat_lattice_inclusion +#undef quat_lattice_index +#undef quat_lattice_intersect +#undef quat_lattice_mat_alg_coord_mul_without_hnf +#undef quat_lattice_mul +#undef quat_lattice_reduce_denom + +#define quat_lattice_add SQISIGN_NAMESPACE_GENERIC(quat_lattice_add) +#define quat_lattice_alg_elem_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_alg_elem_mul) +#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_conjugate_without_hnf) +#define quat_lattice_contains SQISIGN_NAMESPACE_GENERIC(quat_lattice_contains) +#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_dual_without_hnf) +#define quat_lattice_equal SQISIGN_NAMESPACE_GENERIC(quat_lattice_equal) +#define quat_lattice_gram SQISIGN_NAMESPACE_GENERIC(quat_lattice_gram) +#define quat_lattice_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_hnf) +#define quat_lattice_inclusion SQISIGN_NAMESPACE_GENERIC(quat_lattice_inclusion) +#define quat_lattice_index SQISIGN_NAMESPACE_GENERIC(quat_lattice_index) +#define quat_lattice_intersect 
SQISIGN_NAMESPACE_GENERIC(quat_lattice_intersect) +#define quat_lattice_mat_alg_coord_mul_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_mat_alg_coord_mul_without_hnf) +#define quat_lattice_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_mul) +#define quat_lattice_reduce_denom SQISIGN_NAMESPACE_GENERIC(quat_lattice_reduce_denom) + +// Namespacing symbols exported from lll_applications.c: +#undef quat_lideal_lideal_mul_reduced +#undef quat_lideal_prime_norm_reduced_equivalent +#undef quat_lideal_reduce_basis + +#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE_GENERIC(quat_lideal_lideal_mul_reduced) +#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE_GENERIC(quat_lideal_prime_norm_reduced_equivalent) +#define quat_lideal_reduce_basis SQISIGN_NAMESPACE_GENERIC(quat_lideal_reduce_basis) + +// Namespacing symbols exported from lll_verification.c: +#undef ibq_vec_4_copy_ibz +#undef quat_lll_bilinear +#undef quat_lll_gram_schmidt_transposed_with_ibq +#undef quat_lll_set_ibq_parameters +#undef quat_lll_verify + +#define ibq_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_copy_ibz) +#define quat_lll_bilinear SQISIGN_NAMESPACE_GENERIC(quat_lll_bilinear) +#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE_GENERIC(quat_lll_gram_schmidt_transposed_with_ibq) +#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE_GENERIC(quat_lll_set_ibq_parameters) +#define quat_lll_verify SQISIGN_NAMESPACE_GENERIC(quat_lll_verify) + +// Namespacing symbols exported from mem.c: +#undef sqisign_secure_clear +#undef sqisign_secure_free + +#define sqisign_secure_clear SQISIGN_NAMESPACE_GENERIC(sqisign_secure_clear) +#define sqisign_secure_free SQISIGN_NAMESPACE_GENERIC(sqisign_secure_free) + +// Namespacing symbols exported from mp.c: +#undef MUL +#undef mp_add +#undef mp_compare +#undef mp_copy +#undef mp_inv_2e +#undef mp_invert_matrix +#undef mp_is_one +#undef mp_is_zero +#undef mp_mod_2exp +#undef mp_mul +#undef mp_mul2 +#undef mp_neg +#undef mp_print +#undef mp_shiftl +#undef mp_shiftr +#undef mp_sub +#undef multiple_mp_shiftl +#undef select_ct +#undef swap_ct + +#define MUL SQISIGN_NAMESPACE_GENERIC(MUL) +#define mp_add SQISIGN_NAMESPACE_GENERIC(mp_add) +#define mp_compare SQISIGN_NAMESPACE_GENERIC(mp_compare) +#define mp_copy SQISIGN_NAMESPACE_GENERIC(mp_copy) +#define mp_inv_2e SQISIGN_NAMESPACE_GENERIC(mp_inv_2e) +#define mp_invert_matrix SQISIGN_NAMESPACE_GENERIC(mp_invert_matrix) +#define mp_is_one SQISIGN_NAMESPACE_GENERIC(mp_is_one) +#define mp_is_zero SQISIGN_NAMESPACE_GENERIC(mp_is_zero) +#define mp_mod_2exp SQISIGN_NAMESPACE_GENERIC(mp_mod_2exp) +#define mp_mul SQISIGN_NAMESPACE_GENERIC(mp_mul) +#define mp_mul2 SQISIGN_NAMESPACE_GENERIC(mp_mul2) +#define mp_neg SQISIGN_NAMESPACE_GENERIC(mp_neg) +#define mp_print SQISIGN_NAMESPACE_GENERIC(mp_print) +#define mp_shiftl SQISIGN_NAMESPACE_GENERIC(mp_shiftl) +#define mp_shiftr SQISIGN_NAMESPACE_GENERIC(mp_shiftr) +#define mp_sub SQISIGN_NAMESPACE_GENERIC(mp_sub) +#define multiple_mp_shiftl SQISIGN_NAMESPACE_GENERIC(multiple_mp_shiftl) +#define select_ct SQISIGN_NAMESPACE_GENERIC(select_ct) +#define swap_ct SQISIGN_NAMESPACE_GENERIC(swap_ct) + +// Namespacing symbols exported from normeq.c: +#undef quat_change_to_O0_basis +#undef quat_lattice_O0_set +#undef quat_lattice_O0_set_extremal +#undef quat_order_elem_create +#undef quat_represent_integer +#undef quat_sampling_random_ideal_O0_given_norm + +#define quat_change_to_O0_basis SQISIGN_NAMESPACE_GENERIC(quat_change_to_O0_basis) +#define quat_lattice_O0_set 
SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set) +#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set_extremal) +#define quat_order_elem_create SQISIGN_NAMESPACE_GENERIC(quat_order_elem_create) +#define quat_represent_integer SQISIGN_NAMESPACE_GENERIC(quat_represent_integer) +#define quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE_GENERIC(quat_sampling_random_ideal_O0_given_norm) + +// Namespacing symbols exported from printer.c: +#undef ibz_mat_2x2_print +#undef ibz_mat_4x4_print +#undef ibz_vec_2_print +#undef ibz_vec_4_print +#undef quat_alg_elem_print +#undef quat_alg_print +#undef quat_lattice_print +#undef quat_left_ideal_print + +#define ibz_mat_2x2_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_print) +#define ibz_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_print) +#define ibz_vec_2_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_print) +#define ibz_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_print) +#define quat_alg_elem_print SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_print) +#define quat_alg_print SQISIGN_NAMESPACE_GENERIC(quat_alg_print) +#define quat_lattice_print SQISIGN_NAMESPACE_GENERIC(quat_lattice_print) +#define quat_left_ideal_print SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_print) + +// Namespacing symbols exported from random_input_generation.c: +#undef quat_test_input_random_ideal_generation +#undef quat_test_input_random_ideal_lattice_generation +#undef quat_test_input_random_lattice_generation + +#define quat_test_input_random_ideal_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_generation) +#define quat_test_input_random_ideal_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_lattice_generation) +#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_lattice_generation) + +// Namespacing symbols exported from rationals.c: +#undef ibq_abs +#undef ibq_add +#undef ibq_cmp +#undef ibq_copy +#undef ibq_finalize +#undef ibq_init +#undef ibq_inv +#undef ibq_is_ibz +#undef ibq_is_one +#undef ibq_is_zero +#undef ibq_mat_4x4_finalize +#undef ibq_mat_4x4_init +#undef ibq_mat_4x4_print +#undef ibq_mul +#undef ibq_neg +#undef ibq_reduce +#undef ibq_set +#undef ibq_sub +#undef ibq_to_ibz +#undef ibq_vec_4_finalize +#undef ibq_vec_4_init +#undef ibq_vec_4_print + +#define ibq_abs SQISIGN_NAMESPACE_GENERIC(ibq_abs) +#define ibq_add SQISIGN_NAMESPACE_GENERIC(ibq_add) +#define ibq_cmp SQISIGN_NAMESPACE_GENERIC(ibq_cmp) +#define ibq_copy SQISIGN_NAMESPACE_GENERIC(ibq_copy) +#define ibq_finalize SQISIGN_NAMESPACE_GENERIC(ibq_finalize) +#define ibq_init SQISIGN_NAMESPACE_GENERIC(ibq_init) +#define ibq_inv SQISIGN_NAMESPACE_GENERIC(ibq_inv) +#define ibq_is_ibz SQISIGN_NAMESPACE_GENERIC(ibq_is_ibz) +#define ibq_is_one SQISIGN_NAMESPACE_GENERIC(ibq_is_one) +#define ibq_is_zero SQISIGN_NAMESPACE_GENERIC(ibq_is_zero) +#define ibq_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_finalize) +#define ibq_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_init) +#define ibq_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_print) +#define ibq_mul SQISIGN_NAMESPACE_GENERIC(ibq_mul) +#define ibq_neg SQISIGN_NAMESPACE_GENERIC(ibq_neg) +#define ibq_reduce SQISIGN_NAMESPACE_GENERIC(ibq_reduce) +#define ibq_set SQISIGN_NAMESPACE_GENERIC(ibq_set) +#define ibq_sub SQISIGN_NAMESPACE_GENERIC(ibq_sub) +#define ibq_to_ibz SQISIGN_NAMESPACE_GENERIC(ibq_to_ibz) +#define ibq_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_finalize) +#define ibq_vec_4_init 
SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_init) +#define ibq_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_print) + +// Namespacing symbols exported from sign.c: +#undef protocols_sign + +#define protocols_sign SQISIGN_NAMESPACE(protocols_sign) + +// Namespacing symbols exported from sqisign.c: +#undef sqisign_keypair +#undef sqisign_open +#undef sqisign_sign +#undef sqisign_sign_signature +#undef sqisign_verify +#undef sqisign_verify_signature + +#define sqisign_keypair SQISIGN_NAMESPACE(sqisign_keypair) +#define sqisign_open SQISIGN_NAMESPACE(sqisign_open) +#define sqisign_sign SQISIGN_NAMESPACE(sqisign_sign) +#define sqisign_sign_signature SQISIGN_NAMESPACE(sqisign_sign_signature) +#define sqisign_verify SQISIGN_NAMESPACE(sqisign_verify) +#define sqisign_verify_signature SQISIGN_NAMESPACE(sqisign_verify_signature) + +// Namespacing symbols exported from theta_isogenies.c: +#undef theta_chain_compute_and_eval +#undef theta_chain_compute_and_eval_randomized +#undef theta_chain_compute_and_eval_verify + +#define theta_chain_compute_and_eval SQISIGN_NAMESPACE(theta_chain_compute_and_eval) +#define theta_chain_compute_and_eval_randomized SQISIGN_NAMESPACE(theta_chain_compute_and_eval_randomized) +#define theta_chain_compute_and_eval_verify SQISIGN_NAMESPACE(theta_chain_compute_and_eval_verify) + +// Namespacing symbols exported from theta_structure.c: +#undef double_iter +#undef double_point +#undef is_product_theta_point +#undef theta_precomputation + +#define double_iter SQISIGN_NAMESPACE(double_iter) +#define double_point SQISIGN_NAMESPACE(double_point) +#define is_product_theta_point SQISIGN_NAMESPACE(is_product_theta_point) +#define theta_precomputation SQISIGN_NAMESPACE(theta_precomputation) + +// Namespacing symbols exported from verify.c: +#undef protocols_verify + +#define protocols_verify SQISIGN_NAMESPACE(protocols_verify) + +// Namespacing symbols exported from xeval.c: +#undef xeval_2 +#undef xeval_2_singular +#undef xeval_4 + +#define xeval_2 SQISIGN_NAMESPACE(xeval_2) +#define xeval_2_singular SQISIGN_NAMESPACE(xeval_2_singular) +#define xeval_4 SQISIGN_NAMESPACE(xeval_4) + +// Namespacing symbols exported from xisog.c: +#undef xisog_2 +#undef xisog_2_singular +#undef xisog_4 + +#define xisog_2 SQISIGN_NAMESPACE(xisog_2) +#define xisog_2_singular SQISIGN_NAMESPACE(xisog_2_singular) +#define xisog_4 SQISIGN_NAMESPACE(xisog_4) + +// Namespacing symbols from precomp: +#undef BASIS_E0_PX +#undef BASIS_E0_QX +#undef p_cofactor_for_2f +#undef CURVES_WITH_ENDOMORPHISMS +#undef EVEN_INDEX +#undef CHI_EVAL +#undef FP2_CONSTANTS +#undef SPLITTING_TRANSFORMS +#undef NORMALIZATION_TRANSFORMS +#undef QUAT_prime_cofactor +#undef QUATALG_PINFTY +#undef EXTREMAL_ORDERS +#undef CONNECTING_IDEALS +#undef CONJUGATING_ELEMENTS +#undef TWO_TO_SECURITY_BITS +#undef TORSION_PLUS_2POWER +#undef SEC_DEGREE +#undef COM_DEGREE + +#define BASIS_E0_PX SQISIGN_NAMESPACE(BASIS_E0_PX) +#define BASIS_E0_QX SQISIGN_NAMESPACE(BASIS_E0_QX) +#define p_cofactor_for_2f SQISIGN_NAMESPACE(p_cofactor_for_2f) +#define CURVES_WITH_ENDOMORPHISMS SQISIGN_NAMESPACE(CURVES_WITH_ENDOMORPHISMS) +#define EVEN_INDEX SQISIGN_NAMESPACE(EVEN_INDEX) +#define CHI_EVAL SQISIGN_NAMESPACE(CHI_EVAL) +#define FP2_CONSTANTS SQISIGN_NAMESPACE(FP2_CONSTANTS) +#define SPLITTING_TRANSFORMS SQISIGN_NAMESPACE(SPLITTING_TRANSFORMS) +#define NORMALIZATION_TRANSFORMS SQISIGN_NAMESPACE(NORMALIZATION_TRANSFORMS) +#define QUAT_prime_cofactor SQISIGN_NAMESPACE(QUAT_prime_cofactor) +#define QUATALG_PINFTY SQISIGN_NAMESPACE(QUATALG_PINFTY) +#define 
EXTREMAL_ORDERS SQISIGN_NAMESPACE(EXTREMAL_ORDERS) +#define CONNECTING_IDEALS SQISIGN_NAMESPACE(CONNECTING_IDEALS) +#define CONJUGATING_ELEMENTS SQISIGN_NAMESPACE(CONJUGATING_ELEMENTS) +#define TWO_TO_SECURITY_BITS SQISIGN_NAMESPACE(TWO_TO_SECURITY_BITS) +#define TORSION_PLUS_2POWER SQISIGN_NAMESPACE(TORSION_PLUS_2POWER) +#define SEC_DEGREE SQISIGN_NAMESPACE(SEC_DEGREE) +#define COM_DEGREE SQISIGN_NAMESPACE(COM_DEGREE) + + +#endif + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign_parameters.txt b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign_parameters.txt new file mode 100644 index 0000000000..52241becdc --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign_parameters.txt @@ -0,0 +1,3 @@ +lvl = 3 +p = 0x40ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff +num_orders = 8 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.c new file mode 100644 index 0000000000..478a9ab25b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.c @@ -0,0 +1,1283 @@ +#include "theta_isogenies.h" +#include +#include +#include +#include +#include + +// Select a base change matrix in constant time, with M1 a regular +// base change matrix and M2 a precomputed base change matrix +// If option = 0 then M <- M1, else if option = 0xFF...FF then M <- M2 +static inline void +select_base_change_matrix(basis_change_matrix_t *M, + const basis_change_matrix_t *M1, + const precomp_basis_change_matrix_t *M2, + const uint32_t option) +{ + for (int i = 0; i < 4; i++) + for (int j = 0; j < 4; j++) + fp2_select(&M->m[i][j], &M1->m[i][j], &FP2_CONSTANTS[M2->m[i][j]], option); +} + +// Set a regular base change matrix from a precomputed one +static inline void +set_base_change_matrix_from_precomp(basis_change_matrix_t *res, const precomp_basis_change_matrix_t *M) +{ + for (int i = 0; i < 4; i++) + for (int j = 0; j < 4; j++) + res->m[i][j] = FP2_CONSTANTS[M->m[i][j]]; +} + +static inline void +choose_index_theta_point(fp2_t *res, int ind, const theta_point_t *T) +{ + const fp2_t *src = NULL; + switch (ind % 4) { + case 0: + src = &T->x; + break; + case 1: + src = &T->y; + break; + case 2: + src = &T->z; + break; + case 3: + src = &T->t; + break; + default: + assert(0); + } + fp2_copy(res, src); +} + +// same as apply_isomorphism method but more efficient when the t component of P is zero. 
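/*
 * For orientation: apply_isomorphism_general below is a plain 4x4 matrix-vector
 * product over GF(p^2), and the fourth column is skipped whenever the caller
 * knows that P->t is zero.  A minimal standalone sketch of the same access
 * pattern, for illustration only: matvec4_sketch is a hypothetical helper and
 * uint64_t merely stands in for fp2_t, with fp2_mul/fp2_add replacing * and +
 * in the real code.
 */
#include <stdint.h>

static void
matvec4_sketch(uint64_t out[4], const uint64_t M[4][4], const uint64_t v[4], int use_last_column)
{
    for (int i = 0; i < 4; i++) {
        uint64_t acc = 0;
        // Skip column 3 when the caller guarantees v[3] == 0.
        for (int j = 0; j < (use_last_column ? 4 : 3); j++)
            acc += M[i][j] * v[j];
        out[i] = acc;
    }
}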
+static void +apply_isomorphism_general(theta_point_t *res, + const basis_change_matrix_t *M, + const theta_point_t *P, + const bool Pt_not_zero) +{ + fp2_t x1; + theta_point_t temp; + + fp2_mul(&temp.x, &P->x, &M->m[0][0]); + fp2_mul(&x1, &P->y, &M->m[0][1]); + fp2_add(&temp.x, &temp.x, &x1); + fp2_mul(&x1, &P->z, &M->m[0][2]); + fp2_add(&temp.x, &temp.x, &x1); + + fp2_mul(&temp.y, &P->x, &M->m[1][0]); + fp2_mul(&x1, &P->y, &M->m[1][1]); + fp2_add(&temp.y, &temp.y, &x1); + fp2_mul(&x1, &P->z, &M->m[1][2]); + fp2_add(&temp.y, &temp.y, &x1); + + fp2_mul(&temp.z, &P->x, &M->m[2][0]); + fp2_mul(&x1, &P->y, &M->m[2][1]); + fp2_add(&temp.z, &temp.z, &x1); + fp2_mul(&x1, &P->z, &M->m[2][2]); + fp2_add(&temp.z, &temp.z, &x1); + + fp2_mul(&temp.t, &P->x, &M->m[3][0]); + fp2_mul(&x1, &P->y, &M->m[3][1]); + fp2_add(&temp.t, &temp.t, &x1); + fp2_mul(&x1, &P->z, &M->m[3][2]); + fp2_add(&temp.t, &temp.t, &x1); + + if (Pt_not_zero) { + fp2_mul(&x1, &P->t, &M->m[0][3]); + fp2_add(&temp.x, &temp.x, &x1); + + fp2_mul(&x1, &P->t, &M->m[1][3]); + fp2_add(&temp.y, &temp.y, &x1); + + fp2_mul(&x1, &P->t, &M->m[2][3]); + fp2_add(&temp.z, &temp.z, &x1); + + fp2_mul(&x1, &P->t, &M->m[3][3]); + fp2_add(&temp.t, &temp.t, &x1); + } + + fp2_copy(&res->x, &temp.x); + fp2_copy(&res->y, &temp.y); + fp2_copy(&res->z, &temp.z); + fp2_copy(&res->t, &temp.t); +} + +static void +apply_isomorphism(theta_point_t *res, const basis_change_matrix_t *M, const theta_point_t *P) +{ + apply_isomorphism_general(res, M, P, true); +} + +// set res = M1 * M2 with matrix multiplication +static void +base_change_matrix_multiplication(basis_change_matrix_t *res, + const basis_change_matrix_t *M1, + const basis_change_matrix_t *M2) +{ + basis_change_matrix_t tmp; + fp2_t sum, m_ik, m_kj; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + fp2_set_zero(&sum); + for (int k = 0; k < 4; k++) { + m_ik = M1->m[i][k]; + m_kj = M2->m[k][j]; + fp2_mul(&m_ik, &m_ik, &m_kj); + fp2_add(&sum, &sum, &m_ik); + } + tmp.m[i][j] = sum; + } + } + *res = tmp; +} + +// compute the theta_point corresponding to the couple of point T on an elliptic product +static void +base_change(theta_point_t *out, const theta_gluing_t *phi, const theta_couple_point_t *T) +{ + theta_point_t null_point; + + // null_point = (a : b : c : d) + // a = P1.x P2.x, b = P1.x P2.z, c = P1.z P2.x, d = P1.z P2.z + fp2_mul(&null_point.x, &T->P1.x, &T->P2.x); + fp2_mul(&null_point.y, &T->P1.x, &T->P2.z); + fp2_mul(&null_point.z, &T->P2.x, &T->P1.z); + fp2_mul(&null_point.t, &T->P1.z, &T->P2.z); + + // Apply the basis change + apply_isomorphism(out, &phi->M, &null_point); +} + +static void +action_by_translation_z_and_det(fp2_t *z_inv, fp2_t *det_inv, const ec_point_t *P4, const ec_point_t *P2) +{ + // Store the Z-coordinate to invert + fp2_copy(z_inv, &P4->z); + + // Then collect detij = xij wij - uij zij + fp2_t tmp; + fp2_mul(det_inv, &P4->x, &P2->z); + fp2_mul(&tmp, &P4->z, &P2->x); + fp2_sub(det_inv, det_inv, &tmp); +} + +static void +action_by_translation_compute_matrix(translation_matrix_t *G, + const ec_point_t *P4, + const ec_point_t *P2, + const fp2_t *z_inv, + const fp2_t *det_inv) +{ + fp2_t tmp; + + // Gi.g10 = uij xij /detij - xij/zij + fp2_mul(&tmp, &P4->x, z_inv); + fp2_mul(&G->g10, &P4->x, &P2->x); + fp2_mul(&G->g10, &G->g10, det_inv); + fp2_sub(&G->g10, &G->g10, &tmp); + + // Gi.g11 = uij zij * detij + fp2_mul(&G->g11, &P2->x, det_inv); + fp2_mul(&G->g11, &G->g11, &P4->z); + + // Gi.g00 = -Gi.g11 + fp2_neg(&G->g00, &G->g11); + + // Gi.g01 = - wij zij detij + 
fp2_mul(&G->g01, &P2->z, det_inv);
+    fp2_mul(&G->g01, &G->g01, &P4->z);
+    fp2_neg(&G->g01, &G->g01);
+}
+
+// Returns 1 if the basis is as expected and 0 otherwise
+// We only expect this to fail for malformed signatures, so
+// do not require this to run in constant time.
+static int
+verify_two_torsion(const theta_couple_point_t *K1_2, const theta_couple_point_t *K2_2, const theta_couple_curve_t *E12)
+{
+    // First check if any point in K1_2 or K2_2 is zero; if any is, the points did not have
+    // order 8 when we started gluing
+    if (ec_is_zero(&K1_2->P1) | ec_is_zero(&K1_2->P2) | ec_is_zero(&K2_2->P1) | ec_is_zero(&K2_2->P2)) {
+        return 0;
+    }
+
+    // Now ensure that P1, Q1 and P2, Q2 are independent. For points of order two this means
+    // that they're not the same
+    if (ec_is_equal(&K1_2->P1, &K2_2->P1) | ec_is_equal(&K1_2->P2, &K2_2->P2)) {
+        return 0;
+    }
+
+    // Finally, double the points to ensure they all have order exactly 2
+    theta_couple_point_t O1, O2;
+    double_couple_point(&O1, K1_2, E12);
+    double_couple_point(&O2, K2_2, E12);
+    // If this check fails then the points had order 2*f for some f, and the kernel is malformed.
+    if (!(ec_is_zero(&O1.P1) & ec_is_zero(&O1.P2) & ec_is_zero(&O2.P1) & ec_is_zero(&O2.P2))) {
+        return 0;
+    }
+
+    return 1;
+}
+
+// Computes the action by translation for four points
+// (P1, P2) and (Q1, Q2) on E1 x E2 simultaneously to
+// save on inversions.
+// Returns 0 if the doubled points do not give valid two-torsion
+// (i.e. some Pi or Qi does not have order exactly 4) and 1 otherwise
+static int
+action_by_translation(translation_matrix_t *Gi,
+                      const theta_couple_point_t *K1_4,
+                      const theta_couple_point_t *K2_4,
+                      const theta_couple_curve_t *E12)
+{
+    // Compute points of order 2 from Ki_4
+    theta_couple_point_t K1_2, K2_2;
+    double_couple_point(&K1_2, K1_4, E12);
+    double_couple_point(&K2_2, K2_4, E12);
+
+    if (!verify_two_torsion(&K1_2, &K2_2, E12)) {
+        return 0;
+    }
+
+    // We need to invert four Z coordinates and
+    // four determinants which we do with batched
+    // inversion
+    fp2_t inverses[8];
+    action_by_translation_z_and_det(&inverses[0], &inverses[4], &K1_4->P1, &K1_2.P1);
+    action_by_translation_z_and_det(&inverses[1], &inverses[5], &K1_4->P2, &K1_2.P2);
+    action_by_translation_z_and_det(&inverses[2], &inverses[6], &K2_4->P1, &K2_2.P1);
+    action_by_translation_z_and_det(&inverses[3], &inverses[7], &K2_4->P2, &K2_2.P2);
+
+    fp2_batched_inv(inverses, 8);
+    if (fp2_is_zero(&inverses[0]))
+        return 0; // something was wrong with our input (which somehow was not caught by
+                  // verify_two_torsion)
+
+    action_by_translation_compute_matrix(&Gi[0], &K1_4->P1, &K1_2.P1, &inverses[0], &inverses[4]);
+    action_by_translation_compute_matrix(&Gi[1], &K1_4->P2, &K1_2.P2, &inverses[1], &inverses[5]);
+    action_by_translation_compute_matrix(&Gi[2], &K2_4->P1, &K2_2.P1, &inverses[2], &inverses[6]);
+    action_by_translation_compute_matrix(&Gi[3], &K2_4->P2, &K2_2.P2, &inverses[3], &inverses[7]);
+
+    return 1;
+}
+
+// Given the appropriate four torsion, computes the
+// change of basis to compute the correct theta null
+// point.
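+// (The construction combines the four 2x2 translation matrices Gi returned by
+// action_by_translation: the first row of M is a trace-type expression in the
+// products M11 * M21 and M12 * M22, and rows two to four are obtained by acting
+// on it with (0, K2_4.P2), (K1_4.P1, 0) and (K1_4.P1, K2_4.P2) respectively,
+// mirroring the inline comments in the body below.)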
+// Returns 0 if the order of K1_4 or K2_4 is not 4 +static int +gluing_change_of_basis(basis_change_matrix_t *M, + const theta_couple_point_t *K1_4, + const theta_couple_point_t *K2_4, + const theta_couple_curve_t *E12) +{ + // Compute the four 2x2 matrices for the action by translation + // on the four points: + translation_matrix_t Gi[4]; + if (!action_by_translation(Gi, K1_4, K2_4, E12)) + return 0; + + // Computation of the 4x4 matrix from Mij + // t001, t101 (resp t002, t102) first column of M11 * M21 (resp M12 * M22) + fp2_t t001, t101, t002, t102, tmp; + + fp2_mul(&t001, &Gi[0].g00, &Gi[2].g00); + fp2_mul(&tmp, &Gi[0].g01, &Gi[2].g10); + fp2_add(&t001, &t001, &tmp); + + fp2_mul(&t101, &Gi[0].g10, &Gi[2].g00); + fp2_mul(&tmp, &Gi[0].g11, &Gi[2].g10); + fp2_add(&t101, &t101, &tmp); + + fp2_mul(&t002, &Gi[1].g00, &Gi[3].g00); + fp2_mul(&tmp, &Gi[1].g01, &Gi[3].g10); + fp2_add(&t002, &t002, &tmp); + + fp2_mul(&t102, &Gi[1].g10, &Gi[3].g00); + fp2_mul(&tmp, &Gi[1].g11, &Gi[3].g10); + fp2_add(&t102, &t102, &tmp); + + // trace for the first row + fp2_set_one(&M->m[0][0]); + fp2_mul(&tmp, &t001, &t002); + fp2_add(&M->m[0][0], &M->m[0][0], &tmp); + fp2_mul(&tmp, &Gi[2].g00, &Gi[3].g00); + fp2_add(&M->m[0][0], &M->m[0][0], &tmp); + fp2_mul(&tmp, &Gi[0].g00, &Gi[1].g00); + fp2_add(&M->m[0][0], &M->m[0][0], &tmp); + + fp2_mul(&M->m[0][1], &t001, &t102); + fp2_mul(&tmp, &Gi[2].g00, &Gi[3].g10); + fp2_add(&M->m[0][1], &M->m[0][1], &tmp); + fp2_mul(&tmp, &Gi[0].g00, &Gi[1].g10); + fp2_add(&M->m[0][1], &M->m[0][1], &tmp); + + fp2_mul(&M->m[0][2], &t101, &t002); + fp2_mul(&tmp, &Gi[2].g10, &Gi[3].g00); + fp2_add(&M->m[0][2], &M->m[0][2], &tmp); + fp2_mul(&tmp, &Gi[0].g10, &Gi[1].g00); + fp2_add(&M->m[0][2], &M->m[0][2], &tmp); + + fp2_mul(&M->m[0][3], &t101, &t102); + fp2_mul(&tmp, &Gi[2].g10, &Gi[3].g10); + fp2_add(&M->m[0][3], &M->m[0][3], &tmp); + fp2_mul(&tmp, &Gi[0].g10, &Gi[1].g10); + fp2_add(&M->m[0][3], &M->m[0][3], &tmp); + + // Compute the action of (0,out.K2_4.P2) for the second row + fp2_mul(&tmp, &Gi[3].g01, &M->m[0][1]); + fp2_mul(&M->m[1][0], &Gi[3].g00, &M->m[0][0]); + fp2_add(&M->m[1][0], &M->m[1][0], &tmp); + + fp2_mul(&tmp, &Gi[3].g11, &M->m[0][1]); + fp2_mul(&M->m[1][1], &Gi[3].g10, &M->m[0][0]); + fp2_add(&M->m[1][1], &M->m[1][1], &tmp); + + fp2_mul(&tmp, &Gi[3].g01, &M->m[0][3]); + fp2_mul(&M->m[1][2], &Gi[3].g00, &M->m[0][2]); + fp2_add(&M->m[1][2], &M->m[1][2], &tmp); + + fp2_mul(&tmp, &Gi[3].g11, &M->m[0][3]); + fp2_mul(&M->m[1][3], &Gi[3].g10, &M->m[0][2]); + fp2_add(&M->m[1][3], &M->m[1][3], &tmp); + + // compute the action of (K1_4.P1,0) for the third row + fp2_mul(&tmp, &Gi[0].g01, &M->m[0][2]); + fp2_mul(&M->m[2][0], &Gi[0].g00, &M->m[0][0]); + fp2_add(&M->m[2][0], &M->m[2][0], &tmp); + + fp2_mul(&tmp, &Gi[0].g01, &M->m[0][3]); + fp2_mul(&M->m[2][1], &Gi[0].g00, &M->m[0][1]); + fp2_add(&M->m[2][1], &M->m[2][1], &tmp); + + fp2_mul(&tmp, &Gi[0].g11, &M->m[0][2]); + fp2_mul(&M->m[2][2], &Gi[0].g10, &M->m[0][0]); + fp2_add(&M->m[2][2], &M->m[2][2], &tmp); + + fp2_mul(&tmp, &Gi[0].g11, &M->m[0][3]); + fp2_mul(&M->m[2][3], &Gi[0].g10, &M->m[0][1]); + fp2_add(&M->m[2][3], &M->m[2][3], &tmp); + + // compute the action of (K1_4.P1,K2_4.P2) for the final row + fp2_mul(&tmp, &Gi[0].g01, &M->m[1][2]); + fp2_mul(&M->m[3][0], &Gi[0].g00, &M->m[1][0]); + fp2_add(&M->m[3][0], &M->m[3][0], &tmp); + + fp2_mul(&tmp, &Gi[0].g01, &M->m[1][3]); + fp2_mul(&M->m[3][1], &Gi[0].g00, &M->m[1][1]); + fp2_add(&M->m[3][1], &M->m[3][1], &tmp); + + fp2_mul(&tmp, &Gi[0].g11, &M->m[1][2]); + 
fp2_mul(&M->m[3][2], &Gi[0].g10, &M->m[1][0]);
+    fp2_add(&M->m[3][2], &M->m[3][2], &tmp);
+
+    fp2_mul(&tmp, &Gi[0].g11, &M->m[1][3]);
+    fp2_mul(&M->m[3][3], &Gi[0].g10, &M->m[1][1]);
+    fp2_add(&M->m[3][3], &M->m[3][3], &tmp);
+
+    return 1;
+}
+
+/**
+ * @brief Compute the gluing isogeny from an elliptic product
+ *
+ * @param out Output: the theta_gluing
+ * @param E12 an elliptic product E1 x E2
+ * @param xyK1_8 a couple point in E1[8] x E2[8]
+ * @param xyK2_8 a couple point in E1[8] x E2[8]
+ * @param verify when true, run extra consistency checks on the supplied 8-torsion
+ *
+ * out : E1xE2 -> A of kernel [4](xyK1_8, xyK2_8)
+ * if the kernel supplied has the incorrect order, or the gluing seems malformed,
+ * returns 0, otherwise returns 1.
+ */
+static int
+gluing_compute(theta_gluing_t *out,
+               const theta_couple_curve_t *E12,
+               const theta_couple_jac_point_t *xyK1_8,
+               const theta_couple_jac_point_t *xyK2_8,
+               bool verify)
+{
+    // Ensure that we have been given the eight torsion
+#ifndef NDEBUG
+    {
+        int check = test_jac_order_twof(&xyK1_8->P1, &E12->E1, 3);
+        if (!check)
+            debug_print("xyK1_8->P1 does not have order 8");
+        check = test_jac_order_twof(&xyK2_8->P1, &E12->E1, 3);
+        if (!check)
+            debug_print("xyK2_8->P1 does not have order 8");
+        check = test_jac_order_twof(&xyK1_8->P2, &E12->E2, 3);
+        if (!check)
+            debug_print("xyK1_8->P2 does not have order 8");
+        check = test_jac_order_twof(&xyK2_8->P2, &E12->E2, 3);
+        if (!check)
+            debug_print("xyK2_8->P2 does not have order 8");
+    }
+#endif
+
+    out->xyK1_8 = *xyK1_8;
+    out->domain = *E12;
+
+    // Given points in E[8] x E[8] we need the four torsion below
+    theta_couple_jac_point_t xyK1_4, xyK2_4;
+
+    double_couple_jac_point(&xyK1_4, xyK1_8, E12);
+    double_couple_jac_point(&xyK2_4, xyK2_8, E12);
+
+    // Convert from (X:Y:Z) coordinates to (X:Z)
+    theta_couple_point_t K1_8, K2_8;
+    theta_couple_point_t K1_4, K2_4;
+
+    couple_jac_to_xz(&K1_8, xyK1_8);
+    couple_jac_to_xz(&K2_8, xyK2_8);
+    couple_jac_to_xz(&K1_4, &xyK1_4);
+    couple_jac_to_xz(&K2_4, &xyK2_4);
+
+    // Set the basis change matrix; if we have not been given a valid K[8] for this computation,
+    // gluing_change_of_basis will detect this and return 0
+    if (!gluing_change_of_basis(&out->M, &K1_4, &K2_4, E12)) {
+        debug_print("gluing failed as kernel does not have correct order");
+        return 0;
+    }
+
+    // apply the base change to the kernel
+    theta_point_t TT1, TT2;
+
+    base_change(&TT1, out, &K1_8);
+    base_change(&TT2, out, &K2_8);
+
+    // compute the codomain
+    to_squared_theta(&TT1, &TT1);
+    to_squared_theta(&TT2, &TT2);
+
+    // If the kernel is well formed then TT1.t and TT2.t are zero;
+    // if they are not, we exit early as the signature we are validating
+    // is probably malformed
+    if (!(fp2_is_zero(&TT1.t) & fp2_is_zero(&TT2.t))) {
+        debug_print("gluing failed TT1.t or TT2.t is not zero");
+        return 0;
+    }
+    // Test that our projective factors are non-zero
+    if (fp2_is_zero(&TT1.x) | fp2_is_zero(&TT2.x) | fp2_is_zero(&TT1.y) | fp2_is_zero(&TT2.z) | fp2_is_zero(&TT1.z))
+        return 0; // invalid input
+
+    // Projective factor: Ax
+    fp2_mul(&out->codomain.x, &TT1.x, &TT2.x);
+    fp2_mul(&out->codomain.y, &TT1.y, &TT2.x);
+    fp2_mul(&out->codomain.z, &TT1.x, &TT2.z);
+    fp2_set_zero(&out->codomain.t);
+    // Projective factor: ABCxz
+    fp2_mul(&out->precomputation.x, &TT1.y, &TT2.z);
+    fp2_copy(&out->precomputation.y, &out->codomain.z);
+    fp2_copy(&out->precomputation.z, &out->codomain.y);
+    fp2_set_zero(&out->precomputation.t);
+
+    // Compute the two components of phi(K1_8) = (x:x:y:y).
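+    // Only the two distinct values of phi(K1_8) are stored, in imageK1_8.x and
+    // imageK1_8.y below; gluing_eval_point later uses the component-swapped pair
+    // (y:y:x:x) as the projective inverse of (x:x:y:y).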
+ fp2_mul(&out->imageK1_8.x, &TT1.x, &out->precomputation.x); + fp2_mul(&out->imageK1_8.y, &TT1.z, &out->precomputation.z); + + // If K1_8 and K2_8 are our 8-torsion points, this ensures that the + // 4-torsion points [2]K1_8 and [2]K2_8 are isotropic. + if (verify) { + fp2_t t1, t2; + fp2_mul(&t1, &TT1.y, &out->precomputation.y); + if (!fp2_is_equal(&out->imageK1_8.x, &t1)) + return 0; + fp2_mul(&t1, &TT2.x, &out->precomputation.x); + fp2_mul(&t2, &TT2.z, &out->precomputation.z); + if (!fp2_is_equal(&t2, &t1)) + return 0; + } + + // compute the final codomain + hadamard(&out->codomain, &out->codomain); + return 1; +} + +// sub routine of the gluing eval +static void +gluing_eval_point(theta_point_t *image, const theta_couple_jac_point_t *P, const theta_gluing_t *phi) +{ + theta_point_t T1, T2; + add_components_t add_comp1, add_comp2; + + // Compute the cross addition components of P1+Q1 and P2+Q2 + jac_to_xz_add_components(&add_comp1, &P->P1, &phi->xyK1_8.P1, &phi->domain.E1); + jac_to_xz_add_components(&add_comp2, &P->P2, &phi->xyK1_8.P2, &phi->domain.E2); + + // Compute T1 and T2 derived from the cross addition components. + fp2_mul(&T1.x, &add_comp1.u, &add_comp2.u); // T1x = u1u2 + fp2_mul(&T2.t, &add_comp1.v, &add_comp2.v); // T2t = v1v2 + fp2_add(&T1.x, &T1.x, &T2.t); // T1x = u1u2 + v1v2 + fp2_mul(&T1.y, &add_comp1.u, &add_comp2.w); // T1y = u1w2 + fp2_mul(&T1.z, &add_comp1.w, &add_comp2.u); // T1z = w1u2 + fp2_mul(&T1.t, &add_comp1.w, &add_comp2.w); // T1t = w1w2 + fp2_add(&T2.x, &add_comp1.u, &add_comp1.v); // T2x = (u1+v1) + fp2_add(&T2.y, &add_comp2.u, &add_comp2.v); // T2y = (u2+v2) + fp2_mul(&T2.x, &T2.x, &T2.y); // T2x = (u1+v1)(u2+v2) + fp2_sub(&T2.x, &T2.x, &T1.x); // T1x = v1u2 + u1v2 + fp2_mul(&T2.y, &add_comp1.v, &add_comp2.w); // T2y = v1w2 + fp2_mul(&T2.z, &add_comp1.w, &add_comp2.v); // T2z = w1v2 + fp2_set_zero(&T2.t); // T2t = 0 + + // Apply the basis change and compute their respective square + // theta(P+Q) = M.T1 - M.T2 and theta(P-Q) = M.T1 + M.T2 + apply_isomorphism_general(&T1, &phi->M, &T1, true); + apply_isomorphism_general(&T2, &phi->M, &T2, false); + pointwise_square(&T1, &T1); + pointwise_square(&T2, &T2); + + // the difference between the two is therefore theta(P+Q)theta(P-Q) + // whose hadamard transform is then the product of the dual + // theta_points of phi(P) and phi(Q). + fp2_sub(&T1.x, &T1.x, &T2.x); + fp2_sub(&T1.y, &T1.y, &T2.y); + fp2_sub(&T1.z, &T1.z, &T2.z); + fp2_sub(&T1.t, &T1.t, &T2.t); + hadamard(&T1, &T1); + + // Compute (x, y, z, t) + // As imageK1_8 = (x:x:y:y), its inverse is (y:y:x:x). + fp2_mul(&image->x, &T1.x, &phi->imageK1_8.y); + fp2_mul(&image->y, &T1.y, &phi->imageK1_8.y); + fp2_mul(&image->z, &T1.z, &phi->imageK1_8.x); + fp2_mul(&image->t, &T1.t, &phi->imageK1_8.x); + + hadamard(image, image); +} + +// Same as gluing_eval_point but in the very special case where we already know that the point will +// have a zero coordinate at the place where the zero coordinate of the dual_theta_nullpoint would +// have made the computation difficult +static int +gluing_eval_point_special_case(theta_point_t *image, const theta_couple_point_t *P, const theta_gluing_t *phi) +{ + theta_point_t T; + + // Apply the basis change + base_change(&T, phi, P); + + // Apply the to_squared_theta transform + to_squared_theta(&T, &T); + + // This coordinate should always be 0 in a gluing because D=0. 
+ // If this is not the case, something went very wrong, so reject + if (!fp2_is_zero(&T.t)) + return 0; + + // Compute (x, y, z, t) + fp2_mul(&image->x, &T.x, &phi->precomputation.x); + fp2_mul(&image->y, &T.y, &phi->precomputation.y); + fp2_mul(&image->z, &T.z, &phi->precomputation.z); + fp2_set_zero(&image->t); + + hadamard(image, image); + return 1; +} + +/** + * @brief Evaluate a gluing isogeny from an elliptic product on a basis + * + * @param image1 Output: the theta_point of the image of the first couple of points + * @param image2 Output : the theta point of the image of the second couple of points + * @param xyT1: A pair of points (X : Y : Z) on E1E2 to glue using phi + * @param xyT2: A pair of points (X : Y : Z) on E1E2 to glue using phi + * @param phi : a gluing isogeny E1 x E2 -> A + * + **/ +static void +gluing_eval_basis(theta_point_t *image1, + theta_point_t *image2, + const theta_couple_jac_point_t *xyT1, + const theta_couple_jac_point_t *xyT2, + const theta_gluing_t *phi) +{ + gluing_eval_point(image1, xyT1, phi); + gluing_eval_point(image2, xyT2, phi); +} + +/** + * @brief Compute a (2,2) isogeny in dimension 2 in the theta_model + * + * @param out Output: the theta_isogeny + * @param A a theta null point for the domain + * @param T1_8 a point in A[8] + * @param T2_8 a point in A[8] + * @param hadamard_bool_1 a boolean used for the last two steps of the chain + * @param hadamard_bool_2 a boolean used for the last two steps of the chain + * + * out : A -> B of kernel [4](T1_8,T2_8) + * hadamard_bool_1 controls if the domain is in standard or dual coordinates + * hadamard_bool_2 controls if the codomain is in standard or dual coordinates + * verify: add extra sanity check to ensure our 8-torsion points are coherent with the isogeny + * + */ +static int +theta_isogeny_compute(theta_isogeny_t *out, + const theta_structure_t *A, + const theta_point_t *T1_8, + const theta_point_t *T2_8, + bool hadamard_bool_1, + bool hadamard_bool_2, + bool verify) +{ + out->hadamard_bool_1 = hadamard_bool_1; + out->hadamard_bool_2 = hadamard_bool_2; + out->domain = *A; + out->T1_8 = *T1_8; + out->T2_8 = *T2_8; + out->codomain.precomputation = false; + + theta_point_t TT1, TT2; + + if (hadamard_bool_1) { + hadamard(&TT1, T1_8); + to_squared_theta(&TT1, &TT1); + hadamard(&TT2, T2_8); + to_squared_theta(&TT2, &TT2); + } else { + to_squared_theta(&TT1, T1_8); + to_squared_theta(&TT2, T2_8); + } + + fp2_t t1, t2; + + // Test that our projective factor ABCDxzw is non zero, where + // TT1=(Ax, Bx, Cy, Dy), TT2=(Az, Bw, Cz, Dw) + // But ABCDxzw=0 can only happen if we had an unexpected splitting in + // the isogeny chain. 
+ // In either case reject + // (this is not strictly necessary, we could just return (0:0:0:0)) + if (fp2_is_zero(&TT2.x) | fp2_is_zero(&TT2.y) | fp2_is_zero(&TT2.z) | fp2_is_zero(&TT2.t) | fp2_is_zero(&TT1.x) | + fp2_is_zero(&TT1.y)) + return 0; + + fp2_mul(&t1, &TT1.x, &TT2.y); + fp2_mul(&t2, &TT1.y, &TT2.x); + fp2_mul(&out->codomain.null_point.x, &TT2.x, &t1); + fp2_mul(&out->codomain.null_point.y, &TT2.y, &t2); + fp2_mul(&out->codomain.null_point.z, &TT2.z, &t1); + fp2_mul(&out->codomain.null_point.t, &TT2.t, &t2); + fp2_t t3; + fp2_mul(&t3, &TT2.z, &TT2.t); + fp2_mul(&out->precomputation.x, &t3, &TT1.y); + fp2_mul(&out->precomputation.y, &t3, &TT1.x); + fp2_copy(&out->precomputation.z, &out->codomain.null_point.t); + fp2_copy(&out->precomputation.t, &out->codomain.null_point.z); + + // If T1_8 and T2_8 are our 8-torsion points, this ensures that the + // 4-torsion points 2T1_8 and 2T2_8 are isotropic. + if (verify) { + fp2_mul(&t1, &TT1.x, &out->precomputation.x); + fp2_mul(&t2, &TT1.y, &out->precomputation.y); + if (!fp2_is_equal(&t1, &t2)) + return 0; + fp2_mul(&t1, &TT1.z, &out->precomputation.z); + fp2_mul(&t2, &TT1.t, &out->precomputation.t); + if (!fp2_is_equal(&t1, &t2)) + return 0; + fp2_mul(&t1, &TT2.x, &out->precomputation.x); + fp2_mul(&t2, &TT2.z, &out->precomputation.z); + if (!fp2_is_equal(&t1, &t2)) + return 0; + fp2_mul(&t1, &TT2.y, &out->precomputation.y); + fp2_mul(&t2, &TT2.t, &out->precomputation.t); + if (!fp2_is_equal(&t1, &t2)) + return 0; + } + + if (hadamard_bool_2) { + hadamard(&out->codomain.null_point, &out->codomain.null_point); + } + return 1; +} + +/** + * @brief Compute a (2,2) isogeny when only the 4 torsion above the kernel is known and not the 8 + * torsion + * + * @param out Output: the theta_isogeny + * @param A a theta null point for the domain + * @param T1_4 a point in A[4] + * @param T2_4 a point in A[4] + * @param hadamard_bool_1 a boolean + * @param hadamard_bool_2 a boolean + * + * out : A -> B of kernel [2](T1_4,T2_4) + * hadamard_bool_1 controls if the domain is in standard or dual coordinates + * hadamard_bool_2 controls if the codomain is in standard or dual coordinates + * + */ +static void +theta_isogeny_compute_4(theta_isogeny_t *out, + const theta_structure_t *A, + const theta_point_t *T1_4, + const theta_point_t *T2_4, + bool hadamard_bool_1, + bool hadamard_bool_2) +{ + out->hadamard_bool_1 = hadamard_bool_1; + out->hadamard_bool_2 = hadamard_bool_2; + out->domain = *A; + out->T1_8 = *T1_4; + out->T2_8 = *T2_4; + out->codomain.precomputation = false; + + theta_point_t TT1, TT2; + // we will compute: + // TT1 = (xAB, _ , xCD, _) + // TT2 = (AA,BB,CC,DD) + + // fp2_t xA_inv,zA_inv,tB_inv; + + if (hadamard_bool_1) { + hadamard(&TT1, T1_4); + to_squared_theta(&TT1, &TT1); + + hadamard(&TT2, &A->null_point); + to_squared_theta(&TT2, &TT2); + } else { + to_squared_theta(&TT1, T1_4); + to_squared_theta(&TT2, &A->null_point); + } + + fp2_t sqaabb, sqaacc; + fp2_mul(&sqaabb, &TT2.x, &TT2.y); + fp2_mul(&sqaacc, &TT2.x, &TT2.z); + // No need to check the square roots, only used for signing. 
+    // sqaabb = sqrt(AA*BB)
+    fp2_sqrt(&sqaabb);
+    // sqaacc = sqrt(AA*CC)
+    fp2_sqrt(&sqaacc);
+
+    // we compute out->codomain.null_point = (xAB * sqaacc * AA, xAB *sqaabb *sqaacc, xCD*sqaabb *
+    // AA) out->precomputation = (xAB * BB * CC *DD , sqaabb * CC * DD * xAB , sqaacc * BB* DD * xAB
+    // , xCD * sqaabb *sqaacc * BB)
+
+    fp2_mul(&out->codomain.null_point.y, &sqaabb, &sqaacc);
+    fp2_mul(&out->precomputation.t, &out->codomain.null_point.y, &TT1.z);
+    fp2_mul(&out->codomain.null_point.y, &out->codomain.null_point.y,
+            &TT1.x); // done for out->codomain.null_point.y
+
+    fp2_mul(&out->codomain.null_point.t, &TT1.z, &sqaabb);
+    fp2_mul(&out->codomain.null_point.t, &out->codomain.null_point.t,
+            &TT2.x); // done for out->codomain.null_point.t
+
+    fp2_mul(&out->codomain.null_point.x, &TT1.x, &TT2.x);
+    fp2_mul(&out->codomain.null_point.z, &out->codomain.null_point.x,
+            &TT2.z); // done for out->codomain.null_point.z
+    fp2_mul(&out->codomain.null_point.x, &out->codomain.null_point.x,
+            &sqaacc); // done for out->codomain.null_point.x
+
+    fp2_mul(&out->precomputation.x, &TT1.x, &TT2.t);
+    fp2_mul(&out->precomputation.z, &out->precomputation.x, &TT2.y);
+    fp2_mul(&out->precomputation.x, &out->precomputation.x, &TT2.z);
+    fp2_mul(&out->precomputation.y, &out->precomputation.x, &sqaabb); // done for out->precomputation.y
+    fp2_mul(&out->precomputation.x, &out->precomputation.x, &TT2.y); // done for out->precomputation.x
+    fp2_mul(&out->precomputation.z, &out->precomputation.z, &sqaacc); // done for out->precomputation.z
+    fp2_mul(&out->precomputation.t, &out->precomputation.t, &TT2.y); // done for out->precomputation.t
+
+    if (hadamard_bool_2) {
+        hadamard(&out->codomain.null_point, &out->codomain.null_point);
+    }
+}
+
+/**
+ * @brief Compute a (2,2) isogeny when only the kernel is known and not the 8 or 4 torsion above
+ *
+ * @param out Output: the theta_isogeny
+ * @param A a theta null point for the domain
+ * @param T1_2 a point in A[2]
+ * @param T2_2 a point in A[2]
+ * @param hadamard_bool_1 a boolean
+ * @param hadamard_bool_2 a boolean
+ *
+ * out : A -> B of kernel (T1_2,T2_2)
+ * hadamard_bool_1 controls if the domain is in standard or dual coordinates
+ * hadamard_bool_2 controls if the codomain is in standard or dual coordinates
+ *
+ */
+static void
+theta_isogeny_compute_2(theta_isogeny_t *out,
+                        const theta_structure_t *A,
+                        const theta_point_t *T1_2,
+                        const theta_point_t *T2_2,
+                        bool hadamard_bool_1,
+                        bool hadamard_bool_2)
+{
+    out->hadamard_bool_1 = hadamard_bool_1;
+    out->hadamard_bool_2 = hadamard_bool_2;
+    out->domain = *A;
+    out->T1_8 = *T1_2;
+    out->T2_8 = *T2_2;
+    out->codomain.precomputation = false;
+
+    theta_point_t TT2;
+    // we will compute:
+    // TT2 = (AA,BB,CC,DD)
+
+    if (hadamard_bool_1) {
+        hadamard(&TT2, &A->null_point);
+        to_squared_theta(&TT2, &TT2);
+    } else {
+        to_squared_theta(&TT2, &A->null_point);
+    }
+
+    // we compute out->codomain.null_point = (AA,sqaabb, sqaacc, sqaadd)
+    // out->precomputation = ( BB * CC *DD , sqaabb * CC * DD , sqaacc * BB* DD , sqaadd * BB * CC)
+    fp2_copy(&out->codomain.null_point.x, &TT2.x);
+    fp2_mul(&out->codomain.null_point.y, &TT2.x, &TT2.y);
+    fp2_mul(&out->codomain.null_point.z, &TT2.x, &TT2.z);
+    fp2_mul(&out->codomain.null_point.t, &TT2.x, &TT2.t);
+    // No need to check the square roots, only used for signing.
+ fp2_sqrt(&out->codomain.null_point.y); + fp2_sqrt(&out->codomain.null_point.z); + fp2_sqrt(&out->codomain.null_point.t); + + fp2_mul(&out->precomputation.x, &TT2.z, &TT2.t); + fp2_mul(&out->precomputation.y, + &out->precomputation.x, + &out->codomain.null_point.y); // done for out->precomputation.y + fp2_mul(&out->precomputation.x, &out->precomputation.x, &TT2.y); // done for out->precomputation.x + fp2_mul(&out->precomputation.z, &TT2.t, &out->codomain.null_point.z); + fp2_mul(&out->precomputation.z, &out->precomputation.z, &TT2.y); // done for out->precomputation.z + fp2_mul(&out->precomputation.t, &TT2.z, &out->codomain.null_point.t); + fp2_mul(&out->precomputation.t, &out->precomputation.t, &TT2.y); // done for out->precomputation.t + + if (hadamard_bool_2) { + hadamard(&out->codomain.null_point, &out->codomain.null_point); + } +} + +static void +theta_isogeny_eval(theta_point_t *out, const theta_isogeny_t *phi, const theta_point_t *P) +{ + if (phi->hadamard_bool_1) { + hadamard(out, P); + to_squared_theta(out, out); + } else { + to_squared_theta(out, P); + } + fp2_mul(&out->x, &out->x, &phi->precomputation.x); + fp2_mul(&out->y, &out->y, &phi->precomputation.y); + fp2_mul(&out->z, &out->z, &phi->precomputation.z); + fp2_mul(&out->t, &out->t, &phi->precomputation.t); + + if (phi->hadamard_bool_2) { + hadamard(out, out); + } +} + +#if defined(ENABLE_SIGN) +// Sample a random secret index in [0, 5] to select one of the 6 normalisation +// matrices for the normalisation of the output of the (2,2)-chain during +// splitting +static unsigned char +sample_random_index(void) +{ + // To avoid bias in reduction we should only consider integers smaller + // than 2^32 which are a multiple of 6, so we only reduce bytes with a + // value in [0, 4294967292-1]. + // We have 4294967292/2^32 = ~99.9999999% chance that the first try is "good". 
+ unsigned char seed_arr[4]; + uint32_t seed; + + do { + randombytes(seed_arr, 4); + seed = (seed_arr[0] | (seed_arr[1] << 8) | (seed_arr[2] << 16) | (seed_arr[3] << 24)); + } while (seed >= 4294967292U); + + uint32_t secret_index = seed - (((uint64_t)seed * 2863311531U) >> 34) * 6; + assert(secret_index == seed % 6); // ensure the constant time trick above works + return (unsigned char)secret_index; +} +#endif + +static bool +splitting_compute(theta_splitting_t *out, const theta_structure_t *A, int zero_index, bool randomize) + +{ + // init + uint32_t ctl; + uint32_t count = 0; + fp2_t U_cst, t1, t2; + + memset(&out->M, 0, sizeof(basis_change_matrix_t)); + + // enumerate through all indices + for (int i = 0; i < 10; i++) { + fp2_set_zero(&U_cst); + for (int t = 0; t < 4; t++) { + // Iterate through the null point + choose_index_theta_point(&t2, t, &A->null_point); + choose_index_theta_point(&t1, t ^ EVEN_INDEX[i][1], &A->null_point); + + // Compute t1 * t2 + fp2_mul(&t1, &t1, &t2); + // If CHI_EVAL(i,t) is +1 we want ctl to be 0 and + // If CHI_EVAL(i,t) is -1 we want ctl to be 0xFF..FF + ctl = (uint32_t)(CHI_EVAL[EVEN_INDEX[i][0]][t] >> 1); + assert(ctl == 0 || ctl == 0xffffffff); + + fp2_neg(&t2, &t1); + fp2_select(&t1, &t1, &t2, ctl); + + // Then we compute U_cst ± (t1 * t2) + fp2_add(&U_cst, &U_cst, &t1); + } + + // If U_cst is 0 then update the splitting matrix + ctl = fp2_is_zero(&U_cst); + count -= ctl; + select_base_change_matrix(&out->M, &out->M, &SPLITTING_TRANSFORMS[i], ctl); + if (zero_index != -1 && i == zero_index && + !ctl) { // extra checks if we know exactly where the 0 index should be + return 0; + } + } + +#if defined(ENABLE_SIGN) + // Pick a random normalization matrix + if (randomize) { + unsigned char secret_index = sample_random_index(); + basis_change_matrix_t Mrandom; + + set_base_change_matrix_from_precomp(&Mrandom, &NORMALIZATION_TRANSFORMS[0]); + + // Use a constant time selection to pick the index we want + for (unsigned char i = 1; i < 6; i++) { + // When i == secret_index, mask == 0 and 0xFF..FF otherwise + int32_t mask = i - secret_index; + mask = (mask | -mask) >> 31; + select_base_change_matrix(&Mrandom, &Mrandom, &NORMALIZATION_TRANSFORMS[i], ~mask); + } + base_change_matrix_multiplication(&out->M, &Mrandom, &out->M); + } +#else + assert(!randomize); +#endif + + // apply the isomorphism to ensure the null point is compatible with splitting + apply_isomorphism(&out->B.null_point, &out->M, &A->null_point); + + // splitting was successful only if exactly one zero was identified + return count == 1; +} + +static int +theta_product_structure_to_elliptic_product(theta_couple_curve_t *E12, theta_structure_t *A) +{ + fp2_t xx, yy; + + // This should be true from our computations in splitting_compute + // but still check this for sanity + if (!is_product_theta_point(&A->null_point)) + return 0; + + ec_curve_init(&(E12->E1)); + ec_curve_init(&(E12->E2)); + + // A valid elliptic theta null point has no zero coordinate + if (fp2_is_zero(&A->null_point.x) | fp2_is_zero(&A->null_point.y) | fp2_is_zero(&A->null_point.z)) + return 0; + + // xx = x², yy = y² + fp2_sqr(&xx, &A->null_point.x); + fp2_sqr(&yy, &A->null_point.y); + // xx = x^4, yy = y^4 + fp2_sqr(&xx, &xx); + fp2_sqr(&yy, &yy); + + // A2 = -2(x^4+y^4)/(x^4-y^4) + fp2_add(&E12->E2.A, &xx, &yy); + fp2_sub(&E12->E2.C, &xx, &yy); + fp2_add(&E12->E2.A, &E12->E2.A, &E12->E2.A); + fp2_neg(&E12->E2.A, &E12->E2.A); + + // same with x,z + fp2_sqr(&xx, &A->null_point.x); + fp2_sqr(&yy, &A->null_point.z); + fp2_sqr(&xx, 
&xx); + fp2_sqr(&yy, &yy); + + // A1 = -2(x^4+z^4)/(x^4-z^4) + fp2_add(&E12->E1.A, &xx, &yy); + fp2_sub(&E12->E1.C, &xx, &yy); + fp2_add(&E12->E1.A, &E12->E1.A, &E12->E1.A); + fp2_neg(&E12->E1.A, &E12->E1.A); + + if (fp2_is_zero(&E12->E1.C) | fp2_is_zero(&E12->E2.C)) + return 0; + + return 1; +} + +static int +theta_point_to_montgomery_point(theta_couple_point_t *P12, const theta_point_t *P, const theta_structure_t *A) +{ + fp2_t temp; + const fp2_t *x, *z; + + if (!is_product_theta_point(P)) + return 0; + + x = &P->x; + z = &P->y; + if (fp2_is_zero(x) & fp2_is_zero(z)) { + x = &P->z; + z = &P->t; + } + if (fp2_is_zero(x) & fp2_is_zero(z)) { + return 0; // at this point P=(0:0:0:0) so is invalid + } + // P2.X = A.null_point.y * P.x + A.null_point.x * P.y + // P2.Z = - A.null_point.y * P.x + A.null_point.x * P.y + fp2_mul(&P12->P2.x, &A->null_point.y, x); + fp2_mul(&temp, &A->null_point.x, z); + fp2_sub(&P12->P2.z, &temp, &P12->P2.x); + fp2_add(&P12->P2.x, &P12->P2.x, &temp); + + x = &P->x; + z = &P->z; + if (fp2_is_zero(x) & fp2_is_zero(z)) { + x = &P->y; + z = &P->t; + } + // P1.X = A.null_point.z * P.x + A.null_point.x * P.z + // P1.Z = -A.null_point.z * P.x + A.null_point.x * P.z + fp2_mul(&P12->P1.x, &A->null_point.z, x); + fp2_mul(&temp, &A->null_point.x, z); + fp2_sub(&P12->P1.z, &temp, &P12->P1.x); + fp2_add(&P12->P1.x, &P12->P1.x, &temp); + return 1; +} + +static int +_theta_chain_compute_impl(unsigned n, + theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP, + bool verify, + bool randomize) +{ + theta_structure_t theta; + + // lift the basis + theta_couple_jac_point_t xyT1, xyT2; + + ec_basis_t bas1 = { .P = ker->T1.P1, .Q = ker->T2.P1, .PmQ = ker->T1m2.P1 }; + ec_basis_t bas2 = { .P = ker->T1.P2, .Q = ker->T2.P2, .PmQ = ker->T1m2.P2 }; + if (!lift_basis(&xyT1.P1, &xyT2.P1, &bas1, &E12->E1)) + return 0; + if (!lift_basis(&xyT1.P2, &xyT2.P2, &bas2, &E12->E2)) + return 0; + + const unsigned extra = HD_extra_torsion * extra_torsion; + +#ifndef NDEBUG + assert(extra == 0 || extra == 2); // only cases implemented + if (!test_point_order_twof(&bas2.P, &E12->E2, n + extra)) + debug_print("bas2.P does not have correct order"); + + if (!test_jac_order_twof(&xyT2.P2, &E12->E2, n + extra)) + debug_print("xyT2.P2 does not have correct order"); +#endif + + theta_point_t pts[numP ? numP : 1]; + + int space = 1; + for (unsigned i = 1; i < n; i *= 2) + ++space; + + uint16_t todo[space]; + todo[0] = n - 2 + extra; + + int current = 0; + + // kernel points for the gluing isogeny + theta_couple_jac_point_t jacQ1[space], jacQ2[space]; + jacQ1[0] = xyT1; + jacQ2[0] = xyT2; + while (todo[current] != 1) { + assert(todo[current] >= 2); + ++current; + assert(current < space); + // the gluing isogeny is quite a bit more expensive than the others, + // so we adjust the usual splitting rule here a little bit: towards + // the end of the doubling chain it will be cheaper to recompute the + // doublings after evaluation than to push the intermediate points. + const unsigned num_dbls = todo[current - 1] >= 16 ? 
todo[current - 1] / 2 : todo[current - 1] - 1; + assert(num_dbls && num_dbls < todo[current - 1]); + double_couple_jac_point_iter(&jacQ1[current], num_dbls, &jacQ1[current - 1], E12); + double_couple_jac_point_iter(&jacQ2[current], num_dbls, &jacQ2[current - 1], E12); + todo[current] = todo[current - 1] - num_dbls; + } + + // kernel points for the remaining isogeny steps + theta_point_t thetaQ1[space], thetaQ2[space]; + + // the gluing step + theta_gluing_t first_step; + { + assert(todo[current] == 1); + + // compute the gluing isogeny + if (!gluing_compute(&first_step, E12, &jacQ1[current], &jacQ2[current], verify)) + return 0; + + // evaluate + for (unsigned j = 0; j < numP; ++j) { + assert(ec_is_zero(&P12[j].P1) || ec_is_zero(&P12[j].P2)); + if (!gluing_eval_point_special_case(&pts[j], &P12[j], &first_step)) + return 0; + } + + // push kernel points through gluing isogeny + for (int j = 0; j < current; ++j) { + gluing_eval_basis(&thetaQ1[j], &thetaQ2[j], &jacQ1[j], &jacQ2[j], &first_step); + --todo[j]; + } + + --current; + } + + // set-up the theta_structure for the first codomain + theta.null_point = first_step.codomain; + theta.precomputation = 0; + theta_precomputation(&theta); + + theta_isogeny_t step; + + // and now we do the remaining steps + for (unsigned i = 1; current >= 0 && todo[current]; ++i) { + assert(current < space); + while (todo[current] != 1) { + assert(todo[current] >= 2); + ++current; + assert(current < space); + const unsigned num_dbls = todo[current - 1] / 2; + assert(num_dbls && num_dbls < todo[current - 1]); + double_iter(&thetaQ1[current], &theta, &thetaQ1[current - 1], num_dbls); + double_iter(&thetaQ2[current], &theta, &thetaQ2[current - 1], num_dbls); + todo[current] = todo[current - 1] - num_dbls; + } + + // computing the next step + int ret; + if (i == n - 2) // penultimate step + ret = theta_isogeny_compute(&step, &theta, &thetaQ1[current], &thetaQ2[current], 0, 0, verify); + else if (i == n - 1) // ultimate step + ret = theta_isogeny_compute(&step, &theta, &thetaQ1[current], &thetaQ2[current], 1, 0, false); + else + ret = theta_isogeny_compute(&step, &theta, &thetaQ1[current], &thetaQ2[current], 0, 1, verify); + if (!ret) + return 0; + + for (unsigned j = 0; j < numP; ++j) + theta_isogeny_eval(&pts[j], &step, &pts[j]); + + // updating the codomain + theta = step.codomain; + + // pushing the kernel + assert(todo[current] == 1); + for (int j = 0; j < current; ++j) { + theta_isogeny_eval(&thetaQ1[j], &step, &thetaQ1[j]); + theta_isogeny_eval(&thetaQ2[j], &step, &thetaQ2[j]); + assert(todo[j]); + --todo[j]; + } + + --current; + } + + assert(current == -1); + + if (!extra_torsion) { + if (n >= 3) { + // in the last step we've skipped pushing the kernel since current was == 0, let's do it now + theta_isogeny_eval(&thetaQ1[0], &step, &thetaQ1[0]); + theta_isogeny_eval(&thetaQ2[0], &step, &thetaQ2[0]); + } + + // penultimate step + theta_isogeny_compute_4(&step, &theta, &thetaQ1[0], &thetaQ2[0], 0, 0); + for (unsigned j = 0; j < numP; ++j) + theta_isogeny_eval(&pts[j], &step, &pts[j]); + theta = step.codomain; + theta_isogeny_eval(&thetaQ1[0], &step, &thetaQ1[0]); + theta_isogeny_eval(&thetaQ2[0], &step, &thetaQ2[0]); + + // ultimate step + theta_isogeny_compute_2(&step, &theta, &thetaQ1[0], &thetaQ2[0], 1, 0); + for (unsigned j = 0; j < numP; ++j) + theta_isogeny_eval(&pts[j], &step, &pts[j]); + theta = step.codomain; + } + + // final splitting step + theta_splitting_t last_step; + + bool is_split = splitting_compute(&last_step, &theta, extra_torsion ? 
8 : -1, randomize); + + if (!is_split) { + debug_print("kernel did not generate an isogeny between elliptic products"); + return 0; + } + + if (!theta_product_structure_to_elliptic_product(E34, &last_step.B)) + return 0; + + // evaluate + for (size_t j = 0; j < numP; ++j) { + apply_isomorphism(&pts[j], &last_step.M, &pts[j]); + if (!theta_point_to_montgomery_point(&P12[j], &pts[j], &last_step.B)) + return 0; + } + + return 1; +} + +int +theta_chain_compute_and_eval(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP) +{ + return _theta_chain_compute_impl(n, E12, ker, extra_torsion, E34, P12, numP, false, false); +} + +// Like theta_chain_compute_and_eval, adding extra verification checks; +// used in the signature verification +int +theta_chain_compute_and_eval_verify(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP) +{ + return _theta_chain_compute_impl(n, E12, ker, extra_torsion, E34, P12, numP, true, false); +} + +int +theta_chain_compute_and_eval_randomized(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP) +{ + return _theta_chain_compute_impl(n, E12, ker, extra_torsion, E34, P12, numP, false, true); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.h new file mode 100644 index 0000000000..d151811fe7 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.h @@ -0,0 +1,18 @@ +/** @file + * + * @authors Antonin Leroux + * + * @brief the theta isogeny header + */ + +#ifndef THETA_ISOGENY_H +#define THETA_ISOGENY_H + +#include +#include +#include +#include "theta_structure.h" +#include +#include + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/theta_structure.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/theta_structure.c new file mode 100644 index 0000000000..ce97ac61a8 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/theta_structure.c @@ -0,0 +1,78 @@ +#include "theta_structure.h" +#include + +void +theta_precomputation(theta_structure_t *A) +{ + + if (A->precomputation) { + return; + } + + theta_point_t A_dual; + to_squared_theta(&A_dual, &A->null_point); + + fp2_t t1, t2; + fp2_mul(&t1, &A_dual.x, &A_dual.y); + fp2_mul(&t2, &A_dual.z, &A_dual.t); + fp2_mul(&A->XYZ0, &t1, &A_dual.z); + fp2_mul(&A->XYT0, &t1, &A_dual.t); + fp2_mul(&A->YZT0, &t2, &A_dual.y); + fp2_mul(&A->XZT0, &t2, &A_dual.x); + + fp2_mul(&t1, &A->null_point.x, &A->null_point.y); + fp2_mul(&t2, &A->null_point.z, &A->null_point.t); + fp2_mul(&A->xyz0, &t1, &A->null_point.z); + fp2_mul(&A->xyt0, &t1, &A->null_point.t); + fp2_mul(&A->yzt0, &t2, &A->null_point.y); + fp2_mul(&A->xzt0, &t2, &A->null_point.x); + + A->precomputation = true; +} + +void +double_point(theta_point_t *out, theta_structure_t *A, const theta_point_t *in) +{ + to_squared_theta(out, in); + fp2_sqr(&out->x, &out->x); + fp2_sqr(&out->y, &out->y); + fp2_sqr(&out->z, &out->z); + fp2_sqr(&out->t, &out->t); + + if (!A->precomputation) { + theta_precomputation(A); + } + fp2_mul(&out->x, &out->x, &A->YZT0); + fp2_mul(&out->y, &out->y, &A->XZT0); + fp2_mul(&out->z, 
&out->z, &A->XYT0); + fp2_mul(&out->t, &out->t, &A->XYZ0); + + hadamard(out, out); + + fp2_mul(&out->x, &out->x, &A->yzt0); + fp2_mul(&out->y, &out->y, &A->xzt0); + fp2_mul(&out->z, &out->z, &A->xyt0); + fp2_mul(&out->t, &out->t, &A->xyz0); +} + +void +double_iter(theta_point_t *out, theta_structure_t *A, const theta_point_t *in, int exp) +{ + if (exp == 0) { + *out = *in; + } else { + double_point(out, A, in); + for (int i = 1; i < exp; i++) { + double_point(out, A, out); + } + } +} + +uint32_t +is_product_theta_point(const theta_point_t *P) +{ + fp2_t t1, t2; + fp2_mul(&t1, &P->x, &P->t); + fp2_mul(&t2, &P->y, &P->z); + return fp2_is_equal(&t1, &t2); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/theta_structure.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/theta_structure.h new file mode 100644 index 0000000000..fc630b750a --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/theta_structure.h @@ -0,0 +1,135 @@ +/** @file + * + * @authors Antonin Leroux + * + * @brief the theta structure header + */ + +#ifndef THETA_STRUCTURE_H +#define THETA_STRUCTURE_H + +#include +#include +#include + +/** @internal + * @ingroup hd_module + * @defgroup hd_theta Functions for theta structures + * @{ + */ + +/** + * @brief Perform the hadamard transform on a theta point + * + * @param out Output: the theta_point + * @param in a theta point* + * in = (x,y,z,t) + * out = (x+y+z+t, x-y+z-t, x+y-z-t, x-y-z+t) + * + */ +static inline void +hadamard(theta_point_t *out, const theta_point_t *in) +{ + fp2_t t1, t2, t3, t4; + + // t1 = x + y + fp2_add(&t1, &in->x, &in->y); + // t2 = x - y + fp2_sub(&t2, &in->x, &in->y); + // t3 = z + t + fp2_add(&t3, &in->z, &in->t); + // t4 = z - t + fp2_sub(&t4, &in->z, &in->t); + + fp2_add(&out->x, &t1, &t3); + fp2_add(&out->y, &t2, &t4); + fp2_sub(&out->z, &t1, &t3); + fp2_sub(&out->t, &t2, &t4); +} + +/** + * @brief Square the coordinates of a theta point + * @param out Output: the theta_point + * @param in a theta point* + * in = (x,y,z,t) + * out = (x^2, y^2, z^2, t^2) + * + */ +static inline void +pointwise_square(theta_point_t *out, const theta_point_t *in) +{ + fp2_sqr(&out->x, &in->x); + fp2_sqr(&out->y, &in->y); + fp2_sqr(&out->z, &in->z); + fp2_sqr(&out->t, &in->t); +} + +/** + * @brief Square the coordinates and then perform the hadamard transform + * + * @param out Output: the theta_point + * @param in a theta point* + * in = (x,y,z,t) + * out = (x^2+y^2+z^2+t^2, x^2-y^2+z^2-t^2, x^2+y^2-z^2-t^2, x^2-y^2-z^2+t^2) + * + */ +static inline void +to_squared_theta(theta_point_t *out, const theta_point_t *in) +{ + pointwise_square(out, in); + hadamard(out, out); +} + +/** + * @brief Perform the theta structure precomputation + * + * @param A Output: the theta_structure + * + * if A.null_point = (x,y,z,t) + * if (xx,yy,zz,tt) = to_squared_theta(A.null_point) + * Computes y0,z0,t0,Y0,Z0,T0 = x/y,x/z,x/t,XX/YY,XX/ZZ,XX/TT + * + */ +void theta_precomputation(theta_structure_t *A); + +/** + * @brief Compute the double of the theta point in on the theta struc A + * + * @param out Output: the theta_point + * @param A a theta structure + * @param in a theta point in the theta structure A + * in = (x,y,z,t) + * out = [2] (x,y,z,t) + * /!\ assumes that no coordinates is zero and that the precomputation of A has been done + * + */ +void double_point(theta_point_t *out, theta_structure_t *A, const theta_point_t *in); + +/** + * @brief Compute the iterated double of the theta point in on the theta struc A + * + * @param out Output: the 
theta_point + * @param A a theta structure + * @param in a theta point in the theta structure A + * @param exp the exponent + * in = (x,y,z,t) + * out = [2^2] (x,y,z,t) + * /!\ assumes that no coordinates is zero and that the precomputation of A has been done + * + */ +void double_iter(theta_point_t *out, theta_structure_t *A, const theta_point_t *in, int exp); + +/* + * @brief Check if a theta point is a product theta point + * + * @param P a theta point + * @return 0xFFFFFFFF if true, zero otherwise + */ +uint32_t is_product_theta_point(const theta_point_t *P); + +// end hd_theta +/** + * @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/tools.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/tools.c new file mode 100644 index 0000000000..242ea08fe2 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/tools.c @@ -0,0 +1,75 @@ +#include +#include + +static clock_t global_timer; + +clock_t +tic(void) +{ + global_timer = clock(); + return global_timer; +} + +float +tac(void) +{ + float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); + return ms; +} + +float +TAC(const char *str) +{ + float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); +#ifndef NDEBUG + printf("%s [%d ms]\n", str, (int)ms); +#endif + return ms; +} + +float +toc(const clock_t t) +{ + float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); + return ms; +} + +float +TOC(const clock_t t, const char *str) +{ + float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); + printf("%s [%d ms]\n", str, (int)ms); + return ms; + // printf("%s [%ld]\n",str,clock()-t); + // return (float) (clock()-t); +} + +float +TOC_clock(const clock_t t, const char *str) +{ + printf("%s [%ld]\n", str, clock() - t); + return (float)(clock() - t); +} + +clock_t +dclock(const clock_t t) +{ + return (clock() - t); +} + +float +clock_to_time(const clock_t t, const char *str) +{ + float ms = (1000. 
* (float)(t) / CLOCKS_PER_SEC); + printf("%s [%d ms]\n", str, (int)ms); + return ms; + // printf("%s [%ld]\n",str,t); + // return (float) (t); +} + +float +clock_print(const clock_t t, const char *str) +{ + printf("%s [%ld]\n", str, t); + return (float)(t); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/tools.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/tools.h new file mode 100644 index 0000000000..5a6a505fc1 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/tools.h @@ -0,0 +1,49 @@ + +#ifndef TOOLS_H +#define TOOLS_H + +#include + +// Debug printing: +// https://stackoverflow.com/questions/1644868/define-macro-for-debug-printing-in-c +#ifndef NDEBUG +#define DEBUG_PRINT 1 +#else +#define DEBUG_PRINT 0 +#endif + +#ifndef __FILE_NAME__ +#define __FILE_NAME__ "NA" +#endif + +#ifndef __LINE__ +#define __LINE__ 0 +#endif + +#ifndef __func__ +#define __func__ "NA" +#endif + +#define debug_print(fmt) \ + do { \ + if (DEBUG_PRINT) \ + printf("warning: %s, file %s, line %d, function %s().\n", \ + fmt, \ + __FILE_NAME__, \ + __LINE__, \ + __func__); \ + } while (0) + + +clock_t tic(void); +float tac(void); /* time in ms since last tic */ +float TAC(const char *str); /* same, but prints it with label 'str' */ +float toc(const clock_t t); /* time in ms since t */ +float TOC(const clock_t t, const char *str); /* same, but prints it with label 'str' */ +float TOC_clock(const clock_t t, const char *str); + +clock_t dclock(const clock_t t); // return the clock cycle diff between now and t +float clock_to_time(const clock_t t, + const char *str); // convert the number of clock cycles t to time +float clock_print(const clock_t t, const char *str); +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/torsion_constants.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/torsion_constants.c new file mode 100644 index 0000000000..1a6c203035 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/torsion_constants.c @@ -0,0 +1,43 @@ +#include +#include +#include +const ibz_t TWO_TO_SECURITY_BITS = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x1}}} +#endif +; +const ibz_t TORSION_PLUS_2POWER = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x100}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x100000000000000}}} +#endif +; +const ibz_t SEC_DEGREE = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 49, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 25, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 
13, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#endif +; +const ibz_t COM_DEGREE = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 49, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 25, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#endif +; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/torsion_constants.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/torsion_constants.h new file mode 100644 index 0000000000..f5e4e9fb66 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/torsion_constants.h @@ -0,0 +1,6 @@ +#include +#define TORSION_2POWER_BYTES 48 +extern const ibz_t TWO_TO_SECURITY_BITS; +extern const ibz_t TORSION_PLUS_2POWER; +extern const ibz_t SEC_DEGREE; +extern const ibz_t COM_DEGREE; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/tutil.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/tutil.h new file mode 100644 index 0000000000..59f162093e --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/tutil.h @@ -0,0 +1,36 @@ +#ifndef TUTIL_H +#define TUTIL_H + +#include +#include + +#if defined(__GNUC__) || defined(__clang__) +#define BSWAP16(i) __builtin_bswap16((i)) +#define BSWAP32(i) __builtin_bswap32((i)) +#define BSWAP64(i) __builtin_bswap64((i)) +#define UNUSED __attribute__((unused)) +#else +#define BSWAP16(i) ((((i) >> 8) & 0xff) | (((i) & 0xff00) << 8)) +#define BSWAP32(i) \ + ((((i) >> 24) & 0xff) | (((i) >> 8) & 0xff00) | (((i) & 0xff00) << 8) | ((i) << 24)) +#define BSWAP64(i) ((BSWAP32((i) >> 32) & 0xffffffff) | (BSWAP32(i) << 32) +#define UNUSED +#endif + +#if defined(RADIX_64) +#define digit_t uint64_t +#define sdigit_t int64_t +#define RADIX 64 +#define LOG2RADIX 6 +#define BSWAP_DIGIT(i) BSWAP64(i) +#elif defined(RADIX_32) +#define digit_t uint32_t +#define sdigit_t int32_t +#define RADIX 32 +#define LOG2RADIX 5 +#define BSWAP_DIGIT(i) BSWAP32(i) +#else +#error "Radix must be 32bit or 64 bit" +#endif + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/vaes256_key_expansion.S b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/vaes256_key_expansion.S new file mode 100644 index 0000000000..2311fa9bc8 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/vaes256_key_expansion.S @@ -0,0 +1,122 @@ +#*************************************************************************** +# This implementation is a modified version of the code, +# written by Nir Drucker and Shay Gueron +# AWS Cryptographic Algorithms Group +# (ndrucker@amazon.com, gueron@amazon.com) +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. 
See the License for the specific language governing +# permissions and limitations under the License. +# The license is detailed in the file LICENSE.txt, and applies to this file. +#*************************************************************************** + +.intel_syntax noprefix +.data + +.p2align 4, 0x90 +MASK1: +.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d +CON1: +.long 1,1,1,1 + +.set k256_size, 32 + +#if defined(__linux__) && defined(__ELF__) +.section .note.GNU-stack,"",@progbits +#endif +.text + +################################################################################ +# void aes256_key_expansion(OUT aes256_ks_t* ks, IN const uint8_t* key); +# The output parameter must be 16 bytes aligned! +# +#Linux ABI +#define out rdi +#define in rsi + +#define CON xmm0 +#define MASK_REG xmm1 + +#define IN0 xmm2 +#define IN1 xmm3 + +#define TMP1 xmm4 +#define TMP2 xmm5 + +#define ZERO xmm15 + +.macro ROUND1 in0 in1 + add out, k256_size + vpshufb TMP2, \in1, MASK_REG + aesenclast TMP2, CON + vpslld CON, CON, 1 + vpslldq TMP1, \in0, 4 + vpxor \in0, \in0, TMP1 + vpslldq TMP1, TMP1, 4 + vpxor \in0, \in0, TMP1 + vpslldq TMP1, TMP1, 4 + vpxor \in0, \in0, TMP1 + vpxor \in0, \in0, TMP2 + vmovdqa [out], \in0 + +.endm + +.macro ROUND2 + vpshufd TMP2, IN0, 0xff + aesenclast TMP2, ZERO + vpslldq TMP1, IN1, 4 + vpxor IN1, IN1, TMP1 + vpslldq TMP1, TMP1, 4 + vpxor IN1, IN1, TMP1 + vpslldq TMP1, TMP1, 4 + vpxor IN1, IN1, TMP1 + vpxor IN1, IN1, TMP2 + vmovdqa [out+16], IN1 +.endm + +#ifdef __APPLE__ +#define AES256_KEY_EXPANSION _aes256_key_expansion +#else +#define AES256_KEY_EXPANSION aes256_key_expansion +#endif + +#ifndef __APPLE__ +.type AES256_KEY_EXPANSION,@function +.hidden AES256_KEY_EXPANSION +#endif +.globl AES256_KEY_EXPANSION +AES256_KEY_EXPANSION: + vmovdqu IN0, [in] + vmovdqu IN1, [in+16] + vmovdqa [out], IN0 + vmovdqa [out+16], IN1 + + vmovdqa CON, [rip+CON1] + vmovdqa MASK_REG, [rip+MASK1] + + vpxor ZERO, ZERO, ZERO + + mov ax, 6 +.loop256: + + ROUND1 IN0, IN1 + dec ax + ROUND2 + jne .loop256 + + ROUND1 IN0, IN1 + + ret +#ifndef __APPLE__ +.size AES256_KEY_EXPANSION, .-AES256_KEY_EXPANSION +#endif + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/verification.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/verification.h new file mode 100644 index 0000000000..af674691da --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/verification.h @@ -0,0 +1,123 @@ +/** @file + * + * @brief The verification protocol + */ + +#ifndef VERIFICATION_H +#define VERIFICATION_H + +#include +#include + +/** @defgroup verification SQIsignHD verification protocol + * @{ + */ + +/** @defgroup verification_t Types for SQIsignHD verification protocol + * @{ + */ + +typedef digit_t scalar_t[NWORDS_ORDER]; +typedef scalar_t scalar_mtx_2x2_t[2][2]; + +/** @brief Type for the signature + * + * @typedef signature_t + * + * @struct signature + * + */ +typedef struct signature +{ + fp2_t E_aux_A; // the Montgomery A-coefficient for the auxiliary curve + uint8_t backtracking; + uint8_t two_resp_length; + scalar_mtx_2x2_t mat_Bchall_can_to_B_chall; // the matrix of the desired basis + scalar_t chall_coeff; + uint8_t hint_aux; + uint8_t hint_chall; +} signature_t; + +/** @brief Type for the public keys + * + * @typedef public_key_t + * + * @struct public_key + * + */ +typedef struct public_key +{ + ec_curve_t curve; // the normalized A-coefficient of the Montgomery curve + uint8_t hint_pk; +} public_key_t; + +/** @} + */ + +/*************************** Functions 
*****************************/ + +void public_key_init(public_key_t *pk); +void public_key_finalize(public_key_t *pk); + +void hash_to_challenge(scalar_t *scalar, + const public_key_t *pk, + const ec_curve_t *com_curve, + const unsigned char *message, + size_t length); + +/** + * @brief Verification + * + * @param sig signature + * @param pk public key + * @param m message + * @param l size + * @returns 1 if the signature verifies, 0 otherwise + */ +int protocols_verify(signature_t *sig, const public_key_t *pk, const unsigned char *m, size_t l); + +/*************************** Encoding *****************************/ + +/** @defgroup encoding Encoding and decoding functions + * @{ + */ + +/** + * @brief Encodes a signature as a byte array + * + * @param enc : Byte array to encode the signature in + * @param sig : Signature to encode + */ +void signature_to_bytes(unsigned char *enc, const signature_t *sig); + +/** + * @brief Decodes a signature from a byte array + * + * @param sig : Structure to decode the signature in + * @param enc : Byte array to decode + */ +void signature_from_bytes(signature_t *sig, const unsigned char *enc); + +/** + * @brief Encodes a public key as a byte array + * + * @param enc : Byte array to encode the public key in + * @param pk : Public key to encode + */ +unsigned char *public_key_to_bytes(unsigned char *enc, const public_key_t *pk); + +/** + * @brief Decodes a public key from a byte array + * + * @param pk : Structure to decode the public key in + * @param enc : Byte array to decode + */ +const unsigned char *public_key_from_bytes(public_key_t *pk, const unsigned char *enc); + +/** @} + */ + +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/verify.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/verify.c new file mode 100644 index 0000000000..b5f78ad398 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/verify.c @@ -0,0 +1,309 @@ +#include +#include +#include +#include +#include + +// Check that the basis change matrix elements are canonical +// representatives modulo 2^(SQIsign_response_length + 2). +static int +check_canonical_basis_change_matrix(const signature_t *sig) +{ + // This works as long as all values in sig->mat_Bchall_can_to_B_chall are + // positive integers. 
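+ // Concretely, aux is set below to 2^(SQIsign_response_length + HD_extra_torsion - backtracking)
+ // and every matrix entry must compare strictly below it, i.e. it must already be a reduced
+ // representative modulo that power of two. (The remark about positive integers matters
+ // because mp_compare presumably performs an unsigned limb-wise comparison.)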
+ int ret = 1; + scalar_t aux; + + memset(aux, 0, NWORDS_ORDER * sizeof(digit_t)); + aux[0] = 0x1; + multiple_mp_shiftl(aux, SQIsign_response_length + HD_extra_torsion - (int)sig->backtracking, NWORDS_ORDER); + + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + if (mp_compare(aux, sig->mat_Bchall_can_to_B_chall[i][j], NWORDS_ORDER) <= 0) { + ret = 0; + } + } + } + + return ret; +} + +// Compute the 2^n isogeny from the signature with kernel +// P + [chall_coeff]Q and store the codomain in E_chall +static int +compute_challenge_verify(ec_curve_t *E_chall, const signature_t *sig, const ec_curve_t *Epk, const uint8_t hint_pk) +{ + ec_basis_t bas_EA; + ec_isog_even_t phi_chall; + + // Set domain and length of 2^n isogeny + copy_curve(&phi_chall.curve, Epk); + phi_chall.length = TORSION_EVEN_POWER - sig->backtracking; + + // Compute the basis from the supplied hint + if (!ec_curve_to_basis_2f_from_hint(&bas_EA, &phi_chall.curve, TORSION_EVEN_POWER, hint_pk)) // canonical + return 0; + + // recovering the exact challenge + { + if (!ec_ladder3pt(&phi_chall.kernel, sig->chall_coeff, &bas_EA.P, &bas_EA.Q, &bas_EA.PmQ, &phi_chall.curve)) { + return 0; + }; + } + + // Double the kernel until is has the correct order + ec_dbl_iter(&phi_chall.kernel, sig->backtracking, &phi_chall.kernel, &phi_chall.curve); + + // Compute the codomain + copy_curve(E_chall, &phi_chall.curve); + if (ec_eval_even(E_chall, &phi_chall, NULL, 0)) + return 0; + return 1; +} + +// same as matrix_application_even_basis() in id2iso.c, with some modifications: +// - this version works with a matrix of scalars (not ibz_t). +// - reduction modulo 2^f of matrix elements is removed here, because it is +// assumed that the elements are already cannonical representatives modulo +// 2^f; this is ensured by calling check_canonical_basis_change_matrix() at +// the beginning of protocols_verify(). +static int +matrix_scalar_application_even_basis(ec_basis_t *bas, const ec_curve_t *E, scalar_mtx_2x2_t *mat, int f) +{ + scalar_t scalar0, scalar1; + memset(scalar0, 0, NWORDS_ORDER * sizeof(digit_t)); + memset(scalar1, 0, NWORDS_ORDER * sizeof(digit_t)); + + ec_basis_t tmp_bas; + copy_basis(&tmp_bas, bas); + + // For a matrix [[a, c], [b, d]] we compute: + // + // first basis element R = [a]P + [b]Q + if (!ec_biscalar_mul(&bas->P, (*mat)[0][0], (*mat)[1][0], f, &tmp_bas, E)) + return 0; + // second basis element S = [c]P + [d]Q + if (!ec_biscalar_mul(&bas->Q, (*mat)[0][1], (*mat)[1][1], f, &tmp_bas, E)) + return 0; + // Their difference R - S = [a - c]P + [b - d]Q + mp_sub(scalar0, (*mat)[0][0], (*mat)[0][1], NWORDS_ORDER); + mp_mod_2exp(scalar0, f, NWORDS_ORDER); + mp_sub(scalar1, (*mat)[1][0], (*mat)[1][1], NWORDS_ORDER); + mp_mod_2exp(scalar1, f, NWORDS_ORDER); + return ec_biscalar_mul(&bas->PmQ, scalar0, scalar1, f, &tmp_bas, E); +} + +// Compute the bases for the challenge and auxillary curve from +// the canonical bases. Challenge basis is reconstructed from the +// compressed scalars within the challenge. 
+static int +challenge_and_aux_basis_verify(ec_basis_t *B_chall_can, + ec_basis_t *B_aux_can, + ec_curve_t *E_chall, + ec_curve_t *E_aux, + signature_t *sig, + const int pow_dim2_deg_resp) +{ + + // recovering the canonical basis as TORSION_EVEN_POWER for consistency with signing + if (!ec_curve_to_basis_2f_from_hint(B_chall_can, E_chall, TORSION_EVEN_POWER, sig->hint_chall)) + return 0; + + // setting to the right order + ec_dbl_iter_basis(B_chall_can, + TORSION_EVEN_POWER - pow_dim2_deg_resp - HD_extra_torsion - sig->two_resp_length, + B_chall_can, + E_chall); + + if (!ec_curve_to_basis_2f_from_hint(B_aux_can, E_aux, TORSION_EVEN_POWER, sig->hint_aux)) + return 0; + + // setting to the right order + ec_dbl_iter_basis(B_aux_can, TORSION_EVEN_POWER - pow_dim2_deg_resp - HD_extra_torsion, B_aux_can, E_aux); + +#ifndef NDEBUG + if (!test_basis_order_twof(B_chall_can, E_chall, HD_extra_torsion + pow_dim2_deg_resp + sig->two_resp_length)) + debug_print("canonical basis has wrong order, expect something to fail"); +#endif + + // applying the change matrix on the basis of E_chall + return matrix_scalar_application_even_basis(B_chall_can, + E_chall, + &sig->mat_Bchall_can_to_B_chall, + pow_dim2_deg_resp + HD_extra_torsion + sig->two_resp_length); +} + +// When two_resp_length is non-zero, we must compute a small 2^n-isogeny +// updating E_chall as the codomain as well as push the basis on E_chall +// through this isogeny +static int +two_response_isogeny_verify(ec_curve_t *E_chall, ec_basis_t *B_chall_can, const signature_t *sig, int pow_dim2_deg_resp) +{ + ec_point_t ker, points[3]; + + // choosing the right point for the small two_isogenies + if (mp_is_even(sig->mat_Bchall_can_to_B_chall[0][0], NWORDS_ORDER) && + mp_is_even(sig->mat_Bchall_can_to_B_chall[1][0], NWORDS_ORDER)) { + copy_point(&ker, &B_chall_can->Q); + } else { + copy_point(&ker, &B_chall_can->P); + } + + copy_point(&points[0], &B_chall_can->P); + copy_point(&points[1], &B_chall_can->Q); + copy_point(&points[2], &B_chall_can->PmQ); + + ec_dbl_iter(&ker, pow_dim2_deg_resp + HD_extra_torsion, &ker, E_chall); + +#ifndef NDEBUG + if (!test_point_order_twof(&ker, E_chall, sig->two_resp_length)) + debug_print("kernel does not have order 2^(two_resp_length"); +#endif + + if (ec_eval_small_chain(E_chall, &ker, sig->two_resp_length, points, 3, false)) { + return 0; + } + +#ifndef NDEBUG + if (!test_point_order_twof(&points[0], E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("points[0] does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); + if (!test_point_order_twof(&points[1], E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("points[1] does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); + if (!test_point_order_twof(&points[2], E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("points[2] does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); +#endif + + copy_point(&B_chall_can->P, &points[0]); + copy_point(&B_chall_can->Q, &points[1]); + copy_point(&B_chall_can->PmQ, &points[2]); + return 1; +} + +// The commitment curve can be recovered from the codomain of the 2D +// isogeny built from the bases computed during verification. 
+static int +compute_commitment_curve_verify(ec_curve_t *E_com, + const ec_basis_t *B_chall_can, + const ec_basis_t *B_aux_can, + const ec_curve_t *E_chall, + const ec_curve_t *E_aux, + int pow_dim2_deg_resp) + +{ +#ifndef NDEBUG + // Check all the points are the correct order + if (!test_basis_order_twof(B_chall_can, E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("B_chall_can does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); + + if (!test_basis_order_twof(B_aux_can, E_aux, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("B_aux_can does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); +#endif + + // now compute the dim2 isogeny from Echall x E_aux -> E_com x E_aux' + // of kernel B_chall_can x B_aux_can + + // first we set-up the kernel + theta_couple_curve_t EchallxEaux; + copy_curve(&EchallxEaux.E1, E_chall); + copy_curve(&EchallxEaux.E2, E_aux); + + theta_kernel_couple_points_t dim_two_ker; + copy_bases_to_kernel(&dim_two_ker, B_chall_can, B_aux_can); + + // computing the isogeny + theta_couple_curve_t codomain; + int codomain_splits; + ec_curve_init(&codomain.E1); + ec_curve_init(&codomain.E2); + // handling the special case where we don't need to perform any dim2 computation + if (pow_dim2_deg_resp == 0) { + codomain_splits = 1; + copy_curve(&codomain.E1, &EchallxEaux.E1); + copy_curve(&codomain.E2, &EchallxEaux.E2); + // We still need to check that E_chall is supersingular + // This assumes that HD_extra_torsion == 2 + if (!ec_is_basis_four_torsion(B_chall_can, E_chall)) { + return 0; + } + } else { + codomain_splits = theta_chain_compute_and_eval_verify( + pow_dim2_deg_resp, &EchallxEaux, &dim_two_ker, true, &codomain, NULL, 0); + } + + // computing the commitment curve + // its always the first one because of our (2^n,2^n)-isogeny formulae + copy_curve(E_com, &codomain.E1); + + return codomain_splits; +} + +// SQIsign verification +int +protocols_verify(signature_t *sig, const public_key_t *pk, const unsigned char *m, size_t l) +{ + int verify; + + if (!check_canonical_basis_change_matrix(sig)) + return 0; + + // Computation of the length of the dim 2 2^n isogeny + int pow_dim2_deg_resp = SQIsign_response_length - (int)sig->two_resp_length - (int)sig->backtracking; + + // basic sanity test: checking that the response is not too long + if (pow_dim2_deg_resp < 0) + return 0; + // The dim 2 isogeny embeds a dim 1 isogeny of odd degree, so it can + // never be of length 2. 
+ if (pow_dim2_deg_resp == 1) + return 0; + + // check the public curve is valid + if (!ec_curve_verify_A(&(pk->curve).A)) + return 0; + + // Set auxiliary curve from the A-coefficient within the signature + ec_curve_t E_aux; + if (!ec_curve_init_from_A(&E_aux, &sig->E_aux_A)) + return 0; // invalid curve + + // checking that we are given A-coefficients and no precomputation + assert(fp2_is_one(&pk->curve.C) == 0xFFFFFFFF && !pk->curve.is_A24_computed_and_normalized); + + // computation of the challenge + ec_curve_t E_chall; + if (!compute_challenge_verify(&E_chall, sig, &pk->curve, pk->hint_pk)) { + return 0; + } + + // Computation of the canonical bases for the challenge and aux curve + ec_basis_t B_chall_can, B_aux_can; + + if (!challenge_and_aux_basis_verify(&B_chall_can, &B_aux_can, &E_chall, &E_aux, sig, pow_dim2_deg_resp)) { + return 0; + } + + // When two_resp_length != 0 we need to compute a second, short 2^r-isogeny + if (sig->two_resp_length > 0) { + if (!two_response_isogeny_verify(&E_chall, &B_chall_can, sig, pow_dim2_deg_resp)) { + return 0; + } + } + + // We can recover the commitment curve with a 2D isogeny + // The supplied signature did not compute an isogeny between eliptic products + // and so definitely is an invalid signature. + ec_curve_t E_com; + if (!compute_commitment_curve_verify(&E_com, &B_chall_can, &B_aux_can, &E_chall, &E_aux, pow_dim2_deg_resp)) + return 0; + + scalar_t chk_chall; + + // recomputing the challenge vector + hash_to_challenge(&chk_chall, pk, &E_com, m, l); + + // performing the final check + verify = mp_compare(sig->chall_coeff, chk_chall, NWORDS_ORDER) == 0; + + return verify; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/xeval.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/xeval.c new file mode 100644 index 0000000000..7fc7170423 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/xeval.c @@ -0,0 +1,64 @@ +#include "isog.h" +#include "ec.h" +#include + +// ----------------------------------------------------------------------------------------- +// ----------------------------------------------------------------------------------------- + +// Degree-2 isogeny evaluation with kenerl generated by P != (0, 0) +void +xeval_2(ec_point_t *R, ec_point_t *const Q, const int lenQ, const ec_kps2_t *kps) +{ + fp2_t t0, t1, t2; + for (int j = 0; j < lenQ; j++) { + fp2_add(&t0, &Q[j].x, &Q[j].z); + fp2_sub(&t1, &Q[j].x, &Q[j].z); + fp2_mul(&t2, &kps->K.x, &t1); + fp2_mul(&t1, &kps->K.z, &t0); + fp2_add(&t0, &t2, &t1); + fp2_sub(&t1, &t2, &t1); + fp2_mul(&R[j].x, &Q[j].x, &t0); + fp2_mul(&R[j].z, &Q[j].z, &t1); + } +} + +void +xeval_2_singular(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps2_t *kps) +{ + fp2_t t0, t1; + for (int i = 0; i < lenQ; i++) { + fp2_mul(&t0, &Q[i].x, &Q[i].z); + fp2_mul(&t1, &kps->K.x, &Q[i].z); + fp2_add(&t1, &t1, &Q[i].x); + fp2_mul(&t1, &t1, &Q[i].x); + fp2_sqr(&R[i].x, &Q[i].z); + fp2_add(&R[i].x, &R[i].x, &t1); + fp2_mul(&R[i].z, &t0, &kps->K.z); + } +} + +// Degree-4 isogeny evaluation with kenerl generated by P such that [2]P != (0, 0) +void +xeval_4(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps4_t *kps) +{ + const ec_point_t *K = kps->K; + + fp2_t t0, t1; + + for (int i = 0; i < lenQ; i++) { + fp2_add(&t0, &Q[i].x, &Q[i].z); + fp2_sub(&t1, &Q[i].x, &Q[i].z); + fp2_mul(&(R[i].x), &t0, &K[1].x); + fp2_mul(&(R[i].z), &t1, &K[2].x); + fp2_mul(&t0, &t0, &t1); + fp2_mul(&t0, &t0, &K[0].x); + fp2_add(&t1, &(R[i].x), &(R[i].z)); + 
fp2_sub(&(R[i].z), &(R[i].x), &(R[i].z)); + fp2_sqr(&t1, &t1); + fp2_sqr(&(R[i].z), &(R[i].z)); + fp2_add(&(R[i].x), &t0, &t1); + fp2_sub(&t0, &t0, &(R[i].z)); + fp2_mul(&(R[i].x), &(R[i].x), &t1); + fp2_mul(&(R[i].z), &(R[i].z), &t0); + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/xisog.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/xisog.c new file mode 100644 index 0000000000..7242d29433 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/xisog.c @@ -0,0 +1,61 @@ +#include "isog.h" +#include "ec.h" +#include + +// ------------------------------------------------------------------------- +// ------------------------------------------------------------------------- + +// Degree-2 isogeny with kernel generated by P != (0 ,0) +// Outputs the curve coefficient in the form A24=(A+2C:4C) +void +xisog_2(ec_kps2_t *kps, ec_point_t *B, const ec_point_t P) +{ + fp2_sqr(&B->x, &P.x); + fp2_sqr(&B->z, &P.z); + fp2_sub(&B->x, &B->z, &B->x); + fp2_add(&kps->K.x, &P.x, &P.z); + fp2_sub(&kps->K.z, &P.x, &P.z); +} + +void +xisog_2_singular(ec_kps2_t *kps, ec_point_t *B24, ec_point_t A24) +{ + // No need to check the square root, only used for signing. + fp2_t t0, four; + fp2_set_small(&four, 4); + fp2_add(&t0, &A24.x, &A24.x); + fp2_sub(&t0, &t0, &A24.z); + fp2_add(&t0, &t0, &t0); + fp2_inv(&A24.z); + fp2_mul(&t0, &t0, &A24.z); + fp2_copy(&kps->K.x, &t0); + fp2_add(&B24->x, &t0, &t0); + fp2_sqr(&t0, &t0); + fp2_sub(&t0, &t0, &four); + fp2_sqrt(&t0); + fp2_neg(&kps->K.z, &t0); + fp2_add(&B24->z, &t0, &t0); + fp2_add(&B24->x, &B24->x, &B24->z); + fp2_add(&B24->z, &B24->z, &B24->z); +} + +// Degree-4 isogeny with kernel generated by P such that [2]P != (0 ,0) +// Outputs the curve coefficient in the form A24=(A+2C:4C) +void +xisog_4(ec_kps4_t *kps, ec_point_t *B, const ec_point_t P) +{ + ec_point_t *K = kps->K; + + fp2_sqr(&K[0].x, &P.x); + fp2_sqr(&K[0].z, &P.z); + fp2_add(&K[1].x, &K[0].z, &K[0].x); + fp2_sub(&K[1].z, &K[0].z, &K[0].x); + fp2_mul(&B->x, &K[1].x, &K[1].z); + fp2_sqr(&B->z, &K[0].z); + + // Constants for xeval_4 + fp2_add(&K[2].x, &P.x, &P.z); + fp2_sub(&K[1].x, &P.x, &P.z); + fp2_add(&K[0].x, &K[0].z, &K[0].z); + fp2_add(&K[0].x, &K[0].x, &K[0].x); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/COPYING.LGPL b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/COPYING.LGPL new file mode 100644 index 0000000000..0a041280bd --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/COPYING.LGPL @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. 
+Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. 
+ + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. 
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/LICENSE b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/LICENSE new file mode 100644 index 0000000000..8f71f43fee --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/NOTICE b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/NOTICE new file mode 100644 index 0000000000..6eccf392fa --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/NOTICE @@ -0,0 +1,21 @@ +Copyright 2023-2025 the SQIsign team. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +The DPE Library is (C) 2004-2024 Patrick Pelissier, Paul Zimmermann, +LORIA/INRIA, and licensed under the GNU Lesser General Public License, +version 3. You may obtain a copy of the License at + + https://www.gnu.org/licenses/lgpl-3.0.en.html + +or in the file COPYING.LGPL. diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/aes.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/aes.h new file mode 100644 index 0000000000..e35ec3705b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/aes.h @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef AES_H +#define AES_H + +#include +#include + +void AES_256_ECB(const uint8_t *input, const uint8_t *key, uint8_t *output); +#define AES_ECB_encrypt AES_256_ECB + +#ifdef ENABLE_AESNI +int AES_128_CTR_NI(unsigned char *output, + size_t outputByteLen, + const unsigned char *input, + size_t inputByteLen); +int AES_128_CTR_4R_NI(unsigned char *output, + size_t outputByteLen, + const unsigned char *input, + size_t inputByteLen); +#define AES_128_CTR AES_128_CTR_NI +#else +int AES_128_CTR(unsigned char *output, + size_t outputByteLen, + const unsigned char *input, + size_t inputByteLen); +#endif + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/aes_c.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/aes_c.c new file mode 100644 index 0000000000..5e2d7d6161 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/aes_c.c @@ -0,0 +1,783 @@ +// SPDX-License-Identifier: MIT and Apache-2.0 + +/* + * AES implementation based on code from PQClean, + * which is in turn based on BearSSL (https://bearssl.org/) + * by Thomas Pornin. + * + * + * Copyright (c) 2016 Thomas Pornin + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include + +#define AES128_KEYBYTES 16 +#define AES192_KEYBYTES 24 +#define AES256_KEYBYTES 32 +#define AESCTR_NONCEBYTES 12 +#define AES_BLOCKBYTES 16 + +#define PQC_AES128_STATESIZE 88 +typedef struct +{ + uint64_t sk_exp[PQC_AES128_STATESIZE]; +} aes128ctx; + +#define PQC_AES192_STATESIZE 104 +typedef struct +{ + uint64_t sk_exp[PQC_AES192_STATESIZE]; +} aes192ctx; + +#define PQC_AES256_STATESIZE 120 +typedef struct +{ + uint64_t sk_exp[PQC_AES256_STATESIZE]; +} aes256ctx; + +/** Initializes the context **/ +void aes128_ecb_keyexp(aes128ctx *r, const unsigned char *key); + +void aes128_ctr_keyexp(aes128ctx *r, const unsigned char *key); + +void aes128_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes128ctx *ctx); + +void aes128_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes128ctx *ctx); + +/** Frees the context **/ +void aes128_ctx_release(aes128ctx *r); + +/** Initializes the context **/ +void aes192_ecb_keyexp(aes192ctx *r, const unsigned char *key); + +void aes192_ctr_keyexp(aes192ctx *r, const unsigned char *key); + +void aes192_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes192ctx *ctx); + +void aes192_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes192ctx *ctx); + +void aes192_ctx_release(aes192ctx *r); + +/** Initializes the context **/ +void aes256_ecb_keyexp(aes256ctx *r, const unsigned char *key); + +void aes256_ctr_keyexp(aes256ctx *r, const unsigned char *key); + +void aes256_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes256ctx *ctx); + +void aes256_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes256ctx *ctx); + +/** Frees the context **/ +void aes256_ctx_release(aes256ctx *r); + +static inline uint32_t +br_dec32le(const unsigned char *src) +{ + return (uint32_t)src[0] | ((uint32_t)src[1] << 8) | ((uint32_t)src[2] << 16) | + ((uint32_t)src[3] << 24); +} + +static void +br_range_dec32le(uint32_t *v, size_t num, const unsigned char *src) +{ + while (num-- > 0) { + *v++ = br_dec32le(src); + src += 4; + } +} + +static inline uint32_t +br_swap32(uint32_t x) +{ + x = ((x & (uint32_t)0x00FF00FF) << 8) | ((x >> 8) & (uint32_t)0x00FF00FF); + return (x << 16) | (x >> 16); +} + +static inline void +br_enc32le(unsigned char *dst, uint32_t x) +{ + dst[0] = (unsigned char)x; + dst[1] = (unsigned char)(x >> 8); + dst[2] = (unsigned char)(x >> 16); + dst[3] = (unsigned char)(x >> 24); +} + +static void +br_range_enc32le(unsigned char *dst, const uint32_t *v, size_t num) +{ + while (num-- > 0) { + br_enc32le(dst, *v++); + dst += 4; + } +} + +static void +br_aes_ct64_bitslice_Sbox(uint64_t *q) +{ + /* + * This S-box implementation is a straightforward translation of + * the circuit described by Boyar and Peralta in "A new + * combinational logic minimization technique with applications + * to cryptology" (https://eprint.iacr.org/2009/191.pdf). 
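+ *
+ * (As used by aes_ecb4x below, the eight 64-bit words q[0..7] hold four
+ * AES blocks in bitsliced form, so one call evaluates the S-box for all
+ * four blocks at once using only boolean operations -- no table lookups,
+ * hence constant time.)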
+ * + * Note that variables x* (input) and s* (output) are numbered + * in "reverse" order (x0 is the high bit, x7 is the low bit). + */ + + uint64_t x0, x1, x2, x3, x4, x5, x6, x7; + uint64_t y1, y2, y3, y4, y5, y6, y7, y8, y9; + uint64_t y10, y11, y12, y13, y14, y15, y16, y17, y18, y19; + uint64_t y20, y21; + uint64_t z0, z1, z2, z3, z4, z5, z6, z7, z8, z9; + uint64_t z10, z11, z12, z13, z14, z15, z16, z17; + uint64_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; + uint64_t t10, t11, t12, t13, t14, t15, t16, t17, t18, t19; + uint64_t t20, t21, t22, t23, t24, t25, t26, t27, t28, t29; + uint64_t t30, t31, t32, t33, t34, t35, t36, t37, t38, t39; + uint64_t t40, t41, t42, t43, t44, t45, t46, t47, t48, t49; + uint64_t t50, t51, t52, t53, t54, t55, t56, t57, t58, t59; + uint64_t t60, t61, t62, t63, t64, t65, t66, t67; + uint64_t s0, s1, s2, s3, s4, s5, s6, s7; + + x0 = q[7]; + x1 = q[6]; + x2 = q[5]; + x3 = q[4]; + x4 = q[3]; + x5 = q[2]; + x6 = q[1]; + x7 = q[0]; + + /* + * Top linear transformation. + */ + y14 = x3 ^ x5; + y13 = x0 ^ x6; + y9 = x0 ^ x3; + y8 = x0 ^ x5; + t0 = x1 ^ x2; + y1 = t0 ^ x7; + y4 = y1 ^ x3; + y12 = y13 ^ y14; + y2 = y1 ^ x0; + y5 = y1 ^ x6; + y3 = y5 ^ y8; + t1 = x4 ^ y12; + y15 = t1 ^ x5; + y20 = t1 ^ x1; + y6 = y15 ^ x7; + y10 = y15 ^ t0; + y11 = y20 ^ y9; + y7 = x7 ^ y11; + y17 = y10 ^ y11; + y19 = y10 ^ y8; + y16 = t0 ^ y11; + y21 = y13 ^ y16; + y18 = x0 ^ y16; + + /* + * Non-linear section. + */ + t2 = y12 & y15; + t3 = y3 & y6; + t4 = t3 ^ t2; + t5 = y4 & x7; + t6 = t5 ^ t2; + t7 = y13 & y16; + t8 = y5 & y1; + t9 = t8 ^ t7; + t10 = y2 & y7; + t11 = t10 ^ t7; + t12 = y9 & y11; + t13 = y14 & y17; + t14 = t13 ^ t12; + t15 = y8 & y10; + t16 = t15 ^ t12; + t17 = t4 ^ t14; + t18 = t6 ^ t16; + t19 = t9 ^ t14; + t20 = t11 ^ t16; + t21 = t17 ^ y20; + t22 = t18 ^ y19; + t23 = t19 ^ y21; + t24 = t20 ^ y18; + + t25 = t21 ^ t22; + t26 = t21 & t23; + t27 = t24 ^ t26; + t28 = t25 & t27; + t29 = t28 ^ t22; + t30 = t23 ^ t24; + t31 = t22 ^ t26; + t32 = t31 & t30; + t33 = t32 ^ t24; + t34 = t23 ^ t33; + t35 = t27 ^ t33; + t36 = t24 & t35; + t37 = t36 ^ t34; + t38 = t27 ^ t36; + t39 = t29 & t38; + t40 = t25 ^ t39; + + t41 = t40 ^ t37; + t42 = t29 ^ t33; + t43 = t29 ^ t40; + t44 = t33 ^ t37; + t45 = t42 ^ t41; + z0 = t44 & y15; + z1 = t37 & y6; + z2 = t33 & x7; + z3 = t43 & y16; + z4 = t40 & y1; + z5 = t29 & y7; + z6 = t42 & y11; + z7 = t45 & y17; + z8 = t41 & y10; + z9 = t44 & y12; + z10 = t37 & y3; + z11 = t33 & y4; + z12 = t43 & y13; + z13 = t40 & y5; + z14 = t29 & y2; + z15 = t42 & y9; + z16 = t45 & y14; + z17 = t41 & y8; + + /* + * Bottom linear transformation. 
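+ * (Recombines the z* products into the output bits s0..s7; the bitwise
+ * negations below fold in the 0x63 affine constant of the AES S-box.)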
+ */ + t46 = z15 ^ z16; + t47 = z10 ^ z11; + t48 = z5 ^ z13; + t49 = z9 ^ z10; + t50 = z2 ^ z12; + t51 = z2 ^ z5; + t52 = z7 ^ z8; + t53 = z0 ^ z3; + t54 = z6 ^ z7; + t55 = z16 ^ z17; + t56 = z12 ^ t48; + t57 = t50 ^ t53; + t58 = z4 ^ t46; + t59 = z3 ^ t54; + t60 = t46 ^ t57; + t61 = z14 ^ t57; + t62 = t52 ^ t58; + t63 = t49 ^ t58; + t64 = z4 ^ t59; + t65 = t61 ^ t62; + t66 = z1 ^ t63; + s0 = t59 ^ t63; + s6 = t56 ^ ~t62; + s7 = t48 ^ ~t60; + t67 = t64 ^ t65; + s3 = t53 ^ t66; + s4 = t51 ^ t66; + s5 = t47 ^ t65; + s1 = t64 ^ ~s3; + s2 = t55 ^ ~t67; + + q[7] = s0; + q[6] = s1; + q[5] = s2; + q[4] = s3; + q[3] = s4; + q[2] = s5; + q[1] = s6; + q[0] = s7; +} + +static void +br_aes_ct64_ortho(uint64_t *q) +{ +#define SWAPN(cl, ch, s, x, y) \ + do { \ + uint64_t a, b; \ + a = (x); \ + b = (y); \ + (x) = (a & (uint64_t)(cl)) | ((b & (uint64_t)(cl)) << (s)); \ + (y) = ((a & (uint64_t)(ch)) >> (s)) | (b & (uint64_t)(ch)); \ + } while (0) + +#define SWAP2(x, y) SWAPN(0x5555555555555555, 0xAAAAAAAAAAAAAAAA, 1, x, y) +#define SWAP4(x, y) SWAPN(0x3333333333333333, 0xCCCCCCCCCCCCCCCC, 2, x, y) +#define SWAP8(x, y) SWAPN(0x0F0F0F0F0F0F0F0F, 0xF0F0F0F0F0F0F0F0, 4, x, y) + + SWAP2(q[0], q[1]); + SWAP2(q[2], q[3]); + SWAP2(q[4], q[5]); + SWAP2(q[6], q[7]); + + SWAP4(q[0], q[2]); + SWAP4(q[1], q[3]); + SWAP4(q[4], q[6]); + SWAP4(q[5], q[7]); + + SWAP8(q[0], q[4]); + SWAP8(q[1], q[5]); + SWAP8(q[2], q[6]); + SWAP8(q[3], q[7]); +} + +static void +br_aes_ct64_interleave_in(uint64_t *q0, uint64_t *q1, const uint32_t *w) +{ + uint64_t x0, x1, x2, x3; + + x0 = w[0]; + x1 = w[1]; + x2 = w[2]; + x3 = w[3]; + x0 |= (x0 << 16); + x1 |= (x1 << 16); + x2 |= (x2 << 16); + x3 |= (x3 << 16); + x0 &= (uint64_t)0x0000FFFF0000FFFF; + x1 &= (uint64_t)0x0000FFFF0000FFFF; + x2 &= (uint64_t)0x0000FFFF0000FFFF; + x3 &= (uint64_t)0x0000FFFF0000FFFF; + x0 |= (x0 << 8); + x1 |= (x1 << 8); + x2 |= (x2 << 8); + x3 |= (x3 << 8); + x0 &= (uint64_t)0x00FF00FF00FF00FF; + x1 &= (uint64_t)0x00FF00FF00FF00FF; + x2 &= (uint64_t)0x00FF00FF00FF00FF; + x3 &= (uint64_t)0x00FF00FF00FF00FF; + *q0 = x0 | (x2 << 8); + *q1 = x1 | (x3 << 8); +} + +static void +br_aes_ct64_interleave_out(uint32_t *w, uint64_t q0, uint64_t q1) +{ + uint64_t x0, x1, x2, x3; + + x0 = q0 & (uint64_t)0x00FF00FF00FF00FF; + x1 = q1 & (uint64_t)0x00FF00FF00FF00FF; + x2 = (q0 >> 8) & (uint64_t)0x00FF00FF00FF00FF; + x3 = (q1 >> 8) & (uint64_t)0x00FF00FF00FF00FF; + x0 |= (x0 >> 8); + x1 |= (x1 >> 8); + x2 |= (x2 >> 8); + x3 |= (x3 >> 8); + x0 &= (uint64_t)0x0000FFFF0000FFFF; + x1 &= (uint64_t)0x0000FFFF0000FFFF; + x2 &= (uint64_t)0x0000FFFF0000FFFF; + x3 &= (uint64_t)0x0000FFFF0000FFFF; + w[0] = (uint32_t)x0 | (uint32_t)(x0 >> 16); + w[1] = (uint32_t)x1 | (uint32_t)(x1 >> 16); + w[2] = (uint32_t)x2 | (uint32_t)(x2 >> 16); + w[3] = (uint32_t)x3 | (uint32_t)(x3 >> 16); +} + +static const unsigned char Rcon[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36 }; + +static uint32_t +sub_word(uint32_t x) +{ + uint64_t q[8]; + + memset(q, 0, sizeof q); + q[0] = x; + br_aes_ct64_ortho(q); + br_aes_ct64_bitslice_Sbox(q); + br_aes_ct64_ortho(q); + return (uint32_t)q[0]; +} + +static void +br_aes_ct64_keysched(uint64_t *comp_skey, const unsigned char *key, unsigned int key_len) +{ + unsigned int i, j, k, nk, nkf; + uint32_t tmp; + uint32_t skey[60]; + unsigned nrounds = 10 + ((key_len - 16) >> 2); + + nk = (key_len >> 2); + nkf = ((nrounds + 1) << 2); + br_range_dec32le(skey, (key_len >> 2), key); + tmp = skey[(key_len >> 2) - 1]; + for (i = nk, j = 0, k = 0; i < nkf; i++) { + 
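+        /* FIPS-197 key expansion: every nk words apply RotWord, SubWord and
+           the round constant; for 256-bit keys (nk > 6) an extra SubWord is
+           applied at j == 4. */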
if (j == 0) { + tmp = (tmp << 24) | (tmp >> 8); + tmp = sub_word(tmp) ^ Rcon[k]; + } else if (nk > 6 && j == 4) { + tmp = sub_word(tmp); + } + tmp ^= skey[i - nk]; + skey[i] = tmp; + if (++j == nk) { + j = 0; + k++; + } + } + + for (i = 0, j = 0; i < nkf; i += 4, j += 2) { + uint64_t q[8]; + + br_aes_ct64_interleave_in(&q[0], &q[4], skey + i); + q[1] = q[0]; + q[2] = q[0]; + q[3] = q[0]; + q[5] = q[4]; + q[6] = q[4]; + q[7] = q[4]; + br_aes_ct64_ortho(q); + comp_skey[j + 0] = + (q[0] & (uint64_t)0x1111111111111111) | (q[1] & (uint64_t)0x2222222222222222) | + (q[2] & (uint64_t)0x4444444444444444) | (q[3] & (uint64_t)0x8888888888888888); + comp_skey[j + 1] = + (q[4] & (uint64_t)0x1111111111111111) | (q[5] & (uint64_t)0x2222222222222222) | + (q[6] & (uint64_t)0x4444444444444444) | (q[7] & (uint64_t)0x8888888888888888); + } +} + +static void +br_aes_ct64_skey_expand(uint64_t *skey, const uint64_t *comp_skey, unsigned int nrounds) +{ + unsigned u, v, n; + + n = (nrounds + 1) << 1; + for (u = 0, v = 0; u < n; u++, v += 4) { + uint64_t x0, x1, x2, x3; + + x0 = x1 = x2 = x3 = comp_skey[u]; + x0 &= (uint64_t)0x1111111111111111; + x1 &= (uint64_t)0x2222222222222222; + x2 &= (uint64_t)0x4444444444444444; + x3 &= (uint64_t)0x8888888888888888; + x1 >>= 1; + x2 >>= 2; + x3 >>= 3; + skey[v + 0] = (x0 << 4) - x0; + skey[v + 1] = (x1 << 4) - x1; + skey[v + 2] = (x2 << 4) - x2; + skey[v + 3] = (x3 << 4) - x3; + } +} + +static inline void +add_round_key(uint64_t *q, const uint64_t *sk) +{ + q[0] ^= sk[0]; + q[1] ^= sk[1]; + q[2] ^= sk[2]; + q[3] ^= sk[3]; + q[4] ^= sk[4]; + q[5] ^= sk[5]; + q[6] ^= sk[6]; + q[7] ^= sk[7]; +} + +static inline void +shift_rows(uint64_t *q) +{ + int i; + + for (i = 0; i < 8; i++) { + uint64_t x; + + x = q[i]; + q[i] = + (x & (uint64_t)0x000000000000FFFF) | ((x & (uint64_t)0x00000000FFF00000) >> 4) | + ((x & (uint64_t)0x00000000000F0000) << 12) | ((x & (uint64_t)0x0000FF0000000000) >> 8) | + ((x & (uint64_t)0x000000FF00000000) << 8) | ((x & (uint64_t)0xF000000000000000) >> 12) | + ((x & (uint64_t)0x0FFF000000000000) << 4); + } +} + +static inline uint64_t +rotr32(uint64_t x) +{ + return (x << 32) | (x >> 32); +} + +static inline void +mix_columns(uint64_t *q) +{ + uint64_t q0, q1, q2, q3, q4, q5, q6, q7; + uint64_t r0, r1, r2, r3, r4, r5, r6, r7; + + q0 = q[0]; + q1 = q[1]; + q2 = q[2]; + q3 = q[3]; + q4 = q[4]; + q5 = q[5]; + q6 = q[6]; + q7 = q[7]; + r0 = (q0 >> 16) | (q0 << 48); + r1 = (q1 >> 16) | (q1 << 48); + r2 = (q2 >> 16) | (q2 << 48); + r3 = (q3 >> 16) | (q3 << 48); + r4 = (q4 >> 16) | (q4 << 48); + r5 = (q5 >> 16) | (q5 << 48); + r6 = (q6 >> 16) | (q6 << 48); + r7 = (q7 >> 16) | (q7 << 48); + + q[0] = q7 ^ r7 ^ r0 ^ rotr32(q0 ^ r0); + q[1] = q0 ^ r0 ^ q7 ^ r7 ^ r1 ^ rotr32(q1 ^ r1); + q[2] = q1 ^ r1 ^ r2 ^ rotr32(q2 ^ r2); + q[3] = q2 ^ r2 ^ q7 ^ r7 ^ r3 ^ rotr32(q3 ^ r3); + q[4] = q3 ^ r3 ^ q7 ^ r7 ^ r4 ^ rotr32(q4 ^ r4); + q[5] = q4 ^ r4 ^ r5 ^ rotr32(q5 ^ r5); + q[6] = q5 ^ r5 ^ r6 ^ rotr32(q6 ^ r6); + q[7] = q6 ^ r6 ^ r7 ^ rotr32(q7 ^ r7); +} + +static void +inc4_be(uint32_t *x) +{ + uint32_t t = br_swap32(*x) + 4; + *x = br_swap32(t); +} + +static void +aes_ecb4x(unsigned char out[64], + const uint32_t ivw[16], + const uint64_t *sk_exp, + unsigned int nrounds) +{ + uint32_t w[16]; + uint64_t q[8]; + unsigned int i; + + memcpy(w, ivw, sizeof(w)); + for (i = 0; i < 4; i++) { + br_aes_ct64_interleave_in(&q[i], &q[i + 4], w + (i << 2)); + } + br_aes_ct64_ortho(q); + + add_round_key(q, sk_exp); + for (i = 1; i < nrounds; i++) { + br_aes_ct64_bitslice_Sbox(q); + 
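+        /* remaining steps of round i: ShiftRows, MixColumns, AddRoundKey */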
shift_rows(q); + mix_columns(q); + add_round_key(q, sk_exp + (i << 3)); + } + br_aes_ct64_bitslice_Sbox(q); + shift_rows(q); + add_round_key(q, sk_exp + 8 * nrounds); + + br_aes_ct64_ortho(q); + for (i = 0; i < 4; i++) { + br_aes_ct64_interleave_out(w + (i << 2), q[i], q[i + 4]); + } + br_range_enc32le(out, w, 16); +} + +static void +aes_ctr4x(unsigned char out[64], uint32_t ivw[16], const uint64_t *sk_exp, unsigned int nrounds) +{ + aes_ecb4x(out, ivw, sk_exp, nrounds); + + /* Increase counter for next 4 blocks */ + inc4_be(ivw + 3); + inc4_be(ivw + 7); + inc4_be(ivw + 11); + inc4_be(ivw + 15); +} + +static void +aes_ecb(unsigned char *out, + const unsigned char *in, + size_t nblocks, + const uint64_t *rkeys, + unsigned int nrounds) +{ + uint32_t blocks[16]; + unsigned char t[64]; + + while (nblocks >= 4) { + br_range_dec32le(blocks, 16, in); + aes_ecb4x(out, blocks, rkeys, nrounds); + nblocks -= 4; + in += 64; + out += 64; + } + + if (nblocks) { + br_range_dec32le(blocks, nblocks * 4, in); + aes_ecb4x(t, blocks, rkeys, nrounds); + memcpy(out, t, nblocks * 16); + } +} + +static void +aes_ctr(unsigned char *out, + size_t outlen, + const unsigned char *iv, + const uint64_t *rkeys, + unsigned int nrounds) +{ + uint32_t ivw[16]; + size_t i; + uint32_t cc = 0; + + br_range_dec32le(ivw, 3, iv); + memcpy(ivw + 4, ivw, 3 * sizeof(uint32_t)); + memcpy(ivw + 8, ivw, 3 * sizeof(uint32_t)); + memcpy(ivw + 12, ivw, 3 * sizeof(uint32_t)); + ivw[3] = br_swap32(cc); + ivw[7] = br_swap32(cc + 1); + ivw[11] = br_swap32(cc + 2); + ivw[15] = br_swap32(cc + 3); + + while (outlen > 64) { + aes_ctr4x(out, ivw, rkeys, nrounds); + out += 64; + outlen -= 64; + } + if (outlen > 0) { + unsigned char tmp[64]; + aes_ctr4x(tmp, ivw, rkeys, nrounds); + for (i = 0; i < outlen; i++) { + out[i] = tmp[i]; + } + } +} + +void +aes128_ecb_keyexp(aes128ctx *r, const unsigned char *key) +{ + uint64_t skey[22]; + + br_aes_ct64_keysched(skey, key, 16); + br_aes_ct64_skey_expand(r->sk_exp, skey, 10); +} + +void +aes128_ctr_keyexp(aes128ctx *r, const unsigned char *key) +{ + aes128_ecb_keyexp(r, key); +} + +void +aes192_ecb_keyexp(aes192ctx *r, const unsigned char *key) +{ + uint64_t skey[26]; + + br_aes_ct64_keysched(skey, key, 24); + br_aes_ct64_skey_expand(r->sk_exp, skey, 12); +} + +void +aes192_ctr_keyexp(aes192ctx *r, const unsigned char *key) +{ + aes192_ecb_keyexp(r, key); +} + +void +aes256_ecb_keyexp(aes256ctx *r, const unsigned char *key) +{ + uint64_t skey[30]; + + br_aes_ct64_keysched(skey, key, 32); + br_aes_ct64_skey_expand(r->sk_exp, skey, 14); +} + +void +aes256_ctr_keyexp(aes256ctx *r, const unsigned char *key) +{ + aes256_ecb_keyexp(r, key); +} + +void +aes128_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes128ctx *ctx) +{ + aes_ecb(out, in, nblocks, ctx->sk_exp, 10); +} + +void +aes128_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes128ctx *ctx) +{ + aes_ctr(out, outlen, iv, ctx->sk_exp, 10); +} + +void +aes192_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes192ctx *ctx) +{ + aes_ecb(out, in, nblocks, ctx->sk_exp, 12); +} + +void +aes192_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes192ctx *ctx) +{ + aes_ctr(out, outlen, iv, ctx->sk_exp, 12); +} + +void +aes256_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes256ctx *ctx) +{ + aes_ecb(out, in, nblocks, ctx->sk_exp, 14); +} + +void +aes256_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes256ctx *ctx) +{ + 
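+    /* every key size shares the same bitsliced core; only the round count differs (14 for AES-256) */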
aes_ctr(out, outlen, iv, ctx->sk_exp, 14); +} + +void +aes128_ctx_release(aes128ctx *r) +{ +} + +void +aes192_ctx_release(aes192ctx *r) +{ +} + +void +aes256_ctx_release(aes256ctx *r) +{ +} + +int +AES_128_CTR(unsigned char *output, + size_t outputByteLen, + const unsigned char *input, + size_t inputByteLen) +{ + aes128ctx ctx; + const unsigned char iv[16] = { 0 }; + + aes128_ctr_keyexp(&ctx, input); + aes128_ctr(output, outputByteLen, iv, &ctx); + aes128_ctx_release(&ctx); + + return (int)outputByteLen; +} + +void +AES_256_ECB(const uint8_t *input, const unsigned char *key, unsigned char *output) +{ + aes256ctx ctx; + + aes256_ecb_keyexp(&ctx, key); + aes256_ecb(output, input, 1, &ctx); + aes256_ctx_release(&ctx); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/algebra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/algebra.c new file mode 100644 index 0000000000..50629f9fec --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/algebra.c @@ -0,0 +1,280 @@ +#include +#include "internal.h" + +// Internal helper functions + +void +quat_alg_init_set_ui(quat_alg_t *alg, unsigned int p) +{ + ibz_t bp; + ibz_init(&bp); + ibz_set(&bp, p); + quat_alg_init_set(alg, &bp); + ibz_finalize(&bp); +} + +void +quat_alg_coord_mul(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b, const quat_alg_t *alg) +{ + ibz_t prod; + ibz_vec_4_t sum; + ibz_init(&prod); + ibz_vec_4_init(&sum); + + ibz_set(&(sum[0]), 0); + ibz_set(&(sum[1]), 0); + ibz_set(&(sum[2]), 0); + ibz_set(&(sum[3]), 0); + + // compute 1 coordinate + ibz_mul(&prod, &((*a)[2]), &((*b)[2])); + ibz_sub(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[3])); + ibz_sub(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&(sum[0]), &(sum[0]), &(alg->p)); + ibz_mul(&prod, &((*a)[0]), &((*b)[0])); + ibz_add(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[1])); + ibz_sub(&(sum[0]), &(sum[0]), &prod); + // compute i coordiante + ibz_mul(&prod, &((*a)[2]), &((*b)[3])); + ibz_add(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[2])); + ibz_sub(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&(sum[1]), &(sum[1]), &(alg->p)); + ibz_mul(&prod, &((*a)[0]), &((*b)[1])); + ibz_add(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[0])); + ibz_add(&(sum[1]), &(sum[1]), &prod); + // compute j coordiante + ibz_mul(&prod, &((*a)[0]), &((*b)[2])); + ibz_add(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &((*a)[2]), &((*b)[0])); + ibz_add(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[3])); + ibz_sub(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[1])); + ibz_add(&(sum[2]), &(sum[2]), &prod); + // compute ij coordiante + ibz_mul(&prod, &((*a)[0]), &((*b)[3])); + ibz_add(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[0])); + ibz_add(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &((*a)[2]), &((*b)[1])); + ibz_sub(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[2])); + ibz_add(&(sum[3]), &(sum[3]), &prod); + + ibz_copy(&((*res)[0]), &(sum[0])); + ibz_copy(&((*res)[1]), &(sum[1])); + ibz_copy(&((*res)[2]), &(sum[2])); + ibz_copy(&((*res)[3]), &(sum[3])); + + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} + +void +quat_alg_equal_denom(quat_alg_elem_t *res_a, quat_alg_elem_t *res_b, const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + ibz_t gcd, r; + ibz_init(&gcd); + ibz_init(&r); + ibz_gcd(&gcd, &(a->denom), &(b->denom)); + // temporarily set res_a.denom to a.denom/gcd, and res_b.denom to b.denom/gcd + 
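+    // (the resulting common denominator is lcm(a->denom, b->denom) = a->denom * b->denom / gcd)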
ibz_div(&(res_a->denom), &r, &(a->denom), &gcd); + ibz_div(&(res_b->denom), &r, &(b->denom), &gcd); + for (int i = 0; i < 4; i++) { + // multiply coordiates by reduced denominators from the other element + ibz_mul(&(res_a->coord[i]), &(a->coord[i]), &(res_b->denom)); + ibz_mul(&(res_b->coord[i]), &(b->coord[i]), &(res_a->denom)); + } + // multiply both reduced denominators + ibz_mul(&(res_a->denom), &(res_a->denom), &(res_b->denom)); + // multiply them by the gcd to get the new common denominator + ibz_mul(&(res_b->denom), &(res_a->denom), &gcd); + ibz_mul(&(res_a->denom), &(res_a->denom), &gcd); + ibz_finalize(&gcd); + ibz_finalize(&r); +} + +// Public Functions + +void +quat_alg_add(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + quat_alg_elem_t res_a, res_b; + quat_alg_elem_init(&res_a); + quat_alg_elem_init(&res_b); + // put both on the same denominator + quat_alg_equal_denom(&res_a, &res_b, a, b); + // then add + ibz_copy(&(res->denom), &(res_a.denom)); + ibz_vec_4_add(&(res->coord), &(res_a.coord), &(res_b.coord)); + quat_alg_elem_finalize(&res_a); + quat_alg_elem_finalize(&res_b); +} + +void +quat_alg_sub(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + quat_alg_elem_t res_a, res_b; + quat_alg_elem_init(&res_a); + quat_alg_elem_init(&res_b); + // put both on the same denominator + quat_alg_equal_denom(&res_a, &res_b, a, b); + // then substract + ibz_copy(&res->denom, &res_a.denom); + ibz_vec_4_sub(&res->coord, &res_a.coord, &res_b.coord); + quat_alg_elem_finalize(&res_a); + quat_alg_elem_finalize(&res_b); +} + +void +quat_alg_mul(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b, const quat_alg_t *alg) +{ + // denominator: product of denominators + ibz_mul(&(res->denom), &(a->denom), &(b->denom)); + quat_alg_coord_mul(&(res->coord), &(a->coord), &(b->coord), alg); +} + +void +quat_alg_norm(ibz_t *res_num, ibz_t *res_denom, const quat_alg_elem_t *a, const quat_alg_t *alg) +{ + ibz_t r, g; + quat_alg_elem_t norm; + ibz_init(&r); + ibz_init(&g); + quat_alg_elem_init(&norm); + + quat_alg_conj(&norm, a); + quat_alg_mul(&norm, a, &norm, alg); + ibz_gcd(&g, &(norm.coord[0]), &(norm.denom)); + ibz_div(res_num, &r, &(norm.coord[0]), &g); + ibz_div(res_denom, &r, &(norm.denom), &g); + ibz_abs(res_denom, res_denom); + ibz_abs(res_num, res_num); + assert(ibz_cmp(res_denom, &ibz_const_zero) > 0); + + quat_alg_elem_finalize(&norm); + ibz_finalize(&r); + ibz_finalize(&g); +} + +void +quat_alg_scalar(quat_alg_elem_t *elem, const ibz_t *numerator, const ibz_t *denominator) +{ + ibz_copy(&(elem->denom), denominator); + ibz_copy(&(elem->coord[0]), numerator); + ibz_set(&(elem->coord[1]), 0); + ibz_set(&(elem->coord[2]), 0); + ibz_set(&(elem->coord[3]), 0); +} + +void +quat_alg_conj(quat_alg_elem_t *conj, const quat_alg_elem_t *x) +{ + ibz_copy(&(conj->denom), &(x->denom)); + ibz_copy(&(conj->coord[0]), &(x->coord[0])); + ibz_neg(&(conj->coord[1]), &(x->coord[1])); + ibz_neg(&(conj->coord[2]), &(x->coord[2])); + ibz_neg(&(conj->coord[3]), &(x->coord[3])); +} + +void +quat_alg_make_primitive(ibz_vec_4_t *primitive_x, ibz_t *content, const quat_alg_elem_t *x, const quat_lattice_t *order) +{ + int ok UNUSED = quat_lattice_contains(primitive_x, order, x); + assert(ok); + ibz_vec_4_content(content, primitive_x); + ibz_t r; + ibz_init(&r); + for (int i = 0; i < 4; i++) { + ibz_div(*primitive_x + i, &r, *primitive_x + i, content); + } + ibz_finalize(&r); +} + +void +quat_alg_normalize(quat_alg_elem_t *x) +{ + ibz_t gcd, sign, 
r; + ibz_init(&gcd); + ibz_init(&sign); + ibz_init(&r); + ibz_vec_4_content(&gcd, &(x->coord)); + ibz_gcd(&gcd, &gcd, &(x->denom)); + ibz_div(&(x->denom), &r, &(x->denom), &gcd); + ibz_vec_4_scalar_div(&(x->coord), &gcd, &(x->coord)); + ibz_set(&sign, 2 * (0 > ibz_cmp(&ibz_const_zero, &(x->denom))) - 1); + ibz_vec_4_scalar_mul(&(x->coord), &sign, &(x->coord)); + ibz_mul(&(x->denom), &sign, &(x->denom)); + ibz_finalize(&gcd); + ibz_finalize(&sign); + ibz_finalize(&r); +} + +int +quat_alg_elem_equal(const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + quat_alg_elem_t diff; + quat_alg_elem_init(&diff); + quat_alg_sub(&diff, a, b); + int res = quat_alg_elem_is_zero(&diff); + quat_alg_elem_finalize(&diff); + return (res); +} + +int +quat_alg_elem_is_zero(const quat_alg_elem_t *x) +{ + int res = ibz_vec_4_is_zero(&(x->coord)); + return (res); +} + +void +quat_alg_elem_set(quat_alg_elem_t *elem, int32_t denom, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) +{ + ibz_set(&(elem->coord[0]), coord0); + ibz_set(&(elem->coord[1]), coord1); + ibz_set(&(elem->coord[2]), coord2); + ibz_set(&(elem->coord[3]), coord3); + + ibz_set(&(elem->denom), denom); +} + +void +quat_alg_elem_copy(quat_alg_elem_t *copy, const quat_alg_elem_t *copied) +{ + ibz_copy(©->denom, &copied->denom); + ibz_copy(©->coord[0], &copied->coord[0]); + ibz_copy(©->coord[1], &copied->coord[1]); + ibz_copy(©->coord[2], &copied->coord[2]); + ibz_copy(©->coord[3], &copied->coord[3]); +} + +// helper functions for lattices +void +quat_alg_elem_copy_ibz(quat_alg_elem_t *elem, + const ibz_t *denom, + const ibz_t *coord0, + const ibz_t *coord1, + const ibz_t *coord2, + const ibz_t *coord3) +{ + ibz_copy(&(elem->coord[0]), coord0); + ibz_copy(&(elem->coord[1]), coord1); + ibz_copy(&(elem->coord[2]), coord2); + ibz_copy(&(elem->coord[3]), coord3); + + ibz_copy(&(elem->denom), denom); +} + +void +quat_alg_elem_mul_by_scalar(quat_alg_elem_t *res, const ibz_t *scalar, const quat_alg_elem_t *elem) +{ + for (int i = 0; i < 4; i++) { + ibz_mul(&(res->coord[i]), &(elem->coord[i]), scalar); + } + ibz_copy(&(res->denom), &(elem->denom)); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/api.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/api.c new file mode 100644 index 0000000000..e01f911e87 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/api.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: Apache-2.0 + +#include +#include + +#if defined(ENABLE_SIGN) +SQISIGN_API +int +crypto_sign_keypair(unsigned char *pk, unsigned char *sk) +{ + + return sqisign_keypair(pk, sk); +} + +SQISIGN_API +int +crypto_sign(unsigned char *sm, unsigned long long *smlen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *sk) +{ + return sqisign_sign(sm, smlen, m, mlen, sk); +} +#endif + +SQISIGN_API +int +crypto_sign_open(unsigned char *m, unsigned long long *mlen, + const unsigned char *sm, unsigned long long smlen, + const unsigned char *pk) +{ + return sqisign_open(m, mlen, sm, smlen, pk); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/api.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/api.h new file mode 100644 index 0000000000..8a37d4ba4e --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/api.h @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef api_h +#define api_h + +#include + +#define CRYPTO_SECRETKEYBYTES 529 +#define CRYPTO_PUBLICKEYBYTES 97 +#define CRYPTO_BYTES 224 + +#define CRYPTO_ALGNAME "SQIsign_lvl3" + +#if defined(ENABLE_SIGN) +SQISIGN_API +int 
+crypto_sign_keypair(unsigned char *pk, unsigned char *sk); + +SQISIGN_API +int +crypto_sign(unsigned char *sm, unsigned long long *smlen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *sk); +#endif + +SQISIGN_API +int +crypto_sign_open(unsigned char *m, unsigned long long *mlen, + const unsigned char *sm, unsigned long long smlen, + const unsigned char *pk); + +#endif /* api_h */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/basis.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/basis.c new file mode 100644 index 0000000000..94cb7fcacb --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/basis.c @@ -0,0 +1,416 @@ +#include "ec.h" +#include "fp2.h" +#include "e0_basis.h" +#include + +uint32_t +ec_recover_y(fp2_t *y, const fp2_t *Px, const ec_curve_t *curve) +{ // Recover y-coordinate of a point on the Montgomery curve y^2 = x^3 + Ax^2 + x + fp2_t t0; + + fp2_sqr(&t0, Px); + fp2_mul(y, &t0, &curve->A); // Ax^2 + fp2_add(y, y, Px); // Ax^2 + x + fp2_mul(&t0, &t0, Px); + fp2_add(y, y, &t0); // x^3 + Ax^2 + x + // This is required, because we do not yet know that our curves are + // supersingular so our points live on the twist with B = 1. + return fp2_sqrt_verify(y); +} + +static void +difference_point(ec_point_t *PQ, const ec_point_t *P, const ec_point_t *Q, const ec_curve_t *curve) +{ + // Given P,Q in projective x-only, computes a deterministic choice for (P-Q) + // Based on Proposition 3 of https://eprint.iacr.org/2017/518.pdf + + fp2_t Bxx, Bxz, Bzz, t0, t1; + + fp2_mul(&t0, &P->x, &Q->x); + fp2_mul(&t1, &P->z, &Q->z); + fp2_sub(&Bxx, &t0, &t1); + fp2_sqr(&Bxx, &Bxx); + fp2_mul(&Bxx, &Bxx, &curve->C); // C*(P.x*Q.x-P.z*Q.z)^2 + fp2_add(&Bxz, &t0, &t1); + fp2_mul(&t0, &P->x, &Q->z); + fp2_mul(&t1, &P->z, &Q->x); + fp2_add(&Bzz, &t0, &t1); + fp2_mul(&Bxz, &Bxz, &Bzz); // (P.x*Q.x+P.z*Q.z)(P.x*Q.z+P.z*Q.x) + fp2_sub(&Bzz, &t0, &t1); + fp2_sqr(&Bzz, &Bzz); + fp2_mul(&Bzz, &Bzz, &curve->C); // C*(P.x*Q.z-P.z*Q.x)^2 + fp2_mul(&Bxz, &Bxz, &curve->C); // C*(P.x*Q.x+P.z*Q.z)(P.x*Q.z+P.z*Q.x) + fp2_mul(&t0, &t0, &t1); + fp2_mul(&t0, &t0, &curve->A); + fp2_add(&t0, &t0, &t0); + fp2_add(&Bxz, &Bxz, &t0); // C*(P.x*Q.x+P.z*Q.z)(P.x*Q.z+P.z*Q.x) + 2*A*P.x*Q.z*P.z*Q.x + + // To ensure that the denominator is a fourth power in Fp, we normalize by + // C*C_bar^2*(P.z)_bar^2*(Q.z)_bar^2 + fp_copy(&t0.re, &curve->C.re); + fp_neg(&t0.im, &curve->C.im); + fp2_sqr(&t0, &t0); + fp2_mul(&t0, &t0, &curve->C); + fp_copy(&t1.re, &P->z.re); + fp_neg(&t1.im, &P->z.im); + fp2_sqr(&t1, &t1); + fp2_mul(&t0, &t0, &t1); + fp_copy(&t1.re, &Q->z.re); + fp_neg(&t1.im, &Q->z.im); + fp2_sqr(&t1, &t1); + fp2_mul(&t0, &t0, &t1); + fp2_mul(&Bxx, &Bxx, &t0); + fp2_mul(&Bxz, &Bxz, &t0); + fp2_mul(&Bzz, &Bzz, &t0); + + // Solving quadratic equation + fp2_sqr(&t0, &Bxz); + fp2_mul(&t1, &Bxx, &Bzz); + fp2_sub(&t0, &t0, &t1); + // No need to check if t0 is square, as per the entangled basis algorithm. + fp2_sqrt(&t0); + fp2_add(&PQ->x, &Bxz, &t0); + fp2_copy(&PQ->z, &Bzz); +} + +// Lifts a basis x(P), x(Q), x(P-Q) assuming the curve has (A/C : 1) and the point +// P = (X/Z : 1). 
For generic implementation see lift_basis() +uint32_t +lift_basis_normalized(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E) +{ + assert(fp2_is_one(&B->P.z)); + assert(fp2_is_one(&E->C)); + + fp2_copy(&P->x, &B->P.x); + fp2_copy(&Q->x, &B->Q.x); + fp2_copy(&Q->z, &B->Q.z); + fp2_set_one(&P->z); + uint32_t ret = ec_recover_y(&P->y, &P->x, E); + + // Algorithm of Okeya-Sakurai to recover y.Q in the montgomery model + fp2_t v1, v2, v3, v4; + fp2_mul(&v1, &P->x, &Q->z); + fp2_add(&v2, &Q->x, &v1); + fp2_sub(&v3, &Q->x, &v1); + fp2_sqr(&v3, &v3); + fp2_mul(&v3, &v3, &B->PmQ.x); + fp2_add(&v1, &E->A, &E->A); + fp2_mul(&v1, &v1, &Q->z); + fp2_add(&v2, &v2, &v1); + fp2_mul(&v4, &P->x, &Q->x); + fp2_add(&v4, &v4, &Q->z); + fp2_mul(&v2, &v2, &v4); + fp2_mul(&v1, &v1, &Q->z); + fp2_sub(&v2, &v2, &v1); + fp2_mul(&v2, &v2, &B->PmQ.z); + fp2_sub(&Q->y, &v3, &v2); + fp2_add(&v1, &P->y, &P->y); + fp2_mul(&v1, &v1, &Q->z); + fp2_mul(&v1, &v1, &B->PmQ.z); + fp2_mul(&Q->x, &Q->x, &v1); + fp2_mul(&Q->z, &Q->z, &v1); + + // Transforming to a jacobian coordinate + fp2_sqr(&v1, &Q->z); + fp2_mul(&Q->y, &Q->y, &v1); + fp2_mul(&Q->x, &Q->x, &Q->z); + return ret; +} + +uint32_t +lift_basis(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E) +{ + // Normalise the curve E such that (A : C) is (A/C : 1) + // and the point x(P) = (X/Z : 1). + fp2_t inverses[2]; + fp2_copy(&inverses[0], &B->P.z); + fp2_copy(&inverses[1], &E->C); + + fp2_batched_inv(inverses, 2); + fp2_set_one(&B->P.z); + fp2_set_one(&E->C); + + fp2_mul(&B->P.x, &B->P.x, &inverses[0]); + fp2_mul(&E->A, &E->A, &inverses[1]); + + // Lift the basis to Jacobian points P, Q + return lift_basis_normalized(P, Q, B, E); +} + +// Given an x-coordinate, determines if this is a valid +// point on the curve. Assumes C=1. +static uint32_t +is_on_curve(const fp2_t *x, const ec_curve_t *curve) +{ + assert(fp2_is_one(&curve->C)); + fp2_t t0; + + fp2_add(&t0, x, &curve->A); // x + (A/C) + fp2_mul(&t0, &t0, x); // x^2 + (A/C)*x + fp2_add_one(&t0, &t0); // x^2 + (A/C)*x + 1 + fp2_mul(&t0, &t0, x); // x^3 + (A/C)*x^2 + x + + return fp2_is_square(&t0); +} + +// Helper function which given a point of order k*2^n with n maximal +// and k odd, computes a point of order 2^f +static inline void +clear_cofactor_for_maximal_even_order(ec_point_t *P, ec_curve_t *curve, int f) +{ + // clear out the odd cofactor to get a point of order 2^n + ec_mul(P, p_cofactor_for_2f, P_COFACTOR_FOR_2F_BITLENGTH, P, curve); + + // clear the power of two to get a point of order 2^f + for (int i = 0; i < TORSION_EVEN_POWER - f; i++) { + xDBL_A24(P, P, &curve->A24, curve->is_A24_computed_and_normalized); + } +} + +// Helper function which finds an NQR -1 / (1 + i*b) for entangled basis generation +static uint8_t +find_nqr_factor(fp2_t *x, ec_curve_t *curve, const uint8_t start) +{ + // factor = -1/(1 + i*b) for b in Fp will be NQR whenever 1 + b^2 is NQR + // in Fp, so we find one of these and then invert (1 + i*b). We store b + // as a u8 hint to save time in verification. + + // We return the hint as a u8, but use (uint16_t)n to give 2^16 - 1 + // to make failure cryptographically negligible, with a fallback when + // n > 128 is required. 
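+    // (Why this works: assuming GF(p^2) = GF(p)(i) with i^2 = -1, an element
+    // of GF(p^2) is a square iff its norm down to GF(p) is a square there,
+    // and the norm of -1/(1 + i*b) is 1/(1 + b^2), which is a non-square in
+    // GF(p) exactly when 1 + b^2 is.)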
+ uint8_t hint; + uint32_t found = 0; + uint16_t n = start; + + bool qr_b = 1; + fp_t b, tmp; + fp2_t z, t0, t1; + + do { + while (qr_b) { + // find b with 1 + b^2 a non-quadratic residue + fp_set_small(&tmp, (uint32_t)n * n + 1); + qr_b = fp_is_square(&tmp); + n++; // keeps track of b = n - 1 + } + + // for Px := -A/(1 + i*b) to be on the curve + // is equivalent to A^2*(z-1) - z^2 NQR for z = 1 + i*b + // thus prevents unnecessary inversion pre-check + + // t0 = z - 1 = i*b + // t1 = z = 1 + i*b + fp_set_small(&b, (uint32_t)n - 1); + fp2_set_zero(&t0); + fp2_set_one(&z); + fp_copy(&z.im, &b); + fp_copy(&t0.im, &b); + + // A^2*(z-1) - z^2 + fp2_sqr(&t1, &curve->A); + fp2_mul(&t0, &t0, &t1); // A^2 * (z - 1) + fp2_sqr(&t1, &z); + fp2_sub(&t0, &t0, &t1); // A^2 * (z - 1) - z^2 + found = !fp2_is_square(&t0); + + qr_b = 1; + } while (!found); + + // set Px to -A/(1 + i*b) + fp2_copy(x, &z); + fp2_inv(x); + fp2_mul(x, x, &curve->A); + fp2_neg(x, x); + + /* + * With very low probability n will not fit in 7 bits. + * We set hint = 0 which signals failure and the need + * to generate a value on the fly during verification + */ + hint = n <= 128 ? n - 1 : 0; + + return hint; +} + +// Helper function which finds a point x(P) = n * A +static uint8_t +find_nA_x_coord(fp2_t *x, ec_curve_t *curve, const uint8_t start) +{ + assert(!fp2_is_square(&curve->A)); // Only to be called when A is a NQR + + // when A is NQR we allow x(P) to be a multiple n*A of A + uint8_t n = start; + if (n == 1) { + fp2_copy(x, &curve->A); + } else { + fp2_mul_small(x, &curve->A, n); + } + + while (!is_on_curve(x, curve)) { + fp2_add(x, x, &curve->A); + n++; + } + + /* + * With very low probability (1/2^128), n will not fit in 7 bits. + * In this case, we set hint = 0 which signals failure and the need + * to generate a value on the fly during verification + */ + uint8_t hint = n < 128 ? 
n : 0; + return hint; +} + +// The entangled basis generation does not allow A = 0 +// so we simply return the one we have already precomputed +static void +ec_basis_E0_2f(ec_basis_t *PQ2, ec_curve_t *curve, int f) +{ + assert(fp2_is_zero(&curve->A)); + ec_point_t P, Q; + + // Set P, Q to precomputed (X : 1) values + fp2_copy(&P.x, &BASIS_E0_PX); + fp2_copy(&Q.x, &BASIS_E0_QX); + fp2_set_one(&P.z); + fp2_set_one(&Q.z); + + // clear the power of two to get a point of order 2^f + for (int i = 0; i < TORSION_EVEN_POWER - f; i++) { + xDBL_E0(&P, &P); + xDBL_E0(&Q, &Q); + } + + // Set P, Q in the basis and compute x(P - Q) + copy_point(&PQ2->P, &P); + copy_point(&PQ2->Q, &Q); + difference_point(&PQ2->PmQ, &P, &Q, curve); +} + +// Computes a basis E[2^f] = where the point Q is above (0 : 0) +// and stores hints as an array for faster recomputation at a later point +uint8_t +ec_curve_to_basis_2f_to_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f) +{ + // Normalise (A/C : 1) and ((A + 2)/4 : 1) + ec_normalize_curve_and_A24(curve); + + if (fp2_is_zero(&curve->A)) { + ec_basis_E0_2f(PQ2, curve, f); + return 0; + } + + uint8_t hint; + bool hint_A = fp2_is_square(&curve->A); + + // Compute the points P, Q + ec_point_t P, Q; + + if (!hint_A) { + // when A is NQR we allow x(P) to be a multiple n*A of A + hint = find_nA_x_coord(&P.x, curve, 1); + } else { + // when A is QR we instead have to find (1 + b^2) a NQR + // such that x(P) = -A / (1 + i*b) + hint = find_nqr_factor(&P.x, curve, 1); + } + + fp2_set_one(&P.z); + fp2_add(&Q.x, &curve->A, &P.x); + fp2_neg(&Q.x, &Q.x); + fp2_set_one(&Q.z); + + // clear out the odd cofactor to get a point of order 2^f + clear_cofactor_for_maximal_even_order(&P, curve, f); + clear_cofactor_for_maximal_even_order(&Q, curve, f); + + // compute PmQ, set PmQ to Q to ensure Q above (0,0) + difference_point(&PQ2->Q, &P, &Q, curve); + copy_point(&PQ2->P, &P); + copy_point(&PQ2->PmQ, &Q); + + // Finally, we compress hint_A and hint into a single bytes. 
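+    // (hint is at most 7 bits -- see the assert below -- so (hint << 1) | hint_A fits in one byte)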
+ // We choose to set the LSB of hint to hint_A + assert(hint < 128); // We expect hint to be 7-bits in size + return (hint << 1) | hint_A; +} + +// Computes a basis E[2^f] = where the point Q is above (0 : 0) +// given the hints as an array for faster basis computation +int +ec_curve_to_basis_2f_from_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f, const uint8_t hint) +{ + // Normalise (A/C : 1) and ((A + 2)/4 : 1) + ec_normalize_curve_and_A24(curve); + + if (fp2_is_zero(&curve->A)) { + ec_basis_E0_2f(PQ2, curve, f); + return 1; + } + + // The LSB of hint encodes whether A is a QR + // The remaining 7-bits are used to find a valid x(P) + bool hint_A = hint & 1; + uint8_t hint_P = hint >> 1; + + // Compute the points P, Q + ec_point_t P, Q; + + if (!hint_P) { + // When hint_P = 0 it means we did not find a point in 128 attempts + // this is very rare and we almost never expect to need this fallback + // In either case, we can start with b = 128 to skip testing the known + // values which will not work + if (!hint_A) { + find_nA_x_coord(&P.x, curve, 128); + } else { + find_nqr_factor(&P.x, curve, 128); + } + } else { + // Otherwise we use the hint to directly find x(P) based on hint_A + if (!hint_A) { + // when A is NQR, we have found n such that x(P) = n*A + fp2_mul_small(&P.x, &curve->A, hint_P); + } else { + // when A is QR we have found b such that (1 + b^2) is a NQR in + // Fp, so we must compute x(P) = -A / (1 + i*b) + fp_set_one(&P.x.re); + fp_set_small(&P.x.im, hint_P); + fp2_inv(&P.x); + fp2_mul(&P.x, &P.x, &curve->A); + fp2_neg(&P.x, &P.x); + } + } + fp2_set_one(&P.z); + +#ifndef NDEBUG + int passed = 1; + passed = is_on_curve(&P.x, curve); + passed &= !fp2_is_square(&P.x); + + if (!passed) + return 0; +#endif + + // set xQ to -xP - A + fp2_add(&Q.x, &curve->A, &P.x); + fp2_neg(&Q.x, &Q.x); + fp2_set_one(&Q.z); + + // clear out the odd cofactor to get a point of order 2^f + clear_cofactor_for_maximal_even_order(&P, curve, f); + clear_cofactor_for_maximal_even_order(&Q, curve, f); + + // compute PmQ, set PmQ to Q to ensure Q above (0,0) + difference_point(&PQ2->Q, &P, &Q, curve); + copy_point(&PQ2->P, &P); + copy_point(&PQ2->PmQ, &Q); + +#ifndef NDEBUG + passed &= test_basis_order_twof(PQ2, curve, f); + + if (!passed) + return 0; +#endif + + return 1; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/bench.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/bench.h new file mode 100644 index 0000000000..c253825828 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/bench.h @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: Apache-2.0 +#ifndef BENCH_H__ +#define BENCH_H__ + +#include +#include +#include +#include +#include +#if defined(__APPLE__) +#include "bench_macos.h" +#endif + +#if defined(TARGET_ARM) || defined(TARGET_S390X) || defined(NO_CYCLE_COUNTER) +#define BENCH_UNIT0 "nanoseconds" +#define BENCH_UNIT3 "microseconds" +#define BENCH_UNIT6 "milliseconds" +#define BENCH_UNIT9 "seconds" +#else +#define BENCH_UNIT0 "cycles" +#define BENCH_UNIT3 "kilocycles" +#define BENCH_UNIT6 "megacycles" +#define BENCH_UNIT9 "gigacycles" +#endif + +static inline void +cpucycles_init(void) { +#if defined(__APPLE__) && defined(TARGET_ARM64) + macos_init_rdtsc(); +#endif +} + +static inline uint64_t +cpucycles(void) +{ +#if defined(TARGET_AMD64) || defined(TARGET_X86) + uint32_t hi, lo; + + asm volatile("rdtsc" : "=a"(lo), "=d"(hi)); + return ((uint64_t)lo) | ((uint64_t)hi << 32); +#elif defined(TARGET_S390X) + uint64_t tod; + asm volatile("stckf %0\n" : "=Q"(tod) : : "cc"); + 
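+    /* the TOD clock increments 4096 times per microsecond; scale to nanoseconds */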
return (tod * 1000 / 4096); +#elif defined(TARGET_ARM64) && !defined(NO_CYCLE_COUNTER) +#if defined(__APPLE__) + return macos_rdtsc(); +#else + uint64_t cycles; + asm volatile("mrs %0, PMCCNTR_EL0" : "=r"(cycles)); + return cycles; +#endif // __APPLE__ +#else + struct timespec time; + clock_gettime(CLOCK_REALTIME, &time); + return (uint64_t)time.tv_sec * 1000000000 + time.tv_nsec; +#endif +} + +static inline int +CMPFUNC(const void *a, const void *b) +{ + uint64_t aa = *(uint64_t *)a, bb = *(uint64_t *)b; + + if (aa > bb) + return +1; + if (aa < bb) + return -1; + return 0; +} + +static inline uint32_t +ISQRT(uint64_t x) +{ + uint32_t r = 0; + for (ssize_t i = 31; i >= 0; --i) { + uint32_t s = r + (1 << i); + if ((uint64_t)s * s <= x) + r = s; + } + return r; +} + +static inline double +_TRUNC(uint64_t x) +{ + return x / 1000 / 1000.; +} +#define _FMT ".3lf" +#define _UNIT BENCH_UNIT6 + +#define BENCH_CODE_1(RUNS) \ + { \ + const size_t count = (RUNS); \ + if (!count) \ + abort(); \ + uint64_t cycles, cycles1, cycles2; \ + uint64_t cycles_list[count]; \ + cycles = 0; \ + for (size_t i = 0; i < count; ++i) { \ + cycles1 = cpucycles(); + +#define BENCH_CODE_2(name) \ + cycles2 = cpucycles(); \ + cycles_list[i] = cycles2 - cycles1; \ + cycles += cycles2 - cycles1; \ + } \ + qsort(cycles_list, count, sizeof(uint64_t), CMPFUNC); \ + uint64_t variance = 0; \ + for (size_t i = 0; i < count; ++i) { \ + int64_t off = cycles_list[i] - cycles / count; \ + variance += off * off; \ + } \ + variance /= count; \ + printf(" %-10s", name); \ + printf(" | average %9" _FMT " | stddev %9" _FMT, \ + _TRUNC(cycles / count), \ + _TRUNC(ISQRT(variance))); \ + printf(" | median %9" _FMT " | min %9" _FMT " | max %9" _FMT, \ + _TRUNC(cycles_list[count / 2]), \ + _TRUNC(cycles_list[0]), \ + _TRUNC(cycles_list[count - 1])); \ + printf(" (%s)\n", _UNIT); \ + } + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/bench_macos.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/bench_macos.h new file mode 100644 index 0000000000..0494fc85e9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/bench_macos.h @@ -0,0 +1,143 @@ +// WARNING: must be run as root on an M1 device +// WARNING: fragile, uses private apple APIs +// currently no command line interface, see variables at top of main + +/* +no warranty; use at your own risk - i believe this code needs +some minor changes to work on some later hardware and/or software revisions, +which is unsurprising given the use of undocumented, private APIs. +------------------------------------------------------------------------------ +This code is available under 2 licenses -- choose whichever you prefer. +------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2020 Dougall Johnson +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ + +/* + Based on https://github.com/travisdowns/robsize + Henry Wong + http://blog.stuffedcow.net/2013/05/measuring-rob-capacity/ + 2014-10-14 +*/ + +#include +#include +#include +#include + +#define KPERF_LIST \ + /* ret, name, params */ \ + F(int, kpc_force_all_ctrs_set, int) \ + F(int, kpc_set_counting, uint32_t) \ + F(int, kpc_set_thread_counting, uint32_t) \ + F(int, kpc_set_config, uint32_t, void *) \ + F(int, kpc_get_thread_counters, int, unsigned int, void *) + +#define F(ret, name, ...) \ + typedef ret name##proc(__VA_ARGS__); \ + static name##proc *name; +KPERF_LIST +#undef F + +#define CFGWORD_EL0A64EN_MASK (0x20000) + +#define CPMU_CORE_CYCLE 0x02 + +#define KPC_CLASS_FIXED (0) +#define KPC_CLASS_CONFIGURABLE (1) + +#define COUNTERS_COUNT 10 +#define KPC_MASK ((1u << KPC_CLASS_CONFIGURABLE) | (1u << KPC_CLASS_FIXED)) +static uint64_t g_config[COUNTERS_COUNT]; +static uint64_t g_counters[COUNTERS_COUNT]; + +static void +macos_configure_rdtsc() +{ + if (kpc_force_all_ctrs_set(1)) { + printf("kpc_force_all_ctrs_set failed\n"); + return; + } + + if (kpc_set_config(KPC_MASK, g_config)) { + printf("kpc_set_config failed\n"); + return; + } + + if (kpc_set_counting(KPC_MASK)) { + printf("kpc_set_counting failed\n"); + return; + } + + if (kpc_set_thread_counting(KPC_MASK)) { + printf("kpc_set_thread_counting failed\n"); + return; + } +} + +static void +macos_init_rdtsc() +{ + void *kperf = + dlopen("/System/Library/PrivateFrameworks/kperf.framework/Versions/A/kperf", RTLD_LAZY); + if (!kperf) { + printf("kperf = %p\n", kperf); + return; + } +#define F(ret, name, ...) 
\ + name = (name##proc *)(intptr_t)(dlsym(kperf, #name)); \ + if (!name) { \ + printf("%s = %p\n", #name, (void *)(intptr_t)name); \ + return; \ + } + KPERF_LIST +#undef F + + g_config[0] = CPMU_CORE_CYCLE | CFGWORD_EL0A64EN_MASK; + + macos_configure_rdtsc(); +} + +static uint64_t +macos_rdtsc(void) +{ + if (kpc_get_thread_counters(0, COUNTERS_COUNT, g_counters)) { + printf("kpc_get_thread_counters failed\n"); + return 1; + } + return g_counters[2]; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/biextension.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/biextension.c new file mode 100644 index 0000000000..1df7ab938b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/biextension.c @@ -0,0 +1,770 @@ +#include +#include +#include +#include + +/* + * We implement the biextension arithmetic by using the cubical torsor + * representation. For now only implement the 2^e-ladder. + * + * Warning: cubicalADD is off by a factor x4 with respect to the correct + * cubical arithmetic. This does not affect the Weil pairing or the Tate + * pairing over F_{p^2} (due to the final exponentiation), but would give + * the wrong result if we compute the Tate pairing over F_p. + */ + +// this would be exactly like xADD if PQ was 'antinormalised' as (1,z) +// Cost: 3M + 2S + 3a + 3s +// Note: if needed, cubicalDBL is simply xDBL_A24 normalized and +// costs 3M + 2S + 2a + 2s + +static void +cubicalADD(ec_point_t *R, const ec_point_t *P, const ec_point_t *Q, const fp2_t *ixPQ) +{ + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_add(&t2, &Q->x, &Q->z); + fp2_sub(&t3, &Q->x, &Q->z); + fp2_mul(&t0, &t0, &t3); + fp2_mul(&t1, &t1, &t2); + fp2_add(&t2, &t0, &t1); + fp2_sub(&t3, &t0, &t1); + fp2_sqr(&R->z, &t3); + fp2_sqr(&t2, &t2); + fp2_mul(&R->x, ixPQ, &t2); +} + +// Given cubical reps of P, Q and x(P - Q) = (1 : ixPQ) +// compute P + Q, [2]Q +// Cost: 6M + 4S + 4a + 4s +static void +cubicalDBLADD(ec_point_t *PpQ, + ec_point_t *QQ, + const ec_point_t *P, + const ec_point_t *Q, + const fp2_t *ixPQ, + const ec_point_t *A24) +{ + // A24 = (A+2C/4C: 1) + assert(fp2_is_one(&A24->z)); + + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_add(&PpQ->x, &Q->x, &Q->z); + fp2_sub(&t3, &Q->x, &Q->z); + fp2_sqr(&t2, &PpQ->x); + fp2_sqr(&QQ->z, &t3); + fp2_mul(&t0, &t0, &t3); + fp2_mul(&t1, &t1, &PpQ->x); + fp2_add(&PpQ->x, &t0, &t1); + fp2_sub(&t3, &t0, &t1); + fp2_sqr(&PpQ->z, &t3); + fp2_sqr(&PpQ->x, &PpQ->x); + fp2_mul(&PpQ->x, ixPQ, &PpQ->x); + fp2_sub(&t3, &t2, &QQ->z); + fp2_mul(&QQ->x, &t2, &QQ->z); + fp2_mul(&t0, &t3, &A24->x); + fp2_add(&t0, &t0, &QQ->z); + fp2_mul(&QQ->z, &t0, &t3); +} + +// iterative biextension doubling +static void +biext_ladder_2e(uint32_t e, + ec_point_t *PnQ, + ec_point_t *nQ, + const ec_point_t *PQ, + const ec_point_t *Q, + const fp2_t *ixP, + const ec_point_t *A24) +{ + copy_point(PnQ, PQ); + copy_point(nQ, Q); + for (uint32_t i = 0; i < e; i++) { + cubicalDBLADD(PnQ, nQ, PnQ, nQ, ixP, A24); + } +} + +// Compute the monodromy ratio X/Z above as a (X:Z) point to avoid a division +// We implicitly use (1,0) as a cubical point above 0_E +static void +point_ratio(ec_point_t *R, const ec_point_t *PnQ, const ec_point_t *nQ, const ec_point_t *P) +{ + // Sanity tests + assert(ec_is_zero(nQ)); + assert(ec_is_equal(PnQ, P)); + + fp2_mul(&R->x, &nQ->x, &P->x); + fp2_copy(&R->z, &PnQ->x); +} + +// Compute the cubical translation of P by a point of 2-torsion T +static void +translate(ec_point_t *P, 
const ec_point_t *T)
+{
+    // When we translate, the following three things can happen:
+    // T = (A : 0) then the translation of P should be P
+    // T = (0 : B) then the translation of P = (X : Z) should be (Z : X)
+    // Otherwise T = (A : B) and P translates to (AX - BZ : BX - AZ)
+    // We compute this in constant time by computing the generic case
+    // and then using constant time swaps.
+    fp2_t PX_new, PZ_new;
+
+    {
+        fp2_t t0, t1;
+
+        // PX_new = AX - BZ
+        fp2_mul(&t0, &T->x, &P->x);
+        fp2_mul(&t1, &T->z, &P->z);
+        fp2_sub(&PX_new, &t0, &t1);
+
+        // PZ_new = BX - AZ
+        fp2_mul(&t0, &T->z, &P->x);
+        fp2_mul(&t1, &T->x, &P->z);
+        fp2_sub(&PZ_new, &t0, &t1);
+    }
+
+    // When we have A zero we should return (Z : X)
+    uint32_t TA_is_zero = fp2_is_zero(&T->x);
+    fp2_select(&PX_new, &PX_new, &P->z, TA_is_zero);
+    fp2_select(&PZ_new, &PZ_new, &P->x, TA_is_zero);
+
+    // When we have B zero we should return (X : Z)
+    uint32_t TB_is_zero = fp2_is_zero(&T->z);
+    fp2_select(&PX_new, &PX_new, &P->x, TB_is_zero);
+    fp2_select(&PZ_new, &PZ_new, &P->z, TB_is_zero);
+
+    // Set the point to the desired result
+    fp2_copy(&P->x, &PX_new);
+    fp2_copy(&P->z, &PZ_new);
+}
+
+// Compute the biextension monodromy g_P,Q^{2^g} (in level 1) via the
+// cubical arithmetic of P+2^e Q.
+// The suffix _i means that we are given 1/x(P) as parameter. Warning: to
+// get a meaningful result when using the monodromy to compute pairings, we
+// need P, Q, PQ, A24 to be normalised (this is not strictly necessary, but
+// care needs to be taken when they are not normalised. Only handle the
+// normalised case for now)
+static void
+monodromy_i(ec_point_t *R, const pairing_params_t *pairing_data, bool swap_PQ)
+{
+    fp2_t ixP;
+    ec_point_t P, Q, PnQ, nQ;
+
+    // When we compute the Weil pairing we need both P + [2^e]Q and
+    // Q + [2^e]P, which we can do easily with biext_ladder_2e() below;
+    // we use a bool to decide whether to use Q, ixP or P, ixQ in the
+    // ladder and P or Q in translation.
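+    // Note: both orientations are consumed by weil_n() below, which takes the
+    // ratio of the two monodromy values; the branch here only decides which of
+    // the two input points plays the role of "P" in the cubical ladder and
+    // which precomputed 1/x() value is passed along with it.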
+ if (!swap_PQ) { + copy_point(&P, &pairing_data->P); + copy_point(&Q, &pairing_data->Q); + fp2_copy(&ixP, &pairing_data->ixP); + } else { + copy_point(&P, &pairing_data->Q); + copy_point(&Q, &pairing_data->P); + fp2_copy(&ixP, &pairing_data->ixQ); + } + + // Compute the biextension ladder P + [2^e]Q + biext_ladder_2e(pairing_data->e - 1, &PnQ, &nQ, &pairing_data->PQ, &Q, &ixP, &pairing_data->A24); + translate(&PnQ, &nQ); + translate(&nQ, &nQ); + point_ratio(R, &PnQ, &nQ, &P); +} + +// Normalize the points and also store 1/x(P), 1/x(Q) +static void +cubical_normalization(pairing_params_t *pairing_data, const ec_point_t *P, const ec_point_t *Q) +{ + fp2_t t[4]; + fp2_copy(&t[0], &P->x); + fp2_copy(&t[1], &P->z); + fp2_copy(&t[2], &Q->x); + fp2_copy(&t[3], &Q->z); + fp2_batched_inv(t, 4); + + // Store PZ / PX and QZ / QX + fp2_mul(&pairing_data->ixP, &P->z, &t[0]); + fp2_mul(&pairing_data->ixQ, &Q->z, &t[2]); + + // Store x(P), x(Q) normalised to (X/Z : 1) + fp2_mul(&pairing_data->P.x, &P->x, &t[1]); + fp2_mul(&pairing_data->Q.x, &Q->x, &t[3]); + fp2_set_one(&pairing_data->P.z); + fp2_set_one(&pairing_data->Q.z); +} + +// Weil pairing, PQ should be P+Q in (X:Z) coordinates +// We assume the points are normalised correctly +static void +weil_n(fp2_t *r, const pairing_params_t *pairing_data) +{ + ec_point_t R0, R1; + monodromy_i(&R0, pairing_data, true); + monodromy_i(&R1, pairing_data, false); + + fp2_mul(r, &R0.x, &R1.z); + fp2_inv(r); + fp2_mul(r, r, &R0.z); + fp2_mul(r, r, &R1.x); +} + +// Weil pairing, PQ should be P+Q in (X:Z) coordinates +// Normalise the points and call the code above +// The code will crash (division by 0) if either P or Q is (0:1) +void +weil(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E) +{ + pairing_params_t pairing_data; + // Construct the structure for the Weil pairing + // Set (PX/PZ : 1), (QX : QZ : 1), PZ/PX and QZ/QX + pairing_data.e = e; + cubical_normalization(&pairing_data, P, Q); + copy_point(&pairing_data.PQ, PQ); + + // Ensure the input curve has A24 normalised and store + // in a struct + ec_curve_normalize_A24(E); + copy_point(&pairing_data.A24, &E->A24); + + // Compute the Weil pairing e_(2^n)(P, Q) + weil_n(r, &pairing_data); +} + +// two helper functions for reducing the tate pairing +// clear_cofac clears (p + 1) // 2^f for an Fp2 value +void +clear_cofac(fp2_t *r, const fp2_t *a) +{ + digit_t exp = *p_cofactor_for_2f; + exp >>= 1; + + fp2_t x; + fp2_copy(&x, a); + fp2_copy(r, a); + + // removes cofac + while (exp > 0) { + fp2_sqr(r, r); + if (exp & 1) { + fp2_mul(r, r, &x); + } + exp >>= 1; + } +} + +// applies frobenius a + ib --> a - ib to an fp2 element +void +fp2_frob(fp2_t *out, const fp2_t *in) +{ + fp_copy(&(out->re), &(in->re)); + fp_neg(&(out->im), &(in->im)); +} + +// reduced Tate pairing, normalizes the points, assumes PQ is P+Q in (X:Z) +// coordinates. 
Computes 1/x(P) and 1/x(Q) for efficient cubical ladder +void +reduced_tate(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E) +{ + uint32_t e_full = TORSION_EVEN_POWER; + uint32_t e_diff = e_full - e; + ec_point_t R; + pairing_params_t pairing_data; + + // Construct the structure for the Weil pairing + // Set (PX/PZ : 1), (QX : QZ : 1), PZ/PX and QZ/QX + pairing_data.e = e; + cubical_normalization(&pairing_data, P, Q); + copy_point(&pairing_data.PQ, PQ); + + // Ensure the input curve has A24 normalised and store + // in a struct + ec_curve_normalize_A24(E); + copy_point(&pairing_data.A24, &E->A24); + + monodromy_i(&R, &pairing_data, true); + + // we get unreduced tate as R.X, R.Z + // reduced tate is -(R.Z/R.X)^((p^2 - 1) div 2^f) + // we reuse R.X and R.Z to split reduction step ^(p-1) into frobenius and ^-1 + fp2_t frob, tmp; + fp2_copy(&tmp, &R.x); + fp2_frob(&frob, &R.x); + fp2_mul(&R.x, &R.z, &frob); + fp2_frob(&frob, &R.z); + fp2_mul(&R.z, &tmp, &frob); + fp2_inv(&R.x); + fp2_mul(r, &R.x, &R.z); + + clear_cofac(r, r); + // clear remaining 2^e_diff + for (uint32_t j = 0; j < e_diff; j++) { + fp2_sqr(r, r); + } +} + +// Functions to compute discrete logs by computing the Weil pairing of points +// followed by computing the dlog in Fp^2 +// (If we work with full order points, it would be faster to use the Tate +// pairings rather than the Weil pairings; this is not implemented yet) + +// recursive dlog function +static bool +fp2_dlog_2e_rec(digit_t *a, long len, fp2_t *pows_f, fp2_t *pows_g, long stacklen) +{ + if (len == 0) { + // *a = 0; + for (int i = 0; i < NWORDS_ORDER; i++) { + a[i] = 0; + } + return true; + } else if (len == 1) { + if (fp2_is_one(&pows_f[stacklen - 1])) { + // a = 0; + for (int i = 0; i < NWORDS_ORDER; i++) { + a[i] = 0; + } + for (int i = 0; i < stacklen - 1; ++i) { + fp2_sqr(&pows_g[i], &pows_g[i]); // new_g = g^2 + } + return true; + } else if (fp2_is_equal(&pows_f[stacklen - 1], &pows_g[stacklen - 1])) { + // a = 1; + a[0] = 1; + for (int i = 1; i < NWORDS_ORDER; i++) { + a[i] = 0; + } + for (int i = 0; i < stacklen - 1; ++i) { + fp2_mul(&pows_f[i], &pows_f[i], &pows_g[i]); // new_f = f*g + fp2_sqr(&pows_g[i], &pows_g[i]); // new_g = g^2 + } + return true; + } else { + return false; + } + } else { + long right = (double)len * 0.5; + long left = len - right; + pows_f[stacklen] = pows_f[stacklen - 1]; + pows_g[stacklen] = pows_g[stacklen - 1]; + for (int i = 0; i < left; i++) { + fp2_sqr(&pows_f[stacklen], &pows_f[stacklen]); + fp2_sqr(&pows_g[stacklen], &pows_g[stacklen]); + } + // uint32_t dlp1 = 0, dlp2 = 0; + digit_t dlp1[NWORDS_ORDER], dlp2[NWORDS_ORDER]; + bool ok; + ok = fp2_dlog_2e_rec(dlp1, right, pows_f, pows_g, stacklen + 1); + if (!ok) + return false; + ok = fp2_dlog_2e_rec(dlp2, left, pows_f, pows_g, stacklen); + if (!ok) + return false; + // a = dlp1 + 2^right * dlp2 + multiple_mp_shiftl(dlp2, right, NWORDS_ORDER); + mp_add(a, dlp2, dlp1, NWORDS_ORDER); + + return true; + } +} + +// compute DLP: compute scal such that f = g^scal with f, 1/g as input +static bool +fp2_dlog_2e(digit_t *scal, const fp2_t *f, const fp2_t *g_inverse, int e) +{ + long log, len = e; + for (log = 0; len > 1; len >>= 1) + log++; + log += 1; + + fp2_t pows_f[log], pows_g[log]; + pows_f[0] = *f; + pows_g[0] = *g_inverse; + + for (int i = 0; i < NWORDS_ORDER; i++) { + scal[i] = 0; + } + + bool ok = fp2_dlog_2e_rec(scal, e, pows_f, pows_g, 1); + assert(ok); + + return ok; +} + +// Normalize the bases (P, Q), (R, S) and store 
their inverse +// and additionally normalise the curve to (A/C : 1) +static void +cubical_normalization_dlog(pairing_dlog_params_t *pairing_dlog_data, ec_curve_t *curve) +{ + fp2_t t[11]; + ec_basis_t *PQ = &pairing_dlog_data->PQ; + ec_basis_t *RS = &pairing_dlog_data->RS; + fp2_copy(&t[0], &PQ->P.x); + fp2_copy(&t[1], &PQ->P.z); + fp2_copy(&t[2], &PQ->Q.x); + fp2_copy(&t[3], &PQ->Q.z); + fp2_copy(&t[4], &PQ->PmQ.x); + fp2_copy(&t[5], &PQ->PmQ.z); + fp2_copy(&t[6], &RS->P.x); + fp2_copy(&t[7], &RS->P.z); + fp2_copy(&t[8], &RS->Q.x); + fp2_copy(&t[9], &RS->Q.z); + fp2_copy(&t[10], &curve->C); + + fp2_batched_inv(t, 11); + + fp2_mul(&pairing_dlog_data->ixP, &PQ->P.z, &t[0]); + fp2_mul(&PQ->P.x, &PQ->P.x, &t[1]); + fp2_set_one(&PQ->P.z); + + fp2_mul(&pairing_dlog_data->ixQ, &PQ->Q.z, &t[2]); + fp2_mul(&PQ->Q.x, &PQ->Q.x, &t[3]); + fp2_set_one(&PQ->Q.z); + + fp2_mul(&PQ->PmQ.x, &PQ->PmQ.x, &t[5]); + fp2_set_one(&PQ->PmQ.z); + + fp2_mul(&pairing_dlog_data->ixR, &RS->P.z, &t[6]); + fp2_mul(&RS->P.x, &RS->P.x, &t[7]); + fp2_set_one(&RS->P.z); + + fp2_mul(&pairing_dlog_data->ixS, &RS->Q.z, &t[8]); + fp2_mul(&RS->Q.x, &RS->Q.x, &t[9]); + fp2_set_one(&RS->Q.z); + + fp2_mul(&curve->A, &curve->A, &t[10]); + fp2_set_one(&curve->C); +} + +// Given two bases and basis = compute +// x(P - R), x(P - S), x(R - Q), x(S - Q) +static void +compute_difference_points(pairing_dlog_params_t *pairing_dlog_data, ec_curve_t *curve) +{ + jac_point_t xyP, xyQ, xyR, xyS, temp; + + // lifting the two basis points, assumes that x(P) and x(R) + // and the curve itself are normalised to (X : 1) + lift_basis_normalized(&xyP, &xyQ, &pairing_dlog_data->PQ, curve); + lift_basis_normalized(&xyR, &xyS, &pairing_dlog_data->RS, curve); + + // computation of the differences + // x(P - R) + jac_neg(&temp, &xyR); + ADD(&temp, &temp, &xyP, curve); + jac_to_xz(&pairing_dlog_data->diff.PmR, &temp); + + // x(P - S) + jac_neg(&temp, &xyS); + ADD(&temp, &temp, &xyP, curve); + jac_to_xz(&pairing_dlog_data->diff.PmS, &temp); + + // x(R - Q) + jac_neg(&temp, &xyQ); + ADD(&temp, &temp, &xyR, curve); + jac_to_xz(&pairing_dlog_data->diff.RmQ, &temp); + + // x(S - Q) + jac_neg(&temp, &xyQ); + ADD(&temp, &temp, &xyS, curve); + jac_to_xz(&pairing_dlog_data->diff.SmQ, &temp); +} + +// Inline all the Weil pairing computations needed for ec_dlog_2_weil +static void +weil_dlog(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, pairing_dlog_params_t *pairing_dlog_data) +{ + + ec_point_t nP, nQ, nR, nS, nPQ, PnQ, nPR, PnR, nPS, PnS, nRQ, RnQ, nSQ, SnQ; + + copy_point(&nP, &pairing_dlog_data->PQ.P); + copy_point(&nQ, &pairing_dlog_data->PQ.Q); + copy_point(&nR, &pairing_dlog_data->RS.P); + copy_point(&nS, &pairing_dlog_data->RS.Q); + copy_point(&nPQ, &pairing_dlog_data->PQ.PmQ); + copy_point(&PnQ, &pairing_dlog_data->PQ.PmQ); + copy_point(&nPR, &pairing_dlog_data->diff.PmR); + copy_point(&nPS, &pairing_dlog_data->diff.PmS); + copy_point(&PnR, &pairing_dlog_data->diff.PmR); + copy_point(&PnS, &pairing_dlog_data->diff.PmS); + copy_point(&nRQ, &pairing_dlog_data->diff.RmQ); + copy_point(&nSQ, &pairing_dlog_data->diff.SmQ); + copy_point(&RnQ, &pairing_dlog_data->diff.RmQ); + copy_point(&SnQ, &pairing_dlog_data->diff.SmQ); + + for (uint32_t i = 0; i < pairing_dlog_data->e - 1; i++) { + cubicalADD(&nPQ, &nPQ, &nP, &pairing_dlog_data->ixQ); + cubicalADD(&nPR, &nPR, &nP, &pairing_dlog_data->ixR); + cubicalDBLADD(&nPS, &nP, &nPS, &nP, &pairing_dlog_data->ixS, &pairing_dlog_data->A24); + + cubicalADD(&PnQ, &PnQ, &nQ, &pairing_dlog_data->ixP); + cubicalADD(&RnQ, 
&RnQ, &nQ, &pairing_dlog_data->ixR); + cubicalDBLADD(&SnQ, &nQ, &SnQ, &nQ, &pairing_dlog_data->ixS, &pairing_dlog_data->A24); + + cubicalADD(&PnR, &PnR, &nR, &pairing_dlog_data->ixP); + cubicalDBLADD(&nRQ, &nR, &nRQ, &nR, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + + cubicalADD(&PnS, &PnS, &nS, &pairing_dlog_data->ixP); + cubicalDBLADD(&nSQ, &nS, &nSQ, &nS, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + } + + // weil(&w0,e,&PQ->P,&PQ->Q,&PQ->PmQ,&A24); + translate(&nPQ, &nP); + translate(&nPR, &nP); + translate(&nPS, &nP); + translate(&PnQ, &nQ); + translate(&RnQ, &nQ); + translate(&SnQ, &nQ); + translate(&PnR, &nR); + translate(&nRQ, &nR); + translate(&PnS, &nS); + translate(&nSQ, &nS); + + translate(&nP, &nP); + translate(&nQ, &nQ); + translate(&nR, &nR); + translate(&nS, &nS); + + // computation of the reference weil pairing + ec_point_t T0, T1; + fp2_t w1[5], w2[5]; + + // e(P, Q) = w0 + point_ratio(&T0, &nPQ, &nP, &pairing_dlog_data->PQ.Q); + point_ratio(&T1, &PnQ, &nQ, &pairing_dlog_data->PQ.P); + // For the first element we need it's inverse for + // fp2_dlog_2e so we swap w1 and w2 here to save inversions + fp2_mul(&w2[0], &T0.x, &T1.z); + fp2_mul(&w1[0], &T1.x, &T0.z); + + // e(P,R) = w0^r2 + point_ratio(&T0, &nPR, &nP, &pairing_dlog_data->RS.P); + point_ratio(&T1, &PnR, &nR, &pairing_dlog_data->PQ.P); + fp2_mul(&w1[1], &T0.x, &T1.z); + fp2_mul(&w2[1], &T1.x, &T0.z); + + // e(R,Q) = w0^r1 + point_ratio(&T0, &nRQ, &nR, &pairing_dlog_data->PQ.Q); + point_ratio(&T1, &RnQ, &nQ, &pairing_dlog_data->RS.P); + fp2_mul(&w1[2], &T0.x, &T1.z); + fp2_mul(&w2[2], &T1.x, &T0.z); + + // e(P,S) = w0^s2 + point_ratio(&T0, &nPS, &nP, &pairing_dlog_data->RS.Q); + point_ratio(&T1, &PnS, &nS, &pairing_dlog_data->PQ.P); + fp2_mul(&w1[3], &T0.x, &T1.z); + fp2_mul(&w2[3], &T1.x, &T0.z); + + // e(S,Q) = w0^s1 + point_ratio(&T0, &nSQ, &nS, &pairing_dlog_data->PQ.Q); + point_ratio(&T1, &SnQ, &nQ, &pairing_dlog_data->RS.Q); + fp2_mul(&w1[4], &T0.x, &T1.z); + fp2_mul(&w2[4], &T1.x, &T0.z); + + fp2_batched_inv(w1, 5); + for (int i = 0; i < 5; i++) { + fp2_mul(&w1[i], &w1[i], &w2[i]); + } + + fp2_dlog_2e(r2, &w1[1], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(r1, &w1[2], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s2, &w1[3], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s1, &w1[4], &w1[0], pairing_dlog_data->e); +} + +void +ec_dlog_2_weil(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e) +{ + assert(test_point_order_twof(&PQ->Q, curve, e)); + + // precomputing the correct curve data + ec_curve_normalize_A24(curve); + + pairing_dlog_params_t pairing_dlog_data; + pairing_dlog_data.e = e; + pairing_dlog_data.PQ = *PQ; + pairing_dlog_data.RS = *RS; + pairing_dlog_data.A24 = curve->A24; + + cubical_normalization_dlog(&pairing_dlog_data, curve); + compute_difference_points(&pairing_dlog_data, curve); + + weil_dlog(r1, r2, s1, s2, &pairing_dlog_data); + +#ifndef NDEBUG + ec_point_t test; + ec_biscalar_mul(&test, r1, r2, e, PQ, curve); + // R = [r1]P + [r2]Q + assert(ec_is_equal(&test, &RS->P)); + ec_biscalar_mul(&test, s1, s2, e, PQ, curve); + // S = [s1]P + [s2]Q + assert(ec_is_equal(&test, &RS->Q)); +#endif +} + +// Inline all the Tate pairing computations needed for ec_dlog_2_weil +// including reduction, assumes a bases PQ of full E[2^e_full] torsion +// and a bases RS of smaller E[2^e] torsion +static void +tate_dlog_partial(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, pairing_dlog_params_t *pairing_dlog_data) 
+{ + + uint32_t e_full = TORSION_EVEN_POWER; + uint32_t e_diff = e_full - pairing_dlog_data->e; + + ec_point_t nP, nQ, nR, nS, nPQ, PnR, PnS, nRQ, nSQ; + + copy_point(&nP, &pairing_dlog_data->PQ.P); + copy_point(&nQ, &pairing_dlog_data->PQ.Q); + copy_point(&nR, &pairing_dlog_data->RS.P); + copy_point(&nS, &pairing_dlog_data->RS.Q); + copy_point(&nPQ, &pairing_dlog_data->PQ.PmQ); + copy_point(&PnR, &pairing_dlog_data->diff.PmR); + copy_point(&PnS, &pairing_dlog_data->diff.PmS); + copy_point(&nRQ, &pairing_dlog_data->diff.RmQ); + copy_point(&nSQ, &pairing_dlog_data->diff.SmQ); + + for (uint32_t i = 0; i < e_full - 1; i++) { + cubicalDBLADD(&nPQ, &nP, &nPQ, &nP, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + } + + for (uint32_t i = 0; i < pairing_dlog_data->e - 1; i++) { + cubicalADD(&PnR, &PnR, &nR, &pairing_dlog_data->ixP); + cubicalDBLADD(&nRQ, &nR, &nRQ, &nR, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + + cubicalADD(&PnS, &PnS, &nS, &pairing_dlog_data->ixP); + cubicalDBLADD(&nSQ, &nS, &nSQ, &nS, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + } + + translate(&nPQ, &nP); + translate(&PnR, &nR); + translate(&nRQ, &nR); + translate(&PnS, &nS); + translate(&nSQ, &nS); + + translate(&nP, &nP); + translate(&nQ, &nQ); + translate(&nR, &nR); + translate(&nS, &nS); + + // computation of the reference Tate pairing + ec_point_t T0; + fp2_t w1[5], w2[5]; + + // t(P, Q)^(2^e_diff) = w0 + point_ratio(&T0, &nPQ, &nP, &pairing_dlog_data->PQ.Q); + fp2_copy(&w1[0], &T0.x); + fp2_copy(&w2[0], &T0.z); + + // t(R,P) = w0^r2 + point_ratio(&T0, &PnR, &nR, &pairing_dlog_data->PQ.P); + fp2_copy(&w1[1], &T0.x); + fp2_copy(&w2[1], &T0.z); + + // t(R,Q) = w0^r1 + point_ratio(&T0, &nRQ, &nR, &pairing_dlog_data->PQ.Q); + fp2_copy(&w2[2], &T0.x); + fp2_copy(&w1[2], &T0.z); + + // t(S,P) = w0^s2 + point_ratio(&T0, &PnS, &nS, &pairing_dlog_data->PQ.P); + fp2_copy(&w1[3], &T0.x); + fp2_copy(&w2[3], &T0.z); + + // t(S,Q) = w0^s1 + point_ratio(&T0, &nSQ, &nS, &pairing_dlog_data->PQ.Q); + fp2_copy(&w2[4], &T0.x); + fp2_copy(&w1[4], &T0.z); + + // batched reduction using projective representation + for (int i = 0; i < 5; i++) { + fp2_t frob, tmp; + fp2_copy(&tmp, &w1[i]); + // inline frobenius for ^p + // multiply by inverse to get ^(p-1) + fp2_frob(&frob, &w1[i]); + fp2_mul(&w1[i], &w2[i], &frob); + + // repeat for denom + fp2_frob(&frob, &w2[i]); + fp2_mul(&w2[i], &tmp, &frob); + } + + // batched normalization + fp2_batched_inv(w2, 5); + for (int i = 0; i < 5; i++) { + fp2_mul(&w1[i], &w1[i], &w2[i]); + } + + for (int i = 0; i < 5; i++) { + clear_cofac(&w1[i], &w1[i]); + + // removes 2^e_diff + for (uint32_t j = 0; j < e_diff; j++) { + fp2_sqr(&w1[i], &w1[i]); + } + } + + fp2_dlog_2e(r2, &w1[1], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(r1, &w1[2], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s2, &w1[3], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s1, &w1[4], &w1[0], pairing_dlog_data->e); +} + +void +ec_dlog_2_tate(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + const ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e) +{ + // assume PQ is a full torsion basis + // returns a, b, c, d such that R = [a]P + [b]Q, S = [c]P + [d]Q + +#ifndef NDEBUG + int e_full = TORSION_EVEN_POWER; + int e_diff = e_full - e; +#endif + assert(test_basis_order_twof(PQ, curve, e_full)); + + // precomputing the correct curve data + ec_curve_normalize_A24(curve); + + pairing_dlog_params_t pairing_dlog_data; + pairing_dlog_data.e = e; + pairing_dlog_data.PQ = *PQ; + pairing_dlog_data.RS = *RS; + 
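// Note: curve->A24 is already normalised to ((A+2)/4 : 1) by the
+    // ec_curve_normalize_A24() call above; the cubical ladder in
+    // tate_dlog_partial() relies on an affine A24 (see cubicalDBLADD()).
+    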
pairing_dlog_data.A24 = curve->A24; + + cubical_normalization_dlog(&pairing_dlog_data, curve); + compute_difference_points(&pairing_dlog_data, curve); + tate_dlog_partial(r1, r2, s1, s2, &pairing_dlog_data); + +#ifndef NDEBUG + ec_point_t test; + ec_biscalar_mul(&test, r1, r2, e, PQ, curve); + ec_dbl_iter(&test, e_diff, &test, curve); + // R = [r1]P + [r2]Q + assert(ec_is_equal(&test, &RS->P)); + + ec_biscalar_mul(&test, s1, s2, e, PQ, curve); + ec_dbl_iter(&test, e_diff, &test, curve); + // S = [s1]P + [s2]Q + assert(ec_is_equal(&test, &RS->Q)); +#endif +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/biextension.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/biextension.h new file mode 100644 index 0000000000..1a50fcc738 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/biextension.h @@ -0,0 +1,82 @@ +#ifndef _BIEXT_H_ +#define _BIEXT_H_ + +#include +#include + +typedef struct pairing_params +{ + uint32_t e; // Points have order 2^e + ec_point_t P; // x(P) + ec_point_t Q; // x(Q) + ec_point_t PQ; // x(P-Q) = (PQX/PQZ : 1) + fp2_t ixP; // PZ/PX + fp2_t ixQ; // QZ/QX + ec_point_t A24; // ((A+2)/4 : 1) +} pairing_params_t; + +// For two bases and store: +// x(P - R), x(P - S), x(R - Q), x(S - Q) +typedef struct pairing_dlog_diff_points +{ + ec_point_t PmR; // x(P - R) + ec_point_t PmS; // x(P - S) + ec_point_t RmQ; // x(R - Q) + ec_point_t SmQ; // x(S - Q) +} pairing_dlog_diff_points_t; + +typedef struct pairing_dlog_params +{ + uint32_t e; // Points have order 2^e + ec_basis_t PQ; // x(P), x(Q), x(P-Q) + ec_basis_t RS; // x(R), x(S), x(R-S) + pairing_dlog_diff_points_t diff; // x(P - R), x(P - S), x(R - Q), x(S - Q) + fp2_t ixP; // PZ/PX + fp2_t ixQ; // QZ/QX + fp2_t ixR; // RZ/RX + fp2_t ixS; // SZ/SX + ec_point_t A24; // ((A+2)/4 : 1) +} pairing_dlog_params_t; + +// Computes e = e_{2^e}(P, Q) using biextension ladder +void weil(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E); + +// Computes (reduced) z = t_{2^e}(P, Q) using biextension ladder +void reduced_tate(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E); + +// Given two bases and computes scalars +// such that R = [r1]P + [r2]Q, S = [s1]P + [s2]Q +void ec_dlog_2_weil(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e); + +// Given two bases and +// where is a basis for E[2^f] +// the full 2-torsion, and a basis +// for smaller torsion E[2^e] +// computes scalars r1, r2, s1, s2 +// such that R = [r1]P + [r2]Q, S = [s1]P + [s2]Q +void ec_dlog_2_tate(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + const ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e); + +void ec_dlog_2_tate_to_full(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + ec_basis_t *PQ, + ec_basis_t *RS, + ec_curve_t *curve, + int e); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/common.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/common.c new file mode 100644 index 0000000000..d393e9cb11 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/common.c @@ -0,0 +1,88 @@ +#include +#include +#include +#include +#include +#include + +void +public_key_init(public_key_t *pk) +{ + ec_curve_init(&pk->curve); +} + +void +public_key_finalize(public_key_t *pk) +{ +} + +// compute the challenge as the hash of the message and the commitment curve and public key +void +hash_to_challenge(scalar_t 
*scalar, + const public_key_t *pk, + const ec_curve_t *com_curve, + const unsigned char *message, + size_t length) +{ + unsigned char buf[2 * FP2_ENCODED_BYTES]; + { + fp2_t j1, j2; + ec_j_inv(&j1, &pk->curve); + ec_j_inv(&j2, com_curve); + fp2_encode(buf, &j1); + fp2_encode(buf + FP2_ENCODED_BYTES, &j2); + } + + { + // The type scalar_t represents an element of GF(p), which is about + // 2*lambda bits, where lambda = 128, 192 or 256, according to the + // security level. Thus, the variable scalar should have enough memory + // for the values produced by SHAKE256 in the intermediate iterations. + + shake256incctx ctx; + + size_t hash_bytes = ((2 * SECURITY_BITS) + 7) / 8; + size_t limbs = (hash_bytes + sizeof(digit_t) - 1) / sizeof(digit_t); + size_t bits = (2 * SECURITY_BITS) % RADIX; + digit_t mask = ((digit_t)-1) >> ((RADIX - bits) % RADIX); +#ifdef TARGET_BIG_ENDIAN + mask = BSWAP_DIGIT(mask); +#endif + + shake256_inc_init(&ctx); + shake256_inc_absorb(&ctx, buf, 2 * FP2_ENCODED_BYTES); + shake256_inc_absorb(&ctx, message, length); + shake256_inc_finalize(&ctx); + shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + (*scalar)[limbs - 1] &= mask; + for (int i = 2; i < HASH_ITERATIONS; i++) { + shake256_inc_init(&ctx); + shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); + shake256_inc_finalize(&ctx); + shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + (*scalar)[limbs - 1] &= mask; + } + shake256_inc_init(&ctx); + shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); + shake256_inc_finalize(&ctx); + + hash_bytes = ((TORSION_EVEN_POWER - SQIsign_response_length) + 7) / 8; + limbs = (hash_bytes + sizeof(digit_t) - 1) / sizeof(digit_t); + bits = (TORSION_EVEN_POWER - SQIsign_response_length) % RADIX; + mask = ((digit_t)-1) >> ((RADIX - bits) % RADIX); +#ifdef TARGET_BIG_ENDIAN + mask = BSWAP_DIGIT(mask); +#endif + + memset(*scalar, 0, NWORDS_ORDER * sizeof(digit_t)); + shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + (*scalar)[limbs - 1] &= mask; + +#ifdef TARGET_BIG_ENDIAN + for (int i = 0; i < NWORDS_ORDER; i++) + (*scalar)[i] = BSWAP_DIGIT((*scalar)[i]); +#endif + + mp_mod_2exp(*scalar, SECURITY_BITS, NWORDS_ORDER); + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim2.c new file mode 100644 index 0000000000..b31ae7771a --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim2.c @@ -0,0 +1,132 @@ +#include +#include "internal.h" + +// internal helpers, also for other files +void +ibz_vec_2_set(ibz_vec_2_t *vec, int a0, int a1) +{ + ibz_set(&((*vec)[0]), a0); + ibz_set(&((*vec)[1]), a1); +} +void +ibz_mat_2x2_set(ibz_mat_2x2_t *mat, int a00, int a01, int a10, int a11) +{ + ibz_set(&((*mat)[0][0]), a00); + ibz_set(&((*mat)[0][1]), a01); + ibz_set(&((*mat)[1][0]), a10); + ibz_set(&((*mat)[1][1]), a11); +} + +void +ibz_mat_2x2_copy(ibz_mat_2x2_t *copy, const ibz_mat_2x2_t *copied) +{ + ibz_copy(&((*copy)[0][0]), &((*copied)[0][0])); + ibz_copy(&((*copy)[0][1]), &((*copied)[0][1])); + ibz_copy(&((*copy)[1][0]), &((*copied)[1][0])); + ibz_copy(&((*copy)[1][1]), &((*copied)[1][1])); +} + +void +ibz_mat_2x2_add(ibz_mat_2x2_t *sum, const ibz_mat_2x2_t *a, const ibz_mat_2x2_t *b) +{ + ibz_add(&((*sum)[0][0]), &((*a)[0][0]), &((*b)[0][0])); + ibz_add(&((*sum)[0][1]), &((*a)[0][1]), &((*b)[0][1])); + ibz_add(&((*sum)[1][0]), &((*a)[1][0]), &((*b)[1][0])); + ibz_add(&((*sum)[1][1]), &((*a)[1][1]), &((*b)[1][1])); +} + +void +ibz_mat_2x2_det_from_ibz(ibz_t *det, const ibz_t *a11, 
const ibz_t *a12, const ibz_t *a21, const ibz_t *a22) +{ + ibz_t prod; + ibz_init(&prod); + ibz_mul(&prod, a12, a21); + ibz_mul(det, a11, a22); + ibz_sub(det, det, &prod); + ibz_finalize(&prod); +} + +void +ibz_mat_2x2_eval(ibz_vec_2_t *res, const ibz_mat_2x2_t *mat, const ibz_vec_2_t *vec) +{ + ibz_t prod; + ibz_vec_2_t matvec; + ibz_init(&prod); + ibz_vec_2_init(&matvec); + ibz_mul(&prod, &((*mat)[0][0]), &((*vec)[0])); + ibz_copy(&(matvec[0]), &prod); + ibz_mul(&prod, &((*mat)[0][1]), &((*vec)[1])); + ibz_add(&(matvec[0]), &(matvec[0]), &prod); + ibz_mul(&prod, &((*mat)[1][0]), &((*vec)[0])); + ibz_copy(&(matvec[1]), &prod); + ibz_mul(&prod, &((*mat)[1][1]), &((*vec)[1])); + ibz_add(&(matvec[1]), &(matvec[1]), &prod); + ibz_copy(&((*res)[0]), &(matvec[0])); + ibz_copy(&((*res)[1]), &(matvec[1])); + ibz_finalize(&prod); + ibz_vec_2_finalize(&matvec); +} + +// modular 2x2 operations + +void +ibz_2x2_mul_mod(ibz_mat_2x2_t *prod, const ibz_mat_2x2_t *mat_a, const ibz_mat_2x2_t *mat_b, const ibz_t *m) +{ + ibz_t mul; + ibz_mat_2x2_t sums; + ibz_init(&mul); + ibz_mat_2x2_init(&sums); + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_set(&(sums[i][j]), 0); + } + } + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + for (int k = 0; k < 2; k++) { + ibz_mul(&mul, &((*mat_a)[i][k]), &((*mat_b)[k][j])); + ibz_add(&(sums[i][j]), &(sums[i][j]), &mul); + ibz_mod(&(sums[i][j]), &(sums[i][j]), m); + } + } + } + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_copy(&((*prod)[i][j]), &(sums[i][j])); + } + } + ibz_finalize(&mul); + ibz_mat_2x2_finalize(&sums); +} + +int +ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m) +{ + ibz_t det, prod; + ibz_init(&det); + ibz_init(&prod); + ibz_mul(&det, &((*mat)[0][0]), &((*mat)[1][1])); + ibz_mod(&det, &det, m); + ibz_mul(&prod, &((*mat)[0][1]), &((*mat)[1][0])); + ibz_sub(&det, &det, &prod); + ibz_mod(&det, &det, m); + int res = ibz_invmod(&det, &det, m); + // return 0 matrix if non invertible determinant + ibz_set(&prod, res); + ibz_mul(&det, &det, &prod); + // compute inverse + ibz_copy(&prod, &((*mat)[0][0])); + ibz_copy(&((*inv)[0][0]), &((*mat)[1][1])); + ibz_copy(&((*inv)[1][1]), &prod); + ibz_neg(&((*inv)[1][0]), &((*mat)[1][0])); + ibz_neg(&((*inv)[0][1]), &((*mat)[0][1])); + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_mul(&((*inv)[i][j]), &((*inv)[i][j]), &det); + ibz_mod(&((*inv)[i][j]), &((*inv)[i][j]), m); + } + } + ibz_finalize(&det); + ibz_finalize(&prod); + return (res); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim2id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim2id2iso.c new file mode 100644 index 0000000000..171473d481 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim2id2iso.c @@ -0,0 +1,1172 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int +_fixed_degree_isogeny_impl(quat_left_ideal_t *lideal, + const ibz_t *u, + bool small, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP, + const int index_alternate_order) +{ + + // var declaration + int ret; + ibz_t two_pow, tmp; + quat_alg_elem_t theta; + + ec_curve_t E0; + copy_curve(&E0, &CURVES_WITH_ENDOMORPHISMS[index_alternate_order].curve); + ec_curve_normalize_A24(&E0); + + unsigned length; + + int u_bitsize = ibz_bitsize(u); + + // deciding the power of 2 of the dim2 isogeny we use for this + // the smaller the faster, but if it set too low there is a risk that + // 
RepresentInteger will fail + if (!small) { + // in that case, we just set it to be the biggest value possible + length = TORSION_EVEN_POWER - HD_extra_torsion; + } else { + length = ibz_bitsize(&QUATALG_PINFTY.p) + QUAT_repres_bound_input - u_bitsize; + assert(u_bitsize < (int)length); + assert(length < TORSION_EVEN_POWER - HD_extra_torsion); + } + assert(length); + + // var init + ibz_init(&two_pow); + ibz_init(&tmp); + quat_alg_elem_init(&theta); + + ibz_pow(&two_pow, &ibz_const_two, length); + ibz_copy(&tmp, u); + assert(ibz_cmp(&two_pow, &tmp) > 0); + assert(!ibz_is_even(&tmp)); + + // computing the endomorphism theta of norm u * (2^(length) - u) + ibz_sub(&tmp, &two_pow, &tmp); + ibz_mul(&tmp, &tmp, u); + assert(!ibz_is_even(&tmp)); + + // setting-up the quat_represent_integer_params + quat_represent_integer_params_t ri_params; + ri_params.primality_test_iterations = QUAT_represent_integer_params.primality_test_iterations; + + quat_p_extremal_maximal_order_t order_hnf; + quat_alg_elem_init(&order_hnf.z); + quat_alg_elem_copy(&order_hnf.z, &EXTREMAL_ORDERS[index_alternate_order].z); + quat_alg_elem_init(&order_hnf.t); + quat_alg_elem_copy(&order_hnf.t, &EXTREMAL_ORDERS[index_alternate_order].t); + quat_lattice_init(&order_hnf.order); + ibz_copy(&order_hnf.order.denom, &EXTREMAL_ORDERS[index_alternate_order].order.denom); + ibz_mat_4x4_copy(&order_hnf.order.basis, &EXTREMAL_ORDERS[index_alternate_order].order.basis); + order_hnf.q = EXTREMAL_ORDERS[index_alternate_order].q; + ri_params.order = &order_hnf; + ri_params.algebra = &QUATALG_PINFTY; + +#ifndef NDEBUG + assert(quat_lattice_contains(NULL, &ri_params.order->order, &ri_params.order->z)); + assert(quat_lattice_contains(NULL, &ri_params.order->order, &ri_params.order->t)); +#endif + + ret = quat_represent_integer(&theta, &tmp, 1, &ri_params); + + assert(!ibz_is_even(&tmp)); + + if (!ret) { + printf("represent integer failed for the alternate order number %d and for " + "a target of " + "size %d for a u of size %d with length = " + "%u \n", + index_alternate_order, + ibz_bitsize(&tmp), + ibz_bitsize(u), + length); + goto cleanup; + } + quat_lideal_create(lideal, &theta, u, &order_hnf.order, &QUATALG_PINFTY); + + quat_alg_elem_finalize(&order_hnf.z); + quat_alg_elem_finalize(&order_hnf.t); + quat_lattice_finalize(&order_hnf.order); + +#ifndef NDEBUG + ibz_t test_norm, test_denom; + ibz_init(&test_denom); + ibz_init(&test_norm); + quat_alg_norm(&test_norm, &test_denom, &theta, &QUATALG_PINFTY); + assert(ibz_is_one(&test_denom)); + assert(ibz_cmp(&test_norm, &tmp) == 0); + assert(!ibz_is_even(&tmp)); + assert(quat_lattice_contains(NULL, &EXTREMAL_ORDERS[index_alternate_order].order, &theta)); + ibz_finalize(&test_norm); + ibz_finalize(&test_denom); +#endif + + ec_basis_t B0_two; + // copying the basis + copy_basis(&B0_two, &CURVES_WITH_ENDOMORPHISMS[index_alternate_order].basis_even); + assert(test_basis_order_twof(&B0_two, &E0, TORSION_EVEN_POWER)); + ec_dbl_iter_basis(&B0_two, TORSION_EVEN_POWER - length - HD_extra_torsion, &B0_two, &E0); + + assert(test_basis_order_twof(&B0_two, &E0, length + HD_extra_torsion)); + + // now we set-up the kernel + theta_couple_point_t T1; + theta_couple_point_t T2, T1m2; + + copy_point(&T1.P1, &B0_two.P); + copy_point(&T2.P1, &B0_two.Q); + copy_point(&T1m2.P1, &B0_two.PmQ); + + // multiplication of theta by (u)^-1 mod 2^(length+2) + ibz_mul(&two_pow, &two_pow, &ibz_const_two); + ibz_mul(&two_pow, &two_pow, &ibz_const_two); + ibz_copy(&tmp, u); + ibz_invmod(&tmp, &tmp, &two_pow); + 
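// Note: tmp now holds u^{-1} mod 2^(length+2); multiplying the coordinates
+    // of theta by it below means theta acts as theta/u on the even-torsion
+    // basis used to build the dimension-2 kernel.
+    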
assert(!ibz_is_even(&tmp)); + + ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); + ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); + ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); + ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + + // applying theta to the basis + ec_basis_t B0_two_theta; + copy_basis(&B0_two_theta, &B0_two); + endomorphism_application_even_basis(&B0_two_theta, index_alternate_order, &E0, &theta, length + HD_extra_torsion); + + // Ensure the basis we're using has the expected order + assert(test_basis_order_twof(&B0_two_theta, &E0, length + HD_extra_torsion)); + + // Set-up the domain E0 x E0 + theta_couple_curve_t E00; + E00.E1 = E0; + E00.E2 = E0; + + // Set-up the kernel from the bases + theta_kernel_couple_points_t dim_two_ker; + copy_bases_to_kernel(&dim_two_ker, &B0_two, &B0_two_theta); + + ret = theta_chain_compute_and_eval(length, &E00, &dim_two_ker, true, E34, P12, numP); + if (!ret) + goto cleanup; + + assert(length); + ret = (int)length; + +cleanup: + // var finalize + ibz_finalize(&two_pow); + ibz_finalize(&tmp); + quat_alg_elem_finalize(&theta); + + return ret; +} + +int +fixed_degree_isogeny_and_eval(quat_left_ideal_t *lideal, + const ibz_t *u, + bool small, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP, + const int index_alternate_order) +{ + return _fixed_degree_isogeny_impl(lideal, u, small, E34, P12, numP, index_alternate_order); +} + +// takes the output of LLL and apply some small treatment on the basis +// reordering vectors and switching some signs if needed to make it in a nicer +// shape +static void +post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, const ibz_t *norm, bool is_special_order) +{ + // if the left order is the special one, then we apply some additional post + // treatment + if (is_special_order) { + // reordering the basis if needed + if (ibz_cmp(&(*gram)[0][0], &(*gram)[2][2]) == 0) { + for (int i = 0; i < 4; i++) { + ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + } + ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); + ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); + ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); + ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); + ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); + } else if (ibz_cmp(&(*gram)[0][0], &(*gram)[3][3]) == 0) { + for (int i = 0; i < 4; i++) { + ibz_swap(&(*reduced)[i][1], &(*reduced)[i][3]); + } + ibz_swap(&(*gram)[0][3], &(*gram)[0][1]); + ibz_swap(&(*gram)[3][0], &(*gram)[1][0]); + ibz_swap(&(*gram)[2][3], &(*gram)[2][1]); + ibz_swap(&(*gram)[3][2], &(*gram)[1][2]); + ibz_swap(&(*gram)[3][3], &(*gram)[1][1]); + } else if (ibz_cmp(&(*gram)[1][1], &(*gram)[3][3]) == 0) { + // in this case it seems that we need to swap the second and third + // element, and then recompute entirely the second element from the first + // first we swap the second and third element + for (int i = 0; i < 4; i++) { + ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + } + ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); + ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); + ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); + ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); + ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); + } + + // adjusting the sign if needed + if (ibz_cmp(&(*reduced)[0][0], &(*reduced)[1][1]) != 0) { + for (int i = 0; i < 4; i++) { + ibz_neg(&(*reduced)[i][1], &(*reduced)[i][1]); + ibz_neg(&(*gram)[i][1], &(*gram)[i][1]); + ibz_neg(&(*gram)[1][i], &(*gram)[1][i]); + } + } + if (ibz_cmp(&(*reduced)[0][2], &(*reduced)[1][3]) != 0) { + for (int i = 0; i < 4; i++) { + ibz_neg(&(*reduced)[i][3], 
&(*reduced)[i][3]); + ibz_neg(&(*gram)[i][3], &(*gram)[i][3]); + ibz_neg(&(*gram)[3][i], &(*gram)[3][i]); + } + // assert(ibz_cmp(&(*reduced)[0][2],&(*reduced)[1][3])==0); + } + } +} + +// enumerate all vectors in an hypercube of norm m for the infinity norm +// with respect to a basis whose gram matrix is given by gram +// Returns an int `count`, the number of vectors found with the desired +// properties +static int +enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t *gram, const ibz_t *adjusted_norm) +{ + + ibz_t remain, norm; + ibz_vec_4_t point; + + ibz_init(&remain); + ibz_init(&norm); + ibz_vec_4_init(&point); + + assert(m > 0); + + int count = 0; + int dim = 2 * m + 1; + int dim2 = dim * dim; + int dim3 = dim2 * dim; + + // if the basis is of the form alpha, i*alpha, beta, i*beta + // we can remove some values due to symmetry of the basis that + bool need_remove_symmetry = + (ibz_cmp(&(*gram)[0][0], &(*gram)[1][1]) == 0 && ibz_cmp(&(*gram)[3][3], &(*gram)[2][2]) == 0); + + int check1, check2, check3; + + // Enumerate over points in a hypercube with coordinates (x, y, z, w) + for (int x = -m; x <= 0; x++) { // We only check non-positive x-values + for (int y = -m; y < m + 1; y++) { + // Once x = 0 we only consider non-positive y values + if (x == 0 && y > 0) { + break; + } + for (int z = -m; z < m + 1; z++) { + // If x and y are both zero, we only consider non-positive z values + if (x == 0 && y == 0 && z > 0) { + break; + } + for (int w = -m; w < m + 1; w++) { + // If x, y, z are all zero, we only consider negative w values + if (x == 0 && y == 0 && z == 0 && w >= 0) { + break; + } + + // Now for each candidate (x, y, z, w) we need to check a number of + // conditions We have already filtered for symmetry with several break + // statements, but there are more checks. + + // 1. We do not allow all (x, y, z, w) to be multiples of 2 + // 2. We do not allow all (x, y, z, w) to be multiples of 3 + // 3. We do not want elements of the same norm, so we quotient out the + // action + // of a group of order four generated by i for a basis expected to + // be of the form: [gamma, i gamma, beta, i beta ]. 
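+                    //
+                    // For condition 3: check1/check2/check3 below encode the tuples
+                    // (x, y, z, w), (y, -x, w, -z) and (-y, x, -w, z) -- the images of the
+                    // candidate under multiplication by -i and i in that basis -- as
+                    // base-(2m+1) integers; a candidate is kept only when its own encoding
+                    // is not larger than those of its i-multiples, so (together with the
+                    // sign restrictions above) each orbit is essentially recorded once.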
+ + // Ensure that not all values are even + if (!((x | y | z | w) & 1)) { + continue; + } + // Ensure that not all values are multiples of three + if (x % 3 == 0 && y % 3 == 0 && z % 3 == 0 && w % 3 == 0) { + continue; + } + + check1 = (m + w) + dim * (m + z) + dim2 * (m + y) + dim3 * (m + x); + check2 = (m - z) + dim * (m + w) + dim2 * (m - x) + dim3 * (m + y); + check3 = (m + z) + dim * (m - w) + dim2 * (m + x) + dim3 * (m - y); + + // either the basis does not have symmetry and we are good, + // or there is a special symmetry that we can exploit + // and we ensure that we don't record the same norm in the list + if (!need_remove_symmetry || (check1 <= check2 && check1 <= check3)) { + // Set the point as a vector (x, y, z, w) + ibz_set(&point[0], x); + ibz_set(&point[1], y); + ibz_set(&point[2], z); + ibz_set(&point[3], w); + + // Evaluate this through the gram matrix and divide out by the + // adjusted_norm + quat_qf_eval(&norm, gram, &point); + ibz_div(&norm, &remain, &norm, adjusted_norm); + assert(ibz_is_zero(&remain)); + + if (ibz_mod_ui(&norm, 2) == 1) { + ibz_set(&vecs[count][0], x); + ibz_set(&vecs[count][1], y); + ibz_set(&vecs[count][2], z); + ibz_set(&vecs[count][3], w); + ibz_copy(&norms[count], &norm); + count++; + } + } + } + } + } + } + + ibz_finalize(&remain); + ibz_finalize(&norm); + ibz_vec_4_finalize(&point); + + return count - 1; +} + +// enumerate through the two list given in input to find to integer d1,d2 such +// that there exists u,v with u d1 + v d2 = target the bool is diagonal +// indicates if the two lists are the same +static int +find_uv_from_lists(ibz_t *au, + ibz_t *bu, + ibz_t *av, + ibz_t *bv, + ibz_t *u, + ibz_t *v, + int *index_sol1, + int *index_sol2, + const ibz_t *target, + const ibz_t *small_norms1, + const ibz_t *small_norms2, + const ibz_t *quotients, + const int index1, + const int index2, + const int is_diagonal, + const int number_sum_square) +{ + + ibz_t n, remain, adjusted_norm; + ibz_init(&n); + ibz_init(&remain); + ibz_init(&adjusted_norm); + + int found = 0; + int cmp; + ibz_copy(&n, target); + + // enumerating through the list + for (int i1 = 0; i1 < index1; i1++) { + ibz_mod(&adjusted_norm, &n, &small_norms1[i1]); + int starting_index2; + if (is_diagonal) { + starting_index2 = i1; + } else { + starting_index2 = 0; + } + for (int i2 = starting_index2; i2 < index2; i2++) { + // u = target / d1 mod d2 + if (!ibz_invmod(&remain, &small_norms2[i2], &small_norms1[i1])) { + continue; + } + ibz_mul(v, &remain, &adjusted_norm); + ibz_mod(v, v, &small_norms1[i1]); + cmp = ibz_cmp(v, "ients[i2]); + while (!found && cmp < 0) { + if (number_sum_square > 0) { + found = ibz_cornacchia_prime(av, bv, &ibz_const_one, v); + } else if (number_sum_square == 0) { + found = 1; + } + if (found) { + ibz_mul(&remain, v, &small_norms2[i2]); + ibz_copy(au, &n); + ibz_sub(u, au, &remain); + assert(ibz_cmp(u, &ibz_const_zero) > 0); + ibz_div(u, &remain, u, &small_norms1[i1]); + assert(ibz_is_zero(&remain)); + // we want to remove weird cases where u,v have big power of two + found = found && (ibz_get(u) != 0 && ibz_get(v) != 0); + if (number_sum_square == 2) { + found = ibz_cornacchia_prime(au, bu, &ibz_const_one, u); + } + } + if (!found) { + ibz_add(v, v, &small_norms1[i1]); + cmp = ibz_cmp(v, "ients[i2]); + } + } + + if (found) { + // copying the indices + *index_sol1 = i1; + *index_sol2 = i2; + break; + } + } + if (found) { + break; + } + } + + ibz_finalize(&n); + ibz_finalize(&remain); + ibz_finalize(&adjusted_norm); + + return found; +} + +struct 
vec_and_norm +{ + ibz_vec_4_t vec; + ibz_t norm; + int idx; +}; + +static int +compare_vec_by_norm(const void *_first, const void *_second) +{ + const struct vec_and_norm *first = _first, *second = _second; + int res = ibz_cmp(&first->norm, &second->norm); + if (res != 0) + return res; + else + return first->idx - second->idx; +} + +// use several special curves +// we assume that the first one is always j=1728 +int +find_uv(ibz_t *u, + ibz_t *v, + quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *d1, + ibz_t *d2, + int *index_alternate_order_1, + int *index_alternate_order_2, + const ibz_t *target, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo, + int num_alternate_order) + +{ + + // variable declaration & init + ibz_vec_4_t vec; + ibz_t n; + ibz_t au, bu, av, bv; + ibz_t norm_d; + ibz_t remain; + ibz_init(&au); + ibz_init(&bu); + ibz_init(&av); + ibz_init(&bv); + ibz_init(&norm_d); + ibz_init(&n); + ibz_vec_4_init(&vec); + ibz_init(&remain); + + ibz_copy(&n, target); + + ibz_t adjusted_norm[num_alternate_order + 1]; + ibz_mat_4x4_t gram[num_alternate_order + 1], reduced[num_alternate_order + 1]; + quat_left_ideal_t ideal[num_alternate_order + 1]; + + for (int i = 0; i < num_alternate_order + 1; i++) { + ibz_init(&adjusted_norm[i]); + ibz_mat_4x4_init(&gram[i]); + ibz_mat_4x4_init(&reduced[i]); + quat_left_ideal_init(&ideal[i]); + } + + // first we reduce the ideal given in input + quat_lideal_copy(&ideal[0], lideal); + quat_lideal_reduce_basis(&reduced[0], &gram[0], &ideal[0], Bpoo); + + ibz_mat_4x4_copy(&ideal[0].lattice.basis, &reduced[0]); + ibz_set(&adjusted_norm[0], 1); + ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); + ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); + post_LLL_basis_treatment(&gram[0], &reduced[0], &ideal[0].norm, true); + + // for efficient lattice reduction, we replace ideal[0] by the equivalent + // ideal of smallest norm + quat_left_ideal_t reduced_id; + quat_left_ideal_init(&reduced_id); + quat_lideal_copy(&reduced_id, &ideal[0]); + quat_alg_elem_t delta; + // delta will be the element of smallest norm + quat_alg_elem_init(&delta); + ibz_set(&delta.coord[0], 1); + ibz_set(&delta.coord[1], 0); + ibz_set(&delta.coord[2], 0); + ibz_set(&delta.coord[3], 0); + ibz_copy(&delta.denom, &reduced_id.lattice.denom); + ibz_mat_4x4_eval(&delta.coord, &reduced[0], &delta.coord); + assert(quat_lattice_contains(NULL, &reduced_id.lattice, &delta)); + + // reduced_id = ideal[0] * \overline{delta}/n(ideal[0]) + quat_alg_conj(&delta, &delta); + ibz_mul(&delta.denom, &delta.denom, &ideal[0].norm); + quat_lattice_alg_elem_mul(&reduced_id.lattice, &reduced_id.lattice, &delta, Bpoo); + ibz_copy(&reduced_id.norm, &gram[0][0][0]); + ibz_div(&reduced_id.norm, &remain, &reduced_id.norm, &adjusted_norm[0]); + assert(ibz_cmp(&remain, &ibz_const_zero) == 0); + + // and conj_ideal is the conjugate of reduced_id + // init the right order; + quat_lattice_t right_order; + quat_lattice_init(&right_order); + // computing the conjugate + quat_left_ideal_t conj_ideal; + quat_left_ideal_init(&conj_ideal); + quat_lideal_conjugate_without_hnf(&conj_ideal, &right_order, &reduced_id, Bpoo); + + // computing all the other connecting ideals and reducing them + for (int i = 1; i < num_alternate_order + 1; i++) { + quat_lideal_lideal_mul_reduced(&ideal[i], &gram[i], &conj_ideal, &ALTERNATE_CONNECTING_IDEALS[i - 1], Bpoo); + ibz_mat_4x4_copy(&reduced[i], &ideal[i].lattice.basis); + ibz_set(&adjusted_norm[i], 1); + 
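// Note: as for ideal[0] above, adjusted_norm[i] is set to the square of the
+        // lattice denominator; enumerate_hypercube() divides the quadratic-form
+        // values by it to recover integral norms.
+        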
ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom); + ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom); + post_LLL_basis_treatment(&gram[i], &reduced[i], &ideal[i].norm, false); + } + + // enumerating small vectors + + // global parameters for the enumeration + int m = FINDUV_box_size; + int m4 = FINDUV_cube_size; + + ibz_vec_4_t small_vecs[num_alternate_order + 1][m4]; + ibz_t small_norms[num_alternate_order + 1][m4]; + ibz_vec_4_t alternate_small_vecs[num_alternate_order + 1][m4]; + ibz_t alternate_small_norms[num_alternate_order + 1][m4]; + ibz_t quotients[num_alternate_order + 1][m4]; + int indices[num_alternate_order + 1]; + + for (int j = 0; j < num_alternate_order + 1; j++) { + for (int i = 0; i < m4; i++) { + ibz_init(&small_norms[j][i]); + ibz_vec_4_init(&small_vecs[j][i]); + ibz_init(&alternate_small_norms[j][i]); + ibz_init("ients[j][i]); + ibz_vec_4_init(&alternate_small_vecs[j][i]); + } + // enumeration in the hypercube of norm m + indices[j] = enumerate_hypercube(small_vecs[j], small_norms[j], m, &gram[j], &adjusted_norm[j]); + + // sorting the list + { + struct vec_and_norm small_vecs_and_norms[indices[j]]; + for (int i = 0; i < indices[j]; ++i) { + memcpy(&small_vecs_and_norms[i].vec, &small_vecs[j][i], sizeof(ibz_vec_4_t)); + memcpy(&small_vecs_and_norms[i].norm, &small_norms[j][i], sizeof(ibz_t)); + small_vecs_and_norms[i].idx = i; + } + qsort(small_vecs_and_norms, indices[j], sizeof(*small_vecs_and_norms), compare_vec_by_norm); + for (int i = 0; i < indices[j]; ++i) { + memcpy(&small_vecs[j][i], &small_vecs_and_norms[i].vec, sizeof(ibz_vec_4_t)); + memcpy(&small_norms[j][i], &small_vecs_and_norms[i].norm, sizeof(ibz_t)); + } +#ifndef NDEBUG + for (int i = 1; i < indices[j]; ++i) + assert(ibz_cmp(&small_norms[j][i - 1], &small_norms[j][i]) <= 0); +#endif + } + + for (int i = 0; i < indices[j]; i++) { + ibz_div("ients[j][i], &remain, &n, &small_norms[j][i]); + } + } + + int found = 0; + int i1; + int i2; + for (int j1 = 0; j1 < num_alternate_order + 1; j1++) { + for (int j2 = j1; j2 < num_alternate_order + 1; j2++) { + // in this case, there are some small adjustements to make + int is_diago = (j1 == j2); + found = find_uv_from_lists(&au, + &bu, + &av, + &bv, + u, + v, + &i1, + &i2, + target, + small_norms[j1], + small_norms[j2], + quotients[j2], + indices[j1], + indices[j2], + is_diago, + 0); + // } + + if (found) { + // recording the solutions that we found + ibz_copy(&beta1->denom, &ideal[j1].lattice.denom); + ibz_copy(&beta2->denom, &ideal[j2].lattice.denom); + ibz_copy(d1, &small_norms[j1][i1]); + ibz_copy(d2, &small_norms[j2][i2]); + ibz_mat_4x4_eval(&beta1->coord, &reduced[j1], &small_vecs[j1][i1]); + ibz_mat_4x4_eval(&beta2->coord, &reduced[j2], &small_vecs[j2][i2]); + assert(quat_lattice_contains(NULL, &ideal[j1].lattice, beta1)); + assert(quat_lattice_contains(NULL, &ideal[j2].lattice, beta2)); + if (j1 != 0 || j2 != 0) { + ibz_div(&delta.denom, &remain, &delta.denom, &lideal->norm); + assert(ibz_cmp(&remain, &ibz_const_zero) == 0); + ibz_mul(&delta.denom, &delta.denom, &conj_ideal.norm); + } + if (j1 != 0) { + // we send back beta1 to the original ideal + quat_alg_mul(beta1, &delta, beta1, Bpoo); + quat_alg_normalize(beta1); + } + if (j2 != 0) { + // we send back beta2 to the original ideal + quat_alg_mul(beta2, &delta, beta2, Bpoo); + quat_alg_normalize(beta2); + } + + // if the selected element belong to an alternate order, we conjugate it + if (j1 != 0) { + quat_alg_conj(beta1, beta1); + } + if (j2 != 0) { + 
quat_alg_conj(beta2, beta2); + } + +#ifndef NDEBUG + quat_alg_norm(&remain, &norm_d, beta1, &QUATALG_PINFTY); + assert(ibz_is_one(&norm_d)); + ibz_mul(&n, d1, &ideal->norm); + if (j1 > 0) { + ibz_mul(&n, &n, &ALTERNATE_CONNECTING_IDEALS[j1 - 1].norm); + } + assert(ibz_cmp(&n, &remain) == 0); + quat_alg_norm(&remain, &norm_d, beta2, &QUATALG_PINFTY); + assert(ibz_is_one(&norm_d)); + ibz_mul(&n, d2, &ideal->norm); + if (j2 > 0) { + ibz_mul(&n, &n, &ALTERNATE_CONNECTING_IDEALS[j2 - 1].norm); + } + assert(ibz_cmp(&n, &remain) == 0); + assert(quat_lattice_contains(NULL, &ideal->lattice, beta1)); + assert(quat_lattice_contains(NULL, &ideal->lattice, beta2)); + + quat_left_ideal_t ideal_test; + quat_lattice_t ro; + quat_left_ideal_init(&ideal_test); + quat_lattice_init(&ro); + if (j1 > 0) { + quat_lideal_copy(&ideal_test, &ALTERNATE_CONNECTING_IDEALS[j1 - 1]); + quat_lideal_conjugate_without_hnf(&ideal_test, &ro, &ideal_test, Bpoo); + quat_lideal_lideal_mul_reduced(&ideal_test, &gram[0], &ideal_test, ideal, Bpoo); + assert(quat_lattice_contains(NULL, &ideal_test.lattice, beta1)); + } + if (j2 > 0) { + quat_lideal_copy(&ideal_test, &ALTERNATE_CONNECTING_IDEALS[j2 - 1]); + quat_lideal_conjugate_without_hnf(&ideal_test, &ro, &ideal_test, Bpoo); + quat_lideal_lideal_mul_reduced(&ideal_test, &gram[0], &ideal_test, ideal, Bpoo); + assert(quat_lattice_contains(NULL, &ideal_test.lattice, beta2)); + } + + quat_lattice_finalize(&ro); + quat_left_ideal_finalize(&ideal_test); +#endif + + *index_alternate_order_1 = j1; + *index_alternate_order_2 = j2; + break; + } + } + if (found) { + break; + } + } + + for (int j = 0; j < num_alternate_order + 1; j++) { + for (int i = 0; i < m4; i++) { + ibz_finalize(&small_norms[j][i]); + ibz_vec_4_finalize(&small_vecs[j][i]); + ibz_finalize(&alternate_small_norms[j][i]); + ibz_finalize("ients[j][i]); + ibz_vec_4_finalize(&alternate_small_vecs[j][i]); + } + } + + // var finalize + for (int i = 0; i < num_alternate_order + 1; i++) { + ibz_mat_4x4_finalize(&gram[i]); + ibz_mat_4x4_finalize(&reduced[i]); + quat_left_ideal_finalize(&ideal[i]); + ibz_finalize(&adjusted_norm[i]); + } + + ibz_finalize(&n); + ibz_vec_4_finalize(&vec); + ibz_finalize(&au); + ibz_finalize(&bu); + ibz_finalize(&av); + ibz_finalize(&bv); + ibz_finalize(&remain); + ibz_finalize(&norm_d); + quat_lattice_finalize(&right_order); + quat_left_ideal_finalize(&conj_ideal); + quat_left_ideal_finalize(&reduced_id); + quat_alg_elem_finalize(&delta); + + return found; +} + +int +dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *u, + ibz_t *v, + ibz_t *d1, + ibz_t *d2, + ec_curve_t *codomain, + ec_basis_t *basis, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo) +{ + ibz_t target, tmp, two_pow; + ; + quat_alg_elem_t theta; + + ibz_t norm_d; + ibz_init(&norm_d); + ibz_t test1, test2; + ibz_init(&test1); + ibz_init(&test2); + + ibz_init(&target); + ibz_init(&tmp); + ibz_init(&two_pow); + int exp = TORSION_EVEN_POWER; + quat_alg_elem_init(&theta); + + // first, we find u,v,d1,d2,beta1,beta2 + // such that u*d1 + v*d2 = 2^TORSION_EVEN_POWER and there are ideals of + // norm d1,d2 equivalent to ideal beta1 and beta2 are elements of norm nd1, + // nd2 where n=n(lideal) + int ret; + int index_order1 = 0, index_order2 = 0; +#ifndef NDEBUG + unsigned int Fu_length, Fv_length; +#endif + ret = find_uv(u, + v, + beta1, + beta2, + d1, + d2, + &index_order1, + &index_order2, + &TORSION_PLUS_2POWER, + lideal, + Bpoo, + NUM_ALTERNATE_EXTREMAL_ORDERS); + if (!ret) { + goto 
cleanup; + } + + assert(ibz_is_odd(d1) && ibz_is_odd(d2)); + // compute the valuation of the GCD of u,v + ibz_gcd(&tmp, u, v); + assert(ibz_cmp(&tmp, &ibz_const_zero) != 0); + int exp_gcd = ibz_two_adic(&tmp); + exp = TORSION_EVEN_POWER - exp_gcd; + // removing the power of 2 from u and v + ibz_div(u, &test1, u, &tmp); + assert(ibz_cmp(&test1, &ibz_const_zero) == 0); + ibz_div(v, &test1, v, &tmp); + assert(ibz_cmp(&test1, &ibz_const_zero) == 0); + +#ifndef NDEBUG + // checking that ud1+vd2 = 2^exp + ibz_t pow_check, tmp_check; + ibz_init(&pow_check); + ibz_init(&tmp_check); + ibz_pow(&pow_check, &ibz_const_two, exp); + ibz_mul(&tmp_check, d1, u); + ibz_sub(&pow_check, &pow_check, &tmp_check); + ibz_mul(&tmp_check, v, d2); + ibz_sub(&pow_check, &pow_check, &tmp_check); + assert(ibz_cmp(&pow_check, &ibz_const_zero) == 0); + ibz_finalize(&tmp_check); + ibz_finalize(&pow_check); +#endif + + // now we compute the dimension 2 isogeny + // F : Eu x Ev -> E x E' + // where we have phi_u : Eu -> E_index_order1 and phi_v : Ev -> E_index_order2 + // if we have phi1 : E_index_order_1 -> E of degree d1 + // and phi2 : E_index_order_2 -> E of degree d2 + // we can define theta = phi2 o hat{phi1} + // and the kernel of F is given by + // ( [ud1](P), phiv o theta o hat{phiu} (P)),( [ud1](Q), phiv o theta o + // hat{phiu} (Q)) where P,Q is a basis of E0[2e] + + // now we set-up the kernel + // ec_curve_t E0 = CURVE_E0; + ec_curve_t E1; + copy_curve(&E1, &CURVES_WITH_ENDOMORPHISMS[index_order1].curve); + ec_curve_t E2; + copy_curve(&E2, &CURVES_WITH_ENDOMORPHISMS[index_order2].curve); + ec_basis_t bas1, bas2; + theta_couple_curve_t E01; + theta_kernel_couple_points_t ker; + + ec_basis_t bas_u; + copy_basis(&bas1, &CURVES_WITH_ENDOMORPHISMS[index_order1].basis_even); + copy_basis(&bas2, &CURVES_WITH_ENDOMORPHISMS[index_order2].basis_even); + + // we start by computing theta = beta2 \hat{beta1}/n + ibz_set(&theta.denom, 1); + quat_alg_conj(&theta, beta1); + quat_alg_mul(&theta, beta2, &theta, &QUATALG_PINFTY); + ibz_mul(&theta.denom, &theta.denom, &lideal->norm); + + // now we perform the actual computation + quat_left_ideal_t idealu, idealv; + quat_left_ideal_init(&idealu); + quat_left_ideal_init(&idealv); + theta_couple_curve_t Fu_codomain, Fv_codomain; + theta_couple_point_t pushed_points[3]; + theta_couple_point_t *const V1 = pushed_points + 0, *const V2 = pushed_points + 1, *const V1m2 = pushed_points + 2; + theta_couple_point_t P, Q, PmQ; + + copy_point(&P.P1, &bas1.P); + copy_point(&PmQ.P1, &bas1.PmQ); + copy_point(&Q.P1, &bas1.Q); + // Set points to zero + ec_point_init(&P.P2); + ec_point_init(&Q.P2); + ec_point_init(&PmQ.P2); + + pushed_points[0] = P; + pushed_points[1] = Q; + pushed_points[2] = PmQ; + // we perform the computation of phiu with a fixed degree isogeny + ret = fixed_degree_isogeny_and_eval( + &idealu, u, true, &Fu_codomain, pushed_points, sizeof(pushed_points) / sizeof(*pushed_points), index_order1); + + if (!ret) { + goto cleanup; + } + assert(test_point_order_twof(&V1->P1, &Fu_codomain.E1, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&V1->P2, &Fu_codomain.E2, TORSION_EVEN_POWER)); + +#ifndef NDEBUG + Fu_length = (unsigned int)ret; + // presumably the correct curve is the first one, we check this + fp2_t w0a, w1a, w2a; + ec_curve_t E1_tmp, Fu_codomain_E1_tmp, Fu_codomain_E2_tmp; + copy_curve(&E1_tmp, &E1); + copy_curve(&Fu_codomain_E1_tmp, &Fu_codomain.E1); + copy_curve(&Fu_codomain_E2_tmp, &Fu_codomain.E2); + weil(&w0a, TORSION_EVEN_POWER, &bas1.P, &bas1.Q, &bas1.PmQ, 
&E1_tmp); + weil(&w1a, TORSION_EVEN_POWER, &V1->P1, &V2->P1, &V1m2->P1, &Fu_codomain_E1_tmp); + weil(&w2a, TORSION_EVEN_POWER, &V1->P2, &V2->P2, &V1m2->P2, &Fu_codomain_E2_tmp); + ibz_pow(&two_pow, &ibz_const_two, Fu_length); + ibz_sub(&two_pow, &two_pow, u); + + // now we are checking that the weil pairings are equal to the correct value + digit_t digit_u[NWORDS_ORDER] = { 0 }; + ibz_to_digit_array(digit_u, u); + fp2_t test_powa; + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + + assert(fp2_is_equal(&test_powa, &w1a)); + ibz_to_digit_array(digit_u, &two_pow); + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + assert(fp2_is_equal(&test_powa, &w2a)); +#endif + + // copying the basis images + copy_point(&bas_u.P, &V1->P1); + copy_point(&bas_u.Q, &V2->P1); + copy_point(&bas_u.PmQ, &V1m2->P1); + + // copying the points to the first part of the kernel + copy_point(&ker.T1.P1, &bas_u.P); + copy_point(&ker.T2.P1, &bas_u.Q); + copy_point(&ker.T1m2.P1, &bas_u.PmQ); + copy_curve(&E01.E1, &Fu_codomain.E1); + + copy_point(&P.P1, &bas2.P); + copy_point(&PmQ.P1, &bas2.PmQ); + copy_point(&Q.P1, &bas2.Q); + pushed_points[0] = P; + pushed_points[1] = Q; + pushed_points[2] = PmQ; + + // computation of phiv + ret = fixed_degree_isogeny_and_eval( + &idealv, v, true, &Fv_codomain, pushed_points, sizeof(pushed_points) / sizeof(*pushed_points), index_order2); + if (!ret) { + goto cleanup; + } + + assert(test_point_order_twof(&V1->P1, &Fv_codomain.E1, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&V1->P2, &Fv_codomain.E2, TORSION_EVEN_POWER)); + +#ifndef NDEBUG + Fv_length = (unsigned int)ret; + ec_curve_t E2_tmp, Fv_codomain_E1_tmp, Fv_codomain_E2_tmp; + copy_curve(&E2_tmp, &E2); + copy_curve(&Fv_codomain_E1_tmp, &Fv_codomain.E1); + copy_curve(&Fv_codomain_E2_tmp, &Fv_codomain.E2); + // presumably the correct curve is the first one, we check this + weil(&w0a, TORSION_EVEN_POWER, &bas2.P, &bas2.Q, &bas2.PmQ, &E2_tmp); + weil(&w1a, TORSION_EVEN_POWER, &V1->P1, &V2->P1, &V1m2->P1, &Fv_codomain_E1_tmp); + weil(&w2a, TORSION_EVEN_POWER, &V1->P2, &V2->P2, &V1m2->P2, &Fv_codomain_E2_tmp); + if (Fv_length == 0) { + ibz_set(&tmp, 1); + ibz_set(&two_pow, 1); + } else { + ibz_pow(&two_pow, &ibz_const_two, Fv_length); + ibz_sub(&two_pow, &two_pow, v); + } + + // now we are checking that one of the two is equal to the correct value + ibz_to_digit_array(digit_u, v); + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + assert(fp2_is_equal(&test_powa, &w1a)); + ibz_to_digit_array(digit_u, &two_pow); + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + assert(fp2_is_equal(&test_powa, &w2a)); + +#endif + + copy_point(&bas2.P, &V1->P1); + copy_point(&bas2.Q, &V2->P1); + copy_point(&bas2.PmQ, &V1m2->P1); + + // multiplying theta by 1 / (d1 * n(connecting_ideal2)) + ibz_pow(&two_pow, &ibz_const_two, TORSION_EVEN_POWER); + ibz_copy(&tmp, d1); + if (index_order2 > 0) { + ibz_mul(&tmp, &tmp, &ALTERNATE_CONNECTING_IDEALS[index_order2 - 1].norm); + } + ibz_invmod(&tmp, &tmp, &two_pow); + + ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); + ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); + ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); + ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + + // applying theta + endomorphism_application_even_basis(&bas2, 0, &Fv_codomain.E1, &theta, TORSION_EVEN_POWER); + + assert(test_basis_order_twof(&bas2, &Fv_codomain.E1, TORSION_EVEN_POWER)); + + // copying points to the second part of the kernel + copy_point(&ker.T1.P2, &bas2.P); + 
copy_point(&ker.T2.P2, &bas2.Q); + copy_point(&ker.T1m2.P2, &bas2.PmQ); + copy_curve(&E01.E2, &Fv_codomain.E1); + + // copying the points to the first part of the kernel + quat_left_ideal_finalize(&idealu); + quat_left_ideal_finalize(&idealv); + + double_couple_point_iter(&ker.T1, TORSION_EVEN_POWER - exp, &ker.T1, &E01); + double_couple_point_iter(&ker.T2, TORSION_EVEN_POWER - exp, &ker.T2, &E01); + double_couple_point_iter(&ker.T1m2, TORSION_EVEN_POWER - exp, &ker.T1m2, &E01); + + assert(test_point_order_twof(&ker.T1.P1, &E01.E1, exp)); + assert(test_point_order_twof(&ker.T1m2.P2, &E01.E2, exp)); + + assert(ibz_is_odd(u)); + + // now we evaluate the basis points through the isogeny + assert(test_basis_order_twof(&bas_u, &E01.E1, TORSION_EVEN_POWER)); + + // evaluating the basis through the isogeny of degree u*d1 + copy_point(&pushed_points[0].P1, &bas_u.P); + copy_point(&pushed_points[2].P1, &bas_u.PmQ); + copy_point(&pushed_points[1].P1, &bas_u.Q); + // Set points to zero + ec_point_init(&pushed_points[0].P2); + ec_point_init(&pushed_points[1].P2); + ec_point_init(&pushed_points[2].P2); + + theta_couple_curve_t theta_codomain; + + ret = theta_chain_compute_and_eval_randomized( + exp, &E01, &ker, false, &theta_codomain, pushed_points, sizeof(pushed_points) / sizeof(*pushed_points)); + if (!ret) { + goto cleanup; + } + + theta_couple_point_t T1, T2, T1m2; + T1 = pushed_points[0]; + T2 = pushed_points[1]; + T1m2 = pushed_points[2]; + + assert(test_point_order_twof(&T1.P2, &theta_codomain.E2, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&T1.P1, &theta_codomain.E1, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&T1m2.P2, &theta_codomain.E2, TORSION_EVEN_POWER)); + + copy_point(&basis->P, &T1.P1); + copy_point(&basis->Q, &T2.P1); + copy_point(&basis->PmQ, &T1m2.P1); + copy_curve(codomain, &theta_codomain.E1); + + // using weil pairing to verify that we selected the correct curve + fp2_t w0, w1; + // ec_curve_t E0 = CURVE_E0; + // ec_basis_t bas0 = BASIS_EVEN; + weil(&w0, TORSION_EVEN_POWER, &bas1.P, &bas1.Q, &bas1.PmQ, &E1); + weil(&w1, TORSION_EVEN_POWER, &basis->P, &basis->Q, &basis->PmQ, codomain); + + digit_t digit_d[NWORDS_ORDER] = { 0 }; + ibz_mul(&tmp, d1, u); + ibz_mul(&tmp, &tmp, u); + ibz_mod(&tmp, &tmp, &TORSION_PLUS_2POWER); + ibz_to_digit_array(digit_d, &tmp); + fp2_t test_pow; + fp2_pow_vartime(&test_pow, &w0, digit_d, NWORDS_ORDER); + + // then we have selected the wrong one + if (!fp2_is_equal(&w1, &test_pow)) { + copy_point(&basis->P, &T1.P2); + copy_point(&basis->Q, &T2.P2); + copy_point(&basis->PmQ, &T1m2.P2); + copy_curve(codomain, &theta_codomain.E2); + +// verifying that the other one is the good one +#ifndef NDEBUG + ec_curve_t codomain_tmp; + copy_curve(&codomain_tmp, codomain); + weil(&w1, TORSION_EVEN_POWER, &basis->P, &basis->Q, &basis->PmQ, &codomain_tmp); + fp2_pow_vartime(&test_pow, &w0, digit_d, NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w1)); +#endif + } + + // now we apply M / (u * d1) where M is the matrix corresponding to the + // endomorphism beta1 = phi o dual(phi1) we multiply beta1 by the inverse of + // (u*d1) mod 2^TORSION_EVEN_POWER + ibz_mul(&tmp, u, d1); + if (index_order1 != 0) { + ibz_mul(&tmp, &tmp, &CONNECTING_IDEALS[index_order1].norm); + } + ibz_invmod(&tmp, &tmp, &TORSION_PLUS_2POWER); + ibz_mul(&beta1->coord[0], &beta1->coord[0], &tmp); + ibz_mul(&beta1->coord[1], &beta1->coord[1], &tmp); + ibz_mul(&beta1->coord[2], &beta1->coord[2], &tmp); + ibz_mul(&beta1->coord[3], &beta1->coord[3], &tmp); + + 
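    /* Editor's note, not part of the upstream patch: the four multiplications
       above realize division of beta1 by u*d1 (times the norm of the connecting
       ideal when index_order1 != 0) on the 2^TORSION_EVEN_POWER-torsion. This is
       only possible because that factor is odd (u and d1 are asserted odd
       earlier), hence invertible modulo 2^f, which is what ibz_invmod computes.
       As a minimal, stand-alone illustration of such an inverse modulo a power
       of two (hypothetical helper on machine words, Hensel/Newton lifting,
       assumes <stdint.h>):

           static uint64_t inv_mod_2_64(uint64_t a)   // a must be odd
           {
               uint64_t x = a;               // correct modulo 2^3 for any odd a
               for (int i = 0; i < 5; i++)
                   x *= 2 - a * x;           // each step doubles the correct low bits
               return x;                     // a * x == 1 (mod 2^64)
           }

       The debug block that follows the endomorphism application below
       cross-checks the degrees through the Weil pairing, using the standard
       compatibility e(phi(P), phi(Q)) = e(P, Q)^(deg phi) on the 2^f-torsion. */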
endomorphism_application_even_basis(basis, 0, codomain, beta1, TORSION_EVEN_POWER); + +#ifndef NDEBUG + { + ec_curve_t E0 = CURVE_E0; + ec_curve_t codomain_tmp; + ec_basis_t bas0 = CURVES_WITH_ENDOMORPHISMS[0].basis_even; + copy_curve(&codomain_tmp, codomain); + copy_curve(&E1_tmp, &E1); + copy_curve(&E2_tmp, &E2); + weil(&w0a, TORSION_EVEN_POWER, &bas0.P, &bas0.Q, &bas0.PmQ, &E0); + weil(&w1a, TORSION_EVEN_POWER, &basis->P, &basis->Q, &basis->PmQ, &codomain_tmp); + digit_t tmp_d[2 * NWORDS_ORDER] = { 0 }; + if (index_order1 != 0) { + copy_basis(&bas1, &CURVES_WITH_ENDOMORPHISMS[index_order1].basis_even); + weil(&w0, TORSION_EVEN_POWER, &bas1.P, &bas1.Q, &bas1.PmQ, &E1_tmp); + ibz_to_digit_array(tmp_d, &CONNECTING_IDEALS[index_order1].norm); + fp2_pow_vartime(&test_pow, &w0a, tmp_d, 2 * NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w0)); + } + if (index_order2 != 0) { + copy_basis(&bas2, &CURVES_WITH_ENDOMORPHISMS[index_order2].basis_even); + weil(&w0, TORSION_EVEN_POWER, &bas2.P, &bas2.Q, &bas2.PmQ, &E2_tmp); + ibz_to_digit_array(tmp_d, &CONNECTING_IDEALS[index_order2].norm); + fp2_pow_vartime(&test_pow, &w0a, tmp_d, 2 * NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w0)); + } + ibz_to_digit_array(tmp_d, &lideal->norm); + fp2_pow_vartime(&test_pow, &w0a, tmp_d, 2 * NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w1a)); + } +#endif + +cleanup: + ibz_finalize(&norm_d); + ibz_finalize(&test1); + ibz_finalize(&test2); + ibz_finalize(&target); + ibz_finalize(&tmp); + ibz_finalize(&two_pow); + quat_alg_elem_finalize(&theta); + return ret; +} + +int +dim2id2iso_arbitrary_isogeny_evaluation(ec_basis_t *basis, ec_curve_t *codomain, const quat_left_ideal_t *lideal) +{ + int ret; + + quat_alg_elem_t beta1, beta2; + ibz_t u, v, d1, d2; + + quat_alg_elem_init(&beta1); + quat_alg_elem_init(&beta2); + + ibz_init(&u); + ibz_init(&v); + ibz_init(&d1); + ibz_init(&d2); + + ret = dim2id2iso_ideal_to_isogeny_clapotis( + &beta1, &beta2, &u, &v, &d1, &d2, codomain, basis, lideal, &QUATALG_PINFTY); + + quat_alg_elem_finalize(&beta1); + quat_alg_elem_finalize(&beta2); + + ibz_finalize(&u); + ibz_finalize(&v); + ibz_finalize(&d1); + ibz_finalize(&d2); + + return ret; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim4.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim4.c new file mode 100644 index 0000000000..495dc2dcb2 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim4.c @@ -0,0 +1,470 @@ +#include +#include "internal.h" + +// internal helper functions +void +ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t *b) +{ + ibz_mat_4x4_t mat; + ibz_t prod; + ibz_init(&prod); + ibz_mat_4x4_init(&mat); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&(mat[i][j]), 0); + for (int k = 0; k < 4; k++) { + ibz_mul(&prod, &((*a)[i][k]), &((*b)[k][j])); + ibz_add(&(mat[i][j]), &(mat[i][j]), &prod); + } + } + } + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&((*res)[i][j]), &(mat[i][j])); + } + } + ibz_mat_4x4_finalize(&mat); + ibz_finalize(&prod); +} + +// helper functions for lattices +void +ibz_vec_4_set(ibz_vec_4_t *vec, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) +{ + ibz_set(&((*vec)[0]), coord0); + ibz_set(&((*vec)[1]), coord1); + ibz_set(&((*vec)[2]), coord2); + ibz_set(&((*vec)[3]), coord3); +} + +void +ibz_vec_4_copy(ibz_vec_4_t *new, const ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_copy(&((*new)[i]), &((*vec)[i])); + } +} + +void +ibz_vec_4_copy_ibz(ibz_vec_4_t 
*res, const ibz_t *coord0, const ibz_t *coord1, const ibz_t *coord2, const ibz_t *coord3) +{ + ibz_copy(&((*res)[0]), coord0); + ibz_copy(&((*res)[1]), coord1); + ibz_copy(&((*res)[2]), coord2); + ibz_copy(&((*res)[3]), coord3); +} + +void +ibz_vec_4_content(ibz_t *content, const ibz_vec_4_t *v) +{ + ibz_gcd(content, &((*v)[0]), &((*v)[1])); + ibz_gcd(content, &((*v)[2]), content); + ibz_gcd(content, &((*v)[3]), content); +} + +void +ibz_vec_4_negate(ibz_vec_4_t *neg, const ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_neg(&((*neg)[i]), &((*vec)[i])); + } +} + +void +ibz_vec_4_add(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) +{ + ibz_add(&((*res)[0]), &((*a)[0]), &((*b)[0])); + ibz_add(&((*res)[1]), &((*a)[1]), &((*b)[1])); + ibz_add(&((*res)[2]), &((*a)[2]), &((*b)[2])); + ibz_add(&((*res)[3]), &((*a)[3]), &((*b)[3])); +} + +void +ibz_vec_4_sub(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) +{ + ibz_sub(&((*res)[0]), &((*a)[0]), &((*b)[0])); + ibz_sub(&((*res)[1]), &((*a)[1]), &((*b)[1])); + ibz_sub(&((*res)[2]), &((*a)[2]), &((*b)[2])); + ibz_sub(&((*res)[3]), &((*a)[3]), &((*b)[3])); +} + +int +ibz_vec_4_is_zero(const ibz_vec_4_t *x) +{ + int res = 1; + for (int i = 0; i < 4; i++) { + res &= ibz_is_zero(&((*x)[i])); + } + return (res); +} + +void +ibz_vec_4_linear_combination(ibz_vec_4_t *lc, + const ibz_t *coeff_a, + const ibz_vec_4_t *vec_a, + const ibz_t *coeff_b, + const ibz_vec_4_t *vec_b) +{ + ibz_t prod; + ibz_vec_4_t sums; + ibz_vec_4_init(&sums); + ibz_init(&prod); + for (int i = 0; i < 4; i++) { + ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); + ibz_mul(&prod, coeff_b, &((*vec_b)[i])); + ibz_add(&(sums[i]), &(sums[i]), &prod); + } + for (int i = 0; i < 4; i++) { + ibz_copy(&((*lc)[i]), &(sums[i])); + } + ibz_finalize(&prod); + ibz_vec_4_finalize(&sums); +} + +void +ibz_vec_4_scalar_mul(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_mul(&((*prod)[i]), &((*vec)[i]), scalar); + } +} + +int +ibz_vec_4_scalar_div(ibz_vec_4_t *quot, const ibz_t *scalar, const ibz_vec_4_t *vec) +{ + int res = 1; + ibz_t r; + ibz_init(&r); + for (int i = 0; i < 4; i++) { + ibz_div(&((*quot)[i]), &r, &((*vec)[i]), scalar); + res = res && ibz_is_zero(&r); + } + ibz_finalize(&r); + return (res); +} + +void +ibz_mat_4x4_copy(ibz_mat_4x4_t *new, const ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&((*new)[i][j]), &((*mat)[i][j])); + } + } +} + +void +ibz_mat_4x4_negate(ibz_mat_4x4_t *neg, const ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_neg(&((*neg)[i][j]), &((*mat)[i][j])); + } + } +} + +void +ibz_mat_4x4_transpose(ibz_mat_4x4_t *transposed, const ibz_mat_4x4_t *mat) +{ + ibz_mat_4x4_t work; + ibz_mat_4x4_init(&work); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(work[i][j]), &((*mat)[j][i])); + } + } + ibz_mat_4x4_copy(transposed, &work); + ibz_mat_4x4_finalize(&work); +} + +void +ibz_mat_4x4_zero(ibz_mat_4x4_t *zero) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&((*zero)[i][j]), 0); + } + } +} + +void +ibz_mat_4x4_identity(ibz_mat_4x4_t *id) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&((*id)[i][j]), 0); + } + ibz_set(&((*id)[i][i]), 1); + } +} + +int +ibz_mat_4x4_is_identity(const ibz_mat_4x4_t *mat) +{ + int res = 1; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + res = res && 
ibz_is_one(&((*mat)[i][j])) == (i == j); + } + } + return (res); +} + +int +ibz_mat_4x4_equal(const ibz_mat_4x4_t *mat1, const ibz_mat_4x4_t *mat2) +{ + int res = 0; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + res = res | ibz_cmp(&((*mat1)[i][j]), &((*mat2)[i][j])); + } + } + return (!res); +} + +void +ibz_mat_4x4_scalar_mul(ibz_mat_4x4_t *prod, const ibz_t *scalar, const ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_mul(&((*prod)[i][j]), &((*mat)[i][j]), scalar); + } + } +} + +void +ibz_mat_4x4_gcd(ibz_t *gcd, const ibz_mat_4x4_t *mat) +{ + ibz_t d; + ibz_init(&d); + ibz_copy(&d, &((*mat)[0][0])); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_gcd(&d, &d, &((*mat)[i][j])); + } + } + ibz_copy(gcd, &d); + ibz_finalize(&d); +} + +int +ibz_mat_4x4_scalar_div(ibz_mat_4x4_t *quot, const ibz_t *scalar, const ibz_mat_4x4_t *mat) +{ + int res = 1; + ibz_t r; + ibz_init(&r); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_div(&((*quot)[i][j]), &r, &((*mat)[i][j]), scalar); + res = res && ibz_is_zero(&r); + } + } + ibz_finalize(&r); + return (res); +} + +// 4x4 inversion helper functions +void +ibz_inv_dim4_make_coeff_pmp(ibz_t *coeff, + const ibz_t *a1, + const ibz_t *a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2) +{ + ibz_t prod, sum; + ibz_init(&prod); + ibz_init(&sum); + ibz_mul(&sum, a1, a2); + ibz_mul(&prod, b1, b2); + ibz_sub(&sum, &sum, &prod); + ibz_mul(&prod, c1, c2); + ibz_add(coeff, &sum, &prod); + ibz_finalize(&prod); + ibz_finalize(&sum); +} + +void +ibz_inv_dim4_make_coeff_mpm(ibz_t *coeff, + const ibz_t *a1, + const ibz_t *a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2) +{ + ibz_t prod, sum; + ibz_init(&prod); + ibz_init(&sum); + ibz_mul(&sum, b1, b2); + ibz_mul(&prod, a1, a2); + ibz_sub(&sum, &sum, &prod); + ibz_mul(&prod, c1, c2); + ibz_sub(coeff, &sum, &prod); + ibz_finalize(&prod); + ibz_finalize(&sum); +} + +// Method from https://www.geometrictools.com/Documentation/LaplaceExpansionTheorem.pdf 3rd of May +// 2023, 16h15 CEST +int +ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_4x4_t *mat) +{ + ibz_t prod, work_det; + ibz_mat_4x4_t work; + ibz_t s[6]; + ibz_t c[6]; + for (int i = 0; i < 6; i++) { + ibz_init(&(s[i])); + ibz_init(&(c[i])); + } + ibz_mat_4x4_init(&work); + ibz_init(&prod); + ibz_init(&work_det); + + // compute some 2x2 minors, store them in s and c + for (int i = 0; i < 3; i++) { + ibz_mat_2x2_det_from_ibz(&(s[i]), &((*mat)[0][0]), &((*mat)[0][i + 1]), &((*mat)[1][0]), &((*mat)[1][i + 1])); + ibz_mat_2x2_det_from_ibz(&(c[i]), &((*mat)[2][0]), &((*mat)[2][i + 1]), &((*mat)[3][0]), &((*mat)[3][i + 1])); + } + for (int i = 0; i < 2; i++) { + ibz_mat_2x2_det_from_ibz( + &(s[3 + i]), &((*mat)[0][1]), &((*mat)[0][2 + i]), &((*mat)[1][1]), &((*mat)[1][2 + i])); + ibz_mat_2x2_det_from_ibz( + &(c[3 + i]), &((*mat)[2][1]), &((*mat)[2][2 + i]), &((*mat)[3][1]), &((*mat)[3][2 + i])); + } + ibz_mat_2x2_det_from_ibz(&(s[5]), &((*mat)[0][2]), &((*mat)[0][3]), &((*mat)[1][2]), &((*mat)[1][3])); + ibz_mat_2x2_det_from_ibz(&(c[5]), &((*mat)[2][2]), &((*mat)[2][3]), &((*mat)[3][2]), &((*mat)[3][3])); + + // compute det + ibz_set(&work_det, 0); + for (int i = 0; i < 6; i++) { + ibz_mul(&prod, &(s[i]), &(c[5 - i])); + if ((i != 1) && (i != 4)) { + ibz_add(&work_det, &work_det, &prod); + } else { + ibz_sub(&work_det, &work_det, &prod); + } + } + // compute transposed adjugate + 
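    /* Editor's note, not part of the upstream patch: the loop above assembles the
       determinant from the six 2x2 minors s_k (rows 0,1) and c_k (rows 2,3),
       taken over the column pairs (0,1),(0,2),(0,3),(1,2),(1,3),(2,3), via the
       Laplace expansion

           det(M) = s0*c5 - s1*c4 + s2*c3 + s3*c2 - s4*c1 + s5*c0.

       A minimal stand-alone version over plain int64_t (hypothetical helpers,
       assumes <stdint.h>) may make the index bookkeeping easier to follow:

           static int64_t det2(int64_t a, int64_t b, int64_t c, int64_t d)
           {
               return a * d - b * c;
           }

           static int64_t det4_laplace(const int64_t m[4][4])
           {
               static const int col[6][2] = { { 0, 1 }, { 0, 2 }, { 0, 3 },
                                              { 1, 2 }, { 1, 3 }, { 2, 3 } };
               int64_t s[6], c[6], det = 0;
               for (int k = 0; k < 6; k++) {
                   s[k] = det2(m[0][col[k][0]], m[0][col[k][1]], m[1][col[k][0]], m[1][col[k][1]]);
                   c[k] = det2(m[2][col[k][0]], m[2][col[k][1]], m[3][col[k][0]], m[3][col[k][1]]);
               }
               for (int k = 0; k < 6; k++)
                   det += ((k == 1 || k == 4) ? -s[k] : s[k]) * c[5 - k];
               return det;
           }

       The loop that follows builds the numerator matrix of the inverse from the
       same six minors; dividing it by det(M) (when nonzero) gives the inverse,
       which is why det is returned as the common denominator. */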
for (int j = 0; j < 4; j++) { + for (int k = 0; k < 2; k++) { + if ((k + j + 1) % 2 == 1) { + ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), + &((*mat)[1 - k][(j == 0)]), + &(c[6 - j - (j == 0)]), + &((*mat)[1 - k][2 - (j > 1)]), + &(c[4 - j - (j == 1)]), + &((*mat)[1 - k][3 - (j == 3)]), + &(c[3 - j - (j == 1) - (j == 2)])); + } else { + ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), + &((*mat)[1 - k][(j == 0)]), + &(c[6 - j - (j == 0)]), + &((*mat)[1 - k][2 - (j > 1)]), + &(c[4 - j - (j == 1)]), + &((*mat)[1 - k][3 - (j == 3)]), + &(c[3 - j - (j == 1) - (j == 2)])); + } + } + for (int k = 2; k < 4; k++) { + if ((k + j + 1) % 2 == 1) { + ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), + &((*mat)[3 - (k == 3)][(j == 0)]), + &(s[6 - j - (j == 0)]), + &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(s[4 - j - (j == 1)]), + &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(s[3 - j - (j == 1) - (j == 2)])); + } else { + ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), + &((*mat)[3 - (k == 3)][(j == 0)]), + &(s[6 - j - (j == 0)]), + &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(s[4 - j - (j == 1)]), + &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(s[3 - j - (j == 1) - (j == 2)])); + } + } + } + if (inv != NULL) { + // put transposed adjugate in result, or 0 if no inverse + ibz_set(&prod, !ibz_is_zero(&work_det)); + ibz_mat_4x4_scalar_mul(inv, &prod, &work); + } + // output det + if (det != NULL) + ibz_copy(det, &work_det); + for (int i = 0; i < 6; i++) { + ibz_finalize(&s[i]); + ibz_finalize(&c[i]); + } + ibz_mat_4x4_finalize(&work); + ibz_finalize(&work_det); + ibz_finalize(&prod); + return (!ibz_is_zero(det)); +} + +// matrix evaluation + +void +ibz_mat_4x4_eval(ibz_vec_4_t *res, const ibz_mat_4x4_t *mat, const ibz_vec_4_t *vec) +{ + ibz_vec_4_t sum; + ibz_t prod; + ibz_init(&prod); + ibz_vec_4_init(&sum); + // assume initialization to 0 + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_mul(&prod, &(*mat)[i][j], &(*vec)[j]); + ibz_add(&(sum[i]), &(sum[i]), &prod); + } + } + ibz_vec_4_copy(res, &sum); + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} + +void +ibz_mat_4x4_eval_t(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_mat_4x4_t *mat) +{ + ibz_vec_4_t sum; + ibz_t prod; + ibz_init(&prod); + ibz_vec_4_init(&sum); + // assume initialization to 0 + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_mul(&prod, &(*mat)[j][i], &(*vec)[j]); + ibz_add(&(sum[i]), &(sum[i]), &prod); + } + } + ibz_vec_4_copy(res, &sum); + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} + +// quadratic forms + +void +quat_qf_eval(ibz_t *res, const ibz_mat_4x4_t *qf, const ibz_vec_4_t *coord) +{ + ibz_vec_4_t sum; + ibz_t prod; + ibz_init(&prod); + ibz_vec_4_init(&sum); + ibz_mat_4x4_eval(&sum, qf, coord); + for (int i = 0; i < 4; i++) { + ibz_mul(&prod, &(sum[i]), &(*coord)[i]); + if (i > 0) { + ibz_add(&(sum[0]), &(sum[0]), &prod); + } else { + ibz_copy(&sum[0], &prod); + } + } + ibz_copy(res, &sum[0]); + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dpe.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dpe.h new file mode 100644 index 0000000000..b9a7a35e0b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dpe.h @@ -0,0 +1,743 @@ +/* Copyright (C) 2004-2024 Patrick Pelissier, Paul Zimmermann, LORIA/INRIA. + +This file is part of the DPE Library. 
+ +The DPE Library is free software; you can redistribute it and/or modify +it under the terms of the GNU Lesser General Public License as published by +the Free Software Foundation; either version 3 of the License, or (at your +option) any later version. + +The DPE Library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +License for more details. + +You should have received a copy of the GNU Lesser General Public License +along with the DPE Library; see the file COPYING.LIB. +If not, see . */ + +#ifndef __DPE +#define __DPE + +#include /* For abort */ +#include /* For fprintf */ +#include /* for round, floor, ceil */ +#include + +/* if you change the version, please change it in Makefile too */ +#define DPE_VERSION_MAJOR 1 +#define DPE_VERSION_MINOR 7 + +#if defined(__GNUC__) && (__GNUC__ >= 3) +# define DPE_LIKELY(x) (__builtin_expect(!!(x),1)) +# define DPE_UNLIKELY(x) (__builtin_expect((x),0)) +# define DPE_UNUSED_ATTR __attribute__((unused)) +#else +# define DPE_LIKELY(x) (x) +# define DPE_UNLIKELY(x) (x) +# define DPE_UNUSED_ATTR +#endif + +/* If no user defined mode, define it to double */ +#if !defined(DPE_USE_DOUBLE) && !defined(DPE_USE_LONGDOUBLE) && !defined(DPE_USE_FLOAT128) +# define DPE_USE_DOUBLE +#endif + +#if defined(DPE_USE_DOUBLE) && defined(DPE_USE_LONGDOUBLE) +# error "Either DPE_USE_DOUBLE or DPE_USE_LONGDOUBLE shall be defined." +#elif defined(DPE_USE_DOUBLE) && defined(DPE_USE_USE_FLOAT128) +# error "Either DPE_USE_DOUBLE or DPE_USE_FLOAT128 shall be defined." +#elif defined(DPE_USE_LONG_DOUBLE) && defined(DPE_USE_USE_FLOAT128) +# error "Either DPE_USE_LONG_DOUBLE or DPE_USE_FLOAT128 shall be defined." 
+#endif + +#if (defined(__i386) || defined (__x86_64)) && !defined(DPE_LITTLEENDIAN32) && defined(DPE_USE_DOUBLE) +# define DPE_LITTLEENDIAN32 +#endif + +#if (defined(__STDC_VERSION__) && (__STDC_VERSION__>=199901L)) || defined (__GLIBC__) +# define DPE_DEFINE_ROUND_TRUNC +#endif + +#if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 43 +# define DPE_ISFINITE __builtin_isfinite +#elif defined(isfinite) +# define DPE_ISFINITE isfinite /* new C99 function */ +#else +# define DPE_ISFINITE finite /* obsolete BSD function */ +#endif + +/* DPE_LDEXP(DPE_DOUBLE m, DPEEXP e) return x = m * 2^e */ +/* DPE_FREXP(DPE_DOUBLE x, DPEEXP *e) returns m, e such that x = m * 2^e with + 1/2 <= m < 1 */ +/* DPE_ROUND(DPE_DOUBLE x) returns the nearest integer to x */ +#if defined(DPE_USE_DOUBLE) +# define DPE_DOUBLE double /* mantissa type */ +# define DPE_BITSIZE 53 /* bitsize of DPE_DOUBLE */ +# define DPE_2_POW_BITSIZE 0x1P53 +# if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 40 +# define DPE_LDEXP __builtin_ldexp +# define DPE_FREXP __builtin_frexp +# define DPE_FLOOR __builtin_floor +# define DPE_CEIL __builtin_ceil +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND __builtin_round +# define DPE_TRUNC __builtin_trunc +# endif +# else +# define DPE_LDEXP ldexp +# define DPE_FREXP frexp +# define DPE_FLOOR floor +# define DPE_CEIL ceil +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND round +# define DPE_TRUNC trunc +# endif +# endif + +#elif defined(DPE_USE_LONGDOUBLE) +# define DPE_DOUBLE long double +# define DPE_BITSIZE 64 +# define DPE_2_POW_BITSIZE 0x1P64 +# define DPE_LDEXP ldexpl +# define DPE_FREXP frexpl +# define DPE_FLOOR floorl +# define DPE_CEIL ceill +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND roundl +# define DPE_TRUNC truncl +# endif + +#elif defined(DPE_USE_FLOAT128) +# include "quadmath.h" +# define DPE_DOUBLE __float128 +# define DPE_BITSIZE 113 +# define DPE_2_POW_BITSIZE 0x1P113 +# define DPE_LDEXP ldexpq +# define DPE_FLOOR floorq +# define DPE_CEIL ceilq +# define DPE_FREXP frexpq +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND roundq +# define DPE_TRUNC truncq +# endif + +#else +# error "neither DPE_USE_DOUBLE, nor DPE_USE_LONGDOUBLE, nor DPE_USE_FLOAT128 is defined" +#endif + +/* If no C99, do what we can */ +#ifndef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND(x) ((DPE_DOUBLE) ((long long) ((x) + ((x) >= 0.0 ? 0.5 : -0.5)))) +# define DPE_TRUNC(x) ((DPE_DOUBLE) ((long long) ((x) + 0.0))) +#endif + +#if defined(DPE_USE_LONG) +# define DPE_EXP_T long /* exponent type */ +# define DPE_EXPMIN LONG_MIN /* smallest possible exponent */ +#elif defined(DPE_USE_LONGLONG) +# define DPE_EXP_T long long +# define DPE_EXPMIN LLONG_MIN +#else +# define DPE_EXP_T int /* exponent type */ +# define DPE_EXPMIN INT_MIN /* smallest possible exponent */ +#endif + +#ifdef DPE_LITTLEENDIAN32 +typedef union +{ + double d; +#if INT_MAX == 0x7FFFFFFFL + int i[2]; +#elif LONG_MAX == 0x7FFFFFFFL + long i[2]; +#elif SHRT_MAX == 0x7FFFFFFFL + short i[2]; +#else +# error Cannot find a 32 bits integer type. +#endif +} dpe_double_words; +#endif + +typedef struct +{ + DPE_DOUBLE d; /* significand */ + DPE_EXP_T exp; /* exponent */ +} dpe_struct; + +typedef dpe_struct dpe_t[1]; + +#define DPE_MANT(x) ((x)->d) +#define DPE_EXP(x) ((x)->exp) +#define DPE_SIGN(x) ((DPE_MANT(x) < 0.0) ? 
-1 : (DPE_MANT(x) > 0.0)) + +#define DPE_INLINE static inline + +/* initialize */ +DPE_INLINE void +dpe_init (dpe_t x DPE_UNUSED_ATTR) +{ +} + +/* clear */ +DPE_INLINE void +dpe_clear (dpe_t x DPE_UNUSED_ATTR) +{ +} + +/* set x to y */ +DPE_INLINE void +dpe_set (dpe_t x, dpe_t y) +{ + DPE_MANT(x) = DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y); +} + +/* set x to -y */ +DPE_INLINE void +dpe_neg (dpe_t x, dpe_t y) +{ + DPE_MANT(x) = -DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y); +} + +/* set x to |y| */ +DPE_INLINE void +dpe_abs (dpe_t x, dpe_t y) +{ + DPE_MANT(x) = (DPE_MANT(y) >= 0) ? DPE_MANT(y) : -DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y); +} + +/* set mantissa in [1/2, 1), except for 0 which has minimum exponent */ +/* FIXME: don't inline this function yet ? */ +static void +dpe_normalize (dpe_t x) +{ + if (DPE_UNLIKELY (DPE_MANT(x) == 0.0 || DPE_ISFINITE (DPE_MANT(x)) == 0)) + { + if (DPE_MANT(x) == 0.0) + DPE_EXP(x) = DPE_EXPMIN; + /* otherwise let the exponent of NaN, Inf unchanged */ + } + else + { + DPE_EXP_T e; +#ifdef DPE_LITTLEENDIAN32 /* 32-bit little endian */ + dpe_double_words dw; + dw.d = DPE_MANT(x); + e = (dw.i[1] >> 20) & 0x7FF; /* unbiased exponent, 1022 for m=1/2 */ + DPE_EXP(x) += e - 1022; + dw.i[1] = (dw.i[1] & 0x800FFFFF) | 0x3FE00000; + DPE_MANT(x) = dw.d; +#else /* portable code */ + double m = DPE_MANT(x); + DPE_MANT(x) = DPE_FREXP (m, &e); + DPE_EXP(x) += e; +#endif + } +} + +#if defined(DPE_USE_DOUBLE) +static const double dpe_scale_tab[54] = { + 0x1P0, 0x1P-1, 0x1P-2, 0x1P-3, 0x1P-4, 0x1P-5, 0x1P-6, 0x1P-7, 0x1P-8, + 0x1P-9, 0x1P-10, 0x1P-11, 0x1P-12, 0x1P-13, 0x1P-14, 0x1P-15, 0x1P-16, + 0x1P-17, 0x1P-18, 0x1P-19, 0x1P-20, 0x1P-21, 0x1P-22, 0x1P-23, 0x1P-24, + 0x1P-25, 0x1P-26, 0x1P-27, 0x1P-28, 0x1P-29, 0x1P-30, 0x1P-31, 0x1P-32, + 0x1P-33, 0x1P-34, 0x1P-35, 0x1P-36, 0x1P-37, 0x1P-38, 0x1P-39, 0x1P-40, + 0x1P-41, 0x1P-42, 0x1P-43, 0x1P-44, 0x1P-45, 0x1P-46, 0x1P-47, 0x1P-48, + 0x1P-49, 0x1P-50, 0x1P-51, 0x1P-52, 0x1P-53}; +#endif + +DPE_INLINE DPE_DOUBLE +dpe_scale (DPE_DOUBLE d, int s) +{ + /* -DPE_BITSIZE < s <= 0 and 1/2 <= d < 1 */ +#if defined(DPE_USE_DOUBLE) + return d * dpe_scale_tab [-s]; +#else /* portable code */ + return DPE_LDEXP (d, s); +#endif +} + +/* set x to y */ +DPE_INLINE void +dpe_set_d (dpe_t x, double y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +/* set x to y */ +DPE_INLINE void +dpe_set_ld (dpe_t x, long double y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +/* set x to y */ +DPE_INLINE void +dpe_set_ui (dpe_t x, unsigned long y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +/* set x to y */ +DPE_INLINE void +dpe_set_si (dpe_t x, long y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +DPE_INLINE long +dpe_get_si (dpe_t x) +{ + DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); + return (long) d; +} + +DPE_INLINE unsigned long +dpe_get_ui (dpe_t x) +{ + DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); + return (d < 0.0) ? 
0 : (unsigned long) d; +} + +DPE_INLINE double +dpe_get_d (dpe_t x) +{ + return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); +} + +DPE_INLINE long double +dpe_get_ld (dpe_t x) +{ + return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); +} + +#if defined(__GMP_H__) || defined(__MINI_GMP_H__) +/* set x to y */ +DPE_INLINE void +dpe_set_z (dpe_t x, mpz_t y) +{ + long e; + DPE_MANT(x) = mpz_get_d_2exp (&e, y); + DPE_EXP(x) = (DPE_EXP_T) e; +} + +/* set x to y, rounded to nearest */ +DPE_INLINE void +dpe_get_z (mpz_t x, dpe_t y) +{ + DPE_EXP_T ey = DPE_EXP(y); + if (ey >= DPE_BITSIZE) /* y is an integer */ + { + DPE_DOUBLE d = DPE_MANT(y) * DPE_2_POW_BITSIZE; /* d is an integer */ + mpz_set_d (x, d); /* should be exact */ + mpz_mul_2exp (x, x, (unsigned long) ey - DPE_BITSIZE); + } + else /* DPE_EXP(y) < DPE_BITSIZE */ + { + if (DPE_UNLIKELY (ey < 0)) /* |y| < 1/2 */ + mpz_set_ui (x, 0); + else + { + DPE_DOUBLE d = DPE_LDEXP(DPE_MANT(y), ey); + mpz_set_d (x, (double) DPE_ROUND(d)); + } + } +} + +/* return e and x such that y = x*2^e */ +DPE_INLINE mp_exp_t +dpe_get_z_exp (mpz_t x, dpe_t y) +{ + mpz_set_d (x, DPE_MANT (y) * DPE_2_POW_BITSIZE); + return DPE_EXP(y) - DPE_BITSIZE; +} +#endif + +/* x <- y + z, assuming y and z are normalized, returns x normalized */ +DPE_INLINE void +dpe_add (dpe_t x, dpe_t y, dpe_t z) +{ + if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) + /* |z| < 1/2*ulp(y), thus o(y+z) = y */ + dpe_set (x, y); + else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) + dpe_set (x, z); + else + { + DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ + + if (d >= 0) + { + DPE_MANT(x) = DPE_MANT(y) + dpe_scale (DPE_MANT(z), -d); + DPE_EXP(x) = DPE_EXP(y); + } + else + { + DPE_MANT(x) = DPE_MANT(z) + dpe_scale (DPE_MANT(y), d); + DPE_EXP(x) = DPE_EXP(z); + } + dpe_normalize (x); + } +} + +/* x <- y - z, assuming y and z are normalized, returns x normalized */ +DPE_INLINE void +dpe_sub (dpe_t x, dpe_t y, dpe_t z) +{ + if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) + /* |z| < 1/2*ulp(y), thus o(y-z) = y */ + dpe_set (x, y); + else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) + dpe_neg (x, z); + else + { + DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ + + if (d >= 0) + { + DPE_MANT(x) = DPE_MANT(y) - dpe_scale (DPE_MANT(z), -d); + DPE_EXP(x) = DPE_EXP(y); + } + else + { + DPE_MANT(x) = dpe_scale (DPE_MANT(y), d) - DPE_MANT(z); + DPE_EXP(x) = DPE_EXP(z); + } + dpe_normalize (x); + } +} + +/* x <- y * z, assuming y and z are normalized, returns x normalized */ +DPE_INLINE void +dpe_mul (dpe_t x, dpe_t y, dpe_t z) +{ + DPE_MANT(x) = DPE_MANT(y) * DPE_MANT(z); + DPE_EXP(x) = DPE_EXP(y) + DPE_EXP(z); + dpe_normalize (x); +} + +/* x <- sqrt(y), assuming y is normalized, returns x normalized */ +DPE_INLINE void +dpe_sqrt (dpe_t x, dpe_t y) +{ + DPE_EXP_T ey = DPE_EXP(y); + if (ey % 2) + { + /* since 1/2 <= my < 1, 1/4 <= my/2 < 1 */ + DPE_MANT(x) = sqrt (0.5 * DPE_MANT(y)); + DPE_EXP(x) = (ey + 1) / 2; + } + else + { + DPE_MANT(x) = sqrt (DPE_MANT(y)); + DPE_EXP(x) = ey / 2; + } +} + +/* x <- y / z, assuming y and z are normalized, returns x normalized. + Assumes z is not zero. 
*/ +DPE_INLINE void +dpe_div (dpe_t x, dpe_t y, dpe_t z) +{ + DPE_MANT(x) = DPE_MANT(y) / DPE_MANT(z); + DPE_EXP(x) = DPE_EXP(y) - DPE_EXP(z); + dpe_normalize (x); +} + +/* x <- y * z, assuming y normalized, returns x normalized */ +DPE_INLINE void +dpe_mul_ui (dpe_t x, dpe_t y, unsigned long z) +{ + DPE_MANT(x) = DPE_MANT(y) * (DPE_DOUBLE) z; + DPE_EXP(x) = DPE_EXP(y); + dpe_normalize (x); +} + +/* x <- y / z, assuming y normalized, z non-zero, returns x normalized */ +DPE_INLINE void +dpe_div_ui (dpe_t x, dpe_t y, unsigned long z) +{ + DPE_MANT(x) = DPE_MANT(y) / (DPE_DOUBLE) z; + DPE_EXP(x) = DPE_EXP(y); + dpe_normalize (x); +} + +/* x <- y * 2^e */ +DPE_INLINE void +dpe_mul_2exp (dpe_t x, dpe_t y, unsigned long e) +{ + DPE_MANT(x) = DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y) + (DPE_EXP_T) e; +} + +/* x <- y / 2^e */ +DPE_INLINE void +dpe_div_2exp (dpe_t x, dpe_t y, unsigned long e) +{ + DPE_MANT(x) = DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y) - (DPE_EXP_T) e; +} + +/* return e and x such that y = x*2^e (equality is not guaranteed if the 'long' + type has fewer bits than the significand in dpe_t) */ +DPE_INLINE DPE_EXP_T +dpe_get_si_exp (long *x, dpe_t y) +{ + if (sizeof(long) == 4) /* 32-bit word: long has 31 bits */ + { + *x = (long) (DPE_MANT(y) * 2147483648.0); + return DPE_EXP(y) - 31; + } + else if (sizeof(long) == 8) /* 64-bit word: long has 63 bits */ + { + *x = (long) (DPE_MANT (y) * 9223372036854775808.0); + return DPE_EXP(y) - 63; + } + else + { + fprintf (stderr, "Error, neither 32-bit nor 64-bit word\n"); + exit (1); + } +} + +static DPE_UNUSED_ATTR int dpe_str_prec = 16; +static int dpe_out_str (FILE *s, int base, dpe_t x) DPE_UNUSED_ATTR; + +static int +dpe_out_str (FILE *s, int base, dpe_t x) +{ + DPE_DOUBLE d = DPE_MANT(x); + DPE_EXP_T e2 = DPE_EXP(x); + int e10 = 0; + char sign = ' '; + if (DPE_UNLIKELY (base != 10)) + { + fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); + exit (1); + } + if (d == 0.0) +#ifdef DPE_USE_DOUBLE + return fprintf (s, "%1.*f", dpe_str_prec, d); +#else + return fprintf (s, "%1.*Lf", dpe_str_prec, (long double) d); +#endif + if (d < 0) + { + d = -d; + sign = '-'; + } + if (e2 > 0) + { + while (e2 > 0) + { + e2 --; + d *= 2.0; + if (d >= 10.0) + { + d /= 10.0; + e10 ++; + } + } + } + else /* e2 <= 0 */ + { + while (e2 < 0) + { + e2 ++; + d /= 2.0; + if (d < 1.0) + { + d *= 10.0; + e10 --; + } + } + } +#ifdef DPE_USE_DOUBLE + return fprintf (s, "%c%1.*f*10^%d", sign, dpe_str_prec, d, e10); +#else + return fprintf (s, "%c%1.*Lf*10^%d", sign, dpe_str_prec, (long double) d, e10); +#endif +} + +static size_t dpe_inp_str (dpe_t x, FILE *s, int base) DPE_UNUSED_ATTR; + +static size_t +dpe_inp_str (dpe_t x, FILE *s, int base) +{ + size_t res; + DPE_DOUBLE d; + if (DPE_UNLIKELY (base != 10)) + { + fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); + exit (1); + } +#ifdef DPE_USE_DOUBLE + res = fscanf (s, "%lf", &d); +#elif defined(DPE_USE_LONGDOUBLE) + res = fscanf (s, "%Lf", &d); +#else + { + long double d_ld; + res = fscanf (s, "%Lf", &d_ld); + d = d_ld; + } +#endif + dpe_set_d (x, d); + return res; +} + +DPE_INLINE void +dpe_dump (dpe_t x) +{ + dpe_out_str (stdout, 10, x); + putchar ('\n'); +} + +DPE_INLINE int +dpe_zero_p (dpe_t x) +{ + return DPE_MANT (x) == 0; +} + +/* return a positive value if x > y + a negative value if x < y + and 0 otherwise (x=y). 
*/ +DPE_INLINE int +dpe_cmp (dpe_t x, dpe_t y) +{ + int sx = DPE_SIGN(x); + int d = sx - DPE_SIGN(y); + + if (d != 0) + return d; + else if (DPE_EXP(x) > DPE_EXP(y)) + return (sx > 0) ? 1 : -1; + else if (DPE_EXP(y) > DPE_EXP(x)) + return (sx > 0) ? -1 : 1; + else /* DPE_EXP(x) = DPE_EXP(y) */ + return (DPE_MANT(x) < DPE_MANT(y)) ? -1 : (DPE_MANT(x) > DPE_MANT(y)); +} + +DPE_INLINE int +dpe_cmp_d (dpe_t x, double d) +{ + dpe_t y; + dpe_set_d (y, d); + return dpe_cmp (x, y); +} + +DPE_INLINE int +dpe_cmp_ui (dpe_t x, unsigned long d) +{ + dpe_t y; + dpe_set_ui (y, d); + return dpe_cmp (x, y); +} + +DPE_INLINE int +dpe_cmp_si (dpe_t x, long d) +{ + dpe_t y; + dpe_set_si (y, d); + return dpe_cmp (x, y); +} + +/* set x to integer nearest to y */ +DPE_INLINE void +dpe_round (dpe_t x, dpe_t y) +{ + if (DPE_EXP(y) < 0) /* |y| < 1/2 */ + dpe_set_ui (x, 0); + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set (x, y); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, DPE_ROUND(d)); + } +} + +/* set x to the fractional part of y, defined as y - trunc(y), thus the + fractional part has absolute value in [0, 1), and same sign as y */ +DPE_INLINE void +dpe_frac (dpe_t x, dpe_t y) +{ + /* If |y| is smaller than 1, keep it */ + if (DPE_EXP(y) <= 0) + dpe_set (x, y); + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set_ui (x, 0); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, d - DPE_TRUNC(d)); + } +} + +/* set x to largest integer <= y */ +DPE_INLINE void +dpe_floor (dpe_t x, dpe_t y) +{ + if (DPE_EXP(y) <= 0) /* |y| < 1 */ + { + if (DPE_SIGN(y) >= 0) /* 0 <= y < 1 */ + dpe_set_ui (x, 0); + else /* -1 < y < 0 */ + dpe_set_si (x, -1); + } + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set (x, y); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, DPE_FLOOR(d)); + } +} + +/* set x to smallest integer >= y */ +DPE_INLINE void +dpe_ceil (dpe_t x, dpe_t y) +{ + if (DPE_EXP(y) <= 0) /* |y| < 1 */ + { + if (DPE_SIGN(y) > 0) /* 0 < y < 1 */ + dpe_set_ui (x, 1); + else /* -1 < y <= 0 */ + dpe_set_si (x, 0); + } + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set (x, y); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, DPE_CEIL(d)); + } +} + +DPE_INLINE void +dpe_swap (dpe_t x, dpe_t y) +{ + DPE_EXP_T i = DPE_EXP (x); + DPE_DOUBLE d = DPE_MANT (x); + DPE_EXP (x) = DPE_EXP (y); + DPE_MANT (x) = DPE_MANT (y); + DPE_EXP (y) = i; + DPE_MANT (y) = d; +} + +#endif /* __DPE */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/e0_basis.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/e0_basis.c new file mode 100644 index 0000000000..1b12a8380f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/e0_basis.c @@ -0,0 +1,55 @@ +#include +const fp2_t BASIS_E0_PX = { +#if 0 +#elif RADIX == 16 +{0x1196, 0x134b, 0xdbd, 0x118d, 0x712, 0x1646, 0x5d7, 0x8eb, 0x431, 0xf5b, 0x161e, 0x13b6, 0x1c07, 0x42, 0x8ba, 0xeec, 0x1a43, 0x545, 0x1cdb, 0x1659, 0x1614, 0xde, 0x72d, 0x1b80, 0x1706, 0x15a3, 0x894, 0xd4a, 0x1b2f, 0x12} +#elif RADIX == 32 +{0x9a5c65a, 0xa31adbd, 0x7b231c4, 0xc51d65d, 0x1e7ad90, 0x1e76d6, 0x8ba0217, 0xe90ddd8, 0x3cdb2a2, 0xf5852cb, 0x72d06, 0xd1dc1b7, 0xa94894a, 0x14cbd} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x31c4a31adbd9a5c6, 0xe7ad90c51d65d7b2, 0x88ba021701e76d61, 0x2cb3cdb2a2e90ddd, 0xdc1b70072d06f585, 0x16eecbda94894ad1} +#else +{0x94635b7b34b8c, 0x431475975ec8c7, 
0x380f3b6b0f3d6c, 0x2e90ddd88ba021, 0x5eb0a59679b654, 0x347706dc01cb41, 0xb7765ed4a44a5} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xa85, 0x10cc, 0x1ef, 0xb0b, 0x1082, 0x5be, 0xd14, 0x1100, 0x1a33, 0x174b, 0x181c, 0x83e, 0x1034, 0x18ba, 0x205, 0x1f39, 0x1e9, 0x1998, 0x130e, 0x801, 0xfeb, 0x698, 0xdf9, 0x6a5, 0x5b6, 0x2c8, 0x1283, 0xad9, 0x960, 0x1e} +#elif RADIX == 32 +{0x8662a17, 0x96161ef, 0x42df420, 0xce200d1, 0x1cba5e8, 0xd107d8, 0x205c5d4, 0x7a7e72, 0x330eccc, 0xc3fad00, 0x4adf934, 0x6416d8d, 0x5b32831, 0x2f581} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf42096161ef8662a, 0xcba5e8ce200d142d, 0x2205c5d40d107d81, 0xd00330eccc07a7e7, 0x16d8d4adf934c3fa, 0x6065815b3283164} +#else +{0x412c2c3df0cc54, 0x2338803450b7d0, 0x206883ec0e5d2f, 0x407a7e72205c5d, 0x187f5a00661d99, 0x5905b6352b7e4d, 0x3032c0ad99418} +#endif +#endif +}; +const fp2_t BASIS_E0_QX = { +#if 0 +#elif RADIX == 16 +{0x16ed, 0x818, 0x127a, 0xcfb, 0x1be6, 0x1b40, 0x1bf1, 0xe75, 0x129c, 0x151, 0x425, 0x142e, 0x1edb, 0x254, 0x5cc, 0x1a5b, 0x1e1d, 0x1e27, 0x1a12, 0x8a8, 0x59e, 0x933, 0x1647, 0x686, 0x19e, 0x1e51, 0x151f, 0x1b6e, 0x1efe, 0xd} +#elif RADIX == 32 +{0x40c5bb5, 0x99f727a, 0x1da06f9, 0x71cebbf, 0x250a8ca, 0xb6e85c4, 0x5cc12a7, 0xf8774b6, 0x1a12f13, 0x9967915, 0xd64749, 0x288678d, 0x6dd51ff, 0x2ebfb} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6f999f727a40c5b, 0x50a8ca71cebbf1da, 0x65cc12a7b6e85c42, 0x9151a12f13f8774b, 0x8678d0d647499967, 0x2e23bfb6dd51ff28} +#else +{0x7333ee4f4818b7, 0x29c73aefc7681b, 0x3db742e2128546, 0x3f8774b65cc12a, 0x332cf22a3425e2, 0x4a219e343591d2, 0x6d1dfdb6ea8ff} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x18a9, 0x1838, 0x1588, 0x1720, 0xf3f, 0x1fcd, 0x44d, 0x1e6b, 0x681, 0x1249, 0x1f8a, 0x5af, 0x1f58, 0x1c12, 0xf21, 0x1887, 0x278, 0x156a, 0xbfe, 0x765, 0x12f7, 0x4da, 0x16ce, 0x7c1, 0x1c04, 0x1773, 0x853, 0xab7, 0xe1d, 0x1a} +#elif RADIX == 32 +{0xc1c62a7, 0xee41588, 0xdfe6bcf, 0x7cd644, 0x8a9249a, 0xd60b5ff, 0xf21e097, 0x9e310e, 0xabfeab5, 0xd4bdcec, 0x836ce26, 0xb9f010f, 0x56e853b, 0x10875} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6bcfee41588c1c62, 0xa9249a07cd644dfe, 0xef21e097d60b5ff8, 0xcecabfeab509e310, 0xf010f836ce26d4bd, 0x2a7787556e853bb9} +#else +{0x1fdc82b11838c5, 0x681f359137f9af, 0x3eb05affc54924, 0x509e310ef21e09, 0x5a97b9d957fd56, 0x6e7c043e0db389, 0x4fbc3aab7429d} +#endif +#endif +}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/e0_basis.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/e0_basis.h new file mode 100644 index 0000000000..05cafb8462 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/e0_basis.h @@ -0,0 +1,3 @@ +#include +extern const fp2_t BASIS_E0_PX; +extern const fp2_t BASIS_E0_QX; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec.c new file mode 100644 index 0000000000..be4e4e55b1 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec.c @@ -0,0 +1,665 @@ +#include +#include +#include +#include + +void +ec_point_init(ec_point_t *P) +{ // Initialize point as identity element (1:0) + fp2_set_one(&(P->x)); + fp2_set_zero(&(P->z)); +} + +void +ec_curve_init(ec_curve_t *E) +{ // Initialize the curve struct + // Initialize the constants + fp2_set_zero(&(E->A)); + fp2_set_one(&(E->C)); + + // Initialize the point (A+2 : 4C) + ec_point_init(&(E->A24)); + + // Set the bool to be false by default + E->is_A24_computed_and_normalized = false; +} + +void +select_point(ec_point_t *Q, const ec_point_t *P1, 
const ec_point_t *P2, const digit_t option) +{ // Select points in constant time + // If option = 0 then Q <- P1, else if option = 0xFF...FF then Q <- P2 + fp2_select(&(Q->x), &(P1->x), &(P2->x), option); + fp2_select(&(Q->z), &(P1->z), &(P2->z), option); +} + +void +cswap_points(ec_point_t *P, ec_point_t *Q, const digit_t option) +{ // Swap points in constant time + // If option = 0 then P <- P and Q <- Q, else if option = 0xFF...FF then P <- Q and Q <- P + fp2_cswap(&(P->x), &(Q->x), option); + fp2_cswap(&(P->z), &(Q->z), option); +} + +void +ec_normalize_point(ec_point_t *P) +{ + fp2_inv(&P->z); + fp2_mul(&P->x, &P->x, &P->z); + fp2_set_one(&(P->z)); +} + +void +ec_normalize_curve(ec_curve_t *E) +{ + fp2_inv(&E->C); + fp2_mul(&E->A, &E->A, &E->C); + fp2_set_one(&E->C); +} + +void +ec_curve_normalize_A24(ec_curve_t *E) +{ + if (!E->is_A24_computed_and_normalized) { + AC_to_A24(&E->A24, E); + ec_normalize_point(&E->A24); + E->is_A24_computed_and_normalized = true; + } + assert(fp2_is_one(&E->A24.z)); +} + +void +ec_normalize_curve_and_A24(ec_curve_t *E) +{ // Neither the curve or A24 are guaranteed to be normalized. + // First we normalize (A/C : 1) and conditionally compute + if (!fp2_is_one(&E->C)) { + ec_normalize_curve(E); + } + + if (!E->is_A24_computed_and_normalized) { + // Now compute A24 = ((A + 2) / 4 : 1) + fp2_add_one(&E->A24.x, &E->A); // re(A24.x) = re(A) + 1 + fp2_add_one(&E->A24.x, &E->A24.x); // re(A24.x) = re(A) + 2 + fp_copy(&E->A24.x.im, &E->A.im); // im(A24.x) = im(A) + + fp2_half(&E->A24.x, &E->A24.x); // (A + 2) / 2 + fp2_half(&E->A24.x, &E->A24.x); // (A + 2) / 4 + fp2_set_one(&E->A24.z); + + E->is_A24_computed_and_normalized = true; + } +} + +uint32_t +ec_is_zero(const ec_point_t *P) +{ + return fp2_is_zero(&P->z); +} + +uint32_t +ec_has_zero_coordinate(const ec_point_t *P) +{ + return fp2_is_zero(&P->x) | fp2_is_zero(&P->z); +} + +uint32_t +ec_is_equal(const ec_point_t *P, const ec_point_t *Q) +{ // Evaluate if two points in Montgomery coordinates (X:Z) are equal + // Returns 0xFFFFFFFF (true) if P=Q, 0 (false) otherwise + fp2_t t0, t1; + + // Check if P, Q are the points at infinity + uint32_t l_zero = ec_is_zero(P); + uint32_t r_zero = ec_is_zero(Q); + + // Check if PX * QZ = QX * PZ + fp2_mul(&t0, &P->x, &Q->z); + fp2_mul(&t1, &P->z, &Q->x); + uint32_t lr_equal = fp2_is_equal(&t0, &t1); + + // Points are equal if + // - Both are zero, or + // - neither are zero AND PX * QZ = QX * PZ + return (l_zero & r_zero) | (~l_zero & ~r_zero * lr_equal); +} + +uint32_t +ec_is_two_torsion(const ec_point_t *P, const ec_curve_t *E) +{ + if (ec_is_zero(P)) + return 0; + + uint32_t x_is_zero, tmp_is_zero; + fp2_t t0, t1, t2; + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + fp2_add(&t1, &t0, &t1); + fp2_mul(&t2, &t2, &E->A); + fp2_mul(&t1, &t1, &E->C); + fp2_add(&t1, &t1, &t1); + fp2_add(&t0, &t1, &t2); // 4 (CX^2+CZ^2+AXZ) + + x_is_zero = fp2_is_zero(&P->x); + tmp_is_zero = fp2_is_zero(&t0); + + // two torsion if x or x^2 + Ax + 1 is zero + return x_is_zero | tmp_is_zero; +} + +uint32_t +ec_is_four_torsion(const ec_point_t *P, const ec_curve_t *E) +{ + ec_point_t test; + xDBL_A24(&test, P, &E->A24, E->is_A24_computed_and_normalized); + return ec_is_two_torsion(&test, E); +} + +uint32_t +ec_is_basis_four_torsion(const ec_basis_t *B, const ec_curve_t *E) +{ // Check if basis points (P, Q) form a full 2^t-basis + ec_point_t P2, Q2; + xDBL_A24(&P2, &B->P, &E->A24, E->is_A24_computed_and_normalized); + 
xDBL_A24(&Q2, &B->Q, &E->A24, E->is_A24_computed_and_normalized); + return (ec_is_two_torsion(&P2, E) & ec_is_two_torsion(&Q2, E) & ~ec_is_equal(&P2, &Q2)); +} + +int +ec_curve_verify_A(const fp2_t *A) +{ // Verify the Montgomery coefficient A is valid (A^2-4 \ne 0) + // Return 1 if curve is valid, 0 otherwise + fp2_t t; + fp2_set_one(&t); + fp_add(&t.re, &t.re, &t.re); // t=2 + if (fp2_is_equal(A, &t)) + return 0; + fp_neg(&t.re, &t.re); // t=-2 + if (fp2_is_equal(A, &t)) + return 0; + return 1; +} + +int +ec_curve_init_from_A(ec_curve_t *E, const fp2_t *A) +{ // Initialize the curve from the A coefficient and check it is valid + // Return 1 if curve is valid, 0 otherwise + ec_curve_init(E); + fp2_copy(&E->A, A); // Set A + return ec_curve_verify_A(A); +} + +void +ec_j_inv(fp2_t *j_inv, const ec_curve_t *curve) +{ // j-invariant computation for Montgommery coefficient A2=(A+2C:4C) + fp2_t t0, t1; + + fp2_sqr(&t1, &curve->C); + fp2_sqr(j_inv, &curve->A); + fp2_add(&t0, &t1, &t1); + fp2_sub(&t0, j_inv, &t0); + fp2_sub(&t0, &t0, &t1); + fp2_sub(j_inv, &t0, &t1); + fp2_sqr(&t1, &t1); + fp2_mul(j_inv, j_inv, &t1); + fp2_add(&t0, &t0, &t0); + fp2_add(&t0, &t0, &t0); + fp2_sqr(&t1, &t0); + fp2_mul(&t0, &t0, &t1); + fp2_add(&t0, &t0, &t0); + fp2_add(&t0, &t0, &t0); + fp2_inv(j_inv); + fp2_mul(j_inv, &t0, j_inv); +} + +void +xDBL_E0(ec_point_t *Q, const ec_point_t *P) +{ // Doubling of a Montgomery point in projective coordinates (X:Z) on the curve E0 with (A:C) = (0:1). + // Input: projective Montgomery x-coordinates P = (XP:ZP), where xP=XP/ZP, and Montgomery curve constants (A:C) = (0:1). + // Output: projective Montgomery x-coordinates Q <- 2*P = (XQ:ZQ) such that x(2P)=XQ/ZQ. + fp2_t t0, t1, t2; + + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + fp2_add(&t1, &t1, &t1); + fp2_mul(&Q->x, &t0, &t1); + fp2_add(&Q->z, &t1, &t2); + fp2_mul(&Q->z, &Q->z, &t2); +} + +void +xDBL(ec_point_t *Q, const ec_point_t *P, const ec_point_t *AC) +{ // Doubling of a Montgomery point in projective coordinates (X:Z). Computation of coefficient values A+2C and 4C + // on-the-fly. + // Input: projective Montgomery x-coordinates P = (XP:ZP), where xP=XP/ZP, and Montgomery curve constants (A:C). + // Output: projective Montgomery x-coordinates Q <- 2*P = (XQ:ZQ) such that x(2P)=XQ/ZQ. + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + fp2_add(&t3, &AC->z, &AC->z); + fp2_mul(&t1, &t1, &t3); + fp2_add(&t1, &t1, &t1); + fp2_mul(&Q->x, &t0, &t1); + fp2_add(&t0, &t3, &AC->x); + fp2_mul(&t0, &t0, &t2); + fp2_add(&t0, &t0, &t1); + fp2_mul(&Q->z, &t0, &t2); +} + +void +xDBL_A24(ec_point_t *Q, const ec_point_t *P, const ec_point_t *A24, const bool A24_normalized) +{ // Doubling of a Montgomery point in projective coordinates (X:Z). + // Input: projective Montgomery x-coordinates P = (XP:ZP), where xP=XP/ZP, and + // the Montgomery curve constants A24 = (A+2C:4C) (or A24 = (A+2C/4C:1) if normalized). + // Output: projective Montgomery x-coordinates Q <- 2*P = (XQ:ZQ) such that x(2P)=XQ/ZQ. 
+ fp2_t t0, t1, t2; + + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + if (!A24_normalized) + fp2_mul(&t1, &t1, &A24->z); + fp2_mul(&Q->x, &t0, &t1); + fp2_mul(&t0, &t2, &A24->x); + fp2_add(&t0, &t0, &t1); + fp2_mul(&Q->z, &t0, &t2); +} + +void +xADD(ec_point_t *R, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ) +{ // Differential addition of Montgomery points in projective coordinates (X:Z). + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, and difference + // PQ=P-Q=(XPQ:ZPQ). + // Output: projective Montgomery point R <- P+Q = (XR:ZR) such that x(P+Q)=XR/ZR. + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_add(&t2, &Q->x, &Q->z); + fp2_sub(&t3, &Q->x, &Q->z); + fp2_mul(&t0, &t0, &t3); + fp2_mul(&t1, &t1, &t2); + fp2_add(&t2, &t0, &t1); + fp2_sub(&t3, &t0, &t1); + fp2_sqr(&t2, &t2); + fp2_sqr(&t3, &t3); + fp2_mul(&t2, &PQ->z, &t2); + fp2_mul(&R->z, &PQ->x, &t3); + fp2_copy(&R->x, &t2); +} + +void +xDBLADD(ec_point_t *R, + ec_point_t *S, + const ec_point_t *P, + const ec_point_t *Q, + const ec_point_t *PQ, + const ec_point_t *A24, + const bool A24_normalized) +{ // Simultaneous doubling and differential addition. + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, the difference + // PQ=P-Q=(XPQ:ZPQ), and the Montgomery curve constants A24 = (A+2C:4C) (or A24 = (A+2C/4C:1) if normalized). + // Output: projective Montgomery points R <- 2*P = (XR:ZR) such that x(2P)=XR/ZR, and S <- P+Q = (XS:ZS) such that = + // x(Q+P)=XS/ZS. + fp2_t t0, t1, t2; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&R->x, &t0); + fp2_sub(&t2, &Q->x, &Q->z); + fp2_add(&S->x, &Q->x, &Q->z); + fp2_mul(&t0, &t0, &t2); + fp2_sqr(&R->z, &t1); + fp2_mul(&t1, &t1, &S->x); + fp2_sub(&t2, &R->x, &R->z); + if (!A24_normalized) + fp2_mul(&R->z, &R->z, &A24->z); + fp2_mul(&R->x, &R->x, &R->z); + fp2_mul(&S->x, &A24->x, &t2); + fp2_sub(&S->z, &t0, &t1); + fp2_add(&R->z, &R->z, &S->x); + fp2_add(&S->x, &t0, &t1); + fp2_mul(&R->z, &R->z, &t2); + fp2_sqr(&S->z, &S->z); + fp2_sqr(&S->x, &S->x); + fp2_mul(&S->z, &S->z, &PQ->x); + fp2_mul(&S->x, &S->x, &PQ->z); +} + +void +xMUL(ec_point_t *Q, const ec_point_t *P, const digit_t *k, const int kbits, const ec_curve_t *curve) +{ // The Montgomery ladder + // Input: projective Montgomery point P=(XP:ZP) such that xP=XP/ZP, a scalar k of bitlength kbits, and + // the Montgomery curve constants (A:C) (or A24 = (A+2C/4C:1) if normalized). + // Output: projective Montgomery points Q <- k*P = (XQ:ZQ) such that x(k*P)=XQ/ZQ. 
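    /* Editor's note, not part of the upstream patch: the loop below is a
       constant-time Montgomery ladder with a lazily applied conditional swap.
       A minimal stand-alone integer analogue (hypothetical helper, assumes
       <stdint.h>; 64-bit limbs, scanned most-significant bit first) makes the
       invariant visible: up to the swap still deferred in prevbit, after
       processing top bits of value t we have R0 = t*x and R1 = (t+1)*x.

           static uint64_t ladder_mul(const uint64_t *k, int kbits, uint64_t x)
           {
               uint64_t R0 = 0, R1 = x;                           // 0*x and 1*x
               unsigned prevbit = 0;
               for (int i = kbits - 1; i >= 0; i--) {
                   unsigned bit = (unsigned)((k[i >> 6] >> (i & 63)) & 1);
                   uint64_t mask = 0 - (uint64_t)(bit ^ prevbit); // all ones iff a swap is due
                   uint64_t t = mask & (R0 ^ R1);                 // branchless cswap
                   R0 ^= t;
                   R1 ^= t;
                   prevbit = bit;
                   R1 = R0 + R1;                                  // "xADD", uses the old R0
                   R0 = R0 + R0;                                  // "xDBL", doubles the old R0
               }
               uint64_t fix = (0 - (uint64_t)prevbit) & (R0 ^ R1);
               return R0 ^ fix;                                   // == k*x, reduced mod 2^64
           }

       On the Kummer line the addition step additionally needs the fixed
       difference x(R1 - R0) = x(P), which is why xDBLADD below receives P as its
       difference argument. */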
+ ec_point_t R0, R1, A24; + digit_t mask; + unsigned int bit, prevbit = 0, swap; + + if (!curve->is_A24_computed_and_normalized) { + // Computation of A24=(A+2C:4C) + fp2_add(&A24.x, &curve->C, &curve->C); + fp2_add(&A24.z, &A24.x, &A24.x); + fp2_add(&A24.x, &A24.x, &curve->A); + } else { + fp2_copy(&A24.x, &curve->A24.x); + fp2_copy(&A24.z, &curve->A24.z); + // Assert A24 has been normalised + assert(fp2_is_one(&A24.z)); + } + + // R0 <- (1:0), R1 <- P + ec_point_init(&R0); + fp2_copy(&R1.x, &P->x); + fp2_copy(&R1.z, &P->z); + + // Main loop + for (int i = kbits - 1; i >= 0; i--) { + bit = (k[i >> LOG2RADIX] >> (i & (RADIX - 1))) & 1; + swap = bit ^ prevbit; + prevbit = bit; + mask = 0 - (digit_t)swap; + + cswap_points(&R0, &R1, mask); + xDBLADD(&R0, &R1, &R0, &R1, P, &A24, true); + } + swap = 0 ^ prevbit; + mask = 0 - (digit_t)swap; + cswap_points(&R0, &R1, mask); + + fp2_copy(&Q->x, &R0.x); + fp2_copy(&Q->z, &R0.z); +} + +int +xDBLMUL(ec_point_t *S, + const ec_point_t *P, + const digit_t *k, + const ec_point_t *Q, + const digit_t *l, + const ec_point_t *PQ, + const int kbits, + const ec_curve_t *curve) +{ // The Montgomery biladder + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, scalars k and l of + // bitlength kbits, the difference PQ=P-Q=(XPQ:ZPQ), and the Montgomery curve constants (A:C). + // Output: projective Montgomery point S <- k*P + l*Q = (XS:ZS) such that x(k*P + l*Q)=XS/ZS. + + int i, A_is_zero; + digit_t evens, mevens, bitk0, bitl0, maskk, maskl, temp, bs1_ip1, bs2_ip1, bs1_i, bs2_i, h; + digit_t sigma[2] = { 0 }, pre_sigma = 0; + digit_t k_t[NWORDS_ORDER], l_t[NWORDS_ORDER], one[NWORDS_ORDER] = { 0 }, r[2 * BITS] = { 0 }; + ec_point_t DIFF1a, DIFF1b, DIFF2a, DIFF2b, R[3] = { 0 }, T[3]; + + // differential additions formulas are invalid in this case + if (ec_has_zero_coordinate(P) | ec_has_zero_coordinate(Q) | ec_has_zero_coordinate(PQ)) + return 0; + + // Derive sigma according to parity + bitk0 = (k[0] & 1); + bitl0 = (l[0] & 1); + maskk = 0 - bitk0; // Parity masks: 0 if even, otherwise 1...1 + maskl = 0 - bitl0; + sigma[0] = (bitk0 ^ 1); + sigma[1] = (bitl0 ^ 1); + evens = sigma[0] + sigma[1]; // Count number of even scalars + mevens = 0 - (evens & 1); // Mask mevens <- 0 if # even of scalars = 0 or 2, otherwise mevens = 1...1 + + // If k and l are both even or both odd, pick sigma = (0,1) + sigma[0] = (sigma[0] & mevens); + sigma[1] = (sigma[1] & mevens) | (1 & ~mevens); + + // Convert even scalars to odd + one[0] = 1; + mp_sub(k_t, k, one, NWORDS_ORDER); + mp_sub(l_t, l, one, NWORDS_ORDER); + select_ct(k_t, k_t, k, maskk, NWORDS_ORDER); + select_ct(l_t, l_t, l, maskl, NWORDS_ORDER); + + // Scalar recoding + for (i = 0; i < kbits; i++) { + // If sigma[0] = 1 swap k_t and l_t + maskk = 0 - (sigma[0] ^ pre_sigma); + swap_ct(k_t, l_t, maskk, NWORDS_ORDER); + + if (i == kbits - 1) { + bs1_ip1 = 0; + bs2_ip1 = 0; + } else { + bs1_ip1 = mp_shiftr(k_t, 1, NWORDS_ORDER); + bs2_ip1 = mp_shiftr(l_t, 1, NWORDS_ORDER); + } + bs1_i = k_t[0] & 1; + bs2_i = l_t[0] & 1; + + r[2 * i] = bs1_i ^ bs1_ip1; + r[2 * i + 1] = bs2_i ^ bs2_ip1; + + // Revert sigma if second bit, r_(2i+1), is 1 + pre_sigma = sigma[0]; + maskk = 0 - r[2 * i + 1]; + select_ct(&temp, &sigma[0], &sigma[1], maskk, 1); + select_ct(&sigma[1], &sigma[1], &sigma[0], maskk, 1); + sigma[0] = temp; + } + + // Point initialization + ec_point_init(&R[0]); + maskk = 0 - sigma[0]; + select_point(&R[1], P, Q, maskk); + select_point(&R[2], Q, P, maskk); + + fp2_copy(&DIFF1a.x, 
&R[1].x); + fp2_copy(&DIFF1a.z, &R[1].z); + fp2_copy(&DIFF1b.x, &R[2].x); + fp2_copy(&DIFF1b.z, &R[2].z); + + // Initialize DIFF2a <- P+Q, DIFF2b <- P-Q + xADD(&R[2], &R[1], &R[2], PQ); + if (ec_has_zero_coordinate(&R[2])) + return 0; // non valid formulas + + fp2_copy(&DIFF2a.x, &R[2].x); + fp2_copy(&DIFF2a.z, &R[2].z); + fp2_copy(&DIFF2b.x, &PQ->x); + fp2_copy(&DIFF2b.z, &PQ->z); + + A_is_zero = fp2_is_zero(&curve->A); + + // Main loop + for (i = kbits - 1; i >= 0; i--) { + h = r[2 * i] + r[2 * i + 1]; // in {0, 1, 2} + maskk = 0 - (h & 1); + select_point(&T[0], &R[0], &R[1], maskk); + maskk = 0 - (h >> 1); + select_point(&T[0], &T[0], &R[2], maskk); + if (A_is_zero) { + xDBL_E0(&T[0], &T[0]); + } else { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(&T[0], &T[0], &curve->A24, true); + } + + maskk = 0 - r[2 * i + 1]; // in {0, 1} + select_point(&T[1], &R[0], &R[1], maskk); + select_point(&T[2], &R[1], &R[2], maskk); + + cswap_points(&DIFF1a, &DIFF1b, maskk); + xADD(&T[1], &T[1], &T[2], &DIFF1a); + xADD(&T[2], &R[0], &R[2], &DIFF2a); + + // If hw (mod 2) = 1 then swap DIFF2a and DIFF2b + maskk = 0 - (h & 1); + cswap_points(&DIFF2a, &DIFF2b, maskk); + + // R <- T + copy_point(&R[0], &T[0]); + copy_point(&R[1], &T[1]); + copy_point(&R[2], &T[2]); + } + + // Output R[evens] + select_point(S, &R[0], &R[1], mevens); + + maskk = 0 - (bitk0 & bitl0); + select_point(S, S, &R[2], maskk); + return 1; +} + +int +ec_ladder3pt(ec_point_t *R, + const digit_t *m, + const ec_point_t *P, + const ec_point_t *Q, + const ec_point_t *PQ, + const ec_curve_t *E) +{ // The 3-point Montgomery ladder + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, a scalar k of + // bitlength kbits, the difference PQ=P-Q=(XPQ:ZPQ), and the Montgomery curve constants A24 = (A+2C/4C:1). + // Output: projective Montgomery point R <- P + m*Q = (XR:ZR) such that x(P + m*Q)=XR/ZR. 
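    /* Editor's note, not part of the upstream patch: unlike xMUL above, this
       3-point ladder consumes the scalar least-significant bit first and always
       walks all NWORDS_ORDER * RADIX bits. The expression -((t & m[i]) == 0)
       turns the 0/1 comparison result into the all-zero / all-one mask that
       cswap_points() expects, so the points are swapped exactly when the current
       scalar bit is 0 (and swapped back after the combined double-and-add),
       without any secret-dependent branch. A minimal stand-alone sketch of that
       masking idiom on machine words (hypothetical helper, assumes <stdint.h>):

           static void ct_swap_u64(uint64_t *a, uint64_t *b, uint64_t swap_mask)
           {
               // swap_mask is 0 (keep) or ~0 (swap), e.g. built as 0 - (uint64_t)cond
               uint64_t t = swap_mask & (*a ^ *b);
               *a ^= t;
               *b ^= t;
           }

       cswap_points() delegates the constant-time swap of both projective
       coordinates to fp2_cswap(). */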
+ assert(E->is_A24_computed_and_normalized); + if (!fp2_is_one(&E->A24.z)) { + return 0; + } + // Formulas are not valid in that case + if (ec_has_zero_coordinate(PQ)) { + return 0; + } + + ec_point_t X0, X1, X2; + copy_point(&X0, Q); + copy_point(&X1, P); + copy_point(&X2, PQ); + + int i, j; + digit_t t; + for (i = 0; i < NWORDS_ORDER; i++) { + t = 1; + for (j = 0; j < RADIX; j++) { + cswap_points(&X1, &X2, -((t & m[i]) == 0)); + xDBLADD(&X0, &X1, &X0, &X1, &X2, &E->A24, true); + cswap_points(&X1, &X2, -((t & m[i]) == 0)); + t <<= 1; + }; + }; + copy_point(R, &X1); + return 1; +} + +// WRAPPERS to export + +void +ec_dbl(ec_point_t *res, const ec_point_t *P, const ec_curve_t *curve) +{ + // If A24 = ((A+2)/4 : 1) we save multiplications + if (curve->is_A24_computed_and_normalized) { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(res, P, &curve->A24, true); + } else { + // Otherwise we compute A24 on the fly for doubling + xDBL(res, P, (const ec_point_t *)curve); + } +} + +void +ec_dbl_iter(ec_point_t *res, int n, const ec_point_t *P, ec_curve_t *curve) +{ + if (n == 0) { + copy_point(res, P); + return; + } + + // When the chain is long enough, we should normalise A24 + if (n > 50) { + ec_curve_normalize_A24(curve); + } + + // When A24 is normalized we can save some multiplications + if (curve->is_A24_computed_and_normalized) { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(res, P, &curve->A24, true); + for (int i = 0; i < n - 1; i++) { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(res, res, &curve->A24, true); + } + } else { + // Otherwise we do normal doubling + xDBL(res, P, (const ec_point_t *)curve); + for (int i = 0; i < n - 1; i++) { + xDBL(res, res, (const ec_point_t *)curve); + } + } +} + +void +ec_dbl_iter_basis(ec_basis_t *res, int n, const ec_basis_t *B, ec_curve_t *curve) +{ + ec_dbl_iter(&res->P, n, &B->P, curve); + ec_dbl_iter(&res->Q, n, &B->Q, curve); + ec_dbl_iter(&res->PmQ, n, &B->PmQ, curve); +} + +void +ec_mul(ec_point_t *res, const digit_t *scalar, const int kbits, const ec_point_t *P, ec_curve_t *curve) +{ + // For large scalars it's worth normalising anyway + if (kbits > 50) { + ec_curve_normalize_A24(curve); + } + + // When A24 is computed and normalized we save some Fp2 multiplications + xMUL(res, P, scalar, kbits, curve); +} + +int +ec_biscalar_mul(ec_point_t *res, + const digit_t *scalarP, + const digit_t *scalarQ, + const int kbits, + const ec_basis_t *PQ, + const ec_curve_t *curve) +{ + if (fp2_is_zero(&PQ->PmQ.z)) + return 0; + + /* Differential additions behave badly when PmQ = (0:1), so we need to + * treat this case specifically. 
Since we assume P, Q are a basis, this + * can happen only if kbits==1 */ + if (kbits == 1) { + // Sanity check: our basis should be given by 2-torsion points + if (!ec_is_two_torsion(&PQ->P, curve) || !ec_is_two_torsion(&PQ->Q, curve) || + !ec_is_two_torsion(&PQ->PmQ, curve)) + return 0; + digit_t bP, bQ; + bP = (scalarP[0] & 1); + bQ = (scalarQ[0] & 1); + if (bP == 0 && bQ == 0) + ec_point_init(res); //(1: 0) + else if (bP == 1 && bQ == 0) + copy_point(res, &PQ->P); + else if (bP == 0 && bQ == 1) + copy_point(res, &PQ->Q); + else if (bP == 1 && bQ == 1) + copy_point(res, &PQ->PmQ); + else // should never happen + assert(0); + return 1; + } else { + ec_curve_t E; + copy_curve(&E, curve); + + if (!fp2_is_zero(&curve->A)) { // If A is not zero normalize + ec_curve_normalize_A24(&E); + } + return xDBLMUL(res, &PQ->P, scalarP, &PQ->Q, scalarQ, &PQ->PmQ, kbits, (const ec_curve_t *)&E); + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec.h new file mode 100644 index 0000000000..ee2be38060 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec.h @@ -0,0 +1,668 @@ +/** @file + * + * @authors Luca De Feo, Francisco RH + * + * @brief Elliptic curve stuff + */ + +#ifndef EC_H +#define EC_H +#include +#include +#include +#include +#include + +/** @defgroup ec Elliptic curves + * @{ + */ + +/** @defgroup ec_t Data structures + * @{ + */ + +/** @brief Projective point on the Kummer line E/pm 1 in Montgomery coordinates + * + * @typedef ec_point_t + * + * @struct ec_point_t + * + * A projective point in (X:Z) or (X:Y:Z) coordinates (tbd). + */ +typedef struct ec_point_t +{ + fp2_t x; + fp2_t z; +} ec_point_t; + +/** @brief Projective point in Montgomery coordinates + * + * @typedef jac_point_t + * + * @struct jac_point_t + * + * A projective point in (X:Y:Z) coordinates + */ +typedef struct jac_point_t +{ + fp2_t x; + fp2_t y; + fp2_t z; +} jac_point_t; + +/** @brief Addition components + * + * @typedef add_components_t + * + * @struct add_components_t + * + * 3 components u,v,w that define the (X:Z) coordinates of both + * addition and substraction of two distinct points with + * P+Q =(u-v:w) and P-Q = (u+v=w) + */ +typedef struct add_components_t +{ + fp2_t u; + fp2_t v; + fp2_t w; +} add_components_t; + +/** @brief A basis of a torsion subgroup + * + * @typedef ec_basis_t + * + * @struct ec_basis_t + * + * A pair of points (or a triplet, tbd) forming a basis of a torsion subgroup. 
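+ * The third point PmQ = P - Q is kept alongside P and Q so that x-only differential
+ * additions, which require the difference of the points being added, can be applied directly.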
+ */ +typedef struct ec_basis_t +{ + ec_point_t P; + ec_point_t Q; + ec_point_t PmQ; +} ec_basis_t; + +/** @brief An elliptic curve + * + * @typedef ec_curve_t + * + * @struct ec_curve_t + * + * An elliptic curve in projective Montgomery form + */ +typedef struct ec_curve_t +{ + fp2_t A; + fp2_t C; ///< cannot be 0 + ec_point_t A24; // the point (A+2 : 4C) + bool is_A24_computed_and_normalized; // says if A24 has been computed and normalized +} ec_curve_t; + +/** @brief An isogeny of degree a power of 2 + * + * @typedef ec_isog_even_t + * + * @struct ec_isog_even_t + */ +typedef struct ec_isog_even_t +{ + ec_curve_t curve; ///< The domain curve + ec_point_t kernel; ///< A kernel generator + unsigned length; ///< The length as a 2-isogeny walk +} ec_isog_even_t; + +/** @brief Isomorphism of Montgomery curves + * + * @typedef ec_isom_t + * + * @struct ec_isom_t + * + * The isomorphism is given by the map maps (X:Z) ↦ ( (Nx X + Nz Z) : (D Z) ) + */ +typedef struct ec_isom_t +{ + fp2_t Nx; + fp2_t Nz; + fp2_t D; +} ec_isom_t; + +// end ec_t +/** @} + */ + +/** @defgroup ec_curve_t Curves and isomorphisms + * @{ + */ + +// Initalisation for curves and points +void ec_curve_init(ec_curve_t *E); +void ec_point_init(ec_point_t *P); + +/** + * @brief Verify that a Montgomery coefficient is valid + * + * @param A an fp2_t + * + * @return 0 if curve is invalid, 1 otherwise + */ +int ec_curve_verify_A(const fp2_t *A); + +/** + * @brief Initialize an elliptic curve from a coefficient + * + * @param A an fp2_t + * @param E the elliptic curve to initialize + * + * @return 0 if curve is invalid, 1 otherwise + */ +int ec_curve_init_from_A(ec_curve_t *E, const fp2_t *A); + +// Copying points, bases and curves +static inline void +copy_point(ec_point_t *P, const ec_point_t *Q) +{ + fp2_copy(&P->x, &Q->x); + fp2_copy(&P->z, &Q->z); +} + +static inline void +copy_basis(ec_basis_t *B1, const ec_basis_t *B0) +{ + copy_point(&B1->P, &B0->P); + copy_point(&B1->Q, &B0->Q); + copy_point(&B1->PmQ, &B0->PmQ); +} + +static inline void +copy_curve(ec_curve_t *E1, const ec_curve_t *E2) +{ + fp2_copy(&(E1->A), &(E2->A)); + fp2_copy(&(E1->C), &(E2->C)); + E1->is_A24_computed_and_normalized = E2->is_A24_computed_and_normalized; + copy_point(&E1->A24, &E2->A24); +} + +// Functions for working with the A24 point and normalisation + +/** + * @brief Reduce (A : C) to (A/C : 1) in place + * + * @param E a curve + */ +void ec_normalize_curve(ec_curve_t *E); + +/** + * @brief Reduce (A + 2 : 4C) to ((A+2)/4C : 1) in place + * + * @param E a curve + */ +void ec_curve_normalize_A24(ec_curve_t *E); + +/** + * @brief Normalise both (A : C) and (A + 2 : 4C) as above, in place + * + * @param E a curve + */ +void ec_normalize_curve_and_A24(ec_curve_t *E); + +/** + * @brief Given a curve E, compute (A+2 : 4C) + * + * @param A24 the value (A+2 : 4C) to return into + * @param E a curve + */ +static inline void +AC_to_A24(ec_point_t *A24, const ec_curve_t *E) +{ + // Maybe we already have this computed + if (E->is_A24_computed_and_normalized) { + copy_point(A24, &E->A24); + return; + } + + // A24 = (A+2C : 4C) + fp2_add(&A24->z, &E->C, &E->C); + fp2_add(&A24->x, &E->A, &A24->z); + fp2_add(&A24->z, &A24->z, &A24->z); +} + +/** + * @brief Given a curve the point (A+2 : 4C) compute the curve coefficients (A : C) + * + * @param E a curve to compute + * @param A24 the value (A+2 : 4C) + */ +static inline void +A24_to_AC(ec_curve_t *E, const ec_point_t *A24) +{ + // (A:C) = ((A+2C)*2-4C : 4C) + fp2_add(&E->A, &A24->x, &A24->x); + fp2_sub(&E->A, 
&E->A, &A24->z);
+ fp2_add(&E->A, &E->A, &E->A);
+ fp2_copy(&E->C, &A24->z);
+}
+
+/**
+ * @brief j-invariant.
+ *
+ * @param j_inv computed j-invariant
+ * @param curve input curve
+ */
+void ec_j_inv(fp2_t *j_inv, const ec_curve_t *curve);
+
+/**
+ * @brief Isomorphism of elliptic curves
+ * Takes as input two isomorphic Kummer lines in Montgomery form, and outputs an isomorphism between
+ * them
+ *
+ * @param isom computed isomorphism
+ * @param from domain curve
+ * @param to image curve
+ * @return 0xFFFFFFFF if there was an error during the computation, zero otherwise
+ */
+uint32_t ec_isomorphism(ec_isom_t *isom, const ec_curve_t *from, const ec_curve_t *to);
+
+/**
+ * @brief In-place evaluation of an isomorphism
+ *
+ * @param P a point
+ * @param isom an isomorphism
+ */
+void ec_iso_eval(ec_point_t *P, ec_isom_t *isom);
+
+/** @}
+ */
+/** @defgroup ec_point_t Point operations
+ * @{
+ */
+
+/**
+ * @brief Point equality
+ *
+ * @param P a point
+ * @param Q a point
+ * @return 0xFFFFFFFF if equal, zero otherwise
+ */
+uint32_t ec_is_equal(const ec_point_t *P, const ec_point_t *Q);
+
+/**
+ * @brief Test for the point at infinity
+ *
+ * @param P a point
+ * @return 0xFFFFFFFF if point at infinity, zero otherwise
+ */
+uint32_t ec_is_zero(const ec_point_t *P);
+
+/**
+ * @brief Two torsion test
+ *
+ * @param P a point
+ * @param E the elliptic curve
+ * @return 0xFFFFFFFF if P is 2-torsion but not zero, zero otherwise
+ */
+uint32_t ec_is_two_torsion(const ec_point_t *P, const ec_curve_t *E);
+
+/**
+ * @brief Four torsion test
+ *
+ * @param P a point
+ * @param E the elliptic curve
+ * @return 0xFFFFFFFF if P is 4-torsion but not zero, zero otherwise
+ */
+uint32_t ec_is_four_torsion(const ec_point_t *P, const ec_curve_t *E);
+
+/**
+ * @brief Reduce Z-coordinate of point in place
+ *
+ * @param P a point
+ */
+void ec_normalize_point(ec_point_t *P);
+
+void xDBL_E0(ec_point_t *Q, const ec_point_t *P);
+void xADD(ec_point_t *R, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ);
+void xDBL_A24(ec_point_t *Q, const ec_point_t *P, const ec_point_t *A24, const bool A24_normalized);
+
+/**
+ * @brief Point doubling
+ *
+ * @param res computed double of P
+ * @param P a point
+ * @param curve an elliptic curve
+ */
+void ec_dbl(ec_point_t *res, const ec_point_t *P, const ec_curve_t *curve);
+
+/**
+ * @brief Point iterated doubling
+ *
+ * @param res the computed multiple [2^n]P
+ * @param P a point
+ * @param n the number of doublings
+ * @param curve the curve on which P lies
+ */
+void ec_dbl_iter(ec_point_t *res, int n, const ec_point_t *P, ec_curve_t *curve);
+
+/**
+ * @brief Iterated doubling for a basis P, Q, PmQ
+ *
+ * @param res the computed iterated double of basis B
+ * @param n the number of doubles
+ * @param B the basis to double
+ * @param curve the parent curve of the basis
+ */
+void ec_dbl_iter_basis(ec_basis_t *res, int n, const ec_basis_t *B, ec_curve_t *curve);
+
+/**
+ * @brief Point multiplication
+ *
+ * @param res computed scalar * P
+ * @param curve the curve
+ * @param scalar an unsigned multi-precision integer
+ * @param P a point
+ * @param kbits number of bits of the scalar
+ */
+void ec_mul(ec_point_t *res, const digit_t *scalar, const int kbits, const ec_point_t *P, ec_curve_t *curve);
+
+/**
+ * @brief Combination P+m*Q
+ *
+ * @param R computed P + m * Q
+ * @param curve the curve
+ * @param m an unsigned multi-precision integer
+ * @param P a point
+ * @param Q a point
+ * @param PQ the difference P-Q
+ * @return 0 if there was an error, 1 otherwise
+ */
+int
ec_ladder3pt(ec_point_t *R,
+ const digit_t *m,
+ const ec_point_t *P,
+ const ec_point_t *Q,
+ const ec_point_t *PQ,
+ const ec_curve_t *curve);
+
+/**
+ * @brief Linear combination of points of a basis
+ *
+ * @param res computed scalarP * P + scalarQ * Q
+ * @param scalarP an unsigned multi-precision integer
+ * @param scalarQ an unsigned multi-precision integer
+ * @param kbits number of bits of the scalars, or n for points of order 2^n
+ * @param PQ a torsion basis consisting of points P and Q
+ * @param curve the curve
+ *
+ * @return 0 if there was an error, 1 otherwise
+ */
+int ec_biscalar_mul(ec_point_t *res,
+ const digit_t *scalarP,
+ const digit_t *scalarQ,
+ const int kbits,
+ const ec_basis_t *PQ,
+ const ec_curve_t *curve);
+
+// end point computations
+/**
+ * @}
+ */
+
+/** @defgroup ec_dlog_t Torsion basis computations
+ * @{
+ */
+
+/**
+ * @brief Generate a 2^f-torsion basis from a Montgomery curve along with a hint
+ *
+ * @param PQ2 an ec_basis_t
+ * @param curve an ec_curve_t
+ * @param f an integer
+ *
+ * @return A hint
+ *
+ * The algorithm is deterministic
+ */
+uint8_t ec_curve_to_basis_2f_to_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f);
+
+/**
+ * @brief Generate a 2^f-torsion basis from a Montgomery curve and a given hint
+ *
+ * @param PQ2 an ec_basis_t
+ * @param curve an ec_curve_t
+ * @param f an integer
+ * @param hint the hint
+ *
+ * @return 1 if the basis is valid, 0 otherwise
+ *
+ * The algorithm is deterministic
+ */
+int ec_curve_to_basis_2f_from_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f, const uint8_t hint);
+/** // end basis computations
+ * @}
+ */
+
+/** @defgroup ec_isog_t Isogenies
+ * @{
+ */
+
+/**
+ * @brief Evaluate isogeny of even degree on list of points.
+ * Returns 0 if successful and -1 if kernel has the wrong order or includes (0:1).
+ *
+ * @param image computed image curve
+ * @param phi isogeny
+ * @param points a list of points to evaluate the isogeny on, modified in place
+ * @param len_points length of the list of points
+ *
+ * @return 0 if there was no error, 0xFFFFFFFF otherwise
+ */
+uint32_t ec_eval_even(ec_curve_t *image, ec_isog_even_t *phi, ec_point_t *points, unsigned len_points);
+
+/**
+ * @brief Multiplicative strategy for a short isogeny chain. Returns 1 if successful and -1
+ * if kernel has the wrong order or includes (0:1) when special=false.
+ *
+ * @param curve domain curve, to be overwritten by the codomain curve.
+ * @param kernel a kernel generator of order 2^len
+ * @param len the length of the 2-isogeny chain
+ * @param points a list of points to evaluate the isogeny on, modified in place
+ * @param len_points length of the list of points
+ * @param special if true, allow isogenies with (0:1) in the kernel
+ *
+ * @return 0 if there was no error, 0xFFFFFFFF otherwise
+ */
+uint32_t ec_eval_small_chain(ec_curve_t *curve,
+ const ec_point_t *kernel,
+ int len,
+ ec_point_t *points,
+ unsigned len_points,
+ bool special);
+
+/**
+ * @brief Recover Y-coordinate from X-coordinate and curve coefficients.
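+ * (In the Montgomery model y^2 = x^3 + (A/C)*x^2 + x used here, x determines y only up to sign.)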
+ * + * @param y: a y-coordinate + * @param Px: a x-coordinate + * @param curve: the elliptic curve + * + * @return 0xFFFFFFFF if the point was on the curve, 0 otherwise + */ +uint32_t ec_recover_y(fp2_t *y, const fp2_t *Px, const ec_curve_t *curve); + +// Jacobian point init and copying +void jac_init(jac_point_t *P); +void copy_jac_point(jac_point_t *P, const jac_point_t *Q); + +/** + * @brief Test if two Jacobian points are equal + * + * @param P: a point + * @param Q: a point + * + * @return 0xFFFFFFFF if they are equal, 0 otherwise + */ +uint32_t jac_is_equal(const jac_point_t *P, const jac_point_t *Q); + +// Convert from Jacobian to x-only (just drop the Y-coordinate) +void jac_to_xz(ec_point_t *P, const jac_point_t *xyP); +// Convert from Jacobian coordinates in Montgomery model to Weierstrass +void jac_to_ws(jac_point_t *P, fp2_t *t, fp2_t *ao3, const jac_point_t *Q, const ec_curve_t *curve); +void jac_from_ws(jac_point_t *Q, const jac_point_t *P, const fp2_t *ao3, const ec_curve_t *curve); + +// Jacobian arithmetic +void jac_neg(jac_point_t *Q, const jac_point_t *P); +void ADD(jac_point_t *R, const jac_point_t *P, const jac_point_t *Q, const ec_curve_t *AC); +void DBL(jac_point_t *Q, const jac_point_t *P, const ec_curve_t *AC); +void DBLW(jac_point_t *Q, fp2_t *u, const jac_point_t *P, const fp2_t *t); +void jac_to_xz_add_components(add_components_t *uvw, const jac_point_t *P, const jac_point_t *Q, const ec_curve_t *AC); + +/** + * @brief Given a basis in x-only, lift to a pair of Jacobian points + * + * @param P: a point + * @param Q: a point + * @param B: a basis + * @param E: an elliptic curve + * + * @return 0xFFFFFFFF if there was no error, 0 otherwise + * + * + * Lifts a basis x(P), x(Q), x(P-Q) assuming the curve has (A/C : 1) and + * the point P = (X/Z : 1). 
For generic implementation see lift_basis() + */ +uint32_t lift_basis_normalized(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E); + +/** + * @brief Given a basis in x-only, lift to a pair of Jacobian points + * + * @param P: a point + * @param Q: a point + * @param B: a basis + * @param E: an elliptic curve + * + * @return 0xFFFFFFFF if there was no error, 0 otherwise + */ +uint32_t lift_basis(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E); + +/** + * @brief Check if basis points (P, Q) form a full 4-basis + * + * @param B: a basis + * @param E: an elliptic curve + * + * @return 0xFFFFFFFF if they form a basis, 0 otherwise + */ +uint32_t ec_is_basis_four_torsion(const ec_basis_t *B, const ec_curve_t *E); + +/* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Test functions for printing and order checking, only used in debug mode + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + */ + +/** + * @brief Check if a point (X : Z) has order exactly 2^t + * + * @param P: a point + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_point_order_twof(const ec_point_t *P, const ec_curve_t *E, int t) +{ + ec_point_t test; + ec_curve_t curve; + test = *P; + copy_curve(&curve, E); + + if (ec_is_zero(&test)) + return 0; + // Scale point by 2^(t-1) + ec_dbl_iter(&test, t - 1, &test, &curve); + // If it's zero now, it doesnt have order 2^t + if (ec_is_zero(&test)) + return 0; + // Ensure [2^t] P = 0 + ec_dbl(&test, &test, &curve); + return ec_is_zero(&test); +} + +/** + * @brief Check if basis points (P, Q, PmQ) all have order exactly 2^t + * + * @param B: a basis + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_basis_order_twof(const ec_basis_t *B, const ec_curve_t *E, int t) +{ + int check_P = test_point_order_twof(&B->P, E, t); + int check_Q = test_point_order_twof(&B->Q, E, t); + int check_PmQ = test_point_order_twof(&B->PmQ, E, t); + + return check_P & check_Q & check_PmQ; +} + +/** + * @brief Check if a Jacobian point (X : Y : Z) has order exactly 2^f + * + * @param P: a point + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_jac_order_twof(const jac_point_t *P, const ec_curve_t *E, int t) +{ + jac_point_t test; + test = *P; + if (fp2_is_zero(&test.z)) + return 0; + for (int i = 0; i < t - 1; i++) { + DBL(&test, &test, E); + } + if (fp2_is_zero(&test.z)) + return 0; + DBL(&test, &test, E); + return (fp2_is_zero(&test.z)); +} + +// Prints the x-coordinate of the point (X : 1) +static void +ec_point_print(const char *name, ec_point_t P) +{ + fp2_t a; + if (fp2_is_zero(&P.z)) { + printf("%s = INF\n", name); + } else { + fp2_copy(&a, &P.z); + fp2_inv(&a); + fp2_mul(&a, &a, &P.x); + fp2_print(name, &a); + } +} + +// Prints the Montgomery coefficient A +static void +ec_curve_print(const char *name, ec_curve_t E) +{ + fp2_t a; + fp2_copy(&a, &E.C); + fp2_inv(&a); + fp2_mul(&a, &a, &E.A); + fp2_print(name, &a); +} + +#endif +// end isogeny computations +/** + * @} + */ + +// end ec +/** + * @} + */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec_jac.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec_jac.c new file mode 100644 index 0000000000..20ca68c9b2 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec_jac.c @@ -0,0 
+1,335 @@ +#include +#include + +void +jac_init(jac_point_t *P) +{ // Initialize Montgomery in Jacobian coordinates as identity element (0:1:0) + fp2_set_zero(&P->x); + fp2_set_one(&P->y); + fp2_set_zero(&P->z); +} + +uint32_t +jac_is_equal(const jac_point_t *P, const jac_point_t *Q) +{ // Evaluate if two points in Jacobian coordinates (X:Y:Z) are equal + // Returns 1 (true) if P=Q, 0 (false) otherwise + fp2_t t0, t1, t2, t3; + + fp2_sqr(&t0, &Q->z); + fp2_mul(&t2, &P->x, &t0); // x1*z2^2 + fp2_sqr(&t1, &P->z); + fp2_mul(&t3, &Q->x, &t1); // x2*z1^2 + fp2_sub(&t2, &t2, &t3); + + fp2_mul(&t0, &t0, &Q->z); + fp2_mul(&t0, &P->y, &t0); // y1*z2^3 + fp2_mul(&t1, &t1, &P->z); + fp2_mul(&t1, &Q->y, &t1); // y2*z1^3 + fp2_sub(&t0, &t0, &t1); + + return fp2_is_zero(&t0) & fp2_is_zero(&t2); +} + +void +jac_to_xz(ec_point_t *P, const jac_point_t *xyP) +{ + fp2_copy(&P->x, &xyP->x); + fp2_copy(&P->z, &xyP->z); + fp2_sqr(&P->z, &P->z); + + // If xyP = (0:1:0), we currently have P=(0 : 0) but we want to set P=(1:0) + uint32_t c1, c2; + fp2_t one; + fp2_set_one(&one); + + c1 = fp2_is_zero(&P->x); + c2 = fp2_is_zero(&P->z); + fp2_select(&P->x, &P->x, &one, c1 & c2); +} + +void +jac_to_ws(jac_point_t *Q, fp2_t *t, fp2_t *ao3, const jac_point_t *P, const ec_curve_t *curve) +{ + // Cost of 3M + 2S when A != 0. + fp_t one; + fp2_t a; + /* a = 1 - A^2/3, U = X + (A*Z^2)/3, V = Y, W = Z, T = a*Z^4*/ + fp_set_one(&one); + if (!fp2_is_zero(&(curve->A))) { + fp_div3(&(ao3->re), &(curve->A.re)); + fp_div3(&(ao3->im), &(curve->A.im)); + fp2_sqr(t, &P->z); + fp2_mul(&Q->x, ao3, t); + fp2_add(&Q->x, &Q->x, &P->x); + fp2_sqr(t, t); + fp2_mul(&a, ao3, &(curve->A)); + fp_sub(&(a.re), &one, &(a.re)); + fp_neg(&(a.im), &(a.im)); + fp2_mul(t, t, &a); + } else { + fp2_copy(&Q->x, &P->x); + fp2_sqr(t, &P->z); + fp2_sqr(t, t); + } + fp2_copy(&Q->y, &P->y); + fp2_copy(&Q->z, &P->z); +} + +void +jac_from_ws(jac_point_t *Q, const jac_point_t *P, const fp2_t *ao3, const ec_curve_t *curve) +{ + // Cost of 1M + 1S when A != 0. + fp2_t t; + /* X = U - (A*W^2)/3, Y = V, Z = W. */ + if (!fp2_is_zero(&(curve->A))) { + fp2_sqr(&t, &P->z); + fp2_mul(&t, &t, ao3); + fp2_sub(&Q->x, &P->x, &t); + } + fp2_copy(&Q->y, &P->y); + fp2_copy(&Q->z, &P->z); +} + +void +copy_jac_point(jac_point_t *P, const jac_point_t *Q) +{ + fp2_copy(&(P->x), &(Q->x)); + fp2_copy(&(P->y), &(Q->y)); + fp2_copy(&(P->z), &(Q->z)); +} + +void +jac_neg(jac_point_t *Q, const jac_point_t *P) +{ + fp2_copy(&Q->x, &P->x); + fp2_neg(&Q->y, &P->y); + fp2_copy(&Q->z, &P->z); +} + +void +DBL(jac_point_t *Q, const jac_point_t *P, const ec_curve_t *AC) +{ // Cost of 6M + 6S. 
+ // Doubling on a Montgomery curve, representation in Jacobian coordinates (X:Y:Z) corresponding to + // (X/Z^2,Y/Z^3) This version receives the coefficient value A + fp2_t t0, t1, t2, t3; + + uint32_t flag = fp2_is_zero(&P->x) & fp2_is_zero(&P->z); + + fp2_sqr(&t0, &P->x); // t0 = x1^2 + fp2_add(&t1, &t0, &t0); + fp2_add(&t0, &t0, &t1); // t0 = 3x1^2 + fp2_sqr(&t1, &P->z); // t1 = z1^2 + fp2_mul(&t2, &P->x, &AC->A); + fp2_add(&t2, &t2, &t2); // t2 = 2Ax1 + fp2_add(&t2, &t1, &t2); // t2 = 2Ax1+z1^2 + fp2_mul(&t2, &t1, &t2); // t2 = z1^2(2Ax1+z1^2) + fp2_add(&t2, &t0, &t2); // t2 = alpha = 3x1^2 + z1^2(2Ax1+z1^2) + fp2_mul(&Q->z, &P->y, &P->z); + fp2_add(&Q->z, &Q->z, &Q->z); // z2 = 2y1z1 + fp2_sqr(&t0, &Q->z); + fp2_mul(&t0, &t0, &AC->A); // t0 = 4Ay1^2z1^2 + fp2_sqr(&t1, &P->y); + fp2_add(&t1, &t1, &t1); // t1 = 2y1^2 + fp2_add(&t3, &P->x, &P->x); // t3 = 2x1 + fp2_mul(&t3, &t1, &t3); // t3 = 4x1y1^2 + fp2_sqr(&Q->x, &t2); // x2 = alpha^2 + fp2_sub(&Q->x, &Q->x, &t0); // x2 = alpha^2 - 4Ay1^2z1^2 + fp2_sub(&Q->x, &Q->x, &t3); + fp2_sub(&Q->x, &Q->x, &t3); // x2 = alpha^2 - 4Ay1^2z1^2 - 8x1y1^2 + fp2_sub(&Q->y, &t3, &Q->x); // y2 = 4x1y1^2 - x2 + fp2_mul(&Q->y, &Q->y, &t2); // y2 = alpha(4x1y1^2 - x2) + fp2_sqr(&t1, &t1); // t1 = 4y1^4 + fp2_sub(&Q->y, &Q->y, &t1); + fp2_sub(&Q->y, &Q->y, &t1); // y2 = alpha(4x1y1^2 - x2) - 8y1^4 + + fp2_select(&Q->x, &Q->x, &P->x, -flag); + fp2_select(&Q->z, &Q->z, &P->z, -flag); +} + +void +DBLW(jac_point_t *Q, fp2_t *u, const jac_point_t *P, const fp2_t *t) +{ // Cost of 3M + 5S. + // Doubling on a Weierstrass curve, representation in modified Jacobian coordinates + // (X:Y:Z:T=a*Z^4) corresponding to (X/Z^2,Y/Z^3), where a is the curve coefficient. + // Formula from https://hyperelliptic.org/EFD/g1p/auto-shortw-modified.html + + uint32_t flag = fp2_is_zero(&P->x) & fp2_is_zero(&P->z); + + fp2_t xx, c, cc, r, s, m; + // XX = X^2 + fp2_sqr(&xx, &P->x); + // A = 2*Y^2 + fp2_sqr(&c, &P->y); + fp2_add(&c, &c, &c); + // AA = A^2 + fp2_sqr(&cc, &c); + // R = 2*AA + fp2_add(&r, &cc, &cc); + // S = (X+A)^2-XX-AA + fp2_add(&s, &P->x, &c); + fp2_sqr(&s, &s); + fp2_sub(&s, &s, &xx); + fp2_sub(&s, &s, &cc); + // M = 3*XX+T1 + fp2_add(&m, &xx, &xx); + fp2_add(&m, &m, &xx); + fp2_add(&m, &m, t); + // X3 = M^2-2*S + fp2_sqr(&Q->x, &m); + fp2_sub(&Q->x, &Q->x, &s); + fp2_sub(&Q->x, &Q->x, &s); + // Z3 = 2*Y*Z + fp2_mul(&Q->z, &P->y, &P->z); + fp2_add(&Q->z, &Q->z, &Q->z); + // Y3 = M*(S-X3)-R + fp2_sub(&Q->y, &s, &Q->x); + fp2_mul(&Q->y, &Q->y, &m); + fp2_sub(&Q->y, &Q->y, &r); + // T3 = 2*R*T1 + fp2_mul(u, t, &r); + fp2_add(u, u, u); + + fp2_select(&Q->x, &Q->x, &P->x, -flag); + fp2_select(&Q->z, &Q->z, &P->z, -flag); +} + +void +select_jac_point(jac_point_t *Q, const jac_point_t *P1, const jac_point_t *P2, const digit_t option) +{ // Select points + // If option = 0 then Q <- P1, else if option = 0xFF...FF then Q <- P2 + fp2_select(&(Q->x), &(P1->x), &(P2->x), option); + fp2_select(&(Q->y), &(P1->y), &(P2->y), option); + fp2_select(&(Q->z), &(P1->z), &(P2->z), option); +} + +void +ADD(jac_point_t *R, const jac_point_t *P, const jac_point_t *Q, const ec_curve_t *AC) +{ + // Addition on a Montgomery curve, representation in Jacobian coordinates (X:Y:Z) corresponding + // to (x,y) = (X/Z^2,Y/Z^3) This version receives the coefficient value A + // + // Complete routine, to handle all edge cases: + // if ZP == 0: # P == inf + // return Q + // if ZQ == 0: # Q == inf + // return P + // dy <- YQ*ZP**3 - YP*ZQ**3 + // dx <- XQ*ZP**2 - XP*ZQ**2 + // if dx == 0: # x1 == x2 + 
// if dy == 0: # ... and y1 == y2: doubling case + // dy <- ZP*ZQ * (3*XP^2 + ZP^2 * (2*A*XP + ZP^2)) + // dx <- 2*YP*ZP + // else: # ... but y1 != y2, thus P = -Q + // return inf + // XR <- dy**2 - dx**2 * (A*ZP^2*ZQ^2 + XP*ZQ^2 + XQ*ZP^2) + // YR <- dy * (XP*ZQ^2 * dx^2 - XR) - YP*ZQ^3 * dx^3 + // ZR <- dx * ZP * ZQ + + // Constant time processing: + // - The case for P == 0 or Q == 0 is handled at the end with conditional select + // - dy and dx are computed for both the normal and doubling cases, we switch when + // dx == dy == 0 for the normal case. + // - If we have that P = -Q then dx = 0 and so ZR will be zero, giving us the point + // at infinity for "free". + // + // These current formula are expensive and I'm probably missing some tricks... + // Thought I'd get the ball rolling. + // Cost 17M + 6S + 13a + fp2_t t0, t1, t2, t3, u1, u2, v1, dx, dy; + + /* If P is zero or Q is zero we will conditionally swap before returning. */ + uint32_t ctl1 = fp2_is_zero(&P->z); + uint32_t ctl2 = fp2_is_zero(&Q->z); + + /* Precompute some values */ + fp2_sqr(&t0, &P->z); // t0 = z1^2 + fp2_sqr(&t1, &Q->z); // t1 = z2^2 + + /* Compute dy and dx for ordinary case */ + fp2_mul(&v1, &t1, &Q->z); // v1 = z2^3 + fp2_mul(&t2, &t0, &P->z); // t2 = z1^3 + fp2_mul(&v1, &v1, &P->y); // v1 = y1z2^3 + fp2_mul(&t2, &t2, &Q->y); // t2 = y2z1^3 + fp2_sub(&dy, &t2, &v1); // dy = y2z1^3 - y1z2^3 + fp2_mul(&u2, &t0, &Q->x); // u2 = x2z1^2 + fp2_mul(&u1, &t1, &P->x); // u1 = x1z2^2 + fp2_sub(&dx, &u2, &u1); // dx = x2z1^2 - x1z2^2 + + /* Compute dy and dx for doubling case */ + fp2_add(&t1, &P->y, &P->y); // dx_dbl = t1 = 2y1 + fp2_add(&t2, &AC->A, &AC->A); // t2 = 2A + fp2_mul(&t2, &t2, &P->x); // t2 = 2Ax1 + fp2_add(&t2, &t2, &t0); // t2 = 2Ax1 + z1^2 + fp2_mul(&t2, &t2, &t0); // t2 = z1^2 * (2Ax1 + z1^2) + fp2_sqr(&t0, &P->x); // t0 = x1^2 + fp2_add(&t2, &t2, &t0); // t2 = x1^2 + z1^2 * (2Ax1 + z1^2) + fp2_add(&t2, &t2, &t0); // t2 = 2*x1^2 + z1^2 * (2Ax1 + z1^2) + fp2_add(&t2, &t2, &t0); // t2 = 3*x1^2 + z1^2 * (2Ax1 + z1^2) + fp2_mul(&t2, &t2, &Q->z); // dy_dbl = t2 = z2 * (3*x1^2 + z1^2 * (2Ax1 + z1^2)) + + /* If dx is zero and dy is zero swap with double variables */ + uint32_t ctl = fp2_is_zero(&dx) & fp2_is_zero(&dy); + fp2_select(&dx, &dx, &t1, ctl); + fp2_select(&dy, &dy, &t2, ctl); + + /* Some more precomputations */ + fp2_mul(&t0, &P->z, &Q->z); // t0 = z1z2 + fp2_sqr(&t1, &t0); // t1 = z1z2^2 + fp2_sqr(&t2, &dx); // t2 = dx^2 + fp2_sqr(&t3, &dy); // t3 = dy^2 + + /* Compute x3 = dy**2 - dx**2 * (A*ZP^2*ZQ^2 + XP*ZQ^2 + XQ*ZP^2) */ + fp2_mul(&R->x, &AC->A, &t1); // x3 = A*(z1z2)^2 + fp2_add(&R->x, &R->x, &u1); // x3 = A*(z1z2)^2 + u1 + fp2_add(&R->x, &R->x, &u2); // x3 = A*(z1z2)^2 + u1 + u2 + fp2_mul(&R->x, &R->x, &t2); // x3 = dx^2 * (A*(z1z2)^2 + u1 + u2) + fp2_sub(&R->x, &t3, &R->x); // x3 = dy^2 - dx^2 * (A*(z1z2)^2 + u1 + u2) + + /* Compute y3 = dy * (XP*ZQ^2 * dx^2 - XR) - YP*ZQ^3 * dx^3*/ + fp2_mul(&R->y, &u1, &t2); // y3 = u1 * dx^2 + fp2_sub(&R->y, &R->y, &R->x); // y3 = u1 * dx^2 - x3 + fp2_mul(&R->y, &R->y, &dy); // y3 = dy * (u1 * dx^2 - x3) + fp2_mul(&t3, &t2, &dx); // t3 = dx^3 + fp2_mul(&t3, &t3, &v1); // t3 = v1 * dx^3 + fp2_sub(&R->y, &R->y, &t3); // y3 = dy * (u1 * dx^2 - x3) - v1 * dx^3 + + /* Compute z3 = dx * z1 * z2 */ + fp2_mul(&R->z, &dx, &t0); + + /* Finally, we need to set R = P is Q.Z = 0 and R = Q if P.Z = 0 */ + select_jac_point(R, R, Q, ctl1); + select_jac_point(R, R, P, ctl2); +} + +void +jac_to_xz_add_components(add_components_t *add_comp, const jac_point_t *P, const 
jac_point_t *Q, const ec_curve_t *AC) +{ + // Take P and Q in E distinct, two jac_point_t, return three components u,v and w in Fp2 such + // that the xz coordinates of P+Q are (u-v:w) and of P-Q are (u+v:w) + + fp2_t t0, t1, t2, t3, t4, t5, t6; + + fp2_sqr(&t0, &P->z); // t0 = z1^2 + fp2_sqr(&t1, &Q->z); // t1 = z2^2 + fp2_mul(&t2, &P->x, &t1); // t2 = x1z2^2 + fp2_mul(&t3, &t0, &Q->x); // t3 = z1^2x2 + fp2_mul(&t4, &P->y, &Q->z); // t4 = y1z2 + fp2_mul(&t4, &t4, &t1); // t4 = y1z2^3 + fp2_mul(&t5, &P->z, &Q->y); // t5 = z1y2 + fp2_mul(&t5, &t5, &t0); // t5 = z1^3y2 + fp2_mul(&t0, &t0, &t1); // t0 = (z1z2)^2 + fp2_mul(&t6, &t4, &t5); // t6 = (z1z_2)^3y1y2 + fp2_add(&add_comp->v, &t6, &t6); // v = 2(z1z_2)^3y1y2 + fp2_sqr(&t4, &t4); // t4 = y1^2z2^6 + fp2_sqr(&t5, &t5); // t5 = z1^6y_2^2 + fp2_add(&t4, &t4, &t5); // t4 = z1^6y_2^2 + y1^2z2^6 + fp2_add(&t5, &t2, &t3); // t5 = x1z2^2 +z_1^2x2 + fp2_add(&t6, &t3, &t3); // t6 = 2z_1^2x2 + fp2_sub(&t6, &t5, &t6); // t6 = lambda = x1z2^2 - z_1^2x2 + fp2_sqr(&t6, &t6); // t6 = lambda^2 = (x1z2^2 - z_1^2x2)^2 + fp2_mul(&t1, &AC->A, &t0); // t1 = A*(z1z2)^2 + fp2_add(&t1, &t5, &t1); // t1 = gamma =A*(z1z2)^2 + x1z2^2 +z_1^2x2 + fp2_mul(&t1, &t1, &t6); // t1 = gamma*lambda^2 + fp2_sub(&add_comp->u, &t4, &t1); // u = z1^6y_2^2 + y1^2z2^6 - gamma*lambda^2 + fp2_mul(&add_comp->w, &t6, &t0); // w = (z1z2)^2(lambda)^2 +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec_params.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec_params.c new file mode 100644 index 0000000000..ae214aabed --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec_params.c @@ -0,0 +1,4 @@ +#include +// p+1 divided by the power of 2 +const digit_t p_cofactor_for_2f[1] = {65}; + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec_params.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec_params.h new file mode 100644 index 0000000000..941abd5452 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec_params.h @@ -0,0 +1,12 @@ +#ifndef EC_PARAMS_H +#define EC_PARAMS_H + +#include + +#define TORSION_EVEN_POWER 376 + +// p+1 divided by the power of 2 +extern const digit_t p_cofactor_for_2f[1]; +#define P_COFACTOR_FOR_2F_BITLENGTH 7 + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/encode_signature.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/encode_signature.c new file mode 100644 index 0000000000..112c695941 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/encode_signature.c @@ -0,0 +1,208 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +typedef unsigned char byte_t; + +// digits + +static void +encode_digits(byte_t *enc, const digit_t *x, size_t nbytes) +{ +#ifdef TARGET_BIG_ENDIAN + const size_t ndigits = nbytes / sizeof(digit_t); + const size_t rem = nbytes % sizeof(digit_t); + + for (size_t i = 0; i < ndigits; i++) + ((digit_t *)enc)[i] = BSWAP_DIGIT(x[i]); + if (rem) { + digit_t ld = BSWAP_DIGIT(x[ndigits]); + memcpy(enc + ndigits * sizeof(digit_t), (byte_t *)&ld, rem); + } +#else + memcpy(enc, (const byte_t *)x, nbytes); +#endif +} + +static void +decode_digits(digit_t *x, const byte_t *enc, size_t nbytes, size_t ndigits) +{ + assert(nbytes <= ndigits * sizeof(digit_t)); + memcpy((byte_t *)x, enc, nbytes); + memset((byte_t *)x + nbytes, 0, ndigits * sizeof(digit_t) - nbytes); + +#ifdef TARGET_BIG_ENDIAN + for (size_t i = 0; i < ndigits; i++) + x[i] = BSWAP_DIGIT(x[i]); +#endif +} + +// ibz_t + +static byte_t * +ibz_to_bytes(byte_t *enc, const ibz_t *x, size_t 
nbytes, bool sgn) +{ +#ifndef NDEBUG + { + // make sure there is enough space + ibz_t abs, bnd; + ibz_init(&bnd); + ibz_init(&abs); + ibz_pow(&bnd, &ibz_const_two, 8 * nbytes - sgn); + ibz_abs(&abs, x); + assert(ibz_cmp(&abs, &bnd) < 0); + ibz_finalize(&bnd); + ibz_finalize(&abs); + } +#endif + const size_t digits = (nbytes + sizeof(digit_t) - 1) / sizeof(digit_t); + digit_t d[digits]; + memset(d, 0, sizeof(d)); + if (ibz_cmp(x, &ibz_const_zero) >= 0) { + // non-negative, straightforward. + ibz_to_digits(d, x); + } else { + assert(sgn); + // negative; use two's complement. + ibz_t tmp; + ibz_init(&tmp); + ibz_neg(&tmp, x); + ibz_sub(&tmp, &tmp, &ibz_const_one); + ibz_to_digits(d, &tmp); + for (size_t i = 0; i < digits; ++i) + d[i] = ~d[i]; +#ifndef NDEBUG + { + // make sure the result is correct + ibz_t chk; + ibz_init(&chk); + ibz_copy_digit_array(&tmp, d); + ibz_sub(&tmp, &tmp, x); + ibz_pow(&chk, &ibz_const_two, 8 * sizeof(d)); + assert(!ibz_cmp(&tmp, &chk)); + ibz_finalize(&chk); + } +#endif + ibz_finalize(&tmp); + } + encode_digits(enc, d, nbytes); + return enc + nbytes; +} + +static const byte_t * +ibz_from_bytes(ibz_t *x, const byte_t *enc, size_t nbytes, bool sgn) +{ + assert(nbytes > 0); + const size_t ndigits = (nbytes + sizeof(digit_t) - 1) / sizeof(digit_t); + assert(ndigits > 0); + digit_t d[ndigits]; + memset(d, 0, sizeof(d)); + decode_digits(d, enc, nbytes, ndigits); + if (sgn && enc[nbytes - 1] >> 7) { + // negative, decode two's complement + const size_t s = sizeof(digit_t) - 1 - (sizeof(d) - nbytes); + assert(s < sizeof(digit_t)); + d[ndigits - 1] |= ((digit_t)-1) >> 8 * s << 8 * s; + for (size_t i = 0; i < ndigits; ++i) + d[i] = ~d[i]; + ibz_copy_digits(x, d, ndigits); + ibz_add(x, x, &ibz_const_one); + ibz_neg(x, x); + } else { + // non-negative + ibz_copy_digits(x, d, ndigits); + } + return enc + nbytes; +} + +// public API + +void +secret_key_to_bytes(byte_t *enc, const secret_key_t *sk, const public_key_t *pk) +{ +#ifndef NDEBUG + byte_t *const start = enc; +#endif + + enc = public_key_to_bytes(enc, pk); + +#ifndef NDEBUG + { + fp2_t lhs, rhs; + fp2_mul(&lhs, &sk->curve.A, &pk->curve.C); + fp2_mul(&rhs, &sk->curve.C, &pk->curve.A); + assert(fp2_is_equal(&lhs, &rhs)); + } +#endif + + enc = ibz_to_bytes(enc, &sk->secret_ideal.norm, FP_ENCODED_BYTES, false); + { + quat_alg_elem_t gen; + quat_alg_elem_init(&gen); + int ret UNUSED = quat_lideal_generator(&gen, &sk->secret_ideal, &QUATALG_PINFTY); + assert(ret); + // we skip encoding the denominator since it won't change the generated ideal +#ifndef NDEBUG + { + // let's make sure that the denominator is indeed coprime to the norm of the ideal + ibz_t gcd; + ibz_init(&gcd); + ibz_gcd(&gcd, &gen.denom, &sk->secret_ideal.norm); + assert(!ibz_cmp(&gcd, &ibz_const_one)); + ibz_finalize(&gcd); + } +#endif + enc = ibz_to_bytes(enc, &gen.coord[0], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord[1], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord[2], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord[3], FP_ENCODED_BYTES, true); + quat_alg_elem_finalize(&gen); + } + + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][1], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][1], TORSION_2POWER_BYTES, false); + + assert(enc - start == SECRETKEY_BYTES); +} + +void 
+secret_key_from_bytes(secret_key_t *sk, public_key_t *pk, const byte_t *enc) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + + enc = public_key_from_bytes(pk, enc); + + { + ibz_t norm; + ibz_init(&norm); + quat_alg_elem_t gen; + quat_alg_elem_init(&gen); + enc = ibz_from_bytes(&norm, enc, FP_ENCODED_BYTES, false); + enc = ibz_from_bytes(&gen.coord[0], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord[1], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord[2], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord[3], enc, FP_ENCODED_BYTES, true); + quat_lideal_create(&sk->secret_ideal, &gen, &norm, &MAXORD_O0, &QUATALG_PINFTY); + ibz_finalize(&norm); + quat_alg_elem_finalize(&gen); + } + + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][1], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][1], enc, TORSION_2POWER_BYTES, false); + + assert(enc - start == SECRETKEY_BYTES); + + sk->curve = pk->curve; + ec_curve_to_basis_2f_from_hint(&sk->canonical_basis, &sk->curve, TORSION_EVEN_POWER, pk->hint_pk); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/encode_verification.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/encode_verification.c new file mode 100644 index 0000000000..fecdb9c259 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/encode_verification.c @@ -0,0 +1,220 @@ +#include +#include +#include +#include +#include +#include + +typedef unsigned char byte_t; + +// digits + +static void +encode_digits(byte_t *enc, const digit_t *x, size_t nbytes) +{ +#ifdef TARGET_BIG_ENDIAN + const size_t ndigits = nbytes / sizeof(digit_t); + const size_t rem = nbytes % sizeof(digit_t); + + for (size_t i = 0; i < ndigits; i++) + ((digit_t *)enc)[i] = BSWAP_DIGIT(x[i]); + if (rem) { + digit_t ld = BSWAP_DIGIT(x[ndigits]); + memcpy(enc + ndigits * sizeof(digit_t), (byte_t *)&ld, rem); + } +#else + memcpy(enc, (const byte_t *)x, nbytes); +#endif +} + +static void +decode_digits(digit_t *x, const byte_t *enc, size_t nbytes, size_t ndigits) +{ + assert(nbytes <= ndigits * sizeof(digit_t)); + memcpy((byte_t *)x, enc, nbytes); + memset((byte_t *)x + nbytes, 0, ndigits * sizeof(digit_t) - nbytes); + +#ifdef TARGET_BIG_ENDIAN + for (size_t i = 0; i < ndigits; i++) + x[i] = BSWAP_DIGIT(x[i]); +#endif +} + +// fp2_t + +static byte_t * +fp2_to_bytes(byte_t *enc, const fp2_t *x) +{ + fp2_encode(enc, x); + return enc + FP2_ENCODED_BYTES; +} + +static const byte_t * +fp2_from_bytes(fp2_t *x, const byte_t *enc) +{ + fp2_decode(x, enc); + return enc + FP2_ENCODED_BYTES; +} + +// curves and points + +static byte_t * +proj_to_bytes(byte_t *enc, const fp2_t *x, const fp2_t *z) +{ + assert(!fp2_is_zero(z)); + fp2_t tmp = *z; + fp2_inv(&tmp); +#ifndef NDEBUG + { + fp2_t chk; + fp2_mul(&chk, z, &tmp); + fp2_t one; + fp2_set_one(&one); + assert(fp2_is_equal(&chk, &one)); + } +#endif + fp2_mul(&tmp, x, &tmp); + enc = fp2_to_bytes(enc, &tmp); + return enc; +} + +static const byte_t * +proj_from_bytes(fp2_t *x, fp2_t *z, const byte_t *enc) +{ + enc = fp2_from_bytes(x, enc); + fp2_set_one(z); + return enc; +} + +static byte_t * +ec_curve_to_bytes(byte_t *enc, const ec_curve_t *curve) +{ + return proj_to_bytes(enc, &curve->A, &curve->C); +} + +static const byte_t * +ec_curve_from_bytes(ec_curve_t *curve, const byte_t *enc) +{ + 
memset(curve, 0, sizeof(*curve)); + return proj_from_bytes(&curve->A, &curve->C, enc); +} + +static byte_t * +ec_point_to_bytes(byte_t *enc, const ec_point_t *point) +{ + return proj_to_bytes(enc, &point->x, &point->z); +} + +static const byte_t * +ec_point_from_bytes(ec_point_t *point, const byte_t *enc) +{ + return proj_from_bytes(&point->x, &point->z, enc); +} + +static byte_t * +ec_basis_to_bytes(byte_t *enc, const ec_basis_t *basis) +{ + enc = ec_point_to_bytes(enc, &basis->P); + enc = ec_point_to_bytes(enc, &basis->Q); + enc = ec_point_to_bytes(enc, &basis->PmQ); + return enc; +} + +static const byte_t * +ec_basis_from_bytes(ec_basis_t *basis, const byte_t *enc) +{ + enc = ec_point_from_bytes(&basis->P, enc); + enc = ec_point_from_bytes(&basis->Q, enc); + enc = ec_point_from_bytes(&basis->PmQ, enc); + return enc; +} + +// public API + +byte_t * +public_key_to_bytes(byte_t *enc, const public_key_t *pk) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + enc = ec_curve_to_bytes(enc, &pk->curve); + *enc++ = pk->hint_pk; + assert(enc - start == PUBLICKEY_BYTES); + return enc; +} + +const byte_t * +public_key_from_bytes(public_key_t *pk, const byte_t *enc) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + enc = ec_curve_from_bytes(&pk->curve, enc); + pk->hint_pk = *enc++; + assert(enc - start == PUBLICKEY_BYTES); + return enc; +} + +void +signature_to_bytes(byte_t *enc, const signature_t *sig) +{ +#ifndef NDEBUG + byte_t *const start = enc; +#endif + + enc = fp2_to_bytes(enc, &sig->E_aux_A); + + *enc++ = sig->backtracking; + *enc++ = sig->two_resp_length; + + size_t nbytes = (SQIsign_response_length + 9) / 8; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[0][0], nbytes); + enc += nbytes; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[0][1], nbytes); + enc += nbytes; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[1][0], nbytes); + enc += nbytes; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[1][1], nbytes); + enc += nbytes; + + nbytes = SECURITY_BITS / 8; + encode_digits(enc, sig->chall_coeff, nbytes); + enc += nbytes; + + *enc++ = sig->hint_aux; + *enc++ = sig->hint_chall; + + assert(enc - start == SIGNATURE_BYTES); +} + +void +signature_from_bytes(signature_t *sig, const byte_t *enc) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + + enc = fp2_from_bytes(&sig->E_aux_A, enc); + + sig->backtracking = *enc++; + sig->two_resp_length = *enc++; + + size_t nbytes = (SQIsign_response_length + 9) / 8; + decode_digits(sig->mat_Bchall_can_to_B_chall[0][0], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + decode_digits(sig->mat_Bchall_can_to_B_chall[0][1], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + decode_digits(sig->mat_Bchall_can_to_B_chall[1][0], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + decode_digits(sig->mat_Bchall_can_to_B_chall[1][1], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + + nbytes = SECURITY_BITS / 8; + decode_digits(sig->chall_coeff, enc, nbytes, NWORDS_ORDER); + enc += nbytes; + + sig->hint_aux = *enc++; + sig->hint_chall = *enc++; + + assert(enc - start == SIGNATURE_BYTES); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/encoded_sizes.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/encoded_sizes.h new file mode 100644 index 0000000000..50a8781bb6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/encoded_sizes.h @@ -0,0 +1,11 @@ +#define SECURITY_BITS 192 +#define SQIsign_response_length 192 +#define HASH_ITERATIONS 256 +#define FP_ENCODED_BYTES 48 +#define FP2_ENCODED_BYTES 96 +#define 
EC_CURVE_ENCODED_BYTES 96 +#define EC_POINT_ENCODED_BYTES 96 +#define EC_BASIS_ENCODED_BYTES 288 +#define PUBLICKEY_BYTES 97 +#define SECRETKEY_BYTES 529 +#define SIGNATURE_BYTES 224 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/endomorphism_action.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/endomorphism_action.c new file mode 100644 index 0000000000..8aafeac12b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/endomorphism_action.c @@ -0,0 +1,3812 @@ +#include +#include +#include +const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x7e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1} +#elif RADIX == 32 +{0x1f8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x1, 0x0, 0x0, 0x0, 0x0, 0x3f00000000000000} +#else +{0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 
0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x1196, 0x134b, 0xdbd, 0x118d, 0x712, 0x1646, 0x5d7, 0x8eb, 0x431, 0xf5b, 0x161e, 0x13b6, 0x1c07, 0x42, 0x8ba, 0xeec, 0x1a43, 0x545, 0x1cdb, 0x1659, 0x1614, 0xde, 0x72d, 0x1b80, 0x1706, 0x15a3, 0x894, 0xd4a, 0x1b2f, 0x12} +#elif RADIX == 32 +{0x9a5c65a, 0xa31adbd, 0x7b231c4, 0xc51d65d, 0x1e7ad90, 0x1e76d6, 0x8ba0217, 0xe90ddd8, 0x3cdb2a2, 0xf5852cb, 0x72d06, 0xd1dc1b7, 0xa94894a, 0x14cbd} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x31c4a31adbd9a5c6, 0xe7ad90c51d65d7b2, 0x88ba021701e76d61, 0x2cb3cdb2a2e90ddd, 0xdc1b70072d06f585, 0x16eecbda94894ad1} +#else +{0x94635b7b34b8c, 0x431475975ec8c7, 0x380f3b6b0f3d6c, 0x2e90ddd88ba021, 0x5eb0a59679b654, 0x347706dc01cb41, 0xb7765ed4a44a5} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xa85, 0x10cc, 0x1ef, 0xb0b, 0x1082, 0x5be, 0xd14, 0x1100, 0x1a33, 0x174b, 0x181c, 0x83e, 0x1034, 0x18ba, 0x205, 0x1f39, 0x1e9, 0x1998, 0x130e, 0x801, 0xfeb, 0x698, 0xdf9, 0x6a5, 0x5b6, 0x2c8, 0x1283, 0xad9, 0x960, 0x1e} +#elif RADIX == 32 +{0x8662a17, 0x96161ef, 0x42df420, 0xce200d1, 0x1cba5e8, 0xd107d8, 0x205c5d4, 0x7a7e72, 0x330eccc, 0xc3fad00, 0x4adf934, 0x6416d8d, 0x5b32831, 0x2f581} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf42096161ef8662a, 0xcba5e8ce200d142d, 0x2205c5d40d107d81, 0xd00330eccc07a7e7, 0x16d8d4adf934c3fa, 0x6065815b3283164} +#else +{0x412c2c3df0cc54, 0x2338803450b7d0, 0x206883ec0e5d2f, 0x407a7e72205c5d, 0x187f5a00661d99, 0x5905b6352b7e4d, 0x3032c0ad99418} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x16ed, 0x818, 0x127a, 0xcfb, 0x1be6, 0x1b40, 0x1bf1, 0xe75, 0x129c, 0x151, 0x425, 0x142e, 0x1edb, 0x254, 0x5cc, 0x1a5b, 0x1e1d, 0x1e27, 0x1a12, 0x8a8, 0x59e, 0x933, 0x1647, 0x686, 0x19e, 0x1e51, 0x151f, 0x1b6e, 0x1efe, 0xd} +#elif RADIX == 32 +{0x40c5bb5, 0x99f727a, 0x1da06f9, 0x71cebbf, 0x250a8ca, 0xb6e85c4, 0x5cc12a7, 0xf8774b6, 0x1a12f13, 0x9967915, 0xd64749, 0x288678d, 0x6dd51ff, 0x2ebfb} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6f999f727a40c5b, 0x50a8ca71cebbf1da, 0x65cc12a7b6e85c42, 0x9151a12f13f8774b, 0x8678d0d647499967, 0x2e23bfb6dd51ff28} +#else 
+{0x7333ee4f4818b7, 0x29c73aefc7681b, 0x3db742e2128546, 0x3f8774b65cc12a, 0x332cf22a3425e2, 0x4a219e343591d2, 0x6d1dfdb6ea8ff} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x18a9, 0x1838, 0x1588, 0x1720, 0xf3f, 0x1fcd, 0x44d, 0x1e6b, 0x681, 0x1249, 0x1f8a, 0x5af, 0x1f58, 0x1c12, 0xf21, 0x1887, 0x278, 0x156a, 0xbfe, 0x765, 0x12f7, 0x4da, 0x16ce, 0x7c1, 0x1c04, 0x1773, 0x853, 0xab7, 0xe1d, 0x1a} +#elif RADIX == 32 +{0xc1c62a7, 0xee41588, 0xdfe6bcf, 0x7cd644, 0x8a9249a, 0xd60b5ff, 0xf21e097, 0x9e310e, 0xabfeab5, 0xd4bdcec, 0x836ce26, 0xb9f010f, 0x56e853b, 0x10875} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6bcfee41588c1c62, 0xa9249a07cd644dfe, 0xef21e097d60b5ff8, 0xcecabfeab509e310, 0xf010f836ce26d4bd, 0x2a7787556e853bb9} +#else +{0x1fdc82b11838c5, 0x681f359137f9af, 0x3eb05affc54924, 0x509e310ef21e09, 0x5a97b9d957fd56, 0x6e7c043e0db389, 0x4fbc3aab7429d} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1e36, 0x1718, 0xced, 0x186e, 0x83d, 0x1a23, 0xf5b, 0x5ca, 0x194d, 0x1bd8, 0xb67, 0x9f7, 0x1806, 0x17ae, 0x508, 0x117f, 0x5cc, 0x1809, 0x14b1, 0x85f, 0xcf0, 0x1b0c, 0x1753, 0x1484, 0xb5f, 0x1d62, 0x808, 0x1cc3, 0x844, 0x9} +#elif RADIX == 32 +{0xb8c78d9, 0x70dcced, 0xbd11a0f, 0x34b94f5, 0x67dec65, 0x193eeb, 0x508bd76, 0x97322fe, 0xf4b1c04, 0x633c10b, 0x9753d8, 0xb12d7e9, 0x986808e, 0x9113} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x1a0f70dccedb8c78, 0x7dec6534b94f5bd1, 0xe508bd760193eeb6, 0x10bf4b1c0497322f, 0x2d7e909753d8633c, 0x3722113986808eb1} +#else +{0x1ee1b99db718f1, 0x14d2e53d6f4468, 0x300c9f75b3ef63, 0x497322fe508bd7, 0xc678217e96380, 0x2c4b5fa425d4f6, 0xb51089cc34047} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1785, 0x1652, 0x4b4, 0x1b37, 0x918, 0x12d, 0x1340, 0x16d3, 0xee, 0xb43, 0x52a, 0x1ff, 0x1e6b, 0x1424, 0x609, 0x1e2c, 0x19bd, 0x18f, 0x174a, 0x134d, 0x6f4, 0xa33, 0x1d5c, 0xa53, 0x73c, 0x361, 0x372, 0x1242, 0x87c, 0x17} +#elif RADIX == 32 +{0xb295e16, 0x366e4b4, 0x96a46, 0xbada734, 0x2a5a183, 0x9ac3fe5, 0x609a127, 0xe6f7c58, 0xb74a0c7, 0x99bd269, 0xa7d5c51, 0xb09cf14, 0x4843721, 0x381f2} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6a46366e4b4b295e, 0xa5a183bada734009, 0x8609a1279ac3fe52, 0x269b74a0c7e6f7c5, 0x9cf14a7d5c5199bd, 0x5ce1f24843721b0} +#else +{0xc6cdc969652bc, 0xeeb69cd0025a9, 0x3cd61ff2952d0c, 0x7e6f7c58609a12, 0x3337a4d36e9418, 0x6c273c529f5714, 0x2e70f92421b90} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x97d1,0x8f9c,0x8477,0x3a,0x39d2,0x66ae,0xabd6,0x13da,0xf153,0xb9e7,0xf8db,0x5f9f,0x36f7,0xfcb2,0xa4f0,0x62b9,0x5c07,0x3694,0x539d,0xe8c5,0x7631,0xf16c,0xc691,0x9a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8f9c97d1,0x3a8477,0x66ae39d2,0x13daabd6,0xb9e7f153,0x5f9ff8db,0xfcb236f7,0x62b9a4f0,0x36945c07,0xe8c5539d,0xf16c7631,0x9ac691}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3a84778f9c97d1,0x13daabd666ae39d2,0x5f9ff8dbb9e7f153,0x62b9a4f0fcb236f7,0xe8c5539d36945c07,0x9ac691f16c7631}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1ac2,0xbac6,0x4a43,0x76df,0xe925,0x4a2d,0x1cf8,0xd32d,0x7867,0x1dc0,0xc02f,0xdf8b,0xf122,0x4f0c,0xe07d,0x4a9e,0xa97,0x7ce2,0x8791,0x3570,0x1749,0x519b,0x34cc,0x66}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbac61ac2,0x76df4a43,0x4a2de925,0xd32d1cf8,0x1dc07867,0xdf8bc02f,0x4f0cf122,0x4a9ee07d,0x7ce20a97,0x35708791,0x519b1749,0x6634cc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x76df4a43bac61ac2,0xd32d1cf84a2de925,0xdf8bc02f1dc07867,0x4a9ee07d4f0cf122,0x357087917ce20a97,0x6634cc519b1749}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xfb0f,0x234,0xa481,0x9c61,0x4bd1,0xcd58,0x3a72,0xe38c,0xbe7b,0xea3,0xf102,0xdc99,0xf180,0xb229,0x5d86,0xef91,0x46c4,0x8831,0xa9d5,0xf66f,0xa451,0x6c02,0x9ebd,0xfc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x234fb0f,0x9c61a481,0xcd584bd1,0xe38c3a72,0xea3be7b,0xdc99f102,0xb229f180,0xef915d86,0x883146c4,0xf66fa9d5,0x6c02a451,0xfc9ebd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c61a4810234fb0f,0xe38c3a72cd584bd1,0xdc99f1020ea3be7b,0xef915d86b229f180,0xf66fa9d5883146c4,0xfc9ebd6c02a451}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x682f,0x7063,0x7b88,0xffc5,0xc62d,0x9951,0x5429,0xec25,0xeac,0x4618,0x724,0xa060,0xc908,0x34d,0x5b0f,0x9d46,0xa3f8,0xc96b,0xac62,0x173a,0x89ce,0xe93,0x396e,0x65}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7063682f,0xffc57b88,0x9951c62d,0xec255429,0x46180eac,0xa0600724,0x34dc908,0x9d465b0f,0xc96ba3f8,0x173aac62,0xe9389ce,0x65396e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xffc57b887063682f,0xec2554299951c62d,0xa060072446180eac,0x9d465b0f034dc908,0x173aac62c96ba3f8,0x65396e0e9389ce}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x6f75,0xc742,0x1abb,0xc3b2,0x4bff,0xf015,0x66b,0xc51b,0xacd6,0x30c2,0xf641,0x625b,0x2e88,0xbe5,0x5121,0xbe40,0x8ac2,0x755b,0xb8c9,0x4eb6,0xb07,0x46b6,0x84cf,0x47}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc7426f75,0xc3b21abb,0xf0154bff,0xc51b066b,0x30c2acd6,0x625bf641,0xbe52e88,0xbe405121,0x755b8ac2,0x4eb6b8c9,0x46b60b07,0x4784cf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc3b21abbc7426f75,0xc51b066bf0154bff,0x625bf64130c2acd6,0xbe4051210be52e88,0x4eb6b8c9755b8ac2,0x4784cf46b60b07}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x9db8,0x479b,0xe350,0xae1e,0x4f92,0x6572,0x60a4,0x89ed,0x12f4,0xb88d,0x64b6,0xf9ca,0x26b,0xc086,0x83b8,0xb2c7,0x88a8,0xe99b,0x57b3,0x9017,0xe033,0x9d5d,0x5de6,0x37}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x479b9db8,0xae1ee350,0x65724f92,0x89ed60a4,0xb88d12f4,0xf9ca64b6,0xc086026b,0xb2c783b8,0xe99b88a8,0x901757b3,0x9d5de033,0x375de6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xae1ee350479b9db8,0x89ed60a465724f92,0xf9ca64b6b88d12f4,0xb2c783b8c086026b,0x901757b3e99b88a8,0x375de69d5de033}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x23f7,0x1d02,0x3431,0x354e,0xba31,0x23a4,0xe6c4,0x6a9c,0x64c,0xea8,0x419f,0xe54f,0x3cb9,0xc02d,0x3caf,0xe7a3,0x2d32,0x31d4,0xed80,0x47d9,0x2086,0x69f4,0x80d3,0x25}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1d0223f7,0x354e3431,0x23a4ba31,0x6a9ce6c4,0xea8064c,0xe54f419f,0xc02d3cb9,0xe7a33caf,0x31d42d32,0x47d9ed80,0x69f42086,0x2580d3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x354e34311d0223f7,0x6a9ce6c423a4ba31,0xe54f419f0ea8064c,0xe7a33cafc02d3cb9,0x47d9ed8031d42d32,0x2580d369f42086}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x908b,0x38bd,0xe544,0x3c4d,0xb400,0xfea,0xf994,0x3ae4,0x5329,0xcf3d,0x9be,0x9da4,0xd177,0xf41a,0xaede,0x41bf,0x753d,0x8aa4,0x4736,0xb149,0xf4f8,0xb949,0x7b30,0xb8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x38bd908b,0x3c4de544,0xfeab400,0x3ae4f994,0xcf3d5329,0x9da409be,0xf41ad177,0x41bfaede,0x8aa4753d,0xb1494736,0xb949f4f8,0xb87b30}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3c4de54438bd908b,0x3ae4f9940feab400,0x9da409becf3d5329,0x41bfaedef41ad177,0xb14947368aa4753d,0xb87b30b949f4f8}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x97d1,0x8f9c,0x8477,0x3a,0x39d2,0x66ae,0xabd6,0x13da,0xf153,0xb9e7,0xf8db,0x5f9f,0x36f7,0xfcb2,0xa4f0,0x62b9,0x5c07,0x3694,0x539d,0xe8c5,0x7631,0xf16c,0xc691,0x9a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8f9c97d1,0x3a8477,0x66ae39d2,0x13daabd6,0xb9e7f153,0x5f9ff8db,0xfcb236f7,0x62b9a4f0,0x36945c07,0xe8c5539d,0xf16c7631,0x9ac691}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3a84778f9c97d1,0x13daabd666ae39d2,0x5f9ff8dbb9e7f153,0x62b9a4f0fcb236f7,0xe8c5539d36945c07,0x9ac691f16c7631}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1ac2,0xbac6,0x4a43,0x76df,0xe925,0x4a2d,0x1cf8,0xd32d,0x7867,0x1dc0,0xc02f,0xdf8b,0xf122,0x4f0c,0xe07d,0x4a9e,0xa97,0x7ce2,0x8791,0x3570,0x1749,0x519b,0x34cc,0x66}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbac61ac2,0x76df4a43,0x4a2de925,0xd32d1cf8,0x1dc07867,0xdf8bc02f,0x4f0cf122,0x4a9ee07d,0x7ce20a97,0x35708791,0x519b1749,0x6634cc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x76df4a43bac61ac2,0xd32d1cf84a2de925,0xdf8bc02f1dc07867,0x4a9ee07d4f0cf122,0x357087917ce20a97,0x6634cc519b1749}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xfb0f,0x234,0xa481,0x9c61,0x4bd1,0xcd58,0x3a72,0xe38c,0xbe7b,0xea3,0xf102,0xdc99,0xf180,0xb229,0x5d86,0xef91,0x46c4,0x8831,0xa9d5,0xf66f,0xa451,0x6c02,0x9ebd,0xfc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x234fb0f,0x9c61a481,0xcd584bd1,0xe38c3a72,0xea3be7b,0xdc99f102,0xb229f180,0xef915d86,0x883146c4,0xf66fa9d5,0x6c02a451,0xfc9ebd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c61a4810234fb0f,0xe38c3a72cd584bd1,0xdc99f1020ea3be7b,0xef915d86b229f180,0xf66fa9d5883146c4,0xfc9ebd6c02a451}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x682f,0x7063,0x7b88,0xffc5,0xc62d,0x9951,0x5429,0xec25,0xeac,0x4618,0x724,0xa060,0xc908,0x34d,0x5b0f,0x9d46,0xa3f8,0xc96b,0xac62,0x173a,0x89ce,0xe93,0x396e,0x65}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7063682f,0xffc57b88,0x9951c62d,0xec255429,0x46180eac,0xa0600724,0x34dc908,0x9d465b0f,0xc96ba3f8,0x173aac62,0xe9389ce,0x65396e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xffc57b887063682f,0xec2554299951c62d,0xa060072446180eac,0x9d465b0f034dc908,0x173aac62c96ba3f8,0x65396e0e9389ce}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x83a3,0xab6f,0x4f99,0xe1f6,0xc2e8,0x2b61,0xd921,0xec7a,0x4f14,0x7555,0xf78e,0xe0fd,0xb2bf,0x44b,0xfb09,0x107c,0xf365,0x55f7,0x633,0x9bbe,0x409c,0x9c11,0x25b0,0xf1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xab6f83a3,0xe1f64f99,0x2b61c2e8,0xec7ad921,0x75554f14,0xe0fdf78e,0x44bb2bf,0x107cfb09,0x55f7f365,0x9bbe0633,0x9c11409c,0xf125b0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe1f64f99ab6f83a3,0xec7ad9212b61c2e8,0xe0fdf78e75554f14,0x107cfb09044bb2bf,0x9bbe063355f7f365,0xf125b09c11409c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xdc3d,0x130,0x16ca,0x127f,0x1c5c,0x57d0,0x3ece,0x2e8d,0xc5ae,0xeb26,0x1272,0x6cab,0x79c7,0x7c9,0x321b,0xfeb3,0xc99f,0xb33e,0xefa2,0x62c3,0x7bbe,0x777c,0xc959,0x4e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x130dc3d,0x127f16ca,0x57d01c5c,0x2e8d3ece,0xeb26c5ae,0x6cab1272,0x7c979c7,0xfeb3321b,0xb33ec99f,0x62c3efa2,0x777c7bbe,0x4ec959}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x127f16ca0130dc3d,0x2e8d3ece57d01c5c,0x6cab1272eb26c5ae,0xfeb3321b07c979c7,0x62c3efa2b33ec99f,0x4ec959777c7bbe}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8f83,0xf9b,0xec59,0x68d7,0x8301,0x787e,0x909b,0x2714,0xe264,0x8ea5,0x9950,0x60f4,0x971d,0x392b,0x4d1b,0xeb9a,0xb9fb,0xdd02,0xcbaa,0x1f24,0x626c,0x6afb,0xfc8,0x91}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf9b8f83,0x68d7ec59,0x787e8301,0x2714909b,0x8ea5e264,0x60f49950,0x392b971d,0xeb9a4d1b,0xdd02b9fb,0x1f24cbaa,0x6afb626c,0x910fc8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x68d7ec590f9b8f83,0x2714909b787e8301,0x60f499508ea5e264,0xeb9a4d1b392b971d,0x1f24cbaadd02b9fb,0x910fc86afb626c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x7c5d,0x5490,0xb066,0x1e09,0x3d17,0xd49e,0x26de,0x1385,0xb0eb,0x8aaa,0x871,0x1f02,0x4d40,0xfbb4,0x4f6,0xef83,0xc9a,0xaa08,0xf9cc,0x6441,0xbf63,0x63ee,0xda4f,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x54907c5d,0x1e09b066,0xd49e3d17,0x138526de,0x8aaab0eb,0x1f020871,0xfbb44d40,0xef8304f6,0xaa080c9a,0x6441f9cc,0x63eebf63,0xeda4f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1e09b06654907c5d,0x138526ded49e3d17,0x1f0208718aaab0eb,0xef8304f6fbb44d40,0x6441f9ccaa080c9a,0xeda4f63eebf63}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x194c, 0x43, 0xc70, 0x1c7a, 0xa7a, 0xdf7, 0x1564, 0x1809, 0x8e8, 0x3a5, 0x1399, 0xf0a, 0x914, 0x1a27, 0xb1c, 0xcd0, 0xfc, 0xaa4, 0xd87, 0x1ed2, 0x2c0, 0x8e4, 
0x1b93, 0x1a3f, 0x1d9b, 0x1a00, 0xbce, 0x17d2, 0x1a07, 0xf} +#elif RADIX == 32 +{0x21e531, 0xb8f4c70, 0x46fba9e, 0xa301356, 0x991d2a3, 0x451e153, 0xb1cd13a, 0x3f19a0, 0x4d87552, 0x20b03da, 0x7fb9347, 0x766f4, 0xfa4bced, 0x3d81e} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xba9eb8f4c70021e5, 0x91d2a3a30135646f, 0xb1cd13a451e1539, 0x3da4d8755203f19a, 0x766f47fb934720b0, 0xcae81efa4bced00} +#else +{0x3d71e98e0043ca, 0xe8c04d591beea, 0x5228f0a9cc8e95, 0x203f19a0b1cd13, 0x641607b49b0eaa, 0x401d9bd1fee4d1, 0x65740f7d25e76} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x1ed1, 0x10, 0x131c, 0x171e, 0x1a9e, 0x37d, 0xd59, 0x602, 0xa3a, 0x8e9, 0x14e6, 0x3c2, 0x1a45, 0x689, 0x2c7, 0x334, 0x3f, 0x1aa9, 0x1361, 0x7b4, 0xb0, 0x1a39, 0x1ee4, 0x1e8f, 0x766, 0x1680, 0x12f3, 0x1df4, 0x1e81, 0x4} +#elif RADIX == 32 +{0x87b44, 0xae3d31c, 0x91beea7, 0xe8c04d5, 0xe6474a8, 0x9147854, 0x2c7344e, 0x80fc668, 0x9361d54, 0xc82c0f6, 0x1fee4d1, 0x401d9bd, 0xbe92f3b, 0x27a07} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xeea7ae3d31c0087b, 0x6474a8e8c04d591b, 0x82c7344e9147854e, 0xf69361d5480fc66, 0x1d9bd1fee4d1c82c, 0x116ba07be92f3b40} +#else +{0x4f5c7a638010f6, 0x23a30135646fba, 0x748a3c2a7323a5, 0x480fc6682c7344, 0x390581ed26c3aa, 0x500766f47fb934, 0x8b5d03df4979d} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x187c, 0x10c9, 0xfda, 0x189b, 0x3b, 0xbcd, 0x16ab, 0xabe, 0x102, 0x19b7, 0x288, 0x1c7e, 0x1ee8, 0x452, 0x853, 0x1b5a, 0x1ca8, 0x1129, 0xd16, 0x168a, 0x1414, 0x6ed, 0xc0, 0xda2, 0x19ae, 0x12fe, 0x1813, 0xdd8, 0x102e, 0x1f} +#elif RADIX == 32 +{0x864e1f3, 0xf136fda, 0xb5e680e, 0x957d6a, 0x88cdb84, 0xba38fc2, 0x8532297, 0xf2a36b4, 0x4d16894, 0x6d052d1, 0x440c037, 0x7f66b9b, 0xbb18139, 0x390b9} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x680ef136fda864e1, 0x8cdb840957d6ab5e, 0x48532297ba38fc28, 0x2d14d16894f2a36b, 0x66b9b440c0376d05, 0x3dec0b9bb181397f} +#else +{0x1de26dfb50c9c3, 0x10255f5aad79a0, 0x3dd1c7e14466dc, 0x4f2a36b4853229, 0x6da0a5a29a2d12, 0x5fd9ae6d10300d, 0xeb605cdd8c09c} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1ca3, 0x16ad, 0x12b3, 0x9d7, 0xb37, 0x118b, 0xb22, 0x1662, 0xa8f, 0xd68, 0x6d5, 0x1a1f, 0x1f29, 0x632, 0x1b7e, 0xb6, 0xba7, 0xeca, 0x11ed, 0x13b, 0x18cc, 0x19a2, 0x77, 0x1582, 0x11ff, 0xc5f, 0x7de, 0x4b1, 0x1a7f, 0x18} +#elif RADIX == 32 +{0xb56f28f, 0xd3af2b3, 0x28c5acd, 0x3ecc4b2, 0xd56b42a, 0xca743e6, 0xb7e3197, 0x2e9c16d, 0x71ed765, 0x1633027, 0x4077cd, 0x2fc7feb, 0x9627de6, 0x39fc} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x5acdd3af2b3b56f2, 0x56b42a3ecc4b228c, 0xdb7e3197ca743e6d, 0x2771ed7652e9c16, 0xc7feb04077cd1633, 0x24529fc9627de62f} +#else +{0x1ba75e5676ade5, 0x28fb312c8a316b, 0x3e53a1f36ab5a1, 0x52e9c16db7e319, 0x22c6604ee3daec, 0xbf1ffac101df3, 0x1e94fe4b13ef3} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1f7a, 0x1a13, 0x11f4, 0xaeb, 0x997, 0x12d, 0x315, 0x1d7, 0x2fc, 0x736, 0x927, 0x350, 0x695, 0x14ac, 0x703, 0x1ec7, 0x1567, 0x1527, 0x7ee, 0x1a23, 0x11aa, 0x919, 0x130b, 0x199e, 0x137d, 0x795, 0x4e4, 0x1dc6, 0xa87, 0xd} +#elif RADIX == 32 +{0xd09fde9, 0xd5d71f4, 0x5096a65, 0xf03ae31, 0x2739b0b, 0xa546a09, 0x703a561, 0xd59fd8e, 0x67eea93, 0xcc6ab44, 0x3d30b48, 0xcacdf73, 0xb8c4e43, 0x29a1f} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6a65d5d71f4d09fd, 0x739b0bf03ae31509, 0xe703a561a546a092, 0xb4467eea93d59fd8, 0xcdf733d30b48cc6a, 0x3b52a1fb8c4e43ca} +#else +{0x4babae3e9a13fb, 0x2fc0eb8c5425a9, 0xd2a3504939cd8, 0x3d59fd8e703a56, 0x198d5688cfdd52, 0x72b37dccf4c2d2, 0xd6950fdc62721} +#endif +#endif +, +#if 0 +#elif 
RADIX == 16 +{0xa54, 0x1685, 0x1b20, 0x1632, 0x1047, 0x159e, 0x14a0, 0x94c, 0x3c8, 0x793, 0x3a2, 0x1938, 0x1899, 0x15b7, 0xefa, 0xcc8, 0x12c3, 0x1335, 0x4ef, 0x1e93, 0x1861, 0x1602, 0x1d6c, 0x1ae7, 0x187, 0x18b1, 0x857, 0x8da, 0x12f7, 0xa} +#elif RADIX == 32 +{0xb42a951, 0xec65b20, 0xacf411, 0x212994a, 0xa23c98f, 0x2672703, 0xefaadbe, 0xcb0d990, 0x64ef99a, 0x16187d2, 0xcfd6cb0, 0x58861f5, 0x1b4857c, 0x13bdd} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf411ec65b20b42a9, 0x23c98f212994a0ac, 0xefaadbe2672703a, 0x7d264ef99acb0d99, 0x861f5cfd6cb01618, 0x14a4bdd1b4857c58} +#else +{0x23d8cb64168552, 0x3c84a65282b3d0, 0x71339381d11e4c, 0x2cb0d990efaadb, 0x2c30fa4c9df33, 0x162187d73f5b2c, 0xa525ee8da42be} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1e6b, 0x111, 0x74d, 0xb04, 0x738, 0x178f, 0xdc5, 0x835, 0x724, 0xaf9, 0xf3c, 0x1855, 0x266, 0x1b16, 0x1cf0, 0x1aa3, 0x32f, 0xce, 0x1f26, 0x16ba, 0x1cb6, 0x9b8, 0x12de, 0x1cef, 0x1a72, 0x1d68, 0xa02, 0x1c67, 0xa67, 0x13} +#elif RADIX == 32 +{0x88f9ae, 0x160874d, 0x5bc79ce, 0x9106adc, 0x3c57c9c, 0x99b0aaf, 0xcf0d8b0, 0xcbf547, 0x5f26067, 0xc72dad7, 0xdf2de4d, 0xb469cb9, 0x8cea02e, 0x1899f} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x79ce160874d088f9, 0xc57c9c9106adc5bc, 0x7cf0d8b099b0aaf3, 0xad75f260670cbf54, 0x69cb9df2de4dc72d, 0x2c4699f8cea02eb4} +#else +{0x1c2c10e9a111f3, 0x72441ab716f1e7, 0x4cd85579e2be4, 0x70cbf547cf0d8b, 0x38e5b5aebe4c0c, 0x2d1a72e77cb793, 0x5e34cfc675017} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x12d6, 0x1c7a, 0x9bb, 0x1ce1, 0x1ca, 0xf3f, 0x1036, 0x19a6, 0x1c79, 0x5bf, 0x3, 0x1a92, 0x1d08, 0xeaa, 0x11e8, 0xab1, 0x1ed2, 0x80c, 0x10c9, 0x1517, 0xc18, 0x1513, 0x1dff, 0xc00, 0x16a0, 0x14ce, 0x72d, 0x1a86, 0xd45, 0x19} +#elif RADIX == 32 +{0xe3d4b5b, 0xb9c29bb, 0x679f872, 0xe734d03, 0x32dff1, 0x4235240, 0x1e87557, 0x7b49563, 0xf0c9406, 0x9b062a2, 0x1dffa8, 0x675a818, 0x50c72da, 0x8517} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf872b9c29bbe3d4b, 0x32dff1e734d03679, 0x31e8755742352400, 0x2a2f0c94067b4956, 0x5a81801dffa89b06, 0x172351750c72da67} +#else +{0x657385377c7a96, 0x479cd340d9e7e1, 0x3a11a9200196ff, 0x67b495631e8755, 0x1360c545e19280, 0x19d6a060077fea, 0xb91a8ba86396d} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 
0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8d4f,0xfa2a,0xd4d0,0xce2,0x7436,0x8079,0xbe5f,0x2c18,0x5699,0x3569,0x7ecd,0xe106,0xea0,0x7d91,0xcc95,0x7293,0x5f1d,0xd812,0xb900,0x9858,0xd95a,0xc80e,0x1d4a,0xa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfa2a8d4f,0xce2d4d0,0x80797436,0x2c18be5f,0x35695699,0xe1067ecd,0x7d910ea0,0x7293cc95,0xd8125f1d,0x9858b900,0xc80ed95a,0xa1d4a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xce2d4d0fa2a8d4f,0x2c18be5f80797436,0xe1067ecd35695699,0x7293cc957d910ea0,0x9858b900d8125f1d,0xa1d4ac80ed95a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1eca,0x7027,0xf189,0x93f2,0x1c1f,0x5a6f,0xabbc,0x9e7d,0xa00a,0xc45,0x62a8,0x8db0,0x70cd,0x413f,0x400a,0xffaa,0x2ae6,0xd8a8,0xcb0e,0x69c4,0x7d9a,0x7a25,0xa679,0xad}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x70271eca,0x93f2f189,0x5a6f1c1f,0x9e7dabbc,0xc45a00a,0x8db062a8,0x413f70cd,0xffaa400a,0xd8a82ae6,0x69c4cb0e,0x7a257d9a,0xada679}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x93f2f18970271eca,0x9e7dabbc5a6f1c1f,0x8db062a80c45a00a,0xffaa400a413f70cd,0x69c4cb0ed8a82ae6,0xada6797a257d9a}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8949,0xfec5,0x9be5,0xc657,0xab10,0x2df6,0x3ee,0xc0f7,0x1a64,0x328e,0x3b3,0xb50c,0x650a,0xa23d,0x8ab,0xb103,0xf6e6,0xb27d,0x708a,0xc702,0x5301,0x2f00,0x9991,0x2f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfec58949,0xc6579be5,0x2df6ab10,0xc0f703ee,0x328e1a64,0xb50c03b3,0xa23d650a,0xb10308ab,0xb27df6e6,0xc702708a,0x2f005301,0x2f9991}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc6579be5fec58949,0xc0f703ee2df6ab10,0xb50c03b3328e1a64,0xb10308aba23d650a,0xc702708ab27df6e6,0x2f99912f005301}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x72b1,0x5d5,0x2b2f,0xf31d,0x8bc9,0x7f86,0x41a0,0xd3e7,0xa966,0xca96,0x8132,0x1ef9,0xf15f,0x826e,0x336a,0x8d6c,0xa0e2,0x27ed,0x46ff,0x67a7,0x26a5,0x37f1,0xe2b5,0xf5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5d572b1,0xf31d2b2f,0x7f868bc9,0xd3e741a0,0xca96a966,0x1ef98132,0x826ef15f,0x8d6c336a,0x27eda0e2,0x67a746ff,0x37f126a5,0xf5e2b5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf31d2b2f05d572b1,0xd3e741a07f868bc9,0x1ef98132ca96a966,0x8d6c336a826ef15f,0x67a746ff27eda0e2,0xf5e2b537f126a5}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, 
._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1ebb,0xe120,0x35fc,0x20e3,0xba01,0xff68,0x2ef4,0x62f6,0x5e93,0x94c1,0x3f93,0x804c,0xddc5,0x5b3d,0x1d31,0xf673,0x6e47,0x3d32,0x242c,0x6f7e,0x764b,0x63cb,0xbf4,0xf7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe1201ebb,0x20e335fc,0xff68ba01,0x62f62ef4,0x94c15e93,0x804c3f93,0x5b3dddc5,0xf6731d31,0x3d326e47,0x6f7e242c,0x63cb764b,0xf70bf4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x20e335fce1201ebb,0x62f62ef4ff68ba01,0x804c3f9394c15e93,0xf6731d315b3dddc5,0x6f7e242c3d326e47,0xf70bf463cb764b}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe76c,0x34d0,0x684,0xee5,0x43c6,0x5a38,0x4bd5,0x2867,0xd3c5,0x2ee1,0xf790,0x18bf,0xbb64,0x3924,0x7d25,0xe0bc,0x913a,0x1355,0x50e9,0x7091,0x6724,0x21b2,0xc027,0xaa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x34d0e76c,0xee50684,0x5a3843c6,0x28674bd5,0x2ee1d3c5,0x18bff790,0x3924bb64,0xe0bc7d25,0x1355913a,0x709150e9,0x21b26724,0xaac027}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xee5068434d0e76c,0x28674bd55a3843c6,0x18bff7902ee1d3c5,0xe0bc7d253924bb64,0x709150e91355913a,0xaac02721b26724}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xbd01,0x45bb,0x58bc,0x8007,0xbf5b,0xfd7,0x440b,0x7f9,0x54ed,0xe5db,0x2ba9,0xcd7b,0xfc98,0x1314,0x1470,0x9e9b,0xca3,0x944c,0x73c6,0x4cc9,0xa757,0x45fe,0x8b40,0x46}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x45bbbd01,0x800758bc,0xfd7bf5b,0x7f9440b,0xe5db54ed,0xcd7b2ba9,0x1314fc98,0x9e9b1470,0x944c0ca3,0x4cc973c6,0x45fea757,0x468b40}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x800758bc45bbbd01,0x7f9440b0fd7bf5b,0xcd7b2ba9e5db54ed,0x9e9b14701314fc98,0x4cc973c6944c0ca3,0x468b4045fea757}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe145,0x1edf,0xca03,0xdf1c,0x45fe,0x97,0xd10b,0x9d09,0xa16c,0x6b3e,0xc06c,0x7fb3,0x223a,0xa4c2,0xe2ce,0x98c,0x91b8,0xc2cd,0xdbd3,0x9081,0x89b4,0x9c34,0xf40b,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1edfe145,0xdf1cca03,0x9745fe,0x9d09d10b,0x6b3ea16c,0x7fb3c06c,0xa4c2223a,0x98ce2ce,0xc2cd91b8,0x9081dbd3,0x9c3489b4,0x8f40b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdf1cca031edfe145,0x9d09d10b009745fe,0x7fb3c06c6b3ea16c,0x98ce2cea4c2223a,0x9081dbd3c2cd91b8,0x8f40b9c3489b4}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8d4f,0xfa2a,0xd4d0,0xce2,0x7436,0x8079,0xbe5f,0x2c18,0x5699,0x3569,0x7ecd,0xe106,0xea0,0x7d91,0xcc95,0x7293,0x5f1d,0xd812,0xb900,0x9858,0xd95a,0xc80e,0x1d4a,0xa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfa2a8d4f,0xce2d4d0,0x80797436,0x2c18be5f,0x35695699,0xe1067ecd,0x7d910ea0,0x7293cc95,0xd8125f1d,0x9858b900,0xc80ed95a,0xa1d4a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xce2d4d0fa2a8d4f,0x2c18be5f80797436,0xe1067ecd35695699,0x7293cc957d910ea0,0x9858b900d8125f1d,0xa1d4ac80ed95a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1eca,0x7027,0xf189,0x93f2,0x1c1f,0x5a6f,0xabbc,0x9e7d,0xa00a,0xc45,0x62a8,0x8db0,0x70cd,0x413f,0x400a,0xffaa,0x2ae6,0xd8a8,0xcb0e,0x69c4,0x7d9a,0x7a25,0xa679,0xad}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x70271eca,0x93f2f189,0x5a6f1c1f,0x9e7dabbc,0xc45a00a,0x8db062a8,0x413f70cd,0xffaa400a,0xd8a82ae6,0x69c4cb0e,0x7a257d9a,0xada679}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x93f2f18970271eca,0x9e7dabbc5a6f1c1f,0x8db062a80c45a00a,0xffaa400a413f70cd,0x69c4cb0ed8a82ae6,0xada6797a257d9a}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8949,0xfec5,0x9be5,0xc657,0xab10,0x2df6,0x3ee,0xc0f7,0x1a64,0x328e,0x3b3,0xb50c,0x650a,0xa23d,0x8ab,0xb103,0xf6e6,0xb27d,0x708a,0xc702,0x5301,0x2f00,0x9991,0x2f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xfec58949,0xc6579be5,0x2df6ab10,0xc0f703ee,0x328e1a64,0xb50c03b3,0xa23d650a,0xb10308ab,0xb27df6e6,0xc702708a,0x2f005301,0x2f9991}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc6579be5fec58949,0xc0f703ee2df6ab10,0xb50c03b3328e1a64,0xb10308aba23d650a,0xc702708ab27df6e6,0x2f99912f005301}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x72b1,0x5d5,0x2b2f,0xf31d,0x8bc9,0x7f86,0x41a0,0xd3e7,0xa966,0xca96,0x8132,0x1ef9,0xf15f,0x826e,0x336a,0x8d6c,0xa0e2,0x27ed,0x46ff,0x67a7,0x26a5,0x37f1,0xe2b5,0xf5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5d572b1,0xf31d2b2f,0x7f868bc9,0xd3e741a0,0xca96a966,0x1ef98132,0x826ef15f,0x8d6c336a,0x27eda0e2,0x67a746ff,0x37f126a5,0xf5e2b5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf31d2b2f05d572b1,0xd3e741a07f868bc9,0x1ef98132ca96a966,0x8d6c336a826ef15f,0x67a746ff27eda0e2,0xf5e2b537f126a5}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3e42,0x35b4,0xc315,0x4acc,0x7905,0x734e,0xe57,0x941d,0xcc00,0x9010,0x652,0x5679,0x1e7c,0x69d5,0x77f0,0x5936,0x9815,0xdc49,0xdbae,0x8415,0x2381,0x706d,0x1b55,0x35}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x35b43e42,0x4accc315,0x734e7905,0x941d0e57,0x9010cc00,0x56790652,0x69d51e7c,0x593677f0,0xdc499815,0x8415dbae,0x706d2381,0x351b55}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4accc31535b43e42,0x941d0e57734e7905,0x567906529010cc00,0x593677f069d51e7c,0x8415dbaedc499815,0x351b55706d2381}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x9f23,0x1f88,0x311a,0x8d4e,0x15a2,0x199f,0x997,0x8bcf,0xc7a0,0xc956,0x3de8,0x254b,0x1224,0x1a69,0x604a,0x9cb1,0xa8f7,0xc6ee,0x5903,0x65b8,0xe8a5,0xa271,0x7d6e,0xb3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1f889f23,0x8d4e311a,0x199f15a2,0x8bcf0997,0xc956c7a0,0x254b3de8,0x1a691224,0x9cb1604a,0xc6eea8f7,0x65b85903,0xa271e8a5,0xb37d6e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8d4e311a1f889f23,0x8bcf0997199f15a2,0x254b3de8c956c7a0,0x9cb1604a1a691224,0x65b85903c6eea8f7,0xb37d6ea271e8a5}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xfad4,0x9280,0x39ea,0xba3b,0xb12b,0x1c9c,0x5ffd,0x2c19,0x13bf,0x2145,0xaf34,0x30c1,0x70d8,0x27ea,0x6539,0xb50a,0x3106,0x3638,0x7fad,0xa5d2,0x912a,0xb0e6,0xb4a1,0xfd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9280fad4,0xba3b39ea,0x1c9cb12b,0x2c195ffd,0x214513bf,0x30c1af34,0x27ea70d8,0xb50a6539,0x36383106,0xa5d27fad,0xb0e6912a,0xfdb4a1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xba3b39ea9280fad4,0x2c195ffd1c9cb12b,0x30c1af34214513bf,0xb50a653927ea70d8,0xa5d27fad36383106,0xfdb4a1b0e6912a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc1be,0xca4b,0x3cea,0xb533,0x86fa,0x8cb1,0xf1a8,0x6be2,0x33ff,0x6fef,0xf9ad,0xa986,0xe183,0x962a,0x880f,0xa6c9,0x67ea,0x23b6,0x2451,0x7bea,0xdc7e,0x8f92,0xe4aa,0xca}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xca4bc1be,0xb5333cea,0x8cb186fa,0x6be2f1a8,0x6fef33ff,0xa986f9ad,0x962ae183,0xa6c9880f,0x23b667ea,0x7bea2451,0x8f92dc7e,0xcae4aa}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb5333ceaca4bc1be,0x6be2f1a88cb186fa,0xa986f9ad6fef33ff,0xa6c9880f962ae183,0x7bea245123b667ea,0xcae4aa8f92dc7e}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x9a9, 0x8c7, 0x119d, 0xada, 0x1d98, 0xc97, 0x1a31, 0xdc6, 0x192a, 0xf3c, 0x509, 0xc5b, 0xea7, 0x1eb9, 0x59d, 0x1e3c, 0x114b, 0x88, 0x5a9, 0x1154, 0x18c0, 0x11db, 0x1ba9, 0x3b8, 0x837, 0x18d0, 0x17eb, 0x211, 0x1aa7, 0x11} +#elif RADIX == 32 +{0x463a6a6, 0x15b519d, 0x164bf66, 0xa9b8da3, 0x979e64, 0xa9d8b65, 0x59df5cb, 0x452fc78, 0x85a9044, 0xde3022a, 0x71ba98e, 0x6820dc7, 0x4237ebc, 0xca9c} +#elif 
RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xbf6615b519d463a6, 0x979e64a9b8da3164, 0x859df5cba9d8b650, 0x22a85a9044452fc7, 0x20dc771ba98ede30, 0x2a32a9c4237ebc68} +#else +{0x4c2b6a33a8c74d, 0x12a6e368c592fd, 0x5d4ec5b284bcf3, 0x4452fc7859df5c, 0x5bc604550b5208, 0x1a08371dc6ea63, 0x4d954e211bf5e} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x1ae8, 0xa31, 0x1467, 0x2b6, 0x1f66, 0xb25, 0x168c, 0x1371, 0x64a, 0xbcf, 0x1942, 0x1b16, 0xba9, 0xfae, 0x167, 0x1f8f, 0x452, 0x822, 0x16a, 0x455, 0x1e30, 0xc76, 0x6ea, 0x18ee, 0x20d, 0x1e34, 0xdfa, 0x1884, 0x12a9, 0xd} +#elif RADIX == 32 +{0x518eba1, 0x856d467, 0xc592fd9, 0x2a6e368, 0x425e799, 0xea762d9, 0x1677d72, 0x114bf1e, 0xa16a411, 0xb78c08a, 0xdc6ea63, 0x1a08371, 0x108dfaf, 0x2baa7} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x2fd9856d467518eb, 0x25e7992a6e368c59, 0xe1677d72ea762d94, 0x8aa16a411114bf1, 0x8371dc6ea63b78c, 0x290caa7108dfaf1a} +#else +{0x330ada8cea31d7, 0x64a9b8da3164bf, 0x1753b16ca12f3c, 0x1114bf1e1677d7, 0x76f1811542d482, 0x46820dc771ba98, 0x4465538846fd7} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif 
RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x954, 0x49a, 0xee7, 0x1037, 0x171c, 0x81, 0x448, 0x76f, 0x1615, 0xefe, 0xe70, 0xc54, 0x3d4, 0xc30, 0x1aaf, 0x72c, 0x464, 0x7a7, 0x5b7, 0x1f2a, 0xa98, 0x8db, 0x1689, 0x1cc1, 0x11ae, 0x4bf, 0x1ddc, 0x1f93, 0x1b3e, 0xb} +#elif RADIX == 32 +{0x24d2551, 0x206eee7, 0x8040dc7, 0x54ede44, 0x7077f58, 0xf518a8e, 0xaaf6180, 0x9190e59, 0x45b73d3, 0xdaa63e5, 0x8368946, 0x5fc6bb9, 0xf27ddc2, 0x1dcfb} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xdc7206eee724d25, 0x77f5854ede44804, 0x9aaf6180f518a8e7, 0x3e545b73d39190e5, 0xc6bb98368946daa6, 0x14aecfbf27ddc25f} +#else +{0xe40dddce49a4a, 0x6153b791201037, 0x7a8c547383bfa, 0x39190e59aaf618, 0x5b54c7ca8b6e7a, 0x17f1aee60da251, 0xa5767df93eee1} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xf14, 0xa31, 0x805, 0x19bd, 0x1b37, 0x5d5, 0x1211, 0x9c0, 0x557, 0x6b5, 0x1b2a, 0x775, 0x1a4f, 0x1d9, 0x520, 0x16be, 0x3d, 0x1cae, 0x4ca, 0x1a17, 0x1e64, 0x170b, 0x136, 0x1cd4, 0x150b, 0x1111, 0xf0b, 0x1af9, 0x3ce, 0x1c} +#elif RADIX == 32 +{0x518bc53, 0xf37a805, 0x12eaecd, 0x5d38121, 0x2a35a95, 0x93ceebb, 0x5200ece, 0xf6d7c, 0xe4cae57, 0x5f99342, 0xa8136b8, 0x88d42f9, 0x5f2f0b8, 0x1df3b} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xaecdf37a805518bc, 0xa35a955d3812112e, 0xc5200ece93ceebb2, 0x342e4cae5700f6d7, 0xd42f9a8136b85f99, 0x1530f3b5f2f0b888} +#else +{0x1be6f500aa3178, 0x5574e04844babb, 0x749e775d951ad4, 0x700f6d7c5200ec, 0xbf32685c995ca, 0x22350be6a04dae, 0xa9879daf9785c} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1b6e, 0x5aa, 0x1bd9, 0x1e85, 0x1615, 0x1629, 0xb8b, 0x1066, 0x1532, 0x19ad, 0xe24, 0xcb8, 0x17fc, 0x2ab, 0x1726, 0x1ad5, 0x1c83, 0x1b32, 0x75e, 0x1794, 0x161d, 0x9c4, 0x11b6, 0x1c02, 0x14bb, 0x15d2, 0x10d5, 0x26b, 0x1765, 0x14} +#elif RADIX == 32 +{0x2d56dba, 0x7d0bbd9, 0xbb14d85, 0xca0ccb8, 0x24cd6d4, 0xff1970e, 0x726155d, 0x720f5ab, 0x875ed99, 0x25876f2, 0x51b64e, 0xe952ef8, 0x4d70d5a, 0x23d94} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x4d857d0bbd92d56d, 0x4cd6d4ca0ccb8bb1, 0xb726155dff1970e2, 0x6f2875ed99720f5a, 0x52ef8051b64e2587, 0x2f5dd944d70d5ae9} +#else +{0xafa177b25aadb, 0x5328332e2ec536, 0x6ff8cb871266b6, 0x1720f5ab726155, 0x44b0ede50ebdb3, 0x3a54bbe0146d93, 0x76eeca26b86ad} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x18aa, 0x459, 0x747, 0x401, 0x14be, 0x13ba, 0xafb, 0x1cb4, 0x636, 0xd10, 0x16ec, 0x1e6e, 0x1ee5, 0x1475, 0xf82, 0x1695, 0x1a54, 0xe4e, 0x1856, 0x459, 0x752, 0x1d56, 0x15a7, 0xde2, 0x158c, 0x623, 0x17, 0x10d9, 
0x1156, 0x19} +#elif RADIX == 32 +{0x22ce2ab, 0x8802747, 0xb9dd52f, 0xdb968af, 0xec68818, 0xb97cdd6, 0xf82a3af, 0x6952d2a, 0x3856727, 0xb1d488b, 0xc55a7ea, 0x11d631b, 0x1b20173, 0x955a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xd52f880274722ce2, 0xc68818db968afb9d, 0xaf82a3afb97cdd6e, 0x88b38567276952d2, 0xd631bc55a7eab1d4, 0x2b7455a1b2017311} +#else +{0x5f1004e8e459c5, 0x636e5a2bee7754, 0x7dcbe6eb763440, 0x76952d2af82a3a, 0x563a911670ace4, 0x44758c6f1569fa, 0x57a2ad0d900b9} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1557, 0x1987, 0x65f, 0x1c20, 0x14ef, 0xb3b, 0xbbe, 0x19db, 0xc77, 0x566, 0x9ea, 0xcab, 0xafc, 0x1fda, 0xb44, 0x1fe6, 0x1af3, 0x1829, 0x2ef, 0xc23, 0x83d, 0x82c, 0x1fa8, 0x14b, 0xd6e, 0xde8, 0x260, 0x1019, 0x97a, 0x3} +#elif RADIX == 32 +{0xcc3d55c, 0xf84065f, 0xe59dd3b, 0xdf3b6bb, 0xea2b331, 0xbf19569, 0xb44fed2, 0xebcffcc, 0x62efc14, 0x620f584, 0x97fa841, 0xf435b82, 0x322606, 0x1a5ea} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xdd3bf84065fcc3d5, 0xa2b331df3b6bbe59, 0xcb44fed2bf19569e, 0x58462efc14ebcffc, 0x35b8297fa841620f, 0x17765ea0322606f4} +#else +{0x77f080cbf987aa, 0x477cedaef96774, 0x15f8cab4f51599, 0x4ebcffccb44fed, 0x2c41eb08c5df82, 0x3d0d6e0a5fea10, 0xbbb2f50191303} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xb02, 0xc60, 0x791, 0x1cf7, 0xc15, 0x125a, 0x1697, 0xca1, 0x327, 0x89f, 0xf64, 0xddf, 0xcb7, 0x1977, 0x29f, 0x100a, 0xdac, 0xc8, 0x1e16, 0x1c4e, 0xedf, 0x1ec0, 0x1ac0, 0x1bbd, 0x16ee, 0x106a, 0x35c, 0x11cc, 0xdde, 0x20} +#elif RADIX == 32 +{0x6302c0b, 0x79ee791, 0x792d305, 0x9d94369, 0x6444f8c, 0x2ddbbef, 0x29fcbbb, 0x36b2014, 0xde16064, 0x3b7f89, 0x7bac0f6, 0x355bbb7, 0x39835c8, 0x4077a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xd30579ee7916302c, 0x444f8c9d94369792, 0x429fcbbb2ddbbef6, 0xf89de1606436b201, 0x5bbb77bac0f603b7, 0x30b77a39835c835} +#else +{0xaf3dcf22c6058, 0x327650da5e4b4c, 0x596eddf7b2227c, 0x436b201429fcbb, 0x4076ff13bc2c0c, 0xd56eeddeeb03d, 0x185bbd1cc1ae4} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} 
+#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb089,0xa64c,0xa5a7,0x17cc,0xe580,0xaa34,0x86e8,0x9328,0x2d1e,0x66ce,0x2dbc,0xd6e5,0x68f,0xaf97,0x8c7a,0xc341,0x4b41,0x4e1c,0xe0a3,0x580f,0xe796,0x8f21,0x8ad3,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa64cb089,0x17cca5a7,0xaa34e580,0x932886e8,0x66ce2d1e,0xd6e52dbc,0xaf97068f,0xc3418c7a,0x4e1c4b41,0x580fe0a3,0x8f21e796,0xf8ad3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x17cca5a7a64cb089,0x932886e8aa34e580,0xd6e52dbc66ce2d1e,0xc3418c7aaf97068f,0x580fe0a34e1c4b41,0xf8ad38f21e796}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xebe,0xec57,0xcd8c,0xef29,0xf13a,0xd391,0xa791,0x715c,0xe58e,0x194c,0xf950,0x5a37,0xdb9e,0xdab6,0x82c8,0x5cce,0x3197,0x339a,0xa700,0xad4,0xcd5b,0x91c1,0x5cff,0xb0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xec570ebe,0xef29cd8c,0xd391f13a,0x715ca791,0x194ce58e,0x5a37f950,0xdab6db9e,0x5cce82c8,0x339a3197,0xad4a700,0x91c1cd5b,0xb05cff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xef29cd8cec570ebe,0x715ca791d391f13a,0x5a37f950194ce58e,0x5cce82c8dab6db9e,0xad4a700339a3197,0xb05cff91c1cd5b}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3b4f,0x8fac,0x4b2c,0xc9a0,0x69a2,0xe4fa,0x3480,0xe41b,0x5801,0xd68b,0xa965,0x2f67,0x3134,0x7ac0,0x93a7,0x5352,0x9612,0x200e,0x33e6,0x44f,0xbca8,0x82f1,0xe411,0x4d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8fac3b4f,0xc9a04b2c,0xe4fa69a2,0xe41b3480,0xd68b5801,0x2f67a965,0x7ac03134,0x535293a7,0x200e9612,0x44f33e6,0x82f1bca8,0x4de411}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc9a04b2c8fac3b4f,0xe41b3480e4fa69a2,0x2f67a965d68b5801,0x535293a77ac03134,0x44f33e6200e9612,0x4de41182f1bca8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4f77,0x59b3,0x5a58,0xe833,0x1a7f,0x55cb,0x7917,0x6cd7,0xd2e1,0x9931,0xd243,0x291a,0xf970,0x5068,0x7385,0x3cbe,0xb4be,0xb1e3,0x1f5c,0xa7f0,0x1869,0x70de,0x752c,0xf0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x59b34f77,0xe8335a58,0x55cb1a7f,0x6cd77917,0x9931d2e1,0x291ad243,0x5068f970,0x3cbe7385,0xb1e3b4be,0xa7f01f5c,0x70de1869,0xf0752c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe8335a5859b34f77,0x6cd7791755cb1a7f,0x291ad2439931d2e1,0x3cbe73855068f970,0xa7f01f5cb1e3b4be,0xf0752c70de1869}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 
6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe463,0x3132,0x31,0xb872,0xdbee,0x1045,0x2b88,0x62c5,0xee3c,0xde5c,0xb179,0xa84f,0x18e5,0x355e,0x9a0f,0xbef8,0x783a,0x35b5,0x6d1c,0xaa31,0x3024,0xed81,0xa0f6,0x8a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3132e463,0xb8720031,0x1045dbee,0x62c52b88,0xde5cee3c,0xa84fb179,0x355e18e5,0xbef89a0f,0x35b5783a,0xaa316d1c,0xed813024,0x8aa0f6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb87200313132e463,0x62c52b881045dbee,0xa84fb179de5cee3c,0xbef89a0f355e18e5,0xaa316d1c35b5783a,0x8aa0f6ed813024}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcf24,0xdac2,0xe08b,0xd2f9,0x13a,0xf1f,0x9517,0xfa7c,0xa1c5,0x581e,0x4d0b,0x3e59,0x97cc,0x7506,0xee19,0xa48e,0xb1b0,0x50c2,0xb5a7,0x4b1d,0x2fcd,0xee68,0xab65,0x85}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdac2cf24,0xd2f9e08b,0xf1f013a,0xfa7c9517,0x581ea1c5,0x3e594d0b,0x750697cc,0xa48eee19,0x50c2b1b0,0x4b1db5a7,0xee682fcd,0x85ab65}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0xd2f9e08bdac2cf24,0xfa7c95170f1f013a,0x3e594d0b581ea1c5,0xa48eee19750697cc,0x4b1db5a750c2b1b0,0x85ab65ee682fcd}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8b69,0x7be5,0xdf28,0x9c91,0xf929,0x7c60,0x6c50,0x4f81,0x714a,0x59da,0x2741,0x3c71,0x223a,0x79bf,0x14bd,0xa26f,0xc787,0x606d,0xc74c,0xef81,0xd1c4,0x32a,0x55ff,0x6a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7be58b69,0x9c91df28,0x7c60f929,0x4f816c50,0x59da714a,0x3c712741,0x79bf223a,0xa26f14bd,0x606dc787,0xef81c74c,0x32ad1c4,0x6a55ff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c91df287be58b69,0x4f816c507c60f929,0x3c71274159da714a,0xa26f14bd79bf223a,0xef81c74c606dc787,0x6a55ff032ad1c4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1b9d,0xcecd,0xffce,0x478d,0x2411,0xefba,0xd477,0x9d3a,0x11c3,0x21a3,0x4e86,0x57b0,0xe71a,0xcaa1,0x65f0,0x4107,0x87c5,0xca4a,0x92e3,0x55ce,0xcfdb,0x127e,0x5f09,0x75}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecd1b9d,0x478dffce,0xefba2411,0x9d3ad477,0x21a311c3,0x57b04e86,0xcaa1e71a,0x410765f0,0xca4a87c5,0x55ce92e3,0x127ecfdb,0x755f09}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x478dffcececd1b9d,0x9d3ad477efba2411,0x57b04e8621a311c3,0x410765f0caa1e71a,0x55ce92e3ca4a87c5,0x755f09127ecfdb}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb089,0xa64c,0xa5a7,0x17cc,0xe580,0xaa34,0x86e8,0x9328,0x2d1e,0x66ce,0x2dbc,0xd6e5,0x68f,0xaf97,0x8c7a,0xc341,0x4b41,0x4e1c,0xe0a3,0x580f,0xe796,0x8f21,0x8ad3,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa64cb089,0x17cca5a7,0xaa34e580,0x932886e8,0x66ce2d1e,0xd6e52dbc,0xaf97068f,0xc3418c7a,0x4e1c4b41,0x580fe0a3,0x8f21e796,0xf8ad3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x17cca5a7a64cb089,0x932886e8aa34e580,0xd6e52dbc66ce2d1e,0xc3418c7aaf97068f,0x580fe0a34e1c4b41,0xf8ad38f21e796}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xebe,0xec57,0xcd8c,0xef29,0xf13a,0xd391,0xa791,0x715c,0xe58e,0x194c,0xf950,0x5a37,0xdb9e,0xdab6,0x82c8,0x5cce,0x3197,0x339a,0xa700,0xad4,0xcd5b,0x91c1,0x5cff,0xb0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xec570ebe,0xef29cd8c,0xd391f13a,0x715ca791,0x194ce58e,0x5a37f950,0xdab6db9e,0x5cce82c8,0x339a3197,0xad4a700,0x91c1cd5b,0xb05cff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xef29cd8cec570ebe,0x715ca791d391f13a,0x5a37f950194ce58e,0x5cce82c8dab6db9e,0xad4a700339a3197,0xb05cff91c1cd5b}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3b4f,0x8fac,0x4b2c,0xc9a0,0x69a2,0xe4fa,0x3480,0xe41b,0x5801,0xd68b,0xa965,0x2f67,0x3134,0x7ac0,0x93a7,0x5352,0x9612,0x200e,0x33e6,0x44f,0xbca8,0x82f1,0xe411,0x4d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8fac3b4f,0xc9a04b2c,0xe4fa69a2,0xe41b3480,0xd68b5801,0x2f67a965,0x7ac03134,0x535293a7,0x200e9612,0x44f33e6,0x82f1bca8,0x4de411}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0xc9a04b2c8fac3b4f,0xe41b3480e4fa69a2,0x2f67a965d68b5801,0x535293a77ac03134,0x44f33e6200e9612,0x4de41182f1bca8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4f77,0x59b3,0x5a58,0xe833,0x1a7f,0x55cb,0x7917,0x6cd7,0xd2e1,0x9931,0xd243,0x291a,0xf970,0x5068,0x7385,0x3cbe,0xb4be,0xb1e3,0x1f5c,0xa7f0,0x1869,0x70de,0x752c,0xf0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x59b34f77,0xe8335a58,0x55cb1a7f,0x6cd77917,0x9931d2e1,0x291ad243,0x5068f970,0x3cbe7385,0xb1e3b4be,0xa7f01f5c,0x70de1869,0xf0752c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe8335a5859b34f77,0x6cd7791755cb1a7f,0x291ad2439931d2e1,0x3cbe73855068f970,0xa7f01f5cb1e3b4be,0xf0752c70de1869}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd69f,0xa20a,0x2dbf,0x4897,0x3199,0xde89,0xe5f9,0x293e,0x826b,0xb67a,0x9878,0x508f,0x1cd5,0xbfc7,0xa6dc,0xa78c,0xa5a7,0xf717,0x2bd3,0x9a61,0x7d35,0xb772,0xba39,0x5d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa20ad69f,0x48972dbf,0xde893199,0x293ee5f9,0xb67a826b,0x508f9878,0xbfc71cd5,0xa78ca6dc,0xf717a5a7,0x9a612bd3,0xb7727d35,0x5dba39}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x48972dbfa20ad69f,0x293ee5f9de893199,0x508f9878b67a826b,0xa78ca6dcbfc71cd5,0x9a612bd3f717a5a7,0x5dba39b7727d35}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xeec1,0x1e36,0x61bb,0x9e9f,0xe1d8,0x9166,0x8a8e,0xb5cd,0xc787,0x4281,0xb7db,0xc5fe,0x29b,0x7038,0xad1a,0xdfb3,0x5d88,0xa643,0xce34,0xe9d5,0xfe7,0xc15c,0xb80f,0xbc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1e36eec1,0x9e9f61bb,0x9166e1d8,0xb5cd8a8e,0x4281c787,0xc5feb7db,0x7038029b,0xdfb3ad1a,0xa6435d88,0xe9d5ce34,0xc15c0fe7,0xbcb80f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9e9f61bb1e36eec1,0xb5cd8a8e9166e1d8,0xc5feb7db4281c787,0xdfb3ad1a7038029b,0xe9d5ce34a6435d88,0xbcb80fc15c0fe7}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb7ff,0xc2,0x2b8a,0x5a59,0xd318,0x52ca,0x9b64,0xad19,0x8df,0xc9b8,0x7b28,0x9d09,0xe309,0x9,0xfb09,0xcbb9,0x6a67,0x1137,0x707c,0xaa5,0xcdf5,0x3ffd,0xfb9e,0xb9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc2b7ff,0x5a592b8a,0x52cad318,0xad199b64,0xc9b808df,0x9d097b28,0x9e309,0xcbb9fb09,0x11376a67,0xaa5707c,0x3ffdcdf5,0xb9fb9e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x5a592b8a00c2b7ff,0xad199b6452cad318,0x9d097b28c9b808df,0xcbb9fb090009e309,0xaa5707c11376a67,0xb9fb9e3ffdcdf5}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2961,0x5df5,0xd240,0xb768,0xce66,0x2176,0x1a06,0xd6c1,0x7d94,0x4985,0x6787,0xaf70,0xe32a,0x4038,0x5923,0x5873,0x5a58,0x8e8,0xd42c,0x659e,0x82ca,0x488d,0x45c6,0xa2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5df52961,0xb768d240,0x2176ce66,0xd6c11a06,0x49857d94,0xaf706787,0x4038e32a,0x58735923,0x8e85a58,0x659ed42c,0x488d82ca,0xa245c6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb768d2405df52961,0xd6c11a062176ce66,0xaf70678749857d94,0x587359234038e32a,0x659ed42c08e85a58,0xa245c6488d82ca}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0xccf, 0xad2, 0x1c72, 0x1f31, 0x155, 0xa3, 0x1062, 0xf6e, 0xe60, 0x87b, 0x1891, 0xb45, 0x1de8, 0x7f8, 0x18a6, 0x1e67, 0x988, 0x1887, 0xca7, 0x216, 0x1daa, 0x1aa5, 0xc3a, 0x11d3, 0x1282, 0x55d, 0x15fc, 0x1132, 0x1c5e, 0x8} +#elif RADIX == 32 +{0x569333d, 0x7e63c72, 0x2051855, 0x81edd06, 0x9143db9, 0x7a168b8, 0x8a63fc7, 0xa623ccf, 0xcca7c43, 0x2f6a842, 0xa6c3ad5, 0xaeca0a3, 0x2655fc2, 0x617a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x18557e63c7256933, 0x143db981edd06205, 0xf8a63fc77a168b89, 0x842cca7c43a623cc, 0xca0a3a6c3ad52f6a, 0xf8317a2655fc2ae} +#else +{0x2afcc78e4ad266, 0x6607b741881461, 
0x3bd0b45c48a1ed, 0x3a623ccf8a63fc, 0x25ed5085994f88, 0x2bb2828e9b0eb5, 0x7c18bd132afe1} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x13b1, 0x12b4, 0xf1c, 0xfcc, 0x1855, 0x1028, 0x1418, 0x3db, 0x1b98, 0xa1e, 0xe24, 0x2d1, 0x77a, 0x11fe, 0x1e29, 0x799, 0x1a62, 0x1e21, 0x1329, 0x1085, 0xf6a, 0x16a9, 0x1b0e, 0x1474, 0xca0, 0x157, 0x157f, 0x144c, 0x1317, 0x1b} +#elif RADIX == 32 +{0x95a4ec7, 0x5f98f1c, 0x8814615, 0x607b741, 0x2450f6e, 0xde85a2e, 0xe298ff1, 0xe988f33, 0xb329f10, 0x4bdaa10, 0xe9b0eb5, 0xabb2828, 0x89957f0, 0x19c5e} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x46155f98f1c95a4e, 0x450f6e607b741881, 0x3e298ff1de85a2e2, 0xa10b329f10e988f3, 0xb2828e9b0eb54bda, 0x32a0c5e89957f0ab} +#else +{0x2abf31e392b49d, 0x3981edd0620518, 0xef42d1712287b, 0xe988f33e298ff, 0x297b54216653e2, 0x2aeca0a3a6c3ad, 0x91062f44cabf8} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0xdd8, 0x13bc, 0x17ae, 0x83e, 
0x10c6, 0x1a72, 0x270, 0x84, 0xb92, 0x431, 0x1fdf, 0x9cf, 0x2a9, 0x121d, 0x5d5, 0x1d9f, 0xa48, 0xec9, 0xcfc, 0x6ee, 0x1812, 0x66b, 0xed8, 0xf7, 0x117b, 0x1fb7, 0xc5, 0x1f00, 0x134f, 0x1f} +#elif RADIX == 32 +{0x9de3763, 0x907d7ae, 0xd39431, 0x4810827, 0xdf218ae, 0xaa539ff, 0x5d590e8, 0xa923b3e, 0xccfc764, 0x5e048dd, 0xeeed833, 0xdbc5ec1, 0xe000c5f, 0x39d3f} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x9431907d7ae9de37, 0xf218ae48108270d3, 0xe5d590e8aa539ffd, 0x8ddccfc764a923b3, 0xc5ec1eeed8335e04, 0x195cd3fe000c5fdb} +#else +{0x6320faf5d3bc6e, 0x39204209c34e50, 0x45529cffef90c5, 0x4a923b3e5d590e, 0x6bc091bb99f8ec, 0x76f17b07bbb60c, 0xcae69ff00062f} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xf36, 0x2c8, 0x1ab4, 0x17c1, 0x10be, 0x1a20, 0x1baf, 0x3ce, 0x1088, 0xd75, 0x1e25, 0x10f8, 0x3d2, 0x1b8, 0x9c7, 0x168, 0x44c, 0x372, 0xc50, 0x1d9a, 0x1b99, 0xab9, 0x8af, 0x657, 0xe84, 0xe1d, 0x1675, 0x47, 0x157e, 0xc} +#elif RADIX == 32 +{0x1643cd9, 0xaf83ab4, 0xfd1042f, 0x2079dba, 0x256bac2, 0xf4a1f1e, 0x9c70dc0, 0x11302d0, 0x4c501b9, 0xcee67b3, 0xae8af55, 0xeba10c, 0x8f6757, 0x245f8} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x42faf83ab41643c, 0x56bac22079dbafd1, 0x9c70dc0f4a1f1e2, 0x7b34c501b911302d, 0xba10cae8af55cee6, 0x373d5f808f67570e} +#else +{0x5f5f075682c879, 0x881e76ebf4410, 0x7a50f8f12b5d6, 0x111302d09c70dc, 0x39dccf6698a037, 0x43ae8432ba2bd5, 0xb5eafc047b3ab} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x4b0, 0x31c, 0x92f, 0xf0d, 0xbc1, 0x1e89, 0x4ce, 0x1480, 0xdee, 0x504, 0x970, 0x16c3, 0xcb6, 0xae7, 0x1147, 0x8c, 0xc2a, 0x1ff9, 0x7d8, 0xfe9, 0x1fb1, 0x748, 0x998, 0xb85, 0x1a8e, 0x19c7, 0x5f7, 0x103c, 0x12a4, 0xe} +#elif RADIX == 32 +{0x18e12c1, 0x5e1a92f, 0xef44af0, 0xba9004c, 0x7028237, 0x2dad869, 0x147573b, 0xb0a8119, 0x27d8ffc, 0x47ec5fd, 0xa9983a, 0xe3ea397, 0x785f7c, 0x33a92} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x4af05e1a92f18e12, 0x28237ba9004cef4, 0x9147573b2dad8697, 0x5fd27d8ffcb0a811, 0xea3970a9983a47ec, 0x3134a920785f7ce3} +#else +{0x60bc3525e31c25, 0x5eea40133bd12b, 0x596d6c34b81411, 0x4b0a8119147573, 0x48fd8bfa4fb1ff, 0x38fa8e5c2a660e, 0x85a54903c2fbe} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x15a9, 0x1ae1, 0x1dd2, 0xa61, 0x1259, 0xfad, 0xe49, 0x1f6d, 0xd9a, 0x1371, 0xee7, 0x1179, 0x1bcf, 0x876, 0x3ca, 0xf7c, 0x1192, 0x315, 0x916, 0x1aa5, 0x1ca9, 0x10cb, 0xe32, 0x18b9, 0xf58, 0x1932, 0x1cce, 0x1ba7, 0x1377, 0x6} +#elif RADIX == 32 +{0xd70d6a4, 0x54c3dd2, 0x97d6c96, 0x6bedae4, 0xe79b8b6, 0xf3e2f2e, 0x3ca43b6, 0xc649ef8, 0xa91618a, 0x5f2a754, 0x72e3286, 0x993d631, 0x74fccec, 0x34ddf} +#elif RADIX == 64 +#if 
defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6c9654c3dd2d70d6, 0x79b8b66bedae497d, 0x83ca43b6f3e2f2ee, 0x754a91618ac649ef, 0x3d63172e32865f2a, 0x29d8ddf74fccec99} +#else +{0x2ca987ba5ae1ad, 0x59afb6b925f5b2, 0x379f179773cdc5, 0x2c649ef83ca43b, 0x4be54ea9522c31, 0x264f58c5cb8ca1, 0x4ac6efba7e676} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1f79, 0xcad, 0x18f2, 0x1ba7, 0x1d14, 0x1fc6, 0x197d, 0x522, 0xab, 0x7bd, 0x57b, 0x1fbf, 0x12, 0xb50, 0x425, 0x1aa3, 0x1c8e, 0x11cf, 0x1c1b, 0x1774, 0x3fc, 0x36a, 0x148f, 0x1fd3, 0x608, 0x1711, 0x1142, 0xcfa, 0xd43, 0xd} +#elif RADIX == 32 +{0x656fde5, 0x374f8f2, 0xdfe3745, 0xaca4597, 0x7b3de82, 0x4bf7e5, 0x4255a80, 0xf23b546, 0x9c1b8e7, 0x50ff2ee, 0xa748f1b, 0x889823f, 0x9f5142b, 0x2a50d} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3745374f8f2656fd, 0xb3de82aca4597dfe, 0x64255a8004bf7e57, 0x2ee9c1b8e7f23b54, 0x9823fa748f1b50ff, 0x3a4f50d9f5142b88} +#else +{0xa6e9f1e4cadfb, 0xab29165f7f8dd, 0x25fbf2bd9ef4, 0x7f23b5464255a8, 0x6a1fe5dd38371c, 0x622608fe9d23c6, 0xce7a86cfa8a15} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x14a, 0x1236, 0x839, 0xe2, 0xe2d, 0xe17, 0x1b8f, 0x18dd, 0xb20, 0xeb8, 0x1da9, 0xc53, 0x12e8, 0x146, 0x1b9b, 0x154, 0x1121, 0x1049, 0x105d, 0x631, 0xc9, 0xbe0, 0x8fa, 0xbc0, 0x34b, 0x178a, 0x77b, 0x2a7, 0x105b, 0x15} +#elif RADIX == 32 +{0x91b052a, 0x41c4839, 0xf70bb8b, 0x831bbb8, 0xa975c2c, 0xba18a7d, 0xb9b0a34, 0xc4842a9, 0x305d824, 0x324c6, 0x808fa5f, 0xc50d2d7, 0x54e77bb, 0x2a16c} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xbb8b41c483991b05, 0x975c2c831bbb8f70, 0x9b9b0a34ba18a7da, 0x4c6305d824c4842a, 0xd2d7808fa5f0032, 0xad416c54e77bbc5} +#else +{0x1683890732360a, 0x320c6eee3dc2ee, 0x25d0c53ed4bae1, 0x4c4842a9b9b0a3, 0x6006498c60bb04, 0x71434b5e023e97, 0x56a0b62a73bdd} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4fff,0xe916,0xb451,0xaee,0xc580,0xfa2c,0xb766,0x199a,0xa8a0,0x36f2,0xaa1b,0x47dd,0xb34f,0x26f5,0xb1ba,0xc4c1,0xad4d,0xfcf8,0x21fd,0xf7a,0x2a36,0x4cd2,0x2b08,0x94}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe9164fff,0xaeeb451,0xfa2cc580,0x199ab766,0x36f2a8a0,0x47ddaa1b,0x26f5b34f,0xc4c1b1ba,0xfcf8ad4d,0xf7a21fd,0x4cd22a36,0x942b08}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xaeeb451e9164fff,0x199ab766fa2cc580,0x47ddaa1b36f2a8a0,0xc4c1b1ba26f5b34f,0xf7a21fdfcf8ad4d,0x942b084cd22a36}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf16e,0x5176,0xe09f,0xe2ae,0xa46d,0x6925,0xc9bd,0xbd66,0xdd5c,0x51ce,0xe98d,0x9ad3,0x6815,0x4476,0xbec8,0xa088,0x6799,0x1733,0xf164,0x1d16,0xc914,0x4af3,0x3b5b,0x84}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5176f16e,0xe2aee09f,0x6925a46d,0xbd66c9bd,0x51cedd5c,0x9ad3e98d,0x44766815,0xa088bec8,0x17336799,0x1d16f164,0x4af3c914,0x843b5b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe2aee09f5176f16e,0xbd66c9bd6925a46d,0x9ad3e98d51cedd5c,0xa088bec844766815,0x1d16f16417336799,0x843b5b4af3c914}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x22c1,0x4088,0xbc14,0x9620,0x7d53,0x77ec,0x89,0xda12,0x54df,0x3806,0xb925,0x6092,0xd1b4,0x59e3,0x3419,0x9617,0x9ae6,0x48a9,0xf008,0xb452,0x9756,0x35cb,0x9c5d,0x49}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x408822c1,0x9620bc14,0x77ec7d53,0xda120089,0x380654df,0x6092b925,0x59e3d1b4,0x96173419,0x48a99ae6,0xb452f008,0x35cb9756,0x499c5d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9620bc14408822c1,0xda12008977ec7d53,0x6092b925380654df,0x9617341959e3d1b4,0xb452f00848a99ae6,0x499c5d35cb9756}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb001,0x16e9,0x4bae,0xf511,0x3a7f,0x5d3,0x4899,0xe665,0x575f,0xc90d,0x55e4,0xb822,0x4cb0,0xd90a,0x4e45,0x3b3e,0x52b2,0x307,0xde02,0xf085,0xd5c9,0xb32d,0xd4f7,0x6b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x16e9b001,0xf5114bae,0x5d33a7f,0xe6654899,0xc90d575f,0xb82255e4,0xd90a4cb0,0x3b3e4e45,0x30752b2,0xf085de02,0xb32dd5c9,0x6bd4f7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf5114bae16e9b001,0xe665489905d33a7f,0xb82255e4c90d575f,0x3b3e4e45d90a4cb0,0xf085de02030752b2,0x6bd4f7b32dd5c9}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size 
= 24, ._mp_d = (mp_limb_t[]) {0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xafa5,0x4195,0xbb2d,0xdd24,0xa3ca,0xc678,0xf995,0x2ccb,0x5c3b,0xf9ff,0xd06,0x1f9b,0x926d,0x4e3b,0x2881,0x24f2,0xcf4c,0x8e9a,0xa38d,0x24cb,0xe8f2,0x28a1,0x581c,0xde}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4195afa5,0xdd24bb2d,0xc678a3ca,0x2ccbf995,0xf9ff5c3b,0x1f9b0d06,0x4e3b926d,0x24f22881,0x8e9acf4c,0x24cba38d,0x28a1e8f2,0xde581c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd24bb2d4195afa5,0x2ccbf995c678a3ca,0x1f9b0d06f9ff5c3b,0x24f228814e3b926d,0x24cba38d8e9acf4c,0xde581c28a1e8f2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcd88,0x9cea,0x593c,0xb5a8,0x79c6,0xc07c,0x496f,0xfb85,0x5ac9,0x381c,0xf4f8,0xfa59,0xb7a3,0x5caa,0x24c2,0x67c8,0x31b3,0x7585,0xbe8a,0xb89f,0xa29f,0x6cd5,0xc156,0x25}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9ceacd88,0xb5a8593c,0xc07c79c6,0xfb85496f,0x381c5ac9,0xfa59f4f8,0x5caab7a3,0x67c824c2,0x758531b3,0xb89fbe8a,0x6cd5a29f,0x25c156}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb5a8593c9ceacd88,0xfb85496fc07c79c6,0xfa59f4f8381c5ac9,0x67c824c25caab7a3,0xb89fbe8a758531b3,0x25c1566cd5a29f}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x9627,0xd297,0x9200,0x73de,0xaa89,0xf44f,0x99c7,0x2d45,0xb1eb,0xab2b,0x4168,0x976f,0x1e88,0x7777,0x2f39,0x6648,0xc224,0xd5a1,0xb815,0x861b,0xf76f,0xb476,0x4123,0xbe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd2979627,0x73de9200,0xf44faa89,0x2d4599c7,0xab2bb1eb,0x976f4168,0x77771e88,0x66482f39,0xd5a1c224,0x861bb815,0xb476f76f,0xbe4123}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x73de9200d2979627,0x2d4599c7f44faa89,0x976f4168ab2bb1eb,0x66482f3977771e88,0x861bb815d5a1c224,0xbe4123b476f76f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x505b,0xbe6a,0x44d2,0x22db,0x5c35,0x3987,0x66a,0xd334,0xa3c4,0x600,0xf2f9,0xe064,0x6d92,0xb1c4,0xd77e,0xdb0d,0x30b3,0x7165,0x5c72,0xdb34,0x170d,0xd75e,0xa7e3,0x21}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbe6a505b,0x22db44d2,0x39875c35,0xd334066a,0x600a3c4,0xe064f2f9,0xb1c46d92,0xdb0dd77e,0x716530b3,0xdb345c72,0xd75e170d,0x21a7e3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22db44d2be6a505b,0xd334066a39875c35,0xe064f2f90600a3c4,0xdb0dd77eb1c46d92,0xdb345c72716530b3,0x21a7e3d75e170d}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4fff,0xe916,0xb451,0xaee,0xc580,0xfa2c,0xb766,0x199a,0xa8a0,0x36f2,0xaa1b,0x47dd,0xb34f,0x26f5,0xb1ba,0xc4c1,0xad4d,0xfcf8,0x21fd,0xf7a,0x2a36,0x4cd2,0x2b08,0x94}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe9164fff,0xaeeb451,0xfa2cc580,0x199ab766,0x36f2a8a0,0x47ddaa1b,0x26f5b34f,0xc4c1b1ba,0xfcf8ad4d,0xf7a21fd,0x4cd22a36,0x942b08}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xaeeb451e9164fff,0x199ab766fa2cc580,0x47ddaa1b36f2a8a0,0xc4c1b1ba26f5b34f,0xf7a21fdfcf8ad4d,0x942b084cd22a36}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf16e,0x5176,0xe09f,0xe2ae,0xa46d,0x6925,0xc9bd,0xbd66,0xdd5c,0x51ce,0xe98d,0x9ad3,0x6815,0x4476,0xbec8,0xa088,0x6799,0x1733,0xf164,0x1d16,0xc914,0x4af3,0x3b5b,0x84}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5176f16e,0xe2aee09f,0x6925a46d,0xbd66c9bd,0x51cedd5c,0x9ad3e98d,0x44766815,0xa088bec8,0x17336799,0x1d16f164,0x4af3c914,0x843b5b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe2aee09f5176f16e,0xbd66c9bd6925a46d,0x9ad3e98d51cedd5c,0xa088bec844766815,0x1d16f16417336799,0x843b5b4af3c914}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x22c1,0x4088,0xbc14,0x9620,0x7d53,0x77ec,0x89,0xda12,0x54df,0x3806,0xb925,0x6092,0xd1b4,0x59e3,0x3419,0x9617,0x9ae6,0x48a9,0xf008,0xb452,0x9756,0x35cb,0x9c5d,0x49}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x408822c1,0x9620bc14,0x77ec7d53,0xda120089,0x380654df,0x6092b925,0x59e3d1b4,0x96173419,0x48a99ae6,0xb452f008,0x35cb9756,0x499c5d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9620bc14408822c1,0xda12008977ec7d53,0x6092b925380654df,0x9617341959e3d1b4,0xb452f00848a99ae6,0x499c5d35cb9756}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xb001,0x16e9,0x4bae,0xf511,0x3a7f,0x5d3,0x4899,0xe665,0x575f,0xc90d,0x55e4,0xb822,0x4cb0,0xd90a,0x4e45,0x3b3e,0x52b2,0x307,0xde02,0xf085,0xd5c9,0xb32d,0xd4f7,0x6b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x16e9b001,0xf5114bae,0x5d33a7f,0xe6654899,0xc90d575f,0xb82255e4,0xd90a4cb0,0x3b3e4e45,0x30752b2,0xf085de02,0xb32dd5c9,0x6bd4f7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf5114bae16e9b001,0xe665489905d33a7f,0xb82255e4c90d575f,0x3b3e4e45d90a4cb0,0xf085de02030752b2,0x6bd4f7b32dd5c9}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x43c6,0x55d8,0x682a,0xc215,0x706e,0xac4c,0x5ce,0x1182,0x8b72,0x90e3,0xf04f,0x6a11,0xc345,0x3488,0x45b0,0x5d3f,0x556b,0x9896,0x7b20,0x8d46,0xa9e3,0x7b0c,0xd428,0xba}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x55d843c6,0xc215682a,0xac4c706e,0x118205ce,0x90e38b72,0x6a11f04f,0x3488c345,0x5d3f45b0,0x9896556b,0x8d467b20,0x7b0ca9e3,0xbad428}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc215682a55d843c6,0x118205ceac4c706e,0x6a11f04f90e38b72,0x5d3f45b03488c345,0x8d467b209896556b,0xbad4287b0ca9e3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x91a5,0xf9ad,0x243c,0xedb9,0xc4f5,0xce5f,0xd6d7,0x3592,0x40df,0xdead,0x1489,0xe297,0x55b1,0xee4d,0xda9d,0x9e1f,0x4a5c,0xd99a,0x6c6b,0xa585,0x62fc,0x4383,0xc1ad,0xc0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf9ad91a5,0xedb9243c,0xce5fc4f5,0x3592d6d7,0xdead40df,0xe2971489,0xee4d55b1,0x9e1fda9d,0xd99a4a5c,0xa5856c6b,0x438362fc,0xc0c1ad}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xedb9243cf9ad91a5,0x3592d6d7ce5fc4f5,0xe2971489dead40df,0x9e1fda9dee4d55b1,0xa5856c6bd99a4a5c,0xc0c1ad438362fc}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf454,0x6191,0x2181,0x2fc4,0x66fb,0xc44f,0x7bb6,0x9b1c,0x99f,0xee09,0xb1a3,0xf8f9,0xf234,0x5151,0x595c,0x4e44,0xa80a,0x305c,0x9930,0x25f6,0x8e50,0xb812,0xff4d,0xb8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6191f454,0x2fc42181,0xc44f66fb,0x9b1c7bb6,0xee09099f,0xf8f9b1a3,0x5151f234,0x4e44595c,0x305ca80a,0x25f69930,0xb8128e50,0xb8ff4d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2fc421816191f454,0x9b1c7bb6c44f66fb,0xf8f9b1a3ee09099f,0x4e44595c5151f234,0x25f69930305ca80a,0xb8ff4db8128e50}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xbc3a,0xaa27,0x97d5,0x3dea,0x8f91,0x53b3,0xfa31,0xee7d,0x748d,0x6f1c,0xfb0,0x95ee,0x3cba,0xcb77,0xba4f,0xa2c0,0xaa94,0x6769,0x84df,0x72b9,0x561c,0x84f3,0x2bd7,0x45}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xaa27bc3a,0x3dea97d5,0x53b38f91,0xee7dfa31,0x6f1c748d,0x95ee0fb0,0xcb773cba,0xa2c0ba4f,0x6769aa94,0x72b984df,0x84f3561c,0x452bd7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3dea97d5aa27bc3a,0xee7dfa3153b38f91,0x95ee0fb06f1c748d,0xa2c0ba4fcb773cba,0x72b984df6769aa94,0x452bd784f3561c}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x10c4, 0x1e1a, 0x1b68, 0xa71, 0xa1a, 0x1f60, 0xdda, 0xb59, 0x1bc0, 0x26c, 0x1325, 0x4cf, 0x1375, 0x10ef, 0x1702, 0x1eff, 0x171f, 0x467, 0xc1c, 0xf3b, 0xf74, 0x6fa, 0x177f, 0x911, 0x8bc, 0x16b7, 0x1022, 0xa5f, 0xa55, 0x9} +#elif RADIX == 32 +{0xf0d4311, 0x94e3b68, 0xafb0286, 0x16b2dd, 0x251366f, 0xdd499f3, 0x702877c, 0xdc7fdff, 0x6c1c233, 0xd3dd1e7, 0x2377f37, 0x5ba2f12, 0x4bf022b, 0x9955} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x28694e3b68f0d43, 0x51366f016b2ddafb, 0xf702877cdd499f32, 0x1e76c1c233dc7fdf, 0xa2f122377f37d3dd, 0x45a9554bf022b5b} +#else +{0xd29c76d1e1a86, 0x3c05acb76bec0a, 0x66ea4cf99289b3, 0x3dc7fdff702877, 0x7a7ba3ced83846, 0x56e8bc488ddfcd, 0x22d4aaa5f8115} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x14af, 0x786, 0xeda, 0x129c, 0x286, 0x17d8, 0xb76, 0x2d6, 0x6f0, 0x89b, 0x1cc9, 0x933, 0x1cdd, 0x143b, 0x1dc0, 0x1fbf, 0x1dc7, 0x119, 0x1b07, 0x3ce, 0x13dd, 0x19be, 0xddf, 0x244, 0x1a2f, 0x15ad, 0x1c08, 0xa97, 0xa95, 0x3} +#elif RADIX == 32 +{0x3c352bc, 0xa538eda, 0x6bec0a1, 0xc05acb7, 0xc944d9b, 0x375267c, 0xdc0a1df, 0xf71ff7f, 0xdb0708c, 0xf4f7479, 0x88ddfcd, 0xd6e8bc4, 0x52fc08a, 0x1aa55} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xc0a1a538eda3c352, 0x944d9bc05acb76be, 0xfdc0a1df375267cc, 0x479db0708cf71ff7, 0xe8bc488ddfcdf4f7, 0x2fd6a5552fc08ad6} +#else +{0x434a71db4786a5, 0x6f016b2ddafb02, 0x79ba933e64a26c, 0x4f71ff7fdc0a1d, 0x3e9ee8f3b60e11, 0x35ba2f122377f3, 0x7ab52aa97e045} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0xd3b, 0x1cbd, 0x1177, 0x1087, 0x5d2, 0x1535, 0x1cb5, 0x1372, 0x158a, 0x931, 0x12da, 0x1b9d, 0x44e, 0xa00, 0xb71, 0xe8a, 0x1c57, 0x1a1, 0x5bb, 0x1180, 0x15f0, 0x1ca3, 0x119b, 0x16cc, 0xd3a, 0xaa7, 0xbc3, 0x9fc, 0xb07, 0x1a} +#elif RADIX == 32 +{0xe5eb4ef, 
0xa10f177, 0x5a9a974, 0x2a6e5cb, 0xda498d6, 0x13b73b2, 0xb715001, 0xf15dd14, 0x5bb0d0, 0x1d7c230, 0x9919be5, 0x53b4ead, 0x3f8bc35, 0xfc1d} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xa974a10f177e5eb4, 0xa498d62a6e5cb5a9, 0x4b71500113b73b2d, 0x23005bb0d0f15dd1, 0xb4ead9919be51d7c, 0x3cbec1d3f8bc3553} +#else +{0x69421e2efcbd69, 0x58a9b972d6a6a5, 0x89db9d96d24c6, 0xf15dd14b71500, 0x23af84600b761a, 0x54ed3ab66466f9, 0xe1f60e9fc5e1a} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x186, 0x245, 0xa48, 0x11da, 0x1354, 0x9fc, 0x168f, 0xff7, 0x1f2c, 0x6a2, 0x6fb, 0x980, 0x164f, 0xbb8, 0x49c, 0x1ad1, 0x145f, 0x80a, 0xf93, 0x2d8, 0x1846, 0x43, 0x5a9, 0x3a, 0x72e, 0x1e10, 0x741, 0x783, 0x967, 0x1a} +#elif RADIX == 32 +{0x122861b, 0x23b4a48, 0xf4fe4d5, 0xb1fef68, 0xfb3517c, 0x93d3006, 0x49c5dc5, 0x517f5a2, 0xf93405, 0x1e1185b, 0x745a902, 0x81cb80, 0xf06741f, 0xf59c} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xe4d523b4a4812286, 0xb3517cb1fef68f4f, 0x249c5dc593d3006f, 0x85b0f93405517f5a, 0x1cb80745a9021e11, 0x6ea59cf06741f08} +#else +{0x2a47694902450c, 0x72c7fbda3d3f93, 0x2c9e98037d9a8b, 0x5517f5a249c5dc, 0x43c230b61f2680, 0x42072e01d16a40, 0x3752ce7833a0f} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1064, 0x8a7, 0x7c, 0x1876, 0xf16, 0x3a0, 0x124, 0x637, 0x11bf, 0x223, 0x6d, 0x58e, 0xcde, 0xaf, 0x99c, 0x1c62, 0xdcb, 0xe10, 0x7ba, 0x127f, 0x1a23, 0x69a, 0x7bd, 0x238, 0x455, 0x16ac, 0x1147, 0x12a, 0x14c1, 0x5} +#elif RADIX == 32 +{0x453c190, 0xb0ec07c, 0x41d03c5, 0xfcc6e12, 0x6d111c6, 0x378b1c0, 0x99c057b, 0x372f8c4, 0xe7ba708, 0xd688e4f, 0x707bd34, 0x5611544, 0x255147b, 0x2d304} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3c5b0ec07c453c1, 0xd111c6fcc6e1241d, 0x499c057b378b1c06, 0xe4fe7ba708372f8c, 0x11544707bd34d688, 0x24bd304255147b56} +#else +{0xb61d80f88a783, 0x1bf31b8490740f, 0x59bc58e036888e, 0x372f8c499c057, 0x1ad11c9fcf74e1, 0x55845511c1ef4d, 0x21e98212a8a3d} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xaab, 0x60b, 0x8a0, 0x15d7, 0xbd8, 0x3ab, 0x1641, 0x1771, 0x134a, 0x17a, 0x785, 0x624, 0x1d, 0x1c3d, 0xcb1, 0xb5e, 0x23f, 0xf53, 0x879, 0x5e2, 0x903, 0xaff, 0xf72, 0xa2d, 0x7f4, 0xeb8, 0xd96, 0x1715, 0xffa, 0xa} +#elif RADIX == 32 +{0x305aaad, 0x2bae8a0, 0x11d5af6, 0x2aee364, 0x850bd4d, 0x74c487, 0xcb1e1e8, 0x88fd6bc, 0x48797a9, 0xfa40cbc, 0x5af7257, 0x5c1fd14, 0xe2ad967, 0x12fea} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x5af62bae8a0305aa, 0x50bd4d2aee36411d, 0xccb1e1e8074c4878, 0xcbc48797a988fd6b, 0x1fd145af7257fa40, 0x2bfffeae2ad9675c} +#else +{0x6c575d14060b55, 0x34abb8d904756b, 0x403a6243c285ea, 0x188fd6bccb1e1e, 0x7f48197890f2f5, 
0x5707f4516bdc95, 0x5bfff57156cb3} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x195c, 0x1d55, 0x99f, 0x11f, 0x106b, 0xab1, 0x3e7, 0x1e40, 0xa1e, 0xdf0, 0x1dd4, 0x5cd, 0xfc3, 0x1c99, 0xbfa, 0x1ead, 0x1f6, 0x12fa, 0x1465, 0xad7, 0x1a84, 0x18d8, 0x1b7f, 0x9fe, 0x14b1, 0x13b7, 0x189f, 0x12bc, 0xabc, 0x1f} +#elif RADIX == 32 +{0xeaae573, 0xc23e99f, 0x7558c1a, 0x7bc803e, 0xd46f828, 0xf0cb9bd, 0xbfae4cb, 0x7dbd5a, 0xf46597d, 0xc6a115a, 0xfdb7fc6, 0xdbd2c53, 0x57989f9, 0x37af2} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x8c1ac23e99feaae5, 0x46f8287bc803e755, 0xabfae4cbf0cb9bdd, 0x15af46597d07dbd5, 0xd2c53fdb7fc6c6a1, 0x1d6aaf257989f9db} +#else +{0x35847d33fd55ca, 0x21ef200f9d5630, 0x5f865cdeea37c1, 0x507dbd5abfae4c, 0x58d422b5e8cb2f, 0x76f4b14ff6dff1, 0xeb55792bcc4fc} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x970, 0x18b4, 0xc62, 0xf59, 0xf33, 0x6c0, 0x5ae, 0x86b, 0x1690, 0x17e1, 0x829, 0xab5, 0x169, 0x1115, 0x1b7e, 0x17fa, 0xcae, 0x1b7, 0xc7b, 0xb70, 0x11fc, 0x1417, 0x8b4, 0x1b78, 0x35a, 0x18e, 0x1e46, 0x15f0, 0xf64, 0x15} +#elif RADIX == 32 +{0xc5a25c2, 0xdeb2c62, 0xe3603cc, 0x410d65a, 0x29bf0da, 0x5a556a8, 0xb7e88a8, 0xb2baff5, 0xc7b0db, 0xbc7f16e, 0xf08b4a0, 0xc70d6b6, 0xbe1e460, 0x29d92} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3ccdeb2c62c5a25, 0x9bf0da410d65ae36, 0x5b7e88a85a556a82, 0x16e0c7b0dbb2baff, 0xd6b6f08b4a0bc7f, 0x316bd92be1e460c7} +#else +{0x19bd658c58b44b, 0x69043596b8d80f, 0x42d2ab5414df86, 0x3b2baff5b7e88a, 0x178fe2dc18f61b, 0x31c35adbc22d28, 0x875ec95f0f230} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x807f,0x9ad4,0xa2d5,0xb571,0xe39a,0x9682,0xbc36,0x2fde,0x80a1,0x55a9,0x2092,0x49c9,0x4269,0x7a75,0x7411,0xde67,0x75d6,0x9689,0xaf23,0x7b89,0x819b,0x60e4,0x276f,0x1a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9ad4807f,0xb571a2d5,0x9682e39a,0x2fdebc36,0x55a980a1,0x49c92092,0x7a754269,0xde677411,0x968975d6,0x7b89af23,0x60e4819b,0x1a276f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb571a2d59ad4807f,0x2fdebc369682e39a,0x49c9209255a980a1,0xde6774117a754269,0x7b89af23968975d6,0x1a276f60e4819b}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc876,0x218,0xd2f9,0x49ab,0x3ef7,0x1ab3,0x4dc4,0xf981,0x381a,0xdb9c,0x12c5,0xc5e1,0x5b21,0xc148,0x9d31,0x5369,0x5706,0x2277,0xcf63,0x1c7d,0xe151,0xd481,0xcd50,0x4b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x218c876,0x49abd2f9,0x1ab33ef7,0xf9814dc4,0xdb9c381a,0xc5e112c5,0xc1485b21,0x53699d31,0x22775706,0x1c7dcf63,0xd481e151,0x4bcd50}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x49abd2f90218c876,0xf9814dc41ab33ef7,0xc5e112c5db9c381a,0x53699d31c1485b21,0x1c7dcf6322775706,0x4bcd50d481e151}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2191,0x98ac,0x5708,0x78cc,0x63fa,0xe9c9,0x168,0x4c13,0x19e6,0xe2be,0xd010,0xd629,0xf3ff,0x3b08,0x7c2b,0xed3e,0x356e,0x5d94,0x253b,0xa0f5,0xc652,0x60d4,0x9dac,0x63}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x98ac2191,0x78cc5708,0xe9c963fa,0x4c130168,0xe2be19e6,0xd629d010,0x3b08f3ff,0xed3e7c2b,0x5d94356e,0xa0f5253b,0x60d4c652,0x639dac}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x78cc570898ac2191,0x4c130168e9c963fa,0xd629d010e2be19e6,0xed3e7c2b3b08f3ff,0xa0f5253b5d94356e,0x639dac60d4c652}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7f81,0x652b,0x5d2a,0x4a8e,0x1c65,0x697d,0x43c9,0xd021,0x7f5e,0xaa56,0xdf6d,0xb636,0xbd96,0x858a,0x8bee,0x2198,0x8a29,0x6976,0x50dc,0x8476,0x7e64,0x9f1b,0xd890,0xe5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x652b7f81,0x4a8e5d2a,0x697d1c65,0xd02143c9,0xaa567f5e,0xb636df6d,0x858abd96,0x21988bee,0x69768a29,0x847650dc,0x9f1b7e64,0xe5d890}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4a8e5d2a652b7f81,0xd02143c9697d1c65,0xb636df6daa567f5e,0x21988bee858abd96,0x847650dc69768a29,0xe5d8909f1b7e64}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x313b,0xc18a,0x812a,0x406d,0x472a,0x9fca,0x9f07,0xb030,0x8b7b,0x7924,0x2af6,0x9e99,0x2b81,0x8eb8,0x35ee,0x59c8,0x7655,0x34cc,0x5aaf,0x326,0xe58d,0xf8b7,0x969a,0x6e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc18a313b,0x406d812a,0x9fca472a,0xb0309f07,0x79248b7b,0x9e992af6,0x8eb82b81,0x59c835ee,0x34cc7655,0x3265aaf,0xf8b7e58d,0x6e969a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x406d812ac18a313b,0xb0309f079fca472a,0x9e992af679248b7b,0x59c835ee8eb82b81,0x3265aaf34cc7655,0x6e969af8b7e58d}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x6610,0xfd89,0xb147,0xcf39,0x2b02,0x4ccf,0xed64,0x8470,0xaaf6,0x1891,0x8c78,0xf074,0x8a4c,0xfaed,0xd66c,0xf52b,0xf1c5,0xb0a,0x5cd,0x46f8,0x79a3,0x81de,0x451d,0xd9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfd896610,0xcf39b147,0x4ccf2b02,0x8470ed64,0x1891aaf6,0xf0748c78,0xfaed8a4c,0xf52bd66c,0xb0af1c5,0x46f805cd,0x81de79a3,0xd9451d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xcf39b147fd896610,0x8470ed644ccf2b02,0xf0748c781891aaf6,0xf52bd66cfaed8a4c,0x46f805cd0b0af1c5,0xd9451d81de79a3}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x1869,0x2ce0,0x425c,0x7d0f,0x30c8,0x1c3e,0xd562,0xfb41,0x3951,0xeccc,0x9c8a,0xb265,0x829,0xd879,0x3c42,0x2cbf,0xb1d2,0xd9d3,0xee28,0x7fdf,0xccdd,0x3ad,0xa6d9,0x3b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2ce01869,0x7d0f425c,0x1c3e30c8,0xfb41d562,0xeccc3951,0xb2659c8a,0xd8790829,0x2cbf3c42,0xd9d3b1d2,0x7fdfee28,0x3adccdd,0x3ba6d9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7d0f425c2ce01869,0xfb41d5621c3e30c8,0xb2659c8aeccc3951,0x2cbf3c42d8790829,0x7fdfee28d9d3b1d2,0x3ba6d903adccdd}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcec5,0x3e75,0x7ed5,0xbf92,0xb8d5,0x6035,0x60f8,0x4fcf,0x7484,0x86db,0xd509,0x6166,0xd47e,0x7147,0xca11,0xa637,0x89aa,0xcb33,0xa550,0xfcd9,0x1a72,0x748,0x6965,0x91}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3e75cec5,0xbf927ed5,0x6035b8d5,0x4fcf60f8,0x86db7484,0x6166d509,0x7147d47e,0xa637ca11,0xcb3389aa,0xfcd9a550,0x7481a72,0x916965}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xbf927ed53e75cec5,0x4fcf60f86035b8d5,0x6166d50986db7484,0xa637ca117147d47e,0xfcd9a550cb3389aa,0x91696507481a72}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x807f,0x9ad4,0xa2d5,0xb571,0xe39a,0x9682,0xbc36,0x2fde,0x80a1,0x55a9,0x2092,0x49c9,0x4269,0x7a75,0x7411,0xde67,0x75d6,0x9689,0xaf23,0x7b89,0x819b,0x60e4,0x276f,0x1a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9ad4807f,0xb571a2d5,0x9682e39a,0x2fdebc36,0x55a980a1,0x49c92092,0x7a754269,0xde677411,0x968975d6,0x7b89af23,0x60e4819b,0x1a276f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb571a2d59ad4807f,0x2fdebc369682e39a,0x49c9209255a980a1,0xde6774117a754269,0x7b89af23968975d6,0x1a276f60e4819b}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc876,0x218,0xd2f9,0x49ab,0x3ef7,0x1ab3,0x4dc4,0xf981,0x381a,0xdb9c,0x12c5,0xc5e1,0x5b21,0xc148,0x9d31,0x5369,0x5706,0x2277,0xcf63,0x1c7d,0xe151,0xd481,0xcd50,0x4b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x218c876,0x49abd2f9,0x1ab33ef7,0xf9814dc4,0xdb9c381a,0xc5e112c5,0xc1485b21,0x53699d31,0x22775706,0x1c7dcf63,0xd481e151,0x4bcd50}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x49abd2f90218c876,0xf9814dc41ab33ef7,0xc5e112c5db9c381a,0x53699d31c1485b21,0x1c7dcf6322775706,0x4bcd50d481e151}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2191,0x98ac,0x5708,0x78cc,0x63fa,0xe9c9,0x168,0x4c13,0x19e6,0xe2be,0xd010,0xd629,0xf3ff,0x3b08,0x7c2b,0xed3e,0x356e,0x5d94,0x253b,0xa0f5,0xc652,0x60d4,0x9dac,0x63}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x98ac2191,0x78cc5708,0xe9c963fa,0x4c130168,0xe2be19e6,0xd629d010,0x3b08f3ff,0xed3e7c2b,0x5d94356e,0xa0f5253b,0x60d4c652,0x639dac}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x78cc570898ac2191,0x4c130168e9c963fa,0xd629d010e2be19e6,0xed3e7c2b3b08f3ff,0xa0f5253b5d94356e,0x639dac60d4c652}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x7f81,0x652b,0x5d2a,0x4a8e,0x1c65,0x697d,0x43c9,0xd021,0x7f5e,0xaa56,0xdf6d,0xb636,0xbd96,0x858a,0x8bee,0x2198,0x8a29,0x6976,0x50dc,0x8476,0x7e64,0x9f1b,0xd890,0xe5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x652b7f81,0x4a8e5d2a,0x697d1c65,0xd02143c9,0xaa567f5e,0xb636df6d,0x858abd96,0x21988bee,0x69768a29,0x847650dc,0x9f1b7e64,0xe5d890}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4a8e5d2a652b7f81,0xd02143c9697d1c65,0xb636df6daa567f5e,0x21988bee858abd96,0x847650dc69768a29,0xe5d8909f1b7e64}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x29a3,0x7abe,0x2ef1,0x26a6,0xa5a5,0x54e6,0xf4c8,0xb56f,0x2bae,0x1aae,0xd9ba,0x94ed,0x2df5,0x882c,0xc686,0x6f64,0x29f7,0x850a,0x9eee,0x617c,0x5678,0x3108,0x8ebe,0x86}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7abe29a3,0x26a62ef1,0x54e6a5a5,0xb56ff4c8,0x1aae2bae,0x94edd9ba,0x882c2df5,0x6f64c686,0x850a29f7,0x617c9eee,0x31085678,0x868ebe}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x26a62ef17abe29a3,0xb56ff4c854e6a5a5,0x94edd9ba1aae2bae,0x6f64c686882c2df5,0x617c9eee850a29f7,0x868ebe31085678}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc2a5,0x8ce6,0x3729,0xaa2b,0xb9d2,0xbf43,0xe2be,0xaf25,0x4ffb,0xec8e,0xf85a,0x94c6,0xe027,0x3c64,0xf4ad,0xf63,0x86ba,0xa244,0xde0f,0x2390,0x11e1,0xdd7c,0xcd4c,0x33}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8ce6c2a5,0xaa2b3729,0xbf43b9d2,0xaf25e2be,0xec8e4ffb,0x94c6f85a,0x3c64e027,0xf63f4ad,0xa24486ba,0x2390de0f,0xdd7c11e1,0x33cd4c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xaa2b37298ce6c2a5,0xaf25e2bebf43b9d2,0x94c6f85aec8e4ffb,0xf63f4ad3c64e027,0x2390de0fa24486ba,0x33cd4cdd7c11e1}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1893,0xa4bf,0x1eb8,0x9df0,0x91b1,0x17b0,0xe4ae,0x6ba1,0x35fd,0xd56b,0xc03f,0x82a8,0x99cd,0x30be,0xf3a3,0x181e,0x879b,0x518,0x3e8,0xed0e,0xc0ff,0xe2d6,0xe29c,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa4bf1893,0x9df01eb8,0x17b091b1,0x6ba1e4ae,0xd56b35fd,0x82a8c03f,0x30be99cd,0x181ef3a3,0x518879b,0xed0e03e8,0xe2d6c0ff,0x1e29c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9df01eb8a4bf1893,0x6ba1e4ae17b091b1,0x82a8c03fd56b35fd,0x181ef3a330be99cd,0xed0e03e80518879b,0x1e29ce2d6c0ff}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd65d,0x8541,0xd10e,0xd959,0x5a5a,0xab19,0xb37,0x4a90,0xd451,0xe551,0x2645,0x6b12,0xd20a,0x77d3,0x3979,0x909b,0xd608,0x7af5,0x6111,0x9e83,0xa987,0xcef7,0x7141,0x79}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8541d65d,0xd959d10e,0xab195a5a,0x4a900b37,0xe551d451,0x6b122645,0x77d3d20a,0x909b3979,0x7af5d608,0x9e836111,0xcef7a987,0x797141}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd959d10e8541d65d,0x4a900b37ab195a5a,0x6b122645e551d451,0x909b397977d3d20a,0x9e8361117af5d608,0x797141cef7a987}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x187f, 0x1c7d, 0x163a, 0x54d, 0x197f, 0x1a5d, 0x1833, 0x823, 0x11ae, 0x225, 0xd6d, 0x1627, 0xd6c, 0x1625, 0xa36, 0xf64, 0xcfa, 0x327, 0x188d, 0xfab, 0x56b, 0x91c, 0x1cc5, 0x4fe, 0x2ae, 0x181a, 0x195, 0x1288, 0x10fc, 0x3} +#elif RADIX == 32 +{0xe3ee1fc, 0xca9b63a, 0x3d2ee5f, 0xb904783, 0x6d112c6, 0x5b2c4ed, 0xa36b12b, 0xb3e9ec8, 0x788d193, 0xe15adf5, 0xfdcc548, 0xd0ab89, 0x510195c, 0x1c3f2} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xee5fca9b63ae3ee1, 0xd112c6b9047833d2, 0x8a36b12b5b2c4ed6, 0xdf5788d193b3e9ec, 0xab89fdcc548e15a, 0x40183f2510195c0d} +#else +{0x3f9536c75c7dc3, 0x1ae411e0cf4bb9, 0x5ad96276b68896, 0x3b3e9ec8a36b12, 0x1c2b5beaf11a32, 0x342ae27f73152, 0xfcc1f92880cae} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0xe9d, 0x171f, 0xd8e, 0x1953, 0xe5f, 0x1e97, 0x1e0c, 0x1208, 0xc6b, 0x889, 0x1b5b, 0x589, 0xb5b, 0x1589, 0x28d, 0x13d9, 0x1b3e, 0x8c9, 0x1e23, 0x1bea, 0x15a, 0xa47, 0x1731, 0x113f, 0x10ab, 0xe06, 0x65, 0x4a2, 0x83f, 0x1a} +#elif RADIX == 32 +{0xb8fba77, 0xf2a6d8e, 0xcf4bb97, 0xae411e0, 0x5b444b1, 0xd6cb13b, 0x28dac4a, 0xecfa7b2, 0x5e23464, 0x3856b7d, 0x7f73152, 0x342ae2, 0x9440657, 0xf0fc} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xbb97f2a6d8eb8fba, 0xb444b1ae411e0cf4, 0x228dac4ad6cb13b5, 0xb7d5e23464ecfa7b, 0x42ae27f731523856, 0x1e460fc944065703} +#else +{0x2fe54db1d71f74, 0x46b9047833d2ee, 0x56b6589dada225, 0x4ecfa7b228dac4, 0x470ad6fabc468c, 0x40d0ab89fdcc54, 0xf2307e4a2032b} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x237, 0xee8, 0xd8c, 0xafb, 0x18cd, 0x1ce1, 0x162a, 0x11c9, 0x1bbc, 0x1415, 0x1c35, 0x1d0c, 0x1104, 0x1558, 0x9d, 0xb17, 0x1097, 0x16d2, 0xc02, 0x1573, 0x1c5f, 0x1bec, 0x1a73, 0x1dfe, 0x1923, 0x18d6, 0x221, 0x11ee, 0x1581, 0xb} +#elif RADIX == 32 +{0x77408dd, 
0x55f6d8c, 0xae70e33, 0xf239362, 0x35a0aee, 0x413a19c, 0x9daac4, 0x425d62e, 0x6c02b69, 0x6717eae, 0xfda73df, 0x6b648fb, 0x3dc221c, 0x1c606} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xe3355f6d8c77408, 0x5a0aeef239362ae7, 0xe09daac4413a19c3, 0xeae6c02b69425d62, 0x648fbfda73df6717, 0x38396063dc221c6b} +#else +{0x66abedb18ee811, 0x3bc8e4d8ab9c38, 0x2209d0ce1ad057, 0x1425d62e09daac, 0x6ce2fd5cd8056d, 0x1ad923eff69cf7, 0xbdcb031ee110e} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x16a4, 0x11f0, 0x446, 0x1b2b, 0x129e, 0x1b52, 0x25, 0x18e4, 0x15d7, 0x545, 0x1502, 0x3af, 0x1b45, 0xff3, 0x1423, 0x1574, 0x1c5a, 0xff0, 0x1663, 0x114b, 0xc99, 0x1c89, 0x11f0, 0x15fd, 0x17a1, 0x14dd, 0x17f7, 0x1451, 0x5af, 0x17} +#elif RADIX == 32 +{0x8f85a92, 0xb656446, 0x5da94a7, 0x5f1c802, 0x22a2d7, 0xd1475f5, 0x4237f9e, 0x716aae9, 0x76637f8, 0x4b26629, 0xfb1f0e4, 0x6ede86b, 0x8a37f7a, 0x376be} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x94a7b6564468f85a, 0x22a2d75f1c8025da, 0x94237f9ed1475f50, 0x62976637f8716aae, 0xde86bfb1f0e44b26, 0x25496be8a37f7a6e} +#else +{0x4f6cac88d1f0b5, 0x5d7c7200976a52, 0x768a3afa811516, 0x716aae94237f9, 0x964cc52ecc6ff, 0x1bb7a1afec7c39, 0x264b5f451bfbd} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0xc89, 0x16f8, 0x1bcf, 0x14c7, 0x1c81, 0x1c37, 0x3b1, 0xb00, 0x5e, 0xdb5, 0x920, 0x14db, 0x41, 0x1bd7, 0x159d, 0x1889, 0x1318, 0x95d, 0x13d5, 0x46b, 0x18bd, 0x1bf1, 0x1bf6, 0x1ba2, 0x2d6, 0x1b06, 0x17c1, 0x1a40, 0x1f02, 0x11} +#elif RADIX == 32 +{0xb7c3226, 0x698fbcf, 0x1e1bf20, 0x796003b, 0x206da81, 0x1069b69, 0x59ddeb8, 0xcc63113, 0x73d54ae, 0x8e2f48d, 0x45bf6df, 0x830b5b7, 0x4817c1d, 0xdc0b} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xbf20698fbcfb7c32, 0x6da81796003b1e1, 0x359ddeb81069b692, 0x48d73d54aecc6311, 0xb5b745bf6df8e2f, 0x9b3c0b4817c1d83} +#else +{0x40d31f79f6f864, 0x5e5800ec786fc, 0x40834db49036d4, 0x6cc6311359ddeb, 0x71c5e91ae7aa95, 0x60c2d6dd16fdb7, 0x4d9e05a40be0e} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x8c0, 0x125b, 0x1d1c, 0x8a8, 0x1c41, 0xbb7, 0x15bf, 0x15ec, 0x959, 0x1fc5, 0xc2, 0x2ff, 0x1dd2, 0x1c02, 0x9db, 0x139d, 0x9a, 0x1654, 0xce7, 0xf6d, 0x13e5, 0x19be, 0x1f28, 0x161c, 0xe9f, 0x940, 0x77d, 0x162c, 0x385, 0x4} +#elif RADIX == 32 +{0x92da300, 0x5151d1c, 0xf5dbf10, 0x66bd95b, 0xc2fe2a5, 0x7485fe0, 0x9dbe017, 0x26a73a, 0xace7b2a, 0xf4f95ed, 0x39f28cd, 0xa03a7ec, 0xc5877d4, 0x20e16} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xbf105151d1c92da3, 0x2fe2a566bd95bf5d, 0xa9dbe0177485fe0c, 0x5edace7b2a026a73, 0x3a7ec39f28cdf4f9, 0x20e16c5877d4a0} +#else +{0x20a2a3a3925b46, 0x159af656fd76fc, 0x3ba42ff0617f15, 
0x2026a73a9dbe01, 0x3e9f2bdb59cf65, 0x280e9fb0e7ca33, 0x1070b62c3bea} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0xd30, 0x670, 0x165f, 0x18f8, 0x3fe, 0x11e5, 0x663, 0x270, 0x18cb, 0x42b, 0x11c3, 0xe0a, 0x4fc, 0x18ad, 0xfd0, 0x3fa, 0x1957, 0x1544, 0x941, 0x181e, 0x661, 0x18b9, 0x74a, 0xa70, 0x866, 0x11f8, 0xd20, 0xae3, 0x19b8, 0xb} +#elif RADIX == 32 +{0x33834c1, 0xb1f165f, 0x38f28ff, 0x2c4e066, 0xc3215e3, 0x3f1c151, 0xfd0c569, 0x655c7f4, 0xc941aa2, 0xc998703, 0xe074ac5, 0xfc21994, 0x5c6d208, 0x1d6e1} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x28ffb1f165f33834, 0x3215e32c4e06638f, 0x4fd0c5693f1c151c, 0x703c941aa2655c7f, 0x21994e074ac5c998, 0x311e6e15c6d208fc} +#else +{0x7f63e2cbe67069, 0xcb138198e3ca3, 0x49f8e0a8e190af, 0x2655c7f4fd0c56, 0x39330e07928354, 0x3f08665381d2b1, 0x84f370ae36904} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1f47, 0x9e3, 0x5d, 0xdc6, 0x18a3, 0x1c99, 0x1253, 0x179f, 0x16b, 0x1b87, 0x27a, 0x9f8, 0x1064, 0x9ed, 0xe66, 0x47d, 0x4e9, 0x1805, 0x1349, 0x40, 0x1bbd, 0x7f6, 0x1c57, 0x1f9f, 0x11e9, 0x14cf, 0xe61, 0x1892, 0x833, 0x10} +#elif RADIX == 32 +{0x4f1fd1e, 0xdb8c05d, 0x3e4ce28, 0xaef3f25, 0x7adc385, 0x1913f02, 0xe664f6c, 0x93a48fa, 0x1349c02, 0xb6ef408, 0x3fc573f, 0x67c7a7f, 0x124e61a, 0xcf} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xce28db8c05d4f1fd, 0xadc385aef3f253e4, 0xae664f6c1913f027, 0x4081349c0293a48f, 0xc7a7f3fc573fb6ef, 0x79e0cf124e61a67} +#else +{0x51b7180ba9e3fa, 0x16bbcfc94f9338, 0x60c89f813d6e1c, 0x293a48fae664f6, 0x76dde810269380, 0x19f1e9fcff15cf, 0x3cf067892730d} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x5419,0x8a5a,0x8d5e,0x1367,0xae77,0x3903,0xcf47,0xab31,0xf30c,0x9219,0xed73,0x2055,0xcc84,0x4098,0x7046,0x4d6f,0x5c68,0xe641,0x224,0xd5ed,0x977a,0xdfcd,0x15c8,0xe4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8a5a5419,0x13678d5e,0x3903ae77,0xab31cf47,0x9219f30c,0x2055ed73,0x4098cc84,0x4d6f7046,0xe6415c68,0xd5ed0224,0xdfcd977a,0xe415c8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x13678d5e8a5a5419,0xab31cf473903ae77,0x2055ed739219f30c,0x4d6f70464098cc84,0xd5ed0224e6415c68,0xe415c8dfcd977a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x92ea,0x29a5,0x41ba,0x7e17,0x33c,0xaeb2,0x4b64,0x1b0d,0x4be,0xe57e,0x2d2e,0x1554,0xe7f7,0xea1e,0x4267,0x7d2a,0x17cd,0xd234,0xc388,0x147a,0x43e6,0xaa63,0x83bd,0x4f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x29a592ea,0x7e1741ba,0xaeb2033c,0x1b0d4b64,0xe57e04be,0x15542d2e,0xea1ee7f7,0x7d2a4267,0xd23417cd,0x147ac388,0xaa6343e6,0x4f83bd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7e1741ba29a592ea,0x1b0d4b64aeb2033c,0x15542d2ee57e04be,0x7d2a4267ea1ee7f7,0x147ac388d23417cd,0x4f83bdaa6343e6}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa8b7,0x5b17,0x819c,0x465e,0xa6fb,0xb2a1,0x91c3,0x953,0x486,0x4ed4,0xc367,0x75ba,0xcc01,0x115f,0xe242,0xe751,0x4748,0xbf53,0x4ec0,0x950a,0xe08e,0x7ea2,0xec82,0x7d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5b17a8b7,0x465e819c,0xb2a1a6fb,0x95391c3,0x4ed40486,0x75bac367,0x115fcc01,0xe751e242,0xbf534748,0x950a4ec0,0x7ea2e08e,0x7dec82}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x465e819c5b17a8b7,0x95391c3b2a1a6fb,0x75bac3674ed40486,0xe751e242115fcc01,0x950a4ec0bf534748,0x7dec827ea2e08e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xabe7,0x75a5,0x72a1,0xec98,0x5188,0xc6fc,0x30b8,0x54ce,0xcf3,0x6de6,0x128c,0xdfaa,0x337b,0xbf67,0x8fb9,0xb290,0xa397,0x19be,0xfddb,0x2a12,0x6885,0x2032,0xea37,0x1b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x75a5abe7,0xec9872a1,0xc6fc5188,0x54ce30b8,0x6de60cf3,0xdfaa128c,0xbf67337b,0xb2908fb9,0x19bea397,0x2a12fddb,0x20326885,0x1bea37}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xec9872a175a5abe7,0x54ce30b8c6fc5188,0xdfaa128c6de60cf3,0xb2908fb9bf67337b,0x2a12fddb19bea397,0x1bea3720326885}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x703,0xe86d,0xe89e,0xbcf8,0x675b,0xe250,0x9f65,0xe8ec,0x2c83,0x11ca,0x4751,0x192a,0xf9d8,0xf46a,0xeb89,0x4f40,0x2a2c,0xdcf,0xfff9,0x13f9,0x24e7,0x8348,0xb9af,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe86d0703,0xbcf8e89e,0xe250675b,0xe8ec9f65,0x11ca2c83,0x192a4751,0xf46af9d8,0x4f40eb89,0xdcf2a2c,0x13f9fff9,0x834824e7,0x6b9af}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xbcf8e89ee86d0703,0xe8ec9f65e250675b,0x192a475111ca2c83,0x4f40eb89f46af9d8,0x13f9fff90dcf2a2c,0x6b9af834824e7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e40,0xb548,0xf9c7,0x6598,0x7e33,0x25c6,0x6cbf,0x2ef2,0xa630,0xdd99,0xaef2,0xf320,0x4a2,0x93a7,0x4541,0x2f7c,0xbf45,0x1a7a,0x24f4,0x52a9,0xd3b4,0xa12a,0x9d37,0xb0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb5481e40,0x6598f9c7,0x25c67e33,0x2ef26cbf,0xdd99a630,0xf320aef2,0x93a704a2,0x2f7c4541,0x1a7abf45,0x52a924f4,0xa12ad3b4,0xb09d37}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6598f9c7b5481e40,0x2ef26cbf25c67e33,0xf320aef2dd99a630,0x2f7c454193a704a2,0x52a924f41a7abf45,0xb09d37a12ad3b4}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x1e1,0x2283,0x3774,0x83d4,0xf33f,0x1fc,0x2790,0xde59,0xe89d,0xc942,0x2c1b,0x6574,0x55b1,0x3a3c,0x9f11,0xbb0a,0x6813,0xa69,0xff9d,0xc94c,0xdede,0xce6b,0x18c6,0xa9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x228301e1,0x83d43774,0x1fcf33f,0xde592790,0xc942e89d,0x65742c1b,0x3a3c55b1,0xbb0a9f11,0xa696813,0xc94cff9d,0xce6bdede,0xa918c6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x83d43774228301e1,0xde59279001fcf33f,0x65742c1bc942e89d,0xbb0a9f113a3c55b1,0xc94cff9d0a696813,0xa918c6ce6bdede}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf8fd,0x1792,0x1761,0x4307,0x98a4,0x1daf,0x609a,0x1713,0xd37c,0xee35,0xb8ae,0xe6d5,0x627,0xb95,0x1476,0xb0bf,0xd5d3,0xf230,0x6,0xec06,0xdb18,0x7cb7,0x4650,0xf9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1792f8fd,0x43071761,0x1daf98a4,0x1713609a,0xee35d37c,0xe6d5b8ae,0xb950627,0xb0bf1476,0xf230d5d3,0xec060006,0x7cb7db18,0xf94650}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x430717611792f8fd,0x1713609a1daf98a4,0xe6d5b8aeee35d37c,0xb0bf14760b950627,0xec060006f230d5d3,0xf946507cb7db18}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x5419,0x8a5a,0x8d5e,0x1367,0xae77,0x3903,0xcf47,0xab31,0xf30c,0x9219,0xed73,0x2055,0xcc84,0x4098,0x7046,0x4d6f,0x5c68,0xe641,0x224,0xd5ed,0x977a,0xdfcd,0x15c8,0xe4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8a5a5419,0x13678d5e,0x3903ae77,0xab31cf47,0x9219f30c,0x2055ed73,0x4098cc84,0x4d6f7046,0xe6415c68,0xd5ed0224,0xdfcd977a,0xe415c8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x13678d5e8a5a5419,0xab31cf473903ae77,0x2055ed739219f30c,0x4d6f70464098cc84,0xd5ed0224e6415c68,0xe415c8dfcd977a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x92ea,0x29a5,0x41ba,0x7e17,0x33c,0xaeb2,0x4b64,0x1b0d,0x4be,0xe57e,0x2d2e,0x1554,0xe7f7,0xea1e,0x4267,0x7d2a,0x17cd,0xd234,0xc388,0x147a,0x43e6,0xaa63,0x83bd,0x4f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x29a592ea,0x7e1741ba,0xaeb2033c,0x1b0d4b64,0xe57e04be,0x15542d2e,0xea1ee7f7,0x7d2a4267,0xd23417cd,0x147ac388,0xaa6343e6,0x4f83bd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7e1741ba29a592ea,0x1b0d4b64aeb2033c,0x15542d2ee57e04be,0x7d2a4267ea1ee7f7,0x147ac388d23417cd,0x4f83bdaa6343e6}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa8b7,0x5b17,0x819c,0x465e,0xa6fb,0xb2a1,0x91c3,0x953,0x486,0x4ed4,0xc367,0x75ba,0xcc01,0x115f,0xe242,0xe751,0x4748,0xbf53,0x4ec0,0x950a,0xe08e,0x7ea2,0xec82,0x7d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5b17a8b7,0x465e819c,0xb2a1a6fb,0x95391c3,0x4ed40486,0x75bac367,0x115fcc01,0xe751e242,0xbf534748,0x950a4ec0,0x7ea2e08e,0x7dec82}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x465e819c5b17a8b7,0x95391c3b2a1a6fb,0x75bac3674ed40486,0xe751e242115fcc01,0x950a4ec0bf534748,0x7dec827ea2e08e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xabe7,0x75a5,0x72a1,0xec98,0x5188,0xc6fc,0x30b8,0x54ce,0xcf3,0x6de6,0x128c,0xdfaa,0x337b,0xbf67,0x8fb9,0xb290,0xa397,0x19be,0xfddb,0x2a12,0x6885,0x2032,0xea37,0x1b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x75a5abe7,0xec9872a1,0xc6fc5188,0x54ce30b8,0x6de60cf3,0xdfaa128c,0xbf67337b,0xb2908fb9,0x19bea397,0x2a12fddb,0x20326885,0x1bea37}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xec9872a175a5abe7,0x54ce30b8c6fc5188,0xdfaa128c6de60cf3,0xb2908fb9bf67337b,0x2a12fddb19bea397,0x1bea3720326885}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x2d5d,0x46e9,0x4215,0x63b0,0x8358,0xdc91,0x80aa,0x6970,0x4e7d,0x266d,0xc13a,0xe4ea,0x504e,0xbc38,0xdbaf,0x119b,0xa3cc,0x45d8,0x98db,0x7b90,0x3a5b,0xde6a,0x3676,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x46e92d5d,0x63b04215,0xdc918358,0x697080aa,0x266d4e7d,0xe4eac13a,0xbc38504e,0x119bdbaf,0x45d8a3cc,0x7b9098db,0xde6a3a5b,0x83676}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x63b0421546e92d5d,0x697080aadc918358,0xe4eac13a266d4e7d,0x119bdbafbc38504e,0x7b9098db45d8a3cc,0x83676de6a3a5b}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1db1,0x61ae,0x220b,0xc2e,0xa7ee,0xb16a,0x8697,0xf90c,0x7505,0xced5,0x5cf8,0xb601,0x6235,0x27ad,0x9fdf,0x57d0,0xca2,0xa6d2,0x94db,0xb53a,0x8bd2,0xa3ad,0xfe95,0x92}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x61ae1db1,0xc2e220b,0xb16aa7ee,0xf90c8697,0xced57505,0xb6015cf8,0x27ad6235,0x57d09fdf,0xa6d20ca2,0xb53a94db,0xa3ad8bd2,0x92fe95}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc2e220b61ae1db1,0xf90c8697b16aa7ee,0xb6015cf8ced57505,0x57d09fdf27ad6235,0xb53a94dba6d20ca2,0x92fe95a3ad8bd2}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa809,0xf0cf,0xb393,0xf0ab,0x181a,0xb5bc,0x1833,0xb0ea,0xff0e,0x3088,0xb299,0x4f5c,0x5a20,0x5b86,0xad7b,0x9ffd,0x2216,0x4e4c,0xb8eb,0x989,0x712f,0xa798,0x8e8f,0x45}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf0cfa809,0xf0abb393,0xb5bc181a,0xb0ea1833,0x3088ff0e,0x4f5cb299,0x5b865a20,0x9ffdad7b,0x4e4c2216,0x989b8eb,0xa798712f,0x458e8f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf0abb393f0cfa809,0xb0ea1833b5bc181a,0x4f5cb2993088ff0e,0x9ffdad7b5b865a20,0x989b8eb4e4c2216,0x458e8fa798712f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd2a3,0xb916,0xbdea,0x9c4f,0x7ca7,0x236e,0x7f55,0x968f,0xb182,0xd992,0x3ec5,0x1b15,0xafb1,0x43c7,0x2450,0xee64,0x5c33,0xba27,0x6724,0x846f,0xc5a4,0x2195,0xc989,0xf7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb916d2a3,0x9c4fbdea,0x236e7ca7,0x968f7f55,0xd992b182,0x1b153ec5,0x43c7afb1,0xee642450,0xba275c33,0x846f6724,0x2195c5a4,0xf7c989}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c4fbdeab916d2a3,0x968f7f55236e7ca7,0x1b153ec5d992b182,0xee64245043c7afb1,0x846f6724ba275c33,0xf7c9892195c5a4}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x17bf, 0xbb8, 0x485, 0x1296, 0x1b32, 0xe0b, 0x13f9, 0x15f5, 0x1a2b, 0xff6, 0xf53, 0x70a, 0x36c, 0x40f, 0xa89, 0xca6, 0x37e, 0x1488, 0x1796, 0x1be2, 0x1e0f, 0xf10, 0x1628, 0x1a20, 0x1ee7, 0x1e53, 0x48c, 0x94, 0xd53, 0xd} +#elif RADIX == 32 +{0x5dc5efd, 0xa52c485, 0x9705ecc, 0xaebeb3f, 0x537fb68, 0xdb0e14f, 0xa892078, 0xdf994c, 0x5796a44, 0x8783f7c, 0x4162878, 0x29fb9f4, 0x12848cf, 0x2a54c} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x5ecca52c4855dc5e, 0x37fb68aebeb3f970, 0xca892078db0e14f5, 0xf7c5796a440df994, 0xfb9f441628788783, 0x406754c12848cf29} +#else +{0x194a5890abb8bd, 0x22bafacfe5c17b, 0x46d870a7a9bfdb, 0x40df994ca89207, 0x10f07ef8af2d48, 0x4a7ee7d1058a1e, 0xff3aa60942467} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x66d, 0xaee, 0x1121, 0x14a5, 0x1ecc, 0xb82, 0xcfe, 0x1d7d, 0x168a, 0x1bfd, 0x13d4, 0x1c2, 0x18db, 0x903, 0x12a2, 0x1329, 0xdf, 0x1522, 0x15e5, 0x1ef8, 0x783, 0x3c4, 0x58a, 0x1e88, 0x1fb9, 0x794, 0x123, 0x1825, 0x1754, 0x1c} +#elif RADIX == 32 +{0x57719b7, 0x294b121, 0xe5c17b3, 0x2bafacf, 0xd4dfeda, 0x36c3853, 0x2a2481e, 0x37e653, 0x15e5a91, 0x21e0fdf, 0x1058a1e, 0xca7ee7d, 0x4a1233, 0x22d53} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x17b3294b12157719, 0x4dfeda2bafacfe5c, 0x32a2481e36c3853d, 0xfdf15e5a91037e65, 0x7ee7d1058a1e21e0, 0x2e99d5304a1233ca} +#else +{0x665296242aee33, 0x68aebeb3f9705e, 0x71b61c29ea6ff6, 0x1037e6532a2481, 0x443c1fbe2bcb52, 0x729fb9f4416287, 0x70cea98250919} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x3e9, 0x9f6, 0x1c50, 0x27e, 0xa85, 0x39c, 0xa7b, 0x177c, 0xdfc, 0x77e, 0x1490, 0x11b8, 0xd2b, 0x17dc, 0xd7c, 0x16a0, 0xe21, 0xb86, 0x15bb, 0x844, 0x146c, 0xe51, 0xc6d, 0x143d, 0x1d2b, 0x1715, 0x18bb, 0xdc8, 0x55d, 0x16} +#elif RADIX == 32 +{0x4fb0fa6, 
0x44fdc50, 0xb1ce2a1, 0xf2ef8a7, 0x903bf37, 0x4ae3714, 0xd7cbee3, 0x3886d40, 0x95bb5c3, 0x8d1b108, 0x7ac6d72, 0x8af4ae8, 0xb918bbb, 0x2f575} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xe2a144fdc504fb0f, 0x3bf37f2ef8a7b1c, 0xd7cbee34ae37149, 0x10895bb5c33886d4, 0xf4ae87ac6d728d1b, 0x2a55575b918bbb8a} +#else +{0x4289fb8a09f61f, 0x5fcbbe29ec738a, 0x1a571b8a481df9, 0x33886d40d7cbee, 0x51a362112b76b8, 0x62bd2ba1eb1b5c, 0x4eaabadc8c5dd} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x793, 0x1095, 0x8d0, 0x676, 0x2be, 0x1a9d, 0x6d6, 0x1d0, 0x112a, 0x18e1, 0x1741, 0xc68, 0x156d, 0x113f, 0x181e, 0x201, 0xcd7, 0xbb7, 0xdb, 0x64c, 0x181e, 0x63, 0x965, 0xf2, 0xc95, 0x50d, 0x1ec2, 0x1c03, 0x5b4, 0x1b} +#elif RADIX == 32 +{0x84a9e4f, 0x8cec8d0, 0x6d4e8af, 0xa83a06d, 0x41c70c4, 0x5b58d17, 0x81e89fd, 0xb35c403, 0x80db5db, 0x1e078c9, 0xe496503, 0x86b2541, 0x807ec22, 0x166d3} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xe8af8cec8d084a9e, 0x1c70c4a83a06d6d4, 0x381e89fd5b58d174, 0x8c980db5dbb35c40, 0xb2541e4965031e07, 0x14256d3807ec2286} +#else +{0x5f19d91a10953c, 0x12a0e81b5b53a2, 0x6adac68ba0e386, 0x3b35c40381e89f, 0x63c0f19301b6bb, 0x21ac9507925940, 0xa12b69c03f611} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x71d, 0xf0e, 0x506, 0x1aec, 0x3f6, 0x2c1, 0x17dd, 0x43f, 0x1552, 0x1488, 0x10c3, 0x5ea, 0xfd4, 0x634, 0x1eb1, 0x1711, 0x1424, 0xeb1, 0xfe1, 0xa0a, 0x165f, 0x5c8, 0x1544, 0x1493, 0x329, 0x19ec, 0x1db4, 0x983, 0x790, 0x1d} +#elif RADIX == 32 +{0x7871c77, 0xb5d8506, 0xd1608fd, 0x4887f7d, 0xc3a4455, 0xf50bd50, 0xeb131a3, 0xd092e23, 0x4fe1758, 0x4597d41, 0x275442e, 0xf60ca69, 0x307db4c, 0x26e41} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x8fdb5d85067871c, 0x3a44554887f7dd16, 0x3eb131a3f50bd50c, 0xd414fe1758d092e2, 0xca69275442e4597, 0x1e5de41307db4cf6} +#else +{0x7b6bb0a0cf0e38, 0x55221fdf745823, 0x1fa85ea861d222, 0xd092e23eb131a, 0x48b2fa829fc2eb, 0x3d8329a49d510b, 0xf2ef20983eda6} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x704, 0x1718, 0x1f41, 0x1569, 0x1353, 0x403, 0x8ba, 0xd3b, 0x1e9a, 0xca6, 0x1433, 0xc05, 0x2dd, 0xf7d, 0x12c8, 0x1109, 0x1797, 0x4e2, 0xf77, 0x569, 0xfcf, 0x1dd4, 0x11a4, 0x1354, 0x1563, 0x14b7, 0x6ad, 0xf7e, 0x251, 0xe} +#elif RADIX == 32 +{0xb8c1c11, 0xead3f41, 0xa201cd4, 0x69a768b, 0x336537a, 0xb7580b4, 0x2c87be8, 0x5e5e213, 0x2f77271, 0xa3f3cad, 0xa91a4ee, 0x5bd58e6, 0xefc6ada, 0x2f945} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x1cd4ead3f41b8c1c, 0x36537a69a768ba20, 0x32c87be8b7580b43, 0xcad2f772715e5e21, 0xd58e6a91a4eea3f3, 0x480945efc6ada5b} +#else +{0x29d5a7e8371838, 0x69a69da2e88073, 0x45bac05a19b29b, 0x15e5e2132c87be, 
0x547e795a5eee4e, 0x16f5639aa4693b, 0x2404a2f7e356d} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0xf6, 0x15a2, 0x1cbc, 0x185c, 0x9a1, 0xc2f, 0x1123, 0x11, 0xda7, 0x1628, 0x41, 0x1163, 0x12f7, 0x9aa, 0x1235, 0x1444, 0x1c4a, 0x3b6, 0xfee, 0x96, 0x1ed, 0x1f4d, 0x5ec, 0x1bf2, 0x1bca, 0x151d, 0x58f, 0x293, 0x960, 0x20} +#elif RADIX == 32 +{0xad103db, 0x70b9cbc, 0x3617a68, 0x9c02312, 0x41b1436, 0xbde2c60, 0x2354d54, 0x712a889, 0xcfee1db, 0x687b412, 0xe45ecfa, 0x8eef2b7, 0x52658fa, 0x3f580} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x7a6870b9cbcad103, 0x1b14369c02312361, 0x92354d54bde2c604, 0x412cfee1db712a88, 0xef2b7e45ecfa687b, 0x37da58052658fa8e} +#else +{0x50e1739795a207, 0x5a7008c48d85e9, 0x25ef163020d8a1, 0x3712a8892354d5, 0x4d0f68259fdc3b, 0x23bbcadf917b3e, 0xbad2c02932c7d} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xbc5, 0xa1d, 0xe8a, 0xe9c, 0x1af1, 0x13b5, 0xa68, 0x4a4, 0x135e, 0x171, 0x716, 0x2c2, 0x1c2b, 0x332, 0x349, 0x138c, 0x168b, 0x21c, 0x1629, 0xb97, 0x186, 0x629, 0x6e8, 0x497, 0x128c, 0x19d2, 0xcc1, 0x121, 0x250, 0x1a} +#elif RADIX == 32 +{0x50eaf17, 0x5d38e8a, 0x89daebc, 0x78948a6, 0x160b8cd, 0xac5847, 0x3491997, 0x5a2e718, 0xf62910e, 0x4861972, 0x2e6e831, 0xe94a309, 0x242cc1c, 0xd940} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xaebc5d38e8a50eaf, 0x60b8cd78948a689d, 0x834919970ac58471, 0x972f62910e5a2e71, 0x4a3092e6e8314861, 0x5e4940242cc1ce9} +#else +{0x78ba71d14a1d5e, 0x35e25229a276ba, 0x38562c238b05c6, 0x65a2e718349199, 0x290c32e5ec5221, 0x3a528c24b9ba0c, 0x2f24a0121660e} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xc761,0xa939,0x45c3,0xca85,0x6b48,0xe2f1,0x7d5b,0x8e01,0xb824,0x1b34,0x31da,0x3fa1,0xaae4,0x143a,0xc0b8,0xe32f,0x843,0x2c29,0xd142,0x4e56,0xd55c,0xc504,0x24cd,0xc5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa939c761,0xca8545c3,0xe2f16b48,0x8e017d5b,0x1b34b824,0x3fa131da,0x143aaae4,0xe32fc0b8,0x2c290843,0x4e56d142,0xc504d55c,0xc524cd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xca8545c3a939c761,0x8e017d5be2f16b48,0x3fa131da1b34b824,0xe32fc0b8143aaae4,0x4e56d1422c290843,0xc524cdc504d55c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe87a,0x6127,0x8903,0x2308,0xfbed,0x12dc,0x3209,0xc898,0x715,0x2ab8,0x446f,0x1078,0x2808,0x2493,0x34a9,0xf92b,0x96f4,0xa3a,0xf860,0xd4ad,0xc8a9,0x410d,0x626b,0xe5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6127e87a,0x23088903,0x12dcfbed,0xc8983209,0x2ab80715,0x1078446f,0x24932808,0xf92b34a9,0xa3a96f4,0xd4adf860,0x410dc8a9,0xe5626b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x230889036127e87a,0xc898320912dcfbed,0x1078446f2ab80715,0xf92b34a924932808,0xd4adf8600a3a96f4,0xe5626b410dc8a9}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa06f,0xcd2e,0x736e,0xdfff,0x28f9,0xe694,0x7c39,0xab16,0x21f5,0xbc4,0x4883,0xe8f6,0xcaae,0xf3c3,0xa9f1,0x8d46,0x16e4,0xa8e2,0x5d95,0x3838,0xa6f1,0x34b7,0x94da,0xde}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcd2ea06f,0xdfff736e,0xe69428f9,0xab167c39,0xbc421f5,0xe8f64883,0xf3c3caae,0x8d46a9f1,0xa8e216e4,0x38385d95,0x34b7a6f1,0xde94da}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdfff736ecd2ea06f,0xab167c39e69428f9,0xe8f648830bc421f5,0x8d46a9f1f3c3caae,0x38385d95a8e216e4,0xde94da34b7a6f1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x389f,0x56c6,0xba3c,0x357a,0x94b7,0x1d0e,0x82a4,0x71fe,0x47db,0xe4cb,0xce25,0xc05e,0x551b,0xebc5,0x3f47,0x1cd0,0xf7bc,0xd3d6,0x2ebd,0xb1a9,0x2aa3,0x3afb,0xdb32,0x3a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x56c6389f,0x357aba3c,0x1d0e94b7,0x71fe82a4,0xe4cb47db,0xc05ece25,0xebc5551b,0x1cd03f47,0xd3d6f7bc,0xb1a92ebd,0x3afb2aa3,0x3adb32}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x357aba3c56c6389f,0x71fe82a41d0e94b7,0xc05ece25e4cb47db,0x1cd03f47ebc5551b,0xb1a92ebdd3d6f7bc,0x3adb323afb2aa3}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac65,0x6102,0xe1f0,0x7b39,0x64be,0xff4d,0x8256,0xd11b,0x4645,0x7a89,0x814c,0x66e7,0x77a,0xc4d8,0xe691,0x1f42,0xfdb9,0x547b,0x752,0x18d9,0x9279,0xe604,0xbed4,0xec}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6102ac65,0x7b39e1f0,0xff4d64be,0xd11b8256,0x7a894645,0x66e7814c,0xc4d8077a,0x1f42e691,0x547bfdb9,0x18d90752,0xe6049279,0xecbed4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7b39e1f06102ac65,0xd11b8256ff4d64be,0x66e7814c7a894645,0x1f42e691c4d8077a,0x18d90752547bfdb9,0xecbed4e6049279}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3380,0xe477,0x9e18,0x218d,0xddc6,0x4cc5,0xb33f,0x59e7,0xb291,0xa1a1,0x8f77,0x92a2,0x480e,0x82af,0x40f1,0x5d48,0x83b0,0x4229,0xcb9e,0xff7a,0x2e32,0xa78,0x71fc,0x16}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe4773380,0x218d9e18,0x4cc5ddc6,0x59e7b33f,0xa1a1b291,0x92a28f77,0x82af480e,0x5d4840f1,0x422983b0,0xff7acb9e,0xa782e32,0x1671fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x218d9e18e4773380,0x59e7b33f4cc5ddc6,0x92a28f77a1a1b291,0x5d4840f182af480e,0xff7acb9e422983b0,0x1671fc0a782e32}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xbb17,0xaa62,0x774e,0x2e59,0xe440,0xebce,0x874e,0xbfdb,0x3afd,0xa7ba,0xded2,0x78aa,0x7568,0xcfed,0x5633,0xa1de,0x4c5e,0x5796,0x5727,0xec25,0xac0a,0xce9c,0x3f13,0x98}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xaa62bb17,0x2e59774e,0xebcee440,0xbfdb874e,0xa7ba3afd,0x78aaded2,0xcfed7568,0xa1de5633,0x57964c5e,0xec255727,0xce9cac0a,0x983f13}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2e59774eaa62bb17,0xbfdb874eebcee440,0x78aaded2a7ba3afd,0xa1de5633cfed7568,0xec25572757964c5e,0x983f13ce9cac0a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x539b,0x9efd,0x1e0f,0x84c6,0x9b41,0xb2,0x7da9,0x2ee4,0xb9ba,0x8576,0x7eb3,0x9918,0xf885,0x3b27,0x196e,0xe0bd,0x246,0xab84,0xf8ad,0xe726,0x6d86,0x19fb,0x412b,0x13}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9efd539b,0x84c61e0f,0xb29b41,0x2ee47da9,0x8576b9ba,0x99187eb3,0x3b27f885,0xe0bd196e,0xab840246,0xe726f8ad,0x19fb6d86,0x13412b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x84c61e0f9efd539b,0x2ee47da900b29b41,0x99187eb38576b9ba,0xe0bd196e3b27f885,0xe726f8adab840246,0x13412b19fb6d86}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc761,0xa939,0x45c3,0xca85,0x6b48,0xe2f1,0x7d5b,0x8e01,0xb824,0x1b34,0x31da,0x3fa1,0xaae4,0x143a,0xc0b8,0xe32f,0x843,0x2c29,0xd142,0x4e56,0xd55c,0xc504,0x24cd,0xc5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa939c761,0xca8545c3,0xe2f16b48,0x8e017d5b,0x1b34b824,0x3fa131da,0x143aaae4,0xe32fc0b8,0x2c290843,0x4e56d142,0xc504d55c,0xc524cd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xca8545c3a939c761,0x8e017d5be2f16b48,0x3fa131da1b34b824,0xe32fc0b8143aaae4,0x4e56d1422c290843,0xc524cdc504d55c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe87a,0x6127,0x8903,0x2308,0xfbed,0x12dc,0x3209,0xc898,0x715,0x2ab8,0x446f,0x1078,0x2808,0x2493,0x34a9,0xf92b,0x96f4,0xa3a,0xf860,0xd4ad,0xc8a9,0x410d,0x626b,0xe5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6127e87a,0x23088903,0x12dcfbed,0xc8983209,0x2ab80715,0x1078446f,0x24932808,0xf92b34a9,0xa3a96f4,0xd4adf860,0x410dc8a9,0xe5626b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x230889036127e87a,0xc898320912dcfbed,0x1078446f2ab80715,0xf92b34a924932808,0xd4adf8600a3a96f4,0xe5626b410dc8a9}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa06f,0xcd2e,0x736e,0xdfff,0x28f9,0xe694,0x7c39,0xab16,0x21f5,0xbc4,0x4883,0xe8f6,0xcaae,0xf3c3,0xa9f1,0x8d46,0x16e4,0xa8e2,0x5d95,0x3838,0xa6f1,0x34b7,0x94da,0xde}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcd2ea06f,0xdfff736e,0xe69428f9,0xab167c39,0xbc421f5,0xe8f64883,0xf3c3caae,0x8d46a9f1,0xa8e216e4,0x38385d95,0x34b7a6f1,0xde94da}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdfff736ecd2ea06f,0xab167c39e69428f9,0xe8f648830bc421f5,0x8d46a9f1f3c3caae,0x38385d95a8e216e4,0xde94da34b7a6f1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x389f,0x56c6,0xba3c,0x357a,0x94b7,0x1d0e,0x82a4,0x71fe,0x47db,0xe4cb,0xce25,0xc05e,0x551b,0xebc5,0x3f47,0x1cd0,0xf7bc,0xd3d6,0x2ebd,0xb1a9,0x2aa3,0x3afb,0xdb32,0x3a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x56c6389f,0x357aba3c,0x1d0e94b7,0x71fe82a4,0xe4cb47db,0xc05ece25,0xebc5551b,0x1cd03f47,0xd3d6f7bc,0xb1a92ebd,0x3afb2aa3,0x3adb32}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x357aba3c56c6389f,0x71fe82a41d0e94b7,0xc05ece25e4cb47db,0x1cd03f47ebc5551b,0xb1a92ebdd3d6f7bc,0x3adb323afb2aa3}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xb919,0xcfad,0xeb7f,0x81f8,0x4d97,0xf272,0x4300,0xdd38,0x1b01,0x826,0x1894,0x3e43,0x7310,0xa84,0x4161,0x7c63,0xec4,0x9625,0xe475,0xadc9,0x5a7,0xfa6a,0xb7e3,0x7e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcfadb919,0x81f8eb7f,0xf2724d97,0xdd384300,0x8261b01,0x3e431894,0xa847310,0x7c634161,0x96250ec4,0xadc9e475,0xfa6a05a7,0x7eb7e3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x81f8eb7fcfadb919,0xdd384300f2724d97,0x3e43189408261b01,0x7c6341610a847310,0xadc9e47596250ec4,0x7eb7e3fa6a05a7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e83,0xade4,0x9d21,0x2e51,0x42e5,0xd3,0xac79,0xe0a8,0x32e2,0xfcf2,0xb504,0xc941,0xa0d0,0x8016,0x5485,0x3331,0xabd7,0xc296,0xf76e,0xef5,0xce39,0x8e31,0x165c,0x56}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xade41e83,0x2e519d21,0xd342e5,0xe0a8ac79,0xfcf232e2,0xc941b504,0x8016a0d0,0x33315485,0xc296abd7,0xef5f76e,0x8e31ce39,0x56165c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2e519d21ade41e83,0xe0a8ac7900d342e5,0xc941b504fcf232e2,0x333154858016a0d0,0xef5f76ec296abd7,0x56165c8e31ce39}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xdd11,0x6e27,0xfbdb,0xf5d9,0xd6cb,0x9fef,0xc59a,0x7a4,0xfbd,0x5c3e,0xbc2,0xd091,0x6546,0xc9d0,0x193e,0x93fa,0x776,0x2763,0xdecd,0xbbe3,0xcec1,0x6abf,0x9070,0x66}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6e27dd11,0xf5d9fbdb,0x9fefd6cb,0x7a4c59a,0x5c3e0fbd,0xd0910bc2,0xc9d06546,0x93fa193e,0x27630776,0xbbe3decd,0x6abfcec1,0x669070}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf5d9fbdb6e27dd11,0x7a4c59a9fefd6cb,0xd0910bc25c3e0fbd,0x93fa193ec9d06546,0xbbe3decd27630776,0x6690706abfcec1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x46e7,0x3052,0x1480,0x7e07,0xb268,0xd8d,0xbcff,0x22c7,0xe4fe,0xf7d9,0xe76b,0xc1bc,0x8cef,0xf57b,0xbe9e,0x839c,0xf13b,0x69da,0x1b8a,0x5236,0xfa58,0x595,0x481c,0x81}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x305246e7,0x7e071480,0xd8db268,0x22c7bcff,0xf7d9e4fe,0xc1bce76b,0xf57b8cef,0x839cbe9e,0x69daf13b,0x52361b8a,0x595fa58,0x81481c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7e071480305246e7,0x22c7bcff0d8db268,0xc1bce76bf7d9e4fe,0x839cbe9ef57b8cef,0x52361b8a69daf13b,0x81481c0595fa58}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0xb89, 0xf4d, 0xbd8, 0x18d2, 0x781, 0x1f79, 0xef5, 0xcfd, 0xa7d, 0x121a, 0x59e, 0x18a3, 0x2b, 0x122f, 0x9d5, 0x18aa, 0x105f, 0x1bcd, 0x871, 0x1f9d, 0xae4, 0xc5c, 0x385, 0xbe8, 0x1878, 0xcd8, 0x7a1, 0x7c8, 0x5b4, 0xe} +#elif RADIX == 32 +{0x7a6ae25, 0x71a4bd8, 0x5fbc9e0, 0xf59faef, 0x9e90d29, 0xaf1465, 0x9d59178, 0xc17f154, 0xa871de6, 0xe2b93f3, 0xd038562, 0x6c61e17, 0xf907a16, 0x306d0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xc9e071a4bd87a6ae, 0xe90d29f59faef5fb, 0x49d591780af14659, 0x3f3a871de6c17f15, 0x61e17d038562e2b9, 0x9956d0f907a166c} +#else +{0x40e3497b0f4d5c, 0x27d67ebbd7ef27, 0x40578a32cf4869, 0x6c17f1549d5917, 0x5c5727e750e3bc, 0x1b18785f40e158, 0x4cab687c83d0b} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0xb60, 0x3d3, 0x12f6, 0xe34, 0x9e0, 0xfde, 0xbbd, 0xb3f, 0x129f, 0x1486, 0x1967, 0x1e28, 0x180a, 0xc8b, 0x1275, 0x1e2a, 0xc17, 0xef3, 0xa1c, 0x7e7, 0x2b9, 0xb17, 0xe1, 0x2fa, 0x61e, 0xb36, 0x1e8, 0x1f2, 0x156d, 0xc} +#elif RADIX == 32 +{0x1e9ad81, 0x1c692f6, 0xd7ef278, 0x7d67ebb, 0x67a434a, 0x2bc519, 0x275645e, 0xb05fc55, 0xea1c779, 0xb8ae4fc, 0xf40e158, 0x9b18785, 0x3e41e85, 0x245b4} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf2781c692f61e9ad, 0x7a434a7d67ebbd7e, 0x5275645e02bc5196, 0x4fcea1c779b05fc5, 0x18785f40e158b8ae, 0x20e55b43e41e859b} +#else +{0x7038d25ec3d35b, 0x29f59faef5fbc9, 0x7015e28cb3d21a, 0x1b05fc55275645, 0x1715c9f9d438ef, 0x66c61e17d03856, 0x32ada1f20f42} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x441, 0x1774, 0x1527, 0x106a, 0x577, 0x3fc, 0xf92, 0x12c4, 0x96a, 0x10ea, 0x10f5, 0x11c9, 0x1f8, 0x1407, 0x1bcc, 0x16c4, 0x15c1, 0x790, 0x5bc, 0x1c28, 0xbc6, 0x123c, 0xf19, 0x1d6f, 0x361, 0x1fcd, 0x1dc9, 0x20c, 0x17c6, 0x6} +#elif RADIX == 32 +{0xbba1104, 0xe0d5527, 0x21fe15d, 
0xaa588f9, 0xf587525, 0x7e23930, 0xbcca038, 0x5706d89, 0x5bc3c8, 0xe2f1b85, 0xdef1991, 0xe68d87a, 0x419dc9f, 0x35f18} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xe15de0d5527bba11, 0x587525aa588f921f, 0x9bcca0387e23930f, 0xb8505bc3c85706d8, 0x8d87adef1991e2f1, 0x139f18419dc9fe6} +#else +{0x3bc1aaa4f77422, 0x16a9623e487f85, 0x43f11c987ac3a9, 0x5706d89bcca03, 0x3c5e370a0b7879, 0x79a361eb7bc664, 0x9cf8c20cee4f} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x98a, 0x1bbb, 0x7d8, 0xd84, 0x3fe, 0x90b, 0xfe8, 0x12c3, 0x1e84, 0xde3, 0xbe1, 0x1217, 0x1925, 0x84a, 0xa0e, 0x7cd, 0x1854, 0x768, 0x6e6, 0x1d87, 0xfac, 0x6df, 0x109b, 0x64d, 0x9f2, 0x596, 0x435, 0x1918, 0x1095, 0x0} +#elif RADIX == 32 +{0xddda628, 0x9b087d8, 0x84858ff, 0x12586fe, 0xe16f1fa, 0x49642eb, 0xa0e4256, 0x6150f9a, 0xe6e63b4, 0xfbeb3b0, 0x9b09b36, 0xcb27c8c, 0x2304352, 0x4257} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x58ff9b087d8ddda6, 0x16f1fa12586fe848, 0xaa0e425649642ebe, 0x3b0e6e63b46150f9, 0x27c8c9b09b36fbeb, 0xa2c2572304352cb} +#else +{0x7f3610fb1bbb4c, 0x684961bfa12163, 0x324b2175f0b78f, 0x46150f9aa0e425, 0x5f7d6761cdcc76, 0x32c9f2326c26cd, 0x51612b91821a9} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x17ec, 0x6b9, 0x1dc0, 0x1783, 0x18ee, 0xdd4, 0x1c7f, 0x1fb2, 0x16b0, 0x196e, 0x1e5a, 0x1fda, 0x11f9, 0x117, 0x1c30, 0x1a47, 0x2a2, 0x19e6, 0x1347, 0x2bb, 0x1463, 0x1f37, 0xa64, 0x3c6, 0x1910, 0x2bc, 0xbc0, 0x17e8, 0x1cfd, 0xa} +#elif RADIX == 32 +{0x35cdfb1, 0xaf07dc0, 0xf6ea63b, 0xc3f65c7, 0x5acb75a, 0x7e7fb5e, 0xc3008bc, 0xa8b48f, 0x7347cf3, 0xbd18c57, 0x8ca64f9, 0x5e64407, 0xfd0bc01, 0x163f6} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xa63baf07dc035cdf, 0xacb75ac3f65c7f6e, 0xfc3008bc7e7fb5e5, 0xc577347cf30a8b48, 0x644078ca64f9bd18, 0x2d073f6fd0bc015e} +#else +{0x775e0fb806b9bf, 0x6b0fd971fdba98, 0x63f3fdaf2d65ba, 0x30a8b48fc3008b, 0x37a318aee68f9e, 0x5799101e32993e, 0x6439fb7e85e00} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x440, 0x172e, 0x4f, 0x1e07, 0x15ce, 0x1b55, 0x68e, 0x2c, 0x13bb, 0x1f43, 0x1dda, 0x1fb4, 0xe54, 0x1502, 0x723, 0x7e7, 0x1147, 0x1ba0, 0x3d0, 0xf7c, 0x1754, 0x5fc, 0x1098, 0x16aa, 0x182, 0x1c1d, 0x18e9, 0x13ce, 0xbae, 0x18} +#elif RADIX == 32 +{0xb971102, 0xbc0e04f, 0xedaad73, 0xec05868, 0xdafa1ce, 0x953f69d, 0x723a813, 0x451cfce, 0x83d0dd0, 0xe5d51ef, 0x550982f, 0xe860ad, 0x79d8e9e, 0x40eba} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xad73bc0e04fb9711, 0xafa1ceec05868eda, 0xe723a813953f69dd, 0x1ef83d0dd0451cfc, 0x860ad550982fe5d5, 0xc2eba79d8e9e0e} +#else +{0x67781c09f72e22, 0x3bb0161a3b6ab5, 0x1ca9fb4eed7d0e, 0x451cfce723a81, 0x7cbaa3df07a1ba, 
0x3a182b554260b, 0x6175d3cec74f} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x18c5, 0x1326, 0x1d4d, 0x19eb, 0xea, 0x947, 0x1adf, 0xbf5, 0xafe, 0x1225, 0x18a0, 0xb3a, 0x8e0, 0xaea, 0x17aa, 0x19a5, 0x912, 0x634, 0x15c7, 0x1df7, 0x13cb, 0x1894, 0xeaa, 0xa69, 0x6ca, 0x1b49, 0x26f, 0x1f50, 0xd92, 0x6} +#elif RADIX == 32 +{0x9936314, 0xb3d7d4d, 0xf4a383a, 0xf97ebad, 0xa0912ab, 0x3816758, 0x7aa5752, 0x244b34b, 0xf5c731a, 0xa4f2fbe, 0xd2eaac4, 0xa49b294, 0xea026fd, 0x3364b} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x383ab3d7d4d99363, 0x912abf97ebadf4a, 0xb7aa57523816758a, 0xfbef5c731a244b34, 0x9b294d2eaac4a4f2, 0x54764bea026fda4} +#else +{0x7567afa9b326c6, 0x2fe5faeb7d28e0, 0x11c0b3ac504895, 0x2244b34b7aa575, 0x149e5f7deb8e63, 0x6926ca534baab1, 0x2a3b25f50137e} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x132f, 0x6d5, 0x95b, 0xa68, 0x1814, 0x12d3, 0x1f1e, 0x857, 0x14fa, 0xcf, 0x1f19, 0xe1b, 0x1cf7, 0xa53, 0x1455, 0x5ef, 0x3e2, 0x199c, 0x1162, 0x38d, 0x174b, 0x794, 0xef6, 0xf74, 0x9c, 0x1f55, 0x1c4d, 0x56f, 0x1638, 0x19} +#elif RADIX == 32 +{0x36accbf, 0x14d095b, 0xe969e05, 0xe90aff1, 0x19067d3, 0x3ddc37f, 0x455529f, 0xf88bdf, 0xb162cce, 0xa5d2c71, 0xe8ef63c, 0xaa8271e, 0xadfc4df, 0xa8e0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x9e0514d095b36acc, 0x9067d3e90aff1e96, 0xf455529f3ddc37f1, 0xc71b162cce0f88bd, 0x8271ee8ef63ca5d2, 0x30898e0adfc4dfaa} +#else +{0xa29a12b66d599, 0x4fa42bfc7a5a78, 0x79eee1bf8c833e, 0x60f88bdf455529, 0x14ba58e362c599, 0x6aa09c7ba3bd8f, 0x804c7056fe26f} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xe7,0x36f4,0xe2a7,0x986d,0x214b,0xf464,0x7818,0x49f7,0x5ea0,0x73e7,0x9e17,0x85a0,0xd1cc,0xc33d,0x7833,0xbb28,0xba60,0x69d5,0xd73c,0x879e,0x6732,0x905,0xeefd,0xc8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x36f400e7,0x986de2a7,0xf464214b,0x49f77818,0x73e75ea0,0x85a09e17,0xc33dd1cc,0xbb287833,0x69d5ba60,0x879ed73c,0x9056732,0xc8eefd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x986de2a736f400e7,0x49f77818f464214b,0x85a09e1773e75ea0,0xbb287833c33dd1cc,0x879ed73c69d5ba60,0xc8eefd09056732}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x925e,0xff6f,0xa6dc,0x2eff,0xb2a6,0xe88c,0xab24,0x6829,0x91eb,0x118,0x77ac,0x8506,0x9934,0x5a86,0x5cac,0xfd15,0x5869,0xc232,0xebcf,0xcdef,0x254,0xf9e4,0xcc7b,0x96}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xff6f925e,0x2effa6dc,0xe88cb2a6,0x6829ab24,0x11891eb,0x850677ac,0x5a869934,0xfd155cac,0xc2325869,0xcdefebcf,0xf9e40254,0x96cc7b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2effa6dcff6f925e,0x6829ab24e88cb2a6,0x850677ac011891eb,0xfd155cac5a869934,0xcdefebcfc2325869,0x96cc7bf9e40254}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xae19,0xed4d,0x178c,0x7829,0x7b15,0x4808,0xdab3,0xebc6,0x25cb,0x3298,0xe9bd,0x3d5e,0xe9a4,0x960,0x92e6,0xb3ea,0x33e,0xf491,0x2343,0xbdb9,0xa17d,0x63a5,0x767f,0x41}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xed4dae19,0x7829178c,0x48087b15,0xebc6dab3,0x329825cb,0x3d5ee9bd,0x960e9a4,0xb3ea92e6,0xf491033e,0xbdb92343,0x63a5a17d,0x41767f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7829178ced4dae19,0xebc6dab348087b15,0x3d5ee9bd329825cb,0xb3ea92e60960e9a4,0xbdb92343f491033e,0x41767f63a5a17d}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xff19,0xc90b,0x1d58,0x6792,0xdeb4,0xb9b,0x87e7,0xb608,0xa15f,0x8c18,0x61e8,0x7a5f,0x2e33,0x3cc2,0x87cc,0x44d7,0x459f,0x962a,0x28c3,0x7861,0x98cd,0xf6fa,0x1102,0x37}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc90bff19,0x67921d58,0xb9bdeb4,0xb60887e7,0x8c18a15f,0x7a5f61e8,0x3cc22e33,0x44d787cc,0x962a459f,0x786128c3,0xf6fa98cd,0x371102}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x67921d58c90bff19,0xb60887e70b9bdeb4,0x7a5f61e88c18a15f,0x44d787cc3cc22e33,0x786128c3962a459f,0x371102f6fa98cd}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3d63,0xdad1,0xf501,0xd58f,0x8741,0xd265,0xf8bd,0xb3b9,0xac08,0xfc8b,0x45ab,0xbcdf,0x501,0x9f7,0x10ed,0x102f,0xc6e3,0xdc57,0xf892,0x8db4,0x2c76,0x21ab,0x2bc3,0x8e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdad13d63,0xd58ff501,0xd2658741,0xb3b9f8bd,0xfc8bac08,0xbcdf45ab,0x9f70501,0x102f10ed,0xdc57c6e3,0x8db4f892,0x21ab2c76,0x8e2bc3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd58ff501dad13d63,0xb3b9f8bdd2658741,0xbcdf45abfc8bac08,0x102f10ed09f70501,0x8db4f892dc57c6e3,0x8e2bc321ab2c76}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc998,0x418c,0xa8e4,0x2354,0x622a,0xb76d,0x5487,0xdad9,0x1672,0x522b,0xa00f,0xdfa5,0x296b,0xe17c,0x595e,0x91e1,0xa22d,0xe126,0x904c,0x9288,0x5075,0xc6c5,0x61b0,0xb1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x418cc998,0x2354a8e4,0xb76d622a,0xdad95487,0x522b1672,0xdfa5a00f,0xe17c296b,0x91e1595e,0xe126a22d,0x9288904c,0xc6c55075,0xb161b0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2354a8e4418cc998,0xdad95487b76d622a,0xdfa5a00f522b1672,0x91e1595ee17c296b,0x9288904ce126a22d,0xb161b0c6c55075}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x1271,0x594e,0x16ee,0x35fa,0xaf0e,0x11b2,0x1fca,0x24b7,0xa3e3,0x2bcc,0xc2f0,0x6409,0xf8e1,0x6a8f,0x67e,0xe7ee,0xad00,0x2b9a,0x6813,0x5e0a,0x6dec,0x48f5,0xbd1d,0xb3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x594e1271,0x35fa16ee,0x11b2af0e,0x24b71fca,0x2bcca3e3,0x6409c2f0,0x6a8ff8e1,0xe7ee067e,0x2b9aad00,0x5e0a6813,0x48f56dec,0xb3bd1d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x35fa16ee594e1271,0x24b71fca11b2af0e,0x6409c2f02bcca3e3,0xe7ee067e6a8ff8e1,0x5e0a68132b9aad00,0xb3bd1d48f56dec}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc29d,0x252e,0xafe,0x2a70,0x78be,0x2d9a,0x742,0x4c46,0x53f7,0x374,0xba54,0x4320,0xfafe,0xf608,0xef12,0xefd0,0x391c,0x23a8,0x76d,0x724b,0xd389,0xde54,0xd43c,0x71}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x252ec29d,0x2a700afe,0x2d9a78be,0x4c460742,0x37453f7,0x4320ba54,0xf608fafe,0xefd0ef12,0x23a8391c,0x724b076d,0xde54d389,0x71d43c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2a700afe252ec29d,0x4c4607422d9a78be,0x4320ba54037453f7,0xefd0ef12f608fafe,0x724b076d23a8391c,0x71d43cde54d389}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe7,0x36f4,0xe2a7,0x986d,0x214b,0xf464,0x7818,0x49f7,0x5ea0,0x73e7,0x9e17,0x85a0,0xd1cc,0xc33d,0x7833,0xbb28,0xba60,0x69d5,0xd73c,0x879e,0x6732,0x905,0xeefd,0xc8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x36f400e7,0x986de2a7,0xf464214b,0x49f77818,0x73e75ea0,0x85a09e17,0xc33dd1cc,0xbb287833,0x69d5ba60,0x879ed73c,0x9056732,0xc8eefd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x986de2a736f400e7,0x49f77818f464214b,0x85a09e1773e75ea0,0xbb287833c33dd1cc,0x879ed73c69d5ba60,0xc8eefd09056732}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x925e,0xff6f,0xa6dc,0x2eff,0xb2a6,0xe88c,0xab24,0x6829,0x91eb,0x118,0x77ac,0x8506,0x9934,0x5a86,0x5cac,0xfd15,0x5869,0xc232,0xebcf,0xcdef,0x254,0xf9e4,0xcc7b,0x96}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xff6f925e,0x2effa6dc,0xe88cb2a6,0x6829ab24,0x11891eb,0x850677ac,0x5a869934,0xfd155cac,0xc2325869,0xcdefebcf,0xf9e40254,0x96cc7b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2effa6dcff6f925e,0x6829ab24e88cb2a6,0x850677ac011891eb,0xfd155cac5a869934,0xcdefebcfc2325869,0x96cc7bf9e40254}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xae19,0xed4d,0x178c,0x7829,0x7b15,0x4808,0xdab3,0xebc6,0x25cb,0x3298,0xe9bd,0x3d5e,0xe9a4,0x960,0x92e6,0xb3ea,0x33e,0xf491,0x2343,0xbdb9,0xa17d,0x63a5,0x767f,0x41}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xed4dae19,0x7829178c,0x48087b15,0xebc6dab3,0x329825cb,0x3d5ee9bd,0x960e9a4,0xb3ea92e6,0xf491033e,0xbdb92343,0x63a5a17d,0x41767f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7829178ced4dae19,0xebc6dab348087b15,0x3d5ee9bd329825cb,0xb3ea92e60960e9a4,0xbdb92343f491033e,0x41767f63a5a17d}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xff19,0xc90b,0x1d58,0x6792,0xdeb4,0xb9b,0x87e7,0xb608,0xa15f,0x8c18,0x61e8,0x7a5f,0x2e33,0x3cc2,0x87cc,0x44d7,0x459f,0x962a,0x28c3,0x7861,0x98cd,0xf6fa,0x1102,0x37}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc90bff19,0x67921d58,0xb9bdeb4,0xb60887e7,0x8c18a15f,0x7a5f61e8,0x3cc22e33,0x44d787cc,0x962a459f,0x786128c3,0xf6fa98cd,0x371102}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x67921d58c90bff19,0xb60887e70b9bdeb4,0x7a5f61e88c18a15f,0x44d787cc3cc22e33,0x786128c3962a459f,0x371102f6fa98cd}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xd70d,0x31e4,0xa551,0x7483,0x6f09,0x34d,0x6a80,0x85f,0x6b11,0xe29b,0x188,0x38d2,0x85b,0xa241,0xc423,0xddc8,0x3260,0x1722,0xf3a4,0x7cf7,0x36e8,0x7955,0xeeb9,0xc6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x31e4d70d,0x7483a551,0x34d6f09,0x85f6a80,0xe29b6b11,0x38d20188,0xa241085b,0xddc8c423,0x17223260,0x7cf7f3a4,0x795536e8,0xc6eeb9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7483a55131e4d70d,0x85f6a80034d6f09,0x38d20188e29b6b11,0xddc8c423a241085b,0x7cf7f3a417223260,0xc6eeb9795536e8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x59a9,0x8f53,0xd42f,0xf65b,0x7134,0x4475,0x9543,0x8428,0x4555,0x7d45,0x7bfb,0xe15d,0xe9c2,0x24ec,0xf17f,0x88ea,0x766c,0xbf2d,0x2b42,0x2771,0x5dfc,0xd040,0xfa62,0xc9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8f5359a9,0xf65bd42f,0x44757134,0x84289543,0x7d454555,0xe15d7bfb,0x24ece9c2,0x88eaf17f,0xbf2d766c,0x27712b42,0xd0405dfc,0xc9fa62}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf65bd42f8f5359a9,0x8428954344757134,0xe15d7bfb7d454555,0x88eaf17f24ece9c2,0x27712b42bf2d766c,0xc9fa62d0405dfc}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4b49,0x89b0,0x8c52,0x91ca,0xed1b,0xd527,0x453,0x82d,0xb0eb,0xb6bf,0x3790,0x5816,0x49bb,0xa0a7,0xffc6,0x5530,0x23b9,0x12bb,0x52c4,0x6f51,0x25fd,0x62d,0x723d,0xc6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x89b04b49,0x91ca8c52,0xd527ed1b,0x82d0453,0xb6bfb0eb,0x58163790,0xa0a749bb,0x5530ffc6,0x12bb23b9,0x6f5152c4,0x62d25fd,0xc6723d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x91ca8c5289b04b49,0x82d0453d527ed1b,0x58163790b6bfb0eb,0x5530ffc6a0a749bb,0x6f5152c412bb23b9,0xc6723d062d25fd}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x28f3,0xce1b,0x5aae,0x8b7c,0x90f6,0xfcb2,0x957f,0xf7a0,0x94ee,0x1d64,0xfe77,0xc72d,0xf7a4,0x5dbe,0x3bdc,0x2237,0xcd9f,0xe8dd,0xc5b,0x8308,0xc917,0x86aa,0x1146,0x39}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xce1b28f3,0x8b7c5aae,0xfcb290f6,0xf7a0957f,0x1d6494ee,0xc72dfe77,0x5dbef7a4,0x22373bdc,0xe8ddcd9f,0x83080c5b,0x86aac917,0x391146}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8b7c5aaece1b28f3,0xf7a0957ffcb290f6,0xc72dfe771d6494ee,0x22373bdc5dbef7a4,0x83080c5be8ddcd9f,0x39114686aac917}}} +#endif +}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/endomorphism_action.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/endomorphism_action.h new file mode 100644 index 0000000000..5bb17f554a --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/endomorphism_action.h @@ -0,0 +1,31 @@ +#ifndef ENDOMORPHISM_ACTION_H +#define ENDOMORPHISM_ACTION_H +#include +#include +#include +/** Type for precomputed endomorphism rings applied to precomputed torsion bases. + * + * Precomputed by the precompute scripts. 
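+ *
+ * Entry 0 of CURVES_WITH_ENDOMORPHISMS is the canonical starting curve; its
+ * fields are exposed through the CURVE_E0, BASIS_EVEN and ACTION_* macros
+ * defined below, and the NUM_ALTERNATE_STARTING_CURVES alternate curves are
+ * reached through ALTERNATE_STARTING_CURVES (i.e. CURVES_WITH_ENDOMORPHISMS + 1).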
+ * + * @typedef curve_with_endomorphism_ring_t + * + * @struct curve_with_endomorphism_ring + **/ +typedef struct curve_with_endomorphism_ring { + ec_curve_t curve; + ec_basis_t basis_even; + ibz_mat_2x2_t action_i, action_j, action_k; + ibz_mat_2x2_t action_gen2, action_gen3, action_gen4; +} curve_with_endomorphism_ring_t; +#define CURVE_E0 (CURVES_WITH_ENDOMORPHISMS->curve) +#define BASIS_EVEN (CURVES_WITH_ENDOMORPHISMS->basis_even) +#define ACTION_I (CURVES_WITH_ENDOMORPHISMS->action_i) +#define ACTION_J (CURVES_WITH_ENDOMORPHISMS->action_j) +#define ACTION_K (CURVES_WITH_ENDOMORPHISMS->action_k) +#define ACTION_GEN2 (CURVES_WITH_ENDOMORPHISMS->action_gen2) +#define ACTION_GEN3 (CURVES_WITH_ENDOMORPHISMS->action_gen3) +#define ACTION_GEN4 (CURVES_WITH_ENDOMORPHISMS->action_gen4) +#define NUM_ALTERNATE_STARTING_CURVES 7 +#define ALTERNATE_STARTING_CURVES (CURVES_WITH_ENDOMORPHISMS+1) +extern const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8]; +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/finit.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/finit.c new file mode 100644 index 0000000000..b3808edf07 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/finit.c @@ -0,0 +1,122 @@ +#include "internal.h" + +void +quat_alg_init_set(quat_alg_t *alg, const ibz_t *p) +{ + ibz_init(&(*alg).p); + ibz_copy(&(*alg).p, p); +} +void +quat_alg_finalize(quat_alg_t *alg) +{ + ibz_finalize(&(*alg).p); +} + +void +quat_alg_elem_init(quat_alg_elem_t *elem) +{ + ibz_vec_4_init(&(*elem).coord); + ibz_init(&(*elem).denom); + ibz_set(&(*elem).denom, 1); +} +void +quat_alg_elem_finalize(quat_alg_elem_t *elem) +{ + ibz_vec_4_finalize(&(*elem).coord); + ibz_finalize(&(*elem).denom); +} + +void +ibz_vec_2_init(ibz_vec_2_t *vec) +{ + ibz_init(&((*vec)[0])); + ibz_init(&((*vec)[1])); +} + +void +ibz_vec_2_finalize(ibz_vec_2_t *vec) +{ + ibz_finalize(&((*vec)[0])); + ibz_finalize(&((*vec)[1])); +} + +void +ibz_vec_4_init(ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_init(&(*vec)[i]); + } +} +void +ibz_vec_4_finalize(ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_finalize(&(*vec)[i]); + } +} + +void +ibz_mat_2x2_init(ibz_mat_2x2_t *mat) +{ + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_init(&(*mat)[i][j]); + } + } +} +void +ibz_mat_2x2_finalize(ibz_mat_2x2_t *mat) +{ + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_finalize(&(*mat)[i][j]); + } + } +} + +void +ibz_mat_4x4_init(ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_init(&(*mat)[i][j]); + } + } +} +void +ibz_mat_4x4_finalize(ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_finalize(&(*mat)[i][j]); + } + } +} + +void +quat_lattice_init(quat_lattice_t *lat) +{ + ibz_mat_4x4_init(&(*lat).basis); + ibz_init(&(*lat).denom); + ibz_set(&(*lat).denom, 1); +} +void +quat_lattice_finalize(quat_lattice_t *lat) +{ + ibz_finalize(&(*lat).denom); + ibz_mat_4x4_finalize(&(*lat).basis); +} + +void +quat_left_ideal_init(quat_left_ideal_t *lideal) +{ + quat_lattice_init(&(*lideal).lattice); + ibz_init(&(*lideal).norm); + (*lideal).parent_order = NULL; +} +void +quat_left_ideal_finalize(quat_left_ideal_t *lideal) +{ + ibz_finalize(&(*lideal).norm); + quat_lattice_finalize(&(*lideal).lattice); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fips202.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fips202.c new file mode 100644 index 0000000000..f2992d8c7f --- /dev/null 
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fips202.c @@ -0,0 +1,876 @@ +// SPDX-License-Identifier: PD and Apache-2.0 + +/* FIPS202 implementation based on code from PQClean, + * which is in turn based based on the public domain implementation in + * crypto_hash/keccakc512/simple/ from http://bench.cr.yp.to/supercop.html + * by Ronny Van Keer + * and the public domain "TweetFips202" implementation + * from https://twitter.com/tweetfips202 + * by Gilles Van Assche, Daniel J. Bernstein, and Peter Schwabe */ + +#include +#include +#include +#include + +#include "fips202.h" + +#define NROUNDS 24 +#define ROL(a, offset) (((a) << (offset)) ^ ((a) >> (64 - (offset)))) + +/************************************************* + * Name: load64 + * + * Description: Load 8 bytes into uint64_t in little-endian order + * + * Arguments: - const uint8_t *x: pointer to input byte array + * + * Returns the loaded 64-bit unsigned integer + **************************************************/ +static uint64_t load64(const uint8_t *x) { + uint64_t r = 0; + for (size_t i = 0; i < 8; ++i) { + r |= (uint64_t)x[i] << 8 * i; + } + + return r; +} + +/************************************************* + * Name: store64 + * + * Description: Store a 64-bit integer to a byte array in little-endian order + * + * Arguments: - uint8_t *x: pointer to the output byte array + * - uint64_t u: input 64-bit unsigned integer + **************************************************/ +static void store64(uint8_t *x, uint64_t u) { + for (size_t i = 0; i < 8; ++i) { + x[i] = (uint8_t) (u >> 8 * i); + } +} + +/* Keccak round constants */ +static const uint64_t KeccakF_RoundConstants[NROUNDS] = { + 0x0000000000000001ULL, 0x0000000000008082ULL, + 0x800000000000808aULL, 0x8000000080008000ULL, + 0x000000000000808bULL, 0x0000000080000001ULL, + 0x8000000080008081ULL, 0x8000000000008009ULL, + 0x000000000000008aULL, 0x0000000000000088ULL, + 0x0000000080008009ULL, 0x000000008000000aULL, + 0x000000008000808bULL, 0x800000000000008bULL, + 0x8000000000008089ULL, 0x8000000000008003ULL, + 0x8000000000008002ULL, 0x8000000000000080ULL, + 0x000000000000800aULL, 0x800000008000000aULL, + 0x8000000080008081ULL, 0x8000000000008080ULL, + 0x0000000080000001ULL, 0x8000000080008008ULL +}; + +/************************************************* + * Name: KeccakF1600_StatePermute + * + * Description: The Keccak F1600 Permutation + * + * Arguments: - uint64_t *state: pointer to input/output Keccak state + **************************************************/ +static void KeccakF1600_StatePermute(uint64_t *state) { + int round; + + uint64_t Aba, Abe, Abi, Abo, Abu; + uint64_t Aga, Age, Agi, Ago, Agu; + uint64_t Aka, Ake, Aki, Ako, Aku; + uint64_t Ama, Ame, Ami, Amo, Amu; + uint64_t Asa, Ase, Asi, Aso, Asu; + uint64_t BCa, BCe, BCi, BCo, BCu; + uint64_t Da, De, Di, Do, Du; + uint64_t Eba, Ebe, Ebi, Ebo, Ebu; + uint64_t Ega, Ege, Egi, Ego, Egu; + uint64_t Eka, Eke, Eki, Eko, Eku; + uint64_t Ema, Eme, Emi, Emo, Emu; + uint64_t Esa, Ese, Esi, Eso, Esu; + + // copyFromState(A, state) + Aba = state[0]; + Abe = state[1]; + Abi = state[2]; + Abo = state[3]; + Abu = state[4]; + Aga = state[5]; + Age = state[6]; + Agi = state[7]; + Ago = state[8]; + Agu = state[9]; + Aka = state[10]; + Ake = state[11]; + Aki = state[12]; + Ako = state[13]; + Aku = state[14]; + Ama = state[15]; + Ame = state[16]; + Ami = state[17]; + Amo = state[18]; + Amu = state[19]; + Asa = state[20]; + Ase = state[21]; + Asi = state[22]; + Aso = state[23]; + Asu = state[24]; + + for (round = 0; round < 
NROUNDS; round += 2) { + // prepareTheta + BCa = Aba ^ Aga ^ Aka ^ Ama ^ Asa; + BCe = Abe ^ Age ^ Ake ^ Ame ^ Ase; + BCi = Abi ^ Agi ^ Aki ^ Ami ^ Asi; + BCo = Abo ^ Ago ^ Ako ^ Amo ^ Aso; + BCu = Abu ^ Agu ^ Aku ^ Amu ^ Asu; + + // thetaRhoPiChiIotaPrepareTheta(round , A, E) + Da = BCu ^ ROL(BCe, 1); + De = BCa ^ ROL(BCi, 1); + Di = BCe ^ ROL(BCo, 1); + Do = BCi ^ ROL(BCu, 1); + Du = BCo ^ ROL(BCa, 1); + + Aba ^= Da; + BCa = Aba; + Age ^= De; + BCe = ROL(Age, 44); + Aki ^= Di; + BCi = ROL(Aki, 43); + Amo ^= Do; + BCo = ROL(Amo, 21); + Asu ^= Du; + BCu = ROL(Asu, 14); + Eba = BCa ^ ((~BCe) & BCi); + Eba ^= KeccakF_RoundConstants[round]; + Ebe = BCe ^ ((~BCi) & BCo); + Ebi = BCi ^ ((~BCo) & BCu); + Ebo = BCo ^ ((~BCu) & BCa); + Ebu = BCu ^ ((~BCa) & BCe); + + Abo ^= Do; + BCa = ROL(Abo, 28); + Agu ^= Du; + BCe = ROL(Agu, 20); + Aka ^= Da; + BCi = ROL(Aka, 3); + Ame ^= De; + BCo = ROL(Ame, 45); + Asi ^= Di; + BCu = ROL(Asi, 61); + Ega = BCa ^ ((~BCe) & BCi); + Ege = BCe ^ ((~BCi) & BCo); + Egi = BCi ^ ((~BCo) & BCu); + Ego = BCo ^ ((~BCu) & BCa); + Egu = BCu ^ ((~BCa) & BCe); + + Abe ^= De; + BCa = ROL(Abe, 1); + Agi ^= Di; + BCe = ROL(Agi, 6); + Ako ^= Do; + BCi = ROL(Ako, 25); + Amu ^= Du; + BCo = ROL(Amu, 8); + Asa ^= Da; + BCu = ROL(Asa, 18); + Eka = BCa ^ ((~BCe) & BCi); + Eke = BCe ^ ((~BCi) & BCo); + Eki = BCi ^ ((~BCo) & BCu); + Eko = BCo ^ ((~BCu) & BCa); + Eku = BCu ^ ((~BCa) & BCe); + + Abu ^= Du; + BCa = ROL(Abu, 27); + Aga ^= Da; + BCe = ROL(Aga, 36); + Ake ^= De; + BCi = ROL(Ake, 10); + Ami ^= Di; + BCo = ROL(Ami, 15); + Aso ^= Do; + BCu = ROL(Aso, 56); + Ema = BCa ^ ((~BCe) & BCi); + Eme = BCe ^ ((~BCi) & BCo); + Emi = BCi ^ ((~BCo) & BCu); + Emo = BCo ^ ((~BCu) & BCa); + Emu = BCu ^ ((~BCa) & BCe); + + Abi ^= Di; + BCa = ROL(Abi, 62); + Ago ^= Do; + BCe = ROL(Ago, 55); + Aku ^= Du; + BCi = ROL(Aku, 39); + Ama ^= Da; + BCo = ROL(Ama, 41); + Ase ^= De; + BCu = ROL(Ase, 2); + Esa = BCa ^ ((~BCe) & BCi); + Ese = BCe ^ ((~BCi) & BCo); + Esi = BCi ^ ((~BCo) & BCu); + Eso = BCo ^ ((~BCu) & BCa); + Esu = BCu ^ ((~BCa) & BCe); + + // prepareTheta + BCa = Eba ^ Ega ^ Eka ^ Ema ^ Esa; + BCe = Ebe ^ Ege ^ Eke ^ Eme ^ Ese; + BCi = Ebi ^ Egi ^ Eki ^ Emi ^ Esi; + BCo = Ebo ^ Ego ^ Eko ^ Emo ^ Eso; + BCu = Ebu ^ Egu ^ Eku ^ Emu ^ Esu; + + // thetaRhoPiChiIotaPrepareTheta(round+1, E, A) + Da = BCu ^ ROL(BCe, 1); + De = BCa ^ ROL(BCi, 1); + Di = BCe ^ ROL(BCo, 1); + Do = BCi ^ ROL(BCu, 1); + Du = BCo ^ ROL(BCa, 1); + + Eba ^= Da; + BCa = Eba; + Ege ^= De; + BCe = ROL(Ege, 44); + Eki ^= Di; + BCi = ROL(Eki, 43); + Emo ^= Do; + BCo = ROL(Emo, 21); + Esu ^= Du; + BCu = ROL(Esu, 14); + Aba = BCa ^ ((~BCe) & BCi); + Aba ^= KeccakF_RoundConstants[round + 1]; + Abe = BCe ^ ((~BCi) & BCo); + Abi = BCi ^ ((~BCo) & BCu); + Abo = BCo ^ ((~BCu) & BCa); + Abu = BCu ^ ((~BCa) & BCe); + + Ebo ^= Do; + BCa = ROL(Ebo, 28); + Egu ^= Du; + BCe = ROL(Egu, 20); + Eka ^= Da; + BCi = ROL(Eka, 3); + Eme ^= De; + BCo = ROL(Eme, 45); + Esi ^= Di; + BCu = ROL(Esi, 61); + Aga = BCa ^ ((~BCe) & BCi); + Age = BCe ^ ((~BCi) & BCo); + Agi = BCi ^ ((~BCo) & BCu); + Ago = BCo ^ ((~BCu) & BCa); + Agu = BCu ^ ((~BCa) & BCe); + + Ebe ^= De; + BCa = ROL(Ebe, 1); + Egi ^= Di; + BCe = ROL(Egi, 6); + Eko ^= Do; + BCi = ROL(Eko, 25); + Emu ^= Du; + BCo = ROL(Emu, 8); + Esa ^= Da; + BCu = ROL(Esa, 18); + Aka = BCa ^ ((~BCe) & BCi); + Ake = BCe ^ ((~BCi) & BCo); + Aki = BCi ^ ((~BCo) & BCu); + Ako = BCo ^ ((~BCu) & BCa); + Aku = BCu ^ ((~BCa) & BCe); + + Ebu ^= Du; + BCa = ROL(Ebu, 27); + Ega ^= Da; + BCe = ROL(Ega, 36); + Eke ^= De; + 
BCi = ROL(Eke, 10); + Emi ^= Di; + BCo = ROL(Emi, 15); + Eso ^= Do; + BCu = ROL(Eso, 56); + Ama = BCa ^ ((~BCe) & BCi); + Ame = BCe ^ ((~BCi) & BCo); + Ami = BCi ^ ((~BCo) & BCu); + Amo = BCo ^ ((~BCu) & BCa); + Amu = BCu ^ ((~BCa) & BCe); + + Ebi ^= Di; + BCa = ROL(Ebi, 62); + Ego ^= Do; + BCe = ROL(Ego, 55); + Eku ^= Du; + BCi = ROL(Eku, 39); + Ema ^= Da; + BCo = ROL(Ema, 41); + Ese ^= De; + BCu = ROL(Ese, 2); + Asa = BCa ^ ((~BCe) & BCi); + Ase = BCe ^ ((~BCi) & BCo); + Asi = BCi ^ ((~BCo) & BCu); + Aso = BCo ^ ((~BCu) & BCa); + Asu = BCu ^ ((~BCa) & BCe); + } + + // copyToState(state, A) + state[0] = Aba; + state[1] = Abe; + state[2] = Abi; + state[3] = Abo; + state[4] = Abu; + state[5] = Aga; + state[6] = Age; + state[7] = Agi; + state[8] = Ago; + state[9] = Agu; + state[10] = Aka; + state[11] = Ake; + state[12] = Aki; + state[13] = Ako; + state[14] = Aku; + state[15] = Ama; + state[16] = Ame; + state[17] = Ami; + state[18] = Amo; + state[19] = Amu; + state[20] = Asa; + state[21] = Ase; + state[22] = Asi; + state[23] = Aso; + state[24] = Asu; +} + +/************************************************* + * Name: keccak_absorb + * + * Description: Absorb step of Keccak; + * non-incremental, starts by zeroeing the state. + * + * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + * - const uint8_t *m: pointer to input to be absorbed into s + * - size_t mlen: length of input in bytes + * - uint8_t p: domain-separation byte for different + * Keccak-derived functions + **************************************************/ +static void keccak_absorb(uint64_t *s, uint32_t r, const uint8_t *m, + size_t mlen, uint8_t p) { + size_t i; + uint8_t t[200]; + + /* Zero state */ + for (i = 0; i < 25; ++i) { + s[i] = 0; + } + + while (mlen >= r) { + for (i = 0; i < r / 8; ++i) { + s[i] ^= load64(m + 8 * i); + } + + KeccakF1600_StatePermute(s); + mlen -= r; + m += r; + } + + for (i = 0; i < r; ++i) { + t[i] = 0; + } + for (i = 0; i < mlen; ++i) { + t[i] = m[i]; + } + t[i] = p; + t[r - 1] |= 128; + for (i = 0; i < r / 8; ++i) { + s[i] ^= load64(t + 8 * i); + } +} + +/************************************************* + * Name: keccak_squeezeblocks + * + * Description: Squeeze step of Keccak. Squeezes full blocks of r bytes each. + * Modifies the state. Can be called multiple times to keep + * squeezing, i.e., is incremental. + * + * Arguments: - uint8_t *h: pointer to output blocks + * - size_t nblocks: number of blocks to be + * squeezed (written to h) + * - uint64_t *s: pointer to input/output Keccak state + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + **************************************************/ +static void keccak_squeezeblocks(uint8_t *h, size_t nblocks, + uint64_t *s, uint32_t r) { + while (nblocks > 0) { + KeccakF1600_StatePermute(s); + for (size_t i = 0; i < (r >> 3); i++) { + store64(h + 8 * i, s[i]); + } + h += r; + nblocks--; + } +} + +/************************************************* + * Name: keccak_inc_init + * + * Description: Initializes the incremental Keccak state to zero. + * + * Arguments: - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. 
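+ *
+ * Illustrative call sequence (not from the upstream sources; buffer names
+ * are placeholders) using the SHAKE256 wrappers defined later in this file,
+ * which drive keccak_inc_init/absorb/finalize/squeeze:
+ *
+ *   shake256incctx st;
+ *   shake256_inc_init(&st);
+ *   shake256_inc_absorb(&st, msg1, msg1_len);   // absorb in arbitrary chunks
+ *   shake256_inc_absorb(&st, msg2, msg2_len);
+ *   shake256_inc_finalize(&st);                 // padding + domain separation
+ *   shake256_inc_squeeze(out, 32, &st);         // squeeze any number of bytes
+ *   shake256_inc_ctx_release(&st);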
+ **************************************************/ +static void keccak_inc_init(uint64_t *s_inc) { + size_t i; + + for (i = 0; i < 25; ++i) { + s_inc[i] = 0; + } + s_inc[25] = 0; +} + +/************************************************* + * Name: keccak_inc_absorb + * + * Description: Incremental keccak absorb + * Preceded by keccak_inc_init, succeeded by keccak_inc_finalize + * + * Arguments: - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + * - const uint8_t *m: pointer to input to be absorbed into s + * - size_t mlen: length of input in bytes + **************************************************/ +static void keccak_inc_absorb(uint64_t *s_inc, uint32_t r, const uint8_t *m, + size_t mlen) { + size_t i; + + /* Recall that s_inc[25] is the non-absorbed bytes xored into the state */ + while (mlen + s_inc[25] >= r) { + for (i = 0; i < r - (uint32_t)s_inc[25]; i++) { + /* Take the i'th byte from message + xor with the s_inc[25] + i'th byte of the state; little-endian */ + s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); + } + mlen -= (size_t)(r - s_inc[25]); + m += r - s_inc[25]; + s_inc[25] = 0; + + KeccakF1600_StatePermute(s_inc); + } + + for (i = 0; i < mlen; i++) { + s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); + } + s_inc[25] += mlen; +} + +/************************************************* + * Name: keccak_inc_finalize + * + * Description: Finalizes Keccak absorb phase, prepares for squeezing + * + * Arguments: - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + * - uint8_t p: domain-separation byte for different + * Keccak-derived functions + **************************************************/ +static void keccak_inc_finalize(uint64_t *s_inc, uint32_t r, uint8_t p) { + /* After keccak_inc_absorb, we are guaranteed that s_inc[25] < r, + so we can always use one more byte for p in the current state. */ + s_inc[s_inc[25] >> 3] ^= (uint64_t)p << (8 * (s_inc[25] & 0x07)); + s_inc[(r - 1) >> 3] ^= (uint64_t)128 << (8 * ((r - 1) & 0x07)); + s_inc[25] = 0; +} + +/************************************************* + * Name: keccak_inc_squeeze + * + * Description: Incremental Keccak squeeze; can be called on byte-level + * + * Arguments: - uint8_t *h: pointer to output bytes + * - size_t outlen: number of bytes to be squeezed + * - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + **************************************************/ +static void keccak_inc_squeeze(uint8_t *h, size_t outlen, + uint64_t *s_inc, uint32_t r) { + size_t i; + + /* First consume any bytes we still have sitting around */ + for (i = 0; i < outlen && i < s_inc[25]; i++) { + /* There are s_inc[25] bytes left, so r - s_inc[25] is the first + available byte. We consume from there, i.e., up to r. 
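+           The byte offset (r - s_inc[25] + i) is split below into a 64-bit
+           lane index (>> 3) and a byte position within that little-endian
+           lane (& 0x07), matching the layout used by keccak_inc_absorb.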
*/ + h[i] = (uint8_t)(s_inc[(r - s_inc[25] + i) >> 3] >> (8 * ((r - s_inc[25] + i) & 0x07))); + } + h += i; + outlen -= i; + s_inc[25] -= i; + + /* Then squeeze the remaining necessary blocks */ + while (outlen > 0) { + KeccakF1600_StatePermute(s_inc); + + for (i = 0; i < outlen && i < r; i++) { + h[i] = (uint8_t)(s_inc[i >> 3] >> (8 * (i & 0x07))); + } + h += i; + outlen -= i; + s_inc[25] = r - i; + } +} + +void shake128_inc_init(shake128incctx *state) { + keccak_inc_init(state->ctx); +} + +void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHAKE128_RATE, input, inlen); +} + +void shake128_inc_finalize(shake128incctx *state) { + keccak_inc_finalize(state->ctx, SHAKE128_RATE, 0x1F); +} + +void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state) { + keccak_inc_squeeze(output, outlen, state->ctx, SHAKE128_RATE); +} + +void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void shake128_inc_ctx_release(shake128incctx *state) { + (void)state; +} + +void shake256_inc_init(shake256incctx *state) { + keccak_inc_init(state->ctx); +} + +void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHAKE256_RATE, input, inlen); +} + +void shake256_inc_finalize(shake256incctx *state) { + keccak_inc_finalize(state->ctx, SHAKE256_RATE, 0x1F); +} + +void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state) { + keccak_inc_squeeze(output, outlen, state->ctx, SHAKE256_RATE); +} + +void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void shake256_inc_ctx_release(shake256incctx *state) { + (void)state; +} + + +/************************************************* + * Name: shake128_absorb + * + * Description: Absorb step of the SHAKE128 XOF. + * non-incremental, starts by zeroeing the state. + * + * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state + * - const uint8_t *input: pointer to input to be absorbed + * into s + * - size_t inlen: length of input in bytes + **************************************************/ +void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen) { + keccak_absorb(state->ctx, SHAKE128_RATE, input, inlen, 0x1F); +} + +/************************************************* + * Name: shake128_squeezeblocks + * + * Description: Squeeze step of SHAKE128 XOF. Squeezes full blocks of + * SHAKE128_RATE bytes each. Modifies the state. Can be called + * multiple times to keep squeezing, i.e., is incremental. + * + * Arguments: - uint8_t *output: pointer to output blocks + * - size_t nblocks: number of blocks to be squeezed + * (written to output) + * - shake128ctx *state: pointer to input/output Keccak state + **************************************************/ +void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state) { + keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE128_RATE); +} + +void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); +} + +/** Release the allocated state. Call only once. */ +void shake128_ctx_release(shake128ctx *state) { + (void)state; +} + +/************************************************* + * Name: shake256_absorb + * + * Description: Absorb step of the SHAKE256 XOF. 
+ * non-incremental, starts by zeroeing the state. + * + * Arguments: - shake256ctx *state: pointer to (uninitialized) output Keccak state + * - const uint8_t *input: pointer to input to be absorbed + * into s + * - size_t inlen: length of input in bytes + **************************************************/ +void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen) { + keccak_absorb(state->ctx, SHAKE256_RATE, input, inlen, 0x1F); +} + +/************************************************* + * Name: shake256_squeezeblocks + * + * Description: Squeeze step of SHAKE256 XOF. Squeezes full blocks of + * SHAKE256_RATE bytes each. Modifies the state. Can be called + * multiple times to keep squeezing, i.e., is incremental. + * + * Arguments: - uint8_t *output: pointer to output blocks + * - size_t nblocks: number of blocks to be squeezed + * (written to output) + * - shake256ctx *state: pointer to input/output Keccak state + **************************************************/ +void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state) { + keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE256_RATE); +} + +void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); +} + +/** Release the allocated state. Call only once. */ +void shake256_ctx_release(shake256ctx *state) { + (void)state; +} + +/************************************************* + * Name: shake128 + * + * Description: SHAKE128 XOF with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - size_t outlen: requested output length in bytes + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void shake128(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen) { + size_t nblocks = outlen / SHAKE128_RATE; + uint8_t t[SHAKE128_RATE]; + shake128ctx s; + + shake128_absorb(&s, input, inlen); + shake128_squeezeblocks(output, nblocks, &s); + + output += nblocks * SHAKE128_RATE; + outlen -= nblocks * SHAKE128_RATE; + + if (outlen) { + shake128_squeezeblocks(t, 1, &s); + for (size_t i = 0; i < outlen; ++i) { + output[i] = t[i]; + } + } + shake128_ctx_release(&s); +} + +/************************************************* + * Name: shake256 + * + * Description: SHAKE256 XOF with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - size_t outlen: requested output length in bytes + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void shake256(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen) { + size_t nblocks = outlen / SHAKE256_RATE; + uint8_t t[SHAKE256_RATE]; + shake256ctx s; + + shake256_absorb(&s, input, inlen); + shake256_squeezeblocks(output, nblocks, &s); + + output += nblocks * SHAKE256_RATE; + outlen -= nblocks * SHAKE256_RATE; + + if (outlen) { + shake256_squeezeblocks(t, 1, &s); + for (size_t i = 0; i < outlen; ++i) { + output[i] = t[i]; + } + } + shake256_ctx_release(&s); +} + +void sha3_256_inc_init(sha3_256incctx *state) { + keccak_inc_init(state->ctx); +} + +void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void sha3_256_inc_ctx_release(sha3_256incctx *state) { + (void)state; +} + +void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen) { + 
keccak_inc_absorb(state->ctx, SHA3_256_RATE, input, inlen); +} + +void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state) { + uint8_t t[SHA3_256_RATE]; + keccak_inc_finalize(state->ctx, SHA3_256_RATE, 0x06); + + keccak_squeezeblocks(t, 1, state->ctx, SHA3_256_RATE); + + sha3_256_inc_ctx_release(state); + + for (size_t i = 0; i < 32; i++) { + output[i] = t[i]; + } +} + +/************************************************* + * Name: sha3_256 + * + * Description: SHA3-256 with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen) { + uint64_t s[25]; + uint8_t t[SHA3_256_RATE]; + + /* Absorb input */ + keccak_absorb(s, SHA3_256_RATE, input, inlen, 0x06); + + /* Squeeze output */ + keccak_squeezeblocks(t, 1, s, SHA3_256_RATE); + + for (size_t i = 0; i < 32; i++) { + output[i] = t[i]; + } +} + +void sha3_384_inc_init(sha3_384incctx *state) { + keccak_inc_init(state->ctx); +} + +void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHA3_384_RATE, input, inlen); +} + +void sha3_384_inc_ctx_release(sha3_384incctx *state) { + (void)state; +} + +void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state) { + uint8_t t[SHA3_384_RATE]; + keccak_inc_finalize(state->ctx, SHA3_384_RATE, 0x06); + + keccak_squeezeblocks(t, 1, state->ctx, SHA3_384_RATE); + + sha3_384_inc_ctx_release(state); + + for (size_t i = 0; i < 48; i++) { + output[i] = t[i]; + } +} + +/************************************************* + * Name: sha3_384 + * + * Description: SHA3-256 with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen) { + uint64_t s[25]; + uint8_t t[SHA3_384_RATE]; + + /* Absorb input */ + keccak_absorb(s, SHA3_384_RATE, input, inlen, 0x06); + + /* Squeeze output */ + keccak_squeezeblocks(t, 1, s, SHA3_384_RATE); + + for (size_t i = 0; i < 48; i++) { + output[i] = t[i]; + } +} + +void sha3_512_inc_init(sha3_512incctx *state) { + keccak_inc_init(state->ctx); +} + +void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHA3_512_RATE, input, inlen); +} + +void sha3_512_inc_ctx_release(sha3_512incctx *state) { + (void)state; +} + +void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state) { + uint8_t t[SHA3_512_RATE]; + keccak_inc_finalize(state->ctx, SHA3_512_RATE, 0x06); + + keccak_squeezeblocks(t, 1, state->ctx, SHA3_512_RATE); + + sha3_512_inc_ctx_release(state); + + for (size_t i = 0; i < 64; i++) { + output[i] = t[i]; + } +} + +/************************************************* + * Name: sha3_512 + * + * Description: SHA3-512 with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + 
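+ *
+ * Illustrative one-shot use (buffer names are placeholders, not from the
+ * upstream sources):
+ *
+ *   uint8_t digest[64];             // SHA3-512 always yields 64 bytes
+ *   sha3_512(digest, msg, msg_len);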
**************************************************/ +void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen) { + uint64_t s[25]; + uint8_t t[SHA3_512_RATE]; + + /* Absorb input */ + keccak_absorb(s, SHA3_512_RATE, input, inlen, 0x06); + + /* Squeeze output */ + keccak_squeezeblocks(t, 1, s, SHA3_512_RATE); + + for (size_t i = 0; i < 64; i++) { + output[i] = t[i]; + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fips202.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fips202.h new file mode 100644 index 0000000000..c29ebd8f9d --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fips202.h @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef FIPS202_H +#define FIPS202_H + +#include +#include + +#define SHAKE128_RATE 168 +#define SHAKE256_RATE 136 +#define SHA3_256_RATE 136 +#define SHA3_384_RATE 104 +#define SHA3_512_RATE 72 + +#define PQC_SHAKEINCCTX_U64WORDS 26 +#define PQC_SHAKECTX_U64WORDS 25 + +#define PQC_SHAKEINCCTX_BYTES (sizeof(uint64_t) * 26) +#define PQC_SHAKECTX_BYTES (sizeof(uint64_t) * 25) + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} shake128incctx; + +// Context for non-incremental API +typedef struct { + uint64_t ctx[PQC_SHAKECTX_U64WORDS]; +} shake128ctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} shake256incctx; + +// Context for non-incremental API +typedef struct { + uint64_t ctx[PQC_SHAKECTX_U64WORDS]; +} shake256ctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} sha3_256incctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} sha3_384incctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} sha3_512incctx; + +/* Initialize the state and absorb the provided input. + * + * This function does not support being called multiple times + * with the same state. + */ +void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen); +/* Squeeze output out of the sponge. + * + * Supports being called multiple times + */ +void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state); +/* Free the state */ +void shake128_ctx_release(shake128ctx *state); +/* Copy the state. */ +void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src); + +/* Initialize incremental hashing API */ +void shake128_inc_init(shake128incctx *state); +/* Absorb more information into the XOF. + * + * Can be called multiple times. + */ +void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen); +/* Finalize the XOF for squeezing */ +void shake128_inc_finalize(shake128incctx *state); +/* Squeeze output out of the sponge. + * + * Supports being called multiple times + */ +void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state); +/* Copy the context of the SHAKE128 XOF */ +void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src); +/* Free the context of the SHAKE128 XOF */ +void shake128_inc_ctx_release(shake128incctx *state); + +/* Initialize the state and absorb the provided input. + * + * This function does not support being called multiple times + * with the same state. + */ +void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen); +/* Squeeze output out of the sponge. 
+ * + * Supports being called multiple times + */ +void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state); +/* Free the context held by this XOF */ +void shake256_ctx_release(shake256ctx *state); +/* Copy the context held by this XOF */ +void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src); + +/* Initialize incremental hashing API */ +void shake256_inc_init(shake256incctx *state); +void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen); +/* Prepares for squeeze phase */ +void shake256_inc_finalize(shake256incctx *state); +/* Squeeze output out of the sponge. + * + * Supports being called multiple times + */ +void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state); +/* Copy the state */ +void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src); +/* Free the state */ +void shake256_inc_ctx_release(shake256incctx *state); + +/* One-stop SHAKE128 call */ +void shake128(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen); + +/* One-stop SHAKE256 call */ +void shake256(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen); + +/* Initialize the incremental hashing state */ +void sha3_256_inc_init(sha3_256incctx *state); +/* Absorb blocks into SHA3 */ +void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen); +/* Obtain the output of the function and free `state` */ +void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state); +/* Copy the context */ +void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src); +/* Release the state, don't use if `_finalize` has been used */ +void sha3_256_inc_ctx_release(sha3_256incctx *state); + +void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen); + +/* Initialize the incremental hashing state */ +void sha3_384_inc_init(sha3_384incctx *state); +/* Absorb blocks into SHA3 */ +void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, size_t inlen); +/* Obtain the output of the function and free `state` */ +void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state); +/* Copy the context */ +void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src); +/* Release the state, don't use if `_finalize` has been used */ +void sha3_384_inc_ctx_release(sha3_384incctx *state); + +/* One-stop SHA3-384 shop */ +void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen); + +/* Initialize the incremental hashing state */ +void sha3_512_inc_init(sha3_512incctx *state); +/* Absorb blocks into SHA3 */ +void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen); +/* Obtain the output of the function and free `state` */ +void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state); +/* Copy the context */ +void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src); +/* Release the state, don't use if `_finalize` has been used */ +void sha3_512_inc_ctx_release(sha3_512incctx *state); + +/* One-stop SHA3-512 shop */ +void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp.c new file mode 100644 index 0000000000..48e2937f17 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp.c @@ -0,0 +1,15 @@ +#include + +/* + * If ctl == 0x00000000, then *d is set to a0 + * If ctl == 0xFFFFFFFF, then *d is set to a1 + * ctl MUST be either 0x00000000 
or 0xFFFFFFFF. + */ +void +fp_select(fp_t *d, const fp_t *a0, const fp_t *a1, uint32_t ctl) +{ + digit_t cw = (int32_t)ctl; + for (unsigned int i = 0; i < NWORDS_FIELD; i++) { + (*d)[i] = (*a0)[i] ^ (cw & ((*a0)[i] ^ (*a1)[i])); + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp.h new file mode 100644 index 0000000000..1241d5801e --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp.h @@ -0,0 +1,48 @@ +#ifndef FP_H +#define FP_H + +//////////////////////////////////////////////// NOTE: this is placed here for now +#include +#include +#include +#include +#include +#include +#include +#include + +typedef digit_t fp_t[NWORDS_FIELD]; // Datatype for representing field elements + +extern const digit_t ONE[NWORDS_FIELD]; +extern const digit_t ZERO[NWORDS_FIELD]; +// extern const digit_t PM1O3[NWORDS_FIELD]; + +void fp_set_small(fp_t *x, const digit_t val); +void fp_mul_small(fp_t *x, const fp_t *a, const uint32_t val); +void fp_set_zero(fp_t *x); +void fp_set_one(fp_t *x); +uint32_t fp_is_equal(const fp_t *a, const fp_t *b); +uint32_t fp_is_zero(const fp_t *a); +void fp_copy(fp_t *out, const fp_t *a); + +void fp_encode(void *dst, const fp_t *a); +void fp_decode_reduce(fp_t *d, const void *src, size_t len); +uint32_t fp_decode(fp_t *d, const void *src); + +void fp_select(fp_t *d, const fp_t *a0, const fp_t *a1, uint32_t ctl); +void fp_cswap(fp_t *a, fp_t *b, uint32_t ctl); + +void fp_add(fp_t *out, const fp_t *a, const fp_t *b); +void fp_sub(fp_t *out, const fp_t *a, const fp_t *b); +void fp_neg(fp_t *out, const fp_t *a); +void fp_sqr(fp_t *out, const fp_t *a); +void fp_mul(fp_t *out, const fp_t *a, const fp_t *b); + +void fp_inv(fp_t *x); +uint32_t fp_is_square(const fp_t *a); +void fp_sqrt(fp_t *a); +void fp_half(fp_t *out, const fp_t *a); +void fp_exp3div4(fp_t *out, const fp_t *a); +void fp_div3(fp_t *out, const fp_t *a); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp2.c new file mode 100644 index 0000000000..a2589525f9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp2.c @@ -0,0 +1,328 @@ +#include +#include +#include + +/* Arithmetic modulo X^2 + 1 */ + +void +fp2_set_small(fp2_t *x, const digit_t val) +{ + fp_set_small(&(x->re), val); + fp_set_zero(&(x->im)); +} + +void +fp2_mul_small(fp2_t *x, const fp2_t *y, uint32_t n) +{ + fp_mul_small(&x->re, &y->re, n); + fp_mul_small(&x->im, &y->im, n); +} + +void +fp2_set_one(fp2_t *x) +{ + fp_set_one(&(x->re)); + fp_set_zero(&(x->im)); +} + +void +fp2_set_zero(fp2_t *x) +{ + fp_set_zero(&(x->re)); + fp_set_zero(&(x->im)); +} + +// Is a GF(p^2) element zero? +// Returns 0xFF...FF (true) if a=0, 0 (false) otherwise +uint32_t +fp2_is_zero(const fp2_t *a) +{ + return fp_is_zero(&(a->re)) & fp_is_zero(&(a->im)); +} + +// Compare two GF(p^2) elements in constant time +// Returns 0xFF...FF (true) if a=b, 0 (false) otherwise +uint32_t +fp2_is_equal(const fp2_t *a, const fp2_t *b) +{ + return fp_is_equal(&(a->re), &(b->re)) & fp_is_equal(&(a->im), &(b->im)); +} + +// Is a GF(p^2) element one? 
+// Returns 0xFF...FF (true) if a=1, 0 (false) otherwise +uint32_t +fp2_is_one(const fp2_t *a) +{ + return fp_is_equal(&(a->re), &ONE) & fp_is_zero(&(a->im)); +} + +void +fp2_copy(fp2_t *x, const fp2_t *y) +{ + fp_copy(&(x->re), &(y->re)); + fp_copy(&(x->im), &(y->im)); +} + +void +fp2_add(fp2_t *x, const fp2_t *y, const fp2_t *z) +{ + fp_add(&(x->re), &(y->re), &(z->re)); + fp_add(&(x->im), &(y->im), &(z->im)); +} + +void +fp2_add_one(fp2_t *x, const fp2_t *y) +{ + fp_add(&x->re, &y->re, &ONE); + fp_copy(&x->im, &y->im); +} + +void +fp2_sub(fp2_t *x, const fp2_t *y, const fp2_t *z) +{ + fp_sub(&(x->re), &(y->re), &(z->re)); + fp_sub(&(x->im), &(y->im), &(z->im)); +} + +void +fp2_neg(fp2_t *x, const fp2_t *y) +{ + fp_neg(&(x->re), &(y->re)); + fp_neg(&(x->im), &(y->im)); +} + +void +fp2_mul(fp2_t *x, const fp2_t *y, const fp2_t *z) +{ + fp_t t0, t1; + + fp_add(&t0, &(y->re), &(y->im)); + fp_add(&t1, &(z->re), &(z->im)); + fp_mul(&t0, &t0, &t1); + fp_mul(&t1, &(y->im), &(z->im)); + fp_mul(&(x->re), &(y->re), &(z->re)); + fp_sub(&(x->im), &t0, &t1); + fp_sub(&(x->im), &(x->im), &(x->re)); + fp_sub(&(x->re), &(x->re), &t1); +} + +void +fp2_sqr(fp2_t *x, const fp2_t *y) +{ + fp_t sum, diff; + + fp_add(&sum, &(y->re), &(y->im)); + fp_sub(&diff, &(y->re), &(y->im)); + fp_mul(&(x->im), &(y->re), &(y->im)); + fp_add(&(x->im), &(x->im), &(x->im)); + fp_mul(&(x->re), &sum, &diff); +} + +void +fp2_inv(fp2_t *x) +{ + fp_t t0, t1; + + fp_sqr(&t0, &(x->re)); + fp_sqr(&t1, &(x->im)); + fp_add(&t0, &t0, &t1); + fp_inv(&t0); + fp_mul(&(x->re), &(x->re), &t0); + fp_mul(&(x->im), &(x->im), &t0); + fp_neg(&(x->im), &(x->im)); +} + +uint32_t +fp2_is_square(const fp2_t *x) +{ + fp_t t0, t1; + + fp_sqr(&t0, &(x->re)); + fp_sqr(&t1, &(x->im)); + fp_add(&t0, &t0, &t1); + + return fp_is_square(&t0); +} + +void +fp2_sqrt(fp2_t *a) +{ + fp_t x0, x1, t0, t1; + + /* From "Optimized One-Dimensional SQIsign Verification on Intel and + * Cortex-M4" by Aardal et al: https://eprint.iacr.org/2024/1563 */ + + // x0 = \delta = sqrt(a0^2 + a1^2). + fp_sqr(&x0, &(a->re)); + fp_sqr(&x1, &(a->im)); + fp_add(&x0, &x0, &x1); + fp_sqrt(&x0); + // If a1 = 0, there is a risk of \delta = -a0, which makes x0 = 0 below. + // In that case, we restore the value \delta = a0. + fp_select(&x0, &x0, &(a->re), fp_is_zero(&(a->im))); + // x0 = \delta + a0, t0 = 2 * x0. + fp_add(&x0, &x0, &(a->re)); + fp_add(&t0, &x0, &x0); + + // x1 = t0^(p-3)/4 + fp_exp3div4(&x1, &t0); + + // x0 = x0 * x1, x1 = x1 * a1, t1 = (2x0)^2. + fp_mul(&x0, &x0, &x1); + fp_mul(&x1, &x1, &(a->im)); + fp_add(&t1, &x0, &x0); + fp_sqr(&t1, &t1); + // If t1 = t0, return x0 + x1*i, otherwise x1 - x0*i. 
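/*
 * Illustrative aside, not from the upstream SQIsign sources: with a = a0 + a1*i
 * and i^2 = -1, the square root computed here has the shape
 *
 *     sqrt(a) = x0 + (a1 / (2*x0)) * i,  where
 *     x0 = sqrt((a0 + delta) / 2)  and  delta = sqrt(a0^2 + a1^2).
 *
 * Since p = 3 (mod 4), the code obtains it from u = a0 + delta and
 * x1 = (2u)^((p-3)/4): then u*x1 = (2u)^((p+1)/4)/2 = sqrt(u/2) and
 * a1*x1 = a1/(2*sqrt(u/2)), matching the formula.  If (a0 + delta)/2 turns out
 * to be a non-square, the comparison (2*x0)^2 ?= 2u just below fails and the
 * alternative root of the form x1 - x0*i is selected instead, as the preceding
 * comment notes.
 */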
+ fp_sub(&t0, &t0, &t1); + uint32_t f = fp_is_zero(&t0); + fp_neg(&t1, &x0); + fp_copy(&t0, &x1); + fp_select(&t0, &t0, &x0, f); + fp_select(&t1, &t1, &x1, f); + + // Check if t0 is zero + uint32_t t0_is_zero = fp_is_zero(&t0); + + // Check whether t0, t1 are odd + // Note: we encode to ensure canonical representation + uint8_t tmp_bytes[FP_ENCODED_BYTES]; + fp_encode(tmp_bytes, &t0); + uint32_t t0_is_odd = -((uint32_t)tmp_bytes[0] & 1); + fp_encode(tmp_bytes, &t1); + uint32_t t1_is_odd = -((uint32_t)tmp_bytes[0] & 1); + + // We negate the output if: + // t0 is odd, or + // t0 is zero and t1 is odd + uint32_t negate_output = t0_is_odd | (t0_is_zero & t1_is_odd); + fp_neg(&x0, &t0); + fp_select(&(a->re), &t0, &x0, negate_output); + fp_neg(&x0, &t1); + fp_select(&(a->im), &t1, &x0, negate_output); +} + +uint32_t +fp2_sqrt_verify(fp2_t *a) +{ + fp2_t t0, t1; + + fp2_copy(&t0, a); + fp2_sqrt(a); + fp2_sqr(&t1, a); + + return (fp2_is_equal(&t0, &t1)); +} + +void +fp2_half(fp2_t *x, const fp2_t *y) +{ + fp_half(&(x->re), &(y->re)); + fp_half(&(x->im), &(y->im)); +} + +void +fp2_batched_inv(fp2_t *x, int len) +{ + fp2_t t1[len], t2[len]; + fp2_t inverse; + + // x = x0,...,xn + // t1 = x0, x0*x1, ... ,x0 * x1 * ... * xn + fp2_copy(&t1[0], &x[0]); + for (int i = 1; i < len; i++) { + fp2_mul(&t1[i], &t1[i - 1], &x[i]); + } + + // inverse = 1/ (x0 * x1 * ... * xn) + fp2_copy(&inverse, &t1[len - 1]); + fp2_inv(&inverse); + + fp2_copy(&t2[0], &inverse); + // t2 = 1/ (x0 * x1 * ... * xn), 1/ (x0 * x1 * ... * x(n-1)) , ... , 1/xO + for (int i = 1; i < len; i++) { + fp2_mul(&t2[i], &t2[i - 1], &x[len - i]); + } + + fp2_copy(&x[0], &t2[len - 1]); + + for (int i = 1; i < len; i++) { + fp2_mul(&x[i], &t1[i - 1], &t2[len - i - 1]); + } +} + +// exponentiation using square and multiply +// Warning!! Not constant time! 
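/*
 * Illustrative aside, not from the upstream SQIsign sources: fp2_pow_vartime()
 * below walks the exponent words from the least-significant bit upward,
 * multiplying the accumulator in whenever a bit is set and squaring the base at
 * every step.  The same control flow for plain 64-bit integers (the helper name
 * is ad hoc) looks like this; like the GF(p^2) version it is variable-time, so
 * callers are expected to use it only with public exponents.
 */
#include <stdint.h>

static uint64_t pow_mod_vartime(uint64_t x, uint64_t exp, uint64_t m)
{
    uint64_t acc = x % m;   /* running square of the base */
    uint64_t out = 1 % m;   /* accumulated result         */

    while (exp != 0) {
        if (exp & 1) {                                   /* bit set: multiply in */
            out = (uint64_t)((__uint128_t)out * acc % m);
        }
        acc = (uint64_t)((__uint128_t)acc * acc % m);    /* square at every step */
        exp >>= 1;
    }
    return out;
}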
+void +fp2_pow_vartime(fp2_t *out, const fp2_t *x, const digit_t *exp, const int size) +{ + fp2_t acc; + digit_t bit; + + fp2_copy(&acc, x); + fp2_set_one(out); + + // Iterate over each word of exp + for (int j = 0; j < size; j++) { + // Iterate over each bit of the word + for (int i = 0; i < RADIX; i++) { + bit = (exp[j] >> i) & 1; + if (bit == 1) { + fp2_mul(out, out, &acc); + } + fp2_sqr(&acc, &acc); + } + } +} + +void +fp2_print(const char *name, const fp2_t *a) +{ + printf("%s0x", name); + + uint8_t buf[FP_ENCODED_BYTES]; + fp_encode(&buf, &a->re); // Encoding ensures canonical rep + for (int i = 0; i < FP_ENCODED_BYTES; i++) { + printf("%02x", buf[FP_ENCODED_BYTES - i - 1]); + } + + printf(" + i*0x"); + + fp_encode(&buf, &a->im); + for (int i = 0; i < FP_ENCODED_BYTES; i++) { + printf("%02x", buf[FP_ENCODED_BYTES - i - 1]); + } + printf("\n"); +} + +void +fp2_encode(void *dst, const fp2_t *a) +{ + uint8_t *buf = dst; + fp_encode(buf, &(a->re)); + fp_encode(buf + FP_ENCODED_BYTES, &(a->im)); +} + +uint32_t +fp2_decode(fp2_t *d, const void *src) +{ + const uint8_t *buf = src; + uint32_t re, im; + + re = fp_decode(&(d->re), buf); + im = fp_decode(&(d->im), buf + FP_ENCODED_BYTES); + return re & im; +} + +void +fp2_select(fp2_t *d, const fp2_t *a0, const fp2_t *a1, uint32_t ctl) +{ + fp_select(&(d->re), &(a0->re), &(a1->re), ctl); + fp_select(&(d->im), &(a0->im), &(a1->im), ctl); +} + +void +fp2_cswap(fp2_t *a, fp2_t *b, uint32_t ctl) +{ + fp_cswap(&(a->re), &(b->re), ctl); + fp_cswap(&(a->im), &(b->im), ctl); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp2.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp2.h new file mode 100644 index 0000000000..00e673b7ca --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp2.h @@ -0,0 +1,41 @@ +#ifndef FP2_H +#define FP2_H + +#include +#include "fp.h" +#include + +// Structure for representing elements in GF(p^2) +typedef struct fp2_t +{ + fp_t re, im; +} fp2_t; + +void fp2_set_small(fp2_t *x, const digit_t val); +void fp2_mul_small(fp2_t *x, const fp2_t *y, uint32_t n); +void fp2_set_one(fp2_t *x); +void fp2_set_zero(fp2_t *x); +uint32_t fp2_is_zero(const fp2_t *a); +uint32_t fp2_is_equal(const fp2_t *a, const fp2_t *b); +uint32_t fp2_is_one(const fp2_t *a); +void fp2_copy(fp2_t *x, const fp2_t *y); +void fp2_add(fp2_t *x, const fp2_t *y, const fp2_t *z); +void fp2_add_one(fp2_t *x, const fp2_t *y); +void fp2_sub(fp2_t *x, const fp2_t *y, const fp2_t *z); +void fp2_neg(fp2_t *x, const fp2_t *y); +void fp2_mul(fp2_t *x, const fp2_t *y, const fp2_t *z); +void fp2_sqr(fp2_t *x, const fp2_t *y); +void fp2_inv(fp2_t *x); +uint32_t fp2_is_square(const fp2_t *x); +void fp2_sqrt(fp2_t *x); +uint32_t fp2_sqrt_verify(fp2_t *a); +void fp2_half(fp2_t *x, const fp2_t *y); +void fp2_batched_inv(fp2_t *x, int len); +void fp2_pow_vartime(fp2_t *out, const fp2_t *x, const digit_t *exp, const int size); +void fp2_print(const char *name, const fp2_t *a); +void fp2_encode(void *dst, const fp2_t *a); +uint32_t fp2_decode(fp2_t *d, const void *src); +void fp2_select(fp2_t *d, const fp2_t *a0, const fp2_t *a1, uint32_t ctl); +void fp2_cswap(fp2_t *a, fp2_t *b, uint32_t ctl); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_constants.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_constants.h new file mode 100644 index 0000000000..063579ac33 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_constants.h @@ -0,0 +1,17 @@ +#if RADIX == 32 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +#define NWORDS_FIELD 
12 +#else +#define NWORDS_FIELD 14 +#endif +#define NWORDS_ORDER 12 +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +#define NWORDS_FIELD 6 +#else +#define NWORDS_FIELD 7 +#endif +#define NWORDS_ORDER 6 +#endif +#define BITS 384 +#define LOG2P 9 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c new file mode 100644 index 0000000000..2aaad84dc1 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c @@ -0,0 +1,1234 @@ +// clang-format off +// Command line : python monty.py 32 +// 0x40ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff +#ifdef RADIX_32 + +#include +#include + +#define sspint int32_t +#define spint uint32_t +#define udpint uint64_t +#define dpint uint64_t + +#define Wordlength 32 +#define Nlimbs 14 +#define Radix 28 +#define Nbits 383 +#define Nbytes 48 + +#define MONTGOMERY +// propagate carries +inline static spint prop(spint *n) { + int i; + spint mask = ((spint)1 << 28u) - (spint)1; + sspint carry = (sspint)n[0]; + carry >>= 28u; + n[0] &= mask; + for (i = 1; i < 13; i++) { + carry += (sspint)n[i]; + n[i] = (spint)carry & mask; + carry >>= 28u; + } + n[13] += (spint)carry; + return -((n[13] >> 1) >> 30u); +} + +// propagate carries and add p if negative, propagate carries again +inline static int flatten(spint *n) { + spint carry = prop(n); + n[0] -= (spint)1u & carry; + n[13] += ((spint)0x41000u) & carry; + (void)prop(n); + return (int)(carry & 1); +} + +// Montgomery final subtract +static int modfsb(spint *n) { + n[0] += (spint)1u; + n[13] -= (spint)0x41000u; + return flatten(n); +} + +// Modular addition - reduce less than 2p +static void modadd(const spint *a, const spint *b, spint *n) { + spint carry; + n[0] = a[0] + b[0]; + n[1] = a[1] + b[1]; + n[2] = a[2] + b[2]; + n[3] = a[3] + b[3]; + n[4] = a[4] + b[4]; + n[5] = a[5] + b[5]; + n[6] = a[6] + b[6]; + n[7] = a[7] + b[7]; + n[8] = a[8] + b[8]; + n[9] = a[9] + b[9]; + n[10] = a[10] + b[10]; + n[11] = a[11] + b[11]; + n[12] = a[12] + b[12]; + n[13] = a[13] + b[13]; + n[0] += (spint)2u; + n[13] -= (spint)0x82000u; + carry = prop(n); + n[0] -= (spint)2u & carry; + n[13] += ((spint)0x82000u) & carry; + (void)prop(n); +} + +// Modular subtraction - reduce less than 2p +static void modsub(const spint *a, const spint *b, spint *n) { + spint carry; + n[0] = a[0] - b[0]; + n[1] = a[1] - b[1]; + n[2] = a[2] - b[2]; + n[3] = a[3] - b[3]; + n[4] = a[4] - b[4]; + n[5] = a[5] - b[5]; + n[6] = a[6] - b[6]; + n[7] = a[7] - b[7]; + n[8] = a[8] - b[8]; + n[9] = a[9] - b[9]; + n[10] = a[10] - b[10]; + n[11] = a[11] - b[11]; + n[12] = a[12] - b[12]; + n[13] = a[13] - b[13]; + carry = prop(n); + n[0] -= (spint)2u & carry; + n[13] += ((spint)0x82000u) & carry; + (void)prop(n); +} + +// Modular negation +static void modneg(const spint *b, spint *n) { + spint carry; + n[0] = (spint)0 - b[0]; + n[1] = (spint)0 - b[1]; + n[2] = (spint)0 - b[2]; + n[3] = (spint)0 - b[3]; + n[4] = (spint)0 - b[4]; + n[5] = (spint)0 - b[5]; + n[6] = (spint)0 - b[6]; + n[7] = (spint)0 - b[7]; + n[8] = (spint)0 - b[8]; + n[9] = (spint)0 - b[9]; + n[10] = (spint)0 - b[10]; + n[11] = (spint)0 - b[11]; + n[12] = (spint)0 - b[12]; + n[13] = (spint)0 - b[13]; + carry = prop(n); + n[0] -= (spint)2u & carry; + n[13] += ((spint)0x82000u) & carry; + (void)prop(n); +} + +// Overflow limit = 18446744073709551616 +// maximum possible = 1008877845989814286 +// Modular multiplication, c=a*b mod 2p +static void 
modmul(const spint *a, const spint *b, spint *c) { + dpint t = 0; + spint p13 = 0x41000u; + spint q = ((spint)1 << 28u); // q is unsaturated radix + spint mask = (spint)(q - (spint)1); + t += (dpint)a[0] * b[0]; + spint v0 = ((spint)t & mask); + t >>= 28; + t += (dpint)a[0] * b[1]; + t += (dpint)a[1] * b[0]; + spint v1 = ((spint)t & mask); + t >>= 28; + t += (dpint)a[0] * b[2]; + t += (dpint)a[1] * b[1]; + t += (dpint)a[2] * b[0]; + spint v2 = ((spint)t & mask); + t >>= 28; + t += (dpint)a[0] * b[3]; + t += (dpint)a[1] * b[2]; + t += (dpint)a[2] * b[1]; + t += (dpint)a[3] * b[0]; + spint v3 = ((spint)t & mask); + t >>= 28; + t += (dpint)a[0] * b[4]; + t += (dpint)a[1] * b[3]; + t += (dpint)a[2] * b[2]; + t += (dpint)a[3] * b[1]; + t += (dpint)a[4] * b[0]; + spint v4 = ((spint)t & mask); + t >>= 28; + t += (dpint)a[0] * b[5]; + t += (dpint)a[1] * b[4]; + t += (dpint)a[2] * b[3]; + t += (dpint)a[3] * b[2]; + t += (dpint)a[4] * b[1]; + t += (dpint)a[5] * b[0]; + spint v5 = ((spint)t & mask); + t >>= 28; + t += (dpint)a[0] * b[6]; + t += (dpint)a[1] * b[5]; + t += (dpint)a[2] * b[4]; + t += (dpint)a[3] * b[3]; + t += (dpint)a[4] * b[2]; + t += (dpint)a[5] * b[1]; + t += (dpint)a[6] * b[0]; + spint v6 = ((spint)t & mask); + t >>= 28; + t += (dpint)a[0] * b[7]; + t += (dpint)a[1] * b[6]; + t += (dpint)a[2] * b[5]; + t += (dpint)a[3] * b[4]; + t += (dpint)a[4] * b[3]; + t += (dpint)a[5] * b[2]; + t += (dpint)a[6] * b[1]; + t += (dpint)a[7] * b[0]; + spint v7 = ((spint)t & mask); + t >>= 28; + t += (dpint)a[0] * b[8]; + t += (dpint)a[1] * b[7]; + t += (dpint)a[2] * b[6]; + t += (dpint)a[3] * b[5]; + t += (dpint)a[4] * b[4]; + t += (dpint)a[5] * b[3]; + t += (dpint)a[6] * b[2]; + t += (dpint)a[7] * b[1]; + t += (dpint)a[8] * b[0]; + spint v8 = ((spint)t & mask); + t >>= 28; + t += (dpint)a[0] * b[9]; + t += (dpint)a[1] * b[8]; + t += (dpint)a[2] * b[7]; + t += (dpint)a[3] * b[6]; + t += (dpint)a[4] * b[5]; + t += (dpint)a[5] * b[4]; + t += (dpint)a[6] * b[3]; + t += (dpint)a[7] * b[2]; + t += (dpint)a[8] * b[1]; + t += (dpint)a[9] * b[0]; + spint v9 = ((spint)t & mask); + t >>= 28; + t += (dpint)a[0] * b[10]; + t += (dpint)a[1] * b[9]; + t += (dpint)a[2] * b[8]; + t += (dpint)a[3] * b[7]; + t += (dpint)a[4] * b[6]; + t += (dpint)a[5] * b[5]; + t += (dpint)a[6] * b[4]; + t += (dpint)a[7] * b[3]; + t += (dpint)a[8] * b[2]; + t += (dpint)a[9] * b[1]; + t += (dpint)a[10] * b[0]; + spint v10 = ((spint)t & mask); + t >>= 28; + t += (dpint)a[0] * b[11]; + t += (dpint)a[1] * b[10]; + t += (dpint)a[2] * b[9]; + t += (dpint)a[3] * b[8]; + t += (dpint)a[4] * b[7]; + t += (dpint)a[5] * b[6]; + t += (dpint)a[6] * b[5]; + t += (dpint)a[7] * b[4]; + t += (dpint)a[8] * b[3]; + t += (dpint)a[9] * b[2]; + t += (dpint)a[10] * b[1]; + t += (dpint)a[11] * b[0]; + spint v11 = ((spint)t & mask); + t >>= 28; + t += (dpint)a[0] * b[12]; + t += (dpint)a[1] * b[11]; + t += (dpint)a[2] * b[10]; + t += (dpint)a[3] * b[9]; + t += (dpint)a[4] * b[8]; + t += (dpint)a[5] * b[7]; + t += (dpint)a[6] * b[6]; + t += (dpint)a[7] * b[5]; + t += (dpint)a[8] * b[4]; + t += (dpint)a[9] * b[3]; + t += (dpint)a[10] * b[2]; + t += (dpint)a[11] * b[1]; + t += (dpint)a[12] * b[0]; + spint v12 = ((spint)t & mask); + t >>= 28; + t += (dpint)a[0] * b[13]; + t += (dpint)a[1] * b[12]; + t += (dpint)a[2] * b[11]; + t += (dpint)a[3] * b[10]; + t += (dpint)a[4] * b[9]; + t += (dpint)a[5] * b[8]; + t += (dpint)a[6] * b[7]; + t += (dpint)a[7] * b[6]; + t += (dpint)a[8] * b[5]; + t += (dpint)a[9] * b[4]; + t += (dpint)a[10] * b[3]; + t += (dpint)a[11] * 
b[2]; + t += (dpint)a[12] * b[1]; + t += (dpint)a[13] * b[0]; + t += (dpint)v0 * (dpint)p13; + spint v13 = ((spint)t & mask); + t >>= 28; + t += (dpint)a[1] * b[13]; + t += (dpint)a[2] * b[12]; + t += (dpint)a[3] * b[11]; + t += (dpint)a[4] * b[10]; + t += (dpint)a[5] * b[9]; + t += (dpint)a[6] * b[8]; + t += (dpint)a[7] * b[7]; + t += (dpint)a[8] * b[6]; + t += (dpint)a[9] * b[5]; + t += (dpint)a[10] * b[4]; + t += (dpint)a[11] * b[3]; + t += (dpint)a[12] * b[2]; + t += (dpint)a[13] * b[1]; + t += (dpint)v1 * (dpint)p13; + c[0] = ((spint)t & mask); + t >>= 28; + t += (dpint)a[2] * b[13]; + t += (dpint)a[3] * b[12]; + t += (dpint)a[4] * b[11]; + t += (dpint)a[5] * b[10]; + t += (dpint)a[6] * b[9]; + t += (dpint)a[7] * b[8]; + t += (dpint)a[8] * b[7]; + t += (dpint)a[9] * b[6]; + t += (dpint)a[10] * b[5]; + t += (dpint)a[11] * b[4]; + t += (dpint)a[12] * b[3]; + t += (dpint)a[13] * b[2]; + t += (dpint)v2 * (dpint)p13; + c[1] = ((spint)t & mask); + t >>= 28; + t += (dpint)a[3] * b[13]; + t += (dpint)a[4] * b[12]; + t += (dpint)a[5] * b[11]; + t += (dpint)a[6] * b[10]; + t += (dpint)a[7] * b[9]; + t += (dpint)a[8] * b[8]; + t += (dpint)a[9] * b[7]; + t += (dpint)a[10] * b[6]; + t += (dpint)a[11] * b[5]; + t += (dpint)a[12] * b[4]; + t += (dpint)a[13] * b[3]; + t += (dpint)v3 * (dpint)p13; + c[2] = ((spint)t & mask); + t >>= 28; + t += (dpint)a[4] * b[13]; + t += (dpint)a[5] * b[12]; + t += (dpint)a[6] * b[11]; + t += (dpint)a[7] * b[10]; + t += (dpint)a[8] * b[9]; + t += (dpint)a[9] * b[8]; + t += (dpint)a[10] * b[7]; + t += (dpint)a[11] * b[6]; + t += (dpint)a[12] * b[5]; + t += (dpint)a[13] * b[4]; + t += (dpint)v4 * (dpint)p13; + c[3] = ((spint)t & mask); + t >>= 28; + t += (dpint)a[5] * b[13]; + t += (dpint)a[6] * b[12]; + t += (dpint)a[7] * b[11]; + t += (dpint)a[8] * b[10]; + t += (dpint)a[9] * b[9]; + t += (dpint)a[10] * b[8]; + t += (dpint)a[11] * b[7]; + t += (dpint)a[12] * b[6]; + t += (dpint)a[13] * b[5]; + t += (dpint)v5 * (dpint)p13; + c[4] = ((spint)t & mask); + t >>= 28; + t += (dpint)a[6] * b[13]; + t += (dpint)a[7] * b[12]; + t += (dpint)a[8] * b[11]; + t += (dpint)a[9] * b[10]; + t += (dpint)a[10] * b[9]; + t += (dpint)a[11] * b[8]; + t += (dpint)a[12] * b[7]; + t += (dpint)a[13] * b[6]; + t += (dpint)v6 * (dpint)p13; + c[5] = ((spint)t & mask); + t >>= 28; + t += (dpint)a[7] * b[13]; + t += (dpint)a[8] * b[12]; + t += (dpint)a[9] * b[11]; + t += (dpint)a[10] * b[10]; + t += (dpint)a[11] * b[9]; + t += (dpint)a[12] * b[8]; + t += (dpint)a[13] * b[7]; + t += (dpint)v7 * (dpint)p13; + c[6] = ((spint)t & mask); + t >>= 28; + t += (dpint)a[8] * b[13]; + t += (dpint)a[9] * b[12]; + t += (dpint)a[10] * b[11]; + t += (dpint)a[11] * b[10]; + t += (dpint)a[12] * b[9]; + t += (dpint)a[13] * b[8]; + t += (dpint)v8 * (dpint)p13; + c[7] = ((spint)t & mask); + t >>= 28; + t += (dpint)a[9] * b[13]; + t += (dpint)a[10] * b[12]; + t += (dpint)a[11] * b[11]; + t += (dpint)a[12] * b[10]; + t += (dpint)a[13] * b[9]; + t += (dpint)v9 * (dpint)p13; + c[8] = ((spint)t & mask); + t >>= 28; + t += (dpint)a[10] * b[13]; + t += (dpint)a[11] * b[12]; + t += (dpint)a[12] * b[11]; + t += (dpint)a[13] * b[10]; + t += (dpint)v10 * (dpint)p13; + c[9] = ((spint)t & mask); + t >>= 28; + t += (dpint)a[11] * b[13]; + t += (dpint)a[12] * b[12]; + t += (dpint)a[13] * b[11]; + t += (dpint)v11 * (dpint)p13; + c[10] = ((spint)t & mask); + t >>= 28; + t += (dpint)a[12] * b[13]; + t += (dpint)a[13] * b[12]; + t += (dpint)v12 * (dpint)p13; + c[11] = ((spint)t & mask); + t >>= 28; + t += (dpint)a[13] * b[13]; + t += 
(dpint)v13 * (dpint)p13; + c[12] = ((spint)t & mask); + t >>= 28; + c[13] = (spint)t; +} + +// Modular squaring, c=a*a mod 2p +static void modsqr(const spint *a, spint *c) { + udpint tot; + udpint t = 0; + spint p13 = 0x41000u; + spint q = ((spint)1 << 28u); // q is unsaturated radix + spint mask = (spint)(q - (spint)1); + tot = (udpint)a[0] * a[0]; + t = tot; + spint v0 = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[0] * a[1]; + tot *= 2; + t += tot; + spint v1 = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[0] * a[2]; + tot *= 2; + tot += (udpint)a[1] * a[1]; + t += tot; + spint v2 = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[0] * a[3]; + tot += (udpint)a[1] * a[2]; + tot *= 2; + t += tot; + spint v3 = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[0] * a[4]; + tot += (udpint)a[1] * a[3]; + tot *= 2; + tot += (udpint)a[2] * a[2]; + t += tot; + spint v4 = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[0] * a[5]; + tot += (udpint)a[1] * a[4]; + tot += (udpint)a[2] * a[3]; + tot *= 2; + t += tot; + spint v5 = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[0] * a[6]; + tot += (udpint)a[1] * a[5]; + tot += (udpint)a[2] * a[4]; + tot *= 2; + tot += (udpint)a[3] * a[3]; + t += tot; + spint v6 = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[0] * a[7]; + tot += (udpint)a[1] * a[6]; + tot += (udpint)a[2] * a[5]; + tot += (udpint)a[3] * a[4]; + tot *= 2; + t += tot; + spint v7 = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[0] * a[8]; + tot += (udpint)a[1] * a[7]; + tot += (udpint)a[2] * a[6]; + tot += (udpint)a[3] * a[5]; + tot *= 2; + tot += (udpint)a[4] * a[4]; + t += tot; + spint v8 = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[0] * a[9]; + tot += (udpint)a[1] * a[8]; + tot += (udpint)a[2] * a[7]; + tot += (udpint)a[3] * a[6]; + tot += (udpint)a[4] * a[5]; + tot *= 2; + t += tot; + spint v9 = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[0] * a[10]; + tot += (udpint)a[1] * a[9]; + tot += (udpint)a[2] * a[8]; + tot += (udpint)a[3] * a[7]; + tot += (udpint)a[4] * a[6]; + tot *= 2; + tot += (udpint)a[5] * a[5]; + t += tot; + spint v10 = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[0] * a[11]; + tot += (udpint)a[1] * a[10]; + tot += (udpint)a[2] * a[9]; + tot += (udpint)a[3] * a[8]; + tot += (udpint)a[4] * a[7]; + tot += (udpint)a[5] * a[6]; + tot *= 2; + t += tot; + spint v11 = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[0] * a[12]; + tot += (udpint)a[1] * a[11]; + tot += (udpint)a[2] * a[10]; + tot += (udpint)a[3] * a[9]; + tot += (udpint)a[4] * a[8]; + tot += (udpint)a[5] * a[7]; + tot *= 2; + tot += (udpint)a[6] * a[6]; + t += tot; + spint v12 = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[0] * a[13]; + tot += (udpint)a[1] * a[12]; + tot += (udpint)a[2] * a[11]; + tot += (udpint)a[3] * a[10]; + tot += (udpint)a[4] * a[9]; + tot += (udpint)a[5] * a[8]; + tot += (udpint)a[6] * a[7]; + tot *= 2; + t += tot; + t += (udpint)v0 * p13; + spint v13 = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[1] * a[13]; + tot += (udpint)a[2] * a[12]; + tot += (udpint)a[3] * a[11]; + tot += (udpint)a[4] * a[10]; + tot += (udpint)a[5] * a[9]; + tot += (udpint)a[6] * a[8]; + tot *= 2; + tot += (udpint)a[7] * a[7]; + t += tot; + t += (udpint)v1 * p13; + c[0] = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[2] * a[13]; + tot += (udpint)a[3] * a[12]; + tot += (udpint)a[4] * a[11]; + tot += (udpint)a[5] * a[10]; + tot += (udpint)a[6] * a[9]; + tot += (udpint)a[7] * a[8]; + tot *= 2; + t += tot; + t += (udpint)v2 * p13; + c[1] = ((spint)t & mask); + t >>= 28; + tot = 
(udpint)a[3] * a[13]; + tot += (udpint)a[4] * a[12]; + tot += (udpint)a[5] * a[11]; + tot += (udpint)a[6] * a[10]; + tot += (udpint)a[7] * a[9]; + tot *= 2; + tot += (udpint)a[8] * a[8]; + t += tot; + t += (udpint)v3 * p13; + c[2] = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[4] * a[13]; + tot += (udpint)a[5] * a[12]; + tot += (udpint)a[6] * a[11]; + tot += (udpint)a[7] * a[10]; + tot += (udpint)a[8] * a[9]; + tot *= 2; + t += tot; + t += (udpint)v4 * p13; + c[3] = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[5] * a[13]; + tot += (udpint)a[6] * a[12]; + tot += (udpint)a[7] * a[11]; + tot += (udpint)a[8] * a[10]; + tot *= 2; + tot += (udpint)a[9] * a[9]; + t += tot; + t += (udpint)v5 * p13; + c[4] = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[6] * a[13]; + tot += (udpint)a[7] * a[12]; + tot += (udpint)a[8] * a[11]; + tot += (udpint)a[9] * a[10]; + tot *= 2; + t += tot; + t += (udpint)v6 * p13; + c[5] = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[7] * a[13]; + tot += (udpint)a[8] * a[12]; + tot += (udpint)a[9] * a[11]; + tot *= 2; + tot += (udpint)a[10] * a[10]; + t += tot; + t += (udpint)v7 * p13; + c[6] = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[8] * a[13]; + tot += (udpint)a[9] * a[12]; + tot += (udpint)a[10] * a[11]; + tot *= 2; + t += tot; + t += (udpint)v8 * p13; + c[7] = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[9] * a[13]; + tot += (udpint)a[10] * a[12]; + tot *= 2; + tot += (udpint)a[11] * a[11]; + t += tot; + t += (udpint)v9 * p13; + c[8] = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[10] * a[13]; + tot += (udpint)a[11] * a[12]; + tot *= 2; + t += tot; + t += (udpint)v10 * p13; + c[9] = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[11] * a[13]; + tot *= 2; + tot += (udpint)a[12] * a[12]; + t += tot; + t += (udpint)v11 * p13; + c[10] = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[12] * a[13]; + tot *= 2; + t += tot; + t += (udpint)v12 * p13; + c[11] = ((spint)t & mask); + t >>= 28; + tot = (udpint)a[13] * a[13]; + t += tot; + t += (udpint)v13 * p13; + c[12] = ((spint)t & mask); + t >>= 28; + c[13] = (spint)t; +} + +// copy +static void modcpy(const spint *a, spint *c) { + int i; + for (i = 0; i < 14; i++) { + c[i] = a[i]; + } +} + +// square n times +static void modnsqr(spint *a, int n) { + int i; + for (i = 0; i < n; i++) { + modsqr(a, a); + } +} + +// Calculate progenitor +static void modpro(const spint *w, spint *z) { + spint x[14]; + spint t0[14]; + spint t1[14]; + spint t2[14]; + spint t3[14]; + spint t4[14]; + spint t5[14]; + modcpy(w, x); + modsqr(x, z); + modsqr(z, t0); + modmul(x, t0, t1); + modmul(z, t1, z); + modsqr(z, t0); + modsqr(t0, t3); + modsqr(t3, t4); + modsqr(t4, t2); + modcpy(t2, t5); + modnsqr(t5, 3); + modmul(t2, t5, t2); + modcpy(t2, t5); + modnsqr(t5, 6); + modmul(t2, t5, t2); + modcpy(t2, t5); + modnsqr(t5, 2); + modmul(t4, t5, t5); + modnsqr(t5, 13); + modmul(t2, t5, t2); + modcpy(t2, t5); + modnsqr(t5, 2); + modmul(t4, t5, t4); + modnsqr(t4, 28); + modmul(t2, t4, t2); + modsqr(t2, t4); + modmul(t3, t4, t3); + modnsqr(t3, 59); + modmul(t2, t3, t2); + modmul(t1, t2, t1); + modmul(z, t1, z); + modmul(t0, z, t0); + modmul(t1, t0, t1); + modsqr(t1, t2); + modmul(t1, t2, t2); + modsqr(t2, t2); + modmul(t1, t2, t2); + modmul(t0, t2, t0); + modmul(z, t0, z); + modsqr(z, t2); + modmul(z, t2, t2); + modmul(t0, t2, t0); + modmul(t1, t0, t1); + modcpy(t1, t2); + modnsqr(t2, 128); + modmul(t1, t2, t1); + modmul(t0, t1, t0); + modnsqr(t0, 125); + modmul(z, t0, z); +} + +// calculate inverse, provide progenitor h if available 
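/*
 * Illustrative aside, not from the upstream SQIsign sources: modpro() above
 * computes the "progenitor" g = w^((p-3)/4).  Since p = 3 (mod 4), the routines
 * that follow all derive from g via Fermat's little theorem:
 *
 *     modinv : g^4 * x = x^(p-2)     = 1/x
 *     modsqrt: g   * x = x^((p+1)/4) = sqrt(x)             (when x is a square)
 *     modqr  : g^2 * x = x^((p-1)/2) = Legendre symbol of x
 *
 * which is why modinv() squares the progenitor twice before its final
 * multiplication, and why modqr() and modsqrt() can reuse a caller-supplied
 * progenitor h instead of recomputing it.
 */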
+static void modinv(const spint *x, const spint *h, spint *z) { + spint s[14]; + spint t[14]; + if (h == NULL) { + modpro(x, t); + } else { + modcpy(h, t); + } + modcpy(x, s); + modnsqr(t, 2); + modmul(s, t, z); +} + +// Convert m to n-residue form, n=nres(m) +static void nres(const spint *m, spint *n) { + const spint c[14] = {0xf13732fu, 0x3f03f03u, 0x3f03f0u, 0xf03f03fu, + 0x3f03f03u, 0x3f03f0u, 0xf03f03fu, 0x3f03f03u, + 0x3f03f0u, 0xf03f03fu, 0x3f03f03u, 0x3f03f0u, + 0xf03f03fu, 0x14f03u}; + modmul(m, c, n); +} + +// Convert n back to normal form, m=redc(n) +static void redc(const spint *n, spint *m) { + int i; + spint c[14]; + c[0] = 1; + for (i = 1; i < 14; i++) { + c[i] = 0; + } + modmul(n, c, m); + (void)modfsb(m); +} + +// is unity? +static int modis1(const spint *a) { + int i; + spint c[14]; + spint c0; + spint d = 0; + redc(a, c); + for (i = 1; i < 14; i++) { + d |= c[i]; + } + c0 = (spint)c[0]; + return ((spint)1 & ((d - (spint)1) >> 28u) & + (((c0 ^ (spint)1) - (spint)1) >> 28u)); +} + +// is zero? +static int modis0(const spint *a) { + int i; + spint c[14]; + spint d = 0; + redc(a, c); + for (i = 0; i < 14; i++) { + d |= c[i]; + } + return ((spint)1 & ((d - (spint)1) >> 28u)); +} + +// set to zero +static void modzer(spint *a) { + int i; + for (i = 0; i < 14; i++) { + a[i] = 0; + } +} + +// set to one +static void modone(spint *a) { + int i; + a[0] = 1; + for (i = 1; i < 14; i++) { + a[i] = 0; + } + nres(a, a); +} + +// set to integer +static void modint(int x, spint *a) { + int i; + a[0] = (spint)x; + for (i = 1; i < 14; i++) { + a[i] = 0; + } + nres(a, a); +} + +// Modular multiplication by an integer, c=a*b mod 2p +static void modmli(const spint *a, int b, spint *c) { + spint t[14]; + modint(b, t); + modmul(a, t, c); +} + +// Test for quadratic residue +static int modqr(const spint *h, const spint *x) { + spint r[14]; + if (h == NULL) { + modpro(x, r); + modsqr(r, r); + } else { + modsqr(h, r); + } + modmul(r, x, r); + return modis1(r) | modis0(x); +} + +// conditional move g to f if d=1 +// strongly recommend inlining be disabled using compiler specific syntax +static void modcmv(int b, const spint *g, volatile spint *f) { + int i; + spint c0, c1, s, t; + spint r = 0x5aa5a55au; + c0 = (1 - b) + r; + c1 = b + r; + for (i = 0; i < 14; i++) { + s = g[i]; + t = f[i]; + f[i] = c0 * t + c1 * s; + f[i] -= r * (t + s); + } +} + +// conditional swap g and f if d=1 +// strongly recommend inlining be disabled using compiler specific syntax +static void modcsw(int b, volatile spint *g, volatile spint *f) { + int i; + spint c0, c1, s, t, w; + spint r = 0x5aa5a55au; + c0 = (1 - b) + r; + c1 = b + r; + for (i = 0; i < 14; i++) { + s = g[i]; + t = f[i]; + w = r * (t + s); + f[i] = c0 * t + c1 * s; + f[i] -= w; + g[i] = c0 * s + c1 * t; + g[i] -= w; + } +} + +// Modular square root, provide progenitor h if available, NULL if not +static void modsqrt(const spint *x, const spint *h, spint *r) { + spint s[14]; + spint y[14]; + if (h == NULL) { + modpro(x, y); + } else { + modcpy(h, y); + } + modmul(y, x, s); + modcpy(s, r); +} + +// shift left by less than a word +static void modshl(unsigned int n, spint *a) { + int i; + a[13] = ((a[13] << n)) | (a[12] >> (28u - n)); + for (i = 12; i > 0; i--) { + a[i] = ((a[i] << n) & (spint)0xfffffff) | (a[i - 1] >> (28u - n)); + } + a[0] = (a[0] << n) & (spint)0xfffffff; +} + +// shift right by less than a word. 
Return shifted out part +static int modshr(unsigned int n, spint *a) { + int i; + spint r = a[0] & (((spint)1 << n) - (spint)1); + for (i = 0; i < 13; i++) { + a[i] = (a[i] >> n) | ((a[i + 1] << (28u - n)) & (spint)0xfffffff); + } + a[13] = a[13] >> n; + return r; +} + +// set a= 2^r +static void mod2r(unsigned int r, spint *a) { + unsigned int n = r / 28u; + unsigned int m = r % 28u; + modzer(a); + if (r >= 48 * 8) + return; + a[n] = 1; + a[n] <<= m; + nres(a, a); +} + +// export to byte array +static void modexp(const spint *a, char *b) { + int i; + spint c[14]; + redc(a, c); + for (i = 47; i >= 0; i--) { + b[i] = c[0] & (spint)0xff; + (void)modshr(8, c); + } +} + +// import from byte array +// returns 1 if in range, else 0 +static int modimp(const char *b, spint *a) { + int i, res; + for (i = 0; i < 14; i++) { + a[i] = 0; + } + for (i = 0; i < 48; i++) { + modshl(8, a); + a[0] += (spint)(unsigned char)b[i]; + } + res = modfsb(a); + nres(a, a); + return res; +} + +// determine sign +static int modsign(const spint *a) { + spint c[14]; + redc(a, c); + return c[0] % 2; +} + +// return true if equal +static int modcmp(const spint *a, const spint *b) { + spint c[14], d[14]; + int i, eq = 1; + redc(a, c); + redc(b, d); + for (i = 0; i < 14; i++) { + eq &= (((c[i] ^ d[i]) - 1) >> 28) & 1; + } + return eq; +} + +// clang-format on +/****************************************************************************** + API functions calling generated code above + ******************************************************************************/ + +#include + +const digit_t ZERO[NWORDS_FIELD] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }; +const digit_t ONE[NWORDS_FIELD] = { + 0x000003f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00010000 +}; +// Montgomery representation of 2^-1 +static const digit_t TWO_INV[NWORDS_FIELD] = { 0x000001f8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00008000 }; +// Montgomery representation of 3^-1 +static const digit_t THREE_INV[NWORDS_FIELD] = { 0x0aaaabfa, 0x0aaaaaaa, 0x0aaaaaaa, 0x0aaaaaaa, 0x0aaaaaaa, + 0x0aaaaaaa, 0x0aaaaaaa, 0x0aaaaaaa, 0x0aaaaaaa, 0x0aaaaaaa, + 0x0aaaaaaa, 0x0aaaaaaa, 0x0aaaaaaa, 0x00030aaa }; +// Montgomery representation of 2^384 +static const digit_t R2[NWORDS_FIELD] = { 0x003f1373, 0x0f03f03f, 0x03f03f03, 0x003f03f0, 0x0f03f03f, + 0x03f03f03, 0x003f03f0, 0x0f03f03f, 0x03f03f03, 0x003f03f0, + 0x0f03f03f, 0x03f03f03, 0x003f03f0, 0x0000c03f }; + +void +fp_set_small(fp_t *x, const digit_t val) +{ + modint((int)val, *x); +} + +void +fp_mul_small(fp_t *x, const fp_t *a, const uint32_t val) +{ + modmli(*a, (int)val, *x); +} + +void +fp_set_zero(fp_t *x) +{ + modzer(*x); +} + +void +fp_set_one(fp_t *x) +{ + modone(*x); +} + +uint32_t +fp_is_equal(const fp_t *a, const fp_t *b) +{ + return -(uint32_t)modcmp(*a, *b); +} + +uint32_t +fp_is_zero(const fp_t *a) +{ + return -(uint32_t)modis0(*a); +} + +void +fp_copy(fp_t *out, const fp_t *a) +{ + modcpy(*a, *out); +} + +void +fp_cswap(fp_t *a, fp_t *b, uint32_t ctl) +{ + modcsw((int)(ctl & 0x1), *a, *b); +} + +void +fp_add(fp_t *out, const fp_t *a, const fp_t *b) +{ + modadd(*a, *b, *out); +} + +void +fp_sub(fp_t *out, const fp_t *a, const fp_t *b) +{ + modsub(*a, *b, *out); +} + +void +fp_neg(fp_t *out, const fp_t *a) +{ + modneg(*a, *out); +} + +void +fp_sqr(fp_t *out, 
const fp_t *a) +{ + modsqr(*a, *out); +} + +void +fp_mul(fp_t *out, const fp_t *a, const fp_t *b) +{ + modmul(*a, *b, *out); +} + +void +fp_inv(fp_t *x) +{ + modinv(*x, NULL, *x); +} + +uint32_t +fp_is_square(const fp_t *a) +{ + return -(uint32_t)modqr(NULL, *a); +} + +void +fp_sqrt(fp_t *a) +{ + modsqrt(*a, NULL, *a); +} + +void +fp_half(fp_t *out, const fp_t *a) +{ + modmul(TWO_INV, *a, *out); +} + +void +fp_exp3div4(fp_t *out, const fp_t *a) +{ + modpro(*a, *out); +} + +void +fp_div3(fp_t *out, const fp_t *a) +{ + modmul(THREE_INV, *a, *out); +} + +void +fp_encode(void *dst, const fp_t *a) +{ + // Modified version of modexp() + int i; + spint c[14]; + redc(*a, c); + for (i = 0; i < 48; i++) { + ((char *)dst)[i] = c[0] & (spint)0xff; + (void)modshr(8, c); + } +} + +uint32_t +fp_decode(fp_t *d, const void *src) +{ + // Modified version of modimp() + int i; + spint res; + const unsigned char *b = src; + for (i = 0; i < 14; i++) { + (*d)[i] = 0; + } + for (i = 47; i >= 0; i--) { + modshl(8, *d); + (*d)[0] += (spint)b[i]; + } + res = (spint)-modfsb(*d); + nres(*d, *d); + // If the value was canonical then res = -1; otherwise, res = 0 + for (i = 0; i < 14; i++) { + (*d)[i] &= res; + } + return (uint32_t)res; +} + +static inline unsigned char +add_carry(unsigned char cc, spint a, spint b, spint *d) +{ + udpint t = (udpint)a + (udpint)b + cc; + *d = (spint)t; + return (unsigned char)(t >> Wordlength); +} + +static void +partial_reduce(spint *out, const spint *src) +{ + spint h, l, quo, rem; + unsigned char cc; + + // Split value in high (8 bits) and low (376 bits) parts. + h = src[11] >> 24; + l = src[11] & 0x00FFFFFF; + + // 65*2^376 = 1 mod q; hence, we add floor(h/65) + (h mod 65)*2^376 + // to the low part. + quo = (h * 0xFC1) >> 18; + rem = h - (65 * quo); + cc = add_carry(0, src[0], quo, &out[0]); + cc = add_carry(cc, src[1], 0, &out[1]); + cc = add_carry(cc, src[2], 0, &out[2]); + cc = add_carry(cc, src[3], 0, &out[3]); + cc = add_carry(cc, src[4], 0, &out[4]); + cc = add_carry(cc, src[5], 0, &out[5]); + cc = add_carry(cc, src[6], 0, &out[6]); + cc = add_carry(cc, src[7], 0, &out[7]); + cc = add_carry(cc, src[8], 0, &out[8]); + cc = add_carry(cc, src[9], 0, &out[9]); + cc = add_carry(cc, src[10], 0, &out[10]); + (void)add_carry(cc, l, rem << 24, &out[11]); +} + +// Little-endian encoding of a 32-bit integer. +static inline void +enc32le(void *dst, uint32_t x) +{ + uint8_t *buf = dst; + buf[0] = (uint8_t)x; + buf[1] = (uint8_t)(x >> 8); + buf[2] = (uint8_t)(x >> 16); + buf[3] = (uint8_t)(x >> 24); +} + +// Little-endian decoding of a 32-bit integer. +static inline uint32_t +dec32le(const void *src) +{ + const uint8_t *buf = src; + return (spint)buf[0] | ((spint)buf[1] << 8) | ((spint)buf[2] << 16) | ((spint)buf[3] << 24); +} + +void +fp_decode_reduce(fp_t *d, const void *src, size_t len) +{ + uint32_t t[12]; // Stores Nbytes * 8 bits + uint8_t tmp[48]; // Nbytes + const uint8_t *b = src; + + fp_set_zero(d); + if (len == 0) { + return; + } + + size_t rem = len % 48; + if (rem != 0) { + // Input size is not a multiple of 48, we decode a partial + // block, which is already less than 2^376. + size_t k = len - rem; + memcpy(tmp, b + k, len - k); + memset(tmp + len - k, 0, (sizeof tmp) - (len - k)); + fp_decode(d, tmp); + len = k; + } + // Process all remaining blocks, in descending address order. 
+ while (len > 0) { + fp_mul(d, d, &R2); + len -= 48; + t[0] = dec32le(b + len); + t[1] = dec32le(b + len + 4); + t[2] = dec32le(b + len + 8); + t[3] = dec32le(b + len + 12); + t[4] = dec32le(b + len + 16); + t[5] = dec32le(b + len + 20); + t[6] = dec32le(b + len + 24); + t[7] = dec32le(b + len + 28); + t[8] = dec32le(b + len + 32); + t[9] = dec32le(b + len + 36); + t[10] = dec32le(b + len + 40); + t[11] = dec32le(b + len + 44); + partial_reduce(t, t); + enc32le(tmp, t[0]); + enc32le(tmp + 4, t[1]); + enc32le(tmp + 8, t[2]); + enc32le(tmp + 12, t[3]); + enc32le(tmp + 16, t[4]); + enc32le(tmp + 20, t[5]); + enc32le(tmp + 24, t[6]); + enc32le(tmp + 28, t[7]); + enc32le(tmp + 32, t[8]); + enc32le(tmp + 36, t[9]); + enc32le(tmp + 40, t[10]); + enc32le(tmp + 44, t[11]); + fp_t a; + fp_decode(&a, tmp); + fp_add(d, d, &a); + } +} + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c new file mode 100644 index 0000000000..9ac5fc5495 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c @@ -0,0 +1,875 @@ +// clang-format off +// Command line : python monty.py 64 +// 0x40ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff +#ifdef RADIX_64 + +#include +#include + +#define sspint int64_t +#define spint uint64_t +#define udpint __uint128_t +#define dpint __uint128_t + +#define Wordlength 64 +#define Nlimbs 7 +#define Radix 55 +#define Nbits 383 +#define Nbytes 48 + +#define MONTGOMERY +// propagate carries +inline static spint prop(spint *n) { + int i; + spint mask = ((spint)1 << 55u) - (spint)1; + sspint carry = (sspint)n[0]; + carry >>= 55u; + n[0] &= mask; + for (i = 1; i < 6; i++) { + carry += (sspint)n[i]; + n[i] = (spint)carry & mask; + carry >>= 55u; + } + n[6] += (spint)carry; + return -((n[6] >> 1) >> 62u); +} + +// propagate carries and add p if negative, propagate carries again +inline static int flatten(spint *n) { + spint carry = prop(n); + n[0] -= (spint)1u & carry; + n[6] += ((spint)0x10400000000000u) & carry; + (void)prop(n); + return (int)(carry & 1); +} + +// Montgomery final subtract +inline static int modfsb(spint *n) { + n[0] += (spint)1u; + n[6] -= (spint)0x10400000000000u; + return flatten(n); +} + +// Modular addition - reduce less than 2p +inline static void modadd(const spint *a, const spint *b, spint *n) { + spint carry; + n[0] = a[0] + b[0]; + n[1] = a[1] + b[1]; + n[2] = a[2] + b[2]; + n[3] = a[3] + b[3]; + n[4] = a[4] + b[4]; + n[5] = a[5] + b[5]; + n[6] = a[6] + b[6]; + n[0] += (spint)2u; + n[6] -= (spint)0x20800000000000u; + carry = prop(n); + n[0] -= (spint)2u & carry; + n[6] += ((spint)0x20800000000000u) & carry; + (void)prop(n); +} + +// Modular subtraction - reduce less than 2p +inline static void modsub(const spint *a, const spint *b, spint *n) { + spint carry; + n[0] = a[0] - b[0]; + n[1] = a[1] - b[1]; + n[2] = a[2] - b[2]; + n[3] = a[3] - b[3]; + n[4] = a[4] - b[4]; + n[5] = a[5] - b[5]; + n[6] = a[6] - b[6]; + carry = prop(n); + n[0] -= (spint)2u & carry; + n[6] += ((spint)0x20800000000000u) & carry; + (void)prop(n); +} + +// Modular negation +inline static void modneg(const spint *b, spint *n) { + spint carry; + n[0] = (spint)0 - b[0]; + n[1] = (spint)0 - b[1]; + n[2] = (spint)0 - b[2]; + n[3] = (spint)0 - b[3]; + n[4] = (spint)0 - b[4]; + n[5] = (spint)0 - b[5]; + n[6] = (spint)0 - b[6]; + carry = prop(n); + n[0] -= (spint)2u & carry; + n[6] += ((spint)0x20800000000000u) & carry; + (void)prop(n); +} + 
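/*
 * Illustrative aside, not from the upstream SQIsign sources: prop() returns
 * either 0 or an all-ones word, depending on whether the signed top limb went
 * negative, and flatten(), modadd(), modsub() and modneg() above use that value
 * as a mask to add p back in without branching.  A minimal standalone version
 * of the idiom (helper names are ad hoc), assuming an arithmetic right shift
 * for signed values, which the generated code above also relies on:
 */
#include <stdint.h>
#include <assert.h>

static uint64_t cond_add_mask(uint64_t x, uint64_t addend, uint64_t mask)
{
    /* mask is 0 or 0xFFFF...FF, so there is no data-dependent branch */
    return x + (addend & mask);
}

static void mask_idiom_demo(void)
{
    int64_t top = -5;                      /* pretend a subtraction went negative */
    uint64_t mask = (uint64_t)(top >> 63); /* arithmetic shift: 0 or all-ones     */
    assert(mask == UINT64_MAX);
    assert(cond_add_mask(7, 100, mask) == 107); /* correction applied */
    assert(cond_add_mask(7, 100, 0) == 7);      /* correction skipped */
}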
+// Overflow limit = 340282366920938463463374607431768211456 +// maximum possible = 9251314080475062396111552646217735 +// Modular multiplication, c=a*b mod 2p +inline static void modmul(const spint *a, const spint *b, spint *c) { + dpint t = 0; + spint p6 = 0x10400000000000u; + spint q = ((spint)1 << 55u); // q is unsaturated radix + spint mask = (spint)(q - (spint)1); + t += (dpint)a[0] * b[0]; + spint v0 = ((spint)t & mask); + t >>= 55; + t += (dpint)a[0] * b[1]; + t += (dpint)a[1] * b[0]; + spint v1 = ((spint)t & mask); + t >>= 55; + t += (dpint)a[0] * b[2]; + t += (dpint)a[1] * b[1]; + t += (dpint)a[2] * b[0]; + spint v2 = ((spint)t & mask); + t >>= 55; + t += (dpint)a[0] * b[3]; + t += (dpint)a[1] * b[2]; + t += (dpint)a[2] * b[1]; + t += (dpint)a[3] * b[0]; + spint v3 = ((spint)t & mask); + t >>= 55; + t += (dpint)a[0] * b[4]; + t += (dpint)a[1] * b[3]; + t += (dpint)a[2] * b[2]; + t += (dpint)a[3] * b[1]; + t += (dpint)a[4] * b[0]; + spint v4 = ((spint)t & mask); + t >>= 55; + t += (dpint)a[0] * b[5]; + t += (dpint)a[1] * b[4]; + t += (dpint)a[2] * b[3]; + t += (dpint)a[3] * b[2]; + t += (dpint)a[4] * b[1]; + t += (dpint)a[5] * b[0]; + spint v5 = ((spint)t & mask); + t >>= 55; + t += (dpint)a[0] * b[6]; + t += (dpint)a[1] * b[5]; + t += (dpint)a[2] * b[4]; + t += (dpint)a[3] * b[3]; + t += (dpint)a[4] * b[2]; + t += (dpint)a[5] * b[1]; + t += (dpint)a[6] * b[0]; + t += (dpint)v0 * (dpint)p6; + spint v6 = ((spint)t & mask); + t >>= 55; + t += (dpint)a[1] * b[6]; + t += (dpint)a[2] * b[5]; + t += (dpint)a[3] * b[4]; + t += (dpint)a[4] * b[3]; + t += (dpint)a[5] * b[2]; + t += (dpint)a[6] * b[1]; + t += (dpint)v1 * (dpint)p6; + c[0] = ((spint)t & mask); + t >>= 55; + t += (dpint)a[2] * b[6]; + t += (dpint)a[3] * b[5]; + t += (dpint)a[4] * b[4]; + t += (dpint)a[5] * b[3]; + t += (dpint)a[6] * b[2]; + t += (dpint)v2 * (dpint)p6; + c[1] = ((spint)t & mask); + t >>= 55; + t += (dpint)a[3] * b[6]; + t += (dpint)a[4] * b[5]; + t += (dpint)a[5] * b[4]; + t += (dpint)a[6] * b[3]; + t += (dpint)v3 * (dpint)p6; + c[2] = ((spint)t & mask); + t >>= 55; + t += (dpint)a[4] * b[6]; + t += (dpint)a[5] * b[5]; + t += (dpint)a[6] * b[4]; + t += (dpint)v4 * (dpint)p6; + c[3] = ((spint)t & mask); + t >>= 55; + t += (dpint)a[5] * b[6]; + t += (dpint)a[6] * b[5]; + t += (dpint)v5 * (dpint)p6; + c[4] = ((spint)t & mask); + t >>= 55; + t += (dpint)a[6] * b[6]; + t += (dpint)v6 * (dpint)p6; + c[5] = ((spint)t & mask); + t >>= 55; + c[6] = (spint)t; +} + +// Modular squaring, c=a*a mod 2p +inline static void modsqr(const spint *a, spint *c) { + udpint tot; + udpint t = 0; + spint p6 = 0x10400000000000u; + spint q = ((spint)1 << 55u); // q is unsaturated radix + spint mask = (spint)(q - (spint)1); + tot = (udpint)a[0] * a[0]; + t = tot; + spint v0 = ((spint)t & mask); + t >>= 55; + tot = (udpint)a[0] * a[1]; + tot *= 2; + t += tot; + spint v1 = ((spint)t & mask); + t >>= 55; + tot = (udpint)a[0] * a[2]; + tot *= 2; + tot += (udpint)a[1] * a[1]; + t += tot; + spint v2 = ((spint)t & mask); + t >>= 55; + tot = (udpint)a[0] * a[3]; + tot += (udpint)a[1] * a[2]; + tot *= 2; + t += tot; + spint v3 = ((spint)t & mask); + t >>= 55; + tot = (udpint)a[0] * a[4]; + tot += (udpint)a[1] * a[3]; + tot *= 2; + tot += (udpint)a[2] * a[2]; + t += tot; + spint v4 = ((spint)t & mask); + t >>= 55; + tot = (udpint)a[0] * a[5]; + tot += (udpint)a[1] * a[4]; + tot += (udpint)a[2] * a[3]; + tot *= 2; + t += tot; + spint v5 = ((spint)t & mask); + t >>= 55; + tot = (udpint)a[0] * a[6]; + tot += (udpint)a[1] * a[5]; + tot += (udpint)a[2] 
* a[4]; + tot *= 2; + tot += (udpint)a[3] * a[3]; + t += tot; + t += (udpint)v0 * p6; + spint v6 = ((spint)t & mask); + t >>= 55; + tot = (udpint)a[1] * a[6]; + tot += (udpint)a[2] * a[5]; + tot += (udpint)a[3] * a[4]; + tot *= 2; + t += tot; + t += (udpint)v1 * p6; + c[0] = ((spint)t & mask); + t >>= 55; + tot = (udpint)a[2] * a[6]; + tot += (udpint)a[3] * a[5]; + tot *= 2; + tot += (udpint)a[4] * a[4]; + t += tot; + t += (udpint)v2 * p6; + c[1] = ((spint)t & mask); + t >>= 55; + tot = (udpint)a[3] * a[6]; + tot += (udpint)a[4] * a[5]; + tot *= 2; + t += tot; + t += (udpint)v3 * p6; + c[2] = ((spint)t & mask); + t >>= 55; + tot = (udpint)a[4] * a[6]; + tot *= 2; + tot += (udpint)a[5] * a[5]; + t += tot; + t += (udpint)v4 * p6; + c[3] = ((spint)t & mask); + t >>= 55; + tot = (udpint)a[5] * a[6]; + tot *= 2; + t += tot; + t += (udpint)v5 * p6; + c[4] = ((spint)t & mask); + t >>= 55; + tot = (udpint)a[6] * a[6]; + t += tot; + t += (udpint)v6 * p6; + c[5] = ((spint)t & mask); + t >>= 55; + c[6] = (spint)t; +} + +// copy +inline static void modcpy(const spint *a, spint *c) { + int i; + for (i = 0; i < 7; i++) { + c[i] = a[i]; + } +} + +// square n times +static void modnsqr(spint *a, int n) { + int i; + for (i = 0; i < n; i++) { + modsqr(a, a); + } +} + +// Calculate progenitor +static void modpro(const spint *w, spint *z) { + spint x[7]; + spint t0[7]; + spint t1[7]; + spint t2[7]; + spint t3[7]; + spint t4[7]; + spint t5[7]; + modcpy(w, x); + modsqr(x, z); + modsqr(z, t0); + modmul(x, t0, t1); + modmul(z, t1, z); + modsqr(z, t0); + modsqr(t0, t3); + modsqr(t3, t4); + modsqr(t4, t2); + modcpy(t2, t5); + modnsqr(t5, 3); + modmul(t2, t5, t2); + modcpy(t2, t5); + modnsqr(t5, 6); + modmul(t2, t5, t2); + modcpy(t2, t5); + modnsqr(t5, 2); + modmul(t4, t5, t5); + modnsqr(t5, 13); + modmul(t2, t5, t2); + modcpy(t2, t5); + modnsqr(t5, 2); + modmul(t4, t5, t4); + modnsqr(t4, 28); + modmul(t2, t4, t2); + modsqr(t2, t4); + modmul(t3, t4, t3); + modnsqr(t3, 59); + modmul(t2, t3, t2); + modmul(t1, t2, t1); + modmul(z, t1, z); + modmul(t0, z, t0); + modmul(t1, t0, t1); + modsqr(t1, t2); + modmul(t1, t2, t2); + modsqr(t2, t2); + modmul(t1, t2, t2); + modmul(t0, t2, t0); + modmul(z, t0, z); + modsqr(z, t2); + modmul(z, t2, t2); + modmul(t0, t2, t0); + modmul(t1, t0, t1); + modcpy(t1, t2); + modnsqr(t2, 128); + modmul(t1, t2, t1); + modmul(t0, t1, t0); + modnsqr(t0, 125); + modmul(z, t0, z); +} + +// calculate inverse, provide progenitor h if available +static void modinv(const spint *x, const spint *h, spint *z) { + spint s[7]; + spint t[7]; + if (h == NULL) { + modpro(x, t); + } else { + modcpy(h, t); + } + modcpy(x, s); + modnsqr(t, 2); + modmul(s, t, z); +} + +// Convert m to n-residue form, n=nres(m) +static void nres(const spint *m, spint *n) { + const spint c[7] = {0xfc0fc0fc0fc4du, 0x781f81f81f81f8u, 0x3f03f03f03f03u, + 0x7e07e07e07e07eu, 0x40fc0fc0fc0fc0u, 0x1f81f81f81f81fu, + 0xcff03f03f03f0u}; + modmul(m, c, n); +} + +// Convert n back to normal form, m=redc(n) +static void redc(const spint *n, spint *m) { + int i; + spint c[7]; + c[0] = 1; + for (i = 1; i < 7; i++) { + c[i] = 0; + } + modmul(n, c, m); + (void)modfsb(m); +} + +// is unity? +static int modis1(const spint *a) { + int i; + spint c[7]; + spint c0; + spint d = 0; + redc(a, c); + for (i = 1; i < 7; i++) { + d |= c[i]; + } + c0 = (spint)c[0]; + return ((spint)1 & ((d - (spint)1) >> 55u) & + (((c0 ^ (spint)1) - (spint)1) >> 55u)); +} + +// is zero? 
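/*
 * Illustrative aside, not from the upstream SQIsign sources: field elements in
 * this file live in Montgomery ("n-residue") form, which is why modis0() and
 * modis1() call redc() before testing against 0 or 1, and why fp_encode() and
 * fp_decode() go through redc()/nres() at the boundary.  The vendored modmul()
 * interleaves the reduction limb by limb in an unsaturated 55-bit radix; the
 * classic single-word REDC below (helper names are ad hoc, modulus restricted
 * to odd m < 2^63) is shown only to illustrate the same principle.
 */
#include <stdint.h>
#include <assert.h>

static uint64_t neg_inv64(uint64_t m)        /* -m^(-1) mod 2^64, m odd  */
{
    uint64_t x = m;                          /* correct to 3 bits        */
    for (int i = 0; i < 5; i++)
        x *= 2 - m * x;                      /* Newton: doubles accuracy */
    return 0 - x;
}

static uint64_t redc64(__uint128_t t, uint64_t m, uint64_t m_inv)
{
    uint64_t q = (uint64_t)t * m_inv;        /* q = -t/m mod 2^64        */
    __uint128_t s = t + (__uint128_t)q * m;  /* low 64 bits cancel       */
    uint64_t r = (uint64_t)(s >> 64);        /* r < 2m                   */
    return (r >= m) ? r - m : r;             /* t * 2^(-64) mod m        */
}

static void redc_demo(void)
{
    const uint64_t m = 1000000007u, m_inv = neg_inv64(m);
    const uint64_t a = 123456789u, b = 987654321u;
    uint64_t a_bar = (uint64_t)((((__uint128_t)a) << 64) % m);      /* to Montgomery form     */
    uint64_t b_bar = (uint64_t)((((__uint128_t)b) << 64) % m);
    uint64_t ab_bar = redc64((__uint128_t)a_bar * b_bar, m, m_inv); /* product, still in form */
    uint64_t ab = redc64(ab_bar, m, m_inv);                         /* back to normal form    */
    assert(ab == (uint64_t)(((__uint128_t)a * b) % m));
}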
+static int modis0(const spint *a) { + int i; + spint c[7]; + spint d = 0; + redc(a, c); + for (i = 0; i < 7; i++) { + d |= c[i]; + } + return ((spint)1 & ((d - (spint)1) >> 55u)); +} + +// set to zero +static void modzer(spint *a) { + int i; + for (i = 0; i < 7; i++) { + a[i] = 0; + } +} + +// set to one +static void modone(spint *a) { + int i; + a[0] = 1; + for (i = 1; i < 7; i++) { + a[i] = 0; + } + nres(a, a); +} + +// set to integer +static void modint(int x, spint *a) { + int i; + a[0] = (spint)x; + for (i = 1; i < 7; i++) { + a[i] = 0; + } + nres(a, a); +} + +// Modular multiplication by an integer, c=a*b mod 2p +inline static void modmli(const spint *a, int b, spint *c) { + spint t[7]; + modint(b, t); + modmul(a, t, c); +} + +// Test for quadratic residue +static int modqr(const spint *h, const spint *x) { + spint r[7]; + if (h == NULL) { + modpro(x, r); + modsqr(r, r); + } else { + modsqr(h, r); + } + modmul(r, x, r); + return modis1(r) | modis0(x); +} + +// conditional move g to f if d=1 +// strongly recommend inlining be disabled using compiler specific syntax +static void modcmv(int b, const spint *g, volatile spint *f) { + int i; + spint c0, c1, s, t; + spint r = 0x3cc3c33c5aa5a55au; + c0 = (1 - b) + r; + c1 = b + r; + for (i = 0; i < 7; i++) { + s = g[i]; + t = f[i]; + f[i] = c0 * t + c1 * s; + f[i] -= r * (t + s); + } +} + +// conditional swap g and f if d=1 +// strongly recommend inlining be disabled using compiler specific syntax +static void modcsw(int b, volatile spint *g, volatile spint *f) { + int i; + spint c0, c1, s, t, w; + spint r = 0x3cc3c33c5aa5a55au; + c0 = (1 - b) + r; + c1 = b + r; + for (i = 0; i < 7; i++) { + s = g[i]; + t = f[i]; + w = r * (t + s); + f[i] = c0 * t + c1 * s; + f[i] -= w; + g[i] = c0 * s + c1 * t; + g[i] -= w; + } +} + +// Modular square root, provide progenitor h if available, NULL if not +static void modsqrt(const spint *x, const spint *h, spint *r) { + spint s[7]; + spint y[7]; + if (h == NULL) { + modpro(x, y); + } else { + modcpy(h, y); + } + modmul(y, x, s); + modcpy(s, r); +} + +// shift left by less than a word +static void modshl(unsigned int n, spint *a) { + int i; + a[6] = ((a[6] << n)) | (a[5] >> (55u - n)); + for (i = 5; i > 0; i--) { + a[i] = ((a[i] << n) & (spint)0x7fffffffffffff) | (a[i - 1] >> (55u - n)); + } + a[0] = (a[0] << n) & (spint)0x7fffffffffffff; +} + +// shift right by less than a word. 
Return shifted out part +static int modshr(unsigned int n, spint *a) { + int i; + spint r = a[0] & (((spint)1 << n) - (spint)1); + for (i = 0; i < 6; i++) { + a[i] = (a[i] >> n) | ((a[i + 1] << (55u - n)) & (spint)0x7fffffffffffff); + } + a[6] = a[6] >> n; + return r; +} + +// set a= 2^r +static void mod2r(unsigned int r, spint *a) { + unsigned int n = r / 55u; + unsigned int m = r % 55u; + modzer(a); + if (r >= 48 * 8) + return; + a[n] = 1; + a[n] <<= m; + nres(a, a); +} + +// export to byte array +static void modexp(const spint *a, char *b) { + int i; + spint c[7]; + redc(a, c); + for (i = 47; i >= 0; i--) { + b[i] = c[0] & (spint)0xff; + (void)modshr(8, c); + } +} + +// import from byte array +// returns 1 if in range, else 0 +static int modimp(const char *b, spint *a) { + int i, res; + for (i = 0; i < 7; i++) { + a[i] = 0; + } + for (i = 0; i < 48; i++) { + modshl(8, a); + a[0] += (spint)(unsigned char)b[i]; + } + res = modfsb(a); + nres(a, a); + return res; +} + +// determine sign +static int modsign(const spint *a) { + spint c[7]; + redc(a, c); + return c[0] % 2; +} + +// return true if equal +static int modcmp(const spint *a, const spint *b) { + spint c[7], d[7]; + int i, eq = 1; + redc(a, c); + redc(b, d); + for (i = 0; i < 7; i++) { + eq &= (((c[i] ^ d[i]) - 1) >> 55) & 1; + } + return eq; +} + +// clang-format on +/****************************************************************************** + API functions calling generated code above + ******************************************************************************/ + +#include + +const digit_t ZERO[NWORDS_FIELD] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }; +const digit_t ONE[NWORDS_FIELD] = { 0x0000000000000007, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x000e400000000000 }; +// Montgomery representation of 2^-1 +static const digit_t TWO_INV[NWORDS_FIELD] = { 0x0000000000000003, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x000f400000000000 }; +// Montgomery representation of 3^-1 +static const digit_t THREE_INV[NWORDS_FIELD] = { 0x0055555555555557, 0x002aaaaaaaaaaaaa, 0x0055555555555555, + 0x002aaaaaaaaaaaaa, 0x0055555555555555, 0x002aaaaaaaaaaaaa, + 0x000f955555555555 }; +// Montgomery representation of 2^384 +static const digit_t R2[NWORDS_FIELD] = { 0x0007e07e07e07e26, 0x007c0fc0fc0fc0fc, 0x0001f81f81f81f81, + 0x003f03f03f03f03f, 0x00607e07e07e07e0, 0x000fc0fc0fc0fc0f, + 0x000e9f81f81f81f8 }; + +void +fp_set_small(fp_t *x, const digit_t val) +{ + modint((int)val, *x); +} + +void +fp_mul_small(fp_t *x, const fp_t *a, const uint32_t val) +{ + modmli(*a, (int)val, *x); +} + +void +fp_set_zero(fp_t *x) +{ + modzer(*x); +} + +void +fp_set_one(fp_t *x) +{ + modone(*x); +} + +uint32_t +fp_is_equal(const fp_t *a, const fp_t *b) +{ + return -(uint32_t)modcmp(*a, *b); +} + +uint32_t +fp_is_zero(const fp_t *a) +{ + return -(uint32_t)modis0(*a); +} + +void +fp_copy(fp_t *out, const fp_t *a) +{ + modcpy(*a, *out); +} + +void +fp_cswap(fp_t *a, fp_t *b, uint32_t ctl) +{ + modcsw((int)(ctl & 0x1), *a, *b); +} + +void +fp_add(fp_t *out, const fp_t *a, const fp_t *b) +{ + modadd(*a, *b, *out); +} + +void +fp_sub(fp_t *out, const fp_t *a, const fp_t *b) +{ + modsub(*a, *b, *out); +} + +void +fp_neg(fp_t *out, const fp_t *a) +{ + modneg(*a, *out); +} + +void +fp_sqr(fp_t *out, const fp_t *a) +{ + modsqr(*a, *out); +} + +void +fp_mul(fp_t *out, const fp_t *a, const fp_t *b) +{ + modmul(*a, *b, *out); +} + +void +fp_inv(fp_t *x) 
+{ + modinv(*x, NULL, *x); +} + +uint32_t +fp_is_square(const fp_t *a) +{ + return -(uint32_t)modqr(NULL, *a); +} + +void +fp_sqrt(fp_t *a) +{ + modsqrt(*a, NULL, *a); +} + +void +fp_half(fp_t *out, const fp_t *a) +{ + modmul(TWO_INV, *a, *out); +} + +void +fp_exp3div4(fp_t *out, const fp_t *a) +{ + modpro(*a, *out); +} + +void +fp_div3(fp_t *out, const fp_t *a) +{ + modmul(THREE_INV, *a, *out); +} + +void +fp_encode(void *dst, const fp_t *a) +{ + // Modified version of modexp() + int i; + spint c[7]; + redc(*a, c); + for (i = 0; i < 48; i++) { + ((char *)dst)[i] = c[0] & (spint)0xff; + (void)modshr(8, c); + } +} + +uint32_t +fp_decode(fp_t *d, const void *src) +{ + // Modified version of modimp() + int i; + spint res; + const unsigned char *b = src; + for (i = 0; i < 7; i++) { + (*d)[i] = 0; + } + for (i = 47; i >= 0; i--) { + modshl(8, *d); + (*d)[0] += (spint)b[i]; + } + res = (spint)-modfsb(*d); + nres(*d, *d); + // If the value was canonical then res = -1; otherwise, res = 0 + for (i = 0; i < 7; i++) { + (*d)[i] &= res; + } + return (uint32_t)res; +} + +static inline unsigned char +add_carry(unsigned char cc, spint a, spint b, spint *d) +{ + udpint t = (udpint)a + (udpint)b + cc; + *d = (spint)t; + return (unsigned char)(t >> Wordlength); +} + +static void +partial_reduce(spint *out, const spint *src) +{ + spint h, l, quo, rem; + unsigned char cc; + + // Split value in high (8 bits) and low (376 bits) parts. + h = src[5] >> 56; + l = src[5] & 0x00FFFFFFFFFFFFFF; + + // 65*2^376 = 1 mod q; hence, we add floor(h/65) + (h mod 65)*2^376 + // to the low part. + quo = (h * 0xFC1) >> 18; + rem = h - (65 * quo); + cc = add_carry(0, src[0], quo, &out[0]); + cc = add_carry(cc, src[1], 0, &out[1]); + cc = add_carry(cc, src[2], 0, &out[2]); + cc = add_carry(cc, src[3], 0, &out[3]); + cc = add_carry(cc, src[4], 0, &out[4]); + (void)add_carry(cc, l, rem << 56, &out[5]); +} + +// Little-endian encoding of a 64-bit integer. +static inline void +enc64le(void *dst, uint64_t x) +{ + uint8_t *buf = dst; + buf[0] = (uint8_t)x; + buf[1] = (uint8_t)(x >> 8); + buf[2] = (uint8_t)(x >> 16); + buf[3] = (uint8_t)(x >> 24); + buf[4] = (uint8_t)(x >> 32); + buf[5] = (uint8_t)(x >> 40); + buf[6] = (uint8_t)(x >> 48); + buf[7] = (uint8_t)(x >> 56); +} + +// Little-endian decoding of a 64-bit integer. +static inline uint64_t +dec64le(const void *src) +{ + const uint8_t *buf = src; + return (spint)buf[0] | ((spint)buf[1] << 8) | ((spint)buf[2] << 16) | ((spint)buf[3] << 24) | + ((spint)buf[4] << 32) | ((spint)buf[5] << 40) | ((spint)buf[6] << 48) | ((spint)buf[7] << 56); +} + +void +fp_decode_reduce(fp_t *d, const void *src, size_t len) +{ + uint64_t t[6]; // Stores Nbytes * 8 bits + uint8_t tmp[48]; // Nbytes + const uint8_t *b = src; + + fp_set_zero(d); + if (len == 0) { + return; + } + + size_t rem = len % 48; + if (rem != 0) { + // Input size is not a multiple of 48, we decode a partial + // block, which is already less than 2^376. + size_t k = len - rem; + memcpy(tmp, b + k, len - k); + memset(tmp + len - k, 0, (sizeof tmp) - (len - k)); + fp_decode(d, tmp); + len = k; + } + // Process all remaining blocks, in descending address order. 
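+    // Each pass computes d = d * 2^384 + block: fp_mul() by R2 (the Montgomery +    // representation of 2^384) performs the 48-byte radix shift, and +    // partial_reduce() folds the top 8 bits of the raw 384-bit block back in, +    // as described there, before the block is re-encoded and fed to fp_decode().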
+ while (len > 0) { + fp_mul(d, d, &R2); + len -= 48; + t[0] = dec64le(b + len); + t[1] = dec64le(b + len + 8); + t[2] = dec64le(b + len + 16); + t[3] = dec64le(b + len + 24); + t[4] = dec64le(b + len + 32); + t[5] = dec64le(b + len + 40); + partial_reduce(t, t); + enc64le(tmp, t[0]); + enc64le(tmp + 8, t[1]); + enc64le(tmp + 16, t[2]); + enc64le(tmp + 24, t[3]); + enc64le(tmp + 32, t[4]); + enc64le(tmp + 40, t[5]); + fp_t a; + fp_decode(&a, tmp); + fp_add(d, d, &a); + } +} + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd.c new file mode 100644 index 0000000000..0424108019 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd.c @@ -0,0 +1,93 @@ +#include +#include + +void +double_couple_point(theta_couple_point_t *out, const theta_couple_point_t *in, const theta_couple_curve_t *E1E2) +{ + ec_dbl(&out->P1, &in->P1, &E1E2->E1); + ec_dbl(&out->P2, &in->P2, &E1E2->E2); +} + +void +double_couple_point_iter(theta_couple_point_t *out, + unsigned n, + const theta_couple_point_t *in, + const theta_couple_curve_t *E1E2) +{ + if (n == 0) { + memmove(out, in, sizeof(theta_couple_point_t)); + } else { + double_couple_point(out, in, E1E2); + for (unsigned i = 0; i < n - 1; i++) { + double_couple_point(out, out, E1E2); + } + } +} + +void +add_couple_jac_points(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *T1, + const theta_couple_jac_point_t *T2, + const theta_couple_curve_t *E1E2) +{ + ADD(&out->P1, &T1->P1, &T2->P1, &E1E2->E1); + ADD(&out->P2, &T1->P2, &T2->P2, &E1E2->E2); +} + +void +double_couple_jac_point(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2) +{ + DBL(&out->P1, &in->P1, &E1E2->E1); + DBL(&out->P2, &in->P2, &E1E2->E2); +} + +void +double_couple_jac_point_iter(theta_couple_jac_point_t *out, + unsigned n, + const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2) +{ + if (n == 0) { + *out = *in; + } else if (n == 1) { + double_couple_jac_point(out, in, E1E2); + } else { + fp2_t a1, a2, t1, t2; + + jac_to_ws(&out->P1, &t1, &a1, &in->P1, &E1E2->E1); + jac_to_ws(&out->P2, &t2, &a2, &in->P2, &E1E2->E2); + + DBLW(&out->P1, &t1, &out->P1, &t1); + DBLW(&out->P2, &t2, &out->P2, &t2); + for (unsigned i = 0; i < n - 1; i++) { + DBLW(&out->P1, &t1, &out->P1, &t1); + DBLW(&out->P2, &t2, &out->P2, &t2); + } + + jac_from_ws(&out->P1, &out->P1, &a1, &E1E2->E1); + jac_from_ws(&out->P2, &out->P2, &a2, &E1E2->E2); + } +} + +void +couple_jac_to_xz(theta_couple_point_t *P, const theta_couple_jac_point_t *xyP) +{ + jac_to_xz(&P->P1, &xyP->P1); + jac_to_xz(&P->P2, &xyP->P2); +} + +void +copy_bases_to_kernel(theta_kernel_couple_points_t *ker, const ec_basis_t *B1, const ec_basis_t *B2) +{ + // Copy the basis on E1 to (P, _) on T1, T2 and T1 - T2 + copy_point(&ker->T1.P1, &B1->P); + copy_point(&ker->T2.P1, &B1->Q); + copy_point(&ker->T1m2.P1, &B1->PmQ); + + // Copy the basis on E2 to (_, P) on T1, T2 and T1 - T2 + copy_point(&ker->T1.P2, &B2->P); + copy_point(&ker->T2.P2, &B2->Q); + copy_point(&ker->T1m2.P2, &B2->PmQ); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd.h new file mode 100644 index 0000000000..2b16e23834 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd.h @@ -0,0 +1,435 @@ +/** @file + * + * @authors Antonin Leroux + * + * @brief The HD-isogenies algorithm required by the signature + * + */ + +#ifndef HD_H +#define HD_H + +#include +#include 
+#include + +/** @defgroup hd_module Abelian surfaces and their isogenies + * @{ + */ + +#define HD_extra_torsion 2 + +/** @defgroup hd_struct Data structures for dimension 2 + * @{ + */ + +/** @brief Type for couple point with XZ coordinates + * @typedef theta_couple_point_t + * + * @struct theta_couple_point + * + * Structure for the couple point on an elliptic product + * using XZ coordinates + */ +typedef struct theta_couple_point +{ + ec_point_t P1; + ec_point_t P2; +} theta_couple_point_t; + +/** @brief Type for three couple points T1, T2, T1-T2 with XZ coordinates + * @typedef theta_kernel_couple_points_t + * + * @struct theta_kernel_couple_points + * + * Structure for a triple of theta couple points T1, T2 and T1 - T2 + */ +typedef struct theta_kernel_couple_points +{ + theta_couple_point_t T1; + theta_couple_point_t T2; + theta_couple_point_t T1m2; +} theta_kernel_couple_points_t; + +/** @brief Type for couple point with XYZ coordinates + * @typedef theta_couple_jac_point_t + * + * @struct theta_couple_jac_point + * + * Structure for the couple point on an elliptic product + * using XYZ coordinates + */ +typedef struct theta_couple_jac_point +{ + jac_point_t P1; + jac_point_t P2; +} theta_couple_jac_point_t; + +/** @brief Type for couple curve * + * @typedef theta_couple_curve_t + * + * @struct theta_couple_curve + * + * the theta_couple_curve structure + */ +typedef struct theta_couple_curve +{ + ec_curve_t E1; + ec_curve_t E2; +} theta_couple_curve_t; + +/** @brief Type for a product E1 x E2 with corresponding bases + * @typedef theta_couple_curve_with_basis_t + * + * @struct theta_couple_curve_with_basis + * + * tType for a product E1 x E2 with corresponding bases Ei[2^n] + */ +typedef struct theta_couple_curve_with_basis +{ + ec_curve_t E1; + ec_curve_t E2; + ec_basis_t B1; + ec_basis_t B2; +} theta_couple_curve_with_basis_t; + +/** @brief Type for theta point * + * @typedef theta_point_t + * + * @struct theta_point + * + * the theta_point structure used + */ +typedef struct theta_point +{ + fp2_t x; + fp2_t y; + fp2_t z; + fp2_t t; +} theta_point_t; + +/** @brief Type for theta point with repeating components + * @typedef theta_point_compact_t + * + * @struct theta_point_compact + * + * the theta_point structure used for points with repeated components + */ +typedef struct theta_point_compact +{ + fp2_t x; + fp2_t y; +} theta_point_compact_t; + +/** @brief Type for theta structure * + * @typedef theta_structure_t + * + * @struct theta_structure + * + * the theta_structure structure used + */ +typedef struct theta_structure +{ + theta_point_t null_point; + bool precomputation; + + // Eight precomputed values used for doubling and + // (2,2)-isogenies. + fp2_t XYZ0; + fp2_t YZT0; + fp2_t XZT0; + fp2_t XYT0; + + fp2_t xyz0; + fp2_t yzt0; + fp2_t xzt0; + fp2_t xyt0; +} theta_structure_t; + +/** @brief A 2x2 matrix used for action by translation + * @typedef translation_matrix_t + * + * @struct translation_matrix + * + * Structure to hold 4 fp2_t elements representing a 2x2 matrix used when computing + * a compatible theta structure during gluing. + */ +typedef struct translation_matrix +{ + fp2_t g00; + fp2_t g01; + fp2_t g10; + fp2_t g11; +} translation_matrix_t; + +/** @brief A 4x4 matrix used for basis changes + * @typedef basis_change_matrix_t + * + * @struct basis_change_matrix + * + * Structure to hold 16 elements representing a 4x4 matrix used for changing + * the basis of a theta point. 
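+ * + * Each entry is an fp2_t coefficient; the matrix acts on a theta point by + * taking linear combinations of its four coordinates (x, y, z, t), and is + * used by the gluing and splitting isomorphisms below.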
+ */ +typedef struct basis_change_matrix +{ + fp2_t m[4][4]; +} basis_change_matrix_t; + +/** @brief Type for gluing (2,2) theta isogeny * + * @typedef theta_gluing_t + * + * @struct theta_gluing + * + * the theta_gluing structure + */ +typedef struct theta_gluing +{ + + theta_couple_curve_t domain; + theta_couple_jac_point_t xyK1_8; + theta_point_compact_t imageK1_8; + basis_change_matrix_t M; + theta_point_t precomputation; + theta_point_t codomain; + +} theta_gluing_t; + +/** @brief Type for standard (2,2) theta isogeny * + * @typedef theta_isogeny_t + * + * @struct theta_isogeny + * + * the theta_isogeny structure + */ +typedef struct theta_isogeny +{ + theta_point_t T1_8; + theta_point_t T2_8; + bool hadamard_bool_1; + bool hadamard_bool_2; + theta_structure_t domain; + theta_point_t precomputation; + theta_structure_t codomain; +} theta_isogeny_t; + +/** @brief Type for splitting isomorphism * + * @typedef theta_splitting_t + * + * @struct theta_splitting + * + * the theta_splitting structure + */ +typedef struct theta_splitting +{ + basis_change_matrix_t M; + theta_structure_t B; + +} theta_splitting_t; + +// end of hd_struct +/** + * @} + */ + +/** @defgroup hd_functions Functions for dimension 2 + * @{ + */ + +/** + * @brief Compute the double of the theta couple point in on the elliptic product E12 + * + * @param out Output: the theta_couple_point + * @param in the theta couple point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1,P2) + * out = [2] (P1,P2) + * + */ +void double_couple_point(theta_couple_point_t *out, const theta_couple_point_t *in, const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the iterated double of the theta couple point in on the elliptic product E12 + * + * @param out Output: the theta_couple_point + * @param n : the number of iteration + * @param E1E2 an elliptic product + * @param in the theta couple point in the elliptic product + * in = (P1,P2) + * out = [2^n] (P1,P2) + * + */ +void double_couple_point_iter(theta_couple_point_t *out, + unsigned n, + const theta_couple_point_t *in, + const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the addition of two points in (X : Y : Z) coordinates on the elliptic product E12 + * + * @param out Output: the theta_couple_jac_point + * @param T1 the theta couple jac point in the elliptic product + * @param T2 the theta couple jac point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1, P2), (Q1, Q2) + * out = (P1 + Q1, P2 + Q2) + * + **/ +void add_couple_jac_points(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *T1, + const theta_couple_jac_point_t *T2, + const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the double of the theta couple point in on the elliptic product E12 + * + * @param out Output: the theta_couple_point + * @param in the theta couple point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1,P2) + * out = [2] (P1,P2) + * + */ +void double_couple_jac_point(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the iterated double of the theta couple jac point in on the elliptic product E12 + * + * @param out Output: the theta_couple_jac_point + * @param n : the number of iteration + * @param in the theta couple jac point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1,P2) + * out = [2^n] (P1,P2) + * + */ +void double_couple_jac_point_iter(theta_couple_jac_point_t *out, + unsigned n, + 
const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2); + +/** + * @brief A forgetful function which returns (X : Z) points given a pair of (X : Y : Z) points + * + * @param P Output: the theta_couple_point + * @param xyP : the theta_couple_jac_point + **/ +void couple_jac_to_xz(theta_couple_point_t *P, const theta_couple_jac_point_t *xyP); + +/** + * @brief Compute a (2,2) isogeny chain in dimension 2 between elliptic + * products in the theta_model and evaluate at a list of points of the form + * (P1,0) or (0,P2). Returns 0 if the codomain fails to split (or there is + * an error during the computation) and 1 otherwise. + * + * @param n : the length of the isogeny chain + * @param E12 an elliptic curve product + * @param ker T1, T2 and T1-T2. couple points on E12[2^(n+2)] + * @param extra_torsion boolean indicating if we give the points in E12[2^n] or + * E12[2^(n+HD_extra_torsion)] + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @returns 1 on success 0 on failure + * + */ +int theta_chain_compute_and_eval(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP); + +/** + * @brief Compute a (2,2) isogeny chain in dimension 2 between elliptic + * products in the theta_model and evaluate at a list of points of the form + * (P1,0) or (0,P2). Returns 0 if the codomain fails to split (or there is + * an error during the computation) and 1 otherwise. + * Compared to theta_chain_compute_and_eval, it does extra isotropy + * checks on the kernel. + * + * @param n : the length of the isogeny chain + * @param E12 an elliptic curve product + * @param ker T1, T2 and T1-T2. couple points on E12[2^(n+2)] + * @param extra_torsion boolean indicating if we give the points in E12[2^n] or + * E12[2^(n+HD_extra_torsion)] + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @returns 1 on success 0 on failure + * + */ +int theta_chain_compute_and_eval_verify(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP); + +/** + * @brief Compute a (2,2) isogeny chain in dimension 2 between elliptic + * products in the theta_model and evaluate at a list of points of the form + * (P1,0) or (0,P2). Returns 0 if the codomain fails to split (or there is + * an error during the computation) and 1 otherwise. + * Compared to theta_chain_compute_and_eval, it selects a random Montgomery + * model of the codomain. + * + * @param n : the length of the isogeny chain + * @param E12 an elliptic curve product + * @param ker T1, T2 and T1-T2. 
couple points on E12[2^(n+2)] + * @param extra_torsion boolean indicating if we give the points in E12[2^n] or + * E12[2^(n+HD_extra_torsion)] + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @returns 1 on success, 0 on failure + * + */ +int theta_chain_compute_and_eval_randomized(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP); + +/** + * @brief Given a bases B1 on E1 and B2 on E2 copies this to create a kernel + * on E1 x E2 as couple points T1, T2 and T1 - T2 + * + * @param ker Output: a kernel for dim_two_isogenies (T1, T2, T1-T2) + * @param B1 Input basis on E1 + * @param B2 Input basis on E2 + **/ +void copy_bases_to_kernel(theta_kernel_couple_points_t *ker, const ec_basis_t *B1, const ec_basis_t *B2); + +/** + * @brief Given a couple of points (P1, P2) on a couple of curves (E1, E2) + * this function tests if both points are of order exactly 2^t + * + * @param T: couple point (P1, P2) + * @param E: a couple of curves (E1, E2) + * @param t: an integer + * @returns 0xFFFFFFFF on success, 0 on failure + */ +static int +test_couple_point_order_twof(const theta_couple_point_t *T, const theta_couple_curve_t *E, int t) +{ + int check_P1 = test_point_order_twof(&T->P1, &E->E1, t); + int check_P2 = test_point_order_twof(&T->P2, &E->E2, t); + + return check_P1 & check_P2; +} + +// end of hd_functions +/** + * @} + */ +// end of hd_module +/** + * @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.c new file mode 100644 index 0000000000..d980d12183 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.c @@ -0,0 +1,143 @@ +#include + +#define FP2_ZERO 0 +#define FP2_ONE 1 +#define FP2_I 2 +#define FP2_MINUS_ONE 3 +#define FP2_MINUS_I 4 + +const int EVEN_INDEX[10][2] = {{0, 0}, {0, 1}, {0, 2}, {0, 3}, {1, 0}, {1, 2}, {2, 0}, {2, 1}, {3, 0}, {3, 3}}; +const int CHI_EVAL[4][4] = {{1, 1, 1, 1}, {1, -1, 1, -1}, {1, 1, -1, -1}, {1, -1, -1, 1}}; +const fp2_t FP2_CONSTANTS[5] = {{ +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 
0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +#elif RADIX == 32 +{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +#else +{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x1f03, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0xfff, 0x1e} +#elif RADIX == 32 +{0xffffc0f, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0x30fff} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xfffffffffffffffc, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff} +#else +{0x7ffffffffffff8, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x1ffffffffffff} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1f03, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0xfff, 0x1e} +#elif RADIX == 32 +{0xffffc0f, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0x30fff} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) 
+{0xfffffffffffffffc, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff} +#else +{0x7ffffffffffff8, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x1ffffffffffff} +#endif +#endif +}}; +const precomp_basis_change_matrix_t SPLITTING_TRANSFORMS[10] = {{{{FP2_ONE, FP2_I, FP2_ONE, FP2_I}, {FP2_ONE, FP2_MINUS_I, FP2_MINUS_ONE, FP2_I}, {FP2_ONE, FP2_I, FP2_MINUS_ONE, FP2_MINUS_I}, {FP2_MINUS_ONE, FP2_I, FP2_MINUS_ONE, FP2_I}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_MINUS_ONE, FP2_ZERO, FP2_ZERO}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_MINUS_ONE, FP2_ZERO}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}, {FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_ONE}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_ONE}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}, {FP2_MINUS_ONE, FP2_ONE, FP2_ONE, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}}}}; +const precomp_basis_change_matrix_t NORMALIZATION_TRANSFORMS[6] = {{{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}}}, {{{FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}}}, {{{FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_ONE}, {FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}}}, {{{FP2_MINUS_ONE, FP2_I, FP2_I, FP2_ONE}, {FP2_I, FP2_MINUS_ONE, FP2_ONE, FP2_I}, {FP2_I, FP2_ONE, FP2_MINUS_ONE, FP2_I}, {FP2_ONE, FP2_I, FP2_I, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_I, FP2_I, FP2_MINUS_ONE}, {FP2_I, FP2_ONE, FP2_MINUS_ONE, FP2_I}, {FP2_I, FP2_MINUS_ONE, FP2_ONE, FP2_I}, {FP2_MINUS_ONE, FP2_I, FP2_I, FP2_ONE}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.h new file mode 100644 index 0000000000..b3147a42a9 --- /dev/null +++ 
b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.h @@ -0,0 +1,18 @@ +#ifndef HD_SPLITTING_H +#define HD_SPLITTING_H + +#include +#include + +typedef struct precomp_basis_change_matrix { + uint8_t m[4][4]; +} precomp_basis_change_matrix_t; + +extern const int EVEN_INDEX[10][2]; +extern const int CHI_EVAL[4][4]; +extern const fp2_t FP2_CONSTANTS[5]; +extern const precomp_basis_change_matrix_t SPLITTING_TRANSFORMS[10]; +extern const precomp_basis_change_matrix_t NORMALIZATION_TRANSFORMS[6]; + +#endif + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf.c new file mode 100644 index 0000000000..1fb4c0f139 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf.c @@ -0,0 +1,210 @@ +#include "hnf_internal.h" +#include "internal.h" + +// HNF test function +int +ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat) +{ + int res = 1; + int found; + int ind = 0; + ibz_t zero; + ibz_init(&zero); + // upper triangular + for (int i = 0; i < 4; i++) { + // upper triangular + for (int j = 0; j < i; j++) { + res = res && ibz_is_zero(&((*mat)[i][j])); + } + // find first non 0 element of line + found = 0; + for (int j = i; j < 4; j++) { + if (found) { + // all values are positive, and first non-0 is the largest of that line + res = res && (ibz_cmp(&((*mat)[i][j]), &zero) >= 0); + res = res && (ibz_cmp(&((*mat)[i][ind]), &((*mat)[i][j])) > 0); + } else { + if (!ibz_is_zero(&((*mat)[i][j]))) { + found = 1; + ind = j; + // must be positive + res = res && (ibz_cmp(&((*mat)[i][j]), &zero) > 0); + } + } + } + } + // check that the index of the first non-zero element per column is strictly increasing + int linestart = -1; + int i = 0; + for (int j = 0; j < 4; j++) { + while ((i < 4) && (ibz_is_zero(&((*mat)[i][j])))) { + i = i + 1; + } + if (i != 4) { + res = res && (linestart < i); + } + i = 0; + } + ibz_finalize(&zero); + return res; +} + +// Untested HNF helpers +// centered mod +void +ibz_vec_4_linear_combination_mod(ibz_vec_4_t *lc, + const ibz_t *coeff_a, + const ibz_vec_4_t *vec_a, + const ibz_t *coeff_b, + const ibz_vec_4_t *vec_b, + const ibz_t *mod) +{ + ibz_t prod, m; + ibz_vec_4_t sums; + ibz_vec_4_init(&sums); + ibz_init(&prod); + ibz_init(&m); + ibz_copy(&m, mod); + for (int i = 0; i < 4; i++) { + ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); + ibz_mul(&prod, coeff_b, &((*vec_b)[i])); + ibz_add(&(sums[i]), &(sums[i]), &prod); + ibz_centered_mod(&(sums[i]), &(sums[i]), &m); + } + for (int i = 0; i < 4; i++) { + ibz_copy(&((*lc)[i]), &(sums[i])); + } + ibz_finalize(&prod); + ibz_finalize(&m); + ibz_vec_4_finalize(&sums); +} + +void +ibz_vec_4_copy_mod(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_t *mod) +{ + ibz_t m; + ibz_init(&m); + ibz_copy(&m, mod); + for (int i = 0; i < 4; i++) { + ibz_centered_mod(&((*res)[i]), &((*vec)[i]), &m); + } + ibz_finalize(&m); +} + +// no need to center this, and not 0 +void +ibz_vec_4_scalar_mul_mod(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec, const ibz_t *mod) +{ + ibz_t m, s; + ibz_init(&m); + ibz_init(&s); + ibz_copy(&s, scalar); + ibz_copy(&m, mod); + for (int i = 0; i < 4; i++) { + ibz_mul(&((*prod)[i]), &((*vec)[i]), &s); + ibz_mod(&((*prod)[i]), &((*prod)[i]), &m); + } + ibz_finalize(&m); + ibz_finalize(&s); +} + +// Algorithm used is the one at number 2.4.8 in Henri Cohen's "A Course in Computational Algebraic +// Number Theory" (Springer Verlag, in series "Graduate texts in Mathematics") from 1993 +// assumes ibz_xgcd outputs u,v which are small in
absolute value (as described in the +// book) +void +ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec_4_t *generators, const ibz_t *mod) +{ + int i = 3; + assert(generator_number > 3); + int n = generator_number; + int j = n - 1; + int k = n - 1; + ibz_t b, u, v, d, q, m, coeff_1, coeff_2, r; + ibz_vec_4_t c; + ibz_vec_4_t a[generator_number]; + ibz_vec_4_t w[4]; + ibz_init(&b); + ibz_init(&d); + ibz_init(&u); + ibz_init(&v); + ibz_init(&r); + ibz_init(&m); + ibz_init(&q); + ibz_init(&coeff_1); + ibz_init(&coeff_2); + ibz_vec_4_init(&c); + for (int h = 0; h < n; h++) { + if (h < 4) + ibz_vec_4_init(&(w[h])); + ibz_vec_4_init(&(a[h])); + ibz_copy(&(a[h][0]), &(generators[h][0])); + ibz_copy(&(a[h][1]), &(generators[h][1])); + ibz_copy(&(a[h][2]), &(generators[h][2])); + ibz_copy(&(a[h][3]), &(generators[h][3])); + } + assert(ibz_cmp(mod, &ibz_const_zero) > 0); + ibz_copy(&m, mod); + while (i != -1) { + while (j != 0) { + j = j - 1; + if (!ibz_is_zero(&(a[j][i]))) { + // assumtion that ibz_xgcd outputs u,v which are small in absolute + // value is needed here also, needs u non 0, but v can be 0 if needed + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &(a[j][i])); + ibz_vec_4_linear_combination(&c, &u, &(a[k]), &v, &(a[j])); + ibz_div(&coeff_1, &r, &(a[k][i]), &d); + ibz_div(&coeff_2, &r, &(a[j][i]), &d); + ibz_neg(&coeff_2, &coeff_2); + ibz_vec_4_linear_combination_mod( + &(a[j]), &coeff_1, &(a[j]), &coeff_2, &(a[k]), &m); // do lin comb mod m + ibz_vec_4_copy_mod(&(a[k]), &c, &m); // mod m in copy + } + } + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &m); + ibz_vec_4_scalar_mul_mod(&(w[i]), &u, &(a[k]), &m); // mod m in scalar mult + if (ibz_is_zero(&(w[i][i]))) { + ibz_copy(&(w[i][i]), &m); + } + for (int h = i + 1; h < 4; h++) { + ibz_div_floor(&q, &r, &(w[h][i]), &(w[i][i])); + ibz_neg(&q, &q); + ibz_vec_4_linear_combination(&(w[h]), &ibz_const_one, &(w[h]), &q, &(w[i])); + } + ibz_div(&m, &r, &m, &d); + assert(ibz_is_zero(&r)); + if (i != 0) { + k = k - 1; + i = i - 1; + j = k; + if (ibz_is_zero(&(a[k][i]))) + ibz_copy(&(a[k][i]), &m); + + } else { + k = k - 1; + i = i - 1; + j = k; + } + } + for (j = 0; j < 4; j++) { + for (i = 0; i < 4; i++) { + ibz_copy(&((*hnf)[i][j]), &(w[j][i])); + } + } + + ibz_finalize(&b); + ibz_finalize(&d); + ibz_finalize(&u); + ibz_finalize(&v); + ibz_finalize(&r); + ibz_finalize(&q); + ibz_finalize(&coeff_1); + ibz_finalize(&coeff_2); + ibz_finalize(&m); + ibz_vec_4_finalize(&c); + for (int h = 0; h < n; h++) { + if (h < 4) + ibz_vec_4_finalize(&(w[h])); + ibz_vec_4_finalize(&(a[h])); + } +} \ No newline at end of file diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf_internal.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf_internal.c new file mode 100644 index 0000000000..b2db5b54c9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf_internal.c @@ -0,0 +1,182 @@ +#include "hnf_internal.h" +#include "internal.h" + +// Small helper for integers +void +ibz_mod_not_zero(ibz_t *res, const ibz_t *x, const ibz_t *mod) +{ + ibz_t m, t; + ibz_init(&m); + ibz_init(&t); + ibz_mod(&m, x, mod); + ibz_set(&t, ibz_is_zero(&m)); + ibz_mul(&t, &t, mod); + ibz_add(res, &m, &t); + ibz_finalize(&m); + ibz_finalize(&t); +} + +// centered and rather positive then negative +void +ibz_centered_mod(ibz_t *remainder, const ibz_t *a, const ibz_t *mod) +{ + assert(ibz_cmp(mod, &ibz_const_zero) > 0); + ibz_t tmp, d, t; + ibz_init(&tmp); + ibz_init(&d); + ibz_init(&t); + ibz_div_floor(&d, &tmp, mod, &ibz_const_two); + 
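   // d = floor(mod/2): any representative in (d, mod] is shifted down by mod below, +    // so the result lands in the centered interval ]-mod/2, mod/2]. +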
ibz_mod_not_zero(&tmp, a, mod); + ibz_set(&t, ibz_cmp(&tmp, &d) > 0); + ibz_mul(&t, &t, mod); + ibz_sub(remainder, &tmp, &t); + ibz_finalize(&tmp); + ibz_finalize(&d); + ibz_finalize(&t); +} + +// if c, res = x, else res = y +void +ibz_conditional_assign(ibz_t *res, const ibz_t *x, const ibz_t *y, int c) +{ + ibz_t s, t, r; + ibz_init(&r); + ibz_init(&s); + ibz_init(&t); + ibz_set(&s, c != 0); + ibz_sub(&t, &ibz_const_one, &s); + ibz_mul(&r, &s, x); + ibz_mul(res, &t, y); + ibz_add(res, &r, res); + ibz_finalize(&r); + ibz_finalize(&s); + ibz_finalize(&t); +} + +// mpz_gcdext specification specifies unique outputs used here +void +ibz_xgcd_with_u_not_0(ibz_t *d, ibz_t *u, ibz_t *v, const ibz_t *x, const ibz_t *y) +{ + if (ibz_is_zero(x) & ibz_is_zero(y)) { + ibz_set(d, 1); + ibz_set(u, 1); + ibz_set(v, 0); + return; + } + ibz_t q, r, x1, y1; + ibz_init(&q); + ibz_init(&r); + ibz_init(&x1); + ibz_init(&y1); + ibz_copy(&x1, x); + ibz_copy(&y1, y); + + // xgcd + ibz_xgcd(d, u, v, &x1, &y1); + + // make sure u!=0 (v can be 0 if needed) + // following GMP specification, u == 0 implies y|x + if (ibz_is_zero(u)) { + if (!ibz_is_zero(&x1)) { + if (ibz_is_zero(&y1)) { + ibz_set(&y1, 1); + } + ibz_div(&q, &r, &x1, &y1); + assert(ibz_is_zero(&r)); + ibz_sub(v, v, &q); + } + ibz_set(u, 1); + } + if (!ibz_is_zero(&x1)) { + // Make sure ux > 0 (and as small as possible) + assert(ibz_cmp(d, &ibz_const_zero) > 0); + ibz_mul(&r, &x1, &y1); + int neg = ibz_cmp(&r, &ibz_const_zero) < 0; + ibz_mul(&q, &x1, u); + while (ibz_cmp(&q, &ibz_const_zero) <= 0) { + ibz_div(&q, &r, &y1, d); + assert(ibz_is_zero(&r)); + if (neg) { + ibz_neg(&q, &q); + } + ibz_add(u, u, &q); + ibz_div(&q, &r, &x1, d); + assert(ibz_is_zero(&r)); + if (neg) { + ibz_neg(&q, &q); + } + ibz_sub(v, v, &q); + + ibz_mul(&q, &x1, u); + } + } + +#ifndef NDEBUG + int res = 0; + ibz_t sum, prod, test, cmp; + ibz_init(&sum); + ibz_init(&prod); + ibz_init(&cmp); + ibz_init(&test); + // sign correct + res = res | !(ibz_cmp(d, &ibz_const_zero) >= 0); + if (ibz_is_zero(&x1) && ibz_is_zero(&y1)) { + res = res | !(ibz_is_zero(v) && ibz_is_one(u) && ibz_is_one(d)); + } else { + if (!ibz_is_zero(&x1) && !ibz_is_zero(&y1)) { + // GCD divides x + ibz_div(&sum, &prod, &x1, d); + res = res | !ibz_is_zero(&prod); + // Small enough + ibz_mul(&prod, &x1, u); + res = res | !(ibz_cmp(&prod, &ibz_const_zero) > 0); + ibz_mul(&sum, &sum, &y1); + ibz_abs(&sum, &sum); + res = res | !(ibz_cmp(&prod, &sum) <= 0); + + // GCD divides y + ibz_div(&sum, &prod, &y1, d); + res = res | !ibz_is_zero(&prod); + // Small enough + ibz_mul(&prod, &y1, v); + res = res | !(ibz_cmp(&prod, &ibz_const_zero) <= 0); + ibz_mul(&sum, &sum, &x1); + ibz_abs(&sum, &sum); + res = res | !(ibz_cmp(&prod, &sum) < 0); + } else { + // GCD divides x + ibz_div(&sum, &prod, &x1, d); + res = res | !ibz_is_zero(&prod); + // GCD divides y + ibz_div(&sum, &prod, &y1, d); + res = res | !ibz_is_zero(&prod); + if (ibz_is_zero(&x1) && !ibz_is_zero(&y1)) { + ibz_abs(&prod, v); + res = res | !(ibz_is_one(&prod)); + res = res | !(ibz_is_one(u)); + } else { + ibz_abs(&prod, u); + res = res | !(ibz_is_one(&prod)); + res = res | !(ibz_is_zero(v)); + } + } + + // Bezout coeffs + ibz_mul(&sum, &x1, u); + ibz_mul(&prod, &y1, v); + ibz_add(&sum, &sum, &prod); + res = res | !(ibz_cmp(&sum, d) == 0); + } + assert(!res); + ibz_finalize(&sum); + ibz_finalize(&prod); + ibz_finalize(&cmp); + ibz_finalize(&test); + +#endif + + ibz_finalize(&x1); + ibz_finalize(&y1); + ibz_finalize(&q); + ibz_finalize(&r); +} diff --git 
a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf_internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf_internal.h new file mode 100644 index 0000000000..5ecc871bb4 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf_internal.h @@ -0,0 +1,94 @@ +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations for functions internal to the HNF computation and its tests + */ + +#ifndef QUAT_HNF_HELPERS_H +#define QUAT_HNF_HELPERS_H + +#include + +/** @internal + * @ingroup quat_helpers + * @defgroup quat_hnf_helpers Internal functions for the HNF computation and tests + */ + +/** @internal + * @ingroup quat_hnf_helpers + * @defgroup quat_hnf_helpers_ibz Internal renamed GMP functions for the HNF computation + */ + +/** + * @brief GCD and Bézout coefficients u, v such that ua + bv = gcd + * + * @param gcd Output: Set to the gcd of a and b + * @param u Output: integer such that ua+bv=gcd + * @param v Output: Integer such that ua+bv=gcd + * @param a + * @param b + */ +void ibz_xgcd(ibz_t *gcd, + ibz_t *u, + ibz_t *v, + const ibz_t *a, + const ibz_t *b); // integers, dim4, test/integers, test/dim4 + +/** @} + */ + +/** @internal + * @ingroup quat_hnf_helpers + * @defgroup quat_hnf_integer_helpers Integer functions internal to the HNF computation and tests + * @{ + */ + +/** @brief x mod mod, with x in [1,mod] + * + * @param res Output: res = x [mod] and 0 < res <= mod + * @param x + * @param mod must be > 0 + */ +void ibz_mod_not_zero(ibz_t *res, const ibz_t *x, const ibz_t *mod); + +/** @brief x mod mod, with x in ]-mod/2,mod/2] + * + * Centered and rather positive than negative. + * + * @param remainder Output: remainder = x [mod] and -mod/2 < remainder <= mod/2 + * @param a + * @param mod must be > 0 + */ +void ibz_centered_mod(ibz_t *remainder, const ibz_t *a, const ibz_t *mod); + +/** @brief if c then x else y + * + * @param res Output: if c, res = x, else res = y + * @param x + * @param y + * @param c condition: must be 0 or 1 + */ +void ibz_conditional_assign(ibz_t *res, const ibz_t *x, const ibz_t *y, int c); + +/** @brief d = gcd(x,y) > 0 and d = ux+vy with u != 0 and u, v of small absolute value + * + * More precisely: + * If x and y are both non 0, -|xy|/d +#else +#include +#endif + +void +ibz_xgcd(ibz_t *gcd, ibz_t *u, ibz_t *v, const ibz_t *a, const ibz_t *b) +{ + mpz_gcdext(*gcd, *u, *v, *a, *b); +} \ No newline at end of file diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/id2iso.c new file mode 100644 index 0000000000..0743974345 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/id2iso.c @@ -0,0 +1,338 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Scalar multiplication [x]P + [y]Q where x and y are stored +// inside an ibz_vec_2_t [x, y] and P, Q \in E[2^f] +void +ec_biscalar_mul_ibz_vec(ec_point_t *res, + const ibz_vec_2_t *scalar_vec, + const int f, + const ec_basis_t *PQ, + const ec_curve_t *curve) +{ + digit_t scalars[2][NWORDS_ORDER]; + ibz_to_digit_array(scalars[0], &(*scalar_vec)[0]); + ibz_to_digit_array(scalars[1], &(*scalar_vec)[1]); + ec_biscalar_mul(res, scalars[0], scalars[1], f, PQ, curve); +} + +// Given an ideal, computes the scalars s0, s1 which determine the kernel generator +// of the equivalent isogeny +void +id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *vec, const quat_left_ideal_t *lideal) +{ + ibz_t tmp; + ibz_init(&tmp); + + ibz_mat_2x2_t mat; + ibz_mat_2x2_init(&mat); + + // construct the matrix of the dual of alpha on the 2^f-torsion + { + quat_alg_elem_t alpha;
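+        // alpha is set to the conjugate of a generator of lideal; its +        // coordinates in the O0 basis, combined with the precomputed +        // ACTION_GEN* matrices, yield the 2x2 action on the 2^f-torsion.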
quat_alg_elem_init(&alpha); + + int lideal_generator_ok UNUSED = quat_lideal_generator(&alpha, lideal, &QUATALG_PINFTY); + assert(lideal_generator_ok); + quat_alg_conj(&alpha, &alpha); + + ibz_vec_4_t coeffs; + ibz_vec_4_init(&coeffs); + quat_change_to_O0_basis(&coeffs, &alpha); + + for (unsigned i = 0; i < 2; ++i) { + ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + for (unsigned j = 0; j < 2; ++j) { + ibz_mul(&tmp, &ACTION_GEN2[i][j], &coeffs[1]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN3[i][j], &coeffs[2]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN4[i][j], &coeffs[3]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + } + } + + ibz_vec_4_finalize(&coeffs); + quat_alg_elem_finalize(&alpha); + } + + // find the kernel of alpha modulo the norm of the ideal + { + const ibz_t *const norm = &lideal->norm; + + ibz_mod(&(*vec)[0], &mat[0][0], norm); + ibz_mod(&(*vec)[1], &mat[1][0], norm); + ibz_gcd(&tmp, &(*vec)[0], &(*vec)[1]); + if (ibz_is_even(&tmp)) { + ibz_mod(&(*vec)[0], &mat[0][1], norm); + ibz_mod(&(*vec)[1], &mat[1][1], norm); + } +#ifndef NDEBUG + ibz_gcd(&tmp, &(*vec)[0], norm); + ibz_gcd(&tmp, &(*vec)[1], &tmp); + assert(!ibz_cmp(&tmp, &ibz_const_one)); +#endif + } + + ibz_mat_2x2_finalize(&mat); + ibz_finalize(&tmp); +} + +// helper function to apply a matrix to a basis of E[2^f] +// works in place +int +matrix_application_even_basis(ec_basis_t *bas, const ec_curve_t *E, ibz_mat_2x2_t *mat, int f) +{ + digit_t scalars[2][NWORDS_ORDER] = { 0 }; + int ret; + + ibz_t tmp, pow_two; + ibz_init(&tmp); + ibz_init(&pow_two); + ibz_pow(&pow_two, &ibz_const_two, f); + + ec_basis_t tmp_bas; + copy_basis(&tmp_bas, bas); + + // reduction mod 2f + ibz_mod(&(*mat)[0][0], &(*mat)[0][0], &pow_two); + ibz_mod(&(*mat)[0][1], &(*mat)[0][1], &pow_two); + ibz_mod(&(*mat)[1][0], &(*mat)[1][0], &pow_two); + ibz_mod(&(*mat)[1][1], &(*mat)[1][1], &pow_two); + + // For a matrix [[a, c], [b, d]] we compute: + // + // first basis element R = [a]P + [b]Q + ibz_to_digit_array(scalars[0], &(*mat)[0][0]); + ibz_to_digit_array(scalars[1], &(*mat)[1][0]); + ec_biscalar_mul(&bas->P, scalars[0], scalars[1], f, &tmp_bas, E); + + // second basis element S = [c]P + [d]Q + ibz_to_digit_array(scalars[0], &(*mat)[0][1]); + ibz_to_digit_array(scalars[1], &(*mat)[1][1]); + ec_biscalar_mul(&bas->Q, scalars[0], scalars[1], f, &tmp_bas, E); + + // Their difference R - S = [a - c]P + [b - d]Q + ibz_sub(&tmp, &(*mat)[0][0], &(*mat)[0][1]); + ibz_mod(&tmp, &tmp, &pow_two); + ibz_to_digit_array(scalars[0], &tmp); + ibz_sub(&tmp, &(*mat)[1][0], &(*mat)[1][1]); + ibz_mod(&tmp, &tmp, &pow_two); + ibz_to_digit_array(scalars[1], &tmp); + ret = ec_biscalar_mul(&bas->PmQ, scalars[0], scalars[1], f, &tmp_bas, E); + + ibz_finalize(&tmp); + ibz_finalize(&pow_two); + + return ret; +} + +// helper function to apply some endomorphism of E0 on the precomputed basis of E[2^f] +// works in place +void +endomorphism_application_even_basis(ec_basis_t *bas, + const int index_alternate_curve, + const ec_curve_t *E, + const quat_alg_elem_t *theta, + int f) +{ + ibz_t tmp; + ibz_init(&tmp); + ibz_vec_4_t coeffs; + ibz_vec_4_init(&coeffs); + ibz_mat_2x2_t mat; + ibz_mat_2x2_init(&mat); + + ibz_t content; + ibz_init(&content); + + // decomposing theta on the basis + quat_alg_make_primitive(&coeffs, &content, theta, &EXTREMAL_ORDERS[index_alternate_curve].order); + assert(ibz_is_odd(&content)); + + ibz_set(&mat[0][0], 0); + ibz_set(&mat[0][1], 0); + ibz_set(&mat[1][0], 0); + ibz_set(&mat[1][1], 0); + + // 
computing the matrix + + for (unsigned i = 0; i < 2; ++i) { + ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + for (unsigned j = 0; j < 2; ++j) { + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen2[i][j], &coeffs[1]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen3[i][j], &coeffs[2]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen4[i][j], &coeffs[3]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&mat[i][j], &mat[i][j], &content); + } + } + + // and now we apply it + matrix_application_even_basis(bas, E, &mat, f); + + ibz_vec_4_finalize(&coeffs); + ibz_mat_2x2_finalize(&mat); + ibz_finalize(&content); + + ibz_finalize(&tmp); +} + +// compute the ideal whose kernel is generated by vec2[0]*BO[0] + vec2[1]*B0[1] where B0 is the +// canonical basis of E0 +void +id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t *vec2, int f) +{ + + // algorithm: apply endomorphisms 1 and j+(1+k)/2 to the kernel point, + // the result should form a basis of the respective torsion subgroup. + // then apply i to the kernel point and decompose over said basis. + // hence we have an equation a*P + b*[j+(1+k)/2]P == [i]P, which will + // easily reveal an endomorphism that kills P. + + ibz_t two_pow; + ibz_init(&two_pow); + + ibz_vec_2_t vec; + ibz_vec_2_init(&vec); + + if (f == TORSION_EVEN_POWER) { + ibz_copy(&two_pow, &TORSION_PLUS_2POWER); + } else { + ibz_pow(&two_pow, &ibz_const_two, f); + } + + { + ibz_mat_2x2_t mat; + ibz_mat_2x2_init(&mat); + + ibz_copy(&mat[0][0], &(*vec2)[0]); + ibz_copy(&mat[1][0], &(*vec2)[1]); + + ibz_mat_2x2_eval(&vec, &ACTION_J, vec2); + ibz_copy(&mat[0][1], &vec[0]); + ibz_copy(&mat[1][1], &vec[1]); + + ibz_mat_2x2_eval(&vec, &ACTION_GEN4, vec2); + ibz_add(&mat[0][1], &mat[0][1], &vec[0]); + ibz_add(&mat[1][1], &mat[1][1], &vec[1]); + + ibz_mod(&mat[0][1], &mat[0][1], &two_pow); + ibz_mod(&mat[1][1], &mat[1][1], &two_pow); + + ibz_mat_2x2_t inv; + ibz_mat_2x2_init(&inv); + { + int inv_ok UNUSED = ibz_mat_2x2_inv_mod(&inv, &mat, &two_pow); + assert(inv_ok); + } + ibz_mat_2x2_finalize(&mat); + + ibz_mat_2x2_eval(&vec, &ACTION_I, vec2); + ibz_mat_2x2_eval(&vec, &inv, &vec); + + ibz_mat_2x2_finalize(&inv); + } + + // final result: a - i + b*(j+(1+k)/2) + quat_alg_elem_t gen; + quat_alg_elem_init(&gen); + ibz_set(&gen.denom, 2); + ibz_add(&gen.coord[0], &vec[0], &vec[0]); + ibz_set(&gen.coord[1], -2); + ibz_add(&gen.coord[2], &vec[1], &vec[1]); + ibz_copy(&gen.coord[3], &vec[1]); + ibz_add(&gen.coord[0], &gen.coord[0], &vec[1]); + ibz_vec_2_finalize(&vec); + + quat_lideal_create(lideal, &gen, &two_pow, &MAXORD_O0, &QUATALG_PINFTY); + + assert(0 == ibz_cmp(&lideal->norm, &two_pow)); + + quat_alg_elem_finalize(&gen); + ibz_finalize(&two_pow); +} + +// finds mat such that: +// (mat*v).B2 = v.B1 +// where "." is the dot product, defined as (v1,v2).(P,Q) = v1*P + v2*Q +// mat encodes the coordinates of the points of B1 in the basis B2 +// specifically requires B1 or B2 to be "full" w.r.t to the 2^n torsion, so that we use tate +// full = 0 assumes B2 is "full" so the easier case. +// if we want to switch the role of B2 and B1, we invert the matrix, e.g. 
set full = 1 +static void +_change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, + const ec_basis_t *B1, + const ec_basis_t *B2, + ec_curve_t *E, + int f, + bool invert) +{ + digit_t x1[NWORDS_ORDER] = { 0 }, x2[NWORDS_ORDER] = { 0 }, x3[NWORDS_ORDER] = { 0 }, x4[NWORDS_ORDER] = { 0 }; + +#ifndef NDEBUG + int e_full = TORSION_EVEN_POWER; + int e_diff = e_full - f; +#endif + + // Ensure the input basis has points of order 2^f + if (invert) { + assert(test_basis_order_twof(B1, E, e_full)); + ec_dlog_2_tate(x1, x2, x3, x4, B1, B2, E, f); + mp_invert_matrix(x1, x2, x3, x4, f, NWORDS_ORDER); + } else { + assert(test_basis_order_twof(B2, E, e_full)); + ec_dlog_2_tate(x1, x2, x3, x4, B2, B1, E, f); + } + +#ifndef NDEBUG + { + if (invert) { + ec_point_t test, test2; + ec_biscalar_mul(&test, x1, x2, f, B2, E); + ec_dbl_iter(&test2, e_diff, &B1->P, E); + assert(ec_is_equal(&test, &test2)); + + ec_biscalar_mul(&test, x3, x4, f, B2, E); + ec_dbl_iter(&test2, e_diff, &B1->Q, E); + assert(ec_is_equal(&test, &test2)); + } else { + ec_point_t test; + ec_biscalar_mul(&test, x1, x2, f, B2, E); + ec_dbl_iter(&test, e_diff, &test, E); + assert(ec_is_equal(&test, &(B1->P))); + + ec_biscalar_mul(&test, x3, x4, f, B2, E); + ec_dbl_iter(&test, e_diff, &test, E); + assert(ec_is_equal(&test, &(B1->Q))); + } + } +#endif + + // Copy the results into the matrix + ibz_copy_digit_array(&((*mat)[0][0]), x1); + ibz_copy_digit_array(&((*mat)[1][0]), x2); + ibz_copy_digit_array(&((*mat)[0][1]), x3); + ibz_copy_digit_array(&((*mat)[1][1]), x4); +} + +void +change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, const ec_basis_t *B1, const ec_basis_t *B2, ec_curve_t *E, int f) +{ + _change_of_basis_matrix_tate(mat, B1, B2, E, f, false); +} + +void +change_of_basis_matrix_tate_invert(ibz_mat_2x2_t *mat, const ec_basis_t *B1, const ec_basis_t *B2, ec_curve_t *E, int f) +{ + _change_of_basis_matrix_tate(mat, B1, B2, E, f, true); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/id2iso.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/id2iso.h new file mode 100644 index 0000000000..1b4eaae3c5 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/id2iso.h @@ -0,0 +1,280 @@ +/** @file + * + * @authors Antonin Leroux + * + * @brief The id2iso algorithms + */ + +#ifndef ID2ISO_H +#define ID2ISO_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/** @defgroup id2iso_id2iso Ideal to isogeny conversion + * @{ + */ +static const quat_represent_integer_params_t QUAT_represent_integer_params = { + .algebra = &QUATALG_PINFTY, /// The level-specific quaternion algebra + .order = &(EXTREMAL_ORDERS[0]), // The special extremal order O0 + .primality_test_iterations = QUAT_primality_num_iter // precompted bound on the iteration number in primality tests +}; + +/*************************** Functions *****************************/ + +/** @defgroup id2iso_others Other functions needed for id2iso + * @{ + */ + +/** + * @brief Scalar multiplication [x]P + [y]Q where x and y are stored inside an + * ibz_vec_2_t [x, y] and P, Q in E[2^f] + * + * @param res Output: the point R = [x]P + [y]Q + * @param scalar_vec: a vector of ibz type elements (x, y) + * @param f: an integer such that P, Q are in E[2^f] + * @param PQ: an x-only basis x(P), x(Q) and x(P-Q) + * @param curve: the curve E the points P, Q, R are defined on + * + */ +void ec_biscalar_mul_ibz_vec(ec_point_t *res, + const ibz_vec_2_t *scalar_vec, + const int f, + const ec_basis_t *PQ, + const ec_curve_t *curve); + +/** + * @brief Translating an ideal 
of norm 2^f dividing p²-1 into the corresponding + * kernel coefficients + * + * @param ker_dlog Output : two coefficients indicating the decomposition of the + * kernel over the canonical basis of E0[2^f] + * @param lideal_input : O0-ideal corresponding to the ideal to be translated of + * norm 2^f + * + */ +void id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *ker_dlog, const quat_left_ideal_t *lideal_input); + +/** + * @brief Applies some 2x2 matrix on a basis of E[2^TORSION_EVEN_POWER] + * + * @param P the basis + * @param E the curve + * @param mat the matrix + * @param f TORSION_EVEN_POWER + * @returns 1 if success, 0 if error + * + * helper function, works in place + * + */ +int matrix_application_even_basis(ec_basis_t *P, const ec_curve_t *E, ibz_mat_2x2_t *mat, int f); + +/** + * @brief Applies some endomorphism of an alternate curve to E[f] + * + * @param P the basis + * @param index_alternate_curve index of the alternate order in the list of precomputed extremal + * orders + * @param E the curve (E is not required to be the alternate curve in question since in the end we + * only apply a matrix) + * @param theta the endomorphism + * @param f TORSION_EVEN_POWER + * + * helper function, works in place + * + */ +void endomorphism_application_even_basis(ec_basis_t *P, + const int index_alternate_curve, + const ec_curve_t *E, + const quat_alg_elem_t *theta, + int f); + +/** + * @brief Translating a kernel on the curve E0, represented as a vector with + * respect to the precomputed 2^f-torsion basis, into the corresponding O0-ideal + * + * @param lideal Output : the output O0-ideal + * @param f : exponent definining the norm of the ideal to compute + * @param vec2 : length-2 vector giving the 2-power part of the kernel with + * respect to the precomputed 2^f basis + * + */ +void id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t *vec2, int f); + +/** + * @brief Change of basis matrix for full basis B2 + * Finds mat such that: + * (mat*v).B2 = v.B1 + * where "." is the dot product, defined as (v1,v2).(P,Q) = v1*P + v2*Q + * + * @param mat the computed change of basis matrix + * @param B1 the source basis for E[2^f] + * @param B2 the target basis for E[2^e] + * @param E the elliptic curve + * @param f 2^f is the order of the points of the input basis + * + * mat encodes the coordinates of the points of B1 in the basis B2 + */ +void change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, const ec_basis_t *B1, const ec_basis_t *B2, ec_curve_t *E, int f); + +/** + * @brief Change of basis matrix for full basis B2 + * Finds mat such that: + * (mat*v).B1 = [2^e-f]*v.B2 + * where "." 
is the dot product, defined as (v1,v2).(P,Q) = v1*P + v2*Q + * + * @param mat the computed change of basis matrix + * @param B1 the source basis for E[2^e] + * @param B2 the target basis for E[2^f] + * @param E the elliptic curve + * @param f 2^f is the order of the points of the input basis + * + * mat encodes the coordinates of the points of B1 in the basis B2, by + * applying change_of_basis_matrix_tate and inverting the outcome + */ +void change_of_basis_matrix_tate_invert(ibz_mat_2x2_t *mat, + const ec_basis_t *B1, + const ec_basis_t *B2, + ec_curve_t *E, + int f); + +/** @} + */ + +/** @defgroup id2iso_arbitrary Arbitrary isogeny evaluation + * @{ + */ +/** + * @brief Function to find elements u, v, d1, d2, beta1, beta2 for the ideal to isogeny + * + * @param u Output: integer + * @param v Output: integer + * @param beta1 Output: quaternion element + * @param beta2 Output: quaternion element + * @param d1 Output: integer + * @param d2 Output: integer + * @param index_alternate_order_1 Output: small integer (index of an alternate order) + * @param index_alternate_order_2 Output: small integer (index of an alternate order) + * @param target : integer, target norm + * @param lideal : O0-ideal defining the search space + * @param Bpoo : quaternion algebra + * @param num_alternate_order number of alternate order we consider + * @returns 1 if the computation succeeds, 0 otherwise + * + * Let us write ti = index_alternate_order_i, + * we look for u,v,beta1,beta2,d1,d2,t1,t2 + * such that u d1 + v d2 = target + * and where di = norm(betai)/norm(Ii), where the ideal Ii is equal to overbar{Ji} * lideal and + * betai is in Ii where Ji is a connecting ideal between the maximal order O0 and O_ti t1,t2 must be + * contained between 0 and num_alternate_order This corresponds to the function SuitableIdeals in + * the spec + */ +int find_uv(ibz_t *u, + ibz_t *v, + quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *d1, + ibz_t *d2, + int *index_alternate_order_1, + int *index_alternate_order_2, + const ibz_t *target, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo, + int num_alternate_order); + +/** + * @brief Computes an arbitrary isogeny of fixed degree starting from E0 + * and evaluates it a list of points of the form (P1,0) or (0,P2). 
+ * + * @param lideal Output : an ideal of norm u + * @param u : integer + * @param small : bit indicating if we the value of u is "small" meaning that we + expect it to be + * around sqrt{p}, in that case we use a length slightly above + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny + (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @param index_alternate_order : index of the special extremal order to be used (in the list of + these orders) + * @returns the length of the chain if the computation succeeded, zero upon + failure + * + * F is an isogeny encoding an isogeny [adjust]*phi : E0 -> Eu of degree u + * note that the codomain of F can be either Eu x Eu' or Eu' x Eu for some curve + Eu' + */ +int fixed_degree_isogeny_and_eval(quat_left_ideal_t *lideal, + const ibz_t *u, + bool small, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP, + const int index_alternate_order); + +/** + * @brief Translating an ideal into a representation of the corresponding + * isogeny + * + * @param beta1 Output: quaternion element + * @param beta2 Output: quaternion element + * @param u Output: integer + * @param v Output: integer + * @param d1 Output: integer + * @param d2 Output: integer + * @param codomain the codomain of the isogeny corresponding to lideal + * @param basis Output : evaluation of the canonical basis of E0 through the + * ideal corresponding to lideal + * @param lideal : O0 - ideal in input + * @param Bpoo : the quaternion algebra + * @returns 1 if the computation succeeded, 0 otherwise + * + * Compute the codomain and image on the basis of E0 of the isogeny + * E0 -> codomain corresponding to lideal + * + * There is some integer e >= 0 such that + * 2^e * u, 2^e * v,beta1, beta2, d1, d2 are the output of find_uv + * on input target = 2^TORSION_PLUS_EVEN_POWER and lideal + * + * codomain and basis are computed with the help of a dimension 2 isogeny + * of degree 2^TORSION_PLUS_EVEN_POWER - e using a Kani diagram + * + */ +int dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *u, + ibz_t *v, + ibz_t *d1, + ibz_t *d2, + ec_curve_t *codomain, + ec_basis_t *basis, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo); + +/** + * @brief Translating an ideal into a representation of the corresponding + * isogeny + * + * @param basis Output : evaluation of the canonical basis of E0 through the + * ideal corresponding to lideal + * @param lideal : ideal in input + * @param codomain + * @returns 1 if the computation succeeds, 0 otherwise + * + * This is a wrapper around the ideal to isogeny clapotis function + */ +int dim2id2iso_arbitrary_isogeny_evaluation(ec_basis_t *basis, ec_curve_t *codomain, const quat_left_ideal_t *lideal); + +/** @} + */ + +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ideal.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ideal.c new file mode 100644 index 0000000000..9cf863a104 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ideal.c @@ -0,0 +1,323 @@ +#include +#include +#include "internal.h" + +// assumes parent order and lattice correctly set, computes and sets the norm +void +quat_lideal_norm(quat_left_ideal_t *lideal) +{ + quat_lattice_index(&(lideal->norm), &(lideal->lattice), (lideal->parent_order)); + int ok UNUSED = ibz_sqrt(&(lideal->norm), &(lideal->norm)); + assert(ok); +} + +// assumes parent order and lattice correctly set, 
recomputes and verifies its norm +static int +quat_lideal_norm_verify(const quat_left_ideal_t *lideal) +{ + int res; + ibz_t index; + ibz_init(&index); + quat_lattice_index(&index, &(lideal->lattice), (lideal->parent_order)); + ibz_sqrt(&index, &index); + res = (ibz_cmp(&(lideal->norm), &index) == 0); + ibz_finalize(&index); + return (res); +} + +void +quat_lideal_copy(quat_left_ideal_t *copy, const quat_left_ideal_t *copied) +{ + copy->parent_order = copied->parent_order; + ibz_copy(©->norm, &copied->norm); + ibz_copy(©->lattice.denom, &copied->lattice.denom); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(©->lattice.basis[i][j], &copied->lattice.basis[i][j]); + } + } +} + +void +quat_lideal_create_principal(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const quat_lattice_t *order, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal(order, alg)); + assert(quat_lattice_contains(NULL, order, x)); + ibz_t norm_n, norm_d; + ibz_init(&norm_n); + ibz_init(&norm_d); + + // Multiply order on the right by x + quat_lattice_alg_elem_mul(&(lideal->lattice), order, x, alg); + + // Reduce denominator. This conserves HNF + quat_lattice_reduce_denom(&lideal->lattice, &lideal->lattice); + + // Compute norm and check it's integral + quat_alg_norm(&norm_n, &norm_d, x, alg); + assert(ibz_is_one(&norm_d)); + ibz_copy(&lideal->norm, &norm_n); + + // Set order + lideal->parent_order = order; + ibz_finalize(&norm_n); + ibz_finalize(&norm_d); +} + +void +quat_lideal_create(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const ibz_t *N, + const quat_lattice_t *order, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal(order, alg)); + assert(!quat_alg_elem_is_zero(x)); + + quat_lattice_t ON; + quat_lattice_init(&ON); + + // Compute ideal generated by x + quat_lideal_create_principal(lideal, x, order, alg); + + // Compute ideal generated by N (without reducing denominator) + ibz_mat_4x4_scalar_mul(&ON.basis, N, &order->basis); + ibz_copy(&ON.denom, &order->denom); + + // Add lattices (reduces denominators) + quat_lattice_add(&lideal->lattice, &lideal->lattice, &ON); + // Set order + lideal->parent_order = order; + // Compute norm + quat_lideal_norm(lideal); + + quat_lattice_finalize(&ON); +} + +int +quat_lideal_generator(quat_alg_elem_t *gen, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + ibz_t norm_int, norm_n, gcd, r, q, norm_denom; + ibz_vec_4_t vec; + ibz_vec_4_init(&vec); + ibz_init(&norm_denom); + ibz_init(&norm_int); + ibz_init(&norm_n); + ibz_init(&r); + ibz_init(&q); + ibz_init(&gcd); + int a, b, c, d; + int found = 0; + int int_norm = 0; + while (1) { + int_norm++; + for (a = -int_norm; a <= int_norm; a++) { + for (b = -int_norm + abs(a); b <= int_norm - abs(a); b++) { + for (c = -int_norm + abs(a) + abs(b); c <= int_norm - abs(a) - abs(b); c++) { + d = int_norm - abs(a) - abs(b) - abs(c); + ibz_vec_4_set(&vec, a, b, c, d); + ibz_vec_4_content(&gcd, &vec); + if (ibz_is_one(&gcd)) { + ibz_mat_4x4_eval(&(gen->coord), &(lideal->lattice.basis), &vec); + ibz_copy(&(gen->denom), &(lideal->lattice.denom)); + quat_alg_norm(&norm_int, &norm_denom, gen, alg); + assert(ibz_is_one(&norm_denom)); + ibz_div(&q, &r, &norm_int, &(lideal->norm)); + assert(ibz_is_zero(&r)); + ibz_gcd(&gcd, &(lideal->norm), &q); + found = (0 == ibz_cmp(&gcd, &ibz_const_one)); + if (found) + goto fin; + } + } + } + } + } +fin:; + ibz_finalize(&r); + ibz_finalize(&q); + ibz_finalize(&norm_denom); + ibz_finalize(&norm_int); + ibz_finalize(&norm_n); + 
ibz_vec_4_finalize(&vec); + ibz_finalize(&gcd); + return (found); +} + +void +quat_lideal_mul(quat_left_ideal_t *product, + const quat_left_ideal_t *lideal, + const quat_alg_elem_t *alpha, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + ibz_t norm, norm_d; + ibz_init(&norm); + ibz_init(&norm_d); + quat_lattice_alg_elem_mul(&(product->lattice), &(lideal->lattice), alpha, alg); + product->parent_order = lideal->parent_order; + quat_alg_norm(&norm, &norm_d, alpha, alg); + ibz_mul(&(product->norm), &(lideal->norm), &norm); + assert(ibz_divides(&(product->norm), &norm_d)); + ibz_div(&(product->norm), &norm, &(product->norm), &norm_d); + assert(quat_lideal_norm_verify(lideal)); + ibz_finalize(&norm_d); + ibz_finalize(&norm); +} + +void +quat_lideal_add(quat_left_ideal_t *sum, const quat_left_ideal_t *I1, const quat_left_ideal_t *I2, const quat_alg_t *alg) +{ + assert(I1->parent_order == I2->parent_order); + assert(quat_order_is_maximal((I2->parent_order), alg)); + quat_lattice_add(&sum->lattice, &I1->lattice, &I2->lattice); + sum->parent_order = I1->parent_order; + quat_lideal_norm(sum); +} + +void +quat_lideal_inter(quat_left_ideal_t *inter, + const quat_left_ideal_t *I1, + const quat_left_ideal_t *I2, + const quat_alg_t *alg) +{ + assert(I1->parent_order == I2->parent_order); + assert(quat_order_is_maximal((I2->parent_order), alg)); + quat_lattice_intersect(&inter->lattice, &I1->lattice, &I2->lattice); + inter->parent_order = I1->parent_order; + quat_lideal_norm(inter); +} + +int +quat_lideal_equals(const quat_left_ideal_t *I1, const quat_left_ideal_t *I2, const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((I2->parent_order), alg)); + assert(quat_order_is_maximal((I1->parent_order), alg)); + return (I1->parent_order == I2->parent_order) & (ibz_cmp(&I1->norm, &I2->norm) == 0) & + quat_lattice_equal(&I1->lattice, &I2->lattice); +} + +void +quat_lideal_inverse_lattice_without_hnf(quat_lattice_t *inv, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + quat_lattice_conjugate_without_hnf(inv, &(lideal->lattice)); + ibz_mul(&(inv->denom), &(inv->denom), &(lideal->norm)); +} + +// following the implementation of ideal isomorphisms in the code of LearningToSQI's sage +// implementation of SQIsign +void +quat_lideal_right_transporter(quat_lattice_t *trans, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal1->parent_order), alg)); + assert(quat_order_is_maximal((lideal2->parent_order), alg)); + assert(lideal1->parent_order == lideal2->parent_order); + quat_lattice_t inv; + quat_lattice_init(&inv); + quat_lideal_inverse_lattice_without_hnf(&inv, lideal1, alg); + quat_lattice_mul(trans, &inv, &(lideal2->lattice), alg); + quat_lattice_finalize(&inv); +} + +void +quat_lideal_right_order(quat_lattice_t *order, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + quat_lideal_right_transporter(order, lideal, lideal, alg); +} + +void +quat_lideal_class_gram(ibz_mat_4x4_t *G, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + quat_lattice_gram(G, &(lideal->lattice), alg); + + // divide by norm · denominator² + ibz_t divisor, rmd; + ibz_init(&divisor); + ibz_init(&rmd); + + ibz_mul(&divisor, &(lideal->lattice.denom), &(lideal->lattice.denom)); + ibz_mul(&divisor, &divisor, &(lideal->norm)); + + for (int i = 0; i < 4; i++) { + for 
(int j = 0; j <= i; j++) { + ibz_div(&(*G)[i][j], &rmd, &(*G)[i][j], &divisor); + assert(ibz_is_zero(&rmd)); + } + } + for (int i = 0; i < 4; i++) { + for (int j = 0; j <= i - 1; j++) { + ibz_copy(&(*G)[j][i], &(*G)[i][j]); + } + } + + ibz_finalize(&rmd); + ibz_finalize(&divisor); +} + +void +quat_lideal_conjugate_without_hnf(quat_left_ideal_t *conj, + quat_lattice_t *new_parent_order, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg) +{ + quat_lideal_right_order(new_parent_order, lideal, alg); + quat_lattice_conjugate_without_hnf(&(conj->lattice), &(lideal->lattice)); + conj->parent_order = new_parent_order; + ibz_copy(&(conj->norm), &(lideal->norm)); +} + +int +quat_order_discriminant(ibz_t *disc, const quat_lattice_t *order, const quat_alg_t *alg) +{ + int ok = 0; + ibz_t det, sqr, div; + ibz_mat_4x4_t transposed, norm, prod; + ibz_init(&det); + ibz_init(&sqr); + ibz_init(&div); + ibz_mat_4x4_init(&transposed); + ibz_mat_4x4_init(&norm); + ibz_mat_4x4_init(&prod); + ibz_mat_4x4_transpose(&transposed, &(order->basis)); + // multiply gram matrix by 2 because of reduced trace + ibz_mat_4x4_identity(&norm); + ibz_copy(&(norm[2][2]), &(alg->p)); + ibz_copy(&(norm[3][3]), &(alg->p)); + ibz_mat_4x4_scalar_mul(&norm, &ibz_const_two, &norm); + ibz_mat_4x4_mul(&prod, &transposed, &norm); + ibz_mat_4x4_mul(&prod, &prod, &(order->basis)); + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &prod); + ibz_mul(&div, &(order->denom), &(order->denom)); + ibz_mul(&div, &div, &div); + ibz_mul(&div, &div, &div); + ibz_div(&sqr, &div, &det, &div); + ok = ibz_is_zero(&div); + ok = ok & ibz_sqrt(disc, &sqr); + ibz_finalize(&det); + ibz_finalize(&div); + ibz_finalize(&sqr); + ibz_mat_4x4_finalize(&transposed); + ibz_mat_4x4_finalize(&norm); + ibz_mat_4x4_finalize(&prod); + return (ok); +} + +int +quat_order_is_maximal(const quat_lattice_t *order, const quat_alg_t *alg) +{ + int res; + ibz_t disc; + ibz_init(&disc); + quat_order_discriminant(&disc, order, alg); + res = (ibz_cmp(&disc, &(alg->p)) == 0); + ibz_finalize(&disc); + return (res); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/intbig.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/intbig.c new file mode 100644 index 0000000000..b0462dc8b5 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/intbig.c @@ -0,0 +1,791 @@ +#include "intbig_internal.h" +#include +#include +#include +#include +#include +#include + +// #define DEBUG_VERBOSE + +#ifdef DEBUG_VERBOSE +#define DEBUG_STR_PRINTF(x) printf("%s\n", (x)); + +static void +DEBUG_STR_FUN_INT_MP(const char *op, int arg1, const ibz_t *arg2) +{ + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + printf("%s,%x,%s\n", op, arg1, arg2_str); +} + +static void +DEBUG_STR_FUN_3(const char *op, const ibz_t *arg1, const ibz_t *arg2, const ibz_t *arg3) +{ + int arg1_size = ibz_size_in_base(arg1, 16); + char arg1_str[arg1_size + 2]; + ibz_convert_to_str(arg1, arg1_str, 16); + + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + int arg3_size = ibz_size_in_base(arg3, 16); + char arg3_str[arg3_size + 2]; + ibz_convert_to_str(arg3, arg3_str, 16); + + printf("%s,%s,%s,%s\n", op, arg1_str, arg2_str, arg3_str); +} + +static void +DEBUG_STR_FUN_MP2_INT(const char *op, const ibz_t *arg1, const ibz_t *arg2, int arg3) +{ + int arg1_size = ibz_size_in_base(arg1, 16); + char arg1_str[arg1_size + 2]; + ibz_convert_to_str(arg1, arg1_str, 16); + + int 
arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + printf("%s,%s,%s,%x\n", op, arg1_str, arg2_str, arg3); +} + +static void +DEBUG_STR_FUN_INT_MP2(const char *op, int arg1, const ibz_t *arg2, const ibz_t *arg3) +{ + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + int arg3_size = ibz_size_in_base(arg3, 16); + char arg3_str[arg3_size + 2]; + ibz_convert_to_str(arg3, arg3_str, 16); + + if (arg1 >= 0) + printf("%s,%x,%s,%s\n", op, arg1, arg2_str, arg3_str); + else + printf("%s,-%x,%s,%s\n", op, -arg1, arg2_str, arg3_str); +} + +static void +DEBUG_STR_FUN_INT_MP_INT(const char *op, int arg1, const ibz_t *arg2, int arg3) +{ + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + printf("%s,%x,%s,%x\n", op, arg1, arg2_str, arg3); +} + +static void +DEBUG_STR_FUN_4(const char *op, const ibz_t *arg1, const ibz_t *arg2, const ibz_t *arg3, const ibz_t *arg4) +{ + int arg1_size = ibz_size_in_base(arg1, 16); + char arg1_str[arg1_size + 2]; + ibz_convert_to_str(arg1, arg1_str, 16); + + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + int arg3_size = ibz_size_in_base(arg3, 16); + char arg3_str[arg3_size + 2]; + ibz_convert_to_str(arg3, arg3_str, 16); + + int arg4_size = ibz_size_in_base(arg4, 16); + char arg4_str[arg4_size + 2]; + ibz_convert_to_str(arg4, arg4_str, 16); + + printf("%s,%s,%s,%s,%s\n", op, arg1_str, arg2_str, arg3_str, arg4_str); +} +#else +#define DEBUG_STR_PRINTF(x) +#define DEBUG_STR_FUN_INT_MP(op, arg1, arg2) +#define DEBUG_STR_FUN_3(op, arg1, arg2, arg3) +#define DEBUG_STR_FUN_INT_MP2(op, arg1, arg2, arg3) +#define DEBUG_STR_FUN_INT_MP_INT(op, arg1, arg2, arg3) +#define DEBUG_STR_FUN_4(op, arg1, arg2, arg3, arg4) +#endif + +/** @defgroup ibz_t Constants + * @{ + */ + +const __mpz_struct ibz_const_zero[1] = { + { + ._mp_alloc = 0, + ._mp_size = 0, + ._mp_d = (mp_limb_t[]){ 0 }, + } +}; + +const __mpz_struct ibz_const_one[1] = { + { + ._mp_alloc = 0, + ._mp_size = 1, + ._mp_d = (mp_limb_t[]){ 1 }, + } +}; + +const __mpz_struct ibz_const_two[1] = { + { + ._mp_alloc = 0, + ._mp_size = 1, + ._mp_d = (mp_limb_t[]){ 2 }, + } +}; + +const __mpz_struct ibz_const_three[1] = { + { + ._mp_alloc = 0, + ._mp_size = 1, + ._mp_d = (mp_limb_t[]){ 3 }, + } +}; + +void +ibz_init(ibz_t *x) +{ + mpz_init(*x); +} + +void +ibz_finalize(ibz_t *x) +{ + mpz_clear(*x); +} + +void +ibz_add(ibz_t *sum, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_add(*sum, *a, *b); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_add", sum, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_sub(ibz_t *diff, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_sub(*diff, *a, *b); + +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_sub", diff, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_mul(*prod, *a, *b); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_mul", 
prod, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_neg(ibz_t *neg, const ibz_t *a) +{ + mpz_neg(*neg, *a); +} + +void +ibz_abs(ibz_t *abs, const ibz_t *a) +{ + mpz_abs(*abs, *a); +} + +void +ibz_div(ibz_t *quotient, ibz_t *remainder, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_tdiv_qr(*quotient, *remainder, *a, *b); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_4("ibz_div", quotient, remainder, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp; + ibz_init(&a_cp); + ibz_copy(&a_cp, a); +#endif + mpz_tdiv_q_2exp(*quotient, *a, exp); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_MP2_INT("ibz_div_2exp,%Zx,%Zx,%x\n", quotient, &a_cp, exp); + ibz_finalize(&a_cp); +#endif +} + +void +ibz_div_floor(ibz_t *q, ibz_t *r, const ibz_t *n, const ibz_t *d) +{ + mpz_fdiv_qr(*q, *r, *n, *d); +} + +void +ibz_mod(ibz_t *r, const ibz_t *a, const ibz_t *b) +{ + mpz_mod(*r, *a, *b); +} + +unsigned long int +ibz_mod_ui(const mpz_t *n, unsigned long int d) +{ + return mpz_fdiv_ui(*n, d); +} + +int +ibz_divides(const ibz_t *a, const ibz_t *b) +{ + return mpz_divisible_p(*a, *b); +} + +void +ibz_pow(ibz_t *pow, const ibz_t *x, uint32_t e) +{ + mpz_pow_ui(*pow, *x, e); +} + +void +ibz_pow_mod(ibz_t *pow, const ibz_t *x, const ibz_t *e, const ibz_t *m) +{ + mpz_powm(*pow, *x, *e, *m); + DEBUG_STR_FUN_4("ibz_pow_mod", pow, x, e, m); +} + +int +ibz_two_adic(ibz_t *pow) +{ + return mpz_scan1(*pow, 0); +} + +int +ibz_cmp(const ibz_t *a, const ibz_t *b) +{ + int ret = mpz_cmp(*a, *b); + DEBUG_STR_FUN_INT_MP2("ibz_cmp", ret, a, b); + return ret; +} + +int +ibz_is_zero(const ibz_t *x) +{ + int ret = !mpz_cmp_ui(*x, 0); + DEBUG_STR_FUN_INT_MP("ibz_is_zero", ret, x); + return ret; +} + +int +ibz_is_one(const ibz_t *x) +{ + int ret = !mpz_cmp_ui(*x, 1); + DEBUG_STR_FUN_INT_MP("ibz_is_one", ret, x); + return ret; +} + +int +ibz_cmp_int32(const ibz_t *x, int32_t y) +{ + int ret = mpz_cmp_si(*x, (signed long int)y); + DEBUG_STR_FUN_INT_MP_INT("ibz_cmp_int32", ret, x, y); + return ret; +} + +int +ibz_is_even(const ibz_t *x) +{ + int ret = !mpz_tstbit(*x, 0); + DEBUG_STR_FUN_INT_MP("ibz_is_even", ret, x); + return ret; +} + +int +ibz_is_odd(const ibz_t *x) +{ + int ret = mpz_tstbit(*x, 0); + DEBUG_STR_FUN_INT_MP("ibz_is_odd", ret, x); + return ret; +} + +void +ibz_set(ibz_t *i, int32_t x) +{ + mpz_set_si(*i, x); +} + +int +ibz_convert_to_str(const ibz_t *i, char *str, int base) +{ + if (!str || (base != 10 && base != 16)) + return 0; + + mpz_get_str(str, base, *i); + + return 1; +} + +void +ibz_print(const ibz_t *num, int base) +{ + assert(base == 10 || base == 16); + + int num_size = ibz_size_in_base(num, base); + char num_str[num_size + 2]; + ibz_convert_to_str(num, num_str, base); + printf("%s", num_str); +} + +int +ibz_set_from_str(ibz_t *i, const char *str, int base) +{ + return (1 + mpz_set_str(*i, str, base)); +} + +void +ibz_copy(ibz_t *target, const ibz_t *value) +{ + mpz_set(*target, *value); +} + +void +ibz_swap(ibz_t *a, ibz_t *b) +{ + mpz_swap(*a, *b); +} + +int32_t +ibz_get(const ibz_t *i) +{ +#if LONG_MAX == INT32_MAX + return (int32_t)mpz_get_si(*i); +#elif LONG_MAX > INT32_MAX + // Extracts the sign bit and the 31 least significant bits + signed long int t = mpz_get_si(*i); + return (int32_t)((t >> (sizeof(signed long int) * 8 - 32)) & 
INT32_C(0x80000000)) | (t & INT32_C(0x7FFFFFFF)); +#else +#error Unsupported configuration: LONG_MAX must be >= INT32_MAX +#endif +} + +int +ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b) +{ + int randret; + int ret = 1; + mpz_t tmp; + mpz_t bmina; + mpz_init(bmina); + mpz_sub(bmina, *b, *a); + + if (mpz_sgn(bmina) == 0) { + mpz_set(*rand, *a); + mpz_clear(bmina); + return 1; + } + + size_t len_bits = mpz_sizeinbase(bmina, 2); + size_t len_bytes = (len_bits + 7) / 8; + size_t sizeof_limb = sizeof(mp_limb_t); + size_t sizeof_limb_bits = sizeof_limb * 8; + size_t len_limbs = (len_bytes + sizeof_limb - 1) / sizeof_limb; + + mp_limb_t mask = ((mp_limb_t)-1) >> (sizeof_limb_bits - len_bits) % sizeof_limb_bits; + mp_limb_t r[len_limbs]; + +#ifndef NDEBUG + { + for (size_t i = 0; i < len_limbs; ++i) + r[i] = (mp_limb_t)-1; + r[len_limbs - 1] = mask; + mpz_t check; + mpz_roinit_n(check, r, len_limbs); + assert(mpz_cmp(check, bmina) >= 0); // max sampled value >= b - a + mpz_t bmina2; + mpz_init(bmina2); + mpz_add(bmina2, bmina, bmina); + assert(mpz_cmp(check, bmina2) < 0); // max sampled value < 2 * (b - a) + mpz_clear(bmina2); + } +#endif + + do { + randret = randombytes((unsigned char *)r, len_bytes); + if (randret != 0) { + ret = 0; + goto err; + } +#ifdef TARGET_BIG_ENDIAN + for (size_t i = 0; i < len_limbs; ++i) + r[i] = BSWAP_DIGIT(r[i]); +#endif + r[len_limbs - 1] &= mask; + mpz_roinit_n(tmp, r, len_limbs); + if (mpz_cmp(tmp, bmina) <= 0) + break; + } while (1); + + mpz_add(*rand, tmp, *a); +err: + mpz_clear(bmina); + return ret; +} + +int +ibz_rand_interval_i(ibz_t *rand, int32_t a, int32_t b) +{ + uint32_t diff, mask; + int32_t rand32; + + if (!(a >= 0 && b >= 0 && b > a)) { + printf("a = %d b = %d\n", a, b); + } + assert(a >= 0 && b >= 0 && b > a); + + diff = b - a; + + // Create a mask with 1 + ceil(log2(diff)) least significant bits set +#if (defined(__GNUC__) || defined(__clang__)) && INT_MAX == INT32_MAX + mask = (1 << (32 - __builtin_clz((uint32_t)diff))) - 1; +#else + uint32_t diff2 = diff, tmp; + + mask = (diff2 > 0xFFFF) << 4; + diff2 >>= mask; + + tmp = (diff2 > 0xFF) << 3; + diff2 >>= tmp; + mask |= tmp; + + tmp = (diff2 > 0xF) << 2; + diff2 >>= tmp; + mask |= tmp; + + tmp = (diff2 > 0x3) << 1; + diff2 >>= tmp; + mask |= tmp; + + mask |= diff2 >> 1; + + mask = (1 << (mask + 1)) - 1; +#endif + + assert(mask >= diff && mask < 2 * diff); + + // Rejection sampling + do { + randombytes((unsigned char *)&rand32, sizeof(rand32)); + +#ifdef TARGET_BIG_ENDIAN + rand32 = BSWAP32(rand32); +#endif + + rand32 &= mask; + } while (rand32 > (int32_t)diff); + + rand32 += a; + ibz_set(rand, rand32); + + return 1; +} + +int +ibz_rand_interval_minm_m(ibz_t *rand, int32_t m) +{ + int ret = 1; + mpz_t m_big; + + // m_big = 2 * m + mpz_init_set_si(m_big, m); + mpz_add(m_big, m_big, m_big); + + // Sample in [0, 2*m] + ret = ibz_rand_interval(rand, &ibz_const_zero, &m_big); + + // Adjust to range [-m, m] + mpz_sub_ui(*rand, *rand, m); + + mpz_clear(m_big); + + return ret; +} + +int +ibz_rand_interval_bits(ibz_t *rand, uint32_t m) +{ + int ret = 1; + mpz_t tmp; + mpz_t low; + mpz_init_set_ui(tmp, 1); + mpz_mul_2exp(tmp, tmp, m); + mpz_init(low); + mpz_neg(low, tmp); + ret = ibz_rand_interval(rand, &low, &tmp); + mpz_clear(tmp); + mpz_clear(low); + if (ret != 1) + goto err; + mpz_sub_ui(*rand, *rand, (unsigned long int)m); + return ret; +err: + mpz_clear(tmp); + mpz_clear(low); + return ret; +} + +int +ibz_bitsize(const ibz_t *a) +{ + return (int)mpz_sizeinbase(*a, 2); +} + +int 
+ibz_size_in_base(const ibz_t *a, int base) +{ + return (int)mpz_sizeinbase(*a, base); +} + +void +ibz_copy_digits(ibz_t *target, const digit_t *dig, int dig_len) +{ + mpz_import(*target, dig_len, -1, sizeof(digit_t), 0, 0, dig); +} + +void +ibz_to_digits(digit_t *target, const ibz_t *ibz) +{ + // From the GMP documentation: + // "If op is zero then the count returned will be zero and nothing written to rop." + // The next line ensures zero is written to the first limb of target if ibz is zero; + // target is then overwritten by the actual value if it is not. + target[0] = 0; + mpz_export(target, NULL, -1, sizeof(digit_t), 0, 0, *ibz); +} + +int +ibz_probab_prime(const ibz_t *n, int reps) +{ + int ret = mpz_probab_prime_p(*n, reps); + DEBUG_STR_FUN_INT_MP_INT("ibz_probab_prime", ret, n, reps); + return ret; +} + +void +ibz_gcd(ibz_t *gcd, const ibz_t *a, const ibz_t *b) +{ + mpz_gcd(*gcd, *a, *b); +} + +int +ibz_invmod(ibz_t *inv, const ibz_t *a, const ibz_t *mod) +{ + return (mpz_invert(*inv, *a, *mod) ? 1 : 0); +} + +int +ibz_legendre(const ibz_t *a, const ibz_t *p) +{ + return mpz_legendre(*a, *p); +} + +int +ibz_sqrt(ibz_t *sqrt, const ibz_t *a) +{ + if (mpz_perfect_square_p(*a)) { + mpz_sqrt(*sqrt, *a); + return 1; + } else { + return 0; + } +} + +void +ibz_sqrt_floor(ibz_t *sqrt, const ibz_t *a) +{ + mpz_sqrt(*sqrt, *a); +} + +int +ibz_sqrt_mod_p(ibz_t *sqrt, const ibz_t *a, const ibz_t *p) +{ +#ifndef NDEBUG + assert(ibz_probab_prime(p, 100)); +#endif + // Case a = 0 + { + ibz_t test; + ibz_init(&test); + ibz_mod(&test, a, p); + if (ibz_is_zero(&test)) { + ibz_set(sqrt, 0); + } + ibz_finalize(&test); + } +#ifdef DEBUG_VERBOSE + ibz_t a_cp, p_cp; + ibz_init(&a_cp); + ibz_init(&p_cp); + ibz_copy(&a_cp, a); + ibz_copy(&p_cp, p); +#endif + + mpz_t amod, tmp, exp, a4, a2, q, z, qnr, x, y, b, pm1; + mpz_init(amod); + mpz_init(tmp); + mpz_init(exp); + mpz_init(a4); + mpz_init(a2); + mpz_init(q); + mpz_init(z); + mpz_init(qnr); + mpz_init(x); + mpz_init(y); + mpz_init(b); + mpz_init(pm1); + + int ret = 1; + + mpz_mod(amod, *a, *p); + if (mpz_cmp_ui(amod, 0) < 0) { + mpz_add(amod, *p, amod); + } + + if (mpz_legendre(amod, *p) != 1) { + ret = 0; + goto end; + } + + mpz_sub_ui(pm1, *p, 1); + + if (mpz_mod_ui(tmp, *p, 4) == 3) { + // p % 4 == 3 + mpz_add_ui(tmp, *p, 1); + mpz_fdiv_q_2exp(tmp, tmp, 2); + mpz_powm(*sqrt, amod, tmp, *p); + } else if (mpz_mod_ui(tmp, *p, 8) == 5) { + // p % 8 == 5 + mpz_sub_ui(tmp, *p, 1); + mpz_fdiv_q_2exp(tmp, tmp, 2); + mpz_powm(tmp, amod, tmp, *p); // a^{(p-1)/4} mod p + if (!mpz_cmp_ui(tmp, 1)) { + mpz_add_ui(tmp, *p, 3); + mpz_fdiv_q_2exp(tmp, tmp, 3); + mpz_powm(*sqrt, amod, tmp, *p); // a^{(p+3)/8} mod p + } else { + mpz_sub_ui(tmp, *p, 5); + mpz_fdiv_q_2exp(tmp, tmp, 3); // (p - 5) / 8 + mpz_mul_2exp(a4, amod, 2); // 4*a + mpz_powm(tmp, a4, tmp, *p); + + mpz_mul_2exp(a2, amod, 1); + mpz_mul(tmp, a2, tmp); + mpz_mod(*sqrt, tmp, *p); + } + } else { + // p % 8 == 1 -> Shanks-Tonelli + int e = 0; + mpz_sub_ui(q, *p, 1); + while (mpz_tstbit(q, e) == 0) + e++; + mpz_fdiv_q_2exp(q, q, e); + + // 1. find generator - non-quadratic residue + mpz_set_ui(qnr, 2); + while (mpz_legendre(qnr, *p) != -1) + mpz_add_ui(qnr, qnr, 1); + mpz_powm(z, qnr, q, *p); + + // 2. 
Initialize + mpz_set(y, z); + mpz_powm(y, amod, q, *p); // y = a^q mod p + + mpz_add_ui(tmp, q, 1); // tmp = (q + 1) / 2 + mpz_fdiv_q_2exp(tmp, tmp, 1); + + mpz_powm(x, amod, tmp, *p); // x = a^(q + 1)/2 mod p + + mpz_set_ui(exp, 1); + mpz_mul_2exp(exp, exp, e - 2); + + for (int i = 0; i < e; ++i) { + mpz_powm(b, y, exp, *p); + + if (!mpz_cmp(b, pm1)) { + mpz_mul(x, x, z); + mpz_mod(x, x, *p); + + mpz_mul(y, y, z); + mpz_mul(y, y, z); + mpz_mod(y, y, *p); + } + + mpz_powm_ui(z, z, 2, *p); + mpz_fdiv_q_2exp(exp, exp, 1); + } + + mpz_set(*sqrt, x); + } + +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_sqrt_mod_p", sqrt, &a_cp, &p_cp); + ibz_finalize(&a_cp); + ibz_finalize(&p_cp); +#endif + +end: + mpz_clear(amod); + mpz_clear(tmp); + mpz_clear(exp); + mpz_clear(a4); + mpz_clear(a2); + mpz_clear(q); + mpz_clear(z); + mpz_clear(qnr); + mpz_clear(x); + mpz_clear(y); + mpz_clear(b); + mpz_clear(pm1); + + return ret; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/intbig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/intbig.h new file mode 100644 index 0000000000..a0c2c02477 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/intbig.h @@ -0,0 +1,303 @@ +/** @file + * + * @authors Luca De Feo, Sina Schaeffler + * + * @brief Declarations for big integers in the reference implementation + */ + +#ifndef INTBIG_H +#define INTBIG_H + +#include +#if defined(MINI_GMP) +#include +#include +#else +#include +#endif +#include +#include + +/** @ingroup quat_quat + * @defgroup ibz_all Signed big integers (gmp-based) + * @{ + */ + +/** @defgroup ibz_t Precise number types + * @{ + */ + +/** @brief Type for signed long integers + * + * @typedef ibz_t + * + * For integers of arbitrary size, used by intbig module, using gmp + */ +typedef mpz_t ibz_t; + +/** @} + */ + +/** @defgroup ibz_c Constants + * @{ + */ + +/** + * Constant zero + */ +extern const ibz_t ibz_const_zero; + +/** + * Constant one + */ +extern const ibz_t ibz_const_one; + +/** + * Constant two + */ +extern const ibz_t ibz_const_two; + +/** + * Constant three + */ +extern const ibz_t ibz_const_three; + +/** @} + */ + +/** @defgroup ibz_finit Constructors and Destructors + * @{ + */ + +void ibz_init(ibz_t *x); +void ibz_finalize(ibz_t *x); + +/** @} + */ + +/** @defgroup ibz_za Basic integer arithmetic + * @{ + */ + +/** @brief sum=a+b + */ +void ibz_add(ibz_t *sum, const ibz_t *a, const ibz_t *b); + +/** @brief diff=a-b + */ +void ibz_sub(ibz_t *diff, const ibz_t *a, const ibz_t *b); + +/** @brief prod=a*b + */ +void ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b); + +/** @brief neg=-a + */ +void ibz_neg(ibz_t *neg, const ibz_t *a); + +/** @brief abs=|a| + */ +void ibz_abs(ibz_t *abs, const ibz_t *a); + +/** @brief Euclidean division of a by b + * + * Computes quotient, remainder so that remainder+quotient*b = a where 0<=|remainder|<|b| + * The quotient is rounded towards zero. + */ +void ibz_div(ibz_t *quotient, ibz_t *remainder, const ibz_t *a, const ibz_t *b); + +/** @brief Euclidean division of a by 2^exp + * + * Computes a right shift of abs(a) by exp bits, then sets sign(quotient) to sign(a). + * + * Division and rounding is as in ibz_div. 
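+ *
+ * A minimal usage sketch (illustrative only, not taken from the library's tests; all
+ * functions used here are declared in this header):
+ * @code
+ * ibz_t a, q;
+ * ibz_init(&a);
+ * ibz_init(&q);
+ * ibz_set(&a, -7);
+ * ibz_div_2exp(&q, &a, 1); // |a| >> 1 = 3, then the sign of a is applied, so q = -3
+ * ibz_finalize(&a);
+ * ibz_finalize(&q);
+ * @endcode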
+ */ +void ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp); + +/** @brief Two adic valuation computation + * + * Computes the position of the first 1 in the binary representation of the integer given in input + * + * When this number is a power of two this gives the two adic valuation of the integer + */ +int ibz_two_adic(ibz_t *pow); + +/** @brief r = a mod b + * + * Assumes valid inputs + * The sign of the divisor is ignored, the result is always non-negative + */ +void ibz_mod(ibz_t *r, const ibz_t *a, const ibz_t *b); + +unsigned long int ibz_mod_ui(const mpz_t *n, unsigned long int d); + +/** @brief Test if a = 0 mod b + */ +int ibz_divides(const ibz_t *a, const ibz_t *b); + +/** @brief pow=x^e + * + * Assumes valid inputs, The case 0^0 yields 1. + */ +void ibz_pow(ibz_t *pow, const ibz_t *x, uint32_t e); + +/** @brief pow=(x^e) mod m + * + * Assumes valid inputs + */ +void ibz_pow_mod(ibz_t *pow, const ibz_t *x, const ibz_t *e, const ibz_t *m); + +/** @brief Compare a and b + * + * @returns a positive value if a > b, zero if a = b, and a negative value if a < b + */ +int ibz_cmp(const ibz_t *a, const ibz_t *b); + +/** @brief Test if x is 0 + * + * @returns 1 if x=0, 0 otherwise + */ +int ibz_is_zero(const ibz_t *x); + +/** @brief Test if x is 1 + * + * @returns 1 if x=1, 0 otherwise + */ +int ibz_is_one(const ibz_t *x); + +/** @brief Compare x to y + * + * @returns 0 if x=y, positive if x>y, negative if x= 0 and target must hold sufficient elements to hold ibz + * + * @param target Target digit_t array + * @param ibz ibz source ibz_t element + */ +void ibz_to_digits(digit_t *target, const ibz_t *ibz); +#define ibz_to_digit_array(T, I) \ + do { \ + memset((T), 0, sizeof(T)); \ + ibz_to_digits((T), (I)); \ + } while (0) + +/** @brief get int32_t equal to the lowest bits of i + * + * Should not be used to get the value of i if its bitsize is close to 32 bit + * It can however be used on any i to get an int32_t of the same parity as i (and same value modulo + * 4) + * + * @param i Input integer + */ +int32_t ibz_get(const ibz_t *i); + +/** @brief generate random value in [a, b] + * assumed that a >= 0 and b >= 0 and a < b + * @returns 1 on success, 0 on failiure + */ +int ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b); + +/** @brief generate random value in [-m, m] + * assumed that m > 0 and bitlength of m < 32 bit + * @returns 1 on success, 0 on failiure + */ +int ibz_rand_interval_minm_m(ibz_t *rand, int32_t m); + +/** @brief Bitsize of a. + * + * @returns Bitsize of a. + * + */ +int ibz_bitsize(const ibz_t *a); + +/** @brief Size of a in given base. + * + * @returns Size of a in given base. 
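+ *
+ * The returned size is typically used to allocate a conversion buffer, as in this
+ * sketch of the pattern used in intbig.c (the two extra characters leave room for a
+ * sign and the terminating null byte; ibz_convert_to_str is declared in
+ * intbig_internal.h, and a is assumed to be an initialized ibz_t):
+ * @code
+ * int len = ibz_size_in_base(&a, 16);
+ * char str[len + 2];
+ * ibz_convert_to_str(&a, str, 16);
+ * @endcode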
+ * + */ +int ibz_size_in_base(const ibz_t *a, int base); + +/** @} + */ + +/** @defgroup ibz_n Number theory functions + * @{ + */ + +/** + * @brief Greatest common divisor + * + * @param gcd Output: Set to the gcd of a and b + * @param a + * @param b + */ +void ibz_gcd(ibz_t *gcd, const ibz_t *a, const ibz_t *b); + +/** + * @brief Modular inverse + * + * @param inv Output: Set to the integer in [0,mod[ such that a*inv = 1 mod (mod) if it exists + * @param a + * @param mod + * @returns 1 if inverse exists and was computed, 0 otherwise + */ +int ibz_invmod(ibz_t *inv, const ibz_t *a, const ibz_t *mod); + +/** + * @brief Floor of Integer square root + * + * @param sqrt Output: Set to the floor of an integer square root + * @param a number of which a floor of an integer square root is searched + */ +void ibz_sqrt_floor(ibz_t *sqrt, const ibz_t *a); + +/** @} + */ + +// end of ibz_all +/** @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/intbig_internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/intbig_internal.h new file mode 100644 index 0000000000..de4762a6d3 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/intbig_internal.h @@ -0,0 +1,123 @@ +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations for big integer functions only used in quaternion functions + */ + +#ifndef INTBIG_INTERNAL_H +#define INTBIG_INTERNAL_H + +#include "intbig.h" + +/** @internal + * @ingroup quat_helpers + * @defgroup ibz_helper Internal integer functions (gmp-based) + * @{ + */ + +/********************************************************************/ + +/** @brief Euclidean division of a by b + * + * Computes quotient, remainder so that remainder+quotient*b = a where 0<=|remainder|<|b| + * The quotient is rounded towards minus infinity. + */ +void ibz_div_floor(ibz_t *q, ibz_t *r, const ibz_t *n, const ibz_t *d); + +/** @brief generate random value in [a, b] + * assumed that a >= 0, b >= 0 and a < b + * @returns 1 on success, 0 on failiure + */ +int ibz_rand_interval_i(ibz_t *rand, int32_t a, int32_t b); + +/** @brief generate random value in [-2^m, 2^m] + * assumed that m > 0 and bitlength of m < 32 bit + * @returns 1 on success, 0 on failiure + */ +int ibz_rand_interval_bits(ibz_t *rand, uint32_t m); + +/** @brief set str to a string containing the representation of i in base + * + * Base should be 10 or 16 + * + * str should be an array of length enough to store the representation of in + * in base, which can be obtained by ibz_sizeinbase(i, base) + 2, where the 2 + * is for the sign and the null terminator + * + * Case for base 16 does not matter + * + * @returns 1 if the integer could be converted to a string, 0 otherwise + */ +int ibz_convert_to_str(const ibz_t *i, char *str, int base); + +/** @brief print num in base to stdout + * + * Base should be 10 or 16 + */ +void ibz_print(const ibz_t *num, int base); + +/** @brief set i to integer contained in string when read as number in base + * + * Base should be 10 or 16, and the number should be written without ponctuation or whitespaces + * + * Case for base 16 does not matter + * + * @returns 1 if the string could be converted to an integer, 0 otherwise + */ +int ibz_set_from_str(ibz_t *i, const char *str, int base); + +/** + * @brief Probabilistic primality test + * + * @param n The number to test + * @param reps Number of Miller-Rabin repetitions. 
The more, the slower and the less likely are + * false positives + * @return 1 if probably prime, 0 if certainly not prime, 2 if certainly prime + * + * Using GMP's implementation: + * + * From GMP's documentation: "This function performs some trial divisions, a Baillie-PSW probable + * prime test, then reps-24 Miller-Rabin probabilistic primality tests." + */ +int ibz_probab_prime(const ibz_t *n, int reps); + +/** + * @brief Square root modulo a prime + * + * @returns 1 if square root of a mod p exists and was computed, 0 otherwise + * @param sqrt Output: Set to a square root of a mod p if any exist + * @param a number of which a square root mod p is searched + * @param p assumed prime + */ +int ibz_sqrt_mod_p(ibz_t *sqrt, const ibz_t *a, const ibz_t *p); + +/** + * @brief Integer square root of a perfect square + * + * @returns 1 if an integer square root of a exists and was computed, 0 otherwise + * @param sqrt Output: Set to a integer square root of a if any exist + * @param a number of which an integer square root is searched + */ +int ibz_sqrt(ibz_t *sqrt, const ibz_t *a); + +/** + * @brief Legendre symbol of a mod p + * + * @returns Legendre symbol of a mod p + * @param a + * @param p assumed prime + * + * Uses GMP's implementation + * + * If output is 1, a is a square mod p, if -1, not. If 0, it is divisible by p + */ +int ibz_legendre(const ibz_t *a, const ibz_t *p); + +/** @} + */ + +// end of ibz_all +/** @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/integers.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/integers.c new file mode 100644 index 0000000000..ec7cda05eb --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/integers.c @@ -0,0 +1,116 @@ +#include +#include "internal.h" +#include +#include +#include + +// Random prime generation for tests +int +ibz_generate_random_prime(ibz_t *p, int is3mod4, int bitsize, int probability_test_iterations) +{ + assert(bitsize != 0); + int found = 0; + ibz_t two_pow, two_powp; + + ibz_init(&two_pow); + ibz_init(&two_powp); + ibz_pow(&two_pow, &ibz_const_two, (bitsize - 1) - (0 != is3mod4)); + ibz_pow(&two_powp, &ibz_const_two, bitsize - (0 != is3mod4)); + + int cnt = 0; + while (!found) { + cnt++; + if (cnt % 100000 == 0) { + printf("Random prime generation is still running after %d attempts, this is not " + "normal! 
The expected number of attempts is %d \n", + cnt, + bitsize); + } + ibz_rand_interval(p, &two_pow, &two_powp); + ibz_add(p, p, p); + if (is3mod4) { + ibz_add(p, p, p); + ibz_add(p, &ibz_const_two, p); + } + ibz_add(p, &ibz_const_one, p); + + found = ibz_probab_prime(p, probability_test_iterations); + } + ibz_finalize(&two_pow); + ibz_finalize(&two_powp); + return found; +} + +// solves x^2 + n y^2 == p for positive integers x, y +// assumes that p is prime and -n mod p is a square +int +ibz_cornacchia_prime(ibz_t *x, ibz_t *y, const ibz_t *n, const ibz_t *p) +{ + ibz_t r0, r1, r2, a, prod; + ibz_init(&r0); + ibz_init(&r1); + ibz_init(&r2); + ibz_init(&a); + ibz_init(&prod); + + int res = 0; + + // manage case p = 2 separately + if (!ibz_cmp(p, &ibz_const_two)) { + if (ibz_is_one(n)) { + ibz_set(x, 1); + ibz_set(y, 1); + res = 1; + } + goto done; + } + // manage case p = n separately + if (!ibz_cmp(p, n)) { + ibz_set(x, 0); + ibz_set(y, 1); + res = 1; + goto done; + } + + // test coprimality (should always be ok in our cases) + ibz_gcd(&r2, p, n); + if (!ibz_is_one(&r2)) + goto done; + + // get sqrt of -n mod p + ibz_neg(&r2, n); + if (!ibz_sqrt_mod_p(&r2, &r2, p)) + goto done; + + // run loop + ibz_copy(&prod, p); + ibz_copy(&r1, p); + ibz_copy(&r0, p); + while (ibz_cmp(&prod, p) >= 0) { + ibz_div(&a, &r0, &r2, &r1); + ibz_mul(&prod, &r0, &r0); + ibz_copy(&r2, &r1); + ibz_copy(&r1, &r0); + } + // test if result is solution + ibz_sub(&a, p, &prod); + ibz_div(&a, &r2, &a, n); + if (!ibz_is_zero(&r2)) + goto done; + if (!ibz_sqrt(y, &a)) + goto done; + + ibz_copy(x, &r0); + ibz_mul(&a, y, y); + ibz_mul(&a, &a, n); + ibz_add(&prod, &prod, &a); + res = !ibz_cmp(&prod, p); + +done: + ibz_finalize(&r0); + ibz_finalize(&r1); + ibz_finalize(&r2); + ibz_finalize(&a); + ibz_finalize(&prod); + return res; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/internal.h new file mode 100644 index 0000000000..edbba345f9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/internal.h @@ -0,0 +1,812 @@ +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations for helper functions for quaternion algebra implementation + */ + +#ifndef QUAT_HELPER_H +#define QUAT_HELPER_H + +#include +#include +#include "intbig_internal.h" + +/** @internal + * @ingroup quat_quat + * @defgroup quat_helpers Quaternion module internal functions + * @{ + */ + +/** @internal + * @defgroup quat_alg_helpers Helper functions for the alg library + * @{ + */ + +/** @internal + * @brief helper function for initializing small quaternion algebras. 
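+ *
+ * Illustrative sketch for test code (the value passed as p is assumed to be a small
+ * prime suitable for the algebra with i^2 = -1, j^2 = -p; cleanup of alg is omitted):
+ * @code
+ * quat_alg_t alg;
+ * quat_alg_init_set_ui(&alg, 19);
+ * // ... run quaternion unit tests against alg ...
+ * @endcode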
+ */ +void quat_alg_init_set_ui(quat_alg_t *alg, + unsigned int p); // test/lattice, test/ideal, test/algebra + +/** @brief a*b + * + * Multiply two coordinate vectors as elements of the algebra in basis (1,i,j,ij) with i^2 = -1, j^2 + * = -p + * + * @param res Output: Will contain product + * @param a + * @param b + * @param alg The quaternion algebra + */ +void quat_alg_coord_mul(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b, const quat_alg_t *alg); + +/** @brief a=b + * + * Test if a and b represent the same quaternion algebra element + * + * @param a + * @param b + * @returns 1 if a=b, 0 otherwise + */ +int quat_alg_elem_equal(const quat_alg_elem_t *a, const quat_alg_elem_t *b); + +/** @brief Test if x is 0 + * + * @returns 1 if x=0, 0 otherwise + * + * x is 0 iff all coordinates in x->coord are 0 + */ +int quat_alg_elem_is_zero(const quat_alg_elem_t *x); + +/** @brief Compute same denominator form of two quaternion algebra elements + * + * res_a=a and res_b=b (representing the same element) and res_a.denom = res_b.denom + * + * @param res_a + * @param res_b + * @param a + * @param b + */ +void quat_alg_equal_denom(quat_alg_elem_t *res_a, + quat_alg_elem_t *res_b, + const quat_alg_elem_t *a, + const quat_alg_elem_t *b); + +/** @brief Copies the given values into an algebra element, without normalizing it + * + * @param elem Output: algebra element of coordinates [coord0,coord1,coord2,coord3] and denominator + * denom + * @param denom Denominator, must be non zero + * @param coord0 Coordinate on 1 (0th vector of standard algebra basis) + * @param coord1 Coordinate on i (1st vector of standard algebra basis) + * @param coord2 Coordinate on j (2nd vector of standard algebra basis) + * @param coord3 Coordinate on ij (3rd vector of standard algebra basis) + */ +void quat_alg_elem_copy_ibz(quat_alg_elem_t *elem, + const ibz_t *denom, + const ibz_t *coord0, + const ibz_t *coord1, + const ibz_t *coord2, + const ibz_t *coord3); + +/** @brief Sets an algebra element to the given integer values, without normalizing it + * + * @param elem Output: algebra element of coordinates [coord0,coord1,coord2,coord3] and denominator + * denom + * @param denom Denominator, must be non zero + * @param coord0 Coordinate on 1 (0th vector of standard algebra basis) + * @param coord1 Coordinate on i (1st vector of standard algebra basis) + * @param coord2 Coordinate on j (2nd vector of standard algebra basis) + * @param coord3 Coordinate on ij (3rd vector of standard algebra basis) + */ +void quat_alg_elem_set(quat_alg_elem_t *elem, + int32_t denom, + int32_t coord0, + int32_t coord1, + int32_t coord2, + int32_t coord3); + +/** + * @brief Creates algebra element from scalar + * + * Resulting element has 1-coordinate equal to numerator/denominator + * + * @param elem Output: algebra element with numerator/denominator as first coordiante + * (1-coordinate), 0 elsewhere (i,j,ij coordinates) + * @param numerator + * @param denominator Assumed non zero + */ +void quat_alg_scalar(quat_alg_elem_t *elem, const ibz_t *numerator, const ibz_t *denominator); + +/** @brief a+b for algebra elements + * + * @param res Output + * @param a Algebra element + * @param b Algebra element + */ +void quat_alg_add(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b); + +/** @brief a-b for algebra elements + * + * @param res Output + * @param a Algebra element + * @param b Algebra element + */ +void quat_alg_sub(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b); + +/** @brief 
Multiplies algebra element by integer scalar, without normalizing it + * + * @param res Output + * @param scalar Integer + * @param elem Algebra element + */ +void quat_alg_elem_mul_by_scalar(quat_alg_elem_t *res, const ibz_t *scalar, const quat_alg_elem_t *elem); + +/** @} + */ + +/** @internal + * @defgroup quat_dim4_helpers Helper functions for functions for matrices or vectors in dimension 4 + * @{ + */ + +/** @internal + * @defgroup quat_inv_helpers Helper functions for the integer matrix inversion function + * @{ + */ + +/** @brief a1a2+b1b2+c1c2 + * + * @param coeff Output: The coefficien which was computed as a1a2+b1b2-c1c2 + * @param a1 + * @param a2 + * @param b1 + * @param b2 + * @param c1 + * @param c2 + */ +void ibz_inv_dim4_make_coeff_pmp(ibz_t *coeff, + const ibz_t *a1, + const ibz_t *a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2); + +/** @brief -a1a2+b1b2-c1c2 + * + * @param coeff Output: The coefficien which was computed as -a1a2+b1b2-c1c2 + * @param a1 + * @param a2 + * @param b1 + * @param b2 + * @param c1 + * @param c2 + */ +void ibz_inv_dim4_make_coeff_mpm(ibz_t *coeff, + const ibz_t *a1, + const ibz_t *a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2); + +/** @brief Matrix determinant and a matrix inv such that inv/det is the inverse matrix of the input + * + * Implemented following the methof of 2x2 minors explained at Method from + * https://www.geometrictools.com/Documentation/LaplaceExpansionTheorem.pdf (visited on 3rd of May + * 2023, 16h15 CEST) + * + * @returns 1 if the determinant of mat is not 0 and an inverse was computed, 0 otherwise + * @param inv Output: Will contain an integer matrix which, dividet by det, will yield the rational + * inverse of the matrix if it exists, can be NULL + * @param det Output: Will contain the determinant of the input matrix, can be NULL + * @param mat Matrix of which the inverse will be computed + */ +int ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_4x4_t *mat); + +/** @} + */ + +/** @internal + * @defgroup quat_dim4_lat_helpers Helper functions on vectors and matrices used mainly for lattices + * @{ + */ + +/** @brief Copy all values from one vector to another + * + * @param new Output: is set to same values as vec + * @param vec + */ +void ibz_vec_4_copy(ibz_vec_4_t *new, const ibz_vec_4_t *vec); + +/** @brief set res to values coord0,coord1,coord2,coord3 + * + * @param res Output: Will contain vector (coord0,coord1,coord2,coord3) + * @param coord0 + * @param coord1 + * @param coord2 + * @param coord3 + */ +void ibz_vec_4_copy_ibz(ibz_vec_4_t *res, + const ibz_t *coord0, + const ibz_t *coord1, + const ibz_t *coord2, + const ibz_t *coord3); + +/** @brief Set a vector of 4 integers to given values + * + * @param vec Output: is set to given coordinates + * @param coord0 + * @param coord1 + * @param coord2 + * @param coord3 + */ +void ibz_vec_4_set(ibz_vec_4_t *vec, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3); + +/** @brief a+b + * + * Add two integer 4-vectors + * + * @param res Output: Will contain sum + * @param a + * @param b + */ +void ibz_vec_4_add(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b); + +/** @brief a-b + * + * Substract two integer 4-vectors + * + * @param res Output: Will contain difference + * @param a + * @param b + */ +void ibz_vec_4_sub(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b); + +/** @brief x=0 + * + * Test if a vector x has only zero coordinates + * + * 
@returns 0 if x has at least one non-zero coordinates, 1 otherwise + * @param x + */ +int ibz_vec_4_is_zero(const ibz_vec_4_t *x); + +/** @brief Compute the linear combination lc = coeff_a vec_a + coeff_b vec_b + * + * @param lc Output: linear combination lc = coeff_a vec_a + coeff_b vec_b + * @param coeff_a Scalar multiplied to vec_a + * @param vec_a + * @param coeff_b Scalar multiplied to vec_b + * @param vec_b + */ +void ibz_vec_4_linear_combination(ibz_vec_4_t *lc, + const ibz_t *coeff_a, + const ibz_vec_4_t *vec_a, + const ibz_t *coeff_b, + const ibz_vec_4_t *vec_b); + +/** @brief multiplies all values in vector by same scalar + * + * @param prod Output + * @param scalar + * @param vec + */ +void ibz_vec_4_scalar_mul(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec); + +/** @brief divides all values in vector by same scalar + * + * @returns 1 if scalar divided all values in mat, 0 otherwise (division is performed in both cases) + * @param quot Output + * @param scalar + * @param vec + */ +int ibz_vec_4_scalar_div(ibz_vec_4_t *quot, const ibz_t *scalar, const ibz_vec_4_t *vec); + +/** @brief Negation for vectors of 4 integers + * + * @param neg Output: is set to -vec + * @param vec + */ +void ibz_vec_4_negate(ibz_vec_4_t *neg, const ibz_vec_4_t *vec); + +/** + * @brief content of a 4-vector of integers + * + * The content is the GCD of all entries. + * + * @param v A 4-vector of integers + * @param content Output: the resulting gcd + */ +void ibz_vec_4_content(ibz_t *content, const ibz_vec_4_t *v); + +/** @brief -mat for mat a 4x4 integer matrix + * + * @param neg Output: is set to -mat + * @param mat Input matrix + */ +void ibz_mat_4x4_negate(ibz_mat_4x4_t *neg, const ibz_mat_4x4_t *mat); + +/** @brief Set all coefficients of a matrix to zero for 4x4 integer matrices + * + * @param zero + */ +void ibz_mat_4x4_zero(ibz_mat_4x4_t *zero); + +/** @brief Set a matrix to the identity for 4x4 integer matrices + * + * @param id + */ +void ibz_mat_4x4_identity(ibz_mat_4x4_t *id); + +/** @brief Test equality to identity for 4x4 integer matrices + * + * @returns 1 if mat is the identity matrix, 0 otherwise + * @param mat + */ +int ibz_mat_4x4_is_identity(const ibz_mat_4x4_t *mat); + +/** @brief Equality test for 4x4 integer matrices + * + * @returns 1 if equal, 0 otherwise + * @param mat1 + * @param mat2 + */ +int ibz_mat_4x4_equal(const ibz_mat_4x4_t *mat1, const ibz_mat_4x4_t *mat2); + +/** @brief Copies all values from a 4x4 integer matrix to another one + * + * @param new Output: matrix which will have its entries set to mat's entries + * @param mat Input matrix + */ +void ibz_mat_4x4_copy(ibz_mat_4x4_t *new, const ibz_mat_4x4_t *mat); + +/** @brief Matrix by integer multiplication + * + * @param prod Output + * @param scalar + * @param mat + */ +void ibz_mat_4x4_scalar_mul(ibz_mat_4x4_t *prod, const ibz_t *scalar, const ibz_mat_4x4_t *mat); + +/** @brief gcd of all values in matrix + * + * @param gcd Output + * @param mat + */ +void ibz_mat_4x4_gcd(ibz_t *gcd, const ibz_mat_4x4_t *mat); + +/** @brief Verifies whether the 4x4 input matrix is in Hermite Normal Form + * + * @returns 1 if mat is in HNF, 0 otherwise + * @param mat Matrix to be tested + */ +int ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat); + +/** @brief Hermite Normal Form of a matrix of 8 integer vectors, computed using a multiple of its + * determinant as modulo + * + * Algorithm used is the one at number 2.4.8 in Henri Cohen's "A Course in Computational Algebraic + * Number Theory" (Springer Verlag, in series 
"Graduate texts in Mathematics") from 1993 + * + * @param hnf Output: Matrix in Hermite Normal Form generating the same lattice as generators + * @param generators matrix whose colums generate the same lattice than the output + * @param generator_number number of generators given + * @param mod integer, must be a multiple of the volume of the lattice generated by the columns of + * generators + */ +void ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, + int generator_number, + const ibz_vec_4_t *generators, + const ibz_t *mod); + +/** @} + */ +/** @} + */ + +/** @internal + * @defgroup quat_dim2_helpers Helper functions for dimension 2 + * @{ + */ + +/** @brief Set vector coefficients to the given integers + * + * @param vec Output: Vector + * @param a0 + * @param a1 + */ +void ibz_vec_2_set(ibz_vec_2_t *vec, int a0, int a1); // test/dim2 + +/** @brief Set matrix coefficients to the given integers + * + * @param mat Output: Matrix + * @param a00 + * @param a01 + * @param a10 + * @param a11 + */ +void ibz_mat_2x2_set(ibz_mat_2x2_t *mat, int a00, int a01, int a10, int a11); // test/dim2 + +void ibz_mat_2x2_add(ibz_mat_2x2_t *sum, const ibz_mat_2x2_t *a, + const ibz_mat_2x2_t *b); // unused + +/** @brief Determinant of a 2x2 integer matrix given as 4 integers + * + * @param det Output: Determinant of the matrix + * @param a11 matrix coefficient (upper left corner) + * @param a12 matrix coefficient (upper right corner) + * @param a21 matrix coefficient (lower left corner) + * @param a22 matrix coefficient (lower right corner) + */ +void ibz_mat_2x2_det_from_ibz(ibz_t *det, + const ibz_t *a11, + const ibz_t *a12, + const ibz_t *a21, + const ibz_t *a22); // dim4 + +/** + * @brief a*b for 2x2 integer matrices modulo m + * + * @param prod Output matrix + * @param mat_a Input matrix + * @param mat_b Input matrix + * @param m Integer modulo + */ +void ibz_2x2_mul_mod(ibz_mat_2x2_t *prod, + const ibz_mat_2x2_t *mat_a, + const ibz_mat_2x2_t *mat_b, + const ibz_t *m); // test/dim2 +/** @} + */ + +/** @internal + * @defgroup quat_lattice_helper Helper functions for the lattice library (dimension 4) + * @{ + */ + +/** + * @brief Modifies a lattice to put it in hermite normal form + * + * In-place modification of the lattice. + * + * @param lat input lattice + * + * On a correct lattice this function changes nothing (since it is already in HNF), but it can be + * used to put a handmade one in correct form in order to use the other lattice functions. + */ +void quat_lattice_hnf(quat_lattice_t *lat); // lattice, test/lattice, test/algebra, + +/** + * @brief Lattice equality + * + * Lattice bases are assumed to be under HNF, but denominators are free. + * + * @returns 1 if both lattices are equal, 0 otherwise + * @param lat1 + * @param lat2 + */ +int quat_lattice_equal(const quat_lattice_t *lat1, + const quat_lattice_t *lat2); // ideal, lattice, test/lattice, test/ideal + +/** + * @brief Lattice inclusion test + * + * Lattice bases are assumed to be under HNF, but denominators are free. 
+ * + * @returns 1 if sublat is included in overlat, 0 otherwise + * @param sublat Lattice whose inclusion in overlat will be testes + * @param overlat + */ +int quat_lattice_inclusion(const quat_lattice_t *sublat, + const quat_lattice_t *overlat); // test/lattice, test/ideal + +/** @brief Divides basis and denominator of a lattice by their gcd + * + * @param reduced Output + * @param lat Lattice + */ +void quat_lattice_reduce_denom(quat_lattice_t *reduced, + const quat_lattice_t *lat); // lattice, ideal, + +/** @brief a+b for lattices + * + * @param res Output + * @param lat1 Lattice + * @param lat2 Lattice + */ +void quat_lattice_add(quat_lattice_t *res, + const quat_lattice_t *lat1, + const quat_lattice_t *lat2); // ideal, lattice, test/lattice + +/** @brief a*b for lattices + * + * @param res Output + * @param lat1 Lattice + * @param lat2 Lattice + * @param alg The quaternion algebra + */ +void quat_lattice_mul(quat_lattice_t *res, + const quat_lattice_t *lat1, + const quat_lattice_t *lat2, + const quat_alg_t *alg); // ideal, lattie, test/ideal, test/lattice + +/** + * @brief Computes the dual lattice of lat, without putting its basis in HNF + * + * This function returns a lattice not under HNF. For careful internal use only. + * + * Computation method described in https://cseweb.ucsd.edu/classes/sp14/cse206A-a/lec4.pdf consulted + * on 19 of May 2023, 12h40 CEST + * + * @param dual Output: The dual lattice of lat. ATTENTION: is not under HNF. hnf computation must be + * applied before using lattice functions on it + * @param lat lattice, the dual of it will be computed + */ +void quat_lattice_dual_without_hnf(quat_lattice_t *dual, + const quat_lattice_t *lat); // lattice, ideal + +/** + * @brief Multiply all columns of lat with coord (as algebra elements) + * + * The columns and coord are seen as algebra elements in basis 1,i,j,ij, i^2 = -1, j^2 = -p). Coord + * is multiplied to the right of lat. + * + * The output matrix is not under HNF. + * + * @param prod Output: Matrix not under HND whose columns represent the algebra elements obtained as + * L*coord for L column of lat. + * @param lat Matrix whose columns are algebra elements in basis (1,i,j,ij) + * @param coord Integer coordinate algebra element in basis (1,i,j,ij) + * @param alg The quaternion algebra + */ +void quat_lattice_mat_alg_coord_mul_without_hnf(ibz_mat_4x4_t *prod, + const ibz_mat_4x4_t *lat, + const ibz_vec_4_t *coord, + const quat_alg_t *alg); // lattice + +/** @brief The index of sublat into overlat + * + * Assumes inputs are in HNF. + * + * @param index Output + * @param sublat A lattice in HNF, must be sublattice of overlat + * @param overlat A lattice in HNF, must be overlattice of sublat + */ +void quat_lattice_index(ibz_t *index, const quat_lattice_t *sublat, + const quat_lattice_t *overlat); // ideal + +/** @brief Compute the Gram matrix of the quaternion trace bilinear form + * + * Given a lattice of the quaternion algebra, computes the Gram matrix + * of the bilinear form + * + * 〈a,b〉 := [lattice->denom^2] Tr(a·conj(b)) + * + * multiplied by the square of the denominator of the lattice. + * + * This matrix always has integer entries. 
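+ * (With i^2 = -1 and j^2 = -p one has Tr(a·conj(b)) = 2(a0*b0 + a1*b1 + p*a2*b2 + p*a3*b3) on
+ * coordinates in the basis (1, i, j, ij), so once the denominator is cleared all entries are
+ * integers.)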
+ * + * @param G Output: Gram matrix of the trace bilinear form on the lattice, multiplied by the square + * of the denominator of the lattice + * @param lattice A lattice + * @param alg The quaternion algebra + */ +void quat_lattice_gram(ibz_mat_4x4_t *G, const quat_lattice_t *lattice, const quat_alg_t *alg); + +/** + * @brief Compute an integer parallelogram containing the ball of + * given radius for the positive definite quadratic form defined by + * the Gram matrix G. + * + * The computed parallelogram is defined by the vectors + * + * (x₁ x₂ x₃ x₄) · U + * + * with x_i ∈ [ -box[i], box[i] ]. + * + * @param box Output: bounds of the parallelogram + * @param U Output: Unimodular transformation defining the parallelogram + * @param G Gram matrix of the quadratic form, must be full rank + * @param radius Radius of the ball, must be non-negative + * @returns 0 if the box only contains the origin, 1 otherwise + */ +int quat_lattice_bound_parallelogram(ibz_vec_4_t *box, ibz_mat_4x4_t *U, const ibz_mat_4x4_t *G, const ibz_t *radius); + +/** @} + */ + +/** @internal + * @defgroup quat_lideal_helper Helper functions for ideals and orders + * @{ + */ +/** @brief Set norm of an ideal given its lattice and parent order + * + * @param lideal In/Output: Ideal which has lattice and parent_order correctly set, but not + * necessarily the norm. Will have norm correctly set too. + */ +void quat_lideal_norm(quat_left_ideal_t *lideal); // ideal + +/** + * @brief Left principal ideal of order, generated by x + * + * @param lideal Output: left ideal + * @param alg quaternion algebra + * @param order maximal order of alg whose left ideal is searched + * @param x generating element + * + * Creates the left ideal in 'order' generated by the element 'x' + */ +void quat_lideal_create_principal(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const quat_lattice_t *order, + const quat_alg_t *alg); // ideal, test/ideal + +/** + * @brief Equality test for left ideals + * + * @returns 1 if both left ideals are equal, 0 otherwise + * @param lideal1 left ideal + * @param lideal2 left ideal + * @param alg the quaternion algebra + */ +int quat_lideal_equals(const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); // test/ideal + +/** + * @brief Sum of two left ideals + * + * @param sum Output: Left ideal which is the sum of the 2 inputs + * @param lideal1 left ideal + * @param lideal2 left ideal + * @param alg the quaternion algebra + */ +void quat_lideal_add(quat_left_ideal_t *sum, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); // Not used outside + +/** + * @brief Left ideal product of left ideal I and element alpha + * + * @param product Output: lideal I*alpha, must have integer norm + * @param lideal left ideal + * @param alpha element multiplied to lideal to get the product ideal + * @param alg the quaternion algebra + * + * I*alpha where I is a left-ideal and alpha an element of the algebra + * + * The resulting ideal must have an integer norm + * + */ +void quat_lideal_mul(quat_left_ideal_t *product, + const quat_left_ideal_t *lideal, + const quat_alg_elem_t *alpha, + const quat_alg_t *alg); // test/ideal + +/** @brief Computes the inverse ideal (for a left ideal of a maximal order) without putting it under + * HNF + * + * This function returns a lattice not under HNF. 
For careful internal use only + * + * Computes the inverse ideal for lideal as conjugate(lideal)/norm(lideal) + * + * @param inv Output: lattice which is lattice representation of the inverse ideal of lideal + * ATTENTION: is not under HNF. hnf computation must be applied before using lattice functions on it + * @param lideal Left ideal of a maximal order in alg + * @param alg The quaternion algebra + */ +void quat_lideal_inverse_lattice_without_hnf(quat_lattice_t *inv, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg); // ideal + +/** @brief Computes the right transporter of two left ideals of the same maximal order + * + * Following the implementation of ideal isomorphisms in the code of LearningToSQI's sage + * implementation of SQIsign. Computes the right transporter of (J:I) as inverse(I)J. + * + * @param trans Output: lattice which is right transporter from lideal1 to lideal2 (lideal2:lideal1) + * @param lideal1 Left ideal of the same maximal order than lideal1 in alg + * @param lideal2 Left ideal of the same maximal order than lideal1 in alg + * @param alg The quaternion algebra + */ +void quat_lideal_right_transporter(quat_lattice_t *trans, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); + +/** + * @brief Right order of a left ideal + * + * @param order Output: right order of the given ideal + * @param lideal left ideal + * @param alg the quaternion algebra + */ +void quat_lideal_right_order(quat_lattice_t *order, const quat_left_ideal_t *lideal, + const quat_alg_t *alg); // ideal + +/** + * @brief Gram matrix of the trace map of the ideal class + * + * Compute the Gram matrix of the bilinear form + * + * 〈a, b〉 := Tr(a·conj(b)) / norm(lideal) + * + * on the basis of the ideal. This matrix has integer entries and its + * integer congruence class only depends on the ideal class. + * + * @param G Output: Gram matrix of the trace map + * @param lideal left ideal + * @param alg the quaternion algebra + */ +void quat_lideal_class_gram(ibz_mat_4x4_t *G, const quat_left_ideal_t *lideal, const quat_alg_t *alg); + +/** @brief Test if order is maximal + * + * Checks if the discriminant of the order equals the prime p defining the quaternion algebra. + * + * It is not verified whether the order is really an order. The output 1 only means that if it is an + * order, then it is maximal. 
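+ * (For the quaternion algebra ramified exactly at p and infinity, an order is maximal precisely
+ * when its discriminant, as computed by quat_order_discriminant below, equals p, which is what
+ * makes this comparison sufficient.)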
+ * + * @returns 1 if order is maximal (assuming it is an order), 0 otherwise + * @param order An order of the quaternion algebra (assumes to be an order, this is not tested) + * @param alg The quaternion algebra + */ +int quat_order_is_maximal(const quat_lattice_t *order, + const quat_alg_t *alg); // ideal (only in asserts) + +/** @brief Compute the discriminant of an order as sqrt(det(gram(reduced_norm))) + * + * @param disc: Output: The discriminant sqrt(det(gram(reduced_norm))) + * @param order An order of the quaternion algebra + * @param alg The quaternion algebra + */ +int quat_order_discriminant(ibz_t *disc, const quat_lattice_t *order, + const quat_alg_t *alg); // ideal + +/** @} + */ + +/** @internal + * @ingroup quat_normeq + * @{ + */ + +/** @brief Set lattice to O0 + * + * @param O0 Lattice to be set to (1,i,(i+j)/2,(1+ij)/2) + */ +void quat_lattice_O0_set(quat_lattice_t *O0); + +/** @brief Set p-extremal maximal order to O0 + * + * @param O0 p-extremal order to be set to (1,i,(i+j)/2,(1+ij)/2) + */ +void quat_lattice_O0_set_extremal(quat_p_extremal_maximal_order_t *O0); + +/** + * @brief Create an element of a extremal maximal order from its coefficients + * + * @param elem Output: the quaternion element + * @param order the order + * @param coeffs the vector of 4 ibz coefficients + * @param Bpoo quaternion algebra + * + * elem = x + z*y + z*u + t*z*v + * where coeffs = [x,y,u,v] and t = order.t z = order.z + * + */ +void quat_order_elem_create(quat_alg_elem_t *elem, + const quat_p_extremal_maximal_order_t *order, + const ibz_vec_4_t *coeffs, + const quat_alg_t *Bpoo); // normeq, untested + +/** @} + */ +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/isog.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/isog.h new file mode 100644 index 0000000000..b251ca3cdc --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/isog.h @@ -0,0 +1,28 @@ +#ifndef _ISOG_H_ +#define _ISOG_H_ +#include +#include + +/* KPS structure for isogenies of degree 2 or 4 */ +typedef struct +{ + ec_point_t K; +} ec_kps2_t; +typedef struct +{ + ec_point_t K[3]; +} ec_kps4_t; + +void xisog_2(ec_kps2_t *kps, ec_point_t *B, const ec_point_t P); // degree-2 isogeny construction +void xisog_2_singular(ec_kps2_t *kps, ec_point_t *B24, ec_point_t A24); + +void xisog_4(ec_kps4_t *kps, ec_point_t *B, const ec_point_t P); // degree-4 isogeny construction +void xisog_4_singular(ec_kps4_t *kps, ec_point_t *B24, const ec_point_t P, ec_point_t A24); + +void xeval_2(ec_point_t *R, ec_point_t *const Q, const int lenQ, const ec_kps2_t *kps); +void xeval_2_singular(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps2_t *kps); + +void xeval_4(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps4_t *kps); +void xeval_4_singular(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_point_t P, const ec_kps4_t *kps); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/isog_chains.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/isog_chains.c new file mode 100644 index 0000000000..abc9808057 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/isog_chains.c @@ -0,0 +1,241 @@ +#include "isog.h" +#include + +// since we use degree 4 isogeny steps, we need to handle the odd case with care +static uint32_t +ec_eval_even_strategy(ec_curve_t *curve, + ec_point_t *points, + unsigned len_points, + const ec_point_t *kernel, + const int isog_len) +{ + ec_curve_normalize_A24(curve); + ec_point_t A24; + copy_point(&A24, &curve->A24); + + int 
space = 1; + for (int i = 1; i < isog_len; i *= 2) + ++space; + + // Stack of remaining kernel points and their associated orders + ec_point_t splits[space]; + uint16_t todo[space]; + splits[0] = *kernel; + todo[0] = isog_len; + + int current = 0; // Pointer to current top of stack + + // Chain of 4-isogenies + for (int j = 0; j < isog_len / 2; ++j) { + assert(current >= 0); + assert(todo[current] >= 1); + // Get the next point of order 4 + while (todo[current] != 2) { + assert(todo[current] >= 3); + // A new split will be added + ++current; + assert(current < space); + // We set the seed of the new split to be computed and saved + copy_point(&splits[current], &splits[current - 1]); + // if we copied from the very first element, then we perform one additional doubling + unsigned num_dbls = todo[current - 1] / 4 * 2 + todo[current - 1] % 2; + todo[current] = todo[current - 1] - num_dbls; + while (num_dbls--) + xDBL_A24(&splits[current], &splits[current], &A24, false); + } + + if (j == 0) { + assert(fp2_is_one(&A24.z)); + if (!ec_is_four_torsion(&splits[current], curve)) + return -1; + + ec_point_t T; + xDBL_A24(&T, &splits[current], &A24, false); + if (fp2_is_zero(&T.x)) + return -1; // special isogenies not allowed + } else { + assert(todo[current] == 2); +#ifndef NDEBUG + if (fp2_is_zero(&splits[current].z)) + debug_print("splitting point z coordinate is unexpectedly zero"); + + ec_point_t test; + xDBL_A24(&test, &splits[current], &A24, false); + if (fp2_is_zero(&test.z)) + debug_print("z coordinate is unexpectedly zero before doubling"); + xDBL_A24(&test, &test, &A24, false); + if (!fp2_is_zero(&test.z)) + debug_print("z coordinate is unexpectedly not zero after doubling"); +#endif + } + + // Evaluate 4-isogeny + ec_kps4_t kps4; + xisog_4(&kps4, &A24, splits[current]); + xeval_4(splits, splits, current, &kps4); + for (int i = 0; i < current; ++i) + todo[i] -= 2; + xeval_4(points, points, len_points, &kps4); + + --current; + } + assert(isog_len % 2 ? !current : current == -1); + + // Final 2-isogeny + if (isog_len % 2) { +#ifndef NDEBUG + if (fp2_is_zero(&splits[0].z)) + debug_print("splitting point z coordinate is unexpectedly zero"); + ec_point_t test; + copy_point(&test, &splits[0]); + xDBL_A24(&test, &test, &A24, false); + if (!fp2_is_zero(&test.z)) + debug_print("z coordinate is unexpectedly not zero after doubling"); +#endif + + // We need to check the order of this point in case there were no 4-isogenies + if (isog_len == 1 && !ec_is_two_torsion(&splits[0], curve)) + return -1; + if (fp2_is_zero(&splits[0].x)) { + // special isogenies not allowed + // this case can only happen if isog_len == 1; otherwise the + // previous 4-isogenies we computed ensure that $T=(0:1)$ is put + // as the kernel of the dual isogeny + return -1; + } + + ec_kps2_t kps2; + xisog_2(&kps2, &A24, splits[0]); + xeval_2(points, points, len_points, &kps2); + } + + // Output curve in the form (A:C) + A24_to_AC(curve, &A24); + + curve->is_A24_computed_and_normalized = false; + + return 0; +} + +uint32_t +ec_eval_even(ec_curve_t *image, ec_isog_even_t *phi, ec_point_t *points, unsigned len_points) +{ + copy_curve(image, &phi->curve); + return ec_eval_even_strategy(image, points, len_points, &phi->kernel, phi->length); +} + +// naive implementation +uint32_t +ec_eval_small_chain(ec_curve_t *curve, + const ec_point_t *kernel, + int len, + ec_point_t *points, + unsigned len_points, + bool special) // do we allow special isogenies? 
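+// Strategy: at step i the running kernel generator big_K is doubled (len - i - 1) times to reach
+// a 2-torsion point small_K, the 2-isogeny with kernel small_K is computed, and big_K together
+// with the given points are pushed through it. Kernel points with x = 0 ("special" isogenies)
+// are only accepted when `special` is true; any failure is reported by returning (uint32_t)-1.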
+{ + + ec_point_t A24; + AC_to_A24(&A24, curve); + + ec_kps2_t kps; + ec_point_t small_K, big_K; + copy_point(&big_K, kernel); + + for (int i = 0; i < len; i++) { + copy_point(&small_K, &big_K); + // small_K = big_K; + for (int j = 0; j < len - i - 1; j++) { + xDBL_A24(&small_K, &small_K, &A24, false); + } + // Check the order of the point before the first isogeny step + if (i == 0 && !ec_is_two_torsion(&small_K, curve)) + return (uint32_t)-1; + // Perform isogeny step + if (fp2_is_zero(&small_K.x)) { + if (special) { + ec_point_t B24; + xisog_2_singular(&kps, &B24, A24); + xeval_2_singular(&big_K, &big_K, 1, &kps); + xeval_2_singular(points, points, len_points, &kps); + copy_point(&A24, &B24); + } else { + return (uint32_t)-1; + } + } else { + xisog_2(&kps, &A24, small_K); + xeval_2(&big_K, &big_K, 1, &kps); + xeval_2(points, points, len_points, &kps); + } + } + A24_to_AC(curve, &A24); + + curve->is_A24_computed_and_normalized = false; + return 0; +} + +uint32_t +ec_isomorphism(ec_isom_t *isom, const ec_curve_t *from, const ec_curve_t *to) +{ + fp2_t t0, t1, t2, t3, t4; + + fp2_mul(&t0, &from->A, &from->C); + fp2_mul(&t1, &to->A, &to->C); + + fp2_mul(&t2, &t1, &to->C); // toA*toC^2 + fp2_add(&t3, &t2, &t2); + fp2_add(&t3, &t3, &t3); + fp2_add(&t3, &t3, &t3); + fp2_add(&t2, &t2, &t3); // 9*toA*toC^2 + fp2_sqr(&t3, &to->A); + fp2_mul(&t3, &t3, &to->A); // toA^3 + fp2_add(&t3, &t3, &t3); + fp2_sub(&isom->Nx, &t3, &t2); // 2*toA^3-9*toA*toC^2 + fp2_mul(&t2, &t0, &from->A); // fromA^2*fromC + fp2_sqr(&t3, &from->C); + fp2_mul(&t3, &t3, &from->C); // fromC^3 + fp2_add(&t4, &t3, &t3); + fp2_add(&t3, &t4, &t3); // 3*fromC^3 + fp2_sub(&t3, &t3, &t2); // 3*fromC^3-fromA^2*fromC + fp2_mul(&isom->Nx, &isom->Nx, &t3); // lambda_x = (2*toA^3-9*toA*toC^2)*(3*fromC^3-fromA^2*fromC) + + fp2_mul(&t2, &t0, &from->C); // fromA*fromC^2 + fp2_add(&t3, &t2, &t2); + fp2_add(&t3, &t3, &t3); + fp2_add(&t3, &t3, &t3); + fp2_add(&t2, &t2, &t3); // 9*fromA*fromC^2 + fp2_sqr(&t3, &from->A); + fp2_mul(&t3, &t3, &from->A); // fromA^3 + fp2_add(&t3, &t3, &t3); + fp2_sub(&isom->D, &t3, &t2); // 2*fromA^3-9*fromA*fromC^2 + fp2_mul(&t2, &t1, &to->A); // toA^2*toC + fp2_sqr(&t3, &to->C); + fp2_mul(&t3, &t3, &to->C); // toC^3 + fp2_add(&t4, &t3, &t3); + fp2_add(&t3, &t4, &t3); // 3*toC^3 + fp2_sub(&t3, &t3, &t2); // 3*toC^3-toA^2*toC + fp2_mul(&isom->D, &isom->D, &t3); // lambda_z = (2*fromA^3-9*fromA*fromC^2)*(3*toC^3-toA^2*toC) + + // Mont -> SW -> SW -> Mont + fp2_mul(&t0, &to->C, &from->A); + fp2_mul(&t0, &t0, &isom->Nx); // lambda_x*toC*fromA + fp2_mul(&t1, &from->C, &to->A); + fp2_mul(&t1, &t1, &isom->D); // lambda_z*fromC*toA + fp2_sub(&isom->Nz, &t0, &t1); // lambda_x*toC*fromA - lambda_z*fromC*toA + fp2_mul(&t0, &from->C, &to->C); + fp2_add(&t1, &t0, &t0); + fp2_add(&t0, &t0, &t1); // 3*fromC*toC + fp2_mul(&isom->D, &isom->D, &t0); // 3*lambda_z*fromC*toC + fp2_mul(&isom->Nx, &isom->Nx, &t0); // 3*lambda_x*fromC*toC + + return (fp2_is_zero(&isom->Nx) | fp2_is_zero(&isom->D)); +} + +void +ec_iso_eval(ec_point_t *P, ec_isom_t *isom) +{ + fp2_t tmp; + fp2_mul(&P->x, &P->x, &isom->Nx); + fp2_mul(&tmp, &P->z, &isom->Nz); + fp2_add(&P->x, &P->x, &tmp); + fp2_mul(&P->z, &P->z, &isom->D); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/keygen.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/keygen.c new file mode 100644 index 0000000000..c1c206c99d --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/keygen.c @@ -0,0 +1,64 @@ +#include +#include +#include +#include +#include + +void 
+secret_key_init(secret_key_t *sk) +{ + quat_left_ideal_init(&(sk->secret_ideal)); + ibz_mat_2x2_init(&(sk->mat_BAcan_to_BA0_two)); + ec_curve_init(&sk->curve); +} + +void +secret_key_finalize(secret_key_t *sk) +{ + quat_left_ideal_finalize(&(sk->secret_ideal)); + ibz_mat_2x2_finalize(&(sk->mat_BAcan_to_BA0_two)); +} + +int +protocols_keygen(public_key_t *pk, secret_key_t *sk) +{ + int found = 0; + ec_basis_t B_0_two; + + // iterating until a solution has been found + while (!found) { + + found = quat_sampling_random_ideal_O0_given_norm( + &sk->secret_ideal, &SEC_DEGREE, 1, &QUAT_represent_integer_params, NULL); + + // replacing the secret key ideal by a shorter equivalent one for efficiency + found = found && quat_lideal_prime_norm_reduced_equivalent( + &sk->secret_ideal, &QUATALG_PINFTY, QUAT_primality_num_iter, QUAT_equiv_bound_coeff); + + // ideal to isogeny clapotis + + found = found && dim2id2iso_arbitrary_isogeny_evaluation(&B_0_two, &sk->curve, &sk->secret_ideal); + } + + // Assert the isogeny was found and images have the correct order + assert(test_basis_order_twof(&B_0_two, &sk->curve, TORSION_EVEN_POWER)); + + // Compute a deterministic basis with a hint to speed up verification + pk->hint_pk = ec_curve_to_basis_2f_to_hint(&sk->canonical_basis, &sk->curve, TORSION_EVEN_POWER); + + // Assert the deterministic basis we computed has the correct order + assert(test_basis_order_twof(&sk->canonical_basis, &sk->curve, TORSION_EVEN_POWER)); + + // Compute the 2x2 matrix basis change from the canonical basis to the evaluation of our secret + // isogeny + change_of_basis_matrix_tate( + &sk->mat_BAcan_to_BA0_two, &sk->canonical_basis, &B_0_two, &sk->curve, TORSION_EVEN_POWER); + + // Set the public key from the codomain curve + copy_curve(&pk->curve, &sk->curve); + pk->curve.is_A24_computed_and_normalized = false; // We don't send any precomputation + + assert(fp2_is_one(&pk->curve.C) == 0xFFFFFFFF); + + return found; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/l2.c new file mode 100644 index 0000000000..8c49b21d20 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/l2.c @@ -0,0 +1,190 @@ +#include +#include "lll_internals.h" +#include "internal.h" + +#include "dpe.h" + +// Access entry of symmetric matrix +#define SYM(M, i, j) (i < j ? 
&M[j][i] : &M[i][j]) + +void +quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) +{ + dpe_t dpe_const_one, dpe_const_DELTABAR; + + dpe_init(dpe_const_one); + dpe_set_ui(dpe_const_one, 1); + + dpe_init(dpe_const_DELTABAR); + dpe_set_d(dpe_const_DELTABAR, DELTABAR); + + // fp variables for Gram-Schmidt orthogonalization and Lovasz' conditions + dpe_t r[4][4], u[4][4], lovasz[4]; + for (int i = 0; i < 4; i++) { + dpe_init(lovasz[i]); + for (int j = 0; j <= i; j++) { + dpe_init(r[i][j]); + dpe_init(u[i][j]); + } + } + + // threshold for swaps + dpe_t delta_bar; + dpe_init(delta_bar); + dpe_set_d(delta_bar, DELTABAR); + + // Other work variables + dpe_t Xf, tmpF; + dpe_init(Xf); + dpe_init(tmpF); + ibz_t X, tmpI; + ibz_init(&X); + ibz_init(&tmpI); + + // Main L² loop + dpe_set_z(r[0][0], (*G)[0][0]); + int kappa = 1; + while (kappa < 4) { + // size reduce b_κ + int done = 0; + while (!done) { + // Recompute the κ-th row of the Choleski Factorisation + // Loop invariant: + // r[κ][j] ≈ u[κ][j] ‖b_j*‖² ≈ 〈b_κ, b_j*〉 + for (int j = 0; j <= kappa; j++) { + dpe_set_z(r[kappa][j], (*G)[kappa][j]); + for (int k = 0; k < j; k++) { + dpe_mul(tmpF, r[kappa][k], u[j][k]); + dpe_sub(r[kappa][j], r[kappa][j], tmpF); + } + if (j < kappa) + dpe_div(u[kappa][j], r[kappa][j], r[j][j]); + } + + done = 1; + // size reduce + for (int i = kappa - 1; i >= 0; i--) { + if (dpe_cmp_d(u[kappa][i], ETABAR) > 0 || dpe_cmp_d(u[kappa][i], -ETABAR) < 0) { + done = 0; + dpe_set(Xf, u[kappa][i]); + dpe_round(Xf, Xf); + dpe_get_z(X, Xf); + // Update basis: b_κ ← b_κ - X·b_i + for (int j = 0; j < 4; j++) { + ibz_mul(&tmpI, &X, &(*basis)[j][i]); + ibz_sub(&(*basis)[j][kappa], &(*basis)[j][kappa], &tmpI); + } + // Update lower half of the Gram matrix + // = - 2X + X² = + // - X - X( - X·) + //// 〈b_κ, b_κ〉 ← 〈b_κ, b_κ〉 - X·〈b_κ, b_i〉 + ibz_mul(&tmpI, &X, &(*G)[kappa][i]); + ibz_sub(&(*G)[kappa][kappa], &(*G)[kappa][kappa], &tmpI); + for (int j = 0; j < 4; j++) { // works because i < κ + // 〈b_κ, b_j〉 ← 〈b_κ, b_j〉 - X·〈b_i, b_j〉 + ibz_mul(&tmpI, &X, SYM((*G), i, j)); + ibz_sub(SYM((*G), kappa, j), SYM((*G), kappa, j), &tmpI); + } + // After the loop: + //// 〈b_κ,b_κ〉 ← 〈b_κ,b_κ〉 - X·〈b_κ,b_i〉 - X·(〈b_κ,b_i〉 - X·〈b_i, + /// b_i〉) = 〈b_κ - X·b_i, b_κ - X·b_i〉 + // + // Update u[kappa][j] + for (int j = 0; j < i; j++) { + dpe_mul(tmpF, Xf, u[i][j]); + dpe_sub(u[kappa][j], u[kappa][j], tmpF); + } + } + } + } + + // Check Lovasz' conditions + // lovasz[0] = ‖b_κ‖² + dpe_set_z(lovasz[0], (*G)[kappa][kappa]); + // lovasz[i] = lovasz[i-1] - u[κ][i-1]·r[κ][i-1] + for (int i = 1; i < kappa; i++) { + dpe_mul(tmpF, u[kappa][i - 1], r[kappa][i - 1]); + dpe_sub(lovasz[i], lovasz[i - 1], tmpF); + } + int swap; + for (swap = kappa; swap > 0; swap--) { + dpe_mul(tmpF, delta_bar, r[swap - 1][swap - 1]); + if (dpe_cmp(tmpF, lovasz[swap - 1]) < 0) + break; + } + + // Insert b_κ before b_swap + if (kappa != swap) { + // Insert b_κ before b_swap in the basis and in the lower half Gram matrix + for (int j = kappa; j > swap; j--) { + for (int i = 0; i < 4; i++) { + ibz_swap(&(*basis)[i][j], &(*basis)[i][j - 1]); + if (i == j - 1) + ibz_swap(&(*G)[i][i], &(*G)[j][j]); + else if (i != j) + ibz_swap(SYM((*G), i, j), SYM((*G), i, j - 1)); + } + } + // Copy row u[κ] and r[κ] in swap position, ignore what follows + for (int i = 0; i < swap; i++) { + dpe_set(u[swap][i], u[kappa][i]); + dpe_set(r[swap][i], r[kappa][i]); + } + dpe_set(r[swap][swap], lovasz[swap]); + // swap complete + kappa = swap; + } + + kappa += 1; + } + +#ifndef NDEBUG + // Check 
size-reducedness + for (int i = 0; i < 4; i++) + for (int j = 0; j < i; j++) { + dpe_abs(u[i][j], u[i][j]); + assert(dpe_cmp_d(u[i][j], ETABAR) <= 0); + } + // Check Lovasz' conditions + for (int i = 1; i < 4; i++) { + dpe_mul(tmpF, u[i][i - 1], u[i][i - 1]); + dpe_sub(tmpF, dpe_const_DELTABAR, tmpF); + dpe_mul(tmpF, tmpF, r[i - 1][i - 1]); + assert(dpe_cmp(tmpF, r[i][i]) <= 0); + } +#endif + + // Fill in the upper half of the Gram matrix + for (int i = 0; i < 4; i++) { + for (int j = i + 1; j < 4; j++) + ibz_copy(&(*G)[i][j], &(*G)[j][i]); + } + + // Clearinghouse + ibz_finalize(&X); + ibz_finalize(&tmpI); + dpe_clear(dpe_const_one); + dpe_clear(dpe_const_DELTABAR); + dpe_clear(Xf); + dpe_clear(tmpF); + dpe_clear(delta_bar); + for (int i = 0; i < 4; i++) { + dpe_clear(lovasz[i]); + for (int j = 0; j <= i; j++) { + dpe_clear(r[i][j]); + dpe_clear(u[i][j]); + } + } +} + +int +quat_lattice_lll(ibz_mat_4x4_t *red, const quat_lattice_t *lattice, const quat_alg_t *alg) +{ + ibz_mat_4x4_t G; // Gram Matrix + ibz_mat_4x4_init(&G); + quat_lattice_gram(&G, lattice, alg); + ibz_mat_4x4_copy(red, &lattice->basis); + quat_lll_core(&G, red); + ibz_mat_4x4_finalize(&G); + return 0; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lat_ball.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lat_ball.c new file mode 100644 index 0000000000..c7bbb9682f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lat_ball.c @@ -0,0 +1,139 @@ +#include +#include +#include +#include "internal.h" +#include "lll_internals.h" + +int +quat_lattice_bound_parallelogram(ibz_vec_4_t *box, ibz_mat_4x4_t *U, const ibz_mat_4x4_t *G, const ibz_t *radius) +{ + ibz_t denom, rem; + ibz_init(&denom); + ibz_init(&rem); + ibz_mat_4x4_t dualG; + ibz_mat_4x4_init(&dualG); + +// Compute the Gram matrix of the dual lattice +#ifndef NDEBUG + int inv_check = ibz_mat_4x4_inv_with_det_as_denom(&dualG, &denom, G); + assert(inv_check); +#else + (void)ibz_mat_4x4_inv_with_det_as_denom(&dualG, &denom, G); +#endif + // Initialize the dual lattice basis to the identity matrix + ibz_mat_4x4_identity(U); + // Reduce the dual lattice + quat_lll_core(&dualG, U); + + // Compute the parallelogram's bounds + int trivial = 1; + for (int i = 0; i < 4; i++) { + ibz_mul(&(*box)[i], &dualG[i][i], radius); + ibz_div(&(*box)[i], &rem, &(*box)[i], &denom); + ibz_sqrt_floor(&(*box)[i], &(*box)[i]); + trivial &= ibz_is_zero(&(*box)[i]); + } + + // Compute the transpose transformation matrix +#ifndef NDEBUG + int inv = ibz_mat_4x4_inv_with_det_as_denom(U, &denom, U); +#else + (void)ibz_mat_4x4_inv_with_det_as_denom(U, &denom, U); +#endif + // U is unitary, det(U) = ± 1 + ibz_mat_4x4_scalar_mul(U, &denom, U); +#ifndef NDEBUG + assert(inv); + ibz_abs(&denom, &denom); + assert(ibz_is_one(&denom)); +#endif + + ibz_mat_4x4_finalize(&dualG); + ibz_finalize(&denom); + ibz_finalize(&rem); + return !trivial; +} + +int +quat_lattice_sample_from_ball(quat_alg_elem_t *res, + const quat_lattice_t *lattice, + const quat_alg_t *alg, + const ibz_t *radius) +{ + assert(ibz_cmp(radius, &ibz_const_zero) > 0); + + ibz_vec_4_t box; + ibz_vec_4_init(&box); + ibz_mat_4x4_t U, G; + ibz_mat_4x4_init(&U); + ibz_mat_4x4_init(&G); + ibz_vec_4_t x; + ibz_vec_4_init(&x); + ibz_t rad, tmp; + ibz_init(&rad); + ibz_init(&tmp); + + // Compute the Gram matrix of the lattice + quat_lattice_gram(&G, lattice, alg); + + // Correct ball radius by the denominator + ibz_mul(&rad, radius, &lattice->denom); + ibz_mul(&rad, &rad, &lattice->denom); + // Correct by 2 (Gram matrix corresponds 
to twice the norm) + ibz_mul(&rad, &rad, &ibz_const_two); + + // Compute a bounding parallelogram for the ball, stop if it only + // contains the origin + int ok = quat_lattice_bound_parallelogram(&box, &U, &G, &rad); + if (!ok) + goto err; + + // Rejection sampling from the parallelogram +#ifndef NDEBUG + int cnt = 0; +#endif + do { + // Sample vector + for (int i = 0; i < 4; i++) { + if (ibz_is_zero(&box[i])) { + ibz_copy(&x[i], &ibz_const_zero); + } else { + ibz_add(&tmp, &box[i], &box[i]); + ok &= ibz_rand_interval(&x[i], &ibz_const_zero, &tmp); + ibz_sub(&x[i], &x[i], &box[i]); + if (!ok) + goto err; + } + } + // Map to parallelogram + ibz_mat_4x4_eval_t(&x, &x, &U); + // Evaluate quadratic form + quat_qf_eval(&tmp, &G, &x); +#ifndef NDEBUG + cnt++; + if (cnt % 100 == 0) + printf("Lattice sampling rejected %d times", cnt - 1); +#endif + } while (ibz_is_zero(&tmp) || (ibz_cmp(&tmp, &rad) > 0)); + + // Evaluate linear combination + ibz_mat_4x4_eval(&(res->coord), &(lattice->basis), &x); + ibz_copy(&(res->denom), &(lattice->denom)); + quat_alg_normalize(res); + +#ifndef NDEBUG + // Check norm is smaller than radius + quat_alg_norm(&tmp, &rad, res, alg); + ibz_mul(&rad, &rad, radius); + assert(ibz_cmp(&tmp, &rad) <= 0); +#endif + +err: + ibz_finalize(&rad); + ibz_finalize(&tmp); + ibz_vec_4_finalize(&x); + ibz_mat_4x4_finalize(&U); + ibz_mat_4x4_finalize(&G); + ibz_vec_4_finalize(&box); + return ok; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lattice.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lattice.c new file mode 100644 index 0000000000..c98bae9499 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lattice.c @@ -0,0 +1,328 @@ +#include +#include +#include "internal.h" + +// helper functions +int +quat_lattice_equal(const quat_lattice_t *lat1, const quat_lattice_t *lat2) +{ + int equal = 1; + quat_lattice_t a, b; + quat_lattice_init(&a); + quat_lattice_init(&b); + quat_lattice_reduce_denom(&a, lat1); + quat_lattice_reduce_denom(&b, lat2); + ibz_abs(&(a.denom), &(a.denom)); + ibz_abs(&(b.denom), &(b.denom)); + quat_lattice_hnf(&a); + quat_lattice_hnf(&b); + equal = equal && (ibz_cmp(&(a.denom), &(b.denom)) == 0); + equal = equal && ibz_mat_4x4_equal(&(a.basis), &(b.basis)); + quat_lattice_finalize(&a); + quat_lattice_finalize(&b); + return (equal); +} + +// sublattice test +int +quat_lattice_inclusion(const quat_lattice_t *sublat, const quat_lattice_t *overlat) +{ + int res; + quat_lattice_t sum; + quat_lattice_init(&sum); + quat_lattice_add(&sum, overlat, sublat); + res = quat_lattice_equal(&sum, overlat); + quat_lattice_finalize(&sum); + return (res); +} + +void +quat_lattice_reduce_denom(quat_lattice_t *reduced, const quat_lattice_t *lat) +{ + ibz_t gcd; + ibz_init(&gcd); + ibz_mat_4x4_gcd(&gcd, &(lat->basis)); + ibz_gcd(&gcd, &gcd, &(lat->denom)); + ibz_mat_4x4_scalar_div(&(reduced->basis), &gcd, &(lat->basis)); + ibz_div(&(reduced->denom), &gcd, &(lat->denom), &gcd); + ibz_abs(&(reduced->denom), &(reduced->denom)); + ibz_finalize(&gcd); +} + +void +quat_lattice_conjugate_without_hnf(quat_lattice_t *conj, const quat_lattice_t *lat) +{ + ibz_mat_4x4_copy(&(conj->basis), &(lat->basis)); + ibz_copy(&(conj->denom), &(lat->denom)); + + for (int row = 1; row < 4; ++row) { + for (int col = 0; col < 4; ++col) { + ibz_neg(&(conj->basis[row][col]), &(conj->basis[row][col])); + } + } +} + +// Method described in https://cseweb.ucsd.edu/classes/sp14/cse206A-a/lec4.pdf consulted on 19 of +// May 2023, 12h40 CEST +void 
+quat_lattice_dual_without_hnf(quat_lattice_t *dual, const quat_lattice_t *lat) +{ + ibz_mat_4x4_t inv; + ibz_t det; + ibz_init(&det); + ibz_mat_4x4_init(&inv); + ibz_mat_4x4_inv_with_det_as_denom(&inv, &det, &(lat->basis)); + ibz_mat_4x4_transpose(&inv, &inv); + // dual_denom = det/lat_denom + ibz_mat_4x4_scalar_mul(&(dual->basis), &(lat->denom), &inv); + ibz_copy(&(dual->denom), &det); + + ibz_finalize(&det); + ibz_mat_4x4_finalize(&inv); +} + +void +quat_lattice_add(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2) +{ + ibz_vec_4_t generators[8]; + ibz_mat_4x4_t tmp; + ibz_t det1, det2, detprod; + ibz_init(&det1); + ibz_init(&det2); + ibz_init(&detprod); + for (int i = 0; i < 8; i++) + ibz_vec_4_init(&(generators[i])); + ibz_mat_4x4_init(&tmp); + ibz_mat_4x4_scalar_mul(&tmp, &(lat1->denom), &(lat2->basis)); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(generators[j][i]), &(tmp[i][j])); + } + } + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det1, &tmp); + ibz_mat_4x4_scalar_mul(&tmp, &(lat2->denom), &(lat1->basis)); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(generators[4 + j][i]), &(tmp[i][j])); + } + } + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det2, &tmp); + assert(!ibz_is_zero(&det1)); + assert(!ibz_is_zero(&det2)); + ibz_gcd(&detprod, &det1, &det2); + ibz_mat_4xn_hnf_mod_core(&(res->basis), 8, generators, &detprod); + ibz_mul(&(res->denom), &(lat1->denom), &(lat2->denom)); + quat_lattice_reduce_denom(res, res); + ibz_mat_4x4_finalize(&tmp); + ibz_finalize(&det1); + ibz_finalize(&det2); + ibz_finalize(&detprod); + for (int i = 0; i < 8; i++) + ibz_vec_4_finalize(&(generators[i])); +} + +// method described in https://cseweb.ucsd.edu/classes/sp14/cse206A-a/lec4.pdf consulted on 19 of +// May 2023, 12h40 CEST +void +quat_lattice_intersect(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2) +{ + quat_lattice_t dual1, dual2, dual_res; + quat_lattice_init(&dual1); + quat_lattice_init(&dual2); + quat_lattice_init(&dual_res); + quat_lattice_dual_without_hnf(&dual1, lat1); + + quat_lattice_dual_without_hnf(&dual2, lat2); + quat_lattice_add(&dual_res, &dual1, &dual2); + quat_lattice_dual_without_hnf(res, &dual_res); + quat_lattice_hnf(res); // could be removed if we do not expect HNF any more + quat_lattice_finalize(&dual1); + quat_lattice_finalize(&dual2); + quat_lattice_finalize(&dual_res); +} + +void +quat_lattice_mat_alg_coord_mul_without_hnf(ibz_mat_4x4_t *prod, + const ibz_mat_4x4_t *lat, + const ibz_vec_4_t *coord, + const quat_alg_t *alg) +{ + ibz_vec_4_t p, a; + ibz_vec_4_init(&p); + ibz_vec_4_init(&a); + for (int i = 0; i < 4; i++) { + ibz_vec_4_copy_ibz(&a, &((*lat)[0][i]), &((*lat)[1][i]), &((*lat)[2][i]), &((*lat)[3][i])); + quat_alg_coord_mul(&p, &a, coord, alg); + ibz_copy(&((*prod)[0][i]), &(p[0])); + ibz_copy(&((*prod)[1][i]), &(p[1])); + ibz_copy(&((*prod)[2][i]), &(p[2])); + ibz_copy(&((*prod)[3][i]), &(p[3])); + } + ibz_vec_4_finalize(&p); + ibz_vec_4_finalize(&a); +} + +void +quat_lattice_alg_elem_mul(quat_lattice_t *prod, + const quat_lattice_t *lat, + const quat_alg_elem_t *elem, + const quat_alg_t *alg) +{ + quat_lattice_mat_alg_coord_mul_without_hnf(&(prod->basis), &(lat->basis), &(elem->coord), alg); + ibz_mul(&(prod->denom), &(lat->denom), &(elem->denom)); + quat_lattice_hnf(prod); +} + +void +quat_lattice_mul(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2, const quat_alg_t *alg) +{ + ibz_vec_4_t elem1, elem2, elem_res; + 
ibz_vec_4_t generators[16]; + ibz_mat_4x4_t detmat; + ibz_t det; + quat_lattice_t lat_res; + ibz_init(&det); + ibz_mat_4x4_init(&detmat); + quat_lattice_init(&lat_res); + ibz_vec_4_init(&elem1); + ibz_vec_4_init(&elem2); + ibz_vec_4_init(&elem_res); + for (int i = 0; i < 16; i++) + ibz_vec_4_init(&(generators[i])); + for (int k = 0; k < 4; k++) { + ibz_vec_4_copy_ibz( + &elem1, &(lat1->basis[0][k]), &(lat1->basis[1][k]), &(lat1->basis[2][k]), &(lat1->basis[3][k])); + for (int i = 0; i < 4; i++) { + ibz_vec_4_copy_ibz( + &elem2, &(lat2->basis[0][i]), &(lat2->basis[1][i]), &(lat2->basis[2][i]), &(lat2->basis[3][i])); + quat_alg_coord_mul(&elem_res, &elem1, &elem2, alg); + for (int j = 0; j < 4; j++) { + if (k == 0) + ibz_copy(&(detmat[i][j]), &(elem_res[j])); + ibz_copy(&(generators[4 * k + i][j]), &(elem_res[j])); + } + } + } + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &detmat); + ibz_abs(&det, &det); + ibz_mat_4xn_hnf_mod_core(&(res->basis), 16, generators, &det); + ibz_mul(&(res->denom), &(lat1->denom), &(lat2->denom)); + quat_lattice_reduce_denom(res, res); + ibz_vec_4_finalize(&elem1); + ibz_vec_4_finalize(&elem2); + ibz_vec_4_finalize(&elem_res); + quat_lattice_finalize(&lat_res); + ibz_finalize(&det); + ibz_mat_4x4_finalize(&(detmat)); + for (int i = 0; i < 16; i++) + ibz_vec_4_finalize(&(generators[i])); +} + +// lattice assumed of full rank +int +quat_lattice_contains(ibz_vec_4_t *coord, const quat_lattice_t *lat, const quat_alg_elem_t *x) +{ + int divisible = 0; + ibz_vec_4_t work_coord; + ibz_mat_4x4_t inv; + ibz_t det, prod; + ibz_init(&prod); + ibz_init(&det); + ibz_vec_4_init(&work_coord); + ibz_mat_4x4_init(&inv); + ibz_mat_4x4_inv_with_det_as_denom(&inv, &det, &(lat->basis)); + assert(!ibz_is_zero(&det)); + ibz_mat_4x4_eval(&work_coord, &inv, &(x->coord)); + ibz_vec_4_scalar_mul(&(work_coord), &(lat->denom), &work_coord); + ibz_mul(&prod, &(x->denom), &det); + divisible = ibz_vec_4_scalar_div(&work_coord, &prod, &work_coord); + // copy result + if (divisible && (coord != NULL)) { + for (int i = 0; i < 4; i++) { + ibz_copy(&((*coord)[i]), &(work_coord[i])); + } + } + ibz_finalize(&prod); + ibz_finalize(&det); + ibz_mat_4x4_finalize(&inv); + ibz_vec_4_finalize(&work_coord); + return (divisible); +} + +void +quat_lattice_index(ibz_t *index, const quat_lattice_t *sublat, const quat_lattice_t *overlat) +{ + ibz_t tmp, det; + ibz_init(&tmp); + ibz_init(&det); + + // det = det(sublat->basis) + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &sublat->basis); + // tmp = (overlat->denom)⁴ + ibz_mul(&tmp, &overlat->denom, &overlat->denom); + ibz_mul(&tmp, &tmp, &tmp); + // index = (overlat->denom)⁴ · det(sublat->basis) + ibz_mul(index, &det, &tmp); + // tmp = (sublat->denom)⁴ + ibz_mul(&tmp, &sublat->denom, &sublat->denom); + ibz_mul(&tmp, &tmp, &tmp); + // det = det(overlat->basis) + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &overlat->basis); + // tmp = (sublat->denom)⁴ · det(overlat->basis) + ibz_mul(&tmp, &tmp, &det); + // index = index / tmp + ibz_div(index, &tmp, index, &tmp); + assert(ibz_is_zero(&tmp)); + // index = |index| + ibz_abs(index, index); + + ibz_finalize(&tmp); + ibz_finalize(&det); +} + +void +quat_lattice_hnf(quat_lattice_t *lat) +{ + ibz_t mod; + ibz_vec_4_t generators[4]; + ibz_init(&mod); + ibz_mat_4x4_inv_with_det_as_denom(NULL, &mod, &(lat->basis)); + ibz_abs(&mod, &mod); + for (int i = 0; i < 4; i++) + ibz_vec_4_init(&(generators[i])); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(generators[j][i]), &(lat->basis[i][j])); + } 
+ } + ibz_mat_4xn_hnf_mod_core(&(lat->basis), 4, generators, &mod); + quat_lattice_reduce_denom(lat, lat); + ibz_finalize(&mod); + for (int i = 0; i < 4; i++) + ibz_vec_4_finalize(&(generators[i])); +} + +void +quat_lattice_gram(ibz_mat_4x4_t *G, const quat_lattice_t *lattice, const quat_alg_t *alg) +{ + ibz_t tmp; + ibz_init(&tmp); + for (int i = 0; i < 4; i++) { + for (int j = 0; j <= i; j++) { + ibz_set(&(*G)[i][j], 0); + for (int k = 0; k < 4; k++) { + ibz_mul(&tmp, &(lattice->basis)[k][i], &(lattice->basis)[k][j]); + if (k >= 2) + ibz_mul(&tmp, &tmp, &alg->p); + ibz_add(&(*G)[i][j], &(*G)[i][j], &tmp); + } + ibz_mul(&(*G)[i][j], &(*G)[i][j], &ibz_const_two); + } + } + for (int i = 0; i < 4; i++) { + for (int j = i + 1; j < 4; j++) { + ibz_copy(&(*G)[i][j], &(*G)[j][i]); + } + } + ibz_finalize(&tmp); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lll_applications.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lll_applications.c new file mode 100644 index 0000000000..6c763b8c04 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lll_applications.c @@ -0,0 +1,127 @@ +#include +#include +#include "lll_internals.h" + +void +quat_lideal_reduce_basis(ibz_mat_4x4_t *reduced, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + ibz_t gram_corrector; + ibz_init(&gram_corrector); + ibz_mul(&gram_corrector, &(lideal->lattice.denom), &(lideal->lattice.denom)); + quat_lideal_class_gram(gram, lideal, alg); + ibz_mat_4x4_copy(reduced, &(lideal->lattice.basis)); + quat_lll_core(gram, reduced); + ibz_mat_4x4_scalar_mul(gram, &gram_corrector, gram); + for (int i = 0; i < 4; i++) { + ibz_div_2exp(&((*gram)[i][i]), &((*gram)[i][i]), 1); + for (int j = i + 1; j < 4; j++) { + ibz_set(&((*gram)[i][j]), 0); + } + } + ibz_finalize(&gram_corrector); +} + +void +quat_lideal_lideal_mul_reduced(quat_left_ideal_t *prod, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg) +{ + ibz_mat_4x4_t red; + ibz_mat_4x4_init(&red); + + quat_lattice_mul(&(prod->lattice), &(lideal1->lattice), &(lideal2->lattice), alg); + prod->parent_order = lideal1->parent_order; + quat_lideal_norm(prod); + quat_lideal_reduce_basis(&red, gram, prod, alg); + ibz_mat_4x4_copy(&(prod->lattice.basis), &red); + + ibz_mat_4x4_finalize(&red); +} + +int +quat_lideal_prime_norm_reduced_equivalent(quat_left_ideal_t *lideal, + const quat_alg_t *alg, + const int primality_num_iter, + const int equiv_bound_coeff) +{ + ibz_mat_4x4_t gram, red; + ibz_mat_4x4_init(&gram); + ibz_mat_4x4_init(&red); + + int found = 0; + + // computing the reduced basis + quat_lideal_reduce_basis(&red, &gram, lideal, alg); + + quat_alg_elem_t new_alpha; + quat_alg_elem_init(&new_alpha); + ibz_t tmp, remainder, adjusted_norm; + ibz_init(&tmp); + ibz_init(&remainder); + ibz_init(&adjusted_norm); + + ibz_mul(&adjusted_norm, &lideal->lattice.denom, &lideal->lattice.denom); + + int ctr = 0; + + // equiv_num_iter = (2 * equiv_bound_coeff + 1)^4 + assert(equiv_bound_coeff < (1 << 20)); + int equiv_num_iter = (2 * equiv_bound_coeff + 1); + equiv_num_iter = equiv_num_iter * equiv_num_iter; + equiv_num_iter = equiv_num_iter * equiv_num_iter; + + while (!found && ctr < equiv_num_iter) { + ctr++; + // we select our linear combination at random + ibz_rand_interval_minm_m(&new_alpha.coord[0], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord[1], equiv_bound_coeff); + 
ibz_rand_interval_minm_m(&new_alpha.coord[2], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord[3], equiv_bound_coeff); + + // computation of the norm of the vector sampled + quat_qf_eval(&tmp, &gram, &new_alpha.coord); + + // compute the norm of the equivalent ideal + // can be improved by removing the power of two first and the odd part only if the trial + // division failed (this should always be called on an ideal of norm 2^x * N for some + // big prime N ) + ibz_div(&tmp, &remainder, &tmp, &adjusted_norm); + + // debug : check that the remainder is zero + assert(ibz_is_zero(&remainder)); + + // pseudo-primality test + if (ibz_probab_prime(&tmp, primality_num_iter)) { + + // computes the generator using a matrix multiplication + ibz_mat_4x4_eval(&new_alpha.coord, &red, &new_alpha.coord); + ibz_copy(&new_alpha.denom, &lideal->lattice.denom); + assert(quat_lattice_contains(NULL, &lideal->lattice, &new_alpha)); + + quat_alg_conj(&new_alpha, &new_alpha); + ibz_mul(&new_alpha.denom, &new_alpha.denom, &lideal->norm); + quat_lideal_mul(lideal, lideal, &new_alpha, alg); + assert(ibz_probab_prime(&lideal->norm, primality_num_iter)); + + found = 1; + break; + } + } + assert(found); + + ibz_finalize(&tmp); + ibz_finalize(&remainder); + ibz_finalize(&adjusted_norm); + quat_alg_elem_finalize(&new_alpha); + + ibz_mat_4x4_finalize(&gram); + ibz_mat_4x4_finalize(&red); + + return found; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lll_internals.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lll_internals.h new file mode 100644 index 0000000000..e8d90141ac --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lll_internals.h @@ -0,0 +1,238 @@ +#ifndef LLL_INTERNALS_H +#define LLL_INTERNALS_H + +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations of functions only used for the LLL tets + */ + +#include + +/** @internal + * @ingroup quat_helpers + * @defgroup lll_internal Functions only used for LLL or its tests + * @{ + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_params Parameters used by the L2 implementation (floats) and its tests (ints) + * @{ + */ + +#define DELTABAR 0.995 +#define DELTA_NUM 99 +#define DELTA_DENOM 100 + +#define ETABAR 0.505 +#define EPSILON_NUM 1 +#define EPSILON_DENOM 100 + +#define PREC 64 +/** + * @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup ibq_t Types for rationals + * @{ + */ + +/** @brief Type for fractions of integers + * + * @typedef ibq_t + * + * For fractions of integers of arbitrary size, used by intbig module, using gmp + */ +typedef ibz_t ibq_t[2]; +typedef ibq_t ibq_vec_4_t[4]; +typedef ibq_t ibq_mat_4x4_t[4][4]; + +/**@} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_ibq_c Constructors and Destructors and Printers + * @{ + */ + +void ibq_init(ibq_t *x); +void ibq_finalize(ibq_t *x); + +void ibq_mat_4x4_init(ibq_mat_4x4_t *mat); +void ibq_mat_4x4_finalize(ibq_mat_4x4_t *mat); + +void ibq_vec_4_init(ibq_vec_4_t *vec); +void ibq_vec_4_finalize(ibq_vec_4_t *vec); + +void ibq_mat_4x4_print(const ibq_mat_4x4_t *mat); +void ibq_vec_4_print(const ibq_vec_4_t *vec); + +/** @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_qa Basic fraction arithmetic + * @{ + */ + +/** @brief sum=a+b + */ +void ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b); + +/** @brief diff=a-b + */ +void ibq_sub(ibq_t *diff, const ibq_t *a, const ibq_t *b); + +/** @brief neg=-x + */ +void ibq_neg(ibq_t *neg, const ibq_t *x); + +/** @brief abs=|x| + */ +void 
ibq_abs(ibq_t *abs, const ibq_t *x); + +/** @brief prod=a*b + */ +void ibq_mul(ibq_t *prod, const ibq_t *a, const ibq_t *b); + +/** @brief inv=1/x + * + * @returns 0 if x is 0, 1 if inverse exists and was computed + */ +int ibq_inv(ibq_t *inv, const ibq_t *x); + +/** @brief Compare a and b + * + * @returns a positive value if a > b, zero if a = b, and a negative value if a < b + */ +int ibq_cmp(const ibq_t *a, const ibq_t *b); + +/** @brief Test if x is 0 + * + * @returns 1 if x=0, 0 otherwise + */ +int ibq_is_zero(const ibq_t *x); + +/** @brief Test if x is 1 + * + * @returns 1 if x=1, 0 otherwise + */ +int ibq_is_one(const ibq_t *x); + +/** @brief Set q to a/b if b not 0 + * + * @returns 1 if b not 0 and q is set, 0 otherwise + */ +int ibq_set(ibq_t *q, const ibz_t *a, const ibz_t *b); + +/** @brief Copy value into target + */ +void ibq_copy(ibq_t *target, const ibq_t *value); + +/** @brief Checks if q is an integer + * + * @returns 1 if yes, 0 if not + */ +int ibq_is_ibz(const ibq_t *q); + +/** + * @brief Converts a fraction q to an integer y, if q is an integer. + * + * @returns 1 if z is an integer, 0 if not + */ +int ibq_to_ibz(ibz_t *z, const ibq_t *q); +/** @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup quat_lll_verify_helpers Helper functions for lll verification in dimension 4 + * @{ + */ + +/** @brief Set ibq to parameters delta and eta = 1/2 + epsilon using L2 constants + */ +void quat_lll_set_ibq_parameters(ibq_t *delta, ibq_t *eta); + +/** @brief Set an ibq vector to 4 given integer coefficients + */ +void ibq_vec_4_copy_ibz(ibq_vec_4_t *vec, + const ibz_t *coeff0, + const ibz_t *coeff1, + const ibz_t *coeff2, + const ibz_t *coeff3); // dim4, test/dim4 + +/** @brief Bilinear form vec00*vec10+vec01*vec11+q*vec02*vec12+q*vec03*vec13 for ibz_q + */ +void quat_lll_bilinear(ibq_t *b, const ibq_vec_4_t *vec0, const ibq_vec_4_t *vec1, + const ibz_t *q); // dim4, test/dim4 + +/** @brief Outputs the transposition of the orthogonalised matrix of mat (as fractions) + * + * For the bilinear form vec00*vec10+vec01*vec11+q*vec02*vec12+q*vec03*vec13 + */ +void quat_lll_gram_schmidt_transposed_with_ibq(ibq_mat_4x4_t *orthogonalised_transposed, + const ibz_mat_4x4_t *mat, + const ibz_t *q); // dim4 + +/** @brief Verifies if mat is lll-reduced for parameter coeff and norm defined by q + * + * For the bilinear form vec00*vec10+vec01*vec11+q*vec02*vec12+q*vec03*vec13 + */ +int quat_lll_verify(const ibz_mat_4x4_t *mat, + const ibq_t *delta, + const ibq_t *eta, + const quat_alg_t *alg); // test/lattice, test/dim4 + /** @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_internal_gram Internal LLL function + * @{ + */ + +/** @brief In-place L2 reduction core function + * + * Given a lattice basis represented by the columns of a 4x4 matrix + * and the Gram matrix of its bilinear form, L2-reduces the basis + * in-place and updates the Gram matrix accordingly. 
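+ * The Gram matrix passed in must correspond to the basis columns (for instance as computed by
+ * quat_lattice_gram); quat_lattice_lll below calls it exactly that way.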
+ * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param G In/Output: Gram matrix of the lattice basis + * @param basis In/Output: lattice basis + */ +void quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis); + +/** + * @brief LLL reduction on 4-dimensional lattice + * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param red Output: LLL reduced basis + * @param lattice In/Output: lattice with 4-dimensional basis + * @param alg The quaternion algebra + */ +int quat_lattice_lll(ibz_mat_4x4_t *red, const quat_lattice_t *lattice, const quat_alg_t *alg); + +/** + * @} + */ + +// end of lll_internal +/** @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lvlx.cmake b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lvlx.cmake new file mode 100644 index 0000000000..9b8c0f9287 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lvlx.cmake @@ -0,0 +1,12 @@ +set(SOURCE_FILES_ID2ISO_GENERIC_REF + ${LVLX_DIR}/id2iso.c + ${LVLX_DIR}/dim2id2iso.c +) + +add_library(${LIB_ID2ISO_${SVARIANT_UPPER}} STATIC ${SOURCE_FILES_ID2ISO_GENERIC_REF}) +target_link_libraries(${LIB_ID2ISO_${SVARIANT_UPPER}} ${LIB_QUATERNION} ${LIB_PRECOMP_${SVARIANT_UPPER}} ${LIB_MP} ${LIB_GF_${SVARIANT_UPPER}} ${LIB_EC_${SVARIANT_UPPER}} ${LIB_HD_${SVARIANT_UPPER}}) +target_include_directories(${LIB_ID2ISO_${SVARIANT_UPPER}} PRIVATE ${INC_PUBLIC} ${INC_PRECOMP_${SVARIANT_UPPER}} ${INC_QUATERNION} ${INC_MP} ${INC_GF} ${INC_GF_${SVARIANT_UPPER}} ${INC_EC} ${INC_HD} ${INC_ID2ISO} ${INC_COMMON}) +target_compile_options(${LIB_ID2ISO_${SVARIANT_UPPER}} PRIVATE ${C_OPT_FLAGS}) +target_compile_definitions(${LIB_ID2ISO_${SVARIANT_UPPER}} PUBLIC SQISIGN_VARIANT=${SVARIANT_LOWER}) + +add_subdirectory(test) diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mem.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mem.c new file mode 100644 index 0000000000..4956beda50 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mem.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: Apache-2.0 + +#include +#include +#include + +void +sqisign_secure_free(void *mem, size_t size) +{ + if (mem) { + typedef void *(*memset_t)(void *, int, size_t); + static volatile memset_t memset_func = memset; + memset_func(mem, 0, size); + free(mem); + } +} +void +sqisign_secure_clear(void *mem, size_t size) +{ + typedef void *(*memset_t)(void *, int, size_t); + static volatile memset_t memset_func = memset; + memset_func(mem, 0, size); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mem.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mem.h new file mode 100644 index 0000000000..ab8f6c6481 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mem.h @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef MEM_H +#define MEM_H +#include +#include + +/** + * Clears and frees allocated memory. + * + * @param[out] mem Memory to be cleared and freed. + * @param size Size of memory to be cleared and freed. + */ +void sqisign_secure_free(void *mem, size_t size); + +/** + * Clears memory. + * + * @param[out] mem Memory to be cleared. + * @param size Size of memory to be cleared. 
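+ *
+ * The implementation (mem.c) clears through a volatile function pointer to memset, so the
+ * call cannot be optimized away.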
+ */ +void sqisign_secure_clear(void *mem, size_t size); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.c new file mode 100644 index 0000000000..396d505aec --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.c @@ -0,0 +1,73 @@ +#include +#include +#if defined(MINI_GMP) +#include "mini-gmp.h" +#else +// This configuration is used only for testing +#include +#endif +#include + +// Exported for testing +int +mini_mpz_legendre(const mpz_t a, const mpz_t p) +{ + int res = 0; + mpz_t e; + mpz_init_set(e, p); + mpz_sub_ui(e, e, 1); + mpz_fdiv_q_2exp(e, e, 1); + mpz_powm(e, a, e, p); + + if (mpz_cmp_ui(e, 1) <= 0) { + res = mpz_get_si(e); + } else { + res = -1; + } + mpz_clear(e); + return res; +} + +#if defined(MINI_GMP) +int +mpz_legendre(const mpz_t a, const mpz_t p) +{ + return mini_mpz_legendre(a, p); +} +#endif + +// Exported for testing +double +mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op) +{ + double ret; + int tmp_exp; + mpz_t tmp; + + // Handle the case where op is 0 + if (mpz_cmp_ui(op, 0) == 0) { + *exp = 0; + return 0.0; + } + + *exp = mpz_sizeinbase(op, 2); + + mpz_init_set(tmp, op); + + if (*exp > DBL_MAX_EXP) { + mpz_fdiv_q_2exp(tmp, tmp, *exp - DBL_MAX_EXP); + } + + ret = frexp(mpz_get_d(tmp), &tmp_exp); + mpz_clear(tmp); + + return ret; +} + +#if defined(MINI_GMP) +double +mpz_get_d_2exp(signed long int *exp, const mpz_t op) +{ + return mini_mpz_get_d_2exp(exp, op); +} +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.h new file mode 100644 index 0000000000..0113cfdfe6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.h @@ -0,0 +1,19 @@ +#ifndef MINI_GMP_EXTRA_H +#define MINI_GMP_EXTRA_H + +#if defined MINI_GMP +#include "mini-gmp.h" + +typedef long mp_exp_t; + +int mpz_legendre(const mpz_t a, const mpz_t p); +double mpz_get_d_2exp(signed long int *exp, const mpz_t op); +#else +// This configuration is used only for testing +#include +#endif + +int mini_mpz_legendre(const mpz_t a, const mpz_t p); +double mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp.c new file mode 100644 index 0000000000..3830ab2031 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp.c @@ -0,0 +1,4671 @@ +/* Note: The code from mini-gmp is modifed from the original by + commenting out the definition of GMP_LIMB_BITS */ + +/* + mini-gmp, a minimalistic implementation of a GNU GMP subset. + + Contributed to the GNU project by Niels Möller + Additional functionalities and improvements by Marco Bodrato. + +Copyright 1991-1997, 1999-2022 Free Software Foundation, Inc. + +This file is part of the GNU MP Library. + +The GNU MP Library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 2 of the License, or (at your option) any + later version. + +or both in parallel, as here. 
+ +The GNU MP Library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the GNU MP Library. If not, +see https://www.gnu.org/licenses/. */ + +/* NOTE: All functions in this file which are not declared in + mini-gmp.h are internal, and are not intended to be compatible + with GMP or with future versions of mini-gmp. */ + +/* Much of the material copied from GMP files, including: gmp-impl.h, + longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c, + mpn/generic/lshift.c, mpn/generic/mul_1.c, + mpn/generic/mul_basecase.c, mpn/generic/rshift.c, + mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c, + mpn/generic/submul_1.c. */ + +#include +#include +#include +#include +#include +#include + +#include "mini-gmp.h" + +#if !defined(MINI_GMP_DONT_USE_FLOAT_H) +#include +#endif + + +/* Macros */ +/* Removed from here as it is passed as a compiler command-line definition */ +/* #define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT) */ + +#define GMP_LIMB_MAX ((mp_limb_t) ~ (mp_limb_t) 0) +#define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1)) + +#define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2)) +#define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1) + +#define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT) +#define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1)) + +#define GMP_ABS(x) ((x) >= 0 ? (x) : -(x)) +#define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1)) + +#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b)) +#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b)) + +#define GMP_CMP(a,b) (((a) > (b)) - ((a) < (b))) + +#if defined(DBL_MANT_DIG) && FLT_RADIX == 2 +#define GMP_DBL_MANT_BITS DBL_MANT_DIG +#else +#define GMP_DBL_MANT_BITS (53) +#endif + +/* Return non-zero if xp,xsize and yp,ysize overlap. + If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no + overlap. If both these are false, there's an overlap. 
*/ +#define GMP_MPN_OVERLAP_P(xp, xsize, yp, ysize) \ + ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp)) + +#define gmp_assert_nocarry(x) do { \ + mp_limb_t __cy = (x); \ + assert (__cy == 0); \ + (void) (__cy); \ + } while (0) + +#define gmp_clz(count, x) do { \ + mp_limb_t __clz_x = (x); \ + unsigned __clz_c = 0; \ + int LOCAL_SHIFT_BITS = 8; \ + if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) \ + for (; \ + (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \ + __clz_c += 8) \ + { __clz_x <<= LOCAL_SHIFT_BITS; } \ + for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++) \ + __clz_x <<= 1; \ + (count) = __clz_c; \ + } while (0) + +#define gmp_ctz(count, x) do { \ + mp_limb_t __ctz_x = (x); \ + unsigned __ctz_c = 0; \ + gmp_clz (__ctz_c, __ctz_x & - __ctz_x); \ + (count) = GMP_LIMB_BITS - 1 - __ctz_c; \ + } while (0) + +#define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \ + do { \ + mp_limb_t __x; \ + __x = (al) + (bl); \ + (sh) = (ah) + (bh) + (__x < (al)); \ + (sl) = __x; \ + } while (0) + +#define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \ + do { \ + mp_limb_t __x; \ + __x = (al) - (bl); \ + (sh) = (ah) - (bh) - ((al) < (bl)); \ + (sl) = __x; \ + } while (0) + +#define gmp_umul_ppmm(w1, w0, u, v) \ + do { \ + int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; \ + if (sizeof(unsigned int) * CHAR_BIT >= 2 * GMP_LIMB_BITS) \ + { \ + unsigned int __ww = (unsigned int) (u) * (v); \ + w0 = (mp_limb_t) __ww; \ + w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ + } \ + else if (GMP_ULONG_BITS >= 2 * GMP_LIMB_BITS) \ + { \ + unsigned long int __ww = (unsigned long int) (u) * (v); \ + w0 = (mp_limb_t) __ww; \ + w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ + } \ + else { \ + mp_limb_t __x0, __x1, __x2, __x3; \ + unsigned __ul, __vl, __uh, __vh; \ + mp_limb_t __u = (u), __v = (v); \ + assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); \ + \ + __ul = __u & GMP_LLIMB_MASK; \ + __uh = __u >> (GMP_LIMB_BITS / 2); \ + __vl = __v & GMP_LLIMB_MASK; \ + __vh = __v >> (GMP_LIMB_BITS / 2); \ + \ + __x0 = (mp_limb_t) __ul * __vl; \ + __x1 = (mp_limb_t) __ul * __vh; \ + __x2 = (mp_limb_t) __uh * __vl; \ + __x3 = (mp_limb_t) __uh * __vh; \ + \ + __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */ \ + __x1 += __x2; /* but this indeed can */ \ + if (__x1 < __x2) /* did we get it? */ \ + __x3 += GMP_HLIMB_BIT; /* yes, add it in the proper pos. */ \ + \ + (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \ + (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \ + } \ + } while (0) + +/* If mp_limb_t is of size smaller than int, plain u*v implies + automatic promotion to *signed* int, and then multiply may overflow + and cause undefined behavior. Explicitly cast to unsigned int for + that case. */ +#define gmp_umullo_limb(u, v) \ + ((sizeof(mp_limb_t) >= sizeof(int)) ? 
(u)*(v) : (unsigned int)(u) * (v)) + +#define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di) \ + do { \ + mp_limb_t _qh, _ql, _r, _mask; \ + gmp_umul_ppmm (_qh, _ql, (nh), (di)); \ + gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \ + _r = (nl) - gmp_umullo_limb (_qh, (d)); \ + _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \ + _qh += _mask; \ + _r += _mask & (d); \ + if (_r >= (d)) \ + { \ + _r -= (d); \ + _qh++; \ + } \ + \ + (r) = _r; \ + (q) = _qh; \ + } while (0) + +#define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \ + do { \ + mp_limb_t _q0, _t1, _t0, _mask; \ + gmp_umul_ppmm ((q), _q0, (n2), (dinv)); \ + gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \ + \ + /* Compute the two most significant limbs of n - q'd */ \ + (r1) = (n1) - gmp_umullo_limb ((d1), (q)); \ + gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \ + gmp_umul_ppmm (_t1, _t0, (d0), (q)); \ + gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \ + (q)++; \ + \ + /* Conditionally adjust q and the remainders */ \ + _mask = - (mp_limb_t) ((r1) >= _q0); \ + (q) += _mask; \ + gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \ + if ((r1) >= (d1)) \ + { \ + if ((r1) > (d1) || (r0) >= (d0)) \ + { \ + (q)++; \ + gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \ + } \ + } \ + } while (0) + +/* Swap macros. */ +#define MP_LIMB_T_SWAP(x, y) \ + do { \ + mp_limb_t __mp_limb_t_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_limb_t_swap__tmp; \ + } while (0) +#define MP_SIZE_T_SWAP(x, y) \ + do { \ + mp_size_t __mp_size_t_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_size_t_swap__tmp; \ + } while (0) +#define MP_BITCNT_T_SWAP(x,y) \ + do { \ + mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_bitcnt_t_swap__tmp; \ + } while (0) +#define MP_PTR_SWAP(x, y) \ + do { \ + mp_ptr __mp_ptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_ptr_swap__tmp; \ + } while (0) +#define MP_SRCPTR_SWAP(x, y) \ + do { \ + mp_srcptr __mp_srcptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_srcptr_swap__tmp; \ + } while (0) + +#define MPN_PTR_SWAP(xp,xs, yp,ys) \ + do { \ + MP_PTR_SWAP (xp, yp); \ + MP_SIZE_T_SWAP (xs, ys); \ + } while(0) +#define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \ + do { \ + MP_SRCPTR_SWAP (xp, yp); \ + MP_SIZE_T_SWAP (xs, ys); \ + } while(0) + +#define MPZ_PTR_SWAP(x, y) \ + do { \ + mpz_ptr __mpz_ptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mpz_ptr_swap__tmp; \ + } while (0) +#define MPZ_SRCPTR_SWAP(x, y) \ + do { \ + mpz_srcptr __mpz_srcptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mpz_srcptr_swap__tmp; \ + } while (0) + +const int mp_bits_per_limb = GMP_LIMB_BITS; + + +/* Memory allocation and other helper functions. 
*/ +static void +gmp_die (const char *msg) +{ + fprintf (stderr, "%s\n", msg); + abort(); +} + +static void * +gmp_default_alloc (size_t size) +{ + void *p; + + assert (size > 0); + + p = malloc (size); + if (!p) + gmp_die("gmp_default_alloc: Virtual memory exhausted."); + + return p; +} + +static void * +gmp_default_realloc (void *old, size_t unused_old_size, size_t new_size) +{ + void * p; + + p = realloc (old, new_size); + + if (!p) + gmp_die("gmp_default_realloc: Virtual memory exhausted."); + + return p; +} + +static void +gmp_default_free (void *p, size_t unused_size) +{ + free (p); +} + +static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc; +static void * (*gmp_reallocate_func) (void *, size_t, size_t) = gmp_default_realloc; +static void (*gmp_free_func) (void *, size_t) = gmp_default_free; + +void +mp_get_memory_functions (void *(**alloc_func) (size_t), + void *(**realloc_func) (void *, size_t, size_t), + void (**free_func) (void *, size_t)) +{ + if (alloc_func) + *alloc_func = gmp_allocate_func; + + if (realloc_func) + *realloc_func = gmp_reallocate_func; + + if (free_func) + *free_func = gmp_free_func; +} + +void +mp_set_memory_functions (void *(*alloc_func) (size_t), + void *(*realloc_func) (void *, size_t, size_t), + void (*free_func) (void *, size_t)) +{ + if (!alloc_func) + alloc_func = gmp_default_alloc; + if (!realloc_func) + realloc_func = gmp_default_realloc; + if (!free_func) + free_func = gmp_default_free; + + gmp_allocate_func = alloc_func; + gmp_reallocate_func = realloc_func; + gmp_free_func = free_func; +} + +#define gmp_alloc(size) ((*gmp_allocate_func)((size))) +#define gmp_free(p, size) ((*gmp_free_func) ((p), (size))) +#define gmp_realloc(ptr, old_size, size) ((*gmp_reallocate_func)(ptr, old_size, size)) + +static mp_ptr +gmp_alloc_limbs (mp_size_t size) +{ + return (mp_ptr) gmp_alloc (size * sizeof (mp_limb_t)); +} + +static mp_ptr +gmp_realloc_limbs (mp_ptr old, mp_size_t old_size, mp_size_t size) +{ + assert (size > 0); + return (mp_ptr) gmp_realloc (old, old_size * sizeof (mp_limb_t), size * sizeof (mp_limb_t)); +} + +static void +gmp_free_limbs (mp_ptr old, mp_size_t size) +{ + gmp_free (old, size * sizeof (mp_limb_t)); +} + + +/* MPN interface */ + +void +mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n) +{ + mp_size_t i; + for (i = 0; i < n; i++) + d[i] = s[i]; +} + +void +mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n) +{ + while (--n >= 0) + d[n] = s[n]; +} + +int +mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + while (--n >= 0) + { + if (ap[n] != bp[n]) + return ap[n] > bp[n] ? 1 : -1; + } + return 0; +} + +static int +mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) +{ + if (an != bn) + return an < bn ? 
-1 : 1; + else + return mpn_cmp (ap, bp, an); +} + +static mp_size_t +mpn_normalized_size (mp_srcptr xp, mp_size_t n) +{ + while (n > 0 && xp[n-1] == 0) + --n; + return n; +} + +int +mpn_zero_p(mp_srcptr rp, mp_size_t n) +{ + return mpn_normalized_size (rp, n) == 0; +} + +void +mpn_zero (mp_ptr rp, mp_size_t n) +{ + while (--n >= 0) + rp[n] = 0; +} + +mp_limb_t +mpn_add_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) +{ + mp_size_t i; + + assert (n > 0); + i = 0; + do + { + mp_limb_t r = ap[i] + b; + /* Carry out */ + b = (r < b); + rp[i] = r; + } + while (++i < n); + + return b; +} + +mp_limb_t +mpn_add_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + mp_size_t i; + mp_limb_t cy; + + for (i = 0, cy = 0; i < n; i++) + { + mp_limb_t a, b, r; + a = ap[i]; b = bp[i]; + r = a + cy; + cy = (r < cy); + r += b; + cy += (r < b); + rp[i] = r; + } + return cy; +} + +mp_limb_t +mpn_add (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) +{ + mp_limb_t cy; + + assert (an >= bn); + + cy = mpn_add_n (rp, ap, bp, bn); + if (an > bn) + cy = mpn_add_1 (rp + bn, ap + bn, an - bn, cy); + return cy; +} + +mp_limb_t +mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) +{ + mp_size_t i; + + assert (n > 0); + + i = 0; + do + { + mp_limb_t a = ap[i]; + /* Carry out */ + mp_limb_t cy = a < b; + rp[i] = a - b; + b = cy; + } + while (++i < n); + + return b; +} + +mp_limb_t +mpn_sub_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + mp_size_t i; + mp_limb_t cy; + + for (i = 0, cy = 0; i < n; i++) + { + mp_limb_t a, b; + a = ap[i]; b = bp[i]; + b += cy; + cy = (b < cy); + cy += (a < b); + rp[i] = a - b; + } + return cy; +} + +mp_limb_t +mpn_sub (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) +{ + mp_limb_t cy; + + assert (an >= bn); + + cy = mpn_sub_n (rp, ap, bp, bn); + if (an > bn) + cy = mpn_sub_1 (rp + bn, ap + bn, an - bn, cy); + return cy; +} + +mp_limb_t +mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) +{ + mp_limb_t ul, cl, hpl, lpl; + + assert (n >= 1); + + cl = 0; + do + { + ul = *up++; + gmp_umul_ppmm (hpl, lpl, ul, vl); + + lpl += cl; + cl = (lpl < cl) + hpl; + + *rp++ = lpl; + } + while (--n != 0); + + return cl; +} + +mp_limb_t +mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) +{ + mp_limb_t ul, cl, hpl, lpl, rl; + + assert (n >= 1); + + cl = 0; + do + { + ul = *up++; + gmp_umul_ppmm (hpl, lpl, ul, vl); + + lpl += cl; + cl = (lpl < cl) + hpl; + + rl = *rp; + lpl = rl + lpl; + cl += lpl < rl; + *rp++ = lpl; + } + while (--n != 0); + + return cl; +} + +mp_limb_t +mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) +{ + mp_limb_t ul, cl, hpl, lpl, rl; + + assert (n >= 1); + + cl = 0; + do + { + ul = *up++; + gmp_umul_ppmm (hpl, lpl, ul, vl); + + lpl += cl; + cl = (lpl < cl) + hpl; + + rl = *rp; + lpl = rl - lpl; + cl += lpl > rl; + *rp++ = lpl; + } + while (--n != 0); + + return cl; +} + +mp_limb_t +mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn) +{ + assert (un >= vn); + assert (vn >= 1); + assert (!GMP_MPN_OVERLAP_P(rp, un + vn, up, un)); + assert (!GMP_MPN_OVERLAP_P(rp, un + vn, vp, vn)); + + /* We first multiply by the low order limb. This result can be + stored, not added, to rp. We also avoid a loop for zeroing this + way. */ + + rp[un] = mpn_mul_1 (rp, up, un, vp[0]); + + /* Now accumulate the product of up[] and the next higher limb from + vp[]. 
*/ + + while (--vn >= 1) + { + rp += 1, vp += 1; + rp[un] = mpn_addmul_1 (rp, up, un, vp[0]); + } + return rp[un]; +} + +void +mpn_mul_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + mpn_mul (rp, ap, n, bp, n); +} + +void +mpn_sqr (mp_ptr rp, mp_srcptr ap, mp_size_t n) +{ + mpn_mul (rp, ap, n, ap, n); +} + +mp_limb_t +mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) +{ + mp_limb_t high_limb, low_limb; + unsigned int tnc; + mp_limb_t retval; + + assert (n >= 1); + assert (cnt >= 1); + assert (cnt < GMP_LIMB_BITS); + + up += n; + rp += n; + + tnc = GMP_LIMB_BITS - cnt; + low_limb = *--up; + retval = low_limb >> tnc; + high_limb = (low_limb << cnt); + + while (--n != 0) + { + low_limb = *--up; + *--rp = high_limb | (low_limb >> tnc); + high_limb = (low_limb << cnt); + } + *--rp = high_limb; + + return retval; +} + +mp_limb_t +mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) +{ + mp_limb_t high_limb, low_limb; + unsigned int tnc; + mp_limb_t retval; + + assert (n >= 1); + assert (cnt >= 1); + assert (cnt < GMP_LIMB_BITS); + + tnc = GMP_LIMB_BITS - cnt; + high_limb = *up++; + retval = (high_limb << tnc); + low_limb = high_limb >> cnt; + + while (--n != 0) + { + high_limb = *up++; + *rp++ = low_limb | (high_limb << tnc); + low_limb = high_limb >> cnt; + } + *rp = low_limb; + + return retval; +} + +static mp_bitcnt_t +mpn_common_scan (mp_limb_t limb, mp_size_t i, mp_srcptr up, mp_size_t un, + mp_limb_t ux) +{ + unsigned cnt; + + assert (ux == 0 || ux == GMP_LIMB_MAX); + assert (0 <= i && i <= un ); + + while (limb == 0) + { + i++; + if (i == un) + return (ux == 0 ? ~(mp_bitcnt_t) 0 : un * GMP_LIMB_BITS); + limb = ux ^ up[i]; + } + gmp_ctz (cnt, limb); + return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt; +} + +mp_bitcnt_t +mpn_scan1 (mp_srcptr ptr, mp_bitcnt_t bit) +{ + mp_size_t i; + i = bit / GMP_LIMB_BITS; + + return mpn_common_scan ( ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), + i, ptr, i, 0); +} + +mp_bitcnt_t +mpn_scan0 (mp_srcptr ptr, mp_bitcnt_t bit) +{ + mp_size_t i; + i = bit / GMP_LIMB_BITS; + + return mpn_common_scan (~ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), + i, ptr, i, GMP_LIMB_MAX); +} + +void +mpn_com (mp_ptr rp, mp_srcptr up, mp_size_t n) +{ + while (--n >= 0) + *rp++ = ~ *up++; +} + +mp_limb_t +mpn_neg (mp_ptr rp, mp_srcptr up, mp_size_t n) +{ + while (*up == 0) + { + *rp = 0; + if (!--n) + return 0; + ++up; ++rp; + } + *rp = - *up; + mpn_com (++rp, ++up, --n); + return 1; +} + + +/* MPN division interface. */ + +/* The 3/2 inverse is defined as + + m = floor( (B^3-1) / (B u1 + u0)) - B +*/ +mp_limb_t +mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0) +{ + mp_limb_t r, m; + + { + mp_limb_t p, ql; + unsigned ul, uh, qh; + + assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); + /* For notation, let b denote the half-limb base, so that B = b^2. + Split u1 = b uh + ul. */ + ul = u1 & GMP_LLIMB_MASK; + uh = u1 >> (GMP_LIMB_BITS / 2); + + /* Approximation of the high half of quotient. Differs from the 2/1 + inverse of the half limb uh, since we have already subtracted + u0. */ + qh = (u1 ^ GMP_LIMB_MAX) / uh; + + /* Adjust to get a half-limb 3/2 inverse, i.e., we want + + qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u + = floor( (b (~u) + b-1) / u), + + and the remainder + + r = b (~u) + b-1 - qh (b uh + ul) + = b (~u - qh uh) + b-1 - qh ul + + Subtraction of qh ul may underflow, which implies adjustments. + But by normalization, 2 u >= B > qh ul, so we need to adjust by + at most 2. 
+ */ + + r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK; + + p = (mp_limb_t) qh * ul; + /* Adjustment steps taken from udiv_qrnnd_c */ + if (r < p) + { + qh--; + r += u1; + if (r >= u1) /* i.e. we didn't get carry when adding to r */ + if (r < p) + { + qh--; + r += u1; + } + } + r -= p; + + /* Low half of the quotient is + + ql = floor ( (b r + b-1) / u1). + + This is a 3/2 division (on half-limbs), for which qh is a + suitable inverse. */ + + p = (r >> (GMP_LIMB_BITS / 2)) * qh + r; + /* Unlike full-limb 3/2, we can add 1 without overflow. For this to + work, it is essential that ql is a full mp_limb_t. */ + ql = (p >> (GMP_LIMB_BITS / 2)) + 1; + + /* By the 3/2 trick, we don't need the high half limb. */ + r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1; + + if (r >= (GMP_LIMB_MAX & (p << (GMP_LIMB_BITS / 2)))) + { + ql--; + r += u1; + } + m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql; + if (r >= u1) + { + m++; + r -= u1; + } + } + + /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a + 3/2 inverse. */ + if (u0 > 0) + { + mp_limb_t th, tl; + r = ~r; + r += u0; + if (r < u0) + { + m--; + if (r >= u1) + { + m--; + r -= u1; + } + r -= u1; + } + gmp_umul_ppmm (th, tl, u0, m); + r += th; + if (r < th) + { + m--; + m -= ((r > u1) | ((r == u1) & (tl > u0))); + } + } + + return m; +} + +struct gmp_div_inverse +{ + /* Normalization shift count. */ + unsigned shift; + /* Normalized divisor (d0 unused for mpn_div_qr_1) */ + mp_limb_t d1, d0; + /* Inverse, for 2/1 or 3/2. */ + mp_limb_t di; +}; + +static void +mpn_div_qr_1_invert (struct gmp_div_inverse *inv, mp_limb_t d) +{ + unsigned shift; + + assert (d > 0); + gmp_clz (shift, d); + inv->shift = shift; + inv->d1 = d << shift; + inv->di = mpn_invert_limb (inv->d1); +} + +static void +mpn_div_qr_2_invert (struct gmp_div_inverse *inv, + mp_limb_t d1, mp_limb_t d0) +{ + unsigned shift; + + assert (d1 > 0); + gmp_clz (shift, d1); + inv->shift = shift; + if (shift > 0) + { + d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); + d0 <<= shift; + } + inv->d1 = d1; + inv->d0 = d0; + inv->di = mpn_invert_3by2 (d1, d0); +} + +static void +mpn_div_qr_invert (struct gmp_div_inverse *inv, + mp_srcptr dp, mp_size_t dn) +{ + assert (dn > 0); + + if (dn == 1) + mpn_div_qr_1_invert (inv, dp[0]); + else if (dn == 2) + mpn_div_qr_2_invert (inv, dp[1], dp[0]); + else + { + unsigned shift; + mp_limb_t d1, d0; + + d1 = dp[dn-1]; + d0 = dp[dn-2]; + assert (d1 > 0); + gmp_clz (shift, d1); + inv->shift = shift; + if (shift > 0) + { + d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); + d0 = (d0 << shift) | (dp[dn-3] >> (GMP_LIMB_BITS - shift)); + } + inv->d1 = d1; + inv->d0 = d0; + inv->di = mpn_invert_3by2 (d1, d0); + } +} + +/* Not matching current public gmp interface, rather corresponding to + the sbpi1_div_* functions. */ +static mp_limb_t +mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn, + const struct gmp_div_inverse *inv) +{ + mp_limb_t d, di; + mp_limb_t r; + mp_ptr tp = NULL; + mp_size_t tn = 0; + + if (inv->shift > 0) + { + /* Shift, reusing qp area if possible. In-place shift if qp == np. 
*/ + tp = qp; + if (!tp) + { + tn = nn; + tp = gmp_alloc_limbs (tn); + } + r = mpn_lshift (tp, np, nn, inv->shift); + np = tp; + } + else + r = 0; + + d = inv->d1; + di = inv->di; + while (--nn >= 0) + { + mp_limb_t q; + + gmp_udiv_qrnnd_preinv (q, r, r, np[nn], d, di); + if (qp) + qp[nn] = q; + } + if (tn) + gmp_free_limbs (tp, tn); + + return r >> inv->shift; +} + +static void +mpn_div_qr_2_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, + const struct gmp_div_inverse *inv) +{ + unsigned shift; + mp_size_t i; + mp_limb_t d1, d0, di, r1, r0; + + assert (nn >= 2); + shift = inv->shift; + d1 = inv->d1; + d0 = inv->d0; + di = inv->di; + + if (shift > 0) + r1 = mpn_lshift (np, np, nn, shift); + else + r1 = 0; + + r0 = np[nn - 1]; + + i = nn - 2; + do + { + mp_limb_t n0, q; + n0 = np[i]; + gmp_udiv_qr_3by2 (q, r1, r0, r1, r0, n0, d1, d0, di); + + if (qp) + qp[i] = q; + } + while (--i >= 0); + + if (shift > 0) + { + assert ((r0 & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - shift))) == 0); + r0 = (r0 >> shift) | (r1 << (GMP_LIMB_BITS - shift)); + r1 >>= shift; + } + + np[1] = r1; + np[0] = r0; +} + +static void +mpn_div_qr_pi1 (mp_ptr qp, + mp_ptr np, mp_size_t nn, mp_limb_t n1, + mp_srcptr dp, mp_size_t dn, + mp_limb_t dinv) +{ + mp_size_t i; + + mp_limb_t d1, d0; + mp_limb_t cy, cy1; + mp_limb_t q; + + assert (dn > 2); + assert (nn >= dn); + + d1 = dp[dn - 1]; + d0 = dp[dn - 2]; + + assert ((d1 & GMP_LIMB_HIGHBIT) != 0); + /* Iteration variable is the index of the q limb. + * + * We divide + * by + */ + + i = nn - dn; + do + { + mp_limb_t n0 = np[dn-1+i]; + + if (n1 == d1 && n0 == d0) + { + q = GMP_LIMB_MAX; + mpn_submul_1 (np+i, dp, dn, q); + n1 = np[dn-1+i]; /* update n1, last loop's value will now be invalid */ + } + else + { + gmp_udiv_qr_3by2 (q, n1, n0, n1, n0, np[dn-2+i], d1, d0, dinv); + + cy = mpn_submul_1 (np + i, dp, dn-2, q); + + cy1 = n0 < cy; + n0 = n0 - cy; + cy = n1 < cy1; + n1 = n1 - cy1; + np[dn-2+i] = n0; + + if (cy != 0) + { + n1 += d1 + mpn_add_n (np + i, np + i, dp, dn - 1); + q--; + } + } + + if (qp) + qp[i] = q; + } + while (--i >= 0); + + np[dn - 1] = n1; +} + +static void +mpn_div_qr_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, + mp_srcptr dp, mp_size_t dn, + const struct gmp_div_inverse *inv) +{ + assert (dn > 0); + assert (nn >= dn); + + if (dn == 1) + np[0] = mpn_div_qr_1_preinv (qp, np, nn, inv); + else if (dn == 2) + mpn_div_qr_2_preinv (qp, np, nn, inv); + else + { + mp_limb_t nh; + unsigned shift; + + assert (inv->d1 == dp[dn-1]); + assert (inv->d0 == dp[dn-2]); + assert ((inv->d1 & GMP_LIMB_HIGHBIT) != 0); + + shift = inv->shift; + if (shift > 0) + nh = mpn_lshift (np, np, nn, shift); + else + nh = 0; + + mpn_div_qr_pi1 (qp, np, nn, nh, dp, dn, inv->di); + + if (shift > 0) + gmp_assert_nocarry (mpn_rshift (np, np, dn, shift)); + } +} + +static void +mpn_div_qr (mp_ptr qp, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn) +{ + struct gmp_div_inverse inv; + mp_ptr tp = NULL; + + assert (dn > 0); + assert (nn >= dn); + + mpn_div_qr_invert (&inv, dp, dn); + if (dn > 2 && inv.shift > 0) + { + tp = gmp_alloc_limbs (dn); + gmp_assert_nocarry (mpn_lshift (tp, dp, dn, inv.shift)); + dp = tp; + } + mpn_div_qr_preinv (qp, np, nn, dp, dn, &inv); + if (tp) + gmp_free_limbs (tp, dn); +} + + +/* MPN base conversion. 
*/ +static unsigned +mpn_base_power_of_two_p (unsigned b) +{ + switch (b) + { + case 2: return 1; + case 4: return 2; + case 8: return 3; + case 16: return 4; + case 32: return 5; + case 64: return 6; + case 128: return 7; + case 256: return 8; + default: return 0; + } +} + +struct mpn_base_info +{ + /* bb is the largest power of the base which fits in one limb, and + exp is the corresponding exponent. */ + unsigned exp; + mp_limb_t bb; +}; + +static void +mpn_get_base_info (struct mpn_base_info *info, mp_limb_t b) +{ + mp_limb_t m; + mp_limb_t p; + unsigned exp; + + m = GMP_LIMB_MAX / b; + for (exp = 1, p = b; p <= m; exp++) + p *= b; + + info->exp = exp; + info->bb = p; +} + +static mp_bitcnt_t +mpn_limb_size_in_base_2 (mp_limb_t u) +{ + unsigned shift; + + assert (u > 0); + gmp_clz (shift, u); + return GMP_LIMB_BITS - shift; +} + +static size_t +mpn_get_str_bits (unsigned char *sp, unsigned bits, mp_srcptr up, mp_size_t un) +{ + unsigned char mask; + size_t sn, j; + mp_size_t i; + unsigned shift; + + sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]) + + bits - 1) / bits; + + mask = (1U << bits) - 1; + + for (i = 0, j = sn, shift = 0; j-- > 0;) + { + unsigned char digit = up[i] >> shift; + + shift += bits; + + if (shift >= GMP_LIMB_BITS && ++i < un) + { + shift -= GMP_LIMB_BITS; + digit |= up[i] << (bits - shift); + } + sp[j] = digit & mask; + } + return sn; +} + +/* We generate digits from the least significant end, and reverse at + the end. */ +static size_t +mpn_limb_get_str (unsigned char *sp, mp_limb_t w, + const struct gmp_div_inverse *binv) +{ + mp_size_t i; + for (i = 0; w > 0; i++) + { + mp_limb_t h, l, r; + + h = w >> (GMP_LIMB_BITS - binv->shift); + l = w << binv->shift; + + gmp_udiv_qrnnd_preinv (w, r, h, l, binv->d1, binv->di); + assert ((r & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - binv->shift))) == 0); + r >>= binv->shift; + + sp[i] = r; + } + return i; +} + +static size_t +mpn_get_str_other (unsigned char *sp, + int base, const struct mpn_base_info *info, + mp_ptr up, mp_size_t un) +{ + struct gmp_div_inverse binv; + size_t sn; + size_t i; + + mpn_div_qr_1_invert (&binv, base); + + sn = 0; + + if (un > 1) + { + struct gmp_div_inverse bbinv; + mpn_div_qr_1_invert (&bbinv, info->bb); + + do + { + mp_limb_t w; + size_t done; + w = mpn_div_qr_1_preinv (up, up, un, &bbinv); + un -= (up[un-1] == 0); + done = mpn_limb_get_str (sp + sn, w, &binv); + + for (sn += done; done < info->exp; done++) + sp[sn++] = 0; + } + while (un > 1); + } + sn += mpn_limb_get_str (sp + sn, up[0], &binv); + + /* Reverse order */ + for (i = 0; 2*i + 1 < sn; i++) + { + unsigned char t = sp[i]; + sp[i] = sp[sn - i - 1]; + sp[sn - i - 1] = t; + } + + return sn; +} + +size_t +mpn_get_str (unsigned char *sp, int base, mp_ptr up, mp_size_t un) +{ + unsigned bits; + + assert (un > 0); + assert (up[un-1] > 0); + + bits = mpn_base_power_of_two_p (base); + if (bits) + return mpn_get_str_bits (sp, bits, up, un); + else + { + struct mpn_base_info info; + + mpn_get_base_info (&info, base); + return mpn_get_str_other (sp, base, &info, up, un); + } +} + +static mp_size_t +mpn_set_str_bits (mp_ptr rp, const unsigned char *sp, size_t sn, + unsigned bits) +{ + mp_size_t rn; + mp_limb_t limb; + unsigned shift; + + for (limb = 0, rn = 0, shift = 0; sn-- > 0; ) + { + limb |= (mp_limb_t) sp[sn] << shift; + shift += bits; + if (shift >= GMP_LIMB_BITS) + { + shift -= GMP_LIMB_BITS; + rp[rn++] = limb; + /* Next line is correct also if shift == 0, + bits == 8, and mp_limb_t == unsigned char. 
*/ + limb = (unsigned int) sp[sn] >> (bits - shift); + } + } + if (limb != 0) + rp[rn++] = limb; + else + rn = mpn_normalized_size (rp, rn); + return rn; +} + +/* Result is usually normalized, except for all-zero input, in which + case a single zero limb is written at *RP, and 1 is returned. */ +static mp_size_t +mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn, + mp_limb_t b, const struct mpn_base_info *info) +{ + mp_size_t rn; + mp_limb_t w; + unsigned k; + size_t j; + + assert (sn > 0); + + k = 1 + (sn - 1) % info->exp; + + j = 0; + w = sp[j++]; + while (--k != 0) + w = w * b + sp[j++]; + + rp[0] = w; + + for (rn = 1; j < sn;) + { + mp_limb_t cy; + + w = sp[j++]; + for (k = 1; k < info->exp; k++) + w = w * b + sp[j++]; + + cy = mpn_mul_1 (rp, rp, rn, info->bb); + cy += mpn_add_1 (rp, rp, rn, w); + if (cy > 0) + rp[rn++] = cy; + } + assert (j == sn); + + return rn; +} + +mp_size_t +mpn_set_str (mp_ptr rp, const unsigned char *sp, size_t sn, int base) +{ + unsigned bits; + + if (sn == 0) + return 0; + + bits = mpn_base_power_of_two_p (base); + if (bits) + return mpn_set_str_bits (rp, sp, sn, bits); + else + { + struct mpn_base_info info; + + mpn_get_base_info (&info, base); + return mpn_set_str_other (rp, sp, sn, base, &info); + } +} + + +/* MPZ interface */ +void +mpz_init (mpz_t r) +{ + static const mp_limb_t dummy_limb = GMP_LIMB_MAX & 0xc1a0; + + r->_mp_alloc = 0; + r->_mp_size = 0; + r->_mp_d = (mp_ptr) &dummy_limb; +} + +/* The utility of this function is a bit limited, since many functions + assigns the result variable using mpz_swap. */ +void +mpz_init2 (mpz_t r, mp_bitcnt_t bits) +{ + mp_size_t rn; + + bits -= (bits != 0); /* Round down, except if 0 */ + rn = 1 + bits / GMP_LIMB_BITS; + + r->_mp_alloc = rn; + r->_mp_size = 0; + r->_mp_d = gmp_alloc_limbs (rn); +} + +void +mpz_clear (mpz_t r) +{ + if (r->_mp_alloc) + gmp_free_limbs (r->_mp_d, r->_mp_alloc); +} + +static mp_ptr +mpz_realloc (mpz_t r, mp_size_t size) +{ + size = GMP_MAX (size, 1); + + if (r->_mp_alloc) + r->_mp_d = gmp_realloc_limbs (r->_mp_d, r->_mp_alloc, size); + else + r->_mp_d = gmp_alloc_limbs (size); + r->_mp_alloc = size; + + if (GMP_ABS (r->_mp_size) > size) + r->_mp_size = 0; + + return r->_mp_d; +} + +/* Realloc for an mpz_t WHAT if it has less than NEEDED limbs. */ +#define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc \ + ? mpz_realloc(z,n) \ + : (z)->_mp_d) + +/* MPZ assignment and basic conversions. 
*/ +void +mpz_set_si (mpz_t r, signed long int x) +{ + if (x >= 0) + mpz_set_ui (r, x); + else /* (x < 0) */ + if (GMP_LIMB_BITS < GMP_ULONG_BITS) + { + mpz_set_ui (r, GMP_NEG_CAST (unsigned long int, x)); + mpz_neg (r, r); + } + else + { + r->_mp_size = -1; + MPZ_REALLOC (r, 1)[0] = GMP_NEG_CAST (unsigned long int, x); + } +} + +void +mpz_set_ui (mpz_t r, unsigned long int x) +{ + if (x > 0) + { + r->_mp_size = 1; + MPZ_REALLOC (r, 1)[0] = x; + if (GMP_LIMB_BITS < GMP_ULONG_BITS) + { + int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; + while (x >>= LOCAL_GMP_LIMB_BITS) + { + ++ r->_mp_size; + MPZ_REALLOC (r, r->_mp_size)[r->_mp_size - 1] = x; + } + } + } + else + r->_mp_size = 0; +} + +void +mpz_set (mpz_t r, const mpz_t x) +{ + /* Allow the NOP r == x */ + if (r != x) + { + mp_size_t n; + mp_ptr rp; + + n = GMP_ABS (x->_mp_size); + rp = MPZ_REALLOC (r, n); + + mpn_copyi (rp, x->_mp_d, n); + r->_mp_size = x->_mp_size; + } +} + +void +mpz_init_set_si (mpz_t r, signed long int x) +{ + mpz_init (r); + mpz_set_si (r, x); +} + +void +mpz_init_set_ui (mpz_t r, unsigned long int x) +{ + mpz_init (r); + mpz_set_ui (r, x); +} + +void +mpz_init_set (mpz_t r, const mpz_t x) +{ + mpz_init (r); + mpz_set (r, x); +} + +int +mpz_fits_slong_p (const mpz_t u) +{ + return mpz_cmp_si (u, LONG_MAX) <= 0 && mpz_cmp_si (u, LONG_MIN) >= 0; +} + +static int +mpn_absfits_ulong_p (mp_srcptr up, mp_size_t un) +{ + int ulongsize = GMP_ULONG_BITS / GMP_LIMB_BITS; + mp_limb_t ulongrem = 0; + + if (GMP_ULONG_BITS % GMP_LIMB_BITS != 0) + ulongrem = (mp_limb_t) (ULONG_MAX >> GMP_LIMB_BITS * ulongsize) + 1; + + return un <= ulongsize || (up[ulongsize] < ulongrem && un == ulongsize + 1); +} + +int +mpz_fits_ulong_p (const mpz_t u) +{ + mp_size_t us = u->_mp_size; + + return us >= 0 && mpn_absfits_ulong_p (u->_mp_d, us); +} + +int +mpz_fits_sint_p (const mpz_t u) +{ + return mpz_cmp_si (u, INT_MAX) <= 0 && mpz_cmp_si (u, INT_MIN) >= 0; +} + +int +mpz_fits_uint_p (const mpz_t u) +{ + return u->_mp_size >= 0 && mpz_cmpabs_ui (u, UINT_MAX) <= 0; +} + +int +mpz_fits_sshort_p (const mpz_t u) +{ + return mpz_cmp_si (u, SHRT_MAX) <= 0 && mpz_cmp_si (u, SHRT_MIN) >= 0; +} + +int +mpz_fits_ushort_p (const mpz_t u) +{ + return u->_mp_size >= 0 && mpz_cmpabs_ui (u, USHRT_MAX) <= 0; +} + +long int +mpz_get_si (const mpz_t u) +{ + unsigned long r = mpz_get_ui (u); + unsigned long c = -LONG_MAX - LONG_MIN; + + if (u->_mp_size < 0) + /* This expression is necessary to properly handle -LONG_MIN */ + return -(long) c - (long) ((r - c) & LONG_MAX); + else + return (long) (r & LONG_MAX); +} + +unsigned long int +mpz_get_ui (const mpz_t u) +{ + if (GMP_LIMB_BITS < GMP_ULONG_BITS) + { + int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; + unsigned long r = 0; + mp_size_t n = GMP_ABS (u->_mp_size); + n = GMP_MIN (n, 1 + (mp_size_t) (GMP_ULONG_BITS - 1) / GMP_LIMB_BITS); + while (--n >= 0) + r = (r << LOCAL_GMP_LIMB_BITS) + u->_mp_d[n]; + return r; + } + + return u->_mp_size == 0 ? 
0 : u->_mp_d[0]; +} + +size_t +mpz_size (const mpz_t u) +{ + return GMP_ABS (u->_mp_size); +} + +mp_limb_t +mpz_getlimbn (const mpz_t u, mp_size_t n) +{ + if (n >= 0 && n < GMP_ABS (u->_mp_size)) + return u->_mp_d[n]; + else + return 0; +} + +void +mpz_realloc2 (mpz_t x, mp_bitcnt_t n) +{ + mpz_realloc (x, 1 + (n - (n != 0)) / GMP_LIMB_BITS); +} + +mp_srcptr +mpz_limbs_read (mpz_srcptr x) +{ + return x->_mp_d; +} + +mp_ptr +mpz_limbs_modify (mpz_t x, mp_size_t n) +{ + assert (n > 0); + return MPZ_REALLOC (x, n); +} + +mp_ptr +mpz_limbs_write (mpz_t x, mp_size_t n) +{ + return mpz_limbs_modify (x, n); +} + +void +mpz_limbs_finish (mpz_t x, mp_size_t xs) +{ + mp_size_t xn; + xn = mpn_normalized_size (x->_mp_d, GMP_ABS (xs)); + x->_mp_size = xs < 0 ? -xn : xn; +} + +static mpz_srcptr +mpz_roinit_normal_n (mpz_t x, mp_srcptr xp, mp_size_t xs) +{ + x->_mp_alloc = 0; + x->_mp_d = (mp_ptr) xp; + x->_mp_size = xs; + return x; +} + +mpz_srcptr +mpz_roinit_n (mpz_t x, mp_srcptr xp, mp_size_t xs) +{ + mpz_roinit_normal_n (x, xp, xs); + mpz_limbs_finish (x, xs); + return x; +} + + +/* Conversions and comparison to double. */ +void +mpz_set_d (mpz_t r, double x) +{ + int sign; + mp_ptr rp; + mp_size_t rn, i; + double B; + double Bi; + mp_limb_t f; + + /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is + zero or infinity. */ + if (x != x || x == x * 0.5) + { + r->_mp_size = 0; + return; + } + + sign = x < 0.0 ; + if (sign) + x = - x; + + if (x < 1.0) + { + r->_mp_size = 0; + return; + } + B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); + Bi = 1.0 / B; + for (rn = 1; x >= B; rn++) + x *= Bi; + + rp = MPZ_REALLOC (r, rn); + + f = (mp_limb_t) x; + x -= f; + assert (x < 1.0); + i = rn-1; + rp[i] = f; + while (--i >= 0) + { + x = B * x; + f = (mp_limb_t) x; + x -= f; + assert (x < 1.0); + rp[i] = f; + } + + r->_mp_size = sign ? - rn : rn; +} + +void +mpz_init_set_d (mpz_t r, double x) +{ + mpz_init (r); + mpz_set_d (r, x); +} + +double +mpz_get_d (const mpz_t u) +{ + int m; + mp_limb_t l; + mp_size_t un; + double x; + double B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); + + un = GMP_ABS (u->_mp_size); + + if (un == 0) + return 0.0; + + l = u->_mp_d[--un]; + gmp_clz (m, l); + m = m + GMP_DBL_MANT_BITS - GMP_LIMB_BITS; + if (m < 0) + l &= GMP_LIMB_MAX << -m; + + for (x = l; --un >= 0;) + { + x = B*x; + if (m > 0) { + l = u->_mp_d[un]; + m -= GMP_LIMB_BITS; + if (m < 0) + l &= GMP_LIMB_MAX << -m; + x += l; + } + } + + if (u->_mp_size < 0) + x = -x; + + return x; +} + +int +mpz_cmpabs_d (const mpz_t x, double d) +{ + mp_size_t xn; + double B, Bi; + mp_size_t i; + + xn = x->_mp_size; + d = GMP_ABS (d); + + if (xn != 0) + { + xn = GMP_ABS (xn); + + B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); + Bi = 1.0 / B; + + /* Scale d so it can be compared with the top limb. */ + for (i = 1; i < xn; i++) + d *= Bi; + + if (d >= B) + return -1; + + /* Compare floor(d) to top limb, subtract and cancel when equal. */ + for (i = xn; i-- > 0;) + { + mp_limb_t f, xl; + + f = (mp_limb_t) d; + xl = x->_mp_d[i]; + if (xl > f) + return 1; + else if (xl < f) + return -1; + d = B * (d - f); + } + } + return - (d > 0.0); +} + +int +mpz_cmp_d (const mpz_t x, double d) +{ + if (x->_mp_size < 0) + { + if (d >= 0.0) + return -1; + else + return -mpz_cmpabs_d (x, d); + } + else + { + if (d < 0.0) + return 1; + else + return mpz_cmpabs_d (x, d); + } +} + + +/* MPZ comparisons and the like. 
*/ +int +mpz_sgn (const mpz_t u) +{ + return GMP_CMP (u->_mp_size, 0); +} + +int +mpz_cmp_si (const mpz_t u, long v) +{ + mp_size_t usize = u->_mp_size; + + if (v >= 0) + return mpz_cmp_ui (u, v); + else if (usize >= 0) + return 1; + else + return - mpz_cmpabs_ui (u, GMP_NEG_CAST (unsigned long int, v)); +} + +int +mpz_cmp_ui (const mpz_t u, unsigned long v) +{ + mp_size_t usize = u->_mp_size; + + if (usize < 0) + return -1; + else + return mpz_cmpabs_ui (u, v); +} + +int +mpz_cmp (const mpz_t a, const mpz_t b) +{ + mp_size_t asize = a->_mp_size; + mp_size_t bsize = b->_mp_size; + + if (asize != bsize) + return (asize < bsize) ? -1 : 1; + else if (asize >= 0) + return mpn_cmp (a->_mp_d, b->_mp_d, asize); + else + return mpn_cmp (b->_mp_d, a->_mp_d, -asize); +} + +int +mpz_cmpabs_ui (const mpz_t u, unsigned long v) +{ + mp_size_t un = GMP_ABS (u->_mp_size); + + if (! mpn_absfits_ulong_p (u->_mp_d, un)) + return 1; + else + { + unsigned long uu = mpz_get_ui (u); + return GMP_CMP(uu, v); + } +} + +int +mpz_cmpabs (const mpz_t u, const mpz_t v) +{ + return mpn_cmp4 (u->_mp_d, GMP_ABS (u->_mp_size), + v->_mp_d, GMP_ABS (v->_mp_size)); +} + +void +mpz_abs (mpz_t r, const mpz_t u) +{ + mpz_set (r, u); + r->_mp_size = GMP_ABS (r->_mp_size); +} + +void +mpz_neg (mpz_t r, const mpz_t u) +{ + mpz_set (r, u); + r->_mp_size = -r->_mp_size; +} + +void +mpz_swap (mpz_t u, mpz_t v) +{ + MP_SIZE_T_SWAP (u->_mp_alloc, v->_mp_alloc); + MPN_PTR_SWAP (u->_mp_d, u->_mp_size, v->_mp_d, v->_mp_size); +} + + +/* MPZ addition and subtraction */ + + +void +mpz_add_ui (mpz_t r, const mpz_t a, unsigned long b) +{ + mpz_t bb; + mpz_init_set_ui (bb, b); + mpz_add (r, a, bb); + mpz_clear (bb); +} + +void +mpz_sub_ui (mpz_t r, const mpz_t a, unsigned long b) +{ + mpz_ui_sub (r, b, a); + mpz_neg (r, r); +} + +void +mpz_ui_sub (mpz_t r, unsigned long a, const mpz_t b) +{ + mpz_neg (r, b); + mpz_add_ui (r, r, a); +} + +static mp_size_t +mpz_abs_add (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t an = GMP_ABS (a->_mp_size); + mp_size_t bn = GMP_ABS (b->_mp_size); + mp_ptr rp; + mp_limb_t cy; + + if (an < bn) + { + MPZ_SRCPTR_SWAP (a, b); + MP_SIZE_T_SWAP (an, bn); + } + + rp = MPZ_REALLOC (r, an + 1); + cy = mpn_add (rp, a->_mp_d, an, b->_mp_d, bn); + + rp[an] = cy; + + return an + cy; +} + +static mp_size_t +mpz_abs_sub (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t an = GMP_ABS (a->_mp_size); + mp_size_t bn = GMP_ABS (b->_mp_size); + int cmp; + mp_ptr rp; + + cmp = mpn_cmp4 (a->_mp_d, an, b->_mp_d, bn); + if (cmp > 0) + { + rp = MPZ_REALLOC (r, an); + gmp_assert_nocarry (mpn_sub (rp, a->_mp_d, an, b->_mp_d, bn)); + return mpn_normalized_size (rp, an); + } + else if (cmp < 0) + { + rp = MPZ_REALLOC (r, bn); + gmp_assert_nocarry (mpn_sub (rp, b->_mp_d, bn, a->_mp_d, an)); + return -mpn_normalized_size (rp, bn); + } + else + return 0; +} + +void +mpz_add (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t rn; + + if ( (a->_mp_size ^ b->_mp_size) >= 0) + rn = mpz_abs_add (r, a, b); + else + rn = mpz_abs_sub (r, a, b); + + r->_mp_size = a->_mp_size >= 0 ? rn : - rn; +} + +void +mpz_sub (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t rn; + + if ( (a->_mp_size ^ b->_mp_size) >= 0) + rn = mpz_abs_sub (r, a, b); + else + rn = mpz_abs_add (r, a, b); + + r->_mp_size = a->_mp_size >= 0 ? 
rn : - rn; +} + + +/* MPZ multiplication */ +void +mpz_mul_si (mpz_t r, const mpz_t u, long int v) +{ + if (v < 0) + { + mpz_mul_ui (r, u, GMP_NEG_CAST (unsigned long int, v)); + mpz_neg (r, r); + } + else + mpz_mul_ui (r, u, v); +} + +void +mpz_mul_ui (mpz_t r, const mpz_t u, unsigned long int v) +{ + mpz_t vv; + mpz_init_set_ui (vv, v); + mpz_mul (r, u, vv); + mpz_clear (vv); + return; +} + +void +mpz_mul (mpz_t r, const mpz_t u, const mpz_t v) +{ + int sign; + mp_size_t un, vn, rn; + mpz_t t; + mp_ptr tp; + + un = u->_mp_size; + vn = v->_mp_size; + + if (un == 0 || vn == 0) + { + r->_mp_size = 0; + return; + } + + sign = (un ^ vn) < 0; + + un = GMP_ABS (un); + vn = GMP_ABS (vn); + + mpz_init2 (t, (un + vn) * GMP_LIMB_BITS); + + tp = t->_mp_d; + if (un >= vn) + mpn_mul (tp, u->_mp_d, un, v->_mp_d, vn); + else + mpn_mul (tp, v->_mp_d, vn, u->_mp_d, un); + + rn = un + vn; + rn -= tp[rn-1] == 0; + + t->_mp_size = sign ? - rn : rn; + mpz_swap (r, t); + mpz_clear (t); +} + +void +mpz_mul_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bits) +{ + mp_size_t un, rn; + mp_size_t limbs; + unsigned shift; + mp_ptr rp; + + un = GMP_ABS (u->_mp_size); + if (un == 0) + { + r->_mp_size = 0; + return; + } + + limbs = bits / GMP_LIMB_BITS; + shift = bits % GMP_LIMB_BITS; + + rn = un + limbs + (shift > 0); + rp = MPZ_REALLOC (r, rn); + if (shift > 0) + { + mp_limb_t cy = mpn_lshift (rp + limbs, u->_mp_d, un, shift); + rp[rn-1] = cy; + rn -= (cy == 0); + } + else + mpn_copyd (rp + limbs, u->_mp_d, un); + + mpn_zero (rp, limbs); + + r->_mp_size = (u->_mp_size < 0) ? - rn : rn; +} + +void +mpz_addmul_ui (mpz_t r, const mpz_t u, unsigned long int v) +{ + mpz_t t; + mpz_init_set_ui (t, v); + mpz_mul (t, u, t); + mpz_add (r, r, t); + mpz_clear (t); +} + +void +mpz_submul_ui (mpz_t r, const mpz_t u, unsigned long int v) +{ + mpz_t t; + mpz_init_set_ui (t, v); + mpz_mul (t, u, t); + mpz_sub (r, r, t); + mpz_clear (t); +} + +void +mpz_addmul (mpz_t r, const mpz_t u, const mpz_t v) +{ + mpz_t t; + mpz_init (t); + mpz_mul (t, u, v); + mpz_add (r, r, t); + mpz_clear (t); +} + +void +mpz_submul (mpz_t r, const mpz_t u, const mpz_t v) +{ + mpz_t t; + mpz_init (t); + mpz_mul (t, u, v); + mpz_sub (r, r, t); + mpz_clear (t); +} + + +/* MPZ division */ +enum mpz_div_round_mode { GMP_DIV_FLOOR, GMP_DIV_CEIL, GMP_DIV_TRUNC }; + +/* Allows q or r to be zero. Returns 1 iff remainder is non-zero. */ +static int +mpz_div_qr (mpz_t q, mpz_t r, + const mpz_t n, const mpz_t d, enum mpz_div_round_mode mode) +{ + mp_size_t ns, ds, nn, dn, qs; + ns = n->_mp_size; + ds = d->_mp_size; + + if (ds == 0) + gmp_die("mpz_div_qr: Divide by zero."); + + if (ns == 0) + { + if (q) + q->_mp_size = 0; + if (r) + r->_mp_size = 0; + return 0; + } + + nn = GMP_ABS (ns); + dn = GMP_ABS (ds); + + qs = ds ^ ns; + + if (nn < dn) + { + if (mode == GMP_DIV_CEIL && qs >= 0) + { + /* q = 1, r = n - d */ + if (r) + mpz_sub (r, n, d); + if (q) + mpz_set_ui (q, 1); + } + else if (mode == GMP_DIV_FLOOR && qs < 0) + { + /* q = -1, r = n + d */ + if (r) + mpz_add (r, n, d); + if (q) + mpz_set_si (q, -1); + } + else + { + /* q = 0, r = d */ + if (r) + mpz_set (r, n); + if (q) + q->_mp_size = 0; + } + return 1; + } + else + { + mp_ptr np, qp; + mp_size_t qn, rn; + mpz_t tq, tr; + + mpz_init_set (tr, n); + np = tr->_mp_d; + + qn = nn - dn + 1; + + if (q) + { + mpz_init2 (tq, qn * GMP_LIMB_BITS); + qp = tq->_mp_d; + } + else + qp = NULL; + + mpn_div_qr (qp, np, nn, d->_mp_d, dn); + + if (qp) + { + qn -= (qp[qn-1] == 0); + + tq->_mp_size = qs < 0 ? 
-qn : qn; + } + rn = mpn_normalized_size (np, dn); + tr->_mp_size = ns < 0 ? - rn : rn; + + if (mode == GMP_DIV_FLOOR && qs < 0 && rn != 0) + { + if (q) + mpz_sub_ui (tq, tq, 1); + if (r) + mpz_add (tr, tr, d); + } + else if (mode == GMP_DIV_CEIL && qs >= 0 && rn != 0) + { + if (q) + mpz_add_ui (tq, tq, 1); + if (r) + mpz_sub (tr, tr, d); + } + + if (q) + { + mpz_swap (tq, q); + mpz_clear (tq); + } + if (r) + mpz_swap (tr, r); + + mpz_clear (tr); + + return rn != 0; + } +} + +void +mpz_cdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, r, n, d, GMP_DIV_CEIL); +} + +void +mpz_fdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, r, n, d, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, r, n, d, GMP_DIV_TRUNC); +} + +void +mpz_cdiv_q (mpz_t q, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, NULL, n, d, GMP_DIV_CEIL); +} + +void +mpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, NULL, n, d, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_q (mpz_t q, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC); +} + +void +mpz_cdiv_r (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL); +} + +void +mpz_fdiv_r (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_r (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, GMP_DIV_TRUNC); +} + +void +mpz_mod (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, d->_mp_size >= 0 ? GMP_DIV_FLOOR : GMP_DIV_CEIL); +} + +static void +mpz_div_q_2exp (mpz_t q, const mpz_t u, mp_bitcnt_t bit_index, + enum mpz_div_round_mode mode) +{ + mp_size_t un, qn; + mp_size_t limb_cnt; + mp_ptr qp; + int adjust; + + un = u->_mp_size; + if (un == 0) + { + q->_mp_size = 0; + return; + } + limb_cnt = bit_index / GMP_LIMB_BITS; + qn = GMP_ABS (un) - limb_cnt; + bit_index %= GMP_LIMB_BITS; + + if (mode == ((un > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* un != 0 here. */ + /* Note: Below, the final indexing at limb_cnt is valid because at + that point we have qn > 0. */ + adjust = (qn <= 0 + || !mpn_zero_p (u->_mp_d, limb_cnt) + || (u->_mp_d[limb_cnt] + & (((mp_limb_t) 1 << bit_index) - 1))); + else + adjust = 0; + + if (qn <= 0) + qn = 0; + else + { + qp = MPZ_REALLOC (q, qn); + + if (bit_index != 0) + { + mpn_rshift (qp, u->_mp_d + limb_cnt, qn, bit_index); + qn -= qp[qn - 1] == 0; + } + else + { + mpn_copyi (qp, u->_mp_d + limb_cnt, qn); + } + } + + q->_mp_size = qn; + + if (adjust) + mpz_add_ui (q, q, 1); + if (un < 0) + mpz_neg (q, q); +} + +static void +mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index, + enum mpz_div_round_mode mode) +{ + mp_size_t us, un, rn; + mp_ptr rp; + mp_limb_t mask; + + us = u->_mp_size; + if (us == 0 || bit_index == 0) + { + r->_mp_size = 0; + return; + } + rn = (bit_index + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; + assert (rn > 0); + + rp = MPZ_REALLOC (r, rn); + un = GMP_ABS (us); + + mask = GMP_LIMB_MAX >> (rn * GMP_LIMB_BITS - bit_index); + + if (rn > un) + { + /* Quotient (with truncation) is zero, and remainder is + non-zero */ + if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ + { + /* Have to negate and sign extend. */ + mp_size_t i; + + gmp_assert_nocarry (! 
mpn_neg (rp, u->_mp_d, un)); + for (i = un; i < rn - 1; i++) + rp[i] = GMP_LIMB_MAX; + + rp[rn-1] = mask; + us = -us; + } + else + { + /* Just copy */ + if (r != u) + mpn_copyi (rp, u->_mp_d, un); + + rn = un; + } + } + else + { + if (r != u) + mpn_copyi (rp, u->_mp_d, rn - 1); + + rp[rn-1] = u->_mp_d[rn-1] & mask; + + if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ + { + /* If r != 0, compute 2^{bit_count} - r. */ + mpn_neg (rp, rp, rn); + + rp[rn-1] &= mask; + + /* us is not used for anything else, so we can modify it + here to indicate flipped sign. */ + us = -us; + } + } + rn = mpn_normalized_size (rp, rn); + r->_mp_size = us < 0 ? -rn : rn; +} + +void +mpz_cdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_q_2exp (r, u, cnt, GMP_DIV_CEIL); +} + +void +mpz_fdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_q_2exp (r, u, cnt, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_q_2exp (r, u, cnt, GMP_DIV_TRUNC); +} + +void +mpz_cdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_r_2exp (r, u, cnt, GMP_DIV_CEIL); +} + +void +mpz_fdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_r_2exp (r, u, cnt, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_r_2exp (r, u, cnt, GMP_DIV_TRUNC); +} + +void +mpz_divexact (mpz_t q, const mpz_t n, const mpz_t d) +{ + gmp_assert_nocarry (mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC)); +} + +int +mpz_divisible_p (const mpz_t n, const mpz_t d) +{ + return mpz_div_qr (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; +} + +int +mpz_congruent_p (const mpz_t a, const mpz_t b, const mpz_t m) +{ + mpz_t t; + int res; + + /* a == b (mod 0) iff a == b */ + if (mpz_sgn (m) == 0) + return (mpz_cmp (a, b) == 0); + + mpz_init (t); + mpz_sub (t, a, b); + res = mpz_divisible_p (t, m); + mpz_clear (t); + + return res; +} + +static unsigned long +mpz_div_qr_ui (mpz_t q, mpz_t r, + const mpz_t n, unsigned long d, enum mpz_div_round_mode mode) +{ + unsigned long ret; + mpz_t rr, dd; + + mpz_init (rr); + mpz_init_set_ui (dd, d); + mpz_div_qr (q, rr, n, dd, mode); + mpz_clear (dd); + ret = mpz_get_ui (rr); + + if (r) + mpz_swap (r, rr); + mpz_clear (rr); + + return ret; +} + +unsigned long +mpz_cdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, r, n, d, GMP_DIV_CEIL); +} + +unsigned long +mpz_fdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, r, n, d, GMP_DIV_FLOOR); +} + +unsigned long +mpz_tdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, r, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_cdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_CEIL); +} + +unsigned long +mpz_fdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_FLOOR); +} + +unsigned long +mpz_tdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_cdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_CEIL); +} +unsigned long +mpz_fdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); +} +unsigned long +mpz_tdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_cdiv_ui 
(const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_CEIL); +} + +unsigned long +mpz_fdiv_ui (const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_FLOOR); +} + +unsigned long +mpz_tdiv_ui (const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_mod_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); +} + +void +mpz_divexact_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + gmp_assert_nocarry (mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC)); +} + +int +mpz_divisible_ui_p (const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; +} + + +/* GCD */ +static mp_limb_t +mpn_gcd_11 (mp_limb_t u, mp_limb_t v) +{ + unsigned shift; + + assert ( (u | v) > 0); + + if (u == 0) + return v; + else if (v == 0) + return u; + + gmp_ctz (shift, u | v); + + u >>= shift; + v >>= shift; + + if ( (u & 1) == 0) + MP_LIMB_T_SWAP (u, v); + + while ( (v & 1) == 0) + v >>= 1; + + while (u != v) + { + if (u > v) + { + u -= v; + do + u >>= 1; + while ( (u & 1) == 0); + } + else + { + v -= u; + do + v >>= 1; + while ( (v & 1) == 0); + } + } + return u << shift; +} + +mp_size_t +mpn_gcd (mp_ptr rp, mp_ptr up, mp_size_t un, mp_ptr vp, mp_size_t vn) +{ + assert (un >= vn); + assert (vn > 0); + assert (!GMP_MPN_OVERLAP_P (up, un, vp, vn)); + assert (vp[vn-1] > 0); + assert ((up[0] | vp[0]) & 1); + + if (un > vn) + mpn_div_qr (NULL, up, un, vp, vn); + + un = mpn_normalized_size (up, vn); + if (un == 0) + { + mpn_copyi (rp, vp, vn); + return vn; + } + + if (!(vp[0] & 1)) + MPN_PTR_SWAP (up, un, vp, vn); + + while (un > 1 || vn > 1) + { + int shift; + assert (vp[0] & 1); + + while (up[0] == 0) + { + up++; + un--; + } + gmp_ctz (shift, up[0]); + if (shift > 0) + { + gmp_assert_nocarry (mpn_rshift(up, up, un, shift)); + un -= (up[un-1] == 0); + } + + if (un < vn) + MPN_PTR_SWAP (up, un, vp, vn); + else if (un == vn) + { + int c = mpn_cmp (up, vp, un); + if (c == 0) + { + mpn_copyi (rp, up, un); + return un; + } + else if (c < 0) + MP_PTR_SWAP (up, vp); + } + + gmp_assert_nocarry (mpn_sub (up, up, un, vp, vn)); + un = mpn_normalized_size (up, un); + } + rp[0] = mpn_gcd_11 (up[0], vp[0]); + return 1; +} + +unsigned long +mpz_gcd_ui (mpz_t g, const mpz_t u, unsigned long v) +{ + mpz_t t; + mpz_init_set_ui(t, v); + mpz_gcd (t, u, t); + if (v > 0) + v = mpz_get_ui (t); + + if (g) + mpz_swap (t, g); + + mpz_clear (t); + + return v; +} + +static mp_bitcnt_t +mpz_make_odd (mpz_t r) +{ + mp_bitcnt_t shift; + + assert (r->_mp_size > 0); + /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ + shift = mpn_scan1 (r->_mp_d, 0); + mpz_tdiv_q_2exp (r, r, shift); + + return shift; +} + +void +mpz_gcd (mpz_t g, const mpz_t u, const mpz_t v) +{ + mpz_t tu, tv; + mp_bitcnt_t uz, vz, gz; + + if (u->_mp_size == 0) + { + mpz_abs (g, v); + return; + } + if (v->_mp_size == 0) + { + mpz_abs (g, u); + return; + } + + mpz_init (tu); + mpz_init (tv); + + mpz_abs (tu, u); + uz = mpz_make_odd (tu); + mpz_abs (tv, v); + vz = mpz_make_odd (tv); + gz = GMP_MIN (uz, vz); + + if (tu->_mp_size < tv->_mp_size) + mpz_swap (tu, tv); + + tu->_mp_size = mpn_gcd (tu->_mp_d, tu->_mp_d, tu->_mp_size, tv->_mp_d, tv->_mp_size); + mpz_mul_2exp (g, tu, gz); + + mpz_clear (tu); + mpz_clear (tv); +} + +void +mpz_gcdext (mpz_t g, mpz_t s, mpz_t t, const mpz_t u, const mpz_t v) +{ + mpz_t tu, tv, s0, s1, t0, t1; + mp_bitcnt_t uz, vz, 
gz; + mp_bitcnt_t power; + int cmp; + + if (u->_mp_size == 0) + { + /* g = 0 u + sgn(v) v */ + signed long sign = mpz_sgn (v); + mpz_abs (g, v); + if (s) + s->_mp_size = 0; + if (t) + mpz_set_si (t, sign); + return; + } + + if (v->_mp_size == 0) + { + /* g = sgn(u) u + 0 v */ + signed long sign = mpz_sgn (u); + mpz_abs (g, u); + if (s) + mpz_set_si (s, sign); + if (t) + t->_mp_size = 0; + return; + } + + mpz_init (tu); + mpz_init (tv); + mpz_init (s0); + mpz_init (s1); + mpz_init (t0); + mpz_init (t1); + + mpz_abs (tu, u); + uz = mpz_make_odd (tu); + mpz_abs (tv, v); + vz = mpz_make_odd (tv); + gz = GMP_MIN (uz, vz); + + uz -= gz; + vz -= gz; + + /* Cofactors corresponding to odd gcd. gz handled later. */ + if (tu->_mp_size < tv->_mp_size) + { + mpz_swap (tu, tv); + MPZ_SRCPTR_SWAP (u, v); + MPZ_PTR_SWAP (s, t); + MP_BITCNT_T_SWAP (uz, vz); + } + + /* Maintain + * + * u = t0 tu + t1 tv + * v = s0 tu + s1 tv + * + * where u and v denote the inputs with common factors of two + * eliminated, and det (s0, t0; s1, t1) = 2^p. Then + * + * 2^p tu = s1 u - t1 v + * 2^p tv = -s0 u + t0 v + */ + + /* After initial division, tu = q tv + tu', we have + * + * u = 2^uz (tu' + q tv) + * v = 2^vz tv + * + * or + * + * t0 = 2^uz, t1 = 2^uz q + * s0 = 0, s1 = 2^vz + */ + + mpz_tdiv_qr (t1, tu, tu, tv); + mpz_mul_2exp (t1, t1, uz); + + mpz_setbit (s1, vz); + power = uz + vz; + + if (tu->_mp_size > 0) + { + mp_bitcnt_t shift; + shift = mpz_make_odd (tu); + mpz_setbit (t0, uz + shift); + power += shift; + + for (;;) + { + int c; + c = mpz_cmp (tu, tv); + if (c == 0) + break; + + if (c < 0) + { + /* tv = tv' + tu + * + * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv' + * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */ + + mpz_sub (tv, tv, tu); + mpz_add (t0, t0, t1); + mpz_add (s0, s0, s1); + + shift = mpz_make_odd (tv); + mpz_mul_2exp (t1, t1, shift); + mpz_mul_2exp (s1, s1, shift); + } + else + { + mpz_sub (tu, tu, tv); + mpz_add (t1, t0, t1); + mpz_add (s1, s0, s1); + + shift = mpz_make_odd (tu); + mpz_mul_2exp (t0, t0, shift); + mpz_mul_2exp (s0, s0, shift); + } + power += shift; + } + } + else + mpz_setbit (t0, uz); + + /* Now tv = odd part of gcd, and -s0 and t0 are corresponding + cofactors. */ + + mpz_mul_2exp (tv, tv, gz); + mpz_neg (s0, s0); + + /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To + adjust cofactors, we need u / g and v / g */ + + mpz_divexact (s1, v, tv); + mpz_abs (s1, s1); + mpz_divexact (t1, u, tv); + mpz_abs (t1, t1); + + while (power-- > 0) + { + /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */ + if (mpz_odd_p (s0) || mpz_odd_p (t0)) + { + mpz_sub (s0, s0, s1); + mpz_add (t0, t0, t1); + } + assert (mpz_even_p (t0) && mpz_even_p (s0)); + mpz_tdiv_q_2exp (s0, s0, 1); + mpz_tdiv_q_2exp (t0, t0, 1); + } + + /* Choose small cofactors (they should generally satify + + |s| < |u| / 2g and |t| < |v| / 2g, + + with some documented exceptions). Always choose the smallest s, + if there are two choices for s with same absolute value, choose + the one with smallest corresponding t (this asymmetric condition + is needed to prefer s = 0, |t| = 1 when g = |a| = |b|). 
*/ + mpz_add (s1, s0, s1); + mpz_sub (t1, t0, t1); + cmp = mpz_cmpabs (s0, s1); + if (cmp > 0 || (cmp == 0 && mpz_cmpabs (t0, t1) > 0)) + { + mpz_swap (s0, s1); + mpz_swap (t0, t1); + } + if (u->_mp_size < 0) + mpz_neg (s0, s0); + if (v->_mp_size < 0) + mpz_neg (t0, t0); + + mpz_swap (g, tv); + if (s) + mpz_swap (s, s0); + if (t) + mpz_swap (t, t0); + + mpz_clear (tu); + mpz_clear (tv); + mpz_clear (s0); + mpz_clear (s1); + mpz_clear (t0); + mpz_clear (t1); +} + +void +mpz_lcm (mpz_t r, const mpz_t u, const mpz_t v) +{ + mpz_t g; + + if (u->_mp_size == 0 || v->_mp_size == 0) + { + r->_mp_size = 0; + return; + } + + mpz_init (g); + + mpz_gcd (g, u, v); + mpz_divexact (g, u, g); + mpz_mul (r, g, v); + + mpz_clear (g); + mpz_abs (r, r); +} + +void +mpz_lcm_ui (mpz_t r, const mpz_t u, unsigned long v) +{ + if (v == 0 || u->_mp_size == 0) + { + r->_mp_size = 0; + return; + } + + v /= mpz_gcd_ui (NULL, u, v); + mpz_mul_ui (r, u, v); + + mpz_abs (r, r); +} + +int +mpz_invert (mpz_t r, const mpz_t u, const mpz_t m) +{ + mpz_t g, tr; + int invertible; + + if (u->_mp_size == 0 || mpz_cmpabs_ui (m, 1) <= 0) + return 0; + + mpz_init (g); + mpz_init (tr); + + mpz_gcdext (g, tr, NULL, u, m); + invertible = (mpz_cmp_ui (g, 1) == 0); + + if (invertible) + { + if (tr->_mp_size < 0) + { + if (m->_mp_size >= 0) + mpz_add (tr, tr, m); + else + mpz_sub (tr, tr, m); + } + mpz_swap (r, tr); + } + + mpz_clear (g); + mpz_clear (tr); + return invertible; +} + + +/* Higher level operations (sqrt, pow and root) */ + +void +mpz_pow_ui (mpz_t r, const mpz_t b, unsigned long e) +{ + unsigned long bit; + mpz_t tr; + mpz_init_set_ui (tr, 1); + + bit = GMP_ULONG_HIGHBIT; + do + { + mpz_mul (tr, tr, tr); + if (e & bit) + mpz_mul (tr, tr, b); + bit >>= 1; + } + while (bit > 0); + + mpz_swap (r, tr); + mpz_clear (tr); +} + +void +mpz_ui_pow_ui (mpz_t r, unsigned long blimb, unsigned long e) +{ + mpz_t b; + + mpz_init_set_ui (b, blimb); + mpz_pow_ui (r, b, e); + mpz_clear (b); +} + +void +mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m) +{ + mpz_t tr; + mpz_t base; + mp_size_t en, mn; + mp_srcptr mp; + struct gmp_div_inverse minv; + unsigned shift; + mp_ptr tp = NULL; + + en = GMP_ABS (e->_mp_size); + mn = GMP_ABS (m->_mp_size); + if (mn == 0) + gmp_die ("mpz_powm: Zero modulo."); + + if (en == 0) + { + mpz_set_ui (r, mpz_cmpabs_ui (m, 1)); + return; + } + + mp = m->_mp_d; + mpn_div_qr_invert (&minv, mp, mn); + shift = minv.shift; + + if (shift > 0) + { + /* To avoid shifts, we do all our reductions, except the final + one, using a *normalized* m. */ + minv.shift = 0; + + tp = gmp_alloc_limbs (mn); + gmp_assert_nocarry (mpn_lshift (tp, mp, mn, shift)); + mp = tp; + } + + mpz_init (base); + + if (e->_mp_size < 0) + { + if (!mpz_invert (base, b, m)) + gmp_die ("mpz_powm: Negative exponent and non-invertible base."); + } + else + { + mp_size_t bn; + mpz_abs (base, b); + + bn = base->_mp_size; + if (bn >= mn) + { + mpn_div_qr_preinv (NULL, base->_mp_d, base->_mp_size, mp, mn, &minv); + bn = mn; + } + + /* We have reduced the absolute value. Now take care of the + sign. Note that we get zero represented non-canonically as + m. 
*/ + if (b->_mp_size < 0) + { + mp_ptr bp = MPZ_REALLOC (base, mn); + gmp_assert_nocarry (mpn_sub (bp, mp, mn, bp, bn)); + bn = mn; + } + base->_mp_size = mpn_normalized_size (base->_mp_d, bn); + } + mpz_init_set_ui (tr, 1); + + while (--en >= 0) + { + mp_limb_t w = e->_mp_d[en]; + mp_limb_t bit; + + bit = GMP_LIMB_HIGHBIT; + do + { + mpz_mul (tr, tr, tr); + if (w & bit) + mpz_mul (tr, tr, base); + if (tr->_mp_size > mn) + { + mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); + tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); + } + bit >>= 1; + } + while (bit > 0); + } + + /* Final reduction */ + if (tr->_mp_size >= mn) + { + minv.shift = shift; + mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); + tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); + } + if (tp) + gmp_free_limbs (tp, mn); + + mpz_swap (r, tr); + mpz_clear (tr); + mpz_clear (base); +} + +void +mpz_powm_ui (mpz_t r, const mpz_t b, unsigned long elimb, const mpz_t m) +{ + mpz_t e; + + mpz_init_set_ui (e, elimb); + mpz_powm (r, b, e, m); + mpz_clear (e); +} + +/* x=trunc(y^(1/z)), r=y-x^z */ +void +mpz_rootrem (mpz_t x, mpz_t r, const mpz_t y, unsigned long z) +{ + int sgn; + mp_bitcnt_t bc; + mpz_t t, u; + + sgn = y->_mp_size < 0; + if ((~z & sgn) != 0) + gmp_die ("mpz_rootrem: Negative argument, with even root."); + if (z == 0) + gmp_die ("mpz_rootrem: Zeroth root."); + + if (mpz_cmpabs_ui (y, 1) <= 0) { + if (x) + mpz_set (x, y); + if (r) + r->_mp_size = 0; + return; + } + + mpz_init (u); + mpz_init (t); + bc = (mpz_sizeinbase (y, 2) - 1) / z + 1; + mpz_setbit (t, bc); + + if (z == 2) /* simplify sqrt loop: z-1 == 1 */ + do { + mpz_swap (u, t); /* u = x */ + mpz_tdiv_q (t, y, u); /* t = y/x */ + mpz_add (t, t, u); /* t = y/x + x */ + mpz_tdiv_q_2exp (t, t, 1); /* x'= (y/x + x)/2 */ + } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ + else /* z != 2 */ { + mpz_t v; + + mpz_init (v); + if (sgn) + mpz_neg (t, t); + + do { + mpz_swap (u, t); /* u = x */ + mpz_pow_ui (t, u, z - 1); /* t = x^(z-1) */ + mpz_tdiv_q (t, y, t); /* t = y/x^(z-1) */ + mpz_mul_ui (v, u, z - 1); /* v = x*(z-1) */ + mpz_add (t, t, v); /* t = y/x^(z-1) + x*(z-1) */ + mpz_tdiv_q_ui (t, t, z); /* x'=(y/x^(z-1) + x*(z-1))/z */ + } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ + + mpz_clear (v); + } + + if (r) { + mpz_pow_ui (t, u, z); + mpz_sub (r, y, t); + } + if (x) + mpz_swap (x, u); + mpz_clear (u); + mpz_clear (t); +} + +int +mpz_root (mpz_t x, const mpz_t y, unsigned long z) +{ + int res; + mpz_t r; + + mpz_init (r); + mpz_rootrem (x, r, y, z); + res = r->_mp_size == 0; + mpz_clear (r); + + return res; +} + +/* Compute s = floor(sqrt(u)) and r = u - s^2. 
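+   (For example, u = 10 yields s = 3 and r = 1.)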
Allows r == NULL */ +void +mpz_sqrtrem (mpz_t s, mpz_t r, const mpz_t u) +{ + mpz_rootrem (s, r, u, 2); +} + +void +mpz_sqrt (mpz_t s, const mpz_t u) +{ + mpz_rootrem (s, NULL, u, 2); +} + +int +mpz_perfect_square_p (const mpz_t u) +{ + if (u->_mp_size <= 0) + return (u->_mp_size == 0); + else + return mpz_root (NULL, u, 2); +} + +int +mpn_perfect_square_p (mp_srcptr p, mp_size_t n) +{ + mpz_t t; + + assert (n > 0); + assert (p [n-1] != 0); + return mpz_root (NULL, mpz_roinit_normal_n (t, p, n), 2); +} + +mp_size_t +mpn_sqrtrem (mp_ptr sp, mp_ptr rp, mp_srcptr p, mp_size_t n) +{ + mpz_t s, r, u; + mp_size_t res; + + assert (n > 0); + assert (p [n-1] != 0); + + mpz_init (r); + mpz_init (s); + mpz_rootrem (s, r, mpz_roinit_normal_n (u, p, n), 2); + + assert (s->_mp_size == (n+1)/2); + mpn_copyd (sp, s->_mp_d, s->_mp_size); + mpz_clear (s); + res = r->_mp_size; + if (rp) + mpn_copyd (rp, r->_mp_d, res); + mpz_clear (r); + return res; +} + +/* Combinatorics */ + +void +mpz_mfac_uiui (mpz_t x, unsigned long n, unsigned long m) +{ + mpz_set_ui (x, n + (n == 0)); + if (m + 1 < 2) return; + while (n > m + 1) + mpz_mul_ui (x, x, n -= m); +} + +void +mpz_2fac_ui (mpz_t x, unsigned long n) +{ + mpz_mfac_uiui (x, n, 2); +} + +void +mpz_fac_ui (mpz_t x, unsigned long n) +{ + mpz_mfac_uiui (x, n, 1); +} + +void +mpz_bin_uiui (mpz_t r, unsigned long n, unsigned long k) +{ + mpz_t t; + + mpz_set_ui (r, k <= n); + + if (k > (n >> 1)) + k = (k <= n) ? n - k : 0; + + mpz_init (t); + mpz_fac_ui (t, k); + + for (; k > 0; --k) + mpz_mul_ui (r, r, n--); + + mpz_divexact (r, r, t); + mpz_clear (t); +} + + +/* Primality testing */ + +/* Computes Kronecker (a/b) with odd b, a!=0 and GCD(a,b) = 1 */ +/* Adapted from JACOBI_BASE_METHOD==4 in mpn/generic/jacbase.c */ +static int +gmp_jacobi_coprime (mp_limb_t a, mp_limb_t b) +{ + int c, bit = 0; + + assert (b & 1); + assert (a != 0); + /* assert (mpn_gcd_11 (a, b) == 1); */ + + /* Below, we represent a and b shifted right so that the least + significant one bit is implicit. */ + b >>= 1; + + gmp_ctz(c, a); + a >>= 1; + + for (;;) + { + a >>= c; + /* (2/b) = -1 if b = 3 or 5 mod 8 */ + bit ^= c & (b ^ (b >> 1)); + if (a < b) + { + if (a == 0) + return bit & 1 ? -1 : 1; + bit ^= a & b; + a = b - a; + b -= a; + } + else + { + a -= b; + assert (a != 0); + } + + gmp_ctz(c, a); + ++c; + } +} + +static void +gmp_lucas_step_k_2k (mpz_t V, mpz_t Qk, const mpz_t n) +{ + mpz_mod (Qk, Qk, n); + /* V_{2k} <- V_k ^ 2 - 2Q^k */ + mpz_mul (V, V, V); + mpz_submul_ui (V, Qk, 2); + mpz_tdiv_r (V, V, n); + /* Q^{2k} = (Q^k)^2 */ + mpz_mul (Qk, Qk, Qk); +} + +/* Computes V_k, Q^k (mod n) for the Lucas' sequence */ +/* with P=1, Q=Q; k = (n>>b0)|1. */ +/* Requires an odd n > 4; b0 > 0; -2*Q must not overflow a long */ +/* Returns (U_k == 0) and sets V=V_k and Qk=Q^k. 
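+   Here U_k and V_k are the Lucas sequences U_0 = 0, U_1 = 1, V_0 = 2, V_1 = P,
+   with X_{k+1} = P*X_k - Q*X_{k-1}; this test uses P = 1.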
*/ +static int +gmp_lucas_mod (mpz_t V, mpz_t Qk, long Q, + mp_bitcnt_t b0, const mpz_t n) +{ + mp_bitcnt_t bs; + mpz_t U; + int res; + + assert (b0 > 0); + assert (Q <= - (LONG_MIN / 2)); + assert (Q >= - (LONG_MAX / 2)); + assert (mpz_cmp_ui (n, 4) > 0); + assert (mpz_odd_p (n)); + + mpz_init_set_ui (U, 1); /* U1 = 1 */ + mpz_set_ui (V, 1); /* V1 = 1 */ + mpz_set_si (Qk, Q); + + for (bs = mpz_sizeinbase (n, 2) - 1; --bs >= b0;) + { + /* U_{2k} <- U_k * V_k */ + mpz_mul (U, U, V); + /* V_{2k} <- V_k ^ 2 - 2Q^k */ + /* Q^{2k} = (Q^k)^2 */ + gmp_lucas_step_k_2k (V, Qk, n); + + /* A step k->k+1 is performed if the bit in $n$ is 1 */ + /* mpz_tstbit(n,bs) or the bit is 0 in $n$ but */ + /* should be 1 in $n+1$ (bs == b0) */ + if (b0 == bs || mpz_tstbit (n, bs)) + { + /* Q^{k+1} <- Q^k * Q */ + mpz_mul_si (Qk, Qk, Q); + /* U_{k+1} <- (U_k + V_k) / 2 */ + mpz_swap (U, V); /* Keep in V the old value of U_k */ + mpz_add (U, U, V); + /* We have to compute U/2, so we need an even value, */ + /* equivalent (mod n) */ + if (mpz_odd_p (U)) + mpz_add (U, U, n); + mpz_tdiv_q_2exp (U, U, 1); + /* V_{k+1} <-(D*U_k + V_k) / 2 = + U_{k+1} + (D-1)/2*U_k = U_{k+1} - 2Q*U_k */ + mpz_mul_si (V, V, -2*Q); + mpz_add (V, U, V); + mpz_tdiv_r (V, V, n); + } + mpz_tdiv_r (U, U, n); + } + + res = U->_mp_size == 0; + mpz_clear (U); + return res; +} + +/* Performs strong Lucas' test on x, with parameters suggested */ +/* for the BPSW test. Qk is only passed to recycle a variable. */ +/* Requires GCD (x,6) = 1.*/ +static int +gmp_stronglucas (const mpz_t x, mpz_t Qk) +{ + mp_bitcnt_t b0; + mpz_t V, n; + mp_limb_t maxD, D; /* The absolute value is stored. */ + long Q; + mp_limb_t tl; + + /* Test on the absolute value. */ + mpz_roinit_normal_n (n, x->_mp_d, GMP_ABS (x->_mp_size)); + + assert (mpz_odd_p (n)); + /* assert (mpz_gcd_ui (NULL, n, 6) == 1); */ + if (mpz_root (Qk, n, 2)) + return 0; /* A square is composite. */ + + /* Check Ds up to square root (in case, n is prime) + or avoid overflows */ + maxD = (Qk->_mp_size == 1) ? Qk->_mp_d [0] - 1 : GMP_LIMB_MAX; + + D = 3; + /* Search a D such that (D/n) = -1 in the sequence 5,-7,9,-11,.. */ + /* For those Ds we have (D/n) = (n/|D|) */ + do + { + if (D >= maxD) + return 1 + (D != GMP_LIMB_MAX); /* (1 + ! ~ D) */ + D += 2; + tl = mpz_tdiv_ui (n, D); + if (tl == 0) + return 0; + } + while (gmp_jacobi_coprime (tl, D) == 1); + + mpz_init (V); + + /* n-(D/n) = n+1 = d*2^{b0}, with d = (n>>b0) | 1 */ + b0 = mpn_common_scan (~ n->_mp_d[0], 0, n->_mp_d, n->_mp_size, GMP_LIMB_MAX); + /* b0 = mpz_scan0 (n, 0); */ + + /* D= P^2 - 4Q; P = 1; Q = (1-D)/4 */ + Q = (D & 2) ? (long) (D >> 2) + 1 : -(long) (D >> 2); + + if (! gmp_lucas_mod (V, Qk, Q, b0, n)) /* If Ud != 0 */ + while (V->_mp_size != 0 && --b0 != 0) /* while Vk != 0 */ + /* V <- V ^ 2 - 2Q^k */ + /* Q^{2k} = (Q^k)^2 */ + gmp_lucas_step_k_2k (V, Qk, n); + + mpz_clear (V); + return (b0 != 0); +} + +static int +gmp_millerrabin (const mpz_t n, const mpz_t nm1, mpz_t y, + const mpz_t q, mp_bitcnt_t k) +{ + assert (k > 0); + + /* Caller must initialize y to the base. */ + mpz_powm (y, y, q, n); + + if (mpz_cmp_ui (y, 1) == 0 || mpz_cmp (y, nm1) == 0) + return 1; + + while (--k > 0) + { + mpz_powm_ui (y, y, 2, n); + if (mpz_cmp (y, nm1) == 0) + return 1; + } + return 0; +} + +/* This product is 0xc0cfd797, and fits in 32 bits. 
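+   It is the product of the odd primes up to 29, so a single gcd against it
+   lets mpz_probab_prime_p below discard any remaining candidate divisible by
+   one of those primes.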
*/ +#define GMP_PRIME_PRODUCT \ + (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL) + +/* Bit (p+1)/2 is set, for each odd prime <= 61 */ +#define GMP_PRIME_MASK 0xc96996dcUL + +int +mpz_probab_prime_p (const mpz_t n, int reps) +{ + mpz_t nm1; + mpz_t q; + mpz_t y; + mp_bitcnt_t k; + int is_prime; + int j; + + /* Note that we use the absolute value of n only, for compatibility + with the real GMP. */ + if (mpz_even_p (n)) + return (mpz_cmpabs_ui (n, 2) == 0) ? 2 : 0; + + /* Above test excludes n == 0 */ + assert (n->_mp_size != 0); + + if (mpz_cmpabs_ui (n, 64) < 0) + return (GMP_PRIME_MASK >> (n->_mp_d[0] >> 1)) & 2; + + if (mpz_gcd_ui (NULL, n, GMP_PRIME_PRODUCT) != 1) + return 0; + + /* All prime factors are >= 31. */ + if (mpz_cmpabs_ui (n, 31*31) < 0) + return 2; + + mpz_init (nm1); + mpz_init (q); + + /* Find q and k, where q is odd and n = 1 + 2**k * q. */ + mpz_abs (nm1, n); + nm1->_mp_d[0] -= 1; + /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ + k = mpn_scan1 (nm1->_mp_d, 0); + mpz_tdiv_q_2exp (q, nm1, k); + + /* BPSW test */ + mpz_init_set_ui (y, 2); + is_prime = gmp_millerrabin (n, nm1, y, q, k) && gmp_stronglucas (n, y); + reps -= 24; /* skip the first 24 repetitions */ + + /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] = + j^2 + j + 41 using Euler's polynomial. We potentially stop early, + if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps > + 30 (a[30] == 971 > 31*31 == 961). */ + + for (j = 0; is_prime & (j < reps); j++) + { + mpz_set_ui (y, (unsigned long) j*j+j+41); + if (mpz_cmp (y, nm1) >= 0) + { + /* Don't try any further bases. This "early" break does not affect + the result for any reasonable reps value (<=5000 was tested) */ + assert (j >= 30); + break; + } + is_prime = gmp_millerrabin (n, nm1, y, q, k); + } + mpz_clear (nm1); + mpz_clear (q); + mpz_clear (y); + + return is_prime; +} + + +/* Logical operations and bit manipulation. */ + +/* Numbers are treated as if represented in two's complement (and + infinitely sign extended). For a negative values we get the two's + complement from -x = ~x + 1, where ~ is bitwise complement. + Negation transforms + + xxxx10...0 + + into + + yyyy10...0 + + where yyyy is the bitwise complement of xxxx. So least significant + bits, up to and including the first one bit, are unchanged, and + the more significant bits are all complemented. + + To change a bit from zero to one in a negative number, subtract the + corresponding power of two from the absolute value. This can never + underflow. To change a bit from one to zero, add the corresponding + power of two, and this might overflow. E.g., if x = -001111, the + two's complement is 110001. Clearing the least significant bit, we + get two's complement 110000, and -010000. */ + +int +mpz_tstbit (const mpz_t d, mp_bitcnt_t bit_index) +{ + mp_size_t limb_index; + unsigned shift; + mp_size_t ds; + mp_size_t dn; + mp_limb_t w; + int bit; + + ds = d->_mp_size; + dn = GMP_ABS (ds); + limb_index = bit_index / GMP_LIMB_BITS; + if (limb_index >= dn) + return ds < 0; + + shift = bit_index % GMP_LIMB_BITS; + w = d->_mp_d[limb_index]; + bit = (w >> shift) & 1; + + if (ds < 0) + { + /* d < 0. Check if any of the bits below is set: If so, our bit + must be complemented. 
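+        For example, |d| = 001100 has two's complement ...110100: bits at or
+        below the lowest one bit of |d| are unchanged, every bit above it is
+        complemented.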
*/ + if (shift > 0 && (mp_limb_t) (w << (GMP_LIMB_BITS - shift)) > 0) + return bit ^ 1; + while (--limb_index >= 0) + if (d->_mp_d[limb_index] > 0) + return bit ^ 1; + } + return bit; +} + +static void +mpz_abs_add_bit (mpz_t d, mp_bitcnt_t bit_index) +{ + mp_size_t dn, limb_index; + mp_limb_t bit; + mp_ptr dp; + + dn = GMP_ABS (d->_mp_size); + + limb_index = bit_index / GMP_LIMB_BITS; + bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); + + if (limb_index >= dn) + { + mp_size_t i; + /* The bit should be set outside of the end of the number. + We have to increase the size of the number. */ + dp = MPZ_REALLOC (d, limb_index + 1); + + dp[limb_index] = bit; + for (i = dn; i < limb_index; i++) + dp[i] = 0; + dn = limb_index + 1; + } + else + { + mp_limb_t cy; + + dp = d->_mp_d; + + cy = mpn_add_1 (dp + limb_index, dp + limb_index, dn - limb_index, bit); + if (cy > 0) + { + dp = MPZ_REALLOC (d, dn + 1); + dp[dn++] = cy; + } + } + + d->_mp_size = (d->_mp_size < 0) ? - dn : dn; +} + +static void +mpz_abs_sub_bit (mpz_t d, mp_bitcnt_t bit_index) +{ + mp_size_t dn, limb_index; + mp_ptr dp; + mp_limb_t bit; + + dn = GMP_ABS (d->_mp_size); + dp = d->_mp_d; + + limb_index = bit_index / GMP_LIMB_BITS; + bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); + + assert (limb_index < dn); + + gmp_assert_nocarry (mpn_sub_1 (dp + limb_index, dp + limb_index, + dn - limb_index, bit)); + dn = mpn_normalized_size (dp, dn); + d->_mp_size = (d->_mp_size < 0) ? - dn : dn; +} + +void +mpz_setbit (mpz_t d, mp_bitcnt_t bit_index) +{ + if (!mpz_tstbit (d, bit_index)) + { + if (d->_mp_size >= 0) + mpz_abs_add_bit (d, bit_index); + else + mpz_abs_sub_bit (d, bit_index); + } +} + +void +mpz_clrbit (mpz_t d, mp_bitcnt_t bit_index) +{ + if (mpz_tstbit (d, bit_index)) + { + if (d->_mp_size >= 0) + mpz_abs_sub_bit (d, bit_index); + else + mpz_abs_add_bit (d, bit_index); + } +} + +void +mpz_combit (mpz_t d, mp_bitcnt_t bit_index) +{ + if (mpz_tstbit (d, bit_index) ^ (d->_mp_size < 0)) + mpz_abs_sub_bit (d, bit_index); + else + mpz_abs_add_bit (d, bit_index); +} + +void +mpz_com (mpz_t r, const mpz_t u) +{ + mpz_add_ui (r, u, 1); + mpz_neg (r, r); +} + +void +mpz_and (mpz_t r, const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, rn, i; + mp_ptr up, vp, rp; + + mp_limb_t ux, vx, rx; + mp_limb_t uc, vc, rc; + mp_limb_t ul, vl, rl; + + un = GMP_ABS (u->_mp_size); + vn = GMP_ABS (v->_mp_size); + if (un < vn) + { + MPZ_SRCPTR_SWAP (u, v); + MP_SIZE_T_SWAP (un, vn); + } + if (vn == 0) + { + r->_mp_size = 0; + return; + } + + uc = u->_mp_size < 0; + vc = v->_mp_size < 0; + rc = uc & vc; + + ux = -uc; + vx = -vc; + rx = -rc; + + /* If the smaller input is positive, higher limbs don't matter. */ + rn = vx ? un : vn; + + rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); + + up = u->_mp_d; + vp = v->_mp_d; + + i = 0; + do + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + vl = (vp[i] ^ vx) + vc; + vc = vl < vc; + + rl = ( (ul & vl) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + while (++i < vn); + assert (vc == 0); + + for (; i < rn; i++) + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + rl = ( (ul & vx) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + if (rc) + rp[rn++] = rc; + else + rn = mpn_normalized_size (rp, rn); + + r->_mp_size = rx ? 
-rn : rn; +} + +void +mpz_ior (mpz_t r, const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, rn, i; + mp_ptr up, vp, rp; + + mp_limb_t ux, vx, rx; + mp_limb_t uc, vc, rc; + mp_limb_t ul, vl, rl; + + un = GMP_ABS (u->_mp_size); + vn = GMP_ABS (v->_mp_size); + if (un < vn) + { + MPZ_SRCPTR_SWAP (u, v); + MP_SIZE_T_SWAP (un, vn); + } + if (vn == 0) + { + mpz_set (r, u); + return; + } + + uc = u->_mp_size < 0; + vc = v->_mp_size < 0; + rc = uc | vc; + + ux = -uc; + vx = -vc; + rx = -rc; + + /* If the smaller input is negative, by sign extension higher limbs + don't matter. */ + rn = vx ? vn : un; + + rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); + + up = u->_mp_d; + vp = v->_mp_d; + + i = 0; + do + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + vl = (vp[i] ^ vx) + vc; + vc = vl < vc; + + rl = ( (ul | vl) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + while (++i < vn); + assert (vc == 0); + + for (; i < rn; i++) + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + rl = ( (ul | vx) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + if (rc) + rp[rn++] = rc; + else + rn = mpn_normalized_size (rp, rn); + + r->_mp_size = rx ? -rn : rn; +} + +void +mpz_xor (mpz_t r, const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, i; + mp_ptr up, vp, rp; + + mp_limb_t ux, vx, rx; + mp_limb_t uc, vc, rc; + mp_limb_t ul, vl, rl; + + un = GMP_ABS (u->_mp_size); + vn = GMP_ABS (v->_mp_size); + if (un < vn) + { + MPZ_SRCPTR_SWAP (u, v); + MP_SIZE_T_SWAP (un, vn); + } + if (vn == 0) + { + mpz_set (r, u); + return; + } + + uc = u->_mp_size < 0; + vc = v->_mp_size < 0; + rc = uc ^ vc; + + ux = -uc; + vx = -vc; + rx = -rc; + + rp = MPZ_REALLOC (r, un + (mp_size_t) rc); + + up = u->_mp_d; + vp = v->_mp_d; + + i = 0; + do + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + vl = (vp[i] ^ vx) + vc; + vc = vl < vc; + + rl = (ul ^ vl ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + while (++i < vn); + assert (vc == 0); + + for (; i < un; i++) + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + rl = (ul ^ ux) + rc; + rc = rl < rc; + rp[i] = rl; + } + if (rc) + rp[un++] = rc; + else + un = mpn_normalized_size (rp, un); + + r->_mp_size = rx ? -un : un; +} + +static unsigned +gmp_popcount_limb (mp_limb_t x) +{ + unsigned c; + + /* Do 16 bits at a time, to avoid limb-sized constants. 
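+     The SWAR steps below first leave a 2-bit count in each 2-bit field
+     (x - ((x >> 1) & 0x5555)), widen those to 4-bit counts with the 0x3333
+     masks, fold each byte's count into its low nibble, and finally add the
+     two byte counts with the 0x000f masks.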
*/ + int LOCAL_SHIFT_BITS = 16; + for (c = 0; x > 0;) + { + unsigned w = x - ((x >> 1) & 0x5555); + w = ((w >> 2) & 0x3333) + (w & 0x3333); + w = (w >> 4) + w; + w = ((w >> 8) & 0x000f) + (w & 0x000f); + c += w; + if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) + x >>= LOCAL_SHIFT_BITS; + else + x = 0; + } + return c; +} + +mp_bitcnt_t +mpn_popcount (mp_srcptr p, mp_size_t n) +{ + mp_size_t i; + mp_bitcnt_t c; + + for (c = 0, i = 0; i < n; i++) + c += gmp_popcount_limb (p[i]); + + return c; +} + +mp_bitcnt_t +mpz_popcount (const mpz_t u) +{ + mp_size_t un; + + un = u->_mp_size; + + if (un < 0) + return ~(mp_bitcnt_t) 0; + + return mpn_popcount (u->_mp_d, un); +} + +mp_bitcnt_t +mpz_hamdist (const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, i; + mp_limb_t uc, vc, ul, vl, comp; + mp_srcptr up, vp; + mp_bitcnt_t c; + + un = u->_mp_size; + vn = v->_mp_size; + + if ( (un ^ vn) < 0) + return ~(mp_bitcnt_t) 0; + + comp = - (uc = vc = (un < 0)); + if (uc) + { + assert (vn < 0); + un = -un; + vn = -vn; + } + + up = u->_mp_d; + vp = v->_mp_d; + + if (un < vn) + MPN_SRCPTR_SWAP (up, un, vp, vn); + + for (i = 0, c = 0; i < vn; i++) + { + ul = (up[i] ^ comp) + uc; + uc = ul < uc; + + vl = (vp[i] ^ comp) + vc; + vc = vl < vc; + + c += gmp_popcount_limb (ul ^ vl); + } + assert (vc == 0); + + for (; i < un; i++) + { + ul = (up[i] ^ comp) + uc; + uc = ul < uc; + + c += gmp_popcount_limb (ul ^ comp); + } + + return c; +} + +mp_bitcnt_t +mpz_scan1 (const mpz_t u, mp_bitcnt_t starting_bit) +{ + mp_ptr up; + mp_size_t us, un, i; + mp_limb_t limb, ux; + + us = u->_mp_size; + un = GMP_ABS (us); + i = starting_bit / GMP_LIMB_BITS; + + /* Past the end there's no 1 bits for u>=0, or an immediate 1 bit + for u<0. Notice this test picks up any u==0 too. */ + if (i >= un) + return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit); + + up = u->_mp_d; + ux = 0; + limb = up[i]; + + if (starting_bit != 0) + { + if (us < 0) + { + ux = mpn_zero_p (up, i); + limb = ~ limb + ux; + ux = - (mp_limb_t) (limb >= ux); + } + + /* Mask to 0 all bits before starting_bit, thus ignoring them. */ + limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); + } + + return mpn_common_scan (limb, i, up, un, ux); +} + +mp_bitcnt_t +mpz_scan0 (const mpz_t u, mp_bitcnt_t starting_bit) +{ + mp_ptr up; + mp_size_t us, un, i; + mp_limb_t limb, ux; + + us = u->_mp_size; + ux = - (mp_limb_t) (us >= 0); + un = GMP_ABS (us); + i = starting_bit / GMP_LIMB_BITS; + + /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for + u<0. Notice this test picks up all cases of u==0 too. */ + if (i >= un) + return (ux ? starting_bit : ~(mp_bitcnt_t) 0); + + up = u->_mp_d; + limb = up[i] ^ ux; + + if (ux == 0) + limb -= mpn_zero_p (up, i); /* limb = ~(~limb + zero_p) */ + + /* Mask all bits before starting_bit, thus ignoring them. */ + limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); + + return mpn_common_scan (limb, i, up, un, ux); +} + + +/* MPZ base conversion. 
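+   For power-of-two bases the digit count follows directly from the bit
+   length; for any other base mpz_sizeinbase below obtains it exactly by
+   repeated division by the base.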
*/ + +size_t +mpz_sizeinbase (const mpz_t u, int base) +{ + mp_size_t un, tn; + mp_srcptr up; + mp_ptr tp; + mp_bitcnt_t bits; + struct gmp_div_inverse bi; + size_t ndigits; + + assert (base >= 2); + assert (base <= 62); + + un = GMP_ABS (u->_mp_size); + if (un == 0) + return 1; + + up = u->_mp_d; + + bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]); + switch (base) + { + case 2: + return bits; + case 4: + return (bits + 1) / 2; + case 8: + return (bits + 2) / 3; + case 16: + return (bits + 3) / 4; + case 32: + return (bits + 4) / 5; + /* FIXME: Do something more clever for the common case of base + 10. */ + } + + tp = gmp_alloc_limbs (un); + mpn_copyi (tp, up, un); + mpn_div_qr_1_invert (&bi, base); + + tn = un; + ndigits = 0; + do + { + ndigits++; + mpn_div_qr_1_preinv (tp, tp, tn, &bi); + tn -= (tp[tn-1] == 0); + } + while (tn > 0); + + gmp_free_limbs (tp, un); + return ndigits; +} + +char * +mpz_get_str (char *sp, int base, const mpz_t u) +{ + unsigned bits; + const char *digits; + mp_size_t un; + size_t i, sn, osn; + + digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; + if (base > 1) + { + if (base <= 36) + digits = "0123456789abcdefghijklmnopqrstuvwxyz"; + else if (base > 62) + return NULL; + } + else if (base >= -1) + base = 10; + else + { + base = -base; + if (base > 36) + return NULL; + } + + sn = 1 + mpz_sizeinbase (u, base); + if (!sp) + { + osn = 1 + sn; + sp = (char *) gmp_alloc (osn); + } + else + osn = 0; + un = GMP_ABS (u->_mp_size); + + if (un == 0) + { + sp[0] = '0'; + sn = 1; + goto ret; + } + + i = 0; + + if (u->_mp_size < 0) + sp[i++] = '-'; + + bits = mpn_base_power_of_two_p (base); + + if (bits) + /* Not modified in this case. */ + sn = i + mpn_get_str_bits ((unsigned char *) sp + i, bits, u->_mp_d, un); + else + { + struct mpn_base_info info; + mp_ptr tp; + + mpn_get_base_info (&info, base); + tp = gmp_alloc_limbs (un); + mpn_copyi (tp, u->_mp_d, un); + + sn = i + mpn_get_str_other ((unsigned char *) sp + i, base, &info, tp, un); + gmp_free_limbs (tp, un); + } + + for (; i < sn; i++) + sp[i] = digits[(unsigned char) sp[i]]; + +ret: + sp[sn] = '\0'; + if (osn && osn != sn + 1) + sp = (char*) gmp_realloc (sp, osn, sn + 1); + return sp; +} + +int +mpz_set_str (mpz_t r, const char *sp, int base) +{ + unsigned bits, value_of_a; + mp_size_t rn, alloc; + mp_ptr rp; + size_t dn, sn; + int sign; + unsigned char *dp; + + assert (base == 0 || (base >= 2 && base <= 62)); + + while (isspace( (unsigned char) *sp)) + sp++; + + sign = (*sp == '-'); + sp += sign; + + if (base == 0) + { + if (sp[0] == '0') + { + if (sp[1] == 'x' || sp[1] == 'X') + { + base = 16; + sp += 2; + } + else if (sp[1] == 'b' || sp[1] == 'B') + { + base = 2; + sp += 2; + } + else + base = 8; + } + else + base = 10; + } + + if (!*sp) + { + r->_mp_size = 0; + return -1; + } + sn = strlen(sp); + dp = (unsigned char *) gmp_alloc (sn); + + value_of_a = (base > 36) ? 
36 : 10; + for (dn = 0; *sp; sp++) + { + unsigned digit; + + if (isspace ((unsigned char) *sp)) + continue; + else if (*sp >= '0' && *sp <= '9') + digit = *sp - '0'; + else if (*sp >= 'a' && *sp <= 'z') + digit = *sp - 'a' + value_of_a; + else if (*sp >= 'A' && *sp <= 'Z') + digit = *sp - 'A' + 10; + else + digit = base; /* fail */ + + if (digit >= (unsigned) base) + { + gmp_free (dp, sn); + r->_mp_size = 0; + return -1; + } + + dp[dn++] = digit; + } + + if (!dn) + { + gmp_free (dp, sn); + r->_mp_size = 0; + return -1; + } + bits = mpn_base_power_of_two_p (base); + + if (bits > 0) + { + alloc = (dn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; + rp = MPZ_REALLOC (r, alloc); + rn = mpn_set_str_bits (rp, dp, dn, bits); + } + else + { + struct mpn_base_info info; + mpn_get_base_info (&info, base); + alloc = (dn + info.exp - 1) / info.exp; + rp = MPZ_REALLOC (r, alloc); + rn = mpn_set_str_other (rp, dp, dn, base, &info); + /* Normalization, needed for all-zero input. */ + assert (rn > 0); + rn -= rp[rn-1] == 0; + } + assert (rn <= alloc); + gmp_free (dp, sn); + + r->_mp_size = sign ? - rn : rn; + + return 0; +} + +int +mpz_init_set_str (mpz_t r, const char *sp, int base) +{ + mpz_init (r); + return mpz_set_str (r, sp, base); +} + +size_t +mpz_out_str (FILE *stream, int base, const mpz_t x) +{ + char *str; + size_t len, n; + + str = mpz_get_str (NULL, base, x); + if (!str) + return 0; + len = strlen (str); + n = fwrite (str, 1, len, stream); + gmp_free (str, len + 1); + return n; +} + + +static int +gmp_detect_endian (void) +{ + static const int i = 2; + const unsigned char *p = (const unsigned char *) &i; + return 1 - *p; +} + +/* Import and export. Does not support nails. */ +void +mpz_import (mpz_t r, size_t count, int order, size_t size, int endian, + size_t nails, const void *src) +{ + const unsigned char *p; + ptrdiff_t word_step; + mp_ptr rp; + mp_size_t rn; + + /* The current (partial) limb. */ + mp_limb_t limb; + /* The number of bytes already copied to this limb (starting from + the low end). */ + size_t bytes; + /* The index where the limb should be stored, when completed. */ + mp_size_t i; + + if (nails != 0) + gmp_die ("mpz_import: Nails not supported."); + + assert (order == 1 || order == -1); + assert (endian >= -1 && endian <= 1); + + if (endian == 0) + endian = gmp_detect_endian (); + + p = (unsigned char *) src; + + word_step = (order != endian) ? 2 * size : 0; + + /* Process bytes from the least significant end, so point p at the + least significant word. */ + if (order == 1) + { + p += size * (count - 1); + word_step = - word_step; + } + + /* And at least significant byte of that word. 
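+     (With endian == 1, i.e. big-endian words, that is the last byte of the
+     word, hence the (size - 1) offset.)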
*/ + if (endian == 1) + p += (size - 1); + + rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t); + rp = MPZ_REALLOC (r, rn); + + for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step) + { + size_t j; + for (j = 0; j < size; j++, p -= (ptrdiff_t) endian) + { + limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT); + if (bytes == sizeof(mp_limb_t)) + { + rp[i++] = limb; + bytes = 0; + limb = 0; + } + } + } + assert (i + (bytes > 0) == rn); + if (limb != 0) + rp[i++] = limb; + else + i = mpn_normalized_size (rp, i); + + r->_mp_size = i; +} + +void * +mpz_export (void *r, size_t *countp, int order, size_t size, int endian, + size_t nails, const mpz_t u) +{ + size_t count; + mp_size_t un; + + if (nails != 0) + gmp_die ("mpz_export: Nails not supported."); + + assert (order == 1 || order == -1); + assert (endian >= -1 && endian <= 1); + assert (size > 0 || u->_mp_size == 0); + + un = u->_mp_size; + count = 0; + if (un != 0) + { + size_t k; + unsigned char *p; + ptrdiff_t word_step; + /* The current (partial) limb. */ + mp_limb_t limb; + /* The number of bytes left to do in this limb. */ + size_t bytes; + /* The index where the limb was read. */ + mp_size_t i; + + un = GMP_ABS (un); + + /* Count bytes in top limb. */ + limb = u->_mp_d[un-1]; + assert (limb != 0); + + k = (GMP_LIMB_BITS <= CHAR_BIT); + if (!k) + { + do { + int LOCAL_CHAR_BIT = CHAR_BIT; + k++; limb >>= LOCAL_CHAR_BIT; + } while (limb != 0); + } + /* else limb = 0; */ + + count = (k + (un-1) * sizeof (mp_limb_t) + size - 1) / size; + + if (!r) + r = gmp_alloc (count * size); + + if (endian == 0) + endian = gmp_detect_endian (); + + p = (unsigned char *) r; + + word_step = (order != endian) ? 2 * size : 0; + + /* Process bytes from the least significant end, so point p at the + least significant word. */ + if (order == 1) + { + p += size * (count - 1); + word_step = - word_step; + } + + /* And at least significant byte of that word. */ + if (endian == 1) + p += (size - 1); + + for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step) + { + size_t j; + for (j = 0; j < size; ++j, p -= (ptrdiff_t) endian) + { + if (sizeof (mp_limb_t) == 1) + { + if (i < un) + *p = u->_mp_d[i++]; + else + *p = 0; + } + else + { + int LOCAL_CHAR_BIT = CHAR_BIT; + if (bytes == 0) + { + if (i < un) + limb = u->_mp_d[i++]; + bytes = sizeof (mp_limb_t); + } + *p = limb; + limb >>= LOCAL_CHAR_BIT; + bytes--; + } + } + } + assert (i == un); + assert (k == count); + } + + if (countp) + *countp = count; + + return r; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp.h new file mode 100644 index 0000000000..f28cb360ce --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp.h @@ -0,0 +1,311 @@ +/* mini-gmp, a minimalistic implementation of a GNU GMP subset. + +Copyright 2011-2015, 2017, 2019-2021 Free Software Foundation, Inc. + +This file is part of the GNU MP Library. + +The GNU MP Library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 2 of the License, or (at your option) any + later version. + +or both in parallel, as here. 
+ +The GNU MP Library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the GNU MP Library. If not, +see https://www.gnu.org/licenses/. */ + +/* About mini-gmp: This is a minimal implementation of a subset of the + GMP interface. It is intended for inclusion into applications which + have modest bignums needs, as a fallback when the real GMP library + is not installed. + + This file defines the public interface. */ + +#ifndef __MINI_GMP_H__ +#define __MINI_GMP_H__ + +/* For size_t */ +#include + +#if defined (__cplusplus) +extern "C" { +#endif + +void mp_set_memory_functions (void *(*) (size_t), + void *(*) (void *, size_t, size_t), + void (*) (void *, size_t)); + +void mp_get_memory_functions (void *(**) (size_t), + void *(**) (void *, size_t, size_t), + void (**) (void *, size_t)); + +#ifndef MINI_GMP_LIMB_TYPE +#define MINI_GMP_LIMB_TYPE long +#endif + +typedef unsigned MINI_GMP_LIMB_TYPE mp_limb_t; +typedef long mp_size_t; +typedef unsigned long mp_bitcnt_t; + +typedef mp_limb_t *mp_ptr; +typedef const mp_limb_t *mp_srcptr; + +typedef struct +{ + int _mp_alloc; /* Number of *limbs* allocated and pointed + to by the _mp_d field. */ + int _mp_size; /* abs(_mp_size) is the number of limbs the + last field points to. If _mp_size is + negative this is a negative number. */ + mp_limb_t *_mp_d; /* Pointer to the limbs. */ +} __mpz_struct; + +typedef __mpz_struct mpz_t[1]; + +typedef __mpz_struct *mpz_ptr; +typedef const __mpz_struct *mpz_srcptr; + +extern const int mp_bits_per_limb; + +void mpn_copyi (mp_ptr, mp_srcptr, mp_size_t); +void mpn_copyd (mp_ptr, mp_srcptr, mp_size_t); +void mpn_zero (mp_ptr, mp_size_t); + +int mpn_cmp (mp_srcptr, mp_srcptr, mp_size_t); +int mpn_zero_p (mp_srcptr, mp_size_t); + +mp_limb_t mpn_add_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_add_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +mp_limb_t mpn_add (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); + +mp_limb_t mpn_sub_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_sub_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +mp_limb_t mpn_sub (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); + +mp_limb_t mpn_mul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_addmul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_submul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); + +mp_limb_t mpn_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); +void mpn_mul_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +void mpn_sqr (mp_ptr, mp_srcptr, mp_size_t); +int mpn_perfect_square_p (mp_srcptr, mp_size_t); +mp_size_t mpn_sqrtrem (mp_ptr, mp_ptr, mp_srcptr, mp_size_t); +mp_size_t mpn_gcd (mp_ptr, mp_ptr, mp_size_t, mp_ptr, mp_size_t); + +mp_limb_t mpn_lshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); +mp_limb_t mpn_rshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); + +mp_bitcnt_t mpn_scan0 (mp_srcptr, mp_bitcnt_t); +mp_bitcnt_t mpn_scan1 (mp_srcptr, mp_bitcnt_t); + +void mpn_com (mp_ptr, mp_srcptr, mp_size_t); +mp_limb_t mpn_neg (mp_ptr, mp_srcptr, mp_size_t); + +mp_bitcnt_t mpn_popcount (mp_srcptr, mp_size_t); + +mp_limb_t mpn_invert_3by2 (mp_limb_t, mp_limb_t); +#define mpn_invert_limb(x) mpn_invert_3by2 ((x), 0) + +size_t mpn_get_str (unsigned char *, int, mp_ptr, mp_size_t); 
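+
+/* A minimal usage sketch of the mpz layer declared below (illustrative only;
+   the variable names are arbitrary).  Every mpz_init* call is paired with a
+   matching mpz_clear:
+
+     mpz_t a, b, g;
+     mpz_init_set_ui (a, 100);
+     mpz_init_set_ui (b, 35);
+     mpz_init (g);
+     mpz_gcd (g, a, b);      // g = 5
+     mpz_clear (a);
+     mpz_clear (b);
+     mpz_clear (g);
+*/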
+mp_size_t mpn_set_str (mp_ptr, const unsigned char *, size_t, int); + +void mpz_init (mpz_t); +void mpz_init2 (mpz_t, mp_bitcnt_t); +void mpz_clear (mpz_t); + +#define mpz_odd_p(z) (((z)->_mp_size != 0) & (int) (z)->_mp_d[0]) +#define mpz_even_p(z) (! mpz_odd_p (z)) + +int mpz_sgn (const mpz_t); +int mpz_cmp_si (const mpz_t, long); +int mpz_cmp_ui (const mpz_t, unsigned long); +int mpz_cmp (const mpz_t, const mpz_t); +int mpz_cmpabs_ui (const mpz_t, unsigned long); +int mpz_cmpabs (const mpz_t, const mpz_t); +int mpz_cmp_d (const mpz_t, double); +int mpz_cmpabs_d (const mpz_t, double); + +void mpz_abs (mpz_t, const mpz_t); +void mpz_neg (mpz_t, const mpz_t); +void mpz_swap (mpz_t, mpz_t); + +void mpz_add_ui (mpz_t, const mpz_t, unsigned long); +void mpz_add (mpz_t, const mpz_t, const mpz_t); +void mpz_sub_ui (mpz_t, const mpz_t, unsigned long); +void mpz_ui_sub (mpz_t, unsigned long, const mpz_t); +void mpz_sub (mpz_t, const mpz_t, const mpz_t); + +void mpz_mul_si (mpz_t, const mpz_t, long int); +void mpz_mul_ui (mpz_t, const mpz_t, unsigned long int); +void mpz_mul (mpz_t, const mpz_t, const mpz_t); +void mpz_mul_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_addmul_ui (mpz_t, const mpz_t, unsigned long int); +void mpz_addmul (mpz_t, const mpz_t, const mpz_t); +void mpz_submul_ui (mpz_t, const mpz_t, unsigned long int); +void mpz_submul (mpz_t, const mpz_t, const mpz_t); + +void mpz_cdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_fdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_tdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_cdiv_q (mpz_t, const mpz_t, const mpz_t); +void mpz_fdiv_q (mpz_t, const mpz_t, const mpz_t); +void mpz_tdiv_q (mpz_t, const mpz_t, const mpz_t); +void mpz_cdiv_r (mpz_t, const mpz_t, const mpz_t); +void mpz_fdiv_r (mpz_t, const mpz_t, const mpz_t); +void mpz_tdiv_r (mpz_t, const mpz_t, const mpz_t); + +void mpz_cdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_fdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_tdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_cdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_fdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_tdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); + +void mpz_mod (mpz_t, const mpz_t, const mpz_t); + +void mpz_divexact (mpz_t, const mpz_t, const mpz_t); + +int mpz_divisible_p (const mpz_t, const mpz_t); +int mpz_congruent_p (const mpz_t, const mpz_t, const mpz_t); + +unsigned long mpz_cdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); +unsigned long mpz_fdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); +unsigned long mpz_tdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); +unsigned long mpz_cdiv_q_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_fdiv_q_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_tdiv_q_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_cdiv_r_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_fdiv_r_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_tdiv_r_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_cdiv_ui (const mpz_t, unsigned long); +unsigned long mpz_fdiv_ui (const mpz_t, unsigned long); +unsigned long mpz_tdiv_ui (const mpz_t, unsigned long); + +unsigned long mpz_mod_ui (mpz_t, const mpz_t, unsigned long); + +void mpz_divexact_ui (mpz_t, const mpz_t, unsigned long); + +int mpz_divisible_ui_p (const mpz_t, unsigned long); + +unsigned long mpz_gcd_ui (mpz_t, const mpz_t, unsigned long); +void mpz_gcd (mpz_t, const mpz_t, const mpz_t); +void 
mpz_gcdext (mpz_t, mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_lcm_ui (mpz_t, const mpz_t, unsigned long); +void mpz_lcm (mpz_t, const mpz_t, const mpz_t); +int mpz_invert (mpz_t, const mpz_t, const mpz_t); + +void mpz_sqrtrem (mpz_t, mpz_t, const mpz_t); +void mpz_sqrt (mpz_t, const mpz_t); +int mpz_perfect_square_p (const mpz_t); + +void mpz_pow_ui (mpz_t, const mpz_t, unsigned long); +void mpz_ui_pow_ui (mpz_t, unsigned long, unsigned long); +void mpz_powm (mpz_t, const mpz_t, const mpz_t, const mpz_t); +void mpz_powm_ui (mpz_t, const mpz_t, unsigned long, const mpz_t); + +void mpz_rootrem (mpz_t, mpz_t, const mpz_t, unsigned long); +int mpz_root (mpz_t, const mpz_t, unsigned long); + +void mpz_fac_ui (mpz_t, unsigned long); +void mpz_2fac_ui (mpz_t, unsigned long); +void mpz_mfac_uiui (mpz_t, unsigned long, unsigned long); +void mpz_bin_uiui (mpz_t, unsigned long, unsigned long); + +int mpz_probab_prime_p (const mpz_t, int); + +int mpz_tstbit (const mpz_t, mp_bitcnt_t); +void mpz_setbit (mpz_t, mp_bitcnt_t); +void mpz_clrbit (mpz_t, mp_bitcnt_t); +void mpz_combit (mpz_t, mp_bitcnt_t); + +void mpz_com (mpz_t, const mpz_t); +void mpz_and (mpz_t, const mpz_t, const mpz_t); +void mpz_ior (mpz_t, const mpz_t, const mpz_t); +void mpz_xor (mpz_t, const mpz_t, const mpz_t); + +mp_bitcnt_t mpz_popcount (const mpz_t); +mp_bitcnt_t mpz_hamdist (const mpz_t, const mpz_t); +mp_bitcnt_t mpz_scan0 (const mpz_t, mp_bitcnt_t); +mp_bitcnt_t mpz_scan1 (const mpz_t, mp_bitcnt_t); + +int mpz_fits_slong_p (const mpz_t); +int mpz_fits_ulong_p (const mpz_t); +int mpz_fits_sint_p (const mpz_t); +int mpz_fits_uint_p (const mpz_t); +int mpz_fits_sshort_p (const mpz_t); +int mpz_fits_ushort_p (const mpz_t); +long int mpz_get_si (const mpz_t); +unsigned long int mpz_get_ui (const mpz_t); +double mpz_get_d (const mpz_t); +size_t mpz_size (const mpz_t); +mp_limb_t mpz_getlimbn (const mpz_t, mp_size_t); + +void mpz_realloc2 (mpz_t, mp_bitcnt_t); +mp_srcptr mpz_limbs_read (mpz_srcptr); +mp_ptr mpz_limbs_modify (mpz_t, mp_size_t); +mp_ptr mpz_limbs_write (mpz_t, mp_size_t); +void mpz_limbs_finish (mpz_t, mp_size_t); +mpz_srcptr mpz_roinit_n (mpz_t, mp_srcptr, mp_size_t); + +#define MPZ_ROINIT_N(xp, xs) {{0, (xs),(xp) }} + +void mpz_set_si (mpz_t, signed long int); +void mpz_set_ui (mpz_t, unsigned long int); +void mpz_set (mpz_t, const mpz_t); +void mpz_set_d (mpz_t, double); + +void mpz_init_set_si (mpz_t, signed long int); +void mpz_init_set_ui (mpz_t, unsigned long int); +void mpz_init_set (mpz_t, const mpz_t); +void mpz_init_set_d (mpz_t, double); + +size_t mpz_sizeinbase (const mpz_t, int); +char *mpz_get_str (char *, int, const mpz_t); +int mpz_set_str (mpz_t, const char *, int); +int mpz_init_set_str (mpz_t, const char *, int); + +/* This long list taken from gmp.h. */ +/* For reference, "defined(EOF)" cannot be used here. In g++ 2.95.4, + defines EOF but not FILE. 
*/ +#if defined (FILE) \ + || defined (H_STDIO) \ + || defined (_H_STDIO) /* AIX */ \ + || defined (_STDIO_H) /* glibc, Sun, SCO */ \ + || defined (_STDIO_H_) /* BSD, OSF */ \ + || defined (__STDIO_H) /* Borland */ \ + || defined (__STDIO_H__) /* IRIX */ \ + || defined (_STDIO_INCLUDED) /* HPUX */ \ + || defined (__dj_include_stdio_h_) /* DJGPP */ \ + || defined (_FILE_DEFINED) /* Microsoft */ \ + || defined (__STDIO__) /* Apple MPW MrC */ \ + || defined (_MSL_STDIO_H) /* Metrowerks */ \ + || defined (_STDIO_H_INCLUDED) /* QNX4 */ \ + || defined (_ISO_STDIO_ISO_H) /* Sun C++ */ \ + || defined (__STDIO_LOADED) /* VMS */ \ + || defined (_STDIO) /* HPE NonStop */ \ + || defined (__DEFINED_FILE) /* musl */ +size_t mpz_out_str (FILE *, int, const mpz_t); +#endif + +void mpz_import (mpz_t, size_t, int, size_t, int, size_t, const void *); +void *mpz_export (void *, size_t *, int, size_t, int, size_t, const mpz_t); + +#if defined (__cplusplus) +} +#endif +#endif /* __MINI_GMP_H__ */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mp.c new file mode 100644 index 0000000000..27f4a963db --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mp.c @@ -0,0 +1,357 @@ +#include +#include +#include +#include + +// double-wide multiplication +void +MUL(digit_t *out, const digit_t a, const digit_t b) +{ +#ifdef RADIX_32 + uint64_t r = (uint64_t)a * b; + out[0] = r & 0xFFFFFFFFUL; + out[1] = r >> 32; + +#elif defined(RADIX_64) && defined(_MSC_VER) + uint64_t umul_hi; + out[0] = _umul128(a, b, &umul_hi); + out[1] = umul_hi; + +#elif defined(RADIX_64) && defined(HAVE_UINT128) + unsigned __int128 umul_tmp; + umul_tmp = (unsigned __int128)(a) * (unsigned __int128)(b); + out[0] = (uint64_t)umul_tmp; + out[1] = (uint64_t)(umul_tmp >> 64); + +#else + register digit_t al, ah, bl, bh, temp; + digit_t albl, albh, ahbl, ahbh, res1, res2, res3, carry; + digit_t mask_low = (digit_t)(-1) >> (sizeof(digit_t) * 4), mask_high = (digit_t)(-1) << (sizeof(digit_t) * 4); + al = a & mask_low; // Low part + ah = a >> (sizeof(digit_t) * 4); // High part + bl = b & mask_low; + bh = b >> (sizeof(digit_t) * 4); + + albl = al * bl; + albh = al * bh; + ahbl = ah * bl; + ahbh = ah * bh; + out[0] = albl & mask_low; // out00 + + res1 = albl >> (sizeof(digit_t) * 4); + res2 = ahbl & mask_low; + res3 = albh & mask_low; + temp = res1 + res2 + res3; + carry = temp >> (sizeof(digit_t) * 4); + out[0] ^= temp << (sizeof(digit_t) * 4); // out01 + + res1 = ahbl >> (sizeof(digit_t) * 4); + res2 = albh >> (sizeof(digit_t) * 4); + res3 = ahbh & mask_low; + temp = res1 + res2 + res3 + carry; + out[1] = temp & mask_low; // out10 + carry = temp & mask_high; + out[1] ^= (ahbh & mask_high) + carry; // out11 + +#endif +} + +void +mp_add(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords) +{ // Multiprecision addition + unsigned int i, carry = 0; + + for (i = 0; i < nwords; i++) { + ADDC(c[i], carry, a[i], b[i], carry); + } +} + +digit_t +mp_shiftr(digit_t *x, const unsigned int shift, const unsigned int nwords) +{ // Multiprecision right shift by 1...RADIX-1 + digit_t bit_out = x[0] & 1; + + for (unsigned int i = 0; i < nwords - 1; i++) { + SHIFTR(x[i + 1], x[i], shift, x[i], RADIX); + } + x[nwords - 1] >>= shift; + return bit_out; +} + +void +mp_shiftl(digit_t *x, const unsigned int shift, const unsigned int nwords) +{ // Multiprecision left shift by 1...RADIX-1 + + for (int i = nwords - 1; i > 0; i--) { + SHIFTL(x[i], x[i - 1], shift, x[i], RADIX); + } + x[0] <<= 
shift; +} + +void +multiple_mp_shiftl(digit_t *x, const unsigned int shift, const unsigned int nwords) +{ + int t = shift; + while (t > RADIX - 1) { + mp_shiftl(x, RADIX - 1, nwords); + t = t - (RADIX - 1); + } + mp_shiftl(x, t, nwords); +} + +// The below functions were taken from the EC module + +void +mp_sub(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords) +{ // Multiprecision subtraction, assuming a > b + unsigned int i, borrow = 0; + + for (i = 0; i < nwords; i++) { + SUBC(c[i], borrow, a[i], b[i], borrow); + } +} + +void +select_ct(digit_t *c, const digit_t *a, const digit_t *b, const digit_t mask, const int nwords) +{ // Select c <- a if mask = 0, select c <- b if mask = 1...1 + + for (int i = 0; i < nwords; i++) { + c[i] = ((a[i] ^ b[i]) & mask) ^ a[i]; + } +} + +void +swap_ct(digit_t *a, digit_t *b, const digit_t option, const int nwords) +{ // Swap entries + // If option = 0 then P <- P and Q <- Q, else if option = 0xFF...FF then a <- b and b <- a + digit_t temp; + + for (int i = 0; i < nwords; i++) { + temp = option & (a[i] ^ b[i]); + a[i] = temp ^ a[i]; + b[i] = temp ^ b[i]; + } +} + +int +mp_compare(const digit_t *a, const digit_t *b, unsigned int nwords) +{ // Multiprecision comparison, a=b? : (1) a>b, (0) a=b, (-1) a= 0; i--) { + if (a[i] > b[i]) + return 1; + else if (a[i] < b[i]) + return -1; + } + return 0; +} + +bool +mp_is_zero(const digit_t *a, unsigned int nwords) +{ // Is a multiprecision element zero? + // Returns 1 (true) if a=0, 0 (false) otherwise + digit_t r = 0; + + for (unsigned int i = 0; i < nwords; i++) + r |= a[i] ^ 0; + + return (bool)is_digit_zero_ct(r); +} + +void +mp_mul2(digit_t *c, const digit_t *a, const digit_t *b) +{ // Multiprecision multiplication fixed to two-digit operands + unsigned int carry = 0; + digit_t t0[2], t1[2], t2[2]; + + MUL(t0, a[0], b[0]); + MUL(t1, a[0], b[1]); + ADDC(t0[1], carry, t0[1], t1[0], carry); + ADDC(t1[1], carry, 0, t1[1], carry); + MUL(t2, a[1], b[1]); + ADDC(t2[0], carry, t2[0], t1[1], carry); + ADDC(t2[1], carry, 0, t2[1], carry); + c[0] = t0[0]; + c[1] = t0[1]; + c[2] = t2[0]; + c[3] = t2[1]; +} + +void +mp_print(const digit_t *a, size_t nwords) +{ + printf("0x"); + for (size_t i = 0; i < nwords; i++) { +#ifdef RADIX_32 + printf("%08" PRIx32, a[nwords - i - 1]); // Print each word with 8 hex digits +#elif defined(RADIX_64) + printf("%016" PRIx64, a[nwords - i - 1]); // Print each word with 16 hex digits +#endif + } +} + +void +mp_copy(digit_t *b, const digit_t *a, size_t nwords) +{ + for (size_t i = 0; i < nwords; i++) { + b[i] = a[i]; + } +} + +void +mp_mul(digit_t *c, const digit_t *a, const digit_t *b, size_t nwords) +{ + // Multiprecision multiplication, c = a*b, for nwords-digit inputs, with nwords-digit output + // explicitly does not use the higher half of c, as we do not need in our applications + digit_t carry, UV[2], t[nwords], cc[nwords]; + + for (size_t i = 0; i < nwords; i++) { + cc[i] = 0; + } + + for (size_t i = 0; i < nwords; i++) { + + MUL(t, a[i], b[0]); + + for (size_t j = 1; j < nwords - 1; j++) { + MUL(UV, a[i], b[j]); + ADDC(t[j], carry, t[j], UV[0], 0); + t[j + 1] = UV[1] + carry; + } + + int j = nwords - 1; + MUL(UV, a[i], b[j]); + ADDC(t[j], carry, t[j], UV[0], 0); + + mp_add(&cc[i], &cc[i], t, nwords - i); + } + + mp_copy(c, cc, nwords); +} + +void +mp_mod_2exp(digit_t *a, unsigned int e, unsigned int nwords) +{ // Multiprecision modulo 2^e, with 0 <= a < 2^(e) + unsigned int i, q = e >> LOG2RADIX, r = e & (RADIX - 1); + + if (q < nwords) { + a[q] &= ((digit_t)1 
<< r) - 1; + + for (i = q + 1; i < nwords; i++) { + a[i] = 0; + } + } +} + +void +mp_neg(digit_t *a, unsigned int nwords) +{ // negates a + for (size_t i = 0; i < nwords; i++) { + a[i] ^= -1; + } + + a[0] += 1; +} + +bool +mp_is_one(const digit_t *x, unsigned int nwords) +{ // returns true if x represents 1, and false otherwise + if (x[0] != 1) { + return false; + } + + for (size_t i = 1; i < nwords; i++) { + if (x[i] != 0) { + return false; + } + } + return true; +} + +void +mp_inv_2e(digit_t *b, const digit_t *a, int e, unsigned int nwords) +{ // Inversion modulo 2^e, using Newton's method and Hensel lifting + // we take the first power of 2 larger than e to use + // requires a to be odd, of course + // returns b such that a*b = 1 mod 2^e + assert((a[0] & 1) == 1); + + digit_t x[nwords], y[nwords], aa[nwords], mp_one[nwords], tmp[nwords]; + mp_copy(aa, a, nwords); + + mp_one[0] = 1; + for (unsigned int i = 1; i < nwords; i++) { + mp_one[i] = 0; + } + + int p = 1; + while ((1 << p) < e) { + p++; + } + p -= 2; // using k = 4 for initial inverse + int w = (1 << (p + 2)); + + mp_mod_2exp(aa, w, nwords); + mp_add(x, aa, aa, nwords); + mp_add(x, x, aa, nwords); // should be 3a + x[0] ^= (1 << 1); // so that x equals (3a)^2 xor 2 + mp_mod_2exp(x, w, nwords); // now x*a = 1 mod 2^4, which we lift + + mp_mul(tmp, aa, x, nwords); + mp_neg(tmp, nwords); + mp_add(y, mp_one, tmp, nwords); + + // Hensel lifting for p rounds + for (int i = 0; i < p; i++) { + mp_add(tmp, mp_one, y, nwords); + mp_mul(x, x, tmp, nwords); + mp_mul(y, y, y, nwords); + } + + mp_mod_2exp(x, w, nwords); + mp_copy(b, x, nwords); + + // verify results + mp_mul(x, x, aa, nwords); + mp_mod_2exp(x, w, nwords); + assert(mp_is_one(x, nwords)); +} + +void +mp_invert_matrix(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, int e, unsigned int nwords) +{ + // given a matrix ( ( a, b ), (c, d) ) of values mod 2^e + // returns the inverse matrix gamma ( (d, -b), (-c, a) ) + // where gamma is the inverse of the determinant a*d - b*c + // assumes the matrix is invertible, otherwises, inversion of determinant fails + + int p = 1; + while ((1 << p) < e) { + p++; + } + int w = (1 << (p)); + + digit_t det[nwords], tmp[nwords], resa[nwords], resb[nwords], resc[nwords], resd[nwords]; + mp_mul(tmp, r1, s2, nwords); + mp_mul(det, r2, s1, nwords); + mp_sub(det, tmp, det, nwords); + mp_inv_2e(det, det, e, nwords); + + mp_mul(resa, det, s2, nwords); + mp_mul(resb, det, r2, nwords); + mp_mul(resc, det, s1, nwords); + mp_mul(resd, det, r1, nwords); + + mp_neg(resb, nwords); + mp_neg(resc, nwords); + + mp_mod_2exp(resa, w, nwords); + mp_mod_2exp(resb, w, nwords); + mp_mod_2exp(resc, w, nwords); + mp_mod_2exp(resd, w, nwords); + + mp_copy(r1, resa, nwords); + mp_copy(r2, resb, nwords); + mp_copy(s1, resc, nwords); + mp_copy(s2, resd, nwords); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mp.h new file mode 100644 index 0000000000..b3733b520d --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mp.h @@ -0,0 +1,88 @@ +#ifndef MP_H +#define MP_H + +#include +#include +#include + +// Functions taken from the GF module + +void mp_add(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords); +digit_t mp_shiftr(digit_t *x, const unsigned int shift, const unsigned int nwords); +void multiple_mp_shiftl(digit_t *x, const unsigned int shift, const unsigned int nwords); +void mp_shiftl(digit_t *x, const unsigned int shift, const unsigned int nwords); +void MUL(digit_t 
*out, const digit_t a, const digit_t b); + +// Functions taken from the EC module + +void mp_sub(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords); +void select_ct(digit_t *c, const digit_t *a, const digit_t *b, const digit_t mask, const int nwords); +void swap_ct(digit_t *a, digit_t *b, const digit_t option, const int nwords); +int mp_compare(const digit_t *a, const digit_t *b, unsigned int nwords); +bool mp_is_zero(const digit_t *a, unsigned int nwords); +void mp_mul2(digit_t *c, const digit_t *a, const digit_t *b); + +// Further functions for multiprecision arithmetic +void mp_print(const digit_t *a, size_t nwords); +void mp_copy(digit_t *b, const digit_t *a, size_t nwords); +void mp_neg(digit_t *a, unsigned int nwords); +bool mp_is_one(const digit_t *x, unsigned int nwords); +void mp_mul(digit_t *c, const digit_t *a, const digit_t *b, size_t nwords); +void mp_mod_2exp(digit_t *a, unsigned int e, unsigned int nwords); +void mp_inv_2e(digit_t *b, const digit_t *a, int e, unsigned int nwords); +void mp_invert_matrix(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, int e, unsigned int nwords); + +#define mp_is_odd(x, nwords) (((nwords) != 0) & (int)(x)[0]) +#define mp_is_even(x, nwords) (!mp_is_odd(x, nwords)) + +/********************** Constant-time unsigned comparisons ***********************/ + +// The following functions return 1 (TRUE) if condition is true, 0 (FALSE) otherwise +static inline unsigned int +is_digit_nonzero_ct(digit_t x) +{ // Is x != 0? + return (unsigned int)((x | (0 - x)) >> (RADIX - 1)); +} + +static inline unsigned int +is_digit_zero_ct(digit_t x) +{ // Is x = 0? + return (unsigned int)(1 ^ is_digit_nonzero_ct(x)); +} + +static inline unsigned int +is_digit_lessthan_ct(digit_t x, digit_t y) +{ // Is x < y? 
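+    // Branch-free comparison: the expression below reconstructs the borrow bit
+    // of x - y using only XOR/OR/shift, so the result (1 if x < y, 0 otherwise)
+    // is produced in constant time, independent of the operand values.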
+ return (unsigned int)((x ^ ((x ^ y) | ((x - y) ^ y))) >> (RADIX - 1)); +} + +/********************** Platform-independent macros for digit-size operations + * **********************/ + +// Digit addition with carry +#define ADDC(sumOut, carryOut, addend1, addend2, carryIn) \ + { \ + digit_t tempReg = (addend1) + (digit_t)(carryIn); \ + (sumOut) = (addend2) + tempReg; \ + (carryOut) = (is_digit_lessthan_ct(tempReg, (digit_t)(carryIn)) | is_digit_lessthan_ct((sumOut), tempReg)); \ + } + +// Digit subtraction with borrow +#define SUBC(differenceOut, borrowOut, minuend, subtrahend, borrowIn) \ + { \ + digit_t tempReg = (minuend) - (subtrahend); \ + unsigned int borrowReg = \ + (is_digit_lessthan_ct((minuend), (subtrahend)) | ((borrowIn) & is_digit_zero_ct(tempReg))); \ + (differenceOut) = tempReg - (digit_t)(borrowIn); \ + (borrowOut) = borrowReg; \ + } + +// Shift right with flexible datatype +#define SHIFTR(highIn, lowIn, shift, shiftOut, DigitSize) \ + (shiftOut) = ((lowIn) >> (shift)) ^ ((highIn) << (DigitSize - (shift))); + +// Digit shift left +#define SHIFTL(highIn, lowIn, shift, shiftOut, DigitSize) \ + (shiftOut) = ((highIn) << (shift)) ^ ((lowIn) >> (RADIX - (shift))); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/normeq.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/normeq.c new file mode 100644 index 0000000000..8c133dd095 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/normeq.c @@ -0,0 +1,369 @@ +#include +#include "internal.h" + +/** @file + * + * @authors Antonin Leroux + * + * @brief Functions related to norm equation solving or special extremal orders + */ + +void +quat_lattice_O0_set(quat_lattice_t *O0) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&(O0->basis[i][j]), 0); + } + } + ibz_set(&(O0->denom), 2); + ibz_set(&(O0->basis[0][0]), 2); + ibz_set(&(O0->basis[1][1]), 2); + ibz_set(&(O0->basis[2][2]), 1); + ibz_set(&(O0->basis[1][2]), 1); + ibz_set(&(O0->basis[3][3]), 1); + ibz_set(&(O0->basis[0][3]), 1); +} + +void +quat_lattice_O0_set_extremal(quat_p_extremal_maximal_order_t *O0) +{ + ibz_set(&O0->z.coord[1], 1); + ibz_set(&O0->t.coord[2], 1); + ibz_set(&O0->z.denom, 1); + ibz_set(&O0->t.denom, 1); + O0->q = 1; + quat_lattice_O0_set(&(O0->order)); +} + +void +quat_order_elem_create(quat_alg_elem_t *elem, + const quat_p_extremal_maximal_order_t *order, + const ibz_vec_4_t *coeffs, + const quat_alg_t *Bpoo) +{ + + // var dec + quat_alg_elem_t quat_temp; + + // var init + quat_alg_elem_init(&quat_temp); + + // elem = x + quat_alg_scalar(elem, &(*coeffs)[0], &ibz_const_one); + + // quat_temp = i*y + quat_alg_scalar(&quat_temp, &((*coeffs)[1]), &ibz_const_one); + quat_alg_mul(&quat_temp, &order->z, &quat_temp, Bpoo); + + // elem = x + i*y + quat_alg_add(elem, elem, &quat_temp); + + // quat_temp = z * j + quat_alg_scalar(&quat_temp, &(*coeffs)[2], &ibz_const_one); + quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); + + // elem = x + i* + z*j + quat_alg_add(elem, elem, &quat_temp); + + // quat_temp = t * j * i + quat_alg_scalar(&quat_temp, &(*coeffs)[3], &ibz_const_one); + quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); + quat_alg_mul(&quat_temp, &quat_temp, &order->z, Bpoo); + + // elem = x + i*y + j*z + j*i*t + quat_alg_add(elem, elem, &quat_temp); + + quat_alg_elem_finalize(&quat_temp); +} + +int +quat_represent_integer(quat_alg_elem_t *gamma, + const ibz_t *n_gamma, + int non_diag, + const quat_represent_integer_params_t *params) +{ + + if (ibz_is_even(n_gamma)) { + return 0; + } + // var 
dec + int found; + ibz_t cornacchia_target; + ibz_t adjusted_n_gamma, q; + ibz_t bound, sq_bound, temp; + ibz_t test; + ibz_vec_4_t coeffs; // coeffs = [x,y,z,t] + quat_alg_elem_t quat_temp; + + if (non_diag) + assert(params->order->q % 4 == 1); + + // var init + found = 0; + ibz_init(&bound); + ibz_init(&test); + ibz_init(&temp); + ibz_init(&q); + ibz_init(&sq_bound); + ibz_vec_4_init(&coeffs); + quat_alg_elem_init(&quat_temp); + ibz_init(&adjusted_n_gamma); + ibz_init(&cornacchia_target); + + ibz_set(&q, params->order->q); + + // this could be removed in the current state + int standard_order = (params->order->q == 1); + + // adjusting the norm of gamma (multiplying by 4 to find a solution in an order of odd level) + if (non_diag || standard_order) { + ibz_mul(&adjusted_n_gamma, n_gamma, &ibz_const_two); + ibz_mul(&adjusted_n_gamma, &adjusted_n_gamma, &ibz_const_two); + } else { + ibz_copy(&adjusted_n_gamma, n_gamma); + } + // computation of the first bound = sqrt (adjust_n_gamma / p - q) + ibz_div(&sq_bound, &bound, &adjusted_n_gamma, &((params->algebra)->p)); + ibz_set(&temp, params->order->q); + ibz_sub(&sq_bound, &sq_bound, &temp); + ibz_sqrt_floor(&bound, &sq_bound); + + // the size of the search space is roughly n_gamma / (p√q) + ibz_t counter; + ibz_init(&counter); + ibz_mul(&temp, &temp, &((params->algebra)->p)); + ibz_mul(&temp, &temp, &((params->algebra)->p)); + ibz_sqrt_floor(&temp, &temp); + ibz_div(&counter, &temp, &adjusted_n_gamma, &temp); + + // entering the main loop + while (!found && ibz_cmp(&counter, &ibz_const_zero) != 0) { + // decreasing the counter + ibz_sub(&counter, &counter, &ibz_const_one); + + // we start by sampling the first coordinate + ibz_rand_interval(&coeffs[2], &ibz_const_one, &bound); + + // then, we sample the second coordinate + // computing the second bound in temp as sqrt( (adjust_n_gamma - p*coeffs[2]²)/qp ) + ibz_mul(&cornacchia_target, &coeffs[2], &coeffs[2]); + ibz_mul(&temp, &cornacchia_target, &(params->algebra->p)); + ibz_sub(&temp, &adjusted_n_gamma, &temp); + ibz_mul(&sq_bound, &q, &(params->algebra->p)); + ibz_div(&temp, &sq_bound, &temp, &sq_bound); + ibz_sqrt_floor(&temp, &temp); + + if (ibz_cmp(&temp, &ibz_const_zero) == 0) { + continue; + } + // sampling the second value + ibz_rand_interval(&coeffs[3], &ibz_const_one, &temp); + + // compute cornacchia_target = n_gamma - p * (z² + q*t²) + ibz_mul(&temp, &coeffs[3], &coeffs[3]); + ibz_mul(&temp, &q, &temp); + ibz_add(&cornacchia_target, &cornacchia_target, &temp); + ibz_mul(&cornacchia_target, &cornacchia_target, &((params->algebra)->p)); + ibz_sub(&cornacchia_target, &adjusted_n_gamma, &cornacchia_target); + assert(ibz_cmp(&cornacchia_target, &ibz_const_zero) > 0); + + // applying cornacchia + if (ibz_probab_prime(&cornacchia_target, params->primality_test_iterations)) + found = ibz_cornacchia_prime(&(coeffs[0]), &(coeffs[1]), &q, &cornacchia_target); + else + found = 0; + + if (found && non_diag && standard_order) { + // check that we can divide by two at least once + // the treatmeat depends if the basis contains (1+j)/2 or (1+k)/2 + // we must have x = t mod 2 and y = z mod 2 + // if q=1 we can simply swap x and y + if (ibz_is_odd(&coeffs[0]) != ibz_is_odd(&coeffs[3])) { + ibz_swap(&coeffs[1], &coeffs[0]); + } + // we further check that (x-t)/2 = 1 mod 2 and (y-z)/2 = 1 mod 2 to ensure that the + // resulting endomorphism will behave well for dim 2 computations + found = found && ((ibz_get(&coeffs[0]) - ibz_get(&coeffs[3])) % 4 == 2) && + ((ibz_get(&coeffs[1]) - 
ibz_get(&coeffs[2])) % 4 == 2); + } + if (found) { + +#ifndef NDEBUG + ibz_set(&temp, (params->order->q)); + ibz_mul(&temp, &temp, &(coeffs[1])); + ibz_mul(&temp, &temp, &(coeffs[1])); + ibz_mul(&test, &(coeffs[0]), &(coeffs[0])); + ibz_add(&temp, &temp, &test); + assert(0 == ibz_cmp(&temp, &cornacchia_target)); + + ibz_mul(&cornacchia_target, &(coeffs[3]), &(coeffs[3])); + ibz_mul(&cornacchia_target, &cornacchia_target, &(params->algebra->p)); + ibz_mul(&temp, &(coeffs[1]), &(coeffs[1])); + ibz_add(&cornacchia_target, &cornacchia_target, &temp); + ibz_set(&temp, (params->order->q)); + ibz_mul(&cornacchia_target, &cornacchia_target, &temp); + ibz_mul(&temp, &(coeffs[0]), &coeffs[0]); + ibz_add(&cornacchia_target, &cornacchia_target, &temp); + ibz_mul(&temp, &(coeffs[2]), &coeffs[2]); + ibz_mul(&temp, &temp, &(params->algebra->p)); + ibz_add(&cornacchia_target, &cornacchia_target, &temp); + assert(0 == ibz_cmp(&cornacchia_target, &adjusted_n_gamma)); +#endif + // translate x,y,z,t into the quaternion element gamma + quat_order_elem_create(gamma, (params->order), &coeffs, (params->algebra)); +#ifndef NDEBUG + quat_alg_norm(&temp, &(coeffs[0]), gamma, (params->algebra)); + assert(ibz_is_one(&(coeffs[0]))); + assert(0 == ibz_cmp(&temp, &adjusted_n_gamma)); + assert(quat_lattice_contains(NULL, &((params->order)->order), gamma)); +#endif + // making gamma primitive + // coeffs contains the coefficients of primitivized gamma in the basis of order + quat_alg_make_primitive(&coeffs, &temp, gamma, &((params->order)->order)); + + if (non_diag || standard_order) + found = (ibz_cmp(&temp, &ibz_const_two) == 0); + else + found = (ibz_cmp(&temp, &ibz_const_one) == 0); + } + } + + if (found) { + // new gamma + ibz_mat_4x4_eval(&coeffs, &(((params->order)->order).basis), &coeffs); + ibz_copy(&gamma->coord[0], &coeffs[0]); + ibz_copy(&gamma->coord[1], &coeffs[1]); + ibz_copy(&gamma->coord[2], &coeffs[2]); + ibz_copy(&gamma->coord[3], &coeffs[3]); + ibz_copy(&gamma->denom, &(((params->order)->order).denom)); + } + // var finalize + ibz_finalize(&counter); + ibz_finalize(&bound); + ibz_finalize(&temp); + ibz_finalize(&sq_bound); + ibz_vec_4_finalize(&coeffs); + quat_alg_elem_finalize(&quat_temp); + ibz_finalize(&adjusted_n_gamma); + ibz_finalize(&cornacchia_target); + ibz_finalize(&q); + ibz_finalize(&test); + + return found; +} + +int +quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, + const ibz_t *norm, + int is_prime, + const quat_represent_integer_params_t *params, + const ibz_t *prime_cofactor) +{ + + ibz_t n_temp, norm_d; + ibz_t disc; + quat_alg_elem_t gen, gen_rerand; + int found = 0; + ibz_init(&n_temp); + ibz_init(&norm_d); + ibz_init(&disc); + quat_alg_elem_init(&gen); + quat_alg_elem_init(&gen_rerand); + + // when the norm is prime we can be quite efficient + // by avoiding to run represent integer + // the first step is to generate one ideal of the correct norm + if (is_prime) { + + // we find a quaternion element of norm divisible by norm + while (!found) { + // generating a trace-zero element at random + ibz_set(&gen.coord[0], 0); + ibz_sub(&n_temp, norm, &ibz_const_one); + for (int i = 1; i < 4; i++) + ibz_rand_interval(&gen.coord[i], &ibz_const_zero, &n_temp); + + // first, we compute the norm of the gen + quat_alg_norm(&n_temp, &norm_d, &gen, (params->algebra)); + assert(ibz_is_one(&norm_d)); + + // and finally the negation mod norm + ibz_neg(&disc, &n_temp); + ibz_mod(&disc, &disc, norm); + // now we check that -n is a square mod norm + // and if the square root exists we 
compute it + found = ibz_sqrt_mod_p(&gen.coord[0], &disc, norm); + found = found && !quat_alg_elem_is_zero(&gen); + } + } else { + assert(prime_cofactor != NULL); + // if it is not prime or we don't know if it is prime, we may just use represent integer + // and use a precomputed prime as cofactor + assert(!ibz_is_zero(norm)); + ibz_mul(&n_temp, prime_cofactor, norm); + found = quat_represent_integer(&gen, &n_temp, 0, params); + found = found && !quat_alg_elem_is_zero(&gen); + } +#ifndef NDEBUG + if (found) { + // first, we compute the norm of the gen + quat_alg_norm(&n_temp, &norm_d, &gen, (params->algebra)); + assert(ibz_is_one(&norm_d)); + ibz_mod(&n_temp, &n_temp, norm); + assert(ibz_cmp(&n_temp, &ibz_const_zero) == 0); + } +#endif + + // now we just have to rerandomize the class of the ideal generated by gen + found = 0; + while (!found) { + for (int i = 0; i < 4; i++) { + ibz_rand_interval(&gen_rerand.coord[i], &ibz_const_one, norm); + } + quat_alg_norm(&n_temp, &norm_d, &gen_rerand, (params->algebra)); + assert(ibz_is_one(&norm_d)); + ibz_gcd(&disc, &n_temp, norm); + found = ibz_is_one(&disc); + found = found && !quat_alg_elem_is_zero(&gen_rerand); + } + + quat_alg_mul(&gen, &gen, &gen_rerand, (params->algebra)); + // in both cases, whether norm is prime or not prime, + // gen is not divisible by any integer factor of the target norm + // therefore the call below will yield an ideal of the correct norm + quat_lideal_create(lideal, &gen, norm, &((params->order)->order), (params->algebra)); + assert(ibz_cmp(norm, &(lideal->norm)) == 0); + + ibz_finalize(&n_temp); + quat_alg_elem_finalize(&gen); + quat_alg_elem_finalize(&gen_rerand); + ibz_finalize(&norm_d); + ibz_finalize(&disc); + return (found); +} + +void +quat_change_to_O0_basis(ibz_vec_4_t *vec, const quat_alg_elem_t *el) +{ + ibz_t tmp; + ibz_init(&tmp); + ibz_copy(&(*vec)[2], &el->coord[2]); + ibz_add(&(*vec)[2], &(*vec)[2], &(*vec)[2]); // double (not optimal if el->denom is even...) + ibz_copy(&(*vec)[3], &el->coord[3]); // double (not optimal if el->denom is even...) 
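+    // For el = (x + y*i + z*j + t*ij)/denom in O0, the coordinates in the basis
+    // (1, i, (i+j)/2, (1+ij)/2) are (x - t, y - z, 2*z, 2*t)/denom; the doublings and
+    // subtractions around this point compute exactly these entries before the division below.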
+ ibz_add(&(*vec)[3], &(*vec)[3], &(*vec)[3]); + ibz_sub(&(*vec)[0], &el->coord[0], &el->coord[3]); + ibz_sub(&(*vec)[1], &el->coord[1], &el->coord[2]); + + assert(ibz_divides(&(*vec)[0], &el->denom)); + assert(ibz_divides(&(*vec)[1], &el->denom)); + assert(ibz_divides(&(*vec)[2], &el->denom)); + assert(ibz_divides(&(*vec)[3], &el->denom)); + + ibz_div(&(*vec)[0], &tmp, &(*vec)[0], &el->denom); + ibz_div(&(*vec)[1], &tmp, &(*vec)[1], &el->denom); + ibz_div(&(*vec)[2], &tmp, &(*vec)[2], &el->denom); + ibz_div(&(*vec)[3], &tmp, &(*vec)[3], &el->denom); + + ibz_finalize(&tmp); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/printer.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/printer.c new file mode 100644 index 0000000000..6d6a3ca9b7 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/printer.c @@ -0,0 +1,132 @@ +#include +#include "internal.h" + +void +ibz_mat_2x2_print(const ibz_mat_2x2_t *mat) +{ + printf("matrix: "); + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_print(&((*mat)[i][j]), 10); + printf(" "); + } + printf("\n "); + } + printf("\n"); +} + +void +ibz_mat_4x4_print(const ibz_mat_4x4_t *mat) +{ + printf("matrix: "); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_print(&((*mat)[i][j]), 10); + printf(" "); + } + printf("\n "); + } + printf("\n"); +} + +void +ibz_vec_2_print(const ibz_vec_2_t *vec) +{ + printf("vector: "); + for (int i = 0; i < 2; i++) { + ibz_print(&((*vec)[i]), 10); + printf(" "); + } + printf("\n\n"); +} + +void +ibz_vec_4_print(const ibz_vec_4_t *vec) +{ + printf("vector: "); + for (int i = 0; i < 4; i++) { + ibz_print(&((*vec)[i]), 10); + printf(" "); + } + printf("\n\n"); +} + +void +quat_lattice_print(const quat_lattice_t *lat) +{ + printf("lattice\n"); + printf("denominator: "); + ibz_print(&(lat->denom), 10); + printf("\n"); + printf("basis: "); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_print(&((lat->basis)[i][j]), 10); + printf(" "); + } + printf("\n "); + } + printf("\n"); +} + +void +quat_alg_print(const quat_alg_t *alg) +{ + printf("quaternion algebra ramified at "); + ibz_print(&(alg->p), 10); + printf(" and infinity\n\n"); +} + +void +quat_alg_elem_print(const quat_alg_elem_t *elem) +{ + printf("denominator: "); + ibz_print(&(elem->denom), 10); + printf("\n"); + printf("coordinates: "); + for (int i = 0; i < 4; i++) { + ibz_print(&((elem->coord)[i]), 10); + printf(" "); + } + printf("\n\n"); +} + +void +quat_left_ideal_print(const quat_left_ideal_t *lideal) +{ + printf("left ideal\n"); + printf("norm: "); + ibz_print(&(lideal->norm), 10); + printf("\n"); + printf("denominator: "); + ibz_print(&(lideal->lattice.denom), 10); + printf("\n"); + printf("basis: "); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_print(&((lideal->lattice.basis)[i][j]), 10); + printf(" "); + } + if (i != 3) { + printf("\n "); + } else { + printf("\n"); + } + } + if ((lideal->parent_order) != NULL) { + printf("parent order denominator: "); + ibz_print(&(lideal->parent_order->denom), 10); + printf("\n"); + printf("parent order basis: "); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_print(&((lideal->parent_order->basis)[i][j]), 10); + printf(" "); + } + printf("\n "); + } + } else { + printf("Parent order not given!\n"); + } + printf("\n"); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion.h new file mode 100644 index 0000000000..a567657464 --- 
/dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion.h @@ -0,0 +1,708 @@ +/** @file + * + * @authors Luca De Feo, Sina Schaeffler + * + * @brief Declarations for quaternion algebra operations + */ + +#ifndef QUATERNION_H +#define QUATERNION_H + +// #include +#include +#include "intbig.h" +#include + +/** @defgroup quat_quat Quaternion algebra + * @{ + */ + +/** @defgroup quat_vec_t Types for integer vectors and matrices + * @{ + */ + +/** @brief Type for vector of 2 big integers + * + * @typedef ibz_vec_2_t + */ +typedef ibz_t ibz_vec_2_t[2]; + +/** @brief Type for vectors of 4 integers + * + * @typedef ibz_vec_4_t + * + * Represented as a vector of 4 ibz_t (big integer) elements + */ +typedef ibz_t ibz_vec_4_t[4]; + +/** @brief Type for 2 by 2 matrices of integers + * + * @typedef ibz_mat_2x2_t + * + * Represented as a matrix of 2 vectors of 2 ibz_t (big integer) elements + */ +typedef ibz_t ibz_mat_2x2_t[2][2]; + +/** @brief Type for 4 by 4 matrices of integers + * + * @typedef ibz_mat_4x4_t + * + * Represented as a matrix of 4 vectors of 4 ibz_t (big integer) elements + */ +typedef ibz_t ibz_mat_4x4_t[4][4]; +/** + * @} + */ + +/** @defgroup quat_quat_t Types for quaternion algebras + * @{ + */ + +/** @brief Type for quaternion algebras + * + * @typedef quat_alg_t + * + * @struct quat_alg + * + * The quaternion algebra ramified at p = 3 mod 4 and ∞. + */ +typedef struct quat_alg +{ + ibz_t p; ///< Prime number, must be = 3 mod 4. +} quat_alg_t; + +/** @brief Type for quaternion algebra elements + * + * @typedef quat_alg_elem_t + * + * @struct quat_alg_elem + * + * Represented as a array *coord* of 4 ibz_t integers and a common ibz_t denominator *denom*. + * + * The representation is not necessarily normalized, that is, gcd(denom, content(coord)) might not + * be 1. For getting a normalized representation, use the quat_alg_normalize function + * + * The elements are always represented in basis (1,i,j,ij) of the quaternion algebra, with i^2=-1 + * and j^2 = -p + */ +typedef struct quat_alg_elem +{ + ibz_t denom; ///< Denominator by which all coordinates are divided (big integer, must not be 0) + ibz_vec_4_t coord; ///< Numerators of the 4 coordinates of the quaternion algebra element in basis (1,i,j,ij) +} quat_alg_elem_t; + +/** @brief Type for lattices in dimension 4 + * + * @typedef quat_lattice_t + * + * @struct quat_lattice + * + * Represented as a rational (`frac`) times an integreal lattice (`basis`) + * + * The basis is such that its columns divided by its denominator are elements of + * the quaternion algebra, represented in basis (1,i,j,ij) where i^2 = -1, j^2 = -p. + * + * All lattices must have full rank (4) + */ +typedef struct quat_lattice +{ + ibz_t denom; ///< Denominator by which the basis is divided (big integer, must not be 0) + ibz_mat_4x4_t basis; ///< Integer basis of the lattice (its columns divided by denom are + ///< algebra elements in the usual basis) +} quat_lattice_t; + +/** @brief Type for left ideals of maximal orders in quaternion algebras + * + * @typedef quat_left_ideal_t + * + * @struct quat_left_ideal + * + * The basis of the lattice representing it is such that its columns divided by its denominator are + * elements of the quaternion algebra, represented in basis (1,i,j,ij) where i^2 = -1, j^2 = -p. 
+ */ +typedef struct quat_left_ideal +{ + quat_lattice_t lattice; ///< lattice representing the ideal + ibz_t norm; ///< norm of the lattice + const quat_lattice_t *parent_order; ///< should be a maximal order +} quat_left_ideal_t; +/** @} + */ + +/** @brief Type for extremal maximal orders + * + * @typedef quat_p_extremal_maximal_order_t + * + * @struct quat_p_extremal_maximal_order + * + * The basis of the order representing it is in hermite normal form, and its columns divid +ed by its denominator are elements of the quaternion algebra, represented in basis (1,z,t, +tz) where z^2 = -q, t^2 = -p. +*/ +typedef struct quat_p_extremal_maximal_order +{ + quat_lattice_t order; ///< the order represented as a lattice + quat_alg_elem_t z; ///< the element of small discriminant + quat_alg_elem_t t; ///< the element of norm p orthogonal to z + uint32_t q; ///< the absolute value of the square of z +} quat_p_extremal_maximal_order_t; + +/** @brief Type for represent integer parameters + * + * @typedef quat_p_extremal_maximal_order_t + * + * @struct quat_p_extremal_maximal_order + * + */ +typedef struct quat_represent_integer_params +{ + int primality_test_iterations; ///< Primality test iterations + const quat_p_extremal_maximal_order_t *order; ///< The standard extremal maximal order + const quat_alg_t *algebra; ///< The quaternion algebra +} quat_represent_integer_params_t; + +/*************************** Functions *****************************/ + +/** @defgroup quat_c Constructors and Destructors + * @{ + */ +void quat_alg_init_set(quat_alg_t *alg, const ibz_t *p); +void quat_alg_finalize(quat_alg_t *alg); + +void quat_alg_elem_init(quat_alg_elem_t *elem); +void quat_alg_elem_finalize(quat_alg_elem_t *elem); + +void ibz_vec_2_init(ibz_vec_2_t *vec); +void ibz_vec_2_finalize(ibz_vec_2_t *vec); + +void ibz_vec_4_init(ibz_vec_4_t *vec); +void ibz_vec_4_finalize(ibz_vec_4_t *vec); + +void ibz_mat_2x2_init(ibz_mat_2x2_t *mat); +void ibz_mat_2x2_finalize(ibz_mat_2x2_t *mat); + +void ibz_mat_4x4_init(ibz_mat_4x4_t *mat); +void ibz_mat_4x4_finalize(ibz_mat_4x4_t *mat); + +void quat_lattice_init(quat_lattice_t *lat); +void quat_lattice_finalize(quat_lattice_t *lat); + +void quat_left_ideal_init(quat_left_ideal_t *lideal); +void quat_left_ideal_finalize(quat_left_ideal_t *lideal); +/** @} + */ + +/** @defgroup quat_printers Print functions for types from the quaternion module + * @{ + */ +void ibz_mat_2x2_print(const ibz_mat_2x2_t *mat); +void ibz_mat_4x4_print(const ibz_mat_4x4_t *mat); +void ibz_vec_2_print(const ibz_vec_2_t *vec); +void ibz_vec_4_print(const ibz_vec_4_t *vec); + +void quat_lattice_print(const quat_lattice_t *lat); +void quat_alg_print(const quat_alg_t *alg); +void quat_alg_elem_print(const quat_alg_elem_t *elem); +void quat_left_ideal_print(const quat_left_ideal_t *lideal); + +/** @} + */ + +/** @defgroup quat_int Integer functions for quaternion algebra + * @{ + */ + +/** @defgroup quat_int_mat Integer matrix and vector functions + * @{ + */ + +/** @brief Copy matrix + * + * @param copy Output: Matrix into which copied will be copied + * @param copied + */ +void ibz_mat_2x2_copy(ibz_mat_2x2_t *copy, const ibz_mat_2x2_t *copied); + +/** + * @brief Inverse of 2x2 integer matrices modulo m + * + * @param inv Output matrix + * @param mat Input matrix + * @param m Integer modulo + * @return 1 if inverse exists 0 otherwise + */ +int ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m); + +/** @brief mat*vec in dimension 2 for integers + * + * @param res Output 
vector
+ * @param mat Input matrix
+ * @param vec Input vector
+ */
+void ibz_mat_2x2_eval(ibz_vec_2_t *res, const ibz_mat_2x2_t *mat, const ibz_vec_2_t *vec);
+
+/** @brief Copies all values from a 4x4 integer matrix to another one
+ *
+ * @param new Output: matrix which will have its entries set to mat's entries
+ * @param mat Input matrix
+ */
+void ibz_mat_4x4_copy(ibz_mat_4x4_t *new,
+                      const ibz_mat_4x4_t *mat); // dim4, lattice, test/dim4, ideal
+
+/** @brief transpose a 4x4 integer matrix
+ *
+ * @param transposed Output: is set to the transposition of mat
+ * @param mat Input matrix
+ */
+void ibz_mat_4x4_transpose(ibz_mat_4x4_t *transposed, const ibz_mat_4x4_t *mat);
+
+/** @brief a*b for a,b integer 4x4 matrices
+ *
+ * Naive implementation
+ *
+ * @param res Output: A 4x4 integer matrix
+ * @param a
+ * @param b
+ */
+void ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t *b);
+
+/** @brief divides all values in a matrix by the same scalar
+ *
+ * @returns 1 if scalar divided all values in mat, 0 otherwise (division is performed in both cases)
+ * @param quot Output
+ * @param scalar
+ * @param mat
+ */
+int ibz_mat_4x4_scalar_div(ibz_mat_4x4_t *quot, const ibz_t *scalar, const ibz_mat_4x4_t *mat);
+
+/**
+ * @brief mat*vec
+ *
+ * @param res Output: coordinate vector
+ * @param mat Integer 4x4 matrix
+ * @param vec Integer vector (coordinate vector)
+ *
+ * Multiplies the 4x4 integer matrix mat by the column vector of 4 integers vec
+ */
+void ibz_mat_4x4_eval(ibz_vec_4_t *res, const ibz_mat_4x4_t *mat, const ibz_vec_4_t *vec);
+
+/**
+ * @brief vec*mat
+ *
+ * @param res Output: coordinate vector.
+ * @param vec Integer vector (coordinate vector)
+ * @param mat Integer 4x4 matrix
+ *
+ * Multiplies the 4x4 integer matrix mat by the row vector of 4 integers vec (on the left)
+ */
+void ibz_mat_4x4_eval_t(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_mat_4x4_t *mat);
+
+/** @}
+ */
+
+/** @defgroup quat_integer Higher-level integer functions for quaternion algebra
+ * @{
+ */
+
+/**
+ * @brief Generates a random prime
+ *
+ * A number is accepted as prime if it passes a 30-round Miller-Rabin test.
+ * This function is fairly inefficient and mostly meant for tests.
+ *
+ * @returns 1 if a prime is found, 0 otherwise
+ * @param p Output: The prime (if found)
+ * @param is3mod4 If 1, the prime is required to be 3 mod 4, if 0 no congruence condition is imposed
+ * @param bitsize Maximal size of output prime
+ * @param probability_test_iterations Miller-Rabin iterations for probabilistic primality testing in
+ * rejection sampling
+ */
+int ibz_generate_random_prime(ibz_t *p, int is3mod4, int bitsize, int probability_test_iterations);
+
+/**
+ * @brief Find integers x and y such that x^2 + n*y^2 = p
+ *
+ * Uses Cornacchia's algorithm, should be used only for prime p
+ *
+ * @param x Output
+ * @param y Output
+ * @param n first parameter defining the equation
+ * @param p second parameter defining the equation, must be prime
+ * @return 1 if success, 0 otherwise
+ */
+int ibz_cornacchia_prime(ibz_t *x, ibz_t *y, const ibz_t *n, const ibz_t *p);
+
+/** @}
+ */
+
+/** @defgroup quat_qf Quadratic form functions
+ * @{
+ */
+
+/**
+ * @brief Quadratic form evaluation
+ *
+ * qf and coord must be represented in the same basis.
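+ *
+ * In other words (assuming the standard convention for a quadratic form given by a matrix),
+ * the call computes res = coord^T * qf * coord; for example, evaluating the Gram matrix
+ * returned by quat_lideal_reduce_basis at a coordinate vector gives the reduced norm of the
+ * corresponding element divided by the norm of the ideal.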
+ * + * @param res Output: coordinate vector + * @param qf Quadratic form (4x4 integer matrix) + * @param coord Integer vector (coordinate vector) + */ +void quat_qf_eval(ibz_t *res, const ibz_mat_4x4_t *qf, const ibz_vec_4_t *coord); +/** @} + */ + +/** @} + */ + +/** @defgroup quat_quat_f Quaternion algebra functions + * @{ + */ +/** + * @brief Copies an algebra element + * + * @param copy Output: The element into which another one is copied + * @param copied Source element copied into copy + */ +void quat_alg_elem_copy(quat_alg_elem_t *copy, const quat_alg_elem_t *copied); + +void quat_alg_mul(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b, const quat_alg_t *alg); + +/** @brief reduced norm of alg_elem x + * + * @param res_num Output: rational which will contain the numerator of the reduced norm of a + * @param res_denom Output: rational which will contain the denominator of the reduced norm of a (it + * is 1 if the norm is integer) + * @param x Algebra element whose norm is computed + * @param alg The quaternion algebra + */ +void quat_alg_norm(ibz_t *res_num, ibz_t *res_denom, const quat_alg_elem_t *x, const quat_alg_t *alg); + +/** @brief Normalize representation of alg_elem x + * + * @param x Algebra element whose representation will be normalized + * + * Modification of x. + * Sets coord and denom of x so that gcd(denom, content(coord))=1 + * without changing the value of x = (coord0/denom, coord1/denom, coord2/denom, coord3/denom). + */ +void quat_alg_normalize(quat_alg_elem_t *x); + +/** + * @brief Standard involution in a quaternion algebra + * + * @param conj Output: image of x by standard involution of the quaternion algebra alg + * @param x element of alg whose image is searched + */ +void quat_alg_conj(quat_alg_elem_t *conj, const quat_alg_elem_t *x); + +/** + * @brief Given `x` ∈ `order`, factor it into its primitive and impritive parts + * + * Given `x` ∈ `order`, return a coordinate vector `primitive_x` and an integer `content` + * such that `x` = `content` · Λ `primitive_x`, where Λ is the basis of `order` + * and `x` / `content` is primitive in `order`. + * + * @param primitive_x Output: coordinates of a primitive element of `order` (in `order`'s basis) + * @param content Output: content of `x`'s coordinate vector in order's basis + * @param order order of `alg` + * @param x element of order, must be in `order` + */ +void quat_alg_make_primitive(ibz_vec_4_t *primitive_x, + ibz_t *content, + const quat_alg_elem_t *x, + const quat_lattice_t *order); + +// end quat_quat_f +/** @} + */ + +/** @defgroup quat_lat_f Lattice functions + * @{ + */ + +void quat_lattice_intersect(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2); + +/** + * @brief Test whether x ∈ lat. If so, compute its coordinates in lat's basis. + * + * @param coord Output: Set to the coordinates of x in lat. May be NULL. + * @param lat The lattice, not necessarily in HNF but full rank + * @param x An element of the quaternion algebra + * @return true if x ∈ lat + */ +int quat_lattice_contains(ibz_vec_4_t *coord, const quat_lattice_t *lat, const quat_alg_elem_t *x); + +/** + * @brief Conjugate of a lattice with basis not in HNF + * + * @param conj Output: The lattice conjugate to lat. 
ATTENTION: is not under HNF + * @param lat Input lattice + */ +void quat_lattice_conjugate_without_hnf(quat_lattice_t *conj, const quat_lattice_t *lat); + +/** + * @brief Multiply a lattice and an algebra element + * + * The element is multiplied to the right of the lattice + * + * @param prod Output: Lattice lat*elem + * @param lat Input lattice + * @param elem Algebra element + * @param alg The quaternion algebra + */ +void quat_lattice_alg_elem_mul(quat_lattice_t *prod, + const quat_lattice_t *lat, + const quat_alg_elem_t *elem, + const quat_alg_t *alg); // ideal + +/** + * @brief Sample from the intersection of a lattice with a ball + * + * Sample a uniform non-zero vector of norm ≤ `radius` from the lattice. + * + * @param res Output: sampled quaternion from the lattice + * @param lattice Input lattice + * @param alg The quaternion algebra + * @param radius The ball radius (quaternion norm) + * @return 0 if an error occurred (ball too small or RNG error), 1 otherwise + */ +int quat_lattice_sample_from_ball(quat_alg_elem_t *res, + const quat_lattice_t *lattice, + const quat_alg_t *alg, + const ibz_t *radius); + +// end quat_lat_f +/** @} + */ + +/** @defgroup quat_lideal_f Functions for left ideals + * @{ + */ + +/** @defgroup quat_lideal_c Creating left ideals + * @{ + */ + +/** + * @brief Left ideal of order, generated by x and N as order*x+order*N + * + * @param lideal Output: left ideal + * @param alg quaternion algebra + * @param order maximal order of alg whose left ideal is searched + * @param x generating element. Must be non-zero + * @param N generating integer + * + * Creates the left ideal in order generated by the element x and the integer N. + * If x is not divisible (inside the order) by any integer divisor n>1 of N, + * then the norm of the output ideal is N. + * + */ +void quat_lideal_create(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const ibz_t *N, + const quat_lattice_t *order, + const quat_alg_t *alg); + +/** @} + */ + +/** @defgroup quat_lideal_gen Generators of left ideals + * @{ + */ + +/** + * @brief Generator of 'lideal' + * + * @returns 1 if such a generator was found, 0 otherwise + * @param gen Output: non scalar generator of lideal + * @param lideal left ideal + * @param alg the quaternion algebra + * + * Ideal is generated by gen and the ideal's norm + * + * Bound has as default value QUATERNION_lideal_generator_search_bound + */ +int quat_lideal_generator(quat_alg_elem_t *gen, const quat_left_ideal_t *lideal, const quat_alg_t *alg); +/** @} + */ + +/** @defgroup quat_lideal_op Operations on left ideals + * @{ + */ + +/** + * @brief Copies an ideal + * + * @param copy Output: The ideal into which another one is copied + * @param copied Source ideal copied into copy. The parent order is not copied (only the pointer). 
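+ *
+ * Illustrative sketch (I and J are hypothetical, already-initialized quat_left_ideal_t):
+ * @code
+ * quat_lideal_copy(&J, &I);
+ * // J.parent_order == I.parent_order: the pointed-to order must outlive both ideals
+ * @endcode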
+ */
+void quat_lideal_copy(quat_left_ideal_t *copy, const quat_left_ideal_t *copied);
+
+/**
+ * @brief Conjugate of a left ideal (not in HNF)
+ *
+ * @param conj Output: Ideal conjugate to lideal, with norm and parent order correctly set, but its
+ * lattice not in HNF
+ * @param new_parent_order Output: Will be set to the right order of lideal, and serve as parent
+ * order for conj (so must have at least the lifetime of conj)
+ * @param lideal input left ideal (of which conj will be the conjugate)
+ * @param alg the quaternion algebra
+ */
+void quat_lideal_conjugate_without_hnf(quat_left_ideal_t *conj,
+                                       quat_lattice_t *new_parent_order,
+                                       const quat_left_ideal_t *lideal,
+                                       const quat_alg_t *alg);
+
+/**
+ * @brief Intersection of two left ideals
+ *
+ * @param intersection Output: Left ideal which is the intersection of the 2 inputs
+ * @param lideal1 left ideal
+ * @param lideal2 left ideal
+ * @param alg the quaternion algebra
+ */
+void quat_lideal_inter(quat_left_ideal_t *intersection,
+                       const quat_left_ideal_t *lideal1,
+                       const quat_left_ideal_t *lideal2,
+                       const quat_alg_t *alg);
+
+/**
+ * @brief L2-reduce the basis of the left ideal, without considering its denominator
+ *
+ * This function reduces the basis of the lattice of the ideal, but it completely ignores its
+ * denominator. So the outputs of this function must still be divided by the appropriate power of
+ * lideal.lattice.denom.
+ *
+ * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll:
+ * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf
+ *
+ * Parameters are in lll/lll_internals.h
+ *
+ * @param reduced Output: Lattice defining the ideal, which has its basis in an LLL-reduced form.
+ * Must be divided by lideal.lattice.denom before usage
+ * @param gram Output: Matrix of the quadratic form given by the norm on the basis of the reduced
+ * ideal, divided by the norm of the ideal
+ * @param lideal ideal whose basis will be reduced
+ * @param alg the quaternion algebra
+ */
+void quat_lideal_reduce_basis(ibz_mat_4x4_t *reduced,
+                              ibz_mat_4x4_t *gram,
+                              const quat_left_ideal_t *lideal,
+                              const quat_alg_t *alg); // replaces lideal_lll
+
+/**
+ * @brief Multiplies two ideals and L2-reduces the lattice of the result
+ *
+ * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll:
+ * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf
+ *
+ * Parameters are in lll/lll_internals.h
+ *
+ * @param prod Output: The product ideal with its lattice basis being L2-reduced
+ * @param gram Output: Gram matrix of the reduced norm (as quadratic but not bilinear form) on the
+ * basis of prod, divided by the norm of prod
+ * @param lideal1 Ideal at left in the product
+ * @param lideal2 Ideal at right in the product
+ * @param alg The quaternion algebra
+ */
+void quat_lideal_lideal_mul_reduced(quat_left_ideal_t *prod,
+                                    ibz_mat_4x4_t *gram,
+                                    const quat_left_ideal_t *lideal1,
+                                    const quat_left_ideal_t *lideal2,
+                                    const quat_alg_t *alg);
+
+/**
+ * @brief Replaces an ideal by a smaller equivalent one of prime norm
+ *
+ * @returns 1 if the computation succeeded and 0 otherwise
+ * @param lideal In- and Output: Ideal to be replaced
+ * @param alg The quaternion algebra
+ * @param primality_num_iter number of repetitions for primality testing
+ * @param equiv_bound_coeff bound on the coefficients for the candidates
+ */
+int quat_lideal_prime_norm_reduced_equivalent(quat_left_ideal_t *lideal,
+                                              const quat_alg_t *alg,
+                                              const int primality_num_iter,
+                                              const int equiv_bound_coeff);
+
+/** @}
+ */
+
+// end quat_lideal_f
+/** @}
+ */
+
+/** @defgroup quat_normeq Functions specific to special extremal maximal orders
+ * @{
+ */
+
+/**
+ * @brief Representing an integer by the quadratic norm form of a maximal extremal order
+ *
+ * @returns 1 if the computation succeeded
+ * @param gamma Output: a quaternion element
+ * @param n_gamma Target norm of gamma. n_gamma must be odd. If n_gamma/(p*params.order->q) <
+ * 2^QUAT_repres_bound_input, failure is likely
+ * @param non_diag If set to 1 (instead of 0) and the order is O0, an additional property is ensured
+ * @param params Represent integer parameters specifying the algebra, the special extremal order,
+ * the number of trials for finding gamma and the number of iterations of the primality test.
+ * Special requirements apply if non_diag is set to 1
+ *
+ * This algorithm finds a primitive quaternion element gamma of norm n_gamma inside any maximal
+ * extremal order. Failure is possible. It is most efficient for the standard order.
+ *
+ * If non_diag is set to 1, this algorithm finds a primitive quaternion element gamma of norm
+ * n_gamma, with some special properties used in fixed-degree isogeny computations, inside any
+ * maximal extremal order such that params->order->q = 1 mod 4. Failure is possible. It is most
+ * efficient for the standard order. The most important property is to avoid diagonal isogenies,
+ * meaning that the gamma returned by the algorithm must not be contained inside ZZ + 2*O, where O
+ * is the maximal order params->order. When O is the special order O0 corresponding to j=1728, we
+ * further need to avoid endomorphisms of E0xE0, and there is another requirement.
+ *
+ * If non_diag is set to 1, the number of trials for finding gamma and the number of iterations of
+ * the primality test are taken from params, and the value of params->order->q is required to be
+ * 1 mod 4.
+ */
+int quat_represent_integer(quat_alg_elem_t *gamma,
+                           const ibz_t *n_gamma,
+                           int non_diag,
+                           const quat_represent_integer_params_t *params);
+
+/** @brief Basis change to (1,i,(i+j)/2,(1+ij)/2) for elements of O0
+ *
+ * Changes the basis in which an element is given from (1,i,j,ij) to (1,i,(i+j)/2,(1+ij)/2), the
+ * usual basis of the special maximal order O0. Only for elements of O0.
+ *
+ * @param vec Output: Coordinates of el in basis (1,i,(i+j)/2,(1+ij)/2)
+ * @param el Input: An algebra element in O0
+ */
+void quat_change_to_O0_basis(ibz_vec_4_t *vec, const quat_alg_elem_t *el);
+
+/**
+ * @brief Random O0-ideal of given norm
+ *
+ * Much faster if norm is prime and is_prime is set to 1
+ *
+ * @param lideal Output: O0-ideal of norm norm
+ * @param norm Norm of the ideal to be found
+ * @param is_prime Indicates if norm is prime: 1 if it is, 0 otherwise
+ * @param params Represent integer parameters from the level-dependent constants
+ * @param prime_cofactor Prime distinct from the prime p defining the algebra but of similar size
+ * and coprime to norm. If is_prime is 1, it might be NULL.
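+ *
+ * Illustrative usage (a sketch only; N is assumed to be an initialized prime ibz_t and params
+ * to point at filled-in represent-integer parameters):
+ * @code
+ * quat_left_ideal_t I;
+ * quat_left_ideal_init(&I);
+ * if (quat_sampling_random_ideal_O0_given_norm(&I, &N, 1, &params, NULL)) {
+ *     // I is now a random left O0-ideal of norm N
+ * }
+ * quat_left_ideal_finalize(&I);
+ * @endcode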
+ * @returns 1 if success, 0 if no ideal found or randomness failed + */ +int quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, + const ibz_t *norm, + int is_prime, + const quat_represent_integer_params_t *params, + const ibz_t *prime_cofactor); +// end quat_normeq +/** @} + */ +// end quat_quat +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion_constants.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion_constants.h new file mode 100644 index 0000000000..a2f4b52b93 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion_constants.h @@ -0,0 +1,6 @@ +#include +#define QUAT_primality_num_iter 32 +#define QUAT_repres_bound_input 21 +#define QUAT_equiv_bound_coeff 64 +#define FINDUV_box_size 3 +#define FINDUV_cube_size 2400 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion_data.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion_data.c new file mode 100644 index 0000000000..24402255d4 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion_data.c @@ -0,0 +1,3626 @@ +#include +#include +#include +const ibz_t QUAT_prime_cofactor = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x171,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x8000}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x171,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x80000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x171,0x0,0x0,0x0,0x0,0x8000000000000000}}} +#endif +; +const quat_alg_t QUATALG_PINFTY = { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x40ff}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x40ffffff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0x40ffffffffffffff}}} +#endif +}; +const quat_p_extremal_maximal_order_t EXTREMAL_ORDERS[8] = {{{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif 
GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 
64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 1}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xafcc,0xe7ed,0x58f3,0x3e59,0x9763,0x88d6,0xf2c5,0x4a6d,0x3afe,0xf44b,0x7d27,0x3c31}}} +#elif GMP_LIMB_BITS == 32 
+{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe7edafcc,0x3e5958f3,0x88d69763,0x4a6df2c5,0xf44b3afe,0x3c317d27}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x3e5958f3e7edafcc,0x4a6df2c588d69763,0x3c317d27f44b3afe}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xafcc,0xe7ed,0x58f3,0x3e59,0x9763,0x88d6,0xf2c5,0x4a6d,0x3afe,0xf44b,0x7d27,0x3c31}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe7edafcc,0x3e5958f3,0x88d69763,0x4a6df2c5,0xf44b3afe,0x3c317d27}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x3e5958f3e7edafcc,0x4a6df2c588d69763,0x3c317d27f44b3afe}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd7e6,0xf3f6,0xac79,0x9f2c,0x4bb1,0xc46b,0xf962,0x2536,0x9d7f,0xfa25,0xbe93,0x1e18}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf3f6d7e6,0x9f2cac79,0xc46b4bb1,0x2536f962,0xfa259d7f,0x1e18be93}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x9f2cac79f3f6d7e6,0x2536f962c46b4bb1,0x1e18be93fa259d7f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fb7,0xea91,0xa3ea,0x2a21,0x9cd1,0x26b3,0xde73,0xa2d3,0xcecc,0x20a1,0xc963,0x266b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea912fb7,0x2a21a3ea,0x26b39cd1,0xa2d3de73,0x20a1cecc,0x266bc963}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x2a21a3eaea912fb7,0xa2d3de7326b39cd1,0x266bc96320a1cecc}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e5f,0x4aa8,0x9064,0x8436,0x8fae,0x50ab,0x2fd8,0xdd15,0x617a,0x8343,0x9423,0x3d7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x680}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4aa81e5f,0x84369064,0x50ab8fae,0xdd152fd8,0x8343617a,0x3d79423,0x0,0x0,0x0,0x0,0x0,0x6800000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x843690644aa81e5f,0xdd152fd850ab8fae,0x3d794238343617a,0x0,0x0,0x680000000000000}}} +#endif +}, { +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd7e6,0xf3f6,0xac79,0x9f2c,0x4bb1,0xc46b,0xf962,0x2536,0x9d7f,0xfa25,0xbe93,0x1e18}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf3f6d7e6,0x9f2cac79,0xc46b4bb1,0x2536f962,0xfa259d7f,0x1e18be93}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x9f2cac79f3f6d7e6,0x2536f962c46b4bb1,0x1e18be93fa259d7f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x1e5f,0x4aa8,0x9064,0x8436,0x8fae,0x50ab,0x2fd8,0xdd15,0x617a,0x8343,0x9423,0x3d7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x4aa81e5f,0x84369064,0x50ab8fae,0xdd152fd8,0x8343617a,0x3d79423}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x843690644aa81e5f,0xdd152fd850ab8fae,0x3d794238343617a}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xafcc,0xe7ed,0x58f3,0x3e59,0x9763,0x88d6,0xf2c5,0x4a6d,0x3afe,0xf44b,0x7d27,0x3c31}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe7edafcc,0x3e5958f3,0x88d69763,0x4a6df2c5,0xf44b3afe,0x3c317d27}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x3e5958f3e7edafcc,0x4a6df2c588d69763,0x3c317d27f44b3afe}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 
16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fb7,0xea91,0xa3ea,0x2a21,0x9cd1,0x26b3,0xde73,0xa2d3,0xcecc,0x20a1,0xc963,0x266b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea912fb7,0x2a21a3ea,0x26b39cd1,0xa2d3de73,0x20a1cecc,0x266bc963}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x2a21a3eaea912fb7,0xa2d3de7326b39cd1,0x266bc96320a1cecc}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 5}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc40,0xb21f,0x5bc7,0x8dca,0xc2a2,0xca0a,0xc8b1,0xbddd,0xcb7d,0xc9d5,0xa9d,0x3cc0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb21f0c40,0x8dca5bc7,0xca0ac2a2,0xbdddc8b1,0xc9d5cb7d,0x3cc00a9d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x8dca5bc7b21f0c40,0xbdddc8b1ca0ac2a2,0x3cc00a9dc9d5cb7d}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc40,0xb21f,0x5bc7,0x8dca,0xc2a2,0xca0a,0xc8b1,0xbddd,0xcb7d,0xc9d5,0xa9d,0x3cc0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb21f0c40,0x8dca5bc7,0xca0ac2a2,0xbdddc8b1,0xc9d5cb7d,0x3cc00a9d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x8dca5bc7b21f0c40,0xbdddc8b1ca0ac2a2,0x3cc00a9dc9d5cb7d}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 
+{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8620,0xd90f,0x2de3,0x46e5,0x6151,0xe505,0xe458,0xdeee,0xe5be,0xe4ea,0x54e,0x1e60}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd90f8620,0x46e52de3,0xe5056151,0xdeeee458,0xe4eae5be,0x1e60054e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x46e52de3d90f8620,0xdeeee458e5056151,0x1e60054ee4eae5be}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6801,0x9f6f,0x4e1e,0xbd0b,0x5c5e,0xaa12,0xb4c6,0x3849,0xd6c7,0x2d76,0x3227,0xb106}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9f6f6801,0xbd0b4e1e,0xaa125c5e,0x3849b4c6,0x2d76d6c7,0xb1063227}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbd0b4e1e9f6f6801,0x3849b4c6aa125c5e,0xb10632272d76d6c7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8400,0x4135,0x343c,0xa4cf,0x6603,0xa414,0xc207,0x5ac7,0x921b,0xd084,0x1ed,0x6cf,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x280}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x41358400,0xa4cf343c,0xa4146603,0x5ac7c207,0xd084921b,0x6cf01ed,0x0,0x0,0x0,0x0,0x0,0x2800000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xa4cf343c41358400,0x5ac7c207a4146603,0x6cf01edd084921b,0x0,0x0,0x280000000000000}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8620,0xd90f,0x2de3,0x46e5,0x6151,0xe505,0xe458,0xdeee,0xe5be,0xe4ea,0x54e,0x1e60}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d 
= (mp_limb_t[]) {0xd90f8620,0x46e52de3,0xe5056151,0xdeeee458,0xe4eae5be,0x1e60054e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x46e52de3d90f8620,0xdeeee458e5056151,0x1e60054ee4eae5be}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x8400,0x4135,0x343c,0xa4cf,0x6603,0xa414,0xc207,0x5ac7,0x921b,0xd084,0x1ed,0x6cf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x41358400,0xa4cf343c,0xa4146603,0x5ac7c207,0xd084921b,0x6cf01ed}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xa4cf343c41358400,0x5ac7c207a4146603,0x6cf01edd084921b}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc40,0xb21f,0x5bc7,0x8dca,0xc2a2,0xca0a,0xc8b1,0xbddd,0xcb7d,0xc9d5,0xa9d,0x3cc0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb21f0c40,0x8dca5bc7,0xca0ac2a2,0xbdddc8b1,0xc9d5cb7d,0x3cc00a9d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x8dca5bc7b21f0c40,0xbdddc8b1ca0ac2a2,0x3cc00a9dc9d5cb7d}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6801,0x9f6f,0x4e1e,0xbd0b,0x5c5e,0xaa12,0xb4c6,0x3849,0xd6c7,0x2d76,0x3227,0xb106}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9f6f6801,0xbd0b4e1e,0xaa125c5e,0x3849b4c6,0x2d76d6c7,0xb1063227}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbd0b4e1e9f6f6801,0x3849b4c6aa125c5e,0xb10632272d76d6c7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, 
._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 13}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x5b40,0x7328,0xdb38,0x5357,0x465b,0x31d8,0x1f3,0x85cf,0x32b9,0xd8dc,0x6f6d,0x3dab,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x73285b40,0x5357db38,0x31d8465b,0x85cf01f3,0xd8dc32b9,0x3dab6f6d,0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5357db3873285b40,0x85cf01f331d8465b,0x3dab6f6dd8dc32b9,0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x5b40,0x7328,0xdb38,0x5357,0x465b,0x31d8,0x1f3,0x85cf,0x32b9,0xd8dc,0x6f6d,0x3dab,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x73285b40,0x5357db38,0x31d8465b,0x85cf01f3,0xd8dc32b9,0x3dab6f6d,0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5357db3873285b40,0x85cf01f331d8465b,0x3dab6f6dd8dc32b9,0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x2da0,0x3994,0xed9c,0xa9ab,0x232d,0x98ec,0x80f9,0xc2e7,0x195c,0xec6e,0xb7b6,0x1ed5,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x39942da0,0xa9abed9c,0x98ec232d,0xc2e780f9,0xec6e195c,0x1ed5b7b6,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa9abed9c39942da0,0xc2e780f998ec232d,0x1ed5b7b6ec6e195c,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 
+{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0xb7ef,0x4ddc,0x58cc,0xe284,0xc4a7,0xb9ed,0xdca9,0xc383,0xc3dd,0x5a13,0xd2bc,0x7663,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0x4ddcb7ef,0xe28458cc,0xb9edc4a7,0xc383dca9,0x5a13c3dd,0x7663d2bc,0x3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe28458cc4ddcb7ef,0xc383dca9b9edc4a7,0x7663d2bc5a13c3dd,0x3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xdc07,0x925a,0x605a,0x9489,0x475b,0x7944,0x880f,0x65fa,0xed5a,0x329c,0x13f8,0x78f2,0xfffe,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x207f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x925adc07,0x9489605a,0x7944475b,0x65fa880f,0x329ced5a,0x78f213f8,0xfffffffe,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x207fffff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9489605a925adc07,0x65fa880f7944475b,0x78f213f8329ced5a,0xfffffffffffffffe,0xffffffffffffffff,0x207fffffffffffff}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x2da0,0x3994,0xed9c,0xa9ab,0x232d,0x98ec,0x80f9,0xc2e7,0x195c,0xec6e,0xb7b6,0x1ed5,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x39942da0,0xa9abed9c,0x98ec232d,0xc2e780f9,0xec6e195c,0x1ed5b7b6,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa9abed9c39942da0,0xc2e780f998ec232d,0x1ed5b7b6ec6e195c,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif 
GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9c07,0x5ca4,0xc660,0xc2e5,0x94d7,0x2b1d,0x3b32,0xa3de,0x67a4,0x2fd3,0xfeab,0x1a11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x5ca49c07,0xc2e5c660,0x2b1d94d7,0xa3de3b32,0x2fd367a4,0x1a11feab}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xc2e5c6605ca49c07,0xa3de3b322b1d94d7,0x1a11feab2fd367a4}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x5b40,0x7328,0xdb38,0x5357,0x465b,0x31d8,0x1f3,0x85cf,0x32b9,0xd8dc,0x6f6d,0x3dab,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x73285b40,0x5357db38,0x31d8465b,0x85cf01f3,0xd8dc32b9,0x3dab6f6d,0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5357db3873285b40,0x85cf01f331d8465b,0x3dab6f6dd8dc32b9,0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0xb7ef,0x4ddc,0x58cc,0xe284,0xc4a7,0xb9ed,0xdca9,0xc383,0xc3dd,0x5a13,0xd2bc,0x7663,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0x4ddcb7ef,0xe28458cc,0xb9edc4a7,0xc383dca9,0x5a13c3dd,0x7663d2bc,0x3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe28458cc4ddcb7ef,0xc383dca9b9edc4a7,0x7663d2bc5a13c3dd,0x3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 17}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = 
(mp_limb_t[]) {0xe8e2,0xea6f,0x72f1,0x2e52,0x152a,0xc137,0x5fe4,0xfd0e,0x9736,0x7a1,0xfa3d,0xc6b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xea6fe8e2,0x2e5272f1,0xc137152a,0xfd0e5fe4,0x7a19736,0xc6bfa3d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x2e5272f1ea6fe8e2,0xfd0e5fe4c137152a,0xc6bfa3d07a19736}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x9a15,0x48a0,0x16ae,0xa42,0x3772,0x534a,0x26a7,0x2f5e,0xce7c,0x39eb,0xa365,0x745c,0x6a25,0xa257,0x2576,0x576a,0x76a2,0x6a25,0xa257,0x2576,0x576a,0x76a2,0x6a25,0x657}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x48a09a15,0xa4216ae,0x534a3772,0x2f5e26a7,0x39ebce7c,0x745ca365,0xa2576a25,0x576a2576,0x6a2576a2,0x2576a257,0x76a2576a,0x6576a25}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xa4216ae48a09a15,0x2f5e26a7534a3772,0x745ca36539ebce7c,0x576a2576a2576a25,0x2576a2576a2576a2,0x6576a2576a2576a}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} 
+#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x50e5,0x2533,0xb03b,0x2c45,0xfde,0xaaf1,0xafff,0x8c73,0xebfd,0xfb3,0xc7bc,0x26}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x253350e5,0x2c45b03b,0xaaf10fde,0x8c73afff,0xfb3ebfd,0x26c7bc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x2c45b03b253350e5,0x8c73afffaaf10fde,0x26c7bc0fb3ebfd}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xe8e2,0xea6f,0x72f1,0x2e52,0x152a,0xc137,0x5fe4,0xfd0e,0x9736,0x7a1,0xfa3d,0xc6b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xea6fe8e2,0x2e5272f1,0xc137152a,0xfd0e5fe4,0x7a19736,0xc6bfa3d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x2e5272f1ea6fe8e2,0xfd0e5fe4c137152a,0xc6bfa3d07a19736}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 
64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 41}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb000,0x6067,0x56e7,0x950c,0xb3d,0x28e6,0x1bfb,0xb990,0xeb8,0x7184,0x2273,0x29aa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6067b000,0x950c56e7,0x28e60b3d,0xb9901bfb,0x71840eb8,0x29aa2273}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x950c56e76067b000,0xb9901bfb28e60b3d,0x29aa227371840eb8}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb000,0x6067,0x56e7,0x950c,0xb3d,0x28e6,0x1bfb,0xb990,0xeb8,0x7184,0x2273,0x29aa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6067b000,0x950c56e7,0x28e60b3d,0xb9901bfb,0x71840eb8,0x29aa2273}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x950c56e76067b000,0xb9901bfb28e60b3d,0x29aa227371840eb8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd800,0xb033,0x2b73,0xca86,0x59e,0x9473,0xdfd,0x5cc8,0x75c,0xb8c2,0x1139,0x14d5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb033d800,0xca862b73,0x9473059e,0x5cc80dfd,0xb8c2075c,0x14d51139}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xca862b73b033d800,0x5cc80dfd9473059e,0x14d51139b8c2075c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xffff,0xef7f,0x6120,0xdec9,0x3d80,0xfcb4,0xe8d7,0x2d72,0x4077,0xeecc,0xcd2a,0x4bc9,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xef7fffff,0xdec96120,0xfcb43d80,0x2d72e8d7,0xeecc4077,0x4bc9cd2a,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdec96120ef7fffff,0x2d72e8d7fcb43d80,0x4bc9cd2aeecc4077,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -24, ._mp_d = (mp_limb_t[]) {0x73ba,0x1227,0x9519,0xedfb,0x605b,0xe80,0x1a20,0xf0b2,0xb418,0xa90c,0xb325,0xefd6,0x7e3e,0xf8fc,0xe3f1,0x8fc7,0x3f1f,0xfc7e,0xf1f8,0xc7e3,0x1f8f,0x7e3f,0xf8fc,0x71}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x122773ba,0xedfb9519,0xe80605b,0xf0b21a20,0xa90cb418,0xefd6b325,0xf8fc7e3e,0x8fc7e3f1,0xfc7e3f1f,0xc7e3f1f8,0x7e3f1f8f,0x71f8fc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xedfb9519122773ba,0xf0b21a200e80605b,0xefd6b325a90cb418,0x8fc7e3f1f8fc7e3e,0xc7e3f1f8fc7e3f1f,0x71f8fc7e3f1f8f}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd800,0xb033,0x2b73,0xca86,0x59e,0x9473,0xdfd,0x5cc8,0x75c,0xb8c2,0x1139,0x14d5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb033d800,0xca862b73,0x9473059e,0x5cc80dfd,0xb8c2075c,0x14d51139}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xca862b73b033d800,0x5cc80dfd9473059e,0x14d51139b8c2075c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x73ba,0x8a7,0x681e,0x130f,0xeee3,0xd966,0x4ebe,0xf78b,0xba4d,0xfa9,0xc409,0x245}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x8a773ba,0x130f681e,0xd966eee3,0xf78b4ebe,0xfa9ba4d,0x245c409}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x130f681e08a773ba,0xf78b4ebed966eee3,0x245c4090fa9ba4d}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xb000,0x6067,0x56e7,0x950c,0xb3d,0x28e6,0x1bfb,0xb990,0xeb8,0x7184,0x2273,0x29aa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6067b000,0x950c56e7,0x28e60b3d,0xb9901bfb,0x71840eb8,0x29aa2273}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x950c56e76067b000,0xb9901bfb28e60b3d,0x29aa227371840eb8}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xffff,0xef7f,0x6120,0xdec9,0x3d80,0xfcb4,0xe8d7,0x2d72,0x4077,0xeecc,0xcd2a,0x4bc9,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xef7fffff,0xdec96120,0xfcb43d80,0x2d72e8d7,0xeecc4077,0x4bc9cd2a,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdec96120ef7fffff,0x2d72e8d7fcb43d80,0x4bc9cd2aeecc4077,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 73}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4fae,0x9faa,0x2b8a,0x6a69,0x436c,0x633a,0x7892,0x301c,0xec62,0xcb54,0xe41,0xac50}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9faa4fae,0x6a692b8a,0x633a436c,0x301c7892,0xcb54ec62,0xac500e41}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x6a692b8a9faa4fae,0x301c7892633a436c,0xac500e41cb54ec62}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -24, ._mp_d = (mp_limb_t[]) {0x30b3,0xeb66,0x87b7,0x617e,0x27c,0xfa7,0xdcf4,0x90c8,0x7e8b,0x9e3c,0xaf36,0xb7ba,0x5eeb,0xbaf7,0xbdd7,0x75ee,0x7baf,0xebdd,0xf75e,0xd7ba,0xeebd,0xaf75,0xdd7b,0x2eb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xeb6630b3,0x617e87b7,0xfa7027c,0x90c8dcf4,0x9e3c7e8b,0xb7baaf36,0xbaf75eeb,0x75eebdd7,0xebdd7baf,0xd7baf75e,0xaf75eebd,0x2ebdd7b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) 
{0x617e87b7eb6630b3,0x90c8dcf40fa7027c,0xb7baaf369e3c7e8b,0x75eebdd7baf75eeb,0xd7baf75eebdd7baf,0x2ebdd7baf75eebd}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xb5ab,0x986,0x1b92,0x5123,0x4b2a,0x653b,0x4896,0xc0fd,0x579e,0xc06c,0xd20e,0xf7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x986b5ab,0x51231b92,0x653b4b2a,0xc0fd4896,0xc06c579e,0xf7d20e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x51231b920986b5ab,0xc0fd4896653b4b2a,0xf7d20ec06c579e}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4fae,0x9faa,0x2b8a,0x6a69,0x436c,0x633a,0x7892,0x301c,0xec62,0xcb54,0xe41,0xac50}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9faa4fae,0x6a692b8a,0x633a436c,0x301c7892,0xcb54ec62,0xac500e41}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x6a692b8a9faa4fae,0x301c7892633a436c,0xac500e41cb54ec62}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 89}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xa194,0x6df0,0x4b4c,0xf874,0xb43e,0x362a,0x11bb,0x84f2,0xc623,0x61a2,0x7d42,0xe501,0x30}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x6df0a194,0xf8744b4c,0x362ab43e,0x84f211bb,0x61a2c623,0xe5017d42,0x30}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8744b4c6df0a194,0x84f211bb362ab43e,0xe5017d4261a2c623,0x30}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xa194,0x6df0,0x4b4c,0xf874,0xb43e,0x362a,0x11bb,0x84f2,0xc623,0x61a2,0x7d42,0xe501,0x30}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x6df0a194,0xf8744b4c,0x362ab43e,0x84f211bb,0x61a2c623,0xe5017d42,0x30}}} +#elif GMP_LIMB_BITS == 
64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8744b4c6df0a194,0x84f211bb362ab43e,0xe5017d4261a2c623,0x30}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x50ca,0x36f8,0x25a6,0x7c3a,0x5a1f,0x9b15,0x8dd,0xc279,0x6311,0x30d1,0xbea1,0x7280,0x18}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x36f850ca,0x7c3a25a6,0x9b155a1f,0xc27908dd,0x30d16311,0x7280bea1,0x18}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7c3a25a636f850ca,0xc27908dd9b155a1f,0x7280bea130d16311,0x18}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x83f7,0x63ae,0x245e,0x2154,0x883c,0x544b,0x8f96,0x1b2d,0xcc0c,0x8d73,0x7bdd,0x118e,0x1df}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x63ae83f7,0x2154245e,0x544b883c,0x1b2d8f96,0x8d73cc0c,0x118e7bdd,0x1df}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2154245e63ae83f7,0x1b2d8f96544b883c,0x118e7bdd8d73cc0c,0x1df}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -24, ._mp_d = (mp_limb_t[]) {0xbd79,0x489c,0xbd84,0xce46,0x9344,0xb194,0x642a,0x3c5a,0xdb04,0x96f5,0x6e1f,0x4dcb,0xff6e,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x207f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x489cbd79,0xce46bd84,0xb1949344,0x3c5a642a,0x96f5db04,0x4dcb6e1f,0xffffff6e,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x207fffff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xce46bd84489cbd79,0x3c5a642ab1949344,0x4dcb6e1f96f5db04,0xffffffffffffff6e,0xffffffffffffffff,0x207fffffffffffff}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 
+{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x50ca,0x36f8,0x25a6,0x7c3a,0x5a1f,0x9b15,0x8dd,0xc279,0x6311,0x30d1,0xbea1,0x7280,0x18}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x36f850ca,0x7c3a25a6,0x9b155a1f,0xc27908dd,0x30d16311,0x7280bea1,0x18}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7c3a25a636f850ca,0xc27908dd9b155a1f,0x7280bea130d16311,0x18}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0xa1c9,0x3fda,0x577,0x71a8,0xf4d3,0x4269,0xecf2,0x2a5d,0x41b6,0x6e41,0x47e5,0x782c,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0x3fdaa1c9,0x71a80577,0x4269f4d3,0x2a5decf2,0x6e4141b6,0x782c47e5,0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x71a805773fdaa1c9,0x2a5decf24269f4d3,0x782c47e56e4141b6,0x2}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xa194,0x6df0,0x4b4c,0xf874,0xb43e,0x362a,0x11bb,0x84f2,0xc623,0x61a2,0x7d42,0xe501,0x30}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x6df0a194,0xf8744b4c,0x362ab43e,0x84f211bb,0x61a2c623,0xe5017d42,0x30}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8744b4c6df0a194,0x84f211bb362ab43e,0xe5017d4261a2c623,0x30}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x83f7,0x63ae,0x245e,0x2154,0x883c,0x544b,0x8f96,0x1b2d,0xcc0c,0x8d73,0x7bdd,0x118e,0x1df}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x63ae83f7,0x2154245e,0x544b883c,0x1b2d8f96,0x8d73cc0c,0x118e7bdd,0x1df}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x2154245e63ae83f7,0x1b2d8f96544b883c,0x118e7bdd8d73cc0c,0x1df}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 97}}; +const quat_left_ideal_t CONNECTING_IDEALS[8] = {{{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d 
= (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) 
{0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xbf5a,0x9a6f,0xcde1,0x21d4,0x52b1,0xe7a0,0xf3ba,0x78eb,0xc45c,0x787f,0x5c29,0x1c51,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x9a6fbf5a,0x21d4cde1,0xe7a052b1,0x78ebf3ba,0x787fc45c,0x1c515c29,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x21d4cde19a6fbf5a,0x78ebf3bae7a052b1,0x1c515c29787fc45c,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8015,0xfd5c,0xb508,0x1437,0xfa92,0x6222,0x1452,0xa79a,0x6c31,0xd3a9,0xb3c4,0x15c5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfd5c8015,0x1437b508,0x6222fa92,0xa79a1452,0xd3a96c31,0x15c5b3c4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x1437b508fd5c8015,0xa79a14526222fa92,0x15c5b3c4d3a96c31}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xbf5a,0x9a6f,0xcde1,0x21d4,0x52b1,0xe7a0,0xf3ba,0x78eb,0xc45c,0x787f,0x5c29,0x1c51,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x9a6fbf5a,0x21d4cde1,0xe7a052b1,0x78ebf3ba,0x787fc45c,0x1c515c29,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x21d4cde19a6fbf5a,0x78ebf3bae7a052b1,0x1c515c29787fc45c,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x3f45,0x9d13,0x18d8,0xd9d,0x581f,0x857d,0xdf68,0xd151,0x582a,0xa4d6,0xa864,0x68b,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x9d133f45,0xd9d18d8,0x857d581f,0xd151df68,0xa4d6582a,0x68ba864,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0xd9d18d89d133f45,0xd151df68857d581f,0x68ba864a4d6582a,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfad,0xcd37,0x66f0,0x90ea,0x2958,0x73d0,0xf9dd,0x3c75,0xe22e,0xbc3f,0xae14,0x8e28}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xcd37dfad,0x90ea66f0,0x73d02958,0x3c75f9dd,0xbc3fe22e,0x8e28ae14}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x90ea66f0cd37dfad,0x3c75f9dd73d02958,0x8e28ae14bc3fe22e}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x9d7a,0x920e,0xe71,0xc120,0x8fbf,0x607e,0x29f,0xff55,0x7422,0x4796,0xbca4,0x125b,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x920e9d7a,0xc1200e71,0x607e8fbf,0xff55029f,0x47967422,0x125bbca4,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc1200e71920e9d7a,0xff55029f607e8fbf,0x125bbca447967422,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbcbf,0x76ed,0xc538,0xec53,0xeb88,0xb40d,0xa54e,0x14f,0x8bb2,0x300a,0xedb2,0x539}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x76edbcbf,0xec53c538,0xb40deb88,0x14fa54e,0x300a8bb2,0x539edb2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xec53c53876edbcbf,0x14fa54eb40deb88,0x539edb2300a8bb2}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x9d7a,0x920e,0xe71,0xc120,0x8fbf,0x607e,0x29f,0xff55,0x7422,0x4796,0xbca4,0x125b,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x920e9d7a,0xc1200e71,0x607e8fbf,0xff55029f,0x47967422,0x125bbca4,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc1200e71920e9d7a,0xff55029f607e8fbf,0x125bbca447967422,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xe0bb,0x1b20,0x4939,0xd4cc,0xa436,0xac70,0x5d50,0xfe05,0xe870,0x178b,0xcef2,0xd21,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x1b20e0bb,0xd4cc4939,0xac70a436,0xfe055d50,0x178be870,0xd21cef2,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd4cc49391b20e0bb,0xfe055d50ac70a436,0xd21cef2178be870,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif 
GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4ebd,0xc907,0x738,0xe090,0x47df,0xb03f,0x814f,0x7faa,0x3a11,0x23cb,0xde52,0x892d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc9074ebd,0xe0900738,0xb03f47df,0x7faa814f,0x23cb3a11,0x892dde52}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe0900738c9074ebd,0x7faa814fb03f47df,0x892dde5223cb3a11}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xd282,0xcb1f,0x6532,0xe33e,0x153d,0xfd8,0x4275,0x2b62,0xf17d,0xdb04,0x3f12,0xf722,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xcb1fd282,0xe33e6532,0xfd8153d,0x2b624275,0xdb04f17d,0xf7223f12,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe33e6532cb1fd282,0x2b6242750fd8153d,0xf7223f12db04f17d,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, 
+#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x50bf,0xeebf,0xe944,0xea4d,0x76d,0xcbc5,0x4919,0x12b0,0x71f3,0x9e30,0x3304,0x1265}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xeebf50bf,0xea4de944,0xcbc5076d,0x12b04919,0x9e3071f3,0x12653304}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xea4de944eebf50bf,0x12b04919cbc5076d,0x126533049e3071f3}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xd282,0xcb1f,0x6532,0xe33e,0x153d,0xfd8,0x4275,0x2b62,0xf17d,0xdb04,0x3f12,0xf722,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xcb1fd282,0xe33e6532,0xfd8153d,0x2b624275,0xdb04f17d,0xf7223f12,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe33e6532cb1fd282,0x2b6242750fd8153d,0xf7223f12db04f17d,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x81c3,0xdc60,0x7bed,0xf8f0,0xdcf,0x4413,0xf95b,0x18b1,0x7f8a,0x3cd4,0xc0e,0xe4bd,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xdc6081c3,0xf8f07bed,0x44130dcf,0x18b1f95b,0x3cd47f8a,0xe4bd0c0e,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8f07beddc6081c3,0x18b1f95b44130dcf,0xe4bd0c0e3cd47f8a,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, 
{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe941,0x658f,0x3299,0xf19f,0xa9e,0x87ec,0x213a,0x95b1,0x78be,0x6d82,0x1f89,0xfb91}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x658fe941,0xf19f3299,0x87ec0a9e,0x95b1213a,0x6d8278be,0xfb911f89}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xf19f3299658fe941,0x95b1213a87ec0a9e,0xfb911f896d8278be}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x60fb,0xd399,0x887f,0xd263,0xe0e7,0xb202,0x699b,0xea34,0x5a15,0x4b8a,0x6763,0x8e95}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd39960fb,0xd263887f,0xb202e0e7,0xea34699b,0x4b8a5a15,0x8e956763}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xd263887fd39960fb,0xea34699bb202e0e7,0x8e9567634b8a5a15}}} +#endif +}, { 
+#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7edf,0xd82a,0x4c38,0xa9b9,0x663f,0xb4af,0xb83e,0x8f97,0x898d,0x9b3,0x342a,0x1298}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd82a7edf,0xa9b94c38,0xb4af663f,0x8f97b83e,0x9b3898d,0x1298342a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xa9b94c38d82a7edf,0x8f97b83eb4af663f,0x1298342a09b3898d}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 
+{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xe00e,0xd869,0x1a76,0xd8de,0xfe4c,0xabc5,0x99e1,0xf264,0x7d83,0x9c3,0x32ab,0xb60b,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xd869e00e,0xd8de1a76,0xabc5fe4c,0xf26499e1,0x9c37d83,0xb60b32ab,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8de1a76d869e00e,0xf26499e1abc5fe4c,0xb60b32ab09c37d83,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xb00f,0x8bbf,0x19a9,0xd6b,0xf7b,0xcd5c,0x74e7,0xd7e2,0xa419,0x3593,0x56a8,0x8de8,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x8bbfb00f,0xd6b19a9,0xcd5c0f7b,0xd7e274e7,0x3593a419,0x8de856a8,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd6b19a98bbfb00f,0xd7e274e7cd5c0f7b,0x8de856a83593a419,0x1}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xe00e,0xd869,0x1a76,0xd8de,0xfe4c,0xabc5,0x99e1,0xf264,0x7d83,0x9c3,0x32ab,0xb60b,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xd869e00e,0xd8de1a76,0xabc5fe4c,0xf26499e1,0x9c37d83,0xb60b32ab,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8de1a76d869e00e,0xf26499e1abc5fe4c,0xb60b32ab09c37d83,0x1}}} 
+#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fff,0x4caa,0xcd,0xcb73,0xeed1,0xde69,0x24f9,0x1a82,0xd96a,0xd42f,0xdc02,0x2822}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4caa2fff,0xcb7300cd,0xde69eed1,0x1a8224f9,0xd42fd96a,0x2822dc02}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcb7300cd4caa2fff,0x1a8224f9de69eed1,0x2822dc02d42fd96a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf007,0x6c34,0xd3b,0x6c6f,0xff26,0xd5e2,0x4cf0,0xf932,0xbec1,0x84e1,0x9955,0xdb05}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6c34f007,0x6c6f0d3b,0xd5e2ff26,0xf9324cf0,0x84e1bec1,0xdb059955}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d 
= (mp_limb_t[]) {0x6c6f0d3b6c34f007,0xf9324cf0d5e2ff26,0xdb05995584e1bec1}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3a91,0xcd01,0xac55,0x9a52,0x9887,0x118f,0x4dec,0x4245,0xd869,0x1022,0x1d16,0x7ad}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xcd013a91,0x9a52ac55,0x118f9887,0x42454dec,0x1022d869,0x7ad1d16}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x9a52ac55cd013a91,0x42454dec118f9887,0x7ad1d161022d869}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4095,0x6a9f,0x1c86,0xfd81,0xe6a7,0xc52d,0xbb45,0xdbac,0x50ae,0x3a1b,0x87b,0x673a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6a9f4095,0xfd811c86,0xc52de6a7,0xdbacbb45,0x3a1b50ae,0x673a087b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xfd811c866a9f4095,0xdbacbb45c52de6a7,0x673a087b3a1b50ae}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe5ca,0x3b34,0xb04b,0x430f,0xe795,0xa04a,0x8c7d,0xec47,0x77df,0x8e5c,0xb71e,0xd31f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3b34e5ca,0x430fb04b,0xa04ae795,0xec478c7d,0x8e5c77df,0xd31fb71e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = 
(mp_limb_t[]) {0x430fb04b3b34e5ca,0xec478c7da04ae795,0xd31fb71e8e5c77df}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4d27,0x98d5,0x3839,0x83ff,0x48b7,0x4d5b,0xc95b,0xbe45,0x9d44,0x36f3,0x4d57,0x6c26}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x98d54d27,0x83ff3839,0x4d5b48b7,0xbe45c95b,0x36f39d44,0x6c264d57}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x83ff383998d54d27,0xbe45c95b4d5b48b7,0x6c264d5736f39d44}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe5ca,0x3b34,0xb04b,0x430f,0xe795,0xa04a,0x8c7d,0xec47,0x77df,0x8e5c,0xb71e,0xd31f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3b34e5ca,0x430fb04b,0xa04ae795,0xec478c7d,0x8e5c77df,0xd31fb71e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x430fb04b3b34e5ca,0xec478c7da04ae795,0xd31fb71e8e5c77df}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x98a3,0xa25f,0x7811,0xbf10,0x9edd,0x52ef,0xc322,0x2e01,0xda9b,0x5768,0x69c7,0x66f9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xa25f98a3,0xbf107811,0x52ef9edd,0x2e01c322,0x5768da9b,0x66f969c7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbf107811a25f98a3,0x2e01c32252ef9edd,0x66f969c75768da9b}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 
+{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x72e5,0x9d9a,0xd825,0xa187,0x73ca,0xd025,0xc63e,0xf623,0x3bef,0x472e,0xdb8f,0x698f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9d9a72e5,0xa187d825,0xd02573ca,0xf623c63e,0x472e3bef,0x698fdb8f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xa187d8259d9a72e5,0xf623c63ed02573ca,0x698fdb8f472e3bef}}} +#endif +, &MAXORD_O0}}; +const quat_alg_elem_t CONJUGATING_ELEMENTS[8] = {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x8015,0xfd5c,0xb508,0x1437,0xfa92,0x6222,0x1452,0xa79a,0x6c31,0xd3a9,0xb3c4,0x15c5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xfd5c8015,0x1437b508,0x6222fa92,0xa79a1452,0xd3a96c31,0x15c5b3c4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x1437b508fd5c8015,0xa79a14526222fa92,0x15c5b3c4d3a96c31}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x8015,0xfd5c,0xb508,0x1437,0xfa92,0x6222,0x1452,0xa79a,0x6c31,0xd3a9,0xb3c4,0x15c5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xfd5c8015,0x1437b508,0x6222fa92,0xa79a1452,0xd3a96c31,0x15c5b3c4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x1437b508fd5c8015,0xa79a14526222fa92,0x15c5b3c4d3a96c31}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xbcbf,0x76ed,0xc538,0xec53,0xeb88,0xb40d,0xa54e,0x14f,0x8bb2,0x300a,0xedb2,0x539}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x76edbcbf,0xec53c538,0xb40deb88,0x14fa54e,0x300a8bb2,0x539edb2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xec53c53876edbcbf,0x14fa54eb40deb88,0x539edb2300a8bb2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xbcbf,0x76ed,0xc538,0xec53,0xeb88,0xb40d,0xa54e,0x14f,0x8bb2,0x300a,0xedb2,0x539}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x76edbcbf,0xec53c538,0xb40deb88,0x14fa54e,0x300a8bb2,0x539edb2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xec53c53876edbcbf,0x14fa54eb40deb88,0x539edb2300a8bb2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = 
(mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x2341,0xb9df,0x4e77,0xcd8c,0x1cab,0xdb9d,0x8b8e,0x3e12,0x6370,0x7935,0x7217,0x987,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xb9df2341,0xcd8c4e77,0xdb9d1cab,0x3e128b8e,0x79356370,0x9877217,0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcd8c4e77b9df2341,0x3e128b8edb9d1cab,0x987721779356370,0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0x2341,0xb9df,0x4e77,0xcd8c,0x1cab,0xdb9d,0x8b8e,0x3e12,0x6370,0x7935,0x7217,0x987,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0xb9df2341,0xcd8c4e77,0xdb9d1cab,0x3e128b8e,0x79356370,0x9877217,0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xcd8c4e77b9df2341,0x3e128b8edb9d1cab,0x987721779356370,0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, 
._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x2fff,0x4caa,0xcd,0xcb73,0xeed1,0xde69,0x24f9,0x1a82,0xd96a,0xd42f,0xdc02,0x2822}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x4caa2fff,0xcb7300cd,0xde69eed1,0x1a8224f9,0xd42fd96a,0x2822dc02}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xcb7300cd4caa2fff,0x1a8224f9de69eed1,0x2822dc02d42fd96a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fff,0x4caa,0xcd,0xcb73,0xeed1,0xde69,0x24f9,0x1a82,0xd96a,0xd42f,0xdc02,0x2822}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4caa2fff,0xcb7300cd,0xde69eed1,0x1a8224f9,0xd42fd96a,0x2822dc02}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcb7300cd4caa2fff,0x1a8224f9de69eed1,0x2822dc02d42fd96a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) 
{0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x4385,0xf091,0xe8e9,0xfaa3,0x7d60,0x8ab7,0x68b2,0x8a57,0x2754,0xa10c,0x6f20,0x71e4,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xf0914385,0xfaa3e8e9,0x8ab77d60,0x8a5768b2,0xa10c2754,0x71e46f20,0x4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfaa3e8e9f0914385,0x8a5768b28ab77d60,0x71e46f20a10c2754,0x4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x4385,0xf091,0xe8e9,0xfaa3,0x7d60,0x8ab7,0x68b2,0x8a57,0x2754,0xa10c,0x6f20,0x71e4,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xf0914385,0xfaa3e8e9,0x8ab77d60,0x8a5768b2,0xa10c2754,0x71e46f20,0x4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfaa3e8e9f0914385,0x8a5768b28ab77d60,0x71e46f20a10c2754,0x4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +#endif +}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion_data.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion_data.h new file mode 100644 index 0000000000..740da6e507 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion_data.h @@ -0,0 +1,12 @@ +#include +#define MAXORD_O0 (EXTREMAL_ORDERS->order) +#define STANDARD_EXTREMAL_ORDER (EXTREMAL_ORDERS[0]) +#define NUM_ALTERNATE_EXTREMAL_ORDERS 7 +#define ALTERNATE_EXTREMAL_ORDERS (EXTREMAL_ORDERS+1) +#define ALTERNATE_CONNECTING_IDEALS (CONNECTING_IDEALS+1) +#define ALTERNATE_CONJUGATING_ELEMENTS (CONJUGATING_ELEMENTS+1) +extern const ibz_t QUAT_prime_cofactor; +extern const quat_alg_t QUATALG_PINFTY; +extern const quat_p_extremal_maximal_order_t EXTREMAL_ORDERS[8]; +extern const quat_left_ideal_t CONNECTING_IDEALS[8]; +extern const quat_alg_elem_t CONJUGATING_ELEMENTS[8]; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/randombytes_ctrdrbg.c 
b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/randombytes_ctrdrbg.c
new file mode 100644
index 0000000000..372cc0de81
--- /dev/null
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/randombytes_ctrdrbg.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: Apache-2.0 and Unknown
+//
+/*
+NIST-developed software is provided by NIST as a public service. You may use,
+copy, and distribute copies of the software in any medium, provided that you
+keep intact this entire notice. You may improve, modify, and create derivative
+works of the software or any portion of the software, and you may copy and
+distribute such modifications or works. Modified works should carry a notice
+stating that you changed the software and should note the date and nature of any
+such change. Please explicitly acknowledge the National Institute of Standards
+and Technology as the source of the software.
+
+NIST-developed software is expressly provided "AS IS." NIST MAKES NO WARRANTY OF
+ANY KIND, EXPRESS, IMPLIED, IN FACT, OR ARISING BY OPERATION OF LAW, INCLUDING,
+WITHOUT LIMITATION, THE IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE, NON-INFRINGEMENT, AND DATA ACCURACY. NIST NEITHER REPRESENTS
+NOR WARRANTS THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR
+ERROR-FREE, OR THAT ANY DEFECTS WILL BE CORRECTED. NIST DOES NOT WARRANT OR MAKE
+ANY REPRESENTATIONS REGARDING THE USE OF THE SOFTWARE OR THE RESULTS THEREOF,
+INCLUDING BUT NOT LIMITED TO THE CORRECTNESS, ACCURACY, RELIABILITY, OR
+USEFULNESS OF THE SOFTWARE.
+
+You are solely responsible for determining the appropriateness of using and
+distributing the software and you assume all risks associated with its use,
+including but not limited to the risks and costs of program errors, compliance
+with applicable laws, damage to or loss of data, programs or equipment, and the
+unavailability or interruption of operation. This software is not intended to be
+used in any situation where a failure could cause risk of injury or damage to
+property. The software developed by NIST employees is not subject to copyright
+protection within the United States.
+*/
+
+#include
+#include
+
+#include
+
+#ifdef ENABLE_CT_TESTING
+#include
+#endif
+
+#define RNG_SUCCESS 0
+#define RNG_BAD_MAXLEN -1
+#define RNG_BAD_OUTBUF -2
+#define RNG_BAD_REQ_LEN -3
+
+static inline void AES256_ECB(const unsigned char *key,
+                              const unsigned char *ctr, unsigned char *buffer) {
+    AES_ECB_encrypt(ctr, key, buffer);
+}
+
+typedef struct {
+    unsigned char Key[32];
+    unsigned char V[16];
+    int reseed_counter;
+} AES256_CTR_DRBG_struct;
+
+void AES256_CTR_DRBG_Update(const unsigned char *provided_data,
+                            unsigned char *Key, unsigned char *V);
+
+AES256_CTR_DRBG_struct DRBG_ctx;
+
+#ifndef CTRDRBG_TEST_BENCH
+static
+#endif
+    void
+    randombytes_init_nist(unsigned char *entropy_input,
+                          unsigned char *personalization_string,
+                          int security_strength) {
+    unsigned char seed_material[48];
+
+    (void)security_strength; // Unused parameter
+    memcpy(seed_material, entropy_input, 48);
+    if (personalization_string)
+        for (int i = 0; i < 48; i++) {
+            seed_material[i] ^= personalization_string[i];
+        }
+    memset(DRBG_ctx.Key, 0x00, 32);
+    memset(DRBG_ctx.V, 0x00, 16);
+    AES256_CTR_DRBG_Update(seed_material, DRBG_ctx.Key, DRBG_ctx.V);
+    DRBG_ctx.reseed_counter = 1;
+}
+
+#ifndef CTRDRBG_TEST_BENCH
+static
+#endif
+    int
+    randombytes_nist(unsigned char *x, size_t xlen) {
+    unsigned char block[16];
+    size_t i = 0;
+
+    while (xlen > 0) {
+        // increment V
+        for (int j = 15; j >= 0; j--) {
+            if (DRBG_ctx.V[j] == 0xff) {
+                DRBG_ctx.V[j] = 0x00;
+            } else {
+                DRBG_ctx.V[j]++;
+                break;
+            }
+        }
+        AES256_ECB(DRBG_ctx.Key, DRBG_ctx.V, block);
+        if (xlen > 15) {
+            memcpy(x + i, block, 16);
+            i += 16;
+            xlen -= 16;
+        } else {
+            memcpy(x + i, block, xlen);
+            i += xlen;
+            xlen = 0;
+        }
+    }
+    AES256_CTR_DRBG_Update(NULL, DRBG_ctx.Key, DRBG_ctx.V);
+    DRBG_ctx.reseed_counter++;
+
+    return 0;
+}
+
+void AES256_CTR_DRBG_Update(const unsigned char *provided_data,
+                            unsigned char *Key, unsigned char *V) {
+    unsigned char temp[48];
+
+    for (int i = 0; i < 3; i++) {
+        // increment V
+        for (int j = 15; j >= 0; j--) {
+            if (V[j] == 0xff) {
+                V[j] = 0x00;
+            } else {
+                V[j]++;
+                break;
+            }
+        }
+
+        AES256_ECB(Key, V, temp + 16 * i);
+    }
+    if (provided_data != NULL)
+        for (int i = 0; i < 48; i++) {
+            temp[i] ^= provided_data[i];
+        }
+    memcpy(Key, temp, 32);
+    memcpy(V, temp + 32, 16);
+}
+
+#ifdef RANDOMBYTES_C
+SQISIGN_API
+int randombytes(unsigned char *random_array, unsigned long long nbytes) {
+    int ret = randombytes_nist(random_array, nbytes);
+#ifdef ENABLE_CT_TESTING
+    VALGRIND_MAKE_MEM_UNDEFINED(random_array, ret);
+#endif
+    return ret;
+}
+
+SQISIGN_API
+void randombytes_init(unsigned char *entropy_input,
+                      unsigned char *personalization_string,
+                      int security_strength) {
+    randombytes_init_nist(entropy_input, personalization_string,
+                          security_strength);
+}
+#endif
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/randombytes_system.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/randombytes_system.c
new file mode 100644
index 0000000000..689c29b242
--- /dev/null
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/randombytes_system.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: MIT
+
+/*
+The MIT License
+Copyright (c) 2017 Daan Sprenkels
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom
the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ + +#include + +#ifdef ENABLE_CT_TESTING +#include +#endif + +// In the case that are compiling on linux, we need to define _GNU_SOURCE +// *before* randombytes.h is included. Otherwise SYS_getrandom will not be +// declared. +#if defined(__linux__) || defined(__GNU__) +#define _GNU_SOURCE +#endif /* defined(__linux__) || defined(__GNU__) */ + +#if defined(_WIN32) +/* Windows */ +#include +#include /* CryptAcquireContext, CryptGenRandom */ +#endif /* defined(_WIN32) */ + +/* wasi */ +#if defined(__wasi__) +#include +#endif + +/* kFreeBSD */ +#if defined(__FreeBSD_kernel__) && defined(__GLIBC__) +#define GNU_KFREEBSD +#endif + +#if defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) +/* Linux */ +// We would need to include , but not every target has access +// to the linux headers. We only need RNDGETENTCNT, so we instead inline it. +// RNDGETENTCNT is originally defined in `include/uapi/linux/random.h` in the +// linux repo. +#define RNDGETENTCNT 0x80045200 + +#include +#include +#include +#include +#include +#include +#include +#if (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && \ + ((__GLIBC__ > 2) || (__GLIBC_MINOR__ > 24)) +#define USE_GLIBC +#include +#endif /* (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && ((__GLIBC__ > 2) || \ + (__GLIBC_MINOR__ > 24)) */ +#include +#include +#include +#include + +// We need SSIZE_MAX as the maximum read len from /dev/urandom +#if !defined(SSIZE_MAX) +#define SSIZE_MAX (SIZE_MAX / 2 - 1) +#endif /* defined(SSIZE_MAX) */ + +#endif /* defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) */ + +#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) +/* Dragonfly, FreeBSD, NetBSD, OpenBSD (has arc4random) */ +#include +#if defined(BSD) +#include +#endif +/* GNU/Hurd defines BSD in sys/param.h which causes problems later */ +#if defined(__GNU__) +#undef BSD +#endif +#endif + +#if defined(__EMSCRIPTEN__) +#include +#include +#include +#include +#endif /* defined(__EMSCRIPTEN__) */ + +#if defined(_WIN32) +static int +randombytes_win32_randombytes(void *buf, size_t n) +{ + HCRYPTPROV ctx; + BOOL tmp; + DWORD to_read = 0; + const size_t MAX_DWORD = 0xFFFFFFFF; + + tmp = CryptAcquireContext(&ctx, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT); + if (tmp == FALSE) + return -1; + + while (n > 0) { + to_read = (DWORD)(n < MAX_DWORD ? 
n : MAX_DWORD); + tmp = CryptGenRandom(ctx, to_read, (BYTE *)buf); + if (tmp == FALSE) + return -1; + buf = ((char *)buf) + to_read; + n -= to_read; + } + + tmp = CryptReleaseContext(ctx, 0); + if (tmp == FALSE) + return -1; + + return 0; +} +#endif /* defined(_WIN32) */ + +#if defined(__wasi__) +static int +randombytes_wasi_randombytes(void *buf, size_t n) +{ + arc4random_buf(buf, n); + return 0; +} +#endif /* defined(__wasi__) */ + +#if (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || defined(SYS_getrandom)) +#if defined(USE_GLIBC) +// getrandom is declared in glibc. +#elif defined(SYS_getrandom) +static ssize_t +getrandom(void *buf, size_t buflen, unsigned int flags) +{ + return syscall(SYS_getrandom, buf, buflen, flags); +} +#endif + +static int +randombytes_linux_randombytes_getrandom(void *buf, size_t n) +{ + /* I have thought about using a separate PRF, seeded by getrandom, but + * it turns out that the performance of getrandom is good enough + * (250 MB/s on my laptop). + */ + size_t offset = 0, chunk; + int ret; + while (n > 0) { + /* getrandom does not allow chunks larger than 33554431 */ + chunk = n <= 33554431 ? n : 33554431; + do { + ret = getrandom((char *)buf + offset, chunk, 0); + } while (ret == -1 && errno == EINTR); + if (ret < 0) + return ret; + offset += ret; + n -= ret; + } + assert(n == 0); + return 0; +} +#endif /* (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || \ + defined(SYS_getrandom)) */ + +#if (defined(__linux__) || defined(GNU_KFREEBSD)) && !defined(SYS_getrandom) + +#if defined(__linux__) +static int +randombytes_linux_read_entropy_ioctl(int device, int *entropy) +{ + return ioctl(device, RNDGETENTCNT, entropy); +} + +static int +randombytes_linux_read_entropy_proc(FILE *stream, int *entropy) +{ + int retcode; + do { + rewind(stream); + retcode = fscanf(stream, "%d", entropy); + } while (retcode != 1 && errno == EINTR); + if (retcode != 1) { + return -1; + } + return 0; +} + +static int +randombytes_linux_wait_for_entropy(int device) +{ + /* We will block on /dev/random, because any increase in the OS' entropy + * level will unblock the request. I use poll here (as does libsodium), + * because we don't *actually* want to read from the device. */ + enum + { + IOCTL, + PROC + } strategy = IOCTL; + const int bits = 128; + struct pollfd pfd; + int fd; + FILE *proc_file; + int retcode, retcode_error = 0; // Used as return codes throughout this function + int entropy = 0; + + /* If the device has enough entropy already, we will want to return early */ + retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); + // printf("errno: %d (%s)\n", errno, strerror(errno)); + if (retcode != 0 && (errno == ENOTTY || errno == ENOSYS)) { + // The ioctl call on /dev/urandom has failed due to a + // - ENOTTY (unsupported action), or + // - ENOSYS (invalid ioctl; this happens on MIPS, see #22). + // + // We will fall back to reading from + // `/proc/sys/kernel/random/entropy_avail`. This less ideal, + // because it allocates a file descriptor, and it may not work + // in a chroot. But at this point it seems we have no better + // options left. 
+ strategy = PROC; + // Open the entropy count file + proc_file = fopen("/proc/sys/kernel/random/entropy_avail", "r"); + if (proc_file == NULL) { + return -1; + } + } else if (retcode != 0) { + // Unrecoverable ioctl error + return -1; + } + if (entropy >= bits) { + return 0; + } + + do { + fd = open("/dev/random", O_RDONLY); + } while (fd == -1 && errno == EINTR); /* EAGAIN will not occur */ + if (fd == -1) { + /* Unrecoverable IO error */ + return -1; + } + + pfd.fd = fd; + pfd.events = POLLIN; + for (;;) { + retcode = poll(&pfd, 1, -1); + if (retcode == -1 && (errno == EINTR || errno == EAGAIN)) { + continue; + } else if (retcode == 1) { + if (strategy == IOCTL) { + retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); + } else if (strategy == PROC) { + retcode = randombytes_linux_read_entropy_proc(proc_file, &entropy); + } else { + return -1; // Unreachable + } + + if (retcode != 0) { + // Unrecoverable I/O error + retcode_error = retcode; + break; + } + if (entropy >= bits) { + break; + } + } else { + // Unreachable: poll() should only return -1 or 1 + retcode_error = -1; + break; + } + } + do { + retcode = close(fd); + } while (retcode == -1 && errno == EINTR); + if (strategy == PROC) { + do { + retcode = fclose(proc_file); + } while (retcode == -1 && errno == EINTR); + } + if (retcode_error != 0) { + return retcode_error; + } + return retcode; +} +#endif /* defined(__linux__) */ + +static int +randombytes_linux_randombytes_urandom(void *buf, size_t n) +{ + int fd; + size_t offset = 0, count; + ssize_t tmp; + do { + fd = open("/dev/urandom", O_RDONLY); + } while (fd == -1 && errno == EINTR); + if (fd == -1) + return -1; +#if defined(__linux__) + if (randombytes_linux_wait_for_entropy(fd) == -1) + return -1; +#endif + + while (n > 0) { + count = n <= SSIZE_MAX ? 
n : SSIZE_MAX; + tmp = read(fd, (char *)buf + offset, count); + if (tmp == -1 && (errno == EAGAIN || errno == EINTR)) { + continue; + } + if (tmp == -1) + return -1; /* Unrecoverable IO error */ + offset += tmp; + n -= tmp; + } + close(fd); + assert(n == 0); + return 0; +} +#endif /* defined(__linux__) && !defined(SYS_getrandom) */ + +#if defined(BSD) +static int +randombytes_bsd_randombytes(void *buf, size_t n) +{ + arc4random_buf(buf, n); + return 0; +} +#endif /* defined(BSD) */ + +#if defined(__EMSCRIPTEN__) +static int +randombytes_js_randombytes_nodejs(void *buf, size_t n) +{ + const int ret = EM_ASM_INT( + { + var crypto; + try { + crypto = require('crypto'); + } catch (error) { + return -2; + } + try { + writeArrayToMemory(crypto.randomBytes($1), $0); + return 0; + } catch (error) { + return -1; + } + }, + buf, + n); + switch (ret) { + case 0: + return 0; + case -1: + errno = EINVAL; + return -1; + case -2: + errno = ENOSYS; + return -1; + } + assert(false); // Unreachable +} +#endif /* defined(__EMSCRIPTEN__) */ + +SQISIGN_API +int +randombytes_select(unsigned char *buf, unsigned long long n) +{ +#if defined(__EMSCRIPTEN__) + return randombytes_js_randombytes_nodejs(buf, n); +#elif defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) +#if defined(USE_GLIBC) + /* Use getrandom system call */ + return randombytes_linux_randombytes_getrandom(buf, n); +#elif defined(SYS_getrandom) + /* Use getrandom system call */ + return randombytes_linux_randombytes_getrandom(buf, n); +#else + /* When we have enough entropy, we can read from /dev/urandom */ + return randombytes_linux_randombytes_urandom(buf, n); +#endif +#elif defined(BSD) + /* Use arc4random system call */ + return randombytes_bsd_randombytes(buf, n); +#elif defined(_WIN32) + /* Use windows API */ + return randombytes_win32_randombytes(buf, n); +#elif defined(__wasi__) + /* Use WASI */ + return randombytes_wasi_randombytes(buf, n); +#else +#error "randombytes(...) 
is not supported on this platform" +#endif +} + +#ifdef RANDOMBYTES_SYSTEM +SQISIGN_API +int +randombytes(unsigned char *x, unsigned long long xlen) +{ + + int ret = randombytes_select(x, (size_t)xlen); +#ifdef ENABLE_CT_TESTING + VALGRIND_MAKE_MEM_UNDEFINED(x, xlen); +#endif + return ret; +} + +SQISIGN_API +void +randombytes_init(unsigned char *entropy_input, + unsigned char *personalization_string, + int security_strength) +{ + (void)entropy_input; + (void)personalization_string; + (void)security_strength; +} +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/rationals.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/rationals.c new file mode 100644 index 0000000000..0c5387e5e8 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/rationals.c @@ -0,0 +1,233 @@ +#include +#include "internal.h" +#include "lll_internals.h" + +void +ibq_init(ibq_t *x) +{ + ibz_init(&((*x)[0])); + ibz_init(&((*x)[1])); + ibz_set(&((*x)[1]), 1); +} + +void +ibq_finalize(ibq_t *x) +{ + ibz_finalize(&((*x)[0])); + ibz_finalize(&((*x)[1])); +} + +void +ibq_mat_4x4_init(ibq_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibq_init(&(*mat)[i][j]); + } + } +} +void +ibq_mat_4x4_finalize(ibq_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibq_finalize(&(*mat)[i][j]); + } + } +} + +void +ibq_vec_4_init(ibq_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibq_init(&(*vec)[i]); + } +} +void +ibq_vec_4_finalize(ibq_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibq_finalize(&(*vec)[i]); + } +} + +void +ibq_mat_4x4_print(const ibq_mat_4x4_t *mat) +{ + printf("matrix: "); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_print(&((*mat)[i][j][0]), 10); + printf("/"); + ibz_print(&((*mat)[i][j][1]), 10); + printf(" "); + } + printf("\n "); + } + printf("\n"); +} + +void +ibq_vec_4_print(const ibq_vec_4_t *vec) +{ + printf("vector: "); + for (int i = 0; i < 4; i++) { + ibz_print(&((*vec)[i][0]), 10); + printf("/"); + ibz_print(&((*vec)[i][1]), 10); + printf(" "); + } + printf("\n\n"); +} + +void +ibq_reduce(ibq_t *x) +{ + ibz_t gcd, r; + ibz_init(&gcd); + ibz_init(&r); + ibz_gcd(&gcd, &((*x)[0]), &((*x)[1])); + ibz_div(&((*x)[0]), &r, &((*x)[0]), &gcd); + assert(ibz_is_zero(&r)); + ibz_div(&((*x)[1]), &r, &((*x)[1]), &gcd); + assert(ibz_is_zero(&r)); + ibz_finalize(&gcd); + ibz_finalize(&r); +} + +void +ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b) +{ + ibz_t add, prod; + ibz_init(&add); + ibz_init(&prod); + + ibz_mul(&add, &((*a)[0]), &((*b)[1])); + ibz_mul(&prod, &((*b)[0]), &((*a)[1])); + ibz_add(&((*sum)[0]), &add, &prod); + ibz_mul(&((*sum)[1]), &((*a)[1]), &((*b)[1])); + ibz_finalize(&add); + ibz_finalize(&prod); +} + +void +ibq_neg(ibq_t *neg, const ibq_t *x) +{ + ibz_copy(&((*neg)[1]), &((*x)[1])); + ibz_neg(&((*neg)[0]), &((*x)[0])); +} + +void +ibq_sub(ibq_t *diff, const ibq_t *a, const ibq_t *b) +{ + ibq_t neg; + ibq_init(&neg); + ibq_neg(&neg, b); + ibq_add(diff, a, &neg); + ibq_finalize(&neg); +} + +void +ibq_abs(ibq_t *abs, const ibq_t *x) // once +{ + ibq_t neg; + ibq_init(&neg); + ibq_neg(&neg, x); + if (ibq_cmp(x, &neg) < 0) + ibq_copy(abs, &neg); + else + ibq_copy(abs, x); + ibq_finalize(&neg); +} + +void +ibq_mul(ibq_t *prod, const ibq_t *a, const ibq_t *b) +{ + ibz_mul(&((*prod)[0]), &((*a)[0]), &((*b)[0])); + ibz_mul(&((*prod)[1]), &((*a)[1]), &((*b)[1])); +} + +int +ibq_inv(ibq_t *inv, const ibq_t *x) +{ + int res = !ibq_is_zero(x); + if (res) { + ibz_copy(&((*inv)[0]), 
&((*x)[0])); + ibz_copy(&((*inv)[1]), &((*x)[1])); + ibz_swap(&((*inv)[1]), &((*inv)[0])); + } + return (res); +} + +int +ibq_cmp(const ibq_t *a, const ibq_t *b) +{ + ibz_t x, y; + ibz_init(&x); + ibz_init(&y); + ibz_copy(&x, &((*a)[0])); + ibz_copy(&y, &((*b)[0])); + ibz_mul(&y, &y, &((*a)[1])); + ibz_mul(&x, &x, &((*b)[1])); + if (ibz_cmp(&((*a)[1]), &ibz_const_zero) > 0) { + ibz_neg(&y, &y); + ibz_neg(&x, &x); + } + if (ibz_cmp(&((*b)[1]), &ibz_const_zero) > 0) { + ibz_neg(&y, &y); + ibz_neg(&x, &x); + } + int res = ibz_cmp(&x, &y); + ibz_finalize(&x); + ibz_finalize(&y); + return (res); +} + +int +ibq_is_zero(const ibq_t *x) +{ + return ibz_is_zero(&((*x)[0])); +} + +int +ibq_is_one(const ibq_t *x) +{ + return (0 == ibz_cmp(&((*x)[0]), &((*x)[1]))); +} + +int +ibq_set(ibq_t *q, const ibz_t *a, const ibz_t *b) +{ + ibz_copy(&((*q)[0]), a); + ibz_copy(&((*q)[1]), b); + return !ibz_is_zero(b); +} + +void +ibq_copy(ibq_t *target, const ibq_t *value) // once +{ + ibz_copy(&((*target)[0]), &((*value)[0])); + ibz_copy(&((*target)[1]), &((*value)[1])); +} + +int +ibq_is_ibz(const ibq_t *q) +{ + ibz_t r; + ibz_init(&r); + ibz_mod(&r, &((*q)[0]), &((*q)[1])); + int res = ibz_is_zero(&r); + ibz_finalize(&r); + return (res); +} + +int +ibq_to_ibz(ibz_t *z, const ibq_t *q) +{ + ibz_t r; + ibz_init(&r); + ibz_div(z, &r, &((*q)[0]), &((*q)[1])); + int res = ibz_is_zero(&r); + ibz_finalize(&r); + return (res); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/rng.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/rng.h new file mode 100644 index 0000000000..0a9ca0e465 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/rng.h @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef rng_h +#define rng_h + +#include + +/** + * Randombytes initialization. + * Initialization may be needed for some random number generators (e.g. CTR-DRBG). + * + * @param[in] entropy_input 48 bytes entropy input + * @param[in] personalization_string Personalization string + * @param[in] security_strength Security string + */ +SQISIGN_API +void randombytes_init(unsigned char *entropy_input, + unsigned char *personalization_string, + int security_strength); + +/** + * Random byte generation using /dev/urandom. + * The caller is responsible to allocate sufficient memory to hold x. + * + * @param[out] x Memory to hold the random bytes. + * @param[in] xlen Number of random bytes to be generated + * @return int 0 on success, -1 otherwise + */ +SQISIGN_API +int randombytes_select(unsigned char *x, unsigned long long xlen); + +/** + * Random byte generation. + * The caller is responsible to allocate sufficient memory to hold x. + * + * @param[out] x Memory to hold the random bytes. + * @param[in] xlen Number of random bytes to be generated + * @return int 0 on success, -1 otherwise + */ +SQISIGN_API +int randombytes(unsigned char *x, unsigned long long xlen); + +#endif /* rng_h */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sig.h new file mode 100644 index 0000000000..4c33510084 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sig.h @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef SQISIGN_H +#define SQISIGN_H + +#include +#include + +#if defined(ENABLE_SIGN) +/** + * SQIsign keypair generation. + * + * The implementation corresponds to SQIsign.CompactKeyGen() in the SQIsign spec. + * The caller is responsible to allocate sufficient memory to hold pk and sk. 
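+ *
+ * Allocation/usage sketch (illustration only; the CRYPTO_* size macros are the
+ * NIST-style constants expected from api.h and are an assumption here, not
+ * definitions made by this header):
+ *
+ *   unsigned char pk[CRYPTO_PUBLICKEYBYTES];
+ *   unsigned char sk[CRYPTO_SECRETKEYBYTES];
+ *   if (sqisign_keypair(pk, sk) != 0) {
+ *       // key generation failed
+ *   }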
+ * + * @param[out] pk SQIsign public key + * @param[out] sk SQIsign secret key + * @return int status code + */ +SQISIGN_API +int sqisign_keypair(unsigned char *pk, unsigned char *sk); + +/** + * SQIsign signature generation. + * + * The implementation performs SQIsign.expandSK() + SQIsign.sign() in the SQIsign spec. + * The key provided is a compacted secret key. + * The caller is responsible to allocate sufficient memory to hold sm. + * + * @param[out] sm Signature concatenated with message + * @param[out] smlen Pointer to the length of sm + * @param[in] m Message to be signed + * @param[in] mlen Message length + * @param[in] sk Compacted secret key + * @return int status code + */ +SQISIGN_API +int sqisign_sign(unsigned char *sm, + unsigned long long *smlen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *sk); +#endif + +/** + * SQIsign open signature. + * + * The implementation performs SQIsign.verify(). If the signature verification succeeded, the + * original message is stored in m. The key provided is a compacted public key. The caller is responsible + * to allocate sufficient memory to hold m. + * + * @param[out] m Message stored if verification succeeds + * @param[out] mlen Pointer to the length of m + * @param[in] sm Signature concatenated with message + * @param[in] smlen Length of sm + * @param[in] pk Compacted public key + * @return int status code + */ +SQISIGN_API +int sqisign_open(unsigned char *m, + unsigned long long *mlen, + const unsigned char *sm, + unsigned long long smlen, + const unsigned char *pk); + +/** + * SQIsign verify signature. + * + * If the signature verification succeeded, returns 0, otherwise 1. + * + * @param[in] m Message + * @param[in] mlen Length of m + * @param[in] sig Signature + * @param[in] siglen Length of sig + * @param[in] pk Compacted public key + * @return int 0 if verification succeeded, 1 otherwise.
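+ *
+ * Usage sketch (illustration only; msg, msg_len, sig, sig_len and pk are placeholder
+ * variables, with sig/siglen as produced by the signing API above):
+ *
+ *   if (sqisign_verify(msg, msg_len, sig, sig_len, pk) == 0) {
+ *       // signature is valid for msg under pk
+ *   }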
+ */ +SQISIGN_API +int sqisign_verify(const unsigned char *m, + unsigned long long mlen, + const unsigned char *sig, + unsigned long long siglen, + const unsigned char *pk); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sign.c new file mode 100644 index 0000000000..9216bbe4d3 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sign.c @@ -0,0 +1,634 @@ +#include +#include +#include +#include +#include +#include + +// compute the commitment with ideal to isogeny clapotis +// and apply it to the basis of E0 (together with the multiplication by some scalar u) +static bool +commit(ec_curve_t *E_com, ec_basis_t *basis_even_com, quat_left_ideal_t *lideal_com) +{ + + bool found = false; + + found = quat_sampling_random_ideal_O0_given_norm(lideal_com, &COM_DEGREE, 1, &QUAT_represent_integer_params, NULL); + // replacing it with a shorter prime norm equivalent ideal + found = found && quat_lideal_prime_norm_reduced_equivalent( + lideal_com, &QUATALG_PINFTY, QUAT_primality_num_iter, QUAT_equiv_bound_coeff); + // ideal to isogeny clapotis + found = found && dim2id2iso_arbitrary_isogeny_evaluation(basis_even_com, E_com, lideal_com); + return found; +} + +static void +compute_challenge_ideal_signature(quat_left_ideal_t *lideal_chall_two, const signature_t *sig, const secret_key_t *sk) +{ + ibz_vec_2_t vec; + ibz_vec_2_init(&vec); + + // vec is a vector [1, chall_coeff] coefficients encoding the kernel of the challenge + // isogeny as B[0] + chall_coeff*B[1] where B is the canonical basis of the + // 2^TORSION_EVEN_POWER torsion of EA + ibz_set(&vec[0], 1); + ibz_copy_digit_array(&vec[1], sig->chall_coeff); + + // now we compute the ideal associated to the challenge + // for that, we need to find vec such that + // the kernel of the challenge isogeny is generated by vec[0]*B0[0] + vec[1]*B0[1] where B0 + // is the image through the secret key isogeny of the canonical basis E0 + ibz_mat_2x2_eval(&vec, &(sk->mat_BAcan_to_BA0_two), &vec); + + // lideal_chall_two is the pullback of the ideal challenge through the secret key ideal + id2iso_kernel_dlogs_to_ideal_even(lideal_chall_two, &vec, TORSION_EVEN_POWER); + assert(ibz_cmp(&lideal_chall_two->norm, &TORSION_PLUS_2POWER) == 0); + + ibz_vec_2_finalize(&vec); +} + +static void +sample_response(quat_alg_elem_t *x, const quat_lattice_t *lattice, const ibz_t *lattice_content) +{ + ibz_t bound; + ibz_init(&bound); + ibz_pow(&bound, &ibz_const_two, SQIsign_response_length); + ibz_sub(&bound, &bound, &ibz_const_one); + ibz_mul(&bound, &bound, lattice_content); + + int ok UNUSED = quat_lattice_sample_from_ball(x, lattice, &QUATALG_PINFTY, &bound); + assert(ok); + + ibz_finalize(&bound); +} + +static void +compute_response_quat_element(quat_alg_elem_t *resp_quat, + ibz_t *lattice_content, + const secret_key_t *sk, + const quat_left_ideal_t *lideal_chall_two, + const quat_left_ideal_t *lideal_commit) +{ + quat_left_ideal_t lideal_chall_secret; + quat_lattice_t lattice_hom_chall_to_com, lat_commit; + + // Init + quat_left_ideal_init(&lideal_chall_secret); + quat_lattice_init(&lat_commit); + quat_lattice_init(&lattice_hom_chall_to_com); + + // lideal_chall_secret = lideal_secret * lideal_chall_two + quat_lideal_inter(&lideal_chall_secret, lideal_chall_two, &(sk->secret_ideal), &QUATALG_PINFTY); + + // now we compute lideal_com_to_chall which is dual(Icom)* lideal_chall_secret + quat_lattice_conjugate_without_hnf(&lat_commit, &(lideal_commit->lattice)); + 
quat_lattice_intersect(&lattice_hom_chall_to_com, &lideal_chall_secret.lattice, &lat_commit); + + // sampling the smallest response + ibz_mul(lattice_content, &lideal_chall_secret.norm, &lideal_commit->norm); + sample_response(resp_quat, &lattice_hom_chall_to_com, lattice_content); + + // Clean up + quat_left_ideal_finalize(&lideal_chall_secret); + quat_lattice_finalize(&lat_commit); + quat_lattice_finalize(&lattice_hom_chall_to_com); +} + +static void +compute_backtracking_signature(signature_t *sig, quat_alg_elem_t *resp_quat, ibz_t *lattice_content, ibz_t *remain) +{ + uint_fast8_t backtracking; + ibz_t tmp; + ibz_init(&tmp); + + ibz_vec_4_t dummy_coord; + ibz_vec_4_init(&dummy_coord); + + quat_alg_make_primitive(&dummy_coord, &tmp, resp_quat, &MAXORD_O0); + ibz_mul(&resp_quat->denom, &resp_quat->denom, &tmp); + assert(quat_lattice_contains(NULL, &MAXORD_O0, resp_quat)); + + // the backtracking is the common part of the response and the challenge + // its degree is the scalar tmp computed above such that quat_resp is in tmp * O0. + backtracking = ibz_two_adic(&tmp); + sig->backtracking = backtracking; + + ibz_pow(&tmp, &ibz_const_two, backtracking); + ibz_div(lattice_content, remain, lattice_content, &tmp); + + ibz_finalize(&tmp); + ibz_vec_4_finalize(&dummy_coord); +} + +static uint_fast8_t +compute_random_aux_norm_and_helpers(signature_t *sig, + ibz_t *random_aux_norm, + ibz_t *degree_resp_inv, + ibz_t *remain, + const ibz_t *lattice_content, + quat_alg_elem_t *resp_quat, + quat_left_ideal_t *lideal_com_resp, + quat_left_ideal_t *lideal_commit) +{ + uint_fast8_t pow_dim2_deg_resp; + uint_fast8_t exp_diadic_val_full_resp; + + ibz_t tmp, degree_full_resp, degree_odd_resp, norm_d; + + // Init + ibz_init(°ree_full_resp); + ibz_init(°ree_odd_resp); + ibz_init(&norm_d); + ibz_init(&tmp); + + quat_alg_norm(°ree_full_resp, &norm_d, resp_quat, &QUATALG_PINFTY); + + // dividing by n(lideal_com) * n(lideal_secret_chall) + assert(ibz_is_one(&norm_d)); + ibz_div(°ree_full_resp, remain, °ree_full_resp, lattice_content); + assert(ibz_cmp(remain, &ibz_const_zero) == 0); + + // computing the diadic valuation + exp_diadic_val_full_resp = ibz_two_adic(°ree_full_resp); + sig->two_resp_length = exp_diadic_val_full_resp; + + // removing the power of two part + ibz_pow(&tmp, &ibz_const_two, exp_diadic_val_full_resp); + ibz_div(°ree_odd_resp, remain, °ree_full_resp, &tmp); + assert(ibz_cmp(remain, &ibz_const_zero) == 0); +#ifndef NDEBUG + ibz_pow(&tmp, &ibz_const_two, SQIsign_response_length - sig->backtracking); + assert(ibz_cmp(&tmp, °ree_odd_resp) > 0); +#endif + + // creating the ideal + quat_alg_conj(resp_quat, resp_quat); + + // setting the norm + ibz_mul(&tmp, &lideal_commit->norm, °ree_odd_resp); + quat_lideal_create(lideal_com_resp, resp_quat, &tmp, &MAXORD_O0, &QUATALG_PINFTY); + + // now we compute the ideal_aux + // computing the norm + pow_dim2_deg_resp = SQIsign_response_length - exp_diadic_val_full_resp - sig->backtracking; + ibz_pow(remain, &ibz_const_two, pow_dim2_deg_resp); + ibz_sub(random_aux_norm, remain, °ree_odd_resp); + + // multiplying by 2^HD_extra_torsion to account for the fact that + // we use extra torsion above the kernel + for (int i = 0; i < HD_extra_torsion; i++) + ibz_mul(remain, remain, &ibz_const_two); + + ibz_invmod(degree_resp_inv, °ree_odd_resp, remain); + + ibz_finalize(°ree_full_resp); + ibz_finalize(°ree_odd_resp); + ibz_finalize(&norm_d); + ibz_finalize(&tmp); + + return pow_dim2_deg_resp; +} + +static int +evaluate_random_aux_isogeny_signature(ec_curve_t *E_aux, + 
ec_basis_t *B_aux, + const ibz_t *norm, + const quat_left_ideal_t *lideal_com_resp) +{ + quat_left_ideal_t lideal_aux; + quat_left_ideal_t lideal_aux_resp_com; + + // Init + quat_left_ideal_init(&lideal_aux); + quat_left_ideal_init(&lideal_aux_resp_com); + + // sampling the ideal at random + int found = quat_sampling_random_ideal_O0_given_norm( + &lideal_aux, norm, 0, &QUAT_represent_integer_params, &QUAT_prime_cofactor); + + if (found) { + // pushing forward + quat_lideal_inter(&lideal_aux_resp_com, lideal_com_resp, &lideal_aux, &QUATALG_PINFTY); + + // now we evaluate this isogeny on the basis of E0 + found = dim2id2iso_arbitrary_isogeny_evaluation(B_aux, E_aux, &lideal_aux_resp_com); + + // Clean up + quat_left_ideal_finalize(&lideal_aux_resp_com); + quat_left_ideal_finalize(&lideal_aux); + } + + return found; +} + +static int +compute_dim2_isogeny_challenge(theta_couple_curve_with_basis_t *codomain, + theta_couple_curve_with_basis_t *domain, + const ibz_t *degree_resp_inv, + int pow_dim2_deg_resp, + int exp_diadic_val_full_resp, + int reduced_order) +{ + // now, we compute the isogeny Phi : Ecom x Eaux -> Echl' x Eaux' + // where Echl' is 2^exp_diadic_val_full_resp isogenous to Echal + // ker Phi = <(Bcom_can.P,Baux.P),(Bcom_can.Q,Baux.Q)> + + // preparing the domain + theta_couple_curve_t EcomXEaux; + copy_curve(&EcomXEaux.E1, &domain->E1); + copy_curve(&EcomXEaux.E2, &domain->E2); + + // preparing the kernel + theta_kernel_couple_points_t dim_two_ker; + copy_bases_to_kernel(&dim_two_ker, &domain->B1, &domain->B2); + + // dividing by the degree of the response + digit_t scalar[NWORDS_ORDER]; + ibz_to_digit_array(scalar, degree_resp_inv); + ec_mul(&dim_two_ker.T1.P2, scalar, reduced_order, &dim_two_ker.T1.P2, &EcomXEaux.E2); + ec_mul(&dim_two_ker.T2.P2, scalar, reduced_order, &dim_two_ker.T2.P2, &EcomXEaux.E2); + ec_mul(&dim_two_ker.T1m2.P2, scalar, reduced_order, &dim_two_ker.T1m2.P2, &EcomXEaux.E2); + + // and multiplying by 2^exp_diadic... 
+ double_couple_point_iter(&dim_two_ker.T1, exp_diadic_val_full_resp, &dim_two_ker.T1, &EcomXEaux); + double_couple_point_iter(&dim_two_ker.T2, exp_diadic_val_full_resp, &dim_two_ker.T2, &EcomXEaux); + double_couple_point_iter(&dim_two_ker.T1m2, exp_diadic_val_full_resp, &dim_two_ker.T1m2, &EcomXEaux); + + theta_couple_point_t pushed_points[3]; + theta_couple_point_t *const Tev1 = pushed_points + 0, *const Tev2 = pushed_points + 1, + *const Tev1m2 = pushed_points + 2; + + // Set points on the commitment curve + copy_point(&Tev1->P1, &domain->B1.P); + copy_point(&Tev2->P1, &domain->B1.Q); + copy_point(&Tev1m2->P1, &domain->B1.PmQ); + + // Zero points on the aux curve + ec_point_init(&Tev1->P2); + ec_point_init(&Tev2->P2); + ec_point_init(&Tev1m2->P2); + + theta_couple_curve_t codomain_product; + + // computation of the dim2 isogeny + if (!theta_chain_compute_and_eval_randomized(pow_dim2_deg_resp, + &EcomXEaux, + &dim_two_ker, + true, + &codomain_product, + pushed_points, + sizeof(pushed_points) / sizeof(*pushed_points))) + return 0; + + assert(test_couple_point_order_twof(Tev1, &codomain_product, reduced_order)); + + // Set the auxiliary curve + copy_curve(&codomain->E1, &codomain_product.E2); + + // Set the codomain curve from the dim 2 isogeny + // it should always be the first curve + copy_curve(&codomain->E2, &codomain_product.E1); + + // Set the evaluated basis points + copy_point(&codomain->B1.P, &Tev1->P2); + copy_point(&codomain->B1.Q, &Tev2->P2); + copy_point(&codomain->B1.PmQ, &Tev1m2->P2); + + copy_point(&codomain->B2.P, &Tev1->P1); + copy_point(&codomain->B2.Q, &Tev2->P1); + copy_point(&codomain->B2.PmQ, &Tev1m2->P1); + return 1; +} + +static int +compute_small_chain_isogeny_signature(ec_curve_t *E_chall_2, + ec_basis_t *B_chall_2, + const quat_alg_elem_t *resp_quat, + int pow_dim2_deg_resp, + int length) +{ + int ret = 1; + + ibz_t two_pow; + ibz_init(&two_pow); + + ibz_vec_2_t vec_resp_two; + ibz_vec_2_init(&vec_resp_two); + + quat_left_ideal_t lideal_resp_two; + quat_left_ideal_init(&lideal_resp_two); + + // computing the ideal + ibz_pow(&two_pow, &ibz_const_two, length); + + // we compute the generator of the challenge ideal + quat_lideal_create(&lideal_resp_two, resp_quat, &two_pow, &MAXORD_O0, &QUATALG_PINFTY); + + // computing the coefficients of the kernel in terms of the basis of O0 + id2iso_ideal_to_kernel_dlogs_even(&vec_resp_two, &lideal_resp_two); + + ec_point_t points[3]; + copy_point(&points[0], &B_chall_2->P); + copy_point(&points[1], &B_chall_2->Q); + copy_point(&points[2], &B_chall_2->PmQ); + + // getting down to the right order and applying the matrix + ec_dbl_iter_basis(B_chall_2, pow_dim2_deg_resp + HD_extra_torsion, B_chall_2, E_chall_2); + assert(test_basis_order_twof(B_chall_2, E_chall_2, length)); + + ec_point_t ker; + // applying the vector to find the kernel + ec_biscalar_mul_ibz_vec(&ker, &vec_resp_two, length, B_chall_2, E_chall_2); + assert(test_point_order_twof(&ker, E_chall_2, length)); + + // computing the isogeny and pushing the points + if (ec_eval_small_chain(E_chall_2, &ker, length, points, 3, true)) { + ret = 0; + } + + // copying the result + copy_point(&B_chall_2->P, &points[0]); + copy_point(&B_chall_2->Q, &points[1]); + copy_point(&B_chall_2->PmQ, &points[2]); + + ibz_finalize(&two_pow); + ibz_vec_2_finalize(&vec_resp_two); + quat_left_ideal_finalize(&lideal_resp_two); + + return ret; +} + +static int +compute_challenge_codomain_signature(const signature_t *sig, + secret_key_t *sk, + ec_curve_t *E_chall, + const ec_curve_t *E_chall_2, + 
ec_basis_t *B_chall_2) +{ + ec_isog_even_t phi_chall; + ec_basis_t bas_sk; + copy_basis(&bas_sk, &sk->canonical_basis); + + phi_chall.curve = sk->curve; + phi_chall.length = TORSION_EVEN_POWER - sig->backtracking; + assert(test_basis_order_twof(&bas_sk, &sk->curve, TORSION_EVEN_POWER)); + + // Compute the kernel + { + ec_ladder3pt(&phi_chall.kernel, sig->chall_coeff, &bas_sk.P, &bas_sk.Q, &bas_sk.PmQ, &sk->curve); + } + assert(test_point_order_twof(&phi_chall.kernel, &sk->curve, TORSION_EVEN_POWER)); + + // Double kernel to get correct order + ec_dbl_iter(&phi_chall.kernel, sig->backtracking, &phi_chall.kernel, &sk->curve); + + assert(test_point_order_twof(&phi_chall.kernel, E_chall, phi_chall.length)); + + // Compute the codomain from challenge isogeny + if (ec_eval_even(E_chall, &phi_chall, NULL, 0)) + return 0; + +#ifndef NDEBUG + fp2_t j_chall, j_codomain; + ec_j_inv(&j_codomain, E_chall_2); + ec_j_inv(&j_chall, E_chall); + // apparently its always the second one curve + assert(fp2_is_equal(&j_chall, &j_codomain)); +#endif + + // applying the isomorphism from E_chall_2 to E_chall + ec_isom_t isom; + if (ec_isomorphism(&isom, E_chall_2, E_chall)) + return 0; // error due to a corner case with 1/p probability + ec_iso_eval(&B_chall_2->P, &isom); + ec_iso_eval(&B_chall_2->Q, &isom); + ec_iso_eval(&B_chall_2->PmQ, &isom); + + return 1; +} + +static void +set_aux_curve_signature(signature_t *sig, ec_curve_t *E_aux) +{ + ec_normalize_curve(E_aux); + fp2_copy(&sig->E_aux_A, &E_aux->A); +} + +static void +compute_and_set_basis_change_matrix(signature_t *sig, + const ec_basis_t *B_aux_2, + ec_basis_t *B_chall_2, + ec_curve_t *E_aux_2, + ec_curve_t *E_chall, + int f) +{ + // Matrices for change of bases matrices + ibz_mat_2x2_t mat_Baux2_to_Baux2_can, mat_Bchall_can_to_Bchall; + ibz_mat_2x2_init(&mat_Baux2_to_Baux2_can); + ibz_mat_2x2_init(&mat_Bchall_can_to_Bchall); + + // Compute canonical bases + ec_basis_t B_can_chall, B_aux_2_can; + sig->hint_chall = ec_curve_to_basis_2f_to_hint(&B_can_chall, E_chall, TORSION_EVEN_POWER); + sig->hint_aux = ec_curve_to_basis_2f_to_hint(&B_aux_2_can, E_aux_2, TORSION_EVEN_POWER); + +#ifndef NDEBUG + { + // Ensure all points have the desired order + assert(test_basis_order_twof(&B_aux_2_can, E_aux_2, TORSION_EVEN_POWER)); + assert(test_basis_order_twof(B_aux_2, E_aux_2, f)); + fp2_t w0; + weil(&w0, f, &B_aux_2->P, &B_aux_2->Q, &B_aux_2->PmQ, E_aux_2); + } +#endif + + // compute the matrix to go from B_aux_2 to B_aux_2_can + change_of_basis_matrix_tate_invert(&mat_Baux2_to_Baux2_can, &B_aux_2_can, B_aux_2, E_aux_2, f); + + // apply the change of basis to B_chall_2 + matrix_application_even_basis(B_chall_2, E_chall, &mat_Baux2_to_Baux2_can, f); + +#ifndef NDEBUG + { + // Ensure all points have the desired order + assert(test_basis_order_twof(&B_can_chall, E_chall, TORSION_EVEN_POWER)); + } +#endif + + // compute the matrix to go from B_chall_can to B_chall_2 + change_of_basis_matrix_tate(&mat_Bchall_can_to_Bchall, B_chall_2, &B_can_chall, E_chall, f); + + // Assert all values in the matrix are of the expected size for packing + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][1]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][1]) <= SQIsign_response_length + HD_extra_torsion); + + // Set the basis change matrix 
to signature + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][0], &(mat_Bchall_can_to_Bchall[0][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][1], &(mat_Bchall_can_to_Bchall[0][1])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][0], &(mat_Bchall_can_to_Bchall[1][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][1], &(mat_Bchall_can_to_Bchall[1][1])); + + // Finalise the matrices + ibz_mat_2x2_finalize(&mat_Bchall_can_to_Bchall); + ibz_mat_2x2_finalize(&mat_Baux2_to_Baux2_can); +} + +int +protocols_sign(signature_t *sig, const public_key_t *pk, secret_key_t *sk, const unsigned char *m, size_t l) +{ + int ret = 0; + int reduced_order = 0; // work around false positive gcc warning + + uint_fast8_t pow_dim2_deg_resp; + assert(SQIsign_response_length <= (intmax_t)UINT_FAST8_MAX); // otherwise we might need more bits there + + ibz_t remain, lattice_content, random_aux_norm, degree_resp_inv; + ibz_init(&remain); + ibz_init(&lattice_content); + ibz_init(&random_aux_norm); + ibz_init(°ree_resp_inv); + + quat_alg_elem_t resp_quat; + quat_alg_elem_init(&resp_quat); + + quat_left_ideal_t lideal_commit, lideal_com_resp; + quat_left_ideal_init(&lideal_commit); + quat_left_ideal_init(&lideal_com_resp); + + // This structure holds two curves E1 x E2 together with a basis + // Bi of E[2^n] for each of these curves + theta_couple_curve_with_basis_t Ecom_Eaux; + // This structure holds two curves E1 x E2 together with a basis + // Bi of Ei[2^n] + theta_couple_curve_with_basis_t Eaux2_Echall2; + + // This will hold the challenge curve + ec_curve_t E_chall = sk->curve; + + ec_curve_init(&Ecom_Eaux.E1); + ec_curve_init(&Ecom_Eaux.E2); + + while (!ret) { + + // computing the commitment + ret = commit(&Ecom_Eaux.E1, &Ecom_Eaux.B1, &lideal_commit); + + // start again if the commitment generation has failed + if (!ret) { + continue; + } + + // Hash the message to a kernel generator + // i.e. 
a scalar such that ker = P + [s]Q + hash_to_challenge(&sig->chall_coeff, pk, &Ecom_Eaux.E1, m, l); + // Compute the challenge ideal and response quaternion element + { + quat_left_ideal_t lideal_chall_two; + quat_left_ideal_init(&lideal_chall_two); + + // computing the challenge ideal + compute_challenge_ideal_signature(&lideal_chall_two, sig, sk); + compute_response_quat_element(&resp_quat, &lattice_content, sk, &lideal_chall_two, &lideal_commit); + + // Clean up + quat_left_ideal_finalize(&lideal_chall_two); + } + + // computing the amount of backtracking we're making + // and removing it + compute_backtracking_signature(sig, &resp_quat, &lattice_content, &remain); + + // creating lideal_com * lideal_resp + // we first compute the norm of lideal_resp + // norm of the resp_quat + pow_dim2_deg_resp = compute_random_aux_norm_and_helpers(sig, + &random_aux_norm, + °ree_resp_inv, + &remain, + &lattice_content, + &resp_quat, + &lideal_com_resp, + &lideal_commit); + + // notational conventions: + // B0 = canonical basis of E0 + // B_com = image through commitment isogeny (odd degree) of canonical basis of E0 + // B_aux = image through aux_resp_com isogeny (odd degree) of canonical basis of E0 + + if (pow_dim2_deg_resp > 0) { + // Evaluate the random aux ideal on the curve E0 and its basis to find E_aux and B_aux + ret = + evaluate_random_aux_isogeny_signature(&Ecom_Eaux.E2, &Ecom_Eaux.B2, &random_aux_norm, &lideal_com_resp); + + // auxiliary isogeny computation failed we must start again + if (!ret) { + continue; + } + +#ifndef NDEBUG + // testing that the order of the points in the bases is as expected + assert(test_basis_order_twof(&Ecom_Eaux.B1, &Ecom_Eaux.E1, TORSION_EVEN_POWER)); + assert(test_basis_order_twof(&Ecom_Eaux.B2, &Ecom_Eaux.E2, TORSION_EVEN_POWER)); +#endif + + // applying the matrix to compute Baux + // first, we reduce to the relevant order + reduced_order = pow_dim2_deg_resp + HD_extra_torsion + sig->two_resp_length; + ec_dbl_iter_basis(&Ecom_Eaux.B1, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B1, &Ecom_Eaux.E1); + ec_dbl_iter_basis(&Ecom_Eaux.B2, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B2, &Ecom_Eaux.E2); + + // Given all the above data, compute a dim two isogeny with domain + // E_com x E_aux + // and codomain + // E_aux_2 x E_chall_2 (note: E_chall_2 is isomorphic to E_chall) + // and evaluated points stored as bases in + // B_aux_2 on E_aux_2 + // B_chall_2 on E_chall_2 + ret = compute_dim2_isogeny_challenge( + &Eaux2_Echall2, &Ecom_Eaux, °ree_resp_inv, pow_dim2_deg_resp, sig->two_resp_length, reduced_order); + if (!ret) + continue; + } else { + // No 2d isogeny needed, so simulate a "Kani matrix" identity here + copy_curve(&Eaux2_Echall2.E1, &Ecom_Eaux.E1); + copy_curve(&Eaux2_Echall2.E2, &Ecom_Eaux.E1); + + reduced_order = sig->two_resp_length; + ec_dbl_iter_basis(&Eaux2_Echall2.B1, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B1, &Ecom_Eaux.E1); + ec_dbl_iter_basis(&Eaux2_Echall2.B1, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B1, &Ecom_Eaux.E1); + copy_basis(&Eaux2_Echall2.B2, &Eaux2_Echall2.B1); + } + + // computation of the remaining small chain of two isogenies when needed + if (sig->two_resp_length > 0) { + if (!compute_small_chain_isogeny_signature( + &Eaux2_Echall2.E2, &Eaux2_Echall2.B2, &resp_quat, pow_dim2_deg_resp, sig->two_resp_length)) { + assert(0); // this shouldn't fail + } + } + + // computation of the challenge codomain + if (!compute_challenge_codomain_signature(sig, sk, &E_chall, &Eaux2_Echall2.E2, &Eaux2_Echall2.B2)) + assert(0); // 
this shouldn't fail + } + + // Set to the signature the Montgomery A-coefficient of E_aux_2 + set_aux_curve_signature(sig, &Eaux2_Echall2.E1); + + // Set the basis change matrix from canonical bases to the supplied bases + compute_and_set_basis_change_matrix( + sig, &Eaux2_Echall2.B1, &Eaux2_Echall2.B2, &Eaux2_Echall2.E1, &E_chall, reduced_order); + + quat_alg_elem_finalize(&resp_quat); + quat_left_ideal_finalize(&lideal_commit); + quat_left_ideal_finalize(&lideal_com_resp); + + ibz_finalize(&lattice_content); + ibz_finalize(&remain); + ibz_finalize(°ree_resp_inv); + ibz_finalize(&random_aux_norm); + + return ret; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/signature.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/signature.h new file mode 100644 index 0000000000..ba38c360e6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/signature.h @@ -0,0 +1,97 @@ +/** @file + * + * @brief The key generation and signature protocols + */ + +#ifndef SIGNATURE_H +#define SIGNATURE_H + +#include +#include +#include +#include + +/** @defgroup signature SQIsignHD key generation and signature protocols + * @{ + */ +/** @defgroup signature_t Types for SQIsignHD key generation and signature protocols + * @{ + */ + +/** @brief Type for the secret keys + * + * @typedef secret_key_t + * + * @struct secret_key + * + */ +typedef struct secret_key +{ + ec_curve_t curve; /// the public curve, but with little precomputations + quat_left_ideal_t secret_ideal; + ibz_mat_2x2_t mat_BAcan_to_BA0_two; // mat_BA0_to_BAcan*BA0 = BAcan, where BAcan is the + // canonical basis of EA[2^e], and BA0 the image of the + // basis of E0[2^e] through the secret isogeny + ec_basis_t canonical_basis; // the canonical basis of the public key curve +} secret_key_t; + +/** @} + */ + +/*************************** Functions *****************************/ + +void secret_key_init(secret_key_t *sk); +void secret_key_finalize(secret_key_t *sk); + +/** + * @brief Key generation + * + * @param pk Output: will contain the public key + * @param sk Output: will contain the secret key + * @returns 1 if success, 0 otherwise + */ +int protocols_keygen(public_key_t *pk, secret_key_t *sk); + +/** + * @brief Signature computation + * + * @param sig Output: will contain the signature + * @param sk secret key + * @param pk public key + * @param m message + * @param l size + * @returns 1 if success, 0 otherwise + */ +int protocols_sign(signature_t *sig, const public_key_t *pk, secret_key_t *sk, const unsigned char *m, size_t l); + +/*************************** Encoding *****************************/ + +/** @defgroup encoding Encoding and decoding functions + * @{ + */ + +/** + * @brief Encodes a secret key as a byte array + * + * @param enc : Byte array to encode the secret key (including public key) in + * @param sk : Secret key to encode + * @param pk : Public key to encode + */ +void secret_key_to_bytes(unsigned char *enc, const secret_key_t *sk, const public_key_t *pk); + +/** + * @brief Decodes a secret key (and public key) from a byte array + * + * @param sk : Structure to decode the secret key in + * @param pk : Structure to decode the public key in + * @param enc : Byte array to decode + */ +void secret_key_from_bytes(secret_key_t *sk, public_key_t *pk, const unsigned char *enc); + +/** @} + */ + +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign.c new file mode 100644 index 0000000000..7335c38d9a --- /dev/null +++ 
b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign.c @@ -0,0 +1,146 @@ +#include +#include +#include +#include +#if defined(ENABLE_SIGN) +#include +#endif + +#if defined(ENABLE_SIGN) +SQISIGN_API +int +sqisign_keypair(unsigned char *pk, unsigned char *sk) +{ + int ret = 0; + secret_key_t skt; + public_key_t pkt = { 0 }; + secret_key_init(&skt); + + ret = !protocols_keygen(&pkt, &skt); + + secret_key_to_bytes(sk, &skt, &pkt); + public_key_to_bytes(pk, &pkt); + secret_key_finalize(&skt); + return ret; +} + +SQISIGN_API +int +sqisign_sign(unsigned char *sm, + unsigned long long *smlen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *sk) +{ + int ret = 0; + secret_key_t skt; + public_key_t pkt = { 0 }; + signature_t sigt; + secret_key_init(&skt); + secret_key_from_bytes(&skt, &pkt, sk); + + memmove(sm + SIGNATURE_BYTES, m, mlen); + + ret = !protocols_sign(&sigt, &pkt, &skt, sm + SIGNATURE_BYTES, mlen); + if (ret != 0) { + *smlen = 0; + goto err; + } + + signature_to_bytes(sm, &sigt); + *smlen = SIGNATURE_BYTES + mlen; + +err: + secret_key_finalize(&skt); + return ret; +} + +SQISIGN_API +int +sqisign_sign_signature(unsigned char *s, + unsigned long long *slen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *sk) +{ + int ret = 0; + secret_key_t skt; + public_key_t pkt = { 0 }; + signature_t sigt; + secret_key_init(&skt); + secret_key_from_bytes(&skt, &pkt, sk); + + ret = !protocols_sign(&sigt, &pkt, &skt, m, mlen); + if (ret != 0) { + *slen = 0; + goto err; + } + + signature_to_bytes(s, &sigt); + *slen = SIGNATURE_BYTES; + +err: + secret_key_finalize(&skt); + return ret; +} +#endif + +SQISIGN_API +int +sqisign_open(unsigned char *m, + unsigned long long *mlen, + const unsigned char *sm, + unsigned long long smlen, + const unsigned char *pk) +{ + int ret = 0; + public_key_t pkt = { 0 }; + signature_t sigt; + + public_key_from_bytes(&pkt, pk); + signature_from_bytes(&sigt, sm); + + ret = !protocols_verify(&sigt, &pkt, sm + SIGNATURE_BYTES, smlen - SIGNATURE_BYTES); + + if (!ret) { + *mlen = smlen - SIGNATURE_BYTES; + memmove(m, sm + SIGNATURE_BYTES, *mlen); + } else { + *mlen = 0; + memset(m, 0, smlen - SIGNATURE_BYTES); + } + + return ret; +} + +SQISIGN_API +int +sqisign_verify(const unsigned char *m, + unsigned long long mlen, + const unsigned char *sig, + unsigned long long siglen, + const unsigned char *pk) +{ + + int ret = 0; + public_key_t pkt = { 0 }; + signature_t sigt; + + public_key_from_bytes(&pkt, pk); + signature_from_bytes(&sigt, sig); + + ret = !protocols_verify(&sigt, &pkt, m, mlen); + + return ret; +} + +SQISIGN_API +int +sqisign_verify_signature(const unsigned char *sig, + unsigned long long siglen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *pk) +{ + return sqisign_verify(m, mlen, sig, siglen, pk); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign_namespace.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign_namespace.h new file mode 100644 index 0000000000..007d2572b9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign_namespace.h @@ -0,0 +1,1071 @@ + +#ifndef SQISIGN_NAMESPACE_H +#define SQISIGN_NAMESPACE_H + +//#define DISABLE_NAMESPACING + +#if defined(_WIN32) +#define SQISIGN_API __declspec(dllexport) +#else +#define SQISIGN_API __attribute__((visibility("default"))) +#endif + +#define PARAM_JOIN3_(a, b, c) sqisign_##a##_##b##_##c +#define PARAM_JOIN3(a, b, c) PARAM_JOIN3_(a, b, c) +#define PARAM_NAME3(end, s) PARAM_JOIN3(SQISIGN_VARIANT, 
end, s) + +#define PARAM_JOIN2_(a, b) sqisign_##a##_##b +#define PARAM_JOIN2(a, b) PARAM_JOIN2_(a, b) +#define PARAM_NAME2(end, s) PARAM_JOIN2(end, s) + +#ifndef DISABLE_NAMESPACING +#define SQISIGN_NAMESPACE_GENERIC(s) PARAM_NAME2(gen, s) +#else +#define SQISIGN_NAMESPACE_GENERIC(s) s +#endif + +#if defined(SQISIGN_VARIANT) && !defined(DISABLE_NAMESPACING) +#if defined(SQISIGN_BUILD_TYPE_REF) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(ref, s) +#elif defined(SQISIGN_BUILD_TYPE_OPT) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(opt, s) +#elif defined(SQISIGN_BUILD_TYPE_BROADWELL) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(broadwell, s) +#elif defined(SQISIGN_BUILD_TYPE_ARM64CRYPTO) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(arm64crypto, s) +#else +#error "Build type not known" +#endif + +#else +#define SQISIGN_NAMESPACE(s) s +#endif + +// Namespacing symbols exported from algebra.c: +#undef quat_alg_add +#undef quat_alg_conj +#undef quat_alg_coord_mul +#undef quat_alg_elem_copy +#undef quat_alg_elem_copy_ibz +#undef quat_alg_elem_equal +#undef quat_alg_elem_is_zero +#undef quat_alg_elem_mul_by_scalar +#undef quat_alg_elem_set +#undef quat_alg_equal_denom +#undef quat_alg_init_set_ui +#undef quat_alg_make_primitive +#undef quat_alg_mul +#undef quat_alg_norm +#undef quat_alg_normalize +#undef quat_alg_scalar +#undef quat_alg_sub + +#define quat_alg_add SQISIGN_NAMESPACE_GENERIC(quat_alg_add) +#define quat_alg_conj SQISIGN_NAMESPACE_GENERIC(quat_alg_conj) +#define quat_alg_coord_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_coord_mul) +#define quat_alg_elem_copy SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy) +#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy_ibz) +#define quat_alg_elem_equal SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_equal) +#define quat_alg_elem_is_zero SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_is_zero) +#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_mul_by_scalar) +#define quat_alg_elem_set SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_set) +#define quat_alg_equal_denom SQISIGN_NAMESPACE_GENERIC(quat_alg_equal_denom) +#define quat_alg_init_set_ui SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set_ui) +#define quat_alg_make_primitive SQISIGN_NAMESPACE_GENERIC(quat_alg_make_primitive) +#define quat_alg_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_mul) +#define quat_alg_norm SQISIGN_NAMESPACE_GENERIC(quat_alg_norm) +#define quat_alg_normalize SQISIGN_NAMESPACE_GENERIC(quat_alg_normalize) +#define quat_alg_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_scalar) +#define quat_alg_sub SQISIGN_NAMESPACE_GENERIC(quat_alg_sub) + +// Namespacing symbols exported from api.c: +#undef crypto_sign +#undef crypto_sign_keypair +#undef crypto_sign_open + +#define crypto_sign SQISIGN_NAMESPACE(crypto_sign) +#define crypto_sign_keypair SQISIGN_NAMESPACE(crypto_sign_keypair) +#define crypto_sign_open SQISIGN_NAMESPACE(crypto_sign_open) + +// Namespacing symbols exported from basis.c: +#undef ec_curve_to_basis_2f_from_hint +#undef ec_curve_to_basis_2f_to_hint +#undef ec_recover_y +#undef lift_basis +#undef lift_basis_normalized + +#define ec_curve_to_basis_2f_from_hint SQISIGN_NAMESPACE(ec_curve_to_basis_2f_from_hint) +#define ec_curve_to_basis_2f_to_hint SQISIGN_NAMESPACE(ec_curve_to_basis_2f_to_hint) +#define ec_recover_y SQISIGN_NAMESPACE(ec_recover_y) +#define lift_basis SQISIGN_NAMESPACE(lift_basis) +#define lift_basis_normalized SQISIGN_NAMESPACE(lift_basis_normalized) + +// Namespacing symbols exported from biextension.c: +#undef clear_cofac +#undef ec_dlog_2_tate +#undef 
ec_dlog_2_weil +#undef fp2_frob +#undef reduced_tate +#undef weil + +#define clear_cofac SQISIGN_NAMESPACE(clear_cofac) +#define ec_dlog_2_tate SQISIGN_NAMESPACE(ec_dlog_2_tate) +#define ec_dlog_2_weil SQISIGN_NAMESPACE(ec_dlog_2_weil) +#define fp2_frob SQISIGN_NAMESPACE(fp2_frob) +#define reduced_tate SQISIGN_NAMESPACE(reduced_tate) +#define weil SQISIGN_NAMESPACE(weil) + +// Namespacing symbols exported from common.c: +#undef hash_to_challenge +#undef public_key_finalize +#undef public_key_init + +#define hash_to_challenge SQISIGN_NAMESPACE(hash_to_challenge) +#define public_key_finalize SQISIGN_NAMESPACE(public_key_finalize) +#define public_key_init SQISIGN_NAMESPACE(public_key_init) + +// Namespacing symbols exported from dim2.c: +#undef ibz_2x2_mul_mod +#undef ibz_mat_2x2_add +#undef ibz_mat_2x2_copy +#undef ibz_mat_2x2_det_from_ibz +#undef ibz_mat_2x2_eval +#undef ibz_mat_2x2_inv_mod +#undef ibz_mat_2x2_set +#undef ibz_vec_2_set + +#define ibz_2x2_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_2x2_mul_mod) +#define ibz_mat_2x2_add SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_add) +#define ibz_mat_2x2_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_copy) +#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_det_from_ibz) +#define ibz_mat_2x2_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_eval) +#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_inv_mod) +#define ibz_mat_2x2_set SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_set) +#define ibz_vec_2_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_set) + +// Namespacing symbols exported from dim2id2iso.c: +#undef dim2id2iso_arbitrary_isogeny_evaluation +#undef dim2id2iso_ideal_to_isogeny_clapotis +#undef find_uv +#undef fixed_degree_isogeny_and_eval + +#define dim2id2iso_arbitrary_isogeny_evaluation SQISIGN_NAMESPACE(dim2id2iso_arbitrary_isogeny_evaluation) +#define dim2id2iso_ideal_to_isogeny_clapotis SQISIGN_NAMESPACE(dim2id2iso_ideal_to_isogeny_clapotis) +#define find_uv SQISIGN_NAMESPACE(find_uv) +#define fixed_degree_isogeny_and_eval SQISIGN_NAMESPACE(fixed_degree_isogeny_and_eval) + +// Namespacing symbols exported from dim4.c: +#undef ibz_inv_dim4_make_coeff_mpm +#undef ibz_inv_dim4_make_coeff_pmp +#undef ibz_mat_4x4_copy +#undef ibz_mat_4x4_equal +#undef ibz_mat_4x4_eval +#undef ibz_mat_4x4_eval_t +#undef ibz_mat_4x4_gcd +#undef ibz_mat_4x4_identity +#undef ibz_mat_4x4_inv_with_det_as_denom +#undef ibz_mat_4x4_is_identity +#undef ibz_mat_4x4_mul +#undef ibz_mat_4x4_negate +#undef ibz_mat_4x4_scalar_div +#undef ibz_mat_4x4_scalar_mul +#undef ibz_mat_4x4_transpose +#undef ibz_mat_4x4_zero +#undef ibz_vec_4_add +#undef ibz_vec_4_content +#undef ibz_vec_4_copy +#undef ibz_vec_4_copy_ibz +#undef ibz_vec_4_is_zero +#undef ibz_vec_4_linear_combination +#undef ibz_vec_4_negate +#undef ibz_vec_4_scalar_div +#undef ibz_vec_4_scalar_mul +#undef ibz_vec_4_set +#undef ibz_vec_4_sub +#undef quat_qf_eval + +#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_mpm) +#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_pmp) +#define ibz_mat_4x4_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_copy) +#define ibz_mat_4x4_equal SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_equal) +#define ibz_mat_4x4_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval) +#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval_t) +#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_gcd) +#define ibz_mat_4x4_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_identity) +#define ibz_mat_4x4_inv_with_det_as_denom 
SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_inv_with_det_as_denom) +#define ibz_mat_4x4_is_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_identity) +#define ibz_mat_4x4_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_mul) +#define ibz_mat_4x4_negate SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_negate) +#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_div) +#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_mul) +#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_transpose) +#define ibz_mat_4x4_zero SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_zero) +#define ibz_vec_4_add SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_add) +#define ibz_vec_4_content SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_content) +#define ibz_vec_4_copy SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy) +#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_ibz) +#define ibz_vec_4_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_is_zero) +#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination) +#define ibz_vec_4_negate SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_negate) +#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_div) +#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul) +#define ibz_vec_4_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_set) +#define ibz_vec_4_sub SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_sub) +#define quat_qf_eval SQISIGN_NAMESPACE_GENERIC(quat_qf_eval) + +// Namespacing symbols exported from ec.c: +#undef cswap_points +#undef ec_biscalar_mul +#undef ec_curve_init +#undef ec_curve_init_from_A +#undef ec_curve_normalize_A24 +#undef ec_curve_verify_A +#undef ec_dbl +#undef ec_dbl_iter +#undef ec_dbl_iter_basis +#undef ec_has_zero_coordinate +#undef ec_is_basis_four_torsion +#undef ec_is_equal +#undef ec_is_four_torsion +#undef ec_is_two_torsion +#undef ec_is_zero +#undef ec_j_inv +#undef ec_ladder3pt +#undef ec_mul +#undef ec_normalize_curve +#undef ec_normalize_curve_and_A24 +#undef ec_normalize_point +#undef ec_point_init +#undef select_point +#undef xADD +#undef xDBL +#undef xDBLADD +#undef xDBLMUL +#undef xDBL_A24 +#undef xDBL_E0 +#undef xMUL + +#define cswap_points SQISIGN_NAMESPACE(cswap_points) +#define ec_biscalar_mul SQISIGN_NAMESPACE(ec_biscalar_mul) +#define ec_curve_init SQISIGN_NAMESPACE(ec_curve_init) +#define ec_curve_init_from_A SQISIGN_NAMESPACE(ec_curve_init_from_A) +#define ec_curve_normalize_A24 SQISIGN_NAMESPACE(ec_curve_normalize_A24) +#define ec_curve_verify_A SQISIGN_NAMESPACE(ec_curve_verify_A) +#define ec_dbl SQISIGN_NAMESPACE(ec_dbl) +#define ec_dbl_iter SQISIGN_NAMESPACE(ec_dbl_iter) +#define ec_dbl_iter_basis SQISIGN_NAMESPACE(ec_dbl_iter_basis) +#define ec_has_zero_coordinate SQISIGN_NAMESPACE(ec_has_zero_coordinate) +#define ec_is_basis_four_torsion SQISIGN_NAMESPACE(ec_is_basis_four_torsion) +#define ec_is_equal SQISIGN_NAMESPACE(ec_is_equal) +#define ec_is_four_torsion SQISIGN_NAMESPACE(ec_is_four_torsion) +#define ec_is_two_torsion SQISIGN_NAMESPACE(ec_is_two_torsion) +#define ec_is_zero SQISIGN_NAMESPACE(ec_is_zero) +#define ec_j_inv SQISIGN_NAMESPACE(ec_j_inv) +#define ec_ladder3pt SQISIGN_NAMESPACE(ec_ladder3pt) +#define ec_mul SQISIGN_NAMESPACE(ec_mul) +#define ec_normalize_curve SQISIGN_NAMESPACE(ec_normalize_curve) +#define ec_normalize_curve_and_A24 SQISIGN_NAMESPACE(ec_normalize_curve_and_A24) +#define ec_normalize_point SQISIGN_NAMESPACE(ec_normalize_point) +#define ec_point_init SQISIGN_NAMESPACE(ec_point_init) +#define select_point SQISIGN_NAMESPACE(select_point) 
+#define xADD SQISIGN_NAMESPACE(xADD) +#define xDBL SQISIGN_NAMESPACE(xDBL) +#define xDBLADD SQISIGN_NAMESPACE(xDBLADD) +#define xDBLMUL SQISIGN_NAMESPACE(xDBLMUL) +#define xDBL_A24 SQISIGN_NAMESPACE(xDBL_A24) +#define xDBL_E0 SQISIGN_NAMESPACE(xDBL_E0) +#define xMUL SQISIGN_NAMESPACE(xMUL) + +// Namespacing symbols exported from ec_jac.c: +#undef ADD +#undef DBL +#undef DBLW +#undef copy_jac_point +#undef jac_from_ws +#undef jac_init +#undef jac_is_equal +#undef jac_neg +#undef jac_to_ws +#undef jac_to_xz +#undef jac_to_xz_add_components +#undef select_jac_point + +#define ADD SQISIGN_NAMESPACE(ADD) +#define DBL SQISIGN_NAMESPACE(DBL) +#define DBLW SQISIGN_NAMESPACE(DBLW) +#define copy_jac_point SQISIGN_NAMESPACE(copy_jac_point) +#define jac_from_ws SQISIGN_NAMESPACE(jac_from_ws) +#define jac_init SQISIGN_NAMESPACE(jac_init) +#define jac_is_equal SQISIGN_NAMESPACE(jac_is_equal) +#define jac_neg SQISIGN_NAMESPACE(jac_neg) +#define jac_to_ws SQISIGN_NAMESPACE(jac_to_ws) +#define jac_to_xz SQISIGN_NAMESPACE(jac_to_xz) +#define jac_to_xz_add_components SQISIGN_NAMESPACE(jac_to_xz_add_components) +#define select_jac_point SQISIGN_NAMESPACE(select_jac_point) + +// Namespacing symbols exported from encode_signature.c: +#undef secret_key_from_bytes +#undef secret_key_to_bytes + +#define secret_key_from_bytes SQISIGN_NAMESPACE(secret_key_from_bytes) +#define secret_key_to_bytes SQISIGN_NAMESPACE(secret_key_to_bytes) + +// Namespacing symbols exported from encode_verification.c: +#undef public_key_from_bytes +#undef public_key_to_bytes +#undef signature_from_bytes +#undef signature_to_bytes + +#define public_key_from_bytes SQISIGN_NAMESPACE(public_key_from_bytes) +#define public_key_to_bytes SQISIGN_NAMESPACE(public_key_to_bytes) +#define signature_from_bytes SQISIGN_NAMESPACE(signature_from_bytes) +#define signature_to_bytes SQISIGN_NAMESPACE(signature_to_bytes) + +// Namespacing symbols exported from finit.c: +#undef ibz_mat_2x2_finalize +#undef ibz_mat_2x2_init +#undef ibz_mat_4x4_finalize +#undef ibz_mat_4x4_init +#undef ibz_vec_2_finalize +#undef ibz_vec_2_init +#undef ibz_vec_4_finalize +#undef ibz_vec_4_init +#undef quat_alg_elem_finalize +#undef quat_alg_elem_init +#undef quat_alg_finalize +#undef quat_alg_init_set +#undef quat_lattice_finalize +#undef quat_lattice_init +#undef quat_left_ideal_finalize +#undef quat_left_ideal_init + +#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_finalize) +#define ibz_mat_2x2_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_init) +#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_finalize) +#define ibz_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_init) +#define ibz_vec_2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_finalize) +#define ibz_vec_2_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_init) +#define ibz_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_finalize) +#define ibz_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_init) +#define quat_alg_elem_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_finalize) +#define quat_alg_elem_init SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_init) +#define quat_alg_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_finalize) +#define quat_alg_init_set SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set) +#define quat_lattice_finalize SQISIGN_NAMESPACE_GENERIC(quat_lattice_finalize) +#define quat_lattice_init SQISIGN_NAMESPACE_GENERIC(quat_lattice_init) +#define quat_left_ideal_finalize SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_finalize) +#define quat_left_ideal_init 
SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_init) + +// Namespacing symbols exported from fp.c: +#undef fp_select +#undef p +#undef p2 + +#define fp_select SQISIGN_NAMESPACE(fp_select) +#define p SQISIGN_NAMESPACE(p) +#define p2 SQISIGN_NAMESPACE(p2) + +// Namespacing symbols exported from fp.c, fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c: +#undef fp_exp3div4 +#undef fp_inv +#undef fp_is_square +#undef fp_sqrt + +#define fp_exp3div4 SQISIGN_NAMESPACE(fp_exp3div4) +#define fp_inv SQISIGN_NAMESPACE(fp_inv) +#define fp_is_square SQISIGN_NAMESPACE(fp_is_square) +#define fp_sqrt SQISIGN_NAMESPACE(fp_sqrt) + +// Namespacing symbols exported from fp2.c: +#undef fp2_add +#undef fp2_add_one +#undef fp2_batched_inv +#undef fp2_copy +#undef fp2_cswap +#undef fp2_decode +#undef fp2_encode +#undef fp2_half +#undef fp2_inv +#undef fp2_is_equal +#undef fp2_is_one +#undef fp2_is_square +#undef fp2_is_zero +#undef fp2_mul +#undef fp2_mul_small +#undef fp2_neg +#undef fp2_pow_vartime +#undef fp2_print +#undef fp2_select +#undef fp2_set_one +#undef fp2_set_small +#undef fp2_set_zero +#undef fp2_sqr +#undef fp2_sqrt +#undef fp2_sqrt_verify +#undef fp2_sub + +#define fp2_add SQISIGN_NAMESPACE(fp2_add) +#define fp2_add_one SQISIGN_NAMESPACE(fp2_add_one) +#define fp2_batched_inv SQISIGN_NAMESPACE(fp2_batched_inv) +#define fp2_copy SQISIGN_NAMESPACE(fp2_copy) +#define fp2_cswap SQISIGN_NAMESPACE(fp2_cswap) +#define fp2_decode SQISIGN_NAMESPACE(fp2_decode) +#define fp2_encode SQISIGN_NAMESPACE(fp2_encode) +#define fp2_half SQISIGN_NAMESPACE(fp2_half) +#define fp2_inv SQISIGN_NAMESPACE(fp2_inv) +#define fp2_is_equal SQISIGN_NAMESPACE(fp2_is_equal) +#define fp2_is_one SQISIGN_NAMESPACE(fp2_is_one) +#define fp2_is_square SQISIGN_NAMESPACE(fp2_is_square) +#define fp2_is_zero SQISIGN_NAMESPACE(fp2_is_zero) +#define fp2_mul SQISIGN_NAMESPACE(fp2_mul) +#define fp2_mul_small SQISIGN_NAMESPACE(fp2_mul_small) +#define fp2_neg SQISIGN_NAMESPACE(fp2_neg) +#define fp2_pow_vartime SQISIGN_NAMESPACE(fp2_pow_vartime) +#define fp2_print SQISIGN_NAMESPACE(fp2_print) +#define fp2_select SQISIGN_NAMESPACE(fp2_select) +#define fp2_set_one SQISIGN_NAMESPACE(fp2_set_one) +#define fp2_set_small SQISIGN_NAMESPACE(fp2_set_small) +#define fp2_set_zero SQISIGN_NAMESPACE(fp2_set_zero) +#define fp2_sqr SQISIGN_NAMESPACE(fp2_sqr) +#define fp2_sqrt SQISIGN_NAMESPACE(fp2_sqrt) +#define fp2_sqrt_verify SQISIGN_NAMESPACE(fp2_sqrt_verify) +#define fp2_sub SQISIGN_NAMESPACE(fp2_sub) + +// Namespacing symbols exported from fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c: +#undef fp_copy +#undef fp_cswap +#undef fp_decode +#undef fp_decode_reduce +#undef fp_div3 +#undef fp_encode +#undef fp_half +#undef fp_is_equal +#undef fp_is_zero +#undef fp_mul_small +#undef fp_neg +#undef fp_set_one +#undef fp_set_small +#undef fp_set_zero + +#define fp_copy SQISIGN_NAMESPACE(fp_copy) +#define fp_cswap SQISIGN_NAMESPACE(fp_cswap) +#define fp_decode SQISIGN_NAMESPACE(fp_decode) +#define fp_decode_reduce SQISIGN_NAMESPACE(fp_decode_reduce) +#define fp_div3 SQISIGN_NAMESPACE(fp_div3) +#define fp_encode SQISIGN_NAMESPACE(fp_encode) +#define fp_half SQISIGN_NAMESPACE(fp_half) +#define fp_is_equal SQISIGN_NAMESPACE(fp_is_equal) +#define fp_is_zero SQISIGN_NAMESPACE(fp_is_zero) +#define fp_mul_small SQISIGN_NAMESPACE(fp_mul_small) +#define fp_neg SQISIGN_NAMESPACE(fp_neg) +#define fp_set_one SQISIGN_NAMESPACE(fp_set_one) +#define fp_set_small SQISIGN_NAMESPACE(fp_set_small) +#define fp_set_zero SQISIGN_NAMESPACE(fp_set_zero) +#define ONE SQISIGN_NAMESPACE(ONE) +#define 
ZERO SQISIGN_NAMESPACE(ZERO) + +// Namespacing symbols exported from fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c, gf27500.c, gf5248.c, gf65376.c: +#undef fp_add +#undef fp_mul +#undef fp_sqr +#undef fp_sub + +#define fp_add SQISIGN_NAMESPACE(fp_add) +#define fp_mul SQISIGN_NAMESPACE(fp_mul) +#define fp_sqr SQISIGN_NAMESPACE(fp_sqr) +#define fp_sub SQISIGN_NAMESPACE(fp_sub) + +// Namespacing symbols exported from gf27500.c: +#undef gf27500_decode +#undef gf27500_decode_reduce +#undef gf27500_div +#undef gf27500_div3 +#undef gf27500_encode +#undef gf27500_invert +#undef gf27500_legendre +#undef gf27500_sqrt + +#define gf27500_decode SQISIGN_NAMESPACE(gf27500_decode) +#define gf27500_decode_reduce SQISIGN_NAMESPACE(gf27500_decode_reduce) +#define gf27500_div SQISIGN_NAMESPACE(gf27500_div) +#define gf27500_div3 SQISIGN_NAMESPACE(gf27500_div3) +#define gf27500_encode SQISIGN_NAMESPACE(gf27500_encode) +#define gf27500_invert SQISIGN_NAMESPACE(gf27500_invert) +#define gf27500_legendre SQISIGN_NAMESPACE(gf27500_legendre) +#define gf27500_sqrt SQISIGN_NAMESPACE(gf27500_sqrt) + +// Namespacing symbols exported from gf27500.c, gf5248.c, gf65376.c: +#undef fp2_mul_c0 +#undef fp2_mul_c1 +#undef fp2_sq_c0 +#undef fp2_sq_c1 + +#define fp2_mul_c0 SQISIGN_NAMESPACE(fp2_mul_c0) +#define fp2_mul_c1 SQISIGN_NAMESPACE(fp2_mul_c1) +#define fp2_sq_c0 SQISIGN_NAMESPACE(fp2_sq_c0) +#define fp2_sq_c1 SQISIGN_NAMESPACE(fp2_sq_c1) + +// Namespacing symbols exported from gf5248.c: +#undef gf5248_decode +#undef gf5248_decode_reduce +#undef gf5248_div +#undef gf5248_div3 +#undef gf5248_encode +#undef gf5248_invert +#undef gf5248_legendre +#undef gf5248_sqrt + +#define gf5248_decode SQISIGN_NAMESPACE(gf5248_decode) +#define gf5248_decode_reduce SQISIGN_NAMESPACE(gf5248_decode_reduce) +#define gf5248_div SQISIGN_NAMESPACE(gf5248_div) +#define gf5248_div3 SQISIGN_NAMESPACE(gf5248_div3) +#define gf5248_encode SQISIGN_NAMESPACE(gf5248_encode) +#define gf5248_invert SQISIGN_NAMESPACE(gf5248_invert) +#define gf5248_legendre SQISIGN_NAMESPACE(gf5248_legendre) +#define gf5248_sqrt SQISIGN_NAMESPACE(gf5248_sqrt) + +// Namespacing symbols exported from gf65376.c: +#undef gf65376_decode +#undef gf65376_decode_reduce +#undef gf65376_div +#undef gf65376_div3 +#undef gf65376_encode +#undef gf65376_invert +#undef gf65376_legendre +#undef gf65376_sqrt + +#define gf65376_decode SQISIGN_NAMESPACE(gf65376_decode) +#define gf65376_decode_reduce SQISIGN_NAMESPACE(gf65376_decode_reduce) +#define gf65376_div SQISIGN_NAMESPACE(gf65376_div) +#define gf65376_div3 SQISIGN_NAMESPACE(gf65376_div3) +#define gf65376_encode SQISIGN_NAMESPACE(gf65376_encode) +#define gf65376_invert SQISIGN_NAMESPACE(gf65376_invert) +#define gf65376_legendre SQISIGN_NAMESPACE(gf65376_legendre) +#define gf65376_sqrt SQISIGN_NAMESPACE(gf65376_sqrt) + +// Namespacing symbols exported from hd.c: +#undef add_couple_jac_points +#undef copy_bases_to_kernel +#undef couple_jac_to_xz +#undef double_couple_jac_point +#undef double_couple_jac_point_iter +#undef double_couple_point +#undef double_couple_point_iter + +#define add_couple_jac_points SQISIGN_NAMESPACE(add_couple_jac_points) +#define copy_bases_to_kernel SQISIGN_NAMESPACE(copy_bases_to_kernel) +#define couple_jac_to_xz SQISIGN_NAMESPACE(couple_jac_to_xz) +#define double_couple_jac_point SQISIGN_NAMESPACE(double_couple_jac_point) +#define double_couple_jac_point_iter SQISIGN_NAMESPACE(double_couple_jac_point_iter) +#define double_couple_point SQISIGN_NAMESPACE(double_couple_point) +#define double_couple_point_iter 
SQISIGN_NAMESPACE(double_couple_point_iter) + +// Namespacing symbols exported from hnf.c: +#undef ibz_mat_4x4_is_hnf +#undef ibz_mat_4xn_hnf_mod_core +#undef ibz_vec_4_copy_mod +#undef ibz_vec_4_linear_combination_mod +#undef ibz_vec_4_scalar_mul_mod + +#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_hnf) +#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE_GENERIC(ibz_mat_4xn_hnf_mod_core) +#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_mod) +#define ibz_vec_4_linear_combination_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination_mod) +#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul_mod) + +// Namespacing symbols exported from hnf_internal.c: +#undef ibz_centered_mod +#undef ibz_conditional_assign +#undef ibz_mod_not_zero +#undef ibz_xgcd_with_u_not_0 + +#define ibz_centered_mod SQISIGN_NAMESPACE_GENERIC(ibz_centered_mod) +#define ibz_conditional_assign SQISIGN_NAMESPACE_GENERIC(ibz_conditional_assign) +#define ibz_mod_not_zero SQISIGN_NAMESPACE_GENERIC(ibz_mod_not_zero) +#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE_GENERIC(ibz_xgcd_with_u_not_0) + +// Namespacing symbols exported from ibz_division.c: +#undef ibz_xgcd + +#define ibz_xgcd SQISIGN_NAMESPACE_GENERIC(ibz_xgcd) + +// Namespacing symbols exported from id2iso.c: +#undef change_of_basis_matrix_tate +#undef change_of_basis_matrix_tate_invert +#undef ec_biscalar_mul_ibz_vec +#undef endomorphism_application_even_basis +#undef id2iso_ideal_to_kernel_dlogs_even +#undef id2iso_kernel_dlogs_to_ideal_even +#undef matrix_application_even_basis + +#define change_of_basis_matrix_tate SQISIGN_NAMESPACE(change_of_basis_matrix_tate) +#define change_of_basis_matrix_tate_invert SQISIGN_NAMESPACE(change_of_basis_matrix_tate_invert) +#define ec_biscalar_mul_ibz_vec SQISIGN_NAMESPACE(ec_biscalar_mul_ibz_vec) +#define endomorphism_application_even_basis SQISIGN_NAMESPACE(endomorphism_application_even_basis) +#define id2iso_ideal_to_kernel_dlogs_even SQISIGN_NAMESPACE(id2iso_ideal_to_kernel_dlogs_even) +#define id2iso_kernel_dlogs_to_ideal_even SQISIGN_NAMESPACE(id2iso_kernel_dlogs_to_ideal_even) +#define matrix_application_even_basis SQISIGN_NAMESPACE(matrix_application_even_basis) + +// Namespacing symbols exported from ideal.c: +#undef quat_lideal_add +#undef quat_lideal_class_gram +#undef quat_lideal_conjugate_without_hnf +#undef quat_lideal_copy +#undef quat_lideal_create +#undef quat_lideal_create_principal +#undef quat_lideal_equals +#undef quat_lideal_generator +#undef quat_lideal_inter +#undef quat_lideal_inverse_lattice_without_hnf +#undef quat_lideal_mul +#undef quat_lideal_norm +#undef quat_lideal_right_order +#undef quat_lideal_right_transporter +#undef quat_order_discriminant +#undef quat_order_is_maximal + +#define quat_lideal_add SQISIGN_NAMESPACE_GENERIC(quat_lideal_add) +#define quat_lideal_class_gram SQISIGN_NAMESPACE_GENERIC(quat_lideal_class_gram) +#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_conjugate_without_hnf) +#define quat_lideal_copy SQISIGN_NAMESPACE_GENERIC(quat_lideal_copy) +#define quat_lideal_create SQISIGN_NAMESPACE_GENERIC(quat_lideal_create) +#define quat_lideal_create_principal SQISIGN_NAMESPACE_GENERIC(quat_lideal_create_principal) +#define quat_lideal_equals SQISIGN_NAMESPACE_GENERIC(quat_lideal_equals) +#define quat_lideal_generator SQISIGN_NAMESPACE_GENERIC(quat_lideal_generator) +#define quat_lideal_inter SQISIGN_NAMESPACE_GENERIC(quat_lideal_inter) +#define 
quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_inverse_lattice_without_hnf) +#define quat_lideal_mul SQISIGN_NAMESPACE_GENERIC(quat_lideal_mul) +#define quat_lideal_norm SQISIGN_NAMESPACE_GENERIC(quat_lideal_norm) +#define quat_lideal_right_order SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_order) +#define quat_lideal_right_transporter SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_transporter) +#define quat_order_discriminant SQISIGN_NAMESPACE_GENERIC(quat_order_discriminant) +#define quat_order_is_maximal SQISIGN_NAMESPACE_GENERIC(quat_order_is_maximal) + +// Namespacing symbols exported from intbig.c: +#undef ibz_abs +#undef ibz_add +#undef ibz_bitsize +#undef ibz_cmp +#undef ibz_cmp_int32 +#undef ibz_convert_to_str +#undef ibz_copy +#undef ibz_copy_digits +#undef ibz_div +#undef ibz_div_2exp +#undef ibz_div_floor +#undef ibz_divides +#undef ibz_finalize +#undef ibz_gcd +#undef ibz_get +#undef ibz_init +#undef ibz_invmod +#undef ibz_is_even +#undef ibz_is_odd +#undef ibz_is_one +#undef ibz_is_zero +#undef ibz_legendre +#undef ibz_mod +#undef ibz_mod_ui +#undef ibz_mul +#undef ibz_neg +#undef ibz_pow +#undef ibz_pow_mod +#undef ibz_print +#undef ibz_probab_prime +#undef ibz_rand_interval +#undef ibz_rand_interval_bits +#undef ibz_rand_interval_i +#undef ibz_rand_interval_minm_m +#undef ibz_set +#undef ibz_set_from_str +#undef ibz_size_in_base +#undef ibz_sqrt +#undef ibz_sqrt_floor +#undef ibz_sqrt_mod_p +#undef ibz_sub +#undef ibz_swap +#undef ibz_to_digits +#undef ibz_two_adic + +#define ibz_abs SQISIGN_NAMESPACE_GENERIC(ibz_abs) +#define ibz_add SQISIGN_NAMESPACE_GENERIC(ibz_add) +#define ibz_bitsize SQISIGN_NAMESPACE_GENERIC(ibz_bitsize) +#define ibz_cmp SQISIGN_NAMESPACE_GENERIC(ibz_cmp) +#define ibz_cmp_int32 SQISIGN_NAMESPACE_GENERIC(ibz_cmp_int32) +#define ibz_convert_to_str SQISIGN_NAMESPACE_GENERIC(ibz_convert_to_str) +#define ibz_copy SQISIGN_NAMESPACE_GENERIC(ibz_copy) +#define ibz_copy_digits SQISIGN_NAMESPACE_GENERIC(ibz_copy_digits) +#define ibz_div SQISIGN_NAMESPACE_GENERIC(ibz_div) +#define ibz_div_2exp SQISIGN_NAMESPACE_GENERIC(ibz_div_2exp) +#define ibz_div_floor SQISIGN_NAMESPACE_GENERIC(ibz_div_floor) +#define ibz_divides SQISIGN_NAMESPACE_GENERIC(ibz_divides) +#define ibz_finalize SQISIGN_NAMESPACE_GENERIC(ibz_finalize) +#define ibz_gcd SQISIGN_NAMESPACE_GENERIC(ibz_gcd) +#define ibz_get SQISIGN_NAMESPACE_GENERIC(ibz_get) +#define ibz_init SQISIGN_NAMESPACE_GENERIC(ibz_init) +#define ibz_invmod SQISIGN_NAMESPACE_GENERIC(ibz_invmod) +#define ibz_is_even SQISIGN_NAMESPACE_GENERIC(ibz_is_even) +#define ibz_is_odd SQISIGN_NAMESPACE_GENERIC(ibz_is_odd) +#define ibz_is_one SQISIGN_NAMESPACE_GENERIC(ibz_is_one) +#define ibz_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_is_zero) +#define ibz_legendre SQISIGN_NAMESPACE_GENERIC(ibz_legendre) +#define ibz_mod SQISIGN_NAMESPACE_GENERIC(ibz_mod) +#define ibz_mod_ui SQISIGN_NAMESPACE_GENERIC(ibz_mod_ui) +#define ibz_mul SQISIGN_NAMESPACE_GENERIC(ibz_mul) +#define ibz_neg SQISIGN_NAMESPACE_GENERIC(ibz_neg) +#define ibz_pow SQISIGN_NAMESPACE_GENERIC(ibz_pow) +#define ibz_pow_mod SQISIGN_NAMESPACE_GENERIC(ibz_pow_mod) +#define ibz_print SQISIGN_NAMESPACE_GENERIC(ibz_print) +#define ibz_probab_prime SQISIGN_NAMESPACE_GENERIC(ibz_probab_prime) +#define ibz_rand_interval SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval) +#define ibz_rand_interval_bits SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_bits) +#define ibz_rand_interval_i SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_i) +#define ibz_rand_interval_minm_m 
SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_minm_m) +#define ibz_set SQISIGN_NAMESPACE_GENERIC(ibz_set) +#define ibz_set_from_str SQISIGN_NAMESPACE_GENERIC(ibz_set_from_str) +#define ibz_size_in_base SQISIGN_NAMESPACE_GENERIC(ibz_size_in_base) +#define ibz_sqrt SQISIGN_NAMESPACE_GENERIC(ibz_sqrt) +#define ibz_sqrt_floor SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_floor) +#define ibz_sqrt_mod_p SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_mod_p) +#define ibz_sub SQISIGN_NAMESPACE_GENERIC(ibz_sub) +#define ibz_swap SQISIGN_NAMESPACE_GENERIC(ibz_swap) +#define ibz_to_digits SQISIGN_NAMESPACE_GENERIC(ibz_to_digits) +#define ibz_two_adic SQISIGN_NAMESPACE_GENERIC(ibz_two_adic) + +// Namespacing symbols exported from integers.c: +#undef ibz_cornacchia_prime +#undef ibz_generate_random_prime + +#define ibz_cornacchia_prime SQISIGN_NAMESPACE_GENERIC(ibz_cornacchia_prime) +#define ibz_generate_random_prime SQISIGN_NAMESPACE_GENERIC(ibz_generate_random_prime) + +// Namespacing symbols exported from isog_chains.c: +#undef ec_eval_even +#undef ec_eval_small_chain +#undef ec_iso_eval +#undef ec_isomorphism + +#define ec_eval_even SQISIGN_NAMESPACE(ec_eval_even) +#define ec_eval_small_chain SQISIGN_NAMESPACE(ec_eval_small_chain) +#define ec_iso_eval SQISIGN_NAMESPACE(ec_iso_eval) +#define ec_isomorphism SQISIGN_NAMESPACE(ec_isomorphism) + +// Namespacing symbols exported from keygen.c: +#undef protocols_keygen +#undef secret_key_finalize +#undef secret_key_init + +#define protocols_keygen SQISIGN_NAMESPACE(protocols_keygen) +#define secret_key_finalize SQISIGN_NAMESPACE(secret_key_finalize) +#define secret_key_init SQISIGN_NAMESPACE(secret_key_init) + +// Namespacing symbols exported from l2.c: +#undef quat_lattice_lll +#undef quat_lll_core + +#define quat_lattice_lll SQISIGN_NAMESPACE_GENERIC(quat_lattice_lll) +#define quat_lll_core SQISIGN_NAMESPACE_GENERIC(quat_lll_core) + +// Namespacing symbols exported from lat_ball.c: +#undef quat_lattice_bound_parallelogram +#undef quat_lattice_sample_from_ball + +#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE_GENERIC(quat_lattice_bound_parallelogram) +#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE_GENERIC(quat_lattice_sample_from_ball) + +// Namespacing symbols exported from lattice.c: +#undef quat_lattice_add +#undef quat_lattice_alg_elem_mul +#undef quat_lattice_conjugate_without_hnf +#undef quat_lattice_contains +#undef quat_lattice_dual_without_hnf +#undef quat_lattice_equal +#undef quat_lattice_gram +#undef quat_lattice_hnf +#undef quat_lattice_inclusion +#undef quat_lattice_index +#undef quat_lattice_intersect +#undef quat_lattice_mat_alg_coord_mul_without_hnf +#undef quat_lattice_mul +#undef quat_lattice_reduce_denom + +#define quat_lattice_add SQISIGN_NAMESPACE_GENERIC(quat_lattice_add) +#define quat_lattice_alg_elem_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_alg_elem_mul) +#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_conjugate_without_hnf) +#define quat_lattice_contains SQISIGN_NAMESPACE_GENERIC(quat_lattice_contains) +#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_dual_without_hnf) +#define quat_lattice_equal SQISIGN_NAMESPACE_GENERIC(quat_lattice_equal) +#define quat_lattice_gram SQISIGN_NAMESPACE_GENERIC(quat_lattice_gram) +#define quat_lattice_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_hnf) +#define quat_lattice_inclusion SQISIGN_NAMESPACE_GENERIC(quat_lattice_inclusion) +#define quat_lattice_index SQISIGN_NAMESPACE_GENERIC(quat_lattice_index) +#define quat_lattice_intersect 
SQISIGN_NAMESPACE_GENERIC(quat_lattice_intersect) +#define quat_lattice_mat_alg_coord_mul_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_mat_alg_coord_mul_without_hnf) +#define quat_lattice_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_mul) +#define quat_lattice_reduce_denom SQISIGN_NAMESPACE_GENERIC(quat_lattice_reduce_denom) + +// Namespacing symbols exported from lll_applications.c: +#undef quat_lideal_lideal_mul_reduced +#undef quat_lideal_prime_norm_reduced_equivalent +#undef quat_lideal_reduce_basis + +#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE_GENERIC(quat_lideal_lideal_mul_reduced) +#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE_GENERIC(quat_lideal_prime_norm_reduced_equivalent) +#define quat_lideal_reduce_basis SQISIGN_NAMESPACE_GENERIC(quat_lideal_reduce_basis) + +// Namespacing symbols exported from lll_verification.c: +#undef ibq_vec_4_copy_ibz +#undef quat_lll_bilinear +#undef quat_lll_gram_schmidt_transposed_with_ibq +#undef quat_lll_set_ibq_parameters +#undef quat_lll_verify + +#define ibq_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_copy_ibz) +#define quat_lll_bilinear SQISIGN_NAMESPACE_GENERIC(quat_lll_bilinear) +#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE_GENERIC(quat_lll_gram_schmidt_transposed_with_ibq) +#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE_GENERIC(quat_lll_set_ibq_parameters) +#define quat_lll_verify SQISIGN_NAMESPACE_GENERIC(quat_lll_verify) + +// Namespacing symbols exported from mem.c: +#undef sqisign_secure_clear +#undef sqisign_secure_free + +#define sqisign_secure_clear SQISIGN_NAMESPACE_GENERIC(sqisign_secure_clear) +#define sqisign_secure_free SQISIGN_NAMESPACE_GENERIC(sqisign_secure_free) + +// Namespacing symbols exported from mp.c: +#undef MUL +#undef mp_add +#undef mp_compare +#undef mp_copy +#undef mp_inv_2e +#undef mp_invert_matrix +#undef mp_is_one +#undef mp_is_zero +#undef mp_mod_2exp +#undef mp_mul +#undef mp_mul2 +#undef mp_neg +#undef mp_print +#undef mp_shiftl +#undef mp_shiftr +#undef mp_sub +#undef multiple_mp_shiftl +#undef select_ct +#undef swap_ct + +#define MUL SQISIGN_NAMESPACE_GENERIC(MUL) +#define mp_add SQISIGN_NAMESPACE_GENERIC(mp_add) +#define mp_compare SQISIGN_NAMESPACE_GENERIC(mp_compare) +#define mp_copy SQISIGN_NAMESPACE_GENERIC(mp_copy) +#define mp_inv_2e SQISIGN_NAMESPACE_GENERIC(mp_inv_2e) +#define mp_invert_matrix SQISIGN_NAMESPACE_GENERIC(mp_invert_matrix) +#define mp_is_one SQISIGN_NAMESPACE_GENERIC(mp_is_one) +#define mp_is_zero SQISIGN_NAMESPACE_GENERIC(mp_is_zero) +#define mp_mod_2exp SQISIGN_NAMESPACE_GENERIC(mp_mod_2exp) +#define mp_mul SQISIGN_NAMESPACE_GENERIC(mp_mul) +#define mp_mul2 SQISIGN_NAMESPACE_GENERIC(mp_mul2) +#define mp_neg SQISIGN_NAMESPACE_GENERIC(mp_neg) +#define mp_print SQISIGN_NAMESPACE_GENERIC(mp_print) +#define mp_shiftl SQISIGN_NAMESPACE_GENERIC(mp_shiftl) +#define mp_shiftr SQISIGN_NAMESPACE_GENERIC(mp_shiftr) +#define mp_sub SQISIGN_NAMESPACE_GENERIC(mp_sub) +#define multiple_mp_shiftl SQISIGN_NAMESPACE_GENERIC(multiple_mp_shiftl) +#define select_ct SQISIGN_NAMESPACE_GENERIC(select_ct) +#define swap_ct SQISIGN_NAMESPACE_GENERIC(swap_ct) + +// Namespacing symbols exported from normeq.c: +#undef quat_change_to_O0_basis +#undef quat_lattice_O0_set +#undef quat_lattice_O0_set_extremal +#undef quat_order_elem_create +#undef quat_represent_integer +#undef quat_sampling_random_ideal_O0_given_norm + +#define quat_change_to_O0_basis SQISIGN_NAMESPACE_GENERIC(quat_change_to_O0_basis) +#define quat_lattice_O0_set 
SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set) +#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set_extremal) +#define quat_order_elem_create SQISIGN_NAMESPACE_GENERIC(quat_order_elem_create) +#define quat_represent_integer SQISIGN_NAMESPACE_GENERIC(quat_represent_integer) +#define quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE_GENERIC(quat_sampling_random_ideal_O0_given_norm) + +// Namespacing symbols exported from printer.c: +#undef ibz_mat_2x2_print +#undef ibz_mat_4x4_print +#undef ibz_vec_2_print +#undef ibz_vec_4_print +#undef quat_alg_elem_print +#undef quat_alg_print +#undef quat_lattice_print +#undef quat_left_ideal_print + +#define ibz_mat_2x2_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_print) +#define ibz_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_print) +#define ibz_vec_2_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_print) +#define ibz_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_print) +#define quat_alg_elem_print SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_print) +#define quat_alg_print SQISIGN_NAMESPACE_GENERIC(quat_alg_print) +#define quat_lattice_print SQISIGN_NAMESPACE_GENERIC(quat_lattice_print) +#define quat_left_ideal_print SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_print) + +// Namespacing symbols exported from random_input_generation.c: +#undef quat_test_input_random_ideal_generation +#undef quat_test_input_random_ideal_lattice_generation +#undef quat_test_input_random_lattice_generation + +#define quat_test_input_random_ideal_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_generation) +#define quat_test_input_random_ideal_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_lattice_generation) +#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_lattice_generation) + +// Namespacing symbols exported from rationals.c: +#undef ibq_abs +#undef ibq_add +#undef ibq_cmp +#undef ibq_copy +#undef ibq_finalize +#undef ibq_init +#undef ibq_inv +#undef ibq_is_ibz +#undef ibq_is_one +#undef ibq_is_zero +#undef ibq_mat_4x4_finalize +#undef ibq_mat_4x4_init +#undef ibq_mat_4x4_print +#undef ibq_mul +#undef ibq_neg +#undef ibq_reduce +#undef ibq_set +#undef ibq_sub +#undef ibq_to_ibz +#undef ibq_vec_4_finalize +#undef ibq_vec_4_init +#undef ibq_vec_4_print + +#define ibq_abs SQISIGN_NAMESPACE_GENERIC(ibq_abs) +#define ibq_add SQISIGN_NAMESPACE_GENERIC(ibq_add) +#define ibq_cmp SQISIGN_NAMESPACE_GENERIC(ibq_cmp) +#define ibq_copy SQISIGN_NAMESPACE_GENERIC(ibq_copy) +#define ibq_finalize SQISIGN_NAMESPACE_GENERIC(ibq_finalize) +#define ibq_init SQISIGN_NAMESPACE_GENERIC(ibq_init) +#define ibq_inv SQISIGN_NAMESPACE_GENERIC(ibq_inv) +#define ibq_is_ibz SQISIGN_NAMESPACE_GENERIC(ibq_is_ibz) +#define ibq_is_one SQISIGN_NAMESPACE_GENERIC(ibq_is_one) +#define ibq_is_zero SQISIGN_NAMESPACE_GENERIC(ibq_is_zero) +#define ibq_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_finalize) +#define ibq_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_init) +#define ibq_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_print) +#define ibq_mul SQISIGN_NAMESPACE_GENERIC(ibq_mul) +#define ibq_neg SQISIGN_NAMESPACE_GENERIC(ibq_neg) +#define ibq_reduce SQISIGN_NAMESPACE_GENERIC(ibq_reduce) +#define ibq_set SQISIGN_NAMESPACE_GENERIC(ibq_set) +#define ibq_sub SQISIGN_NAMESPACE_GENERIC(ibq_sub) +#define ibq_to_ibz SQISIGN_NAMESPACE_GENERIC(ibq_to_ibz) +#define ibq_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_finalize) +#define ibq_vec_4_init 
SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_init) +#define ibq_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_print) + +// Namespacing symbols exported from sign.c: +#undef protocols_sign + +#define protocols_sign SQISIGN_NAMESPACE(protocols_sign) + +// Namespacing symbols exported from sqisign.c: +#undef sqisign_keypair +#undef sqisign_open +#undef sqisign_sign +#undef sqisign_sign_signature +#undef sqisign_verify +#undef sqisign_verify_signature + +#define sqisign_keypair SQISIGN_NAMESPACE(sqisign_keypair) +#define sqisign_open SQISIGN_NAMESPACE(sqisign_open) +#define sqisign_sign SQISIGN_NAMESPACE(sqisign_sign) +#define sqisign_sign_signature SQISIGN_NAMESPACE(sqisign_sign_signature) +#define sqisign_verify SQISIGN_NAMESPACE(sqisign_verify) +#define sqisign_verify_signature SQISIGN_NAMESPACE(sqisign_verify_signature) + +// Namespacing symbols exported from theta_isogenies.c: +#undef theta_chain_compute_and_eval +#undef theta_chain_compute_and_eval_randomized +#undef theta_chain_compute_and_eval_verify + +#define theta_chain_compute_and_eval SQISIGN_NAMESPACE(theta_chain_compute_and_eval) +#define theta_chain_compute_and_eval_randomized SQISIGN_NAMESPACE(theta_chain_compute_and_eval_randomized) +#define theta_chain_compute_and_eval_verify SQISIGN_NAMESPACE(theta_chain_compute_and_eval_verify) + +// Namespacing symbols exported from theta_structure.c: +#undef double_iter +#undef double_point +#undef is_product_theta_point +#undef theta_precomputation + +#define double_iter SQISIGN_NAMESPACE(double_iter) +#define double_point SQISIGN_NAMESPACE(double_point) +#define is_product_theta_point SQISIGN_NAMESPACE(is_product_theta_point) +#define theta_precomputation SQISIGN_NAMESPACE(theta_precomputation) + +// Namespacing symbols exported from verify.c: +#undef protocols_verify + +#define protocols_verify SQISIGN_NAMESPACE(protocols_verify) + +// Namespacing symbols exported from xeval.c: +#undef xeval_2 +#undef xeval_2_singular +#undef xeval_4 + +#define xeval_2 SQISIGN_NAMESPACE(xeval_2) +#define xeval_2_singular SQISIGN_NAMESPACE(xeval_2_singular) +#define xeval_4 SQISIGN_NAMESPACE(xeval_4) + +// Namespacing symbols exported from xisog.c: +#undef xisog_2 +#undef xisog_2_singular +#undef xisog_4 + +#define xisog_2 SQISIGN_NAMESPACE(xisog_2) +#define xisog_2_singular SQISIGN_NAMESPACE(xisog_2_singular) +#define xisog_4 SQISIGN_NAMESPACE(xisog_4) + +// Namespacing symbols from precomp: +#undef BASIS_E0_PX +#undef BASIS_E0_QX +#undef p_cofactor_for_2f +#undef CURVES_WITH_ENDOMORPHISMS +#undef EVEN_INDEX +#undef CHI_EVAL +#undef FP2_CONSTANTS +#undef SPLITTING_TRANSFORMS +#undef NORMALIZATION_TRANSFORMS +#undef QUAT_prime_cofactor +#undef QUATALG_PINFTY +#undef EXTREMAL_ORDERS +#undef CONNECTING_IDEALS +#undef CONJUGATING_ELEMENTS +#undef TWO_TO_SECURITY_BITS +#undef TORSION_PLUS_2POWER +#undef SEC_DEGREE +#undef COM_DEGREE + +#define BASIS_E0_PX SQISIGN_NAMESPACE(BASIS_E0_PX) +#define BASIS_E0_QX SQISIGN_NAMESPACE(BASIS_E0_QX) +#define p_cofactor_for_2f SQISIGN_NAMESPACE(p_cofactor_for_2f) +#define CURVES_WITH_ENDOMORPHISMS SQISIGN_NAMESPACE(CURVES_WITH_ENDOMORPHISMS) +#define EVEN_INDEX SQISIGN_NAMESPACE(EVEN_INDEX) +#define CHI_EVAL SQISIGN_NAMESPACE(CHI_EVAL) +#define FP2_CONSTANTS SQISIGN_NAMESPACE(FP2_CONSTANTS) +#define SPLITTING_TRANSFORMS SQISIGN_NAMESPACE(SPLITTING_TRANSFORMS) +#define NORMALIZATION_TRANSFORMS SQISIGN_NAMESPACE(NORMALIZATION_TRANSFORMS) +#define QUAT_prime_cofactor SQISIGN_NAMESPACE(QUAT_prime_cofactor) +#define QUATALG_PINFTY SQISIGN_NAMESPACE(QUATALG_PINFTY) +#define 
EXTREMAL_ORDERS SQISIGN_NAMESPACE(EXTREMAL_ORDERS) +#define CONNECTING_IDEALS SQISIGN_NAMESPACE(CONNECTING_IDEALS) +#define CONJUGATING_ELEMENTS SQISIGN_NAMESPACE(CONJUGATING_ELEMENTS) +#define TWO_TO_SECURITY_BITS SQISIGN_NAMESPACE(TWO_TO_SECURITY_BITS) +#define TORSION_PLUS_2POWER SQISIGN_NAMESPACE(TORSION_PLUS_2POWER) +#define SEC_DEGREE SQISIGN_NAMESPACE(SEC_DEGREE) +#define COM_DEGREE SQISIGN_NAMESPACE(COM_DEGREE) + + +#endif + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign_parameters.txt b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign_parameters.txt new file mode 100644 index 0000000000..52241becdc --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign_parameters.txt @@ -0,0 +1,3 @@ +lvl = 3 +p = 0x40ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff +num_orders = 8 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/theta_isogenies.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/theta_isogenies.c new file mode 100644 index 0000000000..478a9ab25b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/theta_isogenies.c @@ -0,0 +1,1283 @@ +#include "theta_isogenies.h" +#include +#include +#include +#include +#include + +// Select a base change matrix in constant time, with M1 a regular +// base change matrix and M2 a precomputed base change matrix +// If option = 0 then M <- M1, else if option = 0xFF...FF then M <- M2 +static inline void +select_base_change_matrix(basis_change_matrix_t *M, + const basis_change_matrix_t *M1, + const precomp_basis_change_matrix_t *M2, + const uint32_t option) +{ + for (int i = 0; i < 4; i++) + for (int j = 0; j < 4; j++) + fp2_select(&M->m[i][j], &M1->m[i][j], &FP2_CONSTANTS[M2->m[i][j]], option); +} + +// Set a regular base change matrix from a precomputed one +static inline void +set_base_change_matrix_from_precomp(basis_change_matrix_t *res, const precomp_basis_change_matrix_t *M) +{ + for (int i = 0; i < 4; i++) + for (int j = 0; j < 4; j++) + res->m[i][j] = FP2_CONSTANTS[M->m[i][j]]; +} + +static inline void +choose_index_theta_point(fp2_t *res, int ind, const theta_point_t *T) +{ + const fp2_t *src = NULL; + switch (ind % 4) { + case 0: + src = &T->x; + break; + case 1: + src = &T->y; + break; + case 2: + src = &T->z; + break; + case 3: + src = &T->t; + break; + default: + assert(0); + } + fp2_copy(res, src); +} + +// same as apply_isomorphism method but more efficient when the t component of P is zero. 
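+// (the Pt_not_zero flag lets the caller skip the multiplications by the fourth column of M when P->t is known to be zero)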
+static void +apply_isomorphism_general(theta_point_t *res, + const basis_change_matrix_t *M, + const theta_point_t *P, + const bool Pt_not_zero) +{ + fp2_t x1; + theta_point_t temp; + + fp2_mul(&temp.x, &P->x, &M->m[0][0]); + fp2_mul(&x1, &P->y, &M->m[0][1]); + fp2_add(&temp.x, &temp.x, &x1); + fp2_mul(&x1, &P->z, &M->m[0][2]); + fp2_add(&temp.x, &temp.x, &x1); + + fp2_mul(&temp.y, &P->x, &M->m[1][0]); + fp2_mul(&x1, &P->y, &M->m[1][1]); + fp2_add(&temp.y, &temp.y, &x1); + fp2_mul(&x1, &P->z, &M->m[1][2]); + fp2_add(&temp.y, &temp.y, &x1); + + fp2_mul(&temp.z, &P->x, &M->m[2][0]); + fp2_mul(&x1, &P->y, &M->m[2][1]); + fp2_add(&temp.z, &temp.z, &x1); + fp2_mul(&x1, &P->z, &M->m[2][2]); + fp2_add(&temp.z, &temp.z, &x1); + + fp2_mul(&temp.t, &P->x, &M->m[3][0]); + fp2_mul(&x1, &P->y, &M->m[3][1]); + fp2_add(&temp.t, &temp.t, &x1); + fp2_mul(&x1, &P->z, &M->m[3][2]); + fp2_add(&temp.t, &temp.t, &x1); + + if (Pt_not_zero) { + fp2_mul(&x1, &P->t, &M->m[0][3]); + fp2_add(&temp.x, &temp.x, &x1); + + fp2_mul(&x1, &P->t, &M->m[1][3]); + fp2_add(&temp.y, &temp.y, &x1); + + fp2_mul(&x1, &P->t, &M->m[2][3]); + fp2_add(&temp.z, &temp.z, &x1); + + fp2_mul(&x1, &P->t, &M->m[3][3]); + fp2_add(&temp.t, &temp.t, &x1); + } + + fp2_copy(&res->x, &temp.x); + fp2_copy(&res->y, &temp.y); + fp2_copy(&res->z, &temp.z); + fp2_copy(&res->t, &temp.t); +} + +static void +apply_isomorphism(theta_point_t *res, const basis_change_matrix_t *M, const theta_point_t *P) +{ + apply_isomorphism_general(res, M, P, true); +} + +// set res = M1 * M2 with matrix multiplication +static void +base_change_matrix_multiplication(basis_change_matrix_t *res, + const basis_change_matrix_t *M1, + const basis_change_matrix_t *M2) +{ + basis_change_matrix_t tmp; + fp2_t sum, m_ik, m_kj; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + fp2_set_zero(&sum); + for (int k = 0; k < 4; k++) { + m_ik = M1->m[i][k]; + m_kj = M2->m[k][j]; + fp2_mul(&m_ik, &m_ik, &m_kj); + fp2_add(&sum, &sum, &m_ik); + } + tmp.m[i][j] = sum; + } + } + *res = tmp; +} + +// compute the theta_point corresponding to the couple of point T on an elliptic product +static void +base_change(theta_point_t *out, const theta_gluing_t *phi, const theta_couple_point_t *T) +{ + theta_point_t null_point; + + // null_point = (a : b : c : d) + // a = P1.x P2.x, b = P1.x P2.z, c = P1.z P2.x, d = P1.z P2.z + fp2_mul(&null_point.x, &T->P1.x, &T->P2.x); + fp2_mul(&null_point.y, &T->P1.x, &T->P2.z); + fp2_mul(&null_point.z, &T->P2.x, &T->P1.z); + fp2_mul(&null_point.t, &T->P1.z, &T->P2.z); + + // Apply the basis change + apply_isomorphism(out, &phi->M, &null_point); +} + +static void +action_by_translation_z_and_det(fp2_t *z_inv, fp2_t *det_inv, const ec_point_t *P4, const ec_point_t *P2) +{ + // Store the Z-coordinate to invert + fp2_copy(z_inv, &P4->z); + + // Then collect detij = xij wij - uij zij + fp2_t tmp; + fp2_mul(det_inv, &P4->x, &P2->z); + fp2_mul(&tmp, &P4->z, &P2->x); + fp2_sub(det_inv, det_inv, &tmp); +} + +static void +action_by_translation_compute_matrix(translation_matrix_t *G, + const ec_point_t *P4, + const ec_point_t *P2, + const fp2_t *z_inv, + const fp2_t *det_inv) +{ + fp2_t tmp; + + // Gi.g10 = uij xij /detij - xij/zij + fp2_mul(&tmp, &P4->x, z_inv); + fp2_mul(&G->g10, &P4->x, &P2->x); + fp2_mul(&G->g10, &G->g10, det_inv); + fp2_sub(&G->g10, &G->g10, &tmp); + + // Gi.g11 = uij zij * detij + fp2_mul(&G->g11, &P2->x, det_inv); + fp2_mul(&G->g11, &G->g11, &P4->z); + + // Gi.g00 = -Gi.g11 + fp2_neg(&G->g00, &G->g11); + + // Gi.g01 = - wij zij detij + 
fp2_mul(&G->g01, &P2->z, det_inv);
+    fp2_mul(&G->g01, &G->g01, &P4->z);
+    fp2_neg(&G->g01, &G->g01);
+}
+
+// Returns 1 if the basis is as expected and 0 otherwise
+// We only expect this to fail for malformed signatures, so
+// do not require this to run in constant time.
+static int
+verify_two_torsion(const theta_couple_point_t *K1_2, const theta_couple_point_t *K2_2, const theta_couple_curve_t *E12)
+{
+    // First check if any point in K1_2 or K2_2 is zero, if they are then the points did not have
+    // order 8 when we started gluing
+    if (ec_is_zero(&K1_2->P1) | ec_is_zero(&K1_2->P2) | ec_is_zero(&K2_2->P1) | ec_is_zero(&K2_2->P2)) {
+        return 0;
+    }
+
+    // Now ensure that P1, Q1 and P2, Q2 are independent. For points of order two this means
+    // that they're not the same
+    if (ec_is_equal(&K1_2->P1, &K2_2->P1) | ec_is_equal(&K1_2->P2, &K2_2->P2)) {
+        return 0;
+    }
+
+    // Finally, double the points to ensure they all have order exactly 2
+    theta_couple_point_t O1, O2;
+    double_couple_point(&O1, K1_2, E12);
+    double_couple_point(&O2, K2_2, E12);
+    // If this check fails then the points had order 2*f for some f, and the kernel is malformed.
+    if (!(ec_is_zero(&O1.P1) & ec_is_zero(&O1.P2) & ec_is_zero(&O2.P1) & ec_is_zero(&O2.P2))) {
+        return 0;
+    }
+
+    return 1;
+}
+
+// Computes the action by translation for four points
+// (P1, P2) and (Q1, Q2) on E1 x E2 simultaneously to
+// save on inversions.
+// Returns 0 if the doubled points do not form valid
+// two-torsion (see verify_two_torsion), and 1 otherwise
+static int
+action_by_translation(translation_matrix_t *Gi,
+                      const theta_couple_point_t *K1_4,
+                      const theta_couple_point_t *K2_4,
+                      const theta_couple_curve_t *E12)
+{
+    // Compute points of order 2 from Ki_4
+    theta_couple_point_t K1_2, K2_2;
+    double_couple_point(&K1_2, K1_4, E12);
+    double_couple_point(&K2_2, K2_4, E12);
+
+    if (!verify_two_torsion(&K1_2, &K2_2, E12)) {
+        return 0;
+    }
+
+    // We need to invert four Z coordinates and
+    // four determinants which we do with batched
+    // inversion
+    fp2_t inverses[8];
+    action_by_translation_z_and_det(&inverses[0], &inverses[4], &K1_4->P1, &K1_2.P1);
+    action_by_translation_z_and_det(&inverses[1], &inverses[5], &K1_4->P2, &K1_2.P2);
+    action_by_translation_z_and_det(&inverses[2], &inverses[6], &K2_4->P1, &K2_2.P1);
+    action_by_translation_z_and_det(&inverses[3], &inverses[7], &K2_4->P2, &K2_2.P2);
+
+    fp2_batched_inv(inverses, 8);
+    if (fp2_is_zero(&inverses[0]))
+        return 0; // something was wrong with our input (which somehow was not caught by
+                  // verify_two_torsion)
+
+    action_by_translation_compute_matrix(&Gi[0], &K1_4->P1, &K1_2.P1, &inverses[0], &inverses[4]);
+    action_by_translation_compute_matrix(&Gi[1], &K1_4->P2, &K1_2.P2, &inverses[1], &inverses[5]);
+    action_by_translation_compute_matrix(&Gi[2], &K2_4->P1, &K2_2.P1, &inverses[2], &inverses[6]);
+    action_by_translation_compute_matrix(&Gi[3], &K2_4->P2, &K2_2.P2, &inverses[3], &inverses[7]);
+
+    return 1;
+}
+
+// Given the appropriate four torsion, computes the
+// change of basis to compute the correct theta null
+// point.
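+// The 4x4 matrix is assembled row by row from the 2x2 action-by-translation matrices Gi of the supplied 4-torsion points.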
+// Returns 0 if the order of K1_4 or K2_4 is not 4 +static int +gluing_change_of_basis(basis_change_matrix_t *M, + const theta_couple_point_t *K1_4, + const theta_couple_point_t *K2_4, + const theta_couple_curve_t *E12) +{ + // Compute the four 2x2 matrices for the action by translation + // on the four points: + translation_matrix_t Gi[4]; + if (!action_by_translation(Gi, K1_4, K2_4, E12)) + return 0; + + // Computation of the 4x4 matrix from Mij + // t001, t101 (resp t002, t102) first column of M11 * M21 (resp M12 * M22) + fp2_t t001, t101, t002, t102, tmp; + + fp2_mul(&t001, &Gi[0].g00, &Gi[2].g00); + fp2_mul(&tmp, &Gi[0].g01, &Gi[2].g10); + fp2_add(&t001, &t001, &tmp); + + fp2_mul(&t101, &Gi[0].g10, &Gi[2].g00); + fp2_mul(&tmp, &Gi[0].g11, &Gi[2].g10); + fp2_add(&t101, &t101, &tmp); + + fp2_mul(&t002, &Gi[1].g00, &Gi[3].g00); + fp2_mul(&tmp, &Gi[1].g01, &Gi[3].g10); + fp2_add(&t002, &t002, &tmp); + + fp2_mul(&t102, &Gi[1].g10, &Gi[3].g00); + fp2_mul(&tmp, &Gi[1].g11, &Gi[3].g10); + fp2_add(&t102, &t102, &tmp); + + // trace for the first row + fp2_set_one(&M->m[0][0]); + fp2_mul(&tmp, &t001, &t002); + fp2_add(&M->m[0][0], &M->m[0][0], &tmp); + fp2_mul(&tmp, &Gi[2].g00, &Gi[3].g00); + fp2_add(&M->m[0][0], &M->m[0][0], &tmp); + fp2_mul(&tmp, &Gi[0].g00, &Gi[1].g00); + fp2_add(&M->m[0][0], &M->m[0][0], &tmp); + + fp2_mul(&M->m[0][1], &t001, &t102); + fp2_mul(&tmp, &Gi[2].g00, &Gi[3].g10); + fp2_add(&M->m[0][1], &M->m[0][1], &tmp); + fp2_mul(&tmp, &Gi[0].g00, &Gi[1].g10); + fp2_add(&M->m[0][1], &M->m[0][1], &tmp); + + fp2_mul(&M->m[0][2], &t101, &t002); + fp2_mul(&tmp, &Gi[2].g10, &Gi[3].g00); + fp2_add(&M->m[0][2], &M->m[0][2], &tmp); + fp2_mul(&tmp, &Gi[0].g10, &Gi[1].g00); + fp2_add(&M->m[0][2], &M->m[0][2], &tmp); + + fp2_mul(&M->m[0][3], &t101, &t102); + fp2_mul(&tmp, &Gi[2].g10, &Gi[3].g10); + fp2_add(&M->m[0][3], &M->m[0][3], &tmp); + fp2_mul(&tmp, &Gi[0].g10, &Gi[1].g10); + fp2_add(&M->m[0][3], &M->m[0][3], &tmp); + + // Compute the action of (0,out.K2_4.P2) for the second row + fp2_mul(&tmp, &Gi[3].g01, &M->m[0][1]); + fp2_mul(&M->m[1][0], &Gi[3].g00, &M->m[0][0]); + fp2_add(&M->m[1][0], &M->m[1][0], &tmp); + + fp2_mul(&tmp, &Gi[3].g11, &M->m[0][1]); + fp2_mul(&M->m[1][1], &Gi[3].g10, &M->m[0][0]); + fp2_add(&M->m[1][1], &M->m[1][1], &tmp); + + fp2_mul(&tmp, &Gi[3].g01, &M->m[0][3]); + fp2_mul(&M->m[1][2], &Gi[3].g00, &M->m[0][2]); + fp2_add(&M->m[1][2], &M->m[1][2], &tmp); + + fp2_mul(&tmp, &Gi[3].g11, &M->m[0][3]); + fp2_mul(&M->m[1][3], &Gi[3].g10, &M->m[0][2]); + fp2_add(&M->m[1][3], &M->m[1][3], &tmp); + + // compute the action of (K1_4.P1,0) for the third row + fp2_mul(&tmp, &Gi[0].g01, &M->m[0][2]); + fp2_mul(&M->m[2][0], &Gi[0].g00, &M->m[0][0]); + fp2_add(&M->m[2][0], &M->m[2][0], &tmp); + + fp2_mul(&tmp, &Gi[0].g01, &M->m[0][3]); + fp2_mul(&M->m[2][1], &Gi[0].g00, &M->m[0][1]); + fp2_add(&M->m[2][1], &M->m[2][1], &tmp); + + fp2_mul(&tmp, &Gi[0].g11, &M->m[0][2]); + fp2_mul(&M->m[2][2], &Gi[0].g10, &M->m[0][0]); + fp2_add(&M->m[2][2], &M->m[2][2], &tmp); + + fp2_mul(&tmp, &Gi[0].g11, &M->m[0][3]); + fp2_mul(&M->m[2][3], &Gi[0].g10, &M->m[0][1]); + fp2_add(&M->m[2][3], &M->m[2][3], &tmp); + + // compute the action of (K1_4.P1,K2_4.P2) for the final row + fp2_mul(&tmp, &Gi[0].g01, &M->m[1][2]); + fp2_mul(&M->m[3][0], &Gi[0].g00, &M->m[1][0]); + fp2_add(&M->m[3][0], &M->m[3][0], &tmp); + + fp2_mul(&tmp, &Gi[0].g01, &M->m[1][3]); + fp2_mul(&M->m[3][1], &Gi[0].g00, &M->m[1][1]); + fp2_add(&M->m[3][1], &M->m[3][1], &tmp); + + fp2_mul(&tmp, &Gi[0].g11, &M->m[1][2]); + 
fp2_mul(&M->m[3][2], &Gi[0].g10, &M->m[1][0]);
+    fp2_add(&M->m[3][2], &M->m[3][2], &tmp);
+
+    fp2_mul(&tmp, &Gi[0].g11, &M->m[1][3]);
+    fp2_mul(&M->m[3][3], &Gi[0].g10, &M->m[1][1]);
+    fp2_add(&M->m[3][3], &M->m[3][3], &tmp);
+
+    return 1;
+}
+
+/**
+ * @brief Compute the gluing isogeny from an elliptic product
+ *
+ * @param out Output: the theta_gluing
+ * @param E12 an elliptic curve product
+ * @param xyK1_8 a couple point in E1[8] x E2[8]
+ * @param xyK2_8 a couple point in E1[8] x E2[8]
+ * @param verify when true, perform extra sanity checks on the supplied 8-torsion
+ *
+ * out : E1xE2 -> A of kernel [4](xyK1_8,xyK2_8)
+ * if the kernel supplied has the incorrect order, or gluing seems malformed,
+ * returns 0, otherwise returns 1.
+ */
+static int
+gluing_compute(theta_gluing_t *out,
+               const theta_couple_curve_t *E12,
+               const theta_couple_jac_point_t *xyK1_8,
+               const theta_couple_jac_point_t *xyK2_8,
+               bool verify)
+{
+    // Ensure that we have been given the eight torsion
+#ifndef NDEBUG
+    {
+        int check = test_jac_order_twof(&xyK1_8->P1, &E12->E1, 3);
+        if (!check)
+            debug_print("xyK1_8->P1 does not have order 8");
+        check = test_jac_order_twof(&xyK2_8->P1, &E12->E1, 3);
+        if (!check)
+            debug_print("xyK2_8->P1 does not have order 8");
+        check = test_jac_order_twof(&xyK1_8->P2, &E12->E2, 3);
+        if (!check)
+            debug_print("xyK1_8->P2 does not have order 8");
+        check = test_jac_order_twof(&xyK2_8->P2, &E12->E2, 3);
+        if (!check)
+            debug_print("xyK2_8->P2 does not have order 8");
+    }
+#endif
+
+    out->xyK1_8 = *xyK1_8;
+    out->domain = *E12;
+
+    // Given points in E[8] x E[8] we need the four torsion below
+    theta_couple_jac_point_t xyK1_4, xyK2_4;
+
+    double_couple_jac_point(&xyK1_4, xyK1_8, E12);
+    double_couple_jac_point(&xyK2_4, xyK2_8, E12);
+
+    // Convert from (X:Y:Z) coordinates to (X:Z)
+    theta_couple_point_t K1_8, K2_8;
+    theta_couple_point_t K1_4, K2_4;
+
+    couple_jac_to_xz(&K1_8, xyK1_8);
+    couple_jac_to_xz(&K2_8, xyK2_8);
+    couple_jac_to_xz(&K1_4, &xyK1_4);
+    couple_jac_to_xz(&K2_4, &xyK2_4);
+
+    // Set the basis change matrix, if we have not been given a valid K[8] for this computation
+    // gluing_change_of_basis will detect this and return 0
+    if (!gluing_change_of_basis(&out->M, &K1_4, &K2_4, E12)) {
+        debug_print("gluing failed as kernel does not have correct order");
+        return 0;
+    }
+
+    // apply the base change to the kernel
+    theta_point_t TT1, TT2;
+
+    base_change(&TT1, out, &K1_8);
+    base_change(&TT2, out, &K2_8);
+
+    // compute the codomain
+    to_squared_theta(&TT1, &TT1);
+    to_squared_theta(&TT2, &TT2);
+
+    // If the kernel is well formed then TT1.t and TT2.t are zero
+    // if they are not, we exit early as the signature we are validating
+    // is probably malformed
+    if (!(fp2_is_zero(&TT1.t) & fp2_is_zero(&TT2.t))) {
+        debug_print("gluing failed TT1.t or TT2.t is not zero");
+        return 0;
+    }
+    // Test our projective factors are non zero
+    if (fp2_is_zero(&TT1.x) | fp2_is_zero(&TT2.x) | fp2_is_zero(&TT1.y) | fp2_is_zero(&TT2.z) | fp2_is_zero(&TT1.z))
+        return 0; // invalid input
+
+    // Projective factor: Ax
+    fp2_mul(&out->codomain.x, &TT1.x, &TT2.x);
+    fp2_mul(&out->codomain.y, &TT1.y, &TT2.x);
+    fp2_mul(&out->codomain.z, &TT1.x, &TT2.z);
+    fp2_set_zero(&out->codomain.t);
+    // Projective factor: ABCxz
+    fp2_mul(&out->precomputation.x, &TT1.y, &TT2.z);
+    fp2_copy(&out->precomputation.y, &out->codomain.z);
+    fp2_copy(&out->precomputation.z, &out->codomain.y);
+    fp2_set_zero(&out->precomputation.t);
+
+    // Compute the two components of phi(K1_8) = (x:x:y:y).
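+    // Only the two distinct values are stored (imageK1_8.x and imageK1_8.y);
+    // gluing_eval_point later uses (y:y:x:x) as the projective inverse of this dual point.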
+ fp2_mul(&out->imageK1_8.x, &TT1.x, &out->precomputation.x); + fp2_mul(&out->imageK1_8.y, &TT1.z, &out->precomputation.z); + + // If K1_8 and K2_8 are our 8-torsion points, this ensures that the + // 4-torsion points [2]K1_8 and [2]K2_8 are isotropic. + if (verify) { + fp2_t t1, t2; + fp2_mul(&t1, &TT1.y, &out->precomputation.y); + if (!fp2_is_equal(&out->imageK1_8.x, &t1)) + return 0; + fp2_mul(&t1, &TT2.x, &out->precomputation.x); + fp2_mul(&t2, &TT2.z, &out->precomputation.z); + if (!fp2_is_equal(&t2, &t1)) + return 0; + } + + // compute the final codomain + hadamard(&out->codomain, &out->codomain); + return 1; +} + +// sub routine of the gluing eval +static void +gluing_eval_point(theta_point_t *image, const theta_couple_jac_point_t *P, const theta_gluing_t *phi) +{ + theta_point_t T1, T2; + add_components_t add_comp1, add_comp2; + + // Compute the cross addition components of P1+Q1 and P2+Q2 + jac_to_xz_add_components(&add_comp1, &P->P1, &phi->xyK1_8.P1, &phi->domain.E1); + jac_to_xz_add_components(&add_comp2, &P->P2, &phi->xyK1_8.P2, &phi->domain.E2); + + // Compute T1 and T2 derived from the cross addition components. + fp2_mul(&T1.x, &add_comp1.u, &add_comp2.u); // T1x = u1u2 + fp2_mul(&T2.t, &add_comp1.v, &add_comp2.v); // T2t = v1v2 + fp2_add(&T1.x, &T1.x, &T2.t); // T1x = u1u2 + v1v2 + fp2_mul(&T1.y, &add_comp1.u, &add_comp2.w); // T1y = u1w2 + fp2_mul(&T1.z, &add_comp1.w, &add_comp2.u); // T1z = w1u2 + fp2_mul(&T1.t, &add_comp1.w, &add_comp2.w); // T1t = w1w2 + fp2_add(&T2.x, &add_comp1.u, &add_comp1.v); // T2x = (u1+v1) + fp2_add(&T2.y, &add_comp2.u, &add_comp2.v); // T2y = (u2+v2) + fp2_mul(&T2.x, &T2.x, &T2.y); // T2x = (u1+v1)(u2+v2) + fp2_sub(&T2.x, &T2.x, &T1.x); // T1x = v1u2 + u1v2 + fp2_mul(&T2.y, &add_comp1.v, &add_comp2.w); // T2y = v1w2 + fp2_mul(&T2.z, &add_comp1.w, &add_comp2.v); // T2z = w1v2 + fp2_set_zero(&T2.t); // T2t = 0 + + // Apply the basis change and compute their respective square + // theta(P+Q) = M.T1 - M.T2 and theta(P-Q) = M.T1 + M.T2 + apply_isomorphism_general(&T1, &phi->M, &T1, true); + apply_isomorphism_general(&T2, &phi->M, &T2, false); + pointwise_square(&T1, &T1); + pointwise_square(&T2, &T2); + + // the difference between the two is therefore theta(P+Q)theta(P-Q) + // whose hadamard transform is then the product of the dual + // theta_points of phi(P) and phi(Q). + fp2_sub(&T1.x, &T1.x, &T2.x); + fp2_sub(&T1.y, &T1.y, &T2.y); + fp2_sub(&T1.z, &T1.z, &T2.z); + fp2_sub(&T1.t, &T1.t, &T2.t); + hadamard(&T1, &T1); + + // Compute (x, y, z, t) + // As imageK1_8 = (x:x:y:y), its inverse is (y:y:x:x). + fp2_mul(&image->x, &T1.x, &phi->imageK1_8.y); + fp2_mul(&image->y, &T1.y, &phi->imageK1_8.y); + fp2_mul(&image->z, &T1.z, &phi->imageK1_8.x); + fp2_mul(&image->t, &T1.t, &phi->imageK1_8.x); + + hadamard(image, image); +} + +// Same as gluing_eval_point but in the very special case where we already know that the point will +// have a zero coordinate at the place where the zero coordinate of the dual_theta_nullpoint would +// have made the computation difficult +static int +gluing_eval_point_special_case(theta_point_t *image, const theta_couple_point_t *P, const theta_gluing_t *phi) +{ + theta_point_t T; + + // Apply the basis change + base_change(&T, phi, P); + + // Apply the to_squared_theta transform + to_squared_theta(&T, &T); + + // This coordinate should always be 0 in a gluing because D=0. 
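+    // (D is the fourth coordinate of the dual theta null point, which gluing_compute sets to zero)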
+ // If this is not the case, something went very wrong, so reject + if (!fp2_is_zero(&T.t)) + return 0; + + // Compute (x, y, z, t) + fp2_mul(&image->x, &T.x, &phi->precomputation.x); + fp2_mul(&image->y, &T.y, &phi->precomputation.y); + fp2_mul(&image->z, &T.z, &phi->precomputation.z); + fp2_set_zero(&image->t); + + hadamard(image, image); + return 1; +} + +/** + * @brief Evaluate a gluing isogeny from an elliptic product on a basis + * + * @param image1 Output: the theta_point of the image of the first couple of points + * @param image2 Output : the theta point of the image of the second couple of points + * @param xyT1: A pair of points (X : Y : Z) on E1E2 to glue using phi + * @param xyT2: A pair of points (X : Y : Z) on E1E2 to glue using phi + * @param phi : a gluing isogeny E1 x E2 -> A + * + **/ +static void +gluing_eval_basis(theta_point_t *image1, + theta_point_t *image2, + const theta_couple_jac_point_t *xyT1, + const theta_couple_jac_point_t *xyT2, + const theta_gluing_t *phi) +{ + gluing_eval_point(image1, xyT1, phi); + gluing_eval_point(image2, xyT2, phi); +} + +/** + * @brief Compute a (2,2) isogeny in dimension 2 in the theta_model + * + * @param out Output: the theta_isogeny + * @param A a theta null point for the domain + * @param T1_8 a point in A[8] + * @param T2_8 a point in A[8] + * @param hadamard_bool_1 a boolean used for the last two steps of the chain + * @param hadamard_bool_2 a boolean used for the last two steps of the chain + * + * out : A -> B of kernel [4](T1_8,T2_8) + * hadamard_bool_1 controls if the domain is in standard or dual coordinates + * hadamard_bool_2 controls if the codomain is in standard or dual coordinates + * verify: add extra sanity check to ensure our 8-torsion points are coherent with the isogeny + * + */ +static int +theta_isogeny_compute(theta_isogeny_t *out, + const theta_structure_t *A, + const theta_point_t *T1_8, + const theta_point_t *T2_8, + bool hadamard_bool_1, + bool hadamard_bool_2, + bool verify) +{ + out->hadamard_bool_1 = hadamard_bool_1; + out->hadamard_bool_2 = hadamard_bool_2; + out->domain = *A; + out->T1_8 = *T1_8; + out->T2_8 = *T2_8; + out->codomain.precomputation = false; + + theta_point_t TT1, TT2; + + if (hadamard_bool_1) { + hadamard(&TT1, T1_8); + to_squared_theta(&TT1, &TT1); + hadamard(&TT2, T2_8); + to_squared_theta(&TT2, &TT2); + } else { + to_squared_theta(&TT1, T1_8); + to_squared_theta(&TT2, T2_8); + } + + fp2_t t1, t2; + + // Test that our projective factor ABCDxzw is non zero, where + // TT1=(Ax, Bx, Cy, Dy), TT2=(Az, Bw, Cz, Dw) + // But ABCDxzw=0 can only happen if we had an unexpected splitting in + // the isogeny chain. 
+ // In either case reject + // (this is not strictly necessary, we could just return (0:0:0:0)) + if (fp2_is_zero(&TT2.x) | fp2_is_zero(&TT2.y) | fp2_is_zero(&TT2.z) | fp2_is_zero(&TT2.t) | fp2_is_zero(&TT1.x) | + fp2_is_zero(&TT1.y)) + return 0; + + fp2_mul(&t1, &TT1.x, &TT2.y); + fp2_mul(&t2, &TT1.y, &TT2.x); + fp2_mul(&out->codomain.null_point.x, &TT2.x, &t1); + fp2_mul(&out->codomain.null_point.y, &TT2.y, &t2); + fp2_mul(&out->codomain.null_point.z, &TT2.z, &t1); + fp2_mul(&out->codomain.null_point.t, &TT2.t, &t2); + fp2_t t3; + fp2_mul(&t3, &TT2.z, &TT2.t); + fp2_mul(&out->precomputation.x, &t3, &TT1.y); + fp2_mul(&out->precomputation.y, &t3, &TT1.x); + fp2_copy(&out->precomputation.z, &out->codomain.null_point.t); + fp2_copy(&out->precomputation.t, &out->codomain.null_point.z); + + // If T1_8 and T2_8 are our 8-torsion points, this ensures that the + // 4-torsion points 2T1_8 and 2T2_8 are isotropic. + if (verify) { + fp2_mul(&t1, &TT1.x, &out->precomputation.x); + fp2_mul(&t2, &TT1.y, &out->precomputation.y); + if (!fp2_is_equal(&t1, &t2)) + return 0; + fp2_mul(&t1, &TT1.z, &out->precomputation.z); + fp2_mul(&t2, &TT1.t, &out->precomputation.t); + if (!fp2_is_equal(&t1, &t2)) + return 0; + fp2_mul(&t1, &TT2.x, &out->precomputation.x); + fp2_mul(&t2, &TT2.z, &out->precomputation.z); + if (!fp2_is_equal(&t1, &t2)) + return 0; + fp2_mul(&t1, &TT2.y, &out->precomputation.y); + fp2_mul(&t2, &TT2.t, &out->precomputation.t); + if (!fp2_is_equal(&t1, &t2)) + return 0; + } + + if (hadamard_bool_2) { + hadamard(&out->codomain.null_point, &out->codomain.null_point); + } + return 1; +} + +/** + * @brief Compute a (2,2) isogeny when only the 4 torsion above the kernel is known and not the 8 + * torsion + * + * @param out Output: the theta_isogeny + * @param A a theta null point for the domain + * @param T1_4 a point in A[4] + * @param T2_4 a point in A[4] + * @param hadamard_bool_1 a boolean + * @param hadamard_bool_2 a boolean + * + * out : A -> B of kernel [2](T1_4,T2_4) + * hadamard_bool_1 controls if the domain is in standard or dual coordinates + * hadamard_bool_2 controls if the codomain is in standard or dual coordinates + * + */ +static void +theta_isogeny_compute_4(theta_isogeny_t *out, + const theta_structure_t *A, + const theta_point_t *T1_4, + const theta_point_t *T2_4, + bool hadamard_bool_1, + bool hadamard_bool_2) +{ + out->hadamard_bool_1 = hadamard_bool_1; + out->hadamard_bool_2 = hadamard_bool_2; + out->domain = *A; + out->T1_8 = *T1_4; + out->T2_8 = *T2_4; + out->codomain.precomputation = false; + + theta_point_t TT1, TT2; + // we will compute: + // TT1 = (xAB, _ , xCD, _) + // TT2 = (AA,BB,CC,DD) + + // fp2_t xA_inv,zA_inv,tB_inv; + + if (hadamard_bool_1) { + hadamard(&TT1, T1_4); + to_squared_theta(&TT1, &TT1); + + hadamard(&TT2, &A->null_point); + to_squared_theta(&TT2, &TT2); + } else { + to_squared_theta(&TT1, T1_4); + to_squared_theta(&TT2, &A->null_point); + } + + fp2_t sqaabb, sqaacc; + fp2_mul(&sqaabb, &TT2.x, &TT2.y); + fp2_mul(&sqaacc, &TT2.x, &TT2.z); + // No need to check the square roots, only used for signing. 
+ // sqaabb = sqrt(AA*BB) + fp2_sqrt(&sqaabb); + // sqaacc = sqrt(AA*CC) + fp2_sqrt(&sqaacc); + + // we compute out->codomain.null_point = (xAB * sqaacc * AA, xAB *sqaabb *sqaacc, xCD*sqaabb * + // AA) out->precomputation = (xAB * BB * CC *DD , sqaabb * CC * DD * xAB , sqaacc * BB* DD * xAB + // , xCD * sqaabb *sqaacc * BB) + + fp2_mul(&out->codomain.null_point.y, &sqaabb, &sqaacc); + fp2_mul(&out->precomputation.t, &out->codomain.null_point.y, &TT1.z); + fp2_mul(&out->codomain.null_point.y, &out->codomain.null_point.y, + &TT1.x); // done for out->codomain.null_point.y + + fp2_mul(&out->codomain.null_point.t, &TT1.z, &sqaabb); + fp2_mul(&out->codomain.null_point.t, &out->codomain.null_point.t, + &TT2.x); // done for out->codomain.null_point.t + + fp2_mul(&out->codomain.null_point.x, &TT1.x, &TT2.x); + fp2_mul(&out->codomain.null_point.z, &out->codomain.null_point.x, + &TT2.z); // done for out->codomain.null_point.z + fp2_mul(&out->codomain.null_point.x, &out->codomain.null_point.x, + &sqaacc); // done for out->codomain.null_point.x + + fp2_mul(&out->precomputation.x, &TT1.x, &TT2.t); + fp2_mul(&out->precomputation.z, &out->precomputation.x, &TT2.y); + fp2_mul(&out->precomputation.x, &out->precomputation.x, &TT2.z); + fp2_mul(&out->precomputation.y, &out->precomputation.x, &sqaabb); // done for out->precomputation.y + fp2_mul(&out->precomputation.x, &out->precomputation.x, &TT2.y); // done for out->precomputation.x + fp2_mul(&out->precomputation.z, &out->precomputation.z, &sqaacc); // done for out->precomputation.z + fp2_mul(&out->precomputation.t, &out->precomputation.t, &TT2.y); // done for out->precomputation.t + + if (hadamard_bool_2) { + hadamard(&out->codomain.null_point, &out->codomain.null_point); + } +} + +/** + * @brief Compute a (2,2) isogeny when only the kernel is known and not the 8 or 4 torsion above + * + * @param out Output: the theta_isogeny + * @param A a theta null point for the domain + * @param T1_2 a point in A[2] + * @param T2_2 a point in A[2] + * @param hadamard_bool_1 a boolean + * @param boo2 a boolean + * + * out : A -> B of kernel (T1_2,T2_2) + * hadamard_bool_1 controls if the domain is in standard or dual coordinates + * hadamard_bool_2 controls if the codomain is in standard or dual coordinates + * + */ +static void +theta_isogeny_compute_2(theta_isogeny_t *out, + const theta_structure_t *A, + const theta_point_t *T1_2, + const theta_point_t *T2_2, + bool hadamard_bool_1, + bool hadamard_bool_2) +{ + out->hadamard_bool_1 = hadamard_bool_1; + out->hadamard_bool_2 = hadamard_bool_2; + out->domain = *A; + out->T1_8 = *T1_2; + out->T2_8 = *T2_2; + out->codomain.precomputation = false; + + theta_point_t TT2; + // we will compute: + // TT2 = (AA,BB,CC,DD) + + if (hadamard_bool_1) { + hadamard(&TT2, &A->null_point); + to_squared_theta(&TT2, &TT2); + } else { + to_squared_theta(&TT2, &A->null_point); + } + + // we compute out->codomain.null_point = (AA,sqaabb, sqaacc, sqaadd) + // out->precomputation = ( BB * CC *DD , sqaabb * CC * DD , sqaacc * BB* DD , sqaadd * BB * CC) + fp2_copy(&out->codomain.null_point.x, &TT2.x); + fp2_mul(&out->codomain.null_point.y, &TT2.x, &TT2.y); + fp2_mul(&out->codomain.null_point.z, &TT2.x, &TT2.z); + fp2_mul(&out->codomain.null_point.t, &TT2.x, &TT2.t); + // No need to check the square roots, only used for signing. 
+ fp2_sqrt(&out->codomain.null_point.y); + fp2_sqrt(&out->codomain.null_point.z); + fp2_sqrt(&out->codomain.null_point.t); + + fp2_mul(&out->precomputation.x, &TT2.z, &TT2.t); + fp2_mul(&out->precomputation.y, + &out->precomputation.x, + &out->codomain.null_point.y); // done for out->precomputation.y + fp2_mul(&out->precomputation.x, &out->precomputation.x, &TT2.y); // done for out->precomputation.x + fp2_mul(&out->precomputation.z, &TT2.t, &out->codomain.null_point.z); + fp2_mul(&out->precomputation.z, &out->precomputation.z, &TT2.y); // done for out->precomputation.z + fp2_mul(&out->precomputation.t, &TT2.z, &out->codomain.null_point.t); + fp2_mul(&out->precomputation.t, &out->precomputation.t, &TT2.y); // done for out->precomputation.t + + if (hadamard_bool_2) { + hadamard(&out->codomain.null_point, &out->codomain.null_point); + } +} + +static void +theta_isogeny_eval(theta_point_t *out, const theta_isogeny_t *phi, const theta_point_t *P) +{ + if (phi->hadamard_bool_1) { + hadamard(out, P); + to_squared_theta(out, out); + } else { + to_squared_theta(out, P); + } + fp2_mul(&out->x, &out->x, &phi->precomputation.x); + fp2_mul(&out->y, &out->y, &phi->precomputation.y); + fp2_mul(&out->z, &out->z, &phi->precomputation.z); + fp2_mul(&out->t, &out->t, &phi->precomputation.t); + + if (phi->hadamard_bool_2) { + hadamard(out, out); + } +} + +#if defined(ENABLE_SIGN) +// Sample a random secret index in [0, 5] to select one of the 6 normalisation +// matrices for the normalisation of the output of the (2,2)-chain during +// splitting +static unsigned char +sample_random_index(void) +{ + // To avoid bias in reduction we should only consider integers smaller + // than 2^32 which are a multiple of 6, so we only reduce bytes with a + // value in [0, 4294967292-1]. + // We have 4294967292/2^32 = ~99.9999999% chance that the first try is "good". 
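+    // The reduction after the sampling loop below is constant time:
+    // 2863311531 = ceil(2^34 / 6), so for any 32-bit value of seed the
+    // product-and-shift ((uint64_t)seed * 2863311531) >> 34 equals seed / 6,
+    // and subtracting 6 times that quotient yields seed mod 6 without a
+    // variable-time modulo instruction.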
+ unsigned char seed_arr[4]; + uint32_t seed; + + do { + randombytes(seed_arr, 4); + seed = (seed_arr[0] | (seed_arr[1] << 8) | (seed_arr[2] << 16) | (seed_arr[3] << 24)); + } while (seed >= 4294967292U); + + uint32_t secret_index = seed - (((uint64_t)seed * 2863311531U) >> 34) * 6; + assert(secret_index == seed % 6); // ensure the constant time trick above works + return (unsigned char)secret_index; +} +#endif + +static bool +splitting_compute(theta_splitting_t *out, const theta_structure_t *A, int zero_index, bool randomize) + +{ + // init + uint32_t ctl; + uint32_t count = 0; + fp2_t U_cst, t1, t2; + + memset(&out->M, 0, sizeof(basis_change_matrix_t)); + + // enumerate through all indices + for (int i = 0; i < 10; i++) { + fp2_set_zero(&U_cst); + for (int t = 0; t < 4; t++) { + // Iterate through the null point + choose_index_theta_point(&t2, t, &A->null_point); + choose_index_theta_point(&t1, t ^ EVEN_INDEX[i][1], &A->null_point); + + // Compute t1 * t2 + fp2_mul(&t1, &t1, &t2); + // If CHI_EVAL(i,t) is +1 we want ctl to be 0 and + // If CHI_EVAL(i,t) is -1 we want ctl to be 0xFF..FF + ctl = (uint32_t)(CHI_EVAL[EVEN_INDEX[i][0]][t] >> 1); + assert(ctl == 0 || ctl == 0xffffffff); + + fp2_neg(&t2, &t1); + fp2_select(&t1, &t1, &t2, ctl); + + // Then we compute U_cst ± (t1 * t2) + fp2_add(&U_cst, &U_cst, &t1); + } + + // If U_cst is 0 then update the splitting matrix + ctl = fp2_is_zero(&U_cst); + count -= ctl; + select_base_change_matrix(&out->M, &out->M, &SPLITTING_TRANSFORMS[i], ctl); + if (zero_index != -1 && i == zero_index && + !ctl) { // extra checks if we know exactly where the 0 index should be + return 0; + } + } + +#if defined(ENABLE_SIGN) + // Pick a random normalization matrix + if (randomize) { + unsigned char secret_index = sample_random_index(); + basis_change_matrix_t Mrandom; + + set_base_change_matrix_from_precomp(&Mrandom, &NORMALIZATION_TRANSFORMS[0]); + + // Use a constant time selection to pick the index we want + for (unsigned char i = 1; i < 6; i++) { + // When i == secret_index, mask == 0 and 0xFF..FF otherwise + int32_t mask = i - secret_index; + mask = (mask | -mask) >> 31; + select_base_change_matrix(&Mrandom, &Mrandom, &NORMALIZATION_TRANSFORMS[i], ~mask); + } + base_change_matrix_multiplication(&out->M, &Mrandom, &out->M); + } +#else + assert(!randomize); +#endif + + // apply the isomorphism to ensure the null point is compatible with splitting + apply_isomorphism(&out->B.null_point, &out->M, &A->null_point); + + // splitting was successful only if exactly one zero was identified + return count == 1; +} + +static int +theta_product_structure_to_elliptic_product(theta_couple_curve_t *E12, theta_structure_t *A) +{ + fp2_t xx, yy; + + // This should be true from our computations in splitting_compute + // but still check this for sanity + if (!is_product_theta_point(&A->null_point)) + return 0; + + ec_curve_init(&(E12->E1)); + ec_curve_init(&(E12->E2)); + + // A valid elliptic theta null point has no zero coordinate + if (fp2_is_zero(&A->null_point.x) | fp2_is_zero(&A->null_point.y) | fp2_is_zero(&A->null_point.z)) + return 0; + + // xx = x², yy = y² + fp2_sqr(&xx, &A->null_point.x); + fp2_sqr(&yy, &A->null_point.y); + // xx = x^4, yy = y^4 + fp2_sqr(&xx, &xx); + fp2_sqr(&yy, &yy); + + // A2 = -2(x^4+y^4)/(x^4-y^4) + fp2_add(&E12->E2.A, &xx, &yy); + fp2_sub(&E12->E2.C, &xx, &yy); + fp2_add(&E12->E2.A, &E12->E2.A, &E12->E2.A); + fp2_neg(&E12->E2.A, &E12->E2.A); + + // same with x,z + fp2_sqr(&xx, &A->null_point.x); + fp2_sqr(&yy, &A->null_point.z); + fp2_sqr(&xx, 
&xx); + fp2_sqr(&yy, &yy); + + // A1 = -2(x^4+z^4)/(x^4-z^4) + fp2_add(&E12->E1.A, &xx, &yy); + fp2_sub(&E12->E1.C, &xx, &yy); + fp2_add(&E12->E1.A, &E12->E1.A, &E12->E1.A); + fp2_neg(&E12->E1.A, &E12->E1.A); + + if (fp2_is_zero(&E12->E1.C) | fp2_is_zero(&E12->E2.C)) + return 0; + + return 1; +} + +static int +theta_point_to_montgomery_point(theta_couple_point_t *P12, const theta_point_t *P, const theta_structure_t *A) +{ + fp2_t temp; + const fp2_t *x, *z; + + if (!is_product_theta_point(P)) + return 0; + + x = &P->x; + z = &P->y; + if (fp2_is_zero(x) & fp2_is_zero(z)) { + x = &P->z; + z = &P->t; + } + if (fp2_is_zero(x) & fp2_is_zero(z)) { + return 0; // at this point P=(0:0:0:0) so is invalid + } + // P2.X = A.null_point.y * P.x + A.null_point.x * P.y + // P2.Z = - A.null_point.y * P.x + A.null_point.x * P.y + fp2_mul(&P12->P2.x, &A->null_point.y, x); + fp2_mul(&temp, &A->null_point.x, z); + fp2_sub(&P12->P2.z, &temp, &P12->P2.x); + fp2_add(&P12->P2.x, &P12->P2.x, &temp); + + x = &P->x; + z = &P->z; + if (fp2_is_zero(x) & fp2_is_zero(z)) { + x = &P->y; + z = &P->t; + } + // P1.X = A.null_point.z * P.x + A.null_point.x * P.z + // P1.Z = -A.null_point.z * P.x + A.null_point.x * P.z + fp2_mul(&P12->P1.x, &A->null_point.z, x); + fp2_mul(&temp, &A->null_point.x, z); + fp2_sub(&P12->P1.z, &temp, &P12->P1.x); + fp2_add(&P12->P1.x, &P12->P1.x, &temp); + return 1; +} + +static int +_theta_chain_compute_impl(unsigned n, + theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP, + bool verify, + bool randomize) +{ + theta_structure_t theta; + + // lift the basis + theta_couple_jac_point_t xyT1, xyT2; + + ec_basis_t bas1 = { .P = ker->T1.P1, .Q = ker->T2.P1, .PmQ = ker->T1m2.P1 }; + ec_basis_t bas2 = { .P = ker->T1.P2, .Q = ker->T2.P2, .PmQ = ker->T1m2.P2 }; + if (!lift_basis(&xyT1.P1, &xyT2.P1, &bas1, &E12->E1)) + return 0; + if (!lift_basis(&xyT1.P2, &xyT2.P2, &bas2, &E12->E2)) + return 0; + + const unsigned extra = HD_extra_torsion * extra_torsion; + +#ifndef NDEBUG + assert(extra == 0 || extra == 2); // only cases implemented + if (!test_point_order_twof(&bas2.P, &E12->E2, n + extra)) + debug_print("bas2.P does not have correct order"); + + if (!test_jac_order_twof(&xyT2.P2, &E12->E2, n + extra)) + debug_print("xyT2.P2 does not have correct order"); +#endif + + theta_point_t pts[numP ? numP : 1]; + + int space = 1; + for (unsigned i = 1; i < n; i *= 2) + ++space; + + uint16_t todo[space]; + todo[0] = n - 2 + extra; + + int current = 0; + + // kernel points for the gluing isogeny + theta_couple_jac_point_t jacQ1[space], jacQ2[space]; + jacQ1[0] = xyT1; + jacQ2[0] = xyT2; + while (todo[current] != 1) { + assert(todo[current] >= 2); + ++current; + assert(current < space); + // the gluing isogeny is quite a bit more expensive than the others, + // so we adjust the usual splitting rule here a little bit: towards + // the end of the doubling chain it will be cheaper to recompute the + // doublings after evaluation than to push the intermediate points. + const unsigned num_dbls = todo[current - 1] >= 16 ? 
todo[current - 1] / 2 : todo[current - 1] - 1; + assert(num_dbls && num_dbls < todo[current - 1]); + double_couple_jac_point_iter(&jacQ1[current], num_dbls, &jacQ1[current - 1], E12); + double_couple_jac_point_iter(&jacQ2[current], num_dbls, &jacQ2[current - 1], E12); + todo[current] = todo[current - 1] - num_dbls; + } + + // kernel points for the remaining isogeny steps + theta_point_t thetaQ1[space], thetaQ2[space]; + + // the gluing step + theta_gluing_t first_step; + { + assert(todo[current] == 1); + + // compute the gluing isogeny + if (!gluing_compute(&first_step, E12, &jacQ1[current], &jacQ2[current], verify)) + return 0; + + // evaluate + for (unsigned j = 0; j < numP; ++j) { + assert(ec_is_zero(&P12[j].P1) || ec_is_zero(&P12[j].P2)); + if (!gluing_eval_point_special_case(&pts[j], &P12[j], &first_step)) + return 0; + } + + // push kernel points through gluing isogeny + for (int j = 0; j < current; ++j) { + gluing_eval_basis(&thetaQ1[j], &thetaQ2[j], &jacQ1[j], &jacQ2[j], &first_step); + --todo[j]; + } + + --current; + } + + // set-up the theta_structure for the first codomain + theta.null_point = first_step.codomain; + theta.precomputation = 0; + theta_precomputation(&theta); + + theta_isogeny_t step; + + // and now we do the remaining steps + for (unsigned i = 1; current >= 0 && todo[current]; ++i) { + assert(current < space); + while (todo[current] != 1) { + assert(todo[current] >= 2); + ++current; + assert(current < space); + const unsigned num_dbls = todo[current - 1] / 2; + assert(num_dbls && num_dbls < todo[current - 1]); + double_iter(&thetaQ1[current], &theta, &thetaQ1[current - 1], num_dbls); + double_iter(&thetaQ2[current], &theta, &thetaQ2[current - 1], num_dbls); + todo[current] = todo[current - 1] - num_dbls; + } + + // computing the next step + int ret; + if (i == n - 2) // penultimate step + ret = theta_isogeny_compute(&step, &theta, &thetaQ1[current], &thetaQ2[current], 0, 0, verify); + else if (i == n - 1) // ultimate step + ret = theta_isogeny_compute(&step, &theta, &thetaQ1[current], &thetaQ2[current], 1, 0, false); + else + ret = theta_isogeny_compute(&step, &theta, &thetaQ1[current], &thetaQ2[current], 0, 1, verify); + if (!ret) + return 0; + + for (unsigned j = 0; j < numP; ++j) + theta_isogeny_eval(&pts[j], &step, &pts[j]); + + // updating the codomain + theta = step.codomain; + + // pushing the kernel + assert(todo[current] == 1); + for (int j = 0; j < current; ++j) { + theta_isogeny_eval(&thetaQ1[j], &step, &thetaQ1[j]); + theta_isogeny_eval(&thetaQ2[j], &step, &thetaQ2[j]); + assert(todo[j]); + --todo[j]; + } + + --current; + } + + assert(current == -1); + + if (!extra_torsion) { + if (n >= 3) { + // in the last step we've skipped pushing the kernel since current was == 0, let's do it now + theta_isogeny_eval(&thetaQ1[0], &step, &thetaQ1[0]); + theta_isogeny_eval(&thetaQ2[0], &step, &thetaQ2[0]); + } + + // penultimate step + theta_isogeny_compute_4(&step, &theta, &thetaQ1[0], &thetaQ2[0], 0, 0); + for (unsigned j = 0; j < numP; ++j) + theta_isogeny_eval(&pts[j], &step, &pts[j]); + theta = step.codomain; + theta_isogeny_eval(&thetaQ1[0], &step, &thetaQ1[0]); + theta_isogeny_eval(&thetaQ2[0], &step, &thetaQ2[0]); + + // ultimate step + theta_isogeny_compute_2(&step, &theta, &thetaQ1[0], &thetaQ2[0], 1, 0); + for (unsigned j = 0; j < numP; ++j) + theta_isogeny_eval(&pts[j], &step, &pts[j]); + theta = step.codomain; + } + + // final splitting step + theta_splitting_t last_step; + + bool is_split = splitting_compute(&last_step, &theta, extra_torsion ? 
8 : -1, randomize); + + if (!is_split) { + debug_print("kernel did not generate an isogeny between elliptic products"); + return 0; + } + + if (!theta_product_structure_to_elliptic_product(E34, &last_step.B)) + return 0; + + // evaluate + for (size_t j = 0; j < numP; ++j) { + apply_isomorphism(&pts[j], &last_step.M, &pts[j]); + if (!theta_point_to_montgomery_point(&P12[j], &pts[j], &last_step.B)) + return 0; + } + + return 1; +} + +int +theta_chain_compute_and_eval(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP) +{ + return _theta_chain_compute_impl(n, E12, ker, extra_torsion, E34, P12, numP, false, false); +} + +// Like theta_chain_compute_and_eval, adding extra verification checks; +// used in the signature verification +int +theta_chain_compute_and_eval_verify(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP) +{ + return _theta_chain_compute_impl(n, E12, ker, extra_torsion, E34, P12, numP, true, false); +} + +int +theta_chain_compute_and_eval_randomized(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP) +{ + return _theta_chain_compute_impl(n, E12, ker, extra_torsion, E34, P12, numP, false, true); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/theta_isogenies.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/theta_isogenies.h new file mode 100644 index 0000000000..d151811fe7 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/theta_isogenies.h @@ -0,0 +1,18 @@ +/** @file + * + * @authors Antonin Leroux + * + * @brief the theta isogeny header + */ + +#ifndef THETA_ISOGENY_H +#define THETA_ISOGENY_H + +#include +#include +#include +#include "theta_structure.h" +#include +#include + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/theta_structure.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/theta_structure.c new file mode 100644 index 0000000000..ce97ac61a8 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/theta_structure.c @@ -0,0 +1,78 @@ +#include "theta_structure.h" +#include + +void +theta_precomputation(theta_structure_t *A) +{ + + if (A->precomputation) { + return; + } + + theta_point_t A_dual; + to_squared_theta(&A_dual, &A->null_point); + + fp2_t t1, t2; + fp2_mul(&t1, &A_dual.x, &A_dual.y); + fp2_mul(&t2, &A_dual.z, &A_dual.t); + fp2_mul(&A->XYZ0, &t1, &A_dual.z); + fp2_mul(&A->XYT0, &t1, &A_dual.t); + fp2_mul(&A->YZT0, &t2, &A_dual.y); + fp2_mul(&A->XZT0, &t2, &A_dual.x); + + fp2_mul(&t1, &A->null_point.x, &A->null_point.y); + fp2_mul(&t2, &A->null_point.z, &A->null_point.t); + fp2_mul(&A->xyz0, &t1, &A->null_point.z); + fp2_mul(&A->xyt0, &t1, &A->null_point.t); + fp2_mul(&A->yzt0, &t2, &A->null_point.y); + fp2_mul(&A->xzt0, &t2, &A->null_point.x); + + A->precomputation = true; +} + +void +double_point(theta_point_t *out, theta_structure_t *A, const theta_point_t *in) +{ + to_squared_theta(out, in); + fp2_sqr(&out->x, &out->x); + fp2_sqr(&out->y, &out->y); + fp2_sqr(&out->z, &out->z); + fp2_sqr(&out->t, &out->t); + + if (!A->precomputation) { + theta_precomputation(A); + } + fp2_mul(&out->x, &out->x, &A->YZT0); + fp2_mul(&out->y, &out->y, &A->XZT0); + fp2_mul(&out->z, &out->z, &A->XYT0); + fp2_mul(&out->t, 
&out->t, &A->XYZ0); + + hadamard(out, out); + + fp2_mul(&out->x, &out->x, &A->yzt0); + fp2_mul(&out->y, &out->y, &A->xzt0); + fp2_mul(&out->z, &out->z, &A->xyt0); + fp2_mul(&out->t, &out->t, &A->xyz0); +} + +void +double_iter(theta_point_t *out, theta_structure_t *A, const theta_point_t *in, int exp) +{ + if (exp == 0) { + *out = *in; + } else { + double_point(out, A, in); + for (int i = 1; i < exp; i++) { + double_point(out, A, out); + } + } +} + +uint32_t +is_product_theta_point(const theta_point_t *P) +{ + fp2_t t1, t2; + fp2_mul(&t1, &P->x, &P->t); + fp2_mul(&t2, &P->y, &P->z); + return fp2_is_equal(&t1, &t2); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/theta_structure.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/theta_structure.h new file mode 100644 index 0000000000..fc630b750a --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/theta_structure.h @@ -0,0 +1,135 @@ +/** @file + * + * @authors Antonin Leroux + * + * @brief the theta structure header + */ + +#ifndef THETA_STRUCTURE_H +#define THETA_STRUCTURE_H + +#include +#include +#include + +/** @internal + * @ingroup hd_module + * @defgroup hd_theta Functions for theta structures + * @{ + */ + +/** + * @brief Perform the hadamard transform on a theta point + * + * @param out Output: the theta_point + * @param in a theta point* + * in = (x,y,z,t) + * out = (x+y+z+t, x-y+z-t, x+y-z-t, x-y-z+t) + * + */ +static inline void +hadamard(theta_point_t *out, const theta_point_t *in) +{ + fp2_t t1, t2, t3, t4; + + // t1 = x + y + fp2_add(&t1, &in->x, &in->y); + // t2 = x - y + fp2_sub(&t2, &in->x, &in->y); + // t3 = z + t + fp2_add(&t3, &in->z, &in->t); + // t4 = z - t + fp2_sub(&t4, &in->z, &in->t); + + fp2_add(&out->x, &t1, &t3); + fp2_add(&out->y, &t2, &t4); + fp2_sub(&out->z, &t1, &t3); + fp2_sub(&out->t, &t2, &t4); +} + +/** + * @brief Square the coordinates of a theta point + * @param out Output: the theta_point + * @param in a theta point* + * in = (x,y,z,t) + * out = (x^2, y^2, z^2, t^2) + * + */ +static inline void +pointwise_square(theta_point_t *out, const theta_point_t *in) +{ + fp2_sqr(&out->x, &in->x); + fp2_sqr(&out->y, &in->y); + fp2_sqr(&out->z, &in->z); + fp2_sqr(&out->t, &in->t); +} + +/** + * @brief Square the coordinates and then perform the hadamard transform + * + * @param out Output: the theta_point + * @param in a theta point* + * in = (x,y,z,t) + * out = (x^2+y^2+z^2+t^2, x^2-y^2+z^2-t^2, x^2+y^2-z^2-t^2, x^2-y^2-z^2+t^2) + * + */ +static inline void +to_squared_theta(theta_point_t *out, const theta_point_t *in) +{ + pointwise_square(out, in); + hadamard(out, out); +} + +/** + * @brief Perform the theta structure precomputation + * + * @param A Output: the theta_structure + * + * if A.null_point = (x,y,z,t) + * if (xx,yy,zz,tt) = to_squared_theta(A.null_point) + * Computes y0,z0,t0,Y0,Z0,T0 = x/y,x/z,x/t,XX/YY,XX/ZZ,XX/TT + * + */ +void theta_precomputation(theta_structure_t *A); + +/** + * @brief Compute the double of the theta point in on the theta struc A + * + * @param out Output: the theta_point + * @param A a theta structure + * @param in a theta point in the theta structure A + * in = (x,y,z,t) + * out = [2] (x,y,z,t) + * /!\ assumes that no coordinates is zero and that the precomputation of A has been done + * + */ +void double_point(theta_point_t *out, theta_structure_t *A, const theta_point_t *in); + +/** + * @brief Compute the iterated double of the theta point in on the theta struc A + * + * @param out Output: the theta_point + * @param A a theta structure + * @param in a 
theta point in the theta structure A + * @param exp the exponent + * in = (x,y,z,t) + * out = [2^2] (x,y,z,t) + * /!\ assumes that no coordinates is zero and that the precomputation of A has been done + * + */ +void double_iter(theta_point_t *out, theta_structure_t *A, const theta_point_t *in, int exp); + +/* + * @brief Check if a theta point is a product theta point + * + * @param P a theta point + * @return 0xFFFFFFFF if true, zero otherwise + */ +uint32_t is_product_theta_point(const theta_point_t *P); + +// end hd_theta +/** + * @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/tools.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/tools.c new file mode 100644 index 0000000000..242ea08fe2 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/tools.c @@ -0,0 +1,75 @@ +#include +#include + +static clock_t global_timer; + +clock_t +tic(void) +{ + global_timer = clock(); + return global_timer; +} + +float +tac(void) +{ + float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); + return ms; +} + +float +TAC(const char *str) +{ + float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); +#ifndef NDEBUG + printf("%s [%d ms]\n", str, (int)ms); +#endif + return ms; +} + +float +toc(const clock_t t) +{ + float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); + return ms; +} + +float +TOC(const clock_t t, const char *str) +{ + float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); + printf("%s [%d ms]\n", str, (int)ms); + return ms; + // printf("%s [%ld]\n",str,clock()-t); + // return (float) (clock()-t); +} + +float +TOC_clock(const clock_t t, const char *str) +{ + printf("%s [%ld]\n", str, clock() - t); + return (float)(clock() - t); +} + +clock_t +dclock(const clock_t t) +{ + return (clock() - t); +} + +float +clock_to_time(const clock_t t, const char *str) +{ + float ms = (1000. 
* (float)(t) / CLOCKS_PER_SEC); + printf("%s [%d ms]\n", str, (int)ms); + return ms; + // printf("%s [%ld]\n",str,t); + // return (float) (t); +} + +float +clock_print(const clock_t t, const char *str) +{ + printf("%s [%ld]\n", str, t); + return (float)(t); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/tools.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/tools.h new file mode 100644 index 0000000000..5a6a505fc1 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/tools.h @@ -0,0 +1,49 @@ + +#ifndef TOOLS_H +#define TOOLS_H + +#include + +// Debug printing: +// https://stackoverflow.com/questions/1644868/define-macro-for-debug-printing-in-c +#ifndef NDEBUG +#define DEBUG_PRINT 1 +#else +#define DEBUG_PRINT 0 +#endif + +#ifndef __FILE_NAME__ +#define __FILE_NAME__ "NA" +#endif + +#ifndef __LINE__ +#define __LINE__ 0 +#endif + +#ifndef __func__ +#define __func__ "NA" +#endif + +#define debug_print(fmt) \ + do { \ + if (DEBUG_PRINT) \ + printf("warning: %s, file %s, line %d, function %s().\n", \ + fmt, \ + __FILE_NAME__, \ + __LINE__, \ + __func__); \ + } while (0) + + +clock_t tic(void); +float tac(void); /* time in ms since last tic */ +float TAC(const char *str); /* same, but prints it with label 'str' */ +float toc(const clock_t t); /* time in ms since t */ +float TOC(const clock_t t, const char *str); /* same, but prints it with label 'str' */ +float TOC_clock(const clock_t t, const char *str); + +clock_t dclock(const clock_t t); // return the clock cycle diff between now and t +float clock_to_time(const clock_t t, + const char *str); // convert the number of clock cycles t to time +float clock_print(const clock_t t, const char *str); +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/torsion_constants.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/torsion_constants.c new file mode 100644 index 0000000000..1a6c203035 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/torsion_constants.c @@ -0,0 +1,43 @@ +#include +#include +#include +const ibz_t TWO_TO_SECURITY_BITS = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x1}}} +#endif +; +const ibz_t TORSION_PLUS_2POWER = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x100}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x100000000000000}}} +#endif +; +const ibz_t SEC_DEGREE = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 49, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 25, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) 
{0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#endif +; +const ibz_t COM_DEGREE = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 49, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 25, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#endif +; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/torsion_constants.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/torsion_constants.h new file mode 100644 index 0000000000..f5e4e9fb66 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/torsion_constants.h @@ -0,0 +1,6 @@ +#include +#define TORSION_2POWER_BYTES 48 +extern const ibz_t TWO_TO_SECURITY_BITS; +extern const ibz_t TORSION_PLUS_2POWER; +extern const ibz_t SEC_DEGREE; +extern const ibz_t COM_DEGREE; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/tutil.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/tutil.h new file mode 100644 index 0000000000..59f162093e --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/tutil.h @@ -0,0 +1,36 @@ +#ifndef TUTIL_H +#define TUTIL_H + +#include +#include + +#if defined(__GNUC__) || defined(__clang__) +#define BSWAP16(i) __builtin_bswap16((i)) +#define BSWAP32(i) __builtin_bswap32((i)) +#define BSWAP64(i) __builtin_bswap64((i)) +#define UNUSED __attribute__((unused)) +#else +#define BSWAP16(i) ((((i) >> 8) & 0xff) | (((i) & 0xff00) << 8)) +#define BSWAP32(i) \ + ((((i) >> 24) & 0xff) | (((i) >> 8) & 0xff00) | (((i) & 0xff00) << 8) | ((i) << 24)) +#define BSWAP64(i) ((BSWAP32((i) >> 32) & 0xffffffff) | (BSWAP32(i) << 32) +#define UNUSED +#endif + +#if defined(RADIX_64) +#define digit_t uint64_t +#define sdigit_t int64_t +#define RADIX 64 +#define LOG2RADIX 6 +#define BSWAP_DIGIT(i) BSWAP64(i) +#elif defined(RADIX_32) +#define digit_t uint32_t +#define sdigit_t int32_t +#define RADIX 32 +#define LOG2RADIX 5 +#define BSWAP_DIGIT(i) BSWAP32(i) +#else +#error "Radix must be 32bit or 64 bit" +#endif + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/verification.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/verification.h new file mode 100644 index 0000000000..af674691da --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/verification.h @@ -0,0 +1,123 @@ +/** @file + * + * @brief The verification protocol + */ + +#ifndef VERIFICATION_H +#define VERIFICATION_H + +#include +#include + +/** @defgroup verification SQIsignHD verification protocol + * @{ + */ + +/** @defgroup verification_t Types for SQIsignHD verification protocol + * @{ + */ + +typedef digit_t scalar_t[NWORDS_ORDER]; +typedef scalar_t scalar_mtx_2x2_t[2][2]; + +/** @brief Type for the signature + * + * @typedef signature_t + * + * @struct signature + * + */ +typedef struct signature +{ + fp2_t E_aux_A; // the Montgomery A-coefficient for the auxiliary curve + uint8_t backtracking; + uint8_t two_resp_length; + scalar_mtx_2x2_t mat_Bchall_can_to_B_chall; // the matrix of the desired basis + scalar_t chall_coeff; + uint8_t hint_aux; + uint8_t hint_chall; +} signature_t; + +/** @brief Type for the public keys + * + * @typedef public_key_t + * + * 
@struct public_key + * + */ +typedef struct public_key +{ + ec_curve_t curve; // the normalized A-coefficient of the Montgomery curve + uint8_t hint_pk; +} public_key_t; + +/** @} + */ + +/*************************** Functions *****************************/ + +void public_key_init(public_key_t *pk); +void public_key_finalize(public_key_t *pk); + +void hash_to_challenge(scalar_t *scalar, + const public_key_t *pk, + const ec_curve_t *com_curve, + const unsigned char *message, + size_t length); + +/** + * @brief Verification + * + * @param sig signature + * @param pk public key + * @param m message + * @param l size + * @returns 1 if the signature verifies, 0 otherwise + */ +int protocols_verify(signature_t *sig, const public_key_t *pk, const unsigned char *m, size_t l); + +/*************************** Encoding *****************************/ + +/** @defgroup encoding Encoding and decoding functions + * @{ + */ + +/** + * @brief Encodes a signature as a byte array + * + * @param enc : Byte array to encode the signature in + * @param sig : Signature to encode + */ +void signature_to_bytes(unsigned char *enc, const signature_t *sig); + +/** + * @brief Decodes a signature from a byte array + * + * @param sig : Structure to decode the signature in + * @param enc : Byte array to decode + */ +void signature_from_bytes(signature_t *sig, const unsigned char *enc); + +/** + * @brief Encodes a public key as a byte array + * + * @param enc : Byte array to encode the public key in + * @param pk : Public key to encode + */ +unsigned char *public_key_to_bytes(unsigned char *enc, const public_key_t *pk); + +/** + * @brief Decodes a public key from a byte array + * + * @param pk : Structure to decode the public key in + * @param enc : Byte array to decode + */ +const unsigned char *public_key_from_bytes(public_key_t *pk, const unsigned char *enc); + +/** @} + */ + +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/verify.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/verify.c new file mode 100644 index 0000000000..b5f78ad398 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/verify.c @@ -0,0 +1,309 @@ +#include +#include +#include +#include +#include + +// Check that the basis change matrix elements are canonical +// representatives modulo 2^(SQIsign_response_length + 2). +static int +check_canonical_basis_change_matrix(const signature_t *sig) +{ + // This works as long as all values in sig->mat_Bchall_can_to_B_chall are + // positive integers. 
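+    // Concretely, given the shift computed below, every matrix entry must be
+    // strictly smaller than
+    // 2^(SQIsign_response_length + HD_extra_torsion - backtracking).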
+ int ret = 1; + scalar_t aux; + + memset(aux, 0, NWORDS_ORDER * sizeof(digit_t)); + aux[0] = 0x1; + multiple_mp_shiftl(aux, SQIsign_response_length + HD_extra_torsion - (int)sig->backtracking, NWORDS_ORDER); + + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + if (mp_compare(aux, sig->mat_Bchall_can_to_B_chall[i][j], NWORDS_ORDER) <= 0) { + ret = 0; + } + } + } + + return ret; +} + +// Compute the 2^n isogeny from the signature with kernel +// P + [chall_coeff]Q and store the codomain in E_chall +static int +compute_challenge_verify(ec_curve_t *E_chall, const signature_t *sig, const ec_curve_t *Epk, const uint8_t hint_pk) +{ + ec_basis_t bas_EA; + ec_isog_even_t phi_chall; + + // Set domain and length of 2^n isogeny + copy_curve(&phi_chall.curve, Epk); + phi_chall.length = TORSION_EVEN_POWER - sig->backtracking; + + // Compute the basis from the supplied hint + if (!ec_curve_to_basis_2f_from_hint(&bas_EA, &phi_chall.curve, TORSION_EVEN_POWER, hint_pk)) // canonical + return 0; + + // recovering the exact challenge + { + if (!ec_ladder3pt(&phi_chall.kernel, sig->chall_coeff, &bas_EA.P, &bas_EA.Q, &bas_EA.PmQ, &phi_chall.curve)) { + return 0; + }; + } + + // Double the kernel until is has the correct order + ec_dbl_iter(&phi_chall.kernel, sig->backtracking, &phi_chall.kernel, &phi_chall.curve); + + // Compute the codomain + copy_curve(E_chall, &phi_chall.curve); + if (ec_eval_even(E_chall, &phi_chall, NULL, 0)) + return 0; + return 1; +} + +// same as matrix_application_even_basis() in id2iso.c, with some modifications: +// - this version works with a matrix of scalars (not ibz_t). +// - reduction modulo 2^f of matrix elements is removed here, because it is +// assumed that the elements are already cannonical representatives modulo +// 2^f; this is ensured by calling check_canonical_basis_change_matrix() at +// the beginning of protocols_verify(). +static int +matrix_scalar_application_even_basis(ec_basis_t *bas, const ec_curve_t *E, scalar_mtx_2x2_t *mat, int f) +{ + scalar_t scalar0, scalar1; + memset(scalar0, 0, NWORDS_ORDER * sizeof(digit_t)); + memset(scalar1, 0, NWORDS_ORDER * sizeof(digit_t)); + + ec_basis_t tmp_bas; + copy_basis(&tmp_bas, bas); + + // For a matrix [[a, c], [b, d]] we compute: + // + // first basis element R = [a]P + [b]Q + if (!ec_biscalar_mul(&bas->P, (*mat)[0][0], (*mat)[1][0], f, &tmp_bas, E)) + return 0; + // second basis element S = [c]P + [d]Q + if (!ec_biscalar_mul(&bas->Q, (*mat)[0][1], (*mat)[1][1], f, &tmp_bas, E)) + return 0; + // Their difference R - S = [a - c]P + [b - d]Q + mp_sub(scalar0, (*mat)[0][0], (*mat)[0][1], NWORDS_ORDER); + mp_mod_2exp(scalar0, f, NWORDS_ORDER); + mp_sub(scalar1, (*mat)[1][0], (*mat)[1][1], NWORDS_ORDER); + mp_mod_2exp(scalar1, f, NWORDS_ORDER); + return ec_biscalar_mul(&bas->PmQ, scalar0, scalar1, f, &tmp_bas, E); +} + +// Compute the bases for the challenge and auxillary curve from +// the canonical bases. Challenge basis is reconstructed from the +// compressed scalars within the challenge. 
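+// After this call the challenge basis has order
+// 2^(pow_dim2_deg_resp + HD_extra_torsion + two_resp_length) and the auxiliary
+// basis has order 2^(pow_dim2_deg_resp + HD_extra_torsion); the extra factor
+// 2^two_resp_length on the challenge side is removed later when the basis is
+// pushed through the small isogeny in two_response_isogeny_verify().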
+static int +challenge_and_aux_basis_verify(ec_basis_t *B_chall_can, + ec_basis_t *B_aux_can, + ec_curve_t *E_chall, + ec_curve_t *E_aux, + signature_t *sig, + const int pow_dim2_deg_resp) +{ + + // recovering the canonical basis as TORSION_EVEN_POWER for consistency with signing + if (!ec_curve_to_basis_2f_from_hint(B_chall_can, E_chall, TORSION_EVEN_POWER, sig->hint_chall)) + return 0; + + // setting to the right order + ec_dbl_iter_basis(B_chall_can, + TORSION_EVEN_POWER - pow_dim2_deg_resp - HD_extra_torsion - sig->two_resp_length, + B_chall_can, + E_chall); + + if (!ec_curve_to_basis_2f_from_hint(B_aux_can, E_aux, TORSION_EVEN_POWER, sig->hint_aux)) + return 0; + + // setting to the right order + ec_dbl_iter_basis(B_aux_can, TORSION_EVEN_POWER - pow_dim2_deg_resp - HD_extra_torsion, B_aux_can, E_aux); + +#ifndef NDEBUG + if (!test_basis_order_twof(B_chall_can, E_chall, HD_extra_torsion + pow_dim2_deg_resp + sig->two_resp_length)) + debug_print("canonical basis has wrong order, expect something to fail"); +#endif + + // applying the change matrix on the basis of E_chall + return matrix_scalar_application_even_basis(B_chall_can, + E_chall, + &sig->mat_Bchall_can_to_B_chall, + pow_dim2_deg_resp + HD_extra_torsion + sig->two_resp_length); +} + +// When two_resp_length is non-zero, we must compute a small 2^n-isogeny +// updating E_chall as the codomain as well as push the basis on E_chall +// through this isogeny +static int +two_response_isogeny_verify(ec_curve_t *E_chall, ec_basis_t *B_chall_can, const signature_t *sig, int pow_dim2_deg_resp) +{ + ec_point_t ker, points[3]; + + // choosing the right point for the small two_isogenies + if (mp_is_even(sig->mat_Bchall_can_to_B_chall[0][0], NWORDS_ORDER) && + mp_is_even(sig->mat_Bchall_can_to_B_chall[1][0], NWORDS_ORDER)) { + copy_point(&ker, &B_chall_can->Q); + } else { + copy_point(&ker, &B_chall_can->P); + } + + copy_point(&points[0], &B_chall_can->P); + copy_point(&points[1], &B_chall_can->Q); + copy_point(&points[2], &B_chall_can->PmQ); + + ec_dbl_iter(&ker, pow_dim2_deg_resp + HD_extra_torsion, &ker, E_chall); + +#ifndef NDEBUG + if (!test_point_order_twof(&ker, E_chall, sig->two_resp_length)) + debug_print("kernel does not have order 2^(two_resp_length"); +#endif + + if (ec_eval_small_chain(E_chall, &ker, sig->two_resp_length, points, 3, false)) { + return 0; + } + +#ifndef NDEBUG + if (!test_point_order_twof(&points[0], E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("points[0] does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); + if (!test_point_order_twof(&points[1], E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("points[1] does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); + if (!test_point_order_twof(&points[2], E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("points[2] does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); +#endif + + copy_point(&B_chall_can->P, &points[0]); + copy_point(&B_chall_can->Q, &points[1]); + copy_point(&B_chall_can->PmQ, &points[2]); + return 1; +} + +// The commitment curve can be recovered from the codomain of the 2D +// isogeny built from the bases computed during verification. 
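+// The kernel of that isogeny is assembled from the two bases via
+// copy_bases_to_kernel(); with the (2^n,2^n)-isogeny formulae used here the
+// commitment curve is always the first component E1 of the split codomain.
+// When pow_dim2_deg_resp == 0 no dimension-2 step is needed and, after a
+// 4-torsion sanity check on E_chall, the challenge curve itself is returned.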
+static int +compute_commitment_curve_verify(ec_curve_t *E_com, + const ec_basis_t *B_chall_can, + const ec_basis_t *B_aux_can, + const ec_curve_t *E_chall, + const ec_curve_t *E_aux, + int pow_dim2_deg_resp) + +{ +#ifndef NDEBUG + // Check all the points are the correct order + if (!test_basis_order_twof(B_chall_can, E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("B_chall_can does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); + + if (!test_basis_order_twof(B_aux_can, E_aux, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("B_aux_can does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); +#endif + + // now compute the dim2 isogeny from Echall x E_aux -> E_com x E_aux' + // of kernel B_chall_can x B_aux_can + + // first we set-up the kernel + theta_couple_curve_t EchallxEaux; + copy_curve(&EchallxEaux.E1, E_chall); + copy_curve(&EchallxEaux.E2, E_aux); + + theta_kernel_couple_points_t dim_two_ker; + copy_bases_to_kernel(&dim_two_ker, B_chall_can, B_aux_can); + + // computing the isogeny + theta_couple_curve_t codomain; + int codomain_splits; + ec_curve_init(&codomain.E1); + ec_curve_init(&codomain.E2); + // handling the special case where we don't need to perform any dim2 computation + if (pow_dim2_deg_resp == 0) { + codomain_splits = 1; + copy_curve(&codomain.E1, &EchallxEaux.E1); + copy_curve(&codomain.E2, &EchallxEaux.E2); + // We still need to check that E_chall is supersingular + // This assumes that HD_extra_torsion == 2 + if (!ec_is_basis_four_torsion(B_chall_can, E_chall)) { + return 0; + } + } else { + codomain_splits = theta_chain_compute_and_eval_verify( + pow_dim2_deg_resp, &EchallxEaux, &dim_two_ker, true, &codomain, NULL, 0); + } + + // computing the commitment curve + // its always the first one because of our (2^n,2^n)-isogeny formulae + copy_curve(E_com, &codomain.E1); + + return codomain_splits; +} + +// SQIsign verification +int +protocols_verify(signature_t *sig, const public_key_t *pk, const unsigned char *m, size_t l) +{ + int verify; + + if (!check_canonical_basis_change_matrix(sig)) + return 0; + + // Computation of the length of the dim 2 2^n isogeny + int pow_dim2_deg_resp = SQIsign_response_length - (int)sig->two_resp_length - (int)sig->backtracking; + + // basic sanity test: checking that the response is not too long + if (pow_dim2_deg_resp < 0) + return 0; + // The dim 2 isogeny embeds a dim 1 isogeny of odd degree, so it can + // never be of length 2. 
+ if (pow_dim2_deg_resp == 1) + return 0; + + // check the public curve is valid + if (!ec_curve_verify_A(&(pk->curve).A)) + return 0; + + // Set auxiliary curve from the A-coefficient within the signature + ec_curve_t E_aux; + if (!ec_curve_init_from_A(&E_aux, &sig->E_aux_A)) + return 0; // invalid curve + + // checking that we are given A-coefficients and no precomputation + assert(fp2_is_one(&pk->curve.C) == 0xFFFFFFFF && !pk->curve.is_A24_computed_and_normalized); + + // computation of the challenge + ec_curve_t E_chall; + if (!compute_challenge_verify(&E_chall, sig, &pk->curve, pk->hint_pk)) { + return 0; + } + + // Computation of the canonical bases for the challenge and aux curve + ec_basis_t B_chall_can, B_aux_can; + + if (!challenge_and_aux_basis_verify(&B_chall_can, &B_aux_can, &E_chall, &E_aux, sig, pow_dim2_deg_resp)) { + return 0; + } + + // When two_resp_length != 0 we need to compute a second, short 2^r-isogeny + if (sig->two_resp_length > 0) { + if (!two_response_isogeny_verify(&E_chall, &B_chall_can, sig, pow_dim2_deg_resp)) { + return 0; + } + } + + // We can recover the commitment curve with a 2D isogeny + // The supplied signature did not compute an isogeny between eliptic products + // and so definitely is an invalid signature. + ec_curve_t E_com; + if (!compute_commitment_curve_verify(&E_com, &B_chall_can, &B_aux_can, &E_chall, &E_aux, pow_dim2_deg_resp)) + return 0; + + scalar_t chk_chall; + + // recomputing the challenge vector + hash_to_challenge(&chk_chall, pk, &E_com, m, l); + + // performing the final check + verify = mp_compare(sig->chall_coeff, chk_chall, NWORDS_ORDER) == 0; + + return verify; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/xeval.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/xeval.c new file mode 100644 index 0000000000..7fc7170423 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/xeval.c @@ -0,0 +1,64 @@ +#include "isog.h" +#include "ec.h" +#include + +// ----------------------------------------------------------------------------------------- +// ----------------------------------------------------------------------------------------- + +// Degree-2 isogeny evaluation with kenerl generated by P != (0, 0) +void +xeval_2(ec_point_t *R, ec_point_t *const Q, const int lenQ, const ec_kps2_t *kps) +{ + fp2_t t0, t1, t2; + for (int j = 0; j < lenQ; j++) { + fp2_add(&t0, &Q[j].x, &Q[j].z); + fp2_sub(&t1, &Q[j].x, &Q[j].z); + fp2_mul(&t2, &kps->K.x, &t1); + fp2_mul(&t1, &kps->K.z, &t0); + fp2_add(&t0, &t2, &t1); + fp2_sub(&t1, &t2, &t1); + fp2_mul(&R[j].x, &Q[j].x, &t0); + fp2_mul(&R[j].z, &Q[j].z, &t1); + } +} + +void +xeval_2_singular(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps2_t *kps) +{ + fp2_t t0, t1; + for (int i = 0; i < lenQ; i++) { + fp2_mul(&t0, &Q[i].x, &Q[i].z); + fp2_mul(&t1, &kps->K.x, &Q[i].z); + fp2_add(&t1, &t1, &Q[i].x); + fp2_mul(&t1, &t1, &Q[i].x); + fp2_sqr(&R[i].x, &Q[i].z); + fp2_add(&R[i].x, &R[i].x, &t1); + fp2_mul(&R[i].z, &t0, &kps->K.z); + } +} + +// Degree-4 isogeny evaluation with kenerl generated by P such that [2]P != (0, 0) +void +xeval_4(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps4_t *kps) +{ + const ec_point_t *K = kps->K; + + fp2_t t0, t1; + + for (int i = 0; i < lenQ; i++) { + fp2_add(&t0, &Q[i].x, &Q[i].z); + fp2_sub(&t1, &Q[i].x, &Q[i].z); + fp2_mul(&(R[i].x), &t0, &K[1].x); + fp2_mul(&(R[i].z), &t1, &K[2].x); + fp2_mul(&t0, &t0, &t1); + fp2_mul(&t0, &t0, &K[0].x); + fp2_add(&t1, &(R[i].x), &(R[i].z)); + fp2_sub(&(R[i].z), &(R[i].x), 
&(R[i].z)); + fp2_sqr(&t1, &t1); + fp2_sqr(&(R[i].z), &(R[i].z)); + fp2_add(&(R[i].x), &t0, &t1); + fp2_sub(&t0, &t0, &(R[i].z)); + fp2_mul(&(R[i].x), &(R[i].x), &t1); + fp2_mul(&(R[i].z), &(R[i].z), &t0); + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/xisog.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/xisog.c new file mode 100644 index 0000000000..7242d29433 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/xisog.c @@ -0,0 +1,61 @@ +#include "isog.h" +#include "ec.h" +#include + +// ------------------------------------------------------------------------- +// ------------------------------------------------------------------------- + +// Degree-2 isogeny with kernel generated by P != (0 ,0) +// Outputs the curve coefficient in the form A24=(A+2C:4C) +void +xisog_2(ec_kps2_t *kps, ec_point_t *B, const ec_point_t P) +{ + fp2_sqr(&B->x, &P.x); + fp2_sqr(&B->z, &P.z); + fp2_sub(&B->x, &B->z, &B->x); + fp2_add(&kps->K.x, &P.x, &P.z); + fp2_sub(&kps->K.z, &P.x, &P.z); +} + +void +xisog_2_singular(ec_kps2_t *kps, ec_point_t *B24, ec_point_t A24) +{ + // No need to check the square root, only used for signing. + fp2_t t0, four; + fp2_set_small(&four, 4); + fp2_add(&t0, &A24.x, &A24.x); + fp2_sub(&t0, &t0, &A24.z); + fp2_add(&t0, &t0, &t0); + fp2_inv(&A24.z); + fp2_mul(&t0, &t0, &A24.z); + fp2_copy(&kps->K.x, &t0); + fp2_add(&B24->x, &t0, &t0); + fp2_sqr(&t0, &t0); + fp2_sub(&t0, &t0, &four); + fp2_sqrt(&t0); + fp2_neg(&kps->K.z, &t0); + fp2_add(&B24->z, &t0, &t0); + fp2_add(&B24->x, &B24->x, &B24->z); + fp2_add(&B24->z, &B24->z, &B24->z); +} + +// Degree-4 isogeny with kernel generated by P such that [2]P != (0 ,0) +// Outputs the curve coefficient in the form A24=(A+2C:4C) +void +xisog_4(ec_kps4_t *kps, ec_point_t *B, const ec_point_t P) +{ + ec_point_t *K = kps->K; + + fp2_sqr(&K[0].x, &P.x); + fp2_sqr(&K[0].z, &P.z); + fp2_add(&K[1].x, &K[0].z, &K[0].x); + fp2_sub(&K[1].z, &K[0].z, &K[0].x); + fp2_mul(&B->x, &K[1].x, &K[1].z); + fp2_sqr(&B->z, &K[0].z); + + // Constants for xeval_4 + fp2_add(&K[2].x, &P.x, &P.z); + fp2_sub(&K[1].x, &P.x, &P.z); + fp2_add(&K[0].x, &K[0].z, &K[0].z); + fp2_add(&K[0].x, &K[0].x, &K[0].x); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/COPYING.LGPL b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/COPYING.LGPL new file mode 100644 index 0000000000..0a041280bd --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/COPYING.LGPL @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. 
+ + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. 
+ + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. 
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/LICENSE b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/LICENSE new file mode 100644 index 0000000000..8f71f43fee --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/NOTICE b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/NOTICE new file mode 100644 index 0000000000..6eccf392fa --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/NOTICE @@ -0,0 +1,21 @@ +Copyright 2023-2025 the SQIsign team. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +The DPE Library is (C) 2004-2024 Patrick Pelissier, Paul Zimmermann, +LORIA/INRIA, and licensed under the GNU Lesser General Public License, +version 3. You may obtain a copy of the License at + + https://www.gnu.org/licenses/lgpl-3.0.en.html + +or in the file COPYING.LGPL. diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes.h new file mode 100644 index 0000000000..e35ec3705b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes.h @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef AES_H +#define AES_H + +#include +#include + +void AES_256_ECB(const uint8_t *input, const uint8_t *key, uint8_t *output); +#define AES_ECB_encrypt AES_256_ECB + +#ifdef ENABLE_AESNI +int AES_128_CTR_NI(unsigned char *output, + size_t outputByteLen, + const unsigned char *input, + size_t inputByteLen); +int AES_128_CTR_4R_NI(unsigned char *output, + size_t outputByteLen, + const unsigned char *input, + size_t inputByteLen); +#define AES_128_CTR AES_128_CTR_NI +#else +int AES_128_CTR(unsigned char *output, + size_t outputByteLen, + const unsigned char *input, + size_t inputByteLen); +#endif + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes_ni.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes_ni.c new file mode 100644 index 0000000000..dc778fc9b6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes_ni.c @@ -0,0 +1,258 @@ +/*************************************************************************** +* This implementation is a modified version of the code, +* written by Nir Drucker and Shay Gueron +* AWS Cryptographic Algorithms Group +* (ndrucker@amazon.com, gueron@amazon.com) +* +* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +* +* Licensed under the Apache License, Version 2.0 (the "License"). +* You may not use this file except in compliance with the License. +* A copy of the License is located at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* or in the "license" file accompanying this file. This file is distributed +* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +* express or implied. 
See the License for the specific language governing +* permissions and limitations under the License. +* The license is detailed in the file LICENSE.txt, and applies to this file. +* ***************************************************************************/ + +#include "aes_ni.h" +#include + +#include +#include + +#define AESENC(m, key) _mm_aesenc_si128(m, key) +#define AESENCLAST(m, key) _mm_aesenclast_si128(m, key) +#define XOR(a, b) _mm_xor_si128(a, b) +#define ADD32(a, b) _mm_add_epi32(a, b) +#define SHUF8(a, mask) _mm_shuffle_epi8(a, mask) + +#define ZERO256 _mm256_zeroall + +#define BSWAP_MASK 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f + +#ifdef VAES256 +#define VAESENC(a, key) _mm256_aesenc_epi128(a, key) +#define VAESENCLAST(a, key) _mm256_aesenclast_epi128(a, key) +#define EXTRACT128(a, imm) _mm256_extracti128_si256(a, imm) +#define XOR256(a, b) _mm256_xor_si256(a,b) +#define ADD32_256(a, b) _mm256_add_epi32(a,b) +#define SHUF8_256(a, mask) _mm256_shuffle_epi8(a, mask) +#endif + +#ifdef VAES512 +#define VAESENC(a, key) _mm512_aesenc_epi128(a, key) +#define VAESENCLAST(a, key) _mm512_aesenclast_epi128(a, key) +#define EXTRACT128(a, imm) _mm512_extracti64x2_epi64(a, imm) +#define XOR512(a, b) _mm512_xor_si512(a,b) +#define ADD32_512(a, b) _mm512_add_epi32(a,b) +#define SHUF8_512(a, mask) _mm512_shuffle_epi8(a, mask) +#endif + +_INLINE_ __m128i load_m128i(IN const uint8_t *ctr) +{ + return _mm_set_epi8(ctr[0], ctr[1], ctr[2], ctr[3], + ctr[4], ctr[5], ctr[6], ctr[7], + ctr[8], ctr[9], ctr[10], ctr[11], + ctr[12], ctr[13], ctr[14], ctr[15]); +} + +_INLINE_ __m128i loadr_m128i(IN const uint8_t *ctr) +{ + return _mm_setr_epi8(ctr[0], ctr[1], ctr[2], ctr[3], + ctr[4], ctr[5], ctr[6], ctr[7], + ctr[8], ctr[9], ctr[10], ctr[11], + ctr[12], ctr[13], ctr[14], ctr[15]); +} + +void aes256_enc(OUT uint8_t *ct, + IN const uint8_t *pt, + IN const aes256_ks_t *ks) { + uint32_t i = 0; + __m128i block = loadr_m128i(pt); + + block = XOR(block, ks->keys[0]); + for (i = 1; i < AES256_ROUNDS; i++) { + block = AESENC(block, ks->keys[i]); + } + block = AESENCLAST(block, ks->keys[AES256_ROUNDS]); + + _mm_storeu_si128((void*)ct, block); + + // Delete secrets from registers if any. + ZERO256(); +} + +void aes256_ctr_enc(OUT uint8_t *ct, + IN const uint8_t *ctr, + IN const uint32_t num_blocks, + IN const aes256_ks_t *ks) +{ + __m128i ctr_block = load_m128i(ctr); + + const __m128i bswap_mask = _mm_set_epi32(BSWAP_MASK); + const __m128i one = _mm_set_epi32(0,0,0,1); + + __m128i block = SHUF8(ctr_block, bswap_mask); + + for (uint32_t bidx = 0; bidx < num_blocks; bidx++) + { + block = XOR(block, ks->keys[0]); + for (uint32_t i = 1; i < AES256_ROUNDS; i++) { + block = AESENC(block, ks->keys[i]); + } + block = AESENCLAST(block, ks->keys[AES256_ROUNDS]); + + //We use memcpy to avoid align casting. + _mm_storeu_si128((void*)&ct[16*bidx], block); + + ctr_block = ADD32(ctr_block, one); + block = SHUF8(ctr_block, bswap_mask); + } + + // Delete secrets from registers if any. + ZERO256(); +} + +#ifdef VAES256 +_INLINE_ void load_ks(OUT __m256i ks256[AES256_ROUNDS + 1], + IN const aes256_ks_t *ks) +{ + for(uint32_t i = 0; i < AES256_ROUNDS + 1; i++) + { + ks256[i] = _mm256_broadcastsi128_si256(ks->keys[i]); + } +} + +// NIST 800-90A Table 3, Section 10.2.1 (no derivation function) states that +// max_number_of_bits_per_request is min((2^ctr_len - 4) x block_len, 2^19) <= 2^19 +// Therefore the maximal number of blocks (16 bytes) is 2^19/128 = 2^19/2^7 = 2^12 < 2^32 +// Here num_blocks is assumed to be less then 2^32. 
+// It is the caller responsiblity to ensure it. +void aes256_ctr_enc256(OUT uint8_t *ct, + IN const uint8_t *ctr, + IN const uint32_t num_blocks, + IN const aes256_ks_t *ks) +{ + const uint64_t num_par_blocks = num_blocks/2; + const uint64_t blocks_rem = num_blocks - (2*(num_par_blocks)); + + __m256i ks256[AES256_ROUNDS + 1]; + load_ks(ks256, ks); + + __m128i single_block = load_m128i(ctr); + __m256i ctr_blocks = _mm256_broadcastsi128_si256(single_block); + + // Preparing the masks + const __m256i bswap_mask = _mm256_set_epi32(BSWAP_MASK, BSWAP_MASK); + const __m256i two = _mm256_set_epi32(0,0,0,2,0,0,0,2); + const __m256i init = _mm256_set_epi32(0,0,0,1,0,0,0,0); + + // Initialize two parallel counters + ctr_blocks = ADD32_256(ctr_blocks, init); + __m256i p = SHUF8_256(ctr_blocks, bswap_mask); + + for (uint32_t block_idx = 0; block_idx < num_par_blocks; block_idx++) + { + p = XOR256(p, ks256[0]); + for (uint32_t i = 1; i < AES256_ROUNDS; i++) + { + p = VAESENC(p, ks256[i]); + } + p = VAESENCLAST(p, ks256[AES256_ROUNDS]); + + // We use memcpy to avoid align casting. + _mm256_storeu_si256((__m256i *)&ct[PAR_AES_BLOCK_SIZE * block_idx], p); + + // Increase the two counters in parallel + ctr_blocks = ADD32_256(ctr_blocks, two); + p = SHUF8_256(ctr_blocks, bswap_mask); + } + + if(0 != blocks_rem) + { + single_block = EXTRACT128(p, 0); + aes256_ctr_enc(&ct[PAR_AES_BLOCK_SIZE * num_par_blocks], + (const uint8_t*)&single_block, blocks_rem, ks); + } + + // Delete secrets from registers if any. + ZERO256(); +} + +#endif //VAES256 + +#ifdef VAES512 + +_INLINE_ void load_ks(OUT __m512i ks512[AES256_ROUNDS + 1], + IN const aes256_ks_t *ks) +{ + for(uint32_t i = 0; i < AES256_ROUNDS + 1; i++) + { + ks512[i] = _mm512_broadcast_i32x4(ks->keys[i]); + } +} + +// NIST 800-90A Table 3, Section 10.2.1 (no derivation function) states that +// max_number_of_bits_per_request is min((2^ctr_len - 4) x block_len, 2^19) <= 2^19 +// Therefore the maximal number of blocks (16 bytes) is 2^19/128 = 2^19/2^7 = 2^12 < 2^32 +// Here num_blocks is assumed to be less then 2^32. +// It is the caller responsiblity to ensure it. +void aes256_ctr_enc512(OUT uint8_t *ct, + IN const uint8_t *ctr, + IN const uint32_t num_blocks, + IN const aes256_ks_t *ks) +{ + const uint64_t num_par_blocks = num_blocks/4; + const uint64_t blocks_rem = num_blocks - (4*(num_par_blocks)); + + __m512i ks512[AES256_ROUNDS + 1]; + load_ks(ks512, ks); + + __m128i single_block = load_m128i(ctr); + __m512i ctr_blocks = _mm512_broadcast_i32x4(single_block); + + // Preparing the masks + const __m512i bswap_mask = _mm512_set_epi32(BSWAP_MASK, BSWAP_MASK, + BSWAP_MASK, BSWAP_MASK); + const __m512i four = _mm512_set_epi32(0,0,0,4,0,0,0,4,0,0,0,4,0,0,0,4); + const __m512i init = _mm512_set_epi32(0,0,0,3,0,0,0,2,0,0,0,1,0,0,0,0); + + // Initialize four parallel counters + ctr_blocks = ADD32_512(ctr_blocks, init); + __m512i p = SHUF8_512(ctr_blocks, bswap_mask); + + for (uint32_t block_idx = 0; block_idx < num_par_blocks; block_idx++) + { + p = XOR512(p, ks512[0]); + for (uint32_t i = 1; i < AES256_ROUNDS; i++) + { + p = VAESENC(p, ks512[i]); + } + p = VAESENCLAST(p, ks512[AES256_ROUNDS]); + + + // We use memcpy to avoid align casting. 
+ _mm512_storeu_si512(&ct[PAR_AES_BLOCK_SIZE * block_idx], p); + + // Increase the four counters in parallel + ctr_blocks = ADD32_512(ctr_blocks, four); + p = SHUF8_512(ctr_blocks, bswap_mask); + } + + if(0 != blocks_rem) + { + single_block = EXTRACT128(p, 0); + aes256_ctr_enc(&ct[PAR_AES_BLOCK_SIZE * num_par_blocks], + (const uint8_t*)&single_block, blocks_rem, ks); + } + + // Delete secrets from registers if any. + ZERO256(); +} + +#endif //VAES512 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes_ni.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes_ni.h new file mode 100644 index 0000000000..3d2b21ecf5 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes_ni.h @@ -0,0 +1,85 @@ +/*************************************************************************** +* Written by Nir Drucker and Shay Gueron +* AWS Cryptographic Algorithms Group +* (ndrucker@amazon.com, gueron@amazon.com) +* +* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +* +* Licensed under the Apache License, Version 2.0 (the "License"). +* You may not use this file except in compliance with the License. +* A copy of the License is located at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* or in the "license" file accompanying this file. This file is distributed +* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +* express or implied. See the License for the specific language governing +* permissions and limitations under the License. +* The license is detailed in the file LICENSE.txt, and applies to this file. +* ***************************************************************************/ + +#pragma once + +#include +#include +#include "defs.h" + +#define MAX_AES_INVOKATION (MASK(32)) + +#define AES256_KEY_SIZE (32ULL) +#define AES256_KEY_BITS (AES256_KEY_SIZE * 8) +#define AES_BLOCK_SIZE (16ULL) +#define AES256_ROUNDS (14ULL) + +#ifdef VAES256 +#define PAR_AES_BLOCK_SIZE (AES_BLOCK_SIZE*2) +#elif defined(VAES512) +#define PAR_AES_BLOCK_SIZE (AES_BLOCK_SIZE*4) +#endif + +typedef ALIGN(16) struct aes256_key_s { + uint8_t raw[AES256_KEY_SIZE]; +} aes256_key_t; + +typedef ALIGN(16) struct aes256_ks_s { + __m128i keys[AES256_ROUNDS + 1]; +} aes256_ks_t; + +// The ks parameter must be 16 bytes aligned! +EXTERNC void aes256_key_expansion(OUT aes256_ks_t *ks, + IN const aes256_key_t *key); + +// Encrypt one 128-bit block ct = E(pt,ks) +void aes256_enc(OUT uint8_t *ct, + IN const uint8_t *pt, + IN const aes256_ks_t *ks); + +// Encrypt num_blocks 128-bit blocks +// ct[15:0] = E(pt[15:0],ks) +// ct[31:16] = E(pt[15:0] + 1,ks) +// ... +// ct[16*num_blocks - 1:16*(num_blocks-1)] = E(pt[15:0] + num_blocks,ks) +void aes256_ctr_enc(OUT uint8_t *ct, + IN const uint8_t *pt, + IN const uint32_t num_blocks, + IN const aes256_ks_t *ks); + +// Encrypt num_blocks 128-bit blocks using VAES (AVX-2) +// ct[15:0] = E(pt[15:0],ks) +// ct[31:16] = E(pt[15:0] + 1,ks) +// ... +// ct[16*num_blocks - 1:16*(num_blocks-1)] = E(pt[15:0] + num_blocks,ks) +void aes256_ctr_enc256(OUT uint8_t *ct, + IN const uint8_t *ctr, + IN const uint32_t num_blocks, + IN const aes256_ks_t *ks); + +// Encrypt num_blocks 128-bit blocks using VAES (AVX512) +// ct[15:0] = E(pt[15:0],ks) +// ct[31:16] = E(pt[15:0] + 1,ks) +// ... 
+// ct[16*num_blocks - 1:16*(num_blocks-1)] = E(pt[15:0] + num_blocks,ks) +void aes256_ctr_enc512(OUT uint8_t *ct, + IN const uint8_t *ctr, + IN const uint32_t num_blocks, + IN const aes256_ks_t *ks); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/api.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/api.c new file mode 100644 index 0000000000..e01f911e87 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/api.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: Apache-2.0 + +#include +#include + +#if defined(ENABLE_SIGN) +SQISIGN_API +int +crypto_sign_keypair(unsigned char *pk, unsigned char *sk) +{ + + return sqisign_keypair(pk, sk); +} + +SQISIGN_API +int +crypto_sign(unsigned char *sm, unsigned long long *smlen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *sk) +{ + return sqisign_sign(sm, smlen, m, mlen, sk); +} +#endif + +SQISIGN_API +int +crypto_sign_open(unsigned char *m, unsigned long long *mlen, + const unsigned char *sm, unsigned long long smlen, + const unsigned char *pk) +{ + return sqisign_open(m, mlen, sm, smlen, pk); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/api.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/api.h new file mode 100644 index 0000000000..dee239e1cd --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/api.h @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef api_h +#define api_h + +#include + +#define CRYPTO_SECRETKEYBYTES 701 +#define CRYPTO_PUBLICKEYBYTES 129 +#define CRYPTO_BYTES 292 + +#define CRYPTO_ALGNAME "SQIsign_lvl5" + +#if defined(ENABLE_SIGN) +SQISIGN_API +int +crypto_sign_keypair(unsigned char *pk, unsigned char *sk); + +SQISIGN_API +int +crypto_sign(unsigned char *sm, unsigned long long *smlen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *sk); +#endif + +SQISIGN_API +int +crypto_sign_open(unsigned char *m, unsigned long long *mlen, + const unsigned char *sm, unsigned long long smlen, + const unsigned char *pk); + +#endif /* api_h */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/asm_preamble.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/asm_preamble.h new file mode 100644 index 0000000000..3ef7927e9c --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/asm_preamble.h @@ -0,0 +1,22 @@ +#ifdef __APPLE__ +#define CAT(A, B) _CAT(A, B) +#define _CAT(A, B) A##B +#undef fp_add +#undef fp_sub +#undef fp_mul +#undef fp_sqr +#undef fp2_mul_c0 +#undef fp2_mul_c1 +#undef fp2_sq_c0 +#undef fp2_sq_c1 +#define p2 CAT(_, p2) +#define p CAT(_, p) +#define fp_add CAT(_, SQISIGN_NAMESPACE(fp_add)) +#define fp_sub CAT(_, SQISIGN_NAMESPACE(fp_sub)) +#define fp_mul CAT(_, SQISIGN_NAMESPACE(fp_mul)) +#define fp_sqr CAT(_, SQISIGN_NAMESPACE(fp_sqr)) +#define fp2_mul_c0 CAT(_, SQISIGN_NAMESPACE(fp2_mul_c0)) +#define fp2_mul_c1 CAT(_, SQISIGN_NAMESPACE(fp2_mul_c1)) +#define fp2_sq_c0 CAT(_, SQISIGN_NAMESPACE(fp2_sq_c0)) +#define fp2_sq_c1 CAT(_, SQISIGN_NAMESPACE(fp2_sq_c1)) +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/basis.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/basis.c new file mode 100644 index 0000000000..94cb7fcacb --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/basis.c @@ -0,0 +1,416 @@ +#include "ec.h" +#include "fp2.h" +#include "e0_basis.h" +#include + +uint32_t +ec_recover_y(fp2_t *y, const fp2_t *Px, const ec_curve_t *curve) +{ // Recover y-coordinate of a point on the Montgomery curve y^2 = x^3 
+ Ax^2 + x + fp2_t t0; + + fp2_sqr(&t0, Px); + fp2_mul(y, &t0, &curve->A); // Ax^2 + fp2_add(y, y, Px); // Ax^2 + x + fp2_mul(&t0, &t0, Px); + fp2_add(y, y, &t0); // x^3 + Ax^2 + x + // This is required, because we do not yet know that our curves are + // supersingular so our points live on the twist with B = 1. + return fp2_sqrt_verify(y); +} + +static void +difference_point(ec_point_t *PQ, const ec_point_t *P, const ec_point_t *Q, const ec_curve_t *curve) +{ + // Given P,Q in projective x-only, computes a deterministic choice for (P-Q) + // Based on Proposition 3 of https://eprint.iacr.org/2017/518.pdf + + fp2_t Bxx, Bxz, Bzz, t0, t1; + + fp2_mul(&t0, &P->x, &Q->x); + fp2_mul(&t1, &P->z, &Q->z); + fp2_sub(&Bxx, &t0, &t1); + fp2_sqr(&Bxx, &Bxx); + fp2_mul(&Bxx, &Bxx, &curve->C); // C*(P.x*Q.x-P.z*Q.z)^2 + fp2_add(&Bxz, &t0, &t1); + fp2_mul(&t0, &P->x, &Q->z); + fp2_mul(&t1, &P->z, &Q->x); + fp2_add(&Bzz, &t0, &t1); + fp2_mul(&Bxz, &Bxz, &Bzz); // (P.x*Q.x+P.z*Q.z)(P.x*Q.z+P.z*Q.x) + fp2_sub(&Bzz, &t0, &t1); + fp2_sqr(&Bzz, &Bzz); + fp2_mul(&Bzz, &Bzz, &curve->C); // C*(P.x*Q.z-P.z*Q.x)^2 + fp2_mul(&Bxz, &Bxz, &curve->C); // C*(P.x*Q.x+P.z*Q.z)(P.x*Q.z+P.z*Q.x) + fp2_mul(&t0, &t0, &t1); + fp2_mul(&t0, &t0, &curve->A); + fp2_add(&t0, &t0, &t0); + fp2_add(&Bxz, &Bxz, &t0); // C*(P.x*Q.x+P.z*Q.z)(P.x*Q.z+P.z*Q.x) + 2*A*P.x*Q.z*P.z*Q.x + + // To ensure that the denominator is a fourth power in Fp, we normalize by + // C*C_bar^2*(P.z)_bar^2*(Q.z)_bar^2 + fp_copy(&t0.re, &curve->C.re); + fp_neg(&t0.im, &curve->C.im); + fp2_sqr(&t0, &t0); + fp2_mul(&t0, &t0, &curve->C); + fp_copy(&t1.re, &P->z.re); + fp_neg(&t1.im, &P->z.im); + fp2_sqr(&t1, &t1); + fp2_mul(&t0, &t0, &t1); + fp_copy(&t1.re, &Q->z.re); + fp_neg(&t1.im, &Q->z.im); + fp2_sqr(&t1, &t1); + fp2_mul(&t0, &t0, &t1); + fp2_mul(&Bxx, &Bxx, &t0); + fp2_mul(&Bxz, &Bxz, &t0); + fp2_mul(&Bzz, &Bzz, &t0); + + // Solving quadratic equation + fp2_sqr(&t0, &Bxz); + fp2_mul(&t1, &Bxx, &Bzz); + fp2_sub(&t0, &t0, &t1); + // No need to check if t0 is square, as per the entangled basis algorithm. + fp2_sqrt(&t0); + fp2_add(&PQ->x, &Bxz, &t0); + fp2_copy(&PQ->z, &Bzz); +} + +// Lifts a basis x(P), x(Q), x(P-Q) assuming the curve has (A/C : 1) and the point +// P = (X/Z : 1). 
For generic implementation see lift_basis() +uint32_t +lift_basis_normalized(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E) +{ + assert(fp2_is_one(&B->P.z)); + assert(fp2_is_one(&E->C)); + + fp2_copy(&P->x, &B->P.x); + fp2_copy(&Q->x, &B->Q.x); + fp2_copy(&Q->z, &B->Q.z); + fp2_set_one(&P->z); + uint32_t ret = ec_recover_y(&P->y, &P->x, E); + + // Algorithm of Okeya-Sakurai to recover y.Q in the montgomery model + fp2_t v1, v2, v3, v4; + fp2_mul(&v1, &P->x, &Q->z); + fp2_add(&v2, &Q->x, &v1); + fp2_sub(&v3, &Q->x, &v1); + fp2_sqr(&v3, &v3); + fp2_mul(&v3, &v3, &B->PmQ.x); + fp2_add(&v1, &E->A, &E->A); + fp2_mul(&v1, &v1, &Q->z); + fp2_add(&v2, &v2, &v1); + fp2_mul(&v4, &P->x, &Q->x); + fp2_add(&v4, &v4, &Q->z); + fp2_mul(&v2, &v2, &v4); + fp2_mul(&v1, &v1, &Q->z); + fp2_sub(&v2, &v2, &v1); + fp2_mul(&v2, &v2, &B->PmQ.z); + fp2_sub(&Q->y, &v3, &v2); + fp2_add(&v1, &P->y, &P->y); + fp2_mul(&v1, &v1, &Q->z); + fp2_mul(&v1, &v1, &B->PmQ.z); + fp2_mul(&Q->x, &Q->x, &v1); + fp2_mul(&Q->z, &Q->z, &v1); + + // Transforming to a jacobian coordinate + fp2_sqr(&v1, &Q->z); + fp2_mul(&Q->y, &Q->y, &v1); + fp2_mul(&Q->x, &Q->x, &Q->z); + return ret; +} + +uint32_t +lift_basis(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E) +{ + // Normalise the curve E such that (A : C) is (A/C : 1) + // and the point x(P) = (X/Z : 1). + fp2_t inverses[2]; + fp2_copy(&inverses[0], &B->P.z); + fp2_copy(&inverses[1], &E->C); + + fp2_batched_inv(inverses, 2); + fp2_set_one(&B->P.z); + fp2_set_one(&E->C); + + fp2_mul(&B->P.x, &B->P.x, &inverses[0]); + fp2_mul(&E->A, &E->A, &inverses[1]); + + // Lift the basis to Jacobian points P, Q + return lift_basis_normalized(P, Q, B, E); +} + +// Given an x-coordinate, determines if this is a valid +// point on the curve. Assumes C=1. +static uint32_t +is_on_curve(const fp2_t *x, const ec_curve_t *curve) +{ + assert(fp2_is_one(&curve->C)); + fp2_t t0; + + fp2_add(&t0, x, &curve->A); // x + (A/C) + fp2_mul(&t0, &t0, x); // x^2 + (A/C)*x + fp2_add_one(&t0, &t0); // x^2 + (A/C)*x + 1 + fp2_mul(&t0, &t0, x); // x^3 + (A/C)*x^2 + x + + return fp2_is_square(&t0); +} + +// Helper function which given a point of order k*2^n with n maximal +// and k odd, computes a point of order 2^f +static inline void +clear_cofactor_for_maximal_even_order(ec_point_t *P, ec_curve_t *curve, int f) +{ + // clear out the odd cofactor to get a point of order 2^n + ec_mul(P, p_cofactor_for_2f, P_COFACTOR_FOR_2F_BITLENGTH, P, curve); + + // clear the power of two to get a point of order 2^f + for (int i = 0; i < TORSION_EVEN_POWER - f; i++) { + xDBL_A24(P, P, &curve->A24, curve->is_A24_computed_and_normalized); + } +} + +// Helper function which finds an NQR -1 / (1 + i*b) for entangled basis generation +static uint8_t +find_nqr_factor(fp2_t *x, ec_curve_t *curve, const uint8_t start) +{ + // factor = -1/(1 + i*b) for b in Fp will be NQR whenever 1 + b^2 is NQR + // in Fp, so we find one of these and then invert (1 + i*b). We store b + // as a u8 hint to save time in verification. + + // We return the hint as a u8, but use (uint16_t)n to give 2^16 - 1 + // to make failure cryptographically negligible, with a fallback when + // n > 128 is required. 
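+    // (This holds because an element of Fp2 is a square exactly when its norm down to Fp +    // is a square, and the norm of -1/(1 + i*b) is 1/(1 + b^2), so 1 + b^2 being an NQR +    // in Fp makes the factor -1/(1 + i*b) an NQR in Fp2.)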
+ uint8_t hint; + uint32_t found = 0; + uint16_t n = start; + + bool qr_b = 1; + fp_t b, tmp; + fp2_t z, t0, t1; + + do { + while (qr_b) { + // find b with 1 + b^2 a non-quadratic residue + fp_set_small(&tmp, (uint32_t)n * n + 1); + qr_b = fp_is_square(&tmp); + n++; // keeps track of b = n - 1 + } + + // for Px := -A/(1 + i*b) to be on the curve + // is equivalent to A^2*(z-1) - z^2 NQR for z = 1 + i*b + // thus prevents unnecessary inversion pre-check + + // t0 = z - 1 = i*b + // t1 = z = 1 + i*b + fp_set_small(&b, (uint32_t)n - 1); + fp2_set_zero(&t0); + fp2_set_one(&z); + fp_copy(&z.im, &b); + fp_copy(&t0.im, &b); + + // A^2*(z-1) - z^2 + fp2_sqr(&t1, &curve->A); + fp2_mul(&t0, &t0, &t1); // A^2 * (z - 1) + fp2_sqr(&t1, &z); + fp2_sub(&t0, &t0, &t1); // A^2 * (z - 1) - z^2 + found = !fp2_is_square(&t0); + + qr_b = 1; + } while (!found); + + // set Px to -A/(1 + i*b) + fp2_copy(x, &z); + fp2_inv(x); + fp2_mul(x, x, &curve->A); + fp2_neg(x, x); + + /* + * With very low probability n will not fit in 7 bits. + * We set hint = 0 which signals failure and the need + * to generate a value on the fly during verification + */ + hint = n <= 128 ? n - 1 : 0; + + return hint; +} + +// Helper function which finds a point x(P) = n * A +static uint8_t +find_nA_x_coord(fp2_t *x, ec_curve_t *curve, const uint8_t start) +{ + assert(!fp2_is_square(&curve->A)); // Only to be called when A is a NQR + + // when A is NQR we allow x(P) to be a multiple n*A of A + uint8_t n = start; + if (n == 1) { + fp2_copy(x, &curve->A); + } else { + fp2_mul_small(x, &curve->A, n); + } + + while (!is_on_curve(x, curve)) { + fp2_add(x, x, &curve->A); + n++; + } + + /* + * With very low probability (1/2^128), n will not fit in 7 bits. + * In this case, we set hint = 0 which signals failure and the need + * to generate a value on the fly during verification + */ + uint8_t hint = n < 128 ? 
n : 0; + return hint; +} + +// The entangled basis generation does not allow A = 0 +// so we simply return the one we have already precomputed +static void +ec_basis_E0_2f(ec_basis_t *PQ2, ec_curve_t *curve, int f) +{ + assert(fp2_is_zero(&curve->A)); + ec_point_t P, Q; + + // Set P, Q to precomputed (X : 1) values + fp2_copy(&P.x, &BASIS_E0_PX); + fp2_copy(&Q.x, &BASIS_E0_QX); + fp2_set_one(&P.z); + fp2_set_one(&Q.z); + + // clear the power of two to get a point of order 2^f + for (int i = 0; i < TORSION_EVEN_POWER - f; i++) { + xDBL_E0(&P, &P); + xDBL_E0(&Q, &Q); + } + + // Set P, Q in the basis and compute x(P - Q) + copy_point(&PQ2->P, &P); + copy_point(&PQ2->Q, &Q); + difference_point(&PQ2->PmQ, &P, &Q, curve); +} + +// Computes a basis E[2^f] = where the point Q is above (0 : 0) +// and stores hints as an array for faster recomputation at a later point +uint8_t +ec_curve_to_basis_2f_to_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f) +{ + // Normalise (A/C : 1) and ((A + 2)/4 : 1) + ec_normalize_curve_and_A24(curve); + + if (fp2_is_zero(&curve->A)) { + ec_basis_E0_2f(PQ2, curve, f); + return 0; + } + + uint8_t hint; + bool hint_A = fp2_is_square(&curve->A); + + // Compute the points P, Q + ec_point_t P, Q; + + if (!hint_A) { + // when A is NQR we allow x(P) to be a multiple n*A of A + hint = find_nA_x_coord(&P.x, curve, 1); + } else { + // when A is QR we instead have to find (1 + b^2) a NQR + // such that x(P) = -A / (1 + i*b) + hint = find_nqr_factor(&P.x, curve, 1); + } + + fp2_set_one(&P.z); + fp2_add(&Q.x, &curve->A, &P.x); + fp2_neg(&Q.x, &Q.x); + fp2_set_one(&Q.z); + + // clear out the odd cofactor to get a point of order 2^f + clear_cofactor_for_maximal_even_order(&P, curve, f); + clear_cofactor_for_maximal_even_order(&Q, curve, f); + + // compute PmQ, set PmQ to Q to ensure Q above (0,0) + difference_point(&PQ2->Q, &P, &Q, curve); + copy_point(&PQ2->P, &P); + copy_point(&PQ2->PmQ, &Q); + + // Finally, we compress hint_A and hint into a single bytes. 
+ // We choose to set the LSB of hint to hint_A + assert(hint < 128); // We expect hint to be 7-bits in size + return (hint << 1) | hint_A; +} + +// Computes a basis E[2^f] = where the point Q is above (0 : 0) +// given the hints as an array for faster basis computation +int +ec_curve_to_basis_2f_from_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f, const uint8_t hint) +{ + // Normalise (A/C : 1) and ((A + 2)/4 : 1) + ec_normalize_curve_and_A24(curve); + + if (fp2_is_zero(&curve->A)) { + ec_basis_E0_2f(PQ2, curve, f); + return 1; + } + + // The LSB of hint encodes whether A is a QR + // The remaining 7-bits are used to find a valid x(P) + bool hint_A = hint & 1; + uint8_t hint_P = hint >> 1; + + // Compute the points P, Q + ec_point_t P, Q; + + if (!hint_P) { + // When hint_P = 0 it means we did not find a point in 128 attempts + // this is very rare and we almost never expect to need this fallback + // In either case, we can start with b = 128 to skip testing the known + // values which will not work + if (!hint_A) { + find_nA_x_coord(&P.x, curve, 128); + } else { + find_nqr_factor(&P.x, curve, 128); + } + } else { + // Otherwise we use the hint to directly find x(P) based on hint_A + if (!hint_A) { + // when A is NQR, we have found n such that x(P) = n*A + fp2_mul_small(&P.x, &curve->A, hint_P); + } else { + // when A is QR we have found b such that (1 + b^2) is a NQR in + // Fp, so we must compute x(P) = -A / (1 + i*b) + fp_set_one(&P.x.re); + fp_set_small(&P.x.im, hint_P); + fp2_inv(&P.x); + fp2_mul(&P.x, &P.x, &curve->A); + fp2_neg(&P.x, &P.x); + } + } + fp2_set_one(&P.z); + +#ifndef NDEBUG + int passed = 1; + passed = is_on_curve(&P.x, curve); + passed &= !fp2_is_square(&P.x); + + if (!passed) + return 0; +#endif + + // set xQ to -xP - A + fp2_add(&Q.x, &curve->A, &P.x); + fp2_neg(&Q.x, &Q.x); + fp2_set_one(&Q.z); + + // clear out the odd cofactor to get a point of order 2^f + clear_cofactor_for_maximal_even_order(&P, curve, f); + clear_cofactor_for_maximal_even_order(&Q, curve, f); + + // compute PmQ, set PmQ to Q to ensure Q above (0,0) + difference_point(&PQ2->Q, &P, &Q, curve); + copy_point(&PQ2->P, &P); + copy_point(&PQ2->PmQ, &Q); + +#ifndef NDEBUG + passed &= test_basis_order_twof(PQ2, curve, f); + + if (!passed) + return 0; +#endif + + return 1; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/bench.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/bench.h new file mode 100644 index 0000000000..c253825828 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/bench.h @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: Apache-2.0 +#ifndef BENCH_H__ +#define BENCH_H__ + +#include +#include +#include +#include +#include +#if defined(__APPLE__) +#include "bench_macos.h" +#endif + +#if defined(TARGET_ARM) || defined(TARGET_S390X) || defined(NO_CYCLE_COUNTER) +#define BENCH_UNIT0 "nanoseconds" +#define BENCH_UNIT3 "microseconds" +#define BENCH_UNIT6 "milliseconds" +#define BENCH_UNIT9 "seconds" +#else +#define BENCH_UNIT0 "cycles" +#define BENCH_UNIT3 "kilocycles" +#define BENCH_UNIT6 "megacycles" +#define BENCH_UNIT9 "gigacycles" +#endif + +static inline void +cpucycles_init(void) { +#if defined(__APPLE__) && defined(TARGET_ARM64) + macos_init_rdtsc(); +#endif +} + +static inline uint64_t +cpucycles(void) +{ +#if defined(TARGET_AMD64) || defined(TARGET_X86) + uint32_t hi, lo; + + asm volatile("rdtsc" : "=a"(lo), "=d"(hi)); + return ((uint64_t)lo) | ((uint64_t)hi << 32); +#elif defined(TARGET_S390X) + uint64_t tod; + asm volatile("stckf %0\n" : 
"=Q"(tod) : : "cc"); + return (tod * 1000 / 4096); +#elif defined(TARGET_ARM64) && !defined(NO_CYCLE_COUNTER) +#if defined(__APPLE__) + return macos_rdtsc(); +#else + uint64_t cycles; + asm volatile("mrs %0, PMCCNTR_EL0" : "=r"(cycles)); + return cycles; +#endif // __APPLE__ +#else + struct timespec time; + clock_gettime(CLOCK_REALTIME, &time); + return (uint64_t)time.tv_sec * 1000000000 + time.tv_nsec; +#endif +} + +static inline int +CMPFUNC(const void *a, const void *b) +{ + uint64_t aa = *(uint64_t *)a, bb = *(uint64_t *)b; + + if (aa > bb) + return +1; + if (aa < bb) + return -1; + return 0; +} + +static inline uint32_t +ISQRT(uint64_t x) +{ + uint32_t r = 0; + for (ssize_t i = 31; i >= 0; --i) { + uint32_t s = r + (1 << i); + if ((uint64_t)s * s <= x) + r = s; + } + return r; +} + +static inline double +_TRUNC(uint64_t x) +{ + return x / 1000 / 1000.; +} +#define _FMT ".3lf" +#define _UNIT BENCH_UNIT6 + +#define BENCH_CODE_1(RUNS) \ + { \ + const size_t count = (RUNS); \ + if (!count) \ + abort(); \ + uint64_t cycles, cycles1, cycles2; \ + uint64_t cycles_list[count]; \ + cycles = 0; \ + for (size_t i = 0; i < count; ++i) { \ + cycles1 = cpucycles(); + +#define BENCH_CODE_2(name) \ + cycles2 = cpucycles(); \ + cycles_list[i] = cycles2 - cycles1; \ + cycles += cycles2 - cycles1; \ + } \ + qsort(cycles_list, count, sizeof(uint64_t), CMPFUNC); \ + uint64_t variance = 0; \ + for (size_t i = 0; i < count; ++i) { \ + int64_t off = cycles_list[i] - cycles / count; \ + variance += off * off; \ + } \ + variance /= count; \ + printf(" %-10s", name); \ + printf(" | average %9" _FMT " | stddev %9" _FMT, \ + _TRUNC(cycles / count), \ + _TRUNC(ISQRT(variance))); \ + printf(" | median %9" _FMT " | min %9" _FMT " | max %9" _FMT, \ + _TRUNC(cycles_list[count / 2]), \ + _TRUNC(cycles_list[0]), \ + _TRUNC(cycles_list[count - 1])); \ + printf(" (%s)\n", _UNIT); \ + } + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/bench_macos.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/bench_macos.h new file mode 100644 index 0000000000..0494fc85e9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/bench_macos.h @@ -0,0 +1,143 @@ +// WARNING: must be run as root on an M1 device +// WARNING: fragile, uses private apple APIs +// currently no command line interface, see variables at top of main + +/* +no warranty; use at your own risk - i believe this code needs +some minor changes to work on some later hardware and/or software revisions, +which is unsurprising given the use of undocumented, private APIs. +------------------------------------------------------------------------------ +This code is available under 2 licenses -- choose whichever you prefer. +------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2020 Dougall Johnson +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ + +/* + Based on https://github.com/travisdowns/robsize + Henry Wong + http://blog.stuffedcow.net/2013/05/measuring-rob-capacity/ + 2014-10-14 +*/ + +#include +#include +#include +#include + +#define KPERF_LIST \ + /* ret, name, params */ \ + F(int, kpc_force_all_ctrs_set, int) \ + F(int, kpc_set_counting, uint32_t) \ + F(int, kpc_set_thread_counting, uint32_t) \ + F(int, kpc_set_config, uint32_t, void *) \ + F(int, kpc_get_thread_counters, int, unsigned int, void *) + +#define F(ret, name, ...) \ + typedef ret name##proc(__VA_ARGS__); \ + static name##proc *name; +KPERF_LIST +#undef F + +#define CFGWORD_EL0A64EN_MASK (0x20000) + +#define CPMU_CORE_CYCLE 0x02 + +#define KPC_CLASS_FIXED (0) +#define KPC_CLASS_CONFIGURABLE (1) + +#define COUNTERS_COUNT 10 +#define KPC_MASK ((1u << KPC_CLASS_CONFIGURABLE) | (1u << KPC_CLASS_FIXED)) +static uint64_t g_config[COUNTERS_COUNT]; +static uint64_t g_counters[COUNTERS_COUNT]; + +static void +macos_configure_rdtsc() +{ + if (kpc_force_all_ctrs_set(1)) { + printf("kpc_force_all_ctrs_set failed\n"); + return; + } + + if (kpc_set_config(KPC_MASK, g_config)) { + printf("kpc_set_config failed\n"); + return; + } + + if (kpc_set_counting(KPC_MASK)) { + printf("kpc_set_counting failed\n"); + return; + } + + if (kpc_set_thread_counting(KPC_MASK)) { + printf("kpc_set_thread_counting failed\n"); + return; + } +} + +static void +macos_init_rdtsc() +{ + void *kperf = + dlopen("/System/Library/PrivateFrameworks/kperf.framework/Versions/A/kperf", RTLD_LAZY); + if (!kperf) { + printf("kperf = %p\n", kperf); + return; + } +#define F(ret, name, ...) 
\ + name = (name##proc *)(intptr_t)(dlsym(kperf, #name)); \ + if (!name) { \ + printf("%s = %p\n", #name, (void *)(intptr_t)name); \ + return; \ + } + KPERF_LIST +#undef F + + g_config[0] = CPMU_CORE_CYCLE | CFGWORD_EL0A64EN_MASK; + + macos_configure_rdtsc(); +} + +static uint64_t +macos_rdtsc(void) +{ + if (kpc_get_thread_counters(0, COUNTERS_COUNT, g_counters)) { + printf("kpc_get_thread_counters failed\n"); + return 1; + } + return g_counters[2]; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/biextension.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/biextension.c new file mode 100644 index 0000000000..1df7ab938b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/biextension.c @@ -0,0 +1,770 @@ +#include +#include +#include +#include + +/* + * We implement the biextension arithmetic by using the cubical torsor + * representation. For now only implement the 2^e-ladder. + * + * Warning: cubicalADD is off by a factor x4 with respect to the correct + * cubical arithmetic. This does not affect the Weil pairing or the Tate + * pairing over F_{p^2} (due to the final exponentiation), but would give + * the wrong result if we compute the Tate pairing over F_p. + */ + +// this would be exactly like xADD if PQ was 'antinormalised' as (1,z) +// Cost: 3M + 2S + 3a + 3s +// Note: if needed, cubicalDBL is simply xDBL_A24 normalized and +// costs 3M + 2S + 2a + 2s + +static void +cubicalADD(ec_point_t *R, const ec_point_t *P, const ec_point_t *Q, const fp2_t *ixPQ) +{ + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_add(&t2, &Q->x, &Q->z); + fp2_sub(&t3, &Q->x, &Q->z); + fp2_mul(&t0, &t0, &t3); + fp2_mul(&t1, &t1, &t2); + fp2_add(&t2, &t0, &t1); + fp2_sub(&t3, &t0, &t1); + fp2_sqr(&R->z, &t3); + fp2_sqr(&t2, &t2); + fp2_mul(&R->x, ixPQ, &t2); +} + +// Given cubical reps of P, Q and x(P - Q) = (1 : ixPQ) +// compute P + Q, [2]Q +// Cost: 6M + 4S + 4a + 4s +static void +cubicalDBLADD(ec_point_t *PpQ, + ec_point_t *QQ, + const ec_point_t *P, + const ec_point_t *Q, + const fp2_t *ixPQ, + const ec_point_t *A24) +{ + // A24 = (A+2C/4C: 1) + assert(fp2_is_one(&A24->z)); + + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_add(&PpQ->x, &Q->x, &Q->z); + fp2_sub(&t3, &Q->x, &Q->z); + fp2_sqr(&t2, &PpQ->x); + fp2_sqr(&QQ->z, &t3); + fp2_mul(&t0, &t0, &t3); + fp2_mul(&t1, &t1, &PpQ->x); + fp2_add(&PpQ->x, &t0, &t1); + fp2_sub(&t3, &t0, &t1); + fp2_sqr(&PpQ->z, &t3); + fp2_sqr(&PpQ->x, &PpQ->x); + fp2_mul(&PpQ->x, ixPQ, &PpQ->x); + fp2_sub(&t3, &t2, &QQ->z); + fp2_mul(&QQ->x, &t2, &QQ->z); + fp2_mul(&t0, &t3, &A24->x); + fp2_add(&t0, &t0, &QQ->z); + fp2_mul(&QQ->z, &t0, &t3); +} + +// iterative biextension doubling +static void +biext_ladder_2e(uint32_t e, + ec_point_t *PnQ, + ec_point_t *nQ, + const ec_point_t *PQ, + const ec_point_t *Q, + const fp2_t *ixP, + const ec_point_t *A24) +{ + copy_point(PnQ, PQ); + copy_point(nQ, Q); + for (uint32_t i = 0; i < e; i++) { + cubicalDBLADD(PnQ, nQ, PnQ, nQ, ixP, A24); + } +} + +// Compute the monodromy ratio X/Z above as a (X:Z) point to avoid a division +// We implicitly use (1,0) as a cubical point above 0_E +static void +point_ratio(ec_point_t *R, const ec_point_t *PnQ, const ec_point_t *nQ, const ec_point_t *P) +{ + // Sanity tests + assert(ec_is_zero(nQ)); + assert(ec_is_equal(PnQ, P)); + + fp2_mul(&R->x, &nQ->x, &P->x); + fp2_copy(&R->z, &PnQ->x); +} + +// Compute the cubical translation of P by a point of 2-torsion T +static void 
+translate(ec_point_t *P, const ec_point_t *T) +{ + // When we translate, the following three things can happen: + // T = (A : 0) then the translation of P should be P + // T = (0 : B) then the translation of P = (X : Z) should be (Z : X) + // Otherwise T = (A : B) and P translates to (AX - BZ : BX - AZ) + // We compute this in constant time by computing the generic case + // and then using constant time swaps. + fp2_t PX_new, PZ_new; + + { + fp2_t t0, t1; + + // PX_new = AX - BZ + fp2_mul(&t0, &T->x, &P->x); + fp2_mul(&t1, &T->z, &P->z); + fp2_sub(&PX_new, &t0, &t1); + + // PZ_new = BX - AZ + fp2_mul(&t0, &T->z, &P->x); + fp2_mul(&t1, &T->x, &P->z); + fp2_sub(&PZ_new, &t0, &t1); + } + + // When we have A zero we should return (Z : X) + uint32_t TA_is_zero = fp2_is_zero(&T->x); + fp2_select(&PX_new, &PX_new, &P->z, TA_is_zero); + fp2_select(&PZ_new, &PZ_new, &P->x, TA_is_zero); + + // When we have B zero we should return (X : Z) + uint32_t TB_is_zero = fp2_is_zero(&T->z); + fp2_select(&PX_new, &PX_new, &P->x, TB_is_zero); + fp2_select(&PZ_new, &PZ_new, &P->z, TB_is_zero); + + // Set the point to the desired result + fp2_copy(&P->x, &PX_new); + fp2_copy(&P->z, &PZ_new); +} + +// Compute the biextension monodromy g_P,Q^{2^g} (in level 1) via the +// cubical arithmetic of P+2^e Q. +// The suffix _i means that we are given 1/x(P) as parameter. Warning: to +// get meaningful result when using the monodromy to compute pairings, we +// need P, Q, PQ, A24 to be normalised (this is not strictly necessary, but +// care need to be taken when they are not normalised. Only handle the +// normalised case for now) +static void +monodromy_i(ec_point_t *R, const pairing_params_t *pairing_data, bool swap_PQ) +{ + fp2_t ixP; + ec_point_t P, Q, PnQ, nQ; + + // When we compute the Weil pairing we need both P + [2^e]Q and + // Q + [2^e]P which we can do easily with biext_ladder_2e() below + // we use a bool to decide wether to use Q, ixP or P, ixQ in the + // ladder and P or Q in translation. 
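+    // (The ladder below runs e - 1 doubling steps, leaving nQ as the 2-torsion point +    // [2^(e-1)]Q; the cubical translations by nQ then account for the final doubling, +    // as reflected by the assertions inside point_ratio().)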
+ if (!swap_PQ) { + copy_point(&P, &pairing_data->P); + copy_point(&Q, &pairing_data->Q); + fp2_copy(&ixP, &pairing_data->ixP); + } else { + copy_point(&P, &pairing_data->Q); + copy_point(&Q, &pairing_data->P); + fp2_copy(&ixP, &pairing_data->ixQ); + } + + // Compute the biextension ladder P + [2^e]Q + biext_ladder_2e(pairing_data->e - 1, &PnQ, &nQ, &pairing_data->PQ, &Q, &ixP, &pairing_data->A24); + translate(&PnQ, &nQ); + translate(&nQ, &nQ); + point_ratio(R, &PnQ, &nQ, &P); +} + +// Normalize the points and also store 1/x(P), 1/x(Q) +static void +cubical_normalization(pairing_params_t *pairing_data, const ec_point_t *P, const ec_point_t *Q) +{ + fp2_t t[4]; + fp2_copy(&t[0], &P->x); + fp2_copy(&t[1], &P->z); + fp2_copy(&t[2], &Q->x); + fp2_copy(&t[3], &Q->z); + fp2_batched_inv(t, 4); + + // Store PZ / PX and QZ / QX + fp2_mul(&pairing_data->ixP, &P->z, &t[0]); + fp2_mul(&pairing_data->ixQ, &Q->z, &t[2]); + + // Store x(P), x(Q) normalised to (X/Z : 1) + fp2_mul(&pairing_data->P.x, &P->x, &t[1]); + fp2_mul(&pairing_data->Q.x, &Q->x, &t[3]); + fp2_set_one(&pairing_data->P.z); + fp2_set_one(&pairing_data->Q.z); +} + +// Weil pairing, PQ should be P+Q in (X:Z) coordinates +// We assume the points are normalised correctly +static void +weil_n(fp2_t *r, const pairing_params_t *pairing_data) +{ + ec_point_t R0, R1; + monodromy_i(&R0, pairing_data, true); + monodromy_i(&R1, pairing_data, false); + + fp2_mul(r, &R0.x, &R1.z); + fp2_inv(r); + fp2_mul(r, r, &R0.z); + fp2_mul(r, r, &R1.x); +} + +// Weil pairing, PQ should be P+Q in (X:Z) coordinates +// Normalise the points and call the code above +// The code will crash (division by 0) if either P or Q is (0:1) +void +weil(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E) +{ + pairing_params_t pairing_data; + // Construct the structure for the Weil pairing + // Set (PX/PZ : 1), (QX : QZ : 1), PZ/PX and QZ/QX + pairing_data.e = e; + cubical_normalization(&pairing_data, P, Q); + copy_point(&pairing_data.PQ, PQ); + + // Ensure the input curve has A24 normalised and store + // in a struct + ec_curve_normalize_A24(E); + copy_point(&pairing_data.A24, &E->A24); + + // Compute the Weil pairing e_(2^n)(P, Q) + weil_n(r, &pairing_data); +} + +// two helper functions for reducing the tate pairing +// clear_cofac clears (p + 1) // 2^f for an Fp2 value +void +clear_cofac(fp2_t *r, const fp2_t *a) +{ + digit_t exp = *p_cofactor_for_2f; + exp >>= 1; + + fp2_t x; + fp2_copy(&x, a); + fp2_copy(r, a); + + // removes cofac + while (exp > 0) { + fp2_sqr(r, r); + if (exp & 1) { + fp2_mul(r, r, &x); + } + exp >>= 1; + } +} + +// applies frobenius a + ib --> a - ib to an fp2 element +void +fp2_frob(fp2_t *out, const fp2_t *in) +{ + fp_copy(&(out->re), &(in->re)); + fp_neg(&(out->im), &(in->im)); +} + +// reduced Tate pairing, normalizes the points, assumes PQ is P+Q in (X:Z) +// coordinates. 
Computes 1/x(P) and 1/x(Q) for efficient cubical ladder +void +reduced_tate(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E) +{ + uint32_t e_full = TORSION_EVEN_POWER; + uint32_t e_diff = e_full - e; + ec_point_t R; + pairing_params_t pairing_data; + + // Construct the structure for the Weil pairing + // Set (PX/PZ : 1), (QX : QZ : 1), PZ/PX and QZ/QX + pairing_data.e = e; + cubical_normalization(&pairing_data, P, Q); + copy_point(&pairing_data.PQ, PQ); + + // Ensure the input curve has A24 normalised and store + // in a struct + ec_curve_normalize_A24(E); + copy_point(&pairing_data.A24, &E->A24); + + monodromy_i(&R, &pairing_data, true); + + // we get unreduced tate as R.X, R.Z + // reduced tate is -(R.Z/R.X)^((p^2 - 1) div 2^f) + // we reuse R.X and R.Z to split reduction step ^(p-1) into frobenius and ^-1 + fp2_t frob, tmp; + fp2_copy(&tmp, &R.x); + fp2_frob(&frob, &R.x); + fp2_mul(&R.x, &R.z, &frob); + fp2_frob(&frob, &R.z); + fp2_mul(&R.z, &tmp, &frob); + fp2_inv(&R.x); + fp2_mul(r, &R.x, &R.z); + + clear_cofac(r, r); + // clear remaining 2^e_diff + for (uint32_t j = 0; j < e_diff; j++) { + fp2_sqr(r, r); + } +} + +// Functions to compute discrete logs by computing the Weil pairing of points +// followed by computing the dlog in Fp^2 +// (If we work with full order points, it would be faster to use the Tate +// pairings rather than the Weil pairings; this is not implemented yet) + +// recursive dlog function +static bool +fp2_dlog_2e_rec(digit_t *a, long len, fp2_t *pows_f, fp2_t *pows_g, long stacklen) +{ + if (len == 0) { + // *a = 0; + for (int i = 0; i < NWORDS_ORDER; i++) { + a[i] = 0; + } + return true; + } else if (len == 1) { + if (fp2_is_one(&pows_f[stacklen - 1])) { + // a = 0; + for (int i = 0; i < NWORDS_ORDER; i++) { + a[i] = 0; + } + for (int i = 0; i < stacklen - 1; ++i) { + fp2_sqr(&pows_g[i], &pows_g[i]); // new_g = g^2 + } + return true; + } else if (fp2_is_equal(&pows_f[stacklen - 1], &pows_g[stacklen - 1])) { + // a = 1; + a[0] = 1; + for (int i = 1; i < NWORDS_ORDER; i++) { + a[i] = 0; + } + for (int i = 0; i < stacklen - 1; ++i) { + fp2_mul(&pows_f[i], &pows_f[i], &pows_g[i]); // new_f = f*g + fp2_sqr(&pows_g[i], &pows_g[i]); // new_g = g^2 + } + return true; + } else { + return false; + } + } else { + long right = (double)len * 0.5; + long left = len - right; + pows_f[stacklen] = pows_f[stacklen - 1]; + pows_g[stacklen] = pows_g[stacklen - 1]; + for (int i = 0; i < left; i++) { + fp2_sqr(&pows_f[stacklen], &pows_f[stacklen]); + fp2_sqr(&pows_g[stacklen], &pows_g[stacklen]); + } + // uint32_t dlp1 = 0, dlp2 = 0; + digit_t dlp1[NWORDS_ORDER], dlp2[NWORDS_ORDER]; + bool ok; + ok = fp2_dlog_2e_rec(dlp1, right, pows_f, pows_g, stacklen + 1); + if (!ok) + return false; + ok = fp2_dlog_2e_rec(dlp2, left, pows_f, pows_g, stacklen); + if (!ok) + return false; + // a = dlp1 + 2^right * dlp2 + multiple_mp_shiftl(dlp2, right, NWORDS_ORDER); + mp_add(a, dlp2, dlp1, NWORDS_ORDER); + + return true; + } +} + +// compute DLP: compute scal such that f = g^scal with f, 1/g as input +static bool +fp2_dlog_2e(digit_t *scal, const fp2_t *f, const fp2_t *g_inverse, int e) +{ + long log, len = e; + for (log = 0; len > 1; len >>= 1) + log++; + log += 1; + + fp2_t pows_f[log], pows_g[log]; + pows_f[0] = *f; + pows_g[0] = *g_inverse; + + for (int i = 0; i < NWORDS_ORDER; i++) { + scal[i] = 0; + } + + bool ok = fp2_dlog_2e_rec(scal, e, pows_f, pows_g, 1); + assert(ok); + + return ok; +} + +// Normalize the bases (P, Q), (R, S) and store 
their inverse +// and additionally normalise the curve to (A/C : 1) +static void +cubical_normalization_dlog(pairing_dlog_params_t *pairing_dlog_data, ec_curve_t *curve) +{ + fp2_t t[11]; + ec_basis_t *PQ = &pairing_dlog_data->PQ; + ec_basis_t *RS = &pairing_dlog_data->RS; + fp2_copy(&t[0], &PQ->P.x); + fp2_copy(&t[1], &PQ->P.z); + fp2_copy(&t[2], &PQ->Q.x); + fp2_copy(&t[3], &PQ->Q.z); + fp2_copy(&t[4], &PQ->PmQ.x); + fp2_copy(&t[5], &PQ->PmQ.z); + fp2_copy(&t[6], &RS->P.x); + fp2_copy(&t[7], &RS->P.z); + fp2_copy(&t[8], &RS->Q.x); + fp2_copy(&t[9], &RS->Q.z); + fp2_copy(&t[10], &curve->C); + + fp2_batched_inv(t, 11); + + fp2_mul(&pairing_dlog_data->ixP, &PQ->P.z, &t[0]); + fp2_mul(&PQ->P.x, &PQ->P.x, &t[1]); + fp2_set_one(&PQ->P.z); + + fp2_mul(&pairing_dlog_data->ixQ, &PQ->Q.z, &t[2]); + fp2_mul(&PQ->Q.x, &PQ->Q.x, &t[3]); + fp2_set_one(&PQ->Q.z); + + fp2_mul(&PQ->PmQ.x, &PQ->PmQ.x, &t[5]); + fp2_set_one(&PQ->PmQ.z); + + fp2_mul(&pairing_dlog_data->ixR, &RS->P.z, &t[6]); + fp2_mul(&RS->P.x, &RS->P.x, &t[7]); + fp2_set_one(&RS->P.z); + + fp2_mul(&pairing_dlog_data->ixS, &RS->Q.z, &t[8]); + fp2_mul(&RS->Q.x, &RS->Q.x, &t[9]); + fp2_set_one(&RS->Q.z); + + fp2_mul(&curve->A, &curve->A, &t[10]); + fp2_set_one(&curve->C); +} + +// Given two bases and basis = compute +// x(P - R), x(P - S), x(R - Q), x(S - Q) +static void +compute_difference_points(pairing_dlog_params_t *pairing_dlog_data, ec_curve_t *curve) +{ + jac_point_t xyP, xyQ, xyR, xyS, temp; + + // lifting the two basis points, assumes that x(P) and x(R) + // and the curve itself are normalised to (X : 1) + lift_basis_normalized(&xyP, &xyQ, &pairing_dlog_data->PQ, curve); + lift_basis_normalized(&xyR, &xyS, &pairing_dlog_data->RS, curve); + + // computation of the differences + // x(P - R) + jac_neg(&temp, &xyR); + ADD(&temp, &temp, &xyP, curve); + jac_to_xz(&pairing_dlog_data->diff.PmR, &temp); + + // x(P - S) + jac_neg(&temp, &xyS); + ADD(&temp, &temp, &xyP, curve); + jac_to_xz(&pairing_dlog_data->diff.PmS, &temp); + + // x(R - Q) + jac_neg(&temp, &xyQ); + ADD(&temp, &temp, &xyR, curve); + jac_to_xz(&pairing_dlog_data->diff.RmQ, &temp); + + // x(S - Q) + jac_neg(&temp, &xyQ); + ADD(&temp, &temp, &xyS, curve); + jac_to_xz(&pairing_dlog_data->diff.SmQ, &temp); +} + +// Inline all the Weil pairing computations needed for ec_dlog_2_weil +static void +weil_dlog(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, pairing_dlog_params_t *pairing_dlog_data) +{ + + ec_point_t nP, nQ, nR, nS, nPQ, PnQ, nPR, PnR, nPS, PnS, nRQ, RnQ, nSQ, SnQ; + + copy_point(&nP, &pairing_dlog_data->PQ.P); + copy_point(&nQ, &pairing_dlog_data->PQ.Q); + copy_point(&nR, &pairing_dlog_data->RS.P); + copy_point(&nS, &pairing_dlog_data->RS.Q); + copy_point(&nPQ, &pairing_dlog_data->PQ.PmQ); + copy_point(&PnQ, &pairing_dlog_data->PQ.PmQ); + copy_point(&nPR, &pairing_dlog_data->diff.PmR); + copy_point(&nPS, &pairing_dlog_data->diff.PmS); + copy_point(&PnR, &pairing_dlog_data->diff.PmR); + copy_point(&PnS, &pairing_dlog_data->diff.PmS); + copy_point(&nRQ, &pairing_dlog_data->diff.RmQ); + copy_point(&nSQ, &pairing_dlog_data->diff.SmQ); + copy_point(&RnQ, &pairing_dlog_data->diff.RmQ); + copy_point(&SnQ, &pairing_dlog_data->diff.SmQ); + + for (uint32_t i = 0; i < pairing_dlog_data->e - 1; i++) { + cubicalADD(&nPQ, &nPQ, &nP, &pairing_dlog_data->ixQ); + cubicalADD(&nPR, &nPR, &nP, &pairing_dlog_data->ixR); + cubicalDBLADD(&nPS, &nP, &nPS, &nP, &pairing_dlog_data->ixS, &pairing_dlog_data->A24); + + cubicalADD(&PnQ, &PnQ, &nQ, &pairing_dlog_data->ixP); + cubicalADD(&RnQ, 
&RnQ, &nQ, &pairing_dlog_data->ixR); + cubicalDBLADD(&SnQ, &nQ, &SnQ, &nQ, &pairing_dlog_data->ixS, &pairing_dlog_data->A24); + + cubicalADD(&PnR, &PnR, &nR, &pairing_dlog_data->ixP); + cubicalDBLADD(&nRQ, &nR, &nRQ, &nR, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + + cubicalADD(&PnS, &PnS, &nS, &pairing_dlog_data->ixP); + cubicalDBLADD(&nSQ, &nS, &nSQ, &nS, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + } + + // weil(&w0,e,&PQ->P,&PQ->Q,&PQ->PmQ,&A24); + translate(&nPQ, &nP); + translate(&nPR, &nP); + translate(&nPS, &nP); + translate(&PnQ, &nQ); + translate(&RnQ, &nQ); + translate(&SnQ, &nQ); + translate(&PnR, &nR); + translate(&nRQ, &nR); + translate(&PnS, &nS); + translate(&nSQ, &nS); + + translate(&nP, &nP); + translate(&nQ, &nQ); + translate(&nR, &nR); + translate(&nS, &nS); + + // computation of the reference weil pairing + ec_point_t T0, T1; + fp2_t w1[5], w2[5]; + + // e(P, Q) = w0 + point_ratio(&T0, &nPQ, &nP, &pairing_dlog_data->PQ.Q); + point_ratio(&T1, &PnQ, &nQ, &pairing_dlog_data->PQ.P); + // For the first element we need it's inverse for + // fp2_dlog_2e so we swap w1 and w2 here to save inversions + fp2_mul(&w2[0], &T0.x, &T1.z); + fp2_mul(&w1[0], &T1.x, &T0.z); + + // e(P,R) = w0^r2 + point_ratio(&T0, &nPR, &nP, &pairing_dlog_data->RS.P); + point_ratio(&T1, &PnR, &nR, &pairing_dlog_data->PQ.P); + fp2_mul(&w1[1], &T0.x, &T1.z); + fp2_mul(&w2[1], &T1.x, &T0.z); + + // e(R,Q) = w0^r1 + point_ratio(&T0, &nRQ, &nR, &pairing_dlog_data->PQ.Q); + point_ratio(&T1, &RnQ, &nQ, &pairing_dlog_data->RS.P); + fp2_mul(&w1[2], &T0.x, &T1.z); + fp2_mul(&w2[2], &T1.x, &T0.z); + + // e(P,S) = w0^s2 + point_ratio(&T0, &nPS, &nP, &pairing_dlog_data->RS.Q); + point_ratio(&T1, &PnS, &nS, &pairing_dlog_data->PQ.P); + fp2_mul(&w1[3], &T0.x, &T1.z); + fp2_mul(&w2[3], &T1.x, &T0.z); + + // e(S,Q) = w0^s1 + point_ratio(&T0, &nSQ, &nS, &pairing_dlog_data->PQ.Q); + point_ratio(&T1, &SnQ, &nQ, &pairing_dlog_data->RS.Q); + fp2_mul(&w1[4], &T0.x, &T1.z); + fp2_mul(&w2[4], &T1.x, &T0.z); + + fp2_batched_inv(w1, 5); + for (int i = 0; i < 5; i++) { + fp2_mul(&w1[i], &w1[i], &w2[i]); + } + + fp2_dlog_2e(r2, &w1[1], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(r1, &w1[2], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s2, &w1[3], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s1, &w1[4], &w1[0], pairing_dlog_data->e); +} + +void +ec_dlog_2_weil(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e) +{ + assert(test_point_order_twof(&PQ->Q, curve, e)); + + // precomputing the correct curve data + ec_curve_normalize_A24(curve); + + pairing_dlog_params_t pairing_dlog_data; + pairing_dlog_data.e = e; + pairing_dlog_data.PQ = *PQ; + pairing_dlog_data.RS = *RS; + pairing_dlog_data.A24 = curve->A24; + + cubical_normalization_dlog(&pairing_dlog_data, curve); + compute_difference_points(&pairing_dlog_data, curve); + + weil_dlog(r1, r2, s1, s2, &pairing_dlog_data); + +#ifndef NDEBUG + ec_point_t test; + ec_biscalar_mul(&test, r1, r2, e, PQ, curve); + // R = [r1]P + [r2]Q + assert(ec_is_equal(&test, &RS->P)); + ec_biscalar_mul(&test, s1, s2, e, PQ, curve); + // S = [s1]P + [s2]Q + assert(ec_is_equal(&test, &RS->Q)); +#endif +} + +// Inline all the Tate pairing computations needed for ec_dlog_2_weil +// including reduction, assumes a bases PQ of full E[2^e_full] torsion +// and a bases RS of smaller E[2^e] torsion +static void +tate_dlog_partial(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, pairing_dlog_params_t *pairing_dlog_data) 
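+// Compared with weil_dlog above, only one cubical ladder and one point_ratio
+// are needed per pairing here, since the Tate pairing with a point of full
+// even order as first argument is already non-degenerate once reduced.  The
+// ^(p-1) part of the reduction is done with the Frobenius/conjugation trick on
+// numerator and denominator while they are still projective, a single batched
+// inversion then normalises all five values, and clear_cofac plus the final
+// 2^e_diff squarings finish the reduction before the four discrete logs.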
+{ + + uint32_t e_full = TORSION_EVEN_POWER; + uint32_t e_diff = e_full - pairing_dlog_data->e; + + ec_point_t nP, nQ, nR, nS, nPQ, PnR, PnS, nRQ, nSQ; + + copy_point(&nP, &pairing_dlog_data->PQ.P); + copy_point(&nQ, &pairing_dlog_data->PQ.Q); + copy_point(&nR, &pairing_dlog_data->RS.P); + copy_point(&nS, &pairing_dlog_data->RS.Q); + copy_point(&nPQ, &pairing_dlog_data->PQ.PmQ); + copy_point(&PnR, &pairing_dlog_data->diff.PmR); + copy_point(&PnS, &pairing_dlog_data->diff.PmS); + copy_point(&nRQ, &pairing_dlog_data->diff.RmQ); + copy_point(&nSQ, &pairing_dlog_data->diff.SmQ); + + for (uint32_t i = 0; i < e_full - 1; i++) { + cubicalDBLADD(&nPQ, &nP, &nPQ, &nP, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + } + + for (uint32_t i = 0; i < pairing_dlog_data->e - 1; i++) { + cubicalADD(&PnR, &PnR, &nR, &pairing_dlog_data->ixP); + cubicalDBLADD(&nRQ, &nR, &nRQ, &nR, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + + cubicalADD(&PnS, &PnS, &nS, &pairing_dlog_data->ixP); + cubicalDBLADD(&nSQ, &nS, &nSQ, &nS, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + } + + translate(&nPQ, &nP); + translate(&PnR, &nR); + translate(&nRQ, &nR); + translate(&PnS, &nS); + translate(&nSQ, &nS); + + translate(&nP, &nP); + translate(&nQ, &nQ); + translate(&nR, &nR); + translate(&nS, &nS); + + // computation of the reference Tate pairing + ec_point_t T0; + fp2_t w1[5], w2[5]; + + // t(P, Q)^(2^e_diff) = w0 + point_ratio(&T0, &nPQ, &nP, &pairing_dlog_data->PQ.Q); + fp2_copy(&w1[0], &T0.x); + fp2_copy(&w2[0], &T0.z); + + // t(R,P) = w0^r2 + point_ratio(&T0, &PnR, &nR, &pairing_dlog_data->PQ.P); + fp2_copy(&w1[1], &T0.x); + fp2_copy(&w2[1], &T0.z); + + // t(R,Q) = w0^r1 + point_ratio(&T0, &nRQ, &nR, &pairing_dlog_data->PQ.Q); + fp2_copy(&w2[2], &T0.x); + fp2_copy(&w1[2], &T0.z); + + // t(S,P) = w0^s2 + point_ratio(&T0, &PnS, &nS, &pairing_dlog_data->PQ.P); + fp2_copy(&w1[3], &T0.x); + fp2_copy(&w2[3], &T0.z); + + // t(S,Q) = w0^s1 + point_ratio(&T0, &nSQ, &nS, &pairing_dlog_data->PQ.Q); + fp2_copy(&w2[4], &T0.x); + fp2_copy(&w1[4], &T0.z); + + // batched reduction using projective representation + for (int i = 0; i < 5; i++) { + fp2_t frob, tmp; + fp2_copy(&tmp, &w1[i]); + // inline frobenius for ^p + // multiply by inverse to get ^(p-1) + fp2_frob(&frob, &w1[i]); + fp2_mul(&w1[i], &w2[i], &frob); + + // repeat for denom + fp2_frob(&frob, &w2[i]); + fp2_mul(&w2[i], &tmp, &frob); + } + + // batched normalization + fp2_batched_inv(w2, 5); + for (int i = 0; i < 5; i++) { + fp2_mul(&w1[i], &w1[i], &w2[i]); + } + + for (int i = 0; i < 5; i++) { + clear_cofac(&w1[i], &w1[i]); + + // removes 2^e_diff + for (uint32_t j = 0; j < e_diff; j++) { + fp2_sqr(&w1[i], &w1[i]); + } + } + + fp2_dlog_2e(r2, &w1[1], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(r1, &w1[2], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s2, &w1[3], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s1, &w1[4], &w1[0], pairing_dlog_data->e); +} + +void +ec_dlog_2_tate(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + const ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e) +{ + // assume PQ is a full torsion basis + // returns a, b, c, d such that R = [a]P + [b]Q, S = [c]P + [d]Q + +#ifndef NDEBUG + int e_full = TORSION_EVEN_POWER; + int e_diff = e_full - e; +#endif + assert(test_basis_order_twof(PQ, curve, e_full)); + + // precomputing the correct curve data + ec_curve_normalize_A24(curve); + + pairing_dlog_params_t pairing_dlog_data; + pairing_dlog_data.e = e; + pairing_dlog_data.PQ = *PQ; + pairing_dlog_data.RS = *RS; + 
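+    // The bases are copied by value here: cubical_normalization_dlog below
+    // rewrites these copies in place (Z = 1, with the inverse x-coordinates
+    // cached) and leaves the caller's bases untouched, while A24 is the
+    // normalised ((A+2)/4 : 1) point prepared by ec_curve_normalize_A24 above.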
pairing_dlog_data.A24 = curve->A24; + + cubical_normalization_dlog(&pairing_dlog_data, curve); + compute_difference_points(&pairing_dlog_data, curve); + tate_dlog_partial(r1, r2, s1, s2, &pairing_dlog_data); + +#ifndef NDEBUG + ec_point_t test; + ec_biscalar_mul(&test, r1, r2, e, PQ, curve); + ec_dbl_iter(&test, e_diff, &test, curve); + // R = [r1]P + [r2]Q + assert(ec_is_equal(&test, &RS->P)); + + ec_biscalar_mul(&test, s1, s2, e, PQ, curve); + ec_dbl_iter(&test, e_diff, &test, curve); + // S = [s1]P + [s2]Q + assert(ec_is_equal(&test, &RS->Q)); +#endif +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/biextension.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/biextension.h new file mode 100644 index 0000000000..1a50fcc738 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/biextension.h @@ -0,0 +1,82 @@ +#ifndef _BIEXT_H_ +#define _BIEXT_H_ + +#include +#include + +typedef struct pairing_params +{ + uint32_t e; // Points have order 2^e + ec_point_t P; // x(P) + ec_point_t Q; // x(Q) + ec_point_t PQ; // x(P-Q) = (PQX/PQZ : 1) + fp2_t ixP; // PZ/PX + fp2_t ixQ; // QZ/QX + ec_point_t A24; // ((A+2)/4 : 1) +} pairing_params_t; + +// For two bases and store: +// x(P - R), x(P - S), x(R - Q), x(S - Q) +typedef struct pairing_dlog_diff_points +{ + ec_point_t PmR; // x(P - R) + ec_point_t PmS; // x(P - S) + ec_point_t RmQ; // x(R - Q) + ec_point_t SmQ; // x(S - Q) +} pairing_dlog_diff_points_t; + +typedef struct pairing_dlog_params +{ + uint32_t e; // Points have order 2^e + ec_basis_t PQ; // x(P), x(Q), x(P-Q) + ec_basis_t RS; // x(R), x(S), x(R-S) + pairing_dlog_diff_points_t diff; // x(P - R), x(P - S), x(R - Q), x(S - Q) + fp2_t ixP; // PZ/PX + fp2_t ixQ; // QZ/QX + fp2_t ixR; // RZ/RX + fp2_t ixS; // SZ/SX + ec_point_t A24; // ((A+2)/4 : 1) +} pairing_dlog_params_t; + +// Computes e = e_{2^e}(P, Q) using biextension ladder +void weil(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E); + +// Computes (reduced) z = t_{2^e}(P, Q) using biextension ladder +void reduced_tate(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E); + +// Given two bases and computes scalars +// such that R = [r1]P + [r2]Q, S = [s1]P + [s2]Q +void ec_dlog_2_weil(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e); + +// Given two bases and +// where is a basis for E[2^f] +// the full 2-torsion, and a basis +// for smaller torsion E[2^e] +// computes scalars r1, r2, s1, s2 +// such that R = [r1]P + [r2]Q, S = [s1]P + [s2]Q +void ec_dlog_2_tate(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + const ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e); + +void ec_dlog_2_tate_to_full(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + ec_basis_t *PQ, + ec_basis_t *RS, + ec_curve_t *curve, + int e); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/common.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/common.c new file mode 100644 index 0000000000..d393e9cb11 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/common.c @@ -0,0 +1,88 @@ +#include +#include +#include +#include +#include +#include + +void +public_key_init(public_key_t *pk) +{ + ec_curve_init(&pk->curve); +} + +void +public_key_finalize(public_key_t *pk) +{ +} + +// compute the challenge as the hash of the message and the commitment curve and public key 
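+// Concretely, the function below encodes the j-invariants of the public-key
+// curve and of the commitment curve, absorbs them into SHAKE256 together with
+// the message, then chains several SHAKE256 passes (HASH_ITERATIONS in total)
+// over a 2*SECURITY_BITS-bit intermediate value, before a last squeeze that is
+// truncated to TORSION_EVEN_POWER - SQIsign_response_length bits and finally
+// reduced modulo 2^SECURITY_BITS to obtain the challenge scalar.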
+void +hash_to_challenge(scalar_t *scalar, + const public_key_t *pk, + const ec_curve_t *com_curve, + const unsigned char *message, + size_t length) +{ + unsigned char buf[2 * FP2_ENCODED_BYTES]; + { + fp2_t j1, j2; + ec_j_inv(&j1, &pk->curve); + ec_j_inv(&j2, com_curve); + fp2_encode(buf, &j1); + fp2_encode(buf + FP2_ENCODED_BYTES, &j2); + } + + { + // The type scalar_t represents an element of GF(p), which is about + // 2*lambda bits, where lambda = 128, 192 or 256, according to the + // security level. Thus, the variable scalar should have enough memory + // for the values produced by SHAKE256 in the intermediate iterations. + + shake256incctx ctx; + + size_t hash_bytes = ((2 * SECURITY_BITS) + 7) / 8; + size_t limbs = (hash_bytes + sizeof(digit_t) - 1) / sizeof(digit_t); + size_t bits = (2 * SECURITY_BITS) % RADIX; + digit_t mask = ((digit_t)-1) >> ((RADIX - bits) % RADIX); +#ifdef TARGET_BIG_ENDIAN + mask = BSWAP_DIGIT(mask); +#endif + + shake256_inc_init(&ctx); + shake256_inc_absorb(&ctx, buf, 2 * FP2_ENCODED_BYTES); + shake256_inc_absorb(&ctx, message, length); + shake256_inc_finalize(&ctx); + shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + (*scalar)[limbs - 1] &= mask; + for (int i = 2; i < HASH_ITERATIONS; i++) { + shake256_inc_init(&ctx); + shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); + shake256_inc_finalize(&ctx); + shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + (*scalar)[limbs - 1] &= mask; + } + shake256_inc_init(&ctx); + shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); + shake256_inc_finalize(&ctx); + + hash_bytes = ((TORSION_EVEN_POWER - SQIsign_response_length) + 7) / 8; + limbs = (hash_bytes + sizeof(digit_t) - 1) / sizeof(digit_t); + bits = (TORSION_EVEN_POWER - SQIsign_response_length) % RADIX; + mask = ((digit_t)-1) >> ((RADIX - bits) % RADIX); +#ifdef TARGET_BIG_ENDIAN + mask = BSWAP_DIGIT(mask); +#endif + + memset(*scalar, 0, NWORDS_ORDER * sizeof(digit_t)); + shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + (*scalar)[limbs - 1] &= mask; + +#ifdef TARGET_BIG_ENDIAN + for (int i = 0; i < NWORDS_ORDER; i++) + (*scalar)[i] = BSWAP_DIGIT((*scalar)[i]); +#endif + + mp_mod_2exp(*scalar, SECURITY_BITS, NWORDS_ORDER); + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.c new file mode 100644 index 0000000000..983ba49adf --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.c @@ -0,0 +1,201 @@ +/* Copyright (c) 2017, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ + +/*************************************************************************** + * Small modification by Nir Drucker and Shay Gueron + * AWS Cryptographic Algorithms Group + * (ndrucker@amazon.com, gueron@amazon.com) + * include: + * 1) Use memcpy/memset instead of OPENSSL_memcpy/memset + * 2) Include aes.h as the underlying aes code + * 3) Modifying the drbg structure + * ***************************************************************************/ + +#include "ctr_drbg.h" +#include + + +// Section references in this file refer to SP 800-90Ar1: +// http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-90Ar1.pdf + +int CTR_DRBG_init(CTR_DRBG_STATE *drbg, + const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], + const uint8_t *personalization, size_t personalization_len) { + // Section 10.2.1.3.1 + if (personalization_len > CTR_DRBG_ENTROPY_LEN) { + return 0; + } + + uint8_t seed_material[CTR_DRBG_ENTROPY_LEN]; + memcpy(seed_material, entropy, CTR_DRBG_ENTROPY_LEN); + + for (size_t i = 0; i < personalization_len; i++) { + seed_material[i] ^= personalization[i]; + } + + // Section 10.2.1.2 + // kInitMask is the result of encrypting blocks with big-endian value 1, 2 + // and 3 with the all-zero AES-256 key. + static const uint8_t kInitMask[CTR_DRBG_ENTROPY_LEN] = { + 0x53, 0x0f, 0x8a, 0xfb, 0xc7, 0x45, 0x36, 0xb9, 0xa9, 0x63, 0xb4, 0xf1, + 0xc4, 0xcb, 0x73, 0x8b, 0xce, 0xa7, 0x40, 0x3d, 0x4d, 0x60, 0x6b, 0x6e, + 0x07, 0x4e, 0xc5, 0xd3, 0xba, 0xf3, 0x9d, 0x18, 0x72, 0x60, 0x03, 0xca, + 0x37, 0xa6, 0x2a, 0x74, 0xd1, 0xa2, 0xf5, 0x8e, 0x75, 0x06, 0x35, 0x8e, + }; + + for (size_t i = 0; i < sizeof(kInitMask); i++) { + seed_material[i] ^= kInitMask[i]; + } + + aes256_key_t key; + memcpy(key.raw, seed_material, 32); + memcpy(drbg->counter.bytes, seed_material + 32, 16); + + aes256_key_expansion(&drbg->ks, &key); + drbg->reseed_counter = 1; + + return 1; +} + +// ctr_inc adds |n| to the last four bytes of |drbg->counter|, treated as a +// big-endian number. +static void ctr32_add(CTR_DRBG_STATE *drbg, uint32_t n) { + drbg->counter.words[3] = + CRYPTO_bswap4(CRYPTO_bswap4(drbg->counter.words[3]) + n); +} + +static int ctr_drbg_update(CTR_DRBG_STATE *drbg, const uint8_t *data, + size_t data_len) { + // Per section 10.2.1.2, |data_len| must be |CTR_DRBG_ENTROPY_LEN|. Here, we + // allow shorter inputs and right-pad them with zeros. This is equivalent to + // the specified algorithm but saves a copy in |CTR_DRBG_generate|. 
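+  // Update function from SP 800-90Ar1, section 10.2.1.2: produce
+  // CTR_DRBG_ENTROPY_LEN (48) bytes of AES-256-CTR keystream, XOR in the
+  // (implicitly right-padded) caller-provided data, then split the result into
+  // a fresh 32-byte key and a fresh 16-byte counter V for subsequent calls.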
+ if (data_len > CTR_DRBG_ENTROPY_LEN) { + return 0; + } + + uint8_t temp[CTR_DRBG_ENTROPY_LEN]; + for (size_t i = 0; i < CTR_DRBG_ENTROPY_LEN; i += AES_BLOCK_SIZE) { + ctr32_add(drbg, 1); + aes256_enc(temp + i, drbg->counter.bytes, &drbg->ks); + } + + for (size_t i = 0; i < data_len; i++) { + temp[i] ^= data[i]; + } + + aes256_key_t key; + memcpy(key.raw, temp, 32); + memcpy(drbg->counter.bytes, temp + 32, 16); + aes256_key_expansion(&drbg->ks, &key); + + return 1; +} + +int CTR_DRBG_reseed(CTR_DRBG_STATE *drbg, + const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], + const uint8_t *additional_data, + size_t additional_data_len) { + // Section 10.2.1.4 + uint8_t entropy_copy[CTR_DRBG_ENTROPY_LEN]; + + if (additional_data_len > 0) { + if (additional_data_len > CTR_DRBG_ENTROPY_LEN) { + return 0; + } + + memcpy(entropy_copy, entropy, CTR_DRBG_ENTROPY_LEN); + for (size_t i = 0; i < additional_data_len; i++) { + entropy_copy[i] ^= additional_data[i]; + } + + entropy = entropy_copy; + } + + if (!ctr_drbg_update(drbg, entropy, CTR_DRBG_ENTROPY_LEN)) { + return 0; + } + + drbg->reseed_counter = 1; + + return 1; +} + +int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out, size_t out_len, + const uint8_t *additional_data, + size_t additional_data_len) { + if (additional_data_len != 0 && + !ctr_drbg_update(drbg, additional_data, additional_data_len)) { + return 0; + } + + // kChunkSize is used to interact better with the cache. Since the AES-CTR + // code assumes that it's encrypting rather than just writing keystream, the + // buffer has to be zeroed first. Without chunking, large reads would zero + // the whole buffer, flushing the L1 cache, and then do another pass (missing + // the cache every time) to “encrypt” it. The code can avoid this by + // chunking. + static const size_t kChunkSize = 8 * 1024; + + while (out_len >= AES_BLOCK_SIZE) { + size_t todo = kChunkSize; + if (todo > out_len) { + todo = out_len; + } + + todo &= ~(AES_BLOCK_SIZE - 1); + + const size_t num_blocks = todo / AES_BLOCK_SIZE; + if (1) { + memset(out, 0, todo); + ctr32_add(drbg, 1); +#ifdef VAES512 + aes256_ctr_enc512(out, drbg->counter.bytes, num_blocks, &drbg->ks); +#elif defined(VAES256) + aes256_ctr_enc256(out, drbg->counter.bytes, num_blocks, &drbg->ks); +#else + aes256_ctr_enc(out, drbg->counter.bytes, num_blocks, &drbg->ks); +#endif + ctr32_add(drbg, num_blocks - 1); + } else { + for (size_t i = 0; i < todo; i += AES_BLOCK_SIZE) { + ctr32_add(drbg, 1); + aes256_enc(&out[i], drbg->counter.bytes, &drbg->ks); + } + } + + out += todo; + out_len -= todo; + } + + if (out_len > 0) { + uint8_t block[AES_BLOCK_SIZE]; + ctr32_add(drbg, 1); + aes256_enc(block, drbg->counter.bytes, &drbg->ks); + + memcpy(out, block, out_len); + } + + // Right-padding |additional_data| in step 2.2 is handled implicitly by + // |ctr_drbg_update|, to save a copy. + if (!ctr_drbg_update(drbg, additional_data, additional_data_len)) { + return 0; + } + + drbg->reseed_counter++; + return 1; +} + +void CTR_DRBG_clear(CTR_DRBG_STATE *drbg) { + secure_clean((uint8_t *)drbg, sizeof(CTR_DRBG_STATE)); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.h new file mode 100644 index 0000000000..2d1b1f3f0c --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.h @@ -0,0 +1,78 @@ +/* Copyright (c) 2017, Google Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +/*************************************************************************** +* Small modification by Nir Drucker and Shay Gueron +* AWS Cryptographic Algorithms Group +* (ndrucker@amazon.com, gueron@amazon.com) +* include: +* 1) Use memcpy/memset instead of OPENSSL_memcpy/memset +* 2) Include aes.h as the underlying aes code +* 3) Modifying the drbg structure +* ***************************************************************************/ + +#pragma once + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "aes_ni.h" + +// CTR_DRBG_STATE contains the state of a CTR_DRBG based on AES-256. See SP +// 800-90Ar1. +typedef struct { + aes256_ks_t ks; + union { + uint8_t bytes[16]; + uint32_t words[4]; + } counter; + uint64_t reseed_counter; +} CTR_DRBG_STATE; + +// See SP 800-90Ar1, table 3. +#define CTR_DRBG_ENTROPY_LEN 48 + +// CTR_DRBG_init initialises |*drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of +// entropy in |entropy| and, optionally, a personalization string up to +// |CTR_DRBG_ENTROPY_LEN| bytes in length. It returns one on success and zero +// on error. +int CTR_DRBG_init(CTR_DRBG_STATE *drbg, + const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], + const uint8_t *personalization, + size_t personalization_len); + +// CTR_DRBG_reseed reseeds |drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of entropy +// in |entropy| and, optionally, up to |CTR_DRBG_ENTROPY_LEN| bytes of +// additional data. It returns one on success or zero on error. +int CTR_DRBG_reseed(CTR_DRBG_STATE *drbg, + const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], + const uint8_t *additional_data, + size_t additional_data_len); + +// CTR_DRBG_generate processes to up |CTR_DRBG_ENTROPY_LEN| bytes of additional +// data (if any) and then writes |out_len| random bytes to |out|. It returns one on success or +// zero on error. +int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out, + size_t out_len, + const uint8_t *additional_data, + size_t additional_data_len); + +// CTR_DRBG_clear zeroises the state of |drbg|. +void CTR_DRBG_clear(CTR_DRBG_STATE *drbg); + + +#if defined(__cplusplus) +} // extern C +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/defs.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/defs.h new file mode 100644 index 0000000000..09bb8b5eba --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/defs.h @@ -0,0 +1,63 @@ +/*************************************************************************** +* Written by Nir Drucker and Shay Gueron +* AWS Cryptographic Algorithms Group +* (ndrucker@amazon.com, gueron@amazon.com) +* +* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +* +* Licensed under the Apache License, Version 2.0 (the "License"). +* You may not use this file except in compliance with the License. 
+* A copy of the License is located at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* or in the "license" file accompanying this file. This file is distributed +* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +* express or implied. See the License for the specific language governing +* permissions and limitations under the License. +* The license is detailed in the file LICENSE.txt, and applies to this file. +* ***************************************************************************/ + +#pragma once + +#include + +#ifdef __cplusplus + #define EXTERNC extern "C" +#else + #define EXTERNC +#endif + +// For code clarity. +#define IN +#define OUT + +#define ALIGN(n) __attribute__((aligned(n))) +#define _INLINE_ static inline + +typedef enum +{ + SUCCESS=0, + ERROR=1 +} status_t; + +#define SUCCESS 0 +#define ERROR 1 +#define GUARD(func) {if(SUCCESS != func) {return ERROR;}} + +#if defined(__GNUC__) && __GNUC__ >= 2 +static inline uint32_t CRYPTO_bswap4(uint32_t x) { + return __builtin_bswap32(x); +} +#endif + +_INLINE_ void secure_clean(OUT uint8_t *p, IN const uint32_t len) +{ +#ifdef _WIN32 + SecureZeroMemory(p, len); +#else + typedef void *(*memset_t)(void *, int, size_t); + static volatile memset_t memset_func = memset; + memset_func(p, 0, len); +#endif +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c new file mode 100644 index 0000000000..171473d481 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c @@ -0,0 +1,1172 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int +_fixed_degree_isogeny_impl(quat_left_ideal_t *lideal, + const ibz_t *u, + bool small, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP, + const int index_alternate_order) +{ + + // var declaration + int ret; + ibz_t two_pow, tmp; + quat_alg_elem_t theta; + + ec_curve_t E0; + copy_curve(&E0, &CURVES_WITH_ENDOMORPHISMS[index_alternate_order].curve); + ec_curve_normalize_A24(&E0); + + unsigned length; + + int u_bitsize = ibz_bitsize(u); + + // deciding the power of 2 of the dim2 isogeny we use for this + // the smaller the faster, but if it set too low there is a risk that + // RepresentInteger will fail + if (!small) { + // in that case, we just set it to be the biggest value possible + length = TORSION_EVEN_POWER - HD_extra_torsion; + } else { + length = ibz_bitsize(&QUATALG_PINFTY.p) + QUAT_repres_bound_input - u_bitsize; + assert(u_bitsize < (int)length); + assert(length < TORSION_EVEN_POWER - HD_extra_torsion); + } + assert(length); + + // var init + ibz_init(&two_pow); + ibz_init(&tmp); + quat_alg_elem_init(&theta); + + ibz_pow(&two_pow, &ibz_const_two, length); + ibz_copy(&tmp, u); + assert(ibz_cmp(&two_pow, &tmp) > 0); + assert(!ibz_is_even(&tmp)); + + // computing the endomorphism theta of norm u * (2^(length) - u) + ibz_sub(&tmp, &two_pow, &tmp); + ibz_mul(&tmp, &tmp, u); + assert(!ibz_is_even(&tmp)); + + // setting-up the quat_represent_integer_params + quat_represent_integer_params_t ri_params; + ri_params.primality_test_iterations = QUAT_represent_integer_params.primality_test_iterations; + + quat_p_extremal_maximal_order_t order_hnf; + quat_alg_elem_init(&order_hnf.z); + quat_alg_elem_copy(&order_hnf.z, &EXTREMAL_ORDERS[index_alternate_order].z); + quat_alg_elem_init(&order_hnf.t); + quat_alg_elem_copy(&order_hnf.t, &EXTREMAL_ORDERS[index_alternate_order].t); + 
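+    // The working copy of the extremal order below is what quat_represent_integer
+    // searches in for theta of norm u * (2^length - u).  The point of asking for
+    // that norm (in the spirit of Kani's lemma) is that such a theta lets the
+    // degree-u isogeny be embedded into a dimension-2 isogeny of 2-power degree,
+    // whose kernel is assembled further down from the even-torsion basis and its
+    // image under a scaled theta, and which theta_chain_compute_and_eval computes.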
quat_lattice_init(&order_hnf.order); + ibz_copy(&order_hnf.order.denom, &EXTREMAL_ORDERS[index_alternate_order].order.denom); + ibz_mat_4x4_copy(&order_hnf.order.basis, &EXTREMAL_ORDERS[index_alternate_order].order.basis); + order_hnf.q = EXTREMAL_ORDERS[index_alternate_order].q; + ri_params.order = &order_hnf; + ri_params.algebra = &QUATALG_PINFTY; + +#ifndef NDEBUG + assert(quat_lattice_contains(NULL, &ri_params.order->order, &ri_params.order->z)); + assert(quat_lattice_contains(NULL, &ri_params.order->order, &ri_params.order->t)); +#endif + + ret = quat_represent_integer(&theta, &tmp, 1, &ri_params); + + assert(!ibz_is_even(&tmp)); + + if (!ret) { + printf("represent integer failed for the alternate order number %d and for " + "a target of " + "size %d for a u of size %d with length = " + "%u \n", + index_alternate_order, + ibz_bitsize(&tmp), + ibz_bitsize(u), + length); + goto cleanup; + } + quat_lideal_create(lideal, &theta, u, &order_hnf.order, &QUATALG_PINFTY); + + quat_alg_elem_finalize(&order_hnf.z); + quat_alg_elem_finalize(&order_hnf.t); + quat_lattice_finalize(&order_hnf.order); + +#ifndef NDEBUG + ibz_t test_norm, test_denom; + ibz_init(&test_denom); + ibz_init(&test_norm); + quat_alg_norm(&test_norm, &test_denom, &theta, &QUATALG_PINFTY); + assert(ibz_is_one(&test_denom)); + assert(ibz_cmp(&test_norm, &tmp) == 0); + assert(!ibz_is_even(&tmp)); + assert(quat_lattice_contains(NULL, &EXTREMAL_ORDERS[index_alternate_order].order, &theta)); + ibz_finalize(&test_norm); + ibz_finalize(&test_denom); +#endif + + ec_basis_t B0_two; + // copying the basis + copy_basis(&B0_two, &CURVES_WITH_ENDOMORPHISMS[index_alternate_order].basis_even); + assert(test_basis_order_twof(&B0_two, &E0, TORSION_EVEN_POWER)); + ec_dbl_iter_basis(&B0_two, TORSION_EVEN_POWER - length - HD_extra_torsion, &B0_two, &E0); + + assert(test_basis_order_twof(&B0_two, &E0, length + HD_extra_torsion)); + + // now we set-up the kernel + theta_couple_point_t T1; + theta_couple_point_t T2, T1m2; + + copy_point(&T1.P1, &B0_two.P); + copy_point(&T2.P1, &B0_two.Q); + copy_point(&T1m2.P1, &B0_two.PmQ); + + // multiplication of theta by (u)^-1 mod 2^(length+2) + ibz_mul(&two_pow, &two_pow, &ibz_const_two); + ibz_mul(&two_pow, &two_pow, &ibz_const_two); + ibz_copy(&tmp, u); + ibz_invmod(&tmp, &tmp, &two_pow); + assert(!ibz_is_even(&tmp)); + + ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); + ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); + ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); + ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + + // applying theta to the basis + ec_basis_t B0_two_theta; + copy_basis(&B0_two_theta, &B0_two); + endomorphism_application_even_basis(&B0_two_theta, index_alternate_order, &E0, &theta, length + HD_extra_torsion); + + // Ensure the basis we're using has the expected order + assert(test_basis_order_twof(&B0_two_theta, &E0, length + HD_extra_torsion)); + + // Set-up the domain E0 x E0 + theta_couple_curve_t E00; + E00.E1 = E0; + E00.E2 = E0; + + // Set-up the kernel from the bases + theta_kernel_couple_points_t dim_two_ker; + copy_bases_to_kernel(&dim_two_ker, &B0_two, &B0_two_theta); + + ret = theta_chain_compute_and_eval(length, &E00, &dim_two_ker, true, E34, P12, numP); + if (!ret) + goto cleanup; + + assert(length); + ret = (int)length; + +cleanup: + // var finalize + ibz_finalize(&two_pow); + ibz_finalize(&tmp); + quat_alg_elem_finalize(&theta); + + return ret; +} + +int +fixed_degree_isogeny_and_eval(quat_left_ideal_t *lideal, + const ibz_t *u, + bool small, + theta_couple_curve_t 
*E34, + theta_couple_point_t *P12, + size_t numP, + const int index_alternate_order) +{ + return _fixed_degree_isogeny_impl(lideal, u, small, E34, P12, numP, index_alternate_order); +} + +// takes the output of LLL and apply some small treatment on the basis +// reordering vectors and switching some signs if needed to make it in a nicer +// shape +static void +post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, const ibz_t *norm, bool is_special_order) +{ + // if the left order is the special one, then we apply some additional post + // treatment + if (is_special_order) { + // reordering the basis if needed + if (ibz_cmp(&(*gram)[0][0], &(*gram)[2][2]) == 0) { + for (int i = 0; i < 4; i++) { + ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + } + ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); + ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); + ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); + ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); + ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); + } else if (ibz_cmp(&(*gram)[0][0], &(*gram)[3][3]) == 0) { + for (int i = 0; i < 4; i++) { + ibz_swap(&(*reduced)[i][1], &(*reduced)[i][3]); + } + ibz_swap(&(*gram)[0][3], &(*gram)[0][1]); + ibz_swap(&(*gram)[3][0], &(*gram)[1][0]); + ibz_swap(&(*gram)[2][3], &(*gram)[2][1]); + ibz_swap(&(*gram)[3][2], &(*gram)[1][2]); + ibz_swap(&(*gram)[3][3], &(*gram)[1][1]); + } else if (ibz_cmp(&(*gram)[1][1], &(*gram)[3][3]) == 0) { + // in this case it seems that we need to swap the second and third + // element, and then recompute entirely the second element from the first + // first we swap the second and third element + for (int i = 0; i < 4; i++) { + ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + } + ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); + ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); + ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); + ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); + ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); + } + + // adjusting the sign if needed + if (ibz_cmp(&(*reduced)[0][0], &(*reduced)[1][1]) != 0) { + for (int i = 0; i < 4; i++) { + ibz_neg(&(*reduced)[i][1], &(*reduced)[i][1]); + ibz_neg(&(*gram)[i][1], &(*gram)[i][1]); + ibz_neg(&(*gram)[1][i], &(*gram)[1][i]); + } + } + if (ibz_cmp(&(*reduced)[0][2], &(*reduced)[1][3]) != 0) { + for (int i = 0; i < 4; i++) { + ibz_neg(&(*reduced)[i][3], &(*reduced)[i][3]); + ibz_neg(&(*gram)[i][3], &(*gram)[i][3]); + ibz_neg(&(*gram)[3][i], &(*gram)[3][i]); + } + // assert(ibz_cmp(&(*reduced)[0][2],&(*reduced)[1][3])==0); + } + } +} + +// enumerate all vectors in an hypercube of norm m for the infinity norm +// with respect to a basis whose gram matrix is given by gram +// Returns an int `count`, the number of vectors found with the desired +// properties +static int +enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t *gram, const ibz_t *adjusted_norm) +{ + + ibz_t remain, norm; + ibz_vec_4_t point; + + ibz_init(&remain); + ibz_init(&norm); + ibz_vec_4_init(&point); + + assert(m > 0); + + int count = 0; + int dim = 2 * m + 1; + int dim2 = dim * dim; + int dim3 = dim2 * dim; + + // if the basis is of the form alpha, i*alpha, beta, i*beta + // we can remove some values due to symmetry of the basis that + bool need_remove_symmetry = + (ibz_cmp(&(*gram)[0][0], &(*gram)[1][1]) == 0 && ibz_cmp(&(*gram)[3][3], &(*gram)[2][2]) == 0); + + int check1, check2, check3; + + // Enumerate over points in a hypercube with coordinates (x, y, z, w) + for (int x = -m; x <= 0; x++) { // We only check non-positive x-values + for (int y = -m; y < m + 1; y++) { + 
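+            // (On the symmetry filtering used further down: for a basis of the
+            // shape [gamma, i*gamma, beta, i*beta], multiplying a vector v by i
+            // permutes and negates its coordinates, so check1, check2 and check3
+            // are base-(2m+1) encodings of v, -i*v and i*v; keeping v only when
+            // its own encoding is the smallest selects one representative per
+            // orbit, and all members of an orbit share the same norm.)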
// Once x = 0 we only consider non-positive y values + if (x == 0 && y > 0) { + break; + } + for (int z = -m; z < m + 1; z++) { + // If x and y are both zero, we only consider non-positive z values + if (x == 0 && y == 0 && z > 0) { + break; + } + for (int w = -m; w < m + 1; w++) { + // If x, y, z are all zero, we only consider negative w values + if (x == 0 && y == 0 && z == 0 && w >= 0) { + break; + } + + // Now for each candidate (x, y, z, w) we need to check a number of + // conditions We have already filtered for symmetry with several break + // statements, but there are more checks. + + // 1. We do not allow all (x, y, z, w) to be multiples of 2 + // 2. We do not allow all (x, y, z, w) to be multiples of 3 + // 3. We do not want elements of the same norm, so we quotient out the + // action + // of a group of order four generated by i for a basis expected to + // be of the form: [gamma, i gamma, beta, i beta ]. + + // Ensure that not all values are even + if (!((x | y | z | w) & 1)) { + continue; + } + // Ensure that not all values are multiples of three + if (x % 3 == 0 && y % 3 == 0 && z % 3 == 0 && w % 3 == 0) { + continue; + } + + check1 = (m + w) + dim * (m + z) + dim2 * (m + y) + dim3 * (m + x); + check2 = (m - z) + dim * (m + w) + dim2 * (m - x) + dim3 * (m + y); + check3 = (m + z) + dim * (m - w) + dim2 * (m + x) + dim3 * (m - y); + + // either the basis does not have symmetry and we are good, + // or there is a special symmetry that we can exploit + // and we ensure that we don't record the same norm in the list + if (!need_remove_symmetry || (check1 <= check2 && check1 <= check3)) { + // Set the point as a vector (x, y, z, w) + ibz_set(&point[0], x); + ibz_set(&point[1], y); + ibz_set(&point[2], z); + ibz_set(&point[3], w); + + // Evaluate this through the gram matrix and divide out by the + // adjusted_norm + quat_qf_eval(&norm, gram, &point); + ibz_div(&norm, &remain, &norm, adjusted_norm); + assert(ibz_is_zero(&remain)); + + if (ibz_mod_ui(&norm, 2) == 1) { + ibz_set(&vecs[count][0], x); + ibz_set(&vecs[count][1], y); + ibz_set(&vecs[count][2], z); + ibz_set(&vecs[count][3], w); + ibz_copy(&norms[count], &norm); + count++; + } + } + } + } + } + } + + ibz_finalize(&remain); + ibz_finalize(&norm); + ibz_vec_4_finalize(&point); + + return count - 1; +} + +// enumerate through the two list given in input to find to integer d1,d2 such +// that there exists u,v with u d1 + v d2 = target the bool is diagonal +// indicates if the two lists are the same +static int +find_uv_from_lists(ibz_t *au, + ibz_t *bu, + ibz_t *av, + ibz_t *bv, + ibz_t *u, + ibz_t *v, + int *index_sol1, + int *index_sol2, + const ibz_t *target, + const ibz_t *small_norms1, + const ibz_t *small_norms2, + const ibz_t *quotients, + const int index1, + const int index2, + const int is_diagonal, + const int number_sum_square) +{ + + ibz_t n, remain, adjusted_norm; + ibz_init(&n); + ibz_init(&remain); + ibz_init(&adjusted_norm); + + int found = 0; + int cmp; + ibz_copy(&n, target); + + // enumerating through the list + for (int i1 = 0; i1 < index1; i1++) { + ibz_mod(&adjusted_norm, &n, &small_norms1[i1]); + int starting_index2; + if (is_diagonal) { + starting_index2 = i1; + } else { + starting_index2 = 0; + } + for (int i2 = starting_index2; i2 < index2; i2++) { + // u = target / d1 mod d2 + if (!ibz_invmod(&remain, &small_norms2[i2], &small_norms1[i1])) { + continue; + } + ibz_mul(v, &remain, &adjusted_norm); + ibz_mod(v, v, &small_norms1[i1]); + cmp = ibz_cmp(v, "ients[i2]); + while (!found && cmp < 0) { + if 
(number_sum_square > 0) { + found = ibz_cornacchia_prime(av, bv, &ibz_const_one, v); + } else if (number_sum_square == 0) { + found = 1; + } + if (found) { + ibz_mul(&remain, v, &small_norms2[i2]); + ibz_copy(au, &n); + ibz_sub(u, au, &remain); + assert(ibz_cmp(u, &ibz_const_zero) > 0); + ibz_div(u, &remain, u, &small_norms1[i1]); + assert(ibz_is_zero(&remain)); + // we want to remove weird cases where u,v have big power of two + found = found && (ibz_get(u) != 0 && ibz_get(v) != 0); + if (number_sum_square == 2) { + found = ibz_cornacchia_prime(au, bu, &ibz_const_one, u); + } + } + if (!found) { + ibz_add(v, v, &small_norms1[i1]); + cmp = ibz_cmp(v, "ients[i2]); + } + } + + if (found) { + // copying the indices + *index_sol1 = i1; + *index_sol2 = i2; + break; + } + } + if (found) { + break; + } + } + + ibz_finalize(&n); + ibz_finalize(&remain); + ibz_finalize(&adjusted_norm); + + return found; +} + +struct vec_and_norm +{ + ibz_vec_4_t vec; + ibz_t norm; + int idx; +}; + +static int +compare_vec_by_norm(const void *_first, const void *_second) +{ + const struct vec_and_norm *first = _first, *second = _second; + int res = ibz_cmp(&first->norm, &second->norm); + if (res != 0) + return res; + else + return first->idx - second->idx; +} + +// use several special curves +// we assume that the first one is always j=1728 +int +find_uv(ibz_t *u, + ibz_t *v, + quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *d1, + ibz_t *d2, + int *index_alternate_order_1, + int *index_alternate_order_2, + const ibz_t *target, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo, + int num_alternate_order) + +{ + + // variable declaration & init + ibz_vec_4_t vec; + ibz_t n; + ibz_t au, bu, av, bv; + ibz_t norm_d; + ibz_t remain; + ibz_init(&au); + ibz_init(&bu); + ibz_init(&av); + ibz_init(&bv); + ibz_init(&norm_d); + ibz_init(&n); + ibz_vec_4_init(&vec); + ibz_init(&remain); + + ibz_copy(&n, target); + + ibz_t adjusted_norm[num_alternate_order + 1]; + ibz_mat_4x4_t gram[num_alternate_order + 1], reduced[num_alternate_order + 1]; + quat_left_ideal_t ideal[num_alternate_order + 1]; + + for (int i = 0; i < num_alternate_order + 1; i++) { + ibz_init(&adjusted_norm[i]); + ibz_mat_4x4_init(&gram[i]); + ibz_mat_4x4_init(&reduced[i]); + quat_left_ideal_init(&ideal[i]); + } + + // first we reduce the ideal given in input + quat_lideal_copy(&ideal[0], lideal); + quat_lideal_reduce_basis(&reduced[0], &gram[0], &ideal[0], Bpoo); + + ibz_mat_4x4_copy(&ideal[0].lattice.basis, &reduced[0]); + ibz_set(&adjusted_norm[0], 1); + ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); + ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); + post_LLL_basis_treatment(&gram[0], &reduced[0], &ideal[0].norm, true); + + // for efficient lattice reduction, we replace ideal[0] by the equivalent + // ideal of smallest norm + quat_left_ideal_t reduced_id; + quat_left_ideal_init(&reduced_id); + quat_lideal_copy(&reduced_id, &ideal[0]); + quat_alg_elem_t delta; + // delta will be the element of smallest norm + quat_alg_elem_init(&delta); + ibz_set(&delta.coord[0], 1); + ibz_set(&delta.coord[1], 0); + ibz_set(&delta.coord[2], 0); + ibz_set(&delta.coord[3], 0); + ibz_copy(&delta.denom, &reduced_id.lattice.denom); + ibz_mat_4x4_eval(&delta.coord, &reduced[0], &delta.coord); + assert(quat_lattice_contains(NULL, &reduced_id.lattice, &delta)); + + // reduced_id = ideal[0] * \overline{delta}/n(ideal[0]) + quat_alg_conj(&delta, &delta); + ibz_mul(&delta.denom, &delta.denom, &ideal[0].norm); + 
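+    // This is the standard equivalence I ~ I * conj(delta) / n(I) for delta in I,
+    // which yields an equivalent left ideal of norm q(delta) / n(I); with delta
+    // the first vector of the LLL-reduced basis this is (essentially) the
+    // smallest-norm ideal in the class, which keeps the products with the
+    // connecting ideals below cheap to reduce.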
quat_lattice_alg_elem_mul(&reduced_id.lattice, &reduced_id.lattice, &delta, Bpoo); + ibz_copy(&reduced_id.norm, &gram[0][0][0]); + ibz_div(&reduced_id.norm, &remain, &reduced_id.norm, &adjusted_norm[0]); + assert(ibz_cmp(&remain, &ibz_const_zero) == 0); + + // and conj_ideal is the conjugate of reduced_id + // init the right order; + quat_lattice_t right_order; + quat_lattice_init(&right_order); + // computing the conjugate + quat_left_ideal_t conj_ideal; + quat_left_ideal_init(&conj_ideal); + quat_lideal_conjugate_without_hnf(&conj_ideal, &right_order, &reduced_id, Bpoo); + + // computing all the other connecting ideals and reducing them + for (int i = 1; i < num_alternate_order + 1; i++) { + quat_lideal_lideal_mul_reduced(&ideal[i], &gram[i], &conj_ideal, &ALTERNATE_CONNECTING_IDEALS[i - 1], Bpoo); + ibz_mat_4x4_copy(&reduced[i], &ideal[i].lattice.basis); + ibz_set(&adjusted_norm[i], 1); + ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom); + ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom); + post_LLL_basis_treatment(&gram[i], &reduced[i], &ideal[i].norm, false); + } + + // enumerating small vectors + + // global parameters for the enumeration + int m = FINDUV_box_size; + int m4 = FINDUV_cube_size; + + ibz_vec_4_t small_vecs[num_alternate_order + 1][m4]; + ibz_t small_norms[num_alternate_order + 1][m4]; + ibz_vec_4_t alternate_small_vecs[num_alternate_order + 1][m4]; + ibz_t alternate_small_norms[num_alternate_order + 1][m4]; + ibz_t quotients[num_alternate_order + 1][m4]; + int indices[num_alternate_order + 1]; + + for (int j = 0; j < num_alternate_order + 1; j++) { + for (int i = 0; i < m4; i++) { + ibz_init(&small_norms[j][i]); + ibz_vec_4_init(&small_vecs[j][i]); + ibz_init(&alternate_small_norms[j][i]); + ibz_init("ients[j][i]); + ibz_vec_4_init(&alternate_small_vecs[j][i]); + } + // enumeration in the hypercube of norm m + indices[j] = enumerate_hypercube(small_vecs[j], small_norms[j], m, &gram[j], &adjusted_norm[j]); + + // sorting the list + { + struct vec_and_norm small_vecs_and_norms[indices[j]]; + for (int i = 0; i < indices[j]; ++i) { + memcpy(&small_vecs_and_norms[i].vec, &small_vecs[j][i], sizeof(ibz_vec_4_t)); + memcpy(&small_vecs_and_norms[i].norm, &small_norms[j][i], sizeof(ibz_t)); + small_vecs_and_norms[i].idx = i; + } + qsort(small_vecs_and_norms, indices[j], sizeof(*small_vecs_and_norms), compare_vec_by_norm); + for (int i = 0; i < indices[j]; ++i) { + memcpy(&small_vecs[j][i], &small_vecs_and_norms[i].vec, sizeof(ibz_vec_4_t)); + memcpy(&small_norms[j][i], &small_vecs_and_norms[i].norm, sizeof(ibz_t)); + } +#ifndef NDEBUG + for (int i = 1; i < indices[j]; ++i) + assert(ibz_cmp(&small_norms[j][i - 1], &small_norms[j][i]) <= 0); +#endif + } + + for (int i = 0; i < indices[j]; i++) { + ibz_div("ients[j][i], &remain, &n, &small_norms[j][i]); + } + } + + int found = 0; + int i1; + int i2; + for (int j1 = 0; j1 < num_alternate_order + 1; j1++) { + for (int j2 = j1; j2 < num_alternate_order + 1; j2++) { + // in this case, there are some small adjustements to make + int is_diago = (j1 == j2); + found = find_uv_from_lists(&au, + &bu, + &av, + &bv, + u, + v, + &i1, + &i2, + target, + small_norms[j1], + small_norms[j2], + quotients[j2], + indices[j1], + indices[j2], + is_diago, + 0); + // } + + if (found) { + // recording the solutions that we found + ibz_copy(&beta1->denom, &ideal[j1].lattice.denom); + ibz_copy(&beta2->denom, &ideal[j2].lattice.denom); + ibz_copy(d1, &small_norms[j1][i1]); + ibz_copy(d2, &small_norms[j2][i2]); + 
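+                // beta1 and beta2 are rebuilt from the short enumeration vectors
+                // by evaluating them through the corresponding reduced bases; d1
+                // and d2 are their (odd) norms, and u, v satisfy
+                // u*d1 + v*d2 = target, which is exactly the decomposition the
+                // caller needs.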
ibz_mat_4x4_eval(&beta1->coord, &reduced[j1], &small_vecs[j1][i1]); + ibz_mat_4x4_eval(&beta2->coord, &reduced[j2], &small_vecs[j2][i2]); + assert(quat_lattice_contains(NULL, &ideal[j1].lattice, beta1)); + assert(quat_lattice_contains(NULL, &ideal[j2].lattice, beta2)); + if (j1 != 0 || j2 != 0) { + ibz_div(&delta.denom, &remain, &delta.denom, &lideal->norm); + assert(ibz_cmp(&remain, &ibz_const_zero) == 0); + ibz_mul(&delta.denom, &delta.denom, &conj_ideal.norm); + } + if (j1 != 0) { + // we send back beta1 to the original ideal + quat_alg_mul(beta1, &delta, beta1, Bpoo); + quat_alg_normalize(beta1); + } + if (j2 != 0) { + // we send back beta2 to the original ideal + quat_alg_mul(beta2, &delta, beta2, Bpoo); + quat_alg_normalize(beta2); + } + + // if the selected element belong to an alternate order, we conjugate it + if (j1 != 0) { + quat_alg_conj(beta1, beta1); + } + if (j2 != 0) { + quat_alg_conj(beta2, beta2); + } + +#ifndef NDEBUG + quat_alg_norm(&remain, &norm_d, beta1, &QUATALG_PINFTY); + assert(ibz_is_one(&norm_d)); + ibz_mul(&n, d1, &ideal->norm); + if (j1 > 0) { + ibz_mul(&n, &n, &ALTERNATE_CONNECTING_IDEALS[j1 - 1].norm); + } + assert(ibz_cmp(&n, &remain) == 0); + quat_alg_norm(&remain, &norm_d, beta2, &QUATALG_PINFTY); + assert(ibz_is_one(&norm_d)); + ibz_mul(&n, d2, &ideal->norm); + if (j2 > 0) { + ibz_mul(&n, &n, &ALTERNATE_CONNECTING_IDEALS[j2 - 1].norm); + } + assert(ibz_cmp(&n, &remain) == 0); + assert(quat_lattice_contains(NULL, &ideal->lattice, beta1)); + assert(quat_lattice_contains(NULL, &ideal->lattice, beta2)); + + quat_left_ideal_t ideal_test; + quat_lattice_t ro; + quat_left_ideal_init(&ideal_test); + quat_lattice_init(&ro); + if (j1 > 0) { + quat_lideal_copy(&ideal_test, &ALTERNATE_CONNECTING_IDEALS[j1 - 1]); + quat_lideal_conjugate_without_hnf(&ideal_test, &ro, &ideal_test, Bpoo); + quat_lideal_lideal_mul_reduced(&ideal_test, &gram[0], &ideal_test, ideal, Bpoo); + assert(quat_lattice_contains(NULL, &ideal_test.lattice, beta1)); + } + if (j2 > 0) { + quat_lideal_copy(&ideal_test, &ALTERNATE_CONNECTING_IDEALS[j2 - 1]); + quat_lideal_conjugate_without_hnf(&ideal_test, &ro, &ideal_test, Bpoo); + quat_lideal_lideal_mul_reduced(&ideal_test, &gram[0], &ideal_test, ideal, Bpoo); + assert(quat_lattice_contains(NULL, &ideal_test.lattice, beta2)); + } + + quat_lattice_finalize(&ro); + quat_left_ideal_finalize(&ideal_test); +#endif + + *index_alternate_order_1 = j1; + *index_alternate_order_2 = j2; + break; + } + } + if (found) { + break; + } + } + + for (int j = 0; j < num_alternate_order + 1; j++) { + for (int i = 0; i < m4; i++) { + ibz_finalize(&small_norms[j][i]); + ibz_vec_4_finalize(&small_vecs[j][i]); + ibz_finalize(&alternate_small_norms[j][i]); + ibz_finalize("ients[j][i]); + ibz_vec_4_finalize(&alternate_small_vecs[j][i]); + } + } + + // var finalize + for (int i = 0; i < num_alternate_order + 1; i++) { + ibz_mat_4x4_finalize(&gram[i]); + ibz_mat_4x4_finalize(&reduced[i]); + quat_left_ideal_finalize(&ideal[i]); + ibz_finalize(&adjusted_norm[i]); + } + + ibz_finalize(&n); + ibz_vec_4_finalize(&vec); + ibz_finalize(&au); + ibz_finalize(&bu); + ibz_finalize(&av); + ibz_finalize(&bv); + ibz_finalize(&remain); + ibz_finalize(&norm_d); + quat_lattice_finalize(&right_order); + quat_left_ideal_finalize(&conj_ideal); + quat_left_ideal_finalize(&reduced_id); + quat_alg_elem_finalize(&delta); + + return found; +} + +int +dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *u, + ibz_t *v, + ibz_t *d1, + ibz_t *d2, + ec_curve_t 
*codomain, + ec_basis_t *basis, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo) +{ + ibz_t target, tmp, two_pow; + ; + quat_alg_elem_t theta; + + ibz_t norm_d; + ibz_init(&norm_d); + ibz_t test1, test2; + ibz_init(&test1); + ibz_init(&test2); + + ibz_init(&target); + ibz_init(&tmp); + ibz_init(&two_pow); + int exp = TORSION_EVEN_POWER; + quat_alg_elem_init(&theta); + + // first, we find u,v,d1,d2,beta1,beta2 + // such that u*d1 + v*d2 = 2^TORSION_EVEN_POWER and there are ideals of + // norm d1,d2 equivalent to ideal beta1 and beta2 are elements of norm nd1, + // nd2 where n=n(lideal) + int ret; + int index_order1 = 0, index_order2 = 0; +#ifndef NDEBUG + unsigned int Fu_length, Fv_length; +#endif + ret = find_uv(u, + v, + beta1, + beta2, + d1, + d2, + &index_order1, + &index_order2, + &TORSION_PLUS_2POWER, + lideal, + Bpoo, + NUM_ALTERNATE_EXTREMAL_ORDERS); + if (!ret) { + goto cleanup; + } + + assert(ibz_is_odd(d1) && ibz_is_odd(d2)); + // compute the valuation of the GCD of u,v + ibz_gcd(&tmp, u, v); + assert(ibz_cmp(&tmp, &ibz_const_zero) != 0); + int exp_gcd = ibz_two_adic(&tmp); + exp = TORSION_EVEN_POWER - exp_gcd; + // removing the power of 2 from u and v + ibz_div(u, &test1, u, &tmp); + assert(ibz_cmp(&test1, &ibz_const_zero) == 0); + ibz_div(v, &test1, v, &tmp); + assert(ibz_cmp(&test1, &ibz_const_zero) == 0); + +#ifndef NDEBUG + // checking that ud1+vd2 = 2^exp + ibz_t pow_check, tmp_check; + ibz_init(&pow_check); + ibz_init(&tmp_check); + ibz_pow(&pow_check, &ibz_const_two, exp); + ibz_mul(&tmp_check, d1, u); + ibz_sub(&pow_check, &pow_check, &tmp_check); + ibz_mul(&tmp_check, v, d2); + ibz_sub(&pow_check, &pow_check, &tmp_check); + assert(ibz_cmp(&pow_check, &ibz_const_zero) == 0); + ibz_finalize(&tmp_check); + ibz_finalize(&pow_check); +#endif + + // now we compute the dimension 2 isogeny + // F : Eu x Ev -> E x E' + // where we have phi_u : Eu -> E_index_order1 and phi_v : Ev -> E_index_order2 + // if we have phi1 : E_index_order_1 -> E of degree d1 + // and phi2 : E_index_order_2 -> E of degree d2 + // we can define theta = phi2 o hat{phi1} + // and the kernel of F is given by + // ( [ud1](P), phiv o theta o hat{phiu} (P)),( [ud1](Q), phiv o theta o + // hat{phiu} (Q)) where P,Q is a basis of E0[2e] + + // now we set-up the kernel + // ec_curve_t E0 = CURVE_E0; + ec_curve_t E1; + copy_curve(&E1, &CURVES_WITH_ENDOMORPHISMS[index_order1].curve); + ec_curve_t E2; + copy_curve(&E2, &CURVES_WITH_ENDOMORPHISMS[index_order2].curve); + ec_basis_t bas1, bas2; + theta_couple_curve_t E01; + theta_kernel_couple_points_t ker; + + ec_basis_t bas_u; + copy_basis(&bas1, &CURVES_WITH_ENDOMORPHISMS[index_order1].basis_even); + copy_basis(&bas2, &CURVES_WITH_ENDOMORPHISMS[index_order2].basis_even); + + // we start by computing theta = beta2 \hat{beta1}/n + ibz_set(&theta.denom, 1); + quat_alg_conj(&theta, beta1); + quat_alg_mul(&theta, beta2, &theta, &QUATALG_PINFTY); + ibz_mul(&theta.denom, &theta.denom, &lideal->norm); + + // now we perform the actual computation + quat_left_ideal_t idealu, idealv; + quat_left_ideal_init(&idealu); + quat_left_ideal_init(&idealv); + theta_couple_curve_t Fu_codomain, Fv_codomain; + theta_couple_point_t pushed_points[3]; + theta_couple_point_t *const V1 = pushed_points + 0, *const V2 = pushed_points + 1, *const V1m2 = pushed_points + 2; + theta_couple_point_t P, Q, PmQ; + + copy_point(&P.P1, &bas1.P); + copy_point(&PmQ.P1, &bas1.PmQ); + copy_point(&Q.P1, &bas1.Q); + // Set points to zero + ec_point_init(&P.P2); + ec_point_init(&Q.P2); + 
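+    // The basis of E1 is embedded as couple points with identity second
+    // component, so evaluating the fixed-degree (dimension-2) isogeny below
+    // pushes it through both codomain factors; the debug block that follows
+    // then checks via Weil pairings that the first factor is the degree-u one.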
ec_point_init(&PmQ.P2); + + pushed_points[0] = P; + pushed_points[1] = Q; + pushed_points[2] = PmQ; + // we perform the computation of phiu with a fixed degree isogeny + ret = fixed_degree_isogeny_and_eval( + &idealu, u, true, &Fu_codomain, pushed_points, sizeof(pushed_points) / sizeof(*pushed_points), index_order1); + + if (!ret) { + goto cleanup; + } + assert(test_point_order_twof(&V1->P1, &Fu_codomain.E1, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&V1->P2, &Fu_codomain.E2, TORSION_EVEN_POWER)); + +#ifndef NDEBUG + Fu_length = (unsigned int)ret; + // presumably the correct curve is the first one, we check this + fp2_t w0a, w1a, w2a; + ec_curve_t E1_tmp, Fu_codomain_E1_tmp, Fu_codomain_E2_tmp; + copy_curve(&E1_tmp, &E1); + copy_curve(&Fu_codomain_E1_tmp, &Fu_codomain.E1); + copy_curve(&Fu_codomain_E2_tmp, &Fu_codomain.E2); + weil(&w0a, TORSION_EVEN_POWER, &bas1.P, &bas1.Q, &bas1.PmQ, &E1_tmp); + weil(&w1a, TORSION_EVEN_POWER, &V1->P1, &V2->P1, &V1m2->P1, &Fu_codomain_E1_tmp); + weil(&w2a, TORSION_EVEN_POWER, &V1->P2, &V2->P2, &V1m2->P2, &Fu_codomain_E2_tmp); + ibz_pow(&two_pow, &ibz_const_two, Fu_length); + ibz_sub(&two_pow, &two_pow, u); + + // now we are checking that the weil pairings are equal to the correct value + digit_t digit_u[NWORDS_ORDER] = { 0 }; + ibz_to_digit_array(digit_u, u); + fp2_t test_powa; + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + + assert(fp2_is_equal(&test_powa, &w1a)); + ibz_to_digit_array(digit_u, &two_pow); + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + assert(fp2_is_equal(&test_powa, &w2a)); +#endif + + // copying the basis images + copy_point(&bas_u.P, &V1->P1); + copy_point(&bas_u.Q, &V2->P1); + copy_point(&bas_u.PmQ, &V1m2->P1); + + // copying the points to the first part of the kernel + copy_point(&ker.T1.P1, &bas_u.P); + copy_point(&ker.T2.P1, &bas_u.Q); + copy_point(&ker.T1m2.P1, &bas_u.PmQ); + copy_curve(&E01.E1, &Fu_codomain.E1); + + copy_point(&P.P1, &bas2.P); + copy_point(&PmQ.P1, &bas2.PmQ); + copy_point(&Q.P1, &bas2.Q); + pushed_points[0] = P; + pushed_points[1] = Q; + pushed_points[2] = PmQ; + + // computation of phiv + ret = fixed_degree_isogeny_and_eval( + &idealv, v, true, &Fv_codomain, pushed_points, sizeof(pushed_points) / sizeof(*pushed_points), index_order2); + if (!ret) { + goto cleanup; + } + + assert(test_point_order_twof(&V1->P1, &Fv_codomain.E1, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&V1->P2, &Fv_codomain.E2, TORSION_EVEN_POWER)); + +#ifndef NDEBUG + Fv_length = (unsigned int)ret; + ec_curve_t E2_tmp, Fv_codomain_E1_tmp, Fv_codomain_E2_tmp; + copy_curve(&E2_tmp, &E2); + copy_curve(&Fv_codomain_E1_tmp, &Fv_codomain.E1); + copy_curve(&Fv_codomain_E2_tmp, &Fv_codomain.E2); + // presumably the correct curve is the first one, we check this + weil(&w0a, TORSION_EVEN_POWER, &bas2.P, &bas2.Q, &bas2.PmQ, &E2_tmp); + weil(&w1a, TORSION_EVEN_POWER, &V1->P1, &V2->P1, &V1m2->P1, &Fv_codomain_E1_tmp); + weil(&w2a, TORSION_EVEN_POWER, &V1->P2, &V2->P2, &V1m2->P2, &Fv_codomain_E2_tmp); + if (Fv_length == 0) { + ibz_set(&tmp, 1); + ibz_set(&two_pow, 1); + } else { + ibz_pow(&two_pow, &ibz_const_two, Fv_length); + ibz_sub(&two_pow, &two_pow, v); + } + + // now we are checking that one of the two is equal to the correct value + ibz_to_digit_array(digit_u, v); + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + assert(fp2_is_equal(&test_powa, &w1a)); + ibz_to_digit_array(digit_u, &two_pow); + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + assert(fp2_is_equal(&test_powa, 
&w2a)); + +#endif + + copy_point(&bas2.P, &V1->P1); + copy_point(&bas2.Q, &V2->P1); + copy_point(&bas2.PmQ, &V1m2->P1); + + // multiplying theta by 1 / (d1 * n(connecting_ideal2)) + ibz_pow(&two_pow, &ibz_const_two, TORSION_EVEN_POWER); + ibz_copy(&tmp, d1); + if (index_order2 > 0) { + ibz_mul(&tmp, &tmp, &ALTERNATE_CONNECTING_IDEALS[index_order2 - 1].norm); + } + ibz_invmod(&tmp, &tmp, &two_pow); + + ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); + ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); + ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); + ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + + // applying theta + endomorphism_application_even_basis(&bas2, 0, &Fv_codomain.E1, &theta, TORSION_EVEN_POWER); + + assert(test_basis_order_twof(&bas2, &Fv_codomain.E1, TORSION_EVEN_POWER)); + + // copying points to the second part of the kernel + copy_point(&ker.T1.P2, &bas2.P); + copy_point(&ker.T2.P2, &bas2.Q); + copy_point(&ker.T1m2.P2, &bas2.PmQ); + copy_curve(&E01.E2, &Fv_codomain.E1); + + // copying the points to the first part of the kernel + quat_left_ideal_finalize(&idealu); + quat_left_ideal_finalize(&idealv); + + double_couple_point_iter(&ker.T1, TORSION_EVEN_POWER - exp, &ker.T1, &E01); + double_couple_point_iter(&ker.T2, TORSION_EVEN_POWER - exp, &ker.T2, &E01); + double_couple_point_iter(&ker.T1m2, TORSION_EVEN_POWER - exp, &ker.T1m2, &E01); + + assert(test_point_order_twof(&ker.T1.P1, &E01.E1, exp)); + assert(test_point_order_twof(&ker.T1m2.P2, &E01.E2, exp)); + + assert(ibz_is_odd(u)); + + // now we evaluate the basis points through the isogeny + assert(test_basis_order_twof(&bas_u, &E01.E1, TORSION_EVEN_POWER)); + + // evaluating the basis through the isogeny of degree u*d1 + copy_point(&pushed_points[0].P1, &bas_u.P); + copy_point(&pushed_points[2].P1, &bas_u.PmQ); + copy_point(&pushed_points[1].P1, &bas_u.Q); + // Set points to zero + ec_point_init(&pushed_points[0].P2); + ec_point_init(&pushed_points[1].P2); + ec_point_init(&pushed_points[2].P2); + + theta_couple_curve_t theta_codomain; + + ret = theta_chain_compute_and_eval_randomized( + exp, &E01, &ker, false, &theta_codomain, pushed_points, sizeof(pushed_points) / sizeof(*pushed_points)); + if (!ret) { + goto cleanup; + } + + theta_couple_point_t T1, T2, T1m2; + T1 = pushed_points[0]; + T2 = pushed_points[1]; + T1m2 = pushed_points[2]; + + assert(test_point_order_twof(&T1.P2, &theta_codomain.E2, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&T1.P1, &theta_codomain.E1, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&T1m2.P2, &theta_codomain.E2, TORSION_EVEN_POWER)); + + copy_point(&basis->P, &T1.P1); + copy_point(&basis->Q, &T2.P1); + copy_point(&basis->PmQ, &T1m2.P1); + copy_curve(codomain, &theta_codomain.E1); + + // using weil pairing to verify that we selected the correct curve + fp2_t w0, w1; + // ec_curve_t E0 = CURVE_E0; + // ec_basis_t bas0 = BASIS_EVEN; + weil(&w0, TORSION_EVEN_POWER, &bas1.P, &bas1.Q, &bas1.PmQ, &E1); + weil(&w1, TORSION_EVEN_POWER, &basis->P, &basis->Q, &basis->PmQ, codomain); + + digit_t digit_d[NWORDS_ORDER] = { 0 }; + ibz_mul(&tmp, d1, u); + ibz_mul(&tmp, &tmp, u); + ibz_mod(&tmp, &tmp, &TORSION_PLUS_2POWER); + ibz_to_digit_array(digit_d, &tmp); + fp2_t test_pow; + fp2_pow_vartime(&test_pow, &w0, digit_d, NWORDS_ORDER); + + // then we have selected the wrong one + if (!fp2_is_equal(&w1, &test_pow)) { + copy_point(&basis->P, &T1.P2); + copy_point(&basis->Q, &T2.P2); + copy_point(&basis->PmQ, &T1m2.P2); + copy_curve(codomain, &theta_codomain.E2); + +// verifying that the other 
one is the good one +#ifndef NDEBUG + ec_curve_t codomain_tmp; + copy_curve(&codomain_tmp, codomain); + weil(&w1, TORSION_EVEN_POWER, &basis->P, &basis->Q, &basis->PmQ, &codomain_tmp); + fp2_pow_vartime(&test_pow, &w0, digit_d, NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w1)); +#endif + } + + // now we apply M / (u * d1) where M is the matrix corresponding to the + // endomorphism beta1 = phi o dual(phi1) we multiply beta1 by the inverse of + // (u*d1) mod 2^TORSION_EVEN_POWER + ibz_mul(&tmp, u, d1); + if (index_order1 != 0) { + ibz_mul(&tmp, &tmp, &CONNECTING_IDEALS[index_order1].norm); + } + ibz_invmod(&tmp, &tmp, &TORSION_PLUS_2POWER); + ibz_mul(&beta1->coord[0], &beta1->coord[0], &tmp); + ibz_mul(&beta1->coord[1], &beta1->coord[1], &tmp); + ibz_mul(&beta1->coord[2], &beta1->coord[2], &tmp); + ibz_mul(&beta1->coord[3], &beta1->coord[3], &tmp); + + endomorphism_application_even_basis(basis, 0, codomain, beta1, TORSION_EVEN_POWER); + +#ifndef NDEBUG + { + ec_curve_t E0 = CURVE_E0; + ec_curve_t codomain_tmp; + ec_basis_t bas0 = CURVES_WITH_ENDOMORPHISMS[0].basis_even; + copy_curve(&codomain_tmp, codomain); + copy_curve(&E1_tmp, &E1); + copy_curve(&E2_tmp, &E2); + weil(&w0a, TORSION_EVEN_POWER, &bas0.P, &bas0.Q, &bas0.PmQ, &E0); + weil(&w1a, TORSION_EVEN_POWER, &basis->P, &basis->Q, &basis->PmQ, &codomain_tmp); + digit_t tmp_d[2 * NWORDS_ORDER] = { 0 }; + if (index_order1 != 0) { + copy_basis(&bas1, &CURVES_WITH_ENDOMORPHISMS[index_order1].basis_even); + weil(&w0, TORSION_EVEN_POWER, &bas1.P, &bas1.Q, &bas1.PmQ, &E1_tmp); + ibz_to_digit_array(tmp_d, &CONNECTING_IDEALS[index_order1].norm); + fp2_pow_vartime(&test_pow, &w0a, tmp_d, 2 * NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w0)); + } + if (index_order2 != 0) { + copy_basis(&bas2, &CURVES_WITH_ENDOMORPHISMS[index_order2].basis_even); + weil(&w0, TORSION_EVEN_POWER, &bas2.P, &bas2.Q, &bas2.PmQ, &E2_tmp); + ibz_to_digit_array(tmp_d, &CONNECTING_IDEALS[index_order2].norm); + fp2_pow_vartime(&test_pow, &w0a, tmp_d, 2 * NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w0)); + } + ibz_to_digit_array(tmp_d, &lideal->norm); + fp2_pow_vartime(&test_pow, &w0a, tmp_d, 2 * NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w1a)); + } +#endif + +cleanup: + ibz_finalize(&norm_d); + ibz_finalize(&test1); + ibz_finalize(&test2); + ibz_finalize(&target); + ibz_finalize(&tmp); + ibz_finalize(&two_pow); + quat_alg_elem_finalize(&theta); + return ret; +} + +int +dim2id2iso_arbitrary_isogeny_evaluation(ec_basis_t *basis, ec_curve_t *codomain, const quat_left_ideal_t *lideal) +{ + int ret; + + quat_alg_elem_t beta1, beta2; + ibz_t u, v, d1, d2; + + quat_alg_elem_init(&beta1); + quat_alg_elem_init(&beta2); + + ibz_init(&u); + ibz_init(&v); + ibz_init(&d1); + ibz_init(&d2); + + ret = dim2id2iso_ideal_to_isogeny_clapotis( + &beta1, &beta2, &u, &v, &d1, &d2, codomain, basis, lideal, &QUATALG_PINFTY); + + quat_alg_elem_finalize(&beta1); + quat_alg_elem_finalize(&beta2); + + ibz_finalize(&u); + ibz_finalize(&v); + ibz_finalize(&d1); + ibz_finalize(&d2); + + return ret; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/e0_basis.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/e0_basis.c new file mode 100644 index 0000000000..a7148e485b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/e0_basis.c @@ -0,0 +1,55 @@ +#include +const fp2_t BASIS_E0_PX = { +#if 0 +#elif RADIX == 16 +{0x1099, 0xa9f, 0x14f8, 0x1537, 0x1a13, 0x97e, 0x1095, 0xc8b, 0xdd2, 0x1c5f, 0xbdf, 0x1344, 0x1330, 0x1733, 0x185d, 0x1b08, 0x464, 0x76f, 
0xe44, 0x3fc, 0x1dc0, 0x1c62, 0x88, 0x972, 0x13f4, 0x18c8, 0x6bd, 0x804, 0x1269, 0x19e0, 0x14bd, 0x10a1, 0xe5e, 0x1af2, 0x156c, 0x3f7, 0x16a1, 0x47d, 0x314} +#elif RADIX == 32 +{0x184cba61, 0xf4f854f, 0x1fb42753, 0x45c2552, 0x1c5f6e93, 0x2688bdf, 0xedcce66, 0x64d8461, 0x1c8876f2, 0x177007f8, 0x12044718, 0x1913f44b, 0x10d7b8, 0x1cf049a5, 0x1d0a1a5e, 0x1b35e4e5, 0x1508fdea, 0x66d} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x27537a7c2a7e132e, 0x7dba4c8b84aa5fb4, 0xedcce6613445eff1, 0xc7221dbc8c9b08c2, 0x8972044718bb803f, 0x24d280435ee3227e, 0x6bc9cbd0a1a5ee78, 0x1011f6d423f7ab6} +#else +{0xa6f4f854fc265d, 0x4c8b84aa5fb427, 0x1309a22f7f8bedd, 0x12326c230bb7339, 0x1177007f8e443b7, 0x3227e897204471, 0x173c12694021af7, 0xd9af272f428697, 0x523eda847ef5} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x4b1, 0x178f, 0x107b, 0x6f6, 0x75e, 0x1b27, 0x4db, 0x1e1b, 0xd78, 0x15b6, 0x1130, 0x8cc, 0x1ac0, 0x9b7, 0x692, 0x1e07, 0x1f4, 0xfd7, 0x2ab, 0x7b5, 0x1040, 0xa43, 0xb6d, 0x13a1, 0x1422, 0x10c9, 0x10b0, 0x1540, 0x827, 0xa69, 0x1761, 0x1f25, 0x1d16, 0x16f2, 0x1fcb, 0x92, 0xcba, 0x1c03, 0x3c7} +#elif RADIX == 32 +{0x1258c7b1, 0xd07bbc7, 0x9cebc6f, 0x10d936f6, 0x15b66bc7, 0x1199130, 0x926df58, 0x1f4f039a, 0x556fd70, 0x1c100f6a, 0x15b6a90, 0x1934229d, 0x15021610, 0x1534a09e, 0xdf25bb0, 0x12ede5d1, 0x5d024bf, 0xa9b} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xbc6f683dde3c9631, 0xd9af1e1b26dec9ce, 0x926df5808cc89856, 0x5155bf5c3e9e0734, 0x53a15b6a90e0807b, 0x504f540858432684, 0xdbcba2df25bb0a9a, 0x18f00d974092fe5} +#else +{0xded07bbc792c63, 0x11e1b26dec9cebc, 0xc046644c2b6cd7, 0x10fa781cd249b7d, 0x1c100f6a2ab7eb, 0x3268453a15b6a9, 0x54d2827aa042c2, 0x1976f2e8b7c96ec, 0x16e01b2e8125f} +#endif +#endif +}; +const fp2_t BASIS_E0_QX = { +#if 0 +#elif RADIX == 16 +{0x15c, 0x865, 0x1af6, 0x17b9, 0x6a2, 0x1c22, 0x17c5, 0x1149, 0xa7, 0x151e, 0xe57, 0x4c2, 0x18cd, 0xbd2, 0x7a4, 0x7c6, 0x74a, 0xd2, 0x902, 0x68c, 0x21e, 0x1e44, 0x1f5a, 0x1d4c, 0x115b, 0x1777, 0x16d4, 0x503, 0x3af, 0x7e4, 0x1aa7, 0x3dd, 0x827, 0x186b, 0x765, 0x1fc5, 0xc78, 0x9bd, 0xfe} +#elif RADIX == 32 +{0x10ae12d6, 0x13af6432, 0x88d457b, 0xa4df178, 0x151e053c, 0x14984e57, 0x122f4b19, 0x14a3e31e, 0x12040d23, 0x878d18, 0xcfad791, 0xef15bea, 0x140eda97, 0x13f20ebc, 0xe3ddd53, 0x1970d682, 0x3c7f14e, 0x4eb} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x457b9d7b21942b84, 0x7814f149be2f088d, 0x22f4b19a4c272bd4, 0xc4810348e947c63d, 0x7d4cfad791043c68, 0x75e503b6a5dde2b, 0xe1ad04e3ddd539f9, 0x1326f58f1fc53b2} +#else +{0xf73af643285709, 0xf149be2f088d45, 0xcd261395ea3c0a, 0x3a51f18f48bd2c, 0x20878d18902069, 0x1dde2b7d4cfad79, 0x1cfc83af281db52, 0xcb86b4138f7754, 0xb4deb1e3f8a7} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x6ac, 0x25e, 0xc7a, 0x1492, 0xd01, 0xbc0, 0x118, 0x376, 0x3e0, 0x7ae, 0x573, 0x171f, 0x35a, 0x1725, 0x48f, 0xc94, 0x133c, 0x16a4, 0x10a8, 0x178d, 0xdd7, 0x798, 0x1d05, 0x39f, 0xc2a, 0x179c, 0x407, 0xd3, 0x118a, 0x1c9f, 0xeac, 0x145b, 0xc35, 0x11a2, 0x58b, 0xe4, 0x5e3, 0xae7, 0x330} +#elif RADIX == 32 +{0x3563c78, 0x4c7a12f, 0x101a0349, 0x1bb04617, 0x7ae1f00, 0xae3e573, 0x7dc946b, 0x13c64a12, 0x1516a49, 0x375ef1b, 0x1fe829e6, 0x138c2a1c, 0x34c80f7, 0xe4fc628, 0xb45b756, 0x2e344c3, 0xf18390b, 0x339} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x349263d0978d58f, 0xb87c037608c2f01a, 0x7dc946b571f2b99e, 0xd8545a92678c9424, 0x439fe829e61baf78, 0xe3140d3203de7185, 0xc68986b45b756727, 0x32b9cbc60e42c5} +#else +{0x924c7a12f1ab1e, 0x37608c2f01a03, 0x15ab8f95ccf5c3e, 
0x99e325091f7251, 0xc375ef1b0a8b52, 0x1e7185439fe829e, 0x1393f18a069901e, 0x1171a261ad16dd5, 0x6573978c1c85} +#endif +#endif +}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/e0_basis.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/e0_basis.h new file mode 100644 index 0000000000..05cafb8462 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/e0_basis.h @@ -0,0 +1,3 @@ +#include +extern const fp2_t BASIS_E0_PX; +extern const fp2_t BASIS_E0_QX; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec.c new file mode 100644 index 0000000000..be4e4e55b1 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec.c @@ -0,0 +1,665 @@ +#include +#include +#include +#include + +void +ec_point_init(ec_point_t *P) +{ // Initialize point as identity element (1:0) + fp2_set_one(&(P->x)); + fp2_set_zero(&(P->z)); +} + +void +ec_curve_init(ec_curve_t *E) +{ // Initialize the curve struct + // Initialize the constants + fp2_set_zero(&(E->A)); + fp2_set_one(&(E->C)); + + // Initialize the point (A+2 : 4C) + ec_point_init(&(E->A24)); + + // Set the bool to be false by default + E->is_A24_computed_and_normalized = false; +} + +void +select_point(ec_point_t *Q, const ec_point_t *P1, const ec_point_t *P2, const digit_t option) +{ // Select points in constant time + // If option = 0 then Q <- P1, else if option = 0xFF...FF then Q <- P2 + fp2_select(&(Q->x), &(P1->x), &(P2->x), option); + fp2_select(&(Q->z), &(P1->z), &(P2->z), option); +} + +void +cswap_points(ec_point_t *P, ec_point_t *Q, const digit_t option) +{ // Swap points in constant time + // If option = 0 then P <- P and Q <- Q, else if option = 0xFF...FF then P <- Q and Q <- P + fp2_cswap(&(P->x), &(Q->x), option); + fp2_cswap(&(P->z), &(Q->z), option); +} + +void +ec_normalize_point(ec_point_t *P) +{ + fp2_inv(&P->z); + fp2_mul(&P->x, &P->x, &P->z); + fp2_set_one(&(P->z)); +} + +void +ec_normalize_curve(ec_curve_t *E) +{ + fp2_inv(&E->C); + fp2_mul(&E->A, &E->A, &E->C); + fp2_set_one(&E->C); +} + +void +ec_curve_normalize_A24(ec_curve_t *E) +{ + if (!E->is_A24_computed_and_normalized) { + AC_to_A24(&E->A24, E); + ec_normalize_point(&E->A24); + E->is_A24_computed_and_normalized = true; + } + assert(fp2_is_one(&E->A24.z)); +} + +void +ec_normalize_curve_and_A24(ec_curve_t *E) +{ // Neither the curve or A24 are guaranteed to be normalized. 
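+    // Here A24 denotes the point (A + 2C : 4C); once C has been normalized to 1,
+    // this is ((A + 2)/4 : 1), which the two halvings below compute directly.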
+ // First we normalize (A/C : 1) and conditionally compute + if (!fp2_is_one(&E->C)) { + ec_normalize_curve(E); + } + + if (!E->is_A24_computed_and_normalized) { + // Now compute A24 = ((A + 2) / 4 : 1) + fp2_add_one(&E->A24.x, &E->A); // re(A24.x) = re(A) + 1 + fp2_add_one(&E->A24.x, &E->A24.x); // re(A24.x) = re(A) + 2 + fp_copy(&E->A24.x.im, &E->A.im); // im(A24.x) = im(A) + + fp2_half(&E->A24.x, &E->A24.x); // (A + 2) / 2 + fp2_half(&E->A24.x, &E->A24.x); // (A + 2) / 4 + fp2_set_one(&E->A24.z); + + E->is_A24_computed_and_normalized = true; + } +} + +uint32_t +ec_is_zero(const ec_point_t *P) +{ + return fp2_is_zero(&P->z); +} + +uint32_t +ec_has_zero_coordinate(const ec_point_t *P) +{ + return fp2_is_zero(&P->x) | fp2_is_zero(&P->z); +} + +uint32_t +ec_is_equal(const ec_point_t *P, const ec_point_t *Q) +{ // Evaluate if two points in Montgomery coordinates (X:Z) are equal + // Returns 0xFFFFFFFF (true) if P=Q, 0 (false) otherwise + fp2_t t0, t1; + + // Check if P, Q are the points at infinity + uint32_t l_zero = ec_is_zero(P); + uint32_t r_zero = ec_is_zero(Q); + + // Check if PX * QZ = QX * PZ + fp2_mul(&t0, &P->x, &Q->z); + fp2_mul(&t1, &P->z, &Q->x); + uint32_t lr_equal = fp2_is_equal(&t0, &t1); + + // Points are equal if + // - Both are zero, or + // - neither are zero AND PX * QZ = QX * PZ + return (l_zero & r_zero) | (~l_zero & ~r_zero * lr_equal); +} + +uint32_t +ec_is_two_torsion(const ec_point_t *P, const ec_curve_t *E) +{ + if (ec_is_zero(P)) + return 0; + + uint32_t x_is_zero, tmp_is_zero; + fp2_t t0, t1, t2; + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + fp2_add(&t1, &t0, &t1); + fp2_mul(&t2, &t2, &E->A); + fp2_mul(&t1, &t1, &E->C); + fp2_add(&t1, &t1, &t1); + fp2_add(&t0, &t1, &t2); // 4 (CX^2+CZ^2+AXZ) + + x_is_zero = fp2_is_zero(&P->x); + tmp_is_zero = fp2_is_zero(&t0); + + // two torsion if x or x^2 + Ax + 1 is zero + return x_is_zero | tmp_is_zero; +} + +uint32_t +ec_is_four_torsion(const ec_point_t *P, const ec_curve_t *E) +{ + ec_point_t test; + xDBL_A24(&test, P, &E->A24, E->is_A24_computed_and_normalized); + return ec_is_two_torsion(&test, E); +} + +uint32_t +ec_is_basis_four_torsion(const ec_basis_t *B, const ec_curve_t *E) +{ // Check if basis points (P, Q) form a full 2^t-basis + ec_point_t P2, Q2; + xDBL_A24(&P2, &B->P, &E->A24, E->is_A24_computed_and_normalized); + xDBL_A24(&Q2, &B->Q, &E->A24, E->is_A24_computed_and_normalized); + return (ec_is_two_torsion(&P2, E) & ec_is_two_torsion(&Q2, E) & ~ec_is_equal(&P2, &Q2)); +} + +int +ec_curve_verify_A(const fp2_t *A) +{ // Verify the Montgomery coefficient A is valid (A^2-4 \ne 0) + // Return 1 if curve is valid, 0 otherwise + fp2_t t; + fp2_set_one(&t); + fp_add(&t.re, &t.re, &t.re); // t=2 + if (fp2_is_equal(A, &t)) + return 0; + fp_neg(&t.re, &t.re); // t=-2 + if (fp2_is_equal(A, &t)) + return 0; + return 1; +} + +int +ec_curve_init_from_A(ec_curve_t *E, const fp2_t *A) +{ // Initialize the curve from the A coefficient and check it is valid + // Return 1 if curve is valid, 0 otherwise + ec_curve_init(E); + fp2_copy(&E->A, A); // Set A + return ec_curve_verify_A(A); +} + +void +ec_j_inv(fp2_t *j_inv, const ec_curve_t *curve) +{ // j-invariant computation for Montgommery coefficient A2=(A+2C:4C) + fp2_t t0, t1; + + fp2_sqr(&t1, &curve->C); + fp2_sqr(j_inv, &curve->A); + fp2_add(&t0, &t1, &t1); + fp2_sub(&t0, j_inv, &t0); + fp2_sub(&t0, &t0, &t1); + fp2_sub(j_inv, &t0, &t1); + fp2_sqr(&t1, &t1); + fp2_mul(j_inv, j_inv, &t1); + 
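// At this point j_inv = C^4*(A^2 - 4C^2) and t0 = A^2 - 3C^2; the remaining steps
+    // scale t0 up to 256*(A^2 - 3C^2)^3 and invert, giving
+    // j = 256*(A^2 - 3C^2)^3 / (C^4*(A^2 - 4C^2)) = 256*(a^2 - 3)^3 / (a^2 - 4) with a = A/C.
+ 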
fp2_add(&t0, &t0, &t0); + fp2_add(&t0, &t0, &t0); + fp2_sqr(&t1, &t0); + fp2_mul(&t0, &t0, &t1); + fp2_add(&t0, &t0, &t0); + fp2_add(&t0, &t0, &t0); + fp2_inv(j_inv); + fp2_mul(j_inv, &t0, j_inv); +} + +void +xDBL_E0(ec_point_t *Q, const ec_point_t *P) +{ // Doubling of a Montgomery point in projective coordinates (X:Z) on the curve E0 with (A:C) = (0:1). + // Input: projective Montgomery x-coordinates P = (XP:ZP), where xP=XP/ZP, and Montgomery curve constants (A:C) = (0:1). + // Output: projective Montgomery x-coordinates Q <- 2*P = (XQ:ZQ) such that x(2P)=XQ/ZQ. + fp2_t t0, t1, t2; + + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + fp2_add(&t1, &t1, &t1); + fp2_mul(&Q->x, &t0, &t1); + fp2_add(&Q->z, &t1, &t2); + fp2_mul(&Q->z, &Q->z, &t2); +} + +void +xDBL(ec_point_t *Q, const ec_point_t *P, const ec_point_t *AC) +{ // Doubling of a Montgomery point in projective coordinates (X:Z). Computation of coefficient values A+2C and 4C + // on-the-fly. + // Input: projective Montgomery x-coordinates P = (XP:ZP), where xP=XP/ZP, and Montgomery curve constants (A:C). + // Output: projective Montgomery x-coordinates Q <- 2*P = (XQ:ZQ) such that x(2P)=XQ/ZQ. + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + fp2_add(&t3, &AC->z, &AC->z); + fp2_mul(&t1, &t1, &t3); + fp2_add(&t1, &t1, &t1); + fp2_mul(&Q->x, &t0, &t1); + fp2_add(&t0, &t3, &AC->x); + fp2_mul(&t0, &t0, &t2); + fp2_add(&t0, &t0, &t1); + fp2_mul(&Q->z, &t0, &t2); +} + +void +xDBL_A24(ec_point_t *Q, const ec_point_t *P, const ec_point_t *A24, const bool A24_normalized) +{ // Doubling of a Montgomery point in projective coordinates (X:Z). + // Input: projective Montgomery x-coordinates P = (XP:ZP), where xP=XP/ZP, and + // the Montgomery curve constants A24 = (A+2C:4C) (or A24 = (A+2C/4C:1) if normalized). + // Output: projective Montgomery x-coordinates Q <- 2*P = (XQ:ZQ) such that x(2P)=XQ/ZQ. + fp2_t t0, t1, t2; + + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + if (!A24_normalized) + fp2_mul(&t1, &t1, &A24->z); + fp2_mul(&Q->x, &t0, &t1); + fp2_mul(&t0, &t2, &A24->x); + fp2_add(&t0, &t0, &t1); + fp2_mul(&Q->z, &t0, &t2); +} + +void +xADD(ec_point_t *R, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ) +{ // Differential addition of Montgomery points in projective coordinates (X:Z). + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, and difference + // PQ=P-Q=(XPQ:ZPQ). + // Output: projective Montgomery point R <- P+Q = (XR:ZR) such that x(P+Q)=XR/ZR. + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_add(&t2, &Q->x, &Q->z); + fp2_sub(&t3, &Q->x, &Q->z); + fp2_mul(&t0, &t0, &t3); + fp2_mul(&t1, &t1, &t2); + fp2_add(&t2, &t0, &t1); + fp2_sub(&t3, &t0, &t1); + fp2_sqr(&t2, &t2); + fp2_sqr(&t3, &t3); + fp2_mul(&t2, &PQ->z, &t2); + fp2_mul(&R->z, &PQ->x, &t3); + fp2_copy(&R->x, &t2); +} + +void +xDBLADD(ec_point_t *R, + ec_point_t *S, + const ec_point_t *P, + const ec_point_t *Q, + const ec_point_t *PQ, + const ec_point_t *A24, + const bool A24_normalized) +{ // Simultaneous doubling and differential addition. 
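+ // This is the core ladder step: 2*P and P+Q are computed together, sharing the
+ // intermediate values (XP + ZP) and (XP - ZP) between the two operations.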
+ // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, the difference + // PQ=P-Q=(XPQ:ZPQ), and the Montgomery curve constants A24 = (A+2C:4C) (or A24 = (A+2C/4C:1) if normalized). + // Output: projective Montgomery points R <- 2*P = (XR:ZR) such that x(2P)=XR/ZR, and S <- P+Q = (XS:ZS) such that = + // x(Q+P)=XS/ZS. + fp2_t t0, t1, t2; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&R->x, &t0); + fp2_sub(&t2, &Q->x, &Q->z); + fp2_add(&S->x, &Q->x, &Q->z); + fp2_mul(&t0, &t0, &t2); + fp2_sqr(&R->z, &t1); + fp2_mul(&t1, &t1, &S->x); + fp2_sub(&t2, &R->x, &R->z); + if (!A24_normalized) + fp2_mul(&R->z, &R->z, &A24->z); + fp2_mul(&R->x, &R->x, &R->z); + fp2_mul(&S->x, &A24->x, &t2); + fp2_sub(&S->z, &t0, &t1); + fp2_add(&R->z, &R->z, &S->x); + fp2_add(&S->x, &t0, &t1); + fp2_mul(&R->z, &R->z, &t2); + fp2_sqr(&S->z, &S->z); + fp2_sqr(&S->x, &S->x); + fp2_mul(&S->z, &S->z, &PQ->x); + fp2_mul(&S->x, &S->x, &PQ->z); +} + +void +xMUL(ec_point_t *Q, const ec_point_t *P, const digit_t *k, const int kbits, const ec_curve_t *curve) +{ // The Montgomery ladder + // Input: projective Montgomery point P=(XP:ZP) such that xP=XP/ZP, a scalar k of bitlength kbits, and + // the Montgomery curve constants (A:C) (or A24 = (A+2C/4C:1) if normalized). + // Output: projective Montgomery points Q <- k*P = (XQ:ZQ) such that x(k*P)=XQ/ZQ. + ec_point_t R0, R1, A24; + digit_t mask; + unsigned int bit, prevbit = 0, swap; + + if (!curve->is_A24_computed_and_normalized) { + // Computation of A24=(A+2C:4C) + fp2_add(&A24.x, &curve->C, &curve->C); + fp2_add(&A24.z, &A24.x, &A24.x); + fp2_add(&A24.x, &A24.x, &curve->A); + } else { + fp2_copy(&A24.x, &curve->A24.x); + fp2_copy(&A24.z, &curve->A24.z); + // Assert A24 has been normalised + assert(fp2_is_one(&A24.z)); + } + + // R0 <- (1:0), R1 <- P + ec_point_init(&R0); + fp2_copy(&R1.x, &P->x); + fp2_copy(&R1.z, &P->z); + + // Main loop + for (int i = kbits - 1; i >= 0; i--) { + bit = (k[i >> LOG2RADIX] >> (i & (RADIX - 1))) & 1; + swap = bit ^ prevbit; + prevbit = bit; + mask = 0 - (digit_t)swap; + + cswap_points(&R0, &R1, mask); + xDBLADD(&R0, &R1, &R0, &R1, P, &A24, true); + } + swap = 0 ^ prevbit; + mask = 0 - (digit_t)swap; + cswap_points(&R0, &R1, mask); + + fp2_copy(&Q->x, &R0.x); + fp2_copy(&Q->z, &R0.z); +} + +int +xDBLMUL(ec_point_t *S, + const ec_point_t *P, + const digit_t *k, + const ec_point_t *Q, + const digit_t *l, + const ec_point_t *PQ, + const int kbits, + const ec_curve_t *curve) +{ // The Montgomery biladder + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, scalars k and l of + // bitlength kbits, the difference PQ=P-Q=(XPQ:ZPQ), and the Montgomery curve constants (A:C). + // Output: projective Montgomery point S <- k*P + l*Q = (XS:ZS) such that x(k*P + l*Q)=XS/ZS. 
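+ // Implementation note: inputs with a zero X- or Z-coordinate are rejected
+ // (return 0), since the differential addition formulas are not valid for them.
+ // Both scalars are first made odd (tracked by sigma) and recoded into the bit
+ // string r[]; each ladder iteration then performs one doubling and two
+ // differential additions on the accumulators R[0..2], using only constant-time
+ // selects and swaps.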
+ + int i, A_is_zero; + digit_t evens, mevens, bitk0, bitl0, maskk, maskl, temp, bs1_ip1, bs2_ip1, bs1_i, bs2_i, h; + digit_t sigma[2] = { 0 }, pre_sigma = 0; + digit_t k_t[NWORDS_ORDER], l_t[NWORDS_ORDER], one[NWORDS_ORDER] = { 0 }, r[2 * BITS] = { 0 }; + ec_point_t DIFF1a, DIFF1b, DIFF2a, DIFF2b, R[3] = { 0 }, T[3]; + + // differential additions formulas are invalid in this case + if (ec_has_zero_coordinate(P) | ec_has_zero_coordinate(Q) | ec_has_zero_coordinate(PQ)) + return 0; + + // Derive sigma according to parity + bitk0 = (k[0] & 1); + bitl0 = (l[0] & 1); + maskk = 0 - bitk0; // Parity masks: 0 if even, otherwise 1...1 + maskl = 0 - bitl0; + sigma[0] = (bitk0 ^ 1); + sigma[1] = (bitl0 ^ 1); + evens = sigma[0] + sigma[1]; // Count number of even scalars + mevens = 0 - (evens & 1); // Mask mevens <- 0 if # even of scalars = 0 or 2, otherwise mevens = 1...1 + + // If k and l are both even or both odd, pick sigma = (0,1) + sigma[0] = (sigma[0] & mevens); + sigma[1] = (sigma[1] & mevens) | (1 & ~mevens); + + // Convert even scalars to odd + one[0] = 1; + mp_sub(k_t, k, one, NWORDS_ORDER); + mp_sub(l_t, l, one, NWORDS_ORDER); + select_ct(k_t, k_t, k, maskk, NWORDS_ORDER); + select_ct(l_t, l_t, l, maskl, NWORDS_ORDER); + + // Scalar recoding + for (i = 0; i < kbits; i++) { + // If sigma[0] = 1 swap k_t and l_t + maskk = 0 - (sigma[0] ^ pre_sigma); + swap_ct(k_t, l_t, maskk, NWORDS_ORDER); + + if (i == kbits - 1) { + bs1_ip1 = 0; + bs2_ip1 = 0; + } else { + bs1_ip1 = mp_shiftr(k_t, 1, NWORDS_ORDER); + bs2_ip1 = mp_shiftr(l_t, 1, NWORDS_ORDER); + } + bs1_i = k_t[0] & 1; + bs2_i = l_t[0] & 1; + + r[2 * i] = bs1_i ^ bs1_ip1; + r[2 * i + 1] = bs2_i ^ bs2_ip1; + + // Revert sigma if second bit, r_(2i+1), is 1 + pre_sigma = sigma[0]; + maskk = 0 - r[2 * i + 1]; + select_ct(&temp, &sigma[0], &sigma[1], maskk, 1); + select_ct(&sigma[1], &sigma[1], &sigma[0], maskk, 1); + sigma[0] = temp; + } + + // Point initialization + ec_point_init(&R[0]); + maskk = 0 - sigma[0]; + select_point(&R[1], P, Q, maskk); + select_point(&R[2], Q, P, maskk); + + fp2_copy(&DIFF1a.x, &R[1].x); + fp2_copy(&DIFF1a.z, &R[1].z); + fp2_copy(&DIFF1b.x, &R[2].x); + fp2_copy(&DIFF1b.z, &R[2].z); + + // Initialize DIFF2a <- P+Q, DIFF2b <- P-Q + xADD(&R[2], &R[1], &R[2], PQ); + if (ec_has_zero_coordinate(&R[2])) + return 0; // non valid formulas + + fp2_copy(&DIFF2a.x, &R[2].x); + fp2_copy(&DIFF2a.z, &R[2].z); + fp2_copy(&DIFF2b.x, &PQ->x); + fp2_copy(&DIFF2b.z, &PQ->z); + + A_is_zero = fp2_is_zero(&curve->A); + + // Main loop + for (i = kbits - 1; i >= 0; i--) { + h = r[2 * i] + r[2 * i + 1]; // in {0, 1, 2} + maskk = 0 - (h & 1); + select_point(&T[0], &R[0], &R[1], maskk); + maskk = 0 - (h >> 1); + select_point(&T[0], &T[0], &R[2], maskk); + if (A_is_zero) { + xDBL_E0(&T[0], &T[0]); + } else { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(&T[0], &T[0], &curve->A24, true); + } + + maskk = 0 - r[2 * i + 1]; // in {0, 1} + select_point(&T[1], &R[0], &R[1], maskk); + select_point(&T[2], &R[1], &R[2], maskk); + + cswap_points(&DIFF1a, &DIFF1b, maskk); + xADD(&T[1], &T[1], &T[2], &DIFF1a); + xADD(&T[2], &R[0], &R[2], &DIFF2a); + + // If hw (mod 2) = 1 then swap DIFF2a and DIFF2b + maskk = 0 - (h & 1); + cswap_points(&DIFF2a, &DIFF2b, maskk); + + // R <- T + copy_point(&R[0], &T[0]); + copy_point(&R[1], &T[1]); + copy_point(&R[2], &T[2]); + } + + // Output R[evens] + select_point(S, &R[0], &R[1], mevens); + + maskk = 0 - (bitk0 & bitl0); + select_point(S, S, &R[2], maskk); + return 1; +} + +int +ec_ladder3pt(ec_point_t *R, + const 
digit_t *m, + const ec_point_t *P, + const ec_point_t *Q, + const ec_point_t *PQ, + const ec_curve_t *E) +{ // The 3-point Montgomery ladder + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, a scalar k of + // bitlength kbits, the difference PQ=P-Q=(XPQ:ZPQ), and the Montgomery curve constants A24 = (A+2C/4C:1). + // Output: projective Montgomery point R <- P + m*Q = (XR:ZR) such that x(P + m*Q)=XR/ZR. + assert(E->is_A24_computed_and_normalized); + if (!fp2_is_one(&E->A24.z)) { + return 0; + } + // Formulas are not valid in that case + if (ec_has_zero_coordinate(PQ)) { + return 0; + } + + ec_point_t X0, X1, X2; + copy_point(&X0, Q); + copy_point(&X1, P); + copy_point(&X2, PQ); + + int i, j; + digit_t t; + for (i = 0; i < NWORDS_ORDER; i++) { + t = 1; + for (j = 0; j < RADIX; j++) { + cswap_points(&X1, &X2, -((t & m[i]) == 0)); + xDBLADD(&X0, &X1, &X0, &X1, &X2, &E->A24, true); + cswap_points(&X1, &X2, -((t & m[i]) == 0)); + t <<= 1; + }; + }; + copy_point(R, &X1); + return 1; +} + +// WRAPPERS to export + +void +ec_dbl(ec_point_t *res, const ec_point_t *P, const ec_curve_t *curve) +{ + // If A24 = ((A+2)/4 : 1) we save multiplications + if (curve->is_A24_computed_and_normalized) { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(res, P, &curve->A24, true); + } else { + // Otherwise we compute A24 on the fly for doubling + xDBL(res, P, (const ec_point_t *)curve); + } +} + +void +ec_dbl_iter(ec_point_t *res, int n, const ec_point_t *P, ec_curve_t *curve) +{ + if (n == 0) { + copy_point(res, P); + return; + } + + // When the chain is long enough, we should normalise A24 + if (n > 50) { + ec_curve_normalize_A24(curve); + } + + // When A24 is normalized we can save some multiplications + if (curve->is_A24_computed_and_normalized) { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(res, P, &curve->A24, true); + for (int i = 0; i < n - 1; i++) { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(res, res, &curve->A24, true); + } + } else { + // Otherwise we do normal doubling + xDBL(res, P, (const ec_point_t *)curve); + for (int i = 0; i < n - 1; i++) { + xDBL(res, res, (const ec_point_t *)curve); + } + } +} + +void +ec_dbl_iter_basis(ec_basis_t *res, int n, const ec_basis_t *B, ec_curve_t *curve) +{ + ec_dbl_iter(&res->P, n, &B->P, curve); + ec_dbl_iter(&res->Q, n, &B->Q, curve); + ec_dbl_iter(&res->PmQ, n, &B->PmQ, curve); +} + +void +ec_mul(ec_point_t *res, const digit_t *scalar, const int kbits, const ec_point_t *P, ec_curve_t *curve) +{ + // For large scalars it's worth normalising anyway + if (kbits > 50) { + ec_curve_normalize_A24(curve); + } + + // When A24 is computed and normalized we save some Fp2 multiplications + xMUL(res, P, scalar, kbits, curve); +} + +int +ec_biscalar_mul(ec_point_t *res, + const digit_t *scalarP, + const digit_t *scalarQ, + const int kbits, + const ec_basis_t *PQ, + const ec_curve_t *curve) +{ + if (fp2_is_zero(&PQ->PmQ.z)) + return 0; + + /* Differential additions behave badly when PmQ = (0:1), so we need to + * treat this case specifically. 
Since we assume P, Q are a basis, this + * can happen only if kbits==1 */ + if (kbits == 1) { + // Sanity check: our basis should be given by 2-torsion points + if (!ec_is_two_torsion(&PQ->P, curve) || !ec_is_two_torsion(&PQ->Q, curve) || + !ec_is_two_torsion(&PQ->PmQ, curve)) + return 0; + digit_t bP, bQ; + bP = (scalarP[0] & 1); + bQ = (scalarQ[0] & 1); + if (bP == 0 && bQ == 0) + ec_point_init(res); //(1: 0) + else if (bP == 1 && bQ == 0) + copy_point(res, &PQ->P); + else if (bP == 0 && bQ == 1) + copy_point(res, &PQ->Q); + else if (bP == 1 && bQ == 1) + copy_point(res, &PQ->PmQ); + else // should never happen + assert(0); + return 1; + } else { + ec_curve_t E; + copy_curve(&E, curve); + + if (!fp2_is_zero(&curve->A)) { // If A is not zero normalize + ec_curve_normalize_A24(&E); + } + return xDBLMUL(res, &PQ->P, scalarP, &PQ->Q, scalarQ, &PQ->PmQ, kbits, (const ec_curve_t *)&E); + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec.h new file mode 100644 index 0000000000..ee2be38060 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec.h @@ -0,0 +1,668 @@ +/** @file + * + * @authors Luca De Feo, Francisco RH + * + * @brief Elliptic curve stuff + */ + +#ifndef EC_H +#define EC_H +#include +#include +#include +#include +#include + +/** @defgroup ec Elliptic curves + * @{ + */ + +/** @defgroup ec_t Data structures + * @{ + */ + +/** @brief Projective point on the Kummer line E/pm 1 in Montgomery coordinates + * + * @typedef ec_point_t + * + * @struct ec_point_t + * + * A projective point in (X:Z) or (X:Y:Z) coordinates (tbd). + */ +typedef struct ec_point_t +{ + fp2_t x; + fp2_t z; +} ec_point_t; + +/** @brief Projective point in Montgomery coordinates + * + * @typedef jac_point_t + * + * @struct jac_point_t + * + * A projective point in (X:Y:Z) coordinates + */ +typedef struct jac_point_t +{ + fp2_t x; + fp2_t y; + fp2_t z; +} jac_point_t; + +/** @brief Addition components + * + * @typedef add_components_t + * + * @struct add_components_t + * + * 3 components u,v,w that define the (X:Z) coordinates of both + * addition and substraction of two distinct points with + * P+Q =(u-v:w) and P-Q = (u+v=w) + */ +typedef struct add_components_t +{ + fp2_t u; + fp2_t v; + fp2_t w; +} add_components_t; + +/** @brief A basis of a torsion subgroup + * + * @typedef ec_basis_t + * + * @struct ec_basis_t + * + * A pair of points (or a triplet, tbd) forming a basis of a torsion subgroup. 
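+ * The difference point PmQ = P - Q is stored alongside P and Q so that x-only
+ * differential arithmetic (xADD, ladders) can be applied to the basis directly.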
+ */ +typedef struct ec_basis_t +{ + ec_point_t P; + ec_point_t Q; + ec_point_t PmQ; +} ec_basis_t; + +/** @brief An elliptic curve + * + * @typedef ec_curve_t + * + * @struct ec_curve_t + * + * An elliptic curve in projective Montgomery form + */ +typedef struct ec_curve_t +{ + fp2_t A; + fp2_t C; ///< cannot be 0 + ec_point_t A24; // the point (A+2 : 4C) + bool is_A24_computed_and_normalized; // says if A24 has been computed and normalized +} ec_curve_t; + +/** @brief An isogeny of degree a power of 2 + * + * @typedef ec_isog_even_t + * + * @struct ec_isog_even_t + */ +typedef struct ec_isog_even_t +{ + ec_curve_t curve; ///< The domain curve + ec_point_t kernel; ///< A kernel generator + unsigned length; ///< The length as a 2-isogeny walk +} ec_isog_even_t; + +/** @brief Isomorphism of Montgomery curves + * + * @typedef ec_isom_t + * + * @struct ec_isom_t + * + * The isomorphism is given by the map maps (X:Z) ↦ ( (Nx X + Nz Z) : (D Z) ) + */ +typedef struct ec_isom_t +{ + fp2_t Nx; + fp2_t Nz; + fp2_t D; +} ec_isom_t; + +// end ec_t +/** @} + */ + +/** @defgroup ec_curve_t Curves and isomorphisms + * @{ + */ + +// Initalisation for curves and points +void ec_curve_init(ec_curve_t *E); +void ec_point_init(ec_point_t *P); + +/** + * @brief Verify that a Montgomery coefficient is valid + * + * @param A an fp2_t + * + * @return 0 if curve is invalid, 1 otherwise + */ +int ec_curve_verify_A(const fp2_t *A); + +/** + * @brief Initialize an elliptic curve from a coefficient + * + * @param A an fp2_t + * @param E the elliptic curve to initialize + * + * @return 0 if curve is invalid, 1 otherwise + */ +int ec_curve_init_from_A(ec_curve_t *E, const fp2_t *A); + +// Copying points, bases and curves +static inline void +copy_point(ec_point_t *P, const ec_point_t *Q) +{ + fp2_copy(&P->x, &Q->x); + fp2_copy(&P->z, &Q->z); +} + +static inline void +copy_basis(ec_basis_t *B1, const ec_basis_t *B0) +{ + copy_point(&B1->P, &B0->P); + copy_point(&B1->Q, &B0->Q); + copy_point(&B1->PmQ, &B0->PmQ); +} + +static inline void +copy_curve(ec_curve_t *E1, const ec_curve_t *E2) +{ + fp2_copy(&(E1->A), &(E2->A)); + fp2_copy(&(E1->C), &(E2->C)); + E1->is_A24_computed_and_normalized = E2->is_A24_computed_and_normalized; + copy_point(&E1->A24, &E2->A24); +} + +// Functions for working with the A24 point and normalisation + +/** + * @brief Reduce (A : C) to (A/C : 1) in place + * + * @param E a curve + */ +void ec_normalize_curve(ec_curve_t *E); + +/** + * @brief Reduce (A + 2 : 4C) to ((A+2)/4C : 1) in place + * + * @param E a curve + */ +void ec_curve_normalize_A24(ec_curve_t *E); + +/** + * @brief Normalise both (A : C) and (A + 2 : 4C) as above, in place + * + * @param E a curve + */ +void ec_normalize_curve_and_A24(ec_curve_t *E); + +/** + * @brief Given a curve E, compute (A+2 : 4C) + * + * @param A24 the value (A+2 : 4C) to return into + * @param E a curve + */ +static inline void +AC_to_A24(ec_point_t *A24, const ec_curve_t *E) +{ + // Maybe we already have this computed + if (E->is_A24_computed_and_normalized) { + copy_point(A24, &E->A24); + return; + } + + // A24 = (A+2C : 4C) + fp2_add(&A24->z, &E->C, &E->C); + fp2_add(&A24->x, &E->A, &A24->z); + fp2_add(&A24->z, &A24->z, &A24->z); +} + +/** + * @brief Given a curve the point (A+2 : 4C) compute the curve coefficients (A : C) + * + * @param E a curve to compute + * @param A24 the value (A+2 : 4C) + */ +static inline void +A24_to_AC(ec_curve_t *E, const ec_point_t *A24) +{ + // (A:C) = ((A+2C)*2-4C : 4C) + fp2_add(&E->A, &A24->x, &A24->x); + fp2_sub(&E->A, 
&E->A, &A24->z); + fp2_add(&E->A, &E->A, &E->A); + fp2_copy(&E->C, &A24->z); +} + +/** + * @brief j-invariant. + * + * @param j_inv computed j_invariant + * @param curve input curve + */ +void ec_j_inv(fp2_t *j_inv, const ec_curve_t *curve); + +/** + * @brief Isomorphism of elliptic curve + * Takes as input two isomorphic Kummer lines in Montgomery form, and output an isomorphism between + * them + * + * @param isom computed isomorphism + * @param from domain curve + * @param to image curve + * @return 0xFFFFFFFF if there was an error during the computation, zero otherwise + */ +uint32_t ec_isomorphism(ec_isom_t *isom, const ec_curve_t *from, const ec_curve_t *to); + +/** + * @brief In-place evaluation of an isomorphism + * + * @param P a point + * @param isom an isomorphism + */ +void ec_iso_eval(ec_point_t *P, ec_isom_t *isom); + +/** @} + */ +/** @defgroup ec_point_t Point operations + * @{ + */ + +/** + * @brief Point equality + * + * @param P a point + * @param Q a point + * @return 0xFFFFFFFF if equal, zero otherwise + */ +uint32_t ec_is_equal(const ec_point_t *P, const ec_point_t *Q); + +/** + * @brief Point equality + * + * @param P a point + * @return 0xFFFFFFFF if point at infinity, zero otherwise + */ +uint32_t ec_is_zero(const ec_point_t *P); + +/** + * @brief Two torsion test + * + * @param P a point + * @param E the elliptic curve + * @return 0xFFFFFFFF if P is 2-torsion but not zero, zero otherwise + */ +uint32_t ec_is_two_torsion(const ec_point_t *P, const ec_curve_t *E); + +/** + * @brief Four torsion test + * + * @param P a point + * @param E the elliptic curve + * @return 0xFFFFFFFF if P is 2-torsion but not zero, zero otherwise + */ +uint32_t ec_is_four_torsion(const ec_point_t *P, const ec_curve_t *E); + +/** + * @brief Reduce Z-coordinate of point in place + * + * @param P a point + */ +void ec_normalize_point(ec_point_t *P); + +void xDBL_E0(ec_point_t *Q, const ec_point_t *P); +void xADD(ec_point_t *R, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ); +void xDBL_A24(ec_point_t *Q, const ec_point_t *P, const ec_point_t *A24, const bool A24_normalized); + +/** + * @brief Point doubling + * + * @param res computed double of P + * @param P a point + * @param curve an elliptic curve + */ +void ec_dbl(ec_point_t *res, const ec_point_t *P, const ec_curve_t *curve); + +/** + * @brief Point iterated doubling + * + * @param res computed double of P + * @param P a point + * @param n the number of double + * @param curve the curve on which P lays + */ +void ec_dbl_iter(ec_point_t *res, int n, const ec_point_t *P, ec_curve_t *curve); + +/** + * @brief Iterated doubling for a basis P, Q, PmQ + * + * @param res the computed iterated double of basis B + * @param n the number of doubles + * @param B the basis to double + * @param curve the parent curve of the basis + */ +void ec_dbl_iter_basis(ec_basis_t *res, int n, const ec_basis_t *B, ec_curve_t *curve); + +/** + * @brief Point multiplication + * + * @param res computed scalar * P + * @param curve the curve + * @param scalar an unsigned multi-precision integer + * @param P a point + * @param kbits numer of bits of the scalar + */ +void ec_mul(ec_point_t *res, const digit_t *scalar, const int kbits, const ec_point_t *P, ec_curve_t *curve); + +/** + * @brief Combination P+m*Q + * + * @param R computed P + m * Q + * @param curve the curve + * @param m an unsigned multi-precision integer + * @param P a point + * @param Q a point + * @param PQ the difference P-Q + * @return 0 if there was an error, 1 otherwise + */ +int 
ec_ladder3pt(ec_point_t *R, + const digit_t *m, + const ec_point_t *P, + const ec_point_t *Q, + const ec_point_t *PQ, + const ec_curve_t *curve); + +/** + * @brief Linear combination of points of a basis + * + * @param res computed scalarP * P + scalarQ * Q + * @param scalarP an unsigned multi-precision integer + * @param scalarQ an unsigned multi-precision integer + * @param kbits number of bits of the scalars, or n for points of order 2^n + * @param PQ a torsion basis consisting of points P and Q + * @param curve the curve + * + * @return 0 if there was an error, 1 otherwise + */ +int ec_biscalar_mul(ec_point_t *res, + const digit_t *scalarP, + const digit_t *scalarQ, + const int kbits, + const ec_basis_t *PQ, + const ec_curve_t *curve); + +// end point computations +/** + * @} + */ + +/** @defgroup ec_dlog_t Torsion basis computations + * @{ + */ + +/** + * @brief Generate a 2^f-torsion basis from a Montgomery curve along with a hint + * + * @param PQ2 an ec_basis_t + * @param curve an ec_curve_t + * @param f an integer + * + * @return A hint + * + * The algorithm is deterministic + */ +uint8_t ec_curve_to_basis_2f_to_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f); + +/** + * @brief Generate a 2^f-torsion basis from a Montgomery curve and a given hint + * + * @param PQ2 an ec_basis_t + * @param curve an ec_curve_t + * @param f an integer + * @param hint the hint + * + * @return 1 if the basis is valid, 0 otherwise + * + * The algorithm is deterministic + */ +int ec_curve_to_basis_2f_from_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f, const uint8_t hint); +/** // end basis computations + * @} + */ + +/** @defgroup ec_isog_t Isogenies + * @{ + */ + +/** + * @brief Evaluate isogeny of even degree on list of points. + * Returns 0 if successful and 0xFFFFFFFF if the kernel has the wrong order or includes (0:1). + * + * @param image computed image curve + * @param phi isogeny + * @param points a list of points to evaluate the isogeny on, modified in place + * @param len_points length of the list points + * + * @return 0 if there was no error, 0xFFFFFFFF otherwise + */ +uint32_t ec_eval_even(ec_curve_t *image, ec_isog_even_t *phi, ec_point_t *points, unsigned len_points); + +/** + * @brief Multiplicative strategy for a short isogeny chain. Returns 0 if successful and 0xFFFFFFFF + * if the kernel has the wrong order or includes (0:1) when special=false. + * + * @param curve domain curve, to be overwritten by the codomain curve. + * @param kernel a kernel generator of order 2^len + * @param len the length of the 2-isogeny chain + * @param points a list of points to evaluate the isogeny on, modified in place + * @param len_points length of the list points + * @param special if true, allow isogenies with (0:1) in the kernel + * + * @return 0 if there was no error, 0xFFFFFFFF otherwise + */ +uint32_t ec_eval_small_chain(ec_curve_t *curve, + const ec_point_t *kernel, + int len, + ec_point_t *points, + unsigned len_points, + bool special); + +/** + * @brief Recover Y-coordinate from X-coordinate and curve coefficients.
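+ * This also serves as an on-curve test for Px: the function reports failure
+ * precisely when no valid y-coordinate exists for the given x on this curve.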
+ * + * @param y: a y-coordinate + * @param Px: a x-coordinate + * @param curve: the elliptic curve + * + * @return 0xFFFFFFFF if the point was on the curve, 0 otherwise + */ +uint32_t ec_recover_y(fp2_t *y, const fp2_t *Px, const ec_curve_t *curve); + +// Jacobian point init and copying +void jac_init(jac_point_t *P); +void copy_jac_point(jac_point_t *P, const jac_point_t *Q); + +/** + * @brief Test if two Jacobian points are equal + * + * @param P: a point + * @param Q: a point + * + * @return 0xFFFFFFFF if they are equal, 0 otherwise + */ +uint32_t jac_is_equal(const jac_point_t *P, const jac_point_t *Q); + +// Convert from Jacobian to x-only (just drop the Y-coordinate) +void jac_to_xz(ec_point_t *P, const jac_point_t *xyP); +// Convert from Jacobian coordinates in Montgomery model to Weierstrass +void jac_to_ws(jac_point_t *P, fp2_t *t, fp2_t *ao3, const jac_point_t *Q, const ec_curve_t *curve); +void jac_from_ws(jac_point_t *Q, const jac_point_t *P, const fp2_t *ao3, const ec_curve_t *curve); + +// Jacobian arithmetic +void jac_neg(jac_point_t *Q, const jac_point_t *P); +void ADD(jac_point_t *R, const jac_point_t *P, const jac_point_t *Q, const ec_curve_t *AC); +void DBL(jac_point_t *Q, const jac_point_t *P, const ec_curve_t *AC); +void DBLW(jac_point_t *Q, fp2_t *u, const jac_point_t *P, const fp2_t *t); +void jac_to_xz_add_components(add_components_t *uvw, const jac_point_t *P, const jac_point_t *Q, const ec_curve_t *AC); + +/** + * @brief Given a basis in x-only, lift to a pair of Jacobian points + * + * @param P: a point + * @param Q: a point + * @param B: a basis + * @param E: an elliptic curve + * + * @return 0xFFFFFFFF if there was no error, 0 otherwise + * + * + * Lifts a basis x(P), x(Q), x(P-Q) assuming the curve has (A/C : 1) and + * the point P = (X/Z : 1). 
For generic implementation see lift_basis() + */ +uint32_t lift_basis_normalized(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E); + +/** + * @brief Given a basis in x-only, lift to a pair of Jacobian points + * + * @param P: a point + * @param Q: a point + * @param B: a basis + * @param E: an elliptic curve + * + * @return 0xFFFFFFFF if there was no error, 0 otherwise + */ +uint32_t lift_basis(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E); + +/** + * @brief Check if basis points (P, Q) form a full 4-basis + * + * @param B: a basis + * @param E: an elliptic curve + * + * @return 0xFFFFFFFF if they form a basis, 0 otherwise + */ +uint32_t ec_is_basis_four_torsion(const ec_basis_t *B, const ec_curve_t *E); + +/* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Test functions for printing and order checking, only used in debug mode + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + */ + +/** + * @brief Check if a point (X : Z) has order exactly 2^t + * + * @param P: a point + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_point_order_twof(const ec_point_t *P, const ec_curve_t *E, int t) +{ + ec_point_t test; + ec_curve_t curve; + test = *P; + copy_curve(&curve, E); + + if (ec_is_zero(&test)) + return 0; + // Scale point by 2^(t-1) + ec_dbl_iter(&test, t - 1, &test, &curve); + // If it's zero now, it doesnt have order 2^t + if (ec_is_zero(&test)) + return 0; + // Ensure [2^t] P = 0 + ec_dbl(&test, &test, &curve); + return ec_is_zero(&test); +} + +/** + * @brief Check if basis points (P, Q, PmQ) all have order exactly 2^t + * + * @param B: a basis + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_basis_order_twof(const ec_basis_t *B, const ec_curve_t *E, int t) +{ + int check_P = test_point_order_twof(&B->P, E, t); + int check_Q = test_point_order_twof(&B->Q, E, t); + int check_PmQ = test_point_order_twof(&B->PmQ, E, t); + + return check_P & check_Q & check_PmQ; +} + +/** + * @brief Check if a Jacobian point (X : Y : Z) has order exactly 2^f + * + * @param P: a point + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_jac_order_twof(const jac_point_t *P, const ec_curve_t *E, int t) +{ + jac_point_t test; + test = *P; + if (fp2_is_zero(&test.z)) + return 0; + for (int i = 0; i < t - 1; i++) { + DBL(&test, &test, E); + } + if (fp2_is_zero(&test.z)) + return 0; + DBL(&test, &test, E); + return (fp2_is_zero(&test.z)); +} + +// Prints the x-coordinate of the point (X : 1) +static void +ec_point_print(const char *name, ec_point_t P) +{ + fp2_t a; + if (fp2_is_zero(&P.z)) { + printf("%s = INF\n", name); + } else { + fp2_copy(&a, &P.z); + fp2_inv(&a); + fp2_mul(&a, &a, &P.x); + fp2_print(name, &a); + } +} + +// Prints the Montgomery coefficient A +static void +ec_curve_print(const char *name, ec_curve_t E) +{ + fp2_t a; + fp2_copy(&a, &E.C); + fp2_inv(&a); + fp2_mul(&a, &a, &E.A); + fp2_print(name, &a); +} + +#endif +// end isogeny computations +/** + * @} + */ + +// end ec +/** + * @} + */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec_jac.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec_jac.c new file mode 100644 index 0000000000..20ca68c9b2 --- /dev/null +++ 
b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec_jac.c @@ -0,0 +1,335 @@ +#include +#include + +void +jac_init(jac_point_t *P) +{ // Initialize Montgomery in Jacobian coordinates as identity element (0:1:0) + fp2_set_zero(&P->x); + fp2_set_one(&P->y); + fp2_set_zero(&P->z); +} + +uint32_t +jac_is_equal(const jac_point_t *P, const jac_point_t *Q) +{ // Evaluate if two points in Jacobian coordinates (X:Y:Z) are equal + // Returns 1 (true) if P=Q, 0 (false) otherwise + fp2_t t0, t1, t2, t3; + + fp2_sqr(&t0, &Q->z); + fp2_mul(&t2, &P->x, &t0); // x1*z2^2 + fp2_sqr(&t1, &P->z); + fp2_mul(&t3, &Q->x, &t1); // x2*z1^2 + fp2_sub(&t2, &t2, &t3); + + fp2_mul(&t0, &t0, &Q->z); + fp2_mul(&t0, &P->y, &t0); // y1*z2^3 + fp2_mul(&t1, &t1, &P->z); + fp2_mul(&t1, &Q->y, &t1); // y2*z1^3 + fp2_sub(&t0, &t0, &t1); + + return fp2_is_zero(&t0) & fp2_is_zero(&t2); +} + +void +jac_to_xz(ec_point_t *P, const jac_point_t *xyP) +{ + fp2_copy(&P->x, &xyP->x); + fp2_copy(&P->z, &xyP->z); + fp2_sqr(&P->z, &P->z); + + // If xyP = (0:1:0), we currently have P=(0 : 0) but we want to set P=(1:0) + uint32_t c1, c2; + fp2_t one; + fp2_set_one(&one); + + c1 = fp2_is_zero(&P->x); + c2 = fp2_is_zero(&P->z); + fp2_select(&P->x, &P->x, &one, c1 & c2); +} + +void +jac_to_ws(jac_point_t *Q, fp2_t *t, fp2_t *ao3, const jac_point_t *P, const ec_curve_t *curve) +{ + // Cost of 3M + 2S when A != 0. + fp_t one; + fp2_t a; + /* a = 1 - A^2/3, U = X + (A*Z^2)/3, V = Y, W = Z, T = a*Z^4*/ + fp_set_one(&one); + if (!fp2_is_zero(&(curve->A))) { + fp_div3(&(ao3->re), &(curve->A.re)); + fp_div3(&(ao3->im), &(curve->A.im)); + fp2_sqr(t, &P->z); + fp2_mul(&Q->x, ao3, t); + fp2_add(&Q->x, &Q->x, &P->x); + fp2_sqr(t, t); + fp2_mul(&a, ao3, &(curve->A)); + fp_sub(&(a.re), &one, &(a.re)); + fp_neg(&(a.im), &(a.im)); + fp2_mul(t, t, &a); + } else { + fp2_copy(&Q->x, &P->x); + fp2_sqr(t, &P->z); + fp2_sqr(t, t); + } + fp2_copy(&Q->y, &P->y); + fp2_copy(&Q->z, &P->z); +} + +void +jac_from_ws(jac_point_t *Q, const jac_point_t *P, const fp2_t *ao3, const ec_curve_t *curve) +{ + // Cost of 1M + 1S when A != 0. + fp2_t t; + /* X = U - (A*W^2)/3, Y = V, Z = W. */ + if (!fp2_is_zero(&(curve->A))) { + fp2_sqr(&t, &P->z); + fp2_mul(&t, &t, ao3); + fp2_sub(&Q->x, &P->x, &t); + } + fp2_copy(&Q->y, &P->y); + fp2_copy(&Q->z, &P->z); +} + +void +copy_jac_point(jac_point_t *P, const jac_point_t *Q) +{ + fp2_copy(&(P->x), &(Q->x)); + fp2_copy(&(P->y), &(Q->y)); + fp2_copy(&(P->z), &(Q->z)); +} + +void +jac_neg(jac_point_t *Q, const jac_point_t *P) +{ + fp2_copy(&Q->x, &P->x); + fp2_neg(&Q->y, &P->y); + fp2_copy(&Q->z, &P->z); +} + +void +DBL(jac_point_t *Q, const jac_point_t *P, const ec_curve_t *AC) +{ // Cost of 6M + 6S. 
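+ // (The final constant-time selects keep X and Z unchanged when the input is the
+ // identity (0:Y:0), so doubling the identity again yields the identity.)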
+ // Doubling on a Montgomery curve, representation in Jacobian coordinates (X:Y:Z) corresponding to + // (X/Z^2,Y/Z^3) This version receives the coefficient value A + fp2_t t0, t1, t2, t3; + + uint32_t flag = fp2_is_zero(&P->x) & fp2_is_zero(&P->z); + + fp2_sqr(&t0, &P->x); // t0 = x1^2 + fp2_add(&t1, &t0, &t0); + fp2_add(&t0, &t0, &t1); // t0 = 3x1^2 + fp2_sqr(&t1, &P->z); // t1 = z1^2 + fp2_mul(&t2, &P->x, &AC->A); + fp2_add(&t2, &t2, &t2); // t2 = 2Ax1 + fp2_add(&t2, &t1, &t2); // t2 = 2Ax1+z1^2 + fp2_mul(&t2, &t1, &t2); // t2 = z1^2(2Ax1+z1^2) + fp2_add(&t2, &t0, &t2); // t2 = alpha = 3x1^2 + z1^2(2Ax1+z1^2) + fp2_mul(&Q->z, &P->y, &P->z); + fp2_add(&Q->z, &Q->z, &Q->z); // z2 = 2y1z1 + fp2_sqr(&t0, &Q->z); + fp2_mul(&t0, &t0, &AC->A); // t0 = 4Ay1^2z1^2 + fp2_sqr(&t1, &P->y); + fp2_add(&t1, &t1, &t1); // t1 = 2y1^2 + fp2_add(&t3, &P->x, &P->x); // t3 = 2x1 + fp2_mul(&t3, &t1, &t3); // t3 = 4x1y1^2 + fp2_sqr(&Q->x, &t2); // x2 = alpha^2 + fp2_sub(&Q->x, &Q->x, &t0); // x2 = alpha^2 - 4Ay1^2z1^2 + fp2_sub(&Q->x, &Q->x, &t3); + fp2_sub(&Q->x, &Q->x, &t3); // x2 = alpha^2 - 4Ay1^2z1^2 - 8x1y1^2 + fp2_sub(&Q->y, &t3, &Q->x); // y2 = 4x1y1^2 - x2 + fp2_mul(&Q->y, &Q->y, &t2); // y2 = alpha(4x1y1^2 - x2) + fp2_sqr(&t1, &t1); // t1 = 4y1^4 + fp2_sub(&Q->y, &Q->y, &t1); + fp2_sub(&Q->y, &Q->y, &t1); // y2 = alpha(4x1y1^2 - x2) - 8y1^4 + + fp2_select(&Q->x, &Q->x, &P->x, -flag); + fp2_select(&Q->z, &Q->z, &P->z, -flag); +} + +void +DBLW(jac_point_t *Q, fp2_t *u, const jac_point_t *P, const fp2_t *t) +{ // Cost of 3M + 5S. + // Doubling on a Weierstrass curve, representation in modified Jacobian coordinates + // (X:Y:Z:T=a*Z^4) corresponding to (X/Z^2,Y/Z^3), where a is the curve coefficient. + // Formula from https://hyperelliptic.org/EFD/g1p/auto-shortw-modified.html + + uint32_t flag = fp2_is_zero(&P->x) & fp2_is_zero(&P->z); + + fp2_t xx, c, cc, r, s, m; + // XX = X^2 + fp2_sqr(&xx, &P->x); + // A = 2*Y^2 + fp2_sqr(&c, &P->y); + fp2_add(&c, &c, &c); + // AA = A^2 + fp2_sqr(&cc, &c); + // R = 2*AA + fp2_add(&r, &cc, &cc); + // S = (X+A)^2-XX-AA + fp2_add(&s, &P->x, &c); + fp2_sqr(&s, &s); + fp2_sub(&s, &s, &xx); + fp2_sub(&s, &s, &cc); + // M = 3*XX+T1 + fp2_add(&m, &xx, &xx); + fp2_add(&m, &m, &xx); + fp2_add(&m, &m, t); + // X3 = M^2-2*S + fp2_sqr(&Q->x, &m); + fp2_sub(&Q->x, &Q->x, &s); + fp2_sub(&Q->x, &Q->x, &s); + // Z3 = 2*Y*Z + fp2_mul(&Q->z, &P->y, &P->z); + fp2_add(&Q->z, &Q->z, &Q->z); + // Y3 = M*(S-X3)-R + fp2_sub(&Q->y, &s, &Q->x); + fp2_mul(&Q->y, &Q->y, &m); + fp2_sub(&Q->y, &Q->y, &r); + // T3 = 2*R*T1 + fp2_mul(u, t, &r); + fp2_add(u, u, u); + + fp2_select(&Q->x, &Q->x, &P->x, -flag); + fp2_select(&Q->z, &Q->z, &P->z, -flag); +} + +void +select_jac_point(jac_point_t *Q, const jac_point_t *P1, const jac_point_t *P2, const digit_t option) +{ // Select points + // If option = 0 then Q <- P1, else if option = 0xFF...FF then Q <- P2 + fp2_select(&(Q->x), &(P1->x), &(P2->x), option); + fp2_select(&(Q->y), &(P1->y), &(P2->y), option); + fp2_select(&(Q->z), &(P1->z), &(P2->z), option); +} + +void +ADD(jac_point_t *R, const jac_point_t *P, const jac_point_t *Q, const ec_curve_t *AC) +{ + // Addition on a Montgomery curve, representation in Jacobian coordinates (X:Y:Z) corresponding + // to (x,y) = (X/Z^2,Y/Z^3) This version receives the coefficient value A + // + // Complete routine, to handle all edge cases: + // if ZP == 0: # P == inf + // return Q + // if ZQ == 0: # Q == inf + // return P + // dy <- YQ*ZP**3 - YP*ZQ**3 + // dx <- XQ*ZP**2 - XP*ZQ**2 + // if dx == 0: # x1 == x2 + 
// if dy == 0: # ... and y1 == y2: doubling case + // dy <- ZP*ZQ * (3*XP^2 + ZP^2 * (2*A*XP + ZP^2)) + // dx <- 2*YP*ZP + // else: # ... but y1 != y2, thus P = -Q + // return inf + // XR <- dy**2 - dx**2 * (A*ZP^2*ZQ^2 + XP*ZQ^2 + XQ*ZP^2) + // YR <- dy * (XP*ZQ^2 * dx^2 - XR) - YP*ZQ^3 * dx^3 + // ZR <- dx * ZP * ZQ + + // Constant time processing: + // - The case for P == 0 or Q == 0 is handled at the end with conditional select + // - dy and dx are computed for both the normal and doubling cases, we switch when + // dx == dy == 0 for the normal case. + // - If we have that P = -Q then dx = 0 and so ZR will be zero, giving us the point + // at infinity for "free". + // + // These current formula are expensive and I'm probably missing some tricks... + // Thought I'd get the ball rolling. + // Cost 17M + 6S + 13a + fp2_t t0, t1, t2, t3, u1, u2, v1, dx, dy; + + /* If P is zero or Q is zero we will conditionally swap before returning. */ + uint32_t ctl1 = fp2_is_zero(&P->z); + uint32_t ctl2 = fp2_is_zero(&Q->z); + + /* Precompute some values */ + fp2_sqr(&t0, &P->z); // t0 = z1^2 + fp2_sqr(&t1, &Q->z); // t1 = z2^2 + + /* Compute dy and dx for ordinary case */ + fp2_mul(&v1, &t1, &Q->z); // v1 = z2^3 + fp2_mul(&t2, &t0, &P->z); // t2 = z1^3 + fp2_mul(&v1, &v1, &P->y); // v1 = y1z2^3 + fp2_mul(&t2, &t2, &Q->y); // t2 = y2z1^3 + fp2_sub(&dy, &t2, &v1); // dy = y2z1^3 - y1z2^3 + fp2_mul(&u2, &t0, &Q->x); // u2 = x2z1^2 + fp2_mul(&u1, &t1, &P->x); // u1 = x1z2^2 + fp2_sub(&dx, &u2, &u1); // dx = x2z1^2 - x1z2^2 + + /* Compute dy and dx for doubling case */ + fp2_add(&t1, &P->y, &P->y); // dx_dbl = t1 = 2y1 + fp2_add(&t2, &AC->A, &AC->A); // t2 = 2A + fp2_mul(&t2, &t2, &P->x); // t2 = 2Ax1 + fp2_add(&t2, &t2, &t0); // t2 = 2Ax1 + z1^2 + fp2_mul(&t2, &t2, &t0); // t2 = z1^2 * (2Ax1 + z1^2) + fp2_sqr(&t0, &P->x); // t0 = x1^2 + fp2_add(&t2, &t2, &t0); // t2 = x1^2 + z1^2 * (2Ax1 + z1^2) + fp2_add(&t2, &t2, &t0); // t2 = 2*x1^2 + z1^2 * (2Ax1 + z1^2) + fp2_add(&t2, &t2, &t0); // t2 = 3*x1^2 + z1^2 * (2Ax1 + z1^2) + fp2_mul(&t2, &t2, &Q->z); // dy_dbl = t2 = z2 * (3*x1^2 + z1^2 * (2Ax1 + z1^2)) + + /* If dx is zero and dy is zero swap with double variables */ + uint32_t ctl = fp2_is_zero(&dx) & fp2_is_zero(&dy); + fp2_select(&dx, &dx, &t1, ctl); + fp2_select(&dy, &dy, &t2, ctl); + + /* Some more precomputations */ + fp2_mul(&t0, &P->z, &Q->z); // t0 = z1z2 + fp2_sqr(&t1, &t0); // t1 = z1z2^2 + fp2_sqr(&t2, &dx); // t2 = dx^2 + fp2_sqr(&t3, &dy); // t3 = dy^2 + + /* Compute x3 = dy**2 - dx**2 * (A*ZP^2*ZQ^2 + XP*ZQ^2 + XQ*ZP^2) */ + fp2_mul(&R->x, &AC->A, &t1); // x3 = A*(z1z2)^2 + fp2_add(&R->x, &R->x, &u1); // x3 = A*(z1z2)^2 + u1 + fp2_add(&R->x, &R->x, &u2); // x3 = A*(z1z2)^2 + u1 + u2 + fp2_mul(&R->x, &R->x, &t2); // x3 = dx^2 * (A*(z1z2)^2 + u1 + u2) + fp2_sub(&R->x, &t3, &R->x); // x3 = dy^2 - dx^2 * (A*(z1z2)^2 + u1 + u2) + + /* Compute y3 = dy * (XP*ZQ^2 * dx^2 - XR) - YP*ZQ^3 * dx^3*/ + fp2_mul(&R->y, &u1, &t2); // y3 = u1 * dx^2 + fp2_sub(&R->y, &R->y, &R->x); // y3 = u1 * dx^2 - x3 + fp2_mul(&R->y, &R->y, &dy); // y3 = dy * (u1 * dx^2 - x3) + fp2_mul(&t3, &t2, &dx); // t3 = dx^3 + fp2_mul(&t3, &t3, &v1); // t3 = v1 * dx^3 + fp2_sub(&R->y, &R->y, &t3); // y3 = dy * (u1 * dx^2 - x3) - v1 * dx^3 + + /* Compute z3 = dx * z1 * z2 */ + fp2_mul(&R->z, &dx, &t0); + + /* Finally, we need to set R = P is Q.Z = 0 and R = Q if P.Z = 0 */ + select_jac_point(R, R, Q, ctl1); + select_jac_point(R, R, P, ctl2); +} + +void +jac_to_xz_add_components(add_components_t *add_comp, const jac_point_t *P, const 
jac_point_t *Q, const ec_curve_t *AC) +{ + // Take P and Q in E distinct, two jac_point_t, return three components u,v and w in Fp2 such + // that the xz coordinates of P+Q are (u-v:w) and of P-Q are (u+v:w) + + fp2_t t0, t1, t2, t3, t4, t5, t6; + + fp2_sqr(&t0, &P->z); // t0 = z1^2 + fp2_sqr(&t1, &Q->z); // t1 = z2^2 + fp2_mul(&t2, &P->x, &t1); // t2 = x1z2^2 + fp2_mul(&t3, &t0, &Q->x); // t3 = z1^2x2 + fp2_mul(&t4, &P->y, &Q->z); // t4 = y1z2 + fp2_mul(&t4, &t4, &t1); // t4 = y1z2^3 + fp2_mul(&t5, &P->z, &Q->y); // t5 = z1y2 + fp2_mul(&t5, &t5, &t0); // t5 = z1^3y2 + fp2_mul(&t0, &t0, &t1); // t0 = (z1z2)^2 + fp2_mul(&t6, &t4, &t5); // t6 = (z1z_2)^3y1y2 + fp2_add(&add_comp->v, &t6, &t6); // v = 2(z1z_2)^3y1y2 + fp2_sqr(&t4, &t4); // t4 = y1^2z2^6 + fp2_sqr(&t5, &t5); // t5 = z1^6y_2^2 + fp2_add(&t4, &t4, &t5); // t4 = z1^6y_2^2 + y1^2z2^6 + fp2_add(&t5, &t2, &t3); // t5 = x1z2^2 +z_1^2x2 + fp2_add(&t6, &t3, &t3); // t6 = 2z_1^2x2 + fp2_sub(&t6, &t5, &t6); // t6 = lambda = x1z2^2 - z_1^2x2 + fp2_sqr(&t6, &t6); // t6 = lambda^2 = (x1z2^2 - z_1^2x2)^2 + fp2_mul(&t1, &AC->A, &t0); // t1 = A*(z1z2)^2 + fp2_add(&t1, &t5, &t1); // t1 = gamma =A*(z1z2)^2 + x1z2^2 +z_1^2x2 + fp2_mul(&t1, &t1, &t6); // t1 = gamma*lambda^2 + fp2_sub(&add_comp->u, &t4, &t1); // u = z1^6y_2^2 + y1^2z2^6 - gamma*lambda^2 + fp2_mul(&add_comp->w, &t6, &t0); // w = (z1z2)^2(lambda)^2 +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec_params.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec_params.c new file mode 100644 index 0000000000..d2aa074b7f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec_params.c @@ -0,0 +1,4 @@ +#include +// p+1 divided by the power of 2 +const digit_t p_cofactor_for_2f[1] = {27}; + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec_params.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec_params.h new file mode 100644 index 0000000000..9f2aca3be7 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec_params.h @@ -0,0 +1,12 @@ +#ifndef EC_PARAMS_H +#define EC_PARAMS_H + +#include + +#define TORSION_EVEN_POWER 500 + +// p+1 divided by the power of 2 +extern const digit_t p_cofactor_for_2f[1]; +#define P_COFACTOR_FOR_2F_BITLENGTH 5 + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/encode_signature.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/encode_signature.c new file mode 100644 index 0000000000..112c695941 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/encode_signature.c @@ -0,0 +1,208 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +typedef unsigned char byte_t; + +// digits + +static void +encode_digits(byte_t *enc, const digit_t *x, size_t nbytes) +{ +#ifdef TARGET_BIG_ENDIAN + const size_t ndigits = nbytes / sizeof(digit_t); + const size_t rem = nbytes % sizeof(digit_t); + + for (size_t i = 0; i < ndigits; i++) + ((digit_t *)enc)[i] = BSWAP_DIGIT(x[i]); + if (rem) { + digit_t ld = BSWAP_DIGIT(x[ndigits]); + memcpy(enc + ndigits * sizeof(digit_t), (byte_t *)&ld, rem); + } +#else + memcpy(enc, (const byte_t *)x, nbytes); +#endif +} + +static void +decode_digits(digit_t *x, const byte_t *enc, size_t nbytes, size_t ndigits) +{ + assert(nbytes <= ndigits * sizeof(digit_t)); + memcpy((byte_t *)x, enc, nbytes); + memset((byte_t *)x + nbytes, 0, ndigits * sizeof(digit_t) - nbytes); + +#ifdef TARGET_BIG_ENDIAN + for (size_t i = 0; i < ndigits; i++) + x[i] = BSWAP_DIGIT(x[i]); +#endif +} + +// ibz_t + +static 
byte_t * +ibz_to_bytes(byte_t *enc, const ibz_t *x, size_t nbytes, bool sgn) +{ +#ifndef NDEBUG + { + // make sure there is enough space + ibz_t abs, bnd; + ibz_init(&bnd); + ibz_init(&abs); + ibz_pow(&bnd, &ibz_const_two, 8 * nbytes - sgn); + ibz_abs(&abs, x); + assert(ibz_cmp(&abs, &bnd) < 0); + ibz_finalize(&bnd); + ibz_finalize(&abs); + } +#endif + const size_t digits = (nbytes + sizeof(digit_t) - 1) / sizeof(digit_t); + digit_t d[digits]; + memset(d, 0, sizeof(d)); + if (ibz_cmp(x, &ibz_const_zero) >= 0) { + // non-negative, straightforward. + ibz_to_digits(d, x); + } else { + assert(sgn); + // negative; use two's complement. + ibz_t tmp; + ibz_init(&tmp); + ibz_neg(&tmp, x); + ibz_sub(&tmp, &tmp, &ibz_const_one); + ibz_to_digits(d, &tmp); + for (size_t i = 0; i < digits; ++i) + d[i] = ~d[i]; +#ifndef NDEBUG + { + // make sure the result is correct + ibz_t chk; + ibz_init(&chk); + ibz_copy_digit_array(&tmp, d); + ibz_sub(&tmp, &tmp, x); + ibz_pow(&chk, &ibz_const_two, 8 * sizeof(d)); + assert(!ibz_cmp(&tmp, &chk)); + ibz_finalize(&chk); + } +#endif + ibz_finalize(&tmp); + } + encode_digits(enc, d, nbytes); + return enc + nbytes; +} + +static const byte_t * +ibz_from_bytes(ibz_t *x, const byte_t *enc, size_t nbytes, bool sgn) +{ + assert(nbytes > 0); + const size_t ndigits = (nbytes + sizeof(digit_t) - 1) / sizeof(digit_t); + assert(ndigits > 0); + digit_t d[ndigits]; + memset(d, 0, sizeof(d)); + decode_digits(d, enc, nbytes, ndigits); + if (sgn && enc[nbytes - 1] >> 7) { + // negative, decode two's complement + const size_t s = sizeof(digit_t) - 1 - (sizeof(d) - nbytes); + assert(s < sizeof(digit_t)); + d[ndigits - 1] |= ((digit_t)-1) >> 8 * s << 8 * s; + for (size_t i = 0; i < ndigits; ++i) + d[i] = ~d[i]; + ibz_copy_digits(x, d, ndigits); + ibz_add(x, x, &ibz_const_one); + ibz_neg(x, x); + } else { + // non-negative + ibz_copy_digits(x, d, ndigits); + } + return enc + nbytes; +} + +// public API + +void +secret_key_to_bytes(byte_t *enc, const secret_key_t *sk, const public_key_t *pk) +{ +#ifndef NDEBUG + byte_t *const start = enc; +#endif + + enc = public_key_to_bytes(enc, pk); + +#ifndef NDEBUG + { + fp2_t lhs, rhs; + fp2_mul(&lhs, &sk->curve.A, &pk->curve.C); + fp2_mul(&rhs, &sk->curve.C, &pk->curve.A); + assert(fp2_is_equal(&lhs, &rhs)); + } +#endif + + enc = ibz_to_bytes(enc, &sk->secret_ideal.norm, FP_ENCODED_BYTES, false); + { + quat_alg_elem_t gen; + quat_alg_elem_init(&gen); + int ret UNUSED = quat_lideal_generator(&gen, &sk->secret_ideal, &QUATALG_PINFTY); + assert(ret); + // we skip encoding the denominator since it won't change the generated ideal +#ifndef NDEBUG + { + // let's make sure that the denominator is indeed coprime to the norm of the ideal + ibz_t gcd; + ibz_init(&gcd); + ibz_gcd(&gcd, &gen.denom, &sk->secret_ideal.norm); + assert(!ibz_cmp(&gcd, &ibz_const_one)); + ibz_finalize(&gcd); + } +#endif + enc = ibz_to_bytes(enc, &gen.coord[0], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord[1], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord[2], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord[3], FP_ENCODED_BYTES, true); + quat_alg_elem_finalize(&gen); + } + + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][1], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][1], TORSION_2POWER_BYTES, false); + + assert(enc - 
start == SECRETKEY_BYTES); +} + +void +secret_key_from_bytes(secret_key_t *sk, public_key_t *pk, const byte_t *enc) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + + enc = public_key_from_bytes(pk, enc); + + { + ibz_t norm; + ibz_init(&norm); + quat_alg_elem_t gen; + quat_alg_elem_init(&gen); + enc = ibz_from_bytes(&norm, enc, FP_ENCODED_BYTES, false); + enc = ibz_from_bytes(&gen.coord[0], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord[1], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord[2], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord[3], enc, FP_ENCODED_BYTES, true); + quat_lideal_create(&sk->secret_ideal, &gen, &norm, &MAXORD_O0, &QUATALG_PINFTY); + ibz_finalize(&norm); + quat_alg_elem_finalize(&gen); + } + + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][1], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][1], enc, TORSION_2POWER_BYTES, false); + + assert(enc - start == SECRETKEY_BYTES); + + sk->curve = pk->curve; + ec_curve_to_basis_2f_from_hint(&sk->canonical_basis, &sk->curve, TORSION_EVEN_POWER, pk->hint_pk); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/encode_verification.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/encode_verification.c new file mode 100644 index 0000000000..fecdb9c259 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/encode_verification.c @@ -0,0 +1,220 @@ +#include +#include +#include +#include +#include +#include + +typedef unsigned char byte_t; + +// digits + +static void +encode_digits(byte_t *enc, const digit_t *x, size_t nbytes) +{ +#ifdef TARGET_BIG_ENDIAN + const size_t ndigits = nbytes / sizeof(digit_t); + const size_t rem = nbytes % sizeof(digit_t); + + for (size_t i = 0; i < ndigits; i++) + ((digit_t *)enc)[i] = BSWAP_DIGIT(x[i]); + if (rem) { + digit_t ld = BSWAP_DIGIT(x[ndigits]); + memcpy(enc + ndigits * sizeof(digit_t), (byte_t *)&ld, rem); + } +#else + memcpy(enc, (const byte_t *)x, nbytes); +#endif +} + +static void +decode_digits(digit_t *x, const byte_t *enc, size_t nbytes, size_t ndigits) +{ + assert(nbytes <= ndigits * sizeof(digit_t)); + memcpy((byte_t *)x, enc, nbytes); + memset((byte_t *)x + nbytes, 0, ndigits * sizeof(digit_t) - nbytes); + +#ifdef TARGET_BIG_ENDIAN + for (size_t i = 0; i < ndigits; i++) + x[i] = BSWAP_DIGIT(x[i]); +#endif +} + +// fp2_t + +static byte_t * +fp2_to_bytes(byte_t *enc, const fp2_t *x) +{ + fp2_encode(enc, x); + return enc + FP2_ENCODED_BYTES; +} + +static const byte_t * +fp2_from_bytes(fp2_t *x, const byte_t *enc) +{ + fp2_decode(x, enc); + return enc + FP2_ENCODED_BYTES; +} + +// curves and points + +static byte_t * +proj_to_bytes(byte_t *enc, const fp2_t *x, const fp2_t *z) +{ + assert(!fp2_is_zero(z)); + fp2_t tmp = *z; + fp2_inv(&tmp); +#ifndef NDEBUG + { + fp2_t chk; + fp2_mul(&chk, z, &tmp); + fp2_t one; + fp2_set_one(&one); + assert(fp2_is_equal(&chk, &one)); + } +#endif + fp2_mul(&tmp, x, &tmp); + enc = fp2_to_bytes(enc, &tmp); + return enc; +} + +static const byte_t * +proj_from_bytes(fp2_t *x, fp2_t *z, const byte_t *enc) +{ + enc = fp2_from_bytes(x, enc); + fp2_set_one(z); + return enc; +} + +static byte_t * +ec_curve_to_bytes(byte_t *enc, const ec_curve_t *curve) +{ + return proj_to_bytes(enc, &curve->A, &curve->C); +} + +static const byte_t * 
+ec_curve_from_bytes(ec_curve_t *curve, const byte_t *enc) +{ + memset(curve, 0, sizeof(*curve)); + return proj_from_bytes(&curve->A, &curve->C, enc); +} + +static byte_t * +ec_point_to_bytes(byte_t *enc, const ec_point_t *point) +{ + return proj_to_bytes(enc, &point->x, &point->z); +} + +static const byte_t * +ec_point_from_bytes(ec_point_t *point, const byte_t *enc) +{ + return proj_from_bytes(&point->x, &point->z, enc); +} + +static byte_t * +ec_basis_to_bytes(byte_t *enc, const ec_basis_t *basis) +{ + enc = ec_point_to_bytes(enc, &basis->P); + enc = ec_point_to_bytes(enc, &basis->Q); + enc = ec_point_to_bytes(enc, &basis->PmQ); + return enc; +} + +static const byte_t * +ec_basis_from_bytes(ec_basis_t *basis, const byte_t *enc) +{ + enc = ec_point_from_bytes(&basis->P, enc); + enc = ec_point_from_bytes(&basis->Q, enc); + enc = ec_point_from_bytes(&basis->PmQ, enc); + return enc; +} + +// public API + +byte_t * +public_key_to_bytes(byte_t *enc, const public_key_t *pk) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + enc = ec_curve_to_bytes(enc, &pk->curve); + *enc++ = pk->hint_pk; + assert(enc - start == PUBLICKEY_BYTES); + return enc; +} + +const byte_t * +public_key_from_bytes(public_key_t *pk, const byte_t *enc) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + enc = ec_curve_from_bytes(&pk->curve, enc); + pk->hint_pk = *enc++; + assert(enc - start == PUBLICKEY_BYTES); + return enc; +} + +void +signature_to_bytes(byte_t *enc, const signature_t *sig) +{ +#ifndef NDEBUG + byte_t *const start = enc; +#endif + + enc = fp2_to_bytes(enc, &sig->E_aux_A); + + *enc++ = sig->backtracking; + *enc++ = sig->two_resp_length; + + size_t nbytes = (SQIsign_response_length + 9) / 8; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[0][0], nbytes); + enc += nbytes; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[0][1], nbytes); + enc += nbytes; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[1][0], nbytes); + enc += nbytes; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[1][1], nbytes); + enc += nbytes; + + nbytes = SECURITY_BITS / 8; + encode_digits(enc, sig->chall_coeff, nbytes); + enc += nbytes; + + *enc++ = sig->hint_aux; + *enc++ = sig->hint_chall; + + assert(enc - start == SIGNATURE_BYTES); +} + +void +signature_from_bytes(signature_t *sig, const byte_t *enc) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + + enc = fp2_from_bytes(&sig->E_aux_A, enc); + + sig->backtracking = *enc++; + sig->two_resp_length = *enc++; + + size_t nbytes = (SQIsign_response_length + 9) / 8; + decode_digits(sig->mat_Bchall_can_to_B_chall[0][0], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + decode_digits(sig->mat_Bchall_can_to_B_chall[0][1], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + decode_digits(sig->mat_Bchall_can_to_B_chall[1][0], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + decode_digits(sig->mat_Bchall_can_to_B_chall[1][1], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + + nbytes = SECURITY_BITS / 8; + decode_digits(sig->chall_coeff, enc, nbytes, NWORDS_ORDER); + enc += nbytes; + + sig->hint_aux = *enc++; + sig->hint_chall = *enc++; + + assert(enc - start == SIGNATURE_BYTES); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/encoded_sizes.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/encoded_sizes.h new file mode 100644 index 0000000000..3aafb0d5f7 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/encoded_sizes.h @@ -0,0 +1,11 @@ +#define SECURITY_BITS 256 +#define SQIsign_response_length 253 +#define 
HASH_ITERATIONS 512 +#define FP_ENCODED_BYTES 64 +#define FP2_ENCODED_BYTES 128 +#define EC_CURVE_ENCODED_BYTES 128 +#define EC_POINT_ENCODED_BYTES 128 +#define EC_BASIS_ENCODED_BYTES 384 +#define PUBLICKEY_BYTES 129 +#define SECRETKEY_BYTES 701 +#define SIGNATURE_BYTES 292 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c new file mode 100644 index 0000000000..dd089e6f4f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c @@ -0,0 +1,3336 @@ +#include +#include +#include +const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x280} +#elif RADIX == 32 +{0x12f68, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x400} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x4b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x170000000000000} +#else +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif 
RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x1099, 0xa9f, 0x14f8, 0x1537, 0x1a13, 0x97e, 0x1095, 0xc8b, 0xdd2, 0x1c5f, 0xbdf, 0x1344, 0x1330, 0x1733, 0x185d, 0x1b08, 0x464, 0x76f, 0xe44, 0x3fc, 0x1dc0, 0x1c62, 0x88, 0x972, 0x13f4, 0x18c8, 0x6bd, 0x804, 0x1269, 0x19e0, 0x14bd, 0x10a1, 0xe5e, 0x1af2, 0x156c, 0x3f7, 0x16a1, 0x47d, 0x314} +#elif RADIX == 32 +{0x184cba61, 0xf4f854f, 0x1fb42753, 0x45c2552, 0x1c5f6e93, 0x2688bdf, 0xedcce66, 0x64d8461, 0x1c8876f2, 0x177007f8, 0x12044718, 0x1913f44b, 0x10d7b8, 0x1cf049a5, 0x1d0a1a5e, 0x1b35e4e5, 0x1508fdea, 0x66d} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x27537a7c2a7e132e, 0x7dba4c8b84aa5fb4, 0xedcce6613445eff1, 0xc7221dbc8c9b08c2, 0x8972044718bb803f, 0x24d280435ee3227e, 0x6bc9cbd0a1a5ee78, 0x1011f6d423f7ab6} +#else +{0xa6f4f854fc265d, 0x4c8b84aa5fb427, 0x1309a22f7f8bedd, 0x12326c230bb7339, 0x1177007f8e443b7, 0x3227e897204471, 0x173c12694021af7, 0xd9af272f428697, 0x523eda847ef5} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x4b1, 0x178f, 0x107b, 0x6f6, 0x75e, 0x1b27, 0x4db, 0x1e1b, 0xd78, 0x15b6, 0x1130, 0x8cc, 0x1ac0, 0x9b7, 0x692, 0x1e07, 0x1f4, 0xfd7, 0x2ab, 0x7b5, 0x1040, 0xa43, 0xb6d, 0x13a1, 0x1422, 0x10c9, 0x10b0, 0x1540, 0x827, 0xa69, 0x1761, 0x1f25, 0x1d16, 0x16f2, 0x1fcb, 0x92, 0xcba, 0x1c03, 0x3c7} +#elif RADIX == 32 +{0x1258c7b1, 0xd07bbc7, 0x9cebc6f, 0x10d936f6, 0x15b66bc7, 0x1199130, 0x926df58, 0x1f4f039a, 0x556fd70, 0x1c100f6a, 0x15b6a90, 0x1934229d, 0x15021610, 0x1534a09e, 0xdf25bb0, 0x12ede5d1, 0x5d024bf, 0xa9b} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xbc6f683dde3c9631, 0xd9af1e1b26dec9ce, 0x926df5808cc89856, 0x5155bf5c3e9e0734, 0x53a15b6a90e0807b, 0x504f540858432684, 0xdbcba2df25bb0a9a, 0x18f00d974092fe5} +#else +{0xded07bbc792c63, 0x11e1b26dec9cebc, 0xc046644c2b6cd7, 0x10fa781cd249b7d, 0x1c100f6a2ab7eb, 0x3268453a15b6a9, 0x54d2827aa042c2, 0x1976f2e8b7c96ec, 0x16e01b2e8125f} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x15c, 0x865, 0x1af6, 0x17b9, 0x6a2, 0x1c22, 0x17c5, 0x1149, 0xa7, 0x151e, 0xe57, 0x4c2, 0x18cd, 0xbd2, 0x7a4, 0x7c6, 0x74a, 0xd2, 0x902, 0x68c, 0x21e, 0x1e44, 0x1f5a, 0x1d4c, 0x115b, 0x1777, 0x16d4, 0x503, 0x3af, 0x7e4, 0x1aa7, 0x3dd, 0x827, 0x186b, 0x765, 0x1fc5, 0xc78, 0x9bd, 0xfe} +#elif RADIX == 32 +{0x10ae12d6, 0x13af6432, 0x88d457b, 0xa4df178, 0x151e053c, 0x14984e57, 0x122f4b19, 0x14a3e31e, 0x12040d23, 0x878d18, 0xcfad791, 0xef15bea, 0x140eda97, 0x13f20ebc, 0xe3ddd53, 0x1970d682, 0x3c7f14e, 0x4eb} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x457b9d7b21942b84, 0x7814f149be2f088d, 0x22f4b19a4c272bd4, 0xc4810348e947c63d, 0x7d4cfad791043c68, 0x75e503b6a5dde2b, 0xe1ad04e3ddd539f9, 0x1326f58f1fc53b2} +#else +{0xf73af643285709, 0xf149be2f088d45, 0xcd261395ea3c0a, 0x3a51f18f48bd2c, 0x20878d18902069, 0x1dde2b7d4cfad79, 0x1cfc83af281db52, 0xcb86b4138f7754, 0xb4deb1e3f8a7} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x6ac, 0x25e, 0xc7a, 0x1492, 0xd01, 0xbc0, 0x118, 0x376, 0x3e0, 0x7ae, 0x573, 0x171f, 0x35a, 0x1725, 0x48f, 0xc94, 0x133c, 0x16a4, 0x10a8, 0x178d, 0xdd7, 0x798, 0x1d05, 0x39f, 0xc2a, 0x179c, 0x407, 0xd3, 0x118a, 0x1c9f, 0xeac, 0x145b, 0xc35, 0x11a2, 0x58b, 0xe4, 0x5e3, 0xae7, 0x330} +#elif RADIX == 32 +{0x3563c78, 0x4c7a12f, 0x101a0349, 0x1bb04617, 0x7ae1f00, 0xae3e573, 0x7dc946b, 0x13c64a12, 0x1516a49, 0x375ef1b, 0x1fe829e6, 0x138c2a1c, 0x34c80f7, 0xe4fc628, 0xb45b756, 0x2e344c3, 0xf18390b, 0x339} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x349263d0978d58f, 0xb87c037608c2f01a, 0x7dc946b571f2b99e, 0xd8545a92678c9424, 0x439fe829e61baf78, 0xe3140d3203de7185, 0xc68986b45b756727, 0x32b9cbc60e42c5} +#else +{0x924c7a12f1ab1e, 0x37608c2f01a03, 0x15ab8f95ccf5c3e, 0x99e325091f7251, 0xc375ef1b0a8b52, 0x1e7185439fe829e, 0x1393f18a069901e, 0x1171a261ad16dd5, 0x6573978c1c85} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 
+#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x19da, 0x19cd, 0x19e2, 0x5ea, 0x1079, 0x11ba, 0x1f5e, 0x228, 0x1a45, 0x16ee, 0x18a1, 0x11eb, 0x127a, 0x1d6f, 0x106f, 0x118f, 0x1d0c, 0x1571, 0x1b2d, 0xb60, 0xb27, 0xe1f, 0xe58, 0xe01, 0x4f4, 0x183, 0x13a9, 0x1584, 0x5cb, 0xcce, 0x1ce7, 0x4da, 0x1e62, 0x1213, 0x7fe, 0x1e6, 0x17d, 0x350, 0x3a0} +#elif RADIX == 32 +{0x1ced44bf, 0x159e2ce6, 0xea0f25e, 0x1147d7a3, 0x16eed228, 0xa3d78a1, 0x17f5be4f, 0x10c8c7c1, 0x165b571e, 0x1ac9d6c1, 0x172c387, 0x1064f470, 0x16127521, 0x1667172e, 0x44dae73, 0x1fa427e6, 0xbe8798f, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf25eacf167373b51, 0xbb48a228faf46ea0, 0x7f5be4f51ebc50db, 0xd96d5c7a1918f83, 0x8e0172c387d64eb6, 0x8b975849d4860c9e, 0x484fcc44dae73b33, 0x50d402fa1e63ff} +#else +{0xbd59e2ce6e76a2, 0xa228faf46ea0f2, 0x7a8f5e286ddda4, 0x1e86463e0dfd6f9, 0xfac9d6c1b2dab8, 0x60c9e8e0172c38, 0x1d99c5cbac24ea4, 0x1fd213f31136b9c, 0xa1a805f43cc7} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1dea, 0x1bbc, 0x9b0, 0x1066, 0x10fb, 0x1fe8, 0x1bca, 0x34d, 0x275, 0x42a, 0xc7b, 0x6e8, 0x1f5c, 0x12e5, 0x155d, 0x4f2, 0x1422, 0xfce, 0x603, 0x17a8, 0xd9f, 0x182d, 0x9fe, 0x3b1, 0x342, 0x1c21, 0x1aff, 0x1e38, 0x1ac8, 0x1c98, 0x51f, 0x897, 0xe23, 0x17e7, 0xced, 0x1e6, 0x125a, 0x18f3, 0x1b8} +#elif RADIX == 32 +{0xef520a6, 0xc9b0dde, 0x1a21f706, 0x1a6ef2bf, 0x42a13a8, 0x10dd0c7b, 0xecb97eb, 0x2227955, 0xc06fcea, 0xb67ef50, 0x114ff60b, 0x423421d, 0x18e35ffc, 0x1e4c6b23, 0x689728f, 0x1b6fcee2, 0x12d07999, 0x69c} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf70664d86ef3bd48, 0xa84ea34dde57fa21, 0xecb97eb86e863d90, 0x8301bf3a8444f2aa, 0x43b14ff60b5b3f7a, 0x3591e38d7ff08468, 0xdf9dc4689728ff26, 0x463ce4b41e6676} +#else +{0xcc9b0dde77a90, 0xa34dde57fa21f7, 0x15c37431ec85427, 0xa1113caabb2e5f, 0x16b67ef506037e7, 0x10846843b14ff60, 0x1f931ac8f1c6bff, 0x1db7e7711a25ca3, 0x8c79c9683ccc} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3337,0x86b9,0x96a8,0x892f,0x9f38,0x4c8e,0xc497,0xdf75,0x4fd7,0xd5b3,0x5dec,0xd543,0xf3dc,0xd5c3,0x8de3,0xa71e,0xd939,0x1324,0x7073,0x5af3,0xbb6b,0x4122,0x9d0,0x81d7,0x74de,0x3877,0x5ea0,0x6d85,0x7c31,0x8f2d,0x9df2,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x86b93337,0x892f96a8,0x4c8e9f38,0xdf75c497,0xd5b34fd7,0xd5435dec,0xd5c3f3dc,0xa71e8de3,0x1324d939,0x5af37073,0x4122bb6b,0x81d709d0,0x387774de,0x6d855ea0,0x8f2d7c31,0x19df2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x892f96a886b93337,0xdf75c4974c8e9f38,0xd5435decd5b34fd7,0xa71e8de3d5c3f3dc,0x5af370731324d939,0x81d709d04122bb6b,0x6d855ea0387774de,0x19df28f2d7c31}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x28e,0xcfd,0xe1c2,0x2061,0xe412,0x3b18,0x40df,0x716c,0xd025,0xd980,0xc041,0xebb9,0xbb45,0x4982,0x17de,0xa8fe,0xb079,0xffe5,0x34d9,0x2aa6,0x2b0b,0x6787,0x9bab,0x6bc3,0x7365,0x3c03,0xf512,0xb57b,0x897,0xf0b5,0x9c9c,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcfd028e,0x2061e1c2,0x3b18e412,0x716c40df,0xd980d025,0xebb9c041,0x4982bb45,0xa8fe17de,0xffe5b079,0x2aa634d9,0x67872b0b,0x6bc39bab,0x3c037365,0xb57bf512,0xf0b50897,0x89c9c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2061e1c20cfd028e,0x716c40df3b18e412,0xebb9c041d980d025,0xa8fe17de4982bb45,0x2aa634d9ffe5b079,0x6bc39bab67872b0b,0xb57bf5123c037365,0x89c9cf0b50897}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x45b1,0x429e,0x37b2,0xd856,0x2f81,0x86cd,0x39cb,0x81ba,0x771e,0xee7e,0x58,0xfbe4,0xa4b,0x28fb,0x7a7d,0x5bb8,0xa413,0x1657,0x2d54,0x3a9d,0xbbad,0x75a3,0x689,0x3069,0xb0ad,0x2fdd,0x2e81,0xad39,0xd3cd,0x2bff,0xb5cb,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x429e45b1,0xd85637b2,0x86cd2f81,0x81ba39cb,0xee7e771e,0xfbe40058,0x28fb0a4b,0x5bb87a7d,0x1657a413,0x3a9d2d54,0x75a3bbad,0x30690689,0x2fddb0ad,0xad392e81,0x2bffd3cd,0x7b5cb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd85637b2429e45b1,0x81ba39cb86cd2f81,0xfbe40058ee7e771e,0x5bb87a7d28fb0a4b,0x3a9d2d541657a413,0x3069068975a3bbad,0xad392e812fddb0ad,0x7b5cb2bffd3cd}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xccc9,0x7946,0x6957,0x76d0,0x60c7,0xb371,0x3b68,0x208a,0xb028,0x2a4c,0xa213,0x2abc,0xc23,0x2a3c,0x721c,0x58e1,0x26c6,0xecdb,0x8f8c,0xa50c,0x4494,0xbedd,0xf62f,0x7e28,0x8b21,0xc788,0xa15f,0x927a,0x83ce,0x70d2,0x620d,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7946ccc9,0x76d06957,0xb37160c7,0x208a3b68,0x2a4cb028,0x2abca213,0x2a3c0c23,0x58e1721c,0xecdb26c6,0xa50c8f8c,0xbedd4494,0x7e28f62f,0xc7888b21,0x927aa15f,0x70d283ce,0xe620d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x76d069577946ccc9,0x208a3b68b37160c7,0x2abca2132a4cb028,0x58e1721c2a3c0c23,0xa50c8f8cecdb26c6,0x7e28f62fbedd4494,0x927aa15fc7888b21,0xe620d70d283ce}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x20f3,0x77e0,0xc9a6,0xeb4f,0xb334,0xff68,0xecb4,0xa6e3,0x5015,0x43c1,0x9e87,0xf4eb,0x22e7,0x5f37,0x9392,0x80a0,0x9ea0,0x670f,0x1be3,0x7559,0x2cb5,0x900d,0xfa83,0x1519,0x67b8,0x4d7c,0xaf3a,0x6dc4,0x12e1,0x1e51,0x8d84,0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77e020f3,0xeb4fc9a6,0xff68b334,0xa6e3ecb4,0x43c15015,0xf4eb9e87,0x5f3722e7,0x80a09392,0x670f9ea0,0x75591be3,0x900d2cb5,0x1519fa83,0x4d7c67b8,0x6dc4af3a,0x1e5112e1,0x58d84}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = 
(mp_limb_t[]) {0xeb4fc9a677e020f3,0xa6e3ecb4ff68b334,0xf4eb9e8743c15015,0x80a093925f3722e7,0x75591be3670f9ea0,0x1519fa83900d2cb5,0x6dc4af3a4d7c67b8,0x58d841e5112e1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x8e98,0xe430,0x6d21,0x2fa6,0x524f,0xf0cf,0xe5eb,0x30ec,0x3658,0x7711,0x7d2f,0x47bf,0xbbc5,0x720c,0xe7a6,0x1ef4,0x335f,0x2c25,0x59e5,0x471c,0x5e06,0x5d38,0x62d6,0xa2a7,0x65f3,0xdefc,0x5e15,0x7a7a,0xdac4,0xc542,0x7bb8,0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe4308e98,0x2fa66d21,0xf0cf524f,0x30ece5eb,0x77113658,0x47bf7d2f,0x720cbbc5,0x1ef4e7a6,0x2c25335f,0x471c59e5,0x5d385e06,0xa2a762d6,0xdefc65f3,0x7a7a5e15,0xc542dac4,0xd7bb8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2fa66d21e4308e98,0x30ece5ebf0cf524f,0x47bf7d2f77113658,0x1ef4e7a6720cbbc5,0x471c59e52c25335f,0xa2a762d65d385e06,0x7a7a5e15defc65f3,0xd7bb8c542dac4}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3249,0xe4fe,0xec61,0x49e0,0x5b5f,0xc495,0x6ef6,0x811,0x4fdf,0x59fc,0xbd69,0x608e,0xafe2,0xe9a9,0x5706,0x98ac,0xb327,0x481a,0x9c4e,0xecac,0x19fa,0x6401,0xfaad,0x14a4,0xeda,0x3fb5,0x7eb5,0x9768,0x6597,0x4c10,0xdc28,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe4fe3249,0x49e0ec61,0xc4955b5f,0x8116ef6,0x59fc4fdf,0x608ebd69,0xe9a9afe2,0x98ac5706,0x481ab327,0xecac9c4e,0x640119fa,0x14a4faad,0x3fb50eda,0x97687eb5,0x4c106597,0xbdc28}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x49e0ec61e4fe3249,0x8116ef6c4955b5f,0x608ebd6959fc4fdf,0x98ac5706e9a9afe2,0xecac9c4e481ab327,0x14a4faad640119fa,0x97687eb53fb50eda,0xbdc284c106597}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdf0d,0x881f,0x3659,0x14b0,0x4ccb,0x97,0x134b,0x591c,0xafea,0xbc3e,0x6178,0xb14,0xdd18,0xa0c8,0x6c6d,0x7f5f,0x615f,0x98f0,0xe41c,0x8aa6,0xd34a,0x6ff2,0x57c,0xeae6,0x9847,0xb283,0x50c5,0x923b,0xed1e,0xe1ae,0x727b,0xa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x881fdf0d,0x14b03659,0x974ccb,0x591c134b,0xbc3eafea,0xb146178,0xa0c8dd18,0x7f5f6c6d,0x98f0615f,0x8aa6e41c,0x6ff2d34a,0xeae6057c,0xb2839847,0x923b50c5,0xe1aeed1e,0xa727b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x14b03659881fdf0d,0x591c134b00974ccb,0xb146178bc3eafea,0x7f5f6c6da0c8dd18,0x8aa6e41c98f0615f,0xeae6057c6ff2d34a,0x923b50c5b2839847,0xa727be1aeed1e}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d 
= (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3337,0x86b9,0x96a8,0x892f,0x9f38,0x4c8e,0xc497,0xdf75,0x4fd7,0xd5b3,0x5dec,0xd543,0xf3dc,0xd5c3,0x8de3,0xa71e,0xd939,0x1324,0x7073,0x5af3,0xbb6b,0x4122,0x9d0,0x81d7,0x74de,0x3877,0x5ea0,0x6d85,0x7c31,0x8f2d,0x9df2,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b93337,0x892f96a8,0x4c8e9f38,0xdf75c497,0xd5b34fd7,0xd5435dec,0xd5c3f3dc,0xa71e8de3,0x1324d939,0x5af37073,0x4122bb6b,0x81d709d0,0x387774de,0x6d855ea0,0x8f2d7c31,0x19df2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x892f96a886b93337,0xdf75c4974c8e9f38,0xd5435decd5b34fd7,0xa71e8de3d5c3f3dc,0x5af370731324d939,0x81d709d04122bb6b,0x6d855ea0387774de,0x19df28f2d7c31}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x28e,0xcfd,0xe1c2,0x2061,0xe412,0x3b18,0x40df,0x716c,0xd025,0xd980,0xc041,0xebb9,0xbb45,0x4982,0x17de,0xa8fe,0xb079,0xffe5,0x34d9,0x2aa6,0x2b0b,0x6787,0x9bab,0x6bc3,0x7365,0x3c03,0xf512,0xb57b,0x897,0xf0b5,0x9c9c,0x8}}} +#elif 
GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcfd028e,0x2061e1c2,0x3b18e412,0x716c40df,0xd980d025,0xebb9c041,0x4982bb45,0xa8fe17de,0xffe5b079,0x2aa634d9,0x67872b0b,0x6bc39bab,0x3c037365,0xb57bf512,0xf0b50897,0x89c9c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2061e1c20cfd028e,0x716c40df3b18e412,0xebb9c041d980d025,0xa8fe17de4982bb45,0x2aa634d9ffe5b079,0x6bc39bab67872b0b,0xb57bf5123c037365,0x89c9cf0b50897}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x45b1,0x429e,0x37b2,0xd856,0x2f81,0x86cd,0x39cb,0x81ba,0x771e,0xee7e,0x58,0xfbe4,0xa4b,0x28fb,0x7a7d,0x5bb8,0xa413,0x1657,0x2d54,0x3a9d,0xbbad,0x75a3,0x689,0x3069,0xb0ad,0x2fdd,0x2e81,0xad39,0xd3cd,0x2bff,0xb5cb,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x429e45b1,0xd85637b2,0x86cd2f81,0x81ba39cb,0xee7e771e,0xfbe40058,0x28fb0a4b,0x5bb87a7d,0x1657a413,0x3a9d2d54,0x75a3bbad,0x30690689,0x2fddb0ad,0xad392e81,0x2bffd3cd,0x7b5cb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd85637b2429e45b1,0x81ba39cb86cd2f81,0xfbe40058ee7e771e,0x5bb87a7d28fb0a4b,0x3a9d2d541657a413,0x3069068975a3bbad,0xad392e812fddb0ad,0x7b5cb2bffd3cd}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xccc9,0x7946,0x6957,0x76d0,0x60c7,0xb371,0x3b68,0x208a,0xb028,0x2a4c,0xa213,0x2abc,0xc23,0x2a3c,0x721c,0x58e1,0x26c6,0xecdb,0x8f8c,0xa50c,0x4494,0xbedd,0xf62f,0x7e28,0x8b21,0xc788,0xa15f,0x927a,0x83ce,0x70d2,0x620d,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7946ccc9,0x76d06957,0xb37160c7,0x208a3b68,0x2a4cb028,0x2abca213,0x2a3c0c23,0x58e1721c,0xecdb26c6,0xa50c8f8c,0xbedd4494,0x7e28f62f,0xc7888b21,0x927aa15f,0x70d283ce,0xe620d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x76d069577946ccc9,0x208a3b68b37160c7,0x2abca2132a4cb028,0x58e1721c2a3c0c23,0xa50c8f8cecdb26c6,0x7e28f62fbedd4494,0x927aa15fc7888b21,0xe620d70d283ce}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xaa15,0x7f4c,0xb027,0xba3f,0xa936,0x25fb,0xd8a6,0xc32c,0x4ff6,0xcba,0x7e3a,0x6517,0x8b62,0x1a7d,0x90bb,0x13df,0x3bed,0x3d1a,0x462b,0x6826,0xf410,0xe897,0x8229,0x4b78,0xee4b,0x42f9,0x6ed,0x6da5,0x4789,0x56bf,0x95bb,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7f4caa15,0xba3fb027,0x25fba936,0xc32cd8a6,0xcba4ff6,0x65177e3a,0x1a7d8b62,0x13df90bb,0x3d1a3bed,0x6826462b,0xe897f410,0x4b788229,0x42f9ee4b,0x6da506ed,0x56bf4789,0xb95bb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xba3fb0277f4caa15,0xc32cd8a625fba936,0x65177e3a0cba4ff6,0x13df90bb1a7d8b62,0x6826462b3d1a3bed,0x4b788229e897f410,0x6da506ed42f9ee4b,0xb95bb56bf4789}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc893,0xf896,0x2771,0xa804,0x1b30,0x95f4,0x9365,0xd12c,0x33e,0xa849,0x9eb8,0x99bc,0xbb85,0x5dc7,0x7fc2,0x63f9,0x71ec,0x9605,0x475f,0xb8e1,0xc488,0xe25f,0x7f40,0x8735,0xecac,0xd7f,0x2994,0x17fb,0xf1ae,0xdafb,0xc2a,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xf896c893,0xa8042771,0x95f41b30,0xd12c9365,0xa849033e,0x99bc9eb8,0x5dc7bb85,0x63f97fc2,0x960571ec,0xb8e1475f,0xe25fc488,0x87357f40,0xd7fecac,0x17fb2994,0xdafbf1ae,0x30c2a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa8042771f896c893,0xd12c936595f41b30,0x99bc9eb8a849033e,0x63f97fc25dc7bb85,0xb8e1475f960571ec,0x87357f40e25fc488,0x17fb29940d7fecac,0x30c2adafbf1ae}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3bfd,0x13ce,0x920a,0x911b,0x4570,0x25b1,0xd461,0xc4e5,0x637e,0x243d,0x5ee1,0x2e39,0x5d17,0x952,0x68c2,0x7a32,0x2b9d,0x2f39,0xe4d1,0x13a4,0x6ad4,0x6cd2,0x9b,0xa287,0x5fc3,0x37c9,0xd69b,0xa250,0x1cb2,0xbc08,0xc8f9,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x13ce3bfd,0x911b920a,0x25b14570,0xc4e5d461,0x243d637e,0x2e395ee1,0x9525d17,0x7a3268c2,0x2f392b9d,0x13a4e4d1,0x6cd26ad4,0xa287009b,0x37c95fc3,0xa250d69b,0xbc081cb2,0x1c8f9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x911b920a13ce3bfd,0xc4e5d46125b14570,0x2e395ee1243d637e,0x7a3268c209525d17,0x13a4e4d12f392b9d,0xa287009b6cd26ad4,0xa250d69b37c95fc3,0x1c8f9bc081cb2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x55eb,0x80b3,0x4fd8,0x45c0,0x56c9,0xda04,0x2759,0x3cd3,0xb009,0xf345,0x81c5,0x9ae8,0x749d,0xe582,0x6f44,0xec20,0xc412,0xc2e5,0xb9d4,0x97d9,0xbef,0x1768,0x7dd6,0xb487,0x11b4,0xbd06,0xf912,0x925a,0xb876,0xa940,0x6a44,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x80b355eb,0x45c04fd8,0xda0456c9,0x3cd32759,0xf345b009,0x9ae881c5,0xe582749d,0xec206f44,0xc2e5c412,0x97d9b9d4,0x17680bef,0xb4877dd6,0xbd0611b4,0x925af912,0xa940b876,0x46a44}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x45c04fd880b355eb,0x3cd32759da0456c9,0x9ae881c5f345b009,0xec206f44e582749d,0x97d9b9d4c2e5c412,0xb4877dd617680bef,0x925af912bd0611b4,0x46a44a940b876}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = 
(mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x4d6, 0x18fd, 0x18c0, 0x13c1, 0x1718, 0x122c, 0x830, 0xa02, 0xee0, 0x1ad1, 0x1485, 0x27f, 0x13e5, 0x118f, 0xdce, 0x31c, 0x1b49, 0x998, 0x117, 0x343, 0xdae, 0x1fe7, 0x16a0, 0x1c43, 0x172, 0xa6e, 0x702, 0xeb8, 0x835, 0x1cf, 0x48d, 0x4e4, 0x13eb, 0x57d, 0xccf, 0x97e, 0x3b6, 0x910, 0x2dd} +#elif RADIX == 32 +{0x126b3651, 0x38c0c7e, 0xb2e313c, 0x10120c24, 0x1ad17702, 0x144ff485, 0x7463e7c, 0x14918e37, 0x22e998d, 0x1b6b8686, 0x3b507f9, 0xdc172e2, 0x1ae0e04a, 0x10e7a0d5, 0x164e4246, 0x13cafb3e, 0x1db25f99, 0x300} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x313c1c6063f49acd, 0x45dc0a0241848b2e, 0x7463e7ca27fa42eb, 0x308ba66369231c6e, 0x5c43b507f9db5c34, 0xd06aeb838129b82e, 0x95f67d64e4246873, 0xfa44076c97e667} +#else +{0x7838c0c7e9359b, 0xa0241848b2e31, 0x1e513fd2175a2ee, 0xda48c71b9d18f9, 0x13b6b86861174cc, 0x9b82e5c43b507f, 0x1439e83575c1c09, 0x19e57d9f5939091, 0x44880ed92fcc} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 
+{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x937, 0x63f, 0xe30, 0x4f0, 0x5c6, 0x48b, 0x120c, 0x280, 0xbb8, 0xeb4, 0x1d21, 0x89f, 0x1cf9, 0x1463, 0x373, 0x8c7, 0x6d2, 0x1a66, 0x1845, 0x10d0, 0x1b6b, 0x7f9, 0x1da8, 0x1710, 0x105c, 0x129b, 0x1c0, 0xbae, 0x1a0d, 0x873, 0x123, 0x1939, 0xcfa, 0x195f, 0x1333, 0x125f, 0xed, 0xa44, 0x697} +#elif RADIX == 32 +{0x149bfcfc, 0xe3031f, 0x2cb8c4f, 0x14048309, 0xeb45dc0, 0x513fd21, 0x19d18f9f, 0xd24638d, 0x108ba663, 0xedae1a1, 0x10ed41fe, 0x13705cb8, 0xeb83812, 0x1439e835, 0x15939091, 0xcf2becf, 0x76c97e6, 0x820} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x8c4f071818fd26ff, 0xd1770280906122cb, 0x9d18f9f289fe90ba, 0xc22e998da48c71b, 0x9710ed41fe76d70d, 0xf41abae0e04a6e0b, 0xe57d9f5939091a1c, 0x6a9101db25f999} +#else +{0x9e0e3031fa4dfe, 0x10280906122cb8c, 0xf944ff485d68bb, 0x369231c6e7463e, 0x1cedae1a1845d33, 0xa6e0b9710ed41f, 0xd0e7a0d5d70702, 0x6795f67d64e424, 0xd52203b64bf3} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x1863, 0x635, 0x19a9, 0x17fc, 0xdfe, 0x1784, 0x150b, 0x16c3, 0x15c0, 0x1f5f, 0x11d9, 0x1064, 0x1893, 0x1829, 0x211, 0x1a9e, 0x2e1, 0x3cc, 0x1e64, 0x12ed, 0x1c2c, 0x18b9, 0x121d, 0x234, 
0xec9, 0x14dc, 0x4b6, 0xaad, 0x19f6, 0x805, 0x1984, 0x1843, 0xfca, 0x1a7a, 0xe04, 0x4af, 0x881, 0x65b, 0x421} +#elif RADIX == 32 +{0x1c31ce4f, 0x199a931a, 0x11bfd7f, 0x161d42ef, 0x1f5fae05, 0xe0c91d9, 0x8e0a712, 0xe1d4f08, 0x1cc83cc1, 0xf0b25db, 0x1490ee2e, 0x1b8ec911, 0xab496d4, 0x402e7d9, 0x15843cc2, 0x134f4fc, 0x4092bdc, 0x85a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xfd7fccd498d70c73, 0x7eb816c3a85de11b, 0x8e0a71270648ecfd, 0xdf320f305c3a9e10, 0x223490ee2e78592e, 0x73ecaad25b5371d9, 0x69e9f95843cc2201, 0xf996d1024af702} +#else +{0xff99a931ae18e7, 0x16c3a85de11bfd, 0x938324767ebf5c, 0x170ea78423829c, 0x1cf0b25dbe641e6, 0x1371d9223490ee2, 0x1100b9f655692da, 0x9a7a7e5610f30, 0x432da20495ee} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1a7, 0x175b, 0x9bd, 0xb94, 0x1a66, 0x1d52, 0x1eb3, 0x1431, 0x9e7, 0x1b9d, 0x75f, 0xcba, 0x17e9, 0xe1d, 0xdb, 0xc7b, 0x76, 0xa04, 0xd73, 0x3f7, 0x17dd, 0x1555, 0x5d6, 0x16ee, 0x1df6, 0x1429, 0x15cb, 0x140b, 0x1aeb, 0x14fb, 0x1984, 0x179b, 0x1ba1, 0x125e, 0xb62, 0x249, 0x95a, 0x137a, 0x7c} +#elif RADIX == 32 +{0x10d3893a, 0x89bdbad, 0x14b4ccb9, 0x18facfa, 0x1b9d4f3d, 0x597475f, 0xdb876fd, 0x7663d83, 0x1ae6a040, 0xdf747ee, 0xe2eb555, 0x53df6b7, 0x102eb974, 0xa7debae, 0x379bcc2, 0x18a4bdba, 0xad09256, 0xcd2} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xccb944dedd6c34e2, 0x753cf431f59f54b4, 0xdb876fd2cba3afee, 0x76b9a8100ecc7b06, 0xd6ee2eb5556fba3f, 0xf5d740bae5d0a7be, 0x497b74379bcc253e, 0x84de92b42495b1} +#else +{0x17289bdbad869c4, 0xf431f59f54b4cc, 0x1e965d1d7f73a9e, 0x3b31ec1b6e1db, 0xadf747eed73502, 0x10a7bed6ee2eb55, 0x129f7aeba05d72e, 0xc525edd0de6f30, 0x109bd2568492b} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1d6a, 0x5b, 0x24a, 0x1bfc, 0x1cef, 0xc7e, 0x1cac, 0x1e4, 0x68, 0x16da, 0x30d, 0x13a5, 0x505, 0x329, 0x9f4, 0x1dae, 0x371, 0x111b, 0x200, 0x1b69, 0x1e51, 0x3b7, 0x316, 0x509, 0x1af2, 0x1220, 0x8c2, 0x195a, 0x1050, 0x1b7a, 0xd8b, 0x1a21, 0x336, 0x14fa, 0x1a4b, 0x11d, 0x167d, 0x1501, 0x302} +#elif RADIX == 32 +{0x1eb53915, 0x1824a02d, 0x1fb9dfbf, 0xf272b18, 0x16da0340, 0x1674a30d, 0x1a0ca4a0, 0x171ed727, 0x40111b1, 0x1f9476d2, 0x918b0ed, 0x41af228, 0x5691852, 0x1dbd4143, 0xda216c5, 0x12e9f433, 0x13e84774, 0xc8d} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xdfbfc125016fad4e, 0x680d01e4e5631fb9, 0xa0ca4a0b3a5186db, 0x9100446c6e3dae4f, 0x450918b0edfca3b6, 0xa0a195a46148835e, 0xd3e866da216c5ede, 0x75406cfa11dd25} +#else +{0x17f824a02df5a9c, 
0x101e4e5631fb9df, 0x1059d28c36db406, 0x11b8f6b93e83292, 0x1bf9476d220088d, 0x8835e450918b0e, 0xf6f5050cad230a, 0x974fa19b6885b1, 0xea80d9f423ba} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1e9d, 0xbb9, 0x14f9, 0xc51, 0x1731, 0x122e, 0x1901, 0x59a, 0xcc1, 0xb65, 0xc68, 0x1eaf, 0x1f48, 0x1e46, 0xe46, 0x9c1, 0x1013, 0x12f8, 0x18a, 0x177f, 0x1e19, 0x1cca, 0x257, 0x18b9, 0xa38, 0x184b, 0x15a4, 0x86d, 0xa8c, 0x1df5, 0xf2, 0x37, 0x5d9, 0x292, 0x11ae, 0x9e, 0x1fce, 0x7f4, 0x407} +#elif RADIX == 32 +{0x1f4ecc63, 0x34f95dc, 0xbae62c5, 0xcd64064, 0xb656609, 0x3d5ec68, 0x3791be9, 0x134e0b9, 0x3152f88, 0x17866efe, 0x1912bf32, 0x96a38c5, 0x1b6b498, 0xefaaa31, 0x12037079, 0xb85245d, 0x1e7027a3, 0x727} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x62c51a7caee7d3b3, 0x9598259ac80c8bae, 0x3791be91eaf6342d, 0xf0c54be20269c172, 0x18b912bf32bc3377, 0x551886dad2612d47, 0xa48bb203707977d, 0x29fd3f9c09e8d7} +#else +{0x18a34f95dcfa766, 0x259ac80c8bae62, 0x148f57b1a16cacc, 0x809a705c8de46f, 0x57866efe18a97c, 0x12d4718b912bf3, 0xbbeaa8c436d693, 0x15c2922ec80dc1e, 0x53fa7f3813d1} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x177, 0xf70, 0x25, 0x503, 0x1f96, 0x1abd, 0x6f5, 0x115b, 0xa68, 0x1192, 0x338, 0x1bae, 0x15af, 0x1570, 0xb79, 0x1c9a, 0xe78, 0x19de, 0x860, 0x1076, 0x1a63, 0x1d52, 0x1511, 0x10c5, 0x1fdf, 0xab1, 0x1454, 0x2c4, 0x292, 0x1135, 0x273, 0x1d, 0xefa, 0x47, 0x344, 0x226, 0x9c1, 0x1af, 0x639} +#elif RADIX == 32 +{0xbbf600, 0x60257b8, 0xf7f2c50, 0xad9bd75, 0x11925344, 0x1f75c338, 0x1cd5c2b5, 0x78e4d2d, 0x10c19de7, 0x1698e0ec, 0x5a88f54, 0x163fdf86, 0xb128a8a, 0x189a8a48, 0x1401d139, 0x11008eef, 0xe088986, 0xd7a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x2c503012bdc02efd, 0x494d115b37aeaf7f, 0xcd5c2b5fbae19c46, 0x64306779cf1c9a5b, 0xf0c5a88f54b4c707, 0x45242c4a2a2ac7fb, 0x11ddf401d139c4d, 0xd86bd3822261a2} +#else +{0xa060257b805dfb, 0x1115b37aeaf7f2c, 0x1afdd70ce2324a6, 0x73c72696f3570a, 0x9698e0ec860cef, 0xac7fbf0c5a88f5, 0xe26a2921625151, 0x8804777d00744e, 0xd7a70444c3} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x153b, 0x598, 0x100c, 0x1537, 0x1eda, 0x190b, 0x1406, 0x186e, 0x457, 0x469, 0x14a0, 0x1ce0, 0x1f6d, 0xf2f, 0x1837, 0x616, 0x16d0, 0xf35, 0x192b, 0x106, 0x17d6, 0x6b3, 0x169e, 0x27a, 0xe54, 0xa42, 0x1694, 0x16c3, 0x7b, 0x298, 0x118, 0xb0, 0x893, 0xbca, 0x1678, 0x19de, 0xb59, 0x3a, 0x43} +#elif RADIX == 32 +{0xa9d84f6, 0xf00c2cc, 0x2fdb553, 0x37501b2, 0x46922be, 0x179c14a0, 0x1bbcbfed, 0xd030b60, 0x1256f35b, 
0x1df5820d, 0x1ab4f1ac, 0x84e5413, 0x1b0ed28a, 0x14c01ee, 0x60b008c, 0x1e179489, 0x1ace77ac, 0x8d2} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xb55378061662a761, 0xa48af86ea03642fd, 0xbbcbfedbce0a5011, 0x6c95bcd6da0616c1, 0x827ab4f1acefac10, 0xf76c3b4a2909ca, 0x2f291260b008c0a6, 0x680e96b39deb3c} +#else +{0xa6f00c2cc54ec2, 0xf86ea03642fdb5, 0x16de7052808d245, 0x1b68185b06ef2ff, 0x19df5820d92b79a, 0x909ca827ab4f1a, 0x53007bb61da51, 0xf0bca44982c023, 0xd01d2d673bd6} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x625d,0xe815,0xba6a,0x6297,0x1a75,0xd2b2,0xc698,0xd28b,0xb1,0x97f3,0x82e4,0x4126,0xf8e7,0x3639,0x1816,0x9c43,0xd2cf,0x37da,0xad52,0x2b8a,0x91cb,0x4297,0x6be7,0x1a98,0x19e4,0xa8a,0x22de,0x835c,0x4d5f,0x4596,0xa957,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe815625d,0x6297ba6a,0xd2b21a75,0xd28bc698,0x97f300b1,0x412682e4,0x3639f8e7,0x9c431816,0x37dad2cf,0x2b8aad52,0x429791cb,0x1a986be7,0xa8a19e4,0x835c22de,0x45964d5f,0x7a957}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6297ba6ae815625d,0xd28bc698d2b21a75,0x412682e497f300b1,0x9c4318163639f8e7,0x2b8aad5237dad2cf,0x1a986be7429791cb,0x835c22de0a8a19e4,0x7a95745964d5f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf6c6,0x71f7,0xa10,0xf04d,0x23dd,0x2419,0x60da,0xbcad,0xe86c,0xb26d,0x2f1d,0xdb61,0xae7,0xd947,0xb159,0x9d94,0x42db,0x8401,0xe3fe,0xcd5c,0x7586,0xfd38,0xf28a,0xc4c5,0x40d7,0x5c22,0x185c,0xd31f,0x843f,0xe782,0xb462,0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x71f7f6c6,0xf04d0a10,0x241923dd,0xbcad60da,0xb26de86c,0xdb612f1d,0xd9470ae7,0x9d94b159,0x840142db,0xcd5ce3fe,0xfd387586,0xc4c5f28a,0x5c2240d7,0xd31f185c,0xe782843f,0x5b462}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf04d0a1071f7f6c6,0xbcad60da241923dd,0xdb612f1db26de86c,0x9d94b159d9470ae7,0xcd5ce3fe840142db,0xc4c5f28afd387586,0xd31f185c5c2240d7,0x5b462e782843f}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1c53,0x7318,0xfc96,0x4b34,0xd504,0xae17,0x4d56,0xa914,0x6a8c,0x69,0x2448,0x28b,0x5716,0xce1b,0xa7cc,0xd29c,0x48cf,0x1028,0xc81e,0x40dd,0xbcdf,0x6d4,0x36f7,0xcb9f,0x8d7,0x3a34,0x99c0,0x38d3,0x6e18,0xb4bc,0xe1a8,0xb}}} +#elif GMP_LIMB_BITS == 
32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x73181c53,0x4b34fc96,0xae17d504,0xa9144d56,0x696a8c,0x28b2448,0xce1b5716,0xd29ca7cc,0x102848cf,0x40ddc81e,0x6d4bcdf,0xcb9f36f7,0x3a3408d7,0x38d399c0,0xb4bc6e18,0xbe1a8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4b34fc9673181c53,0xa9144d56ae17d504,0x28b244800696a8c,0xd29ca7ccce1b5716,0x40ddc81e102848cf,0xcb9f36f706d4bcdf,0x38d399c03a3408d7,0xbe1a8b4bc6e18}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9da3,0x17ea,0x4595,0x9d68,0xe58a,0x2d4d,0x3967,0x2d74,0xff4e,0x680c,0x7d1b,0xbed9,0x718,0xc9c6,0xe7e9,0x63bc,0x2d30,0xc825,0x52ad,0xd475,0x6e34,0xbd68,0x9418,0xe567,0xe61b,0xf575,0xdd21,0x7ca3,0xb2a0,0xba69,0x56a8,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x17ea9da3,0x9d684595,0x2d4de58a,0x2d743967,0x680cff4e,0xbed97d1b,0xc9c60718,0x63bce7e9,0xc8252d30,0xd47552ad,0xbd686e34,0xe5679418,0xf575e61b,0x7ca3dd21,0xba69b2a0,0x856a8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d68459517ea9da3,0x2d7439672d4de58a,0xbed97d1b680cff4e,0x63bce7e9c9c60718,0xd47552adc8252d30,0xe5679418bd686e34,0x7ca3dd21f575e61b,0x856a8ba69b2a0}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +#elif 
GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1aff,0x9f84,0xf1c6,0xd816,0xbdd0,0xd450,0x1990,0x119,0xbcf7,0x1a97,0x4780,0x8209,0x695b,0x1d73,0x20ba,0x7b53,0x5e3c,0x4ce5,0xac53,0x351f,0xaaa3,0x5a3e,0xd54c,0x121f,0xbf17,0xdb55,0xc9c,0x8370,0x2061,0x415c,0x1f35,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9f841aff,0xd816f1c6,0xd450bdd0,0x1191990,0x1a97bcf7,0x82094780,0x1d73695b,0x7b5320ba,0x4ce55e3c,0x351fac53,0x5a3eaaa3,0x121fd54c,0xdb55bf17,0x83700c9c,0x415c2061,0xc1f35}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd816f1c69f841aff,0x1191990d450bdd0,0x820947801a97bcf7,0x7b5320ba1d73695b,0x351fac534ce55e3c,0x121fd54c5a3eaaa3,0x83700c9cdb55bf17,0xc1f35415c2061}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x7734,0xde6f,0xbab1,0xd4f3,0xc928,0x6c68,0x69b0,0x7cc0,0x994f,0x296c,0xb1dc,0x2eb2,0xe4ce,0x8494,0xa8ff,0x95d3,0x5f30,0xe7f,0x918,0x6cd6,0xae27,0x747c,0x1f93,0xed96,0x5590,0xc91a,0x713d,0xc33e,0xc075,0x40fd,0x9ce5,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xde6f7734,0xd4f3bab1,0x6c68c928,0x7cc069b0,0x296c994f,0x2eb2b1dc,0x8494e4ce,0x95d3a8ff,0xe7f5f30,0x6cd60918,0x747cae27,0xed961f93,0xc91a5590,0xc33e713d,0x40fdc075,0x39ce5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd4f3bab1de6f7734,0x7cc069b06c68c928,0x2eb2b1dc296c994f,0x95d3a8ff8494e4ce,0x6cd609180e7f5f30,0xed961f93747cae27,0xc33e713dc91a5590,0x39ce540fdc075}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xda85,0x89f5,0x1aaf,0x9ec7,0xcfff,0xec63,0x3ae9,0x20bc,0xc2f3,0x9942,0x7d84,0xfa25,0x5e69,0xeb7b,0xc357,0x9342,0x5c58,0xd26c,0x857b,0x7a7f,0x757,0xfb5c,0xbb97,0x33,0x6c28,0xfceb,0xd644,0xcc0a,0x22ad,0xe1c0,0x12d6,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x89f5da85,0x9ec71aaf,0xec63cfff,0x20bc3ae9,0x9942c2f3,0xfa257d84,0xeb7b5e69,0x9342c357,0xd26c5c58,0x7a7f857b,0xfb5c0757,0x33bb97,0xfceb6c28,0xcc0ad644,0xe1c022ad,0x412d6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9ec71aaf89f5da85,0x20bc3ae9ec63cfff,0xfa257d849942c2f3,0x9342c357eb7b5e69,0x7a7f857bd26c5c58,0x33bb97fb5c0757,0xcc0ad644fceb6c28,0x412d6e1c022ad}}} +#endif +, +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe501,0x607b,0xe39,0x27e9,0x422f,0x2baf,0xe66f,0xfee6,0x4308,0xe568,0xb87f,0x7df6,0x96a4,0xe28c,0xdf45,0x84ac,0xa1c3,0xb31a,0x53ac,0xcae0,0x555c,0xa5c1,0x2ab3,0xede0,0x40e8,0x24aa,0xf363,0x7c8f,0xdf9e,0xbea3,0xe0ca,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x607be501,0x27e90e39,0x2baf422f,0xfee6e66f,0xe5684308,0x7df6b87f,0xe28c96a4,0x84acdf45,0xb31aa1c3,0xcae053ac,0xa5c1555c,0xede02ab3,0x24aa40e8,0x7c8ff363,0xbea3df9e,0x3e0ca}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x27e90e39607be501,0xfee6e66f2baf422f,0x7df6b87fe5684308,0x84acdf45e28c96a4,0xcae053acb31aa1c3,0xede02ab3a5c1555c,0x7c8ff36324aa40e8,0x3e0cabea3df9e}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x625d,0xe815,0xba6a,0x6297,0x1a75,0xd2b2,0xc698,0xd28b,0xb1,0x97f3,0x82e4,0x4126,0xf8e7,0x3639,0x1816,0x9c43,0xd2cf,0x37da,0xad52,0x2b8a,0x91cb,0x4297,0x6be7,0x1a98,0x19e4,0xa8a,0x22de,0x835c,0x4d5f,0x4596,0xa957,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe815625d,0x6297ba6a,0xd2b21a75,0xd28bc698,0x97f300b1,0x412682e4,0x3639f8e7,0x9c431816,0x37dad2cf,0x2b8aad52,0x429791cb,0x1a986be7,0xa8a19e4,0x835c22de,0x45964d5f,0x7a957}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6297ba6ae815625d,0xd28bc698d2b21a75,0x412682e497f300b1,0x9c4318163639f8e7,0x2b8aad5237dad2cf,0x1a986be7429791cb,0x835c22de0a8a19e4,0x7a95745964d5f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf6c6,0x71f7,0xa10,0xf04d,0x23dd,0x2419,0x60da,0xbcad,0xe86c,0xb26d,0x2f1d,0xdb61,0xae7,0xd947,0xb159,0x9d94,0x42db,0x8401,0xe3fe,0xcd5c,0x7586,0xfd38,0xf28a,0xc4c5,0x40d7,0x5c22,0x185c,0xd31f,0x843f,0xe782,0xb462,0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x71f7f6c6,0xf04d0a10,0x241923dd,0xbcad60da,0xb26de86c,0xdb612f1d,0xd9470ae7,0x9d94b159,0x840142db,0xcd5ce3fe,0xfd387586,0xc4c5f28a,0x5c2240d7,0xd31f185c,0xe782843f,0x5b462}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf04d0a1071f7f6c6,0xbcad60da241923dd,0xdb612f1db26de86c,0x9d94b159d9470ae7,0xcd5ce3fe840142db,0xc4c5f28afd387586,0xd31f185c5c2240d7,0x5b462e782843f}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1c53,0x7318,0xfc96,0x4b34,0xd504,0xae17,0x4d56,0xa914,0x6a8c,0x69,0x2448,0x28b,0x5716,0xce1b,0xa7cc,0xd29c,0x48cf,0x1028,0xc81e,0x40dd,0xbcdf,0x6d4,0x36f7,0xcb9f,0x8d7,0x3a34,0x99c0,0x38d3,0x6e18,0xb4bc,0xe1a8,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x73181c53,0x4b34fc96,0xae17d504,0xa9144d56,0x696a8c,0x28b2448,0xce1b5716,0xd29ca7cc,0x102848cf,0x40ddc81e,0x6d4bcdf,0xcb9f36f7,0x3a3408d7,0x38d399c0,0xb4bc6e18,0xbe1a8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4b34fc9673181c53,0xa9144d56ae17d504,0x28b244800696a8c,0xd29ca7ccce1b5716,0x40ddc81e102848cf,0xcb9f36f706d4bcdf,0x38d399c03a3408d7,0xbe1a8b4bc6e18}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x9da3,0x17ea,0x4595,0x9d68,0xe58a,0x2d4d,0x3967,0x2d74,0xff4e,0x680c,0x7d1b,0xbed9,0x718,0xc9c6,0xe7e9,0x63bc,0x2d30,0xc825,0x52ad,0xd475,0x6e34,0xbd68,0x9418,0xe567,0xe61b,0xf575,0xdd21,0x7ca3,0xb2a0,0xba69,0x56a8,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x17ea9da3,0x9d684595,0x2d4de58a,0x2d743967,0x680cff4e,0xbed97d1b,0xc9c60718,0x63bce7e9,0xc8252d30,0xd47552ad,0xbd686e34,0xe5679418,0xf575e61b,0x7ca3dd21,0xba69b2a0,0x856a8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d68459517ea9da3,0x2d7439672d4de58a,0xbed97d1b680cff4e,0x63bce7e9c9c60718,0xd47552adc8252d30,0xe5679418bd686e34,0x7ca3dd21f575e61b,0x856a8ba69b2a0}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +#elif 
GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x679c,0x35ac,0x6c8c,0xee5e,0x2827,0x29fa,0x9f6c,0xbda,0x2083,0x5e20,0xd351,0x39bd,0xd9bc,0x4085,0x3727,0x8f2,0xe905,0x55dd,0x6f90,0x6e26,0x6779,0xf15a,0xf170,0xec90,0xdb0e,0x53a0,0x6f99,0xe710,0xad92,0xa7f0,0xe2e1,0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x35ac679c,0xee5e6c8c,0x29fa2827,0xbda9f6c,0x5e202083,0x39bdd351,0x4085d9bc,0x8f23727,0x55dde905,0x6e266f90,0xf15a6779,0xec90f170,0x53a0db0e,0xe7106f99,0xa7f0ad92,0xde2e1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xee5e6c8c35ac679c,0xbda9f6c29fa2827,0x39bdd3515e202083,0x8f237274085d9bc,0x6e266f9055dde905,0xec90f170f15a6779,0xe7106f9953a0db0e,0xde2e1a7f0ad92}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa483,0xbf25,0x238c,0x4c65,0xdd0b,0xccc9,0xc5af,0xac20,0xe998,0xb162,0xe2bf,0xbd24,0x5fd,0x6720,0xd781,0xd37d,0xa89,0x595a,0x76b0,0x7f86,0xdea4,0x59ea,0x2c01,0xd679,0x714b,0x5454,0xe262,0x2bcf,0xfad4,0x8bc0,0x8cd3,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbf25a483,0x4c65238c,0xccc9dd0b,0xac20c5af,0xb162e998,0xbd24e2bf,0x672005fd,0xd37dd781,0x595a0a89,0x7f8676b0,0x59eadea4,0xd6792c01,0x5454714b,0x2bcfe262,0x8bc0fad4,0xc8cd3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4c65238cbf25a483,0xac20c5afccc9dd0b,0xbd24e2bfb162e998,0xd37dd781672005fd,0x7f8676b0595a0a89,0xd6792c0159eadea4,0x2bcfe2625454714b,0xc8cd38bc0fad4}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3f72,0x6188,0x95e8,0xed15,0x2b1a,0x2fd,0xaae9,0x15d9,0x5945,0x23ff,0xfe55,0xce25,0xaa48,0xa648,0x8534,0x16db,0x3fcf,0xa301,0xfb7c,0x3a68,0x4ba,0x1c1d,0x30ee,0xf044,0x116f,0xc4f8,0x98b2,0x4971,0xea5c,0xb93e,0x2836,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x61883f72,0xed1595e8,0x2fd2b1a,0x15d9aae9,0x23ff5945,0xce25fe55,0xa648aa48,0x16db8534,0xa3013fcf,0x3a68fb7c,0x1c1d04ba,0xf04430ee,0xc4f8116f,0x497198b2,0xb93eea5c,0x32836}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xed1595e861883f72,0x15d9aae902fd2b1a,0xce25fe5523ff5945,0x16db8534a648aa48,0x3a68fb7ca3013fcf,0xf04430ee1c1d04ba,0x497198b2c4f8116f,0x32836b93eea5c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9864,0xca53,0x9373,0x11a1,0xd7d8,0xd605,0x6093,0xf425,0xdf7c,0xa1df,0x2cae,0xc642,0x2643,0xbf7a,0xc8d8,0xf70d,0x16fa,0xaa22,0x906f,0x91d9,0x9886,0xea5,0xe8f,0x136f,0x24f1,0xac5f,0x9066,0x18ef,0x526d,0x580f,0x1d1e,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xca539864,0x11a19373,0xd605d7d8,0xf4256093,0xa1dfdf7c,0xc6422cae,0xbf7a2643,0xf70dc8d8,0xaa2216fa,0x91d9906f,0xea59886,0x136f0e8f,0xac5f24f1,0x18ef9066,0x580f526d,0x21d1e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x11a19373ca539864,0xf4256093d605d7d8,0xc6422caea1dfdf7c,0xf70dc8d8bf7a2643,0x91d9906faa2216fa,0x136f0e8f0ea59886,0x18ef9066ac5f24f1,0x21d1e580f526d}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x185f, 0xecc, 0x21c, 0xa62, 0x77, 0x8b1, 0x1188, 0xec2, 0xda1, 0xbf0, 0xb11, 0x82d, 0x1fe9, 0x10d4, 0x1e36, 0x194e, 0x77e, 0x1112, 0x14c0, 0x1dcd, 0x1be7, 0x1a4c, 0x140e, 0x10c5, 0x1aaf, 0x236, 0xdf3, 0x1a21, 0x1dff, 0x15bb, 0xda8, 0x30e, 0x17b0, 0xc7b, 0xd1, 0xcb, 0x868, 0x2e5, 0x5a} +#elif RADIX == 32 +{0xc2f86ac, 0x421c766, 0xc40eea6, 0x16146211, 0xbf06d0b, 0x505ab11, 0x1b4353fd, 0x17eca778, 0x9811123, 0x6f9fb9b, 0x5a07693, 0x6daaf86, 0x885be62, 0xaddf7ff, 0x30e6d4, 0x1458f77b, 0x34032c1, 0x52a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xeea6210e3b330be1, 0xc1b42ec28c422c40, 0xb4353fd282d588af, 0xda604448efd94ef1, 0xf0c5a0769337cfdc, 0xfbffa216f988db55, 0xb1eef6030e6d456e, 0x120b950d00cb068} +#else +{0x14c421c766617c3, 0x2ec28c422c40ee, 0x1e9416ac457e0da, 0x3bf653bc6d0d4f, 0x66f9fb9b4c0889, 0x8db55f0c5a0769, 0x2b77dffd10b7cc, 0x1a2c7bbd80c39b5, 0x9172a1a01960} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x61a, 0x3b3, 0x1087, 0x1a98, 0x81d, 0x22c, 0x1462, 0xbb0, 0x368, 0xafc, 0xac4, 0xa0b, 0x7fa, 0x1435, 0x178d, 0x1653, 0x11df, 0x444, 0xd30, 0x1f73, 0x6f9, 0x1693, 0xd03, 0x1c31, 0x16ab, 0x188d, 0xb7c, 0x1e88, 0x1f7f, 0x56e, 0x136a, 0xc3, 0x1dec, 0xb1e, 0x1834, 0x32, 0xa1a, 0x10b9, 0xe6} +#elif RADIX == 32 +{0x130d1113, 0x110871d9, 0xb103ba9, 0x1d851884, 0xafc1b42, 0x9416ac4, 0x6d0d4ff, 0x1dfb29de, 0x1a604448, 0x19be7ee6, 0x11681da4, 0x11b6abe1, 0x1a216f98, 0x2b77dff, 0x180c39b5, 0xd163dde, 0x10d00cb0, 0x54a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) 
+{0x3ba988438eccc344, 0xf06d0bb0a3108b10, 0x6d0d4ff4a0b5622b, 0x369811123bf653bc, 0x7c31681da4cdf3f7, 0xbeffe885be6236d5, 0x2c7bbd80c39b515b, 0x742e5434032c1a} +#else +{0x15310871d998688, 0x10bb0a3108b103b, 0x1fa505ab115f836, 0x8efd94ef1b4353, 0x99be7ee6d30222, 0x236d57c31681da, 0x8addf7ff442df3, 0x68b1eef6030e6d, 0xe85ca8680658} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0xa5a, 0x2ab, 0x659, 0x149f, 0xf1b, 0xa1a, 0xb05, 0x1915, 0x1aa8, 0x1aa0, 0x1c4d, 0xe2f, 0xe1c, 0x19ab, 0x1d34, 0xa8f, 0xf59, 0x1f1, 0xc6d, 0x520, 0xb6e, 0x127f, 0x5dd, 0x175a, 0x1957, 0x1ca4, 0x1563, 0x122f, 0x705, 0xcd6, 0x1c02, 0xdc1, 0x93b, 0x387, 0x1870, 0x54, 0x853, 0x1adc, 0x6bc} +#elif RADIX == 32 +{0x152d7fc4, 0x1e659155, 0x69e3749, 0x8aac154, 0x1aa0d546, 0x11c5fc4d, 0x1a66adc3, 0x159547f4, 0x18da1f17, 0x1adb8a40, 0x1a2eec9f, 0x149957ba, 0x8beac7c, 0x66b1c16, 0x16dc1e01, 0x1c070e93, 0x2981530, 0xe2} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3749f32c8aad4b5f, 0x83551915582a869e, 0xa66adc38e2fe26ea, 0x63687c5eb2a8fe9, 0xf75a2eec9fd6dc52, 0x8e0b22fab1f2932a, 0xe1d276dc1e01335, 0x196b710a6054c38} +#else +{0x93e659155a96bf, 0x11915582a869e37, 0x1c717f137541aa, 0x17acaa3fa699ab7, 0x1fadb8a40c6d0f8, 0x12932af75a2eec9, 0x99ac705917d58f, 0xe038749db70780, 0x17d6e214c0a98} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x66e, 0xe79, 0xadd, 0x23, 0xf11, 0x7d6, 0x1091, 0x42a, 0x1885, 0x128, 0x6f9, 0xcdd, 0x1d55, 0x19bd, 0x116f, 0x1dbd, 0x107b, 0xaef, 0x8bc, 0xa74, 0x7b5, 0xdff, 0x743, 0x17e0, 0x453, 0x414, 0x672, 0xf28, 0x198a, 0x19c4, 0x1e85, 0xcb9, 0x17c2, 0x14c6, 0x1871, 0x1034, 0x6cb, 0x55b, 0xbf} +#elif RADIX == 32 +{0x13370e29, 0x6add73c, 0x159e2202, 0x154244f, 0x128c429, 0x159ba6f9, 0x17e6f7aa, 0x7bedec5, 0x1178aef8, 0x19ed54e8, 0x3a1b7f, 0x28453bf, 0x1ca0ce44, 0x1ce26629, 0x4cb9f42, 0x1c698d7c, 0x165c0d30, 0x159} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x2202356eb9e4cdc3, 0xa310a42a8489f59e, 0x7e6f7aaacdd37c84, 
0x445e2bbe0f7dbd8b, 0x77e03a1b7fcf6aa7, 0x3314f2833910508a, 0xd31af84cb9f42e71, 0xe956cd97034c38} +#else +{0x46add73c99b87, 0xa42a8489f59e22, 0x15566e9be425188, 0x183df6f62df9bde, 0x1f9ed54e88bc577, 0x10508a77e03a1b7, 0x1738998a79419c8, 0xe34c6be132e7d0, 0x22ad9b2e0698} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x165f, 0x1e7c, 0xe41, 0x12eb, 0xa1, 0x1655, 0x6db, 0x1dfc, 0x4a, 0xac7, 0x1dcb, 0x3d9, 0x16a0, 0x562, 0x1d70, 0x528, 0xaa7, 0x172e, 0x36c, 0x728, 0x1e76, 0x23f, 0x6e6, 0x53e, 0x1640, 0x1a82, 0x1b78, 0x1066, 0x895, 0x17eb, 0x1713, 0x174d, 0x679, 0x1415, 0x19a8, 0xe7c, 0x674, 0x1f81, 0x15} +#elif RADIX == 32 +{0xb2f81a0, 0x16e41f3e, 0x1541432e, 0xfe1b6ec, 0xac70257, 0x7b3dcb, 0x18158ad4, 0xa729475, 0x6d972e5, 0x1f9d8e50, 0x1e37308f, 0x10564029, 0x19b6f1a, 0x1bf5a256, 0x1374db89, 0xa282a67, 0x13a39f33, 0xc09} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x432eb720f9f2cbe0, 0x1c095dfc36dd9541, 0x8158ad403d9ee5ab, 0x81b65cb954e528eb, 0x53e37308ffcec72, 0xd12b066dbc6a0ac8, 0x5054cf374db89dfa, 0xafe04ce8e7ccd4} +#else +{0x5d6e41f3e597c0, 0x15dfc36dd954143, 0xa01ecf72d58e04, 0x55394a3ae0562b, 0x1ff9d8e5036cb97, 0xa0ac8053e37308, 0xefd68958336de3, 0x15141533cdd36e2, 0x15fc099d1cf99} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1e32, 0x1f7c, 0x1c05, 0x372, 0x34a, 0x1d26, 0x11b9, 0x294, 0xa87, 0x1835, 0x158f, 0x1d19, 0x13e8, 0x4dc, 0x1e1a, 0x195f, 0x116e, 0x62c, 0x1839, 0x107a, 0xa4f, 0x119f, 0x18f3, 0xc48, 0x1c7a, 0x100d, 0x2e9, 0x12df, 0xbec, 0x6f1, 0x8bf, 0xe24, 0xa57, 0x50c, 0x28b, 0x31e, 0x430, 0x1b08, 0x378} +#elif RADIX == 32 +{0xf1941d7, 0x5c05fbe, 0x9869437, 0x14a46e7a, 0x18355438, 0x3a3358f, 0xd13727d, 0x16ecaff8, 0x107262c8, 0x1a93e0f5, 0x8c79c67, 0x1bc7a62, 0xb7c5d30, 0x1378afb2, 0xee2445f, 0x2ca18a5, 0x180c785, 0x1c1} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x94372e02fdf3c650, 0xd550e2948dcf4986, 0xd13727d1d19ac7e0, 0xac1c98b22dd95ff0, 0x4c48c79c67d49f07, 0x57d92df174c0378f, 0x94314aee2445f9bc, 0xc6c2086031e145} +#else +{0x6e5c05fbe78ca0, 0xe2948dcf498694, 0x1e8e8cd63f06aa8, 0x8b7657fc344dc9, 0xfa93e0f5839316, 0x378f4c48c79c6, 0x1cde2bec96f8ba6, 0x11650c52bb89117, 0x18d8410c063c2} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1044, 0x2d0, 0x1004, 0x1082, 0x535, 0x141a, 0x10a6, 0x1f9d, 0xc2d, 0x1347, 0xdf4, 0x1db1, 0x90e, 0x116d, 0x59c, 0xc2b, 0x7c2, 0x15d7, 0x119, 0x32c, 0x1e89, 0x1b01, 0xe5f, 0x105f, 0xd7d, 0xb4f, 0x1c33, 0x1b3b, 0xf2d, 0xc22, 0x11d8, 0x1848, 0x11a9, 0x1ee7, 0x6ea, 0x165d, 0x17d4, 0x77, 0x64b} +#elif RADIX == 32 +{0x8227755, 0x5004168, 0x68a6b08, 0x1cec29a8, 0x1347616f, 0x1bb62df4, 0xe45b521, 0x1c261596, 0x2335d73, 0xfa24658, 0x1f72fec0, 0x9ed7d82, 0xcef866b, 0x6113cb7, 0x138488ec, 0x1abdcf1a, 0x1ea5974d, 0x83d} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6b0828020b42089d, 0x1d85bf9d8535068a, 0xe45b521ddb16fa4d, 0xc08cd75cf84c2b2c, 0xb05f72fec07d1232, 0x9e5bb3be19ad3daf, 0x7b9e3538488ec308, 0x1681defa965d375} +#else +{0x1050041684113b, 0x1bf9d8535068a6b, 0x10eed8b7d268ec2, 0x13e130acb3916d4, 0xfa24658119aeb, 0xd3dafb05f72fec, 0x1844f2dd9df0cd, 0x1d5ee78d4e1223b, 0x1203bdf52cba6} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x7bc, 0x14d4, 0x1225, 0x1afb, 0x179e, 0x2c0, 0x1c0, 0x1267, 0x450, 0x1f26, 0x1e3f, 0x2bb, 0x19a5, 0x12f9, 0xa57, 0x2d, 0x1ed, 0xa16, 0x754, 0x1893, 0x759, 0x6bb, 0x618, 0x1379, 0xff3, 0x1989, 0x1abb, 0x1c40, 0x1bf5, 0x71e, 0xd6d, 0xc04, 0x15ef, 0x6aa, 0x4da, 0x1fb6, 0xb5b, 0x9f2, 0x211} +#elif RADIX == 32 +{0x3de2735, 0x17225a6a, 0x102f3daf, 0x13387005, 0x1f262284, 0x14577e3f, 0xbcbe734, 0x1ed016a9, 0xea8a160, 0x19d67126, 0x1930c1ae, 0x112ff39b, 0x11035779, 0x138f6fd7, 0x1ec046b6, 0x168d555e, 0x1adfed89, 0x412} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3dafb912d350f789, 0x988a12670e00b02f, 0xbcbe734a2bbf1ffc, 0x33aa28583da02d52, 0x737930c1aeceb389, 0xb7ebc40d5de625fe, 0x1aaabdec046b69c7, 0x15a7c96b7fb626d} +#else +{0x15f7225a6a1ef13, 0x12670e00b02f3d, 0x1a515df8ffe4c45, 0xf680b54af2f9c, 0x1d9d6712675450b, 0x625fe737930c1a, 0x14e3dbf5e206aef, 0x1b46aaaf7b011ad, 0x104f92d6ff6c4} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5b0b,0xfec,0x3254,0x9d11,0x8130,0x32bb,0x65ca,0x86b8,0x8184,0xfce7,0x3cc3,0x667c,0xa54b,0x4e65,0x271e,0x9834,0xa9c,0x1d81,0x5e0d,0x45fd,0x26eb,0x4b0b,0x5f8f,0x7e57,0xd36f,0xcb4c,0x56b4,0xe984,0xad75,0xcfdf,0x59a1,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xfec5b0b,0x9d113254,0x32bb8130,0x86b865ca,0xfce78184,0x667c3cc3,0x4e65a54b,0x9834271e,0x1d810a9c,0x45fd5e0d,0x4b0b26eb,0x7e575f8f,0xcb4cd36f,0xe98456b4,0xcfdfad75,0xb59a1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d1132540fec5b0b,0x86b865ca32bb8130,0x667c3cc3fce78184,0x9834271e4e65a54b,0x45fd5e0d1d810a9c,0x7e575f8f4b0b26eb,0xe98456b4cb4cd36f,0xb59a1cfdfad75}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2bba,0x15e8,0x1c2b,0x55ed,0xb72c,0xb3cc,0x3a5c,0xb409,0x2f3d,0x71b,0xd365,0x90a1,0xb2f5,0x1e26,0xbe0,0xa633,0xc5a6,0xe2bd,0xe0e3,0x49b3,0xd4c8,0x9f1c,0x9ba3,0x3674,0x2e5a,0x5411,0xc603,0xcdd3,0xfb23,0x2e7e,0x88a,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x15e82bba,0x55ed1c2b,0xb3ccb72c,0xb4093a5c,0x71b2f3d,0x90a1d365,0x1e26b2f5,0xa6330be0,0xe2bdc5a6,0x49b3e0e3,0x9f1cd4c8,0x36749ba3,0x54112e5a,0xcdd3c603,0x2e7efb23,0x4088a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55ed1c2b15e82bba,0xb4093a5cb3ccb72c,0x90a1d365071b2f3d,0xa6330be01e26b2f5,0x49b3e0e3e2bdc5a6,0x36749ba39f1cd4c8,0xcdd3c60354112e5a,0x4088a2e7efb23}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa8e5,0xb5b7,0x12fc,0x2365,0x6f00,0x2267,0x5260,0x5ece,0xbf38,0x93ee,0xfcde,0xe5b3,0x9f9,0x712a,0x1770,0xdd48,0xdd27,0x5350,0xb7f5,0xbdf,0x46a4,0x7cb4,0xe6a9,0xca36,0x8565,0x3c9,0xe43b,0x3713,0x4fe2,0xeeb1,0x806a,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb5b7a8e5,0x236512fc,0x22676f00,0x5ece5260,0x93eebf38,0xe5b3fcde,0x712a09f9,0xdd481770,0x5350dd27,0xbdfb7f5,0x7cb446a4,0xca36e6a9,0x3c98565,0x3713e43b,0xeeb14fe2,0x4806a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x236512fcb5b7a8e5,0x5ece526022676f00,0xe5b3fcde93eebf38,0xdd481770712a09f9,0xbdfb7f55350dd27,0xca36e6a97cb446a4,0x3713e43b03c98565,0x4806aeeb14fe2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa4f5,0xf013,0xcdab,0x62ee,0x7ecf,0xcd44,0x9a35,0x7947,0x7e7b,0x318,0xc33c,0x9983,0x5ab4,0xb19a,0xd8e1,0x67cb,0xf563,0xe27e,0xa1f2,0xba02,0xd914,0xb4f4,0xa070,0x81a8,0x2c90,0x34b3,0xa94b,0x167b,0x528a,0x3020,0xa65e,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf013a4f5,0x62eecdab,0xcd447ecf,0x79479a35,0x3187e7b,0x9983c33c,0xb19a5ab4,0x67cbd8e1,0xe27ef563,0xba02a1f2,0xb4f4d914,0x81a8a070,0x34b32c90,0x167ba94b,0x3020528a,0x4a65e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x62eecdabf013a4f5,0x79479a35cd447ecf,0x9983c33c03187e7b,0x67cbd8e1b19a5ab4,0xba02a1f2e27ef563,0x81a8a070b4f4d914,0x167ba94b34b32c90,0x4a65e3020528a}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, 
._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5eb9,0x2393,0xd8e8,0xc566,0xd78,0xa77f,0x1bf1,0x4577,0x3141,0xecd3,0x132c,0x281,0x13b5,0x1d34,0xb4bb,0xf25,0xdc3,0xbf86,0x5e9f,0xde50,0xf536,0xe95e,0xd5b0,0x687d,0x3ab,0x992c,0xdb8d,0xc8cc,0xfaf0,0xd954,0x6e1a,0x5}}} 
+#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x23935eb9,0xc566d8e8,0xa77f0d78,0x45771bf1,0xecd33141,0x281132c,0x1d3413b5,0xf25b4bb,0xbf860dc3,0xde505e9f,0xe95ef536,0x687dd5b0,0x992c03ab,0xc8ccdb8d,0xd954faf0,0x56e1a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc566d8e823935eb9,0x45771bf1a77f0d78,0x281132cecd33141,0xf25b4bb1d3413b5,0xde505e9fbf860dc3,0x687dd5b0e95ef536,0xc8ccdb8d992c03ab,0x56e1ad954faf0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf17c,0xf7a8,0xd9f7,0x1544,0xb2c8,0xf5aa,0x3812,0x3fba,0xf63e,0xb545,0x678c,0xad77,0xed9f,0x12f8,0xa5dc,0x74c9,0xec1d,0xc1e0,0x806f,0x14a0,0xfb25,0x34f3,0x606c,0x57d5,0x9733,0x9c8c,0x83e3,0xa787,0x7cae,0x503b,0x2499,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf7a8f17c,0x1544d9f7,0xf5aab2c8,0x3fba3812,0xb545f63e,0xad77678c,0x12f8ed9f,0x74c9a5dc,0xc1e0ec1d,0x14a0806f,0x34f3fb25,0x57d5606c,0x9c8c9733,0xa78783e3,0x503b7cae,0x12499}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1544d9f7f7a8f17c,0x3fba3812f5aab2c8,0xad77678cb545f63e,0x74c9a5dc12f8ed9f,0x14a0806fc1e0ec1d,0x57d5606c34f3fb25,0xa78783e39c8c9733,0x12499503b7cae}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5d83,0x57ac,0xb73f,0xb74d,0x1869,0x3588,0x43,0x915,0x7f31,0x82eb,0x4487,0xb830,0x6627,0x70a7,0x9911,0x5646,0x4779,0xe113,0x168c,0x925d,0xc1e8,0xd347,0xa95e,0xd5a6,0x7deb,0xbeb,0x72,0xf755,0x306,0x9ee2,0x7ef9,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x57ac5d83,0xb74db73f,0x35881869,0x9150043,0x82eb7f31,0xb8304487,0x70a76627,0x56469911,0xe1134779,0x925d168c,0xd347c1e8,0xd5a6a95e,0xbeb7deb,0xf7550072,0x9ee20306,0x27ef9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb74db73f57ac5d83,0x915004335881869,0xb830448782eb7f31,0x5646991170a76627,0x925d168ce1134779,0xd5a6a95ed347c1e8,0xf75500720beb7deb,0x27ef99ee20306}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa147,0xdc6c,0x2717,0x3a99,0xf287,0x5880,0xe40e,0xba88,0xcebe,0x132c,0xecd3,0xfd7e,0xec4a,0xe2cb,0x4b44,0xf0da,0xf23c,0x4079,0xa160,0x21af,0xac9,0x16a1,0x2a4f,0x9782,0xfc54,0x66d3,0x2472,0x3733,0x50f,0x26ab,0x91e5,0xa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdc6ca147,0x3a992717,0x5880f287,0xba88e40e,0x132ccebe,0xfd7eecd3,0xe2cbec4a,0xf0da4b44,0x4079f23c,0x21afa160,0x16a10ac9,0x97822a4f,0x66d3fc54,0x37332472,0x26ab050f,0xa91e5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3a992717dc6ca147,0xba88e40e5880f287,0xfd7eecd3132ccebe,0xf0da4b44e2cbec4a,0x21afa1604079f23c,0x97822a4f16a10ac9,0x3733247266d3fc54,0xa91e526ab050f}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5b0b,0xfec,0x3254,0x9d11,0x8130,0x32bb,0x65ca,0x86b8,0x8184,0xfce7,0x3cc3,0x667c,0xa54b,0x4e65,0x271e,0x9834,0xa9c,0x1d81,0x5e0d,0x45fd,0x26eb,0x4b0b,0x5f8f,0x7e57,0xd36f,0xcb4c,0x56b4,0xe984,0xad75,0xcfdf,0x59a1,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xfec5b0b,0x9d113254,0x32bb8130,0x86b865ca,0xfce78184,0x667c3cc3,0x4e65a54b,0x9834271e,0x1d810a9c,0x45fd5e0d,0x4b0b26eb,0x7e575f8f,0xcb4cd36f,0xe98456b4,0xcfdfad75,0xb59a1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d1132540fec5b0b,0x86b865ca32bb8130,0x667c3cc3fce78184,0x9834271e4e65a54b,0x45fd5e0d1d810a9c,0x7e575f8f4b0b26eb,0xe98456b4cb4cd36f,0xb59a1cfdfad75}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2bba,0x15e8,0x1c2b,0x55ed,0xb72c,0xb3cc,0x3a5c,0xb409,0x2f3d,0x71b,0xd365,0x90a1,0xb2f5,0x1e26,0xbe0,0xa633,0xc5a6,0xe2bd,0xe0e3,0x49b3,0xd4c8,0x9f1c,0x9ba3,0x3674,0x2e5a,0x5411,0xc603,0xcdd3,0xfb23,0x2e7e,0x88a,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x15e82bba,0x55ed1c2b,0xb3ccb72c,0xb4093a5c,0x71b2f3d,0x90a1d365,0x1e26b2f5,0xa6330be0,0xe2bdc5a6,0x49b3e0e3,0x9f1cd4c8,0x36749ba3,0x54112e5a,0xcdd3c603,0x2e7efb23,0x4088a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55ed1c2b15e82bba,0xb4093a5cb3ccb72c,0x90a1d365071b2f3d,0xa6330be01e26b2f5,0x49b3e0e3e2bdc5a6,0x36749ba39f1cd4c8,0xcdd3c60354112e5a,0x4088a2e7efb23}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa8e5,0xb5b7,0x12fc,0x2365,0x6f00,0x2267,0x5260,0x5ece,0xbf38,0x93ee,0xfcde,0xe5b3,0x9f9,0x712a,0x1770,0xdd48,0xdd27,0x5350,0xb7f5,0xbdf,0x46a4,0x7cb4,0xe6a9,0xca36,0x8565,0x3c9,0xe43b,0x3713,0x4fe2,0xeeb1,0x806a,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb5b7a8e5,0x236512fc,0x22676f00,0x5ece5260,0x93eebf38,0xe5b3fcde,0x712a09f9,0xdd481770,0x5350dd27,0xbdfb7f5,0x7cb446a4,0xca36e6a9,0x3c98565,0x3713e43b,0xeeb14fe2,0x4806a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x236512fcb5b7a8e5,0x5ece526022676f00,0xe5b3fcde93eebf38,0xdd481770712a09f9,0xbdfb7f55350dd27,0xca36e6a97cb446a4,0x3713e43b03c98565,0x4806aeeb14fe2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa4f5,0xf013,0xcdab,0x62ee,0x7ecf,0xcd44,0x9a35,0x7947,0x7e7b,0x318,0xc33c,0x9983,0x5ab4,0xb19a,0xd8e1,0x67cb,0xf563,0xe27e,0xa1f2,0xba02,0xd914,0xb4f4,0xa070,0x81a8,0x2c90,0x34b3,0xa94b,0x167b,0x528a,0x3020,0xa65e,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf013a4f5,0x62eecdab,0xcd447ecf,0x79479a35,0x3187e7b,0x9983c33c,0xb19a5ab4,0x67cbd8e1,0xe27ef563,0xba02a1f2,0xb4f4d914,0x81a8a070,0x34b32c90,0x167ba94b,0x3020528a,0x4a65e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x62eecdabf013a4f5,0x79479a35cd447ecf,0x9983c33c03187e7b,0x67cbd8e1b19a5ab4,0xba02a1f2e27ef563,0x81a8a070b4f4d914,0x167ba94b34b32c90,0x4a65e3020528a}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = 
(mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x6f0b,0x3478,0x5aeb,0x64,0x9a1a,0xecff,0xccf0,0x2fab,0xf3a8,0x718a,0x97e7,0xc31a,0xa0cd,0xb872,0x514e,0x5ee1,0x4b79,0x4af9,0xd0c3,0x97c6,0x9591,0x2370,0xa987,0xa5e6,0xe201,0x8730,0x3150,0x1980,0x8452,0x3b83,0x25c9,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x34786f0b,0x645aeb,0xecff9a1a,0x2fabccf0,0x718af3a8,0xc31a97e7,0xb872a0cd,0x5ee1514e,0x4af94b79,0x97c6d0c3,0x23709591,0xa5e6a987,0x8730e201,0x19803150,0x3b838452,0xb25c9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x645aeb34786f0b,0x2fabccf0ecff9a1a,0xc31a97e7718af3a8,0x5ee1514eb872a0cd,0x97c6d0c34af94b79,0xa5e6a98723709591,0x198031508730e201,0xb25c93b838452}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, 
._mp_d = (mp_limb_t[]) {0x1de7,0x7f69,0xdefe,0xfc6b,0x6fd5,0xc100,0x5188,0x1318,0x416e,0x10dd,0x33ac,0x4260,0x8985,0x1d0e,0x5b13,0xd02e,0x6fb5,0x6e28,0x9b7d,0x4f72,0x9665,0xd5f3,0xf00d,0xda5f,0x98f2,0xd778,0x4b2a,0x958d,0xfcef,0xd837,0x4a93,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7f691de7,0xfc6bdefe,0xc1006fd5,0x13185188,0x10dd416e,0x426033ac,0x1d0e8985,0xd02e5b13,0x6e286fb5,0x4f729b7d,0xd5f39665,0xda5ff00d,0xd77898f2,0x958d4b2a,0xd837fcef,0x34a93}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfc6bdefe7f691de7,0x13185188c1006fd5,0x426033ac10dd416e,0xd02e5b131d0e8985,0x4f729b7d6e286fb5,0xda5ff00dd5f39665,0x958d4b2ad77898f2,0x34a93d837fcef}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x8527,0x81f3,0xcb8f,0x5e0d,0x7c93,0x7448,0x613,0xedcf,0x7d31,0x77c7,0x19dc,0x8ace,0xbfb8,0xa582,0x9ccc,0x28df,0xb6e0,0x4f69,0x33e6,0x546b,0xcfb2,0x1627,0x53ed,0xdc8d,0xd80b,0xb843,0xc438,0xb942,0x8fb5,0xb3c0,0xc1dc,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81f38527,0x5e0dcb8f,0x74487c93,0xedcf0613,0x77c77d31,0x8ace19dc,0xa582bfb8,0x28df9ccc,0x4f69b6e0,0x546b33e6,0x1627cfb2,0xdc8d53ed,0xb843d80b,0xb942c438,0xb3c08fb5,0x2c1dc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5e0dcb8f81f38527,0xedcf061374487c93,0x8ace19dc77c77d31,0x28df9ccca582bfb8,0x546b33e64f69b6e0,0xdc8d53ed1627cfb2,0xb942c438b843d80b,0x2c1dcb3c08fb5}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x90f5,0xcb87,0xa514,0xff9b,0x65e5,0x1300,0x330f,0xd054,0xc57,0x8e75,0x6818,0x3ce5,0x5f32,0x478d,0xaeb1,0xa11e,0xb486,0xb506,0x2f3c,0x6839,0x6a6e,0xdc8f,0x5678,0x5a19,0x1dfe,0x78cf,0xceaf,0xe67f,0x7bad,0xc47c,0xda36,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcb8790f5,0xff9ba514,0x130065e5,0xd054330f,0x8e750c57,0x3ce56818,0x478d5f32,0xa11eaeb1,0xb506b486,0x68392f3c,0xdc8f6a6e,0x5a195678,0x78cf1dfe,0xe67fceaf,0xc47c7bad,0x4da36}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xff9ba514cb8790f5,0xd054330f130065e5,0x3ce568188e750c57,0xa11eaeb1478d5f32,0x68392f3cb506b486,0x5a195678dc8f6a6e,0xe67fceaf78cf1dfe,0x4da36c47c7bad}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x14d5, 0x1163, 0xf47, 0x337, 0x71e, 0x4ad, 0x1071, 0x19d4, 0x11d9, 0xdce, 0x186a, 0x1785, 0x13c8, 0x695, 0xe1b, 0x54b, 0x174f, 0xd0c, 0x17cb, 0x17db, 0xb41, 0xb78, 0x1a46, 0x66f, 0x23, 0x836, 0x19ce, 0xcdf, 0x90b, 0x6fb, 0x1508, 0x1bb5, 0x6aa, 0x1219, 0x1bba, 0x1d67, 0x1e46, 0x8d8, 0x62c} +#elif RADIX == 32 +{0x1a6af50e, 0xef478b1, 0xb4e3c33, 0xea41c49, 0xdce8ece, 0x2f0b86a, 0xd9a5679, 0x14f2a5b8, 0xf96d0cb, 0x2d06fb7, 0xfd232de, 0x6c02333, 0x137f39c8, 0x37da42d, 0x15bb5a84, 0xea4326a, 0x123759f7, 0x9c7} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3c3377a3c58e9abd, 0x3a3b39d483892b4e, 0xd9a56791785c3537, 0xbbe5b432e9e54b70, 0x666fd232de16837d, 0xd216cdfce720d804, 0x4864d55bb5a841be, 0x72363c8dd67ddd} +#else +{0x66ef478b1d357a, 0x139d483892b4e3c, 0x1c8bc2e1a9b9d1d, 0xba7952dc366959, 0x1c2d06fb77cb686, 0xd804666fd232d, 0xdf690b66fe739, 0x1752193556ed6a1, 0xe46c791bacfb} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x1d37, 0x1c58, 0x1bd1, 0x10cd, 0x9c7, 0x92b, 0x41c, 0xe75, 0x1476, 0x1373, 0xe1a, 0x5e1, 0xcf2, 0x19a5, 0x1b86, 0x1952, 0x5d3, 0x1b43, 0x1df2, 0xdf6, 0x2d0, 0x12de, 0x1e91, 0x199b, 0x1008, 0x120d, 0x1e73, 0x1b37, 0x1a42, 0x1be, 0xd42, 0x16ed, 0x9aa, 0x1486, 0x1eee, 0x1759, 0x791, 0x236, 0x5bb} +#elif RADIX == 32 +{0xe9becab, 0x1bbd1e2c, 0xad38f0c, 0x13a90712, 0x1373a3b3, 0x8bc2e1a, 0x366959e, 0x1d3ca96e, 0x1be5b432, 0x10b41bed, 0x1bf48cb7, 0x1b008cc, 0xcdfce72, 0xdf690b, 0x156ed6a1, 0x1ba90c9a, 0x1c8dd67d, 0xd31} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x8f0cdde8f163a6fb, 0xce8ece7520e24ad3, 0x366959e45e170d4d, 0x6ef96d0cba7952dc, 0x199bf48cb785a0df, 0xb485b37f39c83601, 0x52193556ed6a106f, 0x488d8f23759f77} +#else +{0x19bbd1e2c74df6, 0xce7520e24ad38f, 0xf22f0b86a6e747, 0x12e9e54b70d9a56, 0xf0b41beddf2da1, 0x83601199bf48cb, 0x837da42d9bf9ce, 0x1dd4864d55bb5a8, 0x911b1e46eb3e} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x8f6, 0xe30, 0x75, 0xaf7, 0xb3c, 0x1672, 0x1e05, 0x157a, 0x16b1, 0x1fd, 0x3c2, 0x114d, 0x1000, 0x1b4f, 0x1f37, 0xc0e, 0xdd, 0x4de, 0xdff, 0x55e, 0x1a2f, 0x353, 0xc4a, 0x1225, 0x9ed, 0x9ff, 0x1493, 0x18e6, 0x96c, 0x163c, 0xa76, 0x1c78, 0x11b4, 0x1087, 0x1519, 0xc82, 0x3e0, 0x7d4, 0xf5} +#elif RADIX == 32 +{0x47b122a, 0xe075718, 0x1c9678af, 0xbd7816c, 0x1fdb58d, 0x229a3c2, 0x1bed3e00, 0xdd6077c, 0x1bfe4de0, 0x1e8bcabc, 0x56250d4, 0x1fe9ed91, 0x39a9269, 0xb1e25b3, 0x9c7853b, 0x6610f1b, 0x1f0320aa, 0x7a0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x78af703ab8c11ec4, 0xf6d6357af02d9c96, 0xbed3e00114d1e107, 0xe6ff93781bac0ef9, 0xb2256250d4f45e55, 0x12d98e6a49a7fd3d, 0xc21e369c7853b58f, 0xe9f507c0c82a8c} +#else +{0x15ee07571823d89, 0x357af02d9c9678, 0x8a68f083fb6b, 0x6eb03be6fb4f8, 0x9e8bcabcdff26f, 0x7fd3db2256250d, 0x1ac7896cc73524d, 0x330878da71e14e, 0x23ea0f819055} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1227, 0x1240, 0x423, 0xd84, 0x1dc1, 0x982, 0x1cb3, 0x14e1, 0x16eb, 0x1409, 0xf49, 0xec8, 0x888, 0xe0b, 0x1c45, 0x176, 0x49e, 0x1d40, 0x1e6b, 0x7a3, 0xfba, 0x175f, 0x1908, 0xb88, 0x168c, 0x1324, 0x159f, 0x1077, 0xac3, 0x10b4, 0x478, 0x240, 0x1682, 0x14f, 0x1599, 0x152f, 0x1197, 0xad5, 0x133} +#elif RADIX == 32 +{0x91396c4, 0x8423920, 0xbb82d8, 0x70f2cd3, 0x1409b75d, 0x1d90f49, 0x2b82d11, 0x9e0bb71, 0x1cd7d402, 0x1bee8f47, 0x8c845d7, 0x4968c5c, 0x1deb3f3, 0x85a2b0e, 0x424023c, 0x6429f68, 0xcbd4beb, 0xac} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x82d84211c90244e5, 0x26dd74e1e59a60bb, 0x2b82d110ec87a4d0, 0x3f35f50093c176e2, 0x8b88c845d7df747a, 0x1587077acfcc92d1, 0x853ed0424023c42d, 0x12ab5632f52facc} +#else +{0x1b08423920489cb, 0x174e1e59a60bb82, 0x887643d268136e, 0x24f05db88ae0b4, 0xfbee8f47e6bea0, 0xc92d18b88c845d, 0x2168ac383bd67e, 0x13214fb4109008f, 0xa56ac65ea5f5} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1544, 0x1dea, 0x162d, 0x73d, 0x6d1, 0x1511, 0x5f2, 0x275, 0x1aff, 0x1c7, 0x1d84, 0x1875, 0x10df, 0x2e0, 0x70b, 0x9eb, 0x897, 0xf0f, 0xa5d, 0xf38, 0x108c, 0x1c12, 0x1649, 0x1849, 0x9b8, 0x2bc, 0x1b0, 0xd0e, 
0xfdb, 0x8ee, 0x1b0b, 0x1fdc, 0xc1, 0x1771, 0x1776, 0xa12, 0x1392, 0xd10, 0x618} +#elif RADIX == 32 +{0xaa27395, 0x1b62def5, 0x44da273, 0x13a97caa, 0x1c7d7f8, 0x1f0ebd84, 0x58b821b, 0x974f59c, 0x14baf0f4, 0x14231e70, 0x9b24f04, 0x1789b8c2, 0x14383602, 0x14773f6d, 0x3fdcd85, 0x1daee20c, 0x1c9284ae, 0xd04} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xa273db16f7aaa89c, 0x1f5fe2752f95444d, 0x58b821bf875ec207, 0x852ebc3d12e9eb38, 0x1849b24f04a118f3, 0x9fb6d0e0d80af137, 0x5dc4183fdcd85a3b, 0x183442724a12bbb} +#else +{0xe7b62def555139, 0x1e2752f95444da2, 0xdfc3af61038faf, 0x144ba7ace162e08, 0x94231e70a5d787, 0xaf1371849b24f0, 0xd1dcfdb68706c0, 0xed771060ff7361, 0x156884e494257} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1756, 0x1187, 0x608, 0x637, 0x5c5, 0x459, 0x12f2, 0x9a1, 0x314, 0xe7f, 0x1c73, 0x27f, 0xa8d, 0x17f8, 0x1e33, 0x1878, 0x1c21, 0x123b, 0xb76, 0x7ea, 0x157, 0x16b4, 0xad7, 0x413, 0x56e, 0x4f3, 0x881, 0x1319, 0x1cc3, 0x1813, 0x1575, 0x1f0, 0x13f9, 0x1ef4, 0x8ae, 0x17c8, 0xd48, 0x157d, 0x5ea} +#elif RADIX == 32 +{0x1bab7032, 0xe6088c3, 0x164b8a63, 0xd0cbc88, 0xe7f18a2, 0x144ffc73, 0x19dfe151, 0x21c3c78, 0x16ed23be, 0x55cfd4, 0x1356bdad, 0x1e656e20, 0xc651024, 0x1c09f30e, 0x121f0aba, 0xbbde93f, 0xa45f211, 0x8eb} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x8a637304461eeadc, 0xfc6289a19791164b, 0x9dfe151a27fe39b9, 0xa5bb48ef843878f1, 0xc41356bdad02ae7e, 0xf98731944093ccad, 0x7bd27f21f0abae04, 0x155f5a917c8457} +#else +{0xc6e6088c3dd5b8, 0x89a19791164b8a, 0x8d13ff1cdcfe31, 0x1e10e1e3c677f85, 0x1a055cfd4b7691d, 0x13ccadc41356bda, 0x17027cc398ca204, 0x15def49fc87c2ae, 0x2abeb522f908} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0xbba, 0x1eb6, 0x49a, 0x12a5, 0x12d2, 0x30a, 0x172f, 0x174d, 0x1231, 0x1036, 0x122e, 0x158, 0x743, 0xf10, 0x1e52, 0x18c7, 0x152e, 0x13b1, 0x7ae, 0x128d, 0x9c4, 0x848, 0x4, 0x1e64, 0x1e6f, 0x10ca, 0x3d4, 0x164, 0x1c8, 0x3e2, 0x4e8, 0x27b, 0x1d32, 0x1cc2, 0x1c60, 0x7a8, 0x13df, 0x1f6b, 0x6ad} +#elif RADIX == 32 +{0x5dd7eaa, 0xa49af5b, 0x2a5a52a, 0x1a6dcbc6, 0x1036918d, 0xc2b122e, 0x93c40e8, 0x12ec63f9, 0xf5d3b1a, 0x271251a, 0x4002212, 0x195e6ff3, 0x5907a90, 0x1f10720, 0x427b274, 0x183985d3, 0x1ef9ea38, 0x45c} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xa52a524d7ad9775f, 0xda46374db978c2a5, 0x93c40e8615891740, 0xd3d74ec6a5d8c7f2, 0xfe64002212138928, 0x83901641ea432bcd, 0x730ba6427b2740f8, 0x11fdae7be7a8e30} +#else +{0x54a49af5b2eebf, 0x374db978c2a5a5, 
0x1430ac48ba06d23, 0x1a97631fca4f103, 0x4271251a7ae9d8, 0x32bcdfe6400221, 0x7c41c80b20f52, 0xc1cc2e9909ec9d, 0x8fb5cf7cf51c} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1d5e, 0x18e6, 0xc97, 0x1db2, 0x9df, 0x19d3, 0x1564, 0x1a3a, 0x90, 0xea5, 0xd74, 0x19fc, 0xf84, 0xadd, 0x2e5, 0x10bb, 0x183f, 0x1334, 0xa50, 0x54b, 0xd22, 0x1295, 0xf11, 0xfa1, 0x1810, 0xa3, 0xa81, 0x1026, 0x2b2, 0x19ee, 0x1a4a, 0xf8a, 0xfb3, 0x1463, 0x19c5, 0x42c, 0x830, 0x562, 0x3db} +#elif RADIX == 32 +{0xeaf491f, 0x4c97c73, 0x14d3bfdb, 0x11d55933, 0xea50486, 0x133f8d74, 0x12ab75f0, 0x3f85d8b, 0x14a1334c, 0xb488a96, 0x1788ca5, 0x1478107d, 0x995020, 0xcf70aca, 0x6f8ad25, 0x1168c6fb, 0x1810b33, 0x892} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xbfdb264be39babd2, 0x94121a3aab2674d3, 0x2ab75f099fc6ba3a, 0xb5284cd307f0bb17, 0xfa1788ca55a4454, 0x8565026540828f02, 0xd18df66f8ad2567b, 0x7958906042cce2} +#else +{0x1b64c97c73757a4, 0x1a3aab2674d3bf, 0x184cfe35d1d4a09, 0xc1fc2ec5caadd7, 0xab488a96a5099a, 0x28f020fa1788ca, 0xb3dc2b28132a04, 0x18b4637d9be2b49, 0xf2b120c08599} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3919,0x9bc5,0xcbb7,0xe8a6,0x3f55,0x22a,0x8690,0xc64e,0x8e93,0x6c4d,0x80e8,0x422f,0xffd,0x7e1,0xf545,0xb342,0x87be,0x80a5,0xa62b,0xfb8d,0x4048,0x466e,0x4697,0xd4d,0x9d79,0xc9ba,0x52c1,0xa008,0x21f1,0xf309,0xb87c,0xa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9bc53919,0xe8a6cbb7,0x22a3f55,0xc64e8690,0x6c4d8e93,0x422f80e8,0x7e10ffd,0xb342f545,0x80a587be,0xfb8da62b,0x466e4048,0xd4d4697,0xc9ba9d79,0xa00852c1,0xf30921f1,0xab87c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe8a6cbb79bc53919,0xc64e8690022a3f55,0x422f80e86c4d8e93,0xb342f54507e10ffd,0xfb8da62b80a587be,0xd4d4697466e4048,0xa00852c1c9ba9d79,0xab87cf30921f1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc03e,0x86b5,0xcdfe,0x5bc4,0xc7a1,0xbc7b,0x90d9,0x9464,0xc604,0x9345,0x853d,0xc710,0x1ad2,0x4b98,0x4050,0x8ef4,0xd73c,0x6664,0xbd2,0xb610,0x483e,0x5e87,0xabe9,0xcf72,0x679e,0xbfbf,0x52b1,0x3483,0xf7c,0x4a36,0xbd53,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b5c03e,0x5bc4cdfe,0xbc7bc7a1,0x946490d9,0x9345c604,0xc710853d,0x4b981ad2,0x8ef44050,0x6664d73c,0xb6100bd2,0x5e87483e,0xcf72abe9,0xbfbf679e,0x348352b1,0x4a360f7c,0xebd53}}} +#elif 
GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5bc4cdfe86b5c03e,0x946490d9bc7bc7a1,0xc710853d9345c604,0x8ef440504b981ad2,0xb6100bd26664d73c,0xcf72abe95e87483e,0x348352b1bfbf679e,0xebd534a360f7c}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5937,0x9b8c,0xba31,0x676c,0x8d31,0x9c3d,0xf619,0x42e2,0x4d1a,0xd072,0x76e5,0x390a,0x5086,0x7643,0x2d16,0xfd71,0x341,0xff7b,0x56ec,0xed63,0x1436,0x7324,0x6a5d,0x9488,0x9f0,0x145,0xd6b5,0x4043,0x10d8,0xeb6e,0x7784,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b8c5937,0x676cba31,0x9c3d8d31,0x42e2f619,0xd0724d1a,0x390a76e5,0x76435086,0xfd712d16,0xff7b0341,0xed6356ec,0x73241436,0x94886a5d,0x14509f0,0x4043d6b5,0xeb6e10d8,0xf7784}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x676cba319b8c5937,0x42e2f6199c3d8d31,0x390a76e5d0724d1a,0xfd712d1676435086,0xed6356ecff7b0341,0x94886a5d73241436,0x4043d6b5014509f0,0xf7784eb6e10d8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc6e7,0x643a,0x3448,0x1759,0xc0aa,0xfdd5,0x796f,0x39b1,0x716c,0x93b2,0x7f17,0xbdd0,0xf002,0xf81e,0xaba,0x4cbd,0x7841,0x7f5a,0x59d4,0x472,0xbfb7,0xb991,0xb968,0xf2b2,0x6286,0x3645,0xad3e,0x5ff7,0xde0e,0xcf6,0x4783,0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x643ac6e7,0x17593448,0xfdd5c0aa,0x39b1796f,0x93b2716c,0xbdd07f17,0xf81ef002,0x4cbd0aba,0x7f5a7841,0x47259d4,0xb991bfb7,0xf2b2b968,0x36456286,0x5ff7ad3e,0xcf6de0e,0x54783}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17593448643ac6e7,0x39b1796ffdd5c0aa,0xbdd07f1793b2716c,0x4cbd0abaf81ef002,0x47259d47f5a7841,0xf2b2b968b991bfb7,0x5ff7ad3e36456286,0x547830cf6de0e}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +#endif +}, { +#if 0 
+#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5fd3,0xc1bb,0x3527,0x289e,0x97fd,0xf5ce,0xa8e1,0xfbf2,0x8f04,0xb5e7,0xdf66,0xcb44,0x5b5,0x8314,0x31c,0x6e5c,0xa6b9,0x3134,0x3d19,0x5ea9,0x860d,0x37fe,0x8003,0xafb9,0xbfdd,0xf377,0xa36d,0xde5a,0xa9df,0x8da,0xc872,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc1bb5fd3,0x289e3527,0xf5ce97fd,0xfbf2a8e1,0xb5e78f04,0xcb44df66,0x831405b5,0x6e5c031c,0x3134a6b9,0x5ea93d19,0x37fe860d,0xafb98003,0xf377bfdd,0xde5aa36d,0x8daa9df,0xbc872}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x289e3527c1bb5fd3,0xfbf2a8e1f5ce97fd,0xcb44df66b5e78f04,0x6e5c031c831405b5,0x5ea93d193134a6b9,0xafb9800337fe860d,0xde5aa36df377bfdd,0xbc87208daa9df}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb354,0x6a4f,0xd461,0xf7db,0x4aec,0x6786,0xff6,0xb274,0xfcf4,0x66d,0x97e9,0x277e,0x5e43,0x68a3,0xb1fa,0x6062,0xa56a,0x8c2b,0x67ed,0xd926,0x444a,0x4883,0x5bc5,0x8084,0x1f0a,0x209e,0x3b85,0x4eb6,0x14fe,0xb973,0xb05c,0xa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6a4fb354,0xf7dbd461,0x67864aec,0xb2740ff6,0x66dfcf4,0x277e97e9,0x68a35e43,0x6062b1fa,0x8c2ba56a,0xd92667ed,0x4883444a,0x80845bc5,0x209e1f0a,0x4eb63b85,0xb97314fe,0xab05c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf7dbd4616a4fb354,0xb2740ff667864aec,0x277e97e9066dfcf4,0x6062b1fa68a35e43,0xd92667ed8c2ba56a,0x80845bc54883444a,0x4eb63b85209e1f0a,0xab05cb97314fe}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x9c41,0x213b,0x2271,0x4d2a,0xca4c,0x987c,0xf3fd,0x8462,0x84ba,0x5504,0xf930,0x5ca1,0xb075,0x84d2,0xb16,0x1bc1,0xe1ac,0xfeb5,0xe84e,0x4bb0,0xf6b6,0x57b6,0x3d98,0x97f4,0xda24,0x9866,0x1aae,0xb84,0x36ec,0xfcb7,0x4a2d,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x213b9c41,0x4d2a2271,0x987cca4c,0x8462f3fd,0x550484ba,0x5ca1f930,0x84d2b075,0x1bc10b16,0xfeb5e1ac,0x4bb0e84e,0x57b6f6b6,0x97f43d98,0x9866da24,0xb841aae,0xfcb736ec,0xf4a2d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4d2a2271213b9c41,0x8462f3fd987cca4c,0x5ca1f930550484ba,0x1bc10b1684d2b075,0x4bb0e84efeb5e1ac,0x97f43d9857b6f6b6,0xb841aae9866da24,0xf4a2dfcb736ec}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa02d,0x3e44,0xcad8,0xd761,0x6802,0xa31,0x571e,0x40d,0x70fb,0x4a18,0x2099,0x34bb,0xfa4a,0x7ceb,0xfce3,0x91a3,0x5946,0xcecb,0xc2e6,0xa156,0x79f2,0xc801,0x7ffc,0x5046,0x4022,0xc88,0x5c92,0x21a5,0x5620,0xf725,0x378d,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3e44a02d,0xd761cad8,0xa316802,0x40d571e,0x4a1870fb,0x34bb2099,0x7cebfa4a,0x91a3fce3,0xcecb5946,0xa156c2e6,0xc80179f2,0x50467ffc,0xc884022,0x21a55c92,0xf7255620,0x4378d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd761cad83e44a02d,0x40d571e0a316802,0x34bb20994a1870fb,0x91a3fce37cebfa4a,0xa156c2e6cecb5946,0x50467ffcc80179f2,0x21a55c920c884022,0x4378df7255620}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3919,0x9bc5,0xcbb7,0xe8a6,0x3f55,0x22a,0x8690,0xc64e,0x8e93,0x6c4d,0x80e8,0x422f,0xffd,0x7e1,0xf545,0xb342,0x87be,0x80a5,0xa62b,0xfb8d,0x4048,0x466e,0x4697,0xd4d,0x9d79,0xc9ba,0x52c1,0xa008,0x21f1,0xf309,0xb87c,0xa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9bc53919,0xe8a6cbb7,0x22a3f55,0xc64e8690,0x6c4d8e93,0x422f80e8,0x7e10ffd,0xb342f545,0x80a587be,0xfb8da62b,0x466e4048,0xd4d4697,0xc9ba9d79,0xa00852c1,0xf30921f1,0xab87c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe8a6cbb79bc53919,0xc64e8690022a3f55,0x422f80e86c4d8e93,0xb342f54507e10ffd,0xfb8da62b80a587be,0xd4d4697466e4048,0xa00852c1c9ba9d79,0xab87cf30921f1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc03e,0x86b5,0xcdfe,0x5bc4,0xc7a1,0xbc7b,0x90d9,0x9464,0xc604,0x9345,0x853d,0xc710,0x1ad2,0x4b98,0x4050,0x8ef4,0xd73c,0x6664,0xbd2,0xb610,0x483e,0x5e87,0xabe9,0xcf72,0x679e,0xbfbf,0x52b1,0x3483,0xf7c,0x4a36,0xbd53,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b5c03e,0x5bc4cdfe,0xbc7bc7a1,0x946490d9,0x9345c604,0xc710853d,0x4b981ad2,0x8ef44050,0x6664d73c,0xb6100bd2,0x5e87483e,0xcf72abe9,0xbfbf679e,0x348352b1,0x4a360f7c,0xebd53}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5bc4cdfe86b5c03e,0x946490d9bc7bc7a1,0xc710853d9345c604,0x8ef440504b981ad2,0xb6100bd26664d73c,0xcf72abe95e87483e,0x348352b1bfbf679e,0xebd534a360f7c}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5937,0x9b8c,0xba31,0x676c,0x8d31,0x9c3d,0xf619,0x42e2,0x4d1a,0xd072,0x76e5,0x390a,0x5086,0x7643,0x2d16,0xfd71,0x341,0xff7b,0x56ec,0xed63,0x1436,0x7324,0x6a5d,0x9488,0x9f0,0x145,0xd6b5,0x4043,0x10d8,0xeb6e,0x7784,0xf}}} +#elif GMP_LIMB_BITS == 32 
+{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b8c5937,0x676cba31,0x9c3d8d31,0x42e2f619,0xd0724d1a,0x390a76e5,0x76435086,0xfd712d16,0xff7b0341,0xed6356ec,0x73241436,0x94886a5d,0x14509f0,0x4043d6b5,0xeb6e10d8,0xf7784}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x676cba319b8c5937,0x42e2f6199c3d8d31,0x390a76e5d0724d1a,0xfd712d1676435086,0xed6356ecff7b0341,0x94886a5d73241436,0x4043d6b5014509f0,0xf7784eb6e10d8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc6e7,0x643a,0x3448,0x1759,0xc0aa,0xfdd5,0x796f,0x39b1,0x716c,0x93b2,0x7f17,0xbdd0,0xf002,0xf81e,0xaba,0x4cbd,0x7841,0x7f5a,0x59d4,0x472,0xbfb7,0xb991,0xb968,0xf2b2,0x6286,0x3645,0xad3e,0x5ff7,0xde0e,0xcf6,0x4783,0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x643ac6e7,0x17593448,0xfdd5c0aa,0x39b1796f,0x93b2716c,0xbdd07f17,0xf81ef002,0x4cbd0aba,0x7f5a7841,0x47259d4,0xb991bfb7,0xf2b2b968,0x36456286,0x5ff7ad3e,0xcf6de0e,0x54783}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17593448643ac6e7,0x39b1796ffdd5c0aa,0xbdd07f1793b2716c,0x4cbd0abaf81ef002,0x47259d47f5a7841,0xf2b2b968b991bfb7,0x5ff7ad3e36456286,0x547830cf6de0e}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +#elif 
GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x718a,0xe24a,0xae5,0xa4d6,0xd401,0xf453,0x9f91,0x69ce,0x7d19,0xfa11,0x9273,0x4e63,0xf33a,0xde49,0xe08f,0x746a,0x243d,0x52bb,0x43b6,0xe4c,0x1bdd,0x380d,0xdf64,0x74fe,0x4dfa,0x584f,0xa4d6,0xd71b,0xf067,0xf070,0x717e,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe24a718a,0xa4d60ae5,0xf453d401,0x69ce9f91,0xfa117d19,0x4e639273,0xde49f33a,0x746ae08f,0x52bb243d,0xe4c43b6,0x380d1bdd,0x74fedf64,0x584f4dfa,0xd71ba4d6,0xf070f067,0xf717e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa4d60ae5e24a718a,0x69ce9f91f453d401,0x4e639273fa117d19,0x746ae08fde49f33a,0xe4c43b652bb243d,0x74fedf64380d1bdd,0xd71ba4d6584f4dfa,0xf717ef070f067}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5d93,0x7845,0xd1d0,0xe045,0xfa74,0x6b6,0x9400,0xad36,0x4e68,0xd3f6,0x9b00,0x7ca0,0xab22,0xfac,0x1fb6,0xb42f,0x57db,0xb2e3,0xbc5b,0x2b2d,0x94fa,0xc77e,0x34e2,0x2918,0x6ce9,0xf9dd,0x68cf,0xd4a2,0xbc59,0x6050,0xda60,0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x78455d93,0xe045d1d0,0x6b6fa74,0xad369400,0xd3f64e68,0x7ca09b00,0xfacab22,0xb42f1fb6,0xb2e357db,0x2b2dbc5b,0xc77e94fa,0x291834e2,0xf9dd6ce9,0xd4a268cf,0x6050bc59,0x5da60}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe045d1d078455d93,0xad36940006b6fa74,0x7ca09b00d3f64e68,0xb42f1fb60facab22,0x2b2dbc5bb2e357db,0x291834e2c77e94fa,0xd4a268cff9dd6ce9,0x5da606050bc59}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc6dc,0x5d39,0xac2b,0x2d81,0xc9b8,0xf398,0xdab5,0x8e30,0xb3b2,0x1b25,0x7102,0x8cd2,0x952e,0x7c35,0xb4f3,0x52b8,0x5789,0xb877,0x6906,0x8d31,0x98a6,0x8a10,0x2b3,0x1667,0x856,0xa935,0xfc76,0xc8ec,0x6044,0x9148,0x4f02,0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5d39c6dc,0x2d81ac2b,0xf398c9b8,0x8e30dab5,0x1b25b3b2,0x8cd27102,0x7c35952e,0x52b8b4f3,0xb8775789,0x8d316906,0x8a1098a6,0x166702b3,0xa9350856,0xc8ecfc76,0x91486044,0xd4f02}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2d81ac2b5d39c6dc,0x8e30dab5f398c9b8,0x8cd271021b25b3b2,0x52b8b4f37c35952e,0x8d316906b8775789,0x166702b38a1098a6,0xc8ecfc76a9350856,0xd4f0291486044}}} +#endif +, +#if 0 
+#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 31, ._mp_d = (mp_limb_t[]) {0x8e76,0x1db5,0xf51a,0x5b29,0x2bfe,0xbac,0x606e,0x9631,0x82e6,0x5ee,0x6d8c,0xb19c,0xcc5,0x21b6,0x1f70,0x8b95,0xdbc2,0xad44,0xbc49,0xf1b3,0xe422,0xc7f2,0x209b,0x8b01,0xb205,0xa7b0,0x5b29,0x28e4,0xf98,0xf8f,0x8e81}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1db58e76,0x5b29f51a,0xbac2bfe,0x9631606e,0x5ee82e6,0xb19c6d8c,0x21b60cc5,0x8b951f70,0xad44dbc2,0xf1b3bc49,0xc7f2e422,0x8b01209b,0xa7b0b205,0x28e45b29,0xf8f0f98,0x8e81}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5b29f51a1db58e76,0x9631606e0bac2bfe,0xb19c6d8c05ee82e6,0x8b951f7021b60cc5,0xf1b3bc49ad44dbc2,0x8b01209bc7f2e422,0x28e45b29a7b0b205,0x8e810f8f0f98}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0xca6, 0xc89, 0x411, 0x17e9, 0x188d, 0xad5, 0x8de, 0x403, 0x183a, 0x1e0d, 0x28c, 0xfdc, 0xbcc, 0xd0, 0xa3a, 0xb2e, 0x140c, 0x11b4, 0x4bb, 0x10da, 0xb22, 0x1a4c, 0xbfa, 0xbd5, 0xae6, 0xeff, 0x465, 0x8df, 0xcf9, 0x1c4a, 0x1807, 0x1462, 0xead, 0x1c43, 0x12a3, 0x1ec2, 0x1e12, 0xad0, 0x1cd} +#elif RADIX == 32 +{0x1653222c, 0x12411644, 0x15711b7e, 0x1a3795, 0x1e0dc1d1, 0x11fb828c, 0x1d034179, 0xc59728, 0x9771b4a, 0x2c8a1b4, 0x155fd693, 0x1feae65e, 0x37c8cae, 0x1e2533e5, 0x1b462c03, 0x8f886ea, 0x1097b0a5, 0x487} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x1b7e9208b22594c8, 0x3707440346f2b571, 0xd0341798fdc14678, 0xa25dc6d2818b2e51, 0xcbd55fd69316450d, 0x99f28df232bbfd5c, 0xf10dd5b462c03f12, 0xeab43c25ec2951} +#else +{0xfd2411644b2991, 0x1440346f2b5711b, 0x1cc7ee0a33c1b83, 0xa062cb94740d05, 0x62c8a1b44bb8da, 0x1bfd5ccbd55fd69, 0x1f894cf946f9195, 0x147c43756d18b00, 0x2568784bd852} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0xb2b, 0xb22, 0x904, 0xdfa, 0xe23, 0x12b5, 0x1a37, 0x1100, 0xe0e, 0x783, 0xa3, 0x3f7, 0x2f3, 0x1034, 0x128e, 0x2cb, 0x503, 0x1c6d, 0x112e, 0x1436, 0x2c8, 0x1693, 0xafe, 0x12f5, 0x1ab9, 0xbbf, 
0x1919, 0xa37, 0x133e, 0x1f12, 0x1601, 0xd18, 0x1bab, 0x1f10, 0x14a8, 0x17b0, 0x784, 0xab4, 0x653} +#elif RADIX == 32 +{0x595f7f3, 0x14904591, 0xd5c46df, 0x8068de5, 0x7837074, 0xc7ee0a3, 0x740d05e, 0x103165ca, 0x25dc6d2, 0x18b2286d, 0x1557f5a4, 0x17fab997, 0x8df232b, 0x1f894cf9, 0x16d18b00, 0xa3e21ba, 0x1c25ec29, 0x521} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x46dfa4822c89657d, 0xdc1d100d1bcad5c, 0x740d05e63f70519e, 0x689771b4a062cb94, 0x32f557f5a4c59143, 0xa67ca37c8caeff57, 0x7c43756d18b00fc4, 0x1aaad0f097b0a54} +#else +{0x1bf49045912cafb, 0x1d100d1bcad5c46, 0xf31fb828cf06e0, 0x12818b2e51d0341, 0x98b2286d12ee36, 0xeff5732f557f5a, 0x7e2533e51be465, 0x151f10dd5b462c0, 0x1a55a1e12f614} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x517, 0x18a8, 0x1a92, 0x94f, 0x1bb0, 0xf2c, 0x43, 0x5a8, 0x1463, 0x1b4b, 0x1a1c, 0x1c0e, 0x148a, 0x7f5, 0x6a3, 0x820, 0x1fc7, 0x141c, 0x1c2b, 0xd98, 0x48c, 0x587, 0x1b23, 0x1fb5, 0x4c0, 0x179c, 0x169e, 0x1927, 0x16b8, 0x1beb, 0x6bb, 0x1923, 0x2b7, 0x146d, 0x32b, 0xd85, 0x1a89, 0x1fb0, 0x2be} +#elif RADIX == 32 +{0x28bb412, 0x1fa92c54, 0xb376094, 0xd4010de, 0x1b4ba319, 0xb81da1c, 0x119fd691, 0x1c74101a, 0x185741cf, 0x19231b31, 0x15d91961, 0x1384c0fd, 0x49ed3d7, 0x1df5dae3, 0xf92335d, 0xae8da2b, 0x144b6146, 0xa86} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6094fd4962a0a2ed, 0x2e8c65a8021bcb37, 0x19fd6915c0ed0e6d, 0x8e15d073f8e82035, 0x1fb5d91961c918d9, 0xed71927b4f5e7098, 0xd1b456f92335defa, 0x7ec3512d85195} +#else +{0x129fa92c54145da, 0x65a8021bcb3760, 0x8ae07687369746, 0xfe3a080d467f5a, 0x39231b31c2ba0e, 0x1e70981fb5d9196, 0xf7d76b8c93da7a, 0x5746d15be48cd7, 0xfd86a25b0a3} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x147b, 0x14f1, 0xfdd, 0xb2a, 0xff7, 0x1426, 0xce1, 0x19a8, 0x1bf3, 0xbdd, 0x16dd, 0x1339, 0x10dd, 0x8f4, 0x1d29, 0x1b05, 0x1ee, 0x187b, 0x118a, 0x1e55, 0xcde, 0x1a18, 0x1b1f, 0x1648, 0x1c75, 0x1db8, 0xa2a, 0x1ab6, 0x1fa, 0xb0a, 0x1bdf, 0x1d18, 0x1a98, 0x12d9, 
0x13df, 0x6e0, 0xa3c, 0x537, 0x345} +#elif RADIX == 32 +{0x1a3dbe03, 0x14fdda78, 0x99feeb2, 0xd433868, 0xbdddf9e, 0x166736dd, 0x14a3d21b, 0x1eed82f4, 0x31587b0, 0x337bcab, 0x8d8fe86, 0x171c75b2, 0xad9455d, 0x158507eb, 0x11d18def, 0x17e5b3a9, 0x11e1b827, 0x13a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xeeb2a7eed3c68f6f, 0x777e79a8670d099f, 0x4a3d21bb339b6eaf, 0x58c561ec3ddb05e9, 0xb648d8fe8619bde5, 0x83f5ab651576e38e, 0xcb67531d18defac2, 0xd94dd4786e09ef} +#else +{0x1654fdda78d1edf, 0x79a8670d099fee, 0xdd99cdb757bbbf, 0x10f76c17a528f48, 0xc337bcab18ac3d, 0x16e38eb648d8fe8, 0x1d6141fad5b28ab, 0x1bf2d9d4c74637b, 0x29ba8f0dc13} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0xc4b, 0x1f6e, 0xcba, 0x1a23, 0x8a1, 0x7c3, 0x1a45, 0x1ca3, 0x6a9, 0x643, 0x3b, 0xc83, 0x208, 0x21a, 0xd43, 0x1805, 0x1078, 0x9af, 0x80a, 0x1555, 0x50d, 0x1eb8, 0xa49, 0x161c, 0x1eee, 0xe1b, 0xf4b, 0x9de, 0x117e, 0x14f8, 0xea7, 0xd18, 0x112a, 0x1a38, 0x1cc7, 0x1c36, 0xe5, 0x10fa, 0x411} +#elif RADIX == 32 +{0x625cd26, 0x6cbafb7, 0x10d143a2, 0x51e914f, 0x643354f, 0x190603b, 0x1886841, 0x78c02b5, 0x10149af8, 0x1436aaa, 0x1c524fae, 0x37eeeb0, 0x779e96e, 0x1a7c45f9, 0x14d18753, 0x11f47112, 0x72f0db9, 0x6d0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x43a2365d7db98973, 0xcd53ca3d229f0d1, 0x18868410c8301d99, 0x540526be0f18056a, 0xd61c524fae0a1b55, 0x22fc9de7a5b86fdd, 0xe8e2254d18753d3e, 0x7c3e81cbc36e63} +#else +{0x1446cbafb7312e6, 0x13ca3d229f0d143, 0x864180ecc866a, 0x183c6015a8621a1, 0x1c1436aaa80a4d7, 0x186fddd61c524fa, 0x1e9f117e4ef3d2d, 0x18fa388953461d4, 0xf87d039786dc} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x9d5, 0x0, 0x181d, 0xced, 0x1fe0, 0x267, 0xc65, 0x1a4d, 0x9e3, 0x1f0c, 0x5d, 0xbae, 0x276, 0x1551, 0x1684, 0x1eab, 0x17f0, 0x1b20, 0xae6, 0xbc3, 0x95, 0x17c3, 0xfd8, 0x1359, 0x3f5, 0x12b6, 0x1410, 0x113, 0x1a19, 0x1c1d, 0xd91, 0x1446, 0x1233, 0x170, 0x1c50, 0x13ac, 0x6eb, 0x926, 0x3bf} +#elif RADIX == 32 +{0x4eac70e, 0x1b81d000, 0x19ffc0ce, 0x126b1944, 0x1f0c4f1e, 0x1975c05d, 0x255444e, 0x1f0f55da, 0x15cdb20b, 0x18255786, 0x197ec5f0, 0x16c3f59a, 0x44e8212, 0x1e0ee864, 0x74466c8, 0x1402e123, 0x175ceb38, 0xc31} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xc0cedc0e80013ab1, 0x313c7a4d632899ff, 0x255444ecbae02efc, 0x35736c82fe1eabb4, 0xb3597ec5f0c12abc, 0x7432113a084ad87e, 0x5c24674466c8f07, 0x14a498dd73ace28} +#else +{0x19db81d00027563, 0x7a4d632899ffc0, 0x765d70177e189e, 0xbf87aaed095511, 0x18255786ae6d90, 
0xad87eb3597ec5f, 0x783ba19089d042, 0xa0170919d119b2, 0xe4931bae759c} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1997, 0xa5a, 0x4c4, 0x155d, 0x70b, 0x12f, 0xe9d, 0xfe0, 0x147c, 0x9b6, 0x18ea, 0xf41, 0x1636, 0x1707, 0x1a7e, 0x1326, 0x76d, 0xbef, 0x9fe, 0x1bb4, 0xe22, 0x200, 0x1a11, 0x7e6, 0x1709, 0x1be9, 0x1507, 0x1c63, 0xb6f, 0xceb, 0x1b88, 0x1ef6, 0x16b7, 0x20f, 0x1497, 0x1e1c, 0x26e, 0x139d, 0x330} +#elif RADIX == 32 +{0xccbbc7d, 0x1a4c452d, 0xbce1755, 0x1f03a742, 0x9b6a3e3, 0x19e838ea, 0x1f5c1ec6, 0x16d99369, 0x13fcbef3, 0x388b768, 0x6d08880, 0x1d37093f, 0x118ea0fb, 0x675adbf, 0xfef6dc4, 0x5c41f6b, 0x13778729, 0x568} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x1755d262296b32ef, 0xda8f8fe074e84bce, 0xf5c1ec6cf41c7526, 0x44ff2fbcedb326d3, 0x27e6d088801c45bb, 0xd6dfc63a83efa6e1, 0x883ed6fef6dc433a, 0x34e744dde1ca4b} +#else +{0xaba4c452d665de, 0x18fe074e84bce17, 0x367a0e3a936d47, 0x13b6cc9b4fd707b, 0x388b7689fe5f7, 0xfa6e127e6d0888, 0x19d6b6fe31d41f, 0x12e20fb5bfbdb71, 0x69ce89bbc394} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1bf, 0x197b, 0x1b4, 0x1a8a, 0xd22, 0x1cb5, 0x298, 0x76b, 0x16b6, 0x5aa, 0x54b, 0x1b63, 0x1d59, 0x2dc, 0xfe1, 0x1b24, 0x1725, 0x9a8, 0x2dd, 0x150f, 0x12de, 0x9d9, 0x2fd, 0x95f, 0xcc1, 0x1ffd, 0x101b, 0x707, 0x1d9d, 0x464, 0x39e, 0x97b, 0x8cf, 0x4a5, 0xed1, 0x9c3, 0x1b66, 0x1521, 0x112} +#elif RADIX == 32 +{0x10df9458, 0x141b4cbd, 0xd5a45a8, 0x1b58a639, 0x5aab5b1, 0x76c654b, 0x108b73ab, 0x125d923f, 0x5ba9a8b, 0xcb7aa1e, 0x1f17ea76, 0x1facc14a, 0x1c1e037f, 0x2327674, 0x1e97b1cf, 0x14494a8c, 0x1b3270dd, 0x50e} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x45a8a0da65ec37e5, 0xaad6c76b14c72d5a, 0x8b73ab3b632a596, 0xf16ea6a2e4bb247f, 0x295f17ea7665bd50, 0x3b3a70780dfff598, 0x929519e97b1cf119, 0x254876cc9c3768} +#else +{0x15141b4cbd86fca, 0xc76b14c72d5a45, 0x159db1952cb556b, 0xb92ec91fc22dce, 0xccb7aa1e2dd4d4, 0x1ff598295f17ea7, 0x188c9d9d383c06f, 0x1a24a5467a5ec73, 0x4a90ed99386e} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x4f71,0xd850,0x620a,0x55cc,0x31b0,0x4b74,0xab42,0x125c,0xb589,0xa634,0x73b3,0xca64,0x6ed0,0x6c41,0x6022,0x9b4a,0xb03e,0xe0d,0x1e19,0x2925,0x275e,0x42a,0x10dd,0xfc6a,0x7f51,0xb592,0x977e,0xe556,0x10bc,0x873d,0xa68f,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8504f71,0x55cc620a,0x4b7431b0,0x125cab42,0xa634b589,0xca6473b3,0x6c416ed0,0x9b4a6022,0xe0db03e,0x29251e19,0x42a275e,0xfc6a10dd,0xb5927f51,0xe556977e,0x873d10bc,0x3a68f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55cc620ad8504f71,0x125cab424b7431b0,0xca6473b3a634b589,0x9b4a60226c416ed0,0x29251e190e0db03e,0xfc6a10dd042a275e,0xe556977eb5927f51,0x3a68f873d10bc}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xed2,0x7e79,0xb784,0x90c5,0xe054,0xfc99,0x606e,0x59f4,0xb88f,0xb6ae,0x29bf,0xe3fc,0xa3cf,0x8c88,0xba94,0x4bf9,0xcb60,0xf771,0x31c0,0x7236,0x2401,0xa9e7,0xb9b8,0xfdbf,0x326e,0xb4d5,0x9bd6,0xc810,0xb7f0,0x661,0x1f62,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7e790ed2,0x90c5b784,0xfc99e054,0x59f4606e,0xb6aeb88f,0xe3fc29bf,0x8c88a3cf,0x4bf9ba94,0xf771cb60,0x723631c0,0xa9e72401,0xfdbfb9b8,0xb4d5326e,0xc8109bd6,0x661b7f0,0xf1f62}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x90c5b7847e790ed2,0x59f4606efc99e054,0xe3fc29bfb6aeb88f,0x4bf9ba948c88a3cf,0x723631c0f771cb60,0xfdbfb9b8a9e72401,0xc8109bd6b4d5326e,0xf1f620661b7f0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb407,0xc1a7,0x6715,0xe1ad,0x31eb,0x15e8,0x1594,0xbd8e,0x3976,0x2bae,0x8f7a,0x1636,0x8d19,0x88d2,0x12ae,0x1627,0xf2b0,0x4868,0xdd78,0xfa51,0xfd6b,0xa1f3,0x7eb3,0x35d9,0xd826,0x2c09,0x12a8,0x54dd,0x87d6,0xc29c,0x5bcd,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc1a7b407,0xe1ad6715,0x15e831eb,0xbd8e1594,0x2bae3976,0x16368f7a,0x88d28d19,0x162712ae,0x4868f2b0,0xfa51dd78,0xa1f3fd6b,0x35d97eb3,0x2c09d826,0x54dd12a8,0xc29c87d6,0x15bcd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe1ad6715c1a7b407,0xbd8e159415e831eb,0x16368f7a2bae3976,0x162712ae88d28d19,0xfa51dd784868f2b0,0x35d97eb3a1f3fd6b,0x54dd12a82c09d826,0x15bcdc29c87d6}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb08f,0x27af,0x9df5,0xaa33,0xce4f,0xb48b,0x54bd,0xeda3,0x4a76,0x59cb,0x8c4c,0x359b,0x912f,0x93be,0x9fdd,0x64b5,0x4fc1,0xf1f2,0xe1e6,0xd6da,0xd8a1,0xfbd5,0xef22,0x395,0x80ae,0x4a6d,0x6881,0x1aa9,0xef43,0x78c2,0x5970,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x27afb08f,0xaa339df5,0xb48bce4f,0xeda354bd,0x59cb4a76,0x359b8c4c,0x93be912f,0x64b59fdd,0xf1f24fc1,0xd6dae1e6,0xfbd5d8a1,0x395ef22,0x4a6d80ae,0x1aa96881,0x78c2ef43,0xc5970}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa339df527afb08f,0xeda354bdb48bce4f,0x359b8c4c59cb4a76,0x64b59fdd93be912f,0xd6dae1e6f1f24fc1,0x395ef22fbd5d8a1,0x1aa968814a6d80ae,0xc597078c2ef43}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = 
(mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe7eb,0x27c8,0x739b,0x6eaa,0x7a17,0xf593,0xac1c,0x4a84,0x1a27,0x7771,0xe67e,0xea3d,0x4596,0xa34b,0x8edd,0xc51c,0x7c15,0xd1a1,0x2551,0x481b,0x402e,0xfed0,0x8b82,0x1eab,0xc98b,0x20fa,0x7143,0x6abf,0x463a,0x475f,0x510f,0x9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27c8e7eb,0x6eaa739b,0xf5937a17,0x4a84ac1c,0x77711a27,0xea3de67e,0xa34b4596,0xc51c8edd,0xd1a17c15,0x481b2551,0xfed0402e,0x1eab8b82,0x20fac98b,0x6abf7143,0x475f463a,0x9510f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6eaa739b27c8e7eb,0x4a84ac1cf5937a17,0xea3de67e77711a27,0xc51c8edda34b4596,0x481b2551d1a17c15,0x1eab8b82fed0402e,0x6abf714320fac98b,0x9510f475f463a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9e28,0x9e31,0xdab6,0x138c,0xc3c0,0x5193,0x444d,0xb2b7,0xf371,0x5630,0xb08b,0xc700,0x2404,0x3f08,0xc3f,0xbd7c,0x963b,0xd892,0x7bb2,0x429d,0x19d8,0xf277,0x853d,0x9aac,0x9bfa,0x42cd,0xf5e8,0x9e40,0x8a41,0x15a8,0x9c23,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9e319e28,0x138cdab6,0x5193c3c0,0xb2b7444d,0x5630f371,0xc700b08b,0x3f082404,0xbd7c0c3f,0xd892963b,0x429d7bb2,0xf27719d8,0x9aac853d,0x42cd9bfa,0x9e40f5e8,0x15a88a41,0x69c23}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x138cdab69e319e28,0xb2b7444d5193c3c0,0xc700b08b5630f371,0xbd7c0c3f3f082404,0x429d7bb2d892963b,0x9aac853df27719d8,0x9e40f5e842cd9bfa,0x69c2315a88a41}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x66d1,0x8ee,0x9219,0x9d61,0x13a4,0xfc63,0xc3ee,0xdf2a,0x1353,0x2ef,0xc391,0x8ad8,0x953b,0xb014,0x1029,0xa4b2,0x61a3,0xfc07,0xf3a8,0x199c,0xe6c8,0x6a41,0x6eb7,0xb459,0xa187,0x2f4e,0x9ec3,0x8b4e,0x5321,0x38b,0x5b21,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8ee66d1,0x9d619219,0xfc6313a4,0xdf2ac3ee,0x2ef1353,0x8ad8c391,0xb014953b,0xa4b21029,0xfc0761a3,0x199cf3a8,0x6a41e6c8,0xb4596eb7,0x2f4ea187,0x8b4e9ec3,0x38b5321,0x35b21}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d61921908ee66d1,0xdf2ac3eefc6313a4,0x8ad8c39102ef1353,0xa4b21029b014953b,0x199cf3a8fc0761a3,0xb4596eb76a41e6c8,0x8b4e9ec32f4ea187,0x35b21038b5321}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1815,0xd837,0x8c64,0x9155,0x85e8,0xa6c,0x53e3,0xb57b,0xe5d8,0x888e,0x1981,0x15c2,0xba69,0x5cb4,0x7122,0x3ae3,0x83ea,0x2e5e,0xdaae,0xb7e4,0xbfd1,0x12f,0x747d,0xe154,0x3674,0xdf05,0x8ebc,0x9540,0xb9c5,0xb8a0,0xaef0,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8371815,0x91558c64,0xa6c85e8,0xb57b53e3,0x888ee5d8,0x15c21981,0x5cb4ba69,0x3ae37122,0x2e5e83ea,0xb7e4daae,0x12fbfd1,0xe154747d,0xdf053674,0x95408ebc,0xb8a0b9c5,0x6aef0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x91558c64d8371815,0xb57b53e30a6c85e8,0x15c21981888ee5d8,0x3ae371225cb4ba69,0xb7e4daae2e5e83ea,0xe154747d012fbfd1,0x95408ebcdf053674,0x6aef0b8a0b9c5}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 
32, ._mp_d = (mp_limb_t[]) {0x4f71,0xd850,0x620a,0x55cc,0x31b0,0x4b74,0xab42,0x125c,0xb589,0xa634,0x73b3,0xca64,0x6ed0,0x6c41,0x6022,0x9b4a,0xb03e,0xe0d,0x1e19,0x2925,0x275e,0x42a,0x10dd,0xfc6a,0x7f51,0xb592,0x977e,0xe556,0x10bc,0x873d,0xa68f,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8504f71,0x55cc620a,0x4b7431b0,0x125cab42,0xa634b589,0xca6473b3,0x6c416ed0,0x9b4a6022,0xe0db03e,0x29251e19,0x42a275e,0xfc6a10dd,0xb5927f51,0xe556977e,0x873d10bc,0x3a68f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55cc620ad8504f71,0x125cab424b7431b0,0xca6473b3a634b589,0x9b4a60226c416ed0,0x29251e190e0db03e,0xfc6a10dd042a275e,0xe556977eb5927f51,0x3a68f873d10bc}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xed2,0x7e79,0xb784,0x90c5,0xe054,0xfc99,0x606e,0x59f4,0xb88f,0xb6ae,0x29bf,0xe3fc,0xa3cf,0x8c88,0xba94,0x4bf9,0xcb60,0xf771,0x31c0,0x7236,0x2401,0xa9e7,0xb9b8,0xfdbf,0x326e,0xb4d5,0x9bd6,0xc810,0xb7f0,0x661,0x1f62,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7e790ed2,0x90c5b784,0xfc99e054,0x59f4606e,0xb6aeb88f,0xe3fc29bf,0x8c88a3cf,0x4bf9ba94,0xf771cb60,0x723631c0,0xa9e72401,0xfdbfb9b8,0xb4d5326e,0xc8109bd6,0x661b7f0,0xf1f62}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x90c5b7847e790ed2,0x59f4606efc99e054,0xe3fc29bfb6aeb88f,0x4bf9ba948c88a3cf,0x723631c0f771cb60,0xfdbfb9b8a9e72401,0xc8109bd6b4d5326e,0xf1f620661b7f0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb407,0xc1a7,0x6715,0xe1ad,0x31eb,0x15e8,0x1594,0xbd8e,0x3976,0x2bae,0x8f7a,0x1636,0x8d19,0x88d2,0x12ae,0x1627,0xf2b0,0x4868,0xdd78,0xfa51,0xfd6b,0xa1f3,0x7eb3,0x35d9,0xd826,0x2c09,0x12a8,0x54dd,0x87d6,0xc29c,0x5bcd,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc1a7b407,0xe1ad6715,0x15e831eb,0xbd8e1594,0x2bae3976,0x16368f7a,0x88d28d19,0x162712ae,0x4868f2b0,0xfa51dd78,0xa1f3fd6b,0x35d97eb3,0x2c09d826,0x54dd12a8,0xc29c87d6,0x15bcd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe1ad6715c1a7b407,0xbd8e159415e831eb,0x16368f7a2bae3976,0x162712ae88d28d19,0xfa51dd784868f2b0,0x35d97eb3a1f3fd6b,0x54dd12a82c09d826,0x15bcdc29c87d6}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb08f,0x27af,0x9df5,0xaa33,0xce4f,0xb48b,0x54bd,0xeda3,0x4a76,0x59cb,0x8c4c,0x359b,0x912f,0x93be,0x9fdd,0x64b5,0x4fc1,0xf1f2,0xe1e6,0xd6da,0xd8a1,0xfbd5,0xef22,0x395,0x80ae,0x4a6d,0x6881,0x1aa9,0xef43,0x78c2,0x5970,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27afb08f,0xaa339df5,0xb48bce4f,0xeda354bd,0x59cb4a76,0x359b8c4c,0x93be912f,0x64b59fdd,0xf1f24fc1,0xd6dae1e6,0xfbd5d8a1,0x395ef22,0x4a6d80ae,0x1aa96881,0x78c2ef43,0xc5970}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa339df527afb08f,0xeda354bdb48bce4f,0x359b8c4c59cb4a76,0x64b59fdd93be912f,0xd6dae1e6f1f24fc1,0x395ef22fbd5d8a1,0x1aa968814a6d80ae,0xc597078c2ef43}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb83a,0x5e7a,0x2c9b,0xd483,0xeff9,0x71e9,0x4a21,0x2eae,0x921,0xbb26,0x6bf2,0xb038,0xeac9,0xc05a,0xd498,0x34fb,0x7ca,0xaae9,0x2674,0x81de,0x471f,0x7dbe,0x88c9,0xa354,0x9f03,0x5301,0x9acc,0x7c82,0xc479,0x732,0xdc7b,0x8}}} +#elif GMP_LIMB_BITS 
== 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5e7ab83a,0xd4832c9b,0x71e9eff9,0x2eae4a21,0xbb260921,0xb0386bf2,0xc05aeac9,0x34fbd498,0xaae907ca,0x81de2674,0x7dbe471f,0xa35488c9,0x53019f03,0x7c829acc,0x732c479,0x8dc7b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd4832c9b5e7ab83a,0x2eae4a2171e9eff9,0xb0386bf2bb260921,0x34fbd498c05aeac9,0x81de2674aae907ca,0xa35488c97dbe471f,0x7c829acc53019f03,0x8dc7b0732c479}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 31, ._mp_d = (mp_limb_t[]) {0x4733,0xaeba,0xf3d4,0x84bf,0x453a,0xa71a,0xe0fa,0x4604,0xf02b,0x9bc2,0xb114,0x5fc5,0x5f8d,0x1a8d,0x2302,0x175d,0x3655,0x8351,0x51b,0x698c,0xc745,0x8c83,0xdd6a,0xdd4b,0x682f,0x80b7,0xd1fc,0xe320,0xca30,0xc1d3,0xc365}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xaeba4733,0x84bff3d4,0xa71a453a,0x4604e0fa,0x9bc2f02b,0x5fc5b114,0x1a8d5f8d,0x175d2302,0x83513655,0x698c051b,0x8c83c745,0xdd4bdd6a,0x80b7682f,0xe320d1fc,0xc1d3ca30,0xc365}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x84bff3d4aeba4733,0x4604e0faa71a453a,0x5fc5b1149bc2f02b,0x175d23021a8d5f8d,0x698c051b83513655,0xdd4bdd6a8c83c745,0xe320d1fc80b7682f,0xc365c1d3ca30}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe32c,0x5173,0xdcb0,0xe05d,0x3a7e,0x6e8c,0xfd38,0xbed7,0x5fe0,0xa986,0x26f1,0xedf0,0x8fc7,0x1dbc,0xa48e,0x2e70,0x6648,0xe767,0xe8c3,0xf05b,0x26aa,0x63b6,0xf8f6,0x5304,0x7042,0x7c93,0x54a2,0xe675,0xd3ea,0x2b1,0xb36e,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5173e32c,0xe05ddcb0,0x6e8c3a7e,0xbed7fd38,0xa9865fe0,0xedf026f1,0x1dbc8fc7,0x2e70a48e,0xe7676648,0xf05be8c3,0x63b626aa,0x5304f8f6,0x7c937042,0xe67554a2,0x2b1d3ea,0x8b36e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe05ddcb05173e32c,0xbed7fd386e8c3a7e,0xedf026f1a9865fe0,0x2e70a48e1dbc8fc7,0xf05be8c3e7676648,0x5304f8f663b626aa,0xe67554a27c937042,0x8b36e02b1d3ea}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x47c6,0xa185,0xd364,0x2b7c,0x1006,0x8e16,0xb5de,0xd151,0xf6de,0x44d9,0x940d,0x4fc7,0x1536,0x3fa5,0x2b67,0xcb04,0xf835,0x5516,0xd98b,0x7e21,0xb8e0,0x8241,0x7736,0x5cab,0x60fc,0xacfe,0x6533,0x837d,0x3b86,0xf8cd,0x2384,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa18547c6,0x2b7cd364,0x8e161006,0xd151b5de,0x44d9f6de,0x4fc7940d,0x3fa51536,0xcb042b67,0x5516f835,0x7e21d98b,0x8241b8e0,0x5cab7736,0xacfe60fc,0x837d6533,0xf8cd3b86,0x72384}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2b7cd364a18547c6,0xd151b5de8e161006,0x4fc7940d44d9f6de,0xcb042b673fa51536,0x7e21d98b5516f835,0x5cab77368241b8e0,0x837d6533acfe60fc,0x72384f8cd3b86}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x116d, 0x165f, 0x7d3, 0x488, 0xe08, 0xb12, 0x2e0, 0x18b4, 0xdbb, 0x181e, 0xe50, 0x19b8, 0xc17, 0x1c82, 0x1cf6, 0x7b9, 0xebb, 0x1c0a, 0x183a, 0x1f42, 0x1a3e, 0x176b, 0x19fa, 0x7e4, 0x1646, 0x1dc5, 0x1e9d, 0x4b3, 0x18df, 0xf2d, 0xe2e, 0x2e3, 0x903, 0x19ef, 0x1f94, 0x237, 0x18ef, 0x26c, 0x12f} +#elif RADIX == 32 +{0x18b69673, 0x107d3b2f, 0x49c1048, 0x5a0b816, 0x181e6dde, 0x1f370e50, 0x1b720982, 0xbb3dcf3, 0x1075c0a7, 0x1e8fbe85, 0x4cfd5da, 0x18b6463f, 0x12cfd3bd, 0x796e37c, 0x62e3717, 0x533de90, 0x7788dff, 0x2e6} +#elif RADIX == 64 +#if 
defined(SQISIGN_GF_IMPL_BROADWELL) +{0x104883e9d97e2da5, 0x79b778b41702c49c, 0xb720982f9b872860, 0x2c1d7029d767b9e7, 0xc7e4cfd5daf47df4, 0x71be4b3f4ef716c8, 0x67bd2062e37173cb, 0x1089b31de237fca} +#else +{0x9107d3b2fc5b4b, 0x178b41702c49c10, 0x17cdc394303cdb, 0x75d9ee79edc826, 0x15e8fbe8583ae05, 0x1716c8c7e4cfd5d, 0x19e5b8df259fa77, 0x1299ef4818b8dc5, 0x613663bc46ff} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x1c5d, 0x1d97, 0x1f4, 0x122, 0x1382, 0x2c4, 0xb8, 0x1e2d, 0x136e, 0x607, 0x394, 0x1e6e, 0x1305, 0x1720, 0xf3d, 0x19ee, 0x13ae, 0x1702, 0x160e, 0x17d0, 0x1e8f, 0x15da, 0x67e, 0x11f9, 0xd91, 0xf71, 0x1fa7, 0x192c, 0xe37, 0x13cb, 0x1b8b, 0x18b8, 0x1a40, 0x67b, 0x1fe5, 0x188d, 0x63b, 0x189b, 0x47b} +#elif RADIX == 32 +{0x1e2ed505, 0x41f4ecb, 0x11270412, 0x11682e05, 0x6079b77, 0x17cdc394, 0x1edc8260, 0x1aecf73c, 0xc1d7029, 0x17a3efa1, 0x1933f576, 0xe2d918f, 0x4b3f4ef, 0x19e5b8df, 0x18b8dc5, 0x194cf7a4, 0x11de237f, 0x159} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x41220fa765f8bb5, 0x1e6dde2d05c0b127, 0xedc8260be6e1ca18, 0xb075c0a75d9ee79, 0x31f933f576bd1f7d, 0xdc6f92cfd3bdc5b2, 0x99ef4818b8dc5cf2, 0x6e26cc7788dff2} +#else +{0x2441f4ecbf176a, 0x1de2d05c0b12704, 0x105f370e50c0f36, 0x9d767b9e7b7209, 0xd7a3efa160eb81, 0x1dc5b231f933f57, 0xe796e37c967e9d, 0x1ca67bd2062e371, 0xdc4d98ef11bf} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x51d, 0x1394, 0xcca, 0x1568, 0x1790, 0x11d6, 0x18aa, 0xe65, 0x1e8e, 0x4fe, 0xab9, 0x1496, 0x167d, 0x1b42, 0x1f85, 0x1d7a, 0x8c4, 0x17ea, 0x1269, 0x16, 0x1fbf, 0x8b5, 0x6f4, 0x1202, 0x17c4, 0x427, 0x1273, 0x14f, 0x49c, 0xfba, 0x1b3b, 0x13cd, 0x10ee, 0x634, 0x10ae, 0x2c4, 0x10b4, 0x1377, 0xfe} +#elif RADIX == 32 +{0x28e92dc, 0x10cca9ca, 0x15af2156, 0x132e2aa3, 0x4fef473, 0x1692cab9, 0x2ed0acf, 0xc4ebd7e, 0x4d37ea4, 0xfefc02d, 0x237a22d, 0x4f7c490, 0x53e4e64, 0x17dd1270, 0x1d3cdd9d, 0xb8c690e, 0x5a0b121, 0x1bc} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x215686654e50a3a4, 0xfbd1ce65c55475af, 0x2ed0acfb49655c93, 0x6934dfa9189d7afc, 0x920237a22d7f7e01, 0x893814f939909ef8, 0x18d21dd3cdd9dbee, 0x134dde1682c4857} +#else +{0xad0cca9ca14749, 0x1ce65c55475af21, 0x7da4b2ae49fde8, 0x46275ebf0bb42b, 0x1afefc02d269bf5, 0x109ef8920237a22, 0xdf7449c0a7c9cc, 0x15c6348774f3767, 0xb9bbc2d05890} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x17ab, 0x1e1a, 0x1bfe, 0x1f73, 0x1eb9, 0xf30, 0x1cca, 0x1aaf, 0xbea, 0xa1b, 0xb73, 0x86d, 0x1c13, 0x1c31, 0x1e6e, 0x1fbf, 0x968, 0x10f0, 0xb53, 0x1418, 0x11c6, 0x65f, 0x188, 0x2c7, 0x79b, 0xa9, 0xa92, 0x12b0, 0x1b53, 0x1564, 0xfa7, 0x1fd7, 0xa5b, 0xb32, 0x1bc8, 0xc90, 0x11ee, 0x1f6, 0x3f2} +#elif RADIX == 32 +{0xbd5cad1, 0x7bfef0d, 0xc3d73f7, 0x157f329e, 0xa1b5f56, 0xd0dab73, 0x1770c782, 0x168fdff9, 0x16a70f04, 0x1c71a830, 0x70c4197, 0x15279b16, 0xac15240, 0x1ab26d4e, 0x17fd77d3, 0x121664a5, 0xf732437, 0xa34} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x73f73dff786af572, 0x6d7d5aafe653cc3d, 0x770c782686d5b9a8, 0x85a9c3c12d1fbff3, 0x62c70c4197e38d41, 0x36a72b054902a4f3, 0x2cc94b7fd77d3d59, 0x1307da3dcc90de4} +#else +{0x1ee7bfef0d5eae5, 0x15aafe653cc3d73, 0x13436adcd436be, 0x4b47effcddc31e, 0xfc71a830b53878, 0x2a4f362c70c419, 0x1eac9b539582a48, 0x190b3252dff5df4, 0xb0fb47b9921b} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x166f, 0x4b7, 0x1268, 0x18f5, 0x10a9, 0x17ea, 0x105e, 0x1090, 0x1c31, 0x624, 0xec6, 0xea1, 0x17d2, 0xf55, 0x10d3, 0x8fb, 0x9ab, 0x1ae2, 0x952, 0xcab, 0x100d, 0x702, 0xc4d, 0x1387, 0x344, 0xdaf, 0x1566, 0xf8c, 0x1e1c, 0x6f1, 0x1af9, 0xf1, 0xd6d, 0xa06, 0xb5c, 0x62c, 0x2e9, 0x1131, 0x683} +#elif RADIX == 32 +{0x1b37fb85, 0xb26825b, 0x1aa1538f, 0x48417af, 0x624e18c, 0x9d42ec6, 0x9bd56fa, 0x1ab47dc3, 0x12a5ae24, 0x14035956, 0x76269c0, 0x15e3449c, 0x1e32accd, 0x1378f871, 0x1a0f1d7c, 0x17140cd6, 0x17498b16, 0x608} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x538f593412decdfe, 0x9386309082f5faa1, 0x9bd56fa4ea176318, 0xb4a96b893568fb86, 0x93876269c0a01aca, 0x7c38f8cab336bc68, 0x2819ada0f1d7c9bc, 0x17c4c45d262c5ae} +#else +{0x11eb26825bd9bfd, 0x309082f5faa153, 0x1d2750bb18c49c3, 0x4d5a3ee1a6f55b, 0x14035956952d71, 0x16bc6893876269c, 0x4de3e1c7c65599, 0xb8a066b683c75f, 0x148988ba4c58b} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x826, 0x1efe, 0xa95, 0x174d, 0x11b5, 0x1184, 0x1d4, 0x1024, 0x1d44, 0x349, 0x83c, 0x665, 0x4a2, 0x1288, 0x473, 0xa16, 0xe54, 0xafc, 0x6e2, 0x13f1, 0x217, 0x11e4, 0x1988, 0xe26, 0xd9a, 0x168f, 0x3d, 0x1436, 0x311, 0x148d, 0x168f, 0x1ad8, 0x1156, 0xb8, 0x193f, 0x1655, 0x279, 0x5cd, 0x65e} +#elif RADIX == 32 +{0x41378c1, 0x1aa95f7f, 0x1236b74, 0x1207523, 0x349ea24, 0x8cca83c, 0x19ca2094, 0x5450b11, 0xdc4afc7, 0x85e7e2, 0x6cc4479, 0x11ed9a71, 0x10d807b6, 0x1a468c46, 0xdad8b47, 0xfc17115, 0x13cd9572, 0xe8} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6b74d54afbf904de, 0x27a890240ea46123, 0x9ca2094466541e0d, 0x13712bf1ca8a1623, 0x4e26cc4479042f3f, 0x462343601eda3db3, 0x82e22adad8b47d23, 0x517344f3655c9f} +#else +{0xe9aa95f7f209bc, 0x90240ea461236b, 0xa2332a0f0693d4, 0x72a28588e72882, 0x12085e7e26e257e, 0x1a3db34e26cc447, 0x1e91a311a1b00f6, 0x7e0b88ab6b62d1, 0xa2e689e6cab9} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1aab, 0xe01, 0x1bf3, 0x122d, 0xd71, 0x34e, 0x153b, 0x1444, 0x1d19, 0x1165, 0x1496, 0x568, 0x12d4, 0x105c, 0x1129, 0x2c7, 0x1706, 0x359, 0x1a4f, 0x114, 0x758, 0x1780, 0x1617, 0x1485, 0x1147, 0xa4f, 0x1f77, 
0xf13, 0x1547, 0x103c, 0x352, 0x125d, 0xb1e, 0x1526, 0x1708, 0xfb5, 0x17bf, 0x1d55, 0x6bc} +#elif RADIX == 32 +{0x1d55ffc5, 0x1bbf3700, 0x139ae322, 0x2254ec6, 0x1165e8cd, 0x10ad1496, 0x14c1725a, 0x106163c4, 0x149e359b, 0x1d60229, 0x5b0bde0, 0x9f147a4, 0x1c4feeea, 0x81e551d, 0x1d25d1a9, 0x22a4cb1, 0x1dfbed6e, 0x72d} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xe322ddf9b807557f, 0x97a33444a9d8d39a, 0x4c1725a8568a4b45, 0x4d278d66e0c2c789, 0xf485b0bde00eb011, 0x2a8ef13fbba93e28, 0x549963d25d1a940f, 0x197556f7efb5b84} +#else +{0x45bbf3700eaaff, 0x13444a9d8d39ae3, 0xd42b4525a2cbd1, 0x1b830b1e25305c9, 0x1d60229a4f1ac, 0x93e28f485b0bde, 0xa079547789fddd, 0x1152658f49746a, 0x17eaadefdf6b7} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x204, 0x9f6, 0x1dba, 0x110e, 0x6ea, 0x112a, 0xa11, 0xd06, 0x15aa, 0x1f0b, 0xeec, 0xef1, 0x1edc, 0x1604, 0x65b, 0x129, 0x39d, 0x8f8, 0x5d5, 0x672, 0x150a, 0x233, 0xc20, 0x12ba, 0x1855, 0x15a6, 0xd50, 0x1c71, 0x15b7, 0xf04, 0x579, 0x16d2, 0xbac, 0x4c9, 0xaf5, 0x514, 0xf27, 0xef, 0x36a} +#elif RADIX == 32 +{0x10240be, 0x1ddba4fb, 0xa8dd510, 0x8328462, 0x1f0bad53, 0x11de2eec, 0xdd813db, 0x19d09499, 0xbaa8f81, 0x1d428ce4, 0x1a61008c, 0x14d85595, 0x11c5aa15, 0x178256df, 0x196d22bc, 0x1d4992ba, 0x19394515, 0x27b} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xd510eedd27d84090, 0x2eb54d06508c4a8d, 0xdd813db8ef17767c, 0x22eaa3e073a12932, 0xb2ba61008cea1467, 0x2b6fc716a8569b0a, 0x93257596d22bcbc1, 0x503bde4e51457a} +#else +{0x21ddba4fb08120, 0x14d06508c4a8dd5, 0xdc778bbb3e175a, 0x1ce84a4cb7604f, 0x19d428ce45d547c, 0x169b0ab2ba61008, 0x5e095b7e38b542, 0x1ea4c95d65b48af, 0xa077bc9ca28a} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2ce5,0xf14f,0xd32e,0x18c2,0x5217,0x2833,0xa7fc,0x4090,0xd9d5,0x4d89,0xe770,0x3801,0xbca0,0x6ac9,0x5432,0x2ee5,0x4356,0x827a,0xdf1a,0x8911,0x4102,0xf05a,0x3e5,0x18b9,0xf66f,0x77ad,0xd99b,0xea2f,0xa030,0x36bb,0xe071,0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf14f2ce5,0x18c2d32e,0x28335217,0x4090a7fc,0x4d89d9d5,0x3801e770,0x6ac9bca0,0x2ee55432,0x827a4356,0x8911df1a,0xf05a4102,0x18b903e5,0x77adf66f,0xea2fd99b,0x36bba030,0xde071}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x18c2d32ef14f2ce5,0x4090a7fc28335217,0x3801e7704d89d9d5,0x2ee554326ac9bca0,0x8911df1a827a4356,0x18b903e5f05a4102,0xea2fd99b77adf66f,0xde07136bba030}}} 
+#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x752,0x8c67,0x260,0x14f,0x138a,0x865,0x5e4d,0x4db9,0xca26,0x7a2c,0x4a7e,0x9422,0xb2f6,0xbdc8,0x841d,0x7e33,0x5677,0x38a6,0x9633,0x15fc,0xacce,0xbe04,0xdcef,0xb16c,0xd57b,0x7a5e,0x8395,0x1f8f,0xe4b9,0xe383,0x4be3,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8c670752,0x14f0260,0x865138a,0x4db95e4d,0x7a2cca26,0x94224a7e,0xbdc8b2f6,0x7e33841d,0x38a65677,0x15fc9633,0xbe04acce,0xb16cdcef,0x7a5ed57b,0x1f8f8395,0xe383e4b9,0x64be3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x14f02608c670752,0x4db95e4d0865138a,0x94224a7e7a2cca26,0x7e33841dbdc8b2f6,0x15fc963338a65677,0xb16cdcefbe04acce,0x1f8f83957a5ed57b,0x64be3e383e4b9}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9b63,0x40f2,0x5177,0x535f,0x5098,0x2f26,0x18a2,0x5e1c,0x5d70,0x33e9,0x1db9,0x2397,0xaccc,0xb98b,0xcae6,0x87bf,0xd43f,0xb329,0xf2d0,0x2f67,0x779c,0xd2ab,0x4369,0xc67d,0xd065,0x9550,0xf06b,0x1a2b,0xc6e,0x9b2b,0xbb64,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x40f29b63,0x535f5177,0x2f265098,0x5e1c18a2,0x33e95d70,0x23971db9,0xb98baccc,0x87bfcae6,0xb329d43f,0x2f67f2d0,0xd2ab779c,0xc67d4369,0x9550d065,0x1a2bf06b,0x9b2b0c6e,0x3bb64}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x535f517740f29b63,0x5e1c18a22f265098,0x23971db933e95d70,0x87bfcae6b98baccc,0x2f67f2d0b329d43f,0xc67d4369d2ab779c,0x1a2bf06b9550d065,0x3bb649b2b0c6e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xd31b,0xeb0,0x2cd1,0xe73d,0xade8,0xd7cc,0x5803,0xbf6f,0x262a,0xb276,0x188f,0xc7fe,0x435f,0x9536,0xabcd,0xd11a,0xbca9,0x7d85,0x20e5,0x76ee,0xbefd,0xfa5,0xfc1a,0xe746,0x990,0x8852,0x2664,0x15d0,0x5fcf,0xc944,0x1f8e,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xeb0d31b,0xe73d2cd1,0xd7ccade8,0xbf6f5803,0xb276262a,0xc7fe188f,0x9536435f,0xd11aabcd,0x7d85bca9,0x76ee20e5,0xfa5befd,0xe746fc1a,0x88520990,0x15d02664,0xc9445fcf,0x21f8e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe73d2cd10eb0d31b,0xbf6f5803d7ccade8,0xc7fe188fb276262a,0xd11aabcd9536435f,0x76ee20e57d85bca9,0xe746fc1a0fa5befd,0x15d0266488520990,0x21f8ec9445fcf}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x4222,0xe40c,0x843f,0x3518,0x72d1,0xa757,0xb4e5,0x4347,0x3326,0xc267,0x30d,0xb77e,0x9907,0xcb8c,0xd175,0x8cf2,0x5440,0xb876,0x2316,0xa715,0xf0ab,0x9e96,0xa72f,0xcd7f,0x1e06,0xa42f,0x985f,0xdc2d,0xd9ee,0xe71e,0x2ae0,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe40c4222,0x3518843f,0xa75772d1,0x4347b4e5,0xc2673326,0xb77e030d,0xcb8c9907,0x8cf2d175,0xb8765440,0xa7152316,0x9e96f0ab,0xcd7fa72f,0xa42f1e06,0xdc2d985f,0xe71ed9ee,0x82ae0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3518843fe40c4222,0x4347b4e5a75772d1,0xb77e030dc2673326,0x8cf2d175cb8c9907,0xa7152316b8765440,0xcd7fa72f9e96f0ab,0xdc2d985fa42f1e06,0x82ae0e71ed9ee}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x11ac,0x1c90,0x6c62,0x15fd,0x1924,0x5851,0x60c6,0x744c,0x80fd,0xa6b,0x5654,0x51a1,0x6589,0x803f,0xf265,0x4132,0x96d2,0x7497,0xcf0b,0x65,0x2e51,0x2bc,0x4203,0x3aad,0x1f2,0x5b40,0xcc1a,0x67e4,0xdfd3,0xba17,0x7a8c,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1c9011ac,0x15fd6c62,0x58511924,0x744c60c6,0xa6b80fd,0x51a15654,0x803f6589,0x4132f265,0x749796d2,0x65cf0b,0x2bc2e51,0x3aad4203,0x5b4001f2,0x67e4cc1a,0xba17dfd3,0x37a8c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x15fd6c621c9011ac,0x744c60c658511924,0x51a156540a6b80fd,0x4132f265803f6589,0x65cf0b749796d2,0x3aad420302bc2e51,0x67e4cc1a5b4001f2,0x37a8cba17dfd3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x99f9,0x50f4,0xd750,0xb0a2,0xfdaa,0x6986,0x6b4b,0x34be,0x7bd5,0x3974,0xe05,0x8c18,0x6bb8,0xbb5a,0xcc33,0x63b5,0x943b,0xec49,0xb4ef,0xbdc4,0x5a2a,0x2fc8,0x85ad,0x1291,0xa29f,0x9618,0x721b,0x93f6,0xb40f,0x2e85,0xdfbb,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x50f499f9,0xb0a2d750,0x6986fdaa,0x34be6b4b,0x39747bd5,0x8c180e05,0xbb5a6bb8,0x63b5cc33,0xec49943b,0xbdc4b4ef,0x2fc85a2a,0x129185ad,0x9618a29f,0x93f6721b,0x2e85b40f,0xbdfbb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb0a2d75050f499f9,0x34be6b4b6986fdaa,0x8c180e0539747bd5,0x63b5cc33bb5a6bb8,0xbdc4b4efec49943b,0x129185ad2fc85a2a,0x93f6721b9618a29f,0xbdfbb2e85b40f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf4c0,0x4ff5,0x2aee,0x3e90,0x49,0xb2af,0xf257,0x111c,0xead0,0xc1d5,0xc7d9,0x8a7c,0x9579,0xf62,0xe1f6,0xb43c,0x8f3f,0x14ca,0x1b7b,0xc209,0xac8,0xf5cd,0xdfc0,0x5d39,0x9d8d,0x9c9a,0x2e6e,0xba54,0x79d5,0x4f02,0x1cfc,0x2}}} +#elif GMP_LIMB_BITS == 32 
+{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4ff5f4c0,0x3e902aee,0xb2af0049,0x111cf257,0xc1d5ead0,0x8a7cc7d9,0xf629579,0xb43ce1f6,0x14ca8f3f,0xc2091b7b,0xf5cd0ac8,0x5d39dfc0,0x9c9a9d8d,0xba542e6e,0x4f0279d5,0x21cfc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e902aee4ff5f4c0,0x111cf257b2af0049,0x8a7cc7d9c1d5ead0,0xb43ce1f60f629579,0xc2091b7b14ca8f3f,0x5d39dfc0f5cd0ac8,0xba542e6e9c9a9d8d,0x21cfc4f0279d5}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1eb,0x1730,0x3343,0xcef3,0x2add,0x7615,0x353e,0xd52b,0x9951,0xc1,0x2292,0x69d0,0x4a9f,0xc1bd,0xfec7,0xd332,0x72b7,0x67f8,0xaa27,0x61a4,0x33dd,0x8ec0,0xfe1d,0x9a69,0x38ac,0x60f,0x209b,0xbb33,0x55b1,0x13f5,0x5c80,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x173001eb,0xcef33343,0x76152add,0xd52b353e,0xc19951,0x69d02292,0xc1bd4a9f,0xd332fec7,0x67f872b7,0x61a4aa27,0x8ec033dd,0x9a69fe1d,0x60f38ac,0xbb33209b,0x13f555b1,0xc5c80}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcef33343173001eb,0xd52b353e76152add,0x69d0229200c19951,0xd332fec7c1bd4a9f,0x61a4aa2767f872b7,0x9a69fe1d8ec033dd,0xbb33209b060f38ac,0xc5c8013f555b1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x6607,0xaf0b,0x28af,0x4f5d,0x255,0x9679,0x94b4,0xcb41,0x842a,0xc68b,0xf1fa,0x73e7,0x9447,0x44a5,0x33cc,0x9c4a,0x6bc4,0x13b6,0x4b10,0x423b,0xa5d5,0xd037,0x7a52,0xed6e,0x5d60,0x69e7,0x8de4,0x6c09,0x4bf0,0xd17a,0x2044,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xaf0b6607,0x4f5d28af,0x96790255,0xcb4194b4,0xc68b842a,0x73e7f1fa,0x44a59447,0x9c4a33cc,0x13b66bc4,0x423b4b10,0xd037a5d5,0xed6e7a52,0x69e75d60,0x6c098de4,0xd17a4bf0,0x42044}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4f5d28afaf0b6607,0xcb4194b496790255,0x73e7f1fac68b842a,0x9c4a33cc44a59447,0x423b4b1013b66bc4,0xed6e7a52d037a5d5,0x6c098de469e75d60,0x42044d17a4bf0}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2ce5,0xf14f,0xd32e,0x18c2,0x5217,0x2833,0xa7fc,0x4090,0xd9d5,0x4d89,0xe770,0x3801,0xbca0,0x6ac9,0x5432,0x2ee5,0x4356,0x827a,0xdf1a,0x8911,0x4102,0xf05a,0x3e5,0x18b9,0xf66f,0x77ad,0xd99b,0xea2f,0xa030,0x36bb,0xe071,0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf14f2ce5,0x18c2d32e,0x28335217,0x4090a7fc,0x4d89d9d5,0x3801e770,0x6ac9bca0,0x2ee55432,0x827a4356,0x8911df1a,0xf05a4102,0x18b903e5,0x77adf66f,0xea2fd99b,0x36bba030,0xde071}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x18c2d32ef14f2ce5,0x4090a7fc28335217,0x3801e7704d89d9d5,0x2ee554326ac9bca0,0x8911df1a827a4356,0x18b903e5f05a4102,0xea2fd99b77adf66f,0xde07136bba030}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x752,0x8c67,0x260,0x14f,0x138a,0x865,0x5e4d,0x4db9,0xca26,0x7a2c,0x4a7e,0x9422,0xb2f6,0xbdc8,0x841d,0x7e33,0x5677,0x38a6,0x9633,0x15fc,0xacce,0xbe04,0xdcef,0xb16c,0xd57b,0x7a5e,0x8395,0x1f8f,0xe4b9,0xe383,0x4be3,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8c670752,0x14f0260,0x865138a,0x4db95e4d,0x7a2cca26,0x94224a7e,0xbdc8b2f6,0x7e33841d,0x38a65677,0x15fc9633,0xbe04acce,0xb16cdcef,0x7a5ed57b,0x1f8f8395,0xe383e4b9,0x64be3}}} +#elif 
GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x14f02608c670752,0x4db95e4d0865138a,0x94224a7e7a2cca26,0x7e33841dbdc8b2f6,0x15fc963338a65677,0xb16cdcefbe04acce,0x1f8f83957a5ed57b,0x64be3e383e4b9}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9b63,0x40f2,0x5177,0x535f,0x5098,0x2f26,0x18a2,0x5e1c,0x5d70,0x33e9,0x1db9,0x2397,0xaccc,0xb98b,0xcae6,0x87bf,0xd43f,0xb329,0xf2d0,0x2f67,0x779c,0xd2ab,0x4369,0xc67d,0xd065,0x9550,0xf06b,0x1a2b,0xc6e,0x9b2b,0xbb64,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x40f29b63,0x535f5177,0x2f265098,0x5e1c18a2,0x33e95d70,0x23971db9,0xb98baccc,0x87bfcae6,0xb329d43f,0x2f67f2d0,0xd2ab779c,0xc67d4369,0x9550d065,0x1a2bf06b,0x9b2b0c6e,0x3bb64}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x535f517740f29b63,0x5e1c18a22f265098,0x23971db933e95d70,0x87bfcae6b98baccc,0x2f67f2d0b329d43f,0xc67d4369d2ab779c,0x1a2bf06b9550d065,0x3bb649b2b0c6e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xd31b,0xeb0,0x2cd1,0xe73d,0xade8,0xd7cc,0x5803,0xbf6f,0x262a,0xb276,0x188f,0xc7fe,0x435f,0x9536,0xabcd,0xd11a,0xbca9,0x7d85,0x20e5,0x76ee,0xbefd,0xfa5,0xfc1a,0xe746,0x990,0x8852,0x2664,0x15d0,0x5fcf,0xc944,0x1f8e,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xeb0d31b,0xe73d2cd1,0xd7ccade8,0xbf6f5803,0xb276262a,0xc7fe188f,0x9536435f,0xd11aabcd,0x7d85bca9,0x76ee20e5,0xfa5befd,0xe746fc1a,0x88520990,0x15d02664,0xc9445fcf,0x21f8e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe73d2cd10eb0d31b,0xbf6f5803d7ccade8,0xc7fe188fb276262a,0xd11aabcd9536435f,0x76ee20e57d85bca9,0xe746fc1a0fa5befd,0x15d0266488520990,0x21f8ec9445fcf}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2111,0xf206,0x421f,0x9a8c,0xb968,0xd3ab,0xda72,0x21a3,0x9993,0xe133,0x186,0xdbbf,0x4c83,0xe5c6,0x68ba,0x4679,0x2a20,0x5c3b,0x918b,0xd38a,0x7855,0xcf4b,0xd397,0x66bf,0x8f03,0xd217,0xcc2f,0x6e16,0x6cf7,0x738f,0x1570,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf2062111,0x9a8c421f,0xd3abb968,0x21a3da72,0xe1339993,0xdbbf0186,0xe5c64c83,0x467968ba,0x5c3b2a20,0xd38a918b,0xcf4b7855,0x66bfd397,0xd2178f03,0x6e16cc2f,0x738f6cf7,0x41570}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9a8c421ff2062111,0x21a3da72d3abb968,0xdbbf0186e1339993,0x467968bae5c64c83,0xd38a918b5c3b2a20,0x66bfd397cf4b7855,0x6e16cc2fd2178f03,0x41570738f6cf7}}} +#endif +}, { +#if 
0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x8d6,0xe48,0xb631,0xafe,0x8c92,0x2c28,0x3063,0xba26,0xc07e,0x535,0xab2a,0xa8d0,0xb2c4,0xc01f,0x7932,0x2099,0xcb69,0xba4b,0xe785,0x8032,0x1728,0x815e,0xa101,0x1d56,0xf9,0x2da0,0x660d,0xb3f2,0xefe9,0x5d0b,0xbd46,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe4808d6,0xafeb631,0x2c288c92,0xba263063,0x535c07e,0xa8d0ab2a,0xc01fb2c4,0x20997932,0xba4bcb69,0x8032e785,0x815e1728,0x1d56a101,0x2da000f9,0xb3f2660d,0x5d0befe9,0x1bd46}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafeb6310e4808d6,0xba2630632c288c92,0xa8d0ab2a0535c07e,0x20997932c01fb2c4,0x8032e785ba4bcb69,0x1d56a101815e1728,0xb3f2660d2da000f9,0x1bd465d0befe9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x129d,0xdd4c,0xe2b2,0xca3b,0x6c0b,0x9c8b,0x68f9,0x412,0x51a8,0x7583,0xae25,0xb80d,0x35d5,0x387b,0x4ba1,0x66e1,0x754,0xf6b6,0x3d8c,0x650,0xa955,0x214f,0xc05f,0x16d2,0x9ce4,0x246f,0x123e,0x3ed3,0xa07f,0x2e24,0x8964,0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdd4c129d,0xca3be2b2,0x9c8b6c0b,0x41268f9,0x758351a8,0xb80dae25,0x387b35d5,0x66e14ba1,0xf6b60754,0x6503d8c,0x214fa955,0x16d2c05f,0x246f9ce4,0x3ed3123e,0x2e24a07f,0x58964}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xca3be2b2dd4c129d,0x41268f99c8b6c0b,0xb80dae25758351a8,0x66e14ba1387b35d5,0x6503d8cf6b60754,0x16d2c05f214fa955,0x3ed3123e246f9ce4,0x589642e24a07f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9e3f,0xbc60,0xa44c,0x253c,0xa75e,0xa9f9,0x326f,0x9f9f,0x14aa,0xa47f,0x3889,0x5ee3,0x87d,0x933f,0x6cba,0x6222,0xcd43,0xa8c9,0xa815,0x992a,0x643a,0xc1d3,0x4cff,0xf675,0xf30b,0x7e2a,0x5248,0xb9e4,0xa454,0x2c53,0x525b,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbc609e3f,0x253ca44c,0xa9f9a75e,0x9f9f326f,0xa47f14aa,0x5ee33889,0x933f087d,0x62226cba,0xa8c9cd43,0x992aa815,0xc1d3643a,0xf6754cff,0x7e2af30b,0xb9e45248,0x2c53a454,0x3525b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x253ca44cbc609e3f,0x9f9f326fa9f9a75e,0x5ee33889a47f14aa,0x62226cba933f087d,0x992aa815a8c9cd43,0xf6754cffc1d3643a,0xb9e452487e2af30b,0x3525b2c53a454}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x584d,0xa517,0xb681,0x45de,0xc2ea,0x7c58,0x123,0xe0fd,0xfd80,0x6c5b,0xf669,0xddc5,0xb21a,0xcaa9,0xc7a0,0x37ec,0xf8c6,0x12e7,0xe984,0xe812,0xef9f,0x128a,0x9fca,0x41f5,0x118f,0x5c32,0xf1cf,0x78c5,0x9424,0x2ae3,0x60d2,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa517584d,0x45deb681,0x7c58c2ea,0xe0fd0123,0x6c5bfd80,0xddc5f669,0xcaa9b21a,0x37ecc7a0,0x12e7f8c6,0xe812e984,0x128aef9f,0x41f59fca,0x5c32118f,0x78c5f1cf,0x2ae39424,0x260d2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x45deb681a517584d,0xe0fd01237c58c2ea,0xddc5f6696c5bfd80,0x37ecc7a0caa9b21a,0xe812e98412e7f8c6,0x41f59fca128aef9f,0x78c5f1cf5c32118f,0x260d22ae39424}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xed63,0x22b3,0x1d4d,0x35c4,0x93f4,0x6374,0x9706,0xfbed,0xae57,0x8a7c,0x51da,0x47f2,0xca2a,0xc784,0xb45e,0x991e,0xf8ab,0x949,0xc273,0xf9af,0x56aa,0xdeb0,0x3fa0,0xe92d,0x631b,0xdb90,0xedc1,0xc12c,0x5f80,0xd1db,0x769b,0xa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x22b3ed63,0x35c41d4d,0x637493f4,0xfbed9706,0x8a7cae57,0x47f251da,0xc784ca2a,0x991eb45e,0x949f8ab,0xf9afc273,0xdeb056aa,0xe92d3fa0,0xdb90631b,0xc12cedc1,0xd1db5f80,0xa769b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x35c41d4d22b3ed63,0xfbed9706637493f4,0x47f251da8a7cae57,0x991eb45ec784ca2a,0xf9afc2730949f8ab,0xe92d3fa0deb056aa,0xc12cedc1db90631b,0xa769bd1db5f80}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x1414, 0x1912, 0xddb, 0xb9, 0x121b, 0x195d, 0x6c5, 0xe75, 0x1d16, 0xcaf, 0x1a05, 0x15cc, 0x1537, 0x1c57, 0x141a, 0x1b9d, 0x5b0, 0x9e7, 0xf10, 0x300, 0x1d5c, 0xc13, 0x245, 0x1b91, 0x352, 0x730, 0xd55, 0x1ca3, 0x1dac, 0x7b0, 0x15b9, 0x1ba6, 0x7d6, 0xba4, 0xc12, 0x649, 0xf1, 0xd51, 0x107} +#elif RADIX == 32 +{0xa0a1383, 0x12ddbc89, 0x1764360b, 0x13a9b172, 0xcafe8b3, 0x1eb99a05, 0xd715ea6, 0x1b0dced0, 0x1e209e72, 0x1f570600, 0x11122b04, 0x60352dc, 0x128daaa7, 0x13d876b3, 0xdba6adc, 0x497487d, 0x7899258, 0x208} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x360b96ede44a8284, 0xbfa2ce75362e5764, 0xd715ea6f5ccd02b2, 0x788279cb61b9da0, 0x5b91122b04fab830, 0x3b59ca36aa9cc06a, 0x2e90fadba6adc9ec, 0x17b5441e2649609} +#else +{0x172ddbc8950509, 0xce75362e576436, 0x137ae6681595fd1, 0x12d86e76835c57a, 0x9f570600f104f3, 0x1cc06a5b91122b0, 0x4f61dace51b554, 0x24ba43eb6e9ab7, 0x146a883c4c92c} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} 
+#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x1507, 0x1e44, 0xb76, 0x182e, 0xc86, 0xe57, 0x9b1, 0x139d, 0x1f45, 0xb2b, 0x681, 0x1d73, 0x1d4d, 0x1715, 0xd06, 0x6e7, 0x196c, 0x279, 0x3c4, 0xc0, 0x1f57, 0xb04, 0x891, 0x16e4, 0xd4, 0x9cc, 0x1b55, 0x728, 0x76b, 0x9ec, 0x156e, 0x16e9, 0x1f5, 0x12e9, 0xb04, 0x992, 0x83c, 0x1b54, 0x2c1} +#elif RADIX == 32 +{0xa83b449, 0x1cb76f22, 0x15d90d82, 0x1cea6c5c, 0xb2bfa2c, 0x17ae6681, 0x35c57a9, 0x16c373b4, 0x788279c, 0x7d5c180, 0x4448ac1, 0x1980d4b7, 0x1ca36aa9, 0x4f61dac, 0xb6e9ab7, 0x125d21f, 0x1e26496, 0x122} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xd82e5bb7912a0ed, 0xafe8b39d4d8b95d9, 0x35c57a9bd73340ac, 0x1e209e72d86e768, 0x96e4448ac13eae0c, 0xed6728daaa7301a, 0x4ba43eb6e9ab727b, 0x1ed51078992582} +#else +{0x105cb76f22541da, 0xb39d4d8b95d90d, 0x14deb99a05657f4, 0x1cb61b9da0d715e, 0x27d5c1803c413c, 0x7301a96e4448ac, 0x193d876b3946d55, 0x92e90fadba6ad, 0x3daa20f1324b} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0xa07, 0x1f97, 0x13c4, 0xb69, 0x15ec, 0x161d, 0x194, 0x135c, 0xe18, 0x119a, 0x684, 0x199, 0x1a93, 0x906, 0x62e, 0x1ad4, 0xc99, 0x40b, 0x10df, 0xf12, 0x9ee, 0x93, 0x1837, 0x42d, 0x1ea3, 0x1967, 0x1d41, 0x422, 0x2d5, 0x17d0, 0x1550, 0x1c2d, 0x139a, 0x152b, 0xa57, 0x1072, 0x13bf, 0x1fe7, 0x57a} +#elif RADIX == 32 +{0x1503e7ec, 0x133c4fcb, 0x76bd8b6, 0x1ae0652c, 0x119a70c4, 0xc332684, 0x17241b52, 0x99d6a18, 0x1be40b6, 0x1a7b9e25, 0xdc1b824, 0xcfea321, 0x108ba839, 0xbe80b54, 
0x15c2daa8, 0x15ea5739, 0x1dfc1c94, 0xd3c} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xd8b699e27e5d40f9, 0x69c3135c0ca5876b, 0x7241b52619934246, 0x286f902d933ad431, 0x642dc1b824d3dcf1, 0x5aa422ea0e59fd4, 0xd4ae735c2daa85f4, 0x1a7f9e77f07252b} +#else +{0x16d33c4fcba81f3, 0x1135c0ca5876bd8, 0x930cc9a12334e1, 0x164ceb50c5c906d, 0x9a7b9e250df205, 0x59fd4642dc1b82, 0x2fa02d52117507, 0xaf52b9cd70b6aa, 0x19ff3cefe0e4a} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x2, 0x2d8, 0x113e, 0xa74, 0x660, 0x141f, 0x64f, 0x885, 0x46, 0x17b9, 0x94f, 0x1b44, 0x361, 0xbf6, 0x1f17, 0x583, 0x18b3, 0x118e, 0x9ba, 0x49f, 0x1fc3, 0x13eb, 0x11c8, 0xcc8, 0x1b2d, 0x8c, 0x9c6, 0x1d9, 0xf33, 0x53d, 0x129a, 0x1b4a, 0x65, 0x169a, 0xe74, 0x544, 0x17e3, 0x1f0f, 0x2a6} +#elif RADIX == 32 +{0x1324b, 0x913e16c, 0x7ccc0a7, 0x42993e8, 0x17b90232, 0x768894f, 0xbafd86c, 0xb32c1fc, 0x137518ec, 0x1ff0c93e, 0x88e44fa, 0x119b2d66, 0x76538c0, 0x29ebccc, 0xbb4a94d, 0x1d2d3406, 0x1f19511c, 0x3fd} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xc0a7489f0b60004c, 0xe408c885327d07cc, 0xbafd86c3b444a7de, 0xf4dd463b166583f8, 0xacc88e44faff8649, 0x5e661d94e3023365, 0x5a680cbb4a94d14f, 0xf7c3efc654473a} +#else +{0x14e913e16c00099, 0xc885327d07ccc0, 0x161da2253ef7204, 0xc59960fe2ebf61, 0x15ff0c93e9ba8c7, 0x23365acc88e44f, 0x8a7af330eca718, 0xe969a032ed2a53, 0x3f87df8ca88e} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0xe6e, 0xc55, 0xb5a, 0x1be4, 0x10f8, 0x1175, 0x1ada, 0x13de, 0xa0d, 0x1cb, 0x6f3, 0x91f, 0x70c, 0x12ef, 0x1403, 0x115a, 0x1205, 0x1705, 0xb8a, 0x490, 0x681, 0x1a6f, 0xd49, 0x2ca, 0x7e2, 0x1ad8, 0x1aa6, 0x9e8, 0x1f0f, 0x1df, 0xc32, 0xd30, 0x1a34, 0xfc4, 0x1519, 0x1cde, 0x7c9, 0x12da, 0x157} +#elif RADIX == 32 +{0x17371973, 0x8b5a62a, 0x1d61f1be, 0x1ef6b6a2, 0x1cb506c, 0x1123e6f3, 0x1cbbce1, 0x58ad50, 0x17157059, 0x19a04920, 0xa6a4e9b, 0x1b07e216, 0x7a354da, 0xeffc3d, 0x8d30619, 0x65f89a3, 0x1e4f37aa, 0x651} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf1be45ad3155cdc6, 0x2d41b3ded6d45d61, 0x1cbbce1891f37987, 0x5c55c1640b15aa0, 0x42ca6a4e9bcd0249, 0xfe1e9e8d536b60fc, 0xbf13468d30619077, 0x9cb68f93cdea8c} +#else +{0x17c8b5a62ab9b8c, 0x1b3ded6d45d61f1, 0x10c48f9bcc396a0, 0x1902c56a8072ef3, 0x179a04920b8ab82, 0xb60fc42ca6a4e9, 0x83bff0f4f46a9b, 0x32fc4d1a34c186, 0x1396d1f279bd5} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xc71, 0x167c, 0x1de2, 0x708, 0xb78, 0x1797, 0x16d0, 0xc73, 0x1f29, 0x1014, 0x1753, 0x1dd9, 0x1326, 0xab2, 0x1e6e, 
0x51a, 0x32d, 0x7c1, 0x127b, 0x1b08, 0xcd4, 0x5fd, 0x159a, 0xb2c, 0x137d, 0x28f, 0xc4f, 0x121a, 0x16dd, 0x1771, 0xa7b, 0x11b9, 0xe86, 0x199c, 0x1cb5, 0x2db, 0x14b3, 0x1e97, 0x7b} +#elif RADIX == 32 +{0x638892e, 0x11de2b3e, 0x5d6f070, 0x39db42f, 0x1014f94b, 0x1bbb3753, 0x172aca64, 0x12d28d79, 0x4f67c11, 0xb353611, 0xcacd17f, 0x11f37d59, 0x86989e2, 0x1bb8db76, 0xd1b953d, 0xd7338e8, 0x598b6f9, 0x7bd} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf0708ef159f18e22, 0x53e52c73b685e5d6, 0x72aca64ddd9ba9c0, 0x893d9f0465a51af3, 0xab2cacd17f59a9b0, 0x6dbb21a6278a3e6f, 0xe671d0d1b953dddc, 0x7fa5e9662dbe5a} +#else +{0xe11de2b3e31c44, 0x12c73b685e5d6f0, 0x126eecdd4e029f2, 0x1196946bcdcab29, 0x1eb35361127b3e0, 0xa3e6fab2cacd17, 0xeee36dd90d313c, 0x16b99c74346e54f, 0xff4bd2cc5b7c} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x111d, 0x19ac, 0x1a8f, 0xc58, 0xaa, 0xdc, 0x13de, 0x1dc, 0x17a6, 0x1e3d, 0x198a, 0x40a, 0x120b, 0x17ba, 0x91c, 0x1858, 0xee4, 0x33b, 0x18aa, 0x1124, 0x5f8, 0x37d, 0xf3e, 0xa4b, 0x1e1, 0x2bd, 0x1ff2, 0x1a56, 0x1168, 0x739, 0x1fee, 0x190c, 0x13e9, 0xd07, 0x17fd, 0x1b9e, 0x198b, 0x1faa, 0xd2} +#elif RADIX == 32 +{0x88e8fa0, 0x11a8fcd6, 0x170154c5, 0xee4f781, 0x1e3dbd30, 0xc81598a, 0xe5eea41, 0xe4c2c24, 0x115433b7, 0x97e2249, 0xb79f0df, 0x17a1e152, 0x95bfe42, 0x39cc5a3, 0x1390cff7, 0x1f5a0f3e, 0xc5ee7af, 0xd56} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x54c58d47e6b223a3, 0xf6f4c1dc9ef03701, 0xe5eea41640acc578, 0x4c550ceddc985848, 0x2a4b79f0df4bf112, 0x62d1a56ff90af43c, 0xb41e7d390cff71ce, 0x187eab317b9ebfe} +#else +{0x18b1a8fcd644747, 0xc1dc9ef0370154, 0xb205662bc7b7a, 0x177261612397ba9, 0x1e97e22498aa19d, 0xaf43c2a4b79f0d, 0x18e73168d2b7fc8, 0x1fad079f4e433fd, 0x15fd5662f73d7} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x63c, 0x609, 0x89c, 0x1f09, 0x9c9, 0x1e89, 0x1826, 0x1460, 0x15d6, 0xa52, 0xbb2, 0x1b93, 0x1f90, 0xa2f, 0x3b3, 0x1a76, 0x1c29, 0x17fc, 0x864, 0x55a, 0x1a9b, 0x7fa, 0x7ee, 0x75f, 0x1b4b, 0x15e6, 0xd75, 0x1238, 0x847, 0x1711, 0x9e7, 0xa37, 0x4b6, 0x1264, 0x3e1, 0xf87, 0x1c47, 0x706, 0x20b} +#elif RADIX == 32 +{0x131e26c1, 0x1289c304, 0x25393f0, 0x30609bd, 0xa52aeb5, 0x3726bb2, 0x19a8bff2, 0x29d3b0e, 0x10c97fce, 0x16a6cab4, 0x1f3f71fe, 0x1cdb4b3a, 0x8e1aeb5, 0x1b88a11e, 0xca374f3, 0x1864c84b, 0x23be1c7, 0xab7} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x93f0944e1824c789, 0x4abad460c137a253, 0x9a8bff21b935d929, 0xa4325ff3853a761d, 0x675f3f71feb53655, 0x508f2386bad79b69, 
0xc99096ca374f3dc4, 0x129c1b88ef871f0} +#else +{0x1e1289c30498f13, 0xd460c137a25393, 0x190dc9aec94a55d, 0xe14e9d8766a2ff, 0x1d6a6cab4864bfe, 0x179b69675f3f71f, 0x1ee2284791c35d6, 0x1c326425b28dd3c, 0xa383711df0e3} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xfe45,0x561e,0xa798,0xc163,0xedaa,0x1fff,0xb3c0,0xda12,0x2588,0x5123,0x2390,0xface,0x6e59,0x651b,0xa21d,0xb190,0xe1b0,0x21b0,0x6030,0xd81,0x6542,0x64b0,0x8cef,0xa91c,0xe0bd,0xd947,0xe27b,0x961a,0x2a3e,0xf20a,0xed0,0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x561efe45,0xc163a798,0x1fffedaa,0xda12b3c0,0x51232588,0xface2390,0x651b6e59,0xb190a21d,0x21b0e1b0,0xd816030,0x64b06542,0xa91c8cef,0xd947e0bd,0x961ae27b,0xf20a2a3e,0x50ed0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc163a798561efe45,0xda12b3c01fffedaa,0xface239051232588,0xb190a21d651b6e59,0xd81603021b0e1b0,0xa91c8cef64b06542,0x961ae27bd947e0bd,0x50ed0f20a2a3e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xff6,0x4b77,0xed4e,0x3538,0xc757,0x3304,0xd547,0xc49f,0x801b,0xb2ff,0x4796,0xcad,0x58be,0x969c,0x3e77,0x83e1,0xa371,0x7d43,0x6664,0x3720,0x6c98,0xe4e5,0x74dc,0xebbc,0xaab5,0xc7e0,0x3147,0x5bc9,0x537e,0x7ff5,0xb400,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4b770ff6,0x3538ed4e,0x3304c757,0xc49fd547,0xb2ff801b,0xcad4796,0x969c58be,0x83e13e77,0x7d43a371,0x37206664,0xe4e56c98,0xebbc74dc,0xc7e0aab5,0x5bc93147,0x7ff5537e,0xcb400}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3538ed4e4b770ff6,0xc49fd5473304c757,0xcad4796b2ff801b,0x83e13e77969c58be,0x372066647d43a371,0xebbc74dce4e56c98,0x5bc93147c7e0aab5,0xcb4007ff5537e}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x77eb,0xce58,0xfaa0,0x429c,0xc7dd,0xbe89,0x68a,0xc196,0xd249,0x63ae,0xa04a,0xbc4e,0xbbb5,0x64ad,0xfc2b,0x4738,0x4d4d,0x918a,0x6b55,0x459d,0xf5ab,0x748c,0x208a,0xfe42,0x21d2,0x3785,0x23d7,0x28cc,0xfd9f,0x106a,0x3406,0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xce5877eb,0x429cfaa0,0xbe89c7dd,0xc196068a,0x63aed249,0xbc4ea04a,0x64adbbb5,0x4738fc2b,0x918a4d4d,0x459d6b55,0x748cf5ab,0xfe42208a,0x378521d2,0x28cc23d7,0x106afd9f,0xd3406}}} +#elif GMP_LIMB_BITS == 64 
+{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x429cfaa0ce5877eb,0xc196068abe89c7dd,0xbc4ea04a63aed249,0x4738fc2b64adbbb5,0x459d6b55918a4d4d,0xfe42208a748cf5ab,0x28cc23d7378521d2,0xd3406106afd9f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1bb,0xa9e1,0x5867,0x3e9c,0x1255,0xe000,0x4c3f,0x25ed,0xda77,0xaedc,0xdc6f,0x531,0x91a6,0x9ae4,0x5de2,0x4e6f,0x1e4f,0xde4f,0x9fcf,0xf27e,0x9abd,0x9b4f,0x7310,0x56e3,0x1f42,0x26b8,0x1d84,0x69e5,0xd5c1,0xdf5,0xf12f,0xa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa9e101bb,0x3e9c5867,0xe0001255,0x25ed4c3f,0xaedcda77,0x531dc6f,0x9ae491a6,0x4e6f5de2,0xde4f1e4f,0xf27e9fcf,0x9b4f9abd,0x56e37310,0x26b81f42,0x69e51d84,0xdf5d5c1,0xaf12f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e9c5867a9e101bb,0x25ed4c3fe0001255,0x531dc6faedcda77,0x4e6f5de29ae491a6,0xf27e9fcfde4f1e4f,0x56e373109b4f9abd,0x69e51d8426b81f42,0xaf12f0df5d5c1}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS 
== 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x39f7,0x51a0,0x71ea,0x7557,0x794c,0x6b5e,0x6a81,0x9aa7,0xd8dd,0xab85,0xe387,0x2121,0x1086,0x7989,0xe273,0xf813,0xebd5,0xb13f,0x9ef5,0xc6d5,0x2da2,0x14f8,0xecf3,0x24c4,0xf485,0xc8de,0xb9ef,0xb213,0xbc4d,0xe587,0xd591,0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x51a039f7,0x755771ea,0x6b5e794c,0x9aa76a81,0xab85d8dd,0x2121e387,0x79891086,0xf813e273,0xb13febd5,0xc6d59ef5,0x14f82da2,0x24c4ecf3,0xc8def485,0xb213b9ef,0xe587bc4d,0xdd591}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x755771ea51a039f7,0x9aa76a816b5e794c,0x2121e387ab85d8dd,0xf813e27379891086,0xc6d59ef5b13febd5,0x24c4ecf314f82da2,0xb213b9efc8def485,0xdd591e587bc4d}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc5d4,0x133f,0xc116,0x2a9e,0xacf5,0xaedd,0x6173,0xdacf,0x6448,0xa33e,0x6d36,0x5013,0x2093,0x59f6,0xe571,0x906d,0x37c9,0xe4ab,0xb92a,0xbe30,0x1d49,0xde58,0xffc8,0x47ff,0xe0cb,0x6230,0x6128,0x8679,0x731c,0xc5e,0x66c7,0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x133fc5d4,0x2a9ec116,0xaeddacf5,0xdacf6173,0xa33e6448,0x50136d36,0x59f62093,0x906de571,0xe4ab37c9,0xbe30b92a,0xde581d49,0x47ffffc8,0x6230e0cb,0x86796128,0xc5e731c,0xd66c7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2a9ec116133fc5d4,0xdacf6173aeddacf5,0x50136d36a33e6448,0x906de57159f62093,0xbe30b92ae4ab37c9,0x47ffffc8de581d49,0x867961286230e0cb,0xd66c70c5e731c}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x55ad,0x2e3e,0xd0dc,0x8dad,0x4e0a,0xe1d0,0x3e27,0x81af,0x1bb4,0xa5fa,0x52f2,0x5bd4,0x2b9b,0xddfe,0x36,0xbdd4,0xf99a,0x3027,0x21d2,0x7b29,0x10ee,0x2146,0x6864,0xec5c,0x6bbd,0x540f,0xbc15,0xe4a1,0xee,0x3d9c,0xdf51,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2e3e55ad,0x8dadd0dc,0xe1d04e0a,0x81af3e27,0xa5fa1bb4,0x5bd452f2,0xddfe2b9b,0xbdd40036,0x3027f99a,0x7b2921d2,0x214610ee,0xec5c6864,0x540f6bbd,0xe4a1bc15,0x3d9c00ee,0x4df51}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8dadd0dc2e3e55ad,0x81af3e27e1d04e0a,0x5bd452f2a5fa1bb4,0xbdd40036ddfe2b9b,0x7b2921d23027f99a,0xec5c6864214610ee,0xe4a1bc15540f6bbd,0x4df513d9c00ee}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xc609,0xae5f,0x8e15,0x8aa8,0x86b3,0x94a1,0x957e,0x6558,0x2722,0x547a,0x1c78,0xdede,0xef79,0x8676,0x1d8c,0x7ec,0x142a,0x4ec0,0x610a,0x392a,0xd25d,0xeb07,0x130c,0xdb3b,0xb7a,0x3721,0x4610,0x4dec,0x43b2,0x1a78,0x2a6e,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xae5fc609,0x8aa88e15,0x94a186b3,0x6558957e,0x547a2722,0xdede1c78,0x8676ef79,0x7ec1d8c,0x4ec0142a,0x392a610a,0xeb07d25d,0xdb3b130c,0x37210b7a,0x4dec4610,0x1a7843b2,0x22a6e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8aa88e15ae5fc609,0x6558957e94a186b3,0xdede1c78547a2722,0x7ec1d8c8676ef79,0x392a610a4ec0142a,0xdb3b130ceb07d25d,0x4dec461037210b7a,0x22a6e1a7843b2}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xfe45,0x561e,0xa798,0xc163,0xedaa,0x1fff,0xb3c0,0xda12,0x2588,0x5123,0x2390,0xface,0x6e59,0x651b,0xa21d,0xb190,0xe1b0,0x21b0,0x6030,0xd81,0x6542,0x64b0,0x8cef,0xa91c,0xe0bd,0xd947,0xe27b,0x961a,0x2a3e,0xf20a,0xed0,0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x561efe45,0xc163a798,0x1fffedaa,0xda12b3c0,0x51232588,0xface2390,0x651b6e59,0xb190a21d,0x21b0e1b0,0xd816030,0x64b06542,0xa91c8cef,0xd947e0bd,0x961ae27b,0xf20a2a3e,0x50ed0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc163a798561efe45,0xda12b3c01fffedaa,0xface239051232588,0xb190a21d651b6e59,0xd81603021b0e1b0,0xa91c8cef64b06542,0x961ae27bd947e0bd,0x50ed0f20a2a3e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xff6,0x4b77,0xed4e,0x3538,0xc757,0x3304,0xd547,0xc49f,0x801b,0xb2ff,0x4796,0xcad,0x58be,0x969c,0x3e77,0x83e1,0xa371,0x7d43,0x6664,0x3720,0x6c98,0xe4e5,0x74dc,0xebbc,0xaab5,0xc7e0,0x3147,0x5bc9,0x537e,0x7ff5,0xb400,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4b770ff6,0x3538ed4e,0x3304c757,0xc49fd547,0xb2ff801b,0xcad4796,0x969c58be,0x83e13e77,0x7d43a371,0x37206664,0xe4e56c98,0xebbc74dc,0xc7e0aab5,0x5bc93147,0x7ff5537e,0xcb400}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3538ed4e4b770ff6,0xc49fd5473304c757,0xcad4796b2ff801b,0x83e13e77969c58be,0x372066647d43a371,0xebbc74dce4e56c98,0x5bc93147c7e0aab5,0xcb4007ff5537e}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x77eb,0xce58,0xfaa0,0x429c,0xc7dd,0xbe89,0x68a,0xc196,0xd249,0x63ae,0xa04a,0xbc4e,0xbbb5,0x64ad,0xfc2b,0x4738,0x4d4d,0x918a,0x6b55,0x459d,0xf5ab,0x748c,0x208a,0xfe42,0x21d2,0x3785,0x23d7,0x28cc,0xfd9f,0x106a,0x3406,0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xce5877eb,0x429cfaa0,0xbe89c7dd,0xc196068a,0x63aed249,0xbc4ea04a,0x64adbbb5,0x4738fc2b,0x918a4d4d,0x459d6b55,0x748cf5ab,0xfe42208a,0x378521d2,0x28cc23d7,0x106afd9f,0xd3406}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x429cfaa0ce5877eb,0xc196068abe89c7dd,0xbc4ea04a63aed249,0x4738fc2b64adbbb5,0x459d6b55918a4d4d,0xfe42208a748cf5ab,0x28cc23d7378521d2,0xd3406106afd9f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1bb,0xa9e1,0x5867,0x3e9c,0x1255,0xe000,0x4c3f,0x25ed,0xda77,0xaedc,0xdc6f,0x531,0x91a6,0x9ae4,0x5de2,0x4e6f,0x1e4f,0xde4f,0x9fcf,0xf27e,0x9abd,0x9b4f,0x7310,0x56e3,0x1f42,0x26b8,0x1d84,0x69e5,0xd5c1,0xdf5,0xf12f,0xa}}} +#elif GMP_LIMB_BITS == 32 
+{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa9e101bb,0x3e9c5867,0xe0001255,0x25ed4c3f,0xaedcda77,0x531dc6f,0x9ae491a6,0x4e6f5de2,0xde4f1e4f,0xf27e9fcf,0x9b4f9abd,0x56e37310,0x26b81f42,0x69e51d84,0xdf5d5c1,0xaf12f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e9c5867a9e101bb,0x25ed4c3fe0001255,0x531dc6faedcda77,0x4e6f5de29ae491a6,0xf27e9fcfde4f1e4f,0x56e373109b4f9abd,0x69e51d8426b81f42,0xaf12f0df5d5c1}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +#elif 
GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9e37,0x619b,0xa159,0x8865,0xab15,0x85c2,0xb3b,0x57ce,0x8108,0xa8d6,0xfeb0,0x8cf0,0xef13,0xc7e1,0x6936,0xc3a9,0xd8f2,0x9c5d,0x7c68,0x7ba2,0xf4da,0x4c63,0x845b,0x22eb,0xbedd,0x37a0,0x24f3,0x7019,0x2855,0x6905,0xb81c,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x619b9e37,0x8865a159,0x85c2ab15,0x57ce0b3b,0xa8d68108,0x8cf0feb0,0xc7e1ef13,0xc3a96936,0x9c5dd8f2,0x7ba27c68,0x4c63f4da,0x22eb845b,0x37a0bedd,0x701924f3,0x69052855,0x3b81c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8865a159619b9e37,0x57ce0b3b85c2ab15,0x8cf0feb0a8d68108,0xc3a96936c7e1ef13,0x7ba27c689c5dd8f2,0x22eb845b4c63f4da,0x701924f337a0bedd,0x3b81c69052855}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x92b5,0x1309,0xc1ee,0xadd1,0x165,0x4911,0xaf0c,0x4a4f,0x5374,0xd4b2,0x926f,0xacc0,0xfd2f,0xeb63,0x7c68,0xc188,0x41ce,0x152e,0x6cfe,0x9a22,0xadb,0x933,0x438c,0x5fef,0xe17a,0x82aa,0x7732,0x8c5b,0xfa7b,0x4cd4,0xdcee,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x130992b5,0xadd1c1ee,0x49110165,0x4a4faf0c,0xd4b25374,0xacc0926f,0xeb63fd2f,0xc1887c68,0x152e41ce,0x9a226cfe,0x9330adb,0x5fef438c,0x82aae17a,0x8c5b7732,0x4cd4fa7b,0x6dcee}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xadd1c1ee130992b5,0x4a4faf0c49110165,0xacc0926fd4b25374,0xc1887c68eb63fd2f,0x9a226cfe152e41ce,0x5fef438c09330adb,0x8c5b773282aae17a,0x6dcee4cd4fa7b}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x77b7,0xc00c,0x743e,0x91b3,0xc92c,0x3be,0xc9e8,0x4b6b,0x519c,0xed1b,0x857f,0x2be7,0x2270,0x64a0,0x3a21,0xd5ec,0xd5d1,0x2392,0x175a,0xa58f,0x5c36,0x3908,0x5f46,0x1875,0xee40,0xcd4a,0x7e0b,0x8eda,0x87e0,0xc28c,0x6e24,0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc00c77b7,0x91b3743e,0x3bec92c,0x4b6bc9e8,0xed1b519c,0x2be7857f,0x64a02270,0xd5ec3a21,0x2392d5d1,0xa58f175a,0x39085c36,0x18755f46,0xcd4aee40,0x8eda7e0b,0xc28c87e0,0xd6e24}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x91b3743ec00c77b7,0x4b6bc9e803bec92c,0x2be7857fed1b519c,0xd5ec3a2164a02270,0xa58f175a2392d5d1,0x18755f4639085c36,0x8eda7e0bcd4aee40,0xd6e24c28c87e0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x61c9,0x9e64,0x5ea6,0x779a,0x54ea,0x7a3d,0xf4c4,0xa831,0x7ef7,0x5729,0x14f,0x730f,0x10ec,0x381e,0x96c9,0x3c56,0x270d,0x63a2,0x8397,0x845d,0xb25,0xb39c,0x7ba4,0xdd14,0x4122,0xc85f,0xdb0c,0x8fe6,0xd7aa,0x96fa,0x47e3,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9e6461c9,0x779a5ea6,0x7a3d54ea,0xa831f4c4,0x57297ef7,0x730f014f,0x381e10ec,0x3c5696c9,0x63a2270d,0x845d8397,0xb39c0b25,0xdd147ba4,0xc85f4122,0x8fe6db0c,0x96fad7aa,0xc47e3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x779a5ea69e6461c9,0xa831f4c47a3d54ea,0x730f014f57297ef7,0x3c5696c9381e10ec,0x845d839763a2270d,0xdd147ba4b39c0b25,0x8fe6db0cc85f4122,0xc47e396fad7aa}}} +#endif +}}}}; 
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.h new file mode 100644 index 0000000000..1cc782a5bd --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.h @@ -0,0 +1,31 @@ +#ifndef ENDOMORPHISM_ACTION_H +#define ENDOMORPHISM_ACTION_H +#include +#include +#include +/** Type for precomputed endomorphism rings applied to precomputed torsion bases. + * + * Precomputed by the precompute scripts. + * + * @typedef curve_with_endomorphism_ring_t + * + * @struct curve_with_endomorphism_ring + **/ +typedef struct curve_with_endomorphism_ring { + ec_curve_t curve; + ec_basis_t basis_even; + ibz_mat_2x2_t action_i, action_j, action_k; + ibz_mat_2x2_t action_gen2, action_gen3, action_gen4; +} curve_with_endomorphism_ring_t; +#define CURVE_E0 (CURVES_WITH_ENDOMORPHISMS->curve) +#define BASIS_EVEN (CURVES_WITH_ENDOMORPHISMS->basis_even) +#define ACTION_I (CURVES_WITH_ENDOMORPHISMS->action_i) +#define ACTION_J (CURVES_WITH_ENDOMORPHISMS->action_j) +#define ACTION_K (CURVES_WITH_ENDOMORPHISMS->action_k) +#define ACTION_GEN2 (CURVES_WITH_ENDOMORPHISMS->action_gen2) +#define ACTION_GEN3 (CURVES_WITH_ENDOMORPHISMS->action_gen3) +#define ACTION_GEN4 (CURVES_WITH_ENDOMORPHISMS->action_gen4) +#define NUM_ALTERNATE_STARTING_CURVES 6 +#define ALTERNATE_STARTING_CURVES (CURVES_WITH_ENDOMORPHISMS+1) +extern const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7]; +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fips202.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fips202.c new file mode 100644 index 0000000000..f2992d8c7f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fips202.c @@ -0,0 +1,876 @@ +// SPDX-License-Identifier: PD and Apache-2.0 + +/* FIPS202 implementation based on code from PQClean, + * which is in turn based based on the public domain implementation in + * crypto_hash/keccakc512/simple/ from http://bench.cr.yp.to/supercop.html + * by Ronny Van Keer + * and the public domain "TweetFips202" implementation + * from https://twitter.com/tweetfips202 + * by Gilles Van Assche, Daniel J. 
Bernstein, and Peter Schwabe */ + +#include +#include +#include +#include + +#include "fips202.h" + +#define NROUNDS 24 +#define ROL(a, offset) (((a) << (offset)) ^ ((a) >> (64 - (offset)))) + +/************************************************* + * Name: load64 + * + * Description: Load 8 bytes into uint64_t in little-endian order + * + * Arguments: - const uint8_t *x: pointer to input byte array + * + * Returns the loaded 64-bit unsigned integer + **************************************************/ +static uint64_t load64(const uint8_t *x) { + uint64_t r = 0; + for (size_t i = 0; i < 8; ++i) { + r |= (uint64_t)x[i] << 8 * i; + } + + return r; +} + +/************************************************* + * Name: store64 + * + * Description: Store a 64-bit integer to a byte array in little-endian order + * + * Arguments: - uint8_t *x: pointer to the output byte array + * - uint64_t u: input 64-bit unsigned integer + **************************************************/ +static void store64(uint8_t *x, uint64_t u) { + for (size_t i = 0; i < 8; ++i) { + x[i] = (uint8_t) (u >> 8 * i); + } +} + +/* Keccak round constants */ +static const uint64_t KeccakF_RoundConstants[NROUNDS] = { + 0x0000000000000001ULL, 0x0000000000008082ULL, + 0x800000000000808aULL, 0x8000000080008000ULL, + 0x000000000000808bULL, 0x0000000080000001ULL, + 0x8000000080008081ULL, 0x8000000000008009ULL, + 0x000000000000008aULL, 0x0000000000000088ULL, + 0x0000000080008009ULL, 0x000000008000000aULL, + 0x000000008000808bULL, 0x800000000000008bULL, + 0x8000000000008089ULL, 0x8000000000008003ULL, + 0x8000000000008002ULL, 0x8000000000000080ULL, + 0x000000000000800aULL, 0x800000008000000aULL, + 0x8000000080008081ULL, 0x8000000000008080ULL, + 0x0000000080000001ULL, 0x8000000080008008ULL +}; + +/************************************************* + * Name: KeccakF1600_StatePermute + * + * Description: The Keccak F1600 Permutation + * + * Arguments: - uint64_t *state: pointer to input/output Keccak state + **************************************************/ +static void KeccakF1600_StatePermute(uint64_t *state) { + int round; + + uint64_t Aba, Abe, Abi, Abo, Abu; + uint64_t Aga, Age, Agi, Ago, Agu; + uint64_t Aka, Ake, Aki, Ako, Aku; + uint64_t Ama, Ame, Ami, Amo, Amu; + uint64_t Asa, Ase, Asi, Aso, Asu; + uint64_t BCa, BCe, BCi, BCo, BCu; + uint64_t Da, De, Di, Do, Du; + uint64_t Eba, Ebe, Ebi, Ebo, Ebu; + uint64_t Ega, Ege, Egi, Ego, Egu; + uint64_t Eka, Eke, Eki, Eko, Eku; + uint64_t Ema, Eme, Emi, Emo, Emu; + uint64_t Esa, Ese, Esi, Eso, Esu; + + // copyFromState(A, state) + Aba = state[0]; + Abe = state[1]; + Abi = state[2]; + Abo = state[3]; + Abu = state[4]; + Aga = state[5]; + Age = state[6]; + Agi = state[7]; + Ago = state[8]; + Agu = state[9]; + Aka = state[10]; + Ake = state[11]; + Aki = state[12]; + Ako = state[13]; + Aku = state[14]; + Ama = state[15]; + Ame = state[16]; + Ami = state[17]; + Amo = state[18]; + Amu = state[19]; + Asa = state[20]; + Ase = state[21]; + Asi = state[22]; + Aso = state[23]; + Asu = state[24]; + + for (round = 0; round < NROUNDS; round += 2) { + // prepareTheta + BCa = Aba ^ Aga ^ Aka ^ Ama ^ Asa; + BCe = Abe ^ Age ^ Ake ^ Ame ^ Ase; + BCi = Abi ^ Agi ^ Aki ^ Ami ^ Asi; + BCo = Abo ^ Ago ^ Ako ^ Amo ^ Aso; + BCu = Abu ^ Agu ^ Aku ^ Amu ^ Asu; + + // thetaRhoPiChiIotaPrepareTheta(round , A, E) + Da = BCu ^ ROL(BCe, 1); + De = BCa ^ ROL(BCi, 1); + Di = BCe ^ ROL(BCo, 1); + Do = BCi ^ ROL(BCu, 1); + Du = BCo ^ ROL(BCa, 1); + + Aba ^= Da; + BCa = Aba; + Age ^= De; + BCe = ROL(Age, 44); + Aki ^= Di; + BCi = 
ROL(Aki, 43); + Amo ^= Do; + BCo = ROL(Amo, 21); + Asu ^= Du; + BCu = ROL(Asu, 14); + Eba = BCa ^ ((~BCe) & BCi); + Eba ^= KeccakF_RoundConstants[round]; + Ebe = BCe ^ ((~BCi) & BCo); + Ebi = BCi ^ ((~BCo) & BCu); + Ebo = BCo ^ ((~BCu) & BCa); + Ebu = BCu ^ ((~BCa) & BCe); + + Abo ^= Do; + BCa = ROL(Abo, 28); + Agu ^= Du; + BCe = ROL(Agu, 20); + Aka ^= Da; + BCi = ROL(Aka, 3); + Ame ^= De; + BCo = ROL(Ame, 45); + Asi ^= Di; + BCu = ROL(Asi, 61); + Ega = BCa ^ ((~BCe) & BCi); + Ege = BCe ^ ((~BCi) & BCo); + Egi = BCi ^ ((~BCo) & BCu); + Ego = BCo ^ ((~BCu) & BCa); + Egu = BCu ^ ((~BCa) & BCe); + + Abe ^= De; + BCa = ROL(Abe, 1); + Agi ^= Di; + BCe = ROL(Agi, 6); + Ako ^= Do; + BCi = ROL(Ako, 25); + Amu ^= Du; + BCo = ROL(Amu, 8); + Asa ^= Da; + BCu = ROL(Asa, 18); + Eka = BCa ^ ((~BCe) & BCi); + Eke = BCe ^ ((~BCi) & BCo); + Eki = BCi ^ ((~BCo) & BCu); + Eko = BCo ^ ((~BCu) & BCa); + Eku = BCu ^ ((~BCa) & BCe); + + Abu ^= Du; + BCa = ROL(Abu, 27); + Aga ^= Da; + BCe = ROL(Aga, 36); + Ake ^= De; + BCi = ROL(Ake, 10); + Ami ^= Di; + BCo = ROL(Ami, 15); + Aso ^= Do; + BCu = ROL(Aso, 56); + Ema = BCa ^ ((~BCe) & BCi); + Eme = BCe ^ ((~BCi) & BCo); + Emi = BCi ^ ((~BCo) & BCu); + Emo = BCo ^ ((~BCu) & BCa); + Emu = BCu ^ ((~BCa) & BCe); + + Abi ^= Di; + BCa = ROL(Abi, 62); + Ago ^= Do; + BCe = ROL(Ago, 55); + Aku ^= Du; + BCi = ROL(Aku, 39); + Ama ^= Da; + BCo = ROL(Ama, 41); + Ase ^= De; + BCu = ROL(Ase, 2); + Esa = BCa ^ ((~BCe) & BCi); + Ese = BCe ^ ((~BCi) & BCo); + Esi = BCi ^ ((~BCo) & BCu); + Eso = BCo ^ ((~BCu) & BCa); + Esu = BCu ^ ((~BCa) & BCe); + + // prepareTheta + BCa = Eba ^ Ega ^ Eka ^ Ema ^ Esa; + BCe = Ebe ^ Ege ^ Eke ^ Eme ^ Ese; + BCi = Ebi ^ Egi ^ Eki ^ Emi ^ Esi; + BCo = Ebo ^ Ego ^ Eko ^ Emo ^ Eso; + BCu = Ebu ^ Egu ^ Eku ^ Emu ^ Esu; + + // thetaRhoPiChiIotaPrepareTheta(round+1, E, A) + Da = BCu ^ ROL(BCe, 1); + De = BCa ^ ROL(BCi, 1); + Di = BCe ^ ROL(BCo, 1); + Do = BCi ^ ROL(BCu, 1); + Du = BCo ^ ROL(BCa, 1); + + Eba ^= Da; + BCa = Eba; + Ege ^= De; + BCe = ROL(Ege, 44); + Eki ^= Di; + BCi = ROL(Eki, 43); + Emo ^= Do; + BCo = ROL(Emo, 21); + Esu ^= Du; + BCu = ROL(Esu, 14); + Aba = BCa ^ ((~BCe) & BCi); + Aba ^= KeccakF_RoundConstants[round + 1]; + Abe = BCe ^ ((~BCi) & BCo); + Abi = BCi ^ ((~BCo) & BCu); + Abo = BCo ^ ((~BCu) & BCa); + Abu = BCu ^ ((~BCa) & BCe); + + Ebo ^= Do; + BCa = ROL(Ebo, 28); + Egu ^= Du; + BCe = ROL(Egu, 20); + Eka ^= Da; + BCi = ROL(Eka, 3); + Eme ^= De; + BCo = ROL(Eme, 45); + Esi ^= Di; + BCu = ROL(Esi, 61); + Aga = BCa ^ ((~BCe) & BCi); + Age = BCe ^ ((~BCi) & BCo); + Agi = BCi ^ ((~BCo) & BCu); + Ago = BCo ^ ((~BCu) & BCa); + Agu = BCu ^ ((~BCa) & BCe); + + Ebe ^= De; + BCa = ROL(Ebe, 1); + Egi ^= Di; + BCe = ROL(Egi, 6); + Eko ^= Do; + BCi = ROL(Eko, 25); + Emu ^= Du; + BCo = ROL(Emu, 8); + Esa ^= Da; + BCu = ROL(Esa, 18); + Aka = BCa ^ ((~BCe) & BCi); + Ake = BCe ^ ((~BCi) & BCo); + Aki = BCi ^ ((~BCo) & BCu); + Ako = BCo ^ ((~BCu) & BCa); + Aku = BCu ^ ((~BCa) & BCe); + + Ebu ^= Du; + BCa = ROL(Ebu, 27); + Ega ^= Da; + BCe = ROL(Ega, 36); + Eke ^= De; + BCi = ROL(Eke, 10); + Emi ^= Di; + BCo = ROL(Emi, 15); + Eso ^= Do; + BCu = ROL(Eso, 56); + Ama = BCa ^ ((~BCe) & BCi); + Ame = BCe ^ ((~BCi) & BCo); + Ami = BCi ^ ((~BCo) & BCu); + Amo = BCo ^ ((~BCu) & BCa); + Amu = BCu ^ ((~BCa) & BCe); + + Ebi ^= Di; + BCa = ROL(Ebi, 62); + Ego ^= Do; + BCe = ROL(Ego, 55); + Eku ^= Du; + BCi = ROL(Eku, 39); + Ema ^= Da; + BCo = ROL(Ema, 41); + Ese ^= De; + BCu = ROL(Ese, 2); + Asa = BCa ^ ((~BCe) & BCi); + Ase = BCe ^ ((~BCi) & BCo); + Asi = BCi ^ 
((~BCo) & BCu); + Aso = BCo ^ ((~BCu) & BCa); + Asu = BCu ^ ((~BCa) & BCe); + } + + // copyToState(state, A) + state[0] = Aba; + state[1] = Abe; + state[2] = Abi; + state[3] = Abo; + state[4] = Abu; + state[5] = Aga; + state[6] = Age; + state[7] = Agi; + state[8] = Ago; + state[9] = Agu; + state[10] = Aka; + state[11] = Ake; + state[12] = Aki; + state[13] = Ako; + state[14] = Aku; + state[15] = Ama; + state[16] = Ame; + state[17] = Ami; + state[18] = Amo; + state[19] = Amu; + state[20] = Asa; + state[21] = Ase; + state[22] = Asi; + state[23] = Aso; + state[24] = Asu; +} + +/************************************************* + * Name: keccak_absorb + * + * Description: Absorb step of Keccak; + * non-incremental, starts by zeroeing the state. + * + * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + * - const uint8_t *m: pointer to input to be absorbed into s + * - size_t mlen: length of input in bytes + * - uint8_t p: domain-separation byte for different + * Keccak-derived functions + **************************************************/ +static void keccak_absorb(uint64_t *s, uint32_t r, const uint8_t *m, + size_t mlen, uint8_t p) { + size_t i; + uint8_t t[200]; + + /* Zero state */ + for (i = 0; i < 25; ++i) { + s[i] = 0; + } + + while (mlen >= r) { + for (i = 0; i < r / 8; ++i) { + s[i] ^= load64(m + 8 * i); + } + + KeccakF1600_StatePermute(s); + mlen -= r; + m += r; + } + + for (i = 0; i < r; ++i) { + t[i] = 0; + } + for (i = 0; i < mlen; ++i) { + t[i] = m[i]; + } + t[i] = p; + t[r - 1] |= 128; + for (i = 0; i < r / 8; ++i) { + s[i] ^= load64(t + 8 * i); + } +} + +/************************************************* + * Name: keccak_squeezeblocks + * + * Description: Squeeze step of Keccak. Squeezes full blocks of r bytes each. + * Modifies the state. Can be called multiple times to keep + * squeezing, i.e., is incremental. + * + * Arguments: - uint8_t *h: pointer to output blocks + * - size_t nblocks: number of blocks to be + * squeezed (written to h) + * - uint64_t *s: pointer to input/output Keccak state + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + **************************************************/ +static void keccak_squeezeblocks(uint8_t *h, size_t nblocks, + uint64_t *s, uint32_t r) { + while (nblocks > 0) { + KeccakF1600_StatePermute(s); + for (size_t i = 0; i < (r >> 3); i++) { + store64(h + 8 * i, s[i]); + } + h += r; + nblocks--; + } +} + +/************************************************* + * Name: keccak_inc_init + * + * Description: Initializes the incremental Keccak state to zero. + * + * Arguments: - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. + **************************************************/ +static void keccak_inc_init(uint64_t *s_inc) { + size_t i; + + for (i = 0; i < 25; ++i) { + s_inc[i] = 0; + } + s_inc[25] = 0; +} + +/************************************************* + * Name: keccak_inc_absorb + * + * Description: Incremental keccak absorb + * Preceded by keccak_inc_init, succeeded by keccak_inc_finalize + * + * Arguments: - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. 
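keccak_absorb() above pads the trailing partial block itself: the leftover message bytes are copied into a zeroed r-byte buffer, the domain-separation byte p is written immediately after them, and 0x80 is OR'd into the last byte of the block (the pad10*1 rule). A minimal stand-alone sketch of that final-block handling, using a hypothetical helper name that is not part of the patch:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical helper mirroring the tail handling in keccak_absorb(). */
    static void pad_last_block(uint8_t *t, uint32_t r, const uint8_t *m, size_t mlen, uint8_t p)
    {
        memset(t, 0, r);        /* zero the whole block        */
        memcpy(t, m, mlen);     /* leftover message bytes      */
        t[mlen] = p;            /* domain-separation byte      */
        t[r - 1] |= 0x80;       /* final bit of pad10*1        */
    }

    int main(void)
    {
        uint8_t block[168];
        /* SHAKE128 parameters (r = 168, p = 0x1F) with an empty message */
        pad_last_block(block, 168, (const uint8_t *)"", 0, 0x1F);
        printf("t[0]=0x%02x t[167]=0x%02x\n", block[0], block[167]); /* prints 0x1f 0x80 */
        return 0;
    }

If mlen happens to be r - 1, the two writes land on the same byte, which then holds p | 0x80 (0x9F for the SHAKE domain byte).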
+ * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + * - const uint8_t *m: pointer to input to be absorbed into s + * - size_t mlen: length of input in bytes + **************************************************/ +static void keccak_inc_absorb(uint64_t *s_inc, uint32_t r, const uint8_t *m, + size_t mlen) { + size_t i; + + /* Recall that s_inc[25] is the non-absorbed bytes xored into the state */ + while (mlen + s_inc[25] >= r) { + for (i = 0; i < r - (uint32_t)s_inc[25]; i++) { + /* Take the i'th byte from message + xor with the s_inc[25] + i'th byte of the state; little-endian */ + s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); + } + mlen -= (size_t)(r - s_inc[25]); + m += r - s_inc[25]; + s_inc[25] = 0; + + KeccakF1600_StatePermute(s_inc); + } + + for (i = 0; i < mlen; i++) { + s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); + } + s_inc[25] += mlen; +} + +/************************************************* + * Name: keccak_inc_finalize + * + * Description: Finalizes Keccak absorb phase, prepares for squeezing + * + * Arguments: - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + * - uint8_t p: domain-separation byte for different + * Keccak-derived functions + **************************************************/ +static void keccak_inc_finalize(uint64_t *s_inc, uint32_t r, uint8_t p) { + /* After keccak_inc_absorb, we are guaranteed that s_inc[25] < r, + so we can always use one more byte for p in the current state. */ + s_inc[s_inc[25] >> 3] ^= (uint64_t)p << (8 * (s_inc[25] & 0x07)); + s_inc[(r - 1) >> 3] ^= (uint64_t)128 << (8 * ((r - 1) & 0x07)); + s_inc[25] = 0; +} + +/************************************************* + * Name: keccak_inc_squeeze + * + * Description: Incremental Keccak squeeze; can be called on byte-level + * + * Arguments: - uint8_t *h: pointer to output bytes + * - size_t outlen: number of bytes to be squeezed + * - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + **************************************************/ +static void keccak_inc_squeeze(uint8_t *h, size_t outlen, + uint64_t *s_inc, uint32_t r) { + size_t i; + + /* First consume any bytes we still have sitting around */ + for (i = 0; i < outlen && i < s_inc[25]; i++) { + /* There are s_inc[25] bytes left, so r - s_inc[25] is the first + available byte. We consume from there, i.e., up to r. 
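The incremental routines above address the Keccak state byte-wise: byte k of the sponge lives in lane k >> 3 at bit offset 8*(k & 7), because the 25 lanes are kept as little-endian uint64_t words. A tiny hypothetical check of that mapping (not part of the patch):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical helper: XOR one byte into the little-endian lane view of
       the state, exactly as keccak_inc_absorb() does at index s_inc[25] + i. */
    static void xor_byte(uint64_t *s, size_t k, uint8_t b)
    {
        s[k >> 3] ^= (uint64_t)b << (8 * (k & 0x07));
    }

    int main(void)
    {
        uint64_t s[25] = { 0 };
        xor_byte(s, 9, 0xAB);          /* byte 9 -> lane 1, bits 8..15 */
        assert(s[1] == 0xAB00);
        return 0;
    }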
*/ + h[i] = (uint8_t)(s_inc[(r - s_inc[25] + i) >> 3] >> (8 * ((r - s_inc[25] + i) & 0x07))); + } + h += i; + outlen -= i; + s_inc[25] -= i; + + /* Then squeeze the remaining necessary blocks */ + while (outlen > 0) { + KeccakF1600_StatePermute(s_inc); + + for (i = 0; i < outlen && i < r; i++) { + h[i] = (uint8_t)(s_inc[i >> 3] >> (8 * (i & 0x07))); + } + h += i; + outlen -= i; + s_inc[25] = r - i; + } +} + +void shake128_inc_init(shake128incctx *state) { + keccak_inc_init(state->ctx); +} + +void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHAKE128_RATE, input, inlen); +} + +void shake128_inc_finalize(shake128incctx *state) { + keccak_inc_finalize(state->ctx, SHAKE128_RATE, 0x1F); +} + +void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state) { + keccak_inc_squeeze(output, outlen, state->ctx, SHAKE128_RATE); +} + +void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void shake128_inc_ctx_release(shake128incctx *state) { + (void)state; +} + +void shake256_inc_init(shake256incctx *state) { + keccak_inc_init(state->ctx); +} + +void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHAKE256_RATE, input, inlen); +} + +void shake256_inc_finalize(shake256incctx *state) { + keccak_inc_finalize(state->ctx, SHAKE256_RATE, 0x1F); +} + +void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state) { + keccak_inc_squeeze(output, outlen, state->ctx, SHAKE256_RATE); +} + +void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void shake256_inc_ctx_release(shake256incctx *state) { + (void)state; +} + + +/************************************************* + * Name: shake128_absorb + * + * Description: Absorb step of the SHAKE128 XOF. + * non-incremental, starts by zeroeing the state. + * + * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state + * - const uint8_t *input: pointer to input to be absorbed + * into s + * - size_t inlen: length of input in bytes + **************************************************/ +void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen) { + keccak_absorb(state->ctx, SHAKE128_RATE, input, inlen, 0x1F); +} + +/************************************************* + * Name: shake128_squeezeblocks + * + * Description: Squeeze step of SHAKE128 XOF. Squeezes full blocks of + * SHAKE128_RATE bytes each. Modifies the state. Can be called + * multiple times to keep squeezing, i.e., is incremental. + * + * Arguments: - uint8_t *output: pointer to output blocks + * - size_t nblocks: number of blocks to be squeezed + * (written to output) + * - shake128ctx *state: pointer to input/output Keccak state + **************************************************/ +void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state) { + keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE128_RATE); +} + +void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); +} + +/** Release the allocated state. Call only once. */ +void shake128_ctx_release(shake128ctx *state) { + (void)state; +} + +/************************************************* + * Name: shake256_absorb + * + * Description: Absorb step of the SHAKE256 XOF. 
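The wrappers above expose the usual PQClean-style split between one-shot and incremental hashing. A minimal usage sketch of the incremental SHAKE256 interface (illustrative only, not part of the patch):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include "fips202.h"

    /* Illustrative sketch: absorb two fragments, then squeeze 32 bytes. */
    int main(void)
    {
        shake256incctx ctx;
        uint8_t out[32];

        shake256_inc_init(&ctx);
        shake256_inc_absorb(&ctx, (const uint8_t *)"SQIsign", 7);
        shake256_inc_absorb(&ctx, (const uint8_t *)" test", 5);  /* absorb may be called repeatedly */
        shake256_inc_finalize(&ctx);                             /* no more absorbing after this    */
        shake256_inc_squeeze(out, sizeof(out), &ctx);            /* byte-granular squeezing         */
        shake256_inc_ctx_release(&ctx);

        for (size_t i = 0; i < sizeof(out); i++)
            printf("%02x", out[i]);
        printf("\n");
        return 0;
    }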
+ * non-incremental, starts by zeroeing the state. + * + * Arguments: - shake256ctx *state: pointer to (uninitialized) output Keccak state + * - const uint8_t *input: pointer to input to be absorbed + * into s + * - size_t inlen: length of input in bytes + **************************************************/ +void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen) { + keccak_absorb(state->ctx, SHAKE256_RATE, input, inlen, 0x1F); +} + +/************************************************* + * Name: shake256_squeezeblocks + * + * Description: Squeeze step of SHAKE256 XOF. Squeezes full blocks of + * SHAKE256_RATE bytes each. Modifies the state. Can be called + * multiple times to keep squeezing, i.e., is incremental. + * + * Arguments: - uint8_t *output: pointer to output blocks + * - size_t nblocks: number of blocks to be squeezed + * (written to output) + * - shake256ctx *state: pointer to input/output Keccak state + **************************************************/ +void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state) { + keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE256_RATE); +} + +void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); +} + +/** Release the allocated state. Call only once. */ +void shake256_ctx_release(shake256ctx *state) { + (void)state; +} + +/************************************************* + * Name: shake128 + * + * Description: SHAKE128 XOF with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - size_t outlen: requested output length in bytes + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void shake128(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen) { + size_t nblocks = outlen / SHAKE128_RATE; + uint8_t t[SHAKE128_RATE]; + shake128ctx s; + + shake128_absorb(&s, input, inlen); + shake128_squeezeblocks(output, nblocks, &s); + + output += nblocks * SHAKE128_RATE; + outlen -= nblocks * SHAKE128_RATE; + + if (outlen) { + shake128_squeezeblocks(t, 1, &s); + for (size_t i = 0; i < outlen; ++i) { + output[i] = t[i]; + } + } + shake128_ctx_release(&s); +} + +/************************************************* + * Name: shake256 + * + * Description: SHAKE256 XOF with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - size_t outlen: requested output length in bytes + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void shake256(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen) { + size_t nblocks = outlen / SHAKE256_RATE; + uint8_t t[SHAKE256_RATE]; + shake256ctx s; + + shake256_absorb(&s, input, inlen); + shake256_squeezeblocks(output, nblocks, &s); + + output += nblocks * SHAKE256_RATE; + outlen -= nblocks * SHAKE256_RATE; + + if (outlen) { + shake256_squeezeblocks(t, 1, &s); + for (size_t i = 0; i < outlen; ++i) { + output[i] = t[i]; + } + } + shake256_ctx_release(&s); +} + +void sha3_256_inc_init(sha3_256incctx *state) { + keccak_inc_init(state->ctx); +} + +void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void sha3_256_inc_ctx_release(sha3_256incctx *state) { + (void)state; +} + +void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen) { + 
keccak_inc_absorb(state->ctx, SHA3_256_RATE, input, inlen); +} + +void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state) { + uint8_t t[SHA3_256_RATE]; + keccak_inc_finalize(state->ctx, SHA3_256_RATE, 0x06); + + keccak_squeezeblocks(t, 1, state->ctx, SHA3_256_RATE); + + sha3_256_inc_ctx_release(state); + + for (size_t i = 0; i < 32; i++) { + output[i] = t[i]; + } +} + +/************************************************* + * Name: sha3_256 + * + * Description: SHA3-256 with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen) { + uint64_t s[25]; + uint8_t t[SHA3_256_RATE]; + + /* Absorb input */ + keccak_absorb(s, SHA3_256_RATE, input, inlen, 0x06); + + /* Squeeze output */ + keccak_squeezeblocks(t, 1, s, SHA3_256_RATE); + + for (size_t i = 0; i < 32; i++) { + output[i] = t[i]; + } +} + +void sha3_384_inc_init(sha3_384incctx *state) { + keccak_inc_init(state->ctx); +} + +void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHA3_384_RATE, input, inlen); +} + +void sha3_384_inc_ctx_release(sha3_384incctx *state) { + (void)state; +} + +void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state) { + uint8_t t[SHA3_384_RATE]; + keccak_inc_finalize(state->ctx, SHA3_384_RATE, 0x06); + + keccak_squeezeblocks(t, 1, state->ctx, SHA3_384_RATE); + + sha3_384_inc_ctx_release(state); + + for (size_t i = 0; i < 48; i++) { + output[i] = t[i]; + } +} + +/************************************************* + * Name: sha3_384 + * + * Description: SHA3-256 with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen) { + uint64_t s[25]; + uint8_t t[SHA3_384_RATE]; + + /* Absorb input */ + keccak_absorb(s, SHA3_384_RATE, input, inlen, 0x06); + + /* Squeeze output */ + keccak_squeezeblocks(t, 1, s, SHA3_384_RATE); + + for (size_t i = 0; i < 48; i++) { + output[i] = t[i]; + } +} + +void sha3_512_inc_init(sha3_512incctx *state) { + keccak_inc_init(state->ctx); +} + +void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHA3_512_RATE, input, inlen); +} + +void sha3_512_inc_ctx_release(sha3_512incctx *state) { + (void)state; +} + +void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state) { + uint8_t t[SHA3_512_RATE]; + keccak_inc_finalize(state->ctx, SHA3_512_RATE, 0x06); + + keccak_squeezeblocks(t, 1, state->ctx, SHA3_512_RATE); + + sha3_512_inc_ctx_release(state); + + for (size_t i = 0; i < 64; i++) { + output[i] = t[i]; + } +} + +/************************************************* + * Name: sha3_512 + * + * Description: SHA3-512 with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + 
**************************************************/ +void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen) { + uint64_t s[25]; + uint8_t t[SHA3_512_RATE]; + + /* Absorb input */ + keccak_absorb(s, SHA3_512_RATE, input, inlen, 0x06); + + /* Squeeze output */ + keccak_squeezeblocks(t, 1, s, SHA3_512_RATE); + + for (size_t i = 0; i < 64; i++) { + output[i] = t[i]; + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fips202.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fips202.h new file mode 100644 index 0000000000..c29ebd8f9d --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fips202.h @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef FIPS202_H +#define FIPS202_H + +#include +#include + +#define SHAKE128_RATE 168 +#define SHAKE256_RATE 136 +#define SHA3_256_RATE 136 +#define SHA3_384_RATE 104 +#define SHA3_512_RATE 72 + +#define PQC_SHAKEINCCTX_U64WORDS 26 +#define PQC_SHAKECTX_U64WORDS 25 + +#define PQC_SHAKEINCCTX_BYTES (sizeof(uint64_t) * 26) +#define PQC_SHAKECTX_BYTES (sizeof(uint64_t) * 25) + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} shake128incctx; + +// Context for non-incremental API +typedef struct { + uint64_t ctx[PQC_SHAKECTX_U64WORDS]; +} shake128ctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} shake256incctx; + +// Context for non-incremental API +typedef struct { + uint64_t ctx[PQC_SHAKECTX_U64WORDS]; +} shake256ctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} sha3_256incctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} sha3_384incctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} sha3_512incctx; + +/* Initialize the state and absorb the provided input. + * + * This function does not support being called multiple times + * with the same state. + */ +void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen); +/* Squeeze output out of the sponge. + * + * Supports being called multiple times + */ +void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state); +/* Free the state */ +void shake128_ctx_release(shake128ctx *state); +/* Copy the state. */ +void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src); + +/* Initialize incremental hashing API */ +void shake128_inc_init(shake128incctx *state); +/* Absorb more information into the XOF. + * + * Can be called multiple times. + */ +void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen); +/* Finalize the XOF for squeezing */ +void shake128_inc_finalize(shake128incctx *state); +/* Squeeze output out of the sponge. + * + * Supports being called multiple times + */ +void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state); +/* Copy the context of the SHAKE128 XOF */ +void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src); +/* Free the context of the SHAKE128 XOF */ +void shake128_inc_ctx_release(shake128incctx *state); + +/* Initialize the state and absorb the provided input. + * + * This function does not support being called multiple times + * with the same state. + */ +void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen); +/* Squeeze output out of the sponge. 
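The rate constants defined above are the Keccak block sizes in bytes, i.e. (1600 - c)/8 where the capacity c is twice the parameter in the function's name (for example c = 512 bits for both SHAKE256 and SHA3-256). A compile-time sanity sketch, assuming a C11 compiler and the local header path used in this directory (not part of the patch):

    #include "fips202.h"

    /* Sanity sketch only: rate = (1600 - capacity) bits, expressed in bytes. */
    _Static_assert(SHAKE128_RATE == (1600 - 2 * 128) / 8, "SHAKE128 rate");
    _Static_assert(SHAKE256_RATE == (1600 - 2 * 256) / 8, "SHAKE256 rate");
    _Static_assert(SHA3_256_RATE == (1600 - 2 * 256) / 8, "SHA3-256 rate");
    _Static_assert(SHA3_384_RATE == (1600 - 2 * 384) / 8, "SHA3-384 rate");
    _Static_assert(SHA3_512_RATE == (1600 - 2 * 512) / 8, "SHA3-512 rate");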
+ * + * Supports being called multiple times + */ +void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state); +/* Free the context held by this XOF */ +void shake256_ctx_release(shake256ctx *state); +/* Copy the context held by this XOF */ +void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src); + +/* Initialize incremental hashing API */ +void shake256_inc_init(shake256incctx *state); +void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen); +/* Prepares for squeeze phase */ +void shake256_inc_finalize(shake256incctx *state); +/* Squeeze output out of the sponge. + * + * Supports being called multiple times + */ +void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state); +/* Copy the state */ +void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src); +/* Free the state */ +void shake256_inc_ctx_release(shake256incctx *state); + +/* One-stop SHAKE128 call */ +void shake128(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen); + +/* One-stop SHAKE256 call */ +void shake256(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen); + +/* Initialize the incremental hashing state */ +void sha3_256_inc_init(sha3_256incctx *state); +/* Absorb blocks into SHA3 */ +void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen); +/* Obtain the output of the function and free `state` */ +void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state); +/* Copy the context */ +void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src); +/* Release the state, don't use if `_finalize` has been used */ +void sha3_256_inc_ctx_release(sha3_256incctx *state); + +void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen); + +/* Initialize the incremental hashing state */ +void sha3_384_inc_init(sha3_384incctx *state); +/* Absorb blocks into SHA3 */ +void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, size_t inlen); +/* Obtain the output of the function and free `state` */ +void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state); +/* Copy the context */ +void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src); +/* Release the state, don't use if `_finalize` has been used */ +void sha3_384_inc_ctx_release(sha3_384incctx *state); + +/* One-stop SHA3-384 shop */ +void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen); + +/* Initialize the incremental hashing state */ +void sha3_512_inc_init(sha3_512incctx *state); +/* Absorb blocks into SHA3 */ +void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen); +/* Obtain the output of the function and free `state` */ +void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state); +/* Copy the context */ +void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src); +/* Release the state, don't use if `_finalize` has been used */ +void sha3_512_inc_ctx_release(sha3_512incctx *state); + +/* One-stop SHA3-512 shop */ +void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp.c new file mode 100644 index 0000000000..37b7c87f1d --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp.c @@ -0,0 +1,112 @@ +#include +#include "fp.h" + +const digit_t p[NWORDS_FIELD] = { 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 
0xffffffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x01afffffffffffff }; +const digit_t p2[NWORDS_FIELD] = { 0xfffffffffffffffe, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x035fffffffffffff }; + +void +fp_sqrt(fp_t *x) +{ + fp_t tmp = *x; + (void)gf27500_sqrt(x, &tmp); +} + +uint32_t +fp_is_square(const fp_t *a) +{ + // ls is (0, 1, -1) and we want fp_is_square + // to return 0xFF..FF when ls is 1 or 0 and 0x00..00 otherwise + int32_t ls = gf27500_legendre(a); + return ~(uint32_t)(ls >> 1); +} + +void +fp_inv(fp_t *x) +{ + fp_t tmp = *x; + (void)gf27500_invert(x, &tmp); +} + +void +fp_exp3div4(fp_t *a) +{ + // + // We optimise this by using the shape of the prime + // to avoid almost all multiplications: + // + // We write: + // (p - 3) / 4 = (27*2^500 - 4) / 4 + // = 27*2^498 - 1 + // = 27*(2^498 - 1) + 26 + // Then we first compute: + // a498 = a**(2^498 - 1) + // Then from this we get the desired result as: + // a**((p-3)/4) = a498**27 * a**26 + // We can compute this with 15 multiplications and 504 squares. + fp_t z26, t3, t6, t9, tmp; + // Compute a**3 and a**26 + fp_sqr(&z26, a); + fp_mul(&tmp, a, &z26); + fp_sqr(&z26, &z26); + // Compute a**(2^3 - 1) = a**7 + fp_mul(&t3, &tmp, &z26); + fp_sqr(&z26, &tmp); + fp_sqr(&z26, &z26); + fp_mul(&z26, &z26, a); + fp_sqr(&z26, &z26); + // Compute a**(2^6 - 1) + fp_sqr(&t6, &t3); + for (int i = 1; i < 3; i++) + fp_sqr(&t6, &t6); + fp_mul(&t6, &t6, &t3); + // Compute a**(2^9 - 1) + fp_sqr(&t9, &t6); + for (int i = 1; i < 3; i++) + fp_sqr(&t9, &t9); + fp_mul(&t9, &t9, &t3); + // Compute a**(2^15 - 1) + fp_sqr(a, &t9); + for (int i = 1; i < 6; i++) + fp_sqr(a, a); + fp_mul(a, a, &t6); + // Compute a**(2^30 - 1) + fp_sqr(&tmp, a); + for (int i = 1; i < 15; i++) + fp_sqr(&tmp, &tmp); + fp_mul(a, a, &tmp); + // Compute a**(2^60 - 1) + fp_sqr(&tmp, a); + for (int i = 1; i < 30; i++) + fp_sqr(&tmp, &tmp); + fp_mul(a, a, &tmp); + // Compute a**(2^120 - 1) + fp_sqr(&tmp, a); + for (int i = 1; i < 60; i++) + fp_sqr(&tmp, &tmp); + fp_mul(a, a, &tmp); + // Compute a**(2^240 - 1) + fp_sqr(&tmp, a); + for (int i = 1; i < 120; i++) + fp_sqr(&tmp, &tmp); + fp_mul(a, a, &tmp); + // Compute a**(2^249 - 1) + for (int i = 0; i < 9; i++) + fp_sqr(a, a); + fp_mul(a, a, &t9); + // Compute a**(2^498 - 1) + fp_sqr(&tmp, a); + for (int i = 1; i < 249; i++) + fp_sqr(&tmp, &tmp); + fp_mul(a, a, &tmp); + // Compute a**(27*(2^498 - 1)) + fp_sqr(&tmp, a); + fp_sqr(&tmp, &tmp); + fp_sqr(&tmp, &tmp); + fp_mul(a, a, &tmp); + fp_sqr(&tmp, a); + fp_mul(a, a, &tmp); + // Compute a**(27*(2^498 - 1) + 26) + fp_mul(a, a, &z26); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp.h new file mode 100644 index 0000000000..1d899ededa --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp.h @@ -0,0 +1,136 @@ +#ifndef FP_H +#define FP_H + +// Include statements +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gf27500.h" + +// Type for elements of GF(p) +// Type for elements of GF(p) +#define fp_t gf27500 + +// Operations in fp +static inline void +fp_neg(fp_t *d, const fp_t *a) +{ + gf27500_neg(d, a); +} + +void fp_add(fp_t *out, const fp_t *a, const fp_t *b); // implemented in fp_asm.S +void fp_sub(fp_t *out, const fp_t *a, const fp_t *b); // implemented in fp_asm.S +void fp_sqr(fp_t *out, const fp_t *a); // implemented in fp_asm.S +void 
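fp_is_square() above turns the Legendre symbol ls in {1, 0, -1} into an all-ones/all-zero mask with ~(uint32_t)(ls >> 1): the shift sends 1 and 0 to 0 and -1 to -1, and the complement then yields 0xFFFFFFFF for squares (and zero) and 0 for non-squares. A small hypothetical self-test, assuming the usual arithmetic right shift for negative int32_t values, which the routine above also relies on (not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical self-test: same expression as in fp_is_square() above. */
    static uint32_t legendre_to_mask(int32_t ls)
    {
        return ~(uint32_t)(ls >> 1);
    }

    int main(void)
    {
        assert(legendre_to_mask(1)  == 0xFFFFFFFFu);  /* square     */
        assert(legendre_to_mask(0)  == 0xFFFFFFFFu);  /* zero       */
        assert(legendre_to_mask(-1) == 0x00000000u);  /* non-square */
        return 0;
    }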
fp_mul(fp_t *out, const fp_t *a, const fp_t *b); // implemented in fp_asm.S + +static inline void +fp_mul_small(fp_t *d, const fp_t *a, uint32_t n) +{ + gf27500_mul_small(d, a, n); +} + +static inline void +fp_half(fp_t *d, const fp_t *a) +{ + gf27500_half(d, a); +} +// #define fp_half gf27500_half + +static inline void +fp_div3(fp_t *d, const fp_t *a) +{ + gf27500_div3(d, a); +} +// #define fp_div3 gf27500_div3 + +// Constant time selection and swapping +static inline void +fp_select(fp_t *d, const fp_t *a0, const fp_t *a1, uint32_t ctl) +{ + gf27500_select(d, a0, a1, ctl); +} +// #define fp_select gf27500_select + +static inline void +fp_cswap(fp_t *a, fp_t *b, uint32_t ctl) +{ + gf27500_cswap(a, b, ctl); +} +// #define fp_cswap gf27500_cswap + +// Comparisons for fp elements +static inline uint32_t +fp_is_zero(const fp_t *a) +{ + return gf27500_iszero(a); +} +// #define fp_is_zero gf27500_iszero + +static inline uint32_t +fp_is_equal(const fp_t *a, const fp_t *b) +{ + return gf27500_equals(a, b); +} +// #define fp_is_equal gf27500_equals + +// Set a uint32 to an Fp value +static inline void +fp_set_small(fp_t *d, uint32_t x) +{ + gf27500_set_small(d, x); +} +// #define fp_set_small gf27500_set_small + +// Encoding and decoding of bytes +static inline void +fp_encode(void *dst, const fp_t *a) +{ + gf27500_encode(dst, a); +} +// #define fp_encode gf27500_encode +static inline uint32_t +fp_decode(fp_t *d, const void *src) +{ + return gf27500_decode(d, src); +} +// #define fp_decode gf27500_decode +static inline void +fp_decode_reduce(fp_t *d, const void *src, size_t len) +{ + gf27500_decode_reduce(d, src, len); +} +// #define fp_decode_reduce gf27500_decode_reduce +// These functions are essentially useless because we can just +// use = for the shallow copies we need, but they're here for +// now until we do a larger refactoring +static inline void +fp_copy(fp_t *out, const fp_t *a) +{ + memcpy(out, a, sizeof(fp_t)); +} + +static inline void +fp_set_zero(fp_t *a) +{ + memcpy(a, &ZERO, sizeof(fp_t)); +} + +static inline void +fp_set_one(fp_t *a) +{ + memcpy(a, &ONE, sizeof(fp_t)); +} + +// Functions defined in low level code but with different API +void fp_inv(fp_t *a); +void fp_sqrt(fp_t *a); +void fp_exp3div4(fp_t *a); +uint32_t fp_is_square(const fp_t *a); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp2.c new file mode 100644 index 0000000000..3269f6c66f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp2.c @@ -0,0 +1,188 @@ +#include "fp2.h" +#include +#include + +/* Arithmetic modulo X^2 + 1 */ + +void +fp2_encode(void *dst, const fp2_t *a) +{ + uint8_t *buf = dst; + fp_encode(buf, &(a->re)); + fp_encode(buf + FP_ENCODED_BYTES, &(a->im)); +} + +uint32_t +fp2_decode(fp2_t *d, const void *src) +{ + const uint8_t *buf = src; + uint32_t re, im; + + re = fp_decode(&(d->re), buf); + im = fp_decode(&(d->im), buf + FP_ENCODED_BYTES); + return re & im; +} + +void +fp2_inv(fp2_t *x) +{ + fp_t t0, t1; + + fp_sqr(&t0, &(x->re)); + fp_sqr(&t1, &(x->im)); + fp_add(&t0, &t0, &t1); + fp_inv(&t0); + fp_mul(&(x->re), &(x->re), &t0); + fp_mul(&(x->im), &(x->im), &t0); + fp_neg(&(x->im), &(x->im)); +} + +void +fp2_batched_inv(fp2_t *x, int len) +{ + fp2_t t1[len], t2[len]; + fp2_t inverse; + + // x = x0,...,xn + // t1 = x0, x0*x1, ... ,x0 * x1 * ... * xn + t1[0] = x[0]; + for (int i = 1; i < len; i++) { + fp2_mul(&t1[i], &t1[i - 1], &x[i]); + } + + // inverse = 1/ (x0 * x1 * ... 
* xn) + inverse = t1[len - 1]; + fp2_inv(&inverse); + t2[0] = inverse; + + // t2 = 1/ (x0 * x1 * ... * xn), 1/ (x0 * x1 * ... * x(n-1)) , ... , 1/xO + for (int i = 1; i < len; i++) { + fp2_mul(&t2[i], &t2[i - 1], &x[len - i]); + } + + x[0] = t2[len - 1]; + for (int i = 1; i < len; i++) { + fp2_mul(&x[i], &t1[i - 1], &t2[len - i - 1]); + } +} + +uint32_t +fp2_is_square(const fp2_t *x) +{ + fp_t t0, t1; + + fp_sqr(&t0, &(x->re)); + fp_sqr(&t1, &(x->im)); + fp_add(&t0, &t0, &t1); + + return fp_is_square(&t0); +} + +void +fp2_sqrt(fp2_t *a) +{ + fp_t x0, x1, t0, t1; + + /* From "Optimized One-Dimensional SQIsign Verification on Intel and + * Cortex-M4" by Aardal et al: https://eprint.iacr.org/2024/1563 */ + + // x0 = \delta = sqrt(a0^2 + a1^2). + fp_sqr(&x0, &(a->re)); + fp_sqr(&x1, &(a->im)); + fp_add(&x0, &x0, &x1); + fp_sqrt(&x0); + // If a1 = 0, there is a risk of \delta = -a0, which makes x0 = 0 below. + // In that case, we restore the value \delta = a0. + fp_select(&x0, &x0, &(a->re), fp_is_zero(&(a->im))); + // x0 = \delta + a0, t0 = 2 * x0. + fp_add(&x0, &x0, &(a->re)); + fp_add(&t0, &x0, &x0); + // x1 = t0^(p-3)/4. + fp_copy(&x1, &t0); + fp_exp3div4(&x1); + // x0 = x0 * x1, x1 = x1 * a1, t1 = (2x0)^2. + fp_mul(&x0, &x0, &x1); + fp_mul(&x1, &x1, &(a->im)); + fp_add(&t1, &x0, &x0); + fp_sqr(&t1, &t1); + // If t1 = t0, return x0 + x1*i, otherwise x1 - x0*i. + fp_sub(&t0, &t0, &t1); + uint32_t f = fp_is_zero(&t0); + fp_neg(&t1, &x0); + fp_copy(&t0, &x1); + fp_select(&t0, &t0, &x0, f); + fp_select(&t1, &t1, &x1, f); + + // Check if t0 is zero + uint32_t t0_is_zero = fp_is_zero(&t0); + // Check whether t0, t1 are odd + // Note: we encode to ensure canonical representation + uint8_t tmp_bytes[FP_ENCODED_BYTES]; + fp_encode(tmp_bytes, &t0); + uint32_t t0_is_odd = -((uint32_t)tmp_bytes[0] & 1); + fp_encode(tmp_bytes, &t1); + uint32_t t1_is_odd = -((uint32_t)tmp_bytes[0] & 1); + // We negate the output if: + // t0 is odd, or + // t0 is zero and t1 is odd + uint32_t negate_output = t0_is_odd | (t0_is_zero & t1_is_odd); + fp_neg(&x0, &t0); + fp_select(&(a->re), &t0, &x0, negate_output); + fp_neg(&x0, &t1); + fp_select(&(a->im), &t1, &x0, negate_output); +} + +uint32_t +fp2_sqrt_verify(fp2_t *a) +{ + fp2_t t0, t1; + + fp2_copy(&t0, a); + fp2_sqrt(a); + fp2_sqr(&t1, a); + + return (fp2_is_equal(&t0, &t1)); +} + +// exponentiation +void +fp2_pow_vartime(fp2_t *out, const fp2_t *x, const digit_t *exp, const int size) +{ + fp2_t acc; + digit_t bit; + + fp2_copy(&acc, x); + fp2_set_one(out); + + // Iterate over each word of exp + for (int j = 0; j < size; j++) { + // Iterate over each bit of the word + for (int i = 0; i < RADIX; i++) { + bit = (exp[j] >> i) & 1; + if (bit == 1) { + fp2_mul(out, out, &acc); + } + fp2_sqr(&acc, &acc); + } + } +} + +void +fp2_print(const char *name, const fp2_t *a) +{ + printf("%s0x", name); + + uint8_t buf[FP_ENCODED_BYTES]; + fp_encode(&buf, &a->re); // Encoding ensures canonical rep + for (int i = 0; i < FP_ENCODED_BYTES; i++) { + printf("%02x", buf[FP_ENCODED_BYTES - i - 1]); + } + + printf(" + i*0x"); + + fp_encode(&buf, &a->im); + for (int i = 0; i < FP_ENCODED_BYTES; i++) { + printf("%02x", buf[FP_ENCODED_BYTES - i - 1]); + } + printf("\n"); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp2.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp2.h new file mode 100644 index 0000000000..736e83e22a --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp2.h @@ -0,0 +1,49 @@ +#ifndef FP2_H +#define FP2_H + +#define NO_FP2X_MUL 
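fp2_sqrt() above follows the square-root method of the cited paper (Aardal et al., eprint 2024/1563), which costs one square root and one exponentiation by (p-3)/4 in GF(p). In outline, writing the input as a = a0 + a1*i and delta = sqrt(a0^2 + a1^2), the candidate root is

\[
x = \sqrt{\tfrac{a_0+\delta}{2}}, \qquad y = \frac{a_1}{2x}, \qquad
(x + y\,i)^2 = (x^2 - y^2) + 2xy\,i = a_0 + a_1 i ,
\]

since \(\delta^2 = a_0^2 + a_1^2\) gives \((a_0+\delta)^2 - a_1^2 = 2a_0(a_0+\delta)\). Because p = 27*2^500 - 1 is 3 mod 4, \(t^{(p-3)/4} = 1/\sqrt{t}\) for any nonzero square t, so the single fp_exp3div4() call on t = 2(a0 + delta) produces both x = (a0 + delta) * t^((p-3)/4) and y = a1 * t^((p-3)/4); the final selects then swap the two halves when t was not a square and negate the result to pick a canonical root based on the parity of its encoding.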
+#define NO_FP2X_SQR + +#include + +extern void fp2_sq_c0(fp2_t *out, const fp2_t *in); +extern void fp2_sq_c1(fp_t *out, const fp2_t *in); + +extern void fp2_mul_c0(fp_t *out, const fp2_t *in0, const fp2_t *in1); +extern void fp2_mul_c1(fp_t *out, const fp2_t *in0, const fp2_t *in1); + +static inline void +fp2_mul(fp2_t *x, const fp2_t *y, const fp2_t *z) +{ + fp_t t; + + fp2_mul_c0(&t, y, z); // c0 = a0*b0 - a1*b1 + fp2_mul_c1(&x->im, y, z); // c1 = a0*b1 + a1*b0 + x->re.arr[0] = t.arr[0]; + x->re.arr[1] = t.arr[1]; + x->re.arr[2] = t.arr[2]; + x->re.arr[3] = t.arr[3]; + x->re.arr[4] = t.arr[4]; + x->re.arr[5] = t.arr[5]; + x->re.arr[6] = t.arr[6]; + x->re.arr[7] = t.arr[7]; +} + +static inline void +fp2_sqr(fp2_t *x, const fp2_t *y) +{ + fp2_t t; + + fp2_sq_c0(&t, y); // c0 = (a0+a1)(a0-a1) + fp2_sq_c1(&x->im, y); // c1 = 2a0*a1 + x->re.arr[0] = t.re.arr[0]; + x->re.arr[1] = t.re.arr[1]; + x->re.arr[2] = t.re.arr[2]; + x->re.arr[3] = t.re.arr[3]; + x->re.arr[4] = t.re.arr[4]; + x->re.arr[5] = t.re.arr[5]; + x->re.arr[6] = t.re.arr[6]; + x->re.arr[7] = t.re.arr[7]; +} + +#endif \ No newline at end of file diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp2x.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp2x.h new file mode 100644 index 0000000000..44cf103bf2 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp2x.h @@ -0,0 +1,162 @@ +#ifndef FP2X_H +#define FP2X_H + +#include +#include "fp.h" +#include + +// Structure for representing elements in GF(p^2) +typedef struct fp2_t +{ + fp_t re, im; +} fp2_t; + +static inline void +fp2_set_small(fp2_t *x, const uint32_t val) +{ + fp_set_small(&(x->re), val); + fp_set_zero(&(x->im)); +} + +static inline void +fp2_mul_small(fp2_t *x, const fp2_t *y, uint32_t n) +{ + fp_mul_small(&x->re, &y->re, n); + fp_mul_small(&x->im, &y->im, n); +} + +static inline void +fp2_set_zero(fp2_t *x) +{ + fp_set_zero(&(x->re)); + fp_set_zero(&(x->im)); +} + +static inline void +fp2_set_one(fp2_t *x) +{ + fp_set_one(&(x->re)); + fp_set_zero(&(x->im)); +} + +static inline uint32_t +fp2_is_equal(const fp2_t *a, const fp2_t *b) +{ // Compare two GF(p^2) elements in constant time + // Returns 1 (true) if a=b, 0 (false) otherwise + + return fp_is_equal(&(a->re), &(b->re)) & fp_is_equal(&(a->im), &(b->im)); +} + +static inline uint32_t +fp2_is_zero(const fp2_t *a) +{ // Is a GF(p^2) element zero? + // Returns 1 (true) if a=0, 0 (false) otherwise + + return fp_is_zero(&(a->re)) & fp_is_zero(&(a->im)); +} + +static inline uint32_t +fp2_is_one(const fp2_t *a) +{ // Is a GF(p^2) element one? 
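The Broadwell fp2.h above only glues the assembly kernels together: fp2_mul_c0/fp2_mul_c1 and fp2_sq_c0/fp2_sq_c1 compute the two coordinates of a product or square in GF(p^2) = GF(p)[i]/(i^2 + 1). The identities being evaluated, as stated in the inline comments, are

\[
(a_0 + a_1 i)(b_0 + b_1 i) = (a_0 b_0 - a_1 b_1) + (a_0 b_1 + a_1 b_0)\,i,
\qquad
(a_0 + a_1 i)^2 = (a_0 + a_1)(a_0 - a_1) + (2 a_0 a_1)\,i .
\]

The portable fallback further down in fp2x.h (compiled out here via NO_FP2X_MUL / NO_FP2X_SQR) obtains the same product from three GF(p) multiplications by using \((a_0 + a_1)(b_0 + b_1) - a_0 b_0 - a_1 b_1 = a_0 b_1 + a_1 b_0\).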
+ // Returns 1 (true) if a=0, 0 (false) otherwise + return fp_is_equal(&(a->re), &ONE) & fp_is_zero(&(a->im)); +} + +static inline void +fp2_half(fp2_t *x, const fp2_t *y) +{ + fp_half(&(x->re), &(y->re)); + fp_half(&(x->im), &(y->im)); +} + +static inline void +fp2_add(fp2_t *x, const fp2_t *y, const fp2_t *z) +{ + fp_add(&(x->re), &(y->re), &(z->re)); + fp_add(&(x->im), &(y->im), &(z->im)); +} + +static inline void +fp2_add_one(fp2_t *x, const fp2_t *y) +{ + fp_add(&x->re, &y->re, &ONE); + fp_copy(&x->im, &y->im); +} + +static inline void +fp2_sub(fp2_t *x, const fp2_t *y, const fp2_t *z) +{ + fp_sub(&(x->re), &(y->re), &(z->re)); + fp_sub(&(x->im), &(y->im), &(z->im)); +} + +static inline void +fp2_neg(fp2_t *x, const fp2_t *y) +{ + fp_neg(&(x->re), &(y->re)); + fp_neg(&(x->im), &(y->im)); +} + +#ifndef NO_FP2X_MUL +static inline void +fp2_mul(fp2_t *x, const fp2_t *y, const fp2_t *z) +{ + fp_t t0, t1; + + fp_add(&t0, &(y->re), &(y->im)); + fp_add(&t1, &(z->re), &(z->im)); + fp_mul(&t0, &t0, &t1); + fp_mul(&t1, &(y->im), &(z->im)); + fp_mul(&(x->re), &(y->re), &(z->re)); + fp_sub(&(x->im), &t0, &t1); + fp_sub(&(x->im), &(x->im), &(x->re)); + fp_sub(&(x->re), &(x->re), &t1); +} +#endif + +#ifndef NO_FP2X_SQR +static inline void +fp2_sqr(fp2_t *x, const fp2_t *y) +{ + fp_t sum, diff; + + fp_add(&sum, &(y->re), &(y->im)); + fp_sub(&diff, &(y->re), &(y->im)); + fp_mul(&(x->im), &(y->re), &(y->im)); + fp_add(&(x->im), &(x->im), &(x->im)); + fp_mul(&(x->re), &sum, &diff); +} +#endif + +static inline void +fp2_select(fp2_t *d, const fp2_t *a0, const fp2_t *a1, uint32_t ctl) +{ + fp_select(&(d->re), &(a0->re), &(a1->re), ctl); + fp_select(&(d->im), &(a0->im), &(a1->im), ctl); +} + +static inline void +fp2_cswap(fp2_t *a, fp2_t *b, uint32_t ctl) +{ + fp_cswap(&(a->re), &(b->re), ctl); + fp_cswap(&(a->im), &(b->im), ctl); +} + +static inline void +fp2_copy(fp2_t *x, const fp2_t *y) +{ + *x = *y; +} + +// New functions +void fp2_encode(void *dst, const fp2_t *a); +uint32_t fp2_decode(fp2_t *d, const void *src); +void fp2_inv(fp2_t *x); +uint32_t fp2_is_square(const fp2_t *x); +void fp2_sqrt(fp2_t *x); +uint32_t fp2_sqrt_verify(fp2_t *a); +void fp2_batched_inv(fp2_t *x, int len); +void fp2_pow_vartime(fp2_t *out, const fp2_t *x, const uint64_t *exp, const int size); +void fp2_print(const char *name, const fp2_t *a); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp_asm.S b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp_asm.S new file mode 100755 index 0000000000..3e7390300e --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp_asm.S @@ -0,0 +1,784 @@ +#include +.intel_syntax noprefix + +.set pbytes,32 +.set plimbs,4 + +#ifdef __APPLE__ +.section __TEXT,__const +#else +.section .rodata +#endif +p_plus_1: .quad 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x01B0000000000000 + +#if defined(__linux__) && defined(__ELF__) +.section .note.GNU-stack,"",@progbits +#endif + +#include + +.text +.p2align 4,,15 + +.global fp_add +fp_add: + push r12 + push r13 + push r14 + push r15 + xor rax, rax + mov r8, [rsi] + mov r9, [rsi+8] + mov r10, [rsi+16] + mov r11, [rsi+24] + mov r12, [rsi+32] + mov r13, [rsi+40] + mov r14, [rsi+48] + mov r15, [rsi+56] + add r8, [rdx] + adc r9, [rdx+8] + adc r10, [rdx+16] + adc r11, [rdx+24] + adc r12, [rdx+32] + adc r13, [rdx+40] + adc r14, [rdx+48] + adc r15, [rdx+56] + mov rax, r15 + shr rax, 57 + neg rax + mov rdx, [rip+p+56] + and 
rdx, rax + sub r8, rax + sbb r9, rax + sbb r10, rax + sbb r11, rax + sbb r12, rax + sbb r13, rax + sbb r14, rax + sbb r15, rdx + + mov rax, r15 + shr rax, 57 + neg rax + mov rdx, [rip+p+56] + and rdx, rax + sub r8, rax + sbb r9, rax + sbb r10, rax + sbb r11, rax + sbb r12, rax + sbb r13, rax + sbb r14, rax + sbb r15, rdx + mov [rdi], r8 + mov [rdi+8], r9 + mov [rdi+16], r10 + mov [rdi+24], r11 + mov [rdi+32], r12 + mov [rdi+40], r13 + mov [rdi+48], r14 + mov [rdi+56], r15 + pop r15 + pop r14 + pop r13 + pop r12 + ret + +.global fp_sub +fp_sub: + push r12 + push r13 + push r14 + push r15 + xor rax, rax + mov r8, [rsi] + mov r9, [rsi+8] + mov r10, [rsi+16] + mov r11, [rsi+24] + mov r12, [rsi+32] + mov r13, [rsi+40] + mov r14, [rsi+48] + mov r15, [rsi+56] + sub r8, [rdx] + sbb r9, [rdx+8] + sbb r10, [rdx+16] + sbb r11, [rdx+24] + sbb r12, [rdx+32] + sbb r13, [rdx+40] + sbb r14, [rdx+48] + sbb r15, [rdx+56] + sbb rax, 0 + + mov rdx, [rip+p+56] + and rdx, rax + add r8, rax + adc r9, rax + adc r10, rax + adc r11, rax + adc r12, rax + adc r13, rax + adc r14, rax + adc r15, rdx + + mov rax, r15 + sar rax, 57 + mov rdx, [rip+p+56] + and rdx, rax + add r8, rax + adc r9, rax + adc r10, rax + adc r11, rax + adc r12, rax + adc r13, rax + adc r14, rax + adc r15, rdx + + mov [rdi], r8 + mov [rdi+8], r9 + mov [rdi+16], r10 + mov [rdi+24], r11 + mov [rdi+32], r12 + mov [rdi+40], r13 + mov [rdi+48], r14 + mov [rdi+56], r15 + pop r15 + pop r14 + pop r13 + pop r12 + ret + +///////////////////////////////////////////////////////////////// MACRO +// z = a x bi + z +// Inputs: base memory pointer M1 (a), +// bi pre-stored in rdx, +// accumulator z in [Z0:Z8] +// Output: [Z0:Z8] +// Temps: regs T0:T1 +///////////////////////////////////////////////////////////////// +.macro MULADD64x512 M1, Z0, Z1, Z2, Z3, Z4, Z5, Z6, Z7, Z8, T0, T1, C + xor \C, \C + mulx \T0, \T1, \M1 // A0*B0 + adox \Z0, \T1 + adox \Z1, \T0 + mulx \T0, \T1, 8\M1 // A0*B1 + adcx \Z1, \T1 + adox \Z2, \T0 + mulx \T0, \T1, 16\M1 // A0*B2 + adcx \Z2, \T1 + adox \Z3, \T0 + mulx \T0, \T1, 24\M1 // A0*B3 + adcx \Z3, \T1 + adox \Z4, \T0 + mulx \T0, \T1, 32\M1 // A0*B4 + adcx \Z4, \T1 + adox \Z5, \T0 + mulx \T0, \T1, 40\M1 // A0*B5 + adcx \Z5, \T1 + adox \Z6, \T0 + mulx \T0, \T1, 48\M1 // A0*B6 + adcx \Z6, \T1 + adox \Z7, \T0 + mulx \T0, \T1, 56\M1 // A0*B7 + adcx \Z7, \T1 + adox \Z8, \T0 + adc \Z8, 0 +.endm + +.macro MULADD64x64 M1, Z0, Z1, Z2, Z3, Z4, Z5, Z6, Z7, T0, T1 + xor \T0, \T0 + mulx \T0, \T1, \M1 // A0*B0 + adox \Z6, \T1 + adox \Z7, \T0 +.endm + +//*********************************************************************** +// Multiplication in GF(p^2), non-complex part +// Operation: c [rdi] = a0 x b0 - a1 x b1 +// Inputs: a = [a1, a0] stored in [rsi] +// b = [b1, b0] stored in [rdx] +// Output: c stored in [rdi] +//*********************************************************************** +.global fp2_mul_c0 +fp2_mul_c0: + push r12 + push r13 + push r14 + push r15 + push rbx + push rbp + mov rcx, rdx + + // [rdi0:7] <- 2p - b1 + mov r8, [rip+p2] + mov r9, [rip+p2+8] + mov r10, r9 + mov r11, r9 + mov r12, r9 + mov r13, r9 + mov r14, r9 + mov r15, [rip+p2+56] + mov rax, [rcx+64] + mov rdx, [rcx+72] + sub r8, rax + sbb r9, rdx + mov rax, [rcx+80] + mov rdx, [rcx+88] + sbb r10, rax + sbb r11, rdx + mov rax, [rcx+96] + mov rdx, [rcx+104] + sbb r12, rax + sbb r13, rdx + mov rax, [rcx+112] + mov rdx, [rcx+120] + sbb r14, rax + sbb r15, rdx + mov [rdi], r8 + mov [rdi+8], r9 + mov [rdi+16], r10 + mov [rdi+24], r11 + mov [rdi+32], r12 + mov [rdi+40], r13 + 
mov [rdi+48], r14 + mov [rdi+56], r15 + + // [r8:r15, rax] <- z = a0 x b00 - a1 x b10 + mov rdx, [rcx] + mulx r9, r8, [rsi] + xor rax, rax + mulx r10, r11, [rsi+8] + adcx r9, r11 + mulx r11, r12, [rsi+16] + adcx r10, r12 + mulx r12, r13, [rsi+24] + adcx r11, r13 + mulx r13, r14, [rsi+32] + adcx r12, r14 + mulx r14, r15, [rsi+40] + adcx r13, r15 + mulx r15, rbp, [rsi+48] + adcx r14, rbp + mulx rax, rbx, [rsi+56] + adcx r15, rbx + adc rax, 0 + + mov rdx, [rdi] + MULADD64x512 [rsi+64], r8, r9, r10, r11, r12, r13, r14, r15, rax, rbx, rbp, rbx + // [r9:r14] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r8 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], r9, r10, r11, r12, r13, r14, r15, rax, rbx, rbp + + // [r9:r15, rax, r8] <- z = a0 x b01 - a1 x b11 + z + mov rdx, [rcx+8] + MULADD64x512 [rsi], r9, r10, r11, r12, r13, r14, r15, rax, r8, rbx, rbp, r8 + mov rdx, [rdi+8] + MULADD64x512 [rsi+64], r9, r10, r11, r12, r13, r14, r15, rax, r8, rbx, rbp, rbx + // [r10:r15, rax, r8] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r9 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], r10, r11, r12, r13, r14, r15, rax, r8, rbx, rbp + + // [r10:r15, rax, r8:r9] <- z = a0 x b02 - a1 x b12 + z + mov rdx, [rcx+16] + MULADD64x512 [rsi], r10, r11, r12, r13, r14, r15, rax, r8, r9, rbx, rbp, r9 + mov rdx, [rdi+16] + MULADD64x512 [rsi+64], r10, r11, r12, r13, r14, r15, rax, r8, r9, rbx, rbp, rbx + // [r11:r15, rax, r8:r9] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r10 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], r11, r12, r13, r14, r15, rax, r8, r9, rbx, rbp + + // [r11:r15, rax, r8:r10] <- z = a0 x b03 - a1 x b13 + z + mov rdx, [rcx+24] + MULADD64x512 [rsi], r11, r12, r13, r14, r15, rax, r8, r9, r10, rbx, rbp, r10 + mov rdx, [rdi+24] + MULADD64x512 [rsi+64], r11, r12, r13, r14, r15, rax, r8, r9, r10, rbx, rbp, rbx + // [r12:r15, rax, r8:r10] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r11 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], r12, r13, r14, r15, rax, r8, r9, r10, rbx, rbp + + // [r12:r15, rax, r8:r11] <- z = a0 x b04 - a1 x b14 + z + mov rdx, [rcx+32] + MULADD64x512 [rsi], r12, r13, r14, r15, rax, r8, r9, r10, r11, rbx, rbp, r11 + mov rdx, [rdi+32] + MULADD64x512 [rsi+64], r12, r13, r14, r15, rax, r8, r9, r10, r11, rbx, rbp, rbx + // [r13:r15, rax, r8:r11] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r12 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], r13, r14, r15, rax, r8, r9, r10, r11, rbx, rbp + + // [r13:r15, rax, r8:r12] <- z = a0 x b05 - a1 x b15 + z + mov rdx, [rcx+40] + MULADD64x512 [rsi], r13, r14, r15, rax, r8, r9, r10, r11, r12, rbx, rbp, r12 + mov rdx, [rdi+40] + MULADD64x512 [rsi+64], r13, r14, r15, rax, r8, r9, r10, r11, r12, rbx, rbp, rbx + // [r14:r15, rax, r8:r12] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r13 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], r14, r15, rax, r8, r9, r10, r11, r12, rbx, rbp + + // [r14:r15, rax, r8:r12] <- z = a0 x b06 - a1 x b16 + z + mov rdx, [rcx+48] + MULADD64x512 [rsi], r14, r15, rax, r8, r9, r10, r11, r12, r13, rbx, rbp, r13 + mov rdx, [rdi+48] + MULADD64x512 [rsi+64], r14, r15, rax, r8, r9, r10, r11, r12, r13, rbx, rbp, rbx + // [r15, rax, r8:r13] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r14 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], r15, rax, r8, r9, r10, r11, r12, r13, rbx, rbp + + // [r15, rax, r8:r12] <- z = a0 x b06 - a1 x b16 + z + mov rdx, [rcx+56] + MULADD64x512 [rsi], r15, rax, r8, r9, r10, r11, r12, r13, r14, rbx, rbp, r14 + mov rdx, [rdi+56] + MULADD64x512 [rsi+64], r15, rax, r8, r9, r10, r11, r12, r13, r14, rbx, rbp, rbx + // [rax, r8:r14] <- z = (z0 x 
p_plus_1 + z)/2^64 + mov rdx, r15 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], rax, r8, r9, r10, r11, r12, r13, r14, rbx, rbp + + mov [rdi], rax + mov [rdi+8], r8 + mov [rdi+16], r9 + mov [rdi+24], r10 + mov [rdi+32], r11 + mov [rdi+40], r12 + mov [rdi+48], r13 + mov [rdi+56], r14 + pop rbp + pop rbx + pop r15 + pop r14 + pop r13 + pop r12 + ret + +//*********************************************************************** +// Multiplication in GF(p^2), complex part +// Operation: c [rdi] = a0 x b1 + a1 x b0 +// Inputs: a = [a1, a0] stored in [rsi] +// b = [b1, b0] stored in [rdx] +// Output: c stored in [rdi] +//*********************************************************************** +.global fp2_mul_c1 +fp2_mul_c1: + push r12 + push r13 + push r14 + push r15 + push rbx + push rbp + mov rcx, rdx + + // [r8:r15, rax] <- z = a0 x b10 + a1 x b00 + mov rdx, [rcx+64] + mulx r9, r8, [rsi] + xor rax, rax + mulx r10, r11, [rsi+8] + adcx r9, r11 + mulx r11, r12, [rsi+16] + adcx r10, r12 + mulx r12, r13, [rsi+24] + adcx r11, r13 + mulx r13, r14, [rsi+32] + adcx r12, r14 + mulx r14, r15, [rsi+40] + adcx r13, r15 + mulx r15, rbp, [rsi+48] + adcx r14, rbp + mulx rax, rbx, [rsi+56] + adcx r15, rbx + adc rax, 0 + + mov rdx, [rcx] + MULADD64x512 [rsi+64], r8, r9, r10, r11, r12, r13, r14, r15, rax, rbx, rbp, rbx + // [r9:r14] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r8 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], r9, r10, r11, r12, r13, r14, r15, rax, rbx, rbp + + // [r9:r15, rax, r8] <- z = a0 x b01 - a1 x b11 + z + mov rdx, [rcx+72] + MULADD64x512 [rsi], r9, r10, r11, r12, r13, r14, r15, rax, r8, rbx, rbp, r8 + mov rdx, [rcx+8] + MULADD64x512 [rsi+64], r9, r10, r11, r12, r13, r14, r15, rax, r8, rbx, rbp, rbx + // [r10:r15, rax, r8] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r9 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], r10, r11, r12, r13, r14, r15, rax, r8, rbx, rbp + + // [r10:r15, rax, r8:r9] <- z = a0 x b02 - a1 x b12 + z + mov rdx, [rcx+80] + MULADD64x512 [rsi], r10, r11, r12, r13, r14, r15, rax, r8, r9, rbx, rbp, r9 + mov rdx, [rcx+16] + MULADD64x512 [rsi+64], r10, r11, r12, r13, r14, r15, rax, r8, r9, rbx, rbp, rbx + // [r11:r15, rax, r8:r9] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r10 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], r11, r12, r13, r14, r15, rax, r8, r9, rbx, rbp + + // [r11:r15, rax, r8:r10] <- z = a0 x b03 - a1 x b13 + z + mov rdx, [rcx+88] + MULADD64x512 [rsi], r11, r12, r13, r14, r15, rax, r8, r9, r10, rbx, rbp, r10 + mov rdx, [rcx+24] + MULADD64x512 [rsi+64], r11, r12, r13, r14, r15, rax, r8, r9, r10, rbx, rbp, rbx + // [r12:r15, rax, r8:r10] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r11 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], r12, r13, r14, r15, rax, r8, r9, r10, rbx, rbp + + // [r12:r15, rax, r8:r11] <- z = a0 x b04 - a1 x b14 + z + mov rdx, [rcx+96] + MULADD64x512 [rsi], r12, r13, r14, r15, rax, r8, r9, r10, r11, rbx, rbp, r11 + mov rdx, [rcx+32] + MULADD64x512 [rsi+64], r12, r13, r14, r15, rax, r8, r9, r10, r11, rbx, rbp, rbx + // [r13:r15, rax, r8:r11] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r12 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], r13, r14, r15, rax, r8, r9, r10, r11, rbx, rbp + + // [r13:r15, rax, r8:r12] <- z = a0 x b05 - a1 x b15 + z + mov rdx, [rcx+104] + MULADD64x512 [rsi], r13, r14, r15, rax, r8, r9, r10, r11, r12, rbx, rbp, r12 + mov rdx, [rcx+40] + MULADD64x512 [rsi+64], r13, r14, r15, rax, r8, r9, r10, r11, r12, rbx, rbp, rbx + // [r14:r15, rax, r8:r12] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r13 // rdx <- z0 + MULADD64x64 
[rip+p_plus_1+56], r14, r15, rax, r8, r9, r10, r11, r12, rbx, rbp + + // [r14:r15, rax, r8:r12] <- z = a0 x b06 - a1 x b16 + z + mov rdx, [rcx+112] + MULADD64x512 [rsi], r14, r15, rax, r8, r9, r10, r11, r12, r13, rbx, rbp, r13 + mov rdx, [rcx+48] + MULADD64x512 [rsi+64], r14, r15, rax, r8, r9, r10, r11, r12, r13, rbx, rbp, rbx + // [r15, rax, r8:r13] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r14 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], r15, rax, r8, r9, r10, r11, r12, r13, rbx, rbp + + // [r15, rax, r8:r12] <- z = a0 x b06 - a1 x b16 + z + mov rdx, [rcx+120] + MULADD64x512 [rsi], r15, rax, r8, r9, r10, r11, r12, r13, r14, rbx, rbp, r14 + mov rdx, [rcx+56] + MULADD64x512 [rsi+64], r15, rax, r8, r9, r10, r11, r12, r13, r14, rbx, rbp, rbx + // [rax, r8:r14] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, r15 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], rax, r8, r9, r10, r11, r12, r13, r14, rbx, rbp + + mov [rdi], rax + mov [rdi+8], r8 + mov [rdi+16], r9 + mov [rdi+24], r10 + mov [rdi+32], r11 + mov [rdi+40], r12 + mov [rdi+48], r13 + mov [rdi+56], r14 + pop rbp + pop rbx + pop r15 + pop r14 + pop r13 + pop r12 + ret + +///////////////////////////////////////////////////////////////// MACRO +// z = a x b (mod p) +// Inputs: base memory pointers M0 (a), M1 (b) +// bi pre-stored in rdx, +// accumulator z in [Z0:Z8], pre-stores a0 x b +// Output: [Z0:Z8] +// Temps: regs T0:T1 +///////////////////////////////////////////////////////////////// +.macro FPMUL512x512 M0, M1, Z0, Z1, Z2, Z3, Z4, Z5, Z6, Z7, Z8, T0, T1 + // [Z1:Z8] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, \Z0 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], \Z1, \Z2, \Z3, \Z4, \Z5, \Z6, \Z7, \Z8, \T0, \T1 + + // [Z1:Z8, Z0] <- z = a01 x a1 + z + mov rdx, 8\M0 + MULADD64x512 \M1, \Z1, \Z2, \Z3, \Z4, \Z5, \Z6, \Z7, \Z8, \Z0, \T0, \T1, \Z0 + // [Z2:Z8, Z0] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, \Z1 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], \Z2, \Z3, \Z4, \Z5, \Z6, \Z7, \Z8, \Z0, \T0, \T1 + + // [Z2:Z8, Z0:Z1] <- z = a02 x a1 + z + mov rdx, 16\M0 + MULADD64x512 \M1, \Z2, \Z3, \Z4, \Z5, \Z6, \Z7, \Z8, \Z0, \Z1, \T0, \T1, \Z1 + // [Z3:Z8, Z0:Z1] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, \Z2 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], \Z3, \Z4, \Z5, \Z6, \Z7, \Z8, \Z0, \Z1, \T0, \T1 + + // [Z3:Z8, Z0:Z2] <- z = a03 x a1 + z + mov rdx, 24\M0 + MULADD64x512 \M1, \Z3, \Z4, \Z5, \Z6, \Z7, \Z8, \Z0, \Z1, \Z2, \T0, \T1, \Z2 + // [Z4:Z8, Z0:Z2] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, \Z3 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], \Z4, \Z5, \Z6, \Z7, \Z8, \Z0, \Z1, \Z2, \T0, \T1 + + // [Z4:Z8, Z0:Z3] <- z = a04 x a1 + z + mov rdx, 32\M0 + MULADD64x512 \M1, \Z4, \Z5, \Z6, \Z7, \Z8, \Z0, \Z1, \Z2, \Z3, \T0, \T1, \Z3 + // [Z5:Z8, Z0:Z3] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, \Z4 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], \Z5, \Z6, \Z7, \Z8, \Z0, \Z1, \Z2, \Z3, \T0, \T1 + + // [Z5:Z8, Z0:Z4] <- z = a05 x a1 + z + mov rdx, 40\M0 + MULADD64x512 \M1, \Z5, \Z6, \Z7, \Z8, \Z0, \Z1, \Z2, \Z3, \Z4, \T0, \T1, \Z4 + // [Z6:Z8, Z0:Z4] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, \Z5 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], \Z6, \Z7, \Z8, \Z0, \Z1, \Z2, \Z3, \Z4, \T0, \T1 + + // [Z6:Z8, Z0:Z5] <- z = a06 x a1 + z + mov rdx, 48\M0 + MULADD64x512 \M1, \Z6, \Z7, \Z8, \Z0, \Z1, \Z2, \Z3, \Z4, \Z5, \T0, \T1, \Z5 + // [Z7:Z8, Z0:Z5] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, \Z6 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], \Z7, \Z8, \Z0, \Z1, \Z2, \Z3, \Z4, \Z5, \T0, \T1 + + // [Z7:Z8, Z0:Z6] <- z = a07 x a1 + z + mov rdx, 56\M0 + MULADD64x512 
\M1, \Z7, \Z8, \Z0, \Z1, \Z2, \Z3, \Z4, \Z5, \Z6, \T0, \T1, \Z6 + // [Z8, Z0:Z6] <- z = (z0 x p_plus_1 + z)/2^64 + mov rdx, \Z7 // rdx <- z0 + MULADD64x64 [rip+p_plus_1+56], \Z8, \Z0, \Z1, \Z2, \Z3, \Z4, \Z5, \Z6, \T0, \T1 +.endm + +//*********************************************************************** +// Squaring in GF(p^2), non-complex part +// Operation: c [rdi] = (a0+a1) x (a0-a1) +// Inputs: a = [a1, a0] stored in [rsi] +// Output: c stored in [rdi] +//*********************************************************************** +.global fp2_sq_c0 +fp2_sq_c0: + push r12 + push r13 + push r14 + push r15 + push rbx + push rbp + + // a0 + a1 + mov rdx, [rsi] + mov r9, [rsi+8] + mov r10, [rsi+16] + mov r11, [rsi+24] + mov r12, [rsi+32] + mov r13, [rsi+40] + mov r14, [rsi+48] + mov r15, [rsi+56] + add rdx, [rsi+64] + adc r9, [rsi+72] + adc r10, [rsi+80] + adc r11, [rsi+88] + adc r12, [rsi+96] + adc r13, [rsi+104] + adc r14, [rsi+112] + adc r15, [rsi+120] + mov [rdi], rdx + mov [rdi+8], r9 + mov [rdi+16], r10 + mov [rdi+24], r11 + mov [rdi+32], r12 + mov [rdi+40], r13 + mov [rdi+48], r14 + mov [rdi+56], r15 + + // a0 - a1 + 2p + mov r8, [rsi] + mov r10, [rsi+8] + mov r12, [rsi+16] + mov r13, [rsi+24] + mov r14, [rsi+32] + mov r15, [rsi+40] + mov rbx, [rsi+48] + mov rbp, [rsi+56] + sub r8, [rsi+64] + sbb r10, [rsi+72] + sbb r12, [rsi+80] + sbb r13, [rsi+88] + sbb r14, [rsi+96] + sbb r15, [rsi+104] + sbb rbx, [rsi+112] + sbb rbp, [rsi+120] + mov rax, [rip+p2] + add r8, rax + mov rax, [rip+p2+8] + adc r10, rax + adc r12, rax + adc r13, rax + adc r14, rax + adc r15, rax + adc rbx, rax + adc rbp, [rip+p2+56] + mov [rdi+64], r8 + mov [rdi+72], r10 + mov [rdi+80], r12 + mov [rdi+88], r13 + mov [rdi+96], r14 + mov [rdi+104], r15 + mov [rdi+112], rbx + mov [rdi+120], rbp + + // [r8:r15, rax] <- z = a00 x a1 + mulx r9, r8, r8 + xor rax, rax + mulx r10, r11, r10 + adcx r9, r11 + mulx r11, r12, r12 + adcx r10, r12 + mulx r12, r13, r13 + adcx r11, r13 + mulx r13, r14, r14 + adcx r12, r14 + mulx r14, r15, r15 + adcx r13, r15 + mulx r15, rbx, rbx + adcx r14, rbx + mulx rax, rbp, rbp + adcx r15, rbp + adc rax, 0 + + FPMUL512x512 [rdi], [rdi+64], r8, r9, r10, r11, r12, r13, r14, r15, rax, rbx, rbp + + mov [rdi], rax + mov [rdi+8], r8 + mov [rdi+16], r9 + mov [rdi+24], r10 + mov [rdi+32], r11 + mov [rdi+40], r12 + mov [rdi+48], r13 + mov [rdi+56], r14 + pop rbp + pop rbx + pop r15 + pop r14 + pop r13 + pop r12 + ret + +//*********************************************************************** +// Squaring in GF(p^2), complex part +// Operation: c [rdi] = 2a0 x a1 +// Inputs: a = [a1, a0] stored in [reg_p1] +// Output: c stored in [rdi] +//*********************************************************************** +.global fp2_sq_c1 +fp2_sq_c1: + push r12 + push r13 + push r14 + push r15 + push rbx + push rbp + + mov rdx, [rsi] + mov r9, [rsi+8] + mov r10, [rsi+16] + mov r11, [rsi+24] + mov r12, [rsi+32] + mov r13, [rsi+40] + mov r14, [rsi+48] + mov r15, [rsi+56] + add rdx, rdx + adc r9, r9 + adc r10, r10 + adc r11, r11 + adc r12, r12 + adc r13, r13 + adc r14, r14 + adc r15, r15 + sub rsp, 64 + mov [rsp+8], r9 + mov [rsp+16], r10 + mov [rsp+24], r11 + mov [rsp+32], r12 + mov [rsp+40], r13 + mov [rsp+48], r14 + mov [rsp+56], r15 + + // [r8:r15, rax] <- z = a00 x a1 + mulx r9, r8, [rsi+64] + xor rax, rax + mulx r10, r11, [rsi+72] + adcx r9, r11 + mulx r11, r12, [rsi+80] + adcx r10, r12 + mulx r12, r13, [rsi+88] + adcx r11, r13 + mulx r13, r14, [rsi+96] + adcx r12, r14 + mulx r14, r15, [rsi+104] + adcx r13, r15 + mulx 
r15, rbp, [rsi+112] + adcx r14, rbp + mulx rax, rbx, [rsi+120] + adcx r15, rbx + adc rax, 0 + + FPMUL512x512 [rsp], [rsi+64], r8, r9, r10, r11, r12, r13, r14, r15, rax, rbx, rbp + add rsp, 64 + + mov [rdi], rax + mov [rdi+8], r8 + mov [rdi+16], r9 + mov [rdi+24], r10 + mov [rdi+32], r11 + mov [rdi+40], r12 + mov [rdi+48], r13 + mov [rdi+56], r14 + pop rbp + pop rbx + pop r15 + pop r14 + pop r13 + pop r12 + ret + +//*********************************************************************** +// Field multiplication in GF(p) +// Operation: c = a x b mod p +// Inputs: a stored in [rsi], b stored in [rdx] +// Output: c stored in [rdi] +//*********************************************************************** +.global fp_mul +fp_mul: + push r12 + push r13 + push r14 + push r15 + push rbx + push rbp + mov rcx, rdx + + // [r8:r15, rax] <- z = a x b0 + mov rdx, [rcx] + mulx r9, r8, [rsi] + xor rax, rax + mulx r10, r11, [rsi+8] + adcx r9, r11 + mulx r11, r12, [rsi+16] + adcx r10, r12 + mulx r12, r13, [rsi+24] + adcx r11, r13 + mulx r13, r14, [rsi+32] + adcx r12, r14 + mulx r14, r15, [rsi+40] + adcx r13, r15 + mulx r15, rbp, [rsi+48] + adcx r14, rbp + mulx rax, rbx, [rsi+56] + adcx r15, rbx + adc rax, 0 + + FPMUL512x512 [rcx], [rsi], r8, r9, r10, r11, r12, r13, r14, r15, rax, rbx, rbp + + mov [rdi], rax + mov [rdi+8], r8 + mov [rdi+16], r9 + mov [rdi+24], r10 + mov [rdi+32], r11 + mov [rdi+40], r12 + mov [rdi+48], r13 + mov [rdi+56], r14 + pop rbp + pop rbx + pop r15 + pop r14 + pop r13 + pop r12 + ret + +.global fp_sqr +fp_sqr: + mov rdx, rsi + jmp fp_mul diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp_constants.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp_constants.h new file mode 100644 index 0000000000..094cb4de22 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp_constants.h @@ -0,0 +1,17 @@ +#if RADIX == 32 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +#define NWORDS_FIELD 16 +#else +#define NWORDS_FIELD 18 +#endif +#define NWORDS_ORDER 16 +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +#define NWORDS_FIELD 8 +#else +#define NWORDS_FIELD 9 +#endif +#define NWORDS_ORDER 8 +#endif +#define BITS 512 +#define LOG2P 9 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/gf27500.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/gf27500.c new file mode 100644 index 0000000000..11cbd6cf08 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/gf27500.c @@ -0,0 +1,839 @@ +#include "gf27500.h" + +// see gf27500.h +const gf27500 ZERO = { 0, 0, 0, 0, 0, 0, 0, 0 }; + +// see gf27500.h +const gf27500 ONE = { 0x0000000000000097, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0130000000000000 }; + +// see gf27500.h +const gf27500 gf27500_MINUS_ONE = { 0xFFFFFFFFFFFFFF68, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x007FFFFFFFFFFFFF }; + +// Montgomery representation of 2^256. +static const gf27500 R2 = { 0xED097B425ED0F19A, 0x097B425ED097B425, 0x7B425ED097B425ED, 0x425ED097B425ED09, + 0x5ED097B425ED097B, 0xD097B425ED097B42, 0x97B425ED097B425E, 0x0045ED097B425ED0 }; + +// The modulus itself (this is also a valid representation of zero). 
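+// Note: the modulus is q = 27*2^500 - 1, consistent with "27*2^500 = 1 mod q"
+// in inner_gf27500_partial_reduce() and "(q+1)/4 = 27*2^498" in gf27500_sqrt();
+// its top limb is 27*2^52 - 1 = 0x01AFFFFFFFFFFFFF and all lower limbs are
+// all-ones. This also explains ONE above: with R = 2^512, the Montgomery
+// representation of 1 is 2^512 mod q = 4096*2^500 = (151*27 + 19)*2^500,
+// which reduces to 151 + 19*2^500 (mod q), i.e. low limb 0x97 and top limb
+// 0x13 << 52 = 0x0130000000000000.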
+static const gf27500 MODULUS = { 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x01AFFFFFFFFFFFFF }; + +// 1/2^496 (in Montgomery representation). +static const gf27500 INVT496 = { 0x0000000000010000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 }; + +static const gf27500 PM1O3 = { 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x011fffffffffffff }; + +// Expand the most significant bit of x into a full-width 64-bit word +// (0x0000000000000000 or 0xFFFFFFFFFFFFFFFF). +static inline uint64_t +sgnw(uint64_t x) +{ + return (uint64_t)(*(int64_t *)&x >> 63); +} + +// d <- u*f + v*g (in the field) +// Coefficients f and g are provided as unsigned integers, but they +// really are signed values which must be less than 2^62 (in absolute value). +static void +gf27500_lin(gf27500 *d, const gf27500 *u, const gf27500 *v, uint64_t f, uint64_t g) +{ + // f <- abs(f), keeping the sign in sf, and negating u accordingly + uint64_t sf = sgnw(f); + f = (f ^ sf) - sf; + gf27500 tu; + gf27500_neg(&tu, u); + gf27500_select(&tu, u, &tu, (uint32_t)sf); + + // g <- abs(g), keeping the sign in sg, and negating v accordingly + uint64_t sg = sgnw(g); + g = (g ^ sg) - sg; + gf27500 tv; + gf27500_neg(&tv, v); + gf27500_select(&tv, v, &tv, (uint32_t)sg); + + // Linear combination over plain integers. + uint64_t d0, d1, d2, d3, d4, d5, d6, d7, t; + inner_gf27500_umul_x2(d0, t, tu.v0, f, tv.v0, g); + inner_gf27500_umul_x2_add(d1, t, tu.v1, f, tv.v1, g, t); + inner_gf27500_umul_x2_add(d2, t, tu.v2, f, tv.v2, g, t); + inner_gf27500_umul_x2_add(d3, t, tu.v3, f, tv.v3, g, t); + inner_gf27500_umul_x2_add(d4, t, tu.v4, f, tv.v4, g, t); + inner_gf27500_umul_x2_add(d5, t, tu.v5, f, tv.v5, g, t); + inner_gf27500_umul_x2_add(d6, t, tu.v6, f, tv.v6, g, t); + inner_gf27500_umul_x2_add(d7, t, tu.v7, f, tv.v7, g, t); + + // Reduction: split into low part (500 bits) and high part + // (75 bits, since t can be up to 63 bits). If the high + // part is h, then: + // h*2^500 = (h mod 27)*2^500 + floor(h/27) mod q + uint64_t h0 = (d7 >> 52) | (t << 12); + uint64_t h1 = t >> 52; + d7 &= 0x000FFFFFFFFFFFFF; + + uint64_t z0, z1, quo0, rem0, quo1, rem1; + inner_gf27500_umul(z0, z1, h0, 0x97B425ED097B425F); + (void)z0; + quo0 = z1 >> 4; + rem0 = h0 - (27 * quo0); + quo1 = (0x12F7 * h1) >> 17; + rem1 = h1 - (27 * quo1); + + // h = rem0 + 27*quo0 + (rem1 + 27*quo1)*2^64 + // = rem0 + rem1 + 27*(quo0 + quo1*2^64 + rem1*((2^64 - 1)/27)) + // We add rem0 and rem1 modulo 27, with an extra carry that + // goes into the folded part (multiple of 27). + uint64_t e, f0, f1; + unsigned char cc; + cc = inner_gf27500_adc(0, rem0 + 0xFFFFFFFFFFFFFFE5, rem1, &e); + cc = inner_gf27500_adc(cc, quo0, rem1 * 0x97B425ED097B425, &f0); + cc = inner_gf27500_adc(cc, quo1, 0, &f1); + assert(cc == 0); + e -= 0xFFFFFFFFFFFFFFE5; + + // Now we only have to add e*2^512 + f0:f1 to the low part. 
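+    // (Here e is the "h mod 27" part and is folded in at bit 500, via the
+    // e << 52 added into limb d7 below, while f0:f1 is the floor(h/27) part
+    // added at the bottom; this matches the identity
+    // h*2^500 = (h mod 27)*2^500 + floor(h/27) mod q stated above.)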
+ cc = inner_gf27500_adc(0, d0, f0, &d0); + cc = inner_gf27500_adc(cc, d1, f1, &d1); + cc = inner_gf27500_adc(cc, d2, 0, &d2); + cc = inner_gf27500_adc(cc, d3, 0, &d3); + cc = inner_gf27500_adc(cc, d4, 0, &d4); + cc = inner_gf27500_adc(cc, d5, 0, &d5); + cc = inner_gf27500_adc(cc, d6, 0, &d6); + (void)inner_gf27500_adc(cc, d7, e << 52, &d7); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + d->v4 = d4; + d->v5 = d5; + d->v6 = d6; + d->v7 = d7; +} + +// d <- abs(floor((a*f + b*g) / 2^31)) +// Coefficients f and g are provided as unsigned integer, but they really +// are signed values, which MUST be at most 2^31 in absolute value. +// The computation is performed over the integers, not modulo q. The low +// 31 bits are dropped (in practice, callers provided appropriate coefficients +// f and g such that a*f + b*g is a multiple of 2^31. +// +// If a*f + b*g is negative, then the absolute value is computed, and the +// function returns 0xFFFFFFFFFFFFFFFF; otherwise, the function returns +// 0x0000000000000000. +static uint64_t +lindiv31abs(gf27500 *d, const gf27500 *a, const gf27500 *b, uint64_t f, uint64_t g) +{ + // f <- abs(f), keeping the sign in sf + uint64_t sf = sgnw(f); + f = (f ^ sf) - sf; + + // g <- abs(g), keeping the sign in sg + uint64_t sg = sgnw(g); + g = (g ^ sg) - sg; + + // Apply the signs of f and g to the source operands. + uint64_t a0, a1, a2, a3, a4, a5, a6, a7, a8; + uint64_t b0, b1, b2, b3, b4, b5, b6, b7, b8; + unsigned char cc; + + cc = inner_gf27500_sbb(0, a->v0 ^ sf, sf, &a0); + cc = inner_gf27500_sbb(cc, a->v1 ^ sf, sf, &a1); + cc = inner_gf27500_sbb(cc, a->v2 ^ sf, sf, &a2); + cc = inner_gf27500_sbb(cc, a->v3 ^ sf, sf, &a3); + cc = inner_gf27500_sbb(cc, a->v4 ^ sf, sf, &a4); + cc = inner_gf27500_sbb(cc, a->v5 ^ sf, sf, &a5); + cc = inner_gf27500_sbb(cc, a->v6 ^ sf, sf, &a6); + cc = inner_gf27500_sbb(cc, a->v7 ^ sf, sf, &a7); + (void)inner_gf27500_sbb(cc, 0, 0, &a8); + + cc = inner_gf27500_sbb(0, b->v0 ^ sg, sg, &b0); + cc = inner_gf27500_sbb(cc, b->v1 ^ sg, sg, &b1); + cc = inner_gf27500_sbb(cc, b->v2 ^ sg, sg, &b2); + cc = inner_gf27500_sbb(cc, b->v3 ^ sg, sg, &b3); + cc = inner_gf27500_sbb(cc, b->v4 ^ sg, sg, &b4); + cc = inner_gf27500_sbb(cc, b->v5 ^ sg, sg, &b5); + cc = inner_gf27500_sbb(cc, b->v6 ^ sg, sg, &b6); + cc = inner_gf27500_sbb(cc, b->v7 ^ sg, sg, &b7); + (void)inner_gf27500_sbb(cc, 0, 0, &b8); + + // Compute a*f + b*g into d0:d1:d2:d3:d4. Since f and g are at + // most 2^31, we can add two 128-bit products with no overflow. + // Note: a4 and b4 are both in {0, -1}. + uint64_t d0, d1, d2, d3, d4, d5, d6, d7, d8, t; + inner_gf27500_umul_x2(d0, t, a0, f, b0, g); + inner_gf27500_umul_x2_add(d1, t, a1, f, b1, g, t); + inner_gf27500_umul_x2_add(d2, t, a2, f, b2, g, t); + inner_gf27500_umul_x2_add(d3, t, a3, f, b3, g, t); + inner_gf27500_umul_x2_add(d4, t, a4, f, b4, g, t); + inner_gf27500_umul_x2_add(d5, t, a5, f, b5, g, t); + inner_gf27500_umul_x2_add(d6, t, a6, f, b6, g, t); + inner_gf27500_umul_x2_add(d7, t, a7, f, b7, g, t); + d8 = t - (a8 & f) - (b8 & g); + + // Right-shift the value by 31 bits. + d0 = (d0 >> 31) | (d1 << 33); + d1 = (d1 >> 31) | (d2 << 33); + d2 = (d2 >> 31) | (d3 << 33); + d3 = (d3 >> 31) | (d4 << 33); + d4 = (d4 >> 31) | (d5 << 33); + d5 = (d5 >> 31) | (d6 << 33); + d6 = (d6 >> 31) | (d7 << 33); + d7 = (d7 >> 31) | (d8 << 33); + + // If the result is negative, negate it. 
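+    // (Constant-time conditional negation: with t = 0 the lines below copy
+    // the value unchanged; with t = all-ones, (x ^ t) - t = ~x + 1 = -x, and
+    // the borrow chain propagates the +1 across all eight limbs, i.e. a
+    // two's-complement negation of the full 512-bit value.)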
+ t = sgnw(d8); + cc = inner_gf27500_sbb(0, d0 ^ t, t, &d0); + cc = inner_gf27500_sbb(cc, d1 ^ t, t, &d1); + cc = inner_gf27500_sbb(cc, d2 ^ t, t, &d2); + cc = inner_gf27500_sbb(cc, d3 ^ t, t, &d3); + cc = inner_gf27500_sbb(cc, d4 ^ t, t, &d4); + cc = inner_gf27500_sbb(cc, d5 ^ t, t, &d5); + cc = inner_gf27500_sbb(cc, d6 ^ t, t, &d6); + (void)inner_gf27500_sbb(cc, d7 ^ t, t, &d7); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + d->v4 = d4; + d->v5 = d5; + d->v6 = d6; + d->v7 = d7; + return t; +} + +// lzcnt(x) returns the number of leading bits of value 0 in x. It supports +// x == 0 (in which case the function returns 64). +#if defined __LZCNT__ +static inline uint64_t +lzcnt(uint64_t x) +{ + return _lzcnt_u64(x); +} +#else +static inline uint64_t +lzcnt(uint64_t x) +{ + uint64_t m, s; + m = sgnw((x >> 32) - 1); + s = m & 32; + x = (x >> 32) ^ (m & (x ^ (x >> 32))); + m = sgnw((x >> 16) - 1); + s |= m & 16; + x = (x >> 16) ^ (m & (x ^ (x >> 16))); + m = sgnw((x >> 8) - 1); + s |= m & 8; + x = (x >> 8) ^ (m & (x ^ (x >> 8))); + m = sgnw((x >> 4) - 1); + s |= m & 4; + x = (x >> 4) ^ (m & (x ^ (x >> 4))); + m = sgnw((x >> 2) - 1); + s |= m & 2; + x = (x >> 2) ^ (m & (x ^ (x >> 2))); + + // At this point, x fits on 2 bits. Count of extra zeros: + // x = 0 -> 2 + // x = 1 -> 1 + // x = 2 -> 0 + // x = 3 -> 0 + s += (2 - x) & ((x - 3) >> 2); + return s; +} +#endif + +// see gf27500.h +uint32_t +gf27500_div(gf27500 *d, const gf27500 *x, const gf27500 *y) +{ + // Extended binary GCD: + // + // a <- y + // b <- q (modulus) + // u <- x (self) + // v <- 0 + // + // Value a is normalized (in the 0..q-1 range). Values a and b are + // then considered as (signed) integers. Values u and v are field + // elements. + // + // Invariants: + // a*x = y*u mod q + // b*x = y*v mod q + // b is always odd + // + // At each step: + // if a is even, then: + // a <- a/2, u <- u/2 mod q + // else: + // if a < b: + // (a, u, b, v) <- (b, v, a, u) + // a <- (a-b)/2, u <- (u-v)/2 mod q + // + // What we implement below is the optimized version of this + // algorithm, as described in https://eprint.iacr.org/2020/972 + + gf27500 a, b, u, v; + uint64_t xa, xb, f0, g0, f1, g1; + uint32_t r; + + r = ~gf27500_iszero(y); + inner_gf27500_normalize(&a, y); + b = MODULUS; + u = *x; + v = ZERO; + + // Generic loop does 31*31 = 961 inner iterations. + for (int i = 0; i < 31; i++) { + // Get approximations of a and b over 64 bits: + // - If len(a) <= 64 and len(b) <= 64, then we just use + // their values (low limbs). 
+ // - Otherwise, with n = max(len(a), len(b)), we use: + // (a mod 2^31) + 2^31*floor(a / 2^(n - 33)) + // (b mod 2^31) + 2^31*floor(b / 2^(n - 33)) + uint64_t m7 = a.v7 | b.v7; + uint64_t m6 = a.v6 | b.v6; + uint64_t m5 = a.v5 | b.v5; + uint64_t m4 = a.v4 | b.v4; + uint64_t m3 = a.v3 | b.v3; + uint64_t m2 = a.v2 | b.v2; + uint64_t m1 = a.v1 | b.v1; + uint64_t tnz7 = sgnw(m7 | -m7); + uint64_t tnz6 = sgnw(m6 | -m6) & ~tnz7; + uint64_t tnz5 = sgnw(m5 | -m5) & ~tnz7 & ~tnz6; + uint64_t tnz4 = sgnw(m4 | -m4) & ~tnz7 & ~tnz6 & ~tnz5; + uint64_t tnz3 = sgnw(m3 | -m3) & ~tnz7 & ~tnz6 & ~tnz5 & ~tnz4; + uint64_t tnz2 = sgnw(m2 | -m2) & ~tnz7 & ~tnz6 & ~tnz5 & ~tnz4 & ~tnz3; + uint64_t tnz1 = sgnw(m1 | -m1) & ~tnz7 & ~tnz6 & ~tnz5 & ~tnz4 & ~tnz3 & ~tnz2; + uint64_t tnzm = (m7 & tnz7) | (m6 & tnz6) | (m5 & tnz5) | (m4 & tnz4) | (m3 & tnz3) | (m2 & tnz2) | (m1 & tnz1); + uint64_t tnza = (a.v7 & tnz7) | (a.v6 & tnz6) | (a.v5 & tnz5) | (a.v4 & tnz4) | (a.v3 & tnz3) | (a.v2 & tnz2) | + (a.v1 & tnz1); + uint64_t tnzb = (b.v7 & tnz7) | (b.v6 & tnz6) | (b.v5 & tnz5) | (b.v4 & tnz4) | (b.v3 & tnz3) | (b.v2 & tnz2) | + (b.v1 & tnz1); + uint64_t snza = (a.v6 & tnz7) | (a.v5 & tnz6) | (a.v4 & tnz5) | (a.v3 & tnz4) | (a.v2 & tnz3) | (a.v1 & tnz2) | + (a.v0 & tnz1); + uint64_t snzb = (b.v6 & tnz7) | (b.v5 & tnz6) | (b.v4 & tnz5) | (b.v3 & tnz4) | (b.v2 & tnz3) | (b.v1 & tnz2) | + (b.v0 & tnz1); + + // If both len(a) <= 64 and len(b) <= 64, then: + // tnzm = 0 + // tnza = 0, snza = 0, tnzb = 0, snzb = 0 + // Otherwise: + // tnzm != 0 + // tnza contains the top non-zero limb of a + // snza contains the limb right below tnza + // tnzb contains the top non-zero limb of a + // snzb contains the limb right below tnzb + // + // We count the number of leading zero bits in tnzm: + // - If s <= 31, then the top 31 bits can be extracted from + // tnza and tnzb alone. + // - If 32 <= s <= 63, then we need some bits from snza and + // snzb as well. + int64_t s = lzcnt(tnzm); + uint64_t sm = (uint64_t)((31 - s) >> 63); + tnza ^= sm & (tnza ^ ((tnza << 32) | (snza >> 32))); + tnzb ^= sm & (tnzb ^ ((tnzb << 32) | (snzb >> 32))); + s -= 32 & sm; + tnza <<= s; + tnzb <<= s; + + // At this point: + // - If len(a) <= 64 and len(b) <= 64, then: + // tnza = 0 + // tnzb = 0 + // tnz1 = tnz2 = tnz3 = tnz4 = tnz5 = 0 + // we want to use the entire low words of a and b + // - Otherwise, we want to use the top 33 bits of tnza and + // tnzb, and the low 31 bits of the low words of a and b. + uint64_t tzx = ~(tnz1 | tnz2 | tnz3 | tnz4 | tnz5 | tnz6 | tnz7); + tnza |= a.v0 & tzx; + tnzb |= b.v0 & tzx; + xa = (a.v0 & 0x7FFFFFFF) | (tnza & 0xFFFFFFFF80000000); + xb = (b.v0 & 0x7FFFFFFF) | (tnzb & 0xFFFFFFFF80000000); + + // Compute the 31 inner iterations on xa and xb. + uint64_t fg0 = (uint64_t)1; + uint64_t fg1 = (uint64_t)1 << 32; + for (int j = 0; j < 31; j++) { + uint64_t a_odd, swap, t0, t1, t2; + unsigned char cc; + a_odd = -(xa & 1); + cc = inner_gf27500_sbb(0, xa, xb, &t0); + (void)inner_gf27500_sbb(cc, 0, 0, &swap); + swap &= a_odd; + t1 = swap & (xa ^ xb); + xa ^= t1; + xb ^= t1; + t2 = swap & (fg0 ^ fg1); + fg0 ^= t2; + fg1 ^= t2; + xa -= a_odd & xb; + fg0 -= a_odd & fg1; + xa >>= 1; + fg1 <<= 1; + } + fg0 += 0x7FFFFFFF7FFFFFFF; + fg1 += 0x7FFFFFFF7FFFFFFF; + f0 = (fg0 & 0xFFFFFFFF) - (uint64_t)0x7FFFFFFF; + g0 = (fg0 >> 32) - (uint64_t)0x7FFFFFFF; + f1 = (fg1 & 0xFFFFFFFF) - (uint64_t)0x7FFFFFFF; + g1 = (fg1 >> 32) - (uint64_t)0x7FFFFFFF; + + // Propagate updates to a, b, u and v. 
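+        // ((f0, g0, f1, g1) is the aggregated 2x2 update matrix of the 31
+        // approximate inner steps; it was carried through the loop packed in
+        // fg0/fg1 (f in the low 32 bits, g in the high 32 bits), and the
+        // additions of 0x7FFFFFFF7FFFFFFF with the subtractions of 0x7FFFFFFF
+        // just above recover the signed factors from the packed form.
+        // lindiv31abs() applies the matrix to the full-width a and b
+        // (including the division by 2^31), while gf27500_lin() applies it
+        // to u and v modulo q, after folding the signs back in.)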
+ gf27500 na, nb, nu, nv; + uint64_t nega = lindiv31abs(&na, &a, &b, f0, g0); + uint64_t negb = lindiv31abs(&nb, &a, &b, f1, g1); + f0 = (f0 ^ nega) - nega; + g0 = (g0 ^ nega) - nega; + f1 = (f1 ^ negb) - negb; + g1 = (g1 ^ negb) - negb; + gf27500_lin(&nu, &u, &v, f0, g0); + gf27500_lin(&nv, &u, &v, f1, g1); + a = na; + b = nb; + u = nu; + v = nv; + } + + // If y is invertible, then the final GCD is 1, and + // len(a) + len(b) <= 49, so we can end the computation with + // the low words directly. We only need 47 iterations to reach + // the point where b = 1. + // + // If y is zero, then v is unchanged (hence zero) and none of + // the subsequent iterations will change it either, so we get + // 0 on output, which is what we want. + xa = a.v0; + xb = b.v0; + f0 = 1; + g0 = 0; + f1 = 0; + g1 = 1; + for (int j = 0; j < 47; j++) { + uint64_t a_odd, swap, t0, t1, t2, t3; + unsigned char cc; + a_odd = -(xa & 1); + cc = inner_gf27500_sbb(0, xa, xb, &t0); + (void)inner_gf27500_sbb(cc, 0, 0, &swap); + swap &= a_odd; + t1 = swap & (xa ^ xb); + xa ^= t1; + xb ^= t1; + t2 = swap & (f0 ^ f1); + f0 ^= t2; + f1 ^= t2; + t3 = swap & (g0 ^ g1); + g0 ^= t3; + g1 ^= t3; + xa -= a_odd & xb; + f0 -= a_odd & f1; + g0 -= a_odd & g1; + xa >>= 1; + f1 <<= 1; + g1 <<= 1; + } + gf27500_lin(d, &u, &v, f1, g1); + + // At the point: + // - Numerator and denominator were both in Montgomery representation, + // but the two factors R canceled each other. + // - We have injected 31*31+47 = 1008 extra factors of 2, hence we + // must divide the result by 2^1008. + // - However, we also want to obtain the result in Montgomery + // representation, i.e. multiply by 2^512. We thus want to + // divide the current result by 2^(1008 - 512) = 2^496. + // - We do this division by using a Montgomery multiplication with + // the Montgomery representation of 1/2^496, i.e. the integer + // 2^512/2^496 = 2^16. + gf27500_mul(d, d, &INVT496); + return r; +} + +// see gf27500.h +uint32_t +gf27500_invert(gf27500 *d, const gf27500 *a) +{ + return gf27500_div(d, &ONE, a); +} + +// see gf27500.h +int32_t +gf27500_legendre(const gf27500 *x) +{ + // Same algorithm as the binary GCD in gf27500_div(), with + // a few differences: + // - We do not keep track of the Bézout coefficients u and v. + // - In each inner iteration we adjust the running symbol value, + // which uses the low 3 bits of the values. + // - Since we need two extra bits of look-ahead, we can only run + // 29 inner iterations, and then need an extra recomputation + // for the last 2. + + gf27500 a, b; + uint64_t xa, xb, f0, g0, f1, g1, ls; + + inner_gf27500_normalize(&a, x); + b = MODULUS; + ls = 0; // running symbol information in bit 1. + + // Outer loop + for (int i = 0; i < 31; i++) { + // Get approximations of a and b over 64 bits. 
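+        // (In the inner loop below, the symbol is tracked in bit 1 of ls:
+        // a swap flips it when both xa and xb are 3 mod 4 (quadratic
+        // reciprocity, the `swap & xa & xb` term), and each halving of xa
+        // flips it when xb is 3 or 5 mod 8, i.e. when (2|xb) = -1
+        // (the `(xb + 2) >> 1` term).)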
+ uint64_t m7 = a.v7 | b.v7; + uint64_t m6 = a.v6 | b.v6; + uint64_t m5 = a.v5 | b.v5; + uint64_t m4 = a.v4 | b.v4; + uint64_t m3 = a.v3 | b.v3; + uint64_t m2 = a.v2 | b.v2; + uint64_t m1 = a.v1 | b.v1; + uint64_t tnz7 = sgnw(m7 | -m7); + uint64_t tnz6 = sgnw(m6 | -m6) & ~tnz7; + uint64_t tnz5 = sgnw(m5 | -m5) & ~tnz7 & ~tnz6; + uint64_t tnz4 = sgnw(m4 | -m4) & ~tnz7 & ~tnz6 & ~tnz5; + uint64_t tnz3 = sgnw(m3 | -m3) & ~tnz7 & ~tnz6 & ~tnz5 & ~tnz4; + uint64_t tnz2 = sgnw(m2 | -m2) & ~tnz7 & ~tnz6 & ~tnz5 & ~tnz4 & ~tnz3; + uint64_t tnz1 = sgnw(m1 | -m1) & ~tnz7 & ~tnz6 & ~tnz5 & ~tnz4 & ~tnz3 & ~tnz2; + uint64_t tnzm = (m7 & tnz7) | (m6 & tnz6) | (m5 & tnz5) | (m4 & tnz4) | (m3 & tnz3) | (m2 & tnz2) | (m1 & tnz1); + uint64_t tnza = (a.v7 & tnz7) | (a.v6 & tnz6) | (a.v5 & tnz5) | (a.v4 & tnz4) | (a.v3 & tnz3) | (a.v2 & tnz2) | + (a.v1 & tnz1); + uint64_t tnzb = (b.v7 & tnz7) | (b.v6 & tnz6) | (b.v5 & tnz5) | (b.v4 & tnz4) | (b.v3 & tnz3) | (b.v2 & tnz2) | + (b.v1 & tnz1); + uint64_t snza = (a.v6 & tnz7) | (a.v5 & tnz6) | (a.v4 & tnz5) | (a.v3 & tnz4) | (a.v2 & tnz3) | (a.v1 & tnz2) | + (a.v0 & tnz1); + uint64_t snzb = (b.v6 & tnz7) | (b.v5 & tnz6) | (b.v4 & tnz5) | (b.v3 & tnz4) | (b.v2 & tnz3) | (b.v1 & tnz2) | + (b.v0 & tnz1); + + int64_t s = lzcnt(tnzm); + uint64_t sm = (uint64_t)((31 - s) >> 63); + tnza ^= sm & (tnza ^ ((tnza << 32) | (snza >> 32))); + tnzb ^= sm & (tnzb ^ ((tnzb << 32) | (snzb >> 32))); + s -= 32 & sm; + tnza <<= s; + tnzb <<= s; + + uint64_t tzx = ~(tnz1 | tnz2 | tnz3 | tnz4 | tnz5 | tnz6 | tnz7); + tnza |= a.v0 & tzx; + tnzb |= b.v0 & tzx; + xa = (a.v0 & 0x7FFFFFFF) | (tnza & 0xFFFFFFFF80000000); + xb = (b.v0 & 0x7FFFFFFF) | (tnzb & 0xFFFFFFFF80000000); + + // First 290 inner iterations. + uint64_t fg0 = (uint64_t)1; + uint64_t fg1 = (uint64_t)1 << 32; + for (int j = 0; j < 29; j++) { + uint64_t a_odd, swap, t0, t1, t2; + unsigned char cc; + a_odd = -(xa & 1); + cc = inner_gf27500_sbb(0, xa, xb, &t0); + (void)inner_gf27500_sbb(cc, 0, 0, &swap); + swap &= a_odd; + ls ^= swap & xa & xb; + t1 = swap & (xa ^ xb); + xa ^= t1; + xb ^= t1; + t2 = swap & (fg0 ^ fg1); + fg0 ^= t2; + fg1 ^= t2; + xa -= a_odd & xb; + fg0 -= a_odd & fg1; + xa >>= 1; + fg1 <<= 1; + ls ^= (xb + 2) >> 1; + } + + // Compute the updated a and b (low words only) to get + // enough bits for the next two iterations. + uint64_t fg0z = fg0 + 0x7FFFFFFF7FFFFFFF; + uint64_t fg1z = fg1 + 0x7FFFFFFF7FFFFFFF; + f0 = (fg0z & 0xFFFFFFFF) - (uint64_t)0x7FFFFFFF; + g0 = (fg0z >> 32) - (uint64_t)0x7FFFFFFF; + f1 = (fg1z & 0xFFFFFFFF) - (uint64_t)0x7FFFFFFF; + g1 = (fg1z >> 32) - (uint64_t)0x7FFFFFFF; + uint64_t a0 = (a.v0 * f0 + b.v0 * g0) >> 29; + uint64_t b0 = (a.v0 * f1 + b.v0 * g1) >> 29; + for (int j = 0; j < 2; j++) { + uint64_t a_odd, swap, t0, t1, t2, t3; + unsigned char cc; + a_odd = -(xa & 1); + cc = inner_gf27500_sbb(0, xa, xb, &t0); + (void)inner_gf27500_sbb(cc, 0, 0, &swap); + swap &= a_odd; + ls ^= swap & a0 & b0; + t1 = swap & (xa ^ xb); + xa ^= t1; + xb ^= t1; + t2 = swap & (fg0 ^ fg1); + fg0 ^= t2; + fg1 ^= t2; + t3 = swap & (a0 ^ b0); + a0 ^= t3; + b0 ^= t3; + xa -= a_odd & xb; + fg0 -= a_odd & fg1; + a0 -= a_odd & b0; + xa >>= 1; + fg1 <<= 1; + a0 >>= 1; + ls ^= (b0 + 2) >> 1; + } + + // Propagate updates to a and b. 
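+        // (As in gf27500_div(), the packed factors are unpacked below and
+        // applied with lindiv31abs(); the extra `ls ^= nega & nb.v0` accounts
+        // for the sign change: when the new a had to be negated, the symbol
+        // flips exactly when the new b is 3 mod 4, since (-1|b) = -1 then.)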
+ fg0 += 0x7FFFFFFF7FFFFFFF; + fg1 += 0x7FFFFFFF7FFFFFFF; + f0 = (fg0 & 0xFFFFFFFF) - (uint64_t)0x7FFFFFFF; + g0 = (fg0 >> 32) - (uint64_t)0x7FFFFFFF; + f1 = (fg1 & 0xFFFFFFFF) - (uint64_t)0x7FFFFFFF; + g1 = (fg1 >> 32) - (uint64_t)0x7FFFFFFF; + gf27500 na, nb; + uint64_t nega = lindiv31abs(&na, &a, &b, f0, g0); + (void)lindiv31abs(&nb, &a, &b, f1, g1); + ls ^= nega & nb.v0; + a = na; + b = nb; + } + + // Final iterations: values are at most 49 bits now. We do not + // need to keep track of update coefficients. Just like the GCD, + // we need only 47 iterations, because after 47 iterations, + // value a is 0 or 1, and b is 1, and no further modification to + // the Legendre symbol may happen. + xa = a.v0; + xb = b.v0; + for (int j = 0; j < 47; j++) { + uint64_t a_odd, swap, t0, t1; + unsigned char cc; + a_odd = -(xa & 1); + cc = inner_gf27500_sbb(0, xa, xb, &t0); + (void)inner_gf27500_sbb(cc, 0, 0, &swap); + swap &= a_odd; + ls ^= swap & xa & xb; + t1 = swap & (xa ^ xb); + xa ^= t1; + xb ^= t1; + xa -= a_odd & xb; + xa >>= 1; + ls ^= (xb + 2) >> 1; + } + + // At this point, if the source value was not zero, then the low + // bit of ls contains the QR status (0 = square, 1 = non-square), + // which we need to convert to the expected value (+1 or -1). + // If y == 0, then we return 0, per the API. + uint32_t r = 1 - ((uint32_t)ls & 2); + r &= ~gf27500_iszero(x); + return *(int32_t *)&r; +} + +// see gf27500.h +uint32_t +gf27500_sqrt(gf27500 *d, const gf27500 *a) +{ + // Candidate root is a^((q+1)/4), with (q+1)/4 = 27*2^498 + gf27500 y3, y; + gf27500_square(&y3, a); + gf27500_mul(&y3, &y3, a); // a^3 + gf27500_xsquare(&y, &y3, 3); // a^24 + gf27500_mul(&y, &y, &y3); // a^27 + gf27500_xsquare(&y, &y, 498); // a^27*2^498 + + // Normalize y and negate if necessary, to set the low bit to 0. + // The low bit check must be on the normal representation, + // not the Montgomery representation. + gf27500 yn; + inner_gf27500_montgomery_reduce(&yn, &y); + uint32_t ctl = -((uint32_t)yn.v0 & 1); + gf27500_neg(&yn, &y); + gf27500_select(&y, &y, &yn, ctl); + + // Check whether the candidate is indeed a square root. + gf27500_square(&yn, &y); + uint32_t r = gf27500_equals(&yn, a); + *d = y; + return r; +} + +// Little-endian encoding of a 64-bit integer. +static inline void +enc64le(void *dst, uint64_t x) +{ + uint8_t *buf = dst; + buf[0] = (uint8_t)x; + buf[1] = (uint8_t)(x >> 8); + buf[2] = (uint8_t)(x >> 16); + buf[3] = (uint8_t)(x >> 24); + buf[4] = (uint8_t)(x >> 32); + buf[5] = (uint8_t)(x >> 40); + buf[6] = (uint8_t)(x >> 48); + buf[7] = (uint8_t)(x >> 56); +} + +// Little-endian decoding of a 64-bit integer. 
+static inline uint64_t +dec64le(const void *src) +{ + const uint8_t *buf = src; + return (uint64_t)buf[0] | ((uint64_t)buf[1] << 8) | ((uint64_t)buf[2] << 16) | ((uint64_t)buf[3] << 24) | + ((uint64_t)buf[4] << 32) | ((uint64_t)buf[5] << 40) | ((uint64_t)buf[6] << 48) | ((uint64_t)buf[7] << 56); +} + +// see gf27500.h +void +gf27500_encode(void *dst, const gf27500 *a) +{ + uint8_t *buf = dst; + gf27500 x; + + inner_gf27500_montgomery_reduce(&x, a); + enc64le(buf, x.v0); + enc64le(buf + 8, x.v1); + enc64le(buf + 16, x.v2); + enc64le(buf + 24, x.v3); + enc64le(buf + 32, x.v4); + enc64le(buf + 40, x.v5); + enc64le(buf + 48, x.v6); + enc64le(buf + 56, x.v7); +} + +// see gf27500.h +uint32_t +gf27500_decode(gf27500 *d, const void *src) +{ + const uint8_t *buf = src; + uint64_t d0, d1, d2, d3, d4, d5, d6, d7, t; + unsigned char cc; + + d0 = dec64le(buf); + d1 = dec64le(buf + 8); + d2 = dec64le(buf + 16); + d3 = dec64le(buf + 24); + d4 = dec64le(buf + 32); + d5 = dec64le(buf + 40); + d6 = dec64le(buf + 48); + d7 = dec64le(buf + 56); + cc = inner_gf27500_sbb(0, d0, MODULUS.v0, &t); + cc = inner_gf27500_sbb(cc, d1, MODULUS.v1, &t); + cc = inner_gf27500_sbb(cc, d2, MODULUS.v2, &t); + cc = inner_gf27500_sbb(cc, d3, MODULUS.v3, &t); + cc = inner_gf27500_sbb(cc, d4, MODULUS.v4, &t); + cc = inner_gf27500_sbb(cc, d5, MODULUS.v5, &t); + cc = inner_gf27500_sbb(cc, d6, MODULUS.v6, &t); + cc = inner_gf27500_sbb(cc, d7, MODULUS.v7, &t); + + (void)inner_gf27500_sbb(cc, 0, 0, &t); + + // If the value was not canonical then t = 0; otherwise, t = -1. + d->v0 = d0 & t; + d->v1 = d1 & t; + d->v2 = d2 & t; + d->v3 = d3 & t; + d->v4 = d4 & t; + d->v5 = d5 & t; + d->v6 = d6 & t; + d->v7 = d7 & t; + + // Convert to Montgomery representation. + gf27500_mul(d, d, &R2); + + return (uint32_t)t; +} + +// see gf27500.h +void +gf27500_decode_reduce(gf27500 *d, const void *src, size_t len) +{ + const uint8_t *buf = src; + + *d = ZERO; + if (len == 0) { + return; + } + + if ((len & 63) != 0) { + // Input size is not a multiple of 64, we decode a partial + // block, which is already less than 2^512. + uint8_t tmp[64]; + size_t k; + + k = len & ~(size_t)63; + memcpy(tmp, buf + k, len - k); + memset(tmp + len - k, 0, (sizeof tmp) - (len - k)); + d->v0 = dec64le(&tmp[0]); + d->v1 = dec64le(&tmp[8]); + d->v2 = dec64le(&tmp[16]); + d->v3 = dec64le(&tmp[24]); + d->v4 = dec64le(&tmp[32]); + d->v5 = dec64le(&tmp[40]); + d->v6 = dec64le(&tmp[48]); + d->v7 = dec64le(&tmp[56]); + + len = k; + } else { + // Input size is a multiple of 48, we decode a full block, + // and a reduction is needed. + len -= 64; + uint64_t d0 = dec64le(buf + len); + uint64_t d1 = dec64le(buf + len + 8); + uint64_t d2 = dec64le(buf + len + 16); + uint64_t d3 = dec64le(buf + len + 24); + uint64_t d4 = dec64le(buf + len + 32); + uint64_t d5 = dec64le(buf + len + 40); + uint64_t d6 = dec64le(buf + len + 48); + uint64_t d7 = dec64le(buf + len + 56); + + inner_gf27500_partial_reduce(d, d0, d1, d2, d3, d4, d5, d6, d7); + } + + // Process all remaining blocks, in descending address order. 
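+    // (Each Montgomery multiplication by R2 multiplies the running value by
+    // R = 2^512 in the plain domain, the same conversion used in
+    // gf27500_decode(), so the loop below evaluates the input as a
+    // little-endian integer in base 2^512, one 64-byte block at a time
+    // (Horner's rule); the final multiplication by R2 then converts the
+    // reduced result to Montgomery representation.)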
+ while (len > 0) { + gf27500_mul(d, d, &R2); + len -= 64; + uint64_t t0 = dec64le(buf + len); + uint64_t t1 = dec64le(buf + len + 8); + uint64_t t2 = dec64le(buf + len + 16); + uint64_t t3 = dec64le(buf + len + 24); + uint64_t t4 = dec64le(buf + len + 32); + uint64_t t5 = dec64le(buf + len + 40); + uint64_t t6 = dec64le(buf + len + 48); + uint64_t t7 = dec64le(buf + len + 56); + + gf27500 t; + inner_gf27500_partial_reduce(&t, t0, t1, t2, t3, t4, t5, t6, t7); + gf27500_add(d, d, &t); + } + + // Final conversion to Montgomery representation. + gf27500_mul(d, d, &R2); +} + +void +gf27500_div3(gf27500 *d, const gf27500 *a) +{ + const digit_t MAGIC = 0xAAAAAAAAAAAAAAAB; // 3^-1 mod 2^64 + uint64_t c0, c1, f0, f1; + gf27500 t; + + inner_gf27500_umul(f0, f1, a->arr[7], MAGIC); + t.arr[7] = f1 >> 1; + c1 = a->arr[7] - 3 * t.arr[7]; + + for (int32_t i = 6; i >= 0; i--) { + c0 = c1; + inner_gf27500_umul(f0, f1, a->arr[i], MAGIC); + t.arr[i] = f1 >> 1; + c1 = c0 + a->arr[i] - 3 * t.arr[i]; + t.arr[i] += c0 * ((MAGIC - 1) >> 1); + f0 = ((c1 >> 1) & c1); /* c1 == 3 */ + f1 = ((c1 >> 2) & !(c1 & 0x11)); /* c1 == 4 */ + f0 |= f1; + t.arr[i] += f0; + c1 = c1 - 3 * f0; + } + *d = t; + gf27500_sub(&t, d, &PM1O3); + gf27500_select(d, d, &t, -((c1 & 1) | (c1 >> 1))); // c1 >= 1 + gf27500_sub(&t, d, &PM1O3); + gf27500_select(d, d, &t, -(c1 == 2)); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/gf27500.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/gf27500.h new file mode 100644 index 0000000000..3ca640cc29 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/gf27500.h @@ -0,0 +1,1409 @@ +#ifndef gf27500_h__ +#define gf27500_h__ + +#ifdef __cplusplus +extern "C" +{ +#endif + +#include +#include +#include +#include +#include +#include + + typedef uint64_t digit_t; // Datatype for representing field elements + + /* + * A gf27500 instance represents an integer modulo q. + * This is a structure; it can be copied with a simple assignment, and + * passed around as a value (though exchanging pointers is possibly more + * efficient). + * The contents are opaque. No calling code should make any assumption + * about the contents. + */ + typedef union + { + // Contents are opaque. + // Implementation note: this encodes the value in Montgomery + // representation, with R = 2^512. Only partial reduction is + // done internally to ensure the value is below 2^505 + struct + { + uint64_t v0; + uint64_t v1; + uint64_t v2; + uint64_t v3; + uint64_t v4; + uint64_t v5; + uint64_t v6; + uint64_t v7; + }; + digit_t arr[8]; + } gf27500; + + /* + * Constant zero (in the field). + */ + extern const gf27500 ZERO; + + /* + * Constant one (in the field). + */ + extern const gf27500 ONE; + + /* + * Constant -1 (in the field). + */ + extern const gf27500 gf27500_MINUS_ONE; + + /* + * API RULES: + * ========== + * + * Elementary operations on field elements are implemented by functions + * which take as parameter pointers to the operands. The first parameter + * is the pointer to the destination. Thus: + * gf27500 a = ...; + * gf27500 b = ...; + * gf27500 d; + * gf27500_sub(&d, &a, &b) + * sets field element d to a - b (implicitly modulo q). + * + * Operands may be used several times: it is always valid to use as + * output a gf27500 structure which is also used as input. + * + * Boolean values are represented by 32-bit integer (uint32_t) which have + * value exactly 0xFFFFFFFF (for "true") or 0x00000000 (for "false"). 
This + * convention minimizes the risk that a "smart" compiler breaks the + * constant-time property of the code through unfortunated optimizations. + * When a function expects such a Boolean, the caller MUST take care never + * to provide any value other than 0x00000000 or 0xFFFFFFFF. + * + * Values are encoded into exactly 64 bytes: value x modulo q is mapped to + * its unique integer representant in the [0..q-1] range, which is then + * encoded over 64 bytes with little-endian convention. Encoding is canonical + * and checked: when decoding (with gf27500_decode()), the input value is + * verified to be in the [0..q-1] range; for an out-of-range value, + * gf27500_decode() fills the output structure with zero, and returns + * 0x00000000. + * + * For most operations, the implementation is an inline function, defined + * below; the compiler can thus efficiently include it in the calling code. + * A few expensive operations (e.g. divisions) use non-inline functions, + * declared below but defined in gf27500.c + * + * All functions and macro whose name starts with "inner_gf27500_" are + * internal to this implementation and visible here only in order to + * support the API inline functions; they MUST NOT be used directly. + */ + +#if (defined _MSC_VER && defined _M_X64) || (defined __x86_64__ && (defined __GNUC__ || defined __clang__)) +#include +#define inner_gf27500_adc(cc, a, b, d) _addcarry_u64(cc, a, b, (unsigned long long *)(void *)d) +#define inner_gf27500_sbb(cc, a, b, d) _subborrow_u64(cc, a, b, (unsigned long long *)(void *)d) +#else +static inline unsigned char +inner_gf27500_adc(unsigned char cc, uint64_t a, uint64_t b, uint64_t *d) +{ + unsigned __int128 t = (unsigned __int128)a + (unsigned __int128)b + cc; + *d = (uint64_t)t; + return (unsigned char)(t >> 64); +} +static inline unsigned char +inner_gf27500_sbb(unsigned char cc, uint64_t a, uint64_t b, uint64_t *d) +{ + unsigned __int128 t = (unsigned __int128)a - (unsigned __int128)b - cc; + *d = (uint64_t)t; + return (unsigned char)(-(uint64_t)(t >> 64)); +} +#endif + +#if defined _MSC_VER +#define inner_gf27500_umul(lo, hi, x, y) \ + do { \ + uint64_t umul_hi; \ + (lo) = _umul128((x), (y), &umul_hi); \ + (hi) = umul_hi; \ + } while (0) +#define inner_gf27500_umul_add(lo, hi, x, y, z) \ + do { \ + uint64_t umul_lo, umul_hi; \ + umul_lo = _umul128((x), (y), &umul_hi); \ + unsigned char umul_cc; \ + umul_cc = inner_gf27500_adc(0, umul_lo, (z), &umul_lo); \ + (void)inner_gf27500_adc(umul_cc, umul_hi, 0, &umul_hi); \ + (lo) = umul_lo; \ + (hi) = umul_hi; \ + } while (0) +#define inner_gf27500_umul_x2(lo, hi, x1, y1, x2, y2) \ + do { \ + uint64_t umul_lo, umul_hi; \ + umul_lo = _umul128((x1), (y1), &umul_hi); \ + uint64_t umul_lo2, umul_hi2; \ + umul_lo2 = _umul128((x2), (y2), &umul_hi2); \ + unsigned char umul_cc; \ + umul_cc = inner_gf27500_adc(0, umul_lo, umul_lo2, &umul_lo); \ + (void)inner_gf27500_adc(umul_cc, umul_hi, umul_hi2, &umul_hi); \ + (lo) = umul_lo; \ + (hi) = umul_hi; \ + } while (0) +#define inner_gf27500_umul_x2_add(lo, hi, x1, y1, x2, y2, z) \ + do { \ + uint64_t umul_lo, umul_hi; \ + umul_lo = _umul128((x1), (y1), &umul_hi); \ + uint64_t umul_lo2, umul_hi2; \ + umul_lo2 = _umul128((x2), (y2), &umul_hi2); \ + unsigned char umul_cc; \ + umul_cc = inner_gf27500_adc(0, umul_lo, umul_lo2, &umul_lo); \ + (void)inner_gf27500_adc(umul_cc, umul_hi, umul_hi2, &umul_hi); \ + umul_cc = inner_gf27500_adc(0, umul_lo, (z), &umul_lo); \ + (void)inner_gf27500_adc(umul_cc, umul_hi, 0, &umul_hi); \ + (lo) = umul_lo; \ + (hi) = 
umul_hi; \ + } while (0) +#else +#define inner_gf27500_umul(lo, hi, x, y) \ + do { \ + unsigned __int128 umul_tmp; \ + umul_tmp = (unsigned __int128)(x) * (unsigned __int128)(y); \ + (lo) = (uint64_t)umul_tmp; \ + (hi) = (uint64_t)(umul_tmp >> 64); \ + } while (0) +#define inner_gf27500_umul_add(lo, hi, x, y, z) \ + do { \ + unsigned __int128 umul_tmp; \ + umul_tmp = (unsigned __int128)(x) * (unsigned __int128)(y) + (unsigned __int128)(uint64_t)(z); \ + (lo) = (uint64_t)umul_tmp; \ + (hi) = (uint64_t)(umul_tmp >> 64); \ + } while (0) +#define inner_gf27500_umul_x2(lo, hi, x1, y1, x2, y2) \ + do { \ + unsigned __int128 umul_tmp; \ + umul_tmp = \ + (unsigned __int128)(x1) * (unsigned __int128)(y1) + (unsigned __int128)(x2) * (unsigned __int128)(y2); \ + (lo) = (uint64_t)umul_tmp; \ + (hi) = (uint64_t)(umul_tmp >> 64); \ + } while (0) +#define inner_gf27500_umul_x2_add(lo, hi, x1, y1, x2, y2, z) \ + do { \ + unsigned __int128 umul_tmp; \ + umul_tmp = (unsigned __int128)(x1) * (unsigned __int128)(y1) + \ + (unsigned __int128)(x2) * (unsigned __int128)(y2) + (unsigned __int128)(uint64_t)(z); \ + (lo) = (uint64_t)umul_tmp; \ + (hi) = (uint64_t)(umul_tmp >> 64); \ + } while (0) +#endif + + /* + * d <- a + b + */ + static inline void + gf27500_add(gf27500 *d, const gf27500 *a, const gf27500 *b) + { + uint64_t d0, d1, d2, d3, d4, d5, d6, d7, f; + unsigned char cc; + + // Raw addition. + cc = inner_gf27500_adc(0, a->v0, b->v0, &d0); + cc = inner_gf27500_adc(cc, a->v1, b->v1, &d1); + cc = inner_gf27500_adc(cc, a->v2, b->v2, &d2); + cc = inner_gf27500_adc(cc, a->v3, b->v3, &d3); + cc = inner_gf27500_adc(cc, a->v4, b->v4, &d4); + cc = inner_gf27500_adc(cc, a->v5, b->v5, &d5); + cc = inner_gf27500_adc(cc, a->v6, b->v6, &d6); + (void)inner_gf27500_adc(cc, a->v7, b->v7, &d7); + + // Sum is up to 2^506 - 2. Subtract q if the value is not lower + // than 2^505 (we subtract q by adding -q). + // Note: 0xE5 = (-27) % 256, 52 = 500 - 7*64 + f = d7 >> 57; + cc = inner_gf27500_adc(0, d0, f, &d0); + cc = inner_gf27500_adc(cc, d1, 0, &d1); + cc = inner_gf27500_adc(cc, d2, 0, &d2); + cc = inner_gf27500_adc(cc, d3, 0, &d3); + cc = inner_gf27500_adc(cc, d4, 0, &d4); + cc = inner_gf27500_adc(cc, d5, 0, &d5); + cc = inner_gf27500_adc(cc, d6, 0, &d6); + (void)inner_gf27500_adc(cc, d7, ((uint64_t)0xFE5 << 52) & -f, &d7); + + // One subtraction of q might not be enough. + f = d7 >> 57; + cc = inner_gf27500_adc(0, d0, f, &d0); + cc = inner_gf27500_adc(cc, d1, 0, &d1); + cc = inner_gf27500_adc(cc, d2, 0, &d2); + cc = inner_gf27500_adc(cc, d3, 0, &d3); + cc = inner_gf27500_adc(cc, d4, 0, &d4); + cc = inner_gf27500_adc(cc, d5, 0, &d5); + cc = inner_gf27500_adc(cc, d6, 0, &d6); + (void)inner_gf27500_adc(cc, d7, ((uint64_t)0xFE5 << 52) & -f, &d7); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + d->v4 = d4; + d->v5 = d5; + d->v6 = d6; + d->v7 = d7; + } + + /* + * d <- a - b + */ + static inline void + gf27500_sub(gf27500 *d, const gf27500 *a, const gf27500 *b) + { + uint64_t d0, d1, d2, d3, d4, d5, d6, d7, m, f; + unsigned char cc; + + // Raw subtraction. + cc = inner_gf27500_sbb(0, a->v0, b->v0, &d0); + cc = inner_gf27500_sbb(cc, a->v1, b->v1, &d1); + cc = inner_gf27500_sbb(cc, a->v2, b->v2, &d2); + cc = inner_gf27500_sbb(cc, a->v3, b->v3, &d3); + cc = inner_gf27500_sbb(cc, a->v4, b->v4, &d4); + cc = inner_gf27500_sbb(cc, a->v5, b->v5, &d5); + cc = inner_gf27500_sbb(cc, a->v6, b->v6, &d6); + cc = inner_gf27500_sbb(cc, a->v7, b->v7, &d7); + + // Add 2*q if the result is negative. 
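+        // (Subtracting (m & 2) from d0 and ((0xFCA << 52) & m) from d7
+        // subtracts 2^512 - 2*q = 0xFCA*2^500 + 2, i.e. it adds 2*q modulo
+        // 2^512 whenever m is all-ones.)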
+ // Note: 0xCA = (-2*27) % 256, 52 = 500 - 7*64 + (void)inner_gf27500_sbb(cc, 0, 0, &m); + cc = inner_gf27500_sbb(0, d0, m & 2, &d0); + cc = inner_gf27500_sbb(cc, d1, 0, &d1); + cc = inner_gf27500_sbb(cc, d2, 0, &d2); + cc = inner_gf27500_sbb(cc, d3, 0, &d3); + cc = inner_gf27500_sbb(cc, d4, 0, &d4); + cc = inner_gf27500_sbb(cc, d5, 0, &d5); + cc = inner_gf27500_sbb(cc, d6, 0, &d6); + (void)inner_gf27500_sbb(cc, d7, ((uint64_t)0xFCA << 52) & m, &d7); + + // We might have overdone it; subtract q if necessary. + // Note: 0xE5 = (-27) % 256, 52 = 500 - 7*64 + f = d7 >> 57; + cc = inner_gf27500_adc(0, d0, f, &d0); + cc = inner_gf27500_adc(cc, d1, 0, &d1); + cc = inner_gf27500_adc(cc, d2, 0, &d2); + cc = inner_gf27500_adc(cc, d3, 0, &d3); + cc = inner_gf27500_adc(cc, d4, 0, &d4); + cc = inner_gf27500_adc(cc, d5, 0, &d5); + cc = inner_gf27500_adc(cc, d6, 0, &d6); + (void)inner_gf27500_adc(cc, d7, ((uint64_t)0xFE5 << 52) & -f, &d7); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + d->v4 = d4; + d->v5 = d5; + d->v6 = d6; + d->v7 = d7; + } + + /* + * d <- -a + */ + static inline void + gf27500_neg(gf27500 *d, const gf27500 *a) + { + uint64_t d0, d1, d2, d3, d4, d5, d6, d7, f; + unsigned char cc; + + // 2*q - a + cc = inner_gf27500_sbb(0, (uint64_t)0xFFFFFFFFFFFFFFFE, a->v0, &d0); + cc = inner_gf27500_sbb(cc, (uint64_t)0xFFFFFFFFFFFFFFFF, a->v1, &d1); + cc = inner_gf27500_sbb(cc, (uint64_t)0xFFFFFFFFFFFFFFFF, a->v2, &d2); + cc = inner_gf27500_sbb(cc, (uint64_t)0xFFFFFFFFFFFFFFFF, a->v3, &d3); + cc = inner_gf27500_sbb(cc, (uint64_t)0xFFFFFFFFFFFFFFFF, a->v4, &d4); + cc = inner_gf27500_sbb(cc, (uint64_t)0xFFFFFFFFFFFFFFFF, a->v5, &d5); + cc = inner_gf27500_sbb(cc, (uint64_t)0xFFFFFFFFFFFFFFFF, a->v6, &d6); + (void)inner_gf27500_sbb(cc, (uint64_t)0x035FFFFFFFFFFFFF, a->v7, &d7); + + // Subtract q if the value is not lower than 2^505. + f = d7 >> 57; + cc = inner_gf27500_adc(0, d0, f, &d0); + cc = inner_gf27500_adc(cc, d1, 0, &d1); + cc = inner_gf27500_adc(cc, d2, 0, &d2); + cc = inner_gf27500_adc(cc, d3, 0, &d3); + cc = inner_gf27500_adc(cc, d4, 0, &d4); + cc = inner_gf27500_adc(cc, d5, 0, &d5); + cc = inner_gf27500_adc(cc, d6, 0, &d6); + (void)inner_gf27500_adc(cc, d7, ((uint64_t)0xFE5 << 52) & -f, &d7); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + d->v4 = d4; + d->v5 = d5; + d->v6 = d6; + d->v7 = d7; + } + + /* + * If ctl == 0x00000000, then *a0 is copied into *d. + * If ctl == 0xFFFFFFFF, then *a1 is copied into *d. + * ctl MUST be either 0x00000000 or 0xFFFFFFFF. + */ + static inline void + gf27500_select(gf27500 *d, const gf27500 *a0, const gf27500 *a1, uint32_t ctl) + { + uint64_t cw = (uint64_t)*(int32_t *)&ctl; + d->v0 = a0->v0 ^ (cw & (a0->v0 ^ a1->v0)); + d->v1 = a0->v1 ^ (cw & (a0->v1 ^ a1->v1)); + d->v2 = a0->v2 ^ (cw & (a0->v2 ^ a1->v2)); + d->v3 = a0->v3 ^ (cw & (a0->v3 ^ a1->v3)); + d->v4 = a0->v4 ^ (cw & (a0->v4 ^ a1->v4)); + d->v5 = a0->v5 ^ (cw & (a0->v5 ^ a1->v5)); + d->v6 = a0->v6 ^ (cw & (a0->v6 ^ a1->v6)); + d->v7 = a0->v7 ^ (cw & (a0->v7 ^ a1->v7)); + } + + /* + * If ctl == 0x00000000, then *a and *b are unchanged. + * If ctl == 0xFFFFFFFF, then the contents of *a and *b are swapped. + * ctl MUST be either 0x00000000 or 0xFFFFFFFF. 
+ */ + static inline void + gf27500_cswap(gf27500 *a, gf27500 *b, uint32_t ctl) + { + uint64_t cw = (uint64_t)*(int32_t *)&ctl; + uint64_t t; + t = cw & (a->v0 ^ b->v0); + a->v0 ^= t; + b->v0 ^= t; + t = cw & (a->v1 ^ b->v1); + a->v1 ^= t; + b->v1 ^= t; + t = cw & (a->v2 ^ b->v2); + a->v2 ^= t; + b->v2 ^= t; + t = cw & (a->v3 ^ b->v3); + a->v3 ^= t; + b->v3 ^= t; + t = cw & (a->v4 ^ b->v4); + a->v4 ^= t; + b->v4 ^= t; + t = cw & (a->v5 ^ b->v5); + a->v5 ^= t; + b->v5 ^= t; + t = cw & (a->v6 ^ b->v6); + a->v6 ^= t; + b->v6 ^= t; + t = cw & (a->v7 ^ b->v7); + a->v7 ^= t; + b->v7 ^= t; + } + + /* + * d <- a/2 + */ + static inline void + gf27500_half(gf27500 *d, const gf27500 *a) + { + uint64_t d0, d1, d2, d3, d4, d5, d6, d7; + + d0 = (a->v0 >> 1) | (a->v1 << 63); + d1 = (a->v1 >> 1) | (a->v2 << 63); + d2 = (a->v2 >> 1) | (a->v3 << 63); + d3 = (a->v3 >> 1) | (a->v4 << 63); + d4 = (a->v4 >> 1) | (a->v5 << 63); + d5 = (a->v5 >> 1) | (a->v6 << 63); + d6 = (a->v6 >> 1) | (a->v7 << 63); + d7 = a->v7 >> 1; + d7 += ((uint64_t)27 << 51) & -(a->v0 & 1); + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + d->v4 = d4; + d->v5 = d5; + d->v6 = d6; + d->v7 = d7; + } + + // Inner function: 512-bit to 505-bit reduction + static inline void + inner_gf27500_partial_reduce(gf27500 *d, + uint64_t a0, + uint64_t a1, + uint64_t a2, + uint64_t a3, + uint64_t a4, + uint64_t a5, + uint64_t a6, + uint64_t a7) + { + uint64_t d0, d1, d2, d3, d4, d5, d6, d7, h, quo, rem; + unsigned char cc; + + // Split value in high (12 bits) and low (500 bits) parts. + h = a7 >> 52; + a7 &= 0x000FFFFFFFFFFFFF; + + // 27*2^500 = 1 mod q; hence, we add floor(h/27) + (h mod 27)*2^500 + // to the low part. + quo = (0x12F7 * h) >> 17; + rem = h - (27 * quo); + + cc = inner_gf27500_adc(0, a0, quo, &d0); + cc = inner_gf27500_adc(cc, a1, 0, &d1); + cc = inner_gf27500_adc(cc, a2, 0, &d2); + cc = inner_gf27500_adc(cc, a3, 0, &d3); + cc = inner_gf27500_adc(cc, a4, 0, &d4); + cc = inner_gf27500_adc(cc, a5, 0, &d5); + cc = inner_gf27500_adc(cc, a6, 0, &d6); + (void)inner_gf27500_adc(cc, a7, rem << 52, &d7); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + d->v4 = d4; + d->v5 = d5; + d->v6 = d6; + d->v7 = d7; + } + + // Inner function: Normalize value *a into *d. + static inline void + inner_gf27500_normalize(gf27500 *d, const gf27500 *a) + { + uint64_t d0, d1, d2, d3, d4, d5, d6, d7, m; + unsigned char cc; + + // Subtract q. + cc = inner_gf27500_sbb(0, a->v0, 0xFFFFFFFFFFFFFFFF, &d0); + cc = inner_gf27500_sbb(cc, a->v1, 0xFFFFFFFFFFFFFFFF, &d1); + cc = inner_gf27500_sbb(cc, a->v2, 0xFFFFFFFFFFFFFFFF, &d2); + cc = inner_gf27500_sbb(cc, a->v3, 0xFFFFFFFFFFFFFFFF, &d3); + cc = inner_gf27500_sbb(cc, a->v4, 0xFFFFFFFFFFFFFFFF, &d4); + cc = inner_gf27500_sbb(cc, a->v5, 0xFFFFFFFFFFFFFFFF, &d5); + cc = inner_gf27500_sbb(cc, a->v6, 0xFFFFFFFFFFFFFFFF, &d6); + cc = inner_gf27500_sbb(cc, a->v7, 0x01AFFFFFFFFFFFFF, &d7); + + // Add back q if the result is negative. 
+ (void)inner_gf27500_sbb(cc, 0, 0, &m); + cc = inner_gf27500_adc(0, d0, m, &d0); + cc = inner_gf27500_adc(cc, d1, m, &d1); + cc = inner_gf27500_adc(cc, d2, m, &d2); + cc = inner_gf27500_adc(cc, d3, m, &d3); + cc = inner_gf27500_adc(cc, d4, m, &d4); + cc = inner_gf27500_adc(cc, d5, m, &d5); + cc = inner_gf27500_adc(cc, d6, m, &d6); + (void)inner_gf27500_adc(cc, d7, m & 0x01AFFFFFFFFFFFFF, &d7); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + d->v4 = d4; + d->v5 = d5; + d->v6 = d6; + d->v7 = d7; + } + + /* + * d <- 2*a + */ + static inline void + gf27500_mul2(gf27500 *d, const gf27500 *a) + { + gf27500_add(d, a, a); + } + + /* + * d <- a*x + * (multiplication by a 32-bit integer) + */ + static inline void + gf27500_mul_small(gf27500 *d, const gf27500 *a, uint32_t x) + { + uint64_t d0, d1, d2, d3, d4, d5, d6, d7, d8; + uint64_t lo, hi, b, h, quo, rem; + unsigned char cc; + + // Product over the integers. Top output word (d6) is at most 31 bits. + b = (uint64_t)x; + inner_gf27500_umul(d0, d1, a->v0, b); + inner_gf27500_umul(d2, d3, a->v2, b); + inner_gf27500_umul(d4, d5, a->v4, b); + inner_gf27500_umul(d6, d7, a->v6, b); + + inner_gf27500_umul(lo, hi, a->v1, b); + cc = inner_gf27500_adc(0, d1, lo, &d1); + cc = inner_gf27500_adc(cc, d2, hi, &d2); + inner_gf27500_umul(lo, hi, a->v3, b); + cc = inner_gf27500_adc(cc, d3, lo, &d3); + cc = inner_gf27500_adc(cc, d4, hi, &d4); + inner_gf27500_umul(lo, hi, a->v5, b); + cc = inner_gf27500_adc(cc, d5, lo, &d5); + cc = inner_gf27500_adc(cc, d6, hi, &d6); + inner_gf27500_umul(lo, d8, a->v7, b); + cc = inner_gf27500_adc(cc, d7, lo, &d7); + (void)inner_gf27500_adc(cc, d8, 0, &d8); + + // Extract low 500-bit part, and the high part (at most 35 bits). + h = (d8 << 12) | (d7 >> 52); + d7 &= 0x000FFFFFFFFFFFFF; + + // Fold h by adding floor(h/65) + (h mod 65)*2^376 to the low part. + inner_gf27500_umul(lo, hi, h, 0x97B425ED097B425F); + quo = hi >> 4; + rem = h - (27 * quo); + + cc = inner_gf27500_adc(cc, d0, quo, &d0); + cc = inner_gf27500_adc(cc, d1, 0, &d1); + cc = inner_gf27500_adc(cc, d2, 0, &d2); + cc = inner_gf27500_adc(cc, d3, 0, &d3); + cc = inner_gf27500_adc(cc, d4, 0, &d4); + cc = inner_gf27500_adc(cc, d5, 0, &d5); + cc = inner_gf27500_adc(cc, d6, 0, &d6); + (void)inner_gf27500_adc(cc, d7, rem << 52, &d7); + + d->v0 = d0; + d->v1 = d1; + d->v2 = d2; + d->v3 = d3; + d->v4 = d4; + d->v5 = d5; + d->v6 = d6; + d->v7 = d7; + } + + /* + * d <- x + * Input value x (32-bit integer) is converted to field element x mod q. + */ + static inline void + gf27500_set_small(gf27500 *d, uint32_t x) + { + // We want Montgomery representation, i.e. x*2^512 mod q. + // We set h = x*2^12; then: + // x*2^512 = h*2^500 + // = (h mod 27)*2^500 + floor(h/27)*27*2^500 + // = (h mod 27)*2^500 + floor(h/27) mod q + // by using the fact that 27*2^500 = 1 mod q. + uint64_t h, lo, hi, quo, rem; + h = (uint64_t)x << 12; + inner_gf27500_umul(lo, hi, h, 0x97B425ED097B425F); + (void)lo; + quo = hi >> 4; + rem = h - (27 * quo); + + d->v0 = quo; + d->v1 = 0; + d->v2 = 0; + d->v3 = 0; + d->v4 = 0; + d->v5 = 0; + d->v6 = 0; + d->v7 = rem << 52; + } + + // Inner function: d <- a/2^512, with normalization to [0..q-1]. 
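The routine that follows is a Montgomery reduction: for x < 2^512 it computes f = x * (-1/q) mod 2^512 and then (x + f*q) / 2^512, which equals x * 2^-512 mod q and is exactly divisible because f*q cancels x modulo 2^512. A scaled-down sketch of the same technique, checkable with plain 64-bit arithmetic; the toy modulus and helper name are illustrative only and not part of the upstream code. The toy analogue of q = 27*2^500 - 1 is q' = 27*2^20 - 1 with R = 2^32, and since (27*2^20)^2 is a multiple of 2^32, the constant -1/q' mod 2^32 is simply 27*2^20 + 1, mirroring m = 27*2^500 + 1 below.

    #include <stdint.h>

    #define TOY_Q (((uint32_t)27 << 20) - 1) /* toy analogue of q  */
    #define TOY_M (((uint32_t)27 << 20) + 1) /* -1/TOY_Q mod 2^32  */

    /* Returns x / 2^32 mod TOY_Q, for any x < TOY_Q * 2^32. */
    static inline uint32_t
    toy_montgomery_reduce(uint64_t x)
    {
        uint32_t u = (uint32_t)x * TOY_M;              /* x * (-1/q') mod 2^32 */
        uint64_t t = (x + (uint64_t)u * TOY_Q) >> 32;  /* exact division by 2^32 */
        return (uint32_t)(t >= TOY_Q ? t - TOY_Q : t); /* t < 2*q'; the real code
                                                          normalizes with a
                                                          constant-time mask */
    }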
+ static inline void + inner_gf27500_montgomery_reduce(gf27500 *d, const gf27500 *a) + { + uint64_t x0, x1, x2, x3, x4, x5, x6, x7; + uint64_t f0, f1, f2, f3, f4, f5, f6, f7; + uint64_t g0, g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, g11, g12, g13, g14, g15; + uint64_t d0, d1, d2, d3, d4, d5, d6, d7; + uint64_t hi, t, w; + unsigned char cc; + + // Let m = -1/q mod 2^512 = 27*2^500 + 1 + // For input x, we compute f = x*m mod 2^512, then + // h = x + f*q, which is a multiple of 2^512. The output + // is then h/2^512. + // Since x < 2^512, we have: + // h <= 2^512 - 1 + (2^512 - 1)*q + // h <= q*2^512 + 2^512 - q - 1 + // Since h = 0 mod 2^512, this implies that h <= q*2^512. + // The output h/2^512 is therefore between 0 and q (inclusive). + + x0 = a->v0; + x1 = a->v1; + x2 = a->v2; + x3 = a->v3; + x4 = a->v4; + x5 = a->v5; + x6 = a->v6; + x7 = a->v7; + + // f = x*(-1/q) mod 2^500 + f0 = x0; + f1 = x1; + f2 = x2; + f3 = x3; + f4 = x4; + f5 = x5; + f6 = x6; + f7 = x7 + ((x0 * 27) << 52); + + // g = f*q + inner_gf27500_umul(g7, hi, f0, (uint64_t)27 << 52); + inner_gf27500_umul_add(g8, hi, f1, (uint64_t)27 << 52, hi); + inner_gf27500_umul_add(g9, hi, f2, (uint64_t)27 << 52, hi); + inner_gf27500_umul_add(g10, hi, f3, (uint64_t)27 << 52, hi); + inner_gf27500_umul_add(g11, hi, f4, (uint64_t)27 << 52, hi); + inner_gf27500_umul_add(g12, hi, f5, (uint64_t)27 << 52, hi); + inner_gf27500_umul_add(g13, hi, f6, (uint64_t)27 << 52, hi); + inner_gf27500_umul_add(g14, g15, f7, (uint64_t)27 << 52, hi); + + cc = inner_gf27500_sbb(0, 0, f0, &g0); + cc = inner_gf27500_sbb(cc, 0, f1, &g1); + cc = inner_gf27500_sbb(cc, 0, f2, &g2); + cc = inner_gf27500_sbb(cc, 0, f3, &g3); + cc = inner_gf27500_sbb(cc, 0, f4, &g4); + cc = inner_gf27500_sbb(cc, 0, f5, &g5); + cc = inner_gf27500_sbb(cc, 0, f6, &g6); + cc = inner_gf27500_sbb(cc, g7, f7, &g7); + cc = inner_gf27500_sbb(cc, g8, 0, &g8); + cc = inner_gf27500_sbb(cc, g9, 0, &g9); + cc = inner_gf27500_sbb(cc, g10, 0, &g10); + cc = inner_gf27500_sbb(cc, g11, 0, &g11); + cc = inner_gf27500_sbb(cc, g12, 0, &g12); + cc = inner_gf27500_sbb(cc, g13, 0, &g13); + cc = inner_gf27500_sbb(cc, g14, 0, &g14); + (void)inner_gf27500_sbb(cc, g15, 0, &g15); + + // h = x + f*q (we drop the low 512 bits). + cc = inner_gf27500_adc(0, g0, x0, &x0); + cc = inner_gf27500_adc(cc, g1, x1, &x1); + cc = inner_gf27500_adc(cc, g2, x2, &x2); + cc = inner_gf27500_adc(cc, g3, x3, &x3); + cc = inner_gf27500_adc(cc, g4, x4, &x4); + cc = inner_gf27500_adc(cc, g5, x5, &x5); + cc = inner_gf27500_adc(cc, g6, x6, &x6); + cc = inner_gf27500_adc(cc, g7, x7, &x7); + cc = inner_gf27500_adc(cc, g8, 0, &d0); + cc = inner_gf27500_adc(cc, g9, 0, &d1); + cc = inner_gf27500_adc(cc, g10, 0, &d2); + cc = inner_gf27500_adc(cc, g11, 0, &d3); + cc = inner_gf27500_adc(cc, g12, 0, &d4); + cc = inner_gf27500_adc(cc, g13, 0, &d5); + cc = inner_gf27500_adc(cc, g14, 0, &d6); + (void)inner_gf27500_adc(cc, g15, 0, &d7); + + // Normalize: if h = q, replace it with zero. 
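The normalization that follows uses another branch-free idiom: the limbs of h are AND-ed together, with the top limb XOR-ed against the complement of q's top word, so the combined word t is all-ones exactly when h == q; adding 1 to t then produces a carry only in that case, and the carry is stretched into a mask that clears the output. A small sketch of the all-ones detector (hypothetical helper, assuming a compiler with unsigned __int128 as in the portable fallback earlier in this header):

    #include <stdint.h>

    /* 0xFF..FF iff t == 2^64 - 1, 0 otherwise, with no branch. */
    static inline uint64_t
    all_ones_to_mask(uint64_t t)
    {
        unsigned __int128 s = (unsigned __int128)t + 1; /* carries iff t was all-ones */
        return 0 - (uint64_t)(s >> 64);
    }
    /* usage: x &= ~all_ones_to_mask(t);  keeps x unless the limbs matched q */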
+ t = d0 & d1 & d2 & d3 & d4 & d5 & d6 & (d7 ^ ~(uint64_t)0x01AFFFFFFFFFFFFF); + cc = inner_gf27500_adc(0, t, 1, &t); + (void)inner_gf27500_sbb(cc, 0, 0, &w); + w = ~w; + d->v0 = d0 & w; + d->v1 = d1 & w; + d->v2 = d2 & w; + d->v3 = d3 & w; + d->v4 = d4 & w; + d->v5 = d5 & w; + d->v6 = d6 & w; + d->v7 = d7 & w; + } + + /* + * d <- a*b + */ + static inline void + gf27500_mul(gf27500 *d, const gf27500 *a, const gf27500 *b) + { + uint64_t e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15; + uint64_t f0, f1, f2, f3, f4, f5, f6, f7; + uint64_t lo, hi, lo2, hi2; + uint64_t g0, g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, g11, g12, g13, g14, g15; + unsigned char cc; + + // Multiplication over integers. + // 8 mul + inner_gf27500_umul(e0, e1, a->v0, b->v0); + inner_gf27500_umul(e2, e3, a->v1, b->v1); + inner_gf27500_umul(e4, e5, a->v2, b->v2); + inner_gf27500_umul(e6, e7, a->v3, b->v3); + inner_gf27500_umul(e8, e9, a->v4, b->v4); + inner_gf27500_umul(e10, e11, a->v5, b->v5); + inner_gf27500_umul(e12, e13, a->v6, b->v6); + inner_gf27500_umul(e14, e15, a->v7, b->v7); + + // + 7 mul = 15 + inner_gf27500_umul(lo, hi, a->v0, b->v1); + cc = inner_gf27500_adc(0, e1, lo, &e1); + cc = inner_gf27500_adc(cc, e2, hi, &e2); + inner_gf27500_umul(lo, hi, a->v0, b->v3); + cc = inner_gf27500_adc(cc, e3, lo, &e3); + cc = inner_gf27500_adc(cc, e4, hi, &e4); + inner_gf27500_umul(lo, hi, a->v0, b->v5); + cc = inner_gf27500_adc(cc, e5, lo, &e5); + cc = inner_gf27500_adc(cc, e6, hi, &e6); + inner_gf27500_umul(lo, hi, a->v0, b->v7); + cc = inner_gf27500_adc(cc, e7, lo, &e7); + cc = inner_gf27500_adc(cc, e8, hi, &e8); + inner_gf27500_umul(lo, hi, a->v2, b->v7); + cc = inner_gf27500_adc(cc, e9, lo, &e9); + cc = inner_gf27500_adc(cc, e10, hi, &e10); + inner_gf27500_umul(lo, hi, a->v4, b->v7); + cc = inner_gf27500_adc(cc, e11, lo, &e11); + cc = inner_gf27500_adc(cc, e12, hi, &e12); + inner_gf27500_umul(lo, hi, a->v6, b->v7); + cc = inner_gf27500_adc(cc, e13, lo, &e13); + cc = inner_gf27500_adc(cc, e14, hi, &e14); + cc = inner_gf27500_adc(cc, e15, 0, &e15); + assert(cc == 0); + + // + 7 mul = 22 + inner_gf27500_umul(lo, hi, a->v1, b->v0); + cc = inner_gf27500_adc(0, e1, lo, &e1); + cc = inner_gf27500_adc(cc, e2, hi, &e2); + inner_gf27500_umul(lo, hi, a->v3, b->v0); + cc = inner_gf27500_adc(cc, e3, lo, &e3); + cc = inner_gf27500_adc(cc, e4, hi, &e4); + inner_gf27500_umul(lo, hi, a->v5, b->v0); + cc = inner_gf27500_adc(cc, e5, lo, &e5); + cc = inner_gf27500_adc(cc, e6, hi, &e6); + inner_gf27500_umul(lo, hi, a->v7, b->v0); + cc = inner_gf27500_adc(cc, e7, lo, &e7); + cc = inner_gf27500_adc(cc, e8, hi, &e8); + inner_gf27500_umul(lo, hi, a->v7, b->v2); + cc = inner_gf27500_adc(cc, e9, lo, &e9); + cc = inner_gf27500_adc(cc, e10, hi, &e10); + inner_gf27500_umul(lo, hi, a->v7, b->v4); + cc = inner_gf27500_adc(cc, e11, lo, &e11); + cc = inner_gf27500_adc(cc, e12, hi, &e12); + inner_gf27500_umul(lo, hi, a->v7, b->v6); + cc = inner_gf27500_adc(cc, e13, lo, &e13); + cc = inner_gf27500_adc(cc, e14, hi, &e14); + cc = inner_gf27500_adc(cc, e15, 0, &e15); + assert(cc == 0); + + // + 6 mul = 28 + inner_gf27500_umul(lo, hi, a->v0, b->v2); + cc = inner_gf27500_adc(0, e2, lo, &e2); + cc = inner_gf27500_adc(cc, e3, hi, &e3); + inner_gf27500_umul(lo, hi, a->v0, b->v4); + cc = inner_gf27500_adc(cc, e4, lo, &e4); + cc = inner_gf27500_adc(cc, e5, hi, &e5); + inner_gf27500_umul(lo, hi, a->v0, b->v6); + cc = inner_gf27500_adc(cc, e6, lo, &e6); + cc = inner_gf27500_adc(cc, e7, hi, &e7); + inner_gf27500_umul(lo, hi, a->v1, b->v7); + cc = 
inner_gf27500_adc(cc, e8, lo, &e8); + cc = inner_gf27500_adc(cc, e9, hi, &e9); + inner_gf27500_umul(lo, hi, a->v3, b->v7); + cc = inner_gf27500_adc(cc, e10, lo, &e10); + cc = inner_gf27500_adc(cc, e11, hi, &e11); + inner_gf27500_umul(lo, hi, a->v5, b->v7); + cc = inner_gf27500_adc(cc, e12, lo, &e12); + cc = inner_gf27500_adc(cc, e13, hi, &e13); + cc = inner_gf27500_adc(cc, e14, 0, &e14); + cc = inner_gf27500_adc(cc, e15, 0, &e15); + assert(cc == 0); + + // + 6 mul = 34 + inner_gf27500_umul(lo, hi, a->v2, b->v0); + cc = inner_gf27500_adc(0, e2, lo, &e2); + cc = inner_gf27500_adc(cc, e3, hi, &e3); + inner_gf27500_umul(lo, hi, a->v4, b->v0); + cc = inner_gf27500_adc(cc, e4, lo, &e4); + cc = inner_gf27500_adc(cc, e5, hi, &e5); + inner_gf27500_umul(lo, hi, a->v6, b->v0); + cc = inner_gf27500_adc(cc, e6, lo, &e6); + cc = inner_gf27500_adc(cc, e7, hi, &e7); + inner_gf27500_umul(lo, hi, a->v7, b->v1); + cc = inner_gf27500_adc(cc, e8, lo, &e8); + cc = inner_gf27500_adc(cc, e9, hi, &e9); + inner_gf27500_umul(lo, hi, a->v7, b->v3); + cc = inner_gf27500_adc(cc, e10, lo, &e10); + cc = inner_gf27500_adc(cc, e11, hi, &e11); + inner_gf27500_umul(lo, hi, a->v7, b->v5); + cc = inner_gf27500_adc(cc, e12, lo, &e12); + cc = inner_gf27500_adc(cc, e13, hi, &e13); + cc = inner_gf27500_adc(cc, e14, 0, &e14); + cc = inner_gf27500_adc(cc, e15, 0, &e15); + assert(cc == 0); + + // + 5 mul = 39 + inner_gf27500_umul(lo, hi, a->v1, b->v2); + cc = inner_gf27500_adc(cc, e3, lo, &e3); + cc = inner_gf27500_adc(cc, e4, hi, &e4); + inner_gf27500_umul(lo, hi, a->v1, b->v4); + cc = inner_gf27500_adc(cc, e5, lo, &e5); + cc = inner_gf27500_adc(cc, e6, hi, &e6); + inner_gf27500_umul(lo, hi, a->v1, b->v6); + cc = inner_gf27500_adc(cc, e7, lo, &e7); + cc = inner_gf27500_adc(cc, e8, hi, &e8); + inner_gf27500_umul(lo, hi, a->v3, b->v6); + cc = inner_gf27500_adc(cc, e9, lo, &e9); + cc = inner_gf27500_adc(cc, e10, hi, &e10); + inner_gf27500_umul(lo, hi, a->v5, b->v6); + cc = inner_gf27500_adc(cc, e11, lo, &e11); + cc = inner_gf27500_adc(cc, e12, hi, &e12); + cc = inner_gf27500_adc(cc, e13, 0, &e13); + cc = inner_gf27500_adc(cc, e14, 0, &e14); + cc = inner_gf27500_adc(cc, e15, 0, &e15); + assert(cc == 0); + + // + 5 mul = 44 + inner_gf27500_umul(lo, hi, a->v2, b->v1); + cc = inner_gf27500_adc(cc, e3, lo, &e3); + cc = inner_gf27500_adc(cc, e4, hi, &e4); + inner_gf27500_umul(lo, hi, a->v4, b->v1); + cc = inner_gf27500_adc(cc, e5, lo, &e5); + cc = inner_gf27500_adc(cc, e6, hi, &e6); + inner_gf27500_umul(lo, hi, a->v6, b->v1); + cc = inner_gf27500_adc(cc, e7, lo, &e7); + cc = inner_gf27500_adc(cc, e8, hi, &e8); + inner_gf27500_umul(lo, hi, a->v6, b->v3); + cc = inner_gf27500_adc(cc, e9, lo, &e9); + cc = inner_gf27500_adc(cc, e10, hi, &e10); + inner_gf27500_umul(lo, hi, a->v6, b->v5); + cc = inner_gf27500_adc(cc, e11, lo, &e11); + cc = inner_gf27500_adc(cc, e12, hi, &e12); + cc = inner_gf27500_adc(cc, e13, 0, &e13); + cc = inner_gf27500_adc(cc, e14, 0, &e14); + cc = inner_gf27500_adc(cc, e15, 0, &e15); + assert(cc == 0); + + // + 4 mul = 48 + inner_gf27500_umul(lo, hi, a->v1, b->v3); + cc = inner_gf27500_adc(cc, e4, lo, &e4); + cc = inner_gf27500_adc(cc, e5, hi, &e5); + inner_gf27500_umul(lo, hi, a->v1, b->v5); + cc = inner_gf27500_adc(cc, e6, lo, &e6); + cc = inner_gf27500_adc(cc, e7, hi, &e7); + inner_gf27500_umul(lo, hi, a->v3, b->v5); + cc = inner_gf27500_adc(cc, e8, lo, &e8); + cc = inner_gf27500_adc(cc, e9, hi, &e9); + inner_gf27500_umul(lo, hi, a->v4, b->v6); + cc = inner_gf27500_adc(cc, e10, lo, &e10); + cc = inner_gf27500_adc(cc, e11, 
hi, &e11); + cc = inner_gf27500_adc(cc, e12, 0, &e12); + cc = inner_gf27500_adc(cc, e13, 0, &e13); + cc = inner_gf27500_adc(cc, e14, 0, &e14); + cc = inner_gf27500_adc(cc, e15, 0, &e15); + assert(cc == 0); + + // + 4 mul = 52 + inner_gf27500_umul(lo, hi, a->v3, b->v1); + cc = inner_gf27500_adc(cc, e4, lo, &e4); + cc = inner_gf27500_adc(cc, e5, hi, &e5); + inner_gf27500_umul(lo, hi, a->v5, b->v1); + cc = inner_gf27500_adc(cc, e6, lo, &e6); + cc = inner_gf27500_adc(cc, e7, hi, &e7); + inner_gf27500_umul(lo, hi, a->v5, b->v3); + cc = inner_gf27500_adc(cc, e8, lo, &e8); + cc = inner_gf27500_adc(cc, e9, hi, &e9); + inner_gf27500_umul(lo, hi, a->v6, b->v4); + cc = inner_gf27500_adc(cc, e10, lo, &e10); + cc = inner_gf27500_adc(cc, e11, hi, &e11); + cc = inner_gf27500_adc(cc, e12, 0, &e12); + cc = inner_gf27500_adc(cc, e13, 0, &e13); + cc = inner_gf27500_adc(cc, e14, 0, &e14); + cc = inner_gf27500_adc(cc, e15, 0, &e15); + assert(cc == 0); + + // + 3 mul = 55 + inner_gf27500_umul(lo, hi, a->v2, b->v3); + cc = inner_gf27500_adc(cc, e5, lo, &e5); + cc = inner_gf27500_adc(cc, e6, hi, &e6); + inner_gf27500_umul(lo, hi, a->v2, b->v5); + cc = inner_gf27500_adc(cc, e7, lo, &e7); + cc = inner_gf27500_adc(cc, e8, hi, &e8); + inner_gf27500_umul(lo, hi, a->v4, b->v5); + cc = inner_gf27500_adc(cc, e9, lo, &e9); + cc = inner_gf27500_adc(cc, e10, hi, &e10); + cc = inner_gf27500_adc(cc, e11, 0, &e11); + cc = inner_gf27500_adc(cc, e12, 0, &e12); + cc = inner_gf27500_adc(cc, e13, 0, &e13); + cc = inner_gf27500_adc(cc, e14, 0, &e14); + cc = inner_gf27500_adc(cc, e15, 0, &e15); + assert(cc == 0); + + // + 3 mul = 58 + inner_gf27500_umul(lo, hi, a->v3, b->v2); + cc = inner_gf27500_adc(cc, e5, lo, &e5); + cc = inner_gf27500_adc(cc, e6, hi, &e6); + inner_gf27500_umul(lo, hi, a->v5, b->v2); + cc = inner_gf27500_adc(cc, e7, lo, &e7); + cc = inner_gf27500_adc(cc, e8, hi, &e8); + inner_gf27500_umul(lo, hi, a->v5, b->v4); + cc = inner_gf27500_adc(cc, e9, lo, &e9); + cc = inner_gf27500_adc(cc, e10, hi, &e10); + cc = inner_gf27500_adc(cc, e11, 0, &e11); + cc = inner_gf27500_adc(cc, e12, 0, &e12); + cc = inner_gf27500_adc(cc, e13, 0, &e13); + cc = inner_gf27500_adc(cc, e14, 0, &e14); + cc = inner_gf27500_adc(cc, e15, 0, &e15); + assert(cc == 0); + + // + 2 mul = 60 + inner_gf27500_umul(lo, hi, a->v2, b->v4); + cc = inner_gf27500_adc(cc, e6, lo, &e6); + cc = inner_gf27500_adc(cc, e7, hi, &e7); + inner_gf27500_umul(lo, hi, a->v2, b->v6); + cc = inner_gf27500_adc(cc, e8, lo, &e8); + cc = inner_gf27500_adc(cc, e9, hi, &e9); + cc = inner_gf27500_adc(cc, e10, 0, &e10); + cc = inner_gf27500_adc(cc, e11, 0, &e11); + cc = inner_gf27500_adc(cc, e12, 0, &e12); + cc = inner_gf27500_adc(cc, e13, 0, &e13); + cc = inner_gf27500_adc(cc, e14, 0, &e14); + cc = inner_gf27500_adc(cc, e15, 0, &e15); + assert(cc == 0); + + // + 2 mul = 62 + inner_gf27500_umul(lo, hi, a->v4, b->v2); + cc = inner_gf27500_adc(cc, e6, lo, &e6); + cc = inner_gf27500_adc(cc, e7, hi, &e7); + inner_gf27500_umul(lo, hi, a->v6, b->v2); + cc = inner_gf27500_adc(cc, e8, lo, &e8); + cc = inner_gf27500_adc(cc, e9, hi, &e9); + cc = inner_gf27500_adc(cc, e10, 0, &e10); + cc = inner_gf27500_adc(cc, e11, 0, &e11); + cc = inner_gf27500_adc(cc, e12, 0, &e12); + cc = inner_gf27500_adc(cc, e13, 0, &e13); + cc = inner_gf27500_adc(cc, e14, 0, &e14); + cc = inner_gf27500_adc(cc, e15, 0, &e15); + assert(cc == 0); + + // + 2 mul = 64 + inner_gf27500_umul(lo, hi, a->v3, b->v4); + inner_gf27500_umul(lo2, hi2, a->v4, b->v3); + cc = inner_gf27500_adc(0, lo, lo2, &lo); + cc = inner_gf27500_adc(cc, 
hi, hi2, &hi); + cc = inner_gf27500_adc(cc, 0, 0, &hi2); + assert(cc == 0); + cc = inner_gf27500_adc(0, e7, lo, &e7); + cc = inner_gf27500_adc(cc, e8, hi, &e8); + cc = inner_gf27500_adc(cc, e9, hi2, &e9); + cc = inner_gf27500_adc(cc, e10, 0, &e10); + cc = inner_gf27500_adc(cc, e11, 0, &e11); + cc = inner_gf27500_adc(cc, e12, 0, &e12); + cc = inner_gf27500_adc(cc, e13, 0, &e13); + cc = inner_gf27500_adc(cc, e14, 0, &e14); + cc = inner_gf27500_adc(cc, e15, 0, &e15); + assert(cc == 0); + + // Montgomery reduction. + // + // Low part is lo(e) = e0..e7 (512 bits). + // Let m = -1/q mod 2^512; we add (lo(e)*m mod 2^512)*q to the + // high part g = e8..e15 (498 bits). + // + // We have m = 27*2^500 + 1. + f0 = e0; + f1 = e1; + f2 = e2; + f3 = e3; + f4 = e4; + f5 = e5; + f6 = e6; + f7 = e7 + ((e0 * 27) << 52); + + // g = f*q + inner_gf27500_umul(g7, hi, f0, (uint64_t)27 << 52); + inner_gf27500_umul_add(g8, hi, f1, (uint64_t)27 << 52, hi); + inner_gf27500_umul_add(g9, hi, f2, (uint64_t)27 << 52, hi); + inner_gf27500_umul_add(g10, hi, f3, (uint64_t)27 << 52, hi); + inner_gf27500_umul_add(g11, hi, f4, (uint64_t)27 << 52, hi); + inner_gf27500_umul_add(g12, hi, f5, (uint64_t)27 << 52, hi); + inner_gf27500_umul_add(g13, hi, f6, (uint64_t)27 << 52, hi); + inner_gf27500_umul_add(g14, g15, f7, (uint64_t)27 << 52, hi); + + cc = inner_gf27500_sbb(0, 0, f0, &g0); + cc = inner_gf27500_sbb(cc, 0, f1, &g1); + cc = inner_gf27500_sbb(cc, 0, f2, &g2); + cc = inner_gf27500_sbb(cc, 0, f3, &g3); + cc = inner_gf27500_sbb(cc, 0, f4, &g4); + cc = inner_gf27500_sbb(cc, 0, f5, &g5); + cc = inner_gf27500_sbb(cc, 0, f6, &g6); + cc = inner_gf27500_sbb(cc, g7, f7, &g7); + cc = inner_gf27500_sbb(cc, g8, 0, &g8); + cc = inner_gf27500_sbb(cc, g9, 0, &g9); + cc = inner_gf27500_sbb(cc, g10, 0, &g10); + cc = inner_gf27500_sbb(cc, g11, 0, &g11); + cc = inner_gf27500_sbb(cc, g12, 0, &g12); + cc = inner_gf27500_sbb(cc, g13, 0, &g13); + cc = inner_gf27500_sbb(cc, g14, 0, &g14); + (void)inner_gf27500_sbb(cc, g15, 0, &g15); + + // Add g = f*q to e0..e11. + // Since e0..e15 < 2^1010 and f < 2^512, we know that the result + // is less than 2^1010 + 2^512*27*2^500, which is less than 2^1017. + // This is also a multiple of 2^512. 
We divide by 2^512 by simply + // dropping the low 512 bits (which are all equal to zero), and + // the result is less than 2**505 + cc = inner_gf27500_adc(0, g0, e0, &e0); + cc = inner_gf27500_adc(cc, g1, e1, &e1); + cc = inner_gf27500_adc(cc, g2, e2, &e2); + cc = inner_gf27500_adc(cc, g3, e3, &e3); + cc = inner_gf27500_adc(cc, g4, e4, &e4); + cc = inner_gf27500_adc(cc, g5, e5, &e5); + cc = inner_gf27500_adc(cc, g6, e6, &e6); + cc = inner_gf27500_adc(cc, g7, e7, &e7); + cc = inner_gf27500_adc(cc, g8, e8, &e8); + cc = inner_gf27500_adc(cc, g9, e9, &e9); + cc = inner_gf27500_adc(cc, g10, e10, &e10); + cc = inner_gf27500_adc(cc, g11, e11, &e11); + cc = inner_gf27500_adc(cc, g12, e12, &e12); + cc = inner_gf27500_adc(cc, g13, e13, &e13); + cc = inner_gf27500_adc(cc, g14, e14, &e14); + cc = inner_gf27500_adc(cc, g15, e15, &e15); + assert(cc == 0); + + d->v0 = e8; + d->v1 = e9; + d->v2 = e10; + d->v3 = e11; + d->v4 = e12; + d->v5 = e13; + d->v6 = e14; + d->v7 = e15; + } + + /* + * d <- a^2 + */ + static inline void + gf27500_square(gf27500 *d, const gf27500 *a) + { + uint64_t e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15; + uint64_t f0, f1, f2, f3, f4, f5, f6, f7; + uint64_t lo, hi; + uint64_t g0, g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, g11, g12, g13, g14, g15; + unsigned char cc; + + // Squaring over integers. + // 7 mul + inner_gf27500_umul(e1, e2, a->v0, a->v1); + inner_gf27500_umul(e3, e4, a->v0, a->v3); + inner_gf27500_umul(e5, e6, a->v0, a->v5); + inner_gf27500_umul(e7, e8, a->v0, a->v7); + inner_gf27500_umul(e9, e10, a->v2, a->v7); + inner_gf27500_umul(e11, e12, a->v4, a->v7); + inner_gf27500_umul(e13, e14, a->v6, a->v7); + + // + 6 = 13 mul + inner_gf27500_umul(lo, hi, a->v0, a->v2); + cc = inner_gf27500_adc(0, e2, lo, &e2); + cc = inner_gf27500_adc(cc, e3, hi, &e3); + inner_gf27500_umul(lo, hi, a->v0, a->v4); + cc = inner_gf27500_adc(cc, e4, lo, &e4); + cc = inner_gf27500_adc(cc, e5, hi, &e5); + inner_gf27500_umul(lo, hi, a->v0, a->v6); + cc = inner_gf27500_adc(cc, e6, lo, &e6); + cc = inner_gf27500_adc(cc, e7, hi, &e7); + inner_gf27500_umul(lo, hi, a->v1, a->v7); + cc = inner_gf27500_adc(cc, e8, lo, &e8); + cc = inner_gf27500_adc(cc, e9, hi, &e9); + inner_gf27500_umul(lo, hi, a->v3, a->v7); + cc = inner_gf27500_adc(cc, e10, lo, &e10); + cc = inner_gf27500_adc(cc, e11, hi, &e11); + inner_gf27500_umul(lo, hi, a->v5, a->v7); + cc = inner_gf27500_adc(cc, e12, lo, &e12); + cc = inner_gf27500_adc(cc, e13, hi, &e13); + (void)inner_gf27500_adc(cc, e14, 0, &e14); + + // + 5 = 18 mul + inner_gf27500_umul(lo, hi, a->v1, a->v2); + cc = inner_gf27500_adc(0, e3, lo, &e3); + cc = inner_gf27500_adc(cc, e4, hi, &e4); + inner_gf27500_umul(lo, hi, a->v1, a->v4); + cc = inner_gf27500_adc(cc, e5, lo, &e5); + cc = inner_gf27500_adc(cc, e6, hi, &e6); + inner_gf27500_umul(lo, hi, a->v1, a->v6); + cc = inner_gf27500_adc(cc, e7, lo, &e7); + cc = inner_gf27500_adc(cc, e8, hi, &e8); + inner_gf27500_umul(lo, hi, a->v3, a->v6); + cc = inner_gf27500_adc(cc, e9, lo, &e9); + cc = inner_gf27500_adc(cc, e10, hi, &e10); + inner_gf27500_umul(lo, hi, a->v5, a->v6); + cc = inner_gf27500_adc(cc, e11, lo, &e11); + cc = inner_gf27500_adc(cc, e12, hi, &e12); + (void)inner_gf27500_adc(cc, e13, 0, &e13); + + // + 4 = 22 mul + inner_gf27500_umul(lo, hi, a->v1, a->v3); + cc = inner_gf27500_adc(0, e4, lo, &e4); + cc = inner_gf27500_adc(cc, e5, hi, &e5); + inner_gf27500_umul(lo, hi, a->v1, a->v5); + cc = inner_gf27500_adc(cc, e6, lo, &e6); + cc = inner_gf27500_adc(cc, e7, hi, &e7); + inner_gf27500_umul(lo, hi, 
a->v2, a->v6); + cc = inner_gf27500_adc(cc, e8, lo, &e8); + cc = inner_gf27500_adc(cc, e9, hi, &e9); + inner_gf27500_umul(lo, hi, a->v4, a->v6); + cc = inner_gf27500_adc(cc, e10, lo, &e10); + cc = inner_gf27500_adc(cc, e11, hi, &e11); + (void)inner_gf27500_adc(cc, e12, 0, &e12); + + // + 3 = 25 mul + inner_gf27500_umul(lo, hi, a->v2, a->v3); + cc = inner_gf27500_adc(0, e5, lo, &e5); + cc = inner_gf27500_adc(cc, e6, hi, &e6); + inner_gf27500_umul(lo, hi, a->v2, a->v5); + cc = inner_gf27500_adc(cc, e7, lo, &e7); + cc = inner_gf27500_adc(cc, e8, hi, &e8); + inner_gf27500_umul(lo, hi, a->v4, a->v5); + cc = inner_gf27500_adc(cc, e9, lo, &e9); + cc = inner_gf27500_adc(cc, e10, hi, &e10); + (void)inner_gf27500_adc(cc, e11, 0, &e11); + + // + 2 = 27 mul + inner_gf27500_umul(lo, hi, a->v2, a->v4); + cc = inner_gf27500_adc(0, e6, lo, &e6); + cc = inner_gf27500_adc(cc, e7, hi, &e7); + inner_gf27500_umul(lo, hi, a->v3, a->v5); + cc = inner_gf27500_adc(cc, e8, lo, &e8); + cc = inner_gf27500_adc(cc, e9, hi, &e9); + (void)inner_gf27500_adc(cc, e10, 0, &e10); + + // + 1 = 28 mul + inner_gf27500_umul(lo, hi, a->v3, a->v4); + cc = inner_gf27500_adc(0, e7, lo, &e7); + cc = inner_gf27500_adc(cc, e8, hi, &e8); + (void)inner_gf27500_adc(cc, e9, 0, &e9); + + // Multiplication by two for all off diagonal + // terms + e15 = e14 >> 63; + e14 = (e14 << 1) | (e13 >> 63); + e13 = (e13 << 1) | (e12 >> 63); + e12 = (e12 << 1) | (e11 >> 63); + e11 = (e11 << 1) | (e10 >> 63); + e10 = (e10 << 1) | (e9 >> 63); + e9 = (e9 << 1) | (e8 >> 63); + e8 = (e8 << 1) | (e7 >> 63); + e7 = (e7 << 1) | (e6 >> 63); + e6 = (e6 << 1) | (e5 >> 63); + e5 = (e5 << 1) | (e4 >> 63); + e4 = (e4 << 1) | (e3 >> 63); + e3 = (e3 << 1) | (e2 >> 63); + e2 = (e2 << 1) | (e1 >> 63); + e1 = e1 << 1; + + // + 8 = 36 mul (diagonal terms) + inner_gf27500_umul(e0, hi, a->v0, a->v0); + cc = inner_gf27500_adc(0, e1, hi, &e1); + inner_gf27500_umul(lo, hi, a->v1, a->v1); + cc = inner_gf27500_adc(cc, e2, lo, &e2); + cc = inner_gf27500_adc(cc, e3, hi, &e3); + inner_gf27500_umul(lo, hi, a->v2, a->v2); + cc = inner_gf27500_adc(cc, e4, lo, &e4); + cc = inner_gf27500_adc(cc, e5, hi, &e5); + inner_gf27500_umul(lo, hi, a->v3, a->v3); + cc = inner_gf27500_adc(cc, e6, lo, &e6); + cc = inner_gf27500_adc(cc, e7, hi, &e7); + inner_gf27500_umul(lo, hi, a->v4, a->v4); + cc = inner_gf27500_adc(cc, e8, lo, &e8); + cc = inner_gf27500_adc(cc, e9, hi, &e9); + inner_gf27500_umul(lo, hi, a->v5, a->v5); + cc = inner_gf27500_adc(cc, e10, lo, &e10); + cc = inner_gf27500_adc(cc, e11, hi, &e11); + inner_gf27500_umul(lo, hi, a->v6, a->v6); + cc = inner_gf27500_adc(cc, e12, lo, &e12); + cc = inner_gf27500_adc(cc, e13, hi, &e13); + inner_gf27500_umul(lo, hi, a->v7, a->v7); + cc = inner_gf27500_adc(cc, e14, lo, &e14); + (void)inner_gf27500_adc(cc, e15, hi, &e15); + + // Montgomery reduction. + // + // Low part is lo(e) = e0..e7 (512 bits). + // Let m = -1/q mod 2^512; we add (lo(e)*m mod 2^512)*q to the + // high part g = e8..e15 (498 bits). + // + // We have m = 27*2^500 + 1. 
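Before the reduction resumes below (it is the same interleaved Montgomery tail as in gf27500_mul()), note how the squaring saves work: each cross product a_i*a_j with i < j is accumulated only once, the whole accumulator is doubled by a single one-bit shift across the limbs, and the diagonal squares a_i^2 are added afterwards. A minimal sketch of that cross-limb doubling (hypothetical helper, not from the upstream sources):

    #include <stddef.h>
    #include <stdint.h>

    /* Double a little-endian multi-limb integer in place by a one-bit left
     * shift, propagating the bit shifted out of each limb into the next;
     * the bit leaving the top limb is returned (it becomes e15 above). */
    static inline uint64_t
    toy_shl1(uint64_t *v, size_t n)
    {
        uint64_t carry = 0;
        for (size_t i = 0; i < n; i++) {
            uint64_t next = v[i] >> 63;
            v[i] = (v[i] << 1) | carry;
            carry = next;
        }
        return carry;
    }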
+ f0 = e0; + f1 = e1; + f2 = e2; + f3 = e3; + f4 = e4; + f5 = e5; + f6 = e6; + f7 = e7 + ((e0 * 27) << 52); + + // g = f*q + inner_gf27500_umul(g7, hi, f0, (uint64_t)27 << 52); + inner_gf27500_umul_add(g8, hi, f1, (uint64_t)27 << 52, hi); + inner_gf27500_umul_add(g9, hi, f2, (uint64_t)27 << 52, hi); + inner_gf27500_umul_add(g10, hi, f3, (uint64_t)27 << 52, hi); + inner_gf27500_umul_add(g11, hi, f4, (uint64_t)27 << 52, hi); + inner_gf27500_umul_add(g12, hi, f5, (uint64_t)27 << 52, hi); + inner_gf27500_umul_add(g13, hi, f6, (uint64_t)27 << 52, hi); + inner_gf27500_umul_add(g14, g15, f7, (uint64_t)27 << 52, hi); + + cc = inner_gf27500_sbb(0, 0, f0, &g0); + cc = inner_gf27500_sbb(cc, 0, f1, &g1); + cc = inner_gf27500_sbb(cc, 0, f2, &g2); + cc = inner_gf27500_sbb(cc, 0, f3, &g3); + cc = inner_gf27500_sbb(cc, 0, f4, &g4); + cc = inner_gf27500_sbb(cc, 0, f5, &g5); + cc = inner_gf27500_sbb(cc, 0, f6, &g6); + cc = inner_gf27500_sbb(cc, g7, f7, &g7); + cc = inner_gf27500_sbb(cc, g8, 0, &g8); + cc = inner_gf27500_sbb(cc, g9, 0, &g9); + cc = inner_gf27500_sbb(cc, g10, 0, &g10); + cc = inner_gf27500_sbb(cc, g11, 0, &g11); + cc = inner_gf27500_sbb(cc, g12, 0, &g12); + cc = inner_gf27500_sbb(cc, g13, 0, &g13); + cc = inner_gf27500_sbb(cc, g14, 0, &g14); + (void)inner_gf27500_sbb(cc, g15, 0, &g15); + + // Add g = f*q to e0..e11. + // Since e0..e15 < 2^1010 and f < 2^512, we know that the result + // is less than 2^1010 + 2^512*27*2^500, which is less than 2^1017. + // This is also a multiple of 2^512. We divide by 2^512 by simply + // dropping the low 512 bits (which are all equal to zero), and + // the result is less than 2**505 + cc = inner_gf27500_adc(0, g0, e0, &e0); + cc = inner_gf27500_adc(cc, g1, e1, &e1); + cc = inner_gf27500_adc(cc, g2, e2, &e2); + cc = inner_gf27500_adc(cc, g3, e3, &e3); + cc = inner_gf27500_adc(cc, g4, e4, &e4); + cc = inner_gf27500_adc(cc, g5, e5, &e5); + cc = inner_gf27500_adc(cc, g6, e6, &e6); + cc = inner_gf27500_adc(cc, g7, e7, &e7); + cc = inner_gf27500_adc(cc, g8, e8, &e8); + cc = inner_gf27500_adc(cc, g9, e9, &e9); + cc = inner_gf27500_adc(cc, g10, e10, &e10); + cc = inner_gf27500_adc(cc, g11, e11, &e11); + cc = inner_gf27500_adc(cc, g12, e12, &e12); + cc = inner_gf27500_adc(cc, g13, e13, &e13); + cc = inner_gf27500_adc(cc, g14, e14, &e14); + cc = inner_gf27500_adc(cc, g15, e15, &e15); + assert(cc == 0); + + d->v0 = e8; + d->v1 = e9; + d->v2 = e10; + d->v3 = e11; + d->v4 = e12; + d->v5 = e13; + d->v6 = e14; + d->v7 = e15; + } + + /* + * d <- a^(2^n) + * This computes n successive squarings of value a, with result in d. + * n == 0 is a valid input (in that case, *a is copied into *d). + * This function is not constant-time with regard to n: the number of + * successive squarings may be observable through timing-based side channels. + */ + static inline void + gf27500_xsquare(gf27500 *d, const gf27500 *a, unsigned n) + { + if (n == 0) { + *d = *a; + return; + } + gf27500_square(d, a); + while (n-- > 1) { + gf27500_square(d, d); + } + } + + /* + * Returns 0xFFFFFFFF if *a is zero; otherwise, 0x00000000 is returned. + */ + static inline uint32_t + gf27500_iszero(const gf27500 *a) + { + uint64_t a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, r; + + // Zero can be represented by 0 or by q. 
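Because field elements are kept only partially reduced (below 2^505 rather than below q), zero has exactly two in-memory representations, 0 and q, and the test below checks for both. It relies on the classic branch-free zero test: for any nonzero x, x | (0 - x) has its top bit set. The real code combines two such tests, one against 0 and one against q, by AND-ing their (x | -x) words; a one-word sketch of the idiom (hypothetical helper, not from the upstream files):

    #include <stdint.h>

    /* 0xFFFFFFFF if x == 0, 0x00000000 otherwise, with no branch. */
    static inline uint32_t
    word_is_zero_mask(uint64_t x)
    {
        return (uint32_t)((x | (0 - x)) >> 63) - 1;
    }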
+ a0 = a->v0; + a1 = a->v1; + a2 = a->v2; + a3 = a->v3; + a4 = a->v4; + a5 = a->v5; + a6 = a->v6; + a7 = a->v7; + + t0 = a0 | a1 | a2 | a3 | a4 | a5 | a6 | a7; + t1 = ~a0 | ~a1 | ~a2 | ~a3 | ~a4 | ~a5 | ~a6 | (a7 ^ 0x01AFFFFFFFFFFFFF); + + // Top bit of r is 0 if and only if one of t0 or t1 is zero. + r = (t0 | -t0) & (t1 | -t1); + return (uint32_t)(r >> 63) - 1; + } + + /* + * Returns 0xFFFFFFFF if *a and *b represent the same field element; + * otherwise, 0x00000000 is returned. + */ + static inline uint32_t + gf27500_equals(const gf27500 *a, const gf27500 *b) + { + gf27500 d; + gf27500_sub(&d, a, b); + return gf27500_iszero(&d); + } + + /* + * d <- 1/a + * If *a is not zero, then the inverse is well-defined and written into *d, + * and the function returns 0xFFFFFFFF. If *a is zero, then this function + * sets *d to zero and returns 0x00000000. + */ + uint32_t gf27500_invert(gf27500 *d, const gf27500 *a); + + /* + * d <- a/b + * If *b is not zero, then this functions writes a/b into *d, and returns + * 0xFFFFFFFF. If *b is zero, then this function sets *d to zero (regardless + * of the value of *a) and returns 0x00000000. + */ + uint32_t gf27500_div(gf27500 *d, const gf27500 *a, const gf27500 *b); + + /* + * d <- a/3 + * Divides by 3 in the field by implementing the algorithm proposed in + * "Efficient Multiplication in Finite Field Extensions of Degree 5" + * by El Mrabet, Guillevic and Ionica at ASIACRYPT 2011. + */ + void gf27500_div3(gf27500 *out, const gf27500 *a); + + /* + * Get the Legendre symbol of *a (0 for zero, +1 for a non-zero square, + * -1 for a non-square). + */ + int32_t gf27500_legendre(const gf27500 *a); + + /* + * If *a is a square, then this function sets *d to a square root of a, + * and returns 0xFFFFFFFF. If *a is not a square, then this function + * sets *d to a square root of -a, and returns 0x00000000. + * In all cases, the value written into *d is such that the least significant + * bit of its integer representation (in [0..q-1]) is zero. + */ + uint32_t gf27500_sqrt(gf27500 *d, const gf27500 *a); + + /* + * Encode field element *a into buffer dst (exactly 64 bytes are written). + */ + void gf27500_encode(void *dst, const gf27500 *a); + + /* + * Decode source buffer src (exactly 64 bytes) into a field element *d. + * If the source value is not a valid canonical encoding, then *d is zero + * and the function returns 0x00000000; otherwise, the function returns + * 0xFFFFFFFF. + */ + uint32_t gf27500_decode(gf27500 *d, const void *src); + + /* + * Interpret the source buffer (of size len bytes) as an unsigned integer + * (little-endian convention) and reduce it modulo q, yielding a field + * element which is written into *d. Since reduction is applied, this + * function cannot fail. 
+ */ + void gf27500_decode_reduce(gf27500 *d, const void *src, size_t len); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd.c new file mode 100644 index 0000000000..0424108019 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd.c @@ -0,0 +1,93 @@ +#include +#include + +void +double_couple_point(theta_couple_point_t *out, const theta_couple_point_t *in, const theta_couple_curve_t *E1E2) +{ + ec_dbl(&out->P1, &in->P1, &E1E2->E1); + ec_dbl(&out->P2, &in->P2, &E1E2->E2); +} + +void +double_couple_point_iter(theta_couple_point_t *out, + unsigned n, + const theta_couple_point_t *in, + const theta_couple_curve_t *E1E2) +{ + if (n == 0) { + memmove(out, in, sizeof(theta_couple_point_t)); + } else { + double_couple_point(out, in, E1E2); + for (unsigned i = 0; i < n - 1; i++) { + double_couple_point(out, out, E1E2); + } + } +} + +void +add_couple_jac_points(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *T1, + const theta_couple_jac_point_t *T2, + const theta_couple_curve_t *E1E2) +{ + ADD(&out->P1, &T1->P1, &T2->P1, &E1E2->E1); + ADD(&out->P2, &T1->P2, &T2->P2, &E1E2->E2); +} + +void +double_couple_jac_point(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2) +{ + DBL(&out->P1, &in->P1, &E1E2->E1); + DBL(&out->P2, &in->P2, &E1E2->E2); +} + +void +double_couple_jac_point_iter(theta_couple_jac_point_t *out, + unsigned n, + const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2) +{ + if (n == 0) { + *out = *in; + } else if (n == 1) { + double_couple_jac_point(out, in, E1E2); + } else { + fp2_t a1, a2, t1, t2; + + jac_to_ws(&out->P1, &t1, &a1, &in->P1, &E1E2->E1); + jac_to_ws(&out->P2, &t2, &a2, &in->P2, &E1E2->E2); + + DBLW(&out->P1, &t1, &out->P1, &t1); + DBLW(&out->P2, &t2, &out->P2, &t2); + for (unsigned i = 0; i < n - 1; i++) { + DBLW(&out->P1, &t1, &out->P1, &t1); + DBLW(&out->P2, &t2, &out->P2, &t2); + } + + jac_from_ws(&out->P1, &out->P1, &a1, &E1E2->E1); + jac_from_ws(&out->P2, &out->P2, &a2, &E1E2->E2); + } +} + +void +couple_jac_to_xz(theta_couple_point_t *P, const theta_couple_jac_point_t *xyP) +{ + jac_to_xz(&P->P1, &xyP->P1); + jac_to_xz(&P->P2, &xyP->P2); +} + +void +copy_bases_to_kernel(theta_kernel_couple_points_t *ker, const ec_basis_t *B1, const ec_basis_t *B2) +{ + // Copy the basis on E1 to (P, _) on T1, T2 and T1 - T2 + copy_point(&ker->T1.P1, &B1->P); + copy_point(&ker->T2.P1, &B1->Q); + copy_point(&ker->T1m2.P1, &B1->PmQ); + + // Copy the basis on E2 to (_, P) on T1, T2 and T1 - T2 + copy_point(&ker->T1.P2, &B2->P); + copy_point(&ker->T2.P2, &B2->Q); + copy_point(&ker->T1m2.P2, &B2->PmQ); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd.h new file mode 100644 index 0000000000..2b16e23834 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd.h @@ -0,0 +1,435 @@ +/** @file + * + * @authors Antonin Leroux + * + * @brief The HD-isogenies algorithm required by the signature + * + */ + +#ifndef HD_H +#define HD_H + +#include +#include +#include + +/** @defgroup hd_module Abelian surfaces and their isogenies + * @{ + */ + +#define HD_extra_torsion 2 + +/** @defgroup hd_struct Data structures for dimension 2 + * @{ + */ + +/** @brief Type for couple point with XZ coordinates + * @typedef theta_couple_point_t + * + * @struct theta_couple_point + * + * Structure for 
the couple point on an elliptic product + * using XZ coordinates + */ +typedef struct theta_couple_point +{ + ec_point_t P1; + ec_point_t P2; +} theta_couple_point_t; + +/** @brief Type for three couple points T1, T2, T1-T2 with XZ coordinates + * @typedef theta_kernel_couple_points_t + * + * @struct theta_kernel_couple_points + * + * Structure for a triple of theta couple points T1, T2 and T1 - T2 + */ +typedef struct theta_kernel_couple_points +{ + theta_couple_point_t T1; + theta_couple_point_t T2; + theta_couple_point_t T1m2; +} theta_kernel_couple_points_t; + +/** @brief Type for couple point with XYZ coordinates + * @typedef theta_couple_jac_point_t + * + * @struct theta_couple_jac_point + * + * Structure for the couple point on an elliptic product + * using XYZ coordinates + */ +typedef struct theta_couple_jac_point +{ + jac_point_t P1; + jac_point_t P2; +} theta_couple_jac_point_t; + +/** @brief Type for couple curve * + * @typedef theta_couple_curve_t + * + * @struct theta_couple_curve + * + * the theta_couple_curve structure + */ +typedef struct theta_couple_curve +{ + ec_curve_t E1; + ec_curve_t E2; +} theta_couple_curve_t; + +/** @brief Type for a product E1 x E2 with corresponding bases + * @typedef theta_couple_curve_with_basis_t + * + * @struct theta_couple_curve_with_basis + * + * tType for a product E1 x E2 with corresponding bases Ei[2^n] + */ +typedef struct theta_couple_curve_with_basis +{ + ec_curve_t E1; + ec_curve_t E2; + ec_basis_t B1; + ec_basis_t B2; +} theta_couple_curve_with_basis_t; + +/** @brief Type for theta point * + * @typedef theta_point_t + * + * @struct theta_point + * + * the theta_point structure used + */ +typedef struct theta_point +{ + fp2_t x; + fp2_t y; + fp2_t z; + fp2_t t; +} theta_point_t; + +/** @brief Type for theta point with repeating components + * @typedef theta_point_compact_t + * + * @struct theta_point_compact + * + * the theta_point structure used for points with repeated components + */ +typedef struct theta_point_compact +{ + fp2_t x; + fp2_t y; +} theta_point_compact_t; + +/** @brief Type for theta structure * + * @typedef theta_structure_t + * + * @struct theta_structure + * + * the theta_structure structure used + */ +typedef struct theta_structure +{ + theta_point_t null_point; + bool precomputation; + + // Eight precomputed values used for doubling and + // (2,2)-isogenies. + fp2_t XYZ0; + fp2_t YZT0; + fp2_t XZT0; + fp2_t XYT0; + + fp2_t xyz0; + fp2_t yzt0; + fp2_t xzt0; + fp2_t xyt0; +} theta_structure_t; + +/** @brief A 2x2 matrix used for action by translation + * @typedef translation_matrix_t + * + * @struct translation_matrix + * + * Structure to hold 4 fp2_t elements representing a 2x2 matrix used when computing + * a compatible theta structure during gluing. + */ +typedef struct translation_matrix +{ + fp2_t g00; + fp2_t g01; + fp2_t g10; + fp2_t g11; +} translation_matrix_t; + +/** @brief A 4x4 matrix used for basis changes + * @typedef basis_change_matrix_t + * + * @struct basis_change_matrix + * + * Structure to hold 16 elements representing a 4x4 matrix used for changing + * the basis of a theta point. 
+ */ +typedef struct basis_change_matrix +{ + fp2_t m[4][4]; +} basis_change_matrix_t; + +/** @brief Type for gluing (2,2) theta isogeny * + * @typedef theta_gluing_t + * + * @struct theta_gluing + * + * the theta_gluing structure + */ +typedef struct theta_gluing +{ + + theta_couple_curve_t domain; + theta_couple_jac_point_t xyK1_8; + theta_point_compact_t imageK1_8; + basis_change_matrix_t M; + theta_point_t precomputation; + theta_point_t codomain; + +} theta_gluing_t; + +/** @brief Type for standard (2,2) theta isogeny * + * @typedef theta_isogeny_t + * + * @struct theta_isogeny + * + * the theta_isogeny structure + */ +typedef struct theta_isogeny +{ + theta_point_t T1_8; + theta_point_t T2_8; + bool hadamard_bool_1; + bool hadamard_bool_2; + theta_structure_t domain; + theta_point_t precomputation; + theta_structure_t codomain; +} theta_isogeny_t; + +/** @brief Type for splitting isomorphism * + * @typedef theta_splitting_t + * + * @struct theta_splitting + * + * the theta_splitting structure + */ +typedef struct theta_splitting +{ + basis_change_matrix_t M; + theta_structure_t B; + +} theta_splitting_t; + +// end of hd_struct +/** + * @} + */ + +/** @defgroup hd_functions Functions for dimension 2 + * @{ + */ + +/** + * @brief Compute the double of the theta couple point in on the elliptic product E12 + * + * @param out Output: the theta_couple_point + * @param in the theta couple point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1,P2) + * out = [2] (P1,P2) + * + */ +void double_couple_point(theta_couple_point_t *out, const theta_couple_point_t *in, const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the iterated double of the theta couple point in on the elliptic product E12 + * + * @param out Output: the theta_couple_point + * @param n : the number of iteration + * @param E1E2 an elliptic product + * @param in the theta couple point in the elliptic product + * in = (P1,P2) + * out = [2^n] (P1,P2) + * + */ +void double_couple_point_iter(theta_couple_point_t *out, + unsigned n, + const theta_couple_point_t *in, + const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the addition of two points in (X : Y : Z) coordinates on the elliptic product E12 + * + * @param out Output: the theta_couple_jac_point + * @param T1 the theta couple jac point in the elliptic product + * @param T2 the theta couple jac point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1, P2), (Q1, Q2) + * out = (P1 + Q1, P2 + Q2) + * + **/ +void add_couple_jac_points(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *T1, + const theta_couple_jac_point_t *T2, + const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the double of the theta couple point in on the elliptic product E12 + * + * @param out Output: the theta_couple_point + * @param in the theta couple point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1,P2) + * out = [2] (P1,P2) + * + */ +void double_couple_jac_point(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the iterated double of the theta couple jac point in on the elliptic product E12 + * + * @param out Output: the theta_couple_jac_point + * @param n : the number of iteration + * @param in the theta couple jac point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1,P2) + * out = [2^n] (P1,P2) + * + */ +void double_couple_jac_point_iter(theta_couple_jac_point_t *out, + unsigned n, + 
const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2); + +/** + * @brief A forgetful function which returns (X : Z) points given a pair of (X : Y : Z) points + * + * @param P Output: the theta_couple_point + * @param xyP : the theta_couple_jac_point + **/ +void couple_jac_to_xz(theta_couple_point_t *P, const theta_couple_jac_point_t *xyP); + +/** + * @brief Compute a (2,2) isogeny chain in dimension 2 between elliptic + * products in the theta_model and evaluate at a list of points of the form + * (P1,0) or (0,P2). Returns 0 if the codomain fails to split (or there is + * an error during the computation) and 1 otherwise. + * + * @param n : the length of the isogeny chain + * @param E12 an elliptic curve product + * @param ker T1, T2 and T1-T2. couple points on E12[2^(n+2)] + * @param extra_torsion boolean indicating if we give the points in E12[2^n] or + * E12[2^(n+HD_extra_torsion)] + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @returns 1 on success 0 on failure + * + */ +int theta_chain_compute_and_eval(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP); + +/** + * @brief Compute a (2,2) isogeny chain in dimension 2 between elliptic + * products in the theta_model and evaluate at a list of points of the form + * (P1,0) or (0,P2). Returns 0 if the codomain fails to split (or there is + * an error during the computation) and 1 otherwise. + * Compared to theta_chain_compute_and_eval, it does extra isotropy + * checks on the kernel. + * + * @param n : the length of the isogeny chain + * @param E12 an elliptic curve product + * @param ker T1, T2 and T1-T2. couple points on E12[2^(n+2)] + * @param extra_torsion boolean indicating if we give the points in E12[2^n] or + * E12[2^(n+HD_extra_torsion)] + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @returns 1 on success 0 on failure + * + */ +int theta_chain_compute_and_eval_verify(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP); + +/** + * @brief Compute a (2,2) isogeny chain in dimension 2 between elliptic + * products in the theta_model and evaluate at a list of points of the form + * (P1,0) or (0,P2). Returns 0 if the codomain fails to split (or there is + * an error during the computation) and 1 otherwise. + * Compared to theta_chain_compute_and_eval, it selects a random Montgomery + * model of the codomain. + * + * @param n : the length of the isogeny chain + * @param E12 an elliptic curve product + * @param ker T1, T2 and T1-T2. 
couple points on E12[2^(n+2)] + * @param extra_torsion boolean indicating if we give the points in E12[2^n] or + * E12[2^(n+HD_extra_torsion)] + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @returns 1 on success, 0 on failure + * + */ +int theta_chain_compute_and_eval_randomized(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP); + +/** + * @brief Given a bases B1 on E1 and B2 on E2 copies this to create a kernel + * on E1 x E2 as couple points T1, T2 and T1 - T2 + * + * @param ker Output: a kernel for dim_two_isogenies (T1, T2, T1-T2) + * @param B1 Input basis on E1 + * @param B2 Input basis on E2 + **/ +void copy_bases_to_kernel(theta_kernel_couple_points_t *ker, const ec_basis_t *B1, const ec_basis_t *B2); + +/** + * @brief Given a couple of points (P1, P2) on a couple of curves (E1, E2) + * this function tests if both points are of order exactly 2^t + * + * @param T: couple point (P1, P2) + * @param E: a couple of curves (E1, E2) + * @param t: an integer + * @returns 0xFFFFFFFF on success, 0 on failure + */ +static int +test_couple_point_order_twof(const theta_couple_point_t *T, const theta_couple_curve_t *E, int t) +{ + int check_P1 = test_point_order_twof(&T->P1, &E->E1, t); + int check_P2 = test_point_order_twof(&T->P2, &E->E2, t); + + return check_P1 & check_P2; +} + +// end of hd_functions +/** + * @} + */ +// end of hd_module +/** + * @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.c new file mode 100644 index 0000000000..a697ac7eb1 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.c @@ -0,0 +1,143 @@ +#include + +#define FP2_ZERO 0 +#define FP2_ONE 1 +#define FP2_I 2 +#define FP2_MINUS_ONE 3 +#define FP2_MINUS_I 4 + +const int EVEN_INDEX[10][2] = {{0, 0}, {0, 1}, {0, 2}, {0, 3}, {1, 0}, {1, 2}, {2, 0}, {2, 1}, {3, 0}, {3, 3}}; +const int CHI_EVAL[4][4] = {{1, 1, 1, 1}, {1, -1, 1, -1}, {1, 1, -1, -1}, {1, -1, -1, 1}}; +const fp2_t FP2_CONSTANTS[5] = {{ +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x1ffb, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1bf} +#elif RADIX == 32 +{0x1ffda12f, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x57f} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xffffffffffffff68, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x7fffffffffffff} +#else +{0x1fffffffffffed0, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0xffffffffffff} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1ffb, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1bf} +#elif RADIX == 32 +{0x1ffda12f, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x57f} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xffffffffffffff68, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x7fffffffffffff} +#else +{0x1fffffffffffed0, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0xffffffffffff} +#endif +#endif +}}; +const precomp_basis_change_matrix_t SPLITTING_TRANSFORMS[10] = {{{{FP2_ONE, FP2_I, FP2_ONE, FP2_I}, {FP2_ONE, FP2_MINUS_I, FP2_MINUS_ONE, FP2_I}, {FP2_ONE, FP2_I, FP2_MINUS_ONE, FP2_MINUS_I}, {FP2_MINUS_ONE, FP2_I, FP2_MINUS_ONE, FP2_I}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_MINUS_ONE, FP2_ZERO, FP2_ZERO}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_MINUS_ONE, FP2_ZERO}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}, {FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_ONE}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_ONE}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}, {FP2_MINUS_ONE, FP2_ONE, FP2_ONE, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}}}}; +const precomp_basis_change_matrix_t NORMALIZATION_TRANSFORMS[6] = {{{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}}}, {{{FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, 
FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}}}, {{{FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_ONE}, {FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}}}, {{{FP2_MINUS_ONE, FP2_I, FP2_I, FP2_ONE}, {FP2_I, FP2_MINUS_ONE, FP2_ONE, FP2_I}, {FP2_I, FP2_ONE, FP2_MINUS_ONE, FP2_I}, {FP2_ONE, FP2_I, FP2_I, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_I, FP2_I, FP2_MINUS_ONE}, {FP2_I, FP2_ONE, FP2_MINUS_ONE, FP2_I}, {FP2_I, FP2_MINUS_ONE, FP2_ONE, FP2_I}, {FP2_MINUS_ONE, FP2_I, FP2_I, FP2_ONE}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.h new file mode 100644 index 0000000000..b3147a42a9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.h @@ -0,0 +1,18 @@ +#ifndef HD_SPLITTING_H +#define HD_SPLITTING_H + +#include +#include + +typedef struct precomp_basis_change_matrix { + uint8_t m[4][4]; +} precomp_basis_change_matrix_t; + +extern const int EVEN_INDEX[10][2]; +extern const int CHI_EVAL[4][4]; +extern const fp2_t FP2_CONSTANTS[5]; +extern const precomp_basis_change_matrix_t SPLITTING_TRANSFORMS[10]; +extern const precomp_basis_change_matrix_t NORMALIZATION_TRANSFORMS[6]; + +#endif + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/id2iso.c new file mode 100644 index 0000000000..0743974345 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/id2iso.c @@ -0,0 +1,338 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Scalar multiplication [x]P + [y]Q where x and y are stored +// inside an ibz_vec_2_t [x, y] and P, Q \in E[2^f] +void +ec_biscalar_mul_ibz_vec(ec_point_t *res, + const ibz_vec_2_t *scalar_vec, + const int f, + const ec_basis_t *PQ, + const ec_curve_t *curve) +{ + digit_t scalars[2][NWORDS_ORDER]; + ibz_to_digit_array(scalars[0], &(*scalar_vec)[0]); + ibz_to_digit_array(scalars[1], &(*scalar_vec)[1]); + ec_biscalar_mul(res, scalars[0], scalars[1], f, PQ, curve); +} + +// Given an ideal, computes the scalars s0, s1 which determine the kernel generator +// of the equivalent isogeny +void +id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *vec, const quat_left_ideal_t *lideal) +{ + ibz_t tmp; + ibz_init(&tmp); + + ibz_mat_2x2_t mat; + ibz_mat_2x2_init(&mat); + + // construct the matrix of the dual of alpha on the 2^f-torsion + { + quat_alg_elem_t alpha; + quat_alg_elem_init(&alpha); + + int lideal_generator_ok UNUSED = quat_lideal_generator(&alpha, lideal, &QUATALG_PINFTY); + assert(lideal_generator_ok); + quat_alg_conj(&alpha, &alpha); + + ibz_vec_4_t coeffs; + ibz_vec_4_init(&coeffs); + quat_change_to_O0_basis(&coeffs, &alpha); + + for (unsigned i = 0; i < 2; ++i) { + ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + for (unsigned j = 0; j < 2; ++j) { + ibz_mul(&tmp, &ACTION_GEN2[i][j], &coeffs[1]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN3[i][j], &coeffs[2]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN4[i][j], &coeffs[3]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + } + } + + ibz_vec_4_finalize(&coeffs); + 
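        /* At this point mat = coeffs[0]*Id + coeffs[1]*ACTION_GEN2
         * + coeffs[2]*ACTION_GEN3 + coeffs[3]*ACTION_GEN4, i.e. the matrix of
         * the dual of alpha acting on the canonical basis of E0[2^f]. */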
quat_alg_elem_finalize(&alpha); + } + + // find the kernel of alpha modulo the norm of the ideal + { + const ibz_t *const norm = &lideal->norm; + + ibz_mod(&(*vec)[0], &mat[0][0], norm); + ibz_mod(&(*vec)[1], &mat[1][0], norm); + ibz_gcd(&tmp, &(*vec)[0], &(*vec)[1]); + if (ibz_is_even(&tmp)) { + ibz_mod(&(*vec)[0], &mat[0][1], norm); + ibz_mod(&(*vec)[1], &mat[1][1], norm); + } +#ifndef NDEBUG + ibz_gcd(&tmp, &(*vec)[0], norm); + ibz_gcd(&tmp, &(*vec)[1], &tmp); + assert(!ibz_cmp(&tmp, &ibz_const_one)); +#endif + } + + ibz_mat_2x2_finalize(&mat); + ibz_finalize(&tmp); +} + +// helper function to apply a matrix to a basis of E[2^f] +// works in place +int +matrix_application_even_basis(ec_basis_t *bas, const ec_curve_t *E, ibz_mat_2x2_t *mat, int f) +{ + digit_t scalars[2][NWORDS_ORDER] = { 0 }; + int ret; + + ibz_t tmp, pow_two; + ibz_init(&tmp); + ibz_init(&pow_two); + ibz_pow(&pow_two, &ibz_const_two, f); + + ec_basis_t tmp_bas; + copy_basis(&tmp_bas, bas); + + // reduction mod 2f + ibz_mod(&(*mat)[0][0], &(*mat)[0][0], &pow_two); + ibz_mod(&(*mat)[0][1], &(*mat)[0][1], &pow_two); + ibz_mod(&(*mat)[1][0], &(*mat)[1][0], &pow_two); + ibz_mod(&(*mat)[1][1], &(*mat)[1][1], &pow_two); + + // For a matrix [[a, c], [b, d]] we compute: + // + // first basis element R = [a]P + [b]Q + ibz_to_digit_array(scalars[0], &(*mat)[0][0]); + ibz_to_digit_array(scalars[1], &(*mat)[1][0]); + ec_biscalar_mul(&bas->P, scalars[0], scalars[1], f, &tmp_bas, E); + + // second basis element S = [c]P + [d]Q + ibz_to_digit_array(scalars[0], &(*mat)[0][1]); + ibz_to_digit_array(scalars[1], &(*mat)[1][1]); + ec_biscalar_mul(&bas->Q, scalars[0], scalars[1], f, &tmp_bas, E); + + // Their difference R - S = [a - c]P + [b - d]Q + ibz_sub(&tmp, &(*mat)[0][0], &(*mat)[0][1]); + ibz_mod(&tmp, &tmp, &pow_two); + ibz_to_digit_array(scalars[0], &tmp); + ibz_sub(&tmp, &(*mat)[1][0], &(*mat)[1][1]); + ibz_mod(&tmp, &tmp, &pow_two); + ibz_to_digit_array(scalars[1], &tmp); + ret = ec_biscalar_mul(&bas->PmQ, scalars[0], scalars[1], f, &tmp_bas, E); + + ibz_finalize(&tmp); + ibz_finalize(&pow_two); + + return ret; +} + +// helper function to apply some endomorphism of E0 on the precomputed basis of E[2^f] +// works in place +void +endomorphism_application_even_basis(ec_basis_t *bas, + const int index_alternate_curve, + const ec_curve_t *E, + const quat_alg_elem_t *theta, + int f) +{ + ibz_t tmp; + ibz_init(&tmp); + ibz_vec_4_t coeffs; + ibz_vec_4_init(&coeffs); + ibz_mat_2x2_t mat; + ibz_mat_2x2_init(&mat); + + ibz_t content; + ibz_init(&content); + + // decomposing theta on the basis + quat_alg_make_primitive(&coeffs, &content, theta, &EXTREMAL_ORDERS[index_alternate_curve].order); + assert(ibz_is_odd(&content)); + + ibz_set(&mat[0][0], 0); + ibz_set(&mat[0][1], 0); + ibz_set(&mat[1][0], 0); + ibz_set(&mat[1][1], 0); + + // computing the matrix + + for (unsigned i = 0; i < 2; ++i) { + ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + for (unsigned j = 0; j < 2; ++j) { + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen2[i][j], &coeffs[1]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen3[i][j], &coeffs[2]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen4[i][j], &coeffs[3]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&mat[i][j], &mat[i][j], &content); + } + } + + // and now we apply it + matrix_application_even_basis(bas, E, &mat, f); + + 
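    /* bas now holds the (x-only) images of the original basis points under theta:
     * the matrix applied above is content * (coeffs[0]*Id + coeffs[1]*action_gen2
     * + coeffs[2]*action_gen3 + coeffs[3]*action_gen4), reduced mod 2^f inside
     * matrix_application_even_basis. */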
ibz_vec_4_finalize(&coeffs); + ibz_mat_2x2_finalize(&mat); + ibz_finalize(&content); + + ibz_finalize(&tmp); +} + +// compute the ideal whose kernel is generated by vec2[0]*BO[0] + vec2[1]*B0[1] where B0 is the +// canonical basis of E0 +void +id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t *vec2, int f) +{ + + // algorithm: apply endomorphisms 1 and j+(1+k)/2 to the kernel point, + // the result should form a basis of the respective torsion subgroup. + // then apply i to the kernel point and decompose over said basis. + // hence we have an equation a*P + b*[j+(1+k)/2]P == [i]P, which will + // easily reveal an endomorphism that kills P. + + ibz_t two_pow; + ibz_init(&two_pow); + + ibz_vec_2_t vec; + ibz_vec_2_init(&vec); + + if (f == TORSION_EVEN_POWER) { + ibz_copy(&two_pow, &TORSION_PLUS_2POWER); + } else { + ibz_pow(&two_pow, &ibz_const_two, f); + } + + { + ibz_mat_2x2_t mat; + ibz_mat_2x2_init(&mat); + + ibz_copy(&mat[0][0], &(*vec2)[0]); + ibz_copy(&mat[1][0], &(*vec2)[1]); + + ibz_mat_2x2_eval(&vec, &ACTION_J, vec2); + ibz_copy(&mat[0][1], &vec[0]); + ibz_copy(&mat[1][1], &vec[1]); + + ibz_mat_2x2_eval(&vec, &ACTION_GEN4, vec2); + ibz_add(&mat[0][1], &mat[0][1], &vec[0]); + ibz_add(&mat[1][1], &mat[1][1], &vec[1]); + + ibz_mod(&mat[0][1], &mat[0][1], &two_pow); + ibz_mod(&mat[1][1], &mat[1][1], &two_pow); + + ibz_mat_2x2_t inv; + ibz_mat_2x2_init(&inv); + { + int inv_ok UNUSED = ibz_mat_2x2_inv_mod(&inv, &mat, &two_pow); + assert(inv_ok); + } + ibz_mat_2x2_finalize(&mat); + + ibz_mat_2x2_eval(&vec, &ACTION_I, vec2); + ibz_mat_2x2_eval(&vec, &inv, &vec); + + ibz_mat_2x2_finalize(&inv); + } + + // final result: a - i + b*(j+(1+k)/2) + quat_alg_elem_t gen; + quat_alg_elem_init(&gen); + ibz_set(&gen.denom, 2); + ibz_add(&gen.coord[0], &vec[0], &vec[0]); + ibz_set(&gen.coord[1], -2); + ibz_add(&gen.coord[2], &vec[1], &vec[1]); + ibz_copy(&gen.coord[3], &vec[1]); + ibz_add(&gen.coord[0], &gen.coord[0], &vec[1]); + ibz_vec_2_finalize(&vec); + + quat_lideal_create(lideal, &gen, &two_pow, &MAXORD_O0, &QUATALG_PINFTY); + + assert(0 == ibz_cmp(&lideal->norm, &two_pow)); + + quat_alg_elem_finalize(&gen); + ibz_finalize(&two_pow); +} + +// finds mat such that: +// (mat*v).B2 = v.B1 +// where "." is the dot product, defined as (v1,v2).(P,Q) = v1*P + v2*Q +// mat encodes the coordinates of the points of B1 in the basis B2 +// specifically requires B1 or B2 to be "full" w.r.t to the 2^n torsion, so that we use tate +// full = 0 assumes B2 is "full" so the easier case. +// if we want to switch the role of B2 and B1, we invert the matrix, e.g. 
set full = 1 +static void +_change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, + const ec_basis_t *B1, + const ec_basis_t *B2, + ec_curve_t *E, + int f, + bool invert) +{ + digit_t x1[NWORDS_ORDER] = { 0 }, x2[NWORDS_ORDER] = { 0 }, x3[NWORDS_ORDER] = { 0 }, x4[NWORDS_ORDER] = { 0 }; + +#ifndef NDEBUG + int e_full = TORSION_EVEN_POWER; + int e_diff = e_full - f; +#endif + + // Ensure the input basis has points of order 2^f + if (invert) { + assert(test_basis_order_twof(B1, E, e_full)); + ec_dlog_2_tate(x1, x2, x3, x4, B1, B2, E, f); + mp_invert_matrix(x1, x2, x3, x4, f, NWORDS_ORDER); + } else { + assert(test_basis_order_twof(B2, E, e_full)); + ec_dlog_2_tate(x1, x2, x3, x4, B2, B1, E, f); + } + +#ifndef NDEBUG + { + if (invert) { + ec_point_t test, test2; + ec_biscalar_mul(&test, x1, x2, f, B2, E); + ec_dbl_iter(&test2, e_diff, &B1->P, E); + assert(ec_is_equal(&test, &test2)); + + ec_biscalar_mul(&test, x3, x4, f, B2, E); + ec_dbl_iter(&test2, e_diff, &B1->Q, E); + assert(ec_is_equal(&test, &test2)); + } else { + ec_point_t test; + ec_biscalar_mul(&test, x1, x2, f, B2, E); + ec_dbl_iter(&test, e_diff, &test, E); + assert(ec_is_equal(&test, &(B1->P))); + + ec_biscalar_mul(&test, x3, x4, f, B2, E); + ec_dbl_iter(&test, e_diff, &test, E); + assert(ec_is_equal(&test, &(B1->Q))); + } + } +#endif + + // Copy the results into the matrix + ibz_copy_digit_array(&((*mat)[0][0]), x1); + ibz_copy_digit_array(&((*mat)[1][0]), x2); + ibz_copy_digit_array(&((*mat)[0][1]), x3); + ibz_copy_digit_array(&((*mat)[1][1]), x4); +} + +void +change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, const ec_basis_t *B1, const ec_basis_t *B2, ec_curve_t *E, int f) +{ + _change_of_basis_matrix_tate(mat, B1, B2, E, f, false); +} + +void +change_of_basis_matrix_tate_invert(ibz_mat_2x2_t *mat, const ec_basis_t *B1, const ec_basis_t *B2, ec_curve_t *E, int f) +{ + _change_of_basis_matrix_tate(mat, B1, B2, E, f, true); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/id2iso.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/id2iso.h new file mode 100644 index 0000000000..1b4eaae3c5 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/id2iso.h @@ -0,0 +1,280 @@ +/** @file + * + * @authors Antonin Leroux + * + * @brief The id2iso algorithms + */ + +#ifndef ID2ISO_H +#define ID2ISO_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/** @defgroup id2iso_id2iso Ideal to isogeny conversion + * @{ + */ +static const quat_represent_integer_params_t QUAT_represent_integer_params = { + .algebra = &QUATALG_PINFTY, /// The level-specific quaternion algebra + .order = &(EXTREMAL_ORDERS[0]), // The special extremal order O0 + .primality_test_iterations = QUAT_primality_num_iter // precompted bound on the iteration number in primality tests +}; + +/*************************** Functions *****************************/ + +/** @defgroup id2iso_others Other functions needed for id2iso + * @{ + */ + +/** + * @brief Scalar multiplication [x]P + [y]Q where x and y are stored inside an + * ibz_vec_2_t [x, y] and P, Q in E[2^f] + * + * @param res Output: the point R = [x]P + [y]Q + * @param scalar_vec: a vector of ibz type elements (x, y) + * @param f: an integer such that P, Q are in E[2^f] + * @param PQ: an x-only basis x(P), x(Q) and x(P-Q) + * @param curve: the curve E the points P, Q, R are defined on + * + */ +void ec_biscalar_mul_ibz_vec(ec_point_t *res, + const ibz_vec_2_t *scalar_vec, + const int f, + const ec_basis_t *PQ, + const ec_curve_t *curve); + +/** + * @brief 
Translating an ideal of norm 2^f dividing p²-1 into the corresponding + * kernel coefficients + * + * @param ker_dlog Output : two coefficients indicating the decomposition of the + * kernel over the canonical basis of E0[2^f] + * @param lideal_input : O0-ideal corresponding to the ideal to be translated of + * norm 2^f + * + */ +void id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *ker_dlog, const quat_left_ideal_t *lideal_input); + +/** + * @brief Applies some 2x2 matrix on a basis of E[2^TORSION_EVEN_POWER] + * + * @param P the basis + * @param E the curve + * @param mat the matrix + * @param f TORSION_EVEN_POWER + * @returns 1 if success, 0 if error + * + * helper function, works in place + * + */ +int matrix_application_even_basis(ec_basis_t *P, const ec_curve_t *E, ibz_mat_2x2_t *mat, int f); + +/** + * @brief Applies some endomorphism of an alternate curve to E[f] + * + * @param P the basis + * @param index_alternate_curve index of the alternate order in the list of precomputed extremal + * orders + * @param E the curve (E is not required to be the alternate curve in question since in the end we + * only apply a matrix) + * @param theta the endomorphism + * @param f TORSION_EVEN_POWER + * + * helper function, works in place + * + */ +void endomorphism_application_even_basis(ec_basis_t *P, + const int index_alternate_curve, + const ec_curve_t *E, + const quat_alg_elem_t *theta, + int f); + +/** + * @brief Translating a kernel on the curve E0, represented as a vector with + * respect to the precomputed 2^f-torsion basis, into the corresponding O0-ideal + * + * @param lideal Output : the output O0-ideal + * @param f : exponent definining the norm of the ideal to compute + * @param vec2 : length-2 vector giving the 2-power part of the kernel with + * respect to the precomputed 2^f basis + * + */ +void id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t *vec2, int f); + +/** + * @brief Change of basis matrix for full basis B2 + * Finds mat such that: + * (mat*v).B2 = v.B1 + * where "." is the dot product, defined as (v1,v2).(P,Q) = v1*P + v2*Q + * + * @param mat the computed change of basis matrix + * @param B1 the source basis for E[2^f] + * @param B2 the target basis for E[2^e] + * @param E the elliptic curve + * @param f 2^f is the order of the points of the input basis + * + * mat encodes the coordinates of the points of B1 in the basis B2 + */ +void change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, const ec_basis_t *B1, const ec_basis_t *B2, ec_curve_t *E, int f); + +/** + * @brief Change of basis matrix for full basis B2 + * Finds mat such that: + * (mat*v).B1 = [2^e-f]*v.B2 + * where "." 
is the dot product, defined as (v1,v2).(P,Q) = v1*P + v2*Q + * + * @param mat the computed change of basis matrix + * @param B1 the source basis for E[2^e] + * @param B2 the target basis for E[2^f] + * @param E the elliptic curve + * @param f 2^f is the order of the points of the input basis + * + * mat encodes the coordinates of the points of B1 in the basis B2, by + * applying change_of_basis_matrix_tate and inverting the outcome + */ +void change_of_basis_matrix_tate_invert(ibz_mat_2x2_t *mat, + const ec_basis_t *B1, + const ec_basis_t *B2, + ec_curve_t *E, + int f); + +/** @} + */ + +/** @defgroup id2iso_arbitrary Arbitrary isogeny evaluation + * @{ + */ +/** + * @brief Function to find elements u, v, d1, d2, beta1, beta2 for the ideal to isogeny + * + * @param u Output: integer + * @param v Output: integer + * @param beta1 Output: quaternion element + * @param beta2 Output: quaternion element + * @param d1 Output: integer + * @param d2 Output: integer + * @param index_alternate_order_1 Output: small integer (index of an alternate order) + * @param index_alternate_order_2 Output: small integer (index of an alternate order) + * @param target : integer, target norm + * @param lideal : O0-ideal defining the search space + * @param Bpoo : quaternion algebra + * @param num_alternate_order number of alternate order we consider + * @returns 1 if the computation succeeds, 0 otherwise + * + * Let us write ti = index_alternate_order_i, + * we look for u,v,beta1,beta2,d1,d2,t1,t2 + * such that u d1 + v d2 = target + * and where di = norm(betai)/norm(Ii), where the ideal Ii is equal to overbar{Ji} * lideal and + * betai is in Ii where Ji is a connecting ideal between the maximal order O0 and O_ti t1,t2 must be + * contained between 0 and num_alternate_order This corresponds to the function SuitableIdeals in + * the spec + */ +int find_uv(ibz_t *u, + ibz_t *v, + quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *d1, + ibz_t *d2, + int *index_alternate_order_1, + int *index_alternate_order_2, + const ibz_t *target, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo, + int num_alternate_order); + +/** + * @brief Computes an arbitrary isogeny of fixed degree starting from E0 + * and evaluates it a list of points of the form (P1,0) or (0,P2). 
+ * + * @param lideal Output : an ideal of norm u + * @param u : integer + * @param small : bit indicating if we the value of u is "small" meaning that we + expect it to be + * around sqrt{p}, in that case we use a length slightly above + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny + (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @param index_alternate_order : index of the special extremal order to be used (in the list of + these orders) + * @returns the length of the chain if the computation succeeded, zero upon + failure + * + * F is an isogeny encoding an isogeny [adjust]*phi : E0 -> Eu of degree u + * note that the codomain of F can be either Eu x Eu' or Eu' x Eu for some curve + Eu' + */ +int fixed_degree_isogeny_and_eval(quat_left_ideal_t *lideal, + const ibz_t *u, + bool small, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP, + const int index_alternate_order); + +/** + * @brief Translating an ideal into a representation of the corresponding + * isogeny + * + * @param beta1 Output: quaternion element + * @param beta2 Output: quaternion element + * @param u Output: integer + * @param v Output: integer + * @param d1 Output: integer + * @param d2 Output: integer + * @param codomain the codomain of the isogeny corresponding to lideal + * @param basis Output : evaluation of the canonical basis of E0 through the + * ideal corresponding to lideal + * @param lideal : O0 - ideal in input + * @param Bpoo : the quaternion algebra + * @returns 1 if the computation succeeded, 0 otherwise + * + * Compute the codomain and image on the basis of E0 of the isogeny + * E0 -> codomain corresponding to lideal + * + * There is some integer e >= 0 such that + * 2^e * u, 2^e * v,beta1, beta2, d1, d2 are the output of find_uv + * on input target = 2^TORSION_PLUS_EVEN_POWER and lideal + * + * codomain and basis are computed with the help of a dimension 2 isogeny + * of degree 2^TORSION_PLUS_EVEN_POWER - e using a Kani diagram + * + */ +int dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *u, + ibz_t *v, + ibz_t *d1, + ibz_t *d2, + ec_curve_t *codomain, + ec_basis_t *basis, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo); + +/** + * @brief Translating an ideal into a representation of the corresponding + * isogeny + * + * @param basis Output : evaluation of the canonical basis of E0 through the + * ideal corresponding to lideal + * @param lideal : ideal in input + * @param codomain + * @returns 1 if the computation succeeds, 0 otherwise + * + * This is a wrapper around the ideal to isogeny clapotis function + */ +int dim2id2iso_arbitrary_isogeny_evaluation(ec_basis_t *basis, ec_curve_t *codomain, const quat_left_ideal_t *lideal); + +/** @} + */ + +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/intbig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/intbig.h new file mode 100644 index 0000000000..a0c2c02477 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/intbig.h @@ -0,0 +1,303 @@ +/** @file + * + * @authors Luca De Feo, Sina Schaeffler + * + * @brief Declarations for big integers in the reference implementation + */ + +#ifndef INTBIG_H +#define INTBIG_H + +#include +#if defined(MINI_GMP) +#include +#include +#else +#include +#endif +#include +#include + +/** @ingroup quat_quat + * @defgroup ibz_all Signed big integers (gmp-based) + * @{ + */ + 
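/*
 * A minimal usage sketch for the ibz_t API declared below (illustrative only,
 * relying just on the init/operate/finalize discipline of these declarations
 * and on ibz_set/ibz_cmp as they are used elsewhere in this patch):
 *
 *     ibz_t a, b, sum;
 *     ibz_init(&a);
 *     ibz_init(&b);
 *     ibz_init(&sum);
 *     ibz_set(&a, 41);        // set from a small machine integer
 *     ibz_set(&b, 1);
 *     ibz_add(&sum, &a, &b);  // sum = a + b
 *     assert(ibz_cmp(&sum, &a) > 0);
 *     ibz_finalize(&a);
 *     ibz_finalize(&b);
 *     ibz_finalize(&sum);
 */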
+/** @defgroup ibz_t Precise number types + * @{ + */ + +/** @brief Type for signed long integers + * + * @typedef ibz_t + * + * For integers of arbitrary size, used by intbig module, using gmp + */ +typedef mpz_t ibz_t; + +/** @} + */ + +/** @defgroup ibz_c Constants + * @{ + */ + +/** + * Constant zero + */ +extern const ibz_t ibz_const_zero; + +/** + * Constant one + */ +extern const ibz_t ibz_const_one; + +/** + * Constant two + */ +extern const ibz_t ibz_const_two; + +/** + * Constant three + */ +extern const ibz_t ibz_const_three; + +/** @} + */ + +/** @defgroup ibz_finit Constructors and Destructors + * @{ + */ + +void ibz_init(ibz_t *x); +void ibz_finalize(ibz_t *x); + +/** @} + */ + +/** @defgroup ibz_za Basic integer arithmetic + * @{ + */ + +/** @brief sum=a+b + */ +void ibz_add(ibz_t *sum, const ibz_t *a, const ibz_t *b); + +/** @brief diff=a-b + */ +void ibz_sub(ibz_t *diff, const ibz_t *a, const ibz_t *b); + +/** @brief prod=a*b + */ +void ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b); + +/** @brief neg=-a + */ +void ibz_neg(ibz_t *neg, const ibz_t *a); + +/** @brief abs=|a| + */ +void ibz_abs(ibz_t *abs, const ibz_t *a); + +/** @brief Euclidean division of a by b + * + * Computes quotient, remainder so that remainder+quotient*b = a where 0<=|remainder|<|b| + * The quotient is rounded towards zero. + */ +void ibz_div(ibz_t *quotient, ibz_t *remainder, const ibz_t *a, const ibz_t *b); + +/** @brief Euclidean division of a by 2^exp + * + * Computes a right shift of abs(a) by exp bits, then sets sign(quotient) to sign(a). + * + * Division and rounding is as in ibz_div. + */ +void ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp); + +/** @brief Two adic valuation computation + * + * Computes the position of the first 1 in the binary representation of the integer given in input + * + * When this number is a power of two this gives the two adic valuation of the integer + */ +int ibz_two_adic(ibz_t *pow); + +/** @brief r = a mod b + * + * Assumes valid inputs + * The sign of the divisor is ignored, the result is always non-negative + */ +void ibz_mod(ibz_t *r, const ibz_t *a, const ibz_t *b); + +unsigned long int ibz_mod_ui(const mpz_t *n, unsigned long int d); + +/** @brief Test if a = 0 mod b + */ +int ibz_divides(const ibz_t *a, const ibz_t *b); + +/** @brief pow=x^e + * + * Assumes valid inputs, The case 0^0 yields 1. 
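 * For example, ibz_pow(&p, &x, 3) sets p = x^3; with e = 0 the result is always 1.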
+ */ +void ibz_pow(ibz_t *pow, const ibz_t *x, uint32_t e); + +/** @brief pow=(x^e) mod m + * + * Assumes valid inputs + */ +void ibz_pow_mod(ibz_t *pow, const ibz_t *x, const ibz_t *e, const ibz_t *m); + +/** @brief Compare a and b + * + * @returns a positive value if a > b, zero if a = b, and a negative value if a < b + */ +int ibz_cmp(const ibz_t *a, const ibz_t *b); + +/** @brief Test if x is 0 + * + * @returns 1 if x=0, 0 otherwise + */ +int ibz_is_zero(const ibz_t *x); + +/** @brief Test if x is 1 + * + * @returns 1 if x=1, 0 otherwise + */ +int ibz_is_one(const ibz_t *x); + +/** @brief Compare x to y + * + * @returns 0 if x=y, positive if x>y, negative if x= 0 and target must hold sufficient elements to hold ibz + * + * @param target Target digit_t array + * @param ibz ibz source ibz_t element + */ +void ibz_to_digits(digit_t *target, const ibz_t *ibz); +#define ibz_to_digit_array(T, I) \ + do { \ + memset((T), 0, sizeof(T)); \ + ibz_to_digits((T), (I)); \ + } while (0) + +/** @brief get int32_t equal to the lowest bits of i + * + * Should not be used to get the value of i if its bitsize is close to 32 bit + * It can however be used on any i to get an int32_t of the same parity as i (and same value modulo + * 4) + * + * @param i Input integer + */ +int32_t ibz_get(const ibz_t *i); + +/** @brief generate random value in [a, b] + * assumed that a >= 0 and b >= 0 and a < b + * @returns 1 on success, 0 on failiure + */ +int ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b); + +/** @brief generate random value in [-m, m] + * assumed that m > 0 and bitlength of m < 32 bit + * @returns 1 on success, 0 on failiure + */ +int ibz_rand_interval_minm_m(ibz_t *rand, int32_t m); + +/** @brief Bitsize of a. + * + * @returns Bitsize of a. + * + */ +int ibz_bitsize(const ibz_t *a); + +/** @brief Size of a in given base. + * + * @returns Size of a in given base. 
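 * For example, 255 has size 8 in base 2 and size 2 in base 16.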
+ * + */ +int ibz_size_in_base(const ibz_t *a, int base); + +/** @} + */ + +/** @defgroup ibz_n Number theory functions + * @{ + */ + +/** + * @brief Greatest common divisor + * + * @param gcd Output: Set to the gcd of a and b + * @param a + * @param b + */ +void ibz_gcd(ibz_t *gcd, const ibz_t *a, const ibz_t *b); + +/** + * @brief Modular inverse + * + * @param inv Output: Set to the integer in [0,mod[ such that a*inv = 1 mod (mod) if it exists + * @param a + * @param mod + * @returns 1 if inverse exists and was computed, 0 otherwise + */ +int ibz_invmod(ibz_t *inv, const ibz_t *a, const ibz_t *mod); + +/** + * @brief Floor of Integer square root + * + * @param sqrt Output: Set to the floor of an integer square root + * @param a number of which a floor of an integer square root is searched + */ +void ibz_sqrt_floor(ibz_t *sqrt, const ibz_t *a); + +/** @} + */ + +// end of ibz_all +/** @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/isog.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/isog.h new file mode 100644 index 0000000000..b251ca3cdc --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/isog.h @@ -0,0 +1,28 @@ +#ifndef _ISOG_H_ +#define _ISOG_H_ +#include +#include + +/* KPS structure for isogenies of degree 2 or 4 */ +typedef struct +{ + ec_point_t K; +} ec_kps2_t; +typedef struct +{ + ec_point_t K[3]; +} ec_kps4_t; + +void xisog_2(ec_kps2_t *kps, ec_point_t *B, const ec_point_t P); // degree-2 isogeny construction +void xisog_2_singular(ec_kps2_t *kps, ec_point_t *B24, ec_point_t A24); + +void xisog_4(ec_kps4_t *kps, ec_point_t *B, const ec_point_t P); // degree-4 isogeny construction +void xisog_4_singular(ec_kps4_t *kps, ec_point_t *B24, const ec_point_t P, ec_point_t A24); + +void xeval_2(ec_point_t *R, ec_point_t *const Q, const int lenQ, const ec_kps2_t *kps); +void xeval_2_singular(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps2_t *kps); + +void xeval_4(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps4_t *kps); +void xeval_4_singular(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_point_t P, const ec_kps4_t *kps); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/isog_chains.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/isog_chains.c new file mode 100644 index 0000000000..abc9808057 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/isog_chains.c @@ -0,0 +1,241 @@ +#include "isog.h" +#include + +// since we use degree 4 isogeny steps, we need to handle the odd case with care +static uint32_t +ec_eval_even_strategy(ec_curve_t *curve, + ec_point_t *points, + unsigned len_points, + const ec_point_t *kernel, + const int isog_len) +{ + ec_curve_normalize_A24(curve); + ec_point_t A24; + copy_point(&A24, &curve->A24); + + int space = 1; + for (int i = 1; i < isog_len; i *= 2) + ++space; + + // Stack of remaining kernel points and their associated orders + ec_point_t splits[space]; + uint16_t todo[space]; + splits[0] = *kernel; + todo[0] = isog_len; + + int current = 0; // Pointer to current top of stack + + // Chain of 4-isogenies + for (int j = 0; j < isog_len / 2; ++j) { + assert(current >= 0); + assert(todo[current] >= 1); + // Get the next point of order 4 + while (todo[current] != 2) { + assert(todo[current] >= 3); + // A new split will be added + ++current; + assert(current < space); + // We set the seed of the new split to be computed and saved + copy_point(&splits[current], &splits[current - 1]); + // if we copied from 
the very first element, then we perform one additional doubling + unsigned num_dbls = todo[current - 1] / 4 * 2 + todo[current - 1] % 2; + todo[current] = todo[current - 1] - num_dbls; + while (num_dbls--) + xDBL_A24(&splits[current], &splits[current], &A24, false); + } + + if (j == 0) { + assert(fp2_is_one(&A24.z)); + if (!ec_is_four_torsion(&splits[current], curve)) + return -1; + + ec_point_t T; + xDBL_A24(&T, &splits[current], &A24, false); + if (fp2_is_zero(&T.x)) + return -1; // special isogenies not allowed + } else { + assert(todo[current] == 2); +#ifndef NDEBUG + if (fp2_is_zero(&splits[current].z)) + debug_print("splitting point z coordinate is unexpectedly zero"); + + ec_point_t test; + xDBL_A24(&test, &splits[current], &A24, false); + if (fp2_is_zero(&test.z)) + debug_print("z coordinate is unexpectedly zero before doubling"); + xDBL_A24(&test, &test, &A24, false); + if (!fp2_is_zero(&test.z)) + debug_print("z coordinate is unexpectedly not zero after doubling"); +#endif + } + + // Evaluate 4-isogeny + ec_kps4_t kps4; + xisog_4(&kps4, &A24, splits[current]); + xeval_4(splits, splits, current, &kps4); + for (int i = 0; i < current; ++i) + todo[i] -= 2; + xeval_4(points, points, len_points, &kps4); + + --current; + } + assert(isog_len % 2 ? !current : current == -1); + + // Final 2-isogeny + if (isog_len % 2) { +#ifndef NDEBUG + if (fp2_is_zero(&splits[0].z)) + debug_print("splitting point z coordinate is unexpectedly zero"); + ec_point_t test; + copy_point(&test, &splits[0]); + xDBL_A24(&test, &test, &A24, false); + if (!fp2_is_zero(&test.z)) + debug_print("z coordinate is unexpectedly not zero after doubling"); +#endif + + // We need to check the order of this point in case there were no 4-isogenies + if (isog_len == 1 && !ec_is_two_torsion(&splits[0], curve)) + return -1; + if (fp2_is_zero(&splits[0].x)) { + // special isogenies not allowed + // this case can only happen if isog_len == 1; otherwise the + // previous 4-isogenies we computed ensure that $T=(0:1)$ is put + // as the kernel of the dual isogeny + return -1; + } + + ec_kps2_t kps2; + xisog_2(&kps2, &A24, splits[0]); + xeval_2(points, points, len_points, &kps2); + } + + // Output curve in the form (A:C) + A24_to_AC(curve, &A24); + + curve->is_A24_computed_and_normalized = false; + + return 0; +} + +uint32_t +ec_eval_even(ec_curve_t *image, ec_isog_even_t *phi, ec_point_t *points, unsigned len_points) +{ + copy_curve(image, &phi->curve); + return ec_eval_even_strategy(image, points, len_points, &phi->kernel, phi->length); +} + +// naive implementation +uint32_t +ec_eval_small_chain(ec_curve_t *curve, + const ec_point_t *kernel, + int len, + ec_point_t *points, + unsigned len_points, + bool special) // do we allow special isogenies? 
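/* Sketch of the loop below: at step i the running kernel generator big_K has
 * order dividing 2^(len - i); it is doubled (len - i - 1) times to obtain a
 * 2-torsion point small_K, the 2-isogeny with kernel generated by small_K is
 * computed with xisog_2 (or xisog_2_singular when small_K = (0:1) and special
 * isogenies are allowed), and big_K together with the caller's points are
 * pushed through with xeval_2. */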
+{ + + ec_point_t A24; + AC_to_A24(&A24, curve); + + ec_kps2_t kps; + ec_point_t small_K, big_K; + copy_point(&big_K, kernel); + + for (int i = 0; i < len; i++) { + copy_point(&small_K, &big_K); + // small_K = big_K; + for (int j = 0; j < len - i - 1; j++) { + xDBL_A24(&small_K, &small_K, &A24, false); + } + // Check the order of the point before the first isogeny step + if (i == 0 && !ec_is_two_torsion(&small_K, curve)) + return (uint32_t)-1; + // Perform isogeny step + if (fp2_is_zero(&small_K.x)) { + if (special) { + ec_point_t B24; + xisog_2_singular(&kps, &B24, A24); + xeval_2_singular(&big_K, &big_K, 1, &kps); + xeval_2_singular(points, points, len_points, &kps); + copy_point(&A24, &B24); + } else { + return (uint32_t)-1; + } + } else { + xisog_2(&kps, &A24, small_K); + xeval_2(&big_K, &big_K, 1, &kps); + xeval_2(points, points, len_points, &kps); + } + } + A24_to_AC(curve, &A24); + + curve->is_A24_computed_and_normalized = false; + return 0; +} + +uint32_t +ec_isomorphism(ec_isom_t *isom, const ec_curve_t *from, const ec_curve_t *to) +{ + fp2_t t0, t1, t2, t3, t4; + + fp2_mul(&t0, &from->A, &from->C); + fp2_mul(&t1, &to->A, &to->C); + + fp2_mul(&t2, &t1, &to->C); // toA*toC^2 + fp2_add(&t3, &t2, &t2); + fp2_add(&t3, &t3, &t3); + fp2_add(&t3, &t3, &t3); + fp2_add(&t2, &t2, &t3); // 9*toA*toC^2 + fp2_sqr(&t3, &to->A); + fp2_mul(&t3, &t3, &to->A); // toA^3 + fp2_add(&t3, &t3, &t3); + fp2_sub(&isom->Nx, &t3, &t2); // 2*toA^3-9*toA*toC^2 + fp2_mul(&t2, &t0, &from->A); // fromA^2*fromC + fp2_sqr(&t3, &from->C); + fp2_mul(&t3, &t3, &from->C); // fromC^3 + fp2_add(&t4, &t3, &t3); + fp2_add(&t3, &t4, &t3); // 3*fromC^3 + fp2_sub(&t3, &t3, &t2); // 3*fromC^3-fromA^2*fromC + fp2_mul(&isom->Nx, &isom->Nx, &t3); // lambda_x = (2*toA^3-9*toA*toC^2)*(3*fromC^3-fromA^2*fromC) + + fp2_mul(&t2, &t0, &from->C); // fromA*fromC^2 + fp2_add(&t3, &t2, &t2); + fp2_add(&t3, &t3, &t3); + fp2_add(&t3, &t3, &t3); + fp2_add(&t2, &t2, &t3); // 9*fromA*fromC^2 + fp2_sqr(&t3, &from->A); + fp2_mul(&t3, &t3, &from->A); // fromA^3 + fp2_add(&t3, &t3, &t3); + fp2_sub(&isom->D, &t3, &t2); // 2*fromA^3-9*fromA*fromC^2 + fp2_mul(&t2, &t1, &to->A); // toA^2*toC + fp2_sqr(&t3, &to->C); + fp2_mul(&t3, &t3, &to->C); // toC^3 + fp2_add(&t4, &t3, &t3); + fp2_add(&t3, &t4, &t3); // 3*toC^3 + fp2_sub(&t3, &t3, &t2); // 3*toC^3-toA^2*toC + fp2_mul(&isom->D, &isom->D, &t3); // lambda_z = (2*fromA^3-9*fromA*fromC^2)*(3*toC^3-toA^2*toC) + + // Mont -> SW -> SW -> Mont + fp2_mul(&t0, &to->C, &from->A); + fp2_mul(&t0, &t0, &isom->Nx); // lambda_x*toC*fromA + fp2_mul(&t1, &from->C, &to->A); + fp2_mul(&t1, &t1, &isom->D); // lambda_z*fromC*toA + fp2_sub(&isom->Nz, &t0, &t1); // lambda_x*toC*fromA - lambda_z*fromC*toA + fp2_mul(&t0, &from->C, &to->C); + fp2_add(&t1, &t0, &t0); + fp2_add(&t0, &t0, &t1); // 3*fromC*toC + fp2_mul(&isom->D, &isom->D, &t0); // 3*lambda_z*fromC*toC + fp2_mul(&isom->Nx, &isom->Nx, &t0); // 3*lambda_x*fromC*toC + + return (fp2_is_zero(&isom->Nx) | fp2_is_zero(&isom->D)); +} + +void +ec_iso_eval(ec_point_t *P, ec_isom_t *isom) +{ + fp2_t tmp; + fp2_mul(&P->x, &P->x, &isom->Nx); + fp2_mul(&tmp, &P->z, &isom->Nz); + fp2_add(&P->x, &P->x, &tmp); + fp2_mul(&P->z, &P->z, &isom->D); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/keygen.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/keygen.c new file mode 100644 index 0000000000..c1c206c99d --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/keygen.c @@ -0,0 +1,64 @@ +#include +#include +#include +#include +#include + +void 
+secret_key_init(secret_key_t *sk) +{ + quat_left_ideal_init(&(sk->secret_ideal)); + ibz_mat_2x2_init(&(sk->mat_BAcan_to_BA0_two)); + ec_curve_init(&sk->curve); +} + +void +secret_key_finalize(secret_key_t *sk) +{ + quat_left_ideal_finalize(&(sk->secret_ideal)); + ibz_mat_2x2_finalize(&(sk->mat_BAcan_to_BA0_two)); +} + +int +protocols_keygen(public_key_t *pk, secret_key_t *sk) +{ + int found = 0; + ec_basis_t B_0_two; + + // iterating until a solution has been found + while (!found) { + + found = quat_sampling_random_ideal_O0_given_norm( + &sk->secret_ideal, &SEC_DEGREE, 1, &QUAT_represent_integer_params, NULL); + + // replacing the secret key ideal by a shorter equivalent one for efficiency + found = found && quat_lideal_prime_norm_reduced_equivalent( + &sk->secret_ideal, &QUATALG_PINFTY, QUAT_primality_num_iter, QUAT_equiv_bound_coeff); + + // ideal to isogeny clapotis + + found = found && dim2id2iso_arbitrary_isogeny_evaluation(&B_0_two, &sk->curve, &sk->secret_ideal); + } + + // Assert the isogeny was found and images have the correct order + assert(test_basis_order_twof(&B_0_two, &sk->curve, TORSION_EVEN_POWER)); + + // Compute a deterministic basis with a hint to speed up verification + pk->hint_pk = ec_curve_to_basis_2f_to_hint(&sk->canonical_basis, &sk->curve, TORSION_EVEN_POWER); + + // Assert the deterministic basis we computed has the correct order + assert(test_basis_order_twof(&sk->canonical_basis, &sk->curve, TORSION_EVEN_POWER)); + + // Compute the 2x2 matrix basis change from the canonical basis to the evaluation of our secret + // isogeny + change_of_basis_matrix_tate( + &sk->mat_BAcan_to_BA0_two, &sk->canonical_basis, &B_0_two, &sk->curve, TORSION_EVEN_POWER); + + // Set the public key from the codomain curve + copy_curve(&pk->curve, &sk->curve); + pk->curve.is_A24_computed_and_normalized = false; // We don't send any precomputation + + assert(fp2_is_one(&pk->curve.C) == 0xFFFFFFFF); + + return found; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lvlx.cmake b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lvlx.cmake new file mode 100644 index 0000000000..3ab2d2dc90 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lvlx.cmake @@ -0,0 +1,12 @@ +set(SOURCE_FILES_GF_${SVARIANT_UPPER}_BROADWELL + ${SOURCE_FILES_GF_SPECIFIC} + fp.c + ${LVLX_DIR}/fp2.c +) + +add_library(${LIB_GF_${SVARIANT_UPPER}} STATIC ${SOURCE_FILES_GF_${SVARIANT_UPPER}_BROADWELL}) +target_include_directories(${LIB_GF_${SVARIANT_UPPER}} PRIVATE ${INC_COMMON} ${PROJECT_SOURCE_DIR}/src/precomp/ref/${SVARIANT_LOWER}/include ${INC_GF} ${INC_GF_${SVARIANT_UPPER}} include ${INC_PUBLIC}) +target_compile_options(${LIB_GF_${SVARIANT_UPPER}} PRIVATE ${C_OPT_FLAGS}) +target_compile_definitions(${LIB_GF_${SVARIANT_UPPER}} PUBLIC SQISIGN_VARIANT=${SVARIANT_LOWER}) + +add_subdirectory(test) diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mem.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mem.c new file mode 100644 index 0000000000..4956beda50 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mem.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: Apache-2.0 + +#include +#include +#include + +void +sqisign_secure_free(void *mem, size_t size) +{ + if (mem) { + typedef void *(*memset_t)(void *, int, size_t); + static volatile memset_t memset_func = memset; + memset_func(mem, 0, size); + free(mem); + } +} +void +sqisign_secure_clear(void *mem, size_t size) +{ + typedef void *(*memset_t)(void *, int, size_t); + static volatile memset_t 
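    /* (the indirection through a volatile function pointer keeps the compiler
     * from optimizing the zeroizing memset away as a dead store) */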
memset_func = memset; + memset_func(mem, 0, size); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mem.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mem.h new file mode 100644 index 0000000000..ab8f6c6481 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mem.h @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef MEM_H +#define MEM_H +#include +#include + +/** + * Clears and frees allocated memory. + * + * @param[out] mem Memory to be cleared and freed. + * @param size Size of memory to be cleared and freed. + */ +void sqisign_secure_free(void *mem, size_t size); + +/** + * Clears memory. + * + * @param[out] mem Memory to be cleared. + * @param size Size of memory to be cleared. + */ +void sqisign_secure_clear(void *mem, size_t size); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.c new file mode 100644 index 0000000000..396d505aec --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.c @@ -0,0 +1,73 @@ +#include +#include +#if defined(MINI_GMP) +#include "mini-gmp.h" +#else +// This configuration is used only for testing +#include +#endif +#include + +// Exported for testing +int +mini_mpz_legendre(const mpz_t a, const mpz_t p) +{ + int res = 0; + mpz_t e; + mpz_init_set(e, p); + mpz_sub_ui(e, e, 1); + mpz_fdiv_q_2exp(e, e, 1); + mpz_powm(e, a, e, p); + + if (mpz_cmp_ui(e, 1) <= 0) { + res = mpz_get_si(e); + } else { + res = -1; + } + mpz_clear(e); + return res; +} + +#if defined(MINI_GMP) +int +mpz_legendre(const mpz_t a, const mpz_t p) +{ + return mini_mpz_legendre(a, p); +} +#endif + +// Exported for testing +double +mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op) +{ + double ret; + int tmp_exp; + mpz_t tmp; + + // Handle the case where op is 0 + if (mpz_cmp_ui(op, 0) == 0) { + *exp = 0; + return 0.0; + } + + *exp = mpz_sizeinbase(op, 2); + + mpz_init_set(tmp, op); + + if (*exp > DBL_MAX_EXP) { + mpz_fdiv_q_2exp(tmp, tmp, *exp - DBL_MAX_EXP); + } + + ret = frexp(mpz_get_d(tmp), &tmp_exp); + mpz_clear(tmp); + + return ret; +} + +#if defined(MINI_GMP) +double +mpz_get_d_2exp(signed long int *exp, const mpz_t op) +{ + return mini_mpz_get_d_2exp(exp, op); +} +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.h new file mode 100644 index 0000000000..0113cfdfe6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.h @@ -0,0 +1,19 @@ +#ifndef MINI_GMP_EXTRA_H +#define MINI_GMP_EXTRA_H + +#if defined MINI_GMP +#include "mini-gmp.h" + +typedef long mp_exp_t; + +int mpz_legendre(const mpz_t a, const mpz_t p); +double mpz_get_d_2exp(signed long int *exp, const mpz_t op); +#else +// This configuration is used only for testing +#include +#endif + +int mini_mpz_legendre(const mpz_t a, const mpz_t p); +double mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp.c new file mode 100644 index 0000000000..3830ab2031 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp.c @@ -0,0 +1,4671 @@ +/* Note: The code from mini-gmp is modifed from the original by + commenting out the definition of GMP_LIMB_BITS */ + +/* + mini-gmp, a minimalistic implementation of a GNU GMP subset. 
+ + Contributed to the GNU project by Niels Möller + Additional functionalities and improvements by Marco Bodrato. + +Copyright 1991-1997, 1999-2022 Free Software Foundation, Inc. + +This file is part of the GNU MP Library. + +The GNU MP Library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 2 of the License, or (at your option) any + later version. + +or both in parallel, as here. + +The GNU MP Library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the GNU MP Library. If not, +see https://www.gnu.org/licenses/. */ + +/* NOTE: All functions in this file which are not declared in + mini-gmp.h are internal, and are not intended to be compatible + with GMP or with future versions of mini-gmp. */ + +/* Much of the material copied from GMP files, including: gmp-impl.h, + longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c, + mpn/generic/lshift.c, mpn/generic/mul_1.c, + mpn/generic/mul_basecase.c, mpn/generic/rshift.c, + mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c, + mpn/generic/submul_1.c. */ + +#include +#include +#include +#include +#include +#include + +#include "mini-gmp.h" + +#if !defined(MINI_GMP_DONT_USE_FLOAT_H) +#include +#endif + + +/* Macros */ +/* Removed from here as it is passed as a compiler command-line definition */ +/* #define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT) */ + +#define GMP_LIMB_MAX ((mp_limb_t) ~ (mp_limb_t) 0) +#define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1)) + +#define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2)) +#define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1) + +#define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT) +#define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1)) + +#define GMP_ABS(x) ((x) >= 0 ? (x) : -(x)) +#define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1)) + +#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b)) +#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b)) + +#define GMP_CMP(a,b) (((a) > (b)) - ((a) < (b))) + +#if defined(DBL_MANT_DIG) && FLT_RADIX == 2 +#define GMP_DBL_MANT_BITS DBL_MANT_DIG +#else +#define GMP_DBL_MANT_BITS (53) +#endif + +/* Return non-zero if xp,xsize and yp,ysize overlap. + If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no + overlap. If both these are false, there's an overlap. 
*/ +#define GMP_MPN_OVERLAP_P(xp, xsize, yp, ysize) \ + ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp)) + +#define gmp_assert_nocarry(x) do { \ + mp_limb_t __cy = (x); \ + assert (__cy == 0); \ + (void) (__cy); \ + } while (0) + +#define gmp_clz(count, x) do { \ + mp_limb_t __clz_x = (x); \ + unsigned __clz_c = 0; \ + int LOCAL_SHIFT_BITS = 8; \ + if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) \ + for (; \ + (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \ + __clz_c += 8) \ + { __clz_x <<= LOCAL_SHIFT_BITS; } \ + for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++) \ + __clz_x <<= 1; \ + (count) = __clz_c; \ + } while (0) + +#define gmp_ctz(count, x) do { \ + mp_limb_t __ctz_x = (x); \ + unsigned __ctz_c = 0; \ + gmp_clz (__ctz_c, __ctz_x & - __ctz_x); \ + (count) = GMP_LIMB_BITS - 1 - __ctz_c; \ + } while (0) + +#define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \ + do { \ + mp_limb_t __x; \ + __x = (al) + (bl); \ + (sh) = (ah) + (bh) + (__x < (al)); \ + (sl) = __x; \ + } while (0) + +#define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \ + do { \ + mp_limb_t __x; \ + __x = (al) - (bl); \ + (sh) = (ah) - (bh) - ((al) < (bl)); \ + (sl) = __x; \ + } while (0) + +#define gmp_umul_ppmm(w1, w0, u, v) \ + do { \ + int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; \ + if (sizeof(unsigned int) * CHAR_BIT >= 2 * GMP_LIMB_BITS) \ + { \ + unsigned int __ww = (unsigned int) (u) * (v); \ + w0 = (mp_limb_t) __ww; \ + w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ + } \ + else if (GMP_ULONG_BITS >= 2 * GMP_LIMB_BITS) \ + { \ + unsigned long int __ww = (unsigned long int) (u) * (v); \ + w0 = (mp_limb_t) __ww; \ + w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ + } \ + else { \ + mp_limb_t __x0, __x1, __x2, __x3; \ + unsigned __ul, __vl, __uh, __vh; \ + mp_limb_t __u = (u), __v = (v); \ + assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); \ + \ + __ul = __u & GMP_LLIMB_MASK; \ + __uh = __u >> (GMP_LIMB_BITS / 2); \ + __vl = __v & GMP_LLIMB_MASK; \ + __vh = __v >> (GMP_LIMB_BITS / 2); \ + \ + __x0 = (mp_limb_t) __ul * __vl; \ + __x1 = (mp_limb_t) __ul * __vh; \ + __x2 = (mp_limb_t) __uh * __vl; \ + __x3 = (mp_limb_t) __uh * __vh; \ + \ + __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */ \ + __x1 += __x2; /* but this indeed can */ \ + if (__x1 < __x2) /* did we get it? */ \ + __x3 += GMP_HLIMB_BIT; /* yes, add it in the proper pos. */ \ + \ + (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \ + (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \ + } \ + } while (0) + +/* If mp_limb_t is of size smaller than int, plain u*v implies + automatic promotion to *signed* int, and then multiply may overflow + and cause undefined behavior. Explicitly cast to unsigned int for + that case. */ +#define gmp_umullo_limb(u, v) \ + ((sizeof(mp_limb_t) >= sizeof(int)) ? 
(u)*(v) : (unsigned int)(u) * (v)) + +#define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di) \ + do { \ + mp_limb_t _qh, _ql, _r, _mask; \ + gmp_umul_ppmm (_qh, _ql, (nh), (di)); \ + gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \ + _r = (nl) - gmp_umullo_limb (_qh, (d)); \ + _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \ + _qh += _mask; \ + _r += _mask & (d); \ + if (_r >= (d)) \ + { \ + _r -= (d); \ + _qh++; \ + } \ + \ + (r) = _r; \ + (q) = _qh; \ + } while (0) + +#define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \ + do { \ + mp_limb_t _q0, _t1, _t0, _mask; \ + gmp_umul_ppmm ((q), _q0, (n2), (dinv)); \ + gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \ + \ + /* Compute the two most significant limbs of n - q'd */ \ + (r1) = (n1) - gmp_umullo_limb ((d1), (q)); \ + gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \ + gmp_umul_ppmm (_t1, _t0, (d0), (q)); \ + gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \ + (q)++; \ + \ + /* Conditionally adjust q and the remainders */ \ + _mask = - (mp_limb_t) ((r1) >= _q0); \ + (q) += _mask; \ + gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \ + if ((r1) >= (d1)) \ + { \ + if ((r1) > (d1) || (r0) >= (d0)) \ + { \ + (q)++; \ + gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \ + } \ + } \ + } while (0) + +/* Swap macros. */ +#define MP_LIMB_T_SWAP(x, y) \ + do { \ + mp_limb_t __mp_limb_t_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_limb_t_swap__tmp; \ + } while (0) +#define MP_SIZE_T_SWAP(x, y) \ + do { \ + mp_size_t __mp_size_t_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_size_t_swap__tmp; \ + } while (0) +#define MP_BITCNT_T_SWAP(x,y) \ + do { \ + mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_bitcnt_t_swap__tmp; \ + } while (0) +#define MP_PTR_SWAP(x, y) \ + do { \ + mp_ptr __mp_ptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_ptr_swap__tmp; \ + } while (0) +#define MP_SRCPTR_SWAP(x, y) \ + do { \ + mp_srcptr __mp_srcptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_srcptr_swap__tmp; \ + } while (0) + +#define MPN_PTR_SWAP(xp,xs, yp,ys) \ + do { \ + MP_PTR_SWAP (xp, yp); \ + MP_SIZE_T_SWAP (xs, ys); \ + } while(0) +#define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \ + do { \ + MP_SRCPTR_SWAP (xp, yp); \ + MP_SIZE_T_SWAP (xs, ys); \ + } while(0) + +#define MPZ_PTR_SWAP(x, y) \ + do { \ + mpz_ptr __mpz_ptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mpz_ptr_swap__tmp; \ + } while (0) +#define MPZ_SRCPTR_SWAP(x, y) \ + do { \ + mpz_srcptr __mpz_srcptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mpz_srcptr_swap__tmp; \ + } while (0) + +const int mp_bits_per_limb = GMP_LIMB_BITS; + + +/* Memory allocation and other helper functions. 
*/ +static void +gmp_die (const char *msg) +{ + fprintf (stderr, "%s\n", msg); + abort(); +} + +static void * +gmp_default_alloc (size_t size) +{ + void *p; + + assert (size > 0); + + p = malloc (size); + if (!p) + gmp_die("gmp_default_alloc: Virtual memory exhausted."); + + return p; +} + +static void * +gmp_default_realloc (void *old, size_t unused_old_size, size_t new_size) +{ + void * p; + + p = realloc (old, new_size); + + if (!p) + gmp_die("gmp_default_realloc: Virtual memory exhausted."); + + return p; +} + +static void +gmp_default_free (void *p, size_t unused_size) +{ + free (p); +} + +static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc; +static void * (*gmp_reallocate_func) (void *, size_t, size_t) = gmp_default_realloc; +static void (*gmp_free_func) (void *, size_t) = gmp_default_free; + +void +mp_get_memory_functions (void *(**alloc_func) (size_t), + void *(**realloc_func) (void *, size_t, size_t), + void (**free_func) (void *, size_t)) +{ + if (alloc_func) + *alloc_func = gmp_allocate_func; + + if (realloc_func) + *realloc_func = gmp_reallocate_func; + + if (free_func) + *free_func = gmp_free_func; +} + +void +mp_set_memory_functions (void *(*alloc_func) (size_t), + void *(*realloc_func) (void *, size_t, size_t), + void (*free_func) (void *, size_t)) +{ + if (!alloc_func) + alloc_func = gmp_default_alloc; + if (!realloc_func) + realloc_func = gmp_default_realloc; + if (!free_func) + free_func = gmp_default_free; + + gmp_allocate_func = alloc_func; + gmp_reallocate_func = realloc_func; + gmp_free_func = free_func; +} + +#define gmp_alloc(size) ((*gmp_allocate_func)((size))) +#define gmp_free(p, size) ((*gmp_free_func) ((p), (size))) +#define gmp_realloc(ptr, old_size, size) ((*gmp_reallocate_func)(ptr, old_size, size)) + +static mp_ptr +gmp_alloc_limbs (mp_size_t size) +{ + return (mp_ptr) gmp_alloc (size * sizeof (mp_limb_t)); +} + +static mp_ptr +gmp_realloc_limbs (mp_ptr old, mp_size_t old_size, mp_size_t size) +{ + assert (size > 0); + return (mp_ptr) gmp_realloc (old, old_size * sizeof (mp_limb_t), size * sizeof (mp_limb_t)); +} + +static void +gmp_free_limbs (mp_ptr old, mp_size_t size) +{ + gmp_free (old, size * sizeof (mp_limb_t)); +} + + +/* MPN interface */ + +void +mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n) +{ + mp_size_t i; + for (i = 0; i < n; i++) + d[i] = s[i]; +} + +void +mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n) +{ + while (--n >= 0) + d[n] = s[n]; +} + +int +mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + while (--n >= 0) + { + if (ap[n] != bp[n]) + return ap[n] > bp[n] ? 1 : -1; + } + return 0; +} + +static int +mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) +{ + if (an != bn) + return an < bn ? 
-1 : 1; + else + return mpn_cmp (ap, bp, an); +} + +static mp_size_t +mpn_normalized_size (mp_srcptr xp, mp_size_t n) +{ + while (n > 0 && xp[n-1] == 0) + --n; + return n; +} + +int +mpn_zero_p(mp_srcptr rp, mp_size_t n) +{ + return mpn_normalized_size (rp, n) == 0; +} + +void +mpn_zero (mp_ptr rp, mp_size_t n) +{ + while (--n >= 0) + rp[n] = 0; +} + +mp_limb_t +mpn_add_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) +{ + mp_size_t i; + + assert (n > 0); + i = 0; + do + { + mp_limb_t r = ap[i] + b; + /* Carry out */ + b = (r < b); + rp[i] = r; + } + while (++i < n); + + return b; +} + +mp_limb_t +mpn_add_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + mp_size_t i; + mp_limb_t cy; + + for (i = 0, cy = 0; i < n; i++) + { + mp_limb_t a, b, r; + a = ap[i]; b = bp[i]; + r = a + cy; + cy = (r < cy); + r += b; + cy += (r < b); + rp[i] = r; + } + return cy; +} + +mp_limb_t +mpn_add (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) +{ + mp_limb_t cy; + + assert (an >= bn); + + cy = mpn_add_n (rp, ap, bp, bn); + if (an > bn) + cy = mpn_add_1 (rp + bn, ap + bn, an - bn, cy); + return cy; +} + +mp_limb_t +mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) +{ + mp_size_t i; + + assert (n > 0); + + i = 0; + do + { + mp_limb_t a = ap[i]; + /* Carry out */ + mp_limb_t cy = a < b; + rp[i] = a - b; + b = cy; + } + while (++i < n); + + return b; +} + +mp_limb_t +mpn_sub_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + mp_size_t i; + mp_limb_t cy; + + for (i = 0, cy = 0; i < n; i++) + { + mp_limb_t a, b; + a = ap[i]; b = bp[i]; + b += cy; + cy = (b < cy); + cy += (a < b); + rp[i] = a - b; + } + return cy; +} + +mp_limb_t +mpn_sub (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) +{ + mp_limb_t cy; + + assert (an >= bn); + + cy = mpn_sub_n (rp, ap, bp, bn); + if (an > bn) + cy = mpn_sub_1 (rp + bn, ap + bn, an - bn, cy); + return cy; +} + +mp_limb_t +mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) +{ + mp_limb_t ul, cl, hpl, lpl; + + assert (n >= 1); + + cl = 0; + do + { + ul = *up++; + gmp_umul_ppmm (hpl, lpl, ul, vl); + + lpl += cl; + cl = (lpl < cl) + hpl; + + *rp++ = lpl; + } + while (--n != 0); + + return cl; +} + +mp_limb_t +mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) +{ + mp_limb_t ul, cl, hpl, lpl, rl; + + assert (n >= 1); + + cl = 0; + do + { + ul = *up++; + gmp_umul_ppmm (hpl, lpl, ul, vl); + + lpl += cl; + cl = (lpl < cl) + hpl; + + rl = *rp; + lpl = rl + lpl; + cl += lpl < rl; + *rp++ = lpl; + } + while (--n != 0); + + return cl; +} + +mp_limb_t +mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) +{ + mp_limb_t ul, cl, hpl, lpl, rl; + + assert (n >= 1); + + cl = 0; + do + { + ul = *up++; + gmp_umul_ppmm (hpl, lpl, ul, vl); + + lpl += cl; + cl = (lpl < cl) + hpl; + + rl = *rp; + lpl = rl - lpl; + cl += lpl > rl; + *rp++ = lpl; + } + while (--n != 0); + + return cl; +} + +mp_limb_t +mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn) +{ + assert (un >= vn); + assert (vn >= 1); + assert (!GMP_MPN_OVERLAP_P(rp, un + vn, up, un)); + assert (!GMP_MPN_OVERLAP_P(rp, un + vn, vp, vn)); + + /* We first multiply by the low order limb. This result can be + stored, not added, to rp. We also avoid a loop for zeroing this + way. */ + + rp[un] = mpn_mul_1 (rp, up, un, vp[0]); + + /* Now accumulate the product of up[] and the next higher limb from + vp[]. 
*/ + + while (--vn >= 1) + { + rp += 1, vp += 1; + rp[un] = mpn_addmul_1 (rp, up, un, vp[0]); + } + return rp[un]; +} + +void +mpn_mul_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + mpn_mul (rp, ap, n, bp, n); +} + +void +mpn_sqr (mp_ptr rp, mp_srcptr ap, mp_size_t n) +{ + mpn_mul (rp, ap, n, ap, n); +} + +mp_limb_t +mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) +{ + mp_limb_t high_limb, low_limb; + unsigned int tnc; + mp_limb_t retval; + + assert (n >= 1); + assert (cnt >= 1); + assert (cnt < GMP_LIMB_BITS); + + up += n; + rp += n; + + tnc = GMP_LIMB_BITS - cnt; + low_limb = *--up; + retval = low_limb >> tnc; + high_limb = (low_limb << cnt); + + while (--n != 0) + { + low_limb = *--up; + *--rp = high_limb | (low_limb >> tnc); + high_limb = (low_limb << cnt); + } + *--rp = high_limb; + + return retval; +} + +mp_limb_t +mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) +{ + mp_limb_t high_limb, low_limb; + unsigned int tnc; + mp_limb_t retval; + + assert (n >= 1); + assert (cnt >= 1); + assert (cnt < GMP_LIMB_BITS); + + tnc = GMP_LIMB_BITS - cnt; + high_limb = *up++; + retval = (high_limb << tnc); + low_limb = high_limb >> cnt; + + while (--n != 0) + { + high_limb = *up++; + *rp++ = low_limb | (high_limb << tnc); + low_limb = high_limb >> cnt; + } + *rp = low_limb; + + return retval; +} + +static mp_bitcnt_t +mpn_common_scan (mp_limb_t limb, mp_size_t i, mp_srcptr up, mp_size_t un, + mp_limb_t ux) +{ + unsigned cnt; + + assert (ux == 0 || ux == GMP_LIMB_MAX); + assert (0 <= i && i <= un ); + + while (limb == 0) + { + i++; + if (i == un) + return (ux == 0 ? ~(mp_bitcnt_t) 0 : un * GMP_LIMB_BITS); + limb = ux ^ up[i]; + } + gmp_ctz (cnt, limb); + return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt; +} + +mp_bitcnt_t +mpn_scan1 (mp_srcptr ptr, mp_bitcnt_t bit) +{ + mp_size_t i; + i = bit / GMP_LIMB_BITS; + + return mpn_common_scan ( ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), + i, ptr, i, 0); +} + +mp_bitcnt_t +mpn_scan0 (mp_srcptr ptr, mp_bitcnt_t bit) +{ + mp_size_t i; + i = bit / GMP_LIMB_BITS; + + return mpn_common_scan (~ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), + i, ptr, i, GMP_LIMB_MAX); +} + +void +mpn_com (mp_ptr rp, mp_srcptr up, mp_size_t n) +{ + while (--n >= 0) + *rp++ = ~ *up++; +} + +mp_limb_t +mpn_neg (mp_ptr rp, mp_srcptr up, mp_size_t n) +{ + while (*up == 0) + { + *rp = 0; + if (!--n) + return 0; + ++up; ++rp; + } + *rp = - *up; + mpn_com (++rp, ++up, --n); + return 1; +} + + +/* MPN division interface. */ + +/* The 3/2 inverse is defined as + + m = floor( (B^3-1) / (B u1 + u0)) - B +*/ +mp_limb_t +mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0) +{ + mp_limb_t r, m; + + { + mp_limb_t p, ql; + unsigned ul, uh, qh; + + assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); + /* For notation, let b denote the half-limb base, so that B = b^2. + Split u1 = b uh + ul. */ + ul = u1 & GMP_LLIMB_MASK; + uh = u1 >> (GMP_LIMB_BITS / 2); + + /* Approximation of the high half of quotient. Differs from the 2/1 + inverse of the half limb uh, since we have already subtracted + u0. */ + qh = (u1 ^ GMP_LIMB_MAX) / uh; + + /* Adjust to get a half-limb 3/2 inverse, i.e., we want + + qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u + = floor( (b (~u) + b-1) / u), + + and the remainder + + r = b (~u) + b-1 - qh (b uh + ul) + = b (~u - qh uh) + b-1 - qh ul + + Subtraction of qh ul may underflow, which implies adjustments. + But by normalization, 2 u >= B > qh ul, so we need to adjust by + at most 2. 
+ */ + + r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK; + + p = (mp_limb_t) qh * ul; + /* Adjustment steps taken from udiv_qrnnd_c */ + if (r < p) + { + qh--; + r += u1; + if (r >= u1) /* i.e. we didn't get carry when adding to r */ + if (r < p) + { + qh--; + r += u1; + } + } + r -= p; + + /* Low half of the quotient is + + ql = floor ( (b r + b-1) / u1). + + This is a 3/2 division (on half-limbs), for which qh is a + suitable inverse. */ + + p = (r >> (GMP_LIMB_BITS / 2)) * qh + r; + /* Unlike full-limb 3/2, we can add 1 without overflow. For this to + work, it is essential that ql is a full mp_limb_t. */ + ql = (p >> (GMP_LIMB_BITS / 2)) + 1; + + /* By the 3/2 trick, we don't need the high half limb. */ + r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1; + + if (r >= (GMP_LIMB_MAX & (p << (GMP_LIMB_BITS / 2)))) + { + ql--; + r += u1; + } + m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql; + if (r >= u1) + { + m++; + r -= u1; + } + } + + /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a + 3/2 inverse. */ + if (u0 > 0) + { + mp_limb_t th, tl; + r = ~r; + r += u0; + if (r < u0) + { + m--; + if (r >= u1) + { + m--; + r -= u1; + } + r -= u1; + } + gmp_umul_ppmm (th, tl, u0, m); + r += th; + if (r < th) + { + m--; + m -= ((r > u1) | ((r == u1) & (tl > u0))); + } + } + + return m; +} + +struct gmp_div_inverse +{ + /* Normalization shift count. */ + unsigned shift; + /* Normalized divisor (d0 unused for mpn_div_qr_1) */ + mp_limb_t d1, d0; + /* Inverse, for 2/1 or 3/2. */ + mp_limb_t di; +}; + +static void +mpn_div_qr_1_invert (struct gmp_div_inverse *inv, mp_limb_t d) +{ + unsigned shift; + + assert (d > 0); + gmp_clz (shift, d); + inv->shift = shift; + inv->d1 = d << shift; + inv->di = mpn_invert_limb (inv->d1); +} + +static void +mpn_div_qr_2_invert (struct gmp_div_inverse *inv, + mp_limb_t d1, mp_limb_t d0) +{ + unsigned shift; + + assert (d1 > 0); + gmp_clz (shift, d1); + inv->shift = shift; + if (shift > 0) + { + d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); + d0 <<= shift; + } + inv->d1 = d1; + inv->d0 = d0; + inv->di = mpn_invert_3by2 (d1, d0); +} + +static void +mpn_div_qr_invert (struct gmp_div_inverse *inv, + mp_srcptr dp, mp_size_t dn) +{ + assert (dn > 0); + + if (dn == 1) + mpn_div_qr_1_invert (inv, dp[0]); + else if (dn == 2) + mpn_div_qr_2_invert (inv, dp[1], dp[0]); + else + { + unsigned shift; + mp_limb_t d1, d0; + + d1 = dp[dn-1]; + d0 = dp[dn-2]; + assert (d1 > 0); + gmp_clz (shift, d1); + inv->shift = shift; + if (shift > 0) + { + d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); + d0 = (d0 << shift) | (dp[dn-3] >> (GMP_LIMB_BITS - shift)); + } + inv->d1 = d1; + inv->d0 = d0; + inv->di = mpn_invert_3by2 (d1, d0); + } +} + +/* Not matching current public gmp interface, rather corresponding to + the sbpi1_div_* functions. */ +static mp_limb_t +mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn, + const struct gmp_div_inverse *inv) +{ + mp_limb_t d, di; + mp_limb_t r; + mp_ptr tp = NULL; + mp_size_t tn = 0; + + if (inv->shift > 0) + { + /* Shift, reusing qp area if possible. In-place shift if qp == np. 
*/ + tp = qp; + if (!tp) + { + tn = nn; + tp = gmp_alloc_limbs (tn); + } + r = mpn_lshift (tp, np, nn, inv->shift); + np = tp; + } + else + r = 0; + + d = inv->d1; + di = inv->di; + while (--nn >= 0) + { + mp_limb_t q; + + gmp_udiv_qrnnd_preinv (q, r, r, np[nn], d, di); + if (qp) + qp[nn] = q; + } + if (tn) + gmp_free_limbs (tp, tn); + + return r >> inv->shift; +} + +static void +mpn_div_qr_2_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, + const struct gmp_div_inverse *inv) +{ + unsigned shift; + mp_size_t i; + mp_limb_t d1, d0, di, r1, r0; + + assert (nn >= 2); + shift = inv->shift; + d1 = inv->d1; + d0 = inv->d0; + di = inv->di; + + if (shift > 0) + r1 = mpn_lshift (np, np, nn, shift); + else + r1 = 0; + + r0 = np[nn - 1]; + + i = nn - 2; + do + { + mp_limb_t n0, q; + n0 = np[i]; + gmp_udiv_qr_3by2 (q, r1, r0, r1, r0, n0, d1, d0, di); + + if (qp) + qp[i] = q; + } + while (--i >= 0); + + if (shift > 0) + { + assert ((r0 & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - shift))) == 0); + r0 = (r0 >> shift) | (r1 << (GMP_LIMB_BITS - shift)); + r1 >>= shift; + } + + np[1] = r1; + np[0] = r0; +} + +static void +mpn_div_qr_pi1 (mp_ptr qp, + mp_ptr np, mp_size_t nn, mp_limb_t n1, + mp_srcptr dp, mp_size_t dn, + mp_limb_t dinv) +{ + mp_size_t i; + + mp_limb_t d1, d0; + mp_limb_t cy, cy1; + mp_limb_t q; + + assert (dn > 2); + assert (nn >= dn); + + d1 = dp[dn - 1]; + d0 = dp[dn - 2]; + + assert ((d1 & GMP_LIMB_HIGHBIT) != 0); + /* Iteration variable is the index of the q limb. + * + * We divide + * by + */ + + i = nn - dn; + do + { + mp_limb_t n0 = np[dn-1+i]; + + if (n1 == d1 && n0 == d0) + { + q = GMP_LIMB_MAX; + mpn_submul_1 (np+i, dp, dn, q); + n1 = np[dn-1+i]; /* update n1, last loop's value will now be invalid */ + } + else + { + gmp_udiv_qr_3by2 (q, n1, n0, n1, n0, np[dn-2+i], d1, d0, dinv); + + cy = mpn_submul_1 (np + i, dp, dn-2, q); + + cy1 = n0 < cy; + n0 = n0 - cy; + cy = n1 < cy1; + n1 = n1 - cy1; + np[dn-2+i] = n0; + + if (cy != 0) + { + n1 += d1 + mpn_add_n (np + i, np + i, dp, dn - 1); + q--; + } + } + + if (qp) + qp[i] = q; + } + while (--i >= 0); + + np[dn - 1] = n1; +} + +static void +mpn_div_qr_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, + mp_srcptr dp, mp_size_t dn, + const struct gmp_div_inverse *inv) +{ + assert (dn > 0); + assert (nn >= dn); + + if (dn == 1) + np[0] = mpn_div_qr_1_preinv (qp, np, nn, inv); + else if (dn == 2) + mpn_div_qr_2_preinv (qp, np, nn, inv); + else + { + mp_limb_t nh; + unsigned shift; + + assert (inv->d1 == dp[dn-1]); + assert (inv->d0 == dp[dn-2]); + assert ((inv->d1 & GMP_LIMB_HIGHBIT) != 0); + + shift = inv->shift; + if (shift > 0) + nh = mpn_lshift (np, np, nn, shift); + else + nh = 0; + + mpn_div_qr_pi1 (qp, np, nn, nh, dp, dn, inv->di); + + if (shift > 0) + gmp_assert_nocarry (mpn_rshift (np, np, dn, shift)); + } +} + +static void +mpn_div_qr (mp_ptr qp, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn) +{ + struct gmp_div_inverse inv; + mp_ptr tp = NULL; + + assert (dn > 0); + assert (nn >= dn); + + mpn_div_qr_invert (&inv, dp, dn); + if (dn > 2 && inv.shift > 0) + { + tp = gmp_alloc_limbs (dn); + gmp_assert_nocarry (mpn_lshift (tp, dp, dn, inv.shift)); + dp = tp; + } + mpn_div_qr_preinv (qp, np, nn, dp, dn, &inv); + if (tp) + gmp_free_limbs (tp, dn); +} + + +/* MPN base conversion. 
*/ +static unsigned +mpn_base_power_of_two_p (unsigned b) +{ + switch (b) + { + case 2: return 1; + case 4: return 2; + case 8: return 3; + case 16: return 4; + case 32: return 5; + case 64: return 6; + case 128: return 7; + case 256: return 8; + default: return 0; + } +} + +struct mpn_base_info +{ + /* bb is the largest power of the base which fits in one limb, and + exp is the corresponding exponent. */ + unsigned exp; + mp_limb_t bb; +}; + +static void +mpn_get_base_info (struct mpn_base_info *info, mp_limb_t b) +{ + mp_limb_t m; + mp_limb_t p; + unsigned exp; + + m = GMP_LIMB_MAX / b; + for (exp = 1, p = b; p <= m; exp++) + p *= b; + + info->exp = exp; + info->bb = p; +} + +static mp_bitcnt_t +mpn_limb_size_in_base_2 (mp_limb_t u) +{ + unsigned shift; + + assert (u > 0); + gmp_clz (shift, u); + return GMP_LIMB_BITS - shift; +} + +static size_t +mpn_get_str_bits (unsigned char *sp, unsigned bits, mp_srcptr up, mp_size_t un) +{ + unsigned char mask; + size_t sn, j; + mp_size_t i; + unsigned shift; + + sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]) + + bits - 1) / bits; + + mask = (1U << bits) - 1; + + for (i = 0, j = sn, shift = 0; j-- > 0;) + { + unsigned char digit = up[i] >> shift; + + shift += bits; + + if (shift >= GMP_LIMB_BITS && ++i < un) + { + shift -= GMP_LIMB_BITS; + digit |= up[i] << (bits - shift); + } + sp[j] = digit & mask; + } + return sn; +} + +/* We generate digits from the least significant end, and reverse at + the end. */ +static size_t +mpn_limb_get_str (unsigned char *sp, mp_limb_t w, + const struct gmp_div_inverse *binv) +{ + mp_size_t i; + for (i = 0; w > 0; i++) + { + mp_limb_t h, l, r; + + h = w >> (GMP_LIMB_BITS - binv->shift); + l = w << binv->shift; + + gmp_udiv_qrnnd_preinv (w, r, h, l, binv->d1, binv->di); + assert ((r & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - binv->shift))) == 0); + r >>= binv->shift; + + sp[i] = r; + } + return i; +} + +static size_t +mpn_get_str_other (unsigned char *sp, + int base, const struct mpn_base_info *info, + mp_ptr up, mp_size_t un) +{ + struct gmp_div_inverse binv; + size_t sn; + size_t i; + + mpn_div_qr_1_invert (&binv, base); + + sn = 0; + + if (un > 1) + { + struct gmp_div_inverse bbinv; + mpn_div_qr_1_invert (&bbinv, info->bb); + + do + { + mp_limb_t w; + size_t done; + w = mpn_div_qr_1_preinv (up, up, un, &bbinv); + un -= (up[un-1] == 0); + done = mpn_limb_get_str (sp + sn, w, &binv); + + for (sn += done; done < info->exp; done++) + sp[sn++] = 0; + } + while (un > 1); + } + sn += mpn_limb_get_str (sp + sn, up[0], &binv); + + /* Reverse order */ + for (i = 0; 2*i + 1 < sn; i++) + { + unsigned char t = sp[i]; + sp[i] = sp[sn - i - 1]; + sp[sn - i - 1] = t; + } + + return sn; +} + +size_t +mpn_get_str (unsigned char *sp, int base, mp_ptr up, mp_size_t un) +{ + unsigned bits; + + assert (un > 0); + assert (up[un-1] > 0); + + bits = mpn_base_power_of_two_p (base); + if (bits) + return mpn_get_str_bits (sp, bits, up, un); + else + { + struct mpn_base_info info; + + mpn_get_base_info (&info, base); + return mpn_get_str_other (sp, base, &info, up, un); + } +} + +static mp_size_t +mpn_set_str_bits (mp_ptr rp, const unsigned char *sp, size_t sn, + unsigned bits) +{ + mp_size_t rn; + mp_limb_t limb; + unsigned shift; + + for (limb = 0, rn = 0, shift = 0; sn-- > 0; ) + { + limb |= (mp_limb_t) sp[sn] << shift; + shift += bits; + if (shift >= GMP_LIMB_BITS) + { + shift -= GMP_LIMB_BITS; + rp[rn++] = limb; + /* Next line is correct also if shift == 0, + bits == 8, and mp_limb_t == unsigned char. 
*/ + limb = (unsigned int) sp[sn] >> (bits - shift); + } + } + if (limb != 0) + rp[rn++] = limb; + else + rn = mpn_normalized_size (rp, rn); + return rn; +} + +/* Result is usually normalized, except for all-zero input, in which + case a single zero limb is written at *RP, and 1 is returned. */ +static mp_size_t +mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn, + mp_limb_t b, const struct mpn_base_info *info) +{ + mp_size_t rn; + mp_limb_t w; + unsigned k; + size_t j; + + assert (sn > 0); + + k = 1 + (sn - 1) % info->exp; + + j = 0; + w = sp[j++]; + while (--k != 0) + w = w * b + sp[j++]; + + rp[0] = w; + + for (rn = 1; j < sn;) + { + mp_limb_t cy; + + w = sp[j++]; + for (k = 1; k < info->exp; k++) + w = w * b + sp[j++]; + + cy = mpn_mul_1 (rp, rp, rn, info->bb); + cy += mpn_add_1 (rp, rp, rn, w); + if (cy > 0) + rp[rn++] = cy; + } + assert (j == sn); + + return rn; +} + +mp_size_t +mpn_set_str (mp_ptr rp, const unsigned char *sp, size_t sn, int base) +{ + unsigned bits; + + if (sn == 0) + return 0; + + bits = mpn_base_power_of_two_p (base); + if (bits) + return mpn_set_str_bits (rp, sp, sn, bits); + else + { + struct mpn_base_info info; + + mpn_get_base_info (&info, base); + return mpn_set_str_other (rp, sp, sn, base, &info); + } +} + + +/* MPZ interface */ +void +mpz_init (mpz_t r) +{ + static const mp_limb_t dummy_limb = GMP_LIMB_MAX & 0xc1a0; + + r->_mp_alloc = 0; + r->_mp_size = 0; + r->_mp_d = (mp_ptr) &dummy_limb; +} + +/* The utility of this function is a bit limited, since many functions + assigns the result variable using mpz_swap. */ +void +mpz_init2 (mpz_t r, mp_bitcnt_t bits) +{ + mp_size_t rn; + + bits -= (bits != 0); /* Round down, except if 0 */ + rn = 1 + bits / GMP_LIMB_BITS; + + r->_mp_alloc = rn; + r->_mp_size = 0; + r->_mp_d = gmp_alloc_limbs (rn); +} + +void +mpz_clear (mpz_t r) +{ + if (r->_mp_alloc) + gmp_free_limbs (r->_mp_d, r->_mp_alloc); +} + +static mp_ptr +mpz_realloc (mpz_t r, mp_size_t size) +{ + size = GMP_MAX (size, 1); + + if (r->_mp_alloc) + r->_mp_d = gmp_realloc_limbs (r->_mp_d, r->_mp_alloc, size); + else + r->_mp_d = gmp_alloc_limbs (size); + r->_mp_alloc = size; + + if (GMP_ABS (r->_mp_size) > size) + r->_mp_size = 0; + + return r->_mp_d; +} + +/* Realloc for an mpz_t WHAT if it has less than NEEDED limbs. */ +#define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc \ + ? mpz_realloc(z,n) \ + : (z)->_mp_d) + +/* MPZ assignment and basic conversions. 
*/ +void +mpz_set_si (mpz_t r, signed long int x) +{ + if (x >= 0) + mpz_set_ui (r, x); + else /* (x < 0) */ + if (GMP_LIMB_BITS < GMP_ULONG_BITS) + { + mpz_set_ui (r, GMP_NEG_CAST (unsigned long int, x)); + mpz_neg (r, r); + } + else + { + r->_mp_size = -1; + MPZ_REALLOC (r, 1)[0] = GMP_NEG_CAST (unsigned long int, x); + } +} + +void +mpz_set_ui (mpz_t r, unsigned long int x) +{ + if (x > 0) + { + r->_mp_size = 1; + MPZ_REALLOC (r, 1)[0] = x; + if (GMP_LIMB_BITS < GMP_ULONG_BITS) + { + int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; + while (x >>= LOCAL_GMP_LIMB_BITS) + { + ++ r->_mp_size; + MPZ_REALLOC (r, r->_mp_size)[r->_mp_size - 1] = x; + } + } + } + else + r->_mp_size = 0; +} + +void +mpz_set (mpz_t r, const mpz_t x) +{ + /* Allow the NOP r == x */ + if (r != x) + { + mp_size_t n; + mp_ptr rp; + + n = GMP_ABS (x->_mp_size); + rp = MPZ_REALLOC (r, n); + + mpn_copyi (rp, x->_mp_d, n); + r->_mp_size = x->_mp_size; + } +} + +void +mpz_init_set_si (mpz_t r, signed long int x) +{ + mpz_init (r); + mpz_set_si (r, x); +} + +void +mpz_init_set_ui (mpz_t r, unsigned long int x) +{ + mpz_init (r); + mpz_set_ui (r, x); +} + +void +mpz_init_set (mpz_t r, const mpz_t x) +{ + mpz_init (r); + mpz_set (r, x); +} + +int +mpz_fits_slong_p (const mpz_t u) +{ + return mpz_cmp_si (u, LONG_MAX) <= 0 && mpz_cmp_si (u, LONG_MIN) >= 0; +} + +static int +mpn_absfits_ulong_p (mp_srcptr up, mp_size_t un) +{ + int ulongsize = GMP_ULONG_BITS / GMP_LIMB_BITS; + mp_limb_t ulongrem = 0; + + if (GMP_ULONG_BITS % GMP_LIMB_BITS != 0) + ulongrem = (mp_limb_t) (ULONG_MAX >> GMP_LIMB_BITS * ulongsize) + 1; + + return un <= ulongsize || (up[ulongsize] < ulongrem && un == ulongsize + 1); +} + +int +mpz_fits_ulong_p (const mpz_t u) +{ + mp_size_t us = u->_mp_size; + + return us >= 0 && mpn_absfits_ulong_p (u->_mp_d, us); +} + +int +mpz_fits_sint_p (const mpz_t u) +{ + return mpz_cmp_si (u, INT_MAX) <= 0 && mpz_cmp_si (u, INT_MIN) >= 0; +} + +int +mpz_fits_uint_p (const mpz_t u) +{ + return u->_mp_size >= 0 && mpz_cmpabs_ui (u, UINT_MAX) <= 0; +} + +int +mpz_fits_sshort_p (const mpz_t u) +{ + return mpz_cmp_si (u, SHRT_MAX) <= 0 && mpz_cmp_si (u, SHRT_MIN) >= 0; +} + +int +mpz_fits_ushort_p (const mpz_t u) +{ + return u->_mp_size >= 0 && mpz_cmpabs_ui (u, USHRT_MAX) <= 0; +} + +long int +mpz_get_si (const mpz_t u) +{ + unsigned long r = mpz_get_ui (u); + unsigned long c = -LONG_MAX - LONG_MIN; + + if (u->_mp_size < 0) + /* This expression is necessary to properly handle -LONG_MIN */ + return -(long) c - (long) ((r - c) & LONG_MAX); + else + return (long) (r & LONG_MAX); +} + +unsigned long int +mpz_get_ui (const mpz_t u) +{ + if (GMP_LIMB_BITS < GMP_ULONG_BITS) + { + int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; + unsigned long r = 0; + mp_size_t n = GMP_ABS (u->_mp_size); + n = GMP_MIN (n, 1 + (mp_size_t) (GMP_ULONG_BITS - 1) / GMP_LIMB_BITS); + while (--n >= 0) + r = (r << LOCAL_GMP_LIMB_BITS) + u->_mp_d[n]; + return r; + } + + return u->_mp_size == 0 ? 
0 : u->_mp_d[0]; +} + +size_t +mpz_size (const mpz_t u) +{ + return GMP_ABS (u->_mp_size); +} + +mp_limb_t +mpz_getlimbn (const mpz_t u, mp_size_t n) +{ + if (n >= 0 && n < GMP_ABS (u->_mp_size)) + return u->_mp_d[n]; + else + return 0; +} + +void +mpz_realloc2 (mpz_t x, mp_bitcnt_t n) +{ + mpz_realloc (x, 1 + (n - (n != 0)) / GMP_LIMB_BITS); +} + +mp_srcptr +mpz_limbs_read (mpz_srcptr x) +{ + return x->_mp_d; +} + +mp_ptr +mpz_limbs_modify (mpz_t x, mp_size_t n) +{ + assert (n > 0); + return MPZ_REALLOC (x, n); +} + +mp_ptr +mpz_limbs_write (mpz_t x, mp_size_t n) +{ + return mpz_limbs_modify (x, n); +} + +void +mpz_limbs_finish (mpz_t x, mp_size_t xs) +{ + mp_size_t xn; + xn = mpn_normalized_size (x->_mp_d, GMP_ABS (xs)); + x->_mp_size = xs < 0 ? -xn : xn; +} + +static mpz_srcptr +mpz_roinit_normal_n (mpz_t x, mp_srcptr xp, mp_size_t xs) +{ + x->_mp_alloc = 0; + x->_mp_d = (mp_ptr) xp; + x->_mp_size = xs; + return x; +} + +mpz_srcptr +mpz_roinit_n (mpz_t x, mp_srcptr xp, mp_size_t xs) +{ + mpz_roinit_normal_n (x, xp, xs); + mpz_limbs_finish (x, xs); + return x; +} + + +/* Conversions and comparison to double. */ +void +mpz_set_d (mpz_t r, double x) +{ + int sign; + mp_ptr rp; + mp_size_t rn, i; + double B; + double Bi; + mp_limb_t f; + + /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is + zero or infinity. */ + if (x != x || x == x * 0.5) + { + r->_mp_size = 0; + return; + } + + sign = x < 0.0 ; + if (sign) + x = - x; + + if (x < 1.0) + { + r->_mp_size = 0; + return; + } + B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); + Bi = 1.0 / B; + for (rn = 1; x >= B; rn++) + x *= Bi; + + rp = MPZ_REALLOC (r, rn); + + f = (mp_limb_t) x; + x -= f; + assert (x < 1.0); + i = rn-1; + rp[i] = f; + while (--i >= 0) + { + x = B * x; + f = (mp_limb_t) x; + x -= f; + assert (x < 1.0); + rp[i] = f; + } + + r->_mp_size = sign ? - rn : rn; +} + +void +mpz_init_set_d (mpz_t r, double x) +{ + mpz_init (r); + mpz_set_d (r, x); +} + +double +mpz_get_d (const mpz_t u) +{ + int m; + mp_limb_t l; + mp_size_t un; + double x; + double B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); + + un = GMP_ABS (u->_mp_size); + + if (un == 0) + return 0.0; + + l = u->_mp_d[--un]; + gmp_clz (m, l); + m = m + GMP_DBL_MANT_BITS - GMP_LIMB_BITS; + if (m < 0) + l &= GMP_LIMB_MAX << -m; + + for (x = l; --un >= 0;) + { + x = B*x; + if (m > 0) { + l = u->_mp_d[un]; + m -= GMP_LIMB_BITS; + if (m < 0) + l &= GMP_LIMB_MAX << -m; + x += l; + } + } + + if (u->_mp_size < 0) + x = -x; + + return x; +} + +int +mpz_cmpabs_d (const mpz_t x, double d) +{ + mp_size_t xn; + double B, Bi; + mp_size_t i; + + xn = x->_mp_size; + d = GMP_ABS (d); + + if (xn != 0) + { + xn = GMP_ABS (xn); + + B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); + Bi = 1.0 / B; + + /* Scale d so it can be compared with the top limb. */ + for (i = 1; i < xn; i++) + d *= Bi; + + if (d >= B) + return -1; + + /* Compare floor(d) to top limb, subtract and cancel when equal. */ + for (i = xn; i-- > 0;) + { + mp_limb_t f, xl; + + f = (mp_limb_t) d; + xl = x->_mp_d[i]; + if (xl > f) + return 1; + else if (xl < f) + return -1; + d = B * (d - f); + } + } + return - (d > 0.0); +} + +int +mpz_cmp_d (const mpz_t x, double d) +{ + if (x->_mp_size < 0) + { + if (d >= 0.0) + return -1; + else + return -mpz_cmpabs_d (x, d); + } + else + { + if (d < 0.0) + return 1; + else + return mpz_cmpabs_d (x, d); + } +} + + +/* MPZ comparisons and the like. 
*/ +int +mpz_sgn (const mpz_t u) +{ + return GMP_CMP (u->_mp_size, 0); +} + +int +mpz_cmp_si (const mpz_t u, long v) +{ + mp_size_t usize = u->_mp_size; + + if (v >= 0) + return mpz_cmp_ui (u, v); + else if (usize >= 0) + return 1; + else + return - mpz_cmpabs_ui (u, GMP_NEG_CAST (unsigned long int, v)); +} + +int +mpz_cmp_ui (const mpz_t u, unsigned long v) +{ + mp_size_t usize = u->_mp_size; + + if (usize < 0) + return -1; + else + return mpz_cmpabs_ui (u, v); +} + +int +mpz_cmp (const mpz_t a, const mpz_t b) +{ + mp_size_t asize = a->_mp_size; + mp_size_t bsize = b->_mp_size; + + if (asize != bsize) + return (asize < bsize) ? -1 : 1; + else if (asize >= 0) + return mpn_cmp (a->_mp_d, b->_mp_d, asize); + else + return mpn_cmp (b->_mp_d, a->_mp_d, -asize); +} + +int +mpz_cmpabs_ui (const mpz_t u, unsigned long v) +{ + mp_size_t un = GMP_ABS (u->_mp_size); + + if (! mpn_absfits_ulong_p (u->_mp_d, un)) + return 1; + else + { + unsigned long uu = mpz_get_ui (u); + return GMP_CMP(uu, v); + } +} + +int +mpz_cmpabs (const mpz_t u, const mpz_t v) +{ + return mpn_cmp4 (u->_mp_d, GMP_ABS (u->_mp_size), + v->_mp_d, GMP_ABS (v->_mp_size)); +} + +void +mpz_abs (mpz_t r, const mpz_t u) +{ + mpz_set (r, u); + r->_mp_size = GMP_ABS (r->_mp_size); +} + +void +mpz_neg (mpz_t r, const mpz_t u) +{ + mpz_set (r, u); + r->_mp_size = -r->_mp_size; +} + +void +mpz_swap (mpz_t u, mpz_t v) +{ + MP_SIZE_T_SWAP (u->_mp_alloc, v->_mp_alloc); + MPN_PTR_SWAP (u->_mp_d, u->_mp_size, v->_mp_d, v->_mp_size); +} + + +/* MPZ addition and subtraction */ + + +void +mpz_add_ui (mpz_t r, const mpz_t a, unsigned long b) +{ + mpz_t bb; + mpz_init_set_ui (bb, b); + mpz_add (r, a, bb); + mpz_clear (bb); +} + +void +mpz_sub_ui (mpz_t r, const mpz_t a, unsigned long b) +{ + mpz_ui_sub (r, b, a); + mpz_neg (r, r); +} + +void +mpz_ui_sub (mpz_t r, unsigned long a, const mpz_t b) +{ + mpz_neg (r, b); + mpz_add_ui (r, r, a); +} + +static mp_size_t +mpz_abs_add (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t an = GMP_ABS (a->_mp_size); + mp_size_t bn = GMP_ABS (b->_mp_size); + mp_ptr rp; + mp_limb_t cy; + + if (an < bn) + { + MPZ_SRCPTR_SWAP (a, b); + MP_SIZE_T_SWAP (an, bn); + } + + rp = MPZ_REALLOC (r, an + 1); + cy = mpn_add (rp, a->_mp_d, an, b->_mp_d, bn); + + rp[an] = cy; + + return an + cy; +} + +static mp_size_t +mpz_abs_sub (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t an = GMP_ABS (a->_mp_size); + mp_size_t bn = GMP_ABS (b->_mp_size); + int cmp; + mp_ptr rp; + + cmp = mpn_cmp4 (a->_mp_d, an, b->_mp_d, bn); + if (cmp > 0) + { + rp = MPZ_REALLOC (r, an); + gmp_assert_nocarry (mpn_sub (rp, a->_mp_d, an, b->_mp_d, bn)); + return mpn_normalized_size (rp, an); + } + else if (cmp < 0) + { + rp = MPZ_REALLOC (r, bn); + gmp_assert_nocarry (mpn_sub (rp, b->_mp_d, bn, a->_mp_d, an)); + return -mpn_normalized_size (rp, bn); + } + else + return 0; +} + +void +mpz_add (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t rn; + + if ( (a->_mp_size ^ b->_mp_size) >= 0) + rn = mpz_abs_add (r, a, b); + else + rn = mpz_abs_sub (r, a, b); + + r->_mp_size = a->_mp_size >= 0 ? rn : - rn; +} + +void +mpz_sub (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t rn; + + if ( (a->_mp_size ^ b->_mp_size) >= 0) + rn = mpz_abs_sub (r, a, b); + else + rn = mpz_abs_add (r, a, b); + + r->_mp_size = a->_mp_size >= 0 ? 
rn : - rn; +} + + +/* MPZ multiplication */ +void +mpz_mul_si (mpz_t r, const mpz_t u, long int v) +{ + if (v < 0) + { + mpz_mul_ui (r, u, GMP_NEG_CAST (unsigned long int, v)); + mpz_neg (r, r); + } + else + mpz_mul_ui (r, u, v); +} + +void +mpz_mul_ui (mpz_t r, const mpz_t u, unsigned long int v) +{ + mpz_t vv; + mpz_init_set_ui (vv, v); + mpz_mul (r, u, vv); + mpz_clear (vv); + return; +} + +void +mpz_mul (mpz_t r, const mpz_t u, const mpz_t v) +{ + int sign; + mp_size_t un, vn, rn; + mpz_t t; + mp_ptr tp; + + un = u->_mp_size; + vn = v->_mp_size; + + if (un == 0 || vn == 0) + { + r->_mp_size = 0; + return; + } + + sign = (un ^ vn) < 0; + + un = GMP_ABS (un); + vn = GMP_ABS (vn); + + mpz_init2 (t, (un + vn) * GMP_LIMB_BITS); + + tp = t->_mp_d; + if (un >= vn) + mpn_mul (tp, u->_mp_d, un, v->_mp_d, vn); + else + mpn_mul (tp, v->_mp_d, vn, u->_mp_d, un); + + rn = un + vn; + rn -= tp[rn-1] == 0; + + t->_mp_size = sign ? - rn : rn; + mpz_swap (r, t); + mpz_clear (t); +} + +void +mpz_mul_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bits) +{ + mp_size_t un, rn; + mp_size_t limbs; + unsigned shift; + mp_ptr rp; + + un = GMP_ABS (u->_mp_size); + if (un == 0) + { + r->_mp_size = 0; + return; + } + + limbs = bits / GMP_LIMB_BITS; + shift = bits % GMP_LIMB_BITS; + + rn = un + limbs + (shift > 0); + rp = MPZ_REALLOC (r, rn); + if (shift > 0) + { + mp_limb_t cy = mpn_lshift (rp + limbs, u->_mp_d, un, shift); + rp[rn-1] = cy; + rn -= (cy == 0); + } + else + mpn_copyd (rp + limbs, u->_mp_d, un); + + mpn_zero (rp, limbs); + + r->_mp_size = (u->_mp_size < 0) ? - rn : rn; +} + +void +mpz_addmul_ui (mpz_t r, const mpz_t u, unsigned long int v) +{ + mpz_t t; + mpz_init_set_ui (t, v); + mpz_mul (t, u, t); + mpz_add (r, r, t); + mpz_clear (t); +} + +void +mpz_submul_ui (mpz_t r, const mpz_t u, unsigned long int v) +{ + mpz_t t; + mpz_init_set_ui (t, v); + mpz_mul (t, u, t); + mpz_sub (r, r, t); + mpz_clear (t); +} + +void +mpz_addmul (mpz_t r, const mpz_t u, const mpz_t v) +{ + mpz_t t; + mpz_init (t); + mpz_mul (t, u, v); + mpz_add (r, r, t); + mpz_clear (t); +} + +void +mpz_submul (mpz_t r, const mpz_t u, const mpz_t v) +{ + mpz_t t; + mpz_init (t); + mpz_mul (t, u, v); + mpz_sub (r, r, t); + mpz_clear (t); +} + + +/* MPZ division */ +enum mpz_div_round_mode { GMP_DIV_FLOOR, GMP_DIV_CEIL, GMP_DIV_TRUNC }; + +/* Allows q or r to be zero. Returns 1 iff remainder is non-zero. */ +static int +mpz_div_qr (mpz_t q, mpz_t r, + const mpz_t n, const mpz_t d, enum mpz_div_round_mode mode) +{ + mp_size_t ns, ds, nn, dn, qs; + ns = n->_mp_size; + ds = d->_mp_size; + + if (ds == 0) + gmp_die("mpz_div_qr: Divide by zero."); + + if (ns == 0) + { + if (q) + q->_mp_size = 0; + if (r) + r->_mp_size = 0; + return 0; + } + + nn = GMP_ABS (ns); + dn = GMP_ABS (ds); + + qs = ds ^ ns; + + if (nn < dn) + { + if (mode == GMP_DIV_CEIL && qs >= 0) + { + /* q = 1, r = n - d */ + if (r) + mpz_sub (r, n, d); + if (q) + mpz_set_ui (q, 1); + } + else if (mode == GMP_DIV_FLOOR && qs < 0) + { + /* q = -1, r = n + d */ + if (r) + mpz_add (r, n, d); + if (q) + mpz_set_si (q, -1); + } + else + { + /* q = 0, r = d */ + if (r) + mpz_set (r, n); + if (q) + q->_mp_size = 0; + } + return 1; + } + else + { + mp_ptr np, qp; + mp_size_t qn, rn; + mpz_t tq, tr; + + mpz_init_set (tr, n); + np = tr->_mp_d; + + qn = nn - dn + 1; + + if (q) + { + mpz_init2 (tq, qn * GMP_LIMB_BITS); + qp = tq->_mp_d; + } + else + qp = NULL; + + mpn_div_qr (qp, np, nn, d->_mp_d, dn); + + if (qp) + { + qn -= (qp[qn-1] == 0); + + tq->_mp_size = qs < 0 ? 
-qn : qn; + } + rn = mpn_normalized_size (np, dn); + tr->_mp_size = ns < 0 ? - rn : rn; + + if (mode == GMP_DIV_FLOOR && qs < 0 && rn != 0) + { + if (q) + mpz_sub_ui (tq, tq, 1); + if (r) + mpz_add (tr, tr, d); + } + else if (mode == GMP_DIV_CEIL && qs >= 0 && rn != 0) + { + if (q) + mpz_add_ui (tq, tq, 1); + if (r) + mpz_sub (tr, tr, d); + } + + if (q) + { + mpz_swap (tq, q); + mpz_clear (tq); + } + if (r) + mpz_swap (tr, r); + + mpz_clear (tr); + + return rn != 0; + } +} + +void +mpz_cdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, r, n, d, GMP_DIV_CEIL); +} + +void +mpz_fdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, r, n, d, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, r, n, d, GMP_DIV_TRUNC); +} + +void +mpz_cdiv_q (mpz_t q, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, NULL, n, d, GMP_DIV_CEIL); +} + +void +mpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, NULL, n, d, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_q (mpz_t q, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC); +} + +void +mpz_cdiv_r (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL); +} + +void +mpz_fdiv_r (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_r (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, GMP_DIV_TRUNC); +} + +void +mpz_mod (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, d->_mp_size >= 0 ? GMP_DIV_FLOOR : GMP_DIV_CEIL); +} + +static void +mpz_div_q_2exp (mpz_t q, const mpz_t u, mp_bitcnt_t bit_index, + enum mpz_div_round_mode mode) +{ + mp_size_t un, qn; + mp_size_t limb_cnt; + mp_ptr qp; + int adjust; + + un = u->_mp_size; + if (un == 0) + { + q->_mp_size = 0; + return; + } + limb_cnt = bit_index / GMP_LIMB_BITS; + qn = GMP_ABS (un) - limb_cnt; + bit_index %= GMP_LIMB_BITS; + + if (mode == ((un > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* un != 0 here. */ + /* Note: Below, the final indexing at limb_cnt is valid because at + that point we have qn > 0. */ + adjust = (qn <= 0 + || !mpn_zero_p (u->_mp_d, limb_cnt) + || (u->_mp_d[limb_cnt] + & (((mp_limb_t) 1 << bit_index) - 1))); + else + adjust = 0; + + if (qn <= 0) + qn = 0; + else + { + qp = MPZ_REALLOC (q, qn); + + if (bit_index != 0) + { + mpn_rshift (qp, u->_mp_d + limb_cnt, qn, bit_index); + qn -= qp[qn - 1] == 0; + } + else + { + mpn_copyi (qp, u->_mp_d + limb_cnt, qn); + } + } + + q->_mp_size = qn; + + if (adjust) + mpz_add_ui (q, q, 1); + if (un < 0) + mpz_neg (q, q); +} + +static void +mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index, + enum mpz_div_round_mode mode) +{ + mp_size_t us, un, rn; + mp_ptr rp; + mp_limb_t mask; + + us = u->_mp_size; + if (us == 0 || bit_index == 0) + { + r->_mp_size = 0; + return; + } + rn = (bit_index + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; + assert (rn > 0); + + rp = MPZ_REALLOC (r, rn); + un = GMP_ABS (us); + + mask = GMP_LIMB_MAX >> (rn * GMP_LIMB_BITS - bit_index); + + if (rn > un) + { + /* Quotient (with truncation) is zero, and remainder is + non-zero */ + if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ + { + /* Have to negate and sign extend. */ + mp_size_t i; + + gmp_assert_nocarry (! 
mpn_neg (rp, u->_mp_d, un)); + for (i = un; i < rn - 1; i++) + rp[i] = GMP_LIMB_MAX; + + rp[rn-1] = mask; + us = -us; + } + else + { + /* Just copy */ + if (r != u) + mpn_copyi (rp, u->_mp_d, un); + + rn = un; + } + } + else + { + if (r != u) + mpn_copyi (rp, u->_mp_d, rn - 1); + + rp[rn-1] = u->_mp_d[rn-1] & mask; + + if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ + { + /* If r != 0, compute 2^{bit_count} - r. */ + mpn_neg (rp, rp, rn); + + rp[rn-1] &= mask; + + /* us is not used for anything else, so we can modify it + here to indicate flipped sign. */ + us = -us; + } + } + rn = mpn_normalized_size (rp, rn); + r->_mp_size = us < 0 ? -rn : rn; +} + +void +mpz_cdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_q_2exp (r, u, cnt, GMP_DIV_CEIL); +} + +void +mpz_fdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_q_2exp (r, u, cnt, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_q_2exp (r, u, cnt, GMP_DIV_TRUNC); +} + +void +mpz_cdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_r_2exp (r, u, cnt, GMP_DIV_CEIL); +} + +void +mpz_fdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_r_2exp (r, u, cnt, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_r_2exp (r, u, cnt, GMP_DIV_TRUNC); +} + +void +mpz_divexact (mpz_t q, const mpz_t n, const mpz_t d) +{ + gmp_assert_nocarry (mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC)); +} + +int +mpz_divisible_p (const mpz_t n, const mpz_t d) +{ + return mpz_div_qr (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; +} + +int +mpz_congruent_p (const mpz_t a, const mpz_t b, const mpz_t m) +{ + mpz_t t; + int res; + + /* a == b (mod 0) iff a == b */ + if (mpz_sgn (m) == 0) + return (mpz_cmp (a, b) == 0); + + mpz_init (t); + mpz_sub (t, a, b); + res = mpz_divisible_p (t, m); + mpz_clear (t); + + return res; +} + +static unsigned long +mpz_div_qr_ui (mpz_t q, mpz_t r, + const mpz_t n, unsigned long d, enum mpz_div_round_mode mode) +{ + unsigned long ret; + mpz_t rr, dd; + + mpz_init (rr); + mpz_init_set_ui (dd, d); + mpz_div_qr (q, rr, n, dd, mode); + mpz_clear (dd); + ret = mpz_get_ui (rr); + + if (r) + mpz_swap (r, rr); + mpz_clear (rr); + + return ret; +} + +unsigned long +mpz_cdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, r, n, d, GMP_DIV_CEIL); +} + +unsigned long +mpz_fdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, r, n, d, GMP_DIV_FLOOR); +} + +unsigned long +mpz_tdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, r, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_cdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_CEIL); +} + +unsigned long +mpz_fdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_FLOOR); +} + +unsigned long +mpz_tdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_cdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_CEIL); +} +unsigned long +mpz_fdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); +} +unsigned long +mpz_tdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_cdiv_ui 
(const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_CEIL); +} + +unsigned long +mpz_fdiv_ui (const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_FLOOR); +} + +unsigned long +mpz_tdiv_ui (const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_mod_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); +} + +void +mpz_divexact_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + gmp_assert_nocarry (mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC)); +} + +int +mpz_divisible_ui_p (const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; +} + + +/* GCD */ +static mp_limb_t +mpn_gcd_11 (mp_limb_t u, mp_limb_t v) +{ + unsigned shift; + + assert ( (u | v) > 0); + + if (u == 0) + return v; + else if (v == 0) + return u; + + gmp_ctz (shift, u | v); + + u >>= shift; + v >>= shift; + + if ( (u & 1) == 0) + MP_LIMB_T_SWAP (u, v); + + while ( (v & 1) == 0) + v >>= 1; + + while (u != v) + { + if (u > v) + { + u -= v; + do + u >>= 1; + while ( (u & 1) == 0); + } + else + { + v -= u; + do + v >>= 1; + while ( (v & 1) == 0); + } + } + return u << shift; +} + +mp_size_t +mpn_gcd (mp_ptr rp, mp_ptr up, mp_size_t un, mp_ptr vp, mp_size_t vn) +{ + assert (un >= vn); + assert (vn > 0); + assert (!GMP_MPN_OVERLAP_P (up, un, vp, vn)); + assert (vp[vn-1] > 0); + assert ((up[0] | vp[0]) & 1); + + if (un > vn) + mpn_div_qr (NULL, up, un, vp, vn); + + un = mpn_normalized_size (up, vn); + if (un == 0) + { + mpn_copyi (rp, vp, vn); + return vn; + } + + if (!(vp[0] & 1)) + MPN_PTR_SWAP (up, un, vp, vn); + + while (un > 1 || vn > 1) + { + int shift; + assert (vp[0] & 1); + + while (up[0] == 0) + { + up++; + un--; + } + gmp_ctz (shift, up[0]); + if (shift > 0) + { + gmp_assert_nocarry (mpn_rshift(up, up, un, shift)); + un -= (up[un-1] == 0); + } + + if (un < vn) + MPN_PTR_SWAP (up, un, vp, vn); + else if (un == vn) + { + int c = mpn_cmp (up, vp, un); + if (c == 0) + { + mpn_copyi (rp, up, un); + return un; + } + else if (c < 0) + MP_PTR_SWAP (up, vp); + } + + gmp_assert_nocarry (mpn_sub (up, up, un, vp, vn)); + un = mpn_normalized_size (up, un); + } + rp[0] = mpn_gcd_11 (up[0], vp[0]); + return 1; +} + +unsigned long +mpz_gcd_ui (mpz_t g, const mpz_t u, unsigned long v) +{ + mpz_t t; + mpz_init_set_ui(t, v); + mpz_gcd (t, u, t); + if (v > 0) + v = mpz_get_ui (t); + + if (g) + mpz_swap (t, g); + + mpz_clear (t); + + return v; +} + +static mp_bitcnt_t +mpz_make_odd (mpz_t r) +{ + mp_bitcnt_t shift; + + assert (r->_mp_size > 0); + /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ + shift = mpn_scan1 (r->_mp_d, 0); + mpz_tdiv_q_2exp (r, r, shift); + + return shift; +} + +void +mpz_gcd (mpz_t g, const mpz_t u, const mpz_t v) +{ + mpz_t tu, tv; + mp_bitcnt_t uz, vz, gz; + + if (u->_mp_size == 0) + { + mpz_abs (g, v); + return; + } + if (v->_mp_size == 0) + { + mpz_abs (g, u); + return; + } + + mpz_init (tu); + mpz_init (tv); + + mpz_abs (tu, u); + uz = mpz_make_odd (tu); + mpz_abs (tv, v); + vz = mpz_make_odd (tv); + gz = GMP_MIN (uz, vz); + + if (tu->_mp_size < tv->_mp_size) + mpz_swap (tu, tv); + + tu->_mp_size = mpn_gcd (tu->_mp_d, tu->_mp_d, tu->_mp_size, tv->_mp_d, tv->_mp_size); + mpz_mul_2exp (g, tu, gz); + + mpz_clear (tu); + mpz_clear (tv); +} + +void +mpz_gcdext (mpz_t g, mpz_t s, mpz_t t, const mpz_t u, const mpz_t v) +{ + mpz_t tu, tv, s0, s1, t0, t1; + mp_bitcnt_t uz, vz, 
gz; + mp_bitcnt_t power; + int cmp; + + if (u->_mp_size == 0) + { + /* g = 0 u + sgn(v) v */ + signed long sign = mpz_sgn (v); + mpz_abs (g, v); + if (s) + s->_mp_size = 0; + if (t) + mpz_set_si (t, sign); + return; + } + + if (v->_mp_size == 0) + { + /* g = sgn(u) u + 0 v */ + signed long sign = mpz_sgn (u); + mpz_abs (g, u); + if (s) + mpz_set_si (s, sign); + if (t) + t->_mp_size = 0; + return; + } + + mpz_init (tu); + mpz_init (tv); + mpz_init (s0); + mpz_init (s1); + mpz_init (t0); + mpz_init (t1); + + mpz_abs (tu, u); + uz = mpz_make_odd (tu); + mpz_abs (tv, v); + vz = mpz_make_odd (tv); + gz = GMP_MIN (uz, vz); + + uz -= gz; + vz -= gz; + + /* Cofactors corresponding to odd gcd. gz handled later. */ + if (tu->_mp_size < tv->_mp_size) + { + mpz_swap (tu, tv); + MPZ_SRCPTR_SWAP (u, v); + MPZ_PTR_SWAP (s, t); + MP_BITCNT_T_SWAP (uz, vz); + } + + /* Maintain + * + * u = t0 tu + t1 tv + * v = s0 tu + s1 tv + * + * where u and v denote the inputs with common factors of two + * eliminated, and det (s0, t0; s1, t1) = 2^p. Then + * + * 2^p tu = s1 u - t1 v + * 2^p tv = -s0 u + t0 v + */ + + /* After initial division, tu = q tv + tu', we have + * + * u = 2^uz (tu' + q tv) + * v = 2^vz tv + * + * or + * + * t0 = 2^uz, t1 = 2^uz q + * s0 = 0, s1 = 2^vz + */ + + mpz_tdiv_qr (t1, tu, tu, tv); + mpz_mul_2exp (t1, t1, uz); + + mpz_setbit (s1, vz); + power = uz + vz; + + if (tu->_mp_size > 0) + { + mp_bitcnt_t shift; + shift = mpz_make_odd (tu); + mpz_setbit (t0, uz + shift); + power += shift; + + for (;;) + { + int c; + c = mpz_cmp (tu, tv); + if (c == 0) + break; + + if (c < 0) + { + /* tv = tv' + tu + * + * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv' + * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */ + + mpz_sub (tv, tv, tu); + mpz_add (t0, t0, t1); + mpz_add (s0, s0, s1); + + shift = mpz_make_odd (tv); + mpz_mul_2exp (t1, t1, shift); + mpz_mul_2exp (s1, s1, shift); + } + else + { + mpz_sub (tu, tu, tv); + mpz_add (t1, t0, t1); + mpz_add (s1, s0, s1); + + shift = mpz_make_odd (tu); + mpz_mul_2exp (t0, t0, shift); + mpz_mul_2exp (s0, s0, shift); + } + power += shift; + } + } + else + mpz_setbit (t0, uz); + + /* Now tv = odd part of gcd, and -s0 and t0 are corresponding + cofactors. */ + + mpz_mul_2exp (tv, tv, gz); + mpz_neg (s0, s0); + + /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To + adjust cofactors, we need u / g and v / g */ + + mpz_divexact (s1, v, tv); + mpz_abs (s1, s1); + mpz_divexact (t1, u, tv); + mpz_abs (t1, t1); + + while (power-- > 0) + { + /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */ + if (mpz_odd_p (s0) || mpz_odd_p (t0)) + { + mpz_sub (s0, s0, s1); + mpz_add (t0, t0, t1); + } + assert (mpz_even_p (t0) && mpz_even_p (s0)); + mpz_tdiv_q_2exp (s0, s0, 1); + mpz_tdiv_q_2exp (t0, t0, 1); + } + + /* Choose small cofactors (they should generally satify + + |s| < |u| / 2g and |t| < |v| / 2g, + + with some documented exceptions). Always choose the smallest s, + if there are two choices for s with same absolute value, choose + the one with smallest corresponding t (this asymmetric condition + is needed to prefer s = 0, |t| = 1 when g = |a| = |b|). 
*/ + mpz_add (s1, s0, s1); + mpz_sub (t1, t0, t1); + cmp = mpz_cmpabs (s0, s1); + if (cmp > 0 || (cmp == 0 && mpz_cmpabs (t0, t1) > 0)) + { + mpz_swap (s0, s1); + mpz_swap (t0, t1); + } + if (u->_mp_size < 0) + mpz_neg (s0, s0); + if (v->_mp_size < 0) + mpz_neg (t0, t0); + + mpz_swap (g, tv); + if (s) + mpz_swap (s, s0); + if (t) + mpz_swap (t, t0); + + mpz_clear (tu); + mpz_clear (tv); + mpz_clear (s0); + mpz_clear (s1); + mpz_clear (t0); + mpz_clear (t1); +} + +void +mpz_lcm (mpz_t r, const mpz_t u, const mpz_t v) +{ + mpz_t g; + + if (u->_mp_size == 0 || v->_mp_size == 0) + { + r->_mp_size = 0; + return; + } + + mpz_init (g); + + mpz_gcd (g, u, v); + mpz_divexact (g, u, g); + mpz_mul (r, g, v); + + mpz_clear (g); + mpz_abs (r, r); +} + +void +mpz_lcm_ui (mpz_t r, const mpz_t u, unsigned long v) +{ + if (v == 0 || u->_mp_size == 0) + { + r->_mp_size = 0; + return; + } + + v /= mpz_gcd_ui (NULL, u, v); + mpz_mul_ui (r, u, v); + + mpz_abs (r, r); +} + +int +mpz_invert (mpz_t r, const mpz_t u, const mpz_t m) +{ + mpz_t g, tr; + int invertible; + + if (u->_mp_size == 0 || mpz_cmpabs_ui (m, 1) <= 0) + return 0; + + mpz_init (g); + mpz_init (tr); + + mpz_gcdext (g, tr, NULL, u, m); + invertible = (mpz_cmp_ui (g, 1) == 0); + + if (invertible) + { + if (tr->_mp_size < 0) + { + if (m->_mp_size >= 0) + mpz_add (tr, tr, m); + else + mpz_sub (tr, tr, m); + } + mpz_swap (r, tr); + } + + mpz_clear (g); + mpz_clear (tr); + return invertible; +} + + +/* Higher level operations (sqrt, pow and root) */ + +void +mpz_pow_ui (mpz_t r, const mpz_t b, unsigned long e) +{ + unsigned long bit; + mpz_t tr; + mpz_init_set_ui (tr, 1); + + bit = GMP_ULONG_HIGHBIT; + do + { + mpz_mul (tr, tr, tr); + if (e & bit) + mpz_mul (tr, tr, b); + bit >>= 1; + } + while (bit > 0); + + mpz_swap (r, tr); + mpz_clear (tr); +} + +void +mpz_ui_pow_ui (mpz_t r, unsigned long blimb, unsigned long e) +{ + mpz_t b; + + mpz_init_set_ui (b, blimb); + mpz_pow_ui (r, b, e); + mpz_clear (b); +} + +void +mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m) +{ + mpz_t tr; + mpz_t base; + mp_size_t en, mn; + mp_srcptr mp; + struct gmp_div_inverse minv; + unsigned shift; + mp_ptr tp = NULL; + + en = GMP_ABS (e->_mp_size); + mn = GMP_ABS (m->_mp_size); + if (mn == 0) + gmp_die ("mpz_powm: Zero modulo."); + + if (en == 0) + { + mpz_set_ui (r, mpz_cmpabs_ui (m, 1)); + return; + } + + mp = m->_mp_d; + mpn_div_qr_invert (&minv, mp, mn); + shift = minv.shift; + + if (shift > 0) + { + /* To avoid shifts, we do all our reductions, except the final + one, using a *normalized* m. */ + minv.shift = 0; + + tp = gmp_alloc_limbs (mn); + gmp_assert_nocarry (mpn_lshift (tp, mp, mn, shift)); + mp = tp; + } + + mpz_init (base); + + if (e->_mp_size < 0) + { + if (!mpz_invert (base, b, m)) + gmp_die ("mpz_powm: Negative exponent and non-invertible base."); + } + else + { + mp_size_t bn; + mpz_abs (base, b); + + bn = base->_mp_size; + if (bn >= mn) + { + mpn_div_qr_preinv (NULL, base->_mp_d, base->_mp_size, mp, mn, &minv); + bn = mn; + } + + /* We have reduced the absolute value. Now take care of the + sign. Note that we get zero represented non-canonically as + m. 
*/ + if (b->_mp_size < 0) + { + mp_ptr bp = MPZ_REALLOC (base, mn); + gmp_assert_nocarry (mpn_sub (bp, mp, mn, bp, bn)); + bn = mn; + } + base->_mp_size = mpn_normalized_size (base->_mp_d, bn); + } + mpz_init_set_ui (tr, 1); + + while (--en >= 0) + { + mp_limb_t w = e->_mp_d[en]; + mp_limb_t bit; + + bit = GMP_LIMB_HIGHBIT; + do + { + mpz_mul (tr, tr, tr); + if (w & bit) + mpz_mul (tr, tr, base); + if (tr->_mp_size > mn) + { + mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); + tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); + } + bit >>= 1; + } + while (bit > 0); + } + + /* Final reduction */ + if (tr->_mp_size >= mn) + { + minv.shift = shift; + mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); + tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); + } + if (tp) + gmp_free_limbs (tp, mn); + + mpz_swap (r, tr); + mpz_clear (tr); + mpz_clear (base); +} + +void +mpz_powm_ui (mpz_t r, const mpz_t b, unsigned long elimb, const mpz_t m) +{ + mpz_t e; + + mpz_init_set_ui (e, elimb); + mpz_powm (r, b, e, m); + mpz_clear (e); +} + +/* x=trunc(y^(1/z)), r=y-x^z */ +void +mpz_rootrem (mpz_t x, mpz_t r, const mpz_t y, unsigned long z) +{ + int sgn; + mp_bitcnt_t bc; + mpz_t t, u; + + sgn = y->_mp_size < 0; + if ((~z & sgn) != 0) + gmp_die ("mpz_rootrem: Negative argument, with even root."); + if (z == 0) + gmp_die ("mpz_rootrem: Zeroth root."); + + if (mpz_cmpabs_ui (y, 1) <= 0) { + if (x) + mpz_set (x, y); + if (r) + r->_mp_size = 0; + return; + } + + mpz_init (u); + mpz_init (t); + bc = (mpz_sizeinbase (y, 2) - 1) / z + 1; + mpz_setbit (t, bc); + + if (z == 2) /* simplify sqrt loop: z-1 == 1 */ + do { + mpz_swap (u, t); /* u = x */ + mpz_tdiv_q (t, y, u); /* t = y/x */ + mpz_add (t, t, u); /* t = y/x + x */ + mpz_tdiv_q_2exp (t, t, 1); /* x'= (y/x + x)/2 */ + } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ + else /* z != 2 */ { + mpz_t v; + + mpz_init (v); + if (sgn) + mpz_neg (t, t); + + do { + mpz_swap (u, t); /* u = x */ + mpz_pow_ui (t, u, z - 1); /* t = x^(z-1) */ + mpz_tdiv_q (t, y, t); /* t = y/x^(z-1) */ + mpz_mul_ui (v, u, z - 1); /* v = x*(z-1) */ + mpz_add (t, t, v); /* t = y/x^(z-1) + x*(z-1) */ + mpz_tdiv_q_ui (t, t, z); /* x'=(y/x^(z-1) + x*(z-1))/z */ + } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ + + mpz_clear (v); + } + + if (r) { + mpz_pow_ui (t, u, z); + mpz_sub (r, y, t); + } + if (x) + mpz_swap (x, u); + mpz_clear (u); + mpz_clear (t); +} + +int +mpz_root (mpz_t x, const mpz_t y, unsigned long z) +{ + int res; + mpz_t r; + + mpz_init (r); + mpz_rootrem (x, r, y, z); + res = r->_mp_size == 0; + mpz_clear (r); + + return res; +} + +/* Compute s = floor(sqrt(u)) and r = u - s^2. 
Allows r == NULL */ +void +mpz_sqrtrem (mpz_t s, mpz_t r, const mpz_t u) +{ + mpz_rootrem (s, r, u, 2); +} + +void +mpz_sqrt (mpz_t s, const mpz_t u) +{ + mpz_rootrem (s, NULL, u, 2); +} + +int +mpz_perfect_square_p (const mpz_t u) +{ + if (u->_mp_size <= 0) + return (u->_mp_size == 0); + else + return mpz_root (NULL, u, 2); +} + +int +mpn_perfect_square_p (mp_srcptr p, mp_size_t n) +{ + mpz_t t; + + assert (n > 0); + assert (p [n-1] != 0); + return mpz_root (NULL, mpz_roinit_normal_n (t, p, n), 2); +} + +mp_size_t +mpn_sqrtrem (mp_ptr sp, mp_ptr rp, mp_srcptr p, mp_size_t n) +{ + mpz_t s, r, u; + mp_size_t res; + + assert (n > 0); + assert (p [n-1] != 0); + + mpz_init (r); + mpz_init (s); + mpz_rootrem (s, r, mpz_roinit_normal_n (u, p, n), 2); + + assert (s->_mp_size == (n+1)/2); + mpn_copyd (sp, s->_mp_d, s->_mp_size); + mpz_clear (s); + res = r->_mp_size; + if (rp) + mpn_copyd (rp, r->_mp_d, res); + mpz_clear (r); + return res; +} + +/* Combinatorics */ + +void +mpz_mfac_uiui (mpz_t x, unsigned long n, unsigned long m) +{ + mpz_set_ui (x, n + (n == 0)); + if (m + 1 < 2) return; + while (n > m + 1) + mpz_mul_ui (x, x, n -= m); +} + +void +mpz_2fac_ui (mpz_t x, unsigned long n) +{ + mpz_mfac_uiui (x, n, 2); +} + +void +mpz_fac_ui (mpz_t x, unsigned long n) +{ + mpz_mfac_uiui (x, n, 1); +} + +void +mpz_bin_uiui (mpz_t r, unsigned long n, unsigned long k) +{ + mpz_t t; + + mpz_set_ui (r, k <= n); + + if (k > (n >> 1)) + k = (k <= n) ? n - k : 0; + + mpz_init (t); + mpz_fac_ui (t, k); + + for (; k > 0; --k) + mpz_mul_ui (r, r, n--); + + mpz_divexact (r, r, t); + mpz_clear (t); +} + + +/* Primality testing */ + +/* Computes Kronecker (a/b) with odd b, a!=0 and GCD(a,b) = 1 */ +/* Adapted from JACOBI_BASE_METHOD==4 in mpn/generic/jacbase.c */ +static int +gmp_jacobi_coprime (mp_limb_t a, mp_limb_t b) +{ + int c, bit = 0; + + assert (b & 1); + assert (a != 0); + /* assert (mpn_gcd_11 (a, b) == 1); */ + + /* Below, we represent a and b shifted right so that the least + significant one bit is implicit. */ + b >>= 1; + + gmp_ctz(c, a); + a >>= 1; + + for (;;) + { + a >>= c; + /* (2/b) = -1 if b = 3 or 5 mod 8 */ + bit ^= c & (b ^ (b >> 1)); + if (a < b) + { + if (a == 0) + return bit & 1 ? -1 : 1; + bit ^= a & b; + a = b - a; + b -= a; + } + else + { + a -= b; + assert (a != 0); + } + + gmp_ctz(c, a); + ++c; + } +} + +static void +gmp_lucas_step_k_2k (mpz_t V, mpz_t Qk, const mpz_t n) +{ + mpz_mod (Qk, Qk, n); + /* V_{2k} <- V_k ^ 2 - 2Q^k */ + mpz_mul (V, V, V); + mpz_submul_ui (V, Qk, 2); + mpz_tdiv_r (V, V, n); + /* Q^{2k} = (Q^k)^2 */ + mpz_mul (Qk, Qk, Qk); +} + +/* Computes V_k, Q^k (mod n) for the Lucas' sequence */ +/* with P=1, Q=Q; k = (n>>b0)|1. */ +/* Requires an odd n > 4; b0 > 0; -2*Q must not overflow a long */ +/* Returns (U_k == 0) and sets V=V_k and Qk=Q^k. 
*/ +static int +gmp_lucas_mod (mpz_t V, mpz_t Qk, long Q, + mp_bitcnt_t b0, const mpz_t n) +{ + mp_bitcnt_t bs; + mpz_t U; + int res; + + assert (b0 > 0); + assert (Q <= - (LONG_MIN / 2)); + assert (Q >= - (LONG_MAX / 2)); + assert (mpz_cmp_ui (n, 4) > 0); + assert (mpz_odd_p (n)); + + mpz_init_set_ui (U, 1); /* U1 = 1 */ + mpz_set_ui (V, 1); /* V1 = 1 */ + mpz_set_si (Qk, Q); + + for (bs = mpz_sizeinbase (n, 2) - 1; --bs >= b0;) + { + /* U_{2k} <- U_k * V_k */ + mpz_mul (U, U, V); + /* V_{2k} <- V_k ^ 2 - 2Q^k */ + /* Q^{2k} = (Q^k)^2 */ + gmp_lucas_step_k_2k (V, Qk, n); + + /* A step k->k+1 is performed if the bit in $n$ is 1 */ + /* mpz_tstbit(n,bs) or the bit is 0 in $n$ but */ + /* should be 1 in $n+1$ (bs == b0) */ + if (b0 == bs || mpz_tstbit (n, bs)) + { + /* Q^{k+1} <- Q^k * Q */ + mpz_mul_si (Qk, Qk, Q); + /* U_{k+1} <- (U_k + V_k) / 2 */ + mpz_swap (U, V); /* Keep in V the old value of U_k */ + mpz_add (U, U, V); + /* We have to compute U/2, so we need an even value, */ + /* equivalent (mod n) */ + if (mpz_odd_p (U)) + mpz_add (U, U, n); + mpz_tdiv_q_2exp (U, U, 1); + /* V_{k+1} <-(D*U_k + V_k) / 2 = + U_{k+1} + (D-1)/2*U_k = U_{k+1} - 2Q*U_k */ + mpz_mul_si (V, V, -2*Q); + mpz_add (V, U, V); + mpz_tdiv_r (V, V, n); + } + mpz_tdiv_r (U, U, n); + } + + res = U->_mp_size == 0; + mpz_clear (U); + return res; +} + +/* Performs strong Lucas' test on x, with parameters suggested */ +/* for the BPSW test. Qk is only passed to recycle a variable. */ +/* Requires GCD (x,6) = 1.*/ +static int +gmp_stronglucas (const mpz_t x, mpz_t Qk) +{ + mp_bitcnt_t b0; + mpz_t V, n; + mp_limb_t maxD, D; /* The absolute value is stored. */ + long Q; + mp_limb_t tl; + + /* Test on the absolute value. */ + mpz_roinit_normal_n (n, x->_mp_d, GMP_ABS (x->_mp_size)); + + assert (mpz_odd_p (n)); + /* assert (mpz_gcd_ui (NULL, n, 6) == 1); */ + if (mpz_root (Qk, n, 2)) + return 0; /* A square is composite. */ + + /* Check Ds up to square root (in case, n is prime) + or avoid overflows */ + maxD = (Qk->_mp_size == 1) ? Qk->_mp_d [0] - 1 : GMP_LIMB_MAX; + + D = 3; + /* Search a D such that (D/n) = -1 in the sequence 5,-7,9,-11,.. */ + /* For those Ds we have (D/n) = (n/|D|) */ + do + { + if (D >= maxD) + return 1 + (D != GMP_LIMB_MAX); /* (1 + ! ~ D) */ + D += 2; + tl = mpz_tdiv_ui (n, D); + if (tl == 0) + return 0; + } + while (gmp_jacobi_coprime (tl, D) == 1); + + mpz_init (V); + + /* n-(D/n) = n+1 = d*2^{b0}, with d = (n>>b0) | 1 */ + b0 = mpn_common_scan (~ n->_mp_d[0], 0, n->_mp_d, n->_mp_size, GMP_LIMB_MAX); + /* b0 = mpz_scan0 (n, 0); */ + + /* D= P^2 - 4Q; P = 1; Q = (1-D)/4 */ + Q = (D & 2) ? (long) (D >> 2) + 1 : -(long) (D >> 2); + + if (! gmp_lucas_mod (V, Qk, Q, b0, n)) /* If Ud != 0 */ + while (V->_mp_size != 0 && --b0 != 0) /* while Vk != 0 */ + /* V <- V ^ 2 - 2Q^k */ + /* Q^{2k} = (Q^k)^2 */ + gmp_lucas_step_k_2k (V, Qk, n); + + mpz_clear (V); + return (b0 != 0); +} + +static int +gmp_millerrabin (const mpz_t n, const mpz_t nm1, mpz_t y, + const mpz_t q, mp_bitcnt_t k) +{ + assert (k > 0); + + /* Caller must initialize y to the base. */ + mpz_powm (y, y, q, n); + + if (mpz_cmp_ui (y, 1) == 0 || mpz_cmp (y, nm1) == 0) + return 1; + + while (--k > 0) + { + mpz_powm_ui (y, y, 2, n); + if (mpz_cmp (y, nm1) == 0) + return 1; + } + return 0; +} + +/* This product is 0xc0cfd797, and fits in 32 bits. 
*/ +#define GMP_PRIME_PRODUCT \ + (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL) + +/* Bit (p+1)/2 is set, for each odd prime <= 61 */ +#define GMP_PRIME_MASK 0xc96996dcUL + +int +mpz_probab_prime_p (const mpz_t n, int reps) +{ + mpz_t nm1; + mpz_t q; + mpz_t y; + mp_bitcnt_t k; + int is_prime; + int j; + + /* Note that we use the absolute value of n only, for compatibility + with the real GMP. */ + if (mpz_even_p (n)) + return (mpz_cmpabs_ui (n, 2) == 0) ? 2 : 0; + + /* Above test excludes n == 0 */ + assert (n->_mp_size != 0); + + if (mpz_cmpabs_ui (n, 64) < 0) + return (GMP_PRIME_MASK >> (n->_mp_d[0] >> 1)) & 2; + + if (mpz_gcd_ui (NULL, n, GMP_PRIME_PRODUCT) != 1) + return 0; + + /* All prime factors are >= 31. */ + if (mpz_cmpabs_ui (n, 31*31) < 0) + return 2; + + mpz_init (nm1); + mpz_init (q); + + /* Find q and k, where q is odd and n = 1 + 2**k * q. */ + mpz_abs (nm1, n); + nm1->_mp_d[0] -= 1; + /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ + k = mpn_scan1 (nm1->_mp_d, 0); + mpz_tdiv_q_2exp (q, nm1, k); + + /* BPSW test */ + mpz_init_set_ui (y, 2); + is_prime = gmp_millerrabin (n, nm1, y, q, k) && gmp_stronglucas (n, y); + reps -= 24; /* skip the first 24 repetitions */ + + /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] = + j^2 + j + 41 using Euler's polynomial. We potentially stop early, + if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps > + 30 (a[30] == 971 > 31*31 == 961). */ + + for (j = 0; is_prime & (j < reps); j++) + { + mpz_set_ui (y, (unsigned long) j*j+j+41); + if (mpz_cmp (y, nm1) >= 0) + { + /* Don't try any further bases. This "early" break does not affect + the result for any reasonable reps value (<=5000 was tested) */ + assert (j >= 30); + break; + } + is_prime = gmp_millerrabin (n, nm1, y, q, k); + } + mpz_clear (nm1); + mpz_clear (q); + mpz_clear (y); + + return is_prime; +} + + +/* Logical operations and bit manipulation. */ + +/* Numbers are treated as if represented in two's complement (and + infinitely sign extended). For a negative values we get the two's + complement from -x = ~x + 1, where ~ is bitwise complement. + Negation transforms + + xxxx10...0 + + into + + yyyy10...0 + + where yyyy is the bitwise complement of xxxx. So least significant + bits, up to and including the first one bit, are unchanged, and + the more significant bits are all complemented. + + To change a bit from zero to one in a negative number, subtract the + corresponding power of two from the absolute value. This can never + underflow. To change a bit from one to zero, add the corresponding + power of two, and this might overflow. E.g., if x = -001111, the + two's complement is 110001. Clearing the least significant bit, we + get two's complement 110000, and -010000. */ + +int +mpz_tstbit (const mpz_t d, mp_bitcnt_t bit_index) +{ + mp_size_t limb_index; + unsigned shift; + mp_size_t ds; + mp_size_t dn; + mp_limb_t w; + int bit; + + ds = d->_mp_size; + dn = GMP_ABS (ds); + limb_index = bit_index / GMP_LIMB_BITS; + if (limb_index >= dn) + return ds < 0; + + shift = bit_index % GMP_LIMB_BITS; + w = d->_mp_d[limb_index]; + bit = (w >> shift) & 1; + + if (ds < 0) + { + /* d < 0. Check if any of the bits below is set: If so, our bit + must be complemented. 
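+         For example, with d = -4 the stored limb is 4 (binary 100): bits 0 and 1
+         read as 0, bit 2 reads as 1, and every higher bit reads as 1, matching
+         the two's complement representation ...11100.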
*/ + if (shift > 0 && (mp_limb_t) (w << (GMP_LIMB_BITS - shift)) > 0) + return bit ^ 1; + while (--limb_index >= 0) + if (d->_mp_d[limb_index] > 0) + return bit ^ 1; + } + return bit; +} + +static void +mpz_abs_add_bit (mpz_t d, mp_bitcnt_t bit_index) +{ + mp_size_t dn, limb_index; + mp_limb_t bit; + mp_ptr dp; + + dn = GMP_ABS (d->_mp_size); + + limb_index = bit_index / GMP_LIMB_BITS; + bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); + + if (limb_index >= dn) + { + mp_size_t i; + /* The bit should be set outside of the end of the number. + We have to increase the size of the number. */ + dp = MPZ_REALLOC (d, limb_index + 1); + + dp[limb_index] = bit; + for (i = dn; i < limb_index; i++) + dp[i] = 0; + dn = limb_index + 1; + } + else + { + mp_limb_t cy; + + dp = d->_mp_d; + + cy = mpn_add_1 (dp + limb_index, dp + limb_index, dn - limb_index, bit); + if (cy > 0) + { + dp = MPZ_REALLOC (d, dn + 1); + dp[dn++] = cy; + } + } + + d->_mp_size = (d->_mp_size < 0) ? - dn : dn; +} + +static void +mpz_abs_sub_bit (mpz_t d, mp_bitcnt_t bit_index) +{ + mp_size_t dn, limb_index; + mp_ptr dp; + mp_limb_t bit; + + dn = GMP_ABS (d->_mp_size); + dp = d->_mp_d; + + limb_index = bit_index / GMP_LIMB_BITS; + bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); + + assert (limb_index < dn); + + gmp_assert_nocarry (mpn_sub_1 (dp + limb_index, dp + limb_index, + dn - limb_index, bit)); + dn = mpn_normalized_size (dp, dn); + d->_mp_size = (d->_mp_size < 0) ? - dn : dn; +} + +void +mpz_setbit (mpz_t d, mp_bitcnt_t bit_index) +{ + if (!mpz_tstbit (d, bit_index)) + { + if (d->_mp_size >= 0) + mpz_abs_add_bit (d, bit_index); + else + mpz_abs_sub_bit (d, bit_index); + } +} + +void +mpz_clrbit (mpz_t d, mp_bitcnt_t bit_index) +{ + if (mpz_tstbit (d, bit_index)) + { + if (d->_mp_size >= 0) + mpz_abs_sub_bit (d, bit_index); + else + mpz_abs_add_bit (d, bit_index); + } +} + +void +mpz_combit (mpz_t d, mp_bitcnt_t bit_index) +{ + if (mpz_tstbit (d, bit_index) ^ (d->_mp_size < 0)) + mpz_abs_sub_bit (d, bit_index); + else + mpz_abs_add_bit (d, bit_index); +} + +void +mpz_com (mpz_t r, const mpz_t u) +{ + mpz_add_ui (r, u, 1); + mpz_neg (r, r); +} + +void +mpz_and (mpz_t r, const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, rn, i; + mp_ptr up, vp, rp; + + mp_limb_t ux, vx, rx; + mp_limb_t uc, vc, rc; + mp_limb_t ul, vl, rl; + + un = GMP_ABS (u->_mp_size); + vn = GMP_ABS (v->_mp_size); + if (un < vn) + { + MPZ_SRCPTR_SWAP (u, v); + MP_SIZE_T_SWAP (un, vn); + } + if (vn == 0) + { + r->_mp_size = 0; + return; + } + + uc = u->_mp_size < 0; + vc = v->_mp_size < 0; + rc = uc & vc; + + ux = -uc; + vx = -vc; + rx = -rc; + + /* If the smaller input is positive, higher limbs don't matter. */ + rn = vx ? un : vn; + + rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); + + up = u->_mp_d; + vp = v->_mp_d; + + i = 0; + do + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + vl = (vp[i] ^ vx) + vc; + vc = vl < vc; + + rl = ( (ul & vl) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + while (++i < vn); + assert (vc == 0); + + for (; i < rn; i++) + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + rl = ( (ul & vx) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + if (rc) + rp[rn++] = rc; + else + rn = mpn_normalized_size (rp, rn); + + r->_mp_size = rx ? 
-rn : rn; +} + +void +mpz_ior (mpz_t r, const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, rn, i; + mp_ptr up, vp, rp; + + mp_limb_t ux, vx, rx; + mp_limb_t uc, vc, rc; + mp_limb_t ul, vl, rl; + + un = GMP_ABS (u->_mp_size); + vn = GMP_ABS (v->_mp_size); + if (un < vn) + { + MPZ_SRCPTR_SWAP (u, v); + MP_SIZE_T_SWAP (un, vn); + } + if (vn == 0) + { + mpz_set (r, u); + return; + } + + uc = u->_mp_size < 0; + vc = v->_mp_size < 0; + rc = uc | vc; + + ux = -uc; + vx = -vc; + rx = -rc; + + /* If the smaller input is negative, by sign extension higher limbs + don't matter. */ + rn = vx ? vn : un; + + rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); + + up = u->_mp_d; + vp = v->_mp_d; + + i = 0; + do + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + vl = (vp[i] ^ vx) + vc; + vc = vl < vc; + + rl = ( (ul | vl) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + while (++i < vn); + assert (vc == 0); + + for (; i < rn; i++) + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + rl = ( (ul | vx) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + if (rc) + rp[rn++] = rc; + else + rn = mpn_normalized_size (rp, rn); + + r->_mp_size = rx ? -rn : rn; +} + +void +mpz_xor (mpz_t r, const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, i; + mp_ptr up, vp, rp; + + mp_limb_t ux, vx, rx; + mp_limb_t uc, vc, rc; + mp_limb_t ul, vl, rl; + + un = GMP_ABS (u->_mp_size); + vn = GMP_ABS (v->_mp_size); + if (un < vn) + { + MPZ_SRCPTR_SWAP (u, v); + MP_SIZE_T_SWAP (un, vn); + } + if (vn == 0) + { + mpz_set (r, u); + return; + } + + uc = u->_mp_size < 0; + vc = v->_mp_size < 0; + rc = uc ^ vc; + + ux = -uc; + vx = -vc; + rx = -rc; + + rp = MPZ_REALLOC (r, un + (mp_size_t) rc); + + up = u->_mp_d; + vp = v->_mp_d; + + i = 0; + do + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + vl = (vp[i] ^ vx) + vc; + vc = vl < vc; + + rl = (ul ^ vl ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + while (++i < vn); + assert (vc == 0); + + for (; i < un; i++) + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + rl = (ul ^ ux) + rc; + rc = rl < rc; + rp[i] = rl; + } + if (rc) + rp[un++] = rc; + else + un = mpn_normalized_size (rp, un); + + r->_mp_size = rx ? -un : un; +} + +static unsigned +gmp_popcount_limb (mp_limb_t x) +{ + unsigned c; + + /* Do 16 bits at a time, to avoid limb-sized constants. 
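+     The loop body is the usual bit-slicing (SWAR) population count, applied
+     to one 16-bit chunk of x per iteration.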
*/ + int LOCAL_SHIFT_BITS = 16; + for (c = 0; x > 0;) + { + unsigned w = x - ((x >> 1) & 0x5555); + w = ((w >> 2) & 0x3333) + (w & 0x3333); + w = (w >> 4) + w; + w = ((w >> 8) & 0x000f) + (w & 0x000f); + c += w; + if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) + x >>= LOCAL_SHIFT_BITS; + else + x = 0; + } + return c; +} + +mp_bitcnt_t +mpn_popcount (mp_srcptr p, mp_size_t n) +{ + mp_size_t i; + mp_bitcnt_t c; + + for (c = 0, i = 0; i < n; i++) + c += gmp_popcount_limb (p[i]); + + return c; +} + +mp_bitcnt_t +mpz_popcount (const mpz_t u) +{ + mp_size_t un; + + un = u->_mp_size; + + if (un < 0) + return ~(mp_bitcnt_t) 0; + + return mpn_popcount (u->_mp_d, un); +} + +mp_bitcnt_t +mpz_hamdist (const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, i; + mp_limb_t uc, vc, ul, vl, comp; + mp_srcptr up, vp; + mp_bitcnt_t c; + + un = u->_mp_size; + vn = v->_mp_size; + + if ( (un ^ vn) < 0) + return ~(mp_bitcnt_t) 0; + + comp = - (uc = vc = (un < 0)); + if (uc) + { + assert (vn < 0); + un = -un; + vn = -vn; + } + + up = u->_mp_d; + vp = v->_mp_d; + + if (un < vn) + MPN_SRCPTR_SWAP (up, un, vp, vn); + + for (i = 0, c = 0; i < vn; i++) + { + ul = (up[i] ^ comp) + uc; + uc = ul < uc; + + vl = (vp[i] ^ comp) + vc; + vc = vl < vc; + + c += gmp_popcount_limb (ul ^ vl); + } + assert (vc == 0); + + for (; i < un; i++) + { + ul = (up[i] ^ comp) + uc; + uc = ul < uc; + + c += gmp_popcount_limb (ul ^ comp); + } + + return c; +} + +mp_bitcnt_t +mpz_scan1 (const mpz_t u, mp_bitcnt_t starting_bit) +{ + mp_ptr up; + mp_size_t us, un, i; + mp_limb_t limb, ux; + + us = u->_mp_size; + un = GMP_ABS (us); + i = starting_bit / GMP_LIMB_BITS; + + /* Past the end there's no 1 bits for u>=0, or an immediate 1 bit + for u<0. Notice this test picks up any u==0 too. */ + if (i >= un) + return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit); + + up = u->_mp_d; + ux = 0; + limb = up[i]; + + if (starting_bit != 0) + { + if (us < 0) + { + ux = mpn_zero_p (up, i); + limb = ~ limb + ux; + ux = - (mp_limb_t) (limb >= ux); + } + + /* Mask to 0 all bits before starting_bit, thus ignoring them. */ + limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); + } + + return mpn_common_scan (limb, i, up, un, ux); +} + +mp_bitcnt_t +mpz_scan0 (const mpz_t u, mp_bitcnt_t starting_bit) +{ + mp_ptr up; + mp_size_t us, un, i; + mp_limb_t limb, ux; + + us = u->_mp_size; + ux = - (mp_limb_t) (us >= 0); + un = GMP_ABS (us); + i = starting_bit / GMP_LIMB_BITS; + + /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for + u<0. Notice this test picks up all cases of u==0 too. */ + if (i >= un) + return (ux ? starting_bit : ~(mp_bitcnt_t) 0); + + up = u->_mp_d; + limb = up[i] ^ ux; + + if (ux == 0) + limb -= mpn_zero_p (up, i); /* limb = ~(~limb + zero_p) */ + + /* Mask all bits before starting_bit, thus ignoring them. */ + limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); + + return mpn_common_scan (limb, i, up, un, ux); +} + + +/* MPZ base conversion. 
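+
+   Illustrative sketch (assuming GMP's documented sizing rule for
+   mpz_get_str: mpz_sizeinbase (x, base) + 2 bytes cover the digits, an
+   optional minus sign and the terminating '\0'); here x is any
+   initialized mpz_t:
+
+     char *buf = malloc (mpz_sizeinbase (x, 10) + 2);
+     if (buf != NULL)
+       mpz_get_str (buf, 10, x);
+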
*/ + +size_t +mpz_sizeinbase (const mpz_t u, int base) +{ + mp_size_t un, tn; + mp_srcptr up; + mp_ptr tp; + mp_bitcnt_t bits; + struct gmp_div_inverse bi; + size_t ndigits; + + assert (base >= 2); + assert (base <= 62); + + un = GMP_ABS (u->_mp_size); + if (un == 0) + return 1; + + up = u->_mp_d; + + bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]); + switch (base) + { + case 2: + return bits; + case 4: + return (bits + 1) / 2; + case 8: + return (bits + 2) / 3; + case 16: + return (bits + 3) / 4; + case 32: + return (bits + 4) / 5; + /* FIXME: Do something more clever for the common case of base + 10. */ + } + + tp = gmp_alloc_limbs (un); + mpn_copyi (tp, up, un); + mpn_div_qr_1_invert (&bi, base); + + tn = un; + ndigits = 0; + do + { + ndigits++; + mpn_div_qr_1_preinv (tp, tp, tn, &bi); + tn -= (tp[tn-1] == 0); + } + while (tn > 0); + + gmp_free_limbs (tp, un); + return ndigits; +} + +char * +mpz_get_str (char *sp, int base, const mpz_t u) +{ + unsigned bits; + const char *digits; + mp_size_t un; + size_t i, sn, osn; + + digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; + if (base > 1) + { + if (base <= 36) + digits = "0123456789abcdefghijklmnopqrstuvwxyz"; + else if (base > 62) + return NULL; + } + else if (base >= -1) + base = 10; + else + { + base = -base; + if (base > 36) + return NULL; + } + + sn = 1 + mpz_sizeinbase (u, base); + if (!sp) + { + osn = 1 + sn; + sp = (char *) gmp_alloc (osn); + } + else + osn = 0; + un = GMP_ABS (u->_mp_size); + + if (un == 0) + { + sp[0] = '0'; + sn = 1; + goto ret; + } + + i = 0; + + if (u->_mp_size < 0) + sp[i++] = '-'; + + bits = mpn_base_power_of_two_p (base); + + if (bits) + /* Not modified in this case. */ + sn = i + mpn_get_str_bits ((unsigned char *) sp + i, bits, u->_mp_d, un); + else + { + struct mpn_base_info info; + mp_ptr tp; + + mpn_get_base_info (&info, base); + tp = gmp_alloc_limbs (un); + mpn_copyi (tp, u->_mp_d, un); + + sn = i + mpn_get_str_other ((unsigned char *) sp + i, base, &info, tp, un); + gmp_free_limbs (tp, un); + } + + for (; i < sn; i++) + sp[i] = digits[(unsigned char) sp[i]]; + +ret: + sp[sn] = '\0'; + if (osn && osn != sn + 1) + sp = (char*) gmp_realloc (sp, osn, sn + 1); + return sp; +} + +int +mpz_set_str (mpz_t r, const char *sp, int base) +{ + unsigned bits, value_of_a; + mp_size_t rn, alloc; + mp_ptr rp; + size_t dn, sn; + int sign; + unsigned char *dp; + + assert (base == 0 || (base >= 2 && base <= 62)); + + while (isspace( (unsigned char) *sp)) + sp++; + + sign = (*sp == '-'); + sp += sign; + + if (base == 0) + { + if (sp[0] == '0') + { + if (sp[1] == 'x' || sp[1] == 'X') + { + base = 16; + sp += 2; + } + else if (sp[1] == 'b' || sp[1] == 'B') + { + base = 2; + sp += 2; + } + else + base = 8; + } + else + base = 10; + } + + if (!*sp) + { + r->_mp_size = 0; + return -1; + } + sn = strlen(sp); + dp = (unsigned char *) gmp_alloc (sn); + + value_of_a = (base > 36) ? 
36 : 10; + for (dn = 0; *sp; sp++) + { + unsigned digit; + + if (isspace ((unsigned char) *sp)) + continue; + else if (*sp >= '0' && *sp <= '9') + digit = *sp - '0'; + else if (*sp >= 'a' && *sp <= 'z') + digit = *sp - 'a' + value_of_a; + else if (*sp >= 'A' && *sp <= 'Z') + digit = *sp - 'A' + 10; + else + digit = base; /* fail */ + + if (digit >= (unsigned) base) + { + gmp_free (dp, sn); + r->_mp_size = 0; + return -1; + } + + dp[dn++] = digit; + } + + if (!dn) + { + gmp_free (dp, sn); + r->_mp_size = 0; + return -1; + } + bits = mpn_base_power_of_two_p (base); + + if (bits > 0) + { + alloc = (dn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; + rp = MPZ_REALLOC (r, alloc); + rn = mpn_set_str_bits (rp, dp, dn, bits); + } + else + { + struct mpn_base_info info; + mpn_get_base_info (&info, base); + alloc = (dn + info.exp - 1) / info.exp; + rp = MPZ_REALLOC (r, alloc); + rn = mpn_set_str_other (rp, dp, dn, base, &info); + /* Normalization, needed for all-zero input. */ + assert (rn > 0); + rn -= rp[rn-1] == 0; + } + assert (rn <= alloc); + gmp_free (dp, sn); + + r->_mp_size = sign ? - rn : rn; + + return 0; +} + +int +mpz_init_set_str (mpz_t r, const char *sp, int base) +{ + mpz_init (r); + return mpz_set_str (r, sp, base); +} + +size_t +mpz_out_str (FILE *stream, int base, const mpz_t x) +{ + char *str; + size_t len, n; + + str = mpz_get_str (NULL, base, x); + if (!str) + return 0; + len = strlen (str); + n = fwrite (str, 1, len, stream); + gmp_free (str, len + 1); + return n; +} + + +static int +gmp_detect_endian (void) +{ + static const int i = 2; + const unsigned char *p = (const unsigned char *) &i; + return 1 - *p; +} + +/* Import and export. Does not support nails. */ +void +mpz_import (mpz_t r, size_t count, int order, size_t size, int endian, + size_t nails, const void *src) +{ + const unsigned char *p; + ptrdiff_t word_step; + mp_ptr rp; + mp_size_t rn; + + /* The current (partial) limb. */ + mp_limb_t limb; + /* The number of bytes already copied to this limb (starting from + the low end). */ + size_t bytes; + /* The index where the limb should be stored, when completed. */ + mp_size_t i; + + if (nails != 0) + gmp_die ("mpz_import: Nails not supported."); + + assert (order == 1 || order == -1); + assert (endian >= -1 && endian <= 1); + + if (endian == 0) + endian = gmp_detect_endian (); + + p = (unsigned char *) src; + + word_step = (order != endian) ? 2 * size : 0; + + /* Process bytes from the least significant end, so point p at the + least significant word. */ + if (order == 1) + { + p += size * (count - 1); + word_step = - word_step; + } + + /* And at least significant byte of that word. 
*/ + if (endian == 1) + p += (size - 1); + + rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t); + rp = MPZ_REALLOC (r, rn); + + for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step) + { + size_t j; + for (j = 0; j < size; j++, p -= (ptrdiff_t) endian) + { + limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT); + if (bytes == sizeof(mp_limb_t)) + { + rp[i++] = limb; + bytes = 0; + limb = 0; + } + } + } + assert (i + (bytes > 0) == rn); + if (limb != 0) + rp[i++] = limb; + else + i = mpn_normalized_size (rp, i); + + r->_mp_size = i; +} + +void * +mpz_export (void *r, size_t *countp, int order, size_t size, int endian, + size_t nails, const mpz_t u) +{ + size_t count; + mp_size_t un; + + if (nails != 0) + gmp_die ("mpz_export: Nails not supported."); + + assert (order == 1 || order == -1); + assert (endian >= -1 && endian <= 1); + assert (size > 0 || u->_mp_size == 0); + + un = u->_mp_size; + count = 0; + if (un != 0) + { + size_t k; + unsigned char *p; + ptrdiff_t word_step; + /* The current (partial) limb. */ + mp_limb_t limb; + /* The number of bytes left to do in this limb. */ + size_t bytes; + /* The index where the limb was read. */ + mp_size_t i; + + un = GMP_ABS (un); + + /* Count bytes in top limb. */ + limb = u->_mp_d[un-1]; + assert (limb != 0); + + k = (GMP_LIMB_BITS <= CHAR_BIT); + if (!k) + { + do { + int LOCAL_CHAR_BIT = CHAR_BIT; + k++; limb >>= LOCAL_CHAR_BIT; + } while (limb != 0); + } + /* else limb = 0; */ + + count = (k + (un-1) * sizeof (mp_limb_t) + size - 1) / size; + + if (!r) + r = gmp_alloc (count * size); + + if (endian == 0) + endian = gmp_detect_endian (); + + p = (unsigned char *) r; + + word_step = (order != endian) ? 2 * size : 0; + + /* Process bytes from the least significant end, so point p at the + least significant word. */ + if (order == 1) + { + p += size * (count - 1); + word_step = - word_step; + } + + /* And at least significant byte of that word. */ + if (endian == 1) + p += (size - 1); + + for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step) + { + size_t j; + for (j = 0; j < size; ++j, p -= (ptrdiff_t) endian) + { + if (sizeof (mp_limb_t) == 1) + { + if (i < un) + *p = u->_mp_d[i++]; + else + *p = 0; + } + else + { + int LOCAL_CHAR_BIT = CHAR_BIT; + if (bytes == 0) + { + if (i < un) + limb = u->_mp_d[i++]; + bytes = sizeof (mp_limb_t); + } + *p = limb; + limb >>= LOCAL_CHAR_BIT; + bytes--; + } + } + } + assert (i == un); + assert (k == count); + } + + if (countp) + *countp = count; + + return r; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp.h new file mode 100644 index 0000000000..f28cb360ce --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp.h @@ -0,0 +1,311 @@ +/* mini-gmp, a minimalistic implementation of a GNU GMP subset. + +Copyright 2011-2015, 2017, 2019-2021 Free Software Foundation, Inc. + +This file is part of the GNU MP Library. + +The GNU MP Library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 2 of the License, or (at your option) any + later version. + +or both in parallel, as here. 
+ +The GNU MP Library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the GNU MP Library. If not, +see https://www.gnu.org/licenses/. */ + +/* About mini-gmp: This is a minimal implementation of a subset of the + GMP interface. It is intended for inclusion into applications which + have modest bignums needs, as a fallback when the real GMP library + is not installed. + + This file defines the public interface. */ + +#ifndef __MINI_GMP_H__ +#define __MINI_GMP_H__ + +/* For size_t */ +#include + +#if defined (__cplusplus) +extern "C" { +#endif + +void mp_set_memory_functions (void *(*) (size_t), + void *(*) (void *, size_t, size_t), + void (*) (void *, size_t)); + +void mp_get_memory_functions (void *(**) (size_t), + void *(**) (void *, size_t, size_t), + void (**) (void *, size_t)); + +#ifndef MINI_GMP_LIMB_TYPE +#define MINI_GMP_LIMB_TYPE long +#endif + +typedef unsigned MINI_GMP_LIMB_TYPE mp_limb_t; +typedef long mp_size_t; +typedef unsigned long mp_bitcnt_t; + +typedef mp_limb_t *mp_ptr; +typedef const mp_limb_t *mp_srcptr; + +typedef struct +{ + int _mp_alloc; /* Number of *limbs* allocated and pointed + to by the _mp_d field. */ + int _mp_size; /* abs(_mp_size) is the number of limbs the + last field points to. If _mp_size is + negative this is a negative number. */ + mp_limb_t *_mp_d; /* Pointer to the limbs. */ +} __mpz_struct; + +typedef __mpz_struct mpz_t[1]; + +typedef __mpz_struct *mpz_ptr; +typedef const __mpz_struct *mpz_srcptr; + +extern const int mp_bits_per_limb; + +void mpn_copyi (mp_ptr, mp_srcptr, mp_size_t); +void mpn_copyd (mp_ptr, mp_srcptr, mp_size_t); +void mpn_zero (mp_ptr, mp_size_t); + +int mpn_cmp (mp_srcptr, mp_srcptr, mp_size_t); +int mpn_zero_p (mp_srcptr, mp_size_t); + +mp_limb_t mpn_add_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_add_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +mp_limb_t mpn_add (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); + +mp_limb_t mpn_sub_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_sub_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +mp_limb_t mpn_sub (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); + +mp_limb_t mpn_mul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_addmul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_submul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); + +mp_limb_t mpn_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); +void mpn_mul_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +void mpn_sqr (mp_ptr, mp_srcptr, mp_size_t); +int mpn_perfect_square_p (mp_srcptr, mp_size_t); +mp_size_t mpn_sqrtrem (mp_ptr, mp_ptr, mp_srcptr, mp_size_t); +mp_size_t mpn_gcd (mp_ptr, mp_ptr, mp_size_t, mp_ptr, mp_size_t); + +mp_limb_t mpn_lshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); +mp_limb_t mpn_rshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); + +mp_bitcnt_t mpn_scan0 (mp_srcptr, mp_bitcnt_t); +mp_bitcnt_t mpn_scan1 (mp_srcptr, mp_bitcnt_t); + +void mpn_com (mp_ptr, mp_srcptr, mp_size_t); +mp_limb_t mpn_neg (mp_ptr, mp_srcptr, mp_size_t); + +mp_bitcnt_t mpn_popcount (mp_srcptr, mp_size_t); + +mp_limb_t mpn_invert_3by2 (mp_limb_t, mp_limb_t); +#define mpn_invert_limb(x) mpn_invert_3by2 ((x), 0) + +size_t mpn_get_str (unsigned char *, int, mp_ptr, mp_size_t); 
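+/* (Assuming the semantics documented for GMP proper: the two mpn string
+   conversion routines above and below work with arrays of raw digit values
+   in the given base, not ASCII, and the caller supplies all storage; the
+   mpz_get_str / mpz_set_str functions declared further down are the
+   ASCII-string counterparts that manage memory themselves.) */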
+mp_size_t mpn_set_str (mp_ptr, const unsigned char *, size_t, int); + +void mpz_init (mpz_t); +void mpz_init2 (mpz_t, mp_bitcnt_t); +void mpz_clear (mpz_t); + +#define mpz_odd_p(z) (((z)->_mp_size != 0) & (int) (z)->_mp_d[0]) +#define mpz_even_p(z) (! mpz_odd_p (z)) + +int mpz_sgn (const mpz_t); +int mpz_cmp_si (const mpz_t, long); +int mpz_cmp_ui (const mpz_t, unsigned long); +int mpz_cmp (const mpz_t, const mpz_t); +int mpz_cmpabs_ui (const mpz_t, unsigned long); +int mpz_cmpabs (const mpz_t, const mpz_t); +int mpz_cmp_d (const mpz_t, double); +int mpz_cmpabs_d (const mpz_t, double); + +void mpz_abs (mpz_t, const mpz_t); +void mpz_neg (mpz_t, const mpz_t); +void mpz_swap (mpz_t, mpz_t); + +void mpz_add_ui (mpz_t, const mpz_t, unsigned long); +void mpz_add (mpz_t, const mpz_t, const mpz_t); +void mpz_sub_ui (mpz_t, const mpz_t, unsigned long); +void mpz_ui_sub (mpz_t, unsigned long, const mpz_t); +void mpz_sub (mpz_t, const mpz_t, const mpz_t); + +void mpz_mul_si (mpz_t, const mpz_t, long int); +void mpz_mul_ui (mpz_t, const mpz_t, unsigned long int); +void mpz_mul (mpz_t, const mpz_t, const mpz_t); +void mpz_mul_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_addmul_ui (mpz_t, const mpz_t, unsigned long int); +void mpz_addmul (mpz_t, const mpz_t, const mpz_t); +void mpz_submul_ui (mpz_t, const mpz_t, unsigned long int); +void mpz_submul (mpz_t, const mpz_t, const mpz_t); + +void mpz_cdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_fdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_tdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_cdiv_q (mpz_t, const mpz_t, const mpz_t); +void mpz_fdiv_q (mpz_t, const mpz_t, const mpz_t); +void mpz_tdiv_q (mpz_t, const mpz_t, const mpz_t); +void mpz_cdiv_r (mpz_t, const mpz_t, const mpz_t); +void mpz_fdiv_r (mpz_t, const mpz_t, const mpz_t); +void mpz_tdiv_r (mpz_t, const mpz_t, const mpz_t); + +void mpz_cdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_fdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_tdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_cdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_fdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_tdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); + +void mpz_mod (mpz_t, const mpz_t, const mpz_t); + +void mpz_divexact (mpz_t, const mpz_t, const mpz_t); + +int mpz_divisible_p (const mpz_t, const mpz_t); +int mpz_congruent_p (const mpz_t, const mpz_t, const mpz_t); + +unsigned long mpz_cdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); +unsigned long mpz_fdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); +unsigned long mpz_tdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); +unsigned long mpz_cdiv_q_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_fdiv_q_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_tdiv_q_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_cdiv_r_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_fdiv_r_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_tdiv_r_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_cdiv_ui (const mpz_t, unsigned long); +unsigned long mpz_fdiv_ui (const mpz_t, unsigned long); +unsigned long mpz_tdiv_ui (const mpz_t, unsigned long); + +unsigned long mpz_mod_ui (mpz_t, const mpz_t, unsigned long); + +void mpz_divexact_ui (mpz_t, const mpz_t, unsigned long); + +int mpz_divisible_ui_p (const mpz_t, unsigned long); + +unsigned long mpz_gcd_ui (mpz_t, const mpz_t, unsigned long); +void mpz_gcd (mpz_t, const mpz_t, const mpz_t); +void 
mpz_gcdext (mpz_t, mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_lcm_ui (mpz_t, const mpz_t, unsigned long); +void mpz_lcm (mpz_t, const mpz_t, const mpz_t); +int mpz_invert (mpz_t, const mpz_t, const mpz_t); + +void mpz_sqrtrem (mpz_t, mpz_t, const mpz_t); +void mpz_sqrt (mpz_t, const mpz_t); +int mpz_perfect_square_p (const mpz_t); + +void mpz_pow_ui (mpz_t, const mpz_t, unsigned long); +void mpz_ui_pow_ui (mpz_t, unsigned long, unsigned long); +void mpz_powm (mpz_t, const mpz_t, const mpz_t, const mpz_t); +void mpz_powm_ui (mpz_t, const mpz_t, unsigned long, const mpz_t); + +void mpz_rootrem (mpz_t, mpz_t, const mpz_t, unsigned long); +int mpz_root (mpz_t, const mpz_t, unsigned long); + +void mpz_fac_ui (mpz_t, unsigned long); +void mpz_2fac_ui (mpz_t, unsigned long); +void mpz_mfac_uiui (mpz_t, unsigned long, unsigned long); +void mpz_bin_uiui (mpz_t, unsigned long, unsigned long); + +int mpz_probab_prime_p (const mpz_t, int); + +int mpz_tstbit (const mpz_t, mp_bitcnt_t); +void mpz_setbit (mpz_t, mp_bitcnt_t); +void mpz_clrbit (mpz_t, mp_bitcnt_t); +void mpz_combit (mpz_t, mp_bitcnt_t); + +void mpz_com (mpz_t, const mpz_t); +void mpz_and (mpz_t, const mpz_t, const mpz_t); +void mpz_ior (mpz_t, const mpz_t, const mpz_t); +void mpz_xor (mpz_t, const mpz_t, const mpz_t); + +mp_bitcnt_t mpz_popcount (const mpz_t); +mp_bitcnt_t mpz_hamdist (const mpz_t, const mpz_t); +mp_bitcnt_t mpz_scan0 (const mpz_t, mp_bitcnt_t); +mp_bitcnt_t mpz_scan1 (const mpz_t, mp_bitcnt_t); + +int mpz_fits_slong_p (const mpz_t); +int mpz_fits_ulong_p (const mpz_t); +int mpz_fits_sint_p (const mpz_t); +int mpz_fits_uint_p (const mpz_t); +int mpz_fits_sshort_p (const mpz_t); +int mpz_fits_ushort_p (const mpz_t); +long int mpz_get_si (const mpz_t); +unsigned long int mpz_get_ui (const mpz_t); +double mpz_get_d (const mpz_t); +size_t mpz_size (const mpz_t); +mp_limb_t mpz_getlimbn (const mpz_t, mp_size_t); + +void mpz_realloc2 (mpz_t, mp_bitcnt_t); +mp_srcptr mpz_limbs_read (mpz_srcptr); +mp_ptr mpz_limbs_modify (mpz_t, mp_size_t); +mp_ptr mpz_limbs_write (mpz_t, mp_size_t); +void mpz_limbs_finish (mpz_t, mp_size_t); +mpz_srcptr mpz_roinit_n (mpz_t, mp_srcptr, mp_size_t); + +#define MPZ_ROINIT_N(xp, xs) {{0, (xs),(xp) }} + +void mpz_set_si (mpz_t, signed long int); +void mpz_set_ui (mpz_t, unsigned long int); +void mpz_set (mpz_t, const mpz_t); +void mpz_set_d (mpz_t, double); + +void mpz_init_set_si (mpz_t, signed long int); +void mpz_init_set_ui (mpz_t, unsigned long int); +void mpz_init_set (mpz_t, const mpz_t); +void mpz_init_set_d (mpz_t, double); + +size_t mpz_sizeinbase (const mpz_t, int); +char *mpz_get_str (char *, int, const mpz_t); +int mpz_set_str (mpz_t, const char *, int); +int mpz_init_set_str (mpz_t, const char *, int); + +/* This long list taken from gmp.h. */ +/* For reference, "defined(EOF)" cannot be used here. In g++ 2.95.4, + defines EOF but not FILE. 
*/ +#if defined (FILE) \ + || defined (H_STDIO) \ + || defined (_H_STDIO) /* AIX */ \ + || defined (_STDIO_H) /* glibc, Sun, SCO */ \ + || defined (_STDIO_H_) /* BSD, OSF */ \ + || defined (__STDIO_H) /* Borland */ \ + || defined (__STDIO_H__) /* IRIX */ \ + || defined (_STDIO_INCLUDED) /* HPUX */ \ + || defined (__dj_include_stdio_h_) /* DJGPP */ \ + || defined (_FILE_DEFINED) /* Microsoft */ \ + || defined (__STDIO__) /* Apple MPW MrC */ \ + || defined (_MSL_STDIO_H) /* Metrowerks */ \ + || defined (_STDIO_H_INCLUDED) /* QNX4 */ \ + || defined (_ISO_STDIO_ISO_H) /* Sun C++ */ \ + || defined (__STDIO_LOADED) /* VMS */ \ + || defined (_STDIO) /* HPE NonStop */ \ + || defined (__DEFINED_FILE) /* musl */ +size_t mpz_out_str (FILE *, int, const mpz_t); +#endif + +void mpz_import (mpz_t, size_t, int, size_t, int, size_t, const void *); +void *mpz_export (void *, size_t *, int, size_t, int, size_t, const mpz_t); + +#if defined (__cplusplus) +} +#endif +#endif /* __MINI_GMP_H__ */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mp.h new file mode 100644 index 0000000000..b3733b520d --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mp.h @@ -0,0 +1,88 @@ +#ifndef MP_H +#define MP_H + +#include +#include +#include + +// Functions taken from the GF module + +void mp_add(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords); +digit_t mp_shiftr(digit_t *x, const unsigned int shift, const unsigned int nwords); +void multiple_mp_shiftl(digit_t *x, const unsigned int shift, const unsigned int nwords); +void mp_shiftl(digit_t *x, const unsigned int shift, const unsigned int nwords); +void MUL(digit_t *out, const digit_t a, const digit_t b); + +// Functions taken from the EC module + +void mp_sub(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords); +void select_ct(digit_t *c, const digit_t *a, const digit_t *b, const digit_t mask, const int nwords); +void swap_ct(digit_t *a, digit_t *b, const digit_t option, const int nwords); +int mp_compare(const digit_t *a, const digit_t *b, unsigned int nwords); +bool mp_is_zero(const digit_t *a, unsigned int nwords); +void mp_mul2(digit_t *c, const digit_t *a, const digit_t *b); + +// Further functions for multiprecision arithmetic +void mp_print(const digit_t *a, size_t nwords); +void mp_copy(digit_t *b, const digit_t *a, size_t nwords); +void mp_neg(digit_t *a, unsigned int nwords); +bool mp_is_one(const digit_t *x, unsigned int nwords); +void mp_mul(digit_t *c, const digit_t *a, const digit_t *b, size_t nwords); +void mp_mod_2exp(digit_t *a, unsigned int e, unsigned int nwords); +void mp_inv_2e(digit_t *b, const digit_t *a, int e, unsigned int nwords); +void mp_invert_matrix(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, int e, unsigned int nwords); + +#define mp_is_odd(x, nwords) (((nwords) != 0) & (int)(x)[0]) +#define mp_is_even(x, nwords) (!mp_is_odd(x, nwords)) + +/********************** Constant-time unsigned comparisons ***********************/ + +// The following functions return 1 (TRUE) if condition is true, 0 (FALSE) otherwise +static inline unsigned int +is_digit_nonzero_ct(digit_t x) +{ // Is x != 0? + return (unsigned int)((x | (0 - x)) >> (RADIX - 1)); +} + +static inline unsigned int +is_digit_zero_ct(digit_t x) +{ // Is x = 0? + return (unsigned int)(1 ^ is_digit_nonzero_ct(x)); +} + +static inline unsigned int +is_digit_lessthan_ct(digit_t x, digit_t y) +{ // Is x < y? 
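+    // Branch-free: the top bit of x ^ ((x ^ y) | ((x - y) ^ y)) equals the
+    // borrow of the subtraction x - y, so shifting it down by RADIX - 1
+    // yields 1 exactly when x < y as unsigned digits.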
+ return (unsigned int)((x ^ ((x ^ y) | ((x - y) ^ y))) >> (RADIX - 1)); +} + +/********************** Platform-independent macros for digit-size operations + * **********************/ + +// Digit addition with carry +#define ADDC(sumOut, carryOut, addend1, addend2, carryIn) \ + { \ + digit_t tempReg = (addend1) + (digit_t)(carryIn); \ + (sumOut) = (addend2) + tempReg; \ + (carryOut) = (is_digit_lessthan_ct(tempReg, (digit_t)(carryIn)) | is_digit_lessthan_ct((sumOut), tempReg)); \ + } + +// Digit subtraction with borrow +#define SUBC(differenceOut, borrowOut, minuend, subtrahend, borrowIn) \ + { \ + digit_t tempReg = (minuend) - (subtrahend); \ + unsigned int borrowReg = \ + (is_digit_lessthan_ct((minuend), (subtrahend)) | ((borrowIn) & is_digit_zero_ct(tempReg))); \ + (differenceOut) = tempReg - (digit_t)(borrowIn); \ + (borrowOut) = borrowReg; \ + } + +// Shift right with flexible datatype +#define SHIFTR(highIn, lowIn, shift, shiftOut, DigitSize) \ + (shiftOut) = ((lowIn) >> (shift)) ^ ((highIn) << (DigitSize - (shift))); + +// Digit shift left +#define SHIFTL(highIn, lowIn, shift, shiftOut, DigitSize) \ + (shiftOut) = ((highIn) << (shift)) ^ ((lowIn) >> (RADIX - (shift))); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion.h new file mode 100644 index 0000000000..a567657464 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion.h @@ -0,0 +1,708 @@ +/** @file + * + * @authors Luca De Feo, Sina Schaeffler + * + * @brief Declarations for quaternion algebra operations + */ + +#ifndef QUATERNION_H +#define QUATERNION_H + +// #include +#include +#include "intbig.h" +#include + +/** @defgroup quat_quat Quaternion algebra + * @{ + */ + +/** @defgroup quat_vec_t Types for integer vectors and matrices + * @{ + */ + +/** @brief Type for vector of 2 big integers + * + * @typedef ibz_vec_2_t + */ +typedef ibz_t ibz_vec_2_t[2]; + +/** @brief Type for vectors of 4 integers + * + * @typedef ibz_vec_4_t + * + * Represented as a vector of 4 ibz_t (big integer) elements + */ +typedef ibz_t ibz_vec_4_t[4]; + +/** @brief Type for 2 by 2 matrices of integers + * + * @typedef ibz_mat_2x2_t + * + * Represented as a matrix of 2 vectors of 2 ibz_t (big integer) elements + */ +typedef ibz_t ibz_mat_2x2_t[2][2]; + +/** @brief Type for 4 by 4 matrices of integers + * + * @typedef ibz_mat_4x4_t + * + * Represented as a matrix of 4 vectors of 4 ibz_t (big integer) elements + */ +typedef ibz_t ibz_mat_4x4_t[4][4]; +/** + * @} + */ + +/** @defgroup quat_quat_t Types for quaternion algebras + * @{ + */ + +/** @brief Type for quaternion algebras + * + * @typedef quat_alg_t + * + * @struct quat_alg + * + * The quaternion algebra ramified at p = 3 mod 4 and ∞. + */ +typedef struct quat_alg +{ + ibz_t p; ///< Prime number, must be = 3 mod 4. +} quat_alg_t; + +/** @brief Type for quaternion algebra elements + * + * @typedef quat_alg_elem_t + * + * @struct quat_alg_elem + * + * Represented as a array *coord* of 4 ibz_t integers and a common ibz_t denominator *denom*. + * + * The representation is not necessarily normalized, that is, gcd(denom, content(coord)) might not + * be 1. 
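+ * (For example, coord = (2, 4, 6, 8) with denom = 10 and coord = (1, 2, 3, 4) with denom = 5
+ * represent the same element; the latter is the normalized form.)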
For getting a normalized representation, use the quat_alg_normalize function + * + * The elements are always represented in basis (1,i,j,ij) of the quaternion algebra, with i^2=-1 + * and j^2 = -p + */ +typedef struct quat_alg_elem +{ + ibz_t denom; ///< Denominator by which all coordinates are divided (big integer, must not be 0) + ibz_vec_4_t coord; ///< Numerators of the 4 coordinates of the quaternion algebra element in basis (1,i,j,ij) +} quat_alg_elem_t; + +/** @brief Type for lattices in dimension 4 + * + * @typedef quat_lattice_t + * + * @struct quat_lattice + * + * Represented as a rational (`frac`) times an integreal lattice (`basis`) + * + * The basis is such that its columns divided by its denominator are elements of + * the quaternion algebra, represented in basis (1,i,j,ij) where i^2 = -1, j^2 = -p. + * + * All lattices must have full rank (4) + */ +typedef struct quat_lattice +{ + ibz_t denom; ///< Denominator by which the basis is divided (big integer, must not be 0) + ibz_mat_4x4_t basis; ///< Integer basis of the lattice (its columns divided by denom are + ///< algebra elements in the usual basis) +} quat_lattice_t; + +/** @brief Type for left ideals of maximal orders in quaternion algebras + * + * @typedef quat_left_ideal_t + * + * @struct quat_left_ideal + * + * The basis of the lattice representing it is such that its columns divided by its denominator are + * elements of the quaternion algebra, represented in basis (1,i,j,ij) where i^2 = -1, j^2 = -p. + */ +typedef struct quat_left_ideal +{ + quat_lattice_t lattice; ///< lattice representing the ideal + ibz_t norm; ///< norm of the lattice + const quat_lattice_t *parent_order; ///< should be a maximal order +} quat_left_ideal_t; +/** @} + */ + +/** @brief Type for extremal maximal orders + * + * @typedef quat_p_extremal_maximal_order_t + * + * @struct quat_p_extremal_maximal_order + * + * The basis of the order representing it is in hermite normal form, and its columns divid +ed by its denominator are elements of the quaternion algebra, represented in basis (1,z,t, +tz) where z^2 = -q, t^2 = -p. 
+*/ +typedef struct quat_p_extremal_maximal_order +{ + quat_lattice_t order; ///< the order represented as a lattice + quat_alg_elem_t z; ///< the element of small discriminant + quat_alg_elem_t t; ///< the element of norm p orthogonal to z + uint32_t q; ///< the absolute value of the square of z +} quat_p_extremal_maximal_order_t; + +/** @brief Type for represent integer parameters + * + * @typedef quat_p_extremal_maximal_order_t + * + * @struct quat_p_extremal_maximal_order + * + */ +typedef struct quat_represent_integer_params +{ + int primality_test_iterations; ///< Primality test iterations + const quat_p_extremal_maximal_order_t *order; ///< The standard extremal maximal order + const quat_alg_t *algebra; ///< The quaternion algebra +} quat_represent_integer_params_t; + +/*************************** Functions *****************************/ + +/** @defgroup quat_c Constructors and Destructors + * @{ + */ +void quat_alg_init_set(quat_alg_t *alg, const ibz_t *p); +void quat_alg_finalize(quat_alg_t *alg); + +void quat_alg_elem_init(quat_alg_elem_t *elem); +void quat_alg_elem_finalize(quat_alg_elem_t *elem); + +void ibz_vec_2_init(ibz_vec_2_t *vec); +void ibz_vec_2_finalize(ibz_vec_2_t *vec); + +void ibz_vec_4_init(ibz_vec_4_t *vec); +void ibz_vec_4_finalize(ibz_vec_4_t *vec); + +void ibz_mat_2x2_init(ibz_mat_2x2_t *mat); +void ibz_mat_2x2_finalize(ibz_mat_2x2_t *mat); + +void ibz_mat_4x4_init(ibz_mat_4x4_t *mat); +void ibz_mat_4x4_finalize(ibz_mat_4x4_t *mat); + +void quat_lattice_init(quat_lattice_t *lat); +void quat_lattice_finalize(quat_lattice_t *lat); + +void quat_left_ideal_init(quat_left_ideal_t *lideal); +void quat_left_ideal_finalize(quat_left_ideal_t *lideal); +/** @} + */ + +/** @defgroup quat_printers Print functions for types from the quaternion module + * @{ + */ +void ibz_mat_2x2_print(const ibz_mat_2x2_t *mat); +void ibz_mat_4x4_print(const ibz_mat_4x4_t *mat); +void ibz_vec_2_print(const ibz_vec_2_t *vec); +void ibz_vec_4_print(const ibz_vec_4_t *vec); + +void quat_lattice_print(const quat_lattice_t *lat); +void quat_alg_print(const quat_alg_t *alg); +void quat_alg_elem_print(const quat_alg_elem_t *elem); +void quat_left_ideal_print(const quat_left_ideal_t *lideal); + +/** @} + */ + +/** @defgroup quat_int Integer functions for quaternion algebra + * @{ + */ + +/** @defgroup quat_int_mat Integer matrix and vector functions + * @{ + */ + +/** @brief Copy matrix + * + * @param copy Output: Matrix into which copied will be copied + * @param copied + */ +void ibz_mat_2x2_copy(ibz_mat_2x2_t *copy, const ibz_mat_2x2_t *copied); + +/** + * @brief Inverse of 2x2 integer matrices modulo m + * + * @param inv Output matrix + * @param mat Input matrix + * @param m Integer modulo + * @return 1 if inverse exists 0 otherwise + */ +int ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m); + +/** @brief mat*vec in dimension 2 for integers + * + * @param res Output vector + * @param mat Input vector + * @param vec Input vector + */ +void ibz_mat_2x2_eval(ibz_vec_2_t *res, const ibz_mat_2x2_t *mat, const ibz_vec_2_t *vec); + +/** @brief Copies all values from a 4x4 integer matrix to another one + * + * @param new Output: matrix which will have its entries set to mat's entries + * @param mat Input matrix + */ +void ibz_mat_4x4_copy(ibz_mat_4x4_t *new, + const ibz_mat_4x4_t *mat); // dim4, lattice, test/dim4, ideal + +/** @brief transpose a 4x4 integer matrix + * + * @param transposed Output: is set to the transposition of mat + * @param mat Input matrix + */ 
+void ibz_mat_4x4_transpose(ibz_mat_4x4_t *transposed, const ibz_mat_4x4_t *mat); + +/** @brief a*b for a,b integer 4x4 matrices + * + * Naive implementation + * + * @param res Output: A 4x4 integer matrix + * @param a + * @param b + */ +void ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t *b); + +/** @brief divides all values in matrix by same scalar + * + * @returns 1 if scalar divided all values in mat, 0 otherwise (division is performed in both cases) + * @param quot Output + * @param scalar + * @param mat + */ +int ibz_mat_4x4_scalar_div(ibz_mat_4x4_t *quot, const ibz_t *scalar, const ibz_mat_4x4_t *mat); + +/** + * @brief mat*vec + * + * + * @param res Output: coordinate vector + * @param mat Integer 4x4 matrix + * @param vec Integer vector (coordinate vector) + * + * Multiplies 4x4 integer matrix mat by a 4-integers column vector vec + */ +void ibz_mat_4x4_eval(ibz_vec_4_t *res, const ibz_mat_4x4_t *mat, const ibz_vec_4_t *vec); + +/** + * @brief vec*mat + * + * + * @param res Output: coordinate vector. + * @param vec Integer vector (coordinate vector) + * @param mat Integer 4x4 matrix + * + * Multiplies 4x4 integer matrix mat by a 4-integers row vector vec (on the left) + */ +void ibz_mat_4x4_eval_t(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_mat_4x4_t *mat); + +/** @} + */ + +/** @defgroup quat_integer Higher-level integer functions for quaternion algebra + * @{ + */ + +/** + * @brief Generates a random prime + * + * A number is accepted as prime if it passes a 30-round Miller-Rabin test. + * This function is fairly inefficient and mostly meant for tests. + * + * @returns 1 if a prime is found, 0 otherwise + * @param p Output: The prime (if found) + * @param is3mod4 If 1, the prime is required to be 3 mod 4, if 0 no congruence condition is imposed + * @param bitsize Maximal size of output prime + * @param probability_test_iterations Miller-Rabin iteartions for probabilistic primality testing in + * rejection sampling + */ +int ibz_generate_random_prime(ibz_t *p, int is3mod4, int bitsize, int probability_test_iterations); + +/** + * @brief Find integers x and y such that x^2 + n*y^2 = p + * + * Uses Cornacchia's algorithm, should be used only for prime p + * + * @param x Output + * @param y Output + * @param n first parameter defining the equation + * @param p seond parameter defining the equation, must be prime + * @return 1 if success, 0 otherwise + */ +int ibz_cornacchia_prime(ibz_t *x, ibz_t *y, const ibz_t *n, const ibz_t *p); + +/** @} + */ + +/** @defgroup quat_qf Quadratic form functions + * @{ + */ + +/** + * @brief Quadratic form evaluation + * + * qf and coord must be represented in the same basis. 
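+ * (That is, res is set to the value of the form at coord, i.e. coord^T * qf * coord
+ * under the usual matrix convention.)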
+ * + * @param res Output: coordinate vector + * @param qf Quadratic form (4x4 integer matrix) + * @param coord Integer vector (coordinate vector) + */ +void quat_qf_eval(ibz_t *res, const ibz_mat_4x4_t *qf, const ibz_vec_4_t *coord); +/** @} + */ + +/** @} + */ + +/** @defgroup quat_quat_f Quaternion algebra functions + * @{ + */ +/** + * @brief Copies an algebra element + * + * @param copy Output: The element into which another one is copied + * @param copied Source element copied into copy + */ +void quat_alg_elem_copy(quat_alg_elem_t *copy, const quat_alg_elem_t *copied); + +void quat_alg_mul(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b, const quat_alg_t *alg); + +/** @brief reduced norm of alg_elem x + * + * @param res_num Output: rational which will contain the numerator of the reduced norm of a + * @param res_denom Output: rational which will contain the denominator of the reduced norm of a (it + * is 1 if the norm is integer) + * @param x Algebra element whose norm is computed + * @param alg The quaternion algebra + */ +void quat_alg_norm(ibz_t *res_num, ibz_t *res_denom, const quat_alg_elem_t *x, const quat_alg_t *alg); + +/** @brief Normalize representation of alg_elem x + * + * @param x Algebra element whose representation will be normalized + * + * Modification of x. + * Sets coord and denom of x so that gcd(denom, content(coord))=1 + * without changing the value of x = (coord0/denom, coord1/denom, coord2/denom, coord3/denom). + */ +void quat_alg_normalize(quat_alg_elem_t *x); + +/** + * @brief Standard involution in a quaternion algebra + * + * @param conj Output: image of x by standard involution of the quaternion algebra alg + * @param x element of alg whose image is searched + */ +void quat_alg_conj(quat_alg_elem_t *conj, const quat_alg_elem_t *x); + +/** + * @brief Given `x` ∈ `order`, factor it into its primitive and impritive parts + * + * Given `x` ∈ `order`, return a coordinate vector `primitive_x` and an integer `content` + * such that `x` = `content` · Λ `primitive_x`, where Λ is the basis of `order` + * and `x` / `content` is primitive in `order`. + * + * @param primitive_x Output: coordinates of a primitive element of `order` (in `order`'s basis) + * @param content Output: content of `x`'s coordinate vector in order's basis + * @param order order of `alg` + * @param x element of order, must be in `order` + */ +void quat_alg_make_primitive(ibz_vec_4_t *primitive_x, + ibz_t *content, + const quat_alg_elem_t *x, + const quat_lattice_t *order); + +// end quat_quat_f +/** @} + */ + +/** @defgroup quat_lat_f Lattice functions + * @{ + */ + +void quat_lattice_intersect(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2); + +/** + * @brief Test whether x ∈ lat. If so, compute its coordinates in lat's basis. + * + * @param coord Output: Set to the coordinates of x in lat. May be NULL. + * @param lat The lattice, not necessarily in HNF but full rank + * @param x An element of the quaternion algebra + * @return true if x ∈ lat + */ +int quat_lattice_contains(ibz_vec_4_t *coord, const quat_lattice_t *lat, const quat_alg_elem_t *x); + +/** + * @brief Conjugate of a lattice with basis not in HNF + * + * @param conj Output: The lattice conjugate to lat. 
ATTENTION: is not under HNF + * @param lat Input lattice + */ +void quat_lattice_conjugate_without_hnf(quat_lattice_t *conj, const quat_lattice_t *lat); + +/** + * @brief Multiply a lattice and an algebra element + * + * The element is multiplied to the right of the lattice + * + * @param prod Output: Lattice lat*elem + * @param lat Input lattice + * @param elem Algebra element + * @param alg The quaternion algebra + */ +void quat_lattice_alg_elem_mul(quat_lattice_t *prod, + const quat_lattice_t *lat, + const quat_alg_elem_t *elem, + const quat_alg_t *alg); // ideal + +/** + * @brief Sample from the intersection of a lattice with a ball + * + * Sample a uniform non-zero vector of norm ≤ `radius` from the lattice. + * + * @param res Output: sampled quaternion from the lattice + * @param lattice Input lattice + * @param alg The quaternion algebra + * @param radius The ball radius (quaternion norm) + * @return 0 if an error occurred (ball too small or RNG error), 1 otherwise + */ +int quat_lattice_sample_from_ball(quat_alg_elem_t *res, + const quat_lattice_t *lattice, + const quat_alg_t *alg, + const ibz_t *radius); + +// end quat_lat_f +/** @} + */ + +/** @defgroup quat_lideal_f Functions for left ideals + * @{ + */ + +/** @defgroup quat_lideal_c Creating left ideals + * @{ + */ + +/** + * @brief Left ideal of order, generated by x and N as order*x+order*N + * + * @param lideal Output: left ideal + * @param alg quaternion algebra + * @param order maximal order of alg whose left ideal is searched + * @param x generating element. Must be non-zero + * @param N generating integer + * + * Creates the left ideal in order generated by the element x and the integer N. + * If x is not divisible (inside the order) by any integer divisor n>1 of N, + * then the norm of the output ideal is N. + * + */ +void quat_lideal_create(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const ibz_t *N, + const quat_lattice_t *order, + const quat_alg_t *alg); + +/** @} + */ + +/** @defgroup quat_lideal_gen Generators of left ideals + * @{ + */ + +/** + * @brief Generator of 'lideal' + * + * @returns 1 if such a generator was found, 0 otherwise + * @param gen Output: non scalar generator of lideal + * @param lideal left ideal + * @param alg the quaternion algebra + * + * Ideal is generated by gen and the ideal's norm + * + * Bound has as default value QUATERNION_lideal_generator_search_bound + */ +int quat_lideal_generator(quat_alg_elem_t *gen, const quat_left_ideal_t *lideal, const quat_alg_t *alg); +/** @} + */ + +/** @defgroup quat_lideal_op Operations on left ideals + * @{ + */ + +/** + * @brief Copies an ideal + * + * @param copy Output: The ideal into which another one is copied + * @param copied Source ideal copied into copy. The parent order is not copied (only the pointer). 
+ */ +void quat_lideal_copy(quat_left_ideal_t *copy, const quat_left_ideal_t *copied); + +/** + * @brief Conjugate of a left ideal (not in HNF) + * + * @param conj Output: Ideal conjugate to lideal, with norm and parent order correctly set, but its + * lattice not in HNF + * @param new_parent_order Output: Will be set to the right order of lideal, and serve as parent + * order for conj (so must have at least the lifetime of conj) + * @param lideal input left ideal (of which conj will be the conjugate) + * @param alg the quaternion algebra + */ +void quat_lideal_conjugate_without_hnf(quat_left_ideal_t *conj, + quat_lattice_t *new_parent_order, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg); + +/** + * @brief Intersection of two left ideals + * + * @param intersection Output: Left ideal which is the intersection of the 2 inputs + * @param lideal1 left ideal + * @param lideal2 left ideal + * @param alg the quaternion algebra + */ +void quat_lideal_inter(quat_left_ideal_t *intersection, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); + +/** + * @brief L2-reduce the basis of the left ideal, without considering its denominator + * + * This function reduce the basis of the lattice of the ideal, but it does completely ignore its + * denominator. So the outputs of this function must still e divided by the appropriate power of + * lideal.lattice.denom. + * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param reduced Output: Lattice defining the ideal, which has its basis in a lll-reduced form. + * Must be divided by lideal.lattice.denom before usage + * @param gram Output: Matrix of the quadratic form given by the norm on the basis of the reduced + * ideal, divided by the norm of the ideal + * @param lideal ideal whose basis will be reduced + * @param alg the quaternion algebra + */ +void quat_lideal_reduce_basis(ibz_mat_4x4_t *reduced, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg); // replaces lideal_lll + +/** + * @brief Multplies two ideals and L2-reduces the lattice of the result + * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param prod Output: The product ideal with its lattice basis being L2-reduced + * @param gram Output: Gram matrix of the reduced norm (as quadratic but not bilinear form) on the + * basis of prod, divided by the norm of prod + * @param lideal1 Ideal at left in the product + * @param lideal2 Ideal at right in the product + * @param alg The quaternion algebra + */ +void quat_lideal_lideal_mul_reduced(quat_left_ideal_t *prod, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); + +/** + * @brief Replaces an ideal by a smaller equivalent one of prime norm + * + * @returns 1 if the computation succeeded and 0 otherwise + * @param lideal In- and Output: Ideal to be replaced + * @param alg The quaternion algebra + * @param primality_num_iter number of repetition for primality testing + * @param equiv_bound_coeff bound on the coefficients for the candidates + */ +int quat_lideal_prime_norm_reduced_equivalent(quat_left_ideal_t *lideal, + const quat_alg_t *alg, + const int primality_num_iter, + const int equiv_bound_coeff); + +/** @} 
+ */ + +// end quat_lideal_f +/** @} + */ + +/** @defgroup quat_normeq Functions specific to special extremal maximal orders + * @{ + */ + +/** + * @brief Representing an integer by the quadratic norm form of a maximal extremal order + * + * @returns 1 if the computation succeeded + * @param gamma Output: a quaternion element + * @param n_gamma Target norm of gamma. n_gamma must be odd. If n_gamma/(p*params.order->q) < + * 2^QUAT_repres_bound_input failure is likely + * @param non_diag If set to 1 (instead of 0) and the order is O0, an additional property is ensured + * @param params Represent integer parameters specifying the algebra, the special extremal order, + * the number of trials for finding gamma and the number of iterations of the primality test. + * Special requirements apply if non-diag is set to 1 + * + * This algorithm finds a primitive quaternion element gamma of n_gamma inside any maximal extremal + * order. Failure is possible. Most efficient for the standard order. + * + * If non-diag is set to 1,this algorithm finds a primitive quaternion element gamma with some + * special properties used in fixed degree isogeny of n_gamma inside any maximal extremal order such + * that params->order->q=1 mod 4. Failure is possible. Most efficient for the standard order. The + * most important property is to avoid diagonal isogenies, meaning that the gamma returned by the + * algorithm must not be contained inside ZZ + 2 O where O is the maximal order params->order When O + * is the special order O0 corresponding to j=1728, we further need to avoid endomorphisms of E0xE0 + * and there is another requirement + * + * If non-diag is set to 1, the number of trials for finding gamma (in params), the number of + * iterations of the primality test and the value of params->order->q is required to be 1 mod 4 + */ +int quat_represent_integer(quat_alg_elem_t *gamma, + const ibz_t *n_gamma, + int non_diag, + const quat_represent_integer_params_t *params); + +/** @brief Basis change to (1,i,(i+j)/2,(1+ij)/2) for elements of O0 + * + * Change the basis in which an element is give from 1,i,j,ij to (1,i,(i+j)/2,(1+ij)/2) the ususal + * basis of the special maximal order O0 Only for elements of O0 + * + * @param vec Output: Coordinates of el in basis (1,i,(i+j)/2,(1+ij)/2) + * @param el Imput: An algebra element in O0 + */ +void quat_change_to_O0_basis(ibz_vec_4_t *vec, const quat_alg_elem_t *el); + +/** + * @brief Random O0-ideal of given norm + * + * Much faster if norm is prime and is_prime is set to 1 + * + * @param lideal Output: O0-ideal of norm norm + * @param norm Norm of the ideal to be found + * @param is_prime Indicates if norm is prime: 1 if it is, 0 otherwise + * @param params Represent Integer parameters from the level-dependent constants + * @param prime_cofactor Prime distinct from the prime p defining the algebra but of similar size + * and coprime to norm. If is_prime is 1, it might be NULL. 
+ * @returns 1 if success, 0 if no ideal found or randomness failed + */ +int quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, + const ibz_t *norm, + int is_prime, + const quat_represent_integer_params_t *params, + const ibz_t *prime_cofactor); +// end quat_normeq +/** @} + */ +// end quat_quat +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion_constants.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion_constants.h new file mode 100644 index 0000000000..a2f4b52b93 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion_constants.h @@ -0,0 +1,6 @@ +#include +#define QUAT_primality_num_iter 32 +#define QUAT_repres_bound_input 21 +#define QUAT_equiv_bound_coeff 64 +#define FINDUV_box_size 3 +#define FINDUV_cube_size 2400 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion_data.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion_data.c new file mode 100644 index 0000000000..98b792431a --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion_data.c @@ -0,0 +1,3176 @@ +#include +#include +#include +const ibz_t QUAT_prime_cofactor = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x33,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x200}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x33,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x2000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x33,0x0,0x0,0x0,0x0,0x0,0x0,0x200000000000000}}} +#endif +; +const quat_alg_t QUATALG_PINFTY = { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x1af}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x1afffff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0x1afffffffffffff}}} +#endif +}; +const quat_p_extremal_maximal_order_t EXTREMAL_ORDERS[7] = {{{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} 
+#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 
+#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 1}, {{ +#if 0 
+#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc584,0x10dc,0x9924,0xb9c2,0x5a67,0xfbc8,0xbdfb,0xe941,0xc61,0xdee2,0xcd5c,0xc570,0xa2d4,0xb3ff,0x2e51,0x4927}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x10dcc584,0xb9c29924,0xfbc85a67,0xe941bdfb,0xdee20c61,0xc570cd5c,0xb3ffa2d4,0x49272e51}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb9c2992410dcc584,0xe941bdfbfbc85a67,0xc570cd5cdee20c61,0x49272e51b3ffa2d4}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc584,0x10dc,0x9924,0xb9c2,0x5a67,0xfbc8,0xbdfb,0xe941,0xc61,0xdee2,0xcd5c,0xc570,0xa2d4,0xb3ff,0x2e51,0x4927}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x10dcc584,0xb9c29924,0xfbc85a67,0xe941bdfb,0xdee20c61,0xc570cd5c,0xb3ffa2d4,0x49272e51}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb9c2992410dcc584,0xe941bdfbfbc85a67,0xc570cd5cdee20c61,0x49272e51b3ffa2d4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x62c2,0x86e,0x4c92,0xdce1,0x2d33,0xfde4,0xdefd,0xf4a0,0x630,0x6f71,0x66ae,0x62b8,0xd16a,0xd9ff,0x9728,0x2493}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x86e62c2,0xdce14c92,0xfde42d33,0xf4a0defd,0x6f710630,0x62b866ae,0xd9ffd16a,0x24939728}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdce14c92086e62c2,0xf4a0defdfde42d33,0x62b866ae6f710630,0x24939728d9ffd16a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xc93,0xd634,0x4632,0x353f,0x76ba,0x220d,0x5084,0x3187,0xb121,0xfc8,0x6860,0xa3e4,0x1368,0x7388,0x5e0,0x7e52}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xd6340c93,0x353f4632,0x220d76ba,0x31875084,0xfc8b121,0xa3e46860,0x73881368,0x7e5205e0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x353f4632d6340c93,0x31875084220d76ba,0xa3e468600fc8b121,0x7e5205e073881368}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x2f6d,0xbfbd,0x6af0,0xbcd3,0x5c61,0x8f62,0x9b0b,0xd78a,0x3142,0x61aa,0x4716,0x208,0x93c7,0x43bd,0x97d6,0xda1a,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xd7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbfbd2f6d,0xbcd36af0,0x8f625c61,0xd78a9b0b,0x61aa3142,0x2084716,0x43bd93c7,0xda1a97d6,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xd7ffff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbcd36af0bfbd2f6d,0xd78a9b0b8f625c61,0x208471661aa3142,0xda1a97d643bd93c7,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xd7ffffffffffff}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x62c2,0x86e,0x4c92,0xdce1,0x2d33,0xfde4,0xdefd,0xf4a0,0x630,0x6f71,0x66ae,0x62b8,0xd16a,0xd9ff,0x9728,0x2493}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x86e62c2,0xdce14c92,0xfde42d33,0xf4a0defd,0x6f710630,0x62b866ae,0xd9ffd16a,0x24939728}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdce14c92086e62c2,0xf4a0defdfde42d33,0x62b866ae6f710630,0x24939728d9ffd16a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9add,0x156b,0x8705,0x6bb9,0x8bdf,0xd034,0x21a6,0xb827,0x44e9,0x34c7,0x3da3,0xa9fd,0xcebd,0x3ec0,0xcd63,0xca1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x156b9add,0x6bb98705,0xd0348bdf,0xb82721a6,0x34c744e9,0xa9fd3da3,0x3ec0cebd,0xca1cd63}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x6bb98705156b9add,0xb82721a6d0348bdf,0xa9fd3da334c744e9,0xca1cd633ec0cebd}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc584,0x10dc,0x9924,0xb9c2,0x5a67,0xfbc8,0xbdfb,0xe941,0xc61,0xdee2,0xcd5c,0xc570,0xa2d4,0xb3ff,0x2e51,0x4927}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x10dcc584,0xb9c29924,0xfbc85a67,0xe941bdfb,0xdee20c61,0xc570cd5c,0xb3ffa2d4,0x49272e51}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb9c2992410dcc584,0xe941bdfbfbc85a67,0xc570cd5cdee20c61,0x49272e51b3ffa2d4}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xc93,0xd634,0x4632,0x353f,0x76ba,0x220d,0x5084,0x3187,0xb121,0xfc8,0x6860,0xa3e4,0x1368,0x7388,0x5e0,0x7e52}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xd6340c93,0x353f4632,0x220d76ba,0x31875084,0xfc8b121,0xa3e46860,0x73881368,0x7e5205e0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x353f4632d6340c93,0x31875084220d76ba,0xa3e468600fc8b121,0x7e5205e073881368}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}} +#endif +}}, 5}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x169a,0xd34,0xbfd7,0x4089,0x1be8,0xc17d,0xfefb,0x6efd,0x76e0,0x58ea,0xfedb,0xd204,0x1e3,0x3b9a,0x47d6,0xdf28,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xd34169a,0x4089bfd7,0xc17d1be8,0x6efdfefb,0x58ea76e0,0xd204fedb,0x3b9a01e3,0xdf2847d6,0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x4089bfd70d34169a,0x6efdfefbc17d1be8,0xd204fedb58ea76e0,0xdf2847d63b9a01e3,0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x169a,0xd34,0xbfd7,0x4089,0x1be8,0xc17d,0xfefb,0x6efd,0x76e0,0x58ea,0xfedb,0xd204,0x1e3,0x3b9a,0x47d6,0xdf28,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xd34169a,0x4089bfd7,0xc17d1be8,0x6efdfefb,0x58ea76e0,0xd204fedb,0x3b9a01e3,0xdf2847d6,0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x4089bfd70d34169a,0x6efdfefbc17d1be8,0xd204fedb58ea76e0,0xdf2847d63b9a01e3,0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xb4d,0x869a,0xdfeb,0x2044,0x8df4,0xe0be,0xff7d,0x377e,0x3b70,0xac75,0x7f6d,0xe902,0xf1,0x1dcd,0x23eb,0x6f94,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x869a0b4d,0x2044dfeb,0xe0be8df4,0x377eff7d,0xac753b70,0xe9027f6d,0x1dcd00f1,0x6f9423eb,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2044dfeb869a0b4d,0x377eff7de0be8df4,0xe9027f6dac753b70,0x6f9423eb1dcd00f1,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x2412,0x6ec4,0x4dda,0x2cde,0x281d,0xaaa7,0x3a33,0xc1d6,0x5c26,0x22e3,0x816d,0x13fb,0xac81,0x58e8,0xd1a7,0xadaa,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x6ec42412,0x2cde4dda,0xaaa7281d,0xc1d63a33,0x22e35c26,0x13fb816d,0x58e8ac81,0xadaad1a7,0xc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x2cde4dda6ec42412,0xc1d63a33aaa7281d,0x13fb816d22e35c26,0xadaad1a758e8ac81,0xc}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 
32, ._mp_d = (mp_limb_t[]) {0x1f45,0x5630,0xd526,0x9cc7,0x1aab,0x114d,0x87b3,0xbb27,0xc6b6,0xe50,0x8bb4,0x813f,0xff7a,0xf810,0xa8d3,0x66ee,0xfffc,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x35f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x56301f45,0x9cc7d526,0x114d1aab,0xbb2787b3,0xe50c6b6,0x813f8bb4,0xf810ff7a,0x66eea8d3,0xfffffffc,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x35fffff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9cc7d52656301f45,0xbb2787b3114d1aab,0x813f8bb40e50c6b6,0x66eea8d3f810ff7a,0xfffffffffffffffc,0xffffffffffffffff,0xffffffffffffffff,0x35fffffffffffff}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xb4d,0x869a,0xdfeb,0x2044,0x8df4,0xe0be,0xff7d,0x377e,0x3b70,0xac75,0x7f6d,0xe902,0xf1,0x1dcd,0x23eb,0x6f94,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x869a0b4d,0x2044dfeb,0xe0be8df4,0x377eff7d,0xac753b70,0xe9027f6d,0x1dcd00f1,0x6f9423eb,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2044dfeb869a0b4d,0x377eff7de0be8df4,0xe9027f6dac753b70,0x6f9423eb1dcd00f1,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x233f,0x38d9,0x6fc1,0xa333,0xaeb,0xce6a,0x2a4c,0xa1c1,0x274c,0xa9fc,0xd4c6,0xb0b3,0x555b,0x7a48,0x411a,0x2bdc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x38d9233f,0xa3336fc1,0xce6a0aeb,0xa1c12a4c,0xa9fc274c,0xb0b3d4c6,0x7a48555b,0x2bdc411a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, 
._mp_d = (mp_limb_t[]) {0xa3336fc138d9233f,0xa1c12a4cce6a0aeb,0xb0b3d4c6a9fc274c,0x2bdc411a7a48555b}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x169a,0xd34,0xbfd7,0x4089,0x1be8,0xc17d,0xfefb,0x6efd,0x76e0,0x58ea,0xfedb,0xd204,0x1e3,0x3b9a,0x47d6,0xdf28,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xd34169a,0x4089bfd7,0xc17d1be8,0x6efdfefb,0x58ea76e0,0xd204fedb,0x3b9a01e3,0xdf2847d6,0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x4089bfd70d34169a,0x6efdfefbc17d1be8,0xd204fedb58ea76e0,0xdf2847d63b9a01e3,0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x2412,0x6ec4,0x4dda,0x2cde,0x281d,0xaaa7,0x3a33,0xc1d6,0x5c26,0x22e3,0x816d,0x13fb,0xac81,0x58e8,0xd1a7,0xadaa,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x6ec42412,0x2cde4dda,0xaaa7281d,0xc1d63a33,0x22e35c26,0x13fb816d,0x58e8ac81,0xadaad1a7,0xc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x2cde4dda6ec42412,0xc1d63a33aaa7281d,0x13fb816d22e35c26,0xadaad1a758e8ac81,0xc}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 
+{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 37}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xc31e,0x416e,0xe2a5,0x25c2,0xa926,0x45fa,0x578b,0xf395,0x6ac9,0x7901,0xd14d,0xd089,0x9e5c,0xe062,0xbc4,0x583b,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x416ec31e,0x25c2e2a5,0x45faa926,0xf395578b,0x79016ac9,0xd089d14d,0xe0629e5c,0x583b0bc4,0x3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25c2e2a5416ec31e,0xf395578b45faa926,0xd089d14d79016ac9,0x583b0bc4e0629e5c,0x3}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xc31e,0x416e,0xe2a5,0x25c2,0xa926,0x45fa,0x578b,0xf395,0x6ac9,0x7901,0xd14d,0xd089,0x9e5c,0xe062,0xbc4,0x583b,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x416ec31e,0x25c2e2a5,0x45faa926,0xf395578b,0x79016ac9,0xd089d14d,0xe0629e5c,0x583b0bc4,0x3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25c2e2a5416ec31e,0xf395578b45faa926,0xd089d14d79016ac9,0x583b0bc4e0629e5c,0x3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x618f,0xa0b7,0x7152,0x12e1,0x5493,0xa2fd,0xabc5,0xf9ca,0xb564,0xbc80,0xe8a6,0x6844,0x4f2e,0x7031,0x85e2,0xac1d,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xa0b7618f,0x12e17152,0xa2fd5493,0xf9caabc5,0xbc80b564,0x6844e8a6,0x70314f2e,0xac1d85e2,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x12e17152a0b7618f,0xf9caabc5a2fd5493,0x6844e8a6bc80b564,0xac1d85e270314f2e,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4c2,0x1467,0x5829,0xf8ca,0x2d31,0xb661,0xa4a1,0x434a,0x25cb,0xbffa,0xe232,0x8565,0x2305,0xe42,0x3f64,0x710,0x11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x146704c2,0xf8ca5829,0xb6612d31,0x434aa4a1,0xbffa25cb,0x8565e232,0xe422305,0x7103f64,0x11}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xf8ca5829146704c2,0x434aa4a1b6612d31,0x8565e232bffa25cb,0x7103f640e422305,0x11}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 
+#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -32, ._mp_d = (mp_limb_t[]) {0x3b03,0xe541,0x6454,0x6f9,0x3808,0xb93,0x7509,0x2b52,0xed1,0xf4fe,0x8961,0x4869,0x4671,0xdd21,0x4c4c,0x70b0,0xfff9,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x35f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xe5413b03,0x6f96454,0xb933808,0x2b527509,0xf4fe0ed1,0x48698961,0xdd214671,0x70b04c4c,0xfffffff9,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x35fffff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x6f96454e5413b03,0x2b5275090b933808,0x48698961f4fe0ed1,0x70b04c4cdd214671,0xfffffffffffffff9,0xffffffffffffffff,0xffffffffffffffff,0x35fffffffffffff}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x618f,0xa0b7,0x7152,0x12e1,0x5493,0xa2fd,0xabc5,0xf9ca,0xb564,0xbc80,0xe8a6,0x6844,0x4f2e,0x7031,0x85e2,0xac1d,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xa0b7618f,0x12e17152,0xa2fd5493,0xf9caabc5,0xbc80b564,0x6844e8a6,0x70314f2e,0xac1d85e2,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x12e17152a0b7618f,0xf9caabc5a2fd5493,0x6844e8a6bc80b564,0xac1d85e270314f2e,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xe953,0xf5ac,0x2ee2,0xc962,0x459d,0xd9a0,0xd761,0x7c5a,0x8268,0xcf36,0x9b08,0xb7a6,0xbd23,0x7e04,0xe324,0x23ba}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) 
{0xf5ace953,0xc9622ee2,0xd9a0459d,0x7c5ad761,0xcf368268,0xb7a69b08,0x7e04bd23,0x23bae324}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xc9622ee2f5ace953,0x7c5ad761d9a0459d,0xb7a69b08cf368268,0x23bae3247e04bd23}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xc31e,0x416e,0xe2a5,0x25c2,0xa926,0x45fa,0x578b,0xf395,0x6ac9,0x7901,0xd14d,0xd089,0x9e5c,0xe062,0xbc4,0x583b,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x416ec31e,0x25c2e2a5,0x45faa926,0xf395578b,0x79016ac9,0xd089d14d,0xe0629e5c,0x583b0bc4,0x3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25c2e2a5416ec31e,0xf395578b45faa926,0xd089d14d79016ac9,0x583b0bc4e0629e5c,0x3}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4c2,0x1467,0x5829,0xf8ca,0x2d31,0xb661,0xa4a1,0x434a,0x25cb,0xbffa,0xe232,0x8565,0x2305,0xe42,0x3f64,0x710,0x11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x146704c2,0xf8ca5829,0xb6612d31,0x434aa4a1,0xbffa25cb,0x8565e232,0xe422305,0x7103f64,0x11}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xf8ca5829146704c2,0x434aa4a1b6612d31,0x8565e232bffa25cb,0x7103f640e422305,0x11}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, 
._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 61}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xf94,0x28f0,0x8955,0xb7aa,0x538f,0x65b6,0x5a15,0xd3b4,0x9667,0xcba4,0x3554,0x11f8,0x25a9,0x6265,0x196a,0x83d,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x28f00f94,0xb7aa8955,0x65b6538f,0xd3b45a15,0xcba49667,0x11f83554,0x626525a9,0x83d196a,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xb7aa895528f00f94,0xd3b45a1565b6538f,0x11f83554cba49667,0x83d196a626525a9,0x1}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xf94,0x28f0,0x8955,0xb7aa,0x538f,0x65b6,0x5a15,0xd3b4,0x9667,0xcba4,0x3554,0x11f8,0x25a9,0x6265,0x196a,0x83d,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x28f00f94,0xb7aa8955,0x65b6538f,0xd3b45a15,0xcba49667,0x11f83554,0x626525a9,0x83d196a,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xb7aa895528f00f94,0xd3b45a1565b6538f,0x11f83554cba49667,0x83d196a626525a9,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7ca,0x9478,0x44aa,0xdbd5,0x29c7,0xb2db,0x2d0a,0xe9da,0x4b33,0x65d2,0x1aaa,0x88fc,0x92d4,0x3132,0x8cb5,0x841e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x947807ca,0xdbd544aa,0xb2db29c7,0xe9da2d0a,0x65d24b33,0x88fc1aaa,0x313292d4,0x841e8cb5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdbd544aa947807ca,0xe9da2d0ab2db29c7,0x88fc1aaa65d24b33,0x841e8cb5313292d4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x5409,0xed37,0x7339,0xcbe7,0x95ab,0x2582,0x18fa,0xcc9d,0x13ae,0x2543,0xaefd,0xb168,0xfdbc,0x68b,0xc9af,0x6d9d,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xed375409,0xcbe77339,0x258295ab,0xcc9d18fa,0x254313ae,0xb168aefd,0x68bfdbc,0x6d9dc9af,0x6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xcbe77339ed375409,0xcc9d18fa258295ab,0xb168aefd254313ae,0x6d9dc9af068bfdbc,0x6}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d 
= (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -32, ._mp_d = (mp_limb_t[]) {0x7013,0x423f,0x42b7,0x3f3d,0x82a,0x9883,0x52bf,0xfede,0x8018,0xa449,0xf571,0xb8a,0x3139,0xbe7,0x439d,0x9e1f,0x2,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xd8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x423f7013,0x3f3d42b7,0x9883082a,0xfede52bf,0xa4498018,0xb8af571,0xbe73139,0x9e1f439d,0x2,0x0,0x0,0x0,0x0,0x0,0x0,0xd80000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x3f3d42b7423f7013,0xfede52bf9883082a,0xb8af571a4498018,0x9e1f439d0be73139,0x2,0x0,0x0,0xd8000000000000}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7ca,0x9478,0x44aa,0xdbd5,0x29c7,0xb2db,0x2d0a,0xe9da,0x4b33,0x65d2,0x1aaa,0x88fc,0x92d4,0x3132,0x8cb5,0x841e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x947807ca,0xdbd544aa,0xb2db29c7,0xe9da2d0a,0x65d24b33,0x88fc1aaa,0x313292d4,0x841e8cb5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdbd544aa947807ca,0xe9da2d0ab2db29c7,0x88fc1aaa65d24b33,0x841e8cb5313292d4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca2d,0x34af,0xea29,0x177b,0x91ed,0x86ca,0x588a,0xe94d,0x55df,0x4621,0xa1e4,0x67d7,0xb617,0x6a1,0x88f5,0x87b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x34afca2d,0x177bea29,0x86ca91ed,0xe94d588a,0x462155df,0x67d7a1e4,0x6a1b617,0x87b88f5}}} +#elif GMP_LIMB_BITS == 
64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x177bea2934afca2d,0xe94d588a86ca91ed,0x67d7a1e4462155df,0x87b88f506a1b617}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xf94,0x28f0,0x8955,0xb7aa,0x538f,0x65b6,0x5a15,0xd3b4,0x9667,0xcba4,0x3554,0x11f8,0x25a9,0x6265,0x196a,0x83d,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x28f00f94,0xb7aa8955,0x65b6538f,0xd3b45a15,0xcba49667,0x11f83554,0x626525a9,0x83d196a,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xb7aa895528f00f94,0xd3b45a1565b6538f,0x11f83554cba49667,0x83d196a626525a9,0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x5409,0xed37,0x7339,0xcbe7,0x95ab,0x2582,0x18fa,0xcc9d,0x13ae,0x2543,0xaefd,0xb168,0xfdbc,0x68b,0xc9af,0x6d9d,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xed375409,0xcbe77339,0x258295ab,0xcc9d18fa,0x254313ae,0xb168aefd,0x68bfdbc,0x6d9dc9af,0x6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xcbe77339ed375409,0xcc9d18fa258295ab,0xb168aefd254313ae,0x6d9dc9af068bfdbc,0x6}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 97}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1e34,0xdf45,0xcfa6,0xa203,0xe18a,0xd8c5,0x1d42,0x7a30,0x86ce,0x7a12,0xf933,0x741d,0x6b48,0xe48e,0xbf65,0x1854}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdf451e34,0xa203cfa6,0xd8c5e18a,0x7a301d42,0x7a1286ce,0x741df933,0xe48e6b48,0x1854bf65}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa203cfa6df451e34,0x7a301d42d8c5e18a,0x741df9337a1286ce,0x1854bf65e48e6b48}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1e34,0xdf45,0xcfa6,0xa203,0xe18a,0xd8c5,0x1d42,0x7a30,0x86ce,0x7a12,0xf933,0x741d,0x6b48,0xe48e,0xbf65,0x1854}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdf451e34,0xa203cfa6,0xd8c5e18a,0x7a301d42,0x7a1286ce,0x741df933,0xe48e6b48,0x1854bf65}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa203cfa6df451e34,0x7a301d42d8c5e18a,0x741df9337a1286ce,0x1854bf65e48e6b48}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8f1a,0x6fa2,0xe7d3,0x5101,0xf0c5,0x6c62,0xea1,0x3d18,0x4367,0xbd09,0xfc99,0x3a0e,0x35a4,0xf247,0x5fb2,0xc2a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6fa28f1a,0x5101e7d3,0x6c62f0c5,0x3d180ea1,0xbd094367,0x3a0efc99,0xf24735a4,0xc2a5fb2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5101e7d36fa28f1a,0x3d180ea16c62f0c5,0x3a0efc99bd094367,0xc2a5fb2f24735a4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcf03,0xa322,0x8523,0x93d,0x3bc,0x4b50,0x33ca,0x56b5,0x863f,0x87c8,0x38bf,0x98c6,0x8ce8,0xfab7,0xfc02,0x78ed}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa322cf03,0x93d8523,0x4b5003bc,0x56b533ca,0x87c8863f,0x98c638bf,0xfab78ce8,0x78edfc02}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x93d8523a322cf03,0x56b533ca4b5003bc,0x98c638bf87c8863f,0x78edfc02fab78ce8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x24ed,0x1400,0x74a1,0x1310,0xce8a,0x1c0d,0x512a,0x3500,0x2451,0x6992,0x892c,0x3cdb,0x45d8,0x520,0x420,0xf11f,0x6cb,0xbe4d,0xd06c,0xcbe4,0x4d06,0x6cbe,0xe4d0,0x6cb,0xbe4d,0xd06c,0xcbe4,0x4d06,0x6cbe,0xe4d0,0x6cb,0x15}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x140024ed,0x131074a1,0x1c0dce8a,0x3500512a,0x69922451,0x3cdb892c,0x52045d8,0xf11f0420,0xbe4d06cb,0xcbe4d06c,0x6cbe4d06,0x6cbe4d0,0xd06cbe4d,0x4d06cbe4,0xe4d06cbe,0x1506cb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x131074a1140024ed,0x3500512a1c0dce8a,0x3cdb892c69922451,0xf11f0420052045d8,0xcbe4d06cbe4d06cb,0x6cbe4d06cbe4d06,0x4d06cbe4d06cbe4d,0x1506cbe4d06cbe}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8f1a,0x6fa2,0xe7d3,0x5101,0xf0c5,0x6c62,0xea1,0x3d18,0x4367,0xbd09,0xfc99,0x3a0e,0x35a4,0xf247,0x5fb2,0xc2a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6fa28f1a,0x5101e7d3,0x6c62f0c5,0x3d180ea1,0xbd094367,0x3a0efc99,0xf24735a4,0xc2a5fb2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5101e7d36fa28f1a,0x3d180ea16c62f0c5,0x3a0efc99bd094367,0xc2a5fb2f24735a4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x98b3,0xd2e,0x314c,0x5199,0x7a5a,0xb592,0xbd65,0x1ef7,0x7d32,0x94fd,0x6cfe,0x68e3,0xcda6,0x8d91,0xfb73,0x88}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xd2e98b3,0x5199314c,0xb5927a5a,0x1ef7bd65,0x94fd7d32,0x68e36cfe,0x8d91cda6,0x88fb73}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc 
= 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x5199314c0d2e98b3,0x1ef7bd65b5927a5a,0x68e36cfe94fd7d32,0x88fb738d91cda6}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1e34,0xdf45,0xcfa6,0xa203,0xe18a,0xd8c5,0x1d42,0x7a30,0x86ce,0x7a12,0xf933,0x741d,0x6b48,0xe48e,0xbf65,0x1854}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdf451e34,0xa203cfa6,0xd8c5e18a,0x7a301d42,0x7a1286ce,0x741df933,0xe48e6b48,0x1854bf65}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa203cfa6df451e34,0x7a301d42d8c5e18a,0x741df9337a1286ce,0x1854bf65e48e6b48}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcf03,0xa322,0x8523,0x93d,0x3bc,0x4b50,0x33ca,0x56b5,0x863f,0x87c8,0x38bf,0x98c6,0x8ce8,0xfab7,0xfc02,0x78ed}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa322cf03,0x93d8523,0x4b5003bc,0x56b533ca,0x87c8863f,0x98c638bf,0xfab78ce8,0x78edfc02}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x93d8523a322cf03,0x56b533ca4b5003bc,0x98c638bf87c8863f,0x78edfc02fab78ce8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 113}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x779c,0xdc0d,0x3c7f,0x2e51,0x85b9,0xfb7d,0xb29e,0xd38c,0x64bb,0x9ea5,0xa4fa,0x79a3,0xdf70,0xf359,0xca21,0xa977,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xdc0d779c,0x2e513c7f,0xfb7d85b9,0xd38cb29e,0x9ea564bb,0x79a3a4fa,0xf359df70,0xa977ca21,0x6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2e513c7fdc0d779c,0xd38cb29efb7d85b9,0x79a3a4fa9ea564bb,0xa977ca21f359df70,0x6}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x779c,0xdc0d,0x3c7f,0x2e51,0x85b9,0xfb7d,0xb29e,0xd38c,0x64bb,0x9ea5,0xa4fa,0x79a3,0xdf70,0xf359,0xca21,0xa977,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xdc0d779c,0x2e513c7f,0xfb7d85b9,0xd38cb29e,0x9ea564bb,0x79a3a4fa,0xf359df70,0xa977ca21,0x6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2e513c7fdc0d779c,0xd38cb29efb7d85b9,0x79a3a4fa9ea564bb,0xa977ca21f359df70,0x6}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xbbce,0xee06,0x9e3f,0x9728,0xc2dc,0x7dbe,0x594f,0xe9c6,0xb25d,0x4f52,0xd27d,0x3cd1,0xefb8,0xf9ac,0xe510,0x54bb,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xee06bbce,0x97289e3f,0x7dbec2dc,0xe9c6594f,0x4f52b25d,0x3cd1d27d,0xf9acefb8,0x54bbe510,0x3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x97289e3fee06bbce,0xe9c6594f7dbec2dc,0x3cd1d27d4f52b25d,0x54bbe510f9acefb8,0x3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x8db1,0xaa9d,0x1944,0x727a,0xc6c3,0xffc0,0x39b4,0x5643,0x2de0,0xb534,0xc0a9,0x5371,0x8e58,0x80df,0xa6c4,0x5a83,0x36}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xaa9d8db1,0x727a1944,0xffc0c6c3,0x564339b4,0xb5342de0,0x5371c0a9,0x80df8e58,0x5a83a6c4,0x36}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x727a1944aa9d8db1,0x564339b4ffc0c6c3,0x5371c0a9b5342de0,0x5a83a6c480df8e58,0x36}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -32, ._mp_d = (mp_limb_t[]) {0x9c90,0x5de8,0xf815,0x67c5,0x989,0xc9,0x7c9e,0x180b,0x526d,0xdf5a,0x3386,0xea88,0x580a,0x24c5,0x5507,0x3bad,0x10,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x438}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x5de89c90,0x67c5f815,0xc90989,0x180b7c9e,0xdf5a526d,0xea883386,0x24c5580a,0x3bad5507,0x10,0x0,0x0,0x0,0x0,0x0,0x0,0x4380000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x67c5f8155de89c90,0x180b7c9e00c90989,0xea883386df5a526d,0x3bad550724c5580a,0x10,0x0,0x0,0x438000000000000}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xbbce,0xee06,0x9e3f,0x9728,0xc2dc,0x7dbe,0x594f,0xe9c6,0xb25d,0x4f52,0xd27d,0x3cd1,0xefb8,0xf9ac,0xe510,0x54bb,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xee06bbce,0x97289e3f,0x7dbec2dc,0xe9c6594f,0x4f52b25d,0x3cd1d27d,0xf9acefb8,0x54bbe510,0x3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x97289e3fee06bbce,0xe9c6594f7dbec2dc,0x3cd1d27d4f52b25d,0x54bbe510f9acefb8,0x3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa1f8,0x1530,0xa6be,0x126c,0xfd3b,0xbdd9,0xb3bc,0x8495,0x5457,0x1985,0xcfae,0xf440,0x4ea6,0x84ba,0x6881,0x2eb1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1530a1f8,0x126ca6be,0xbdd9fd3b,0x8495b3bc,0x19855457,0xf440cfae,0x84ba4ea6,0x2eb16881}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x126ca6be1530a1f8,0x8495b3bcbdd9fd3b,0xf440cfae19855457,0x2eb1688184ba4ea6}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x779c,0xdc0d,0x3c7f,0x2e51,0x85b9,0xfb7d,0xb29e,0xd38c,0x64bb,0x9ea5,0xa4fa,0x79a3,0xdf70,0xf359,0xca21,0xa977,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xdc0d779c,0x2e513c7f,0xfb7d85b9,0xd38cb29e,0x9ea564bb,0x79a3a4fa,0xf359df70,0xa977ca21,0x6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2e513c7fdc0d779c,0xd38cb29efb7d85b9,0x79a3a4fa9ea564bb,0xa977ca21f359df70,0x6}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x8db1,0xaa9d,0x1944,0x727a,0xc6c3,0xffc0,0x39b4,0x5643,0x2de0,0xb534,0xc0a9,0x5371,0x8e58,0x80df,0xa6c4,0x5a83,0x36}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xaa9d8db1,0x727a1944,0xffc0c6c3,0x564339b4,0xb5342de0,0x5371c0a9,0x80df8e58,0x5a83a6c4,0x36}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x727a1944aa9d8db1,0x564339b4ffc0c6c3,0x5371c0a9b5342de0,0x5a83a6c480df8e58,0x36}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 
0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 149}}; +const quat_left_ideal_t CONNECTING_IDEALS[7] = {{{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} 
+#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb4ca,0xbe8d,0xcee3,0x9669,0x9cb,0x86eb,0xf6f9,0x374b,0x2e68,0xd1f2,0x3315,0xab5f,0x2208,0xa9c9,0x686e,0x2541}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbe8db4ca,0x9669cee3,0x86eb09cb,0x374bf6f9,0xd1f22e68,0xab5f3315,0xa9c92208,0x2541686e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9669cee3be8db4ca,0x374bf6f986eb09cb,0xab5f3315d1f22e68,0x2541686ea9c92208}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdb03,0x2777,0xbc36,0x4be5,0x38dd,0xd474,0x83b4,0x41a7,0x5426,0xa361,0x1f00,0xc617,0xe350,0x8cb4,0x2b1c,0xaa2}}} +#elif 
GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2777db03,0x4be5bc36,0xd47438dd,0x41a783b4,0xa3615426,0xc6171f00,0x8cb4e350,0xaa22b1c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4be5bc362777db03,0x41a783b4d47438dd,0xc6171f00a3615426,0xaa22b1c8cb4e350}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb4ca,0xbe8d,0xcee3,0x9669,0x9cb,0x86eb,0xf6f9,0x374b,0x2e68,0xd1f2,0x3315,0xab5f,0x2208,0xa9c9,0x686e,0x2541}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbe8db4ca,0x9669cee3,0x86eb09cb,0x374bf6f9,0xd1f22e68,0xab5f3315,0xa9c92208,0x2541686e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9669cee3be8db4ca,0x374bf6f986eb09cb,0xab5f3315d1f22e68,0x2541686ea9c92208}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd9c7,0x9715,0x12ad,0x4a84,0xd0ee,0xb276,0x7344,0xf5a4,0xda41,0x2e90,0x1415,0xe548,0x3eb7,0x1d14,0x3d52,0x1a9f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9715d9c7,0x4a8412ad,0xb276d0ee,0xf5a47344,0x2e90da41,0xe5481415,0x1d143eb7,0x1a9f3d52}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4a8412ad9715d9c7,0xf5a47344b276d0ee,0xe54814152e90da41,0x1a9f3d521d143eb7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS 
== 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xda65,0xdf46,0xe771,0xcb34,0x84e5,0xc375,0xfb7c,0x1ba5,0x1734,0xe8f9,0x998a,0x55af,0x9104,0x54e4,0xb437,0x12a0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdf46da65,0xcb34e771,0xc37584e5,0x1ba5fb7c,0xe8f91734,0x55af998a,0x54e49104,0x12a0b437}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcb34e771df46da65,0x1ba5fb7cc37584e5,0x55af998ae8f91734,0x12a0b43754e49104}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8412,0x5a4d,0xe982,0x7e48,0x619e,0x6d03,0x297c,0x2598,0x6aff,0x24ff,0xc89e,0x51c8,0x6f8,0x6965,0x7e7b,0x13de}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a4d8412,0x7e48e982,0x6d03619e,0x2598297c,0x24ff6aff,0x51c8c89e,0x696506f8,0x13de7e7b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7e48e9825a4d8412,0x2598297c6d03619e,0x51c8c89e24ff6aff,0x13de7e7b696506f8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6e7d,0xd8b2,0x8be,0xf2e3,0x7c3e,0x1572,0x7609,0xf4ae,0x8366,0xb93e,0x53ec,0x9b03,0x6573,0xae18,0x41b0,0x707}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd8b26e7d,0xf2e308be,0x15727c3e,0xf4ae7609,0xb93e8366,0x9b0353ec,0xae186573,0x70741b0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf2e308bed8b26e7d,0xf4ae760915727c3e,0x9b0353ecb93e8366,0x70741b0ae186573}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS 
== 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8412,0x5a4d,0xe982,0x7e48,0x619e,0x6d03,0x297c,0x2598,0x6aff,0x24ff,0xc89e,0x51c8,0x6f8,0x6965,0x7e7b,0x13de}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a4d8412,0x7e48e982,0x6d03619e,0x2598297c,0x24ff6aff,0x51c8c89e,0x696506f8,0x13de7e7b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7e48e9825a4d8412,0x2598297c6d03619e,0x51c8c89e24ff6aff,0x13de7e7b696506f8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1595,0x819b,0xe0c3,0x8b65,0xe55f,0x5790,0xb373,0x30e9,0xe798,0x6bc0,0x74b1,0xb6c5,0xa184,0xbb4c,0x3cca,0xcd7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x819b1595,0x8b65e0c3,0x5790e55f,0x30e9b373,0x6bc0e798,0xb6c574b1,0xbb4ca184,0xcd73cca}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8b65e0c3819b1595,0x30e9b3735790e55f,0xb6c574b16bc0e798,0xcd73ccabb4ca184}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif 
GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc209,0x2d26,0x74c1,0x3f24,0xb0cf,0x3681,0x14be,0x92cc,0xb57f,0x127f,0x644f,0x28e4,0x837c,0xb4b2,0x3f3d,0x9ef}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2d26c209,0x3f2474c1,0x3681b0cf,0x92cc14be,0x127fb57f,0x28e4644f,0xb4b2837c,0x9ef3f3d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3f2474c12d26c209,0x92cc14be3681b0cf,0x28e4644f127fb57f,0x9ef3f3db4b2837c}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb376,0x7694,0x643d,0xf407,0xf5c,0x6e43,0xd345,0x5c1f,0xecc4,0x777d,0x1005,0x24fe,0x88e4,0x536a,0x5c85,0xe09}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7694b376,0xf407643d,0x6e430f5c,0x5c1fd345,0x777decc4,0x24fe1005,0x536a88e4,0xe095c85}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf407643d7694b376,0x5c1fd3456e430f5c,0x24fe1005777decc4,0xe095c85536a88e4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9427,0xa69c,0xda24,0xb3a7,0x4f9a,0x22fc,0xa39a,0xcb05,0xd93e,0x923d,0xb97d,0xad95,0x3374,0x96bd,0xbdeb,0x51}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa69c9427,0xb3a7da24,0x22fc4f9a,0xcb05a39a,0x923dd93e,0xad95b97d,0x96bd3374,0x51bdeb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb3a7da24a69c9427,0xcb05a39a22fc4f9a,0xad95b97d923dd93e,0x51bdeb96bd3374}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb376,0x7694,0x643d,0xf407,0xf5c,0x6e43,0xd345,0x5c1f,0xecc4,0x777d,0x1005,0x24fe,0x88e4,0x536a,0x5c85,0xe09}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0x7694b376,0xf407643d,0x6e430f5c,0x5c1fd345,0x777decc4,0x24fe1005,0x536a88e4,0xe095c85}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf407643d7694b376,0x5c1fd3456e430f5c,0x24fe1005777decc4,0xe095c85536a88e4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1f4f,0xcff8,0x8a18,0x405f,0xbfc2,0x4b46,0x2fab,0x911a,0x1385,0xe540,0x5687,0x7768,0x556f,0xbcad,0x9e99,0xdb7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcff81f4f,0x405f8a18,0x4b46bfc2,0x911a2fab,0xe5401385,0x77685687,0xbcad556f,0xdb79e99}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x405f8a18cff81f4f,0x911a2fab4b46bfc2,0x77685687e5401385,0xdb79e99bcad556f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = 
(mp_limb_t[]) {0x59bb,0xbb4a,0xb21e,0x7a03,0x87ae,0xb721,0xe9a2,0x2e0f,0xf662,0xbbbe,0x802,0x127f,0x4472,0xa9b5,0xae42,0x704}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbb4a59bb,0x7a03b21e,0xb72187ae,0x2e0fe9a2,0xbbbef662,0x127f0802,0xa9b54472,0x704ae42}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7a03b21ebb4a59bb,0x2e0fe9a2b72187ae,0x127f0802bbbef662,0x704ae42a9b54472}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7a2a,0xcc34,0x1fb9,0x5b4e,0x6acf,0x4f0f,0xbb68,0x211d,0xa57b,0xae74,0x782,0xa512,0xd75c,0xb576,0x5af5,0xa035}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcc347a2a,0x5b4e1fb9,0x4f0f6acf,0x211dbb68,0xae74a57b,0xa5120782,0xb576d75c,0xa0355af5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5b4e1fb9cc347a2a,0x211dbb684f0f6acf,0xa5120782ae74a57b,0xa0355af5b576d75c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa3e3,0x12fb,0x32f3,0xb40f,0x4bbe,0x537d,0xbefc,0xdda9,0x8954,0xaca9,0xaaf3,0xc020,0x17da,0xf48f,0x88fd,0x21a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x12fba3e3,0xb40f32f3,0x537d4bbe,0xdda9befc,0xaca98954,0xc020aaf3,0xf48f17da,0x21a88fd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb40f32f312fba3e3,0xdda9befc537d4bbe,0xc020aaf3aca98954,0x21a88fdf48f17da}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7a2a,0xcc34,0x1fb9,0x5b4e,0x6acf,0x4f0f,0xbb68,0x211d,0xa57b,0xae74,0x782,0xa512,0xd75c,0xb576,0x5af5,0xa035}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcc347a2a,0x5b4e1fb9,0x4f0f6acf,0x211dbb68,0xae74a57b,0xa5120782,0xb576d75c,0xa0355af5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5b4e1fb9cc347a2a,0x211dbb684f0f6acf,0xa5120782ae74a57b,0xa0355af5b576d75c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd647,0xb938,0xecc6,0xa73e,0x1f10,0xfb92,0xfc6b,0x4373,0x1c26,0x1cb,0x5c8f,0xe4f1,0xbf81,0xc0e7,0xd1f7,0x9e1a}}} 
+#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb938d647,0xa73eecc6,0xfb921f10,0x4373fc6b,0x1cb1c26,0xe4f15c8f,0xc0e7bf81,0x9e1ad1f7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa73eecc6b938d647,0x4373fc6bfb921f10,0xe4f15c8f01cb1c26,0x9e1ad1f7c0e7bf81}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3d15,0xe61a,0xfdc,0xada7,0xb567,0x2787,0xddb4,0x908e,0x52bd,0x573a,0x3c1,0x5289,0x6bae,0xdabb,0xad7a,0x501a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe61a3d15,0xada70fdc,0x2787b567,0x908eddb4,0x573a52bd,0x528903c1,0xdabb6bae,0x501aad7a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xada70fdce61a3d15,0x908eddb42787b567,0x528903c1573a52bd,0x501aad7adabb6bae}}} 
+#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81d6,0x1f29,0xf1a,0x365e,0x8f4a,0x95c8,0x38b1,0x87f1,0xb9ff,0x9cca,0x8239,0x1cb1,0x70f,0x8fde,0x5f3f,0x25be}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1f2981d6,0x365e0f1a,0x95c88f4a,0x87f138b1,0x9ccab9ff,0x1cb18239,0x8fde070f,0x25be5f3f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x365e0f1a1f2981d6,0x87f138b195c88f4a,0x1cb182399ccab9ff,0x25be5f3f8fde070f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbe61,0x7c4c,0xa49c,0x48f9,0x3fb4,0xb80a,0xf823,0xd427,0x593e,0x9ec7,0x3a56,0xda06,0x551e,0x2875,0xc2cb,0xfb0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7c4cbe61,0x48f9a49c,0xb80a3fb4,0xd427f823,0x9ec7593e,0xda063a56,0x2875551e,0xfb0c2cb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x48f9a49c7c4cbe61,0xd427f823b80a3fb4,0xda063a569ec7593e,0xfb0c2cb2875551e}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81d6,0x1f29,0xf1a,0x365e,0x8f4a,0x95c8,0x38b1,0x87f1,0xb9ff,0x9cca,0x8239,0x1cb1,0x70f,0x8fde,0x5f3f,0x25be}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1f2981d6,0x365e0f1a,0x95c88f4a,0x87f138b1,0x9ccab9ff,0x1cb18239,0x8fde070f,0x25be5f3f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x365e0f1a1f2981d6,0x87f138b195c88f4a,0x1cb182399ccab9ff,0x25be5f3f8fde070f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc375,0xa2dc,0x6a7d,0xed64,0x4f95,0xddbe,0x408d,0xb3c9,0x60c0,0xfe03,0x47e2,0x42ab,0xb1f0,0x6768,0x9c74,0x160d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa2dcc375,0xed646a7d,0xddbe4f95,0xb3c9408d,0xfe0360c0,0x42ab47e2,0x6768b1f0,0x160d9c74}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xed646a7da2dcc375,0xb3c9408dddbe4f95,0x42ab47e2fe0360c0,0x160d9c746768b1f0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif 
GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc0eb,0xf94,0x78d,0x1b2f,0x47a5,0xcae4,0x9c58,0xc3f8,0x5cff,0xce65,0xc11c,0x8e58,0x387,0xc7ef,0x2f9f,0x12df}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf94c0eb,0x1b2f078d,0xcae447a5,0xc3f89c58,0xce655cff,0x8e58c11c,0xc7ef0387,0x12df2f9f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1b2f078d0f94c0eb,0xc3f89c58cae447a5,0x8e58c11cce655cff,0x12df2f9fc7ef0387}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x5a86,0x1729,0x8ced,0x8280,0xd48f,0x1e0f,0x5e39,0x24b3,0x74ba,0xa294,0xd9f3,0x4e2e,0x8cc1,0xee6b,0xdd2,0x3079}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17295a86,0x82808ced,0x1e0fd48f,0x24b35e39,0xa29474ba,0x4e2ed9f3,0xee6b8cc1,0x30790dd2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x82808ced17295a86,0x24b35e391e0fd48f,0x4e2ed9f3a29474ba,0x30790dd2ee6b8cc1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9203,0x57ee,0x3867,0xdf50,0xd8ad,0xbe9c,0x9e30,0x7a77,0xcd0f,0x77d9,0xbb7f,0x65f1,0x1b16,0xbbf5,0xe5c0,0x2563}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57ee9203,0xdf503867,0xbe9cd8ad,0x7a779e30,0x77d9cd0f,0x65f1bb7f,0xbbf51b16,0x2563e5c0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdf50386757ee9203,0x7a779e30be9cd8ad,0x65f1bb7f77d9cd0f,0x2563e5c0bbf51b16}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5a86,0x1729,0x8ced,0x8280,0xd48f,0x1e0f,0x5e39,0x24b3,0x74ba,0xa294,0xd9f3,0x4e2e,0x8cc1,0xee6b,0xdd2,0x3079}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17295a86,0x82808ced,0x1e0fd48f,0x24b35e39,0xa29474ba,0x4e2ed9f3,0xee6b8cc1,0x30790dd2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x82808ced17295a86,0x24b35e391e0fd48f,0x4e2ed9f3a29474ba,0x30790dd2ee6b8cc1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc883,0xbf3a,0x5485,0xa330,0xfbe1,0x5f72,0xc008,0xaa3b,0xa7aa,0x2aba,0x1e74,0xe83d,0x71aa,0x3276,0x2812,0xb15}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbf3ac883,0xa3305485,0x5f72fbe1,0xaa3bc008,0x2abaa7aa,0xe83d1e74,0x327671aa,0xb152812}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa3305485bf3ac883,0xaa3bc0085f72fbe1,0xe83d1e742abaa7aa,0xb152812327671aa}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 
0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad43,0x8b94,0x4676,0xc140,0xea47,0x8f07,0xaf1c,0x1259,0x3a5d,0xd14a,0x6cf9,0xa717,0xc660,0x7735,0x86e9,0x183c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8b94ad43,0xc1404676,0x8f07ea47,0x1259af1c,0xd14a3a5d,0xa7176cf9,0x7735c660,0x183c86e9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc14046768b94ad43,0x1259af1c8f07ea47,0xa7176cf9d14a3a5d,0x183c86e97735c660}}} +#endif +, &MAXORD_O0}}; +const quat_alg_elem_t CONJUGATING_ELEMENTS[7] = {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 
64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8fcd,0xe605,0x8b19,0xe24f,0x42a8,0x5b5f,0x7aae,0x78f3,0x828e,0x7553,0x5216,0x7176,0x559,0x367e,0x938b,0x2fe3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe6058fcd,0xe24f8b19,0x5b5f42a8,0x78f37aae,0x7553828e,0x71765216,0x367e0559,0x2fe3938b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe24f8b19e6058fcd,0x78f37aae5b5f42a8,0x717652167553828e,0x2fe3938b367e0559}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x8fcd,0xe605,0x8b19,0xe24f,0x42a8,0x5b5f,0x7aae,0x78f3,0x828e,0x7553,0x5216,0x7176,0x559,0x367e,0x938b,0x2fe3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xe6058fcd,0xe24f8b19,0x5b5f42a8,0x78f37aae,0x7553828e,0x71765216,0x367e0559,0x2fe3938b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe24f8b19e6058fcd,0x78f37aae5b5f42a8,0x717652167553828e,0x2fe3938b367e0559}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x2315,0x38d9,0x6fc1,0xa333,0xaeb,0xce6a,0x2a4c,0xa1c1,0x274c,0xa9fc,0xd4c6,0xb0b3,0x555b,0x7a48,0x411a,0x2bdc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x38d92315,0xa3336fc1,0xce6a0aeb,0xa1c12a4c,0xa9fc274c,0xb0b3d4c6,0x7a48555b,0x2bdc411a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa3336fc138d92315,0xa1c12a4cce6a0aeb,0xb0b3d4c6a9fc274c,0x2bdc411a7a48555b}}} +#endif +, +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x2315,0x38d9,0x6fc1,0xa333,0xaeb,0xce6a,0x2a4c,0xa1c1,0x274c,0xa9fc,0xd4c6,0xb0b3,0x555b,0x7a48,0x411a,0x2bdc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x38d92315,0xa3336fc1,0xce6a0aeb,0xa1c12a4c,0xa9fc274c,0xb0b3d4c6,0x7a48555b,0x2bdc411a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa3336fc138d92315,0xa1c12a4cce6a0aeb,0xb0b3d4c6a9fc274c,0x2bdc411a7a48555b}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe8f5,0xf5ac,0x2ee2,0xc962,0x459d,0xd9a0,0xd761,0x7c5a,0x8268,0xcf36,0x9b08,0xb7a6,0xbd23,0x7e04,0xe324,0x23ba}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5ace8f5,0xc9622ee2,0xd9a0459d,0x7c5ad761,0xcf368268,0xb7a69b08,0x7e04bd23,0x23bae324}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc9622ee2f5ace8f5,0x7c5ad761d9a0459d,0xb7a69b08cf368268,0x23bae3247e04bd23}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe8f5,0xf5ac,0x2ee2,0xc962,0x459d,0xd9a0,0xd761,0x7c5a,0x8268,0xcf36,0x9b08,0xb7a6,0xbd23,0x7e04,0xe324,0x23ba}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5ace8f5,0xc9622ee2,0xd9a0459d,0x7c5ad761,0xcf368268,0xb7a69b08,0x7e04bd23,0x23bae324}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc9622ee2f5ace8f5,0x7c5ad761d9a0459d,0xb7a69b08cf368268,0x23bae3247e04bd23}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xfef5,0x4752,0xbb14,0x6ee3,0x5898,0x6a2,0x8282,0x1179,0xe429,0xf5d1,0x5ad8,0x642d,0x3061,0x58d,0x9c04,0x917b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4752fef5,0x6ee3bb14,0x6a25898,0x11798282,0xf5d1e429,0x642d5ad8,0x58d3061,0x917b9c04}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6ee3bb144752fef5,0x1179828206a25898,0x642d5ad8f5d1e429,0x917b9c04058d3061}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xfef5,0x4752,0xbb14,0x6ee3,0x5898,0x6a2,0x8282,0x1179,0xe429,0xf5d1,0x5ad8,0x642d,0x3061,0x58d,0x9c04,0x917b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x4752fef5,0x6ee3bb14,0x6a25898,0x11798282,0xf5d1e429,0x642d5ad8,0x58d3061,0x917b9c04}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x6ee3bb144752fef5,0x1179828206a25898,0x642d5ad8f5d1e429,0x917b9c04058d3061}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x7}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xc375,0xa2dc,0x6a7d,0xed64,0x4f95,0xddbe,0x408d,0xb3c9,0x60c0,0xfe03,0x47e2,0x42ab,0xb1f0,0x6768,0x9c74,0x160d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xa2dcc375,0xed646a7d,0xddbe4f95,0xb3c9408d,0xfe0360c0,0x42ab47e2,0x6768b1f0,0x160d9c74}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xed646a7da2dcc375,0xb3c9408dddbe4f95,0x42ab47e2fe0360c0,0x160d9c746768b1f0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xc375,0xa2dc,0x6a7d,0xed64,0x4f95,0xddbe,0x408d,0xb3c9,0x60c0,0xfe03,0x47e2,0x42ab,0xb1f0,0x6768,0x9c74,0x160d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xa2dcc375,0xed646a7d,0xddbe4f95,0xb3c9408d,0xfe0360c0,0x42ab47e2,0x6768b1f0,0x160d9c74}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xed646a7da2dcc375,0xb3c9408dddbe4f95,0x42ab47e2fe0360c0,0x160d9c746768b1f0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 
+{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x95f7,0xbdd2,0x75d6,0x430e,0x5c58,0xbd78,0x16b0,0x1278,0xc3f8,0x16b,0xb5dc,0xbbcf,0xfa18,0x1815,0x3c32,0x624a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xbdd295f7,0x430e75d6,0xbd785c58,0x127816b0,0x16bc3f8,0xbbcfb5dc,0x1815fa18,0x624a3c32}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x430e75d6bdd295f7,0x127816b0bd785c58,0xbbcfb5dc016bc3f8,0x624a3c321815fa18}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x95f7,0xbdd2,0x75d6,0x430e,0x5c58,0xbd78,0x16b0,0x1278,0xc3f8,0x16b,0xb5dc,0xbbcf,0xfa18,0x1815,0x3c32,0x624a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xbdd295f7,0x430e75d6,0xbd785c58,0x127816b0,0x16bc3f8,0xbbcfb5dc,0x1815fa18,0x624a3c32}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x430e75d6bdd295f7,0x127816b0bd785c58,0xbbcfb5dc016bc3f8,0x624a3c321815fa18}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +#endif +}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion_data.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion_data.h new file mode 100644 index 0000000000..a5eb1106e6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion_data.h @@ -0,0 +1,12 @@ +#include +#define MAXORD_O0 (EXTREMAL_ORDERS->order) +#define STANDARD_EXTREMAL_ORDER (EXTREMAL_ORDERS[0]) +#define NUM_ALTERNATE_EXTREMAL_ORDERS 6 +#define ALTERNATE_EXTREMAL_ORDERS (EXTREMAL_ORDERS+1) +#define ALTERNATE_CONNECTING_IDEALS (CONNECTING_IDEALS+1) +#define ALTERNATE_CONJUGATING_ELEMENTS (CONJUGATING_ELEMENTS+1) +extern const ibz_t QUAT_prime_cofactor; +extern const quat_alg_t QUATALG_PINFTY; +extern const quat_p_extremal_maximal_order_t EXTREMAL_ORDERS[7]; +extern const quat_left_ideal_t CONNECTING_IDEALS[7]; +extern const quat_alg_elem_t CONJUGATING_ELEMENTS[7]; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_arm64crypto.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_arm64crypto.h new file mode 100644 index 0000000000..88c4bf48d0 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_arm64crypto.h @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef RANDOMBYTES_ARM64CRYPTO_H +#define RANDOMBYTES_ARM64CRYPTO_H + +#include + +#define RNG_SUCCESS 0 +#define RNG_BAD_MAXLEN -1 +#define RNG_BAD_OUTBUF -2 +#define RNG_BAD_REQ_LEN -3 + 
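+/* State layouts for the AES-based deterministic generators used by this RNG backend. These appear to mirror the usual NIST AES-256 CTR-DRBG reference structures: an XOF-style output state (buffer, position, remaining length, key, counter) and the DRBG state itself (Key, V, reseed counter). */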
+typedef struct { + unsigned char buffer[16]; + int buffer_pos; + unsigned long length_remaining; + unsigned char key[32]; + unsigned char ctr[16]; +} AES_XOF_struct; + +typedef struct { + unsigned char Key[32]; + unsigned char V[16]; + int reseed_counter; +} AES256_CTR_DRBG_struct; + +#endif /* RANDOMBYTES_ARM64CRYPTO_H */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_ctrdrbg_aesni.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_ctrdrbg_aesni.c new file mode 100644 index 0000000000..3fc67acfb6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_ctrdrbg_aesni.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: Apache-2.0 and Unknown +// +/* +NIST-developed software is provided by NIST as a public service. You may use, +copy, and distribute copies of the software in any medium, provided that you +keep intact this entire notice. You may improve, modify, and create derivative +works of the software or any portion of the software, and you may copy and +distribute such modifications or works. Modified works should carry a notice +stating that you changed the software and should note the date and nature of any +such change. Please explicitly acknowledge the National Institute of Standards +and Technology as the source of the software. + +NIST-developed software is expressly provided "AS IS." NIST MAKES NO WARRANTY OF +ANY KIND, EXPRESS, IMPLIED, IN FACT, OR ARISING BY OPERATION OF LAW, INCLUDING, +WITHOUT LIMITATION, THE IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE, NON-INFRINGEMENT, AND DATA ACCURACY. NIST NEITHER REPRESENTS +NOR WARRANTS THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR +ERROR-FREE, OR THAT ANY DEFECTS WILL BE CORRECTED. NIST DOES NOT WARRANT OR MAKE +ANY REPRESENTATIONS REGARDING THE USE OF THE SOFTWARE OR THE RESULTS THEREOF, +INCLUDING BUT NOT LIMITED TO THE CORRECTNESS, ACCURACY, RELIABILITY, OR +USEFULNESS OF THE SOFTWARE. + +You are solely responsible for determining the appropriateness of using and +distributing the software and you assume all risks associated with its use, +including but not limited to the risks and costs of program errors, compliance +with applicable laws, damage to or loss of data, programs or equipment, and the +unavailability or interruption of operation. This software is not intended to be +used in any situation where a failure could cause risk of injury or damage to +property. The software developed by NIST employees is not subject to copyright +protection within the United States. +*/ + +#include + +#include +#include "ctr_drbg.h" + +#ifdef ENABLE_CT_TESTING +#include +#endif + +#define RNG_SUCCESS 0 +#define RNG_BAD_MAXLEN -1 +#define RNG_BAD_OUTBUF -2 +#define RNG_BAD_REQ_LEN -3 + +CTR_DRBG_STATE drbg; + +#ifndef CTRDRBG_TEST_BENCH +static +#endif +void +randombytes_init_aes_ni(unsigned char *entropy_input, + unsigned char *personalization_string, + int security_strength) { + (void)security_strength; // fixed to 256 + CTR_DRBG_init(&drbg, entropy_input, personalization_string, + (personalization_string == NULL) ? 
0 : CTR_DRBG_ENTROPY_LEN); +} + +#ifndef CTRDRBG_TEST_BENCH +static +#endif +int +randombytes_aes_ni(unsigned char *x, size_t xlen) { + CTR_DRBG_generate(&drbg, x, xlen, NULL, 0); + return RNG_SUCCESS; +} + +#ifdef RANDOMBYTES_AES_NI +SQISIGN_API +int randombytes(unsigned char *random_array, unsigned long long nbytes) { + int ret = randombytes_aes_ni(random_array, nbytes); +#ifdef ENABLE_CT_TESTING + VALGRIND_MAKE_MEM_UNDEFINED(random_array, ret); +#endif + return ret; +} + +SQISIGN_API +void randombytes_init(unsigned char *entropy_input, + unsigned char *personalization_string, + int security_strength) { + randombytes_init_aes_ni(entropy_input, personalization_string, + security_strength); +} +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_system.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_system.c new file mode 100644 index 0000000000..689c29b242 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_system.c @@ -0,0 +1,431 @@ +// SPDX-License-Identifier: MIT + +/* +The MIT License +Copyright (c) 2017 Daan Sprenkels +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ + +#include + +#ifdef ENABLE_CT_TESTING +#include +#endif + +// In the case that are compiling on linux, we need to define _GNU_SOURCE +// *before* randombytes.h is included. Otherwise SYS_getrandom will not be +// declared. +#if defined(__linux__) || defined(__GNU__) +#define _GNU_SOURCE +#endif /* defined(__linux__) || defined(__GNU__) */ + +#if defined(_WIN32) +/* Windows */ +#include +#include /* CryptAcquireContext, CryptGenRandom */ +#endif /* defined(_WIN32) */ + +/* wasi */ +#if defined(__wasi__) +#include +#endif + +/* kFreeBSD */ +#if defined(__FreeBSD_kernel__) && defined(__GLIBC__) +#define GNU_KFREEBSD +#endif + +#if defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) +/* Linux */ +// We would need to include , but not every target has access +// to the linux headers. We only need RNDGETENTCNT, so we instead inline it. +// RNDGETENTCNT is originally defined in `include/uapi/linux/random.h` in the +// linux repo. 
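+// (On most architectures this value is what _IOR('R', 0x00, int) expands to: read direction, a 4-byte payload, ioctl type 'R', number 0.)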
+#define RNDGETENTCNT 0x80045200 + +#include +#include +#include +#include +#include +#include +#include +#if (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && \ + ((__GLIBC__ > 2) || (__GLIBC_MINOR__ > 24)) +#define USE_GLIBC +#include +#endif /* (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && ((__GLIBC__ > 2) || \ + (__GLIBC_MINOR__ > 24)) */ +#include +#include +#include +#include + +// We need SSIZE_MAX as the maximum read len from /dev/urandom +#if !defined(SSIZE_MAX) +#define SSIZE_MAX (SIZE_MAX / 2 - 1) +#endif /* defined(SSIZE_MAX) */ + +#endif /* defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) */ + +#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) +/* Dragonfly, FreeBSD, NetBSD, OpenBSD (has arc4random) */ +#include +#if defined(BSD) +#include +#endif +/* GNU/Hurd defines BSD in sys/param.h which causes problems later */ +#if defined(__GNU__) +#undef BSD +#endif +#endif + +#if defined(__EMSCRIPTEN__) +#include +#include +#include +#include +#endif /* defined(__EMSCRIPTEN__) */ + +#if defined(_WIN32) +static int +randombytes_win32_randombytes(void *buf, size_t n) +{ + HCRYPTPROV ctx; + BOOL tmp; + DWORD to_read = 0; + const size_t MAX_DWORD = 0xFFFFFFFF; + + tmp = CryptAcquireContext(&ctx, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT); + if (tmp == FALSE) + return -1; + + while (n > 0) { + to_read = (DWORD)(n < MAX_DWORD ? n : MAX_DWORD); + tmp = CryptGenRandom(ctx, to_read, (BYTE *)buf); + if (tmp == FALSE) + return -1; + buf = ((char *)buf) + to_read; + n -= to_read; + } + + tmp = CryptReleaseContext(ctx, 0); + if (tmp == FALSE) + return -1; + + return 0; +} +#endif /* defined(_WIN32) */ + +#if defined(__wasi__) +static int +randombytes_wasi_randombytes(void *buf, size_t n) +{ + arc4random_buf(buf, n); + return 0; +} +#endif /* defined(__wasi__) */ + +#if (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || defined(SYS_getrandom)) +#if defined(USE_GLIBC) +// getrandom is declared in glibc. +#elif defined(SYS_getrandom) +static ssize_t +getrandom(void *buf, size_t buflen, unsigned int flags) +{ + return syscall(SYS_getrandom, buf, buflen, flags); +} +#endif + +static int +randombytes_linux_randombytes_getrandom(void *buf, size_t n) +{ + /* I have thought about using a separate PRF, seeded by getrandom, but + * it turns out that the performance of getrandom is good enough + * (250 MB/s on my laptop). + */ + size_t offset = 0, chunk; + int ret; + while (n > 0) { + /* getrandom does not allow chunks larger than 33554431 */ + chunk = n <= 33554431 ? 
n : 33554431; + do { + ret = getrandom((char *)buf + offset, chunk, 0); + } while (ret == -1 && errno == EINTR); + if (ret < 0) + return ret; + offset += ret; + n -= ret; + } + assert(n == 0); + return 0; +} +#endif /* (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || \ + defined(SYS_getrandom)) */ + +#if (defined(__linux__) || defined(GNU_KFREEBSD)) && !defined(SYS_getrandom) + +#if defined(__linux__) +static int +randombytes_linux_read_entropy_ioctl(int device, int *entropy) +{ + return ioctl(device, RNDGETENTCNT, entropy); +} + +static int +randombytes_linux_read_entropy_proc(FILE *stream, int *entropy) +{ + int retcode; + do { + rewind(stream); + retcode = fscanf(stream, "%d", entropy); + } while (retcode != 1 && errno == EINTR); + if (retcode != 1) { + return -1; + } + return 0; +} + +static int +randombytes_linux_wait_for_entropy(int device) +{ + /* We will block on /dev/random, because any increase in the OS' entropy + * level will unblock the request. I use poll here (as does libsodium), + * because we don't *actually* want to read from the device. */ + enum + { + IOCTL, + PROC + } strategy = IOCTL; + const int bits = 128; + struct pollfd pfd; + int fd; + FILE *proc_file; + int retcode, retcode_error = 0; // Used as return codes throughout this function + int entropy = 0; + + /* If the device has enough entropy already, we will want to return early */ + retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); + // printf("errno: %d (%s)\n", errno, strerror(errno)); + if (retcode != 0 && (errno == ENOTTY || errno == ENOSYS)) { + // The ioctl call on /dev/urandom has failed due to a + // - ENOTTY (unsupported action), or + // - ENOSYS (invalid ioctl; this happens on MIPS, see #22). + // + // We will fall back to reading from + // `/proc/sys/kernel/random/entropy_avail`. This less ideal, + // because it allocates a file descriptor, and it may not work + // in a chroot. But at this point it seems we have no better + // options left. 
+ strategy = PROC; + // Open the entropy count file + proc_file = fopen("/proc/sys/kernel/random/entropy_avail", "r"); + if (proc_file == NULL) { + return -1; + } + } else if (retcode != 0) { + // Unrecoverable ioctl error + return -1; + } + if (entropy >= bits) { + return 0; + } + + do { + fd = open("/dev/random", O_RDONLY); + } while (fd == -1 && errno == EINTR); /* EAGAIN will not occur */ + if (fd == -1) { + /* Unrecoverable IO error */ + return -1; + } + + pfd.fd = fd; + pfd.events = POLLIN; + for (;;) { + retcode = poll(&pfd, 1, -1); + if (retcode == -1 && (errno == EINTR || errno == EAGAIN)) { + continue; + } else if (retcode == 1) { + if (strategy == IOCTL) { + retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); + } else if (strategy == PROC) { + retcode = randombytes_linux_read_entropy_proc(proc_file, &entropy); + } else { + return -1; // Unreachable + } + + if (retcode != 0) { + // Unrecoverable I/O error + retcode_error = retcode; + break; + } + if (entropy >= bits) { + break; + } + } else { + // Unreachable: poll() should only return -1 or 1 + retcode_error = -1; + break; + } + } + do { + retcode = close(fd); + } while (retcode == -1 && errno == EINTR); + if (strategy == PROC) { + do { + retcode = fclose(proc_file); + } while (retcode == -1 && errno == EINTR); + } + if (retcode_error != 0) { + return retcode_error; + } + return retcode; +} +#endif /* defined(__linux__) */ + +static int +randombytes_linux_randombytes_urandom(void *buf, size_t n) +{ + int fd; + size_t offset = 0, count; + ssize_t tmp; + do { + fd = open("/dev/urandom", O_RDONLY); + } while (fd == -1 && errno == EINTR); + if (fd == -1) + return -1; +#if defined(__linux__) + if (randombytes_linux_wait_for_entropy(fd) == -1) + return -1; +#endif + + while (n > 0) { + count = n <= SSIZE_MAX ? 
n : SSIZE_MAX; + tmp = read(fd, (char *)buf + offset, count); + if (tmp == -1 && (errno == EAGAIN || errno == EINTR)) { + continue; + } + if (tmp == -1) + return -1; /* Unrecoverable IO error */ + offset += tmp; + n -= tmp; + } + close(fd); + assert(n == 0); + return 0; +} +#endif /* defined(__linux__) && !defined(SYS_getrandom) */ + +#if defined(BSD) +static int +randombytes_bsd_randombytes(void *buf, size_t n) +{ + arc4random_buf(buf, n); + return 0; +} +#endif /* defined(BSD) */ + +#if defined(__EMSCRIPTEN__) +static int +randombytes_js_randombytes_nodejs(void *buf, size_t n) +{ + const int ret = EM_ASM_INT( + { + var crypto; + try { + crypto = require('crypto'); + } catch (error) { + return -2; + } + try { + writeArrayToMemory(crypto.randomBytes($1), $0); + return 0; + } catch (error) { + return -1; + } + }, + buf, + n); + switch (ret) { + case 0: + return 0; + case -1: + errno = EINVAL; + return -1; + case -2: + errno = ENOSYS; + return -1; + } + assert(false); // Unreachable +} +#endif /* defined(__EMSCRIPTEN__) */ + +SQISIGN_API +int +randombytes_select(unsigned char *buf, unsigned long long n) +{ +#if defined(__EMSCRIPTEN__) + return randombytes_js_randombytes_nodejs(buf, n); +#elif defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) +#if defined(USE_GLIBC) + /* Use getrandom system call */ + return randombytes_linux_randombytes_getrandom(buf, n); +#elif defined(SYS_getrandom) + /* Use getrandom system call */ + return randombytes_linux_randombytes_getrandom(buf, n); +#else + /* When we have enough entropy, we can read from /dev/urandom */ + return randombytes_linux_randombytes_urandom(buf, n); +#endif +#elif defined(BSD) + /* Use arc4random system call */ + return randombytes_bsd_randombytes(buf, n); +#elif defined(_WIN32) + /* Use windows API */ + return randombytes_win32_randombytes(buf, n); +#elif defined(__wasi__) + /* Use WASI */ + return randombytes_wasi_randombytes(buf, n); +#else +#error "randombytes(...) is not supported on this platform" +#endif +} + +#ifdef RANDOMBYTES_SYSTEM +SQISIGN_API +int +randombytes(unsigned char *x, unsigned long long xlen) +{ + + int ret = randombytes_select(x, (size_t)xlen); +#ifdef ENABLE_CT_TESTING + VALGRIND_MAKE_MEM_UNDEFINED(x, xlen); +#endif + return ret; +} + +SQISIGN_API +void +randombytes_init(unsigned char *entropy_input, + unsigned char *personalization_string, + int security_strength) +{ + (void)entropy_input; + (void)personalization_string; + (void)security_strength; +} +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/rng.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/rng.h new file mode 100644 index 0000000000..0a9ca0e465 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/rng.h @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef rng_h +#define rng_h + +#include + +/** + * Randombytes initialization. + * Initialization may be needed for some random number generators (e.g. CTR-DRBG). + * + * @param[in] entropy_input 48-byte entropy input + * @param[in] personalization_string Personalization string + * @param[in] security_strength Security strength + */ +SQISIGN_API +void randombytes_init(unsigned char *entropy_input, + unsigned char *personalization_string, + int security_strength); + +/** + * Random byte generation using the platform's system RNG (getrandom or /dev/urandom on Linux, arc4random on BSD, CryptGenRandom on Windows, or the WASI/Emscripten equivalents). + * The caller is responsible for allocating sufficient memory to hold x. + * + * @param[out] x Memory to hold the random bytes.
+ * @param[in] xlen Number of random bytes to be generated + * @return int 0 on success, -1 otherwise + */ +SQISIGN_API +int randombytes_select(unsigned char *x, unsigned long long xlen); + +/** + * Random byte generation. + * The caller is responsible for allocating sufficient memory to hold x. + * + * @param[out] x Memory to hold the random bytes. + * @param[in] xlen Number of random bytes to be generated + * @return int 0 on success, -1 otherwise + */ +SQISIGN_API +int randombytes(unsigned char *x, unsigned long long xlen); + +#endif /* rng_h */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sig.h new file mode 100644 index 0000000000..4c33510084 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sig.h @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef SQISIGN_H +#define SQISIGN_H + +#include +#include + +#if defined(ENABLE_SIGN) +/** + * SQIsign keypair generation. + * + * The implementation corresponds to SQIsign.CompactKeyGen() in the SQIsign spec. + * The caller is responsible for allocating sufficient memory to hold pk and sk. + * + * @param[out] pk SQIsign public key + * @param[out] sk SQIsign secret key + * @return int status code + */ +SQISIGN_API +int sqisign_keypair(unsigned char *pk, unsigned char *sk); + +/** + * SQIsign signature generation. + * + * The implementation performs SQIsign.expandSK() + SQIsign.sign() in the SQIsign spec. + * The key provided is a compacted secret key. + * The caller is responsible for allocating sufficient memory to hold sm. + * + * @param[out] sm Signature concatenated with message + * @param[out] smlen Pointer to the length of sm + * @param[in] m Message to be signed + * @param[in] mlen Message length + * @param[in] sk Compacted secret key + * @return int status code + */ +SQISIGN_API +int sqisign_sign(unsigned char *sm, + unsigned long long *smlen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *sk); +#endif + +/** + * SQIsign open signature. + * + * The implementation performs SQIsign.verify(). If the signature verification succeeds, the + * original message is stored in m. The key provided is a compact public key. The caller is + * responsible for allocating sufficient memory to hold m. + * + * @param[out] m Message stored if verification succeeds + * @param[out] mlen Pointer to the length of m + * @param[in] sm Signature concatenated with message + * @param[in] smlen Length of sm + * @param[in] pk Compacted public key + * @return int status code + */ +SQISIGN_API +int sqisign_open(unsigned char *m, + unsigned long long *mlen, + const unsigned char *sm, + unsigned long long smlen, + const unsigned char *pk); + +/** + * SQIsign verify signature. + * + * Returns 0 if the signature verification succeeds, and 1 otherwise. + * + * @param[in] m Message whose signature is verified + * @param[in] mlen Length of m + * @param[in] sig Signature + * @param[in] siglen Length of sig + * @param[in] pk Compacted public key + * @return int 0 if verification succeeds, 1 otherwise.
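+ * + * Illustrative usage sketch (assumes msg and msg_len hold the signed message, pk an already + * encoded public key, and sig an encoded signature of SIGNATURE_BYTES bytes): + * + *   if (sqisign_verify(msg, msg_len, sig, SIGNATURE_BYTES, pk) == 0) { + *       // signature accepted + *   } else { + *       // signature rejected + *   }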
+ */ +SQISIGN_API +int sqisign_verify(const unsigned char *m, + unsigned long long mlen, + const unsigned char *sig, + unsigned long long siglen, + const unsigned char *pk); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sign.c new file mode 100644 index 0000000000..9216bbe4d3 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sign.c @@ -0,0 +1,634 @@ +#include +#include +#include +#include +#include +#include + +// compute the commitment with ideal to isogeny clapotis +// and apply it to the basis of E0 (together with the multiplication by some scalar u) +static bool +commit(ec_curve_t *E_com, ec_basis_t *basis_even_com, quat_left_ideal_t *lideal_com) +{ + + bool found = false; + + found = quat_sampling_random_ideal_O0_given_norm(lideal_com, &COM_DEGREE, 1, &QUAT_represent_integer_params, NULL); + // replacing it with a shorter prime norm equivalent ideal + found = found && quat_lideal_prime_norm_reduced_equivalent( + lideal_com, &QUATALG_PINFTY, QUAT_primality_num_iter, QUAT_equiv_bound_coeff); + // ideal to isogeny clapotis + found = found && dim2id2iso_arbitrary_isogeny_evaluation(basis_even_com, E_com, lideal_com); + return found; +} + +static void +compute_challenge_ideal_signature(quat_left_ideal_t *lideal_chall_two, const signature_t *sig, const secret_key_t *sk) +{ + ibz_vec_2_t vec; + ibz_vec_2_init(&vec); + + // vec is a vector [1, chall_coeff] coefficients encoding the kernel of the challenge + // isogeny as B[0] + chall_coeff*B[1] where B is the canonical basis of the + // 2^TORSION_EVEN_POWER torsion of EA + ibz_set(&vec[0], 1); + ibz_copy_digit_array(&vec[1], sig->chall_coeff); + + // now we compute the ideal associated to the challenge + // for that, we need to find vec such that + // the kernel of the challenge isogeny is generated by vec[0]*B0[0] + vec[1]*B0[1] where B0 + // is the image through the secret key isogeny of the canonical basis E0 + ibz_mat_2x2_eval(&vec, &(sk->mat_BAcan_to_BA0_two), &vec); + + // lideal_chall_two is the pullback of the ideal challenge through the secret key ideal + id2iso_kernel_dlogs_to_ideal_even(lideal_chall_two, &vec, TORSION_EVEN_POWER); + assert(ibz_cmp(&lideal_chall_two->norm, &TORSION_PLUS_2POWER) == 0); + + ibz_vec_2_finalize(&vec); +} + +static void +sample_response(quat_alg_elem_t *x, const quat_lattice_t *lattice, const ibz_t *lattice_content) +{ + ibz_t bound; + ibz_init(&bound); + ibz_pow(&bound, &ibz_const_two, SQIsign_response_length); + ibz_sub(&bound, &bound, &ibz_const_one); + ibz_mul(&bound, &bound, lattice_content); + + int ok UNUSED = quat_lattice_sample_from_ball(x, lattice, &QUATALG_PINFTY, &bound); + assert(ok); + + ibz_finalize(&bound); +} + +static void +compute_response_quat_element(quat_alg_elem_t *resp_quat, + ibz_t *lattice_content, + const secret_key_t *sk, + const quat_left_ideal_t *lideal_chall_two, + const quat_left_ideal_t *lideal_commit) +{ + quat_left_ideal_t lideal_chall_secret; + quat_lattice_t lattice_hom_chall_to_com, lat_commit; + + // Init + quat_left_ideal_init(&lideal_chall_secret); + quat_lattice_init(&lat_commit); + quat_lattice_init(&lattice_hom_chall_to_com); + + // lideal_chall_secret = lideal_secret * lideal_chall_two + quat_lideal_inter(&lideal_chall_secret, lideal_chall_two, &(sk->secret_ideal), &QUATALG_PINFTY); + + // now we compute lideal_com_to_chall which is dual(Icom)* lideal_chall_secret + quat_lattice_conjugate_without_hnf(&lat_commit, &(lideal_commit->lattice)); + 
quat_lattice_intersect(&lattice_hom_chall_to_com, &lideal_chall_secret.lattice, &lat_commit); + + // sampling the smallest response + ibz_mul(lattice_content, &lideal_chall_secret.norm, &lideal_commit->norm); + sample_response(resp_quat, &lattice_hom_chall_to_com, lattice_content); + + // Clean up + quat_left_ideal_finalize(&lideal_chall_secret); + quat_lattice_finalize(&lat_commit); + quat_lattice_finalize(&lattice_hom_chall_to_com); +} + +static void +compute_backtracking_signature(signature_t *sig, quat_alg_elem_t *resp_quat, ibz_t *lattice_content, ibz_t *remain) +{ + uint_fast8_t backtracking; + ibz_t tmp; + ibz_init(&tmp); + + ibz_vec_4_t dummy_coord; + ibz_vec_4_init(&dummy_coord); + + quat_alg_make_primitive(&dummy_coord, &tmp, resp_quat, &MAXORD_O0); + ibz_mul(&resp_quat->denom, &resp_quat->denom, &tmp); + assert(quat_lattice_contains(NULL, &MAXORD_O0, resp_quat)); + + // the backtracking is the common part of the response and the challenge + // its degree is the scalar tmp computed above such that quat_resp is in tmp * O0. + backtracking = ibz_two_adic(&tmp); + sig->backtracking = backtracking; + + ibz_pow(&tmp, &ibz_const_two, backtracking); + ibz_div(lattice_content, remain, lattice_content, &tmp); + + ibz_finalize(&tmp); + ibz_vec_4_finalize(&dummy_coord); +} + +static uint_fast8_t +compute_random_aux_norm_and_helpers(signature_t *sig, + ibz_t *random_aux_norm, + ibz_t *degree_resp_inv, + ibz_t *remain, + const ibz_t *lattice_content, + quat_alg_elem_t *resp_quat, + quat_left_ideal_t *lideal_com_resp, + quat_left_ideal_t *lideal_commit) +{ + uint_fast8_t pow_dim2_deg_resp; + uint_fast8_t exp_diadic_val_full_resp; + + ibz_t tmp, degree_full_resp, degree_odd_resp, norm_d; + + // Init + ibz_init(°ree_full_resp); + ibz_init(°ree_odd_resp); + ibz_init(&norm_d); + ibz_init(&tmp); + + quat_alg_norm(°ree_full_resp, &norm_d, resp_quat, &QUATALG_PINFTY); + + // dividing by n(lideal_com) * n(lideal_secret_chall) + assert(ibz_is_one(&norm_d)); + ibz_div(°ree_full_resp, remain, °ree_full_resp, lattice_content); + assert(ibz_cmp(remain, &ibz_const_zero) == 0); + + // computing the diadic valuation + exp_diadic_val_full_resp = ibz_two_adic(°ree_full_resp); + sig->two_resp_length = exp_diadic_val_full_resp; + + // removing the power of two part + ibz_pow(&tmp, &ibz_const_two, exp_diadic_val_full_resp); + ibz_div(°ree_odd_resp, remain, °ree_full_resp, &tmp); + assert(ibz_cmp(remain, &ibz_const_zero) == 0); +#ifndef NDEBUG + ibz_pow(&tmp, &ibz_const_two, SQIsign_response_length - sig->backtracking); + assert(ibz_cmp(&tmp, °ree_odd_resp) > 0); +#endif + + // creating the ideal + quat_alg_conj(resp_quat, resp_quat); + + // setting the norm + ibz_mul(&tmp, &lideal_commit->norm, °ree_odd_resp); + quat_lideal_create(lideal_com_resp, resp_quat, &tmp, &MAXORD_O0, &QUATALG_PINFTY); + + // now we compute the ideal_aux + // computing the norm + pow_dim2_deg_resp = SQIsign_response_length - exp_diadic_val_full_resp - sig->backtracking; + ibz_pow(remain, &ibz_const_two, pow_dim2_deg_resp); + ibz_sub(random_aux_norm, remain, °ree_odd_resp); + + // multiplying by 2^HD_extra_torsion to account for the fact that + // we use extra torsion above the kernel + for (int i = 0; i < HD_extra_torsion; i++) + ibz_mul(remain, remain, &ibz_const_two); + + ibz_invmod(degree_resp_inv, °ree_odd_resp, remain); + + ibz_finalize(°ree_full_resp); + ibz_finalize(°ree_odd_resp); + ibz_finalize(&norm_d); + ibz_finalize(&tmp); + + return pow_dim2_deg_resp; +} + +static int +evaluate_random_aux_isogeny_signature(ec_curve_t *E_aux, + 
ec_basis_t *B_aux, + const ibz_t *norm, + const quat_left_ideal_t *lideal_com_resp) +{ + quat_left_ideal_t lideal_aux; + quat_left_ideal_t lideal_aux_resp_com; + + // Init + quat_left_ideal_init(&lideal_aux); + quat_left_ideal_init(&lideal_aux_resp_com); + + // sampling the ideal at random + int found = quat_sampling_random_ideal_O0_given_norm( + &lideal_aux, norm, 0, &QUAT_represent_integer_params, &QUAT_prime_cofactor); + + if (found) { + // pushing forward + quat_lideal_inter(&lideal_aux_resp_com, lideal_com_resp, &lideal_aux, &QUATALG_PINFTY); + + // now we evaluate this isogeny on the basis of E0 + found = dim2id2iso_arbitrary_isogeny_evaluation(B_aux, E_aux, &lideal_aux_resp_com); + + // Clean up + quat_left_ideal_finalize(&lideal_aux_resp_com); + quat_left_ideal_finalize(&lideal_aux); + } + + return found; +} + +static int +compute_dim2_isogeny_challenge(theta_couple_curve_with_basis_t *codomain, + theta_couple_curve_with_basis_t *domain, + const ibz_t *degree_resp_inv, + int pow_dim2_deg_resp, + int exp_diadic_val_full_resp, + int reduced_order) +{ + // now, we compute the isogeny Phi : Ecom x Eaux -> Echl' x Eaux' + // where Echl' is 2^exp_diadic_val_full_resp isogenous to Echal + // ker Phi = <(Bcom_can.P,Baux.P),(Bcom_can.Q,Baux.Q)> + + // preparing the domain + theta_couple_curve_t EcomXEaux; + copy_curve(&EcomXEaux.E1, &domain->E1); + copy_curve(&EcomXEaux.E2, &domain->E2); + + // preparing the kernel + theta_kernel_couple_points_t dim_two_ker; + copy_bases_to_kernel(&dim_two_ker, &domain->B1, &domain->B2); + + // dividing by the degree of the response + digit_t scalar[NWORDS_ORDER]; + ibz_to_digit_array(scalar, degree_resp_inv); + ec_mul(&dim_two_ker.T1.P2, scalar, reduced_order, &dim_two_ker.T1.P2, &EcomXEaux.E2); + ec_mul(&dim_two_ker.T2.P2, scalar, reduced_order, &dim_two_ker.T2.P2, &EcomXEaux.E2); + ec_mul(&dim_two_ker.T1m2.P2, scalar, reduced_order, &dim_two_ker.T1m2.P2, &EcomXEaux.E2); + + // and multiplying by 2^exp_diadic... 
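+    // (i.e. exp_diadic_val_full_resp doublings, multiplication by 2^two_resp_length, which brings the kernel points down to the 2^(pow_dim2_deg_resp + HD_extra_torsion) torsion used by the dim-2 chain)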
+ double_couple_point_iter(&dim_two_ker.T1, exp_diadic_val_full_resp, &dim_two_ker.T1, &EcomXEaux); + double_couple_point_iter(&dim_two_ker.T2, exp_diadic_val_full_resp, &dim_two_ker.T2, &EcomXEaux); + double_couple_point_iter(&dim_two_ker.T1m2, exp_diadic_val_full_resp, &dim_two_ker.T1m2, &EcomXEaux); + + theta_couple_point_t pushed_points[3]; + theta_couple_point_t *const Tev1 = pushed_points + 0, *const Tev2 = pushed_points + 1, + *const Tev1m2 = pushed_points + 2; + + // Set points on the commitment curve + copy_point(&Tev1->P1, &domain->B1.P); + copy_point(&Tev2->P1, &domain->B1.Q); + copy_point(&Tev1m2->P1, &domain->B1.PmQ); + + // Zero points on the aux curve + ec_point_init(&Tev1->P2); + ec_point_init(&Tev2->P2); + ec_point_init(&Tev1m2->P2); + + theta_couple_curve_t codomain_product; + + // computation of the dim2 isogeny + if (!theta_chain_compute_and_eval_randomized(pow_dim2_deg_resp, + &EcomXEaux, + &dim_two_ker, + true, + &codomain_product, + pushed_points, + sizeof(pushed_points) / sizeof(*pushed_points))) + return 0; + + assert(test_couple_point_order_twof(Tev1, &codomain_product, reduced_order)); + + // Set the auxiliary curve + copy_curve(&codomain->E1, &codomain_product.E2); + + // Set the codomain curve from the dim 2 isogeny + // it should always be the first curve + copy_curve(&codomain->E2, &codomain_product.E1); + + // Set the evaluated basis points + copy_point(&codomain->B1.P, &Tev1->P2); + copy_point(&codomain->B1.Q, &Tev2->P2); + copy_point(&codomain->B1.PmQ, &Tev1m2->P2); + + copy_point(&codomain->B2.P, &Tev1->P1); + copy_point(&codomain->B2.Q, &Tev2->P1); + copy_point(&codomain->B2.PmQ, &Tev1m2->P1); + return 1; +} + +static int +compute_small_chain_isogeny_signature(ec_curve_t *E_chall_2, + ec_basis_t *B_chall_2, + const quat_alg_elem_t *resp_quat, + int pow_dim2_deg_resp, + int length) +{ + int ret = 1; + + ibz_t two_pow; + ibz_init(&two_pow); + + ibz_vec_2_t vec_resp_two; + ibz_vec_2_init(&vec_resp_two); + + quat_left_ideal_t lideal_resp_two; + quat_left_ideal_init(&lideal_resp_two); + + // computing the ideal + ibz_pow(&two_pow, &ibz_const_two, length); + + // we compute the generator of the challenge ideal + quat_lideal_create(&lideal_resp_two, resp_quat, &two_pow, &MAXORD_O0, &QUATALG_PINFTY); + + // computing the coefficients of the kernel in terms of the basis of O0 + id2iso_ideal_to_kernel_dlogs_even(&vec_resp_two, &lideal_resp_two); + + ec_point_t points[3]; + copy_point(&points[0], &B_chall_2->P); + copy_point(&points[1], &B_chall_2->Q); + copy_point(&points[2], &B_chall_2->PmQ); + + // getting down to the right order and applying the matrix + ec_dbl_iter_basis(B_chall_2, pow_dim2_deg_resp + HD_extra_torsion, B_chall_2, E_chall_2); + assert(test_basis_order_twof(B_chall_2, E_chall_2, length)); + + ec_point_t ker; + // applying the vector to find the kernel + ec_biscalar_mul_ibz_vec(&ker, &vec_resp_two, length, B_chall_2, E_chall_2); + assert(test_point_order_twof(&ker, E_chall_2, length)); + + // computing the isogeny and pushing the points + if (ec_eval_small_chain(E_chall_2, &ker, length, points, 3, true)) { + ret = 0; + } + + // copying the result + copy_point(&B_chall_2->P, &points[0]); + copy_point(&B_chall_2->Q, &points[1]); + copy_point(&B_chall_2->PmQ, &points[2]); + + ibz_finalize(&two_pow); + ibz_vec_2_finalize(&vec_resp_two); + quat_left_ideal_finalize(&lideal_resp_two); + + return ret; +} + +static int +compute_challenge_codomain_signature(const signature_t *sig, + secret_key_t *sk, + ec_curve_t *E_chall, + const ec_curve_t *E_chall_2, + 
ec_basis_t *B_chall_2) +{ + ec_isog_even_t phi_chall; + ec_basis_t bas_sk; + copy_basis(&bas_sk, &sk->canonical_basis); + + phi_chall.curve = sk->curve; + phi_chall.length = TORSION_EVEN_POWER - sig->backtracking; + assert(test_basis_order_twof(&bas_sk, &sk->curve, TORSION_EVEN_POWER)); + + // Compute the kernel + { + ec_ladder3pt(&phi_chall.kernel, sig->chall_coeff, &bas_sk.P, &bas_sk.Q, &bas_sk.PmQ, &sk->curve); + } + assert(test_point_order_twof(&phi_chall.kernel, &sk->curve, TORSION_EVEN_POWER)); + + // Double kernel to get correct order + ec_dbl_iter(&phi_chall.kernel, sig->backtracking, &phi_chall.kernel, &sk->curve); + + assert(test_point_order_twof(&phi_chall.kernel, E_chall, phi_chall.length)); + + // Compute the codomain from challenge isogeny + if (ec_eval_even(E_chall, &phi_chall, NULL, 0)) + return 0; + +#ifndef NDEBUG + fp2_t j_chall, j_codomain; + ec_j_inv(&j_codomain, E_chall_2); + ec_j_inv(&j_chall, E_chall); + // apparently its always the second one curve + assert(fp2_is_equal(&j_chall, &j_codomain)); +#endif + + // applying the isomorphism from E_chall_2 to E_chall + ec_isom_t isom; + if (ec_isomorphism(&isom, E_chall_2, E_chall)) + return 0; // error due to a corner case with 1/p probability + ec_iso_eval(&B_chall_2->P, &isom); + ec_iso_eval(&B_chall_2->Q, &isom); + ec_iso_eval(&B_chall_2->PmQ, &isom); + + return 1; +} + +static void +set_aux_curve_signature(signature_t *sig, ec_curve_t *E_aux) +{ + ec_normalize_curve(E_aux); + fp2_copy(&sig->E_aux_A, &E_aux->A); +} + +static void +compute_and_set_basis_change_matrix(signature_t *sig, + const ec_basis_t *B_aux_2, + ec_basis_t *B_chall_2, + ec_curve_t *E_aux_2, + ec_curve_t *E_chall, + int f) +{ + // Matrices for change of bases matrices + ibz_mat_2x2_t mat_Baux2_to_Baux2_can, mat_Bchall_can_to_Bchall; + ibz_mat_2x2_init(&mat_Baux2_to_Baux2_can); + ibz_mat_2x2_init(&mat_Bchall_can_to_Bchall); + + // Compute canonical bases + ec_basis_t B_can_chall, B_aux_2_can; + sig->hint_chall = ec_curve_to_basis_2f_to_hint(&B_can_chall, E_chall, TORSION_EVEN_POWER); + sig->hint_aux = ec_curve_to_basis_2f_to_hint(&B_aux_2_can, E_aux_2, TORSION_EVEN_POWER); + +#ifndef NDEBUG + { + // Ensure all points have the desired order + assert(test_basis_order_twof(&B_aux_2_can, E_aux_2, TORSION_EVEN_POWER)); + assert(test_basis_order_twof(B_aux_2, E_aux_2, f)); + fp2_t w0; + weil(&w0, f, &B_aux_2->P, &B_aux_2->Q, &B_aux_2->PmQ, E_aux_2); + } +#endif + + // compute the matrix to go from B_aux_2 to B_aux_2_can + change_of_basis_matrix_tate_invert(&mat_Baux2_to_Baux2_can, &B_aux_2_can, B_aux_2, E_aux_2, f); + + // apply the change of basis to B_chall_2 + matrix_application_even_basis(B_chall_2, E_chall, &mat_Baux2_to_Baux2_can, f); + +#ifndef NDEBUG + { + // Ensure all points have the desired order + assert(test_basis_order_twof(&B_can_chall, E_chall, TORSION_EVEN_POWER)); + } +#endif + + // compute the matrix to go from B_chall_can to B_chall_2 + change_of_basis_matrix_tate(&mat_Bchall_can_to_Bchall, B_chall_2, &B_can_chall, E_chall, f); + + // Assert all values in the matrix are of the expected size for packing + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][1]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][1]) <= SQIsign_response_length + HD_extra_torsion); + + // Set the basis change matrix 
to signature + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][0], &(mat_Bchall_can_to_Bchall[0][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][1], &(mat_Bchall_can_to_Bchall[0][1])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][0], &(mat_Bchall_can_to_Bchall[1][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][1], &(mat_Bchall_can_to_Bchall[1][1])); + + // Finalise the matrices + ibz_mat_2x2_finalize(&mat_Bchall_can_to_Bchall); + ibz_mat_2x2_finalize(&mat_Baux2_to_Baux2_can); +} + +int +protocols_sign(signature_t *sig, const public_key_t *pk, secret_key_t *sk, const unsigned char *m, size_t l) +{ + int ret = 0; + int reduced_order = 0; // work around false positive gcc warning + + uint_fast8_t pow_dim2_deg_resp; + assert(SQIsign_response_length <= (intmax_t)UINT_FAST8_MAX); // otherwise we might need more bits there + + ibz_t remain, lattice_content, random_aux_norm, degree_resp_inv; + ibz_init(&remain); + ibz_init(&lattice_content); + ibz_init(&random_aux_norm); + ibz_init(°ree_resp_inv); + + quat_alg_elem_t resp_quat; + quat_alg_elem_init(&resp_quat); + + quat_left_ideal_t lideal_commit, lideal_com_resp; + quat_left_ideal_init(&lideal_commit); + quat_left_ideal_init(&lideal_com_resp); + + // This structure holds two curves E1 x E2 together with a basis + // Bi of E[2^n] for each of these curves + theta_couple_curve_with_basis_t Ecom_Eaux; + // This structure holds two curves E1 x E2 together with a basis + // Bi of Ei[2^n] + theta_couple_curve_with_basis_t Eaux2_Echall2; + + // This will hold the challenge curve + ec_curve_t E_chall = sk->curve; + + ec_curve_init(&Ecom_Eaux.E1); + ec_curve_init(&Ecom_Eaux.E2); + + while (!ret) { + + // computing the commitment + ret = commit(&Ecom_Eaux.E1, &Ecom_Eaux.B1, &lideal_commit); + + // start again if the commitment generation has failed + if (!ret) { + continue; + } + + // Hash the message to a kernel generator + // i.e. 
a scalar such that ker = P + [s]Q + hash_to_challenge(&sig->chall_coeff, pk, &Ecom_Eaux.E1, m, l); + // Compute the challenge ideal and response quaternion element + { + quat_left_ideal_t lideal_chall_two; + quat_left_ideal_init(&lideal_chall_two); + + // computing the challenge ideal + compute_challenge_ideal_signature(&lideal_chall_two, sig, sk); + compute_response_quat_element(&resp_quat, &lattice_content, sk, &lideal_chall_two, &lideal_commit); + + // Clean up + quat_left_ideal_finalize(&lideal_chall_two); + } + + // computing the amount of backtracking we're making + // and removing it + compute_backtracking_signature(sig, &resp_quat, &lattice_content, &remain); + + // creating lideal_com * lideal_resp + // we first compute the norm of lideal_resp + // norm of the resp_quat + pow_dim2_deg_resp = compute_random_aux_norm_and_helpers(sig, + &random_aux_norm, + °ree_resp_inv, + &remain, + &lattice_content, + &resp_quat, + &lideal_com_resp, + &lideal_commit); + + // notational conventions: + // B0 = canonical basis of E0 + // B_com = image through commitment isogeny (odd degree) of canonical basis of E0 + // B_aux = image through aux_resp_com isogeny (odd degree) of canonical basis of E0 + + if (pow_dim2_deg_resp > 0) { + // Evaluate the random aux ideal on the curve E0 and its basis to find E_aux and B_aux + ret = + evaluate_random_aux_isogeny_signature(&Ecom_Eaux.E2, &Ecom_Eaux.B2, &random_aux_norm, &lideal_com_resp); + + // auxiliary isogeny computation failed we must start again + if (!ret) { + continue; + } + +#ifndef NDEBUG + // testing that the order of the points in the bases is as expected + assert(test_basis_order_twof(&Ecom_Eaux.B1, &Ecom_Eaux.E1, TORSION_EVEN_POWER)); + assert(test_basis_order_twof(&Ecom_Eaux.B2, &Ecom_Eaux.E2, TORSION_EVEN_POWER)); +#endif + + // applying the matrix to compute Baux + // first, we reduce to the relevant order + reduced_order = pow_dim2_deg_resp + HD_extra_torsion + sig->two_resp_length; + ec_dbl_iter_basis(&Ecom_Eaux.B1, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B1, &Ecom_Eaux.E1); + ec_dbl_iter_basis(&Ecom_Eaux.B2, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B2, &Ecom_Eaux.E2); + + // Given all the above data, compute a dim two isogeny with domain + // E_com x E_aux + // and codomain + // E_aux_2 x E_chall_2 (note: E_chall_2 is isomorphic to E_chall) + // and evaluated points stored as bases in + // B_aux_2 on E_aux_2 + // B_chall_2 on E_chall_2 + ret = compute_dim2_isogeny_challenge( + &Eaux2_Echall2, &Ecom_Eaux, °ree_resp_inv, pow_dim2_deg_resp, sig->two_resp_length, reduced_order); + if (!ret) + continue; + } else { + // No 2d isogeny needed, so simulate a "Kani matrix" identity here + copy_curve(&Eaux2_Echall2.E1, &Ecom_Eaux.E1); + copy_curve(&Eaux2_Echall2.E2, &Ecom_Eaux.E1); + + reduced_order = sig->two_resp_length; + ec_dbl_iter_basis(&Eaux2_Echall2.B1, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B1, &Ecom_Eaux.E1); + ec_dbl_iter_basis(&Eaux2_Echall2.B1, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B1, &Ecom_Eaux.E1); + copy_basis(&Eaux2_Echall2.B2, &Eaux2_Echall2.B1); + } + + // computation of the remaining small chain of two isogenies when needed + if (sig->two_resp_length > 0) { + if (!compute_small_chain_isogeny_signature( + &Eaux2_Echall2.E2, &Eaux2_Echall2.B2, &resp_quat, pow_dim2_deg_resp, sig->two_resp_length)) { + assert(0); // this shouldn't fail + } + } + + // computation of the challenge codomain + if (!compute_challenge_codomain_signature(sig, sk, &E_chall, &Eaux2_Echall2.E2, &Eaux2_Echall2.B2)) + assert(0); // 
this shouldn't fail + } + + // Set to the signature the Montgomery A-coefficient of E_aux_2 + set_aux_curve_signature(sig, &Eaux2_Echall2.E1); + + // Set the basis change matrix from canonical bases to the supplied bases + compute_and_set_basis_change_matrix( + sig, &Eaux2_Echall2.B1, &Eaux2_Echall2.B2, &Eaux2_Echall2.E1, &E_chall, reduced_order); + + quat_alg_elem_finalize(&resp_quat); + quat_left_ideal_finalize(&lideal_commit); + quat_left_ideal_finalize(&lideal_com_resp); + + ibz_finalize(&lattice_content); + ibz_finalize(&remain); + ibz_finalize(°ree_resp_inv); + ibz_finalize(&random_aux_norm); + + return ret; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/signature.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/signature.h new file mode 100644 index 0000000000..ba38c360e6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/signature.h @@ -0,0 +1,97 @@ +/** @file + * + * @brief The key generation and signature protocols + */ + +#ifndef SIGNATURE_H +#define SIGNATURE_H + +#include +#include +#include +#include + +/** @defgroup signature SQIsignHD key generation and signature protocols + * @{ + */ +/** @defgroup signature_t Types for SQIsignHD key generation and signature protocols + * @{ + */ + +/** @brief Type for the secret keys + * + * @typedef secret_key_t + * + * @struct secret_key + * + */ +typedef struct secret_key +{ + ec_curve_t curve; /// the public curve, but with little precomputations + quat_left_ideal_t secret_ideal; + ibz_mat_2x2_t mat_BAcan_to_BA0_two; // mat_BA0_to_BAcan*BA0 = BAcan, where BAcan is the + // canonical basis of EA[2^e], and BA0 the image of the + // basis of E0[2^e] through the secret isogeny + ec_basis_t canonical_basis; // the canonical basis of the public key curve +} secret_key_t; + +/** @} + */ + +/*************************** Functions *****************************/ + +void secret_key_init(secret_key_t *sk); +void secret_key_finalize(secret_key_t *sk); + +/** + * @brief Key generation + * + * @param pk Output: will contain the public key + * @param sk Output: will contain the secret key + * @returns 1 if success, 0 otherwise + */ +int protocols_keygen(public_key_t *pk, secret_key_t *sk); + +/** + * @brief Signature computation + * + * @param sig Output: will contain the signature + * @param sk secret key + * @param pk public key + * @param m message + * @param l size + * @returns 1 if success, 0 otherwise + */ +int protocols_sign(signature_t *sig, const public_key_t *pk, secret_key_t *sk, const unsigned char *m, size_t l); + +/*************************** Encoding *****************************/ + +/** @defgroup encoding Encoding and decoding functions + * @{ + */ + +/** + * @brief Encodes a secret key as a byte array + * + * @param enc : Byte array to encode the secret key (including public key) in + * @param sk : Secret key to encode + * @param pk : Public key to encode + */ +void secret_key_to_bytes(unsigned char *enc, const secret_key_t *sk, const public_key_t *pk); + +/** + * @brief Decodes a secret key (and public key) from a byte array + * + * @param sk : Structure to decode the secret key in + * @param pk : Structure to decode the public key in + * @param enc : Byte array to decode + */ +void secret_key_from_bytes(secret_key_t *sk, public_key_t *pk, const unsigned char *enc); + +/** @} + */ + +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign.c new file mode 100644 index 0000000000..7335c38d9a 
--- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign.c @@ -0,0 +1,146 @@ +#include +#include +#include +#include +#if defined(ENABLE_SIGN) +#include +#endif + +#if defined(ENABLE_SIGN) +SQISIGN_API +int +sqisign_keypair(unsigned char *pk, unsigned char *sk) +{ + int ret = 0; + secret_key_t skt; + public_key_t pkt = { 0 }; + secret_key_init(&skt); + + ret = !protocols_keygen(&pkt, &skt); + + secret_key_to_bytes(sk, &skt, &pkt); + public_key_to_bytes(pk, &pkt); + secret_key_finalize(&skt); + return ret; +} + +SQISIGN_API +int +sqisign_sign(unsigned char *sm, + unsigned long long *smlen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *sk) +{ + int ret = 0; + secret_key_t skt; + public_key_t pkt = { 0 }; + signature_t sigt; + secret_key_init(&skt); + secret_key_from_bytes(&skt, &pkt, sk); + + memmove(sm + SIGNATURE_BYTES, m, mlen); + + ret = !protocols_sign(&sigt, &pkt, &skt, sm + SIGNATURE_BYTES, mlen); + if (ret != 0) { + *smlen = 0; + goto err; + } + + signature_to_bytes(sm, &sigt); + *smlen = SIGNATURE_BYTES + mlen; + +err: + secret_key_finalize(&skt); + return ret; +} + +SQISIGN_API +int +sqisign_sign_signature(unsigned char *s, + unsigned long long *slen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *sk) +{ + int ret = 0; + secret_key_t skt; + public_key_t pkt = { 0 }; + signature_t sigt; + secret_key_init(&skt); + secret_key_from_bytes(&skt, &pkt, sk); + + ret = !protocols_sign(&sigt, &pkt, &skt, m, mlen); + if (ret != 0) { + *slen = 0; + goto err; + } + + signature_to_bytes(s, &sigt); + *slen = SIGNATURE_BYTES; + +err: + secret_key_finalize(&skt); + return ret; +} +#endif + +SQISIGN_API +int +sqisign_open(unsigned char *m, + unsigned long long *mlen, + const unsigned char *sm, + unsigned long long smlen, + const unsigned char *pk) +{ + int ret = 0; + public_key_t pkt = { 0 }; + signature_t sigt; + + public_key_from_bytes(&pkt, pk); + signature_from_bytes(&sigt, sm); + + ret = !protocols_verify(&sigt, &pkt, sm + SIGNATURE_BYTES, smlen - SIGNATURE_BYTES); + + if (!ret) { + *mlen = smlen - SIGNATURE_BYTES; + memmove(m, sm + SIGNATURE_BYTES, *mlen); + } else { + *mlen = 0; + memset(m, 0, smlen - SIGNATURE_BYTES); + } + + return ret; +} + +SQISIGN_API +int +sqisign_verify(const unsigned char *m, + unsigned long long mlen, + const unsigned char *sig, + unsigned long long siglen, + const unsigned char *pk) +{ + + int ret = 0; + public_key_t pkt = { 0 }; + signature_t sigt; + + public_key_from_bytes(&pkt, pk); + signature_from_bytes(&sigt, sig); + + ret = !protocols_verify(&sigt, &pkt, m, mlen); + + return ret; +} + +SQISIGN_API +int +sqisign_verify_signature(const unsigned char *sig, + unsigned long long siglen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *pk) +{ + return sqisign_verify(m, mlen, sig, siglen, pk); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign_namespace.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign_namespace.h new file mode 100644 index 0000000000..007d2572b9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign_namespace.h @@ -0,0 +1,1071 @@ + +#ifndef SQISIGN_NAMESPACE_H +#define SQISIGN_NAMESPACE_H + +//#define DISABLE_NAMESPACING + +#if defined(_WIN32) +#define SQISIGN_API __declspec(dllexport) +#else +#define SQISIGN_API __attribute__((visibility("default"))) +#endif + +#define PARAM_JOIN3_(a, b, c) sqisign_##a##_##b##_##c +#define PARAM_JOIN3(a, b, c) PARAM_JOIN3_(a, b, c) +#define 
PARAM_NAME3(end, s) PARAM_JOIN3(SQISIGN_VARIANT, end, s) + +#define PARAM_JOIN2_(a, b) sqisign_##a##_##b +#define PARAM_JOIN2(a, b) PARAM_JOIN2_(a, b) +#define PARAM_NAME2(end, s) PARAM_JOIN2(end, s) + +#ifndef DISABLE_NAMESPACING +#define SQISIGN_NAMESPACE_GENERIC(s) PARAM_NAME2(gen, s) +#else +#define SQISIGN_NAMESPACE_GENERIC(s) s +#endif + +#if defined(SQISIGN_VARIANT) && !defined(DISABLE_NAMESPACING) +#if defined(SQISIGN_BUILD_TYPE_REF) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(ref, s) +#elif defined(SQISIGN_BUILD_TYPE_OPT) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(opt, s) +#elif defined(SQISIGN_BUILD_TYPE_BROADWELL) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(broadwell, s) +#elif defined(SQISIGN_BUILD_TYPE_ARM64CRYPTO) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(arm64crypto, s) +#else +#error "Build type not known" +#endif + +#else +#define SQISIGN_NAMESPACE(s) s +#endif + +// Namespacing symbols exported from algebra.c: +#undef quat_alg_add +#undef quat_alg_conj +#undef quat_alg_coord_mul +#undef quat_alg_elem_copy +#undef quat_alg_elem_copy_ibz +#undef quat_alg_elem_equal +#undef quat_alg_elem_is_zero +#undef quat_alg_elem_mul_by_scalar +#undef quat_alg_elem_set +#undef quat_alg_equal_denom +#undef quat_alg_init_set_ui +#undef quat_alg_make_primitive +#undef quat_alg_mul +#undef quat_alg_norm +#undef quat_alg_normalize +#undef quat_alg_scalar +#undef quat_alg_sub + +#define quat_alg_add SQISIGN_NAMESPACE_GENERIC(quat_alg_add) +#define quat_alg_conj SQISIGN_NAMESPACE_GENERIC(quat_alg_conj) +#define quat_alg_coord_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_coord_mul) +#define quat_alg_elem_copy SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy) +#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy_ibz) +#define quat_alg_elem_equal SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_equal) +#define quat_alg_elem_is_zero SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_is_zero) +#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_mul_by_scalar) +#define quat_alg_elem_set SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_set) +#define quat_alg_equal_denom SQISIGN_NAMESPACE_GENERIC(quat_alg_equal_denom) +#define quat_alg_init_set_ui SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set_ui) +#define quat_alg_make_primitive SQISIGN_NAMESPACE_GENERIC(quat_alg_make_primitive) +#define quat_alg_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_mul) +#define quat_alg_norm SQISIGN_NAMESPACE_GENERIC(quat_alg_norm) +#define quat_alg_normalize SQISIGN_NAMESPACE_GENERIC(quat_alg_normalize) +#define quat_alg_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_scalar) +#define quat_alg_sub SQISIGN_NAMESPACE_GENERIC(quat_alg_sub) + +// Namespacing symbols exported from api.c: +#undef crypto_sign +#undef crypto_sign_keypair +#undef crypto_sign_open + +#define crypto_sign SQISIGN_NAMESPACE(crypto_sign) +#define crypto_sign_keypair SQISIGN_NAMESPACE(crypto_sign_keypair) +#define crypto_sign_open SQISIGN_NAMESPACE(crypto_sign_open) + +// Namespacing symbols exported from basis.c: +#undef ec_curve_to_basis_2f_from_hint +#undef ec_curve_to_basis_2f_to_hint +#undef ec_recover_y +#undef lift_basis +#undef lift_basis_normalized + +#define ec_curve_to_basis_2f_from_hint SQISIGN_NAMESPACE(ec_curve_to_basis_2f_from_hint) +#define ec_curve_to_basis_2f_to_hint SQISIGN_NAMESPACE(ec_curve_to_basis_2f_to_hint) +#define ec_recover_y SQISIGN_NAMESPACE(ec_recover_y) +#define lift_basis SQISIGN_NAMESPACE(lift_basis) +#define lift_basis_normalized SQISIGN_NAMESPACE(lift_basis_normalized) + +// Namespacing symbols exported from biextension.c: +#undef 
clear_cofac +#undef ec_dlog_2_tate +#undef ec_dlog_2_weil +#undef fp2_frob +#undef reduced_tate +#undef weil + +#define clear_cofac SQISIGN_NAMESPACE(clear_cofac) +#define ec_dlog_2_tate SQISIGN_NAMESPACE(ec_dlog_2_tate) +#define ec_dlog_2_weil SQISIGN_NAMESPACE(ec_dlog_2_weil) +#define fp2_frob SQISIGN_NAMESPACE(fp2_frob) +#define reduced_tate SQISIGN_NAMESPACE(reduced_tate) +#define weil SQISIGN_NAMESPACE(weil) + +// Namespacing symbols exported from common.c: +#undef hash_to_challenge +#undef public_key_finalize +#undef public_key_init + +#define hash_to_challenge SQISIGN_NAMESPACE(hash_to_challenge) +#define public_key_finalize SQISIGN_NAMESPACE(public_key_finalize) +#define public_key_init SQISIGN_NAMESPACE(public_key_init) + +// Namespacing symbols exported from dim2.c: +#undef ibz_2x2_mul_mod +#undef ibz_mat_2x2_add +#undef ibz_mat_2x2_copy +#undef ibz_mat_2x2_det_from_ibz +#undef ibz_mat_2x2_eval +#undef ibz_mat_2x2_inv_mod +#undef ibz_mat_2x2_set +#undef ibz_vec_2_set + +#define ibz_2x2_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_2x2_mul_mod) +#define ibz_mat_2x2_add SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_add) +#define ibz_mat_2x2_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_copy) +#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_det_from_ibz) +#define ibz_mat_2x2_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_eval) +#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_inv_mod) +#define ibz_mat_2x2_set SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_set) +#define ibz_vec_2_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_set) + +// Namespacing symbols exported from dim2id2iso.c: +#undef dim2id2iso_arbitrary_isogeny_evaluation +#undef dim2id2iso_ideal_to_isogeny_clapotis +#undef find_uv +#undef fixed_degree_isogeny_and_eval + +#define dim2id2iso_arbitrary_isogeny_evaluation SQISIGN_NAMESPACE(dim2id2iso_arbitrary_isogeny_evaluation) +#define dim2id2iso_ideal_to_isogeny_clapotis SQISIGN_NAMESPACE(dim2id2iso_ideal_to_isogeny_clapotis) +#define find_uv SQISIGN_NAMESPACE(find_uv) +#define fixed_degree_isogeny_and_eval SQISIGN_NAMESPACE(fixed_degree_isogeny_and_eval) + +// Namespacing symbols exported from dim4.c: +#undef ibz_inv_dim4_make_coeff_mpm +#undef ibz_inv_dim4_make_coeff_pmp +#undef ibz_mat_4x4_copy +#undef ibz_mat_4x4_equal +#undef ibz_mat_4x4_eval +#undef ibz_mat_4x4_eval_t +#undef ibz_mat_4x4_gcd +#undef ibz_mat_4x4_identity +#undef ibz_mat_4x4_inv_with_det_as_denom +#undef ibz_mat_4x4_is_identity +#undef ibz_mat_4x4_mul +#undef ibz_mat_4x4_negate +#undef ibz_mat_4x4_scalar_div +#undef ibz_mat_4x4_scalar_mul +#undef ibz_mat_4x4_transpose +#undef ibz_mat_4x4_zero +#undef ibz_vec_4_add +#undef ibz_vec_4_content +#undef ibz_vec_4_copy +#undef ibz_vec_4_copy_ibz +#undef ibz_vec_4_is_zero +#undef ibz_vec_4_linear_combination +#undef ibz_vec_4_negate +#undef ibz_vec_4_scalar_div +#undef ibz_vec_4_scalar_mul +#undef ibz_vec_4_set +#undef ibz_vec_4_sub +#undef quat_qf_eval + +#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_mpm) +#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_pmp) +#define ibz_mat_4x4_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_copy) +#define ibz_mat_4x4_equal SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_equal) +#define ibz_mat_4x4_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval) +#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval_t) +#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_gcd) +#define ibz_mat_4x4_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_identity) 
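// Editorial note, not part of the upstream header: assuming this directory is
// built with SQISIGN_BUILD_TYPE_BROADWELL, SQISIGN_VARIANT defined to lvl5, and
// namespacing not disabled, the two wrapper macros defined above expand as:
//
//   SQISIGN_NAMESPACE(crypto_sign_keypair)
//     -> PARAM_NAME3(broadwell, crypto_sign_keypair)
//     -> PARAM_JOIN3(SQISIGN_VARIANT, broadwell, crypto_sign_keypair)
//     -> sqisign_lvl5_broadwell_crypto_sign_keypair
//
//   SQISIGN_NAMESPACE_GENERIC(quat_alg_add)
//     -> PARAM_NAME2(gen, quat_alg_add)
//     -> sqisign_gen_quat_alg_add
//
// i.e. variant- and backend-specific symbols receive a sqisign_<variant>_<backend>_
// prefix, while quaternion/integer arithmetic shared across parameter sets keeps a
// sqisign_gen_ prefix.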
+#define ibz_mat_4x4_inv_with_det_as_denom SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_inv_with_det_as_denom) +#define ibz_mat_4x4_is_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_identity) +#define ibz_mat_4x4_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_mul) +#define ibz_mat_4x4_negate SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_negate) +#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_div) +#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_mul) +#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_transpose) +#define ibz_mat_4x4_zero SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_zero) +#define ibz_vec_4_add SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_add) +#define ibz_vec_4_content SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_content) +#define ibz_vec_4_copy SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy) +#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_ibz) +#define ibz_vec_4_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_is_zero) +#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination) +#define ibz_vec_4_negate SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_negate) +#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_div) +#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul) +#define ibz_vec_4_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_set) +#define ibz_vec_4_sub SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_sub) +#define quat_qf_eval SQISIGN_NAMESPACE_GENERIC(quat_qf_eval) + +// Namespacing symbols exported from ec.c: +#undef cswap_points +#undef ec_biscalar_mul +#undef ec_curve_init +#undef ec_curve_init_from_A +#undef ec_curve_normalize_A24 +#undef ec_curve_verify_A +#undef ec_dbl +#undef ec_dbl_iter +#undef ec_dbl_iter_basis +#undef ec_has_zero_coordinate +#undef ec_is_basis_four_torsion +#undef ec_is_equal +#undef ec_is_four_torsion +#undef ec_is_two_torsion +#undef ec_is_zero +#undef ec_j_inv +#undef ec_ladder3pt +#undef ec_mul +#undef ec_normalize_curve +#undef ec_normalize_curve_and_A24 +#undef ec_normalize_point +#undef ec_point_init +#undef select_point +#undef xADD +#undef xDBL +#undef xDBLADD +#undef xDBLMUL +#undef xDBL_A24 +#undef xDBL_E0 +#undef xMUL + +#define cswap_points SQISIGN_NAMESPACE(cswap_points) +#define ec_biscalar_mul SQISIGN_NAMESPACE(ec_biscalar_mul) +#define ec_curve_init SQISIGN_NAMESPACE(ec_curve_init) +#define ec_curve_init_from_A SQISIGN_NAMESPACE(ec_curve_init_from_A) +#define ec_curve_normalize_A24 SQISIGN_NAMESPACE(ec_curve_normalize_A24) +#define ec_curve_verify_A SQISIGN_NAMESPACE(ec_curve_verify_A) +#define ec_dbl SQISIGN_NAMESPACE(ec_dbl) +#define ec_dbl_iter SQISIGN_NAMESPACE(ec_dbl_iter) +#define ec_dbl_iter_basis SQISIGN_NAMESPACE(ec_dbl_iter_basis) +#define ec_has_zero_coordinate SQISIGN_NAMESPACE(ec_has_zero_coordinate) +#define ec_is_basis_four_torsion SQISIGN_NAMESPACE(ec_is_basis_four_torsion) +#define ec_is_equal SQISIGN_NAMESPACE(ec_is_equal) +#define ec_is_four_torsion SQISIGN_NAMESPACE(ec_is_four_torsion) +#define ec_is_two_torsion SQISIGN_NAMESPACE(ec_is_two_torsion) +#define ec_is_zero SQISIGN_NAMESPACE(ec_is_zero) +#define ec_j_inv SQISIGN_NAMESPACE(ec_j_inv) +#define ec_ladder3pt SQISIGN_NAMESPACE(ec_ladder3pt) +#define ec_mul SQISIGN_NAMESPACE(ec_mul) +#define ec_normalize_curve SQISIGN_NAMESPACE(ec_normalize_curve) +#define ec_normalize_curve_and_A24 SQISIGN_NAMESPACE(ec_normalize_curve_and_A24) +#define ec_normalize_point SQISIGN_NAMESPACE(ec_normalize_point) +#define ec_point_init SQISIGN_NAMESPACE(ec_point_init) +#define 
select_point SQISIGN_NAMESPACE(select_point) +#define xADD SQISIGN_NAMESPACE(xADD) +#define xDBL SQISIGN_NAMESPACE(xDBL) +#define xDBLADD SQISIGN_NAMESPACE(xDBLADD) +#define xDBLMUL SQISIGN_NAMESPACE(xDBLMUL) +#define xDBL_A24 SQISIGN_NAMESPACE(xDBL_A24) +#define xDBL_E0 SQISIGN_NAMESPACE(xDBL_E0) +#define xMUL SQISIGN_NAMESPACE(xMUL) + +// Namespacing symbols exported from ec_jac.c: +#undef ADD +#undef DBL +#undef DBLW +#undef copy_jac_point +#undef jac_from_ws +#undef jac_init +#undef jac_is_equal +#undef jac_neg +#undef jac_to_ws +#undef jac_to_xz +#undef jac_to_xz_add_components +#undef select_jac_point + +#define ADD SQISIGN_NAMESPACE(ADD) +#define DBL SQISIGN_NAMESPACE(DBL) +#define DBLW SQISIGN_NAMESPACE(DBLW) +#define copy_jac_point SQISIGN_NAMESPACE(copy_jac_point) +#define jac_from_ws SQISIGN_NAMESPACE(jac_from_ws) +#define jac_init SQISIGN_NAMESPACE(jac_init) +#define jac_is_equal SQISIGN_NAMESPACE(jac_is_equal) +#define jac_neg SQISIGN_NAMESPACE(jac_neg) +#define jac_to_ws SQISIGN_NAMESPACE(jac_to_ws) +#define jac_to_xz SQISIGN_NAMESPACE(jac_to_xz) +#define jac_to_xz_add_components SQISIGN_NAMESPACE(jac_to_xz_add_components) +#define select_jac_point SQISIGN_NAMESPACE(select_jac_point) + +// Namespacing symbols exported from encode_signature.c: +#undef secret_key_from_bytes +#undef secret_key_to_bytes + +#define secret_key_from_bytes SQISIGN_NAMESPACE(secret_key_from_bytes) +#define secret_key_to_bytes SQISIGN_NAMESPACE(secret_key_to_bytes) + +// Namespacing symbols exported from encode_verification.c: +#undef public_key_from_bytes +#undef public_key_to_bytes +#undef signature_from_bytes +#undef signature_to_bytes + +#define public_key_from_bytes SQISIGN_NAMESPACE(public_key_from_bytes) +#define public_key_to_bytes SQISIGN_NAMESPACE(public_key_to_bytes) +#define signature_from_bytes SQISIGN_NAMESPACE(signature_from_bytes) +#define signature_to_bytes SQISIGN_NAMESPACE(signature_to_bytes) + +// Namespacing symbols exported from finit.c: +#undef ibz_mat_2x2_finalize +#undef ibz_mat_2x2_init +#undef ibz_mat_4x4_finalize +#undef ibz_mat_4x4_init +#undef ibz_vec_2_finalize +#undef ibz_vec_2_init +#undef ibz_vec_4_finalize +#undef ibz_vec_4_init +#undef quat_alg_elem_finalize +#undef quat_alg_elem_init +#undef quat_alg_finalize +#undef quat_alg_init_set +#undef quat_lattice_finalize +#undef quat_lattice_init +#undef quat_left_ideal_finalize +#undef quat_left_ideal_init + +#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_finalize) +#define ibz_mat_2x2_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_init) +#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_finalize) +#define ibz_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_init) +#define ibz_vec_2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_finalize) +#define ibz_vec_2_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_init) +#define ibz_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_finalize) +#define ibz_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_init) +#define quat_alg_elem_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_finalize) +#define quat_alg_elem_init SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_init) +#define quat_alg_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_finalize) +#define quat_alg_init_set SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set) +#define quat_lattice_finalize SQISIGN_NAMESPACE_GENERIC(quat_lattice_finalize) +#define quat_lattice_init SQISIGN_NAMESPACE_GENERIC(quat_lattice_init) +#define quat_left_ideal_finalize SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_finalize) +#define 
quat_left_ideal_init SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_init) + +// Namespacing symbols exported from fp.c: +#undef fp_select +#undef p +#undef p2 + +#define fp_select SQISIGN_NAMESPACE(fp_select) +#define p SQISIGN_NAMESPACE(p) +#define p2 SQISIGN_NAMESPACE(p2) + +// Namespacing symbols exported from fp.c, fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c: +#undef fp_exp3div4 +#undef fp_inv +#undef fp_is_square +#undef fp_sqrt + +#define fp_exp3div4 SQISIGN_NAMESPACE(fp_exp3div4) +#define fp_inv SQISIGN_NAMESPACE(fp_inv) +#define fp_is_square SQISIGN_NAMESPACE(fp_is_square) +#define fp_sqrt SQISIGN_NAMESPACE(fp_sqrt) + +// Namespacing symbols exported from fp2.c: +#undef fp2_add +#undef fp2_add_one +#undef fp2_batched_inv +#undef fp2_copy +#undef fp2_cswap +#undef fp2_decode +#undef fp2_encode +#undef fp2_half +#undef fp2_inv +#undef fp2_is_equal +#undef fp2_is_one +#undef fp2_is_square +#undef fp2_is_zero +#undef fp2_mul +#undef fp2_mul_small +#undef fp2_neg +#undef fp2_pow_vartime +#undef fp2_print +#undef fp2_select +#undef fp2_set_one +#undef fp2_set_small +#undef fp2_set_zero +#undef fp2_sqr +#undef fp2_sqrt +#undef fp2_sqrt_verify +#undef fp2_sub + +#define fp2_add SQISIGN_NAMESPACE(fp2_add) +#define fp2_add_one SQISIGN_NAMESPACE(fp2_add_one) +#define fp2_batched_inv SQISIGN_NAMESPACE(fp2_batched_inv) +#define fp2_copy SQISIGN_NAMESPACE(fp2_copy) +#define fp2_cswap SQISIGN_NAMESPACE(fp2_cswap) +#define fp2_decode SQISIGN_NAMESPACE(fp2_decode) +#define fp2_encode SQISIGN_NAMESPACE(fp2_encode) +#define fp2_half SQISIGN_NAMESPACE(fp2_half) +#define fp2_inv SQISIGN_NAMESPACE(fp2_inv) +#define fp2_is_equal SQISIGN_NAMESPACE(fp2_is_equal) +#define fp2_is_one SQISIGN_NAMESPACE(fp2_is_one) +#define fp2_is_square SQISIGN_NAMESPACE(fp2_is_square) +#define fp2_is_zero SQISIGN_NAMESPACE(fp2_is_zero) +#define fp2_mul SQISIGN_NAMESPACE(fp2_mul) +#define fp2_mul_small SQISIGN_NAMESPACE(fp2_mul_small) +#define fp2_neg SQISIGN_NAMESPACE(fp2_neg) +#define fp2_pow_vartime SQISIGN_NAMESPACE(fp2_pow_vartime) +#define fp2_print SQISIGN_NAMESPACE(fp2_print) +#define fp2_select SQISIGN_NAMESPACE(fp2_select) +#define fp2_set_one SQISIGN_NAMESPACE(fp2_set_one) +#define fp2_set_small SQISIGN_NAMESPACE(fp2_set_small) +#define fp2_set_zero SQISIGN_NAMESPACE(fp2_set_zero) +#define fp2_sqr SQISIGN_NAMESPACE(fp2_sqr) +#define fp2_sqrt SQISIGN_NAMESPACE(fp2_sqrt) +#define fp2_sqrt_verify SQISIGN_NAMESPACE(fp2_sqrt_verify) +#define fp2_sub SQISIGN_NAMESPACE(fp2_sub) + +// Namespacing symbols exported from fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c: +#undef fp_copy +#undef fp_cswap +#undef fp_decode +#undef fp_decode_reduce +#undef fp_div3 +#undef fp_encode +#undef fp_half +#undef fp_is_equal +#undef fp_is_zero +#undef fp_mul_small +#undef fp_neg +#undef fp_set_one +#undef fp_set_small +#undef fp_set_zero + +#define fp_copy SQISIGN_NAMESPACE(fp_copy) +#define fp_cswap SQISIGN_NAMESPACE(fp_cswap) +#define fp_decode SQISIGN_NAMESPACE(fp_decode) +#define fp_decode_reduce SQISIGN_NAMESPACE(fp_decode_reduce) +#define fp_div3 SQISIGN_NAMESPACE(fp_div3) +#define fp_encode SQISIGN_NAMESPACE(fp_encode) +#define fp_half SQISIGN_NAMESPACE(fp_half) +#define fp_is_equal SQISIGN_NAMESPACE(fp_is_equal) +#define fp_is_zero SQISIGN_NAMESPACE(fp_is_zero) +#define fp_mul_small SQISIGN_NAMESPACE(fp_mul_small) +#define fp_neg SQISIGN_NAMESPACE(fp_neg) +#define fp_set_one SQISIGN_NAMESPACE(fp_set_one) +#define fp_set_small SQISIGN_NAMESPACE(fp_set_small) +#define fp_set_zero SQISIGN_NAMESPACE(fp_set_zero) +#define ONE 
SQISIGN_NAMESPACE(ONE) +#define ZERO SQISIGN_NAMESPACE(ZERO) + +// Namespacing symbols exported from fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c, gf27500.c, gf5248.c, gf65376.c: +#undef fp_add +#undef fp_mul +#undef fp_sqr +#undef fp_sub + +#define fp_add SQISIGN_NAMESPACE(fp_add) +#define fp_mul SQISIGN_NAMESPACE(fp_mul) +#define fp_sqr SQISIGN_NAMESPACE(fp_sqr) +#define fp_sub SQISIGN_NAMESPACE(fp_sub) + +// Namespacing symbols exported from gf27500.c: +#undef gf27500_decode +#undef gf27500_decode_reduce +#undef gf27500_div +#undef gf27500_div3 +#undef gf27500_encode +#undef gf27500_invert +#undef gf27500_legendre +#undef gf27500_sqrt + +#define gf27500_decode SQISIGN_NAMESPACE(gf27500_decode) +#define gf27500_decode_reduce SQISIGN_NAMESPACE(gf27500_decode_reduce) +#define gf27500_div SQISIGN_NAMESPACE(gf27500_div) +#define gf27500_div3 SQISIGN_NAMESPACE(gf27500_div3) +#define gf27500_encode SQISIGN_NAMESPACE(gf27500_encode) +#define gf27500_invert SQISIGN_NAMESPACE(gf27500_invert) +#define gf27500_legendre SQISIGN_NAMESPACE(gf27500_legendre) +#define gf27500_sqrt SQISIGN_NAMESPACE(gf27500_sqrt) + +// Namespacing symbols exported from gf27500.c, gf5248.c, gf65376.c: +#undef fp2_mul_c0 +#undef fp2_mul_c1 +#undef fp2_sq_c0 +#undef fp2_sq_c1 + +#define fp2_mul_c0 SQISIGN_NAMESPACE(fp2_mul_c0) +#define fp2_mul_c1 SQISIGN_NAMESPACE(fp2_mul_c1) +#define fp2_sq_c0 SQISIGN_NAMESPACE(fp2_sq_c0) +#define fp2_sq_c1 SQISIGN_NAMESPACE(fp2_sq_c1) + +// Namespacing symbols exported from gf5248.c: +#undef gf5248_decode +#undef gf5248_decode_reduce +#undef gf5248_div +#undef gf5248_div3 +#undef gf5248_encode +#undef gf5248_invert +#undef gf5248_legendre +#undef gf5248_sqrt + +#define gf5248_decode SQISIGN_NAMESPACE(gf5248_decode) +#define gf5248_decode_reduce SQISIGN_NAMESPACE(gf5248_decode_reduce) +#define gf5248_div SQISIGN_NAMESPACE(gf5248_div) +#define gf5248_div3 SQISIGN_NAMESPACE(gf5248_div3) +#define gf5248_encode SQISIGN_NAMESPACE(gf5248_encode) +#define gf5248_invert SQISIGN_NAMESPACE(gf5248_invert) +#define gf5248_legendre SQISIGN_NAMESPACE(gf5248_legendre) +#define gf5248_sqrt SQISIGN_NAMESPACE(gf5248_sqrt) + +// Namespacing symbols exported from gf65376.c: +#undef gf65376_decode +#undef gf65376_decode_reduce +#undef gf65376_div +#undef gf65376_div3 +#undef gf65376_encode +#undef gf65376_invert +#undef gf65376_legendre +#undef gf65376_sqrt + +#define gf65376_decode SQISIGN_NAMESPACE(gf65376_decode) +#define gf65376_decode_reduce SQISIGN_NAMESPACE(gf65376_decode_reduce) +#define gf65376_div SQISIGN_NAMESPACE(gf65376_div) +#define gf65376_div3 SQISIGN_NAMESPACE(gf65376_div3) +#define gf65376_encode SQISIGN_NAMESPACE(gf65376_encode) +#define gf65376_invert SQISIGN_NAMESPACE(gf65376_invert) +#define gf65376_legendre SQISIGN_NAMESPACE(gf65376_legendre) +#define gf65376_sqrt SQISIGN_NAMESPACE(gf65376_sqrt) + +// Namespacing symbols exported from hd.c: +#undef add_couple_jac_points +#undef copy_bases_to_kernel +#undef couple_jac_to_xz +#undef double_couple_jac_point +#undef double_couple_jac_point_iter +#undef double_couple_point +#undef double_couple_point_iter + +#define add_couple_jac_points SQISIGN_NAMESPACE(add_couple_jac_points) +#define copy_bases_to_kernel SQISIGN_NAMESPACE(copy_bases_to_kernel) +#define couple_jac_to_xz SQISIGN_NAMESPACE(couple_jac_to_xz) +#define double_couple_jac_point SQISIGN_NAMESPACE(double_couple_jac_point) +#define double_couple_jac_point_iter SQISIGN_NAMESPACE(double_couple_jac_point_iter) +#define double_couple_point SQISIGN_NAMESPACE(double_couple_point) +#define 
double_couple_point_iter SQISIGN_NAMESPACE(double_couple_point_iter) + +// Namespacing symbols exported from hnf.c: +#undef ibz_mat_4x4_is_hnf +#undef ibz_mat_4xn_hnf_mod_core +#undef ibz_vec_4_copy_mod +#undef ibz_vec_4_linear_combination_mod +#undef ibz_vec_4_scalar_mul_mod + +#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_hnf) +#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE_GENERIC(ibz_mat_4xn_hnf_mod_core) +#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_mod) +#define ibz_vec_4_linear_combination_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination_mod) +#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul_mod) + +// Namespacing symbols exported from hnf_internal.c: +#undef ibz_centered_mod +#undef ibz_conditional_assign +#undef ibz_mod_not_zero +#undef ibz_xgcd_with_u_not_0 + +#define ibz_centered_mod SQISIGN_NAMESPACE_GENERIC(ibz_centered_mod) +#define ibz_conditional_assign SQISIGN_NAMESPACE_GENERIC(ibz_conditional_assign) +#define ibz_mod_not_zero SQISIGN_NAMESPACE_GENERIC(ibz_mod_not_zero) +#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE_GENERIC(ibz_xgcd_with_u_not_0) + +// Namespacing symbols exported from ibz_division.c: +#undef ibz_xgcd + +#define ibz_xgcd SQISIGN_NAMESPACE_GENERIC(ibz_xgcd) + +// Namespacing symbols exported from id2iso.c: +#undef change_of_basis_matrix_tate +#undef change_of_basis_matrix_tate_invert +#undef ec_biscalar_mul_ibz_vec +#undef endomorphism_application_even_basis +#undef id2iso_ideal_to_kernel_dlogs_even +#undef id2iso_kernel_dlogs_to_ideal_even +#undef matrix_application_even_basis + +#define change_of_basis_matrix_tate SQISIGN_NAMESPACE(change_of_basis_matrix_tate) +#define change_of_basis_matrix_tate_invert SQISIGN_NAMESPACE(change_of_basis_matrix_tate_invert) +#define ec_biscalar_mul_ibz_vec SQISIGN_NAMESPACE(ec_biscalar_mul_ibz_vec) +#define endomorphism_application_even_basis SQISIGN_NAMESPACE(endomorphism_application_even_basis) +#define id2iso_ideal_to_kernel_dlogs_even SQISIGN_NAMESPACE(id2iso_ideal_to_kernel_dlogs_even) +#define id2iso_kernel_dlogs_to_ideal_even SQISIGN_NAMESPACE(id2iso_kernel_dlogs_to_ideal_even) +#define matrix_application_even_basis SQISIGN_NAMESPACE(matrix_application_even_basis) + +// Namespacing symbols exported from ideal.c: +#undef quat_lideal_add +#undef quat_lideal_class_gram +#undef quat_lideal_conjugate_without_hnf +#undef quat_lideal_copy +#undef quat_lideal_create +#undef quat_lideal_create_principal +#undef quat_lideal_equals +#undef quat_lideal_generator +#undef quat_lideal_inter +#undef quat_lideal_inverse_lattice_without_hnf +#undef quat_lideal_mul +#undef quat_lideal_norm +#undef quat_lideal_right_order +#undef quat_lideal_right_transporter +#undef quat_order_discriminant +#undef quat_order_is_maximal + +#define quat_lideal_add SQISIGN_NAMESPACE_GENERIC(quat_lideal_add) +#define quat_lideal_class_gram SQISIGN_NAMESPACE_GENERIC(quat_lideal_class_gram) +#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_conjugate_without_hnf) +#define quat_lideal_copy SQISIGN_NAMESPACE_GENERIC(quat_lideal_copy) +#define quat_lideal_create SQISIGN_NAMESPACE_GENERIC(quat_lideal_create) +#define quat_lideal_create_principal SQISIGN_NAMESPACE_GENERIC(quat_lideal_create_principal) +#define quat_lideal_equals SQISIGN_NAMESPACE_GENERIC(quat_lideal_equals) +#define quat_lideal_generator SQISIGN_NAMESPACE_GENERIC(quat_lideal_generator) +#define quat_lideal_inter SQISIGN_NAMESPACE_GENERIC(quat_lideal_inter) +#define 
quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_inverse_lattice_without_hnf) +#define quat_lideal_mul SQISIGN_NAMESPACE_GENERIC(quat_lideal_mul) +#define quat_lideal_norm SQISIGN_NAMESPACE_GENERIC(quat_lideal_norm) +#define quat_lideal_right_order SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_order) +#define quat_lideal_right_transporter SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_transporter) +#define quat_order_discriminant SQISIGN_NAMESPACE_GENERIC(quat_order_discriminant) +#define quat_order_is_maximal SQISIGN_NAMESPACE_GENERIC(quat_order_is_maximal) + +// Namespacing symbols exported from intbig.c: +#undef ibz_abs +#undef ibz_add +#undef ibz_bitsize +#undef ibz_cmp +#undef ibz_cmp_int32 +#undef ibz_convert_to_str +#undef ibz_copy +#undef ibz_copy_digits +#undef ibz_div +#undef ibz_div_2exp +#undef ibz_div_floor +#undef ibz_divides +#undef ibz_finalize +#undef ibz_gcd +#undef ibz_get +#undef ibz_init +#undef ibz_invmod +#undef ibz_is_even +#undef ibz_is_odd +#undef ibz_is_one +#undef ibz_is_zero +#undef ibz_legendre +#undef ibz_mod +#undef ibz_mod_ui +#undef ibz_mul +#undef ibz_neg +#undef ibz_pow +#undef ibz_pow_mod +#undef ibz_print +#undef ibz_probab_prime +#undef ibz_rand_interval +#undef ibz_rand_interval_bits +#undef ibz_rand_interval_i +#undef ibz_rand_interval_minm_m +#undef ibz_set +#undef ibz_set_from_str +#undef ibz_size_in_base +#undef ibz_sqrt +#undef ibz_sqrt_floor +#undef ibz_sqrt_mod_p +#undef ibz_sub +#undef ibz_swap +#undef ibz_to_digits +#undef ibz_two_adic + +#define ibz_abs SQISIGN_NAMESPACE_GENERIC(ibz_abs) +#define ibz_add SQISIGN_NAMESPACE_GENERIC(ibz_add) +#define ibz_bitsize SQISIGN_NAMESPACE_GENERIC(ibz_bitsize) +#define ibz_cmp SQISIGN_NAMESPACE_GENERIC(ibz_cmp) +#define ibz_cmp_int32 SQISIGN_NAMESPACE_GENERIC(ibz_cmp_int32) +#define ibz_convert_to_str SQISIGN_NAMESPACE_GENERIC(ibz_convert_to_str) +#define ibz_copy SQISIGN_NAMESPACE_GENERIC(ibz_copy) +#define ibz_copy_digits SQISIGN_NAMESPACE_GENERIC(ibz_copy_digits) +#define ibz_div SQISIGN_NAMESPACE_GENERIC(ibz_div) +#define ibz_div_2exp SQISIGN_NAMESPACE_GENERIC(ibz_div_2exp) +#define ibz_div_floor SQISIGN_NAMESPACE_GENERIC(ibz_div_floor) +#define ibz_divides SQISIGN_NAMESPACE_GENERIC(ibz_divides) +#define ibz_finalize SQISIGN_NAMESPACE_GENERIC(ibz_finalize) +#define ibz_gcd SQISIGN_NAMESPACE_GENERIC(ibz_gcd) +#define ibz_get SQISIGN_NAMESPACE_GENERIC(ibz_get) +#define ibz_init SQISIGN_NAMESPACE_GENERIC(ibz_init) +#define ibz_invmod SQISIGN_NAMESPACE_GENERIC(ibz_invmod) +#define ibz_is_even SQISIGN_NAMESPACE_GENERIC(ibz_is_even) +#define ibz_is_odd SQISIGN_NAMESPACE_GENERIC(ibz_is_odd) +#define ibz_is_one SQISIGN_NAMESPACE_GENERIC(ibz_is_one) +#define ibz_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_is_zero) +#define ibz_legendre SQISIGN_NAMESPACE_GENERIC(ibz_legendre) +#define ibz_mod SQISIGN_NAMESPACE_GENERIC(ibz_mod) +#define ibz_mod_ui SQISIGN_NAMESPACE_GENERIC(ibz_mod_ui) +#define ibz_mul SQISIGN_NAMESPACE_GENERIC(ibz_mul) +#define ibz_neg SQISIGN_NAMESPACE_GENERIC(ibz_neg) +#define ibz_pow SQISIGN_NAMESPACE_GENERIC(ibz_pow) +#define ibz_pow_mod SQISIGN_NAMESPACE_GENERIC(ibz_pow_mod) +#define ibz_print SQISIGN_NAMESPACE_GENERIC(ibz_print) +#define ibz_probab_prime SQISIGN_NAMESPACE_GENERIC(ibz_probab_prime) +#define ibz_rand_interval SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval) +#define ibz_rand_interval_bits SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_bits) +#define ibz_rand_interval_i SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_i) +#define ibz_rand_interval_minm_m 
SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_minm_m) +#define ibz_set SQISIGN_NAMESPACE_GENERIC(ibz_set) +#define ibz_set_from_str SQISIGN_NAMESPACE_GENERIC(ibz_set_from_str) +#define ibz_size_in_base SQISIGN_NAMESPACE_GENERIC(ibz_size_in_base) +#define ibz_sqrt SQISIGN_NAMESPACE_GENERIC(ibz_sqrt) +#define ibz_sqrt_floor SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_floor) +#define ibz_sqrt_mod_p SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_mod_p) +#define ibz_sub SQISIGN_NAMESPACE_GENERIC(ibz_sub) +#define ibz_swap SQISIGN_NAMESPACE_GENERIC(ibz_swap) +#define ibz_to_digits SQISIGN_NAMESPACE_GENERIC(ibz_to_digits) +#define ibz_two_adic SQISIGN_NAMESPACE_GENERIC(ibz_two_adic) + +// Namespacing symbols exported from integers.c: +#undef ibz_cornacchia_prime +#undef ibz_generate_random_prime + +#define ibz_cornacchia_prime SQISIGN_NAMESPACE_GENERIC(ibz_cornacchia_prime) +#define ibz_generate_random_prime SQISIGN_NAMESPACE_GENERIC(ibz_generate_random_prime) + +// Namespacing symbols exported from isog_chains.c: +#undef ec_eval_even +#undef ec_eval_small_chain +#undef ec_iso_eval +#undef ec_isomorphism + +#define ec_eval_even SQISIGN_NAMESPACE(ec_eval_even) +#define ec_eval_small_chain SQISIGN_NAMESPACE(ec_eval_small_chain) +#define ec_iso_eval SQISIGN_NAMESPACE(ec_iso_eval) +#define ec_isomorphism SQISIGN_NAMESPACE(ec_isomorphism) + +// Namespacing symbols exported from keygen.c: +#undef protocols_keygen +#undef secret_key_finalize +#undef secret_key_init + +#define protocols_keygen SQISIGN_NAMESPACE(protocols_keygen) +#define secret_key_finalize SQISIGN_NAMESPACE(secret_key_finalize) +#define secret_key_init SQISIGN_NAMESPACE(secret_key_init) + +// Namespacing symbols exported from l2.c: +#undef quat_lattice_lll +#undef quat_lll_core + +#define quat_lattice_lll SQISIGN_NAMESPACE_GENERIC(quat_lattice_lll) +#define quat_lll_core SQISIGN_NAMESPACE_GENERIC(quat_lll_core) + +// Namespacing symbols exported from lat_ball.c: +#undef quat_lattice_bound_parallelogram +#undef quat_lattice_sample_from_ball + +#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE_GENERIC(quat_lattice_bound_parallelogram) +#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE_GENERIC(quat_lattice_sample_from_ball) + +// Namespacing symbols exported from lattice.c: +#undef quat_lattice_add +#undef quat_lattice_alg_elem_mul +#undef quat_lattice_conjugate_without_hnf +#undef quat_lattice_contains +#undef quat_lattice_dual_without_hnf +#undef quat_lattice_equal +#undef quat_lattice_gram +#undef quat_lattice_hnf +#undef quat_lattice_inclusion +#undef quat_lattice_index +#undef quat_lattice_intersect +#undef quat_lattice_mat_alg_coord_mul_without_hnf +#undef quat_lattice_mul +#undef quat_lattice_reduce_denom + +#define quat_lattice_add SQISIGN_NAMESPACE_GENERIC(quat_lattice_add) +#define quat_lattice_alg_elem_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_alg_elem_mul) +#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_conjugate_without_hnf) +#define quat_lattice_contains SQISIGN_NAMESPACE_GENERIC(quat_lattice_contains) +#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_dual_without_hnf) +#define quat_lattice_equal SQISIGN_NAMESPACE_GENERIC(quat_lattice_equal) +#define quat_lattice_gram SQISIGN_NAMESPACE_GENERIC(quat_lattice_gram) +#define quat_lattice_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_hnf) +#define quat_lattice_inclusion SQISIGN_NAMESPACE_GENERIC(quat_lattice_inclusion) +#define quat_lattice_index SQISIGN_NAMESPACE_GENERIC(quat_lattice_index) +#define quat_lattice_intersect 
SQISIGN_NAMESPACE_GENERIC(quat_lattice_intersect) +#define quat_lattice_mat_alg_coord_mul_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_mat_alg_coord_mul_without_hnf) +#define quat_lattice_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_mul) +#define quat_lattice_reduce_denom SQISIGN_NAMESPACE_GENERIC(quat_lattice_reduce_denom) + +// Namespacing symbols exported from lll_applications.c: +#undef quat_lideal_lideal_mul_reduced +#undef quat_lideal_prime_norm_reduced_equivalent +#undef quat_lideal_reduce_basis + +#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE_GENERIC(quat_lideal_lideal_mul_reduced) +#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE_GENERIC(quat_lideal_prime_norm_reduced_equivalent) +#define quat_lideal_reduce_basis SQISIGN_NAMESPACE_GENERIC(quat_lideal_reduce_basis) + +// Namespacing symbols exported from lll_verification.c: +#undef ibq_vec_4_copy_ibz +#undef quat_lll_bilinear +#undef quat_lll_gram_schmidt_transposed_with_ibq +#undef quat_lll_set_ibq_parameters +#undef quat_lll_verify + +#define ibq_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_copy_ibz) +#define quat_lll_bilinear SQISIGN_NAMESPACE_GENERIC(quat_lll_bilinear) +#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE_GENERIC(quat_lll_gram_schmidt_transposed_with_ibq) +#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE_GENERIC(quat_lll_set_ibq_parameters) +#define quat_lll_verify SQISIGN_NAMESPACE_GENERIC(quat_lll_verify) + +// Namespacing symbols exported from mem.c: +#undef sqisign_secure_clear +#undef sqisign_secure_free + +#define sqisign_secure_clear SQISIGN_NAMESPACE_GENERIC(sqisign_secure_clear) +#define sqisign_secure_free SQISIGN_NAMESPACE_GENERIC(sqisign_secure_free) + +// Namespacing symbols exported from mp.c: +#undef MUL +#undef mp_add +#undef mp_compare +#undef mp_copy +#undef mp_inv_2e +#undef mp_invert_matrix +#undef mp_is_one +#undef mp_is_zero +#undef mp_mod_2exp +#undef mp_mul +#undef mp_mul2 +#undef mp_neg +#undef mp_print +#undef mp_shiftl +#undef mp_shiftr +#undef mp_sub +#undef multiple_mp_shiftl +#undef select_ct +#undef swap_ct + +#define MUL SQISIGN_NAMESPACE_GENERIC(MUL) +#define mp_add SQISIGN_NAMESPACE_GENERIC(mp_add) +#define mp_compare SQISIGN_NAMESPACE_GENERIC(mp_compare) +#define mp_copy SQISIGN_NAMESPACE_GENERIC(mp_copy) +#define mp_inv_2e SQISIGN_NAMESPACE_GENERIC(mp_inv_2e) +#define mp_invert_matrix SQISIGN_NAMESPACE_GENERIC(mp_invert_matrix) +#define mp_is_one SQISIGN_NAMESPACE_GENERIC(mp_is_one) +#define mp_is_zero SQISIGN_NAMESPACE_GENERIC(mp_is_zero) +#define mp_mod_2exp SQISIGN_NAMESPACE_GENERIC(mp_mod_2exp) +#define mp_mul SQISIGN_NAMESPACE_GENERIC(mp_mul) +#define mp_mul2 SQISIGN_NAMESPACE_GENERIC(mp_mul2) +#define mp_neg SQISIGN_NAMESPACE_GENERIC(mp_neg) +#define mp_print SQISIGN_NAMESPACE_GENERIC(mp_print) +#define mp_shiftl SQISIGN_NAMESPACE_GENERIC(mp_shiftl) +#define mp_shiftr SQISIGN_NAMESPACE_GENERIC(mp_shiftr) +#define mp_sub SQISIGN_NAMESPACE_GENERIC(mp_sub) +#define multiple_mp_shiftl SQISIGN_NAMESPACE_GENERIC(multiple_mp_shiftl) +#define select_ct SQISIGN_NAMESPACE_GENERIC(select_ct) +#define swap_ct SQISIGN_NAMESPACE_GENERIC(swap_ct) + +// Namespacing symbols exported from normeq.c: +#undef quat_change_to_O0_basis +#undef quat_lattice_O0_set +#undef quat_lattice_O0_set_extremal +#undef quat_order_elem_create +#undef quat_represent_integer +#undef quat_sampling_random_ideal_O0_given_norm + +#define quat_change_to_O0_basis SQISIGN_NAMESPACE_GENERIC(quat_change_to_O0_basis) +#define quat_lattice_O0_set 
SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set) +#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set_extremal) +#define quat_order_elem_create SQISIGN_NAMESPACE_GENERIC(quat_order_elem_create) +#define quat_represent_integer SQISIGN_NAMESPACE_GENERIC(quat_represent_integer) +#define quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE_GENERIC(quat_sampling_random_ideal_O0_given_norm) + +// Namespacing symbols exported from printer.c: +#undef ibz_mat_2x2_print +#undef ibz_mat_4x4_print +#undef ibz_vec_2_print +#undef ibz_vec_4_print +#undef quat_alg_elem_print +#undef quat_alg_print +#undef quat_lattice_print +#undef quat_left_ideal_print + +#define ibz_mat_2x2_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_print) +#define ibz_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_print) +#define ibz_vec_2_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_print) +#define ibz_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_print) +#define quat_alg_elem_print SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_print) +#define quat_alg_print SQISIGN_NAMESPACE_GENERIC(quat_alg_print) +#define quat_lattice_print SQISIGN_NAMESPACE_GENERIC(quat_lattice_print) +#define quat_left_ideal_print SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_print) + +// Namespacing symbols exported from random_input_generation.c: +#undef quat_test_input_random_ideal_generation +#undef quat_test_input_random_ideal_lattice_generation +#undef quat_test_input_random_lattice_generation + +#define quat_test_input_random_ideal_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_generation) +#define quat_test_input_random_ideal_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_lattice_generation) +#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_lattice_generation) + +// Namespacing symbols exported from rationals.c: +#undef ibq_abs +#undef ibq_add +#undef ibq_cmp +#undef ibq_copy +#undef ibq_finalize +#undef ibq_init +#undef ibq_inv +#undef ibq_is_ibz +#undef ibq_is_one +#undef ibq_is_zero +#undef ibq_mat_4x4_finalize +#undef ibq_mat_4x4_init +#undef ibq_mat_4x4_print +#undef ibq_mul +#undef ibq_neg +#undef ibq_reduce +#undef ibq_set +#undef ibq_sub +#undef ibq_to_ibz +#undef ibq_vec_4_finalize +#undef ibq_vec_4_init +#undef ibq_vec_4_print + +#define ibq_abs SQISIGN_NAMESPACE_GENERIC(ibq_abs) +#define ibq_add SQISIGN_NAMESPACE_GENERIC(ibq_add) +#define ibq_cmp SQISIGN_NAMESPACE_GENERIC(ibq_cmp) +#define ibq_copy SQISIGN_NAMESPACE_GENERIC(ibq_copy) +#define ibq_finalize SQISIGN_NAMESPACE_GENERIC(ibq_finalize) +#define ibq_init SQISIGN_NAMESPACE_GENERIC(ibq_init) +#define ibq_inv SQISIGN_NAMESPACE_GENERIC(ibq_inv) +#define ibq_is_ibz SQISIGN_NAMESPACE_GENERIC(ibq_is_ibz) +#define ibq_is_one SQISIGN_NAMESPACE_GENERIC(ibq_is_one) +#define ibq_is_zero SQISIGN_NAMESPACE_GENERIC(ibq_is_zero) +#define ibq_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_finalize) +#define ibq_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_init) +#define ibq_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_print) +#define ibq_mul SQISIGN_NAMESPACE_GENERIC(ibq_mul) +#define ibq_neg SQISIGN_NAMESPACE_GENERIC(ibq_neg) +#define ibq_reduce SQISIGN_NAMESPACE_GENERIC(ibq_reduce) +#define ibq_set SQISIGN_NAMESPACE_GENERIC(ibq_set) +#define ibq_sub SQISIGN_NAMESPACE_GENERIC(ibq_sub) +#define ibq_to_ibz SQISIGN_NAMESPACE_GENERIC(ibq_to_ibz) +#define ibq_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_finalize) +#define ibq_vec_4_init 
SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_init) +#define ibq_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_print) + +// Namespacing symbols exported from sign.c: +#undef protocols_sign + +#define protocols_sign SQISIGN_NAMESPACE(protocols_sign) + +// Namespacing symbols exported from sqisign.c: +#undef sqisign_keypair +#undef sqisign_open +#undef sqisign_sign +#undef sqisign_sign_signature +#undef sqisign_verify +#undef sqisign_verify_signature + +#define sqisign_keypair SQISIGN_NAMESPACE(sqisign_keypair) +#define sqisign_open SQISIGN_NAMESPACE(sqisign_open) +#define sqisign_sign SQISIGN_NAMESPACE(sqisign_sign) +#define sqisign_sign_signature SQISIGN_NAMESPACE(sqisign_sign_signature) +#define sqisign_verify SQISIGN_NAMESPACE(sqisign_verify) +#define sqisign_verify_signature SQISIGN_NAMESPACE(sqisign_verify_signature) + +// Namespacing symbols exported from theta_isogenies.c: +#undef theta_chain_compute_and_eval +#undef theta_chain_compute_and_eval_randomized +#undef theta_chain_compute_and_eval_verify + +#define theta_chain_compute_and_eval SQISIGN_NAMESPACE(theta_chain_compute_and_eval) +#define theta_chain_compute_and_eval_randomized SQISIGN_NAMESPACE(theta_chain_compute_and_eval_randomized) +#define theta_chain_compute_and_eval_verify SQISIGN_NAMESPACE(theta_chain_compute_and_eval_verify) + +// Namespacing symbols exported from theta_structure.c: +#undef double_iter +#undef double_point +#undef is_product_theta_point +#undef theta_precomputation + +#define double_iter SQISIGN_NAMESPACE(double_iter) +#define double_point SQISIGN_NAMESPACE(double_point) +#define is_product_theta_point SQISIGN_NAMESPACE(is_product_theta_point) +#define theta_precomputation SQISIGN_NAMESPACE(theta_precomputation) + +// Namespacing symbols exported from verify.c: +#undef protocols_verify + +#define protocols_verify SQISIGN_NAMESPACE(protocols_verify) + +// Namespacing symbols exported from xeval.c: +#undef xeval_2 +#undef xeval_2_singular +#undef xeval_4 + +#define xeval_2 SQISIGN_NAMESPACE(xeval_2) +#define xeval_2_singular SQISIGN_NAMESPACE(xeval_2_singular) +#define xeval_4 SQISIGN_NAMESPACE(xeval_4) + +// Namespacing symbols exported from xisog.c: +#undef xisog_2 +#undef xisog_2_singular +#undef xisog_4 + +#define xisog_2 SQISIGN_NAMESPACE(xisog_2) +#define xisog_2_singular SQISIGN_NAMESPACE(xisog_2_singular) +#define xisog_4 SQISIGN_NAMESPACE(xisog_4) + +// Namespacing symbols from precomp: +#undef BASIS_E0_PX +#undef BASIS_E0_QX +#undef p_cofactor_for_2f +#undef CURVES_WITH_ENDOMORPHISMS +#undef EVEN_INDEX +#undef CHI_EVAL +#undef FP2_CONSTANTS +#undef SPLITTING_TRANSFORMS +#undef NORMALIZATION_TRANSFORMS +#undef QUAT_prime_cofactor +#undef QUATALG_PINFTY +#undef EXTREMAL_ORDERS +#undef CONNECTING_IDEALS +#undef CONJUGATING_ELEMENTS +#undef TWO_TO_SECURITY_BITS +#undef TORSION_PLUS_2POWER +#undef SEC_DEGREE +#undef COM_DEGREE + +#define BASIS_E0_PX SQISIGN_NAMESPACE(BASIS_E0_PX) +#define BASIS_E0_QX SQISIGN_NAMESPACE(BASIS_E0_QX) +#define p_cofactor_for_2f SQISIGN_NAMESPACE(p_cofactor_for_2f) +#define CURVES_WITH_ENDOMORPHISMS SQISIGN_NAMESPACE(CURVES_WITH_ENDOMORPHISMS) +#define EVEN_INDEX SQISIGN_NAMESPACE(EVEN_INDEX) +#define CHI_EVAL SQISIGN_NAMESPACE(CHI_EVAL) +#define FP2_CONSTANTS SQISIGN_NAMESPACE(FP2_CONSTANTS) +#define SPLITTING_TRANSFORMS SQISIGN_NAMESPACE(SPLITTING_TRANSFORMS) +#define NORMALIZATION_TRANSFORMS SQISIGN_NAMESPACE(NORMALIZATION_TRANSFORMS) +#define QUAT_prime_cofactor SQISIGN_NAMESPACE(QUAT_prime_cofactor) +#define QUATALG_PINFTY SQISIGN_NAMESPACE(QUATALG_PINFTY) +#define 
EXTREMAL_ORDERS SQISIGN_NAMESPACE(EXTREMAL_ORDERS) +#define CONNECTING_IDEALS SQISIGN_NAMESPACE(CONNECTING_IDEALS) +#define CONJUGATING_ELEMENTS SQISIGN_NAMESPACE(CONJUGATING_ELEMENTS) +#define TWO_TO_SECURITY_BITS SQISIGN_NAMESPACE(TWO_TO_SECURITY_BITS) +#define TORSION_PLUS_2POWER SQISIGN_NAMESPACE(TORSION_PLUS_2POWER) +#define SEC_DEGREE SQISIGN_NAMESPACE(SEC_DEGREE) +#define COM_DEGREE SQISIGN_NAMESPACE(COM_DEGREE) + + +#endif + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign_parameters.txt b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign_parameters.txt new file mode 100644 index 0000000000..947af4bbbe --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign_parameters.txt @@ -0,0 +1,3 @@ +lvl = 5 +p = 0x1afffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff +num_orders = 7 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.c new file mode 100644 index 0000000000..478a9ab25b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.c @@ -0,0 +1,1283 @@ +#include "theta_isogenies.h" +#include +#include +#include +#include +#include + +// Select a base change matrix in constant time, with M1 a regular +// base change matrix and M2 a precomputed base change matrix +// If option = 0 then M <- M1, else if option = 0xFF...FF then M <- M2 +static inline void +select_base_change_matrix(basis_change_matrix_t *M, + const basis_change_matrix_t *M1, + const precomp_basis_change_matrix_t *M2, + const uint32_t option) +{ + for (int i = 0; i < 4; i++) + for (int j = 0; j < 4; j++) + fp2_select(&M->m[i][j], &M1->m[i][j], &FP2_CONSTANTS[M2->m[i][j]], option); +} + +// Set a regular base change matrix from a precomputed one +static inline void +set_base_change_matrix_from_precomp(basis_change_matrix_t *res, const precomp_basis_change_matrix_t *M) +{ + for (int i = 0; i < 4; i++) + for (int j = 0; j < 4; j++) + res->m[i][j] = FP2_CONSTANTS[M->m[i][j]]; +} + +static inline void +choose_index_theta_point(fp2_t *res, int ind, const theta_point_t *T) +{ + const fp2_t *src = NULL; + switch (ind % 4) { + case 0: + src = &T->x; + break; + case 1: + src = &T->y; + break; + case 2: + src = &T->z; + break; + case 3: + src = &T->t; + break; + default: + assert(0); + } + fp2_copy(res, src); +} + +// same as apply_isomorphism method but more efficient when the t component of P is zero. 
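// Editorial note: the function below computes the fp2 matrix-vector product
//
//   (res.x, res.y, res.z, res.t)^T = M * (P->x, P->y, P->z, P->t)^T
//
// accumulating into a temporary theta point so that res may alias P. When
// Pt_not_zero is false, the four products against the last column M->m[i][3]
// are skipped entirely, which is the saving mentioned above (4 fp2_mul and
// 4 fp2_add fewer than the general case).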
+static void +apply_isomorphism_general(theta_point_t *res, + const basis_change_matrix_t *M, + const theta_point_t *P, + const bool Pt_not_zero) +{ + fp2_t x1; + theta_point_t temp; + + fp2_mul(&temp.x, &P->x, &M->m[0][0]); + fp2_mul(&x1, &P->y, &M->m[0][1]); + fp2_add(&temp.x, &temp.x, &x1); + fp2_mul(&x1, &P->z, &M->m[0][2]); + fp2_add(&temp.x, &temp.x, &x1); + + fp2_mul(&temp.y, &P->x, &M->m[1][0]); + fp2_mul(&x1, &P->y, &M->m[1][1]); + fp2_add(&temp.y, &temp.y, &x1); + fp2_mul(&x1, &P->z, &M->m[1][2]); + fp2_add(&temp.y, &temp.y, &x1); + + fp2_mul(&temp.z, &P->x, &M->m[2][0]); + fp2_mul(&x1, &P->y, &M->m[2][1]); + fp2_add(&temp.z, &temp.z, &x1); + fp2_mul(&x1, &P->z, &M->m[2][2]); + fp2_add(&temp.z, &temp.z, &x1); + + fp2_mul(&temp.t, &P->x, &M->m[3][0]); + fp2_mul(&x1, &P->y, &M->m[3][1]); + fp2_add(&temp.t, &temp.t, &x1); + fp2_mul(&x1, &P->z, &M->m[3][2]); + fp2_add(&temp.t, &temp.t, &x1); + + if (Pt_not_zero) { + fp2_mul(&x1, &P->t, &M->m[0][3]); + fp2_add(&temp.x, &temp.x, &x1); + + fp2_mul(&x1, &P->t, &M->m[1][3]); + fp2_add(&temp.y, &temp.y, &x1); + + fp2_mul(&x1, &P->t, &M->m[2][3]); + fp2_add(&temp.z, &temp.z, &x1); + + fp2_mul(&x1, &P->t, &M->m[3][3]); + fp2_add(&temp.t, &temp.t, &x1); + } + + fp2_copy(&res->x, &temp.x); + fp2_copy(&res->y, &temp.y); + fp2_copy(&res->z, &temp.z); + fp2_copy(&res->t, &temp.t); +} + +static void +apply_isomorphism(theta_point_t *res, const basis_change_matrix_t *M, const theta_point_t *P) +{ + apply_isomorphism_general(res, M, P, true); +} + +// set res = M1 * M2 with matrix multiplication +static void +base_change_matrix_multiplication(basis_change_matrix_t *res, + const basis_change_matrix_t *M1, + const basis_change_matrix_t *M2) +{ + basis_change_matrix_t tmp; + fp2_t sum, m_ik, m_kj; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + fp2_set_zero(&sum); + for (int k = 0; k < 4; k++) { + m_ik = M1->m[i][k]; + m_kj = M2->m[k][j]; + fp2_mul(&m_ik, &m_ik, &m_kj); + fp2_add(&sum, &sum, &m_ik); + } + tmp.m[i][j] = sum; + } + } + *res = tmp; +} + +// compute the theta_point corresponding to the couple of point T on an elliptic product +static void +base_change(theta_point_t *out, const theta_gluing_t *phi, const theta_couple_point_t *T) +{ + theta_point_t null_point; + + // null_point = (a : b : c : d) + // a = P1.x P2.x, b = P1.x P2.z, c = P1.z P2.x, d = P1.z P2.z + fp2_mul(&null_point.x, &T->P1.x, &T->P2.x); + fp2_mul(&null_point.y, &T->P1.x, &T->P2.z); + fp2_mul(&null_point.z, &T->P2.x, &T->P1.z); + fp2_mul(&null_point.t, &T->P1.z, &T->P2.z); + + // Apply the basis change + apply_isomorphism(out, &phi->M, &null_point); +} + +static void +action_by_translation_z_and_det(fp2_t *z_inv, fp2_t *det_inv, const ec_point_t *P4, const ec_point_t *P2) +{ + // Store the Z-coordinate to invert + fp2_copy(z_inv, &P4->z); + + // Then collect detij = xij wij - uij zij + fp2_t tmp; + fp2_mul(det_inv, &P4->x, &P2->z); + fp2_mul(&tmp, &P4->z, &P2->x); + fp2_sub(det_inv, det_inv, &tmp); +} + +static void +action_by_translation_compute_matrix(translation_matrix_t *G, + const ec_point_t *P4, + const ec_point_t *P2, + const fp2_t *z_inv, + const fp2_t *det_inv) +{ + fp2_t tmp; + + // Gi.g10 = uij xij /detij - xij/zij + fp2_mul(&tmp, &P4->x, z_inv); + fp2_mul(&G->g10, &P4->x, &P2->x); + fp2_mul(&G->g10, &G->g10, det_inv); + fp2_sub(&G->g10, &G->g10, &tmp); + + // Gi.g11 = uij zij * detij + fp2_mul(&G->g11, &P2->x, det_inv); + fp2_mul(&G->g11, &G->g11, &P4->z); + + // Gi.g00 = -Gi.g11 + fp2_neg(&G->g00, &G->g11); + + // Gi.g01 = - wij zij detij + 
fp2_mul(&G->g01, &P2->z, det_inv); + fp2_mul(&G->g01, &G->g01, &P4->z); + fp2_neg(&G->g01, &G->g01); +} + +// Returns 1 if the basis is as expected and 0 otherwise +// We only expect this to fail for malformed signatures, so +// do not require this to run in constant time. +static int +verify_two_torsion(const theta_couple_point_t *K1_2, const theta_couple_point_t *K2_2, const theta_couple_curve_t *E12) +{ + // First check if any point in K1_2 or K2_2 is zero, if they are then the points did not have + // order 8 when we started gluing + if (ec_is_zero(&K1_2->P1) | ec_is_zero(&K1_2->P2) | ec_is_zero(&K2_2->P1) | ec_is_zero(&K2_2->P2)) { + return 0; + } + + // Now ensure that P1, Q1 and P2, Q2 are independent. For points of order two this means + // that they're not the same + if (ec_is_equal(&K1_2->P1, &K2_2->P1) | ec_is_equal(&K1_2->P2, &K2_2->P2)) { + return 0; + } + + // Finally, double points to ensure all points have order exactly 0 + theta_couple_point_t O1, O2; + double_couple_point(&O1, K1_2, E12); + double_couple_point(&O2, K2_2, E12); + // If this check fails then the points had order 2*f for some f, and the kernel is malformed. + if (!(ec_is_zero(&O1.P1) & ec_is_zero(&O1.P2) & ec_is_zero(&O2.P1) & ec_is_zero(&O2.P2))) { + return 0; + } + + return 1; +} + +// Computes the action by translation for four points +// (P1, P2) and (Q1, Q2) on E1 x E2 simultaneously to +// save on inversions. +// Returns 0 if any of Pi or Qi does not have order 2 +// and 1 otherwise +static int +action_by_translation(translation_matrix_t *Gi, + const theta_couple_point_t *K1_4, + const theta_couple_point_t *K2_4, + const theta_couple_curve_t *E12) +{ + // Compute points of order 2 from Ki_4 + theta_couple_point_t K1_2, K2_2; + double_couple_point(&K1_2, K1_4, E12); + double_couple_point(&K2_2, K2_4, E12); + + if (!verify_two_torsion(&K1_2, &K2_2, E12)) { + return 0; + } + + // We need to invert four Z coordinates and + // four determinants which we do with batched + // inversion + fp2_t inverses[8]; + action_by_translation_z_and_det(&inverses[0], &inverses[4], &K1_4->P1, &K1_2.P1); + action_by_translation_z_and_det(&inverses[1], &inverses[5], &K1_4->P2, &K1_2.P2); + action_by_translation_z_and_det(&inverses[2], &inverses[6], &K2_4->P1, &K2_2.P1); + action_by_translation_z_and_det(&inverses[3], &inverses[7], &K2_4->P2, &K2_2.P2); + + fp2_batched_inv(inverses, 8); + if (fp2_is_zero(&inverses[0])) + return 0; // something was wrong with our input (which somehow was not caught by + // verify_two_torsion) + + action_by_translation_compute_matrix(&Gi[0], &K1_4->P1, &K1_2.P1, &inverses[0], &inverses[4]); + action_by_translation_compute_matrix(&Gi[1], &K1_4->P2, &K1_2.P2, &inverses[1], &inverses[5]); + action_by_translation_compute_matrix(&Gi[2], &K2_4->P1, &K2_2.P1, &inverses[2], &inverses[6]); + action_by_translation_compute_matrix(&Gi[3], &K2_4->P2, &K2_2.P2, &inverses[3], &inverses[7]); + + return 1; +} + +// Given the appropriate four torsion, computes the +// change of basis to compute the correct theta null +// point. 
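// Editorial note: the 4x4 matrix is assembled from the four 2x2 translation
// matrices Gi produced by action_by_translation() above. Products of the Gi
// entries give the first row (the "trace" computation in the body), and the
// remaining three rows are obtained by letting (0, K2_4.P2), (K1_4.P1, 0) and
// (K1_4.P1, K2_4.P2) act on the rows already computed, as the inline comments
// in the function indicate.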
+// Returns 0 if the order of K1_4 or K2_4 is not 4 +static int +gluing_change_of_basis(basis_change_matrix_t *M, + const theta_couple_point_t *K1_4, + const theta_couple_point_t *K2_4, + const theta_couple_curve_t *E12) +{ + // Compute the four 2x2 matrices for the action by translation + // on the four points: + translation_matrix_t Gi[4]; + if (!action_by_translation(Gi, K1_4, K2_4, E12)) + return 0; + + // Computation of the 4x4 matrix from Mij + // t001, t101 (resp t002, t102) first column of M11 * M21 (resp M12 * M22) + fp2_t t001, t101, t002, t102, tmp; + + fp2_mul(&t001, &Gi[0].g00, &Gi[2].g00); + fp2_mul(&tmp, &Gi[0].g01, &Gi[2].g10); + fp2_add(&t001, &t001, &tmp); + + fp2_mul(&t101, &Gi[0].g10, &Gi[2].g00); + fp2_mul(&tmp, &Gi[0].g11, &Gi[2].g10); + fp2_add(&t101, &t101, &tmp); + + fp2_mul(&t002, &Gi[1].g00, &Gi[3].g00); + fp2_mul(&tmp, &Gi[1].g01, &Gi[3].g10); + fp2_add(&t002, &t002, &tmp); + + fp2_mul(&t102, &Gi[1].g10, &Gi[3].g00); + fp2_mul(&tmp, &Gi[1].g11, &Gi[3].g10); + fp2_add(&t102, &t102, &tmp); + + // trace for the first row + fp2_set_one(&M->m[0][0]); + fp2_mul(&tmp, &t001, &t002); + fp2_add(&M->m[0][0], &M->m[0][0], &tmp); + fp2_mul(&tmp, &Gi[2].g00, &Gi[3].g00); + fp2_add(&M->m[0][0], &M->m[0][0], &tmp); + fp2_mul(&tmp, &Gi[0].g00, &Gi[1].g00); + fp2_add(&M->m[0][0], &M->m[0][0], &tmp); + + fp2_mul(&M->m[0][1], &t001, &t102); + fp2_mul(&tmp, &Gi[2].g00, &Gi[3].g10); + fp2_add(&M->m[0][1], &M->m[0][1], &tmp); + fp2_mul(&tmp, &Gi[0].g00, &Gi[1].g10); + fp2_add(&M->m[0][1], &M->m[0][1], &tmp); + + fp2_mul(&M->m[0][2], &t101, &t002); + fp2_mul(&tmp, &Gi[2].g10, &Gi[3].g00); + fp2_add(&M->m[0][2], &M->m[0][2], &tmp); + fp2_mul(&tmp, &Gi[0].g10, &Gi[1].g00); + fp2_add(&M->m[0][2], &M->m[0][2], &tmp); + + fp2_mul(&M->m[0][3], &t101, &t102); + fp2_mul(&tmp, &Gi[2].g10, &Gi[3].g10); + fp2_add(&M->m[0][3], &M->m[0][3], &tmp); + fp2_mul(&tmp, &Gi[0].g10, &Gi[1].g10); + fp2_add(&M->m[0][3], &M->m[0][3], &tmp); + + // Compute the action of (0,out.K2_4.P2) for the second row + fp2_mul(&tmp, &Gi[3].g01, &M->m[0][1]); + fp2_mul(&M->m[1][0], &Gi[3].g00, &M->m[0][0]); + fp2_add(&M->m[1][0], &M->m[1][0], &tmp); + + fp2_mul(&tmp, &Gi[3].g11, &M->m[0][1]); + fp2_mul(&M->m[1][1], &Gi[3].g10, &M->m[0][0]); + fp2_add(&M->m[1][1], &M->m[1][1], &tmp); + + fp2_mul(&tmp, &Gi[3].g01, &M->m[0][3]); + fp2_mul(&M->m[1][2], &Gi[3].g00, &M->m[0][2]); + fp2_add(&M->m[1][2], &M->m[1][2], &tmp); + + fp2_mul(&tmp, &Gi[3].g11, &M->m[0][3]); + fp2_mul(&M->m[1][3], &Gi[3].g10, &M->m[0][2]); + fp2_add(&M->m[1][3], &M->m[1][3], &tmp); + + // compute the action of (K1_4.P1,0) for the third row + fp2_mul(&tmp, &Gi[0].g01, &M->m[0][2]); + fp2_mul(&M->m[2][0], &Gi[0].g00, &M->m[0][0]); + fp2_add(&M->m[2][0], &M->m[2][0], &tmp); + + fp2_mul(&tmp, &Gi[0].g01, &M->m[0][3]); + fp2_mul(&M->m[2][1], &Gi[0].g00, &M->m[0][1]); + fp2_add(&M->m[2][1], &M->m[2][1], &tmp); + + fp2_mul(&tmp, &Gi[0].g11, &M->m[0][2]); + fp2_mul(&M->m[2][2], &Gi[0].g10, &M->m[0][0]); + fp2_add(&M->m[2][2], &M->m[2][2], &tmp); + + fp2_mul(&tmp, &Gi[0].g11, &M->m[0][3]); + fp2_mul(&M->m[2][3], &Gi[0].g10, &M->m[0][1]); + fp2_add(&M->m[2][3], &M->m[2][3], &tmp); + + // compute the action of (K1_4.P1,K2_4.P2) for the final row + fp2_mul(&tmp, &Gi[0].g01, &M->m[1][2]); + fp2_mul(&M->m[3][0], &Gi[0].g00, &M->m[1][0]); + fp2_add(&M->m[3][0], &M->m[3][0], &tmp); + + fp2_mul(&tmp, &Gi[0].g01, &M->m[1][3]); + fp2_mul(&M->m[3][1], &Gi[0].g00, &M->m[1][1]); + fp2_add(&M->m[3][1], &M->m[3][1], &tmp); + + fp2_mul(&tmp, &Gi[0].g11, &M->m[1][2]); + 
fp2_mul(&M->m[3][2], &Gi[0].g10, &M->m[1][0]); + fp2_add(&M->m[3][2], &M->m[3][2], &tmp); + + fp2_mul(&tmp, &Gi[0].g11, &M->m[1][3]); + fp2_mul(&M->m[3][3], &Gi[0].g10, &M->m[1][1]); + fp2_add(&M->m[3][3], &M->m[3][3], &tmp); + + return 1; +} + +/** + * @brief Compute the gluing isogeny from an elliptic product + * + * @param out Output: the theta_gluing + * @param K1_8 a couple point + * @param E12 an elliptic curve product + * @param K2_8 a point in E2[8] + * + * out : E1xE2 -> A of kernel [4](K1_8,K2_8) + * if the kernel supplied has the incorrect order, or gluing seems malformed, + * returns 0, otherwise returns 1. + */ +static int +gluing_compute(theta_gluing_t *out, + const theta_couple_curve_t *E12, + const theta_couple_jac_point_t *xyK1_8, + const theta_couple_jac_point_t *xyK2_8, + bool verify) +{ + // Ensure that we have been given the eight torsion +#ifndef NDEBUG + { + int check = test_jac_order_twof(&xyK1_8->P1, &E12->E1, 3); + if (!check) + debug_print("xyK1_8->P1 does not have order 8"); + check = test_jac_order_twof(&xyK2_8->P1, &E12->E1, 3); + if (!check) + debug_print("xyK2_8->P1 does not have order 8"); + check = test_jac_order_twof(&xyK1_8->P2, &E12->E2, 3); + if (!check) + debug_print("xyK2_8->P1 does not have order 8"); + check = test_jac_order_twof(&xyK2_8->P2, &E12->E2, 3); + if (!check) + debug_print("xyK2_8->P2 does not have order 8"); + } +#endif + + out->xyK1_8 = *xyK1_8; + out->domain = *E12; + + // Given points in E[8] x E[8] we need the four torsion below + theta_couple_jac_point_t xyK1_4, xyK2_4; + + double_couple_jac_point(&xyK1_4, xyK1_8, E12); + double_couple_jac_point(&xyK2_4, xyK2_8, E12); + + // Convert from (X:Y:Z) coordinates to (X:Z) + theta_couple_point_t K1_8, K2_8; + theta_couple_point_t K1_4, K2_4; + + couple_jac_to_xz(&K1_8, xyK1_8); + couple_jac_to_xz(&K2_8, xyK2_8); + couple_jac_to_xz(&K1_4, &xyK1_4); + couple_jac_to_xz(&K2_4, &xyK2_4); + + // Set the basis change matrix, if we have not been given a valid K[8] for this computation + // gluing_change_of_basis will detect this and return 0 + if (!gluing_change_of_basis(&out->M, &K1_4, &K2_4, E12)) { + debug_print("gluing failed as kernel does not have correct order"); + return 0; + } + + // apply the base change to the kernel + theta_point_t TT1, TT2; + + base_change(&TT1, out, &K1_8); + base_change(&TT2, out, &K2_8); + + // compute the codomain + to_squared_theta(&TT1, &TT1); + to_squared_theta(&TT2, &TT2); + + // If the kernel is well formed then TT1.t and TT2.t are zero + // if they are not, we exit early as the signature we are validating + // is probably malformed + if (!(fp2_is_zero(&TT1.t) & fp2_is_zero(&TT2.t))) { + debug_print("gluing failed TT1.t or TT2.t is not zero"); + return 0; + } + // Test our projective factors are non zero + if (fp2_is_zero(&TT1.x) | fp2_is_zero(&TT2.x) | fp2_is_zero(&TT1.y) | fp2_is_zero(&TT2.z) | fp2_is_zero(&TT1.z)) + return 0; // invalid input + + // Projective factor: Ax + fp2_mul(&out->codomain.x, &TT1.x, &TT2.x); + fp2_mul(&out->codomain.y, &TT1.y, &TT2.x); + fp2_mul(&out->codomain.z, &TT1.x, &TT2.z); + fp2_set_zero(&out->codomain.t); + // Projective factor: ABCxz + fp2_mul(&out->precomputation.x, &TT1.y, &TT2.z); + fp2_copy(&out->precomputation.y, &out->codomain.z); + fp2_copy(&out->precomputation.z, &out->codomain.y); + fp2_set_zero(&out->precomputation.t); + + // Compute the two components of phi(K1_8) = (x:x:y:y). 
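+    // Only the two distinct values are kept in imageK1_8.(x, y); gluing_eval_point
+    // later multiplies by the swapped pair (y:y:x:x), which acts as the inverse of
+    // this image up to a common projective factor, avoiding an explicit inversion.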
+ fp2_mul(&out->imageK1_8.x, &TT1.x, &out->precomputation.x); + fp2_mul(&out->imageK1_8.y, &TT1.z, &out->precomputation.z); + + // If K1_8 and K2_8 are our 8-torsion points, this ensures that the + // 4-torsion points [2]K1_8 and [2]K2_8 are isotropic. + if (verify) { + fp2_t t1, t2; + fp2_mul(&t1, &TT1.y, &out->precomputation.y); + if (!fp2_is_equal(&out->imageK1_8.x, &t1)) + return 0; + fp2_mul(&t1, &TT2.x, &out->precomputation.x); + fp2_mul(&t2, &TT2.z, &out->precomputation.z); + if (!fp2_is_equal(&t2, &t1)) + return 0; + } + + // compute the final codomain + hadamard(&out->codomain, &out->codomain); + return 1; +} + +// sub routine of the gluing eval +static void +gluing_eval_point(theta_point_t *image, const theta_couple_jac_point_t *P, const theta_gluing_t *phi) +{ + theta_point_t T1, T2; + add_components_t add_comp1, add_comp2; + + // Compute the cross addition components of P1+Q1 and P2+Q2 + jac_to_xz_add_components(&add_comp1, &P->P1, &phi->xyK1_8.P1, &phi->domain.E1); + jac_to_xz_add_components(&add_comp2, &P->P2, &phi->xyK1_8.P2, &phi->domain.E2); + + // Compute T1 and T2 derived from the cross addition components. + fp2_mul(&T1.x, &add_comp1.u, &add_comp2.u); // T1x = u1u2 + fp2_mul(&T2.t, &add_comp1.v, &add_comp2.v); // T2t = v1v2 + fp2_add(&T1.x, &T1.x, &T2.t); // T1x = u1u2 + v1v2 + fp2_mul(&T1.y, &add_comp1.u, &add_comp2.w); // T1y = u1w2 + fp2_mul(&T1.z, &add_comp1.w, &add_comp2.u); // T1z = w1u2 + fp2_mul(&T1.t, &add_comp1.w, &add_comp2.w); // T1t = w1w2 + fp2_add(&T2.x, &add_comp1.u, &add_comp1.v); // T2x = (u1+v1) + fp2_add(&T2.y, &add_comp2.u, &add_comp2.v); // T2y = (u2+v2) + fp2_mul(&T2.x, &T2.x, &T2.y); // T2x = (u1+v1)(u2+v2) + fp2_sub(&T2.x, &T2.x, &T1.x); // T1x = v1u2 + u1v2 + fp2_mul(&T2.y, &add_comp1.v, &add_comp2.w); // T2y = v1w2 + fp2_mul(&T2.z, &add_comp1.w, &add_comp2.v); // T2z = w1v2 + fp2_set_zero(&T2.t); // T2t = 0 + + // Apply the basis change and compute their respective square + // theta(P+Q) = M.T1 - M.T2 and theta(P-Q) = M.T1 + M.T2 + apply_isomorphism_general(&T1, &phi->M, &T1, true); + apply_isomorphism_general(&T2, &phi->M, &T2, false); + pointwise_square(&T1, &T1); + pointwise_square(&T2, &T2); + + // the difference between the two is therefore theta(P+Q)theta(P-Q) + // whose hadamard transform is then the product of the dual + // theta_points of phi(P) and phi(Q). + fp2_sub(&T1.x, &T1.x, &T2.x); + fp2_sub(&T1.y, &T1.y, &T2.y); + fp2_sub(&T1.z, &T1.z, &T2.z); + fp2_sub(&T1.t, &T1.t, &T2.t); + hadamard(&T1, &T1); + + // Compute (x, y, z, t) + // As imageK1_8 = (x:x:y:y), its inverse is (y:y:x:x). + fp2_mul(&image->x, &T1.x, &phi->imageK1_8.y); + fp2_mul(&image->y, &T1.y, &phi->imageK1_8.y); + fp2_mul(&image->z, &T1.z, &phi->imageK1_8.x); + fp2_mul(&image->t, &T1.t, &phi->imageK1_8.x); + + hadamard(image, image); +} + +// Same as gluing_eval_point but in the very special case where we already know that the point will +// have a zero coordinate at the place where the zero coordinate of the dual_theta_nullpoint would +// have made the computation difficult +static int +gluing_eval_point_special_case(theta_point_t *image, const theta_couple_point_t *P, const theta_gluing_t *phi) +{ + theta_point_t T; + + // Apply the basis change + base_change(&T, phi, P); + + // Apply the to_squared_theta transform + to_squared_theta(&T, &T); + + // This coordinate should always be 0 in a gluing because D=0. 
+ // If this is not the case, something went very wrong, so reject + if (!fp2_is_zero(&T.t)) + return 0; + + // Compute (x, y, z, t) + fp2_mul(&image->x, &T.x, &phi->precomputation.x); + fp2_mul(&image->y, &T.y, &phi->precomputation.y); + fp2_mul(&image->z, &T.z, &phi->precomputation.z); + fp2_set_zero(&image->t); + + hadamard(image, image); + return 1; +} + +/** + * @brief Evaluate a gluing isogeny from an elliptic product on a basis + * + * @param image1 Output: the theta_point of the image of the first couple of points + * @param image2 Output : the theta point of the image of the second couple of points + * @param xyT1: A pair of points (X : Y : Z) on E1E2 to glue using phi + * @param xyT2: A pair of points (X : Y : Z) on E1E2 to glue using phi + * @param phi : a gluing isogeny E1 x E2 -> A + * + **/ +static void +gluing_eval_basis(theta_point_t *image1, + theta_point_t *image2, + const theta_couple_jac_point_t *xyT1, + const theta_couple_jac_point_t *xyT2, + const theta_gluing_t *phi) +{ + gluing_eval_point(image1, xyT1, phi); + gluing_eval_point(image2, xyT2, phi); +} + +/** + * @brief Compute a (2,2) isogeny in dimension 2 in the theta_model + * + * @param out Output: the theta_isogeny + * @param A a theta null point for the domain + * @param T1_8 a point in A[8] + * @param T2_8 a point in A[8] + * @param hadamard_bool_1 a boolean used for the last two steps of the chain + * @param hadamard_bool_2 a boolean used for the last two steps of the chain + * + * out : A -> B of kernel [4](T1_8,T2_8) + * hadamard_bool_1 controls if the domain is in standard or dual coordinates + * hadamard_bool_2 controls if the codomain is in standard or dual coordinates + * verify: add extra sanity check to ensure our 8-torsion points are coherent with the isogeny + * + */ +static int +theta_isogeny_compute(theta_isogeny_t *out, + const theta_structure_t *A, + const theta_point_t *T1_8, + const theta_point_t *T2_8, + bool hadamard_bool_1, + bool hadamard_bool_2, + bool verify) +{ + out->hadamard_bool_1 = hadamard_bool_1; + out->hadamard_bool_2 = hadamard_bool_2; + out->domain = *A; + out->T1_8 = *T1_8; + out->T2_8 = *T2_8; + out->codomain.precomputation = false; + + theta_point_t TT1, TT2; + + if (hadamard_bool_1) { + hadamard(&TT1, T1_8); + to_squared_theta(&TT1, &TT1); + hadamard(&TT2, T2_8); + to_squared_theta(&TT2, &TT2); + } else { + to_squared_theta(&TT1, T1_8); + to_squared_theta(&TT2, T2_8); + } + + fp2_t t1, t2; + + // Test that our projective factor ABCDxzw is non zero, where + // TT1=(Ax, Bx, Cy, Dy), TT2=(Az, Bw, Cz, Dw) + // But ABCDxzw=0 can only happen if we had an unexpected splitting in + // the isogeny chain. 
+ // In either case reject + // (this is not strictly necessary, we could just return (0:0:0:0)) + if (fp2_is_zero(&TT2.x) | fp2_is_zero(&TT2.y) | fp2_is_zero(&TT2.z) | fp2_is_zero(&TT2.t) | fp2_is_zero(&TT1.x) | + fp2_is_zero(&TT1.y)) + return 0; + + fp2_mul(&t1, &TT1.x, &TT2.y); + fp2_mul(&t2, &TT1.y, &TT2.x); + fp2_mul(&out->codomain.null_point.x, &TT2.x, &t1); + fp2_mul(&out->codomain.null_point.y, &TT2.y, &t2); + fp2_mul(&out->codomain.null_point.z, &TT2.z, &t1); + fp2_mul(&out->codomain.null_point.t, &TT2.t, &t2); + fp2_t t3; + fp2_mul(&t3, &TT2.z, &TT2.t); + fp2_mul(&out->precomputation.x, &t3, &TT1.y); + fp2_mul(&out->precomputation.y, &t3, &TT1.x); + fp2_copy(&out->precomputation.z, &out->codomain.null_point.t); + fp2_copy(&out->precomputation.t, &out->codomain.null_point.z); + + // If T1_8 and T2_8 are our 8-torsion points, this ensures that the + // 4-torsion points 2T1_8 and 2T2_8 are isotropic. + if (verify) { + fp2_mul(&t1, &TT1.x, &out->precomputation.x); + fp2_mul(&t2, &TT1.y, &out->precomputation.y); + if (!fp2_is_equal(&t1, &t2)) + return 0; + fp2_mul(&t1, &TT1.z, &out->precomputation.z); + fp2_mul(&t2, &TT1.t, &out->precomputation.t); + if (!fp2_is_equal(&t1, &t2)) + return 0; + fp2_mul(&t1, &TT2.x, &out->precomputation.x); + fp2_mul(&t2, &TT2.z, &out->precomputation.z); + if (!fp2_is_equal(&t1, &t2)) + return 0; + fp2_mul(&t1, &TT2.y, &out->precomputation.y); + fp2_mul(&t2, &TT2.t, &out->precomputation.t); + if (!fp2_is_equal(&t1, &t2)) + return 0; + } + + if (hadamard_bool_2) { + hadamard(&out->codomain.null_point, &out->codomain.null_point); + } + return 1; +} + +/** + * @brief Compute a (2,2) isogeny when only the 4 torsion above the kernel is known and not the 8 + * torsion + * + * @param out Output: the theta_isogeny + * @param A a theta null point for the domain + * @param T1_4 a point in A[4] + * @param T2_4 a point in A[4] + * @param hadamard_bool_1 a boolean + * @param hadamard_bool_2 a boolean + * + * out : A -> B of kernel [2](T1_4,T2_4) + * hadamard_bool_1 controls if the domain is in standard or dual coordinates + * hadamard_bool_2 controls if the codomain is in standard or dual coordinates + * + */ +static void +theta_isogeny_compute_4(theta_isogeny_t *out, + const theta_structure_t *A, + const theta_point_t *T1_4, + const theta_point_t *T2_4, + bool hadamard_bool_1, + bool hadamard_bool_2) +{ + out->hadamard_bool_1 = hadamard_bool_1; + out->hadamard_bool_2 = hadamard_bool_2; + out->domain = *A; + out->T1_8 = *T1_4; + out->T2_8 = *T2_4; + out->codomain.precomputation = false; + + theta_point_t TT1, TT2; + // we will compute: + // TT1 = (xAB, _ , xCD, _) + // TT2 = (AA,BB,CC,DD) + + // fp2_t xA_inv,zA_inv,tB_inv; + + if (hadamard_bool_1) { + hadamard(&TT1, T1_4); + to_squared_theta(&TT1, &TT1); + + hadamard(&TT2, &A->null_point); + to_squared_theta(&TT2, &TT2); + } else { + to_squared_theta(&TT1, T1_4); + to_squared_theta(&TT2, &A->null_point); + } + + fp2_t sqaabb, sqaacc; + fp2_mul(&sqaabb, &TT2.x, &TT2.y); + fp2_mul(&sqaacc, &TT2.x, &TT2.z); + // No need to check the square roots, only used for signing. 
+ // sqaabb = sqrt(AA*BB) + fp2_sqrt(&sqaabb); + // sqaacc = sqrt(AA*CC) + fp2_sqrt(&sqaacc); + + // we compute out->codomain.null_point = (xAB * sqaacc * AA, xAB *sqaabb *sqaacc, xCD*sqaabb * + // AA) out->precomputation = (xAB * BB * CC *DD , sqaabb * CC * DD * xAB , sqaacc * BB* DD * xAB + // , xCD * sqaabb *sqaacc * BB) + + fp2_mul(&out->codomain.null_point.y, &sqaabb, &sqaacc); + fp2_mul(&out->precomputation.t, &out->codomain.null_point.y, &TT1.z); + fp2_mul(&out->codomain.null_point.y, &out->codomain.null_point.y, + &TT1.x); // done for out->codomain.null_point.y + + fp2_mul(&out->codomain.null_point.t, &TT1.z, &sqaabb); + fp2_mul(&out->codomain.null_point.t, &out->codomain.null_point.t, + &TT2.x); // done for out->codomain.null_point.t + + fp2_mul(&out->codomain.null_point.x, &TT1.x, &TT2.x); + fp2_mul(&out->codomain.null_point.z, &out->codomain.null_point.x, + &TT2.z); // done for out->codomain.null_point.z + fp2_mul(&out->codomain.null_point.x, &out->codomain.null_point.x, + &sqaacc); // done for out->codomain.null_point.x + + fp2_mul(&out->precomputation.x, &TT1.x, &TT2.t); + fp2_mul(&out->precomputation.z, &out->precomputation.x, &TT2.y); + fp2_mul(&out->precomputation.x, &out->precomputation.x, &TT2.z); + fp2_mul(&out->precomputation.y, &out->precomputation.x, &sqaabb); // done for out->precomputation.y + fp2_mul(&out->precomputation.x, &out->precomputation.x, &TT2.y); // done for out->precomputation.x + fp2_mul(&out->precomputation.z, &out->precomputation.z, &sqaacc); // done for out->precomputation.z + fp2_mul(&out->precomputation.t, &out->precomputation.t, &TT2.y); // done for out->precomputation.t + + if (hadamard_bool_2) { + hadamard(&out->codomain.null_point, &out->codomain.null_point); + } +} + +/** + * @brief Compute a (2,2) isogeny when only the kernel is known and not the 8 or 4 torsion above + * + * @param out Output: the theta_isogeny + * @param A a theta null point for the domain + * @param T1_2 a point in A[2] + * @param T2_2 a point in A[2] + * @param hadamard_bool_1 a boolean + * @param boo2 a boolean + * + * out : A -> B of kernel (T1_2,T2_2) + * hadamard_bool_1 controls if the domain is in standard or dual coordinates + * hadamard_bool_2 controls if the codomain is in standard or dual coordinates + * + */ +static void +theta_isogeny_compute_2(theta_isogeny_t *out, + const theta_structure_t *A, + const theta_point_t *T1_2, + const theta_point_t *T2_2, + bool hadamard_bool_1, + bool hadamard_bool_2) +{ + out->hadamard_bool_1 = hadamard_bool_1; + out->hadamard_bool_2 = hadamard_bool_2; + out->domain = *A; + out->T1_8 = *T1_2; + out->T2_8 = *T2_2; + out->codomain.precomputation = false; + + theta_point_t TT2; + // we will compute: + // TT2 = (AA,BB,CC,DD) + + if (hadamard_bool_1) { + hadamard(&TT2, &A->null_point); + to_squared_theta(&TT2, &TT2); + } else { + to_squared_theta(&TT2, &A->null_point); + } + + // we compute out->codomain.null_point = (AA,sqaabb, sqaacc, sqaadd) + // out->precomputation = ( BB * CC *DD , sqaabb * CC * DD , sqaacc * BB* DD , sqaadd * BB * CC) + fp2_copy(&out->codomain.null_point.x, &TT2.x); + fp2_mul(&out->codomain.null_point.y, &TT2.x, &TT2.y); + fp2_mul(&out->codomain.null_point.z, &TT2.x, &TT2.z); + fp2_mul(&out->codomain.null_point.t, &TT2.x, &TT2.t); + // No need to check the square roots, only used for signing. 
+ fp2_sqrt(&out->codomain.null_point.y); + fp2_sqrt(&out->codomain.null_point.z); + fp2_sqrt(&out->codomain.null_point.t); + + fp2_mul(&out->precomputation.x, &TT2.z, &TT2.t); + fp2_mul(&out->precomputation.y, + &out->precomputation.x, + &out->codomain.null_point.y); // done for out->precomputation.y + fp2_mul(&out->precomputation.x, &out->precomputation.x, &TT2.y); // done for out->precomputation.x + fp2_mul(&out->precomputation.z, &TT2.t, &out->codomain.null_point.z); + fp2_mul(&out->precomputation.z, &out->precomputation.z, &TT2.y); // done for out->precomputation.z + fp2_mul(&out->precomputation.t, &TT2.z, &out->codomain.null_point.t); + fp2_mul(&out->precomputation.t, &out->precomputation.t, &TT2.y); // done for out->precomputation.t + + if (hadamard_bool_2) { + hadamard(&out->codomain.null_point, &out->codomain.null_point); + } +} + +static void +theta_isogeny_eval(theta_point_t *out, const theta_isogeny_t *phi, const theta_point_t *P) +{ + if (phi->hadamard_bool_1) { + hadamard(out, P); + to_squared_theta(out, out); + } else { + to_squared_theta(out, P); + } + fp2_mul(&out->x, &out->x, &phi->precomputation.x); + fp2_mul(&out->y, &out->y, &phi->precomputation.y); + fp2_mul(&out->z, &out->z, &phi->precomputation.z); + fp2_mul(&out->t, &out->t, &phi->precomputation.t); + + if (phi->hadamard_bool_2) { + hadamard(out, out); + } +} + +#if defined(ENABLE_SIGN) +// Sample a random secret index in [0, 5] to select one of the 6 normalisation +// matrices for the normalisation of the output of the (2,2)-chain during +// splitting +static unsigned char +sample_random_index(void) +{ + // To avoid bias in reduction we should only consider integers smaller + // than 2^32 which are a multiple of 6, so we only reduce bytes with a + // value in [0, 4294967292-1]. + // We have 4294967292/2^32 = ~99.9999999% chance that the first try is "good". 
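+    // The multiply-and-shift below is a constant-time substitute for % 6:
+    // 2863311531 = ceil(2^34 / 6), so for any seed < 2^32 the expression
+    // seed - (((uint64_t)seed * 2863311531) >> 34) * 6 equals seed % 6 exactly.
+    // Worked example: seed = 4294967291 gives floor(seed / 6) = 715827881,
+    // hence secret_index = 4294967291 - 6 * 715827881 = 5.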
+ unsigned char seed_arr[4]; + uint32_t seed; + + do { + randombytes(seed_arr, 4); + seed = (seed_arr[0] | (seed_arr[1] << 8) | (seed_arr[2] << 16) | (seed_arr[3] << 24)); + } while (seed >= 4294967292U); + + uint32_t secret_index = seed - (((uint64_t)seed * 2863311531U) >> 34) * 6; + assert(secret_index == seed % 6); // ensure the constant time trick above works + return (unsigned char)secret_index; +} +#endif + +static bool +splitting_compute(theta_splitting_t *out, const theta_structure_t *A, int zero_index, bool randomize) + +{ + // init + uint32_t ctl; + uint32_t count = 0; + fp2_t U_cst, t1, t2; + + memset(&out->M, 0, sizeof(basis_change_matrix_t)); + + // enumerate through all indices + for (int i = 0; i < 10; i++) { + fp2_set_zero(&U_cst); + for (int t = 0; t < 4; t++) { + // Iterate through the null point + choose_index_theta_point(&t2, t, &A->null_point); + choose_index_theta_point(&t1, t ^ EVEN_INDEX[i][1], &A->null_point); + + // Compute t1 * t2 + fp2_mul(&t1, &t1, &t2); + // If CHI_EVAL(i,t) is +1 we want ctl to be 0 and + // If CHI_EVAL(i,t) is -1 we want ctl to be 0xFF..FF + ctl = (uint32_t)(CHI_EVAL[EVEN_INDEX[i][0]][t] >> 1); + assert(ctl == 0 || ctl == 0xffffffff); + + fp2_neg(&t2, &t1); + fp2_select(&t1, &t1, &t2, ctl); + + // Then we compute U_cst ± (t1 * t2) + fp2_add(&U_cst, &U_cst, &t1); + } + + // If U_cst is 0 then update the splitting matrix + ctl = fp2_is_zero(&U_cst); + count -= ctl; + select_base_change_matrix(&out->M, &out->M, &SPLITTING_TRANSFORMS[i], ctl); + if (zero_index != -1 && i == zero_index && + !ctl) { // extra checks if we know exactly where the 0 index should be + return 0; + } + } + +#if defined(ENABLE_SIGN) + // Pick a random normalization matrix + if (randomize) { + unsigned char secret_index = sample_random_index(); + basis_change_matrix_t Mrandom; + + set_base_change_matrix_from_precomp(&Mrandom, &NORMALIZATION_TRANSFORMS[0]); + + // Use a constant time selection to pick the index we want + for (unsigned char i = 1; i < 6; i++) { + // When i == secret_index, mask == 0 and 0xFF..FF otherwise + int32_t mask = i - secret_index; + mask = (mask | -mask) >> 31; + select_base_change_matrix(&Mrandom, &Mrandom, &NORMALIZATION_TRANSFORMS[i], ~mask); + } + base_change_matrix_multiplication(&out->M, &Mrandom, &out->M); + } +#else + assert(!randomize); +#endif + + // apply the isomorphism to ensure the null point is compatible with splitting + apply_isomorphism(&out->B.null_point, &out->M, &A->null_point); + + // splitting was successful only if exactly one zero was identified + return count == 1; +} + +static int +theta_product_structure_to_elliptic_product(theta_couple_curve_t *E12, theta_structure_t *A) +{ + fp2_t xx, yy; + + // This should be true from our computations in splitting_compute + // but still check this for sanity + if (!is_product_theta_point(&A->null_point)) + return 0; + + ec_curve_init(&(E12->E1)); + ec_curve_init(&(E12->E2)); + + // A valid elliptic theta null point has no zero coordinate + if (fp2_is_zero(&A->null_point.x) | fp2_is_zero(&A->null_point.y) | fp2_is_zero(&A->null_point.z)) + return 0; + + // xx = x², yy = y² + fp2_sqr(&xx, &A->null_point.x); + fp2_sqr(&yy, &A->null_point.y); + // xx = x^4, yy = y^4 + fp2_sqr(&xx, &xx); + fp2_sqr(&yy, &yy); + + // A2 = -2(x^4+y^4)/(x^4-y^4) + fp2_add(&E12->E2.A, &xx, &yy); + fp2_sub(&E12->E2.C, &xx, &yy); + fp2_add(&E12->E2.A, &E12->E2.A, &E12->E2.A); + fp2_neg(&E12->E2.A, &E12->E2.A); + + // same with x,z + fp2_sqr(&xx, &A->null_point.x); + fp2_sqr(&yy, &A->null_point.z); + fp2_sqr(&xx, 
&xx); + fp2_sqr(&yy, &yy); + + // A1 = -2(x^4+z^4)/(x^4-z^4) + fp2_add(&E12->E1.A, &xx, &yy); + fp2_sub(&E12->E1.C, &xx, &yy); + fp2_add(&E12->E1.A, &E12->E1.A, &E12->E1.A); + fp2_neg(&E12->E1.A, &E12->E1.A); + + if (fp2_is_zero(&E12->E1.C) | fp2_is_zero(&E12->E2.C)) + return 0; + + return 1; +} + +static int +theta_point_to_montgomery_point(theta_couple_point_t *P12, const theta_point_t *P, const theta_structure_t *A) +{ + fp2_t temp; + const fp2_t *x, *z; + + if (!is_product_theta_point(P)) + return 0; + + x = &P->x; + z = &P->y; + if (fp2_is_zero(x) & fp2_is_zero(z)) { + x = &P->z; + z = &P->t; + } + if (fp2_is_zero(x) & fp2_is_zero(z)) { + return 0; // at this point P=(0:0:0:0) so is invalid + } + // P2.X = A.null_point.y * P.x + A.null_point.x * P.y + // P2.Z = - A.null_point.y * P.x + A.null_point.x * P.y + fp2_mul(&P12->P2.x, &A->null_point.y, x); + fp2_mul(&temp, &A->null_point.x, z); + fp2_sub(&P12->P2.z, &temp, &P12->P2.x); + fp2_add(&P12->P2.x, &P12->P2.x, &temp); + + x = &P->x; + z = &P->z; + if (fp2_is_zero(x) & fp2_is_zero(z)) { + x = &P->y; + z = &P->t; + } + // P1.X = A.null_point.z * P.x + A.null_point.x * P.z + // P1.Z = -A.null_point.z * P.x + A.null_point.x * P.z + fp2_mul(&P12->P1.x, &A->null_point.z, x); + fp2_mul(&temp, &A->null_point.x, z); + fp2_sub(&P12->P1.z, &temp, &P12->P1.x); + fp2_add(&P12->P1.x, &P12->P1.x, &temp); + return 1; +} + +static int +_theta_chain_compute_impl(unsigned n, + theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP, + bool verify, + bool randomize) +{ + theta_structure_t theta; + + // lift the basis + theta_couple_jac_point_t xyT1, xyT2; + + ec_basis_t bas1 = { .P = ker->T1.P1, .Q = ker->T2.P1, .PmQ = ker->T1m2.P1 }; + ec_basis_t bas2 = { .P = ker->T1.P2, .Q = ker->T2.P2, .PmQ = ker->T1m2.P2 }; + if (!lift_basis(&xyT1.P1, &xyT2.P1, &bas1, &E12->E1)) + return 0; + if (!lift_basis(&xyT1.P2, &xyT2.P2, &bas2, &E12->E2)) + return 0; + + const unsigned extra = HD_extra_torsion * extra_torsion; + +#ifndef NDEBUG + assert(extra == 0 || extra == 2); // only cases implemented + if (!test_point_order_twof(&bas2.P, &E12->E2, n + extra)) + debug_print("bas2.P does not have correct order"); + + if (!test_jac_order_twof(&xyT2.P2, &E12->E2, n + extra)) + debug_print("xyT2.P2 does not have correct order"); +#endif + + theta_point_t pts[numP ? numP : 1]; + + int space = 1; + for (unsigned i = 1; i < n; i *= 2) + ++space; + + uint16_t todo[space]; + todo[0] = n - 2 + extra; + + int current = 0; + + // kernel points for the gluing isogeny + theta_couple_jac_point_t jacQ1[space], jacQ2[space]; + jacQ1[0] = xyT1; + jacQ2[0] = xyT2; + while (todo[current] != 1) { + assert(todo[current] >= 2); + ++current; + assert(current < space); + // the gluing isogeny is quite a bit more expensive than the others, + // so we adjust the usual splitting rule here a little bit: towards + // the end of the doubling chain it will be cheaper to recompute the + // doublings after evaluation than to push the intermediate points. + const unsigned num_dbls = todo[current - 1] >= 16 ? 
todo[current - 1] / 2 : todo[current - 1] - 1; + assert(num_dbls && num_dbls < todo[current - 1]); + double_couple_jac_point_iter(&jacQ1[current], num_dbls, &jacQ1[current - 1], E12); + double_couple_jac_point_iter(&jacQ2[current], num_dbls, &jacQ2[current - 1], E12); + todo[current] = todo[current - 1] - num_dbls; + } + + // kernel points for the remaining isogeny steps + theta_point_t thetaQ1[space], thetaQ2[space]; + + // the gluing step + theta_gluing_t first_step; + { + assert(todo[current] == 1); + + // compute the gluing isogeny + if (!gluing_compute(&first_step, E12, &jacQ1[current], &jacQ2[current], verify)) + return 0; + + // evaluate + for (unsigned j = 0; j < numP; ++j) { + assert(ec_is_zero(&P12[j].P1) || ec_is_zero(&P12[j].P2)); + if (!gluing_eval_point_special_case(&pts[j], &P12[j], &first_step)) + return 0; + } + + // push kernel points through gluing isogeny + for (int j = 0; j < current; ++j) { + gluing_eval_basis(&thetaQ1[j], &thetaQ2[j], &jacQ1[j], &jacQ2[j], &first_step); + --todo[j]; + } + + --current; + } + + // set-up the theta_structure for the first codomain + theta.null_point = first_step.codomain; + theta.precomputation = 0; + theta_precomputation(&theta); + + theta_isogeny_t step; + + // and now we do the remaining steps + for (unsigned i = 1; current >= 0 && todo[current]; ++i) { + assert(current < space); + while (todo[current] != 1) { + assert(todo[current] >= 2); + ++current; + assert(current < space); + const unsigned num_dbls = todo[current - 1] / 2; + assert(num_dbls && num_dbls < todo[current - 1]); + double_iter(&thetaQ1[current], &theta, &thetaQ1[current - 1], num_dbls); + double_iter(&thetaQ2[current], &theta, &thetaQ2[current - 1], num_dbls); + todo[current] = todo[current - 1] - num_dbls; + } + + // computing the next step + int ret; + if (i == n - 2) // penultimate step + ret = theta_isogeny_compute(&step, &theta, &thetaQ1[current], &thetaQ2[current], 0, 0, verify); + else if (i == n - 1) // ultimate step + ret = theta_isogeny_compute(&step, &theta, &thetaQ1[current], &thetaQ2[current], 1, 0, false); + else + ret = theta_isogeny_compute(&step, &theta, &thetaQ1[current], &thetaQ2[current], 0, 1, verify); + if (!ret) + return 0; + + for (unsigned j = 0; j < numP; ++j) + theta_isogeny_eval(&pts[j], &step, &pts[j]); + + // updating the codomain + theta = step.codomain; + + // pushing the kernel + assert(todo[current] == 1); + for (int j = 0; j < current; ++j) { + theta_isogeny_eval(&thetaQ1[j], &step, &thetaQ1[j]); + theta_isogeny_eval(&thetaQ2[j], &step, &thetaQ2[j]); + assert(todo[j]); + --todo[j]; + } + + --current; + } + + assert(current == -1); + + if (!extra_torsion) { + if (n >= 3) { + // in the last step we've skipped pushing the kernel since current was == 0, let's do it now + theta_isogeny_eval(&thetaQ1[0], &step, &thetaQ1[0]); + theta_isogeny_eval(&thetaQ2[0], &step, &thetaQ2[0]); + } + + // penultimate step + theta_isogeny_compute_4(&step, &theta, &thetaQ1[0], &thetaQ2[0], 0, 0); + for (unsigned j = 0; j < numP; ++j) + theta_isogeny_eval(&pts[j], &step, &pts[j]); + theta = step.codomain; + theta_isogeny_eval(&thetaQ1[0], &step, &thetaQ1[0]); + theta_isogeny_eval(&thetaQ2[0], &step, &thetaQ2[0]); + + // ultimate step + theta_isogeny_compute_2(&step, &theta, &thetaQ1[0], &thetaQ2[0], 1, 0); + for (unsigned j = 0; j < numP; ++j) + theta_isogeny_eval(&pts[j], &step, &pts[j]); + theta = step.codomain; + } + + // final splitting step + theta_splitting_t last_step; + + bool is_split = splitting_compute(&last_step, &theta, extra_torsion ? 
8 : -1, randomize); + + if (!is_split) { + debug_print("kernel did not generate an isogeny between elliptic products"); + return 0; + } + + if (!theta_product_structure_to_elliptic_product(E34, &last_step.B)) + return 0; + + // evaluate + for (size_t j = 0; j < numP; ++j) { + apply_isomorphism(&pts[j], &last_step.M, &pts[j]); + if (!theta_point_to_montgomery_point(&P12[j], &pts[j], &last_step.B)) + return 0; + } + + return 1; +} + +int +theta_chain_compute_and_eval(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP) +{ + return _theta_chain_compute_impl(n, E12, ker, extra_torsion, E34, P12, numP, false, false); +} + +// Like theta_chain_compute_and_eval, adding extra verification checks; +// used in the signature verification +int +theta_chain_compute_and_eval_verify(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP) +{ + return _theta_chain_compute_impl(n, E12, ker, extra_torsion, E34, P12, numP, true, false); +} + +int +theta_chain_compute_and_eval_randomized(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP) +{ + return _theta_chain_compute_impl(n, E12, ker, extra_torsion, E34, P12, numP, false, true); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.h new file mode 100644 index 0000000000..d151811fe7 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.h @@ -0,0 +1,18 @@ +/** @file + * + * @authors Antonin Leroux + * + * @brief the theta isogeny header + */ + +#ifndef THETA_ISOGENY_H +#define THETA_ISOGENY_H + +#include +#include +#include +#include "theta_structure.h" +#include +#include + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/theta_structure.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/theta_structure.c new file mode 100644 index 0000000000..ce97ac61a8 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/theta_structure.c @@ -0,0 +1,78 @@ +#include "theta_structure.h" +#include + +void +theta_precomputation(theta_structure_t *A) +{ + + if (A->precomputation) { + return; + } + + theta_point_t A_dual; + to_squared_theta(&A_dual, &A->null_point); + + fp2_t t1, t2; + fp2_mul(&t1, &A_dual.x, &A_dual.y); + fp2_mul(&t2, &A_dual.z, &A_dual.t); + fp2_mul(&A->XYZ0, &t1, &A_dual.z); + fp2_mul(&A->XYT0, &t1, &A_dual.t); + fp2_mul(&A->YZT0, &t2, &A_dual.y); + fp2_mul(&A->XZT0, &t2, &A_dual.x); + + fp2_mul(&t1, &A->null_point.x, &A->null_point.y); + fp2_mul(&t2, &A->null_point.z, &A->null_point.t); + fp2_mul(&A->xyz0, &t1, &A->null_point.z); + fp2_mul(&A->xyt0, &t1, &A->null_point.t); + fp2_mul(&A->yzt0, &t2, &A->null_point.y); + fp2_mul(&A->xzt0, &t2, &A->null_point.x); + + A->precomputation = true; +} + +void +double_point(theta_point_t *out, theta_structure_t *A, const theta_point_t *in) +{ + to_squared_theta(out, in); + fp2_sqr(&out->x, &out->x); + fp2_sqr(&out->y, &out->y); + fp2_sqr(&out->z, &out->z); + fp2_sqr(&out->t, &out->t); + + if (!A->precomputation) { + theta_precomputation(A); + } + fp2_mul(&out->x, &out->x, &A->YZT0); + fp2_mul(&out->y, &out->y, &A->XZT0); + fp2_mul(&out->z, 
&out->z, &A->XYT0); + fp2_mul(&out->t, &out->t, &A->XYZ0); + + hadamard(out, out); + + fp2_mul(&out->x, &out->x, &A->yzt0); + fp2_mul(&out->y, &out->y, &A->xzt0); + fp2_mul(&out->z, &out->z, &A->xyt0); + fp2_mul(&out->t, &out->t, &A->xyz0); +} + +void +double_iter(theta_point_t *out, theta_structure_t *A, const theta_point_t *in, int exp) +{ + if (exp == 0) { + *out = *in; + } else { + double_point(out, A, in); + for (int i = 1; i < exp; i++) { + double_point(out, A, out); + } + } +} + +uint32_t +is_product_theta_point(const theta_point_t *P) +{ + fp2_t t1, t2; + fp2_mul(&t1, &P->x, &P->t); + fp2_mul(&t2, &P->y, &P->z); + return fp2_is_equal(&t1, &t2); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/theta_structure.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/theta_structure.h new file mode 100644 index 0000000000..fc630b750a --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/theta_structure.h @@ -0,0 +1,135 @@ +/** @file + * + * @authors Antonin Leroux + * + * @brief the theta structure header + */ + +#ifndef THETA_STRUCTURE_H +#define THETA_STRUCTURE_H + +#include +#include +#include + +/** @internal + * @ingroup hd_module + * @defgroup hd_theta Functions for theta structures + * @{ + */ + +/** + * @brief Perform the hadamard transform on a theta point + * + * @param out Output: the theta_point + * @param in a theta point* + * in = (x,y,z,t) + * out = (x+y+z+t, x-y+z-t, x+y-z-t, x-y-z+t) + * + */ +static inline void +hadamard(theta_point_t *out, const theta_point_t *in) +{ + fp2_t t1, t2, t3, t4; + + // t1 = x + y + fp2_add(&t1, &in->x, &in->y); + // t2 = x - y + fp2_sub(&t2, &in->x, &in->y); + // t3 = z + t + fp2_add(&t3, &in->z, &in->t); + // t4 = z - t + fp2_sub(&t4, &in->z, &in->t); + + fp2_add(&out->x, &t1, &t3); + fp2_add(&out->y, &t2, &t4); + fp2_sub(&out->z, &t1, &t3); + fp2_sub(&out->t, &t2, &t4); +} + +/** + * @brief Square the coordinates of a theta point + * @param out Output: the theta_point + * @param in a theta point* + * in = (x,y,z,t) + * out = (x^2, y^2, z^2, t^2) + * + */ +static inline void +pointwise_square(theta_point_t *out, const theta_point_t *in) +{ + fp2_sqr(&out->x, &in->x); + fp2_sqr(&out->y, &in->y); + fp2_sqr(&out->z, &in->z); + fp2_sqr(&out->t, &in->t); +} + +/** + * @brief Square the coordinates and then perform the hadamard transform + * + * @param out Output: the theta_point + * @param in a theta point* + * in = (x,y,z,t) + * out = (x^2+y^2+z^2+t^2, x^2-y^2+z^2-t^2, x^2+y^2-z^2-t^2, x^2-y^2-z^2+t^2) + * + */ +static inline void +to_squared_theta(theta_point_t *out, const theta_point_t *in) +{ + pointwise_square(out, in); + hadamard(out, out); +} + +/** + * @brief Perform the theta structure precomputation + * + * @param A Output: the theta_structure + * + * if A.null_point = (x,y,z,t) + * if (xx,yy,zz,tt) = to_squared_theta(A.null_point) + * Computes y0,z0,t0,Y0,Z0,T0 = x/y,x/z,x/t,XX/YY,XX/ZZ,XX/TT + * + */ +void theta_precomputation(theta_structure_t *A); + +/** + * @brief Compute the double of the theta point in on the theta struc A + * + * @param out Output: the theta_point + * @param A a theta structure + * @param in a theta point in the theta structure A + * in = (x,y,z,t) + * out = [2] (x,y,z,t) + * /!\ assumes that no coordinates is zero and that the precomputation of A has been done + * + */ +void double_point(theta_point_t *out, theta_structure_t *A, const theta_point_t *in); + +/** + * @brief Compute the iterated double of the theta point in on the theta struc A + * + * @param out Output: the 
theta_point + * @param A a theta structure + * @param in a theta point in the theta structure A + * @param exp the exponent + * in = (x,y,z,t) + * out = [2^2] (x,y,z,t) + * /!\ assumes that no coordinates is zero and that the precomputation of A has been done + * + */ +void double_iter(theta_point_t *out, theta_structure_t *A, const theta_point_t *in, int exp); + +/* + * @brief Check if a theta point is a product theta point + * + * @param P a theta point + * @return 0xFFFFFFFF if true, zero otherwise + */ +uint32_t is_product_theta_point(const theta_point_t *P); + +// end hd_theta +/** + * @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/tools.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/tools.c new file mode 100644 index 0000000000..242ea08fe2 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/tools.c @@ -0,0 +1,75 @@ +#include +#include + +static clock_t global_timer; + +clock_t +tic(void) +{ + global_timer = clock(); + return global_timer; +} + +float +tac(void) +{ + float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); + return ms; +} + +float +TAC(const char *str) +{ + float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); +#ifndef NDEBUG + printf("%s [%d ms]\n", str, (int)ms); +#endif + return ms; +} + +float +toc(const clock_t t) +{ + float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); + return ms; +} + +float +TOC(const clock_t t, const char *str) +{ + float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); + printf("%s [%d ms]\n", str, (int)ms); + return ms; + // printf("%s [%ld]\n",str,clock()-t); + // return (float) (clock()-t); +} + +float +TOC_clock(const clock_t t, const char *str) +{ + printf("%s [%ld]\n", str, clock() - t); + return (float)(clock() - t); +} + +clock_t +dclock(const clock_t t) +{ + return (clock() - t); +} + +float +clock_to_time(const clock_t t, const char *str) +{ + float ms = (1000. 
* (float)(t) / CLOCKS_PER_SEC); + printf("%s [%d ms]\n", str, (int)ms); + return ms; + // printf("%s [%ld]\n",str,t); + // return (float) (t); +} + +float +clock_print(const clock_t t, const char *str) +{ + printf("%s [%ld]\n", str, t); + return (float)(t); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/tools.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/tools.h new file mode 100644 index 0000000000..5a6a505fc1 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/tools.h @@ -0,0 +1,49 @@ + +#ifndef TOOLS_H +#define TOOLS_H + +#include + +// Debug printing: +// https://stackoverflow.com/questions/1644868/define-macro-for-debug-printing-in-c +#ifndef NDEBUG +#define DEBUG_PRINT 1 +#else +#define DEBUG_PRINT 0 +#endif + +#ifndef __FILE_NAME__ +#define __FILE_NAME__ "NA" +#endif + +#ifndef __LINE__ +#define __LINE__ 0 +#endif + +#ifndef __func__ +#define __func__ "NA" +#endif + +#define debug_print(fmt) \ + do { \ + if (DEBUG_PRINT) \ + printf("warning: %s, file %s, line %d, function %s().\n", \ + fmt, \ + __FILE_NAME__, \ + __LINE__, \ + __func__); \ + } while (0) + + +clock_t tic(void); +float tac(void); /* time in ms since last tic */ +float TAC(const char *str); /* same, but prints it with label 'str' */ +float toc(const clock_t t); /* time in ms since t */ +float TOC(const clock_t t, const char *str); /* same, but prints it with label 'str' */ +float TOC_clock(const clock_t t, const char *str); + +clock_t dclock(const clock_t t); // return the clock cycle diff between now and t +float clock_to_time(const clock_t t, + const char *str); // convert the number of clock cycles t to time +float clock_print(const clock_t t, const char *str); +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/torsion_constants.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/torsion_constants.c new file mode 100644 index 0000000000..6fb2f97637 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/torsion_constants.c @@ -0,0 +1,43 @@ +#include +#include +#include +const ibz_t TWO_TO_SECURITY_BITS = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x1}}} +#endif +; +const ibz_t TORSION_PLUS_2POWER = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x10}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x100000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x10000000000000}}} +#endif +; +const ibz_t SEC_DEGREE = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 65, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) 
{0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#endif +; +const ibz_t COM_DEGREE = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 65, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#endif +; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/torsion_constants.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/torsion_constants.h new file mode 100644 index 0000000000..363f86e6ac --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/torsion_constants.h @@ -0,0 +1,6 @@ +#include +#define TORSION_2POWER_BYTES 63 +extern const ibz_t TWO_TO_SECURITY_BITS; +extern const ibz_t TORSION_PLUS_2POWER; +extern const ibz_t SEC_DEGREE; +extern const ibz_t COM_DEGREE; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/tutil.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/tutil.h new file mode 100644 index 0000000000..59f162093e --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/tutil.h @@ -0,0 +1,36 @@ +#ifndef TUTIL_H +#define TUTIL_H + +#include +#include + +#if defined(__GNUC__) || defined(__clang__) +#define BSWAP16(i) __builtin_bswap16((i)) +#define BSWAP32(i) __builtin_bswap32((i)) +#define BSWAP64(i) __builtin_bswap64((i)) +#define UNUSED __attribute__((unused)) +#else +#define BSWAP16(i) ((((i) >> 8) & 0xff) | (((i) & 0xff00) << 8)) +#define BSWAP32(i) \ + ((((i) >> 24) & 0xff) | (((i) >> 8) & 0xff00) | (((i) & 0xff00) << 8) | ((i) << 24)) +#define BSWAP64(i) ((BSWAP32((i) >> 32) & 0xffffffff) | (BSWAP32(i) << 32) +#define UNUSED +#endif + +#if defined(RADIX_64) +#define digit_t uint64_t +#define sdigit_t int64_t +#define RADIX 64 +#define LOG2RADIX 6 +#define BSWAP_DIGIT(i) BSWAP64(i) +#elif defined(RADIX_32) +#define digit_t uint32_t +#define sdigit_t int32_t +#define RADIX 32 +#define LOG2RADIX 5 +#define BSWAP_DIGIT(i) BSWAP32(i) +#else +#error "Radix must be 32bit or 64 bit" +#endif + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/vaes256_key_expansion.S b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/vaes256_key_expansion.S new file mode 100644 index 0000000000..2311fa9bc8 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/vaes256_key_expansion.S @@ -0,0 +1,122 @@ +#*************************************************************************** +# This implementation is a modified version of the code, +# written by Nir Drucker and Shay Gueron +# AWS Cryptographic Algorithms Group +# (ndrucker@amazon.com, gueron@amazon.com) +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). 
+# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# The license is detailed in the file LICENSE.txt, and applies to this file. +#*************************************************************************** + +.intel_syntax noprefix +.data + +.p2align 4, 0x90 +MASK1: +.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d +CON1: +.long 1,1,1,1 + +.set k256_size, 32 + +#if defined(__linux__) && defined(__ELF__) +.section .note.GNU-stack,"",@progbits +#endif +.text + +################################################################################ +# void aes256_key_expansion(OUT aes256_ks_t* ks, IN const uint8_t* key); +# The output parameter must be 16 bytes aligned! +# +#Linux ABI +#define out rdi +#define in rsi + +#define CON xmm0 +#define MASK_REG xmm1 + +#define IN0 xmm2 +#define IN1 xmm3 + +#define TMP1 xmm4 +#define TMP2 xmm5 + +#define ZERO xmm15 + +.macro ROUND1 in0 in1 + add out, k256_size + vpshufb TMP2, \in1, MASK_REG + aesenclast TMP2, CON + vpslld CON, CON, 1 + vpslldq TMP1, \in0, 4 + vpxor \in0, \in0, TMP1 + vpslldq TMP1, TMP1, 4 + vpxor \in0, \in0, TMP1 + vpslldq TMP1, TMP1, 4 + vpxor \in0, \in0, TMP1 + vpxor \in0, \in0, TMP2 + vmovdqa [out], \in0 + +.endm + +.macro ROUND2 + vpshufd TMP2, IN0, 0xff + aesenclast TMP2, ZERO + vpslldq TMP1, IN1, 4 + vpxor IN1, IN1, TMP1 + vpslldq TMP1, TMP1, 4 + vpxor IN1, IN1, TMP1 + vpslldq TMP1, TMP1, 4 + vpxor IN1, IN1, TMP1 + vpxor IN1, IN1, TMP2 + vmovdqa [out+16], IN1 +.endm + +#ifdef __APPLE__ +#define AES256_KEY_EXPANSION _aes256_key_expansion +#else +#define AES256_KEY_EXPANSION aes256_key_expansion +#endif + +#ifndef __APPLE__ +.type AES256_KEY_EXPANSION,@function +.hidden AES256_KEY_EXPANSION +#endif +.globl AES256_KEY_EXPANSION +AES256_KEY_EXPANSION: + vmovdqu IN0, [in] + vmovdqu IN1, [in+16] + vmovdqa [out], IN0 + vmovdqa [out+16], IN1 + + vmovdqa CON, [rip+CON1] + vmovdqa MASK_REG, [rip+MASK1] + + vpxor ZERO, ZERO, ZERO + + mov ax, 6 +.loop256: + + ROUND1 IN0, IN1 + dec ax + ROUND2 + jne .loop256 + + ROUND1 IN0, IN1 + + ret +#ifndef __APPLE__ +.size AES256_KEY_EXPANSION, .-AES256_KEY_EXPANSION +#endif + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/verification.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/verification.h new file mode 100644 index 0000000000..af674691da --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/verification.h @@ -0,0 +1,123 @@ +/** @file + * + * @brief The verification protocol + */ + +#ifndef VERIFICATION_H +#define VERIFICATION_H + +#include +#include + +/** @defgroup verification SQIsignHD verification protocol + * @{ + */ + +/** @defgroup verification_t Types for SQIsignHD verification protocol + * @{ + */ + +typedef digit_t scalar_t[NWORDS_ORDER]; +typedef scalar_t scalar_mtx_2x2_t[2][2]; + +/** @brief Type for the signature + * + * @typedef signature_t + * + * @struct signature + * + */ +typedef struct signature +{ + fp2_t E_aux_A; // the Montgomery A-coefficient for the auxiliary curve + uint8_t backtracking; + uint8_t two_resp_length; + scalar_mtx_2x2_t mat_Bchall_can_to_B_chall; // the matrix of the desired basis + scalar_t chall_coeff; + uint8_t hint_aux; + uint8_t 
hint_chall; +} signature_t; + +/** @brief Type for the public keys + * + * @typedef public_key_t + * + * @struct public_key + * + */ +typedef struct public_key +{ + ec_curve_t curve; // the normalized A-coefficient of the Montgomery curve + uint8_t hint_pk; +} public_key_t; + +/** @} + */ + +/*************************** Functions *****************************/ + +void public_key_init(public_key_t *pk); +void public_key_finalize(public_key_t *pk); + +void hash_to_challenge(scalar_t *scalar, + const public_key_t *pk, + const ec_curve_t *com_curve, + const unsigned char *message, + size_t length); + +/** + * @brief Verification + * + * @param sig signature + * @param pk public key + * @param m message + * @param l size + * @returns 1 if the signature verifies, 0 otherwise + */ +int protocols_verify(signature_t *sig, const public_key_t *pk, const unsigned char *m, size_t l); + +/*************************** Encoding *****************************/ + +/** @defgroup encoding Encoding and decoding functions + * @{ + */ + +/** + * @brief Encodes a signature as a byte array + * + * @param enc : Byte array to encode the signature in + * @param sig : Signature to encode + */ +void signature_to_bytes(unsigned char *enc, const signature_t *sig); + +/** + * @brief Decodes a signature from a byte array + * + * @param sig : Structure to decode the signature in + * @param enc : Byte array to decode + */ +void signature_from_bytes(signature_t *sig, const unsigned char *enc); + +/** + * @brief Encodes a public key as a byte array + * + * @param enc : Byte array to encode the public key in + * @param pk : Public key to encode + */ +unsigned char *public_key_to_bytes(unsigned char *enc, const public_key_t *pk); + +/** + * @brief Decodes a public key from a byte array + * + * @param pk : Structure to decode the public key in + * @param enc : Byte array to decode + */ +const unsigned char *public_key_from_bytes(public_key_t *pk, const unsigned char *enc); + +/** @} + */ + +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/verify.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/verify.c new file mode 100644 index 0000000000..b5f78ad398 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/verify.c @@ -0,0 +1,309 @@ +#include +#include +#include +#include +#include + +// Check that the basis change matrix elements are canonical +// representatives modulo 2^(SQIsign_response_length + 2). +static int +check_canonical_basis_change_matrix(const signature_t *sig) +{ + // This works as long as all values in sig->mat_Bchall_can_to_B_chall are + // positive integers. 
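+    // Concretely, aux below is set to 2^(SQIsign_response_length + HD_extra_torsion
+    // - backtracking) and every matrix entry must compare strictly smaller than it,
+    // i.e. the entries must already be fully reduced modulo that power of two.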
+ int ret = 1; + scalar_t aux; + + memset(aux, 0, NWORDS_ORDER * sizeof(digit_t)); + aux[0] = 0x1; + multiple_mp_shiftl(aux, SQIsign_response_length + HD_extra_torsion - (int)sig->backtracking, NWORDS_ORDER); + + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + if (mp_compare(aux, sig->mat_Bchall_can_to_B_chall[i][j], NWORDS_ORDER) <= 0) { + ret = 0; + } + } + } + + return ret; +} + +// Compute the 2^n isogeny from the signature with kernel +// P + [chall_coeff]Q and store the codomain in E_chall +static int +compute_challenge_verify(ec_curve_t *E_chall, const signature_t *sig, const ec_curve_t *Epk, const uint8_t hint_pk) +{ + ec_basis_t bas_EA; + ec_isog_even_t phi_chall; + + // Set domain and length of 2^n isogeny + copy_curve(&phi_chall.curve, Epk); + phi_chall.length = TORSION_EVEN_POWER - sig->backtracking; + + // Compute the basis from the supplied hint + if (!ec_curve_to_basis_2f_from_hint(&bas_EA, &phi_chall.curve, TORSION_EVEN_POWER, hint_pk)) // canonical + return 0; + + // recovering the exact challenge + { + if (!ec_ladder3pt(&phi_chall.kernel, sig->chall_coeff, &bas_EA.P, &bas_EA.Q, &bas_EA.PmQ, &phi_chall.curve)) { + return 0; + }; + } + + // Double the kernel until is has the correct order + ec_dbl_iter(&phi_chall.kernel, sig->backtracking, &phi_chall.kernel, &phi_chall.curve); + + // Compute the codomain + copy_curve(E_chall, &phi_chall.curve); + if (ec_eval_even(E_chall, &phi_chall, NULL, 0)) + return 0; + return 1; +} + +// same as matrix_application_even_basis() in id2iso.c, with some modifications: +// - this version works with a matrix of scalars (not ibz_t). +// - reduction modulo 2^f of matrix elements is removed here, because it is +// assumed that the elements are already cannonical representatives modulo +// 2^f; this is ensured by calling check_canonical_basis_change_matrix() at +// the beginning of protocols_verify(). +static int +matrix_scalar_application_even_basis(ec_basis_t *bas, const ec_curve_t *E, scalar_mtx_2x2_t *mat, int f) +{ + scalar_t scalar0, scalar1; + memset(scalar0, 0, NWORDS_ORDER * sizeof(digit_t)); + memset(scalar1, 0, NWORDS_ORDER * sizeof(digit_t)); + + ec_basis_t tmp_bas; + copy_basis(&tmp_bas, bas); + + // For a matrix [[a, c], [b, d]] we compute: + // + // first basis element R = [a]P + [b]Q + if (!ec_biscalar_mul(&bas->P, (*mat)[0][0], (*mat)[1][0], f, &tmp_bas, E)) + return 0; + // second basis element S = [c]P + [d]Q + if (!ec_biscalar_mul(&bas->Q, (*mat)[0][1], (*mat)[1][1], f, &tmp_bas, E)) + return 0; + // Their difference R - S = [a - c]P + [b - d]Q + mp_sub(scalar0, (*mat)[0][0], (*mat)[0][1], NWORDS_ORDER); + mp_mod_2exp(scalar0, f, NWORDS_ORDER); + mp_sub(scalar1, (*mat)[1][0], (*mat)[1][1], NWORDS_ORDER); + mp_mod_2exp(scalar1, f, NWORDS_ORDER); + return ec_biscalar_mul(&bas->PmQ, scalar0, scalar1, f, &tmp_bas, E); +} + +// Compute the bases for the challenge and auxillary curve from +// the canonical bases. Challenge basis is reconstructed from the +// compressed scalars within the challenge. 
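+// Both bases are generated at full order 2^TORSION_EVEN_POWER and then doubled
+// down: the auxiliary basis to order 2^(pow_dim2_deg_resp + HD_extra_torsion), the
+// challenge basis to 2^(pow_dim2_deg_resp + HD_extra_torsion + two_resp_length),
+// after which the basis-change matrix carried in the signature is applied to the
+// challenge-side basis.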
+static int +challenge_and_aux_basis_verify(ec_basis_t *B_chall_can, + ec_basis_t *B_aux_can, + ec_curve_t *E_chall, + ec_curve_t *E_aux, + signature_t *sig, + const int pow_dim2_deg_resp) +{ + + // recovering the canonical basis as TORSION_EVEN_POWER for consistency with signing + if (!ec_curve_to_basis_2f_from_hint(B_chall_can, E_chall, TORSION_EVEN_POWER, sig->hint_chall)) + return 0; + + // setting to the right order + ec_dbl_iter_basis(B_chall_can, + TORSION_EVEN_POWER - pow_dim2_deg_resp - HD_extra_torsion - sig->two_resp_length, + B_chall_can, + E_chall); + + if (!ec_curve_to_basis_2f_from_hint(B_aux_can, E_aux, TORSION_EVEN_POWER, sig->hint_aux)) + return 0; + + // setting to the right order + ec_dbl_iter_basis(B_aux_can, TORSION_EVEN_POWER - pow_dim2_deg_resp - HD_extra_torsion, B_aux_can, E_aux); + +#ifndef NDEBUG + if (!test_basis_order_twof(B_chall_can, E_chall, HD_extra_torsion + pow_dim2_deg_resp + sig->two_resp_length)) + debug_print("canonical basis has wrong order, expect something to fail"); +#endif + + // applying the change matrix on the basis of E_chall + return matrix_scalar_application_even_basis(B_chall_can, + E_chall, + &sig->mat_Bchall_can_to_B_chall, + pow_dim2_deg_resp + HD_extra_torsion + sig->two_resp_length); +} + +// When two_resp_length is non-zero, we must compute a small 2^n-isogeny +// updating E_chall as the codomain as well as push the basis on E_chall +// through this isogeny +static int +two_response_isogeny_verify(ec_curve_t *E_chall, ec_basis_t *B_chall_can, const signature_t *sig, int pow_dim2_deg_resp) +{ + ec_point_t ker, points[3]; + + // choosing the right point for the small two_isogenies + if (mp_is_even(sig->mat_Bchall_can_to_B_chall[0][0], NWORDS_ORDER) && + mp_is_even(sig->mat_Bchall_can_to_B_chall[1][0], NWORDS_ORDER)) { + copy_point(&ker, &B_chall_can->Q); + } else { + copy_point(&ker, &B_chall_can->P); + } + + copy_point(&points[0], &B_chall_can->P); + copy_point(&points[1], &B_chall_can->Q); + copy_point(&points[2], &B_chall_can->PmQ); + + ec_dbl_iter(&ker, pow_dim2_deg_resp + HD_extra_torsion, &ker, E_chall); + +#ifndef NDEBUG + if (!test_point_order_twof(&ker, E_chall, sig->two_resp_length)) + debug_print("kernel does not have order 2^(two_resp_length"); +#endif + + if (ec_eval_small_chain(E_chall, &ker, sig->two_resp_length, points, 3, false)) { + return 0; + } + +#ifndef NDEBUG + if (!test_point_order_twof(&points[0], E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("points[0] does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); + if (!test_point_order_twof(&points[1], E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("points[1] does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); + if (!test_point_order_twof(&points[2], E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("points[2] does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); +#endif + + copy_point(&B_chall_can->P, &points[0]); + copy_point(&B_chall_can->Q, &points[1]); + copy_point(&B_chall_can->PmQ, &points[2]); + return 1; +} + +// The commitment curve can be recovered from the codomain of the 2D +// isogeny built from the bases computed during verification. 
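+// The kernel of that 2D isogeny is spanned by the paired points of B_chall_can
+// and B_aux_can on E_chall x E_aux; the commitment curve is read off as the first
+// factor of the split codomain. When pow_dim2_deg_resp == 0 no 2D step is needed
+// and only a supersingularity check on E_chall (via its 4-torsion basis) remains.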
+static int +compute_commitment_curve_verify(ec_curve_t *E_com, + const ec_basis_t *B_chall_can, + const ec_basis_t *B_aux_can, + const ec_curve_t *E_chall, + const ec_curve_t *E_aux, + int pow_dim2_deg_resp) + +{ +#ifndef NDEBUG + // Check all the points are the correct order + if (!test_basis_order_twof(B_chall_can, E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("B_chall_can does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); + + if (!test_basis_order_twof(B_aux_can, E_aux, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("B_aux_can does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); +#endif + + // now compute the dim2 isogeny from Echall x E_aux -> E_com x E_aux' + // of kernel B_chall_can x B_aux_can + + // first we set-up the kernel + theta_couple_curve_t EchallxEaux; + copy_curve(&EchallxEaux.E1, E_chall); + copy_curve(&EchallxEaux.E2, E_aux); + + theta_kernel_couple_points_t dim_two_ker; + copy_bases_to_kernel(&dim_two_ker, B_chall_can, B_aux_can); + + // computing the isogeny + theta_couple_curve_t codomain; + int codomain_splits; + ec_curve_init(&codomain.E1); + ec_curve_init(&codomain.E2); + // handling the special case where we don't need to perform any dim2 computation + if (pow_dim2_deg_resp == 0) { + codomain_splits = 1; + copy_curve(&codomain.E1, &EchallxEaux.E1); + copy_curve(&codomain.E2, &EchallxEaux.E2); + // We still need to check that E_chall is supersingular + // This assumes that HD_extra_torsion == 2 + if (!ec_is_basis_four_torsion(B_chall_can, E_chall)) { + return 0; + } + } else { + codomain_splits = theta_chain_compute_and_eval_verify( + pow_dim2_deg_resp, &EchallxEaux, &dim_two_ker, true, &codomain, NULL, 0); + } + + // computing the commitment curve + // its always the first one because of our (2^n,2^n)-isogeny formulae + copy_curve(E_com, &codomain.E1); + + return codomain_splits; +} + +// SQIsign verification +int +protocols_verify(signature_t *sig, const public_key_t *pk, const unsigned char *m, size_t l) +{ + int verify; + + if (!check_canonical_basis_change_matrix(sig)) + return 0; + + // Computation of the length of the dim 2 2^n isogeny + int pow_dim2_deg_resp = SQIsign_response_length - (int)sig->two_resp_length - (int)sig->backtracking; + + // basic sanity test: checking that the response is not too long + if (pow_dim2_deg_resp < 0) + return 0; + // The dim 2 isogeny embeds a dim 1 isogeny of odd degree, so it can + // never be of length 2. 
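+    // (pow_dim2_deg_resp == 0 remains acceptable: that degenerate case is handled
+    // separately in compute_commitment_curve_verify without a 2D isogeny step.)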
+    if (pow_dim2_deg_resp == 1)
+        return 0;
+
+    // check the public curve is valid
+    if (!ec_curve_verify_A(&(pk->curve).A))
+        return 0;
+
+    // Set auxiliary curve from the A-coefficient within the signature
+    ec_curve_t E_aux;
+    if (!ec_curve_init_from_A(&E_aux, &sig->E_aux_A))
+        return 0; // invalid curve
+
+    // checking that we are given A-coefficients and no precomputation
+    assert(fp2_is_one(&pk->curve.C) == 0xFFFFFFFF && !pk->curve.is_A24_computed_and_normalized);
+
+    // computation of the challenge
+    ec_curve_t E_chall;
+    if (!compute_challenge_verify(&E_chall, sig, &pk->curve, pk->hint_pk)) {
+        return 0;
+    }
+
+    // Computation of the canonical bases for the challenge and aux curve
+    ec_basis_t B_chall_can, B_aux_can;
+
+    if (!challenge_and_aux_basis_verify(&B_chall_can, &B_aux_can, &E_chall, &E_aux, sig, pow_dim2_deg_resp)) {
+        return 0;
+    }
+
+    // When two_resp_length != 0 we need to compute a second, short 2^r-isogeny
+    if (sig->two_resp_length > 0) {
+        if (!two_response_isogeny_verify(&E_chall, &B_chall_can, sig, pow_dim2_deg_resp)) {
+            return 0;
+        }
+    }
+
+    // We can recover the commitment curve with a 2D isogeny. If this fails,
+    // the supplied signature did not compute an isogeny between elliptic
+    // products and so is definitely invalid.
+    ec_curve_t E_com;
+    if (!compute_commitment_curve_verify(&E_com, &B_chall_can, &B_aux_can, &E_chall, &E_aux, pow_dim2_deg_resp))
+        return 0;
+
+    scalar_t chk_chall;
+
+    // recomputing the challenge vector
+    hash_to_challenge(&chk_chall, pk, &E_com, m, l);
+
+    // performing the final check
+    verify = mp_compare(sig->chall_coeff, chk_chall, NWORDS_ORDER) == 0;
+
+    return verify;
+}
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/xeval.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/xeval.c
new file mode 100644
index 0000000000..7fc7170423
--- /dev/null
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/xeval.c
@@ -0,0 +1,64 @@
+#include "isog.h"
+#include "ec.h"
+#include 
+
+// -----------------------------------------------------------------------------------------
+// -----------------------------------------------------------------------------------------
+
+// Degree-2 isogeny evaluation with kernel generated by P != (0, 0)
+void
+xeval_2(ec_point_t *R, ec_point_t *const Q, const int lenQ, const ec_kps2_t *kps)
+{
+    fp2_t t0, t1, t2;
+    for (int j = 0; j < lenQ; j++) {
+        fp2_add(&t0, &Q[j].x, &Q[j].z);
+        fp2_sub(&t1, &Q[j].x, &Q[j].z);
+        fp2_mul(&t2, &kps->K.x, &t1);
+        fp2_mul(&t1, &kps->K.z, &t0);
+        fp2_add(&t0, &t2, &t1);
+        fp2_sub(&t1, &t2, &t1);
+        fp2_mul(&R[j].x, &Q[j].x, &t0);
+        fp2_mul(&R[j].z, &Q[j].z, &t1);
+    }
+}
+
+void
+xeval_2_singular(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps2_t *kps)
+{
+    fp2_t t0, t1;
+    for (int i = 0; i < lenQ; i++) {
+        fp2_mul(&t0, &Q[i].x, &Q[i].z);
+        fp2_mul(&t1, &kps->K.x, &Q[i].z);
+        fp2_add(&t1, &t1, &Q[i].x);
+        fp2_mul(&t1, &t1, &Q[i].x);
+        fp2_sqr(&R[i].x, &Q[i].z);
+        fp2_add(&R[i].x, &R[i].x, &t1);
+        fp2_mul(&R[i].z, &t0, &kps->K.z);
+    }
+}
+
+// Degree-4 isogeny evaluation with kernel generated by P such that [2]P != (0, 0)
+void
+xeval_4(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps4_t *kps)
+{
+    const ec_point_t *K = kps->K;
+
+    fp2_t t0, t1;
+
+    for (int i = 0; i < lenQ; i++) {
+        fp2_add(&t0, &Q[i].x, &Q[i].z);
+        fp2_sub(&t1, &Q[i].x, &Q[i].z);
+        fp2_mul(&(R[i].x), &t0, &K[1].x);
+        fp2_mul(&(R[i].z), &t1, &K[2].x);
+        fp2_mul(&t0, &t0, &t1);
+        fp2_mul(&t0, &t0, &K[0].x);
+        fp2_add(&t1, &(R[i].x), &(R[i].z));
fp2_sub(&(R[i].z), &(R[i].x), &(R[i].z)); + fp2_sqr(&t1, &t1); + fp2_sqr(&(R[i].z), &(R[i].z)); + fp2_add(&(R[i].x), &t0, &t1); + fp2_sub(&t0, &t0, &(R[i].z)); + fp2_mul(&(R[i].x), &(R[i].x), &t1); + fp2_mul(&(R[i].z), &(R[i].z), &t0); + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/xisog.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/xisog.c new file mode 100644 index 0000000000..7242d29433 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/xisog.c @@ -0,0 +1,61 @@ +#include "isog.h" +#include "ec.h" +#include + +// ------------------------------------------------------------------------- +// ------------------------------------------------------------------------- + +// Degree-2 isogeny with kernel generated by P != (0 ,0) +// Outputs the curve coefficient in the form A24=(A+2C:4C) +void +xisog_2(ec_kps2_t *kps, ec_point_t *B, const ec_point_t P) +{ + fp2_sqr(&B->x, &P.x); + fp2_sqr(&B->z, &P.z); + fp2_sub(&B->x, &B->z, &B->x); + fp2_add(&kps->K.x, &P.x, &P.z); + fp2_sub(&kps->K.z, &P.x, &P.z); +} + +void +xisog_2_singular(ec_kps2_t *kps, ec_point_t *B24, ec_point_t A24) +{ + // No need to check the square root, only used for signing. + fp2_t t0, four; + fp2_set_small(&four, 4); + fp2_add(&t0, &A24.x, &A24.x); + fp2_sub(&t0, &t0, &A24.z); + fp2_add(&t0, &t0, &t0); + fp2_inv(&A24.z); + fp2_mul(&t0, &t0, &A24.z); + fp2_copy(&kps->K.x, &t0); + fp2_add(&B24->x, &t0, &t0); + fp2_sqr(&t0, &t0); + fp2_sub(&t0, &t0, &four); + fp2_sqrt(&t0); + fp2_neg(&kps->K.z, &t0); + fp2_add(&B24->z, &t0, &t0); + fp2_add(&B24->x, &B24->x, &B24->z); + fp2_add(&B24->z, &B24->z, &B24->z); +} + +// Degree-4 isogeny with kernel generated by P such that [2]P != (0 ,0) +// Outputs the curve coefficient in the form A24=(A+2C:4C) +void +xisog_4(ec_kps4_t *kps, ec_point_t *B, const ec_point_t P) +{ + ec_point_t *K = kps->K; + + fp2_sqr(&K[0].x, &P.x); + fp2_sqr(&K[0].z, &P.z); + fp2_add(&K[1].x, &K[0].z, &K[0].x); + fp2_sub(&K[1].z, &K[0].z, &K[0].x); + fp2_mul(&B->x, &K[1].x, &K[1].z); + fp2_sqr(&B->z, &K[0].z); + + // Constants for xeval_4 + fp2_add(&K[2].x, &P.x, &P.z); + fp2_sub(&K[1].x, &P.x, &P.z); + fp2_add(&K[0].x, &K[0].z, &K[0].z); + fp2_add(&K[0].x, &K[0].x, &K[0].x); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/COPYING.LGPL b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/COPYING.LGPL new file mode 100644 index 0000000000..0a041280bd --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/COPYING.LGPL @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. 
+Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. 
+ + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. 
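Editor's note (an illustrative sketch, not part of the upstream patch): the xisog_2/xeval_2 pair shown in xisog.c and xeval.c above is typically composed into a walk along a 2^n-isogeny chain: double the kernel generator down to a point of exact order 2, compute the codomain with xisog_2, then push the remaining kernel and any auxiliary points through with xeval_2. The helper name walk_2e_step and the push/npush parameters are hypothetical; xDBL_A24 is the doubling routine already used elsewhere in this patch, and the sketch assumes K has order 2^n with [2^(n-1)]K != (0, 0).

/* Minimal sketch of one 2-isogeny step, under the assumptions stated above. */
static void
walk_2e_step(ec_point_t *A24, ec_point_t *K, int n, ec_point_t *push, int npush)
{
    ec_kps2_t kps;
    ec_point_t ker2;
    copy_point(&ker2, K);

    // bring the kernel generator down to a point of exact order 2
    for (int i = 0; i < n - 1; i++)
        xDBL_A24(&ker2, &ker2, A24, false); // false: A24 is not normalized here

    // codomain coefficient (A24 = (A+2C : 4C) form, per the xisog_2 comment)
    // together with the constants xeval_2 needs
    xisog_2(&kps, A24, ker2);

    // push the remaining kernel generator and any auxiliary points through
    xeval_2(K, K, 1, &kps);
    xeval_2(push, push, npush, &kps);
}

A production chain would use a balanced strategy to avoid re-doubling from the top of the kernel at every step; this naive version is quadratic in n and is only meant to show how the primitives fit together.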
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/LICENSE b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/LICENSE new file mode 100644 index 0000000000..8f71f43fee --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/NOTICE b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/NOTICE new file mode 100644 index 0000000000..6eccf392fa --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/NOTICE @@ -0,0 +1,21 @@ +Copyright 2023-2025 the SQIsign team. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +The DPE Library is (C) 2004-2024 Patrick Pelissier, Paul Zimmermann, +LORIA/INRIA, and licensed under the GNU Lesser General Public License, +version 3. You may obtain a copy of the License at + + https://www.gnu.org/licenses/lgpl-3.0.en.html + +or in the file COPYING.LGPL. diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/aes.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/aes.h new file mode 100644 index 0000000000..e35ec3705b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/aes.h @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef AES_H +#define AES_H + +#include +#include + +void AES_256_ECB(const uint8_t *input, const uint8_t *key, uint8_t *output); +#define AES_ECB_encrypt AES_256_ECB + +#ifdef ENABLE_AESNI +int AES_128_CTR_NI(unsigned char *output, + size_t outputByteLen, + const unsigned char *input, + size_t inputByteLen); +int AES_128_CTR_4R_NI(unsigned char *output, + size_t outputByteLen, + const unsigned char *input, + size_t inputByteLen); +#define AES_128_CTR AES_128_CTR_NI +#else +int AES_128_CTR(unsigned char *output, + size_t outputByteLen, + const unsigned char *input, + size_t inputByteLen); +#endif + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/aes_c.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/aes_c.c new file mode 100644 index 0000000000..5e2d7d6161 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/aes_c.c @@ -0,0 +1,783 @@ +// SPDX-License-Identifier: MIT and Apache-2.0 + +/* + * AES implementation based on code from PQClean, + * which is in turn based on BearSSL (https://bearssl.org/) + * by Thomas Pornin. + * + * + * Copyright (c) 2016 Thomas Pornin + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include + +#define AES128_KEYBYTES 16 +#define AES192_KEYBYTES 24 +#define AES256_KEYBYTES 32 +#define AESCTR_NONCEBYTES 12 +#define AES_BLOCKBYTES 16 + +#define PQC_AES128_STATESIZE 88 +typedef struct +{ + uint64_t sk_exp[PQC_AES128_STATESIZE]; +} aes128ctx; + +#define PQC_AES192_STATESIZE 104 +typedef struct +{ + uint64_t sk_exp[PQC_AES192_STATESIZE]; +} aes192ctx; + +#define PQC_AES256_STATESIZE 120 +typedef struct +{ + uint64_t sk_exp[PQC_AES256_STATESIZE]; +} aes256ctx; + +/** Initializes the context **/ +void aes128_ecb_keyexp(aes128ctx *r, const unsigned char *key); + +void aes128_ctr_keyexp(aes128ctx *r, const unsigned char *key); + +void aes128_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes128ctx *ctx); + +void aes128_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes128ctx *ctx); + +/** Frees the context **/ +void aes128_ctx_release(aes128ctx *r); + +/** Initializes the context **/ +void aes192_ecb_keyexp(aes192ctx *r, const unsigned char *key); + +void aes192_ctr_keyexp(aes192ctx *r, const unsigned char *key); + +void aes192_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes192ctx *ctx); + +void aes192_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes192ctx *ctx); + +void aes192_ctx_release(aes192ctx *r); + +/** Initializes the context **/ +void aes256_ecb_keyexp(aes256ctx *r, const unsigned char *key); + +void aes256_ctr_keyexp(aes256ctx *r, const unsigned char *key); + +void aes256_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes256ctx *ctx); + +void aes256_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes256ctx *ctx); + +/** Frees the context **/ +void aes256_ctx_release(aes256ctx *r); + +static inline uint32_t +br_dec32le(const unsigned char *src) +{ + return (uint32_t)src[0] | ((uint32_t)src[1] << 8) | ((uint32_t)src[2] << 16) | + ((uint32_t)src[3] << 24); +} + +static void +br_range_dec32le(uint32_t *v, size_t num, const unsigned char *src) +{ + while (num-- > 0) { + *v++ = br_dec32le(src); + src += 4; + } +} + +static inline uint32_t +br_swap32(uint32_t x) +{ + x = ((x & (uint32_t)0x00FF00FF) << 8) | ((x >> 8) & (uint32_t)0x00FF00FF); + return (x << 16) | (x >> 16); +} + +static inline void +br_enc32le(unsigned char *dst, uint32_t x) +{ + dst[0] = (unsigned char)x; + dst[1] = (unsigned char)(x >> 8); + dst[2] = (unsigned char)(x >> 16); + dst[3] = (unsigned char)(x >> 24); +} + +static void +br_range_enc32le(unsigned char *dst, const uint32_t *v, size_t num) +{ + while (num-- > 0) { + br_enc32le(dst, *v++); + dst += 4; + } +} + +static void +br_aes_ct64_bitslice_Sbox(uint64_t *q) +{ + /* + * This S-box implementation is a straightforward translation of + * the circuit described by Boyar and Peralta in "A new + * combinational logic minimization technique with applications + * to cryptology" (https://eprint.iacr.org/2009/191.pdf). 
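+     * Working on the eight bit-plane words q[0..7] with boolean operations
+     * only (no table lookups, hence no data-dependent memory accesses)
+     * keeps the S-box evaluation constant-time.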
+ * + * Note that variables x* (input) and s* (output) are numbered + * in "reverse" order (x0 is the high bit, x7 is the low bit). + */ + + uint64_t x0, x1, x2, x3, x4, x5, x6, x7; + uint64_t y1, y2, y3, y4, y5, y6, y7, y8, y9; + uint64_t y10, y11, y12, y13, y14, y15, y16, y17, y18, y19; + uint64_t y20, y21; + uint64_t z0, z1, z2, z3, z4, z5, z6, z7, z8, z9; + uint64_t z10, z11, z12, z13, z14, z15, z16, z17; + uint64_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; + uint64_t t10, t11, t12, t13, t14, t15, t16, t17, t18, t19; + uint64_t t20, t21, t22, t23, t24, t25, t26, t27, t28, t29; + uint64_t t30, t31, t32, t33, t34, t35, t36, t37, t38, t39; + uint64_t t40, t41, t42, t43, t44, t45, t46, t47, t48, t49; + uint64_t t50, t51, t52, t53, t54, t55, t56, t57, t58, t59; + uint64_t t60, t61, t62, t63, t64, t65, t66, t67; + uint64_t s0, s1, s2, s3, s4, s5, s6, s7; + + x0 = q[7]; + x1 = q[6]; + x2 = q[5]; + x3 = q[4]; + x4 = q[3]; + x5 = q[2]; + x6 = q[1]; + x7 = q[0]; + + /* + * Top linear transformation. + */ + y14 = x3 ^ x5; + y13 = x0 ^ x6; + y9 = x0 ^ x3; + y8 = x0 ^ x5; + t0 = x1 ^ x2; + y1 = t0 ^ x7; + y4 = y1 ^ x3; + y12 = y13 ^ y14; + y2 = y1 ^ x0; + y5 = y1 ^ x6; + y3 = y5 ^ y8; + t1 = x4 ^ y12; + y15 = t1 ^ x5; + y20 = t1 ^ x1; + y6 = y15 ^ x7; + y10 = y15 ^ t0; + y11 = y20 ^ y9; + y7 = x7 ^ y11; + y17 = y10 ^ y11; + y19 = y10 ^ y8; + y16 = t0 ^ y11; + y21 = y13 ^ y16; + y18 = x0 ^ y16; + + /* + * Non-linear section. + */ + t2 = y12 & y15; + t3 = y3 & y6; + t4 = t3 ^ t2; + t5 = y4 & x7; + t6 = t5 ^ t2; + t7 = y13 & y16; + t8 = y5 & y1; + t9 = t8 ^ t7; + t10 = y2 & y7; + t11 = t10 ^ t7; + t12 = y9 & y11; + t13 = y14 & y17; + t14 = t13 ^ t12; + t15 = y8 & y10; + t16 = t15 ^ t12; + t17 = t4 ^ t14; + t18 = t6 ^ t16; + t19 = t9 ^ t14; + t20 = t11 ^ t16; + t21 = t17 ^ y20; + t22 = t18 ^ y19; + t23 = t19 ^ y21; + t24 = t20 ^ y18; + + t25 = t21 ^ t22; + t26 = t21 & t23; + t27 = t24 ^ t26; + t28 = t25 & t27; + t29 = t28 ^ t22; + t30 = t23 ^ t24; + t31 = t22 ^ t26; + t32 = t31 & t30; + t33 = t32 ^ t24; + t34 = t23 ^ t33; + t35 = t27 ^ t33; + t36 = t24 & t35; + t37 = t36 ^ t34; + t38 = t27 ^ t36; + t39 = t29 & t38; + t40 = t25 ^ t39; + + t41 = t40 ^ t37; + t42 = t29 ^ t33; + t43 = t29 ^ t40; + t44 = t33 ^ t37; + t45 = t42 ^ t41; + z0 = t44 & y15; + z1 = t37 & y6; + z2 = t33 & x7; + z3 = t43 & y16; + z4 = t40 & y1; + z5 = t29 & y7; + z6 = t42 & y11; + z7 = t45 & y17; + z8 = t41 & y10; + z9 = t44 & y12; + z10 = t37 & y3; + z11 = t33 & y4; + z12 = t43 & y13; + z13 = t40 & y5; + z14 = t29 & y2; + z15 = t42 & y9; + z16 = t45 & y14; + z17 = t41 & y8; + + /* + * Bottom linear transformation. 
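+     * (It combines the shared products z0..z17 into the output bits s0..s7.)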
+ */ + t46 = z15 ^ z16; + t47 = z10 ^ z11; + t48 = z5 ^ z13; + t49 = z9 ^ z10; + t50 = z2 ^ z12; + t51 = z2 ^ z5; + t52 = z7 ^ z8; + t53 = z0 ^ z3; + t54 = z6 ^ z7; + t55 = z16 ^ z17; + t56 = z12 ^ t48; + t57 = t50 ^ t53; + t58 = z4 ^ t46; + t59 = z3 ^ t54; + t60 = t46 ^ t57; + t61 = z14 ^ t57; + t62 = t52 ^ t58; + t63 = t49 ^ t58; + t64 = z4 ^ t59; + t65 = t61 ^ t62; + t66 = z1 ^ t63; + s0 = t59 ^ t63; + s6 = t56 ^ ~t62; + s7 = t48 ^ ~t60; + t67 = t64 ^ t65; + s3 = t53 ^ t66; + s4 = t51 ^ t66; + s5 = t47 ^ t65; + s1 = t64 ^ ~s3; + s2 = t55 ^ ~t67; + + q[7] = s0; + q[6] = s1; + q[5] = s2; + q[4] = s3; + q[3] = s4; + q[2] = s5; + q[1] = s6; + q[0] = s7; +} + +static void +br_aes_ct64_ortho(uint64_t *q) +{ +#define SWAPN(cl, ch, s, x, y) \ + do { \ + uint64_t a, b; \ + a = (x); \ + b = (y); \ + (x) = (a & (uint64_t)(cl)) | ((b & (uint64_t)(cl)) << (s)); \ + (y) = ((a & (uint64_t)(ch)) >> (s)) | (b & (uint64_t)(ch)); \ + } while (0) + +#define SWAP2(x, y) SWAPN(0x5555555555555555, 0xAAAAAAAAAAAAAAAA, 1, x, y) +#define SWAP4(x, y) SWAPN(0x3333333333333333, 0xCCCCCCCCCCCCCCCC, 2, x, y) +#define SWAP8(x, y) SWAPN(0x0F0F0F0F0F0F0F0F, 0xF0F0F0F0F0F0F0F0, 4, x, y) + + SWAP2(q[0], q[1]); + SWAP2(q[2], q[3]); + SWAP2(q[4], q[5]); + SWAP2(q[6], q[7]); + + SWAP4(q[0], q[2]); + SWAP4(q[1], q[3]); + SWAP4(q[4], q[6]); + SWAP4(q[5], q[7]); + + SWAP8(q[0], q[4]); + SWAP8(q[1], q[5]); + SWAP8(q[2], q[6]); + SWAP8(q[3], q[7]); +} + +static void +br_aes_ct64_interleave_in(uint64_t *q0, uint64_t *q1, const uint32_t *w) +{ + uint64_t x0, x1, x2, x3; + + x0 = w[0]; + x1 = w[1]; + x2 = w[2]; + x3 = w[3]; + x0 |= (x0 << 16); + x1 |= (x1 << 16); + x2 |= (x2 << 16); + x3 |= (x3 << 16); + x0 &= (uint64_t)0x0000FFFF0000FFFF; + x1 &= (uint64_t)0x0000FFFF0000FFFF; + x2 &= (uint64_t)0x0000FFFF0000FFFF; + x3 &= (uint64_t)0x0000FFFF0000FFFF; + x0 |= (x0 << 8); + x1 |= (x1 << 8); + x2 |= (x2 << 8); + x3 |= (x3 << 8); + x0 &= (uint64_t)0x00FF00FF00FF00FF; + x1 &= (uint64_t)0x00FF00FF00FF00FF; + x2 &= (uint64_t)0x00FF00FF00FF00FF; + x3 &= (uint64_t)0x00FF00FF00FF00FF; + *q0 = x0 | (x2 << 8); + *q1 = x1 | (x3 << 8); +} + +static void +br_aes_ct64_interleave_out(uint32_t *w, uint64_t q0, uint64_t q1) +{ + uint64_t x0, x1, x2, x3; + + x0 = q0 & (uint64_t)0x00FF00FF00FF00FF; + x1 = q1 & (uint64_t)0x00FF00FF00FF00FF; + x2 = (q0 >> 8) & (uint64_t)0x00FF00FF00FF00FF; + x3 = (q1 >> 8) & (uint64_t)0x00FF00FF00FF00FF; + x0 |= (x0 >> 8); + x1 |= (x1 >> 8); + x2 |= (x2 >> 8); + x3 |= (x3 >> 8); + x0 &= (uint64_t)0x0000FFFF0000FFFF; + x1 &= (uint64_t)0x0000FFFF0000FFFF; + x2 &= (uint64_t)0x0000FFFF0000FFFF; + x3 &= (uint64_t)0x0000FFFF0000FFFF; + w[0] = (uint32_t)x0 | (uint32_t)(x0 >> 16); + w[1] = (uint32_t)x1 | (uint32_t)(x1 >> 16); + w[2] = (uint32_t)x2 | (uint32_t)(x2 >> 16); + w[3] = (uint32_t)x3 | (uint32_t)(x3 >> 16); +} + +static const unsigned char Rcon[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36 }; + +static uint32_t +sub_word(uint32_t x) +{ + uint64_t q[8]; + + memset(q, 0, sizeof q); + q[0] = x; + br_aes_ct64_ortho(q); + br_aes_ct64_bitslice_Sbox(q); + br_aes_ct64_ortho(q); + return (uint32_t)q[0]; +} + +static void +br_aes_ct64_keysched(uint64_t *comp_skey, const unsigned char *key, unsigned int key_len) +{ + unsigned int i, j, k, nk, nkf; + uint32_t tmp; + uint32_t skey[60]; + unsigned nrounds = 10 + ((key_len - 16) >> 2); + + nk = (key_len >> 2); + nkf = ((nrounds + 1) << 2); + br_range_dec32le(skey, (key_len >> 2), key); + tmp = skey[(key_len >> 2) - 1]; + for (i = nk, j = 0, k = 0; i < nkf; i++) { + 
if (j == 0) { + tmp = (tmp << 24) | (tmp >> 8); + tmp = sub_word(tmp) ^ Rcon[k]; + } else if (nk > 6 && j == 4) { + tmp = sub_word(tmp); + } + tmp ^= skey[i - nk]; + skey[i] = tmp; + if (++j == nk) { + j = 0; + k++; + } + } + + for (i = 0, j = 0; i < nkf; i += 4, j += 2) { + uint64_t q[8]; + + br_aes_ct64_interleave_in(&q[0], &q[4], skey + i); + q[1] = q[0]; + q[2] = q[0]; + q[3] = q[0]; + q[5] = q[4]; + q[6] = q[4]; + q[7] = q[4]; + br_aes_ct64_ortho(q); + comp_skey[j + 0] = + (q[0] & (uint64_t)0x1111111111111111) | (q[1] & (uint64_t)0x2222222222222222) | + (q[2] & (uint64_t)0x4444444444444444) | (q[3] & (uint64_t)0x8888888888888888); + comp_skey[j + 1] = + (q[4] & (uint64_t)0x1111111111111111) | (q[5] & (uint64_t)0x2222222222222222) | + (q[6] & (uint64_t)0x4444444444444444) | (q[7] & (uint64_t)0x8888888888888888); + } +} + +static void +br_aes_ct64_skey_expand(uint64_t *skey, const uint64_t *comp_skey, unsigned int nrounds) +{ + unsigned u, v, n; + + n = (nrounds + 1) << 1; + for (u = 0, v = 0; u < n; u++, v += 4) { + uint64_t x0, x1, x2, x3; + + x0 = x1 = x2 = x3 = comp_skey[u]; + x0 &= (uint64_t)0x1111111111111111; + x1 &= (uint64_t)0x2222222222222222; + x2 &= (uint64_t)0x4444444444444444; + x3 &= (uint64_t)0x8888888888888888; + x1 >>= 1; + x2 >>= 2; + x3 >>= 3; + skey[v + 0] = (x0 << 4) - x0; + skey[v + 1] = (x1 << 4) - x1; + skey[v + 2] = (x2 << 4) - x2; + skey[v + 3] = (x3 << 4) - x3; + } +} + +static inline void +add_round_key(uint64_t *q, const uint64_t *sk) +{ + q[0] ^= sk[0]; + q[1] ^= sk[1]; + q[2] ^= sk[2]; + q[3] ^= sk[3]; + q[4] ^= sk[4]; + q[5] ^= sk[5]; + q[6] ^= sk[6]; + q[7] ^= sk[7]; +} + +static inline void +shift_rows(uint64_t *q) +{ + int i; + + for (i = 0; i < 8; i++) { + uint64_t x; + + x = q[i]; + q[i] = + (x & (uint64_t)0x000000000000FFFF) | ((x & (uint64_t)0x00000000FFF00000) >> 4) | + ((x & (uint64_t)0x00000000000F0000) << 12) | ((x & (uint64_t)0x0000FF0000000000) >> 8) | + ((x & (uint64_t)0x000000FF00000000) << 8) | ((x & (uint64_t)0xF000000000000000) >> 12) | + ((x & (uint64_t)0x0FFF000000000000) << 4); + } +} + +static inline uint64_t +rotr32(uint64_t x) +{ + return (x << 32) | (x >> 32); +} + +static inline void +mix_columns(uint64_t *q) +{ + uint64_t q0, q1, q2, q3, q4, q5, q6, q7; + uint64_t r0, r1, r2, r3, r4, r5, r6, r7; + + q0 = q[0]; + q1 = q[1]; + q2 = q[2]; + q3 = q[3]; + q4 = q[4]; + q5 = q[5]; + q6 = q[6]; + q7 = q[7]; + r0 = (q0 >> 16) | (q0 << 48); + r1 = (q1 >> 16) | (q1 << 48); + r2 = (q2 >> 16) | (q2 << 48); + r3 = (q3 >> 16) | (q3 << 48); + r4 = (q4 >> 16) | (q4 << 48); + r5 = (q5 >> 16) | (q5 << 48); + r6 = (q6 >> 16) | (q6 << 48); + r7 = (q7 >> 16) | (q7 << 48); + + q[0] = q7 ^ r7 ^ r0 ^ rotr32(q0 ^ r0); + q[1] = q0 ^ r0 ^ q7 ^ r7 ^ r1 ^ rotr32(q1 ^ r1); + q[2] = q1 ^ r1 ^ r2 ^ rotr32(q2 ^ r2); + q[3] = q2 ^ r2 ^ q7 ^ r7 ^ r3 ^ rotr32(q3 ^ r3); + q[4] = q3 ^ r3 ^ q7 ^ r7 ^ r4 ^ rotr32(q4 ^ r4); + q[5] = q4 ^ r4 ^ r5 ^ rotr32(q5 ^ r5); + q[6] = q5 ^ r5 ^ r6 ^ rotr32(q6 ^ r6); + q[7] = q6 ^ r6 ^ r7 ^ rotr32(q7 ^ r7); +} + +static void +inc4_be(uint32_t *x) +{ + uint32_t t = br_swap32(*x) + 4; + *x = br_swap32(t); +} + +static void +aes_ecb4x(unsigned char out[64], + const uint32_t ivw[16], + const uint64_t *sk_exp, + unsigned int nrounds) +{ + uint32_t w[16]; + uint64_t q[8]; + unsigned int i; + + memcpy(w, ivw, sizeof(w)); + for (i = 0; i < 4; i++) { + br_aes_ct64_interleave_in(&q[i], &q[i + 4], w + (i << 2)); + } + br_aes_ct64_ortho(q); + + add_round_key(q, sk_exp); + for (i = 1; i < nrounds; i++) { + br_aes_ct64_bitslice_Sbox(q); + 
shift_rows(q); + mix_columns(q); + add_round_key(q, sk_exp + (i << 3)); + } + br_aes_ct64_bitslice_Sbox(q); + shift_rows(q); + add_round_key(q, sk_exp + 8 * nrounds); + + br_aes_ct64_ortho(q); + for (i = 0; i < 4; i++) { + br_aes_ct64_interleave_out(w + (i << 2), q[i], q[i + 4]); + } + br_range_enc32le(out, w, 16); +} + +static void +aes_ctr4x(unsigned char out[64], uint32_t ivw[16], const uint64_t *sk_exp, unsigned int nrounds) +{ + aes_ecb4x(out, ivw, sk_exp, nrounds); + + /* Increase counter for next 4 blocks */ + inc4_be(ivw + 3); + inc4_be(ivw + 7); + inc4_be(ivw + 11); + inc4_be(ivw + 15); +} + +static void +aes_ecb(unsigned char *out, + const unsigned char *in, + size_t nblocks, + const uint64_t *rkeys, + unsigned int nrounds) +{ + uint32_t blocks[16]; + unsigned char t[64]; + + while (nblocks >= 4) { + br_range_dec32le(blocks, 16, in); + aes_ecb4x(out, blocks, rkeys, nrounds); + nblocks -= 4; + in += 64; + out += 64; + } + + if (nblocks) { + br_range_dec32le(blocks, nblocks * 4, in); + aes_ecb4x(t, blocks, rkeys, nrounds); + memcpy(out, t, nblocks * 16); + } +} + +static void +aes_ctr(unsigned char *out, + size_t outlen, + const unsigned char *iv, + const uint64_t *rkeys, + unsigned int nrounds) +{ + uint32_t ivw[16]; + size_t i; + uint32_t cc = 0; + + br_range_dec32le(ivw, 3, iv); + memcpy(ivw + 4, ivw, 3 * sizeof(uint32_t)); + memcpy(ivw + 8, ivw, 3 * sizeof(uint32_t)); + memcpy(ivw + 12, ivw, 3 * sizeof(uint32_t)); + ivw[3] = br_swap32(cc); + ivw[7] = br_swap32(cc + 1); + ivw[11] = br_swap32(cc + 2); + ivw[15] = br_swap32(cc + 3); + + while (outlen > 64) { + aes_ctr4x(out, ivw, rkeys, nrounds); + out += 64; + outlen -= 64; + } + if (outlen > 0) { + unsigned char tmp[64]; + aes_ctr4x(tmp, ivw, rkeys, nrounds); + for (i = 0; i < outlen; i++) { + out[i] = tmp[i]; + } + } +} + +void +aes128_ecb_keyexp(aes128ctx *r, const unsigned char *key) +{ + uint64_t skey[22]; + + br_aes_ct64_keysched(skey, key, 16); + br_aes_ct64_skey_expand(r->sk_exp, skey, 10); +} + +void +aes128_ctr_keyexp(aes128ctx *r, const unsigned char *key) +{ + aes128_ecb_keyexp(r, key); +} + +void +aes192_ecb_keyexp(aes192ctx *r, const unsigned char *key) +{ + uint64_t skey[26]; + + br_aes_ct64_keysched(skey, key, 24); + br_aes_ct64_skey_expand(r->sk_exp, skey, 12); +} + +void +aes192_ctr_keyexp(aes192ctx *r, const unsigned char *key) +{ + aes192_ecb_keyexp(r, key); +} + +void +aes256_ecb_keyexp(aes256ctx *r, const unsigned char *key) +{ + uint64_t skey[30]; + + br_aes_ct64_keysched(skey, key, 32); + br_aes_ct64_skey_expand(r->sk_exp, skey, 14); +} + +void +aes256_ctr_keyexp(aes256ctx *r, const unsigned char *key) +{ + aes256_ecb_keyexp(r, key); +} + +void +aes128_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes128ctx *ctx) +{ + aes_ecb(out, in, nblocks, ctx->sk_exp, 10); +} + +void +aes128_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes128ctx *ctx) +{ + aes_ctr(out, outlen, iv, ctx->sk_exp, 10); +} + +void +aes192_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes192ctx *ctx) +{ + aes_ecb(out, in, nblocks, ctx->sk_exp, 12); +} + +void +aes192_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes192ctx *ctx) +{ + aes_ctr(out, outlen, iv, ctx->sk_exp, 12); +} + +void +aes256_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes256ctx *ctx) +{ + aes_ecb(out, in, nblocks, ctx->sk_exp, 14); +} + +void +aes256_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes256ctx *ctx) +{ + 
aes_ctr(out, outlen, iv, ctx->sk_exp, 14); +} + +void +aes128_ctx_release(aes128ctx *r) +{ +} + +void +aes192_ctx_release(aes192ctx *r) +{ +} + +void +aes256_ctx_release(aes256ctx *r) +{ +} + +int +AES_128_CTR(unsigned char *output, + size_t outputByteLen, + const unsigned char *input, + size_t inputByteLen) +{ + aes128ctx ctx; + const unsigned char iv[16] = { 0 }; + + aes128_ctr_keyexp(&ctx, input); + aes128_ctr(output, outputByteLen, iv, &ctx); + aes128_ctx_release(&ctx); + + return (int)outputByteLen; +} + +void +AES_256_ECB(const uint8_t *input, const unsigned char *key, unsigned char *output) +{ + aes256ctx ctx; + + aes256_ecb_keyexp(&ctx, key); + aes256_ecb(output, input, 1, &ctx); + aes256_ctx_release(&ctx); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/algebra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/algebra.c new file mode 100644 index 0000000000..50629f9fec --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/algebra.c @@ -0,0 +1,280 @@ +#include +#include "internal.h" + +// Internal helper functions + +void +quat_alg_init_set_ui(quat_alg_t *alg, unsigned int p) +{ + ibz_t bp; + ibz_init(&bp); + ibz_set(&bp, p); + quat_alg_init_set(alg, &bp); + ibz_finalize(&bp); +} + +void +quat_alg_coord_mul(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b, const quat_alg_t *alg) +{ + ibz_t prod; + ibz_vec_4_t sum; + ibz_init(&prod); + ibz_vec_4_init(&sum); + + ibz_set(&(sum[0]), 0); + ibz_set(&(sum[1]), 0); + ibz_set(&(sum[2]), 0); + ibz_set(&(sum[3]), 0); + + // compute 1 coordinate + ibz_mul(&prod, &((*a)[2]), &((*b)[2])); + ibz_sub(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[3])); + ibz_sub(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&(sum[0]), &(sum[0]), &(alg->p)); + ibz_mul(&prod, &((*a)[0]), &((*b)[0])); + ibz_add(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[1])); + ibz_sub(&(sum[0]), &(sum[0]), &prod); + // compute i coordiante + ibz_mul(&prod, &((*a)[2]), &((*b)[3])); + ibz_add(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[2])); + ibz_sub(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&(sum[1]), &(sum[1]), &(alg->p)); + ibz_mul(&prod, &((*a)[0]), &((*b)[1])); + ibz_add(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[0])); + ibz_add(&(sum[1]), &(sum[1]), &prod); + // compute j coordiante + ibz_mul(&prod, &((*a)[0]), &((*b)[2])); + ibz_add(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &((*a)[2]), &((*b)[0])); + ibz_add(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[3])); + ibz_sub(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[1])); + ibz_add(&(sum[2]), &(sum[2]), &prod); + // compute ij coordiante + ibz_mul(&prod, &((*a)[0]), &((*b)[3])); + ibz_add(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[0])); + ibz_add(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &((*a)[2]), &((*b)[1])); + ibz_sub(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[2])); + ibz_add(&(sum[3]), &(sum[3]), &prod); + + ibz_copy(&((*res)[0]), &(sum[0])); + ibz_copy(&((*res)[1]), &(sum[1])); + ibz_copy(&((*res)[2]), &(sum[2])); + ibz_copy(&((*res)[3]), &(sum[3])); + + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} + +void +quat_alg_equal_denom(quat_alg_elem_t *res_a, quat_alg_elem_t *res_b, const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + ibz_t gcd, r; + ibz_init(&gcd); + ibz_init(&r); + ibz_gcd(&gcd, &(a->denom), &(b->denom)); + // temporarily set res_a.denom to a.denom/gcd, and res_b.denom to b.denom/gcd + 
ibz_div(&(res_a->denom), &r, &(a->denom), &gcd); + ibz_div(&(res_b->denom), &r, &(b->denom), &gcd); + for (int i = 0; i < 4; i++) { + // multiply coordiates by reduced denominators from the other element + ibz_mul(&(res_a->coord[i]), &(a->coord[i]), &(res_b->denom)); + ibz_mul(&(res_b->coord[i]), &(b->coord[i]), &(res_a->denom)); + } + // multiply both reduced denominators + ibz_mul(&(res_a->denom), &(res_a->denom), &(res_b->denom)); + // multiply them by the gcd to get the new common denominator + ibz_mul(&(res_b->denom), &(res_a->denom), &gcd); + ibz_mul(&(res_a->denom), &(res_a->denom), &gcd); + ibz_finalize(&gcd); + ibz_finalize(&r); +} + +// Public Functions + +void +quat_alg_add(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + quat_alg_elem_t res_a, res_b; + quat_alg_elem_init(&res_a); + quat_alg_elem_init(&res_b); + // put both on the same denominator + quat_alg_equal_denom(&res_a, &res_b, a, b); + // then add + ibz_copy(&(res->denom), &(res_a.denom)); + ibz_vec_4_add(&(res->coord), &(res_a.coord), &(res_b.coord)); + quat_alg_elem_finalize(&res_a); + quat_alg_elem_finalize(&res_b); +} + +void +quat_alg_sub(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + quat_alg_elem_t res_a, res_b; + quat_alg_elem_init(&res_a); + quat_alg_elem_init(&res_b); + // put both on the same denominator + quat_alg_equal_denom(&res_a, &res_b, a, b); + // then substract + ibz_copy(&res->denom, &res_a.denom); + ibz_vec_4_sub(&res->coord, &res_a.coord, &res_b.coord); + quat_alg_elem_finalize(&res_a); + quat_alg_elem_finalize(&res_b); +} + +void +quat_alg_mul(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b, const quat_alg_t *alg) +{ + // denominator: product of denominators + ibz_mul(&(res->denom), &(a->denom), &(b->denom)); + quat_alg_coord_mul(&(res->coord), &(a->coord), &(b->coord), alg); +} + +void +quat_alg_norm(ibz_t *res_num, ibz_t *res_denom, const quat_alg_elem_t *a, const quat_alg_t *alg) +{ + ibz_t r, g; + quat_alg_elem_t norm; + ibz_init(&r); + ibz_init(&g); + quat_alg_elem_init(&norm); + + quat_alg_conj(&norm, a); + quat_alg_mul(&norm, a, &norm, alg); + ibz_gcd(&g, &(norm.coord[0]), &(norm.denom)); + ibz_div(res_num, &r, &(norm.coord[0]), &g); + ibz_div(res_denom, &r, &(norm.denom), &g); + ibz_abs(res_denom, res_denom); + ibz_abs(res_num, res_num); + assert(ibz_cmp(res_denom, &ibz_const_zero) > 0); + + quat_alg_elem_finalize(&norm); + ibz_finalize(&r); + ibz_finalize(&g); +} + +void +quat_alg_scalar(quat_alg_elem_t *elem, const ibz_t *numerator, const ibz_t *denominator) +{ + ibz_copy(&(elem->denom), denominator); + ibz_copy(&(elem->coord[0]), numerator); + ibz_set(&(elem->coord[1]), 0); + ibz_set(&(elem->coord[2]), 0); + ibz_set(&(elem->coord[3]), 0); +} + +void +quat_alg_conj(quat_alg_elem_t *conj, const quat_alg_elem_t *x) +{ + ibz_copy(&(conj->denom), &(x->denom)); + ibz_copy(&(conj->coord[0]), &(x->coord[0])); + ibz_neg(&(conj->coord[1]), &(x->coord[1])); + ibz_neg(&(conj->coord[2]), &(x->coord[2])); + ibz_neg(&(conj->coord[3]), &(x->coord[3])); +} + +void +quat_alg_make_primitive(ibz_vec_4_t *primitive_x, ibz_t *content, const quat_alg_elem_t *x, const quat_lattice_t *order) +{ + int ok UNUSED = quat_lattice_contains(primitive_x, order, x); + assert(ok); + ibz_vec_4_content(content, primitive_x); + ibz_t r; + ibz_init(&r); + for (int i = 0; i < 4; i++) { + ibz_div(*primitive_x + i, &r, *primitive_x + i, content); + } + ibz_finalize(&r); +} + +void +quat_alg_normalize(quat_alg_elem_t *x) +{ + ibz_t gcd, sign, 
r; + ibz_init(&gcd); + ibz_init(&sign); + ibz_init(&r); + ibz_vec_4_content(&gcd, &(x->coord)); + ibz_gcd(&gcd, &gcd, &(x->denom)); + ibz_div(&(x->denom), &r, &(x->denom), &gcd); + ibz_vec_4_scalar_div(&(x->coord), &gcd, &(x->coord)); + ibz_set(&sign, 2 * (0 > ibz_cmp(&ibz_const_zero, &(x->denom))) - 1); + ibz_vec_4_scalar_mul(&(x->coord), &sign, &(x->coord)); + ibz_mul(&(x->denom), &sign, &(x->denom)); + ibz_finalize(&gcd); + ibz_finalize(&sign); + ibz_finalize(&r); +} + +int +quat_alg_elem_equal(const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + quat_alg_elem_t diff; + quat_alg_elem_init(&diff); + quat_alg_sub(&diff, a, b); + int res = quat_alg_elem_is_zero(&diff); + quat_alg_elem_finalize(&diff); + return (res); +} + +int +quat_alg_elem_is_zero(const quat_alg_elem_t *x) +{ + int res = ibz_vec_4_is_zero(&(x->coord)); + return (res); +} + +void +quat_alg_elem_set(quat_alg_elem_t *elem, int32_t denom, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) +{ + ibz_set(&(elem->coord[0]), coord0); + ibz_set(&(elem->coord[1]), coord1); + ibz_set(&(elem->coord[2]), coord2); + ibz_set(&(elem->coord[3]), coord3); + + ibz_set(&(elem->denom), denom); +} + +void +quat_alg_elem_copy(quat_alg_elem_t *copy, const quat_alg_elem_t *copied) +{ + ibz_copy(©->denom, &copied->denom); + ibz_copy(©->coord[0], &copied->coord[0]); + ibz_copy(©->coord[1], &copied->coord[1]); + ibz_copy(©->coord[2], &copied->coord[2]); + ibz_copy(©->coord[3], &copied->coord[3]); +} + +// helper functions for lattices +void +quat_alg_elem_copy_ibz(quat_alg_elem_t *elem, + const ibz_t *denom, + const ibz_t *coord0, + const ibz_t *coord1, + const ibz_t *coord2, + const ibz_t *coord3) +{ + ibz_copy(&(elem->coord[0]), coord0); + ibz_copy(&(elem->coord[1]), coord1); + ibz_copy(&(elem->coord[2]), coord2); + ibz_copy(&(elem->coord[3]), coord3); + + ibz_copy(&(elem->denom), denom); +} + +void +quat_alg_elem_mul_by_scalar(quat_alg_elem_t *res, const ibz_t *scalar, const quat_alg_elem_t *elem) +{ + for (int i = 0; i < 4; i++) { + ibz_mul(&(res->coord[i]), &(elem->coord[i]), scalar); + } + ibz_copy(&(res->denom), &(elem->denom)); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/api.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/api.c new file mode 100644 index 0000000000..e01f911e87 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/api.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: Apache-2.0 + +#include +#include + +#if defined(ENABLE_SIGN) +SQISIGN_API +int +crypto_sign_keypair(unsigned char *pk, unsigned char *sk) +{ + + return sqisign_keypair(pk, sk); +} + +SQISIGN_API +int +crypto_sign(unsigned char *sm, unsigned long long *smlen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *sk) +{ + return sqisign_sign(sm, smlen, m, mlen, sk); +} +#endif + +SQISIGN_API +int +crypto_sign_open(unsigned char *m, unsigned long long *mlen, + const unsigned char *sm, unsigned long long smlen, + const unsigned char *pk) +{ + return sqisign_open(m, mlen, sm, smlen, pk); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/api.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/api.h new file mode 100644 index 0000000000..dee239e1cd --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/api.h @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef api_h +#define api_h + +#include + +#define CRYPTO_SECRETKEYBYTES 701 +#define CRYPTO_PUBLICKEYBYTES 129 +#define CRYPTO_BYTES 292 + +#define CRYPTO_ALGNAME "SQIsign_lvl5" + +#if defined(ENABLE_SIGN) +SQISIGN_API +int 
+crypto_sign_keypair(unsigned char *pk, unsigned char *sk); + +SQISIGN_API +int +crypto_sign(unsigned char *sm, unsigned long long *smlen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *sk); +#endif + +SQISIGN_API +int +crypto_sign_open(unsigned char *m, unsigned long long *mlen, + const unsigned char *sm, unsigned long long smlen, + const unsigned char *pk); + +#endif /* api_h */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/basis.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/basis.c new file mode 100644 index 0000000000..94cb7fcacb --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/basis.c @@ -0,0 +1,416 @@ +#include "ec.h" +#include "fp2.h" +#include "e0_basis.h" +#include + +uint32_t +ec_recover_y(fp2_t *y, const fp2_t *Px, const ec_curve_t *curve) +{ // Recover y-coordinate of a point on the Montgomery curve y^2 = x^3 + Ax^2 + x + fp2_t t0; + + fp2_sqr(&t0, Px); + fp2_mul(y, &t0, &curve->A); // Ax^2 + fp2_add(y, y, Px); // Ax^2 + x + fp2_mul(&t0, &t0, Px); + fp2_add(y, y, &t0); // x^3 + Ax^2 + x + // This is required, because we do not yet know that our curves are + // supersingular so our points live on the twist with B = 1. + return fp2_sqrt_verify(y); +} + +static void +difference_point(ec_point_t *PQ, const ec_point_t *P, const ec_point_t *Q, const ec_curve_t *curve) +{ + // Given P,Q in projective x-only, computes a deterministic choice for (P-Q) + // Based on Proposition 3 of https://eprint.iacr.org/2017/518.pdf + + fp2_t Bxx, Bxz, Bzz, t0, t1; + + fp2_mul(&t0, &P->x, &Q->x); + fp2_mul(&t1, &P->z, &Q->z); + fp2_sub(&Bxx, &t0, &t1); + fp2_sqr(&Bxx, &Bxx); + fp2_mul(&Bxx, &Bxx, &curve->C); // C*(P.x*Q.x-P.z*Q.z)^2 + fp2_add(&Bxz, &t0, &t1); + fp2_mul(&t0, &P->x, &Q->z); + fp2_mul(&t1, &P->z, &Q->x); + fp2_add(&Bzz, &t0, &t1); + fp2_mul(&Bxz, &Bxz, &Bzz); // (P.x*Q.x+P.z*Q.z)(P.x*Q.z+P.z*Q.x) + fp2_sub(&Bzz, &t0, &t1); + fp2_sqr(&Bzz, &Bzz); + fp2_mul(&Bzz, &Bzz, &curve->C); // C*(P.x*Q.z-P.z*Q.x)^2 + fp2_mul(&Bxz, &Bxz, &curve->C); // C*(P.x*Q.x+P.z*Q.z)(P.x*Q.z+P.z*Q.x) + fp2_mul(&t0, &t0, &t1); + fp2_mul(&t0, &t0, &curve->A); + fp2_add(&t0, &t0, &t0); + fp2_add(&Bxz, &Bxz, &t0); // C*(P.x*Q.x+P.z*Q.z)(P.x*Q.z+P.z*Q.x) + 2*A*P.x*Q.z*P.z*Q.x + + // To ensure that the denominator is a fourth power in Fp, we normalize by + // C*C_bar^2*(P.z)_bar^2*(Q.z)_bar^2 + fp_copy(&t0.re, &curve->C.re); + fp_neg(&t0.im, &curve->C.im); + fp2_sqr(&t0, &t0); + fp2_mul(&t0, &t0, &curve->C); + fp_copy(&t1.re, &P->z.re); + fp_neg(&t1.im, &P->z.im); + fp2_sqr(&t1, &t1); + fp2_mul(&t0, &t0, &t1); + fp_copy(&t1.re, &Q->z.re); + fp_neg(&t1.im, &Q->z.im); + fp2_sqr(&t1, &t1); + fp2_mul(&t0, &t0, &t1); + fp2_mul(&Bxx, &Bxx, &t0); + fp2_mul(&Bxz, &Bxz, &t0); + fp2_mul(&Bzz, &Bzz, &t0); + + // Solving quadratic equation + fp2_sqr(&t0, &Bxz); + fp2_mul(&t1, &Bxx, &Bzz); + fp2_sub(&t0, &t0, &t1); + // No need to check if t0 is square, as per the entangled basis algorithm. + fp2_sqrt(&t0); + fp2_add(&PQ->x, &Bxz, &t0); + fp2_copy(&PQ->z, &Bzz); +} + +// Lifts a basis x(P), x(Q), x(P-Q) assuming the curve has (A/C : 1) and the point +// P = (X/Z : 1). 
For generic implementation see lift_basis() +uint32_t +lift_basis_normalized(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E) +{ + assert(fp2_is_one(&B->P.z)); + assert(fp2_is_one(&E->C)); + + fp2_copy(&P->x, &B->P.x); + fp2_copy(&Q->x, &B->Q.x); + fp2_copy(&Q->z, &B->Q.z); + fp2_set_one(&P->z); + uint32_t ret = ec_recover_y(&P->y, &P->x, E); + + // Algorithm of Okeya-Sakurai to recover y.Q in the montgomery model + fp2_t v1, v2, v3, v4; + fp2_mul(&v1, &P->x, &Q->z); + fp2_add(&v2, &Q->x, &v1); + fp2_sub(&v3, &Q->x, &v1); + fp2_sqr(&v3, &v3); + fp2_mul(&v3, &v3, &B->PmQ.x); + fp2_add(&v1, &E->A, &E->A); + fp2_mul(&v1, &v1, &Q->z); + fp2_add(&v2, &v2, &v1); + fp2_mul(&v4, &P->x, &Q->x); + fp2_add(&v4, &v4, &Q->z); + fp2_mul(&v2, &v2, &v4); + fp2_mul(&v1, &v1, &Q->z); + fp2_sub(&v2, &v2, &v1); + fp2_mul(&v2, &v2, &B->PmQ.z); + fp2_sub(&Q->y, &v3, &v2); + fp2_add(&v1, &P->y, &P->y); + fp2_mul(&v1, &v1, &Q->z); + fp2_mul(&v1, &v1, &B->PmQ.z); + fp2_mul(&Q->x, &Q->x, &v1); + fp2_mul(&Q->z, &Q->z, &v1); + + // Transforming to a jacobian coordinate + fp2_sqr(&v1, &Q->z); + fp2_mul(&Q->y, &Q->y, &v1); + fp2_mul(&Q->x, &Q->x, &Q->z); + return ret; +} + +uint32_t +lift_basis(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E) +{ + // Normalise the curve E such that (A : C) is (A/C : 1) + // and the point x(P) = (X/Z : 1). + fp2_t inverses[2]; + fp2_copy(&inverses[0], &B->P.z); + fp2_copy(&inverses[1], &E->C); + + fp2_batched_inv(inverses, 2); + fp2_set_one(&B->P.z); + fp2_set_one(&E->C); + + fp2_mul(&B->P.x, &B->P.x, &inverses[0]); + fp2_mul(&E->A, &E->A, &inverses[1]); + + // Lift the basis to Jacobian points P, Q + return lift_basis_normalized(P, Q, B, E); +} + +// Given an x-coordinate, determines if this is a valid +// point on the curve. Assumes C=1. +static uint32_t +is_on_curve(const fp2_t *x, const ec_curve_t *curve) +{ + assert(fp2_is_one(&curve->C)); + fp2_t t0; + + fp2_add(&t0, x, &curve->A); // x + (A/C) + fp2_mul(&t0, &t0, x); // x^2 + (A/C)*x + fp2_add_one(&t0, &t0); // x^2 + (A/C)*x + 1 + fp2_mul(&t0, &t0, x); // x^3 + (A/C)*x^2 + x + + return fp2_is_square(&t0); +} + +// Helper function which given a point of order k*2^n with n maximal +// and k odd, computes a point of order 2^f +static inline void +clear_cofactor_for_maximal_even_order(ec_point_t *P, ec_curve_t *curve, int f) +{ + // clear out the odd cofactor to get a point of order 2^n + ec_mul(P, p_cofactor_for_2f, P_COFACTOR_FOR_2F_BITLENGTH, P, curve); + + // clear the power of two to get a point of order 2^f + for (int i = 0; i < TORSION_EVEN_POWER - f; i++) { + xDBL_A24(P, P, &curve->A24, curve->is_A24_computed_and_normalized); + } +} + +// Helper function which finds an NQR -1 / (1 + i*b) for entangled basis generation +static uint8_t +find_nqr_factor(fp2_t *x, ec_curve_t *curve, const uint8_t start) +{ + // factor = -1/(1 + i*b) for b in Fp will be NQR whenever 1 + b^2 is NQR + // in Fp, so we find one of these and then invert (1 + i*b). We store b + // as a u8 hint to save time in verification. + + // We return the hint as a u8, but use (uint16_t)n to give 2^16 - 1 + // to make failure cryptographically negligible, with a fallback when + // n > 128 is required. 
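+ // For reference, the check in the loop below can be derived as follows: + // substituting x = -A/z with z = 1 + i*b into x^3 + A*x^2 + x gives + // (A/z^3) * (A^2*(z - 1) - z^2). Since Fp2 = Fp(i) with i^2 = -1 here, an + // element of Fp2 is a square exactly when its norm to Fp is a square, and + // the norm of z is 1 + b^2, so z is a NQR precisely when 1 + b^2 is a NQR + // in Fp. Then A/z^3 is a NQR whenever A is a QR (the only case in which + // this function is called), so x lies on the curve exactly when + // A^2*(z - 1) - z^2 is a NQR.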
+ uint8_t hint; + uint32_t found = 0; + uint16_t n = start; + + bool qr_b = 1; + fp_t b, tmp; + fp2_t z, t0, t1; + + do { + while (qr_b) { + // find b with 1 + b^2 a non-quadratic residue + fp_set_small(&tmp, (uint32_t)n * n + 1); + qr_b = fp_is_square(&tmp); + n++; // keeps track of b = n - 1 + } + + // for Px := -A/(1 + i*b) to be on the curve + // is equivalent to A^2*(z-1) - z^2 NQR for z = 1 + i*b + // thus prevents unnecessary inversion pre-check + + // t0 = z - 1 = i*b + // t1 = z = 1 + i*b + fp_set_small(&b, (uint32_t)n - 1); + fp2_set_zero(&t0); + fp2_set_one(&z); + fp_copy(&z.im, &b); + fp_copy(&t0.im, &b); + + // A^2*(z-1) - z^2 + fp2_sqr(&t1, &curve->A); + fp2_mul(&t0, &t0, &t1); // A^2 * (z - 1) + fp2_sqr(&t1, &z); + fp2_sub(&t0, &t0, &t1); // A^2 * (z - 1) - z^2 + found = !fp2_is_square(&t0); + + qr_b = 1; + } while (!found); + + // set Px to -A/(1 + i*b) + fp2_copy(x, &z); + fp2_inv(x); + fp2_mul(x, x, &curve->A); + fp2_neg(x, x); + + /* + * With very low probability n will not fit in 7 bits. + * We set hint = 0 which signals failure and the need + * to generate a value on the fly during verification + */ + hint = n <= 128 ? n - 1 : 0; + + return hint; +} + +// Helper function which finds a point x(P) = n * A +static uint8_t +find_nA_x_coord(fp2_t *x, ec_curve_t *curve, const uint8_t start) +{ + assert(!fp2_is_square(&curve->A)); // Only to be called when A is a NQR + + // when A is NQR we allow x(P) to be a multiple n*A of A + uint8_t n = start; + if (n == 1) { + fp2_copy(x, &curve->A); + } else { + fp2_mul_small(x, &curve->A, n); + } + + while (!is_on_curve(x, curve)) { + fp2_add(x, x, &curve->A); + n++; + } + + /* + * With very low probability (1/2^128), n will not fit in 7 bits. + * In this case, we set hint = 0 which signals failure and the need + * to generate a value on the fly during verification + */ + uint8_t hint = n < 128 ? 
n : 0; + return hint; +} + +// The entangled basis generation does not allow A = 0 +// so we simply return the one we have already precomputed +static void +ec_basis_E0_2f(ec_basis_t *PQ2, ec_curve_t *curve, int f) +{ + assert(fp2_is_zero(&curve->A)); + ec_point_t P, Q; + + // Set P, Q to precomputed (X : 1) values + fp2_copy(&P.x, &BASIS_E0_PX); + fp2_copy(&Q.x, &BASIS_E0_QX); + fp2_set_one(&P.z); + fp2_set_one(&Q.z); + + // clear the power of two to get a point of order 2^f + for (int i = 0; i < TORSION_EVEN_POWER - f; i++) { + xDBL_E0(&P, &P); + xDBL_E0(&Q, &Q); + } + + // Set P, Q in the basis and compute x(P - Q) + copy_point(&PQ2->P, &P); + copy_point(&PQ2->Q, &Q); + difference_point(&PQ2->PmQ, &P, &Q, curve); +} + +// Computes a basis E[2^f] = <P, Q> where the point Q is above (0 : 0) +// and stores hints as an array for faster recomputation at a later point +uint8_t +ec_curve_to_basis_2f_to_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f) +{ + // Normalise (A/C : 1) and ((A + 2)/4 : 1) + ec_normalize_curve_and_A24(curve); + + if (fp2_is_zero(&curve->A)) { + ec_basis_E0_2f(PQ2, curve, f); + return 0; + } + + uint8_t hint; + bool hint_A = fp2_is_square(&curve->A); + + // Compute the points P, Q + ec_point_t P, Q; + + if (!hint_A) { + // when A is NQR we allow x(P) to be a multiple n*A of A + hint = find_nA_x_coord(&P.x, curve, 1); + } else { + // when A is QR we instead have to find (1 + b^2) a NQR + // such that x(P) = -A / (1 + i*b) + hint = find_nqr_factor(&P.x, curve, 1); + } + + fp2_set_one(&P.z); + fp2_add(&Q.x, &curve->A, &P.x); + fp2_neg(&Q.x, &Q.x); + fp2_set_one(&Q.z); + + // clear out the odd cofactor to get a point of order 2^f + clear_cofactor_for_maximal_even_order(&P, curve, f); + clear_cofactor_for_maximal_even_order(&Q, curve, f); + + // compute PmQ, set PmQ to Q to ensure Q above (0,0) + difference_point(&PQ2->Q, &P, &Q, curve); + copy_point(&PQ2->P, &P); + copy_point(&PQ2->PmQ, &Q); + + // Finally, we compress hint_A and hint into a single byte.
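+ // For example, with the packing used below, hint = 5 together with hint_A = 1 + // is returned as (5 << 1) | 1 = 0x0b, and ec_curve_to_basis_2f_from_hint + // recovers hint_A from the LSB and hint_P from the remaining seven bits.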
+ // We choose to set the LSB of hint to hint_A + assert(hint < 128); // We expect hint to be 7-bits in size + return (hint << 1) | hint_A; +} + +// Computes a basis E[2^f] = where the point Q is above (0 : 0) +// given the hints as an array for faster basis computation +int +ec_curve_to_basis_2f_from_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f, const uint8_t hint) +{ + // Normalise (A/C : 1) and ((A + 2)/4 : 1) + ec_normalize_curve_and_A24(curve); + + if (fp2_is_zero(&curve->A)) { + ec_basis_E0_2f(PQ2, curve, f); + return 1; + } + + // The LSB of hint encodes whether A is a QR + // The remaining 7-bits are used to find a valid x(P) + bool hint_A = hint & 1; + uint8_t hint_P = hint >> 1; + + // Compute the points P, Q + ec_point_t P, Q; + + if (!hint_P) { + // When hint_P = 0 it means we did not find a point in 128 attempts + // this is very rare and we almost never expect to need this fallback + // In either case, we can start with b = 128 to skip testing the known + // values which will not work + if (!hint_A) { + find_nA_x_coord(&P.x, curve, 128); + } else { + find_nqr_factor(&P.x, curve, 128); + } + } else { + // Otherwise we use the hint to directly find x(P) based on hint_A + if (!hint_A) { + // when A is NQR, we have found n such that x(P) = n*A + fp2_mul_small(&P.x, &curve->A, hint_P); + } else { + // when A is QR we have found b such that (1 + b^2) is a NQR in + // Fp, so we must compute x(P) = -A / (1 + i*b) + fp_set_one(&P.x.re); + fp_set_small(&P.x.im, hint_P); + fp2_inv(&P.x); + fp2_mul(&P.x, &P.x, &curve->A); + fp2_neg(&P.x, &P.x); + } + } + fp2_set_one(&P.z); + +#ifndef NDEBUG + int passed = 1; + passed = is_on_curve(&P.x, curve); + passed &= !fp2_is_square(&P.x); + + if (!passed) + return 0; +#endif + + // set xQ to -xP - A + fp2_add(&Q.x, &curve->A, &P.x); + fp2_neg(&Q.x, &Q.x); + fp2_set_one(&Q.z); + + // clear out the odd cofactor to get a point of order 2^f + clear_cofactor_for_maximal_even_order(&P, curve, f); + clear_cofactor_for_maximal_even_order(&Q, curve, f); + + // compute PmQ, set PmQ to Q to ensure Q above (0,0) + difference_point(&PQ2->Q, &P, &Q, curve); + copy_point(&PQ2->P, &P); + copy_point(&PQ2->PmQ, &Q); + +#ifndef NDEBUG + passed &= test_basis_order_twof(PQ2, curve, f); + + if (!passed) + return 0; +#endif + + return 1; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/bench.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/bench.h new file mode 100644 index 0000000000..c253825828 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/bench.h @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: Apache-2.0 +#ifndef BENCH_H__ +#define BENCH_H__ + +#include +#include +#include +#include +#include +#if defined(__APPLE__) +#include "bench_macos.h" +#endif + +#if defined(TARGET_ARM) || defined(TARGET_S390X) || defined(NO_CYCLE_COUNTER) +#define BENCH_UNIT0 "nanoseconds" +#define BENCH_UNIT3 "microseconds" +#define BENCH_UNIT6 "milliseconds" +#define BENCH_UNIT9 "seconds" +#else +#define BENCH_UNIT0 "cycles" +#define BENCH_UNIT3 "kilocycles" +#define BENCH_UNIT6 "megacycles" +#define BENCH_UNIT9 "gigacycles" +#endif + +static inline void +cpucycles_init(void) { +#if defined(__APPLE__) && defined(TARGET_ARM64) + macos_init_rdtsc(); +#endif +} + +static inline uint64_t +cpucycles(void) +{ +#if defined(TARGET_AMD64) || defined(TARGET_X86) + uint32_t hi, lo; + + asm volatile("rdtsc" : "=a"(lo), "=d"(hi)); + return ((uint64_t)lo) | ((uint64_t)hi << 32); +#elif defined(TARGET_S390X) + uint64_t tod; + asm volatile("stckf %0\n" : "=Q"(tod) : : "cc"); + 
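// STCKF stores the TOD clock, which (on z/Architecture) advances in units of + // 2^-12 microseconds, so the conversion below returns nanoseconds, matching + // BENCH_UNIT0 for TARGET_S390X above. +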
return (tod * 1000 / 4096); +#elif defined(TARGET_ARM64) && !defined(NO_CYCLE_COUNTER) +#if defined(__APPLE__) + return macos_rdtsc(); +#else + uint64_t cycles; + asm volatile("mrs %0, PMCCNTR_EL0" : "=r"(cycles)); + return cycles; +#endif // __APPLE__ +#else + struct timespec time; + clock_gettime(CLOCK_REALTIME, &time); + return (uint64_t)time.tv_sec * 1000000000 + time.tv_nsec; +#endif +} + +static inline int +CMPFUNC(const void *a, const void *b) +{ + uint64_t aa = *(uint64_t *)a, bb = *(uint64_t *)b; + + if (aa > bb) + return +1; + if (aa < bb) + return -1; + return 0; +} + +static inline uint32_t +ISQRT(uint64_t x) +{ + uint32_t r = 0; + for (ssize_t i = 31; i >= 0; --i) { + uint32_t s = r + (1 << i); + if ((uint64_t)s * s <= x) + r = s; + } + return r; +} + +static inline double +_TRUNC(uint64_t x) +{ + return x / 1000 / 1000.; +} +#define _FMT ".3lf" +#define _UNIT BENCH_UNIT6 + +#define BENCH_CODE_1(RUNS) \ + { \ + const size_t count = (RUNS); \ + if (!count) \ + abort(); \ + uint64_t cycles, cycles1, cycles2; \ + uint64_t cycles_list[count]; \ + cycles = 0; \ + for (size_t i = 0; i < count; ++i) { \ + cycles1 = cpucycles(); + +#define BENCH_CODE_2(name) \ + cycles2 = cpucycles(); \ + cycles_list[i] = cycles2 - cycles1; \ + cycles += cycles2 - cycles1; \ + } \ + qsort(cycles_list, count, sizeof(uint64_t), CMPFUNC); \ + uint64_t variance = 0; \ + for (size_t i = 0; i < count; ++i) { \ + int64_t off = cycles_list[i] - cycles / count; \ + variance += off * off; \ + } \ + variance /= count; \ + printf(" %-10s", name); \ + printf(" | average %9" _FMT " | stddev %9" _FMT, \ + _TRUNC(cycles / count), \ + _TRUNC(ISQRT(variance))); \ + printf(" | median %9" _FMT " | min %9" _FMT " | max %9" _FMT, \ + _TRUNC(cycles_list[count / 2]), \ + _TRUNC(cycles_list[0]), \ + _TRUNC(cycles_list[count - 1])); \ + printf(" (%s)\n", _UNIT); \ + } + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/bench_macos.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/bench_macos.h new file mode 100644 index 0000000000..0494fc85e9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/bench_macos.h @@ -0,0 +1,143 @@ +// WARNING: must be run as root on an M1 device +// WARNING: fragile, uses private apple APIs +// currently no command line interface, see variables at top of main + +/* +no warranty; use at your own risk - i believe this code needs +some minor changes to work on some later hardware and/or software revisions, +which is unsurprising given the use of undocumented, private APIs. +------------------------------------------------------------------------------ +This code is available under 2 licenses -- choose whichever you prefer. +------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2020 Dougall Johnson +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ + +/* + Based on https://github.com/travisdowns/robsize + Henry Wong + http://blog.stuffedcow.net/2013/05/measuring-rob-capacity/ + 2014-10-14 +*/ + +#include +#include +#include +#include + +#define KPERF_LIST \ + /* ret, name, params */ \ + F(int, kpc_force_all_ctrs_set, int) \ + F(int, kpc_set_counting, uint32_t) \ + F(int, kpc_set_thread_counting, uint32_t) \ + F(int, kpc_set_config, uint32_t, void *) \ + F(int, kpc_get_thread_counters, int, unsigned int, void *) + +#define F(ret, name, ...) \ + typedef ret name##proc(__VA_ARGS__); \ + static name##proc *name; +KPERF_LIST +#undef F + +#define CFGWORD_EL0A64EN_MASK (0x20000) + +#define CPMU_CORE_CYCLE 0x02 + +#define KPC_CLASS_FIXED (0) +#define KPC_CLASS_CONFIGURABLE (1) + +#define COUNTERS_COUNT 10 +#define KPC_MASK ((1u << KPC_CLASS_CONFIGURABLE) | (1u << KPC_CLASS_FIXED)) +static uint64_t g_config[COUNTERS_COUNT]; +static uint64_t g_counters[COUNTERS_COUNT]; + +static void +macos_configure_rdtsc() +{ + if (kpc_force_all_ctrs_set(1)) { + printf("kpc_force_all_ctrs_set failed\n"); + return; + } + + if (kpc_set_config(KPC_MASK, g_config)) { + printf("kpc_set_config failed\n"); + return; + } + + if (kpc_set_counting(KPC_MASK)) { + printf("kpc_set_counting failed\n"); + return; + } + + if (kpc_set_thread_counting(KPC_MASK)) { + printf("kpc_set_thread_counting failed\n"); + return; + } +} + +static void +macos_init_rdtsc() +{ + void *kperf = + dlopen("/System/Library/PrivateFrameworks/kperf.framework/Versions/A/kperf", RTLD_LAZY); + if (!kperf) { + printf("kperf = %p\n", kperf); + return; + } +#define F(ret, name, ...) 
\ + name = (name##proc *)(intptr_t)(dlsym(kperf, #name)); \ + if (!name) { \ + printf("%s = %p\n", #name, (void *)(intptr_t)name); \ + return; \ + } + KPERF_LIST +#undef F + + g_config[0] = CPMU_CORE_CYCLE | CFGWORD_EL0A64EN_MASK; + + macos_configure_rdtsc(); +} + +static uint64_t +macos_rdtsc(void) +{ + if (kpc_get_thread_counters(0, COUNTERS_COUNT, g_counters)) { + printf("kpc_get_thread_counters failed\n"); + return 1; + } + return g_counters[2]; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/biextension.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/biextension.c new file mode 100644 index 0000000000..1df7ab938b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/biextension.c @@ -0,0 +1,770 @@ +#include +#include +#include +#include + +/* + * We implement the biextension arithmetic by using the cubical torsor + * representation. For now only implement the 2^e-ladder. + * + * Warning: cubicalADD is off by a factor x4 with respect to the correct + * cubical arithmetic. This does not affect the Weil pairing or the Tate + * pairing over F_{p^2} (due to the final exponentiation), but would give + * the wrong result if we compute the Tate pairing over F_p. + */ + +// this would be exactly like xADD if PQ was 'antinormalised' as (1,z) +// Cost: 3M + 2S + 3a + 3s +// Note: if needed, cubicalDBL is simply xDBL_A24 normalized and +// costs 3M + 2S + 2a + 2s + +static void +cubicalADD(ec_point_t *R, const ec_point_t *P, const ec_point_t *Q, const fp2_t *ixPQ) +{ + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_add(&t2, &Q->x, &Q->z); + fp2_sub(&t3, &Q->x, &Q->z); + fp2_mul(&t0, &t0, &t3); + fp2_mul(&t1, &t1, &t2); + fp2_add(&t2, &t0, &t1); + fp2_sub(&t3, &t0, &t1); + fp2_sqr(&R->z, &t3); + fp2_sqr(&t2, &t2); + fp2_mul(&R->x, ixPQ, &t2); +} + +// Given cubical reps of P, Q and x(P - Q) = (1 : ixPQ) +// compute P + Q, [2]Q +// Cost: 6M + 4S + 4a + 4s +static void +cubicalDBLADD(ec_point_t *PpQ, + ec_point_t *QQ, + const ec_point_t *P, + const ec_point_t *Q, + const fp2_t *ixPQ, + const ec_point_t *A24) +{ + // A24 = (A+2C/4C: 1) + assert(fp2_is_one(&A24->z)); + + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_add(&PpQ->x, &Q->x, &Q->z); + fp2_sub(&t3, &Q->x, &Q->z); + fp2_sqr(&t2, &PpQ->x); + fp2_sqr(&QQ->z, &t3); + fp2_mul(&t0, &t0, &t3); + fp2_mul(&t1, &t1, &PpQ->x); + fp2_add(&PpQ->x, &t0, &t1); + fp2_sub(&t3, &t0, &t1); + fp2_sqr(&PpQ->z, &t3); + fp2_sqr(&PpQ->x, &PpQ->x); + fp2_mul(&PpQ->x, ixPQ, &PpQ->x); + fp2_sub(&t3, &t2, &QQ->z); + fp2_mul(&QQ->x, &t2, &QQ->z); + fp2_mul(&t0, &t3, &A24->x); + fp2_add(&t0, &t0, &QQ->z); + fp2_mul(&QQ->z, &t0, &t3); +} + +// iterative biextension doubling +static void +biext_ladder_2e(uint32_t e, + ec_point_t *PnQ, + ec_point_t *nQ, + const ec_point_t *PQ, + const ec_point_t *Q, + const fp2_t *ixP, + const ec_point_t *A24) +{ + copy_point(PnQ, PQ); + copy_point(nQ, Q); + for (uint32_t i = 0; i < e; i++) { + cubicalDBLADD(PnQ, nQ, PnQ, nQ, ixP, A24); + } +} + +// Compute the monodromy ratio X/Z above as a (X:Z) point to avoid a division +// We implicitly use (1,0) as a cubical point above 0_E +static void +point_ratio(ec_point_t *R, const ec_point_t *PnQ, const ec_point_t *nQ, const ec_point_t *P) +{ + // Sanity tests + assert(ec_is_zero(nQ)); + assert(ec_is_equal(PnQ, P)); + + fp2_mul(&R->x, &nQ->x, &P->x); + fp2_copy(&R->z, &PnQ->x); +} + +// Compute the cubical translation of P by a point of 2-torsion T +static void +translate(ec_point_t *P, 
const ec_point_t *T) +{ + // When we translate, the following three things can happen: + // T = (A : 0) then the translation of P should be P + // T = (0 : B) then the translation of P = (X : Z) should be (Z : X) + // Otherwise T = (A : B) and P translates to (AX - BZ : BX - AZ) + // We compute this in constant time by computing the generic case + // and then using constant time swaps. + fp2_t PX_new, PZ_new; + + { + fp2_t t0, t1; + + // PX_new = AX - BZ + fp2_mul(&t0, &T->x, &P->x); + fp2_mul(&t1, &T->z, &P->z); + fp2_sub(&PX_new, &t0, &t1); + + // PZ_new = BX - AZ + fp2_mul(&t0, &T->z, &P->x); + fp2_mul(&t1, &T->x, &P->z); + fp2_sub(&PZ_new, &t0, &t1); + } + + // When we have A zero we should return (Z : X) + uint32_t TA_is_zero = fp2_is_zero(&T->x); + fp2_select(&PX_new, &PX_new, &P->z, TA_is_zero); + fp2_select(&PZ_new, &PZ_new, &P->x, TA_is_zero); + + // When we have B zero we should return (X : Z) + uint32_t TB_is_zero = fp2_is_zero(&T->z); + fp2_select(&PX_new, &PX_new, &P->x, TB_is_zero); + fp2_select(&PZ_new, &PZ_new, &P->z, TB_is_zero); + + // Set the point to the desired result + fp2_copy(&P->x, &PX_new); + fp2_copy(&P->z, &PZ_new); +} + +// Compute the biextension monodromy g_P,Q^{2^g} (in level 1) via the +// cubical arithmetic of P+2^e Q. +// The suffix _i means that we are given 1/x(P) as parameter. Warning: to +// get meaningful result when using the monodromy to compute pairings, we +// need P, Q, PQ, A24 to be normalised (this is not strictly necessary, but +// care need to be taken when they are not normalised. Only handle the +// normalised case for now) +static void +monodromy_i(ec_point_t *R, const pairing_params_t *pairing_data, bool swap_PQ) +{ + fp2_t ixP; + ec_point_t P, Q, PnQ, nQ; + + // When we compute the Weil pairing we need both P + [2^e]Q and + // Q + [2^e]P which we can do easily with biext_ladder_2e() below + // we use a bool to decide wether to use Q, ixP or P, ixQ in the + // ladder and P or Q in translation. 
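+ // For reference: the (X : Z) ratio produced here represents the biextension + // monodromy g_P,Q^{2^e} described above, and weil_n() below takes the + // quotient of the two monodromies obtained with and without swapping P and Q, + // which, up to the inversion convention used there, is the Weil pairing + // e_{2^e}(P, Q).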
+ if (!swap_PQ) { + copy_point(&P, &pairing_data->P); + copy_point(&Q, &pairing_data->Q); + fp2_copy(&ixP, &pairing_data->ixP); + } else { + copy_point(&P, &pairing_data->Q); + copy_point(&Q, &pairing_data->P); + fp2_copy(&ixP, &pairing_data->ixQ); + } + + // Compute the biextension ladder P + [2^e]Q + biext_ladder_2e(pairing_data->e - 1, &PnQ, &nQ, &pairing_data->PQ, &Q, &ixP, &pairing_data->A24); + translate(&PnQ, &nQ); + translate(&nQ, &nQ); + point_ratio(R, &PnQ, &nQ, &P); +} + +// Normalize the points and also store 1/x(P), 1/x(Q) +static void +cubical_normalization(pairing_params_t *pairing_data, const ec_point_t *P, const ec_point_t *Q) +{ + fp2_t t[4]; + fp2_copy(&t[0], &P->x); + fp2_copy(&t[1], &P->z); + fp2_copy(&t[2], &Q->x); + fp2_copy(&t[3], &Q->z); + fp2_batched_inv(t, 4); + + // Store PZ / PX and QZ / QX + fp2_mul(&pairing_data->ixP, &P->z, &t[0]); + fp2_mul(&pairing_data->ixQ, &Q->z, &t[2]); + + // Store x(P), x(Q) normalised to (X/Z : 1) + fp2_mul(&pairing_data->P.x, &P->x, &t[1]); + fp2_mul(&pairing_data->Q.x, &Q->x, &t[3]); + fp2_set_one(&pairing_data->P.z); + fp2_set_one(&pairing_data->Q.z); +} + +// Weil pairing, PQ should be P+Q in (X:Z) coordinates +// We assume the points are normalised correctly +static void +weil_n(fp2_t *r, const pairing_params_t *pairing_data) +{ + ec_point_t R0, R1; + monodromy_i(&R0, pairing_data, true); + monodromy_i(&R1, pairing_data, false); + + fp2_mul(r, &R0.x, &R1.z); + fp2_inv(r); + fp2_mul(r, r, &R0.z); + fp2_mul(r, r, &R1.x); +} + +// Weil pairing, PQ should be P+Q in (X:Z) coordinates +// Normalise the points and call the code above +// The code will crash (division by 0) if either P or Q is (0:1) +void +weil(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E) +{ + pairing_params_t pairing_data; + // Construct the structure for the Weil pairing + // Set (PX/PZ : 1), (QX : QZ : 1), PZ/PX and QZ/QX + pairing_data.e = e; + cubical_normalization(&pairing_data, P, Q); + copy_point(&pairing_data.PQ, PQ); + + // Ensure the input curve has A24 normalised and store + // in a struct + ec_curve_normalize_A24(E); + copy_point(&pairing_data.A24, &E->A24); + + // Compute the Weil pairing e_(2^n)(P, Q) + weil_n(r, &pairing_data); +} + +// two helper functions for reducing the tate pairing +// clear_cofac clears (p + 1) // 2^f for an Fp2 value +void +clear_cofac(fp2_t *r, const fp2_t *a) +{ + digit_t exp = *p_cofactor_for_2f; + exp >>= 1; + + fp2_t x; + fp2_copy(&x, a); + fp2_copy(r, a); + + // removes cofac + while (exp > 0) { + fp2_sqr(r, r); + if (exp & 1) { + fp2_mul(r, r, &x); + } + exp >>= 1; + } +} + +// applies frobenius a + ib --> a - ib to an fp2 element +void +fp2_frob(fp2_t *out, const fp2_t *in) +{ + fp_copy(&(out->re), &(in->re)); + fp_neg(&(out->im), &(in->im)); +} + +// reduced Tate pairing, normalizes the points, assumes PQ is P+Q in (X:Z) +// coordinates. 
Computes 1/x(P) and 1/x(Q) for efficient cubical ladder +void +reduced_tate(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E) +{ + uint32_t e_full = TORSION_EVEN_POWER; + uint32_t e_diff = e_full - e; + ec_point_t R; + pairing_params_t pairing_data; + + // Construct the structure for the Weil pairing + // Set (PX/PZ : 1), (QX : QZ : 1), PZ/PX and QZ/QX + pairing_data.e = e; + cubical_normalization(&pairing_data, P, Q); + copy_point(&pairing_data.PQ, PQ); + + // Ensure the input curve has A24 normalised and store + // in a struct + ec_curve_normalize_A24(E); + copy_point(&pairing_data.A24, &E->A24); + + monodromy_i(&R, &pairing_data, true); + + // we get unreduced tate as R.X, R.Z + // reduced tate is -(R.Z/R.X)^((p^2 - 1) div 2^f) + // we reuse R.X and R.Z to split reduction step ^(p-1) into frobenius and ^-1 + fp2_t frob, tmp; + fp2_copy(&tmp, &R.x); + fp2_frob(&frob, &R.x); + fp2_mul(&R.x, &R.z, &frob); + fp2_frob(&frob, &R.z); + fp2_mul(&R.z, &tmp, &frob); + fp2_inv(&R.x); + fp2_mul(r, &R.x, &R.z); + + clear_cofac(r, r); + // clear remaining 2^e_diff + for (uint32_t j = 0; j < e_diff; j++) { + fp2_sqr(r, r); + } +} + +// Functions to compute discrete logs by computing the Weil pairing of points +// followed by computing the dlog in Fp^2 +// (If we work with full order points, it would be faster to use the Tate +// pairings rather than the Weil pairings; this is not implemented yet) + +// recursive dlog function +static bool +fp2_dlog_2e_rec(digit_t *a, long len, fp2_t *pows_f, fp2_t *pows_g, long stacklen) +{ + if (len == 0) { + // *a = 0; + for (int i = 0; i < NWORDS_ORDER; i++) { + a[i] = 0; + } + return true; + } else if (len == 1) { + if (fp2_is_one(&pows_f[stacklen - 1])) { + // a = 0; + for (int i = 0; i < NWORDS_ORDER; i++) { + a[i] = 0; + } + for (int i = 0; i < stacklen - 1; ++i) { + fp2_sqr(&pows_g[i], &pows_g[i]); // new_g = g^2 + } + return true; + } else if (fp2_is_equal(&pows_f[stacklen - 1], &pows_g[stacklen - 1])) { + // a = 1; + a[0] = 1; + for (int i = 1; i < NWORDS_ORDER; i++) { + a[i] = 0; + } + for (int i = 0; i < stacklen - 1; ++i) { + fp2_mul(&pows_f[i], &pows_f[i], &pows_g[i]); // new_f = f*g + fp2_sqr(&pows_g[i], &pows_g[i]); // new_g = g^2 + } + return true; + } else { + return false; + } + } else { + long right = (double)len * 0.5; + long left = len - right; + pows_f[stacklen] = pows_f[stacklen - 1]; + pows_g[stacklen] = pows_g[stacklen - 1]; + for (int i = 0; i < left; i++) { + fp2_sqr(&pows_f[stacklen], &pows_f[stacklen]); + fp2_sqr(&pows_g[stacklen], &pows_g[stacklen]); + } + // uint32_t dlp1 = 0, dlp2 = 0; + digit_t dlp1[NWORDS_ORDER], dlp2[NWORDS_ORDER]; + bool ok; + ok = fp2_dlog_2e_rec(dlp1, right, pows_f, pows_g, stacklen + 1); + if (!ok) + return false; + ok = fp2_dlog_2e_rec(dlp2, left, pows_f, pows_g, stacklen); + if (!ok) + return false; + // a = dlp1 + 2^right * dlp2 + multiple_mp_shiftl(dlp2, right, NWORDS_ORDER); + mp_add(a, dlp2, dlp1, NWORDS_ORDER); + + return true; + } +} + +// compute DLP: compute scal such that f = g^scal with f, 1/g as input +static bool +fp2_dlog_2e(digit_t *scal, const fp2_t *f, const fp2_t *g_inverse, int e) +{ + long log, len = e; + for (log = 0; len > 1; len >>= 1) + log++; + log += 1; + + fp2_t pows_f[log], pows_g[log]; + pows_f[0] = *f; + pows_g[0] = *g_inverse; + + for (int i = 0; i < NWORDS_ORDER; i++) { + scal[i] = 0; + } + + bool ok = fp2_dlog_2e_rec(scal, e, pows_f, pows_g, 1); + assert(ok); + + return ok; +} + +// Normalize the bases (P, Q), (R, S) and store 
their inverse +// and additionally normalise the curve to (A/C : 1) +static void +cubical_normalization_dlog(pairing_dlog_params_t *pairing_dlog_data, ec_curve_t *curve) +{ + fp2_t t[11]; + ec_basis_t *PQ = &pairing_dlog_data->PQ; + ec_basis_t *RS = &pairing_dlog_data->RS; + fp2_copy(&t[0], &PQ->P.x); + fp2_copy(&t[1], &PQ->P.z); + fp2_copy(&t[2], &PQ->Q.x); + fp2_copy(&t[3], &PQ->Q.z); + fp2_copy(&t[4], &PQ->PmQ.x); + fp2_copy(&t[5], &PQ->PmQ.z); + fp2_copy(&t[6], &RS->P.x); + fp2_copy(&t[7], &RS->P.z); + fp2_copy(&t[8], &RS->Q.x); + fp2_copy(&t[9], &RS->Q.z); + fp2_copy(&t[10], &curve->C); + + fp2_batched_inv(t, 11); + + fp2_mul(&pairing_dlog_data->ixP, &PQ->P.z, &t[0]); + fp2_mul(&PQ->P.x, &PQ->P.x, &t[1]); + fp2_set_one(&PQ->P.z); + + fp2_mul(&pairing_dlog_data->ixQ, &PQ->Q.z, &t[2]); + fp2_mul(&PQ->Q.x, &PQ->Q.x, &t[3]); + fp2_set_one(&PQ->Q.z); + + fp2_mul(&PQ->PmQ.x, &PQ->PmQ.x, &t[5]); + fp2_set_one(&PQ->PmQ.z); + + fp2_mul(&pairing_dlog_data->ixR, &RS->P.z, &t[6]); + fp2_mul(&RS->P.x, &RS->P.x, &t[7]); + fp2_set_one(&RS->P.z); + + fp2_mul(&pairing_dlog_data->ixS, &RS->Q.z, &t[8]); + fp2_mul(&RS->Q.x, &RS->Q.x, &t[9]); + fp2_set_one(&RS->Q.z); + + fp2_mul(&curve->A, &curve->A, &t[10]); + fp2_set_one(&curve->C); +} + +// Given two bases and basis = compute +// x(P - R), x(P - S), x(R - Q), x(S - Q) +static void +compute_difference_points(pairing_dlog_params_t *pairing_dlog_data, ec_curve_t *curve) +{ + jac_point_t xyP, xyQ, xyR, xyS, temp; + + // lifting the two basis points, assumes that x(P) and x(R) + // and the curve itself are normalised to (X : 1) + lift_basis_normalized(&xyP, &xyQ, &pairing_dlog_data->PQ, curve); + lift_basis_normalized(&xyR, &xyS, &pairing_dlog_data->RS, curve); + + // computation of the differences + // x(P - R) + jac_neg(&temp, &xyR); + ADD(&temp, &temp, &xyP, curve); + jac_to_xz(&pairing_dlog_data->diff.PmR, &temp); + + // x(P - S) + jac_neg(&temp, &xyS); + ADD(&temp, &temp, &xyP, curve); + jac_to_xz(&pairing_dlog_data->diff.PmS, &temp); + + // x(R - Q) + jac_neg(&temp, &xyQ); + ADD(&temp, &temp, &xyR, curve); + jac_to_xz(&pairing_dlog_data->diff.RmQ, &temp); + + // x(S - Q) + jac_neg(&temp, &xyQ); + ADD(&temp, &temp, &xyS, curve); + jac_to_xz(&pairing_dlog_data->diff.SmQ, &temp); +} + +// Inline all the Weil pairing computations needed for ec_dlog_2_weil +static void +weil_dlog(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, pairing_dlog_params_t *pairing_dlog_data) +{ + + ec_point_t nP, nQ, nR, nS, nPQ, PnQ, nPR, PnR, nPS, PnS, nRQ, RnQ, nSQ, SnQ; + + copy_point(&nP, &pairing_dlog_data->PQ.P); + copy_point(&nQ, &pairing_dlog_data->PQ.Q); + copy_point(&nR, &pairing_dlog_data->RS.P); + copy_point(&nS, &pairing_dlog_data->RS.Q); + copy_point(&nPQ, &pairing_dlog_data->PQ.PmQ); + copy_point(&PnQ, &pairing_dlog_data->PQ.PmQ); + copy_point(&nPR, &pairing_dlog_data->diff.PmR); + copy_point(&nPS, &pairing_dlog_data->diff.PmS); + copy_point(&PnR, &pairing_dlog_data->diff.PmR); + copy_point(&PnS, &pairing_dlog_data->diff.PmS); + copy_point(&nRQ, &pairing_dlog_data->diff.RmQ); + copy_point(&nSQ, &pairing_dlog_data->diff.SmQ); + copy_point(&RnQ, &pairing_dlog_data->diff.RmQ); + copy_point(&SnQ, &pairing_dlog_data->diff.SmQ); + + for (uint32_t i = 0; i < pairing_dlog_data->e - 1; i++) { + cubicalADD(&nPQ, &nPQ, &nP, &pairing_dlog_data->ixQ); + cubicalADD(&nPR, &nPR, &nP, &pairing_dlog_data->ixR); + cubicalDBLADD(&nPS, &nP, &nPS, &nP, &pairing_dlog_data->ixS, &pairing_dlog_data->A24); + + cubicalADD(&PnQ, &PnQ, &nQ, &pairing_dlog_data->ixP); + cubicalADD(&RnQ, 
&RnQ, &nQ, &pairing_dlog_data->ixR); + cubicalDBLADD(&SnQ, &nQ, &SnQ, &nQ, &pairing_dlog_data->ixS, &pairing_dlog_data->A24); + + cubicalADD(&PnR, &PnR, &nR, &pairing_dlog_data->ixP); + cubicalDBLADD(&nRQ, &nR, &nRQ, &nR, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + + cubicalADD(&PnS, &PnS, &nS, &pairing_dlog_data->ixP); + cubicalDBLADD(&nSQ, &nS, &nSQ, &nS, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + } + + // weil(&w0,e,&PQ->P,&PQ->Q,&PQ->PmQ,&A24); + translate(&nPQ, &nP); + translate(&nPR, &nP); + translate(&nPS, &nP); + translate(&PnQ, &nQ); + translate(&RnQ, &nQ); + translate(&SnQ, &nQ); + translate(&PnR, &nR); + translate(&nRQ, &nR); + translate(&PnS, &nS); + translate(&nSQ, &nS); + + translate(&nP, &nP); + translate(&nQ, &nQ); + translate(&nR, &nR); + translate(&nS, &nS); + + // computation of the reference weil pairing + ec_point_t T0, T1; + fp2_t w1[5], w2[5]; + + // e(P, Q) = w0 + point_ratio(&T0, &nPQ, &nP, &pairing_dlog_data->PQ.Q); + point_ratio(&T1, &PnQ, &nQ, &pairing_dlog_data->PQ.P); + // For the first element we need it's inverse for + // fp2_dlog_2e so we swap w1 and w2 here to save inversions + fp2_mul(&w2[0], &T0.x, &T1.z); + fp2_mul(&w1[0], &T1.x, &T0.z); + + // e(P,R) = w0^r2 + point_ratio(&T0, &nPR, &nP, &pairing_dlog_data->RS.P); + point_ratio(&T1, &PnR, &nR, &pairing_dlog_data->PQ.P); + fp2_mul(&w1[1], &T0.x, &T1.z); + fp2_mul(&w2[1], &T1.x, &T0.z); + + // e(R,Q) = w0^r1 + point_ratio(&T0, &nRQ, &nR, &pairing_dlog_data->PQ.Q); + point_ratio(&T1, &RnQ, &nQ, &pairing_dlog_data->RS.P); + fp2_mul(&w1[2], &T0.x, &T1.z); + fp2_mul(&w2[2], &T1.x, &T0.z); + + // e(P,S) = w0^s2 + point_ratio(&T0, &nPS, &nP, &pairing_dlog_data->RS.Q); + point_ratio(&T1, &PnS, &nS, &pairing_dlog_data->PQ.P); + fp2_mul(&w1[3], &T0.x, &T1.z); + fp2_mul(&w2[3], &T1.x, &T0.z); + + // e(S,Q) = w0^s1 + point_ratio(&T0, &nSQ, &nS, &pairing_dlog_data->PQ.Q); + point_ratio(&T1, &SnQ, &nQ, &pairing_dlog_data->RS.Q); + fp2_mul(&w1[4], &T0.x, &T1.z); + fp2_mul(&w2[4], &T1.x, &T0.z); + + fp2_batched_inv(w1, 5); + for (int i = 0; i < 5; i++) { + fp2_mul(&w1[i], &w1[i], &w2[i]); + } + + fp2_dlog_2e(r2, &w1[1], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(r1, &w1[2], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s2, &w1[3], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s1, &w1[4], &w1[0], pairing_dlog_data->e); +} + +void +ec_dlog_2_weil(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e) +{ + assert(test_point_order_twof(&PQ->Q, curve, e)); + + // precomputing the correct curve data + ec_curve_normalize_A24(curve); + + pairing_dlog_params_t pairing_dlog_data; + pairing_dlog_data.e = e; + pairing_dlog_data.PQ = *PQ; + pairing_dlog_data.RS = *RS; + pairing_dlog_data.A24 = curve->A24; + + cubical_normalization_dlog(&pairing_dlog_data, curve); + compute_difference_points(&pairing_dlog_data, curve); + + weil_dlog(r1, r2, s1, s2, &pairing_dlog_data); + +#ifndef NDEBUG + ec_point_t test; + ec_biscalar_mul(&test, r1, r2, e, PQ, curve); + // R = [r1]P + [r2]Q + assert(ec_is_equal(&test, &RS->P)); + ec_biscalar_mul(&test, s1, s2, e, PQ, curve); + // S = [s1]P + [s2]Q + assert(ec_is_equal(&test, &RS->Q)); +#endif +} + +// Inline all the Tate pairing computations needed for ec_dlog_2_weil +// including reduction, assumes a bases PQ of full E[2^e_full] torsion +// and a bases RS of smaller E[2^e] torsion +static void +tate_dlog_partial(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, pairing_dlog_params_t *pairing_dlog_data) 
+{ + + uint32_t e_full = TORSION_EVEN_POWER; + uint32_t e_diff = e_full - pairing_dlog_data->e; + + ec_point_t nP, nQ, nR, nS, nPQ, PnR, PnS, nRQ, nSQ; + + copy_point(&nP, &pairing_dlog_data->PQ.P); + copy_point(&nQ, &pairing_dlog_data->PQ.Q); + copy_point(&nR, &pairing_dlog_data->RS.P); + copy_point(&nS, &pairing_dlog_data->RS.Q); + copy_point(&nPQ, &pairing_dlog_data->PQ.PmQ); + copy_point(&PnR, &pairing_dlog_data->diff.PmR); + copy_point(&PnS, &pairing_dlog_data->diff.PmS); + copy_point(&nRQ, &pairing_dlog_data->diff.RmQ); + copy_point(&nSQ, &pairing_dlog_data->diff.SmQ); + + for (uint32_t i = 0; i < e_full - 1; i++) { + cubicalDBLADD(&nPQ, &nP, &nPQ, &nP, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + } + + for (uint32_t i = 0; i < pairing_dlog_data->e - 1; i++) { + cubicalADD(&PnR, &PnR, &nR, &pairing_dlog_data->ixP); + cubicalDBLADD(&nRQ, &nR, &nRQ, &nR, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + + cubicalADD(&PnS, &PnS, &nS, &pairing_dlog_data->ixP); + cubicalDBLADD(&nSQ, &nS, &nSQ, &nS, &pairing_dlog_data->ixQ, &pairing_dlog_data->A24); + } + + translate(&nPQ, &nP); + translate(&PnR, &nR); + translate(&nRQ, &nR); + translate(&PnS, &nS); + translate(&nSQ, &nS); + + translate(&nP, &nP); + translate(&nQ, &nQ); + translate(&nR, &nR); + translate(&nS, &nS); + + // computation of the reference Tate pairing + ec_point_t T0; + fp2_t w1[5], w2[5]; + + // t(P, Q)^(2^e_diff) = w0 + point_ratio(&T0, &nPQ, &nP, &pairing_dlog_data->PQ.Q); + fp2_copy(&w1[0], &T0.x); + fp2_copy(&w2[0], &T0.z); + + // t(R,P) = w0^r2 + point_ratio(&T0, &PnR, &nR, &pairing_dlog_data->PQ.P); + fp2_copy(&w1[1], &T0.x); + fp2_copy(&w2[1], &T0.z); + + // t(R,Q) = w0^r1 + point_ratio(&T0, &nRQ, &nR, &pairing_dlog_data->PQ.Q); + fp2_copy(&w2[2], &T0.x); + fp2_copy(&w1[2], &T0.z); + + // t(S,P) = w0^s2 + point_ratio(&T0, &PnS, &nS, &pairing_dlog_data->PQ.P); + fp2_copy(&w1[3], &T0.x); + fp2_copy(&w2[3], &T0.z); + + // t(S,Q) = w0^s1 + point_ratio(&T0, &nSQ, &nS, &pairing_dlog_data->PQ.Q); + fp2_copy(&w2[4], &T0.x); + fp2_copy(&w1[4], &T0.z); + + // batched reduction using projective representation + for (int i = 0; i < 5; i++) { + fp2_t frob, tmp; + fp2_copy(&tmp, &w1[i]); + // inline frobenius for ^p + // multiply by inverse to get ^(p-1) + fp2_frob(&frob, &w1[i]); + fp2_mul(&w1[i], &w2[i], &frob); + + // repeat for denom + fp2_frob(&frob, &w2[i]); + fp2_mul(&w2[i], &tmp, &frob); + } + + // batched normalization + fp2_batched_inv(w2, 5); + for (int i = 0; i < 5; i++) { + fp2_mul(&w1[i], &w1[i], &w2[i]); + } + + for (int i = 0; i < 5; i++) { + clear_cofac(&w1[i], &w1[i]); + + // removes 2^e_diff + for (uint32_t j = 0; j < e_diff; j++) { + fp2_sqr(&w1[i], &w1[i]); + } + } + + fp2_dlog_2e(r2, &w1[1], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(r1, &w1[2], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s2, &w1[3], &w1[0], pairing_dlog_data->e); + fp2_dlog_2e(s1, &w1[4], &w1[0], pairing_dlog_data->e); +} + +void +ec_dlog_2_tate(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + const ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e) +{ + // assume PQ is a full torsion basis + // returns a, b, c, d such that R = [a]P + [b]Q, S = [c]P + [d]Q + +#ifndef NDEBUG + int e_full = TORSION_EVEN_POWER; + int e_diff = e_full - e; +#endif + assert(test_basis_order_twof(PQ, curve, e_full)); + + // precomputing the correct curve data + ec_curve_normalize_A24(curve); + + pairing_dlog_params_t pairing_dlog_data; + pairing_dlog_data.e = e; + pairing_dlog_data.PQ = *PQ; + pairing_dlog_data.RS = *RS; + 
pairing_dlog_data.A24 = curve->A24; + + cubical_normalization_dlog(&pairing_dlog_data, curve); + compute_difference_points(&pairing_dlog_data, curve); + tate_dlog_partial(r1, r2, s1, s2, &pairing_dlog_data); + +#ifndef NDEBUG + ec_point_t test; + ec_biscalar_mul(&test, r1, r2, e, PQ, curve); + ec_dbl_iter(&test, e_diff, &test, curve); + // R = [r1]P + [r2]Q + assert(ec_is_equal(&test, &RS->P)); + + ec_biscalar_mul(&test, s1, s2, e, PQ, curve); + ec_dbl_iter(&test, e_diff, &test, curve); + // S = [s1]P + [s2]Q + assert(ec_is_equal(&test, &RS->Q)); +#endif +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/biextension.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/biextension.h new file mode 100644 index 0000000000..1a50fcc738 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/biextension.h @@ -0,0 +1,82 @@ +#ifndef _BIEXT_H_ +#define _BIEXT_H_ + +#include +#include + +typedef struct pairing_params +{ + uint32_t e; // Points have order 2^e + ec_point_t P; // x(P) + ec_point_t Q; // x(Q) + ec_point_t PQ; // x(P-Q) = (PQX/PQZ : 1) + fp2_t ixP; // PZ/PX + fp2_t ixQ; // QZ/QX + ec_point_t A24; // ((A+2)/4 : 1) +} pairing_params_t; + +// For two bases <P, Q> and <R, S> store: +// x(P - R), x(P - S), x(R - Q), x(S - Q) +typedef struct pairing_dlog_diff_points +{ + ec_point_t PmR; // x(P - R) + ec_point_t PmS; // x(P - S) + ec_point_t RmQ; // x(R - Q) + ec_point_t SmQ; // x(S - Q) +} pairing_dlog_diff_points_t; + +typedef struct pairing_dlog_params +{ + uint32_t e; // Points have order 2^e + ec_basis_t PQ; // x(P), x(Q), x(P-Q) + ec_basis_t RS; // x(R), x(S), x(R-S) + pairing_dlog_diff_points_t diff; // x(P - R), x(P - S), x(R - Q), x(S - Q) + fp2_t ixP; // PZ/PX + fp2_t ixQ; // QZ/QX + fp2_t ixR; // RZ/RX + fp2_t ixS; // SZ/SX + ec_point_t A24; // ((A+2)/4 : 1) +} pairing_dlog_params_t; + +// Computes e = e_{2^e}(P, Q) using biextension ladder +void weil(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E); + +// Computes (reduced) z = t_{2^e}(P, Q) using biextension ladder +void reduced_tate(fp2_t *r, uint32_t e, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ, ec_curve_t *E); + +// Given two bases <P, Q> and <R, S> computes scalars +// such that R = [r1]P + [r2]Q, S = [s1]P + [s2]Q +void ec_dlog_2_weil(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e); + +// Given two bases <P, Q> and <R, S> +// where <P, Q> is a basis for E[2^f] +// the full 2-torsion, and <R, S> a basis +// for smaller torsion E[2^e] +// computes scalars r1, r2, s1, s2 +// such that R = [r1]P + [r2]Q, S = [s1]P + [s2]Q +void ec_dlog_2_tate(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + const ec_basis_t *PQ, + const ec_basis_t *RS, + ec_curve_t *curve, + int e); + +void ec_dlog_2_tate_to_full(digit_t *r1, + digit_t *r2, + digit_t *s1, + digit_t *s2, + ec_basis_t *PQ, + ec_basis_t *RS, + ec_curve_t *curve, + int e); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/common.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/common.c new file mode 100644 index 0000000000..d393e9cb11 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/common.c @@ -0,0 +1,88 @@ +#include +#include +#include +#include +#include +#include + +void +public_key_init(public_key_t *pk) +{ + ec_curve_init(&pk->curve); +} + +void +public_key_finalize(public_key_t *pk) +{ +} + +// compute the challenge as the hash of the message and the commitment curve and public key +void +hash_to_challenge(scalar_t
*scalar, + const public_key_t *pk, + const ec_curve_t *com_curve, + const unsigned char *message, + size_t length) +{ + unsigned char buf[2 * FP2_ENCODED_BYTES]; + { + fp2_t j1, j2; + ec_j_inv(&j1, &pk->curve); + ec_j_inv(&j2, com_curve); + fp2_encode(buf, &j1); + fp2_encode(buf + FP2_ENCODED_BYTES, &j2); + } + + { + // The type scalar_t represents an element of GF(p), which is about + // 2*lambda bits, where lambda = 128, 192 or 256, according to the + // security level. Thus, the variable scalar should have enough memory + // for the values produced by SHAKE256 in the intermediate iterations. + + shake256incctx ctx; + + size_t hash_bytes = ((2 * SECURITY_BITS) + 7) / 8; + size_t limbs = (hash_bytes + sizeof(digit_t) - 1) / sizeof(digit_t); + size_t bits = (2 * SECURITY_BITS) % RADIX; + digit_t mask = ((digit_t)-1) >> ((RADIX - bits) % RADIX); +#ifdef TARGET_BIG_ENDIAN + mask = BSWAP_DIGIT(mask); +#endif + + shake256_inc_init(&ctx); + shake256_inc_absorb(&ctx, buf, 2 * FP2_ENCODED_BYTES); + shake256_inc_absorb(&ctx, message, length); + shake256_inc_finalize(&ctx); + shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + (*scalar)[limbs - 1] &= mask; + for (int i = 2; i < HASH_ITERATIONS; i++) { + shake256_inc_init(&ctx); + shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); + shake256_inc_finalize(&ctx); + shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + (*scalar)[limbs - 1] &= mask; + } + shake256_inc_init(&ctx); + shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); + shake256_inc_finalize(&ctx); + + hash_bytes = ((TORSION_EVEN_POWER - SQIsign_response_length) + 7) / 8; + limbs = (hash_bytes + sizeof(digit_t) - 1) / sizeof(digit_t); + bits = (TORSION_EVEN_POWER - SQIsign_response_length) % RADIX; + mask = ((digit_t)-1) >> ((RADIX - bits) % RADIX); +#ifdef TARGET_BIG_ENDIAN + mask = BSWAP_DIGIT(mask); +#endif + + memset(*scalar, 0, NWORDS_ORDER * sizeof(digit_t)); + shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + (*scalar)[limbs - 1] &= mask; + +#ifdef TARGET_BIG_ENDIAN + for (int i = 0; i < NWORDS_ORDER; i++) + (*scalar)[i] = BSWAP_DIGIT((*scalar)[i]); +#endif + + mp_mod_2exp(*scalar, SECURITY_BITS, NWORDS_ORDER); + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim2.c new file mode 100644 index 0000000000..b31ae7771a --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim2.c @@ -0,0 +1,132 @@ +#include +#include "internal.h" + +// internal helpers, also for other files +void +ibz_vec_2_set(ibz_vec_2_t *vec, int a0, int a1) +{ + ibz_set(&((*vec)[0]), a0); + ibz_set(&((*vec)[1]), a1); +} +void +ibz_mat_2x2_set(ibz_mat_2x2_t *mat, int a00, int a01, int a10, int a11) +{ + ibz_set(&((*mat)[0][0]), a00); + ibz_set(&((*mat)[0][1]), a01); + ibz_set(&((*mat)[1][0]), a10); + ibz_set(&((*mat)[1][1]), a11); +} + +void +ibz_mat_2x2_copy(ibz_mat_2x2_t *copy, const ibz_mat_2x2_t *copied) +{ + ibz_copy(&((*copy)[0][0]), &((*copied)[0][0])); + ibz_copy(&((*copy)[0][1]), &((*copied)[0][1])); + ibz_copy(&((*copy)[1][0]), &((*copied)[1][0])); + ibz_copy(&((*copy)[1][1]), &((*copied)[1][1])); +} + +void +ibz_mat_2x2_add(ibz_mat_2x2_t *sum, const ibz_mat_2x2_t *a, const ibz_mat_2x2_t *b) +{ + ibz_add(&((*sum)[0][0]), &((*a)[0][0]), &((*b)[0][0])); + ibz_add(&((*sum)[0][1]), &((*a)[0][1]), &((*b)[0][1])); + ibz_add(&((*sum)[1][0]), &((*a)[1][0]), &((*b)[1][0])); + ibz_add(&((*sum)[1][1]), &((*a)[1][1]), &((*b)[1][1])); +} + +void +ibz_mat_2x2_det_from_ibz(ibz_t *det, const ibz_t *a11, 
const ibz_t *a12, const ibz_t *a21, const ibz_t *a22) +{ + ibz_t prod; + ibz_init(&prod); + ibz_mul(&prod, a12, a21); + ibz_mul(det, a11, a22); + ibz_sub(det, det, &prod); + ibz_finalize(&prod); +} + +void +ibz_mat_2x2_eval(ibz_vec_2_t *res, const ibz_mat_2x2_t *mat, const ibz_vec_2_t *vec) +{ + ibz_t prod; + ibz_vec_2_t matvec; + ibz_init(&prod); + ibz_vec_2_init(&matvec); + ibz_mul(&prod, &((*mat)[0][0]), &((*vec)[0])); + ibz_copy(&(matvec[0]), &prod); + ibz_mul(&prod, &((*mat)[0][1]), &((*vec)[1])); + ibz_add(&(matvec[0]), &(matvec[0]), &prod); + ibz_mul(&prod, &((*mat)[1][0]), &((*vec)[0])); + ibz_copy(&(matvec[1]), &prod); + ibz_mul(&prod, &((*mat)[1][1]), &((*vec)[1])); + ibz_add(&(matvec[1]), &(matvec[1]), &prod); + ibz_copy(&((*res)[0]), &(matvec[0])); + ibz_copy(&((*res)[1]), &(matvec[1])); + ibz_finalize(&prod); + ibz_vec_2_finalize(&matvec); +} + +// modular 2x2 operations + +void +ibz_2x2_mul_mod(ibz_mat_2x2_t *prod, const ibz_mat_2x2_t *mat_a, const ibz_mat_2x2_t *mat_b, const ibz_t *m) +{ + ibz_t mul; + ibz_mat_2x2_t sums; + ibz_init(&mul); + ibz_mat_2x2_init(&sums); + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_set(&(sums[i][j]), 0); + } + } + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + for (int k = 0; k < 2; k++) { + ibz_mul(&mul, &((*mat_a)[i][k]), &((*mat_b)[k][j])); + ibz_add(&(sums[i][j]), &(sums[i][j]), &mul); + ibz_mod(&(sums[i][j]), &(sums[i][j]), m); + } + } + } + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_copy(&((*prod)[i][j]), &(sums[i][j])); + } + } + ibz_finalize(&mul); + ibz_mat_2x2_finalize(&sums); +} + +int +ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m) +{ + ibz_t det, prod; + ibz_init(&det); + ibz_init(&prod); + ibz_mul(&det, &((*mat)[0][0]), &((*mat)[1][1])); + ibz_mod(&det, &det, m); + ibz_mul(&prod, &((*mat)[0][1]), &((*mat)[1][0])); + ibz_sub(&det, &det, &prod); + ibz_mod(&det, &det, m); + int res = ibz_invmod(&det, &det, m); + // return 0 matrix if non invertible determinant + ibz_set(&prod, res); + ibz_mul(&det, &det, &prod); + // compute inverse + ibz_copy(&prod, &((*mat)[0][0])); + ibz_copy(&((*inv)[0][0]), &((*mat)[1][1])); + ibz_copy(&((*inv)[1][1]), &prod); + ibz_neg(&((*inv)[1][0]), &((*mat)[1][0])); + ibz_neg(&((*inv)[0][1]), &((*mat)[0][1])); + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_mul(&((*inv)[i][j]), &((*inv)[i][j]), &det); + ibz_mod(&((*inv)[i][j]), &((*inv)[i][j]), m); + } + } + ibz_finalize(&det); + ibz_finalize(&prod); + return (res); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim2id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim2id2iso.c new file mode 100644 index 0000000000..171473d481 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim2id2iso.c @@ -0,0 +1,1172 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int +_fixed_degree_isogeny_impl(quat_left_ideal_t *lideal, + const ibz_t *u, + bool small, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP, + const int index_alternate_order) +{ + + // var declaration + int ret; + ibz_t two_pow, tmp; + quat_alg_elem_t theta; + + ec_curve_t E0; + copy_curve(&E0, &CURVES_WITH_ENDOMORPHISMS[index_alternate_order].curve); + ec_curve_normalize_A24(&E0); + + unsigned length; + + int u_bitsize = ibz_bitsize(u); + + // deciding the power of 2 of the dim2 isogeny we use for this + // the smaller the faster, but if it set too low there is a risk that + // 
RepresentInteger will fail + if (!small) { + // in that case, we just set it to be the biggest value possible + length = TORSION_EVEN_POWER - HD_extra_torsion; + } else { + length = ibz_bitsize(&QUATALG_PINFTY.p) + QUAT_repres_bound_input - u_bitsize; + assert(u_bitsize < (int)length); + assert(length < TORSION_EVEN_POWER - HD_extra_torsion); + } + assert(length); + + // var init + ibz_init(&two_pow); + ibz_init(&tmp); + quat_alg_elem_init(&theta); + + ibz_pow(&two_pow, &ibz_const_two, length); + ibz_copy(&tmp, u); + assert(ibz_cmp(&two_pow, &tmp) > 0); + assert(!ibz_is_even(&tmp)); + + // computing the endomorphism theta of norm u * (2^(length) - u) + ibz_sub(&tmp, &two_pow, &tmp); + ibz_mul(&tmp, &tmp, u); + assert(!ibz_is_even(&tmp)); + + // setting-up the quat_represent_integer_params + quat_represent_integer_params_t ri_params; + ri_params.primality_test_iterations = QUAT_represent_integer_params.primality_test_iterations; + + quat_p_extremal_maximal_order_t order_hnf; + quat_alg_elem_init(&order_hnf.z); + quat_alg_elem_copy(&order_hnf.z, &EXTREMAL_ORDERS[index_alternate_order].z); + quat_alg_elem_init(&order_hnf.t); + quat_alg_elem_copy(&order_hnf.t, &EXTREMAL_ORDERS[index_alternate_order].t); + quat_lattice_init(&order_hnf.order); + ibz_copy(&order_hnf.order.denom, &EXTREMAL_ORDERS[index_alternate_order].order.denom); + ibz_mat_4x4_copy(&order_hnf.order.basis, &EXTREMAL_ORDERS[index_alternate_order].order.basis); + order_hnf.q = EXTREMAL_ORDERS[index_alternate_order].q; + ri_params.order = &order_hnf; + ri_params.algebra = &QUATALG_PINFTY; + +#ifndef NDEBUG + assert(quat_lattice_contains(NULL, &ri_params.order->order, &ri_params.order->z)); + assert(quat_lattice_contains(NULL, &ri_params.order->order, &ri_params.order->t)); +#endif + + ret = quat_represent_integer(&theta, &tmp, 1, &ri_params); + + assert(!ibz_is_even(&tmp)); + + if (!ret) { + printf("represent integer failed for the alternate order number %d and for " + "a target of " + "size %d for a u of size %d with length = " + "%u \n", + index_alternate_order, + ibz_bitsize(&tmp), + ibz_bitsize(u), + length); + goto cleanup; + } + quat_lideal_create(lideal, &theta, u, &order_hnf.order, &QUATALG_PINFTY); + + quat_alg_elem_finalize(&order_hnf.z); + quat_alg_elem_finalize(&order_hnf.t); + quat_lattice_finalize(&order_hnf.order); + +#ifndef NDEBUG + ibz_t test_norm, test_denom; + ibz_init(&test_denom); + ibz_init(&test_norm); + quat_alg_norm(&test_norm, &test_denom, &theta, &QUATALG_PINFTY); + assert(ibz_is_one(&test_denom)); + assert(ibz_cmp(&test_norm, &tmp) == 0); + assert(!ibz_is_even(&tmp)); + assert(quat_lattice_contains(NULL, &EXTREMAL_ORDERS[index_alternate_order].order, &theta)); + ibz_finalize(&test_norm); + ibz_finalize(&test_denom); +#endif + + ec_basis_t B0_two; + // copying the basis + copy_basis(&B0_two, &CURVES_WITH_ENDOMORPHISMS[index_alternate_order].basis_even); + assert(test_basis_order_twof(&B0_two, &E0, TORSION_EVEN_POWER)); + ec_dbl_iter_basis(&B0_two, TORSION_EVEN_POWER - length - HD_extra_torsion, &B0_two, &E0); + + assert(test_basis_order_twof(&B0_two, &E0, length + HD_extra_torsion)); + + // now we set-up the kernel + theta_couple_point_t T1; + theta_couple_point_t T2, T1m2; + + copy_point(&T1.P1, &B0_two.P); + copy_point(&T2.P1, &B0_two.Q); + copy_point(&T1m2.P1, &B0_two.PmQ); + + // multiplication of theta by (u)^-1 mod 2^(length+2) + ibz_mul(&two_pow, &two_pow, &ibz_const_two); + ibz_mul(&two_pow, &two_pow, &ibz_const_two); + ibz_copy(&tmp, u); + ibz_invmod(&tmp, &tmp, &two_pow); + 
assert(!ibz_is_even(&tmp)); + + ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); + ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); + ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); + ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + + // applying theta to the basis + ec_basis_t B0_two_theta; + copy_basis(&B0_two_theta, &B0_two); + endomorphism_application_even_basis(&B0_two_theta, index_alternate_order, &E0, &theta, length + HD_extra_torsion); + + // Ensure the basis we're using has the expected order + assert(test_basis_order_twof(&B0_two_theta, &E0, length + HD_extra_torsion)); + + // Set-up the domain E0 x E0 + theta_couple_curve_t E00; + E00.E1 = E0; + E00.E2 = E0; + + // Set-up the kernel from the bases + theta_kernel_couple_points_t dim_two_ker; + copy_bases_to_kernel(&dim_two_ker, &B0_two, &B0_two_theta); + + ret = theta_chain_compute_and_eval(length, &E00, &dim_two_ker, true, E34, P12, numP); + if (!ret) + goto cleanup; + + assert(length); + ret = (int)length; + +cleanup: + // var finalize + ibz_finalize(&two_pow); + ibz_finalize(&tmp); + quat_alg_elem_finalize(&theta); + + return ret; +} + +int +fixed_degree_isogeny_and_eval(quat_left_ideal_t *lideal, + const ibz_t *u, + bool small, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP, + const int index_alternate_order) +{ + return _fixed_degree_isogeny_impl(lideal, u, small, E34, P12, numP, index_alternate_order); +} + +// takes the output of LLL and apply some small treatment on the basis +// reordering vectors and switching some signs if needed to make it in a nicer +// shape +static void +post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, const ibz_t *norm, bool is_special_order) +{ + // if the left order is the special one, then we apply some additional post + // treatment + if (is_special_order) { + // reordering the basis if needed + if (ibz_cmp(&(*gram)[0][0], &(*gram)[2][2]) == 0) { + for (int i = 0; i < 4; i++) { + ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + } + ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); + ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); + ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); + ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); + ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); + } else if (ibz_cmp(&(*gram)[0][0], &(*gram)[3][3]) == 0) { + for (int i = 0; i < 4; i++) { + ibz_swap(&(*reduced)[i][1], &(*reduced)[i][3]); + } + ibz_swap(&(*gram)[0][3], &(*gram)[0][1]); + ibz_swap(&(*gram)[3][0], &(*gram)[1][0]); + ibz_swap(&(*gram)[2][3], &(*gram)[2][1]); + ibz_swap(&(*gram)[3][2], &(*gram)[1][2]); + ibz_swap(&(*gram)[3][3], &(*gram)[1][1]); + } else if (ibz_cmp(&(*gram)[1][1], &(*gram)[3][3]) == 0) { + // in this case it seems that we need to swap the second and third + // element, and then recompute entirely the second element from the first + // first we swap the second and third element + for (int i = 0; i < 4; i++) { + ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + } + ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); + ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); + ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); + ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); + ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); + } + + // adjusting the sign if needed + if (ibz_cmp(&(*reduced)[0][0], &(*reduced)[1][1]) != 0) { + for (int i = 0; i < 4; i++) { + ibz_neg(&(*reduced)[i][1], &(*reduced)[i][1]); + ibz_neg(&(*gram)[i][1], &(*gram)[i][1]); + ibz_neg(&(*gram)[1][i], &(*gram)[1][i]); + } + } + if (ibz_cmp(&(*reduced)[0][2], &(*reduced)[1][3]) != 0) { + for (int i = 0; i < 4; i++) { + ibz_neg(&(*reduced)[i][3], 
&(*reduced)[i][3]); + ibz_neg(&(*gram)[i][3], &(*gram)[i][3]); + ibz_neg(&(*gram)[3][i], &(*gram)[3][i]); + } + // assert(ibz_cmp(&(*reduced)[0][2],&(*reduced)[1][3])==0); + } + } +} + +// enumerate all vectors in an hypercube of norm m for the infinity norm +// with respect to a basis whose gram matrix is given by gram +// Returns an int `count`, the number of vectors found with the desired +// properties +static int +enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t *gram, const ibz_t *adjusted_norm) +{ + + ibz_t remain, norm; + ibz_vec_4_t point; + + ibz_init(&remain); + ibz_init(&norm); + ibz_vec_4_init(&point); + + assert(m > 0); + + int count = 0; + int dim = 2 * m + 1; + int dim2 = dim * dim; + int dim3 = dim2 * dim; + + // if the basis is of the form alpha, i*alpha, beta, i*beta + // we can remove some values due to symmetry of the basis that + bool need_remove_symmetry = + (ibz_cmp(&(*gram)[0][0], &(*gram)[1][1]) == 0 && ibz_cmp(&(*gram)[3][3], &(*gram)[2][2]) == 0); + + int check1, check2, check3; + + // Enumerate over points in a hypercube with coordinates (x, y, z, w) + for (int x = -m; x <= 0; x++) { // We only check non-positive x-values + for (int y = -m; y < m + 1; y++) { + // Once x = 0 we only consider non-positive y values + if (x == 0 && y > 0) { + break; + } + for (int z = -m; z < m + 1; z++) { + // If x and y are both zero, we only consider non-positive z values + if (x == 0 && y == 0 && z > 0) { + break; + } + for (int w = -m; w < m + 1; w++) { + // If x, y, z are all zero, we only consider negative w values + if (x == 0 && y == 0 && z == 0 && w >= 0) { + break; + } + + // Now for each candidate (x, y, z, w) we need to check a number of + // conditions We have already filtered for symmetry with several break + // statements, but there are more checks. + + // 1. We do not allow all (x, y, z, w) to be multiples of 2 + // 2. We do not allow all (x, y, z, w) to be multiples of 3 + // 3. We do not want elements of the same norm, so we quotient out the + // action + // of a group of order four generated by i for a basis expected to + // be of the form: [gamma, i gamma, beta, i beta ]. 
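+                    // (The check1/check2/check3 comparison below appears to implement this
+                    // quotient: check1 encodes (x, y, z, w) in mixed radix with base dim = 2m+1
+                    // (e.g. for m = 1, dim = 3, the point (-1, 0, 0, 0) encodes to
+                    // 1 + 3 + 9 + 27*0 = 13), while check2 and check3 encode its images
+                    // (y, -x, w, -z) and (-y, x, -w, z) under that action; the remaining image
+                    // -(x, y, z, w) is already excluded by the sign restrictions in the loop
+                    // bounds. Keeping a candidate only when check1 is minimal thus retains one
+                    // representative per orbit.)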
+ + // Ensure that not all values are even + if (!((x | y | z | w) & 1)) { + continue; + } + // Ensure that not all values are multiples of three + if (x % 3 == 0 && y % 3 == 0 && z % 3 == 0 && w % 3 == 0) { + continue; + } + + check1 = (m + w) + dim * (m + z) + dim2 * (m + y) + dim3 * (m + x); + check2 = (m - z) + dim * (m + w) + dim2 * (m - x) + dim3 * (m + y); + check3 = (m + z) + dim * (m - w) + dim2 * (m + x) + dim3 * (m - y); + + // either the basis does not have symmetry and we are good, + // or there is a special symmetry that we can exploit + // and we ensure that we don't record the same norm in the list + if (!need_remove_symmetry || (check1 <= check2 && check1 <= check3)) { + // Set the point as a vector (x, y, z, w) + ibz_set(&point[0], x); + ibz_set(&point[1], y); + ibz_set(&point[2], z); + ibz_set(&point[3], w); + + // Evaluate this through the gram matrix and divide out by the + // adjusted_norm + quat_qf_eval(&norm, gram, &point); + ibz_div(&norm, &remain, &norm, adjusted_norm); + assert(ibz_is_zero(&remain)); + + if (ibz_mod_ui(&norm, 2) == 1) { + ibz_set(&vecs[count][0], x); + ibz_set(&vecs[count][1], y); + ibz_set(&vecs[count][2], z); + ibz_set(&vecs[count][3], w); + ibz_copy(&norms[count], &norm); + count++; + } + } + } + } + } + } + + ibz_finalize(&remain); + ibz_finalize(&norm); + ibz_vec_4_finalize(&point); + + return count - 1; +} + +// enumerate through the two list given in input to find to integer d1,d2 such +// that there exists u,v with u d1 + v d2 = target the bool is diagonal +// indicates if the two lists are the same +static int +find_uv_from_lists(ibz_t *au, + ibz_t *bu, + ibz_t *av, + ibz_t *bv, + ibz_t *u, + ibz_t *v, + int *index_sol1, + int *index_sol2, + const ibz_t *target, + const ibz_t *small_norms1, + const ibz_t *small_norms2, + const ibz_t *quotients, + const int index1, + const int index2, + const int is_diagonal, + const int number_sum_square) +{ + + ibz_t n, remain, adjusted_norm; + ibz_init(&n); + ibz_init(&remain); + ibz_init(&adjusted_norm); + + int found = 0; + int cmp; + ibz_copy(&n, target); + + // enumerating through the list + for (int i1 = 0; i1 < index1; i1++) { + ibz_mod(&adjusted_norm, &n, &small_norms1[i1]); + int starting_index2; + if (is_diagonal) { + starting_index2 = i1; + } else { + starting_index2 = 0; + } + for (int i2 = starting_index2; i2 < index2; i2++) { + // u = target / d1 mod d2 + if (!ibz_invmod(&remain, &small_norms2[i2], &small_norms1[i1])) { + continue; + } + ibz_mul(v, &remain, &adjusted_norm); + ibz_mod(v, v, &small_norms1[i1]); + cmp = ibz_cmp(v, "ients[i2]); + while (!found && cmp < 0) { + if (number_sum_square > 0) { + found = ibz_cornacchia_prime(av, bv, &ibz_const_one, v); + } else if (number_sum_square == 0) { + found = 1; + } + if (found) { + ibz_mul(&remain, v, &small_norms2[i2]); + ibz_copy(au, &n); + ibz_sub(u, au, &remain); + assert(ibz_cmp(u, &ibz_const_zero) > 0); + ibz_div(u, &remain, u, &small_norms1[i1]); + assert(ibz_is_zero(&remain)); + // we want to remove weird cases where u,v have big power of two + found = found && (ibz_get(u) != 0 && ibz_get(v) != 0); + if (number_sum_square == 2) { + found = ibz_cornacchia_prime(au, bu, &ibz_const_one, u); + } + } + if (!found) { + ibz_add(v, v, &small_norms1[i1]); + cmp = ibz_cmp(v, "ients[i2]); + } + } + + if (found) { + // copying the indices + *index_sol1 = i1; + *index_sol2 = i2; + break; + } + } + if (found) { + break; + } + } + + ibz_finalize(&n); + ibz_finalize(&remain); + ibz_finalize(&adjusted_norm); + + return found; +} + +struct 
vec_and_norm +{ + ibz_vec_4_t vec; + ibz_t norm; + int idx; +}; + +static int +compare_vec_by_norm(const void *_first, const void *_second) +{ + const struct vec_and_norm *first = _first, *second = _second; + int res = ibz_cmp(&first->norm, &second->norm); + if (res != 0) + return res; + else + return first->idx - second->idx; +} + +// use several special curves +// we assume that the first one is always j=1728 +int +find_uv(ibz_t *u, + ibz_t *v, + quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *d1, + ibz_t *d2, + int *index_alternate_order_1, + int *index_alternate_order_2, + const ibz_t *target, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo, + int num_alternate_order) + +{ + + // variable declaration & init + ibz_vec_4_t vec; + ibz_t n; + ibz_t au, bu, av, bv; + ibz_t norm_d; + ibz_t remain; + ibz_init(&au); + ibz_init(&bu); + ibz_init(&av); + ibz_init(&bv); + ibz_init(&norm_d); + ibz_init(&n); + ibz_vec_4_init(&vec); + ibz_init(&remain); + + ibz_copy(&n, target); + + ibz_t adjusted_norm[num_alternate_order + 1]; + ibz_mat_4x4_t gram[num_alternate_order + 1], reduced[num_alternate_order + 1]; + quat_left_ideal_t ideal[num_alternate_order + 1]; + + for (int i = 0; i < num_alternate_order + 1; i++) { + ibz_init(&adjusted_norm[i]); + ibz_mat_4x4_init(&gram[i]); + ibz_mat_4x4_init(&reduced[i]); + quat_left_ideal_init(&ideal[i]); + } + + // first we reduce the ideal given in input + quat_lideal_copy(&ideal[0], lideal); + quat_lideal_reduce_basis(&reduced[0], &gram[0], &ideal[0], Bpoo); + + ibz_mat_4x4_copy(&ideal[0].lattice.basis, &reduced[0]); + ibz_set(&adjusted_norm[0], 1); + ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); + ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); + post_LLL_basis_treatment(&gram[0], &reduced[0], &ideal[0].norm, true); + + // for efficient lattice reduction, we replace ideal[0] by the equivalent + // ideal of smallest norm + quat_left_ideal_t reduced_id; + quat_left_ideal_init(&reduced_id); + quat_lideal_copy(&reduced_id, &ideal[0]); + quat_alg_elem_t delta; + // delta will be the element of smallest norm + quat_alg_elem_init(&delta); + ibz_set(&delta.coord[0], 1); + ibz_set(&delta.coord[1], 0); + ibz_set(&delta.coord[2], 0); + ibz_set(&delta.coord[3], 0); + ibz_copy(&delta.denom, &reduced_id.lattice.denom); + ibz_mat_4x4_eval(&delta.coord, &reduced[0], &delta.coord); + assert(quat_lattice_contains(NULL, &reduced_id.lattice, &delta)); + + // reduced_id = ideal[0] * \overline{delta}/n(ideal[0]) + quat_alg_conj(&delta, &delta); + ibz_mul(&delta.denom, &delta.denom, &ideal[0].norm); + quat_lattice_alg_elem_mul(&reduced_id.lattice, &reduced_id.lattice, &delta, Bpoo); + ibz_copy(&reduced_id.norm, &gram[0][0][0]); + ibz_div(&reduced_id.norm, &remain, &reduced_id.norm, &adjusted_norm[0]); + assert(ibz_cmp(&remain, &ibz_const_zero) == 0); + + // and conj_ideal is the conjugate of reduced_id + // init the right order; + quat_lattice_t right_order; + quat_lattice_init(&right_order); + // computing the conjugate + quat_left_ideal_t conj_ideal; + quat_left_ideal_init(&conj_ideal); + quat_lideal_conjugate_without_hnf(&conj_ideal, &right_order, &reduced_id, Bpoo); + + // computing all the other connecting ideals and reducing them + for (int i = 1; i < num_alternate_order + 1; i++) { + quat_lideal_lideal_mul_reduced(&ideal[i], &gram[i], &conj_ideal, &ALTERNATE_CONNECTING_IDEALS[i - 1], Bpoo); + ibz_mat_4x4_copy(&reduced[i], &ideal[i].lattice.basis); + ibz_set(&adjusted_norm[i], 1); + 
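+        // As for ideal[0] above, adjusted_norm[i] is presumably set to denom^2 because
+        // evaluating gram[i] on integer coordinate vectors yields norms scaled by the
+        // squared lattice denominator; enumerate_hypercube divides that factor back out
+        // (and asserts that the division is exact).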
ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom); + ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom); + post_LLL_basis_treatment(&gram[i], &reduced[i], &ideal[i].norm, false); + } + + // enumerating small vectors + + // global parameters for the enumeration + int m = FINDUV_box_size; + int m4 = FINDUV_cube_size; + + ibz_vec_4_t small_vecs[num_alternate_order + 1][m4]; + ibz_t small_norms[num_alternate_order + 1][m4]; + ibz_vec_4_t alternate_small_vecs[num_alternate_order + 1][m4]; + ibz_t alternate_small_norms[num_alternate_order + 1][m4]; + ibz_t quotients[num_alternate_order + 1][m4]; + int indices[num_alternate_order + 1]; + + for (int j = 0; j < num_alternate_order + 1; j++) { + for (int i = 0; i < m4; i++) { + ibz_init(&small_norms[j][i]); + ibz_vec_4_init(&small_vecs[j][i]); + ibz_init(&alternate_small_norms[j][i]); + ibz_init("ients[j][i]); + ibz_vec_4_init(&alternate_small_vecs[j][i]); + } + // enumeration in the hypercube of norm m + indices[j] = enumerate_hypercube(small_vecs[j], small_norms[j], m, &gram[j], &adjusted_norm[j]); + + // sorting the list + { + struct vec_and_norm small_vecs_and_norms[indices[j]]; + for (int i = 0; i < indices[j]; ++i) { + memcpy(&small_vecs_and_norms[i].vec, &small_vecs[j][i], sizeof(ibz_vec_4_t)); + memcpy(&small_vecs_and_norms[i].norm, &small_norms[j][i], sizeof(ibz_t)); + small_vecs_and_norms[i].idx = i; + } + qsort(small_vecs_and_norms, indices[j], sizeof(*small_vecs_and_norms), compare_vec_by_norm); + for (int i = 0; i < indices[j]; ++i) { + memcpy(&small_vecs[j][i], &small_vecs_and_norms[i].vec, sizeof(ibz_vec_4_t)); + memcpy(&small_norms[j][i], &small_vecs_and_norms[i].norm, sizeof(ibz_t)); + } +#ifndef NDEBUG + for (int i = 1; i < indices[j]; ++i) + assert(ibz_cmp(&small_norms[j][i - 1], &small_norms[j][i]) <= 0); +#endif + } + + for (int i = 0; i < indices[j]; i++) { + ibz_div("ients[j][i], &remain, &n, &small_norms[j][i]); + } + } + + int found = 0; + int i1; + int i2; + for (int j1 = 0; j1 < num_alternate_order + 1; j1++) { + for (int j2 = j1; j2 < num_alternate_order + 1; j2++) { + // in this case, there are some small adjustements to make + int is_diago = (j1 == j2); + found = find_uv_from_lists(&au, + &bu, + &av, + &bv, + u, + v, + &i1, + &i2, + target, + small_norms[j1], + small_norms[j2], + quotients[j2], + indices[j1], + indices[j2], + is_diago, + 0); + // } + + if (found) { + // recording the solutions that we found + ibz_copy(&beta1->denom, &ideal[j1].lattice.denom); + ibz_copy(&beta2->denom, &ideal[j2].lattice.denom); + ibz_copy(d1, &small_norms[j1][i1]); + ibz_copy(d2, &small_norms[j2][i2]); + ibz_mat_4x4_eval(&beta1->coord, &reduced[j1], &small_vecs[j1][i1]); + ibz_mat_4x4_eval(&beta2->coord, &reduced[j2], &small_vecs[j2][i2]); + assert(quat_lattice_contains(NULL, &ideal[j1].lattice, beta1)); + assert(quat_lattice_contains(NULL, &ideal[j2].lattice, beta2)); + if (j1 != 0 || j2 != 0) { + ibz_div(&delta.denom, &remain, &delta.denom, &lideal->norm); + assert(ibz_cmp(&remain, &ibz_const_zero) == 0); + ibz_mul(&delta.denom, &delta.denom, &conj_ideal.norm); + } + if (j1 != 0) { + // we send back beta1 to the original ideal + quat_alg_mul(beta1, &delta, beta1, Bpoo); + quat_alg_normalize(beta1); + } + if (j2 != 0) { + // we send back beta2 to the original ideal + quat_alg_mul(beta2, &delta, beta2, Bpoo); + quat_alg_normalize(beta2); + } + + // if the selected element belong to an alternate order, we conjugate it + if (j1 != 0) { + quat_alg_conj(beta1, beta1); + } + if (j2 != 0) { + 
quat_alg_conj(beta2, beta2); + } + +#ifndef NDEBUG + quat_alg_norm(&remain, &norm_d, beta1, &QUATALG_PINFTY); + assert(ibz_is_one(&norm_d)); + ibz_mul(&n, d1, &ideal->norm); + if (j1 > 0) { + ibz_mul(&n, &n, &ALTERNATE_CONNECTING_IDEALS[j1 - 1].norm); + } + assert(ibz_cmp(&n, &remain) == 0); + quat_alg_norm(&remain, &norm_d, beta2, &QUATALG_PINFTY); + assert(ibz_is_one(&norm_d)); + ibz_mul(&n, d2, &ideal->norm); + if (j2 > 0) { + ibz_mul(&n, &n, &ALTERNATE_CONNECTING_IDEALS[j2 - 1].norm); + } + assert(ibz_cmp(&n, &remain) == 0); + assert(quat_lattice_contains(NULL, &ideal->lattice, beta1)); + assert(quat_lattice_contains(NULL, &ideal->lattice, beta2)); + + quat_left_ideal_t ideal_test; + quat_lattice_t ro; + quat_left_ideal_init(&ideal_test); + quat_lattice_init(&ro); + if (j1 > 0) { + quat_lideal_copy(&ideal_test, &ALTERNATE_CONNECTING_IDEALS[j1 - 1]); + quat_lideal_conjugate_without_hnf(&ideal_test, &ro, &ideal_test, Bpoo); + quat_lideal_lideal_mul_reduced(&ideal_test, &gram[0], &ideal_test, ideal, Bpoo); + assert(quat_lattice_contains(NULL, &ideal_test.lattice, beta1)); + } + if (j2 > 0) { + quat_lideal_copy(&ideal_test, &ALTERNATE_CONNECTING_IDEALS[j2 - 1]); + quat_lideal_conjugate_without_hnf(&ideal_test, &ro, &ideal_test, Bpoo); + quat_lideal_lideal_mul_reduced(&ideal_test, &gram[0], &ideal_test, ideal, Bpoo); + assert(quat_lattice_contains(NULL, &ideal_test.lattice, beta2)); + } + + quat_lattice_finalize(&ro); + quat_left_ideal_finalize(&ideal_test); +#endif + + *index_alternate_order_1 = j1; + *index_alternate_order_2 = j2; + break; + } + } + if (found) { + break; + } + } + + for (int j = 0; j < num_alternate_order + 1; j++) { + for (int i = 0; i < m4; i++) { + ibz_finalize(&small_norms[j][i]); + ibz_vec_4_finalize(&small_vecs[j][i]); + ibz_finalize(&alternate_small_norms[j][i]); + ibz_finalize("ients[j][i]); + ibz_vec_4_finalize(&alternate_small_vecs[j][i]); + } + } + + // var finalize + for (int i = 0; i < num_alternate_order + 1; i++) { + ibz_mat_4x4_finalize(&gram[i]); + ibz_mat_4x4_finalize(&reduced[i]); + quat_left_ideal_finalize(&ideal[i]); + ibz_finalize(&adjusted_norm[i]); + } + + ibz_finalize(&n); + ibz_vec_4_finalize(&vec); + ibz_finalize(&au); + ibz_finalize(&bu); + ibz_finalize(&av); + ibz_finalize(&bv); + ibz_finalize(&remain); + ibz_finalize(&norm_d); + quat_lattice_finalize(&right_order); + quat_left_ideal_finalize(&conj_ideal); + quat_left_ideal_finalize(&reduced_id); + quat_alg_elem_finalize(&delta); + + return found; +} + +int +dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *u, + ibz_t *v, + ibz_t *d1, + ibz_t *d2, + ec_curve_t *codomain, + ec_basis_t *basis, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo) +{ + ibz_t target, tmp, two_pow; + ; + quat_alg_elem_t theta; + + ibz_t norm_d; + ibz_init(&norm_d); + ibz_t test1, test2; + ibz_init(&test1); + ibz_init(&test2); + + ibz_init(&target); + ibz_init(&tmp); + ibz_init(&two_pow); + int exp = TORSION_EVEN_POWER; + quat_alg_elem_init(&theta); + + // first, we find u,v,d1,d2,beta1,beta2 + // such that u*d1 + v*d2 = 2^TORSION_EVEN_POWER and there are ideals of + // norm d1,d2 equivalent to ideal beta1 and beta2 are elements of norm nd1, + // nd2 where n=n(lideal) + int ret; + int index_order1 = 0, index_order2 = 0; +#ifndef NDEBUG + unsigned int Fu_length, Fv_length; +#endif + ret = find_uv(u, + v, + beta1, + beta2, + d1, + d2, + &index_order1, + &index_order2, + &TORSION_PLUS_2POWER, + lideal, + Bpoo, + NUM_ALTERNATE_EXTREMAL_ORDERS); + if (!ret) { + goto 
cleanup; + } + + assert(ibz_is_odd(d1) && ibz_is_odd(d2)); + // compute the valuation of the GCD of u,v + ibz_gcd(&tmp, u, v); + assert(ibz_cmp(&tmp, &ibz_const_zero) != 0); + int exp_gcd = ibz_two_adic(&tmp); + exp = TORSION_EVEN_POWER - exp_gcd; + // removing the power of 2 from u and v + ibz_div(u, &test1, u, &tmp); + assert(ibz_cmp(&test1, &ibz_const_zero) == 0); + ibz_div(v, &test1, v, &tmp); + assert(ibz_cmp(&test1, &ibz_const_zero) == 0); + +#ifndef NDEBUG + // checking that ud1+vd2 = 2^exp + ibz_t pow_check, tmp_check; + ibz_init(&pow_check); + ibz_init(&tmp_check); + ibz_pow(&pow_check, &ibz_const_two, exp); + ibz_mul(&tmp_check, d1, u); + ibz_sub(&pow_check, &pow_check, &tmp_check); + ibz_mul(&tmp_check, v, d2); + ibz_sub(&pow_check, &pow_check, &tmp_check); + assert(ibz_cmp(&pow_check, &ibz_const_zero) == 0); + ibz_finalize(&tmp_check); + ibz_finalize(&pow_check); +#endif + + // now we compute the dimension 2 isogeny + // F : Eu x Ev -> E x E' + // where we have phi_u : Eu -> E_index_order1 and phi_v : Ev -> E_index_order2 + // if we have phi1 : E_index_order_1 -> E of degree d1 + // and phi2 : E_index_order_2 -> E of degree d2 + // we can define theta = phi2 o hat{phi1} + // and the kernel of F is given by + // ( [ud1](P), phiv o theta o hat{phiu} (P)),( [ud1](Q), phiv o theta o + // hat{phiu} (Q)) where P,Q is a basis of E0[2e] + + // now we set-up the kernel + // ec_curve_t E0 = CURVE_E0; + ec_curve_t E1; + copy_curve(&E1, &CURVES_WITH_ENDOMORPHISMS[index_order1].curve); + ec_curve_t E2; + copy_curve(&E2, &CURVES_WITH_ENDOMORPHISMS[index_order2].curve); + ec_basis_t bas1, bas2; + theta_couple_curve_t E01; + theta_kernel_couple_points_t ker; + + ec_basis_t bas_u; + copy_basis(&bas1, &CURVES_WITH_ENDOMORPHISMS[index_order1].basis_even); + copy_basis(&bas2, &CURVES_WITH_ENDOMORPHISMS[index_order2].basis_even); + + // we start by computing theta = beta2 \hat{beta1}/n + ibz_set(&theta.denom, 1); + quat_alg_conj(&theta, beta1); + quat_alg_mul(&theta, beta2, &theta, &QUATALG_PINFTY); + ibz_mul(&theta.denom, &theta.denom, &lideal->norm); + + // now we perform the actual computation + quat_left_ideal_t idealu, idealv; + quat_left_ideal_init(&idealu); + quat_left_ideal_init(&idealv); + theta_couple_curve_t Fu_codomain, Fv_codomain; + theta_couple_point_t pushed_points[3]; + theta_couple_point_t *const V1 = pushed_points + 0, *const V2 = pushed_points + 1, *const V1m2 = pushed_points + 2; + theta_couple_point_t P, Q, PmQ; + + copy_point(&P.P1, &bas1.P); + copy_point(&PmQ.P1, &bas1.PmQ); + copy_point(&Q.P1, &bas1.Q); + // Set points to zero + ec_point_init(&P.P2); + ec_point_init(&Q.P2); + ec_point_init(&PmQ.P2); + + pushed_points[0] = P; + pushed_points[1] = Q; + pushed_points[2] = PmQ; + // we perform the computation of phiu with a fixed degree isogeny + ret = fixed_degree_isogeny_and_eval( + &idealu, u, true, &Fu_codomain, pushed_points, sizeof(pushed_points) / sizeof(*pushed_points), index_order1); + + if (!ret) { + goto cleanup; + } + assert(test_point_order_twof(&V1->P1, &Fu_codomain.E1, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&V1->P2, &Fu_codomain.E2, TORSION_EVEN_POWER)); + +#ifndef NDEBUG + Fu_length = (unsigned int)ret; + // presumably the correct curve is the first one, we check this + fp2_t w0a, w1a, w2a; + ec_curve_t E1_tmp, Fu_codomain_E1_tmp, Fu_codomain_E2_tmp; + copy_curve(&E1_tmp, &E1); + copy_curve(&Fu_codomain_E1_tmp, &Fu_codomain.E1); + copy_curve(&Fu_codomain_E2_tmp, &Fu_codomain.E2); + weil(&w0a, TORSION_EVEN_POWER, &bas1.P, &bas1.Q, &bas1.PmQ, 
&E1_tmp); + weil(&w1a, TORSION_EVEN_POWER, &V1->P1, &V2->P1, &V1m2->P1, &Fu_codomain_E1_tmp); + weil(&w2a, TORSION_EVEN_POWER, &V1->P2, &V2->P2, &V1m2->P2, &Fu_codomain_E2_tmp); + ibz_pow(&two_pow, &ibz_const_two, Fu_length); + ibz_sub(&two_pow, &two_pow, u); + + // now we are checking that the weil pairings are equal to the correct value + digit_t digit_u[NWORDS_ORDER] = { 0 }; + ibz_to_digit_array(digit_u, u); + fp2_t test_powa; + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + + assert(fp2_is_equal(&test_powa, &w1a)); + ibz_to_digit_array(digit_u, &two_pow); + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + assert(fp2_is_equal(&test_powa, &w2a)); +#endif + + // copying the basis images + copy_point(&bas_u.P, &V1->P1); + copy_point(&bas_u.Q, &V2->P1); + copy_point(&bas_u.PmQ, &V1m2->P1); + + // copying the points to the first part of the kernel + copy_point(&ker.T1.P1, &bas_u.P); + copy_point(&ker.T2.P1, &bas_u.Q); + copy_point(&ker.T1m2.P1, &bas_u.PmQ); + copy_curve(&E01.E1, &Fu_codomain.E1); + + copy_point(&P.P1, &bas2.P); + copy_point(&PmQ.P1, &bas2.PmQ); + copy_point(&Q.P1, &bas2.Q); + pushed_points[0] = P; + pushed_points[1] = Q; + pushed_points[2] = PmQ; + + // computation of phiv + ret = fixed_degree_isogeny_and_eval( + &idealv, v, true, &Fv_codomain, pushed_points, sizeof(pushed_points) / sizeof(*pushed_points), index_order2); + if (!ret) { + goto cleanup; + } + + assert(test_point_order_twof(&V1->P1, &Fv_codomain.E1, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&V1->P2, &Fv_codomain.E2, TORSION_EVEN_POWER)); + +#ifndef NDEBUG + Fv_length = (unsigned int)ret; + ec_curve_t E2_tmp, Fv_codomain_E1_tmp, Fv_codomain_E2_tmp; + copy_curve(&E2_tmp, &E2); + copy_curve(&Fv_codomain_E1_tmp, &Fv_codomain.E1); + copy_curve(&Fv_codomain_E2_tmp, &Fv_codomain.E2); + // presumably the correct curve is the first one, we check this + weil(&w0a, TORSION_EVEN_POWER, &bas2.P, &bas2.Q, &bas2.PmQ, &E2_tmp); + weil(&w1a, TORSION_EVEN_POWER, &V1->P1, &V2->P1, &V1m2->P1, &Fv_codomain_E1_tmp); + weil(&w2a, TORSION_EVEN_POWER, &V1->P2, &V2->P2, &V1m2->P2, &Fv_codomain_E2_tmp); + if (Fv_length == 0) { + ibz_set(&tmp, 1); + ibz_set(&two_pow, 1); + } else { + ibz_pow(&two_pow, &ibz_const_two, Fv_length); + ibz_sub(&two_pow, &two_pow, v); + } + + // now we are checking that one of the two is equal to the correct value + ibz_to_digit_array(digit_u, v); + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + assert(fp2_is_equal(&test_powa, &w1a)); + ibz_to_digit_array(digit_u, &two_pow); + fp2_pow_vartime(&test_powa, &w0a, digit_u, NWORDS_ORDER); + assert(fp2_is_equal(&test_powa, &w2a)); + +#endif + + copy_point(&bas2.P, &V1->P1); + copy_point(&bas2.Q, &V2->P1); + copy_point(&bas2.PmQ, &V1m2->P1); + + // multiplying theta by 1 / (d1 * n(connecting_ideal2)) + ibz_pow(&two_pow, &ibz_const_two, TORSION_EVEN_POWER); + ibz_copy(&tmp, d1); + if (index_order2 > 0) { + ibz_mul(&tmp, &tmp, &ALTERNATE_CONNECTING_IDEALS[index_order2 - 1].norm); + } + ibz_invmod(&tmp, &tmp, &two_pow); + + ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); + ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); + ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); + ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + + // applying theta + endomorphism_application_even_basis(&bas2, 0, &Fv_codomain.E1, &theta, TORSION_EVEN_POWER); + + assert(test_basis_order_twof(&bas2, &Fv_codomain.E1, TORSION_EVEN_POWER)); + + // copying points to the second part of the kernel + copy_point(&ker.T1.P2, &bas2.P); + 
copy_point(&ker.T2.P2, &bas2.Q); + copy_point(&ker.T1m2.P2, &bas2.PmQ); + copy_curve(&E01.E2, &Fv_codomain.E1); + + // copying the points to the first part of the kernel + quat_left_ideal_finalize(&idealu); + quat_left_ideal_finalize(&idealv); + + double_couple_point_iter(&ker.T1, TORSION_EVEN_POWER - exp, &ker.T1, &E01); + double_couple_point_iter(&ker.T2, TORSION_EVEN_POWER - exp, &ker.T2, &E01); + double_couple_point_iter(&ker.T1m2, TORSION_EVEN_POWER - exp, &ker.T1m2, &E01); + + assert(test_point_order_twof(&ker.T1.P1, &E01.E1, exp)); + assert(test_point_order_twof(&ker.T1m2.P2, &E01.E2, exp)); + + assert(ibz_is_odd(u)); + + // now we evaluate the basis points through the isogeny + assert(test_basis_order_twof(&bas_u, &E01.E1, TORSION_EVEN_POWER)); + + // evaluating the basis through the isogeny of degree u*d1 + copy_point(&pushed_points[0].P1, &bas_u.P); + copy_point(&pushed_points[2].P1, &bas_u.PmQ); + copy_point(&pushed_points[1].P1, &bas_u.Q); + // Set points to zero + ec_point_init(&pushed_points[0].P2); + ec_point_init(&pushed_points[1].P2); + ec_point_init(&pushed_points[2].P2); + + theta_couple_curve_t theta_codomain; + + ret = theta_chain_compute_and_eval_randomized( + exp, &E01, &ker, false, &theta_codomain, pushed_points, sizeof(pushed_points) / sizeof(*pushed_points)); + if (!ret) { + goto cleanup; + } + + theta_couple_point_t T1, T2, T1m2; + T1 = pushed_points[0]; + T2 = pushed_points[1]; + T1m2 = pushed_points[2]; + + assert(test_point_order_twof(&T1.P2, &theta_codomain.E2, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&T1.P1, &theta_codomain.E1, TORSION_EVEN_POWER)); + assert(test_point_order_twof(&T1m2.P2, &theta_codomain.E2, TORSION_EVEN_POWER)); + + copy_point(&basis->P, &T1.P1); + copy_point(&basis->Q, &T2.P1); + copy_point(&basis->PmQ, &T1m2.P1); + copy_curve(codomain, &theta_codomain.E1); + + // using weil pairing to verify that we selected the correct curve + fp2_t w0, w1; + // ec_curve_t E0 = CURVE_E0; + // ec_basis_t bas0 = BASIS_EVEN; + weil(&w0, TORSION_EVEN_POWER, &bas1.P, &bas1.Q, &bas1.PmQ, &E1); + weil(&w1, TORSION_EVEN_POWER, &basis->P, &basis->Q, &basis->PmQ, codomain); + + digit_t digit_d[NWORDS_ORDER] = { 0 }; + ibz_mul(&tmp, d1, u); + ibz_mul(&tmp, &tmp, u); + ibz_mod(&tmp, &tmp, &TORSION_PLUS_2POWER); + ibz_to_digit_array(digit_d, &tmp); + fp2_t test_pow; + fp2_pow_vartime(&test_pow, &w0, digit_d, NWORDS_ORDER); + + // then we have selected the wrong one + if (!fp2_is_equal(&w1, &test_pow)) { + copy_point(&basis->P, &T1.P2); + copy_point(&basis->Q, &T2.P2); + copy_point(&basis->PmQ, &T1m2.P2); + copy_curve(codomain, &theta_codomain.E2); + +// verifying that the other one is the good one +#ifndef NDEBUG + ec_curve_t codomain_tmp; + copy_curve(&codomain_tmp, codomain); + weil(&w1, TORSION_EVEN_POWER, &basis->P, &basis->Q, &basis->PmQ, &codomain_tmp); + fp2_pow_vartime(&test_pow, &w0, digit_d, NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w1)); +#endif + } + + // now we apply M / (u * d1) where M is the matrix corresponding to the + // endomorphism beta1 = phi o dual(phi1) we multiply beta1 by the inverse of + // (u*d1) mod 2^TORSION_EVEN_POWER + ibz_mul(&tmp, u, d1); + if (index_order1 != 0) { + ibz_mul(&tmp, &tmp, &CONNECTING_IDEALS[index_order1].norm); + } + ibz_invmod(&tmp, &tmp, &TORSION_PLUS_2POWER); + ibz_mul(&beta1->coord[0], &beta1->coord[0], &tmp); + ibz_mul(&beta1->coord[1], &beta1->coord[1], &tmp); + ibz_mul(&beta1->coord[2], &beta1->coord[2], &tmp); + ibz_mul(&beta1->coord[3], &beta1->coord[3], &tmp); + + 
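+    // With beta1 rescaled by the inverse of u * d1 (times the connecting-ideal norm when
+    // index_order1 != 0) mod 2^TORSION_EVEN_POWER, the application below should map
+    // (codomain, basis) to the image of the even-torsion basis under the isogeny attached
+    // to lideal; the debug block underneath cross-checks this via Weil pairings.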
endomorphism_application_even_basis(basis, 0, codomain, beta1, TORSION_EVEN_POWER); + +#ifndef NDEBUG + { + ec_curve_t E0 = CURVE_E0; + ec_curve_t codomain_tmp; + ec_basis_t bas0 = CURVES_WITH_ENDOMORPHISMS[0].basis_even; + copy_curve(&codomain_tmp, codomain); + copy_curve(&E1_tmp, &E1); + copy_curve(&E2_tmp, &E2); + weil(&w0a, TORSION_EVEN_POWER, &bas0.P, &bas0.Q, &bas0.PmQ, &E0); + weil(&w1a, TORSION_EVEN_POWER, &basis->P, &basis->Q, &basis->PmQ, &codomain_tmp); + digit_t tmp_d[2 * NWORDS_ORDER] = { 0 }; + if (index_order1 != 0) { + copy_basis(&bas1, &CURVES_WITH_ENDOMORPHISMS[index_order1].basis_even); + weil(&w0, TORSION_EVEN_POWER, &bas1.P, &bas1.Q, &bas1.PmQ, &E1_tmp); + ibz_to_digit_array(tmp_d, &CONNECTING_IDEALS[index_order1].norm); + fp2_pow_vartime(&test_pow, &w0a, tmp_d, 2 * NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w0)); + } + if (index_order2 != 0) { + copy_basis(&bas2, &CURVES_WITH_ENDOMORPHISMS[index_order2].basis_even); + weil(&w0, TORSION_EVEN_POWER, &bas2.P, &bas2.Q, &bas2.PmQ, &E2_tmp); + ibz_to_digit_array(tmp_d, &CONNECTING_IDEALS[index_order2].norm); + fp2_pow_vartime(&test_pow, &w0a, tmp_d, 2 * NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w0)); + } + ibz_to_digit_array(tmp_d, &lideal->norm); + fp2_pow_vartime(&test_pow, &w0a, tmp_d, 2 * NWORDS_ORDER); + assert(fp2_is_equal(&test_pow, &w1a)); + } +#endif + +cleanup: + ibz_finalize(&norm_d); + ibz_finalize(&test1); + ibz_finalize(&test2); + ibz_finalize(&target); + ibz_finalize(&tmp); + ibz_finalize(&two_pow); + quat_alg_elem_finalize(&theta); + return ret; +} + +int +dim2id2iso_arbitrary_isogeny_evaluation(ec_basis_t *basis, ec_curve_t *codomain, const quat_left_ideal_t *lideal) +{ + int ret; + + quat_alg_elem_t beta1, beta2; + ibz_t u, v, d1, d2; + + quat_alg_elem_init(&beta1); + quat_alg_elem_init(&beta2); + + ibz_init(&u); + ibz_init(&v); + ibz_init(&d1); + ibz_init(&d2); + + ret = dim2id2iso_ideal_to_isogeny_clapotis( + &beta1, &beta2, &u, &v, &d1, &d2, codomain, basis, lideal, &QUATALG_PINFTY); + + quat_alg_elem_finalize(&beta1); + quat_alg_elem_finalize(&beta2); + + ibz_finalize(&u); + ibz_finalize(&v); + ibz_finalize(&d1); + ibz_finalize(&d2); + + return ret; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim4.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim4.c new file mode 100644 index 0000000000..495dc2dcb2 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim4.c @@ -0,0 +1,470 @@ +#include +#include "internal.h" + +// internal helper functions +void +ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t *b) +{ + ibz_mat_4x4_t mat; + ibz_t prod; + ibz_init(&prod); + ibz_mat_4x4_init(&mat); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&(mat[i][j]), 0); + for (int k = 0; k < 4; k++) { + ibz_mul(&prod, &((*a)[i][k]), &((*b)[k][j])); + ibz_add(&(mat[i][j]), &(mat[i][j]), &prod); + } + } + } + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&((*res)[i][j]), &(mat[i][j])); + } + } + ibz_mat_4x4_finalize(&mat); + ibz_finalize(&prod); +} + +// helper functions for lattices +void +ibz_vec_4_set(ibz_vec_4_t *vec, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) +{ + ibz_set(&((*vec)[0]), coord0); + ibz_set(&((*vec)[1]), coord1); + ibz_set(&((*vec)[2]), coord2); + ibz_set(&((*vec)[3]), coord3); +} + +void +ibz_vec_4_copy(ibz_vec_4_t *new, const ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_copy(&((*new)[i]), &((*vec)[i])); + } +} + +void +ibz_vec_4_copy_ibz(ibz_vec_4_t 
*res, const ibz_t *coord0, const ibz_t *coord1, const ibz_t *coord2, const ibz_t *coord3) +{ + ibz_copy(&((*res)[0]), coord0); + ibz_copy(&((*res)[1]), coord1); + ibz_copy(&((*res)[2]), coord2); + ibz_copy(&((*res)[3]), coord3); +} + +void +ibz_vec_4_content(ibz_t *content, const ibz_vec_4_t *v) +{ + ibz_gcd(content, &((*v)[0]), &((*v)[1])); + ibz_gcd(content, &((*v)[2]), content); + ibz_gcd(content, &((*v)[3]), content); +} + +void +ibz_vec_4_negate(ibz_vec_4_t *neg, const ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_neg(&((*neg)[i]), &((*vec)[i])); + } +} + +void +ibz_vec_4_add(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) +{ + ibz_add(&((*res)[0]), &((*a)[0]), &((*b)[0])); + ibz_add(&((*res)[1]), &((*a)[1]), &((*b)[1])); + ibz_add(&((*res)[2]), &((*a)[2]), &((*b)[2])); + ibz_add(&((*res)[3]), &((*a)[3]), &((*b)[3])); +} + +void +ibz_vec_4_sub(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) +{ + ibz_sub(&((*res)[0]), &((*a)[0]), &((*b)[0])); + ibz_sub(&((*res)[1]), &((*a)[1]), &((*b)[1])); + ibz_sub(&((*res)[2]), &((*a)[2]), &((*b)[2])); + ibz_sub(&((*res)[3]), &((*a)[3]), &((*b)[3])); +} + +int +ibz_vec_4_is_zero(const ibz_vec_4_t *x) +{ + int res = 1; + for (int i = 0; i < 4; i++) { + res &= ibz_is_zero(&((*x)[i])); + } + return (res); +} + +void +ibz_vec_4_linear_combination(ibz_vec_4_t *lc, + const ibz_t *coeff_a, + const ibz_vec_4_t *vec_a, + const ibz_t *coeff_b, + const ibz_vec_4_t *vec_b) +{ + ibz_t prod; + ibz_vec_4_t sums; + ibz_vec_4_init(&sums); + ibz_init(&prod); + for (int i = 0; i < 4; i++) { + ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); + ibz_mul(&prod, coeff_b, &((*vec_b)[i])); + ibz_add(&(sums[i]), &(sums[i]), &prod); + } + for (int i = 0; i < 4; i++) { + ibz_copy(&((*lc)[i]), &(sums[i])); + } + ibz_finalize(&prod); + ibz_vec_4_finalize(&sums); +} + +void +ibz_vec_4_scalar_mul(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_mul(&((*prod)[i]), &((*vec)[i]), scalar); + } +} + +int +ibz_vec_4_scalar_div(ibz_vec_4_t *quot, const ibz_t *scalar, const ibz_vec_4_t *vec) +{ + int res = 1; + ibz_t r; + ibz_init(&r); + for (int i = 0; i < 4; i++) { + ibz_div(&((*quot)[i]), &r, &((*vec)[i]), scalar); + res = res && ibz_is_zero(&r); + } + ibz_finalize(&r); + return (res); +} + +void +ibz_mat_4x4_copy(ibz_mat_4x4_t *new, const ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&((*new)[i][j]), &((*mat)[i][j])); + } + } +} + +void +ibz_mat_4x4_negate(ibz_mat_4x4_t *neg, const ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_neg(&((*neg)[i][j]), &((*mat)[i][j])); + } + } +} + +void +ibz_mat_4x4_transpose(ibz_mat_4x4_t *transposed, const ibz_mat_4x4_t *mat) +{ + ibz_mat_4x4_t work; + ibz_mat_4x4_init(&work); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(work[i][j]), &((*mat)[j][i])); + } + } + ibz_mat_4x4_copy(transposed, &work); + ibz_mat_4x4_finalize(&work); +} + +void +ibz_mat_4x4_zero(ibz_mat_4x4_t *zero) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&((*zero)[i][j]), 0); + } + } +} + +void +ibz_mat_4x4_identity(ibz_mat_4x4_t *id) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&((*id)[i][j]), 0); + } + ibz_set(&((*id)[i][i]), 1); + } +} + +int +ibz_mat_4x4_is_identity(const ibz_mat_4x4_t *mat) +{ + int res = 1; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + res = res && 
ibz_is_one(&((*mat)[i][j])) == (i == j); + } + } + return (res); +} + +int +ibz_mat_4x4_equal(const ibz_mat_4x4_t *mat1, const ibz_mat_4x4_t *mat2) +{ + int res = 0; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + res = res | ibz_cmp(&((*mat1)[i][j]), &((*mat2)[i][j])); + } + } + return (!res); +} + +void +ibz_mat_4x4_scalar_mul(ibz_mat_4x4_t *prod, const ibz_t *scalar, const ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_mul(&((*prod)[i][j]), &((*mat)[i][j]), scalar); + } + } +} + +void +ibz_mat_4x4_gcd(ibz_t *gcd, const ibz_mat_4x4_t *mat) +{ + ibz_t d; + ibz_init(&d); + ibz_copy(&d, &((*mat)[0][0])); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_gcd(&d, &d, &((*mat)[i][j])); + } + } + ibz_copy(gcd, &d); + ibz_finalize(&d); +} + +int +ibz_mat_4x4_scalar_div(ibz_mat_4x4_t *quot, const ibz_t *scalar, const ibz_mat_4x4_t *mat) +{ + int res = 1; + ibz_t r; + ibz_init(&r); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_div(&((*quot)[i][j]), &r, &((*mat)[i][j]), scalar); + res = res && ibz_is_zero(&r); + } + } + ibz_finalize(&r); + return (res); +} + +// 4x4 inversion helper functions +void +ibz_inv_dim4_make_coeff_pmp(ibz_t *coeff, + const ibz_t *a1, + const ibz_t *a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2) +{ + ibz_t prod, sum; + ibz_init(&prod); + ibz_init(&sum); + ibz_mul(&sum, a1, a2); + ibz_mul(&prod, b1, b2); + ibz_sub(&sum, &sum, &prod); + ibz_mul(&prod, c1, c2); + ibz_add(coeff, &sum, &prod); + ibz_finalize(&prod); + ibz_finalize(&sum); +} + +void +ibz_inv_dim4_make_coeff_mpm(ibz_t *coeff, + const ibz_t *a1, + const ibz_t *a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2) +{ + ibz_t prod, sum; + ibz_init(&prod); + ibz_init(&sum); + ibz_mul(&sum, b1, b2); + ibz_mul(&prod, a1, a2); + ibz_sub(&sum, &sum, &prod); + ibz_mul(&prod, c1, c2); + ibz_sub(coeff, &sum, &prod); + ibz_finalize(&prod); + ibz_finalize(&sum); +} + +// Method from https://www.geometrictools.com/Documentation/LaplaceExpansionTheorem.pdf 3rd of May +// 2023, 16h15 CEST +int +ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_4x4_t *mat) +{ + ibz_t prod, work_det; + ibz_mat_4x4_t work; + ibz_t s[6]; + ibz_t c[6]; + for (int i = 0; i < 6; i++) { + ibz_init(&(s[i])); + ibz_init(&(c[i])); + } + ibz_mat_4x4_init(&work); + ibz_init(&prod); + ibz_init(&work_det); + + // compute some 2x2 minors, store them in s and c + for (int i = 0; i < 3; i++) { + ibz_mat_2x2_det_from_ibz(&(s[i]), &((*mat)[0][0]), &((*mat)[0][i + 1]), &((*mat)[1][0]), &((*mat)[1][i + 1])); + ibz_mat_2x2_det_from_ibz(&(c[i]), &((*mat)[2][0]), &((*mat)[2][i + 1]), &((*mat)[3][0]), &((*mat)[3][i + 1])); + } + for (int i = 0; i < 2; i++) { + ibz_mat_2x2_det_from_ibz( + &(s[3 + i]), &((*mat)[0][1]), &((*mat)[0][2 + i]), &((*mat)[1][1]), &((*mat)[1][2 + i])); + ibz_mat_2x2_det_from_ibz( + &(c[3 + i]), &((*mat)[2][1]), &((*mat)[2][2 + i]), &((*mat)[3][1]), &((*mat)[3][2 + i])); + } + ibz_mat_2x2_det_from_ibz(&(s[5]), &((*mat)[0][2]), &((*mat)[0][3]), &((*mat)[1][2]), &((*mat)[1][3])); + ibz_mat_2x2_det_from_ibz(&(c[5]), &((*mat)[2][2]), &((*mat)[2][3]), &((*mat)[3][2]), &((*mat)[3][3])); + + // compute det + ibz_set(&work_det, 0); + for (int i = 0; i < 6; i++) { + ibz_mul(&prod, &(s[i]), &(c[5 - i])); + if ((i != 1) && (i != 4)) { + ibz_add(&work_det, &work_det, &prod); + } else { + ibz_sub(&work_det, &work_det, &prod); + } + } + // compute transposed adjugate + 
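+    // (The loop above computes det via the Laplace expansion along the first two rows,
+    // det = s0*c5 - s1*c4 + s2*c3 + s3*c2 - s4*c1 + s5*c0; the index arithmetic below then
+    // assembles each entry of the transposed adjugate from three of these 2x2 minors with
+    // the matching cofactor signs, following the reference linked above.)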
for (int j = 0; j < 4; j++) { + for (int k = 0; k < 2; k++) { + if ((k + j + 1) % 2 == 1) { + ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), + &((*mat)[1 - k][(j == 0)]), + &(c[6 - j - (j == 0)]), + &((*mat)[1 - k][2 - (j > 1)]), + &(c[4 - j - (j == 1)]), + &((*mat)[1 - k][3 - (j == 3)]), + &(c[3 - j - (j == 1) - (j == 2)])); + } else { + ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), + &((*mat)[1 - k][(j == 0)]), + &(c[6 - j - (j == 0)]), + &((*mat)[1 - k][2 - (j > 1)]), + &(c[4 - j - (j == 1)]), + &((*mat)[1 - k][3 - (j == 3)]), + &(c[3 - j - (j == 1) - (j == 2)])); + } + } + for (int k = 2; k < 4; k++) { + if ((k + j + 1) % 2 == 1) { + ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), + &((*mat)[3 - (k == 3)][(j == 0)]), + &(s[6 - j - (j == 0)]), + &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(s[4 - j - (j == 1)]), + &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(s[3 - j - (j == 1) - (j == 2)])); + } else { + ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), + &((*mat)[3 - (k == 3)][(j == 0)]), + &(s[6 - j - (j == 0)]), + &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(s[4 - j - (j == 1)]), + &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(s[3 - j - (j == 1) - (j == 2)])); + } + } + } + if (inv != NULL) { + // put transposed adjugate in result, or 0 if no inverse + ibz_set(&prod, !ibz_is_zero(&work_det)); + ibz_mat_4x4_scalar_mul(inv, &prod, &work); + } + // output det + if (det != NULL) + ibz_copy(det, &work_det); + for (int i = 0; i < 6; i++) { + ibz_finalize(&s[i]); + ibz_finalize(&c[i]); + } + ibz_mat_4x4_finalize(&work); + ibz_finalize(&work_det); + ibz_finalize(&prod); + return (!ibz_is_zero(det)); +} + +// matrix evaluation + +void +ibz_mat_4x4_eval(ibz_vec_4_t *res, const ibz_mat_4x4_t *mat, const ibz_vec_4_t *vec) +{ + ibz_vec_4_t sum; + ibz_t prod; + ibz_init(&prod); + ibz_vec_4_init(&sum); + // assume initialization to 0 + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_mul(&prod, &(*mat)[i][j], &(*vec)[j]); + ibz_add(&(sum[i]), &(sum[i]), &prod); + } + } + ibz_vec_4_copy(res, &sum); + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} + +void +ibz_mat_4x4_eval_t(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_mat_4x4_t *mat) +{ + ibz_vec_4_t sum; + ibz_t prod; + ibz_init(&prod); + ibz_vec_4_init(&sum); + // assume initialization to 0 + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_mul(&prod, &(*mat)[j][i], &(*vec)[j]); + ibz_add(&(sum[i]), &(sum[i]), &prod); + } + } + ibz_vec_4_copy(res, &sum); + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} + +// quadratic forms + +void +quat_qf_eval(ibz_t *res, const ibz_mat_4x4_t *qf, const ibz_vec_4_t *coord) +{ + ibz_vec_4_t sum; + ibz_t prod; + ibz_init(&prod); + ibz_vec_4_init(&sum); + ibz_mat_4x4_eval(&sum, qf, coord); + for (int i = 0; i < 4; i++) { + ibz_mul(&prod, &(sum[i]), &(*coord)[i]); + if (i > 0) { + ibz_add(&(sum[0]), &(sum[0]), &prod); + } else { + ibz_copy(&sum[0], &prod); + } + } + ibz_copy(res, &sum[0]); + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dpe.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dpe.h new file mode 100644 index 0000000000..b9a7a35e0b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dpe.h @@ -0,0 +1,743 @@ +/* Copyright (C) 2004-2024 Patrick Pelissier, Paul Zimmermann, LORIA/INRIA. + +This file is part of the DPE Library. 
+
+The DPE Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The DPE Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the DPE Library; see the file COPYING.LIB.
+If not, see <https://www.gnu.org/licenses/>. */
+
+#ifndef __DPE
+#define __DPE
+
+#include <stdlib.h> /* For abort */
+#include <stdio.h> /* For fprintf */
+#include <math.h> /* for round, floor, ceil */
+#include <limits.h>
+
+/* if you change the version, please change it in Makefile too */
+#define DPE_VERSION_MAJOR 1
+#define DPE_VERSION_MINOR 7
+
+#if defined(__GNUC__) && (__GNUC__ >= 3)
+# define DPE_LIKELY(x) (__builtin_expect(!!(x),1))
+# define DPE_UNLIKELY(x) (__builtin_expect((x),0))
+# define DPE_UNUSED_ATTR __attribute__((unused))
+#else
+# define DPE_LIKELY(x) (x)
+# define DPE_UNLIKELY(x) (x)
+# define DPE_UNUSED_ATTR
+#endif
+
+/* If no user defined mode, define it to double */
+#if !defined(DPE_USE_DOUBLE) && !defined(DPE_USE_LONGDOUBLE) && !defined(DPE_USE_FLOAT128)
+# define DPE_USE_DOUBLE
+#endif
+
+#if defined(DPE_USE_DOUBLE) && defined(DPE_USE_LONGDOUBLE)
+# error "Either DPE_USE_DOUBLE or DPE_USE_LONGDOUBLE shall be defined."
+#elif defined(DPE_USE_DOUBLE) && defined(DPE_USE_USE_FLOAT128)
+# error "Either DPE_USE_DOUBLE or DPE_USE_FLOAT128 shall be defined."
+#elif defined(DPE_USE_LONG_DOUBLE) && defined(DPE_USE_USE_FLOAT128)
+# error "Either DPE_USE_LONG_DOUBLE or DPE_USE_FLOAT128 shall be defined."
+#endif + +#if (defined(__i386) || defined (__x86_64)) && !defined(DPE_LITTLEENDIAN32) && defined(DPE_USE_DOUBLE) +# define DPE_LITTLEENDIAN32 +#endif + +#if (defined(__STDC_VERSION__) && (__STDC_VERSION__>=199901L)) || defined (__GLIBC__) +# define DPE_DEFINE_ROUND_TRUNC +#endif + +#if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 43 +# define DPE_ISFINITE __builtin_isfinite +#elif defined(isfinite) +# define DPE_ISFINITE isfinite /* new C99 function */ +#else +# define DPE_ISFINITE finite /* obsolete BSD function */ +#endif + +/* DPE_LDEXP(DPE_DOUBLE m, DPEEXP e) return x = m * 2^e */ +/* DPE_FREXP(DPE_DOUBLE x, DPEEXP *e) returns m, e such that x = m * 2^e with + 1/2 <= m < 1 */ +/* DPE_ROUND(DPE_DOUBLE x) returns the nearest integer to x */ +#if defined(DPE_USE_DOUBLE) +# define DPE_DOUBLE double /* mantissa type */ +# define DPE_BITSIZE 53 /* bitsize of DPE_DOUBLE */ +# define DPE_2_POW_BITSIZE 0x1P53 +# if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 40 +# define DPE_LDEXP __builtin_ldexp +# define DPE_FREXP __builtin_frexp +# define DPE_FLOOR __builtin_floor +# define DPE_CEIL __builtin_ceil +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND __builtin_round +# define DPE_TRUNC __builtin_trunc +# endif +# else +# define DPE_LDEXP ldexp +# define DPE_FREXP frexp +# define DPE_FLOOR floor +# define DPE_CEIL ceil +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND round +# define DPE_TRUNC trunc +# endif +# endif + +#elif defined(DPE_USE_LONGDOUBLE) +# define DPE_DOUBLE long double +# define DPE_BITSIZE 64 +# define DPE_2_POW_BITSIZE 0x1P64 +# define DPE_LDEXP ldexpl +# define DPE_FREXP frexpl +# define DPE_FLOOR floorl +# define DPE_CEIL ceill +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND roundl +# define DPE_TRUNC truncl +# endif + +#elif defined(DPE_USE_FLOAT128) +# include "quadmath.h" +# define DPE_DOUBLE __float128 +# define DPE_BITSIZE 113 +# define DPE_2_POW_BITSIZE 0x1P113 +# define DPE_LDEXP ldexpq +# define DPE_FLOOR floorq +# define DPE_CEIL ceilq +# define DPE_FREXP frexpq +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND roundq +# define DPE_TRUNC truncq +# endif + +#else +# error "neither DPE_USE_DOUBLE, nor DPE_USE_LONGDOUBLE, nor DPE_USE_FLOAT128 is defined" +#endif + +/* If no C99, do what we can */ +#ifndef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND(x) ((DPE_DOUBLE) ((long long) ((x) + ((x) >= 0.0 ? 0.5 : -0.5)))) +# define DPE_TRUNC(x) ((DPE_DOUBLE) ((long long) ((x) + 0.0))) +#endif + +#if defined(DPE_USE_LONG) +# define DPE_EXP_T long /* exponent type */ +# define DPE_EXPMIN LONG_MIN /* smallest possible exponent */ +#elif defined(DPE_USE_LONGLONG) +# define DPE_EXP_T long long +# define DPE_EXPMIN LLONG_MIN +#else +# define DPE_EXP_T int /* exponent type */ +# define DPE_EXPMIN INT_MIN /* smallest possible exponent */ +#endif + +#ifdef DPE_LITTLEENDIAN32 +typedef union +{ + double d; +#if INT_MAX == 0x7FFFFFFFL + int i[2]; +#elif LONG_MAX == 0x7FFFFFFFL + long i[2]; +#elif SHRT_MAX == 0x7FFFFFFFL + short i[2]; +#else +# error Cannot find a 32 bits integer type. +#endif +} dpe_double_words; +#endif + +typedef struct +{ + DPE_DOUBLE d; /* significand */ + DPE_EXP_T exp; /* exponent */ +} dpe_struct; + +typedef dpe_struct dpe_t[1]; + +#define DPE_MANT(x) ((x)->d) +#define DPE_EXP(x) ((x)->exp) +#define DPE_SIGN(x) ((DPE_MANT(x) < 0.0) ? 
-1 : (DPE_MANT(x) > 0.0)) + +#define DPE_INLINE static inline + +/* initialize */ +DPE_INLINE void +dpe_init (dpe_t x DPE_UNUSED_ATTR) +{ +} + +/* clear */ +DPE_INLINE void +dpe_clear (dpe_t x DPE_UNUSED_ATTR) +{ +} + +/* set x to y */ +DPE_INLINE void +dpe_set (dpe_t x, dpe_t y) +{ + DPE_MANT(x) = DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y); +} + +/* set x to -y */ +DPE_INLINE void +dpe_neg (dpe_t x, dpe_t y) +{ + DPE_MANT(x) = -DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y); +} + +/* set x to |y| */ +DPE_INLINE void +dpe_abs (dpe_t x, dpe_t y) +{ + DPE_MANT(x) = (DPE_MANT(y) >= 0) ? DPE_MANT(y) : -DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y); +} + +/* set mantissa in [1/2, 1), except for 0 which has minimum exponent */ +/* FIXME: don't inline this function yet ? */ +static void +dpe_normalize (dpe_t x) +{ + if (DPE_UNLIKELY (DPE_MANT(x) == 0.0 || DPE_ISFINITE (DPE_MANT(x)) == 0)) + { + if (DPE_MANT(x) == 0.0) + DPE_EXP(x) = DPE_EXPMIN; + /* otherwise let the exponent of NaN, Inf unchanged */ + } + else + { + DPE_EXP_T e; +#ifdef DPE_LITTLEENDIAN32 /* 32-bit little endian */ + dpe_double_words dw; + dw.d = DPE_MANT(x); + e = (dw.i[1] >> 20) & 0x7FF; /* unbiased exponent, 1022 for m=1/2 */ + DPE_EXP(x) += e - 1022; + dw.i[1] = (dw.i[1] & 0x800FFFFF) | 0x3FE00000; + DPE_MANT(x) = dw.d; +#else /* portable code */ + double m = DPE_MANT(x); + DPE_MANT(x) = DPE_FREXP (m, &e); + DPE_EXP(x) += e; +#endif + } +} + +#if defined(DPE_USE_DOUBLE) +static const double dpe_scale_tab[54] = { + 0x1P0, 0x1P-1, 0x1P-2, 0x1P-3, 0x1P-4, 0x1P-5, 0x1P-6, 0x1P-7, 0x1P-8, + 0x1P-9, 0x1P-10, 0x1P-11, 0x1P-12, 0x1P-13, 0x1P-14, 0x1P-15, 0x1P-16, + 0x1P-17, 0x1P-18, 0x1P-19, 0x1P-20, 0x1P-21, 0x1P-22, 0x1P-23, 0x1P-24, + 0x1P-25, 0x1P-26, 0x1P-27, 0x1P-28, 0x1P-29, 0x1P-30, 0x1P-31, 0x1P-32, + 0x1P-33, 0x1P-34, 0x1P-35, 0x1P-36, 0x1P-37, 0x1P-38, 0x1P-39, 0x1P-40, + 0x1P-41, 0x1P-42, 0x1P-43, 0x1P-44, 0x1P-45, 0x1P-46, 0x1P-47, 0x1P-48, + 0x1P-49, 0x1P-50, 0x1P-51, 0x1P-52, 0x1P-53}; +#endif + +DPE_INLINE DPE_DOUBLE +dpe_scale (DPE_DOUBLE d, int s) +{ + /* -DPE_BITSIZE < s <= 0 and 1/2 <= d < 1 */ +#if defined(DPE_USE_DOUBLE) + return d * dpe_scale_tab [-s]; +#else /* portable code */ + return DPE_LDEXP (d, s); +#endif +} + +/* set x to y */ +DPE_INLINE void +dpe_set_d (dpe_t x, double y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +/* set x to y */ +DPE_INLINE void +dpe_set_ld (dpe_t x, long double y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +/* set x to y */ +DPE_INLINE void +dpe_set_ui (dpe_t x, unsigned long y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +/* set x to y */ +DPE_INLINE void +dpe_set_si (dpe_t x, long y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +DPE_INLINE long +dpe_get_si (dpe_t x) +{ + DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); + return (long) d; +} + +DPE_INLINE unsigned long +dpe_get_ui (dpe_t x) +{ + DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); + return (d < 0.0) ? 
0 : (unsigned long) d; +} + +DPE_INLINE double +dpe_get_d (dpe_t x) +{ + return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); +} + +DPE_INLINE long double +dpe_get_ld (dpe_t x) +{ + return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); +} + +#if defined(__GMP_H__) || defined(__MINI_GMP_H__) +/* set x to y */ +DPE_INLINE void +dpe_set_z (dpe_t x, mpz_t y) +{ + long e; + DPE_MANT(x) = mpz_get_d_2exp (&e, y); + DPE_EXP(x) = (DPE_EXP_T) e; +} + +/* set x to y, rounded to nearest */ +DPE_INLINE void +dpe_get_z (mpz_t x, dpe_t y) +{ + DPE_EXP_T ey = DPE_EXP(y); + if (ey >= DPE_BITSIZE) /* y is an integer */ + { + DPE_DOUBLE d = DPE_MANT(y) * DPE_2_POW_BITSIZE; /* d is an integer */ + mpz_set_d (x, d); /* should be exact */ + mpz_mul_2exp (x, x, (unsigned long) ey - DPE_BITSIZE); + } + else /* DPE_EXP(y) < DPE_BITSIZE */ + { + if (DPE_UNLIKELY (ey < 0)) /* |y| < 1/2 */ + mpz_set_ui (x, 0); + else + { + DPE_DOUBLE d = DPE_LDEXP(DPE_MANT(y), ey); + mpz_set_d (x, (double) DPE_ROUND(d)); + } + } +} + +/* return e and x such that y = x*2^e */ +DPE_INLINE mp_exp_t +dpe_get_z_exp (mpz_t x, dpe_t y) +{ + mpz_set_d (x, DPE_MANT (y) * DPE_2_POW_BITSIZE); + return DPE_EXP(y) - DPE_BITSIZE; +} +#endif + +/* x <- y + z, assuming y and z are normalized, returns x normalized */ +DPE_INLINE void +dpe_add (dpe_t x, dpe_t y, dpe_t z) +{ + if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) + /* |z| < 1/2*ulp(y), thus o(y+z) = y */ + dpe_set (x, y); + else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) + dpe_set (x, z); + else + { + DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ + + if (d >= 0) + { + DPE_MANT(x) = DPE_MANT(y) + dpe_scale (DPE_MANT(z), -d); + DPE_EXP(x) = DPE_EXP(y); + } + else + { + DPE_MANT(x) = DPE_MANT(z) + dpe_scale (DPE_MANT(y), d); + DPE_EXP(x) = DPE_EXP(z); + } + dpe_normalize (x); + } +} + +/* x <- y - z, assuming y and z are normalized, returns x normalized */ +DPE_INLINE void +dpe_sub (dpe_t x, dpe_t y, dpe_t z) +{ + if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) + /* |z| < 1/2*ulp(y), thus o(y-z) = y */ + dpe_set (x, y); + else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) + dpe_neg (x, z); + else + { + DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ + + if (d >= 0) + { + DPE_MANT(x) = DPE_MANT(y) - dpe_scale (DPE_MANT(z), -d); + DPE_EXP(x) = DPE_EXP(y); + } + else + { + DPE_MANT(x) = dpe_scale (DPE_MANT(y), d) - DPE_MANT(z); + DPE_EXP(x) = DPE_EXP(z); + } + dpe_normalize (x); + } +} + +/* x <- y * z, assuming y and z are normalized, returns x normalized */ +DPE_INLINE void +dpe_mul (dpe_t x, dpe_t y, dpe_t z) +{ + DPE_MANT(x) = DPE_MANT(y) * DPE_MANT(z); + DPE_EXP(x) = DPE_EXP(y) + DPE_EXP(z); + dpe_normalize (x); +} + +/* x <- sqrt(y), assuming y is normalized, returns x normalized */ +DPE_INLINE void +dpe_sqrt (dpe_t x, dpe_t y) +{ + DPE_EXP_T ey = DPE_EXP(y); + if (ey % 2) + { + /* since 1/2 <= my < 1, 1/4 <= my/2 < 1 */ + DPE_MANT(x) = sqrt (0.5 * DPE_MANT(y)); + DPE_EXP(x) = (ey + 1) / 2; + } + else + { + DPE_MANT(x) = sqrt (DPE_MANT(y)); + DPE_EXP(x) = ey / 2; + } +} + +/* x <- y / z, assuming y and z are normalized, returns x normalized. + Assumes z is not zero. 
*/ +DPE_INLINE void +dpe_div (dpe_t x, dpe_t y, dpe_t z) +{ + DPE_MANT(x) = DPE_MANT(y) / DPE_MANT(z); + DPE_EXP(x) = DPE_EXP(y) - DPE_EXP(z); + dpe_normalize (x); +} + +/* x <- y * z, assuming y normalized, returns x normalized */ +DPE_INLINE void +dpe_mul_ui (dpe_t x, dpe_t y, unsigned long z) +{ + DPE_MANT(x) = DPE_MANT(y) * (DPE_DOUBLE) z; + DPE_EXP(x) = DPE_EXP(y); + dpe_normalize (x); +} + +/* x <- y / z, assuming y normalized, z non-zero, returns x normalized */ +DPE_INLINE void +dpe_div_ui (dpe_t x, dpe_t y, unsigned long z) +{ + DPE_MANT(x) = DPE_MANT(y) / (DPE_DOUBLE) z; + DPE_EXP(x) = DPE_EXP(y); + dpe_normalize (x); +} + +/* x <- y * 2^e */ +DPE_INLINE void +dpe_mul_2exp (dpe_t x, dpe_t y, unsigned long e) +{ + DPE_MANT(x) = DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y) + (DPE_EXP_T) e; +} + +/* x <- y / 2^e */ +DPE_INLINE void +dpe_div_2exp (dpe_t x, dpe_t y, unsigned long e) +{ + DPE_MANT(x) = DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y) - (DPE_EXP_T) e; +} + +/* return e and x such that y = x*2^e (equality is not guaranteed if the 'long' + type has fewer bits than the significand in dpe_t) */ +DPE_INLINE DPE_EXP_T +dpe_get_si_exp (long *x, dpe_t y) +{ + if (sizeof(long) == 4) /* 32-bit word: long has 31 bits */ + { + *x = (long) (DPE_MANT(y) * 2147483648.0); + return DPE_EXP(y) - 31; + } + else if (sizeof(long) == 8) /* 64-bit word: long has 63 bits */ + { + *x = (long) (DPE_MANT (y) * 9223372036854775808.0); + return DPE_EXP(y) - 63; + } + else + { + fprintf (stderr, "Error, neither 32-bit nor 64-bit word\n"); + exit (1); + } +} + +static DPE_UNUSED_ATTR int dpe_str_prec = 16; +static int dpe_out_str (FILE *s, int base, dpe_t x) DPE_UNUSED_ATTR; + +static int +dpe_out_str (FILE *s, int base, dpe_t x) +{ + DPE_DOUBLE d = DPE_MANT(x); + DPE_EXP_T e2 = DPE_EXP(x); + int e10 = 0; + char sign = ' '; + if (DPE_UNLIKELY (base != 10)) + { + fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); + exit (1); + } + if (d == 0.0) +#ifdef DPE_USE_DOUBLE + return fprintf (s, "%1.*f", dpe_str_prec, d); +#else + return fprintf (s, "%1.*Lf", dpe_str_prec, (long double) d); +#endif + if (d < 0) + { + d = -d; + sign = '-'; + } + if (e2 > 0) + { + while (e2 > 0) + { + e2 --; + d *= 2.0; + if (d >= 10.0) + { + d /= 10.0; + e10 ++; + } + } + } + else /* e2 <= 0 */ + { + while (e2 < 0) + { + e2 ++; + d /= 2.0; + if (d < 1.0) + { + d *= 10.0; + e10 --; + } + } + } +#ifdef DPE_USE_DOUBLE + return fprintf (s, "%c%1.*f*10^%d", sign, dpe_str_prec, d, e10); +#else + return fprintf (s, "%c%1.*Lf*10^%d", sign, dpe_str_prec, (long double) d, e10); +#endif +} + +static size_t dpe_inp_str (dpe_t x, FILE *s, int base) DPE_UNUSED_ATTR; + +static size_t +dpe_inp_str (dpe_t x, FILE *s, int base) +{ + size_t res; + DPE_DOUBLE d; + if (DPE_UNLIKELY (base != 10)) + { + fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); + exit (1); + } +#ifdef DPE_USE_DOUBLE + res = fscanf (s, "%lf", &d); +#elif defined(DPE_USE_LONGDOUBLE) + res = fscanf (s, "%Lf", &d); +#else + { + long double d_ld; + res = fscanf (s, "%Lf", &d_ld); + d = d_ld; + } +#endif + dpe_set_d (x, d); + return res; +} + +DPE_INLINE void +dpe_dump (dpe_t x) +{ + dpe_out_str (stdout, 10, x); + putchar ('\n'); +} + +DPE_INLINE int +dpe_zero_p (dpe_t x) +{ + return DPE_MANT (x) == 0; +} + +/* return a positive value if x > y + a negative value if x < y + and 0 otherwise (x=y). 
*/ +DPE_INLINE int +dpe_cmp (dpe_t x, dpe_t y) +{ + int sx = DPE_SIGN(x); + int d = sx - DPE_SIGN(y); + + if (d != 0) + return d; + else if (DPE_EXP(x) > DPE_EXP(y)) + return (sx > 0) ? 1 : -1; + else if (DPE_EXP(y) > DPE_EXP(x)) + return (sx > 0) ? -1 : 1; + else /* DPE_EXP(x) = DPE_EXP(y) */ + return (DPE_MANT(x) < DPE_MANT(y)) ? -1 : (DPE_MANT(x) > DPE_MANT(y)); +} + +DPE_INLINE int +dpe_cmp_d (dpe_t x, double d) +{ + dpe_t y; + dpe_set_d (y, d); + return dpe_cmp (x, y); +} + +DPE_INLINE int +dpe_cmp_ui (dpe_t x, unsigned long d) +{ + dpe_t y; + dpe_set_ui (y, d); + return dpe_cmp (x, y); +} + +DPE_INLINE int +dpe_cmp_si (dpe_t x, long d) +{ + dpe_t y; + dpe_set_si (y, d); + return dpe_cmp (x, y); +} + +/* set x to integer nearest to y */ +DPE_INLINE void +dpe_round (dpe_t x, dpe_t y) +{ + if (DPE_EXP(y) < 0) /* |y| < 1/2 */ + dpe_set_ui (x, 0); + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set (x, y); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, DPE_ROUND(d)); + } +} + +/* set x to the fractional part of y, defined as y - trunc(y), thus the + fractional part has absolute value in [0, 1), and same sign as y */ +DPE_INLINE void +dpe_frac (dpe_t x, dpe_t y) +{ + /* If |y| is smaller than 1, keep it */ + if (DPE_EXP(y) <= 0) + dpe_set (x, y); + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set_ui (x, 0); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, d - DPE_TRUNC(d)); + } +} + +/* set x to largest integer <= y */ +DPE_INLINE void +dpe_floor (dpe_t x, dpe_t y) +{ + if (DPE_EXP(y) <= 0) /* |y| < 1 */ + { + if (DPE_SIGN(y) >= 0) /* 0 <= y < 1 */ + dpe_set_ui (x, 0); + else /* -1 < y < 0 */ + dpe_set_si (x, -1); + } + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set (x, y); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, DPE_FLOOR(d)); + } +} + +/* set x to smallest integer >= y */ +DPE_INLINE void +dpe_ceil (dpe_t x, dpe_t y) +{ + if (DPE_EXP(y) <= 0) /* |y| < 1 */ + { + if (DPE_SIGN(y) > 0) /* 0 < y < 1 */ + dpe_set_ui (x, 1); + else /* -1 < y <= 0 */ + dpe_set_si (x, 0); + } + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set (x, y); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, DPE_CEIL(d)); + } +} + +DPE_INLINE void +dpe_swap (dpe_t x, dpe_t y) +{ + DPE_EXP_T i = DPE_EXP (x); + DPE_DOUBLE d = DPE_MANT (x); + DPE_EXP (x) = DPE_EXP (y); + DPE_MANT (x) = DPE_MANT (y); + DPE_EXP (y) = i; + DPE_MANT (y) = d; +} + +#endif /* __DPE */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/e0_basis.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/e0_basis.c new file mode 100644 index 0000000000..a7148e485b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/e0_basis.c @@ -0,0 +1,55 @@ +#include +const fp2_t BASIS_E0_PX = { +#if 0 +#elif RADIX == 16 +{0x1099, 0xa9f, 0x14f8, 0x1537, 0x1a13, 0x97e, 0x1095, 0xc8b, 0xdd2, 0x1c5f, 0xbdf, 0x1344, 0x1330, 0x1733, 0x185d, 0x1b08, 0x464, 0x76f, 0xe44, 0x3fc, 0x1dc0, 0x1c62, 0x88, 0x972, 0x13f4, 0x18c8, 0x6bd, 0x804, 0x1269, 0x19e0, 0x14bd, 0x10a1, 0xe5e, 0x1af2, 0x156c, 0x3f7, 0x16a1, 0x47d, 0x314} +#elif RADIX == 32 +{0x184cba61, 0xf4f854f, 0x1fb42753, 0x45c2552, 0x1c5f6e93, 0x2688bdf, 0xedcce66, 0x64d8461, 0x1c8876f2, 0x177007f8, 0x12044718, 0x1913f44b, 0x10d7b8, 0x1cf049a5, 0x1d0a1a5e, 0x1b35e4e5, 0x1508fdea, 0x66d} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x27537a7c2a7e132e, 
0x7dba4c8b84aa5fb4, 0xedcce6613445eff1, 0xc7221dbc8c9b08c2, 0x8972044718bb803f, 0x24d280435ee3227e, 0x6bc9cbd0a1a5ee78, 0x1011f6d423f7ab6} +#else +{0xa6f4f854fc265d, 0x4c8b84aa5fb427, 0x1309a22f7f8bedd, 0x12326c230bb7339, 0x1177007f8e443b7, 0x3227e897204471, 0x173c12694021af7, 0xd9af272f428697, 0x523eda847ef5} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x4b1, 0x178f, 0x107b, 0x6f6, 0x75e, 0x1b27, 0x4db, 0x1e1b, 0xd78, 0x15b6, 0x1130, 0x8cc, 0x1ac0, 0x9b7, 0x692, 0x1e07, 0x1f4, 0xfd7, 0x2ab, 0x7b5, 0x1040, 0xa43, 0xb6d, 0x13a1, 0x1422, 0x10c9, 0x10b0, 0x1540, 0x827, 0xa69, 0x1761, 0x1f25, 0x1d16, 0x16f2, 0x1fcb, 0x92, 0xcba, 0x1c03, 0x3c7} +#elif RADIX == 32 +{0x1258c7b1, 0xd07bbc7, 0x9cebc6f, 0x10d936f6, 0x15b66bc7, 0x1199130, 0x926df58, 0x1f4f039a, 0x556fd70, 0x1c100f6a, 0x15b6a90, 0x1934229d, 0x15021610, 0x1534a09e, 0xdf25bb0, 0x12ede5d1, 0x5d024bf, 0xa9b} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xbc6f683dde3c9631, 0xd9af1e1b26dec9ce, 0x926df5808cc89856, 0x5155bf5c3e9e0734, 0x53a15b6a90e0807b, 0x504f540858432684, 0xdbcba2df25bb0a9a, 0x18f00d974092fe5} +#else +{0xded07bbc792c63, 0x11e1b26dec9cebc, 0xc046644c2b6cd7, 0x10fa781cd249b7d, 0x1c100f6a2ab7eb, 0x3268453a15b6a9, 0x54d2827aa042c2, 0x1976f2e8b7c96ec, 0x16e01b2e8125f} +#endif +#endif +}; +const fp2_t BASIS_E0_QX = { +#if 0 +#elif RADIX == 16 +{0x15c, 0x865, 0x1af6, 0x17b9, 0x6a2, 0x1c22, 0x17c5, 0x1149, 0xa7, 0x151e, 0xe57, 0x4c2, 0x18cd, 0xbd2, 0x7a4, 0x7c6, 0x74a, 0xd2, 0x902, 0x68c, 0x21e, 0x1e44, 0x1f5a, 0x1d4c, 0x115b, 0x1777, 0x16d4, 0x503, 0x3af, 0x7e4, 0x1aa7, 0x3dd, 0x827, 0x186b, 0x765, 0x1fc5, 0xc78, 0x9bd, 0xfe} +#elif RADIX == 32 +{0x10ae12d6, 0x13af6432, 0x88d457b, 0xa4df178, 0x151e053c, 0x14984e57, 0x122f4b19, 0x14a3e31e, 0x12040d23, 0x878d18, 0xcfad791, 0xef15bea, 0x140eda97, 0x13f20ebc, 0xe3ddd53, 0x1970d682, 0x3c7f14e, 0x4eb} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x457b9d7b21942b84, 0x7814f149be2f088d, 0x22f4b19a4c272bd4, 0xc4810348e947c63d, 0x7d4cfad791043c68, 0x75e503b6a5dde2b, 0xe1ad04e3ddd539f9, 0x1326f58f1fc53b2} +#else +{0xf73af643285709, 0xf149be2f088d45, 0xcd261395ea3c0a, 0x3a51f18f48bd2c, 0x20878d18902069, 0x1dde2b7d4cfad79, 0x1cfc83af281db52, 0xcb86b4138f7754, 0xb4deb1e3f8a7} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x6ac, 0x25e, 0xc7a, 0x1492, 0xd01, 0xbc0, 0x118, 0x376, 0x3e0, 0x7ae, 0x573, 0x171f, 0x35a, 0x1725, 0x48f, 0xc94, 0x133c, 0x16a4, 0x10a8, 0x178d, 0xdd7, 0x798, 0x1d05, 0x39f, 0xc2a, 0x179c, 0x407, 0xd3, 0x118a, 0x1c9f, 0xeac, 0x145b, 0xc35, 0x11a2, 0x58b, 0xe4, 0x5e3, 0xae7, 0x330} +#elif RADIX == 32 +{0x3563c78, 0x4c7a12f, 0x101a0349, 0x1bb04617, 0x7ae1f00, 0xae3e573, 0x7dc946b, 0x13c64a12, 0x1516a49, 0x375ef1b, 0x1fe829e6, 0x138c2a1c, 0x34c80f7, 0xe4fc628, 0xb45b756, 0x2e344c3, 0xf18390b, 0x339} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x349263d0978d58f, 0xb87c037608c2f01a, 0x7dc946b571f2b99e, 0xd8545a92678c9424, 0x439fe829e61baf78, 0xe3140d3203de7185, 0xc68986b45b756727, 0x32b9cbc60e42c5} +#else +{0x924c7a12f1ab1e, 0x37608c2f01a03, 0x15ab8f95ccf5c3e, 0x99e325091f7251, 0xc375ef1b0a8b52, 0x1e7185439fe829e, 0x1393f18a069901e, 0x1171a261ad16dd5, 0x6573978c1c85} +#endif +#endif +}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/e0_basis.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/e0_basis.h new file mode 100644 index 0000000000..05cafb8462 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/e0_basis.h @@ -0,0 +1,3 @@ +#include +extern const fp2_t BASIS_E0_PX; +extern const fp2_t BASIS_E0_QX; 
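The constant tables above are emitted once per limb width: the RADIX macro (16, 32 or 64) picks the layout at compile time, and the 64-bit case further switches on SQISIGN_GF_IMPL_BROADWELL; the leading #if 0 appears to be there only so that every real branch can be an #elif, which keeps generated tables uniform. The snippet below is a minimal standalone sketch of that selection idiom and is not part of the patched sources: toy_digit_t, TOY_CONST and its limb values are made up, and the real fp layer may pack fewer than RADIX significant bits per limb.

#include <stdint.h>
#include <stdio.h>

/* Build with the limb width chosen on the command line, e.g.  cc -DRADIX=32 toy_radix.c */
#if 0
#elif RADIX == 16
typedef uint16_t toy_digit_t;
static const toy_digit_t TOY_CONST[4] = { 0x1234, 0x0000, 0x0000, 0x0000 };
#elif RADIX == 32
typedef uint32_t toy_digit_t;
static const toy_digit_t TOY_CONST[2] = { 0x00001234, 0x00000000 };
#elif RADIX == 64
typedef uint64_t toy_digit_t;
static const toy_digit_t TOY_CONST[1] = { 0x0000000000001234 };
#else
#error "RADIX must be 16, 32 or 64"
#endif

int main(void)
{
    /* Least-significant limb first; all three toy layouts encode the same value. */
    printf("RADIX=%d, limbs=%d, limb[0]=0x%llx\n",
           RADIX,
           (int)(sizeof(TOY_CONST) / sizeof(TOY_CONST[0])),
           (unsigned long long)TOY_CONST[0]);
    return 0;
}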
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec.c new file mode 100644 index 0000000000..be4e4e55b1 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec.c @@ -0,0 +1,665 @@ +#include +#include +#include +#include + +void +ec_point_init(ec_point_t *P) +{ // Initialize point as identity element (1:0) + fp2_set_one(&(P->x)); + fp2_set_zero(&(P->z)); +} + +void +ec_curve_init(ec_curve_t *E) +{ // Initialize the curve struct + // Initialize the constants + fp2_set_zero(&(E->A)); + fp2_set_one(&(E->C)); + + // Initialize the point (A+2 : 4C) + ec_point_init(&(E->A24)); + + // Set the bool to be false by default + E->is_A24_computed_and_normalized = false; +} + +void +select_point(ec_point_t *Q, const ec_point_t *P1, const ec_point_t *P2, const digit_t option) +{ // Select points in constant time + // If option = 0 then Q <- P1, else if option = 0xFF...FF then Q <- P2 + fp2_select(&(Q->x), &(P1->x), &(P2->x), option); + fp2_select(&(Q->z), &(P1->z), &(P2->z), option); +} + +void +cswap_points(ec_point_t *P, ec_point_t *Q, const digit_t option) +{ // Swap points in constant time + // If option = 0 then P <- P and Q <- Q, else if option = 0xFF...FF then P <- Q and Q <- P + fp2_cswap(&(P->x), &(Q->x), option); + fp2_cswap(&(P->z), &(Q->z), option); +} + +void +ec_normalize_point(ec_point_t *P) +{ + fp2_inv(&P->z); + fp2_mul(&P->x, &P->x, &P->z); + fp2_set_one(&(P->z)); +} + +void +ec_normalize_curve(ec_curve_t *E) +{ + fp2_inv(&E->C); + fp2_mul(&E->A, &E->A, &E->C); + fp2_set_one(&E->C); +} + +void +ec_curve_normalize_A24(ec_curve_t *E) +{ + if (!E->is_A24_computed_and_normalized) { + AC_to_A24(&E->A24, E); + ec_normalize_point(&E->A24); + E->is_A24_computed_and_normalized = true; + } + assert(fp2_is_one(&E->A24.z)); +} + +void +ec_normalize_curve_and_A24(ec_curve_t *E) +{ // Neither the curve or A24 are guaranteed to be normalized. 
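+    // Below: (A : C) is first reduced to (A/C : 1); then, with C = 1, A24 = ((A + 2) / 4 : 1) costs only two additions of one and two halvings.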
+ // First we normalize (A/C : 1) and conditionally compute + if (!fp2_is_one(&E->C)) { + ec_normalize_curve(E); + } + + if (!E->is_A24_computed_and_normalized) { + // Now compute A24 = ((A + 2) / 4 : 1) + fp2_add_one(&E->A24.x, &E->A); // re(A24.x) = re(A) + 1 + fp2_add_one(&E->A24.x, &E->A24.x); // re(A24.x) = re(A) + 2 + fp_copy(&E->A24.x.im, &E->A.im); // im(A24.x) = im(A) + + fp2_half(&E->A24.x, &E->A24.x); // (A + 2) / 2 + fp2_half(&E->A24.x, &E->A24.x); // (A + 2) / 4 + fp2_set_one(&E->A24.z); + + E->is_A24_computed_and_normalized = true; + } +} + +uint32_t +ec_is_zero(const ec_point_t *P) +{ + return fp2_is_zero(&P->z); +} + +uint32_t +ec_has_zero_coordinate(const ec_point_t *P) +{ + return fp2_is_zero(&P->x) | fp2_is_zero(&P->z); +} + +uint32_t +ec_is_equal(const ec_point_t *P, const ec_point_t *Q) +{ // Evaluate if two points in Montgomery coordinates (X:Z) are equal + // Returns 0xFFFFFFFF (true) if P=Q, 0 (false) otherwise + fp2_t t0, t1; + + // Check if P, Q are the points at infinity + uint32_t l_zero = ec_is_zero(P); + uint32_t r_zero = ec_is_zero(Q); + + // Check if PX * QZ = QX * PZ + fp2_mul(&t0, &P->x, &Q->z); + fp2_mul(&t1, &P->z, &Q->x); + uint32_t lr_equal = fp2_is_equal(&t0, &t1); + + // Points are equal if + // - Both are zero, or + // - neither are zero AND PX * QZ = QX * PZ + return (l_zero & r_zero) | (~l_zero & ~r_zero * lr_equal); +} + +uint32_t +ec_is_two_torsion(const ec_point_t *P, const ec_curve_t *E) +{ + if (ec_is_zero(P)) + return 0; + + uint32_t x_is_zero, tmp_is_zero; + fp2_t t0, t1, t2; + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + fp2_add(&t1, &t0, &t1); + fp2_mul(&t2, &t2, &E->A); + fp2_mul(&t1, &t1, &E->C); + fp2_add(&t1, &t1, &t1); + fp2_add(&t0, &t1, &t2); // 4 (CX^2+CZ^2+AXZ) + + x_is_zero = fp2_is_zero(&P->x); + tmp_is_zero = fp2_is_zero(&t0); + + // two torsion if x or x^2 + Ax + 1 is zero + return x_is_zero | tmp_is_zero; +} + +uint32_t +ec_is_four_torsion(const ec_point_t *P, const ec_curve_t *E) +{ + ec_point_t test; + xDBL_A24(&test, P, &E->A24, E->is_A24_computed_and_normalized); + return ec_is_two_torsion(&test, E); +} + +uint32_t +ec_is_basis_four_torsion(const ec_basis_t *B, const ec_curve_t *E) +{ // Check if basis points (P, Q) form a full 2^t-basis + ec_point_t P2, Q2; + xDBL_A24(&P2, &B->P, &E->A24, E->is_A24_computed_and_normalized); + xDBL_A24(&Q2, &B->Q, &E->A24, E->is_A24_computed_and_normalized); + return (ec_is_two_torsion(&P2, E) & ec_is_two_torsion(&Q2, E) & ~ec_is_equal(&P2, &Q2)); +} + +int +ec_curve_verify_A(const fp2_t *A) +{ // Verify the Montgomery coefficient A is valid (A^2-4 \ne 0) + // Return 1 if curve is valid, 0 otherwise + fp2_t t; + fp2_set_one(&t); + fp_add(&t.re, &t.re, &t.re); // t=2 + if (fp2_is_equal(A, &t)) + return 0; + fp_neg(&t.re, &t.re); // t=-2 + if (fp2_is_equal(A, &t)) + return 0; + return 1; +} + +int +ec_curve_init_from_A(ec_curve_t *E, const fp2_t *A) +{ // Initialize the curve from the A coefficient and check it is valid + // Return 1 if curve is valid, 0 otherwise + ec_curve_init(E); + fp2_copy(&E->A, A); // Set A + return ec_curve_verify_A(A); +} + +void +ec_j_inv(fp2_t *j_inv, const ec_curve_t *curve) +{ // j-invariant computation for Montgommery coefficient A2=(A+2C:4C) + fp2_t t0, t1; + + fp2_sqr(&t1, &curve->C); + fp2_sqr(j_inv, &curve->A); + fp2_add(&t0, &t1, &t1); + fp2_sub(&t0, j_inv, &t0); + fp2_sub(&t0, &t0, &t1); + fp2_sub(j_inv, &t0, &t1); + fp2_sqr(&t1, &t1); + fp2_mul(j_inv, j_inv, &t1); + 
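// at this point j_inv = C^4 * (A^2 - 4*C^2); the lines below build t0 = 256 * (A^2 - 3*C^2)^3 and divide, + // giving j = 256 * (A^2 - 3*C^2)^3 / (C^4 * (A^2 - 4*C^2)) +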
fp2_add(&t0, &t0, &t0); + fp2_add(&t0, &t0, &t0); + fp2_sqr(&t1, &t0); + fp2_mul(&t0, &t0, &t1); + fp2_add(&t0, &t0, &t0); + fp2_add(&t0, &t0, &t0); + fp2_inv(j_inv); + fp2_mul(j_inv, &t0, j_inv); +} + +void +xDBL_E0(ec_point_t *Q, const ec_point_t *P) +{ // Doubling of a Montgomery point in projective coordinates (X:Z) on the curve E0 with (A:C) = (0:1). + // Input: projective Montgomery x-coordinates P = (XP:ZP), where xP=XP/ZP, and Montgomery curve constants (A:C) = (0:1). + // Output: projective Montgomery x-coordinates Q <- 2*P = (XQ:ZQ) such that x(2P)=XQ/ZQ. + fp2_t t0, t1, t2; + + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + fp2_add(&t1, &t1, &t1); + fp2_mul(&Q->x, &t0, &t1); + fp2_add(&Q->z, &t1, &t2); + fp2_mul(&Q->z, &Q->z, &t2); +} + +void +xDBL(ec_point_t *Q, const ec_point_t *P, const ec_point_t *AC) +{ // Doubling of a Montgomery point in projective coordinates (X:Z). Computation of coefficient values A+2C and 4C + // on-the-fly. + // Input: projective Montgomery x-coordinates P = (XP:ZP), where xP=XP/ZP, and Montgomery curve constants (A:C). + // Output: projective Montgomery x-coordinates Q <- 2*P = (XQ:ZQ) such that x(2P)=XQ/ZQ. + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + fp2_add(&t3, &AC->z, &AC->z); + fp2_mul(&t1, &t1, &t3); + fp2_add(&t1, &t1, &t1); + fp2_mul(&Q->x, &t0, &t1); + fp2_add(&t0, &t3, &AC->x); + fp2_mul(&t0, &t0, &t2); + fp2_add(&t0, &t0, &t1); + fp2_mul(&Q->z, &t0, &t2); +} + +void +xDBL_A24(ec_point_t *Q, const ec_point_t *P, const ec_point_t *A24, const bool A24_normalized) +{ // Doubling of a Montgomery point in projective coordinates (X:Z). + // Input: projective Montgomery x-coordinates P = (XP:ZP), where xP=XP/ZP, and + // the Montgomery curve constants A24 = (A+2C:4C) (or A24 = (A+2C/4C:1) if normalized). + // Output: projective Montgomery x-coordinates Q <- 2*P = (XQ:ZQ) such that x(2P)=XQ/ZQ. + fp2_t t0, t1, t2; + + fp2_add(&t0, &P->x, &P->z); + fp2_sqr(&t0, &t0); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&t1, &t1); + fp2_sub(&t2, &t0, &t1); + if (!A24_normalized) + fp2_mul(&t1, &t1, &A24->z); + fp2_mul(&Q->x, &t0, &t1); + fp2_mul(&t0, &t2, &A24->x); + fp2_add(&t0, &t0, &t1); + fp2_mul(&Q->z, &t0, &t2); +} + +void +xADD(ec_point_t *R, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ) +{ // Differential addition of Montgomery points in projective coordinates (X:Z). + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, and difference + // PQ=P-Q=(XPQ:ZPQ). + // Output: projective Montgomery point R <- P+Q = (XR:ZR) such that x(P+Q)=XR/ZR. + fp2_t t0, t1, t2, t3; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_add(&t2, &Q->x, &Q->z); + fp2_sub(&t3, &Q->x, &Q->z); + fp2_mul(&t0, &t0, &t3); + fp2_mul(&t1, &t1, &t2); + fp2_add(&t2, &t0, &t1); + fp2_sub(&t3, &t0, &t1); + fp2_sqr(&t2, &t2); + fp2_sqr(&t3, &t3); + fp2_mul(&t2, &PQ->z, &t2); + fp2_mul(&R->z, &PQ->x, &t3); + fp2_copy(&R->x, &t2); +} + +void +xDBLADD(ec_point_t *R, + ec_point_t *S, + const ec_point_t *P, + const ec_point_t *Q, + const ec_point_t *PQ, + const ec_point_t *A24, + const bool A24_normalized) +{ // Simultaneous doubling and differential addition. 
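+    // This is the merged ladder step: 2*P and P+Q are computed together, sharing the sums and differences of the coordinates of P.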
+ // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, the difference + // PQ=P-Q=(XPQ:ZPQ), and the Montgomery curve constants A24 = (A+2C:4C) (or A24 = (A+2C/4C:1) if normalized). + // Output: projective Montgomery points R <- 2*P = (XR:ZR) such that x(2P)=XR/ZR, and S <- P+Q = (XS:ZS) such that = + // x(Q+P)=XS/ZS. + fp2_t t0, t1, t2; + + fp2_add(&t0, &P->x, &P->z); + fp2_sub(&t1, &P->x, &P->z); + fp2_sqr(&R->x, &t0); + fp2_sub(&t2, &Q->x, &Q->z); + fp2_add(&S->x, &Q->x, &Q->z); + fp2_mul(&t0, &t0, &t2); + fp2_sqr(&R->z, &t1); + fp2_mul(&t1, &t1, &S->x); + fp2_sub(&t2, &R->x, &R->z); + if (!A24_normalized) + fp2_mul(&R->z, &R->z, &A24->z); + fp2_mul(&R->x, &R->x, &R->z); + fp2_mul(&S->x, &A24->x, &t2); + fp2_sub(&S->z, &t0, &t1); + fp2_add(&R->z, &R->z, &S->x); + fp2_add(&S->x, &t0, &t1); + fp2_mul(&R->z, &R->z, &t2); + fp2_sqr(&S->z, &S->z); + fp2_sqr(&S->x, &S->x); + fp2_mul(&S->z, &S->z, &PQ->x); + fp2_mul(&S->x, &S->x, &PQ->z); +} + +void +xMUL(ec_point_t *Q, const ec_point_t *P, const digit_t *k, const int kbits, const ec_curve_t *curve) +{ // The Montgomery ladder + // Input: projective Montgomery point P=(XP:ZP) such that xP=XP/ZP, a scalar k of bitlength kbits, and + // the Montgomery curve constants (A:C) (or A24 = (A+2C/4C:1) if normalized). + // Output: projective Montgomery points Q <- k*P = (XQ:ZQ) such that x(k*P)=XQ/ZQ. + ec_point_t R0, R1, A24; + digit_t mask; + unsigned int bit, prevbit = 0, swap; + + if (!curve->is_A24_computed_and_normalized) { + // Computation of A24=(A+2C:4C) + fp2_add(&A24.x, &curve->C, &curve->C); + fp2_add(&A24.z, &A24.x, &A24.x); + fp2_add(&A24.x, &A24.x, &curve->A); + } else { + fp2_copy(&A24.x, &curve->A24.x); + fp2_copy(&A24.z, &curve->A24.z); + // Assert A24 has been normalised + assert(fp2_is_one(&A24.z)); + } + + // R0 <- (1:0), R1 <- P + ec_point_init(&R0); + fp2_copy(&R1.x, &P->x); + fp2_copy(&R1.z, &P->z); + + // Main loop + for (int i = kbits - 1; i >= 0; i--) { + bit = (k[i >> LOG2RADIX] >> (i & (RADIX - 1))) & 1; + swap = bit ^ prevbit; + prevbit = bit; + mask = 0 - (digit_t)swap; + + cswap_points(&R0, &R1, mask); + xDBLADD(&R0, &R1, &R0, &R1, P, &A24, true); + } + swap = 0 ^ prevbit; + mask = 0 - (digit_t)swap; + cswap_points(&R0, &R1, mask); + + fp2_copy(&Q->x, &R0.x); + fp2_copy(&Q->z, &R0.z); +} + +int +xDBLMUL(ec_point_t *S, + const ec_point_t *P, + const digit_t *k, + const ec_point_t *Q, + const digit_t *l, + const ec_point_t *PQ, + const int kbits, + const ec_curve_t *curve) +{ // The Montgomery biladder + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, scalars k and l of + // bitlength kbits, the difference PQ=P-Q=(XPQ:ZPQ), and the Montgomery curve constants (A:C). + // Output: projective Montgomery point S <- k*P + l*Q = (XS:ZS) such that x(k*P + l*Q)=XS/ZS. 
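+    // Strategy: even scalars are first made odd (recorded in sigma), the pair is then jointly recoded so that each + // iteration costs one doubling and two differential additions with known differences (DIFF1a/b, DIFF2a/b); the + // final selects compensate for the parity adjustment.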
+ + int i, A_is_zero; + digit_t evens, mevens, bitk0, bitl0, maskk, maskl, temp, bs1_ip1, bs2_ip1, bs1_i, bs2_i, h; + digit_t sigma[2] = { 0 }, pre_sigma = 0; + digit_t k_t[NWORDS_ORDER], l_t[NWORDS_ORDER], one[NWORDS_ORDER] = { 0 }, r[2 * BITS] = { 0 }; + ec_point_t DIFF1a, DIFF1b, DIFF2a, DIFF2b, R[3] = { 0 }, T[3]; + + // differential additions formulas are invalid in this case + if (ec_has_zero_coordinate(P) | ec_has_zero_coordinate(Q) | ec_has_zero_coordinate(PQ)) + return 0; + + // Derive sigma according to parity + bitk0 = (k[0] & 1); + bitl0 = (l[0] & 1); + maskk = 0 - bitk0; // Parity masks: 0 if even, otherwise 1...1 + maskl = 0 - bitl0; + sigma[0] = (bitk0 ^ 1); + sigma[1] = (bitl0 ^ 1); + evens = sigma[0] + sigma[1]; // Count number of even scalars + mevens = 0 - (evens & 1); // Mask mevens <- 0 if # even of scalars = 0 or 2, otherwise mevens = 1...1 + + // If k and l are both even or both odd, pick sigma = (0,1) + sigma[0] = (sigma[0] & mevens); + sigma[1] = (sigma[1] & mevens) | (1 & ~mevens); + + // Convert even scalars to odd + one[0] = 1; + mp_sub(k_t, k, one, NWORDS_ORDER); + mp_sub(l_t, l, one, NWORDS_ORDER); + select_ct(k_t, k_t, k, maskk, NWORDS_ORDER); + select_ct(l_t, l_t, l, maskl, NWORDS_ORDER); + + // Scalar recoding + for (i = 0; i < kbits; i++) { + // If sigma[0] = 1 swap k_t and l_t + maskk = 0 - (sigma[0] ^ pre_sigma); + swap_ct(k_t, l_t, maskk, NWORDS_ORDER); + + if (i == kbits - 1) { + bs1_ip1 = 0; + bs2_ip1 = 0; + } else { + bs1_ip1 = mp_shiftr(k_t, 1, NWORDS_ORDER); + bs2_ip1 = mp_shiftr(l_t, 1, NWORDS_ORDER); + } + bs1_i = k_t[0] & 1; + bs2_i = l_t[0] & 1; + + r[2 * i] = bs1_i ^ bs1_ip1; + r[2 * i + 1] = bs2_i ^ bs2_ip1; + + // Revert sigma if second bit, r_(2i+1), is 1 + pre_sigma = sigma[0]; + maskk = 0 - r[2 * i + 1]; + select_ct(&temp, &sigma[0], &sigma[1], maskk, 1); + select_ct(&sigma[1], &sigma[1], &sigma[0], maskk, 1); + sigma[0] = temp; + } + + // Point initialization + ec_point_init(&R[0]); + maskk = 0 - sigma[0]; + select_point(&R[1], P, Q, maskk); + select_point(&R[2], Q, P, maskk); + + fp2_copy(&DIFF1a.x, &R[1].x); + fp2_copy(&DIFF1a.z, &R[1].z); + fp2_copy(&DIFF1b.x, &R[2].x); + fp2_copy(&DIFF1b.z, &R[2].z); + + // Initialize DIFF2a <- P+Q, DIFF2b <- P-Q + xADD(&R[2], &R[1], &R[2], PQ); + if (ec_has_zero_coordinate(&R[2])) + return 0; // non valid formulas + + fp2_copy(&DIFF2a.x, &R[2].x); + fp2_copy(&DIFF2a.z, &R[2].z); + fp2_copy(&DIFF2b.x, &PQ->x); + fp2_copy(&DIFF2b.z, &PQ->z); + + A_is_zero = fp2_is_zero(&curve->A); + + // Main loop + for (i = kbits - 1; i >= 0; i--) { + h = r[2 * i] + r[2 * i + 1]; // in {0, 1, 2} + maskk = 0 - (h & 1); + select_point(&T[0], &R[0], &R[1], maskk); + maskk = 0 - (h >> 1); + select_point(&T[0], &T[0], &R[2], maskk); + if (A_is_zero) { + xDBL_E0(&T[0], &T[0]); + } else { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(&T[0], &T[0], &curve->A24, true); + } + + maskk = 0 - r[2 * i + 1]; // in {0, 1} + select_point(&T[1], &R[0], &R[1], maskk); + select_point(&T[2], &R[1], &R[2], maskk); + + cswap_points(&DIFF1a, &DIFF1b, maskk); + xADD(&T[1], &T[1], &T[2], &DIFF1a); + xADD(&T[2], &R[0], &R[2], &DIFF2a); + + // If hw (mod 2) = 1 then swap DIFF2a and DIFF2b + maskk = 0 - (h & 1); + cswap_points(&DIFF2a, &DIFF2b, maskk); + + // R <- T + copy_point(&R[0], &T[0]); + copy_point(&R[1], &T[1]); + copy_point(&R[2], &T[2]); + } + + // Output R[evens] + select_point(S, &R[0], &R[1], mevens); + + maskk = 0 - (bitk0 & bitl0); + select_point(S, S, &R[2], maskk); + return 1; +} + +int +ec_ladder3pt(ec_point_t *R, + const 
digit_t *m, + const ec_point_t *P, + const ec_point_t *Q, + const ec_point_t *PQ, + const ec_curve_t *E) +{ // The 3-point Montgomery ladder + // Input: projective Montgomery points P=(XP:ZP) and Q=(XQ:ZQ) such that xP=XP/ZP and xQ=XQ/ZQ, a scalar k of + // bitlength kbits, the difference PQ=P-Q=(XPQ:ZPQ), and the Montgomery curve constants A24 = (A+2C/4C:1). + // Output: projective Montgomery point R <- P + m*Q = (XR:ZR) such that x(P + m*Q)=XR/ZR. + assert(E->is_A24_computed_and_normalized); + if (!fp2_is_one(&E->A24.z)) { + return 0; + } + // Formulas are not valid in that case + if (ec_has_zero_coordinate(PQ)) { + return 0; + } + + ec_point_t X0, X1, X2; + copy_point(&X0, Q); + copy_point(&X1, P); + copy_point(&X2, PQ); + + int i, j; + digit_t t; + for (i = 0; i < NWORDS_ORDER; i++) { + t = 1; + for (j = 0; j < RADIX; j++) { + cswap_points(&X1, &X2, -((t & m[i]) == 0)); + xDBLADD(&X0, &X1, &X0, &X1, &X2, &E->A24, true); + cswap_points(&X1, &X2, -((t & m[i]) == 0)); + t <<= 1; + }; + }; + copy_point(R, &X1); + return 1; +} + +// WRAPPERS to export + +void +ec_dbl(ec_point_t *res, const ec_point_t *P, const ec_curve_t *curve) +{ + // If A24 = ((A+2)/4 : 1) we save multiplications + if (curve->is_A24_computed_and_normalized) { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(res, P, &curve->A24, true); + } else { + // Otherwise we compute A24 on the fly for doubling + xDBL(res, P, (const ec_point_t *)curve); + } +} + +void +ec_dbl_iter(ec_point_t *res, int n, const ec_point_t *P, ec_curve_t *curve) +{ + if (n == 0) { + copy_point(res, P); + return; + } + + // When the chain is long enough, we should normalise A24 + if (n > 50) { + ec_curve_normalize_A24(curve); + } + + // When A24 is normalized we can save some multiplications + if (curve->is_A24_computed_and_normalized) { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(res, P, &curve->A24, true); + for (int i = 0; i < n - 1; i++) { + assert(fp2_is_one(&curve->A24.z)); + xDBL_A24(res, res, &curve->A24, true); + } + } else { + // Otherwise we do normal doubling + xDBL(res, P, (const ec_point_t *)curve); + for (int i = 0; i < n - 1; i++) { + xDBL(res, res, (const ec_point_t *)curve); + } + } +} + +void +ec_dbl_iter_basis(ec_basis_t *res, int n, const ec_basis_t *B, ec_curve_t *curve) +{ + ec_dbl_iter(&res->P, n, &B->P, curve); + ec_dbl_iter(&res->Q, n, &B->Q, curve); + ec_dbl_iter(&res->PmQ, n, &B->PmQ, curve); +} + +void +ec_mul(ec_point_t *res, const digit_t *scalar, const int kbits, const ec_point_t *P, ec_curve_t *curve) +{ + // For large scalars it's worth normalising anyway + if (kbits > 50) { + ec_curve_normalize_A24(curve); + } + + // When A24 is computed and normalized we save some Fp2 multiplications + xMUL(res, P, scalar, kbits, curve); +} + +int +ec_biscalar_mul(ec_point_t *res, + const digit_t *scalarP, + const digit_t *scalarQ, + const int kbits, + const ec_basis_t *PQ, + const ec_curve_t *curve) +{ + if (fp2_is_zero(&PQ->PmQ.z)) + return 0; + + /* Differential additions behave badly when PmQ = (0:1), so we need to + * treat this case specifically. 
Since we assume P, Q are a basis, this + * can happen only if kbits==1 */ + if (kbits == 1) { + // Sanity check: our basis should be given by 2-torsion points + if (!ec_is_two_torsion(&PQ->P, curve) || !ec_is_two_torsion(&PQ->Q, curve) || + !ec_is_two_torsion(&PQ->PmQ, curve)) + return 0; + digit_t bP, bQ; + bP = (scalarP[0] & 1); + bQ = (scalarQ[0] & 1); + if (bP == 0 && bQ == 0) + ec_point_init(res); //(1: 0) + else if (bP == 1 && bQ == 0) + copy_point(res, &PQ->P); + else if (bP == 0 && bQ == 1) + copy_point(res, &PQ->Q); + else if (bP == 1 && bQ == 1) + copy_point(res, &PQ->PmQ); + else // should never happen + assert(0); + return 1; + } else { + ec_curve_t E; + copy_curve(&E, curve); + + if (!fp2_is_zero(&curve->A)) { // If A is not zero normalize + ec_curve_normalize_A24(&E); + } + return xDBLMUL(res, &PQ->P, scalarP, &PQ->Q, scalarQ, &PQ->PmQ, kbits, (const ec_curve_t *)&E); + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec.h new file mode 100644 index 0000000000..ee2be38060 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec.h @@ -0,0 +1,668 @@ +/** @file + * + * @authors Luca De Feo, Francisco RH + * + * @brief Elliptic curve stuff + */ + +#ifndef EC_H +#define EC_H +#include +#include +#include +#include +#include + +/** @defgroup ec Elliptic curves + * @{ + */ + +/** @defgroup ec_t Data structures + * @{ + */ + +/** @brief Projective point on the Kummer line E/±1 in Montgomery coordinates + * + * @typedef ec_point_t + * + * @struct ec_point_t + * + * A projective point in (X:Z) or (X:Y:Z) coordinates (tbd). + */ +typedef struct ec_point_t +{ + fp2_t x; + fp2_t z; +} ec_point_t; + +/** @brief Projective point in Montgomery coordinates + * + * @typedef jac_point_t + * + * @struct jac_point_t + * + * A projective point in (X:Y:Z) coordinates + */ +typedef struct jac_point_t +{ + fp2_t x; + fp2_t y; + fp2_t z; +} jac_point_t; +​ +/** @brief Addition components + * + * @typedef add_components_t + * + * @struct add_components_t + * + * 3 components u,v,w that define the (X:Z) coordinates of both + * addition and subtraction of two distinct points with + * P+Q = (u-v:w) and P-Q = (u+v:w) + */ +typedef struct add_components_t +{ + fp2_t u; + fp2_t v; + fp2_t w; +} add_components_t; + +/** @brief A basis of a torsion subgroup + * + * @typedef ec_basis_t + * + * @struct ec_basis_t + * + * A pair of points (or a triplet, tbd) forming a basis of a torsion subgroup.
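+ * The difference PmQ = P - Q is stored alongside P and Q so that x-only differential additions (xADD, the ladders) can be applied to the basis.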
+ */ +typedef struct ec_basis_t +{ + ec_point_t P; + ec_point_t Q; + ec_point_t PmQ; +} ec_basis_t; + +/** @brief An elliptic curve + * + * @typedef ec_curve_t + * + * @struct ec_curve_t + * + * An elliptic curve in projective Montgomery form + */ +typedef struct ec_curve_t +{ + fp2_t A; + fp2_t C; ///< cannot be 0 + ec_point_t A24; // the point (A+2 : 4C) + bool is_A24_computed_and_normalized; // says if A24 has been computed and normalized +} ec_curve_t; + +/** @brief An isogeny of degree a power of 2 + * + * @typedef ec_isog_even_t + * + * @struct ec_isog_even_t + */ +typedef struct ec_isog_even_t +{ + ec_curve_t curve; ///< The domain curve + ec_point_t kernel; ///< A kernel generator + unsigned length; ///< The length as a 2-isogeny walk +} ec_isog_even_t; + +/** @brief Isomorphism of Montgomery curves + * + * @typedef ec_isom_t + * + * @struct ec_isom_t + * + * The isomorphism is given by the map maps (X:Z) ↦ ( (Nx X + Nz Z) : (D Z) ) + */ +typedef struct ec_isom_t +{ + fp2_t Nx; + fp2_t Nz; + fp2_t D; +} ec_isom_t; + +// end ec_t +/** @} + */ + +/** @defgroup ec_curve_t Curves and isomorphisms + * @{ + */ + +// Initalisation for curves and points +void ec_curve_init(ec_curve_t *E); +void ec_point_init(ec_point_t *P); + +/** + * @brief Verify that a Montgomery coefficient is valid + * + * @param A an fp2_t + * + * @return 0 if curve is invalid, 1 otherwise + */ +int ec_curve_verify_A(const fp2_t *A); + +/** + * @brief Initialize an elliptic curve from a coefficient + * + * @param A an fp2_t + * @param E the elliptic curve to initialize + * + * @return 0 if curve is invalid, 1 otherwise + */ +int ec_curve_init_from_A(ec_curve_t *E, const fp2_t *A); + +// Copying points, bases and curves +static inline void +copy_point(ec_point_t *P, const ec_point_t *Q) +{ + fp2_copy(&P->x, &Q->x); + fp2_copy(&P->z, &Q->z); +} + +static inline void +copy_basis(ec_basis_t *B1, const ec_basis_t *B0) +{ + copy_point(&B1->P, &B0->P); + copy_point(&B1->Q, &B0->Q); + copy_point(&B1->PmQ, &B0->PmQ); +} + +static inline void +copy_curve(ec_curve_t *E1, const ec_curve_t *E2) +{ + fp2_copy(&(E1->A), &(E2->A)); + fp2_copy(&(E1->C), &(E2->C)); + E1->is_A24_computed_and_normalized = E2->is_A24_computed_and_normalized; + copy_point(&E1->A24, &E2->A24); +} + +// Functions for working with the A24 point and normalisation + +/** + * @brief Reduce (A : C) to (A/C : 1) in place + * + * @param E a curve + */ +void ec_normalize_curve(ec_curve_t *E); + +/** + * @brief Reduce (A + 2 : 4C) to ((A+2)/4C : 1) in place + * + * @param E a curve + */ +void ec_curve_normalize_A24(ec_curve_t *E); + +/** + * @brief Normalise both (A : C) and (A + 2 : 4C) as above, in place + * + * @param E a curve + */ +void ec_normalize_curve_and_A24(ec_curve_t *E); + +/** + * @brief Given a curve E, compute (A+2 : 4C) + * + * @param A24 the value (A+2 : 4C) to return into + * @param E a curve + */ +static inline void +AC_to_A24(ec_point_t *A24, const ec_curve_t *E) +{ + // Maybe we already have this computed + if (E->is_A24_computed_and_normalized) { + copy_point(A24, &E->A24); + return; + } + + // A24 = (A+2C : 4C) + fp2_add(&A24->z, &E->C, &E->C); + fp2_add(&A24->x, &E->A, &A24->z); + fp2_add(&A24->z, &A24->z, &A24->z); +} + +/** + * @brief Given a curve the point (A+2 : 4C) compute the curve coefficients (A : C) + * + * @param E a curve to compute + * @param A24 the value (A+2 : 4C) + */ +static inline void +A24_to_AC(ec_curve_t *E, const ec_point_t *A24) +{ + // (A:C) = ((A+2C)*2-4C : 4C) + fp2_add(&E->A, &A24->x, &A24->x); + fp2_sub(&E->A, 
&E->A, &A24->z); + fp2_add(&E->A, &E->A, &E->A); + fp2_copy(&E->C, &A24->z); +} + +/** + * @brief j-invariant. + * + * @param j_inv computed j_invariant + * @param curve input curve + */ +void ec_j_inv(fp2_t *j_inv, const ec_curve_t *curve); + +/** + * @brief Isomorphism of elliptic curve + * Takes as input two isomorphic Kummer lines in Montgomery form, and output an isomorphism between + * them + * + * @param isom computed isomorphism + * @param from domain curve + * @param to image curve + * @return 0xFFFFFFFF if there was an error during the computation, zero otherwise + */ +uint32_t ec_isomorphism(ec_isom_t *isom, const ec_curve_t *from, const ec_curve_t *to); + +/** + * @brief In-place evaluation of an isomorphism + * + * @param P a point + * @param isom an isomorphism + */ +void ec_iso_eval(ec_point_t *P, ec_isom_t *isom); + +/** @} + */ +/** @defgroup ec_point_t Point operations + * @{ + */ + +/** + * @brief Point equality + * + * @param P a point + * @param Q a point + * @return 0xFFFFFFFF if equal, zero otherwise + */ +uint32_t ec_is_equal(const ec_point_t *P, const ec_point_t *Q); + +/** + * @brief Point equality + * + * @param P a point + * @return 0xFFFFFFFF if point at infinity, zero otherwise + */ +uint32_t ec_is_zero(const ec_point_t *P); + +/** + * @brief Two torsion test + * + * @param P a point + * @param E the elliptic curve + * @return 0xFFFFFFFF if P is 2-torsion but not zero, zero otherwise + */ +uint32_t ec_is_two_torsion(const ec_point_t *P, const ec_curve_t *E); + +/** + * @brief Four torsion test + * + * @param P a point + * @param E the elliptic curve + * @return 0xFFFFFFFF if P is 2-torsion but not zero, zero otherwise + */ +uint32_t ec_is_four_torsion(const ec_point_t *P, const ec_curve_t *E); + +/** + * @brief Reduce Z-coordinate of point in place + * + * @param P a point + */ +void ec_normalize_point(ec_point_t *P); + +void xDBL_E0(ec_point_t *Q, const ec_point_t *P); +void xADD(ec_point_t *R, const ec_point_t *P, const ec_point_t *Q, const ec_point_t *PQ); +void xDBL_A24(ec_point_t *Q, const ec_point_t *P, const ec_point_t *A24, const bool A24_normalized); + +/** + * @brief Point doubling + * + * @param res computed double of P + * @param P a point + * @param curve an elliptic curve + */ +void ec_dbl(ec_point_t *res, const ec_point_t *P, const ec_curve_t *curve); + +/** + * @brief Point iterated doubling + * + * @param res computed double of P + * @param P a point + * @param n the number of double + * @param curve the curve on which P lays + */ +void ec_dbl_iter(ec_point_t *res, int n, const ec_point_t *P, ec_curve_t *curve); + +/** + * @brief Iterated doubling for a basis P, Q, PmQ + * + * @param res the computed iterated double of basis B + * @param n the number of doubles + * @param B the basis to double + * @param curve the parent curve of the basis + */ +void ec_dbl_iter_basis(ec_basis_t *res, int n, const ec_basis_t *B, ec_curve_t *curve); + +/** + * @brief Point multiplication + * + * @param res computed scalar * P + * @param curve the curve + * @param scalar an unsigned multi-precision integer + * @param P a point + * @param kbits numer of bits of the scalar + */ +void ec_mul(ec_point_t *res, const digit_t *scalar, const int kbits, const ec_point_t *P, ec_curve_t *curve); + +/** + * @brief Combination P+m*Q + * + * @param R computed P + m * Q + * @param curve the curve + * @param m an unsigned multi-precision integer + * @param P a point + * @param Q a point + * @param PQ the difference P-Q + * @return 0 if there was an error, 1 otherwise + */ +int 
ec_ladder3pt(ec_point_t *R, + const digit_t *m, + const ec_point_t *P, + const ec_point_t *Q, + const ec_point_t *PQ, + const ec_curve_t *curve); + +/** + * @brief Linear combination of points of a basis + * + * @param res computed scalarP * P + scalarQ * Q + * @param scalarP an unsigned multi-precision integer + * @param scalarQ an unsigned multi-precision integer + * @param kbits number of bits of the scalars, or n for points of order 2^n + * @param PQ a torsion basis consisting of points P and Q + * @param curve the curve + * + * @return 0 if there was an error, 1 otherwise + */ +int ec_biscalar_mul(ec_point_t *res, + const digit_t *scalarP, + const digit_t *scalarQ, + const int kbits, + const ec_basis_t *PQ, + const ec_curve_t *curve); + +// end point computations +/** + * @} + */ + +/** @defgroup ec_dlog_t Torsion basis computations + * @{ + */ + +/** + * @brief Generate a 2^f-torsion basis from a Montgomery curve along with a hint + * + * @param PQ2 an ec_basis_t + * @param curve an ec_curve_t + * @param f an integer + * + * @return A hint + * + * The algorithm is deterministic + */ +uint8_t ec_curve_to_basis_2f_to_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f); + +/** + * @brief Generate a 2^f-torsion basis from a Montgomery curve and a given hint + * + * @param PQ2 an ec_basis_t + * @param curve an ec_curve_t + * @param f an integer + * @param hint the hint + * + * @return 1 if the basis is valid, 0 otherwise + * + * The algorithm is deterministic + */ +int ec_curve_to_basis_2f_from_hint(ec_basis_t *PQ2, ec_curve_t *curve, int f, const uint8_t hint); +/** // end basis computations + * @} + */ + +/** @defgroup ec_isog_t Isogenies + * @{ + */ + +/** + * @brief Evaluate isogeny of even degree on list of points. + * Returns 0 if successful and -1 if kernel has the wrong order or includes (0:1). + * + * @param image computed image curve + * @param phi isogeny + * @param points a list of points to evaluate the isogeny on, modified in place + * @param len_points length of the list points + * + * @return 0 if there was no error, 0xFFFFFFFF otherwise + */ +uint32_t ec_eval_even(ec_curve_t *image, ec_isog_even_t *phi, ec_point_t *points, unsigned len_points); + +/** + * @brief Multiplicative strategy for a short isogeny chain. Returns 0 if successful and -1 + * if kernel has the wrong order or includes (0:1) when special=false. + * + * @param curve domain curve, to be overwritten by the codomain curve. + * @param kernel a kernel generator of order 2^len + * @param len the length of the 2-isogeny chain + * @param points a list of points to evaluate the isogeny on, modified in place + * @param len_points length of the list points + * @param special if true, allow isogenies with (0:1) in the kernel + * + * @return 0 if there was no error, 0xFFFFFFFF otherwise + */ +uint32_t ec_eval_small_chain(ec_curve_t *curve, + const ec_point_t *kernel, + int len, + ec_point_t *points, + unsigned len_points, + bool special); + +/** + * @brief Recover Y-coordinate from X-coordinate and curve coefficients.
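+ * The x-coordinate alone determines y only up to sign, so the recovered y is one of the two valid choices.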
+ * + * @param y: a y-coordinate + * @param Px: a x-coordinate + * @param curve: the elliptic curve + * + * @return 0xFFFFFFFF if the point was on the curve, 0 otherwise + */ +uint32_t ec_recover_y(fp2_t *y, const fp2_t *Px, const ec_curve_t *curve); + +// Jacobian point init and copying +void jac_init(jac_point_t *P); +void copy_jac_point(jac_point_t *P, const jac_point_t *Q); + +/** + * @brief Test if two Jacobian points are equal + * + * @param P: a point + * @param Q: a point + * + * @return 0xFFFFFFFF if they are equal, 0 otherwise + */ +uint32_t jac_is_equal(const jac_point_t *P, const jac_point_t *Q); + +// Convert from Jacobian to x-only (just drop the Y-coordinate) +void jac_to_xz(ec_point_t *P, const jac_point_t *xyP); +// Convert from Jacobian coordinates in Montgomery model to Weierstrass +void jac_to_ws(jac_point_t *P, fp2_t *t, fp2_t *ao3, const jac_point_t *Q, const ec_curve_t *curve); +void jac_from_ws(jac_point_t *Q, const jac_point_t *P, const fp2_t *ao3, const ec_curve_t *curve); + +// Jacobian arithmetic +void jac_neg(jac_point_t *Q, const jac_point_t *P); +void ADD(jac_point_t *R, const jac_point_t *P, const jac_point_t *Q, const ec_curve_t *AC); +void DBL(jac_point_t *Q, const jac_point_t *P, const ec_curve_t *AC); +void DBLW(jac_point_t *Q, fp2_t *u, const jac_point_t *P, const fp2_t *t); +void jac_to_xz_add_components(add_components_t *uvw, const jac_point_t *P, const jac_point_t *Q, const ec_curve_t *AC); + +/** + * @brief Given a basis in x-only, lift to a pair of Jacobian points + * + * @param P: a point + * @param Q: a point + * @param B: a basis + * @param E: an elliptic curve + * + * @return 0xFFFFFFFF if there was no error, 0 otherwise + * + * + * Lifts a basis x(P), x(Q), x(P-Q) assuming the curve has (A/C : 1) and + * the point P = (X/Z : 1). 
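+ * (the x-coordinate of P - Q fixes the relative sign of the two lifted points).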
For generic implementation see lift_basis() + */ +uint32_t lift_basis_normalized(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E); + +/** + * @brief Given a basis in x-only, lift to a pair of Jacobian points + * + * @param P: a point + * @param Q: a point + * @param B: a basis + * @param E: an elliptic curve + * + * @return 0xFFFFFFFF if there was no error, 0 otherwise + */ +uint32_t lift_basis(jac_point_t *P, jac_point_t *Q, ec_basis_t *B, ec_curve_t *E); + +/** + * @brief Check if basis points (P, Q) form a full 4-basis + * + * @param B: a basis + * @param E: an elliptic curve + * + * @return 0xFFFFFFFF if they form a basis, 0 otherwise + */ +uint32_t ec_is_basis_four_torsion(const ec_basis_t *B, const ec_curve_t *E); + +/* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Test functions for printing and order checking, only used in debug mode + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + */ + +/** + * @brief Check if a point (X : Z) has order exactly 2^t + * + * @param P: a point + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_point_order_twof(const ec_point_t *P, const ec_curve_t *E, int t) +{ + ec_point_t test; + ec_curve_t curve; + test = *P; + copy_curve(&curve, E); + + if (ec_is_zero(&test)) + return 0; + // Scale point by 2^(t-1) + ec_dbl_iter(&test, t - 1, &test, &curve); + // If it's zero now, it doesnt have order 2^t + if (ec_is_zero(&test)) + return 0; + // Ensure [2^t] P = 0 + ec_dbl(&test, &test, &curve); + return ec_is_zero(&test); +} + +/** + * @brief Check if basis points (P, Q, PmQ) all have order exactly 2^t + * + * @param B: a basis + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_basis_order_twof(const ec_basis_t *B, const ec_curve_t *E, int t) +{ + int check_P = test_point_order_twof(&B->P, E, t); + int check_Q = test_point_order_twof(&B->Q, E, t); + int check_PmQ = test_point_order_twof(&B->PmQ, E, t); + + return check_P & check_Q & check_PmQ; +} + +/** + * @brief Check if a Jacobian point (X : Y : Z) has order exactly 2^f + * + * @param P: a point + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_jac_order_twof(const jac_point_t *P, const ec_curve_t *E, int t) +{ + jac_point_t test; + test = *P; + if (fp2_is_zero(&test.z)) + return 0; + for (int i = 0; i < t - 1; i++) { + DBL(&test, &test, E); + } + if (fp2_is_zero(&test.z)) + return 0; + DBL(&test, &test, E); + return (fp2_is_zero(&test.z)); +} + +// Prints the x-coordinate of the point (X : 1) +static void +ec_point_print(const char *name, ec_point_t P) +{ + fp2_t a; + if (fp2_is_zero(&P.z)) { + printf("%s = INF\n", name); + } else { + fp2_copy(&a, &P.z); + fp2_inv(&a); + fp2_mul(&a, &a, &P.x); + fp2_print(name, &a); + } +} + +// Prints the Montgomery coefficient A +static void +ec_curve_print(const char *name, ec_curve_t E) +{ + fp2_t a; + fp2_copy(&a, &E.C); + fp2_inv(&a); + fp2_mul(&a, &a, &E.A); + fp2_print(name, &a); +} + +#endif +// end isogeny computations +/** + * @} + */ + +// end ec +/** + * @} + */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec_jac.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec_jac.c new file mode 100644 index 0000000000..20ca68c9b2 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec_jac.c @@ -0,0 
+1,335 @@ +#include +#include + +void +jac_init(jac_point_t *P) +{ // Initialize Montgomery in Jacobian coordinates as identity element (0:1:0) + fp2_set_zero(&P->x); + fp2_set_one(&P->y); + fp2_set_zero(&P->z); +} + +uint32_t +jac_is_equal(const jac_point_t *P, const jac_point_t *Q) +{ // Evaluate if two points in Jacobian coordinates (X:Y:Z) are equal + // Returns 1 (true) if P=Q, 0 (false) otherwise + fp2_t t0, t1, t2, t3; + + fp2_sqr(&t0, &Q->z); + fp2_mul(&t2, &P->x, &t0); // x1*z2^2 + fp2_sqr(&t1, &P->z); + fp2_mul(&t3, &Q->x, &t1); // x2*z1^2 + fp2_sub(&t2, &t2, &t3); + + fp2_mul(&t0, &t0, &Q->z); + fp2_mul(&t0, &P->y, &t0); // y1*z2^3 + fp2_mul(&t1, &t1, &P->z); + fp2_mul(&t1, &Q->y, &t1); // y2*z1^3 + fp2_sub(&t0, &t0, &t1); + + return fp2_is_zero(&t0) & fp2_is_zero(&t2); +} + +void +jac_to_xz(ec_point_t *P, const jac_point_t *xyP) +{ + fp2_copy(&P->x, &xyP->x); + fp2_copy(&P->z, &xyP->z); + fp2_sqr(&P->z, &P->z); + + // If xyP = (0:1:0), we currently have P=(0 : 0) but we want to set P=(1:0) + uint32_t c1, c2; + fp2_t one; + fp2_set_one(&one); + + c1 = fp2_is_zero(&P->x); + c2 = fp2_is_zero(&P->z); + fp2_select(&P->x, &P->x, &one, c1 & c2); +} + +void +jac_to_ws(jac_point_t *Q, fp2_t *t, fp2_t *ao3, const jac_point_t *P, const ec_curve_t *curve) +{ + // Cost of 3M + 2S when A != 0. + fp_t one; + fp2_t a; + /* a = 1 - A^2/3, U = X + (A*Z^2)/3, V = Y, W = Z, T = a*Z^4*/ + fp_set_one(&one); + if (!fp2_is_zero(&(curve->A))) { + fp_div3(&(ao3->re), &(curve->A.re)); + fp_div3(&(ao3->im), &(curve->A.im)); + fp2_sqr(t, &P->z); + fp2_mul(&Q->x, ao3, t); + fp2_add(&Q->x, &Q->x, &P->x); + fp2_sqr(t, t); + fp2_mul(&a, ao3, &(curve->A)); + fp_sub(&(a.re), &one, &(a.re)); + fp_neg(&(a.im), &(a.im)); + fp2_mul(t, t, &a); + } else { + fp2_copy(&Q->x, &P->x); + fp2_sqr(t, &P->z); + fp2_sqr(t, t); + } + fp2_copy(&Q->y, &P->y); + fp2_copy(&Q->z, &P->z); +} + +void +jac_from_ws(jac_point_t *Q, const jac_point_t *P, const fp2_t *ao3, const ec_curve_t *curve) +{ + // Cost of 1M + 1S when A != 0. + fp2_t t; + /* X = U - (A*W^2)/3, Y = V, Z = W. */ + if (!fp2_is_zero(&(curve->A))) { + fp2_sqr(&t, &P->z); + fp2_mul(&t, &t, ao3); + fp2_sub(&Q->x, &P->x, &t); + } + fp2_copy(&Q->y, &P->y); + fp2_copy(&Q->z, &P->z); +} + +void +copy_jac_point(jac_point_t *P, const jac_point_t *Q) +{ + fp2_copy(&(P->x), &(Q->x)); + fp2_copy(&(P->y), &(Q->y)); + fp2_copy(&(P->z), &(Q->z)); +} + +void +jac_neg(jac_point_t *Q, const jac_point_t *P) +{ + fp2_copy(&Q->x, &P->x); + fp2_neg(&Q->y, &P->y); + fp2_copy(&Q->z, &P->z); +} + +void +DBL(jac_point_t *Q, const jac_point_t *P, const ec_curve_t *AC) +{ // Cost of 6M + 6S. 
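+    // (the identity, represented as (0 : Y : 0), is detected up front and its X and Z are restored by the constant-time selects at the end)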
+ // Doubling on a Montgomery curve, representation in Jacobian coordinates (X:Y:Z) corresponding to + // (X/Z^2,Y/Z^3) This version receives the coefficient value A + fp2_t t0, t1, t2, t3; + + uint32_t flag = fp2_is_zero(&P->x) & fp2_is_zero(&P->z); + + fp2_sqr(&t0, &P->x); // t0 = x1^2 + fp2_add(&t1, &t0, &t0); + fp2_add(&t0, &t0, &t1); // t0 = 3x1^2 + fp2_sqr(&t1, &P->z); // t1 = z1^2 + fp2_mul(&t2, &P->x, &AC->A); + fp2_add(&t2, &t2, &t2); // t2 = 2Ax1 + fp2_add(&t2, &t1, &t2); // t2 = 2Ax1+z1^2 + fp2_mul(&t2, &t1, &t2); // t2 = z1^2(2Ax1+z1^2) + fp2_add(&t2, &t0, &t2); // t2 = alpha = 3x1^2 + z1^2(2Ax1+z1^2) + fp2_mul(&Q->z, &P->y, &P->z); + fp2_add(&Q->z, &Q->z, &Q->z); // z2 = 2y1z1 + fp2_sqr(&t0, &Q->z); + fp2_mul(&t0, &t0, &AC->A); // t0 = 4Ay1^2z1^2 + fp2_sqr(&t1, &P->y); + fp2_add(&t1, &t1, &t1); // t1 = 2y1^2 + fp2_add(&t3, &P->x, &P->x); // t3 = 2x1 + fp2_mul(&t3, &t1, &t3); // t3 = 4x1y1^2 + fp2_sqr(&Q->x, &t2); // x2 = alpha^2 + fp2_sub(&Q->x, &Q->x, &t0); // x2 = alpha^2 - 4Ay1^2z1^2 + fp2_sub(&Q->x, &Q->x, &t3); + fp2_sub(&Q->x, &Q->x, &t3); // x2 = alpha^2 - 4Ay1^2z1^2 - 8x1y1^2 + fp2_sub(&Q->y, &t3, &Q->x); // y2 = 4x1y1^2 - x2 + fp2_mul(&Q->y, &Q->y, &t2); // y2 = alpha(4x1y1^2 - x2) + fp2_sqr(&t1, &t1); // t1 = 4y1^4 + fp2_sub(&Q->y, &Q->y, &t1); + fp2_sub(&Q->y, &Q->y, &t1); // y2 = alpha(4x1y1^2 - x2) - 8y1^4 + + fp2_select(&Q->x, &Q->x, &P->x, -flag); + fp2_select(&Q->z, &Q->z, &P->z, -flag); +} + +void +DBLW(jac_point_t *Q, fp2_t *u, const jac_point_t *P, const fp2_t *t) +{ // Cost of 3M + 5S. + // Doubling on a Weierstrass curve, representation in modified Jacobian coordinates + // (X:Y:Z:T=a*Z^4) corresponding to (X/Z^2,Y/Z^3), where a is the curve coefficient. + // Formula from https://hyperelliptic.org/EFD/g1p/auto-shortw-modified.html + + uint32_t flag = fp2_is_zero(&P->x) & fp2_is_zero(&P->z); + + fp2_t xx, c, cc, r, s, m; + // XX = X^2 + fp2_sqr(&xx, &P->x); + // A = 2*Y^2 + fp2_sqr(&c, &P->y); + fp2_add(&c, &c, &c); + // AA = A^2 + fp2_sqr(&cc, &c); + // R = 2*AA + fp2_add(&r, &cc, &cc); + // S = (X+A)^2-XX-AA + fp2_add(&s, &P->x, &c); + fp2_sqr(&s, &s); + fp2_sub(&s, &s, &xx); + fp2_sub(&s, &s, &cc); + // M = 3*XX+T1 + fp2_add(&m, &xx, &xx); + fp2_add(&m, &m, &xx); + fp2_add(&m, &m, t); + // X3 = M^2-2*S + fp2_sqr(&Q->x, &m); + fp2_sub(&Q->x, &Q->x, &s); + fp2_sub(&Q->x, &Q->x, &s); + // Z3 = 2*Y*Z + fp2_mul(&Q->z, &P->y, &P->z); + fp2_add(&Q->z, &Q->z, &Q->z); + // Y3 = M*(S-X3)-R + fp2_sub(&Q->y, &s, &Q->x); + fp2_mul(&Q->y, &Q->y, &m); + fp2_sub(&Q->y, &Q->y, &r); + // T3 = 2*R*T1 + fp2_mul(u, t, &r); + fp2_add(u, u, u); + + fp2_select(&Q->x, &Q->x, &P->x, -flag); + fp2_select(&Q->z, &Q->z, &P->z, -flag); +} + +void +select_jac_point(jac_point_t *Q, const jac_point_t *P1, const jac_point_t *P2, const digit_t option) +{ // Select points + // If option = 0 then Q <- P1, else if option = 0xFF...FF then Q <- P2 + fp2_select(&(Q->x), &(P1->x), &(P2->x), option); + fp2_select(&(Q->y), &(P1->y), &(P2->y), option); + fp2_select(&(Q->z), &(P1->z), &(P2->z), option); +} + +void +ADD(jac_point_t *R, const jac_point_t *P, const jac_point_t *Q, const ec_curve_t *AC) +{ + // Addition on a Montgomery curve, representation in Jacobian coordinates (X:Y:Z) corresponding + // to (x,y) = (X/Z^2,Y/Z^3) This version receives the coefficient value A + // + // Complete routine, to handle all edge cases: + // if ZP == 0: # P == inf + // return Q + // if ZQ == 0: # Q == inf + // return P + // dy <- YQ*ZP**3 - YP*ZQ**3 + // dx <- XQ*ZP**2 - XP*ZQ**2 + // if dx == 0: # x1 == x2 + 
// if dy == 0: # ... and y1 == y2: doubling case + // dy <- ZP*ZQ * (3*XP^2 + ZP^2 * (2*A*XP + ZP^2)) + // dx <- 2*YP*ZP + // else: # ... but y1 != y2, thus P = -Q + // return inf + // XR <- dy**2 - dx**2 * (A*ZP^2*ZQ^2 + XP*ZQ^2 + XQ*ZP^2) + // YR <- dy * (XP*ZQ^2 * dx^2 - XR) - YP*ZQ^3 * dx^3 + // ZR <- dx * ZP * ZQ + + // Constant time processing: + // - The case for P == 0 or Q == 0 is handled at the end with conditional select + // - dy and dx are computed for both the normal and doubling cases, we switch when + // dx == dy == 0 for the normal case. + // - If we have that P = -Q then dx = 0 and so ZR will be zero, giving us the point + // at infinity for "free". + // + // These current formula are expensive and I'm probably missing some tricks... + // Thought I'd get the ball rolling. + // Cost 17M + 6S + 13a + fp2_t t0, t1, t2, t3, u1, u2, v1, dx, dy; + + /* If P is zero or Q is zero we will conditionally swap before returning. */ + uint32_t ctl1 = fp2_is_zero(&P->z); + uint32_t ctl2 = fp2_is_zero(&Q->z); + + /* Precompute some values */ + fp2_sqr(&t0, &P->z); // t0 = z1^2 + fp2_sqr(&t1, &Q->z); // t1 = z2^2 + + /* Compute dy and dx for ordinary case */ + fp2_mul(&v1, &t1, &Q->z); // v1 = z2^3 + fp2_mul(&t2, &t0, &P->z); // t2 = z1^3 + fp2_mul(&v1, &v1, &P->y); // v1 = y1z2^3 + fp2_mul(&t2, &t2, &Q->y); // t2 = y2z1^3 + fp2_sub(&dy, &t2, &v1); // dy = y2z1^3 - y1z2^3 + fp2_mul(&u2, &t0, &Q->x); // u2 = x2z1^2 + fp2_mul(&u1, &t1, &P->x); // u1 = x1z2^2 + fp2_sub(&dx, &u2, &u1); // dx = x2z1^2 - x1z2^2 + + /* Compute dy and dx for doubling case */ + fp2_add(&t1, &P->y, &P->y); // dx_dbl = t1 = 2y1 + fp2_add(&t2, &AC->A, &AC->A); // t2 = 2A + fp2_mul(&t2, &t2, &P->x); // t2 = 2Ax1 + fp2_add(&t2, &t2, &t0); // t2 = 2Ax1 + z1^2 + fp2_mul(&t2, &t2, &t0); // t2 = z1^2 * (2Ax1 + z1^2) + fp2_sqr(&t0, &P->x); // t0 = x1^2 + fp2_add(&t2, &t2, &t0); // t2 = x1^2 + z1^2 * (2Ax1 + z1^2) + fp2_add(&t2, &t2, &t0); // t2 = 2*x1^2 + z1^2 * (2Ax1 + z1^2) + fp2_add(&t2, &t2, &t0); // t2 = 3*x1^2 + z1^2 * (2Ax1 + z1^2) + fp2_mul(&t2, &t2, &Q->z); // dy_dbl = t2 = z2 * (3*x1^2 + z1^2 * (2Ax1 + z1^2)) + + /* If dx is zero and dy is zero swap with double variables */ + uint32_t ctl = fp2_is_zero(&dx) & fp2_is_zero(&dy); + fp2_select(&dx, &dx, &t1, ctl); + fp2_select(&dy, &dy, &t2, ctl); + + /* Some more precomputations */ + fp2_mul(&t0, &P->z, &Q->z); // t0 = z1z2 + fp2_sqr(&t1, &t0); // t1 = z1z2^2 + fp2_sqr(&t2, &dx); // t2 = dx^2 + fp2_sqr(&t3, &dy); // t3 = dy^2 + + /* Compute x3 = dy**2 - dx**2 * (A*ZP^2*ZQ^2 + XP*ZQ^2 + XQ*ZP^2) */ + fp2_mul(&R->x, &AC->A, &t1); // x3 = A*(z1z2)^2 + fp2_add(&R->x, &R->x, &u1); // x3 = A*(z1z2)^2 + u1 + fp2_add(&R->x, &R->x, &u2); // x3 = A*(z1z2)^2 + u1 + u2 + fp2_mul(&R->x, &R->x, &t2); // x3 = dx^2 * (A*(z1z2)^2 + u1 + u2) + fp2_sub(&R->x, &t3, &R->x); // x3 = dy^2 - dx^2 * (A*(z1z2)^2 + u1 + u2) + + /* Compute y3 = dy * (XP*ZQ^2 * dx^2 - XR) - YP*ZQ^3 * dx^3*/ + fp2_mul(&R->y, &u1, &t2); // y3 = u1 * dx^2 + fp2_sub(&R->y, &R->y, &R->x); // y3 = u1 * dx^2 - x3 + fp2_mul(&R->y, &R->y, &dy); // y3 = dy * (u1 * dx^2 - x3) + fp2_mul(&t3, &t2, &dx); // t3 = dx^3 + fp2_mul(&t3, &t3, &v1); // t3 = v1 * dx^3 + fp2_sub(&R->y, &R->y, &t3); // y3 = dy * (u1 * dx^2 - x3) - v1 * dx^3 + + /* Compute z3 = dx * z1 * z2 */ + fp2_mul(&R->z, &dx, &t0); + + /* Finally, we need to set R = P is Q.Z = 0 and R = Q if P.Z = 0 */ + select_jac_point(R, R, Q, ctl1); + select_jac_point(R, R, P, ctl2); +} + +void +jac_to_xz_add_components(add_components_t *add_comp, const jac_point_t *P, const 
jac_point_t *Q, const ec_curve_t *AC) +{ + // Take P and Q in E distinct, two jac_point_t, return three components u,v and w in Fp2 such + // that the xz coordinates of P+Q are (u-v:w) and of P-Q are (u+v:w) + + fp2_t t0, t1, t2, t3, t4, t5, t6; + + fp2_sqr(&t0, &P->z); // t0 = z1^2 + fp2_sqr(&t1, &Q->z); // t1 = z2^2 + fp2_mul(&t2, &P->x, &t1); // t2 = x1z2^2 + fp2_mul(&t3, &t0, &Q->x); // t3 = z1^2x2 + fp2_mul(&t4, &P->y, &Q->z); // t4 = y1z2 + fp2_mul(&t4, &t4, &t1); // t4 = y1z2^3 + fp2_mul(&t5, &P->z, &Q->y); // t5 = z1y2 + fp2_mul(&t5, &t5, &t0); // t5 = z1^3y2 + fp2_mul(&t0, &t0, &t1); // t0 = (z1z2)^2 + fp2_mul(&t6, &t4, &t5); // t6 = (z1z_2)^3y1y2 + fp2_add(&add_comp->v, &t6, &t6); // v = 2(z1z_2)^3y1y2 + fp2_sqr(&t4, &t4); // t4 = y1^2z2^6 + fp2_sqr(&t5, &t5); // t5 = z1^6y_2^2 + fp2_add(&t4, &t4, &t5); // t4 = z1^6y_2^2 + y1^2z2^6 + fp2_add(&t5, &t2, &t3); // t5 = x1z2^2 +z_1^2x2 + fp2_add(&t6, &t3, &t3); // t6 = 2z_1^2x2 + fp2_sub(&t6, &t5, &t6); // t6 = lambda = x1z2^2 - z_1^2x2 + fp2_sqr(&t6, &t6); // t6 = lambda^2 = (x1z2^2 - z_1^2x2)^2 + fp2_mul(&t1, &AC->A, &t0); // t1 = A*(z1z2)^2 + fp2_add(&t1, &t5, &t1); // t1 = gamma =A*(z1z2)^2 + x1z2^2 +z_1^2x2 + fp2_mul(&t1, &t1, &t6); // t1 = gamma*lambda^2 + fp2_sub(&add_comp->u, &t4, &t1); // u = z1^6y_2^2 + y1^2z2^6 - gamma*lambda^2 + fp2_mul(&add_comp->w, &t6, &t0); // w = (z1z2)^2(lambda)^2 +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec_params.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec_params.c new file mode 100644 index 0000000000..d2aa074b7f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec_params.c @@ -0,0 +1,4 @@ +#include +// p+1 divided by the power of 2 +const digit_t p_cofactor_for_2f[1] = {27}; + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec_params.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec_params.h new file mode 100644 index 0000000000..9f2aca3be7 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec_params.h @@ -0,0 +1,12 @@ +#ifndef EC_PARAMS_H +#define EC_PARAMS_H + +#include + +#define TORSION_EVEN_POWER 500 + +// p+1 divided by the power of 2 +extern const digit_t p_cofactor_for_2f[1]; +#define P_COFACTOR_FOR_2F_BITLENGTH 5 + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/encode_signature.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/encode_signature.c new file mode 100644 index 0000000000..112c695941 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/encode_signature.c @@ -0,0 +1,208 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +typedef unsigned char byte_t; + +// digits + +static void +encode_digits(byte_t *enc, const digit_t *x, size_t nbytes) +{ +#ifdef TARGET_BIG_ENDIAN + const size_t ndigits = nbytes / sizeof(digit_t); + const size_t rem = nbytes % sizeof(digit_t); + + for (size_t i = 0; i < ndigits; i++) + ((digit_t *)enc)[i] = BSWAP_DIGIT(x[i]); + if (rem) { + digit_t ld = BSWAP_DIGIT(x[ndigits]); + memcpy(enc + ndigits * sizeof(digit_t), (byte_t *)&ld, rem); + } +#else + memcpy(enc, (const byte_t *)x, nbytes); +#endif +} + +static void +decode_digits(digit_t *x, const byte_t *enc, size_t nbytes, size_t ndigits) +{ + assert(nbytes <= ndigits * sizeof(digit_t)); + memcpy((byte_t *)x, enc, nbytes); + memset((byte_t *)x + nbytes, 0, ndigits * sizeof(digit_t) - nbytes); + +#ifdef TARGET_BIG_ENDIAN + for (size_t i = 0; i < ndigits; i++) + x[i] = BSWAP_DIGIT(x[i]); +#endif +} + +// ibz_t + +static byte_t * +ibz_to_bytes(byte_t *enc, const ibz_t *x, size_t 
nbytes, bool sgn) +{ +#ifndef NDEBUG + { + // make sure there is enough space + ibz_t abs, bnd; + ibz_init(&bnd); + ibz_init(&abs); + ibz_pow(&bnd, &ibz_const_two, 8 * nbytes - sgn); + ibz_abs(&abs, x); + assert(ibz_cmp(&abs, &bnd) < 0); + ibz_finalize(&bnd); + ibz_finalize(&abs); + } +#endif + const size_t digits = (nbytes + sizeof(digit_t) - 1) / sizeof(digit_t); + digit_t d[digits]; + memset(d, 0, sizeof(d)); + if (ibz_cmp(x, &ibz_const_zero) >= 0) { + // non-negative, straightforward. + ibz_to_digits(d, x); + } else { + assert(sgn); + // negative; use two's complement. + ibz_t tmp; + ibz_init(&tmp); + ibz_neg(&tmp, x); + ibz_sub(&tmp, &tmp, &ibz_const_one); + ibz_to_digits(d, &tmp); + for (size_t i = 0; i < digits; ++i) + d[i] = ~d[i]; +#ifndef NDEBUG + { + // make sure the result is correct + ibz_t chk; + ibz_init(&chk); + ibz_copy_digit_array(&tmp, d); + ibz_sub(&tmp, &tmp, x); + ibz_pow(&chk, &ibz_const_two, 8 * sizeof(d)); + assert(!ibz_cmp(&tmp, &chk)); + ibz_finalize(&chk); + } +#endif + ibz_finalize(&tmp); + } + encode_digits(enc, d, nbytes); + return enc + nbytes; +} + +static const byte_t * +ibz_from_bytes(ibz_t *x, const byte_t *enc, size_t nbytes, bool sgn) +{ + assert(nbytes > 0); + const size_t ndigits = (nbytes + sizeof(digit_t) - 1) / sizeof(digit_t); + assert(ndigits > 0); + digit_t d[ndigits]; + memset(d, 0, sizeof(d)); + decode_digits(d, enc, nbytes, ndigits); + if (sgn && enc[nbytes - 1] >> 7) { + // negative, decode two's complement + const size_t s = sizeof(digit_t) - 1 - (sizeof(d) - nbytes); + assert(s < sizeof(digit_t)); + d[ndigits - 1] |= ((digit_t)-1) >> 8 * s << 8 * s; + for (size_t i = 0; i < ndigits; ++i) + d[i] = ~d[i]; + ibz_copy_digits(x, d, ndigits); + ibz_add(x, x, &ibz_const_one); + ibz_neg(x, x); + } else { + // non-negative + ibz_copy_digits(x, d, ndigits); + } + return enc + nbytes; +} + +// public API + +void +secret_key_to_bytes(byte_t *enc, const secret_key_t *sk, const public_key_t *pk) +{ +#ifndef NDEBUG + byte_t *const start = enc; +#endif + + enc = public_key_to_bytes(enc, pk); + +#ifndef NDEBUG + { + fp2_t lhs, rhs; + fp2_mul(&lhs, &sk->curve.A, &pk->curve.C); + fp2_mul(&rhs, &sk->curve.C, &pk->curve.A); + assert(fp2_is_equal(&lhs, &rhs)); + } +#endif + + enc = ibz_to_bytes(enc, &sk->secret_ideal.norm, FP_ENCODED_BYTES, false); + { + quat_alg_elem_t gen; + quat_alg_elem_init(&gen); + int ret UNUSED = quat_lideal_generator(&gen, &sk->secret_ideal, &QUATALG_PINFTY); + assert(ret); + // we skip encoding the denominator since it won't change the generated ideal +#ifndef NDEBUG + { + // let's make sure that the denominator is indeed coprime to the norm of the ideal + ibz_t gcd; + ibz_init(&gcd); + ibz_gcd(&gcd, &gen.denom, &sk->secret_ideal.norm); + assert(!ibz_cmp(&gcd, &ibz_const_one)); + ibz_finalize(&gcd); + } +#endif + enc = ibz_to_bytes(enc, &gen.coord[0], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord[1], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord[2], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord[3], FP_ENCODED_BYTES, true); + quat_alg_elem_finalize(&gen); + } + + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][1], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][1], TORSION_2POWER_BYTES, false); + + assert(enc - start == SECRETKEY_BYTES); +} + +void 
+secret_key_from_bytes(secret_key_t *sk, public_key_t *pk, const byte_t *enc) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + + enc = public_key_from_bytes(pk, enc); + + { + ibz_t norm; + ibz_init(&norm); + quat_alg_elem_t gen; + quat_alg_elem_init(&gen); + enc = ibz_from_bytes(&norm, enc, FP_ENCODED_BYTES, false); + enc = ibz_from_bytes(&gen.coord[0], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord[1], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord[2], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord[3], enc, FP_ENCODED_BYTES, true); + quat_lideal_create(&sk->secret_ideal, &gen, &norm, &MAXORD_O0, &QUATALG_PINFTY); + ibz_finalize(&norm); + quat_alg_elem_finalize(&gen); + } + + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][1], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][1], enc, TORSION_2POWER_BYTES, false); + + assert(enc - start == SECRETKEY_BYTES); + + sk->curve = pk->curve; + ec_curve_to_basis_2f_from_hint(&sk->canonical_basis, &sk->curve, TORSION_EVEN_POWER, pk->hint_pk); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/encode_verification.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/encode_verification.c new file mode 100644 index 0000000000..fecdb9c259 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/encode_verification.c @@ -0,0 +1,220 @@ +#include +#include +#include +#include +#include +#include + +typedef unsigned char byte_t; + +// digits + +static void +encode_digits(byte_t *enc, const digit_t *x, size_t nbytes) +{ +#ifdef TARGET_BIG_ENDIAN + const size_t ndigits = nbytes / sizeof(digit_t); + const size_t rem = nbytes % sizeof(digit_t); + + for (size_t i = 0; i < ndigits; i++) + ((digit_t *)enc)[i] = BSWAP_DIGIT(x[i]); + if (rem) { + digit_t ld = BSWAP_DIGIT(x[ndigits]); + memcpy(enc + ndigits * sizeof(digit_t), (byte_t *)&ld, rem); + } +#else + memcpy(enc, (const byte_t *)x, nbytes); +#endif +} + +static void +decode_digits(digit_t *x, const byte_t *enc, size_t nbytes, size_t ndigits) +{ + assert(nbytes <= ndigits * sizeof(digit_t)); + memcpy((byte_t *)x, enc, nbytes); + memset((byte_t *)x + nbytes, 0, ndigits * sizeof(digit_t) - nbytes); + +#ifdef TARGET_BIG_ENDIAN + for (size_t i = 0; i < ndigits; i++) + x[i] = BSWAP_DIGIT(x[i]); +#endif +} + +// fp2_t + +static byte_t * +fp2_to_bytes(byte_t *enc, const fp2_t *x) +{ + fp2_encode(enc, x); + return enc + FP2_ENCODED_BYTES; +} + +static const byte_t * +fp2_from_bytes(fp2_t *x, const byte_t *enc) +{ + fp2_decode(x, enc); + return enc + FP2_ENCODED_BYTES; +} + +// curves and points + +static byte_t * +proj_to_bytes(byte_t *enc, const fp2_t *x, const fp2_t *z) +{ + assert(!fp2_is_zero(z)); + fp2_t tmp = *z; + fp2_inv(&tmp); +#ifndef NDEBUG + { + fp2_t chk; + fp2_mul(&chk, z, &tmp); + fp2_t one; + fp2_set_one(&one); + assert(fp2_is_equal(&chk, &one)); + } +#endif + fp2_mul(&tmp, x, &tmp); + enc = fp2_to_bytes(enc, &tmp); + return enc; +} + +static const byte_t * +proj_from_bytes(fp2_t *x, fp2_t *z, const byte_t *enc) +{ + enc = fp2_from_bytes(x, enc); + fp2_set_one(z); + return enc; +} + +static byte_t * +ec_curve_to_bytes(byte_t *enc, const ec_curve_t *curve) +{ + return proj_to_bytes(enc, &curve->A, &curve->C); +} + +static const byte_t * +ec_curve_from_bytes(ec_curve_t *curve, const byte_t *enc) +{ + 
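// zero everything first: this also resets is_A24_computed_and_normalized before the (A : 1) coefficients are decoded +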
memset(curve, 0, sizeof(*curve)); + return proj_from_bytes(&curve->A, &curve->C, enc); +} + +static byte_t * +ec_point_to_bytes(byte_t *enc, const ec_point_t *point) +{ + return proj_to_bytes(enc, &point->x, &point->z); +} + +static const byte_t * +ec_point_from_bytes(ec_point_t *point, const byte_t *enc) +{ + return proj_from_bytes(&point->x, &point->z, enc); +} + +static byte_t * +ec_basis_to_bytes(byte_t *enc, const ec_basis_t *basis) +{ + enc = ec_point_to_bytes(enc, &basis->P); + enc = ec_point_to_bytes(enc, &basis->Q); + enc = ec_point_to_bytes(enc, &basis->PmQ); + return enc; +} + +static const byte_t * +ec_basis_from_bytes(ec_basis_t *basis, const byte_t *enc) +{ + enc = ec_point_from_bytes(&basis->P, enc); + enc = ec_point_from_bytes(&basis->Q, enc); + enc = ec_point_from_bytes(&basis->PmQ, enc); + return enc; +} + +// public API + +byte_t * +public_key_to_bytes(byte_t *enc, const public_key_t *pk) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + enc = ec_curve_to_bytes(enc, &pk->curve); + *enc++ = pk->hint_pk; + assert(enc - start == PUBLICKEY_BYTES); + return enc; +} + +const byte_t * +public_key_from_bytes(public_key_t *pk, const byte_t *enc) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + enc = ec_curve_from_bytes(&pk->curve, enc); + pk->hint_pk = *enc++; + assert(enc - start == PUBLICKEY_BYTES); + return enc; +} + +void +signature_to_bytes(byte_t *enc, const signature_t *sig) +{ +#ifndef NDEBUG + byte_t *const start = enc; +#endif + + enc = fp2_to_bytes(enc, &sig->E_aux_A); + + *enc++ = sig->backtracking; + *enc++ = sig->two_resp_length; + + size_t nbytes = (SQIsign_response_length + 9) / 8; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[0][0], nbytes); + enc += nbytes; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[0][1], nbytes); + enc += nbytes; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[1][0], nbytes); + enc += nbytes; + encode_digits(enc, sig->mat_Bchall_can_to_B_chall[1][1], nbytes); + enc += nbytes; + + nbytes = SECURITY_BITS / 8; + encode_digits(enc, sig->chall_coeff, nbytes); + enc += nbytes; + + *enc++ = sig->hint_aux; + *enc++ = sig->hint_chall; + + assert(enc - start == SIGNATURE_BYTES); +} + +void +signature_from_bytes(signature_t *sig, const byte_t *enc) +{ +#ifndef NDEBUG + const byte_t *const start = enc; +#endif + + enc = fp2_from_bytes(&sig->E_aux_A, enc); + + sig->backtracking = *enc++; + sig->two_resp_length = *enc++; + + size_t nbytes = (SQIsign_response_length + 9) / 8; + decode_digits(sig->mat_Bchall_can_to_B_chall[0][0], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + decode_digits(sig->mat_Bchall_can_to_B_chall[0][1], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + decode_digits(sig->mat_Bchall_can_to_B_chall[1][0], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + decode_digits(sig->mat_Bchall_can_to_B_chall[1][1], enc, nbytes, NWORDS_ORDER); + enc += nbytes; + + nbytes = SECURITY_BITS / 8; + decode_digits(sig->chall_coeff, enc, nbytes, NWORDS_ORDER); + enc += nbytes; + + sig->hint_aux = *enc++; + sig->hint_chall = *enc++; + + assert(enc - start == SIGNATURE_BYTES); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/encoded_sizes.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/encoded_sizes.h new file mode 100644 index 0000000000..3aafb0d5f7 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/encoded_sizes.h @@ -0,0 +1,11 @@ +#define SECURITY_BITS 256 +#define SQIsign_response_length 253 +#define HASH_ITERATIONS 512 +#define FP_ENCODED_BYTES 64 +#define FP2_ENCODED_BYTES 128 
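+// Note on the derived sizes below (all values follow from the encoders in this patch):
+// curves and points are serialized as a single normalized fp2 coordinate (A/C resp. x/z),
+// so EC_CURVE_ENCODED_BYTES and EC_POINT_ENCODED_BYTES are one FP2_ENCODED_BYTES each and
+// an ec_basis_t stores the three points P, Q and P-Q. PUBLICKEY_BYTES adds one hint byte to
+// the encoded curve, and SIGNATURE_BYTES matches signature_to_bytes():
+// FP2_ENCODED_BYTES + 2 + 4 * ((SQIsign_response_length + 9) / 8) + SECURITY_BITS / 8 + 2
+// = 128 + 2 + 128 + 32 + 2 = 292.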
+#define EC_CURVE_ENCODED_BYTES 128 +#define EC_POINT_ENCODED_BYTES 128 +#define EC_BASIS_ENCODED_BYTES 384 +#define PUBLICKEY_BYTES 129 +#define SECRETKEY_BYTES 701 +#define SIGNATURE_BYTES 292 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/endomorphism_action.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/endomorphism_action.c new file mode 100644 index 0000000000..dd089e6f4f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/endomorphism_action.c @@ -0,0 +1,3336 @@ +#include +#include +#include +const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x280} +#elif RADIX == 32 +{0x12f68, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x400} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x4b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x170000000000000} +#else +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1300000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x1099, 0xa9f, 0x14f8, 0x1537, 0x1a13, 0x97e, 0x1095, 0xc8b, 0xdd2, 0x1c5f, 0xbdf, 0x1344, 0x1330, 0x1733, 0x185d, 0x1b08, 0x464, 0x76f, 0xe44, 0x3fc, 0x1dc0, 0x1c62, 0x88, 0x972, 0x13f4, 0x18c8, 0x6bd, 0x804, 0x1269, 0x19e0, 0x14bd, 0x10a1, 0xe5e, 0x1af2, 0x156c, 0x3f7, 0x16a1, 0x47d, 0x314} +#elif RADIX == 32 +{0x184cba61, 0xf4f854f, 0x1fb42753, 0x45c2552, 0x1c5f6e93, 0x2688bdf, 0xedcce66, 0x64d8461, 0x1c8876f2, 0x177007f8, 0x12044718, 0x1913f44b, 0x10d7b8, 0x1cf049a5, 0x1d0a1a5e, 0x1b35e4e5, 0x1508fdea, 0x66d} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x27537a7c2a7e132e, 0x7dba4c8b84aa5fb4, 0xedcce6613445eff1, 0xc7221dbc8c9b08c2, 0x8972044718bb803f, 0x24d280435ee3227e, 0x6bc9cbd0a1a5ee78, 0x1011f6d423f7ab6} +#else +{0xa6f4f854fc265d, 0x4c8b84aa5fb427, 0x1309a22f7f8bedd, 0x12326c230bb7339, 0x1177007f8e443b7, 0x3227e897204471, 0x173c12694021af7, 0xd9af272f428697, 0x523eda847ef5} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x4b1, 0x178f, 0x107b, 0x6f6, 0x75e, 0x1b27, 0x4db, 0x1e1b, 0xd78, 0x15b6, 0x1130, 0x8cc, 0x1ac0, 0x9b7, 0x692, 0x1e07, 0x1f4, 0xfd7, 0x2ab, 0x7b5, 0x1040, 0xa43, 0xb6d, 0x13a1, 0x1422, 0x10c9, 0x10b0, 0x1540, 0x827, 0xa69, 0x1761, 0x1f25, 0x1d16, 0x16f2, 0x1fcb, 0x92, 0xcba, 0x1c03, 0x3c7} +#elif RADIX == 32 +{0x1258c7b1, 0xd07bbc7, 0x9cebc6f, 0x10d936f6, 0x15b66bc7, 0x1199130, 0x926df58, 0x1f4f039a, 0x556fd70, 0x1c100f6a, 0x15b6a90, 0x1934229d, 0x15021610, 0x1534a09e, 0xdf25bb0, 0x12ede5d1, 0x5d024bf, 0xa9b} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xbc6f683dde3c9631, 0xd9af1e1b26dec9ce, 0x926df5808cc89856, 0x5155bf5c3e9e0734, 0x53a15b6a90e0807b, 0x504f540858432684, 0xdbcba2df25bb0a9a, 0x18f00d974092fe5} +#else +{0xded07bbc792c63, 0x11e1b26dec9cebc, 0xc046644c2b6cd7, 0x10fa781cd249b7d, 0x1c100f6a2ab7eb, 0x3268453a15b6a9, 0x54d2827aa042c2, 0x1976f2e8b7c96ec, 0x16e01b2e8125f} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if 
defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x15c, 0x865, 0x1af6, 0x17b9, 0x6a2, 0x1c22, 0x17c5, 0x1149, 0xa7, 0x151e, 0xe57, 0x4c2, 0x18cd, 0xbd2, 0x7a4, 0x7c6, 0x74a, 0xd2, 0x902, 0x68c, 0x21e, 0x1e44, 0x1f5a, 0x1d4c, 0x115b, 0x1777, 0x16d4, 0x503, 0x3af, 0x7e4, 0x1aa7, 0x3dd, 0x827, 0x186b, 0x765, 0x1fc5, 0xc78, 0x9bd, 0xfe} +#elif RADIX == 32 +{0x10ae12d6, 0x13af6432, 0x88d457b, 0xa4df178, 0x151e053c, 0x14984e57, 0x122f4b19, 0x14a3e31e, 0x12040d23, 0x878d18, 0xcfad791, 0xef15bea, 0x140eda97, 0x13f20ebc, 0xe3ddd53, 0x1970d682, 0x3c7f14e, 0x4eb} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x457b9d7b21942b84, 0x7814f149be2f088d, 0x22f4b19a4c272bd4, 0xc4810348e947c63d, 0x7d4cfad791043c68, 0x75e503b6a5dde2b, 0xe1ad04e3ddd539f9, 0x1326f58f1fc53b2} +#else +{0xf73af643285709, 0xf149be2f088d45, 0xcd261395ea3c0a, 0x3a51f18f48bd2c, 0x20878d18902069, 0x1dde2b7d4cfad79, 0x1cfc83af281db52, 0xcb86b4138f7754, 0xb4deb1e3f8a7} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x6ac, 0x25e, 0xc7a, 0x1492, 0xd01, 0xbc0, 0x118, 0x376, 0x3e0, 0x7ae, 0x573, 0x171f, 0x35a, 0x1725, 0x48f, 0xc94, 0x133c, 0x16a4, 0x10a8, 0x178d, 0xdd7, 0x798, 0x1d05, 0x39f, 0xc2a, 0x179c, 0x407, 0xd3, 0x118a, 0x1c9f, 0xeac, 0x145b, 0xc35, 0x11a2, 0x58b, 0xe4, 0x5e3, 0xae7, 0x330} +#elif RADIX == 32 +{0x3563c78, 0x4c7a12f, 0x101a0349, 0x1bb04617, 0x7ae1f00, 0xae3e573, 0x7dc946b, 0x13c64a12, 0x1516a49, 0x375ef1b, 0x1fe829e6, 0x138c2a1c, 0x34c80f7, 0xe4fc628, 0xb45b756, 0x2e344c3, 0xf18390b, 0x339} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x349263d0978d58f, 0xb87c037608c2f01a, 0x7dc946b571f2b99e, 0xd8545a92678c9424, 0x439fe829e61baf78, 0xe3140d3203de7185, 0xc68986b45b756727, 0x32b9cbc60e42c5} +#else +{0x924c7a12f1ab1e, 0x37608c2f01a03, 0x15ab8f95ccf5c3e, 0x99e325091f7251, 0xc375ef1b0a8b52, 0x1e7185439fe829e, 0x1393f18a069901e, 0x1171a261ad16dd5, 0x6573978c1c85} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x19da, 0x19cd, 0x19e2, 0x5ea, 0x1079, 0x11ba, 0x1f5e, 0x228, 0x1a45, 0x16ee, 0x18a1, 0x11eb, 0x127a, 0x1d6f, 0x106f, 0x118f, 0x1d0c, 0x1571, 0x1b2d, 0xb60, 0xb27, 0xe1f, 0xe58, 0xe01, 0x4f4, 0x183, 0x13a9, 0x1584, 0x5cb, 0xcce, 0x1ce7, 0x4da, 0x1e62, 0x1213, 0x7fe, 0x1e6, 0x17d, 0x350, 0x3a0} +#elif RADIX == 32 +{0x1ced44bf, 0x159e2ce6, 0xea0f25e, 0x1147d7a3, 0x16eed228, 0xa3d78a1, 0x17f5be4f, 0x10c8c7c1, 0x165b571e, 0x1ac9d6c1, 0x172c387, 0x1064f470, 0x16127521, 0x1667172e, 0x44dae73, 0x1fa427e6, 0xbe8798f, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf25eacf167373b51, 0xbb48a228faf46ea0, 0x7f5be4f51ebc50db, 0xd96d5c7a1918f83, 0x8e0172c387d64eb6, 0x8b975849d4860c9e, 0x484fcc44dae73b33, 0x50d402fa1e63ff} +#else +{0xbd59e2ce6e76a2, 0xa228faf46ea0f2, 0x7a8f5e286ddda4, 0x1e86463e0dfd6f9, 0xfac9d6c1b2dab8, 0x60c9e8e0172c38, 0x1d99c5cbac24ea4, 0x1fd213f31136b9c, 0xa1a805f43cc7} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1dea, 0x1bbc, 0x9b0, 0x1066, 0x10fb, 0x1fe8, 0x1bca, 0x34d, 0x275, 0x42a, 0xc7b, 0x6e8, 0x1f5c, 0x12e5, 0x155d, 0x4f2, 0x1422, 0xfce, 0x603, 0x17a8, 0xd9f, 0x182d, 0x9fe, 0x3b1, 0x342, 0x1c21, 0x1aff, 0x1e38, 0x1ac8, 0x1c98, 0x51f, 0x897, 0xe23, 0x17e7, 0xced, 0x1e6, 0x125a, 0x18f3, 0x1b8} +#elif RADIX == 32 +{0xef520a6, 0xc9b0dde, 0x1a21f706, 0x1a6ef2bf, 0x42a13a8, 0x10dd0c7b, 0xecb97eb, 0x2227955, 0xc06fcea, 0xb67ef50, 0x114ff60b, 0x423421d, 0x18e35ffc, 0x1e4c6b23, 0x689728f, 0x1b6fcee2, 0x12d07999, 0x69c} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf70664d86ef3bd48, 0xa84ea34dde57fa21, 0xecb97eb86e863d90, 0x8301bf3a8444f2aa, 0x43b14ff60b5b3f7a, 0x3591e38d7ff08468, 0xdf9dc4689728ff26, 0x463ce4b41e6676} +#else +{0xcc9b0dde77a90, 0xa34dde57fa21f7, 0x15c37431ec85427, 0xa1113caabb2e5f, 0x16b67ef506037e7, 0x10846843b14ff60, 0x1f931ac8f1c6bff, 0x1db7e7711a25ca3, 0x8c79c9683ccc} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3337,0x86b9,0x96a8,0x892f,0x9f38,0x4c8e,0xc497,0xdf75,0x4fd7,0xd5b3,0x5dec,0xd543,0xf3dc,0xd5c3,0x8de3,0xa71e,0xd939,0x1324,0x7073,0x5af3,0xbb6b,0x4122,0x9d0,0x81d7,0x74de,0x3877,0x5ea0,0x6d85,0x7c31,0x8f2d,0x9df2,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x86b93337,0x892f96a8,0x4c8e9f38,0xdf75c497,0xd5b34fd7,0xd5435dec,0xd5c3f3dc,0xa71e8de3,0x1324d939,0x5af37073,0x4122bb6b,0x81d709d0,0x387774de,0x6d855ea0,0x8f2d7c31,0x19df2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x892f96a886b93337,0xdf75c4974c8e9f38,0xd5435decd5b34fd7,0xa71e8de3d5c3f3dc,0x5af370731324d939,0x81d709d04122bb6b,0x6d855ea0387774de,0x19df28f2d7c31}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x28e,0xcfd,0xe1c2,0x2061,0xe412,0x3b18,0x40df,0x716c,0xd025,0xd980,0xc041,0xebb9,0xbb45,0x4982,0x17de,0xa8fe,0xb079,0xffe5,0x34d9,0x2aa6,0x2b0b,0x6787,0x9bab,0x6bc3,0x7365,0x3c03,0xf512,0xb57b,0x897,0xf0b5,0x9c9c,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcfd028e,0x2061e1c2,0x3b18e412,0x716c40df,0xd980d025,0xebb9c041,0x4982bb45,0xa8fe17de,0xffe5b079,0x2aa634d9,0x67872b0b,0x6bc39bab,0x3c037365,0xb57bf512,0xf0b50897,0x89c9c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2061e1c20cfd028e,0x716c40df3b18e412,0xebb9c041d980d025,0xa8fe17de4982bb45,0x2aa634d9ffe5b079,0x6bc39bab67872b0b,0xb57bf5123c037365,0x89c9cf0b50897}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x45b1,0x429e,0x37b2,0xd856,0x2f81,0x86cd,0x39cb,0x81ba,0x771e,0xee7e,0x58,0xfbe4,0xa4b,0x28fb,0x7a7d,0x5bb8,0xa413,0x1657,0x2d54,0x3a9d,0xbbad,0x75a3,0x689,0x3069,0xb0ad,0x2fdd,0x2e81,0xad39,0xd3cd,0x2bff,0xb5cb,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x429e45b1,0xd85637b2,0x86cd2f81,0x81ba39cb,0xee7e771e,0xfbe40058,0x28fb0a4b,0x5bb87a7d,0x1657a413,0x3a9d2d54,0x75a3bbad,0x30690689,0x2fddb0ad,0xad392e81,0x2bffd3cd,0x7b5cb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd85637b2429e45b1,0x81ba39cb86cd2f81,0xfbe40058ee7e771e,0x5bb87a7d28fb0a4b,0x3a9d2d541657a413,0x3069068975a3bbad,0xad392e812fddb0ad,0x7b5cb2bffd3cd}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xccc9,0x7946,0x6957,0x76d0,0x60c7,0xb371,0x3b68,0x208a,0xb028,0x2a4c,0xa213,0x2abc,0xc23,0x2a3c,0x721c,0x58e1,0x26c6,0xecdb,0x8f8c,0xa50c,0x4494,0xbedd,0xf62f,0x7e28,0x8b21,0xc788,0xa15f,0x927a,0x83ce,0x70d2,0x620d,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7946ccc9,0x76d06957,0xb37160c7,0x208a3b68,0x2a4cb028,0x2abca213,0x2a3c0c23,0x58e1721c,0xecdb26c6,0xa50c8f8c,0xbedd4494,0x7e28f62f,0xc7888b21,0x927aa15f,0x70d283ce,0xe620d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x76d069577946ccc9,0x208a3b68b37160c7,0x2abca2132a4cb028,0x58e1721c2a3c0c23,0xa50c8f8cecdb26c6,0x7e28f62fbedd4494,0x927aa15fc7888b21,0xe620d70d283ce}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x20f3,0x77e0,0xc9a6,0xeb4f,0xb334,0xff68,0xecb4,0xa6e3,0x5015,0x43c1,0x9e87,0xf4eb,0x22e7,0x5f37,0x9392,0x80a0,0x9ea0,0x670f,0x1be3,0x7559,0x2cb5,0x900d,0xfa83,0x1519,0x67b8,0x4d7c,0xaf3a,0x6dc4,0x12e1,0x1e51,0x8d84,0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77e020f3,0xeb4fc9a6,0xff68b334,0xa6e3ecb4,0x43c15015,0xf4eb9e87,0x5f3722e7,0x80a09392,0x670f9ea0,0x75591be3,0x900d2cb5,0x1519fa83,0x4d7c67b8,0x6dc4af3a,0x1e5112e1,0x58d84}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = 
(mp_limb_t[]) {0xeb4fc9a677e020f3,0xa6e3ecb4ff68b334,0xf4eb9e8743c15015,0x80a093925f3722e7,0x75591be3670f9ea0,0x1519fa83900d2cb5,0x6dc4af3a4d7c67b8,0x58d841e5112e1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x8e98,0xe430,0x6d21,0x2fa6,0x524f,0xf0cf,0xe5eb,0x30ec,0x3658,0x7711,0x7d2f,0x47bf,0xbbc5,0x720c,0xe7a6,0x1ef4,0x335f,0x2c25,0x59e5,0x471c,0x5e06,0x5d38,0x62d6,0xa2a7,0x65f3,0xdefc,0x5e15,0x7a7a,0xdac4,0xc542,0x7bb8,0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe4308e98,0x2fa66d21,0xf0cf524f,0x30ece5eb,0x77113658,0x47bf7d2f,0x720cbbc5,0x1ef4e7a6,0x2c25335f,0x471c59e5,0x5d385e06,0xa2a762d6,0xdefc65f3,0x7a7a5e15,0xc542dac4,0xd7bb8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2fa66d21e4308e98,0x30ece5ebf0cf524f,0x47bf7d2f77113658,0x1ef4e7a6720cbbc5,0x471c59e52c25335f,0xa2a762d65d385e06,0x7a7a5e15defc65f3,0xd7bb8c542dac4}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3249,0xe4fe,0xec61,0x49e0,0x5b5f,0xc495,0x6ef6,0x811,0x4fdf,0x59fc,0xbd69,0x608e,0xafe2,0xe9a9,0x5706,0x98ac,0xb327,0x481a,0x9c4e,0xecac,0x19fa,0x6401,0xfaad,0x14a4,0xeda,0x3fb5,0x7eb5,0x9768,0x6597,0x4c10,0xdc28,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe4fe3249,0x49e0ec61,0xc4955b5f,0x8116ef6,0x59fc4fdf,0x608ebd69,0xe9a9afe2,0x98ac5706,0x481ab327,0xecac9c4e,0x640119fa,0x14a4faad,0x3fb50eda,0x97687eb5,0x4c106597,0xbdc28}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x49e0ec61e4fe3249,0x8116ef6c4955b5f,0x608ebd6959fc4fdf,0x98ac5706e9a9afe2,0xecac9c4e481ab327,0x14a4faad640119fa,0x97687eb53fb50eda,0xbdc284c106597}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdf0d,0x881f,0x3659,0x14b0,0x4ccb,0x97,0x134b,0x591c,0xafea,0xbc3e,0x6178,0xb14,0xdd18,0xa0c8,0x6c6d,0x7f5f,0x615f,0x98f0,0xe41c,0x8aa6,0xd34a,0x6ff2,0x57c,0xeae6,0x9847,0xb283,0x50c5,0x923b,0xed1e,0xe1ae,0x727b,0xa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x881fdf0d,0x14b03659,0x974ccb,0x591c134b,0xbc3eafea,0xb146178,0xa0c8dd18,0x7f5f6c6d,0x98f0615f,0x8aa6e41c,0x6ff2d34a,0xeae6057c,0xb2839847,0x923b50c5,0xe1aeed1e,0xa727b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x14b03659881fdf0d,0x591c134b00974ccb,0xb146178bc3eafea,0x7f5f6c6da0c8dd18,0x8aa6e41c98f0615f,0xeae6057c6ff2d34a,0x923b50c5b2839847,0xa727be1aeed1e}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d 
= (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3337,0x86b9,0x96a8,0x892f,0x9f38,0x4c8e,0xc497,0xdf75,0x4fd7,0xd5b3,0x5dec,0xd543,0xf3dc,0xd5c3,0x8de3,0xa71e,0xd939,0x1324,0x7073,0x5af3,0xbb6b,0x4122,0x9d0,0x81d7,0x74de,0x3877,0x5ea0,0x6d85,0x7c31,0x8f2d,0x9df2,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b93337,0x892f96a8,0x4c8e9f38,0xdf75c497,0xd5b34fd7,0xd5435dec,0xd5c3f3dc,0xa71e8de3,0x1324d939,0x5af37073,0x4122bb6b,0x81d709d0,0x387774de,0x6d855ea0,0x8f2d7c31,0x19df2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x892f96a886b93337,0xdf75c4974c8e9f38,0xd5435decd5b34fd7,0xa71e8de3d5c3f3dc,0x5af370731324d939,0x81d709d04122bb6b,0x6d855ea0387774de,0x19df28f2d7c31}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x28e,0xcfd,0xe1c2,0x2061,0xe412,0x3b18,0x40df,0x716c,0xd025,0xd980,0xc041,0xebb9,0xbb45,0x4982,0x17de,0xa8fe,0xb079,0xffe5,0x34d9,0x2aa6,0x2b0b,0x6787,0x9bab,0x6bc3,0x7365,0x3c03,0xf512,0xb57b,0x897,0xf0b5,0x9c9c,0x8}}} +#elif 
GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcfd028e,0x2061e1c2,0x3b18e412,0x716c40df,0xd980d025,0xebb9c041,0x4982bb45,0xa8fe17de,0xffe5b079,0x2aa634d9,0x67872b0b,0x6bc39bab,0x3c037365,0xb57bf512,0xf0b50897,0x89c9c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2061e1c20cfd028e,0x716c40df3b18e412,0xebb9c041d980d025,0xa8fe17de4982bb45,0x2aa634d9ffe5b079,0x6bc39bab67872b0b,0xb57bf5123c037365,0x89c9cf0b50897}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x45b1,0x429e,0x37b2,0xd856,0x2f81,0x86cd,0x39cb,0x81ba,0x771e,0xee7e,0x58,0xfbe4,0xa4b,0x28fb,0x7a7d,0x5bb8,0xa413,0x1657,0x2d54,0x3a9d,0xbbad,0x75a3,0x689,0x3069,0xb0ad,0x2fdd,0x2e81,0xad39,0xd3cd,0x2bff,0xb5cb,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x429e45b1,0xd85637b2,0x86cd2f81,0x81ba39cb,0xee7e771e,0xfbe40058,0x28fb0a4b,0x5bb87a7d,0x1657a413,0x3a9d2d54,0x75a3bbad,0x30690689,0x2fddb0ad,0xad392e81,0x2bffd3cd,0x7b5cb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd85637b2429e45b1,0x81ba39cb86cd2f81,0xfbe40058ee7e771e,0x5bb87a7d28fb0a4b,0x3a9d2d541657a413,0x3069068975a3bbad,0xad392e812fddb0ad,0x7b5cb2bffd3cd}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xccc9,0x7946,0x6957,0x76d0,0x60c7,0xb371,0x3b68,0x208a,0xb028,0x2a4c,0xa213,0x2abc,0xc23,0x2a3c,0x721c,0x58e1,0x26c6,0xecdb,0x8f8c,0xa50c,0x4494,0xbedd,0xf62f,0x7e28,0x8b21,0xc788,0xa15f,0x927a,0x83ce,0x70d2,0x620d,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7946ccc9,0x76d06957,0xb37160c7,0x208a3b68,0x2a4cb028,0x2abca213,0x2a3c0c23,0x58e1721c,0xecdb26c6,0xa50c8f8c,0xbedd4494,0x7e28f62f,0xc7888b21,0x927aa15f,0x70d283ce,0xe620d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x76d069577946ccc9,0x208a3b68b37160c7,0x2abca2132a4cb028,0x58e1721c2a3c0c23,0xa50c8f8cecdb26c6,0x7e28f62fbedd4494,0x927aa15fc7888b21,0xe620d70d283ce}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xaa15,0x7f4c,0xb027,0xba3f,0xa936,0x25fb,0xd8a6,0xc32c,0x4ff6,0xcba,0x7e3a,0x6517,0x8b62,0x1a7d,0x90bb,0x13df,0x3bed,0x3d1a,0x462b,0x6826,0xf410,0xe897,0x8229,0x4b78,0xee4b,0x42f9,0x6ed,0x6da5,0x4789,0x56bf,0x95bb,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7f4caa15,0xba3fb027,0x25fba936,0xc32cd8a6,0xcba4ff6,0x65177e3a,0x1a7d8b62,0x13df90bb,0x3d1a3bed,0x6826462b,0xe897f410,0x4b788229,0x42f9ee4b,0x6da506ed,0x56bf4789,0xb95bb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xba3fb0277f4caa15,0xc32cd8a625fba936,0x65177e3a0cba4ff6,0x13df90bb1a7d8b62,0x6826462b3d1a3bed,0x4b788229e897f410,0x6da506ed42f9ee4b,0xb95bb56bf4789}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc893,0xf896,0x2771,0xa804,0x1b30,0x95f4,0x9365,0xd12c,0x33e,0xa849,0x9eb8,0x99bc,0xbb85,0x5dc7,0x7fc2,0x63f9,0x71ec,0x9605,0x475f,0xb8e1,0xc488,0xe25f,0x7f40,0x8735,0xecac,0xd7f,0x2994,0x17fb,0xf1ae,0xdafb,0xc2a,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xf896c893,0xa8042771,0x95f41b30,0xd12c9365,0xa849033e,0x99bc9eb8,0x5dc7bb85,0x63f97fc2,0x960571ec,0xb8e1475f,0xe25fc488,0x87357f40,0xd7fecac,0x17fb2994,0xdafbf1ae,0x30c2a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa8042771f896c893,0xd12c936595f41b30,0x99bc9eb8a849033e,0x63f97fc25dc7bb85,0xb8e1475f960571ec,0x87357f40e25fc488,0x17fb29940d7fecac,0x30c2adafbf1ae}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3bfd,0x13ce,0x920a,0x911b,0x4570,0x25b1,0xd461,0xc4e5,0x637e,0x243d,0x5ee1,0x2e39,0x5d17,0x952,0x68c2,0x7a32,0x2b9d,0x2f39,0xe4d1,0x13a4,0x6ad4,0x6cd2,0x9b,0xa287,0x5fc3,0x37c9,0xd69b,0xa250,0x1cb2,0xbc08,0xc8f9,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x13ce3bfd,0x911b920a,0x25b14570,0xc4e5d461,0x243d637e,0x2e395ee1,0x9525d17,0x7a3268c2,0x2f392b9d,0x13a4e4d1,0x6cd26ad4,0xa287009b,0x37c95fc3,0xa250d69b,0xbc081cb2,0x1c8f9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x911b920a13ce3bfd,0xc4e5d46125b14570,0x2e395ee1243d637e,0x7a3268c209525d17,0x13a4e4d12f392b9d,0xa287009b6cd26ad4,0xa250d69b37c95fc3,0x1c8f9bc081cb2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x55eb,0x80b3,0x4fd8,0x45c0,0x56c9,0xda04,0x2759,0x3cd3,0xb009,0xf345,0x81c5,0x9ae8,0x749d,0xe582,0x6f44,0xec20,0xc412,0xc2e5,0xb9d4,0x97d9,0xbef,0x1768,0x7dd6,0xb487,0x11b4,0xbd06,0xf912,0x925a,0xb876,0xa940,0x6a44,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x80b355eb,0x45c04fd8,0xda0456c9,0x3cd32759,0xf345b009,0x9ae881c5,0xe582749d,0xec206f44,0xc2e5c412,0x97d9b9d4,0x17680bef,0xb4877dd6,0xbd0611b4,0x925af912,0xa940b876,0x46a44}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x45c04fd880b355eb,0x3cd32759da0456c9,0x9ae881c5f345b009,0xec206f44e582749d,0x97d9b9d4c2e5c412,0xb4877dd617680bef,0x925af912bd0611b4,0x46a44a940b876}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = 
(mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x4d6, 0x18fd, 0x18c0, 0x13c1, 0x1718, 0x122c, 0x830, 0xa02, 0xee0, 0x1ad1, 0x1485, 0x27f, 0x13e5, 0x118f, 0xdce, 0x31c, 0x1b49, 0x998, 0x117, 0x343, 0xdae, 0x1fe7, 0x16a0, 0x1c43, 0x172, 0xa6e, 0x702, 0xeb8, 0x835, 0x1cf, 0x48d, 0x4e4, 0x13eb, 0x57d, 0xccf, 0x97e, 0x3b6, 0x910, 0x2dd} +#elif RADIX == 32 +{0x126b3651, 0x38c0c7e, 0xb2e313c, 0x10120c24, 0x1ad17702, 0x144ff485, 0x7463e7c, 0x14918e37, 0x22e998d, 0x1b6b8686, 0x3b507f9, 0xdc172e2, 0x1ae0e04a, 0x10e7a0d5, 0x164e4246, 0x13cafb3e, 0x1db25f99, 0x300} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x313c1c6063f49acd, 0x45dc0a0241848b2e, 0x7463e7ca27fa42eb, 0x308ba66369231c6e, 0x5c43b507f9db5c34, 0xd06aeb838129b82e, 0x95f67d64e4246873, 0xfa44076c97e667} +#else +{0x7838c0c7e9359b, 0xa0241848b2e31, 0x1e513fd2175a2ee, 0xda48c71b9d18f9, 0x13b6b86861174cc, 0x9b82e5c43b507f, 0x1439e83575c1c09, 0x19e57d9f5939091, 0x44880ed92fcc} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 
+{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x937, 0x63f, 0xe30, 0x4f0, 0x5c6, 0x48b, 0x120c, 0x280, 0xbb8, 0xeb4, 0x1d21, 0x89f, 0x1cf9, 0x1463, 0x373, 0x8c7, 0x6d2, 0x1a66, 0x1845, 0x10d0, 0x1b6b, 0x7f9, 0x1da8, 0x1710, 0x105c, 0x129b, 0x1c0, 0xbae, 0x1a0d, 0x873, 0x123, 0x1939, 0xcfa, 0x195f, 0x1333, 0x125f, 0xed, 0xa44, 0x697} +#elif RADIX == 32 +{0x149bfcfc, 0xe3031f, 0x2cb8c4f, 0x14048309, 0xeb45dc0, 0x513fd21, 0x19d18f9f, 0xd24638d, 0x108ba663, 0xedae1a1, 0x10ed41fe, 0x13705cb8, 0xeb83812, 0x1439e835, 0x15939091, 0xcf2becf, 0x76c97e6, 0x820} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x8c4f071818fd26ff, 0xd1770280906122cb, 0x9d18f9f289fe90ba, 0xc22e998da48c71b, 0x9710ed41fe76d70d, 0xf41abae0e04a6e0b, 0xe57d9f5939091a1c, 0x6a9101db25f999} +#else +{0x9e0e3031fa4dfe, 0x10280906122cb8c, 0xf944ff485d68bb, 0x369231c6e7463e, 0x1cedae1a1845d33, 0xa6e0b9710ed41f, 0xd0e7a0d5d70702, 0x6795f67d64e424, 0xd52203b64bf3} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x1863, 0x635, 0x19a9, 0x17fc, 0xdfe, 0x1784, 0x150b, 0x16c3, 0x15c0, 0x1f5f, 0x11d9, 0x1064, 0x1893, 0x1829, 0x211, 0x1a9e, 0x2e1, 0x3cc, 0x1e64, 0x12ed, 0x1c2c, 0x18b9, 0x121d, 0x234, 
0xec9, 0x14dc, 0x4b6, 0xaad, 0x19f6, 0x805, 0x1984, 0x1843, 0xfca, 0x1a7a, 0xe04, 0x4af, 0x881, 0x65b, 0x421} +#elif RADIX == 32 +{0x1c31ce4f, 0x199a931a, 0x11bfd7f, 0x161d42ef, 0x1f5fae05, 0xe0c91d9, 0x8e0a712, 0xe1d4f08, 0x1cc83cc1, 0xf0b25db, 0x1490ee2e, 0x1b8ec911, 0xab496d4, 0x402e7d9, 0x15843cc2, 0x134f4fc, 0x4092bdc, 0x85a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xfd7fccd498d70c73, 0x7eb816c3a85de11b, 0x8e0a71270648ecfd, 0xdf320f305c3a9e10, 0x223490ee2e78592e, 0x73ecaad25b5371d9, 0x69e9f95843cc2201, 0xf996d1024af702} +#else +{0xff99a931ae18e7, 0x16c3a85de11bfd, 0x938324767ebf5c, 0x170ea78423829c, 0x1cf0b25dbe641e6, 0x1371d9223490ee2, 0x1100b9f655692da, 0x9a7a7e5610f30, 0x432da20495ee} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1a7, 0x175b, 0x9bd, 0xb94, 0x1a66, 0x1d52, 0x1eb3, 0x1431, 0x9e7, 0x1b9d, 0x75f, 0xcba, 0x17e9, 0xe1d, 0xdb, 0xc7b, 0x76, 0xa04, 0xd73, 0x3f7, 0x17dd, 0x1555, 0x5d6, 0x16ee, 0x1df6, 0x1429, 0x15cb, 0x140b, 0x1aeb, 0x14fb, 0x1984, 0x179b, 0x1ba1, 0x125e, 0xb62, 0x249, 0x95a, 0x137a, 0x7c} +#elif RADIX == 32 +{0x10d3893a, 0x89bdbad, 0x14b4ccb9, 0x18facfa, 0x1b9d4f3d, 0x597475f, 0xdb876fd, 0x7663d83, 0x1ae6a040, 0xdf747ee, 0xe2eb555, 0x53df6b7, 0x102eb974, 0xa7debae, 0x379bcc2, 0x18a4bdba, 0xad09256, 0xcd2} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xccb944dedd6c34e2, 0x753cf431f59f54b4, 0xdb876fd2cba3afee, 0x76b9a8100ecc7b06, 0xd6ee2eb5556fba3f, 0xf5d740bae5d0a7be, 0x497b74379bcc253e, 0x84de92b42495b1} +#else +{0x17289bdbad869c4, 0xf431f59f54b4cc, 0x1e965d1d7f73a9e, 0x3b31ec1b6e1db, 0xadf747eed73502, 0x10a7bed6ee2eb55, 0x129f7aeba05d72e, 0xc525edd0de6f30, 0x109bd2568492b} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1d6a, 0x5b, 0x24a, 0x1bfc, 0x1cef, 0xc7e, 0x1cac, 0x1e4, 0x68, 0x16da, 0x30d, 0x13a5, 0x505, 0x329, 0x9f4, 0x1dae, 0x371, 0x111b, 0x200, 0x1b69, 0x1e51, 0x3b7, 0x316, 0x509, 0x1af2, 0x1220, 0x8c2, 0x195a, 0x1050, 0x1b7a, 0xd8b, 0x1a21, 0x336, 0x14fa, 0x1a4b, 0x11d, 0x167d, 0x1501, 0x302} +#elif RADIX == 32 +{0x1eb53915, 0x1824a02d, 0x1fb9dfbf, 0xf272b18, 0x16da0340, 0x1674a30d, 0x1a0ca4a0, 0x171ed727, 0x40111b1, 0x1f9476d2, 0x918b0ed, 0x41af228, 0x5691852, 0x1dbd4143, 0xda216c5, 0x12e9f433, 0x13e84774, 0xc8d} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xdfbfc125016fad4e, 0x680d01e4e5631fb9, 0xa0ca4a0b3a5186db, 0x9100446c6e3dae4f, 0x450918b0edfca3b6, 0xa0a195a46148835e, 0xd3e866da216c5ede, 0x75406cfa11dd25} +#else +{0x17f824a02df5a9c, 
0x101e4e5631fb9df, 0x1059d28c36db406, 0x11b8f6b93e83292, 0x1bf9476d220088d, 0x8835e450918b0e, 0xf6f5050cad230a, 0x974fa19b6885b1, 0xea80d9f423ba} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1e9d, 0xbb9, 0x14f9, 0xc51, 0x1731, 0x122e, 0x1901, 0x59a, 0xcc1, 0xb65, 0xc68, 0x1eaf, 0x1f48, 0x1e46, 0xe46, 0x9c1, 0x1013, 0x12f8, 0x18a, 0x177f, 0x1e19, 0x1cca, 0x257, 0x18b9, 0xa38, 0x184b, 0x15a4, 0x86d, 0xa8c, 0x1df5, 0xf2, 0x37, 0x5d9, 0x292, 0x11ae, 0x9e, 0x1fce, 0x7f4, 0x407} +#elif RADIX == 32 +{0x1f4ecc63, 0x34f95dc, 0xbae62c5, 0xcd64064, 0xb656609, 0x3d5ec68, 0x3791be9, 0x134e0b9, 0x3152f88, 0x17866efe, 0x1912bf32, 0x96a38c5, 0x1b6b498, 0xefaaa31, 0x12037079, 0xb85245d, 0x1e7027a3, 0x727} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x62c51a7caee7d3b3, 0x9598259ac80c8bae, 0x3791be91eaf6342d, 0xf0c54be20269c172, 0x18b912bf32bc3377, 0x551886dad2612d47, 0xa48bb203707977d, 0x29fd3f9c09e8d7} +#else +{0x18a34f95dcfa766, 0x259ac80c8bae62, 0x148f57b1a16cacc, 0x809a705c8de46f, 0x57866efe18a97c, 0x12d4718b912bf3, 0xbbeaa8c436d693, 0x15c2922ec80dc1e, 0x53fa7f3813d1} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x177, 0xf70, 0x25, 0x503, 0x1f96, 0x1abd, 0x6f5, 0x115b, 0xa68, 0x1192, 0x338, 0x1bae, 0x15af, 0x1570, 0xb79, 0x1c9a, 0xe78, 0x19de, 0x860, 0x1076, 0x1a63, 0x1d52, 0x1511, 0x10c5, 0x1fdf, 0xab1, 0x1454, 0x2c4, 0x292, 0x1135, 0x273, 0x1d, 0xefa, 0x47, 0x344, 0x226, 0x9c1, 0x1af, 0x639} +#elif RADIX == 32 +{0xbbf600, 0x60257b8, 0xf7f2c50, 0xad9bd75, 0x11925344, 0x1f75c338, 0x1cd5c2b5, 0x78e4d2d, 0x10c19de7, 0x1698e0ec, 0x5a88f54, 0x163fdf86, 0xb128a8a, 0x189a8a48, 0x1401d139, 0x11008eef, 0xe088986, 0xd7a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x2c503012bdc02efd, 0x494d115b37aeaf7f, 0xcd5c2b5fbae19c46, 0x64306779cf1c9a5b, 0xf0c5a88f54b4c707, 0x45242c4a2a2ac7fb, 0x11ddf401d139c4d, 0xd86bd3822261a2} +#else +{0xa060257b805dfb, 0x1115b37aeaf7f2c, 0x1afdd70ce2324a6, 0x73c72696f3570a, 0x9698e0ec860cef, 0xac7fbf0c5a88f5, 0xe26a2921625151, 0x8804777d00744e, 0xd7a70444c3} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x153b, 0x598, 0x100c, 0x1537, 0x1eda, 0x190b, 0x1406, 0x186e, 0x457, 0x469, 0x14a0, 0x1ce0, 0x1f6d, 0xf2f, 0x1837, 0x616, 0x16d0, 0xf35, 0x192b, 0x106, 0x17d6, 0x6b3, 0x169e, 0x27a, 0xe54, 0xa42, 0x1694, 0x16c3, 0x7b, 0x298, 0x118, 0xb0, 0x893, 0xbca, 0x1678, 0x19de, 0xb59, 0x3a, 0x43} +#elif RADIX == 32 +{0xa9d84f6, 0xf00c2cc, 0x2fdb553, 0x37501b2, 0x46922be, 0x179c14a0, 0x1bbcbfed, 0xd030b60, 0x1256f35b, 
0x1df5820d, 0x1ab4f1ac, 0x84e5413, 0x1b0ed28a, 0x14c01ee, 0x60b008c, 0x1e179489, 0x1ace77ac, 0x8d2} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xb55378061662a761, 0xa48af86ea03642fd, 0xbbcbfedbce0a5011, 0x6c95bcd6da0616c1, 0x827ab4f1acefac10, 0xf76c3b4a2909ca, 0x2f291260b008c0a6, 0x680e96b39deb3c} +#else +{0xa6f00c2cc54ec2, 0xf86ea03642fdb5, 0x16de7052808d245, 0x1b68185b06ef2ff, 0x19df5820d92b79a, 0x909ca827ab4f1a, 0x53007bb61da51, 0xf0bca44982c023, 0xd01d2d673bd6} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x625d,0xe815,0xba6a,0x6297,0x1a75,0xd2b2,0xc698,0xd28b,0xb1,0x97f3,0x82e4,0x4126,0xf8e7,0x3639,0x1816,0x9c43,0xd2cf,0x37da,0xad52,0x2b8a,0x91cb,0x4297,0x6be7,0x1a98,0x19e4,0xa8a,0x22de,0x835c,0x4d5f,0x4596,0xa957,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe815625d,0x6297ba6a,0xd2b21a75,0xd28bc698,0x97f300b1,0x412682e4,0x3639f8e7,0x9c431816,0x37dad2cf,0x2b8aad52,0x429791cb,0x1a986be7,0xa8a19e4,0x835c22de,0x45964d5f,0x7a957}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6297ba6ae815625d,0xd28bc698d2b21a75,0x412682e497f300b1,0x9c4318163639f8e7,0x2b8aad5237dad2cf,0x1a986be7429791cb,0x835c22de0a8a19e4,0x7a95745964d5f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf6c6,0x71f7,0xa10,0xf04d,0x23dd,0x2419,0x60da,0xbcad,0xe86c,0xb26d,0x2f1d,0xdb61,0xae7,0xd947,0xb159,0x9d94,0x42db,0x8401,0xe3fe,0xcd5c,0x7586,0xfd38,0xf28a,0xc4c5,0x40d7,0x5c22,0x185c,0xd31f,0x843f,0xe782,0xb462,0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x71f7f6c6,0xf04d0a10,0x241923dd,0xbcad60da,0xb26de86c,0xdb612f1d,0xd9470ae7,0x9d94b159,0x840142db,0xcd5ce3fe,0xfd387586,0xc4c5f28a,0x5c2240d7,0xd31f185c,0xe782843f,0x5b462}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf04d0a1071f7f6c6,0xbcad60da241923dd,0xdb612f1db26de86c,0x9d94b159d9470ae7,0xcd5ce3fe840142db,0xc4c5f28afd387586,0xd31f185c5c2240d7,0x5b462e782843f}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1c53,0x7318,0xfc96,0x4b34,0xd504,0xae17,0x4d56,0xa914,0x6a8c,0x69,0x2448,0x28b,0x5716,0xce1b,0xa7cc,0xd29c,0x48cf,0x1028,0xc81e,0x40dd,0xbcdf,0x6d4,0x36f7,0xcb9f,0x8d7,0x3a34,0x99c0,0x38d3,0x6e18,0xb4bc,0xe1a8,0xb}}} +#elif GMP_LIMB_BITS == 
32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x73181c53,0x4b34fc96,0xae17d504,0xa9144d56,0x696a8c,0x28b2448,0xce1b5716,0xd29ca7cc,0x102848cf,0x40ddc81e,0x6d4bcdf,0xcb9f36f7,0x3a3408d7,0x38d399c0,0xb4bc6e18,0xbe1a8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4b34fc9673181c53,0xa9144d56ae17d504,0x28b244800696a8c,0xd29ca7ccce1b5716,0x40ddc81e102848cf,0xcb9f36f706d4bcdf,0x38d399c03a3408d7,0xbe1a8b4bc6e18}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9da3,0x17ea,0x4595,0x9d68,0xe58a,0x2d4d,0x3967,0x2d74,0xff4e,0x680c,0x7d1b,0xbed9,0x718,0xc9c6,0xe7e9,0x63bc,0x2d30,0xc825,0x52ad,0xd475,0x6e34,0xbd68,0x9418,0xe567,0xe61b,0xf575,0xdd21,0x7ca3,0xb2a0,0xba69,0x56a8,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x17ea9da3,0x9d684595,0x2d4de58a,0x2d743967,0x680cff4e,0xbed97d1b,0xc9c60718,0x63bce7e9,0xc8252d30,0xd47552ad,0xbd686e34,0xe5679418,0xf575e61b,0x7ca3dd21,0xba69b2a0,0x856a8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d68459517ea9da3,0x2d7439672d4de58a,0xbed97d1b680cff4e,0x63bce7e9c9c60718,0xd47552adc8252d30,0xe5679418bd686e34,0x7ca3dd21f575e61b,0x856a8ba69b2a0}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +#elif 
GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1aff,0x9f84,0xf1c6,0xd816,0xbdd0,0xd450,0x1990,0x119,0xbcf7,0x1a97,0x4780,0x8209,0x695b,0x1d73,0x20ba,0x7b53,0x5e3c,0x4ce5,0xac53,0x351f,0xaaa3,0x5a3e,0xd54c,0x121f,0xbf17,0xdb55,0xc9c,0x8370,0x2061,0x415c,0x1f35,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9f841aff,0xd816f1c6,0xd450bdd0,0x1191990,0x1a97bcf7,0x82094780,0x1d73695b,0x7b5320ba,0x4ce55e3c,0x351fac53,0x5a3eaaa3,0x121fd54c,0xdb55bf17,0x83700c9c,0x415c2061,0xc1f35}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd816f1c69f841aff,0x1191990d450bdd0,0x820947801a97bcf7,0x7b5320ba1d73695b,0x351fac534ce55e3c,0x121fd54c5a3eaaa3,0x83700c9cdb55bf17,0xc1f35415c2061}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x7734,0xde6f,0xbab1,0xd4f3,0xc928,0x6c68,0x69b0,0x7cc0,0x994f,0x296c,0xb1dc,0x2eb2,0xe4ce,0x8494,0xa8ff,0x95d3,0x5f30,0xe7f,0x918,0x6cd6,0xae27,0x747c,0x1f93,0xed96,0x5590,0xc91a,0x713d,0xc33e,0xc075,0x40fd,0x9ce5,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xde6f7734,0xd4f3bab1,0x6c68c928,0x7cc069b0,0x296c994f,0x2eb2b1dc,0x8494e4ce,0x95d3a8ff,0xe7f5f30,0x6cd60918,0x747cae27,0xed961f93,0xc91a5590,0xc33e713d,0x40fdc075,0x39ce5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd4f3bab1de6f7734,0x7cc069b06c68c928,0x2eb2b1dc296c994f,0x95d3a8ff8494e4ce,0x6cd609180e7f5f30,0xed961f93747cae27,0xc33e713dc91a5590,0x39ce540fdc075}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xda85,0x89f5,0x1aaf,0x9ec7,0xcfff,0xec63,0x3ae9,0x20bc,0xc2f3,0x9942,0x7d84,0xfa25,0x5e69,0xeb7b,0xc357,0x9342,0x5c58,0xd26c,0x857b,0x7a7f,0x757,0xfb5c,0xbb97,0x33,0x6c28,0xfceb,0xd644,0xcc0a,0x22ad,0xe1c0,0x12d6,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x89f5da85,0x9ec71aaf,0xec63cfff,0x20bc3ae9,0x9942c2f3,0xfa257d84,0xeb7b5e69,0x9342c357,0xd26c5c58,0x7a7f857b,0xfb5c0757,0x33bb97,0xfceb6c28,0xcc0ad644,0xe1c022ad,0x412d6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9ec71aaf89f5da85,0x20bc3ae9ec63cfff,0xfa257d849942c2f3,0x9342c357eb7b5e69,0x7a7f857bd26c5c58,0x33bb97fb5c0757,0xcc0ad644fceb6c28,0x412d6e1c022ad}}} +#endif +, +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe501,0x607b,0xe39,0x27e9,0x422f,0x2baf,0xe66f,0xfee6,0x4308,0xe568,0xb87f,0x7df6,0x96a4,0xe28c,0xdf45,0x84ac,0xa1c3,0xb31a,0x53ac,0xcae0,0x555c,0xa5c1,0x2ab3,0xede0,0x40e8,0x24aa,0xf363,0x7c8f,0xdf9e,0xbea3,0xe0ca,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x607be501,0x27e90e39,0x2baf422f,0xfee6e66f,0xe5684308,0x7df6b87f,0xe28c96a4,0x84acdf45,0xb31aa1c3,0xcae053ac,0xa5c1555c,0xede02ab3,0x24aa40e8,0x7c8ff363,0xbea3df9e,0x3e0ca}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x27e90e39607be501,0xfee6e66f2baf422f,0x7df6b87fe5684308,0x84acdf45e28c96a4,0xcae053acb31aa1c3,0xede02ab3a5c1555c,0x7c8ff36324aa40e8,0x3e0cabea3df9e}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x625d,0xe815,0xba6a,0x6297,0x1a75,0xd2b2,0xc698,0xd28b,0xb1,0x97f3,0x82e4,0x4126,0xf8e7,0x3639,0x1816,0x9c43,0xd2cf,0x37da,0xad52,0x2b8a,0x91cb,0x4297,0x6be7,0x1a98,0x19e4,0xa8a,0x22de,0x835c,0x4d5f,0x4596,0xa957,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe815625d,0x6297ba6a,0xd2b21a75,0xd28bc698,0x97f300b1,0x412682e4,0x3639f8e7,0x9c431816,0x37dad2cf,0x2b8aad52,0x429791cb,0x1a986be7,0xa8a19e4,0x835c22de,0x45964d5f,0x7a957}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6297ba6ae815625d,0xd28bc698d2b21a75,0x412682e497f300b1,0x9c4318163639f8e7,0x2b8aad5237dad2cf,0x1a986be7429791cb,0x835c22de0a8a19e4,0x7a95745964d5f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf6c6,0x71f7,0xa10,0xf04d,0x23dd,0x2419,0x60da,0xbcad,0xe86c,0xb26d,0x2f1d,0xdb61,0xae7,0xd947,0xb159,0x9d94,0x42db,0x8401,0xe3fe,0xcd5c,0x7586,0xfd38,0xf28a,0xc4c5,0x40d7,0x5c22,0x185c,0xd31f,0x843f,0xe782,0xb462,0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x71f7f6c6,0xf04d0a10,0x241923dd,0xbcad60da,0xb26de86c,0xdb612f1d,0xd9470ae7,0x9d94b159,0x840142db,0xcd5ce3fe,0xfd387586,0xc4c5f28a,0x5c2240d7,0xd31f185c,0xe782843f,0x5b462}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf04d0a1071f7f6c6,0xbcad60da241923dd,0xdb612f1db26de86c,0x9d94b159d9470ae7,0xcd5ce3fe840142db,0xc4c5f28afd387586,0xd31f185c5c2240d7,0x5b462e782843f}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1c53,0x7318,0xfc96,0x4b34,0xd504,0xae17,0x4d56,0xa914,0x6a8c,0x69,0x2448,0x28b,0x5716,0xce1b,0xa7cc,0xd29c,0x48cf,0x1028,0xc81e,0x40dd,0xbcdf,0x6d4,0x36f7,0xcb9f,0x8d7,0x3a34,0x99c0,0x38d3,0x6e18,0xb4bc,0xe1a8,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x73181c53,0x4b34fc96,0xae17d504,0xa9144d56,0x696a8c,0x28b2448,0xce1b5716,0xd29ca7cc,0x102848cf,0x40ddc81e,0x6d4bcdf,0xcb9f36f7,0x3a3408d7,0x38d399c0,0xb4bc6e18,0xbe1a8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4b34fc9673181c53,0xa9144d56ae17d504,0x28b244800696a8c,0xd29ca7ccce1b5716,0x40ddc81e102848cf,0xcb9f36f706d4bcdf,0x38d399c03a3408d7,0xbe1a8b4bc6e18}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x9da3,0x17ea,0x4595,0x9d68,0xe58a,0x2d4d,0x3967,0x2d74,0xff4e,0x680c,0x7d1b,0xbed9,0x718,0xc9c6,0xe7e9,0x63bc,0x2d30,0xc825,0x52ad,0xd475,0x6e34,0xbd68,0x9418,0xe567,0xe61b,0xf575,0xdd21,0x7ca3,0xb2a0,0xba69,0x56a8,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x17ea9da3,0x9d684595,0x2d4de58a,0x2d743967,0x680cff4e,0xbed97d1b,0xc9c60718,0x63bce7e9,0xc8252d30,0xd47552ad,0xbd686e34,0xe5679418,0xf575e61b,0x7ca3dd21,0xba69b2a0,0x856a8}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d68459517ea9da3,0x2d7439672d4de58a,0xbed97d1b680cff4e,0x63bce7e9c9c60718,0xd47552adc8252d30,0xe5679418bd686e34,0x7ca3dd21f575e61b,0x856a8ba69b2a0}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +#elif 
GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x679c,0x35ac,0x6c8c,0xee5e,0x2827,0x29fa,0x9f6c,0xbda,0x2083,0x5e20,0xd351,0x39bd,0xd9bc,0x4085,0x3727,0x8f2,0xe905,0x55dd,0x6f90,0x6e26,0x6779,0xf15a,0xf170,0xec90,0xdb0e,0x53a0,0x6f99,0xe710,0xad92,0xa7f0,0xe2e1,0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x35ac679c,0xee5e6c8c,0x29fa2827,0xbda9f6c,0x5e202083,0x39bdd351,0x4085d9bc,0x8f23727,0x55dde905,0x6e266f90,0xf15a6779,0xec90f170,0x53a0db0e,0xe7106f99,0xa7f0ad92,0xde2e1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xee5e6c8c35ac679c,0xbda9f6c29fa2827,0x39bdd3515e202083,0x8f237274085d9bc,0x6e266f9055dde905,0xec90f170f15a6779,0xe7106f9953a0db0e,0xde2e1a7f0ad92}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa483,0xbf25,0x238c,0x4c65,0xdd0b,0xccc9,0xc5af,0xac20,0xe998,0xb162,0xe2bf,0xbd24,0x5fd,0x6720,0xd781,0xd37d,0xa89,0x595a,0x76b0,0x7f86,0xdea4,0x59ea,0x2c01,0xd679,0x714b,0x5454,0xe262,0x2bcf,0xfad4,0x8bc0,0x8cd3,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbf25a483,0x4c65238c,0xccc9dd0b,0xac20c5af,0xb162e998,0xbd24e2bf,0x672005fd,0xd37dd781,0x595a0a89,0x7f8676b0,0x59eadea4,0xd6792c01,0x5454714b,0x2bcfe262,0x8bc0fad4,0xc8cd3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4c65238cbf25a483,0xac20c5afccc9dd0b,0xbd24e2bfb162e998,0xd37dd781672005fd,0x7f8676b0595a0a89,0xd6792c0159eadea4,0x2bcfe2625454714b,0xc8cd38bc0fad4}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3f72,0x6188,0x95e8,0xed15,0x2b1a,0x2fd,0xaae9,0x15d9,0x5945,0x23ff,0xfe55,0xce25,0xaa48,0xa648,0x8534,0x16db,0x3fcf,0xa301,0xfb7c,0x3a68,0x4ba,0x1c1d,0x30ee,0xf044,0x116f,0xc4f8,0x98b2,0x4971,0xea5c,0xb93e,0x2836,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x61883f72,0xed1595e8,0x2fd2b1a,0x15d9aae9,0x23ff5945,0xce25fe55,0xa648aa48,0x16db8534,0xa3013fcf,0x3a68fb7c,0x1c1d04ba,0xf04430ee,0xc4f8116f,0x497198b2,0xb93eea5c,0x32836}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xed1595e861883f72,0x15d9aae902fd2b1a,0xce25fe5523ff5945,0x16db8534a648aa48,0x3a68fb7ca3013fcf,0xf04430ee1c1d04ba,0x497198b2c4f8116f,0x32836b93eea5c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9864,0xca53,0x9373,0x11a1,0xd7d8,0xd605,0x6093,0xf425,0xdf7c,0xa1df,0x2cae,0xc642,0x2643,0xbf7a,0xc8d8,0xf70d,0x16fa,0xaa22,0x906f,0x91d9,0x9886,0xea5,0xe8f,0x136f,0x24f1,0xac5f,0x9066,0x18ef,0x526d,0x580f,0x1d1e,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xca539864,0x11a19373,0xd605d7d8,0xf4256093,0xa1dfdf7c,0xc6422cae,0xbf7a2643,0xf70dc8d8,0xaa2216fa,0x91d9906f,0xea59886,0x136f0e8f,0xac5f24f1,0x18ef9066,0x580f526d,0x21d1e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x11a19373ca539864,0xf4256093d605d7d8,0xc6422caea1dfdf7c,0xf70dc8d8bf7a2643,0x91d9906faa2216fa,0x136f0e8f0ea59886,0x18ef9066ac5f24f1,0x21d1e580f526d}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x185f, 0xecc, 0x21c, 0xa62, 0x77, 0x8b1, 0x1188, 0xec2, 0xda1, 0xbf0, 0xb11, 0x82d, 0x1fe9, 0x10d4, 0x1e36, 0x194e, 0x77e, 0x1112, 0x14c0, 0x1dcd, 0x1be7, 0x1a4c, 0x140e, 0x10c5, 0x1aaf, 0x236, 0xdf3, 0x1a21, 0x1dff, 0x15bb, 0xda8, 0x30e, 0x17b0, 0xc7b, 0xd1, 0xcb, 0x868, 0x2e5, 0x5a} +#elif RADIX == 32 +{0xc2f86ac, 0x421c766, 0xc40eea6, 0x16146211, 0xbf06d0b, 0x505ab11, 0x1b4353fd, 0x17eca778, 0x9811123, 0x6f9fb9b, 0x5a07693, 0x6daaf86, 0x885be62, 0xaddf7ff, 0x30e6d4, 0x1458f77b, 0x34032c1, 0x52a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xeea6210e3b330be1, 0xc1b42ec28c422c40, 0xb4353fd282d588af, 0xda604448efd94ef1, 0xf0c5a0769337cfdc, 0xfbffa216f988db55, 0xb1eef6030e6d456e, 0x120b950d00cb068} +#else +{0x14c421c766617c3, 0x2ec28c422c40ee, 0x1e9416ac457e0da, 0x3bf653bc6d0d4f, 0x66f9fb9b4c0889, 0x8db55f0c5a0769, 0x2b77dffd10b7cc, 0x1a2c7bbd80c39b5, 0x9172a1a01960} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x61a, 0x3b3, 0x1087, 0x1a98, 0x81d, 0x22c, 0x1462, 0xbb0, 0x368, 0xafc, 0xac4, 0xa0b, 0x7fa, 0x1435, 0x178d, 0x1653, 0x11df, 0x444, 0xd30, 0x1f73, 0x6f9, 0x1693, 0xd03, 0x1c31, 0x16ab, 0x188d, 0xb7c, 0x1e88, 0x1f7f, 0x56e, 0x136a, 0xc3, 0x1dec, 0xb1e, 0x1834, 0x32, 0xa1a, 0x10b9, 0xe6} +#elif RADIX == 32 +{0x130d1113, 0x110871d9, 0xb103ba9, 0x1d851884, 0xafc1b42, 0x9416ac4, 0x6d0d4ff, 0x1dfb29de, 0x1a604448, 0x19be7ee6, 0x11681da4, 0x11b6abe1, 0x1a216f98, 0x2b77dff, 0x180c39b5, 0xd163dde, 0x10d00cb0, 0x54a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) 
+{0x3ba988438eccc344, 0xf06d0bb0a3108b10, 0x6d0d4ff4a0b5622b, 0x369811123bf653bc, 0x7c31681da4cdf3f7, 0xbeffe885be6236d5, 0x2c7bbd80c39b515b, 0x742e5434032c1a} +#else +{0x15310871d998688, 0x10bb0a3108b103b, 0x1fa505ab115f836, 0x8efd94ef1b4353, 0x99be7ee6d30222, 0x236d57c31681da, 0x8addf7ff442df3, 0x68b1eef6030e6d, 0xe85ca8680658} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0xa5a, 0x2ab, 0x659, 0x149f, 0xf1b, 0xa1a, 0xb05, 0x1915, 0x1aa8, 0x1aa0, 0x1c4d, 0xe2f, 0xe1c, 0x19ab, 0x1d34, 0xa8f, 0xf59, 0x1f1, 0xc6d, 0x520, 0xb6e, 0x127f, 0x5dd, 0x175a, 0x1957, 0x1ca4, 0x1563, 0x122f, 0x705, 0xcd6, 0x1c02, 0xdc1, 0x93b, 0x387, 0x1870, 0x54, 0x853, 0x1adc, 0x6bc} +#elif RADIX == 32 +{0x152d7fc4, 0x1e659155, 0x69e3749, 0x8aac154, 0x1aa0d546, 0x11c5fc4d, 0x1a66adc3, 0x159547f4, 0x18da1f17, 0x1adb8a40, 0x1a2eec9f, 0x149957ba, 0x8beac7c, 0x66b1c16, 0x16dc1e01, 0x1c070e93, 0x2981530, 0xe2} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3749f32c8aad4b5f, 0x83551915582a869e, 0xa66adc38e2fe26ea, 0x63687c5eb2a8fe9, 0xf75a2eec9fd6dc52, 0x8e0b22fab1f2932a, 0xe1d276dc1e01335, 0x196b710a6054c38} +#else +{0x93e659155a96bf, 0x11915582a869e37, 0x1c717f137541aa, 0x17acaa3fa699ab7, 0x1fadb8a40c6d0f8, 0x12932af75a2eec9, 0x99ac705917d58f, 0xe038749db70780, 0x17d6e214c0a98} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x66e, 0xe79, 0xadd, 0x23, 0xf11, 0x7d6, 0x1091, 0x42a, 0x1885, 0x128, 0x6f9, 0xcdd, 0x1d55, 0x19bd, 0x116f, 0x1dbd, 0x107b, 0xaef, 0x8bc, 0xa74, 0x7b5, 0xdff, 0x743, 0x17e0, 0x453, 0x414, 0x672, 0xf28, 0x198a, 0x19c4, 0x1e85, 0xcb9, 0x17c2, 0x14c6, 0x1871, 0x1034, 0x6cb, 0x55b, 0xbf} +#elif RADIX == 32 +{0x13370e29, 0x6add73c, 0x159e2202, 0x154244f, 0x128c429, 0x159ba6f9, 0x17e6f7aa, 0x7bedec5, 0x1178aef8, 0x19ed54e8, 0x3a1b7f, 0x28453bf, 0x1ca0ce44, 0x1ce26629, 0x4cb9f42, 0x1c698d7c, 0x165c0d30, 0x159} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x2202356eb9e4cdc3, 0xa310a42a8489f59e, 0x7e6f7aaacdd37c84, 
0x445e2bbe0f7dbd8b, 0x77e03a1b7fcf6aa7, 0x3314f2833910508a, 0xd31af84cb9f42e71, 0xe956cd97034c38} +#else +{0x46add73c99b87, 0xa42a8489f59e22, 0x15566e9be425188, 0x183df6f62df9bde, 0x1f9ed54e88bc577, 0x10508a77e03a1b7, 0x1738998a79419c8, 0xe34c6be132e7d0, 0x22ad9b2e0698} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x165f, 0x1e7c, 0xe41, 0x12eb, 0xa1, 0x1655, 0x6db, 0x1dfc, 0x4a, 0xac7, 0x1dcb, 0x3d9, 0x16a0, 0x562, 0x1d70, 0x528, 0xaa7, 0x172e, 0x36c, 0x728, 0x1e76, 0x23f, 0x6e6, 0x53e, 0x1640, 0x1a82, 0x1b78, 0x1066, 0x895, 0x17eb, 0x1713, 0x174d, 0x679, 0x1415, 0x19a8, 0xe7c, 0x674, 0x1f81, 0x15} +#elif RADIX == 32 +{0xb2f81a0, 0x16e41f3e, 0x1541432e, 0xfe1b6ec, 0xac70257, 0x7b3dcb, 0x18158ad4, 0xa729475, 0x6d972e5, 0x1f9d8e50, 0x1e37308f, 0x10564029, 0x19b6f1a, 0x1bf5a256, 0x1374db89, 0xa282a67, 0x13a39f33, 0xc09} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x432eb720f9f2cbe0, 0x1c095dfc36dd9541, 0x8158ad403d9ee5ab, 0x81b65cb954e528eb, 0x53e37308ffcec72, 0xd12b066dbc6a0ac8, 0x5054cf374db89dfa, 0xafe04ce8e7ccd4} +#else +{0x5d6e41f3e597c0, 0x15dfc36dd954143, 0xa01ecf72d58e04, 0x55394a3ae0562b, 0x1ff9d8e5036cb97, 0xa0ac8053e37308, 0xefd68958336de3, 0x15141533cdd36e2, 0x15fc099d1cf99} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1e32, 0x1f7c, 0x1c05, 0x372, 0x34a, 0x1d26, 0x11b9, 0x294, 0xa87, 0x1835, 0x158f, 0x1d19, 0x13e8, 0x4dc, 0x1e1a, 0x195f, 0x116e, 0x62c, 0x1839, 0x107a, 0xa4f, 0x119f, 0x18f3, 0xc48, 0x1c7a, 0x100d, 0x2e9, 0x12df, 0xbec, 0x6f1, 0x8bf, 0xe24, 0xa57, 0x50c, 0x28b, 0x31e, 0x430, 0x1b08, 0x378} +#elif RADIX == 32 +{0xf1941d7, 0x5c05fbe, 0x9869437, 0x14a46e7a, 0x18355438, 0x3a3358f, 0xd13727d, 0x16ecaff8, 0x107262c8, 0x1a93e0f5, 0x8c79c67, 0x1bc7a62, 0xb7c5d30, 0x1378afb2, 0xee2445f, 0x2ca18a5, 0x180c785, 0x1c1} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x94372e02fdf3c650, 0xd550e2948dcf4986, 0xd13727d1d19ac7e0, 0xac1c98b22dd95ff0, 0x4c48c79c67d49f07, 0x57d92df174c0378f, 0x94314aee2445f9bc, 0xc6c2086031e145} +#else +{0x6e5c05fbe78ca0, 0xe2948dcf498694, 0x1e8e8cd63f06aa8, 0x8b7657fc344dc9, 0xfa93e0f5839316, 0x378f4c48c79c6, 0x1cde2bec96f8ba6, 0x11650c52bb89117, 0x18d8410c063c2} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1044, 0x2d0, 0x1004, 0x1082, 0x535, 0x141a, 0x10a6, 0x1f9d, 0xc2d, 0x1347, 0xdf4, 0x1db1, 0x90e, 0x116d, 0x59c, 0xc2b, 0x7c2, 0x15d7, 0x119, 0x32c, 0x1e89, 0x1b01, 0xe5f, 0x105f, 0xd7d, 0xb4f, 0x1c33, 0x1b3b, 0xf2d, 0xc22, 0x11d8, 0x1848, 0x11a9, 0x1ee7, 0x6ea, 0x165d, 0x17d4, 0x77, 0x64b} +#elif RADIX == 32 +{0x8227755, 0x5004168, 0x68a6b08, 0x1cec29a8, 0x1347616f, 0x1bb62df4, 0xe45b521, 0x1c261596, 0x2335d73, 0xfa24658, 0x1f72fec0, 0x9ed7d82, 0xcef866b, 0x6113cb7, 0x138488ec, 0x1abdcf1a, 0x1ea5974d, 0x83d} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6b0828020b42089d, 0x1d85bf9d8535068a, 0xe45b521ddb16fa4d, 0xc08cd75cf84c2b2c, 0xb05f72fec07d1232, 0x9e5bb3be19ad3daf, 0x7b9e3538488ec308, 0x1681defa965d375} +#else +{0x1050041684113b, 0x1bf9d8535068a6b, 0x10eed8b7d268ec2, 0x13e130acb3916d4, 0xfa24658119aeb, 0xd3dafb05f72fec, 0x1844f2dd9df0cd, 0x1d5ee78d4e1223b, 0x1203bdf52cba6} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x7bc, 0x14d4, 0x1225, 0x1afb, 0x179e, 0x2c0, 0x1c0, 0x1267, 0x450, 0x1f26, 0x1e3f, 0x2bb, 0x19a5, 0x12f9, 0xa57, 0x2d, 0x1ed, 0xa16, 0x754, 0x1893, 0x759, 0x6bb, 0x618, 0x1379, 0xff3, 0x1989, 0x1abb, 0x1c40, 0x1bf5, 0x71e, 0xd6d, 0xc04, 0x15ef, 0x6aa, 0x4da, 0x1fb6, 0xb5b, 0x9f2, 0x211} +#elif RADIX == 32 +{0x3de2735, 0x17225a6a, 0x102f3daf, 0x13387005, 0x1f262284, 0x14577e3f, 0xbcbe734, 0x1ed016a9, 0xea8a160, 0x19d67126, 0x1930c1ae, 0x112ff39b, 0x11035779, 0x138f6fd7, 0x1ec046b6, 0x168d555e, 0x1adfed89, 0x412} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3dafb912d350f789, 0x988a12670e00b02f, 0xbcbe734a2bbf1ffc, 0x33aa28583da02d52, 0x737930c1aeceb389, 0xb7ebc40d5de625fe, 0x1aaabdec046b69c7, 0x15a7c96b7fb626d} +#else +{0x15f7225a6a1ef13, 0x12670e00b02f3d, 0x1a515df8ffe4c45, 0xf680b54af2f9c, 0x1d9d6712675450b, 0x625fe737930c1a, 0x14e3dbf5e206aef, 0x1b46aaaf7b011ad, 0x104f92d6ff6c4} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5b0b,0xfec,0x3254,0x9d11,0x8130,0x32bb,0x65ca,0x86b8,0x8184,0xfce7,0x3cc3,0x667c,0xa54b,0x4e65,0x271e,0x9834,0xa9c,0x1d81,0x5e0d,0x45fd,0x26eb,0x4b0b,0x5f8f,0x7e57,0xd36f,0xcb4c,0x56b4,0xe984,0xad75,0xcfdf,0x59a1,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xfec5b0b,0x9d113254,0x32bb8130,0x86b865ca,0xfce78184,0x667c3cc3,0x4e65a54b,0x9834271e,0x1d810a9c,0x45fd5e0d,0x4b0b26eb,0x7e575f8f,0xcb4cd36f,0xe98456b4,0xcfdfad75,0xb59a1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d1132540fec5b0b,0x86b865ca32bb8130,0x667c3cc3fce78184,0x9834271e4e65a54b,0x45fd5e0d1d810a9c,0x7e575f8f4b0b26eb,0xe98456b4cb4cd36f,0xb59a1cfdfad75}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2bba,0x15e8,0x1c2b,0x55ed,0xb72c,0xb3cc,0x3a5c,0xb409,0x2f3d,0x71b,0xd365,0x90a1,0xb2f5,0x1e26,0xbe0,0xa633,0xc5a6,0xe2bd,0xe0e3,0x49b3,0xd4c8,0x9f1c,0x9ba3,0x3674,0x2e5a,0x5411,0xc603,0xcdd3,0xfb23,0x2e7e,0x88a,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x15e82bba,0x55ed1c2b,0xb3ccb72c,0xb4093a5c,0x71b2f3d,0x90a1d365,0x1e26b2f5,0xa6330be0,0xe2bdc5a6,0x49b3e0e3,0x9f1cd4c8,0x36749ba3,0x54112e5a,0xcdd3c603,0x2e7efb23,0x4088a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55ed1c2b15e82bba,0xb4093a5cb3ccb72c,0x90a1d365071b2f3d,0xa6330be01e26b2f5,0x49b3e0e3e2bdc5a6,0x36749ba39f1cd4c8,0xcdd3c60354112e5a,0x4088a2e7efb23}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa8e5,0xb5b7,0x12fc,0x2365,0x6f00,0x2267,0x5260,0x5ece,0xbf38,0x93ee,0xfcde,0xe5b3,0x9f9,0x712a,0x1770,0xdd48,0xdd27,0x5350,0xb7f5,0xbdf,0x46a4,0x7cb4,0xe6a9,0xca36,0x8565,0x3c9,0xe43b,0x3713,0x4fe2,0xeeb1,0x806a,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb5b7a8e5,0x236512fc,0x22676f00,0x5ece5260,0x93eebf38,0xe5b3fcde,0x712a09f9,0xdd481770,0x5350dd27,0xbdfb7f5,0x7cb446a4,0xca36e6a9,0x3c98565,0x3713e43b,0xeeb14fe2,0x4806a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x236512fcb5b7a8e5,0x5ece526022676f00,0xe5b3fcde93eebf38,0xdd481770712a09f9,0xbdfb7f55350dd27,0xca36e6a97cb446a4,0x3713e43b03c98565,0x4806aeeb14fe2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa4f5,0xf013,0xcdab,0x62ee,0x7ecf,0xcd44,0x9a35,0x7947,0x7e7b,0x318,0xc33c,0x9983,0x5ab4,0xb19a,0xd8e1,0x67cb,0xf563,0xe27e,0xa1f2,0xba02,0xd914,0xb4f4,0xa070,0x81a8,0x2c90,0x34b3,0xa94b,0x167b,0x528a,0x3020,0xa65e,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf013a4f5,0x62eecdab,0xcd447ecf,0x79479a35,0x3187e7b,0x9983c33c,0xb19a5ab4,0x67cbd8e1,0xe27ef563,0xba02a1f2,0xb4f4d914,0x81a8a070,0x34b32c90,0x167ba94b,0x3020528a,0x4a65e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x62eecdabf013a4f5,0x79479a35cd447ecf,0x9983c33c03187e7b,0x67cbd8e1b19a5ab4,0xba02a1f2e27ef563,0x81a8a070b4f4d914,0x167ba94b34b32c90,0x4a65e3020528a}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, 
._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5eb9,0x2393,0xd8e8,0xc566,0xd78,0xa77f,0x1bf1,0x4577,0x3141,0xecd3,0x132c,0x281,0x13b5,0x1d34,0xb4bb,0xf25,0xdc3,0xbf86,0x5e9f,0xde50,0xf536,0xe95e,0xd5b0,0x687d,0x3ab,0x992c,0xdb8d,0xc8cc,0xfaf0,0xd954,0x6e1a,0x5}}} 
+#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x23935eb9,0xc566d8e8,0xa77f0d78,0x45771bf1,0xecd33141,0x281132c,0x1d3413b5,0xf25b4bb,0xbf860dc3,0xde505e9f,0xe95ef536,0x687dd5b0,0x992c03ab,0xc8ccdb8d,0xd954faf0,0x56e1a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc566d8e823935eb9,0x45771bf1a77f0d78,0x281132cecd33141,0xf25b4bb1d3413b5,0xde505e9fbf860dc3,0x687dd5b0e95ef536,0xc8ccdb8d992c03ab,0x56e1ad954faf0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf17c,0xf7a8,0xd9f7,0x1544,0xb2c8,0xf5aa,0x3812,0x3fba,0xf63e,0xb545,0x678c,0xad77,0xed9f,0x12f8,0xa5dc,0x74c9,0xec1d,0xc1e0,0x806f,0x14a0,0xfb25,0x34f3,0x606c,0x57d5,0x9733,0x9c8c,0x83e3,0xa787,0x7cae,0x503b,0x2499,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf7a8f17c,0x1544d9f7,0xf5aab2c8,0x3fba3812,0xb545f63e,0xad77678c,0x12f8ed9f,0x74c9a5dc,0xc1e0ec1d,0x14a0806f,0x34f3fb25,0x57d5606c,0x9c8c9733,0xa78783e3,0x503b7cae,0x12499}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1544d9f7f7a8f17c,0x3fba3812f5aab2c8,0xad77678cb545f63e,0x74c9a5dc12f8ed9f,0x14a0806fc1e0ec1d,0x57d5606c34f3fb25,0xa78783e39c8c9733,0x12499503b7cae}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5d83,0x57ac,0xb73f,0xb74d,0x1869,0x3588,0x43,0x915,0x7f31,0x82eb,0x4487,0xb830,0x6627,0x70a7,0x9911,0x5646,0x4779,0xe113,0x168c,0x925d,0xc1e8,0xd347,0xa95e,0xd5a6,0x7deb,0xbeb,0x72,0xf755,0x306,0x9ee2,0x7ef9,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x57ac5d83,0xb74db73f,0x35881869,0x9150043,0x82eb7f31,0xb8304487,0x70a76627,0x56469911,0xe1134779,0x925d168c,0xd347c1e8,0xd5a6a95e,0xbeb7deb,0xf7550072,0x9ee20306,0x27ef9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb74db73f57ac5d83,0x915004335881869,0xb830448782eb7f31,0x5646991170a76627,0x925d168ce1134779,0xd5a6a95ed347c1e8,0xf75500720beb7deb,0x27ef99ee20306}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa147,0xdc6c,0x2717,0x3a99,0xf287,0x5880,0xe40e,0xba88,0xcebe,0x132c,0xecd3,0xfd7e,0xec4a,0xe2cb,0x4b44,0xf0da,0xf23c,0x4079,0xa160,0x21af,0xac9,0x16a1,0x2a4f,0x9782,0xfc54,0x66d3,0x2472,0x3733,0x50f,0x26ab,0x91e5,0xa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdc6ca147,0x3a992717,0x5880f287,0xba88e40e,0x132ccebe,0xfd7eecd3,0xe2cbec4a,0xf0da4b44,0x4079f23c,0x21afa160,0x16a10ac9,0x97822a4f,0x66d3fc54,0x37332472,0x26ab050f,0xa91e5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3a992717dc6ca147,0xba88e40e5880f287,0xfd7eecd3132ccebe,0xf0da4b44e2cbec4a,0x21afa1604079f23c,0x97822a4f16a10ac9,0x3733247266d3fc54,0xa91e526ab050f}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5b0b,0xfec,0x3254,0x9d11,0x8130,0x32bb,0x65ca,0x86b8,0x8184,0xfce7,0x3cc3,0x667c,0xa54b,0x4e65,0x271e,0x9834,0xa9c,0x1d81,0x5e0d,0x45fd,0x26eb,0x4b0b,0x5f8f,0x7e57,0xd36f,0xcb4c,0x56b4,0xe984,0xad75,0xcfdf,0x59a1,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xfec5b0b,0x9d113254,0x32bb8130,0x86b865ca,0xfce78184,0x667c3cc3,0x4e65a54b,0x9834271e,0x1d810a9c,0x45fd5e0d,0x4b0b26eb,0x7e575f8f,0xcb4cd36f,0xe98456b4,0xcfdfad75,0xb59a1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d1132540fec5b0b,0x86b865ca32bb8130,0x667c3cc3fce78184,0x9834271e4e65a54b,0x45fd5e0d1d810a9c,0x7e575f8f4b0b26eb,0xe98456b4cb4cd36f,0xb59a1cfdfad75}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2bba,0x15e8,0x1c2b,0x55ed,0xb72c,0xb3cc,0x3a5c,0xb409,0x2f3d,0x71b,0xd365,0x90a1,0xb2f5,0x1e26,0xbe0,0xa633,0xc5a6,0xe2bd,0xe0e3,0x49b3,0xd4c8,0x9f1c,0x9ba3,0x3674,0x2e5a,0x5411,0xc603,0xcdd3,0xfb23,0x2e7e,0x88a,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x15e82bba,0x55ed1c2b,0xb3ccb72c,0xb4093a5c,0x71b2f3d,0x90a1d365,0x1e26b2f5,0xa6330be0,0xe2bdc5a6,0x49b3e0e3,0x9f1cd4c8,0x36749ba3,0x54112e5a,0xcdd3c603,0x2e7efb23,0x4088a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55ed1c2b15e82bba,0xb4093a5cb3ccb72c,0x90a1d365071b2f3d,0xa6330be01e26b2f5,0x49b3e0e3e2bdc5a6,0x36749ba39f1cd4c8,0xcdd3c60354112e5a,0x4088a2e7efb23}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa8e5,0xb5b7,0x12fc,0x2365,0x6f00,0x2267,0x5260,0x5ece,0xbf38,0x93ee,0xfcde,0xe5b3,0x9f9,0x712a,0x1770,0xdd48,0xdd27,0x5350,0xb7f5,0xbdf,0x46a4,0x7cb4,0xe6a9,0xca36,0x8565,0x3c9,0xe43b,0x3713,0x4fe2,0xeeb1,0x806a,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb5b7a8e5,0x236512fc,0x22676f00,0x5ece5260,0x93eebf38,0xe5b3fcde,0x712a09f9,0xdd481770,0x5350dd27,0xbdfb7f5,0x7cb446a4,0xca36e6a9,0x3c98565,0x3713e43b,0xeeb14fe2,0x4806a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x236512fcb5b7a8e5,0x5ece526022676f00,0xe5b3fcde93eebf38,0xdd481770712a09f9,0xbdfb7f55350dd27,0xca36e6a97cb446a4,0x3713e43b03c98565,0x4806aeeb14fe2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa4f5,0xf013,0xcdab,0x62ee,0x7ecf,0xcd44,0x9a35,0x7947,0x7e7b,0x318,0xc33c,0x9983,0x5ab4,0xb19a,0xd8e1,0x67cb,0xf563,0xe27e,0xa1f2,0xba02,0xd914,0xb4f4,0xa070,0x81a8,0x2c90,0x34b3,0xa94b,0x167b,0x528a,0x3020,0xa65e,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf013a4f5,0x62eecdab,0xcd447ecf,0x79479a35,0x3187e7b,0x9983c33c,0xb19a5ab4,0x67cbd8e1,0xe27ef563,0xba02a1f2,0xb4f4d914,0x81a8a070,0x34b32c90,0x167ba94b,0x3020528a,0x4a65e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x62eecdabf013a4f5,0x79479a35cd447ecf,0x9983c33c03187e7b,0x67cbd8e1b19a5ab4,0xba02a1f2e27ef563,0x81a8a070b4f4d914,0x167ba94b34b32c90,0x4a65e3020528a}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = 
(mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x6f0b,0x3478,0x5aeb,0x64,0x9a1a,0xecff,0xccf0,0x2fab,0xf3a8,0x718a,0x97e7,0xc31a,0xa0cd,0xb872,0x514e,0x5ee1,0x4b79,0x4af9,0xd0c3,0x97c6,0x9591,0x2370,0xa987,0xa5e6,0xe201,0x8730,0x3150,0x1980,0x8452,0x3b83,0x25c9,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x34786f0b,0x645aeb,0xecff9a1a,0x2fabccf0,0x718af3a8,0xc31a97e7,0xb872a0cd,0x5ee1514e,0x4af94b79,0x97c6d0c3,0x23709591,0xa5e6a987,0x8730e201,0x19803150,0x3b838452,0xb25c9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x645aeb34786f0b,0x2fabccf0ecff9a1a,0xc31a97e7718af3a8,0x5ee1514eb872a0cd,0x97c6d0c34af94b79,0xa5e6a98723709591,0x198031508730e201,0xb25c93b838452}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, 
._mp_d = (mp_limb_t[]) {0x1de7,0x7f69,0xdefe,0xfc6b,0x6fd5,0xc100,0x5188,0x1318,0x416e,0x10dd,0x33ac,0x4260,0x8985,0x1d0e,0x5b13,0xd02e,0x6fb5,0x6e28,0x9b7d,0x4f72,0x9665,0xd5f3,0xf00d,0xda5f,0x98f2,0xd778,0x4b2a,0x958d,0xfcef,0xd837,0x4a93,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7f691de7,0xfc6bdefe,0xc1006fd5,0x13185188,0x10dd416e,0x426033ac,0x1d0e8985,0xd02e5b13,0x6e286fb5,0x4f729b7d,0xd5f39665,0xda5ff00d,0xd77898f2,0x958d4b2a,0xd837fcef,0x34a93}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfc6bdefe7f691de7,0x13185188c1006fd5,0x426033ac10dd416e,0xd02e5b131d0e8985,0x4f729b7d6e286fb5,0xda5ff00dd5f39665,0x958d4b2ad77898f2,0x34a93d837fcef}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x8527,0x81f3,0xcb8f,0x5e0d,0x7c93,0x7448,0x613,0xedcf,0x7d31,0x77c7,0x19dc,0x8ace,0xbfb8,0xa582,0x9ccc,0x28df,0xb6e0,0x4f69,0x33e6,0x546b,0xcfb2,0x1627,0x53ed,0xdc8d,0xd80b,0xb843,0xc438,0xb942,0x8fb5,0xb3c0,0xc1dc,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81f38527,0x5e0dcb8f,0x74487c93,0xedcf0613,0x77c77d31,0x8ace19dc,0xa582bfb8,0x28df9ccc,0x4f69b6e0,0x546b33e6,0x1627cfb2,0xdc8d53ed,0xb843d80b,0xb942c438,0xb3c08fb5,0x2c1dc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5e0dcb8f81f38527,0xedcf061374487c93,0x8ace19dc77c77d31,0x28df9ccca582bfb8,0x546b33e64f69b6e0,0xdc8d53ed1627cfb2,0xb942c438b843d80b,0x2c1dcb3c08fb5}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x90f5,0xcb87,0xa514,0xff9b,0x65e5,0x1300,0x330f,0xd054,0xc57,0x8e75,0x6818,0x3ce5,0x5f32,0x478d,0xaeb1,0xa11e,0xb486,0xb506,0x2f3c,0x6839,0x6a6e,0xdc8f,0x5678,0x5a19,0x1dfe,0x78cf,0xceaf,0xe67f,0x7bad,0xc47c,0xda36,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcb8790f5,0xff9ba514,0x130065e5,0xd054330f,0x8e750c57,0x3ce56818,0x478d5f32,0xa11eaeb1,0xb506b486,0x68392f3c,0xdc8f6a6e,0x5a195678,0x78cf1dfe,0xe67fceaf,0xc47c7bad,0x4da36}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xff9ba514cb8790f5,0xd054330f130065e5,0x3ce568188e750c57,0xa11eaeb1478d5f32,0x68392f3cb506b486,0x5a195678dc8f6a6e,0xe67fceaf78cf1dfe,0x4da36c47c7bad}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x14d5, 0x1163, 0xf47, 0x337, 0x71e, 0x4ad, 0x1071, 0x19d4, 0x11d9, 0xdce, 0x186a, 0x1785, 0x13c8, 0x695, 0xe1b, 0x54b, 0x174f, 0xd0c, 0x17cb, 0x17db, 0xb41, 0xb78, 0x1a46, 0x66f, 0x23, 0x836, 0x19ce, 0xcdf, 0x90b, 0x6fb, 0x1508, 0x1bb5, 0x6aa, 0x1219, 0x1bba, 0x1d67, 0x1e46, 0x8d8, 0x62c} +#elif RADIX == 32 +{0x1a6af50e, 0xef478b1, 0xb4e3c33, 0xea41c49, 0xdce8ece, 0x2f0b86a, 0xd9a5679, 0x14f2a5b8, 0xf96d0cb, 0x2d06fb7, 0xfd232de, 0x6c02333, 0x137f39c8, 0x37da42d, 0x15bb5a84, 0xea4326a, 0x123759f7, 0x9c7} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x3c3377a3c58e9abd, 0x3a3b39d483892b4e, 0xd9a56791785c3537, 0xbbe5b432e9e54b70, 0x666fd232de16837d, 0xd216cdfce720d804, 0x4864d55bb5a841be, 0x72363c8dd67ddd} +#else +{0x66ef478b1d357a, 0x139d483892b4e3c, 0x1c8bc2e1a9b9d1d, 0xba7952dc366959, 0x1c2d06fb77cb686, 0xd804666fd232d, 0xdf690b66fe739, 0x1752193556ed6a1, 0xe46c791bacfb} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x1d37, 0x1c58, 0x1bd1, 0x10cd, 0x9c7, 0x92b, 0x41c, 0xe75, 0x1476, 0x1373, 0xe1a, 0x5e1, 0xcf2, 0x19a5, 0x1b86, 0x1952, 0x5d3, 0x1b43, 0x1df2, 0xdf6, 0x2d0, 0x12de, 0x1e91, 0x199b, 0x1008, 0x120d, 0x1e73, 0x1b37, 0x1a42, 0x1be, 0xd42, 0x16ed, 0x9aa, 0x1486, 0x1eee, 0x1759, 0x791, 0x236, 0x5bb} +#elif RADIX == 32 +{0xe9becab, 0x1bbd1e2c, 0xad38f0c, 0x13a90712, 0x1373a3b3, 0x8bc2e1a, 0x366959e, 0x1d3ca96e, 0x1be5b432, 0x10b41bed, 0x1bf48cb7, 0x1b008cc, 0xcdfce72, 0xdf690b, 0x156ed6a1, 0x1ba90c9a, 0x1c8dd67d, 0xd31} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x8f0cdde8f163a6fb, 0xce8ece7520e24ad3, 0x366959e45e170d4d, 0x6ef96d0cba7952dc, 0x199bf48cb785a0df, 0xb485b37f39c83601, 0x52193556ed6a106f, 0x488d8f23759f77} +#else +{0x19bbd1e2c74df6, 0xce7520e24ad38f, 0xf22f0b86a6e747, 0x12e9e54b70d9a56, 0xf0b41beddf2da1, 0x83601199bf48cb, 0x837da42d9bf9ce, 0x1dd4864d55bb5a8, 0x911b1e46eb3e} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x8f6, 0xe30, 0x75, 0xaf7, 0xb3c, 0x1672, 0x1e05, 0x157a, 0x16b1, 0x1fd, 0x3c2, 0x114d, 0x1000, 0x1b4f, 0x1f37, 0xc0e, 0xdd, 0x4de, 0xdff, 0x55e, 0x1a2f, 0x353, 0xc4a, 0x1225, 0x9ed, 0x9ff, 0x1493, 0x18e6, 0x96c, 0x163c, 0xa76, 0x1c78, 0x11b4, 0x1087, 0x1519, 0xc82, 0x3e0, 0x7d4, 0xf5} +#elif RADIX == 32 +{0x47b122a, 0xe075718, 0x1c9678af, 0xbd7816c, 0x1fdb58d, 0x229a3c2, 0x1bed3e00, 0xdd6077c, 0x1bfe4de0, 0x1e8bcabc, 0x56250d4, 0x1fe9ed91, 0x39a9269, 0xb1e25b3, 0x9c7853b, 0x6610f1b, 0x1f0320aa, 0x7a0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x78af703ab8c11ec4, 0xf6d6357af02d9c96, 0xbed3e00114d1e107, 0xe6ff93781bac0ef9, 0xb2256250d4f45e55, 0x12d98e6a49a7fd3d, 0xc21e369c7853b58f, 0xe9f507c0c82a8c} +#else +{0x15ee07571823d89, 0x357af02d9c9678, 0x8a68f083fb6b, 0x6eb03be6fb4f8, 0x9e8bcabcdff26f, 0x7fd3db2256250d, 0x1ac7896cc73524d, 0x330878da71e14e, 0x23ea0f819055} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1227, 0x1240, 0x423, 0xd84, 0x1dc1, 0x982, 0x1cb3, 0x14e1, 0x16eb, 0x1409, 0xf49, 0xec8, 0x888, 0xe0b, 0x1c45, 0x176, 0x49e, 0x1d40, 0x1e6b, 0x7a3, 0xfba, 0x175f, 0x1908, 0xb88, 0x168c, 0x1324, 0x159f, 0x1077, 0xac3, 0x10b4, 0x478, 0x240, 0x1682, 0x14f, 0x1599, 0x152f, 0x1197, 0xad5, 0x133} +#elif RADIX == 32 +{0x91396c4, 0x8423920, 0xbb82d8, 0x70f2cd3, 0x1409b75d, 0x1d90f49, 0x2b82d11, 0x9e0bb71, 0x1cd7d402, 0x1bee8f47, 0x8c845d7, 0x4968c5c, 0x1deb3f3, 0x85a2b0e, 0x424023c, 0x6429f68, 0xcbd4beb, 0xac} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x82d84211c90244e5, 0x26dd74e1e59a60bb, 0x2b82d110ec87a4d0, 0x3f35f50093c176e2, 0x8b88c845d7df747a, 0x1587077acfcc92d1, 0x853ed0424023c42d, 0x12ab5632f52facc} +#else +{0x1b08423920489cb, 0x174e1e59a60bb82, 0x887643d268136e, 0x24f05db88ae0b4, 0xfbee8f47e6bea0, 0xc92d18b88c845d, 0x2168ac383bd67e, 0x13214fb4109008f, 0xa56ac65ea5f5} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1544, 0x1dea, 0x162d, 0x73d, 0x6d1, 0x1511, 0x5f2, 0x275, 0x1aff, 0x1c7, 0x1d84, 0x1875, 0x10df, 0x2e0, 0x70b, 0x9eb, 0x897, 0xf0f, 0xa5d, 0xf38, 0x108c, 0x1c12, 0x1649, 0x1849, 0x9b8, 0x2bc, 0x1b0, 0xd0e, 
0xfdb, 0x8ee, 0x1b0b, 0x1fdc, 0xc1, 0x1771, 0x1776, 0xa12, 0x1392, 0xd10, 0x618} +#elif RADIX == 32 +{0xaa27395, 0x1b62def5, 0x44da273, 0x13a97caa, 0x1c7d7f8, 0x1f0ebd84, 0x58b821b, 0x974f59c, 0x14baf0f4, 0x14231e70, 0x9b24f04, 0x1789b8c2, 0x14383602, 0x14773f6d, 0x3fdcd85, 0x1daee20c, 0x1c9284ae, 0xd04} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xa273db16f7aaa89c, 0x1f5fe2752f95444d, 0x58b821bf875ec207, 0x852ebc3d12e9eb38, 0x1849b24f04a118f3, 0x9fb6d0e0d80af137, 0x5dc4183fdcd85a3b, 0x183442724a12bbb} +#else +{0xe7b62def555139, 0x1e2752f95444da2, 0xdfc3af61038faf, 0x144ba7ace162e08, 0x94231e70a5d787, 0xaf1371849b24f0, 0xd1dcfdb68706c0, 0xed771060ff7361, 0x156884e494257} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1756, 0x1187, 0x608, 0x637, 0x5c5, 0x459, 0x12f2, 0x9a1, 0x314, 0xe7f, 0x1c73, 0x27f, 0xa8d, 0x17f8, 0x1e33, 0x1878, 0x1c21, 0x123b, 0xb76, 0x7ea, 0x157, 0x16b4, 0xad7, 0x413, 0x56e, 0x4f3, 0x881, 0x1319, 0x1cc3, 0x1813, 0x1575, 0x1f0, 0x13f9, 0x1ef4, 0x8ae, 0x17c8, 0xd48, 0x157d, 0x5ea} +#elif RADIX == 32 +{0x1bab7032, 0xe6088c3, 0x164b8a63, 0xd0cbc88, 0xe7f18a2, 0x144ffc73, 0x19dfe151, 0x21c3c78, 0x16ed23be, 0x55cfd4, 0x1356bdad, 0x1e656e20, 0xc651024, 0x1c09f30e, 0x121f0aba, 0xbbde93f, 0xa45f211, 0x8eb} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x8a637304461eeadc, 0xfc6289a19791164b, 0x9dfe151a27fe39b9, 0xa5bb48ef843878f1, 0xc41356bdad02ae7e, 0xf98731944093ccad, 0x7bd27f21f0abae04, 0x155f5a917c8457} +#else +{0xc6e6088c3dd5b8, 0x89a19791164b8a, 0x8d13ff1cdcfe31, 0x1e10e1e3c677f85, 0x1a055cfd4b7691d, 0x13ccadc41356bda, 0x17027cc398ca204, 0x15def49fc87c2ae, 0x2abeb522f908} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0xbba, 0x1eb6, 0x49a, 0x12a5, 0x12d2, 0x30a, 0x172f, 0x174d, 0x1231, 0x1036, 0x122e, 0x158, 0x743, 0xf10, 0x1e52, 0x18c7, 0x152e, 0x13b1, 0x7ae, 0x128d, 0x9c4, 0x848, 0x4, 0x1e64, 0x1e6f, 0x10ca, 0x3d4, 0x164, 0x1c8, 0x3e2, 0x4e8, 0x27b, 0x1d32, 0x1cc2, 0x1c60, 0x7a8, 0x13df, 0x1f6b, 0x6ad} +#elif RADIX == 32 +{0x5dd7eaa, 0xa49af5b, 0x2a5a52a, 0x1a6dcbc6, 0x1036918d, 0xc2b122e, 0x93c40e8, 0x12ec63f9, 0xf5d3b1a, 0x271251a, 0x4002212, 0x195e6ff3, 0x5907a90, 0x1f10720, 0x427b274, 0x183985d3, 0x1ef9ea38, 0x45c} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xa52a524d7ad9775f, 0xda46374db978c2a5, 0x93c40e8615891740, 0xd3d74ec6a5d8c7f2, 0xfe64002212138928, 0x83901641ea432bcd, 0x730ba6427b2740f8, 0x11fdae7be7a8e30} +#else +{0x54a49af5b2eebf, 0x374db978c2a5a5, 
0x1430ac48ba06d23, 0x1a97631fca4f103, 0x4271251a7ae9d8, 0x32bcdfe6400221, 0x7c41c80b20f52, 0xc1cc2e9909ec9d, 0x8fb5cf7cf51c} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1d5e, 0x18e6, 0xc97, 0x1db2, 0x9df, 0x19d3, 0x1564, 0x1a3a, 0x90, 0xea5, 0xd74, 0x19fc, 0xf84, 0xadd, 0x2e5, 0x10bb, 0x183f, 0x1334, 0xa50, 0x54b, 0xd22, 0x1295, 0xf11, 0xfa1, 0x1810, 0xa3, 0xa81, 0x1026, 0x2b2, 0x19ee, 0x1a4a, 0xf8a, 0xfb3, 0x1463, 0x19c5, 0x42c, 0x830, 0x562, 0x3db} +#elif RADIX == 32 +{0xeaf491f, 0x4c97c73, 0x14d3bfdb, 0x11d55933, 0xea50486, 0x133f8d74, 0x12ab75f0, 0x3f85d8b, 0x14a1334c, 0xb488a96, 0x1788ca5, 0x1478107d, 0x995020, 0xcf70aca, 0x6f8ad25, 0x1168c6fb, 0x1810b33, 0x892} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xbfdb264be39babd2, 0x94121a3aab2674d3, 0x2ab75f099fc6ba3a, 0xb5284cd307f0bb17, 0xfa1788ca55a4454, 0x8565026540828f02, 0xd18df66f8ad2567b, 0x7958906042cce2} +#else +{0x1b64c97c73757a4, 0x1a3aab2674d3bf, 0x184cfe35d1d4a09, 0xc1fc2ec5caadd7, 0xab488a96a5099a, 0x28f020fa1788ca, 0xb3dc2b28132a04, 0x18b4637d9be2b49, 0xf2b120c08599} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3919,0x9bc5,0xcbb7,0xe8a6,0x3f55,0x22a,0x8690,0xc64e,0x8e93,0x6c4d,0x80e8,0x422f,0xffd,0x7e1,0xf545,0xb342,0x87be,0x80a5,0xa62b,0xfb8d,0x4048,0x466e,0x4697,0xd4d,0x9d79,0xc9ba,0x52c1,0xa008,0x21f1,0xf309,0xb87c,0xa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9bc53919,0xe8a6cbb7,0x22a3f55,0xc64e8690,0x6c4d8e93,0x422f80e8,0x7e10ffd,0xb342f545,0x80a587be,0xfb8da62b,0x466e4048,0xd4d4697,0xc9ba9d79,0xa00852c1,0xf30921f1,0xab87c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe8a6cbb79bc53919,0xc64e8690022a3f55,0x422f80e86c4d8e93,0xb342f54507e10ffd,0xfb8da62b80a587be,0xd4d4697466e4048,0xa00852c1c9ba9d79,0xab87cf30921f1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc03e,0x86b5,0xcdfe,0x5bc4,0xc7a1,0xbc7b,0x90d9,0x9464,0xc604,0x9345,0x853d,0xc710,0x1ad2,0x4b98,0x4050,0x8ef4,0xd73c,0x6664,0xbd2,0xb610,0x483e,0x5e87,0xabe9,0xcf72,0x679e,0xbfbf,0x52b1,0x3483,0xf7c,0x4a36,0xbd53,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b5c03e,0x5bc4cdfe,0xbc7bc7a1,0x946490d9,0x9345c604,0xc710853d,0x4b981ad2,0x8ef44050,0x6664d73c,0xb6100bd2,0x5e87483e,0xcf72abe9,0xbfbf679e,0x348352b1,0x4a360f7c,0xebd53}}} +#elif 
GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5bc4cdfe86b5c03e,0x946490d9bc7bc7a1,0xc710853d9345c604,0x8ef440504b981ad2,0xb6100bd26664d73c,0xcf72abe95e87483e,0x348352b1bfbf679e,0xebd534a360f7c}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5937,0x9b8c,0xba31,0x676c,0x8d31,0x9c3d,0xf619,0x42e2,0x4d1a,0xd072,0x76e5,0x390a,0x5086,0x7643,0x2d16,0xfd71,0x341,0xff7b,0x56ec,0xed63,0x1436,0x7324,0x6a5d,0x9488,0x9f0,0x145,0xd6b5,0x4043,0x10d8,0xeb6e,0x7784,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b8c5937,0x676cba31,0x9c3d8d31,0x42e2f619,0xd0724d1a,0x390a76e5,0x76435086,0xfd712d16,0xff7b0341,0xed6356ec,0x73241436,0x94886a5d,0x14509f0,0x4043d6b5,0xeb6e10d8,0xf7784}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x676cba319b8c5937,0x42e2f6199c3d8d31,0x390a76e5d0724d1a,0xfd712d1676435086,0xed6356ecff7b0341,0x94886a5d73241436,0x4043d6b5014509f0,0xf7784eb6e10d8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc6e7,0x643a,0x3448,0x1759,0xc0aa,0xfdd5,0x796f,0x39b1,0x716c,0x93b2,0x7f17,0xbdd0,0xf002,0xf81e,0xaba,0x4cbd,0x7841,0x7f5a,0x59d4,0x472,0xbfb7,0xb991,0xb968,0xf2b2,0x6286,0x3645,0xad3e,0x5ff7,0xde0e,0xcf6,0x4783,0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x643ac6e7,0x17593448,0xfdd5c0aa,0x39b1796f,0x93b2716c,0xbdd07f17,0xf81ef002,0x4cbd0aba,0x7f5a7841,0x47259d4,0xb991bfb7,0xf2b2b968,0x36456286,0x5ff7ad3e,0xcf6de0e,0x54783}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17593448643ac6e7,0x39b1796ffdd5c0aa,0xbdd07f1793b2716c,0x4cbd0abaf81ef002,0x47259d47f5a7841,0xf2b2b968b991bfb7,0x5ff7ad3e36456286,0x547830cf6de0e}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +#endif +}, { +#if 0 
+#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5fd3,0xc1bb,0x3527,0x289e,0x97fd,0xf5ce,0xa8e1,0xfbf2,0x8f04,0xb5e7,0xdf66,0xcb44,0x5b5,0x8314,0x31c,0x6e5c,0xa6b9,0x3134,0x3d19,0x5ea9,0x860d,0x37fe,0x8003,0xafb9,0xbfdd,0xf377,0xa36d,0xde5a,0xa9df,0x8da,0xc872,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc1bb5fd3,0x289e3527,0xf5ce97fd,0xfbf2a8e1,0xb5e78f04,0xcb44df66,0x831405b5,0x6e5c031c,0x3134a6b9,0x5ea93d19,0x37fe860d,0xafb98003,0xf377bfdd,0xde5aa36d,0x8daa9df,0xbc872}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x289e3527c1bb5fd3,0xfbf2a8e1f5ce97fd,0xcb44df66b5e78f04,0x6e5c031c831405b5,0x5ea93d193134a6b9,0xafb9800337fe860d,0xde5aa36df377bfdd,0xbc87208daa9df}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb354,0x6a4f,0xd461,0xf7db,0x4aec,0x6786,0xff6,0xb274,0xfcf4,0x66d,0x97e9,0x277e,0x5e43,0x68a3,0xb1fa,0x6062,0xa56a,0x8c2b,0x67ed,0xd926,0x444a,0x4883,0x5bc5,0x8084,0x1f0a,0x209e,0x3b85,0x4eb6,0x14fe,0xb973,0xb05c,0xa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6a4fb354,0xf7dbd461,0x67864aec,0xb2740ff6,0x66dfcf4,0x277e97e9,0x68a35e43,0x6062b1fa,0x8c2ba56a,0xd92667ed,0x4883444a,0x80845bc5,0x209e1f0a,0x4eb63b85,0xb97314fe,0xab05c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf7dbd4616a4fb354,0xb2740ff667864aec,0x277e97e9066dfcf4,0x6062b1fa68a35e43,0xd92667ed8c2ba56a,0x80845bc54883444a,0x4eb63b85209e1f0a,0xab05cb97314fe}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x9c41,0x213b,0x2271,0x4d2a,0xca4c,0x987c,0xf3fd,0x8462,0x84ba,0x5504,0xf930,0x5ca1,0xb075,0x84d2,0xb16,0x1bc1,0xe1ac,0xfeb5,0xe84e,0x4bb0,0xf6b6,0x57b6,0x3d98,0x97f4,0xda24,0x9866,0x1aae,0xb84,0x36ec,0xfcb7,0x4a2d,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x213b9c41,0x4d2a2271,0x987cca4c,0x8462f3fd,0x550484ba,0x5ca1f930,0x84d2b075,0x1bc10b16,0xfeb5e1ac,0x4bb0e84e,0x57b6f6b6,0x97f43d98,0x9866da24,0xb841aae,0xfcb736ec,0xf4a2d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4d2a2271213b9c41,0x8462f3fd987cca4c,0x5ca1f930550484ba,0x1bc10b1684d2b075,0x4bb0e84efeb5e1ac,0x97f43d9857b6f6b6,0xb841aae9866da24,0xf4a2dfcb736ec}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa02d,0x3e44,0xcad8,0xd761,0x6802,0xa31,0x571e,0x40d,0x70fb,0x4a18,0x2099,0x34bb,0xfa4a,0x7ceb,0xfce3,0x91a3,0x5946,0xcecb,0xc2e6,0xa156,0x79f2,0xc801,0x7ffc,0x5046,0x4022,0xc88,0x5c92,0x21a5,0x5620,0xf725,0x378d,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3e44a02d,0xd761cad8,0xa316802,0x40d571e,0x4a1870fb,0x34bb2099,0x7cebfa4a,0x91a3fce3,0xcecb5946,0xa156c2e6,0xc80179f2,0x50467ffc,0xc884022,0x21a55c92,0xf7255620,0x4378d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd761cad83e44a02d,0x40d571e0a316802,0x34bb20994a1870fb,0x91a3fce37cebfa4a,0xa156c2e6cecb5946,0x50467ffcc80179f2,0x21a55c920c884022,0x4378df7255620}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3919,0x9bc5,0xcbb7,0xe8a6,0x3f55,0x22a,0x8690,0xc64e,0x8e93,0x6c4d,0x80e8,0x422f,0xffd,0x7e1,0xf545,0xb342,0x87be,0x80a5,0xa62b,0xfb8d,0x4048,0x466e,0x4697,0xd4d,0x9d79,0xc9ba,0x52c1,0xa008,0x21f1,0xf309,0xb87c,0xa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9bc53919,0xe8a6cbb7,0x22a3f55,0xc64e8690,0x6c4d8e93,0x422f80e8,0x7e10ffd,0xb342f545,0x80a587be,0xfb8da62b,0x466e4048,0xd4d4697,0xc9ba9d79,0xa00852c1,0xf30921f1,0xab87c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe8a6cbb79bc53919,0xc64e8690022a3f55,0x422f80e86c4d8e93,0xb342f54507e10ffd,0xfb8da62b80a587be,0xd4d4697466e4048,0xa00852c1c9ba9d79,0xab87cf30921f1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc03e,0x86b5,0xcdfe,0x5bc4,0xc7a1,0xbc7b,0x90d9,0x9464,0xc604,0x9345,0x853d,0xc710,0x1ad2,0x4b98,0x4050,0x8ef4,0xd73c,0x6664,0xbd2,0xb610,0x483e,0x5e87,0xabe9,0xcf72,0x679e,0xbfbf,0x52b1,0x3483,0xf7c,0x4a36,0xbd53,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b5c03e,0x5bc4cdfe,0xbc7bc7a1,0x946490d9,0x9345c604,0xc710853d,0x4b981ad2,0x8ef44050,0x6664d73c,0xb6100bd2,0x5e87483e,0xcf72abe9,0xbfbf679e,0x348352b1,0x4a360f7c,0xebd53}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5bc4cdfe86b5c03e,0x946490d9bc7bc7a1,0xc710853d9345c604,0x8ef440504b981ad2,0xb6100bd26664d73c,0xcf72abe95e87483e,0x348352b1bfbf679e,0xebd534a360f7c}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5937,0x9b8c,0xba31,0x676c,0x8d31,0x9c3d,0xf619,0x42e2,0x4d1a,0xd072,0x76e5,0x390a,0x5086,0x7643,0x2d16,0xfd71,0x341,0xff7b,0x56ec,0xed63,0x1436,0x7324,0x6a5d,0x9488,0x9f0,0x145,0xd6b5,0x4043,0x10d8,0xeb6e,0x7784,0xf}}} +#elif GMP_LIMB_BITS == 32 
+{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b8c5937,0x676cba31,0x9c3d8d31,0x42e2f619,0xd0724d1a,0x390a76e5,0x76435086,0xfd712d16,0xff7b0341,0xed6356ec,0x73241436,0x94886a5d,0x14509f0,0x4043d6b5,0xeb6e10d8,0xf7784}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x676cba319b8c5937,0x42e2f6199c3d8d31,0x390a76e5d0724d1a,0xfd712d1676435086,0xed6356ecff7b0341,0x94886a5d73241436,0x4043d6b5014509f0,0xf7784eb6e10d8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc6e7,0x643a,0x3448,0x1759,0xc0aa,0xfdd5,0x796f,0x39b1,0x716c,0x93b2,0x7f17,0xbdd0,0xf002,0xf81e,0xaba,0x4cbd,0x7841,0x7f5a,0x59d4,0x472,0xbfb7,0xb991,0xb968,0xf2b2,0x6286,0x3645,0xad3e,0x5ff7,0xde0e,0xcf6,0x4783,0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x643ac6e7,0x17593448,0xfdd5c0aa,0x39b1796f,0x93b2716c,0xbdd07f17,0xf81ef002,0x4cbd0aba,0x7f5a7841,0x47259d4,0xb991bfb7,0xf2b2b968,0x36456286,0x5ff7ad3e,0xcf6de0e,0x54783}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17593448643ac6e7,0x39b1796ffdd5c0aa,0xbdd07f1793b2716c,0x4cbd0abaf81ef002,0x47259d47f5a7841,0xf2b2b968b991bfb7,0x5ff7ad3e36456286,0x547830cf6de0e}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +#elif 
GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x718a,0xe24a,0xae5,0xa4d6,0xd401,0xf453,0x9f91,0x69ce,0x7d19,0xfa11,0x9273,0x4e63,0xf33a,0xde49,0xe08f,0x746a,0x243d,0x52bb,0x43b6,0xe4c,0x1bdd,0x380d,0xdf64,0x74fe,0x4dfa,0x584f,0xa4d6,0xd71b,0xf067,0xf070,0x717e,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe24a718a,0xa4d60ae5,0xf453d401,0x69ce9f91,0xfa117d19,0x4e639273,0xde49f33a,0x746ae08f,0x52bb243d,0xe4c43b6,0x380d1bdd,0x74fedf64,0x584f4dfa,0xd71ba4d6,0xf070f067,0xf717e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa4d60ae5e24a718a,0x69ce9f91f453d401,0x4e639273fa117d19,0x746ae08fde49f33a,0xe4c43b652bb243d,0x74fedf64380d1bdd,0xd71ba4d6584f4dfa,0xf717ef070f067}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5d93,0x7845,0xd1d0,0xe045,0xfa74,0x6b6,0x9400,0xad36,0x4e68,0xd3f6,0x9b00,0x7ca0,0xab22,0xfac,0x1fb6,0xb42f,0x57db,0xb2e3,0xbc5b,0x2b2d,0x94fa,0xc77e,0x34e2,0x2918,0x6ce9,0xf9dd,0x68cf,0xd4a2,0xbc59,0x6050,0xda60,0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x78455d93,0xe045d1d0,0x6b6fa74,0xad369400,0xd3f64e68,0x7ca09b00,0xfacab22,0xb42f1fb6,0xb2e357db,0x2b2dbc5b,0xc77e94fa,0x291834e2,0xf9dd6ce9,0xd4a268cf,0x6050bc59,0x5da60}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe045d1d078455d93,0xad36940006b6fa74,0x7ca09b00d3f64e68,0xb42f1fb60facab22,0x2b2dbc5bb2e357db,0x291834e2c77e94fa,0xd4a268cff9dd6ce9,0x5da606050bc59}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc6dc,0x5d39,0xac2b,0x2d81,0xc9b8,0xf398,0xdab5,0x8e30,0xb3b2,0x1b25,0x7102,0x8cd2,0x952e,0x7c35,0xb4f3,0x52b8,0x5789,0xb877,0x6906,0x8d31,0x98a6,0x8a10,0x2b3,0x1667,0x856,0xa935,0xfc76,0xc8ec,0x6044,0x9148,0x4f02,0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5d39c6dc,0x2d81ac2b,0xf398c9b8,0x8e30dab5,0x1b25b3b2,0x8cd27102,0x7c35952e,0x52b8b4f3,0xb8775789,0x8d316906,0x8a1098a6,0x166702b3,0xa9350856,0xc8ecfc76,0x91486044,0xd4f02}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2d81ac2b5d39c6dc,0x8e30dab5f398c9b8,0x8cd271021b25b3b2,0x52b8b4f37c35952e,0x8d316906b8775789,0x166702b38a1098a6,0xc8ecfc76a9350856,0xd4f0291486044}}} +#endif +, +#if 0 
+#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 31, ._mp_d = (mp_limb_t[]) {0x8e76,0x1db5,0xf51a,0x5b29,0x2bfe,0xbac,0x606e,0x9631,0x82e6,0x5ee,0x6d8c,0xb19c,0xcc5,0x21b6,0x1f70,0x8b95,0xdbc2,0xad44,0xbc49,0xf1b3,0xe422,0xc7f2,0x209b,0x8b01,0xb205,0xa7b0,0x5b29,0x28e4,0xf98,0xf8f,0x8e81}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1db58e76,0x5b29f51a,0xbac2bfe,0x9631606e,0x5ee82e6,0xb19c6d8c,0x21b60cc5,0x8b951f70,0xad44dbc2,0xf1b3bc49,0xc7f2e422,0x8b01209b,0xa7b0b205,0x28e45b29,0xf8f0f98,0x8e81}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5b29f51a1db58e76,0x9631606e0bac2bfe,0xb19c6d8c05ee82e6,0x8b951f7021b60cc5,0xf1b3bc49ad44dbc2,0x8b01209bc7f2e422,0x28e45b29a7b0b205,0x8e810f8f0f98}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0xca6, 0xc89, 0x411, 0x17e9, 0x188d, 0xad5, 0x8de, 0x403, 0x183a, 0x1e0d, 0x28c, 0xfdc, 0xbcc, 0xd0, 0xa3a, 0xb2e, 0x140c, 0x11b4, 0x4bb, 0x10da, 0xb22, 0x1a4c, 0xbfa, 0xbd5, 0xae6, 0xeff, 0x465, 0x8df, 0xcf9, 0x1c4a, 0x1807, 0x1462, 0xead, 0x1c43, 0x12a3, 0x1ec2, 0x1e12, 0xad0, 0x1cd} +#elif RADIX == 32 +{0x1653222c, 0x12411644, 0x15711b7e, 0x1a3795, 0x1e0dc1d1, 0x11fb828c, 0x1d034179, 0xc59728, 0x9771b4a, 0x2c8a1b4, 0x155fd693, 0x1feae65e, 0x37c8cae, 0x1e2533e5, 0x1b462c03, 0x8f886ea, 0x1097b0a5, 0x487} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x1b7e9208b22594c8, 0x3707440346f2b571, 0xd0341798fdc14678, 0xa25dc6d2818b2e51, 0xcbd55fd69316450d, 0x99f28df232bbfd5c, 0xf10dd5b462c03f12, 0xeab43c25ec2951} +#else +{0xfd2411644b2991, 0x1440346f2b5711b, 0x1cc7ee0a33c1b83, 0xa062cb94740d05, 0x62c8a1b44bb8da, 0x1bfd5ccbd55fd69, 0x1f894cf946f9195, 0x147c43756d18b00, 0x2568784bd852} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0xb2b, 0xb22, 0x904, 0xdfa, 0xe23, 0x12b5, 0x1a37, 0x1100, 0xe0e, 0x783, 0xa3, 0x3f7, 0x2f3, 0x1034, 0x128e, 0x2cb, 0x503, 0x1c6d, 0x112e, 0x1436, 0x2c8, 0x1693, 0xafe, 0x12f5, 0x1ab9, 0xbbf, 
0x1919, 0xa37, 0x133e, 0x1f12, 0x1601, 0xd18, 0x1bab, 0x1f10, 0x14a8, 0x17b0, 0x784, 0xab4, 0x653} +#elif RADIX == 32 +{0x595f7f3, 0x14904591, 0xd5c46df, 0x8068de5, 0x7837074, 0xc7ee0a3, 0x740d05e, 0x103165ca, 0x25dc6d2, 0x18b2286d, 0x1557f5a4, 0x17fab997, 0x8df232b, 0x1f894cf9, 0x16d18b00, 0xa3e21ba, 0x1c25ec29, 0x521} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x46dfa4822c89657d, 0xdc1d100d1bcad5c, 0x740d05e63f70519e, 0x689771b4a062cb94, 0x32f557f5a4c59143, 0xa67ca37c8caeff57, 0x7c43756d18b00fc4, 0x1aaad0f097b0a54} +#else +{0x1bf49045912cafb, 0x1d100d1bcad5c46, 0xf31fb828cf06e0, 0x12818b2e51d0341, 0x98b2286d12ee36, 0xeff5732f557f5a, 0x7e2533e51be465, 0x151f10dd5b462c0, 0x1a55a1e12f614} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x517, 0x18a8, 0x1a92, 0x94f, 0x1bb0, 0xf2c, 0x43, 0x5a8, 0x1463, 0x1b4b, 0x1a1c, 0x1c0e, 0x148a, 0x7f5, 0x6a3, 0x820, 0x1fc7, 0x141c, 0x1c2b, 0xd98, 0x48c, 0x587, 0x1b23, 0x1fb5, 0x4c0, 0x179c, 0x169e, 0x1927, 0x16b8, 0x1beb, 0x6bb, 0x1923, 0x2b7, 0x146d, 0x32b, 0xd85, 0x1a89, 0x1fb0, 0x2be} +#elif RADIX == 32 +{0x28bb412, 0x1fa92c54, 0xb376094, 0xd4010de, 0x1b4ba319, 0xb81da1c, 0x119fd691, 0x1c74101a, 0x185741cf, 0x19231b31, 0x15d91961, 0x1384c0fd, 0x49ed3d7, 0x1df5dae3, 0xf92335d, 0xae8da2b, 0x144b6146, 0xa86} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6094fd4962a0a2ed, 0x2e8c65a8021bcb37, 0x19fd6915c0ed0e6d, 0x8e15d073f8e82035, 0x1fb5d91961c918d9, 0xed71927b4f5e7098, 0xd1b456f92335defa, 0x7ec3512d85195} +#else +{0x129fa92c54145da, 0x65a8021bcb3760, 0x8ae07687369746, 0xfe3a080d467f5a, 0x39231b31c2ba0e, 0x1e70981fb5d9196, 0xf7d76b8c93da7a, 0x5746d15be48cd7, 0xfd86a25b0a3} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x147b, 0x14f1, 0xfdd, 0xb2a, 0xff7, 0x1426, 0xce1, 0x19a8, 0x1bf3, 0xbdd, 0x16dd, 0x1339, 0x10dd, 0x8f4, 0x1d29, 0x1b05, 0x1ee, 0x187b, 0x118a, 0x1e55, 0xcde, 0x1a18, 0x1b1f, 0x1648, 0x1c75, 0x1db8, 0xa2a, 0x1ab6, 0x1fa, 0xb0a, 0x1bdf, 0x1d18, 0x1a98, 0x12d9, 
0x13df, 0x6e0, 0xa3c, 0x537, 0x345} +#elif RADIX == 32 +{0x1a3dbe03, 0x14fdda78, 0x99feeb2, 0xd433868, 0xbdddf9e, 0x166736dd, 0x14a3d21b, 0x1eed82f4, 0x31587b0, 0x337bcab, 0x8d8fe86, 0x171c75b2, 0xad9455d, 0x158507eb, 0x11d18def, 0x17e5b3a9, 0x11e1b827, 0x13a} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xeeb2a7eed3c68f6f, 0x777e79a8670d099f, 0x4a3d21bb339b6eaf, 0x58c561ec3ddb05e9, 0xb648d8fe8619bde5, 0x83f5ab651576e38e, 0xcb67531d18defac2, 0xd94dd4786e09ef} +#else +{0x1654fdda78d1edf, 0x79a8670d099fee, 0xdd99cdb757bbbf, 0x10f76c17a528f48, 0xc337bcab18ac3d, 0x16e38eb648d8fe8, 0x1d6141fad5b28ab, 0x1bf2d9d4c74637b, 0x29ba8f0dc13} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0xc4b, 0x1f6e, 0xcba, 0x1a23, 0x8a1, 0x7c3, 0x1a45, 0x1ca3, 0x6a9, 0x643, 0x3b, 0xc83, 0x208, 0x21a, 0xd43, 0x1805, 0x1078, 0x9af, 0x80a, 0x1555, 0x50d, 0x1eb8, 0xa49, 0x161c, 0x1eee, 0xe1b, 0xf4b, 0x9de, 0x117e, 0x14f8, 0xea7, 0xd18, 0x112a, 0x1a38, 0x1cc7, 0x1c36, 0xe5, 0x10fa, 0x411} +#elif RADIX == 32 +{0x625cd26, 0x6cbafb7, 0x10d143a2, 0x51e914f, 0x643354f, 0x190603b, 0x1886841, 0x78c02b5, 0x10149af8, 0x1436aaa, 0x1c524fae, 0x37eeeb0, 0x779e96e, 0x1a7c45f9, 0x14d18753, 0x11f47112, 0x72f0db9, 0x6d0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x43a2365d7db98973, 0xcd53ca3d229f0d1, 0x18868410c8301d99, 0x540526be0f18056a, 0xd61c524fae0a1b55, 0x22fc9de7a5b86fdd, 0xe8e2254d18753d3e, 0x7c3e81cbc36e63} +#else +{0x1446cbafb7312e6, 0x13ca3d229f0d143, 0x864180ecc866a, 0x183c6015a8621a1, 0x1c1436aaa80a4d7, 0x186fddd61c524fa, 0x1e9f117e4ef3d2d, 0x18fa388953461d4, 0xf87d039786dc} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x9d5, 0x0, 0x181d, 0xced, 0x1fe0, 0x267, 0xc65, 0x1a4d, 0x9e3, 0x1f0c, 0x5d, 0xbae, 0x276, 0x1551, 0x1684, 0x1eab, 0x17f0, 0x1b20, 0xae6, 0xbc3, 0x95, 0x17c3, 0xfd8, 0x1359, 0x3f5, 0x12b6, 0x1410, 0x113, 0x1a19, 0x1c1d, 0xd91, 0x1446, 0x1233, 0x170, 0x1c50, 0x13ac, 0x6eb, 0x926, 0x3bf} +#elif RADIX == 32 +{0x4eac70e, 0x1b81d000, 0x19ffc0ce, 0x126b1944, 0x1f0c4f1e, 0x1975c05d, 0x255444e, 0x1f0f55da, 0x15cdb20b, 0x18255786, 0x197ec5f0, 0x16c3f59a, 0x44e8212, 0x1e0ee864, 0x74466c8, 0x1402e123, 0x175ceb38, 0xc31} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xc0cedc0e80013ab1, 0x313c7a4d632899ff, 0x255444ecbae02efc, 0x35736c82fe1eabb4, 0xb3597ec5f0c12abc, 0x7432113a084ad87e, 0x5c24674466c8f07, 0x14a498dd73ace28} +#else +{0x19db81d00027563, 0x7a4d632899ffc0, 0x765d70177e189e, 0xbf87aaed095511, 0x18255786ae6d90, 
0xad87eb3597ec5f, 0x783ba19089d042, 0xa0170919d119b2, 0xe4931bae759c} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1997, 0xa5a, 0x4c4, 0x155d, 0x70b, 0x12f, 0xe9d, 0xfe0, 0x147c, 0x9b6, 0x18ea, 0xf41, 0x1636, 0x1707, 0x1a7e, 0x1326, 0x76d, 0xbef, 0x9fe, 0x1bb4, 0xe22, 0x200, 0x1a11, 0x7e6, 0x1709, 0x1be9, 0x1507, 0x1c63, 0xb6f, 0xceb, 0x1b88, 0x1ef6, 0x16b7, 0x20f, 0x1497, 0x1e1c, 0x26e, 0x139d, 0x330} +#elif RADIX == 32 +{0xccbbc7d, 0x1a4c452d, 0xbce1755, 0x1f03a742, 0x9b6a3e3, 0x19e838ea, 0x1f5c1ec6, 0x16d99369, 0x13fcbef3, 0x388b768, 0x6d08880, 0x1d37093f, 0x118ea0fb, 0x675adbf, 0xfef6dc4, 0x5c41f6b, 0x13778729, 0x568} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x1755d262296b32ef, 0xda8f8fe074e84bce, 0xf5c1ec6cf41c7526, 0x44ff2fbcedb326d3, 0x27e6d088801c45bb, 0xd6dfc63a83efa6e1, 0x883ed6fef6dc433a, 0x34e744dde1ca4b} +#else +{0xaba4c452d665de, 0x18fe074e84bce17, 0x367a0e3a936d47, 0x13b6cc9b4fd707b, 0x388b7689fe5f7, 0xfa6e127e6d0888, 0x19d6b6fe31d41f, 0x12e20fb5bfbdb71, 0x69ce89bbc394} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1bf, 0x197b, 0x1b4, 0x1a8a, 0xd22, 0x1cb5, 0x298, 0x76b, 0x16b6, 0x5aa, 0x54b, 0x1b63, 0x1d59, 0x2dc, 0xfe1, 0x1b24, 0x1725, 0x9a8, 0x2dd, 0x150f, 0x12de, 0x9d9, 0x2fd, 0x95f, 0xcc1, 0x1ffd, 0x101b, 0x707, 0x1d9d, 0x464, 0x39e, 0x97b, 0x8cf, 0x4a5, 0xed1, 0x9c3, 0x1b66, 0x1521, 0x112} +#elif RADIX == 32 +{0x10df9458, 0x141b4cbd, 0xd5a45a8, 0x1b58a639, 0x5aab5b1, 0x76c654b, 0x108b73ab, 0x125d923f, 0x5ba9a8b, 0xcb7aa1e, 0x1f17ea76, 0x1facc14a, 0x1c1e037f, 0x2327674, 0x1e97b1cf, 0x14494a8c, 0x1b3270dd, 0x50e} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x45a8a0da65ec37e5, 0xaad6c76b14c72d5a, 0x8b73ab3b632a596, 0xf16ea6a2e4bb247f, 0x295f17ea7665bd50, 0x3b3a70780dfff598, 0x929519e97b1cf119, 0x254876cc9c3768} +#else +{0x15141b4cbd86fca, 0xc76b14c72d5a45, 0x159db1952cb556b, 0xb92ec91fc22dce, 0xccb7aa1e2dd4d4, 0x1ff598295f17ea7, 0x188c9d9d383c06f, 0x1a24a5467a5ec73, 0x4a90ed99386e} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x4f71,0xd850,0x620a,0x55cc,0x31b0,0x4b74,0xab42,0x125c,0xb589,0xa634,0x73b3,0xca64,0x6ed0,0x6c41,0x6022,0x9b4a,0xb03e,0xe0d,0x1e19,0x2925,0x275e,0x42a,0x10dd,0xfc6a,0x7f51,0xb592,0x977e,0xe556,0x10bc,0x873d,0xa68f,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8504f71,0x55cc620a,0x4b7431b0,0x125cab42,0xa634b589,0xca6473b3,0x6c416ed0,0x9b4a6022,0xe0db03e,0x29251e19,0x42a275e,0xfc6a10dd,0xb5927f51,0xe556977e,0x873d10bc,0x3a68f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55cc620ad8504f71,0x125cab424b7431b0,0xca6473b3a634b589,0x9b4a60226c416ed0,0x29251e190e0db03e,0xfc6a10dd042a275e,0xe556977eb5927f51,0x3a68f873d10bc}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xed2,0x7e79,0xb784,0x90c5,0xe054,0xfc99,0x606e,0x59f4,0xb88f,0xb6ae,0x29bf,0xe3fc,0xa3cf,0x8c88,0xba94,0x4bf9,0xcb60,0xf771,0x31c0,0x7236,0x2401,0xa9e7,0xb9b8,0xfdbf,0x326e,0xb4d5,0x9bd6,0xc810,0xb7f0,0x661,0x1f62,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7e790ed2,0x90c5b784,0xfc99e054,0x59f4606e,0xb6aeb88f,0xe3fc29bf,0x8c88a3cf,0x4bf9ba94,0xf771cb60,0x723631c0,0xa9e72401,0xfdbfb9b8,0xb4d5326e,0xc8109bd6,0x661b7f0,0xf1f62}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x90c5b7847e790ed2,0x59f4606efc99e054,0xe3fc29bfb6aeb88f,0x4bf9ba948c88a3cf,0x723631c0f771cb60,0xfdbfb9b8a9e72401,0xc8109bd6b4d5326e,0xf1f620661b7f0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb407,0xc1a7,0x6715,0xe1ad,0x31eb,0x15e8,0x1594,0xbd8e,0x3976,0x2bae,0x8f7a,0x1636,0x8d19,0x88d2,0x12ae,0x1627,0xf2b0,0x4868,0xdd78,0xfa51,0xfd6b,0xa1f3,0x7eb3,0x35d9,0xd826,0x2c09,0x12a8,0x54dd,0x87d6,0xc29c,0x5bcd,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc1a7b407,0xe1ad6715,0x15e831eb,0xbd8e1594,0x2bae3976,0x16368f7a,0x88d28d19,0x162712ae,0x4868f2b0,0xfa51dd78,0xa1f3fd6b,0x35d97eb3,0x2c09d826,0x54dd12a8,0xc29c87d6,0x15bcd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe1ad6715c1a7b407,0xbd8e159415e831eb,0x16368f7a2bae3976,0x162712ae88d28d19,0xfa51dd784868f2b0,0x35d97eb3a1f3fd6b,0x54dd12a82c09d826,0x15bcdc29c87d6}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb08f,0x27af,0x9df5,0xaa33,0xce4f,0xb48b,0x54bd,0xeda3,0x4a76,0x59cb,0x8c4c,0x359b,0x912f,0x93be,0x9fdd,0x64b5,0x4fc1,0xf1f2,0xe1e6,0xd6da,0xd8a1,0xfbd5,0xef22,0x395,0x80ae,0x4a6d,0x6881,0x1aa9,0xef43,0x78c2,0x5970,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x27afb08f,0xaa339df5,0xb48bce4f,0xeda354bd,0x59cb4a76,0x359b8c4c,0x93be912f,0x64b59fdd,0xf1f24fc1,0xd6dae1e6,0xfbd5d8a1,0x395ef22,0x4a6d80ae,0x1aa96881,0x78c2ef43,0xc5970}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa339df527afb08f,0xeda354bdb48bce4f,0x359b8c4c59cb4a76,0x64b59fdd93be912f,0xd6dae1e6f1f24fc1,0x395ef22fbd5d8a1,0x1aa968814a6d80ae,0xc597078c2ef43}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = 
(mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe7eb,0x27c8,0x739b,0x6eaa,0x7a17,0xf593,0xac1c,0x4a84,0x1a27,0x7771,0xe67e,0xea3d,0x4596,0xa34b,0x8edd,0xc51c,0x7c15,0xd1a1,0x2551,0x481b,0x402e,0xfed0,0x8b82,0x1eab,0xc98b,0x20fa,0x7143,0x6abf,0x463a,0x475f,0x510f,0x9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27c8e7eb,0x6eaa739b,0xf5937a17,0x4a84ac1c,0x77711a27,0xea3de67e,0xa34b4596,0xc51c8edd,0xd1a17c15,0x481b2551,0xfed0402e,0x1eab8b82,0x20fac98b,0x6abf7143,0x475f463a,0x9510f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6eaa739b27c8e7eb,0x4a84ac1cf5937a17,0xea3de67e77711a27,0xc51c8edda34b4596,0x481b2551d1a17c15,0x1eab8b82fed0402e,0x6abf714320fac98b,0x9510f475f463a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9e28,0x9e31,0xdab6,0x138c,0xc3c0,0x5193,0x444d,0xb2b7,0xf371,0x5630,0xb08b,0xc700,0x2404,0x3f08,0xc3f,0xbd7c,0x963b,0xd892,0x7bb2,0x429d,0x19d8,0xf277,0x853d,0x9aac,0x9bfa,0x42cd,0xf5e8,0x9e40,0x8a41,0x15a8,0x9c23,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9e319e28,0x138cdab6,0x5193c3c0,0xb2b7444d,0x5630f371,0xc700b08b,0x3f082404,0xbd7c0c3f,0xd892963b,0x429d7bb2,0xf27719d8,0x9aac853d,0x42cd9bfa,0x9e40f5e8,0x15a88a41,0x69c23}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x138cdab69e319e28,0xb2b7444d5193c3c0,0xc700b08b5630f371,0xbd7c0c3f3f082404,0x429d7bb2d892963b,0x9aac853df27719d8,0x9e40f5e842cd9bfa,0x69c2315a88a41}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x66d1,0x8ee,0x9219,0x9d61,0x13a4,0xfc63,0xc3ee,0xdf2a,0x1353,0x2ef,0xc391,0x8ad8,0x953b,0xb014,0x1029,0xa4b2,0x61a3,0xfc07,0xf3a8,0x199c,0xe6c8,0x6a41,0x6eb7,0xb459,0xa187,0x2f4e,0x9ec3,0x8b4e,0x5321,0x38b,0x5b21,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8ee66d1,0x9d619219,0xfc6313a4,0xdf2ac3ee,0x2ef1353,0x8ad8c391,0xb014953b,0xa4b21029,0xfc0761a3,0x199cf3a8,0x6a41e6c8,0xb4596eb7,0x2f4ea187,0x8b4e9ec3,0x38b5321,0x35b21}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d61921908ee66d1,0xdf2ac3eefc6313a4,0x8ad8c39102ef1353,0xa4b21029b014953b,0x199cf3a8fc0761a3,0xb4596eb76a41e6c8,0x8b4e9ec32f4ea187,0x35b21038b5321}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1815,0xd837,0x8c64,0x9155,0x85e8,0xa6c,0x53e3,0xb57b,0xe5d8,0x888e,0x1981,0x15c2,0xba69,0x5cb4,0x7122,0x3ae3,0x83ea,0x2e5e,0xdaae,0xb7e4,0xbfd1,0x12f,0x747d,0xe154,0x3674,0xdf05,0x8ebc,0x9540,0xb9c5,0xb8a0,0xaef0,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8371815,0x91558c64,0xa6c85e8,0xb57b53e3,0x888ee5d8,0x15c21981,0x5cb4ba69,0x3ae37122,0x2e5e83ea,0xb7e4daae,0x12fbfd1,0xe154747d,0xdf053674,0x95408ebc,0xb8a0b9c5,0x6aef0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x91558c64d8371815,0xb57b53e30a6c85e8,0x15c21981888ee5d8,0x3ae371225cb4ba69,0xb7e4daae2e5e83ea,0xe154747d012fbfd1,0x95408ebcdf053674,0x6aef0b8a0b9c5}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 
32, ._mp_d = (mp_limb_t[]) {0x4f71,0xd850,0x620a,0x55cc,0x31b0,0x4b74,0xab42,0x125c,0xb589,0xa634,0x73b3,0xca64,0x6ed0,0x6c41,0x6022,0x9b4a,0xb03e,0xe0d,0x1e19,0x2925,0x275e,0x42a,0x10dd,0xfc6a,0x7f51,0xb592,0x977e,0xe556,0x10bc,0x873d,0xa68f,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8504f71,0x55cc620a,0x4b7431b0,0x125cab42,0xa634b589,0xca6473b3,0x6c416ed0,0x9b4a6022,0xe0db03e,0x29251e19,0x42a275e,0xfc6a10dd,0xb5927f51,0xe556977e,0x873d10bc,0x3a68f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55cc620ad8504f71,0x125cab424b7431b0,0xca6473b3a634b589,0x9b4a60226c416ed0,0x29251e190e0db03e,0xfc6a10dd042a275e,0xe556977eb5927f51,0x3a68f873d10bc}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xed2,0x7e79,0xb784,0x90c5,0xe054,0xfc99,0x606e,0x59f4,0xb88f,0xb6ae,0x29bf,0xe3fc,0xa3cf,0x8c88,0xba94,0x4bf9,0xcb60,0xf771,0x31c0,0x7236,0x2401,0xa9e7,0xb9b8,0xfdbf,0x326e,0xb4d5,0x9bd6,0xc810,0xb7f0,0x661,0x1f62,0xf}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7e790ed2,0x90c5b784,0xfc99e054,0x59f4606e,0xb6aeb88f,0xe3fc29bf,0x8c88a3cf,0x4bf9ba94,0xf771cb60,0x723631c0,0xa9e72401,0xfdbfb9b8,0xb4d5326e,0xc8109bd6,0x661b7f0,0xf1f62}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x90c5b7847e790ed2,0x59f4606efc99e054,0xe3fc29bfb6aeb88f,0x4bf9ba948c88a3cf,0x723631c0f771cb60,0xfdbfb9b8a9e72401,0xc8109bd6b4d5326e,0xf1f620661b7f0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb407,0xc1a7,0x6715,0xe1ad,0x31eb,0x15e8,0x1594,0xbd8e,0x3976,0x2bae,0x8f7a,0x1636,0x8d19,0x88d2,0x12ae,0x1627,0xf2b0,0x4868,0xdd78,0xfa51,0xfd6b,0xa1f3,0x7eb3,0x35d9,0xd826,0x2c09,0x12a8,0x54dd,0x87d6,0xc29c,0x5bcd,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc1a7b407,0xe1ad6715,0x15e831eb,0xbd8e1594,0x2bae3976,0x16368f7a,0x88d28d19,0x162712ae,0x4868f2b0,0xfa51dd78,0xa1f3fd6b,0x35d97eb3,0x2c09d826,0x54dd12a8,0xc29c87d6,0x15bcd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe1ad6715c1a7b407,0xbd8e159415e831eb,0x16368f7a2bae3976,0x162712ae88d28d19,0xfa51dd784868f2b0,0x35d97eb3a1f3fd6b,0x54dd12a82c09d826,0x15bcdc29c87d6}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb08f,0x27af,0x9df5,0xaa33,0xce4f,0xb48b,0x54bd,0xeda3,0x4a76,0x59cb,0x8c4c,0x359b,0x912f,0x93be,0x9fdd,0x64b5,0x4fc1,0xf1f2,0xe1e6,0xd6da,0xd8a1,0xfbd5,0xef22,0x395,0x80ae,0x4a6d,0x6881,0x1aa9,0xef43,0x78c2,0x5970,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27afb08f,0xaa339df5,0xb48bce4f,0xeda354bd,0x59cb4a76,0x359b8c4c,0x93be912f,0x64b59fdd,0xf1f24fc1,0xd6dae1e6,0xfbd5d8a1,0x395ef22,0x4a6d80ae,0x1aa96881,0x78c2ef43,0xc5970}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa339df527afb08f,0xeda354bdb48bce4f,0x359b8c4c59cb4a76,0x64b59fdd93be912f,0xd6dae1e6f1f24fc1,0x395ef22fbd5d8a1,0x1aa968814a6d80ae,0xc597078c2ef43}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb83a,0x5e7a,0x2c9b,0xd483,0xeff9,0x71e9,0x4a21,0x2eae,0x921,0xbb26,0x6bf2,0xb038,0xeac9,0xc05a,0xd498,0x34fb,0x7ca,0xaae9,0x2674,0x81de,0x471f,0x7dbe,0x88c9,0xa354,0x9f03,0x5301,0x9acc,0x7c82,0xc479,0x732,0xdc7b,0x8}}} +#elif GMP_LIMB_BITS 
== 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5e7ab83a,0xd4832c9b,0x71e9eff9,0x2eae4a21,0xbb260921,0xb0386bf2,0xc05aeac9,0x34fbd498,0xaae907ca,0x81de2674,0x7dbe471f,0xa35488c9,0x53019f03,0x7c829acc,0x732c479,0x8dc7b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd4832c9b5e7ab83a,0x2eae4a2171e9eff9,0xb0386bf2bb260921,0x34fbd498c05aeac9,0x81de2674aae907ca,0xa35488c97dbe471f,0x7c829acc53019f03,0x8dc7b0732c479}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 31, ._mp_d = (mp_limb_t[]) {0x4733,0xaeba,0xf3d4,0x84bf,0x453a,0xa71a,0xe0fa,0x4604,0xf02b,0x9bc2,0xb114,0x5fc5,0x5f8d,0x1a8d,0x2302,0x175d,0x3655,0x8351,0x51b,0x698c,0xc745,0x8c83,0xdd6a,0xdd4b,0x682f,0x80b7,0xd1fc,0xe320,0xca30,0xc1d3,0xc365}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xaeba4733,0x84bff3d4,0xa71a453a,0x4604e0fa,0x9bc2f02b,0x5fc5b114,0x1a8d5f8d,0x175d2302,0x83513655,0x698c051b,0x8c83c745,0xdd4bdd6a,0x80b7682f,0xe320d1fc,0xc1d3ca30,0xc365}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x84bff3d4aeba4733,0x4604e0faa71a453a,0x5fc5b1149bc2f02b,0x175d23021a8d5f8d,0x698c051b83513655,0xdd4bdd6a8c83c745,0xe320d1fc80b7682f,0xc365c1d3ca30}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe32c,0x5173,0xdcb0,0xe05d,0x3a7e,0x6e8c,0xfd38,0xbed7,0x5fe0,0xa986,0x26f1,0xedf0,0x8fc7,0x1dbc,0xa48e,0x2e70,0x6648,0xe767,0xe8c3,0xf05b,0x26aa,0x63b6,0xf8f6,0x5304,0x7042,0x7c93,0x54a2,0xe675,0xd3ea,0x2b1,0xb36e,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5173e32c,0xe05ddcb0,0x6e8c3a7e,0xbed7fd38,0xa9865fe0,0xedf026f1,0x1dbc8fc7,0x2e70a48e,0xe7676648,0xf05be8c3,0x63b626aa,0x5304f8f6,0x7c937042,0xe67554a2,0x2b1d3ea,0x8b36e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe05ddcb05173e32c,0xbed7fd386e8c3a7e,0xedf026f1a9865fe0,0x2e70a48e1dbc8fc7,0xf05be8c3e7676648,0x5304f8f663b626aa,0xe67554a27c937042,0x8b36e02b1d3ea}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x47c6,0xa185,0xd364,0x2b7c,0x1006,0x8e16,0xb5de,0xd151,0xf6de,0x44d9,0x940d,0x4fc7,0x1536,0x3fa5,0x2b67,0xcb04,0xf835,0x5516,0xd98b,0x7e21,0xb8e0,0x8241,0x7736,0x5cab,0x60fc,0xacfe,0x6533,0x837d,0x3b86,0xf8cd,0x2384,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa18547c6,0x2b7cd364,0x8e161006,0xd151b5de,0x44d9f6de,0x4fc7940d,0x3fa51536,0xcb042b67,0x5516f835,0x7e21d98b,0x8241b8e0,0x5cab7736,0xacfe60fc,0x837d6533,0xf8cd3b86,0x72384}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2b7cd364a18547c6,0xd151b5de8e161006,0x4fc7940d44d9f6de,0xcb042b673fa51536,0x7e21d98b5516f835,0x5cab77368241b8e0,0x837d6533acfe60fc,0x72384f8cd3b86}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x116d, 0x165f, 0x7d3, 0x488, 0xe08, 0xb12, 0x2e0, 0x18b4, 0xdbb, 0x181e, 0xe50, 0x19b8, 0xc17, 0x1c82, 0x1cf6, 0x7b9, 0xebb, 0x1c0a, 0x183a, 0x1f42, 0x1a3e, 0x176b, 0x19fa, 0x7e4, 0x1646, 0x1dc5, 0x1e9d, 0x4b3, 0x18df, 0xf2d, 0xe2e, 0x2e3, 0x903, 0x19ef, 0x1f94, 0x237, 0x18ef, 0x26c, 0x12f} +#elif RADIX == 32 +{0x18b69673, 0x107d3b2f, 0x49c1048, 0x5a0b816, 0x181e6dde, 0x1f370e50, 0x1b720982, 0xbb3dcf3, 0x1075c0a7, 0x1e8fbe85, 0x4cfd5da, 0x18b6463f, 0x12cfd3bd, 0x796e37c, 0x62e3717, 0x533de90, 0x7788dff, 0x2e6} +#elif RADIX == 64 +#if 
defined(SQISIGN_GF_IMPL_BROADWELL) +{0x104883e9d97e2da5, 0x79b778b41702c49c, 0xb720982f9b872860, 0x2c1d7029d767b9e7, 0xc7e4cfd5daf47df4, 0x71be4b3f4ef716c8, 0x67bd2062e37173cb, 0x1089b31de237fca} +#else +{0x9107d3b2fc5b4b, 0x178b41702c49c10, 0x17cdc394303cdb, 0x75d9ee79edc826, 0x15e8fbe8583ae05, 0x1716c8c7e4cfd5d, 0x19e5b8df259fa77, 0x1299ef4818b8dc5, 0x613663bc46ff} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x1c5d, 0x1d97, 0x1f4, 0x122, 0x1382, 0x2c4, 0xb8, 0x1e2d, 0x136e, 0x607, 0x394, 0x1e6e, 0x1305, 0x1720, 0xf3d, 0x19ee, 0x13ae, 0x1702, 0x160e, 0x17d0, 0x1e8f, 0x15da, 0x67e, 0x11f9, 0xd91, 0xf71, 0x1fa7, 0x192c, 0xe37, 0x13cb, 0x1b8b, 0x18b8, 0x1a40, 0x67b, 0x1fe5, 0x188d, 0x63b, 0x189b, 0x47b} +#elif RADIX == 32 +{0x1e2ed505, 0x41f4ecb, 0x11270412, 0x11682e05, 0x6079b77, 0x17cdc394, 0x1edc8260, 0x1aecf73c, 0xc1d7029, 0x17a3efa1, 0x1933f576, 0xe2d918f, 0x4b3f4ef, 0x19e5b8df, 0x18b8dc5, 0x194cf7a4, 0x11de237f, 0x159} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x41220fa765f8bb5, 0x1e6dde2d05c0b127, 0xedc8260be6e1ca18, 0xb075c0a75d9ee79, 0x31f933f576bd1f7d, 0xdc6f92cfd3bdc5b2, 0x99ef4818b8dc5cf2, 0x6e26cc7788dff2} +#else +{0x2441f4ecbf176a, 0x1de2d05c0b12704, 0x105f370e50c0f36, 0x9d767b9e7b7209, 0xd7a3efa160eb81, 0x1dc5b231f933f57, 0xe796e37c967e9d, 0x1ca67bd2062e371, 0xdc4d98ef11bf} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0x51d, 0x1394, 0xcca, 0x1568, 0x1790, 0x11d6, 0x18aa, 0xe65, 0x1e8e, 0x4fe, 0xab9, 0x1496, 0x167d, 0x1b42, 0x1f85, 0x1d7a, 0x8c4, 0x17ea, 0x1269, 0x16, 0x1fbf, 0x8b5, 0x6f4, 0x1202, 0x17c4, 0x427, 0x1273, 0x14f, 0x49c, 0xfba, 0x1b3b, 0x13cd, 0x10ee, 0x634, 0x10ae, 0x2c4, 0x10b4, 0x1377, 0xfe} +#elif RADIX == 32 +{0x28e92dc, 0x10cca9ca, 0x15af2156, 0x132e2aa3, 0x4fef473, 0x1692cab9, 0x2ed0acf, 0xc4ebd7e, 0x4d37ea4, 0xfefc02d, 0x237a22d, 0x4f7c490, 0x53e4e64, 0x17dd1270, 0x1d3cdd9d, 0xb8c690e, 0x5a0b121, 0x1bc} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x215686654e50a3a4, 0xfbd1ce65c55475af, 0x2ed0acfb49655c93, 0x6934dfa9189d7afc, 0x920237a22d7f7e01, 0x893814f939909ef8, 0x18d21dd3cdd9dbee, 0x134dde1682c4857} +#else +{0xad0cca9ca14749, 0x1ce65c55475af21, 0x7da4b2ae49fde8, 0x46275ebf0bb42b, 0x1afefc02d269bf5, 0x109ef8920237a22, 0xdf7449c0a7c9cc, 0x15c6348774f3767, 0xb9bbc2d05890} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x17ab, 0x1e1a, 0x1bfe, 0x1f73, 0x1eb9, 0xf30, 0x1cca, 0x1aaf, 0xbea, 0xa1b, 0xb73, 0x86d, 0x1c13, 0x1c31, 0x1e6e, 0x1fbf, 0x968, 0x10f0, 0xb53, 0x1418, 0x11c6, 0x65f, 0x188, 0x2c7, 0x79b, 0xa9, 0xa92, 0x12b0, 0x1b53, 0x1564, 0xfa7, 0x1fd7, 0xa5b, 0xb32, 0x1bc8, 0xc90, 0x11ee, 0x1f6, 0x3f2} +#elif RADIX == 32 +{0xbd5cad1, 0x7bfef0d, 0xc3d73f7, 0x157f329e, 0xa1b5f56, 0xd0dab73, 0x1770c782, 0x168fdff9, 0x16a70f04, 0x1c71a830, 0x70c4197, 0x15279b16, 0xac15240, 0x1ab26d4e, 0x17fd77d3, 0x121664a5, 0xf732437, 0xa34} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x73f73dff786af572, 0x6d7d5aafe653cc3d, 0x770c782686d5b9a8, 0x85a9c3c12d1fbff3, 0x62c70c4197e38d41, 0x36a72b054902a4f3, 0x2cc94b7fd77d3d59, 0x1307da3dcc90de4} +#else +{0x1ee7bfef0d5eae5, 0x15aafe653cc3d73, 0x13436adcd436be, 0x4b47effcddc31e, 0xfc71a830b53878, 0x2a4f362c70c419, 0x1eac9b539582a48, 0x190b3252dff5df4, 0xb0fb47b9921b} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x166f, 0x4b7, 0x1268, 0x18f5, 0x10a9, 0x17ea, 0x105e, 0x1090, 0x1c31, 0x624, 0xec6, 0xea1, 0x17d2, 0xf55, 0x10d3, 0x8fb, 0x9ab, 0x1ae2, 0x952, 0xcab, 0x100d, 0x702, 0xc4d, 0x1387, 0x344, 0xdaf, 0x1566, 0xf8c, 0x1e1c, 0x6f1, 0x1af9, 0xf1, 0xd6d, 0xa06, 0xb5c, 0x62c, 0x2e9, 0x1131, 0x683} +#elif RADIX == 32 +{0x1b37fb85, 0xb26825b, 0x1aa1538f, 0x48417af, 0x624e18c, 0x9d42ec6, 0x9bd56fa, 0x1ab47dc3, 0x12a5ae24, 0x14035956, 0x76269c0, 0x15e3449c, 0x1e32accd, 0x1378f871, 0x1a0f1d7c, 0x17140cd6, 0x17498b16, 0x608} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x538f593412decdfe, 0x9386309082f5faa1, 0x9bd56fa4ea176318, 0xb4a96b893568fb86, 0x93876269c0a01aca, 0x7c38f8cab336bc68, 0x2819ada0f1d7c9bc, 0x17c4c45d262c5ae} +#else +{0x11eb26825bd9bfd, 0x309082f5faa153, 0x1d2750bb18c49c3, 0x4d5a3ee1a6f55b, 0x14035956952d71, 0x16bc6893876269c, 0x4de3e1c7c65599, 0xb8a066b683c75f, 0x148988ba4c58b} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x826, 0x1efe, 0xa95, 0x174d, 0x11b5, 0x1184, 0x1d4, 0x1024, 0x1d44, 0x349, 0x83c, 0x665, 0x4a2, 0x1288, 0x473, 0xa16, 0xe54, 0xafc, 0x6e2, 0x13f1, 0x217, 0x11e4, 0x1988, 0xe26, 0xd9a, 0x168f, 0x3d, 0x1436, 0x311, 0x148d, 0x168f, 0x1ad8, 0x1156, 0xb8, 0x193f, 0x1655, 0x279, 0x5cd, 0x65e} +#elif RADIX == 32 +{0x41378c1, 0x1aa95f7f, 0x1236b74, 0x1207523, 0x349ea24, 0x8cca83c, 0x19ca2094, 0x5450b11, 0xdc4afc7, 0x85e7e2, 0x6cc4479, 0x11ed9a71, 0x10d807b6, 0x1a468c46, 0xdad8b47, 0xfc17115, 0x13cd9572, 0xe8} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x6b74d54afbf904de, 0x27a890240ea46123, 0x9ca2094466541e0d, 0x13712bf1ca8a1623, 0x4e26cc4479042f3f, 0x462343601eda3db3, 0x82e22adad8b47d23, 0x517344f3655c9f} +#else +{0xe9aa95f7f209bc, 0x90240ea461236b, 0xa2332a0f0693d4, 0x72a28588e72882, 0x12085e7e26e257e, 0x1a3db34e26cc447, 0x1e91a311a1b00f6, 0x7e0b88ab6b62d1, 0xa2e689e6cab9} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x1aab, 0xe01, 0x1bf3, 0x122d, 0xd71, 0x34e, 0x153b, 0x1444, 0x1d19, 0x1165, 0x1496, 0x568, 0x12d4, 0x105c, 0x1129, 0x2c7, 0x1706, 0x359, 0x1a4f, 0x114, 0x758, 0x1780, 0x1617, 0x1485, 0x1147, 0xa4f, 0x1f77, 
0xf13, 0x1547, 0x103c, 0x352, 0x125d, 0xb1e, 0x1526, 0x1708, 0xfb5, 0x17bf, 0x1d55, 0x6bc} +#elif RADIX == 32 +{0x1d55ffc5, 0x1bbf3700, 0x139ae322, 0x2254ec6, 0x1165e8cd, 0x10ad1496, 0x14c1725a, 0x106163c4, 0x149e359b, 0x1d60229, 0x5b0bde0, 0x9f147a4, 0x1c4feeea, 0x81e551d, 0x1d25d1a9, 0x22a4cb1, 0x1dfbed6e, 0x72d} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xe322ddf9b807557f, 0x97a33444a9d8d39a, 0x4c1725a8568a4b45, 0x4d278d66e0c2c789, 0xf485b0bde00eb011, 0x2a8ef13fbba93e28, 0x549963d25d1a940f, 0x197556f7efb5b84} +#else +{0x45bbf3700eaaff, 0x13444a9d8d39ae3, 0xd42b4525a2cbd1, 0x1b830b1e25305c9, 0x1d60229a4f1ac, 0x93e28f485b0bde, 0xa079547789fddd, 0x1152658f49746a, 0x17eaadefdf6b7} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x204, 0x9f6, 0x1dba, 0x110e, 0x6ea, 0x112a, 0xa11, 0xd06, 0x15aa, 0x1f0b, 0xeec, 0xef1, 0x1edc, 0x1604, 0x65b, 0x129, 0x39d, 0x8f8, 0x5d5, 0x672, 0x150a, 0x233, 0xc20, 0x12ba, 0x1855, 0x15a6, 0xd50, 0x1c71, 0x15b7, 0xf04, 0x579, 0x16d2, 0xbac, 0x4c9, 0xaf5, 0x514, 0xf27, 0xef, 0x36a} +#elif RADIX == 32 +{0x10240be, 0x1ddba4fb, 0xa8dd510, 0x8328462, 0x1f0bad53, 0x11de2eec, 0xdd813db, 0x19d09499, 0xbaa8f81, 0x1d428ce4, 0x1a61008c, 0x14d85595, 0x11c5aa15, 0x178256df, 0x196d22bc, 0x1d4992ba, 0x19394515, 0x27b} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xd510eedd27d84090, 0x2eb54d06508c4a8d, 0xdd813db8ef17767c, 0x22eaa3e073a12932, 0xb2ba61008cea1467, 0x2b6fc716a8569b0a, 0x93257596d22bcbc1, 0x503bde4e51457a} +#else +{0x21ddba4fb08120, 0x14d06508c4a8dd5, 0xdc778bbb3e175a, 0x1ce84a4cb7604f, 0x19d428ce45d547c, 0x169b0ab2ba61008, 0x5e095b7e38b542, 0x1ea4c95d65b48af, 0xa077bc9ca28a} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2ce5,0xf14f,0xd32e,0x18c2,0x5217,0x2833,0xa7fc,0x4090,0xd9d5,0x4d89,0xe770,0x3801,0xbca0,0x6ac9,0x5432,0x2ee5,0x4356,0x827a,0xdf1a,0x8911,0x4102,0xf05a,0x3e5,0x18b9,0xf66f,0x77ad,0xd99b,0xea2f,0xa030,0x36bb,0xe071,0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf14f2ce5,0x18c2d32e,0x28335217,0x4090a7fc,0x4d89d9d5,0x3801e770,0x6ac9bca0,0x2ee55432,0x827a4356,0x8911df1a,0xf05a4102,0x18b903e5,0x77adf66f,0xea2fd99b,0x36bba030,0xde071}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x18c2d32ef14f2ce5,0x4090a7fc28335217,0x3801e7704d89d9d5,0x2ee554326ac9bca0,0x8911df1a827a4356,0x18b903e5f05a4102,0xea2fd99b77adf66f,0xde07136bba030}}} 
+#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x752,0x8c67,0x260,0x14f,0x138a,0x865,0x5e4d,0x4db9,0xca26,0x7a2c,0x4a7e,0x9422,0xb2f6,0xbdc8,0x841d,0x7e33,0x5677,0x38a6,0x9633,0x15fc,0xacce,0xbe04,0xdcef,0xb16c,0xd57b,0x7a5e,0x8395,0x1f8f,0xe4b9,0xe383,0x4be3,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8c670752,0x14f0260,0x865138a,0x4db95e4d,0x7a2cca26,0x94224a7e,0xbdc8b2f6,0x7e33841d,0x38a65677,0x15fc9633,0xbe04acce,0xb16cdcef,0x7a5ed57b,0x1f8f8395,0xe383e4b9,0x64be3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x14f02608c670752,0x4db95e4d0865138a,0x94224a7e7a2cca26,0x7e33841dbdc8b2f6,0x15fc963338a65677,0xb16cdcefbe04acce,0x1f8f83957a5ed57b,0x64be3e383e4b9}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9b63,0x40f2,0x5177,0x535f,0x5098,0x2f26,0x18a2,0x5e1c,0x5d70,0x33e9,0x1db9,0x2397,0xaccc,0xb98b,0xcae6,0x87bf,0xd43f,0xb329,0xf2d0,0x2f67,0x779c,0xd2ab,0x4369,0xc67d,0xd065,0x9550,0xf06b,0x1a2b,0xc6e,0x9b2b,0xbb64,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x40f29b63,0x535f5177,0x2f265098,0x5e1c18a2,0x33e95d70,0x23971db9,0xb98baccc,0x87bfcae6,0xb329d43f,0x2f67f2d0,0xd2ab779c,0xc67d4369,0x9550d065,0x1a2bf06b,0x9b2b0c6e,0x3bb64}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x535f517740f29b63,0x5e1c18a22f265098,0x23971db933e95d70,0x87bfcae6b98baccc,0x2f67f2d0b329d43f,0xc67d4369d2ab779c,0x1a2bf06b9550d065,0x3bb649b2b0c6e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xd31b,0xeb0,0x2cd1,0xe73d,0xade8,0xd7cc,0x5803,0xbf6f,0x262a,0xb276,0x188f,0xc7fe,0x435f,0x9536,0xabcd,0xd11a,0xbca9,0x7d85,0x20e5,0x76ee,0xbefd,0xfa5,0xfc1a,0xe746,0x990,0x8852,0x2664,0x15d0,0x5fcf,0xc944,0x1f8e,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xeb0d31b,0xe73d2cd1,0xd7ccade8,0xbf6f5803,0xb276262a,0xc7fe188f,0x9536435f,0xd11aabcd,0x7d85bca9,0x76ee20e5,0xfa5befd,0xe746fc1a,0x88520990,0x15d02664,0xc9445fcf,0x21f8e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe73d2cd10eb0d31b,0xbf6f5803d7ccade8,0xc7fe188fb276262a,0xd11aabcd9536435f,0x76ee20e57d85bca9,0xe746fc1a0fa5befd,0x15d0266488520990,0x21f8ec9445fcf}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x4222,0xe40c,0x843f,0x3518,0x72d1,0xa757,0xb4e5,0x4347,0x3326,0xc267,0x30d,0xb77e,0x9907,0xcb8c,0xd175,0x8cf2,0x5440,0xb876,0x2316,0xa715,0xf0ab,0x9e96,0xa72f,0xcd7f,0x1e06,0xa42f,0x985f,0xdc2d,0xd9ee,0xe71e,0x2ae0,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe40c4222,0x3518843f,0xa75772d1,0x4347b4e5,0xc2673326,0xb77e030d,0xcb8c9907,0x8cf2d175,0xb8765440,0xa7152316,0x9e96f0ab,0xcd7fa72f,0xa42f1e06,0xdc2d985f,0xe71ed9ee,0x82ae0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3518843fe40c4222,0x4347b4e5a75772d1,0xb77e030dc2673326,0x8cf2d175cb8c9907,0xa7152316b8765440,0xcd7fa72f9e96f0ab,0xdc2d985fa42f1e06,0x82ae0e71ed9ee}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x11ac,0x1c90,0x6c62,0x15fd,0x1924,0x5851,0x60c6,0x744c,0x80fd,0xa6b,0x5654,0x51a1,0x6589,0x803f,0xf265,0x4132,0x96d2,0x7497,0xcf0b,0x65,0x2e51,0x2bc,0x4203,0x3aad,0x1f2,0x5b40,0xcc1a,0x67e4,0xdfd3,0xba17,0x7a8c,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1c9011ac,0x15fd6c62,0x58511924,0x744c60c6,0xa6b80fd,0x51a15654,0x803f6589,0x4132f265,0x749796d2,0x65cf0b,0x2bc2e51,0x3aad4203,0x5b4001f2,0x67e4cc1a,0xba17dfd3,0x37a8c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x15fd6c621c9011ac,0x744c60c658511924,0x51a156540a6b80fd,0x4132f265803f6589,0x65cf0b749796d2,0x3aad420302bc2e51,0x67e4cc1a5b4001f2,0x37a8cba17dfd3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x99f9,0x50f4,0xd750,0xb0a2,0xfdaa,0x6986,0x6b4b,0x34be,0x7bd5,0x3974,0xe05,0x8c18,0x6bb8,0xbb5a,0xcc33,0x63b5,0x943b,0xec49,0xb4ef,0xbdc4,0x5a2a,0x2fc8,0x85ad,0x1291,0xa29f,0x9618,0x721b,0x93f6,0xb40f,0x2e85,0xdfbb,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x50f499f9,0xb0a2d750,0x6986fdaa,0x34be6b4b,0x39747bd5,0x8c180e05,0xbb5a6bb8,0x63b5cc33,0xec49943b,0xbdc4b4ef,0x2fc85a2a,0x129185ad,0x9618a29f,0x93f6721b,0x2e85b40f,0xbdfbb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb0a2d75050f499f9,0x34be6b4b6986fdaa,0x8c180e0539747bd5,0x63b5cc33bb5a6bb8,0xbdc4b4efec49943b,0x129185ad2fc85a2a,0x93f6721b9618a29f,0xbdfbb2e85b40f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf4c0,0x4ff5,0x2aee,0x3e90,0x49,0xb2af,0xf257,0x111c,0xead0,0xc1d5,0xc7d9,0x8a7c,0x9579,0xf62,0xe1f6,0xb43c,0x8f3f,0x14ca,0x1b7b,0xc209,0xac8,0xf5cd,0xdfc0,0x5d39,0x9d8d,0x9c9a,0x2e6e,0xba54,0x79d5,0x4f02,0x1cfc,0x2}}} +#elif GMP_LIMB_BITS == 32 
+{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4ff5f4c0,0x3e902aee,0xb2af0049,0x111cf257,0xc1d5ead0,0x8a7cc7d9,0xf629579,0xb43ce1f6,0x14ca8f3f,0xc2091b7b,0xf5cd0ac8,0x5d39dfc0,0x9c9a9d8d,0xba542e6e,0x4f0279d5,0x21cfc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e902aee4ff5f4c0,0x111cf257b2af0049,0x8a7cc7d9c1d5ead0,0xb43ce1f60f629579,0xc2091b7b14ca8f3f,0x5d39dfc0f5cd0ac8,0xba542e6e9c9a9d8d,0x21cfc4f0279d5}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1eb,0x1730,0x3343,0xcef3,0x2add,0x7615,0x353e,0xd52b,0x9951,0xc1,0x2292,0x69d0,0x4a9f,0xc1bd,0xfec7,0xd332,0x72b7,0x67f8,0xaa27,0x61a4,0x33dd,0x8ec0,0xfe1d,0x9a69,0x38ac,0x60f,0x209b,0xbb33,0x55b1,0x13f5,0x5c80,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x173001eb,0xcef33343,0x76152add,0xd52b353e,0xc19951,0x69d02292,0xc1bd4a9f,0xd332fec7,0x67f872b7,0x61a4aa27,0x8ec033dd,0x9a69fe1d,0x60f38ac,0xbb33209b,0x13f555b1,0xc5c80}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcef33343173001eb,0xd52b353e76152add,0x69d0229200c19951,0xd332fec7c1bd4a9f,0x61a4aa2767f872b7,0x9a69fe1d8ec033dd,0xbb33209b060f38ac,0xc5c8013f555b1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x6607,0xaf0b,0x28af,0x4f5d,0x255,0x9679,0x94b4,0xcb41,0x842a,0xc68b,0xf1fa,0x73e7,0x9447,0x44a5,0x33cc,0x9c4a,0x6bc4,0x13b6,0x4b10,0x423b,0xa5d5,0xd037,0x7a52,0xed6e,0x5d60,0x69e7,0x8de4,0x6c09,0x4bf0,0xd17a,0x2044,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xaf0b6607,0x4f5d28af,0x96790255,0xcb4194b4,0xc68b842a,0x73e7f1fa,0x44a59447,0x9c4a33cc,0x13b66bc4,0x423b4b10,0xd037a5d5,0xed6e7a52,0x69e75d60,0x6c098de4,0xd17a4bf0,0x42044}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4f5d28afaf0b6607,0xcb4194b496790255,0x73e7f1fac68b842a,0x9c4a33cc44a59447,0x423b4b1013b66bc4,0xed6e7a52d037a5d5,0x6c098de469e75d60,0x42044d17a4bf0}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2ce5,0xf14f,0xd32e,0x18c2,0x5217,0x2833,0xa7fc,0x4090,0xd9d5,0x4d89,0xe770,0x3801,0xbca0,0x6ac9,0x5432,0x2ee5,0x4356,0x827a,0xdf1a,0x8911,0x4102,0xf05a,0x3e5,0x18b9,0xf66f,0x77ad,0xd99b,0xea2f,0xa030,0x36bb,0xe071,0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf14f2ce5,0x18c2d32e,0x28335217,0x4090a7fc,0x4d89d9d5,0x3801e770,0x6ac9bca0,0x2ee55432,0x827a4356,0x8911df1a,0xf05a4102,0x18b903e5,0x77adf66f,0xea2fd99b,0x36bba030,0xde071}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x18c2d32ef14f2ce5,0x4090a7fc28335217,0x3801e7704d89d9d5,0x2ee554326ac9bca0,0x8911df1a827a4356,0x18b903e5f05a4102,0xea2fd99b77adf66f,0xde07136bba030}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x752,0x8c67,0x260,0x14f,0x138a,0x865,0x5e4d,0x4db9,0xca26,0x7a2c,0x4a7e,0x9422,0xb2f6,0xbdc8,0x841d,0x7e33,0x5677,0x38a6,0x9633,0x15fc,0xacce,0xbe04,0xdcef,0xb16c,0xd57b,0x7a5e,0x8395,0x1f8f,0xe4b9,0xe383,0x4be3,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8c670752,0x14f0260,0x865138a,0x4db95e4d,0x7a2cca26,0x94224a7e,0xbdc8b2f6,0x7e33841d,0x38a65677,0x15fc9633,0xbe04acce,0xb16cdcef,0x7a5ed57b,0x1f8f8395,0xe383e4b9,0x64be3}}} +#elif 
GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x14f02608c670752,0x4db95e4d0865138a,0x94224a7e7a2cca26,0x7e33841dbdc8b2f6,0x15fc963338a65677,0xb16cdcefbe04acce,0x1f8f83957a5ed57b,0x64be3e383e4b9}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9b63,0x40f2,0x5177,0x535f,0x5098,0x2f26,0x18a2,0x5e1c,0x5d70,0x33e9,0x1db9,0x2397,0xaccc,0xb98b,0xcae6,0x87bf,0xd43f,0xb329,0xf2d0,0x2f67,0x779c,0xd2ab,0x4369,0xc67d,0xd065,0x9550,0xf06b,0x1a2b,0xc6e,0x9b2b,0xbb64,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x40f29b63,0x535f5177,0x2f265098,0x5e1c18a2,0x33e95d70,0x23971db9,0xb98baccc,0x87bfcae6,0xb329d43f,0x2f67f2d0,0xd2ab779c,0xc67d4369,0x9550d065,0x1a2bf06b,0x9b2b0c6e,0x3bb64}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x535f517740f29b63,0x5e1c18a22f265098,0x23971db933e95d70,0x87bfcae6b98baccc,0x2f67f2d0b329d43f,0xc67d4369d2ab779c,0x1a2bf06b9550d065,0x3bb649b2b0c6e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xd31b,0xeb0,0x2cd1,0xe73d,0xade8,0xd7cc,0x5803,0xbf6f,0x262a,0xb276,0x188f,0xc7fe,0x435f,0x9536,0xabcd,0xd11a,0xbca9,0x7d85,0x20e5,0x76ee,0xbefd,0xfa5,0xfc1a,0xe746,0x990,0x8852,0x2664,0x15d0,0x5fcf,0xc944,0x1f8e,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xeb0d31b,0xe73d2cd1,0xd7ccade8,0xbf6f5803,0xb276262a,0xc7fe188f,0x9536435f,0xd11aabcd,0x7d85bca9,0x76ee20e5,0xfa5befd,0xe746fc1a,0x88520990,0x15d02664,0xc9445fcf,0x21f8e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe73d2cd10eb0d31b,0xbf6f5803d7ccade8,0xc7fe188fb276262a,0xd11aabcd9536435f,0x76ee20e57d85bca9,0xe746fc1a0fa5befd,0x15d0266488520990,0x21f8ec9445fcf}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2111,0xf206,0x421f,0x9a8c,0xb968,0xd3ab,0xda72,0x21a3,0x9993,0xe133,0x186,0xdbbf,0x4c83,0xe5c6,0x68ba,0x4679,0x2a20,0x5c3b,0x918b,0xd38a,0x7855,0xcf4b,0xd397,0x66bf,0x8f03,0xd217,0xcc2f,0x6e16,0x6cf7,0x738f,0x1570,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf2062111,0x9a8c421f,0xd3abb968,0x21a3da72,0xe1339993,0xdbbf0186,0xe5c64c83,0x467968ba,0x5c3b2a20,0xd38a918b,0xcf4b7855,0x66bfd397,0xd2178f03,0x6e16cc2f,0x738f6cf7,0x41570}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9a8c421ff2062111,0x21a3da72d3abb968,0xdbbf0186e1339993,0x467968bae5c64c83,0xd38a918b5c3b2a20,0x66bfd397cf4b7855,0x6e16cc2fd2178f03,0x41570738f6cf7}}} +#endif +}, { +#if 
0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x8d6,0xe48,0xb631,0xafe,0x8c92,0x2c28,0x3063,0xba26,0xc07e,0x535,0xab2a,0xa8d0,0xb2c4,0xc01f,0x7932,0x2099,0xcb69,0xba4b,0xe785,0x8032,0x1728,0x815e,0xa101,0x1d56,0xf9,0x2da0,0x660d,0xb3f2,0xefe9,0x5d0b,0xbd46,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe4808d6,0xafeb631,0x2c288c92,0xba263063,0x535c07e,0xa8d0ab2a,0xc01fb2c4,0x20997932,0xba4bcb69,0x8032e785,0x815e1728,0x1d56a101,0x2da000f9,0xb3f2660d,0x5d0befe9,0x1bd46}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafeb6310e4808d6,0xba2630632c288c92,0xa8d0ab2a0535c07e,0x20997932c01fb2c4,0x8032e785ba4bcb69,0x1d56a101815e1728,0xb3f2660d2da000f9,0x1bd465d0befe9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x129d,0xdd4c,0xe2b2,0xca3b,0x6c0b,0x9c8b,0x68f9,0x412,0x51a8,0x7583,0xae25,0xb80d,0x35d5,0x387b,0x4ba1,0x66e1,0x754,0xf6b6,0x3d8c,0x650,0xa955,0x214f,0xc05f,0x16d2,0x9ce4,0x246f,0x123e,0x3ed3,0xa07f,0x2e24,0x8964,0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdd4c129d,0xca3be2b2,0x9c8b6c0b,0x41268f9,0x758351a8,0xb80dae25,0x387b35d5,0x66e14ba1,0xf6b60754,0x6503d8c,0x214fa955,0x16d2c05f,0x246f9ce4,0x3ed3123e,0x2e24a07f,0x58964}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xca3be2b2dd4c129d,0x41268f99c8b6c0b,0xb80dae25758351a8,0x66e14ba1387b35d5,0x6503d8cf6b60754,0x16d2c05f214fa955,0x3ed3123e246f9ce4,0x589642e24a07f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9e3f,0xbc60,0xa44c,0x253c,0xa75e,0xa9f9,0x326f,0x9f9f,0x14aa,0xa47f,0x3889,0x5ee3,0x87d,0x933f,0x6cba,0x6222,0xcd43,0xa8c9,0xa815,0x992a,0x643a,0xc1d3,0x4cff,0xf675,0xf30b,0x7e2a,0x5248,0xb9e4,0xa454,0x2c53,0x525b,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbc609e3f,0x253ca44c,0xa9f9a75e,0x9f9f326f,0xa47f14aa,0x5ee33889,0x933f087d,0x62226cba,0xa8c9cd43,0x992aa815,0xc1d3643a,0xf6754cff,0x7e2af30b,0xb9e45248,0x2c53a454,0x3525b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x253ca44cbc609e3f,0x9f9f326fa9f9a75e,0x5ee33889a47f14aa,0x62226cba933f087d,0x992aa815a8c9cd43,0xf6754cffc1d3643a,0xb9e452487e2af30b,0x3525b2c53a454}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x584d,0xa517,0xb681,0x45de,0xc2ea,0x7c58,0x123,0xe0fd,0xfd80,0x6c5b,0xf669,0xddc5,0xb21a,0xcaa9,0xc7a0,0x37ec,0xf8c6,0x12e7,0xe984,0xe812,0xef9f,0x128a,0x9fca,0x41f5,0x118f,0x5c32,0xf1cf,0x78c5,0x9424,0x2ae3,0x60d2,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa517584d,0x45deb681,0x7c58c2ea,0xe0fd0123,0x6c5bfd80,0xddc5f669,0xcaa9b21a,0x37ecc7a0,0x12e7f8c6,0xe812e984,0x128aef9f,0x41f59fca,0x5c32118f,0x78c5f1cf,0x2ae39424,0x260d2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x45deb681a517584d,0xe0fd01237c58c2ea,0xddc5f6696c5bfd80,0x37ecc7a0caa9b21a,0xe812e98412e7f8c6,0x41f59fca128aef9f,0x78c5f1cf5c32118f,0x260d22ae39424}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xed63,0x22b3,0x1d4d,0x35c4,0x93f4,0x6374,0x9706,0xfbed,0xae57,0x8a7c,0x51da,0x47f2,0xca2a,0xc784,0xb45e,0x991e,0xf8ab,0x949,0xc273,0xf9af,0x56aa,0xdeb0,0x3fa0,0xe92d,0x631b,0xdb90,0xedc1,0xc12c,0x5f80,0xd1db,0x769b,0xa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x22b3ed63,0x35c41d4d,0x637493f4,0xfbed9706,0x8a7cae57,0x47f251da,0xc784ca2a,0x991eb45e,0x949f8ab,0xf9afc273,0xdeb056aa,0xe92d3fa0,0xdb90631b,0xc12cedc1,0xd1db5f80,0xa769b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x35c41d4d22b3ed63,0xfbed9706637493f4,0x47f251da8a7cae57,0x991eb45ec784ca2a,0xf9afc2730949f8ab,0xe92d3fa0deb056aa,0xc12cedc1db90631b,0xa769bd1db5f80}}} +#endif +}}}, {{{ +#if 0 +#elif RADIX == 16 +{0x1414, 0x1912, 0xddb, 0xb9, 0x121b, 0x195d, 0x6c5, 0xe75, 0x1d16, 0xcaf, 0x1a05, 0x15cc, 0x1537, 0x1c57, 0x141a, 0x1b9d, 0x5b0, 0x9e7, 0xf10, 0x300, 0x1d5c, 0xc13, 0x245, 0x1b91, 0x352, 0x730, 0xd55, 0x1ca3, 0x1dac, 0x7b0, 0x15b9, 0x1ba6, 0x7d6, 0xba4, 0xc12, 0x649, 0xf1, 0xd51, 0x107} +#elif RADIX == 32 +{0xa0a1383, 0x12ddbc89, 0x1764360b, 0x13a9b172, 0xcafe8b3, 0x1eb99a05, 0xd715ea6, 0x1b0dced0, 0x1e209e72, 0x1f570600, 0x11122b04, 0x60352dc, 0x128daaa7, 0x13d876b3, 0xdba6adc, 0x497487d, 0x7899258, 0x208} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x360b96ede44a8284, 0xbfa2ce75362e5764, 0xd715ea6f5ccd02b2, 0x788279cb61b9da0, 0x5b91122b04fab830, 0x3b59ca36aa9cc06a, 0x2e90fadba6adc9ec, 0x17b5441e2649609} +#else +{0x172ddbc8950509, 0xce75362e576436, 0x137ae6681595fd1, 0x12d86e76835c57a, 0x9f570600f104f3, 0x1cc06a5b91122b0, 0x4f61dace51b554, 0x24ba43eb6e9ab7, 0x146a883c4c92c} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} 
+#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, {{ +#if 0 +#elif RADIX == 16 +{0x1507, 0x1e44, 0xb76, 0x182e, 0xc86, 0xe57, 0x9b1, 0x139d, 0x1f45, 0xb2b, 0x681, 0x1d73, 0x1d4d, 0x1715, 0xd06, 0x6e7, 0x196c, 0x279, 0x3c4, 0xc0, 0x1f57, 0xb04, 0x891, 0x16e4, 0xd4, 0x9cc, 0x1b55, 0x728, 0x76b, 0x9ec, 0x156e, 0x16e9, 0x1f5, 0x12e9, 0xb04, 0x992, 0x83c, 0x1b54, 0x2c1} +#elif RADIX == 32 +{0xa83b449, 0x1cb76f22, 0x15d90d82, 0x1cea6c5c, 0xb2bfa2c, 0x17ae6681, 0x35c57a9, 0x16c373b4, 0x788279c, 0x7d5c180, 0x4448ac1, 0x1980d4b7, 0x1ca36aa9, 0x4f61dac, 0xb6e9ab7, 0x125d21f, 0x1e26496, 0x122} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xd82e5bb7912a0ed, 0xafe8b39d4d8b95d9, 0x35c57a9bd73340ac, 0x1e209e72d86e768, 0x96e4448ac13eae0c, 0xed6728daaa7301a, 0x4ba43eb6e9ab727b, 0x1ed51078992582} +#else +{0x105cb76f22541da, 0xb39d4d8b95d90d, 0x14deb99a05657f4, 0x1cb61b9da0d715e, 0x27d5c1803c413c, 0x7301a96e4448ac, 0x193d876b3946d55, 0x92e90fadba6ad, 0x3daa20f1324b} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, true}, {{{ +#if 0 +#elif RADIX == 16 +{0xa07, 0x1f97, 0x13c4, 0xb69, 0x15ec, 0x161d, 0x194, 0x135c, 0xe18, 0x119a, 0x684, 0x199, 0x1a93, 0x906, 0x62e, 0x1ad4, 0xc99, 0x40b, 0x10df, 0xf12, 0x9ee, 0x93, 0x1837, 0x42d, 0x1ea3, 0x1967, 0x1d41, 0x422, 0x2d5, 0x17d0, 0x1550, 0x1c2d, 0x139a, 0x152b, 0xa57, 0x1072, 0x13bf, 0x1fe7, 0x57a} +#elif RADIX == 32 +{0x1503e7ec, 0x133c4fcb, 0x76bd8b6, 0x1ae0652c, 0x119a70c4, 0xc332684, 0x17241b52, 0x99d6a18, 0x1be40b6, 0x1a7b9e25, 0xdc1b824, 0xcfea321, 0x108ba839, 0xbe80b54, 
0x15c2daa8, 0x15ea5739, 0x1dfc1c94, 0xd3c} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xd8b699e27e5d40f9, 0x69c3135c0ca5876b, 0x7241b52619934246, 0x286f902d933ad431, 0x642dc1b824d3dcf1, 0x5aa422ea0e59fd4, 0xd4ae735c2daa85f4, 0x1a7f9e77f07252b} +#else +{0x16d33c4fcba81f3, 0x1135c0ca5876bd8, 0x930cc9a12334e1, 0x164ceb50c5c906d, 0x9a7b9e250df205, 0x59fd4642dc1b82, 0x2fa02d52117507, 0xaf52b9cd70b6aa, 0x19ff3cefe0e4a} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x2, 0x2d8, 0x113e, 0xa74, 0x660, 0x141f, 0x64f, 0x885, 0x46, 0x17b9, 0x94f, 0x1b44, 0x361, 0xbf6, 0x1f17, 0x583, 0x18b3, 0x118e, 0x9ba, 0x49f, 0x1fc3, 0x13eb, 0x11c8, 0xcc8, 0x1b2d, 0x8c, 0x9c6, 0x1d9, 0xf33, 0x53d, 0x129a, 0x1b4a, 0x65, 0x169a, 0xe74, 0x544, 0x17e3, 0x1f0f, 0x2a6} +#elif RADIX == 32 +{0x1324b, 0x913e16c, 0x7ccc0a7, 0x42993e8, 0x17b90232, 0x768894f, 0xbafd86c, 0xb32c1fc, 0x137518ec, 0x1ff0c93e, 0x88e44fa, 0x119b2d66, 0x76538c0, 0x29ebccc, 0xbb4a94d, 0x1d2d3406, 0x1f19511c, 0x3fd} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xc0a7489f0b60004c, 0xe408c885327d07cc, 0xbafd86c3b444a7de, 0xf4dd463b166583f8, 0xacc88e44faff8649, 0x5e661d94e3023365, 0x5a680cbb4a94d14f, 0xf7c3efc654473a} +#else +{0x14e913e16c00099, 0xc885327d07ccc0, 0x161da2253ef7204, 0xc59960fe2ebf61, 0x15ff0c93e9ba8c7, 0x23365acc88e44f, 0x8a7af330eca718, 0xe969a032ed2a53, 0x3f87df8ca88e} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0xe6e, 0xc55, 0xb5a, 0x1be4, 0x10f8, 0x1175, 0x1ada, 0x13de, 0xa0d, 0x1cb, 0x6f3, 0x91f, 0x70c, 0x12ef, 0x1403, 0x115a, 0x1205, 0x1705, 0xb8a, 0x490, 0x681, 0x1a6f, 0xd49, 0x2ca, 0x7e2, 0x1ad8, 0x1aa6, 0x9e8, 0x1f0f, 0x1df, 0xc32, 0xd30, 0x1a34, 0xfc4, 0x1519, 0x1cde, 0x7c9, 0x12da, 0x157} +#elif RADIX == 32 +{0x17371973, 0x8b5a62a, 0x1d61f1be, 0x1ef6b6a2, 0x1cb506c, 0x1123e6f3, 0x1cbbce1, 0x58ad50, 0x17157059, 0x19a04920, 0xa6a4e9b, 0x1b07e216, 0x7a354da, 0xeffc3d, 0x8d30619, 0x65f89a3, 0x1e4f37aa, 0x651} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf1be45ad3155cdc6, 0x2d41b3ded6d45d61, 0x1cbbce1891f37987, 0x5c55c1640b15aa0, 0x42ca6a4e9bcd0249, 0xfe1e9e8d536b60fc, 0xbf13468d30619077, 0x9cb68f93cdea8c} +#else +{0x17c8b5a62ab9b8c, 0x1b3ded6d45d61f1, 0x10c48f9bcc396a0, 0x1902c56a8072ef3, 0x179a04920b8ab82, 0xb60fc42ca6a4e9, 0x83bff0f4f46a9b, 0x32fc4d1a34c186, 0x1396d1f279bd5} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0xc71, 0x167c, 0x1de2, 0x708, 0xb78, 0x1797, 0x16d0, 0xc73, 0x1f29, 0x1014, 0x1753, 0x1dd9, 0x1326, 0xab2, 0x1e6e, 
0x51a, 0x32d, 0x7c1, 0x127b, 0x1b08, 0xcd4, 0x5fd, 0x159a, 0xb2c, 0x137d, 0x28f, 0xc4f, 0x121a, 0x16dd, 0x1771, 0xa7b, 0x11b9, 0xe86, 0x199c, 0x1cb5, 0x2db, 0x14b3, 0x1e97, 0x7b} +#elif RADIX == 32 +{0x638892e, 0x11de2b3e, 0x5d6f070, 0x39db42f, 0x1014f94b, 0x1bbb3753, 0x172aca64, 0x12d28d79, 0x4f67c11, 0xb353611, 0xcacd17f, 0x11f37d59, 0x86989e2, 0x1bb8db76, 0xd1b953d, 0xd7338e8, 0x598b6f9, 0x7bd} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xf0708ef159f18e22, 0x53e52c73b685e5d6, 0x72aca64ddd9ba9c0, 0x893d9f0465a51af3, 0xab2cacd17f59a9b0, 0x6dbb21a6278a3e6f, 0xe671d0d1b953dddc, 0x7fa5e9662dbe5a} +#else +{0xe11de2b3e31c44, 0x12c73b685e5d6f0, 0x126eecdd4e029f2, 0x1196946bcdcab29, 0x1eb35361127b3e0, 0xa3e6fab2cacd17, 0xeee36dd90d313c, 0x16b99c74346e54f, 0xff4bd2cc5b7c} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}, {{ +#if 0 +#elif RADIX == 16 +{0x111d, 0x19ac, 0x1a8f, 0xc58, 0xaa, 0xdc, 0x13de, 0x1dc, 0x17a6, 0x1e3d, 0x198a, 0x40a, 0x120b, 0x17ba, 0x91c, 0x1858, 0xee4, 0x33b, 0x18aa, 0x1124, 0x5f8, 0x37d, 0xf3e, 0xa4b, 0x1e1, 0x2bd, 0x1ff2, 0x1a56, 0x1168, 0x739, 0x1fee, 0x190c, 0x13e9, 0xd07, 0x17fd, 0x1b9e, 0x198b, 0x1faa, 0xd2} +#elif RADIX == 32 +{0x88e8fa0, 0x11a8fcd6, 0x170154c5, 0xee4f781, 0x1e3dbd30, 0xc81598a, 0xe5eea41, 0xe4c2c24, 0x115433b7, 0x97e2249, 0xb79f0df, 0x17a1e152, 0x95bfe42, 0x39cc5a3, 0x1390cff7, 0x1f5a0f3e, 0xc5ee7af, 0xd56} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x54c58d47e6b223a3, 0xf6f4c1dc9ef03701, 0xe5eea41640acc578, 0x4c550ceddc985848, 0x2a4b79f0df4bf112, 0x62d1a56ff90af43c, 0xb41e7d390cff71ce, 0x187eab317b9ebfe} +#else +{0x18b1a8fcd644747, 0xc1dc9ef0370154, 0xb205662bc7b7a, 0x177261612397ba9, 0x1e97e22498aa19d, 0xaf43c2a4b79f0d, 0x18e73168d2b7fc8, 0x1fad079f4e433fd, 0x15fd5662f73d7} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x63c, 0x609, 0x89c, 0x1f09, 0x9c9, 0x1e89, 0x1826, 0x1460, 0x15d6, 0xa52, 0xbb2, 0x1b93, 0x1f90, 0xa2f, 0x3b3, 0x1a76, 0x1c29, 0x17fc, 0x864, 0x55a, 0x1a9b, 0x7fa, 0x7ee, 0x75f, 0x1b4b, 0x15e6, 0xd75, 0x1238, 0x847, 0x1711, 0x9e7, 0xa37, 0x4b6, 0x1264, 0x3e1, 0xf87, 0x1c47, 0x706, 0x20b} +#elif RADIX == 32 +{0x131e26c1, 0x1289c304, 0x25393f0, 0x30609bd, 0xa52aeb5, 0x3726bb2, 0x19a8bff2, 0x29d3b0e, 0x10c97fce, 0x16a6cab4, 0x1f3f71fe, 0x1cdb4b3a, 0x8e1aeb5, 0x1b88a11e, 0xca374f3, 0x1864c84b, 0x23be1c7, 0xab7} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x93f0944e1824c789, 0x4abad460c137a253, 0x9a8bff21b935d929, 0xa4325ff3853a761d, 0x675f3f71feb53655, 0x508f2386bad79b69, 
0xc99096ca374f3dc4, 0x129c1b88ef871f0} +#else +{0x1e1289c30498f13, 0xd460c137a25393, 0x190dc9aec94a55d, 0xe14e9d8766a2ff, 0x1d6a6cab4864bfe, 0x179b69675f3f71f, 0x1ee2284791c35d6, 0x1c326425b28dd3c, 0xa383711df0e3} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xfe45,0x561e,0xa798,0xc163,0xedaa,0x1fff,0xb3c0,0xda12,0x2588,0x5123,0x2390,0xface,0x6e59,0x651b,0xa21d,0xb190,0xe1b0,0x21b0,0x6030,0xd81,0x6542,0x64b0,0x8cef,0xa91c,0xe0bd,0xd947,0xe27b,0x961a,0x2a3e,0xf20a,0xed0,0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x561efe45,0xc163a798,0x1fffedaa,0xda12b3c0,0x51232588,0xface2390,0x651b6e59,0xb190a21d,0x21b0e1b0,0xd816030,0x64b06542,0xa91c8cef,0xd947e0bd,0x961ae27b,0xf20a2a3e,0x50ed0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc163a798561efe45,0xda12b3c01fffedaa,0xface239051232588,0xb190a21d651b6e59,0xd81603021b0e1b0,0xa91c8cef64b06542,0x961ae27bd947e0bd,0x50ed0f20a2a3e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xff6,0x4b77,0xed4e,0x3538,0xc757,0x3304,0xd547,0xc49f,0x801b,0xb2ff,0x4796,0xcad,0x58be,0x969c,0x3e77,0x83e1,0xa371,0x7d43,0x6664,0x3720,0x6c98,0xe4e5,0x74dc,0xebbc,0xaab5,0xc7e0,0x3147,0x5bc9,0x537e,0x7ff5,0xb400,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4b770ff6,0x3538ed4e,0x3304c757,0xc49fd547,0xb2ff801b,0xcad4796,0x969c58be,0x83e13e77,0x7d43a371,0x37206664,0xe4e56c98,0xebbc74dc,0xc7e0aab5,0x5bc93147,0x7ff5537e,0xcb400}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3538ed4e4b770ff6,0xc49fd5473304c757,0xcad4796b2ff801b,0x83e13e77969c58be,0x372066647d43a371,0xebbc74dce4e56c98,0x5bc93147c7e0aab5,0xcb4007ff5537e}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x77eb,0xce58,0xfaa0,0x429c,0xc7dd,0xbe89,0x68a,0xc196,0xd249,0x63ae,0xa04a,0xbc4e,0xbbb5,0x64ad,0xfc2b,0x4738,0x4d4d,0x918a,0x6b55,0x459d,0xf5ab,0x748c,0x208a,0xfe42,0x21d2,0x3785,0x23d7,0x28cc,0xfd9f,0x106a,0x3406,0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xce5877eb,0x429cfaa0,0xbe89c7dd,0xc196068a,0x63aed249,0xbc4ea04a,0x64adbbb5,0x4738fc2b,0x918a4d4d,0x459d6b55,0x748cf5ab,0xfe42208a,0x378521d2,0x28cc23d7,0x106afd9f,0xd3406}}} +#elif GMP_LIMB_BITS == 64 
+{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x429cfaa0ce5877eb,0xc196068abe89c7dd,0xbc4ea04a63aed249,0x4738fc2b64adbbb5,0x459d6b55918a4d4d,0xfe42208a748cf5ab,0x28cc23d7378521d2,0xd3406106afd9f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1bb,0xa9e1,0x5867,0x3e9c,0x1255,0xe000,0x4c3f,0x25ed,0xda77,0xaedc,0xdc6f,0x531,0x91a6,0x9ae4,0x5de2,0x4e6f,0x1e4f,0xde4f,0x9fcf,0xf27e,0x9abd,0x9b4f,0x7310,0x56e3,0x1f42,0x26b8,0x1d84,0x69e5,0xd5c1,0xdf5,0xf12f,0xa}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa9e101bb,0x3e9c5867,0xe0001255,0x25ed4c3f,0xaedcda77,0x531dc6f,0x9ae491a6,0x4e6f5de2,0xde4f1e4f,0xf27e9fcf,0x9b4f9abd,0x56e37310,0x26b81f42,0x69e51d84,0xdf5d5c1,0xaf12f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e9c5867a9e101bb,0x25ed4c3fe0001255,0x531dc6faedcda77,0x4e6f5de29ae491a6,0xf27e9fcfde4f1e4f,0x56e373109b4f9abd,0x69e51d8426b81f42,0xaf12f0df5d5c1}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS 
== 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x39f7,0x51a0,0x71ea,0x7557,0x794c,0x6b5e,0x6a81,0x9aa7,0xd8dd,0xab85,0xe387,0x2121,0x1086,0x7989,0xe273,0xf813,0xebd5,0xb13f,0x9ef5,0xc6d5,0x2da2,0x14f8,0xecf3,0x24c4,0xf485,0xc8de,0xb9ef,0xb213,0xbc4d,0xe587,0xd591,0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x51a039f7,0x755771ea,0x6b5e794c,0x9aa76a81,0xab85d8dd,0x2121e387,0x79891086,0xf813e273,0xb13febd5,0xc6d59ef5,0x14f82da2,0x24c4ecf3,0xc8def485,0xb213b9ef,0xe587bc4d,0xdd591}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x755771ea51a039f7,0x9aa76a816b5e794c,0x2121e387ab85d8dd,0xf813e27379891086,0xc6d59ef5b13febd5,0x24c4ecf314f82da2,0xb213b9efc8def485,0xdd591e587bc4d}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc5d4,0x133f,0xc116,0x2a9e,0xacf5,0xaedd,0x6173,0xdacf,0x6448,0xa33e,0x6d36,0x5013,0x2093,0x59f6,0xe571,0x906d,0x37c9,0xe4ab,0xb92a,0xbe30,0x1d49,0xde58,0xffc8,0x47ff,0xe0cb,0x6230,0x6128,0x8679,0x731c,0xc5e,0x66c7,0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x133fc5d4,0x2a9ec116,0xaeddacf5,0xdacf6173,0xa33e6448,0x50136d36,0x59f62093,0x906de571,0xe4ab37c9,0xbe30b92a,0xde581d49,0x47ffffc8,0x6230e0cb,0x86796128,0xc5e731c,0xd66c7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2a9ec116133fc5d4,0xdacf6173aeddacf5,0x50136d36a33e6448,0x906de57159f62093,0xbe30b92ae4ab37c9,0x47ffffc8de581d49,0x867961286230e0cb,0xd66c70c5e731c}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x55ad,0x2e3e,0xd0dc,0x8dad,0x4e0a,0xe1d0,0x3e27,0x81af,0x1bb4,0xa5fa,0x52f2,0x5bd4,0x2b9b,0xddfe,0x36,0xbdd4,0xf99a,0x3027,0x21d2,0x7b29,0x10ee,0x2146,0x6864,0xec5c,0x6bbd,0x540f,0xbc15,0xe4a1,0xee,0x3d9c,0xdf51,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2e3e55ad,0x8dadd0dc,0xe1d04e0a,0x81af3e27,0xa5fa1bb4,0x5bd452f2,0xddfe2b9b,0xbdd40036,0x3027f99a,0x7b2921d2,0x214610ee,0xec5c6864,0x540f6bbd,0xe4a1bc15,0x3d9c00ee,0x4df51}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8dadd0dc2e3e55ad,0x81af3e27e1d04e0a,0x5bd452f2a5fa1bb4,0xbdd40036ddfe2b9b,0x7b2921d23027f99a,0xec5c6864214610ee,0xe4a1bc15540f6bbd,0x4df513d9c00ee}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xc609,0xae5f,0x8e15,0x8aa8,0x86b3,0x94a1,0x957e,0x6558,0x2722,0x547a,0x1c78,0xdede,0xef79,0x8676,0x1d8c,0x7ec,0x142a,0x4ec0,0x610a,0x392a,0xd25d,0xeb07,0x130c,0xdb3b,0xb7a,0x3721,0x4610,0x4dec,0x43b2,0x1a78,0x2a6e,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xae5fc609,0x8aa88e15,0x94a186b3,0x6558957e,0x547a2722,0xdede1c78,0x8676ef79,0x7ec1d8c,0x4ec0142a,0x392a610a,0xeb07d25d,0xdb3b130c,0x37210b7a,0x4dec4610,0x1a7843b2,0x22a6e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8aa88e15ae5fc609,0x6558957e94a186b3,0xdede1c78547a2722,0x7ec1d8c8676ef79,0x392a610a4ec0142a,0xdb3b130ceb07d25d,0x4dec461037210b7a,0x22a6e1a7843b2}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xfe45,0x561e,0xa798,0xc163,0xedaa,0x1fff,0xb3c0,0xda12,0x2588,0x5123,0x2390,0xface,0x6e59,0x651b,0xa21d,0xb190,0xe1b0,0x21b0,0x6030,0xd81,0x6542,0x64b0,0x8cef,0xa91c,0xe0bd,0xd947,0xe27b,0x961a,0x2a3e,0xf20a,0xed0,0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x561efe45,0xc163a798,0x1fffedaa,0xda12b3c0,0x51232588,0xface2390,0x651b6e59,0xb190a21d,0x21b0e1b0,0xd816030,0x64b06542,0xa91c8cef,0xd947e0bd,0x961ae27b,0xf20a2a3e,0x50ed0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc163a798561efe45,0xda12b3c01fffedaa,0xface239051232588,0xb190a21d651b6e59,0xd81603021b0e1b0,0xa91c8cef64b06542,0x961ae27bd947e0bd,0x50ed0f20a2a3e}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xff6,0x4b77,0xed4e,0x3538,0xc757,0x3304,0xd547,0xc49f,0x801b,0xb2ff,0x4796,0xcad,0x58be,0x969c,0x3e77,0x83e1,0xa371,0x7d43,0x6664,0x3720,0x6c98,0xe4e5,0x74dc,0xebbc,0xaab5,0xc7e0,0x3147,0x5bc9,0x537e,0x7ff5,0xb400,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4b770ff6,0x3538ed4e,0x3304c757,0xc49fd547,0xb2ff801b,0xcad4796,0x969c58be,0x83e13e77,0x7d43a371,0x37206664,0xe4e56c98,0xebbc74dc,0xc7e0aab5,0x5bc93147,0x7ff5537e,0xcb400}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3538ed4e4b770ff6,0xc49fd5473304c757,0xcad4796b2ff801b,0x83e13e77969c58be,0x372066647d43a371,0xebbc74dce4e56c98,0x5bc93147c7e0aab5,0xcb4007ff5537e}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x77eb,0xce58,0xfaa0,0x429c,0xc7dd,0xbe89,0x68a,0xc196,0xd249,0x63ae,0xa04a,0xbc4e,0xbbb5,0x64ad,0xfc2b,0x4738,0x4d4d,0x918a,0x6b55,0x459d,0xf5ab,0x748c,0x208a,0xfe42,0x21d2,0x3785,0x23d7,0x28cc,0xfd9f,0x106a,0x3406,0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xce5877eb,0x429cfaa0,0xbe89c7dd,0xc196068a,0x63aed249,0xbc4ea04a,0x64adbbb5,0x4738fc2b,0x918a4d4d,0x459d6b55,0x748cf5ab,0xfe42208a,0x378521d2,0x28cc23d7,0x106afd9f,0xd3406}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x429cfaa0ce5877eb,0xc196068abe89c7dd,0xbc4ea04a63aed249,0x4738fc2b64adbbb5,0x459d6b55918a4d4d,0xfe42208a748cf5ab,0x28cc23d7378521d2,0xd3406106afd9f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1bb,0xa9e1,0x5867,0x3e9c,0x1255,0xe000,0x4c3f,0x25ed,0xda77,0xaedc,0xdc6f,0x531,0x91a6,0x9ae4,0x5de2,0x4e6f,0x1e4f,0xde4f,0x9fcf,0xf27e,0x9abd,0x9b4f,0x7310,0x56e3,0x1f42,0x26b8,0x1d84,0x69e5,0xd5c1,0xdf5,0xf12f,0xa}}} +#elif GMP_LIMB_BITS == 32 
+{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa9e101bb,0x3e9c5867,0xe0001255,0x25ed4c3f,0xaedcda77,0x531dc6f,0x9ae491a6,0x4e6f5de2,0xde4f1e4f,0xf27e9fcf,0x9b4f9abd,0x56e37310,0x26b81f42,0x69e51d84,0xdf5d5c1,0xaf12f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e9c5867a9e101bb,0x25ed4c3fe0001255,0x531dc6faedcda77,0x4e6f5de29ae491a6,0xf27e9fcfde4f1e4f,0x56e373109b4f9abd,0x69e51d8426b81f42,0xaf12f0df5d5c1}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +#elif 
GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +#endif +}}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9e37,0x619b,0xa159,0x8865,0xab15,0x85c2,0xb3b,0x57ce,0x8108,0xa8d6,0xfeb0,0x8cf0,0xef13,0xc7e1,0x6936,0xc3a9,0xd8f2,0x9c5d,0x7c68,0x7ba2,0xf4da,0x4c63,0x845b,0x22eb,0xbedd,0x37a0,0x24f3,0x7019,0x2855,0x6905,0xb81c,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x619b9e37,0x8865a159,0x85c2ab15,0x57ce0b3b,0xa8d68108,0x8cf0feb0,0xc7e1ef13,0xc3a96936,0x9c5dd8f2,0x7ba27c68,0x4c63f4da,0x22eb845b,0x37a0bedd,0x701924f3,0x69052855,0x3b81c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8865a159619b9e37,0x57ce0b3b85c2ab15,0x8cf0feb0a8d68108,0xc3a96936c7e1ef13,0x7ba27c689c5dd8f2,0x22eb845b4c63f4da,0x701924f337a0bedd,0x3b81c69052855}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x92b5,0x1309,0xc1ee,0xadd1,0x165,0x4911,0xaf0c,0x4a4f,0x5374,0xd4b2,0x926f,0xacc0,0xfd2f,0xeb63,0x7c68,0xc188,0x41ce,0x152e,0x6cfe,0x9a22,0xadb,0x933,0x438c,0x5fef,0xe17a,0x82aa,0x7732,0x8c5b,0xfa7b,0x4cd4,0xdcee,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x130992b5,0xadd1c1ee,0x49110165,0x4a4faf0c,0xd4b25374,0xacc0926f,0xeb63fd2f,0xc1887c68,0x152e41ce,0x9a226cfe,0x9330adb,0x5fef438c,0x82aae17a,0x8c5b7732,0x4cd4fa7b,0x6dcee}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xadd1c1ee130992b5,0x4a4faf0c49110165,0xacc0926fd4b25374,0xc1887c68eb63fd2f,0x9a226cfe152e41ce,0x5fef438c09330adb,0x8c5b773282aae17a,0x6dcee4cd4fa7b}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x77b7,0xc00c,0x743e,0x91b3,0xc92c,0x3be,0xc9e8,0x4b6b,0x519c,0xed1b,0x857f,0x2be7,0x2270,0x64a0,0x3a21,0xd5ec,0xd5d1,0x2392,0x175a,0xa58f,0x5c36,0x3908,0x5f46,0x1875,0xee40,0xcd4a,0x7e0b,0x8eda,0x87e0,0xc28c,0x6e24,0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc00c77b7,0x91b3743e,0x3bec92c,0x4b6bc9e8,0xed1b519c,0x2be7857f,0x64a02270,0xd5ec3a21,0x2392d5d1,0xa58f175a,0x39085c36,0x18755f46,0xcd4aee40,0x8eda7e0b,0xc28c87e0,0xd6e24}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x91b3743ec00c77b7,0x4b6bc9e803bec92c,0x2be7857fed1b519c,0xd5ec3a2164a02270,0xa58f175a2392d5d1,0x18755f4639085c36,0x8eda7e0bcd4aee40,0xd6e24c28c87e0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x61c9,0x9e64,0x5ea6,0x779a,0x54ea,0x7a3d,0xf4c4,0xa831,0x7ef7,0x5729,0x14f,0x730f,0x10ec,0x381e,0x96c9,0x3c56,0x270d,0x63a2,0x8397,0x845d,0xb25,0xb39c,0x7ba4,0xdd14,0x4122,0xc85f,0xdb0c,0x8fe6,0xd7aa,0x96fa,0x47e3,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9e6461c9,0x779a5ea6,0x7a3d54ea,0xa831f4c4,0x57297ef7,0x730f014f,0x381e10ec,0x3c5696c9,0x63a2270d,0x845d8397,0xb39c0b25,0xdd147ba4,0xc85f4122,0x8fe6db0c,0x96fad7aa,0xc47e3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x779a5ea69e6461c9,0xa831f4c47a3d54ea,0x730f014f57297ef7,0x3c5696c9381e10ec,0x845d839763a2270d,0xdd147ba4b39c0b25,0x8fe6db0cc85f4122,0xc47e396fad7aa}}} +#endif +}}}}; 
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/endomorphism_action.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/endomorphism_action.h new file mode 100644 index 0000000000..1cc782a5bd --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/endomorphism_action.h @@ -0,0 +1,31 @@ +#ifndef ENDOMORPHISM_ACTION_H +#define ENDOMORPHISM_ACTION_H +#include +#include +#include +/** Type for precomputed endomorphism rings applied to precomputed torsion bases. + * + * Precomputed by the precompute scripts. + * + * @typedef curve_with_endomorphism_ring_t + * + * @struct curve_with_endomorphism_ring + **/ +typedef struct curve_with_endomorphism_ring { + ec_curve_t curve; + ec_basis_t basis_even; + ibz_mat_2x2_t action_i, action_j, action_k; + ibz_mat_2x2_t action_gen2, action_gen3, action_gen4; +} curve_with_endomorphism_ring_t; +#define CURVE_E0 (CURVES_WITH_ENDOMORPHISMS->curve) +#define BASIS_EVEN (CURVES_WITH_ENDOMORPHISMS->basis_even) +#define ACTION_I (CURVES_WITH_ENDOMORPHISMS->action_i) +#define ACTION_J (CURVES_WITH_ENDOMORPHISMS->action_j) +#define ACTION_K (CURVES_WITH_ENDOMORPHISMS->action_k) +#define ACTION_GEN2 (CURVES_WITH_ENDOMORPHISMS->action_gen2) +#define ACTION_GEN3 (CURVES_WITH_ENDOMORPHISMS->action_gen3) +#define ACTION_GEN4 (CURVES_WITH_ENDOMORPHISMS->action_gen4) +#define NUM_ALTERNATE_STARTING_CURVES 6 +#define ALTERNATE_STARTING_CURVES (CURVES_WITH_ENDOMORPHISMS+1) +extern const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7]; +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/finit.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/finit.c new file mode 100644 index 0000000000..b3808edf07 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/finit.c @@ -0,0 +1,122 @@ +#include "internal.h" + +void +quat_alg_init_set(quat_alg_t *alg, const ibz_t *p) +{ + ibz_init(&(*alg).p); + ibz_copy(&(*alg).p, p); +} +void +quat_alg_finalize(quat_alg_t *alg) +{ + ibz_finalize(&(*alg).p); +} + +void +quat_alg_elem_init(quat_alg_elem_t *elem) +{ + ibz_vec_4_init(&(*elem).coord); + ibz_init(&(*elem).denom); + ibz_set(&(*elem).denom, 1); +} +void +quat_alg_elem_finalize(quat_alg_elem_t *elem) +{ + ibz_vec_4_finalize(&(*elem).coord); + ibz_finalize(&(*elem).denom); +} + +void +ibz_vec_2_init(ibz_vec_2_t *vec) +{ + ibz_init(&((*vec)[0])); + ibz_init(&((*vec)[1])); +} + +void +ibz_vec_2_finalize(ibz_vec_2_t *vec) +{ + ibz_finalize(&((*vec)[0])); + ibz_finalize(&((*vec)[1])); +} + +void +ibz_vec_4_init(ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_init(&(*vec)[i]); + } +} +void +ibz_vec_4_finalize(ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_finalize(&(*vec)[i]); + } +} + +void +ibz_mat_2x2_init(ibz_mat_2x2_t *mat) +{ + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_init(&(*mat)[i][j]); + } + } +} +void +ibz_mat_2x2_finalize(ibz_mat_2x2_t *mat) +{ + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_finalize(&(*mat)[i][j]); + } + } +} + +void +ibz_mat_4x4_init(ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_init(&(*mat)[i][j]); + } + } +} +void +ibz_mat_4x4_finalize(ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_finalize(&(*mat)[i][j]); + } + } +} + +void +quat_lattice_init(quat_lattice_t *lat) +{ + ibz_mat_4x4_init(&(*lat).basis); + ibz_init(&(*lat).denom); + ibz_set(&(*lat).denom, 1); +} +void +quat_lattice_finalize(quat_lattice_t *lat) +{ + ibz_finalize(&(*lat).denom); + 
ibz_mat_4x4_finalize(&(*lat).basis); +} + +void +quat_left_ideal_init(quat_left_ideal_t *lideal) +{ + quat_lattice_init(&(*lideal).lattice); + ibz_init(&(*lideal).norm); + (*lideal).parent_order = NULL; +} +void +quat_left_ideal_finalize(quat_left_ideal_t *lideal) +{ + ibz_finalize(&(*lideal).norm); + quat_lattice_finalize(&(*lideal).lattice); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fips202.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fips202.c new file mode 100644 index 0000000000..f2992d8c7f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fips202.c @@ -0,0 +1,876 @@ +// SPDX-License-Identifier: PD and Apache-2.0 + +/* FIPS202 implementation based on code from PQClean, + * which is in turn based based on the public domain implementation in + * crypto_hash/keccakc512/simple/ from http://bench.cr.yp.to/supercop.html + * by Ronny Van Keer + * and the public domain "TweetFips202" implementation + * from https://twitter.com/tweetfips202 + * by Gilles Van Assche, Daniel J. Bernstein, and Peter Schwabe */ + +#include +#include +#include +#include + +#include "fips202.h" + +#define NROUNDS 24 +#define ROL(a, offset) (((a) << (offset)) ^ ((a) >> (64 - (offset)))) + +/************************************************* + * Name: load64 + * + * Description: Load 8 bytes into uint64_t in little-endian order + * + * Arguments: - const uint8_t *x: pointer to input byte array + * + * Returns the loaded 64-bit unsigned integer + **************************************************/ +static uint64_t load64(const uint8_t *x) { + uint64_t r = 0; + for (size_t i = 0; i < 8; ++i) { + r |= (uint64_t)x[i] << 8 * i; + } + + return r; +} + +/************************************************* + * Name: store64 + * + * Description: Store a 64-bit integer to a byte array in little-endian order + * + * Arguments: - uint8_t *x: pointer to the output byte array + * - uint64_t u: input 64-bit unsigned integer + **************************************************/ +static void store64(uint8_t *x, uint64_t u) { + for (size_t i = 0; i < 8; ++i) { + x[i] = (uint8_t) (u >> 8 * i); + } +} + +/* Keccak round constants */ +static const uint64_t KeccakF_RoundConstants[NROUNDS] = { + 0x0000000000000001ULL, 0x0000000000008082ULL, + 0x800000000000808aULL, 0x8000000080008000ULL, + 0x000000000000808bULL, 0x0000000080000001ULL, + 0x8000000080008081ULL, 0x8000000000008009ULL, + 0x000000000000008aULL, 0x0000000000000088ULL, + 0x0000000080008009ULL, 0x000000008000000aULL, + 0x000000008000808bULL, 0x800000000000008bULL, + 0x8000000000008089ULL, 0x8000000000008003ULL, + 0x8000000000008002ULL, 0x8000000000000080ULL, + 0x000000000000800aULL, 0x800000008000000aULL, + 0x8000000080008081ULL, 0x8000000000008080ULL, + 0x0000000080000001ULL, 0x8000000080008008ULL +}; + +/************************************************* + * Name: KeccakF1600_StatePermute + * + * Description: The Keccak F1600 Permutation + * + * Arguments: - uint64_t *state: pointer to input/output Keccak state + **************************************************/ +static void KeccakF1600_StatePermute(uint64_t *state) { + int round; + + uint64_t Aba, Abe, Abi, Abo, Abu; + uint64_t Aga, Age, Agi, Ago, Agu; + uint64_t Aka, Ake, Aki, Ako, Aku; + uint64_t Ama, Ame, Ami, Amo, Amu; + uint64_t Asa, Ase, Asi, Aso, Asu; + uint64_t BCa, BCe, BCi, BCo, BCu; + uint64_t Da, De, Di, Do, Du; + uint64_t Eba, Ebe, Ebi, Ebo, Ebu; + uint64_t Ega, Ege, Egi, Ego, Egu; + uint64_t Eka, Eke, Eki, Eko, Eku; + uint64_t Ema, Eme, Emi, Emo, Emu; + uint64_t Esa, Ese, Esi, 
Eso, Esu; + + // copyFromState(A, state) + Aba = state[0]; + Abe = state[1]; + Abi = state[2]; + Abo = state[3]; + Abu = state[4]; + Aga = state[5]; + Age = state[6]; + Agi = state[7]; + Ago = state[8]; + Agu = state[9]; + Aka = state[10]; + Ake = state[11]; + Aki = state[12]; + Ako = state[13]; + Aku = state[14]; + Ama = state[15]; + Ame = state[16]; + Ami = state[17]; + Amo = state[18]; + Amu = state[19]; + Asa = state[20]; + Ase = state[21]; + Asi = state[22]; + Aso = state[23]; + Asu = state[24]; + + for (round = 0; round < NROUNDS; round += 2) { + // prepareTheta + BCa = Aba ^ Aga ^ Aka ^ Ama ^ Asa; + BCe = Abe ^ Age ^ Ake ^ Ame ^ Ase; + BCi = Abi ^ Agi ^ Aki ^ Ami ^ Asi; + BCo = Abo ^ Ago ^ Ako ^ Amo ^ Aso; + BCu = Abu ^ Agu ^ Aku ^ Amu ^ Asu; + + // thetaRhoPiChiIotaPrepareTheta(round , A, E) + Da = BCu ^ ROL(BCe, 1); + De = BCa ^ ROL(BCi, 1); + Di = BCe ^ ROL(BCo, 1); + Do = BCi ^ ROL(BCu, 1); + Du = BCo ^ ROL(BCa, 1); + + Aba ^= Da; + BCa = Aba; + Age ^= De; + BCe = ROL(Age, 44); + Aki ^= Di; + BCi = ROL(Aki, 43); + Amo ^= Do; + BCo = ROL(Amo, 21); + Asu ^= Du; + BCu = ROL(Asu, 14); + Eba = BCa ^ ((~BCe) & BCi); + Eba ^= KeccakF_RoundConstants[round]; + Ebe = BCe ^ ((~BCi) & BCo); + Ebi = BCi ^ ((~BCo) & BCu); + Ebo = BCo ^ ((~BCu) & BCa); + Ebu = BCu ^ ((~BCa) & BCe); + + Abo ^= Do; + BCa = ROL(Abo, 28); + Agu ^= Du; + BCe = ROL(Agu, 20); + Aka ^= Da; + BCi = ROL(Aka, 3); + Ame ^= De; + BCo = ROL(Ame, 45); + Asi ^= Di; + BCu = ROL(Asi, 61); + Ega = BCa ^ ((~BCe) & BCi); + Ege = BCe ^ ((~BCi) & BCo); + Egi = BCi ^ ((~BCo) & BCu); + Ego = BCo ^ ((~BCu) & BCa); + Egu = BCu ^ ((~BCa) & BCe); + + Abe ^= De; + BCa = ROL(Abe, 1); + Agi ^= Di; + BCe = ROL(Agi, 6); + Ako ^= Do; + BCi = ROL(Ako, 25); + Amu ^= Du; + BCo = ROL(Amu, 8); + Asa ^= Da; + BCu = ROL(Asa, 18); + Eka = BCa ^ ((~BCe) & BCi); + Eke = BCe ^ ((~BCi) & BCo); + Eki = BCi ^ ((~BCo) & BCu); + Eko = BCo ^ ((~BCu) & BCa); + Eku = BCu ^ ((~BCa) & BCe); + + Abu ^= Du; + BCa = ROL(Abu, 27); + Aga ^= Da; + BCe = ROL(Aga, 36); + Ake ^= De; + BCi = ROL(Ake, 10); + Ami ^= Di; + BCo = ROL(Ami, 15); + Aso ^= Do; + BCu = ROL(Aso, 56); + Ema = BCa ^ ((~BCe) & BCi); + Eme = BCe ^ ((~BCi) & BCo); + Emi = BCi ^ ((~BCo) & BCu); + Emo = BCo ^ ((~BCu) & BCa); + Emu = BCu ^ ((~BCa) & BCe); + + Abi ^= Di; + BCa = ROL(Abi, 62); + Ago ^= Do; + BCe = ROL(Ago, 55); + Aku ^= Du; + BCi = ROL(Aku, 39); + Ama ^= Da; + BCo = ROL(Ama, 41); + Ase ^= De; + BCu = ROL(Ase, 2); + Esa = BCa ^ ((~BCe) & BCi); + Ese = BCe ^ ((~BCi) & BCo); + Esi = BCi ^ ((~BCo) & BCu); + Eso = BCo ^ ((~BCu) & BCa); + Esu = BCu ^ ((~BCa) & BCe); + + // prepareTheta + BCa = Eba ^ Ega ^ Eka ^ Ema ^ Esa; + BCe = Ebe ^ Ege ^ Eke ^ Eme ^ Ese; + BCi = Ebi ^ Egi ^ Eki ^ Emi ^ Esi; + BCo = Ebo ^ Ego ^ Eko ^ Emo ^ Eso; + BCu = Ebu ^ Egu ^ Eku ^ Emu ^ Esu; + + // thetaRhoPiChiIotaPrepareTheta(round+1, E, A) + Da = BCu ^ ROL(BCe, 1); + De = BCa ^ ROL(BCi, 1); + Di = BCe ^ ROL(BCo, 1); + Do = BCi ^ ROL(BCu, 1); + Du = BCo ^ ROL(BCa, 1); + + Eba ^= Da; + BCa = Eba; + Ege ^= De; + BCe = ROL(Ege, 44); + Eki ^= Di; + BCi = ROL(Eki, 43); + Emo ^= Do; + BCo = ROL(Emo, 21); + Esu ^= Du; + BCu = ROL(Esu, 14); + Aba = BCa ^ ((~BCe) & BCi); + Aba ^= KeccakF_RoundConstants[round + 1]; + Abe = BCe ^ ((~BCi) & BCo); + Abi = BCi ^ ((~BCo) & BCu); + Abo = BCo ^ ((~BCu) & BCa); + Abu = BCu ^ ((~BCa) & BCe); + + Ebo ^= Do; + BCa = ROL(Ebo, 28); + Egu ^= Du; + BCe = ROL(Egu, 20); + Eka ^= Da; + BCi = ROL(Eka, 3); + Eme ^= De; + BCo = ROL(Eme, 45); + Esi ^= Di; + BCu = ROL(Esi, 61); + Aga = BCa ^ ((~BCe) & 
BCi); + Age = BCe ^ ((~BCi) & BCo); + Agi = BCi ^ ((~BCo) & BCu); + Ago = BCo ^ ((~BCu) & BCa); + Agu = BCu ^ ((~BCa) & BCe); + + Ebe ^= De; + BCa = ROL(Ebe, 1); + Egi ^= Di; + BCe = ROL(Egi, 6); + Eko ^= Do; + BCi = ROL(Eko, 25); + Emu ^= Du; + BCo = ROL(Emu, 8); + Esa ^= Da; + BCu = ROL(Esa, 18); + Aka = BCa ^ ((~BCe) & BCi); + Ake = BCe ^ ((~BCi) & BCo); + Aki = BCi ^ ((~BCo) & BCu); + Ako = BCo ^ ((~BCu) & BCa); + Aku = BCu ^ ((~BCa) & BCe); + + Ebu ^= Du; + BCa = ROL(Ebu, 27); + Ega ^= Da; + BCe = ROL(Ega, 36); + Eke ^= De; + BCi = ROL(Eke, 10); + Emi ^= Di; + BCo = ROL(Emi, 15); + Eso ^= Do; + BCu = ROL(Eso, 56); + Ama = BCa ^ ((~BCe) & BCi); + Ame = BCe ^ ((~BCi) & BCo); + Ami = BCi ^ ((~BCo) & BCu); + Amo = BCo ^ ((~BCu) & BCa); + Amu = BCu ^ ((~BCa) & BCe); + + Ebi ^= Di; + BCa = ROL(Ebi, 62); + Ego ^= Do; + BCe = ROL(Ego, 55); + Eku ^= Du; + BCi = ROL(Eku, 39); + Ema ^= Da; + BCo = ROL(Ema, 41); + Ese ^= De; + BCu = ROL(Ese, 2); + Asa = BCa ^ ((~BCe) & BCi); + Ase = BCe ^ ((~BCi) & BCo); + Asi = BCi ^ ((~BCo) & BCu); + Aso = BCo ^ ((~BCu) & BCa); + Asu = BCu ^ ((~BCa) & BCe); + } + + // copyToState(state, A) + state[0] = Aba; + state[1] = Abe; + state[2] = Abi; + state[3] = Abo; + state[4] = Abu; + state[5] = Aga; + state[6] = Age; + state[7] = Agi; + state[8] = Ago; + state[9] = Agu; + state[10] = Aka; + state[11] = Ake; + state[12] = Aki; + state[13] = Ako; + state[14] = Aku; + state[15] = Ama; + state[16] = Ame; + state[17] = Ami; + state[18] = Amo; + state[19] = Amu; + state[20] = Asa; + state[21] = Ase; + state[22] = Asi; + state[23] = Aso; + state[24] = Asu; +} + +/************************************************* + * Name: keccak_absorb + * + * Description: Absorb step of Keccak; + * non-incremental, starts by zeroeing the state. + * + * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + * - const uint8_t *m: pointer to input to be absorbed into s + * - size_t mlen: length of input in bytes + * - uint8_t p: domain-separation byte for different + * Keccak-derived functions + **************************************************/ +static void keccak_absorb(uint64_t *s, uint32_t r, const uint8_t *m, + size_t mlen, uint8_t p) { + size_t i; + uint8_t t[200]; + + /* Zero state */ + for (i = 0; i < 25; ++i) { + s[i] = 0; + } + + while (mlen >= r) { + for (i = 0; i < r / 8; ++i) { + s[i] ^= load64(m + 8 * i); + } + + KeccakF1600_StatePermute(s); + mlen -= r; + m += r; + } + + for (i = 0; i < r; ++i) { + t[i] = 0; + } + for (i = 0; i < mlen; ++i) { + t[i] = m[i]; + } + t[i] = p; + t[r - 1] |= 128; + for (i = 0; i < r / 8; ++i) { + s[i] ^= load64(t + 8 * i); + } +} + +/************************************************* + * Name: keccak_squeezeblocks + * + * Description: Squeeze step of Keccak. Squeezes full blocks of r bytes each. + * Modifies the state. Can be called multiple times to keep + * squeezing, i.e., is incremental. 
+ * + * Arguments: - uint8_t *h: pointer to output blocks + * - size_t nblocks: number of blocks to be + * squeezed (written to h) + * - uint64_t *s: pointer to input/output Keccak state + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + **************************************************/ +static void keccak_squeezeblocks(uint8_t *h, size_t nblocks, + uint64_t *s, uint32_t r) { + while (nblocks > 0) { + KeccakF1600_StatePermute(s); + for (size_t i = 0; i < (r >> 3); i++) { + store64(h + 8 * i, s[i]); + } + h += r; + nblocks--; + } +} + +/************************************************* + * Name: keccak_inc_init + * + * Description: Initializes the incremental Keccak state to zero. + * + * Arguments: - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. + **************************************************/ +static void keccak_inc_init(uint64_t *s_inc) { + size_t i; + + for (i = 0; i < 25; ++i) { + s_inc[i] = 0; + } + s_inc[25] = 0; +} + +/************************************************* + * Name: keccak_inc_absorb + * + * Description: Incremental keccak absorb + * Preceded by keccak_inc_init, succeeded by keccak_inc_finalize + * + * Arguments: - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + * - const uint8_t *m: pointer to input to be absorbed into s + * - size_t mlen: length of input in bytes + **************************************************/ +static void keccak_inc_absorb(uint64_t *s_inc, uint32_t r, const uint8_t *m, + size_t mlen) { + size_t i; + + /* Recall that s_inc[25] is the non-absorbed bytes xored into the state */ + while (mlen + s_inc[25] >= r) { + for (i = 0; i < r - (uint32_t)s_inc[25]; i++) { + /* Take the i'th byte from message + xor with the s_inc[25] + i'th byte of the state; little-endian */ + s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); + } + mlen -= (size_t)(r - s_inc[25]); + m += r - s_inc[25]; + s_inc[25] = 0; + + KeccakF1600_StatePermute(s_inc); + } + + for (i = 0; i < mlen; i++) { + s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); + } + s_inc[25] += mlen; +} + +/************************************************* + * Name: keccak_inc_finalize + * + * Description: Finalizes Keccak absorb phase, prepares for squeezing + * + * Arguments: - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + * - uint8_t p: domain-separation byte for different + * Keccak-derived functions + **************************************************/ +static void keccak_inc_finalize(uint64_t *s_inc, uint32_t r, uint8_t p) { + /* After keccak_inc_absorb, we are guaranteed that s_inc[25] < r, + so we can always use one more byte for p in the current state. 
*/ + s_inc[s_inc[25] >> 3] ^= (uint64_t)p << (8 * (s_inc[25] & 0x07)); + s_inc[(r - 1) >> 3] ^= (uint64_t)128 << (8 * ((r - 1) & 0x07)); + s_inc[25] = 0; +} + +/************************************************* + * Name: keccak_inc_squeeze + * + * Description: Incremental Keccak squeeze; can be called on byte-level + * + * Arguments: - uint8_t *h: pointer to output bytes + * - size_t outlen: number of bytes to be squeezed + * - uint64_t *s_inc: pointer to input/output incremental state + * First 25 values represent Keccak state. + * 26th value represents either the number of absorbed bytes + * that have not been permuted, or not-yet-squeezed bytes. + * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) + **************************************************/ +static void keccak_inc_squeeze(uint8_t *h, size_t outlen, + uint64_t *s_inc, uint32_t r) { + size_t i; + + /* First consume any bytes we still have sitting around */ + for (i = 0; i < outlen && i < s_inc[25]; i++) { + /* There are s_inc[25] bytes left, so r - s_inc[25] is the first + available byte. We consume from there, i.e., up to r. */ + h[i] = (uint8_t)(s_inc[(r - s_inc[25] + i) >> 3] >> (8 * ((r - s_inc[25] + i) & 0x07))); + } + h += i; + outlen -= i; + s_inc[25] -= i; + + /* Then squeeze the remaining necessary blocks */ + while (outlen > 0) { + KeccakF1600_StatePermute(s_inc); + + for (i = 0; i < outlen && i < r; i++) { + h[i] = (uint8_t)(s_inc[i >> 3] >> (8 * (i & 0x07))); + } + h += i; + outlen -= i; + s_inc[25] = r - i; + } +} + +void shake128_inc_init(shake128incctx *state) { + keccak_inc_init(state->ctx); +} + +void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHAKE128_RATE, input, inlen); +} + +void shake128_inc_finalize(shake128incctx *state) { + keccak_inc_finalize(state->ctx, SHAKE128_RATE, 0x1F); +} + +void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state) { + keccak_inc_squeeze(output, outlen, state->ctx, SHAKE128_RATE); +} + +void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void shake128_inc_ctx_release(shake128incctx *state) { + (void)state; +} + +void shake256_inc_init(shake256incctx *state) { + keccak_inc_init(state->ctx); +} + +void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHAKE256_RATE, input, inlen); +} + +void shake256_inc_finalize(shake256incctx *state) { + keccak_inc_finalize(state->ctx, SHAKE256_RATE, 0x1F); +} + +void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state) { + keccak_inc_squeeze(output, outlen, state->ctx, SHAKE256_RATE); +} + +void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void shake256_inc_ctx_release(shake256incctx *state) { + (void)state; +} + + +/************************************************* + * Name: shake128_absorb + * + * Description: Absorb step of the SHAKE128 XOF. + * non-incremental, starts by zeroeing the state. 
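 *              One-shot flow using these two calls (editorial sketch, not
 *              upstream text; msg, mlen and out are placeholders):
 *                  shake128ctx st;
 *                  shake128_absorb(&st, msg, mlen);
 *                  shake128_squeezeblocks(out, 1, &st);  // first SHAKE128_RATE bytes
 *                  shake128_ctx_release(&st);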
+ * + * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state + * - const uint8_t *input: pointer to input to be absorbed + * into s + * - size_t inlen: length of input in bytes + **************************************************/ +void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen) { + keccak_absorb(state->ctx, SHAKE128_RATE, input, inlen, 0x1F); +} + +/************************************************* + * Name: shake128_squeezeblocks + * + * Description: Squeeze step of SHAKE128 XOF. Squeezes full blocks of + * SHAKE128_RATE bytes each. Modifies the state. Can be called + * multiple times to keep squeezing, i.e., is incremental. + * + * Arguments: - uint8_t *output: pointer to output blocks + * - size_t nblocks: number of blocks to be squeezed + * (written to output) + * - shake128ctx *state: pointer to input/output Keccak state + **************************************************/ +void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state) { + keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE128_RATE); +} + +void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); +} + +/** Release the allocated state. Call only once. */ +void shake128_ctx_release(shake128ctx *state) { + (void)state; +} + +/************************************************* + * Name: shake256_absorb + * + * Description: Absorb step of the SHAKE256 XOF. + * non-incremental, starts by zeroeing the state. + * + * Arguments: - shake256ctx *state: pointer to (uninitialized) output Keccak state + * - const uint8_t *input: pointer to input to be absorbed + * into s + * - size_t inlen: length of input in bytes + **************************************************/ +void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen) { + keccak_absorb(state->ctx, SHAKE256_RATE, input, inlen, 0x1F); +} + +/************************************************* + * Name: shake256_squeezeblocks + * + * Description: Squeeze step of SHAKE256 XOF. Squeezes full blocks of + * SHAKE256_RATE bytes each. Modifies the state. Can be called + * multiple times to keep squeezing, i.e., is incremental. + * + * Arguments: - uint8_t *output: pointer to output blocks + * - size_t nblocks: number of blocks to be squeezed + * (written to output) + * - shake256ctx *state: pointer to input/output Keccak state + **************************************************/ +void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state) { + keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE256_RATE); +} + +void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); +} + +/** Release the allocated state. Call only once. 
*/ +void shake256_ctx_release(shake256ctx *state) { + (void)state; +} + +/************************************************* + * Name: shake128 + * + * Description: SHAKE128 XOF with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - size_t outlen: requested output length in bytes + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void shake128(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen) { + size_t nblocks = outlen / SHAKE128_RATE; + uint8_t t[SHAKE128_RATE]; + shake128ctx s; + + shake128_absorb(&s, input, inlen); + shake128_squeezeblocks(output, nblocks, &s); + + output += nblocks * SHAKE128_RATE; + outlen -= nblocks * SHAKE128_RATE; + + if (outlen) { + shake128_squeezeblocks(t, 1, &s); + for (size_t i = 0; i < outlen; ++i) { + output[i] = t[i]; + } + } + shake128_ctx_release(&s); +} + +/************************************************* + * Name: shake256 + * + * Description: SHAKE256 XOF with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - size_t outlen: requested output length in bytes + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void shake256(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen) { + size_t nblocks = outlen / SHAKE256_RATE; + uint8_t t[SHAKE256_RATE]; + shake256ctx s; + + shake256_absorb(&s, input, inlen); + shake256_squeezeblocks(output, nblocks, &s); + + output += nblocks * SHAKE256_RATE; + outlen -= nblocks * SHAKE256_RATE; + + if (outlen) { + shake256_squeezeblocks(t, 1, &s); + for (size_t i = 0; i < outlen; ++i) { + output[i] = t[i]; + } + } + shake256_ctx_release(&s); +} + +void sha3_256_inc_init(sha3_256incctx *state) { + keccak_inc_init(state->ctx); +} + +void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void sha3_256_inc_ctx_release(sha3_256incctx *state) { + (void)state; +} + +void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHA3_256_RATE, input, inlen); +} + +void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state) { + uint8_t t[SHA3_256_RATE]; + keccak_inc_finalize(state->ctx, SHA3_256_RATE, 0x06); + + keccak_squeezeblocks(t, 1, state->ctx, SHA3_256_RATE); + + sha3_256_inc_ctx_release(state); + + for (size_t i = 0; i < 32; i++) { + output[i] = t[i]; + } +} + +/************************************************* + * Name: sha3_256 + * + * Description: SHA3-256 with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen) { + uint64_t s[25]; + uint8_t t[SHA3_256_RATE]; + + /* Absorb input */ + keccak_absorb(s, SHA3_256_RATE, input, inlen, 0x06); + + /* Squeeze output */ + keccak_squeezeblocks(t, 1, s, SHA3_256_RATE); + + for (size_t i = 0; i < 32; i++) { + output[i] = t[i]; + } +} + +void sha3_384_inc_init(sha3_384incctx *state) { + keccak_inc_init(state->ctx); +} + +void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, 
size_t inlen) { + keccak_inc_absorb(state->ctx, SHA3_384_RATE, input, inlen); +} + +void sha3_384_inc_ctx_release(sha3_384incctx *state) { + (void)state; +} + +void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state) { + uint8_t t[SHA3_384_RATE]; + keccak_inc_finalize(state->ctx, SHA3_384_RATE, 0x06); + + keccak_squeezeblocks(t, 1, state->ctx, SHA3_384_RATE); + + sha3_384_inc_ctx_release(state); + + for (size_t i = 0; i < 48; i++) { + output[i] = t[i]; + } +} + +/************************************************* + * Name: sha3_384 + * + * Description: SHA3-256 with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen) { + uint64_t s[25]; + uint8_t t[SHA3_384_RATE]; + + /* Absorb input */ + keccak_absorb(s, SHA3_384_RATE, input, inlen, 0x06); + + /* Squeeze output */ + keccak_squeezeblocks(t, 1, s, SHA3_384_RATE); + + for (size_t i = 0; i < 48; i++) { + output[i] = t[i]; + } +} + +void sha3_512_inc_init(sha3_512incctx *state) { + keccak_inc_init(state->ctx); +} + +void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src) { + memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); +} + +void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen) { + keccak_inc_absorb(state->ctx, SHA3_512_RATE, input, inlen); +} + +void sha3_512_inc_ctx_release(sha3_512incctx *state) { + (void)state; +} + +void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state) { + uint8_t t[SHA3_512_RATE]; + keccak_inc_finalize(state->ctx, SHA3_512_RATE, 0x06); + + keccak_squeezeblocks(t, 1, state->ctx, SHA3_512_RATE); + + sha3_512_inc_ctx_release(state); + + for (size_t i = 0; i < 64; i++) { + output[i] = t[i]; + } +} + +/************************************************* + * Name: sha3_512 + * + * Description: SHA3-512 with non-incremental API + * + * Arguments: - uint8_t *output: pointer to output + * - const uint8_t *input: pointer to input + * - size_t inlen: length of input in bytes + **************************************************/ +void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen) { + uint64_t s[25]; + uint8_t t[SHA3_512_RATE]; + + /* Absorb input */ + keccak_absorb(s, SHA3_512_RATE, input, inlen, 0x06); + + /* Squeeze output */ + keccak_squeezeblocks(t, 1, s, SHA3_512_RATE); + + for (size_t i = 0; i < 64; i++) { + output[i] = t[i]; + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fips202.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fips202.h new file mode 100644 index 0000000000..c29ebd8f9d --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fips202.h @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef FIPS202_H +#define FIPS202_H + +#include +#include + +#define SHAKE128_RATE 168 +#define SHAKE256_RATE 136 +#define SHA3_256_RATE 136 +#define SHA3_384_RATE 104 +#define SHA3_512_RATE 72 + +#define PQC_SHAKEINCCTX_U64WORDS 26 +#define PQC_SHAKECTX_U64WORDS 25 + +#define PQC_SHAKEINCCTX_BYTES (sizeof(uint64_t) * 26) +#define PQC_SHAKECTX_BYTES (sizeof(uint64_t) * 25) + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} shake128incctx; + +// Context for non-incremental API +typedef struct { + uint64_t ctx[PQC_SHAKECTX_U64WORDS]; +} shake128ctx; + +// Context for incremental API +typedef struct { + uint64_t 
ctx[PQC_SHAKEINCCTX_U64WORDS]; +} shake256incctx; + +// Context for non-incremental API +typedef struct { + uint64_t ctx[PQC_SHAKECTX_U64WORDS]; +} shake256ctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} sha3_256incctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} sha3_384incctx; + +// Context for incremental API +typedef struct { + uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; +} sha3_512incctx; + +/* Initialize the state and absorb the provided input. + * + * This function does not support being called multiple times + * with the same state. + */ +void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen); +/* Squeeze output out of the sponge. + * + * Supports being called multiple times + */ +void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state); +/* Free the state */ +void shake128_ctx_release(shake128ctx *state); +/* Copy the state. */ +void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src); + +/* Initialize incremental hashing API */ +void shake128_inc_init(shake128incctx *state); +/* Absorb more information into the XOF. + * + * Can be called multiple times. + */ +void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen); +/* Finalize the XOF for squeezing */ +void shake128_inc_finalize(shake128incctx *state); +/* Squeeze output out of the sponge. + * + * Supports being called multiple times + */ +void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state); +/* Copy the context of the SHAKE128 XOF */ +void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src); +/* Free the context of the SHAKE128 XOF */ +void shake128_inc_ctx_release(shake128incctx *state); + +/* Initialize the state and absorb the provided input. + * + * This function does not support being called multiple times + * with the same state. + */ +void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen); +/* Squeeze output out of the sponge. + * + * Supports being called multiple times + */ +void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state); +/* Free the context held by this XOF */ +void shake256_ctx_release(shake256ctx *state); +/* Copy the context held by this XOF */ +void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src); + +/* Initialize incremental hashing API */ +void shake256_inc_init(shake256incctx *state); +void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen); +/* Prepares for squeeze phase */ +void shake256_inc_finalize(shake256incctx *state); +/* Squeeze output out of the sponge. 
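 *    Typical incremental sequence, using only functions declared in this
 *    header (editorial sketch, not upstream text; part1, part2 and out are
 *    placeholders):
 *        shake256incctx st;
 *        shake256_inc_init(&st);
 *        shake256_inc_absorb(&st, part1, len1);
 *        shake256_inc_absorb(&st, part2, len2);
 *        shake256_inc_finalize(&st);
 *        shake256_inc_squeeze(out, outlen, &st);
 *        shake256_inc_ctx_release(&st);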
+ * + * Supports being called multiple times + */ +void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state); +/* Copy the state */ +void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src); +/* Free the state */ +void shake256_inc_ctx_release(shake256incctx *state); + +/* One-stop SHAKE128 call */ +void shake128(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen); + +/* One-stop SHAKE256 call */ +void shake256(uint8_t *output, size_t outlen, + const uint8_t *input, size_t inlen); + +/* Initialize the incremental hashing state */ +void sha3_256_inc_init(sha3_256incctx *state); +/* Absorb blocks into SHA3 */ +void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen); +/* Obtain the output of the function and free `state` */ +void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state); +/* Copy the context */ +void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src); +/* Release the state, don't use if `_finalize` has been used */ +void sha3_256_inc_ctx_release(sha3_256incctx *state); + +void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen); + +/* Initialize the incremental hashing state */ +void sha3_384_inc_init(sha3_384incctx *state); +/* Absorb blocks into SHA3 */ +void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, size_t inlen); +/* Obtain the output of the function and free `state` */ +void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state); +/* Copy the context */ +void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src); +/* Release the state, don't use if `_finalize` has been used */ +void sha3_384_inc_ctx_release(sha3_384incctx *state); + +/* One-stop SHA3-384 shop */ +void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen); + +/* Initialize the incremental hashing state */ +void sha3_512_inc_init(sha3_512incctx *state); +/* Absorb blocks into SHA3 */ +void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen); +/* Obtain the output of the function and free `state` */ +void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state); +/* Copy the context */ +void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src); +/* Release the state, don't use if `_finalize` has been used */ +void sha3_512_inc_ctx_release(sha3_512incctx *state); + +/* One-stop SHA3-512 shop */ +void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp.c new file mode 100644 index 0000000000..48e2937f17 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp.c @@ -0,0 +1,15 @@ +#include + +/* + * If ctl == 0x00000000, then *d is set to a0 + * If ctl == 0xFFFFFFFF, then *d is set to a1 + * ctl MUST be either 0x00000000 or 0xFFFFFFFF. 
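 * Illustrative use (editorial sketch, not upstream text): to conditionally
 * overwrite r with v under a 0/1 flag in constant time, widen the flag to a
 * full mask first:
 *     fp_select(&r, &r, &v, -(uint32_t)flag);  // r unchanged if flag == 0, r = v if flag == 1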
+ */ +void +fp_select(fp_t *d, const fp_t *a0, const fp_t *a1, uint32_t ctl) +{ + digit_t cw = (int32_t)ctl; + for (unsigned int i = 0; i < NWORDS_FIELD; i++) { + (*d)[i] = (*a0)[i] ^ (cw & ((*a0)[i] ^ (*a1)[i])); + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp.h new file mode 100644 index 0000000000..1241d5801e --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp.h @@ -0,0 +1,48 @@ +#ifndef FP_H +#define FP_H + +//////////////////////////////////////////////// NOTE: this is placed here for now +#include +#include +#include +#include +#include +#include +#include +#include + +typedef digit_t fp_t[NWORDS_FIELD]; // Datatype for representing field elements + +extern const digit_t ONE[NWORDS_FIELD]; +extern const digit_t ZERO[NWORDS_FIELD]; +// extern const digit_t PM1O3[NWORDS_FIELD]; + +void fp_set_small(fp_t *x, const digit_t val); +void fp_mul_small(fp_t *x, const fp_t *a, const uint32_t val); +void fp_set_zero(fp_t *x); +void fp_set_one(fp_t *x); +uint32_t fp_is_equal(const fp_t *a, const fp_t *b); +uint32_t fp_is_zero(const fp_t *a); +void fp_copy(fp_t *out, const fp_t *a); + +void fp_encode(void *dst, const fp_t *a); +void fp_decode_reduce(fp_t *d, const void *src, size_t len); +uint32_t fp_decode(fp_t *d, const void *src); + +void fp_select(fp_t *d, const fp_t *a0, const fp_t *a1, uint32_t ctl); +void fp_cswap(fp_t *a, fp_t *b, uint32_t ctl); + +void fp_add(fp_t *out, const fp_t *a, const fp_t *b); +void fp_sub(fp_t *out, const fp_t *a, const fp_t *b); +void fp_neg(fp_t *out, const fp_t *a); +void fp_sqr(fp_t *out, const fp_t *a); +void fp_mul(fp_t *out, const fp_t *a, const fp_t *b); + +void fp_inv(fp_t *x); +uint32_t fp_is_square(const fp_t *a); +void fp_sqrt(fp_t *a); +void fp_half(fp_t *out, const fp_t *a); +void fp_exp3div4(fp_t *out, const fp_t *a); +void fp_div3(fp_t *out, const fp_t *a); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp2.c new file mode 100644 index 0000000000..a2589525f9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp2.c @@ -0,0 +1,328 @@ +#include +#include +#include + +/* Arithmetic modulo X^2 + 1 */ + +void +fp2_set_small(fp2_t *x, const digit_t val) +{ + fp_set_small(&(x->re), val); + fp_set_zero(&(x->im)); +} + +void +fp2_mul_small(fp2_t *x, const fp2_t *y, uint32_t n) +{ + fp_mul_small(&x->re, &y->re, n); + fp_mul_small(&x->im, &y->im, n); +} + +void +fp2_set_one(fp2_t *x) +{ + fp_set_one(&(x->re)); + fp_set_zero(&(x->im)); +} + +void +fp2_set_zero(fp2_t *x) +{ + fp_set_zero(&(x->re)); + fp_set_zero(&(x->im)); +} + +// Is a GF(p^2) element zero? +// Returns 0xFF...FF (true) if a=0, 0 (false) otherwise +uint32_t +fp2_is_zero(const fp2_t *a) +{ + return fp_is_zero(&(a->re)) & fp_is_zero(&(a->im)); +} + +// Compare two GF(p^2) elements in constant time +// Returns 0xFF...FF (true) if a=b, 0 (false) otherwise +uint32_t +fp2_is_equal(const fp2_t *a, const fp2_t *b) +{ + return fp_is_equal(&(a->re), &(b->re)) & fp_is_equal(&(a->im), &(b->im)); +} + +// Is a GF(p^2) element one? 
+// Returns 0xFF...FF (true) if a=1, 0 (false) otherwise +uint32_t +fp2_is_one(const fp2_t *a) +{ + return fp_is_equal(&(a->re), &ONE) & fp_is_zero(&(a->im)); +} + +void +fp2_copy(fp2_t *x, const fp2_t *y) +{ + fp_copy(&(x->re), &(y->re)); + fp_copy(&(x->im), &(y->im)); +} + +void +fp2_add(fp2_t *x, const fp2_t *y, const fp2_t *z) +{ + fp_add(&(x->re), &(y->re), &(z->re)); + fp_add(&(x->im), &(y->im), &(z->im)); +} + +void +fp2_add_one(fp2_t *x, const fp2_t *y) +{ + fp_add(&x->re, &y->re, &ONE); + fp_copy(&x->im, &y->im); +} + +void +fp2_sub(fp2_t *x, const fp2_t *y, const fp2_t *z) +{ + fp_sub(&(x->re), &(y->re), &(z->re)); + fp_sub(&(x->im), &(y->im), &(z->im)); +} + +void +fp2_neg(fp2_t *x, const fp2_t *y) +{ + fp_neg(&(x->re), &(y->re)); + fp_neg(&(x->im), &(y->im)); +} + +void +fp2_mul(fp2_t *x, const fp2_t *y, const fp2_t *z) +{ + fp_t t0, t1; + + fp_add(&t0, &(y->re), &(y->im)); + fp_add(&t1, &(z->re), &(z->im)); + fp_mul(&t0, &t0, &t1); + fp_mul(&t1, &(y->im), &(z->im)); + fp_mul(&(x->re), &(y->re), &(z->re)); + fp_sub(&(x->im), &t0, &t1); + fp_sub(&(x->im), &(x->im), &(x->re)); + fp_sub(&(x->re), &(x->re), &t1); +} + +void +fp2_sqr(fp2_t *x, const fp2_t *y) +{ + fp_t sum, diff; + + fp_add(&sum, &(y->re), &(y->im)); + fp_sub(&diff, &(y->re), &(y->im)); + fp_mul(&(x->im), &(y->re), &(y->im)); + fp_add(&(x->im), &(x->im), &(x->im)); + fp_mul(&(x->re), &sum, &diff); +} + +void +fp2_inv(fp2_t *x) +{ + fp_t t0, t1; + + fp_sqr(&t0, &(x->re)); + fp_sqr(&t1, &(x->im)); + fp_add(&t0, &t0, &t1); + fp_inv(&t0); + fp_mul(&(x->re), &(x->re), &t0); + fp_mul(&(x->im), &(x->im), &t0); + fp_neg(&(x->im), &(x->im)); +} + +uint32_t +fp2_is_square(const fp2_t *x) +{ + fp_t t0, t1; + + fp_sqr(&t0, &(x->re)); + fp_sqr(&t1, &(x->im)); + fp_add(&t0, &t0, &t1); + + return fp_is_square(&t0); +} + +void +fp2_sqrt(fp2_t *a) +{ + fp_t x0, x1, t0, t1; + + /* From "Optimized One-Dimensional SQIsign Verification on Intel and + * Cortex-M4" by Aardal et al: https://eprint.iacr.org/2024/1563 */ + + // x0 = \delta = sqrt(a0^2 + a1^2). + fp_sqr(&x0, &(a->re)); + fp_sqr(&x1, &(a->im)); + fp_add(&x0, &x0, &x1); + fp_sqrt(&x0); + // If a1 = 0, there is a risk of \delta = -a0, which makes x0 = 0 below. + // In that case, we restore the value \delta = a0. + fp_select(&x0, &x0, &(a->re), fp_is_zero(&(a->im))); + // x0 = \delta + a0, t0 = 2 * x0. + fp_add(&x0, &x0, &(a->re)); + fp_add(&t0, &x0, &x0); + + // x1 = t0^(p-3)/4 + fp_exp3div4(&x1, &t0); + + // x0 = x0 * x1, x1 = x1 * a1, t1 = (2x0)^2. + fp_mul(&x0, &x0, &x1); + fp_mul(&x1, &x1, &(a->im)); + fp_add(&t1, &x0, &x0); + fp_sqr(&t1, &t1); + // If t1 = t0, return x0 + x1*i, otherwise x1 - x0*i. 
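    // (Editorial note, added for clarity: t1 = (2*x0)^2 equals t0 = 2*(a0 + delta)
    // exactly when t0 is a square in GF(p); then x0^2 = (a0 + delta)/2 and
    // 2*x0*x1 = a1, so (x0 + x1*i)^2 = a. Otherwise x0^2 = -(a0 + delta)/2 and
    // the pair (x1, -x0) gives the square root instead.)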
+ fp_sub(&t0, &t0, &t1); + uint32_t f = fp_is_zero(&t0); + fp_neg(&t1, &x0); + fp_copy(&t0, &x1); + fp_select(&t0, &t0, &x0, f); + fp_select(&t1, &t1, &x1, f); + + // Check if t0 is zero + uint32_t t0_is_zero = fp_is_zero(&t0); + + // Check whether t0, t1 are odd + // Note: we encode to ensure canonical representation + uint8_t tmp_bytes[FP_ENCODED_BYTES]; + fp_encode(tmp_bytes, &t0); + uint32_t t0_is_odd = -((uint32_t)tmp_bytes[0] & 1); + fp_encode(tmp_bytes, &t1); + uint32_t t1_is_odd = -((uint32_t)tmp_bytes[0] & 1); + + // We negate the output if: + // t0 is odd, or + // t0 is zero and t1 is odd + uint32_t negate_output = t0_is_odd | (t0_is_zero & t1_is_odd); + fp_neg(&x0, &t0); + fp_select(&(a->re), &t0, &x0, negate_output); + fp_neg(&x0, &t1); + fp_select(&(a->im), &t1, &x0, negate_output); +} + +uint32_t +fp2_sqrt_verify(fp2_t *a) +{ + fp2_t t0, t1; + + fp2_copy(&t0, a); + fp2_sqrt(a); + fp2_sqr(&t1, a); + + return (fp2_is_equal(&t0, &t1)); +} + +void +fp2_half(fp2_t *x, const fp2_t *y) +{ + fp_half(&(x->re), &(y->re)); + fp_half(&(x->im), &(y->im)); +} + +void +fp2_batched_inv(fp2_t *x, int len) +{ + fp2_t t1[len], t2[len]; + fp2_t inverse; + + // x = x0,...,xn + // t1 = x0, x0*x1, ... ,x0 * x1 * ... * xn + fp2_copy(&t1[0], &x[0]); + for (int i = 1; i < len; i++) { + fp2_mul(&t1[i], &t1[i - 1], &x[i]); + } + + // inverse = 1/ (x0 * x1 * ... * xn) + fp2_copy(&inverse, &t1[len - 1]); + fp2_inv(&inverse); + + fp2_copy(&t2[0], &inverse); + // t2 = 1/ (x0 * x1 * ... * xn), 1/ (x0 * x1 * ... * x(n-1)) , ... , 1/xO + for (int i = 1; i < len; i++) { + fp2_mul(&t2[i], &t2[i - 1], &x[len - i]); + } + + fp2_copy(&x[0], &t2[len - 1]); + + for (int i = 1; i < len; i++) { + fp2_mul(&x[i], &t1[i - 1], &t2[len - i - 1]); + } +} + +// exponentiation using square and multiply +// Warning!! Not constant time! 
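// Editorial sketch (not upstream text): the exponent is read as a little-endian
// array of `size` limbs, so a small public exponent fits in a single digit_t:
//     digit_t e = 5;
//     fp2_pow_vartime(&y, &x, &e, 1);  // y = x^5; x and y are placeholder fp2_t values
// The per-bit branch below leaks the exponent through timing, so exp must be public.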
+void +fp2_pow_vartime(fp2_t *out, const fp2_t *x, const digit_t *exp, const int size) +{ + fp2_t acc; + digit_t bit; + + fp2_copy(&acc, x); + fp2_set_one(out); + + // Iterate over each word of exp + for (int j = 0; j < size; j++) { + // Iterate over each bit of the word + for (int i = 0; i < RADIX; i++) { + bit = (exp[j] >> i) & 1; + if (bit == 1) { + fp2_mul(out, out, &acc); + } + fp2_sqr(&acc, &acc); + } + } +} + +void +fp2_print(const char *name, const fp2_t *a) +{ + printf("%s0x", name); + + uint8_t buf[FP_ENCODED_BYTES]; + fp_encode(&buf, &a->re); // Encoding ensures canonical rep + for (int i = 0; i < FP_ENCODED_BYTES; i++) { + printf("%02x", buf[FP_ENCODED_BYTES - i - 1]); + } + + printf(" + i*0x"); + + fp_encode(&buf, &a->im); + for (int i = 0; i < FP_ENCODED_BYTES; i++) { + printf("%02x", buf[FP_ENCODED_BYTES - i - 1]); + } + printf("\n"); +} + +void +fp2_encode(void *dst, const fp2_t *a) +{ + uint8_t *buf = dst; + fp_encode(buf, &(a->re)); + fp_encode(buf + FP_ENCODED_BYTES, &(a->im)); +} + +uint32_t +fp2_decode(fp2_t *d, const void *src) +{ + const uint8_t *buf = src; + uint32_t re, im; + + re = fp_decode(&(d->re), buf); + im = fp_decode(&(d->im), buf + FP_ENCODED_BYTES); + return re & im; +} + +void +fp2_select(fp2_t *d, const fp2_t *a0, const fp2_t *a1, uint32_t ctl) +{ + fp_select(&(d->re), &(a0->re), &(a1->re), ctl); + fp_select(&(d->im), &(a0->im), &(a1->im), ctl); +} + +void +fp2_cswap(fp2_t *a, fp2_t *b, uint32_t ctl) +{ + fp_cswap(&(a->re), &(b->re), ctl); + fp_cswap(&(a->im), &(b->im), ctl); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp2.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp2.h new file mode 100644 index 0000000000..00e673b7ca --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp2.h @@ -0,0 +1,41 @@ +#ifndef FP2_H +#define FP2_H + +#include +#include "fp.h" +#include + +// Structure for representing elements in GF(p^2) +typedef struct fp2_t +{ + fp_t re, im; +} fp2_t; + +void fp2_set_small(fp2_t *x, const digit_t val); +void fp2_mul_small(fp2_t *x, const fp2_t *y, uint32_t n); +void fp2_set_one(fp2_t *x); +void fp2_set_zero(fp2_t *x); +uint32_t fp2_is_zero(const fp2_t *a); +uint32_t fp2_is_equal(const fp2_t *a, const fp2_t *b); +uint32_t fp2_is_one(const fp2_t *a); +void fp2_copy(fp2_t *x, const fp2_t *y); +void fp2_add(fp2_t *x, const fp2_t *y, const fp2_t *z); +void fp2_add_one(fp2_t *x, const fp2_t *y); +void fp2_sub(fp2_t *x, const fp2_t *y, const fp2_t *z); +void fp2_neg(fp2_t *x, const fp2_t *y); +void fp2_mul(fp2_t *x, const fp2_t *y, const fp2_t *z); +void fp2_sqr(fp2_t *x, const fp2_t *y); +void fp2_inv(fp2_t *x); +uint32_t fp2_is_square(const fp2_t *x); +void fp2_sqrt(fp2_t *x); +uint32_t fp2_sqrt_verify(fp2_t *a); +void fp2_half(fp2_t *x, const fp2_t *y); +void fp2_batched_inv(fp2_t *x, int len); +void fp2_pow_vartime(fp2_t *out, const fp2_t *x, const digit_t *exp, const int size); +void fp2_print(const char *name, const fp2_t *a); +void fp2_encode(void *dst, const fp2_t *a); +uint32_t fp2_decode(fp2_t *d, const void *src); +void fp2_select(fp2_t *d, const fp2_t *a0, const fp2_t *a1, uint32_t ctl); +void fp2_cswap(fp2_t *a, fp2_t *b, uint32_t ctl); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_constants.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_constants.h new file mode 100644 index 0000000000..094cb4de22 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_constants.h @@ -0,0 +1,17 @@ +#if RADIX == 32 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +#define NWORDS_FIELD 
16 +#else +#define NWORDS_FIELD 18 +#endif +#define NWORDS_ORDER 16 +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +#define NWORDS_FIELD 8 +#else +#define NWORDS_FIELD 9 +#endif +#define NWORDS_ORDER 8 +#endif +#define BITS 512 +#define LOG2P 9 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c new file mode 100644 index 0000000000..f002495c59 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c @@ -0,0 +1,1517 @@ +// clang-format off +// Command line : python monty.py 32 +// 0x1afffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff +#ifdef RADIX_32 + +#include +#include + +#define sspint int32_t +#define spint uint32_t +#define udpint uint64_t +#define dpint uint64_t + +#define Wordlength 32 +#define Nlimbs 18 +#define Radix 29 +#define Nbits 505 +#define Nbytes 64 + +#define MONTGOMERY +// propagate carries +inline static spint prop(spint *n) { + int i; + spint mask = ((spint)1 << 29u) - (spint)1; + sspint carry = (sspint)n[0]; + carry >>= 29u; + n[0] &= mask; + for (i = 1; i < 17; i++) { + carry += (sspint)n[i]; + n[i] = (spint)carry & mask; + carry >>= 29u; + } + n[17] += (spint)carry; + return -((n[17] >> 1) >> 30u); +} + +// propagate carries and add p if negative, propagate carries again +inline static int flatten(spint *n) { + spint carry = prop(n); + n[0] -= (spint)1u & carry; + n[17] += ((spint)0xd80u) & carry; + (void)prop(n); + return (int)(carry & 1); +} + +// Montgomery final subtract +static int modfsb(spint *n) { + n[0] += (spint)1u; + n[17] -= (spint)0xd80u; + return flatten(n); +} + +// Modular addition - reduce less than 2p +static void modadd(const spint *a, const spint *b, spint *n) { + spint carry; + n[0] = a[0] + b[0]; + n[1] = a[1] + b[1]; + n[2] = a[2] + b[2]; + n[3] = a[3] + b[3]; + n[4] = a[4] + b[4]; + n[5] = a[5] + b[5]; + n[6] = a[6] + b[6]; + n[7] = a[7] + b[7]; + n[8] = a[8] + b[8]; + n[9] = a[9] + b[9]; + n[10] = a[10] + b[10]; + n[11] = a[11] + b[11]; + n[12] = a[12] + b[12]; + n[13] = a[13] + b[13]; + n[14] = a[14] + b[14]; + n[15] = a[15] + b[15]; + n[16] = a[16] + b[16]; + n[17] = a[17] + b[17]; + n[0] += (spint)2u; + n[17] -= (spint)0x1b00u; + carry = prop(n); + n[0] -= (spint)2u & carry; + n[17] += ((spint)0x1b00u) & carry; + (void)prop(n); +} + +// Modular subtraction - reduce less than 2p +static void modsub(const spint *a, const spint *b, spint *n) { + spint carry; + n[0] = a[0] - b[0]; + n[1] = a[1] - b[1]; + n[2] = a[2] - b[2]; + n[3] = a[3] - b[3]; + n[4] = a[4] - b[4]; + n[5] = a[5] - b[5]; + n[6] = a[6] - b[6]; + n[7] = a[7] - b[7]; + n[8] = a[8] - b[8]; + n[9] = a[9] - b[9]; + n[10] = a[10] - b[10]; + n[11] = a[11] - b[11]; + n[12] = a[12] - b[12]; + n[13] = a[13] - b[13]; + n[14] = a[14] - b[14]; + n[15] = a[15] - b[15]; + n[16] = a[16] - b[16]; + n[17] = a[17] - b[17]; + carry = prop(n); + n[0] -= (spint)2u & carry; + n[17] += ((spint)0x1b00u) & carry; + (void)prop(n); +} + +// Modular negation +static void modneg(const spint *b, spint *n) { + spint carry; + n[0] = (spint)0 - b[0]; + n[1] = (spint)0 - b[1]; + n[2] = (spint)0 - b[2]; + n[3] = (spint)0 - b[3]; + n[4] = (spint)0 - b[4]; + n[5] = (spint)0 - b[5]; + n[6] = (spint)0 - b[6]; + n[7] = (spint)0 - b[7]; + n[8] = (spint)0 - b[8]; + n[9] = (spint)0 - b[9]; + n[10] = (spint)0 - b[10]; + n[11] = (spint)0 - b[11]; + n[12] = (spint)0 - b[12]; + n[13] = (spint)0 - b[13]; + n[14] = (spint)0 - b[14]; 
+ n[15] = (spint)0 - b[15]; + n[16] = (spint)0 - b[16]; + n[17] = (spint)0 - b[17]; + carry = prop(n); + n[0] -= (spint)2u & carry; + n[17] += ((spint)0x1b00u) & carry; + (void)prop(n); +} + +// Overflow limit = 18446744073709551616 +// maximum possible = 5188148641189065362 +// Modular multiplication, c=a*b mod 2p +static void modmul(const spint *a, const spint *b, spint *c) { + dpint t = 0; + spint p17 = 0xd80u; + spint q = ((spint)1 << 29u); // q is unsaturated radix + spint mask = (spint)(q - (spint)1); + t += (dpint)a[0] * b[0]; + spint v0 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[1]; + t += (dpint)a[1] * b[0]; + spint v1 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[2]; + t += (dpint)a[1] * b[1]; + t += (dpint)a[2] * b[0]; + spint v2 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[3]; + t += (dpint)a[1] * b[2]; + t += (dpint)a[2] * b[1]; + t += (dpint)a[3] * b[0]; + spint v3 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[4]; + t += (dpint)a[1] * b[3]; + t += (dpint)a[2] * b[2]; + t += (dpint)a[3] * b[1]; + t += (dpint)a[4] * b[0]; + spint v4 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[5]; + t += (dpint)a[1] * b[4]; + t += (dpint)a[2] * b[3]; + t += (dpint)a[3] * b[2]; + t += (dpint)a[4] * b[1]; + t += (dpint)a[5] * b[0]; + spint v5 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[6]; + t += (dpint)a[1] * b[5]; + t += (dpint)a[2] * b[4]; + t += (dpint)a[3] * b[3]; + t += (dpint)a[4] * b[2]; + t += (dpint)a[5] * b[1]; + t += (dpint)a[6] * b[0]; + spint v6 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[7]; + t += (dpint)a[1] * b[6]; + t += (dpint)a[2] * b[5]; + t += (dpint)a[3] * b[4]; + t += (dpint)a[4] * b[3]; + t += (dpint)a[5] * b[2]; + t += (dpint)a[6] * b[1]; + t += (dpint)a[7] * b[0]; + spint v7 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[8]; + t += (dpint)a[1] * b[7]; + t += (dpint)a[2] * b[6]; + t += (dpint)a[3] * b[5]; + t += (dpint)a[4] * b[4]; + t += (dpint)a[5] * b[3]; + t += (dpint)a[6] * b[2]; + t += (dpint)a[7] * b[1]; + t += (dpint)a[8] * b[0]; + spint v8 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[9]; + t += (dpint)a[1] * b[8]; + t += (dpint)a[2] * b[7]; + t += (dpint)a[3] * b[6]; + t += (dpint)a[4] * b[5]; + t += (dpint)a[5] * b[4]; + t += (dpint)a[6] * b[3]; + t += (dpint)a[7] * b[2]; + t += (dpint)a[8] * b[1]; + t += (dpint)a[9] * b[0]; + spint v9 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[10]; + t += (dpint)a[1] * b[9]; + t += (dpint)a[2] * b[8]; + t += (dpint)a[3] * b[7]; + t += (dpint)a[4] * b[6]; + t += (dpint)a[5] * b[5]; + t += (dpint)a[6] * b[4]; + t += (dpint)a[7] * b[3]; + t += (dpint)a[8] * b[2]; + t += (dpint)a[9] * b[1]; + t += (dpint)a[10] * b[0]; + spint v10 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[11]; + t += (dpint)a[1] * b[10]; + t += (dpint)a[2] * b[9]; + t += (dpint)a[3] * b[8]; + t += (dpint)a[4] * b[7]; + t += (dpint)a[5] * b[6]; + t += (dpint)a[6] * b[5]; + t += (dpint)a[7] * b[4]; + t += (dpint)a[8] * b[3]; + t += (dpint)a[9] * b[2]; + t += (dpint)a[10] * b[1]; + t += (dpint)a[11] * b[0]; + spint v11 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[12]; + t += (dpint)a[1] * b[11]; + t += (dpint)a[2] * b[10]; + t += (dpint)a[3] * b[9]; + t += (dpint)a[4] * b[8]; + t += (dpint)a[5] * b[7]; + t += (dpint)a[6] * b[6]; + t += (dpint)a[7] * b[5]; + t += (dpint)a[8] * b[4]; + t += (dpint)a[9] * b[3]; + t += (dpint)a[10] * b[2]; + t += (dpint)a[11] * b[1]; + t += (dpint)a[12] * b[0]; + spint v12 = ((spint)t & mask); + t >>= 
29; + t += (dpint)a[0] * b[13]; + t += (dpint)a[1] * b[12]; + t += (dpint)a[2] * b[11]; + t += (dpint)a[3] * b[10]; + t += (dpint)a[4] * b[9]; + t += (dpint)a[5] * b[8]; + t += (dpint)a[6] * b[7]; + t += (dpint)a[7] * b[6]; + t += (dpint)a[8] * b[5]; + t += (dpint)a[9] * b[4]; + t += (dpint)a[10] * b[3]; + t += (dpint)a[11] * b[2]; + t += (dpint)a[12] * b[1]; + t += (dpint)a[13] * b[0]; + spint v13 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[14]; + t += (dpint)a[1] * b[13]; + t += (dpint)a[2] * b[12]; + t += (dpint)a[3] * b[11]; + t += (dpint)a[4] * b[10]; + t += (dpint)a[5] * b[9]; + t += (dpint)a[6] * b[8]; + t += (dpint)a[7] * b[7]; + t += (dpint)a[8] * b[6]; + t += (dpint)a[9] * b[5]; + t += (dpint)a[10] * b[4]; + t += (dpint)a[11] * b[3]; + t += (dpint)a[12] * b[2]; + t += (dpint)a[13] * b[1]; + t += (dpint)a[14] * b[0]; + spint v14 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[15]; + t += (dpint)a[1] * b[14]; + t += (dpint)a[2] * b[13]; + t += (dpint)a[3] * b[12]; + t += (dpint)a[4] * b[11]; + t += (dpint)a[5] * b[10]; + t += (dpint)a[6] * b[9]; + t += (dpint)a[7] * b[8]; + t += (dpint)a[8] * b[7]; + t += (dpint)a[9] * b[6]; + t += (dpint)a[10] * b[5]; + t += (dpint)a[11] * b[4]; + t += (dpint)a[12] * b[3]; + t += (dpint)a[13] * b[2]; + t += (dpint)a[14] * b[1]; + t += (dpint)a[15] * b[0]; + spint v15 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[16]; + t += (dpint)a[1] * b[15]; + t += (dpint)a[2] * b[14]; + t += (dpint)a[3] * b[13]; + t += (dpint)a[4] * b[12]; + t += (dpint)a[5] * b[11]; + t += (dpint)a[6] * b[10]; + t += (dpint)a[7] * b[9]; + t += (dpint)a[8] * b[8]; + t += (dpint)a[9] * b[7]; + t += (dpint)a[10] * b[6]; + t += (dpint)a[11] * b[5]; + t += (dpint)a[12] * b[4]; + t += (dpint)a[13] * b[3]; + t += (dpint)a[14] * b[2]; + t += (dpint)a[15] * b[1]; + t += (dpint)a[16] * b[0]; + spint v16 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[0] * b[17]; + t += (dpint)a[1] * b[16]; + t += (dpint)a[2] * b[15]; + t += (dpint)a[3] * b[14]; + t += (dpint)a[4] * b[13]; + t += (dpint)a[5] * b[12]; + t += (dpint)a[6] * b[11]; + t += (dpint)a[7] * b[10]; + t += (dpint)a[8] * b[9]; + t += (dpint)a[9] * b[8]; + t += (dpint)a[10] * b[7]; + t += (dpint)a[11] * b[6]; + t += (dpint)a[12] * b[5]; + t += (dpint)a[13] * b[4]; + t += (dpint)a[14] * b[3]; + t += (dpint)a[15] * b[2]; + t += (dpint)a[16] * b[1]; + t += (dpint)a[17] * b[0]; + t += (dpint)v0 * (dpint)p17; + spint v17 = ((spint)t & mask); + t >>= 29; + t += (dpint)a[1] * b[17]; + t += (dpint)a[2] * b[16]; + t += (dpint)a[3] * b[15]; + t += (dpint)a[4] * b[14]; + t += (dpint)a[5] * b[13]; + t += (dpint)a[6] * b[12]; + t += (dpint)a[7] * b[11]; + t += (dpint)a[8] * b[10]; + t += (dpint)a[9] * b[9]; + t += (dpint)a[10] * b[8]; + t += (dpint)a[11] * b[7]; + t += (dpint)a[12] * b[6]; + t += (dpint)a[13] * b[5]; + t += (dpint)a[14] * b[4]; + t += (dpint)a[15] * b[3]; + t += (dpint)a[16] * b[2]; + t += (dpint)a[17] * b[1]; + t += (dpint)v1 * (dpint)p17; + c[0] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[2] * b[17]; + t += (dpint)a[3] * b[16]; + t += (dpint)a[4] * b[15]; + t += (dpint)a[5] * b[14]; + t += (dpint)a[6] * b[13]; + t += (dpint)a[7] * b[12]; + t += (dpint)a[8] * b[11]; + t += (dpint)a[9] * b[10]; + t += (dpint)a[10] * b[9]; + t += (dpint)a[11] * b[8]; + t += (dpint)a[12] * b[7]; + t += (dpint)a[13] * b[6]; + t += (dpint)a[14] * b[5]; + t += (dpint)a[15] * b[4]; + t += (dpint)a[16] * b[3]; + t += (dpint)a[17] * b[2]; + t += (dpint)v2 * (dpint)p17; + c[1] = ((spint)t & mask); + t >>= 29; + t += 
(dpint)a[3] * b[17]; + t += (dpint)a[4] * b[16]; + t += (dpint)a[5] * b[15]; + t += (dpint)a[6] * b[14]; + t += (dpint)a[7] * b[13]; + t += (dpint)a[8] * b[12]; + t += (dpint)a[9] * b[11]; + t += (dpint)a[10] * b[10]; + t += (dpint)a[11] * b[9]; + t += (dpint)a[12] * b[8]; + t += (dpint)a[13] * b[7]; + t += (dpint)a[14] * b[6]; + t += (dpint)a[15] * b[5]; + t += (dpint)a[16] * b[4]; + t += (dpint)a[17] * b[3]; + t += (dpint)v3 * (dpint)p17; + c[2] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[4] * b[17]; + t += (dpint)a[5] * b[16]; + t += (dpint)a[6] * b[15]; + t += (dpint)a[7] * b[14]; + t += (dpint)a[8] * b[13]; + t += (dpint)a[9] * b[12]; + t += (dpint)a[10] * b[11]; + t += (dpint)a[11] * b[10]; + t += (dpint)a[12] * b[9]; + t += (dpint)a[13] * b[8]; + t += (dpint)a[14] * b[7]; + t += (dpint)a[15] * b[6]; + t += (dpint)a[16] * b[5]; + t += (dpint)a[17] * b[4]; + t += (dpint)v4 * (dpint)p17; + c[3] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[5] * b[17]; + t += (dpint)a[6] * b[16]; + t += (dpint)a[7] * b[15]; + t += (dpint)a[8] * b[14]; + t += (dpint)a[9] * b[13]; + t += (dpint)a[10] * b[12]; + t += (dpint)a[11] * b[11]; + t += (dpint)a[12] * b[10]; + t += (dpint)a[13] * b[9]; + t += (dpint)a[14] * b[8]; + t += (dpint)a[15] * b[7]; + t += (dpint)a[16] * b[6]; + t += (dpint)a[17] * b[5]; + t += (dpint)v5 * (dpint)p17; + c[4] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[6] * b[17]; + t += (dpint)a[7] * b[16]; + t += (dpint)a[8] * b[15]; + t += (dpint)a[9] * b[14]; + t += (dpint)a[10] * b[13]; + t += (dpint)a[11] * b[12]; + t += (dpint)a[12] * b[11]; + t += (dpint)a[13] * b[10]; + t += (dpint)a[14] * b[9]; + t += (dpint)a[15] * b[8]; + t += (dpint)a[16] * b[7]; + t += (dpint)a[17] * b[6]; + t += (dpint)v6 * (dpint)p17; + c[5] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[7] * b[17]; + t += (dpint)a[8] * b[16]; + t += (dpint)a[9] * b[15]; + t += (dpint)a[10] * b[14]; + t += (dpint)a[11] * b[13]; + t += (dpint)a[12] * b[12]; + t += (dpint)a[13] * b[11]; + t += (dpint)a[14] * b[10]; + t += (dpint)a[15] * b[9]; + t += (dpint)a[16] * b[8]; + t += (dpint)a[17] * b[7]; + t += (dpint)v7 * (dpint)p17; + c[6] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[8] * b[17]; + t += (dpint)a[9] * b[16]; + t += (dpint)a[10] * b[15]; + t += (dpint)a[11] * b[14]; + t += (dpint)a[12] * b[13]; + t += (dpint)a[13] * b[12]; + t += (dpint)a[14] * b[11]; + t += (dpint)a[15] * b[10]; + t += (dpint)a[16] * b[9]; + t += (dpint)a[17] * b[8]; + t += (dpint)v8 * (dpint)p17; + c[7] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[9] * b[17]; + t += (dpint)a[10] * b[16]; + t += (dpint)a[11] * b[15]; + t += (dpint)a[12] * b[14]; + t += (dpint)a[13] * b[13]; + t += (dpint)a[14] * b[12]; + t += (dpint)a[15] * b[11]; + t += (dpint)a[16] * b[10]; + t += (dpint)a[17] * b[9]; + t += (dpint)v9 * (dpint)p17; + c[8] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[10] * b[17]; + t += (dpint)a[11] * b[16]; + t += (dpint)a[12] * b[15]; + t += (dpint)a[13] * b[14]; + t += (dpint)a[14] * b[13]; + t += (dpint)a[15] * b[12]; + t += (dpint)a[16] * b[11]; + t += (dpint)a[17] * b[10]; + t += (dpint)v10 * (dpint)p17; + c[9] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[11] * b[17]; + t += (dpint)a[12] * b[16]; + t += (dpint)a[13] * b[15]; + t += (dpint)a[14] * b[14]; + t += (dpint)a[15] * b[13]; + t += (dpint)a[16] * b[12]; + t += (dpint)a[17] * b[11]; + t += (dpint)v11 * (dpint)p17; + c[10] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[12] * b[17]; + t += (dpint)a[13] * b[16]; + t += (dpint)a[14] * b[15]; + t += 
(dpint)a[15] * b[14]; + t += (dpint)a[16] * b[13]; + t += (dpint)a[17] * b[12]; + t += (dpint)v12 * (dpint)p17; + c[11] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[13] * b[17]; + t += (dpint)a[14] * b[16]; + t += (dpint)a[15] * b[15]; + t += (dpint)a[16] * b[14]; + t += (dpint)a[17] * b[13]; + t += (dpint)v13 * (dpint)p17; + c[12] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[14] * b[17]; + t += (dpint)a[15] * b[16]; + t += (dpint)a[16] * b[15]; + t += (dpint)a[17] * b[14]; + t += (dpint)v14 * (dpint)p17; + c[13] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[15] * b[17]; + t += (dpint)a[16] * b[16]; + t += (dpint)a[17] * b[15]; + t += (dpint)v15 * (dpint)p17; + c[14] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[16] * b[17]; + t += (dpint)a[17] * b[16]; + t += (dpint)v16 * (dpint)p17; + c[15] = ((spint)t & mask); + t >>= 29; + t += (dpint)a[17] * b[17]; + t += (dpint)v17 * (dpint)p17; + c[16] = ((spint)t & mask); + t >>= 29; + c[17] = (spint)t; +} + +// Modular squaring, c=a*a mod 2p +static void modsqr(const spint *a, spint *c) { + udpint tot; + udpint t = 0; + spint p17 = 0xd80u; + spint q = ((spint)1 << 29u); // q is unsaturated radix + spint mask = (spint)(q - (spint)1); + tot = (udpint)a[0] * a[0]; + t = tot; + spint v0 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[1]; + tot *= 2; + t += tot; + spint v1 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[2]; + tot *= 2; + tot += (udpint)a[1] * a[1]; + t += tot; + spint v2 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[3]; + tot += (udpint)a[1] * a[2]; + tot *= 2; + t += tot; + spint v3 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[4]; + tot += (udpint)a[1] * a[3]; + tot *= 2; + tot += (udpint)a[2] * a[2]; + t += tot; + spint v4 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[5]; + tot += (udpint)a[1] * a[4]; + tot += (udpint)a[2] * a[3]; + tot *= 2; + t += tot; + spint v5 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[6]; + tot += (udpint)a[1] * a[5]; + tot += (udpint)a[2] * a[4]; + tot *= 2; + tot += (udpint)a[3] * a[3]; + t += tot; + spint v6 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[7]; + tot += (udpint)a[1] * a[6]; + tot += (udpint)a[2] * a[5]; + tot += (udpint)a[3] * a[4]; + tot *= 2; + t += tot; + spint v7 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[8]; + tot += (udpint)a[1] * a[7]; + tot += (udpint)a[2] * a[6]; + tot += (udpint)a[3] * a[5]; + tot *= 2; + tot += (udpint)a[4] * a[4]; + t += tot; + spint v8 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[9]; + tot += (udpint)a[1] * a[8]; + tot += (udpint)a[2] * a[7]; + tot += (udpint)a[3] * a[6]; + tot += (udpint)a[4] * a[5]; + tot *= 2; + t += tot; + spint v9 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[10]; + tot += (udpint)a[1] * a[9]; + tot += (udpint)a[2] * a[8]; + tot += (udpint)a[3] * a[7]; + tot += (udpint)a[4] * a[6]; + tot *= 2; + tot += (udpint)a[5] * a[5]; + t += tot; + spint v10 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[11]; + tot += (udpint)a[1] * a[10]; + tot += (udpint)a[2] * a[9]; + tot += (udpint)a[3] * a[8]; + tot += (udpint)a[4] * a[7]; + tot += (udpint)a[5] * a[6]; + tot *= 2; + t += tot; + spint v11 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[12]; + tot += (udpint)a[1] * a[11]; + tot += (udpint)a[2] * a[10]; + tot += (udpint)a[3] * a[9]; + tot += (udpint)a[4] * a[8]; + tot += (udpint)a[5] * a[7]; + tot *= 2; + tot += (udpint)a[6] * a[6]; + t += tot; + spint v12 = ((spint)t & mask); + t >>= 29; + tot = 
(udpint)a[0] * a[13]; + tot += (udpint)a[1] * a[12]; + tot += (udpint)a[2] * a[11]; + tot += (udpint)a[3] * a[10]; + tot += (udpint)a[4] * a[9]; + tot += (udpint)a[5] * a[8]; + tot += (udpint)a[6] * a[7]; + tot *= 2; + t += tot; + spint v13 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[14]; + tot += (udpint)a[1] * a[13]; + tot += (udpint)a[2] * a[12]; + tot += (udpint)a[3] * a[11]; + tot += (udpint)a[4] * a[10]; + tot += (udpint)a[5] * a[9]; + tot += (udpint)a[6] * a[8]; + tot *= 2; + tot += (udpint)a[7] * a[7]; + t += tot; + spint v14 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[15]; + tot += (udpint)a[1] * a[14]; + tot += (udpint)a[2] * a[13]; + tot += (udpint)a[3] * a[12]; + tot += (udpint)a[4] * a[11]; + tot += (udpint)a[5] * a[10]; + tot += (udpint)a[6] * a[9]; + tot += (udpint)a[7] * a[8]; + tot *= 2; + t += tot; + spint v15 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[16]; + tot += (udpint)a[1] * a[15]; + tot += (udpint)a[2] * a[14]; + tot += (udpint)a[3] * a[13]; + tot += (udpint)a[4] * a[12]; + tot += (udpint)a[5] * a[11]; + tot += (udpint)a[6] * a[10]; + tot += (udpint)a[7] * a[9]; + tot *= 2; + tot += (udpint)a[8] * a[8]; + t += tot; + spint v16 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[0] * a[17]; + tot += (udpint)a[1] * a[16]; + tot += (udpint)a[2] * a[15]; + tot += (udpint)a[3] * a[14]; + tot += (udpint)a[4] * a[13]; + tot += (udpint)a[5] * a[12]; + tot += (udpint)a[6] * a[11]; + tot += (udpint)a[7] * a[10]; + tot += (udpint)a[8] * a[9]; + tot *= 2; + t += tot; + t += (udpint)v0 * p17; + spint v17 = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[1] * a[17]; + tot += (udpint)a[2] * a[16]; + tot += (udpint)a[3] * a[15]; + tot += (udpint)a[4] * a[14]; + tot += (udpint)a[5] * a[13]; + tot += (udpint)a[6] * a[12]; + tot += (udpint)a[7] * a[11]; + tot += (udpint)a[8] * a[10]; + tot *= 2; + tot += (udpint)a[9] * a[9]; + t += tot; + t += (udpint)v1 * p17; + c[0] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[2] * a[17]; + tot += (udpint)a[3] * a[16]; + tot += (udpint)a[4] * a[15]; + tot += (udpint)a[5] * a[14]; + tot += (udpint)a[6] * a[13]; + tot += (udpint)a[7] * a[12]; + tot += (udpint)a[8] * a[11]; + tot += (udpint)a[9] * a[10]; + tot *= 2; + t += tot; + t += (udpint)v2 * p17; + c[1] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[3] * a[17]; + tot += (udpint)a[4] * a[16]; + tot += (udpint)a[5] * a[15]; + tot += (udpint)a[6] * a[14]; + tot += (udpint)a[7] * a[13]; + tot += (udpint)a[8] * a[12]; + tot += (udpint)a[9] * a[11]; + tot *= 2; + tot += (udpint)a[10] * a[10]; + t += tot; + t += (udpint)v3 * p17; + c[2] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[4] * a[17]; + tot += (udpint)a[5] * a[16]; + tot += (udpint)a[6] * a[15]; + tot += (udpint)a[7] * a[14]; + tot += (udpint)a[8] * a[13]; + tot += (udpint)a[9] * a[12]; + tot += (udpint)a[10] * a[11]; + tot *= 2; + t += tot; + t += (udpint)v4 * p17; + c[3] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[5] * a[17]; + tot += (udpint)a[6] * a[16]; + tot += (udpint)a[7] * a[15]; + tot += (udpint)a[8] * a[14]; + tot += (udpint)a[9] * a[13]; + tot += (udpint)a[10] * a[12]; + tot *= 2; + tot += (udpint)a[11] * a[11]; + t += tot; + t += (udpint)v5 * p17; + c[4] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[6] * a[17]; + tot += (udpint)a[7] * a[16]; + tot += (udpint)a[8] * a[15]; + tot += (udpint)a[9] * a[14]; + tot += (udpint)a[10] * a[13]; + tot += (udpint)a[11] * a[12]; + tot *= 2; + t += tot; + t += (udpint)v6 * p17; + c[5] = ((spint)t & mask); + t >>= 29; + tot = 
(udpint)a[7] * a[17]; + tot += (udpint)a[8] * a[16]; + tot += (udpint)a[9] * a[15]; + tot += (udpint)a[10] * a[14]; + tot += (udpint)a[11] * a[13]; + tot *= 2; + tot += (udpint)a[12] * a[12]; + t += tot; + t += (udpint)v7 * p17; + c[6] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[8] * a[17]; + tot += (udpint)a[9] * a[16]; + tot += (udpint)a[10] * a[15]; + tot += (udpint)a[11] * a[14]; + tot += (udpint)a[12] * a[13]; + tot *= 2; + t += tot; + t += (udpint)v8 * p17; + c[7] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[9] * a[17]; + tot += (udpint)a[10] * a[16]; + tot += (udpint)a[11] * a[15]; + tot += (udpint)a[12] * a[14]; + tot *= 2; + tot += (udpint)a[13] * a[13]; + t += tot; + t += (udpint)v9 * p17; + c[8] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[10] * a[17]; + tot += (udpint)a[11] * a[16]; + tot += (udpint)a[12] * a[15]; + tot += (udpint)a[13] * a[14]; + tot *= 2; + t += tot; + t += (udpint)v10 * p17; + c[9] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[11] * a[17]; + tot += (udpint)a[12] * a[16]; + tot += (udpint)a[13] * a[15]; + tot *= 2; + tot += (udpint)a[14] * a[14]; + t += tot; + t += (udpint)v11 * p17; + c[10] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[12] * a[17]; + tot += (udpint)a[13] * a[16]; + tot += (udpint)a[14] * a[15]; + tot *= 2; + t += tot; + t += (udpint)v12 * p17; + c[11] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[13] * a[17]; + tot += (udpint)a[14] * a[16]; + tot *= 2; + tot += (udpint)a[15] * a[15]; + t += tot; + t += (udpint)v13 * p17; + c[12] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[14] * a[17]; + tot += (udpint)a[15] * a[16]; + tot *= 2; + t += tot; + t += (udpint)v14 * p17; + c[13] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[15] * a[17]; + tot *= 2; + tot += (udpint)a[16] * a[16]; + t += tot; + t += (udpint)v15 * p17; + c[14] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[16] * a[17]; + tot *= 2; + t += tot; + t += (udpint)v16 * p17; + c[15] = ((spint)t & mask); + t >>= 29; + tot = (udpint)a[17] * a[17]; + t += tot; + t += (udpint)v17 * p17; + c[16] = ((spint)t & mask); + t >>= 29; + c[17] = (spint)t; +} + +// copy +static void modcpy(const spint *a, spint *c) { + int i; + for (i = 0; i < 18; i++) { + c[i] = a[i]; + } +} + +// square n times +static void modnsqr(spint *a, int n) { + int i; + for (i = 0; i < n; i++) { + modsqr(a, a); + } +} + +// Calculate progenitor +static void modpro(const spint *w, spint *z) { + spint x[18]; + spint t0[18]; + spint t1[18]; + spint t2[18]; + spint t3[18]; + spint t4[18]; + spint t5[18]; + spint t6[18]; + modcpy(w, x); + modcpy(x, z); + modnsqr(z, 2); + modmul(x, z, t0); + modmul(x, t0, z); + modsqr(z, t1); + modmul(x, t1, t1); + modsqr(t1, t3); + modsqr(t3, t2); + modmul(t3, t2, t4); + modsqr(t4, t5); + modcpy(t5, t2); + modnsqr(t2, 2); + modsqr(t2, t6); + modmul(t2, t6, t6); + modmul(t5, t6, t5); + modnsqr(t5, 5); + modmul(t2, t5, t2); + modcpy(t2, t5); + modnsqr(t5, 12); + modmul(t2, t5, t2); + modcpy(t2, t5); + modnsqr(t5, 2); + modmul(t2, t5, t5); + modmul(t4, t5, t4); + modsqr(t4, t5); + modmul(t2, t5, t2); + modmul(t4, t2, t4); + modnsqr(t4, 27); + modmul(t2, t4, t2); + modmul(t1, t2, t2); + modcpy(t2, t4); + modnsqr(t4, 2); + modmul(t3, t4, t3); + modnsqr(t3, 58); + modmul(t2, t3, t2); + modmul(z, t2, z); + modcpy(z, t2); + modnsqr(t2, 4); + modmul(t1, t2, t1); + modmul(t0, t1, t0); + modmul(t1, t0, t1); + modsqr(t1, t2); + modmul(t0, t2, t0); + modcpy(t0, t2); + modnsqr(t2, 2); + modmul(t0, t2, t2); + modmul(t1, t2, t1); + modmul(t0, t1, t0); + modnsqr(t1, 
128); + modmul(t0, t1, t1); + modnsqr(t1, 128); + modmul(t0, t1, t0); + modnsqr(t0, 119); + modmul(z, t0, z); +} + +// calculate inverse, provide progenitor h if available +static void modinv(const spint *x, const spint *h, spint *z) { + spint s[18]; + spint t[18]; + if (h == NULL) { + modpro(x, t); + } else { + modcpy(h, t); + } + modcpy(x, s); + modnsqr(t, 2); + modmul(s, t, z); +} + +// Convert m to n-residue form, n=nres(m) +static void nres(const spint *m, spint *n) { + const spint c[18] = {0x19a29700u, 0x12f6878u, 0x17b425edu, 0x1a12f684u, + 0x97b425eu, 0x1da12f68u, 0x1097b425u, 0xbda12f6u, + 0xd097b42u, 0x4bda12fu, 0x1ed097b4u, 0x84bda12u, + 0x5ed097bu, 0x1684bda1u, 0x25ed097u, 0xf684bdau, + 0x1425ed09u, 0x4bdu}; + modmul(m, c, n); +} + +// Convert n back to normal form, m=redc(n) +static void redc(const spint *n, spint *m) { + int i; + spint c[18]; + c[0] = 1; + for (i = 1; i < 18; i++) { + c[i] = 0; + } + modmul(n, c, m); + (void)modfsb(m); +} + +// is unity? +static int modis1(const spint *a) { + int i; + spint c[18]; + spint c0; + spint d = 0; + redc(a, c); + for (i = 1; i < 18; i++) { + d |= c[i]; + } + c0 = (spint)c[0]; + return ((spint)1 & ((d - (spint)1) >> 29u) & + (((c0 ^ (spint)1) - (spint)1) >> 29u)); +} + +// is zero? +static int modis0(const spint *a) { + int i; + spint c[18]; + spint d = 0; + redc(a, c); + for (i = 0; i < 18; i++) { + d |= c[i]; + } + return ((spint)1 & ((d - (spint)1) >> 29u)); +} + +// set to zero +static void modzer(spint *a) { + int i; + for (i = 0; i < 18; i++) { + a[i] = 0; + } +} + +// set to one +static void modone(spint *a) { + int i; + a[0] = 1; + for (i = 1; i < 18; i++) { + a[i] = 0; + } + nres(a, a); +} + +// set to integer +static void modint(int x, spint *a) { + int i; + a[0] = (spint)x; + for (i = 1; i < 18; i++) { + a[i] = 0; + } + nres(a, a); +} + +// Modular multiplication by an integer, c=a*b mod 2p +static void modmli(const spint *a, int b, spint *c) { + spint t[18]; + modint(b, t); + modmul(a, t, c); +} + +// Test for quadratic residue +static int modqr(const spint *h, const spint *x) { + spint r[18]; + if (h == NULL) { + modpro(x, r); + modsqr(r, r); + } else { + modsqr(h, r); + } + modmul(r, x, r); + return modis1(r) | modis0(x); +} + +// conditional move g to f if d=1 +// strongly recommend inlining be disabled using compiler specific syntax +static void modcmv(int b, const spint *g, volatile spint *f) { + int i; + spint c0, c1, s, t; + spint r = 0x5aa5a55au; + c0 = (1 - b) + r; + c1 = b + r; + for (i = 0; i < 18; i++) { + s = g[i]; + t = f[i]; + f[i] = c0 * t + c1 * s; + f[i] -= r * (t + s); + } +} + +// conditional swap g and f if d=1 +// strongly recommend inlining be disabled using compiler specific syntax +static void modcsw(int b, volatile spint *g, volatile spint *f) { + int i; + spint c0, c1, s, t, w; + spint r = 0x5aa5a55au; + c0 = (1 - b) + r; + c1 = b + r; + for (i = 0; i < 18; i++) { + s = g[i]; + t = f[i]; + w = r * (t + s); + f[i] = c0 * t + c1 * s; + f[i] -= w; + g[i] = c0 * s + c1 * t; + g[i] -= w; + } +} + +// Modular square root, provide progenitor h if available, NULL if not +static void modsqrt(const spint *x, const spint *h, spint *r) { + spint s[18]; + spint y[18]; + if (h == NULL) { + modpro(x, y); + } else { + modcpy(h, y); + } + modmul(y, x, s); + modcpy(s, r); +} + +// shift left by less than a word +static void modshl(unsigned int n, spint *a) { + int i; + a[17] = ((a[17] << n)) | (a[16] >> (29u - n)); + for (i = 16; i > 0; i--) { + a[i] = ((a[i] << n) & (spint)0x1fffffff) | (a[i - 1] >> (29u - n)); 
+ } + a[0] = (a[0] << n) & (spint)0x1fffffff; +} + +// shift right by less than a word. Return shifted out part +static int modshr(unsigned int n, spint *a) { + int i; + spint r = a[0] & (((spint)1 << n) - (spint)1); + for (i = 0; i < 17; i++) { + a[i] = (a[i] >> n) | ((a[i + 1] << (29u - n)) & (spint)0x1fffffff); + } + a[17] = a[17] >> n; + return r; +} + +// set a= 2^r +static void mod2r(unsigned int r, spint *a) { + unsigned int n = r / 29u; + unsigned int m = r % 29u; + modzer(a); + if (r >= 64 * 8) + return; + a[n] = 1; + a[n] <<= m; + nres(a, a); +} + +// export to byte array +static void modexp(const spint *a, char *b) { + int i; + spint c[18]; + redc(a, c); + for (i = 63; i >= 0; i--) { + b[i] = c[0] & (spint)0xff; + (void)modshr(8, c); + } +} + +// import from byte array +// returns 1 if in range, else 0 +static int modimp(const char *b, spint *a) { + int i, res; + for (i = 0; i < 18; i++) { + a[i] = 0; + } + for (i = 0; i < 64; i++) { + modshl(8, a); + a[0] += (spint)(unsigned char)b[i]; + } + res = modfsb(a); + nres(a, a); + return res; +} + +// determine sign +static int modsign(const spint *a) { + spint c[18]; + redc(a, c); + return c[0] % 2; +} + +// return true if equal +static int modcmp(const spint *a, const spint *b) { + spint c[18], d[18]; + int i, eq = 1; + redc(a, c); + redc(b, d); + for (i = 0; i < 18; i++) { + eq &= (((c[i] ^ d[i]) - 1) >> 29) & 1; + } + return eq; +} + +// clang-format on +/****************************************************************************** + API functions calling generated code above + ******************************************************************************/ + +#include + +const digit_t ZERO[NWORDS_FIELD] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }; +const digit_t ONE[NWORDS_FIELD] = { 0x00025ed0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000800 }; +// Montgomery representation of 2^-1 +static const digit_t TWO_INV[NWORDS_FIELD] = { 0x00012f68, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000400 }; +// Montgomery representation of 3^-1 +static const digit_t THREE_INV[NWORDS_FIELD] = { + 0x15561f9a, 0x0aaaaaaa, 0x15555555, 0x0aaaaaaa, 0x15555555, 0x0aaaaaaa, 0x15555555, 0x0aaaaaaa, 0x15555555, + 0x0aaaaaaa, 0x15555555, 0x0aaaaaaa, 0x15555555, 0x0aaaaaaa, 0x15555555, 0x0aaaaaaa, 0x15555555, 0x00000baa +}; +// Montgomery representation of 2^512 +static const digit_t R2[NWORDS_FIELD] = { 0x03c668a5, 0x0f684bda, 0x1425ed09, 0x12f684bd, 0x1b425ed0, 0x012f684b, + 0x17b425ed, 0x1a12f684, 0x097b425e, 0x1da12f68, 0x1097b425, 0x0bda12f6, + 0x0d097b42, 0x04bda12f, 0x1ed097b4, 0x084bda12, 0x05ed097b, 0x00000a21 }; + +void +fp_set_small(fp_t *x, const digit_t val) +{ + modint((int)val, *x); +} + +void +fp_mul_small(fp_t *x, const fp_t *a, const uint32_t val) +{ + modmli(*a, (int)val, *x); +} + +void +fp_set_zero(fp_t *x) +{ + modzer(*x); +} + +void +fp_set_one(fp_t *x) +{ + modone(*x); +} + +uint32_t +fp_is_equal(const fp_t *a, const fp_t *b) +{ + return -(uint32_t)modcmp(*a, *b); +} + +uint32_t +fp_is_zero(const fp_t *a) +{ + return -(uint32_t)modis0(*a); +} + +void +fp_copy(fp_t *out, const fp_t *a) +{ + modcpy(*a, *out); +} + +void +fp_cswap(fp_t *a, fp_t *b, uint32_t ctl) +{ + 
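+    // Note: modcsw() performs the swap with masked arithmetic driven by bit 0 of ctl,
+    // so there is no secret-dependent branch.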
modcsw((int)(ctl & 0x1), *a, *b); +} + +void +fp_add(fp_t *out, const fp_t *a, const fp_t *b) +{ + modadd(*a, *b, *out); +} + +void +fp_sub(fp_t *out, const fp_t *a, const fp_t *b) +{ + modsub(*a, *b, *out); +} + +void +fp_neg(fp_t *out, const fp_t *a) +{ + modneg(*a, *out); +} + +void +fp_sqr(fp_t *out, const fp_t *a) +{ + modsqr(*a, *out); +} + +void +fp_mul(fp_t *out, const fp_t *a, const fp_t *b) +{ + modmul(*a, *b, *out); +} + +void +fp_inv(fp_t *x) +{ + modinv(*x, NULL, *x); +} + +uint32_t +fp_is_square(const fp_t *a) +{ + return -(uint32_t)modqr(NULL, *a); +} + +void +fp_sqrt(fp_t *a) +{ + modsqrt(*a, NULL, *a); +} + +void +fp_half(fp_t *out, const fp_t *a) +{ + modmul(TWO_INV, *a, *out); +} + +void +fp_exp3div4(fp_t *out, const fp_t *a) +{ + modpro(*a, *out); +} + +void +fp_div3(fp_t *out, const fp_t *a) +{ + modmul(THREE_INV, *a, *out); +} + +void +fp_encode(void *dst, const fp_t *a) +{ + // Modified version of modexp() + int i; + spint c[18]; + redc(*a, c); + for (i = 0; i < 64; i++) { + ((char *)dst)[i] = c[0] & (spint)0xff; + (void)modshr(8, c); + } +} + +uint32_t +fp_decode(fp_t *d, const void *src) +{ + // Modified version of modimp() + int i; + spint res; + const unsigned char *b = src; + for (i = 0; i < 18; i++) { + (*d)[i] = 0; + } + for (i = 63; i >= 0; i--) { + modshl(8, *d); + (*d)[0] += (spint)b[i]; + } + res = (spint)-modfsb(*d); + nres(*d, *d); + // If the value was canonical then res = -1; otherwise, res = 0 + for (i = 0; i < 18; i++) { + (*d)[i] &= res; + } + return (uint32_t)res; +} + +static inline unsigned char +add_carry(unsigned char cc, spint a, spint b, spint *d) +{ + udpint t = (udpint)a + (udpint)b + cc; + *d = (spint)t; + return (unsigned char)(t >> Wordlength); +} + +static void +partial_reduce(spint *out, const spint *src) +{ + spint h, l, quo, rem; + unsigned char cc; + + // Split value in high (12 bits) and low (500 bits) parts. + h = src[15] >> 20; + l = src[15] & 0x000FFFFF; + + // 27*2^500 = 1 mod q; hence, we add floor(h/27) + (h mod 27)*2^500 + // to the low part. + quo = (h * 0x12F7) >> 17; + rem = h - (27 * quo); + cc = add_carry(0, src[0], quo, &out[0]); + cc = add_carry(cc, src[1], 0, &out[1]); + cc = add_carry(cc, src[2], 0, &out[2]); + cc = add_carry(cc, src[3], 0, &out[3]); + cc = add_carry(cc, src[4], 0, &out[4]); + cc = add_carry(cc, src[5], 0, &out[5]); + cc = add_carry(cc, src[6], 0, &out[6]); + cc = add_carry(cc, src[7], 0, &out[7]); + cc = add_carry(cc, src[8], 0, &out[8]); + cc = add_carry(cc, src[9], 0, &out[9]); + cc = add_carry(cc, src[10], 0, &out[10]); + cc = add_carry(cc, src[11], 0, &out[11]); + cc = add_carry(cc, src[12], 0, &out[12]); + cc = add_carry(cc, src[13], 0, &out[13]); + cc = add_carry(cc, src[14], 0, &out[14]); + (void)add_carry(cc, l, rem << 20, &out[15]); +} + +// Little-endian encoding of a 32-bit integer. +static inline void +enc32le(void *dst, uint32_t x) +{ + uint8_t *buf = dst; + buf[0] = (uint8_t)x; + buf[1] = (uint8_t)(x >> 8); + buf[2] = (uint8_t)(x >> 16); + buf[3] = (uint8_t)(x >> 24); +} + +// Little-endian decoding of a 32-bit integer. 
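+// Both helpers are used by fp_decode_reduce() below to unpack incoming 64-byte
+// blocks and to re-encode them after partial reduction.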
+static inline uint32_t +dec32le(const void *src) +{ + const uint8_t *buf = src; + return (spint)buf[0] | ((spint)buf[1] << 8) | ((spint)buf[2] << 16) | ((spint)buf[3] << 24); +} + +void +fp_decode_reduce(fp_t *d, const void *src, size_t len) +{ + uint32_t t[16]; // Stores Nbytes * 8 bits + uint8_t tmp[64]; // Nbytes + const uint8_t *b = src; + + fp_set_zero(d); + if (len == 0) { + return; + } + + size_t rem = len % 64; + if (rem != 0) { + // Input size is not a multiple of 64, we decode a partial + // block, which is already less than 2^500. + size_t k = len - rem; + memcpy(tmp, b + k, len - k); + memset(tmp + len - k, 0, (sizeof tmp) - (len - k)); + fp_decode(d, tmp); + len = k; + } + // Process all remaining blocks, in descending address order. + while (len > 0) { + fp_mul(d, d, &R2); + len -= 64; + t[0] = dec32le(b + len); + t[1] = dec32le(b + len + 4); + t[2] = dec32le(b + len + 8); + t[3] = dec32le(b + len + 12); + t[4] = dec32le(b + len + 16); + t[5] = dec32le(b + len + 20); + t[6] = dec32le(b + len + 24); + t[7] = dec32le(b + len + 28); + t[8] = dec32le(b + len + 32); + t[9] = dec32le(b + len + 36); + t[10] = dec32le(b + len + 40); + t[11] = dec32le(b + len + 44); + t[12] = dec32le(b + len + 48); + t[13] = dec32le(b + len + 52); + t[14] = dec32le(b + len + 56); + t[15] = dec32le(b + len + 60); + partial_reduce(t, t); + enc32le(tmp, t[0]); + enc32le(tmp + 4, t[1]); + enc32le(tmp + 8, t[2]); + enc32le(tmp + 12, t[3]); + enc32le(tmp + 16, t[4]); + enc32le(tmp + 20, t[5]); + enc32le(tmp + 24, t[6]); + enc32le(tmp + 28, t[7]); + enc32le(tmp + 32, t[8]); + enc32le(tmp + 36, t[9]); + enc32le(tmp + 40, t[10]); + enc32le(tmp + 44, t[11]); + enc32le(tmp + 48, t[12]); + enc32le(tmp + 52, t[13]); + enc32le(tmp + 56, t[14]); + enc32le(tmp + 60, t[15]); + fp_t a; + fp_decode(&a, tmp); + fp_add(d, d, &a); + } +} + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c new file mode 100644 index 0000000000..c187e878eb --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c @@ -0,0 +1,973 @@ +// clang-format off +// Command line : python monty.py 64 +// 0x1afffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff +#ifdef RADIX_64 + +#include +#include + +#define sspint int64_t +#define spint uint64_t +#define udpint __uint128_t +#define dpint __uint128_t + +#define Wordlength 64 +#define Nlimbs 9 +#define Radix 57 +#define Nbits 505 +#define Nbytes 64 + +#define MONTGOMERY +// propagate carries +inline static spint prop(spint *n) { + int i; + spint mask = ((spint)1 << 57u) - (spint)1; + sspint carry = (sspint)n[0]; + carry >>= 57u; + n[0] &= mask; + for (i = 1; i < 8; i++) { + carry += (sspint)n[i]; + n[i] = (spint)carry & mask; + carry >>= 57u; + } + n[8] += (spint)carry; + return -((n[8] >> 1) >> 62u); +} + +// propagate carries and add p if negative, propagate carries again +inline static int flatten(spint *n) { + spint carry = prop(n); + n[0] -= (spint)1u & carry; + n[8] += ((spint)0x1b00000000000u) & carry; + (void)prop(n); + return (int)(carry & 1); +} + +// Montgomery final subtract +inline static int modfsb(spint *n) { + n[0] += (spint)1u; + n[8] -= (spint)0x1b00000000000u; + return flatten(n); +} + +// Modular addition - reduce less than 2p +inline static void modadd(const spint *a, const spint *b, spint *n) { + spint carry; + n[0] = a[0] + b[0]; + n[1] = a[1] + b[1]; + n[2] = a[2] + b[2]; + n[3] = a[3] + b[3]; 
+ n[4] = a[4] + b[4]; + n[5] = a[5] + b[5]; + n[6] = a[6] + b[6]; + n[7] = a[7] + b[7]; + n[8] = a[8] + b[8]; + n[0] += (spint)2u; + n[8] -= (spint)0x3600000000000u; + carry = prop(n); + n[0] -= (spint)2u & carry; + n[8] += ((spint)0x3600000000000u) & carry; + (void)prop(n); +} + +// Modular subtraction - reduce less than 2p +inline static void modsub(const spint *a, const spint *b, spint *n) { + spint carry; + n[0] = a[0] - b[0]; + n[1] = a[1] - b[1]; + n[2] = a[2] - b[2]; + n[3] = a[3] - b[3]; + n[4] = a[4] - b[4]; + n[5] = a[5] - b[5]; + n[6] = a[6] - b[6]; + n[7] = a[7] - b[7]; + n[8] = a[8] - b[8]; + carry = prop(n); + n[0] -= (spint)2u & carry; + n[8] += ((spint)0x3600000000000u) & carry; + (void)prop(n); +} + +// Modular negation +inline static void modneg(const spint *b, spint *n) { + spint carry; + n[0] = (spint)0 - b[0]; + n[1] = (spint)0 - b[1]; + n[2] = (spint)0 - b[2]; + n[3] = (spint)0 - b[3]; + n[4] = (spint)0 - b[4]; + n[5] = (spint)0 - b[5]; + n[6] = (spint)0 - b[6]; + n[7] = (spint)0 - b[7]; + n[8] = (spint)0 - b[8]; + carry = prop(n); + n[0] -= (spint)2u & carry; + n[8] += ((spint)0x3600000000000u) & carry; + (void)prop(n); +} + +// Overflow limit = 340282366920938463463374607431768211456 +// maximum possible = 186991140039668477603471750259015689 +// Modular multiplication, c=a*b mod 2p +inline static void modmul(const spint *a, const spint *b, spint *c) { + dpint t = 0; + spint p8 = 0x1b00000000000u; + spint q = ((spint)1 << 57u); // q is unsaturated radix + spint mask = (spint)(q - (spint)1); + t += (dpint)a[0] * b[0]; + spint v0 = ((spint)t & mask); + t >>= 57; + t += (dpint)a[0] * b[1]; + t += (dpint)a[1] * b[0]; + spint v1 = ((spint)t & mask); + t >>= 57; + t += (dpint)a[0] * b[2]; + t += (dpint)a[1] * b[1]; + t += (dpint)a[2] * b[0]; + spint v2 = ((spint)t & mask); + t >>= 57; + t += (dpint)a[0] * b[3]; + t += (dpint)a[1] * b[2]; + t += (dpint)a[2] * b[1]; + t += (dpint)a[3] * b[0]; + spint v3 = ((spint)t & mask); + t >>= 57; + t += (dpint)a[0] * b[4]; + t += (dpint)a[1] * b[3]; + t += (dpint)a[2] * b[2]; + t += (dpint)a[3] * b[1]; + t += (dpint)a[4] * b[0]; + spint v4 = ((spint)t & mask); + t >>= 57; + t += (dpint)a[0] * b[5]; + t += (dpint)a[1] * b[4]; + t += (dpint)a[2] * b[3]; + t += (dpint)a[3] * b[2]; + t += (dpint)a[4] * b[1]; + t += (dpint)a[5] * b[0]; + spint v5 = ((spint)t & mask); + t >>= 57; + t += (dpint)a[0] * b[6]; + t += (dpint)a[1] * b[5]; + t += (dpint)a[2] * b[4]; + t += (dpint)a[3] * b[3]; + t += (dpint)a[4] * b[2]; + t += (dpint)a[5] * b[1]; + t += (dpint)a[6] * b[0]; + spint v6 = ((spint)t & mask); + t >>= 57; + t += (dpint)a[0] * b[7]; + t += (dpint)a[1] * b[6]; + t += (dpint)a[2] * b[5]; + t += (dpint)a[3] * b[4]; + t += (dpint)a[4] * b[3]; + t += (dpint)a[5] * b[2]; + t += (dpint)a[6] * b[1]; + t += (dpint)a[7] * b[0]; + spint v7 = ((spint)t & mask); + t >>= 57; + t += (dpint)a[0] * b[8]; + t += (dpint)a[1] * b[7]; + t += (dpint)a[2] * b[6]; + t += (dpint)a[3] * b[5]; + t += (dpint)a[4] * b[4]; + t += (dpint)a[5] * b[3]; + t += (dpint)a[6] * b[2]; + t += (dpint)a[7] * b[1]; + t += (dpint)a[8] * b[0]; + t += (dpint)v0 * (dpint)p8; + spint v8 = ((spint)t & mask); + t >>= 57; + t += (dpint)a[1] * b[8]; + t += (dpint)a[2] * b[7]; + t += (dpint)a[3] * b[6]; + t += (dpint)a[4] * b[5]; + t += (dpint)a[5] * b[4]; + t += (dpint)a[6] * b[3]; + t += (dpint)a[7] * b[2]; + t += (dpint)a[8] * b[1]; + t += (dpint)v1 * (dpint)p8; + c[0] = ((spint)t & mask); + t >>= 57; + t += (dpint)a[2] * b[8]; + t += (dpint)a[3] * b[7]; + t += (dpint)a[4] * b[6]; + t += 
(dpint)a[5] * b[5]; + t += (dpint)a[6] * b[4]; + t += (dpint)a[7] * b[3]; + t += (dpint)a[8] * b[2]; + t += (dpint)v2 * (dpint)p8; + c[1] = ((spint)t & mask); + t >>= 57; + t += (dpint)a[3] * b[8]; + t += (dpint)a[4] * b[7]; + t += (dpint)a[5] * b[6]; + t += (dpint)a[6] * b[5]; + t += (dpint)a[7] * b[4]; + t += (dpint)a[8] * b[3]; + t += (dpint)v3 * (dpint)p8; + c[2] = ((spint)t & mask); + t >>= 57; + t += (dpint)a[4] * b[8]; + t += (dpint)a[5] * b[7]; + t += (dpint)a[6] * b[6]; + t += (dpint)a[7] * b[5]; + t += (dpint)a[8] * b[4]; + t += (dpint)v4 * (dpint)p8; + c[3] = ((spint)t & mask); + t >>= 57; + t += (dpint)a[5] * b[8]; + t += (dpint)a[6] * b[7]; + t += (dpint)a[7] * b[6]; + t += (dpint)a[8] * b[5]; + t += (dpint)v5 * (dpint)p8; + c[4] = ((spint)t & mask); + t >>= 57; + t += (dpint)a[6] * b[8]; + t += (dpint)a[7] * b[7]; + t += (dpint)a[8] * b[6]; + t += (dpint)v6 * (dpint)p8; + c[5] = ((spint)t & mask); + t >>= 57; + t += (dpint)a[7] * b[8]; + t += (dpint)a[8] * b[7]; + t += (dpint)v7 * (dpint)p8; + c[6] = ((spint)t & mask); + t >>= 57; + t += (dpint)a[8] * b[8]; + t += (dpint)v8 * (dpint)p8; + c[7] = ((spint)t & mask); + t >>= 57; + c[8] = (spint)t; +} + +// Modular squaring, c=a*a mod 2p +inline static void modsqr(const spint *a, spint *c) { + udpint tot; + udpint t = 0; + spint p8 = 0x1b00000000000u; + spint q = ((spint)1 << 57u); // q is unsaturated radix + spint mask = (spint)(q - (spint)1); + tot = (udpint)a[0] * a[0]; + t = tot; + spint v0 = ((spint)t & mask); + t >>= 57; + tot = (udpint)a[0] * a[1]; + tot *= 2; + t += tot; + spint v1 = ((spint)t & mask); + t >>= 57; + tot = (udpint)a[0] * a[2]; + tot *= 2; + tot += (udpint)a[1] * a[1]; + t += tot; + spint v2 = ((spint)t & mask); + t >>= 57; + tot = (udpint)a[0] * a[3]; + tot += (udpint)a[1] * a[2]; + tot *= 2; + t += tot; + spint v3 = ((spint)t & mask); + t >>= 57; + tot = (udpint)a[0] * a[4]; + tot += (udpint)a[1] * a[3]; + tot *= 2; + tot += (udpint)a[2] * a[2]; + t += tot; + spint v4 = ((spint)t & mask); + t >>= 57; + tot = (udpint)a[0] * a[5]; + tot += (udpint)a[1] * a[4]; + tot += (udpint)a[2] * a[3]; + tot *= 2; + t += tot; + spint v5 = ((spint)t & mask); + t >>= 57; + tot = (udpint)a[0] * a[6]; + tot += (udpint)a[1] * a[5]; + tot += (udpint)a[2] * a[4]; + tot *= 2; + tot += (udpint)a[3] * a[3]; + t += tot; + spint v6 = ((spint)t & mask); + t >>= 57; + tot = (udpint)a[0] * a[7]; + tot += (udpint)a[1] * a[6]; + tot += (udpint)a[2] * a[5]; + tot += (udpint)a[3] * a[4]; + tot *= 2; + t += tot; + spint v7 = ((spint)t & mask); + t >>= 57; + tot = (udpint)a[0] * a[8]; + tot += (udpint)a[1] * a[7]; + tot += (udpint)a[2] * a[6]; + tot += (udpint)a[3] * a[5]; + tot *= 2; + tot += (udpint)a[4] * a[4]; + t += tot; + t += (udpint)v0 * p8; + spint v8 = ((spint)t & mask); + t >>= 57; + tot = (udpint)a[1] * a[8]; + tot += (udpint)a[2] * a[7]; + tot += (udpint)a[3] * a[6]; + tot += (udpint)a[4] * a[5]; + tot *= 2; + t += tot; + t += (udpint)v1 * p8; + c[0] = ((spint)t & mask); + t >>= 57; + tot = (udpint)a[2] * a[8]; + tot += (udpint)a[3] * a[7]; + tot += (udpint)a[4] * a[6]; + tot *= 2; + tot += (udpint)a[5] * a[5]; + t += tot; + t += (udpint)v2 * p8; + c[1] = ((spint)t & mask); + t >>= 57; + tot = (udpint)a[3] * a[8]; + tot += (udpint)a[4] * a[7]; + tot += (udpint)a[5] * a[6]; + tot *= 2; + t += tot; + t += (udpint)v3 * p8; + c[2] = ((spint)t & mask); + t >>= 57; + tot = (udpint)a[4] * a[8]; + tot += (udpint)a[5] * a[7]; + tot *= 2; + tot += (udpint)a[6] * a[6]; + t += tot; + t += (udpint)v4 * p8; + c[3] = ((spint)t & mask); + t 
>>= 57; + tot = (udpint)a[5] * a[8]; + tot += (udpint)a[6] * a[7]; + tot *= 2; + t += tot; + t += (udpint)v5 * p8; + c[4] = ((spint)t & mask); + t >>= 57; + tot = (udpint)a[6] * a[8]; + tot *= 2; + tot += (udpint)a[7] * a[7]; + t += tot; + t += (udpint)v6 * p8; + c[5] = ((spint)t & mask); + t >>= 57; + tot = (udpint)a[7] * a[8]; + tot *= 2; + t += tot; + t += (udpint)v7 * p8; + c[6] = ((spint)t & mask); + t >>= 57; + tot = (udpint)a[8] * a[8]; + t += tot; + t += (udpint)v8 * p8; + c[7] = ((spint)t & mask); + t >>= 57; + c[8] = (spint)t; +} + +// copy +inline static void modcpy(const spint *a, spint *c) { + int i; + for (i = 0; i < 9; i++) { + c[i] = a[i]; + } +} + +// square n times +static void modnsqr(spint *a, int n) { + int i; + for (i = 0; i < n; i++) { + modsqr(a, a); + } +} + +// Calculate progenitor +static void modpro(const spint *w, spint *z) { + spint x[9]; + spint t0[9]; + spint t1[9]; + spint t2[9]; + spint t3[9]; + spint t4[9]; + spint t5[9]; + spint t6[9]; + modcpy(w, x); + modcpy(x, z); + modnsqr(z, 2); + modmul(x, z, t0); + modmul(x, t0, z); + modsqr(z, t1); + modmul(x, t1, t1); + modsqr(t1, t3); + modsqr(t3, t2); + modmul(t3, t2, t4); + modsqr(t4, t5); + modcpy(t5, t2); + modnsqr(t2, 2); + modsqr(t2, t6); + modmul(t2, t6, t6); + modmul(t5, t6, t5); + modnsqr(t5, 5); + modmul(t2, t5, t2); + modcpy(t2, t5); + modnsqr(t5, 12); + modmul(t2, t5, t2); + modcpy(t2, t5); + modnsqr(t5, 2); + modmul(t2, t5, t5); + modmul(t4, t5, t4); + modsqr(t4, t5); + modmul(t2, t5, t2); + modmul(t4, t2, t4); + modnsqr(t4, 27); + modmul(t2, t4, t2); + modmul(t1, t2, t2); + modcpy(t2, t4); + modnsqr(t4, 2); + modmul(t3, t4, t3); + modnsqr(t3, 58); + modmul(t2, t3, t2); + modmul(z, t2, z); + modcpy(z, t2); + modnsqr(t2, 4); + modmul(t1, t2, t1); + modmul(t0, t1, t0); + modmul(t1, t0, t1); + modsqr(t1, t2); + modmul(t0, t2, t0); + modcpy(t0, t2); + modnsqr(t2, 2); + modmul(t0, t2, t2); + modmul(t1, t2, t1); + modmul(t0, t1, t0); + modnsqr(t1, 128); + modmul(t0, t1, t1); + modnsqr(t1, 128); + modmul(t0, t1, t0); + modnsqr(t0, 119); + modmul(z, t0, z); +} + +// calculate inverse, provide progenitor h if available +static void modinv(const spint *x, const spint *h, spint *z) { + spint s[9]; + spint t[9]; + if (h == NULL) { + modpro(x, t); + } else { + modcpy(h, t); + } + modcpy(x, s); + modnsqr(t, 2); + modmul(s, t, z); +} + +// Convert m to n-residue form, n=nres(m) +static void nres(const spint *m, spint *n) { + const spint c[9] = { + 0x25ed097b43c668u, 0x84bda12f684bdau, 0xd097b425ed097bu, + 0x1da12f684bda12fu, 0x17b425ed097b425u, 0x12f684bda12f684u, + 0x25ed097b425ed0u, 0x84bda12f684bdau, 0x117b425ed097bu}; + modmul(m, c, n); +} + +// Convert n back to normal form, m=redc(n) +static void redc(const spint *n, spint *m) { + int i; + spint c[9]; + c[0] = 1; + for (i = 1; i < 9; i++) { + c[i] = 0; + } + modmul(n, c, m); + (void)modfsb(m); +} + +// is unity? +static int modis1(const spint *a) { + int i; + spint c[9]; + spint c0; + spint d = 0; + redc(a, c); + for (i = 1; i < 9; i++) { + d |= c[i]; + } + c0 = (spint)c[0]; + return ((spint)1 & ((d - (spint)1) >> 57u) & + (((c0 ^ (spint)1) - (spint)1) >> 57u)); +} + +// is zero? 
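+// Constant-time test: redc() yields the fully reduced value, all limbs are
+// OR-ed into d, and the carry out of (d - 1) is 1 exactly when d == 0.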
+static int modis0(const spint *a) { + int i; + spint c[9]; + spint d = 0; + redc(a, c); + for (i = 0; i < 9; i++) { + d |= c[i]; + } + return ((spint)1 & ((d - (spint)1) >> 57u)); +} + +// set to zero +static void modzer(spint *a) { + int i; + for (i = 0; i < 9; i++) { + a[i] = 0; + } +} + +// set to one +static void modone(spint *a) { + int i; + a[0] = 1; + for (i = 1; i < 9; i++) { + a[i] = 0; + } + nres(a, a); +} + +// set to integer +static void modint(int x, spint *a) { + int i; + a[0] = (spint)x; + for (i = 1; i < 9; i++) { + a[i] = 0; + } + nres(a, a); +} + +// Modular multiplication by an integer, c=a*b mod 2p +inline static void modmli(const spint *a, int b, spint *c) { + spint t[9]; + modint(b, t); + modmul(a, t, c); +} + +// Test for quadratic residue +static int modqr(const spint *h, const spint *x) { + spint r[9]; + if (h == NULL) { + modpro(x, r); + modsqr(r, r); + } else { + modsqr(h, r); + } + modmul(r, x, r); + return modis1(r) | modis0(x); +} + +// conditional move g to f if d=1 +// strongly recommend inlining be disabled using compiler specific syntax +static void modcmv(int b, const spint *g, volatile spint *f) { + int i; + spint c0, c1, s, t; + spint r = 0x3cc3c33c5aa5a55au; + c0 = (1 - b) + r; + c1 = b + r; + for (i = 0; i < 9; i++) { + s = g[i]; + t = f[i]; + f[i] = c0 * t + c1 * s; + f[i] -= r * (t + s); + } +} + +// conditional swap g and f if d=1 +// strongly recommend inlining be disabled using compiler specific syntax +static void modcsw(int b, volatile spint *g, volatile spint *f) { + int i; + spint c0, c1, s, t, w; + spint r = 0x3cc3c33c5aa5a55au; + c0 = (1 - b) + r; + c1 = b + r; + for (i = 0; i < 9; i++) { + s = g[i]; + t = f[i]; + w = r * (t + s); + f[i] = c0 * t + c1 * s; + f[i] -= w; + g[i] = c0 * s + c1 * t; + g[i] -= w; + } +} + +// Modular square root, provide progenitor h if available, NULL if not +static void modsqrt(const spint *x, const spint *h, spint *r) { + spint s[9]; + spint y[9]; + if (h == NULL) { + modpro(x, y); + } else { + modcpy(h, y); + } + modmul(y, x, s); + modcpy(s, r); +} + +// shift left by less than a word +static void modshl(unsigned int n, spint *a) { + int i; + a[8] = ((a[8] << n)) | (a[7] >> (57u - n)); + for (i = 7; i > 0; i--) { + a[i] = ((a[i] << n) & (spint)0x1ffffffffffffff) | (a[i - 1] >> (57u - n)); + } + a[0] = (a[0] << n) & (spint)0x1ffffffffffffff; +} + +// shift right by less than a word. 
Return shifted out part +static int modshr(unsigned int n, spint *a) { + int i; + spint r = a[0] & (((spint)1 << n) - (spint)1); + for (i = 0; i < 8; i++) { + a[i] = (a[i] >> n) | ((a[i + 1] << (57u - n)) & (spint)0x1ffffffffffffff); + } + a[8] = a[8] >> n; + return r; +} + +// set a= 2^r +static void mod2r(unsigned int r, spint *a) { + unsigned int n = r / 57u; + unsigned int m = r % 57u; + modzer(a); + if (r >= 64 * 8) + return; + a[n] = 1; + a[n] <<= m; + nres(a, a); +} + +// export to byte array +static void modexp(const spint *a, char *b) { + int i; + spint c[9]; + redc(a, c); + for (i = 63; i >= 0; i--) { + b[i] = c[0] & (spint)0xff; + (void)modshr(8, c); + } +} + +// import from byte array +// returns 1 if in range, else 0 +static int modimp(const char *b, spint *a) { + int i, res; + for (i = 0; i < 9; i++) { + a[i] = 0; + } + for (i = 0; i < 64; i++) { + modshl(8, a); + a[0] += (spint)(unsigned char)b[i]; + } + res = modfsb(a); + nres(a, a); + return res; +} + +// determine sign +static int modsign(const spint *a) { + spint c[9]; + redc(a, c); + return c[0] % 2; +} + +// return true if equal +static int modcmp(const spint *a, const spint *b) { + spint c[9], d[9]; + int i, eq = 1; + redc(a, c); + redc(b, d); + for (i = 0; i < 9; i++) { + eq &= (((c[i] ^ d[i]) - 1) >> 57) & 1; + } + return eq; +} + +// clang-format on +/****************************************************************************** + API functions calling generated code above + ******************************************************************************/ + +#include + +const digit_t ZERO[NWORDS_FIELD] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }; +const digit_t ONE[NWORDS_FIELD] = { 0x000000000000012f, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000b00000000000 }; +// Montgomery representation of 2^-1 +static const digit_t TWO_INV[NWORDS_FIELD] = { 0x0000000000000097, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0001300000000000 }; +// Montgomery representation of 3^-1 +static const digit_t THREE_INV[NWORDS_FIELD] = { 0x00aaaaaaaaaaab0f, 0x0155555555555555, 0x00aaaaaaaaaaaaaa, + 0x0155555555555555, 0x00aaaaaaaaaaaaaa, 0x0155555555555555, + 0x00aaaaaaaaaaaaaa, 0x0155555555555555, 0x00015aaaaaaaaaaa }; +// Montgomery representation of 2^512 +static const digit_t R2[NWORDS_FIELD] = { 0x0012f684bda1e334, 0x01425ed097b425ed, 0x01684bda12f684bd, + 0x01ed097b425ed097, 0x00bda12f684bda12, 0x0097b425ed097b42, + 0x0012f684bda12f68, 0x01425ed097b425ed, 0x00008bda12f684bd }; + +void +fp_set_small(fp_t *x, const digit_t val) +{ + modint((int)val, *x); +} + +void +fp_mul_small(fp_t *x, const fp_t *a, const uint32_t val) +{ + modmli(*a, (int)val, *x); +} + +void +fp_set_zero(fp_t *x) +{ + modzer(*x); +} + +void +fp_set_one(fp_t *x) +{ + modone(*x); +} + +uint32_t +fp_is_equal(const fp_t *a, const fp_t *b) +{ + return -(uint32_t)modcmp(*a, *b); +} + +uint32_t +fp_is_zero(const fp_t *a) +{ + return -(uint32_t)modis0(*a); +} + +void +fp_copy(fp_t *out, const fp_t *a) +{ + modcpy(*a, *out); +} + +void +fp_cswap(fp_t *a, fp_t *b, uint32_t ctl) +{ + modcsw((int)(ctl & 0x1), *a, *b); +} + +void +fp_add(fp_t *out, const fp_t *a, const fp_t *b) +{ + modadd(*a, *b, *out); +} + +void +fp_sub(fp_t *out, const fp_t *a, const fp_t *b) +{ + modsub(*a, *b, *out); +} + +void +fp_neg(fp_t *out, const fp_t *a) +{ + modneg(*a, *out); +} + +void 
+fp_sqr(fp_t *out, const fp_t *a) +{ + modsqr(*a, *out); +} + +void +fp_mul(fp_t *out, const fp_t *a, const fp_t *b) +{ + modmul(*a, *b, *out); +} + +void +fp_inv(fp_t *x) +{ + modinv(*x, NULL, *x); +} + +uint32_t +fp_is_square(const fp_t *a) +{ + return -(uint32_t)modqr(NULL, *a); +} + +void +fp_sqrt(fp_t *a) +{ + modsqrt(*a, NULL, *a); +} + +void +fp_half(fp_t *out, const fp_t *a) +{ + modmul(TWO_INV, *a, *out); +} + +void +fp_exp3div4(fp_t *out, const fp_t *a) +{ + modpro(*a, *out); +} + +void +fp_div3(fp_t *out, const fp_t *a) +{ + modmul(THREE_INV, *a, *out); +} + +void +fp_encode(void *dst, const fp_t *a) +{ + // Modified version of modexp() + int i; + spint c[9]; + redc(*a, c); + for (i = 0; i < 64; i++) { + ((char *)dst)[i] = c[0] & (spint)0xff; + (void)modshr(8, c); + } +} + +uint32_t +fp_decode(fp_t *d, const void *src) +{ + // Modified version of modimp() + int i; + spint res; + const unsigned char *b = src; + for (i = 0; i < 9; i++) { + (*d)[i] = 0; + } + for (i = 63; i >= 0; i--) { + modshl(8, *d); + (*d)[0] += (spint)b[i]; + } + res = (spint)-modfsb(*d); + nres(*d, *d); + // If the value was canonical then res = -1; otherwise, res = 0 + for (i = 0; i < 9; i++) { + (*d)[i] &= res; + } + return (uint32_t)res; +} + +static inline unsigned char +add_carry(unsigned char cc, spint a, spint b, spint *d) +{ + udpint t = (udpint)a + (udpint)b + cc; + *d = (spint)t; + return (unsigned char)(t >> Wordlength); +} + +static void +partial_reduce(spint *out, const spint *src) +{ + spint h, l, quo, rem; + unsigned char cc; + + // Split value in high (12 bits) and low (500 bits) parts. + h = src[7] >> 52; + l = src[7] & 0x000FFFFFFFFFFFFF; + + // 27*2^500 = 1 mod q; hence, we add floor(h/27) + (h mod 27)*2^500 + // to the low part. + quo = (h * 0x12F7) >> 17; + rem = h - (27 * quo); + cc = add_carry(0, src[0], quo, &out[0]); + cc = add_carry(cc, src[1], 0, &out[1]); + cc = add_carry(cc, src[2], 0, &out[2]); + cc = add_carry(cc, src[3], 0, &out[3]); + cc = add_carry(cc, src[4], 0, &out[4]); + cc = add_carry(cc, src[5], 0, &out[5]); + cc = add_carry(cc, src[6], 0, &out[6]); + (void)add_carry(cc, l, rem << 52, &out[7]); +} + +// Little-endian encoding of a 64-bit integer. +static inline void +enc64le(void *dst, uint64_t x) +{ + uint8_t *buf = dst; + buf[0] = (uint8_t)x; + buf[1] = (uint8_t)(x >> 8); + buf[2] = (uint8_t)(x >> 16); + buf[3] = (uint8_t)(x >> 24); + buf[4] = (uint8_t)(x >> 32); + buf[5] = (uint8_t)(x >> 40); + buf[6] = (uint8_t)(x >> 48); + buf[7] = (uint8_t)(x >> 56); +} + +// Little-endian decoding of a 64-bit integer. +static inline uint64_t +dec64le(const void *src) +{ + const uint8_t *buf = src; + return (spint)buf[0] | ((spint)buf[1] << 8) | ((spint)buf[2] << 16) | ((spint)buf[3] << 24) | + ((spint)buf[4] << 32) | ((spint)buf[5] << 40) | ((spint)buf[6] << 48) | ((spint)buf[7] << 56); +} + +void +fp_decode_reduce(fp_t *d, const void *src, size_t len) +{ + uint64_t t[8]; // Stores Nbytes * 8 bits + uint8_t tmp[64]; // Nbytes + const uint8_t *b = src; + + fp_set_zero(d); + if (len == 0) { + return; + } + + size_t rem = len % 64; + if (rem != 0) { + // Input size is not a multiple of 64, we decode a partial + // block, which is already less than 2^500. + size_t k = len - rem; + memcpy(tmp, b + k, len - k); + memset(tmp + len - k, 0, (sizeof tmp) - (len - k)); + fp_decode(d, tmp); + len = k; + } + // Process all remaining blocks, in descending address order. 
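+    // Each full 64-byte block is folded in Horner-style: multiplying the
+    // accumulator by the Montgomery constant R2 shifts it left by 512 bits mod p,
+    // and the next block, pre-reduced by partial_reduce() (which uses
+    // 27*2^500 == 1 mod p), is decoded and added.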
+ while (len > 0) { + fp_mul(d, d, &R2); + len -= 64; + t[0] = dec64le(b + len); + t[1] = dec64le(b + len + 8); + t[2] = dec64le(b + len + 16); + t[3] = dec64le(b + len + 24); + t[4] = dec64le(b + len + 32); + t[5] = dec64le(b + len + 40); + t[6] = dec64le(b + len + 48); + t[7] = dec64le(b + len + 56); + partial_reduce(t, t); + enc64le(tmp, t[0]); + enc64le(tmp + 8, t[1]); + enc64le(tmp + 16, t[2]); + enc64le(tmp + 24, t[3]); + enc64le(tmp + 32, t[4]); + enc64le(tmp + 40, t[5]); + enc64le(tmp + 48, t[6]); + enc64le(tmp + 56, t[7]); + fp_t a; + fp_decode(&a, tmp); + fp_add(d, d, &a); + } +} + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd.c new file mode 100644 index 0000000000..0424108019 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd.c @@ -0,0 +1,93 @@ +#include +#include + +void +double_couple_point(theta_couple_point_t *out, const theta_couple_point_t *in, const theta_couple_curve_t *E1E2) +{ + ec_dbl(&out->P1, &in->P1, &E1E2->E1); + ec_dbl(&out->P2, &in->P2, &E1E2->E2); +} + +void +double_couple_point_iter(theta_couple_point_t *out, + unsigned n, + const theta_couple_point_t *in, + const theta_couple_curve_t *E1E2) +{ + if (n == 0) { + memmove(out, in, sizeof(theta_couple_point_t)); + } else { + double_couple_point(out, in, E1E2); + for (unsigned i = 0; i < n - 1; i++) { + double_couple_point(out, out, E1E2); + } + } +} + +void +add_couple_jac_points(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *T1, + const theta_couple_jac_point_t *T2, + const theta_couple_curve_t *E1E2) +{ + ADD(&out->P1, &T1->P1, &T2->P1, &E1E2->E1); + ADD(&out->P2, &T1->P2, &T2->P2, &E1E2->E2); +} + +void +double_couple_jac_point(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2) +{ + DBL(&out->P1, &in->P1, &E1E2->E1); + DBL(&out->P2, &in->P2, &E1E2->E2); +} + +void +double_couple_jac_point_iter(theta_couple_jac_point_t *out, + unsigned n, + const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2) +{ + if (n == 0) { + *out = *in; + } else if (n == 1) { + double_couple_jac_point(out, in, E1E2); + } else { + fp2_t a1, a2, t1, t2; + + jac_to_ws(&out->P1, &t1, &a1, &in->P1, &E1E2->E1); + jac_to_ws(&out->P2, &t2, &a2, &in->P2, &E1E2->E2); + + DBLW(&out->P1, &t1, &out->P1, &t1); + DBLW(&out->P2, &t2, &out->P2, &t2); + for (unsigned i = 0; i < n - 1; i++) { + DBLW(&out->P1, &t1, &out->P1, &t1); + DBLW(&out->P2, &t2, &out->P2, &t2); + } + + jac_from_ws(&out->P1, &out->P1, &a1, &E1E2->E1); + jac_from_ws(&out->P2, &out->P2, &a2, &E1E2->E2); + } +} + +void +couple_jac_to_xz(theta_couple_point_t *P, const theta_couple_jac_point_t *xyP) +{ + jac_to_xz(&P->P1, &xyP->P1); + jac_to_xz(&P->P2, &xyP->P2); +} + +void +copy_bases_to_kernel(theta_kernel_couple_points_t *ker, const ec_basis_t *B1, const ec_basis_t *B2) +{ + // Copy the basis on E1 to (P, _) on T1, T2 and T1 - T2 + copy_point(&ker->T1.P1, &B1->P); + copy_point(&ker->T2.P1, &B1->Q); + copy_point(&ker->T1m2.P1, &B1->PmQ); + + // Copy the basis on E2 to (_, P) on T1, T2 and T1 - T2 + copy_point(&ker->T1.P2, &B2->P); + copy_point(&ker->T2.P2, &B2->Q); + copy_point(&ker->T1m2.P2, &B2->PmQ); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd.h new file mode 100644 index 0000000000..2b16e23834 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd.h @@ -0,0 +1,435 @@ +/** @file + * + * @authors Antonin Leroux + * + * 
@brief The HD-isogenies algorithm required by the signature + * + */ + +#ifndef HD_H +#define HD_H + +#include +#include +#include + +/** @defgroup hd_module Abelian surfaces and their isogenies + * @{ + */ + +#define HD_extra_torsion 2 + +/** @defgroup hd_struct Data structures for dimension 2 + * @{ + */ + +/** @brief Type for couple point with XZ coordinates + * @typedef theta_couple_point_t + * + * @struct theta_couple_point + * + * Structure for the couple point on an elliptic product + * using XZ coordinates + */ +typedef struct theta_couple_point +{ + ec_point_t P1; + ec_point_t P2; +} theta_couple_point_t; + +/** @brief Type for three couple points T1, T2, T1-T2 with XZ coordinates + * @typedef theta_kernel_couple_points_t + * + * @struct theta_kernel_couple_points + * + * Structure for a triple of theta couple points T1, T2 and T1 - T2 + */ +typedef struct theta_kernel_couple_points +{ + theta_couple_point_t T1; + theta_couple_point_t T2; + theta_couple_point_t T1m2; +} theta_kernel_couple_points_t; + +/** @brief Type for couple point with XYZ coordinates + * @typedef theta_couple_jac_point_t + * + * @struct theta_couple_jac_point + * + * Structure for the couple point on an elliptic product + * using XYZ coordinates + */ +typedef struct theta_couple_jac_point +{ + jac_point_t P1; + jac_point_t P2; +} theta_couple_jac_point_t; + +/** @brief Type for couple curve * + * @typedef theta_couple_curve_t + * + * @struct theta_couple_curve + * + * the theta_couple_curve structure + */ +typedef struct theta_couple_curve +{ + ec_curve_t E1; + ec_curve_t E2; +} theta_couple_curve_t; + +/** @brief Type for a product E1 x E2 with corresponding bases + * @typedef theta_couple_curve_with_basis_t + * + * @struct theta_couple_curve_with_basis + * + * Type for a product E1 x E2 with corresponding bases Ei[2^n] + */ +typedef struct theta_couple_curve_with_basis +{ + ec_curve_t E1; + ec_curve_t E2; + ec_basis_t B1; + ec_basis_t B2; +} theta_couple_curve_with_basis_t; + +/** @brief Type for theta point * + * @typedef theta_point_t + * + * @struct theta_point + * + * the theta_point structure used + */ +typedef struct theta_point +{ + fp2_t x; + fp2_t y; + fp2_t z; + fp2_t t; +} theta_point_t; + +/** @brief Type for theta point with repeating components + * @typedef theta_point_compact_t + * + * @struct theta_point_compact + * + * the theta_point structure used for points with repeated components + */ +typedef struct theta_point_compact +{ + fp2_t x; + fp2_t y; +} theta_point_compact_t; + +/** @brief Type for theta structure * + * @typedef theta_structure_t + * + * @struct theta_structure + * + * the theta_structure structure used + */ +typedef struct theta_structure +{ + theta_point_t null_point; + bool precomputation; + + // Eight precomputed values used for doubling and + // (2,2)-isogenies. + fp2_t XYZ0; + fp2_t YZT0; + fp2_t XZT0; + fp2_t XYT0; + + fp2_t xyz0; + fp2_t yzt0; + fp2_t xzt0; + fp2_t xyt0; +} theta_structure_t; + +/** @brief A 2x2 matrix used for action by translation + * @typedef translation_matrix_t + * + * @struct translation_matrix + * + * Structure to hold 4 fp2_t elements representing a 2x2 matrix used when computing + * a compatible theta structure during gluing.
+ */ +typedef struct translation_matrix +{ + fp2_t g00; + fp2_t g01; + fp2_t g10; + fp2_t g11; +} translation_matrix_t; + +/** @brief A 4x4 matrix used for basis changes + * @typedef basis_change_matrix_t + * + * @struct basis_change_matrix + * + * Structure to hold 16 elements representing a 4x4 matrix used for changing + * the basis of a theta point. + */ +typedef struct basis_change_matrix +{ + fp2_t m[4][4]; +} basis_change_matrix_t; + +/** @brief Type for gluing (2,2) theta isogeny * + * @typedef theta_gluing_t + * + * @struct theta_gluing + * + * the theta_gluing structure + */ +typedef struct theta_gluing +{ + + theta_couple_curve_t domain; + theta_couple_jac_point_t xyK1_8; + theta_point_compact_t imageK1_8; + basis_change_matrix_t M; + theta_point_t precomputation; + theta_point_t codomain; + +} theta_gluing_t; + +/** @brief Type for standard (2,2) theta isogeny * + * @typedef theta_isogeny_t + * + * @struct theta_isogeny + * + * the theta_isogeny structure + */ +typedef struct theta_isogeny +{ + theta_point_t T1_8; + theta_point_t T2_8; + bool hadamard_bool_1; + bool hadamard_bool_2; + theta_structure_t domain; + theta_point_t precomputation; + theta_structure_t codomain; +} theta_isogeny_t; + +/** @brief Type for splitting isomorphism * + * @typedef theta_splitting_t + * + * @struct theta_splitting + * + * the theta_splitting structure + */ +typedef struct theta_splitting +{ + basis_change_matrix_t M; + theta_structure_t B; + +} theta_splitting_t; + +// end of hd_struct +/** + * @} + */ + +/** @defgroup hd_functions Functions for dimension 2 + * @{ + */ + +/** + * @brief Compute the double of the theta couple point in on the elliptic product E12 + * + * @param out Output: the theta_couple_point + * @param in the theta couple point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1,P2) + * out = [2] (P1,P2) + * + */ +void double_couple_point(theta_couple_point_t *out, const theta_couple_point_t *in, const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the iterated double of the theta couple point in on the elliptic product E12 + * + * @param out Output: the theta_couple_point + * @param n : the number of iteration + * @param E1E2 an elliptic product + * @param in the theta couple point in the elliptic product + * in = (P1,P2) + * out = [2^n] (P1,P2) + * + */ +void double_couple_point_iter(theta_couple_point_t *out, + unsigned n, + const theta_couple_point_t *in, + const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the addition of two points in (X : Y : Z) coordinates on the elliptic product E12 + * + * @param out Output: the theta_couple_jac_point + * @param T1 the theta couple jac point in the elliptic product + * @param T2 the theta couple jac point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1, P2), (Q1, Q2) + * out = (P1 + Q1, P2 + Q2) + * + **/ +void add_couple_jac_points(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *T1, + const theta_couple_jac_point_t *T2, + const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the double of the theta couple point in on the elliptic product E12 + * + * @param out Output: the theta_couple_point + * @param in the theta couple point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1,P2) + * out = [2] (P1,P2) + * + */ +void double_couple_jac_point(theta_couple_jac_point_t *out, + const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2); + +/** + * @brief Compute the iterated double of the theta couple jac point in 
on the elliptic product E12 + * + * @param out Output: the theta_couple_jac_point + * @param n : the number of iteration + * @param in the theta couple jac point in the elliptic product + * @param E1E2 an elliptic product + * in = (P1,P2) + * out = [2^n] (P1,P2) + * + */ +void double_couple_jac_point_iter(theta_couple_jac_point_t *out, + unsigned n, + const theta_couple_jac_point_t *in, + const theta_couple_curve_t *E1E2); + +/** + * @brief A forgetful function which returns (X : Z) points given a pair of (X : Y : Z) points + * + * @param P Output: the theta_couple_point + * @param xyP : the theta_couple_jac_point + **/ +void couple_jac_to_xz(theta_couple_point_t *P, const theta_couple_jac_point_t *xyP); + +/** + * @brief Compute a (2,2) isogeny chain in dimension 2 between elliptic + * products in the theta_model and evaluate at a list of points of the form + * (P1,0) or (0,P2). Returns 0 if the codomain fails to split (or there is + * an error during the computation) and 1 otherwise. + * + * @param n : the length of the isogeny chain + * @param E12 an elliptic curve product + * @param ker T1, T2 and T1-T2. couple points on E12[2^(n+2)] + * @param extra_torsion boolean indicating if we give the points in E12[2^n] or + * E12[2^(n+HD_extra_torsion)] + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @returns 1 on success 0 on failure + * + */ +int theta_chain_compute_and_eval(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP); + +/** + * @brief Compute a (2,2) isogeny chain in dimension 2 between elliptic + * products in the theta_model and evaluate at a list of points of the form + * (P1,0) or (0,P2). Returns 0 if the codomain fails to split (or there is + * an error during the computation) and 1 otherwise. + * Compared to theta_chain_compute_and_eval, it does extra isotropy + * checks on the kernel. + * + * @param n : the length of the isogeny chain + * @param E12 an elliptic curve product + * @param ker T1, T2 and T1-T2. couple points on E12[2^(n+2)] + * @param extra_torsion boolean indicating if we give the points in E12[2^n] or + * E12[2^(n+HD_extra_torsion)] + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @returns 1 on success 0 on failure + * + */ +int theta_chain_compute_and_eval_verify(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP); + +/** + * @brief Compute a (2,2) isogeny chain in dimension 2 between elliptic + * products in the theta_model and evaluate at a list of points of the form + * (P1,0) or (0,P2). Returns 0 if the codomain fails to split (or there is + * an error during the computation) and 1 otherwise. + * Compared to theta_chain_compute_and_eval, it selects a random Montgomery + * model of the codomain. + * + * @param n : the length of the isogeny chain + * @param E12 an elliptic curve product + * @param ker T1, T2 and T1-T2. 
couple points on E12[2^(n+2)] + * @param extra_torsion boolean indicating if we give the points in E12[2^n] or + * E12[2^(n+HD_extra_torsion)] + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @returns 1 on success, 0 on failure + * + */ +int theta_chain_compute_and_eval_randomized(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP); + +/** + * @brief Given a bases B1 on E1 and B2 on E2 copies this to create a kernel + * on E1 x E2 as couple points T1, T2 and T1 - T2 + * + * @param ker Output: a kernel for dim_two_isogenies (T1, T2, T1-T2) + * @param B1 Input basis on E1 + * @param B2 Input basis on E2 + **/ +void copy_bases_to_kernel(theta_kernel_couple_points_t *ker, const ec_basis_t *B1, const ec_basis_t *B2); + +/** + * @brief Given a couple of points (P1, P2) on a couple of curves (E1, E2) + * this function tests if both points are of order exactly 2^t + * + * @param T: couple point (P1, P2) + * @param E: a couple of curves (E1, E2) + * @param t: an integer + * @returns 0xFFFFFFFF on success, 0 on failure + */ +static int +test_couple_point_order_twof(const theta_couple_point_t *T, const theta_couple_curve_t *E, int t) +{ + int check_P1 = test_point_order_twof(&T->P1, &E->E1, t); + int check_P2 = test_point_order_twof(&T->P2, &E->E2, t); + + return check_P1 & check_P2; +} + +// end of hd_functions +/** + * @} + */ +// end of hd_module +/** + * @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.c new file mode 100644 index 0000000000..a697ac7eb1 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.c @@ -0,0 +1,143 @@ +#include + +#define FP2_ZERO 0 +#define FP2_ONE 1 +#define FP2_I 2 +#define FP2_MINUS_ONE 3 +#define FP2_MINUS_I 4 + +const int EVEN_INDEX[10][2] = {{0, 0}, {0, 1}, {0, 2}, {0, 3}, {1, 0}, {1, 2}, {2, 0}, {2, 1}, {3, 0}, {3, 3}}; +const int CHI_EVAL[4][4] = {{1, 1, 1, 1}, {1, -1, 1, -1}, {1, 1, -1, -1}, {1, -1, -1, 1}}; +const fp2_t FP2_CONSTANTS[5] = {{ +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +#elif RADIX == 32 +{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +#else +{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x1ffb, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1bf} +#elif RADIX == 32 +{0x1ffda12f, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x57f} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xffffffffffffff68, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x7fffffffffffff} +#else +{0x1fffffffffffed0, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0xffffffffffff} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +}, { +#if 0 +#elif RADIX == 16 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 32 +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#else +{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +#endif +#endif +, +#if 0 +#elif RADIX == 16 +{0x1ffb, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1bf} +#elif RADIX == 32 +{0x1ffda12f, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x57f} +#elif RADIX == 64 +#if defined(SQISIGN_GF_IMPL_BROADWELL) +{0xffffffffffffff68, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x7fffffffffffff} +#else +{0x1fffffffffffed0, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0xffffffffffff} +#endif +#endif +}}; +const precomp_basis_change_matrix_t SPLITTING_TRANSFORMS[10] = {{{{FP2_ONE, FP2_I, FP2_ONE, FP2_I}, {FP2_ONE, FP2_MINUS_I, FP2_MINUS_ONE, FP2_I}, {FP2_ONE, FP2_I, FP2_MINUS_ONE, FP2_MINUS_I}, {FP2_MINUS_ONE, FP2_I, FP2_MINUS_ONE, FP2_I}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_MINUS_ONE, FP2_ZERO, FP2_ZERO}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_MINUS_ONE, FP2_ZERO}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}, {FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_ONE}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_ONE}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}, {FP2_MINUS_ONE, FP2_ONE, FP2_ONE, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}}}}; +const precomp_basis_change_matrix_t NORMALIZATION_TRANSFORMS[6] = {{{{FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ONE, FP2_ZERO}, {FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}}}, {{{FP2_ZERO, FP2_ZERO, FP2_ZERO, FP2_ONE}, {FP2_ZERO, FP2_ZERO, FP2_ONE, 
FP2_ZERO}, {FP2_ZERO, FP2_ONE, FP2_ZERO, FP2_ZERO}, {FP2_ONE, FP2_ZERO, FP2_ZERO, FP2_ZERO}}}, {{{FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE}, {FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}}}, {{{FP2_ONE, FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_MINUS_ONE, FP2_MINUS_ONE, FP2_ONE, FP2_ONE}, {FP2_MINUS_ONE, FP2_ONE, FP2_MINUS_ONE, FP2_ONE}, {FP2_ONE, FP2_ONE, FP2_ONE, FP2_ONE}}}, {{{FP2_MINUS_ONE, FP2_I, FP2_I, FP2_ONE}, {FP2_I, FP2_MINUS_ONE, FP2_ONE, FP2_I}, {FP2_I, FP2_ONE, FP2_MINUS_ONE, FP2_I}, {FP2_ONE, FP2_I, FP2_I, FP2_MINUS_ONE}}}, {{{FP2_ONE, FP2_I, FP2_I, FP2_MINUS_ONE}, {FP2_I, FP2_ONE, FP2_MINUS_ONE, FP2_I}, {FP2_I, FP2_MINUS_ONE, FP2_ONE, FP2_I}, {FP2_MINUS_ONE, FP2_I, FP2_I, FP2_ONE}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.h new file mode 100644 index 0000000000..b3147a42a9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.h @@ -0,0 +1,18 @@ +#ifndef HD_SPLITTING_H +#define HD_SPLITTING_H + +#include +#include + +typedef struct precomp_basis_change_matrix { + uint8_t m[4][4]; +} precomp_basis_change_matrix_t; + +extern const int EVEN_INDEX[10][2]; +extern const int CHI_EVAL[4][4]; +extern const fp2_t FP2_CONSTANTS[5]; +extern const precomp_basis_change_matrix_t SPLITTING_TRANSFORMS[10]; +extern const precomp_basis_change_matrix_t NORMALIZATION_TRANSFORMS[6]; + +#endif + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf.c new file mode 100644 index 0000000000..1fb4c0f139 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf.c @@ -0,0 +1,210 @@ +#include "hnf_internal.h" +#include "internal.h" + +// HNF test function +int +ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat) +{ + int res = 1; + int found; + int ind = 0; + ibz_t zero; + ibz_init(&zero); + // upper triangular + for (int i = 0; i < 4; i++) { + // upper triangular + for (int j = 0; j < i; j++) { + res = res && ibz_is_zero(&((*mat)[i][j])); + } + // find first non 0 element of line + found = 0; + for (int j = i; j < 4; j++) { + if (found) { + // all values are positive, and first non-0 is the largest of that line + res = res && (ibz_cmp(&((*mat)[i][j]), &zero) >= 0); + res = res && (ibz_cmp(&((*mat)[i][ind]), &((*mat)[i][j])) > 0); + } else { + if (!ibz_is_zero(&((*mat)[i][j]))) { + found = 1; + ind = j; + // mustbe non-negative + res = res && (ibz_cmp(&((*mat)[i][j]), &zero) > 0); + } + } + } + } + // check that first nom-zero elements ndex per column is strictly increasing + int linestart = -1; + int i = 0; + for (int j = 0; j < 4; j++) { + while ((i < 4) && (ibz_is_zero(&((*mat)[i][j])))) { + i = i + 1; + } + if (i != 4) { + res = res && (linestart < i); + } + i = 0; + } + ibz_finalize(&zero); + return res; +} + +// Untested HNF helpers +// centered mod +void +ibz_vec_4_linear_combination_mod(ibz_vec_4_t *lc, + const ibz_t *coeff_a, + const ibz_vec_4_t *vec_a, + const ibz_t *coeff_b, + const ibz_vec_4_t *vec_b, + const ibz_t *mod) +{ + ibz_t prod, m; + ibz_vec_4_t sums; + ibz_vec_4_init(&sums); + ibz_init(&prod); + ibz_init(&m); + ibz_copy(&m, mod); + for (int i = 0; i < 4; i++) { + ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); + ibz_mul(&prod, coeff_b, &((*vec_b)[i])); + ibz_add(&(sums[i]), &(sums[i]), &prod); + ibz_centered_mod(&(sums[i]), &(sums[i]), &m); + } + for (int i = 0; i < 4; i++) 
{ + ibz_copy(&((*lc)[i]), &(sums[i])); + } + ibz_finalize(&prod); + ibz_finalize(&m); + ibz_vec_4_finalize(&sums); +} + +void +ibz_vec_4_copy_mod(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_t *mod) +{ + ibz_t m; + ibz_init(&m); + ibz_copy(&m, mod); + for (int i = 0; i < 4; i++) { + ibz_centered_mod(&((*res)[i]), &((*vec)[i]), &m); + } + ibz_finalize(&m); +} + +// no need to center this, and not 0 +void +ibz_vec_4_scalar_mul_mod(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec, const ibz_t *mod) +{ + ibz_t m, s; + ibz_init(&m); + ibz_init(&s); + ibz_copy(&s, scalar); + ibz_copy(&m, mod); + for (int i = 0; i < 4; i++) { + ibz_mul(&((*prod)[i]), &((*vec)[i]), &s); + ibz_mod(&((*prod)[i]), &((*prod)[i]), &m); + } + ibz_finalize(&m); + ibz_finalize(&s); +} + +// Algorithm used is the one at number 2.4.8 in Henri Cohen's "A Course in Computational Algebraic +// Number Theory" (Springer Verlag, in series "Graduate texts in Mathematics") from 1993 +// assumes ibz_xgcd outputs u,v which are small in absolute value (as described in the +// book) +void +ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec_4_t *generators, const ibz_t *mod) +{ + int i = 3; + assert(generator_number > 3); + int n = generator_number; + int j = n - 1; + int k = n - 1; + ibz_t b, u, v, d, q, m, coeff_1, coeff_2, r; + ibz_vec_4_t c; + ibz_vec_4_t a[generator_number]; + ibz_vec_4_t w[4]; + ibz_init(&b); + ibz_init(&d); + ibz_init(&u); + ibz_init(&v); + ibz_init(&r); + ibz_init(&m); + ibz_init(&q); + ibz_init(&coeff_1); + ibz_init(&coeff_2); + ibz_vec_4_init(&c); + for (int h = 0; h < n; h++) { + if (h < 4) + ibz_vec_4_init(&(w[h])); + ibz_vec_4_init(&(a[h])); + ibz_copy(&(a[h][0]), &(generators[h][0])); + ibz_copy(&(a[h][1]), &(generators[h][1])); + ibz_copy(&(a[h][2]), &(generators[h][2])); + ibz_copy(&(a[h][3]), &(generators[h][3])); + } + assert(ibz_cmp(mod, &ibz_const_zero) > 0); + ibz_copy(&m, mod); + while (i != -1) { + while (j != 0) { + j = j - 1; + if (!ibz_is_zero(&(a[j][i]))) { + // assumtion that ibz_xgcd outputs u,v which are small in absolute + // value is needed here also, needs u non 0, but v can be 0 if needed + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &(a[j][i])); + ibz_vec_4_linear_combination(&c, &u, &(a[k]), &v, &(a[j])); + ibz_div(&coeff_1, &r, &(a[k][i]), &d); + ibz_div(&coeff_2, &r, &(a[j][i]), &d); + ibz_neg(&coeff_2, &coeff_2); + ibz_vec_4_linear_combination_mod( + &(a[j]), &coeff_1, &(a[j]), &coeff_2, &(a[k]), &m); // do lin comb mod m + ibz_vec_4_copy_mod(&(a[k]), &c, &m); // mod m in copy + } + } + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &m); + ibz_vec_4_scalar_mul_mod(&(w[i]), &u, &(a[k]), &m); // mod m in scalar mult + if (ibz_is_zero(&(w[i][i]))) { + ibz_copy(&(w[i][i]), &m); + } + for (int h = i + 1; h < 4; h++) { + ibz_div_floor(&q, &r, &(w[h][i]), &(w[i][i])); + ibz_neg(&q, &q); + ibz_vec_4_linear_combination(&(w[h]), &ibz_const_one, &(w[h]), &q, &(w[i])); + } + ibz_div(&m, &r, &m, &d); + assert(ibz_is_zero(&r)); + if (i != 0) { + k = k - 1; + i = i - 1; + j = k; + if (ibz_is_zero(&(a[k][i]))) + ibz_copy(&(a[k][i]), &m); + + } else { + k = k - 1; + i = i - 1; + j = k; + } + } + for (j = 0; j < 4; j++) { + for (i = 0; i < 4; i++) { + ibz_copy(&((*hnf)[i][j]), &(w[j][i])); + } + } + + ibz_finalize(&b); + ibz_finalize(&d); + ibz_finalize(&u); + ibz_finalize(&v); + ibz_finalize(&r); + ibz_finalize(&q); + ibz_finalize(&coeff_1); + ibz_finalize(&coeff_2); + ibz_finalize(&m); + ibz_vec_4_finalize(&c); + for (int h = 0; h < n; h++) { + 
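+        // (descriptive note, added) cleanup: w only has 4 entries and only those were
+        // initialized above, so w[h] is finalized for h < 4 while every a[h] is freed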
if (h < 4) + ibz_vec_4_finalize(&(w[h])); + ibz_vec_4_finalize(&(a[h])); + } +} \ No newline at end of file diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf_internal.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf_internal.c new file mode 100644 index 0000000000..b2db5b54c9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf_internal.c @@ -0,0 +1,182 @@ +#include "hnf_internal.h" +#include "internal.h" + +// Small helper for integers +void +ibz_mod_not_zero(ibz_t *res, const ibz_t *x, const ibz_t *mod) +{ + ibz_t m, t; + ibz_init(&m); + ibz_init(&t); + ibz_mod(&m, x, mod); + ibz_set(&t, ibz_is_zero(&m)); + ibz_mul(&t, &t, mod); + ibz_add(res, &m, &t); + ibz_finalize(&m); + ibz_finalize(&t); +} + +// centered and rather positive then negative +void +ibz_centered_mod(ibz_t *remainder, const ibz_t *a, const ibz_t *mod) +{ + assert(ibz_cmp(mod, &ibz_const_zero) > 0); + ibz_t tmp, d, t; + ibz_init(&tmp); + ibz_init(&d); + ibz_init(&t); + ibz_div_floor(&d, &tmp, mod, &ibz_const_two); + ibz_mod_not_zero(&tmp, a, mod); + ibz_set(&t, ibz_cmp(&tmp, &d) > 0); + ibz_mul(&t, &t, mod); + ibz_sub(remainder, &tmp, &t); + ibz_finalize(&tmp); + ibz_finalize(&d); + ibz_finalize(&t); +} + +// if c, res = x, else res = y +void +ibz_conditional_assign(ibz_t *res, const ibz_t *x, const ibz_t *y, int c) +{ + ibz_t s, t, r; + ibz_init(&r); + ibz_init(&s); + ibz_init(&t); + ibz_set(&s, c != 0); + ibz_sub(&t, &ibz_const_one, &s); + ibz_mul(&r, &s, x); + ibz_mul(res, &t, y); + ibz_add(res, &r, res); + ibz_finalize(&r); + ibz_finalize(&s); + ibz_finalize(&t); +} + +// mpz_gcdext specification specifies unique outputs used here +void +ibz_xgcd_with_u_not_0(ibz_t *d, ibz_t *u, ibz_t *v, const ibz_t *x, const ibz_t *y) +{ + if (ibz_is_zero(x) & ibz_is_zero(y)) { + ibz_set(d, 1); + ibz_set(u, 1); + ibz_set(v, 0); + return; + } + ibz_t q, r, x1, y1; + ibz_init(&q); + ibz_init(&r); + ibz_init(&x1); + ibz_init(&y1); + ibz_copy(&x1, x); + ibz_copy(&y1, y); + + // xgcd + ibz_xgcd(d, u, v, &x1, &y1); + + // make sure u!=0 (v can be 0 if needed) + // following GMP specification, u == 0 implies y|x + if (ibz_is_zero(u)) { + if (!ibz_is_zero(&x1)) { + if (ibz_is_zero(&y1)) { + ibz_set(&y1, 1); + } + ibz_div(&q, &r, &x1, &y1); + assert(ibz_is_zero(&r)); + ibz_sub(v, v, &q); + } + ibz_set(u, 1); + } + if (!ibz_is_zero(&x1)) { + // Make sure ux > 0 (and as small as possible) + assert(ibz_cmp(d, &ibz_const_zero) > 0); + ibz_mul(&r, &x1, &y1); + int neg = ibz_cmp(&r, &ibz_const_zero) < 0; + ibz_mul(&q, &x1, u); + while (ibz_cmp(&q, &ibz_const_zero) <= 0) { + ibz_div(&q, &r, &y1, d); + assert(ibz_is_zero(&r)); + if (neg) { + ibz_neg(&q, &q); + } + ibz_add(u, u, &q); + ibz_div(&q, &r, &x1, d); + assert(ibz_is_zero(&r)); + if (neg) { + ibz_neg(&q, &q); + } + ibz_sub(v, v, &q); + + ibz_mul(&q, &x1, u); + } + } + +#ifndef NDEBUG + int res = 0; + ibz_t sum, prod, test, cmp; + ibz_init(&sum); + ibz_init(&prod); + ibz_init(&cmp); + ibz_init(&test); + // sign correct + res = res | !(ibz_cmp(d, &ibz_const_zero) >= 0); + if (ibz_is_zero(&x1) && ibz_is_zero(&y1)) { + res = res | !(ibz_is_zero(v) && ibz_is_one(u) && ibz_is_one(d)); + } else { + if (!ibz_is_zero(&x1) && !ibz_is_zero(&y1)) { + // GCD divides x + ibz_div(&sum, &prod, &x1, d); + res = res | !ibz_is_zero(&prod); + // Small enough + ibz_mul(&prod, &x1, u); + res = res | !(ibz_cmp(&prod, &ibz_const_zero) > 0); + ibz_mul(&sum, &sum, &y1); + ibz_abs(&sum, &sum); + res = res | !(ibz_cmp(&prod, &sum) <= 0); + + // GCD divides y + ibz_div(&sum, &prod, &y1, 
d); + res = res | !ibz_is_zero(&prod); + // Small enough + ibz_mul(&prod, &y1, v); + res = res | !(ibz_cmp(&prod, &ibz_const_zero) <= 0); + ibz_mul(&sum, &sum, &x1); + ibz_abs(&sum, &sum); + res = res | !(ibz_cmp(&prod, &sum) < 0); + } else { + // GCD divides x + ibz_div(&sum, &prod, &x1, d); + res = res | !ibz_is_zero(&prod); + // GCD divides y + ibz_div(&sum, &prod, &y1, d); + res = res | !ibz_is_zero(&prod); + if (ibz_is_zero(&x1) && !ibz_is_zero(&y1)) { + ibz_abs(&prod, v); + res = res | !(ibz_is_one(&prod)); + res = res | !(ibz_is_one(u)); + } else { + ibz_abs(&prod, u); + res = res | !(ibz_is_one(&prod)); + res = res | !(ibz_is_zero(v)); + } + } + + // Bezout coeffs + ibz_mul(&sum, &x1, u); + ibz_mul(&prod, &y1, v); + ibz_add(&sum, &sum, &prod); + res = res | !(ibz_cmp(&sum, d) == 0); + } + assert(!res); + ibz_finalize(&sum); + ibz_finalize(&prod); + ibz_finalize(&cmp); + ibz_finalize(&test); + +#endif + + ibz_finalize(&x1); + ibz_finalize(&y1); + ibz_finalize(&q); + ibz_finalize(&r); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf_internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf_internal.h new file mode 100644 index 0000000000..5ecc871bb4 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf_internal.h @@ -0,0 +1,94 @@ +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations for functions internal to the HNF computation and its tests + */ + +#ifndef QUAT_HNF_HELPERS_H +#define QUAT_HNF_HELPERS_H + +#include + +/** @internal + * @ingroup quat_helpers + * @defgroup quat_hnf_helpers Internal functions for the HNF computation and tests + */ + +/** @internal + * @ingroup quat_hnf_helpers + * @defgroup quat_hnf_helpers_ibz Internal renamed GMP functions for the HNF computation + */ + +/** + * @brief GCD and Bézout coefficients u, v such that ua + bv = gcd + * + * @param gcd Output: Set to the gcd of a and b + * @param u Output: integer such that ua+bv=gcd + * @param v Output: Integer such that ua+bv=gcd + * @param a + * @param b + */ +void ibz_xgcd(ibz_t *gcd, + ibz_t *u, + ibz_t *v, + const ibz_t *a, + const ibz_t *b); // integers, dim4, test/integers, test/dim4 + +/** @} + */ + +/** @internal + * @ingroup quat_hnf_helpers + * @defgroup quat_hnf_integer_helpers Integer functions internal to the HNF computation and tests + * @{ + */ + +/** @brief x mod mod, with x in [1,mod] + * + * @param res Output: res = x [mod] and 0 0 + */ +void ibz_mod_not_zero(ibz_t *res, const ibz_t *x, const ibz_t *mod); + +/** @brief x mod mod, with x in ]-mod/2,mod/2] + * + * Centered and rather positive then negative. 
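+ * Illustrative examples (added, not normative): with mod = 7 the result lies in
+ * {-3,...,3}, so a = 10 gives 3 and a = 11 gives -3; with mod = 4 the boundary value
+ * stays positive, so a = 6 gives 2 rather than -2.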
+ * + * @param remainder Output: remainder = x [mod] and -mod/2 0 + */ +void ibz_centered_mod(ibz_t *remainder, const ibz_t *a, const ibz_t *mod); + +/** @brief if c then x else y + * + * @param res Output: if c, res = x, else res = y + * @param x + * @param y + * @param c condition: must be 0 or 1 + */ +void ibz_conditional_assign(ibz_t *res, const ibz_t *x, const ibz_t *y, int c); + +/** @brief d = gcd(x,y)>0 and d = ux+vy and u!= 0 and d>0 and u, v of small absolute value, u not 0 + * + * More precisely: + * If x and y are both non 0, -|xy|/d +#else +#include +#endif + +void +ibz_xgcd(ibz_t *gcd, ibz_t *u, ibz_t *v, const ibz_t *a, const ibz_t *b) +{ + mpz_gcdext(*gcd, *u, *v, *a, *b); +} \ No newline at end of file diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/id2iso.c new file mode 100644 index 0000000000..0743974345 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/id2iso.c @@ -0,0 +1,338 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Scalar multiplication [x]P + [y]Q where x and y are stored +// inside an ibz_vec_2_t [x, y] and P, Q \in E[2^f] +void +ec_biscalar_mul_ibz_vec(ec_point_t *res, + const ibz_vec_2_t *scalar_vec, + const int f, + const ec_basis_t *PQ, + const ec_curve_t *curve) +{ + digit_t scalars[2][NWORDS_ORDER]; + ibz_to_digit_array(scalars[0], &(*scalar_vec)[0]); + ibz_to_digit_array(scalars[1], &(*scalar_vec)[1]); + ec_biscalar_mul(res, scalars[0], scalars[1], f, PQ, curve); +} + +// Given an ideal, computes the scalars s0, s1 which determine the kernel generator +// of the equivalent isogeny +void +id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *vec, const quat_left_ideal_t *lideal) +{ + ibz_t tmp; + ibz_init(&tmp); + + ibz_mat_2x2_t mat; + ibz_mat_2x2_init(&mat); + + // construct the matrix of the dual of alpha on the 2^f-torsion + { + quat_alg_elem_t alpha; + quat_alg_elem_init(&alpha); + + int lideal_generator_ok UNUSED = quat_lideal_generator(&alpha, lideal, &QUATALG_PINFTY); + assert(lideal_generator_ok); + quat_alg_conj(&alpha, &alpha); + + ibz_vec_4_t coeffs; + ibz_vec_4_init(&coeffs); + quat_change_to_O0_basis(&coeffs, &alpha); + + for (unsigned i = 0; i < 2; ++i) { + ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + for (unsigned j = 0; j < 2; ++j) { + ibz_mul(&tmp, &ACTION_GEN2[i][j], &coeffs[1]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN3[i][j], &coeffs[2]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN4[i][j], &coeffs[3]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + } + } + + ibz_vec_4_finalize(&coeffs); + quat_alg_elem_finalize(&alpha); + } + + // find the kernel of alpha modulo the norm of the ideal + { + const ibz_t *const norm = &lideal->norm; + + ibz_mod(&(*vec)[0], &mat[0][0], norm); + ibz_mod(&(*vec)[1], &mat[1][0], norm); + ibz_gcd(&tmp, &(*vec)[0], &(*vec)[1]); + if (ibz_is_even(&tmp)) { + ibz_mod(&(*vec)[0], &mat[0][1], norm); + ibz_mod(&(*vec)[1], &mat[1][1], norm); + } +#ifndef NDEBUG + ibz_gcd(&tmp, &(*vec)[0], norm); + ibz_gcd(&tmp, &(*vec)[1], &tmp); + assert(!ibz_cmp(&tmp, &ibz_const_one)); +#endif + } + + ibz_mat_2x2_finalize(&mat); + ibz_finalize(&tmp); +} + +// helper function to apply a matrix to a basis of E[2^f] +// works in place +int +matrix_application_even_basis(ec_basis_t *bas, const ec_curve_t *E, ibz_mat_2x2_t *mat, int f) +{ + digit_t scalars[2][NWORDS_ORDER] = { 0 }; + int ret; + + ibz_t tmp, pow_two; + ibz_init(&tmp); + ibz_init(&pow_two); + 
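+    // (descriptive note, added) pow_two is set to 2^f just below; the matrix entries
+    // and the derived scalars are all reduced modulo this 2-power torsion order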
ibz_pow(&pow_two, &ibz_const_two, f); + + ec_basis_t tmp_bas; + copy_basis(&tmp_bas, bas); + + // reduction mod 2f + ibz_mod(&(*mat)[0][0], &(*mat)[0][0], &pow_two); + ibz_mod(&(*mat)[0][1], &(*mat)[0][1], &pow_two); + ibz_mod(&(*mat)[1][0], &(*mat)[1][0], &pow_two); + ibz_mod(&(*mat)[1][1], &(*mat)[1][1], &pow_two); + + // For a matrix [[a, c], [b, d]] we compute: + // + // first basis element R = [a]P + [b]Q + ibz_to_digit_array(scalars[0], &(*mat)[0][0]); + ibz_to_digit_array(scalars[1], &(*mat)[1][0]); + ec_biscalar_mul(&bas->P, scalars[0], scalars[1], f, &tmp_bas, E); + + // second basis element S = [c]P + [d]Q + ibz_to_digit_array(scalars[0], &(*mat)[0][1]); + ibz_to_digit_array(scalars[1], &(*mat)[1][1]); + ec_biscalar_mul(&bas->Q, scalars[0], scalars[1], f, &tmp_bas, E); + + // Their difference R - S = [a - c]P + [b - d]Q + ibz_sub(&tmp, &(*mat)[0][0], &(*mat)[0][1]); + ibz_mod(&tmp, &tmp, &pow_two); + ibz_to_digit_array(scalars[0], &tmp); + ibz_sub(&tmp, &(*mat)[1][0], &(*mat)[1][1]); + ibz_mod(&tmp, &tmp, &pow_two); + ibz_to_digit_array(scalars[1], &tmp); + ret = ec_biscalar_mul(&bas->PmQ, scalars[0], scalars[1], f, &tmp_bas, E); + + ibz_finalize(&tmp); + ibz_finalize(&pow_two); + + return ret; +} + +// helper function to apply some endomorphism of E0 on the precomputed basis of E[2^f] +// works in place +void +endomorphism_application_even_basis(ec_basis_t *bas, + const int index_alternate_curve, + const ec_curve_t *E, + const quat_alg_elem_t *theta, + int f) +{ + ibz_t tmp; + ibz_init(&tmp); + ibz_vec_4_t coeffs; + ibz_vec_4_init(&coeffs); + ibz_mat_2x2_t mat; + ibz_mat_2x2_init(&mat); + + ibz_t content; + ibz_init(&content); + + // decomposing theta on the basis + quat_alg_make_primitive(&coeffs, &content, theta, &EXTREMAL_ORDERS[index_alternate_curve].order); + assert(ibz_is_odd(&content)); + + ibz_set(&mat[0][0], 0); + ibz_set(&mat[0][1], 0); + ibz_set(&mat[1][0], 0); + ibz_set(&mat[1][1], 0); + + // computing the matrix + + for (unsigned i = 0; i < 2; ++i) { + ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + for (unsigned j = 0; j < 2; ++j) { + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen2[i][j], &coeffs[1]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen3[i][j], &coeffs[2]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen4[i][j], &coeffs[3]); + ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&mat[i][j], &mat[i][j], &content); + } + } + + // and now we apply it + matrix_application_even_basis(bas, E, &mat, f); + + ibz_vec_4_finalize(&coeffs); + ibz_mat_2x2_finalize(&mat); + ibz_finalize(&content); + + ibz_finalize(&tmp); +} + +// compute the ideal whose kernel is generated by vec2[0]*BO[0] + vec2[1]*B0[1] where B0 is the +// canonical basis of E0 +void +id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t *vec2, int f) +{ + + // algorithm: apply endomorphisms 1 and j+(1+k)/2 to the kernel point, + // the result should form a basis of the respective torsion subgroup. + // then apply i to the kernel point and decompose over said basis. + // hence we have an equation a*P + b*[j+(1+k)/2]P == [i]P, which will + // easily reveal an endomorphism that kills P. 
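+    // (illustrative note, added) concretely: once [i]P = [a]P + [b]([j+(1+k)/2]P) has been
+    // solved for (a, b), the quaternion a - i + b*(j+(1+k)/2) sends P to 0; it is exactly
+    // the generator `gen` assembled at the end of this function.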
+ + ibz_t two_pow; + ibz_init(&two_pow); + + ibz_vec_2_t vec; + ibz_vec_2_init(&vec); + + if (f == TORSION_EVEN_POWER) { + ibz_copy(&two_pow, &TORSION_PLUS_2POWER); + } else { + ibz_pow(&two_pow, &ibz_const_two, f); + } + + { + ibz_mat_2x2_t mat; + ibz_mat_2x2_init(&mat); + + ibz_copy(&mat[0][0], &(*vec2)[0]); + ibz_copy(&mat[1][0], &(*vec2)[1]); + + ibz_mat_2x2_eval(&vec, &ACTION_J, vec2); + ibz_copy(&mat[0][1], &vec[0]); + ibz_copy(&mat[1][1], &vec[1]); + + ibz_mat_2x2_eval(&vec, &ACTION_GEN4, vec2); + ibz_add(&mat[0][1], &mat[0][1], &vec[0]); + ibz_add(&mat[1][1], &mat[1][1], &vec[1]); + + ibz_mod(&mat[0][1], &mat[0][1], &two_pow); + ibz_mod(&mat[1][1], &mat[1][1], &two_pow); + + ibz_mat_2x2_t inv; + ibz_mat_2x2_init(&inv); + { + int inv_ok UNUSED = ibz_mat_2x2_inv_mod(&inv, &mat, &two_pow); + assert(inv_ok); + } + ibz_mat_2x2_finalize(&mat); + + ibz_mat_2x2_eval(&vec, &ACTION_I, vec2); + ibz_mat_2x2_eval(&vec, &inv, &vec); + + ibz_mat_2x2_finalize(&inv); + } + + // final result: a - i + b*(j+(1+k)/2) + quat_alg_elem_t gen; + quat_alg_elem_init(&gen); + ibz_set(&gen.denom, 2); + ibz_add(&gen.coord[0], &vec[0], &vec[0]); + ibz_set(&gen.coord[1], -2); + ibz_add(&gen.coord[2], &vec[1], &vec[1]); + ibz_copy(&gen.coord[3], &vec[1]); + ibz_add(&gen.coord[0], &gen.coord[0], &vec[1]); + ibz_vec_2_finalize(&vec); + + quat_lideal_create(lideal, &gen, &two_pow, &MAXORD_O0, &QUATALG_PINFTY); + + assert(0 == ibz_cmp(&lideal->norm, &two_pow)); + + quat_alg_elem_finalize(&gen); + ibz_finalize(&two_pow); +} + +// finds mat such that: +// (mat*v).B2 = v.B1 +// where "." is the dot product, defined as (v1,v2).(P,Q) = v1*P + v2*Q +// mat encodes the coordinates of the points of B1 in the basis B2 +// specifically requires B1 or B2 to be "full" w.r.t to the 2^n torsion, so that we use tate +// full = 0 assumes B2 is "full" so the easier case. +// if we want to switch the role of B2 and B1, we invert the matrix, e.g. 
set full = 1 +static void +_change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, + const ec_basis_t *B1, + const ec_basis_t *B2, + ec_curve_t *E, + int f, + bool invert) +{ + digit_t x1[NWORDS_ORDER] = { 0 }, x2[NWORDS_ORDER] = { 0 }, x3[NWORDS_ORDER] = { 0 }, x4[NWORDS_ORDER] = { 0 }; + +#ifndef NDEBUG + int e_full = TORSION_EVEN_POWER; + int e_diff = e_full - f; +#endif + + // Ensure the input basis has points of order 2^f + if (invert) { + assert(test_basis_order_twof(B1, E, e_full)); + ec_dlog_2_tate(x1, x2, x3, x4, B1, B2, E, f); + mp_invert_matrix(x1, x2, x3, x4, f, NWORDS_ORDER); + } else { + assert(test_basis_order_twof(B2, E, e_full)); + ec_dlog_2_tate(x1, x2, x3, x4, B2, B1, E, f); + } + +#ifndef NDEBUG + { + if (invert) { + ec_point_t test, test2; + ec_biscalar_mul(&test, x1, x2, f, B2, E); + ec_dbl_iter(&test2, e_diff, &B1->P, E); + assert(ec_is_equal(&test, &test2)); + + ec_biscalar_mul(&test, x3, x4, f, B2, E); + ec_dbl_iter(&test2, e_diff, &B1->Q, E); + assert(ec_is_equal(&test, &test2)); + } else { + ec_point_t test; + ec_biscalar_mul(&test, x1, x2, f, B2, E); + ec_dbl_iter(&test, e_diff, &test, E); + assert(ec_is_equal(&test, &(B1->P))); + + ec_biscalar_mul(&test, x3, x4, f, B2, E); + ec_dbl_iter(&test, e_diff, &test, E); + assert(ec_is_equal(&test, &(B1->Q))); + } + } +#endif + + // Copy the results into the matrix + ibz_copy_digit_array(&((*mat)[0][0]), x1); + ibz_copy_digit_array(&((*mat)[1][0]), x2); + ibz_copy_digit_array(&((*mat)[0][1]), x3); + ibz_copy_digit_array(&((*mat)[1][1]), x4); +} + +void +change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, const ec_basis_t *B1, const ec_basis_t *B2, ec_curve_t *E, int f) +{ + _change_of_basis_matrix_tate(mat, B1, B2, E, f, false); +} + +void +change_of_basis_matrix_tate_invert(ibz_mat_2x2_t *mat, const ec_basis_t *B1, const ec_basis_t *B2, ec_curve_t *E, int f) +{ + _change_of_basis_matrix_tate(mat, B1, B2, E, f, true); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/id2iso.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/id2iso.h new file mode 100644 index 0000000000..1b4eaae3c5 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/id2iso.h @@ -0,0 +1,280 @@ +/** @file + * + * @authors Antonin Leroux + * + * @brief The id2iso algorithms + */ + +#ifndef ID2ISO_H +#define ID2ISO_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/** @defgroup id2iso_id2iso Ideal to isogeny conversion + * @{ + */ +static const quat_represent_integer_params_t QUAT_represent_integer_params = { + .algebra = &QUATALG_PINFTY, /// The level-specific quaternion algebra + .order = &(EXTREMAL_ORDERS[0]), // The special extremal order O0 + .primality_test_iterations = QUAT_primality_num_iter // precompted bound on the iteration number in primality tests +}; + +/*************************** Functions *****************************/ + +/** @defgroup id2iso_others Other functions needed for id2iso + * @{ + */ + +/** + * @brief Scalar multiplication [x]P + [y]Q where x and y are stored inside an + * ibz_vec_2_t [x, y] and P, Q in E[2^f] + * + * @param res Output: the point R = [x]P + [y]Q + * @param scalar_vec: a vector of ibz type elements (x, y) + * @param f: an integer such that P, Q are in E[2^f] + * @param PQ: an x-only basis x(P), x(Q) and x(P-Q) + * @param curve: the curve E the points P, Q, R are defined on + * + */ +void ec_biscalar_mul_ibz_vec(ec_point_t *res, + const ibz_vec_2_t *scalar_vec, + const int f, + const ec_basis_t *PQ, + const ec_curve_t *curve); + +/** + * @brief Translating an ideal 
of norm 2^f dividing p²-1 into the corresponding + * kernel coefficients + * + * @param ker_dlog Output : two coefficients indicating the decomposition of the + * kernel over the canonical basis of E0[2^f] + * @param lideal_input : O0-ideal corresponding to the ideal to be translated of + * norm 2^f + * + */ +void id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *ker_dlog, const quat_left_ideal_t *lideal_input); + +/** + * @brief Applies some 2x2 matrix on a basis of E[2^TORSION_EVEN_POWER] + * + * @param P the basis + * @param E the curve + * @param mat the matrix + * @param f TORSION_EVEN_POWER + * @returns 1 if success, 0 if error + * + * helper function, works in place + * + */ +int matrix_application_even_basis(ec_basis_t *P, const ec_curve_t *E, ibz_mat_2x2_t *mat, int f); + +/** + * @brief Applies some endomorphism of an alternate curve to E[f] + * + * @param P the basis + * @param index_alternate_curve index of the alternate order in the list of precomputed extremal + * orders + * @param E the curve (E is not required to be the alternate curve in question since in the end we + * only apply a matrix) + * @param theta the endomorphism + * @param f TORSION_EVEN_POWER + * + * helper function, works in place + * + */ +void endomorphism_application_even_basis(ec_basis_t *P, + const int index_alternate_curve, + const ec_curve_t *E, + const quat_alg_elem_t *theta, + int f); + +/** + * @brief Translating a kernel on the curve E0, represented as a vector with + * respect to the precomputed 2^f-torsion basis, into the corresponding O0-ideal + * + * @param lideal Output : the output O0-ideal + * @param f : exponent definining the norm of the ideal to compute + * @param vec2 : length-2 vector giving the 2-power part of the kernel with + * respect to the precomputed 2^f basis + * + */ +void id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t *vec2, int f); + +/** + * @brief Change of basis matrix for full basis B2 + * Finds mat such that: + * (mat*v).B2 = v.B1 + * where "." is the dot product, defined as (v1,v2).(P,Q) = v1*P + v2*Q + * + * @param mat the computed change of basis matrix + * @param B1 the source basis for E[2^f] + * @param B2 the target basis for E[2^e] + * @param E the elliptic curve + * @param f 2^f is the order of the points of the input basis + * + * mat encodes the coordinates of the points of B1 in the basis B2 + */ +void change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, const ec_basis_t *B1, const ec_basis_t *B2, ec_curve_t *E, int f); + +/** + * @brief Change of basis matrix for full basis B2 + * Finds mat such that: + * (mat*v).B1 = [2^e-f]*v.B2 + * where "." 
is the dot product, defined as (v1,v2).(P,Q) = v1*P + v2*Q + * + * @param mat the computed change of basis matrix + * @param B1 the source basis for E[2^e] + * @param B2 the target basis for E[2^f] + * @param E the elliptic curve + * @param f 2^f is the order of the points of the input basis + * + * mat encodes the coordinates of the points of B1 in the basis B2, by + * applying change_of_basis_matrix_tate and inverting the outcome + */ +void change_of_basis_matrix_tate_invert(ibz_mat_2x2_t *mat, + const ec_basis_t *B1, + const ec_basis_t *B2, + ec_curve_t *E, + int f); + +/** @} + */ + +/** @defgroup id2iso_arbitrary Arbitrary isogeny evaluation + * @{ + */ +/** + * @brief Function to find elements u, v, d1, d2, beta1, beta2 for the ideal to isogeny + * + * @param u Output: integer + * @param v Output: integer + * @param beta1 Output: quaternion element + * @param beta2 Output: quaternion element + * @param d1 Output: integer + * @param d2 Output: integer + * @param index_alternate_order_1 Output: small integer (index of an alternate order) + * @param index_alternate_order_2 Output: small integer (index of an alternate order) + * @param target : integer, target norm + * @param lideal : O0-ideal defining the search space + * @param Bpoo : quaternion algebra + * @param num_alternate_order number of alternate order we consider + * @returns 1 if the computation succeeds, 0 otherwise + * + * Let us write ti = index_alternate_order_i, + * we look for u,v,beta1,beta2,d1,d2,t1,t2 + * such that u d1 + v d2 = target + * and where di = norm(betai)/norm(Ii), where the ideal Ii is equal to overbar{Ji} * lideal and + * betai is in Ii where Ji is a connecting ideal between the maximal order O0 and O_ti t1,t2 must be + * contained between 0 and num_alternate_order This corresponds to the function SuitableIdeals in + * the spec + */ +int find_uv(ibz_t *u, + ibz_t *v, + quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *d1, + ibz_t *d2, + int *index_alternate_order_1, + int *index_alternate_order_2, + const ibz_t *target, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo, + int num_alternate_order); + +/** + * @brief Computes an arbitrary isogeny of fixed degree starting from E0 + * and evaluates it a list of points of the form (P1,0) or (0,P2). 
+ * + * @param lideal Output : an ideal of norm u + * @param u : integer + * @param small : bit indicating if we the value of u is "small" meaning that we + expect it to be + * around sqrt{p}, in that case we use a length slightly above + * @param E34 Output: the codomain curve + * @param P12 Input/Output: pointer to points to be pushed through the isogeny + (in-place) + * @param numP: length of the list of points given in P12 (can be zero) + * @param index_alternate_order : index of the special extremal order to be used (in the list of + these orders) + * @returns the length of the chain if the computation succeeded, zero upon + failure + * + * F is an isogeny encoding an isogeny [adjust]*phi : E0 -> Eu of degree u + * note that the codomain of F can be either Eu x Eu' or Eu' x Eu for some curve + Eu' + */ +int fixed_degree_isogeny_and_eval(quat_left_ideal_t *lideal, + const ibz_t *u, + bool small, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP, + const int index_alternate_order); + +/** + * @brief Translating an ideal into a representation of the corresponding + * isogeny + * + * @param beta1 Output: quaternion element + * @param beta2 Output: quaternion element + * @param u Output: integer + * @param v Output: integer + * @param d1 Output: integer + * @param d2 Output: integer + * @param codomain the codomain of the isogeny corresponding to lideal + * @param basis Output : evaluation of the canonical basis of E0 through the + * ideal corresponding to lideal + * @param lideal : O0 - ideal in input + * @param Bpoo : the quaternion algebra + * @returns 1 if the computation succeeded, 0 otherwise + * + * Compute the codomain and image on the basis of E0 of the isogeny + * E0 -> codomain corresponding to lideal + * + * There is some integer e >= 0 such that + * 2^e * u, 2^e * v,beta1, beta2, d1, d2 are the output of find_uv + * on input target = 2^TORSION_PLUS_EVEN_POWER and lideal + * + * codomain and basis are computed with the help of a dimension 2 isogeny + * of degree 2^TORSION_PLUS_EVEN_POWER - e using a Kani diagram + * + */ +int dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, + quat_alg_elem_t *beta2, + ibz_t *u, + ibz_t *v, + ibz_t *d1, + ibz_t *d2, + ec_curve_t *codomain, + ec_basis_t *basis, + const quat_left_ideal_t *lideal, + const quat_alg_t *Bpoo); + +/** + * @brief Translating an ideal into a representation of the corresponding + * isogeny + * + * @param basis Output : evaluation of the canonical basis of E0 through the + * ideal corresponding to lideal + * @param lideal : ideal in input + * @param codomain + * @returns 1 if the computation succeeds, 0 otherwise + * + * This is a wrapper around the ideal to isogeny clapotis function + */ +int dim2id2iso_arbitrary_isogeny_evaluation(ec_basis_t *basis, ec_curve_t *codomain, const quat_left_ideal_t *lideal); + +/** @} + */ + +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ideal.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ideal.c new file mode 100644 index 0000000000..9cf863a104 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ideal.c @@ -0,0 +1,323 @@ +#include +#include +#include "internal.h" + +// assumes parent order and lattice correctly set, computes and sets the norm +void +quat_lideal_norm(quat_left_ideal_t *lideal) +{ + quat_lattice_index(&(lideal->norm), &(lideal->lattice), (lideal->parent_order)); + int ok UNUSED = ibz_sqrt(&(lideal->norm), &(lideal->norm)); + assert(ok); +} + +// assumes parent order and lattice correctly set, 
recomputes and verifies its norm +static int +quat_lideal_norm_verify(const quat_left_ideal_t *lideal) +{ + int res; + ibz_t index; + ibz_init(&index); + quat_lattice_index(&index, &(lideal->lattice), (lideal->parent_order)); + ibz_sqrt(&index, &index); + res = (ibz_cmp(&(lideal->norm), &index) == 0); + ibz_finalize(&index); + return (res); +} + +void +quat_lideal_copy(quat_left_ideal_t *copy, const quat_left_ideal_t *copied) +{ + copy->parent_order = copied->parent_order; + ibz_copy(©->norm, &copied->norm); + ibz_copy(©->lattice.denom, &copied->lattice.denom); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(©->lattice.basis[i][j], &copied->lattice.basis[i][j]); + } + } +} + +void +quat_lideal_create_principal(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const quat_lattice_t *order, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal(order, alg)); + assert(quat_lattice_contains(NULL, order, x)); + ibz_t norm_n, norm_d; + ibz_init(&norm_n); + ibz_init(&norm_d); + + // Multiply order on the right by x + quat_lattice_alg_elem_mul(&(lideal->lattice), order, x, alg); + + // Reduce denominator. This conserves HNF + quat_lattice_reduce_denom(&lideal->lattice, &lideal->lattice); + + // Compute norm and check it's integral + quat_alg_norm(&norm_n, &norm_d, x, alg); + assert(ibz_is_one(&norm_d)); + ibz_copy(&lideal->norm, &norm_n); + + // Set order + lideal->parent_order = order; + ibz_finalize(&norm_n); + ibz_finalize(&norm_d); +} + +void +quat_lideal_create(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const ibz_t *N, + const quat_lattice_t *order, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal(order, alg)); + assert(!quat_alg_elem_is_zero(x)); + + quat_lattice_t ON; + quat_lattice_init(&ON); + + // Compute ideal generated by x + quat_lideal_create_principal(lideal, x, order, alg); + + // Compute ideal generated by N (without reducing denominator) + ibz_mat_4x4_scalar_mul(&ON.basis, N, &order->basis); + ibz_copy(&ON.denom, &order->denom); + + // Add lattices (reduces denominators) + quat_lattice_add(&lideal->lattice, &lideal->lattice, &ON); + // Set order + lideal->parent_order = order; + // Compute norm + quat_lideal_norm(lideal); + + quat_lattice_finalize(&ON); +} + +int +quat_lideal_generator(quat_alg_elem_t *gen, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + ibz_t norm_int, norm_n, gcd, r, q, norm_denom; + ibz_vec_4_t vec; + ibz_vec_4_init(&vec); + ibz_init(&norm_denom); + ibz_init(&norm_int); + ibz_init(&norm_n); + ibz_init(&r); + ibz_init(&q); + ibz_init(&gcd); + int a, b, c, d; + int found = 0; + int int_norm = 0; + while (1) { + int_norm++; + for (a = -int_norm; a <= int_norm; a++) { + for (b = -int_norm + abs(a); b <= int_norm - abs(a); b++) { + for (c = -int_norm + abs(a) + abs(b); c <= int_norm - abs(a) - abs(b); c++) { + d = int_norm - abs(a) - abs(b) - abs(c); + ibz_vec_4_set(&vec, a, b, c, d); + ibz_vec_4_content(&gcd, &vec); + if (ibz_is_one(&gcd)) { + ibz_mat_4x4_eval(&(gen->coord), &(lideal->lattice.basis), &vec); + ibz_copy(&(gen->denom), &(lideal->lattice.denom)); + quat_alg_norm(&norm_int, &norm_denom, gen, alg); + assert(ibz_is_one(&norm_denom)); + ibz_div(&q, &r, &norm_int, &(lideal->norm)); + assert(ibz_is_zero(&r)); + ibz_gcd(&gcd, &(lideal->norm), &q); + found = (0 == ibz_cmp(&gcd, &ibz_const_one)); + if (found) + goto fin; + } + } + } + } + } +fin:; + ibz_finalize(&r); + ibz_finalize(&q); + ibz_finalize(&norm_denom); + ibz_finalize(&norm_int); + ibz_finalize(&norm_n); + 
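+    // (descriptive note, added) release the remaining temporaries before returning
+    // whether a suitable generator was found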
ibz_vec_4_finalize(&vec); + ibz_finalize(&gcd); + return (found); +} + +void +quat_lideal_mul(quat_left_ideal_t *product, + const quat_left_ideal_t *lideal, + const quat_alg_elem_t *alpha, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + ibz_t norm, norm_d; + ibz_init(&norm); + ibz_init(&norm_d); + quat_lattice_alg_elem_mul(&(product->lattice), &(lideal->lattice), alpha, alg); + product->parent_order = lideal->parent_order; + quat_alg_norm(&norm, &norm_d, alpha, alg); + ibz_mul(&(product->norm), &(lideal->norm), &norm); + assert(ibz_divides(&(product->norm), &norm_d)); + ibz_div(&(product->norm), &norm, &(product->norm), &norm_d); + assert(quat_lideal_norm_verify(lideal)); + ibz_finalize(&norm_d); + ibz_finalize(&norm); +} + +void +quat_lideal_add(quat_left_ideal_t *sum, const quat_left_ideal_t *I1, const quat_left_ideal_t *I2, const quat_alg_t *alg) +{ + assert(I1->parent_order == I2->parent_order); + assert(quat_order_is_maximal((I2->parent_order), alg)); + quat_lattice_add(&sum->lattice, &I1->lattice, &I2->lattice); + sum->parent_order = I1->parent_order; + quat_lideal_norm(sum); +} + +void +quat_lideal_inter(quat_left_ideal_t *inter, + const quat_left_ideal_t *I1, + const quat_left_ideal_t *I2, + const quat_alg_t *alg) +{ + assert(I1->parent_order == I2->parent_order); + assert(quat_order_is_maximal((I2->parent_order), alg)); + quat_lattice_intersect(&inter->lattice, &I1->lattice, &I2->lattice); + inter->parent_order = I1->parent_order; + quat_lideal_norm(inter); +} + +int +quat_lideal_equals(const quat_left_ideal_t *I1, const quat_left_ideal_t *I2, const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((I2->parent_order), alg)); + assert(quat_order_is_maximal((I1->parent_order), alg)); + return (I1->parent_order == I2->parent_order) & (ibz_cmp(&I1->norm, &I2->norm) == 0) & + quat_lattice_equal(&I1->lattice, &I2->lattice); +} + +void +quat_lideal_inverse_lattice_without_hnf(quat_lattice_t *inv, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + quat_lattice_conjugate_without_hnf(inv, &(lideal->lattice)); + ibz_mul(&(inv->denom), &(inv->denom), &(lideal->norm)); +} + +// following the implementation of ideal isomorphisms in the code of LearningToSQI's sage +// implementation of SQIsign +void +quat_lideal_right_transporter(quat_lattice_t *trans, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal1->parent_order), alg)); + assert(quat_order_is_maximal((lideal2->parent_order), alg)); + assert(lideal1->parent_order == lideal2->parent_order); + quat_lattice_t inv; + quat_lattice_init(&inv); + quat_lideal_inverse_lattice_without_hnf(&inv, lideal1, alg); + quat_lattice_mul(trans, &inv, &(lideal2->lattice), alg); + quat_lattice_finalize(&inv); +} + +void +quat_lideal_right_order(quat_lattice_t *order, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + quat_lideal_right_transporter(order, lideal, lideal, alg); +} + +void +quat_lideal_class_gram(ibz_mat_4x4_t *G, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + quat_lattice_gram(G, &(lideal->lattice), alg); + + // divide by norm · denominator² + ibz_t divisor, rmd; + ibz_init(&divisor); + ibz_init(&rmd); + + ibz_mul(&divisor, &(lideal->lattice.denom), &(lideal->lattice.denom)); + ibz_mul(&divisor, &divisor, &(lideal->norm)); + + for (int i = 0; i < 4; i++) { + for 
(int j = 0; j <= i; j++) { + ibz_div(&(*G)[i][j], &rmd, &(*G)[i][j], &divisor); + assert(ibz_is_zero(&rmd)); + } + } + for (int i = 0; i < 4; i++) { + for (int j = 0; j <= i - 1; j++) { + ibz_copy(&(*G)[j][i], &(*G)[i][j]); + } + } + + ibz_finalize(&rmd); + ibz_finalize(&divisor); +} + +void +quat_lideal_conjugate_without_hnf(quat_left_ideal_t *conj, + quat_lattice_t *new_parent_order, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg) +{ + quat_lideal_right_order(new_parent_order, lideal, alg); + quat_lattice_conjugate_without_hnf(&(conj->lattice), &(lideal->lattice)); + conj->parent_order = new_parent_order; + ibz_copy(&(conj->norm), &(lideal->norm)); +} + +int +quat_order_discriminant(ibz_t *disc, const quat_lattice_t *order, const quat_alg_t *alg) +{ + int ok = 0; + ibz_t det, sqr, div; + ibz_mat_4x4_t transposed, norm, prod; + ibz_init(&det); + ibz_init(&sqr); + ibz_init(&div); + ibz_mat_4x4_init(&transposed); + ibz_mat_4x4_init(&norm); + ibz_mat_4x4_init(&prod); + ibz_mat_4x4_transpose(&transposed, &(order->basis)); + // multiply gram matrix by 2 because of reduced trace + ibz_mat_4x4_identity(&norm); + ibz_copy(&(norm[2][2]), &(alg->p)); + ibz_copy(&(norm[3][3]), &(alg->p)); + ibz_mat_4x4_scalar_mul(&norm, &ibz_const_two, &norm); + ibz_mat_4x4_mul(&prod, &transposed, &norm); + ibz_mat_4x4_mul(&prod, &prod, &(order->basis)); + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &prod); + ibz_mul(&div, &(order->denom), &(order->denom)); + ibz_mul(&div, &div, &div); + ibz_mul(&div, &div, &div); + ibz_div(&sqr, &div, &det, &div); + ok = ibz_is_zero(&div); + ok = ok & ibz_sqrt(disc, &sqr); + ibz_finalize(&det); + ibz_finalize(&div); + ibz_finalize(&sqr); + ibz_mat_4x4_finalize(&transposed); + ibz_mat_4x4_finalize(&norm); + ibz_mat_4x4_finalize(&prod); + return (ok); +} + +int +quat_order_is_maximal(const quat_lattice_t *order, const quat_alg_t *alg) +{ + int res; + ibz_t disc; + ibz_init(&disc); + quat_order_discriminant(&disc, order, alg); + res = (ibz_cmp(&disc, &(alg->p)) == 0); + ibz_finalize(&disc); + return (res); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/intbig.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/intbig.c new file mode 100644 index 0000000000..b0462dc8b5 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/intbig.c @@ -0,0 +1,791 @@ +#include "intbig_internal.h" +#include +#include +#include +#include +#include +#include + +// #define DEBUG_VERBOSE + +#ifdef DEBUG_VERBOSE +#define DEBUG_STR_PRINTF(x) printf("%s\n", (x)); + +static void +DEBUG_STR_FUN_INT_MP(const char *op, int arg1, const ibz_t *arg2) +{ + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + printf("%s,%x,%s\n", op, arg1, arg2_str); +} + +static void +DEBUG_STR_FUN_3(const char *op, const ibz_t *arg1, const ibz_t *arg2, const ibz_t *arg3) +{ + int arg1_size = ibz_size_in_base(arg1, 16); + char arg1_str[arg1_size + 2]; + ibz_convert_to_str(arg1, arg1_str, 16); + + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + int arg3_size = ibz_size_in_base(arg3, 16); + char arg3_str[arg3_size + 2]; + ibz_convert_to_str(arg3, arg3_str, 16); + + printf("%s,%s,%s,%s\n", op, arg1_str, arg2_str, arg3_str); +} + +static void +DEBUG_STR_FUN_MP2_INT(const char *op, const ibz_t *arg1, const ibz_t *arg2, int arg3) +{ + int arg1_size = ibz_size_in_base(arg1, 16); + char arg1_str[arg1_size + 2]; + ibz_convert_to_str(arg1, arg1_str, 16); + + int 
arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + printf("%s,%s,%s,%x\n", op, arg1_str, arg2_str, arg3); +} + +static void +DEBUG_STR_FUN_INT_MP2(const char *op, int arg1, const ibz_t *arg2, const ibz_t *arg3) +{ + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + int arg3_size = ibz_size_in_base(arg3, 16); + char arg3_str[arg3_size + 2]; + ibz_convert_to_str(arg3, arg3_str, 16); + + if (arg1 >= 0) + printf("%s,%x,%s,%s\n", op, arg1, arg2_str, arg3_str); + else + printf("%s,-%x,%s,%s\n", op, -arg1, arg2_str, arg3_str); +} + +static void +DEBUG_STR_FUN_INT_MP_INT(const char *op, int arg1, const ibz_t *arg2, int arg3) +{ + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + printf("%s,%x,%s,%x\n", op, arg1, arg2_str, arg3); +} + +static void +DEBUG_STR_FUN_4(const char *op, const ibz_t *arg1, const ibz_t *arg2, const ibz_t *arg3, const ibz_t *arg4) +{ + int arg1_size = ibz_size_in_base(arg1, 16); + char arg1_str[arg1_size + 2]; + ibz_convert_to_str(arg1, arg1_str, 16); + + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + int arg3_size = ibz_size_in_base(arg3, 16); + char arg3_str[arg3_size + 2]; + ibz_convert_to_str(arg3, arg3_str, 16); + + int arg4_size = ibz_size_in_base(arg4, 16); + char arg4_str[arg4_size + 2]; + ibz_convert_to_str(arg4, arg4_str, 16); + + printf("%s,%s,%s,%s,%s\n", op, arg1_str, arg2_str, arg3_str, arg4_str); +} +#else +#define DEBUG_STR_PRINTF(x) +#define DEBUG_STR_FUN_INT_MP(op, arg1, arg2) +#define DEBUG_STR_FUN_3(op, arg1, arg2, arg3) +#define DEBUG_STR_FUN_INT_MP2(op, arg1, arg2, arg3) +#define DEBUG_STR_FUN_INT_MP_INT(op, arg1, arg2, arg3) +#define DEBUG_STR_FUN_4(op, arg1, arg2, arg3, arg4) +#endif + +/** @defgroup ibz_t Constants + * @{ + */ + +const __mpz_struct ibz_const_zero[1] = { + { + ._mp_alloc = 0, + ._mp_size = 0, + ._mp_d = (mp_limb_t[]){ 0 }, + } +}; + +const __mpz_struct ibz_const_one[1] = { + { + ._mp_alloc = 0, + ._mp_size = 1, + ._mp_d = (mp_limb_t[]){ 1 }, + } +}; + +const __mpz_struct ibz_const_two[1] = { + { + ._mp_alloc = 0, + ._mp_size = 1, + ._mp_d = (mp_limb_t[]){ 2 }, + } +}; + +const __mpz_struct ibz_const_three[1] = { + { + ._mp_alloc = 0, + ._mp_size = 1, + ._mp_d = (mp_limb_t[]){ 3 }, + } +}; + +void +ibz_init(ibz_t *x) +{ + mpz_init(*x); +} + +void +ibz_finalize(ibz_t *x) +{ + mpz_clear(*x); +} + +void +ibz_add(ibz_t *sum, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_add(*sum, *a, *b); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_add", sum, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_sub(ibz_t *diff, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_sub(*diff, *a, *b); + +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_sub", diff, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_mul(*prod, *a, *b); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_mul", 
prod, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_neg(ibz_t *neg, const ibz_t *a) +{ + mpz_neg(*neg, *a); +} + +void +ibz_abs(ibz_t *abs, const ibz_t *a) +{ + mpz_abs(*abs, *a); +} + +void +ibz_div(ibz_t *quotient, ibz_t *remainder, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_tdiv_qr(*quotient, *remainder, *a, *b); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_4("ibz_div", quotient, remainder, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp; + ibz_init(&a_cp); + ibz_copy(&a_cp, a); +#endif + mpz_tdiv_q_2exp(*quotient, *a, exp); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_MP2_INT("ibz_div_2exp,%Zx,%Zx,%x\n", quotient, &a_cp, exp); + ibz_finalize(&a_cp); +#endif +} + +void +ibz_div_floor(ibz_t *q, ibz_t *r, const ibz_t *n, const ibz_t *d) +{ + mpz_fdiv_qr(*q, *r, *n, *d); +} + +void +ibz_mod(ibz_t *r, const ibz_t *a, const ibz_t *b) +{ + mpz_mod(*r, *a, *b); +} + +unsigned long int +ibz_mod_ui(const mpz_t *n, unsigned long int d) +{ + return mpz_fdiv_ui(*n, d); +} + +int +ibz_divides(const ibz_t *a, const ibz_t *b) +{ + return mpz_divisible_p(*a, *b); +} + +void +ibz_pow(ibz_t *pow, const ibz_t *x, uint32_t e) +{ + mpz_pow_ui(*pow, *x, e); +} + +void +ibz_pow_mod(ibz_t *pow, const ibz_t *x, const ibz_t *e, const ibz_t *m) +{ + mpz_powm(*pow, *x, *e, *m); + DEBUG_STR_FUN_4("ibz_pow_mod", pow, x, e, m); +} + +int +ibz_two_adic(ibz_t *pow) +{ + return mpz_scan1(*pow, 0); +} + +int +ibz_cmp(const ibz_t *a, const ibz_t *b) +{ + int ret = mpz_cmp(*a, *b); + DEBUG_STR_FUN_INT_MP2("ibz_cmp", ret, a, b); + return ret; +} + +int +ibz_is_zero(const ibz_t *x) +{ + int ret = !mpz_cmp_ui(*x, 0); + DEBUG_STR_FUN_INT_MP("ibz_is_zero", ret, x); + return ret; +} + +int +ibz_is_one(const ibz_t *x) +{ + int ret = !mpz_cmp_ui(*x, 1); + DEBUG_STR_FUN_INT_MP("ibz_is_one", ret, x); + return ret; +} + +int +ibz_cmp_int32(const ibz_t *x, int32_t y) +{ + int ret = mpz_cmp_si(*x, (signed long int)y); + DEBUG_STR_FUN_INT_MP_INT("ibz_cmp_int32", ret, x, y); + return ret; +} + +int +ibz_is_even(const ibz_t *x) +{ + int ret = !mpz_tstbit(*x, 0); + DEBUG_STR_FUN_INT_MP("ibz_is_even", ret, x); + return ret; +} + +int +ibz_is_odd(const ibz_t *x) +{ + int ret = mpz_tstbit(*x, 0); + DEBUG_STR_FUN_INT_MP("ibz_is_odd", ret, x); + return ret; +} + +void +ibz_set(ibz_t *i, int32_t x) +{ + mpz_set_si(*i, x); +} + +int +ibz_convert_to_str(const ibz_t *i, char *str, int base) +{ + if (!str || (base != 10 && base != 16)) + return 0; + + mpz_get_str(str, base, *i); + + return 1; +} + +void +ibz_print(const ibz_t *num, int base) +{ + assert(base == 10 || base == 16); + + int num_size = ibz_size_in_base(num, base); + char num_str[num_size + 2]; + ibz_convert_to_str(num, num_str, base); + printf("%s", num_str); +} + +int +ibz_set_from_str(ibz_t *i, const char *str, int base) +{ + return (1 + mpz_set_str(*i, str, base)); +} + +void +ibz_copy(ibz_t *target, const ibz_t *value) +{ + mpz_set(*target, *value); +} + +void +ibz_swap(ibz_t *a, ibz_t *b) +{ + mpz_swap(*a, *b); +} + +int32_t +ibz_get(const ibz_t *i) +{ +#if LONG_MAX == INT32_MAX + return (int32_t)mpz_get_si(*i); +#elif LONG_MAX > INT32_MAX + // Extracts the sign bit and the 31 least significant bits + signed long int t = mpz_get_si(*i); + return (int32_t)((t >> (sizeof(signed long int) * 8 - 32)) & 
INT32_C(0x80000000)) | (t & INT32_C(0x7FFFFFFF)); +#else +#error Unsupported configuration: LONG_MAX must be >= INT32_MAX +#endif +} + +int +ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b) +{ + int randret; + int ret = 1; + mpz_t tmp; + mpz_t bmina; + mpz_init(bmina); + mpz_sub(bmina, *b, *a); + + if (mpz_sgn(bmina) == 0) { + mpz_set(*rand, *a); + mpz_clear(bmina); + return 1; + } + + size_t len_bits = mpz_sizeinbase(bmina, 2); + size_t len_bytes = (len_bits + 7) / 8; + size_t sizeof_limb = sizeof(mp_limb_t); + size_t sizeof_limb_bits = sizeof_limb * 8; + size_t len_limbs = (len_bytes + sizeof_limb - 1) / sizeof_limb; + + mp_limb_t mask = ((mp_limb_t)-1) >> (sizeof_limb_bits - len_bits) % sizeof_limb_bits; + mp_limb_t r[len_limbs]; + +#ifndef NDEBUG + { + for (size_t i = 0; i < len_limbs; ++i) + r[i] = (mp_limb_t)-1; + r[len_limbs - 1] = mask; + mpz_t check; + mpz_roinit_n(check, r, len_limbs); + assert(mpz_cmp(check, bmina) >= 0); // max sampled value >= b - a + mpz_t bmina2; + mpz_init(bmina2); + mpz_add(bmina2, bmina, bmina); + assert(mpz_cmp(check, bmina2) < 0); // max sampled value < 2 * (b - a) + mpz_clear(bmina2); + } +#endif + + do { + randret = randombytes((unsigned char *)r, len_bytes); + if (randret != 0) { + ret = 0; + goto err; + } +#ifdef TARGET_BIG_ENDIAN + for (size_t i = 0; i < len_limbs; ++i) + r[i] = BSWAP_DIGIT(r[i]); +#endif + r[len_limbs - 1] &= mask; + mpz_roinit_n(tmp, r, len_limbs); + if (mpz_cmp(tmp, bmina) <= 0) + break; + } while (1); + + mpz_add(*rand, tmp, *a); +err: + mpz_clear(bmina); + return ret; +} + +int +ibz_rand_interval_i(ibz_t *rand, int32_t a, int32_t b) +{ + uint32_t diff, mask; + int32_t rand32; + + if (!(a >= 0 && b >= 0 && b > a)) { + printf("a = %d b = %d\n", a, b); + } + assert(a >= 0 && b >= 0 && b > a); + + diff = b - a; + + // Create a mask with 1 + ceil(log2(diff)) least significant bits set +#if (defined(__GNUC__) || defined(__clang__)) && INT_MAX == INT32_MAX + mask = (1 << (32 - __builtin_clz((uint32_t)diff))) - 1; +#else + uint32_t diff2 = diff, tmp; + + mask = (diff2 > 0xFFFF) << 4; + diff2 >>= mask; + + tmp = (diff2 > 0xFF) << 3; + diff2 >>= tmp; + mask |= tmp; + + tmp = (diff2 > 0xF) << 2; + diff2 >>= tmp; + mask |= tmp; + + tmp = (diff2 > 0x3) << 1; + diff2 >>= tmp; + mask |= tmp; + + mask |= diff2 >> 1; + + mask = (1 << (mask + 1)) - 1; +#endif + + assert(mask >= diff && mask < 2 * diff); + + // Rejection sampling + do { + randombytes((unsigned char *)&rand32, sizeof(rand32)); + +#ifdef TARGET_BIG_ENDIAN + rand32 = BSWAP32(rand32); +#endif + + rand32 &= mask; + } while (rand32 > (int32_t)diff); + + rand32 += a; + ibz_set(rand, rand32); + + return 1; +} + +int +ibz_rand_interval_minm_m(ibz_t *rand, int32_t m) +{ + int ret = 1; + mpz_t m_big; + + // m_big = 2 * m + mpz_init_set_si(m_big, m); + mpz_add(m_big, m_big, m_big); + + // Sample in [0, 2*m] + ret = ibz_rand_interval(rand, &ibz_const_zero, &m_big); + + // Adjust to range [-m, m] + mpz_sub_ui(*rand, *rand, m); + + mpz_clear(m_big); + + return ret; +} + +int +ibz_rand_interval_bits(ibz_t *rand, uint32_t m) +{ + int ret = 1; + mpz_t tmp; + mpz_t low; + mpz_init_set_ui(tmp, 1); + mpz_mul_2exp(tmp, tmp, m); + mpz_init(low); + mpz_neg(low, tmp); + ret = ibz_rand_interval(rand, &low, &tmp); + mpz_clear(tmp); + mpz_clear(low); + if (ret != 1) + goto err; + mpz_sub_ui(*rand, *rand, (unsigned long int)m); + return ret; +err: + mpz_clear(tmp); + mpz_clear(low); + return ret; +} + +int +ibz_bitsize(const ibz_t *a) +{ + return (int)mpz_sizeinbase(*a, 2); +} + +int 
+ibz_size_in_base(const ibz_t *a, int base) +{ + return (int)mpz_sizeinbase(*a, base); +} + +void +ibz_copy_digits(ibz_t *target, const digit_t *dig, int dig_len) +{ + mpz_import(*target, dig_len, -1, sizeof(digit_t), 0, 0, dig); +} + +void +ibz_to_digits(digit_t *target, const ibz_t *ibz) +{ + // From the GMP documentation: + // "If op is zero then the count returned will be zero and nothing written to rop." + // The next line ensures zero is written to the first limb of target if ibz is zero; + // target is then overwritten by the actual value if it is not. + target[0] = 0; + mpz_export(target, NULL, -1, sizeof(digit_t), 0, 0, *ibz); +} + +int +ibz_probab_prime(const ibz_t *n, int reps) +{ + int ret = mpz_probab_prime_p(*n, reps); + DEBUG_STR_FUN_INT_MP_INT("ibz_probab_prime", ret, n, reps); + return ret; +} + +void +ibz_gcd(ibz_t *gcd, const ibz_t *a, const ibz_t *b) +{ + mpz_gcd(*gcd, *a, *b); +} + +int +ibz_invmod(ibz_t *inv, const ibz_t *a, const ibz_t *mod) +{ + return (mpz_invert(*inv, *a, *mod) ? 1 : 0); +} + +int +ibz_legendre(const ibz_t *a, const ibz_t *p) +{ + return mpz_legendre(*a, *p); +} + +int +ibz_sqrt(ibz_t *sqrt, const ibz_t *a) +{ + if (mpz_perfect_square_p(*a)) { + mpz_sqrt(*sqrt, *a); + return 1; + } else { + return 0; + } +} + +void +ibz_sqrt_floor(ibz_t *sqrt, const ibz_t *a) +{ + mpz_sqrt(*sqrt, *a); +} + +int +ibz_sqrt_mod_p(ibz_t *sqrt, const ibz_t *a, const ibz_t *p) +{ +#ifndef NDEBUG + assert(ibz_probab_prime(p, 100)); +#endif + // Case a = 0 + { + ibz_t test; + ibz_init(&test); + ibz_mod(&test, a, p); + if (ibz_is_zero(&test)) { + ibz_set(sqrt, 0); + } + ibz_finalize(&test); + } +#ifdef DEBUG_VERBOSE + ibz_t a_cp, p_cp; + ibz_init(&a_cp); + ibz_init(&p_cp); + ibz_copy(&a_cp, a); + ibz_copy(&p_cp, p); +#endif + + mpz_t amod, tmp, exp, a4, a2, q, z, qnr, x, y, b, pm1; + mpz_init(amod); + mpz_init(tmp); + mpz_init(exp); + mpz_init(a4); + mpz_init(a2); + mpz_init(q); + mpz_init(z); + mpz_init(qnr); + mpz_init(x); + mpz_init(y); + mpz_init(b); + mpz_init(pm1); + + int ret = 1; + + mpz_mod(amod, *a, *p); + if (mpz_cmp_ui(amod, 0) < 0) { + mpz_add(amod, *p, amod); + } + + if (mpz_legendre(amod, *p) != 1) { + ret = 0; + goto end; + } + + mpz_sub_ui(pm1, *p, 1); + + if (mpz_mod_ui(tmp, *p, 4) == 3) { + // p % 4 == 3 + mpz_add_ui(tmp, *p, 1); + mpz_fdiv_q_2exp(tmp, tmp, 2); + mpz_powm(*sqrt, amod, tmp, *p); + } else if (mpz_mod_ui(tmp, *p, 8) == 5) { + // p % 8 == 5 + mpz_sub_ui(tmp, *p, 1); + mpz_fdiv_q_2exp(tmp, tmp, 2); + mpz_powm(tmp, amod, tmp, *p); // a^{(p-1)/4} mod p + if (!mpz_cmp_ui(tmp, 1)) { + mpz_add_ui(tmp, *p, 3); + mpz_fdiv_q_2exp(tmp, tmp, 3); + mpz_powm(*sqrt, amod, tmp, *p); // a^{(p+3)/8} mod p + } else { + mpz_sub_ui(tmp, *p, 5); + mpz_fdiv_q_2exp(tmp, tmp, 3); // (p - 5) / 8 + mpz_mul_2exp(a4, amod, 2); // 4*a + mpz_powm(tmp, a4, tmp, *p); + + mpz_mul_2exp(a2, amod, 1); + mpz_mul(tmp, a2, tmp); + mpz_mod(*sqrt, tmp, *p); + } + } else { + // p % 8 == 1 -> Shanks-Tonelli + int e = 0; + mpz_sub_ui(q, *p, 1); + while (mpz_tstbit(q, e) == 0) + e++; + mpz_fdiv_q_2exp(q, q, e); + + // 1. find generator - non-quadratic residue + mpz_set_ui(qnr, 2); + while (mpz_legendre(qnr, *p) != -1) + mpz_add_ui(qnr, qnr, 1); + mpz_powm(z, qnr, q, *p); + + // 2. 
Initialize + mpz_set(y, z); + mpz_powm(y, amod, q, *p); // y = a^q mod p + + mpz_add_ui(tmp, q, 1); // tmp = (q + 1) / 2 + mpz_fdiv_q_2exp(tmp, tmp, 1); + + mpz_powm(x, amod, tmp, *p); // x = a^(q + 1)/2 mod p + + mpz_set_ui(exp, 1); + mpz_mul_2exp(exp, exp, e - 2); + + for (int i = 0; i < e; ++i) { + mpz_powm(b, y, exp, *p); + + if (!mpz_cmp(b, pm1)) { + mpz_mul(x, x, z); + mpz_mod(x, x, *p); + + mpz_mul(y, y, z); + mpz_mul(y, y, z); + mpz_mod(y, y, *p); + } + + mpz_powm_ui(z, z, 2, *p); + mpz_fdiv_q_2exp(exp, exp, 1); + } + + mpz_set(*sqrt, x); + } + +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_sqrt_mod_p", sqrt, &a_cp, &p_cp); + ibz_finalize(&a_cp); + ibz_finalize(&p_cp); +#endif + +end: + mpz_clear(amod); + mpz_clear(tmp); + mpz_clear(exp); + mpz_clear(a4); + mpz_clear(a2); + mpz_clear(q); + mpz_clear(z); + mpz_clear(qnr); + mpz_clear(x); + mpz_clear(y); + mpz_clear(b); + mpz_clear(pm1); + + return ret; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/intbig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/intbig.h new file mode 100644 index 0000000000..a0c2c02477 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/intbig.h @@ -0,0 +1,303 @@ +/** @file + * + * @authors Luca De Feo, Sina Schaeffler + * + * @brief Declarations for big integers in the reference implementation + */ + +#ifndef INTBIG_H +#define INTBIG_H + +#include +#if defined(MINI_GMP) +#include +#include +#else +#include +#endif +#include +#include + +/** @ingroup quat_quat + * @defgroup ibz_all Signed big integers (gmp-based) + * @{ + */ + +/** @defgroup ibz_t Precise number types + * @{ + */ + +/** @brief Type for signed long integers + * + * @typedef ibz_t + * + * For integers of arbitrary size, used by intbig module, using gmp + */ +typedef mpz_t ibz_t; + +/** @} + */ + +/** @defgroup ibz_c Constants + * @{ + */ + +/** + * Constant zero + */ +extern const ibz_t ibz_const_zero; + +/** + * Constant one + */ +extern const ibz_t ibz_const_one; + +/** + * Constant two + */ +extern const ibz_t ibz_const_two; + +/** + * Constant three + */ +extern const ibz_t ibz_const_three; + +/** @} + */ + +/** @defgroup ibz_finit Constructors and Destructors + * @{ + */ + +void ibz_init(ibz_t *x); +void ibz_finalize(ibz_t *x); + +/** @} + */ + +/** @defgroup ibz_za Basic integer arithmetic + * @{ + */ + +/** @brief sum=a+b + */ +void ibz_add(ibz_t *sum, const ibz_t *a, const ibz_t *b); + +/** @brief diff=a-b + */ +void ibz_sub(ibz_t *diff, const ibz_t *a, const ibz_t *b); + +/** @brief prod=a*b + */ +void ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b); + +/** @brief neg=-a + */ +void ibz_neg(ibz_t *neg, const ibz_t *a); + +/** @brief abs=|a| + */ +void ibz_abs(ibz_t *abs, const ibz_t *a); + +/** @brief Euclidean division of a by b + * + * Computes quotient, remainder so that remainder+quotient*b = a where 0<=|remainder|<|b| + * The quotient is rounded towards zero. + */ +void ibz_div(ibz_t *quotient, ibz_t *remainder, const ibz_t *a, const ibz_t *b); + +/** @brief Euclidean division of a by 2^exp + * + * Computes a right shift of abs(a) by exp bits, then sets sign(quotient) to sign(a). + * + * Division and rounding is as in ibz_div. 
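For illustration, a minimal caller sketch of the division routines documented here (hypothetical test code, not part of the upstream SQIsign sources; it only uses ibz_init/ibz_set/ibz_div/ibz_div_2exp/ibz_get/ibz_finalize as declared in this header):

    #include <assert.h>
    #include "intbig.h"

    static void demo_truncated_division(void)
    {
        ibz_t a, b, q, r;
        ibz_init(&a); ibz_init(&b); ibz_init(&q); ibz_init(&r);
        ibz_set(&a, -7);
        ibz_set(&b, 2);
        ibz_div(&q, &r, &a, &b);      /* quotient rounded toward zero: q = -3, r = -1 */
        assert(ibz_get(&q) == -3 && ibz_get(&r) == -1);
        ibz_div_2exp(&q, &a, 1);      /* shift |a| right by 1 bit, restore the sign: q = -3 */
        assert(ibz_get(&q) == -3);
        ibz_finalize(&a); ibz_finalize(&b); ibz_finalize(&q); ibz_finalize(&r);
    }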
+ */ +void ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp); + +/** @brief Two adic valuation computation + * + * Computes the position of the first 1 in the binary representation of the integer given in input + * + * When this number is a power of two this gives the two adic valuation of the integer + */ +int ibz_two_adic(ibz_t *pow); + +/** @brief r = a mod b + * + * Assumes valid inputs + * The sign of the divisor is ignored, the result is always non-negative + */ +void ibz_mod(ibz_t *r, const ibz_t *a, const ibz_t *b); + +unsigned long int ibz_mod_ui(const mpz_t *n, unsigned long int d); + +/** @brief Test if a = 0 mod b + */ +int ibz_divides(const ibz_t *a, const ibz_t *b); + +/** @brief pow=x^e + * + * Assumes valid inputs, The case 0^0 yields 1. + */ +void ibz_pow(ibz_t *pow, const ibz_t *x, uint32_t e); + +/** @brief pow=(x^e) mod m + * + * Assumes valid inputs + */ +void ibz_pow_mod(ibz_t *pow, const ibz_t *x, const ibz_t *e, const ibz_t *m); + +/** @brief Compare a and b + * + * @returns a positive value if a > b, zero if a = b, and a negative value if a < b + */ +int ibz_cmp(const ibz_t *a, const ibz_t *b); + +/** @brief Test if x is 0 + * + * @returns 1 if x=0, 0 otherwise + */ +int ibz_is_zero(const ibz_t *x); + +/** @brief Test if x is 1 + * + * @returns 1 if x=1, 0 otherwise + */ +int ibz_is_one(const ibz_t *x); + +/** @brief Compare x to y + * + * @returns 0 if x=y, positive if x>y, negative if x= 0 and target must hold sufficient elements to hold ibz + * + * @param target Target digit_t array + * @param ibz ibz source ibz_t element + */ +void ibz_to_digits(digit_t *target, const ibz_t *ibz); +#define ibz_to_digit_array(T, I) \ + do { \ + memset((T), 0, sizeof(T)); \ + ibz_to_digits((T), (I)); \ + } while (0) + +/** @brief get int32_t equal to the lowest bits of i + * + * Should not be used to get the value of i if its bitsize is close to 32 bit + * It can however be used on any i to get an int32_t of the same parity as i (and same value modulo + * 4) + * + * @param i Input integer + */ +int32_t ibz_get(const ibz_t *i); + +/** @brief generate random value in [a, b] + * assumed that a >= 0 and b >= 0 and a < b + * @returns 1 on success, 0 on failiure + */ +int ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b); + +/** @brief generate random value in [-m, m] + * assumed that m > 0 and bitlength of m < 32 bit + * @returns 1 on success, 0 on failiure + */ +int ibz_rand_interval_minm_m(ibz_t *rand, int32_t m); + +/** @brief Bitsize of a. + * + * @returns Bitsize of a. + * + */ +int ibz_bitsize(const ibz_t *a); + +/** @brief Size of a in given base. + * + * @returns Size of a in given base. 
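As a quick orientation for the modular-arithmetic entry points above, a small hypothetical snippet (not upstream code) exercising ibz_pow_mod; the expected value is easy to check by hand:

    #include <assert.h>
    #include "intbig.h"

    static void demo_pow_mod(void)
    {
        ibz_t base, e, m, out;
        ibz_init(&base); ibz_init(&e); ibz_init(&m); ibz_init(&out);
        ibz_set(&base, 2);
        ibz_set(&e, 10);
        ibz_set(&m, 1000);
        ibz_pow_mod(&out, &base, &e, &m);   /* 2^10 mod 1000 = 1024 mod 1000 = 24 */
        assert(ibz_get(&out) == 24);
        ibz_finalize(&base); ibz_finalize(&e); ibz_finalize(&m); ibz_finalize(&out);
    }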
+ * + */ +int ibz_size_in_base(const ibz_t *a, int base); + +/** @} + */ + +/** @defgroup ibz_n Number theory functions + * @{ + */ + +/** + * @brief Greatest common divisor + * + * @param gcd Output: Set to the gcd of a and b + * @param a + * @param b + */ +void ibz_gcd(ibz_t *gcd, const ibz_t *a, const ibz_t *b); + +/** + * @brief Modular inverse + * + * @param inv Output: Set to the integer in [0,mod[ such that a*inv = 1 mod (mod) if it exists + * @param a + * @param mod + * @returns 1 if inverse exists and was computed, 0 otherwise + */ +int ibz_invmod(ibz_t *inv, const ibz_t *a, const ibz_t *mod); + +/** + * @brief Floor of Integer square root + * + * @param sqrt Output: Set to the floor of an integer square root + * @param a number of which a floor of an integer square root is searched + */ +void ibz_sqrt_floor(ibz_t *sqrt, const ibz_t *a); + +/** @} + */ + +// end of ibz_all +/** @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/intbig_internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/intbig_internal.h new file mode 100644 index 0000000000..de4762a6d3 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/intbig_internal.h @@ -0,0 +1,123 @@ +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations for big integer functions only used in quaternion functions + */ + +#ifndef INTBIG_INTERNAL_H +#define INTBIG_INTERNAL_H + +#include "intbig.h" + +/** @internal + * @ingroup quat_helpers + * @defgroup ibz_helper Internal integer functions (gmp-based) + * @{ + */ + +/********************************************************************/ + +/** @brief Euclidean division of a by b + * + * Computes quotient, remainder so that remainder+quotient*b = a where 0<=|remainder|<|b| + * The quotient is rounded towards minus infinity. + */ +void ibz_div_floor(ibz_t *q, ibz_t *r, const ibz_t *n, const ibz_t *d); + +/** @brief generate random value in [a, b] + * assumed that a >= 0, b >= 0 and a < b + * @returns 1 on success, 0 on failiure + */ +int ibz_rand_interval_i(ibz_t *rand, int32_t a, int32_t b); + +/** @brief generate random value in [-2^m, 2^m] + * assumed that m > 0 and bitlength of m < 32 bit + * @returns 1 on success, 0 on failiure + */ +int ibz_rand_interval_bits(ibz_t *rand, uint32_t m); + +/** @brief set str to a string containing the representation of i in base + * + * Base should be 10 or 16 + * + * str should be an array of length enough to store the representation of in + * in base, which can be obtained by ibz_sizeinbase(i, base) + 2, where the 2 + * is for the sign and the null terminator + * + * Case for base 16 does not matter + * + * @returns 1 if the integer could be converted to a string, 0 otherwise + */ +int ibz_convert_to_str(const ibz_t *i, char *str, int base); + +/** @brief print num in base to stdout + * + * Base should be 10 or 16 + */ +void ibz_print(const ibz_t *num, int base); + +/** @brief set i to integer contained in string when read as number in base + * + * Base should be 10 or 16, and the number should be written without ponctuation or whitespaces + * + * Case for base 16 does not matter + * + * @returns 1 if the string could be converted to an integer, 0 otherwise + */ +int ibz_set_from_str(ibz_t *i, const char *str, int base); + +/** + * @brief Probabilistic primality test + * + * @param n The number to test + * @param reps Number of Miller-Rabin repetitions. 
The more, the slower and the less likely are + * false positives + * @return 1 if probably prime, 0 if certainly not prime, 2 if certainly prime + * + * Using GMP's implementation: + * + * From GMP's documentation: "This function performs some trial divisions, a Baillie-PSW probable + * prime test, then reps-24 Miller-Rabin probabilistic primality tests." + */ +int ibz_probab_prime(const ibz_t *n, int reps); + +/** + * @brief Square root modulo a prime + * + * @returns 1 if square root of a mod p exists and was computed, 0 otherwise + * @param sqrt Output: Set to a square root of a mod p if any exist + * @param a number of which a square root mod p is searched + * @param p assumed prime + */ +int ibz_sqrt_mod_p(ibz_t *sqrt, const ibz_t *a, const ibz_t *p); + +/** + * @brief Integer square root of a perfect square + * + * @returns 1 if an integer square root of a exists and was computed, 0 otherwise + * @param sqrt Output: Set to a integer square root of a if any exist + * @param a number of which an integer square root is searched + */ +int ibz_sqrt(ibz_t *sqrt, const ibz_t *a); + +/** + * @brief Legendre symbol of a mod p + * + * @returns Legendre symbol of a mod p + * @param a + * @param p assumed prime + * + * Uses GMP's implementation + * + * If output is 1, a is a square mod p, if -1, not. If 0, it is divisible by p + */ +int ibz_legendre(const ibz_t *a, const ibz_t *p); + +/** @} + */ + +// end of ibz_all +/** @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/integers.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/integers.c new file mode 100644 index 0000000000..ec7cda05eb --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/integers.c @@ -0,0 +1,116 @@ +#include +#include "internal.h" +#include +#include +#include + +// Random prime generation for tests +int +ibz_generate_random_prime(ibz_t *p, int is3mod4, int bitsize, int probability_test_iterations) +{ + assert(bitsize != 0); + int found = 0; + ibz_t two_pow, two_powp; + + ibz_init(&two_pow); + ibz_init(&two_powp); + ibz_pow(&two_pow, &ibz_const_two, (bitsize - 1) - (0 != is3mod4)); + ibz_pow(&two_powp, &ibz_const_two, bitsize - (0 != is3mod4)); + + int cnt = 0; + while (!found) { + cnt++; + if (cnt % 100000 == 0) { + printf("Random prime generation is still running after %d attempts, this is not " + "normal! 
The expected number of attempts is %d \n", + cnt, + bitsize); + } + ibz_rand_interval(p, &two_pow, &two_powp); + ibz_add(p, p, p); + if (is3mod4) { + ibz_add(p, p, p); + ibz_add(p, &ibz_const_two, p); + } + ibz_add(p, &ibz_const_one, p); + + found = ibz_probab_prime(p, probability_test_iterations); + } + ibz_finalize(&two_pow); + ibz_finalize(&two_powp); + return found; +} + +// solves x^2 + n y^2 == p for positive integers x, y +// assumes that p is prime and -n mod p is a square +int +ibz_cornacchia_prime(ibz_t *x, ibz_t *y, const ibz_t *n, const ibz_t *p) +{ + ibz_t r0, r1, r2, a, prod; + ibz_init(&r0); + ibz_init(&r1); + ibz_init(&r2); + ibz_init(&a); + ibz_init(&prod); + + int res = 0; + + // manage case p = 2 separately + if (!ibz_cmp(p, &ibz_const_two)) { + if (ibz_is_one(n)) { + ibz_set(x, 1); + ibz_set(y, 1); + res = 1; + } + goto done; + } + // manage case p = n separately + if (!ibz_cmp(p, n)) { + ibz_set(x, 0); + ibz_set(y, 1); + res = 1; + goto done; + } + + // test coprimality (should always be ok in our cases) + ibz_gcd(&r2, p, n); + if (!ibz_is_one(&r2)) + goto done; + + // get sqrt of -n mod p + ibz_neg(&r2, n); + if (!ibz_sqrt_mod_p(&r2, &r2, p)) + goto done; + + // run loop + ibz_copy(&prod, p); + ibz_copy(&r1, p); + ibz_copy(&r0, p); + while (ibz_cmp(&prod, p) >= 0) { + ibz_div(&a, &r0, &r2, &r1); + ibz_mul(&prod, &r0, &r0); + ibz_copy(&r2, &r1); + ibz_copy(&r1, &r0); + } + // test if result is solution + ibz_sub(&a, p, &prod); + ibz_div(&a, &r2, &a, n); + if (!ibz_is_zero(&r2)) + goto done; + if (!ibz_sqrt(y, &a)) + goto done; + + ibz_copy(x, &r0); + ibz_mul(&a, y, y); + ibz_mul(&a, &a, n); + ibz_add(&prod, &prod, &a); + res = !ibz_cmp(&prod, p); + +done: + ibz_finalize(&r0); + ibz_finalize(&r1); + ibz_finalize(&r2); + ibz_finalize(&a); + ibz_finalize(&prod); + return res; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/internal.h new file mode 100644 index 0000000000..edbba345f9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/internal.h @@ -0,0 +1,812 @@ +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations for helper functions for quaternion algebra implementation + */ + +#ifndef QUAT_HELPER_H +#define QUAT_HELPER_H + +#include +#include +#include "intbig_internal.h" + +/** @internal + * @ingroup quat_quat + * @defgroup quat_helpers Quaternion module internal functions + * @{ + */ + +/** @internal + * @defgroup quat_alg_helpers Helper functions for the alg library + * @{ + */ + +/** @internal + * @brief helper function for initializing small quaternion algebras. 
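To make the contract of the ibz_cornacchia_prime solver above concrete, a hand-checkable fragment (hypothetical test values, not upstream code; it assumes the solver's prototype is in scope via the upstream headers). With n = 1 and p = 13 the preconditions hold: p is prime and -n is a square mod p.

    ibz_t x, y, n, p;
    ibz_init(&x); ibz_init(&y); ibz_init(&n); ibz_init(&p);
    ibz_set(&n, 1);
    ibz_set(&p, 13);                        /* 13 is prime and -1 is a square mod 13 (5^2 = 25 = -1) */
    if (ibz_cornacchia_prime(&x, &y, &n, &p)) {
        /* success: x^2 + n*y^2 == p; here (x, y) = (3, 2) since 3^2 + 2^2 = 13 */
    }
    ibz_finalize(&x); ibz_finalize(&y); ibz_finalize(&n); ibz_finalize(&p);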
+ */ +void quat_alg_init_set_ui(quat_alg_t *alg, + unsigned int p); // test/lattice, test/ideal, test/algebra + +/** @brief a*b + * + * Multiply two coordinate vectors as elements of the algebra in basis (1,i,j,ij) with i^2 = -1, j^2 + * = -p + * + * @param res Output: Will contain product + * @param a + * @param b + * @param alg The quaternion algebra + */ +void quat_alg_coord_mul(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b, const quat_alg_t *alg); + +/** @brief a=b + * + * Test if a and b represent the same quaternion algebra element + * + * @param a + * @param b + * @returns 1 if a=b, 0 otherwise + */ +int quat_alg_elem_equal(const quat_alg_elem_t *a, const quat_alg_elem_t *b); + +/** @brief Test if x is 0 + * + * @returns 1 if x=0, 0 otherwise + * + * x is 0 iff all coordinates in x->coord are 0 + */ +int quat_alg_elem_is_zero(const quat_alg_elem_t *x); + +/** @brief Compute same denominator form of two quaternion algebra elements + * + * res_a=a and res_b=b (representing the same element) and res_a.denom = res_b.denom + * + * @param res_a + * @param res_b + * @param a + * @param b + */ +void quat_alg_equal_denom(quat_alg_elem_t *res_a, + quat_alg_elem_t *res_b, + const quat_alg_elem_t *a, + const quat_alg_elem_t *b); + +/** @brief Copies the given values into an algebra element, without normalizing it + * + * @param elem Output: algebra element of coordinates [coord0,coord1,coord2,coord3] and denominator + * denom + * @param denom Denominator, must be non zero + * @param coord0 Coordinate on 1 (0th vector of standard algebra basis) + * @param coord1 Coordinate on i (1st vector of standard algebra basis) + * @param coord2 Coordinate on j (2nd vector of standard algebra basis) + * @param coord3 Coordinate on ij (3rd vector of standard algebra basis) + */ +void quat_alg_elem_copy_ibz(quat_alg_elem_t *elem, + const ibz_t *denom, + const ibz_t *coord0, + const ibz_t *coord1, + const ibz_t *coord2, + const ibz_t *coord3); + +/** @brief Sets an algebra element to the given integer values, without normalizing it + * + * @param elem Output: algebra element of coordinates [coord0,coord1,coord2,coord3] and denominator + * denom + * @param denom Denominator, must be non zero + * @param coord0 Coordinate on 1 (0th vector of standard algebra basis) + * @param coord1 Coordinate on i (1st vector of standard algebra basis) + * @param coord2 Coordinate on j (2nd vector of standard algebra basis) + * @param coord3 Coordinate on ij (3rd vector of standard algebra basis) + */ +void quat_alg_elem_set(quat_alg_elem_t *elem, + int32_t denom, + int32_t coord0, + int32_t coord1, + int32_t coord2, + int32_t coord3); + +/** + * @brief Creates algebra element from scalar + * + * Resulting element has 1-coordinate equal to numerator/denominator + * + * @param elem Output: algebra element with numerator/denominator as first coordiante + * (1-coordinate), 0 elsewhere (i,j,ij coordinates) + * @param numerator + * @param denominator Assumed non zero + */ +void quat_alg_scalar(quat_alg_elem_t *elem, const ibz_t *numerator, const ibz_t *denominator); + +/** @brief a+b for algebra elements + * + * @param res Output + * @param a Algebra element + * @param b Algebra element + */ +void quat_alg_add(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b); + +/** @brief a-b for algebra elements + * + * @param res Output + * @param a Algebra element + * @param b Algebra element + */ +void quat_alg_sub(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b); + +/** @brief 
Multiplies algebra element by integer scalar, without normalizing it + * + * @param res Output + * @param scalar Integer + * @param elem Algebra element + */ +void quat_alg_elem_mul_by_scalar(quat_alg_elem_t *res, const ibz_t *scalar, const quat_alg_elem_t *elem); + +/** @} + */ + +/** @internal + * @defgroup quat_dim4_helpers Helper functions for functions for matrices or vectors in dimension 4 + * @{ + */ + +/** @internal + * @defgroup quat_inv_helpers Helper functions for the integer matrix inversion function + * @{ + */ + +/** @brief a1a2+b1b2+c1c2 + * + * @param coeff Output: The coefficien which was computed as a1a2+b1b2-c1c2 + * @param a1 + * @param a2 + * @param b1 + * @param b2 + * @param c1 + * @param c2 + */ +void ibz_inv_dim4_make_coeff_pmp(ibz_t *coeff, + const ibz_t *a1, + const ibz_t *a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2); + +/** @brief -a1a2+b1b2-c1c2 + * + * @param coeff Output: The coefficien which was computed as -a1a2+b1b2-c1c2 + * @param a1 + * @param a2 + * @param b1 + * @param b2 + * @param c1 + * @param c2 + */ +void ibz_inv_dim4_make_coeff_mpm(ibz_t *coeff, + const ibz_t *a1, + const ibz_t *a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2); + +/** @brief Matrix determinant and a matrix inv such that inv/det is the inverse matrix of the input + * + * Implemented following the methof of 2x2 minors explained at Method from + * https://www.geometrictools.com/Documentation/LaplaceExpansionTheorem.pdf (visited on 3rd of May + * 2023, 16h15 CEST) + * + * @returns 1 if the determinant of mat is not 0 and an inverse was computed, 0 otherwise + * @param inv Output: Will contain an integer matrix which, dividet by det, will yield the rational + * inverse of the matrix if it exists, can be NULL + * @param det Output: Will contain the determinant of the input matrix, can be NULL + * @param mat Matrix of which the inverse will be computed + */ +int ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_4x4_t *mat); + +/** @} + */ + +/** @internal + * @defgroup quat_dim4_lat_helpers Helper functions on vectors and matrices used mainly for lattices + * @{ + */ + +/** @brief Copy all values from one vector to another + * + * @param new Output: is set to same values as vec + * @param vec + */ +void ibz_vec_4_copy(ibz_vec_4_t *new, const ibz_vec_4_t *vec); + +/** @brief set res to values coord0,coord1,coord2,coord3 + * + * @param res Output: Will contain vector (coord0,coord1,coord2,coord3) + * @param coord0 + * @param coord1 + * @param coord2 + * @param coord3 + */ +void ibz_vec_4_copy_ibz(ibz_vec_4_t *res, + const ibz_t *coord0, + const ibz_t *coord1, + const ibz_t *coord2, + const ibz_t *coord3); + +/** @brief Set a vector of 4 integers to given values + * + * @param vec Output: is set to given coordinates + * @param coord0 + * @param coord1 + * @param coord2 + * @param coord3 + */ +void ibz_vec_4_set(ibz_vec_4_t *vec, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3); + +/** @brief a+b + * + * Add two integer 4-vectors + * + * @param res Output: Will contain sum + * @param a + * @param b + */ +void ibz_vec_4_add(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b); + +/** @brief a-b + * + * Substract two integer 4-vectors + * + * @param res Output: Will contain difference + * @param a + * @param b + */ +void ibz_vec_4_sub(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b); + +/** @brief x=0 + * + * Test if a vector x has only zero coordinates + * + * 
@returns 0 if x has at least one non-zero coordinates, 1 otherwise + * @param x + */ +int ibz_vec_4_is_zero(const ibz_vec_4_t *x); + +/** @brief Compute the linear combination lc = coeff_a vec_a + coeff_b vec_b + * + * @param lc Output: linear combination lc = coeff_a vec_a + coeff_b vec_b + * @param coeff_a Scalar multiplied to vec_a + * @param vec_a + * @param coeff_b Scalar multiplied to vec_b + * @param vec_b + */ +void ibz_vec_4_linear_combination(ibz_vec_4_t *lc, + const ibz_t *coeff_a, + const ibz_vec_4_t *vec_a, + const ibz_t *coeff_b, + const ibz_vec_4_t *vec_b); + +/** @brief multiplies all values in vector by same scalar + * + * @param prod Output + * @param scalar + * @param vec + */ +void ibz_vec_4_scalar_mul(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec); + +/** @brief divides all values in vector by same scalar + * + * @returns 1 if scalar divided all values in mat, 0 otherwise (division is performed in both cases) + * @param quot Output + * @param scalar + * @param vec + */ +int ibz_vec_4_scalar_div(ibz_vec_4_t *quot, const ibz_t *scalar, const ibz_vec_4_t *vec); + +/** @brief Negation for vectors of 4 integers + * + * @param neg Output: is set to -vec + * @param vec + */ +void ibz_vec_4_negate(ibz_vec_4_t *neg, const ibz_vec_4_t *vec); + +/** + * @brief content of a 4-vector of integers + * + * The content is the GCD of all entries. + * + * @param v A 4-vector of integers + * @param content Output: the resulting gcd + */ +void ibz_vec_4_content(ibz_t *content, const ibz_vec_4_t *v); + +/** @brief -mat for mat a 4x4 integer matrix + * + * @param neg Output: is set to -mat + * @param mat Input matrix + */ +void ibz_mat_4x4_negate(ibz_mat_4x4_t *neg, const ibz_mat_4x4_t *mat); + +/** @brief Set all coefficients of a matrix to zero for 4x4 integer matrices + * + * @param zero + */ +void ibz_mat_4x4_zero(ibz_mat_4x4_t *zero); + +/** @brief Set a matrix to the identity for 4x4 integer matrices + * + * @param id + */ +void ibz_mat_4x4_identity(ibz_mat_4x4_t *id); + +/** @brief Test equality to identity for 4x4 integer matrices + * + * @returns 1 if mat is the identity matrix, 0 otherwise + * @param mat + */ +int ibz_mat_4x4_is_identity(const ibz_mat_4x4_t *mat); + +/** @brief Equality test for 4x4 integer matrices + * + * @returns 1 if equal, 0 otherwise + * @param mat1 + * @param mat2 + */ +int ibz_mat_4x4_equal(const ibz_mat_4x4_t *mat1, const ibz_mat_4x4_t *mat2); + +/** @brief Copies all values from a 4x4 integer matrix to another one + * + * @param new Output: matrix which will have its entries set to mat's entries + * @param mat Input matrix + */ +void ibz_mat_4x4_copy(ibz_mat_4x4_t *new, const ibz_mat_4x4_t *mat); + +/** @brief Matrix by integer multiplication + * + * @param prod Output + * @param scalar + * @param mat + */ +void ibz_mat_4x4_scalar_mul(ibz_mat_4x4_t *prod, const ibz_t *scalar, const ibz_mat_4x4_t *mat); + +/** @brief gcd of all values in matrix + * + * @param gcd Output + * @param mat + */ +void ibz_mat_4x4_gcd(ibz_t *gcd, const ibz_mat_4x4_t *mat); + +/** @brief Verifies whether the 4x4 input matrix is in Hermite Normal Form + * + * @returns 1 if mat is in HNF, 0 otherwise + * @param mat Matrix to be tested + */ +int ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat); + +/** @brief Hermite Normal Form of a matrix of 8 integer vectors, computed using a multiple of its + * determinant as modulo + * + * Algorithm used is the one at number 2.4.8 in Henri Cohen's "A Course in Computational Algebraic + * Number Theory" (Springer Verlag, in series 
"Graduate texts in Mathematics") from 1993 + * + * @param hnf Output: Matrix in Hermite Normal Form generating the same lattice as generators + * @param generators matrix whose colums generate the same lattice than the output + * @param generator_number number of generators given + * @param mod integer, must be a multiple of the volume of the lattice generated by the columns of + * generators + */ +void ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, + int generator_number, + const ibz_vec_4_t *generators, + const ibz_t *mod); + +/** @} + */ +/** @} + */ + +/** @internal + * @defgroup quat_dim2_helpers Helper functions for dimension 2 + * @{ + */ + +/** @brief Set vector coefficients to the given integers + * + * @param vec Output: Vector + * @param a0 + * @param a1 + */ +void ibz_vec_2_set(ibz_vec_2_t *vec, int a0, int a1); // test/dim2 + +/** @brief Set matrix coefficients to the given integers + * + * @param mat Output: Matrix + * @param a00 + * @param a01 + * @param a10 + * @param a11 + */ +void ibz_mat_2x2_set(ibz_mat_2x2_t *mat, int a00, int a01, int a10, int a11); // test/dim2 + +void ibz_mat_2x2_add(ibz_mat_2x2_t *sum, const ibz_mat_2x2_t *a, + const ibz_mat_2x2_t *b); // unused + +/** @brief Determinant of a 2x2 integer matrix given as 4 integers + * + * @param det Output: Determinant of the matrix + * @param a11 matrix coefficient (upper left corner) + * @param a12 matrix coefficient (upper right corner) + * @param a21 matrix coefficient (lower left corner) + * @param a22 matrix coefficient (lower right corner) + */ +void ibz_mat_2x2_det_from_ibz(ibz_t *det, + const ibz_t *a11, + const ibz_t *a12, + const ibz_t *a21, + const ibz_t *a22); // dim4 + +/** + * @brief a*b for 2x2 integer matrices modulo m + * + * @param prod Output matrix + * @param mat_a Input matrix + * @param mat_b Input matrix + * @param m Integer modulo + */ +void ibz_2x2_mul_mod(ibz_mat_2x2_t *prod, + const ibz_mat_2x2_t *mat_a, + const ibz_mat_2x2_t *mat_b, + const ibz_t *m); // test/dim2 +/** @} + */ + +/** @internal + * @defgroup quat_lattice_helper Helper functions for the lattice library (dimension 4) + * @{ + */ + +/** + * @brief Modifies a lattice to put it in hermite normal form + * + * In-place modification of the lattice. + * + * @param lat input lattice + * + * On a correct lattice this function changes nothing (since it is already in HNF), but it can be + * used to put a handmade one in correct form in order to use the other lattice functions. + */ +void quat_lattice_hnf(quat_lattice_t *lat); // lattice, test/lattice, test/algebra, + +/** + * @brief Lattice equality + * + * Lattice bases are assumed to be under HNF, but denominators are free. + * + * @returns 1 if both lattices are equal, 0 otherwise + * @param lat1 + * @param lat2 + */ +int quat_lattice_equal(const quat_lattice_t *lat1, + const quat_lattice_t *lat2); // ideal, lattice, test/lattice, test/ideal + +/** + * @brief Lattice inclusion test + * + * Lattice bases are assumed to be under HNF, but denominators are free. 
+ * + * @returns 1 if sublat is included in overlat, 0 otherwise + * @param sublat Lattice whose inclusion in overlat will be testes + * @param overlat + */ +int quat_lattice_inclusion(const quat_lattice_t *sublat, + const quat_lattice_t *overlat); // test/lattice, test/ideal + +/** @brief Divides basis and denominator of a lattice by their gcd + * + * @param reduced Output + * @param lat Lattice + */ +void quat_lattice_reduce_denom(quat_lattice_t *reduced, + const quat_lattice_t *lat); // lattice, ideal, + +/** @brief a+b for lattices + * + * @param res Output + * @param lat1 Lattice + * @param lat2 Lattice + */ +void quat_lattice_add(quat_lattice_t *res, + const quat_lattice_t *lat1, + const quat_lattice_t *lat2); // ideal, lattice, test/lattice + +/** @brief a*b for lattices + * + * @param res Output + * @param lat1 Lattice + * @param lat2 Lattice + * @param alg The quaternion algebra + */ +void quat_lattice_mul(quat_lattice_t *res, + const quat_lattice_t *lat1, + const quat_lattice_t *lat2, + const quat_alg_t *alg); // ideal, lattie, test/ideal, test/lattice + +/** + * @brief Computes the dual lattice of lat, without putting its basis in HNF + * + * This function returns a lattice not under HNF. For careful internal use only. + * + * Computation method described in https://cseweb.ucsd.edu/classes/sp14/cse206A-a/lec4.pdf consulted + * on 19 of May 2023, 12h40 CEST + * + * @param dual Output: The dual lattice of lat. ATTENTION: is not under HNF. hnf computation must be + * applied before using lattice functions on it + * @param lat lattice, the dual of it will be computed + */ +void quat_lattice_dual_without_hnf(quat_lattice_t *dual, + const quat_lattice_t *lat); // lattice, ideal + +/** + * @brief Multiply all columns of lat with coord (as algebra elements) + * + * The columns and coord are seen as algebra elements in basis 1,i,j,ij, i^2 = -1, j^2 = -p). Coord + * is multiplied to the right of lat. + * + * The output matrix is not under HNF. + * + * @param prod Output: Matrix not under HND whose columns represent the algebra elements obtained as + * L*coord for L column of lat. + * @param lat Matrix whose columns are algebra elements in basis (1,i,j,ij) + * @param coord Integer coordinate algebra element in basis (1,i,j,ij) + * @param alg The quaternion algebra + */ +void quat_lattice_mat_alg_coord_mul_without_hnf(ibz_mat_4x4_t *prod, + const ibz_mat_4x4_t *lat, + const ibz_vec_4_t *coord, + const quat_alg_t *alg); // lattice + +/** @brief The index of sublat into overlat + * + * Assumes inputs are in HNF. + * + * @param index Output + * @param sublat A lattice in HNF, must be sublattice of overlat + * @param overlat A lattice in HNF, must be overlattice of sublat + */ +void quat_lattice_index(ibz_t *index, const quat_lattice_t *sublat, + const quat_lattice_t *overlat); // ideal + +/** @brief Compute the Gram matrix of the quaternion trace bilinear form + * + * Given a lattice of the quaternion algebra, computes the Gram matrix + * of the bilinear form + * + * 〈a,b〉 := [lattice->denom^2] Tr(a·conj(b)) + * + * multiplied by the square of the denominator of the lattice. + * + * This matrix always has integer entries. 
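A worked instance of the trace pairing used here: for a = x0 + x1*i + x2*j + x3*ij in the algebra with i^2 = -1 and j^2 = -p, the reduced norm is nrd(a) = x0^2 + x1^2 + p*x2^2 + p*x3^2 and Trd(a*conj(a)) = 2*nrd(a), so on the standard basis (1, i, j, ij) the Gram matrix of the pairing Trd(a*conj(b)) is 2*diag(1, 1, p, p) -- the same matrix that quat_order_discriminant (declared further down) builds before conjugating by the order's basis.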
+ * + * @param G Output: Gram matrix of the trace bilinear form on the lattice, multiplied by the square + * of the denominator of the lattice + * @param lattice A lattice + * @param alg The quaternion algebra + */ +void quat_lattice_gram(ibz_mat_4x4_t *G, const quat_lattice_t *lattice, const quat_alg_t *alg); + +/** + * @brief Compute an integer parallelogram containing the ball of + * given radius for the positive definite quadratic form defined by + * the Gram matrix G. + * + * The computed parallelogram is defined by the vectors + * + * (x₁ x₂ x₃ x₄) · U + * + * with x_i ∈ [ -box[i], box[i] ]. + * + * @param box Output: bounds of the parallelogram + * @param U Output: Unimodular transformation defining the parallelogram + * @param G Gram matrix of the quadratic form, must be full rank + * @param radius Radius of the ball, must be non-negative + * @returns 0 if the box only contains the origin, 1 otherwise + */ +int quat_lattice_bound_parallelogram(ibz_vec_4_t *box, ibz_mat_4x4_t *U, const ibz_mat_4x4_t *G, const ibz_t *radius); + +/** @} + */ + +/** @internal + * @defgroup quat_lideal_helper Helper functions for ideals and orders + * @{ + */ +/** @brief Set norm of an ideal given its lattice and parent order + * + * @param lideal In/Output: Ideal which has lattice and parent_order correctly set, but not + * necessarily the norm. Will have norm correctly set too. + */ +void quat_lideal_norm(quat_left_ideal_t *lideal); // ideal + +/** + * @brief Left principal ideal of order, generated by x + * + * @param lideal Output: left ideal + * @param alg quaternion algebra + * @param order maximal order of alg whose left ideal is searched + * @param x generating element + * + * Creates the left ideal in 'order' generated by the element 'x' + */ +void quat_lideal_create_principal(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const quat_lattice_t *order, + const quat_alg_t *alg); // ideal, test/ideal + +/** + * @brief Equality test for left ideals + * + * @returns 1 if both left ideals are equal, 0 otherwise + * @param lideal1 left ideal + * @param lideal2 left ideal + * @param alg the quaternion algebra + */ +int quat_lideal_equals(const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); // test/ideal + +/** + * @brief Sum of two left ideals + * + * @param sum Output: Left ideal which is the sum of the 2 inputs + * @param lideal1 left ideal + * @param lideal2 left ideal + * @param alg the quaternion algebra + */ +void quat_lideal_add(quat_left_ideal_t *sum, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); // Not used outside + +/** + * @brief Left ideal product of left ideal I and element alpha + * + * @param product Output: lideal I*alpha, must have integer norm + * @param lideal left ideal + * @param alpha element multiplied to lideal to get the product ideal + * @param alg the quaternion algebra + * + * I*alpha where I is a left-ideal and alpha an element of the algebra + * + * The resulting ideal must have an integer norm + * + */ +void quat_lideal_mul(quat_left_ideal_t *product, + const quat_left_ideal_t *lideal, + const quat_alg_elem_t *alpha, + const quat_alg_t *alg); // test/ideal + +/** @brief Computes the inverse ideal (for a left ideal of a maximal order) without putting it under + * HNF + * + * This function returns a lattice not under HNF. 
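One sanity check worth keeping in mind for the ideal product above: writing the reduced norm of alpha as the fraction norm/norm_d returned by quat_alg_norm, the norms multiply, nrd(I*alpha) = nrd(I)*nrd(alpha). This is why the implementation of quat_lideal_mul multiplies the two integer norms and then divides out norm_d, asserting that the division is exact.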
For careful internal use only + * + * Computes the inverse ideal for lideal as conjugate(lideal)/norm(lideal) + * + * @param inv Output: lattice which is lattice representation of the inverse ideal of lideal + * ATTENTION: is not under HNF. hnf computation must be applied before using lattice functions on it + * @param lideal Left ideal of a maximal order in alg + * @param alg The quaternion algebra + */ +void quat_lideal_inverse_lattice_without_hnf(quat_lattice_t *inv, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg); // ideal + +/** @brief Computes the right transporter of two left ideals of the same maximal order + * + * Following the implementation of ideal isomorphisms in the code of LearningToSQI's sage + * implementation of SQIsign. Computes the right transporter of (J:I) as inverse(I)J. + * + * @param trans Output: lattice which is right transporter from lideal1 to lideal2 (lideal2:lideal1) + * @param lideal1 Left ideal of the same maximal order than lideal1 in alg + * @param lideal2 Left ideal of the same maximal order than lideal1 in alg + * @param alg The quaternion algebra + */ +void quat_lideal_right_transporter(quat_lattice_t *trans, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); + +/** + * @brief Right order of a left ideal + * + * @param order Output: right order of the given ideal + * @param lideal left ideal + * @param alg the quaternion algebra + */ +void quat_lideal_right_order(quat_lattice_t *order, const quat_left_ideal_t *lideal, + const quat_alg_t *alg); // ideal + +/** + * @brief Gram matrix of the trace map of the ideal class + * + * Compute the Gram matrix of the bilinear form + * + * 〈a, b〉 := Tr(a·conj(b)) / norm(lideal) + * + * on the basis of the ideal. This matrix has integer entries and its + * integer congruence class only depends on the ideal class. + * + * @param G Output: Gram matrix of the trace map + * @param lideal left ideal + * @param alg the quaternion algebra + */ +void quat_lideal_class_gram(ibz_mat_4x4_t *G, const quat_left_ideal_t *lideal, const quat_alg_t *alg); + +/** @brief Test if order is maximal + * + * Checks if the discriminant of the order equals the prime p defining the quaternion algebra. + * + * It is not verified whether the order is really an order. The output 1 only means that if it is an + * order, then it is maximal. 
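Concretely, the right order above is computed as the self-transporter: quat_lideal_right_order(O, I, alg) calls quat_lideal_right_transporter(O, I, I, alg), i.e. O_R(I) = (I : I)_r = conj(I)*I / nrd(I), with the conjugate-over-norm lattice supplied by quat_lideal_inverse_lattice_without_hnf.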
+ * + * @returns 1 if order is maximal (assuming it is an order), 0 otherwise + * @param order An order of the quaternion algebra (assumes to be an order, this is not tested) + * @param alg The quaternion algebra + */ +int quat_order_is_maximal(const quat_lattice_t *order, + const quat_alg_t *alg); // ideal (only in asserts) + +/** @brief Compute the discriminant of an order as sqrt(det(gram(reduced_norm))) + * + * @param disc: Output: The discriminant sqrt(det(gram(reduced_norm))) + * @param order An order of the quaternion algebra + * @param alg The quaternion algebra + */ +int quat_order_discriminant(ibz_t *disc, const quat_lattice_t *order, + const quat_alg_t *alg); // ideal + +/** @} + */ + +/** @internal + * @ingroup quat_normeq + * @{ + */ + +/** @brief Set lattice to O0 + * + * @param O0 Lattice to be set to (1,i,(i+j)/2,(1+ij)/2) + */ +void quat_lattice_O0_set(quat_lattice_t *O0); + +/** @brief Set p-extremal maximal order to O0 + * + * @param O0 p-extremal order to be set to (1,i,(i+j)/2,(1+ij)/2) + */ +void quat_lattice_O0_set_extremal(quat_p_extremal_maximal_order_t *O0); + +/** + * @brief Create an element of a extremal maximal order from its coefficients + * + * @param elem Output: the quaternion element + * @param order the order + * @param coeffs the vector of 4 ibz coefficients + * @param Bpoo quaternion algebra + * + * elem = x + z*y + z*u + t*z*v + * where coeffs = [x,y,u,v] and t = order.t z = order.z + * + */ +void quat_order_elem_create(quat_alg_elem_t *elem, + const quat_p_extremal_maximal_order_t *order, + const ibz_vec_4_t *coeffs, + const quat_alg_t *Bpoo); // normeq, untested + +/** @} + */ +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/isog.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/isog.h new file mode 100644 index 0000000000..b251ca3cdc --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/isog.h @@ -0,0 +1,28 @@ +#ifndef _ISOG_H_ +#define _ISOG_H_ +#include +#include + +/* KPS structure for isogenies of degree 2 or 4 */ +typedef struct +{ + ec_point_t K; +} ec_kps2_t; +typedef struct +{ + ec_point_t K[3]; +} ec_kps4_t; + +void xisog_2(ec_kps2_t *kps, ec_point_t *B, const ec_point_t P); // degree-2 isogeny construction +void xisog_2_singular(ec_kps2_t *kps, ec_point_t *B24, ec_point_t A24); + +void xisog_4(ec_kps4_t *kps, ec_point_t *B, const ec_point_t P); // degree-4 isogeny construction +void xisog_4_singular(ec_kps4_t *kps, ec_point_t *B24, const ec_point_t P, ec_point_t A24); + +void xeval_2(ec_point_t *R, ec_point_t *const Q, const int lenQ, const ec_kps2_t *kps); +void xeval_2_singular(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps2_t *kps); + +void xeval_4(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps4_t *kps); +void xeval_4_singular(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_point_t P, const ec_kps4_t *kps); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/isog_chains.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/isog_chains.c new file mode 100644 index 0000000000..abc9808057 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/isog_chains.c @@ -0,0 +1,241 @@ +#include "isog.h" +#include + +// since we use degree 4 isogeny steps, we need to handle the odd case with care +static uint32_t +ec_eval_even_strategy(ec_curve_t *curve, + ec_point_t *points, + unsigned len_points, + const ec_point_t *kernel, + const int isog_len) +{ + ec_curve_normalize_A24(curve); + ec_point_t A24; + copy_point(&A24, &curve->A24); + + int 
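Since isog.h above only declares the kernel-point structures, here is a short sketch of the intended calling pattern for one degree-4 step (hypothetical variables A24, K4, pts, n_pts; the same pattern appears in ec_eval_even_strategy in this file):

    ec_kps4_t kps;                     /* kernel data for one degree-4 step */
    xisog_4(&kps, &A24, K4);           /* consumes K4 (order exactly 4), updates A24 to the image curve */
    xeval_4(pts, pts, n_pts, &kps);    /* pushes the n_pts points in pts through the isogeny */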
space = 1; + for (int i = 1; i < isog_len; i *= 2) + ++space; + + // Stack of remaining kernel points and their associated orders + ec_point_t splits[space]; + uint16_t todo[space]; + splits[0] = *kernel; + todo[0] = isog_len; + + int current = 0; // Pointer to current top of stack + + // Chain of 4-isogenies + for (int j = 0; j < isog_len / 2; ++j) { + assert(current >= 0); + assert(todo[current] >= 1); + // Get the next point of order 4 + while (todo[current] != 2) { + assert(todo[current] >= 3); + // A new split will be added + ++current; + assert(current < space); + // We set the seed of the new split to be computed and saved + copy_point(&splits[current], &splits[current - 1]); + // if we copied from the very first element, then we perform one additional doubling + unsigned num_dbls = todo[current - 1] / 4 * 2 + todo[current - 1] % 2; + todo[current] = todo[current - 1] - num_dbls; + while (num_dbls--) + xDBL_A24(&splits[current], &splits[current], &A24, false); + } + + if (j == 0) { + assert(fp2_is_one(&A24.z)); + if (!ec_is_four_torsion(&splits[current], curve)) + return -1; + + ec_point_t T; + xDBL_A24(&T, &splits[current], &A24, false); + if (fp2_is_zero(&T.x)) + return -1; // special isogenies not allowed + } else { + assert(todo[current] == 2); +#ifndef NDEBUG + if (fp2_is_zero(&splits[current].z)) + debug_print("splitting point z coordinate is unexpectedly zero"); + + ec_point_t test; + xDBL_A24(&test, &splits[current], &A24, false); + if (fp2_is_zero(&test.z)) + debug_print("z coordinate is unexpectedly zero before doubling"); + xDBL_A24(&test, &test, &A24, false); + if (!fp2_is_zero(&test.z)) + debug_print("z coordinate is unexpectedly not zero after doubling"); +#endif + } + + // Evaluate 4-isogeny + ec_kps4_t kps4; + xisog_4(&kps4, &A24, splits[current]); + xeval_4(splits, splits, current, &kps4); + for (int i = 0; i < current; ++i) + todo[i] -= 2; + xeval_4(points, points, len_points, &kps4); + + --current; + } + assert(isog_len % 2 ? !current : current == -1); + + // Final 2-isogeny + if (isog_len % 2) { +#ifndef NDEBUG + if (fp2_is_zero(&splits[0].z)) + debug_print("splitting point z coordinate is unexpectedly zero"); + ec_point_t test; + copy_point(&test, &splits[0]); + xDBL_A24(&test, &test, &A24, false); + if (!fp2_is_zero(&test.z)) + debug_print("z coordinate is unexpectedly not zero after doubling"); +#endif + + // We need to check the order of this point in case there were no 4-isogenies + if (isog_len == 1 && !ec_is_two_torsion(&splits[0], curve)) + return -1; + if (fp2_is_zero(&splits[0].x)) { + // special isogenies not allowed + // this case can only happen if isog_len == 1; otherwise the + // previous 4-isogenies we computed ensure that $T=(0:1)$ is put + // as the kernel of the dual isogeny + return -1; + } + + ec_kps2_t kps2; + xisog_2(&kps2, &A24, splits[0]); + xeval_2(points, points, len_points, &kps2); + } + + // Output curve in the form (A:C) + A24_to_AC(curve, &A24); + + curve->is_A24_computed_and_normalized = false; + + return 0; +} + +uint32_t +ec_eval_even(ec_curve_t *image, ec_isog_even_t *phi, ec_point_t *points, unsigned len_points) +{ + copy_curve(image, &phi->curve); + return ec_eval_even_strategy(image, points, len_points, &phi->kernel, phi->length); +} + +// naive implementation +uint32_t +ec_eval_small_chain(ec_curve_t *curve, + const ec_point_t *kernel, + int len, + ec_point_t *points, + unsigned len_points, + bool special) // do we allow special isogenies? 
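A caller-side sketch of the ec_eval_even wrapper defined above (hypothetical variables E, K, P, Q and exponent f are assumed to be set up elsewhere; the ec_isog_even_t fields used are the ones the wrapper itself reads):

    /* Push two auxiliary points through the 2^f-isogeny with kernel K on E. */
    ec_isog_even_t phi;
    ec_point_t imgs[2] = { P, Q };
    copy_curve(&phi.curve, &E);
    copy_point(&phi.kernel, &K);
    phi.length = f;

    ec_curve_t codomain;
    if (ec_eval_even(&codomain, &phi, imgs, 2) != 0) {
        /* failure: K did not have exact order 2^f, or a disallowed special isogeny was hit */
    }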
+{ + + ec_point_t A24; + AC_to_A24(&A24, curve); + + ec_kps2_t kps; + ec_point_t small_K, big_K; + copy_point(&big_K, kernel); + + for (int i = 0; i < len; i++) { + copy_point(&small_K, &big_K); + // small_K = big_K; + for (int j = 0; j < len - i - 1; j++) { + xDBL_A24(&small_K, &small_K, &A24, false); + } + // Check the order of the point before the first isogeny step + if (i == 0 && !ec_is_two_torsion(&small_K, curve)) + return (uint32_t)-1; + // Perform isogeny step + if (fp2_is_zero(&small_K.x)) { + if (special) { + ec_point_t B24; + xisog_2_singular(&kps, &B24, A24); + xeval_2_singular(&big_K, &big_K, 1, &kps); + xeval_2_singular(points, points, len_points, &kps); + copy_point(&A24, &B24); + } else { + return (uint32_t)-1; + } + } else { + xisog_2(&kps, &A24, small_K); + xeval_2(&big_K, &big_K, 1, &kps); + xeval_2(points, points, len_points, &kps); + } + } + A24_to_AC(curve, &A24); + + curve->is_A24_computed_and_normalized = false; + return 0; +} + +uint32_t +ec_isomorphism(ec_isom_t *isom, const ec_curve_t *from, const ec_curve_t *to) +{ + fp2_t t0, t1, t2, t3, t4; + + fp2_mul(&t0, &from->A, &from->C); + fp2_mul(&t1, &to->A, &to->C); + + fp2_mul(&t2, &t1, &to->C); // toA*toC^2 + fp2_add(&t3, &t2, &t2); + fp2_add(&t3, &t3, &t3); + fp2_add(&t3, &t3, &t3); + fp2_add(&t2, &t2, &t3); // 9*toA*toC^2 + fp2_sqr(&t3, &to->A); + fp2_mul(&t3, &t3, &to->A); // toA^3 + fp2_add(&t3, &t3, &t3); + fp2_sub(&isom->Nx, &t3, &t2); // 2*toA^3-9*toA*toC^2 + fp2_mul(&t2, &t0, &from->A); // fromA^2*fromC + fp2_sqr(&t3, &from->C); + fp2_mul(&t3, &t3, &from->C); // fromC^3 + fp2_add(&t4, &t3, &t3); + fp2_add(&t3, &t4, &t3); // 3*fromC^3 + fp2_sub(&t3, &t3, &t2); // 3*fromC^3-fromA^2*fromC + fp2_mul(&isom->Nx, &isom->Nx, &t3); // lambda_x = (2*toA^3-9*toA*toC^2)*(3*fromC^3-fromA^2*fromC) + + fp2_mul(&t2, &t0, &from->C); // fromA*fromC^2 + fp2_add(&t3, &t2, &t2); + fp2_add(&t3, &t3, &t3); + fp2_add(&t3, &t3, &t3); + fp2_add(&t2, &t2, &t3); // 9*fromA*fromC^2 + fp2_sqr(&t3, &from->A); + fp2_mul(&t3, &t3, &from->A); // fromA^3 + fp2_add(&t3, &t3, &t3); + fp2_sub(&isom->D, &t3, &t2); // 2*fromA^3-9*fromA*fromC^2 + fp2_mul(&t2, &t1, &to->A); // toA^2*toC + fp2_sqr(&t3, &to->C); + fp2_mul(&t3, &t3, &to->C); // toC^3 + fp2_add(&t4, &t3, &t3); + fp2_add(&t3, &t4, &t3); // 3*toC^3 + fp2_sub(&t3, &t3, &t2); // 3*toC^3-toA^2*toC + fp2_mul(&isom->D, &isom->D, &t3); // lambda_z = (2*fromA^3-9*fromA*fromC^2)*(3*toC^3-toA^2*toC) + + // Mont -> SW -> SW -> Mont + fp2_mul(&t0, &to->C, &from->A); + fp2_mul(&t0, &t0, &isom->Nx); // lambda_x*toC*fromA + fp2_mul(&t1, &from->C, &to->A); + fp2_mul(&t1, &t1, &isom->D); // lambda_z*fromC*toA + fp2_sub(&isom->Nz, &t0, &t1); // lambda_x*toC*fromA - lambda_z*fromC*toA + fp2_mul(&t0, &from->C, &to->C); + fp2_add(&t1, &t0, &t0); + fp2_add(&t0, &t0, &t1); // 3*fromC*toC + fp2_mul(&isom->D, &isom->D, &t0); // 3*lambda_z*fromC*toC + fp2_mul(&isom->Nx, &isom->Nx, &t0); // 3*lambda_x*fromC*toC + + return (fp2_is_zero(&isom->Nx) | fp2_is_zero(&isom->D)); +} + +void +ec_iso_eval(ec_point_t *P, ec_isom_t *isom) +{ + fp2_t tmp; + fp2_mul(&P->x, &P->x, &isom->Nx); + fp2_mul(&tmp, &P->z, &isom->Nz); + fp2_add(&P->x, &P->x, &tmp); + fp2_mul(&P->z, &P->z, &isom->D); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/keygen.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/keygen.c new file mode 100644 index 0000000000..c1c206c99d --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/keygen.c @@ -0,0 +1,64 @@ +#include +#include +#include +#include +#include + +void 
+secret_key_init(secret_key_t *sk) +{ + quat_left_ideal_init(&(sk->secret_ideal)); + ibz_mat_2x2_init(&(sk->mat_BAcan_to_BA0_two)); + ec_curve_init(&sk->curve); +} + +void +secret_key_finalize(secret_key_t *sk) +{ + quat_left_ideal_finalize(&(sk->secret_ideal)); + ibz_mat_2x2_finalize(&(sk->mat_BAcan_to_BA0_two)); +} + +int +protocols_keygen(public_key_t *pk, secret_key_t *sk) +{ + int found = 0; + ec_basis_t B_0_two; + + // iterating until a solution has been found + while (!found) { + + found = quat_sampling_random_ideal_O0_given_norm( + &sk->secret_ideal, &SEC_DEGREE, 1, &QUAT_represent_integer_params, NULL); + + // replacing the secret key ideal by a shorter equivalent one for efficiency + found = found && quat_lideal_prime_norm_reduced_equivalent( + &sk->secret_ideal, &QUATALG_PINFTY, QUAT_primality_num_iter, QUAT_equiv_bound_coeff); + + // ideal to isogeny clapotis + + found = found && dim2id2iso_arbitrary_isogeny_evaluation(&B_0_two, &sk->curve, &sk->secret_ideal); + } + + // Assert the isogeny was found and images have the correct order + assert(test_basis_order_twof(&B_0_two, &sk->curve, TORSION_EVEN_POWER)); + + // Compute a deterministic basis with a hint to speed up verification + pk->hint_pk = ec_curve_to_basis_2f_to_hint(&sk->canonical_basis, &sk->curve, TORSION_EVEN_POWER); + + // Assert the deterministic basis we computed has the correct order + assert(test_basis_order_twof(&sk->canonical_basis, &sk->curve, TORSION_EVEN_POWER)); + + // Compute the 2x2 matrix basis change from the canonical basis to the evaluation of our secret + // isogeny + change_of_basis_matrix_tate( + &sk->mat_BAcan_to_BA0_two, &sk->canonical_basis, &B_0_two, &sk->curve, TORSION_EVEN_POWER); + + // Set the public key from the codomain curve + copy_curve(&pk->curve, &sk->curve); + pk->curve.is_A24_computed_and_normalized = false; // We don't send any precomputation + + assert(fp2_is_one(&pk->curve.C) == 0xFFFFFFFF); + + return found; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/l2.c new file mode 100644 index 0000000000..8c49b21d20 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/l2.c @@ -0,0 +1,190 @@ +#include +#include "lll_internals.h" +#include "internal.h" + +#include "dpe.h" + +// Access entry of symmetric matrix +#define SYM(M, i, j) (i < j ? 
&M[j][i] : &M[i][j]) + +void +quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) +{ + dpe_t dpe_const_one, dpe_const_DELTABAR; + + dpe_init(dpe_const_one); + dpe_set_ui(dpe_const_one, 1); + + dpe_init(dpe_const_DELTABAR); + dpe_set_d(dpe_const_DELTABAR, DELTABAR); + + // fp variables for Gram-Schmidt orthogonalization and Lovasz' conditions + dpe_t r[4][4], u[4][4], lovasz[4]; + for (int i = 0; i < 4; i++) { + dpe_init(lovasz[i]); + for (int j = 0; j <= i; j++) { + dpe_init(r[i][j]); + dpe_init(u[i][j]); + } + } + + // threshold for swaps + dpe_t delta_bar; + dpe_init(delta_bar); + dpe_set_d(delta_bar, DELTABAR); + + // Other work variables + dpe_t Xf, tmpF; + dpe_init(Xf); + dpe_init(tmpF); + ibz_t X, tmpI; + ibz_init(&X); + ibz_init(&tmpI); + + // Main L² loop + dpe_set_z(r[0][0], (*G)[0][0]); + int kappa = 1; + while (kappa < 4) { + // size reduce b_κ + int done = 0; + while (!done) { + // Recompute the κ-th row of the Choleski Factorisation + // Loop invariant: + // r[κ][j] ≈ u[κ][j] ‖b_j*‖² ≈ 〈b_κ, b_j*〉 + for (int j = 0; j <= kappa; j++) { + dpe_set_z(r[kappa][j], (*G)[kappa][j]); + for (int k = 0; k < j; k++) { + dpe_mul(tmpF, r[kappa][k], u[j][k]); + dpe_sub(r[kappa][j], r[kappa][j], tmpF); + } + if (j < kappa) + dpe_div(u[kappa][j], r[kappa][j], r[j][j]); + } + + done = 1; + // size reduce + for (int i = kappa - 1; i >= 0; i--) { + if (dpe_cmp_d(u[kappa][i], ETABAR) > 0 || dpe_cmp_d(u[kappa][i], -ETABAR) < 0) { + done = 0; + dpe_set(Xf, u[kappa][i]); + dpe_round(Xf, Xf); + dpe_get_z(X, Xf); + // Update basis: b_κ ← b_κ - X·b_i + for (int j = 0; j < 4; j++) { + ibz_mul(&tmpI, &X, &(*basis)[j][i]); + ibz_sub(&(*basis)[j][kappa], &(*basis)[j][kappa], &tmpI); + } + // Update lower half of the Gram matrix + // = - 2X + X² = + // - X - X( - X·) + //// 〈b_κ, b_κ〉 ← 〈b_κ, b_κ〉 - X·〈b_κ, b_i〉 + ibz_mul(&tmpI, &X, &(*G)[kappa][i]); + ibz_sub(&(*G)[kappa][kappa], &(*G)[kappa][kappa], &tmpI); + for (int j = 0; j < 4; j++) { // works because i < κ + // 〈b_κ, b_j〉 ← 〈b_κ, b_j〉 - X·〈b_i, b_j〉 + ibz_mul(&tmpI, &X, SYM((*G), i, j)); + ibz_sub(SYM((*G), kappa, j), SYM((*G), kappa, j), &tmpI); + } + // After the loop: + //// 〈b_κ,b_κ〉 ← 〈b_κ,b_κ〉 - X·〈b_κ,b_i〉 - X·(〈b_κ,b_i〉 - X·〈b_i, + /// b_i〉) = 〈b_κ - X·b_i, b_κ - X·b_i〉 + // + // Update u[kappa][j] + for (int j = 0; j < i; j++) { + dpe_mul(tmpF, Xf, u[i][j]); + dpe_sub(u[kappa][j], u[kappa][j], tmpF); + } + } + } + } + + // Check Lovasz' conditions + // lovasz[0] = ‖b_κ‖² + dpe_set_z(lovasz[0], (*G)[kappa][kappa]); + // lovasz[i] = lovasz[i-1] - u[κ][i-1]·r[κ][i-1] + for (int i = 1; i < kappa; i++) { + dpe_mul(tmpF, u[kappa][i - 1], r[kappa][i - 1]); + dpe_sub(lovasz[i], lovasz[i - 1], tmpF); + } + int swap; + for (swap = kappa; swap > 0; swap--) { + dpe_mul(tmpF, delta_bar, r[swap - 1][swap - 1]); + if (dpe_cmp(tmpF, lovasz[swap - 1]) < 0) + break; + } + + // Insert b_κ before b_swap + if (kappa != swap) { + // Insert b_κ before b_swap in the basis and in the lower half Gram matrix + for (int j = kappa; j > swap; j--) { + for (int i = 0; i < 4; i++) { + ibz_swap(&(*basis)[i][j], &(*basis)[i][j - 1]); + if (i == j - 1) + ibz_swap(&(*G)[i][i], &(*G)[j][j]); + else if (i != j) + ibz_swap(SYM((*G), i, j), SYM((*G), i, j - 1)); + } + } + // Copy row u[κ] and r[κ] in swap position, ignore what follows + for (int i = 0; i < swap; i++) { + dpe_set(u[swap][i], u[kappa][i]); + dpe_set(r[swap][i], r[kappa][i]); + } + dpe_set(r[swap][swap], lovasz[swap]); + // swap complete + kappa = swap; + } + + kappa += 1; + } + +#ifndef NDEBUG + // Check 
size-reducedness + for (int i = 0; i < 4; i++) + for (int j = 0; j < i; j++) { + dpe_abs(u[i][j], u[i][j]); + assert(dpe_cmp_d(u[i][j], ETABAR) <= 0); + } + // Check Lovasz' conditions + for (int i = 1; i < 4; i++) { + dpe_mul(tmpF, u[i][i - 1], u[i][i - 1]); + dpe_sub(tmpF, dpe_const_DELTABAR, tmpF); + dpe_mul(tmpF, tmpF, r[i - 1][i - 1]); + assert(dpe_cmp(tmpF, r[i][i]) <= 0); + } +#endif + + // Fill in the upper half of the Gram matrix + for (int i = 0; i < 4; i++) { + for (int j = i + 1; j < 4; j++) + ibz_copy(&(*G)[i][j], &(*G)[j][i]); + } + + // Clearinghouse + ibz_finalize(&X); + ibz_finalize(&tmpI); + dpe_clear(dpe_const_one); + dpe_clear(dpe_const_DELTABAR); + dpe_clear(Xf); + dpe_clear(tmpF); + dpe_clear(delta_bar); + for (int i = 0; i < 4; i++) { + dpe_clear(lovasz[i]); + for (int j = 0; j <= i; j++) { + dpe_clear(r[i][j]); + dpe_clear(u[i][j]); + } + } +} + +int +quat_lattice_lll(ibz_mat_4x4_t *red, const quat_lattice_t *lattice, const quat_alg_t *alg) +{ + ibz_mat_4x4_t G; // Gram Matrix + ibz_mat_4x4_init(&G); + quat_lattice_gram(&G, lattice, alg); + ibz_mat_4x4_copy(red, &lattice->basis); + quat_lll_core(&G, red); + ibz_mat_4x4_finalize(&G); + return 0; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lat_ball.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lat_ball.c new file mode 100644 index 0000000000..c7bbb9682f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lat_ball.c @@ -0,0 +1,139 @@ +#include +#include +#include +#include "internal.h" +#include "lll_internals.h" + +int +quat_lattice_bound_parallelogram(ibz_vec_4_t *box, ibz_mat_4x4_t *U, const ibz_mat_4x4_t *G, const ibz_t *radius) +{ + ibz_t denom, rem; + ibz_init(&denom); + ibz_init(&rem); + ibz_mat_4x4_t dualG; + ibz_mat_4x4_init(&dualG); + +// Compute the Gram matrix of the dual lattice +#ifndef NDEBUG + int inv_check = ibz_mat_4x4_inv_with_det_as_denom(&dualG, &denom, G); + assert(inv_check); +#else + (void)ibz_mat_4x4_inv_with_det_as_denom(&dualG, &denom, G); +#endif + // Initialize the dual lattice basis to the identity matrix + ibz_mat_4x4_identity(U); + // Reduce the dual lattice + quat_lll_core(&dualG, U); + + // Compute the parallelogram's bounds + int trivial = 1; + for (int i = 0; i < 4; i++) { + ibz_mul(&(*box)[i], &dualG[i][i], radius); + ibz_div(&(*box)[i], &rem, &(*box)[i], &denom); + ibz_sqrt_floor(&(*box)[i], &(*box)[i]); + trivial &= ibz_is_zero(&(*box)[i]); + } + + // Compute the transpose transformation matrix +#ifndef NDEBUG + int inv = ibz_mat_4x4_inv_with_det_as_denom(U, &denom, U); +#else + (void)ibz_mat_4x4_inv_with_det_as_denom(U, &denom, U); +#endif + // U is unitary, det(U) = ± 1 + ibz_mat_4x4_scalar_mul(U, &denom, U); +#ifndef NDEBUG + assert(inv); + ibz_abs(&denom, &denom); + assert(ibz_is_one(&denom)); +#endif + + ibz_mat_4x4_finalize(&dualG); + ibz_finalize(&denom); + ibz_finalize(&rem); + return !trivial; +} + +int +quat_lattice_sample_from_ball(quat_alg_elem_t *res, + const quat_lattice_t *lattice, + const quat_alg_t *alg, + const ibz_t *radius) +{ + assert(ibz_cmp(radius, &ibz_const_zero) > 0); + + ibz_vec_4_t box; + ibz_vec_4_init(&box); + ibz_mat_4x4_t U, G; + ibz_mat_4x4_init(&U); + ibz_mat_4x4_init(&G); + ibz_vec_4_t x; + ibz_vec_4_init(&x); + ibz_t rad, tmp; + ibz_init(&rad); + ibz_init(&tmp); + + // Compute the Gram matrix of the lattice + quat_lattice_gram(&G, lattice, alg); + + // Correct ball radius by the denominator + ibz_mul(&rad, radius, &lattice->denom); + ibz_mul(&rad, &rad, &lattice->denom); + // Correct by 2 (Gram matrix corresponds 
to twice the norm) + ibz_mul(&rad, &rad, &ibz_const_two); + + // Compute a bounding parallelogram for the ball, stop if it only + // contains the origin + int ok = quat_lattice_bound_parallelogram(&box, &U, &G, &rad); + if (!ok) + goto err; + + // Rejection sampling from the parallelogram +#ifndef NDEBUG + int cnt = 0; +#endif + do { + // Sample vector + for (int i = 0; i < 4; i++) { + if (ibz_is_zero(&box[i])) { + ibz_copy(&x[i], &ibz_const_zero); + } else { + ibz_add(&tmp, &box[i], &box[i]); + ok &= ibz_rand_interval(&x[i], &ibz_const_zero, &tmp); + ibz_sub(&x[i], &x[i], &box[i]); + if (!ok) + goto err; + } + } + // Map to parallelogram + ibz_mat_4x4_eval_t(&x, &x, &U); + // Evaluate quadratic form + quat_qf_eval(&tmp, &G, &x); +#ifndef NDEBUG + cnt++; + if (cnt % 100 == 0) + printf("Lattice sampling rejected %d times", cnt - 1); +#endif + } while (ibz_is_zero(&tmp) || (ibz_cmp(&tmp, &rad) > 0)); + + // Evaluate linear combination + ibz_mat_4x4_eval(&(res->coord), &(lattice->basis), &x); + ibz_copy(&(res->denom), &(lattice->denom)); + quat_alg_normalize(res); + +#ifndef NDEBUG + // Check norm is smaller than radius + quat_alg_norm(&tmp, &rad, res, alg); + ibz_mul(&rad, &rad, radius); + assert(ibz_cmp(&tmp, &rad) <= 0); +#endif + +err: + ibz_finalize(&rad); + ibz_finalize(&tmp); + ibz_vec_4_finalize(&x); + ibz_mat_4x4_finalize(&U); + ibz_mat_4x4_finalize(&G); + ibz_vec_4_finalize(&box); + return ok; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lattice.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lattice.c new file mode 100644 index 0000000000..c98bae9499 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lattice.c @@ -0,0 +1,328 @@ +#include +#include +#include "internal.h" + +// helper functions +int +quat_lattice_equal(const quat_lattice_t *lat1, const quat_lattice_t *lat2) +{ + int equal = 1; + quat_lattice_t a, b; + quat_lattice_init(&a); + quat_lattice_init(&b); + quat_lattice_reduce_denom(&a, lat1); + quat_lattice_reduce_denom(&b, lat2); + ibz_abs(&(a.denom), &(a.denom)); + ibz_abs(&(b.denom), &(b.denom)); + quat_lattice_hnf(&a); + quat_lattice_hnf(&b); + equal = equal && (ibz_cmp(&(a.denom), &(b.denom)) == 0); + equal = equal && ibz_mat_4x4_equal(&(a.basis), &(b.basis)); + quat_lattice_finalize(&a); + quat_lattice_finalize(&b); + return (equal); +} + +// sublattice test +int +quat_lattice_inclusion(const quat_lattice_t *sublat, const quat_lattice_t *overlat) +{ + int res; + quat_lattice_t sum; + quat_lattice_init(&sum); + quat_lattice_add(&sum, overlat, sublat); + res = quat_lattice_equal(&sum, overlat); + quat_lattice_finalize(&sum); + return (res); +} + +void +quat_lattice_reduce_denom(quat_lattice_t *reduced, const quat_lattice_t *lat) +{ + ibz_t gcd; + ibz_init(&gcd); + ibz_mat_4x4_gcd(&gcd, &(lat->basis)); + ibz_gcd(&gcd, &gcd, &(lat->denom)); + ibz_mat_4x4_scalar_div(&(reduced->basis), &gcd, &(lat->basis)); + ibz_div(&(reduced->denom), &gcd, &(lat->denom), &gcd); + ibz_abs(&(reduced->denom), &(reduced->denom)); + ibz_finalize(&gcd); +} + +void +quat_lattice_conjugate_without_hnf(quat_lattice_t *conj, const quat_lattice_t *lat) +{ + ibz_mat_4x4_copy(&(conj->basis), &(lat->basis)); + ibz_copy(&(conj->denom), &(lat->denom)); + + for (int row = 1; row < 4; ++row) { + for (int col = 0; col < 4; ++col) { + ibz_neg(&(conj->basis[row][col]), &(conj->basis[row][col])); + } + } +} + +// Method described in https://cseweb.ucsd.edu/classes/sp14/cse206A-a/lec4.pdf consulted on 19 of +// May 2023, 12h40 CEST +void 
+quat_lattice_dual_without_hnf(quat_lattice_t *dual, const quat_lattice_t *lat) +{ + ibz_mat_4x4_t inv; + ibz_t det; + ibz_init(&det); + ibz_mat_4x4_init(&inv); + ibz_mat_4x4_inv_with_det_as_denom(&inv, &det, &(lat->basis)); + ibz_mat_4x4_transpose(&inv, &inv); + // dual_denom = det/lat_denom + ibz_mat_4x4_scalar_mul(&(dual->basis), &(lat->denom), &inv); + ibz_copy(&(dual->denom), &det); + + ibz_finalize(&det); + ibz_mat_4x4_finalize(&inv); +} + +void +quat_lattice_add(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2) +{ + ibz_vec_4_t generators[8]; + ibz_mat_4x4_t tmp; + ibz_t det1, det2, detprod; + ibz_init(&det1); + ibz_init(&det2); + ibz_init(&detprod); + for (int i = 0; i < 8; i++) + ibz_vec_4_init(&(generators[i])); + ibz_mat_4x4_init(&tmp); + ibz_mat_4x4_scalar_mul(&tmp, &(lat1->denom), &(lat2->basis)); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(generators[j][i]), &(tmp[i][j])); + } + } + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det1, &tmp); + ibz_mat_4x4_scalar_mul(&tmp, &(lat2->denom), &(lat1->basis)); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(generators[4 + j][i]), &(tmp[i][j])); + } + } + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det2, &tmp); + assert(!ibz_is_zero(&det1)); + assert(!ibz_is_zero(&det2)); + ibz_gcd(&detprod, &det1, &det2); + ibz_mat_4xn_hnf_mod_core(&(res->basis), 8, generators, &detprod); + ibz_mul(&(res->denom), &(lat1->denom), &(lat2->denom)); + quat_lattice_reduce_denom(res, res); + ibz_mat_4x4_finalize(&tmp); + ibz_finalize(&det1); + ibz_finalize(&det2); + ibz_finalize(&detprod); + for (int i = 0; i < 8; i++) + ibz_vec_4_finalize(&(generators[i])); +} + +// method described in https://cseweb.ucsd.edu/classes/sp14/cse206A-a/lec4.pdf consulted on 19 of +// May 2023, 12h40 CEST +void +quat_lattice_intersect(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2) +{ + quat_lattice_t dual1, dual2, dual_res; + quat_lattice_init(&dual1); + quat_lattice_init(&dual2); + quat_lattice_init(&dual_res); + quat_lattice_dual_without_hnf(&dual1, lat1); + + quat_lattice_dual_without_hnf(&dual2, lat2); + quat_lattice_add(&dual_res, &dual1, &dual2); + quat_lattice_dual_without_hnf(res, &dual_res); + quat_lattice_hnf(res); // could be removed if we do not expect HNF any more + quat_lattice_finalize(&dual1); + quat_lattice_finalize(&dual2); + quat_lattice_finalize(&dual_res); +} + +void +quat_lattice_mat_alg_coord_mul_without_hnf(ibz_mat_4x4_t *prod, + const ibz_mat_4x4_t *lat, + const ibz_vec_4_t *coord, + const quat_alg_t *alg) +{ + ibz_vec_4_t p, a; + ibz_vec_4_init(&p); + ibz_vec_4_init(&a); + for (int i = 0; i < 4; i++) { + ibz_vec_4_copy_ibz(&a, &((*lat)[0][i]), &((*lat)[1][i]), &((*lat)[2][i]), &((*lat)[3][i])); + quat_alg_coord_mul(&p, &a, coord, alg); + ibz_copy(&((*prod)[0][i]), &(p[0])); + ibz_copy(&((*prod)[1][i]), &(p[1])); + ibz_copy(&((*prod)[2][i]), &(p[2])); + ibz_copy(&((*prod)[3][i]), &(p[3])); + } + ibz_vec_4_finalize(&p); + ibz_vec_4_finalize(&a); +} + +void +quat_lattice_alg_elem_mul(quat_lattice_t *prod, + const quat_lattice_t *lat, + const quat_alg_elem_t *elem, + const quat_alg_t *alg) +{ + quat_lattice_mat_alg_coord_mul_without_hnf(&(prod->basis), &(lat->basis), &(elem->coord), alg); + ibz_mul(&(prod->denom), &(lat->denom), &(elem->denom)); + quat_lattice_hnf(prod); +} + +void +quat_lattice_mul(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2, const quat_alg_t *alg) +{ + ibz_vec_4_t elem1, elem2, elem_res; + 
ibz_vec_4_t generators[16]; + ibz_mat_4x4_t detmat; + ibz_t det; + quat_lattice_t lat_res; + ibz_init(&det); + ibz_mat_4x4_init(&detmat); + quat_lattice_init(&lat_res); + ibz_vec_4_init(&elem1); + ibz_vec_4_init(&elem2); + ibz_vec_4_init(&elem_res); + for (int i = 0; i < 16; i++) + ibz_vec_4_init(&(generators[i])); + for (int k = 0; k < 4; k++) { + ibz_vec_4_copy_ibz( + &elem1, &(lat1->basis[0][k]), &(lat1->basis[1][k]), &(lat1->basis[2][k]), &(lat1->basis[3][k])); + for (int i = 0; i < 4; i++) { + ibz_vec_4_copy_ibz( + &elem2, &(lat2->basis[0][i]), &(lat2->basis[1][i]), &(lat2->basis[2][i]), &(lat2->basis[3][i])); + quat_alg_coord_mul(&elem_res, &elem1, &elem2, alg); + for (int j = 0; j < 4; j++) { + if (k == 0) + ibz_copy(&(detmat[i][j]), &(elem_res[j])); + ibz_copy(&(generators[4 * k + i][j]), &(elem_res[j])); + } + } + } + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &detmat); + ibz_abs(&det, &det); + ibz_mat_4xn_hnf_mod_core(&(res->basis), 16, generators, &det); + ibz_mul(&(res->denom), &(lat1->denom), &(lat2->denom)); + quat_lattice_reduce_denom(res, res); + ibz_vec_4_finalize(&elem1); + ibz_vec_4_finalize(&elem2); + ibz_vec_4_finalize(&elem_res); + quat_lattice_finalize(&lat_res); + ibz_finalize(&det); + ibz_mat_4x4_finalize(&(detmat)); + for (int i = 0; i < 16; i++) + ibz_vec_4_finalize(&(generators[i])); +} + +// lattice assumed of full rank +int +quat_lattice_contains(ibz_vec_4_t *coord, const quat_lattice_t *lat, const quat_alg_elem_t *x) +{ + int divisible = 0; + ibz_vec_4_t work_coord; + ibz_mat_4x4_t inv; + ibz_t det, prod; + ibz_init(&prod); + ibz_init(&det); + ibz_vec_4_init(&work_coord); + ibz_mat_4x4_init(&inv); + ibz_mat_4x4_inv_with_det_as_denom(&inv, &det, &(lat->basis)); + assert(!ibz_is_zero(&det)); + ibz_mat_4x4_eval(&work_coord, &inv, &(x->coord)); + ibz_vec_4_scalar_mul(&(work_coord), &(lat->denom), &work_coord); + ibz_mul(&prod, &(x->denom), &det); + divisible = ibz_vec_4_scalar_div(&work_coord, &prod, &work_coord); + // copy result + if (divisible && (coord != NULL)) { + for (int i = 0; i < 4; i++) { + ibz_copy(&((*coord)[i]), &(work_coord[i])); + } + } + ibz_finalize(&prod); + ibz_finalize(&det); + ibz_mat_4x4_finalize(&inv); + ibz_vec_4_finalize(&work_coord); + return (divisible); +} + +void +quat_lattice_index(ibz_t *index, const quat_lattice_t *sublat, const quat_lattice_t *overlat) +{ + ibz_t tmp, det; + ibz_init(&tmp); + ibz_init(&det); + + // det = det(sublat->basis) + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &sublat->basis); + // tmp = (overlat->denom)⁴ + ibz_mul(&tmp, &overlat->denom, &overlat->denom); + ibz_mul(&tmp, &tmp, &tmp); + // index = (overlat->denom)⁴ · det(sublat->basis) + ibz_mul(index, &det, &tmp); + // tmp = (sublat->denom)⁴ + ibz_mul(&tmp, &sublat->denom, &sublat->denom); + ibz_mul(&tmp, &tmp, &tmp); + // det = det(overlat->basis) + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &overlat->basis); + // tmp = (sublat->denom)⁴ · det(overlat->basis) + ibz_mul(&tmp, &tmp, &det); + // index = index / tmp + ibz_div(index, &tmp, index, &tmp); + assert(ibz_is_zero(&tmp)); + // index = |index| + ibz_abs(index, index); + + ibz_finalize(&tmp); + ibz_finalize(&det); +} + +void +quat_lattice_hnf(quat_lattice_t *lat) +{ + ibz_t mod; + ibz_vec_4_t generators[4]; + ibz_init(&mod); + ibz_mat_4x4_inv_with_det_as_denom(NULL, &mod, &(lat->basis)); + ibz_abs(&mod, &mod); + for (int i = 0; i < 4; i++) + ibz_vec_4_init(&(generators[i])); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(generators[j][i]), &(lat->basis[i][j])); + } 
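+        // (transposed copy: generators[j][i] = basis[i][j], so each generators[j] ends up
+        //  holding one column of the basis before the modular HNF below)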
+ } + ibz_mat_4xn_hnf_mod_core(&(lat->basis), 4, generators, &mod); + quat_lattice_reduce_denom(lat, lat); + ibz_finalize(&mod); + for (int i = 0; i < 4; i++) + ibz_vec_4_finalize(&(generators[i])); +} + +void +quat_lattice_gram(ibz_mat_4x4_t *G, const quat_lattice_t *lattice, const quat_alg_t *alg) +{ + ibz_t tmp; + ibz_init(&tmp); + for (int i = 0; i < 4; i++) { + for (int j = 0; j <= i; j++) { + ibz_set(&(*G)[i][j], 0); + for (int k = 0; k < 4; k++) { + ibz_mul(&tmp, &(lattice->basis)[k][i], &(lattice->basis)[k][j]); + if (k >= 2) + ibz_mul(&tmp, &tmp, &alg->p); + ibz_add(&(*G)[i][j], &(*G)[i][j], &tmp); + } + ibz_mul(&(*G)[i][j], &(*G)[i][j], &ibz_const_two); + } + } + for (int i = 0; i < 4; i++) { + for (int j = i + 1; j < 4; j++) { + ibz_copy(&(*G)[i][j], &(*G)[j][i]); + } + } + ibz_finalize(&tmp); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lll_applications.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lll_applications.c new file mode 100644 index 0000000000..6c763b8c04 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lll_applications.c @@ -0,0 +1,127 @@ +#include +#include +#include "lll_internals.h" + +void +quat_lideal_reduce_basis(ibz_mat_4x4_t *reduced, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + ibz_t gram_corrector; + ibz_init(&gram_corrector); + ibz_mul(&gram_corrector, &(lideal->lattice.denom), &(lideal->lattice.denom)); + quat_lideal_class_gram(gram, lideal, alg); + ibz_mat_4x4_copy(reduced, &(lideal->lattice.basis)); + quat_lll_core(gram, reduced); + ibz_mat_4x4_scalar_mul(gram, &gram_corrector, gram); + for (int i = 0; i < 4; i++) { + ibz_div_2exp(&((*gram)[i][i]), &((*gram)[i][i]), 1); + for (int j = i + 1; j < 4; j++) { + ibz_set(&((*gram)[i][j]), 0); + } + } + ibz_finalize(&gram_corrector); +} + +void +quat_lideal_lideal_mul_reduced(quat_left_ideal_t *prod, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg) +{ + ibz_mat_4x4_t red; + ibz_mat_4x4_init(&red); + + quat_lattice_mul(&(prod->lattice), &(lideal1->lattice), &(lideal2->lattice), alg); + prod->parent_order = lideal1->parent_order; + quat_lideal_norm(prod); + quat_lideal_reduce_basis(&red, gram, prod, alg); + ibz_mat_4x4_copy(&(prod->lattice.basis), &red); + + ibz_mat_4x4_finalize(&red); +} + +int +quat_lideal_prime_norm_reduced_equivalent(quat_left_ideal_t *lideal, + const quat_alg_t *alg, + const int primality_num_iter, + const int equiv_bound_coeff) +{ + ibz_mat_4x4_t gram, red; + ibz_mat_4x4_init(&gram); + ibz_mat_4x4_init(&red); + + int found = 0; + + // computing the reduced basis + quat_lideal_reduce_basis(&red, &gram, lideal, alg); + + quat_alg_elem_t new_alpha; + quat_alg_elem_init(&new_alpha); + ibz_t tmp, remainder, adjusted_norm; + ibz_init(&tmp); + ibz_init(&remainder); + ibz_init(&adjusted_norm); + + ibz_mul(&adjusted_norm, &lideal->lattice.denom, &lideal->lattice.denom); + + int ctr = 0; + + // equiv_num_iter = (2 * equiv_bound_coeff + 1)^4 + assert(equiv_bound_coeff < (1 << 20)); + int equiv_num_iter = (2 * equiv_bound_coeff + 1); + equiv_num_iter = equiv_num_iter * equiv_num_iter; + equiv_num_iter = equiv_num_iter * equiv_num_iter; + + while (!found && ctr < equiv_num_iter) { + ctr++; + // we select our linear combination at random + ibz_rand_interval_minm_m(&new_alpha.coord[0], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord[1], equiv_bound_coeff); + 
ibz_rand_interval_minm_m(&new_alpha.coord[2], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord[3], equiv_bound_coeff); + + // computation of the norm of the vector sampled + quat_qf_eval(&tmp, &gram, &new_alpha.coord); + + // compute the norm of the equivalent ideal + // can be improved by removing the power of two first and the odd part only if the trial + // division failed (this should always be called on an ideal of norm 2^x * N for some + // big prime N ) + ibz_div(&tmp, &remainder, &tmp, &adjusted_norm); + + // debug : check that the remainder is zero + assert(ibz_is_zero(&remainder)); + + // pseudo-primality test + if (ibz_probab_prime(&tmp, primality_num_iter)) { + + // computes the generator using a matrix multiplication + ibz_mat_4x4_eval(&new_alpha.coord, &red, &new_alpha.coord); + ibz_copy(&new_alpha.denom, &lideal->lattice.denom); + assert(quat_lattice_contains(NULL, &lideal->lattice, &new_alpha)); + + quat_alg_conj(&new_alpha, &new_alpha); + ibz_mul(&new_alpha.denom, &new_alpha.denom, &lideal->norm); + quat_lideal_mul(lideal, lideal, &new_alpha, alg); + assert(ibz_probab_prime(&lideal->norm, primality_num_iter)); + + found = 1; + break; + } + } + assert(found); + + ibz_finalize(&tmp); + ibz_finalize(&remainder); + ibz_finalize(&adjusted_norm); + quat_alg_elem_finalize(&new_alpha); + + ibz_mat_4x4_finalize(&gram); + ibz_mat_4x4_finalize(&red); + + return found; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lll_internals.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lll_internals.h new file mode 100644 index 0000000000..e8d90141ac --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lll_internals.h @@ -0,0 +1,238 @@ +#ifndef LLL_INTERNALS_H +#define LLL_INTERNALS_H + +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations of functions only used for the LLL tets + */ + +#include + +/** @internal + * @ingroup quat_helpers + * @defgroup lll_internal Functions only used for LLL or its tests + * @{ + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_params Parameters used by the L2 implementation (floats) and its tests (ints) + * @{ + */ + +#define DELTABAR 0.995 +#define DELTA_NUM 99 +#define DELTA_DENOM 100 + +#define ETABAR 0.505 +#define EPSILON_NUM 1 +#define EPSILON_DENOM 100 + +#define PREC 64 +/** + * @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup ibq_t Types for rationals + * @{ + */ + +/** @brief Type for fractions of integers + * + * @typedef ibq_t + * + * For fractions of integers of arbitrary size, used by intbig module, using gmp + */ +typedef ibz_t ibq_t[2]; +typedef ibq_t ibq_vec_4_t[4]; +typedef ibq_t ibq_mat_4x4_t[4][4]; + +/**@} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_ibq_c Constructors and Destructors and Printers + * @{ + */ + +void ibq_init(ibq_t *x); +void ibq_finalize(ibq_t *x); + +void ibq_mat_4x4_init(ibq_mat_4x4_t *mat); +void ibq_mat_4x4_finalize(ibq_mat_4x4_t *mat); + +void ibq_vec_4_init(ibq_vec_4_t *vec); +void ibq_vec_4_finalize(ibq_vec_4_t *vec); + +void ibq_mat_4x4_print(const ibq_mat_4x4_t *mat); +void ibq_vec_4_print(const ibq_vec_4_t *vec); + +/** @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_qa Basic fraction arithmetic + * @{ + */ + +/** @brief sum=a+b + */ +void ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b); + +/** @brief diff=a-b + */ +void ibq_sub(ibq_t *diff, const ibq_t *a, const ibq_t *b); + +/** @brief neg=-x + */ +void ibq_neg(ibq_t *neg, const ibq_t *x); + +/** @brief abs=|x| + */ +void 
ibq_abs(ibq_t *abs, const ibq_t *x); + +/** @brief prod=a*b + */ +void ibq_mul(ibq_t *prod, const ibq_t *a, const ibq_t *b); + +/** @brief inv=1/x + * + * @returns 0 if x is 0, 1 if inverse exists and was computed + */ +int ibq_inv(ibq_t *inv, const ibq_t *x); + +/** @brief Compare a and b + * + * @returns a positive value if a > b, zero if a = b, and a negative value if a < b + */ +int ibq_cmp(const ibq_t *a, const ibq_t *b); + +/** @brief Test if x is 0 + * + * @returns 1 if x=0, 0 otherwise + */ +int ibq_is_zero(const ibq_t *x); + +/** @brief Test if x is 1 + * + * @returns 1 if x=1, 0 otherwise + */ +int ibq_is_one(const ibq_t *x); + +/** @brief Set q to a/b if b not 0 + * + * @returns 1 if b not 0 and q is set, 0 otherwise + */ +int ibq_set(ibq_t *q, const ibz_t *a, const ibz_t *b); + +/** @brief Copy value into target + */ +void ibq_copy(ibq_t *target, const ibq_t *value); + +/** @brief Checks if q is an integer + * + * @returns 1 if yes, 0 if not + */ +int ibq_is_ibz(const ibq_t *q); + +/** + * @brief Converts a fraction q to an integer y, if q is an integer. + * + * @returns 1 if z is an integer, 0 if not + */ +int ibq_to_ibz(ibz_t *z, const ibq_t *q); +/** @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup quat_lll_verify_helpers Helper functions for lll verification in dimension 4 + * @{ + */ + +/** @brief Set ibq to parameters delta and eta = 1/2 + epsilon using L2 constants + */ +void quat_lll_set_ibq_parameters(ibq_t *delta, ibq_t *eta); + +/** @brief Set an ibq vector to 4 given integer coefficients + */ +void ibq_vec_4_copy_ibz(ibq_vec_4_t *vec, + const ibz_t *coeff0, + const ibz_t *coeff1, + const ibz_t *coeff2, + const ibz_t *coeff3); // dim4, test/dim4 + +/** @brief Bilinear form vec00*vec10+vec01*vec11+q*vec02*vec12+q*vec03*vec13 for ibz_q + */ +void quat_lll_bilinear(ibq_t *b, const ibq_vec_4_t *vec0, const ibq_vec_4_t *vec1, + const ibz_t *q); // dim4, test/dim4 + +/** @brief Outputs the transposition of the orthogonalised matrix of mat (as fractions) + * + * For the bilinear form vec00*vec10+vec01*vec11+q*vec02*vec12+q*vec03*vec13 + */ +void quat_lll_gram_schmidt_transposed_with_ibq(ibq_mat_4x4_t *orthogonalised_transposed, + const ibz_mat_4x4_t *mat, + const ibz_t *q); // dim4 + +/** @brief Verifies if mat is lll-reduced for parameter coeff and norm defined by q + * + * For the bilinear form vec00*vec10+vec01*vec11+q*vec02*vec12+q*vec03*vec13 + */ +int quat_lll_verify(const ibz_mat_4x4_t *mat, + const ibq_t *delta, + const ibq_t *eta, + const quat_alg_t *alg); // test/lattice, test/dim4 + /** @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_internal_gram Internal LLL function + * @{ + */ + +/** @brief In-place L2 reduction core function + * + * Given a lattice basis represented by the columns of a 4x4 matrix + * and the Gram matrix of its bilinear form, L2-reduces the basis + * in-place and updates the Gram matrix accordingly. 
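+ *
+ * In outline: for each index κ the κ-th row of the Cholesky factorisation of G is recomputed
+ * in dpe floating point, b_κ is size-reduced against b_0,...,b_{κ-1}, and if Lovász' condition
+ * fails b_κ is moved to an earlier position before continuing.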
+ * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param G In/Output: Gram matrix of the lattice basis + * @param basis In/Output: lattice basis + */ +void quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis); + +/** + * @brief LLL reduction on 4-dimensional lattice + * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param red Output: LLL reduced basis + * @param lattice In/Output: lattice with 4-dimensional basis + * @param alg The quaternion algebra + */ +int quat_lattice_lll(ibz_mat_4x4_t *red, const quat_lattice_t *lattice, const quat_alg_t *alg); + +/** + * @} + */ + +// end of lll_internal +/** @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lvlx.cmake b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lvlx.cmake new file mode 100644 index 0000000000..9b8c0f9287 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lvlx.cmake @@ -0,0 +1,12 @@ +set(SOURCE_FILES_ID2ISO_GENERIC_REF + ${LVLX_DIR}/id2iso.c + ${LVLX_DIR}/dim2id2iso.c +) + +add_library(${LIB_ID2ISO_${SVARIANT_UPPER}} STATIC ${SOURCE_FILES_ID2ISO_GENERIC_REF}) +target_link_libraries(${LIB_ID2ISO_${SVARIANT_UPPER}} ${LIB_QUATERNION} ${LIB_PRECOMP_${SVARIANT_UPPER}} ${LIB_MP} ${LIB_GF_${SVARIANT_UPPER}} ${LIB_EC_${SVARIANT_UPPER}} ${LIB_HD_${SVARIANT_UPPER}}) +target_include_directories(${LIB_ID2ISO_${SVARIANT_UPPER}} PRIVATE ${INC_PUBLIC} ${INC_PRECOMP_${SVARIANT_UPPER}} ${INC_QUATERNION} ${INC_MP} ${INC_GF} ${INC_GF_${SVARIANT_UPPER}} ${INC_EC} ${INC_HD} ${INC_ID2ISO} ${INC_COMMON}) +target_compile_options(${LIB_ID2ISO_${SVARIANT_UPPER}} PRIVATE ${C_OPT_FLAGS}) +target_compile_definitions(${LIB_ID2ISO_${SVARIANT_UPPER}} PUBLIC SQISIGN_VARIANT=${SVARIANT_LOWER}) + +add_subdirectory(test) diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mem.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mem.c new file mode 100644 index 0000000000..4956beda50 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mem.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: Apache-2.0 + +#include +#include +#include + +void +sqisign_secure_free(void *mem, size_t size) +{ + if (mem) { + typedef void *(*memset_t)(void *, int, size_t); + static volatile memset_t memset_func = memset; + memset_func(mem, 0, size); + free(mem); + } +} +void +sqisign_secure_clear(void *mem, size_t size) +{ + typedef void *(*memset_t)(void *, int, size_t); + static volatile memset_t memset_func = memset; + memset_func(mem, 0, size); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mem.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mem.h new file mode 100644 index 0000000000..ab8f6c6481 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mem.h @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef MEM_H +#define MEM_H +#include +#include + +/** + * Clears and frees allocated memory. + * + * @param[out] mem Memory to be cleared and freed. + * @param size Size of memory to be cleared and freed. + */ +void sqisign_secure_free(void *mem, size_t size); + +/** + * Clears memory. + * + * @param[out] mem Memory to be cleared. + * @param size Size of memory to be cleared. 
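+ *
+ * Note: the implementation (mem.c) calls memset through a volatile function pointer so that
+ * the clearing is not optimized away by the compiler.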
+ */ +void sqisign_secure_clear(void *mem, size_t size); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.c new file mode 100644 index 0000000000..396d505aec --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.c @@ -0,0 +1,73 @@ +#include +#include +#if defined(MINI_GMP) +#include "mini-gmp.h" +#else +// This configuration is used only for testing +#include +#endif +#include + +// Exported for testing +int +mini_mpz_legendre(const mpz_t a, const mpz_t p) +{ + int res = 0; + mpz_t e; + mpz_init_set(e, p); + mpz_sub_ui(e, e, 1); + mpz_fdiv_q_2exp(e, e, 1); + mpz_powm(e, a, e, p); + + if (mpz_cmp_ui(e, 1) <= 0) { + res = mpz_get_si(e); + } else { + res = -1; + } + mpz_clear(e); + return res; +} + +#if defined(MINI_GMP) +int +mpz_legendre(const mpz_t a, const mpz_t p) +{ + return mini_mpz_legendre(a, p); +} +#endif + +// Exported for testing +double +mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op) +{ + double ret; + int tmp_exp; + mpz_t tmp; + + // Handle the case where op is 0 + if (mpz_cmp_ui(op, 0) == 0) { + *exp = 0; + return 0.0; + } + + *exp = mpz_sizeinbase(op, 2); + + mpz_init_set(tmp, op); + + if (*exp > DBL_MAX_EXP) { + mpz_fdiv_q_2exp(tmp, tmp, *exp - DBL_MAX_EXP); + } + + ret = frexp(mpz_get_d(tmp), &tmp_exp); + mpz_clear(tmp); + + return ret; +} + +#if defined(MINI_GMP) +double +mpz_get_d_2exp(signed long int *exp, const mpz_t op) +{ + return mini_mpz_get_d_2exp(exp, op); +} +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.h new file mode 100644 index 0000000000..0113cfdfe6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.h @@ -0,0 +1,19 @@ +#ifndef MINI_GMP_EXTRA_H +#define MINI_GMP_EXTRA_H + +#if defined MINI_GMP +#include "mini-gmp.h" + +typedef long mp_exp_t; + +int mpz_legendre(const mpz_t a, const mpz_t p); +double mpz_get_d_2exp(signed long int *exp, const mpz_t op); +#else +// This configuration is used only for testing +#include +#endif + +int mini_mpz_legendre(const mpz_t a, const mpz_t p); +double mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp.c new file mode 100644 index 0000000000..3830ab2031 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp.c @@ -0,0 +1,4671 @@ +/* Note: The code from mini-gmp is modifed from the original by + commenting out the definition of GMP_LIMB_BITS */ + +/* + mini-gmp, a minimalistic implementation of a GNU GMP subset. + + Contributed to the GNU project by Niels Möller + Additional functionalities and improvements by Marco Bodrato. + +Copyright 1991-1997, 1999-2022 Free Software Foundation, Inc. + +This file is part of the GNU MP Library. + +The GNU MP Library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 2 of the License, or (at your option) any + later version. + +or both in parallel, as here. 
+ +The GNU MP Library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the GNU MP Library. If not, +see https://www.gnu.org/licenses/. */ + +/* NOTE: All functions in this file which are not declared in + mini-gmp.h are internal, and are not intended to be compatible + with GMP or with future versions of mini-gmp. */ + +/* Much of the material copied from GMP files, including: gmp-impl.h, + longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c, + mpn/generic/lshift.c, mpn/generic/mul_1.c, + mpn/generic/mul_basecase.c, mpn/generic/rshift.c, + mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c, + mpn/generic/submul_1.c. */ + +#include +#include +#include +#include +#include +#include + +#include "mini-gmp.h" + +#if !defined(MINI_GMP_DONT_USE_FLOAT_H) +#include +#endif + + +/* Macros */ +/* Removed from here as it is passed as a compiler command-line definition */ +/* #define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT) */ + +#define GMP_LIMB_MAX ((mp_limb_t) ~ (mp_limb_t) 0) +#define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1)) + +#define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2)) +#define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1) + +#define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT) +#define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1)) + +#define GMP_ABS(x) ((x) >= 0 ? (x) : -(x)) +#define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1)) + +#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b)) +#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b)) + +#define GMP_CMP(a,b) (((a) > (b)) - ((a) < (b))) + +#if defined(DBL_MANT_DIG) && FLT_RADIX == 2 +#define GMP_DBL_MANT_BITS DBL_MANT_DIG +#else +#define GMP_DBL_MANT_BITS (53) +#endif + +/* Return non-zero if xp,xsize and yp,ysize overlap. + If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no + overlap. If both these are false, there's an overlap. 
*/ +#define GMP_MPN_OVERLAP_P(xp, xsize, yp, ysize) \ + ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp)) + +#define gmp_assert_nocarry(x) do { \ + mp_limb_t __cy = (x); \ + assert (__cy == 0); \ + (void) (__cy); \ + } while (0) + +#define gmp_clz(count, x) do { \ + mp_limb_t __clz_x = (x); \ + unsigned __clz_c = 0; \ + int LOCAL_SHIFT_BITS = 8; \ + if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) \ + for (; \ + (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \ + __clz_c += 8) \ + { __clz_x <<= LOCAL_SHIFT_BITS; } \ + for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++) \ + __clz_x <<= 1; \ + (count) = __clz_c; \ + } while (0) + +#define gmp_ctz(count, x) do { \ + mp_limb_t __ctz_x = (x); \ + unsigned __ctz_c = 0; \ + gmp_clz (__ctz_c, __ctz_x & - __ctz_x); \ + (count) = GMP_LIMB_BITS - 1 - __ctz_c; \ + } while (0) + +#define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \ + do { \ + mp_limb_t __x; \ + __x = (al) + (bl); \ + (sh) = (ah) + (bh) + (__x < (al)); \ + (sl) = __x; \ + } while (0) + +#define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \ + do { \ + mp_limb_t __x; \ + __x = (al) - (bl); \ + (sh) = (ah) - (bh) - ((al) < (bl)); \ + (sl) = __x; \ + } while (0) + +#define gmp_umul_ppmm(w1, w0, u, v) \ + do { \ + int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; \ + if (sizeof(unsigned int) * CHAR_BIT >= 2 * GMP_LIMB_BITS) \ + { \ + unsigned int __ww = (unsigned int) (u) * (v); \ + w0 = (mp_limb_t) __ww; \ + w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ + } \ + else if (GMP_ULONG_BITS >= 2 * GMP_LIMB_BITS) \ + { \ + unsigned long int __ww = (unsigned long int) (u) * (v); \ + w0 = (mp_limb_t) __ww; \ + w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ + } \ + else { \ + mp_limb_t __x0, __x1, __x2, __x3; \ + unsigned __ul, __vl, __uh, __vh; \ + mp_limb_t __u = (u), __v = (v); \ + assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); \ + \ + __ul = __u & GMP_LLIMB_MASK; \ + __uh = __u >> (GMP_LIMB_BITS / 2); \ + __vl = __v & GMP_LLIMB_MASK; \ + __vh = __v >> (GMP_LIMB_BITS / 2); \ + \ + __x0 = (mp_limb_t) __ul * __vl; \ + __x1 = (mp_limb_t) __ul * __vh; \ + __x2 = (mp_limb_t) __uh * __vl; \ + __x3 = (mp_limb_t) __uh * __vh; \ + \ + __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */ \ + __x1 += __x2; /* but this indeed can */ \ + if (__x1 < __x2) /* did we get it? */ \ + __x3 += GMP_HLIMB_BIT; /* yes, add it in the proper pos. */ \ + \ + (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \ + (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \ + } \ + } while (0) + +/* If mp_limb_t is of size smaller than int, plain u*v implies + automatic promotion to *signed* int, and then multiply may overflow + and cause undefined behavior. Explicitly cast to unsigned int for + that case. */ +#define gmp_umullo_limb(u, v) \ + ((sizeof(mp_limb_t) >= sizeof(int)) ? 
(u)*(v) : (unsigned int)(u) * (v)) + +#define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di) \ + do { \ + mp_limb_t _qh, _ql, _r, _mask; \ + gmp_umul_ppmm (_qh, _ql, (nh), (di)); \ + gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \ + _r = (nl) - gmp_umullo_limb (_qh, (d)); \ + _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \ + _qh += _mask; \ + _r += _mask & (d); \ + if (_r >= (d)) \ + { \ + _r -= (d); \ + _qh++; \ + } \ + \ + (r) = _r; \ + (q) = _qh; \ + } while (0) + +#define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \ + do { \ + mp_limb_t _q0, _t1, _t0, _mask; \ + gmp_umul_ppmm ((q), _q0, (n2), (dinv)); \ + gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \ + \ + /* Compute the two most significant limbs of n - q'd */ \ + (r1) = (n1) - gmp_umullo_limb ((d1), (q)); \ + gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \ + gmp_umul_ppmm (_t1, _t0, (d0), (q)); \ + gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \ + (q)++; \ + \ + /* Conditionally adjust q and the remainders */ \ + _mask = - (mp_limb_t) ((r1) >= _q0); \ + (q) += _mask; \ + gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \ + if ((r1) >= (d1)) \ + { \ + if ((r1) > (d1) || (r0) >= (d0)) \ + { \ + (q)++; \ + gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \ + } \ + } \ + } while (0) + +/* Swap macros. */ +#define MP_LIMB_T_SWAP(x, y) \ + do { \ + mp_limb_t __mp_limb_t_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_limb_t_swap__tmp; \ + } while (0) +#define MP_SIZE_T_SWAP(x, y) \ + do { \ + mp_size_t __mp_size_t_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_size_t_swap__tmp; \ + } while (0) +#define MP_BITCNT_T_SWAP(x,y) \ + do { \ + mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_bitcnt_t_swap__tmp; \ + } while (0) +#define MP_PTR_SWAP(x, y) \ + do { \ + mp_ptr __mp_ptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_ptr_swap__tmp; \ + } while (0) +#define MP_SRCPTR_SWAP(x, y) \ + do { \ + mp_srcptr __mp_srcptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mp_srcptr_swap__tmp; \ + } while (0) + +#define MPN_PTR_SWAP(xp,xs, yp,ys) \ + do { \ + MP_PTR_SWAP (xp, yp); \ + MP_SIZE_T_SWAP (xs, ys); \ + } while(0) +#define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \ + do { \ + MP_SRCPTR_SWAP (xp, yp); \ + MP_SIZE_T_SWAP (xs, ys); \ + } while(0) + +#define MPZ_PTR_SWAP(x, y) \ + do { \ + mpz_ptr __mpz_ptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mpz_ptr_swap__tmp; \ + } while (0) +#define MPZ_SRCPTR_SWAP(x, y) \ + do { \ + mpz_srcptr __mpz_srcptr_swap__tmp = (x); \ + (x) = (y); \ + (y) = __mpz_srcptr_swap__tmp; \ + } while (0) + +const int mp_bits_per_limb = GMP_LIMB_BITS; + + +/* Memory allocation and other helper functions. 
*/ +static void +gmp_die (const char *msg) +{ + fprintf (stderr, "%s\n", msg); + abort(); +} + +static void * +gmp_default_alloc (size_t size) +{ + void *p; + + assert (size > 0); + + p = malloc (size); + if (!p) + gmp_die("gmp_default_alloc: Virtual memory exhausted."); + + return p; +} + +static void * +gmp_default_realloc (void *old, size_t unused_old_size, size_t new_size) +{ + void * p; + + p = realloc (old, new_size); + + if (!p) + gmp_die("gmp_default_realloc: Virtual memory exhausted."); + + return p; +} + +static void +gmp_default_free (void *p, size_t unused_size) +{ + free (p); +} + +static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc; +static void * (*gmp_reallocate_func) (void *, size_t, size_t) = gmp_default_realloc; +static void (*gmp_free_func) (void *, size_t) = gmp_default_free; + +void +mp_get_memory_functions (void *(**alloc_func) (size_t), + void *(**realloc_func) (void *, size_t, size_t), + void (**free_func) (void *, size_t)) +{ + if (alloc_func) + *alloc_func = gmp_allocate_func; + + if (realloc_func) + *realloc_func = gmp_reallocate_func; + + if (free_func) + *free_func = gmp_free_func; +} + +void +mp_set_memory_functions (void *(*alloc_func) (size_t), + void *(*realloc_func) (void *, size_t, size_t), + void (*free_func) (void *, size_t)) +{ + if (!alloc_func) + alloc_func = gmp_default_alloc; + if (!realloc_func) + realloc_func = gmp_default_realloc; + if (!free_func) + free_func = gmp_default_free; + + gmp_allocate_func = alloc_func; + gmp_reallocate_func = realloc_func; + gmp_free_func = free_func; +} + +#define gmp_alloc(size) ((*gmp_allocate_func)((size))) +#define gmp_free(p, size) ((*gmp_free_func) ((p), (size))) +#define gmp_realloc(ptr, old_size, size) ((*gmp_reallocate_func)(ptr, old_size, size)) + +static mp_ptr +gmp_alloc_limbs (mp_size_t size) +{ + return (mp_ptr) gmp_alloc (size * sizeof (mp_limb_t)); +} + +static mp_ptr +gmp_realloc_limbs (mp_ptr old, mp_size_t old_size, mp_size_t size) +{ + assert (size > 0); + return (mp_ptr) gmp_realloc (old, old_size * sizeof (mp_limb_t), size * sizeof (mp_limb_t)); +} + +static void +gmp_free_limbs (mp_ptr old, mp_size_t size) +{ + gmp_free (old, size * sizeof (mp_limb_t)); +} + + +/* MPN interface */ + +void +mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n) +{ + mp_size_t i; + for (i = 0; i < n; i++) + d[i] = s[i]; +} + +void +mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n) +{ + while (--n >= 0) + d[n] = s[n]; +} + +int +mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + while (--n >= 0) + { + if (ap[n] != bp[n]) + return ap[n] > bp[n] ? 1 : -1; + } + return 0; +} + +static int +mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) +{ + if (an != bn) + return an < bn ? 
-1 : 1; + else + return mpn_cmp (ap, bp, an); +} + +static mp_size_t +mpn_normalized_size (mp_srcptr xp, mp_size_t n) +{ + while (n > 0 && xp[n-1] == 0) + --n; + return n; +} + +int +mpn_zero_p(mp_srcptr rp, mp_size_t n) +{ + return mpn_normalized_size (rp, n) == 0; +} + +void +mpn_zero (mp_ptr rp, mp_size_t n) +{ + while (--n >= 0) + rp[n] = 0; +} + +mp_limb_t +mpn_add_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) +{ + mp_size_t i; + + assert (n > 0); + i = 0; + do + { + mp_limb_t r = ap[i] + b; + /* Carry out */ + b = (r < b); + rp[i] = r; + } + while (++i < n); + + return b; +} + +mp_limb_t +mpn_add_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + mp_size_t i; + mp_limb_t cy; + + for (i = 0, cy = 0; i < n; i++) + { + mp_limb_t a, b, r; + a = ap[i]; b = bp[i]; + r = a + cy; + cy = (r < cy); + r += b; + cy += (r < b); + rp[i] = r; + } + return cy; +} + +mp_limb_t +mpn_add (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) +{ + mp_limb_t cy; + + assert (an >= bn); + + cy = mpn_add_n (rp, ap, bp, bn); + if (an > bn) + cy = mpn_add_1 (rp + bn, ap + bn, an - bn, cy); + return cy; +} + +mp_limb_t +mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) +{ + mp_size_t i; + + assert (n > 0); + + i = 0; + do + { + mp_limb_t a = ap[i]; + /* Carry out */ + mp_limb_t cy = a < b; + rp[i] = a - b; + b = cy; + } + while (++i < n); + + return b; +} + +mp_limb_t +mpn_sub_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + mp_size_t i; + mp_limb_t cy; + + for (i = 0, cy = 0; i < n; i++) + { + mp_limb_t a, b; + a = ap[i]; b = bp[i]; + b += cy; + cy = (b < cy); + cy += (a < b); + rp[i] = a - b; + } + return cy; +} + +mp_limb_t +mpn_sub (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) +{ + mp_limb_t cy; + + assert (an >= bn); + + cy = mpn_sub_n (rp, ap, bp, bn); + if (an > bn) + cy = mpn_sub_1 (rp + bn, ap + bn, an - bn, cy); + return cy; +} + +mp_limb_t +mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) +{ + mp_limb_t ul, cl, hpl, lpl; + + assert (n >= 1); + + cl = 0; + do + { + ul = *up++; + gmp_umul_ppmm (hpl, lpl, ul, vl); + + lpl += cl; + cl = (lpl < cl) + hpl; + + *rp++ = lpl; + } + while (--n != 0); + + return cl; +} + +mp_limb_t +mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) +{ + mp_limb_t ul, cl, hpl, lpl, rl; + + assert (n >= 1); + + cl = 0; + do + { + ul = *up++; + gmp_umul_ppmm (hpl, lpl, ul, vl); + + lpl += cl; + cl = (lpl < cl) + hpl; + + rl = *rp; + lpl = rl + lpl; + cl += lpl < rl; + *rp++ = lpl; + } + while (--n != 0); + + return cl; +} + +mp_limb_t +mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) +{ + mp_limb_t ul, cl, hpl, lpl, rl; + + assert (n >= 1); + + cl = 0; + do + { + ul = *up++; + gmp_umul_ppmm (hpl, lpl, ul, vl); + + lpl += cl; + cl = (lpl < cl) + hpl; + + rl = *rp; + lpl = rl - lpl; + cl += lpl > rl; + *rp++ = lpl; + } + while (--n != 0); + + return cl; +} + +mp_limb_t +mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn) +{ + assert (un >= vn); + assert (vn >= 1); + assert (!GMP_MPN_OVERLAP_P(rp, un + vn, up, un)); + assert (!GMP_MPN_OVERLAP_P(rp, un + vn, vp, vn)); + + /* We first multiply by the low order limb. This result can be + stored, not added, to rp. We also avoid a loop for zeroing this + way. */ + + rp[un] = mpn_mul_1 (rp, up, un, vp[0]); + + /* Now accumulate the product of up[] and the next higher limb from + vp[]. 
*/ + + while (--vn >= 1) + { + rp += 1, vp += 1; + rp[un] = mpn_addmul_1 (rp, up, un, vp[0]); + } + return rp[un]; +} + +void +mpn_mul_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) +{ + mpn_mul (rp, ap, n, bp, n); +} + +void +mpn_sqr (mp_ptr rp, mp_srcptr ap, mp_size_t n) +{ + mpn_mul (rp, ap, n, ap, n); +} + +mp_limb_t +mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) +{ + mp_limb_t high_limb, low_limb; + unsigned int tnc; + mp_limb_t retval; + + assert (n >= 1); + assert (cnt >= 1); + assert (cnt < GMP_LIMB_BITS); + + up += n; + rp += n; + + tnc = GMP_LIMB_BITS - cnt; + low_limb = *--up; + retval = low_limb >> tnc; + high_limb = (low_limb << cnt); + + while (--n != 0) + { + low_limb = *--up; + *--rp = high_limb | (low_limb >> tnc); + high_limb = (low_limb << cnt); + } + *--rp = high_limb; + + return retval; +} + +mp_limb_t +mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) +{ + mp_limb_t high_limb, low_limb; + unsigned int tnc; + mp_limb_t retval; + + assert (n >= 1); + assert (cnt >= 1); + assert (cnt < GMP_LIMB_BITS); + + tnc = GMP_LIMB_BITS - cnt; + high_limb = *up++; + retval = (high_limb << tnc); + low_limb = high_limb >> cnt; + + while (--n != 0) + { + high_limb = *up++; + *rp++ = low_limb | (high_limb << tnc); + low_limb = high_limb >> cnt; + } + *rp = low_limb; + + return retval; +} + +static mp_bitcnt_t +mpn_common_scan (mp_limb_t limb, mp_size_t i, mp_srcptr up, mp_size_t un, + mp_limb_t ux) +{ + unsigned cnt; + + assert (ux == 0 || ux == GMP_LIMB_MAX); + assert (0 <= i && i <= un ); + + while (limb == 0) + { + i++; + if (i == un) + return (ux == 0 ? ~(mp_bitcnt_t) 0 : un * GMP_LIMB_BITS); + limb = ux ^ up[i]; + } + gmp_ctz (cnt, limb); + return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt; +} + +mp_bitcnt_t +mpn_scan1 (mp_srcptr ptr, mp_bitcnt_t bit) +{ + mp_size_t i; + i = bit / GMP_LIMB_BITS; + + return mpn_common_scan ( ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), + i, ptr, i, 0); +} + +mp_bitcnt_t +mpn_scan0 (mp_srcptr ptr, mp_bitcnt_t bit) +{ + mp_size_t i; + i = bit / GMP_LIMB_BITS; + + return mpn_common_scan (~ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), + i, ptr, i, GMP_LIMB_MAX); +} + +void +mpn_com (mp_ptr rp, mp_srcptr up, mp_size_t n) +{ + while (--n >= 0) + *rp++ = ~ *up++; +} + +mp_limb_t +mpn_neg (mp_ptr rp, mp_srcptr up, mp_size_t n) +{ + while (*up == 0) + { + *rp = 0; + if (!--n) + return 0; + ++up; ++rp; + } + *rp = - *up; + mpn_com (++rp, ++up, --n); + return 1; +} + + +/* MPN division interface. */ + +/* The 3/2 inverse is defined as + + m = floor( (B^3-1) / (B u1 + u0)) - B +*/ +mp_limb_t +mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0) +{ + mp_limb_t r, m; + + { + mp_limb_t p, ql; + unsigned ul, uh, qh; + + assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); + /* For notation, let b denote the half-limb base, so that B = b^2. + Split u1 = b uh + ul. */ + ul = u1 & GMP_LLIMB_MASK; + uh = u1 >> (GMP_LIMB_BITS / 2); + + /* Approximation of the high half of quotient. Differs from the 2/1 + inverse of the half limb uh, since we have already subtracted + u0. */ + qh = (u1 ^ GMP_LIMB_MAX) / uh; + + /* Adjust to get a half-limb 3/2 inverse, i.e., we want + + qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u + = floor( (b (~u) + b-1) / u), + + and the remainder + + r = b (~u) + b-1 - qh (b uh + ul) + = b (~u - qh uh) + b-1 - qh ul + + Subtraction of qh ul may underflow, which implies adjustments. + But by normalization, 2 u >= B > qh ul, so we need to adjust by + at most 2. 
+ */ + + r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK; + + p = (mp_limb_t) qh * ul; + /* Adjustment steps taken from udiv_qrnnd_c */ + if (r < p) + { + qh--; + r += u1; + if (r >= u1) /* i.e. we didn't get carry when adding to r */ + if (r < p) + { + qh--; + r += u1; + } + } + r -= p; + + /* Low half of the quotient is + + ql = floor ( (b r + b-1) / u1). + + This is a 3/2 division (on half-limbs), for which qh is a + suitable inverse. */ + + p = (r >> (GMP_LIMB_BITS / 2)) * qh + r; + /* Unlike full-limb 3/2, we can add 1 without overflow. For this to + work, it is essential that ql is a full mp_limb_t. */ + ql = (p >> (GMP_LIMB_BITS / 2)) + 1; + + /* By the 3/2 trick, we don't need the high half limb. */ + r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1; + + if (r >= (GMP_LIMB_MAX & (p << (GMP_LIMB_BITS / 2)))) + { + ql--; + r += u1; + } + m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql; + if (r >= u1) + { + m++; + r -= u1; + } + } + + /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a + 3/2 inverse. */ + if (u0 > 0) + { + mp_limb_t th, tl; + r = ~r; + r += u0; + if (r < u0) + { + m--; + if (r >= u1) + { + m--; + r -= u1; + } + r -= u1; + } + gmp_umul_ppmm (th, tl, u0, m); + r += th; + if (r < th) + { + m--; + m -= ((r > u1) | ((r == u1) & (tl > u0))); + } + } + + return m; +} + +struct gmp_div_inverse +{ + /* Normalization shift count. */ + unsigned shift; + /* Normalized divisor (d0 unused for mpn_div_qr_1) */ + mp_limb_t d1, d0; + /* Inverse, for 2/1 or 3/2. */ + mp_limb_t di; +}; + +static void +mpn_div_qr_1_invert (struct gmp_div_inverse *inv, mp_limb_t d) +{ + unsigned shift; + + assert (d > 0); + gmp_clz (shift, d); + inv->shift = shift; + inv->d1 = d << shift; + inv->di = mpn_invert_limb (inv->d1); +} + +static void +mpn_div_qr_2_invert (struct gmp_div_inverse *inv, + mp_limb_t d1, mp_limb_t d0) +{ + unsigned shift; + + assert (d1 > 0); + gmp_clz (shift, d1); + inv->shift = shift; + if (shift > 0) + { + d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); + d0 <<= shift; + } + inv->d1 = d1; + inv->d0 = d0; + inv->di = mpn_invert_3by2 (d1, d0); +} + +static void +mpn_div_qr_invert (struct gmp_div_inverse *inv, + mp_srcptr dp, mp_size_t dn) +{ + assert (dn > 0); + + if (dn == 1) + mpn_div_qr_1_invert (inv, dp[0]); + else if (dn == 2) + mpn_div_qr_2_invert (inv, dp[1], dp[0]); + else + { + unsigned shift; + mp_limb_t d1, d0; + + d1 = dp[dn-1]; + d0 = dp[dn-2]; + assert (d1 > 0); + gmp_clz (shift, d1); + inv->shift = shift; + if (shift > 0) + { + d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); + d0 = (d0 << shift) | (dp[dn-3] >> (GMP_LIMB_BITS - shift)); + } + inv->d1 = d1; + inv->d0 = d0; + inv->di = mpn_invert_3by2 (d1, d0); + } +} + +/* Not matching current public gmp interface, rather corresponding to + the sbpi1_div_* functions. */ +static mp_limb_t +mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn, + const struct gmp_div_inverse *inv) +{ + mp_limb_t d, di; + mp_limb_t r; + mp_ptr tp = NULL; + mp_size_t tn = 0; + + if (inv->shift > 0) + { + /* Shift, reusing qp area if possible. In-place shift if qp == np. 
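+         (If the caller passed no quotient area, a temporary limb buffer
+         is allocated here and released again after the division loop.)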
*/ + tp = qp; + if (!tp) + { + tn = nn; + tp = gmp_alloc_limbs (tn); + } + r = mpn_lshift (tp, np, nn, inv->shift); + np = tp; + } + else + r = 0; + + d = inv->d1; + di = inv->di; + while (--nn >= 0) + { + mp_limb_t q; + + gmp_udiv_qrnnd_preinv (q, r, r, np[nn], d, di); + if (qp) + qp[nn] = q; + } + if (tn) + gmp_free_limbs (tp, tn); + + return r >> inv->shift; +} + +static void +mpn_div_qr_2_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, + const struct gmp_div_inverse *inv) +{ + unsigned shift; + mp_size_t i; + mp_limb_t d1, d0, di, r1, r0; + + assert (nn >= 2); + shift = inv->shift; + d1 = inv->d1; + d0 = inv->d0; + di = inv->di; + + if (shift > 0) + r1 = mpn_lshift (np, np, nn, shift); + else + r1 = 0; + + r0 = np[nn - 1]; + + i = nn - 2; + do + { + mp_limb_t n0, q; + n0 = np[i]; + gmp_udiv_qr_3by2 (q, r1, r0, r1, r0, n0, d1, d0, di); + + if (qp) + qp[i] = q; + } + while (--i >= 0); + + if (shift > 0) + { + assert ((r0 & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - shift))) == 0); + r0 = (r0 >> shift) | (r1 << (GMP_LIMB_BITS - shift)); + r1 >>= shift; + } + + np[1] = r1; + np[0] = r0; +} + +static void +mpn_div_qr_pi1 (mp_ptr qp, + mp_ptr np, mp_size_t nn, mp_limb_t n1, + mp_srcptr dp, mp_size_t dn, + mp_limb_t dinv) +{ + mp_size_t i; + + mp_limb_t d1, d0; + mp_limb_t cy, cy1; + mp_limb_t q; + + assert (dn > 2); + assert (nn >= dn); + + d1 = dp[dn - 1]; + d0 = dp[dn - 2]; + + assert ((d1 & GMP_LIMB_HIGHBIT) != 0); + /* Iteration variable is the index of the q limb. + * + * We divide + * by + */ + + i = nn - dn; + do + { + mp_limb_t n0 = np[dn-1+i]; + + if (n1 == d1 && n0 == d0) + { + q = GMP_LIMB_MAX; + mpn_submul_1 (np+i, dp, dn, q); + n1 = np[dn-1+i]; /* update n1, last loop's value will now be invalid */ + } + else + { + gmp_udiv_qr_3by2 (q, n1, n0, n1, n0, np[dn-2+i], d1, d0, dinv); + + cy = mpn_submul_1 (np + i, dp, dn-2, q); + + cy1 = n0 < cy; + n0 = n0 - cy; + cy = n1 < cy1; + n1 = n1 - cy1; + np[dn-2+i] = n0; + + if (cy != 0) + { + n1 += d1 + mpn_add_n (np + i, np + i, dp, dn - 1); + q--; + } + } + + if (qp) + qp[i] = q; + } + while (--i >= 0); + + np[dn - 1] = n1; +} + +static void +mpn_div_qr_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, + mp_srcptr dp, mp_size_t dn, + const struct gmp_div_inverse *inv) +{ + assert (dn > 0); + assert (nn >= dn); + + if (dn == 1) + np[0] = mpn_div_qr_1_preinv (qp, np, nn, inv); + else if (dn == 2) + mpn_div_qr_2_preinv (qp, np, nn, inv); + else + { + mp_limb_t nh; + unsigned shift; + + assert (inv->d1 == dp[dn-1]); + assert (inv->d0 == dp[dn-2]); + assert ((inv->d1 & GMP_LIMB_HIGHBIT) != 0); + + shift = inv->shift; + if (shift > 0) + nh = mpn_lshift (np, np, nn, shift); + else + nh = 0; + + mpn_div_qr_pi1 (qp, np, nn, nh, dp, dn, inv->di); + + if (shift > 0) + gmp_assert_nocarry (mpn_rshift (np, np, dn, shift)); + } +} + +static void +mpn_div_qr (mp_ptr qp, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn) +{ + struct gmp_div_inverse inv; + mp_ptr tp = NULL; + + assert (dn > 0); + assert (nn >= dn); + + mpn_div_qr_invert (&inv, dp, dn); + if (dn > 2 && inv.shift > 0) + { + tp = gmp_alloc_limbs (dn); + gmp_assert_nocarry (mpn_lshift (tp, dp, dn, inv.shift)); + dp = tp; + } + mpn_div_qr_preinv (qp, np, nn, dp, dn, &inv); + if (tp) + gmp_free_limbs (tp, dn); +} + + +/* MPN base conversion. 
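+   Power-of-two bases are handled by direct bit manipulation; all other
+   bases work in chunks of the largest power of the base that fits in one
+   limb (see mpn_get_base_info), using division for output and
+   multiplication for input.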
*/ +static unsigned +mpn_base_power_of_two_p (unsigned b) +{ + switch (b) + { + case 2: return 1; + case 4: return 2; + case 8: return 3; + case 16: return 4; + case 32: return 5; + case 64: return 6; + case 128: return 7; + case 256: return 8; + default: return 0; + } +} + +struct mpn_base_info +{ + /* bb is the largest power of the base which fits in one limb, and + exp is the corresponding exponent. */ + unsigned exp; + mp_limb_t bb; +}; + +static void +mpn_get_base_info (struct mpn_base_info *info, mp_limb_t b) +{ + mp_limb_t m; + mp_limb_t p; + unsigned exp; + + m = GMP_LIMB_MAX / b; + for (exp = 1, p = b; p <= m; exp++) + p *= b; + + info->exp = exp; + info->bb = p; +} + +static mp_bitcnt_t +mpn_limb_size_in_base_2 (mp_limb_t u) +{ + unsigned shift; + + assert (u > 0); + gmp_clz (shift, u); + return GMP_LIMB_BITS - shift; +} + +static size_t +mpn_get_str_bits (unsigned char *sp, unsigned bits, mp_srcptr up, mp_size_t un) +{ + unsigned char mask; + size_t sn, j; + mp_size_t i; + unsigned shift; + + sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]) + + bits - 1) / bits; + + mask = (1U << bits) - 1; + + for (i = 0, j = sn, shift = 0; j-- > 0;) + { + unsigned char digit = up[i] >> shift; + + shift += bits; + + if (shift >= GMP_LIMB_BITS && ++i < un) + { + shift -= GMP_LIMB_BITS; + digit |= up[i] << (bits - shift); + } + sp[j] = digit & mask; + } + return sn; +} + +/* We generate digits from the least significant end, and reverse at + the end. */ +static size_t +mpn_limb_get_str (unsigned char *sp, mp_limb_t w, + const struct gmp_div_inverse *binv) +{ + mp_size_t i; + for (i = 0; w > 0; i++) + { + mp_limb_t h, l, r; + + h = w >> (GMP_LIMB_BITS - binv->shift); + l = w << binv->shift; + + gmp_udiv_qrnnd_preinv (w, r, h, l, binv->d1, binv->di); + assert ((r & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - binv->shift))) == 0); + r >>= binv->shift; + + sp[i] = r; + } + return i; +} + +static size_t +mpn_get_str_other (unsigned char *sp, + int base, const struct mpn_base_info *info, + mp_ptr up, mp_size_t un) +{ + struct gmp_div_inverse binv; + size_t sn; + size_t i; + + mpn_div_qr_1_invert (&binv, base); + + sn = 0; + + if (un > 1) + { + struct gmp_div_inverse bbinv; + mpn_div_qr_1_invert (&bbinv, info->bb); + + do + { + mp_limb_t w; + size_t done; + w = mpn_div_qr_1_preinv (up, up, un, &bbinv); + un -= (up[un-1] == 0); + done = mpn_limb_get_str (sp + sn, w, &binv); + + for (sn += done; done < info->exp; done++) + sp[sn++] = 0; + } + while (un > 1); + } + sn += mpn_limb_get_str (sp + sn, up[0], &binv); + + /* Reverse order */ + for (i = 0; 2*i + 1 < sn; i++) + { + unsigned char t = sp[i]; + sp[i] = sp[sn - i - 1]; + sp[sn - i - 1] = t; + } + + return sn; +} + +size_t +mpn_get_str (unsigned char *sp, int base, mp_ptr up, mp_size_t un) +{ + unsigned bits; + + assert (un > 0); + assert (up[un-1] > 0); + + bits = mpn_base_power_of_two_p (base); + if (bits) + return mpn_get_str_bits (sp, bits, up, un); + else + { + struct mpn_base_info info; + + mpn_get_base_info (&info, base); + return mpn_get_str_other (sp, base, &info, up, un); + } +} + +static mp_size_t +mpn_set_str_bits (mp_ptr rp, const unsigned char *sp, size_t sn, + unsigned bits) +{ + mp_size_t rn; + mp_limb_t limb; + unsigned shift; + + for (limb = 0, rn = 0, shift = 0; sn-- > 0; ) + { + limb |= (mp_limb_t) sp[sn] << shift; + shift += bits; + if (shift >= GMP_LIMB_BITS) + { + shift -= GMP_LIMB_BITS; + rp[rn++] = limb; + /* Next line is correct also if shift == 0, + bits == 8, and mp_limb_t == unsigned char. 
*/ + limb = (unsigned int) sp[sn] >> (bits - shift); + } + } + if (limb != 0) + rp[rn++] = limb; + else + rn = mpn_normalized_size (rp, rn); + return rn; +} + +/* Result is usually normalized, except for all-zero input, in which + case a single zero limb is written at *RP, and 1 is returned. */ +static mp_size_t +mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn, + mp_limb_t b, const struct mpn_base_info *info) +{ + mp_size_t rn; + mp_limb_t w; + unsigned k; + size_t j; + + assert (sn > 0); + + k = 1 + (sn - 1) % info->exp; + + j = 0; + w = sp[j++]; + while (--k != 0) + w = w * b + sp[j++]; + + rp[0] = w; + + for (rn = 1; j < sn;) + { + mp_limb_t cy; + + w = sp[j++]; + for (k = 1; k < info->exp; k++) + w = w * b + sp[j++]; + + cy = mpn_mul_1 (rp, rp, rn, info->bb); + cy += mpn_add_1 (rp, rp, rn, w); + if (cy > 0) + rp[rn++] = cy; + } + assert (j == sn); + + return rn; +} + +mp_size_t +mpn_set_str (mp_ptr rp, const unsigned char *sp, size_t sn, int base) +{ + unsigned bits; + + if (sn == 0) + return 0; + + bits = mpn_base_power_of_two_p (base); + if (bits) + return mpn_set_str_bits (rp, sp, sn, bits); + else + { + struct mpn_base_info info; + + mpn_get_base_info (&info, base); + return mpn_set_str_other (rp, sp, sn, base, &info); + } +} + + +/* MPZ interface */ +void +mpz_init (mpz_t r) +{ + static const mp_limb_t dummy_limb = GMP_LIMB_MAX & 0xc1a0; + + r->_mp_alloc = 0; + r->_mp_size = 0; + r->_mp_d = (mp_ptr) &dummy_limb; +} + +/* The utility of this function is a bit limited, since many functions + assigns the result variable using mpz_swap. */ +void +mpz_init2 (mpz_t r, mp_bitcnt_t bits) +{ + mp_size_t rn; + + bits -= (bits != 0); /* Round down, except if 0 */ + rn = 1 + bits / GMP_LIMB_BITS; + + r->_mp_alloc = rn; + r->_mp_size = 0; + r->_mp_d = gmp_alloc_limbs (rn); +} + +void +mpz_clear (mpz_t r) +{ + if (r->_mp_alloc) + gmp_free_limbs (r->_mp_d, r->_mp_alloc); +} + +static mp_ptr +mpz_realloc (mpz_t r, mp_size_t size) +{ + size = GMP_MAX (size, 1); + + if (r->_mp_alloc) + r->_mp_d = gmp_realloc_limbs (r->_mp_d, r->_mp_alloc, size); + else + r->_mp_d = gmp_alloc_limbs (size); + r->_mp_alloc = size; + + if (GMP_ABS (r->_mp_size) > size) + r->_mp_size = 0; + + return r->_mp_d; +} + +/* Realloc for an mpz_t WHAT if it has less than NEEDED limbs. */ +#define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc \ + ? mpz_realloc(z,n) \ + : (z)->_mp_d) + +/* MPZ assignment and basic conversions. 
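+   Throughout, the sign of an mpz_t lives in the sign of _mp_size while
+   the limbs in _mp_d hold the absolute value; a zero value has
+   _mp_size == 0.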
*/ +void +mpz_set_si (mpz_t r, signed long int x) +{ + if (x >= 0) + mpz_set_ui (r, x); + else /* (x < 0) */ + if (GMP_LIMB_BITS < GMP_ULONG_BITS) + { + mpz_set_ui (r, GMP_NEG_CAST (unsigned long int, x)); + mpz_neg (r, r); + } + else + { + r->_mp_size = -1; + MPZ_REALLOC (r, 1)[0] = GMP_NEG_CAST (unsigned long int, x); + } +} + +void +mpz_set_ui (mpz_t r, unsigned long int x) +{ + if (x > 0) + { + r->_mp_size = 1; + MPZ_REALLOC (r, 1)[0] = x; + if (GMP_LIMB_BITS < GMP_ULONG_BITS) + { + int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; + while (x >>= LOCAL_GMP_LIMB_BITS) + { + ++ r->_mp_size; + MPZ_REALLOC (r, r->_mp_size)[r->_mp_size - 1] = x; + } + } + } + else + r->_mp_size = 0; +} + +void +mpz_set (mpz_t r, const mpz_t x) +{ + /* Allow the NOP r == x */ + if (r != x) + { + mp_size_t n; + mp_ptr rp; + + n = GMP_ABS (x->_mp_size); + rp = MPZ_REALLOC (r, n); + + mpn_copyi (rp, x->_mp_d, n); + r->_mp_size = x->_mp_size; + } +} + +void +mpz_init_set_si (mpz_t r, signed long int x) +{ + mpz_init (r); + mpz_set_si (r, x); +} + +void +mpz_init_set_ui (mpz_t r, unsigned long int x) +{ + mpz_init (r); + mpz_set_ui (r, x); +} + +void +mpz_init_set (mpz_t r, const mpz_t x) +{ + mpz_init (r); + mpz_set (r, x); +} + +int +mpz_fits_slong_p (const mpz_t u) +{ + return mpz_cmp_si (u, LONG_MAX) <= 0 && mpz_cmp_si (u, LONG_MIN) >= 0; +} + +static int +mpn_absfits_ulong_p (mp_srcptr up, mp_size_t un) +{ + int ulongsize = GMP_ULONG_BITS / GMP_LIMB_BITS; + mp_limb_t ulongrem = 0; + + if (GMP_ULONG_BITS % GMP_LIMB_BITS != 0) + ulongrem = (mp_limb_t) (ULONG_MAX >> GMP_LIMB_BITS * ulongsize) + 1; + + return un <= ulongsize || (up[ulongsize] < ulongrem && un == ulongsize + 1); +} + +int +mpz_fits_ulong_p (const mpz_t u) +{ + mp_size_t us = u->_mp_size; + + return us >= 0 && mpn_absfits_ulong_p (u->_mp_d, us); +} + +int +mpz_fits_sint_p (const mpz_t u) +{ + return mpz_cmp_si (u, INT_MAX) <= 0 && mpz_cmp_si (u, INT_MIN) >= 0; +} + +int +mpz_fits_uint_p (const mpz_t u) +{ + return u->_mp_size >= 0 && mpz_cmpabs_ui (u, UINT_MAX) <= 0; +} + +int +mpz_fits_sshort_p (const mpz_t u) +{ + return mpz_cmp_si (u, SHRT_MAX) <= 0 && mpz_cmp_si (u, SHRT_MIN) >= 0; +} + +int +mpz_fits_ushort_p (const mpz_t u) +{ + return u->_mp_size >= 0 && mpz_cmpabs_ui (u, USHRT_MAX) <= 0; +} + +long int +mpz_get_si (const mpz_t u) +{ + unsigned long r = mpz_get_ui (u); + unsigned long c = -LONG_MAX - LONG_MIN; + + if (u->_mp_size < 0) + /* This expression is necessary to properly handle -LONG_MIN */ + return -(long) c - (long) ((r - c) & LONG_MAX); + else + return (long) (r & LONG_MAX); +} + +unsigned long int +mpz_get_ui (const mpz_t u) +{ + if (GMP_LIMB_BITS < GMP_ULONG_BITS) + { + int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; + unsigned long r = 0; + mp_size_t n = GMP_ABS (u->_mp_size); + n = GMP_MIN (n, 1 + (mp_size_t) (GMP_ULONG_BITS - 1) / GMP_LIMB_BITS); + while (--n >= 0) + r = (r << LOCAL_GMP_LIMB_BITS) + u->_mp_d[n]; + return r; + } + + return u->_mp_size == 0 ? 
0 : u->_mp_d[0]; +} + +size_t +mpz_size (const mpz_t u) +{ + return GMP_ABS (u->_mp_size); +} + +mp_limb_t +mpz_getlimbn (const mpz_t u, mp_size_t n) +{ + if (n >= 0 && n < GMP_ABS (u->_mp_size)) + return u->_mp_d[n]; + else + return 0; +} + +void +mpz_realloc2 (mpz_t x, mp_bitcnt_t n) +{ + mpz_realloc (x, 1 + (n - (n != 0)) / GMP_LIMB_BITS); +} + +mp_srcptr +mpz_limbs_read (mpz_srcptr x) +{ + return x->_mp_d; +} + +mp_ptr +mpz_limbs_modify (mpz_t x, mp_size_t n) +{ + assert (n > 0); + return MPZ_REALLOC (x, n); +} + +mp_ptr +mpz_limbs_write (mpz_t x, mp_size_t n) +{ + return mpz_limbs_modify (x, n); +} + +void +mpz_limbs_finish (mpz_t x, mp_size_t xs) +{ + mp_size_t xn; + xn = mpn_normalized_size (x->_mp_d, GMP_ABS (xs)); + x->_mp_size = xs < 0 ? -xn : xn; +} + +static mpz_srcptr +mpz_roinit_normal_n (mpz_t x, mp_srcptr xp, mp_size_t xs) +{ + x->_mp_alloc = 0; + x->_mp_d = (mp_ptr) xp; + x->_mp_size = xs; + return x; +} + +mpz_srcptr +mpz_roinit_n (mpz_t x, mp_srcptr xp, mp_size_t xs) +{ + mpz_roinit_normal_n (x, xp, xs); + mpz_limbs_finish (x, xs); + return x; +} + + +/* Conversions and comparison to double. */ +void +mpz_set_d (mpz_t r, double x) +{ + int sign; + mp_ptr rp; + mp_size_t rn, i; + double B; + double Bi; + mp_limb_t f; + + /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is + zero or infinity. */ + if (x != x || x == x * 0.5) + { + r->_mp_size = 0; + return; + } + + sign = x < 0.0 ; + if (sign) + x = - x; + + if (x < 1.0) + { + r->_mp_size = 0; + return; + } + B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); + Bi = 1.0 / B; + for (rn = 1; x >= B; rn++) + x *= Bi; + + rp = MPZ_REALLOC (r, rn); + + f = (mp_limb_t) x; + x -= f; + assert (x < 1.0); + i = rn-1; + rp[i] = f; + while (--i >= 0) + { + x = B * x; + f = (mp_limb_t) x; + x -= f; + assert (x < 1.0); + rp[i] = f; + } + + r->_mp_size = sign ? - rn : rn; +} + +void +mpz_init_set_d (mpz_t r, double x) +{ + mpz_init (r); + mpz_set_d (r, x); +} + +double +mpz_get_d (const mpz_t u) +{ + int m; + mp_limb_t l; + mp_size_t un; + double x; + double B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); + + un = GMP_ABS (u->_mp_size); + + if (un == 0) + return 0.0; + + l = u->_mp_d[--un]; + gmp_clz (m, l); + m = m + GMP_DBL_MANT_BITS - GMP_LIMB_BITS; + if (m < 0) + l &= GMP_LIMB_MAX << -m; + + for (x = l; --un >= 0;) + { + x = B*x; + if (m > 0) { + l = u->_mp_d[un]; + m -= GMP_LIMB_BITS; + if (m < 0) + l &= GMP_LIMB_MAX << -m; + x += l; + } + } + + if (u->_mp_size < 0) + x = -x; + + return x; +} + +int +mpz_cmpabs_d (const mpz_t x, double d) +{ + mp_size_t xn; + double B, Bi; + mp_size_t i; + + xn = x->_mp_size; + d = GMP_ABS (d); + + if (xn != 0) + { + xn = GMP_ABS (xn); + + B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); + Bi = 1.0 / B; + + /* Scale d so it can be compared with the top limb. */ + for (i = 1; i < xn; i++) + d *= Bi; + + if (d >= B) + return -1; + + /* Compare floor(d) to top limb, subtract and cancel when equal. */ + for (i = xn; i-- > 0;) + { + mp_limb_t f, xl; + + f = (mp_limb_t) d; + xl = x->_mp_d[i]; + if (xl > f) + return 1; + else if (xl < f) + return -1; + d = B * (d - f); + } + } + return - (d > 0.0); +} + +int +mpz_cmp_d (const mpz_t x, double d) +{ + if (x->_mp_size < 0) + { + if (d >= 0.0) + return -1; + else + return -mpz_cmpabs_d (x, d); + } + else + { + if (d < 0.0) + return 1; + else + return mpz_cmpabs_d (x, d); + } +} + + +/* MPZ comparisons and the like. 
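+   Because _mp_size is negative for negative values, comparing the two
+   size fields alone orders operands of different sign or limb count;
+   only equal sizes fall through to a limb-by-limb mpn_cmp.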
*/ +int +mpz_sgn (const mpz_t u) +{ + return GMP_CMP (u->_mp_size, 0); +} + +int +mpz_cmp_si (const mpz_t u, long v) +{ + mp_size_t usize = u->_mp_size; + + if (v >= 0) + return mpz_cmp_ui (u, v); + else if (usize >= 0) + return 1; + else + return - mpz_cmpabs_ui (u, GMP_NEG_CAST (unsigned long int, v)); +} + +int +mpz_cmp_ui (const mpz_t u, unsigned long v) +{ + mp_size_t usize = u->_mp_size; + + if (usize < 0) + return -1; + else + return mpz_cmpabs_ui (u, v); +} + +int +mpz_cmp (const mpz_t a, const mpz_t b) +{ + mp_size_t asize = a->_mp_size; + mp_size_t bsize = b->_mp_size; + + if (asize != bsize) + return (asize < bsize) ? -1 : 1; + else if (asize >= 0) + return mpn_cmp (a->_mp_d, b->_mp_d, asize); + else + return mpn_cmp (b->_mp_d, a->_mp_d, -asize); +} + +int +mpz_cmpabs_ui (const mpz_t u, unsigned long v) +{ + mp_size_t un = GMP_ABS (u->_mp_size); + + if (! mpn_absfits_ulong_p (u->_mp_d, un)) + return 1; + else + { + unsigned long uu = mpz_get_ui (u); + return GMP_CMP(uu, v); + } +} + +int +mpz_cmpabs (const mpz_t u, const mpz_t v) +{ + return mpn_cmp4 (u->_mp_d, GMP_ABS (u->_mp_size), + v->_mp_d, GMP_ABS (v->_mp_size)); +} + +void +mpz_abs (mpz_t r, const mpz_t u) +{ + mpz_set (r, u); + r->_mp_size = GMP_ABS (r->_mp_size); +} + +void +mpz_neg (mpz_t r, const mpz_t u) +{ + mpz_set (r, u); + r->_mp_size = -r->_mp_size; +} + +void +mpz_swap (mpz_t u, mpz_t v) +{ + MP_SIZE_T_SWAP (u->_mp_alloc, v->_mp_alloc); + MPN_PTR_SWAP (u->_mp_d, u->_mp_size, v->_mp_d, v->_mp_size); +} + + +/* MPZ addition and subtraction */ + + +void +mpz_add_ui (mpz_t r, const mpz_t a, unsigned long b) +{ + mpz_t bb; + mpz_init_set_ui (bb, b); + mpz_add (r, a, bb); + mpz_clear (bb); +} + +void +mpz_sub_ui (mpz_t r, const mpz_t a, unsigned long b) +{ + mpz_ui_sub (r, b, a); + mpz_neg (r, r); +} + +void +mpz_ui_sub (mpz_t r, unsigned long a, const mpz_t b) +{ + mpz_neg (r, b); + mpz_add_ui (r, r, a); +} + +static mp_size_t +mpz_abs_add (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t an = GMP_ABS (a->_mp_size); + mp_size_t bn = GMP_ABS (b->_mp_size); + mp_ptr rp; + mp_limb_t cy; + + if (an < bn) + { + MPZ_SRCPTR_SWAP (a, b); + MP_SIZE_T_SWAP (an, bn); + } + + rp = MPZ_REALLOC (r, an + 1); + cy = mpn_add (rp, a->_mp_d, an, b->_mp_d, bn); + + rp[an] = cy; + + return an + cy; +} + +static mp_size_t +mpz_abs_sub (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t an = GMP_ABS (a->_mp_size); + mp_size_t bn = GMP_ABS (b->_mp_size); + int cmp; + mp_ptr rp; + + cmp = mpn_cmp4 (a->_mp_d, an, b->_mp_d, bn); + if (cmp > 0) + { + rp = MPZ_REALLOC (r, an); + gmp_assert_nocarry (mpn_sub (rp, a->_mp_d, an, b->_mp_d, bn)); + return mpn_normalized_size (rp, an); + } + else if (cmp < 0) + { + rp = MPZ_REALLOC (r, bn); + gmp_assert_nocarry (mpn_sub (rp, b->_mp_d, bn, a->_mp_d, an)); + return -mpn_normalized_size (rp, bn); + } + else + return 0; +} + +void +mpz_add (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t rn; + + if ( (a->_mp_size ^ b->_mp_size) >= 0) + rn = mpz_abs_add (r, a, b); + else + rn = mpz_abs_sub (r, a, b); + + r->_mp_size = a->_mp_size >= 0 ? rn : - rn; +} + +void +mpz_sub (mpz_t r, const mpz_t a, const mpz_t b) +{ + mp_size_t rn; + + if ( (a->_mp_size ^ b->_mp_size) >= 0) + rn = mpz_abs_sub (r, a, b); + else + rn = mpz_abs_add (r, a, b); + + r->_mp_size = a->_mp_size >= 0 ? 
rn : - rn; +} + + +/* MPZ multiplication */ +void +mpz_mul_si (mpz_t r, const mpz_t u, long int v) +{ + if (v < 0) + { + mpz_mul_ui (r, u, GMP_NEG_CAST (unsigned long int, v)); + mpz_neg (r, r); + } + else + mpz_mul_ui (r, u, v); +} + +void +mpz_mul_ui (mpz_t r, const mpz_t u, unsigned long int v) +{ + mpz_t vv; + mpz_init_set_ui (vv, v); + mpz_mul (r, u, vv); + mpz_clear (vv); + return; +} + +void +mpz_mul (mpz_t r, const mpz_t u, const mpz_t v) +{ + int sign; + mp_size_t un, vn, rn; + mpz_t t; + mp_ptr tp; + + un = u->_mp_size; + vn = v->_mp_size; + + if (un == 0 || vn == 0) + { + r->_mp_size = 0; + return; + } + + sign = (un ^ vn) < 0; + + un = GMP_ABS (un); + vn = GMP_ABS (vn); + + mpz_init2 (t, (un + vn) * GMP_LIMB_BITS); + + tp = t->_mp_d; + if (un >= vn) + mpn_mul (tp, u->_mp_d, un, v->_mp_d, vn); + else + mpn_mul (tp, v->_mp_d, vn, u->_mp_d, un); + + rn = un + vn; + rn -= tp[rn-1] == 0; + + t->_mp_size = sign ? - rn : rn; + mpz_swap (r, t); + mpz_clear (t); +} + +void +mpz_mul_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bits) +{ + mp_size_t un, rn; + mp_size_t limbs; + unsigned shift; + mp_ptr rp; + + un = GMP_ABS (u->_mp_size); + if (un == 0) + { + r->_mp_size = 0; + return; + } + + limbs = bits / GMP_LIMB_BITS; + shift = bits % GMP_LIMB_BITS; + + rn = un + limbs + (shift > 0); + rp = MPZ_REALLOC (r, rn); + if (shift > 0) + { + mp_limb_t cy = mpn_lshift (rp + limbs, u->_mp_d, un, shift); + rp[rn-1] = cy; + rn -= (cy == 0); + } + else + mpn_copyd (rp + limbs, u->_mp_d, un); + + mpn_zero (rp, limbs); + + r->_mp_size = (u->_mp_size < 0) ? - rn : rn; +} + +void +mpz_addmul_ui (mpz_t r, const mpz_t u, unsigned long int v) +{ + mpz_t t; + mpz_init_set_ui (t, v); + mpz_mul (t, u, t); + mpz_add (r, r, t); + mpz_clear (t); +} + +void +mpz_submul_ui (mpz_t r, const mpz_t u, unsigned long int v) +{ + mpz_t t; + mpz_init_set_ui (t, v); + mpz_mul (t, u, t); + mpz_sub (r, r, t); + mpz_clear (t); +} + +void +mpz_addmul (mpz_t r, const mpz_t u, const mpz_t v) +{ + mpz_t t; + mpz_init (t); + mpz_mul (t, u, v); + mpz_add (r, r, t); + mpz_clear (t); +} + +void +mpz_submul (mpz_t r, const mpz_t u, const mpz_t v) +{ + mpz_t t; + mpz_init (t); + mpz_mul (t, u, v); + mpz_sub (r, r, t); + mpz_clear (t); +} + + +/* MPZ division */ +enum mpz_div_round_mode { GMP_DIV_FLOOR, GMP_DIV_CEIL, GMP_DIV_TRUNC }; + +/* Allows q or r to be zero. Returns 1 iff remainder is non-zero. */ +static int +mpz_div_qr (mpz_t q, mpz_t r, + const mpz_t n, const mpz_t d, enum mpz_div_round_mode mode) +{ + mp_size_t ns, ds, nn, dn, qs; + ns = n->_mp_size; + ds = d->_mp_size; + + if (ds == 0) + gmp_die("mpz_div_qr: Divide by zero."); + + if (ns == 0) + { + if (q) + q->_mp_size = 0; + if (r) + r->_mp_size = 0; + return 0; + } + + nn = GMP_ABS (ns); + dn = GMP_ABS (ds); + + qs = ds ^ ns; + + if (nn < dn) + { + if (mode == GMP_DIV_CEIL && qs >= 0) + { + /* q = 1, r = n - d */ + if (r) + mpz_sub (r, n, d); + if (q) + mpz_set_ui (q, 1); + } + else if (mode == GMP_DIV_FLOOR && qs < 0) + { + /* q = -1, r = n + d */ + if (r) + mpz_add (r, n, d); + if (q) + mpz_set_si (q, -1); + } + else + { + /* q = 0, r = d */ + if (r) + mpz_set (r, n); + if (q) + q->_mp_size = 0; + } + return 1; + } + else + { + mp_ptr np, qp; + mp_size_t qn, rn; + mpz_t tq, tr; + + mpz_init_set (tr, n); + np = tr->_mp_d; + + qn = nn - dn + 1; + + if (q) + { + mpz_init2 (tq, qn * GMP_LIMB_BITS); + qp = tq->_mp_d; + } + else + qp = NULL; + + mpn_div_qr (qp, np, nn, d->_mp_d, dn); + + if (qp) + { + qn -= (qp[qn-1] == 0); + + tq->_mp_size = qs < 0 ? 
-qn : qn; + } + rn = mpn_normalized_size (np, dn); + tr->_mp_size = ns < 0 ? - rn : rn; + + if (mode == GMP_DIV_FLOOR && qs < 0 && rn != 0) + { + if (q) + mpz_sub_ui (tq, tq, 1); + if (r) + mpz_add (tr, tr, d); + } + else if (mode == GMP_DIV_CEIL && qs >= 0 && rn != 0) + { + if (q) + mpz_add_ui (tq, tq, 1); + if (r) + mpz_sub (tr, tr, d); + } + + if (q) + { + mpz_swap (tq, q); + mpz_clear (tq); + } + if (r) + mpz_swap (tr, r); + + mpz_clear (tr); + + return rn != 0; + } +} + +void +mpz_cdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, r, n, d, GMP_DIV_CEIL); +} + +void +mpz_fdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, r, n, d, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, r, n, d, GMP_DIV_TRUNC); +} + +void +mpz_cdiv_q (mpz_t q, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, NULL, n, d, GMP_DIV_CEIL); +} + +void +mpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, NULL, n, d, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_q (mpz_t q, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC); +} + +void +mpz_cdiv_r (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL); +} + +void +mpz_fdiv_r (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_r (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, GMP_DIV_TRUNC); +} + +void +mpz_mod (mpz_t r, const mpz_t n, const mpz_t d) +{ + mpz_div_qr (NULL, r, n, d, d->_mp_size >= 0 ? GMP_DIV_FLOOR : GMP_DIV_CEIL); +} + +static void +mpz_div_q_2exp (mpz_t q, const mpz_t u, mp_bitcnt_t bit_index, + enum mpz_div_round_mode mode) +{ + mp_size_t un, qn; + mp_size_t limb_cnt; + mp_ptr qp; + int adjust; + + un = u->_mp_size; + if (un == 0) + { + q->_mp_size = 0; + return; + } + limb_cnt = bit_index / GMP_LIMB_BITS; + qn = GMP_ABS (un) - limb_cnt; + bit_index %= GMP_LIMB_BITS; + + if (mode == ((un > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* un != 0 here. */ + /* Note: Below, the final indexing at limb_cnt is valid because at + that point we have qn > 0. */ + adjust = (qn <= 0 + || !mpn_zero_p (u->_mp_d, limb_cnt) + || (u->_mp_d[limb_cnt] + & (((mp_limb_t) 1 << bit_index) - 1))); + else + adjust = 0; + + if (qn <= 0) + qn = 0; + else + { + qp = MPZ_REALLOC (q, qn); + + if (bit_index != 0) + { + mpn_rshift (qp, u->_mp_d + limb_cnt, qn, bit_index); + qn -= qp[qn - 1] == 0; + } + else + { + mpn_copyi (qp, u->_mp_d + limb_cnt, qn); + } + } + + q->_mp_size = qn; + + if (adjust) + mpz_add_ui (q, q, 1); + if (un < 0) + mpz_neg (q, q); +} + +static void +mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index, + enum mpz_div_round_mode mode) +{ + mp_size_t us, un, rn; + mp_ptr rp; + mp_limb_t mask; + + us = u->_mp_size; + if (us == 0 || bit_index == 0) + { + r->_mp_size = 0; + return; + } + rn = (bit_index + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; + assert (rn > 0); + + rp = MPZ_REALLOC (r, rn); + un = GMP_ABS (us); + + mask = GMP_LIMB_MAX >> (rn * GMP_LIMB_BITS - bit_index); + + if (rn > un) + { + /* Quotient (with truncation) is zero, and remainder is + non-zero */ + if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ + { + /* Have to negate and sign extend. */ + mp_size_t i; + + gmp_assert_nocarry (! 
mpn_neg (rp, u->_mp_d, un)); + for (i = un; i < rn - 1; i++) + rp[i] = GMP_LIMB_MAX; + + rp[rn-1] = mask; + us = -us; + } + else + { + /* Just copy */ + if (r != u) + mpn_copyi (rp, u->_mp_d, un); + + rn = un; + } + } + else + { + if (r != u) + mpn_copyi (rp, u->_mp_d, rn - 1); + + rp[rn-1] = u->_mp_d[rn-1] & mask; + + if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ + { + /* If r != 0, compute 2^{bit_count} - r. */ + mpn_neg (rp, rp, rn); + + rp[rn-1] &= mask; + + /* us is not used for anything else, so we can modify it + here to indicate flipped sign. */ + us = -us; + } + } + rn = mpn_normalized_size (rp, rn); + r->_mp_size = us < 0 ? -rn : rn; +} + +void +mpz_cdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_q_2exp (r, u, cnt, GMP_DIV_CEIL); +} + +void +mpz_fdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_q_2exp (r, u, cnt, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_q_2exp (r, u, cnt, GMP_DIV_TRUNC); +} + +void +mpz_cdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_r_2exp (r, u, cnt, GMP_DIV_CEIL); +} + +void +mpz_fdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_r_2exp (r, u, cnt, GMP_DIV_FLOOR); +} + +void +mpz_tdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) +{ + mpz_div_r_2exp (r, u, cnt, GMP_DIV_TRUNC); +} + +void +mpz_divexact (mpz_t q, const mpz_t n, const mpz_t d) +{ + gmp_assert_nocarry (mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC)); +} + +int +mpz_divisible_p (const mpz_t n, const mpz_t d) +{ + return mpz_div_qr (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; +} + +int +mpz_congruent_p (const mpz_t a, const mpz_t b, const mpz_t m) +{ + mpz_t t; + int res; + + /* a == b (mod 0) iff a == b */ + if (mpz_sgn (m) == 0) + return (mpz_cmp (a, b) == 0); + + mpz_init (t); + mpz_sub (t, a, b); + res = mpz_divisible_p (t, m); + mpz_clear (t); + + return res; +} + +static unsigned long +mpz_div_qr_ui (mpz_t q, mpz_t r, + const mpz_t n, unsigned long d, enum mpz_div_round_mode mode) +{ + unsigned long ret; + mpz_t rr, dd; + + mpz_init (rr); + mpz_init_set_ui (dd, d); + mpz_div_qr (q, rr, n, dd, mode); + mpz_clear (dd); + ret = mpz_get_ui (rr); + + if (r) + mpz_swap (r, rr); + mpz_clear (rr); + + return ret; +} + +unsigned long +mpz_cdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, r, n, d, GMP_DIV_CEIL); +} + +unsigned long +mpz_fdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, r, n, d, GMP_DIV_FLOOR); +} + +unsigned long +mpz_tdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, r, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_cdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_CEIL); +} + +unsigned long +mpz_fdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_FLOOR); +} + +unsigned long +mpz_tdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_cdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_CEIL); +} +unsigned long +mpz_fdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); +} +unsigned long +mpz_tdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_cdiv_ui 
(const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_CEIL); +} + +unsigned long +mpz_fdiv_ui (const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_FLOOR); +} + +unsigned long +mpz_tdiv_ui (const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC); +} + +unsigned long +mpz_mod_ui (mpz_t r, const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); +} + +void +mpz_divexact_ui (mpz_t q, const mpz_t n, unsigned long d) +{ + gmp_assert_nocarry (mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC)); +} + +int +mpz_divisible_ui_p (const mpz_t n, unsigned long d) +{ + return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; +} + + +/* GCD */ +static mp_limb_t +mpn_gcd_11 (mp_limb_t u, mp_limb_t v) +{ + unsigned shift; + + assert ( (u | v) > 0); + + if (u == 0) + return v; + else if (v == 0) + return u; + + gmp_ctz (shift, u | v); + + u >>= shift; + v >>= shift; + + if ( (u & 1) == 0) + MP_LIMB_T_SWAP (u, v); + + while ( (v & 1) == 0) + v >>= 1; + + while (u != v) + { + if (u > v) + { + u -= v; + do + u >>= 1; + while ( (u & 1) == 0); + } + else + { + v -= u; + do + v >>= 1; + while ( (v & 1) == 0); + } + } + return u << shift; +} + +mp_size_t +mpn_gcd (mp_ptr rp, mp_ptr up, mp_size_t un, mp_ptr vp, mp_size_t vn) +{ + assert (un >= vn); + assert (vn > 0); + assert (!GMP_MPN_OVERLAP_P (up, un, vp, vn)); + assert (vp[vn-1] > 0); + assert ((up[0] | vp[0]) & 1); + + if (un > vn) + mpn_div_qr (NULL, up, un, vp, vn); + + un = mpn_normalized_size (up, vn); + if (un == 0) + { + mpn_copyi (rp, vp, vn); + return vn; + } + + if (!(vp[0] & 1)) + MPN_PTR_SWAP (up, un, vp, vn); + + while (un > 1 || vn > 1) + { + int shift; + assert (vp[0] & 1); + + while (up[0] == 0) + { + up++; + un--; + } + gmp_ctz (shift, up[0]); + if (shift > 0) + { + gmp_assert_nocarry (mpn_rshift(up, up, un, shift)); + un -= (up[un-1] == 0); + } + + if (un < vn) + MPN_PTR_SWAP (up, un, vp, vn); + else if (un == vn) + { + int c = mpn_cmp (up, vp, un); + if (c == 0) + { + mpn_copyi (rp, up, un); + return un; + } + else if (c < 0) + MP_PTR_SWAP (up, vp); + } + + gmp_assert_nocarry (mpn_sub (up, up, un, vp, vn)); + un = mpn_normalized_size (up, un); + } + rp[0] = mpn_gcd_11 (up[0], vp[0]); + return 1; +} + +unsigned long +mpz_gcd_ui (mpz_t g, const mpz_t u, unsigned long v) +{ + mpz_t t; + mpz_init_set_ui(t, v); + mpz_gcd (t, u, t); + if (v > 0) + v = mpz_get_ui (t); + + if (g) + mpz_swap (t, g); + + mpz_clear (t); + + return v; +} + +static mp_bitcnt_t +mpz_make_odd (mpz_t r) +{ + mp_bitcnt_t shift; + + assert (r->_mp_size > 0); + /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ + shift = mpn_scan1 (r->_mp_d, 0); + mpz_tdiv_q_2exp (r, r, shift); + + return shift; +} + +void +mpz_gcd (mpz_t g, const mpz_t u, const mpz_t v) +{ + mpz_t tu, tv; + mp_bitcnt_t uz, vz, gz; + + if (u->_mp_size == 0) + { + mpz_abs (g, v); + return; + } + if (v->_mp_size == 0) + { + mpz_abs (g, u); + return; + } + + mpz_init (tu); + mpz_init (tv); + + mpz_abs (tu, u); + uz = mpz_make_odd (tu); + mpz_abs (tv, v); + vz = mpz_make_odd (tv); + gz = GMP_MIN (uz, vz); + + if (tu->_mp_size < tv->_mp_size) + mpz_swap (tu, tv); + + tu->_mp_size = mpn_gcd (tu->_mp_d, tu->_mp_d, tu->_mp_size, tv->_mp_d, tv->_mp_size); + mpz_mul_2exp (g, tu, gz); + + mpz_clear (tu); + mpz_clear (tv); +} + +void +mpz_gcdext (mpz_t g, mpz_t s, mpz_t t, const mpz_t u, const mpz_t v) +{ + mpz_t tu, tv, s0, s1, t0, t1; + mp_bitcnt_t uz, vz, 
gz; + mp_bitcnt_t power; + int cmp; + + if (u->_mp_size == 0) + { + /* g = 0 u + sgn(v) v */ + signed long sign = mpz_sgn (v); + mpz_abs (g, v); + if (s) + s->_mp_size = 0; + if (t) + mpz_set_si (t, sign); + return; + } + + if (v->_mp_size == 0) + { + /* g = sgn(u) u + 0 v */ + signed long sign = mpz_sgn (u); + mpz_abs (g, u); + if (s) + mpz_set_si (s, sign); + if (t) + t->_mp_size = 0; + return; + } + + mpz_init (tu); + mpz_init (tv); + mpz_init (s0); + mpz_init (s1); + mpz_init (t0); + mpz_init (t1); + + mpz_abs (tu, u); + uz = mpz_make_odd (tu); + mpz_abs (tv, v); + vz = mpz_make_odd (tv); + gz = GMP_MIN (uz, vz); + + uz -= gz; + vz -= gz; + + /* Cofactors corresponding to odd gcd. gz handled later. */ + if (tu->_mp_size < tv->_mp_size) + { + mpz_swap (tu, tv); + MPZ_SRCPTR_SWAP (u, v); + MPZ_PTR_SWAP (s, t); + MP_BITCNT_T_SWAP (uz, vz); + } + + /* Maintain + * + * u = t0 tu + t1 tv + * v = s0 tu + s1 tv + * + * where u and v denote the inputs with common factors of two + * eliminated, and det (s0, t0; s1, t1) = 2^p. Then + * + * 2^p tu = s1 u - t1 v + * 2^p tv = -s0 u + t0 v + */ + + /* After initial division, tu = q tv + tu', we have + * + * u = 2^uz (tu' + q tv) + * v = 2^vz tv + * + * or + * + * t0 = 2^uz, t1 = 2^uz q + * s0 = 0, s1 = 2^vz + */ + + mpz_tdiv_qr (t1, tu, tu, tv); + mpz_mul_2exp (t1, t1, uz); + + mpz_setbit (s1, vz); + power = uz + vz; + + if (tu->_mp_size > 0) + { + mp_bitcnt_t shift; + shift = mpz_make_odd (tu); + mpz_setbit (t0, uz + shift); + power += shift; + + for (;;) + { + int c; + c = mpz_cmp (tu, tv); + if (c == 0) + break; + + if (c < 0) + { + /* tv = tv' + tu + * + * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv' + * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */ + + mpz_sub (tv, tv, tu); + mpz_add (t0, t0, t1); + mpz_add (s0, s0, s1); + + shift = mpz_make_odd (tv); + mpz_mul_2exp (t1, t1, shift); + mpz_mul_2exp (s1, s1, shift); + } + else + { + mpz_sub (tu, tu, tv); + mpz_add (t1, t0, t1); + mpz_add (s1, s0, s1); + + shift = mpz_make_odd (tu); + mpz_mul_2exp (t0, t0, shift); + mpz_mul_2exp (s0, s0, shift); + } + power += shift; + } + } + else + mpz_setbit (t0, uz); + + /* Now tv = odd part of gcd, and -s0 and t0 are corresponding + cofactors. */ + + mpz_mul_2exp (tv, tv, gz); + mpz_neg (s0, s0); + + /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To + adjust cofactors, we need u / g and v / g */ + + mpz_divexact (s1, v, tv); + mpz_abs (s1, s1); + mpz_divexact (t1, u, tv); + mpz_abs (t1, t1); + + while (power-- > 0) + { + /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */ + if (mpz_odd_p (s0) || mpz_odd_p (t0)) + { + mpz_sub (s0, s0, s1); + mpz_add (t0, t0, t1); + } + assert (mpz_even_p (t0) && mpz_even_p (s0)); + mpz_tdiv_q_2exp (s0, s0, 1); + mpz_tdiv_q_2exp (t0, t0, 1); + } + + /* Choose small cofactors (they should generally satify + + |s| < |u| / 2g and |t| < |v| / 2g, + + with some documented exceptions). Always choose the smallest s, + if there are two choices for s with same absolute value, choose + the one with smallest corresponding t (this asymmetric condition + is needed to prefer s = 0, |t| = 1 when g = |a| = |b|). 
*/ + mpz_add (s1, s0, s1); + mpz_sub (t1, t0, t1); + cmp = mpz_cmpabs (s0, s1); + if (cmp > 0 || (cmp == 0 && mpz_cmpabs (t0, t1) > 0)) + { + mpz_swap (s0, s1); + mpz_swap (t0, t1); + } + if (u->_mp_size < 0) + mpz_neg (s0, s0); + if (v->_mp_size < 0) + mpz_neg (t0, t0); + + mpz_swap (g, tv); + if (s) + mpz_swap (s, s0); + if (t) + mpz_swap (t, t0); + + mpz_clear (tu); + mpz_clear (tv); + mpz_clear (s0); + mpz_clear (s1); + mpz_clear (t0); + mpz_clear (t1); +} + +void +mpz_lcm (mpz_t r, const mpz_t u, const mpz_t v) +{ + mpz_t g; + + if (u->_mp_size == 0 || v->_mp_size == 0) + { + r->_mp_size = 0; + return; + } + + mpz_init (g); + + mpz_gcd (g, u, v); + mpz_divexact (g, u, g); + mpz_mul (r, g, v); + + mpz_clear (g); + mpz_abs (r, r); +} + +void +mpz_lcm_ui (mpz_t r, const mpz_t u, unsigned long v) +{ + if (v == 0 || u->_mp_size == 0) + { + r->_mp_size = 0; + return; + } + + v /= mpz_gcd_ui (NULL, u, v); + mpz_mul_ui (r, u, v); + + mpz_abs (r, r); +} + +int +mpz_invert (mpz_t r, const mpz_t u, const mpz_t m) +{ + mpz_t g, tr; + int invertible; + + if (u->_mp_size == 0 || mpz_cmpabs_ui (m, 1) <= 0) + return 0; + + mpz_init (g); + mpz_init (tr); + + mpz_gcdext (g, tr, NULL, u, m); + invertible = (mpz_cmp_ui (g, 1) == 0); + + if (invertible) + { + if (tr->_mp_size < 0) + { + if (m->_mp_size >= 0) + mpz_add (tr, tr, m); + else + mpz_sub (tr, tr, m); + } + mpz_swap (r, tr); + } + + mpz_clear (g); + mpz_clear (tr); + return invertible; +} + + +/* Higher level operations (sqrt, pow and root) */ + +void +mpz_pow_ui (mpz_t r, const mpz_t b, unsigned long e) +{ + unsigned long bit; + mpz_t tr; + mpz_init_set_ui (tr, 1); + + bit = GMP_ULONG_HIGHBIT; + do + { + mpz_mul (tr, tr, tr); + if (e & bit) + mpz_mul (tr, tr, b); + bit >>= 1; + } + while (bit > 0); + + mpz_swap (r, tr); + mpz_clear (tr); +} + +void +mpz_ui_pow_ui (mpz_t r, unsigned long blimb, unsigned long e) +{ + mpz_t b; + + mpz_init_set_ui (b, blimb); + mpz_pow_ui (r, b, e); + mpz_clear (b); +} + +void +mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m) +{ + mpz_t tr; + mpz_t base; + mp_size_t en, mn; + mp_srcptr mp; + struct gmp_div_inverse minv; + unsigned shift; + mp_ptr tp = NULL; + + en = GMP_ABS (e->_mp_size); + mn = GMP_ABS (m->_mp_size); + if (mn == 0) + gmp_die ("mpz_powm: Zero modulo."); + + if (en == 0) + { + mpz_set_ui (r, mpz_cmpabs_ui (m, 1)); + return; + } + + mp = m->_mp_d; + mpn_div_qr_invert (&minv, mp, mn); + shift = minv.shift; + + if (shift > 0) + { + /* To avoid shifts, we do all our reductions, except the final + one, using a *normalized* m. */ + minv.shift = 0; + + tp = gmp_alloc_limbs (mn); + gmp_assert_nocarry (mpn_lshift (tp, mp, mn, shift)); + mp = tp; + } + + mpz_init (base); + + if (e->_mp_size < 0) + { + if (!mpz_invert (base, b, m)) + gmp_die ("mpz_powm: Negative exponent and non-invertible base."); + } + else + { + mp_size_t bn; + mpz_abs (base, b); + + bn = base->_mp_size; + if (bn >= mn) + { + mpn_div_qr_preinv (NULL, base->_mp_d, base->_mp_size, mp, mn, &minv); + bn = mn; + } + + /* We have reduced the absolute value. Now take care of the + sign. Note that we get zero represented non-canonically as + m. 
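+         (When b is negative, the code below subtracts the reduced |b|
+         from the, possibly normalized, modulus, so the square-and-multiply
+         loop only ever sees a non-negative residue.)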
*/ + if (b->_mp_size < 0) + { + mp_ptr bp = MPZ_REALLOC (base, mn); + gmp_assert_nocarry (mpn_sub (bp, mp, mn, bp, bn)); + bn = mn; + } + base->_mp_size = mpn_normalized_size (base->_mp_d, bn); + } + mpz_init_set_ui (tr, 1); + + while (--en >= 0) + { + mp_limb_t w = e->_mp_d[en]; + mp_limb_t bit; + + bit = GMP_LIMB_HIGHBIT; + do + { + mpz_mul (tr, tr, tr); + if (w & bit) + mpz_mul (tr, tr, base); + if (tr->_mp_size > mn) + { + mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); + tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); + } + bit >>= 1; + } + while (bit > 0); + } + + /* Final reduction */ + if (tr->_mp_size >= mn) + { + minv.shift = shift; + mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); + tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); + } + if (tp) + gmp_free_limbs (tp, mn); + + mpz_swap (r, tr); + mpz_clear (tr); + mpz_clear (base); +} + +void +mpz_powm_ui (mpz_t r, const mpz_t b, unsigned long elimb, const mpz_t m) +{ + mpz_t e; + + mpz_init_set_ui (e, elimb); + mpz_powm (r, b, e, m); + mpz_clear (e); +} + +/* x=trunc(y^(1/z)), r=y-x^z */ +void +mpz_rootrem (mpz_t x, mpz_t r, const mpz_t y, unsigned long z) +{ + int sgn; + mp_bitcnt_t bc; + mpz_t t, u; + + sgn = y->_mp_size < 0; + if ((~z & sgn) != 0) + gmp_die ("mpz_rootrem: Negative argument, with even root."); + if (z == 0) + gmp_die ("mpz_rootrem: Zeroth root."); + + if (mpz_cmpabs_ui (y, 1) <= 0) { + if (x) + mpz_set (x, y); + if (r) + r->_mp_size = 0; + return; + } + + mpz_init (u); + mpz_init (t); + bc = (mpz_sizeinbase (y, 2) - 1) / z + 1; + mpz_setbit (t, bc); + + if (z == 2) /* simplify sqrt loop: z-1 == 1 */ + do { + mpz_swap (u, t); /* u = x */ + mpz_tdiv_q (t, y, u); /* t = y/x */ + mpz_add (t, t, u); /* t = y/x + x */ + mpz_tdiv_q_2exp (t, t, 1); /* x'= (y/x + x)/2 */ + } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ + else /* z != 2 */ { + mpz_t v; + + mpz_init (v); + if (sgn) + mpz_neg (t, t); + + do { + mpz_swap (u, t); /* u = x */ + mpz_pow_ui (t, u, z - 1); /* t = x^(z-1) */ + mpz_tdiv_q (t, y, t); /* t = y/x^(z-1) */ + mpz_mul_ui (v, u, z - 1); /* v = x*(z-1) */ + mpz_add (t, t, v); /* t = y/x^(z-1) + x*(z-1) */ + mpz_tdiv_q_ui (t, t, z); /* x'=(y/x^(z-1) + x*(z-1))/z */ + } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ + + mpz_clear (v); + } + + if (r) { + mpz_pow_ui (t, u, z); + mpz_sub (r, y, t); + } + if (x) + mpz_swap (x, u); + mpz_clear (u); + mpz_clear (t); +} + +int +mpz_root (mpz_t x, const mpz_t y, unsigned long z) +{ + int res; + mpz_t r; + + mpz_init (r); + mpz_rootrem (x, r, y, z); + res = r->_mp_size == 0; + mpz_clear (r); + + return res; +} + +/* Compute s = floor(sqrt(u)) and r = u - s^2. 
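+   (Implemented via mpz_rootrem with z == 2, i.e. the Newton iteration
+   x' = (y/x + x)/2 seen above.)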
Allows r == NULL */ +void +mpz_sqrtrem (mpz_t s, mpz_t r, const mpz_t u) +{ + mpz_rootrem (s, r, u, 2); +} + +void +mpz_sqrt (mpz_t s, const mpz_t u) +{ + mpz_rootrem (s, NULL, u, 2); +} + +int +mpz_perfect_square_p (const mpz_t u) +{ + if (u->_mp_size <= 0) + return (u->_mp_size == 0); + else + return mpz_root (NULL, u, 2); +} + +int +mpn_perfect_square_p (mp_srcptr p, mp_size_t n) +{ + mpz_t t; + + assert (n > 0); + assert (p [n-1] != 0); + return mpz_root (NULL, mpz_roinit_normal_n (t, p, n), 2); +} + +mp_size_t +mpn_sqrtrem (mp_ptr sp, mp_ptr rp, mp_srcptr p, mp_size_t n) +{ + mpz_t s, r, u; + mp_size_t res; + + assert (n > 0); + assert (p [n-1] != 0); + + mpz_init (r); + mpz_init (s); + mpz_rootrem (s, r, mpz_roinit_normal_n (u, p, n), 2); + + assert (s->_mp_size == (n+1)/2); + mpn_copyd (sp, s->_mp_d, s->_mp_size); + mpz_clear (s); + res = r->_mp_size; + if (rp) + mpn_copyd (rp, r->_mp_d, res); + mpz_clear (r); + return res; +} + +/* Combinatorics */ + +void +mpz_mfac_uiui (mpz_t x, unsigned long n, unsigned long m) +{ + mpz_set_ui (x, n + (n == 0)); + if (m + 1 < 2) return; + while (n > m + 1) + mpz_mul_ui (x, x, n -= m); +} + +void +mpz_2fac_ui (mpz_t x, unsigned long n) +{ + mpz_mfac_uiui (x, n, 2); +} + +void +mpz_fac_ui (mpz_t x, unsigned long n) +{ + mpz_mfac_uiui (x, n, 1); +} + +void +mpz_bin_uiui (mpz_t r, unsigned long n, unsigned long k) +{ + mpz_t t; + + mpz_set_ui (r, k <= n); + + if (k > (n >> 1)) + k = (k <= n) ? n - k : 0; + + mpz_init (t); + mpz_fac_ui (t, k); + + for (; k > 0; --k) + mpz_mul_ui (r, r, n--); + + mpz_divexact (r, r, t); + mpz_clear (t); +} + + +/* Primality testing */ + +/* Computes Kronecker (a/b) with odd b, a!=0 and GCD(a,b) = 1 */ +/* Adapted from JACOBI_BASE_METHOD==4 in mpn/generic/jacbase.c */ +static int +gmp_jacobi_coprime (mp_limb_t a, mp_limb_t b) +{ + int c, bit = 0; + + assert (b & 1); + assert (a != 0); + /* assert (mpn_gcd_11 (a, b) == 1); */ + + /* Below, we represent a and b shifted right so that the least + significant one bit is implicit. */ + b >>= 1; + + gmp_ctz(c, a); + a >>= 1; + + for (;;) + { + a >>= c; + /* (2/b) = -1 if b = 3 or 5 mod 8 */ + bit ^= c & (b ^ (b >> 1)); + if (a < b) + { + if (a == 0) + return bit & 1 ? -1 : 1; + bit ^= a & b; + a = b - a; + b -= a; + } + else + { + a -= b; + assert (a != 0); + } + + gmp_ctz(c, a); + ++c; + } +} + +static void +gmp_lucas_step_k_2k (mpz_t V, mpz_t Qk, const mpz_t n) +{ + mpz_mod (Qk, Qk, n); + /* V_{2k} <- V_k ^ 2 - 2Q^k */ + mpz_mul (V, V, V); + mpz_submul_ui (V, Qk, 2); + mpz_tdiv_r (V, V, n); + /* Q^{2k} = (Q^k)^2 */ + mpz_mul (Qk, Qk, Qk); +} + +/* Computes V_k, Q^k (mod n) for the Lucas' sequence */ +/* with P=1, Q=Q; k = (n>>b0)|1. */ +/* Requires an odd n > 4; b0 > 0; -2*Q must not overflow a long */ +/* Returns (U_k == 0) and sets V=V_k and Qk=Q^k. 
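+   It is the core of gmp_stronglucas below, the Lucas half of the BPSW
+   combination used by mpz_probab_prime_p.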
*/ +static int +gmp_lucas_mod (mpz_t V, mpz_t Qk, long Q, + mp_bitcnt_t b0, const mpz_t n) +{ + mp_bitcnt_t bs; + mpz_t U; + int res; + + assert (b0 > 0); + assert (Q <= - (LONG_MIN / 2)); + assert (Q >= - (LONG_MAX / 2)); + assert (mpz_cmp_ui (n, 4) > 0); + assert (mpz_odd_p (n)); + + mpz_init_set_ui (U, 1); /* U1 = 1 */ + mpz_set_ui (V, 1); /* V1 = 1 */ + mpz_set_si (Qk, Q); + + for (bs = mpz_sizeinbase (n, 2) - 1; --bs >= b0;) + { + /* U_{2k} <- U_k * V_k */ + mpz_mul (U, U, V); + /* V_{2k} <- V_k ^ 2 - 2Q^k */ + /* Q^{2k} = (Q^k)^2 */ + gmp_lucas_step_k_2k (V, Qk, n); + + /* A step k->k+1 is performed if the bit in $n$ is 1 */ + /* mpz_tstbit(n,bs) or the bit is 0 in $n$ but */ + /* should be 1 in $n+1$ (bs == b0) */ + if (b0 == bs || mpz_tstbit (n, bs)) + { + /* Q^{k+1} <- Q^k * Q */ + mpz_mul_si (Qk, Qk, Q); + /* U_{k+1} <- (U_k + V_k) / 2 */ + mpz_swap (U, V); /* Keep in V the old value of U_k */ + mpz_add (U, U, V); + /* We have to compute U/2, so we need an even value, */ + /* equivalent (mod n) */ + if (mpz_odd_p (U)) + mpz_add (U, U, n); + mpz_tdiv_q_2exp (U, U, 1); + /* V_{k+1} <-(D*U_k + V_k) / 2 = + U_{k+1} + (D-1)/2*U_k = U_{k+1} - 2Q*U_k */ + mpz_mul_si (V, V, -2*Q); + mpz_add (V, U, V); + mpz_tdiv_r (V, V, n); + } + mpz_tdiv_r (U, U, n); + } + + res = U->_mp_size == 0; + mpz_clear (U); + return res; +} + +/* Performs strong Lucas' test on x, with parameters suggested */ +/* for the BPSW test. Qk is only passed to recycle a variable. */ +/* Requires GCD (x,6) = 1.*/ +static int +gmp_stronglucas (const mpz_t x, mpz_t Qk) +{ + mp_bitcnt_t b0; + mpz_t V, n; + mp_limb_t maxD, D; /* The absolute value is stored. */ + long Q; + mp_limb_t tl; + + /* Test on the absolute value. */ + mpz_roinit_normal_n (n, x->_mp_d, GMP_ABS (x->_mp_size)); + + assert (mpz_odd_p (n)); + /* assert (mpz_gcd_ui (NULL, n, 6) == 1); */ + if (mpz_root (Qk, n, 2)) + return 0; /* A square is composite. */ + + /* Check Ds up to square root (in case, n is prime) + or avoid overflows */ + maxD = (Qk->_mp_size == 1) ? Qk->_mp_d [0] - 1 : GMP_LIMB_MAX; + + D = 3; + /* Search a D such that (D/n) = -1 in the sequence 5,-7,9,-11,.. */ + /* For those Ds we have (D/n) = (n/|D|) */ + do + { + if (D >= maxD) + return 1 + (D != GMP_LIMB_MAX); /* (1 + ! ~ D) */ + D += 2; + tl = mpz_tdiv_ui (n, D); + if (tl == 0) + return 0; + } + while (gmp_jacobi_coprime (tl, D) == 1); + + mpz_init (V); + + /* n-(D/n) = n+1 = d*2^{b0}, with d = (n>>b0) | 1 */ + b0 = mpn_common_scan (~ n->_mp_d[0], 0, n->_mp_d, n->_mp_size, GMP_LIMB_MAX); + /* b0 = mpz_scan0 (n, 0); */ + + /* D= P^2 - 4Q; P = 1; Q = (1-D)/4 */ + Q = (D & 2) ? (long) (D >> 2) + 1 : -(long) (D >> 2); + + if (! gmp_lucas_mod (V, Qk, Q, b0, n)) /* If Ud != 0 */ + while (V->_mp_size != 0 && --b0 != 0) /* while Vk != 0 */ + /* V <- V ^ 2 - 2Q^k */ + /* Q^{2k} = (Q^k)^2 */ + gmp_lucas_step_k_2k (V, Qk, n); + + mpz_clear (V); + return (b0 != 0); +} + +static int +gmp_millerrabin (const mpz_t n, const mpz_t nm1, mpz_t y, + const mpz_t q, mp_bitcnt_t k) +{ + assert (k > 0); + + /* Caller must initialize y to the base. */ + mpz_powm (y, y, q, n); + + if (mpz_cmp_ui (y, 1) == 0 || mpz_cmp (y, nm1) == 0) + return 1; + + while (--k > 0) + { + mpz_powm_ui (y, y, 2, n); + if (mpz_cmp (y, nm1) == 0) + return 1; + } + return 0; +} + +/* This product is 0xc0cfd797, and fits in 32 bits. 
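+   It is the product of the odd primes 3 through 29; a single gcd against
+   it cheaply rejects candidates divisible by any of these primes before
+   the Miller-Rabin and Lucas rounds are run.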
*/ +#define GMP_PRIME_PRODUCT \ + (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL) + +/* Bit (p+1)/2 is set, for each odd prime <= 61 */ +#define GMP_PRIME_MASK 0xc96996dcUL + +int +mpz_probab_prime_p (const mpz_t n, int reps) +{ + mpz_t nm1; + mpz_t q; + mpz_t y; + mp_bitcnt_t k; + int is_prime; + int j; + + /* Note that we use the absolute value of n only, for compatibility + with the real GMP. */ + if (mpz_even_p (n)) + return (mpz_cmpabs_ui (n, 2) == 0) ? 2 : 0; + + /* Above test excludes n == 0 */ + assert (n->_mp_size != 0); + + if (mpz_cmpabs_ui (n, 64) < 0) + return (GMP_PRIME_MASK >> (n->_mp_d[0] >> 1)) & 2; + + if (mpz_gcd_ui (NULL, n, GMP_PRIME_PRODUCT) != 1) + return 0; + + /* All prime factors are >= 31. */ + if (mpz_cmpabs_ui (n, 31*31) < 0) + return 2; + + mpz_init (nm1); + mpz_init (q); + + /* Find q and k, where q is odd and n = 1 + 2**k * q. */ + mpz_abs (nm1, n); + nm1->_mp_d[0] -= 1; + /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ + k = mpn_scan1 (nm1->_mp_d, 0); + mpz_tdiv_q_2exp (q, nm1, k); + + /* BPSW test */ + mpz_init_set_ui (y, 2); + is_prime = gmp_millerrabin (n, nm1, y, q, k) && gmp_stronglucas (n, y); + reps -= 24; /* skip the first 24 repetitions */ + + /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] = + j^2 + j + 41 using Euler's polynomial. We potentially stop early, + if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps > + 30 (a[30] == 971 > 31*31 == 961). */ + + for (j = 0; is_prime & (j < reps); j++) + { + mpz_set_ui (y, (unsigned long) j*j+j+41); + if (mpz_cmp (y, nm1) >= 0) + { + /* Don't try any further bases. This "early" break does not affect + the result for any reasonable reps value (<=5000 was tested) */ + assert (j >= 30); + break; + } + is_prime = gmp_millerrabin (n, nm1, y, q, k); + } + mpz_clear (nm1); + mpz_clear (q); + mpz_clear (y); + + return is_prime; +} + + +/* Logical operations and bit manipulation. */ + +/* Numbers are treated as if represented in two's complement (and + infinitely sign extended). For a negative values we get the two's + complement from -x = ~x + 1, where ~ is bitwise complement. + Negation transforms + + xxxx10...0 + + into + + yyyy10...0 + + where yyyy is the bitwise complement of xxxx. So least significant + bits, up to and including the first one bit, are unchanged, and + the more significant bits are all complemented. + + To change a bit from zero to one in a negative number, subtract the + corresponding power of two from the absolute value. This can never + underflow. To change a bit from one to zero, add the corresponding + power of two, and this might overflow. E.g., if x = -001111, the + two's complement is 110001. Clearing the least significant bit, we + get two's complement 110000, and -010000. */ + +int +mpz_tstbit (const mpz_t d, mp_bitcnt_t bit_index) +{ + mp_size_t limb_index; + unsigned shift; + mp_size_t ds; + mp_size_t dn; + mp_limb_t w; + int bit; + + ds = d->_mp_size; + dn = GMP_ABS (ds); + limb_index = bit_index / GMP_LIMB_BITS; + if (limb_index >= dn) + return ds < 0; + + shift = bit_index % GMP_LIMB_BITS; + w = d->_mp_d[limb_index]; + bit = (w >> shift) & 1; + + if (ds < 0) + { + /* d < 0. Check if any of the bits below is set: If so, our bit + must be complemented. 
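+         This is the two's-complement rule described in the block comment
+         above, applied without materializing the complemented value.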
*/ + if (shift > 0 && (mp_limb_t) (w << (GMP_LIMB_BITS - shift)) > 0) + return bit ^ 1; + while (--limb_index >= 0) + if (d->_mp_d[limb_index] > 0) + return bit ^ 1; + } + return bit; +} + +static void +mpz_abs_add_bit (mpz_t d, mp_bitcnt_t bit_index) +{ + mp_size_t dn, limb_index; + mp_limb_t bit; + mp_ptr dp; + + dn = GMP_ABS (d->_mp_size); + + limb_index = bit_index / GMP_LIMB_BITS; + bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); + + if (limb_index >= dn) + { + mp_size_t i; + /* The bit should be set outside of the end of the number. + We have to increase the size of the number. */ + dp = MPZ_REALLOC (d, limb_index + 1); + + dp[limb_index] = bit; + for (i = dn; i < limb_index; i++) + dp[i] = 0; + dn = limb_index + 1; + } + else + { + mp_limb_t cy; + + dp = d->_mp_d; + + cy = mpn_add_1 (dp + limb_index, dp + limb_index, dn - limb_index, bit); + if (cy > 0) + { + dp = MPZ_REALLOC (d, dn + 1); + dp[dn++] = cy; + } + } + + d->_mp_size = (d->_mp_size < 0) ? - dn : dn; +} + +static void +mpz_abs_sub_bit (mpz_t d, mp_bitcnt_t bit_index) +{ + mp_size_t dn, limb_index; + mp_ptr dp; + mp_limb_t bit; + + dn = GMP_ABS (d->_mp_size); + dp = d->_mp_d; + + limb_index = bit_index / GMP_LIMB_BITS; + bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); + + assert (limb_index < dn); + + gmp_assert_nocarry (mpn_sub_1 (dp + limb_index, dp + limb_index, + dn - limb_index, bit)); + dn = mpn_normalized_size (dp, dn); + d->_mp_size = (d->_mp_size < 0) ? - dn : dn; +} + +void +mpz_setbit (mpz_t d, mp_bitcnt_t bit_index) +{ + if (!mpz_tstbit (d, bit_index)) + { + if (d->_mp_size >= 0) + mpz_abs_add_bit (d, bit_index); + else + mpz_abs_sub_bit (d, bit_index); + } +} + +void +mpz_clrbit (mpz_t d, mp_bitcnt_t bit_index) +{ + if (mpz_tstbit (d, bit_index)) + { + if (d->_mp_size >= 0) + mpz_abs_sub_bit (d, bit_index); + else + mpz_abs_add_bit (d, bit_index); + } +} + +void +mpz_combit (mpz_t d, mp_bitcnt_t bit_index) +{ + if (mpz_tstbit (d, bit_index) ^ (d->_mp_size < 0)) + mpz_abs_sub_bit (d, bit_index); + else + mpz_abs_add_bit (d, bit_index); +} + +void +mpz_com (mpz_t r, const mpz_t u) +{ + mpz_add_ui (r, u, 1); + mpz_neg (r, r); +} + +void +mpz_and (mpz_t r, const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, rn, i; + mp_ptr up, vp, rp; + + mp_limb_t ux, vx, rx; + mp_limb_t uc, vc, rc; + mp_limb_t ul, vl, rl; + + un = GMP_ABS (u->_mp_size); + vn = GMP_ABS (v->_mp_size); + if (un < vn) + { + MPZ_SRCPTR_SWAP (u, v); + MP_SIZE_T_SWAP (un, vn); + } + if (vn == 0) + { + r->_mp_size = 0; + return; + } + + uc = u->_mp_size < 0; + vc = v->_mp_size < 0; + rc = uc & vc; + + ux = -uc; + vx = -vc; + rx = -rc; + + /* If the smaller input is positive, higher limbs don't matter. */ + rn = vx ? un : vn; + + rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); + + up = u->_mp_d; + vp = v->_mp_d; + + i = 0; + do + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + vl = (vp[i] ^ vx) + vc; + vc = vl < vc; + + rl = ( (ul & vl) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + while (++i < vn); + assert (vc == 0); + + for (; i < rn; i++) + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + rl = ( (ul & vx) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + if (rc) + rp[rn++] = rc; + else + rn = mpn_normalized_size (rp, rn); + + r->_mp_size = rx ? 
-rn : rn; +} + +void +mpz_ior (mpz_t r, const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, rn, i; + mp_ptr up, vp, rp; + + mp_limb_t ux, vx, rx; + mp_limb_t uc, vc, rc; + mp_limb_t ul, vl, rl; + + un = GMP_ABS (u->_mp_size); + vn = GMP_ABS (v->_mp_size); + if (un < vn) + { + MPZ_SRCPTR_SWAP (u, v); + MP_SIZE_T_SWAP (un, vn); + } + if (vn == 0) + { + mpz_set (r, u); + return; + } + + uc = u->_mp_size < 0; + vc = v->_mp_size < 0; + rc = uc | vc; + + ux = -uc; + vx = -vc; + rx = -rc; + + /* If the smaller input is negative, by sign extension higher limbs + don't matter. */ + rn = vx ? vn : un; + + rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); + + up = u->_mp_d; + vp = v->_mp_d; + + i = 0; + do + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + vl = (vp[i] ^ vx) + vc; + vc = vl < vc; + + rl = ( (ul | vl) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + while (++i < vn); + assert (vc == 0); + + for (; i < rn; i++) + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + rl = ( (ul | vx) ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + if (rc) + rp[rn++] = rc; + else + rn = mpn_normalized_size (rp, rn); + + r->_mp_size = rx ? -rn : rn; +} + +void +mpz_xor (mpz_t r, const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, i; + mp_ptr up, vp, rp; + + mp_limb_t ux, vx, rx; + mp_limb_t uc, vc, rc; + mp_limb_t ul, vl, rl; + + un = GMP_ABS (u->_mp_size); + vn = GMP_ABS (v->_mp_size); + if (un < vn) + { + MPZ_SRCPTR_SWAP (u, v); + MP_SIZE_T_SWAP (un, vn); + } + if (vn == 0) + { + mpz_set (r, u); + return; + } + + uc = u->_mp_size < 0; + vc = v->_mp_size < 0; + rc = uc ^ vc; + + ux = -uc; + vx = -vc; + rx = -rc; + + rp = MPZ_REALLOC (r, un + (mp_size_t) rc); + + up = u->_mp_d; + vp = v->_mp_d; + + i = 0; + do + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + vl = (vp[i] ^ vx) + vc; + vc = vl < vc; + + rl = (ul ^ vl ^ rx) + rc; + rc = rl < rc; + rp[i] = rl; + } + while (++i < vn); + assert (vc == 0); + + for (; i < un; i++) + { + ul = (up[i] ^ ux) + uc; + uc = ul < uc; + + rl = (ul ^ ux) + rc; + rc = rl < rc; + rp[i] = rl; + } + if (rc) + rp[un++] = rc; + else + un = mpn_normalized_size (rp, un); + + r->_mp_size = rx ? -un : un; +} + +static unsigned +gmp_popcount_limb (mp_limb_t x) +{ + unsigned c; + + /* Do 16 bits at a time, to avoid limb-sized constants. 
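+     Each 16-bit chunk goes through the usual SWAR reduction: the 0x5555
+     step leaves per-2-bit counts, the 0x3333 step sums those into nibble
+     counts, and the final shifts add the nibble and byte counts together.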
*/ + int LOCAL_SHIFT_BITS = 16; + for (c = 0; x > 0;) + { + unsigned w = x - ((x >> 1) & 0x5555); + w = ((w >> 2) & 0x3333) + (w & 0x3333); + w = (w >> 4) + w; + w = ((w >> 8) & 0x000f) + (w & 0x000f); + c += w; + if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) + x >>= LOCAL_SHIFT_BITS; + else + x = 0; + } + return c; +} + +mp_bitcnt_t +mpn_popcount (mp_srcptr p, mp_size_t n) +{ + mp_size_t i; + mp_bitcnt_t c; + + for (c = 0, i = 0; i < n; i++) + c += gmp_popcount_limb (p[i]); + + return c; +} + +mp_bitcnt_t +mpz_popcount (const mpz_t u) +{ + mp_size_t un; + + un = u->_mp_size; + + if (un < 0) + return ~(mp_bitcnt_t) 0; + + return mpn_popcount (u->_mp_d, un); +} + +mp_bitcnt_t +mpz_hamdist (const mpz_t u, const mpz_t v) +{ + mp_size_t un, vn, i; + mp_limb_t uc, vc, ul, vl, comp; + mp_srcptr up, vp; + mp_bitcnt_t c; + + un = u->_mp_size; + vn = v->_mp_size; + + if ( (un ^ vn) < 0) + return ~(mp_bitcnt_t) 0; + + comp = - (uc = vc = (un < 0)); + if (uc) + { + assert (vn < 0); + un = -un; + vn = -vn; + } + + up = u->_mp_d; + vp = v->_mp_d; + + if (un < vn) + MPN_SRCPTR_SWAP (up, un, vp, vn); + + for (i = 0, c = 0; i < vn; i++) + { + ul = (up[i] ^ comp) + uc; + uc = ul < uc; + + vl = (vp[i] ^ comp) + vc; + vc = vl < vc; + + c += gmp_popcount_limb (ul ^ vl); + } + assert (vc == 0); + + for (; i < un; i++) + { + ul = (up[i] ^ comp) + uc; + uc = ul < uc; + + c += gmp_popcount_limb (ul ^ comp); + } + + return c; +} + +mp_bitcnt_t +mpz_scan1 (const mpz_t u, mp_bitcnt_t starting_bit) +{ + mp_ptr up; + mp_size_t us, un, i; + mp_limb_t limb, ux; + + us = u->_mp_size; + un = GMP_ABS (us); + i = starting_bit / GMP_LIMB_BITS; + + /* Past the end there's no 1 bits for u>=0, or an immediate 1 bit + for u<0. Notice this test picks up any u==0 too. */ + if (i >= un) + return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit); + + up = u->_mp_d; + ux = 0; + limb = up[i]; + + if (starting_bit != 0) + { + if (us < 0) + { + ux = mpn_zero_p (up, i); + limb = ~ limb + ux; + ux = - (mp_limb_t) (limb >= ux); + } + + /* Mask to 0 all bits before starting_bit, thus ignoring them. */ + limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); + } + + return mpn_common_scan (limb, i, up, un, ux); +} + +mp_bitcnt_t +mpz_scan0 (const mpz_t u, mp_bitcnt_t starting_bit) +{ + mp_ptr up; + mp_size_t us, un, i; + mp_limb_t limb, ux; + + us = u->_mp_size; + ux = - (mp_limb_t) (us >= 0); + un = GMP_ABS (us); + i = starting_bit / GMP_LIMB_BITS; + + /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for + u<0. Notice this test picks up all cases of u==0 too. */ + if (i >= un) + return (ux ? starting_bit : ~(mp_bitcnt_t) 0); + + up = u->_mp_d; + limb = up[i] ^ ux; + + if (ux == 0) + limb -= mpn_zero_p (up, i); /* limb = ~(~limb + zero_p) */ + + /* Mask all bits before starting_bit, thus ignoring them. */ + limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); + + return mpn_common_scan (limb, i, up, un, ux); +} + + +/* MPZ base conversion. 
*/ + +size_t +mpz_sizeinbase (const mpz_t u, int base) +{ + mp_size_t un, tn; + mp_srcptr up; + mp_ptr tp; + mp_bitcnt_t bits; + struct gmp_div_inverse bi; + size_t ndigits; + + assert (base >= 2); + assert (base <= 62); + + un = GMP_ABS (u->_mp_size); + if (un == 0) + return 1; + + up = u->_mp_d; + + bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]); + switch (base) + { + case 2: + return bits; + case 4: + return (bits + 1) / 2; + case 8: + return (bits + 2) / 3; + case 16: + return (bits + 3) / 4; + case 32: + return (bits + 4) / 5; + /* FIXME: Do something more clever for the common case of base + 10. */ + } + + tp = gmp_alloc_limbs (un); + mpn_copyi (tp, up, un); + mpn_div_qr_1_invert (&bi, base); + + tn = un; + ndigits = 0; + do + { + ndigits++; + mpn_div_qr_1_preinv (tp, tp, tn, &bi); + tn -= (tp[tn-1] == 0); + } + while (tn > 0); + + gmp_free_limbs (tp, un); + return ndigits; +} + +char * +mpz_get_str (char *sp, int base, const mpz_t u) +{ + unsigned bits; + const char *digits; + mp_size_t un; + size_t i, sn, osn; + + digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; + if (base > 1) + { + if (base <= 36) + digits = "0123456789abcdefghijklmnopqrstuvwxyz"; + else if (base > 62) + return NULL; + } + else if (base >= -1) + base = 10; + else + { + base = -base; + if (base > 36) + return NULL; + } + + sn = 1 + mpz_sizeinbase (u, base); + if (!sp) + { + osn = 1 + sn; + sp = (char *) gmp_alloc (osn); + } + else + osn = 0; + un = GMP_ABS (u->_mp_size); + + if (un == 0) + { + sp[0] = '0'; + sn = 1; + goto ret; + } + + i = 0; + + if (u->_mp_size < 0) + sp[i++] = '-'; + + bits = mpn_base_power_of_two_p (base); + + if (bits) + /* Not modified in this case. */ + sn = i + mpn_get_str_bits ((unsigned char *) sp + i, bits, u->_mp_d, un); + else + { + struct mpn_base_info info; + mp_ptr tp; + + mpn_get_base_info (&info, base); + tp = gmp_alloc_limbs (un); + mpn_copyi (tp, u->_mp_d, un); + + sn = i + mpn_get_str_other ((unsigned char *) sp + i, base, &info, tp, un); + gmp_free_limbs (tp, un); + } + + for (; i < sn; i++) + sp[i] = digits[(unsigned char) sp[i]]; + +ret: + sp[sn] = '\0'; + if (osn && osn != sn + 1) + sp = (char*) gmp_realloc (sp, osn, sn + 1); + return sp; +} + +int +mpz_set_str (mpz_t r, const char *sp, int base) +{ + unsigned bits, value_of_a; + mp_size_t rn, alloc; + mp_ptr rp; + size_t dn, sn; + int sign; + unsigned char *dp; + + assert (base == 0 || (base >= 2 && base <= 62)); + + while (isspace( (unsigned char) *sp)) + sp++; + + sign = (*sp == '-'); + sp += sign; + + if (base == 0) + { + if (sp[0] == '0') + { + if (sp[1] == 'x' || sp[1] == 'X') + { + base = 16; + sp += 2; + } + else if (sp[1] == 'b' || sp[1] == 'B') + { + base = 2; + sp += 2; + } + else + base = 8; + } + else + base = 10; + } + + if (!*sp) + { + r->_mp_size = 0; + return -1; + } + sn = strlen(sp); + dp = (unsigned char *) gmp_alloc (sn); + + value_of_a = (base > 36) ? 
36 : 10; + for (dn = 0; *sp; sp++) + { + unsigned digit; + + if (isspace ((unsigned char) *sp)) + continue; + else if (*sp >= '0' && *sp <= '9') + digit = *sp - '0'; + else if (*sp >= 'a' && *sp <= 'z') + digit = *sp - 'a' + value_of_a; + else if (*sp >= 'A' && *sp <= 'Z') + digit = *sp - 'A' + 10; + else + digit = base; /* fail */ + + if (digit >= (unsigned) base) + { + gmp_free (dp, sn); + r->_mp_size = 0; + return -1; + } + + dp[dn++] = digit; + } + + if (!dn) + { + gmp_free (dp, sn); + r->_mp_size = 0; + return -1; + } + bits = mpn_base_power_of_two_p (base); + + if (bits > 0) + { + alloc = (dn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; + rp = MPZ_REALLOC (r, alloc); + rn = mpn_set_str_bits (rp, dp, dn, bits); + } + else + { + struct mpn_base_info info; + mpn_get_base_info (&info, base); + alloc = (dn + info.exp - 1) / info.exp; + rp = MPZ_REALLOC (r, alloc); + rn = mpn_set_str_other (rp, dp, dn, base, &info); + /* Normalization, needed for all-zero input. */ + assert (rn > 0); + rn -= rp[rn-1] == 0; + } + assert (rn <= alloc); + gmp_free (dp, sn); + + r->_mp_size = sign ? - rn : rn; + + return 0; +} + +int +mpz_init_set_str (mpz_t r, const char *sp, int base) +{ + mpz_init (r); + return mpz_set_str (r, sp, base); +} + +size_t +mpz_out_str (FILE *stream, int base, const mpz_t x) +{ + char *str; + size_t len, n; + + str = mpz_get_str (NULL, base, x); + if (!str) + return 0; + len = strlen (str); + n = fwrite (str, 1, len, stream); + gmp_free (str, len + 1); + return n; +} + + +static int +gmp_detect_endian (void) +{ + static const int i = 2; + const unsigned char *p = (const unsigned char *) &i; + return 1 - *p; +} + +/* Import and export. Does not support nails. */ +void +mpz_import (mpz_t r, size_t count, int order, size_t size, int endian, + size_t nails, const void *src) +{ + const unsigned char *p; + ptrdiff_t word_step; + mp_ptr rp; + mp_size_t rn; + + /* The current (partial) limb. */ + mp_limb_t limb; + /* The number of bytes already copied to this limb (starting from + the low end). */ + size_t bytes; + /* The index where the limb should be stored, when completed. */ + mp_size_t i; + + if (nails != 0) + gmp_die ("mpz_import: Nails not supported."); + + assert (order == 1 || order == -1); + assert (endian >= -1 && endian <= 1); + + if (endian == 0) + endian = gmp_detect_endian (); + + p = (unsigned char *) src; + + word_step = (order != endian) ? 2 * size : 0; + + /* Process bytes from the least significant end, so point p at the + least significant word. */ + if (order == 1) + { + p += size * (count - 1); + word_step = - word_step; + } + + /* And at least significant byte of that word. 
*/ + if (endian == 1) + p += (size - 1); + + rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t); + rp = MPZ_REALLOC (r, rn); + + for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step) + { + size_t j; + for (j = 0; j < size; j++, p -= (ptrdiff_t) endian) + { + limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT); + if (bytes == sizeof(mp_limb_t)) + { + rp[i++] = limb; + bytes = 0; + limb = 0; + } + } + } + assert (i + (bytes > 0) == rn); + if (limb != 0) + rp[i++] = limb; + else + i = mpn_normalized_size (rp, i); + + r->_mp_size = i; +} + +void * +mpz_export (void *r, size_t *countp, int order, size_t size, int endian, + size_t nails, const mpz_t u) +{ + size_t count; + mp_size_t un; + + if (nails != 0) + gmp_die ("mpz_export: Nails not supported."); + + assert (order == 1 || order == -1); + assert (endian >= -1 && endian <= 1); + assert (size > 0 || u->_mp_size == 0); + + un = u->_mp_size; + count = 0; + if (un != 0) + { + size_t k; + unsigned char *p; + ptrdiff_t word_step; + /* The current (partial) limb. */ + mp_limb_t limb; + /* The number of bytes left to do in this limb. */ + size_t bytes; + /* The index where the limb was read. */ + mp_size_t i; + + un = GMP_ABS (un); + + /* Count bytes in top limb. */ + limb = u->_mp_d[un-1]; + assert (limb != 0); + + k = (GMP_LIMB_BITS <= CHAR_BIT); + if (!k) + { + do { + int LOCAL_CHAR_BIT = CHAR_BIT; + k++; limb >>= LOCAL_CHAR_BIT; + } while (limb != 0); + } + /* else limb = 0; */ + + count = (k + (un-1) * sizeof (mp_limb_t) + size - 1) / size; + + if (!r) + r = gmp_alloc (count * size); + + if (endian == 0) + endian = gmp_detect_endian (); + + p = (unsigned char *) r; + + word_step = (order != endian) ? 2 * size : 0; + + /* Process bytes from the least significant end, so point p at the + least significant word. */ + if (order == 1) + { + p += size * (count - 1); + word_step = - word_step; + } + + /* And at least significant byte of that word. */ + if (endian == 1) + p += (size - 1); + + for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step) + { + size_t j; + for (j = 0; j < size; ++j, p -= (ptrdiff_t) endian) + { + if (sizeof (mp_limb_t) == 1) + { + if (i < un) + *p = u->_mp_d[i++]; + else + *p = 0; + } + else + { + int LOCAL_CHAR_BIT = CHAR_BIT; + if (bytes == 0) + { + if (i < un) + limb = u->_mp_d[i++]; + bytes = sizeof (mp_limb_t); + } + *p = limb; + limb >>= LOCAL_CHAR_BIT; + bytes--; + } + } + } + assert (i == un); + assert (k == count); + } + + if (countp) + *countp = count; + + return r; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp.h new file mode 100644 index 0000000000..f28cb360ce --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp.h @@ -0,0 +1,311 @@ +/* mini-gmp, a minimalistic implementation of a GNU GMP subset. + +Copyright 2011-2015, 2017, 2019-2021 Free Software Foundation, Inc. + +This file is part of the GNU MP Library. + +The GNU MP Library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 2 of the License, or (at your option) any + later version. + +or both in parallel, as here. 
+ +The GNU MP Library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the GNU MP Library. If not, +see https://www.gnu.org/licenses/. */ + +/* About mini-gmp: This is a minimal implementation of a subset of the + GMP interface. It is intended for inclusion into applications which + have modest bignums needs, as a fallback when the real GMP library + is not installed. + + This file defines the public interface. */ + +#ifndef __MINI_GMP_H__ +#define __MINI_GMP_H__ + +/* For size_t */ +#include + +#if defined (__cplusplus) +extern "C" { +#endif + +void mp_set_memory_functions (void *(*) (size_t), + void *(*) (void *, size_t, size_t), + void (*) (void *, size_t)); + +void mp_get_memory_functions (void *(**) (size_t), + void *(**) (void *, size_t, size_t), + void (**) (void *, size_t)); + +#ifndef MINI_GMP_LIMB_TYPE +#define MINI_GMP_LIMB_TYPE long +#endif + +typedef unsigned MINI_GMP_LIMB_TYPE mp_limb_t; +typedef long mp_size_t; +typedef unsigned long mp_bitcnt_t; + +typedef mp_limb_t *mp_ptr; +typedef const mp_limb_t *mp_srcptr; + +typedef struct +{ + int _mp_alloc; /* Number of *limbs* allocated and pointed + to by the _mp_d field. */ + int _mp_size; /* abs(_mp_size) is the number of limbs the + last field points to. If _mp_size is + negative this is a negative number. */ + mp_limb_t *_mp_d; /* Pointer to the limbs. */ +} __mpz_struct; + +typedef __mpz_struct mpz_t[1]; + +typedef __mpz_struct *mpz_ptr; +typedef const __mpz_struct *mpz_srcptr; + +extern const int mp_bits_per_limb; + +void mpn_copyi (mp_ptr, mp_srcptr, mp_size_t); +void mpn_copyd (mp_ptr, mp_srcptr, mp_size_t); +void mpn_zero (mp_ptr, mp_size_t); + +int mpn_cmp (mp_srcptr, mp_srcptr, mp_size_t); +int mpn_zero_p (mp_srcptr, mp_size_t); + +mp_limb_t mpn_add_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_add_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +mp_limb_t mpn_add (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); + +mp_limb_t mpn_sub_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_sub_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +mp_limb_t mpn_sub (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); + +mp_limb_t mpn_mul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_addmul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); +mp_limb_t mpn_submul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); + +mp_limb_t mpn_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); +void mpn_mul_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +void mpn_sqr (mp_ptr, mp_srcptr, mp_size_t); +int mpn_perfect_square_p (mp_srcptr, mp_size_t); +mp_size_t mpn_sqrtrem (mp_ptr, mp_ptr, mp_srcptr, mp_size_t); +mp_size_t mpn_gcd (mp_ptr, mp_ptr, mp_size_t, mp_ptr, mp_size_t); + +mp_limb_t mpn_lshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); +mp_limb_t mpn_rshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); + +mp_bitcnt_t mpn_scan0 (mp_srcptr, mp_bitcnt_t); +mp_bitcnt_t mpn_scan1 (mp_srcptr, mp_bitcnt_t); + +void mpn_com (mp_ptr, mp_srcptr, mp_size_t); +mp_limb_t mpn_neg (mp_ptr, mp_srcptr, mp_size_t); + +mp_bitcnt_t mpn_popcount (mp_srcptr, mp_size_t); + +mp_limb_t mpn_invert_3by2 (mp_limb_t, mp_limb_t); +#define mpn_invert_limb(x) mpn_invert_3by2 ((x), 0) + +size_t mpn_get_str (unsigned char *, int, mp_ptr, mp_size_t); 
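As a quick orientation for the interface being vendored here, the following standalone sketch drives the mpz subset declared in this header: it checks a modulus for primality and computes a modular inverse. It is an illustration rather than part of the patch; mini_gmp_demo and its constants are invented, every call it makes is declared in mini-gmp.h, and the default malloc-based allocator is assumed.

/* Illustrative use of the mpz subset declared in mini-gmp.h. */
#include <stdio.h>
#include <string.h>
#include "mini-gmp.h"

static void
mini_gmp_demo (void)
{
  mpz_t p, a, inv;

  mpz_init_set_str (p, "1000003", 10); /* small odd prime, base 10 */
  mpz_init_set_ui (a, 12345);
  mpz_init (inv);

  /* 2 means certainly prime, 1 probably prime, 0 composite; the count is the
     total repetitions, of which the built-in BPSW test accounts for 24. */
  int probably_prime = mpz_probab_prime_p (p, 24);

  /* inv = a^(-1) mod p; returns non-zero iff the inverse exists. */
  int invertible = mpz_invert (inv, a, p);

  if (probably_prime && invertible)
    {
      char *s = mpz_get_str (NULL, 10, inv);
      printf ("a^-1 mod p = %s\n", s);

      /* The string came from the configured allocator, so release it with
         the matching free function instead of a bare free(). */
      void *(*alloc_fn) (size_t);
      void *(*realloc_fn) (void *, size_t, size_t);
      void (*free_fn) (void *, size_t);
      mp_get_memory_functions (&alloc_fn, &realloc_fn, &free_fn);
      (void) alloc_fn;
      (void) realloc_fn;
      free_fn (s, strlen (s) + 1);
    }

  mpz_clear (p);
  mpz_clear (a);
  mpz_clear (inv);
}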
+mp_size_t mpn_set_str (mp_ptr, const unsigned char *, size_t, int); + +void mpz_init (mpz_t); +void mpz_init2 (mpz_t, mp_bitcnt_t); +void mpz_clear (mpz_t); + +#define mpz_odd_p(z) (((z)->_mp_size != 0) & (int) (z)->_mp_d[0]) +#define mpz_even_p(z) (! mpz_odd_p (z)) + +int mpz_sgn (const mpz_t); +int mpz_cmp_si (const mpz_t, long); +int mpz_cmp_ui (const mpz_t, unsigned long); +int mpz_cmp (const mpz_t, const mpz_t); +int mpz_cmpabs_ui (const mpz_t, unsigned long); +int mpz_cmpabs (const mpz_t, const mpz_t); +int mpz_cmp_d (const mpz_t, double); +int mpz_cmpabs_d (const mpz_t, double); + +void mpz_abs (mpz_t, const mpz_t); +void mpz_neg (mpz_t, const mpz_t); +void mpz_swap (mpz_t, mpz_t); + +void mpz_add_ui (mpz_t, const mpz_t, unsigned long); +void mpz_add (mpz_t, const mpz_t, const mpz_t); +void mpz_sub_ui (mpz_t, const mpz_t, unsigned long); +void mpz_ui_sub (mpz_t, unsigned long, const mpz_t); +void mpz_sub (mpz_t, const mpz_t, const mpz_t); + +void mpz_mul_si (mpz_t, const mpz_t, long int); +void mpz_mul_ui (mpz_t, const mpz_t, unsigned long int); +void mpz_mul (mpz_t, const mpz_t, const mpz_t); +void mpz_mul_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_addmul_ui (mpz_t, const mpz_t, unsigned long int); +void mpz_addmul (mpz_t, const mpz_t, const mpz_t); +void mpz_submul_ui (mpz_t, const mpz_t, unsigned long int); +void mpz_submul (mpz_t, const mpz_t, const mpz_t); + +void mpz_cdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_fdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_tdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_cdiv_q (mpz_t, const mpz_t, const mpz_t); +void mpz_fdiv_q (mpz_t, const mpz_t, const mpz_t); +void mpz_tdiv_q (mpz_t, const mpz_t, const mpz_t); +void mpz_cdiv_r (mpz_t, const mpz_t, const mpz_t); +void mpz_fdiv_r (mpz_t, const mpz_t, const mpz_t); +void mpz_tdiv_r (mpz_t, const mpz_t, const mpz_t); + +void mpz_cdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_fdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_tdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_cdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_fdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); +void mpz_tdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); + +void mpz_mod (mpz_t, const mpz_t, const mpz_t); + +void mpz_divexact (mpz_t, const mpz_t, const mpz_t); + +int mpz_divisible_p (const mpz_t, const mpz_t); +int mpz_congruent_p (const mpz_t, const mpz_t, const mpz_t); + +unsigned long mpz_cdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); +unsigned long mpz_fdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); +unsigned long mpz_tdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); +unsigned long mpz_cdiv_q_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_fdiv_q_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_tdiv_q_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_cdiv_r_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_fdiv_r_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_tdiv_r_ui (mpz_t, const mpz_t, unsigned long); +unsigned long mpz_cdiv_ui (const mpz_t, unsigned long); +unsigned long mpz_fdiv_ui (const mpz_t, unsigned long); +unsigned long mpz_tdiv_ui (const mpz_t, unsigned long); + +unsigned long mpz_mod_ui (mpz_t, const mpz_t, unsigned long); + +void mpz_divexact_ui (mpz_t, const mpz_t, unsigned long); + +int mpz_divisible_ui_p (const mpz_t, unsigned long); + +unsigned long mpz_gcd_ui (mpz_t, const mpz_t, unsigned long); +void mpz_gcd (mpz_t, const mpz_t, const mpz_t); +void 
mpz_gcdext (mpz_t, mpz_t, mpz_t, const mpz_t, const mpz_t); +void mpz_lcm_ui (mpz_t, const mpz_t, unsigned long); +void mpz_lcm (mpz_t, const mpz_t, const mpz_t); +int mpz_invert (mpz_t, const mpz_t, const mpz_t); + +void mpz_sqrtrem (mpz_t, mpz_t, const mpz_t); +void mpz_sqrt (mpz_t, const mpz_t); +int mpz_perfect_square_p (const mpz_t); + +void mpz_pow_ui (mpz_t, const mpz_t, unsigned long); +void mpz_ui_pow_ui (mpz_t, unsigned long, unsigned long); +void mpz_powm (mpz_t, const mpz_t, const mpz_t, const mpz_t); +void mpz_powm_ui (mpz_t, const mpz_t, unsigned long, const mpz_t); + +void mpz_rootrem (mpz_t, mpz_t, const mpz_t, unsigned long); +int mpz_root (mpz_t, const mpz_t, unsigned long); + +void mpz_fac_ui (mpz_t, unsigned long); +void mpz_2fac_ui (mpz_t, unsigned long); +void mpz_mfac_uiui (mpz_t, unsigned long, unsigned long); +void mpz_bin_uiui (mpz_t, unsigned long, unsigned long); + +int mpz_probab_prime_p (const mpz_t, int); + +int mpz_tstbit (const mpz_t, mp_bitcnt_t); +void mpz_setbit (mpz_t, mp_bitcnt_t); +void mpz_clrbit (mpz_t, mp_bitcnt_t); +void mpz_combit (mpz_t, mp_bitcnt_t); + +void mpz_com (mpz_t, const mpz_t); +void mpz_and (mpz_t, const mpz_t, const mpz_t); +void mpz_ior (mpz_t, const mpz_t, const mpz_t); +void mpz_xor (mpz_t, const mpz_t, const mpz_t); + +mp_bitcnt_t mpz_popcount (const mpz_t); +mp_bitcnt_t mpz_hamdist (const mpz_t, const mpz_t); +mp_bitcnt_t mpz_scan0 (const mpz_t, mp_bitcnt_t); +mp_bitcnt_t mpz_scan1 (const mpz_t, mp_bitcnt_t); + +int mpz_fits_slong_p (const mpz_t); +int mpz_fits_ulong_p (const mpz_t); +int mpz_fits_sint_p (const mpz_t); +int mpz_fits_uint_p (const mpz_t); +int mpz_fits_sshort_p (const mpz_t); +int mpz_fits_ushort_p (const mpz_t); +long int mpz_get_si (const mpz_t); +unsigned long int mpz_get_ui (const mpz_t); +double mpz_get_d (const mpz_t); +size_t mpz_size (const mpz_t); +mp_limb_t mpz_getlimbn (const mpz_t, mp_size_t); + +void mpz_realloc2 (mpz_t, mp_bitcnt_t); +mp_srcptr mpz_limbs_read (mpz_srcptr); +mp_ptr mpz_limbs_modify (mpz_t, mp_size_t); +mp_ptr mpz_limbs_write (mpz_t, mp_size_t); +void mpz_limbs_finish (mpz_t, mp_size_t); +mpz_srcptr mpz_roinit_n (mpz_t, mp_srcptr, mp_size_t); + +#define MPZ_ROINIT_N(xp, xs) {{0, (xs),(xp) }} + +void mpz_set_si (mpz_t, signed long int); +void mpz_set_ui (mpz_t, unsigned long int); +void mpz_set (mpz_t, const mpz_t); +void mpz_set_d (mpz_t, double); + +void mpz_init_set_si (mpz_t, signed long int); +void mpz_init_set_ui (mpz_t, unsigned long int); +void mpz_init_set (mpz_t, const mpz_t); +void mpz_init_set_d (mpz_t, double); + +size_t mpz_sizeinbase (const mpz_t, int); +char *mpz_get_str (char *, int, const mpz_t); +int mpz_set_str (mpz_t, const char *, int); +int mpz_init_set_str (mpz_t, const char *, int); + +/* This long list taken from gmp.h. */ +/* For reference, "defined(EOF)" cannot be used here. In g++ 2.95.4, + defines EOF but not FILE. 
*/ +#if defined (FILE) \ + || defined (H_STDIO) \ + || defined (_H_STDIO) /* AIX */ \ + || defined (_STDIO_H) /* glibc, Sun, SCO */ \ + || defined (_STDIO_H_) /* BSD, OSF */ \ + || defined (__STDIO_H) /* Borland */ \ + || defined (__STDIO_H__) /* IRIX */ \ + || defined (_STDIO_INCLUDED) /* HPUX */ \ + || defined (__dj_include_stdio_h_) /* DJGPP */ \ + || defined (_FILE_DEFINED) /* Microsoft */ \ + || defined (__STDIO__) /* Apple MPW MrC */ \ + || defined (_MSL_STDIO_H) /* Metrowerks */ \ + || defined (_STDIO_H_INCLUDED) /* QNX4 */ \ + || defined (_ISO_STDIO_ISO_H) /* Sun C++ */ \ + || defined (__STDIO_LOADED) /* VMS */ \ + || defined (_STDIO) /* HPE NonStop */ \ + || defined (__DEFINED_FILE) /* musl */ +size_t mpz_out_str (FILE *, int, const mpz_t); +#endif + +void mpz_import (mpz_t, size_t, int, size_t, int, size_t, const void *); +void *mpz_export (void *, size_t *, int, size_t, int, size_t, const mpz_t); + +#if defined (__cplusplus) +} +#endif +#endif /* __MINI_GMP_H__ */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mp.c new file mode 100644 index 0000000000..27f4a963db --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mp.c @@ -0,0 +1,357 @@ +#include +#include +#include +#include + +// double-wide multiplication +void +MUL(digit_t *out, const digit_t a, const digit_t b) +{ +#ifdef RADIX_32 + uint64_t r = (uint64_t)a * b; + out[0] = r & 0xFFFFFFFFUL; + out[1] = r >> 32; + +#elif defined(RADIX_64) && defined(_MSC_VER) + uint64_t umul_hi; + out[0] = _umul128(a, b, &umul_hi); + out[1] = umul_hi; + +#elif defined(RADIX_64) && defined(HAVE_UINT128) + unsigned __int128 umul_tmp; + umul_tmp = (unsigned __int128)(a) * (unsigned __int128)(b); + out[0] = (uint64_t)umul_tmp; + out[1] = (uint64_t)(umul_tmp >> 64); + +#else + register digit_t al, ah, bl, bh, temp; + digit_t albl, albh, ahbl, ahbh, res1, res2, res3, carry; + digit_t mask_low = (digit_t)(-1) >> (sizeof(digit_t) * 4), mask_high = (digit_t)(-1) << (sizeof(digit_t) * 4); + al = a & mask_low; // Low part + ah = a >> (sizeof(digit_t) * 4); // High part + bl = b & mask_low; + bh = b >> (sizeof(digit_t) * 4); + + albl = al * bl; + albh = al * bh; + ahbl = ah * bl; + ahbh = ah * bh; + out[0] = albl & mask_low; // out00 + + res1 = albl >> (sizeof(digit_t) * 4); + res2 = ahbl & mask_low; + res3 = albh & mask_low; + temp = res1 + res2 + res3; + carry = temp >> (sizeof(digit_t) * 4); + out[0] ^= temp << (sizeof(digit_t) * 4); // out01 + + res1 = ahbl >> (sizeof(digit_t) * 4); + res2 = albh >> (sizeof(digit_t) * 4); + res3 = ahbh & mask_low; + temp = res1 + res2 + res3 + carry; + out[1] = temp & mask_low; // out10 + carry = temp & mask_high; + out[1] ^= (ahbh & mask_high) + carry; // out11 + +#endif +} + +void +mp_add(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords) +{ // Multiprecision addition + unsigned int i, carry = 0; + + for (i = 0; i < nwords; i++) { + ADDC(c[i], carry, a[i], b[i], carry); + } +} + +digit_t +mp_shiftr(digit_t *x, const unsigned int shift, const unsigned int nwords) +{ // Multiprecision right shift by 1...RADIX-1 + digit_t bit_out = x[0] & 1; + + for (unsigned int i = 0; i < nwords - 1; i++) { + SHIFTR(x[i + 1], x[i], shift, x[i], RADIX); + } + x[nwords - 1] >>= shift; + return bit_out; +} + +void +mp_shiftl(digit_t *x, const unsigned int shift, const unsigned int nwords) +{ // Multiprecision left shift by 1...RADIX-1 + + for (int i = nwords - 1; i > 0; i--) { + SHIFTL(x[i], x[i - 1], shift, x[i], RADIX); + } + x[0] <<= 
shift; +} + +void +multiple_mp_shiftl(digit_t *x, const unsigned int shift, const unsigned int nwords) +{ + int t = shift; + while (t > RADIX - 1) { + mp_shiftl(x, RADIX - 1, nwords); + t = t - (RADIX - 1); + } + mp_shiftl(x, t, nwords); +} + +// The below functions were taken from the EC module + +void +mp_sub(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords) +{ // Multiprecision subtraction, assuming a > b + unsigned int i, borrow = 0; + + for (i = 0; i < nwords; i++) { + SUBC(c[i], borrow, a[i], b[i], borrow); + } +} + +void +select_ct(digit_t *c, const digit_t *a, const digit_t *b, const digit_t mask, const int nwords) +{ // Select c <- a if mask = 0, select c <- b if mask = 1...1 + + for (int i = 0; i < nwords; i++) { + c[i] = ((a[i] ^ b[i]) & mask) ^ a[i]; + } +} + +void +swap_ct(digit_t *a, digit_t *b, const digit_t option, const int nwords) +{ // Swap entries + // If option = 0 then P <- P and Q <- Q, else if option = 0xFF...FF then a <- b and b <- a + digit_t temp; + + for (int i = 0; i < nwords; i++) { + temp = option & (a[i] ^ b[i]); + a[i] = temp ^ a[i]; + b[i] = temp ^ b[i]; + } +} + +int +mp_compare(const digit_t *a, const digit_t *b, unsigned int nwords) +{ // Multiprecision comparison, a=b? : (1) a>b, (0) a=b, (-1) a= 0; i--) { + if (a[i] > b[i]) + return 1; + else if (a[i] < b[i]) + return -1; + } + return 0; +} + +bool +mp_is_zero(const digit_t *a, unsigned int nwords) +{ // Is a multiprecision element zero? + // Returns 1 (true) if a=0, 0 (false) otherwise + digit_t r = 0; + + for (unsigned int i = 0; i < nwords; i++) + r |= a[i] ^ 0; + + return (bool)is_digit_zero_ct(r); +} + +void +mp_mul2(digit_t *c, const digit_t *a, const digit_t *b) +{ // Multiprecision multiplication fixed to two-digit operands + unsigned int carry = 0; + digit_t t0[2], t1[2], t2[2]; + + MUL(t0, a[0], b[0]); + MUL(t1, a[0], b[1]); + ADDC(t0[1], carry, t0[1], t1[0], carry); + ADDC(t1[1], carry, 0, t1[1], carry); + MUL(t2, a[1], b[1]); + ADDC(t2[0], carry, t2[0], t1[1], carry); + ADDC(t2[1], carry, 0, t2[1], carry); + c[0] = t0[0]; + c[1] = t0[1]; + c[2] = t2[0]; + c[3] = t2[1]; +} + +void +mp_print(const digit_t *a, size_t nwords) +{ + printf("0x"); + for (size_t i = 0; i < nwords; i++) { +#ifdef RADIX_32 + printf("%08" PRIx32, a[nwords - i - 1]); // Print each word with 8 hex digits +#elif defined(RADIX_64) + printf("%016" PRIx64, a[nwords - i - 1]); // Print each word with 16 hex digits +#endif + } +} + +void +mp_copy(digit_t *b, const digit_t *a, size_t nwords) +{ + for (size_t i = 0; i < nwords; i++) { + b[i] = a[i]; + } +} + +void +mp_mul(digit_t *c, const digit_t *a, const digit_t *b, size_t nwords) +{ + // Multiprecision multiplication, c = a*b, for nwords-digit inputs, with nwords-digit output + // explicitly does not use the higher half of c, as we do not need in our applications + digit_t carry, UV[2], t[nwords], cc[nwords]; + + for (size_t i = 0; i < nwords; i++) { + cc[i] = 0; + } + + for (size_t i = 0; i < nwords; i++) { + + MUL(t, a[i], b[0]); + + for (size_t j = 1; j < nwords - 1; j++) { + MUL(UV, a[i], b[j]); + ADDC(t[j], carry, t[j], UV[0], 0); + t[j + 1] = UV[1] + carry; + } + + int j = nwords - 1; + MUL(UV, a[i], b[j]); + ADDC(t[j], carry, t[j], UV[0], 0); + + mp_add(&cc[i], &cc[i], t, nwords - i); + } + + mp_copy(c, cc, nwords); +} + +void +mp_mod_2exp(digit_t *a, unsigned int e, unsigned int nwords) +{ // Multiprecision modulo 2^e, with 0 <= a < 2^(e) + unsigned int i, q = e >> LOG2RADIX, r = e & (RADIX - 1); + + if (q < nwords) { + a[q] &= ((digit_t)1 
<< r) - 1; + + for (i = q + 1; i < nwords; i++) { + a[i] = 0; + } + } +} + +void +mp_neg(digit_t *a, unsigned int nwords) +{ // negates a + for (size_t i = 0; i < nwords; i++) { + a[i] ^= -1; + } + + a[0] += 1; +} + +bool +mp_is_one(const digit_t *x, unsigned int nwords) +{ // returns true if x represents 1, and false otherwise + if (x[0] != 1) { + return false; + } + + for (size_t i = 1; i < nwords; i++) { + if (x[i] != 0) { + return false; + } + } + return true; +} + +void +mp_inv_2e(digit_t *b, const digit_t *a, int e, unsigned int nwords) +{ // Inversion modulo 2^e, using Newton's method and Hensel lifting + // we take the first power of 2 larger than e to use + // requires a to be odd, of course + // returns b such that a*b = 1 mod 2^e + assert((a[0] & 1) == 1); + + digit_t x[nwords], y[nwords], aa[nwords], mp_one[nwords], tmp[nwords]; + mp_copy(aa, a, nwords); + + mp_one[0] = 1; + for (unsigned int i = 1; i < nwords; i++) { + mp_one[i] = 0; + } + + int p = 1; + while ((1 << p) < e) { + p++; + } + p -= 2; // using k = 4 for initial inverse + int w = (1 << (p + 2)); + + mp_mod_2exp(aa, w, nwords); + mp_add(x, aa, aa, nwords); + mp_add(x, x, aa, nwords); // should be 3a + x[0] ^= (1 << 1); // so that x equals (3a)^2 xor 2 + mp_mod_2exp(x, w, nwords); // now x*a = 1 mod 2^4, which we lift + + mp_mul(tmp, aa, x, nwords); + mp_neg(tmp, nwords); + mp_add(y, mp_one, tmp, nwords); + + // Hensel lifting for p rounds + for (int i = 0; i < p; i++) { + mp_add(tmp, mp_one, y, nwords); + mp_mul(x, x, tmp, nwords); + mp_mul(y, y, y, nwords); + } + + mp_mod_2exp(x, w, nwords); + mp_copy(b, x, nwords); + + // verify results + mp_mul(x, x, aa, nwords); + mp_mod_2exp(x, w, nwords); + assert(mp_is_one(x, nwords)); +} + +void +mp_invert_matrix(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, int e, unsigned int nwords) +{ + // given a matrix ( ( a, b ), (c, d) ) of values mod 2^e + // returns the inverse matrix gamma ( (d, -b), (-c, a) ) + // where gamma is the inverse of the determinant a*d - b*c + // assumes the matrix is invertible, otherwises, inversion of determinant fails + + int p = 1; + while ((1 << p) < e) { + p++; + } + int w = (1 << (p)); + + digit_t det[nwords], tmp[nwords], resa[nwords], resb[nwords], resc[nwords], resd[nwords]; + mp_mul(tmp, r1, s2, nwords); + mp_mul(det, r2, s1, nwords); + mp_sub(det, tmp, det, nwords); + mp_inv_2e(det, det, e, nwords); + + mp_mul(resa, det, s2, nwords); + mp_mul(resb, det, r2, nwords); + mp_mul(resc, det, s1, nwords); + mp_mul(resd, det, r1, nwords); + + mp_neg(resb, nwords); + mp_neg(resc, nwords); + + mp_mod_2exp(resa, w, nwords); + mp_mod_2exp(resb, w, nwords); + mp_mod_2exp(resc, w, nwords); + mp_mod_2exp(resd, w, nwords); + + mp_copy(r1, resa, nwords); + mp_copy(r2, resb, nwords); + mp_copy(s1, resc, nwords); + mp_copy(s2, resd, nwords); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mp.h new file mode 100644 index 0000000000..b3733b520d --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mp.h @@ -0,0 +1,88 @@ +#ifndef MP_H +#define MP_H + +#include +#include +#include + +// Functions taken from the GF module + +void mp_add(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords); +digit_t mp_shiftr(digit_t *x, const unsigned int shift, const unsigned int nwords); +void multiple_mp_shiftl(digit_t *x, const unsigned int shift, const unsigned int nwords); +void mp_shiftl(digit_t *x, const unsigned int shift, const unsigned int nwords); +void MUL(digit_t 
*out, const digit_t a, const digit_t b); + +// Functions taken from the EC module + +void mp_sub(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords); +void select_ct(digit_t *c, const digit_t *a, const digit_t *b, const digit_t mask, const int nwords); +void swap_ct(digit_t *a, digit_t *b, const digit_t option, const int nwords); +int mp_compare(const digit_t *a, const digit_t *b, unsigned int nwords); +bool mp_is_zero(const digit_t *a, unsigned int nwords); +void mp_mul2(digit_t *c, const digit_t *a, const digit_t *b); + +// Further functions for multiprecision arithmetic +void mp_print(const digit_t *a, size_t nwords); +void mp_copy(digit_t *b, const digit_t *a, size_t nwords); +void mp_neg(digit_t *a, unsigned int nwords); +bool mp_is_one(const digit_t *x, unsigned int nwords); +void mp_mul(digit_t *c, const digit_t *a, const digit_t *b, size_t nwords); +void mp_mod_2exp(digit_t *a, unsigned int e, unsigned int nwords); +void mp_inv_2e(digit_t *b, const digit_t *a, int e, unsigned int nwords); +void mp_invert_matrix(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, int e, unsigned int nwords); + +#define mp_is_odd(x, nwords) (((nwords) != 0) & (int)(x)[0]) +#define mp_is_even(x, nwords) (!mp_is_odd(x, nwords)) + +/********************** Constant-time unsigned comparisons ***********************/ + +// The following functions return 1 (TRUE) if condition is true, 0 (FALSE) otherwise +static inline unsigned int +is_digit_nonzero_ct(digit_t x) +{ // Is x != 0? + return (unsigned int)((x | (0 - x)) >> (RADIX - 1)); +} + +static inline unsigned int +is_digit_zero_ct(digit_t x) +{ // Is x = 0? + return (unsigned int)(1 ^ is_digit_nonzero_ct(x)); +} + +static inline unsigned int +is_digit_lessthan_ct(digit_t x, digit_t y) +{ // Is x < y? 
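+    // Branch-free comparison: the most significant bit of
+    // x ^ ((x ^ y) | ((x - y) ^ y)) equals the borrow out of x - y,
+    // i.e. it is 1 exactly when x < y, so shifting it down by RADIX - 1
+    // yields the result without a secret-dependent branch.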
+ return (unsigned int)((x ^ ((x ^ y) | ((x - y) ^ y))) >> (RADIX - 1)); +} + +/********************** Platform-independent macros for digit-size operations + * **********************/ + +// Digit addition with carry +#define ADDC(sumOut, carryOut, addend1, addend2, carryIn) \ + { \ + digit_t tempReg = (addend1) + (digit_t)(carryIn); \ + (sumOut) = (addend2) + tempReg; \ + (carryOut) = (is_digit_lessthan_ct(tempReg, (digit_t)(carryIn)) | is_digit_lessthan_ct((sumOut), tempReg)); \ + } + +// Digit subtraction with borrow +#define SUBC(differenceOut, borrowOut, minuend, subtrahend, borrowIn) \ + { \ + digit_t tempReg = (minuend) - (subtrahend); \ + unsigned int borrowReg = \ + (is_digit_lessthan_ct((minuend), (subtrahend)) | ((borrowIn) & is_digit_zero_ct(tempReg))); \ + (differenceOut) = tempReg - (digit_t)(borrowIn); \ + (borrowOut) = borrowReg; \ + } + +// Shift right with flexible datatype +#define SHIFTR(highIn, lowIn, shift, shiftOut, DigitSize) \ + (shiftOut) = ((lowIn) >> (shift)) ^ ((highIn) << (DigitSize - (shift))); + +// Digit shift left +#define SHIFTL(highIn, lowIn, shift, shiftOut, DigitSize) \ + (shiftOut) = ((highIn) << (shift)) ^ ((lowIn) >> (RADIX - (shift))); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/normeq.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/normeq.c new file mode 100644 index 0000000000..8c133dd095 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/normeq.c @@ -0,0 +1,369 @@ +#include +#include "internal.h" + +/** @file + * + * @authors Antonin Leroux + * + * @brief Functions related to norm equation solving or special extremal orders + */ + +void +quat_lattice_O0_set(quat_lattice_t *O0) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&(O0->basis[i][j]), 0); + } + } + ibz_set(&(O0->denom), 2); + ibz_set(&(O0->basis[0][0]), 2); + ibz_set(&(O0->basis[1][1]), 2); + ibz_set(&(O0->basis[2][2]), 1); + ibz_set(&(O0->basis[1][2]), 1); + ibz_set(&(O0->basis[3][3]), 1); + ibz_set(&(O0->basis[0][3]), 1); +} + +void +quat_lattice_O0_set_extremal(quat_p_extremal_maximal_order_t *O0) +{ + ibz_set(&O0->z.coord[1], 1); + ibz_set(&O0->t.coord[2], 1); + ibz_set(&O0->z.denom, 1); + ibz_set(&O0->t.denom, 1); + O0->q = 1; + quat_lattice_O0_set(&(O0->order)); +} + +void +quat_order_elem_create(quat_alg_elem_t *elem, + const quat_p_extremal_maximal_order_t *order, + const ibz_vec_4_t *coeffs, + const quat_alg_t *Bpoo) +{ + + // var dec + quat_alg_elem_t quat_temp; + + // var init + quat_alg_elem_init(&quat_temp); + + // elem = x + quat_alg_scalar(elem, &(*coeffs)[0], &ibz_const_one); + + // quat_temp = i*y + quat_alg_scalar(&quat_temp, &((*coeffs)[1]), &ibz_const_one); + quat_alg_mul(&quat_temp, &order->z, &quat_temp, Bpoo); + + // elem = x + i*y + quat_alg_add(elem, elem, &quat_temp); + + // quat_temp = z * j + quat_alg_scalar(&quat_temp, &(*coeffs)[2], &ibz_const_one); + quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); + + // elem = x + i* + z*j + quat_alg_add(elem, elem, &quat_temp); + + // quat_temp = t * j * i + quat_alg_scalar(&quat_temp, &(*coeffs)[3], &ibz_const_one); + quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); + quat_alg_mul(&quat_temp, &quat_temp, &order->z, Bpoo); + + // elem = x + i*y + j*z + j*i*t + quat_alg_add(elem, elem, &quat_temp); + + quat_alg_elem_finalize(&quat_temp); +} + +int +quat_represent_integer(quat_alg_elem_t *gamma, + const ibz_t *n_gamma, + int non_diag, + const quat_represent_integer_params_t *params) +{ + + if (ibz_is_even(n_gamma)) { + return 0; + } + // var 
dec + int found; + ibz_t cornacchia_target; + ibz_t adjusted_n_gamma, q; + ibz_t bound, sq_bound, temp; + ibz_t test; + ibz_vec_4_t coeffs; // coeffs = [x,y,z,t] + quat_alg_elem_t quat_temp; + + if (non_diag) + assert(params->order->q % 4 == 1); + + // var init + found = 0; + ibz_init(&bound); + ibz_init(&test); + ibz_init(&temp); + ibz_init(&q); + ibz_init(&sq_bound); + ibz_vec_4_init(&coeffs); + quat_alg_elem_init(&quat_temp); + ibz_init(&adjusted_n_gamma); + ibz_init(&cornacchia_target); + + ibz_set(&q, params->order->q); + + // this could be removed in the current state + int standard_order = (params->order->q == 1); + + // adjusting the norm of gamma (multiplying by 4 to find a solution in an order of odd level) + if (non_diag || standard_order) { + ibz_mul(&adjusted_n_gamma, n_gamma, &ibz_const_two); + ibz_mul(&adjusted_n_gamma, &adjusted_n_gamma, &ibz_const_two); + } else { + ibz_copy(&adjusted_n_gamma, n_gamma); + } + // computation of the first bound = sqrt (adjust_n_gamma / p - q) + ibz_div(&sq_bound, &bound, &adjusted_n_gamma, &((params->algebra)->p)); + ibz_set(&temp, params->order->q); + ibz_sub(&sq_bound, &sq_bound, &temp); + ibz_sqrt_floor(&bound, &sq_bound); + + // the size of the search space is roughly n_gamma / (p√q) + ibz_t counter; + ibz_init(&counter); + ibz_mul(&temp, &temp, &((params->algebra)->p)); + ibz_mul(&temp, &temp, &((params->algebra)->p)); + ibz_sqrt_floor(&temp, &temp); + ibz_div(&counter, &temp, &adjusted_n_gamma, &temp); + + // entering the main loop + while (!found && ibz_cmp(&counter, &ibz_const_zero) != 0) { + // decreasing the counter + ibz_sub(&counter, &counter, &ibz_const_one); + + // we start by sampling the first coordinate + ibz_rand_interval(&coeffs[2], &ibz_const_one, &bound); + + // then, we sample the second coordinate + // computing the second bound in temp as sqrt( (adjust_n_gamma - p*coeffs[2]²)/qp ) + ibz_mul(&cornacchia_target, &coeffs[2], &coeffs[2]); + ibz_mul(&temp, &cornacchia_target, &(params->algebra->p)); + ibz_sub(&temp, &adjusted_n_gamma, &temp); + ibz_mul(&sq_bound, &q, &(params->algebra->p)); + ibz_div(&temp, &sq_bound, &temp, &sq_bound); + ibz_sqrt_floor(&temp, &temp); + + if (ibz_cmp(&temp, &ibz_const_zero) == 0) { + continue; + } + // sampling the second value + ibz_rand_interval(&coeffs[3], &ibz_const_one, &temp); + + // compute cornacchia_target = n_gamma - p * (z² + q*t²) + ibz_mul(&temp, &coeffs[3], &coeffs[3]); + ibz_mul(&temp, &q, &temp); + ibz_add(&cornacchia_target, &cornacchia_target, &temp); + ibz_mul(&cornacchia_target, &cornacchia_target, &((params->algebra)->p)); + ibz_sub(&cornacchia_target, &adjusted_n_gamma, &cornacchia_target); + assert(ibz_cmp(&cornacchia_target, &ibz_const_zero) > 0); + + // applying cornacchia + if (ibz_probab_prime(&cornacchia_target, params->primality_test_iterations)) + found = ibz_cornacchia_prime(&(coeffs[0]), &(coeffs[1]), &q, &cornacchia_target); + else + found = 0; + + if (found && non_diag && standard_order) { + // check that we can divide by two at least once + // the treatmeat depends if the basis contains (1+j)/2 or (1+k)/2 + // we must have x = t mod 2 and y = z mod 2 + // if q=1 we can simply swap x and y + if (ibz_is_odd(&coeffs[0]) != ibz_is_odd(&coeffs[3])) { + ibz_swap(&coeffs[1], &coeffs[0]); + } + // we further check that (x-t)/2 = 1 mod 2 and (y-z)/2 = 1 mod 2 to ensure that the + // resulting endomorphism will behave well for dim 2 computations + found = found && ((ibz_get(&coeffs[0]) - ibz_get(&coeffs[3])) % 4 == 2) && + ((ibz_get(&coeffs[1]) - 
ibz_get(&coeffs[2])) % 4 == 2); + } + if (found) { + +#ifndef NDEBUG + ibz_set(&temp, (params->order->q)); + ibz_mul(&temp, &temp, &(coeffs[1])); + ibz_mul(&temp, &temp, &(coeffs[1])); + ibz_mul(&test, &(coeffs[0]), &(coeffs[0])); + ibz_add(&temp, &temp, &test); + assert(0 == ibz_cmp(&temp, &cornacchia_target)); + + ibz_mul(&cornacchia_target, &(coeffs[3]), &(coeffs[3])); + ibz_mul(&cornacchia_target, &cornacchia_target, &(params->algebra->p)); + ibz_mul(&temp, &(coeffs[1]), &(coeffs[1])); + ibz_add(&cornacchia_target, &cornacchia_target, &temp); + ibz_set(&temp, (params->order->q)); + ibz_mul(&cornacchia_target, &cornacchia_target, &temp); + ibz_mul(&temp, &(coeffs[0]), &coeffs[0]); + ibz_add(&cornacchia_target, &cornacchia_target, &temp); + ibz_mul(&temp, &(coeffs[2]), &coeffs[2]); + ibz_mul(&temp, &temp, &(params->algebra->p)); + ibz_add(&cornacchia_target, &cornacchia_target, &temp); + assert(0 == ibz_cmp(&cornacchia_target, &adjusted_n_gamma)); +#endif + // translate x,y,z,t into the quaternion element gamma + quat_order_elem_create(gamma, (params->order), &coeffs, (params->algebra)); +#ifndef NDEBUG + quat_alg_norm(&temp, &(coeffs[0]), gamma, (params->algebra)); + assert(ibz_is_one(&(coeffs[0]))); + assert(0 == ibz_cmp(&temp, &adjusted_n_gamma)); + assert(quat_lattice_contains(NULL, &((params->order)->order), gamma)); +#endif + // making gamma primitive + // coeffs contains the coefficients of primitivized gamma in the basis of order + quat_alg_make_primitive(&coeffs, &temp, gamma, &((params->order)->order)); + + if (non_diag || standard_order) + found = (ibz_cmp(&temp, &ibz_const_two) == 0); + else + found = (ibz_cmp(&temp, &ibz_const_one) == 0); + } + } + + if (found) { + // new gamma + ibz_mat_4x4_eval(&coeffs, &(((params->order)->order).basis), &coeffs); + ibz_copy(&gamma->coord[0], &coeffs[0]); + ibz_copy(&gamma->coord[1], &coeffs[1]); + ibz_copy(&gamma->coord[2], &coeffs[2]); + ibz_copy(&gamma->coord[3], &coeffs[3]); + ibz_copy(&gamma->denom, &(((params->order)->order).denom)); + } + // var finalize + ibz_finalize(&counter); + ibz_finalize(&bound); + ibz_finalize(&temp); + ibz_finalize(&sq_bound); + ibz_vec_4_finalize(&coeffs); + quat_alg_elem_finalize(&quat_temp); + ibz_finalize(&adjusted_n_gamma); + ibz_finalize(&cornacchia_target); + ibz_finalize(&q); + ibz_finalize(&test); + + return found; +} + +int +quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, + const ibz_t *norm, + int is_prime, + const quat_represent_integer_params_t *params, + const ibz_t *prime_cofactor) +{ + + ibz_t n_temp, norm_d; + ibz_t disc; + quat_alg_elem_t gen, gen_rerand; + int found = 0; + ibz_init(&n_temp); + ibz_init(&norm_d); + ibz_init(&disc); + quat_alg_elem_init(&gen); + quat_alg_elem_init(&gen_rerand); + + // when the norm is prime we can be quite efficient + // by avoiding to run represent integer + // the first step is to generate one ideal of the correct norm + if (is_prime) { + + // we find a quaternion element of norm divisible by norm + while (!found) { + // generating a trace-zero element at random + ibz_set(&gen.coord[0], 0); + ibz_sub(&n_temp, norm, &ibz_const_one); + for (int i = 1; i < 4; i++) + ibz_rand_interval(&gen.coord[i], &ibz_const_zero, &n_temp); + + // first, we compute the norm of the gen + quat_alg_norm(&n_temp, &norm_d, &gen, (params->algebra)); + assert(ibz_is_one(&norm_d)); + + // and finally the negation mod norm + ibz_neg(&disc, &n_temp); + ibz_mod(&disc, &disc, norm); + // now we check that -n is a square mod norm + // and if the square root exists we 
compute it + found = ibz_sqrt_mod_p(&gen.coord[0], &disc, norm); + found = found && !quat_alg_elem_is_zero(&gen); + } + } else { + assert(prime_cofactor != NULL); + // if it is not prime or we don't know if it is prime, we may just use represent integer + // and use a precomputed prime as cofactor + assert(!ibz_is_zero(norm)); + ibz_mul(&n_temp, prime_cofactor, norm); + found = quat_represent_integer(&gen, &n_temp, 0, params); + found = found && !quat_alg_elem_is_zero(&gen); + } +#ifndef NDEBUG + if (found) { + // first, we compute the norm of the gen + quat_alg_norm(&n_temp, &norm_d, &gen, (params->algebra)); + assert(ibz_is_one(&norm_d)); + ibz_mod(&n_temp, &n_temp, norm); + assert(ibz_cmp(&n_temp, &ibz_const_zero) == 0); + } +#endif + + // now we just have to rerandomize the class of the ideal generated by gen + found = 0; + while (!found) { + for (int i = 0; i < 4; i++) { + ibz_rand_interval(&gen_rerand.coord[i], &ibz_const_one, norm); + } + quat_alg_norm(&n_temp, &norm_d, &gen_rerand, (params->algebra)); + assert(ibz_is_one(&norm_d)); + ibz_gcd(&disc, &n_temp, norm); + found = ibz_is_one(&disc); + found = found && !quat_alg_elem_is_zero(&gen_rerand); + } + + quat_alg_mul(&gen, &gen, &gen_rerand, (params->algebra)); + // in both cases, whether norm is prime or not prime, + // gen is not divisible by any integer factor of the target norm + // therefore the call below will yield an ideal of the correct norm + quat_lideal_create(lideal, &gen, norm, &((params->order)->order), (params->algebra)); + assert(ibz_cmp(norm, &(lideal->norm)) == 0); + + ibz_finalize(&n_temp); + quat_alg_elem_finalize(&gen); + quat_alg_elem_finalize(&gen_rerand); + ibz_finalize(&norm_d); + ibz_finalize(&disc); + return (found); +} + +void +quat_change_to_O0_basis(ibz_vec_4_t *vec, const quat_alg_elem_t *el) +{ + ibz_t tmp; + ibz_init(&tmp); + ibz_copy(&(*vec)[2], &el->coord[2]); + ibz_add(&(*vec)[2], &(*vec)[2], &(*vec)[2]); // double (not optimal if el->denom is even...) + ibz_copy(&(*vec)[3], &el->coord[3]); // double (not optimal if el->denom is even...) 
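+    // O0 has basis (1, i, (i+j)/2, (1+k)/2) with denominator 2 (see
+    // quat_lattice_O0_set), so the j and k coordinates are doubled, and the
+    // contributions of (i+j)/2 and (1+k)/2 are then removed from the i and 1
+    // coordinates below.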
+ ibz_add(&(*vec)[3], &(*vec)[3], &(*vec)[3]); + ibz_sub(&(*vec)[0], &el->coord[0], &el->coord[3]); + ibz_sub(&(*vec)[1], &el->coord[1], &el->coord[2]); + + assert(ibz_divides(&(*vec)[0], &el->denom)); + assert(ibz_divides(&(*vec)[1], &el->denom)); + assert(ibz_divides(&(*vec)[2], &el->denom)); + assert(ibz_divides(&(*vec)[3], &el->denom)); + + ibz_div(&(*vec)[0], &tmp, &(*vec)[0], &el->denom); + ibz_div(&(*vec)[1], &tmp, &(*vec)[1], &el->denom); + ibz_div(&(*vec)[2], &tmp, &(*vec)[2], &el->denom); + ibz_div(&(*vec)[3], &tmp, &(*vec)[3], &el->denom); + + ibz_finalize(&tmp); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/printer.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/printer.c new file mode 100644 index 0000000000..6d6a3ca9b7 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/printer.c @@ -0,0 +1,132 @@ +#include +#include "internal.h" + +void +ibz_mat_2x2_print(const ibz_mat_2x2_t *mat) +{ + printf("matrix: "); + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_print(&((*mat)[i][j]), 10); + printf(" "); + } + printf("\n "); + } + printf("\n"); +} + +void +ibz_mat_4x4_print(const ibz_mat_4x4_t *mat) +{ + printf("matrix: "); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_print(&((*mat)[i][j]), 10); + printf(" "); + } + printf("\n "); + } + printf("\n"); +} + +void +ibz_vec_2_print(const ibz_vec_2_t *vec) +{ + printf("vector: "); + for (int i = 0; i < 2; i++) { + ibz_print(&((*vec)[i]), 10); + printf(" "); + } + printf("\n\n"); +} + +void +ibz_vec_4_print(const ibz_vec_4_t *vec) +{ + printf("vector: "); + for (int i = 0; i < 4; i++) { + ibz_print(&((*vec)[i]), 10); + printf(" "); + } + printf("\n\n"); +} + +void +quat_lattice_print(const quat_lattice_t *lat) +{ + printf("lattice\n"); + printf("denominator: "); + ibz_print(&(lat->denom), 10); + printf("\n"); + printf("basis: "); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_print(&((lat->basis)[i][j]), 10); + printf(" "); + } + printf("\n "); + } + printf("\n"); +} + +void +quat_alg_print(const quat_alg_t *alg) +{ + printf("quaternion algebra ramified at "); + ibz_print(&(alg->p), 10); + printf(" and infinity\n\n"); +} + +void +quat_alg_elem_print(const quat_alg_elem_t *elem) +{ + printf("denominator: "); + ibz_print(&(elem->denom), 10); + printf("\n"); + printf("coordinates: "); + for (int i = 0; i < 4; i++) { + ibz_print(&((elem->coord)[i]), 10); + printf(" "); + } + printf("\n\n"); +} + +void +quat_left_ideal_print(const quat_left_ideal_t *lideal) +{ + printf("left ideal\n"); + printf("norm: "); + ibz_print(&(lideal->norm), 10); + printf("\n"); + printf("denominator: "); + ibz_print(&(lideal->lattice.denom), 10); + printf("\n"); + printf("basis: "); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_print(&((lideal->lattice.basis)[i][j]), 10); + printf(" "); + } + if (i != 3) { + printf("\n "); + } else { + printf("\n"); + } + } + if ((lideal->parent_order) != NULL) { + printf("parent order denominator: "); + ibz_print(&(lideal->parent_order->denom), 10); + printf("\n"); + printf("parent order basis: "); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_print(&((lideal->parent_order->basis)[i][j]), 10); + printf(" "); + } + printf("\n "); + } + } else { + printf("Parent order not given!\n"); + } + printf("\n"); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion.h new file mode 100644 index 0000000000..a567657464 --- 
/dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion.h @@ -0,0 +1,708 @@ +/** @file + * + * @authors Luca De Feo, Sina Schaeffler + * + * @brief Declarations for quaternion algebra operations + */ + +#ifndef QUATERNION_H +#define QUATERNION_H + +// #include +#include +#include "intbig.h" +#include + +/** @defgroup quat_quat Quaternion algebra + * @{ + */ + +/** @defgroup quat_vec_t Types for integer vectors and matrices + * @{ + */ + +/** @brief Type for vector of 2 big integers + * + * @typedef ibz_vec_2_t + */ +typedef ibz_t ibz_vec_2_t[2]; + +/** @brief Type for vectors of 4 integers + * + * @typedef ibz_vec_4_t + * + * Represented as a vector of 4 ibz_t (big integer) elements + */ +typedef ibz_t ibz_vec_4_t[4]; + +/** @brief Type for 2 by 2 matrices of integers + * + * @typedef ibz_mat_2x2_t + * + * Represented as a matrix of 2 vectors of 2 ibz_t (big integer) elements + */ +typedef ibz_t ibz_mat_2x2_t[2][2]; + +/** @brief Type for 4 by 4 matrices of integers + * + * @typedef ibz_mat_4x4_t + * + * Represented as a matrix of 4 vectors of 4 ibz_t (big integer) elements + */ +typedef ibz_t ibz_mat_4x4_t[4][4]; +/** + * @} + */ + +/** @defgroup quat_quat_t Types for quaternion algebras + * @{ + */ + +/** @brief Type for quaternion algebras + * + * @typedef quat_alg_t + * + * @struct quat_alg + * + * The quaternion algebra ramified at p = 3 mod 4 and ∞. + */ +typedef struct quat_alg +{ + ibz_t p; ///< Prime number, must be = 3 mod 4. +} quat_alg_t; + +/** @brief Type for quaternion algebra elements + * + * @typedef quat_alg_elem_t + * + * @struct quat_alg_elem + * + * Represented as a array *coord* of 4 ibz_t integers and a common ibz_t denominator *denom*. + * + * The representation is not necessarily normalized, that is, gcd(denom, content(coord)) might not + * be 1. For getting a normalized representation, use the quat_alg_normalize function + * + * The elements are always represented in basis (1,i,j,ij) of the quaternion algebra, with i^2=-1 + * and j^2 = -p + */ +typedef struct quat_alg_elem +{ + ibz_t denom; ///< Denominator by which all coordinates are divided (big integer, must not be 0) + ibz_vec_4_t coord; ///< Numerators of the 4 coordinates of the quaternion algebra element in basis (1,i,j,ij) +} quat_alg_elem_t; + +/** @brief Type for lattices in dimension 4 + * + * @typedef quat_lattice_t + * + * @struct quat_lattice + * + * Represented as a rational (`frac`) times an integreal lattice (`basis`) + * + * The basis is such that its columns divided by its denominator are elements of + * the quaternion algebra, represented in basis (1,i,j,ij) where i^2 = -1, j^2 = -p. + * + * All lattices must have full rank (4) + */ +typedef struct quat_lattice +{ + ibz_t denom; ///< Denominator by which the basis is divided (big integer, must not be 0) + ibz_mat_4x4_t basis; ///< Integer basis of the lattice (its columns divided by denom are + ///< algebra elements in the usual basis) +} quat_lattice_t; + +/** @brief Type for left ideals of maximal orders in quaternion algebras + * + * @typedef quat_left_ideal_t + * + * @struct quat_left_ideal + * + * The basis of the lattice representing it is such that its columns divided by its denominator are + * elements of the quaternion algebra, represented in basis (1,i,j,ij) where i^2 = -1, j^2 = -p. 
+ */ +typedef struct quat_left_ideal +{ + quat_lattice_t lattice; ///< lattice representing the ideal + ibz_t norm; ///< norm of the lattice + const quat_lattice_t *parent_order; ///< should be a maximal order +} quat_left_ideal_t; +/** @} + */ + +/** @brief Type for extremal maximal orders + * + * @typedef quat_p_extremal_maximal_order_t + * + * @struct quat_p_extremal_maximal_order + * + * The basis of the order representing it is in hermite normal form, and its columns divid +ed by its denominator are elements of the quaternion algebra, represented in basis (1,z,t, +tz) where z^2 = -q, t^2 = -p. +*/ +typedef struct quat_p_extremal_maximal_order +{ + quat_lattice_t order; ///< the order represented as a lattice + quat_alg_elem_t z; ///< the element of small discriminant + quat_alg_elem_t t; ///< the element of norm p orthogonal to z + uint32_t q; ///< the absolute value of the square of z +} quat_p_extremal_maximal_order_t; + +/** @brief Type for represent integer parameters + * + * @typedef quat_p_extremal_maximal_order_t + * + * @struct quat_p_extremal_maximal_order + * + */ +typedef struct quat_represent_integer_params +{ + int primality_test_iterations; ///< Primality test iterations + const quat_p_extremal_maximal_order_t *order; ///< The standard extremal maximal order + const quat_alg_t *algebra; ///< The quaternion algebra +} quat_represent_integer_params_t; + +/*************************** Functions *****************************/ + +/** @defgroup quat_c Constructors and Destructors + * @{ + */ +void quat_alg_init_set(quat_alg_t *alg, const ibz_t *p); +void quat_alg_finalize(quat_alg_t *alg); + +void quat_alg_elem_init(quat_alg_elem_t *elem); +void quat_alg_elem_finalize(quat_alg_elem_t *elem); + +void ibz_vec_2_init(ibz_vec_2_t *vec); +void ibz_vec_2_finalize(ibz_vec_2_t *vec); + +void ibz_vec_4_init(ibz_vec_4_t *vec); +void ibz_vec_4_finalize(ibz_vec_4_t *vec); + +void ibz_mat_2x2_init(ibz_mat_2x2_t *mat); +void ibz_mat_2x2_finalize(ibz_mat_2x2_t *mat); + +void ibz_mat_4x4_init(ibz_mat_4x4_t *mat); +void ibz_mat_4x4_finalize(ibz_mat_4x4_t *mat); + +void quat_lattice_init(quat_lattice_t *lat); +void quat_lattice_finalize(quat_lattice_t *lat); + +void quat_left_ideal_init(quat_left_ideal_t *lideal); +void quat_left_ideal_finalize(quat_left_ideal_t *lideal); +/** @} + */ + +/** @defgroup quat_printers Print functions for types from the quaternion module + * @{ + */ +void ibz_mat_2x2_print(const ibz_mat_2x2_t *mat); +void ibz_mat_4x4_print(const ibz_mat_4x4_t *mat); +void ibz_vec_2_print(const ibz_vec_2_t *vec); +void ibz_vec_4_print(const ibz_vec_4_t *vec); + +void quat_lattice_print(const quat_lattice_t *lat); +void quat_alg_print(const quat_alg_t *alg); +void quat_alg_elem_print(const quat_alg_elem_t *elem); +void quat_left_ideal_print(const quat_left_ideal_t *lideal); + +/** @} + */ + +/** @defgroup quat_int Integer functions for quaternion algebra + * @{ + */ + +/** @defgroup quat_int_mat Integer matrix and vector functions + * @{ + */ + +/** @brief Copy matrix + * + * @param copy Output: Matrix into which copied will be copied + * @param copied + */ +void ibz_mat_2x2_copy(ibz_mat_2x2_t *copy, const ibz_mat_2x2_t *copied); + +/** + * @brief Inverse of 2x2 integer matrices modulo m + * + * @param inv Output matrix + * @param mat Input matrix + * @param m Integer modulo + * @return 1 if inverse exists 0 otherwise + */ +int ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m); + +/** @brief mat*vec in dimension 2 for integers + * + * @param res Output 
vector + * @param mat Input matrix + * @param vec Input vector + */ +void ibz_mat_2x2_eval(ibz_vec_2_t *res, const ibz_mat_2x2_t *mat, const ibz_vec_2_t *vec); + +/** @brief Copies all values from a 4x4 integer matrix to another one + * + * @param new Output: matrix which will have its entries set to mat's entries + * @param mat Input matrix + */ +void ibz_mat_4x4_copy(ibz_mat_4x4_t *new, + const ibz_mat_4x4_t *mat); // dim4, lattice, test/dim4, ideal + +/** @brief transpose a 4x4 integer matrix + * + * @param transposed Output: is set to the transposition of mat + * @param mat Input matrix + */ +void ibz_mat_4x4_transpose(ibz_mat_4x4_t *transposed, const ibz_mat_4x4_t *mat); + +/** @brief a*b for a,b integer 4x4 matrices + * + * Naive implementation + * + * @param res Output: A 4x4 integer matrix + * @param a + * @param b + */ +void ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t *b); + +/** @brief divides all values in matrix by same scalar + * + * @returns 1 if scalar divided all values in mat, 0 otherwise (division is performed in both cases) + * @param quot Output + * @param scalar + * @param mat + */ +int ibz_mat_4x4_scalar_div(ibz_mat_4x4_t *quot, const ibz_t *scalar, const ibz_mat_4x4_t *mat); + +/** + * @brief mat*vec + * + * + * @param res Output: coordinate vector + * @param mat Integer 4x4 matrix + * @param vec Integer vector (coordinate vector) + * + * Multiplies 4x4 integer matrix mat by a 4-integers column vector vec + */ +void ibz_mat_4x4_eval(ibz_vec_4_t *res, const ibz_mat_4x4_t *mat, const ibz_vec_4_t *vec); + +/** + * @brief vec*mat + * + * + * @param res Output: coordinate vector. + * @param vec Integer vector (coordinate vector) + * @param mat Integer 4x4 matrix + * + * Multiplies 4x4 integer matrix mat by a 4-integers row vector vec (on the left) + */ +void ibz_mat_4x4_eval_t(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_mat_4x4_t *mat); + +/** @} + */ + +/** @defgroup quat_integer Higher-level integer functions for quaternion algebra + * @{ + */ + +/** + * @brief Generates a random prime + * + * A number is accepted as prime if it passes a 30-round Miller-Rabin test. + * This function is fairly inefficient and mostly meant for tests. + * + * @returns 1 if a prime is found, 0 otherwise + * @param p Output: The prime (if found) + * @param is3mod4 If 1, the prime is required to be 3 mod 4, if 0 no congruence condition is imposed + * @param bitsize Maximal size of output prime + * @param probability_test_iterations Miller-Rabin iterations for probabilistic primality testing in + * rejection sampling + */ +int ibz_generate_random_prime(ibz_t *p, int is3mod4, int bitsize, int probability_test_iterations); + +/** + * @brief Find integers x and y such that x^2 + n*y^2 = p + * + * Uses Cornacchia's algorithm, should be used only for prime p + * + * @param x Output + * @param y Output + * @param n first parameter defining the equation + * @param p second parameter defining the equation, must be prime + * @return 1 if success, 0 otherwise + */ +int ibz_cornacchia_prime(ibz_t *x, ibz_t *y, const ibz_t *n, const ibz_t *p); + +/** @} + */ + +/** @defgroup quat_qf Quadratic form functions + * @{ + */ + +/** + * @brief Quadratic form evaluation + * + * qf and coord must be represented in the same basis.
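As a usage illustration only (not part of the upstream SQIsign patch), the two integer helpers documented above can be combined to write a random prime p as x^2 + n*y^2. The sketch below assumes the usual ibz_init/ibz_finalize/ibz_set helpers declared in intbig.h; the function name, bit size and iteration count are arbitrary examples.

/* x and y are assumed to be initialized by the caller */
static int decompose_random_prime_example(ibz_t *x, ibz_t *y)
{
    ibz_t p, n;
    int ok = 0;
    ibz_init(&p);
    ibz_init(&n);
    ibz_set(&n, 1); /* look for p = x^2 + 1*y^2 */
    /* 256-bit prime, no congruence condition (is3mod4 = 0), 30 Miller-Rabin rounds */
    if (ibz_generate_random_prime(&p, 0, 256, 30)) {
        /* succeeds only when a solution exists, e.g. p = 1 mod 4 for n = 1 */
        ok = ibz_cornacchia_prime(x, y, &n, &p);
    }
    ibz_finalize(&p);
    ibz_finalize(&n);
    return ok;
}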
+ * + * @param res Output: coordinate vector + * @param qf Quadratic form (4x4 integer matrix) + * @param coord Integer vector (coordinate vector) + */ +void quat_qf_eval(ibz_t *res, const ibz_mat_4x4_t *qf, const ibz_vec_4_t *coord); +/** @} + */ + +/** @} + */ + +/** @defgroup quat_quat_f Quaternion algebra functions + * @{ + */ +/** + * @brief Copies an algebra element + * + * @param copy Output: The element into which another one is copied + * @param copied Source element copied into copy + */ +void quat_alg_elem_copy(quat_alg_elem_t *copy, const quat_alg_elem_t *copied); + +void quat_alg_mul(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b, const quat_alg_t *alg); + +/** @brief reduced norm of alg_elem x + * + * @param res_num Output: rational which will contain the numerator of the reduced norm of x + * @param res_denom Output: rational which will contain the denominator of the reduced norm of x (it + * is 1 if the norm is integer) + * @param x Algebra element whose norm is computed + * @param alg The quaternion algebra + */ +void quat_alg_norm(ibz_t *res_num, ibz_t *res_denom, const quat_alg_elem_t *x, const quat_alg_t *alg); + +/** @brief Normalize representation of alg_elem x + * + * @param x Algebra element whose representation will be normalized + * + * Modification of x. + * Sets coord and denom of x so that gcd(denom, content(coord))=1 + * without changing the value of x = (coord0/denom, coord1/denom, coord2/denom, coord3/denom). + */ +void quat_alg_normalize(quat_alg_elem_t *x); + +/** + * @brief Standard involution in a quaternion algebra + * + * @param conj Output: image of x by standard involution of the quaternion algebra alg + * @param x element of alg whose image is searched + */ +void quat_alg_conj(quat_alg_elem_t *conj, const quat_alg_elem_t *x); + +/** + * @brief Given `x` ∈ `order`, factor it into its primitive and imprimitive parts + * + * Given `x` ∈ `order`, return a coordinate vector `primitive_x` and an integer `content` + * such that `x` = `content` · Λ `primitive_x`, where Λ is the basis of `order` + * and `x` / `content` is primitive in `order`. + * + * @param primitive_x Output: coordinates of a primitive element of `order` (in `order`'s basis) + * @param content Output: content of `x`'s coordinate vector in order's basis + * @param order order of `alg` + * @param x element of order, must be in `order` + */ +void quat_alg_make_primitive(ibz_vec_4_t *primitive_x, + ibz_t *content, + const quat_alg_elem_t *x, + const quat_lattice_t *order); + +// end quat_quat_f +/** @} + */ + +/** @defgroup quat_lat_f Lattice functions + * @{ + */ + +void quat_lattice_intersect(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2); + +/** + * @brief Test whether x ∈ lat. If so, compute its coordinates in lat's basis. + * + * @param coord Output: Set to the coordinates of x in lat. May be NULL. + * @param lat The lattice, not necessarily in HNF but full rank + * @param x An element of the quaternion algebra + * @return true if x ∈ lat + */ +int quat_lattice_contains(ibz_vec_4_t *coord, const quat_lattice_t *lat, const quat_alg_elem_t *x); + +/** + * @brief Conjugate of a lattice with basis not in HNF + * + * @param conj Output: The lattice conjugate to lat.
ATTENTION: is not under HNF + * @param lat Input lattice + */ +void quat_lattice_conjugate_without_hnf(quat_lattice_t *conj, const quat_lattice_t *lat); + +/** + * @brief Multiply a lattice and an algebra element + * + * The element is multiplied to the right of the lattice + * + * @param prod Output: Lattice lat*elem + * @param lat Input lattice + * @param elem Algebra element + * @param alg The quaternion algebra + */ +void quat_lattice_alg_elem_mul(quat_lattice_t *prod, + const quat_lattice_t *lat, + const quat_alg_elem_t *elem, + const quat_alg_t *alg); // ideal + +/** + * @brief Sample from the intersection of a lattice with a ball + * + * Sample a uniform non-zero vector of norm ≤ `radius` from the lattice. + * + * @param res Output: sampled quaternion from the lattice + * @param lattice Input lattice + * @param alg The quaternion algebra + * @param radius The ball radius (quaternion norm) + * @return 0 if an error occurred (ball too small or RNG error), 1 otherwise + */ +int quat_lattice_sample_from_ball(quat_alg_elem_t *res, + const quat_lattice_t *lattice, + const quat_alg_t *alg, + const ibz_t *radius); + +// end quat_lat_f +/** @} + */ + +/** @defgroup quat_lideal_f Functions for left ideals + * @{ + */ + +/** @defgroup quat_lideal_c Creating left ideals + * @{ + */ + +/** + * @brief Left ideal of order, generated by x and N as order*x+order*N + * + * @param lideal Output: left ideal + * @param alg quaternion algebra + * @param order maximal order of alg whose left ideal is searched + * @param x generating element. Must be non-zero + * @param N generating integer + * + * Creates the left ideal in order generated by the element x and the integer N. + * If x is not divisible (inside the order) by any integer divisor n>1 of N, + * then the norm of the output ideal is N. + * + */ +void quat_lideal_create(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const ibz_t *N, + const quat_lattice_t *order, + const quat_alg_t *alg); + +/** @} + */ + +/** @defgroup quat_lideal_gen Generators of left ideals + * @{ + */ + +/** + * @brief Generator of 'lideal' + * + * @returns 1 if such a generator was found, 0 otherwise + * @param gen Output: non scalar generator of lideal + * @param lideal left ideal + * @param alg the quaternion algebra + * + * Ideal is generated by gen and the ideal's norm + * + * Bound has as default value QUATERNION_lideal_generator_search_bound + */ +int quat_lideal_generator(quat_alg_elem_t *gen, const quat_left_ideal_t *lideal, const quat_alg_t *alg); +/** @} + */ + +/** @defgroup quat_lideal_op Operations on left ideals + * @{ + */ + +/** + * @brief Copies an ideal + * + * @param copy Output: The ideal into which another one is copied + * @param copied Source ideal copied into copy. The parent order is not copied (only the pointer). 
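A hedged usage sketch (illustrative only, not part of the upstream patch) for the left-ideal constructor and generator documented above. It assumes the QUATALG_PINFTY and EXTREMAL_ORDERS constants defined in quaternion_data.c of this patch are visible, and that EXTREMAL_ORDERS[0].order is the standard maximal order O0; the function name is hypothetical.

static void lideal_from_generator_example(const quat_alg_elem_t *x, const ibz_t *N)
{
    quat_left_ideal_t lideal;
    quat_alg_elem_t gen;
    quat_left_ideal_init(&lideal);
    quat_alg_elem_init(&gen);

    /* lideal = O0*x + O0*N; if no integer divisor n > 1 of N divides x inside
     * the order, the resulting ideal has norm exactly N */
    quat_lideal_create(&lideal, x, N, &EXTREMAL_ORDERS[0].order, &QUATALG_PINFTY);

    /* recover a non-scalar generator, so that lideal = O0*gen + O0*norm */
    if (quat_lideal_generator(&gen, &lideal, &QUATALG_PINFTY)) {
        /* ... use gen ... */
    }

    quat_alg_elem_finalize(&gen);
    quat_left_ideal_finalize(&lideal);
}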
+ */ +void quat_lideal_copy(quat_left_ideal_t *copy, const quat_left_ideal_t *copied); + +/** + * @brief Conjugate of a left ideal (not in HNF) + * + * @param conj Output: Ideal conjugate to lideal, with norm and parent order correctly set, but its + * lattice not in HNF + * @param new_parent_order Output: Will be set to the right order of lideal, and serve as parent + * order for conj (so must have at least the lifetime of conj) + * @param lideal input left ideal (of which conj will be the conjugate) + * @param alg the quaternion algebra + */ +void quat_lideal_conjugate_without_hnf(quat_left_ideal_t *conj, + quat_lattice_t *new_parent_order, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg); + +/** + * @brief Intersection of two left ideals + * + * @param intersection Output: Left ideal which is the intersection of the 2 inputs + * @param lideal1 left ideal + * @param lideal2 left ideal + * @param alg the quaternion algebra + */ +void quat_lideal_inter(quat_left_ideal_t *intersection, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); + +/** + * @brief L2-reduce the basis of the left ideal, without considering its denominator + * + * This function reduces the basis of the lattice of the ideal, but it completely ignores its + * denominator. So the outputs of this function must still be divided by the appropriate power of + * lideal.lattice.denom. + * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param reduced Output: Lattice defining the ideal, which has its basis in an LLL-reduced form. + * Must be divided by lideal.lattice.denom before usage + * @param gram Output: Matrix of the quadratic form given by the norm on the basis of the reduced + * ideal, divided by the norm of the ideal + * @param lideal ideal whose basis will be reduced + * @param alg the quaternion algebra + */ +void quat_lideal_reduce_basis(ibz_mat_4x4_t *reduced, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg); // replaces lideal_lll + +/** + * @brief Multiplies two ideals and L2-reduces the lattice of the result + * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param prod Output: The product ideal with its lattice basis being L2-reduced + * @param gram Output: Gram matrix of the reduced norm (as quadratic but not bilinear form) on the + * basis of prod, divided by the norm of prod + * @param lideal1 Ideal at left in the product + * @param lideal2 Ideal at right in the product + * @param alg The quaternion algebra + */ +void quat_lideal_lideal_mul_reduced(quat_left_ideal_t *prod, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); + +/** + * @brief Replaces an ideal by a smaller equivalent one of prime norm + * + * @returns 1 if the computation succeeded and 0 otherwise + * @param lideal In- and Output: Ideal to be replaced + * @param alg The quaternion algebra + * @param primality_num_iter number of repetitions for primality testing + * @param equiv_bound_coeff bound on the coefficients for the candidates + */ +int quat_lideal_prime_norm_reduced_equivalent(quat_left_ideal_t *lideal, + const quat_alg_t *alg, + const int primality_num_iter, + const int equiv_bound_coeff); + +/** @}
+ */ + +// end quat_lideal_f +/** @} + */ + +/** @defgroup quat_normeq Functions specific to special extremal maximal orders + * @{ + */ + +/** + * @brief Representing an integer by the quadratic norm form of a maximal extremal order + * + * @returns 1 if the computation succeeded + * @param gamma Output: a quaternion element + * @param n_gamma Target norm of gamma. n_gamma must be odd. If n_gamma/(p*params.order->q) < + * 2^QUAT_repres_bound_input, failure is likely + * @param non_diag If set to 1 (instead of 0) and the order is O0, an additional property is ensured + * @param params Represent integer parameters specifying the algebra, the special extremal order, + * the number of trials for finding gamma and the number of iterations of the primality test. + * Special requirements apply if non-diag is set to 1 + * + * This algorithm finds a primitive quaternion element gamma of norm n_gamma inside any maximal extremal + * order. Failure is possible. Most efficient for the standard order. + * + * If non-diag is set to 1, this algorithm finds a primitive quaternion element gamma of norm n_gamma, with some + * special properties used in fixed degree isogeny computations, inside any maximal extremal order such + * that params->order->q=1 mod 4. Failure is possible. Most efficient for the standard order. The + * most important property is to avoid diagonal isogenies, meaning that the gamma returned by the + * algorithm must not be contained inside ZZ + 2 O where O is the maximal order params->order. When O + * is the special order O0 corresponding to j=1728, we further need to avoid endomorphisms of E0xE0, + * and there is another requirement. + * + * If non-diag is set to 1, the number of trials for finding gamma and the number of iterations of + * the primality test are taken from params, and the value of params->order->q is required to be 1 mod 4 + */ +int quat_represent_integer(quat_alg_elem_t *gamma, + const ibz_t *n_gamma, + int non_diag, + const quat_represent_integer_params_t *params); + +/** @brief Basis change to (1,i,(i+j)/2,(1+ij)/2) for elements of O0 + * + * Change the basis in which an element is given from 1,i,j,ij to (1,i,(i+j)/2,(1+ij)/2), the usual + * basis of the special maximal order O0. Only for elements of O0 + * + * @param vec Output: Coordinates of el in basis (1,i,(i+j)/2,(1+ij)/2) + * @param el Input: An algebra element in O0 + */ +void quat_change_to_O0_basis(ibz_vec_4_t *vec, const quat_alg_elem_t *el); + +/** + * @brief Random O0-ideal of given norm + * + * Much faster if norm is prime and is_prime is set to 1 + * + * @param lideal Output: O0-ideal of norm norm + * @param norm Norm of the ideal to be found + * @param is_prime Indicates if norm is prime: 1 if it is, 0 otherwise + * @param params Represent Integer parameters from the level-dependent constants + * @param prime_cofactor Prime distinct from the prime p defining the algebra but of similar size + * and coprime to norm. If is_prime is 1, it might be NULL.
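A minimal sketch (illustrative only, not part of the upstream patch) of filling quat_represent_integer_params_t and calling quat_represent_integer as documented above. It assumes QUAT_primality_num_iter from quaternion_constants.h, the EXTREMAL_ORDERS and QUATALG_PINFTY constants from quaternion_data.c, that EXTREMAL_ORDERS[0] is the standard extremal order, and that gamma was initialized by the caller; the function name is hypothetical.

static int represent_odd_integer_example(quat_alg_elem_t *gamma, const ibz_t *n_gamma)
{
    const quat_represent_integer_params_t params = {
        .primality_test_iterations = QUAT_primality_num_iter,
        .order = &EXTREMAL_ORDERS[0],  /* standard extremal maximal order */
        .algebra = &QUATALG_PINFTY,
    };
    /* non_diag = 0: no extra non-diagonal condition; n_gamma must be odd and
     * large enough (see the size condition documented above) */
    return quat_represent_integer(gamma, n_gamma, 0, &params);
}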
+ * @returns 1 if success, 0 if no ideal found or randomness failed + */ +int quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, + const ibz_t *norm, + int is_prime, + const quat_represent_integer_params_t *params, + const ibz_t *prime_cofactor); +// end quat_normeq +/** @} + */ +// end quat_quat +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion_constants.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion_constants.h new file mode 100644 index 0000000000..a2f4b52b93 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion_constants.h @@ -0,0 +1,6 @@ +#include +#define QUAT_primality_num_iter 32 +#define QUAT_repres_bound_input 21 +#define QUAT_equiv_bound_coeff 64 +#define FINDUV_box_size 3 +#define FINDUV_cube_size 2400 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion_data.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion_data.c new file mode 100644 index 0000000000..98b792431a --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion_data.c @@ -0,0 +1,3176 @@ +#include +#include +#include +const ibz_t QUAT_prime_cofactor = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x33,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x200}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x33,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x2000000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x33,0x0,0x0,0x0,0x0,0x0,0x0,0x200000000000000}}} +#endif +; +const quat_alg_t QUATALG_PINFTY = { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x1af}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x1afffff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0x1afffffffffffff}}} +#endif +}; +const quat_p_extremal_maximal_order_t EXTREMAL_ORDERS[7] = {{{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 
16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 1}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 
+{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc584,0x10dc,0x9924,0xb9c2,0x5a67,0xfbc8,0xbdfb,0xe941,0xc61,0xdee2,0xcd5c,0xc570,0xa2d4,0xb3ff,0x2e51,0x4927}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x10dcc584,0xb9c29924,0xfbc85a67,0xe941bdfb,0xdee20c61,0xc570cd5c,0xb3ffa2d4,0x49272e51}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb9c2992410dcc584,0xe941bdfbfbc85a67,0xc570cd5cdee20c61,0x49272e51b3ffa2d4}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc584,0x10dc,0x9924,0xb9c2,0x5a67,0xfbc8,0xbdfb,0xe941,0xc61,0xdee2,0xcd5c,0xc570,0xa2d4,0xb3ff,0x2e51,0x4927}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x10dcc584,0xb9c29924,0xfbc85a67,0xe941bdfb,0xdee20c61,0xc570cd5c,0xb3ffa2d4,0x49272e51}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb9c2992410dcc584,0xe941bdfbfbc85a67,0xc570cd5cdee20c61,0x49272e51b3ffa2d4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x62c2,0x86e,0x4c92,0xdce1,0x2d33,0xfde4,0xdefd,0xf4a0,0x630,0x6f71,0x66ae,0x62b8,0xd16a,0xd9ff,0x9728,0x2493}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x86e62c2,0xdce14c92,0xfde42d33,0xf4a0defd,0x6f710630,0x62b866ae,0xd9ffd16a,0x24939728}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdce14c92086e62c2,0xf4a0defdfde42d33,0x62b866ae6f710630,0x24939728d9ffd16a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xc93,0xd634,0x4632,0x353f,0x76ba,0x220d,0x5084,0x3187,0xb121,0xfc8,0x6860,0xa3e4,0x1368,0x7388,0x5e0,0x7e52}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xd6340c93,0x353f4632,0x220d76ba,0x31875084,0xfc8b121,0xa3e46860,0x73881368,0x7e5205e0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x353f4632d6340c93,0x31875084220d76ba,0xa3e468600fc8b121,0x7e5205e073881368}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x2f6d,0xbfbd,0x6af0,0xbcd3,0x5c61,0x8f62,0x9b0b,0xd78a,0x3142,0x61aa,0x4716,0x208,0x93c7,0x43bd,0x97d6,0xda1a,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xd7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbfbd2f6d,0xbcd36af0,0x8f625c61,0xd78a9b0b,0x61aa3142,0x2084716,0x43bd93c7,0xda1a97d6,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xd7ffff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbcd36af0bfbd2f6d,0xd78a9b0b8f625c61,0x208471661aa3142,0xda1a97d643bd93c7,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xd7ffffffffffff}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x62c2,0x86e,0x4c92,0xdce1,0x2d33,0xfde4,0xdefd,0xf4a0,0x630,0x6f71,0x66ae,0x62b8,0xd16a,0xd9ff,0x9728,0x2493}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x86e62c2,0xdce14c92,0xfde42d33,0xf4a0defd,0x6f710630,0x62b866ae,0xd9ffd16a,0x24939728}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdce14c92086e62c2,0xf4a0defdfde42d33,0x62b866ae6f710630,0x24939728d9ffd16a}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9add,0x156b,0x8705,0x6bb9,0x8bdf,0xd034,0x21a6,0xb827,0x44e9,0x34c7,0x3da3,0xa9fd,0xcebd,0x3ec0,0xcd63,0xca1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x156b9add,0x6bb98705,0xd0348bdf,0xb82721a6,0x34c744e9,0xa9fd3da3,0x3ec0cebd,0xca1cd63}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x6bb98705156b9add,0xb82721a6d0348bdf,0xa9fd3da334c744e9,0xca1cd633ec0cebd}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc584,0x10dc,0x9924,0xb9c2,0x5a67,0xfbc8,0xbdfb,0xe941,0xc61,0xdee2,0xcd5c,0xc570,0xa2d4,0xb3ff,0x2e51,0x4927}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x10dcc584,0xb9c29924,0xfbc85a67,0xe941bdfb,0xdee20c61,0xc570cd5c,0xb3ffa2d4,0x49272e51}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb9c2992410dcc584,0xe941bdfbfbc85a67,0xc570cd5cdee20c61,0x49272e51b3ffa2d4}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xc93,0xd634,0x4632,0x353f,0x76ba,0x220d,0x5084,0x3187,0xb121,0xfc8,0x6860,0xa3e4,0x1368,0x7388,0x5e0,0x7e52}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xd6340c93,0x353f4632,0x220d76ba,0x31875084,0xfc8b121,0xa3e46860,0x73881368,0x7e5205e0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x353f4632d6340c93,0x31875084220d76ba,0xa3e468600fc8b121,0x7e5205e073881368}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}} +#endif +}}, 5}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x169a,0xd34,0xbfd7,0x4089,0x1be8,0xc17d,0xfefb,0x6efd,0x76e0,0x58ea,0xfedb,0xd204,0x1e3,0x3b9a,0x47d6,0xdf28,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xd34169a,0x4089bfd7,0xc17d1be8,0x6efdfefb,0x58ea76e0,0xd204fedb,0x3b9a01e3,0xdf2847d6,0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x4089bfd70d34169a,0x6efdfefbc17d1be8,0xd204fedb58ea76e0,0xdf2847d63b9a01e3,0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x169a,0xd34,0xbfd7,0x4089,0x1be8,0xc17d,0xfefb,0x6efd,0x76e0,0x58ea,0xfedb,0xd204,0x1e3,0x3b9a,0x47d6,0xdf28,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xd34169a,0x4089bfd7,0xc17d1be8,0x6efdfefb,0x58ea76e0,0xd204fedb,0x3b9a01e3,0xdf2847d6,0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x4089bfd70d34169a,0x6efdfefbc17d1be8,0xd204fedb58ea76e0,0xdf2847d63b9a01e3,0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xb4d,0x869a,0xdfeb,0x2044,0x8df4,0xe0be,0xff7d,0x377e,0x3b70,0xac75,0x7f6d,0xe902,0xf1,0x1dcd,0x23eb,0x6f94,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x869a0b4d,0x2044dfeb,0xe0be8df4,0x377eff7d,0xac753b70,0xe9027f6d,0x1dcd00f1,0x6f9423eb,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2044dfeb869a0b4d,0x377eff7de0be8df4,0xe9027f6dac753b70,0x6f9423eb1dcd00f1,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x2412,0x6ec4,0x4dda,0x2cde,0x281d,0xaaa7,0x3a33,0xc1d6,0x5c26,0x22e3,0x816d,0x13fb,0xac81,0x58e8,0xd1a7,0xadaa,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x6ec42412,0x2cde4dda,0xaaa7281d,0xc1d63a33,0x22e35c26,0x13fb816d,0x58e8ac81,0xadaad1a7,0xc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x2cde4dda6ec42412,0xc1d63a33aaa7281d,0x13fb816d22e35c26,0xadaad1a758e8ac81,0xc}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 
32, ._mp_d = (mp_limb_t[]) {0x1f45,0x5630,0xd526,0x9cc7,0x1aab,0x114d,0x87b3,0xbb27,0xc6b6,0xe50,0x8bb4,0x813f,0xff7a,0xf810,0xa8d3,0x66ee,0xfffc,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x35f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x56301f45,0x9cc7d526,0x114d1aab,0xbb2787b3,0xe50c6b6,0x813f8bb4,0xf810ff7a,0x66eea8d3,0xfffffffc,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x35fffff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9cc7d52656301f45,0xbb2787b3114d1aab,0x813f8bb40e50c6b6,0x66eea8d3f810ff7a,0xfffffffffffffffc,0xffffffffffffffff,0xffffffffffffffff,0x35fffffffffffff}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xb4d,0x869a,0xdfeb,0x2044,0x8df4,0xe0be,0xff7d,0x377e,0x3b70,0xac75,0x7f6d,0xe902,0xf1,0x1dcd,0x23eb,0x6f94,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x869a0b4d,0x2044dfeb,0xe0be8df4,0x377eff7d,0xac753b70,0xe9027f6d,0x1dcd00f1,0x6f9423eb,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2044dfeb869a0b4d,0x377eff7de0be8df4,0xe9027f6dac753b70,0x6f9423eb1dcd00f1,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x233f,0x38d9,0x6fc1,0xa333,0xaeb,0xce6a,0x2a4c,0xa1c1,0x274c,0xa9fc,0xd4c6,0xb0b3,0x555b,0x7a48,0x411a,0x2bdc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x38d9233f,0xa3336fc1,0xce6a0aeb,0xa1c12a4c,0xa9fc274c,0xb0b3d4c6,0x7a48555b,0x2bdc411a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, 
._mp_d = (mp_limb_t[]) {0xa3336fc138d9233f,0xa1c12a4cce6a0aeb,0xb0b3d4c6a9fc274c,0x2bdc411a7a48555b}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x169a,0xd34,0xbfd7,0x4089,0x1be8,0xc17d,0xfefb,0x6efd,0x76e0,0x58ea,0xfedb,0xd204,0x1e3,0x3b9a,0x47d6,0xdf28,0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xd34169a,0x4089bfd7,0xc17d1be8,0x6efdfefb,0x58ea76e0,0xd204fedb,0x3b9a01e3,0xdf2847d6,0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x4089bfd70d34169a,0x6efdfefbc17d1be8,0xd204fedb58ea76e0,0xdf2847d63b9a01e3,0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x2412,0x6ec4,0x4dda,0x2cde,0x281d,0xaaa7,0x3a33,0xc1d6,0x5c26,0x22e3,0x816d,0x13fb,0xac81,0x58e8,0xd1a7,0xadaa,0xc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x6ec42412,0x2cde4dda,0xaaa7281d,0xc1d63a33,0x22e35c26,0x13fb816d,0x58e8ac81,0xadaad1a7,0xc}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x2cde4dda6ec42412,0xc1d63a33aaa7281d,0x13fb816d22e35c26,0xadaad1a758e8ac81,0xc}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 
+{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 37}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xc31e,0x416e,0xe2a5,0x25c2,0xa926,0x45fa,0x578b,0xf395,0x6ac9,0x7901,0xd14d,0xd089,0x9e5c,0xe062,0xbc4,0x583b,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x416ec31e,0x25c2e2a5,0x45faa926,0xf395578b,0x79016ac9,0xd089d14d,0xe0629e5c,0x583b0bc4,0x3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25c2e2a5416ec31e,0xf395578b45faa926,0xd089d14d79016ac9,0x583b0bc4e0629e5c,0x3}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xc31e,0x416e,0xe2a5,0x25c2,0xa926,0x45fa,0x578b,0xf395,0x6ac9,0x7901,0xd14d,0xd089,0x9e5c,0xe062,0xbc4,0x583b,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x416ec31e,0x25c2e2a5,0x45faa926,0xf395578b,0x79016ac9,0xd089d14d,0xe0629e5c,0x583b0bc4,0x3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25c2e2a5416ec31e,0xf395578b45faa926,0xd089d14d79016ac9,0x583b0bc4e0629e5c,0x3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x618f,0xa0b7,0x7152,0x12e1,0x5493,0xa2fd,0xabc5,0xf9ca,0xb564,0xbc80,0xe8a6,0x6844,0x4f2e,0x7031,0x85e2,0xac1d,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xa0b7618f,0x12e17152,0xa2fd5493,0xf9caabc5,0xbc80b564,0x6844e8a6,0x70314f2e,0xac1d85e2,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x12e17152a0b7618f,0xf9caabc5a2fd5493,0x6844e8a6bc80b564,0xac1d85e270314f2e,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4c2,0x1467,0x5829,0xf8ca,0x2d31,0xb661,0xa4a1,0x434a,0x25cb,0xbffa,0xe232,0x8565,0x2305,0xe42,0x3f64,0x710,0x11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x146704c2,0xf8ca5829,0xb6612d31,0x434aa4a1,0xbffa25cb,0x8565e232,0xe422305,0x7103f64,0x11}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xf8ca5829146704c2,0x434aa4a1b6612d31,0x8565e232bffa25cb,0x7103f640e422305,0x11}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 
+#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -32, ._mp_d = (mp_limb_t[]) {0x3b03,0xe541,0x6454,0x6f9,0x3808,0xb93,0x7509,0x2b52,0xed1,0xf4fe,0x8961,0x4869,0x4671,0xdd21,0x4c4c,0x70b0,0xfff9,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x35f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xe5413b03,0x6f96454,0xb933808,0x2b527509,0xf4fe0ed1,0x48698961,0xdd214671,0x70b04c4c,0xfffffff9,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x35fffff}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x6f96454e5413b03,0x2b5275090b933808,0x48698961f4fe0ed1,0x70b04c4cdd214671,0xfffffffffffffff9,0xffffffffffffffff,0xffffffffffffffff,0x35fffffffffffff}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x618f,0xa0b7,0x7152,0x12e1,0x5493,0xa2fd,0xabc5,0xf9ca,0xb564,0xbc80,0xe8a6,0x6844,0x4f2e,0x7031,0x85e2,0xac1d,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xa0b7618f,0x12e17152,0xa2fd5493,0xf9caabc5,0xbc80b564,0x6844e8a6,0x70314f2e,0xac1d85e2,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x12e17152a0b7618f,0xf9caabc5a2fd5493,0x6844e8a6bc80b564,0xac1d85e270314f2e,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xe953,0xf5ac,0x2ee2,0xc962,0x459d,0xd9a0,0xd761,0x7c5a,0x8268,0xcf36,0x9b08,0xb7a6,0xbd23,0x7e04,0xe324,0x23ba}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) 
{0xf5ace953,0xc9622ee2,0xd9a0459d,0x7c5ad761,0xcf368268,0xb7a69b08,0x7e04bd23,0x23bae324}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xc9622ee2f5ace953,0x7c5ad761d9a0459d,0xb7a69b08cf368268,0x23bae3247e04bd23}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xc31e,0x416e,0xe2a5,0x25c2,0xa926,0x45fa,0x578b,0xf395,0x6ac9,0x7901,0xd14d,0xd089,0x9e5c,0xe062,0xbc4,0x583b,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x416ec31e,0x25c2e2a5,0x45faa926,0xf395578b,0x79016ac9,0xd089d14d,0xe0629e5c,0x583b0bc4,0x3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25c2e2a5416ec31e,0xf395578b45faa926,0xd089d14d79016ac9,0x583b0bc4e0629e5c,0x3}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4c2,0x1467,0x5829,0xf8ca,0x2d31,0xb661,0xa4a1,0x434a,0x25cb,0xbffa,0xe232,0x8565,0x2305,0xe42,0x3f64,0x710,0x11}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x146704c2,0xf8ca5829,0xb6612d31,0x434aa4a1,0xbffa25cb,0x8565e232,0xe422305,0x7103f64,0x11}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xf8ca5829146704c2,0x434aa4a1b6612d31,0x8565e232bffa25cb,0x7103f640e422305,0x11}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, 
._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 61}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xf94,0x28f0,0x8955,0xb7aa,0x538f,0x65b6,0x5a15,0xd3b4,0x9667,0xcba4,0x3554,0x11f8,0x25a9,0x6265,0x196a,0x83d,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x28f00f94,0xb7aa8955,0x65b6538f,0xd3b45a15,0xcba49667,0x11f83554,0x626525a9,0x83d196a,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xb7aa895528f00f94,0xd3b45a1565b6538f,0x11f83554cba49667,0x83d196a626525a9,0x1}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xf94,0x28f0,0x8955,0xb7aa,0x538f,0x65b6,0x5a15,0xd3b4,0x9667,0xcba4,0x3554,0x11f8,0x25a9,0x6265,0x196a,0x83d,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x28f00f94,0xb7aa8955,0x65b6538f,0xd3b45a15,0xcba49667,0x11f83554,0x626525a9,0x83d196a,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xb7aa895528f00f94,0xd3b45a1565b6538f,0x11f83554cba49667,0x83d196a626525a9,0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7ca,0x9478,0x44aa,0xdbd5,0x29c7,0xb2db,0x2d0a,0xe9da,0x4b33,0x65d2,0x1aaa,0x88fc,0x92d4,0x3132,0x8cb5,0x841e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x947807ca,0xdbd544aa,0xb2db29c7,0xe9da2d0a,0x65d24b33,0x88fc1aaa,0x313292d4,0x841e8cb5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdbd544aa947807ca,0xe9da2d0ab2db29c7,0x88fc1aaa65d24b33,0x841e8cb5313292d4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x5409,0xed37,0x7339,0xcbe7,0x95ab,0x2582,0x18fa,0xcc9d,0x13ae,0x2543,0xaefd,0xb168,0xfdbc,0x68b,0xc9af,0x6d9d,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xed375409,0xcbe77339,0x258295ab,0xcc9d18fa,0x254313ae,0xb168aefd,0x68bfdbc,0x6d9dc9af,0x6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xcbe77339ed375409,0xcc9d18fa258295ab,0xb168aefd254313ae,0x6d9dc9af068bfdbc,0x6}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d 
= (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -32, ._mp_d = (mp_limb_t[]) {0x7013,0x423f,0x42b7,0x3f3d,0x82a,0x9883,0x52bf,0xfede,0x8018,0xa449,0xf571,0xb8a,0x3139,0xbe7,0x439d,0x9e1f,0x2,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xd8}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x423f7013,0x3f3d42b7,0x9883082a,0xfede52bf,0xa4498018,0xb8af571,0xbe73139,0x9e1f439d,0x2,0x0,0x0,0x0,0x0,0x0,0x0,0xd80000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x3f3d42b7423f7013,0xfede52bf9883082a,0xb8af571a4498018,0x9e1f439d0be73139,0x2,0x0,0x0,0xd8000000000000}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7ca,0x9478,0x44aa,0xdbd5,0x29c7,0xb2db,0x2d0a,0xe9da,0x4b33,0x65d2,0x1aaa,0x88fc,0x92d4,0x3132,0x8cb5,0x841e}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x947807ca,0xdbd544aa,0xb2db29c7,0xe9da2d0a,0x65d24b33,0x88fc1aaa,0x313292d4,0x841e8cb5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdbd544aa947807ca,0xe9da2d0ab2db29c7,0x88fc1aaa65d24b33,0x841e8cb5313292d4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca2d,0x34af,0xea29,0x177b,0x91ed,0x86ca,0x588a,0xe94d,0x55df,0x4621,0xa1e4,0x67d7,0xb617,0x6a1,0x88f5,0x87b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x34afca2d,0x177bea29,0x86ca91ed,0xe94d588a,0x462155df,0x67d7a1e4,0x6a1b617,0x87b88f5}}} +#elif GMP_LIMB_BITS == 
64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x177bea2934afca2d,0xe94d588a86ca91ed,0x67d7a1e4462155df,0x87b88f506a1b617}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xf94,0x28f0,0x8955,0xb7aa,0x538f,0x65b6,0x5a15,0xd3b4,0x9667,0xcba4,0x3554,0x11f8,0x25a9,0x6265,0x196a,0x83d,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x28f00f94,0xb7aa8955,0x65b6538f,0xd3b45a15,0xcba49667,0x11f83554,0x626525a9,0x83d196a,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xb7aa895528f00f94,0xd3b45a1565b6538f,0x11f83554cba49667,0x83d196a626525a9,0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x5409,0xed37,0x7339,0xcbe7,0x95ab,0x2582,0x18fa,0xcc9d,0x13ae,0x2543,0xaefd,0xb168,0xfdbc,0x68b,0xc9af,0x6d9d,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xed375409,0xcbe77339,0x258295ab,0xcc9d18fa,0x254313ae,0xb168aefd,0x68bfdbc,0x6d9dc9af,0x6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xcbe77339ed375409,0xcc9d18fa258295ab,0xb168aefd254313ae,0x6d9dc9af068bfdbc,0x6}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 97}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1e34,0xdf45,0xcfa6,0xa203,0xe18a,0xd8c5,0x1d42,0x7a30,0x86ce,0x7a12,0xf933,0x741d,0x6b48,0xe48e,0xbf65,0x1854}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdf451e34,0xa203cfa6,0xd8c5e18a,0x7a301d42,0x7a1286ce,0x741df933,0xe48e6b48,0x1854bf65}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa203cfa6df451e34,0x7a301d42d8c5e18a,0x741df9337a1286ce,0x1854bf65e48e6b48}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1e34,0xdf45,0xcfa6,0xa203,0xe18a,0xd8c5,0x1d42,0x7a30,0x86ce,0x7a12,0xf933,0x741d,0x6b48,0xe48e,0xbf65,0x1854}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdf451e34,0xa203cfa6,0xd8c5e18a,0x7a301d42,0x7a1286ce,0x741df933,0xe48e6b48,0x1854bf65}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa203cfa6df451e34,0x7a301d42d8c5e18a,0x741df9337a1286ce,0x1854bf65e48e6b48}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8f1a,0x6fa2,0xe7d3,0x5101,0xf0c5,0x6c62,0xea1,0x3d18,0x4367,0xbd09,0xfc99,0x3a0e,0x35a4,0xf247,0x5fb2,0xc2a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6fa28f1a,0x5101e7d3,0x6c62f0c5,0x3d180ea1,0xbd094367,0x3a0efc99,0xf24735a4,0xc2a5fb2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5101e7d36fa28f1a,0x3d180ea16c62f0c5,0x3a0efc99bd094367,0xc2a5fb2f24735a4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcf03,0xa322,0x8523,0x93d,0x3bc,0x4b50,0x33ca,0x56b5,0x863f,0x87c8,0x38bf,0x98c6,0x8ce8,0xfab7,0xfc02,0x78ed}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa322cf03,0x93d8523,0x4b5003bc,0x56b533ca,0x87c8863f,0x98c638bf,0xfab78ce8,0x78edfc02}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x93d8523a322cf03,0x56b533ca4b5003bc,0x98c638bf87c8863f,0x78edfc02fab78ce8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x24ed,0x1400,0x74a1,0x1310,0xce8a,0x1c0d,0x512a,0x3500,0x2451,0x6992,0x892c,0x3cdb,0x45d8,0x520,0x420,0xf11f,0x6cb,0xbe4d,0xd06c,0xcbe4,0x4d06,0x6cbe,0xe4d0,0x6cb,0xbe4d,0xd06c,0xcbe4,0x4d06,0x6cbe,0xe4d0,0x6cb,0x15}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x140024ed,0x131074a1,0x1c0dce8a,0x3500512a,0x69922451,0x3cdb892c,0x52045d8,0xf11f0420,0xbe4d06cb,0xcbe4d06c,0x6cbe4d06,0x6cbe4d0,0xd06cbe4d,0x4d06cbe4,0xe4d06cbe,0x1506cb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x131074a1140024ed,0x3500512a1c0dce8a,0x3cdb892c69922451,0xf11f0420052045d8,0xcbe4d06cbe4d06cb,0x6cbe4d06cbe4d06,0x4d06cbe4d06cbe4d,0x1506cbe4d06cbe}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8f1a,0x6fa2,0xe7d3,0x5101,0xf0c5,0x6c62,0xea1,0x3d18,0x4367,0xbd09,0xfc99,0x3a0e,0x35a4,0xf247,0x5fb2,0xc2a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6fa28f1a,0x5101e7d3,0x6c62f0c5,0x3d180ea1,0xbd094367,0x3a0efc99,0xf24735a4,0xc2a5fb2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5101e7d36fa28f1a,0x3d180ea16c62f0c5,0x3a0efc99bd094367,0xc2a5fb2f24735a4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x98b3,0xd2e,0x314c,0x5199,0x7a5a,0xb592,0xbd65,0x1ef7,0x7d32,0x94fd,0x6cfe,0x68e3,0xcda6,0x8d91,0xfb73,0x88}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xd2e98b3,0x5199314c,0xb5927a5a,0x1ef7bd65,0x94fd7d32,0x68e36cfe,0x8d91cda6,0x88fb73}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc 
= 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x5199314c0d2e98b3,0x1ef7bd65b5927a5a,0x68e36cfe94fd7d32,0x88fb738d91cda6}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1e34,0xdf45,0xcfa6,0xa203,0xe18a,0xd8c5,0x1d42,0x7a30,0x86ce,0x7a12,0xf933,0x741d,0x6b48,0xe48e,0xbf65,0x1854}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdf451e34,0xa203cfa6,0xd8c5e18a,0x7a301d42,0x7a1286ce,0x741df933,0xe48e6b48,0x1854bf65}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa203cfa6df451e34,0x7a301d42d8c5e18a,0x741df9337a1286ce,0x1854bf65e48e6b48}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcf03,0xa322,0x8523,0x93d,0x3bc,0x4b50,0x33ca,0x56b5,0x863f,0x87c8,0x38bf,0x98c6,0x8ce8,0xfab7,0xfc02,0x78ed}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa322cf03,0x93d8523,0x4b5003bc,0x56b533ca,0x87c8863f,0x98c638bf,0xfab78ce8,0x78edfc02}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x93d8523a322cf03,0x56b533ca4b5003bc,0x98c638bf87c8863f,0x78edfc02fab78ce8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 113}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x779c,0xdc0d,0x3c7f,0x2e51,0x85b9,0xfb7d,0xb29e,0xd38c,0x64bb,0x9ea5,0xa4fa,0x79a3,0xdf70,0xf359,0xca21,0xa977,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xdc0d779c,0x2e513c7f,0xfb7d85b9,0xd38cb29e,0x9ea564bb,0x79a3a4fa,0xf359df70,0xa977ca21,0x6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2e513c7fdc0d779c,0xd38cb29efb7d85b9,0x79a3a4fa9ea564bb,0xa977ca21f359df70,0x6}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x779c,0xdc0d,0x3c7f,0x2e51,0x85b9,0xfb7d,0xb29e,0xd38c,0x64bb,0x9ea5,0xa4fa,0x79a3,0xdf70,0xf359,0xca21,0xa977,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xdc0d779c,0x2e513c7f,0xfb7d85b9,0xd38cb29e,0x9ea564bb,0x79a3a4fa,0xf359df70,0xa977ca21,0x6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2e513c7fdc0d779c,0xd38cb29efb7d85b9,0x79a3a4fa9ea564bb,0xa977ca21f359df70,0x6}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xbbce,0xee06,0x9e3f,0x9728,0xc2dc,0x7dbe,0x594f,0xe9c6,0xb25d,0x4f52,0xd27d,0x3cd1,0xefb8,0xf9ac,0xe510,0x54bb,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xee06bbce,0x97289e3f,0x7dbec2dc,0xe9c6594f,0x4f52b25d,0x3cd1d27d,0xf9acefb8,0x54bbe510,0x3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x97289e3fee06bbce,0xe9c6594f7dbec2dc,0x3cd1d27d4f52b25d,0x54bbe510f9acefb8,0x3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x8db1,0xaa9d,0x1944,0x727a,0xc6c3,0xffc0,0x39b4,0x5643,0x2de0,0xb534,0xc0a9,0x5371,0x8e58,0x80df,0xa6c4,0x5a83,0x36}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xaa9d8db1,0x727a1944,0xffc0c6c3,0x564339b4,0xb5342de0,0x5371c0a9,0x80df8e58,0x5a83a6c4,0x36}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x727a1944aa9d8db1,0x564339b4ffc0c6c3,0x5371c0a9b5342de0,0x5a83a6c480df8e58,0x36}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -32, ._mp_d = (mp_limb_t[]) {0x9c90,0x5de8,0xf815,0x67c5,0x989,0xc9,0x7c9e,0x180b,0x526d,0xdf5a,0x3386,0xea88,0x580a,0x24c5,0x5507,0x3bad,0x10,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x438}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x5de89c90,0x67c5f815,0xc90989,0x180b7c9e,0xdf5a526d,0xea883386,0x24c5580a,0x3bad5507,0x10,0x0,0x0,0x0,0x0,0x0,0x0,0x4380000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x67c5f8155de89c90,0x180b7c9e00c90989,0xea883386df5a526d,0x3bad550724c5580a,0x10,0x0,0x0,0x438000000000000}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xbbce,0xee06,0x9e3f,0x9728,0xc2dc,0x7dbe,0x594f,0xe9c6,0xb25d,0x4f52,0xd27d,0x3cd1,0xefb8,0xf9ac,0xe510,0x54bb,0x3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xee06bbce,0x97289e3f,0x7dbec2dc,0xe9c6594f,0x4f52b25d,0x3cd1d27d,0xf9acefb8,0x54bbe510,0x3}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x97289e3fee06bbce,0xe9c6594f7dbec2dc,0x3cd1d27d4f52b25d,0x54bbe510f9acefb8,0x3}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa1f8,0x1530,0xa6be,0x126c,0xfd3b,0xbdd9,0xb3bc,0x8495,0x5457,0x1985,0xcfae,0xf440,0x4ea6,0x84ba,0x6881,0x2eb1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1530a1f8,0x126ca6be,0xbdd9fd3b,0x8495b3bc,0x19855457,0xf440cfae,0x84ba4ea6,0x2eb16881}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x126ca6be1530a1f8,0x8495b3bcbdd9fd3b,0xf440cfae19855457,0x2eb1688184ba4ea6}}} +#endif +}}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x779c,0xdc0d,0x3c7f,0x2e51,0x85b9,0xfb7d,0xb29e,0xd38c,0x64bb,0x9ea5,0xa4fa,0x79a3,0xdf70,0xf359,0xca21,0xa977,0x6}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xdc0d779c,0x2e513c7f,0xfb7d85b9,0xd38cb29e,0x9ea564bb,0x79a3a4fa,0xf359df70,0xa977ca21,0x6}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2e513c7fdc0d779c,0xd38cb29efb7d85b9,0x79a3a4fa9ea564bb,0xa977ca21f359df70,0x6}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x8db1,0xaa9d,0x1944,0x727a,0xc6c3,0xffc0,0x39b4,0x5643,0x2de0,0xb534,0xc0a9,0x5371,0x8e58,0x80df,0xa6c4,0x5a83,0x36}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xaa9d8db1,0x727a1944,0xffc0c6c3,0x564339b4,0xb5342de0,0x5371c0a9,0x80df8e58,0x5a83a6c4,0x36}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x727a1944aa9d8db1,0x564339b4ffc0c6c3,0x5371c0a9b5342de0,0x5a83a6c480df8e58,0x36}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 
0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, 149}}; +const quat_left_ideal_t CONNECTING_IDEALS[7] = {{{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} 
+#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb4ca,0xbe8d,0xcee3,0x9669,0x9cb,0x86eb,0xf6f9,0x374b,0x2e68,0xd1f2,0x3315,0xab5f,0x2208,0xa9c9,0x686e,0x2541}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbe8db4ca,0x9669cee3,0x86eb09cb,0x374bf6f9,0xd1f22e68,0xab5f3315,0xa9c92208,0x2541686e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9669cee3be8db4ca,0x374bf6f986eb09cb,0xab5f3315d1f22e68,0x2541686ea9c92208}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdb03,0x2777,0xbc36,0x4be5,0x38dd,0xd474,0x83b4,0x41a7,0x5426,0xa361,0x1f00,0xc617,0xe350,0x8cb4,0x2b1c,0xaa2}}} +#elif 
GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2777db03,0x4be5bc36,0xd47438dd,0x41a783b4,0xa3615426,0xc6171f00,0x8cb4e350,0xaa22b1c}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4be5bc362777db03,0x41a783b4d47438dd,0xc6171f00a3615426,0xaa22b1c8cb4e350}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb4ca,0xbe8d,0xcee3,0x9669,0x9cb,0x86eb,0xf6f9,0x374b,0x2e68,0xd1f2,0x3315,0xab5f,0x2208,0xa9c9,0x686e,0x2541}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbe8db4ca,0x9669cee3,0x86eb09cb,0x374bf6f9,0xd1f22e68,0xab5f3315,0xa9c92208,0x2541686e}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9669cee3be8db4ca,0x374bf6f986eb09cb,0xab5f3315d1f22e68,0x2541686ea9c92208}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd9c7,0x9715,0x12ad,0x4a84,0xd0ee,0xb276,0x7344,0xf5a4,0xda41,0x2e90,0x1415,0xe548,0x3eb7,0x1d14,0x3d52,0x1a9f}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9715d9c7,0x4a8412ad,0xb276d0ee,0xf5a47344,0x2e90da41,0xe5481415,0x1d143eb7,0x1a9f3d52}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4a8412ad9715d9c7,0xf5a47344b276d0ee,0xe54814152e90da41,0x1a9f3d521d143eb7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS 
== 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xda65,0xdf46,0xe771,0xcb34,0x84e5,0xc375,0xfb7c,0x1ba5,0x1734,0xe8f9,0x998a,0x55af,0x9104,0x54e4,0xb437,0x12a0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdf46da65,0xcb34e771,0xc37584e5,0x1ba5fb7c,0xe8f91734,0x55af998a,0x54e49104,0x12a0b437}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcb34e771df46da65,0x1ba5fb7cc37584e5,0x55af998ae8f91734,0x12a0b43754e49104}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8412,0x5a4d,0xe982,0x7e48,0x619e,0x6d03,0x297c,0x2598,0x6aff,0x24ff,0xc89e,0x51c8,0x6f8,0x6965,0x7e7b,0x13de}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a4d8412,0x7e48e982,0x6d03619e,0x2598297c,0x24ff6aff,0x51c8c89e,0x696506f8,0x13de7e7b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7e48e9825a4d8412,0x2598297c6d03619e,0x51c8c89e24ff6aff,0x13de7e7b696506f8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6e7d,0xd8b2,0x8be,0xf2e3,0x7c3e,0x1572,0x7609,0xf4ae,0x8366,0xb93e,0x53ec,0x9b03,0x6573,0xae18,0x41b0,0x707}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd8b26e7d,0xf2e308be,0x15727c3e,0xf4ae7609,0xb93e8366,0x9b0353ec,0xae186573,0x70741b0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf2e308bed8b26e7d,0xf4ae760915727c3e,0x9b0353ecb93e8366,0x70741b0ae186573}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS 
== 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8412,0x5a4d,0xe982,0x7e48,0x619e,0x6d03,0x297c,0x2598,0x6aff,0x24ff,0xc89e,0x51c8,0x6f8,0x6965,0x7e7b,0x13de}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a4d8412,0x7e48e982,0x6d03619e,0x2598297c,0x24ff6aff,0x51c8c89e,0x696506f8,0x13de7e7b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7e48e9825a4d8412,0x2598297c6d03619e,0x51c8c89e24ff6aff,0x13de7e7b696506f8}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1595,0x819b,0xe0c3,0x8b65,0xe55f,0x5790,0xb373,0x30e9,0xe798,0x6bc0,0x74b1,0xb6c5,0xa184,0xbb4c,0x3cca,0xcd7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x819b1595,0x8b65e0c3,0x5790e55f,0x30e9b373,0x6bc0e798,0xb6c574b1,0xbb4ca184,0xcd73cca}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8b65e0c3819b1595,0x30e9b3735790e55f,0xb6c574b16bc0e798,0xcd73ccabb4ca184}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif 
GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc209,0x2d26,0x74c1,0x3f24,0xb0cf,0x3681,0x14be,0x92cc,0xb57f,0x127f,0x644f,0x28e4,0x837c,0xb4b2,0x3f3d,0x9ef}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2d26c209,0x3f2474c1,0x3681b0cf,0x92cc14be,0x127fb57f,0x28e4644f,0xb4b2837c,0x9ef3f3d}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3f2474c12d26c209,0x92cc14be3681b0cf,0x28e4644f127fb57f,0x9ef3f3db4b2837c}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb376,0x7694,0x643d,0xf407,0xf5c,0x6e43,0xd345,0x5c1f,0xecc4,0x777d,0x1005,0x24fe,0x88e4,0x536a,0x5c85,0xe09}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7694b376,0xf407643d,0x6e430f5c,0x5c1fd345,0x777decc4,0x24fe1005,0x536a88e4,0xe095c85}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf407643d7694b376,0x5c1fd3456e430f5c,0x24fe1005777decc4,0xe095c85536a88e4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9427,0xa69c,0xda24,0xb3a7,0x4f9a,0x22fc,0xa39a,0xcb05,0xd93e,0x923d,0xb97d,0xad95,0x3374,0x96bd,0xbdeb,0x51}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa69c9427,0xb3a7da24,0x22fc4f9a,0xcb05a39a,0x923dd93e,0xad95b97d,0x96bd3374,0x51bdeb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb3a7da24a69c9427,0xcb05a39a22fc4f9a,0xad95b97d923dd93e,0x51bdeb96bd3374}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb376,0x7694,0x643d,0xf407,0xf5c,0x6e43,0xd345,0x5c1f,0xecc4,0x777d,0x1005,0x24fe,0x88e4,0x536a,0x5c85,0xe09}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0x7694b376,0xf407643d,0x6e430f5c,0x5c1fd345,0x777decc4,0x24fe1005,0x536a88e4,0xe095c85}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf407643d7694b376,0x5c1fd3456e430f5c,0x24fe1005777decc4,0xe095c85536a88e4}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1f4f,0xcff8,0x8a18,0x405f,0xbfc2,0x4b46,0x2fab,0x911a,0x1385,0xe540,0x5687,0x7768,0x556f,0xbcad,0x9e99,0xdb7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcff81f4f,0x405f8a18,0x4b46bfc2,0x911a2fab,0xe5401385,0x77685687,0xbcad556f,0xdb79e99}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x405f8a18cff81f4f,0x911a2fab4b46bfc2,0x77685687e5401385,0xdb79e99bcad556f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = 
(mp_limb_t[]) {0x59bb,0xbb4a,0xb21e,0x7a03,0x87ae,0xb721,0xe9a2,0x2e0f,0xf662,0xbbbe,0x802,0x127f,0x4472,0xa9b5,0xae42,0x704}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbb4a59bb,0x7a03b21e,0xb72187ae,0x2e0fe9a2,0xbbbef662,0x127f0802,0xa9b54472,0x704ae42}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7a03b21ebb4a59bb,0x2e0fe9a2b72187ae,0x127f0802bbbef662,0x704ae42a9b54472}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7a2a,0xcc34,0x1fb9,0x5b4e,0x6acf,0x4f0f,0xbb68,0x211d,0xa57b,0xae74,0x782,0xa512,0xd75c,0xb576,0x5af5,0xa035}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcc347a2a,0x5b4e1fb9,0x4f0f6acf,0x211dbb68,0xae74a57b,0xa5120782,0xb576d75c,0xa0355af5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5b4e1fb9cc347a2a,0x211dbb684f0f6acf,0xa5120782ae74a57b,0xa0355af5b576d75c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa3e3,0x12fb,0x32f3,0xb40f,0x4bbe,0x537d,0xbefc,0xdda9,0x8954,0xaca9,0xaaf3,0xc020,0x17da,0xf48f,0x88fd,0x21a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x12fba3e3,0xb40f32f3,0x537d4bbe,0xdda9befc,0xaca98954,0xc020aaf3,0xf48f17da,0x21a88fd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb40f32f312fba3e3,0xdda9befc537d4bbe,0xc020aaf3aca98954,0x21a88fdf48f17da}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7a2a,0xcc34,0x1fb9,0x5b4e,0x6acf,0x4f0f,0xbb68,0x211d,0xa57b,0xae74,0x782,0xa512,0xd75c,0xb576,0x5af5,0xa035}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcc347a2a,0x5b4e1fb9,0x4f0f6acf,0x211dbb68,0xae74a57b,0xa5120782,0xb576d75c,0xa0355af5}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5b4e1fb9cc347a2a,0x211dbb684f0f6acf,0xa5120782ae74a57b,0xa0355af5b576d75c}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd647,0xb938,0xecc6,0xa73e,0x1f10,0xfb92,0xfc6b,0x4373,0x1c26,0x1cb,0x5c8f,0xe4f1,0xbf81,0xc0e7,0xd1f7,0x9e1a}}} 
+#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb938d647,0xa73eecc6,0xfb921f10,0x4373fc6b,0x1cb1c26,0xe4f15c8f,0xc0e7bf81,0x9e1ad1f7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa73eecc6b938d647,0x4373fc6bfb921f10,0xe4f15c8f01cb1c26,0x9e1ad1f7c0e7bf81}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3d15,0xe61a,0xfdc,0xada7,0xb567,0x2787,0xddb4,0x908e,0x52bd,0x573a,0x3c1,0x5289,0x6bae,0xdabb,0xad7a,0x501a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe61a3d15,0xada70fdc,0x2787b567,0x908eddb4,0x573a52bd,0x528903c1,0xdabb6bae,0x501aad7a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xada70fdce61a3d15,0x908eddb42787b567,0x528903c1573a52bd,0x501aad7adabb6bae}}} 
+#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81d6,0x1f29,0xf1a,0x365e,0x8f4a,0x95c8,0x38b1,0x87f1,0xb9ff,0x9cca,0x8239,0x1cb1,0x70f,0x8fde,0x5f3f,0x25be}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1f2981d6,0x365e0f1a,0x95c88f4a,0x87f138b1,0x9ccab9ff,0x1cb18239,0x8fde070f,0x25be5f3f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x365e0f1a1f2981d6,0x87f138b195c88f4a,0x1cb182399ccab9ff,0x25be5f3f8fde070f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbe61,0x7c4c,0xa49c,0x48f9,0x3fb4,0xb80a,0xf823,0xd427,0x593e,0x9ec7,0x3a56,0xda06,0x551e,0x2875,0xc2cb,0xfb0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7c4cbe61,0x48f9a49c,0xb80a3fb4,0xd427f823,0x9ec7593e,0xda063a56,0x2875551e,0xfb0c2cb}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x48f9a49c7c4cbe61,0xd427f823b80a3fb4,0xda063a569ec7593e,0xfb0c2cb2875551e}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81d6,0x1f29,0xf1a,0x365e,0x8f4a,0x95c8,0x38b1,0x87f1,0xb9ff,0x9cca,0x8239,0x1cb1,0x70f,0x8fde,0x5f3f,0x25be}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1f2981d6,0x365e0f1a,0x95c88f4a,0x87f138b1,0x9ccab9ff,0x1cb18239,0x8fde070f,0x25be5f3f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x365e0f1a1f2981d6,0x87f138b195c88f4a,0x1cb182399ccab9ff,0x25be5f3f8fde070f}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc375,0xa2dc,0x6a7d,0xed64,0x4f95,0xddbe,0x408d,0xb3c9,0x60c0,0xfe03,0x47e2,0x42ab,0xb1f0,0x6768,0x9c74,0x160d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa2dcc375,0xed646a7d,0xddbe4f95,0xb3c9408d,0xfe0360c0,0x42ab47e2,0x6768b1f0,0x160d9c74}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xed646a7da2dcc375,0xb3c9408dddbe4f95,0x42ab47e2fe0360c0,0x160d9c746768b1f0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif 
GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc0eb,0xf94,0x78d,0x1b2f,0x47a5,0xcae4,0x9c58,0xc3f8,0x5cff,0xce65,0xc11c,0x8e58,0x387,0xc7ef,0x2f9f,0x12df}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf94c0eb,0x1b2f078d,0xcae447a5,0xc3f89c58,0xce655cff,0x8e58c11c,0xc7ef0387,0x12df2f9f}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1b2f078d0f94c0eb,0xc3f89c58cae447a5,0x8e58c11cce655cff,0x12df2f9fc7ef0387}}} +#endif +, &MAXORD_O0}, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x5a86,0x1729,0x8ced,0x8280,0xd48f,0x1e0f,0x5e39,0x24b3,0x74ba,0xa294,0xd9f3,0x4e2e,0x8cc1,0xee6b,0xdd2,0x3079}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17295a86,0x82808ced,0x1e0fd48f,0x24b35e39,0xa29474ba,0x4e2ed9f3,0xee6b8cc1,0x30790dd2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x82808ced17295a86,0x24b35e391e0fd48f,0x4e2ed9f3a29474ba,0x30790dd2ee6b8cc1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9203,0x57ee,0x3867,0xdf50,0xd8ad,0xbe9c,0x9e30,0x7a77,0xcd0f,0x77d9,0xbb7f,0x65f1,0x1b16,0xbbf5,0xe5c0,0x2563}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57ee9203,0xdf503867,0xbe9cd8ad,0x7a779e30,0x77d9cd0f,0x65f1bb7f,0xbbf51b16,0x2563e5c0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdf50386757ee9203,0x7a779e30be9cd8ad,0x65f1bb7f77d9cd0f,0x2563e5c0bbf51b16}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5a86,0x1729,0x8ced,0x8280,0xd48f,0x1e0f,0x5e39,0x24b3,0x74ba,0xa294,0xd9f3,0x4e2e,0x8cc1,0xee6b,0xdd2,0x3079}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17295a86,0x82808ced,0x1e0fd48f,0x24b35e39,0xa29474ba,0x4e2ed9f3,0xee6b8cc1,0x30790dd2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x82808ced17295a86,0x24b35e391e0fd48f,0x4e2ed9f3a29474ba,0x30790dd2ee6b8cc1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc883,0xbf3a,0x5485,0xa330,0xfbe1,0x5f72,0xc008,0xaa3b,0xa7aa,0x2aba,0x1e74,0xe83d,0x71aa,0x3276,0x2812,0xb15}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbf3ac883,0xa3305485,0x5f72fbe1,0xaa3bc008,0x2abaa7aa,0xe83d1e74,0x327671aa,0xb152812}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa3305485bf3ac883,0xaa3bc0085f72fbe1,0xe83d1e742abaa7aa,0xb152812327671aa}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 
0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}}, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad43,0x8b94,0x4676,0xc140,0xea47,0x8f07,0xaf1c,0x1259,0x3a5d,0xd14a,0x6cf9,0xa717,0xc660,0x7735,0x86e9,0x183c}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8b94ad43,0xc1404676,0x8f07ea47,0x1259af1c,0xd14a3a5d,0xa7176cf9,0x7735c660,0x183c86e9}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc14046768b94ad43,0x1259af1c8f07ea47,0xa7176cf9d14a3a5d,0x183c86e97735c660}}} +#endif +, &MAXORD_O0}}; +const quat_alg_elem_t CONJUGATING_ELEMENTS[7] = {{ +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 
64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8fcd,0xe605,0x8b19,0xe24f,0x42a8,0x5b5f,0x7aae,0x78f3,0x828e,0x7553,0x5216,0x7176,0x559,0x367e,0x938b,0x2fe3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe6058fcd,0xe24f8b19,0x5b5f42a8,0x78f37aae,0x7553828e,0x71765216,0x367e0559,0x2fe3938b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe24f8b19e6058fcd,0x78f37aae5b5f42a8,0x717652167553828e,0x2fe3938b367e0559}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x8fcd,0xe605,0x8b19,0xe24f,0x42a8,0x5b5f,0x7aae,0x78f3,0x828e,0x7553,0x5216,0x7176,0x559,0x367e,0x938b,0x2fe3}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xe6058fcd,0xe24f8b19,0x5b5f42a8,0x78f37aae,0x7553828e,0x71765216,0x367e0559,0x2fe3938b}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe24f8b19e6058fcd,0x78f37aae5b5f42a8,0x717652167553828e,0x2fe3938b367e0559}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x2315,0x38d9,0x6fc1,0xa333,0xaeb,0xce6a,0x2a4c,0xa1c1,0x274c,0xa9fc,0xd4c6,0xb0b3,0x555b,0x7a48,0x411a,0x2bdc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x38d92315,0xa3336fc1,0xce6a0aeb,0xa1c12a4c,0xa9fc274c,0xb0b3d4c6,0x7a48555b,0x2bdc411a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa3336fc138d92315,0xa1c12a4cce6a0aeb,0xb0b3d4c6a9fc274c,0x2bdc411a7a48555b}}} +#endif +, +#if 0 +#elif 
GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x2315,0x38d9,0x6fc1,0xa333,0xaeb,0xce6a,0x2a4c,0xa1c1,0x274c,0xa9fc,0xd4c6,0xb0b3,0x555b,0x7a48,0x411a,0x2bdc}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x38d92315,0xa3336fc1,0xce6a0aeb,0xa1c12a4c,0xa9fc274c,0xb0b3d4c6,0x7a48555b,0x2bdc411a}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa3336fc138d92315,0xa1c12a4cce6a0aeb,0xb0b3d4c6a9fc274c,0x2bdc411a7a48555b}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe8f5,0xf5ac,0x2ee2,0xc962,0x459d,0xd9a0,0xd761,0x7c5a,0x8268,0xcf36,0x9b08,0xb7a6,0xbd23,0x7e04,0xe324,0x23ba}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5ace8f5,0xc9622ee2,0xd9a0459d,0x7c5ad761,0xcf368268,0xb7a69b08,0x7e04bd23,0x23bae324}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc9622ee2f5ace8f5,0x7c5ad761d9a0459d,0xb7a69b08cf368268,0x23bae3247e04bd23}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe8f5,0xf5ac,0x2ee2,0xc962,0x459d,0xd9a0,0xd761,0x7c5a,0x8268,0xcf36,0x9b08,0xb7a6,0xbd23,0x7e04,0xe324,0x23ba}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5ace8f5,0xc9622ee2,0xd9a0459d,0x7c5ad761,0xcf368268,0xb7a69b08,0x7e04bd23,0x23bae324}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc9622ee2f5ace8f5,0x7c5ad761d9a0459d,0xb7a69b08cf368268,0x23bae3247e04bd23}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xfef5,0x4752,0xbb14,0x6ee3,0x5898,0x6a2,0x8282,0x1179,0xe429,0xf5d1,0x5ad8,0x642d,0x3061,0x58d,0x9c04,0x917b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4752fef5,0x6ee3bb14,0x6a25898,0x11798282,0xf5d1e429,0x642d5ad8,0x58d3061,0x917b9c04}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6ee3bb144752fef5,0x1179828206a25898,0x642d5ad8f5d1e429,0x917b9c04058d3061}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xfef5,0x4752,0xbb14,0x6ee3,0x5898,0x6a2,0x8282,0x1179,0xe429,0xf5d1,0x5ad8,0x642d,0x3061,0x58d,0x9c04,0x917b}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x4752fef5,0x6ee3bb14,0x6a25898,0x11798282,0xf5d1e429,0x642d5ad8,0x58d3061,0x917b9c04}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x6ee3bb144752fef5,0x1179828206a25898,0x642d5ad8f5d1e429,0x917b9c04058d3061}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x7}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x7}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x7}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xc375,0xa2dc,0x6a7d,0xed64,0x4f95,0xddbe,0x408d,0xb3c9,0x60c0,0xfe03,0x47e2,0x42ab,0xb1f0,0x6768,0x9c74,0x160d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xa2dcc375,0xed646a7d,0xddbe4f95,0xb3c9408d,0xfe0360c0,0x42ab47e2,0x6768b1f0,0x160d9c74}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xed646a7da2dcc375,0xb3c9408dddbe4f95,0x42ab47e2fe0360c0,0x160d9c746768b1f0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xc375,0xa2dc,0x6a7d,0xed64,0x4f95,0xddbe,0x408d,0xb3c9,0x60c0,0xfe03,0x47e2,0x42ab,0xb1f0,0x6768,0x9c74,0x160d}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xa2dcc375,0xed646a7d,0xddbe4f95,0xb3c9408d,0xfe0360c0,0x42ab47e2,0x6768b1f0,0x160d9c74}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xed646a7da2dcc375,0xb3c9408dddbe4f95,0x42ab47e2fe0360c0,0x160d9c746768b1f0}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#elif GMP_LIMB_BITS == 64 
+{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +#endif +}}, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +#endif +, { +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x95f7,0xbdd2,0x75d6,0x430e,0x5c58,0xbd78,0x16b0,0x1278,0xc3f8,0x16b,0xb5dc,0xbbcf,0xfa18,0x1815,0x3c32,0x624a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xbdd295f7,0x430e75d6,0xbd785c58,0x127816b0,0x16bc3f8,0xbbcfb5dc,0x1815fa18,0x624a3c32}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x430e75d6bdd295f7,0x127816b0bd785c58,0xbbcfb5dc016bc3f8,0x624a3c321815fa18}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x95f7,0xbdd2,0x75d6,0x430e,0x5c58,0xbd78,0x16b0,0x1278,0xc3f8,0x16b,0xb5dc,0xbbcf,0xfa18,0x1815,0x3c32,0x624a}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xbdd295f7,0x430e75d6,0xbd785c58,0x127816b0,0x16bc3f8,0xbbcfb5dc,0x1815fa18,0x624a3c32}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x430e75d6bdd295f7,0x127816b0bd785c58,0xbbcfb5dc016bc3f8,0x624a3c321815fa18}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +#endif +, +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +#endif +}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion_data.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion_data.h new file mode 100644 index 0000000000..a5eb1106e6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion_data.h @@ -0,0 +1,12 @@ +#include +#define MAXORD_O0 (EXTREMAL_ORDERS->order) +#define STANDARD_EXTREMAL_ORDER (EXTREMAL_ORDERS[0]) +#define NUM_ALTERNATE_EXTREMAL_ORDERS 6 +#define ALTERNATE_EXTREMAL_ORDERS (EXTREMAL_ORDERS+1) +#define ALTERNATE_CONNECTING_IDEALS (CONNECTING_IDEALS+1) +#define ALTERNATE_CONJUGATING_ELEMENTS (CONJUGATING_ELEMENTS+1) +extern const ibz_t QUAT_prime_cofactor; +extern const quat_alg_t QUATALG_PINFTY; +extern const quat_p_extremal_maximal_order_t EXTREMAL_ORDERS[7]; +extern const quat_left_ideal_t CONNECTING_IDEALS[7]; +extern const quat_alg_elem_t CONJUGATING_ELEMENTS[7]; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/randombytes_ctrdrbg.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/randombytes_ctrdrbg.c new file mode 100644 index 0000000000..372cc0de81 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/randombytes_ctrdrbg.c @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: Apache-2.0 and Unknown +// +/* +NIST-developed software is provided by NIST as a public service. You may use, +copy, and distribute copies of the software in any medium, provided that you +keep intact this entire notice. 
You may improve, modify, and create derivative +works of the software or any portion of the software, and you may copy and +distribute such modifications or works. Modified works should carry a notice +stating that you changed the software and should note the date and nature of any +such change. Please explicitly acknowledge the National Institute of Standards +and Technology as the source of the software. + +NIST-developed software is expressly provided "AS IS." NIST MAKES NO WARRANTY OF +ANY KIND, EXPRESS, IMPLIED, IN FACT, OR ARISING BY OPERATION OF LAW, INCLUDING, +WITHOUT LIMITATION, THE IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE, NON-INFRINGEMENT, AND DATA ACCURACY. NIST NEITHER REPRESENTS +NOR WARRANTS THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR +ERROR-FREE, OR THAT ANY DEFECTS WILL BE CORRECTED. NIST DOES NOT WARRANT OR MAKE +ANY REPRESENTATIONS REGARDING THE USE OF THE SOFTWARE OR THE RESULTS THEREOF, +INCLUDING BUT NOT LIMITED TO THE CORRECTNESS, ACCURACY, RELIABILITY, OR +USEFULNESS OF THE SOFTWARE. + +You are solely responsible for determining the appropriateness of using and +distributing the software and you assume all risks associated with its use, +including but not limited to the risks and costs of program errors, compliance +with applicable laws, damage to or loss of data, programs or equipment, and the +unavailability or interruption of operation. This software is not intended to be +used in any situation where a failure could cause risk of injury or damage to +property. The software developed by NIST employees is not subject to copyright +protection within the United States. +*/ + +#include +#include + +#include + +#ifdef ENABLE_CT_TESTING +#include +#endif + +#define RNG_SUCCESS 0 +#define RNG_BAD_MAXLEN -1 +#define RNG_BAD_OUTBUF -2 +#define RNG_BAD_REQ_LEN -3 + +static inline void AES256_ECB(const unsigned char *key, + const unsigned char *ctr, unsigned char *buffer) { + AES_ECB_encrypt(ctr, key, buffer); +} + +typedef struct { + unsigned char Key[32]; + unsigned char V[16]; + int reseed_counter; +} AES256_CTR_DRBG_struct; + +void AES256_CTR_DRBG_Update(const unsigned char *provided_data, + unsigned char *Key, unsigned char *V); + +AES256_CTR_DRBG_struct DRBG_ctx; + +#ifndef CTRDRBG_TEST_BENCH +static +#endif + void + randombytes_init_nist(unsigned char *entropy_input, + unsigned char *personalization_string, + int security_strength) { + unsigned char seed_material[48]; + + (void)security_strength; // Unused parameter + memcpy(seed_material, entropy_input, 48); + if (personalization_string) + for (int i = 0; i < 48; i++) { + seed_material[i] ^= personalization_string[i]; + } + memset(DRBG_ctx.Key, 0x00, 32); + memset(DRBG_ctx.V, 0x00, 16); + AES256_CTR_DRBG_Update(seed_material, DRBG_ctx.Key, DRBG_ctx.V); + DRBG_ctx.reseed_counter = 1; +} + +#ifndef CTRDRBG_TEST_BENCH +static +#endif + int + randombytes_nist(unsigned char *x, size_t xlen) { + unsigned char block[16]; + size_t i = 0; + + while (xlen > 0) { + // increment V + for (int j = 15; j >= 0; j--) { + if (DRBG_ctx.V[j] == 0xff) { + DRBG_ctx.V[j] = 0x00; + } else { + DRBG_ctx.V[j]++; + break; + } + } + AES256_ECB(DRBG_ctx.Key, DRBG_ctx.V, block); + if (xlen > 15) { + memcpy(x + i, block, 16); + i += 16; + xlen -= 16; + } else { + memcpy(x + i, block, xlen); + i += xlen; + xlen = 0; + } + } + AES256_CTR_DRBG_Update(NULL, DRBG_ctx.Key, DRBG_ctx.V); + DRBG_ctx.reseed_counter++; + + return 0; +} + +void AES256_CTR_DRBG_Update(const unsigned char *provided_data, + unsigned 
char *Key, unsigned char *V) { + unsigned char temp[48]; + + for (int i = 0; i < 3; i++) { + // increment V + for (int j = 15; j >= 0; j--) { + if (V[j] == 0xff) { + V[j] = 0x00; + } else { + V[j]++; + break; + } + } + + AES256_ECB(Key, V, temp + 16 * i); + } + if (provided_data != NULL) + for (int i = 0; i < 48; i++) { + temp[i] ^= provided_data[i]; + } + memcpy(Key, temp, 32); + memcpy(V, temp + 32, 16); +} + +#ifdef RANDOMBYTES_C +SQISIGN_API +int randombytes(unsigned char *random_array, unsigned long long nbytes) { + int ret = randombytes_nist(random_array, nbytes); +#ifdef ENABLE_CT_TESTING + VALGRIND_MAKE_MEM_UNDEFINED(random_array, ret); +#endif + return ret; +} + +SQISIGN_API +void randombytes_init(unsigned char *entropy_input, + unsigned char *personalization_string, + int security_strength) { + randombytes_init_nist(entropy_input, personalization_string, + security_strength); +} +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/randombytes_system.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/randombytes_system.c new file mode 100644 index 0000000000..689c29b242 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/randombytes_system.c @@ -0,0 +1,431 @@ +// SPDX-License-Identifier: MIT + +/* +The MIT License +Copyright (c) 2017 Daan Sprenkels +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ + +#include + +#ifdef ENABLE_CT_TESTING +#include +#endif + +// In the case that are compiling on linux, we need to define _GNU_SOURCE +// *before* randombytes.h is included. Otherwise SYS_getrandom will not be +// declared. +#if defined(__linux__) || defined(__GNU__) +#define _GNU_SOURCE +#endif /* defined(__linux__) || defined(__GNU__) */ + +#if defined(_WIN32) +/* Windows */ +#include +#include /* CryptAcquireContext, CryptGenRandom */ +#endif /* defined(_WIN32) */ + +/* wasi */ +#if defined(__wasi__) +#include +#endif + +/* kFreeBSD */ +#if defined(__FreeBSD_kernel__) && defined(__GLIBC__) +#define GNU_KFREEBSD +#endif + +#if defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) +/* Linux */ +// We would need to include , but not every target has access +// to the linux headers. We only need RNDGETENTCNT, so we instead inline it. +// RNDGETENTCNT is originally defined in `include/uapi/linux/random.h` in the +// linux repo. 
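Stepping back to randombytes_ctrdrbg.c above: it implements the NIST AES-256 CTR-DRBG update/generate pattern behind randombytes(). The sketch below is purely illustrative and not part of the patch; it assumes a build with RANDOMBYTES_C defined (so that randombytes() is backed by this DRBG) and the declarations from rng.h. Per the code above, the 48-byte entropy input is mandatory, the personalization string may be NULL, and security_strength is accepted but unused.

    /* Illustrative sketch only -- not part of the upstream sources. */
    #include <string.h>
    #include "rng.h" /* declares randombytes_init() and randombytes() */

    static void drbg_usage_sketch(void)
    {
        unsigned char seed[48]; /* 48 bytes of entropy input (required) */
        unsigned char out[64];

        memset(seed, 0x01, sizeof(seed));    /* a fixed seed yields a reproducible stream */
        randombytes_init(seed, NULL, 256);   /* personalization may be NULL; strength is informational */
        (void)randombytes(out, sizeof(out)); /* deterministic output derived from the seed */
    }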
+#define RNDGETENTCNT 0x80045200 + +#include +#include +#include +#include +#include +#include +#include +#if (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && \ + ((__GLIBC__ > 2) || (__GLIBC_MINOR__ > 24)) +#define USE_GLIBC +#include +#endif /* (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && ((__GLIBC__ > 2) || \ + (__GLIBC_MINOR__ > 24)) */ +#include +#include +#include +#include + +// We need SSIZE_MAX as the maximum read len from /dev/urandom +#if !defined(SSIZE_MAX) +#define SSIZE_MAX (SIZE_MAX / 2 - 1) +#endif /* defined(SSIZE_MAX) */ + +#endif /* defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) */ + +#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) +/* Dragonfly, FreeBSD, NetBSD, OpenBSD (has arc4random) */ +#include +#if defined(BSD) +#include +#endif +/* GNU/Hurd defines BSD in sys/param.h which causes problems later */ +#if defined(__GNU__) +#undef BSD +#endif +#endif + +#if defined(__EMSCRIPTEN__) +#include +#include +#include +#include +#endif /* defined(__EMSCRIPTEN__) */ + +#if defined(_WIN32) +static int +randombytes_win32_randombytes(void *buf, size_t n) +{ + HCRYPTPROV ctx; + BOOL tmp; + DWORD to_read = 0; + const size_t MAX_DWORD = 0xFFFFFFFF; + + tmp = CryptAcquireContext(&ctx, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT); + if (tmp == FALSE) + return -1; + + while (n > 0) { + to_read = (DWORD)(n < MAX_DWORD ? n : MAX_DWORD); + tmp = CryptGenRandom(ctx, to_read, (BYTE *)buf); + if (tmp == FALSE) + return -1; + buf = ((char *)buf) + to_read; + n -= to_read; + } + + tmp = CryptReleaseContext(ctx, 0); + if (tmp == FALSE) + return -1; + + return 0; +} +#endif /* defined(_WIN32) */ + +#if defined(__wasi__) +static int +randombytes_wasi_randombytes(void *buf, size_t n) +{ + arc4random_buf(buf, n); + return 0; +} +#endif /* defined(__wasi__) */ + +#if (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || defined(SYS_getrandom)) +#if defined(USE_GLIBC) +// getrandom is declared in glibc. +#elif defined(SYS_getrandom) +static ssize_t +getrandom(void *buf, size_t buflen, unsigned int flags) +{ + return syscall(SYS_getrandom, buf, buflen, flags); +} +#endif + +static int +randombytes_linux_randombytes_getrandom(void *buf, size_t n) +{ + /* I have thought about using a separate PRF, seeded by getrandom, but + * it turns out that the performance of getrandom is good enough + * (250 MB/s on my laptop). + */ + size_t offset = 0, chunk; + int ret; + while (n > 0) { + /* getrandom does not allow chunks larger than 33554431 */ + chunk = n <= 33554431 ? 
n : 33554431; + do { + ret = getrandom((char *)buf + offset, chunk, 0); + } while (ret == -1 && errno == EINTR); + if (ret < 0) + return ret; + offset += ret; + n -= ret; + } + assert(n == 0); + return 0; +} +#endif /* (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || \ + defined(SYS_getrandom)) */ + +#if (defined(__linux__) || defined(GNU_KFREEBSD)) && !defined(SYS_getrandom) + +#if defined(__linux__) +static int +randombytes_linux_read_entropy_ioctl(int device, int *entropy) +{ + return ioctl(device, RNDGETENTCNT, entropy); +} + +static int +randombytes_linux_read_entropy_proc(FILE *stream, int *entropy) +{ + int retcode; + do { + rewind(stream); + retcode = fscanf(stream, "%d", entropy); + } while (retcode != 1 && errno == EINTR); + if (retcode != 1) { + return -1; + } + return 0; +} + +static int +randombytes_linux_wait_for_entropy(int device) +{ + /* We will block on /dev/random, because any increase in the OS' entropy + * level will unblock the request. I use poll here (as does libsodium), + * because we don't *actually* want to read from the device. */ + enum + { + IOCTL, + PROC + } strategy = IOCTL; + const int bits = 128; + struct pollfd pfd; + int fd; + FILE *proc_file; + int retcode, retcode_error = 0; // Used as return codes throughout this function + int entropy = 0; + + /* If the device has enough entropy already, we will want to return early */ + retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); + // printf("errno: %d (%s)\n", errno, strerror(errno)); + if (retcode != 0 && (errno == ENOTTY || errno == ENOSYS)) { + // The ioctl call on /dev/urandom has failed due to a + // - ENOTTY (unsupported action), or + // - ENOSYS (invalid ioctl; this happens on MIPS, see #22). + // + // We will fall back to reading from + // `/proc/sys/kernel/random/entropy_avail`. This less ideal, + // because it allocates a file descriptor, and it may not work + // in a chroot. But at this point it seems we have no better + // options left. 
+ strategy = PROC; + // Open the entropy count file + proc_file = fopen("/proc/sys/kernel/random/entropy_avail", "r"); + if (proc_file == NULL) { + return -1; + } + } else if (retcode != 0) { + // Unrecoverable ioctl error + return -1; + } + if (entropy >= bits) { + return 0; + } + + do { + fd = open("/dev/random", O_RDONLY); + } while (fd == -1 && errno == EINTR); /* EAGAIN will not occur */ + if (fd == -1) { + /* Unrecoverable IO error */ + return -1; + } + + pfd.fd = fd; + pfd.events = POLLIN; + for (;;) { + retcode = poll(&pfd, 1, -1); + if (retcode == -1 && (errno == EINTR || errno == EAGAIN)) { + continue; + } else if (retcode == 1) { + if (strategy == IOCTL) { + retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); + } else if (strategy == PROC) { + retcode = randombytes_linux_read_entropy_proc(proc_file, &entropy); + } else { + return -1; // Unreachable + } + + if (retcode != 0) { + // Unrecoverable I/O error + retcode_error = retcode; + break; + } + if (entropy >= bits) { + break; + } + } else { + // Unreachable: poll() should only return -1 or 1 + retcode_error = -1; + break; + } + } + do { + retcode = close(fd); + } while (retcode == -1 && errno == EINTR); + if (strategy == PROC) { + do { + retcode = fclose(proc_file); + } while (retcode == -1 && errno == EINTR); + } + if (retcode_error != 0) { + return retcode_error; + } + return retcode; +} +#endif /* defined(__linux__) */ + +static int +randombytes_linux_randombytes_urandom(void *buf, size_t n) +{ + int fd; + size_t offset = 0, count; + ssize_t tmp; + do { + fd = open("/dev/urandom", O_RDONLY); + } while (fd == -1 && errno == EINTR); + if (fd == -1) + return -1; +#if defined(__linux__) + if (randombytes_linux_wait_for_entropy(fd) == -1) + return -1; +#endif + + while (n > 0) { + count = n <= SSIZE_MAX ? 
n : SSIZE_MAX; + tmp = read(fd, (char *)buf + offset, count); + if (tmp == -1 && (errno == EAGAIN || errno == EINTR)) { + continue; + } + if (tmp == -1) + return -1; /* Unrecoverable IO error */ + offset += tmp; + n -= tmp; + } + close(fd); + assert(n == 0); + return 0; +} +#endif /* defined(__linux__) && !defined(SYS_getrandom) */ + +#if defined(BSD) +static int +randombytes_bsd_randombytes(void *buf, size_t n) +{ + arc4random_buf(buf, n); + return 0; +} +#endif /* defined(BSD) */ + +#if defined(__EMSCRIPTEN__) +static int +randombytes_js_randombytes_nodejs(void *buf, size_t n) +{ + const int ret = EM_ASM_INT( + { + var crypto; + try { + crypto = require('crypto'); + } catch (error) { + return -2; + } + try { + writeArrayToMemory(crypto.randomBytes($1), $0); + return 0; + } catch (error) { + return -1; + } + }, + buf, + n); + switch (ret) { + case 0: + return 0; + case -1: + errno = EINVAL; + return -1; + case -2: + errno = ENOSYS; + return -1; + } + assert(false); // Unreachable +} +#endif /* defined(__EMSCRIPTEN__) */ + +SQISIGN_API +int +randombytes_select(unsigned char *buf, unsigned long long n) +{ +#if defined(__EMSCRIPTEN__) + return randombytes_js_randombytes_nodejs(buf, n); +#elif defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) +#if defined(USE_GLIBC) + /* Use getrandom system call */ + return randombytes_linux_randombytes_getrandom(buf, n); +#elif defined(SYS_getrandom) + /* Use getrandom system call */ + return randombytes_linux_randombytes_getrandom(buf, n); +#else + /* When we have enough entropy, we can read from /dev/urandom */ + return randombytes_linux_randombytes_urandom(buf, n); +#endif +#elif defined(BSD) + /* Use arc4random system call */ + return randombytes_bsd_randombytes(buf, n); +#elif defined(_WIN32) + /* Use windows API */ + return randombytes_win32_randombytes(buf, n); +#elif defined(__wasi__) + /* Use WASI */ + return randombytes_wasi_randombytes(buf, n); +#else +#error "randombytes(...) 
is not supported on this platform" +#endif +} + +#ifdef RANDOMBYTES_SYSTEM +SQISIGN_API +int +randombytes(unsigned char *x, unsigned long long xlen) +{ + + int ret = randombytes_select(x, (size_t)xlen); +#ifdef ENABLE_CT_TESTING + VALGRIND_MAKE_MEM_UNDEFINED(x, xlen); +#endif + return ret; +} + +SQISIGN_API +void +randombytes_init(unsigned char *entropy_input, + unsigned char *personalization_string, + int security_strength) +{ + (void)entropy_input; + (void)personalization_string; + (void)security_strength; +} +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/rationals.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/rationals.c new file mode 100644 index 0000000000..0c5387e5e8 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/rationals.c @@ -0,0 +1,233 @@ +#include +#include "internal.h" +#include "lll_internals.h" + +void +ibq_init(ibq_t *x) +{ + ibz_init(&((*x)[0])); + ibz_init(&((*x)[1])); + ibz_set(&((*x)[1]), 1); +} + +void +ibq_finalize(ibq_t *x) +{ + ibz_finalize(&((*x)[0])); + ibz_finalize(&((*x)[1])); +} + +void +ibq_mat_4x4_init(ibq_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibq_init(&(*mat)[i][j]); + } + } +} +void +ibq_mat_4x4_finalize(ibq_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibq_finalize(&(*mat)[i][j]); + } + } +} + +void +ibq_vec_4_init(ibq_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibq_init(&(*vec)[i]); + } +} +void +ibq_vec_4_finalize(ibq_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibq_finalize(&(*vec)[i]); + } +} + +void +ibq_mat_4x4_print(const ibq_mat_4x4_t *mat) +{ + printf("matrix: "); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_print(&((*mat)[i][j][0]), 10); + printf("/"); + ibz_print(&((*mat)[i][j][1]), 10); + printf(" "); + } + printf("\n "); + } + printf("\n"); +} + +void +ibq_vec_4_print(const ibq_vec_4_t *vec) +{ + printf("vector: "); + for (int i = 0; i < 4; i++) { + ibz_print(&((*vec)[i][0]), 10); + printf("/"); + ibz_print(&((*vec)[i][1]), 10); + printf(" "); + } + printf("\n\n"); +} + +void +ibq_reduce(ibq_t *x) +{ + ibz_t gcd, r; + ibz_init(&gcd); + ibz_init(&r); + ibz_gcd(&gcd, &((*x)[0]), &((*x)[1])); + ibz_div(&((*x)[0]), &r, &((*x)[0]), &gcd); + assert(ibz_is_zero(&r)); + ibz_div(&((*x)[1]), &r, &((*x)[1]), &gcd); + assert(ibz_is_zero(&r)); + ibz_finalize(&gcd); + ibz_finalize(&r); +} + +void +ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b) +{ + ibz_t add, prod; + ibz_init(&add); + ibz_init(&prod); + + ibz_mul(&add, &((*a)[0]), &((*b)[1])); + ibz_mul(&prod, &((*b)[0]), &((*a)[1])); + ibz_add(&((*sum)[0]), &add, &prod); + ibz_mul(&((*sum)[1]), &((*a)[1]), &((*b)[1])); + ibz_finalize(&add); + ibz_finalize(&prod); +} + +void +ibq_neg(ibq_t *neg, const ibq_t *x) +{ + ibz_copy(&((*neg)[1]), &((*x)[1])); + ibz_neg(&((*neg)[0]), &((*x)[0])); +} + +void +ibq_sub(ibq_t *diff, const ibq_t *a, const ibq_t *b) +{ + ibq_t neg; + ibq_init(&neg); + ibq_neg(&neg, b); + ibq_add(diff, a, &neg); + ibq_finalize(&neg); +} + +void +ibq_abs(ibq_t *abs, const ibq_t *x) // once +{ + ibq_t neg; + ibq_init(&neg); + ibq_neg(&neg, x); + if (ibq_cmp(x, &neg) < 0) + ibq_copy(abs, &neg); + else + ibq_copy(abs, x); + ibq_finalize(&neg); +} + +void +ibq_mul(ibq_t *prod, const ibq_t *a, const ibq_t *b) +{ + ibz_mul(&((*prod)[0]), &((*a)[0]), &((*b)[0])); + ibz_mul(&((*prod)[1]), &((*a)[1]), &((*b)[1])); +} + +int +ibq_inv(ibq_t *inv, const ibq_t *x) +{ + int res = !ibq_is_zero(x); + if (res) { + ibz_copy(&((*inv)[0]), 
&((*x)[0])); + ibz_copy(&((*inv)[1]), &((*x)[1])); + ibz_swap(&((*inv)[1]), &((*inv)[0])); + } + return (res); +} + +int +ibq_cmp(const ibq_t *a, const ibq_t *b) +{ + ibz_t x, y; + ibz_init(&x); + ibz_init(&y); + ibz_copy(&x, &((*a)[0])); + ibz_copy(&y, &((*b)[0])); + ibz_mul(&y, &y, &((*a)[1])); + ibz_mul(&x, &x, &((*b)[1])); + if (ibz_cmp(&((*a)[1]), &ibz_const_zero) > 0) { + ibz_neg(&y, &y); + ibz_neg(&x, &x); + } + if (ibz_cmp(&((*b)[1]), &ibz_const_zero) > 0) { + ibz_neg(&y, &y); + ibz_neg(&x, &x); + } + int res = ibz_cmp(&x, &y); + ibz_finalize(&x); + ibz_finalize(&y); + return (res); +} + +int +ibq_is_zero(const ibq_t *x) +{ + return ibz_is_zero(&((*x)[0])); +} + +int +ibq_is_one(const ibq_t *x) +{ + return (0 == ibz_cmp(&((*x)[0]), &((*x)[1]))); +} + +int +ibq_set(ibq_t *q, const ibz_t *a, const ibz_t *b) +{ + ibz_copy(&((*q)[0]), a); + ibz_copy(&((*q)[1]), b); + return !ibz_is_zero(b); +} + +void +ibq_copy(ibq_t *target, const ibq_t *value) // once +{ + ibz_copy(&((*target)[0]), &((*value)[0])); + ibz_copy(&((*target)[1]), &((*value)[1])); +} + +int +ibq_is_ibz(const ibq_t *q) +{ + ibz_t r; + ibz_init(&r); + ibz_mod(&r, &((*q)[0]), &((*q)[1])); + int res = ibz_is_zero(&r); + ibz_finalize(&r); + return (res); +} + +int +ibq_to_ibz(ibz_t *z, const ibq_t *q) +{ + ibz_t r; + ibz_init(&r); + ibz_div(z, &r, &((*q)[0]), &((*q)[1])); + int res = ibz_is_zero(&r); + ibz_finalize(&r); + return (res); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/rng.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/rng.h new file mode 100644 index 0000000000..0a9ca0e465 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/rng.h @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef rng_h +#define rng_h + +#include + +/** + * Randombytes initialization. + * Initialization may be needed for some random number generators (e.g. CTR-DRBG). + * + * @param[in] entropy_input 48 bytes entropy input + * @param[in] personalization_string Personalization string + * @param[in] security_strength Security string + */ +SQISIGN_API +void randombytes_init(unsigned char *entropy_input, + unsigned char *personalization_string, + int security_strength); + +/** + * Random byte generation using /dev/urandom. + * The caller is responsible to allocate sufficient memory to hold x. + * + * @param[out] x Memory to hold the random bytes. + * @param[in] xlen Number of random bytes to be generated + * @return int 0 on success, -1 otherwise + */ +SQISIGN_API +int randombytes_select(unsigned char *x, unsigned long long xlen); + +/** + * Random byte generation. + * The caller is responsible to allocate sufficient memory to hold x. + * + * @param[out] x Memory to hold the random bytes. + * @param[in] xlen Number of random bytes to be generated + * @return int 0 on success, -1 otherwise + */ +SQISIGN_API +int randombytes(unsigned char *x, unsigned long long xlen); + +#endif /* rng_h */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sig.h new file mode 100644 index 0000000000..4c33510084 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sig.h @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: Apache-2.0 + +#ifndef SQISIGN_H +#define SQISIGN_H + +#include +#include + +#if defined(ENABLE_SIGN) +/** + * SQIsign keypair generation. + * + * The implementation corresponds to SQIsign.CompactKeyGen() in the SQIsign spec. + * The caller is responsible to allocate sufficient memory to hold pk and sk. 
+ * + * @param[out] pk SQIsign public key + * @param[out] sk SQIsign secret key + * @return int status code + */ +SQISIGN_API +int sqisign_keypair(unsigned char *pk, unsigned char *sk); + +/** + * SQIsign signature generation. + * + * The implementation performs SQIsign.expandSK() + SQIsign.sign() in the SQIsign spec. + * The key provided is a compacted secret key. + * The caller is responsible to allocate sufficient memory to hold sm. + * + * @param[out] sm Signature concatenated with message + * @param[out] smlen Pointer to the length of sm + * @param[in] m Message to be signed + * @param[in] mlen Message length + * @param[in] sk Compacted secret key + * @return int status code + */ +SQISIGN_API +int sqisign_sign(unsigned char *sm, + unsigned long long *smlen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *sk); +#endif + +/** + * SQIsign open signature. + * + * The implementation performs SQIsign.verify(). If the signature verification succeeds, the + * original message is stored in m. The key provided is a compact public key. The caller is responsible + * to allocate sufficient memory to hold m. + * + * @param[out] m Message stored if verification succeeds + * @param[out] mlen Pointer to the length of m + * @param[in] sm Signature concatenated with message + * @param[in] smlen Length of sm + * @param[in] pk Compacted public key + * @return int status code + */ +SQISIGN_API +int sqisign_open(unsigned char *m, + unsigned long long *mlen, + const unsigned char *sm, + unsigned long long smlen, + const unsigned char *pk); + +/** + * SQIsign verify signature. + * + * If the signature verification succeeds, returns 0, otherwise 1. + * + * @param[in] m Message whose signature is verified + * @param[in] mlen Length of m + * @param[in] sig Signature + * @param[in] siglen Length of sig + * @param[in] pk Compacted public key + * @return int 0 if verification succeeds, 1 otherwise.
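 *
 * For comparison with sqisign_open(): sqisign_open() consumes the combined
 * buffer sm (signature concatenated with message) and recovers the message,
 * whereas this function checks a detached signature supplied separately from
 * the message it covers. An illustrative call (placeholder variable names, not
 * part of the upstream header) is:
 *
 *     if (sqisign_verify(msg, msg_len, sig, sig_len, pk) == 0) {
 *         // signature accepted
 *     }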
+ */ +SQISIGN_API +int sqisign_verify(const unsigned char *m, + unsigned long long mlen, + const unsigned char *sig, + unsigned long long siglen, + const unsigned char *pk); + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sign.c new file mode 100644 index 0000000000..9216bbe4d3 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sign.c @@ -0,0 +1,634 @@ +#include +#include +#include +#include +#include +#include + +// compute the commitment with ideal to isogeny clapotis +// and apply it to the basis of E0 (together with the multiplication by some scalar u) +static bool +commit(ec_curve_t *E_com, ec_basis_t *basis_even_com, quat_left_ideal_t *lideal_com) +{ + + bool found = false; + + found = quat_sampling_random_ideal_O0_given_norm(lideal_com, &COM_DEGREE, 1, &QUAT_represent_integer_params, NULL); + // replacing it with a shorter prime norm equivalent ideal + found = found && quat_lideal_prime_norm_reduced_equivalent( + lideal_com, &QUATALG_PINFTY, QUAT_primality_num_iter, QUAT_equiv_bound_coeff); + // ideal to isogeny clapotis + found = found && dim2id2iso_arbitrary_isogeny_evaluation(basis_even_com, E_com, lideal_com); + return found; +} + +static void +compute_challenge_ideal_signature(quat_left_ideal_t *lideal_chall_two, const signature_t *sig, const secret_key_t *sk) +{ + ibz_vec_2_t vec; + ibz_vec_2_init(&vec); + + // vec is a vector [1, chall_coeff] coefficients encoding the kernel of the challenge + // isogeny as B[0] + chall_coeff*B[1] where B is the canonical basis of the + // 2^TORSION_EVEN_POWER torsion of EA + ibz_set(&vec[0], 1); + ibz_copy_digit_array(&vec[1], sig->chall_coeff); + + // now we compute the ideal associated to the challenge + // for that, we need to find vec such that + // the kernel of the challenge isogeny is generated by vec[0]*B0[0] + vec[1]*B0[1] where B0 + // is the image through the secret key isogeny of the canonical basis E0 + ibz_mat_2x2_eval(&vec, &(sk->mat_BAcan_to_BA0_two), &vec); + + // lideal_chall_two is the pullback of the ideal challenge through the secret key ideal + id2iso_kernel_dlogs_to_ideal_even(lideal_chall_two, &vec, TORSION_EVEN_POWER); + assert(ibz_cmp(&lideal_chall_two->norm, &TORSION_PLUS_2POWER) == 0); + + ibz_vec_2_finalize(&vec); +} + +static void +sample_response(quat_alg_elem_t *x, const quat_lattice_t *lattice, const ibz_t *lattice_content) +{ + ibz_t bound; + ibz_init(&bound); + ibz_pow(&bound, &ibz_const_two, SQIsign_response_length); + ibz_sub(&bound, &bound, &ibz_const_one); + ibz_mul(&bound, &bound, lattice_content); + + int ok UNUSED = quat_lattice_sample_from_ball(x, lattice, &QUATALG_PINFTY, &bound); + assert(ok); + + ibz_finalize(&bound); +} + +static void +compute_response_quat_element(quat_alg_elem_t *resp_quat, + ibz_t *lattice_content, + const secret_key_t *sk, + const quat_left_ideal_t *lideal_chall_two, + const quat_left_ideal_t *lideal_commit) +{ + quat_left_ideal_t lideal_chall_secret; + quat_lattice_t lattice_hom_chall_to_com, lat_commit; + + // Init + quat_left_ideal_init(&lideal_chall_secret); + quat_lattice_init(&lat_commit); + quat_lattice_init(&lattice_hom_chall_to_com); + + // lideal_chall_secret = lideal_secret * lideal_chall_two + quat_lideal_inter(&lideal_chall_secret, lideal_chall_two, &(sk->secret_ideal), &QUATALG_PINFTY); + + // now we compute lideal_com_to_chall which is dual(Icom)* lideal_chall_secret + quat_lattice_conjugate_without_hnf(&lat_commit, &(lideal_commit->lattice)); + 
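    // the intersection computed next is the lattice from which sample_response() draws resp_quat,
    // using the bound (2^SQIsign_response_length - 1) * lattice_content, with
    // lattice_content = n(lideal_chall_secret) * n(lideal_commit)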
quat_lattice_intersect(&lattice_hom_chall_to_com, &lideal_chall_secret.lattice, &lat_commit); + + // sampling the smallest response + ibz_mul(lattice_content, &lideal_chall_secret.norm, &lideal_commit->norm); + sample_response(resp_quat, &lattice_hom_chall_to_com, lattice_content); + + // Clean up + quat_left_ideal_finalize(&lideal_chall_secret); + quat_lattice_finalize(&lat_commit); + quat_lattice_finalize(&lattice_hom_chall_to_com); +} + +static void +compute_backtracking_signature(signature_t *sig, quat_alg_elem_t *resp_quat, ibz_t *lattice_content, ibz_t *remain) +{ + uint_fast8_t backtracking; + ibz_t tmp; + ibz_init(&tmp); + + ibz_vec_4_t dummy_coord; + ibz_vec_4_init(&dummy_coord); + + quat_alg_make_primitive(&dummy_coord, &tmp, resp_quat, &MAXORD_O0); + ibz_mul(&resp_quat->denom, &resp_quat->denom, &tmp); + assert(quat_lattice_contains(NULL, &MAXORD_O0, resp_quat)); + + // the backtracking is the common part of the response and the challenge + // its degree is the scalar tmp computed above such that quat_resp is in tmp * O0. + backtracking = ibz_two_adic(&tmp); + sig->backtracking = backtracking; + + ibz_pow(&tmp, &ibz_const_two, backtracking); + ibz_div(lattice_content, remain, lattice_content, &tmp); + + ibz_finalize(&tmp); + ibz_vec_4_finalize(&dummy_coord); +} + +static uint_fast8_t +compute_random_aux_norm_and_helpers(signature_t *sig, + ibz_t *random_aux_norm, + ibz_t *degree_resp_inv, + ibz_t *remain, + const ibz_t *lattice_content, + quat_alg_elem_t *resp_quat, + quat_left_ideal_t *lideal_com_resp, + quat_left_ideal_t *lideal_commit) +{ + uint_fast8_t pow_dim2_deg_resp; + uint_fast8_t exp_diadic_val_full_resp; + + ibz_t tmp, degree_full_resp, degree_odd_resp, norm_d; + + // Init + ibz_init(&degree_full_resp); + ibz_init(&degree_odd_resp); + ibz_init(&norm_d); + ibz_init(&tmp); + + quat_alg_norm(&degree_full_resp, &norm_d, resp_quat, &QUATALG_PINFTY); + + // dividing by n(lideal_com) * n(lideal_secret_chall) + assert(ibz_is_one(&norm_d)); + ibz_div(&degree_full_resp, remain, &degree_full_resp, lattice_content); + assert(ibz_cmp(remain, &ibz_const_zero) == 0); + + // computing the diadic valuation + exp_diadic_val_full_resp = ibz_two_adic(&degree_full_resp); + sig->two_resp_length = exp_diadic_val_full_resp; + + // removing the power of two part + ibz_pow(&tmp, &ibz_const_two, exp_diadic_val_full_resp); + ibz_div(&degree_odd_resp, remain, &degree_full_resp, &tmp); + assert(ibz_cmp(remain, &ibz_const_zero) == 0); +#ifndef NDEBUG + ibz_pow(&tmp, &ibz_const_two, SQIsign_response_length - sig->backtracking); + assert(ibz_cmp(&tmp, &degree_odd_resp) > 0); +#endif + + // creating the ideal + quat_alg_conj(resp_quat, resp_quat); + + // setting the norm + ibz_mul(&tmp, &lideal_commit->norm, &degree_odd_resp); + quat_lideal_create(lideal_com_resp, resp_quat, &tmp, &MAXORD_O0, &QUATALG_PINFTY); + + // now we compute the ideal_aux + // computing the norm + pow_dim2_deg_resp = SQIsign_response_length - exp_diadic_val_full_resp - sig->backtracking; + ibz_pow(remain, &ibz_const_two, pow_dim2_deg_resp); + ibz_sub(random_aux_norm, remain, &degree_odd_resp); + + // multiplying by 2^HD_extra_torsion to account for the fact that + // we use extra torsion above the kernel + for (int i = 0; i < HD_extra_torsion; i++) + ibz_mul(remain, remain, &ibz_const_two); + + ibz_invmod(degree_resp_inv, &degree_odd_resp, remain); + + ibz_finalize(&degree_full_resp); + ibz_finalize(&degree_odd_resp); + ibz_finalize(&norm_d); + ibz_finalize(&tmp); + + return pow_dim2_deg_resp; +} + +static int +evaluate_random_aux_isogeny_signature(ec_curve_t *E_aux, +
ec_basis_t *B_aux, + const ibz_t *norm, + const quat_left_ideal_t *lideal_com_resp) +{ + quat_left_ideal_t lideal_aux; + quat_left_ideal_t lideal_aux_resp_com; + + // Init + quat_left_ideal_init(&lideal_aux); + quat_left_ideal_init(&lideal_aux_resp_com); + + // sampling the ideal at random + int found = quat_sampling_random_ideal_O0_given_norm( + &lideal_aux, norm, 0, &QUAT_represent_integer_params, &QUAT_prime_cofactor); + + if (found) { + // pushing forward + quat_lideal_inter(&lideal_aux_resp_com, lideal_com_resp, &lideal_aux, &QUATALG_PINFTY); + + // now we evaluate this isogeny on the basis of E0 + found = dim2id2iso_arbitrary_isogeny_evaluation(B_aux, E_aux, &lideal_aux_resp_com); + + // Clean up + quat_left_ideal_finalize(&lideal_aux_resp_com); + quat_left_ideal_finalize(&lideal_aux); + } + + return found; +} + +static int +compute_dim2_isogeny_challenge(theta_couple_curve_with_basis_t *codomain, + theta_couple_curve_with_basis_t *domain, + const ibz_t *degree_resp_inv, + int pow_dim2_deg_resp, + int exp_diadic_val_full_resp, + int reduced_order) +{ + // now, we compute the isogeny Phi : Ecom x Eaux -> Echl' x Eaux' + // where Echl' is 2^exp_diadic_val_full_resp isogenous to Echal + // ker Phi = <(Bcom_can.P,Baux.P),(Bcom_can.Q,Baux.Q)> + + // preparing the domain + theta_couple_curve_t EcomXEaux; + copy_curve(&EcomXEaux.E1, &domain->E1); + copy_curve(&EcomXEaux.E2, &domain->E2); + + // preparing the kernel + theta_kernel_couple_points_t dim_two_ker; + copy_bases_to_kernel(&dim_two_ker, &domain->B1, &domain->B2); + + // dividing by the degree of the response + digit_t scalar[NWORDS_ORDER]; + ibz_to_digit_array(scalar, degree_resp_inv); + ec_mul(&dim_two_ker.T1.P2, scalar, reduced_order, &dim_two_ker.T1.P2, &EcomXEaux.E2); + ec_mul(&dim_two_ker.T2.P2, scalar, reduced_order, &dim_two_ker.T2.P2, &EcomXEaux.E2); + ec_mul(&dim_two_ker.T1m2.P2, scalar, reduced_order, &dim_two_ker.T1m2.P2, &EcomXEaux.E2); + + // and multiplying by 2^exp_diadic... 
+ double_couple_point_iter(&dim_two_ker.T1, exp_diadic_val_full_resp, &dim_two_ker.T1, &EcomXEaux); + double_couple_point_iter(&dim_two_ker.T2, exp_diadic_val_full_resp, &dim_two_ker.T2, &EcomXEaux); + double_couple_point_iter(&dim_two_ker.T1m2, exp_diadic_val_full_resp, &dim_two_ker.T1m2, &EcomXEaux); + + theta_couple_point_t pushed_points[3]; + theta_couple_point_t *const Tev1 = pushed_points + 0, *const Tev2 = pushed_points + 1, + *const Tev1m2 = pushed_points + 2; + + // Set points on the commitment curve + copy_point(&Tev1->P1, &domain->B1.P); + copy_point(&Tev2->P1, &domain->B1.Q); + copy_point(&Tev1m2->P1, &domain->B1.PmQ); + + // Zero points on the aux curve + ec_point_init(&Tev1->P2); + ec_point_init(&Tev2->P2); + ec_point_init(&Tev1m2->P2); + + theta_couple_curve_t codomain_product; + + // computation of the dim2 isogeny + if (!theta_chain_compute_and_eval_randomized(pow_dim2_deg_resp, + &EcomXEaux, + &dim_two_ker, + true, + &codomain_product, + pushed_points, + sizeof(pushed_points) / sizeof(*pushed_points))) + return 0; + + assert(test_couple_point_order_twof(Tev1, &codomain_product, reduced_order)); + + // Set the auxiliary curve + copy_curve(&codomain->E1, &codomain_product.E2); + + // Set the codomain curve from the dim 2 isogeny + // it should always be the first curve + copy_curve(&codomain->E2, &codomain_product.E1); + + // Set the evaluated basis points + copy_point(&codomain->B1.P, &Tev1->P2); + copy_point(&codomain->B1.Q, &Tev2->P2); + copy_point(&codomain->B1.PmQ, &Tev1m2->P2); + + copy_point(&codomain->B2.P, &Tev1->P1); + copy_point(&codomain->B2.Q, &Tev2->P1); + copy_point(&codomain->B2.PmQ, &Tev1m2->P1); + return 1; +} + +static int +compute_small_chain_isogeny_signature(ec_curve_t *E_chall_2, + ec_basis_t *B_chall_2, + const quat_alg_elem_t *resp_quat, + int pow_dim2_deg_resp, + int length) +{ + int ret = 1; + + ibz_t two_pow; + ibz_init(&two_pow); + + ibz_vec_2_t vec_resp_two; + ibz_vec_2_init(&vec_resp_two); + + quat_left_ideal_t lideal_resp_two; + quat_left_ideal_init(&lideal_resp_two); + + // computing the ideal + ibz_pow(&two_pow, &ibz_const_two, length); + + // we compute the generator of the challenge ideal + quat_lideal_create(&lideal_resp_two, resp_quat, &two_pow, &MAXORD_O0, &QUATALG_PINFTY); + + // computing the coefficients of the kernel in terms of the basis of O0 + id2iso_ideal_to_kernel_dlogs_even(&vec_resp_two, &lideal_resp_two); + + ec_point_t points[3]; + copy_point(&points[0], &B_chall_2->P); + copy_point(&points[1], &B_chall_2->Q); + copy_point(&points[2], &B_chall_2->PmQ); + + // getting down to the right order and applying the matrix + ec_dbl_iter_basis(B_chall_2, pow_dim2_deg_resp + HD_extra_torsion, B_chall_2, E_chall_2); + assert(test_basis_order_twof(B_chall_2, E_chall_2, length)); + + ec_point_t ker; + // applying the vector to find the kernel + ec_biscalar_mul_ibz_vec(&ker, &vec_resp_two, length, B_chall_2, E_chall_2); + assert(test_point_order_twof(&ker, E_chall_2, length)); + + // computing the isogeny and pushing the points + if (ec_eval_small_chain(E_chall_2, &ker, length, points, 3, true)) { + ret = 0; + } + + // copying the result + copy_point(&B_chall_2->P, &points[0]); + copy_point(&B_chall_2->Q, &points[1]); + copy_point(&B_chall_2->PmQ, &points[2]); + + ibz_finalize(&two_pow); + ibz_vec_2_finalize(&vec_resp_two); + quat_left_ideal_finalize(&lideal_resp_two); + + return ret; +} + +static int +compute_challenge_codomain_signature(const signature_t *sig, + secret_key_t *sk, + ec_curve_t *E_chall, + const ec_curve_t *E_chall_2, + 
ec_basis_t *B_chall_2) +{ + ec_isog_even_t phi_chall; + ec_basis_t bas_sk; + copy_basis(&bas_sk, &sk->canonical_basis); + + phi_chall.curve = sk->curve; + phi_chall.length = TORSION_EVEN_POWER - sig->backtracking; + assert(test_basis_order_twof(&bas_sk, &sk->curve, TORSION_EVEN_POWER)); + + // Compute the kernel + { + ec_ladder3pt(&phi_chall.kernel, sig->chall_coeff, &bas_sk.P, &bas_sk.Q, &bas_sk.PmQ, &sk->curve); + } + assert(test_point_order_twof(&phi_chall.kernel, &sk->curve, TORSION_EVEN_POWER)); + + // Double kernel to get correct order + ec_dbl_iter(&phi_chall.kernel, sig->backtracking, &phi_chall.kernel, &sk->curve); + + assert(test_point_order_twof(&phi_chall.kernel, E_chall, phi_chall.length)); + + // Compute the codomain from challenge isogeny + if (ec_eval_even(E_chall, &phi_chall, NULL, 0)) + return 0; + +#ifndef NDEBUG + fp2_t j_chall, j_codomain; + ec_j_inv(&j_codomain, E_chall_2); + ec_j_inv(&j_chall, E_chall); + // apparently its always the second one curve + assert(fp2_is_equal(&j_chall, &j_codomain)); +#endif + + // applying the isomorphism from E_chall_2 to E_chall + ec_isom_t isom; + if (ec_isomorphism(&isom, E_chall_2, E_chall)) + return 0; // error due to a corner case with 1/p probability + ec_iso_eval(&B_chall_2->P, &isom); + ec_iso_eval(&B_chall_2->Q, &isom); + ec_iso_eval(&B_chall_2->PmQ, &isom); + + return 1; +} + +static void +set_aux_curve_signature(signature_t *sig, ec_curve_t *E_aux) +{ + ec_normalize_curve(E_aux); + fp2_copy(&sig->E_aux_A, &E_aux->A); +} + +static void +compute_and_set_basis_change_matrix(signature_t *sig, + const ec_basis_t *B_aux_2, + ec_basis_t *B_chall_2, + ec_curve_t *E_aux_2, + ec_curve_t *E_chall, + int f) +{ + // Matrices for change of bases matrices + ibz_mat_2x2_t mat_Baux2_to_Baux2_can, mat_Bchall_can_to_Bchall; + ibz_mat_2x2_init(&mat_Baux2_to_Baux2_can); + ibz_mat_2x2_init(&mat_Bchall_can_to_Bchall); + + // Compute canonical bases + ec_basis_t B_can_chall, B_aux_2_can; + sig->hint_chall = ec_curve_to_basis_2f_to_hint(&B_can_chall, E_chall, TORSION_EVEN_POWER); + sig->hint_aux = ec_curve_to_basis_2f_to_hint(&B_aux_2_can, E_aux_2, TORSION_EVEN_POWER); + +#ifndef NDEBUG + { + // Ensure all points have the desired order + assert(test_basis_order_twof(&B_aux_2_can, E_aux_2, TORSION_EVEN_POWER)); + assert(test_basis_order_twof(B_aux_2, E_aux_2, f)); + fp2_t w0; + weil(&w0, f, &B_aux_2->P, &B_aux_2->Q, &B_aux_2->PmQ, E_aux_2); + } +#endif + + // compute the matrix to go from B_aux_2 to B_aux_2_can + change_of_basis_matrix_tate_invert(&mat_Baux2_to_Baux2_can, &B_aux_2_can, B_aux_2, E_aux_2, f); + + // apply the change of basis to B_chall_2 + matrix_application_even_basis(B_chall_2, E_chall, &mat_Baux2_to_Baux2_can, f); + +#ifndef NDEBUG + { + // Ensure all points have the desired order + assert(test_basis_order_twof(&B_can_chall, E_chall, TORSION_EVEN_POWER)); + } +#endif + + // compute the matrix to go from B_chall_can to B_chall_2 + change_of_basis_matrix_tate(&mat_Bchall_can_to_Bchall, B_chall_2, &B_can_chall, E_chall, f); + + // Assert all values in the matrix are of the expected size for packing + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][1]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][1]) <= SQIsign_response_length + HD_extra_torsion); + + // Set the basis change matrix 
to signature + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][0], &(mat_Bchall_can_to_Bchall[0][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][1], &(mat_Bchall_can_to_Bchall[0][1])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][0], &(mat_Bchall_can_to_Bchall[1][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][1], &(mat_Bchall_can_to_Bchall[1][1])); + + // Finalise the matrices + ibz_mat_2x2_finalize(&mat_Bchall_can_to_Bchall); + ibz_mat_2x2_finalize(&mat_Baux2_to_Baux2_can); +} + +int +protocols_sign(signature_t *sig, const public_key_t *pk, secret_key_t *sk, const unsigned char *m, size_t l) +{ + int ret = 0; + int reduced_order = 0; // work around false positive gcc warning + + uint_fast8_t pow_dim2_deg_resp; + assert(SQIsign_response_length <= (intmax_t)UINT_FAST8_MAX); // otherwise we might need more bits there + + ibz_t remain, lattice_content, random_aux_norm, degree_resp_inv; + ibz_init(&remain); + ibz_init(&lattice_content); + ibz_init(&random_aux_norm); + ibz_init(&degree_resp_inv); + + quat_alg_elem_t resp_quat; + quat_alg_elem_init(&resp_quat); + + quat_left_ideal_t lideal_commit, lideal_com_resp; + quat_left_ideal_init(&lideal_commit); + quat_left_ideal_init(&lideal_com_resp); + + // This structure holds two curves E1 x E2 together with a basis + // Bi of E[2^n] for each of these curves + theta_couple_curve_with_basis_t Ecom_Eaux; + // This structure holds two curves E1 x E2 together with a basis + // Bi of Ei[2^n] + theta_couple_curve_with_basis_t Eaux2_Echall2; + + // This will hold the challenge curve + ec_curve_t E_chall = sk->curve; + + ec_curve_init(&Ecom_Eaux.E1); + ec_curve_init(&Ecom_Eaux.E2); + + while (!ret) { + + // computing the commitment + ret = commit(&Ecom_Eaux.E1, &Ecom_Eaux.B1, &lideal_commit); + + // start again if the commitment generation has failed + if (!ret) { + continue; + } + + // Hash the message to a kernel generator + // i.e.
a scalar such that ker = P + [s]Q + hash_to_challenge(&sig->chall_coeff, pk, &Ecom_Eaux.E1, m, l); + // Compute the challenge ideal and response quaternion element + { + quat_left_ideal_t lideal_chall_two; + quat_left_ideal_init(&lideal_chall_two); + + // computing the challenge ideal + compute_challenge_ideal_signature(&lideal_chall_two, sig, sk); + compute_response_quat_element(&resp_quat, &lattice_content, sk, &lideal_chall_two, &lideal_commit); + + // Clean up + quat_left_ideal_finalize(&lideal_chall_two); + } + + // computing the amount of backtracking we're making + // and removing it + compute_backtracking_signature(sig, &resp_quat, &lattice_content, &remain); + + // creating lideal_com * lideal_resp + // we first compute the norm of lideal_resp + // norm of the resp_quat + pow_dim2_deg_resp = compute_random_aux_norm_and_helpers(sig, + &random_aux_norm, + &degree_resp_inv, + &remain, + &lattice_content, + &resp_quat, + &lideal_com_resp, + &lideal_commit); + + // notational conventions: + // B0 = canonical basis of E0 + // B_com = image through commitment isogeny (odd degree) of canonical basis of E0 + // B_aux = image through aux_resp_com isogeny (odd degree) of canonical basis of E0 + + if (pow_dim2_deg_resp > 0) { + // Evaluate the random aux ideal on the curve E0 and its basis to find E_aux and B_aux + ret = + evaluate_random_aux_isogeny_signature(&Ecom_Eaux.E2, &Ecom_Eaux.B2, &random_aux_norm, &lideal_com_resp); + + // the auxiliary isogeny computation failed, so we must start again + if (!ret) { + continue; + } + +#ifndef NDEBUG + // testing that the order of the points in the bases is as expected + assert(test_basis_order_twof(&Ecom_Eaux.B1, &Ecom_Eaux.E1, TORSION_EVEN_POWER)); + assert(test_basis_order_twof(&Ecom_Eaux.B2, &Ecom_Eaux.E2, TORSION_EVEN_POWER)); +#endif + + // applying the matrix to compute Baux + // first, we reduce to the relevant order + reduced_order = pow_dim2_deg_resp + HD_extra_torsion + sig->two_resp_length; + ec_dbl_iter_basis(&Ecom_Eaux.B1, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B1, &Ecom_Eaux.E1); + ec_dbl_iter_basis(&Ecom_Eaux.B2, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B2, &Ecom_Eaux.E2); + + // Given all the above data, compute a dim two isogeny with domain + // E_com x E_aux + // and codomain + // E_aux_2 x E_chall_2 (note: E_chall_2 is isomorphic to E_chall) + // and evaluated points stored as bases in + // B_aux_2 on E_aux_2 + // B_chall_2 on E_chall_2 + ret = compute_dim2_isogeny_challenge( + &Eaux2_Echall2, &Ecom_Eaux, &degree_resp_inv, pow_dim2_deg_resp, sig->two_resp_length, reduced_order); + if (!ret) + continue; + } else { + // No 2d isogeny needed, so simulate a "Kani matrix" identity here + copy_curve(&Eaux2_Echall2.E1, &Ecom_Eaux.E1); + copy_curve(&Eaux2_Echall2.E2, &Ecom_Eaux.E1); + + reduced_order = sig->two_resp_length; + ec_dbl_iter_basis(&Eaux2_Echall2.B1, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B1, &Ecom_Eaux.E1); + ec_dbl_iter_basis(&Eaux2_Echall2.B1, TORSION_EVEN_POWER - reduced_order, &Ecom_Eaux.B1, &Ecom_Eaux.E1); + copy_basis(&Eaux2_Echall2.B2, &Eaux2_Echall2.B1); + } + + // computation of the remaining small chain of two isogenies when needed + if (sig->two_resp_length > 0) { + if (!compute_small_chain_isogeny_signature( + &Eaux2_Echall2.E2, &Eaux2_Echall2.B2, &resp_quat, pow_dim2_deg_resp, sig->two_resp_length)) { + assert(0); // this shouldn't fail + } + } + + // computation of the challenge codomain + if (!compute_challenge_codomain_signature(sig, sk, &E_chall, &Eaux2_Echall2.E2, &Eaux2_Echall2.B2)) + assert(0); //
this shouldn't fail + } + + // Set to the signature the Montgomery A-coefficient of E_aux_2 + set_aux_curve_signature(sig, &Eaux2_Echall2.E1); + + // Set the basis change matrix from canonical bases to the supplied bases + compute_and_set_basis_change_matrix( + sig, &Eaux2_Echall2.B1, &Eaux2_Echall2.B2, &Eaux2_Echall2.E1, &E_chall, reduced_order); + + quat_alg_elem_finalize(&resp_quat); + quat_left_ideal_finalize(&lideal_commit); + quat_left_ideal_finalize(&lideal_com_resp); + + ibz_finalize(&lattice_content); + ibz_finalize(&remain); + ibz_finalize(&degree_resp_inv); + ibz_finalize(&random_aux_norm); + + return ret; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/signature.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/signature.h new file mode 100644 index 0000000000..ba38c360e6 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/signature.h @@ -0,0 +1,97 @@ +/** @file + * + * @brief The key generation and signature protocols + */ + +#ifndef SIGNATURE_H +#define SIGNATURE_H + +#include +#include +#include +#include + +/** @defgroup signature SQIsignHD key generation and signature protocols + * @{ + */ +/** @defgroup signature_t Types for SQIsignHD key generation and signature protocols + * @{ + */ + +/** @brief Type for the secret keys + * + * @typedef secret_key_t + * + * @struct secret_key + * + */ +typedef struct secret_key +{ + ec_curve_t curve; /// the public curve, but with little precomputations + quat_left_ideal_t secret_ideal; + ibz_mat_2x2_t mat_BAcan_to_BA0_two; // mat_BA0_to_BAcan*BA0 = BAcan, where BAcan is the + // canonical basis of EA[2^e], and BA0 the image of the + // basis of E0[2^e] through the secret isogeny + ec_basis_t canonical_basis; // the canonical basis of the public key curve +} secret_key_t; + +/** @} + */ + +/*************************** Functions *****************************/ + +void secret_key_init(secret_key_t *sk); +void secret_key_finalize(secret_key_t *sk); + +/** + * @brief Key generation + * + * @param pk Output: will contain the public key + * @param sk Output: will contain the secret key + * @returns 1 if success, 0 otherwise + */ +int protocols_keygen(public_key_t *pk, secret_key_t *sk); + +/** + * @brief Signature computation + * + * @param sig Output: will contain the signature + * @param sk secret key + * @param pk public key + * @param m message + * @param l message length + * @returns 1 if success, 0 otherwise + */ +int protocols_sign(signature_t *sig, const public_key_t *pk, secret_key_t *sk, const unsigned char *m, size_t l); + +/*************************** Encoding *****************************/ + +/** @defgroup encoding Encoding and decoding functions + * @{ + */ + +/** + * @brief Encodes a secret key as a byte array + * + * @param enc : Byte array to encode the secret key (including public key) in + * @param sk : Secret key to encode + * @param pk : Public key to encode + */ +void secret_key_to_bytes(unsigned char *enc, const secret_key_t *sk, const public_key_t *pk); + +/** + * @brief Decodes a secret key (and public key) from a byte array + * + * @param sk : Structure to decode the secret key in + * @param pk : Structure to decode the public key in + * @param enc : Byte array to decode + */ +void secret_key_from_bytes(secret_key_t *sk, public_key_t *pk, const unsigned char *enc); + +/** @} + */ + +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign.c new file mode 100644 index 0000000000..7335c38d9a --- /dev/null +++
b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign.c @@ -0,0 +1,146 @@ +#include +#include +#include +#include +#if defined(ENABLE_SIGN) +#include +#endif + +#if defined(ENABLE_SIGN) +SQISIGN_API +int +sqisign_keypair(unsigned char *pk, unsigned char *sk) +{ + int ret = 0; + secret_key_t skt; + public_key_t pkt = { 0 }; + secret_key_init(&skt); + + ret = !protocols_keygen(&pkt, &skt); + + secret_key_to_bytes(sk, &skt, &pkt); + public_key_to_bytes(pk, &pkt); + secret_key_finalize(&skt); + return ret; +} + +SQISIGN_API +int +sqisign_sign(unsigned char *sm, + unsigned long long *smlen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *sk) +{ + int ret = 0; + secret_key_t skt; + public_key_t pkt = { 0 }; + signature_t sigt; + secret_key_init(&skt); + secret_key_from_bytes(&skt, &pkt, sk); + + memmove(sm + SIGNATURE_BYTES, m, mlen); + + ret = !protocols_sign(&sigt, &pkt, &skt, sm + SIGNATURE_BYTES, mlen); + if (ret != 0) { + *smlen = 0; + goto err; + } + + signature_to_bytes(sm, &sigt); + *smlen = SIGNATURE_BYTES + mlen; + +err: + secret_key_finalize(&skt); + return ret; +} + +SQISIGN_API +int +sqisign_sign_signature(unsigned char *s, + unsigned long long *slen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *sk) +{ + int ret = 0; + secret_key_t skt; + public_key_t pkt = { 0 }; + signature_t sigt; + secret_key_init(&skt); + secret_key_from_bytes(&skt, &pkt, sk); + + ret = !protocols_sign(&sigt, &pkt, &skt, m, mlen); + if (ret != 0) { + *slen = 0; + goto err; + } + + signature_to_bytes(s, &sigt); + *slen = SIGNATURE_BYTES; + +err: + secret_key_finalize(&skt); + return ret; +} +#endif + +SQISIGN_API +int +sqisign_open(unsigned char *m, + unsigned long long *mlen, + const unsigned char *sm, + unsigned long long smlen, + const unsigned char *pk) +{ + int ret = 0; + public_key_t pkt = { 0 }; + signature_t sigt; + + public_key_from_bytes(&pkt, pk); + signature_from_bytes(&sigt, sm); + + ret = !protocols_verify(&sigt, &pkt, sm + SIGNATURE_BYTES, smlen - SIGNATURE_BYTES); + + if (!ret) { + *mlen = smlen - SIGNATURE_BYTES; + memmove(m, sm + SIGNATURE_BYTES, *mlen); + } else { + *mlen = 0; + memset(m, 0, smlen - SIGNATURE_BYTES); + } + + return ret; +} + +SQISIGN_API +int +sqisign_verify(const unsigned char *m, + unsigned long long mlen, + const unsigned char *sig, + unsigned long long siglen, + const unsigned char *pk) +{ + + int ret = 0; + public_key_t pkt = { 0 }; + signature_t sigt; + + public_key_from_bytes(&pkt, pk); + signature_from_bytes(&sigt, sig); + + ret = !protocols_verify(&sigt, &pkt, m, mlen); + + return ret; +} + +SQISIGN_API +int +sqisign_verify_signature(const unsigned char *sig, + unsigned long long siglen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *pk) +{ + return sqisign_verify(m, mlen, sig, siglen, pk); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign_namespace.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign_namespace.h new file mode 100644 index 0000000000..007d2572b9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign_namespace.h @@ -0,0 +1,1071 @@ + +#ifndef SQISIGN_NAMESPACE_H +#define SQISIGN_NAMESPACE_H + +//#define DISABLE_NAMESPACING + +#if defined(_WIN32) +#define SQISIGN_API __declspec(dllexport) +#else +#define SQISIGN_API __attribute__((visibility("default"))) +#endif + +#define PARAM_JOIN3_(a, b, c) sqisign_##a##_##b##_##c +#define PARAM_JOIN3(a, b, c) PARAM_JOIN3_(a, b, c) +#define PARAM_NAME3(end, s) PARAM_JOIN3(SQISIGN_VARIANT, 
end, s) + +#define PARAM_JOIN2_(a, b) sqisign_##a##_##b +#define PARAM_JOIN2(a, b) PARAM_JOIN2_(a, b) +#define PARAM_NAME2(end, s) PARAM_JOIN2(end, s) + +#ifndef DISABLE_NAMESPACING +#define SQISIGN_NAMESPACE_GENERIC(s) PARAM_NAME2(gen, s) +#else +#define SQISIGN_NAMESPACE_GENERIC(s) s +#endif + +#if defined(SQISIGN_VARIANT) && !defined(DISABLE_NAMESPACING) +#if defined(SQISIGN_BUILD_TYPE_REF) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(ref, s) +#elif defined(SQISIGN_BUILD_TYPE_OPT) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(opt, s) +#elif defined(SQISIGN_BUILD_TYPE_BROADWELL) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(broadwell, s) +#elif defined(SQISIGN_BUILD_TYPE_ARM64CRYPTO) +#define SQISIGN_NAMESPACE(s) PARAM_NAME3(arm64crypto, s) +#else +#error "Build type not known" +#endif + +#else +#define SQISIGN_NAMESPACE(s) s +#endif + +// Namespacing symbols exported from algebra.c: +#undef quat_alg_add +#undef quat_alg_conj +#undef quat_alg_coord_mul +#undef quat_alg_elem_copy +#undef quat_alg_elem_copy_ibz +#undef quat_alg_elem_equal +#undef quat_alg_elem_is_zero +#undef quat_alg_elem_mul_by_scalar +#undef quat_alg_elem_set +#undef quat_alg_equal_denom +#undef quat_alg_init_set_ui +#undef quat_alg_make_primitive +#undef quat_alg_mul +#undef quat_alg_norm +#undef quat_alg_normalize +#undef quat_alg_scalar +#undef quat_alg_sub + +#define quat_alg_add SQISIGN_NAMESPACE_GENERIC(quat_alg_add) +#define quat_alg_conj SQISIGN_NAMESPACE_GENERIC(quat_alg_conj) +#define quat_alg_coord_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_coord_mul) +#define quat_alg_elem_copy SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy) +#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy_ibz) +#define quat_alg_elem_equal SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_equal) +#define quat_alg_elem_is_zero SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_is_zero) +#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_mul_by_scalar) +#define quat_alg_elem_set SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_set) +#define quat_alg_equal_denom SQISIGN_NAMESPACE_GENERIC(quat_alg_equal_denom) +#define quat_alg_init_set_ui SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set_ui) +#define quat_alg_make_primitive SQISIGN_NAMESPACE_GENERIC(quat_alg_make_primitive) +#define quat_alg_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_mul) +#define quat_alg_norm SQISIGN_NAMESPACE_GENERIC(quat_alg_norm) +#define quat_alg_normalize SQISIGN_NAMESPACE_GENERIC(quat_alg_normalize) +#define quat_alg_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_scalar) +#define quat_alg_sub SQISIGN_NAMESPACE_GENERIC(quat_alg_sub) + +// Namespacing symbols exported from api.c: +#undef crypto_sign +#undef crypto_sign_keypair +#undef crypto_sign_open + +#define crypto_sign SQISIGN_NAMESPACE(crypto_sign) +#define crypto_sign_keypair SQISIGN_NAMESPACE(crypto_sign_keypair) +#define crypto_sign_open SQISIGN_NAMESPACE(crypto_sign_open) + +// Namespacing symbols exported from basis.c: +#undef ec_curve_to_basis_2f_from_hint +#undef ec_curve_to_basis_2f_to_hint +#undef ec_recover_y +#undef lift_basis +#undef lift_basis_normalized + +#define ec_curve_to_basis_2f_from_hint SQISIGN_NAMESPACE(ec_curve_to_basis_2f_from_hint) +#define ec_curve_to_basis_2f_to_hint SQISIGN_NAMESPACE(ec_curve_to_basis_2f_to_hint) +#define ec_recover_y SQISIGN_NAMESPACE(ec_recover_y) +#define lift_basis SQISIGN_NAMESPACE(lift_basis) +#define lift_basis_normalized SQISIGN_NAMESPACE(lift_basis_normalized) + +// Namespacing symbols exported from biextension.c: +#undef clear_cofac +#undef ec_dlog_2_tate +#undef 
ec_dlog_2_weil +#undef fp2_frob +#undef reduced_tate +#undef weil + +#define clear_cofac SQISIGN_NAMESPACE(clear_cofac) +#define ec_dlog_2_tate SQISIGN_NAMESPACE(ec_dlog_2_tate) +#define ec_dlog_2_weil SQISIGN_NAMESPACE(ec_dlog_2_weil) +#define fp2_frob SQISIGN_NAMESPACE(fp2_frob) +#define reduced_tate SQISIGN_NAMESPACE(reduced_tate) +#define weil SQISIGN_NAMESPACE(weil) + +// Namespacing symbols exported from common.c: +#undef hash_to_challenge +#undef public_key_finalize +#undef public_key_init + +#define hash_to_challenge SQISIGN_NAMESPACE(hash_to_challenge) +#define public_key_finalize SQISIGN_NAMESPACE(public_key_finalize) +#define public_key_init SQISIGN_NAMESPACE(public_key_init) + +// Namespacing symbols exported from dim2.c: +#undef ibz_2x2_mul_mod +#undef ibz_mat_2x2_add +#undef ibz_mat_2x2_copy +#undef ibz_mat_2x2_det_from_ibz +#undef ibz_mat_2x2_eval +#undef ibz_mat_2x2_inv_mod +#undef ibz_mat_2x2_set +#undef ibz_vec_2_set + +#define ibz_2x2_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_2x2_mul_mod) +#define ibz_mat_2x2_add SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_add) +#define ibz_mat_2x2_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_copy) +#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_det_from_ibz) +#define ibz_mat_2x2_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_eval) +#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_inv_mod) +#define ibz_mat_2x2_set SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_set) +#define ibz_vec_2_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_set) + +// Namespacing symbols exported from dim2id2iso.c: +#undef dim2id2iso_arbitrary_isogeny_evaluation +#undef dim2id2iso_ideal_to_isogeny_clapotis +#undef find_uv +#undef fixed_degree_isogeny_and_eval + +#define dim2id2iso_arbitrary_isogeny_evaluation SQISIGN_NAMESPACE(dim2id2iso_arbitrary_isogeny_evaluation) +#define dim2id2iso_ideal_to_isogeny_clapotis SQISIGN_NAMESPACE(dim2id2iso_ideal_to_isogeny_clapotis) +#define find_uv SQISIGN_NAMESPACE(find_uv) +#define fixed_degree_isogeny_and_eval SQISIGN_NAMESPACE(fixed_degree_isogeny_and_eval) + +// Namespacing symbols exported from dim4.c: +#undef ibz_inv_dim4_make_coeff_mpm +#undef ibz_inv_dim4_make_coeff_pmp +#undef ibz_mat_4x4_copy +#undef ibz_mat_4x4_equal +#undef ibz_mat_4x4_eval +#undef ibz_mat_4x4_eval_t +#undef ibz_mat_4x4_gcd +#undef ibz_mat_4x4_identity +#undef ibz_mat_4x4_inv_with_det_as_denom +#undef ibz_mat_4x4_is_identity +#undef ibz_mat_4x4_mul +#undef ibz_mat_4x4_negate +#undef ibz_mat_4x4_scalar_div +#undef ibz_mat_4x4_scalar_mul +#undef ibz_mat_4x4_transpose +#undef ibz_mat_4x4_zero +#undef ibz_vec_4_add +#undef ibz_vec_4_content +#undef ibz_vec_4_copy +#undef ibz_vec_4_copy_ibz +#undef ibz_vec_4_is_zero +#undef ibz_vec_4_linear_combination +#undef ibz_vec_4_negate +#undef ibz_vec_4_scalar_div +#undef ibz_vec_4_scalar_mul +#undef ibz_vec_4_set +#undef ibz_vec_4_sub +#undef quat_qf_eval + +#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_mpm) +#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_pmp) +#define ibz_mat_4x4_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_copy) +#define ibz_mat_4x4_equal SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_equal) +#define ibz_mat_4x4_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval) +#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval_t) +#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_gcd) +#define ibz_mat_4x4_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_identity) +#define ibz_mat_4x4_inv_with_det_as_denom 
SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_inv_with_det_as_denom) +#define ibz_mat_4x4_is_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_identity) +#define ibz_mat_4x4_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_mul) +#define ibz_mat_4x4_negate SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_negate) +#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_div) +#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_mul) +#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_transpose) +#define ibz_mat_4x4_zero SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_zero) +#define ibz_vec_4_add SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_add) +#define ibz_vec_4_content SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_content) +#define ibz_vec_4_copy SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy) +#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_ibz) +#define ibz_vec_4_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_is_zero) +#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination) +#define ibz_vec_4_negate SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_negate) +#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_div) +#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul) +#define ibz_vec_4_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_set) +#define ibz_vec_4_sub SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_sub) +#define quat_qf_eval SQISIGN_NAMESPACE_GENERIC(quat_qf_eval) + +// Namespacing symbols exported from ec.c: +#undef cswap_points +#undef ec_biscalar_mul +#undef ec_curve_init +#undef ec_curve_init_from_A +#undef ec_curve_normalize_A24 +#undef ec_curve_verify_A +#undef ec_dbl +#undef ec_dbl_iter +#undef ec_dbl_iter_basis +#undef ec_has_zero_coordinate +#undef ec_is_basis_four_torsion +#undef ec_is_equal +#undef ec_is_four_torsion +#undef ec_is_two_torsion +#undef ec_is_zero +#undef ec_j_inv +#undef ec_ladder3pt +#undef ec_mul +#undef ec_normalize_curve +#undef ec_normalize_curve_and_A24 +#undef ec_normalize_point +#undef ec_point_init +#undef select_point +#undef xADD +#undef xDBL +#undef xDBLADD +#undef xDBLMUL +#undef xDBL_A24 +#undef xDBL_E0 +#undef xMUL + +#define cswap_points SQISIGN_NAMESPACE(cswap_points) +#define ec_biscalar_mul SQISIGN_NAMESPACE(ec_biscalar_mul) +#define ec_curve_init SQISIGN_NAMESPACE(ec_curve_init) +#define ec_curve_init_from_A SQISIGN_NAMESPACE(ec_curve_init_from_A) +#define ec_curve_normalize_A24 SQISIGN_NAMESPACE(ec_curve_normalize_A24) +#define ec_curve_verify_A SQISIGN_NAMESPACE(ec_curve_verify_A) +#define ec_dbl SQISIGN_NAMESPACE(ec_dbl) +#define ec_dbl_iter SQISIGN_NAMESPACE(ec_dbl_iter) +#define ec_dbl_iter_basis SQISIGN_NAMESPACE(ec_dbl_iter_basis) +#define ec_has_zero_coordinate SQISIGN_NAMESPACE(ec_has_zero_coordinate) +#define ec_is_basis_four_torsion SQISIGN_NAMESPACE(ec_is_basis_four_torsion) +#define ec_is_equal SQISIGN_NAMESPACE(ec_is_equal) +#define ec_is_four_torsion SQISIGN_NAMESPACE(ec_is_four_torsion) +#define ec_is_two_torsion SQISIGN_NAMESPACE(ec_is_two_torsion) +#define ec_is_zero SQISIGN_NAMESPACE(ec_is_zero) +#define ec_j_inv SQISIGN_NAMESPACE(ec_j_inv) +#define ec_ladder3pt SQISIGN_NAMESPACE(ec_ladder3pt) +#define ec_mul SQISIGN_NAMESPACE(ec_mul) +#define ec_normalize_curve SQISIGN_NAMESPACE(ec_normalize_curve) +#define ec_normalize_curve_and_A24 SQISIGN_NAMESPACE(ec_normalize_curve_and_A24) +#define ec_normalize_point SQISIGN_NAMESPACE(ec_normalize_point) +#define ec_point_init SQISIGN_NAMESPACE(ec_point_init) +#define select_point SQISIGN_NAMESPACE(select_point) 
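/*
 * Editor's note (not part of the upstream patch): an illustrative expansion of
 * the namespacing macros above, assuming this lvl5 reference build defines
 * SQISIGN_VARIANT as lvl5 and SQISIGN_BUILD_TYPE_REF (both come from the build
 * system and are not shown in this excerpt):
 *
 *   SQISIGN_NAMESPACE(crypto_sign)     -> sqisign_lvl5_ref_crypto_sign
 *   SQISIGN_NAMESPACE_GENERIC(ibz_add) -> sqisign_gen_ibz_add
 *
 * Variant/build-specific symbols get a sqisign_<variant>_<build>_ prefix, while
 * the variant-independent helpers share the sqisign_gen_ prefix; defining
 * DISABLE_NAMESPACING leaves the symbol names unchanged.
 */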
+#define xADD SQISIGN_NAMESPACE(xADD) +#define xDBL SQISIGN_NAMESPACE(xDBL) +#define xDBLADD SQISIGN_NAMESPACE(xDBLADD) +#define xDBLMUL SQISIGN_NAMESPACE(xDBLMUL) +#define xDBL_A24 SQISIGN_NAMESPACE(xDBL_A24) +#define xDBL_E0 SQISIGN_NAMESPACE(xDBL_E0) +#define xMUL SQISIGN_NAMESPACE(xMUL) + +// Namespacing symbols exported from ec_jac.c: +#undef ADD +#undef DBL +#undef DBLW +#undef copy_jac_point +#undef jac_from_ws +#undef jac_init +#undef jac_is_equal +#undef jac_neg +#undef jac_to_ws +#undef jac_to_xz +#undef jac_to_xz_add_components +#undef select_jac_point + +#define ADD SQISIGN_NAMESPACE(ADD) +#define DBL SQISIGN_NAMESPACE(DBL) +#define DBLW SQISIGN_NAMESPACE(DBLW) +#define copy_jac_point SQISIGN_NAMESPACE(copy_jac_point) +#define jac_from_ws SQISIGN_NAMESPACE(jac_from_ws) +#define jac_init SQISIGN_NAMESPACE(jac_init) +#define jac_is_equal SQISIGN_NAMESPACE(jac_is_equal) +#define jac_neg SQISIGN_NAMESPACE(jac_neg) +#define jac_to_ws SQISIGN_NAMESPACE(jac_to_ws) +#define jac_to_xz SQISIGN_NAMESPACE(jac_to_xz) +#define jac_to_xz_add_components SQISIGN_NAMESPACE(jac_to_xz_add_components) +#define select_jac_point SQISIGN_NAMESPACE(select_jac_point) + +// Namespacing symbols exported from encode_signature.c: +#undef secret_key_from_bytes +#undef secret_key_to_bytes + +#define secret_key_from_bytes SQISIGN_NAMESPACE(secret_key_from_bytes) +#define secret_key_to_bytes SQISIGN_NAMESPACE(secret_key_to_bytes) + +// Namespacing symbols exported from encode_verification.c: +#undef public_key_from_bytes +#undef public_key_to_bytes +#undef signature_from_bytes +#undef signature_to_bytes + +#define public_key_from_bytes SQISIGN_NAMESPACE(public_key_from_bytes) +#define public_key_to_bytes SQISIGN_NAMESPACE(public_key_to_bytes) +#define signature_from_bytes SQISIGN_NAMESPACE(signature_from_bytes) +#define signature_to_bytes SQISIGN_NAMESPACE(signature_to_bytes) + +// Namespacing symbols exported from finit.c: +#undef ibz_mat_2x2_finalize +#undef ibz_mat_2x2_init +#undef ibz_mat_4x4_finalize +#undef ibz_mat_4x4_init +#undef ibz_vec_2_finalize +#undef ibz_vec_2_init +#undef ibz_vec_4_finalize +#undef ibz_vec_4_init +#undef quat_alg_elem_finalize +#undef quat_alg_elem_init +#undef quat_alg_finalize +#undef quat_alg_init_set +#undef quat_lattice_finalize +#undef quat_lattice_init +#undef quat_left_ideal_finalize +#undef quat_left_ideal_init + +#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_finalize) +#define ibz_mat_2x2_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_init) +#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_finalize) +#define ibz_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_init) +#define ibz_vec_2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_finalize) +#define ibz_vec_2_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_init) +#define ibz_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_finalize) +#define ibz_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_init) +#define quat_alg_elem_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_finalize) +#define quat_alg_elem_init SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_init) +#define quat_alg_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_finalize) +#define quat_alg_init_set SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set) +#define quat_lattice_finalize SQISIGN_NAMESPACE_GENERIC(quat_lattice_finalize) +#define quat_lattice_init SQISIGN_NAMESPACE_GENERIC(quat_lattice_init) +#define quat_left_ideal_finalize SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_finalize) +#define quat_left_ideal_init 
SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_init) + +// Namespacing symbols exported from fp.c: +#undef fp_select +#undef p +#undef p2 + +#define fp_select SQISIGN_NAMESPACE(fp_select) +#define p SQISIGN_NAMESPACE(p) +#define p2 SQISIGN_NAMESPACE(p2) + +// Namespacing symbols exported from fp.c, fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c: +#undef fp_exp3div4 +#undef fp_inv +#undef fp_is_square +#undef fp_sqrt + +#define fp_exp3div4 SQISIGN_NAMESPACE(fp_exp3div4) +#define fp_inv SQISIGN_NAMESPACE(fp_inv) +#define fp_is_square SQISIGN_NAMESPACE(fp_is_square) +#define fp_sqrt SQISIGN_NAMESPACE(fp_sqrt) + +// Namespacing symbols exported from fp2.c: +#undef fp2_add +#undef fp2_add_one +#undef fp2_batched_inv +#undef fp2_copy +#undef fp2_cswap +#undef fp2_decode +#undef fp2_encode +#undef fp2_half +#undef fp2_inv +#undef fp2_is_equal +#undef fp2_is_one +#undef fp2_is_square +#undef fp2_is_zero +#undef fp2_mul +#undef fp2_mul_small +#undef fp2_neg +#undef fp2_pow_vartime +#undef fp2_print +#undef fp2_select +#undef fp2_set_one +#undef fp2_set_small +#undef fp2_set_zero +#undef fp2_sqr +#undef fp2_sqrt +#undef fp2_sqrt_verify +#undef fp2_sub + +#define fp2_add SQISIGN_NAMESPACE(fp2_add) +#define fp2_add_one SQISIGN_NAMESPACE(fp2_add_one) +#define fp2_batched_inv SQISIGN_NAMESPACE(fp2_batched_inv) +#define fp2_copy SQISIGN_NAMESPACE(fp2_copy) +#define fp2_cswap SQISIGN_NAMESPACE(fp2_cswap) +#define fp2_decode SQISIGN_NAMESPACE(fp2_decode) +#define fp2_encode SQISIGN_NAMESPACE(fp2_encode) +#define fp2_half SQISIGN_NAMESPACE(fp2_half) +#define fp2_inv SQISIGN_NAMESPACE(fp2_inv) +#define fp2_is_equal SQISIGN_NAMESPACE(fp2_is_equal) +#define fp2_is_one SQISIGN_NAMESPACE(fp2_is_one) +#define fp2_is_square SQISIGN_NAMESPACE(fp2_is_square) +#define fp2_is_zero SQISIGN_NAMESPACE(fp2_is_zero) +#define fp2_mul SQISIGN_NAMESPACE(fp2_mul) +#define fp2_mul_small SQISIGN_NAMESPACE(fp2_mul_small) +#define fp2_neg SQISIGN_NAMESPACE(fp2_neg) +#define fp2_pow_vartime SQISIGN_NAMESPACE(fp2_pow_vartime) +#define fp2_print SQISIGN_NAMESPACE(fp2_print) +#define fp2_select SQISIGN_NAMESPACE(fp2_select) +#define fp2_set_one SQISIGN_NAMESPACE(fp2_set_one) +#define fp2_set_small SQISIGN_NAMESPACE(fp2_set_small) +#define fp2_set_zero SQISIGN_NAMESPACE(fp2_set_zero) +#define fp2_sqr SQISIGN_NAMESPACE(fp2_sqr) +#define fp2_sqrt SQISIGN_NAMESPACE(fp2_sqrt) +#define fp2_sqrt_verify SQISIGN_NAMESPACE(fp2_sqrt_verify) +#define fp2_sub SQISIGN_NAMESPACE(fp2_sub) + +// Namespacing symbols exported from fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c: +#undef fp_copy +#undef fp_cswap +#undef fp_decode +#undef fp_decode_reduce +#undef fp_div3 +#undef fp_encode +#undef fp_half +#undef fp_is_equal +#undef fp_is_zero +#undef fp_mul_small +#undef fp_neg +#undef fp_set_one +#undef fp_set_small +#undef fp_set_zero + +#define fp_copy SQISIGN_NAMESPACE(fp_copy) +#define fp_cswap SQISIGN_NAMESPACE(fp_cswap) +#define fp_decode SQISIGN_NAMESPACE(fp_decode) +#define fp_decode_reduce SQISIGN_NAMESPACE(fp_decode_reduce) +#define fp_div3 SQISIGN_NAMESPACE(fp_div3) +#define fp_encode SQISIGN_NAMESPACE(fp_encode) +#define fp_half SQISIGN_NAMESPACE(fp_half) +#define fp_is_equal SQISIGN_NAMESPACE(fp_is_equal) +#define fp_is_zero SQISIGN_NAMESPACE(fp_is_zero) +#define fp_mul_small SQISIGN_NAMESPACE(fp_mul_small) +#define fp_neg SQISIGN_NAMESPACE(fp_neg) +#define fp_set_one SQISIGN_NAMESPACE(fp_set_one) +#define fp_set_small SQISIGN_NAMESPACE(fp_set_small) +#define fp_set_zero SQISIGN_NAMESPACE(fp_set_zero) +#define ONE SQISIGN_NAMESPACE(ONE) +#define 
ZERO SQISIGN_NAMESPACE(ZERO) + +// Namespacing symbols exported from fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c, gf27500.c, gf5248.c, gf65376.c: +#undef fp_add +#undef fp_mul +#undef fp_sqr +#undef fp_sub + +#define fp_add SQISIGN_NAMESPACE(fp_add) +#define fp_mul SQISIGN_NAMESPACE(fp_mul) +#define fp_sqr SQISIGN_NAMESPACE(fp_sqr) +#define fp_sub SQISIGN_NAMESPACE(fp_sub) + +// Namespacing symbols exported from gf27500.c: +#undef gf27500_decode +#undef gf27500_decode_reduce +#undef gf27500_div +#undef gf27500_div3 +#undef gf27500_encode +#undef gf27500_invert +#undef gf27500_legendre +#undef gf27500_sqrt + +#define gf27500_decode SQISIGN_NAMESPACE(gf27500_decode) +#define gf27500_decode_reduce SQISIGN_NAMESPACE(gf27500_decode_reduce) +#define gf27500_div SQISIGN_NAMESPACE(gf27500_div) +#define gf27500_div3 SQISIGN_NAMESPACE(gf27500_div3) +#define gf27500_encode SQISIGN_NAMESPACE(gf27500_encode) +#define gf27500_invert SQISIGN_NAMESPACE(gf27500_invert) +#define gf27500_legendre SQISIGN_NAMESPACE(gf27500_legendre) +#define gf27500_sqrt SQISIGN_NAMESPACE(gf27500_sqrt) + +// Namespacing symbols exported from gf27500.c, gf5248.c, gf65376.c: +#undef fp2_mul_c0 +#undef fp2_mul_c1 +#undef fp2_sq_c0 +#undef fp2_sq_c1 + +#define fp2_mul_c0 SQISIGN_NAMESPACE(fp2_mul_c0) +#define fp2_mul_c1 SQISIGN_NAMESPACE(fp2_mul_c1) +#define fp2_sq_c0 SQISIGN_NAMESPACE(fp2_sq_c0) +#define fp2_sq_c1 SQISIGN_NAMESPACE(fp2_sq_c1) + +// Namespacing symbols exported from gf5248.c: +#undef gf5248_decode +#undef gf5248_decode_reduce +#undef gf5248_div +#undef gf5248_div3 +#undef gf5248_encode +#undef gf5248_invert +#undef gf5248_legendre +#undef gf5248_sqrt + +#define gf5248_decode SQISIGN_NAMESPACE(gf5248_decode) +#define gf5248_decode_reduce SQISIGN_NAMESPACE(gf5248_decode_reduce) +#define gf5248_div SQISIGN_NAMESPACE(gf5248_div) +#define gf5248_div3 SQISIGN_NAMESPACE(gf5248_div3) +#define gf5248_encode SQISIGN_NAMESPACE(gf5248_encode) +#define gf5248_invert SQISIGN_NAMESPACE(gf5248_invert) +#define gf5248_legendre SQISIGN_NAMESPACE(gf5248_legendre) +#define gf5248_sqrt SQISIGN_NAMESPACE(gf5248_sqrt) + +// Namespacing symbols exported from gf65376.c: +#undef gf65376_decode +#undef gf65376_decode_reduce +#undef gf65376_div +#undef gf65376_div3 +#undef gf65376_encode +#undef gf65376_invert +#undef gf65376_legendre +#undef gf65376_sqrt + +#define gf65376_decode SQISIGN_NAMESPACE(gf65376_decode) +#define gf65376_decode_reduce SQISIGN_NAMESPACE(gf65376_decode_reduce) +#define gf65376_div SQISIGN_NAMESPACE(gf65376_div) +#define gf65376_div3 SQISIGN_NAMESPACE(gf65376_div3) +#define gf65376_encode SQISIGN_NAMESPACE(gf65376_encode) +#define gf65376_invert SQISIGN_NAMESPACE(gf65376_invert) +#define gf65376_legendre SQISIGN_NAMESPACE(gf65376_legendre) +#define gf65376_sqrt SQISIGN_NAMESPACE(gf65376_sqrt) + +// Namespacing symbols exported from hd.c: +#undef add_couple_jac_points +#undef copy_bases_to_kernel +#undef couple_jac_to_xz +#undef double_couple_jac_point +#undef double_couple_jac_point_iter +#undef double_couple_point +#undef double_couple_point_iter + +#define add_couple_jac_points SQISIGN_NAMESPACE(add_couple_jac_points) +#define copy_bases_to_kernel SQISIGN_NAMESPACE(copy_bases_to_kernel) +#define couple_jac_to_xz SQISIGN_NAMESPACE(couple_jac_to_xz) +#define double_couple_jac_point SQISIGN_NAMESPACE(double_couple_jac_point) +#define double_couple_jac_point_iter SQISIGN_NAMESPACE(double_couple_jac_point_iter) +#define double_couple_point SQISIGN_NAMESPACE(double_couple_point) +#define double_couple_point_iter 
SQISIGN_NAMESPACE(double_couple_point_iter) + +// Namespacing symbols exported from hnf.c: +#undef ibz_mat_4x4_is_hnf +#undef ibz_mat_4xn_hnf_mod_core +#undef ibz_vec_4_copy_mod +#undef ibz_vec_4_linear_combination_mod +#undef ibz_vec_4_scalar_mul_mod + +#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_hnf) +#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE_GENERIC(ibz_mat_4xn_hnf_mod_core) +#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_mod) +#define ibz_vec_4_linear_combination_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination_mod) +#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul_mod) + +// Namespacing symbols exported from hnf_internal.c: +#undef ibz_centered_mod +#undef ibz_conditional_assign +#undef ibz_mod_not_zero +#undef ibz_xgcd_with_u_not_0 + +#define ibz_centered_mod SQISIGN_NAMESPACE_GENERIC(ibz_centered_mod) +#define ibz_conditional_assign SQISIGN_NAMESPACE_GENERIC(ibz_conditional_assign) +#define ibz_mod_not_zero SQISIGN_NAMESPACE_GENERIC(ibz_mod_not_zero) +#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE_GENERIC(ibz_xgcd_with_u_not_0) + +// Namespacing symbols exported from ibz_division.c: +#undef ibz_xgcd + +#define ibz_xgcd SQISIGN_NAMESPACE_GENERIC(ibz_xgcd) + +// Namespacing symbols exported from id2iso.c: +#undef change_of_basis_matrix_tate +#undef change_of_basis_matrix_tate_invert +#undef ec_biscalar_mul_ibz_vec +#undef endomorphism_application_even_basis +#undef id2iso_ideal_to_kernel_dlogs_even +#undef id2iso_kernel_dlogs_to_ideal_even +#undef matrix_application_even_basis + +#define change_of_basis_matrix_tate SQISIGN_NAMESPACE(change_of_basis_matrix_tate) +#define change_of_basis_matrix_tate_invert SQISIGN_NAMESPACE(change_of_basis_matrix_tate_invert) +#define ec_biscalar_mul_ibz_vec SQISIGN_NAMESPACE(ec_biscalar_mul_ibz_vec) +#define endomorphism_application_even_basis SQISIGN_NAMESPACE(endomorphism_application_even_basis) +#define id2iso_ideal_to_kernel_dlogs_even SQISIGN_NAMESPACE(id2iso_ideal_to_kernel_dlogs_even) +#define id2iso_kernel_dlogs_to_ideal_even SQISIGN_NAMESPACE(id2iso_kernel_dlogs_to_ideal_even) +#define matrix_application_even_basis SQISIGN_NAMESPACE(matrix_application_even_basis) + +// Namespacing symbols exported from ideal.c: +#undef quat_lideal_add +#undef quat_lideal_class_gram +#undef quat_lideal_conjugate_without_hnf +#undef quat_lideal_copy +#undef quat_lideal_create +#undef quat_lideal_create_principal +#undef quat_lideal_equals +#undef quat_lideal_generator +#undef quat_lideal_inter +#undef quat_lideal_inverse_lattice_without_hnf +#undef quat_lideal_mul +#undef quat_lideal_norm +#undef quat_lideal_right_order +#undef quat_lideal_right_transporter +#undef quat_order_discriminant +#undef quat_order_is_maximal + +#define quat_lideal_add SQISIGN_NAMESPACE_GENERIC(quat_lideal_add) +#define quat_lideal_class_gram SQISIGN_NAMESPACE_GENERIC(quat_lideal_class_gram) +#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_conjugate_without_hnf) +#define quat_lideal_copy SQISIGN_NAMESPACE_GENERIC(quat_lideal_copy) +#define quat_lideal_create SQISIGN_NAMESPACE_GENERIC(quat_lideal_create) +#define quat_lideal_create_principal SQISIGN_NAMESPACE_GENERIC(quat_lideal_create_principal) +#define quat_lideal_equals SQISIGN_NAMESPACE_GENERIC(quat_lideal_equals) +#define quat_lideal_generator SQISIGN_NAMESPACE_GENERIC(quat_lideal_generator) +#define quat_lideal_inter SQISIGN_NAMESPACE_GENERIC(quat_lideal_inter) +#define 
quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_inverse_lattice_without_hnf) +#define quat_lideal_mul SQISIGN_NAMESPACE_GENERIC(quat_lideal_mul) +#define quat_lideal_norm SQISIGN_NAMESPACE_GENERIC(quat_lideal_norm) +#define quat_lideal_right_order SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_order) +#define quat_lideal_right_transporter SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_transporter) +#define quat_order_discriminant SQISIGN_NAMESPACE_GENERIC(quat_order_discriminant) +#define quat_order_is_maximal SQISIGN_NAMESPACE_GENERIC(quat_order_is_maximal) + +// Namespacing symbols exported from intbig.c: +#undef ibz_abs +#undef ibz_add +#undef ibz_bitsize +#undef ibz_cmp +#undef ibz_cmp_int32 +#undef ibz_convert_to_str +#undef ibz_copy +#undef ibz_copy_digits +#undef ibz_div +#undef ibz_div_2exp +#undef ibz_div_floor +#undef ibz_divides +#undef ibz_finalize +#undef ibz_gcd +#undef ibz_get +#undef ibz_init +#undef ibz_invmod +#undef ibz_is_even +#undef ibz_is_odd +#undef ibz_is_one +#undef ibz_is_zero +#undef ibz_legendre +#undef ibz_mod +#undef ibz_mod_ui +#undef ibz_mul +#undef ibz_neg +#undef ibz_pow +#undef ibz_pow_mod +#undef ibz_print +#undef ibz_probab_prime +#undef ibz_rand_interval +#undef ibz_rand_interval_bits +#undef ibz_rand_interval_i +#undef ibz_rand_interval_minm_m +#undef ibz_set +#undef ibz_set_from_str +#undef ibz_size_in_base +#undef ibz_sqrt +#undef ibz_sqrt_floor +#undef ibz_sqrt_mod_p +#undef ibz_sub +#undef ibz_swap +#undef ibz_to_digits +#undef ibz_two_adic + +#define ibz_abs SQISIGN_NAMESPACE_GENERIC(ibz_abs) +#define ibz_add SQISIGN_NAMESPACE_GENERIC(ibz_add) +#define ibz_bitsize SQISIGN_NAMESPACE_GENERIC(ibz_bitsize) +#define ibz_cmp SQISIGN_NAMESPACE_GENERIC(ibz_cmp) +#define ibz_cmp_int32 SQISIGN_NAMESPACE_GENERIC(ibz_cmp_int32) +#define ibz_convert_to_str SQISIGN_NAMESPACE_GENERIC(ibz_convert_to_str) +#define ibz_copy SQISIGN_NAMESPACE_GENERIC(ibz_copy) +#define ibz_copy_digits SQISIGN_NAMESPACE_GENERIC(ibz_copy_digits) +#define ibz_div SQISIGN_NAMESPACE_GENERIC(ibz_div) +#define ibz_div_2exp SQISIGN_NAMESPACE_GENERIC(ibz_div_2exp) +#define ibz_div_floor SQISIGN_NAMESPACE_GENERIC(ibz_div_floor) +#define ibz_divides SQISIGN_NAMESPACE_GENERIC(ibz_divides) +#define ibz_finalize SQISIGN_NAMESPACE_GENERIC(ibz_finalize) +#define ibz_gcd SQISIGN_NAMESPACE_GENERIC(ibz_gcd) +#define ibz_get SQISIGN_NAMESPACE_GENERIC(ibz_get) +#define ibz_init SQISIGN_NAMESPACE_GENERIC(ibz_init) +#define ibz_invmod SQISIGN_NAMESPACE_GENERIC(ibz_invmod) +#define ibz_is_even SQISIGN_NAMESPACE_GENERIC(ibz_is_even) +#define ibz_is_odd SQISIGN_NAMESPACE_GENERIC(ibz_is_odd) +#define ibz_is_one SQISIGN_NAMESPACE_GENERIC(ibz_is_one) +#define ibz_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_is_zero) +#define ibz_legendre SQISIGN_NAMESPACE_GENERIC(ibz_legendre) +#define ibz_mod SQISIGN_NAMESPACE_GENERIC(ibz_mod) +#define ibz_mod_ui SQISIGN_NAMESPACE_GENERIC(ibz_mod_ui) +#define ibz_mul SQISIGN_NAMESPACE_GENERIC(ibz_mul) +#define ibz_neg SQISIGN_NAMESPACE_GENERIC(ibz_neg) +#define ibz_pow SQISIGN_NAMESPACE_GENERIC(ibz_pow) +#define ibz_pow_mod SQISIGN_NAMESPACE_GENERIC(ibz_pow_mod) +#define ibz_print SQISIGN_NAMESPACE_GENERIC(ibz_print) +#define ibz_probab_prime SQISIGN_NAMESPACE_GENERIC(ibz_probab_prime) +#define ibz_rand_interval SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval) +#define ibz_rand_interval_bits SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_bits) +#define ibz_rand_interval_i SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_i) +#define ibz_rand_interval_minm_m 
SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_minm_m) +#define ibz_set SQISIGN_NAMESPACE_GENERIC(ibz_set) +#define ibz_set_from_str SQISIGN_NAMESPACE_GENERIC(ibz_set_from_str) +#define ibz_size_in_base SQISIGN_NAMESPACE_GENERIC(ibz_size_in_base) +#define ibz_sqrt SQISIGN_NAMESPACE_GENERIC(ibz_sqrt) +#define ibz_sqrt_floor SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_floor) +#define ibz_sqrt_mod_p SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_mod_p) +#define ibz_sub SQISIGN_NAMESPACE_GENERIC(ibz_sub) +#define ibz_swap SQISIGN_NAMESPACE_GENERIC(ibz_swap) +#define ibz_to_digits SQISIGN_NAMESPACE_GENERIC(ibz_to_digits) +#define ibz_two_adic SQISIGN_NAMESPACE_GENERIC(ibz_two_adic) + +// Namespacing symbols exported from integers.c: +#undef ibz_cornacchia_prime +#undef ibz_generate_random_prime + +#define ibz_cornacchia_prime SQISIGN_NAMESPACE_GENERIC(ibz_cornacchia_prime) +#define ibz_generate_random_prime SQISIGN_NAMESPACE_GENERIC(ibz_generate_random_prime) + +// Namespacing symbols exported from isog_chains.c: +#undef ec_eval_even +#undef ec_eval_small_chain +#undef ec_iso_eval +#undef ec_isomorphism + +#define ec_eval_even SQISIGN_NAMESPACE(ec_eval_even) +#define ec_eval_small_chain SQISIGN_NAMESPACE(ec_eval_small_chain) +#define ec_iso_eval SQISIGN_NAMESPACE(ec_iso_eval) +#define ec_isomorphism SQISIGN_NAMESPACE(ec_isomorphism) + +// Namespacing symbols exported from keygen.c: +#undef protocols_keygen +#undef secret_key_finalize +#undef secret_key_init + +#define protocols_keygen SQISIGN_NAMESPACE(protocols_keygen) +#define secret_key_finalize SQISIGN_NAMESPACE(secret_key_finalize) +#define secret_key_init SQISIGN_NAMESPACE(secret_key_init) + +// Namespacing symbols exported from l2.c: +#undef quat_lattice_lll +#undef quat_lll_core + +#define quat_lattice_lll SQISIGN_NAMESPACE_GENERIC(quat_lattice_lll) +#define quat_lll_core SQISIGN_NAMESPACE_GENERIC(quat_lll_core) + +// Namespacing symbols exported from lat_ball.c: +#undef quat_lattice_bound_parallelogram +#undef quat_lattice_sample_from_ball + +#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE_GENERIC(quat_lattice_bound_parallelogram) +#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE_GENERIC(quat_lattice_sample_from_ball) + +// Namespacing symbols exported from lattice.c: +#undef quat_lattice_add +#undef quat_lattice_alg_elem_mul +#undef quat_lattice_conjugate_without_hnf +#undef quat_lattice_contains +#undef quat_lattice_dual_without_hnf +#undef quat_lattice_equal +#undef quat_lattice_gram +#undef quat_lattice_hnf +#undef quat_lattice_inclusion +#undef quat_lattice_index +#undef quat_lattice_intersect +#undef quat_lattice_mat_alg_coord_mul_without_hnf +#undef quat_lattice_mul +#undef quat_lattice_reduce_denom + +#define quat_lattice_add SQISIGN_NAMESPACE_GENERIC(quat_lattice_add) +#define quat_lattice_alg_elem_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_alg_elem_mul) +#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_conjugate_without_hnf) +#define quat_lattice_contains SQISIGN_NAMESPACE_GENERIC(quat_lattice_contains) +#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_dual_without_hnf) +#define quat_lattice_equal SQISIGN_NAMESPACE_GENERIC(quat_lattice_equal) +#define quat_lattice_gram SQISIGN_NAMESPACE_GENERIC(quat_lattice_gram) +#define quat_lattice_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_hnf) +#define quat_lattice_inclusion SQISIGN_NAMESPACE_GENERIC(quat_lattice_inclusion) +#define quat_lattice_index SQISIGN_NAMESPACE_GENERIC(quat_lattice_index) +#define quat_lattice_intersect 
SQISIGN_NAMESPACE_GENERIC(quat_lattice_intersect) +#define quat_lattice_mat_alg_coord_mul_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_mat_alg_coord_mul_without_hnf) +#define quat_lattice_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_mul) +#define quat_lattice_reduce_denom SQISIGN_NAMESPACE_GENERIC(quat_lattice_reduce_denom) + +// Namespacing symbols exported from lll_applications.c: +#undef quat_lideal_lideal_mul_reduced +#undef quat_lideal_prime_norm_reduced_equivalent +#undef quat_lideal_reduce_basis + +#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE_GENERIC(quat_lideal_lideal_mul_reduced) +#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE_GENERIC(quat_lideal_prime_norm_reduced_equivalent) +#define quat_lideal_reduce_basis SQISIGN_NAMESPACE_GENERIC(quat_lideal_reduce_basis) + +// Namespacing symbols exported from lll_verification.c: +#undef ibq_vec_4_copy_ibz +#undef quat_lll_bilinear +#undef quat_lll_gram_schmidt_transposed_with_ibq +#undef quat_lll_set_ibq_parameters +#undef quat_lll_verify + +#define ibq_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_copy_ibz) +#define quat_lll_bilinear SQISIGN_NAMESPACE_GENERIC(quat_lll_bilinear) +#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE_GENERIC(quat_lll_gram_schmidt_transposed_with_ibq) +#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE_GENERIC(quat_lll_set_ibq_parameters) +#define quat_lll_verify SQISIGN_NAMESPACE_GENERIC(quat_lll_verify) + +// Namespacing symbols exported from mem.c: +#undef sqisign_secure_clear +#undef sqisign_secure_free + +#define sqisign_secure_clear SQISIGN_NAMESPACE_GENERIC(sqisign_secure_clear) +#define sqisign_secure_free SQISIGN_NAMESPACE_GENERIC(sqisign_secure_free) + +// Namespacing symbols exported from mp.c: +#undef MUL +#undef mp_add +#undef mp_compare +#undef mp_copy +#undef mp_inv_2e +#undef mp_invert_matrix +#undef mp_is_one +#undef mp_is_zero +#undef mp_mod_2exp +#undef mp_mul +#undef mp_mul2 +#undef mp_neg +#undef mp_print +#undef mp_shiftl +#undef mp_shiftr +#undef mp_sub +#undef multiple_mp_shiftl +#undef select_ct +#undef swap_ct + +#define MUL SQISIGN_NAMESPACE_GENERIC(MUL) +#define mp_add SQISIGN_NAMESPACE_GENERIC(mp_add) +#define mp_compare SQISIGN_NAMESPACE_GENERIC(mp_compare) +#define mp_copy SQISIGN_NAMESPACE_GENERIC(mp_copy) +#define mp_inv_2e SQISIGN_NAMESPACE_GENERIC(mp_inv_2e) +#define mp_invert_matrix SQISIGN_NAMESPACE_GENERIC(mp_invert_matrix) +#define mp_is_one SQISIGN_NAMESPACE_GENERIC(mp_is_one) +#define mp_is_zero SQISIGN_NAMESPACE_GENERIC(mp_is_zero) +#define mp_mod_2exp SQISIGN_NAMESPACE_GENERIC(mp_mod_2exp) +#define mp_mul SQISIGN_NAMESPACE_GENERIC(mp_mul) +#define mp_mul2 SQISIGN_NAMESPACE_GENERIC(mp_mul2) +#define mp_neg SQISIGN_NAMESPACE_GENERIC(mp_neg) +#define mp_print SQISIGN_NAMESPACE_GENERIC(mp_print) +#define mp_shiftl SQISIGN_NAMESPACE_GENERIC(mp_shiftl) +#define mp_shiftr SQISIGN_NAMESPACE_GENERIC(mp_shiftr) +#define mp_sub SQISIGN_NAMESPACE_GENERIC(mp_sub) +#define multiple_mp_shiftl SQISIGN_NAMESPACE_GENERIC(multiple_mp_shiftl) +#define select_ct SQISIGN_NAMESPACE_GENERIC(select_ct) +#define swap_ct SQISIGN_NAMESPACE_GENERIC(swap_ct) + +// Namespacing symbols exported from normeq.c: +#undef quat_change_to_O0_basis +#undef quat_lattice_O0_set +#undef quat_lattice_O0_set_extremal +#undef quat_order_elem_create +#undef quat_represent_integer +#undef quat_sampling_random_ideal_O0_given_norm + +#define quat_change_to_O0_basis SQISIGN_NAMESPACE_GENERIC(quat_change_to_O0_basis) +#define quat_lattice_O0_set 
SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set) +#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set_extremal) +#define quat_order_elem_create SQISIGN_NAMESPACE_GENERIC(quat_order_elem_create) +#define quat_represent_integer SQISIGN_NAMESPACE_GENERIC(quat_represent_integer) +#define quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE_GENERIC(quat_sampling_random_ideal_O0_given_norm) + +// Namespacing symbols exported from printer.c: +#undef ibz_mat_2x2_print +#undef ibz_mat_4x4_print +#undef ibz_vec_2_print +#undef ibz_vec_4_print +#undef quat_alg_elem_print +#undef quat_alg_print +#undef quat_lattice_print +#undef quat_left_ideal_print + +#define ibz_mat_2x2_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_print) +#define ibz_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_print) +#define ibz_vec_2_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_print) +#define ibz_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_print) +#define quat_alg_elem_print SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_print) +#define quat_alg_print SQISIGN_NAMESPACE_GENERIC(quat_alg_print) +#define quat_lattice_print SQISIGN_NAMESPACE_GENERIC(quat_lattice_print) +#define quat_left_ideal_print SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_print) + +// Namespacing symbols exported from random_input_generation.c: +#undef quat_test_input_random_ideal_generation +#undef quat_test_input_random_ideal_lattice_generation +#undef quat_test_input_random_lattice_generation + +#define quat_test_input_random_ideal_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_generation) +#define quat_test_input_random_ideal_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_lattice_generation) +#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_lattice_generation) + +// Namespacing symbols exported from rationals.c: +#undef ibq_abs +#undef ibq_add +#undef ibq_cmp +#undef ibq_copy +#undef ibq_finalize +#undef ibq_init +#undef ibq_inv +#undef ibq_is_ibz +#undef ibq_is_one +#undef ibq_is_zero +#undef ibq_mat_4x4_finalize +#undef ibq_mat_4x4_init +#undef ibq_mat_4x4_print +#undef ibq_mul +#undef ibq_neg +#undef ibq_reduce +#undef ibq_set +#undef ibq_sub +#undef ibq_to_ibz +#undef ibq_vec_4_finalize +#undef ibq_vec_4_init +#undef ibq_vec_4_print + +#define ibq_abs SQISIGN_NAMESPACE_GENERIC(ibq_abs) +#define ibq_add SQISIGN_NAMESPACE_GENERIC(ibq_add) +#define ibq_cmp SQISIGN_NAMESPACE_GENERIC(ibq_cmp) +#define ibq_copy SQISIGN_NAMESPACE_GENERIC(ibq_copy) +#define ibq_finalize SQISIGN_NAMESPACE_GENERIC(ibq_finalize) +#define ibq_init SQISIGN_NAMESPACE_GENERIC(ibq_init) +#define ibq_inv SQISIGN_NAMESPACE_GENERIC(ibq_inv) +#define ibq_is_ibz SQISIGN_NAMESPACE_GENERIC(ibq_is_ibz) +#define ibq_is_one SQISIGN_NAMESPACE_GENERIC(ibq_is_one) +#define ibq_is_zero SQISIGN_NAMESPACE_GENERIC(ibq_is_zero) +#define ibq_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_finalize) +#define ibq_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_init) +#define ibq_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_print) +#define ibq_mul SQISIGN_NAMESPACE_GENERIC(ibq_mul) +#define ibq_neg SQISIGN_NAMESPACE_GENERIC(ibq_neg) +#define ibq_reduce SQISIGN_NAMESPACE_GENERIC(ibq_reduce) +#define ibq_set SQISIGN_NAMESPACE_GENERIC(ibq_set) +#define ibq_sub SQISIGN_NAMESPACE_GENERIC(ibq_sub) +#define ibq_to_ibz SQISIGN_NAMESPACE_GENERIC(ibq_to_ibz) +#define ibq_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_finalize) +#define ibq_vec_4_init 
SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_init) +#define ibq_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_print) + +// Namespacing symbols exported from sign.c: +#undef protocols_sign + +#define protocols_sign SQISIGN_NAMESPACE(protocols_sign) + +// Namespacing symbols exported from sqisign.c: +#undef sqisign_keypair +#undef sqisign_open +#undef sqisign_sign +#undef sqisign_sign_signature +#undef sqisign_verify +#undef sqisign_verify_signature + +#define sqisign_keypair SQISIGN_NAMESPACE(sqisign_keypair) +#define sqisign_open SQISIGN_NAMESPACE(sqisign_open) +#define sqisign_sign SQISIGN_NAMESPACE(sqisign_sign) +#define sqisign_sign_signature SQISIGN_NAMESPACE(sqisign_sign_signature) +#define sqisign_verify SQISIGN_NAMESPACE(sqisign_verify) +#define sqisign_verify_signature SQISIGN_NAMESPACE(sqisign_verify_signature) + +// Namespacing symbols exported from theta_isogenies.c: +#undef theta_chain_compute_and_eval +#undef theta_chain_compute_and_eval_randomized +#undef theta_chain_compute_and_eval_verify + +#define theta_chain_compute_and_eval SQISIGN_NAMESPACE(theta_chain_compute_and_eval) +#define theta_chain_compute_and_eval_randomized SQISIGN_NAMESPACE(theta_chain_compute_and_eval_randomized) +#define theta_chain_compute_and_eval_verify SQISIGN_NAMESPACE(theta_chain_compute_and_eval_verify) + +// Namespacing symbols exported from theta_structure.c: +#undef double_iter +#undef double_point +#undef is_product_theta_point +#undef theta_precomputation + +#define double_iter SQISIGN_NAMESPACE(double_iter) +#define double_point SQISIGN_NAMESPACE(double_point) +#define is_product_theta_point SQISIGN_NAMESPACE(is_product_theta_point) +#define theta_precomputation SQISIGN_NAMESPACE(theta_precomputation) + +// Namespacing symbols exported from verify.c: +#undef protocols_verify + +#define protocols_verify SQISIGN_NAMESPACE(protocols_verify) + +// Namespacing symbols exported from xeval.c: +#undef xeval_2 +#undef xeval_2_singular +#undef xeval_4 + +#define xeval_2 SQISIGN_NAMESPACE(xeval_2) +#define xeval_2_singular SQISIGN_NAMESPACE(xeval_2_singular) +#define xeval_4 SQISIGN_NAMESPACE(xeval_4) + +// Namespacing symbols exported from xisog.c: +#undef xisog_2 +#undef xisog_2_singular +#undef xisog_4 + +#define xisog_2 SQISIGN_NAMESPACE(xisog_2) +#define xisog_2_singular SQISIGN_NAMESPACE(xisog_2_singular) +#define xisog_4 SQISIGN_NAMESPACE(xisog_4) + +// Namespacing symbols from precomp: +#undef BASIS_E0_PX +#undef BASIS_E0_QX +#undef p_cofactor_for_2f +#undef CURVES_WITH_ENDOMORPHISMS +#undef EVEN_INDEX +#undef CHI_EVAL +#undef FP2_CONSTANTS +#undef SPLITTING_TRANSFORMS +#undef NORMALIZATION_TRANSFORMS +#undef QUAT_prime_cofactor +#undef QUATALG_PINFTY +#undef EXTREMAL_ORDERS +#undef CONNECTING_IDEALS +#undef CONJUGATING_ELEMENTS +#undef TWO_TO_SECURITY_BITS +#undef TORSION_PLUS_2POWER +#undef SEC_DEGREE +#undef COM_DEGREE + +#define BASIS_E0_PX SQISIGN_NAMESPACE(BASIS_E0_PX) +#define BASIS_E0_QX SQISIGN_NAMESPACE(BASIS_E0_QX) +#define p_cofactor_for_2f SQISIGN_NAMESPACE(p_cofactor_for_2f) +#define CURVES_WITH_ENDOMORPHISMS SQISIGN_NAMESPACE(CURVES_WITH_ENDOMORPHISMS) +#define EVEN_INDEX SQISIGN_NAMESPACE(EVEN_INDEX) +#define CHI_EVAL SQISIGN_NAMESPACE(CHI_EVAL) +#define FP2_CONSTANTS SQISIGN_NAMESPACE(FP2_CONSTANTS) +#define SPLITTING_TRANSFORMS SQISIGN_NAMESPACE(SPLITTING_TRANSFORMS) +#define NORMALIZATION_TRANSFORMS SQISIGN_NAMESPACE(NORMALIZATION_TRANSFORMS) +#define QUAT_prime_cofactor SQISIGN_NAMESPACE(QUAT_prime_cofactor) +#define QUATALG_PINFTY SQISIGN_NAMESPACE(QUATALG_PINFTY) +#define 
EXTREMAL_ORDERS SQISIGN_NAMESPACE(EXTREMAL_ORDERS) +#define CONNECTING_IDEALS SQISIGN_NAMESPACE(CONNECTING_IDEALS) +#define CONJUGATING_ELEMENTS SQISIGN_NAMESPACE(CONJUGATING_ELEMENTS) +#define TWO_TO_SECURITY_BITS SQISIGN_NAMESPACE(TWO_TO_SECURITY_BITS) +#define TORSION_PLUS_2POWER SQISIGN_NAMESPACE(TORSION_PLUS_2POWER) +#define SEC_DEGREE SQISIGN_NAMESPACE(SEC_DEGREE) +#define COM_DEGREE SQISIGN_NAMESPACE(COM_DEGREE) + + +#endif + diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign_parameters.txt b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign_parameters.txt new file mode 100644 index 0000000000..947af4bbbe --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign_parameters.txt @@ -0,0 +1,3 @@ +lvl = 5 +p = 0x1afffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff +num_orders = 7 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/theta_isogenies.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/theta_isogenies.c new file mode 100644 index 0000000000..478a9ab25b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/theta_isogenies.c @@ -0,0 +1,1283 @@ +#include "theta_isogenies.h" +#include +#include +#include +#include +#include + +// Select a base change matrix in constant time, with M1 a regular +// base change matrix and M2 a precomputed base change matrix +// If option = 0 then M <- M1, else if option = 0xFF...FF then M <- M2 +static inline void +select_base_change_matrix(basis_change_matrix_t *M, + const basis_change_matrix_t *M1, + const precomp_basis_change_matrix_t *M2, + const uint32_t option) +{ + for (int i = 0; i < 4; i++) + for (int j = 0; j < 4; j++) + fp2_select(&M->m[i][j], &M1->m[i][j], &FP2_CONSTANTS[M2->m[i][j]], option); +} + +// Set a regular base change matrix from a precomputed one +static inline void +set_base_change_matrix_from_precomp(basis_change_matrix_t *res, const precomp_basis_change_matrix_t *M) +{ + for (int i = 0; i < 4; i++) + for (int j = 0; j < 4; j++) + res->m[i][j] = FP2_CONSTANTS[M->m[i][j]]; +} + +static inline void +choose_index_theta_point(fp2_t *res, int ind, const theta_point_t *T) +{ + const fp2_t *src = NULL; + switch (ind % 4) { + case 0: + src = &T->x; + break; + case 1: + src = &T->y; + break; + case 2: + src = &T->z; + break; + case 3: + src = &T->t; + break; + default: + assert(0); + } + fp2_copy(res, src); +} + +// same as apply_isomorphism method but more efficient when the t component of P is zero. 
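+// Both routines compute res = M * (x, y, z, t)^T, viewing the theta point as a column vector
+// over GF(p^2); when Pt_not_zero is false the fourth column of M is skipped, which saves
+// four multiplications and four additions.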
+static void +apply_isomorphism_general(theta_point_t *res, + const basis_change_matrix_t *M, + const theta_point_t *P, + const bool Pt_not_zero) +{ + fp2_t x1; + theta_point_t temp; + + fp2_mul(&temp.x, &P->x, &M->m[0][0]); + fp2_mul(&x1, &P->y, &M->m[0][1]); + fp2_add(&temp.x, &temp.x, &x1); + fp2_mul(&x1, &P->z, &M->m[0][2]); + fp2_add(&temp.x, &temp.x, &x1); + + fp2_mul(&temp.y, &P->x, &M->m[1][0]); + fp2_mul(&x1, &P->y, &M->m[1][1]); + fp2_add(&temp.y, &temp.y, &x1); + fp2_mul(&x1, &P->z, &M->m[1][2]); + fp2_add(&temp.y, &temp.y, &x1); + + fp2_mul(&temp.z, &P->x, &M->m[2][0]); + fp2_mul(&x1, &P->y, &M->m[2][1]); + fp2_add(&temp.z, &temp.z, &x1); + fp2_mul(&x1, &P->z, &M->m[2][2]); + fp2_add(&temp.z, &temp.z, &x1); + + fp2_mul(&temp.t, &P->x, &M->m[3][0]); + fp2_mul(&x1, &P->y, &M->m[3][1]); + fp2_add(&temp.t, &temp.t, &x1); + fp2_mul(&x1, &P->z, &M->m[3][2]); + fp2_add(&temp.t, &temp.t, &x1); + + if (Pt_not_zero) { + fp2_mul(&x1, &P->t, &M->m[0][3]); + fp2_add(&temp.x, &temp.x, &x1); + + fp2_mul(&x1, &P->t, &M->m[1][3]); + fp2_add(&temp.y, &temp.y, &x1); + + fp2_mul(&x1, &P->t, &M->m[2][3]); + fp2_add(&temp.z, &temp.z, &x1); + + fp2_mul(&x1, &P->t, &M->m[3][3]); + fp2_add(&temp.t, &temp.t, &x1); + } + + fp2_copy(&res->x, &temp.x); + fp2_copy(&res->y, &temp.y); + fp2_copy(&res->z, &temp.z); + fp2_copy(&res->t, &temp.t); +} + +static void +apply_isomorphism(theta_point_t *res, const basis_change_matrix_t *M, const theta_point_t *P) +{ + apply_isomorphism_general(res, M, P, true); +} + +// set res = M1 * M2 with matrix multiplication +static void +base_change_matrix_multiplication(basis_change_matrix_t *res, + const basis_change_matrix_t *M1, + const basis_change_matrix_t *M2) +{ + basis_change_matrix_t tmp; + fp2_t sum, m_ik, m_kj; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + fp2_set_zero(&sum); + for (int k = 0; k < 4; k++) { + m_ik = M1->m[i][k]; + m_kj = M2->m[k][j]; + fp2_mul(&m_ik, &m_ik, &m_kj); + fp2_add(&sum, &sum, &m_ik); + } + tmp.m[i][j] = sum; + } + } + *res = tmp; +} + +// compute the theta_point corresponding to the couple of point T on an elliptic product +static void +base_change(theta_point_t *out, const theta_gluing_t *phi, const theta_couple_point_t *T) +{ + theta_point_t null_point; + + // null_point = (a : b : c : d) + // a = P1.x P2.x, b = P1.x P2.z, c = P1.z P2.x, d = P1.z P2.z + fp2_mul(&null_point.x, &T->P1.x, &T->P2.x); + fp2_mul(&null_point.y, &T->P1.x, &T->P2.z); + fp2_mul(&null_point.z, &T->P2.x, &T->P1.z); + fp2_mul(&null_point.t, &T->P1.z, &T->P2.z); + + // Apply the basis change + apply_isomorphism(out, &phi->M, &null_point); +} + +static void +action_by_translation_z_and_det(fp2_t *z_inv, fp2_t *det_inv, const ec_point_t *P4, const ec_point_t *P2) +{ + // Store the Z-coordinate to invert + fp2_copy(z_inv, &P4->z); + + // Then collect detij = xij wij - uij zij + fp2_t tmp; + fp2_mul(det_inv, &P4->x, &P2->z); + fp2_mul(&tmp, &P4->z, &P2->x); + fp2_sub(det_inv, det_inv, &tmp); +} + +static void +action_by_translation_compute_matrix(translation_matrix_t *G, + const ec_point_t *P4, + const ec_point_t *P2, + const fp2_t *z_inv, + const fp2_t *det_inv) +{ + fp2_t tmp; + + // Gi.g10 = uij xij /detij - xij/zij + fp2_mul(&tmp, &P4->x, z_inv); + fp2_mul(&G->g10, &P4->x, &P2->x); + fp2_mul(&G->g10, &G->g10, det_inv); + fp2_sub(&G->g10, &G->g10, &tmp); + + // Gi.g11 = uij zij * detij + fp2_mul(&G->g11, &P2->x, det_inv); + fp2_mul(&G->g11, &G->g11, &P4->z); + + // Gi.g00 = -Gi.g11 + fp2_neg(&G->g00, &G->g11); + + // Gi.g01 = - wij zij detij + 
fp2_mul(&G->g01, &P2->z, det_inv); + fp2_mul(&G->g01, &G->g01, &P4->z); + fp2_neg(&G->g01, &G->g01); +} + +// Returns 1 if the basis is as expected and 0 otherwise +// We only expect this to fail for malformed signatures, so +// do not require this to run in constant time. +static int +verify_two_torsion(const theta_couple_point_t *K1_2, const theta_couple_point_t *K2_2, const theta_couple_curve_t *E12) +{ + // First check if any point in K1_2 or K2_2 is zero, if they are then the points did not have + // order 8 when we started gluing + if (ec_is_zero(&K1_2->P1) | ec_is_zero(&K1_2->P2) | ec_is_zero(&K2_2->P1) | ec_is_zero(&K2_2->P2)) { + return 0; + } + + // Now ensure that P1, Q1 and P2, Q2 are independent. For points of order two this means + // that they're not the same + if (ec_is_equal(&K1_2->P1, &K2_2->P1) | ec_is_equal(&K1_2->P2, &K2_2->P2)) { + return 0; + } + + // Finally, double points to ensure all points have order exactly 2 + theta_couple_point_t O1, O2; + double_couple_point(&O1, K1_2, E12); + double_couple_point(&O2, K2_2, E12); + // If this check fails then the points had order 2*f for some f, and the kernel is malformed. + if (!(ec_is_zero(&O1.P1) & ec_is_zero(&O1.P2) & ec_is_zero(&O2.P1) & ec_is_zero(&O2.P2))) { + return 0; + } + + return 1; +} + +// Computes the action by translation for four points +// (P1, P2) and (Q1, Q2) on E1 x E2 simultaneously to +// save on inversions. +// Returns 0 if any of Pi or Qi does not have order 2 +// and 1 otherwise +static int +action_by_translation(translation_matrix_t *Gi, + const theta_couple_point_t *K1_4, + const theta_couple_point_t *K2_4, + const theta_couple_curve_t *E12) +{ + // Compute points of order 2 from Ki_4 + theta_couple_point_t K1_2, K2_2; + double_couple_point(&K1_2, K1_4, E12); + double_couple_point(&K2_2, K2_4, E12); + + if (!verify_two_torsion(&K1_2, &K2_2, E12)) { + return 0; + } + + // We need to invert four Z coordinates and + // four determinants which we do with batched + // inversion + fp2_t inverses[8]; + action_by_translation_z_and_det(&inverses[0], &inverses[4], &K1_4->P1, &K1_2.P1); + action_by_translation_z_and_det(&inverses[1], &inverses[5], &K1_4->P2, &K1_2.P2); + action_by_translation_z_and_det(&inverses[2], &inverses[6], &K2_4->P1, &K2_2.P1); + action_by_translation_z_and_det(&inverses[3], &inverses[7], &K2_4->P2, &K2_2.P2); + + fp2_batched_inv(inverses, 8); + if (fp2_is_zero(&inverses[0])) + return 0; // something was wrong with our input (which somehow was not caught by + // verify_two_torsion) + + action_by_translation_compute_matrix(&Gi[0], &K1_4->P1, &K1_2.P1, &inverses[0], &inverses[4]); + action_by_translation_compute_matrix(&Gi[1], &K1_4->P2, &K1_2.P2, &inverses[1], &inverses[5]); + action_by_translation_compute_matrix(&Gi[2], &K2_4->P1, &K2_2.P1, &inverses[2], &inverses[6]); + action_by_translation_compute_matrix(&Gi[3], &K2_4->P2, &K2_2.P2, &inverses[3], &inverses[7]); + + return 1; +} + +// Given the appropriate four torsion, computes the +// change of basis to compute the correct theta null +// point.
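+// The first row of M is assembled from products of the translation matrices Gi (a trace-like
+// sum); the remaining rows are obtained by letting the translations by (0, K2_4.P2),
+// (K1_4.P1, 0) and (K1_4.P1, K2_4.P2) act on it, as the per-row comments below detail.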
+// Returns 0 if the order of K1_4 or K2_4 is not 4 +static int +gluing_change_of_basis(basis_change_matrix_t *M, + const theta_couple_point_t *K1_4, + const theta_couple_point_t *K2_4, + const theta_couple_curve_t *E12) +{ + // Compute the four 2x2 matrices for the action by translation + // on the four points: + translation_matrix_t Gi[4]; + if (!action_by_translation(Gi, K1_4, K2_4, E12)) + return 0; + + // Computation of the 4x4 matrix from Mij + // t001, t101 (resp t002, t102) first column of M11 * M21 (resp M12 * M22) + fp2_t t001, t101, t002, t102, tmp; + + fp2_mul(&t001, &Gi[0].g00, &Gi[2].g00); + fp2_mul(&tmp, &Gi[0].g01, &Gi[2].g10); + fp2_add(&t001, &t001, &tmp); + + fp2_mul(&t101, &Gi[0].g10, &Gi[2].g00); + fp2_mul(&tmp, &Gi[0].g11, &Gi[2].g10); + fp2_add(&t101, &t101, &tmp); + + fp2_mul(&t002, &Gi[1].g00, &Gi[3].g00); + fp2_mul(&tmp, &Gi[1].g01, &Gi[3].g10); + fp2_add(&t002, &t002, &tmp); + + fp2_mul(&t102, &Gi[1].g10, &Gi[3].g00); + fp2_mul(&tmp, &Gi[1].g11, &Gi[3].g10); + fp2_add(&t102, &t102, &tmp); + + // trace for the first row + fp2_set_one(&M->m[0][0]); + fp2_mul(&tmp, &t001, &t002); + fp2_add(&M->m[0][0], &M->m[0][0], &tmp); + fp2_mul(&tmp, &Gi[2].g00, &Gi[3].g00); + fp2_add(&M->m[0][0], &M->m[0][0], &tmp); + fp2_mul(&tmp, &Gi[0].g00, &Gi[1].g00); + fp2_add(&M->m[0][0], &M->m[0][0], &tmp); + + fp2_mul(&M->m[0][1], &t001, &t102); + fp2_mul(&tmp, &Gi[2].g00, &Gi[3].g10); + fp2_add(&M->m[0][1], &M->m[0][1], &tmp); + fp2_mul(&tmp, &Gi[0].g00, &Gi[1].g10); + fp2_add(&M->m[0][1], &M->m[0][1], &tmp); + + fp2_mul(&M->m[0][2], &t101, &t002); + fp2_mul(&tmp, &Gi[2].g10, &Gi[3].g00); + fp2_add(&M->m[0][2], &M->m[0][2], &tmp); + fp2_mul(&tmp, &Gi[0].g10, &Gi[1].g00); + fp2_add(&M->m[0][2], &M->m[0][2], &tmp); + + fp2_mul(&M->m[0][3], &t101, &t102); + fp2_mul(&tmp, &Gi[2].g10, &Gi[3].g10); + fp2_add(&M->m[0][3], &M->m[0][3], &tmp); + fp2_mul(&tmp, &Gi[0].g10, &Gi[1].g10); + fp2_add(&M->m[0][3], &M->m[0][3], &tmp); + + // Compute the action of (0,out.K2_4.P2) for the second row + fp2_mul(&tmp, &Gi[3].g01, &M->m[0][1]); + fp2_mul(&M->m[1][0], &Gi[3].g00, &M->m[0][0]); + fp2_add(&M->m[1][0], &M->m[1][0], &tmp); + + fp2_mul(&tmp, &Gi[3].g11, &M->m[0][1]); + fp2_mul(&M->m[1][1], &Gi[3].g10, &M->m[0][0]); + fp2_add(&M->m[1][1], &M->m[1][1], &tmp); + + fp2_mul(&tmp, &Gi[3].g01, &M->m[0][3]); + fp2_mul(&M->m[1][2], &Gi[3].g00, &M->m[0][2]); + fp2_add(&M->m[1][2], &M->m[1][2], &tmp); + + fp2_mul(&tmp, &Gi[3].g11, &M->m[0][3]); + fp2_mul(&M->m[1][3], &Gi[3].g10, &M->m[0][2]); + fp2_add(&M->m[1][3], &M->m[1][3], &tmp); + + // compute the action of (K1_4.P1,0) for the third row + fp2_mul(&tmp, &Gi[0].g01, &M->m[0][2]); + fp2_mul(&M->m[2][0], &Gi[0].g00, &M->m[0][0]); + fp2_add(&M->m[2][0], &M->m[2][0], &tmp); + + fp2_mul(&tmp, &Gi[0].g01, &M->m[0][3]); + fp2_mul(&M->m[2][1], &Gi[0].g00, &M->m[0][1]); + fp2_add(&M->m[2][1], &M->m[2][1], &tmp); + + fp2_mul(&tmp, &Gi[0].g11, &M->m[0][2]); + fp2_mul(&M->m[2][2], &Gi[0].g10, &M->m[0][0]); + fp2_add(&M->m[2][2], &M->m[2][2], &tmp); + + fp2_mul(&tmp, &Gi[0].g11, &M->m[0][3]); + fp2_mul(&M->m[2][3], &Gi[0].g10, &M->m[0][1]); + fp2_add(&M->m[2][3], &M->m[2][3], &tmp); + + // compute the action of (K1_4.P1,K2_4.P2) for the final row + fp2_mul(&tmp, &Gi[0].g01, &M->m[1][2]); + fp2_mul(&M->m[3][0], &Gi[0].g00, &M->m[1][0]); + fp2_add(&M->m[3][0], &M->m[3][0], &tmp); + + fp2_mul(&tmp, &Gi[0].g01, &M->m[1][3]); + fp2_mul(&M->m[3][1], &Gi[0].g00, &M->m[1][1]); + fp2_add(&M->m[3][1], &M->m[3][1], &tmp); + + fp2_mul(&tmp, &Gi[0].g11, &M->m[1][2]); + 
fp2_mul(&M->m[3][2], &Gi[0].g10, &M->m[1][0]); + fp2_add(&M->m[3][2], &M->m[3][2], &tmp); + + fp2_mul(&tmp, &Gi[0].g11, &M->m[1][3]); + fp2_mul(&M->m[3][3], &Gi[0].g10, &M->m[1][1]); + fp2_add(&M->m[3][3], &M->m[3][3], &tmp); + + return 1; +} + +/** + * @brief Compute the gluing isogeny from an elliptic product + * + * @param out Output: the theta_gluing + * @param E12 an elliptic product E1 x E2 + * @param xyK1_8 a couple point of order 8 on E1 x E2, in (X : Y : Z) coordinates + * @param xyK2_8 a couple point of order 8 on E1 x E2, in (X : Y : Z) coordinates + * @param verify when true, add extra sanity checks that the 8-torsion is coherent with the isogeny + * + * out : E1xE2 -> A of kernel [4](xyK1_8,xyK2_8) + * if the kernel supplied has the incorrect order, or gluing seems malformed, + * returns 0, otherwise returns 1. + */ +static int +gluing_compute(theta_gluing_t *out, + const theta_couple_curve_t *E12, + const theta_couple_jac_point_t *xyK1_8, + const theta_couple_jac_point_t *xyK2_8, + bool verify) +{ + // Ensure that we have been given the eight torsion +#ifndef NDEBUG + { + int check = test_jac_order_twof(&xyK1_8->P1, &E12->E1, 3); + if (!check) + debug_print("xyK1_8->P1 does not have order 8"); + check = test_jac_order_twof(&xyK2_8->P1, &E12->E1, 3); + if (!check) + debug_print("xyK2_8->P1 does not have order 8"); + check = test_jac_order_twof(&xyK1_8->P2, &E12->E2, 3); + if (!check) + debug_print("xyK1_8->P2 does not have order 8"); + check = test_jac_order_twof(&xyK2_8->P2, &E12->E2, 3); + if (!check) + debug_print("xyK2_8->P2 does not have order 8"); + } +#endif + + out->xyK1_8 = *xyK1_8; + out->domain = *E12; + + // Given points in E[8] x E[8] we need the four torsion below + theta_couple_jac_point_t xyK1_4, xyK2_4; + + double_couple_jac_point(&xyK1_4, xyK1_8, E12); + double_couple_jac_point(&xyK2_4, xyK2_8, E12); + + // Convert from (X:Y:Z) coordinates to (X:Z) + theta_couple_point_t K1_8, K2_8; + theta_couple_point_t K1_4, K2_4; + + couple_jac_to_xz(&K1_8, xyK1_8); + couple_jac_to_xz(&K2_8, xyK2_8); + couple_jac_to_xz(&K1_4, &xyK1_4); + couple_jac_to_xz(&K2_4, &xyK2_4); + + // Set the basis change matrix, if we have not been given a valid K[8] for this computation + // gluing_change_of_basis will detect this and return 0 + if (!gluing_change_of_basis(&out->M, &K1_4, &K2_4, E12)) { + debug_print("gluing failed as kernel does not have correct order"); + return 0; + } + + // apply the base change to the kernel + theta_point_t TT1, TT2; + + base_change(&TT1, out, &K1_8); + base_change(&TT2, out, &K2_8); + + // compute the codomain + to_squared_theta(&TT1, &TT1); + to_squared_theta(&TT2, &TT2); + + // If the kernel is well formed then TT1.t and TT2.t are zero + // if they are not, we exit early as the signature we are validating + // is probably malformed + if (!(fp2_is_zero(&TT1.t) & fp2_is_zero(&TT2.t))) { + debug_print("gluing failed TT1.t or TT2.t is not zero"); + return 0; + } + // Test our projective factors are non zero + if (fp2_is_zero(&TT1.x) | fp2_is_zero(&TT2.x) | fp2_is_zero(&TT1.y) | fp2_is_zero(&TT2.z) | fp2_is_zero(&TT1.z)) + return 0; // invalid input + + // Projective factor: Ax + fp2_mul(&out->codomain.x, &TT1.x, &TT2.x); + fp2_mul(&out->codomain.y, &TT1.y, &TT2.x); + fp2_mul(&out->codomain.z, &TT1.x, &TT2.z); + fp2_set_zero(&out->codomain.t); + // Projective factor: ABCxz + fp2_mul(&out->precomputation.x, &TT1.y, &TT2.z); + fp2_copy(&out->precomputation.y, &out->codomain.z); + fp2_copy(&out->precomputation.z, &out->codomain.y); + fp2_set_zero(&out->precomputation.t); + + // Compute the two components of phi(K1_8) = (x:x:y:y).
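+ // Only two values are stored: since phi(K1_8) has the shape (x : x : y : y), the pair (x, y)
+ // determines it, and its projective inverse (y : y : x : x) is what gluing_eval_point uses below.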
+ fp2_mul(&out->imageK1_8.x, &TT1.x, &out->precomputation.x); + fp2_mul(&out->imageK1_8.y, &TT1.z, &out->precomputation.z); + + // If K1_8 and K2_8 are our 8-torsion points, this ensures that the + // 4-torsion points [2]K1_8 and [2]K2_8 are isotropic. + if (verify) { + fp2_t t1, t2; + fp2_mul(&t1, &TT1.y, &out->precomputation.y); + if (!fp2_is_equal(&out->imageK1_8.x, &t1)) + return 0; + fp2_mul(&t1, &TT2.x, &out->precomputation.x); + fp2_mul(&t2, &TT2.z, &out->precomputation.z); + if (!fp2_is_equal(&t2, &t1)) + return 0; + } + + // compute the final codomain + hadamard(&out->codomain, &out->codomain); + return 1; +} + +// sub routine of the gluing eval +static void +gluing_eval_point(theta_point_t *image, const theta_couple_jac_point_t *P, const theta_gluing_t *phi) +{ + theta_point_t T1, T2; + add_components_t add_comp1, add_comp2; + + // Compute the cross addition components of P1+Q1 and P2+Q2 + jac_to_xz_add_components(&add_comp1, &P->P1, &phi->xyK1_8.P1, &phi->domain.E1); + jac_to_xz_add_components(&add_comp2, &P->P2, &phi->xyK1_8.P2, &phi->domain.E2); + + // Compute T1 and T2 derived from the cross addition components. + fp2_mul(&T1.x, &add_comp1.u, &add_comp2.u); // T1x = u1u2 + fp2_mul(&T2.t, &add_comp1.v, &add_comp2.v); // T2t = v1v2 + fp2_add(&T1.x, &T1.x, &T2.t); // T1x = u1u2 + v1v2 + fp2_mul(&T1.y, &add_comp1.u, &add_comp2.w); // T1y = u1w2 + fp2_mul(&T1.z, &add_comp1.w, &add_comp2.u); // T1z = w1u2 + fp2_mul(&T1.t, &add_comp1.w, &add_comp2.w); // T1t = w1w2 + fp2_add(&T2.x, &add_comp1.u, &add_comp1.v); // T2x = (u1+v1) + fp2_add(&T2.y, &add_comp2.u, &add_comp2.v); // T2y = (u2+v2) + fp2_mul(&T2.x, &T2.x, &T2.y); // T2x = (u1+v1)(u2+v2) + fp2_sub(&T2.x, &T2.x, &T1.x); // T1x = v1u2 + u1v2 + fp2_mul(&T2.y, &add_comp1.v, &add_comp2.w); // T2y = v1w2 + fp2_mul(&T2.z, &add_comp1.w, &add_comp2.v); // T2z = w1v2 + fp2_set_zero(&T2.t); // T2t = 0 + + // Apply the basis change and compute their respective square + // theta(P+Q) = M.T1 - M.T2 and theta(P-Q) = M.T1 + M.T2 + apply_isomorphism_general(&T1, &phi->M, &T1, true); + apply_isomorphism_general(&T2, &phi->M, &T2, false); + pointwise_square(&T1, &T1); + pointwise_square(&T2, &T2); + + // the difference between the two is therefore theta(P+Q)theta(P-Q) + // whose hadamard transform is then the product of the dual + // theta_points of phi(P) and phi(Q). + fp2_sub(&T1.x, &T1.x, &T2.x); + fp2_sub(&T1.y, &T1.y, &T2.y); + fp2_sub(&T1.z, &T1.z, &T2.z); + fp2_sub(&T1.t, &T1.t, &T2.t); + hadamard(&T1, &T1); + + // Compute (x, y, z, t) + // As imageK1_8 = (x:x:y:y), its inverse is (y:y:x:x). + fp2_mul(&image->x, &T1.x, &phi->imageK1_8.y); + fp2_mul(&image->y, &T1.y, &phi->imageK1_8.y); + fp2_mul(&image->z, &T1.z, &phi->imageK1_8.x); + fp2_mul(&image->t, &T1.t, &phi->imageK1_8.x); + + hadamard(image, image); +} + +// Same as gluing_eval_point but in the very special case where we already know that the point will +// have a zero coordinate at the place where the zero coordinate of the dual_theta_nullpoint would +// have made the computation difficult +static int +gluing_eval_point_special_case(theta_point_t *image, const theta_couple_point_t *P, const theta_gluing_t *phi) +{ + theta_point_t T; + + // Apply the basis change + base_change(&T, phi, P); + + // Apply the to_squared_theta transform + to_squared_theta(&T, &T); + + // This coordinate should always be 0 in a gluing because D=0. 
+ // If this is not the case, something went very wrong, so reject + if (!fp2_is_zero(&T.t)) + return 0; + + // Compute (x, y, z, t) + fp2_mul(&image->x, &T.x, &phi->precomputation.x); + fp2_mul(&image->y, &T.y, &phi->precomputation.y); + fp2_mul(&image->z, &T.z, &phi->precomputation.z); + fp2_set_zero(&image->t); + + hadamard(image, image); + return 1; +} + +/** + * @brief Evaluate a gluing isogeny from an elliptic product on a basis + * + * @param image1 Output: the theta_point of the image of the first couple of points + * @param image2 Output : the theta point of the image of the second couple of points + * @param xyT1: A pair of points (X : Y : Z) on E1E2 to glue using phi + * @param xyT2: A pair of points (X : Y : Z) on E1E2 to glue using phi + * @param phi : a gluing isogeny E1 x E2 -> A + * + **/ +static void +gluing_eval_basis(theta_point_t *image1, + theta_point_t *image2, + const theta_couple_jac_point_t *xyT1, + const theta_couple_jac_point_t *xyT2, + const theta_gluing_t *phi) +{ + gluing_eval_point(image1, xyT1, phi); + gluing_eval_point(image2, xyT2, phi); +} + +/** + * @brief Compute a (2,2) isogeny in dimension 2 in the theta_model + * + * @param out Output: the theta_isogeny + * @param A a theta null point for the domain + * @param T1_8 a point in A[8] + * @param T2_8 a point in A[8] + * @param hadamard_bool_1 a boolean used for the last two steps of the chain + * @param hadamard_bool_2 a boolean used for the last two steps of the chain + * + * out : A -> B of kernel [4](T1_8,T2_8) + * hadamard_bool_1 controls if the domain is in standard or dual coordinates + * hadamard_bool_2 controls if the codomain is in standard or dual coordinates + * verify: add extra sanity check to ensure our 8-torsion points are coherent with the isogeny + * + */ +static int +theta_isogeny_compute(theta_isogeny_t *out, + const theta_structure_t *A, + const theta_point_t *T1_8, + const theta_point_t *T2_8, + bool hadamard_bool_1, + bool hadamard_bool_2, + bool verify) +{ + out->hadamard_bool_1 = hadamard_bool_1; + out->hadamard_bool_2 = hadamard_bool_2; + out->domain = *A; + out->T1_8 = *T1_8; + out->T2_8 = *T2_8; + out->codomain.precomputation = false; + + theta_point_t TT1, TT2; + + if (hadamard_bool_1) { + hadamard(&TT1, T1_8); + to_squared_theta(&TT1, &TT1); + hadamard(&TT2, T2_8); + to_squared_theta(&TT2, &TT2); + } else { + to_squared_theta(&TT1, T1_8); + to_squared_theta(&TT2, T2_8); + } + + fp2_t t1, t2; + + // Test that our projective factor ABCDxzw is non zero, where + // TT1=(Ax, Bx, Cy, Dy), TT2=(Az, Bw, Cz, Dw) + // But ABCDxzw=0 can only happen if we had an unexpected splitting in + // the isogeny chain. 
+ // In either case reject + // (this is not strictly necessary, we could just return (0:0:0:0)) + if (fp2_is_zero(&TT2.x) | fp2_is_zero(&TT2.y) | fp2_is_zero(&TT2.z) | fp2_is_zero(&TT2.t) | fp2_is_zero(&TT1.x) | + fp2_is_zero(&TT1.y)) + return 0; + + fp2_mul(&t1, &TT1.x, &TT2.y); + fp2_mul(&t2, &TT1.y, &TT2.x); + fp2_mul(&out->codomain.null_point.x, &TT2.x, &t1); + fp2_mul(&out->codomain.null_point.y, &TT2.y, &t2); + fp2_mul(&out->codomain.null_point.z, &TT2.z, &t1); + fp2_mul(&out->codomain.null_point.t, &TT2.t, &t2); + fp2_t t3; + fp2_mul(&t3, &TT2.z, &TT2.t); + fp2_mul(&out->precomputation.x, &t3, &TT1.y); + fp2_mul(&out->precomputation.y, &t3, &TT1.x); + fp2_copy(&out->precomputation.z, &out->codomain.null_point.t); + fp2_copy(&out->precomputation.t, &out->codomain.null_point.z); + + // If T1_8 and T2_8 are our 8-torsion points, this ensures that the + // 4-torsion points 2T1_8 and 2T2_8 are isotropic. + if (verify) { + fp2_mul(&t1, &TT1.x, &out->precomputation.x); + fp2_mul(&t2, &TT1.y, &out->precomputation.y); + if (!fp2_is_equal(&t1, &t2)) + return 0; + fp2_mul(&t1, &TT1.z, &out->precomputation.z); + fp2_mul(&t2, &TT1.t, &out->precomputation.t); + if (!fp2_is_equal(&t1, &t2)) + return 0; + fp2_mul(&t1, &TT2.x, &out->precomputation.x); + fp2_mul(&t2, &TT2.z, &out->precomputation.z); + if (!fp2_is_equal(&t1, &t2)) + return 0; + fp2_mul(&t1, &TT2.y, &out->precomputation.y); + fp2_mul(&t2, &TT2.t, &out->precomputation.t); + if (!fp2_is_equal(&t1, &t2)) + return 0; + } + + if (hadamard_bool_2) { + hadamard(&out->codomain.null_point, &out->codomain.null_point); + } + return 1; +} + +/** + * @brief Compute a (2,2) isogeny when only the 4 torsion above the kernel is known and not the 8 + * torsion + * + * @param out Output: the theta_isogeny + * @param A a theta null point for the domain + * @param T1_4 a point in A[4] + * @param T2_4 a point in A[4] + * @param hadamard_bool_1 a boolean + * @param hadamard_bool_2 a boolean + * + * out : A -> B of kernel [2](T1_4,T2_4) + * hadamard_bool_1 controls if the domain is in standard or dual coordinates + * hadamard_bool_2 controls if the codomain is in standard or dual coordinates + * + */ +static void +theta_isogeny_compute_4(theta_isogeny_t *out, + const theta_structure_t *A, + const theta_point_t *T1_4, + const theta_point_t *T2_4, + bool hadamard_bool_1, + bool hadamard_bool_2) +{ + out->hadamard_bool_1 = hadamard_bool_1; + out->hadamard_bool_2 = hadamard_bool_2; + out->domain = *A; + out->T1_8 = *T1_4; + out->T2_8 = *T2_4; + out->codomain.precomputation = false; + + theta_point_t TT1, TT2; + // we will compute: + // TT1 = (xAB, _ , xCD, _) + // TT2 = (AA,BB,CC,DD) + + // fp2_t xA_inv,zA_inv,tB_inv; + + if (hadamard_bool_1) { + hadamard(&TT1, T1_4); + to_squared_theta(&TT1, &TT1); + + hadamard(&TT2, &A->null_point); + to_squared_theta(&TT2, &TT2); + } else { + to_squared_theta(&TT1, T1_4); + to_squared_theta(&TT2, &A->null_point); + } + + fp2_t sqaabb, sqaacc; + fp2_mul(&sqaabb, &TT2.x, &TT2.y); + fp2_mul(&sqaacc, &TT2.x, &TT2.z); + // No need to check the square roots, only used for signing. 
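+ // sqaabb and sqaacc are the square roots needed to recover the codomain null point from the
+ // squared theta constants (AA : BB : CC : DD) when only the 4-torsion above the kernel is
+ // available; theta_isogeny_compute avoids them by using the 8-torsion.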
+ // sqaabb = sqrt(AA*BB) + fp2_sqrt(&sqaabb); + // sqaacc = sqrt(AA*CC) + fp2_sqrt(&sqaacc); + + // we compute out->codomain.null_point = (xAB * sqaacc * AA, xAB *sqaabb *sqaacc, xCD*sqaabb * + // AA) out->precomputation = (xAB * BB * CC *DD , sqaabb * CC * DD * xAB , sqaacc * BB* DD * xAB + // , xCD * sqaabb *sqaacc * BB) + + fp2_mul(&out->codomain.null_point.y, &sqaabb, &sqaacc); + fp2_mul(&out->precomputation.t, &out->codomain.null_point.y, &TT1.z); + fp2_mul(&out->codomain.null_point.y, &out->codomain.null_point.y, + &TT1.x); // done for out->codomain.null_point.y + + fp2_mul(&out->codomain.null_point.t, &TT1.z, &sqaabb); + fp2_mul(&out->codomain.null_point.t, &out->codomain.null_point.t, + &TT2.x); // done for out->codomain.null_point.t + + fp2_mul(&out->codomain.null_point.x, &TT1.x, &TT2.x); + fp2_mul(&out->codomain.null_point.z, &out->codomain.null_point.x, + &TT2.z); // done for out->codomain.null_point.z + fp2_mul(&out->codomain.null_point.x, &out->codomain.null_point.x, + &sqaacc); // done for out->codomain.null_point.x + + fp2_mul(&out->precomputation.x, &TT1.x, &TT2.t); + fp2_mul(&out->precomputation.z, &out->precomputation.x, &TT2.y); + fp2_mul(&out->precomputation.x, &out->precomputation.x, &TT2.z); + fp2_mul(&out->precomputation.y, &out->precomputation.x, &sqaabb); // done for out->precomputation.y + fp2_mul(&out->precomputation.x, &out->precomputation.x, &TT2.y); // done for out->precomputation.x + fp2_mul(&out->precomputation.z, &out->precomputation.z, &sqaacc); // done for out->precomputation.z + fp2_mul(&out->precomputation.t, &out->precomputation.t, &TT2.y); // done for out->precomputation.t + + if (hadamard_bool_2) { + hadamard(&out->codomain.null_point, &out->codomain.null_point); + } +} + +/** + * @brief Compute a (2,2) isogeny when only the kernel is known and not the 8 or 4 torsion above + * + * @param out Output: the theta_isogeny + * @param A a theta null point for the domain + * @param T1_2 a point in A[2] + * @param T2_2 a point in A[2] + * @param hadamard_bool_1 a boolean + * @param boo2 a boolean + * + * out : A -> B of kernel (T1_2,T2_2) + * hadamard_bool_1 controls if the domain is in standard or dual coordinates + * hadamard_bool_2 controls if the codomain is in standard or dual coordinates + * + */ +static void +theta_isogeny_compute_2(theta_isogeny_t *out, + const theta_structure_t *A, + const theta_point_t *T1_2, + const theta_point_t *T2_2, + bool hadamard_bool_1, + bool hadamard_bool_2) +{ + out->hadamard_bool_1 = hadamard_bool_1; + out->hadamard_bool_2 = hadamard_bool_2; + out->domain = *A; + out->T1_8 = *T1_2; + out->T2_8 = *T2_2; + out->codomain.precomputation = false; + + theta_point_t TT2; + // we will compute: + // TT2 = (AA,BB,CC,DD) + + if (hadamard_bool_1) { + hadamard(&TT2, &A->null_point); + to_squared_theta(&TT2, &TT2); + } else { + to_squared_theta(&TT2, &A->null_point); + } + + // we compute out->codomain.null_point = (AA,sqaabb, sqaacc, sqaadd) + // out->precomputation = ( BB * CC *DD , sqaabb * CC * DD , sqaacc * BB* DD , sqaadd * BB * CC) + fp2_copy(&out->codomain.null_point.x, &TT2.x); + fp2_mul(&out->codomain.null_point.y, &TT2.x, &TT2.y); + fp2_mul(&out->codomain.null_point.z, &TT2.x, &TT2.z); + fp2_mul(&out->codomain.null_point.t, &TT2.x, &TT2.t); + // No need to check the square roots, only used for signing. 
+ fp2_sqrt(&out->codomain.null_point.y); + fp2_sqrt(&out->codomain.null_point.z); + fp2_sqrt(&out->codomain.null_point.t); + + fp2_mul(&out->precomputation.x, &TT2.z, &TT2.t); + fp2_mul(&out->precomputation.y, + &out->precomputation.x, + &out->codomain.null_point.y); // done for out->precomputation.y + fp2_mul(&out->precomputation.x, &out->precomputation.x, &TT2.y); // done for out->precomputation.x + fp2_mul(&out->precomputation.z, &TT2.t, &out->codomain.null_point.z); + fp2_mul(&out->precomputation.z, &out->precomputation.z, &TT2.y); // done for out->precomputation.z + fp2_mul(&out->precomputation.t, &TT2.z, &out->codomain.null_point.t); + fp2_mul(&out->precomputation.t, &out->precomputation.t, &TT2.y); // done for out->precomputation.t + + if (hadamard_bool_2) { + hadamard(&out->codomain.null_point, &out->codomain.null_point); + } +} + +static void +theta_isogeny_eval(theta_point_t *out, const theta_isogeny_t *phi, const theta_point_t *P) +{ + if (phi->hadamard_bool_1) { + hadamard(out, P); + to_squared_theta(out, out); + } else { + to_squared_theta(out, P); + } + fp2_mul(&out->x, &out->x, &phi->precomputation.x); + fp2_mul(&out->y, &out->y, &phi->precomputation.y); + fp2_mul(&out->z, &out->z, &phi->precomputation.z); + fp2_mul(&out->t, &out->t, &phi->precomputation.t); + + if (phi->hadamard_bool_2) { + hadamard(out, out); + } +} + +#if defined(ENABLE_SIGN) +// Sample a random secret index in [0, 5] to select one of the 6 normalisation +// matrices for the normalisation of the output of the (2,2)-chain during +// splitting +static unsigned char +sample_random_index(void) +{ + // To avoid bias in reduction we should only consider integers smaller + // than 2^32 which are a multiple of 6, so we only reduce bytes with a + // value in [0, 4294967292-1]. + // We have 4294967292/2^32 = ~99.9999999% chance that the first try is "good". 
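+ // The reduction below is exact: 2863311531 = ceil(2^34 / 6), so for any 32-bit seed
+ // ((uint64_t)seed * 2863311531) >> 34 equals seed / 6, and subtracting 6 times that
+ // quotient yields seed % 6 without a variable-time modulo (checked by the assert).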
+ unsigned char seed_arr[4]; + uint32_t seed; + + do { + randombytes(seed_arr, 4); + seed = (seed_arr[0] | (seed_arr[1] << 8) | (seed_arr[2] << 16) | (seed_arr[3] << 24)); + } while (seed >= 4294967292U); + + uint32_t secret_index = seed - (((uint64_t)seed * 2863311531U) >> 34) * 6; + assert(secret_index == seed % 6); // ensure the constant time trick above works + return (unsigned char)secret_index; +} +#endif + +static bool +splitting_compute(theta_splitting_t *out, const theta_structure_t *A, int zero_index, bool randomize) + +{ + // init + uint32_t ctl; + uint32_t count = 0; + fp2_t U_cst, t1, t2; + + memset(&out->M, 0, sizeof(basis_change_matrix_t)); + + // enumerate through all indices + for (int i = 0; i < 10; i++) { + fp2_set_zero(&U_cst); + for (int t = 0; t < 4; t++) { + // Iterate through the null point + choose_index_theta_point(&t2, t, &A->null_point); + choose_index_theta_point(&t1, t ^ EVEN_INDEX[i][1], &A->null_point); + + // Compute t1 * t2 + fp2_mul(&t1, &t1, &t2); + // If CHI_EVAL(i,t) is +1 we want ctl to be 0 and + // If CHI_EVAL(i,t) is -1 we want ctl to be 0xFF..FF + ctl = (uint32_t)(CHI_EVAL[EVEN_INDEX[i][0]][t] >> 1); + assert(ctl == 0 || ctl == 0xffffffff); + + fp2_neg(&t2, &t1); + fp2_select(&t1, &t1, &t2, ctl); + + // Then we compute U_cst ± (t1 * t2) + fp2_add(&U_cst, &U_cst, &t1); + } + + // If U_cst is 0 then update the splitting matrix + ctl = fp2_is_zero(&U_cst); + count -= ctl; + select_base_change_matrix(&out->M, &out->M, &SPLITTING_TRANSFORMS[i], ctl); + if (zero_index != -1 && i == zero_index && + !ctl) { // extra checks if we know exactly where the 0 index should be + return 0; + } + } + +#if defined(ENABLE_SIGN) + // Pick a random normalization matrix + if (randomize) { + unsigned char secret_index = sample_random_index(); + basis_change_matrix_t Mrandom; + + set_base_change_matrix_from_precomp(&Mrandom, &NORMALIZATION_TRANSFORMS[0]); + + // Use a constant time selection to pick the index we want + for (unsigned char i = 1; i < 6; i++) { + // When i == secret_index, mask == 0 and 0xFF..FF otherwise + int32_t mask = i - secret_index; + mask = (mask | -mask) >> 31; + select_base_change_matrix(&Mrandom, &Mrandom, &NORMALIZATION_TRANSFORMS[i], ~mask); + } + base_change_matrix_multiplication(&out->M, &Mrandom, &out->M); + } +#else + assert(!randomize); +#endif + + // apply the isomorphism to ensure the null point is compatible with splitting + apply_isomorphism(&out->B.null_point, &out->M, &A->null_point); + + // splitting was successful only if exactly one zero was identified + return count == 1; +} + +static int +theta_product_structure_to_elliptic_product(theta_couple_curve_t *E12, theta_structure_t *A) +{ + fp2_t xx, yy; + + // This should be true from our computations in splitting_compute + // but still check this for sanity + if (!is_product_theta_point(&A->null_point)) + return 0; + + ec_curve_init(&(E12->E1)); + ec_curve_init(&(E12->E2)); + + // A valid elliptic theta null point has no zero coordinate + if (fp2_is_zero(&A->null_point.x) | fp2_is_zero(&A->null_point.y) | fp2_is_zero(&A->null_point.z)) + return 0; + + // xx = x², yy = y² + fp2_sqr(&xx, &A->null_point.x); + fp2_sqr(&yy, &A->null_point.y); + // xx = x^4, yy = y^4 + fp2_sqr(&xx, &xx); + fp2_sqr(&yy, &yy); + + // A2 = -2(x^4+y^4)/(x^4-y^4) + fp2_add(&E12->E2.A, &xx, &yy); + fp2_sub(&E12->E2.C, &xx, &yy); + fp2_add(&E12->E2.A, &E12->E2.A, &E12->E2.A); + fp2_neg(&E12->E2.A, &E12->E2.A); + + // same with x,z + fp2_sqr(&xx, &A->null_point.x); + fp2_sqr(&yy, &A->null_point.z); + fp2_sqr(&xx, 
&xx); + fp2_sqr(&yy, &yy); + + // A1 = -2(x^4+z^4)/(x^4-z^4) + fp2_add(&E12->E1.A, &xx, &yy); + fp2_sub(&E12->E1.C, &xx, &yy); + fp2_add(&E12->E1.A, &E12->E1.A, &E12->E1.A); + fp2_neg(&E12->E1.A, &E12->E1.A); + + if (fp2_is_zero(&E12->E1.C) | fp2_is_zero(&E12->E2.C)) + return 0; + + return 1; +} + +static int +theta_point_to_montgomery_point(theta_couple_point_t *P12, const theta_point_t *P, const theta_structure_t *A) +{ + fp2_t temp; + const fp2_t *x, *z; + + if (!is_product_theta_point(P)) + return 0; + + x = &P->x; + z = &P->y; + if (fp2_is_zero(x) & fp2_is_zero(z)) { + x = &P->z; + z = &P->t; + } + if (fp2_is_zero(x) & fp2_is_zero(z)) { + return 0; // at this point P=(0:0:0:0) so is invalid + } + // P2.X = A.null_point.y * P.x + A.null_point.x * P.y + // P2.Z = - A.null_point.y * P.x + A.null_point.x * P.y + fp2_mul(&P12->P2.x, &A->null_point.y, x); + fp2_mul(&temp, &A->null_point.x, z); + fp2_sub(&P12->P2.z, &temp, &P12->P2.x); + fp2_add(&P12->P2.x, &P12->P2.x, &temp); + + x = &P->x; + z = &P->z; + if (fp2_is_zero(x) & fp2_is_zero(z)) { + x = &P->y; + z = &P->t; + } + // P1.X = A.null_point.z * P.x + A.null_point.x * P.z + // P1.Z = -A.null_point.z * P.x + A.null_point.x * P.z + fp2_mul(&P12->P1.x, &A->null_point.z, x); + fp2_mul(&temp, &A->null_point.x, z); + fp2_sub(&P12->P1.z, &temp, &P12->P1.x); + fp2_add(&P12->P1.x, &P12->P1.x, &temp); + return 1; +} + +static int +_theta_chain_compute_impl(unsigned n, + theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP, + bool verify, + bool randomize) +{ + theta_structure_t theta; + + // lift the basis + theta_couple_jac_point_t xyT1, xyT2; + + ec_basis_t bas1 = { .P = ker->T1.P1, .Q = ker->T2.P1, .PmQ = ker->T1m2.P1 }; + ec_basis_t bas2 = { .P = ker->T1.P2, .Q = ker->T2.P2, .PmQ = ker->T1m2.P2 }; + if (!lift_basis(&xyT1.P1, &xyT2.P1, &bas1, &E12->E1)) + return 0; + if (!lift_basis(&xyT1.P2, &xyT2.P2, &bas2, &E12->E2)) + return 0; + + const unsigned extra = HD_extra_torsion * extra_torsion; + +#ifndef NDEBUG + assert(extra == 0 || extra == 2); // only cases implemented + if (!test_point_order_twof(&bas2.P, &E12->E2, n + extra)) + debug_print("bas2.P does not have correct order"); + + if (!test_jac_order_twof(&xyT2.P2, &E12->E2, n + extra)) + debug_print("xyT2.P2 does not have correct order"); +#endif + + theta_point_t pts[numP ? numP : 1]; + + int space = 1; + for (unsigned i = 1; i < n; i *= 2) + ++space; + + uint16_t todo[space]; + todo[0] = n - 2 + extra; + + int current = 0; + + // kernel points for the gluing isogeny + theta_couple_jac_point_t jacQ1[space], jacQ2[space]; + jacQ1[0] = xyT1; + jacQ2[0] = xyT2; + while (todo[current] != 1) { + assert(todo[current] >= 2); + ++current; + assert(current < space); + // the gluing isogeny is quite a bit more expensive than the others, + // so we adjust the usual splitting rule here a little bit: towards + // the end of the doubling chain it will be cheaper to recompute the + // doublings after evaluation than to push the intermediate points. + const unsigned num_dbls = todo[current - 1] >= 16 ? 
todo[current - 1] / 2 : todo[current - 1] - 1; + assert(num_dbls && num_dbls < todo[current - 1]); + double_couple_jac_point_iter(&jacQ1[current], num_dbls, &jacQ1[current - 1], E12); + double_couple_jac_point_iter(&jacQ2[current], num_dbls, &jacQ2[current - 1], E12); + todo[current] = todo[current - 1] - num_dbls; + } + + // kernel points for the remaining isogeny steps + theta_point_t thetaQ1[space], thetaQ2[space]; + + // the gluing step + theta_gluing_t first_step; + { + assert(todo[current] == 1); + + // compute the gluing isogeny + if (!gluing_compute(&first_step, E12, &jacQ1[current], &jacQ2[current], verify)) + return 0; + + // evaluate + for (unsigned j = 0; j < numP; ++j) { + assert(ec_is_zero(&P12[j].P1) || ec_is_zero(&P12[j].P2)); + if (!gluing_eval_point_special_case(&pts[j], &P12[j], &first_step)) + return 0; + } + + // push kernel points through gluing isogeny + for (int j = 0; j < current; ++j) { + gluing_eval_basis(&thetaQ1[j], &thetaQ2[j], &jacQ1[j], &jacQ2[j], &first_step); + --todo[j]; + } + + --current; + } + + // set-up the theta_structure for the first codomain + theta.null_point = first_step.codomain; + theta.precomputation = 0; + theta_precomputation(&theta); + + theta_isogeny_t step; + + // and now we do the remaining steps + for (unsigned i = 1; current >= 0 && todo[current]; ++i) { + assert(current < space); + while (todo[current] != 1) { + assert(todo[current] >= 2); + ++current; + assert(current < space); + const unsigned num_dbls = todo[current - 1] / 2; + assert(num_dbls && num_dbls < todo[current - 1]); + double_iter(&thetaQ1[current], &theta, &thetaQ1[current - 1], num_dbls); + double_iter(&thetaQ2[current], &theta, &thetaQ2[current - 1], num_dbls); + todo[current] = todo[current - 1] - num_dbls; + } + + // computing the next step + int ret; + if (i == n - 2) // penultimate step + ret = theta_isogeny_compute(&step, &theta, &thetaQ1[current], &thetaQ2[current], 0, 0, verify); + else if (i == n - 1) // ultimate step + ret = theta_isogeny_compute(&step, &theta, &thetaQ1[current], &thetaQ2[current], 1, 0, false); + else + ret = theta_isogeny_compute(&step, &theta, &thetaQ1[current], &thetaQ2[current], 0, 1, verify); + if (!ret) + return 0; + + for (unsigned j = 0; j < numP; ++j) + theta_isogeny_eval(&pts[j], &step, &pts[j]); + + // updating the codomain + theta = step.codomain; + + // pushing the kernel + assert(todo[current] == 1); + for (int j = 0; j < current; ++j) { + theta_isogeny_eval(&thetaQ1[j], &step, &thetaQ1[j]); + theta_isogeny_eval(&thetaQ2[j], &step, &thetaQ2[j]); + assert(todo[j]); + --todo[j]; + } + + --current; + } + + assert(current == -1); + + if (!extra_torsion) { + if (n >= 3) { + // in the last step we've skipped pushing the kernel since current was == 0, let's do it now + theta_isogeny_eval(&thetaQ1[0], &step, &thetaQ1[0]); + theta_isogeny_eval(&thetaQ2[0], &step, &thetaQ2[0]); + } + + // penultimate step + theta_isogeny_compute_4(&step, &theta, &thetaQ1[0], &thetaQ2[0], 0, 0); + for (unsigned j = 0; j < numP; ++j) + theta_isogeny_eval(&pts[j], &step, &pts[j]); + theta = step.codomain; + theta_isogeny_eval(&thetaQ1[0], &step, &thetaQ1[0]); + theta_isogeny_eval(&thetaQ2[0], &step, &thetaQ2[0]); + + // ultimate step + theta_isogeny_compute_2(&step, &theta, &thetaQ1[0], &thetaQ2[0], 1, 0); + for (unsigned j = 0; j < numP; ++j) + theta_isogeny_eval(&pts[j], &step, &pts[j]); + theta = step.codomain; + } + + // final splitting step + theta_splitting_t last_step; + + bool is_split = splitting_compute(&last_step, &theta, extra_torsion ? 
8 : -1, randomize); + + if (!is_split) { + debug_print("kernel did not generate an isogeny between elliptic products"); + return 0; + } + + if (!theta_product_structure_to_elliptic_product(E34, &last_step.B)) + return 0; + + // evaluate + for (size_t j = 0; j < numP; ++j) { + apply_isomorphism(&pts[j], &last_step.M, &pts[j]); + if (!theta_point_to_montgomery_point(&P12[j], &pts[j], &last_step.B)) + return 0; + } + + return 1; +} + +int +theta_chain_compute_and_eval(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP) +{ + return _theta_chain_compute_impl(n, E12, ker, extra_torsion, E34, P12, numP, false, false); +} + +// Like theta_chain_compute_and_eval, adding extra verification checks; +// used in the signature verification +int +theta_chain_compute_and_eval_verify(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP) +{ + return _theta_chain_compute_impl(n, E12, ker, extra_torsion, E34, P12, numP, true, false); +} + +int +theta_chain_compute_and_eval_randomized(unsigned n, + /*const*/ theta_couple_curve_t *E12, + const theta_kernel_couple_points_t *ker, + bool extra_torsion, + theta_couple_curve_t *E34, + theta_couple_point_t *P12, + size_t numP) +{ + return _theta_chain_compute_impl(n, E12, ker, extra_torsion, E34, P12, numP, false, true); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/theta_isogenies.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/theta_isogenies.h new file mode 100644 index 0000000000..d151811fe7 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/theta_isogenies.h @@ -0,0 +1,18 @@ +/** @file + * + * @authors Antonin Leroux + * + * @brief the theta isogeny header + */ + +#ifndef THETA_ISOGENY_H +#define THETA_ISOGENY_H + +#include +#include +#include +#include "theta_structure.h" +#include +#include + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/theta_structure.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/theta_structure.c new file mode 100644 index 0000000000..ce97ac61a8 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/theta_structure.c @@ -0,0 +1,78 @@ +#include "theta_structure.h" +#include + +void +theta_precomputation(theta_structure_t *A) +{ + + if (A->precomputation) { + return; + } + + theta_point_t A_dual; + to_squared_theta(&A_dual, &A->null_point); + + fp2_t t1, t2; + fp2_mul(&t1, &A_dual.x, &A_dual.y); + fp2_mul(&t2, &A_dual.z, &A_dual.t); + fp2_mul(&A->XYZ0, &t1, &A_dual.z); + fp2_mul(&A->XYT0, &t1, &A_dual.t); + fp2_mul(&A->YZT0, &t2, &A_dual.y); + fp2_mul(&A->XZT0, &t2, &A_dual.x); + + fp2_mul(&t1, &A->null_point.x, &A->null_point.y); + fp2_mul(&t2, &A->null_point.z, &A->null_point.t); + fp2_mul(&A->xyz0, &t1, &A->null_point.z); + fp2_mul(&A->xyt0, &t1, &A->null_point.t); + fp2_mul(&A->yzt0, &t2, &A->null_point.y); + fp2_mul(&A->xzt0, &t2, &A->null_point.x); + + A->precomputation = true; +} + +void +double_point(theta_point_t *out, theta_structure_t *A, const theta_point_t *in) +{ + to_squared_theta(out, in); + fp2_sqr(&out->x, &out->x); + fp2_sqr(&out->y, &out->y); + fp2_sqr(&out->z, &out->z); + fp2_sqr(&out->t, &out->t); + + if (!A->precomputation) { + theta_precomputation(A); + } + fp2_mul(&out->x, &out->x, &A->YZT0); + fp2_mul(&out->y, &out->y, &A->XZT0); + fp2_mul(&out->z, &out->z, &A->XYT0); + fp2_mul(&out->t, 
&out->t, &A->XYZ0); + + hadamard(out, out); + + fp2_mul(&out->x, &out->x, &A->yzt0); + fp2_mul(&out->y, &out->y, &A->xzt0); + fp2_mul(&out->z, &out->z, &A->xyt0); + fp2_mul(&out->t, &out->t, &A->xyz0); +} + +void +double_iter(theta_point_t *out, theta_structure_t *A, const theta_point_t *in, int exp) +{ + if (exp == 0) { + *out = *in; + } else { + double_point(out, A, in); + for (int i = 1; i < exp; i++) { + double_point(out, A, out); + } + } +} + +uint32_t +is_product_theta_point(const theta_point_t *P) +{ + fp2_t t1, t2; + fp2_mul(&t1, &P->x, &P->t); + fp2_mul(&t2, &P->y, &P->z); + return fp2_is_equal(&t1, &t2); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/theta_structure.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/theta_structure.h new file mode 100644 index 0000000000..fc630b750a --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/theta_structure.h @@ -0,0 +1,135 @@ +/** @file + * + * @authors Antonin Leroux + * + * @brief the theta structure header + */ + +#ifndef THETA_STRUCTURE_H +#define THETA_STRUCTURE_H + +#include +#include +#include + +/** @internal + * @ingroup hd_module + * @defgroup hd_theta Functions for theta structures + * @{ + */ + +/** + * @brief Perform the hadamard transform on a theta point + * + * @param out Output: the theta_point + * @param in a theta point* + * in = (x,y,z,t) + * out = (x+y+z+t, x-y+z-t, x+y-z-t, x-y-z+t) + * + */ +static inline void +hadamard(theta_point_t *out, const theta_point_t *in) +{ + fp2_t t1, t2, t3, t4; + + // t1 = x + y + fp2_add(&t1, &in->x, &in->y); + // t2 = x - y + fp2_sub(&t2, &in->x, &in->y); + // t3 = z + t + fp2_add(&t3, &in->z, &in->t); + // t4 = z - t + fp2_sub(&t4, &in->z, &in->t); + + fp2_add(&out->x, &t1, &t3); + fp2_add(&out->y, &t2, &t4); + fp2_sub(&out->z, &t1, &t3); + fp2_sub(&out->t, &t2, &t4); +} + +/** + * @brief Square the coordinates of a theta point + * @param out Output: the theta_point + * @param in a theta point* + * in = (x,y,z,t) + * out = (x^2, y^2, z^2, t^2) + * + */ +static inline void +pointwise_square(theta_point_t *out, const theta_point_t *in) +{ + fp2_sqr(&out->x, &in->x); + fp2_sqr(&out->y, &in->y); + fp2_sqr(&out->z, &in->z); + fp2_sqr(&out->t, &in->t); +} + +/** + * @brief Square the coordinates and then perform the hadamard transform + * + * @param out Output: the theta_point + * @param in a theta point* + * in = (x,y,z,t) + * out = (x^2+y^2+z^2+t^2, x^2-y^2+z^2-t^2, x^2+y^2-z^2-t^2, x^2-y^2-z^2+t^2) + * + */ +static inline void +to_squared_theta(theta_point_t *out, const theta_point_t *in) +{ + pointwise_square(out, in); + hadamard(out, out); +} + +/** + * @brief Perform the theta structure precomputation + * + * @param A Output: the theta_structure + * + * if A.null_point = (x,y,z,t) + * if (xx,yy,zz,tt) = to_squared_theta(A.null_point) + * Computes y0,z0,t0,Y0,Z0,T0 = x/y,x/z,x/t,XX/YY,XX/ZZ,XX/TT + * + */ +void theta_precomputation(theta_structure_t *A); + +/** + * @brief Compute the double of the theta point in on the theta struc A + * + * @param out Output: the theta_point + * @param A a theta structure + * @param in a theta point in the theta structure A + * in = (x,y,z,t) + * out = [2] (x,y,z,t) + * /!\ assumes that no coordinates is zero and that the precomputation of A has been done + * + */ +void double_point(theta_point_t *out, theta_structure_t *A, const theta_point_t *in); + +/** + * @brief Compute the iterated double of the theta point in on the theta struc A + * + * @param out Output: the theta_point + * @param A a theta structure + * @param in a 
theta point in the theta structure A + * @param exp the exponent + * in = (x,y,z,t) + * out = [2^2] (x,y,z,t) + * /!\ assumes that no coordinates is zero and that the precomputation of A has been done + * + */ +void double_iter(theta_point_t *out, theta_structure_t *A, const theta_point_t *in, int exp); + +/* + * @brief Check if a theta point is a product theta point + * + * @param P a theta point + * @return 0xFFFFFFFF if true, zero otherwise + */ +uint32_t is_product_theta_point(const theta_point_t *P); + +// end hd_theta +/** + * @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/tools.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/tools.c new file mode 100644 index 0000000000..242ea08fe2 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/tools.c @@ -0,0 +1,75 @@ +#include +#include + +static clock_t global_timer; + +clock_t +tic(void) +{ + global_timer = clock(); + return global_timer; +} + +float +tac(void) +{ + float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); + return ms; +} + +float +TAC(const char *str) +{ + float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); +#ifndef NDEBUG + printf("%s [%d ms]\n", str, (int)ms); +#endif + return ms; +} + +float +toc(const clock_t t) +{ + float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); + return ms; +} + +float +TOC(const clock_t t, const char *str) +{ + float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); + printf("%s [%d ms]\n", str, (int)ms); + return ms; + // printf("%s [%ld]\n",str,clock()-t); + // return (float) (clock()-t); +} + +float +TOC_clock(const clock_t t, const char *str) +{ + printf("%s [%ld]\n", str, clock() - t); + return (float)(clock() - t); +} + +clock_t +dclock(const clock_t t) +{ + return (clock() - t); +} + +float +clock_to_time(const clock_t t, const char *str) +{ + float ms = (1000. 
* (float)(t) / CLOCKS_PER_SEC); + printf("%s [%d ms]\n", str, (int)ms); + return ms; + // printf("%s [%ld]\n",str,t); + // return (float) (t); +} + +float +clock_print(const clock_t t, const char *str) +{ + printf("%s [%ld]\n", str, t); + return (float)(t); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/tools.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/tools.h new file mode 100644 index 0000000000..5a6a505fc1 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/tools.h @@ -0,0 +1,49 @@ + +#ifndef TOOLS_H +#define TOOLS_H + +#include + +// Debug printing: +// https://stackoverflow.com/questions/1644868/define-macro-for-debug-printing-in-c +#ifndef NDEBUG +#define DEBUG_PRINT 1 +#else +#define DEBUG_PRINT 0 +#endif + +#ifndef __FILE_NAME__ +#define __FILE_NAME__ "NA" +#endif + +#ifndef __LINE__ +#define __LINE__ 0 +#endif + +#ifndef __func__ +#define __func__ "NA" +#endif + +#define debug_print(fmt) \ + do { \ + if (DEBUG_PRINT) \ + printf("warning: %s, file %s, line %d, function %s().\n", \ + fmt, \ + __FILE_NAME__, \ + __LINE__, \ + __func__); \ + } while (0) + + +clock_t tic(void); +float tac(void); /* time in ms since last tic */ +float TAC(const char *str); /* same, but prints it with label 'str' */ +float toc(const clock_t t); /* time in ms since t */ +float TOC(const clock_t t, const char *str); /* same, but prints it with label 'str' */ +float TOC_clock(const clock_t t, const char *str); + +clock_t dclock(const clock_t t); // return the clock cycle diff between now and t +float clock_to_time(const clock_t t, + const char *str); // convert the number of clock cycles t to time +float clock_print(const clock_t t, const char *str); +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/torsion_constants.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/torsion_constants.c new file mode 100644 index 0000000000..6fb2f97637 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/torsion_constants.c @@ -0,0 +1,43 @@ +#include +#include +#include +const ibz_t TWO_TO_SECURITY_BITS = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x1}}} +#endif +; +const ibz_t TORSION_PLUS_2POWER = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x10}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x100000}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x10000000000000}}} +#endif +; +const ibz_t SEC_DEGREE = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 65, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) 
{0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#endif +; +const ibz_t COM_DEGREE = +#if 0 +#elif GMP_LIMB_BITS == 16 +{{._mp_alloc = 0, ._mp_size = 65, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 32 +{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#elif GMP_LIMB_BITS == 64 +{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +#endif +; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/torsion_constants.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/torsion_constants.h new file mode 100644 index 0000000000..363f86e6ac --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/torsion_constants.h @@ -0,0 +1,6 @@ +#include +#define TORSION_2POWER_BYTES 63 +extern const ibz_t TWO_TO_SECURITY_BITS; +extern const ibz_t TORSION_PLUS_2POWER; +extern const ibz_t SEC_DEGREE; +extern const ibz_t COM_DEGREE; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/tutil.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/tutil.h new file mode 100644 index 0000000000..59f162093e --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/tutil.h @@ -0,0 +1,36 @@ +#ifndef TUTIL_H +#define TUTIL_H + +#include +#include + +#if defined(__GNUC__) || defined(__clang__) +#define BSWAP16(i) __builtin_bswap16((i)) +#define BSWAP32(i) __builtin_bswap32((i)) +#define BSWAP64(i) __builtin_bswap64((i)) +#define UNUSED __attribute__((unused)) +#else +#define BSWAP16(i) ((((i) >> 8) & 0xff) | (((i) & 0xff00) << 8)) +#define BSWAP32(i) \ + ((((i) >> 24) & 0xff) | (((i) >> 8) & 0xff00) | (((i) & 0xff00) << 8) | ((i) << 24)) +#define BSWAP64(i) ((BSWAP32((i) >> 32) & 0xffffffff) | (BSWAP32(i) << 32) +#define UNUSED +#endif + +#if defined(RADIX_64) +#define digit_t uint64_t +#define sdigit_t int64_t +#define RADIX 64 +#define LOG2RADIX 6 +#define BSWAP_DIGIT(i) BSWAP64(i) +#elif defined(RADIX_32) +#define digit_t uint32_t +#define sdigit_t int32_t +#define RADIX 32 +#define LOG2RADIX 5 +#define BSWAP_DIGIT(i) BSWAP32(i) +#else +#error "Radix must be 32bit or 64 bit" +#endif + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/verification.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/verification.h new file mode 100644 index 0000000000..af674691da --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/verification.h @@ -0,0 +1,123 @@ +/** @file + * + * @brief The verification protocol + */ + +#ifndef VERIFICATION_H +#define VERIFICATION_H + +#include +#include + +/** @defgroup verification SQIsignHD verification protocol + * @{ + */ + +/** @defgroup verification_t Types for SQIsignHD verification protocol + * @{ + */ + +typedef digit_t scalar_t[NWORDS_ORDER]; +typedef scalar_t scalar_mtx_2x2_t[2][2]; + +/** @brief Type for the signature + * + * @typedef signature_t + * + * @struct signature + * + */ +typedef struct signature +{ + fp2_t 
E_aux_A; // the Montgomery A-coefficient for the auxiliary curve + uint8_t backtracking; + uint8_t two_resp_length; + scalar_mtx_2x2_t mat_Bchall_can_to_B_chall; // the matrix of the desired basis + scalar_t chall_coeff; + uint8_t hint_aux; + uint8_t hint_chall; +} signature_t; + +/** @brief Type for the public keys + * + * @typedef public_key_t + * + * @struct public_key + * + */ +typedef struct public_key +{ + ec_curve_t curve; // the normalized A-coefficient of the Montgomery curve + uint8_t hint_pk; +} public_key_t; + +/** @} + */ + +/*************************** Functions *****************************/ + +void public_key_init(public_key_t *pk); +void public_key_finalize(public_key_t *pk); + +void hash_to_challenge(scalar_t *scalar, + const public_key_t *pk, + const ec_curve_t *com_curve, + const unsigned char *message, + size_t length); + +/** + * @brief Verification + * + * @param sig signature + * @param pk public key + * @param m message + * @param l size + * @returns 1 if the signature verifies, 0 otherwise + */ +int protocols_verify(signature_t *sig, const public_key_t *pk, const unsigned char *m, size_t l); + +/*************************** Encoding *****************************/ + +/** @defgroup encoding Encoding and decoding functions + * @{ + */ + +/** + * @brief Encodes a signature as a byte array + * + * @param enc : Byte array to encode the signature in + * @param sig : Signature to encode + */ +void signature_to_bytes(unsigned char *enc, const signature_t *sig); + +/** + * @brief Decodes a signature from a byte array + * + * @param sig : Structure to decode the signature in + * @param enc : Byte array to decode + */ +void signature_from_bytes(signature_t *sig, const unsigned char *enc); + +/** + * @brief Encodes a public key as a byte array + * + * @param enc : Byte array to encode the public key in + * @param pk : Public key to encode + */ +unsigned char *public_key_to_bytes(unsigned char *enc, const public_key_t *pk); + +/** + * @brief Decodes a public key from a byte array + * + * @param pk : Structure to decode the public key in + * @param enc : Byte array to decode + */ +const unsigned char *public_key_from_bytes(public_key_t *pk, const unsigned char *enc); + +/** @} + */ + +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/verify.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/verify.c new file mode 100644 index 0000000000..b5f78ad398 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/verify.c @@ -0,0 +1,309 @@ +#include +#include +#include +#include +#include + +// Check that the basis change matrix elements are canonical +// representatives modulo 2^(SQIsign_response_length + 2). +static int +check_canonical_basis_change_matrix(const signature_t *sig) +{ + // This works as long as all values in sig->mat_Bchall_can_to_B_chall are + // positive integers. 
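+    // Concretely (describing the code below): aux is set to
+    // 2^(SQIsign_response_length + HD_extra_torsion - backtracking), and the
+    // signature is rejected if any entry of mat_Bchall_can_to_B_chall is >= aux,
+    // i.e. if it is not a canonical representative modulo that power of two.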
+ int ret = 1; + scalar_t aux; + + memset(aux, 0, NWORDS_ORDER * sizeof(digit_t)); + aux[0] = 0x1; + multiple_mp_shiftl(aux, SQIsign_response_length + HD_extra_torsion - (int)sig->backtracking, NWORDS_ORDER); + + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + if (mp_compare(aux, sig->mat_Bchall_can_to_B_chall[i][j], NWORDS_ORDER) <= 0) { + ret = 0; + } + } + } + + return ret; +} + +// Compute the 2^n isogeny from the signature with kernel +// P + [chall_coeff]Q and store the codomain in E_chall +static int +compute_challenge_verify(ec_curve_t *E_chall, const signature_t *sig, const ec_curve_t *Epk, const uint8_t hint_pk) +{ + ec_basis_t bas_EA; + ec_isog_even_t phi_chall; + + // Set domain and length of 2^n isogeny + copy_curve(&phi_chall.curve, Epk); + phi_chall.length = TORSION_EVEN_POWER - sig->backtracking; + + // Compute the basis from the supplied hint + if (!ec_curve_to_basis_2f_from_hint(&bas_EA, &phi_chall.curve, TORSION_EVEN_POWER, hint_pk)) // canonical + return 0; + + // recovering the exact challenge + { + if (!ec_ladder3pt(&phi_chall.kernel, sig->chall_coeff, &bas_EA.P, &bas_EA.Q, &bas_EA.PmQ, &phi_chall.curve)) { + return 0; + }; + } + + // Double the kernel until is has the correct order + ec_dbl_iter(&phi_chall.kernel, sig->backtracking, &phi_chall.kernel, &phi_chall.curve); + + // Compute the codomain + copy_curve(E_chall, &phi_chall.curve); + if (ec_eval_even(E_chall, &phi_chall, NULL, 0)) + return 0; + return 1; +} + +// same as matrix_application_even_basis() in id2iso.c, with some modifications: +// - this version works with a matrix of scalars (not ibz_t). +// - reduction modulo 2^f of matrix elements is removed here, because it is +// assumed that the elements are already cannonical representatives modulo +// 2^f; this is ensured by calling check_canonical_basis_change_matrix() at +// the beginning of protocols_verify(). +static int +matrix_scalar_application_even_basis(ec_basis_t *bas, const ec_curve_t *E, scalar_mtx_2x2_t *mat, int f) +{ + scalar_t scalar0, scalar1; + memset(scalar0, 0, NWORDS_ORDER * sizeof(digit_t)); + memset(scalar1, 0, NWORDS_ORDER * sizeof(digit_t)); + + ec_basis_t tmp_bas; + copy_basis(&tmp_bas, bas); + + // For a matrix [[a, c], [b, d]] we compute: + // + // first basis element R = [a]P + [b]Q + if (!ec_biscalar_mul(&bas->P, (*mat)[0][0], (*mat)[1][0], f, &tmp_bas, E)) + return 0; + // second basis element S = [c]P + [d]Q + if (!ec_biscalar_mul(&bas->Q, (*mat)[0][1], (*mat)[1][1], f, &tmp_bas, E)) + return 0; + // Their difference R - S = [a - c]P + [b - d]Q + mp_sub(scalar0, (*mat)[0][0], (*mat)[0][1], NWORDS_ORDER); + mp_mod_2exp(scalar0, f, NWORDS_ORDER); + mp_sub(scalar1, (*mat)[1][0], (*mat)[1][1], NWORDS_ORDER); + mp_mod_2exp(scalar1, f, NWORDS_ORDER); + return ec_biscalar_mul(&bas->PmQ, scalar0, scalar1, f, &tmp_bas, E); +} + +// Compute the bases for the challenge and auxillary curve from +// the canonical bases. Challenge basis is reconstructed from the +// compressed scalars within the challenge. 
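+// Concretely, both bases are recovered at full order 2^TORSION_EVEN_POWER and then
+// doubled down: the challenge basis to order
+// 2^(pow_dim2_deg_resp + HD_extra_torsion + two_resp_length) and the auxiliary basis
+// to order 2^(pow_dim2_deg_resp + HD_extra_torsion), before the basis-change matrix
+// carried in the signature is applied on the challenge side.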
+static int +challenge_and_aux_basis_verify(ec_basis_t *B_chall_can, + ec_basis_t *B_aux_can, + ec_curve_t *E_chall, + ec_curve_t *E_aux, + signature_t *sig, + const int pow_dim2_deg_resp) +{ + + // recovering the canonical basis as TORSION_EVEN_POWER for consistency with signing + if (!ec_curve_to_basis_2f_from_hint(B_chall_can, E_chall, TORSION_EVEN_POWER, sig->hint_chall)) + return 0; + + // setting to the right order + ec_dbl_iter_basis(B_chall_can, + TORSION_EVEN_POWER - pow_dim2_deg_resp - HD_extra_torsion - sig->two_resp_length, + B_chall_can, + E_chall); + + if (!ec_curve_to_basis_2f_from_hint(B_aux_can, E_aux, TORSION_EVEN_POWER, sig->hint_aux)) + return 0; + + // setting to the right order + ec_dbl_iter_basis(B_aux_can, TORSION_EVEN_POWER - pow_dim2_deg_resp - HD_extra_torsion, B_aux_can, E_aux); + +#ifndef NDEBUG + if (!test_basis_order_twof(B_chall_can, E_chall, HD_extra_torsion + pow_dim2_deg_resp + sig->two_resp_length)) + debug_print("canonical basis has wrong order, expect something to fail"); +#endif + + // applying the change matrix on the basis of E_chall + return matrix_scalar_application_even_basis(B_chall_can, + E_chall, + &sig->mat_Bchall_can_to_B_chall, + pow_dim2_deg_resp + HD_extra_torsion + sig->two_resp_length); +} + +// When two_resp_length is non-zero, we must compute a small 2^n-isogeny +// updating E_chall as the codomain as well as push the basis on E_chall +// through this isogeny +static int +two_response_isogeny_verify(ec_curve_t *E_chall, ec_basis_t *B_chall_can, const signature_t *sig, int pow_dim2_deg_resp) +{ + ec_point_t ker, points[3]; + + // choosing the right point for the small two_isogenies + if (mp_is_even(sig->mat_Bchall_can_to_B_chall[0][0], NWORDS_ORDER) && + mp_is_even(sig->mat_Bchall_can_to_B_chall[1][0], NWORDS_ORDER)) { + copy_point(&ker, &B_chall_can->Q); + } else { + copy_point(&ker, &B_chall_can->P); + } + + copy_point(&points[0], &B_chall_can->P); + copy_point(&points[1], &B_chall_can->Q); + copy_point(&points[2], &B_chall_can->PmQ); + + ec_dbl_iter(&ker, pow_dim2_deg_resp + HD_extra_torsion, &ker, E_chall); + +#ifndef NDEBUG + if (!test_point_order_twof(&ker, E_chall, sig->two_resp_length)) + debug_print("kernel does not have order 2^(two_resp_length"); +#endif + + if (ec_eval_small_chain(E_chall, &ker, sig->two_resp_length, points, 3, false)) { + return 0; + } + +#ifndef NDEBUG + if (!test_point_order_twof(&points[0], E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("points[0] does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); + if (!test_point_order_twof(&points[1], E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("points[1] does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); + if (!test_point_order_twof(&points[2], E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("points[2] does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); +#endif + + copy_point(&B_chall_can->P, &points[0]); + copy_point(&B_chall_can->Q, &points[1]); + copy_point(&B_chall_can->PmQ, &points[2]); + return 1; +} + +// The commitment curve can be recovered from the codomain of the 2D +// isogeny built from the bases computed during verification. 
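+// The kernel of that 2D isogeny is assembled from B_chall_can and B_aux_can on
+// E_chall x E_aux, and E_com is read off as the first factor of the codomain.
+// When a chain is actually computed (pow_dim2_deg_resp > 0), failure of the
+// (2^n,2^n)-isogeny to split into a product of elliptic curves rejects the signature.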
+static int +compute_commitment_curve_verify(ec_curve_t *E_com, + const ec_basis_t *B_chall_can, + const ec_basis_t *B_aux_can, + const ec_curve_t *E_chall, + const ec_curve_t *E_aux, + int pow_dim2_deg_resp) + +{ +#ifndef NDEBUG + // Check all the points are the correct order + if (!test_basis_order_twof(B_chall_can, E_chall, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("B_chall_can does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); + + if (!test_basis_order_twof(B_aux_can, E_aux, HD_extra_torsion + pow_dim2_deg_resp)) + debug_print("B_aux_can does not have order 2^(HD_extra_torsion + pow_dim2_deg_resp"); +#endif + + // now compute the dim2 isogeny from Echall x E_aux -> E_com x E_aux' + // of kernel B_chall_can x B_aux_can + + // first we set-up the kernel + theta_couple_curve_t EchallxEaux; + copy_curve(&EchallxEaux.E1, E_chall); + copy_curve(&EchallxEaux.E2, E_aux); + + theta_kernel_couple_points_t dim_two_ker; + copy_bases_to_kernel(&dim_two_ker, B_chall_can, B_aux_can); + + // computing the isogeny + theta_couple_curve_t codomain; + int codomain_splits; + ec_curve_init(&codomain.E1); + ec_curve_init(&codomain.E2); + // handling the special case where we don't need to perform any dim2 computation + if (pow_dim2_deg_resp == 0) { + codomain_splits = 1; + copy_curve(&codomain.E1, &EchallxEaux.E1); + copy_curve(&codomain.E2, &EchallxEaux.E2); + // We still need to check that E_chall is supersingular + // This assumes that HD_extra_torsion == 2 + if (!ec_is_basis_four_torsion(B_chall_can, E_chall)) { + return 0; + } + } else { + codomain_splits = theta_chain_compute_and_eval_verify( + pow_dim2_deg_resp, &EchallxEaux, &dim_two_ker, true, &codomain, NULL, 0); + } + + // computing the commitment curve + // its always the first one because of our (2^n,2^n)-isogeny formulae + copy_curve(E_com, &codomain.E1); + + return codomain_splits; +} + +// SQIsign verification +int +protocols_verify(signature_t *sig, const public_key_t *pk, const unsigned char *m, size_t l) +{ + int verify; + + if (!check_canonical_basis_change_matrix(sig)) + return 0; + + // Computation of the length of the dim 2 2^n isogeny + int pow_dim2_deg_resp = SQIsign_response_length - (int)sig->two_resp_length - (int)sig->backtracking; + + // basic sanity test: checking that the response is not too long + if (pow_dim2_deg_resp < 0) + return 0; + // The dim 2 isogeny embeds a dim 1 isogeny of odd degree, so it can + // never be of length 2. 
+ if (pow_dim2_deg_resp == 1) + return 0; + + // check the public curve is valid + if (!ec_curve_verify_A(&(pk->curve).A)) + return 0; + + // Set auxiliary curve from the A-coefficient within the signature + ec_curve_t E_aux; + if (!ec_curve_init_from_A(&E_aux, &sig->E_aux_A)) + return 0; // invalid curve + + // checking that we are given A-coefficients and no precomputation + assert(fp2_is_one(&pk->curve.C) == 0xFFFFFFFF && !pk->curve.is_A24_computed_and_normalized); + + // computation of the challenge + ec_curve_t E_chall; + if (!compute_challenge_verify(&E_chall, sig, &pk->curve, pk->hint_pk)) { + return 0; + } + + // Computation of the canonical bases for the challenge and aux curve + ec_basis_t B_chall_can, B_aux_can; + + if (!challenge_and_aux_basis_verify(&B_chall_can, &B_aux_can, &E_chall, &E_aux, sig, pow_dim2_deg_resp)) { + return 0; + } + + // When two_resp_length != 0 we need to compute a second, short 2^r-isogeny + if (sig->two_resp_length > 0) { + if (!two_response_isogeny_verify(&E_chall, &B_chall_can, sig, pow_dim2_deg_resp)) { + return 0; + } + } + + // We can recover the commitment curve with a 2D isogeny + // The supplied signature did not compute an isogeny between eliptic products + // and so definitely is an invalid signature. + ec_curve_t E_com; + if (!compute_commitment_curve_verify(&E_com, &B_chall_can, &B_aux_can, &E_chall, &E_aux, pow_dim2_deg_resp)) + return 0; + + scalar_t chk_chall; + + // recomputing the challenge vector + hash_to_challenge(&chk_chall, pk, &E_com, m, l); + + // performing the final check + verify = mp_compare(sig->chall_coeff, chk_chall, NWORDS_ORDER) == 0; + + return verify; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/xeval.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/xeval.c new file mode 100644 index 0000000000..7fc7170423 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/xeval.c @@ -0,0 +1,64 @@ +#include "isog.h" +#include "ec.h" +#include + +// ----------------------------------------------------------------------------------------- +// ----------------------------------------------------------------------------------------- + +// Degree-2 isogeny evaluation with kenerl generated by P != (0, 0) +void +xeval_2(ec_point_t *R, ec_point_t *const Q, const int lenQ, const ec_kps2_t *kps) +{ + fp2_t t0, t1, t2; + for (int j = 0; j < lenQ; j++) { + fp2_add(&t0, &Q[j].x, &Q[j].z); + fp2_sub(&t1, &Q[j].x, &Q[j].z); + fp2_mul(&t2, &kps->K.x, &t1); + fp2_mul(&t1, &kps->K.z, &t0); + fp2_add(&t0, &t2, &t1); + fp2_sub(&t1, &t2, &t1); + fp2_mul(&R[j].x, &Q[j].x, &t0); + fp2_mul(&R[j].z, &Q[j].z, &t1); + } +} + +void +xeval_2_singular(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps2_t *kps) +{ + fp2_t t0, t1; + for (int i = 0; i < lenQ; i++) { + fp2_mul(&t0, &Q[i].x, &Q[i].z); + fp2_mul(&t1, &kps->K.x, &Q[i].z); + fp2_add(&t1, &t1, &Q[i].x); + fp2_mul(&t1, &t1, &Q[i].x); + fp2_sqr(&R[i].x, &Q[i].z); + fp2_add(&R[i].x, &R[i].x, &t1); + fp2_mul(&R[i].z, &t0, &kps->K.z); + } +} + +// Degree-4 isogeny evaluation with kenerl generated by P such that [2]P != (0, 0) +void +xeval_4(ec_point_t *R, const ec_point_t *Q, const int lenQ, const ec_kps4_t *kps) +{ + const ec_point_t *K = kps->K; + + fp2_t t0, t1; + + for (int i = 0; i < lenQ; i++) { + fp2_add(&t0, &Q[i].x, &Q[i].z); + fp2_sub(&t1, &Q[i].x, &Q[i].z); + fp2_mul(&(R[i].x), &t0, &K[1].x); + fp2_mul(&(R[i].z), &t1, &K[2].x); + fp2_mul(&t0, &t0, &t1); + fp2_mul(&t0, &t0, &K[0].x); + fp2_add(&t1, &(R[i].x), &(R[i].z)); + fp2_sub(&(R[i].z), &(R[i].x), 
&(R[i].z)); + fp2_sqr(&t1, &t1); + fp2_sqr(&(R[i].z), &(R[i].z)); + fp2_add(&(R[i].x), &t0, &t1); + fp2_sub(&t0, &t0, &(R[i].z)); + fp2_mul(&(R[i].x), &(R[i].x), &t1); + fp2_mul(&(R[i].z), &(R[i].z), &t0); + } +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/xisog.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/xisog.c new file mode 100644 index 0000000000..7242d29433 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/xisog.c @@ -0,0 +1,61 @@ +#include "isog.h" +#include "ec.h" +#include + +// ------------------------------------------------------------------------- +// ------------------------------------------------------------------------- + +// Degree-2 isogeny with kernel generated by P != (0 ,0) +// Outputs the curve coefficient in the form A24=(A+2C:4C) +void +xisog_2(ec_kps2_t *kps, ec_point_t *B, const ec_point_t P) +{ + fp2_sqr(&B->x, &P.x); + fp2_sqr(&B->z, &P.z); + fp2_sub(&B->x, &B->z, &B->x); + fp2_add(&kps->K.x, &P.x, &P.z); + fp2_sub(&kps->K.z, &P.x, &P.z); +} + +void +xisog_2_singular(ec_kps2_t *kps, ec_point_t *B24, ec_point_t A24) +{ + // No need to check the square root, only used for signing. + fp2_t t0, four; + fp2_set_small(&four, 4); + fp2_add(&t0, &A24.x, &A24.x); + fp2_sub(&t0, &t0, &A24.z); + fp2_add(&t0, &t0, &t0); + fp2_inv(&A24.z); + fp2_mul(&t0, &t0, &A24.z); + fp2_copy(&kps->K.x, &t0); + fp2_add(&B24->x, &t0, &t0); + fp2_sqr(&t0, &t0); + fp2_sub(&t0, &t0, &four); + fp2_sqrt(&t0); + fp2_neg(&kps->K.z, &t0); + fp2_add(&B24->z, &t0, &t0); + fp2_add(&B24->x, &B24->x, &B24->z); + fp2_add(&B24->z, &B24->z, &B24->z); +} + +// Degree-4 isogeny with kernel generated by P such that [2]P != (0 ,0) +// Outputs the curve coefficient in the form A24=(A+2C:4C) +void +xisog_4(ec_kps4_t *kps, ec_point_t *B, const ec_point_t P) +{ + ec_point_t *K = kps->K; + + fp2_sqr(&K[0].x, &P.x); + fp2_sqr(&K[0].z, &P.z); + fp2_add(&K[1].x, &K[0].z, &K[0].x); + fp2_sub(&K[1].z, &K[0].z, &K[0].x); + fp2_mul(&B->x, &K[1].x, &K[1].z); + fp2_sqr(&B->z, &K[0].z); + + // Constants for xeval_4 + fp2_add(&K[2].x, &P.x, &P.z); + fp2_sub(&K[1].x, &P.x, &P.z); + fp2_add(&K[0].x, &K[0].z, &K[0].z); + fp2_add(&K[0].x, &K[0].x, &K[0].x); +} diff --git a/tests/KATs/sig/kats.json b/tests/KATs/sig/kats.json index 0d4868f37a..4839e3c6b7 100644 --- a/tests/KATs/sig/kats.json +++ b/tests/KATs/sig/kats.json @@ -199,6 +199,15 @@ "all": "dadcf175289c25aaa530a389cc84154dc4331fabda06ffaf2a292944e4d03841", "single": "37d37c9b43d71341b7dd5da7f8ebbe8bbae3d7bfc53f5378446023cbcf6e04f2" }, + "SQIsign-lvl1": { + "single": "cce2e6a0e6aff1179eba6b8b5cb1b096e751f9aafdf8934a2df857164a902202" + }, + "SQIsign-lvl3": { + "single": "cce2e6a0e6aff1179eba6b8b5cb1b096e751f9aafdf8934a2df857164a902202" + }, + "SQIsign-lvl5": { + "single": "cce2e6a0e6aff1179eba6b8b5cb1b096e751f9aafdf8934a2df857164a902202" + }, "cross-rsdp-128-balanced": { "all": "7b12a6f71166cde8289c732b3107eaa21edf59c2f336b0921a62faa93980de77", "single": "1261083807232119c1f0a5b0d9f958fb8cf8e5a7b897cc4b7c30336cf12da989" diff --git a/tests/kat_sig.c b/tests/kat_sig.c index bbb903ded3..77d92efe76 100644 --- a/tests/kat_sig.c +++ b/tests/kat_sig.c @@ -732,6 +732,36 @@ OQS_STATUS combine_message_signature(uint8_t **signed_msg, size_t *signed_msg_le memcpy(*signed_msg, signature, signature_len); memcpy(*signed_msg + signature_len, msg, msg_len); return OQS_SUCCESS; + } else if (0 == strcmp(sig->method_name, "SQIsign-lvl1")) { + // signed_msg = signature || msg + *signed_msg_len = signature_len + msg_len; + *signed_msg = 
OQS_MEM_malloc(*signed_msg_len); + if (*signed_msg == NULL) { + return OQS_ERROR; + } + memcpy(*signed_msg, signature, signature_len); + memcpy(*signed_msg + signature_len, msg, msg_len); + return OQS_SUCCESS; + } else if (0 == strcmp(sig->method_name, "SQIsign-lvl3")) { + // signed_msg = signature || msg + *signed_msg_len = signature_len + msg_len; + *signed_msg = OQS_MEM_malloc(*signed_msg_len); + if (*signed_msg == NULL) { + return OQS_ERROR; + } + memcpy(*signed_msg, signature, signature_len); + memcpy(*signed_msg + signature_len, msg, msg_len); + return OQS_SUCCESS; + } else if (0 == strcmp(sig->method_name, "SQIsign-lvl5")) { + // signed_msg = signature || msg + *signed_msg_len = signature_len + msg_len; + *signed_msg = OQS_MEM_malloc(*signed_msg_len); + if (*signed_msg == NULL) { + return OQS_ERROR; + } + memcpy(*signed_msg, signature, signature_len); + memcpy(*signed_msg + signature_len, msg, msg_len); + return OQS_SUCCESS; ///// OQS_COPY_FROM_UPSTREAM_FRAGMENT_COMBINE_MESSAGE_SIGNATURE_END } else { return OQS_ERROR; From 258f491a9906244c62cd6b4f4dd8132fae7131ff Mon Sep 17 00:00:00 2001 From: Shane Date: Mon, 23 Jun 2025 13:28:35 -0400 Subject: [PATCH 02/19] Get Nix Flake tests passing with the exception of mini-gmp. Signed-off-by: Shane --- .../copy_from_upstream/copy_from_upstream.yml | 6 +- .../patches/sqisign_namespace.patch | 622 ++++++++++++++++++ src/sig/sqisign/CMakeLists.txt | 30 +- .../randombytes_ctrdrbg_aesni.c | 87 --- .../randombytes_system.c | 431 ------------ .../the-sqisign_sqisign_lvl1_broadwell/rng.h | 40 +- .../the-sqisign_sqisign_lvl1_broadwell/sig.h | 47 +- .../sqisign_namespace.h | 468 ++++++------- .../tools.c | 75 --- .../randombytes_system.c | 431 ------------ .../the-sqisign_sqisign_lvl1_ref/rng.h | 40 +- .../the-sqisign_sqisign_lvl1_ref/sig.h | 47 +- .../sqisign_namespace.h | 468 ++++++------- .../the-sqisign_sqisign_lvl1_ref/tools.c | 75 --- .../randombytes_system.c | 431 ------------ .../the-sqisign_sqisign_lvl3_broadwell/rng.h | 40 +- .../the-sqisign_sqisign_lvl3_broadwell/sig.h | 47 +- .../sqisign_namespace.h | 468 ++++++------- .../tools.c | 75 --- .../randombytes_ctrdrbg.c | 161 ----- .../randombytes_system.c | 431 ------------ .../the-sqisign_sqisign_lvl3_ref/rng.h | 40 +- .../the-sqisign_sqisign_lvl3_ref/sig.h | 47 +- .../sqisign_namespace.h | 468 ++++++------- .../the-sqisign_sqisign_lvl3_ref/tools.c | 75 --- .../randombytes_ctrdrbg_aesni.c | 87 --- .../randombytes_system.c | 431 ------------ .../the-sqisign_sqisign_lvl5_broadwell/rng.h | 40 +- .../the-sqisign_sqisign_lvl5_broadwell/sig.h | 47 +- .../sqisign_namespace.h | 468 ++++++------- .../tools.c | 75 --- .../randombytes_ctrdrbg.c | 161 ----- .../randombytes_system.c | 431 ------------ .../the-sqisign_sqisign_lvl5_ref/rng.h | 40 +- .../the-sqisign_sqisign_lvl5_ref/sig.h | 47 +- .../sqisign_namespace.h | 468 ++++++------- .../the-sqisign_sqisign_lvl5_ref/tools.c | 75 --- tests/test_sig.c | 2 +- 38 files changed, 2345 insertions(+), 5177 deletions(-) create mode 100644 scripts/copy_from_upstream/patches/sqisign_namespace.patch delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_ctrdrbg_aesni.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_system.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/tools.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/randombytes_system.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/tools.c delete mode 100644 
src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_system.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/tools.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/randombytes_ctrdrbg.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/randombytes_system.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/tools.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_ctrdrbg_aesni.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_system.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/tools.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/randombytes_ctrdrbg.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/randombytes_system.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/tools.c diff --git a/scripts/copy_from_upstream/copy_from_upstream.yml b/scripts/copy_from_upstream/copy_from_upstream.yml index af05743bfc..a1aba095e8 100644 --- a/scripts/copy_from_upstream/copy_from_upstream.yml +++ b/scripts/copy_from_upstream/copy_from_upstream.yml @@ -95,11 +95,11 @@ upstreams: - name: the-sqisign git_url: https://github.com/shane-digi/the-sqisign.git - git_branch: develop_oqs - git_commit: 6ae72148fd136c19e1d3f4ba493a96012071bb89 + git_branch: dev_oqs + git_commit: 080f0b5b7fca0a19a6a2cfb3c1eae3c0ddccc4c3 sig_scheme_path: '.' sig_meta_path: 'META/{pqclean_scheme}.yml' - patches: [sqisign_fp.patch] + patches: [sqisign_fp.patch, sqisign_namespace.patch] kems: - diff --git a/scripts/copy_from_upstream/patches/sqisign_namespace.patch b/scripts/copy_from_upstream/patches/sqisign_namespace.patch new file mode 100644 index 0000000000..6bdba58527 --- /dev/null +++ b/scripts/copy_from_upstream/patches/sqisign_namespace.patch @@ -0,0 +1,622 @@ +diff --git a/include/sqisign_namespace.h b/include/sqisign_namespace.h +index 007d257..bbfe72c 100644 +--- a/include/sqisign_namespace.h ++++ b/include/sqisign_namespace.h +@@ -18,12 +18,6 @@ + #define PARAM_JOIN2(a, b) PARAM_JOIN2_(a, b) + #define PARAM_NAME2(end, s) PARAM_JOIN2(end, s) + +-#ifndef DISABLE_NAMESPACING +-#define SQISIGN_NAMESPACE_GENERIC(s) PARAM_NAME2(gen, s) +-#else +-#define SQISIGN_NAMESPACE_GENERIC(s) s +-#endif +- + #if defined(SQISIGN_VARIANT) && !defined(DISABLE_NAMESPACING) + #if defined(SQISIGN_BUILD_TYPE_REF) + #define SQISIGN_NAMESPACE(s) PARAM_NAME3(ref, s) +@@ -60,23 +54,23 @@ + #undef quat_alg_scalar + #undef quat_alg_sub + +-#define quat_alg_add SQISIGN_NAMESPACE_GENERIC(quat_alg_add) +-#define quat_alg_conj SQISIGN_NAMESPACE_GENERIC(quat_alg_conj) +-#define quat_alg_coord_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_coord_mul) +-#define quat_alg_elem_copy SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy) +-#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy_ibz) +-#define quat_alg_elem_equal SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_equal) +-#define quat_alg_elem_is_zero SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_is_zero) +-#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_mul_by_scalar) +-#define quat_alg_elem_set SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_set) +-#define quat_alg_equal_denom SQISIGN_NAMESPACE_GENERIC(quat_alg_equal_denom) +-#define quat_alg_init_set_ui SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set_ui) +-#define quat_alg_make_primitive SQISIGN_NAMESPACE_GENERIC(quat_alg_make_primitive) +-#define quat_alg_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_mul) +-#define quat_alg_norm 
SQISIGN_NAMESPACE_GENERIC(quat_alg_norm) +-#define quat_alg_normalize SQISIGN_NAMESPACE_GENERIC(quat_alg_normalize) +-#define quat_alg_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_scalar) +-#define quat_alg_sub SQISIGN_NAMESPACE_GENERIC(quat_alg_sub) ++#define quat_alg_add SQISIGN_NAMESPACE(quat_alg_add) ++#define quat_alg_conj SQISIGN_NAMESPACE(quat_alg_conj) ++#define quat_alg_coord_mul SQISIGN_NAMESPACE(quat_alg_coord_mul) ++#define quat_alg_elem_copy SQISIGN_NAMESPACE(quat_alg_elem_copy) ++#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE(quat_alg_elem_copy_ibz) ++#define quat_alg_elem_equal SQISIGN_NAMESPACE(quat_alg_elem_equal) ++#define quat_alg_elem_is_zero SQISIGN_NAMESPACE(quat_alg_elem_is_zero) ++#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE(quat_alg_elem_mul_by_scalar) ++#define quat_alg_elem_set SQISIGN_NAMESPACE(quat_alg_elem_set) ++#define quat_alg_equal_denom SQISIGN_NAMESPACE(quat_alg_equal_denom) ++#define quat_alg_init_set_ui SQISIGN_NAMESPACE(quat_alg_init_set_ui) ++#define quat_alg_make_primitive SQISIGN_NAMESPACE(quat_alg_make_primitive) ++#define quat_alg_mul SQISIGN_NAMESPACE(quat_alg_mul) ++#define quat_alg_norm SQISIGN_NAMESPACE(quat_alg_norm) ++#define quat_alg_normalize SQISIGN_NAMESPACE(quat_alg_normalize) ++#define quat_alg_scalar SQISIGN_NAMESPACE(quat_alg_scalar) ++#define quat_alg_sub SQISIGN_NAMESPACE(quat_alg_sub) + + // Namespacing symbols exported from api.c: + #undef crypto_sign +@@ -134,14 +128,14 @@ + #undef ibz_mat_2x2_set + #undef ibz_vec_2_set + +-#define ibz_2x2_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_2x2_mul_mod) +-#define ibz_mat_2x2_add SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_add) +-#define ibz_mat_2x2_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_copy) +-#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_det_from_ibz) +-#define ibz_mat_2x2_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_eval) +-#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_inv_mod) +-#define ibz_mat_2x2_set SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_set) +-#define ibz_vec_2_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_set) ++#define ibz_2x2_mul_mod SQISIGN_NAMESPACE(ibz_2x2_mul_mod) ++#define ibz_mat_2x2_add SQISIGN_NAMESPACE(ibz_mat_2x2_add) ++#define ibz_mat_2x2_copy SQISIGN_NAMESPACE(ibz_mat_2x2_copy) ++#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE(ibz_mat_2x2_det_from_ibz) ++#define ibz_mat_2x2_eval SQISIGN_NAMESPACE(ibz_mat_2x2_eval) ++#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE(ibz_mat_2x2_inv_mod) ++#define ibz_mat_2x2_set SQISIGN_NAMESPACE(ibz_mat_2x2_set) ++#define ibz_vec_2_set SQISIGN_NAMESPACE(ibz_vec_2_set) + + // Namespacing symbols exported from dim2id2iso.c: + #undef dim2id2iso_arbitrary_isogeny_evaluation +@@ -184,34 +178,34 @@ + #undef ibz_vec_4_sub + #undef quat_qf_eval + +-#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_mpm) +-#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_pmp) +-#define ibz_mat_4x4_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_copy) +-#define ibz_mat_4x4_equal SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_equal) +-#define ibz_mat_4x4_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval) +-#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval_t) +-#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_gcd) +-#define ibz_mat_4x4_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_identity) +-#define ibz_mat_4x4_inv_with_det_as_denom SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_inv_with_det_as_denom) +-#define ibz_mat_4x4_is_identity 
SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_identity) +-#define ibz_mat_4x4_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_mul) +-#define ibz_mat_4x4_negate SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_negate) +-#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_div) +-#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_mul) +-#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_transpose) +-#define ibz_mat_4x4_zero SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_zero) +-#define ibz_vec_4_add SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_add) +-#define ibz_vec_4_content SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_content) +-#define ibz_vec_4_copy SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy) +-#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_ibz) +-#define ibz_vec_4_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_is_zero) +-#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination) +-#define ibz_vec_4_negate SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_negate) +-#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_div) +-#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul) +-#define ibz_vec_4_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_set) +-#define ibz_vec_4_sub SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_sub) +-#define quat_qf_eval SQISIGN_NAMESPACE_GENERIC(quat_qf_eval) ++#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE(ibz_inv_dim4_make_coeff_mpm) ++#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE(ibz_inv_dim4_make_coeff_pmp) ++#define ibz_mat_4x4_copy SQISIGN_NAMESPACE(ibz_mat_4x4_copy) ++#define ibz_mat_4x4_equal SQISIGN_NAMESPACE(ibz_mat_4x4_equal) ++#define ibz_mat_4x4_eval SQISIGN_NAMESPACE(ibz_mat_4x4_eval) ++#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE(ibz_mat_4x4_eval_t) ++#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE(ibz_mat_4x4_gcd) ++#define ibz_mat_4x4_identity SQISIGN_NAMESPACE(ibz_mat_4x4_identity) ++#define ibz_mat_4x4_inv_with_det_as_denom SQISIGN_NAMESPACE(ibz_mat_4x4_inv_with_det_as_denom) ++#define ibz_mat_4x4_is_identity SQISIGN_NAMESPACE(ibz_mat_4x4_is_identity) ++#define ibz_mat_4x4_mul SQISIGN_NAMESPACE(ibz_mat_4x4_mul) ++#define ibz_mat_4x4_negate SQISIGN_NAMESPACE(ibz_mat_4x4_negate) ++#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE(ibz_mat_4x4_scalar_div) ++#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE(ibz_mat_4x4_scalar_mul) ++#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE(ibz_mat_4x4_transpose) ++#define ibz_mat_4x4_zero SQISIGN_NAMESPACE(ibz_mat_4x4_zero) ++#define ibz_vec_4_add SQISIGN_NAMESPACE(ibz_vec_4_add) ++#define ibz_vec_4_content SQISIGN_NAMESPACE(ibz_vec_4_content) ++#define ibz_vec_4_copy SQISIGN_NAMESPACE(ibz_vec_4_copy) ++#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE(ibz_vec_4_copy_ibz) ++#define ibz_vec_4_is_zero SQISIGN_NAMESPACE(ibz_vec_4_is_zero) ++#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE(ibz_vec_4_linear_combination) ++#define ibz_vec_4_negate SQISIGN_NAMESPACE(ibz_vec_4_negate) ++#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE(ibz_vec_4_scalar_div) ++#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE(ibz_vec_4_scalar_mul) ++#define ibz_vec_4_set SQISIGN_NAMESPACE(ibz_vec_4_set) ++#define ibz_vec_4_sub SQISIGN_NAMESPACE(ibz_vec_4_sub) ++#define quat_qf_eval SQISIGN_NAMESPACE(quat_qf_eval) + + // Namespacing symbols exported from ec.c: + #undef cswap_points +@@ -339,22 +333,22 @@ + #undef quat_left_ideal_finalize + #undef quat_left_ideal_init + +-#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_finalize) +-#define 
ibz_mat_2x2_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_init) +-#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_finalize) +-#define ibz_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_init) +-#define ibz_vec_2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_finalize) +-#define ibz_vec_2_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_init) +-#define ibz_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_finalize) +-#define ibz_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_init) +-#define quat_alg_elem_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_finalize) +-#define quat_alg_elem_init SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_init) +-#define quat_alg_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_finalize) +-#define quat_alg_init_set SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set) +-#define quat_lattice_finalize SQISIGN_NAMESPACE_GENERIC(quat_lattice_finalize) +-#define quat_lattice_init SQISIGN_NAMESPACE_GENERIC(quat_lattice_init) +-#define quat_left_ideal_finalize SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_finalize) +-#define quat_left_ideal_init SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_init) ++#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE(ibz_mat_2x2_finalize) ++#define ibz_mat_2x2_init SQISIGN_NAMESPACE(ibz_mat_2x2_init) ++#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE(ibz_mat_4x4_finalize) ++#define ibz_mat_4x4_init SQISIGN_NAMESPACE(ibz_mat_4x4_init) ++#define ibz_vec_2_finalize SQISIGN_NAMESPACE(ibz_vec_2_finalize) ++#define ibz_vec_2_init SQISIGN_NAMESPACE(ibz_vec_2_init) ++#define ibz_vec_4_finalize SQISIGN_NAMESPACE(ibz_vec_4_finalize) ++#define ibz_vec_4_init SQISIGN_NAMESPACE(ibz_vec_4_init) ++#define quat_alg_elem_finalize SQISIGN_NAMESPACE(quat_alg_elem_finalize) ++#define quat_alg_elem_init SQISIGN_NAMESPACE(quat_alg_elem_init) ++#define quat_alg_finalize SQISIGN_NAMESPACE(quat_alg_finalize) ++#define quat_alg_init_set SQISIGN_NAMESPACE(quat_alg_init_set) ++#define quat_lattice_finalize SQISIGN_NAMESPACE(quat_lattice_finalize) ++#define quat_lattice_init SQISIGN_NAMESPACE(quat_lattice_init) ++#define quat_left_ideal_finalize SQISIGN_NAMESPACE(quat_left_ideal_finalize) ++#define quat_left_ideal_init SQISIGN_NAMESPACE(quat_left_ideal_init) + + // Namespacing symbols exported from fp.c: + #undef fp_select +@@ -567,11 +561,11 @@ + #undef ibz_vec_4_linear_combination_mod + #undef ibz_vec_4_scalar_mul_mod + +-#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_hnf) +-#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE_GENERIC(ibz_mat_4xn_hnf_mod_core) +-#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_mod) +-#define ibz_vec_4_linear_combination_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination_mod) +-#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul_mod) ++#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE(ibz_mat_4x4_is_hnf) ++#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE(ibz_mat_4xn_hnf_mod_core) ++#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE(ibz_vec_4_copy_mod) ++#define ibz_vec_4_linear_combination_mod SQISIGN_NAMESPACE(ibz_vec_4_linear_combination_mod) ++#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE(ibz_vec_4_scalar_mul_mod) + + // Namespacing symbols exported from hnf_internal.c: + #undef ibz_centered_mod +@@ -579,15 +573,15 @@ + #undef ibz_mod_not_zero + #undef ibz_xgcd_with_u_not_0 + +-#define ibz_centered_mod SQISIGN_NAMESPACE_GENERIC(ibz_centered_mod) +-#define ibz_conditional_assign SQISIGN_NAMESPACE_GENERIC(ibz_conditional_assign) +-#define ibz_mod_not_zero 
SQISIGN_NAMESPACE_GENERIC(ibz_mod_not_zero) +-#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE_GENERIC(ibz_xgcd_with_u_not_0) ++#define ibz_centered_mod SQISIGN_NAMESPACE(ibz_centered_mod) ++#define ibz_conditional_assign SQISIGN_NAMESPACE(ibz_conditional_assign) ++#define ibz_mod_not_zero SQISIGN_NAMESPACE(ibz_mod_not_zero) ++#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE(ibz_xgcd_with_u_not_0) + + // Namespacing symbols exported from ibz_division.c: + #undef ibz_xgcd + +-#define ibz_xgcd SQISIGN_NAMESPACE_GENERIC(ibz_xgcd) ++#define ibz_xgcd SQISIGN_NAMESPACE(ibz_xgcd) + + // Namespacing symbols exported from id2iso.c: + #undef change_of_basis_matrix_tate +@@ -624,22 +618,22 @@ + #undef quat_order_discriminant + #undef quat_order_is_maximal + +-#define quat_lideal_add SQISIGN_NAMESPACE_GENERIC(quat_lideal_add) +-#define quat_lideal_class_gram SQISIGN_NAMESPACE_GENERIC(quat_lideal_class_gram) +-#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_conjugate_without_hnf) +-#define quat_lideal_copy SQISIGN_NAMESPACE_GENERIC(quat_lideal_copy) +-#define quat_lideal_create SQISIGN_NAMESPACE_GENERIC(quat_lideal_create) +-#define quat_lideal_create_principal SQISIGN_NAMESPACE_GENERIC(quat_lideal_create_principal) +-#define quat_lideal_equals SQISIGN_NAMESPACE_GENERIC(quat_lideal_equals) +-#define quat_lideal_generator SQISIGN_NAMESPACE_GENERIC(quat_lideal_generator) +-#define quat_lideal_inter SQISIGN_NAMESPACE_GENERIC(quat_lideal_inter) +-#define quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_inverse_lattice_without_hnf) +-#define quat_lideal_mul SQISIGN_NAMESPACE_GENERIC(quat_lideal_mul) +-#define quat_lideal_norm SQISIGN_NAMESPACE_GENERIC(quat_lideal_norm) +-#define quat_lideal_right_order SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_order) +-#define quat_lideal_right_transporter SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_transporter) +-#define quat_order_discriminant SQISIGN_NAMESPACE_GENERIC(quat_order_discriminant) +-#define quat_order_is_maximal SQISIGN_NAMESPACE_GENERIC(quat_order_is_maximal) ++#define quat_lideal_add SQISIGN_NAMESPACE(quat_lideal_add) ++#define quat_lideal_class_gram SQISIGN_NAMESPACE(quat_lideal_class_gram) ++#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE(quat_lideal_conjugate_without_hnf) ++#define quat_lideal_copy SQISIGN_NAMESPACE(quat_lideal_copy) ++#define quat_lideal_create SQISIGN_NAMESPACE(quat_lideal_create) ++#define quat_lideal_create_principal SQISIGN_NAMESPACE(quat_lideal_create_principal) ++#define quat_lideal_equals SQISIGN_NAMESPACE(quat_lideal_equals) ++#define quat_lideal_generator SQISIGN_NAMESPACE(quat_lideal_generator) ++#define quat_lideal_inter SQISIGN_NAMESPACE(quat_lideal_inter) ++#define quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE(quat_lideal_inverse_lattice_without_hnf) ++#define quat_lideal_mul SQISIGN_NAMESPACE(quat_lideal_mul) ++#define quat_lideal_norm SQISIGN_NAMESPACE(quat_lideal_norm) ++#define quat_lideal_right_order SQISIGN_NAMESPACE(quat_lideal_right_order) ++#define quat_lideal_right_transporter SQISIGN_NAMESPACE(quat_lideal_right_transporter) ++#define quat_order_discriminant SQISIGN_NAMESPACE(quat_order_discriminant) ++#define quat_order_is_maximal SQISIGN_NAMESPACE(quat_order_is_maximal) + + // Namespacing symbols exported from intbig.c: + #undef ibz_abs +@@ -647,6 +641,10 @@ + #undef ibz_bitsize + #undef ibz_cmp + #undef ibz_cmp_int32 ++#undef ibz_const_one ++#undef ibz_const_three ++#undef ibz_const_two ++#undef ibz_const_zero + #undef 
ibz_convert_to_str + #undef ibz_copy + #undef ibz_copy_digits +@@ -687,57 +685,61 @@ + #undef ibz_to_digits + #undef ibz_two_adic + +-#define ibz_abs SQISIGN_NAMESPACE_GENERIC(ibz_abs) +-#define ibz_add SQISIGN_NAMESPACE_GENERIC(ibz_add) +-#define ibz_bitsize SQISIGN_NAMESPACE_GENERIC(ibz_bitsize) +-#define ibz_cmp SQISIGN_NAMESPACE_GENERIC(ibz_cmp) +-#define ibz_cmp_int32 SQISIGN_NAMESPACE_GENERIC(ibz_cmp_int32) +-#define ibz_convert_to_str SQISIGN_NAMESPACE_GENERIC(ibz_convert_to_str) +-#define ibz_copy SQISIGN_NAMESPACE_GENERIC(ibz_copy) +-#define ibz_copy_digits SQISIGN_NAMESPACE_GENERIC(ibz_copy_digits) +-#define ibz_div SQISIGN_NAMESPACE_GENERIC(ibz_div) +-#define ibz_div_2exp SQISIGN_NAMESPACE_GENERIC(ibz_div_2exp) +-#define ibz_div_floor SQISIGN_NAMESPACE_GENERIC(ibz_div_floor) +-#define ibz_divides SQISIGN_NAMESPACE_GENERIC(ibz_divides) +-#define ibz_finalize SQISIGN_NAMESPACE_GENERIC(ibz_finalize) +-#define ibz_gcd SQISIGN_NAMESPACE_GENERIC(ibz_gcd) +-#define ibz_get SQISIGN_NAMESPACE_GENERIC(ibz_get) +-#define ibz_init SQISIGN_NAMESPACE_GENERIC(ibz_init) +-#define ibz_invmod SQISIGN_NAMESPACE_GENERIC(ibz_invmod) +-#define ibz_is_even SQISIGN_NAMESPACE_GENERIC(ibz_is_even) +-#define ibz_is_odd SQISIGN_NAMESPACE_GENERIC(ibz_is_odd) +-#define ibz_is_one SQISIGN_NAMESPACE_GENERIC(ibz_is_one) +-#define ibz_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_is_zero) +-#define ibz_legendre SQISIGN_NAMESPACE_GENERIC(ibz_legendre) +-#define ibz_mod SQISIGN_NAMESPACE_GENERIC(ibz_mod) +-#define ibz_mod_ui SQISIGN_NAMESPACE_GENERIC(ibz_mod_ui) +-#define ibz_mul SQISIGN_NAMESPACE_GENERIC(ibz_mul) +-#define ibz_neg SQISIGN_NAMESPACE_GENERIC(ibz_neg) +-#define ibz_pow SQISIGN_NAMESPACE_GENERIC(ibz_pow) +-#define ibz_pow_mod SQISIGN_NAMESPACE_GENERIC(ibz_pow_mod) +-#define ibz_print SQISIGN_NAMESPACE_GENERIC(ibz_print) +-#define ibz_probab_prime SQISIGN_NAMESPACE_GENERIC(ibz_probab_prime) +-#define ibz_rand_interval SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval) +-#define ibz_rand_interval_bits SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_bits) +-#define ibz_rand_interval_i SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_i) +-#define ibz_rand_interval_minm_m SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_minm_m) +-#define ibz_set SQISIGN_NAMESPACE_GENERIC(ibz_set) +-#define ibz_set_from_str SQISIGN_NAMESPACE_GENERIC(ibz_set_from_str) +-#define ibz_size_in_base SQISIGN_NAMESPACE_GENERIC(ibz_size_in_base) +-#define ibz_sqrt SQISIGN_NAMESPACE_GENERIC(ibz_sqrt) +-#define ibz_sqrt_floor SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_floor) +-#define ibz_sqrt_mod_p SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_mod_p) +-#define ibz_sub SQISIGN_NAMESPACE_GENERIC(ibz_sub) +-#define ibz_swap SQISIGN_NAMESPACE_GENERIC(ibz_swap) +-#define ibz_to_digits SQISIGN_NAMESPACE_GENERIC(ibz_to_digits) +-#define ibz_two_adic SQISIGN_NAMESPACE_GENERIC(ibz_two_adic) ++#define ibz_abs SQISIGN_NAMESPACE(ibz_abs) ++#define ibz_add SQISIGN_NAMESPACE(ibz_add) ++#define ibz_bitsize SQISIGN_NAMESPACE(ibz_bitsize) ++#define ibz_cmp SQISIGN_NAMESPACE(ibz_cmp) ++#define ibz_cmp_int32 SQISIGN_NAMESPACE(ibz_cmp_int32) ++#define ibz_const_one SQISIGN_NAMESPACE(ibz_const_one) ++#define ibz_const_three SQISIGN_NAMESPACE(ibz_const_three) ++#define ibz_const_two SQISIGN_NAMESPACE(ibz_const_two) ++#define ibz_const_zero SQISIGN_NAMESPACE(ibz_const_zero) ++#define ibz_convert_to_str SQISIGN_NAMESPACE(ibz_convert_to_str) ++#define ibz_copy SQISIGN_NAMESPACE(ibz_copy) ++#define ibz_copy_digits SQISIGN_NAMESPACE(ibz_copy_digits) ++#define ibz_div SQISIGN_NAMESPACE(ibz_div) ++#define 
ibz_div_2exp SQISIGN_NAMESPACE(ibz_div_2exp) ++#define ibz_div_floor SQISIGN_NAMESPACE(ibz_div_floor) ++#define ibz_divides SQISIGN_NAMESPACE(ibz_divides) ++#define ibz_finalize SQISIGN_NAMESPACE(ibz_finalize) ++#define ibz_gcd SQISIGN_NAMESPACE(ibz_gcd) ++#define ibz_get SQISIGN_NAMESPACE(ibz_get) ++#define ibz_init SQISIGN_NAMESPACE(ibz_init) ++#define ibz_invmod SQISIGN_NAMESPACE(ibz_invmod) ++#define ibz_is_even SQISIGN_NAMESPACE(ibz_is_even) ++#define ibz_is_odd SQISIGN_NAMESPACE(ibz_is_odd) ++#define ibz_is_one SQISIGN_NAMESPACE(ibz_is_one) ++#define ibz_is_zero SQISIGN_NAMESPACE(ibz_is_zero) ++#define ibz_legendre SQISIGN_NAMESPACE(ibz_legendre) ++#define ibz_mod SQISIGN_NAMESPACE(ibz_mod) ++#define ibz_mod_ui SQISIGN_NAMESPACE(ibz_mod_ui) ++#define ibz_mul SQISIGN_NAMESPACE(ibz_mul) ++#define ibz_neg SQISIGN_NAMESPACE(ibz_neg) ++#define ibz_pow SQISIGN_NAMESPACE(ibz_pow) ++#define ibz_pow_mod SQISIGN_NAMESPACE(ibz_pow_mod) ++#define ibz_print SQISIGN_NAMESPACE(ibz_print) ++#define ibz_probab_prime SQISIGN_NAMESPACE(ibz_probab_prime) ++#define ibz_rand_interval SQISIGN_NAMESPACE(ibz_rand_interval) ++#define ibz_rand_interval_bits SQISIGN_NAMESPACE(ibz_rand_interval_bits) ++#define ibz_rand_interval_i SQISIGN_NAMESPACE(ibz_rand_interval_i) ++#define ibz_rand_interval_minm_m SQISIGN_NAMESPACE(ibz_rand_interval_minm_m) ++#define ibz_set SQISIGN_NAMESPACE(ibz_set) ++#define ibz_set_from_str SQISIGN_NAMESPACE(ibz_set_from_str) ++#define ibz_size_in_base SQISIGN_NAMESPACE(ibz_size_in_base) ++#define ibz_sqrt SQISIGN_NAMESPACE(ibz_sqrt) ++#define ibz_sqrt_floor SQISIGN_NAMESPACE(ibz_sqrt_floor) ++#define ibz_sqrt_mod_p SQISIGN_NAMESPACE(ibz_sqrt_mod_p) ++#define ibz_sub SQISIGN_NAMESPACE(ibz_sub) ++#define ibz_swap SQISIGN_NAMESPACE(ibz_swap) ++#define ibz_to_digits SQISIGN_NAMESPACE(ibz_to_digits) ++#define ibz_two_adic SQISIGN_NAMESPACE(ibz_two_adic) + + // Namespacing symbols exported from integers.c: + #undef ibz_cornacchia_prime + #undef ibz_generate_random_prime + +-#define ibz_cornacchia_prime SQISIGN_NAMESPACE_GENERIC(ibz_cornacchia_prime) +-#define ibz_generate_random_prime SQISIGN_NAMESPACE_GENERIC(ibz_generate_random_prime) ++#define ibz_cornacchia_prime SQISIGN_NAMESPACE(ibz_cornacchia_prime) ++#define ibz_generate_random_prime SQISIGN_NAMESPACE(ibz_generate_random_prime) + + // Namespacing symbols exported from isog_chains.c: + #undef ec_eval_even +@@ -763,15 +765,15 @@ + #undef quat_lattice_lll + #undef quat_lll_core + +-#define quat_lattice_lll SQISIGN_NAMESPACE_GENERIC(quat_lattice_lll) +-#define quat_lll_core SQISIGN_NAMESPACE_GENERIC(quat_lll_core) ++#define quat_lattice_lll SQISIGN_NAMESPACE(quat_lattice_lll) ++#define quat_lll_core SQISIGN_NAMESPACE(quat_lll_core) + + // Namespacing symbols exported from lat_ball.c: + #undef quat_lattice_bound_parallelogram + #undef quat_lattice_sample_from_ball + +-#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE_GENERIC(quat_lattice_bound_parallelogram) +-#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE_GENERIC(quat_lattice_sample_from_ball) ++#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE(quat_lattice_bound_parallelogram) ++#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE(quat_lattice_sample_from_ball) + + // Namespacing symbols exported from lattice.c: + #undef quat_lattice_add +@@ -789,29 +791,29 @@ + #undef quat_lattice_mul + #undef quat_lattice_reduce_denom + +-#define quat_lattice_add SQISIGN_NAMESPACE_GENERIC(quat_lattice_add) +-#define quat_lattice_alg_elem_mul 
SQISIGN_NAMESPACE_GENERIC(quat_lattice_alg_elem_mul) +-#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_conjugate_without_hnf) +-#define quat_lattice_contains SQISIGN_NAMESPACE_GENERIC(quat_lattice_contains) +-#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_dual_without_hnf) +-#define quat_lattice_equal SQISIGN_NAMESPACE_GENERIC(quat_lattice_equal) +-#define quat_lattice_gram SQISIGN_NAMESPACE_GENERIC(quat_lattice_gram) +-#define quat_lattice_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_hnf) +-#define quat_lattice_inclusion SQISIGN_NAMESPACE_GENERIC(quat_lattice_inclusion) +-#define quat_lattice_index SQISIGN_NAMESPACE_GENERIC(quat_lattice_index) +-#define quat_lattice_intersect SQISIGN_NAMESPACE_GENERIC(quat_lattice_intersect) +-#define quat_lattice_mat_alg_coord_mul_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_mat_alg_coord_mul_without_hnf) +-#define quat_lattice_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_mul) +-#define quat_lattice_reduce_denom SQISIGN_NAMESPACE_GENERIC(quat_lattice_reduce_denom) ++#define quat_lattice_add SQISIGN_NAMESPACE(quat_lattice_add) ++#define quat_lattice_alg_elem_mul SQISIGN_NAMESPACE(quat_lattice_alg_elem_mul) ++#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE(quat_lattice_conjugate_without_hnf) ++#define quat_lattice_contains SQISIGN_NAMESPACE(quat_lattice_contains) ++#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE(quat_lattice_dual_without_hnf) ++#define quat_lattice_equal SQISIGN_NAMESPACE(quat_lattice_equal) ++#define quat_lattice_gram SQISIGN_NAMESPACE(quat_lattice_gram) ++#define quat_lattice_hnf SQISIGN_NAMESPACE(quat_lattice_hnf) ++#define quat_lattice_inclusion SQISIGN_NAMESPACE(quat_lattice_inclusion) ++#define quat_lattice_index SQISIGN_NAMESPACE(quat_lattice_index) ++#define quat_lattice_intersect SQISIGN_NAMESPACE(quat_lattice_intersect) ++#define quat_lattice_mat_alg_coord_mul_without_hnf SQISIGN_NAMESPACE(quat_lattice_mat_alg_coord_mul_without_hnf) ++#define quat_lattice_mul SQISIGN_NAMESPACE(quat_lattice_mul) ++#define quat_lattice_reduce_denom SQISIGN_NAMESPACE(quat_lattice_reduce_denom) + + // Namespacing symbols exported from lll_applications.c: + #undef quat_lideal_lideal_mul_reduced + #undef quat_lideal_prime_norm_reduced_equivalent + #undef quat_lideal_reduce_basis + +-#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE_GENERIC(quat_lideal_lideal_mul_reduced) +-#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE_GENERIC(quat_lideal_prime_norm_reduced_equivalent) +-#define quat_lideal_reduce_basis SQISIGN_NAMESPACE_GENERIC(quat_lideal_reduce_basis) ++#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE(quat_lideal_lideal_mul_reduced) ++#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE(quat_lideal_prime_norm_reduced_equivalent) ++#define quat_lideal_reduce_basis SQISIGN_NAMESPACE(quat_lideal_reduce_basis) + + // Namespacing symbols exported from lll_verification.c: + #undef ibq_vec_4_copy_ibz +@@ -820,18 +822,18 @@ + #undef quat_lll_set_ibq_parameters + #undef quat_lll_verify + +-#define ibq_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_copy_ibz) +-#define quat_lll_bilinear SQISIGN_NAMESPACE_GENERIC(quat_lll_bilinear) +-#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE_GENERIC(quat_lll_gram_schmidt_transposed_with_ibq) +-#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE_GENERIC(quat_lll_set_ibq_parameters) +-#define quat_lll_verify SQISIGN_NAMESPACE_GENERIC(quat_lll_verify) ++#define 
ibq_vec_4_copy_ibz SQISIGN_NAMESPACE(ibq_vec_4_copy_ibz) ++#define quat_lll_bilinear SQISIGN_NAMESPACE(quat_lll_bilinear) ++#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE(quat_lll_gram_schmidt_transposed_with_ibq) ++#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE(quat_lll_set_ibq_parameters) ++#define quat_lll_verify SQISIGN_NAMESPACE(quat_lll_verify) + + // Namespacing symbols exported from mem.c: + #undef sqisign_secure_clear + #undef sqisign_secure_free + +-#define sqisign_secure_clear SQISIGN_NAMESPACE_GENERIC(sqisign_secure_clear) +-#define sqisign_secure_free SQISIGN_NAMESPACE_GENERIC(sqisign_secure_free) ++#define sqisign_secure_clear SQISIGN_NAMESPACE(sqisign_secure_clear) ++#define sqisign_secure_free SQISIGN_NAMESPACE(sqisign_secure_free) + + // Namespacing symbols exported from mp.c: + #undef MUL +@@ -854,25 +856,25 @@ + #undef select_ct + #undef swap_ct + +-#define MUL SQISIGN_NAMESPACE_GENERIC(MUL) +-#define mp_add SQISIGN_NAMESPACE_GENERIC(mp_add) +-#define mp_compare SQISIGN_NAMESPACE_GENERIC(mp_compare) +-#define mp_copy SQISIGN_NAMESPACE_GENERIC(mp_copy) +-#define mp_inv_2e SQISIGN_NAMESPACE_GENERIC(mp_inv_2e) +-#define mp_invert_matrix SQISIGN_NAMESPACE_GENERIC(mp_invert_matrix) +-#define mp_is_one SQISIGN_NAMESPACE_GENERIC(mp_is_one) +-#define mp_is_zero SQISIGN_NAMESPACE_GENERIC(mp_is_zero) +-#define mp_mod_2exp SQISIGN_NAMESPACE_GENERIC(mp_mod_2exp) +-#define mp_mul SQISIGN_NAMESPACE_GENERIC(mp_mul) +-#define mp_mul2 SQISIGN_NAMESPACE_GENERIC(mp_mul2) +-#define mp_neg SQISIGN_NAMESPACE_GENERIC(mp_neg) +-#define mp_print SQISIGN_NAMESPACE_GENERIC(mp_print) +-#define mp_shiftl SQISIGN_NAMESPACE_GENERIC(mp_shiftl) +-#define mp_shiftr SQISIGN_NAMESPACE_GENERIC(mp_shiftr) +-#define mp_sub SQISIGN_NAMESPACE_GENERIC(mp_sub) +-#define multiple_mp_shiftl SQISIGN_NAMESPACE_GENERIC(multiple_mp_shiftl) +-#define select_ct SQISIGN_NAMESPACE_GENERIC(select_ct) +-#define swap_ct SQISIGN_NAMESPACE_GENERIC(swap_ct) ++#define MUL SQISIGN_NAMESPACE(MUL) ++#define mp_add SQISIGN_NAMESPACE(mp_add) ++#define mp_compare SQISIGN_NAMESPACE(mp_compare) ++#define mp_copy SQISIGN_NAMESPACE(mp_copy) ++#define mp_inv_2e SQISIGN_NAMESPACE(mp_inv_2e) ++#define mp_invert_matrix SQISIGN_NAMESPACE(mp_invert_matrix) ++#define mp_is_one SQISIGN_NAMESPACE(mp_is_one) ++#define mp_is_zero SQISIGN_NAMESPACE(mp_is_zero) ++#define mp_mod_2exp SQISIGN_NAMESPACE(mp_mod_2exp) ++#define mp_mul SQISIGN_NAMESPACE(mp_mul) ++#define mp_mul2 SQISIGN_NAMESPACE(mp_mul2) ++#define mp_neg SQISIGN_NAMESPACE(mp_neg) ++#define mp_print SQISIGN_NAMESPACE(mp_print) ++#define mp_shiftl SQISIGN_NAMESPACE(mp_shiftl) ++#define mp_shiftr SQISIGN_NAMESPACE(mp_shiftr) ++#define mp_sub SQISIGN_NAMESPACE(mp_sub) ++#define multiple_mp_shiftl SQISIGN_NAMESPACE(multiple_mp_shiftl) ++#define select_ct SQISIGN_NAMESPACE(select_ct) ++#define swap_ct SQISIGN_NAMESPACE(swap_ct) + + // Namespacing symbols exported from normeq.c: + #undef quat_change_to_O0_basis +@@ -882,12 +884,12 @@ + #undef quat_represent_integer + #undef quat_sampling_random_ideal_O0_given_norm + +-#define quat_change_to_O0_basis SQISIGN_NAMESPACE_GENERIC(quat_change_to_O0_basis) +-#define quat_lattice_O0_set SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set) +-#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set_extremal) +-#define quat_order_elem_create SQISIGN_NAMESPACE_GENERIC(quat_order_elem_create) +-#define quat_represent_integer SQISIGN_NAMESPACE_GENERIC(quat_represent_integer) +-#define 
quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE_GENERIC(quat_sampling_random_ideal_O0_given_norm) ++#define quat_change_to_O0_basis SQISIGN_NAMESPACE(quat_change_to_O0_basis) ++#define quat_lattice_O0_set SQISIGN_NAMESPACE(quat_lattice_O0_set) ++#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE(quat_lattice_O0_set_extremal) ++#define quat_order_elem_create SQISIGN_NAMESPACE(quat_order_elem_create) ++#define quat_represent_integer SQISIGN_NAMESPACE(quat_represent_integer) ++#define quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE(quat_sampling_random_ideal_O0_given_norm) + + // Namespacing symbols exported from printer.c: + #undef ibz_mat_2x2_print +@@ -899,23 +901,23 @@ + #undef quat_lattice_print + #undef quat_left_ideal_print + +-#define ibz_mat_2x2_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_print) +-#define ibz_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_print) +-#define ibz_vec_2_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_print) +-#define ibz_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_print) +-#define quat_alg_elem_print SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_print) +-#define quat_alg_print SQISIGN_NAMESPACE_GENERIC(quat_alg_print) +-#define quat_lattice_print SQISIGN_NAMESPACE_GENERIC(quat_lattice_print) +-#define quat_left_ideal_print SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_print) ++#define ibz_mat_2x2_print SQISIGN_NAMESPACE(ibz_mat_2x2_print) ++#define ibz_mat_4x4_print SQISIGN_NAMESPACE(ibz_mat_4x4_print) ++#define ibz_vec_2_print SQISIGN_NAMESPACE(ibz_vec_2_print) ++#define ibz_vec_4_print SQISIGN_NAMESPACE(ibz_vec_4_print) ++#define quat_alg_elem_print SQISIGN_NAMESPACE(quat_alg_elem_print) ++#define quat_alg_print SQISIGN_NAMESPACE(quat_alg_print) ++#define quat_lattice_print SQISIGN_NAMESPACE(quat_lattice_print) ++#define quat_left_ideal_print SQISIGN_NAMESPACE(quat_left_ideal_print) + + // Namespacing symbols exported from random_input_generation.c: + #undef quat_test_input_random_ideal_generation + #undef quat_test_input_random_ideal_lattice_generation + #undef quat_test_input_random_lattice_generation + +-#define quat_test_input_random_ideal_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_generation) +-#define quat_test_input_random_ideal_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_lattice_generation) +-#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_lattice_generation) ++#define quat_test_input_random_ideal_generation SQISIGN_NAMESPACE(quat_test_input_random_ideal_generation) ++#define quat_test_input_random_ideal_lattice_generation SQISIGN_NAMESPACE(quat_test_input_random_ideal_lattice_generation) ++#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE(quat_test_input_random_lattice_generation) + + // Namespacing symbols exported from rationals.c: + #undef ibq_abs +@@ -941,28 +943,28 @@ + #undef ibq_vec_4_init + #undef ibq_vec_4_print + +-#define ibq_abs SQISIGN_NAMESPACE_GENERIC(ibq_abs) +-#define ibq_add SQISIGN_NAMESPACE_GENERIC(ibq_add) +-#define ibq_cmp SQISIGN_NAMESPACE_GENERIC(ibq_cmp) +-#define ibq_copy SQISIGN_NAMESPACE_GENERIC(ibq_copy) +-#define ibq_finalize SQISIGN_NAMESPACE_GENERIC(ibq_finalize) +-#define ibq_init SQISIGN_NAMESPACE_GENERIC(ibq_init) +-#define ibq_inv SQISIGN_NAMESPACE_GENERIC(ibq_inv) +-#define ibq_is_ibz SQISIGN_NAMESPACE_GENERIC(ibq_is_ibz) +-#define ibq_is_one SQISIGN_NAMESPACE_GENERIC(ibq_is_one) +-#define ibq_is_zero SQISIGN_NAMESPACE_GENERIC(ibq_is_zero) +-#define ibq_mat_4x4_finalize 
SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_finalize) +-#define ibq_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_init) +-#define ibq_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_print) +-#define ibq_mul SQISIGN_NAMESPACE_GENERIC(ibq_mul) +-#define ibq_neg SQISIGN_NAMESPACE_GENERIC(ibq_neg) +-#define ibq_reduce SQISIGN_NAMESPACE_GENERIC(ibq_reduce) +-#define ibq_set SQISIGN_NAMESPACE_GENERIC(ibq_set) +-#define ibq_sub SQISIGN_NAMESPACE_GENERIC(ibq_sub) +-#define ibq_to_ibz SQISIGN_NAMESPACE_GENERIC(ibq_to_ibz) +-#define ibq_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_finalize) +-#define ibq_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_init) +-#define ibq_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_print) ++#define ibq_abs SQISIGN_NAMESPACE(ibq_abs) ++#define ibq_add SQISIGN_NAMESPACE(ibq_add) ++#define ibq_cmp SQISIGN_NAMESPACE(ibq_cmp) ++#define ibq_copy SQISIGN_NAMESPACE(ibq_copy) ++#define ibq_finalize SQISIGN_NAMESPACE(ibq_finalize) ++#define ibq_init SQISIGN_NAMESPACE(ibq_init) ++#define ibq_inv SQISIGN_NAMESPACE(ibq_inv) ++#define ibq_is_ibz SQISIGN_NAMESPACE(ibq_is_ibz) ++#define ibq_is_one SQISIGN_NAMESPACE(ibq_is_one) ++#define ibq_is_zero SQISIGN_NAMESPACE(ibq_is_zero) ++#define ibq_mat_4x4_finalize SQISIGN_NAMESPACE(ibq_mat_4x4_finalize) ++#define ibq_mat_4x4_init SQISIGN_NAMESPACE(ibq_mat_4x4_init) ++#define ibq_mat_4x4_print SQISIGN_NAMESPACE(ibq_mat_4x4_print) ++#define ibq_mul SQISIGN_NAMESPACE(ibq_mul) ++#define ibq_neg SQISIGN_NAMESPACE(ibq_neg) ++#define ibq_reduce SQISIGN_NAMESPACE(ibq_reduce) ++#define ibq_set SQISIGN_NAMESPACE(ibq_set) ++#define ibq_sub SQISIGN_NAMESPACE(ibq_sub) ++#define ibq_to_ibz SQISIGN_NAMESPACE(ibq_to_ibz) ++#define ibq_vec_4_finalize SQISIGN_NAMESPACE(ibq_vec_4_finalize) ++#define ibq_vec_4_init SQISIGN_NAMESPACE(ibq_vec_4_init) ++#define ibq_vec_4_print SQISIGN_NAMESPACE(ibq_vec_4_print) + + // Namespacing symbols exported from sign.c: + #undef protocols_sign diff --git a/src/sig/sqisign/CMakeLists.txt b/src/sig/sqisign/CMakeLists.txt index 592ab95cab..6d33b75fe1 100644 --- a/src/sig/sqisign/CMakeLists.txt +++ b/src/sig/sqisign/CMakeLists.txt @@ -6,56 +6,56 @@ set(_SQISIGN_OBJS "") if(OQS_ENABLE_SIG_sqisign_lvl1) - add_library(sqisign_lvl1_ref OBJECT sig_sqisign_lvl1.c the-sqisign_sqisign_lvl1_ref/aes_c.c the-sqisign_sqisign_lvl1_ref/algebra.c the-sqisign_sqisign_lvl1_ref/api.c the-sqisign_sqisign_lvl1_ref/basis.c the-sqisign_sqisign_lvl1_ref/biextension.c the-sqisign_sqisign_lvl1_ref/common.c the-sqisign_sqisign_lvl1_ref/dim2.c the-sqisign_sqisign_lvl1_ref/dim2id2iso.c the-sqisign_sqisign_lvl1_ref/dim4.c the-sqisign_sqisign_lvl1_ref/e0_basis.c the-sqisign_sqisign_lvl1_ref/ec.c the-sqisign_sqisign_lvl1_ref/ec_jac.c the-sqisign_sqisign_lvl1_ref/ec_params.c the-sqisign_sqisign_lvl1_ref/encode_signature.c the-sqisign_sqisign_lvl1_ref/encode_verification.c the-sqisign_sqisign_lvl1_ref/endomorphism_action.c the-sqisign_sqisign_lvl1_ref/finit.c the-sqisign_sqisign_lvl1_ref/fips202.c the-sqisign_sqisign_lvl1_ref/fp.c the-sqisign_sqisign_lvl1_ref/fp2.c the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c the-sqisign_sqisign_lvl1_ref/hd.c the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl1_ref/hnf.c the-sqisign_sqisign_lvl1_ref/hnf_internal.c the-sqisign_sqisign_lvl1_ref/ibz_division.c the-sqisign_sqisign_lvl1_ref/id2iso.c the-sqisign_sqisign_lvl1_ref/ideal.c the-sqisign_sqisign_lvl1_ref/intbig.c the-sqisign_sqisign_lvl1_ref/integers.c 
the-sqisign_sqisign_lvl1_ref/isog_chains.c the-sqisign_sqisign_lvl1_ref/keygen.c the-sqisign_sqisign_lvl1_ref/l2.c the-sqisign_sqisign_lvl1_ref/lat_ball.c the-sqisign_sqisign_lvl1_ref/lattice.c the-sqisign_sqisign_lvl1_ref/lll_applications.c the-sqisign_sqisign_lvl1_ref/mem.c the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl1_ref/mini-gmp.c the-sqisign_sqisign_lvl1_ref/mp.c the-sqisign_sqisign_lvl1_ref/normeq.c the-sqisign_sqisign_lvl1_ref/printer.c the-sqisign_sqisign_lvl1_ref/quaternion_data.c the-sqisign_sqisign_lvl1_ref/randombytes_ctrdrbg.c the-sqisign_sqisign_lvl1_ref/randombytes_system.c the-sqisign_sqisign_lvl1_ref/rationals.c the-sqisign_sqisign_lvl1_ref/sign.c the-sqisign_sqisign_lvl1_ref/sqisign.c the-sqisign_sqisign_lvl1_ref/theta_isogenies.c the-sqisign_sqisign_lvl1_ref/theta_structure.c the-sqisign_sqisign_lvl1_ref/tools.c the-sqisign_sqisign_lvl1_ref/torsion_constants.c the-sqisign_sqisign_lvl1_ref/verify.c the-sqisign_sqisign_lvl1_ref/xeval.c the-sqisign_sqisign_lvl1_ref/xisog.c) - target_compile_options(sqisign_lvl1_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl1 -DRANDOMBYTES_SYSTEM=ON) + add_library(sqisign_lvl1_ref OBJECT sig_sqisign_lvl1.c the-sqisign_sqisign_lvl1_ref/aes_c.c the-sqisign_sqisign_lvl1_ref/algebra.c the-sqisign_sqisign_lvl1_ref/api.c the-sqisign_sqisign_lvl1_ref/basis.c the-sqisign_sqisign_lvl1_ref/biextension.c the-sqisign_sqisign_lvl1_ref/common.c the-sqisign_sqisign_lvl1_ref/dim2.c the-sqisign_sqisign_lvl1_ref/dim2id2iso.c the-sqisign_sqisign_lvl1_ref/dim4.c the-sqisign_sqisign_lvl1_ref/e0_basis.c the-sqisign_sqisign_lvl1_ref/ec.c the-sqisign_sqisign_lvl1_ref/ec_jac.c the-sqisign_sqisign_lvl1_ref/ec_params.c the-sqisign_sqisign_lvl1_ref/encode_signature.c the-sqisign_sqisign_lvl1_ref/encode_verification.c the-sqisign_sqisign_lvl1_ref/endomorphism_action.c the-sqisign_sqisign_lvl1_ref/finit.c the-sqisign_sqisign_lvl1_ref/fips202.c the-sqisign_sqisign_lvl1_ref/fp.c the-sqisign_sqisign_lvl1_ref/fp2.c the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c the-sqisign_sqisign_lvl1_ref/hd.c the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl1_ref/hnf.c the-sqisign_sqisign_lvl1_ref/hnf_internal.c the-sqisign_sqisign_lvl1_ref/ibz_division.c the-sqisign_sqisign_lvl1_ref/id2iso.c the-sqisign_sqisign_lvl1_ref/ideal.c the-sqisign_sqisign_lvl1_ref/intbig.c the-sqisign_sqisign_lvl1_ref/integers.c the-sqisign_sqisign_lvl1_ref/isog_chains.c the-sqisign_sqisign_lvl1_ref/keygen.c the-sqisign_sqisign_lvl1_ref/l2.c the-sqisign_sqisign_lvl1_ref/lat_ball.c the-sqisign_sqisign_lvl1_ref/lattice.c the-sqisign_sqisign_lvl1_ref/lll_applications.c the-sqisign_sqisign_lvl1_ref/mem.c the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl1_ref/mini-gmp.c the-sqisign_sqisign_lvl1_ref/mp.c the-sqisign_sqisign_lvl1_ref/normeq.c the-sqisign_sqisign_lvl1_ref/printer.c the-sqisign_sqisign_lvl1_ref/quaternion_data.c the-sqisign_sqisign_lvl1_ref/randombytes_ctrdrbg.c the-sqisign_sqisign_lvl1_ref/rationals.c the-sqisign_sqisign_lvl1_ref/sign.c the-sqisign_sqisign_lvl1_ref/sqisign.c the-sqisign_sqisign_lvl1_ref/theta_isogenies.c the-sqisign_sqisign_lvl1_ref/theta_structure.c the-sqisign_sqisign_lvl1_ref/torsion_constants.c the-sqisign_sqisign_lvl1_ref/verify.c the-sqisign_sqisign_lvl1_ref/xeval.c the-sqisign_sqisign_lvl1_ref/xisog.c) + target_compile_options(sqisign_lvl1_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON 
-DSQISIGN_VARIANT=lvl1) target_include_directories(sqisign_lvl1_ref PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl1_ref) target_include_directories(sqisign_lvl1_ref PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) - target_compile_options(sqisign_lvl1_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl1 -DRANDOMBYTES_SYSTEM=ON) + target_compile_options(sqisign_lvl1_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl1) set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) endif() if(OQS_ENABLE_SIG_sqisign_lvl1_broadwell) - add_library(sqisign_lvl1_broadwell OBJECT the-sqisign_sqisign_lvl1_broadwell/aes_ni.c the-sqisign_sqisign_lvl1_broadwell/api.c the-sqisign_sqisign_lvl1_broadwell/basis.c the-sqisign_sqisign_lvl1_broadwell/biextension.c the-sqisign_sqisign_lvl1_broadwell/common.c the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.c the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl1_broadwell/e0_basis.c the-sqisign_sqisign_lvl1_broadwell/ec.c the-sqisign_sqisign_lvl1_broadwell/ec_jac.c the-sqisign_sqisign_lvl1_broadwell/ec_params.c the-sqisign_sqisign_lvl1_broadwell/encode_signature.c the-sqisign_sqisign_lvl1_broadwell/encode_verification.c the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl1_broadwell/fips202.c the-sqisign_sqisign_lvl1_broadwell/fp.c the-sqisign_sqisign_lvl1_broadwell/fp2.c the-sqisign_sqisign_lvl1_broadwell/fp_asm.S the-sqisign_sqisign_lvl1_broadwell/gf5248.c the-sqisign_sqisign_lvl1_broadwell/hd.c the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl1_broadwell/id2iso.c the-sqisign_sqisign_lvl1_broadwell/isog_chains.c the-sqisign_sqisign_lvl1_broadwell/keygen.c the-sqisign_sqisign_lvl1_broadwell/mem.c the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl1_broadwell/mini-gmp.c the-sqisign_sqisign_lvl1_broadwell/quaternion_data.c the-sqisign_sqisign_lvl1_broadwell/randombytes_ctrdrbg_aesni.c the-sqisign_sqisign_lvl1_broadwell/randombytes_system.c the-sqisign_sqisign_lvl1_broadwell/sign.c the-sqisign_sqisign_lvl1_broadwell/sqisign.c the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl1_broadwell/theta_structure.c the-sqisign_sqisign_lvl1_broadwell/tools.c the-sqisign_sqisign_lvl1_broadwell/torsion_constants.c the-sqisign_sqisign_lvl1_broadwell/vaes256_key_expansion.S the-sqisign_sqisign_lvl1_broadwell/verify.c the-sqisign_sqisign_lvl1_broadwell/xeval.c the-sqisign_sqisign_lvl1_broadwell/xisog.c) + add_library(sqisign_lvl1_broadwell OBJECT the-sqisign_sqisign_lvl1_broadwell/aes_ni.c the-sqisign_sqisign_lvl1_broadwell/api.c the-sqisign_sqisign_lvl1_broadwell/basis.c the-sqisign_sqisign_lvl1_broadwell/biextension.c the-sqisign_sqisign_lvl1_broadwell/common.c the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.c the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl1_broadwell/e0_basis.c the-sqisign_sqisign_lvl1_broadwell/ec.c the-sqisign_sqisign_lvl1_broadwell/ec_jac.c the-sqisign_sqisign_lvl1_broadwell/ec_params.c the-sqisign_sqisign_lvl1_broadwell/encode_signature.c the-sqisign_sqisign_lvl1_broadwell/encode_verification.c the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl1_broadwell/fips202.c the-sqisign_sqisign_lvl1_broadwell/fp.c the-sqisign_sqisign_lvl1_broadwell/fp2.c the-sqisign_sqisign_lvl1_broadwell/fp_asm.S the-sqisign_sqisign_lvl1_broadwell/gf5248.c the-sqisign_sqisign_lvl1_broadwell/hd.c 
the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl1_broadwell/id2iso.c the-sqisign_sqisign_lvl1_broadwell/isog_chains.c the-sqisign_sqisign_lvl1_broadwell/keygen.c the-sqisign_sqisign_lvl1_broadwell/mem.c the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl1_broadwell/mini-gmp.c the-sqisign_sqisign_lvl1_broadwell/quaternion_data.c the-sqisign_sqisign_lvl1_broadwell/sign.c the-sqisign_sqisign_lvl1_broadwell/sqisign.c the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl1_broadwell/theta_structure.c the-sqisign_sqisign_lvl1_broadwell/torsion_constants.c the-sqisign_sqisign_lvl1_broadwell/vaes256_key_expansion.S the-sqisign_sqisign_lvl1_broadwell/verify.c the-sqisign_sqisign_lvl1_broadwell/xeval.c the-sqisign_sqisign_lvl1_broadwell/xisog.c) target_include_directories(sqisign_lvl1_broadwell PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl1_broadwell) target_include_directories(sqisign_lvl1_broadwell PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) target_compile_options(sqisign_lvl1_broadwell PRIVATE -mavx2) - target_compile_options(sqisign_lvl1_broadwell PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_BROADWELL=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl1 -DRANDOMBYTES_SYSTEM=ON -DSQISIGN_GF_IMPL_BROADWELL) + target_compile_options(sqisign_lvl1_broadwell PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_BROADWELL=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl1 -DSQISIGN_GF_IMPL_BROADWELL) set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) endif() if(OQS_ENABLE_SIG_sqisign_lvl3) - add_library(sqisign_lvl3_ref OBJECT sig_sqisign_lvl3.c the-sqisign_sqisign_lvl3_ref/aes_c.c the-sqisign_sqisign_lvl3_ref/algebra.c the-sqisign_sqisign_lvl3_ref/api.c the-sqisign_sqisign_lvl3_ref/basis.c the-sqisign_sqisign_lvl3_ref/biextension.c the-sqisign_sqisign_lvl3_ref/common.c the-sqisign_sqisign_lvl3_ref/dim2.c the-sqisign_sqisign_lvl3_ref/dim2id2iso.c the-sqisign_sqisign_lvl3_ref/dim4.c the-sqisign_sqisign_lvl3_ref/e0_basis.c the-sqisign_sqisign_lvl3_ref/ec.c the-sqisign_sqisign_lvl3_ref/ec_jac.c the-sqisign_sqisign_lvl3_ref/ec_params.c the-sqisign_sqisign_lvl3_ref/encode_signature.c the-sqisign_sqisign_lvl3_ref/encode_verification.c the-sqisign_sqisign_lvl3_ref/endomorphism_action.c the-sqisign_sqisign_lvl3_ref/finit.c the-sqisign_sqisign_lvl3_ref/fips202.c the-sqisign_sqisign_lvl3_ref/fp.c the-sqisign_sqisign_lvl3_ref/fp2.c the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c the-sqisign_sqisign_lvl3_ref/hd.c the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl3_ref/hnf.c the-sqisign_sqisign_lvl3_ref/hnf_internal.c the-sqisign_sqisign_lvl3_ref/ibz_division.c the-sqisign_sqisign_lvl3_ref/id2iso.c the-sqisign_sqisign_lvl3_ref/ideal.c the-sqisign_sqisign_lvl3_ref/intbig.c the-sqisign_sqisign_lvl3_ref/integers.c the-sqisign_sqisign_lvl3_ref/isog_chains.c the-sqisign_sqisign_lvl3_ref/keygen.c the-sqisign_sqisign_lvl3_ref/l2.c the-sqisign_sqisign_lvl3_ref/lat_ball.c the-sqisign_sqisign_lvl3_ref/lattice.c the-sqisign_sqisign_lvl3_ref/lll_applications.c the-sqisign_sqisign_lvl3_ref/mem.c the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl3_ref/mini-gmp.c the-sqisign_sqisign_lvl3_ref/mp.c the-sqisign_sqisign_lvl3_ref/normeq.c the-sqisign_sqisign_lvl3_ref/printer.c the-sqisign_sqisign_lvl3_ref/quaternion_data.c the-sqisign_sqisign_lvl3_ref/randombytes_ctrdrbg.c the-sqisign_sqisign_lvl3_ref/randombytes_system.c the-sqisign_sqisign_lvl3_ref/rationals.c 
the-sqisign_sqisign_lvl3_ref/sign.c the-sqisign_sqisign_lvl3_ref/sqisign.c the-sqisign_sqisign_lvl3_ref/theta_isogenies.c the-sqisign_sqisign_lvl3_ref/theta_structure.c the-sqisign_sqisign_lvl3_ref/tools.c the-sqisign_sqisign_lvl3_ref/torsion_constants.c the-sqisign_sqisign_lvl3_ref/verify.c the-sqisign_sqisign_lvl3_ref/xeval.c the-sqisign_sqisign_lvl3_ref/xisog.c) - target_compile_options(sqisign_lvl3_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl3 -DRANDOMBYTES_SYSTEM=ON) + add_library(sqisign_lvl3_ref OBJECT sig_sqisign_lvl3.c the-sqisign_sqisign_lvl3_ref/aes_c.c the-sqisign_sqisign_lvl3_ref/algebra.c the-sqisign_sqisign_lvl3_ref/api.c the-sqisign_sqisign_lvl3_ref/basis.c the-sqisign_sqisign_lvl3_ref/biextension.c the-sqisign_sqisign_lvl3_ref/common.c the-sqisign_sqisign_lvl3_ref/dim2.c the-sqisign_sqisign_lvl3_ref/dim2id2iso.c the-sqisign_sqisign_lvl3_ref/dim4.c the-sqisign_sqisign_lvl3_ref/e0_basis.c the-sqisign_sqisign_lvl3_ref/ec.c the-sqisign_sqisign_lvl3_ref/ec_jac.c the-sqisign_sqisign_lvl3_ref/ec_params.c the-sqisign_sqisign_lvl3_ref/encode_signature.c the-sqisign_sqisign_lvl3_ref/encode_verification.c the-sqisign_sqisign_lvl3_ref/endomorphism_action.c the-sqisign_sqisign_lvl3_ref/finit.c the-sqisign_sqisign_lvl3_ref/fips202.c the-sqisign_sqisign_lvl3_ref/fp.c the-sqisign_sqisign_lvl3_ref/fp2.c the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c the-sqisign_sqisign_lvl3_ref/hd.c the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl3_ref/hnf.c the-sqisign_sqisign_lvl3_ref/hnf_internal.c the-sqisign_sqisign_lvl3_ref/ibz_division.c the-sqisign_sqisign_lvl3_ref/id2iso.c the-sqisign_sqisign_lvl3_ref/ideal.c the-sqisign_sqisign_lvl3_ref/intbig.c the-sqisign_sqisign_lvl3_ref/integers.c the-sqisign_sqisign_lvl3_ref/isog_chains.c the-sqisign_sqisign_lvl3_ref/keygen.c the-sqisign_sqisign_lvl3_ref/l2.c the-sqisign_sqisign_lvl3_ref/lat_ball.c the-sqisign_sqisign_lvl3_ref/lattice.c the-sqisign_sqisign_lvl3_ref/lll_applications.c the-sqisign_sqisign_lvl3_ref/mem.c the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl3_ref/mini-gmp.c the-sqisign_sqisign_lvl3_ref/mp.c the-sqisign_sqisign_lvl3_ref/normeq.c the-sqisign_sqisign_lvl3_ref/printer.c the-sqisign_sqisign_lvl3_ref/quaternion_data.c the-sqisign_sqisign_lvl3_ref/rationals.c the-sqisign_sqisign_lvl3_ref/sign.c the-sqisign_sqisign_lvl3_ref/sqisign.c the-sqisign_sqisign_lvl3_ref/theta_isogenies.c the-sqisign_sqisign_lvl3_ref/theta_structure.c the-sqisign_sqisign_lvl3_ref/torsion_constants.c the-sqisign_sqisign_lvl3_ref/verify.c the-sqisign_sqisign_lvl3_ref/xeval.c the-sqisign_sqisign_lvl3_ref/xisog.c) + target_compile_options(sqisign_lvl3_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl3) target_include_directories(sqisign_lvl3_ref PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl3_ref) target_include_directories(sqisign_lvl3_ref PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) - target_compile_options(sqisign_lvl3_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl3 -DRANDOMBYTES_SYSTEM=ON) + target_compile_options(sqisign_lvl3_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl3) set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) endif() if(OQS_ENABLE_SIG_sqisign_lvl3_broadwell) - add_library(sqisign_lvl3_broadwell OBJECT the-sqisign_sqisign_lvl3_broadwell/aes_ni.c 
the-sqisign_sqisign_lvl3_broadwell/api.c the-sqisign_sqisign_lvl3_broadwell/basis.c the-sqisign_sqisign_lvl3_broadwell/biextension.c the-sqisign_sqisign_lvl3_broadwell/common.c the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.c the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl3_broadwell/e0_basis.c the-sqisign_sqisign_lvl3_broadwell/ec.c the-sqisign_sqisign_lvl3_broadwell/ec_jac.c the-sqisign_sqisign_lvl3_broadwell/ec_params.c the-sqisign_sqisign_lvl3_broadwell/encode_signature.c the-sqisign_sqisign_lvl3_broadwell/encode_verification.c the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl3_broadwell/fips202.c the-sqisign_sqisign_lvl3_broadwell/fp.c the-sqisign_sqisign_lvl3_broadwell/fp2.c the-sqisign_sqisign_lvl3_broadwell/fp_asm.S the-sqisign_sqisign_lvl3_broadwell/gf65376.c the-sqisign_sqisign_lvl3_broadwell/hd.c the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl3_broadwell/id2iso.c the-sqisign_sqisign_lvl3_broadwell/isog_chains.c the-sqisign_sqisign_lvl3_broadwell/keygen.c the-sqisign_sqisign_lvl3_broadwell/mem.c the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl3_broadwell/mini-gmp.c the-sqisign_sqisign_lvl3_broadwell/quaternion_data.c the-sqisign_sqisign_lvl3_broadwell/randombytes_ctrdrbg_aesni.c the-sqisign_sqisign_lvl3_broadwell/randombytes_system.c the-sqisign_sqisign_lvl3_broadwell/sign.c the-sqisign_sqisign_lvl3_broadwell/sqisign.c the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl3_broadwell/theta_structure.c the-sqisign_sqisign_lvl3_broadwell/tools.c the-sqisign_sqisign_lvl3_broadwell/torsion_constants.c the-sqisign_sqisign_lvl3_broadwell/vaes256_key_expansion.S the-sqisign_sqisign_lvl3_broadwell/verify.c the-sqisign_sqisign_lvl3_broadwell/xeval.c the-sqisign_sqisign_lvl3_broadwell/xisog.c) + add_library(sqisign_lvl3_broadwell OBJECT the-sqisign_sqisign_lvl3_broadwell/aes_ni.c the-sqisign_sqisign_lvl3_broadwell/api.c the-sqisign_sqisign_lvl3_broadwell/basis.c the-sqisign_sqisign_lvl3_broadwell/biextension.c the-sqisign_sqisign_lvl3_broadwell/common.c the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.c the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl3_broadwell/e0_basis.c the-sqisign_sqisign_lvl3_broadwell/ec.c the-sqisign_sqisign_lvl3_broadwell/ec_jac.c the-sqisign_sqisign_lvl3_broadwell/ec_params.c the-sqisign_sqisign_lvl3_broadwell/encode_signature.c the-sqisign_sqisign_lvl3_broadwell/encode_verification.c the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl3_broadwell/fips202.c the-sqisign_sqisign_lvl3_broadwell/fp.c the-sqisign_sqisign_lvl3_broadwell/fp2.c the-sqisign_sqisign_lvl3_broadwell/fp_asm.S the-sqisign_sqisign_lvl3_broadwell/gf65376.c the-sqisign_sqisign_lvl3_broadwell/hd.c the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl3_broadwell/id2iso.c the-sqisign_sqisign_lvl3_broadwell/isog_chains.c the-sqisign_sqisign_lvl3_broadwell/keygen.c the-sqisign_sqisign_lvl3_broadwell/mem.c the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl3_broadwell/mini-gmp.c the-sqisign_sqisign_lvl3_broadwell/quaternion_data.c the-sqisign_sqisign_lvl3_broadwell/randombytes_ctrdrbg_aesni.c the-sqisign_sqisign_lvl3_broadwell/sign.c the-sqisign_sqisign_lvl3_broadwell/sqisign.c the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl3_broadwell/theta_structure.c the-sqisign_sqisign_lvl3_broadwell/torsion_constants.c 
the-sqisign_sqisign_lvl3_broadwell/vaes256_key_expansion.S the-sqisign_sqisign_lvl3_broadwell/verify.c the-sqisign_sqisign_lvl3_broadwell/xeval.c the-sqisign_sqisign_lvl3_broadwell/xisog.c) target_include_directories(sqisign_lvl3_broadwell PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl3_broadwell) target_include_directories(sqisign_lvl3_broadwell PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) target_compile_options(sqisign_lvl3_broadwell PRIVATE -mavx2) - target_compile_options(sqisign_lvl3_broadwell PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_BROADWELL=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl3 -DRANDOMBYTES_SYSTEM=ON -DSQISIGN_GF_IMPL_BROADWELL) + target_compile_options(sqisign_lvl3_broadwell PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_BROADWELL=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl3 -DSQISIGN_GF_IMPL_BROADWELL) set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) endif() if(OQS_ENABLE_SIG_sqisign_lvl5) - add_library(sqisign_lvl5_ref OBJECT sig_sqisign_lvl5.c the-sqisign_sqisign_lvl5_ref/aes_c.c the-sqisign_sqisign_lvl5_ref/algebra.c the-sqisign_sqisign_lvl5_ref/api.c the-sqisign_sqisign_lvl5_ref/basis.c the-sqisign_sqisign_lvl5_ref/biextension.c the-sqisign_sqisign_lvl5_ref/common.c the-sqisign_sqisign_lvl5_ref/dim2.c the-sqisign_sqisign_lvl5_ref/dim2id2iso.c the-sqisign_sqisign_lvl5_ref/dim4.c the-sqisign_sqisign_lvl5_ref/e0_basis.c the-sqisign_sqisign_lvl5_ref/ec.c the-sqisign_sqisign_lvl5_ref/ec_jac.c the-sqisign_sqisign_lvl5_ref/ec_params.c the-sqisign_sqisign_lvl5_ref/encode_signature.c the-sqisign_sqisign_lvl5_ref/encode_verification.c the-sqisign_sqisign_lvl5_ref/endomorphism_action.c the-sqisign_sqisign_lvl5_ref/finit.c the-sqisign_sqisign_lvl5_ref/fips202.c the-sqisign_sqisign_lvl5_ref/fp.c the-sqisign_sqisign_lvl5_ref/fp2.c the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c the-sqisign_sqisign_lvl5_ref/hd.c the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl5_ref/hnf.c the-sqisign_sqisign_lvl5_ref/hnf_internal.c the-sqisign_sqisign_lvl5_ref/ibz_division.c the-sqisign_sqisign_lvl5_ref/id2iso.c the-sqisign_sqisign_lvl5_ref/ideal.c the-sqisign_sqisign_lvl5_ref/intbig.c the-sqisign_sqisign_lvl5_ref/integers.c the-sqisign_sqisign_lvl5_ref/isog_chains.c the-sqisign_sqisign_lvl5_ref/keygen.c the-sqisign_sqisign_lvl5_ref/l2.c the-sqisign_sqisign_lvl5_ref/lat_ball.c the-sqisign_sqisign_lvl5_ref/lattice.c the-sqisign_sqisign_lvl5_ref/lll_applications.c the-sqisign_sqisign_lvl5_ref/mem.c the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl5_ref/mini-gmp.c the-sqisign_sqisign_lvl5_ref/mp.c the-sqisign_sqisign_lvl5_ref/normeq.c the-sqisign_sqisign_lvl5_ref/printer.c the-sqisign_sqisign_lvl5_ref/quaternion_data.c the-sqisign_sqisign_lvl5_ref/randombytes_ctrdrbg.c the-sqisign_sqisign_lvl5_ref/randombytes_system.c the-sqisign_sqisign_lvl5_ref/rationals.c the-sqisign_sqisign_lvl5_ref/sign.c the-sqisign_sqisign_lvl5_ref/sqisign.c the-sqisign_sqisign_lvl5_ref/theta_isogenies.c the-sqisign_sqisign_lvl5_ref/theta_structure.c the-sqisign_sqisign_lvl5_ref/tools.c the-sqisign_sqisign_lvl5_ref/torsion_constants.c the-sqisign_sqisign_lvl5_ref/verify.c the-sqisign_sqisign_lvl5_ref/xeval.c the-sqisign_sqisign_lvl5_ref/xisog.c) - target_compile_options(sqisign_lvl5_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl5 -DRANDOMBYTES_SYSTEM=ON) + add_library(sqisign_lvl5_ref OBJECT sig_sqisign_lvl5.c the-sqisign_sqisign_lvl5_ref/aes_c.c the-sqisign_sqisign_lvl5_ref/algebra.c 
the-sqisign_sqisign_lvl5_ref/api.c the-sqisign_sqisign_lvl5_ref/basis.c the-sqisign_sqisign_lvl5_ref/biextension.c the-sqisign_sqisign_lvl5_ref/common.c the-sqisign_sqisign_lvl5_ref/dim2.c the-sqisign_sqisign_lvl5_ref/dim2id2iso.c the-sqisign_sqisign_lvl5_ref/dim4.c the-sqisign_sqisign_lvl5_ref/e0_basis.c the-sqisign_sqisign_lvl5_ref/ec.c the-sqisign_sqisign_lvl5_ref/ec_jac.c the-sqisign_sqisign_lvl5_ref/ec_params.c the-sqisign_sqisign_lvl5_ref/encode_signature.c the-sqisign_sqisign_lvl5_ref/encode_verification.c the-sqisign_sqisign_lvl5_ref/endomorphism_action.c the-sqisign_sqisign_lvl5_ref/finit.c the-sqisign_sqisign_lvl5_ref/fips202.c the-sqisign_sqisign_lvl5_ref/fp.c the-sqisign_sqisign_lvl5_ref/fp2.c the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c the-sqisign_sqisign_lvl5_ref/hd.c the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl5_ref/hnf.c the-sqisign_sqisign_lvl5_ref/hnf_internal.c the-sqisign_sqisign_lvl5_ref/ibz_division.c the-sqisign_sqisign_lvl5_ref/id2iso.c the-sqisign_sqisign_lvl5_ref/ideal.c the-sqisign_sqisign_lvl5_ref/intbig.c the-sqisign_sqisign_lvl5_ref/integers.c the-sqisign_sqisign_lvl5_ref/isog_chains.c the-sqisign_sqisign_lvl5_ref/keygen.c the-sqisign_sqisign_lvl5_ref/l2.c the-sqisign_sqisign_lvl5_ref/lat_ball.c the-sqisign_sqisign_lvl5_ref/lattice.c the-sqisign_sqisign_lvl5_ref/lll_applications.c the-sqisign_sqisign_lvl5_ref/mem.c the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl5_ref/mini-gmp.c the-sqisign_sqisign_lvl5_ref/mp.c the-sqisign_sqisign_lvl5_ref/normeq.c the-sqisign_sqisign_lvl5_ref/printer.c the-sqisign_sqisign_lvl5_ref/quaternion_data.c the-sqisign_sqisign_lvl5_ref/rationals.c the-sqisign_sqisign_lvl5_ref/sign.c the-sqisign_sqisign_lvl5_ref/sqisign.c the-sqisign_sqisign_lvl5_ref/theta_isogenies.c the-sqisign_sqisign_lvl5_ref/theta_structure.c the-sqisign_sqisign_lvl5_ref/torsion_constants.c the-sqisign_sqisign_lvl5_ref/verify.c the-sqisign_sqisign_lvl5_ref/xeval.c the-sqisign_sqisign_lvl5_ref/xisog.c) + target_compile_options(sqisign_lvl5_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl5) target_include_directories(sqisign_lvl5_ref PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl5_ref) target_include_directories(sqisign_lvl5_ref PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) - target_compile_options(sqisign_lvl5_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl5 -DRANDOMBYTES_SYSTEM=ON) + target_compile_options(sqisign_lvl5_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl5) set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) endif() if(OQS_ENABLE_SIG_sqisign_lvl5_broadwell) - add_library(sqisign_lvl5_broadwell OBJECT the-sqisign_sqisign_lvl5_broadwell/aes_ni.c the-sqisign_sqisign_lvl5_broadwell/api.c the-sqisign_sqisign_lvl5_broadwell/basis.c the-sqisign_sqisign_lvl5_broadwell/biextension.c the-sqisign_sqisign_lvl5_broadwell/common.c the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.c the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl5_broadwell/e0_basis.c the-sqisign_sqisign_lvl5_broadwell/ec.c the-sqisign_sqisign_lvl5_broadwell/ec_jac.c the-sqisign_sqisign_lvl5_broadwell/ec_params.c the-sqisign_sqisign_lvl5_broadwell/encode_signature.c the-sqisign_sqisign_lvl5_broadwell/encode_verification.c the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl5_broadwell/fips202.c 
the-sqisign_sqisign_lvl5_broadwell/fp.c the-sqisign_sqisign_lvl5_broadwell/fp2.c the-sqisign_sqisign_lvl5_broadwell/fp_asm.S the-sqisign_sqisign_lvl5_broadwell/gf27500.c the-sqisign_sqisign_lvl5_broadwell/hd.c the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl5_broadwell/id2iso.c the-sqisign_sqisign_lvl5_broadwell/isog_chains.c the-sqisign_sqisign_lvl5_broadwell/keygen.c the-sqisign_sqisign_lvl5_broadwell/mem.c the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl5_broadwell/mini-gmp.c the-sqisign_sqisign_lvl5_broadwell/quaternion_data.c the-sqisign_sqisign_lvl5_broadwell/randombytes_ctrdrbg_aesni.c the-sqisign_sqisign_lvl5_broadwell/randombytes_system.c the-sqisign_sqisign_lvl5_broadwell/sign.c the-sqisign_sqisign_lvl5_broadwell/sqisign.c the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl5_broadwell/theta_structure.c the-sqisign_sqisign_lvl5_broadwell/tools.c the-sqisign_sqisign_lvl5_broadwell/torsion_constants.c the-sqisign_sqisign_lvl5_broadwell/vaes256_key_expansion.S the-sqisign_sqisign_lvl5_broadwell/verify.c the-sqisign_sqisign_lvl5_broadwell/xeval.c the-sqisign_sqisign_lvl5_broadwell/xisog.c) + add_library(sqisign_lvl5_broadwell OBJECT the-sqisign_sqisign_lvl5_broadwell/aes_ni.c the-sqisign_sqisign_lvl5_broadwell/api.c the-sqisign_sqisign_lvl5_broadwell/basis.c the-sqisign_sqisign_lvl5_broadwell/biextension.c the-sqisign_sqisign_lvl5_broadwell/common.c the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.c the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl5_broadwell/e0_basis.c the-sqisign_sqisign_lvl5_broadwell/ec.c the-sqisign_sqisign_lvl5_broadwell/ec_jac.c the-sqisign_sqisign_lvl5_broadwell/ec_params.c the-sqisign_sqisign_lvl5_broadwell/encode_signature.c the-sqisign_sqisign_lvl5_broadwell/encode_verification.c the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl5_broadwell/fips202.c the-sqisign_sqisign_lvl5_broadwell/fp.c the-sqisign_sqisign_lvl5_broadwell/fp2.c the-sqisign_sqisign_lvl5_broadwell/fp_asm.S the-sqisign_sqisign_lvl5_broadwell/gf27500.c the-sqisign_sqisign_lvl5_broadwell/hd.c the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl5_broadwell/id2iso.c the-sqisign_sqisign_lvl5_broadwell/isog_chains.c the-sqisign_sqisign_lvl5_broadwell/keygen.c the-sqisign_sqisign_lvl5_broadwell/mem.c the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl5_broadwell/mini-gmp.c the-sqisign_sqisign_lvl5_broadwell/quaternion_data.c the-sqisign_sqisign_lvl5_broadwell/sign.c the-sqisign_sqisign_lvl5_broadwell/sqisign.c the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl5_broadwell/theta_structure.c the-sqisign_sqisign_lvl5_broadwell/torsion_constants.c the-sqisign_sqisign_lvl5_broadwell/vaes256_key_expansion.S the-sqisign_sqisign_lvl5_broadwell/verify.c the-sqisign_sqisign_lvl5_broadwell/xeval.c the-sqisign_sqisign_lvl5_broadwell/xisog.c) target_include_directories(sqisign_lvl5_broadwell PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl5_broadwell) target_include_directories(sqisign_lvl5_broadwell PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) target_compile_options(sqisign_lvl5_broadwell PRIVATE -mavx2) - target_compile_options(sqisign_lvl5_broadwell PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_BROADWELL=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl5 -DRANDOMBYTES_SYSTEM=ON -DSQISIGN_GF_IMPL_BROADWELL) + target_compile_options(sqisign_lvl5_broadwell PUBLIC -DMINI_GMP=ON 
-DSQISIGN_BUILD_TYPE_BROADWELL=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl5 -DSQISIGN_GF_IMPL_BROADWELL) set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) endif() diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_ctrdrbg_aesni.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_ctrdrbg_aesni.c deleted file mode 100644 index 3fc67acfb6..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_ctrdrbg_aesni.c +++ /dev/null @@ -1,87 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 and Unknown -// -/* -NIST-developed software is provided by NIST as a public service. You may use, -copy, and distribute copies of the software in any medium, provided that you -keep intact this entire notice. You may improve, modify, and create derivative -works of the software or any portion of the software, and you may copy and -distribute such modifications or works. Modified works should carry a notice -stating that you changed the software and should note the date and nature of any -such change. Please explicitly acknowledge the National Institute of Standards -and Technology as the source of the software. - -NIST-developed software is expressly provided "AS IS." NIST MAKES NO WARRANTY OF -ANY KIND, EXPRESS, IMPLIED, IN FACT, OR ARISING BY OPERATION OF LAW, INCLUDING, -WITHOUT LIMITATION, THE IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE, NON-INFRINGEMENT, AND DATA ACCURACY. NIST NEITHER REPRESENTS -NOR WARRANTS THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR -ERROR-FREE, OR THAT ANY DEFECTS WILL BE CORRECTED. NIST DOES NOT WARRANT OR MAKE -ANY REPRESENTATIONS REGARDING THE USE OF THE SOFTWARE OR THE RESULTS THEREOF, -INCLUDING BUT NOT LIMITED TO THE CORRECTNESS, ACCURACY, RELIABILITY, OR -USEFULNESS OF THE SOFTWARE. - -You are solely responsible for determining the appropriateness of using and -distributing the software and you assume all risks associated with its use, -including but not limited to the risks and costs of program errors, compliance -with applicable laws, damage to or loss of data, programs or equipment, and the -unavailability or interruption of operation. This software is not intended to be -used in any situation where a failure could cause risk of injury or damage to -property. The software developed by NIST employees is not subject to copyright -protection within the United States. -*/ - -#include - -#include -#include "ctr_drbg.h" - -#ifdef ENABLE_CT_TESTING -#include -#endif - -#define RNG_SUCCESS 0 -#define RNG_BAD_MAXLEN -1 -#define RNG_BAD_OUTBUF -2 -#define RNG_BAD_REQ_LEN -3 - -CTR_DRBG_STATE drbg; - -#ifndef CTRDRBG_TEST_BENCH -static -#endif -void -randombytes_init_aes_ni(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength) { - (void)security_strength; // fixed to 256 - CTR_DRBG_init(&drbg, entropy_input, personalization_string, - (personalization_string == NULL) ? 
0 : CTR_DRBG_ENTROPY_LEN); -} - -#ifndef CTRDRBG_TEST_BENCH -static -#endif -int -randombytes_aes_ni(unsigned char *x, size_t xlen) { - CTR_DRBG_generate(&drbg, x, xlen, NULL, 0); - return RNG_SUCCESS; -} - -#ifdef RANDOMBYTES_AES_NI -SQISIGN_API -int randombytes(unsigned char *random_array, unsigned long long nbytes) { - int ret = randombytes_aes_ni(random_array, nbytes); -#ifdef ENABLE_CT_TESTING - VALGRIND_MAKE_MEM_UNDEFINED(random_array, ret); -#endif - return ret; -} - -SQISIGN_API -void randombytes_init(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength) { - randombytes_init_aes_ni(entropy_input, personalization_string, - security_strength); -} -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_system.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_system.c deleted file mode 100644 index 689c29b242..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_system.c +++ /dev/null @@ -1,431 +0,0 @@ -// SPDX-License-Identifier: MIT - -/* -The MIT License -Copyright (c) 2017 Daan Sprenkels -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. -*/ - -#include - -#ifdef ENABLE_CT_TESTING -#include -#endif - -// In the case that are compiling on linux, we need to define _GNU_SOURCE -// *before* randombytes.h is included. Otherwise SYS_getrandom will not be -// declared. -#if defined(__linux__) || defined(__GNU__) -#define _GNU_SOURCE -#endif /* defined(__linux__) || defined(__GNU__) */ - -#if defined(_WIN32) -/* Windows */ -#include -#include /* CryptAcquireContext, CryptGenRandom */ -#endif /* defined(_WIN32) */ - -/* wasi */ -#if defined(__wasi__) -#include -#endif - -/* kFreeBSD */ -#if defined(__FreeBSD_kernel__) && defined(__GLIBC__) -#define GNU_KFREEBSD -#endif - -#if defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) -/* Linux */ -// We would need to include , but not every target has access -// to the linux headers. We only need RNDGETENTCNT, so we instead inline it. -// RNDGETENTCNT is originally defined in `include/uapi/linux/random.h` in the -// linux repo. 
-#define RNDGETENTCNT 0x80045200 - -#include -#include -#include -#include -#include -#include -#include -#if (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && \ - ((__GLIBC__ > 2) || (__GLIBC_MINOR__ > 24)) -#define USE_GLIBC -#include -#endif /* (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && ((__GLIBC__ > 2) || \ - (__GLIBC_MINOR__ > 24)) */ -#include -#include -#include -#include - -// We need SSIZE_MAX as the maximum read len from /dev/urandom -#if !defined(SSIZE_MAX) -#define SSIZE_MAX (SIZE_MAX / 2 - 1) -#endif /* defined(SSIZE_MAX) */ - -#endif /* defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) */ - -#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) -/* Dragonfly, FreeBSD, NetBSD, OpenBSD (has arc4random) */ -#include -#if defined(BSD) -#include -#endif -/* GNU/Hurd defines BSD in sys/param.h which causes problems later */ -#if defined(__GNU__) -#undef BSD -#endif -#endif - -#if defined(__EMSCRIPTEN__) -#include -#include -#include -#include -#endif /* defined(__EMSCRIPTEN__) */ - -#if defined(_WIN32) -static int -randombytes_win32_randombytes(void *buf, size_t n) -{ - HCRYPTPROV ctx; - BOOL tmp; - DWORD to_read = 0; - const size_t MAX_DWORD = 0xFFFFFFFF; - - tmp = CryptAcquireContext(&ctx, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT); - if (tmp == FALSE) - return -1; - - while (n > 0) { - to_read = (DWORD)(n < MAX_DWORD ? n : MAX_DWORD); - tmp = CryptGenRandom(ctx, to_read, (BYTE *)buf); - if (tmp == FALSE) - return -1; - buf = ((char *)buf) + to_read; - n -= to_read; - } - - tmp = CryptReleaseContext(ctx, 0); - if (tmp == FALSE) - return -1; - - return 0; -} -#endif /* defined(_WIN32) */ - -#if defined(__wasi__) -static int -randombytes_wasi_randombytes(void *buf, size_t n) -{ - arc4random_buf(buf, n); - return 0; -} -#endif /* defined(__wasi__) */ - -#if (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || defined(SYS_getrandom)) -#if defined(USE_GLIBC) -// getrandom is declared in glibc. -#elif defined(SYS_getrandom) -static ssize_t -getrandom(void *buf, size_t buflen, unsigned int flags) -{ - return syscall(SYS_getrandom, buf, buflen, flags); -} -#endif - -static int -randombytes_linux_randombytes_getrandom(void *buf, size_t n) -{ - /* I have thought about using a separate PRF, seeded by getrandom, but - * it turns out that the performance of getrandom is good enough - * (250 MB/s on my laptop). - */ - size_t offset = 0, chunk; - int ret; - while (n > 0) { - /* getrandom does not allow chunks larger than 33554431 */ - chunk = n <= 33554431 ? 
n : 33554431; - do { - ret = getrandom((char *)buf + offset, chunk, 0); - } while (ret == -1 && errno == EINTR); - if (ret < 0) - return ret; - offset += ret; - n -= ret; - } - assert(n == 0); - return 0; -} -#endif /* (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || \ - defined(SYS_getrandom)) */ - -#if (defined(__linux__) || defined(GNU_KFREEBSD)) && !defined(SYS_getrandom) - -#if defined(__linux__) -static int -randombytes_linux_read_entropy_ioctl(int device, int *entropy) -{ - return ioctl(device, RNDGETENTCNT, entropy); -} - -static int -randombytes_linux_read_entropy_proc(FILE *stream, int *entropy) -{ - int retcode; - do { - rewind(stream); - retcode = fscanf(stream, "%d", entropy); - } while (retcode != 1 && errno == EINTR); - if (retcode != 1) { - return -1; - } - return 0; -} - -static int -randombytes_linux_wait_for_entropy(int device) -{ - /* We will block on /dev/random, because any increase in the OS' entropy - * level will unblock the request. I use poll here (as does libsodium), - * because we don't *actually* want to read from the device. */ - enum - { - IOCTL, - PROC - } strategy = IOCTL; - const int bits = 128; - struct pollfd pfd; - int fd; - FILE *proc_file; - int retcode, retcode_error = 0; // Used as return codes throughout this function - int entropy = 0; - - /* If the device has enough entropy already, we will want to return early */ - retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); - // printf("errno: %d (%s)\n", errno, strerror(errno)); - if (retcode != 0 && (errno == ENOTTY || errno == ENOSYS)) { - // The ioctl call on /dev/urandom has failed due to a - // - ENOTTY (unsupported action), or - // - ENOSYS (invalid ioctl; this happens on MIPS, see #22). - // - // We will fall back to reading from - // `/proc/sys/kernel/random/entropy_avail`. This less ideal, - // because it allocates a file descriptor, and it may not work - // in a chroot. But at this point it seems we have no better - // options left. 
- strategy = PROC; - // Open the entropy count file - proc_file = fopen("/proc/sys/kernel/random/entropy_avail", "r"); - if (proc_file == NULL) { - return -1; - } - } else if (retcode != 0) { - // Unrecoverable ioctl error - return -1; - } - if (entropy >= bits) { - return 0; - } - - do { - fd = open("/dev/random", O_RDONLY); - } while (fd == -1 && errno == EINTR); /* EAGAIN will not occur */ - if (fd == -1) { - /* Unrecoverable IO error */ - return -1; - } - - pfd.fd = fd; - pfd.events = POLLIN; - for (;;) { - retcode = poll(&pfd, 1, -1); - if (retcode == -1 && (errno == EINTR || errno == EAGAIN)) { - continue; - } else if (retcode == 1) { - if (strategy == IOCTL) { - retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); - } else if (strategy == PROC) { - retcode = randombytes_linux_read_entropy_proc(proc_file, &entropy); - } else { - return -1; // Unreachable - } - - if (retcode != 0) { - // Unrecoverable I/O error - retcode_error = retcode; - break; - } - if (entropy >= bits) { - break; - } - } else { - // Unreachable: poll() should only return -1 or 1 - retcode_error = -1; - break; - } - } - do { - retcode = close(fd); - } while (retcode == -1 && errno == EINTR); - if (strategy == PROC) { - do { - retcode = fclose(proc_file); - } while (retcode == -1 && errno == EINTR); - } - if (retcode_error != 0) { - return retcode_error; - } - return retcode; -} -#endif /* defined(__linux__) */ - -static int -randombytes_linux_randombytes_urandom(void *buf, size_t n) -{ - int fd; - size_t offset = 0, count; - ssize_t tmp; - do { - fd = open("/dev/urandom", O_RDONLY); - } while (fd == -1 && errno == EINTR); - if (fd == -1) - return -1; -#if defined(__linux__) - if (randombytes_linux_wait_for_entropy(fd) == -1) - return -1; -#endif - - while (n > 0) { - count = n <= SSIZE_MAX ? 
n : SSIZE_MAX; - tmp = read(fd, (char *)buf + offset, count); - if (tmp == -1 && (errno == EAGAIN || errno == EINTR)) { - continue; - } - if (tmp == -1) - return -1; /* Unrecoverable IO error */ - offset += tmp; - n -= tmp; - } - close(fd); - assert(n == 0); - return 0; -} -#endif /* defined(__linux__) && !defined(SYS_getrandom) */ - -#if defined(BSD) -static int -randombytes_bsd_randombytes(void *buf, size_t n) -{ - arc4random_buf(buf, n); - return 0; -} -#endif /* defined(BSD) */ - -#if defined(__EMSCRIPTEN__) -static int -randombytes_js_randombytes_nodejs(void *buf, size_t n) -{ - const int ret = EM_ASM_INT( - { - var crypto; - try { - crypto = require('crypto'); - } catch (error) { - return -2; - } - try { - writeArrayToMemory(crypto.randomBytes($1), $0); - return 0; - } catch (error) { - return -1; - } - }, - buf, - n); - switch (ret) { - case 0: - return 0; - case -1: - errno = EINVAL; - return -1; - case -2: - errno = ENOSYS; - return -1; - } - assert(false); // Unreachable -} -#endif /* defined(__EMSCRIPTEN__) */ - -SQISIGN_API -int -randombytes_select(unsigned char *buf, unsigned long long n) -{ -#if defined(__EMSCRIPTEN__) - return randombytes_js_randombytes_nodejs(buf, n); -#elif defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) -#if defined(USE_GLIBC) - /* Use getrandom system call */ - return randombytes_linux_randombytes_getrandom(buf, n); -#elif defined(SYS_getrandom) - /* Use getrandom system call */ - return randombytes_linux_randombytes_getrandom(buf, n); -#else - /* When we have enough entropy, we can read from /dev/urandom */ - return randombytes_linux_randombytes_urandom(buf, n); -#endif -#elif defined(BSD) - /* Use arc4random system call */ - return randombytes_bsd_randombytes(buf, n); -#elif defined(_WIN32) - /* Use windows API */ - return randombytes_win32_randombytes(buf, n); -#elif defined(__wasi__) - /* Use WASI */ - return randombytes_wasi_randombytes(buf, n); -#else -#error "randombytes(...) is not supported on this platform" -#endif -} - -#ifdef RANDOMBYTES_SYSTEM -SQISIGN_API -int -randombytes(unsigned char *x, unsigned long long xlen) -{ - - int ret = randombytes_select(x, (size_t)xlen); -#ifdef ENABLE_CT_TESTING - VALGRIND_MAKE_MEM_UNDEFINED(x, xlen); -#endif - return ret; -} - -SQISIGN_API -void -randombytes_init(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength) -{ - (void)entropy_input; - (void)personalization_string; - (void)security_strength; -} -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/rng.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/rng.h index 0a9ca0e465..d0861ac036 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/rng.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/rng.h @@ -3,41 +3,11 @@ #ifndef rng_h #define rng_h -#include +#include -/** - * Randombytes initialization. - * Initialization may be needed for some random number generators (e.g. CTR-DRBG). - * - * @param[in] entropy_input 48 bytes entropy input - * @param[in] personalization_string Personalization string - * @param[in] security_strength Security string - */ -SQISIGN_API -void randombytes_init(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength); - -/** - * Random byte generation using /dev/urandom. - * The caller is responsible to allocate sufficient memory to hold x. - * - * @param[out] x Memory to hold the random bytes. 
- * @param[in] xlen Number of random bytes to be generated - * @return int 0 on success, -1 otherwise - */ -SQISIGN_API -int randombytes_select(unsigned char *x, unsigned long long xlen); - -/** - * Random byte generation. - * The caller is responsible to allocate sufficient memory to hold x. - * - * @param[out] x Memory to hold the random bytes. - * @param[in] xlen Number of random bytes to be generated - * @return int 0 on success, -1 otherwise - */ -SQISIGN_API -int randombytes(unsigned char *x, unsigned long long xlen); +static int randombytes(unsigned char *x, unsigned long long xlen){ + OQS_randombytes(x, xlen); + return 0; +} #endif /* rng_h */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sig.h index 4c33510084..a5bc04e6e4 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sig.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sig.h @@ -17,7 +17,7 @@ * @param[out] sk SQIsign secret key * @return int status code */ -SQISIGN_API +SQISIGN_API int sqisign_keypair(unsigned char *pk, unsigned char *sk); /** @@ -34,12 +34,34 @@ int sqisign_keypair(unsigned char *pk, unsigned char *sk); * @param[in] sk Compacted secret key * @return int status code */ -SQISIGN_API +SQISIGN_API int sqisign_sign(unsigned char *sm, unsigned long long *smlen, const unsigned char *m, unsigned long long mlen, const unsigned char *sk); + +/** + * Alternate SQIsign signature generation. Used for liboqs compatibility. + * + * The implementation performs SQIsign.expandSK() + SQIsign.sign() in the SQIsign spec. + * The key provided is a compacted secret key. + * The caller is responsible to allocate sufficient memory to hold s. + * + * @param[out] s Signature + * @param[out] slen Pointer to the length of s + * @param[in] m Message to be signed + * @param[in] mlen Message length + * @param[in] sk Compacted secret key + * @return int status code + */ +SQISIGN_API +int +sqisign_sign_signature(unsigned char *s, + unsigned long long *slen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *sk); #endif /** @@ -75,11 +97,30 @@ int sqisign_open(unsigned char *m, * @param[in] pk Compacted public key * @return int 0 if verification succeeded, 1 otherwise. */ -SQISIGN_API +SQISIGN_API int sqisign_verify(const unsigned char *m, unsigned long long mlen, const unsigned char *sig, unsigned long long siglen, const unsigned char *pk); +/** + * Alternate SQIsign signature verification. Used for liboqs compatibility. + * + * If the signature verification succeeded, returns 0, otherwise 1. + * + * @param[in] sig Signature + * @param[in] siglen Length of sig + * @param[in] m Message whose signature is verified + * @param[in] mlen Length of m + * @param[in] pk Compacted public key + * @return int 0 if verification succeeded, 1 otherwise.
+ */ +SQISIGN_API +int +sqisign_verify_signature(const unsigned char *sig, + unsigned long long siglen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *pk); #endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign_namespace.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign_namespace.h index 007d2572b9..bbfe72c13b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign_namespace.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign_namespace.h @@ -18,12 +18,6 @@ #define PARAM_JOIN2(a, b) PARAM_JOIN2_(a, b) #define PARAM_NAME2(end, s) PARAM_JOIN2(end, s) -#ifndef DISABLE_NAMESPACING -#define SQISIGN_NAMESPACE_GENERIC(s) PARAM_NAME2(gen, s) -#else -#define SQISIGN_NAMESPACE_GENERIC(s) s -#endif - #if defined(SQISIGN_VARIANT) && !defined(DISABLE_NAMESPACING) #if defined(SQISIGN_BUILD_TYPE_REF) #define SQISIGN_NAMESPACE(s) PARAM_NAME3(ref, s) @@ -60,23 +54,23 @@ #undef quat_alg_scalar #undef quat_alg_sub -#define quat_alg_add SQISIGN_NAMESPACE_GENERIC(quat_alg_add) -#define quat_alg_conj SQISIGN_NAMESPACE_GENERIC(quat_alg_conj) -#define quat_alg_coord_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_coord_mul) -#define quat_alg_elem_copy SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy) -#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy_ibz) -#define quat_alg_elem_equal SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_equal) -#define quat_alg_elem_is_zero SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_is_zero) -#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_mul_by_scalar) -#define quat_alg_elem_set SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_set) -#define quat_alg_equal_denom SQISIGN_NAMESPACE_GENERIC(quat_alg_equal_denom) -#define quat_alg_init_set_ui SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set_ui) -#define quat_alg_make_primitive SQISIGN_NAMESPACE_GENERIC(quat_alg_make_primitive) -#define quat_alg_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_mul) -#define quat_alg_norm SQISIGN_NAMESPACE_GENERIC(quat_alg_norm) -#define quat_alg_normalize SQISIGN_NAMESPACE_GENERIC(quat_alg_normalize) -#define quat_alg_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_scalar) -#define quat_alg_sub SQISIGN_NAMESPACE_GENERIC(quat_alg_sub) +#define quat_alg_add SQISIGN_NAMESPACE(quat_alg_add) +#define quat_alg_conj SQISIGN_NAMESPACE(quat_alg_conj) +#define quat_alg_coord_mul SQISIGN_NAMESPACE(quat_alg_coord_mul) +#define quat_alg_elem_copy SQISIGN_NAMESPACE(quat_alg_elem_copy) +#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE(quat_alg_elem_copy_ibz) +#define quat_alg_elem_equal SQISIGN_NAMESPACE(quat_alg_elem_equal) +#define quat_alg_elem_is_zero SQISIGN_NAMESPACE(quat_alg_elem_is_zero) +#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE(quat_alg_elem_mul_by_scalar) +#define quat_alg_elem_set SQISIGN_NAMESPACE(quat_alg_elem_set) +#define quat_alg_equal_denom SQISIGN_NAMESPACE(quat_alg_equal_denom) +#define quat_alg_init_set_ui SQISIGN_NAMESPACE(quat_alg_init_set_ui) +#define quat_alg_make_primitive SQISIGN_NAMESPACE(quat_alg_make_primitive) +#define quat_alg_mul SQISIGN_NAMESPACE(quat_alg_mul) +#define quat_alg_norm SQISIGN_NAMESPACE(quat_alg_norm) +#define quat_alg_normalize SQISIGN_NAMESPACE(quat_alg_normalize) +#define quat_alg_scalar SQISIGN_NAMESPACE(quat_alg_scalar) +#define quat_alg_sub SQISIGN_NAMESPACE(quat_alg_sub) // Namespacing symbols exported from api.c: #undef crypto_sign @@ -134,14 +128,14 @@ #undef ibz_mat_2x2_set #undef ibz_vec_2_set -#define ibz_2x2_mul_mod 
SQISIGN_NAMESPACE_GENERIC(ibz_2x2_mul_mod) -#define ibz_mat_2x2_add SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_add) -#define ibz_mat_2x2_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_copy) -#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_det_from_ibz) -#define ibz_mat_2x2_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_eval) -#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_inv_mod) -#define ibz_mat_2x2_set SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_set) -#define ibz_vec_2_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_set) +#define ibz_2x2_mul_mod SQISIGN_NAMESPACE(ibz_2x2_mul_mod) +#define ibz_mat_2x2_add SQISIGN_NAMESPACE(ibz_mat_2x2_add) +#define ibz_mat_2x2_copy SQISIGN_NAMESPACE(ibz_mat_2x2_copy) +#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE(ibz_mat_2x2_det_from_ibz) +#define ibz_mat_2x2_eval SQISIGN_NAMESPACE(ibz_mat_2x2_eval) +#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE(ibz_mat_2x2_inv_mod) +#define ibz_mat_2x2_set SQISIGN_NAMESPACE(ibz_mat_2x2_set) +#define ibz_vec_2_set SQISIGN_NAMESPACE(ibz_vec_2_set) // Namespacing symbols exported from dim2id2iso.c: #undef dim2id2iso_arbitrary_isogeny_evaluation @@ -184,34 +178,34 @@ #undef ibz_vec_4_sub #undef quat_qf_eval -#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_mpm) -#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_pmp) -#define ibz_mat_4x4_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_copy) -#define ibz_mat_4x4_equal SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_equal) -#define ibz_mat_4x4_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval) -#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval_t) -#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_gcd) -#define ibz_mat_4x4_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_identity) -#define ibz_mat_4x4_inv_with_det_as_denom SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_inv_with_det_as_denom) -#define ibz_mat_4x4_is_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_identity) -#define ibz_mat_4x4_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_mul) -#define ibz_mat_4x4_negate SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_negate) -#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_div) -#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_mul) -#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_transpose) -#define ibz_mat_4x4_zero SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_zero) -#define ibz_vec_4_add SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_add) -#define ibz_vec_4_content SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_content) -#define ibz_vec_4_copy SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy) -#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_ibz) -#define ibz_vec_4_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_is_zero) -#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination) -#define ibz_vec_4_negate SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_negate) -#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_div) -#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul) -#define ibz_vec_4_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_set) -#define ibz_vec_4_sub SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_sub) -#define quat_qf_eval SQISIGN_NAMESPACE_GENERIC(quat_qf_eval) +#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE(ibz_inv_dim4_make_coeff_mpm) +#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE(ibz_inv_dim4_make_coeff_pmp) +#define ibz_mat_4x4_copy 
SQISIGN_NAMESPACE(ibz_mat_4x4_copy) +#define ibz_mat_4x4_equal SQISIGN_NAMESPACE(ibz_mat_4x4_equal) +#define ibz_mat_4x4_eval SQISIGN_NAMESPACE(ibz_mat_4x4_eval) +#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE(ibz_mat_4x4_eval_t) +#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE(ibz_mat_4x4_gcd) +#define ibz_mat_4x4_identity SQISIGN_NAMESPACE(ibz_mat_4x4_identity) +#define ibz_mat_4x4_inv_with_det_as_denom SQISIGN_NAMESPACE(ibz_mat_4x4_inv_with_det_as_denom) +#define ibz_mat_4x4_is_identity SQISIGN_NAMESPACE(ibz_mat_4x4_is_identity) +#define ibz_mat_4x4_mul SQISIGN_NAMESPACE(ibz_mat_4x4_mul) +#define ibz_mat_4x4_negate SQISIGN_NAMESPACE(ibz_mat_4x4_negate) +#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE(ibz_mat_4x4_scalar_div) +#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE(ibz_mat_4x4_scalar_mul) +#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE(ibz_mat_4x4_transpose) +#define ibz_mat_4x4_zero SQISIGN_NAMESPACE(ibz_mat_4x4_zero) +#define ibz_vec_4_add SQISIGN_NAMESPACE(ibz_vec_4_add) +#define ibz_vec_4_content SQISIGN_NAMESPACE(ibz_vec_4_content) +#define ibz_vec_4_copy SQISIGN_NAMESPACE(ibz_vec_4_copy) +#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE(ibz_vec_4_copy_ibz) +#define ibz_vec_4_is_zero SQISIGN_NAMESPACE(ibz_vec_4_is_zero) +#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE(ibz_vec_4_linear_combination) +#define ibz_vec_4_negate SQISIGN_NAMESPACE(ibz_vec_4_negate) +#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE(ibz_vec_4_scalar_div) +#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE(ibz_vec_4_scalar_mul) +#define ibz_vec_4_set SQISIGN_NAMESPACE(ibz_vec_4_set) +#define ibz_vec_4_sub SQISIGN_NAMESPACE(ibz_vec_4_sub) +#define quat_qf_eval SQISIGN_NAMESPACE(quat_qf_eval) // Namespacing symbols exported from ec.c: #undef cswap_points @@ -339,22 +333,22 @@ #undef quat_left_ideal_finalize #undef quat_left_ideal_init -#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_finalize) -#define ibz_mat_2x2_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_init) -#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_finalize) -#define ibz_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_init) -#define ibz_vec_2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_finalize) -#define ibz_vec_2_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_init) -#define ibz_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_finalize) -#define ibz_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_init) -#define quat_alg_elem_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_finalize) -#define quat_alg_elem_init SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_init) -#define quat_alg_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_finalize) -#define quat_alg_init_set SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set) -#define quat_lattice_finalize SQISIGN_NAMESPACE_GENERIC(quat_lattice_finalize) -#define quat_lattice_init SQISIGN_NAMESPACE_GENERIC(quat_lattice_init) -#define quat_left_ideal_finalize SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_finalize) -#define quat_left_ideal_init SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_init) +#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE(ibz_mat_2x2_finalize) +#define ibz_mat_2x2_init SQISIGN_NAMESPACE(ibz_mat_2x2_init) +#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE(ibz_mat_4x4_finalize) +#define ibz_mat_4x4_init SQISIGN_NAMESPACE(ibz_mat_4x4_init) +#define ibz_vec_2_finalize SQISIGN_NAMESPACE(ibz_vec_2_finalize) +#define ibz_vec_2_init SQISIGN_NAMESPACE(ibz_vec_2_init) +#define ibz_vec_4_finalize SQISIGN_NAMESPACE(ibz_vec_4_finalize) +#define ibz_vec_4_init SQISIGN_NAMESPACE(ibz_vec_4_init) 
+#define quat_alg_elem_finalize SQISIGN_NAMESPACE(quat_alg_elem_finalize) +#define quat_alg_elem_init SQISIGN_NAMESPACE(quat_alg_elem_init) +#define quat_alg_finalize SQISIGN_NAMESPACE(quat_alg_finalize) +#define quat_alg_init_set SQISIGN_NAMESPACE(quat_alg_init_set) +#define quat_lattice_finalize SQISIGN_NAMESPACE(quat_lattice_finalize) +#define quat_lattice_init SQISIGN_NAMESPACE(quat_lattice_init) +#define quat_left_ideal_finalize SQISIGN_NAMESPACE(quat_left_ideal_finalize) +#define quat_left_ideal_init SQISIGN_NAMESPACE(quat_left_ideal_init) // Namespacing symbols exported from fp.c: #undef fp_select @@ -567,11 +561,11 @@ #undef ibz_vec_4_linear_combination_mod #undef ibz_vec_4_scalar_mul_mod -#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_hnf) -#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE_GENERIC(ibz_mat_4xn_hnf_mod_core) -#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_mod) -#define ibz_vec_4_linear_combination_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination_mod) -#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul_mod) +#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE(ibz_mat_4x4_is_hnf) +#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE(ibz_mat_4xn_hnf_mod_core) +#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE(ibz_vec_4_copy_mod) +#define ibz_vec_4_linear_combination_mod SQISIGN_NAMESPACE(ibz_vec_4_linear_combination_mod) +#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE(ibz_vec_4_scalar_mul_mod) // Namespacing symbols exported from hnf_internal.c: #undef ibz_centered_mod @@ -579,15 +573,15 @@ #undef ibz_mod_not_zero #undef ibz_xgcd_with_u_not_0 -#define ibz_centered_mod SQISIGN_NAMESPACE_GENERIC(ibz_centered_mod) -#define ibz_conditional_assign SQISIGN_NAMESPACE_GENERIC(ibz_conditional_assign) -#define ibz_mod_not_zero SQISIGN_NAMESPACE_GENERIC(ibz_mod_not_zero) -#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE_GENERIC(ibz_xgcd_with_u_not_0) +#define ibz_centered_mod SQISIGN_NAMESPACE(ibz_centered_mod) +#define ibz_conditional_assign SQISIGN_NAMESPACE(ibz_conditional_assign) +#define ibz_mod_not_zero SQISIGN_NAMESPACE(ibz_mod_not_zero) +#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE(ibz_xgcd_with_u_not_0) // Namespacing symbols exported from ibz_division.c: #undef ibz_xgcd -#define ibz_xgcd SQISIGN_NAMESPACE_GENERIC(ibz_xgcd) +#define ibz_xgcd SQISIGN_NAMESPACE(ibz_xgcd) // Namespacing symbols exported from id2iso.c: #undef change_of_basis_matrix_tate @@ -624,22 +618,22 @@ #undef quat_order_discriminant #undef quat_order_is_maximal -#define quat_lideal_add SQISIGN_NAMESPACE_GENERIC(quat_lideal_add) -#define quat_lideal_class_gram SQISIGN_NAMESPACE_GENERIC(quat_lideal_class_gram) -#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_conjugate_without_hnf) -#define quat_lideal_copy SQISIGN_NAMESPACE_GENERIC(quat_lideal_copy) -#define quat_lideal_create SQISIGN_NAMESPACE_GENERIC(quat_lideal_create) -#define quat_lideal_create_principal SQISIGN_NAMESPACE_GENERIC(quat_lideal_create_principal) -#define quat_lideal_equals SQISIGN_NAMESPACE_GENERIC(quat_lideal_equals) -#define quat_lideal_generator SQISIGN_NAMESPACE_GENERIC(quat_lideal_generator) -#define quat_lideal_inter SQISIGN_NAMESPACE_GENERIC(quat_lideal_inter) -#define quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_inverse_lattice_without_hnf) -#define quat_lideal_mul SQISIGN_NAMESPACE_GENERIC(quat_lideal_mul) -#define quat_lideal_norm SQISIGN_NAMESPACE_GENERIC(quat_lideal_norm) -#define 
quat_lideal_right_order SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_order) -#define quat_lideal_right_transporter SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_transporter) -#define quat_order_discriminant SQISIGN_NAMESPACE_GENERIC(quat_order_discriminant) -#define quat_order_is_maximal SQISIGN_NAMESPACE_GENERIC(quat_order_is_maximal) +#define quat_lideal_add SQISIGN_NAMESPACE(quat_lideal_add) +#define quat_lideal_class_gram SQISIGN_NAMESPACE(quat_lideal_class_gram) +#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE(quat_lideal_conjugate_without_hnf) +#define quat_lideal_copy SQISIGN_NAMESPACE(quat_lideal_copy) +#define quat_lideal_create SQISIGN_NAMESPACE(quat_lideal_create) +#define quat_lideal_create_principal SQISIGN_NAMESPACE(quat_lideal_create_principal) +#define quat_lideal_equals SQISIGN_NAMESPACE(quat_lideal_equals) +#define quat_lideal_generator SQISIGN_NAMESPACE(quat_lideal_generator) +#define quat_lideal_inter SQISIGN_NAMESPACE(quat_lideal_inter) +#define quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE(quat_lideal_inverse_lattice_without_hnf) +#define quat_lideal_mul SQISIGN_NAMESPACE(quat_lideal_mul) +#define quat_lideal_norm SQISIGN_NAMESPACE(quat_lideal_norm) +#define quat_lideal_right_order SQISIGN_NAMESPACE(quat_lideal_right_order) +#define quat_lideal_right_transporter SQISIGN_NAMESPACE(quat_lideal_right_transporter) +#define quat_order_discriminant SQISIGN_NAMESPACE(quat_order_discriminant) +#define quat_order_is_maximal SQISIGN_NAMESPACE(quat_order_is_maximal) // Namespacing symbols exported from intbig.c: #undef ibz_abs @@ -647,6 +641,10 @@ #undef ibz_bitsize #undef ibz_cmp #undef ibz_cmp_int32 +#undef ibz_const_one +#undef ibz_const_three +#undef ibz_const_two +#undef ibz_const_zero #undef ibz_convert_to_str #undef ibz_copy #undef ibz_copy_digits @@ -687,57 +685,61 @@ #undef ibz_to_digits #undef ibz_two_adic -#define ibz_abs SQISIGN_NAMESPACE_GENERIC(ibz_abs) -#define ibz_add SQISIGN_NAMESPACE_GENERIC(ibz_add) -#define ibz_bitsize SQISIGN_NAMESPACE_GENERIC(ibz_bitsize) -#define ibz_cmp SQISIGN_NAMESPACE_GENERIC(ibz_cmp) -#define ibz_cmp_int32 SQISIGN_NAMESPACE_GENERIC(ibz_cmp_int32) -#define ibz_convert_to_str SQISIGN_NAMESPACE_GENERIC(ibz_convert_to_str) -#define ibz_copy SQISIGN_NAMESPACE_GENERIC(ibz_copy) -#define ibz_copy_digits SQISIGN_NAMESPACE_GENERIC(ibz_copy_digits) -#define ibz_div SQISIGN_NAMESPACE_GENERIC(ibz_div) -#define ibz_div_2exp SQISIGN_NAMESPACE_GENERIC(ibz_div_2exp) -#define ibz_div_floor SQISIGN_NAMESPACE_GENERIC(ibz_div_floor) -#define ibz_divides SQISIGN_NAMESPACE_GENERIC(ibz_divides) -#define ibz_finalize SQISIGN_NAMESPACE_GENERIC(ibz_finalize) -#define ibz_gcd SQISIGN_NAMESPACE_GENERIC(ibz_gcd) -#define ibz_get SQISIGN_NAMESPACE_GENERIC(ibz_get) -#define ibz_init SQISIGN_NAMESPACE_GENERIC(ibz_init) -#define ibz_invmod SQISIGN_NAMESPACE_GENERIC(ibz_invmod) -#define ibz_is_even SQISIGN_NAMESPACE_GENERIC(ibz_is_even) -#define ibz_is_odd SQISIGN_NAMESPACE_GENERIC(ibz_is_odd) -#define ibz_is_one SQISIGN_NAMESPACE_GENERIC(ibz_is_one) -#define ibz_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_is_zero) -#define ibz_legendre SQISIGN_NAMESPACE_GENERIC(ibz_legendre) -#define ibz_mod SQISIGN_NAMESPACE_GENERIC(ibz_mod) -#define ibz_mod_ui SQISIGN_NAMESPACE_GENERIC(ibz_mod_ui) -#define ibz_mul SQISIGN_NAMESPACE_GENERIC(ibz_mul) -#define ibz_neg SQISIGN_NAMESPACE_GENERIC(ibz_neg) -#define ibz_pow SQISIGN_NAMESPACE_GENERIC(ibz_pow) -#define ibz_pow_mod SQISIGN_NAMESPACE_GENERIC(ibz_pow_mod) -#define ibz_print SQISIGN_NAMESPACE_GENERIC(ibz_print) 
-#define ibz_probab_prime SQISIGN_NAMESPACE_GENERIC(ibz_probab_prime) -#define ibz_rand_interval SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval) -#define ibz_rand_interval_bits SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_bits) -#define ibz_rand_interval_i SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_i) -#define ibz_rand_interval_minm_m SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_minm_m) -#define ibz_set SQISIGN_NAMESPACE_GENERIC(ibz_set) -#define ibz_set_from_str SQISIGN_NAMESPACE_GENERIC(ibz_set_from_str) -#define ibz_size_in_base SQISIGN_NAMESPACE_GENERIC(ibz_size_in_base) -#define ibz_sqrt SQISIGN_NAMESPACE_GENERIC(ibz_sqrt) -#define ibz_sqrt_floor SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_floor) -#define ibz_sqrt_mod_p SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_mod_p) -#define ibz_sub SQISIGN_NAMESPACE_GENERIC(ibz_sub) -#define ibz_swap SQISIGN_NAMESPACE_GENERIC(ibz_swap) -#define ibz_to_digits SQISIGN_NAMESPACE_GENERIC(ibz_to_digits) -#define ibz_two_adic SQISIGN_NAMESPACE_GENERIC(ibz_two_adic) +#define ibz_abs SQISIGN_NAMESPACE(ibz_abs) +#define ibz_add SQISIGN_NAMESPACE(ibz_add) +#define ibz_bitsize SQISIGN_NAMESPACE(ibz_bitsize) +#define ibz_cmp SQISIGN_NAMESPACE(ibz_cmp) +#define ibz_cmp_int32 SQISIGN_NAMESPACE(ibz_cmp_int32) +#define ibz_const_one SQISIGN_NAMESPACE(ibz_const_one) +#define ibz_const_three SQISIGN_NAMESPACE(ibz_const_three) +#define ibz_const_two SQISIGN_NAMESPACE(ibz_const_two) +#define ibz_const_zero SQISIGN_NAMESPACE(ibz_const_zero) +#define ibz_convert_to_str SQISIGN_NAMESPACE(ibz_convert_to_str) +#define ibz_copy SQISIGN_NAMESPACE(ibz_copy) +#define ibz_copy_digits SQISIGN_NAMESPACE(ibz_copy_digits) +#define ibz_div SQISIGN_NAMESPACE(ibz_div) +#define ibz_div_2exp SQISIGN_NAMESPACE(ibz_div_2exp) +#define ibz_div_floor SQISIGN_NAMESPACE(ibz_div_floor) +#define ibz_divides SQISIGN_NAMESPACE(ibz_divides) +#define ibz_finalize SQISIGN_NAMESPACE(ibz_finalize) +#define ibz_gcd SQISIGN_NAMESPACE(ibz_gcd) +#define ibz_get SQISIGN_NAMESPACE(ibz_get) +#define ibz_init SQISIGN_NAMESPACE(ibz_init) +#define ibz_invmod SQISIGN_NAMESPACE(ibz_invmod) +#define ibz_is_even SQISIGN_NAMESPACE(ibz_is_even) +#define ibz_is_odd SQISIGN_NAMESPACE(ibz_is_odd) +#define ibz_is_one SQISIGN_NAMESPACE(ibz_is_one) +#define ibz_is_zero SQISIGN_NAMESPACE(ibz_is_zero) +#define ibz_legendre SQISIGN_NAMESPACE(ibz_legendre) +#define ibz_mod SQISIGN_NAMESPACE(ibz_mod) +#define ibz_mod_ui SQISIGN_NAMESPACE(ibz_mod_ui) +#define ibz_mul SQISIGN_NAMESPACE(ibz_mul) +#define ibz_neg SQISIGN_NAMESPACE(ibz_neg) +#define ibz_pow SQISIGN_NAMESPACE(ibz_pow) +#define ibz_pow_mod SQISIGN_NAMESPACE(ibz_pow_mod) +#define ibz_print SQISIGN_NAMESPACE(ibz_print) +#define ibz_probab_prime SQISIGN_NAMESPACE(ibz_probab_prime) +#define ibz_rand_interval SQISIGN_NAMESPACE(ibz_rand_interval) +#define ibz_rand_interval_bits SQISIGN_NAMESPACE(ibz_rand_interval_bits) +#define ibz_rand_interval_i SQISIGN_NAMESPACE(ibz_rand_interval_i) +#define ibz_rand_interval_minm_m SQISIGN_NAMESPACE(ibz_rand_interval_minm_m) +#define ibz_set SQISIGN_NAMESPACE(ibz_set) +#define ibz_set_from_str SQISIGN_NAMESPACE(ibz_set_from_str) +#define ibz_size_in_base SQISIGN_NAMESPACE(ibz_size_in_base) +#define ibz_sqrt SQISIGN_NAMESPACE(ibz_sqrt) +#define ibz_sqrt_floor SQISIGN_NAMESPACE(ibz_sqrt_floor) +#define ibz_sqrt_mod_p SQISIGN_NAMESPACE(ibz_sqrt_mod_p) +#define ibz_sub SQISIGN_NAMESPACE(ibz_sub) +#define ibz_swap SQISIGN_NAMESPACE(ibz_swap) +#define ibz_to_digits SQISIGN_NAMESPACE(ibz_to_digits) +#define ibz_two_adic SQISIGN_NAMESPACE(ibz_two_adic) // 
Namespacing symbols exported from integers.c: #undef ibz_cornacchia_prime #undef ibz_generate_random_prime -#define ibz_cornacchia_prime SQISIGN_NAMESPACE_GENERIC(ibz_cornacchia_prime) -#define ibz_generate_random_prime SQISIGN_NAMESPACE_GENERIC(ibz_generate_random_prime) +#define ibz_cornacchia_prime SQISIGN_NAMESPACE(ibz_cornacchia_prime) +#define ibz_generate_random_prime SQISIGN_NAMESPACE(ibz_generate_random_prime) // Namespacing symbols exported from isog_chains.c: #undef ec_eval_even @@ -763,15 +765,15 @@ #undef quat_lattice_lll #undef quat_lll_core -#define quat_lattice_lll SQISIGN_NAMESPACE_GENERIC(quat_lattice_lll) -#define quat_lll_core SQISIGN_NAMESPACE_GENERIC(quat_lll_core) +#define quat_lattice_lll SQISIGN_NAMESPACE(quat_lattice_lll) +#define quat_lll_core SQISIGN_NAMESPACE(quat_lll_core) // Namespacing symbols exported from lat_ball.c: #undef quat_lattice_bound_parallelogram #undef quat_lattice_sample_from_ball -#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE_GENERIC(quat_lattice_bound_parallelogram) -#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE_GENERIC(quat_lattice_sample_from_ball) +#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE(quat_lattice_bound_parallelogram) +#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE(quat_lattice_sample_from_ball) // Namespacing symbols exported from lattice.c: #undef quat_lattice_add @@ -789,29 +791,29 @@ #undef quat_lattice_mul #undef quat_lattice_reduce_denom -#define quat_lattice_add SQISIGN_NAMESPACE_GENERIC(quat_lattice_add) -#define quat_lattice_alg_elem_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_alg_elem_mul) -#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_conjugate_without_hnf) -#define quat_lattice_contains SQISIGN_NAMESPACE_GENERIC(quat_lattice_contains) -#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_dual_without_hnf) -#define quat_lattice_equal SQISIGN_NAMESPACE_GENERIC(quat_lattice_equal) -#define quat_lattice_gram SQISIGN_NAMESPACE_GENERIC(quat_lattice_gram) -#define quat_lattice_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_hnf) -#define quat_lattice_inclusion SQISIGN_NAMESPACE_GENERIC(quat_lattice_inclusion) -#define quat_lattice_index SQISIGN_NAMESPACE_GENERIC(quat_lattice_index) -#define quat_lattice_intersect SQISIGN_NAMESPACE_GENERIC(quat_lattice_intersect) -#define quat_lattice_mat_alg_coord_mul_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_mat_alg_coord_mul_without_hnf) -#define quat_lattice_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_mul) -#define quat_lattice_reduce_denom SQISIGN_NAMESPACE_GENERIC(quat_lattice_reduce_denom) +#define quat_lattice_add SQISIGN_NAMESPACE(quat_lattice_add) +#define quat_lattice_alg_elem_mul SQISIGN_NAMESPACE(quat_lattice_alg_elem_mul) +#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE(quat_lattice_conjugate_without_hnf) +#define quat_lattice_contains SQISIGN_NAMESPACE(quat_lattice_contains) +#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE(quat_lattice_dual_without_hnf) +#define quat_lattice_equal SQISIGN_NAMESPACE(quat_lattice_equal) +#define quat_lattice_gram SQISIGN_NAMESPACE(quat_lattice_gram) +#define quat_lattice_hnf SQISIGN_NAMESPACE(quat_lattice_hnf) +#define quat_lattice_inclusion SQISIGN_NAMESPACE(quat_lattice_inclusion) +#define quat_lattice_index SQISIGN_NAMESPACE(quat_lattice_index) +#define quat_lattice_intersect SQISIGN_NAMESPACE(quat_lattice_intersect) +#define quat_lattice_mat_alg_coord_mul_without_hnf 
SQISIGN_NAMESPACE(quat_lattice_mat_alg_coord_mul_without_hnf) +#define quat_lattice_mul SQISIGN_NAMESPACE(quat_lattice_mul) +#define quat_lattice_reduce_denom SQISIGN_NAMESPACE(quat_lattice_reduce_denom) // Namespacing symbols exported from lll_applications.c: #undef quat_lideal_lideal_mul_reduced #undef quat_lideal_prime_norm_reduced_equivalent #undef quat_lideal_reduce_basis -#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE_GENERIC(quat_lideal_lideal_mul_reduced) -#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE_GENERIC(quat_lideal_prime_norm_reduced_equivalent) -#define quat_lideal_reduce_basis SQISIGN_NAMESPACE_GENERIC(quat_lideal_reduce_basis) +#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE(quat_lideal_lideal_mul_reduced) +#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE(quat_lideal_prime_norm_reduced_equivalent) +#define quat_lideal_reduce_basis SQISIGN_NAMESPACE(quat_lideal_reduce_basis) // Namespacing symbols exported from lll_verification.c: #undef ibq_vec_4_copy_ibz @@ -820,18 +822,18 @@ #undef quat_lll_set_ibq_parameters #undef quat_lll_verify -#define ibq_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_copy_ibz) -#define quat_lll_bilinear SQISIGN_NAMESPACE_GENERIC(quat_lll_bilinear) -#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE_GENERIC(quat_lll_gram_schmidt_transposed_with_ibq) -#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE_GENERIC(quat_lll_set_ibq_parameters) -#define quat_lll_verify SQISIGN_NAMESPACE_GENERIC(quat_lll_verify) +#define ibq_vec_4_copy_ibz SQISIGN_NAMESPACE(ibq_vec_4_copy_ibz) +#define quat_lll_bilinear SQISIGN_NAMESPACE(quat_lll_bilinear) +#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE(quat_lll_gram_schmidt_transposed_with_ibq) +#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE(quat_lll_set_ibq_parameters) +#define quat_lll_verify SQISIGN_NAMESPACE(quat_lll_verify) // Namespacing symbols exported from mem.c: #undef sqisign_secure_clear #undef sqisign_secure_free -#define sqisign_secure_clear SQISIGN_NAMESPACE_GENERIC(sqisign_secure_clear) -#define sqisign_secure_free SQISIGN_NAMESPACE_GENERIC(sqisign_secure_free) +#define sqisign_secure_clear SQISIGN_NAMESPACE(sqisign_secure_clear) +#define sqisign_secure_free SQISIGN_NAMESPACE(sqisign_secure_free) // Namespacing symbols exported from mp.c: #undef MUL @@ -854,25 +856,25 @@ #undef select_ct #undef swap_ct -#define MUL SQISIGN_NAMESPACE_GENERIC(MUL) -#define mp_add SQISIGN_NAMESPACE_GENERIC(mp_add) -#define mp_compare SQISIGN_NAMESPACE_GENERIC(mp_compare) -#define mp_copy SQISIGN_NAMESPACE_GENERIC(mp_copy) -#define mp_inv_2e SQISIGN_NAMESPACE_GENERIC(mp_inv_2e) -#define mp_invert_matrix SQISIGN_NAMESPACE_GENERIC(mp_invert_matrix) -#define mp_is_one SQISIGN_NAMESPACE_GENERIC(mp_is_one) -#define mp_is_zero SQISIGN_NAMESPACE_GENERIC(mp_is_zero) -#define mp_mod_2exp SQISIGN_NAMESPACE_GENERIC(mp_mod_2exp) -#define mp_mul SQISIGN_NAMESPACE_GENERIC(mp_mul) -#define mp_mul2 SQISIGN_NAMESPACE_GENERIC(mp_mul2) -#define mp_neg SQISIGN_NAMESPACE_GENERIC(mp_neg) -#define mp_print SQISIGN_NAMESPACE_GENERIC(mp_print) -#define mp_shiftl SQISIGN_NAMESPACE_GENERIC(mp_shiftl) -#define mp_shiftr SQISIGN_NAMESPACE_GENERIC(mp_shiftr) -#define mp_sub SQISIGN_NAMESPACE_GENERIC(mp_sub) -#define multiple_mp_shiftl SQISIGN_NAMESPACE_GENERIC(multiple_mp_shiftl) -#define select_ct SQISIGN_NAMESPACE_GENERIC(select_ct) -#define swap_ct SQISIGN_NAMESPACE_GENERIC(swap_ct) +#define MUL SQISIGN_NAMESPACE(MUL) +#define mp_add 
SQISIGN_NAMESPACE(mp_add) +#define mp_compare SQISIGN_NAMESPACE(mp_compare) +#define mp_copy SQISIGN_NAMESPACE(mp_copy) +#define mp_inv_2e SQISIGN_NAMESPACE(mp_inv_2e) +#define mp_invert_matrix SQISIGN_NAMESPACE(mp_invert_matrix) +#define mp_is_one SQISIGN_NAMESPACE(mp_is_one) +#define mp_is_zero SQISIGN_NAMESPACE(mp_is_zero) +#define mp_mod_2exp SQISIGN_NAMESPACE(mp_mod_2exp) +#define mp_mul SQISIGN_NAMESPACE(mp_mul) +#define mp_mul2 SQISIGN_NAMESPACE(mp_mul2) +#define mp_neg SQISIGN_NAMESPACE(mp_neg) +#define mp_print SQISIGN_NAMESPACE(mp_print) +#define mp_shiftl SQISIGN_NAMESPACE(mp_shiftl) +#define mp_shiftr SQISIGN_NAMESPACE(mp_shiftr) +#define mp_sub SQISIGN_NAMESPACE(mp_sub) +#define multiple_mp_shiftl SQISIGN_NAMESPACE(multiple_mp_shiftl) +#define select_ct SQISIGN_NAMESPACE(select_ct) +#define swap_ct SQISIGN_NAMESPACE(swap_ct) // Namespacing symbols exported from normeq.c: #undef quat_change_to_O0_basis @@ -882,12 +884,12 @@ #undef quat_represent_integer #undef quat_sampling_random_ideal_O0_given_norm -#define quat_change_to_O0_basis SQISIGN_NAMESPACE_GENERIC(quat_change_to_O0_basis) -#define quat_lattice_O0_set SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set) -#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set_extremal) -#define quat_order_elem_create SQISIGN_NAMESPACE_GENERIC(quat_order_elem_create) -#define quat_represent_integer SQISIGN_NAMESPACE_GENERIC(quat_represent_integer) -#define quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE_GENERIC(quat_sampling_random_ideal_O0_given_norm) +#define quat_change_to_O0_basis SQISIGN_NAMESPACE(quat_change_to_O0_basis) +#define quat_lattice_O0_set SQISIGN_NAMESPACE(quat_lattice_O0_set) +#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE(quat_lattice_O0_set_extremal) +#define quat_order_elem_create SQISIGN_NAMESPACE(quat_order_elem_create) +#define quat_represent_integer SQISIGN_NAMESPACE(quat_represent_integer) +#define quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE(quat_sampling_random_ideal_O0_given_norm) // Namespacing symbols exported from printer.c: #undef ibz_mat_2x2_print @@ -899,23 +901,23 @@ #undef quat_lattice_print #undef quat_left_ideal_print -#define ibz_mat_2x2_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_print) -#define ibz_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_print) -#define ibz_vec_2_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_print) -#define ibz_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_print) -#define quat_alg_elem_print SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_print) -#define quat_alg_print SQISIGN_NAMESPACE_GENERIC(quat_alg_print) -#define quat_lattice_print SQISIGN_NAMESPACE_GENERIC(quat_lattice_print) -#define quat_left_ideal_print SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_print) +#define ibz_mat_2x2_print SQISIGN_NAMESPACE(ibz_mat_2x2_print) +#define ibz_mat_4x4_print SQISIGN_NAMESPACE(ibz_mat_4x4_print) +#define ibz_vec_2_print SQISIGN_NAMESPACE(ibz_vec_2_print) +#define ibz_vec_4_print SQISIGN_NAMESPACE(ibz_vec_4_print) +#define quat_alg_elem_print SQISIGN_NAMESPACE(quat_alg_elem_print) +#define quat_alg_print SQISIGN_NAMESPACE(quat_alg_print) +#define quat_lattice_print SQISIGN_NAMESPACE(quat_lattice_print) +#define quat_left_ideal_print SQISIGN_NAMESPACE(quat_left_ideal_print) // Namespacing symbols exported from random_input_generation.c: #undef quat_test_input_random_ideal_generation #undef quat_test_input_random_ideal_lattice_generation #undef quat_test_input_random_lattice_generation -#define quat_test_input_random_ideal_generation 
SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_generation) -#define quat_test_input_random_ideal_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_lattice_generation) -#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_lattice_generation) +#define quat_test_input_random_ideal_generation SQISIGN_NAMESPACE(quat_test_input_random_ideal_generation) +#define quat_test_input_random_ideal_lattice_generation SQISIGN_NAMESPACE(quat_test_input_random_ideal_lattice_generation) +#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE(quat_test_input_random_lattice_generation) // Namespacing symbols exported from rationals.c: #undef ibq_abs @@ -941,28 +943,28 @@ #undef ibq_vec_4_init #undef ibq_vec_4_print -#define ibq_abs SQISIGN_NAMESPACE_GENERIC(ibq_abs) -#define ibq_add SQISIGN_NAMESPACE_GENERIC(ibq_add) -#define ibq_cmp SQISIGN_NAMESPACE_GENERIC(ibq_cmp) -#define ibq_copy SQISIGN_NAMESPACE_GENERIC(ibq_copy) -#define ibq_finalize SQISIGN_NAMESPACE_GENERIC(ibq_finalize) -#define ibq_init SQISIGN_NAMESPACE_GENERIC(ibq_init) -#define ibq_inv SQISIGN_NAMESPACE_GENERIC(ibq_inv) -#define ibq_is_ibz SQISIGN_NAMESPACE_GENERIC(ibq_is_ibz) -#define ibq_is_one SQISIGN_NAMESPACE_GENERIC(ibq_is_one) -#define ibq_is_zero SQISIGN_NAMESPACE_GENERIC(ibq_is_zero) -#define ibq_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_finalize) -#define ibq_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_init) -#define ibq_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_print) -#define ibq_mul SQISIGN_NAMESPACE_GENERIC(ibq_mul) -#define ibq_neg SQISIGN_NAMESPACE_GENERIC(ibq_neg) -#define ibq_reduce SQISIGN_NAMESPACE_GENERIC(ibq_reduce) -#define ibq_set SQISIGN_NAMESPACE_GENERIC(ibq_set) -#define ibq_sub SQISIGN_NAMESPACE_GENERIC(ibq_sub) -#define ibq_to_ibz SQISIGN_NAMESPACE_GENERIC(ibq_to_ibz) -#define ibq_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_finalize) -#define ibq_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_init) -#define ibq_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_print) +#define ibq_abs SQISIGN_NAMESPACE(ibq_abs) +#define ibq_add SQISIGN_NAMESPACE(ibq_add) +#define ibq_cmp SQISIGN_NAMESPACE(ibq_cmp) +#define ibq_copy SQISIGN_NAMESPACE(ibq_copy) +#define ibq_finalize SQISIGN_NAMESPACE(ibq_finalize) +#define ibq_init SQISIGN_NAMESPACE(ibq_init) +#define ibq_inv SQISIGN_NAMESPACE(ibq_inv) +#define ibq_is_ibz SQISIGN_NAMESPACE(ibq_is_ibz) +#define ibq_is_one SQISIGN_NAMESPACE(ibq_is_one) +#define ibq_is_zero SQISIGN_NAMESPACE(ibq_is_zero) +#define ibq_mat_4x4_finalize SQISIGN_NAMESPACE(ibq_mat_4x4_finalize) +#define ibq_mat_4x4_init SQISIGN_NAMESPACE(ibq_mat_4x4_init) +#define ibq_mat_4x4_print SQISIGN_NAMESPACE(ibq_mat_4x4_print) +#define ibq_mul SQISIGN_NAMESPACE(ibq_mul) +#define ibq_neg SQISIGN_NAMESPACE(ibq_neg) +#define ibq_reduce SQISIGN_NAMESPACE(ibq_reduce) +#define ibq_set SQISIGN_NAMESPACE(ibq_set) +#define ibq_sub SQISIGN_NAMESPACE(ibq_sub) +#define ibq_to_ibz SQISIGN_NAMESPACE(ibq_to_ibz) +#define ibq_vec_4_finalize SQISIGN_NAMESPACE(ibq_vec_4_finalize) +#define ibq_vec_4_init SQISIGN_NAMESPACE(ibq_vec_4_init) +#define ibq_vec_4_print SQISIGN_NAMESPACE(ibq_vec_4_print) // Namespacing symbols exported from sign.c: #undef protocols_sign diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/tools.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/tools.c deleted file mode 100644 index 242ea08fe2..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/tools.c +++ 
/dev/null @@ -1,75 +0,0 @@ -#include -#include - -static clock_t global_timer; - -clock_t -tic(void) -{ - global_timer = clock(); - return global_timer; -} - -float -tac(void) -{ - float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); - return ms; -} - -float -TAC(const char *str) -{ - float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); -#ifndef NDEBUG - printf("%s [%d ms]\n", str, (int)ms); -#endif - return ms; -} - -float -toc(const clock_t t) -{ - float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); - return ms; -} - -float -TOC(const clock_t t, const char *str) -{ - float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); - printf("%s [%d ms]\n", str, (int)ms); - return ms; - // printf("%s [%ld]\n",str,clock()-t); - // return (float) (clock()-t); -} - -float -TOC_clock(const clock_t t, const char *str) -{ - printf("%s [%ld]\n", str, clock() - t); - return (float)(clock() - t); -} - -clock_t -dclock(const clock_t t) -{ - return (clock() - t); -} - -float -clock_to_time(const clock_t t, const char *str) -{ - float ms = (1000. * (float)(t) / CLOCKS_PER_SEC); - printf("%s [%d ms]\n", str, (int)ms); - return ms; - // printf("%s [%ld]\n",str,t); - // return (float) (t); -} - -float -clock_print(const clock_t t, const char *str) -{ - printf("%s [%ld]\n", str, t); - return (float)(t); -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/randombytes_system.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/randombytes_system.c deleted file mode 100644 index 689c29b242..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/randombytes_system.c +++ /dev/null @@ -1,431 +0,0 @@ -// SPDX-License-Identifier: MIT - -/* -The MIT License -Copyright (c) 2017 Daan Sprenkels -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. -*/ - -#include - -#ifdef ENABLE_CT_TESTING -#include -#endif - -// In the case that are compiling on linux, we need to define _GNU_SOURCE -// *before* randombytes.h is included. Otherwise SYS_getrandom will not be -// declared. 
-#if defined(__linux__) || defined(__GNU__) -#define _GNU_SOURCE -#endif /* defined(__linux__) || defined(__GNU__) */ - -#if defined(_WIN32) -/* Windows */ -#include -#include /* CryptAcquireContext, CryptGenRandom */ -#endif /* defined(_WIN32) */ - -/* wasi */ -#if defined(__wasi__) -#include -#endif - -/* kFreeBSD */ -#if defined(__FreeBSD_kernel__) && defined(__GLIBC__) -#define GNU_KFREEBSD -#endif - -#if defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) -/* Linux */ -// We would need to include , but not every target has access -// to the linux headers. We only need RNDGETENTCNT, so we instead inline it. -// RNDGETENTCNT is originally defined in `include/uapi/linux/random.h` in the -// linux repo. -#define RNDGETENTCNT 0x80045200 - -#include -#include -#include -#include -#include -#include -#include -#if (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && \ - ((__GLIBC__ > 2) || (__GLIBC_MINOR__ > 24)) -#define USE_GLIBC -#include -#endif /* (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && ((__GLIBC__ > 2) || \ - (__GLIBC_MINOR__ > 24)) */ -#include -#include -#include -#include - -// We need SSIZE_MAX as the maximum read len from /dev/urandom -#if !defined(SSIZE_MAX) -#define SSIZE_MAX (SIZE_MAX / 2 - 1) -#endif /* defined(SSIZE_MAX) */ - -#endif /* defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) */ - -#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) -/* Dragonfly, FreeBSD, NetBSD, OpenBSD (has arc4random) */ -#include -#if defined(BSD) -#include -#endif -/* GNU/Hurd defines BSD in sys/param.h which causes problems later */ -#if defined(__GNU__) -#undef BSD -#endif -#endif - -#if defined(__EMSCRIPTEN__) -#include -#include -#include -#include -#endif /* defined(__EMSCRIPTEN__) */ - -#if defined(_WIN32) -static int -randombytes_win32_randombytes(void *buf, size_t n) -{ - HCRYPTPROV ctx; - BOOL tmp; - DWORD to_read = 0; - const size_t MAX_DWORD = 0xFFFFFFFF; - - tmp = CryptAcquireContext(&ctx, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT); - if (tmp == FALSE) - return -1; - - while (n > 0) { - to_read = (DWORD)(n < MAX_DWORD ? n : MAX_DWORD); - tmp = CryptGenRandom(ctx, to_read, (BYTE *)buf); - if (tmp == FALSE) - return -1; - buf = ((char *)buf) + to_read; - n -= to_read; - } - - tmp = CryptReleaseContext(ctx, 0); - if (tmp == FALSE) - return -1; - - return 0; -} -#endif /* defined(_WIN32) */ - -#if defined(__wasi__) -static int -randombytes_wasi_randombytes(void *buf, size_t n) -{ - arc4random_buf(buf, n); - return 0; -} -#endif /* defined(__wasi__) */ - -#if (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || defined(SYS_getrandom)) -#if defined(USE_GLIBC) -// getrandom is declared in glibc. -#elif defined(SYS_getrandom) -static ssize_t -getrandom(void *buf, size_t buflen, unsigned int flags) -{ - return syscall(SYS_getrandom, buf, buflen, flags); -} -#endif - -static int -randombytes_linux_randombytes_getrandom(void *buf, size_t n) -{ - /* I have thought about using a separate PRF, seeded by getrandom, but - * it turns out that the performance of getrandom is good enough - * (250 MB/s on my laptop). - */ - size_t offset = 0, chunk; - int ret; - while (n > 0) { - /* getrandom does not allow chunks larger than 33554431 */ - chunk = n <= 33554431 ? 
n : 33554431; - do { - ret = getrandom((char *)buf + offset, chunk, 0); - } while (ret == -1 && errno == EINTR); - if (ret < 0) - return ret; - offset += ret; - n -= ret; - } - assert(n == 0); - return 0; -} -#endif /* (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || \ - defined(SYS_getrandom)) */ - -#if (defined(__linux__) || defined(GNU_KFREEBSD)) && !defined(SYS_getrandom) - -#if defined(__linux__) -static int -randombytes_linux_read_entropy_ioctl(int device, int *entropy) -{ - return ioctl(device, RNDGETENTCNT, entropy); -} - -static int -randombytes_linux_read_entropy_proc(FILE *stream, int *entropy) -{ - int retcode; - do { - rewind(stream); - retcode = fscanf(stream, "%d", entropy); - } while (retcode != 1 && errno == EINTR); - if (retcode != 1) { - return -1; - } - return 0; -} - -static int -randombytes_linux_wait_for_entropy(int device) -{ - /* We will block on /dev/random, because any increase in the OS' entropy - * level will unblock the request. I use poll here (as does libsodium), - * because we don't *actually* want to read from the device. */ - enum - { - IOCTL, - PROC - } strategy = IOCTL; - const int bits = 128; - struct pollfd pfd; - int fd; - FILE *proc_file; - int retcode, retcode_error = 0; // Used as return codes throughout this function - int entropy = 0; - - /* If the device has enough entropy already, we will want to return early */ - retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); - // printf("errno: %d (%s)\n", errno, strerror(errno)); - if (retcode != 0 && (errno == ENOTTY || errno == ENOSYS)) { - // The ioctl call on /dev/urandom has failed due to a - // - ENOTTY (unsupported action), or - // - ENOSYS (invalid ioctl; this happens on MIPS, see #22). - // - // We will fall back to reading from - // `/proc/sys/kernel/random/entropy_avail`. This less ideal, - // because it allocates a file descriptor, and it may not work - // in a chroot. But at this point it seems we have no better - // options left. 
- strategy = PROC; - // Open the entropy count file - proc_file = fopen("/proc/sys/kernel/random/entropy_avail", "r"); - if (proc_file == NULL) { - return -1; - } - } else if (retcode != 0) { - // Unrecoverable ioctl error - return -1; - } - if (entropy >= bits) { - return 0; - } - - do { - fd = open("/dev/random", O_RDONLY); - } while (fd == -1 && errno == EINTR); /* EAGAIN will not occur */ - if (fd == -1) { - /* Unrecoverable IO error */ - return -1; - } - - pfd.fd = fd; - pfd.events = POLLIN; - for (;;) { - retcode = poll(&pfd, 1, -1); - if (retcode == -1 && (errno == EINTR || errno == EAGAIN)) { - continue; - } else if (retcode == 1) { - if (strategy == IOCTL) { - retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); - } else if (strategy == PROC) { - retcode = randombytes_linux_read_entropy_proc(proc_file, &entropy); - } else { - return -1; // Unreachable - } - - if (retcode != 0) { - // Unrecoverable I/O error - retcode_error = retcode; - break; - } - if (entropy >= bits) { - break; - } - } else { - // Unreachable: poll() should only return -1 or 1 - retcode_error = -1; - break; - } - } - do { - retcode = close(fd); - } while (retcode == -1 && errno == EINTR); - if (strategy == PROC) { - do { - retcode = fclose(proc_file); - } while (retcode == -1 && errno == EINTR); - } - if (retcode_error != 0) { - return retcode_error; - } - return retcode; -} -#endif /* defined(__linux__) */ - -static int -randombytes_linux_randombytes_urandom(void *buf, size_t n) -{ - int fd; - size_t offset = 0, count; - ssize_t tmp; - do { - fd = open("/dev/urandom", O_RDONLY); - } while (fd == -1 && errno == EINTR); - if (fd == -1) - return -1; -#if defined(__linux__) - if (randombytes_linux_wait_for_entropy(fd) == -1) - return -1; -#endif - - while (n > 0) { - count = n <= SSIZE_MAX ? 
n : SSIZE_MAX; - tmp = read(fd, (char *)buf + offset, count); - if (tmp == -1 && (errno == EAGAIN || errno == EINTR)) { - continue; - } - if (tmp == -1) - return -1; /* Unrecoverable IO error */ - offset += tmp; - n -= tmp; - } - close(fd); - assert(n == 0); - return 0; -} -#endif /* defined(__linux__) && !defined(SYS_getrandom) */ - -#if defined(BSD) -static int -randombytes_bsd_randombytes(void *buf, size_t n) -{ - arc4random_buf(buf, n); - return 0; -} -#endif /* defined(BSD) */ - -#if defined(__EMSCRIPTEN__) -static int -randombytes_js_randombytes_nodejs(void *buf, size_t n) -{ - const int ret = EM_ASM_INT( - { - var crypto; - try { - crypto = require('crypto'); - } catch (error) { - return -2; - } - try { - writeArrayToMemory(crypto.randomBytes($1), $0); - return 0; - } catch (error) { - return -1; - } - }, - buf, - n); - switch (ret) { - case 0: - return 0; - case -1: - errno = EINVAL; - return -1; - case -2: - errno = ENOSYS; - return -1; - } - assert(false); // Unreachable -} -#endif /* defined(__EMSCRIPTEN__) */ - -SQISIGN_API -int -randombytes_select(unsigned char *buf, unsigned long long n) -{ -#if defined(__EMSCRIPTEN__) - return randombytes_js_randombytes_nodejs(buf, n); -#elif defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) -#if defined(USE_GLIBC) - /* Use getrandom system call */ - return randombytes_linux_randombytes_getrandom(buf, n); -#elif defined(SYS_getrandom) - /* Use getrandom system call */ - return randombytes_linux_randombytes_getrandom(buf, n); -#else - /* When we have enough entropy, we can read from /dev/urandom */ - return randombytes_linux_randombytes_urandom(buf, n); -#endif -#elif defined(BSD) - /* Use arc4random system call */ - return randombytes_bsd_randombytes(buf, n); -#elif defined(_WIN32) - /* Use windows API */ - return randombytes_win32_randombytes(buf, n); -#elif defined(__wasi__) - /* Use WASI */ - return randombytes_wasi_randombytes(buf, n); -#else -#error "randombytes(...) is not supported on this platform" -#endif -} - -#ifdef RANDOMBYTES_SYSTEM -SQISIGN_API -int -randombytes(unsigned char *x, unsigned long long xlen) -{ - - int ret = randombytes_select(x, (size_t)xlen); -#ifdef ENABLE_CT_TESTING - VALGRIND_MAKE_MEM_UNDEFINED(x, xlen); -#endif - return ret; -} - -SQISIGN_API -void -randombytes_init(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength) -{ - (void)entropy_input; - (void)personalization_string; - (void)security_strength; -} -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/rng.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/rng.h index 0a9ca0e465..d0861ac036 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/rng.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/rng.h @@ -3,41 +3,11 @@ #ifndef rng_h #define rng_h -#include +#include -/** - * Randombytes initialization. - * Initialization may be needed for some random number generators (e.g. CTR-DRBG). - * - * @param[in] entropy_input 48 bytes entropy input - * @param[in] personalization_string Personalization string - * @param[in] security_strength Security string - */ -SQISIGN_API -void randombytes_init(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength); - -/** - * Random byte generation using /dev/urandom. - * The caller is responsible to allocate sufficient memory to hold x. - * - * @param[out] x Memory to hold the random bytes. 
- * @param[in] xlen Number of random bytes to be generated
- * @return int 0 on success, -1 otherwise
- */
-SQISIGN_API
-int randombytes_select(unsigned char *x, unsigned long long xlen);
-
-/**
- * Random byte generation.
- * The caller is responsible to allocate sufficient memory to hold x.
- *
- * @param[out] x Memory to hold the random bytes.
- * @param[in] xlen Number of random bytes to be generated
- * @return int 0 on success, -1 otherwise
- */
-SQISIGN_API
-int randombytes(unsigned char *x, unsigned long long xlen);
+static int randombytes(unsigned char *x, unsigned long long xlen){
+    OQS_randombytes(x, xlen);
+    return 0;
+}
 
 #endif /* rng_h */
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sig.h
index 4c33510084..a5bc04e6e4 100644
--- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sig.h
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sig.h
@@ -17,7 +17,7 @@
  * @param[out] sk SQIsign secret key
  * @return int status code
  */
-SQISIGN_API 
+SQISIGN_API
 int sqisign_keypair(unsigned char *pk, unsigned char *sk);
 
 /**
@@ -34,12 +34,34 @@ int sqisign_keypair(unsigned char *pk, unsigned char *sk);
  * @param[in] sk Compacted secret key
  * @return int status code
  */
-SQISIGN_API 
+SQISIGN_API
 int sqisign_sign(unsigned char *sm,
                  unsigned long long *smlen,
                  const unsigned char *m,
                  unsigned long long mlen,
                  const unsigned char *sk);
+
+/**
+ * Alternate SQIsign signature generation. Used for liboqs compatibility.
+ *
+ * The implementation performs SQIsign.expandSK() + SQIsign.sign() from the SQIsign spec.
+ * The key provided is a compacted secret key.
+ * The caller is responsible for allocating sufficient memory to hold s.
+ *
+ * @param[out] s Signature
+ * @param[out] slen Pointer to the length of s
+ * @param[in] m Message to be signed
+ * @param[in] mlen Message length
+ * @param[in] sk Compacted secret key
+ * @return int status code
+ */
+SQISIGN_API
+int
+sqisign_sign_signature(unsigned char *s,
+                       unsigned long long *slen,
+                       const unsigned char *m,
+                       unsigned long long mlen,
+                       const unsigned char *sk);
 #endif
 
 /**
@@ -75,11 +97,30 @@ int sqisign_open(unsigned char *m,
  * @param[in] pk Compacted public key
  * @return int 0 if verification succeeded, 1 otherwise.
  */
-SQISIGN_API 
+SQISIGN_API
 int sqisign_verify(const unsigned char *m,
                    unsigned long long mlen,
                    const unsigned char *sig,
                    unsigned long long siglen,
                    const unsigned char *pk);
+/**
+ * Alternate SQIsign signature verification. Used for liboqs compatibility.
+ *
+ * If the signature verification succeeded, returns 0, otherwise 1.
+ *
+ * @param[in] sig Signature
+ * @param[in] siglen Length of sig
+ * @param[in] m Message whose signature is to be verified
+ * @param[in] mlen Message length
+ * @param[in] pk Compacted public key
+ * @return int 0 if verification succeeded, 1 otherwise.
+ */ +SQISIGN_API +int +sqisign_verify_signature(const unsigned char *sig, + unsigned long long siglen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *pk); #endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign_namespace.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign_namespace.h index 007d2572b9..bbfe72c13b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign_namespace.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign_namespace.h @@ -18,12 +18,6 @@ #define PARAM_JOIN2(a, b) PARAM_JOIN2_(a, b) #define PARAM_NAME2(end, s) PARAM_JOIN2(end, s) -#ifndef DISABLE_NAMESPACING -#define SQISIGN_NAMESPACE_GENERIC(s) PARAM_NAME2(gen, s) -#else -#define SQISIGN_NAMESPACE_GENERIC(s) s -#endif - #if defined(SQISIGN_VARIANT) && !defined(DISABLE_NAMESPACING) #if defined(SQISIGN_BUILD_TYPE_REF) #define SQISIGN_NAMESPACE(s) PARAM_NAME3(ref, s) @@ -60,23 +54,23 @@ #undef quat_alg_scalar #undef quat_alg_sub -#define quat_alg_add SQISIGN_NAMESPACE_GENERIC(quat_alg_add) -#define quat_alg_conj SQISIGN_NAMESPACE_GENERIC(quat_alg_conj) -#define quat_alg_coord_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_coord_mul) -#define quat_alg_elem_copy SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy) -#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy_ibz) -#define quat_alg_elem_equal SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_equal) -#define quat_alg_elem_is_zero SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_is_zero) -#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_mul_by_scalar) -#define quat_alg_elem_set SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_set) -#define quat_alg_equal_denom SQISIGN_NAMESPACE_GENERIC(quat_alg_equal_denom) -#define quat_alg_init_set_ui SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set_ui) -#define quat_alg_make_primitive SQISIGN_NAMESPACE_GENERIC(quat_alg_make_primitive) -#define quat_alg_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_mul) -#define quat_alg_norm SQISIGN_NAMESPACE_GENERIC(quat_alg_norm) -#define quat_alg_normalize SQISIGN_NAMESPACE_GENERIC(quat_alg_normalize) -#define quat_alg_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_scalar) -#define quat_alg_sub SQISIGN_NAMESPACE_GENERIC(quat_alg_sub) +#define quat_alg_add SQISIGN_NAMESPACE(quat_alg_add) +#define quat_alg_conj SQISIGN_NAMESPACE(quat_alg_conj) +#define quat_alg_coord_mul SQISIGN_NAMESPACE(quat_alg_coord_mul) +#define quat_alg_elem_copy SQISIGN_NAMESPACE(quat_alg_elem_copy) +#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE(quat_alg_elem_copy_ibz) +#define quat_alg_elem_equal SQISIGN_NAMESPACE(quat_alg_elem_equal) +#define quat_alg_elem_is_zero SQISIGN_NAMESPACE(quat_alg_elem_is_zero) +#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE(quat_alg_elem_mul_by_scalar) +#define quat_alg_elem_set SQISIGN_NAMESPACE(quat_alg_elem_set) +#define quat_alg_equal_denom SQISIGN_NAMESPACE(quat_alg_equal_denom) +#define quat_alg_init_set_ui SQISIGN_NAMESPACE(quat_alg_init_set_ui) +#define quat_alg_make_primitive SQISIGN_NAMESPACE(quat_alg_make_primitive) +#define quat_alg_mul SQISIGN_NAMESPACE(quat_alg_mul) +#define quat_alg_norm SQISIGN_NAMESPACE(quat_alg_norm) +#define quat_alg_normalize SQISIGN_NAMESPACE(quat_alg_normalize) +#define quat_alg_scalar SQISIGN_NAMESPACE(quat_alg_scalar) +#define quat_alg_sub SQISIGN_NAMESPACE(quat_alg_sub) // Namespacing symbols exported from api.c: #undef crypto_sign @@ -134,14 +128,14 @@ #undef ibz_mat_2x2_set #undef ibz_vec_2_set -#define ibz_2x2_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_2x2_mul_mod) -#define 
ibz_mat_2x2_add SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_add) -#define ibz_mat_2x2_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_copy) -#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_det_from_ibz) -#define ibz_mat_2x2_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_eval) -#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_inv_mod) -#define ibz_mat_2x2_set SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_set) -#define ibz_vec_2_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_set) +#define ibz_2x2_mul_mod SQISIGN_NAMESPACE(ibz_2x2_mul_mod) +#define ibz_mat_2x2_add SQISIGN_NAMESPACE(ibz_mat_2x2_add) +#define ibz_mat_2x2_copy SQISIGN_NAMESPACE(ibz_mat_2x2_copy) +#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE(ibz_mat_2x2_det_from_ibz) +#define ibz_mat_2x2_eval SQISIGN_NAMESPACE(ibz_mat_2x2_eval) +#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE(ibz_mat_2x2_inv_mod) +#define ibz_mat_2x2_set SQISIGN_NAMESPACE(ibz_mat_2x2_set) +#define ibz_vec_2_set SQISIGN_NAMESPACE(ibz_vec_2_set) // Namespacing symbols exported from dim2id2iso.c: #undef dim2id2iso_arbitrary_isogeny_evaluation @@ -184,34 +178,34 @@ #undef ibz_vec_4_sub #undef quat_qf_eval -#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_mpm) -#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_pmp) -#define ibz_mat_4x4_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_copy) -#define ibz_mat_4x4_equal SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_equal) -#define ibz_mat_4x4_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval) -#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval_t) -#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_gcd) -#define ibz_mat_4x4_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_identity) -#define ibz_mat_4x4_inv_with_det_as_denom SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_inv_with_det_as_denom) -#define ibz_mat_4x4_is_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_identity) -#define ibz_mat_4x4_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_mul) -#define ibz_mat_4x4_negate SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_negate) -#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_div) -#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_mul) -#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_transpose) -#define ibz_mat_4x4_zero SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_zero) -#define ibz_vec_4_add SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_add) -#define ibz_vec_4_content SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_content) -#define ibz_vec_4_copy SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy) -#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_ibz) -#define ibz_vec_4_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_is_zero) -#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination) -#define ibz_vec_4_negate SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_negate) -#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_div) -#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul) -#define ibz_vec_4_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_set) -#define ibz_vec_4_sub SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_sub) -#define quat_qf_eval SQISIGN_NAMESPACE_GENERIC(quat_qf_eval) +#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE(ibz_inv_dim4_make_coeff_mpm) +#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE(ibz_inv_dim4_make_coeff_pmp) +#define ibz_mat_4x4_copy SQISIGN_NAMESPACE(ibz_mat_4x4_copy) +#define ibz_mat_4x4_equal 
SQISIGN_NAMESPACE(ibz_mat_4x4_equal) +#define ibz_mat_4x4_eval SQISIGN_NAMESPACE(ibz_mat_4x4_eval) +#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE(ibz_mat_4x4_eval_t) +#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE(ibz_mat_4x4_gcd) +#define ibz_mat_4x4_identity SQISIGN_NAMESPACE(ibz_mat_4x4_identity) +#define ibz_mat_4x4_inv_with_det_as_denom SQISIGN_NAMESPACE(ibz_mat_4x4_inv_with_det_as_denom) +#define ibz_mat_4x4_is_identity SQISIGN_NAMESPACE(ibz_mat_4x4_is_identity) +#define ibz_mat_4x4_mul SQISIGN_NAMESPACE(ibz_mat_4x4_mul) +#define ibz_mat_4x4_negate SQISIGN_NAMESPACE(ibz_mat_4x4_negate) +#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE(ibz_mat_4x4_scalar_div) +#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE(ibz_mat_4x4_scalar_mul) +#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE(ibz_mat_4x4_transpose) +#define ibz_mat_4x4_zero SQISIGN_NAMESPACE(ibz_mat_4x4_zero) +#define ibz_vec_4_add SQISIGN_NAMESPACE(ibz_vec_4_add) +#define ibz_vec_4_content SQISIGN_NAMESPACE(ibz_vec_4_content) +#define ibz_vec_4_copy SQISIGN_NAMESPACE(ibz_vec_4_copy) +#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE(ibz_vec_4_copy_ibz) +#define ibz_vec_4_is_zero SQISIGN_NAMESPACE(ibz_vec_4_is_zero) +#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE(ibz_vec_4_linear_combination) +#define ibz_vec_4_negate SQISIGN_NAMESPACE(ibz_vec_4_negate) +#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE(ibz_vec_4_scalar_div) +#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE(ibz_vec_4_scalar_mul) +#define ibz_vec_4_set SQISIGN_NAMESPACE(ibz_vec_4_set) +#define ibz_vec_4_sub SQISIGN_NAMESPACE(ibz_vec_4_sub) +#define quat_qf_eval SQISIGN_NAMESPACE(quat_qf_eval) // Namespacing symbols exported from ec.c: #undef cswap_points @@ -339,22 +333,22 @@ #undef quat_left_ideal_finalize #undef quat_left_ideal_init -#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_finalize) -#define ibz_mat_2x2_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_init) -#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_finalize) -#define ibz_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_init) -#define ibz_vec_2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_finalize) -#define ibz_vec_2_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_init) -#define ibz_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_finalize) -#define ibz_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_init) -#define quat_alg_elem_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_finalize) -#define quat_alg_elem_init SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_init) -#define quat_alg_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_finalize) -#define quat_alg_init_set SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set) -#define quat_lattice_finalize SQISIGN_NAMESPACE_GENERIC(quat_lattice_finalize) -#define quat_lattice_init SQISIGN_NAMESPACE_GENERIC(quat_lattice_init) -#define quat_left_ideal_finalize SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_finalize) -#define quat_left_ideal_init SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_init) +#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE(ibz_mat_2x2_finalize) +#define ibz_mat_2x2_init SQISIGN_NAMESPACE(ibz_mat_2x2_init) +#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE(ibz_mat_4x4_finalize) +#define ibz_mat_4x4_init SQISIGN_NAMESPACE(ibz_mat_4x4_init) +#define ibz_vec_2_finalize SQISIGN_NAMESPACE(ibz_vec_2_finalize) +#define ibz_vec_2_init SQISIGN_NAMESPACE(ibz_vec_2_init) +#define ibz_vec_4_finalize SQISIGN_NAMESPACE(ibz_vec_4_finalize) +#define ibz_vec_4_init SQISIGN_NAMESPACE(ibz_vec_4_init) +#define quat_alg_elem_finalize 
SQISIGN_NAMESPACE(quat_alg_elem_finalize) +#define quat_alg_elem_init SQISIGN_NAMESPACE(quat_alg_elem_init) +#define quat_alg_finalize SQISIGN_NAMESPACE(quat_alg_finalize) +#define quat_alg_init_set SQISIGN_NAMESPACE(quat_alg_init_set) +#define quat_lattice_finalize SQISIGN_NAMESPACE(quat_lattice_finalize) +#define quat_lattice_init SQISIGN_NAMESPACE(quat_lattice_init) +#define quat_left_ideal_finalize SQISIGN_NAMESPACE(quat_left_ideal_finalize) +#define quat_left_ideal_init SQISIGN_NAMESPACE(quat_left_ideal_init) // Namespacing symbols exported from fp.c: #undef fp_select @@ -567,11 +561,11 @@ #undef ibz_vec_4_linear_combination_mod #undef ibz_vec_4_scalar_mul_mod -#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_hnf) -#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE_GENERIC(ibz_mat_4xn_hnf_mod_core) -#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_mod) -#define ibz_vec_4_linear_combination_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination_mod) -#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul_mod) +#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE(ibz_mat_4x4_is_hnf) +#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE(ibz_mat_4xn_hnf_mod_core) +#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE(ibz_vec_4_copy_mod) +#define ibz_vec_4_linear_combination_mod SQISIGN_NAMESPACE(ibz_vec_4_linear_combination_mod) +#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE(ibz_vec_4_scalar_mul_mod) // Namespacing symbols exported from hnf_internal.c: #undef ibz_centered_mod @@ -579,15 +573,15 @@ #undef ibz_mod_not_zero #undef ibz_xgcd_with_u_not_0 -#define ibz_centered_mod SQISIGN_NAMESPACE_GENERIC(ibz_centered_mod) -#define ibz_conditional_assign SQISIGN_NAMESPACE_GENERIC(ibz_conditional_assign) -#define ibz_mod_not_zero SQISIGN_NAMESPACE_GENERIC(ibz_mod_not_zero) -#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE_GENERIC(ibz_xgcd_with_u_not_0) +#define ibz_centered_mod SQISIGN_NAMESPACE(ibz_centered_mod) +#define ibz_conditional_assign SQISIGN_NAMESPACE(ibz_conditional_assign) +#define ibz_mod_not_zero SQISIGN_NAMESPACE(ibz_mod_not_zero) +#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE(ibz_xgcd_with_u_not_0) // Namespacing symbols exported from ibz_division.c: #undef ibz_xgcd -#define ibz_xgcd SQISIGN_NAMESPACE_GENERIC(ibz_xgcd) +#define ibz_xgcd SQISIGN_NAMESPACE(ibz_xgcd) // Namespacing symbols exported from id2iso.c: #undef change_of_basis_matrix_tate @@ -624,22 +618,22 @@ #undef quat_order_discriminant #undef quat_order_is_maximal -#define quat_lideal_add SQISIGN_NAMESPACE_GENERIC(quat_lideal_add) -#define quat_lideal_class_gram SQISIGN_NAMESPACE_GENERIC(quat_lideal_class_gram) -#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_conjugate_without_hnf) -#define quat_lideal_copy SQISIGN_NAMESPACE_GENERIC(quat_lideal_copy) -#define quat_lideal_create SQISIGN_NAMESPACE_GENERIC(quat_lideal_create) -#define quat_lideal_create_principal SQISIGN_NAMESPACE_GENERIC(quat_lideal_create_principal) -#define quat_lideal_equals SQISIGN_NAMESPACE_GENERIC(quat_lideal_equals) -#define quat_lideal_generator SQISIGN_NAMESPACE_GENERIC(quat_lideal_generator) -#define quat_lideal_inter SQISIGN_NAMESPACE_GENERIC(quat_lideal_inter) -#define quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_inverse_lattice_without_hnf) -#define quat_lideal_mul SQISIGN_NAMESPACE_GENERIC(quat_lideal_mul) -#define quat_lideal_norm SQISIGN_NAMESPACE_GENERIC(quat_lideal_norm) -#define quat_lideal_right_order 
SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_order) -#define quat_lideal_right_transporter SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_transporter) -#define quat_order_discriminant SQISIGN_NAMESPACE_GENERIC(quat_order_discriminant) -#define quat_order_is_maximal SQISIGN_NAMESPACE_GENERIC(quat_order_is_maximal) +#define quat_lideal_add SQISIGN_NAMESPACE(quat_lideal_add) +#define quat_lideal_class_gram SQISIGN_NAMESPACE(quat_lideal_class_gram) +#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE(quat_lideal_conjugate_without_hnf) +#define quat_lideal_copy SQISIGN_NAMESPACE(quat_lideal_copy) +#define quat_lideal_create SQISIGN_NAMESPACE(quat_lideal_create) +#define quat_lideal_create_principal SQISIGN_NAMESPACE(quat_lideal_create_principal) +#define quat_lideal_equals SQISIGN_NAMESPACE(quat_lideal_equals) +#define quat_lideal_generator SQISIGN_NAMESPACE(quat_lideal_generator) +#define quat_lideal_inter SQISIGN_NAMESPACE(quat_lideal_inter) +#define quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE(quat_lideal_inverse_lattice_without_hnf) +#define quat_lideal_mul SQISIGN_NAMESPACE(quat_lideal_mul) +#define quat_lideal_norm SQISIGN_NAMESPACE(quat_lideal_norm) +#define quat_lideal_right_order SQISIGN_NAMESPACE(quat_lideal_right_order) +#define quat_lideal_right_transporter SQISIGN_NAMESPACE(quat_lideal_right_transporter) +#define quat_order_discriminant SQISIGN_NAMESPACE(quat_order_discriminant) +#define quat_order_is_maximal SQISIGN_NAMESPACE(quat_order_is_maximal) // Namespacing symbols exported from intbig.c: #undef ibz_abs @@ -647,6 +641,10 @@ #undef ibz_bitsize #undef ibz_cmp #undef ibz_cmp_int32 +#undef ibz_const_one +#undef ibz_const_three +#undef ibz_const_two +#undef ibz_const_zero #undef ibz_convert_to_str #undef ibz_copy #undef ibz_copy_digits @@ -687,57 +685,61 @@ #undef ibz_to_digits #undef ibz_two_adic -#define ibz_abs SQISIGN_NAMESPACE_GENERIC(ibz_abs) -#define ibz_add SQISIGN_NAMESPACE_GENERIC(ibz_add) -#define ibz_bitsize SQISIGN_NAMESPACE_GENERIC(ibz_bitsize) -#define ibz_cmp SQISIGN_NAMESPACE_GENERIC(ibz_cmp) -#define ibz_cmp_int32 SQISIGN_NAMESPACE_GENERIC(ibz_cmp_int32) -#define ibz_convert_to_str SQISIGN_NAMESPACE_GENERIC(ibz_convert_to_str) -#define ibz_copy SQISIGN_NAMESPACE_GENERIC(ibz_copy) -#define ibz_copy_digits SQISIGN_NAMESPACE_GENERIC(ibz_copy_digits) -#define ibz_div SQISIGN_NAMESPACE_GENERIC(ibz_div) -#define ibz_div_2exp SQISIGN_NAMESPACE_GENERIC(ibz_div_2exp) -#define ibz_div_floor SQISIGN_NAMESPACE_GENERIC(ibz_div_floor) -#define ibz_divides SQISIGN_NAMESPACE_GENERIC(ibz_divides) -#define ibz_finalize SQISIGN_NAMESPACE_GENERIC(ibz_finalize) -#define ibz_gcd SQISIGN_NAMESPACE_GENERIC(ibz_gcd) -#define ibz_get SQISIGN_NAMESPACE_GENERIC(ibz_get) -#define ibz_init SQISIGN_NAMESPACE_GENERIC(ibz_init) -#define ibz_invmod SQISIGN_NAMESPACE_GENERIC(ibz_invmod) -#define ibz_is_even SQISIGN_NAMESPACE_GENERIC(ibz_is_even) -#define ibz_is_odd SQISIGN_NAMESPACE_GENERIC(ibz_is_odd) -#define ibz_is_one SQISIGN_NAMESPACE_GENERIC(ibz_is_one) -#define ibz_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_is_zero) -#define ibz_legendre SQISIGN_NAMESPACE_GENERIC(ibz_legendre) -#define ibz_mod SQISIGN_NAMESPACE_GENERIC(ibz_mod) -#define ibz_mod_ui SQISIGN_NAMESPACE_GENERIC(ibz_mod_ui) -#define ibz_mul SQISIGN_NAMESPACE_GENERIC(ibz_mul) -#define ibz_neg SQISIGN_NAMESPACE_GENERIC(ibz_neg) -#define ibz_pow SQISIGN_NAMESPACE_GENERIC(ibz_pow) -#define ibz_pow_mod SQISIGN_NAMESPACE_GENERIC(ibz_pow_mod) -#define ibz_print SQISIGN_NAMESPACE_GENERIC(ibz_print) -#define ibz_probab_prime 
SQISIGN_NAMESPACE_GENERIC(ibz_probab_prime) -#define ibz_rand_interval SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval) -#define ibz_rand_interval_bits SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_bits) -#define ibz_rand_interval_i SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_i) -#define ibz_rand_interval_minm_m SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_minm_m) -#define ibz_set SQISIGN_NAMESPACE_GENERIC(ibz_set) -#define ibz_set_from_str SQISIGN_NAMESPACE_GENERIC(ibz_set_from_str) -#define ibz_size_in_base SQISIGN_NAMESPACE_GENERIC(ibz_size_in_base) -#define ibz_sqrt SQISIGN_NAMESPACE_GENERIC(ibz_sqrt) -#define ibz_sqrt_floor SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_floor) -#define ibz_sqrt_mod_p SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_mod_p) -#define ibz_sub SQISIGN_NAMESPACE_GENERIC(ibz_sub) -#define ibz_swap SQISIGN_NAMESPACE_GENERIC(ibz_swap) -#define ibz_to_digits SQISIGN_NAMESPACE_GENERIC(ibz_to_digits) -#define ibz_two_adic SQISIGN_NAMESPACE_GENERIC(ibz_two_adic) +#define ibz_abs SQISIGN_NAMESPACE(ibz_abs) +#define ibz_add SQISIGN_NAMESPACE(ibz_add) +#define ibz_bitsize SQISIGN_NAMESPACE(ibz_bitsize) +#define ibz_cmp SQISIGN_NAMESPACE(ibz_cmp) +#define ibz_cmp_int32 SQISIGN_NAMESPACE(ibz_cmp_int32) +#define ibz_const_one SQISIGN_NAMESPACE(ibz_const_one) +#define ibz_const_three SQISIGN_NAMESPACE(ibz_const_three) +#define ibz_const_two SQISIGN_NAMESPACE(ibz_const_two) +#define ibz_const_zero SQISIGN_NAMESPACE(ibz_const_zero) +#define ibz_convert_to_str SQISIGN_NAMESPACE(ibz_convert_to_str) +#define ibz_copy SQISIGN_NAMESPACE(ibz_copy) +#define ibz_copy_digits SQISIGN_NAMESPACE(ibz_copy_digits) +#define ibz_div SQISIGN_NAMESPACE(ibz_div) +#define ibz_div_2exp SQISIGN_NAMESPACE(ibz_div_2exp) +#define ibz_div_floor SQISIGN_NAMESPACE(ibz_div_floor) +#define ibz_divides SQISIGN_NAMESPACE(ibz_divides) +#define ibz_finalize SQISIGN_NAMESPACE(ibz_finalize) +#define ibz_gcd SQISIGN_NAMESPACE(ibz_gcd) +#define ibz_get SQISIGN_NAMESPACE(ibz_get) +#define ibz_init SQISIGN_NAMESPACE(ibz_init) +#define ibz_invmod SQISIGN_NAMESPACE(ibz_invmod) +#define ibz_is_even SQISIGN_NAMESPACE(ibz_is_even) +#define ibz_is_odd SQISIGN_NAMESPACE(ibz_is_odd) +#define ibz_is_one SQISIGN_NAMESPACE(ibz_is_one) +#define ibz_is_zero SQISIGN_NAMESPACE(ibz_is_zero) +#define ibz_legendre SQISIGN_NAMESPACE(ibz_legendre) +#define ibz_mod SQISIGN_NAMESPACE(ibz_mod) +#define ibz_mod_ui SQISIGN_NAMESPACE(ibz_mod_ui) +#define ibz_mul SQISIGN_NAMESPACE(ibz_mul) +#define ibz_neg SQISIGN_NAMESPACE(ibz_neg) +#define ibz_pow SQISIGN_NAMESPACE(ibz_pow) +#define ibz_pow_mod SQISIGN_NAMESPACE(ibz_pow_mod) +#define ibz_print SQISIGN_NAMESPACE(ibz_print) +#define ibz_probab_prime SQISIGN_NAMESPACE(ibz_probab_prime) +#define ibz_rand_interval SQISIGN_NAMESPACE(ibz_rand_interval) +#define ibz_rand_interval_bits SQISIGN_NAMESPACE(ibz_rand_interval_bits) +#define ibz_rand_interval_i SQISIGN_NAMESPACE(ibz_rand_interval_i) +#define ibz_rand_interval_minm_m SQISIGN_NAMESPACE(ibz_rand_interval_minm_m) +#define ibz_set SQISIGN_NAMESPACE(ibz_set) +#define ibz_set_from_str SQISIGN_NAMESPACE(ibz_set_from_str) +#define ibz_size_in_base SQISIGN_NAMESPACE(ibz_size_in_base) +#define ibz_sqrt SQISIGN_NAMESPACE(ibz_sqrt) +#define ibz_sqrt_floor SQISIGN_NAMESPACE(ibz_sqrt_floor) +#define ibz_sqrt_mod_p SQISIGN_NAMESPACE(ibz_sqrt_mod_p) +#define ibz_sub SQISIGN_NAMESPACE(ibz_sub) +#define ibz_swap SQISIGN_NAMESPACE(ibz_swap) +#define ibz_to_digits SQISIGN_NAMESPACE(ibz_to_digits) +#define ibz_two_adic SQISIGN_NAMESPACE(ibz_two_adic) // Namespacing symbols exported from 
integers.c: #undef ibz_cornacchia_prime #undef ibz_generate_random_prime -#define ibz_cornacchia_prime SQISIGN_NAMESPACE_GENERIC(ibz_cornacchia_prime) -#define ibz_generate_random_prime SQISIGN_NAMESPACE_GENERIC(ibz_generate_random_prime) +#define ibz_cornacchia_prime SQISIGN_NAMESPACE(ibz_cornacchia_prime) +#define ibz_generate_random_prime SQISIGN_NAMESPACE(ibz_generate_random_prime) // Namespacing symbols exported from isog_chains.c: #undef ec_eval_even @@ -763,15 +765,15 @@ #undef quat_lattice_lll #undef quat_lll_core -#define quat_lattice_lll SQISIGN_NAMESPACE_GENERIC(quat_lattice_lll) -#define quat_lll_core SQISIGN_NAMESPACE_GENERIC(quat_lll_core) +#define quat_lattice_lll SQISIGN_NAMESPACE(quat_lattice_lll) +#define quat_lll_core SQISIGN_NAMESPACE(quat_lll_core) // Namespacing symbols exported from lat_ball.c: #undef quat_lattice_bound_parallelogram #undef quat_lattice_sample_from_ball -#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE_GENERIC(quat_lattice_bound_parallelogram) -#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE_GENERIC(quat_lattice_sample_from_ball) +#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE(quat_lattice_bound_parallelogram) +#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE(quat_lattice_sample_from_ball) // Namespacing symbols exported from lattice.c: #undef quat_lattice_add @@ -789,29 +791,29 @@ #undef quat_lattice_mul #undef quat_lattice_reduce_denom -#define quat_lattice_add SQISIGN_NAMESPACE_GENERIC(quat_lattice_add) -#define quat_lattice_alg_elem_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_alg_elem_mul) -#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_conjugate_without_hnf) -#define quat_lattice_contains SQISIGN_NAMESPACE_GENERIC(quat_lattice_contains) -#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_dual_without_hnf) -#define quat_lattice_equal SQISIGN_NAMESPACE_GENERIC(quat_lattice_equal) -#define quat_lattice_gram SQISIGN_NAMESPACE_GENERIC(quat_lattice_gram) -#define quat_lattice_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_hnf) -#define quat_lattice_inclusion SQISIGN_NAMESPACE_GENERIC(quat_lattice_inclusion) -#define quat_lattice_index SQISIGN_NAMESPACE_GENERIC(quat_lattice_index) -#define quat_lattice_intersect SQISIGN_NAMESPACE_GENERIC(quat_lattice_intersect) -#define quat_lattice_mat_alg_coord_mul_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_mat_alg_coord_mul_without_hnf) -#define quat_lattice_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_mul) -#define quat_lattice_reduce_denom SQISIGN_NAMESPACE_GENERIC(quat_lattice_reduce_denom) +#define quat_lattice_add SQISIGN_NAMESPACE(quat_lattice_add) +#define quat_lattice_alg_elem_mul SQISIGN_NAMESPACE(quat_lattice_alg_elem_mul) +#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE(quat_lattice_conjugate_without_hnf) +#define quat_lattice_contains SQISIGN_NAMESPACE(quat_lattice_contains) +#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE(quat_lattice_dual_without_hnf) +#define quat_lattice_equal SQISIGN_NAMESPACE(quat_lattice_equal) +#define quat_lattice_gram SQISIGN_NAMESPACE(quat_lattice_gram) +#define quat_lattice_hnf SQISIGN_NAMESPACE(quat_lattice_hnf) +#define quat_lattice_inclusion SQISIGN_NAMESPACE(quat_lattice_inclusion) +#define quat_lattice_index SQISIGN_NAMESPACE(quat_lattice_index) +#define quat_lattice_intersect SQISIGN_NAMESPACE(quat_lattice_intersect) +#define quat_lattice_mat_alg_coord_mul_without_hnf SQISIGN_NAMESPACE(quat_lattice_mat_alg_coord_mul_without_hnf) +#define 
quat_lattice_mul SQISIGN_NAMESPACE(quat_lattice_mul) +#define quat_lattice_reduce_denom SQISIGN_NAMESPACE(quat_lattice_reduce_denom) // Namespacing symbols exported from lll_applications.c: #undef quat_lideal_lideal_mul_reduced #undef quat_lideal_prime_norm_reduced_equivalent #undef quat_lideal_reduce_basis -#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE_GENERIC(quat_lideal_lideal_mul_reduced) -#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE_GENERIC(quat_lideal_prime_norm_reduced_equivalent) -#define quat_lideal_reduce_basis SQISIGN_NAMESPACE_GENERIC(quat_lideal_reduce_basis) +#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE(quat_lideal_lideal_mul_reduced) +#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE(quat_lideal_prime_norm_reduced_equivalent) +#define quat_lideal_reduce_basis SQISIGN_NAMESPACE(quat_lideal_reduce_basis) // Namespacing symbols exported from lll_verification.c: #undef ibq_vec_4_copy_ibz @@ -820,18 +822,18 @@ #undef quat_lll_set_ibq_parameters #undef quat_lll_verify -#define ibq_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_copy_ibz) -#define quat_lll_bilinear SQISIGN_NAMESPACE_GENERIC(quat_lll_bilinear) -#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE_GENERIC(quat_lll_gram_schmidt_transposed_with_ibq) -#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE_GENERIC(quat_lll_set_ibq_parameters) -#define quat_lll_verify SQISIGN_NAMESPACE_GENERIC(quat_lll_verify) +#define ibq_vec_4_copy_ibz SQISIGN_NAMESPACE(ibq_vec_4_copy_ibz) +#define quat_lll_bilinear SQISIGN_NAMESPACE(quat_lll_bilinear) +#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE(quat_lll_gram_schmidt_transposed_with_ibq) +#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE(quat_lll_set_ibq_parameters) +#define quat_lll_verify SQISIGN_NAMESPACE(quat_lll_verify) // Namespacing symbols exported from mem.c: #undef sqisign_secure_clear #undef sqisign_secure_free -#define sqisign_secure_clear SQISIGN_NAMESPACE_GENERIC(sqisign_secure_clear) -#define sqisign_secure_free SQISIGN_NAMESPACE_GENERIC(sqisign_secure_free) +#define sqisign_secure_clear SQISIGN_NAMESPACE(sqisign_secure_clear) +#define sqisign_secure_free SQISIGN_NAMESPACE(sqisign_secure_free) // Namespacing symbols exported from mp.c: #undef MUL @@ -854,25 +856,25 @@ #undef select_ct #undef swap_ct -#define MUL SQISIGN_NAMESPACE_GENERIC(MUL) -#define mp_add SQISIGN_NAMESPACE_GENERIC(mp_add) -#define mp_compare SQISIGN_NAMESPACE_GENERIC(mp_compare) -#define mp_copy SQISIGN_NAMESPACE_GENERIC(mp_copy) -#define mp_inv_2e SQISIGN_NAMESPACE_GENERIC(mp_inv_2e) -#define mp_invert_matrix SQISIGN_NAMESPACE_GENERIC(mp_invert_matrix) -#define mp_is_one SQISIGN_NAMESPACE_GENERIC(mp_is_one) -#define mp_is_zero SQISIGN_NAMESPACE_GENERIC(mp_is_zero) -#define mp_mod_2exp SQISIGN_NAMESPACE_GENERIC(mp_mod_2exp) -#define mp_mul SQISIGN_NAMESPACE_GENERIC(mp_mul) -#define mp_mul2 SQISIGN_NAMESPACE_GENERIC(mp_mul2) -#define mp_neg SQISIGN_NAMESPACE_GENERIC(mp_neg) -#define mp_print SQISIGN_NAMESPACE_GENERIC(mp_print) -#define mp_shiftl SQISIGN_NAMESPACE_GENERIC(mp_shiftl) -#define mp_shiftr SQISIGN_NAMESPACE_GENERIC(mp_shiftr) -#define mp_sub SQISIGN_NAMESPACE_GENERIC(mp_sub) -#define multiple_mp_shiftl SQISIGN_NAMESPACE_GENERIC(multiple_mp_shiftl) -#define select_ct SQISIGN_NAMESPACE_GENERIC(select_ct) -#define swap_ct SQISIGN_NAMESPACE_GENERIC(swap_ct) +#define MUL SQISIGN_NAMESPACE(MUL) +#define mp_add SQISIGN_NAMESPACE(mp_add) +#define mp_compare SQISIGN_NAMESPACE(mp_compare) 
+#define mp_copy SQISIGN_NAMESPACE(mp_copy) +#define mp_inv_2e SQISIGN_NAMESPACE(mp_inv_2e) +#define mp_invert_matrix SQISIGN_NAMESPACE(mp_invert_matrix) +#define mp_is_one SQISIGN_NAMESPACE(mp_is_one) +#define mp_is_zero SQISIGN_NAMESPACE(mp_is_zero) +#define mp_mod_2exp SQISIGN_NAMESPACE(mp_mod_2exp) +#define mp_mul SQISIGN_NAMESPACE(mp_mul) +#define mp_mul2 SQISIGN_NAMESPACE(mp_mul2) +#define mp_neg SQISIGN_NAMESPACE(mp_neg) +#define mp_print SQISIGN_NAMESPACE(mp_print) +#define mp_shiftl SQISIGN_NAMESPACE(mp_shiftl) +#define mp_shiftr SQISIGN_NAMESPACE(mp_shiftr) +#define mp_sub SQISIGN_NAMESPACE(mp_sub) +#define multiple_mp_shiftl SQISIGN_NAMESPACE(multiple_mp_shiftl) +#define select_ct SQISIGN_NAMESPACE(select_ct) +#define swap_ct SQISIGN_NAMESPACE(swap_ct) // Namespacing symbols exported from normeq.c: #undef quat_change_to_O0_basis @@ -882,12 +884,12 @@ #undef quat_represent_integer #undef quat_sampling_random_ideal_O0_given_norm -#define quat_change_to_O0_basis SQISIGN_NAMESPACE_GENERIC(quat_change_to_O0_basis) -#define quat_lattice_O0_set SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set) -#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set_extremal) -#define quat_order_elem_create SQISIGN_NAMESPACE_GENERIC(quat_order_elem_create) -#define quat_represent_integer SQISIGN_NAMESPACE_GENERIC(quat_represent_integer) -#define quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE_GENERIC(quat_sampling_random_ideal_O0_given_norm) +#define quat_change_to_O0_basis SQISIGN_NAMESPACE(quat_change_to_O0_basis) +#define quat_lattice_O0_set SQISIGN_NAMESPACE(quat_lattice_O0_set) +#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE(quat_lattice_O0_set_extremal) +#define quat_order_elem_create SQISIGN_NAMESPACE(quat_order_elem_create) +#define quat_represent_integer SQISIGN_NAMESPACE(quat_represent_integer) +#define quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE(quat_sampling_random_ideal_O0_given_norm) // Namespacing symbols exported from printer.c: #undef ibz_mat_2x2_print @@ -899,23 +901,23 @@ #undef quat_lattice_print #undef quat_left_ideal_print -#define ibz_mat_2x2_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_print) -#define ibz_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_print) -#define ibz_vec_2_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_print) -#define ibz_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_print) -#define quat_alg_elem_print SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_print) -#define quat_alg_print SQISIGN_NAMESPACE_GENERIC(quat_alg_print) -#define quat_lattice_print SQISIGN_NAMESPACE_GENERIC(quat_lattice_print) -#define quat_left_ideal_print SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_print) +#define ibz_mat_2x2_print SQISIGN_NAMESPACE(ibz_mat_2x2_print) +#define ibz_mat_4x4_print SQISIGN_NAMESPACE(ibz_mat_4x4_print) +#define ibz_vec_2_print SQISIGN_NAMESPACE(ibz_vec_2_print) +#define ibz_vec_4_print SQISIGN_NAMESPACE(ibz_vec_4_print) +#define quat_alg_elem_print SQISIGN_NAMESPACE(quat_alg_elem_print) +#define quat_alg_print SQISIGN_NAMESPACE(quat_alg_print) +#define quat_lattice_print SQISIGN_NAMESPACE(quat_lattice_print) +#define quat_left_ideal_print SQISIGN_NAMESPACE(quat_left_ideal_print) // Namespacing symbols exported from random_input_generation.c: #undef quat_test_input_random_ideal_generation #undef quat_test_input_random_ideal_lattice_generation #undef quat_test_input_random_lattice_generation -#define quat_test_input_random_ideal_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_generation) -#define 
quat_test_input_random_ideal_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_lattice_generation) -#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_lattice_generation) +#define quat_test_input_random_ideal_generation SQISIGN_NAMESPACE(quat_test_input_random_ideal_generation) +#define quat_test_input_random_ideal_lattice_generation SQISIGN_NAMESPACE(quat_test_input_random_ideal_lattice_generation) +#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE(quat_test_input_random_lattice_generation) // Namespacing symbols exported from rationals.c: #undef ibq_abs @@ -941,28 +943,28 @@ #undef ibq_vec_4_init #undef ibq_vec_4_print -#define ibq_abs SQISIGN_NAMESPACE_GENERIC(ibq_abs) -#define ibq_add SQISIGN_NAMESPACE_GENERIC(ibq_add) -#define ibq_cmp SQISIGN_NAMESPACE_GENERIC(ibq_cmp) -#define ibq_copy SQISIGN_NAMESPACE_GENERIC(ibq_copy) -#define ibq_finalize SQISIGN_NAMESPACE_GENERIC(ibq_finalize) -#define ibq_init SQISIGN_NAMESPACE_GENERIC(ibq_init) -#define ibq_inv SQISIGN_NAMESPACE_GENERIC(ibq_inv) -#define ibq_is_ibz SQISIGN_NAMESPACE_GENERIC(ibq_is_ibz) -#define ibq_is_one SQISIGN_NAMESPACE_GENERIC(ibq_is_one) -#define ibq_is_zero SQISIGN_NAMESPACE_GENERIC(ibq_is_zero) -#define ibq_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_finalize) -#define ibq_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_init) -#define ibq_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_print) -#define ibq_mul SQISIGN_NAMESPACE_GENERIC(ibq_mul) -#define ibq_neg SQISIGN_NAMESPACE_GENERIC(ibq_neg) -#define ibq_reduce SQISIGN_NAMESPACE_GENERIC(ibq_reduce) -#define ibq_set SQISIGN_NAMESPACE_GENERIC(ibq_set) -#define ibq_sub SQISIGN_NAMESPACE_GENERIC(ibq_sub) -#define ibq_to_ibz SQISIGN_NAMESPACE_GENERIC(ibq_to_ibz) -#define ibq_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_finalize) -#define ibq_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_init) -#define ibq_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_print) +#define ibq_abs SQISIGN_NAMESPACE(ibq_abs) +#define ibq_add SQISIGN_NAMESPACE(ibq_add) +#define ibq_cmp SQISIGN_NAMESPACE(ibq_cmp) +#define ibq_copy SQISIGN_NAMESPACE(ibq_copy) +#define ibq_finalize SQISIGN_NAMESPACE(ibq_finalize) +#define ibq_init SQISIGN_NAMESPACE(ibq_init) +#define ibq_inv SQISIGN_NAMESPACE(ibq_inv) +#define ibq_is_ibz SQISIGN_NAMESPACE(ibq_is_ibz) +#define ibq_is_one SQISIGN_NAMESPACE(ibq_is_one) +#define ibq_is_zero SQISIGN_NAMESPACE(ibq_is_zero) +#define ibq_mat_4x4_finalize SQISIGN_NAMESPACE(ibq_mat_4x4_finalize) +#define ibq_mat_4x4_init SQISIGN_NAMESPACE(ibq_mat_4x4_init) +#define ibq_mat_4x4_print SQISIGN_NAMESPACE(ibq_mat_4x4_print) +#define ibq_mul SQISIGN_NAMESPACE(ibq_mul) +#define ibq_neg SQISIGN_NAMESPACE(ibq_neg) +#define ibq_reduce SQISIGN_NAMESPACE(ibq_reduce) +#define ibq_set SQISIGN_NAMESPACE(ibq_set) +#define ibq_sub SQISIGN_NAMESPACE(ibq_sub) +#define ibq_to_ibz SQISIGN_NAMESPACE(ibq_to_ibz) +#define ibq_vec_4_finalize SQISIGN_NAMESPACE(ibq_vec_4_finalize) +#define ibq_vec_4_init SQISIGN_NAMESPACE(ibq_vec_4_init) +#define ibq_vec_4_print SQISIGN_NAMESPACE(ibq_vec_4_print) // Namespacing symbols exported from sign.c: #undef protocols_sign diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/tools.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/tools.c deleted file mode 100644 index 242ea08fe2..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/tools.c +++ /dev/null @@ -1,75 +0,0 @@ -#include -#include - -static clock_t global_timer; - -clock_t 
-tic(void) -{ - global_timer = clock(); - return global_timer; -} - -float -tac(void) -{ - float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); - return ms; -} - -float -TAC(const char *str) -{ - float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); -#ifndef NDEBUG - printf("%s [%d ms]\n", str, (int)ms); -#endif - return ms; -} - -float -toc(const clock_t t) -{ - float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); - return ms; -} - -float -TOC(const clock_t t, const char *str) -{ - float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); - printf("%s [%d ms]\n", str, (int)ms); - return ms; - // printf("%s [%ld]\n",str,clock()-t); - // return (float) (clock()-t); -} - -float -TOC_clock(const clock_t t, const char *str) -{ - printf("%s [%ld]\n", str, clock() - t); - return (float)(clock() - t); -} - -clock_t -dclock(const clock_t t) -{ - return (clock() - t); -} - -float -clock_to_time(const clock_t t, const char *str) -{ - float ms = (1000. * (float)(t) / CLOCKS_PER_SEC); - printf("%s [%d ms]\n", str, (int)ms); - return ms; - // printf("%s [%ld]\n",str,t); - // return (float) (t); -} - -float -clock_print(const clock_t t, const char *str) -{ - printf("%s [%ld]\n", str, t); - return (float)(t); -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_system.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_system.c deleted file mode 100644 index 689c29b242..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_system.c +++ /dev/null @@ -1,431 +0,0 @@ -// SPDX-License-Identifier: MIT - -/* -The MIT License -Copyright (c) 2017 Daan Sprenkels -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. -*/ - -#include - -#ifdef ENABLE_CT_TESTING -#include -#endif - -// In the case that are compiling on linux, we need to define _GNU_SOURCE -// *before* randombytes.h is included. Otherwise SYS_getrandom will not be -// declared. -#if defined(__linux__) || defined(__GNU__) -#define _GNU_SOURCE -#endif /* defined(__linux__) || defined(__GNU__) */ - -#if defined(_WIN32) -/* Windows */ -#include -#include /* CryptAcquireContext, CryptGenRandom */ -#endif /* defined(_WIN32) */ - -/* wasi */ -#if defined(__wasi__) -#include -#endif - -/* kFreeBSD */ -#if defined(__FreeBSD_kernel__) && defined(__GLIBC__) -#define GNU_KFREEBSD -#endif - -#if defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) -/* Linux */ -// We would need to include , but not every target has access -// to the linux headers. 
We only need RNDGETENTCNT, so we instead inline it. -// RNDGETENTCNT is originally defined in `include/uapi/linux/random.h` in the -// linux repo. -#define RNDGETENTCNT 0x80045200 - -#include -#include -#include -#include -#include -#include -#include -#if (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && \ - ((__GLIBC__ > 2) || (__GLIBC_MINOR__ > 24)) -#define USE_GLIBC -#include -#endif /* (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && ((__GLIBC__ > 2) || \ - (__GLIBC_MINOR__ > 24)) */ -#include -#include -#include -#include - -// We need SSIZE_MAX as the maximum read len from /dev/urandom -#if !defined(SSIZE_MAX) -#define SSIZE_MAX (SIZE_MAX / 2 - 1) -#endif /* defined(SSIZE_MAX) */ - -#endif /* defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) */ - -#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) -/* Dragonfly, FreeBSD, NetBSD, OpenBSD (has arc4random) */ -#include -#if defined(BSD) -#include -#endif -/* GNU/Hurd defines BSD in sys/param.h which causes problems later */ -#if defined(__GNU__) -#undef BSD -#endif -#endif - -#if defined(__EMSCRIPTEN__) -#include -#include -#include -#include -#endif /* defined(__EMSCRIPTEN__) */ - -#if defined(_WIN32) -static int -randombytes_win32_randombytes(void *buf, size_t n) -{ - HCRYPTPROV ctx; - BOOL tmp; - DWORD to_read = 0; - const size_t MAX_DWORD = 0xFFFFFFFF; - - tmp = CryptAcquireContext(&ctx, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT); - if (tmp == FALSE) - return -1; - - while (n > 0) { - to_read = (DWORD)(n < MAX_DWORD ? n : MAX_DWORD); - tmp = CryptGenRandom(ctx, to_read, (BYTE *)buf); - if (tmp == FALSE) - return -1; - buf = ((char *)buf) + to_read; - n -= to_read; - } - - tmp = CryptReleaseContext(ctx, 0); - if (tmp == FALSE) - return -1; - - return 0; -} -#endif /* defined(_WIN32) */ - -#if defined(__wasi__) -static int -randombytes_wasi_randombytes(void *buf, size_t n) -{ - arc4random_buf(buf, n); - return 0; -} -#endif /* defined(__wasi__) */ - -#if (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || defined(SYS_getrandom)) -#if defined(USE_GLIBC) -// getrandom is declared in glibc. -#elif defined(SYS_getrandom) -static ssize_t -getrandom(void *buf, size_t buflen, unsigned int flags) -{ - return syscall(SYS_getrandom, buf, buflen, flags); -} -#endif - -static int -randombytes_linux_randombytes_getrandom(void *buf, size_t n) -{ - /* I have thought about using a separate PRF, seeded by getrandom, but - * it turns out that the performance of getrandom is good enough - * (250 MB/s on my laptop). - */ - size_t offset = 0, chunk; - int ret; - while (n > 0) { - /* getrandom does not allow chunks larger than 33554431 */ - chunk = n <= 33554431 ? 
n : 33554431; - do { - ret = getrandom((char *)buf + offset, chunk, 0); - } while (ret == -1 && errno == EINTR); - if (ret < 0) - return ret; - offset += ret; - n -= ret; - } - assert(n == 0); - return 0; -} -#endif /* (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || \ - defined(SYS_getrandom)) */ - -#if (defined(__linux__) || defined(GNU_KFREEBSD)) && !defined(SYS_getrandom) - -#if defined(__linux__) -static int -randombytes_linux_read_entropy_ioctl(int device, int *entropy) -{ - return ioctl(device, RNDGETENTCNT, entropy); -} - -static int -randombytes_linux_read_entropy_proc(FILE *stream, int *entropy) -{ - int retcode; - do { - rewind(stream); - retcode = fscanf(stream, "%d", entropy); - } while (retcode != 1 && errno == EINTR); - if (retcode != 1) { - return -1; - } - return 0; -} - -static int -randombytes_linux_wait_for_entropy(int device) -{ - /* We will block on /dev/random, because any increase in the OS' entropy - * level will unblock the request. I use poll here (as does libsodium), - * because we don't *actually* want to read from the device. */ - enum - { - IOCTL, - PROC - } strategy = IOCTL; - const int bits = 128; - struct pollfd pfd; - int fd; - FILE *proc_file; - int retcode, retcode_error = 0; // Used as return codes throughout this function - int entropy = 0; - - /* If the device has enough entropy already, we will want to return early */ - retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); - // printf("errno: %d (%s)\n", errno, strerror(errno)); - if (retcode != 0 && (errno == ENOTTY || errno == ENOSYS)) { - // The ioctl call on /dev/urandom has failed due to a - // - ENOTTY (unsupported action), or - // - ENOSYS (invalid ioctl; this happens on MIPS, see #22). - // - // We will fall back to reading from - // `/proc/sys/kernel/random/entropy_avail`. This less ideal, - // because it allocates a file descriptor, and it may not work - // in a chroot. But at this point it seems we have no better - // options left. 
- strategy = PROC; - // Open the entropy count file - proc_file = fopen("/proc/sys/kernel/random/entropy_avail", "r"); - if (proc_file == NULL) { - return -1; - } - } else if (retcode != 0) { - // Unrecoverable ioctl error - return -1; - } - if (entropy >= bits) { - return 0; - } - - do { - fd = open("/dev/random", O_RDONLY); - } while (fd == -1 && errno == EINTR); /* EAGAIN will not occur */ - if (fd == -1) { - /* Unrecoverable IO error */ - return -1; - } - - pfd.fd = fd; - pfd.events = POLLIN; - for (;;) { - retcode = poll(&pfd, 1, -1); - if (retcode == -1 && (errno == EINTR || errno == EAGAIN)) { - continue; - } else if (retcode == 1) { - if (strategy == IOCTL) { - retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); - } else if (strategy == PROC) { - retcode = randombytes_linux_read_entropy_proc(proc_file, &entropy); - } else { - return -1; // Unreachable - } - - if (retcode != 0) { - // Unrecoverable I/O error - retcode_error = retcode; - break; - } - if (entropy >= bits) { - break; - } - } else { - // Unreachable: poll() should only return -1 or 1 - retcode_error = -1; - break; - } - } - do { - retcode = close(fd); - } while (retcode == -1 && errno == EINTR); - if (strategy == PROC) { - do { - retcode = fclose(proc_file); - } while (retcode == -1 && errno == EINTR); - } - if (retcode_error != 0) { - return retcode_error; - } - return retcode; -} -#endif /* defined(__linux__) */ - -static int -randombytes_linux_randombytes_urandom(void *buf, size_t n) -{ - int fd; - size_t offset = 0, count; - ssize_t tmp; - do { - fd = open("/dev/urandom", O_RDONLY); - } while (fd == -1 && errno == EINTR); - if (fd == -1) - return -1; -#if defined(__linux__) - if (randombytes_linux_wait_for_entropy(fd) == -1) - return -1; -#endif - - while (n > 0) { - count = n <= SSIZE_MAX ? 
n : SSIZE_MAX; - tmp = read(fd, (char *)buf + offset, count); - if (tmp == -1 && (errno == EAGAIN || errno == EINTR)) { - continue; - } - if (tmp == -1) - return -1; /* Unrecoverable IO error */ - offset += tmp; - n -= tmp; - } - close(fd); - assert(n == 0); - return 0; -} -#endif /* defined(__linux__) && !defined(SYS_getrandom) */ - -#if defined(BSD) -static int -randombytes_bsd_randombytes(void *buf, size_t n) -{ - arc4random_buf(buf, n); - return 0; -} -#endif /* defined(BSD) */ - -#if defined(__EMSCRIPTEN__) -static int -randombytes_js_randombytes_nodejs(void *buf, size_t n) -{ - const int ret = EM_ASM_INT( - { - var crypto; - try { - crypto = require('crypto'); - } catch (error) { - return -2; - } - try { - writeArrayToMemory(crypto.randomBytes($1), $0); - return 0; - } catch (error) { - return -1; - } - }, - buf, - n); - switch (ret) { - case 0: - return 0; - case -1: - errno = EINVAL; - return -1; - case -2: - errno = ENOSYS; - return -1; - } - assert(false); // Unreachable -} -#endif /* defined(__EMSCRIPTEN__) */ - -SQISIGN_API -int -randombytes_select(unsigned char *buf, unsigned long long n) -{ -#if defined(__EMSCRIPTEN__) - return randombytes_js_randombytes_nodejs(buf, n); -#elif defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) -#if defined(USE_GLIBC) - /* Use getrandom system call */ - return randombytes_linux_randombytes_getrandom(buf, n); -#elif defined(SYS_getrandom) - /* Use getrandom system call */ - return randombytes_linux_randombytes_getrandom(buf, n); -#else - /* When we have enough entropy, we can read from /dev/urandom */ - return randombytes_linux_randombytes_urandom(buf, n); -#endif -#elif defined(BSD) - /* Use arc4random system call */ - return randombytes_bsd_randombytes(buf, n); -#elif defined(_WIN32) - /* Use windows API */ - return randombytes_win32_randombytes(buf, n); -#elif defined(__wasi__) - /* Use WASI */ - return randombytes_wasi_randombytes(buf, n); -#else -#error "randombytes(...) is not supported on this platform" -#endif -} - -#ifdef RANDOMBYTES_SYSTEM -SQISIGN_API -int -randombytes(unsigned char *x, unsigned long long xlen) -{ - - int ret = randombytes_select(x, (size_t)xlen); -#ifdef ENABLE_CT_TESTING - VALGRIND_MAKE_MEM_UNDEFINED(x, xlen); -#endif - return ret; -} - -SQISIGN_API -void -randombytes_init(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength) -{ - (void)entropy_input; - (void)personalization_string; - (void)security_strength; -} -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/rng.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/rng.h index 0a9ca0e465..d0861ac036 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/rng.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/rng.h @@ -3,41 +3,11 @@ #ifndef rng_h #define rng_h -#include +#include -/** - * Randombytes initialization. - * Initialization may be needed for some random number generators (e.g. CTR-DRBG). - * - * @param[in] entropy_input 48 bytes entropy input - * @param[in] personalization_string Personalization string - * @param[in] security_strength Security string - */ -SQISIGN_API -void randombytes_init(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength); - -/** - * Random byte generation using /dev/urandom. - * The caller is responsible to allocate sufficient memory to hold x. - * - * @param[out] x Memory to hold the random bytes. 
- * @param[in] xlen Number of random bytes to be generated - * @return int 0 on success, -1 otherwise - */ -SQISIGN_API -int randombytes_select(unsigned char *x, unsigned long long xlen); - -/** - * Random byte generation. - * The caller is responsible to allocate sufficient memory to hold x. - * - * @param[out] x Memory to hold the random bytes. - * @param[in] xlen Number of random bytes to be generated - * @return int 0 on success, -1 otherwise - */ -SQISIGN_API -int randombytes(unsigned char *x, unsigned long long xlen); +static int randombytes(unsigned char *x, unsigned long long xlen){ + OQS_randombytes(x, xlen); + return 0; +} #endif /* rng_h */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sig.h index 4c33510084..a5bc04e6e4 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sig.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sig.h @@ -17,7 +17,7 @@ * @param[out] sk SQIsign secret key * @return int status code */ -SQISIGN_API +SQISIGN_API int sqisign_keypair(unsigned char *pk, unsigned char *sk); /** @@ -34,12 +34,34 @@ int sqisign_keypair(unsigned char *pk, unsigned char *sk); * @param[in] sk Compacted secret key * @return int status code */ -SQISIGN_API +SQISIGN_API int sqisign_sign(unsigned char *sm, unsigned long long *smlen, const unsigned char *m, unsigned long long mlen, const unsigned char *sk); + +/** + * Alternate SQIsign signature generation. Used for liboqs compatibility. + * + * The implementation performs SQIsign.expandSK() + SQIsign.sign() in the SQIsign spec. + * The key provided is a compacted secret key. + * The caller is responsible to allocate sufficient memory to hold s. + * + * @param[out] s Signature + * @param[out] slen Pointer to the length of s + * @param[in] m Message to be signed + * @param[in] mlen Message length + * @param[in] sk Compacted secret key + * @return int status code + */ +SQISIGN_API +int +sqisign_sign_signature(unsigned char *s, + unsigned long long *slen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *sk); #endif /** @@ -75,11 +97,30 @@ int sqisign_open(unsigned char *m, * @param[in] pk Compacted public key * @return int 0 if verification succeeded, 1 otherwise. */ -SQISIGN_API +SQISIGN_API int sqisign_verify(const unsigned char *m, unsigned long long mlen, const unsigned char *sig, unsigned long long siglen, const unsigned char *pk); +/** + * Alternate SQIsign signature verification. Used for liboqs compatibility. + * + * If the signature verification succeeded, returns 0, otherwise 1. + * + * @param[in] sig Signature + * @param[in] siglen Length of sig + * @param[in] m Message + * @param[in] mlen Message length + * @param[in] pk Compacted public key + * @return int 0 if verification succeeded, 1 otherwise.
+ */ +SQISIGN_API +int +sqisign_verify_signature(const unsigned char *sig, + unsigned long long siglen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *pk); #endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign_namespace.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign_namespace.h index 007d2572b9..bbfe72c13b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign_namespace.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign_namespace.h @@ -18,12 +18,6 @@ #define PARAM_JOIN2(a, b) PARAM_JOIN2_(a, b) #define PARAM_NAME2(end, s) PARAM_JOIN2(end, s) -#ifndef DISABLE_NAMESPACING -#define SQISIGN_NAMESPACE_GENERIC(s) PARAM_NAME2(gen, s) -#else -#define SQISIGN_NAMESPACE_GENERIC(s) s -#endif - #if defined(SQISIGN_VARIANT) && !defined(DISABLE_NAMESPACING) #if defined(SQISIGN_BUILD_TYPE_REF) #define SQISIGN_NAMESPACE(s) PARAM_NAME3(ref, s) @@ -60,23 +54,23 @@ #undef quat_alg_scalar #undef quat_alg_sub -#define quat_alg_add SQISIGN_NAMESPACE_GENERIC(quat_alg_add) -#define quat_alg_conj SQISIGN_NAMESPACE_GENERIC(quat_alg_conj) -#define quat_alg_coord_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_coord_mul) -#define quat_alg_elem_copy SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy) -#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy_ibz) -#define quat_alg_elem_equal SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_equal) -#define quat_alg_elem_is_zero SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_is_zero) -#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_mul_by_scalar) -#define quat_alg_elem_set SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_set) -#define quat_alg_equal_denom SQISIGN_NAMESPACE_GENERIC(quat_alg_equal_denom) -#define quat_alg_init_set_ui SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set_ui) -#define quat_alg_make_primitive SQISIGN_NAMESPACE_GENERIC(quat_alg_make_primitive) -#define quat_alg_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_mul) -#define quat_alg_norm SQISIGN_NAMESPACE_GENERIC(quat_alg_norm) -#define quat_alg_normalize SQISIGN_NAMESPACE_GENERIC(quat_alg_normalize) -#define quat_alg_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_scalar) -#define quat_alg_sub SQISIGN_NAMESPACE_GENERIC(quat_alg_sub) +#define quat_alg_add SQISIGN_NAMESPACE(quat_alg_add) +#define quat_alg_conj SQISIGN_NAMESPACE(quat_alg_conj) +#define quat_alg_coord_mul SQISIGN_NAMESPACE(quat_alg_coord_mul) +#define quat_alg_elem_copy SQISIGN_NAMESPACE(quat_alg_elem_copy) +#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE(quat_alg_elem_copy_ibz) +#define quat_alg_elem_equal SQISIGN_NAMESPACE(quat_alg_elem_equal) +#define quat_alg_elem_is_zero SQISIGN_NAMESPACE(quat_alg_elem_is_zero) +#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE(quat_alg_elem_mul_by_scalar) +#define quat_alg_elem_set SQISIGN_NAMESPACE(quat_alg_elem_set) +#define quat_alg_equal_denom SQISIGN_NAMESPACE(quat_alg_equal_denom) +#define quat_alg_init_set_ui SQISIGN_NAMESPACE(quat_alg_init_set_ui) +#define quat_alg_make_primitive SQISIGN_NAMESPACE(quat_alg_make_primitive) +#define quat_alg_mul SQISIGN_NAMESPACE(quat_alg_mul) +#define quat_alg_norm SQISIGN_NAMESPACE(quat_alg_norm) +#define quat_alg_normalize SQISIGN_NAMESPACE(quat_alg_normalize) +#define quat_alg_scalar SQISIGN_NAMESPACE(quat_alg_scalar) +#define quat_alg_sub SQISIGN_NAMESPACE(quat_alg_sub) // Namespacing symbols exported from api.c: #undef crypto_sign @@ -134,14 +128,14 @@ #undef ibz_mat_2x2_set #undef ibz_vec_2_set -#define ibz_2x2_mul_mod 
SQISIGN_NAMESPACE_GENERIC(ibz_2x2_mul_mod) -#define ibz_mat_2x2_add SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_add) -#define ibz_mat_2x2_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_copy) -#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_det_from_ibz) -#define ibz_mat_2x2_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_eval) -#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_inv_mod) -#define ibz_mat_2x2_set SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_set) -#define ibz_vec_2_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_set) +#define ibz_2x2_mul_mod SQISIGN_NAMESPACE(ibz_2x2_mul_mod) +#define ibz_mat_2x2_add SQISIGN_NAMESPACE(ibz_mat_2x2_add) +#define ibz_mat_2x2_copy SQISIGN_NAMESPACE(ibz_mat_2x2_copy) +#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE(ibz_mat_2x2_det_from_ibz) +#define ibz_mat_2x2_eval SQISIGN_NAMESPACE(ibz_mat_2x2_eval) +#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE(ibz_mat_2x2_inv_mod) +#define ibz_mat_2x2_set SQISIGN_NAMESPACE(ibz_mat_2x2_set) +#define ibz_vec_2_set SQISIGN_NAMESPACE(ibz_vec_2_set) // Namespacing symbols exported from dim2id2iso.c: #undef dim2id2iso_arbitrary_isogeny_evaluation @@ -184,34 +178,34 @@ #undef ibz_vec_4_sub #undef quat_qf_eval -#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_mpm) -#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_pmp) -#define ibz_mat_4x4_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_copy) -#define ibz_mat_4x4_equal SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_equal) -#define ibz_mat_4x4_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval) -#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval_t) -#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_gcd) -#define ibz_mat_4x4_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_identity) -#define ibz_mat_4x4_inv_with_det_as_denom SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_inv_with_det_as_denom) -#define ibz_mat_4x4_is_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_identity) -#define ibz_mat_4x4_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_mul) -#define ibz_mat_4x4_negate SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_negate) -#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_div) -#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_mul) -#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_transpose) -#define ibz_mat_4x4_zero SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_zero) -#define ibz_vec_4_add SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_add) -#define ibz_vec_4_content SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_content) -#define ibz_vec_4_copy SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy) -#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_ibz) -#define ibz_vec_4_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_is_zero) -#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination) -#define ibz_vec_4_negate SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_negate) -#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_div) -#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul) -#define ibz_vec_4_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_set) -#define ibz_vec_4_sub SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_sub) -#define quat_qf_eval SQISIGN_NAMESPACE_GENERIC(quat_qf_eval) +#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE(ibz_inv_dim4_make_coeff_mpm) +#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE(ibz_inv_dim4_make_coeff_pmp) +#define ibz_mat_4x4_copy 
SQISIGN_NAMESPACE(ibz_mat_4x4_copy) +#define ibz_mat_4x4_equal SQISIGN_NAMESPACE(ibz_mat_4x4_equal) +#define ibz_mat_4x4_eval SQISIGN_NAMESPACE(ibz_mat_4x4_eval) +#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE(ibz_mat_4x4_eval_t) +#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE(ibz_mat_4x4_gcd) +#define ibz_mat_4x4_identity SQISIGN_NAMESPACE(ibz_mat_4x4_identity) +#define ibz_mat_4x4_inv_with_det_as_denom SQISIGN_NAMESPACE(ibz_mat_4x4_inv_with_det_as_denom) +#define ibz_mat_4x4_is_identity SQISIGN_NAMESPACE(ibz_mat_4x4_is_identity) +#define ibz_mat_4x4_mul SQISIGN_NAMESPACE(ibz_mat_4x4_mul) +#define ibz_mat_4x4_negate SQISIGN_NAMESPACE(ibz_mat_4x4_negate) +#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE(ibz_mat_4x4_scalar_div) +#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE(ibz_mat_4x4_scalar_mul) +#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE(ibz_mat_4x4_transpose) +#define ibz_mat_4x4_zero SQISIGN_NAMESPACE(ibz_mat_4x4_zero) +#define ibz_vec_4_add SQISIGN_NAMESPACE(ibz_vec_4_add) +#define ibz_vec_4_content SQISIGN_NAMESPACE(ibz_vec_4_content) +#define ibz_vec_4_copy SQISIGN_NAMESPACE(ibz_vec_4_copy) +#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE(ibz_vec_4_copy_ibz) +#define ibz_vec_4_is_zero SQISIGN_NAMESPACE(ibz_vec_4_is_zero) +#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE(ibz_vec_4_linear_combination) +#define ibz_vec_4_negate SQISIGN_NAMESPACE(ibz_vec_4_negate) +#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE(ibz_vec_4_scalar_div) +#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE(ibz_vec_4_scalar_mul) +#define ibz_vec_4_set SQISIGN_NAMESPACE(ibz_vec_4_set) +#define ibz_vec_4_sub SQISIGN_NAMESPACE(ibz_vec_4_sub) +#define quat_qf_eval SQISIGN_NAMESPACE(quat_qf_eval) // Namespacing symbols exported from ec.c: #undef cswap_points @@ -339,22 +333,22 @@ #undef quat_left_ideal_finalize #undef quat_left_ideal_init -#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_finalize) -#define ibz_mat_2x2_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_init) -#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_finalize) -#define ibz_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_init) -#define ibz_vec_2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_finalize) -#define ibz_vec_2_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_init) -#define ibz_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_finalize) -#define ibz_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_init) -#define quat_alg_elem_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_finalize) -#define quat_alg_elem_init SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_init) -#define quat_alg_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_finalize) -#define quat_alg_init_set SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set) -#define quat_lattice_finalize SQISIGN_NAMESPACE_GENERIC(quat_lattice_finalize) -#define quat_lattice_init SQISIGN_NAMESPACE_GENERIC(quat_lattice_init) -#define quat_left_ideal_finalize SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_finalize) -#define quat_left_ideal_init SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_init) +#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE(ibz_mat_2x2_finalize) +#define ibz_mat_2x2_init SQISIGN_NAMESPACE(ibz_mat_2x2_init) +#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE(ibz_mat_4x4_finalize) +#define ibz_mat_4x4_init SQISIGN_NAMESPACE(ibz_mat_4x4_init) +#define ibz_vec_2_finalize SQISIGN_NAMESPACE(ibz_vec_2_finalize) +#define ibz_vec_2_init SQISIGN_NAMESPACE(ibz_vec_2_init) +#define ibz_vec_4_finalize SQISIGN_NAMESPACE(ibz_vec_4_finalize) +#define ibz_vec_4_init SQISIGN_NAMESPACE(ibz_vec_4_init) 
+#define quat_alg_elem_finalize SQISIGN_NAMESPACE(quat_alg_elem_finalize) +#define quat_alg_elem_init SQISIGN_NAMESPACE(quat_alg_elem_init) +#define quat_alg_finalize SQISIGN_NAMESPACE(quat_alg_finalize) +#define quat_alg_init_set SQISIGN_NAMESPACE(quat_alg_init_set) +#define quat_lattice_finalize SQISIGN_NAMESPACE(quat_lattice_finalize) +#define quat_lattice_init SQISIGN_NAMESPACE(quat_lattice_init) +#define quat_left_ideal_finalize SQISIGN_NAMESPACE(quat_left_ideal_finalize) +#define quat_left_ideal_init SQISIGN_NAMESPACE(quat_left_ideal_init) // Namespacing symbols exported from fp.c: #undef fp_select @@ -567,11 +561,11 @@ #undef ibz_vec_4_linear_combination_mod #undef ibz_vec_4_scalar_mul_mod -#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_hnf) -#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE_GENERIC(ibz_mat_4xn_hnf_mod_core) -#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_mod) -#define ibz_vec_4_linear_combination_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination_mod) -#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul_mod) +#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE(ibz_mat_4x4_is_hnf) +#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE(ibz_mat_4xn_hnf_mod_core) +#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE(ibz_vec_4_copy_mod) +#define ibz_vec_4_linear_combination_mod SQISIGN_NAMESPACE(ibz_vec_4_linear_combination_mod) +#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE(ibz_vec_4_scalar_mul_mod) // Namespacing symbols exported from hnf_internal.c: #undef ibz_centered_mod @@ -579,15 +573,15 @@ #undef ibz_mod_not_zero #undef ibz_xgcd_with_u_not_0 -#define ibz_centered_mod SQISIGN_NAMESPACE_GENERIC(ibz_centered_mod) -#define ibz_conditional_assign SQISIGN_NAMESPACE_GENERIC(ibz_conditional_assign) -#define ibz_mod_not_zero SQISIGN_NAMESPACE_GENERIC(ibz_mod_not_zero) -#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE_GENERIC(ibz_xgcd_with_u_not_0) +#define ibz_centered_mod SQISIGN_NAMESPACE(ibz_centered_mod) +#define ibz_conditional_assign SQISIGN_NAMESPACE(ibz_conditional_assign) +#define ibz_mod_not_zero SQISIGN_NAMESPACE(ibz_mod_not_zero) +#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE(ibz_xgcd_with_u_not_0) // Namespacing symbols exported from ibz_division.c: #undef ibz_xgcd -#define ibz_xgcd SQISIGN_NAMESPACE_GENERIC(ibz_xgcd) +#define ibz_xgcd SQISIGN_NAMESPACE(ibz_xgcd) // Namespacing symbols exported from id2iso.c: #undef change_of_basis_matrix_tate @@ -624,22 +618,22 @@ #undef quat_order_discriminant #undef quat_order_is_maximal -#define quat_lideal_add SQISIGN_NAMESPACE_GENERIC(quat_lideal_add) -#define quat_lideal_class_gram SQISIGN_NAMESPACE_GENERIC(quat_lideal_class_gram) -#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_conjugate_without_hnf) -#define quat_lideal_copy SQISIGN_NAMESPACE_GENERIC(quat_lideal_copy) -#define quat_lideal_create SQISIGN_NAMESPACE_GENERIC(quat_lideal_create) -#define quat_lideal_create_principal SQISIGN_NAMESPACE_GENERIC(quat_lideal_create_principal) -#define quat_lideal_equals SQISIGN_NAMESPACE_GENERIC(quat_lideal_equals) -#define quat_lideal_generator SQISIGN_NAMESPACE_GENERIC(quat_lideal_generator) -#define quat_lideal_inter SQISIGN_NAMESPACE_GENERIC(quat_lideal_inter) -#define quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_inverse_lattice_without_hnf) -#define quat_lideal_mul SQISIGN_NAMESPACE_GENERIC(quat_lideal_mul) -#define quat_lideal_norm SQISIGN_NAMESPACE_GENERIC(quat_lideal_norm) -#define 
quat_lideal_right_order SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_order) -#define quat_lideal_right_transporter SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_transporter) -#define quat_order_discriminant SQISIGN_NAMESPACE_GENERIC(quat_order_discriminant) -#define quat_order_is_maximal SQISIGN_NAMESPACE_GENERIC(quat_order_is_maximal) +#define quat_lideal_add SQISIGN_NAMESPACE(quat_lideal_add) +#define quat_lideal_class_gram SQISIGN_NAMESPACE(quat_lideal_class_gram) +#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE(quat_lideal_conjugate_without_hnf) +#define quat_lideal_copy SQISIGN_NAMESPACE(quat_lideal_copy) +#define quat_lideal_create SQISIGN_NAMESPACE(quat_lideal_create) +#define quat_lideal_create_principal SQISIGN_NAMESPACE(quat_lideal_create_principal) +#define quat_lideal_equals SQISIGN_NAMESPACE(quat_lideal_equals) +#define quat_lideal_generator SQISIGN_NAMESPACE(quat_lideal_generator) +#define quat_lideal_inter SQISIGN_NAMESPACE(quat_lideal_inter) +#define quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE(quat_lideal_inverse_lattice_without_hnf) +#define quat_lideal_mul SQISIGN_NAMESPACE(quat_lideal_mul) +#define quat_lideal_norm SQISIGN_NAMESPACE(quat_lideal_norm) +#define quat_lideal_right_order SQISIGN_NAMESPACE(quat_lideal_right_order) +#define quat_lideal_right_transporter SQISIGN_NAMESPACE(quat_lideal_right_transporter) +#define quat_order_discriminant SQISIGN_NAMESPACE(quat_order_discriminant) +#define quat_order_is_maximal SQISIGN_NAMESPACE(quat_order_is_maximal) // Namespacing symbols exported from intbig.c: #undef ibz_abs @@ -647,6 +641,10 @@ #undef ibz_bitsize #undef ibz_cmp #undef ibz_cmp_int32 +#undef ibz_const_one +#undef ibz_const_three +#undef ibz_const_two +#undef ibz_const_zero #undef ibz_convert_to_str #undef ibz_copy #undef ibz_copy_digits @@ -687,57 +685,61 @@ #undef ibz_to_digits #undef ibz_two_adic -#define ibz_abs SQISIGN_NAMESPACE_GENERIC(ibz_abs) -#define ibz_add SQISIGN_NAMESPACE_GENERIC(ibz_add) -#define ibz_bitsize SQISIGN_NAMESPACE_GENERIC(ibz_bitsize) -#define ibz_cmp SQISIGN_NAMESPACE_GENERIC(ibz_cmp) -#define ibz_cmp_int32 SQISIGN_NAMESPACE_GENERIC(ibz_cmp_int32) -#define ibz_convert_to_str SQISIGN_NAMESPACE_GENERIC(ibz_convert_to_str) -#define ibz_copy SQISIGN_NAMESPACE_GENERIC(ibz_copy) -#define ibz_copy_digits SQISIGN_NAMESPACE_GENERIC(ibz_copy_digits) -#define ibz_div SQISIGN_NAMESPACE_GENERIC(ibz_div) -#define ibz_div_2exp SQISIGN_NAMESPACE_GENERIC(ibz_div_2exp) -#define ibz_div_floor SQISIGN_NAMESPACE_GENERIC(ibz_div_floor) -#define ibz_divides SQISIGN_NAMESPACE_GENERIC(ibz_divides) -#define ibz_finalize SQISIGN_NAMESPACE_GENERIC(ibz_finalize) -#define ibz_gcd SQISIGN_NAMESPACE_GENERIC(ibz_gcd) -#define ibz_get SQISIGN_NAMESPACE_GENERIC(ibz_get) -#define ibz_init SQISIGN_NAMESPACE_GENERIC(ibz_init) -#define ibz_invmod SQISIGN_NAMESPACE_GENERIC(ibz_invmod) -#define ibz_is_even SQISIGN_NAMESPACE_GENERIC(ibz_is_even) -#define ibz_is_odd SQISIGN_NAMESPACE_GENERIC(ibz_is_odd) -#define ibz_is_one SQISIGN_NAMESPACE_GENERIC(ibz_is_one) -#define ibz_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_is_zero) -#define ibz_legendre SQISIGN_NAMESPACE_GENERIC(ibz_legendre) -#define ibz_mod SQISIGN_NAMESPACE_GENERIC(ibz_mod) -#define ibz_mod_ui SQISIGN_NAMESPACE_GENERIC(ibz_mod_ui) -#define ibz_mul SQISIGN_NAMESPACE_GENERIC(ibz_mul) -#define ibz_neg SQISIGN_NAMESPACE_GENERIC(ibz_neg) -#define ibz_pow SQISIGN_NAMESPACE_GENERIC(ibz_pow) -#define ibz_pow_mod SQISIGN_NAMESPACE_GENERIC(ibz_pow_mod) -#define ibz_print SQISIGN_NAMESPACE_GENERIC(ibz_print) 
-#define ibz_probab_prime SQISIGN_NAMESPACE_GENERIC(ibz_probab_prime) -#define ibz_rand_interval SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval) -#define ibz_rand_interval_bits SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_bits) -#define ibz_rand_interval_i SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_i) -#define ibz_rand_interval_minm_m SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_minm_m) -#define ibz_set SQISIGN_NAMESPACE_GENERIC(ibz_set) -#define ibz_set_from_str SQISIGN_NAMESPACE_GENERIC(ibz_set_from_str) -#define ibz_size_in_base SQISIGN_NAMESPACE_GENERIC(ibz_size_in_base) -#define ibz_sqrt SQISIGN_NAMESPACE_GENERIC(ibz_sqrt) -#define ibz_sqrt_floor SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_floor) -#define ibz_sqrt_mod_p SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_mod_p) -#define ibz_sub SQISIGN_NAMESPACE_GENERIC(ibz_sub) -#define ibz_swap SQISIGN_NAMESPACE_GENERIC(ibz_swap) -#define ibz_to_digits SQISIGN_NAMESPACE_GENERIC(ibz_to_digits) -#define ibz_two_adic SQISIGN_NAMESPACE_GENERIC(ibz_two_adic) +#define ibz_abs SQISIGN_NAMESPACE(ibz_abs) +#define ibz_add SQISIGN_NAMESPACE(ibz_add) +#define ibz_bitsize SQISIGN_NAMESPACE(ibz_bitsize) +#define ibz_cmp SQISIGN_NAMESPACE(ibz_cmp) +#define ibz_cmp_int32 SQISIGN_NAMESPACE(ibz_cmp_int32) +#define ibz_const_one SQISIGN_NAMESPACE(ibz_const_one) +#define ibz_const_three SQISIGN_NAMESPACE(ibz_const_three) +#define ibz_const_two SQISIGN_NAMESPACE(ibz_const_two) +#define ibz_const_zero SQISIGN_NAMESPACE(ibz_const_zero) +#define ibz_convert_to_str SQISIGN_NAMESPACE(ibz_convert_to_str) +#define ibz_copy SQISIGN_NAMESPACE(ibz_copy) +#define ibz_copy_digits SQISIGN_NAMESPACE(ibz_copy_digits) +#define ibz_div SQISIGN_NAMESPACE(ibz_div) +#define ibz_div_2exp SQISIGN_NAMESPACE(ibz_div_2exp) +#define ibz_div_floor SQISIGN_NAMESPACE(ibz_div_floor) +#define ibz_divides SQISIGN_NAMESPACE(ibz_divides) +#define ibz_finalize SQISIGN_NAMESPACE(ibz_finalize) +#define ibz_gcd SQISIGN_NAMESPACE(ibz_gcd) +#define ibz_get SQISIGN_NAMESPACE(ibz_get) +#define ibz_init SQISIGN_NAMESPACE(ibz_init) +#define ibz_invmod SQISIGN_NAMESPACE(ibz_invmod) +#define ibz_is_even SQISIGN_NAMESPACE(ibz_is_even) +#define ibz_is_odd SQISIGN_NAMESPACE(ibz_is_odd) +#define ibz_is_one SQISIGN_NAMESPACE(ibz_is_one) +#define ibz_is_zero SQISIGN_NAMESPACE(ibz_is_zero) +#define ibz_legendre SQISIGN_NAMESPACE(ibz_legendre) +#define ibz_mod SQISIGN_NAMESPACE(ibz_mod) +#define ibz_mod_ui SQISIGN_NAMESPACE(ibz_mod_ui) +#define ibz_mul SQISIGN_NAMESPACE(ibz_mul) +#define ibz_neg SQISIGN_NAMESPACE(ibz_neg) +#define ibz_pow SQISIGN_NAMESPACE(ibz_pow) +#define ibz_pow_mod SQISIGN_NAMESPACE(ibz_pow_mod) +#define ibz_print SQISIGN_NAMESPACE(ibz_print) +#define ibz_probab_prime SQISIGN_NAMESPACE(ibz_probab_prime) +#define ibz_rand_interval SQISIGN_NAMESPACE(ibz_rand_interval) +#define ibz_rand_interval_bits SQISIGN_NAMESPACE(ibz_rand_interval_bits) +#define ibz_rand_interval_i SQISIGN_NAMESPACE(ibz_rand_interval_i) +#define ibz_rand_interval_minm_m SQISIGN_NAMESPACE(ibz_rand_interval_minm_m) +#define ibz_set SQISIGN_NAMESPACE(ibz_set) +#define ibz_set_from_str SQISIGN_NAMESPACE(ibz_set_from_str) +#define ibz_size_in_base SQISIGN_NAMESPACE(ibz_size_in_base) +#define ibz_sqrt SQISIGN_NAMESPACE(ibz_sqrt) +#define ibz_sqrt_floor SQISIGN_NAMESPACE(ibz_sqrt_floor) +#define ibz_sqrt_mod_p SQISIGN_NAMESPACE(ibz_sqrt_mod_p) +#define ibz_sub SQISIGN_NAMESPACE(ibz_sub) +#define ibz_swap SQISIGN_NAMESPACE(ibz_swap) +#define ibz_to_digits SQISIGN_NAMESPACE(ibz_to_digits) +#define ibz_two_adic SQISIGN_NAMESPACE(ibz_two_adic) // 
Namespacing symbols exported from integers.c: #undef ibz_cornacchia_prime #undef ibz_generate_random_prime -#define ibz_cornacchia_prime SQISIGN_NAMESPACE_GENERIC(ibz_cornacchia_prime) -#define ibz_generate_random_prime SQISIGN_NAMESPACE_GENERIC(ibz_generate_random_prime) +#define ibz_cornacchia_prime SQISIGN_NAMESPACE(ibz_cornacchia_prime) +#define ibz_generate_random_prime SQISIGN_NAMESPACE(ibz_generate_random_prime) // Namespacing symbols exported from isog_chains.c: #undef ec_eval_even @@ -763,15 +765,15 @@ #undef quat_lattice_lll #undef quat_lll_core -#define quat_lattice_lll SQISIGN_NAMESPACE_GENERIC(quat_lattice_lll) -#define quat_lll_core SQISIGN_NAMESPACE_GENERIC(quat_lll_core) +#define quat_lattice_lll SQISIGN_NAMESPACE(quat_lattice_lll) +#define quat_lll_core SQISIGN_NAMESPACE(quat_lll_core) // Namespacing symbols exported from lat_ball.c: #undef quat_lattice_bound_parallelogram #undef quat_lattice_sample_from_ball -#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE_GENERIC(quat_lattice_bound_parallelogram) -#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE_GENERIC(quat_lattice_sample_from_ball) +#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE(quat_lattice_bound_parallelogram) +#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE(quat_lattice_sample_from_ball) // Namespacing symbols exported from lattice.c: #undef quat_lattice_add @@ -789,29 +791,29 @@ #undef quat_lattice_mul #undef quat_lattice_reduce_denom -#define quat_lattice_add SQISIGN_NAMESPACE_GENERIC(quat_lattice_add) -#define quat_lattice_alg_elem_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_alg_elem_mul) -#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_conjugate_without_hnf) -#define quat_lattice_contains SQISIGN_NAMESPACE_GENERIC(quat_lattice_contains) -#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_dual_without_hnf) -#define quat_lattice_equal SQISIGN_NAMESPACE_GENERIC(quat_lattice_equal) -#define quat_lattice_gram SQISIGN_NAMESPACE_GENERIC(quat_lattice_gram) -#define quat_lattice_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_hnf) -#define quat_lattice_inclusion SQISIGN_NAMESPACE_GENERIC(quat_lattice_inclusion) -#define quat_lattice_index SQISIGN_NAMESPACE_GENERIC(quat_lattice_index) -#define quat_lattice_intersect SQISIGN_NAMESPACE_GENERIC(quat_lattice_intersect) -#define quat_lattice_mat_alg_coord_mul_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_mat_alg_coord_mul_without_hnf) -#define quat_lattice_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_mul) -#define quat_lattice_reduce_denom SQISIGN_NAMESPACE_GENERIC(quat_lattice_reduce_denom) +#define quat_lattice_add SQISIGN_NAMESPACE(quat_lattice_add) +#define quat_lattice_alg_elem_mul SQISIGN_NAMESPACE(quat_lattice_alg_elem_mul) +#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE(quat_lattice_conjugate_without_hnf) +#define quat_lattice_contains SQISIGN_NAMESPACE(quat_lattice_contains) +#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE(quat_lattice_dual_without_hnf) +#define quat_lattice_equal SQISIGN_NAMESPACE(quat_lattice_equal) +#define quat_lattice_gram SQISIGN_NAMESPACE(quat_lattice_gram) +#define quat_lattice_hnf SQISIGN_NAMESPACE(quat_lattice_hnf) +#define quat_lattice_inclusion SQISIGN_NAMESPACE(quat_lattice_inclusion) +#define quat_lattice_index SQISIGN_NAMESPACE(quat_lattice_index) +#define quat_lattice_intersect SQISIGN_NAMESPACE(quat_lattice_intersect) +#define quat_lattice_mat_alg_coord_mul_without_hnf 
SQISIGN_NAMESPACE(quat_lattice_mat_alg_coord_mul_without_hnf) +#define quat_lattice_mul SQISIGN_NAMESPACE(quat_lattice_mul) +#define quat_lattice_reduce_denom SQISIGN_NAMESPACE(quat_lattice_reduce_denom) // Namespacing symbols exported from lll_applications.c: #undef quat_lideal_lideal_mul_reduced #undef quat_lideal_prime_norm_reduced_equivalent #undef quat_lideal_reduce_basis -#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE_GENERIC(quat_lideal_lideal_mul_reduced) -#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE_GENERIC(quat_lideal_prime_norm_reduced_equivalent) -#define quat_lideal_reduce_basis SQISIGN_NAMESPACE_GENERIC(quat_lideal_reduce_basis) +#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE(quat_lideal_lideal_mul_reduced) +#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE(quat_lideal_prime_norm_reduced_equivalent) +#define quat_lideal_reduce_basis SQISIGN_NAMESPACE(quat_lideal_reduce_basis) // Namespacing symbols exported from lll_verification.c: #undef ibq_vec_4_copy_ibz @@ -820,18 +822,18 @@ #undef quat_lll_set_ibq_parameters #undef quat_lll_verify -#define ibq_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_copy_ibz) -#define quat_lll_bilinear SQISIGN_NAMESPACE_GENERIC(quat_lll_bilinear) -#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE_GENERIC(quat_lll_gram_schmidt_transposed_with_ibq) -#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE_GENERIC(quat_lll_set_ibq_parameters) -#define quat_lll_verify SQISIGN_NAMESPACE_GENERIC(quat_lll_verify) +#define ibq_vec_4_copy_ibz SQISIGN_NAMESPACE(ibq_vec_4_copy_ibz) +#define quat_lll_bilinear SQISIGN_NAMESPACE(quat_lll_bilinear) +#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE(quat_lll_gram_schmidt_transposed_with_ibq) +#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE(quat_lll_set_ibq_parameters) +#define quat_lll_verify SQISIGN_NAMESPACE(quat_lll_verify) // Namespacing symbols exported from mem.c: #undef sqisign_secure_clear #undef sqisign_secure_free -#define sqisign_secure_clear SQISIGN_NAMESPACE_GENERIC(sqisign_secure_clear) -#define sqisign_secure_free SQISIGN_NAMESPACE_GENERIC(sqisign_secure_free) +#define sqisign_secure_clear SQISIGN_NAMESPACE(sqisign_secure_clear) +#define sqisign_secure_free SQISIGN_NAMESPACE(sqisign_secure_free) // Namespacing symbols exported from mp.c: #undef MUL @@ -854,25 +856,25 @@ #undef select_ct #undef swap_ct -#define MUL SQISIGN_NAMESPACE_GENERIC(MUL) -#define mp_add SQISIGN_NAMESPACE_GENERIC(mp_add) -#define mp_compare SQISIGN_NAMESPACE_GENERIC(mp_compare) -#define mp_copy SQISIGN_NAMESPACE_GENERIC(mp_copy) -#define mp_inv_2e SQISIGN_NAMESPACE_GENERIC(mp_inv_2e) -#define mp_invert_matrix SQISIGN_NAMESPACE_GENERIC(mp_invert_matrix) -#define mp_is_one SQISIGN_NAMESPACE_GENERIC(mp_is_one) -#define mp_is_zero SQISIGN_NAMESPACE_GENERIC(mp_is_zero) -#define mp_mod_2exp SQISIGN_NAMESPACE_GENERIC(mp_mod_2exp) -#define mp_mul SQISIGN_NAMESPACE_GENERIC(mp_mul) -#define mp_mul2 SQISIGN_NAMESPACE_GENERIC(mp_mul2) -#define mp_neg SQISIGN_NAMESPACE_GENERIC(mp_neg) -#define mp_print SQISIGN_NAMESPACE_GENERIC(mp_print) -#define mp_shiftl SQISIGN_NAMESPACE_GENERIC(mp_shiftl) -#define mp_shiftr SQISIGN_NAMESPACE_GENERIC(mp_shiftr) -#define mp_sub SQISIGN_NAMESPACE_GENERIC(mp_sub) -#define multiple_mp_shiftl SQISIGN_NAMESPACE_GENERIC(multiple_mp_shiftl) -#define select_ct SQISIGN_NAMESPACE_GENERIC(select_ct) -#define swap_ct SQISIGN_NAMESPACE_GENERIC(swap_ct) +#define MUL SQISIGN_NAMESPACE(MUL) +#define mp_add 
SQISIGN_NAMESPACE(mp_add) +#define mp_compare SQISIGN_NAMESPACE(mp_compare) +#define mp_copy SQISIGN_NAMESPACE(mp_copy) +#define mp_inv_2e SQISIGN_NAMESPACE(mp_inv_2e) +#define mp_invert_matrix SQISIGN_NAMESPACE(mp_invert_matrix) +#define mp_is_one SQISIGN_NAMESPACE(mp_is_one) +#define mp_is_zero SQISIGN_NAMESPACE(mp_is_zero) +#define mp_mod_2exp SQISIGN_NAMESPACE(mp_mod_2exp) +#define mp_mul SQISIGN_NAMESPACE(mp_mul) +#define mp_mul2 SQISIGN_NAMESPACE(mp_mul2) +#define mp_neg SQISIGN_NAMESPACE(mp_neg) +#define mp_print SQISIGN_NAMESPACE(mp_print) +#define mp_shiftl SQISIGN_NAMESPACE(mp_shiftl) +#define mp_shiftr SQISIGN_NAMESPACE(mp_shiftr) +#define mp_sub SQISIGN_NAMESPACE(mp_sub) +#define multiple_mp_shiftl SQISIGN_NAMESPACE(multiple_mp_shiftl) +#define select_ct SQISIGN_NAMESPACE(select_ct) +#define swap_ct SQISIGN_NAMESPACE(swap_ct) // Namespacing symbols exported from normeq.c: #undef quat_change_to_O0_basis @@ -882,12 +884,12 @@ #undef quat_represent_integer #undef quat_sampling_random_ideal_O0_given_norm -#define quat_change_to_O0_basis SQISIGN_NAMESPACE_GENERIC(quat_change_to_O0_basis) -#define quat_lattice_O0_set SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set) -#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set_extremal) -#define quat_order_elem_create SQISIGN_NAMESPACE_GENERIC(quat_order_elem_create) -#define quat_represent_integer SQISIGN_NAMESPACE_GENERIC(quat_represent_integer) -#define quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE_GENERIC(quat_sampling_random_ideal_O0_given_norm) +#define quat_change_to_O0_basis SQISIGN_NAMESPACE(quat_change_to_O0_basis) +#define quat_lattice_O0_set SQISIGN_NAMESPACE(quat_lattice_O0_set) +#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE(quat_lattice_O0_set_extremal) +#define quat_order_elem_create SQISIGN_NAMESPACE(quat_order_elem_create) +#define quat_represent_integer SQISIGN_NAMESPACE(quat_represent_integer) +#define quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE(quat_sampling_random_ideal_O0_given_norm) // Namespacing symbols exported from printer.c: #undef ibz_mat_2x2_print @@ -899,23 +901,23 @@ #undef quat_lattice_print #undef quat_left_ideal_print -#define ibz_mat_2x2_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_print) -#define ibz_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_print) -#define ibz_vec_2_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_print) -#define ibz_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_print) -#define quat_alg_elem_print SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_print) -#define quat_alg_print SQISIGN_NAMESPACE_GENERIC(quat_alg_print) -#define quat_lattice_print SQISIGN_NAMESPACE_GENERIC(quat_lattice_print) -#define quat_left_ideal_print SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_print) +#define ibz_mat_2x2_print SQISIGN_NAMESPACE(ibz_mat_2x2_print) +#define ibz_mat_4x4_print SQISIGN_NAMESPACE(ibz_mat_4x4_print) +#define ibz_vec_2_print SQISIGN_NAMESPACE(ibz_vec_2_print) +#define ibz_vec_4_print SQISIGN_NAMESPACE(ibz_vec_4_print) +#define quat_alg_elem_print SQISIGN_NAMESPACE(quat_alg_elem_print) +#define quat_alg_print SQISIGN_NAMESPACE(quat_alg_print) +#define quat_lattice_print SQISIGN_NAMESPACE(quat_lattice_print) +#define quat_left_ideal_print SQISIGN_NAMESPACE(quat_left_ideal_print) // Namespacing symbols exported from random_input_generation.c: #undef quat_test_input_random_ideal_generation #undef quat_test_input_random_ideal_lattice_generation #undef quat_test_input_random_lattice_generation -#define quat_test_input_random_ideal_generation 
SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_generation) -#define quat_test_input_random_ideal_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_lattice_generation) -#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_lattice_generation) +#define quat_test_input_random_ideal_generation SQISIGN_NAMESPACE(quat_test_input_random_ideal_generation) +#define quat_test_input_random_ideal_lattice_generation SQISIGN_NAMESPACE(quat_test_input_random_ideal_lattice_generation) +#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE(quat_test_input_random_lattice_generation) // Namespacing symbols exported from rationals.c: #undef ibq_abs @@ -941,28 +943,28 @@ #undef ibq_vec_4_init #undef ibq_vec_4_print -#define ibq_abs SQISIGN_NAMESPACE_GENERIC(ibq_abs) -#define ibq_add SQISIGN_NAMESPACE_GENERIC(ibq_add) -#define ibq_cmp SQISIGN_NAMESPACE_GENERIC(ibq_cmp) -#define ibq_copy SQISIGN_NAMESPACE_GENERIC(ibq_copy) -#define ibq_finalize SQISIGN_NAMESPACE_GENERIC(ibq_finalize) -#define ibq_init SQISIGN_NAMESPACE_GENERIC(ibq_init) -#define ibq_inv SQISIGN_NAMESPACE_GENERIC(ibq_inv) -#define ibq_is_ibz SQISIGN_NAMESPACE_GENERIC(ibq_is_ibz) -#define ibq_is_one SQISIGN_NAMESPACE_GENERIC(ibq_is_one) -#define ibq_is_zero SQISIGN_NAMESPACE_GENERIC(ibq_is_zero) -#define ibq_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_finalize) -#define ibq_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_init) -#define ibq_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_print) -#define ibq_mul SQISIGN_NAMESPACE_GENERIC(ibq_mul) -#define ibq_neg SQISIGN_NAMESPACE_GENERIC(ibq_neg) -#define ibq_reduce SQISIGN_NAMESPACE_GENERIC(ibq_reduce) -#define ibq_set SQISIGN_NAMESPACE_GENERIC(ibq_set) -#define ibq_sub SQISIGN_NAMESPACE_GENERIC(ibq_sub) -#define ibq_to_ibz SQISIGN_NAMESPACE_GENERIC(ibq_to_ibz) -#define ibq_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_finalize) -#define ibq_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_init) -#define ibq_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_print) +#define ibq_abs SQISIGN_NAMESPACE(ibq_abs) +#define ibq_add SQISIGN_NAMESPACE(ibq_add) +#define ibq_cmp SQISIGN_NAMESPACE(ibq_cmp) +#define ibq_copy SQISIGN_NAMESPACE(ibq_copy) +#define ibq_finalize SQISIGN_NAMESPACE(ibq_finalize) +#define ibq_init SQISIGN_NAMESPACE(ibq_init) +#define ibq_inv SQISIGN_NAMESPACE(ibq_inv) +#define ibq_is_ibz SQISIGN_NAMESPACE(ibq_is_ibz) +#define ibq_is_one SQISIGN_NAMESPACE(ibq_is_one) +#define ibq_is_zero SQISIGN_NAMESPACE(ibq_is_zero) +#define ibq_mat_4x4_finalize SQISIGN_NAMESPACE(ibq_mat_4x4_finalize) +#define ibq_mat_4x4_init SQISIGN_NAMESPACE(ibq_mat_4x4_init) +#define ibq_mat_4x4_print SQISIGN_NAMESPACE(ibq_mat_4x4_print) +#define ibq_mul SQISIGN_NAMESPACE(ibq_mul) +#define ibq_neg SQISIGN_NAMESPACE(ibq_neg) +#define ibq_reduce SQISIGN_NAMESPACE(ibq_reduce) +#define ibq_set SQISIGN_NAMESPACE(ibq_set) +#define ibq_sub SQISIGN_NAMESPACE(ibq_sub) +#define ibq_to_ibz SQISIGN_NAMESPACE(ibq_to_ibz) +#define ibq_vec_4_finalize SQISIGN_NAMESPACE(ibq_vec_4_finalize) +#define ibq_vec_4_init SQISIGN_NAMESPACE(ibq_vec_4_init) +#define ibq_vec_4_print SQISIGN_NAMESPACE(ibq_vec_4_print) // Namespacing symbols exported from sign.c: #undef protocols_sign diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/tools.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/tools.c deleted file mode 100644 index 242ea08fe2..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/tools.c +++ 
/dev/null @@ -1,75 +0,0 @@ -#include -#include - -static clock_t global_timer; - -clock_t -tic(void) -{ - global_timer = clock(); - return global_timer; -} - -float -tac(void) -{ - float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); - return ms; -} - -float -TAC(const char *str) -{ - float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); -#ifndef NDEBUG - printf("%s [%d ms]\n", str, (int)ms); -#endif - return ms; -} - -float -toc(const clock_t t) -{ - float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); - return ms; -} - -float -TOC(const clock_t t, const char *str) -{ - float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); - printf("%s [%d ms]\n", str, (int)ms); - return ms; - // printf("%s [%ld]\n",str,clock()-t); - // return (float) (clock()-t); -} - -float -TOC_clock(const clock_t t, const char *str) -{ - printf("%s [%ld]\n", str, clock() - t); - return (float)(clock() - t); -} - -clock_t -dclock(const clock_t t) -{ - return (clock() - t); -} - -float -clock_to_time(const clock_t t, const char *str) -{ - float ms = (1000. * (float)(t) / CLOCKS_PER_SEC); - printf("%s [%d ms]\n", str, (int)ms); - return ms; - // printf("%s [%ld]\n",str,t); - // return (float) (t); -} - -float -clock_print(const clock_t t, const char *str) -{ - printf("%s [%ld]\n", str, t); - return (float)(t); -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/randombytes_ctrdrbg.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/randombytes_ctrdrbg.c deleted file mode 100644 index 372cc0de81..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/randombytes_ctrdrbg.c +++ /dev/null @@ -1,161 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 and Unknown -// -/* -NIST-developed software is provided by NIST as a public service. You may use, -copy, and distribute copies of the software in any medium, provided that you -keep intact this entire notice. You may improve, modify, and create derivative -works of the software or any portion of the software, and you may copy and -distribute such modifications or works. Modified works should carry a notice -stating that you changed the software and should note the date and nature of any -such change. Please explicitly acknowledge the National Institute of Standards -and Technology as the source of the software. - -NIST-developed software is expressly provided "AS IS." NIST MAKES NO WARRANTY OF -ANY KIND, EXPRESS, IMPLIED, IN FACT, OR ARISING BY OPERATION OF LAW, INCLUDING, -WITHOUT LIMITATION, THE IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE, NON-INFRINGEMENT, AND DATA ACCURACY. NIST NEITHER REPRESENTS -NOR WARRANTS THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR -ERROR-FREE, OR THAT ANY DEFECTS WILL BE CORRECTED. NIST DOES NOT WARRANT OR MAKE -ANY REPRESENTATIONS REGARDING THE USE OF THE SOFTWARE OR THE RESULTS THEREOF, -INCLUDING BUT NOT LIMITED TO THE CORRECTNESS, ACCURACY, RELIABILITY, OR -USEFULNESS OF THE SOFTWARE. - -You are solely responsible for determining the appropriateness of using and -distributing the software and you assume all risks associated with its use, -including but not limited to the risks and costs of program errors, compliance -with applicable laws, damage to or loss of data, programs or equipment, and the -unavailability or interruption of operation. This software is not intended to be -used in any situation where a failure could cause risk of injury or damage to -property. 
The software developed by NIST employees is not subject to copyright -protection within the United States. -*/ - -#include -#include - -#include - -#ifdef ENABLE_CT_TESTING -#include -#endif - -#define RNG_SUCCESS 0 -#define RNG_BAD_MAXLEN -1 -#define RNG_BAD_OUTBUF -2 -#define RNG_BAD_REQ_LEN -3 - -static inline void AES256_ECB(const unsigned char *key, - const unsigned char *ctr, unsigned char *buffer) { - AES_ECB_encrypt(ctr, key, buffer); -} - -typedef struct { - unsigned char Key[32]; - unsigned char V[16]; - int reseed_counter; -} AES256_CTR_DRBG_struct; - -void AES256_CTR_DRBG_Update(const unsigned char *provided_data, - unsigned char *Key, unsigned char *V); - -AES256_CTR_DRBG_struct DRBG_ctx; - -#ifndef CTRDRBG_TEST_BENCH -static -#endif - void - randombytes_init_nist(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength) { - unsigned char seed_material[48]; - - (void)security_strength; // Unused parameter - memcpy(seed_material, entropy_input, 48); - if (personalization_string) - for (int i = 0; i < 48; i++) { - seed_material[i] ^= personalization_string[i]; - } - memset(DRBG_ctx.Key, 0x00, 32); - memset(DRBG_ctx.V, 0x00, 16); - AES256_CTR_DRBG_Update(seed_material, DRBG_ctx.Key, DRBG_ctx.V); - DRBG_ctx.reseed_counter = 1; -} - -#ifndef CTRDRBG_TEST_BENCH -static -#endif - int - randombytes_nist(unsigned char *x, size_t xlen) { - unsigned char block[16]; - size_t i = 0; - - while (xlen > 0) { - // increment V - for (int j = 15; j >= 0; j--) { - if (DRBG_ctx.V[j] == 0xff) { - DRBG_ctx.V[j] = 0x00; - } else { - DRBG_ctx.V[j]++; - break; - } - } - AES256_ECB(DRBG_ctx.Key, DRBG_ctx.V, block); - if (xlen > 15) { - memcpy(x + i, block, 16); - i += 16; - xlen -= 16; - } else { - memcpy(x + i, block, xlen); - i += xlen; - xlen = 0; - } - } - AES256_CTR_DRBG_Update(NULL, DRBG_ctx.Key, DRBG_ctx.V); - DRBG_ctx.reseed_counter++; - - return 0; -} - -void AES256_CTR_DRBG_Update(const unsigned char *provided_data, - unsigned char *Key, unsigned char *V) { - unsigned char temp[48]; - - for (int i = 0; i < 3; i++) { - // increment V - for (int j = 15; j >= 0; j--) { - if (V[j] == 0xff) { - V[j] = 0x00; - } else { - V[j]++; - break; - } - } - - AES256_ECB(Key, V, temp + 16 * i); - } - if (provided_data != NULL) - for (int i = 0; i < 48; i++) { - temp[i] ^= provided_data[i]; - } - memcpy(Key, temp, 32); - memcpy(V, temp + 32, 16); -} - -#ifdef RANDOMBYTES_C -SQISIGN_API -int randombytes(unsigned char *random_array, unsigned long long nbytes) { - int ret = randombytes_nist(random_array, nbytes); -#ifdef ENABLE_CT_TESTING - VALGRIND_MAKE_MEM_UNDEFINED(random_array, ret); -#endif - return ret; -} - -SQISIGN_API -void randombytes_init(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength) { - randombytes_init_nist(entropy_input, personalization_string, - security_strength); -} -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/randombytes_system.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/randombytes_system.c deleted file mode 100644 index 689c29b242..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/randombytes_system.c +++ /dev/null @@ -1,431 +0,0 @@ -// SPDX-License-Identifier: MIT - -/* -The MIT License -Copyright (c) 2017 Daan Sprenkels -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, 
modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. -*/ - -#include - -#ifdef ENABLE_CT_TESTING -#include -#endif - -// In the case that are compiling on linux, we need to define _GNU_SOURCE -// *before* randombytes.h is included. Otherwise SYS_getrandom will not be -// declared. -#if defined(__linux__) || defined(__GNU__) -#define _GNU_SOURCE -#endif /* defined(__linux__) || defined(__GNU__) */ - -#if defined(_WIN32) -/* Windows */ -#include -#include /* CryptAcquireContext, CryptGenRandom */ -#endif /* defined(_WIN32) */ - -/* wasi */ -#if defined(__wasi__) -#include -#endif - -/* kFreeBSD */ -#if defined(__FreeBSD_kernel__) && defined(__GLIBC__) -#define GNU_KFREEBSD -#endif - -#if defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) -/* Linux */ -// We would need to include , but not every target has access -// to the linux headers. We only need RNDGETENTCNT, so we instead inline it. -// RNDGETENTCNT is originally defined in `include/uapi/linux/random.h` in the -// linux repo. -#define RNDGETENTCNT 0x80045200 - -#include -#include -#include -#include -#include -#include -#include -#if (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && \ - ((__GLIBC__ > 2) || (__GLIBC_MINOR__ > 24)) -#define USE_GLIBC -#include -#endif /* (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && ((__GLIBC__ > 2) || \ - (__GLIBC_MINOR__ > 24)) */ -#include -#include -#include -#include - -// We need SSIZE_MAX as the maximum read len from /dev/urandom -#if !defined(SSIZE_MAX) -#define SSIZE_MAX (SIZE_MAX / 2 - 1) -#endif /* defined(SSIZE_MAX) */ - -#endif /* defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) */ - -#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) -/* Dragonfly, FreeBSD, NetBSD, OpenBSD (has arc4random) */ -#include -#if defined(BSD) -#include -#endif -/* GNU/Hurd defines BSD in sys/param.h which causes problems later */ -#if defined(__GNU__) -#undef BSD -#endif -#endif - -#if defined(__EMSCRIPTEN__) -#include -#include -#include -#include -#endif /* defined(__EMSCRIPTEN__) */ - -#if defined(_WIN32) -static int -randombytes_win32_randombytes(void *buf, size_t n) -{ - HCRYPTPROV ctx; - BOOL tmp; - DWORD to_read = 0; - const size_t MAX_DWORD = 0xFFFFFFFF; - - tmp = CryptAcquireContext(&ctx, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT); - if (tmp == FALSE) - return -1; - - while (n > 0) { - to_read = (DWORD)(n < MAX_DWORD ? 
n : MAX_DWORD); - tmp = CryptGenRandom(ctx, to_read, (BYTE *)buf); - if (tmp == FALSE) - return -1; - buf = ((char *)buf) + to_read; - n -= to_read; - } - - tmp = CryptReleaseContext(ctx, 0); - if (tmp == FALSE) - return -1; - - return 0; -} -#endif /* defined(_WIN32) */ - -#if defined(__wasi__) -static int -randombytes_wasi_randombytes(void *buf, size_t n) -{ - arc4random_buf(buf, n); - return 0; -} -#endif /* defined(__wasi__) */ - -#if (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || defined(SYS_getrandom)) -#if defined(USE_GLIBC) -// getrandom is declared in glibc. -#elif defined(SYS_getrandom) -static ssize_t -getrandom(void *buf, size_t buflen, unsigned int flags) -{ - return syscall(SYS_getrandom, buf, buflen, flags); -} -#endif - -static int -randombytes_linux_randombytes_getrandom(void *buf, size_t n) -{ - /* I have thought about using a separate PRF, seeded by getrandom, but - * it turns out that the performance of getrandom is good enough - * (250 MB/s on my laptop). - */ - size_t offset = 0, chunk; - int ret; - while (n > 0) { - /* getrandom does not allow chunks larger than 33554431 */ - chunk = n <= 33554431 ? n : 33554431; - do { - ret = getrandom((char *)buf + offset, chunk, 0); - } while (ret == -1 && errno == EINTR); - if (ret < 0) - return ret; - offset += ret; - n -= ret; - } - assert(n == 0); - return 0; -} -#endif /* (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || \ - defined(SYS_getrandom)) */ - -#if (defined(__linux__) || defined(GNU_KFREEBSD)) && !defined(SYS_getrandom) - -#if defined(__linux__) -static int -randombytes_linux_read_entropy_ioctl(int device, int *entropy) -{ - return ioctl(device, RNDGETENTCNT, entropy); -} - -static int -randombytes_linux_read_entropy_proc(FILE *stream, int *entropy) -{ - int retcode; - do { - rewind(stream); - retcode = fscanf(stream, "%d", entropy); - } while (retcode != 1 && errno == EINTR); - if (retcode != 1) { - return -1; - } - return 0; -} - -static int -randombytes_linux_wait_for_entropy(int device) -{ - /* We will block on /dev/random, because any increase in the OS' entropy - * level will unblock the request. I use poll here (as does libsodium), - * because we don't *actually* want to read from the device. */ - enum - { - IOCTL, - PROC - } strategy = IOCTL; - const int bits = 128; - struct pollfd pfd; - int fd; - FILE *proc_file; - int retcode, retcode_error = 0; // Used as return codes throughout this function - int entropy = 0; - - /* If the device has enough entropy already, we will want to return early */ - retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); - // printf("errno: %d (%s)\n", errno, strerror(errno)); - if (retcode != 0 && (errno == ENOTTY || errno == ENOSYS)) { - // The ioctl call on /dev/urandom has failed due to a - // - ENOTTY (unsupported action), or - // - ENOSYS (invalid ioctl; this happens on MIPS, see #22). - // - // We will fall back to reading from - // `/proc/sys/kernel/random/entropy_avail`. This less ideal, - // because it allocates a file descriptor, and it may not work - // in a chroot. But at this point it seems we have no better - // options left. 
- strategy = PROC; - // Open the entropy count file - proc_file = fopen("/proc/sys/kernel/random/entropy_avail", "r"); - if (proc_file == NULL) { - return -1; - } - } else if (retcode != 0) { - // Unrecoverable ioctl error - return -1; - } - if (entropy >= bits) { - return 0; - } - - do { - fd = open("/dev/random", O_RDONLY); - } while (fd == -1 && errno == EINTR); /* EAGAIN will not occur */ - if (fd == -1) { - /* Unrecoverable IO error */ - return -1; - } - - pfd.fd = fd; - pfd.events = POLLIN; - for (;;) { - retcode = poll(&pfd, 1, -1); - if (retcode == -1 && (errno == EINTR || errno == EAGAIN)) { - continue; - } else if (retcode == 1) { - if (strategy == IOCTL) { - retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); - } else if (strategy == PROC) { - retcode = randombytes_linux_read_entropy_proc(proc_file, &entropy); - } else { - return -1; // Unreachable - } - - if (retcode != 0) { - // Unrecoverable I/O error - retcode_error = retcode; - break; - } - if (entropy >= bits) { - break; - } - } else { - // Unreachable: poll() should only return -1 or 1 - retcode_error = -1; - break; - } - } - do { - retcode = close(fd); - } while (retcode == -1 && errno == EINTR); - if (strategy == PROC) { - do { - retcode = fclose(proc_file); - } while (retcode == -1 && errno == EINTR); - } - if (retcode_error != 0) { - return retcode_error; - } - return retcode; -} -#endif /* defined(__linux__) */ - -static int -randombytes_linux_randombytes_urandom(void *buf, size_t n) -{ - int fd; - size_t offset = 0, count; - ssize_t tmp; - do { - fd = open("/dev/urandom", O_RDONLY); - } while (fd == -1 && errno == EINTR); - if (fd == -1) - return -1; -#if defined(__linux__) - if (randombytes_linux_wait_for_entropy(fd) == -1) - return -1; -#endif - - while (n > 0) { - count = n <= SSIZE_MAX ? 
n : SSIZE_MAX; - tmp = read(fd, (char *)buf + offset, count); - if (tmp == -1 && (errno == EAGAIN || errno == EINTR)) { - continue; - } - if (tmp == -1) - return -1; /* Unrecoverable IO error */ - offset += tmp; - n -= tmp; - } - close(fd); - assert(n == 0); - return 0; -} -#endif /* defined(__linux__) && !defined(SYS_getrandom) */ - -#if defined(BSD) -static int -randombytes_bsd_randombytes(void *buf, size_t n) -{ - arc4random_buf(buf, n); - return 0; -} -#endif /* defined(BSD) */ - -#if defined(__EMSCRIPTEN__) -static int -randombytes_js_randombytes_nodejs(void *buf, size_t n) -{ - const int ret = EM_ASM_INT( - { - var crypto; - try { - crypto = require('crypto'); - } catch (error) { - return -2; - } - try { - writeArrayToMemory(crypto.randomBytes($1), $0); - return 0; - } catch (error) { - return -1; - } - }, - buf, - n); - switch (ret) { - case 0: - return 0; - case -1: - errno = EINVAL; - return -1; - case -2: - errno = ENOSYS; - return -1; - } - assert(false); // Unreachable -} -#endif /* defined(__EMSCRIPTEN__) */ - -SQISIGN_API -int -randombytes_select(unsigned char *buf, unsigned long long n) -{ -#if defined(__EMSCRIPTEN__) - return randombytes_js_randombytes_nodejs(buf, n); -#elif defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) -#if defined(USE_GLIBC) - /* Use getrandom system call */ - return randombytes_linux_randombytes_getrandom(buf, n); -#elif defined(SYS_getrandom) - /* Use getrandom system call */ - return randombytes_linux_randombytes_getrandom(buf, n); -#else - /* When we have enough entropy, we can read from /dev/urandom */ - return randombytes_linux_randombytes_urandom(buf, n); -#endif -#elif defined(BSD) - /* Use arc4random system call */ - return randombytes_bsd_randombytes(buf, n); -#elif defined(_WIN32) - /* Use windows API */ - return randombytes_win32_randombytes(buf, n); -#elif defined(__wasi__) - /* Use WASI */ - return randombytes_wasi_randombytes(buf, n); -#else -#error "randombytes(...) is not supported on this platform" -#endif -} - -#ifdef RANDOMBYTES_SYSTEM -SQISIGN_API -int -randombytes(unsigned char *x, unsigned long long xlen) -{ - - int ret = randombytes_select(x, (size_t)xlen); -#ifdef ENABLE_CT_TESTING - VALGRIND_MAKE_MEM_UNDEFINED(x, xlen); -#endif - return ret; -} - -SQISIGN_API -void -randombytes_init(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength) -{ - (void)entropy_input; - (void)personalization_string; - (void)security_strength; -} -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/rng.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/rng.h index 0a9ca0e465..d0861ac036 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/rng.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/rng.h @@ -3,41 +3,11 @@ #ifndef rng_h #define rng_h -#include +#include -/** - * Randombytes initialization. - * Initialization may be needed for some random number generators (e.g. CTR-DRBG). - * - * @param[in] entropy_input 48 bytes entropy input - * @param[in] personalization_string Personalization string - * @param[in] security_strength Security string - */ -SQISIGN_API -void randombytes_init(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength); - -/** - * Random byte generation using /dev/urandom. - * The caller is responsible to allocate sufficient memory to hold x. - * - * @param[out] x Memory to hold the random bytes. 
- * @param[in] xlen Number of random bytes to be generated - * @return int 0 on success, -1 otherwise - */ -SQISIGN_API -int randombytes_select(unsigned char *x, unsigned long long xlen); - -/** - * Random byte generation. - * The caller is responsible to allocate sufficient memory to hold x. - * - * @param[out] x Memory to hold the random bytes. - * @param[in] xlen Number of random bytes to be generated - * @return int 0 on success, -1 otherwise - */ -SQISIGN_API -int randombytes(unsigned char *x, unsigned long long xlen); +static int randombytes(unsigned char *x, unsigned long long xlen){ + OQS_randombytes(x, xlen); + return 0; +} #endif /* rng_h */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sig.h index 4c33510084..a5bc04e6e4 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sig.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sig.h @@ -17,7 +17,7 @@ * @param[out] sk SQIsign secret key * @return int status code */ -SQISIGN_API +SQISIGN_API int sqisign_keypair(unsigned char *pk, unsigned char *sk); /** @@ -34,12 +34,34 @@ int sqisign_keypair(unsigned char *pk, unsigned char *sk); * @param[in] sk Compacted secret key * @return int status code */ -SQISIGN_API +SQISIGN_API int sqisign_sign(unsigned char *sm, unsigned long long *smlen, const unsigned char *m, unsigned long long mlen, const unsigned char *sk); + +/** + * Alternate SQIsign signature generation. Used for liboqs compatibility. + * + * The implementation performs SQIsign.expandSK() + SQIsign.sign() from the SQIsign spec. + * The key provided is a compacted secret key. + * The caller is responsible for allocating sufficient memory to hold s. + * + * @param[out] s Signature + * @param[out] slen Pointer to the length of s + * @param[in] m Message to be signed + * @param[in] mlen Message length + * @param[in] sk Compacted secret key + * @return int status code + */ +SQISIGN_API +int +sqisign_sign_signature(unsigned char *s, + unsigned long long *slen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *sk); #endif /** @@ -75,11 +97,30 @@ int sqisign_open(unsigned char *m, * @param[in] pk Compacted public key * @return int 0 if verification succeeded, 1 otherwise. */ -SQISIGN_API +SQISIGN_API int sqisign_verify(const unsigned char *m, unsigned long long mlen, const unsigned char *sig, unsigned long long siglen, const unsigned char *pk); +/** + * Alternate SQIsign signature verification. Used for liboqs compatibility. + * + * Returns 0 if the signature verification succeeded, and 1 otherwise. + * + * @param[in] sig Signature + * @param[in] siglen Length of sig + * @param[in] m Message whose signature is verified + * @param[in] mlen Length of m + * @param[in] pk Compacted public key + * @return int 0 if verification succeeded, 1 otherwise.
+ */ +SQISIGN_API +int +sqisign_verify_signature(const unsigned char *sig, + unsigned long long siglen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *pk); #endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign_namespace.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign_namespace.h index 007d2572b9..bbfe72c13b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign_namespace.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign_namespace.h @@ -18,12 +18,6 @@ #define PARAM_JOIN2(a, b) PARAM_JOIN2_(a, b) #define PARAM_NAME2(end, s) PARAM_JOIN2(end, s) -#ifndef DISABLE_NAMESPACING -#define SQISIGN_NAMESPACE_GENERIC(s) PARAM_NAME2(gen, s) -#else -#define SQISIGN_NAMESPACE_GENERIC(s) s -#endif - #if defined(SQISIGN_VARIANT) && !defined(DISABLE_NAMESPACING) #if defined(SQISIGN_BUILD_TYPE_REF) #define SQISIGN_NAMESPACE(s) PARAM_NAME3(ref, s) @@ -60,23 +54,23 @@ #undef quat_alg_scalar #undef quat_alg_sub -#define quat_alg_add SQISIGN_NAMESPACE_GENERIC(quat_alg_add) -#define quat_alg_conj SQISIGN_NAMESPACE_GENERIC(quat_alg_conj) -#define quat_alg_coord_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_coord_mul) -#define quat_alg_elem_copy SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy) -#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy_ibz) -#define quat_alg_elem_equal SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_equal) -#define quat_alg_elem_is_zero SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_is_zero) -#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_mul_by_scalar) -#define quat_alg_elem_set SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_set) -#define quat_alg_equal_denom SQISIGN_NAMESPACE_GENERIC(quat_alg_equal_denom) -#define quat_alg_init_set_ui SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set_ui) -#define quat_alg_make_primitive SQISIGN_NAMESPACE_GENERIC(quat_alg_make_primitive) -#define quat_alg_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_mul) -#define quat_alg_norm SQISIGN_NAMESPACE_GENERIC(quat_alg_norm) -#define quat_alg_normalize SQISIGN_NAMESPACE_GENERIC(quat_alg_normalize) -#define quat_alg_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_scalar) -#define quat_alg_sub SQISIGN_NAMESPACE_GENERIC(quat_alg_sub) +#define quat_alg_add SQISIGN_NAMESPACE(quat_alg_add) +#define quat_alg_conj SQISIGN_NAMESPACE(quat_alg_conj) +#define quat_alg_coord_mul SQISIGN_NAMESPACE(quat_alg_coord_mul) +#define quat_alg_elem_copy SQISIGN_NAMESPACE(quat_alg_elem_copy) +#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE(quat_alg_elem_copy_ibz) +#define quat_alg_elem_equal SQISIGN_NAMESPACE(quat_alg_elem_equal) +#define quat_alg_elem_is_zero SQISIGN_NAMESPACE(quat_alg_elem_is_zero) +#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE(quat_alg_elem_mul_by_scalar) +#define quat_alg_elem_set SQISIGN_NAMESPACE(quat_alg_elem_set) +#define quat_alg_equal_denom SQISIGN_NAMESPACE(quat_alg_equal_denom) +#define quat_alg_init_set_ui SQISIGN_NAMESPACE(quat_alg_init_set_ui) +#define quat_alg_make_primitive SQISIGN_NAMESPACE(quat_alg_make_primitive) +#define quat_alg_mul SQISIGN_NAMESPACE(quat_alg_mul) +#define quat_alg_norm SQISIGN_NAMESPACE(quat_alg_norm) +#define quat_alg_normalize SQISIGN_NAMESPACE(quat_alg_normalize) +#define quat_alg_scalar SQISIGN_NAMESPACE(quat_alg_scalar) +#define quat_alg_sub SQISIGN_NAMESPACE(quat_alg_sub) // Namespacing symbols exported from api.c: #undef crypto_sign @@ -134,14 +128,14 @@ #undef ibz_mat_2x2_set #undef ibz_vec_2_set -#define ibz_2x2_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_2x2_mul_mod) -#define 
ibz_mat_2x2_add SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_add) -#define ibz_mat_2x2_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_copy) -#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_det_from_ibz) -#define ibz_mat_2x2_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_eval) -#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_inv_mod) -#define ibz_mat_2x2_set SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_set) -#define ibz_vec_2_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_set) +#define ibz_2x2_mul_mod SQISIGN_NAMESPACE(ibz_2x2_mul_mod) +#define ibz_mat_2x2_add SQISIGN_NAMESPACE(ibz_mat_2x2_add) +#define ibz_mat_2x2_copy SQISIGN_NAMESPACE(ibz_mat_2x2_copy) +#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE(ibz_mat_2x2_det_from_ibz) +#define ibz_mat_2x2_eval SQISIGN_NAMESPACE(ibz_mat_2x2_eval) +#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE(ibz_mat_2x2_inv_mod) +#define ibz_mat_2x2_set SQISIGN_NAMESPACE(ibz_mat_2x2_set) +#define ibz_vec_2_set SQISIGN_NAMESPACE(ibz_vec_2_set) // Namespacing symbols exported from dim2id2iso.c: #undef dim2id2iso_arbitrary_isogeny_evaluation @@ -184,34 +178,34 @@ #undef ibz_vec_4_sub #undef quat_qf_eval -#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_mpm) -#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_pmp) -#define ibz_mat_4x4_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_copy) -#define ibz_mat_4x4_equal SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_equal) -#define ibz_mat_4x4_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval) -#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval_t) -#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_gcd) -#define ibz_mat_4x4_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_identity) -#define ibz_mat_4x4_inv_with_det_as_denom SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_inv_with_det_as_denom) -#define ibz_mat_4x4_is_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_identity) -#define ibz_mat_4x4_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_mul) -#define ibz_mat_4x4_negate SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_negate) -#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_div) -#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_mul) -#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_transpose) -#define ibz_mat_4x4_zero SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_zero) -#define ibz_vec_4_add SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_add) -#define ibz_vec_4_content SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_content) -#define ibz_vec_4_copy SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy) -#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_ibz) -#define ibz_vec_4_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_is_zero) -#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination) -#define ibz_vec_4_negate SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_negate) -#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_div) -#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul) -#define ibz_vec_4_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_set) -#define ibz_vec_4_sub SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_sub) -#define quat_qf_eval SQISIGN_NAMESPACE_GENERIC(quat_qf_eval) +#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE(ibz_inv_dim4_make_coeff_mpm) +#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE(ibz_inv_dim4_make_coeff_pmp) +#define ibz_mat_4x4_copy SQISIGN_NAMESPACE(ibz_mat_4x4_copy) +#define ibz_mat_4x4_equal 
SQISIGN_NAMESPACE(ibz_mat_4x4_equal) +#define ibz_mat_4x4_eval SQISIGN_NAMESPACE(ibz_mat_4x4_eval) +#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE(ibz_mat_4x4_eval_t) +#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE(ibz_mat_4x4_gcd) +#define ibz_mat_4x4_identity SQISIGN_NAMESPACE(ibz_mat_4x4_identity) +#define ibz_mat_4x4_inv_with_det_as_denom SQISIGN_NAMESPACE(ibz_mat_4x4_inv_with_det_as_denom) +#define ibz_mat_4x4_is_identity SQISIGN_NAMESPACE(ibz_mat_4x4_is_identity) +#define ibz_mat_4x4_mul SQISIGN_NAMESPACE(ibz_mat_4x4_mul) +#define ibz_mat_4x4_negate SQISIGN_NAMESPACE(ibz_mat_4x4_negate) +#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE(ibz_mat_4x4_scalar_div) +#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE(ibz_mat_4x4_scalar_mul) +#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE(ibz_mat_4x4_transpose) +#define ibz_mat_4x4_zero SQISIGN_NAMESPACE(ibz_mat_4x4_zero) +#define ibz_vec_4_add SQISIGN_NAMESPACE(ibz_vec_4_add) +#define ibz_vec_4_content SQISIGN_NAMESPACE(ibz_vec_4_content) +#define ibz_vec_4_copy SQISIGN_NAMESPACE(ibz_vec_4_copy) +#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE(ibz_vec_4_copy_ibz) +#define ibz_vec_4_is_zero SQISIGN_NAMESPACE(ibz_vec_4_is_zero) +#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE(ibz_vec_4_linear_combination) +#define ibz_vec_4_negate SQISIGN_NAMESPACE(ibz_vec_4_negate) +#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE(ibz_vec_4_scalar_div) +#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE(ibz_vec_4_scalar_mul) +#define ibz_vec_4_set SQISIGN_NAMESPACE(ibz_vec_4_set) +#define ibz_vec_4_sub SQISIGN_NAMESPACE(ibz_vec_4_sub) +#define quat_qf_eval SQISIGN_NAMESPACE(quat_qf_eval) // Namespacing symbols exported from ec.c: #undef cswap_points @@ -339,22 +333,22 @@ #undef quat_left_ideal_finalize #undef quat_left_ideal_init -#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_finalize) -#define ibz_mat_2x2_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_init) -#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_finalize) -#define ibz_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_init) -#define ibz_vec_2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_finalize) -#define ibz_vec_2_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_init) -#define ibz_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_finalize) -#define ibz_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_init) -#define quat_alg_elem_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_finalize) -#define quat_alg_elem_init SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_init) -#define quat_alg_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_finalize) -#define quat_alg_init_set SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set) -#define quat_lattice_finalize SQISIGN_NAMESPACE_GENERIC(quat_lattice_finalize) -#define quat_lattice_init SQISIGN_NAMESPACE_GENERIC(quat_lattice_init) -#define quat_left_ideal_finalize SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_finalize) -#define quat_left_ideal_init SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_init) +#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE(ibz_mat_2x2_finalize) +#define ibz_mat_2x2_init SQISIGN_NAMESPACE(ibz_mat_2x2_init) +#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE(ibz_mat_4x4_finalize) +#define ibz_mat_4x4_init SQISIGN_NAMESPACE(ibz_mat_4x4_init) +#define ibz_vec_2_finalize SQISIGN_NAMESPACE(ibz_vec_2_finalize) +#define ibz_vec_2_init SQISIGN_NAMESPACE(ibz_vec_2_init) +#define ibz_vec_4_finalize SQISIGN_NAMESPACE(ibz_vec_4_finalize) +#define ibz_vec_4_init SQISIGN_NAMESPACE(ibz_vec_4_init) +#define quat_alg_elem_finalize 
SQISIGN_NAMESPACE(quat_alg_elem_finalize) +#define quat_alg_elem_init SQISIGN_NAMESPACE(quat_alg_elem_init) +#define quat_alg_finalize SQISIGN_NAMESPACE(quat_alg_finalize) +#define quat_alg_init_set SQISIGN_NAMESPACE(quat_alg_init_set) +#define quat_lattice_finalize SQISIGN_NAMESPACE(quat_lattice_finalize) +#define quat_lattice_init SQISIGN_NAMESPACE(quat_lattice_init) +#define quat_left_ideal_finalize SQISIGN_NAMESPACE(quat_left_ideal_finalize) +#define quat_left_ideal_init SQISIGN_NAMESPACE(quat_left_ideal_init) // Namespacing symbols exported from fp.c: #undef fp_select @@ -567,11 +561,11 @@ #undef ibz_vec_4_linear_combination_mod #undef ibz_vec_4_scalar_mul_mod -#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_hnf) -#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE_GENERIC(ibz_mat_4xn_hnf_mod_core) -#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_mod) -#define ibz_vec_4_linear_combination_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination_mod) -#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul_mod) +#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE(ibz_mat_4x4_is_hnf) +#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE(ibz_mat_4xn_hnf_mod_core) +#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE(ibz_vec_4_copy_mod) +#define ibz_vec_4_linear_combination_mod SQISIGN_NAMESPACE(ibz_vec_4_linear_combination_mod) +#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE(ibz_vec_4_scalar_mul_mod) // Namespacing symbols exported from hnf_internal.c: #undef ibz_centered_mod @@ -579,15 +573,15 @@ #undef ibz_mod_not_zero #undef ibz_xgcd_with_u_not_0 -#define ibz_centered_mod SQISIGN_NAMESPACE_GENERIC(ibz_centered_mod) -#define ibz_conditional_assign SQISIGN_NAMESPACE_GENERIC(ibz_conditional_assign) -#define ibz_mod_not_zero SQISIGN_NAMESPACE_GENERIC(ibz_mod_not_zero) -#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE_GENERIC(ibz_xgcd_with_u_not_0) +#define ibz_centered_mod SQISIGN_NAMESPACE(ibz_centered_mod) +#define ibz_conditional_assign SQISIGN_NAMESPACE(ibz_conditional_assign) +#define ibz_mod_not_zero SQISIGN_NAMESPACE(ibz_mod_not_zero) +#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE(ibz_xgcd_with_u_not_0) // Namespacing symbols exported from ibz_division.c: #undef ibz_xgcd -#define ibz_xgcd SQISIGN_NAMESPACE_GENERIC(ibz_xgcd) +#define ibz_xgcd SQISIGN_NAMESPACE(ibz_xgcd) // Namespacing symbols exported from id2iso.c: #undef change_of_basis_matrix_tate @@ -624,22 +618,22 @@ #undef quat_order_discriminant #undef quat_order_is_maximal -#define quat_lideal_add SQISIGN_NAMESPACE_GENERIC(quat_lideal_add) -#define quat_lideal_class_gram SQISIGN_NAMESPACE_GENERIC(quat_lideal_class_gram) -#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_conjugate_without_hnf) -#define quat_lideal_copy SQISIGN_NAMESPACE_GENERIC(quat_lideal_copy) -#define quat_lideal_create SQISIGN_NAMESPACE_GENERIC(quat_lideal_create) -#define quat_lideal_create_principal SQISIGN_NAMESPACE_GENERIC(quat_lideal_create_principal) -#define quat_lideal_equals SQISIGN_NAMESPACE_GENERIC(quat_lideal_equals) -#define quat_lideal_generator SQISIGN_NAMESPACE_GENERIC(quat_lideal_generator) -#define quat_lideal_inter SQISIGN_NAMESPACE_GENERIC(quat_lideal_inter) -#define quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_inverse_lattice_without_hnf) -#define quat_lideal_mul SQISIGN_NAMESPACE_GENERIC(quat_lideal_mul) -#define quat_lideal_norm SQISIGN_NAMESPACE_GENERIC(quat_lideal_norm) -#define quat_lideal_right_order 
SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_order) -#define quat_lideal_right_transporter SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_transporter) -#define quat_order_discriminant SQISIGN_NAMESPACE_GENERIC(quat_order_discriminant) -#define quat_order_is_maximal SQISIGN_NAMESPACE_GENERIC(quat_order_is_maximal) +#define quat_lideal_add SQISIGN_NAMESPACE(quat_lideal_add) +#define quat_lideal_class_gram SQISIGN_NAMESPACE(quat_lideal_class_gram) +#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE(quat_lideal_conjugate_without_hnf) +#define quat_lideal_copy SQISIGN_NAMESPACE(quat_lideal_copy) +#define quat_lideal_create SQISIGN_NAMESPACE(quat_lideal_create) +#define quat_lideal_create_principal SQISIGN_NAMESPACE(quat_lideal_create_principal) +#define quat_lideal_equals SQISIGN_NAMESPACE(quat_lideal_equals) +#define quat_lideal_generator SQISIGN_NAMESPACE(quat_lideal_generator) +#define quat_lideal_inter SQISIGN_NAMESPACE(quat_lideal_inter) +#define quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE(quat_lideal_inverse_lattice_without_hnf) +#define quat_lideal_mul SQISIGN_NAMESPACE(quat_lideal_mul) +#define quat_lideal_norm SQISIGN_NAMESPACE(quat_lideal_norm) +#define quat_lideal_right_order SQISIGN_NAMESPACE(quat_lideal_right_order) +#define quat_lideal_right_transporter SQISIGN_NAMESPACE(quat_lideal_right_transporter) +#define quat_order_discriminant SQISIGN_NAMESPACE(quat_order_discriminant) +#define quat_order_is_maximal SQISIGN_NAMESPACE(quat_order_is_maximal) // Namespacing symbols exported from intbig.c: #undef ibz_abs @@ -647,6 +641,10 @@ #undef ibz_bitsize #undef ibz_cmp #undef ibz_cmp_int32 +#undef ibz_const_one +#undef ibz_const_three +#undef ibz_const_two +#undef ibz_const_zero #undef ibz_convert_to_str #undef ibz_copy #undef ibz_copy_digits @@ -687,57 +685,61 @@ #undef ibz_to_digits #undef ibz_two_adic -#define ibz_abs SQISIGN_NAMESPACE_GENERIC(ibz_abs) -#define ibz_add SQISIGN_NAMESPACE_GENERIC(ibz_add) -#define ibz_bitsize SQISIGN_NAMESPACE_GENERIC(ibz_bitsize) -#define ibz_cmp SQISIGN_NAMESPACE_GENERIC(ibz_cmp) -#define ibz_cmp_int32 SQISIGN_NAMESPACE_GENERIC(ibz_cmp_int32) -#define ibz_convert_to_str SQISIGN_NAMESPACE_GENERIC(ibz_convert_to_str) -#define ibz_copy SQISIGN_NAMESPACE_GENERIC(ibz_copy) -#define ibz_copy_digits SQISIGN_NAMESPACE_GENERIC(ibz_copy_digits) -#define ibz_div SQISIGN_NAMESPACE_GENERIC(ibz_div) -#define ibz_div_2exp SQISIGN_NAMESPACE_GENERIC(ibz_div_2exp) -#define ibz_div_floor SQISIGN_NAMESPACE_GENERIC(ibz_div_floor) -#define ibz_divides SQISIGN_NAMESPACE_GENERIC(ibz_divides) -#define ibz_finalize SQISIGN_NAMESPACE_GENERIC(ibz_finalize) -#define ibz_gcd SQISIGN_NAMESPACE_GENERIC(ibz_gcd) -#define ibz_get SQISIGN_NAMESPACE_GENERIC(ibz_get) -#define ibz_init SQISIGN_NAMESPACE_GENERIC(ibz_init) -#define ibz_invmod SQISIGN_NAMESPACE_GENERIC(ibz_invmod) -#define ibz_is_even SQISIGN_NAMESPACE_GENERIC(ibz_is_even) -#define ibz_is_odd SQISIGN_NAMESPACE_GENERIC(ibz_is_odd) -#define ibz_is_one SQISIGN_NAMESPACE_GENERIC(ibz_is_one) -#define ibz_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_is_zero) -#define ibz_legendre SQISIGN_NAMESPACE_GENERIC(ibz_legendre) -#define ibz_mod SQISIGN_NAMESPACE_GENERIC(ibz_mod) -#define ibz_mod_ui SQISIGN_NAMESPACE_GENERIC(ibz_mod_ui) -#define ibz_mul SQISIGN_NAMESPACE_GENERIC(ibz_mul) -#define ibz_neg SQISIGN_NAMESPACE_GENERIC(ibz_neg) -#define ibz_pow SQISIGN_NAMESPACE_GENERIC(ibz_pow) -#define ibz_pow_mod SQISIGN_NAMESPACE_GENERIC(ibz_pow_mod) -#define ibz_print SQISIGN_NAMESPACE_GENERIC(ibz_print) -#define ibz_probab_prime 
SQISIGN_NAMESPACE_GENERIC(ibz_probab_prime) -#define ibz_rand_interval SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval) -#define ibz_rand_interval_bits SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_bits) -#define ibz_rand_interval_i SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_i) -#define ibz_rand_interval_minm_m SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_minm_m) -#define ibz_set SQISIGN_NAMESPACE_GENERIC(ibz_set) -#define ibz_set_from_str SQISIGN_NAMESPACE_GENERIC(ibz_set_from_str) -#define ibz_size_in_base SQISIGN_NAMESPACE_GENERIC(ibz_size_in_base) -#define ibz_sqrt SQISIGN_NAMESPACE_GENERIC(ibz_sqrt) -#define ibz_sqrt_floor SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_floor) -#define ibz_sqrt_mod_p SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_mod_p) -#define ibz_sub SQISIGN_NAMESPACE_GENERIC(ibz_sub) -#define ibz_swap SQISIGN_NAMESPACE_GENERIC(ibz_swap) -#define ibz_to_digits SQISIGN_NAMESPACE_GENERIC(ibz_to_digits) -#define ibz_two_adic SQISIGN_NAMESPACE_GENERIC(ibz_two_adic) +#define ibz_abs SQISIGN_NAMESPACE(ibz_abs) +#define ibz_add SQISIGN_NAMESPACE(ibz_add) +#define ibz_bitsize SQISIGN_NAMESPACE(ibz_bitsize) +#define ibz_cmp SQISIGN_NAMESPACE(ibz_cmp) +#define ibz_cmp_int32 SQISIGN_NAMESPACE(ibz_cmp_int32) +#define ibz_const_one SQISIGN_NAMESPACE(ibz_const_one) +#define ibz_const_three SQISIGN_NAMESPACE(ibz_const_three) +#define ibz_const_two SQISIGN_NAMESPACE(ibz_const_two) +#define ibz_const_zero SQISIGN_NAMESPACE(ibz_const_zero) +#define ibz_convert_to_str SQISIGN_NAMESPACE(ibz_convert_to_str) +#define ibz_copy SQISIGN_NAMESPACE(ibz_copy) +#define ibz_copy_digits SQISIGN_NAMESPACE(ibz_copy_digits) +#define ibz_div SQISIGN_NAMESPACE(ibz_div) +#define ibz_div_2exp SQISIGN_NAMESPACE(ibz_div_2exp) +#define ibz_div_floor SQISIGN_NAMESPACE(ibz_div_floor) +#define ibz_divides SQISIGN_NAMESPACE(ibz_divides) +#define ibz_finalize SQISIGN_NAMESPACE(ibz_finalize) +#define ibz_gcd SQISIGN_NAMESPACE(ibz_gcd) +#define ibz_get SQISIGN_NAMESPACE(ibz_get) +#define ibz_init SQISIGN_NAMESPACE(ibz_init) +#define ibz_invmod SQISIGN_NAMESPACE(ibz_invmod) +#define ibz_is_even SQISIGN_NAMESPACE(ibz_is_even) +#define ibz_is_odd SQISIGN_NAMESPACE(ibz_is_odd) +#define ibz_is_one SQISIGN_NAMESPACE(ibz_is_one) +#define ibz_is_zero SQISIGN_NAMESPACE(ibz_is_zero) +#define ibz_legendre SQISIGN_NAMESPACE(ibz_legendre) +#define ibz_mod SQISIGN_NAMESPACE(ibz_mod) +#define ibz_mod_ui SQISIGN_NAMESPACE(ibz_mod_ui) +#define ibz_mul SQISIGN_NAMESPACE(ibz_mul) +#define ibz_neg SQISIGN_NAMESPACE(ibz_neg) +#define ibz_pow SQISIGN_NAMESPACE(ibz_pow) +#define ibz_pow_mod SQISIGN_NAMESPACE(ibz_pow_mod) +#define ibz_print SQISIGN_NAMESPACE(ibz_print) +#define ibz_probab_prime SQISIGN_NAMESPACE(ibz_probab_prime) +#define ibz_rand_interval SQISIGN_NAMESPACE(ibz_rand_interval) +#define ibz_rand_interval_bits SQISIGN_NAMESPACE(ibz_rand_interval_bits) +#define ibz_rand_interval_i SQISIGN_NAMESPACE(ibz_rand_interval_i) +#define ibz_rand_interval_minm_m SQISIGN_NAMESPACE(ibz_rand_interval_minm_m) +#define ibz_set SQISIGN_NAMESPACE(ibz_set) +#define ibz_set_from_str SQISIGN_NAMESPACE(ibz_set_from_str) +#define ibz_size_in_base SQISIGN_NAMESPACE(ibz_size_in_base) +#define ibz_sqrt SQISIGN_NAMESPACE(ibz_sqrt) +#define ibz_sqrt_floor SQISIGN_NAMESPACE(ibz_sqrt_floor) +#define ibz_sqrt_mod_p SQISIGN_NAMESPACE(ibz_sqrt_mod_p) +#define ibz_sub SQISIGN_NAMESPACE(ibz_sub) +#define ibz_swap SQISIGN_NAMESPACE(ibz_swap) +#define ibz_to_digits SQISIGN_NAMESPACE(ibz_to_digits) +#define ibz_two_adic SQISIGN_NAMESPACE(ibz_two_adic) // Namespacing symbols exported from 
integers.c: #undef ibz_cornacchia_prime #undef ibz_generate_random_prime -#define ibz_cornacchia_prime SQISIGN_NAMESPACE_GENERIC(ibz_cornacchia_prime) -#define ibz_generate_random_prime SQISIGN_NAMESPACE_GENERIC(ibz_generate_random_prime) +#define ibz_cornacchia_prime SQISIGN_NAMESPACE(ibz_cornacchia_prime) +#define ibz_generate_random_prime SQISIGN_NAMESPACE(ibz_generate_random_prime) // Namespacing symbols exported from isog_chains.c: #undef ec_eval_even @@ -763,15 +765,15 @@ #undef quat_lattice_lll #undef quat_lll_core -#define quat_lattice_lll SQISIGN_NAMESPACE_GENERIC(quat_lattice_lll) -#define quat_lll_core SQISIGN_NAMESPACE_GENERIC(quat_lll_core) +#define quat_lattice_lll SQISIGN_NAMESPACE(quat_lattice_lll) +#define quat_lll_core SQISIGN_NAMESPACE(quat_lll_core) // Namespacing symbols exported from lat_ball.c: #undef quat_lattice_bound_parallelogram #undef quat_lattice_sample_from_ball -#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE_GENERIC(quat_lattice_bound_parallelogram) -#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE_GENERIC(quat_lattice_sample_from_ball) +#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE(quat_lattice_bound_parallelogram) +#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE(quat_lattice_sample_from_ball) // Namespacing symbols exported from lattice.c: #undef quat_lattice_add @@ -789,29 +791,29 @@ #undef quat_lattice_mul #undef quat_lattice_reduce_denom -#define quat_lattice_add SQISIGN_NAMESPACE_GENERIC(quat_lattice_add) -#define quat_lattice_alg_elem_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_alg_elem_mul) -#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_conjugate_without_hnf) -#define quat_lattice_contains SQISIGN_NAMESPACE_GENERIC(quat_lattice_contains) -#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_dual_without_hnf) -#define quat_lattice_equal SQISIGN_NAMESPACE_GENERIC(quat_lattice_equal) -#define quat_lattice_gram SQISIGN_NAMESPACE_GENERIC(quat_lattice_gram) -#define quat_lattice_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_hnf) -#define quat_lattice_inclusion SQISIGN_NAMESPACE_GENERIC(quat_lattice_inclusion) -#define quat_lattice_index SQISIGN_NAMESPACE_GENERIC(quat_lattice_index) -#define quat_lattice_intersect SQISIGN_NAMESPACE_GENERIC(quat_lattice_intersect) -#define quat_lattice_mat_alg_coord_mul_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_mat_alg_coord_mul_without_hnf) -#define quat_lattice_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_mul) -#define quat_lattice_reduce_denom SQISIGN_NAMESPACE_GENERIC(quat_lattice_reduce_denom) +#define quat_lattice_add SQISIGN_NAMESPACE(quat_lattice_add) +#define quat_lattice_alg_elem_mul SQISIGN_NAMESPACE(quat_lattice_alg_elem_mul) +#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE(quat_lattice_conjugate_without_hnf) +#define quat_lattice_contains SQISIGN_NAMESPACE(quat_lattice_contains) +#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE(quat_lattice_dual_without_hnf) +#define quat_lattice_equal SQISIGN_NAMESPACE(quat_lattice_equal) +#define quat_lattice_gram SQISIGN_NAMESPACE(quat_lattice_gram) +#define quat_lattice_hnf SQISIGN_NAMESPACE(quat_lattice_hnf) +#define quat_lattice_inclusion SQISIGN_NAMESPACE(quat_lattice_inclusion) +#define quat_lattice_index SQISIGN_NAMESPACE(quat_lattice_index) +#define quat_lattice_intersect SQISIGN_NAMESPACE(quat_lattice_intersect) +#define quat_lattice_mat_alg_coord_mul_without_hnf SQISIGN_NAMESPACE(quat_lattice_mat_alg_coord_mul_without_hnf) +#define 
quat_lattice_mul SQISIGN_NAMESPACE(quat_lattice_mul) +#define quat_lattice_reduce_denom SQISIGN_NAMESPACE(quat_lattice_reduce_denom) // Namespacing symbols exported from lll_applications.c: #undef quat_lideal_lideal_mul_reduced #undef quat_lideal_prime_norm_reduced_equivalent #undef quat_lideal_reduce_basis -#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE_GENERIC(quat_lideal_lideal_mul_reduced) -#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE_GENERIC(quat_lideal_prime_norm_reduced_equivalent) -#define quat_lideal_reduce_basis SQISIGN_NAMESPACE_GENERIC(quat_lideal_reduce_basis) +#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE(quat_lideal_lideal_mul_reduced) +#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE(quat_lideal_prime_norm_reduced_equivalent) +#define quat_lideal_reduce_basis SQISIGN_NAMESPACE(quat_lideal_reduce_basis) // Namespacing symbols exported from lll_verification.c: #undef ibq_vec_4_copy_ibz @@ -820,18 +822,18 @@ #undef quat_lll_set_ibq_parameters #undef quat_lll_verify -#define ibq_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_copy_ibz) -#define quat_lll_bilinear SQISIGN_NAMESPACE_GENERIC(quat_lll_bilinear) -#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE_GENERIC(quat_lll_gram_schmidt_transposed_with_ibq) -#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE_GENERIC(quat_lll_set_ibq_parameters) -#define quat_lll_verify SQISIGN_NAMESPACE_GENERIC(quat_lll_verify) +#define ibq_vec_4_copy_ibz SQISIGN_NAMESPACE(ibq_vec_4_copy_ibz) +#define quat_lll_bilinear SQISIGN_NAMESPACE(quat_lll_bilinear) +#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE(quat_lll_gram_schmidt_transposed_with_ibq) +#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE(quat_lll_set_ibq_parameters) +#define quat_lll_verify SQISIGN_NAMESPACE(quat_lll_verify) // Namespacing symbols exported from mem.c: #undef sqisign_secure_clear #undef sqisign_secure_free -#define sqisign_secure_clear SQISIGN_NAMESPACE_GENERIC(sqisign_secure_clear) -#define sqisign_secure_free SQISIGN_NAMESPACE_GENERIC(sqisign_secure_free) +#define sqisign_secure_clear SQISIGN_NAMESPACE(sqisign_secure_clear) +#define sqisign_secure_free SQISIGN_NAMESPACE(sqisign_secure_free) // Namespacing symbols exported from mp.c: #undef MUL @@ -854,25 +856,25 @@ #undef select_ct #undef swap_ct -#define MUL SQISIGN_NAMESPACE_GENERIC(MUL) -#define mp_add SQISIGN_NAMESPACE_GENERIC(mp_add) -#define mp_compare SQISIGN_NAMESPACE_GENERIC(mp_compare) -#define mp_copy SQISIGN_NAMESPACE_GENERIC(mp_copy) -#define mp_inv_2e SQISIGN_NAMESPACE_GENERIC(mp_inv_2e) -#define mp_invert_matrix SQISIGN_NAMESPACE_GENERIC(mp_invert_matrix) -#define mp_is_one SQISIGN_NAMESPACE_GENERIC(mp_is_one) -#define mp_is_zero SQISIGN_NAMESPACE_GENERIC(mp_is_zero) -#define mp_mod_2exp SQISIGN_NAMESPACE_GENERIC(mp_mod_2exp) -#define mp_mul SQISIGN_NAMESPACE_GENERIC(mp_mul) -#define mp_mul2 SQISIGN_NAMESPACE_GENERIC(mp_mul2) -#define mp_neg SQISIGN_NAMESPACE_GENERIC(mp_neg) -#define mp_print SQISIGN_NAMESPACE_GENERIC(mp_print) -#define mp_shiftl SQISIGN_NAMESPACE_GENERIC(mp_shiftl) -#define mp_shiftr SQISIGN_NAMESPACE_GENERIC(mp_shiftr) -#define mp_sub SQISIGN_NAMESPACE_GENERIC(mp_sub) -#define multiple_mp_shiftl SQISIGN_NAMESPACE_GENERIC(multiple_mp_shiftl) -#define select_ct SQISIGN_NAMESPACE_GENERIC(select_ct) -#define swap_ct SQISIGN_NAMESPACE_GENERIC(swap_ct) +#define MUL SQISIGN_NAMESPACE(MUL) +#define mp_add SQISIGN_NAMESPACE(mp_add) +#define mp_compare SQISIGN_NAMESPACE(mp_compare) 
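/*
 * [Editorial note, not part of the upstream patch] The hunks above and below
 * mechanically swap SQISIGN_NAMESPACE_GENERIC(s) for SQISIGN_NAMESPACE(s).
 * The generic macro gave shared symbols a common "gen" prefix (its deleted
 * definition appears further down in this patch), while the per-variant
 * macro prefixes each symbol with the build type and parameter set, so the
 * lvl1/lvl3/lvl5 ref and broadwell copies vendored into liboqs can all be
 * linked into one library without duplicate-symbol errors. A minimal sketch
 * of the token-pasting pattern follows; the prefix layout is assumed for
 * illustration only and does not reproduce the real PARAM_NAME3/PARAM_JOIN
 * plumbing from sqisign_namespace.h.
 */
#include <stdio.h>

#define JOIN_(a, b) a##_##b
#define JOIN(a, b) JOIN_(a, b)

/* Assumed stand-in for the per-variant namespace macro: prefix every symbol
 * with "sqisign" plus an assumed SQISIGN_VARIANT such as lvl1. */
#define SQISIGN_VARIANT lvl1
#define MY_NAMESPACE(s) JOIN(JOIN(sqisign, SQISIGN_VARIANT), s)

#define ibz_add MY_NAMESPACE(ibz_add) /* ibz_add now names sqisign_lvl1_ibz_add */

static void ibz_add(void) { puts("sqisign_lvl1_ibz_add called"); }

int main(void)
{
    ibz_add(); /* callers keep the short name; the linker sees the prefixed one */
    return 0;
}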
+#define mp_copy SQISIGN_NAMESPACE(mp_copy) +#define mp_inv_2e SQISIGN_NAMESPACE(mp_inv_2e) +#define mp_invert_matrix SQISIGN_NAMESPACE(mp_invert_matrix) +#define mp_is_one SQISIGN_NAMESPACE(mp_is_one) +#define mp_is_zero SQISIGN_NAMESPACE(mp_is_zero) +#define mp_mod_2exp SQISIGN_NAMESPACE(mp_mod_2exp) +#define mp_mul SQISIGN_NAMESPACE(mp_mul) +#define mp_mul2 SQISIGN_NAMESPACE(mp_mul2) +#define mp_neg SQISIGN_NAMESPACE(mp_neg) +#define mp_print SQISIGN_NAMESPACE(mp_print) +#define mp_shiftl SQISIGN_NAMESPACE(mp_shiftl) +#define mp_shiftr SQISIGN_NAMESPACE(mp_shiftr) +#define mp_sub SQISIGN_NAMESPACE(mp_sub) +#define multiple_mp_shiftl SQISIGN_NAMESPACE(multiple_mp_shiftl) +#define select_ct SQISIGN_NAMESPACE(select_ct) +#define swap_ct SQISIGN_NAMESPACE(swap_ct) // Namespacing symbols exported from normeq.c: #undef quat_change_to_O0_basis @@ -882,12 +884,12 @@ #undef quat_represent_integer #undef quat_sampling_random_ideal_O0_given_norm -#define quat_change_to_O0_basis SQISIGN_NAMESPACE_GENERIC(quat_change_to_O0_basis) -#define quat_lattice_O0_set SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set) -#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set_extremal) -#define quat_order_elem_create SQISIGN_NAMESPACE_GENERIC(quat_order_elem_create) -#define quat_represent_integer SQISIGN_NAMESPACE_GENERIC(quat_represent_integer) -#define quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE_GENERIC(quat_sampling_random_ideal_O0_given_norm) +#define quat_change_to_O0_basis SQISIGN_NAMESPACE(quat_change_to_O0_basis) +#define quat_lattice_O0_set SQISIGN_NAMESPACE(quat_lattice_O0_set) +#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE(quat_lattice_O0_set_extremal) +#define quat_order_elem_create SQISIGN_NAMESPACE(quat_order_elem_create) +#define quat_represent_integer SQISIGN_NAMESPACE(quat_represent_integer) +#define quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE(quat_sampling_random_ideal_O0_given_norm) // Namespacing symbols exported from printer.c: #undef ibz_mat_2x2_print @@ -899,23 +901,23 @@ #undef quat_lattice_print #undef quat_left_ideal_print -#define ibz_mat_2x2_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_print) -#define ibz_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_print) -#define ibz_vec_2_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_print) -#define ibz_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_print) -#define quat_alg_elem_print SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_print) -#define quat_alg_print SQISIGN_NAMESPACE_GENERIC(quat_alg_print) -#define quat_lattice_print SQISIGN_NAMESPACE_GENERIC(quat_lattice_print) -#define quat_left_ideal_print SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_print) +#define ibz_mat_2x2_print SQISIGN_NAMESPACE(ibz_mat_2x2_print) +#define ibz_mat_4x4_print SQISIGN_NAMESPACE(ibz_mat_4x4_print) +#define ibz_vec_2_print SQISIGN_NAMESPACE(ibz_vec_2_print) +#define ibz_vec_4_print SQISIGN_NAMESPACE(ibz_vec_4_print) +#define quat_alg_elem_print SQISIGN_NAMESPACE(quat_alg_elem_print) +#define quat_alg_print SQISIGN_NAMESPACE(quat_alg_print) +#define quat_lattice_print SQISIGN_NAMESPACE(quat_lattice_print) +#define quat_left_ideal_print SQISIGN_NAMESPACE(quat_left_ideal_print) // Namespacing symbols exported from random_input_generation.c: #undef quat_test_input_random_ideal_generation #undef quat_test_input_random_ideal_lattice_generation #undef quat_test_input_random_lattice_generation -#define quat_test_input_random_ideal_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_generation) -#define 
quat_test_input_random_ideal_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_lattice_generation) -#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_lattice_generation) +#define quat_test_input_random_ideal_generation SQISIGN_NAMESPACE(quat_test_input_random_ideal_generation) +#define quat_test_input_random_ideal_lattice_generation SQISIGN_NAMESPACE(quat_test_input_random_ideal_lattice_generation) +#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE(quat_test_input_random_lattice_generation) // Namespacing symbols exported from rationals.c: #undef ibq_abs @@ -941,28 +943,28 @@ #undef ibq_vec_4_init #undef ibq_vec_4_print -#define ibq_abs SQISIGN_NAMESPACE_GENERIC(ibq_abs) -#define ibq_add SQISIGN_NAMESPACE_GENERIC(ibq_add) -#define ibq_cmp SQISIGN_NAMESPACE_GENERIC(ibq_cmp) -#define ibq_copy SQISIGN_NAMESPACE_GENERIC(ibq_copy) -#define ibq_finalize SQISIGN_NAMESPACE_GENERIC(ibq_finalize) -#define ibq_init SQISIGN_NAMESPACE_GENERIC(ibq_init) -#define ibq_inv SQISIGN_NAMESPACE_GENERIC(ibq_inv) -#define ibq_is_ibz SQISIGN_NAMESPACE_GENERIC(ibq_is_ibz) -#define ibq_is_one SQISIGN_NAMESPACE_GENERIC(ibq_is_one) -#define ibq_is_zero SQISIGN_NAMESPACE_GENERIC(ibq_is_zero) -#define ibq_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_finalize) -#define ibq_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_init) -#define ibq_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_print) -#define ibq_mul SQISIGN_NAMESPACE_GENERIC(ibq_mul) -#define ibq_neg SQISIGN_NAMESPACE_GENERIC(ibq_neg) -#define ibq_reduce SQISIGN_NAMESPACE_GENERIC(ibq_reduce) -#define ibq_set SQISIGN_NAMESPACE_GENERIC(ibq_set) -#define ibq_sub SQISIGN_NAMESPACE_GENERIC(ibq_sub) -#define ibq_to_ibz SQISIGN_NAMESPACE_GENERIC(ibq_to_ibz) -#define ibq_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_finalize) -#define ibq_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_init) -#define ibq_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_print) +#define ibq_abs SQISIGN_NAMESPACE(ibq_abs) +#define ibq_add SQISIGN_NAMESPACE(ibq_add) +#define ibq_cmp SQISIGN_NAMESPACE(ibq_cmp) +#define ibq_copy SQISIGN_NAMESPACE(ibq_copy) +#define ibq_finalize SQISIGN_NAMESPACE(ibq_finalize) +#define ibq_init SQISIGN_NAMESPACE(ibq_init) +#define ibq_inv SQISIGN_NAMESPACE(ibq_inv) +#define ibq_is_ibz SQISIGN_NAMESPACE(ibq_is_ibz) +#define ibq_is_one SQISIGN_NAMESPACE(ibq_is_one) +#define ibq_is_zero SQISIGN_NAMESPACE(ibq_is_zero) +#define ibq_mat_4x4_finalize SQISIGN_NAMESPACE(ibq_mat_4x4_finalize) +#define ibq_mat_4x4_init SQISIGN_NAMESPACE(ibq_mat_4x4_init) +#define ibq_mat_4x4_print SQISIGN_NAMESPACE(ibq_mat_4x4_print) +#define ibq_mul SQISIGN_NAMESPACE(ibq_mul) +#define ibq_neg SQISIGN_NAMESPACE(ibq_neg) +#define ibq_reduce SQISIGN_NAMESPACE(ibq_reduce) +#define ibq_set SQISIGN_NAMESPACE(ibq_set) +#define ibq_sub SQISIGN_NAMESPACE(ibq_sub) +#define ibq_to_ibz SQISIGN_NAMESPACE(ibq_to_ibz) +#define ibq_vec_4_finalize SQISIGN_NAMESPACE(ibq_vec_4_finalize) +#define ibq_vec_4_init SQISIGN_NAMESPACE(ibq_vec_4_init) +#define ibq_vec_4_print SQISIGN_NAMESPACE(ibq_vec_4_print) // Namespacing symbols exported from sign.c: #undef protocols_sign diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/tools.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/tools.c deleted file mode 100644 index 242ea08fe2..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/tools.c +++ /dev/null @@ -1,75 +0,0 @@ -#include -#include - -static clock_t global_timer; - -clock_t 
-tic(void) -{ - global_timer = clock(); - return global_timer; -} - -float -tac(void) -{ - float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); - return ms; -} - -float -TAC(const char *str) -{ - float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); -#ifndef NDEBUG - printf("%s [%d ms]\n", str, (int)ms); -#endif - return ms; -} - -float -toc(const clock_t t) -{ - float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); - return ms; -} - -float -TOC(const clock_t t, const char *str) -{ - float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); - printf("%s [%d ms]\n", str, (int)ms); - return ms; - // printf("%s [%ld]\n",str,clock()-t); - // return (float) (clock()-t); -} - -float -TOC_clock(const clock_t t, const char *str) -{ - printf("%s [%ld]\n", str, clock() - t); - return (float)(clock() - t); -} - -clock_t -dclock(const clock_t t) -{ - return (clock() - t); -} - -float -clock_to_time(const clock_t t, const char *str) -{ - float ms = (1000. * (float)(t) / CLOCKS_PER_SEC); - printf("%s [%d ms]\n", str, (int)ms); - return ms; - // printf("%s [%ld]\n",str,t); - // return (float) (t); -} - -float -clock_print(const clock_t t, const char *str) -{ - printf("%s [%ld]\n", str, t); - return (float)(t); -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_ctrdrbg_aesni.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_ctrdrbg_aesni.c deleted file mode 100644 index 3fc67acfb6..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_ctrdrbg_aesni.c +++ /dev/null @@ -1,87 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 and Unknown -// -/* -NIST-developed software is provided by NIST as a public service. You may use, -copy, and distribute copies of the software in any medium, provided that you -keep intact this entire notice. You may improve, modify, and create derivative -works of the software or any portion of the software, and you may copy and -distribute such modifications or works. Modified works should carry a notice -stating that you changed the software and should note the date and nature of any -such change. Please explicitly acknowledge the National Institute of Standards -and Technology as the source of the software. - -NIST-developed software is expressly provided "AS IS." NIST MAKES NO WARRANTY OF -ANY KIND, EXPRESS, IMPLIED, IN FACT, OR ARISING BY OPERATION OF LAW, INCLUDING, -WITHOUT LIMITATION, THE IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE, NON-INFRINGEMENT, AND DATA ACCURACY. NIST NEITHER REPRESENTS -NOR WARRANTS THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR -ERROR-FREE, OR THAT ANY DEFECTS WILL BE CORRECTED. NIST DOES NOT WARRANT OR MAKE -ANY REPRESENTATIONS REGARDING THE USE OF THE SOFTWARE OR THE RESULTS THEREOF, -INCLUDING BUT NOT LIMITED TO THE CORRECTNESS, ACCURACY, RELIABILITY, OR -USEFULNESS OF THE SOFTWARE. - -You are solely responsible for determining the appropriateness of using and -distributing the software and you assume all risks associated with its use, -including but not limited to the risks and costs of program errors, compliance -with applicable laws, damage to or loss of data, programs or equipment, and the -unavailability or interruption of operation. This software is not intended to be -used in any situation where a failure could cause risk of injury or damage to -property. The software developed by NIST employees is not subject to copyright -protection within the United States. 
-*/ - -#include - -#include -#include "ctr_drbg.h" - -#ifdef ENABLE_CT_TESTING -#include -#endif - -#define RNG_SUCCESS 0 -#define RNG_BAD_MAXLEN -1 -#define RNG_BAD_OUTBUF -2 -#define RNG_BAD_REQ_LEN -3 - -CTR_DRBG_STATE drbg; - -#ifndef CTRDRBG_TEST_BENCH -static -#endif -void -randombytes_init_aes_ni(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength) { - (void)security_strength; // fixed to 256 - CTR_DRBG_init(&drbg, entropy_input, personalization_string, - (personalization_string == NULL) ? 0 : CTR_DRBG_ENTROPY_LEN); -} - -#ifndef CTRDRBG_TEST_BENCH -static -#endif -int -randombytes_aes_ni(unsigned char *x, size_t xlen) { - CTR_DRBG_generate(&drbg, x, xlen, NULL, 0); - return RNG_SUCCESS; -} - -#ifdef RANDOMBYTES_AES_NI -SQISIGN_API -int randombytes(unsigned char *random_array, unsigned long long nbytes) { - int ret = randombytes_aes_ni(random_array, nbytes); -#ifdef ENABLE_CT_TESTING - VALGRIND_MAKE_MEM_UNDEFINED(random_array, ret); -#endif - return ret; -} - -SQISIGN_API -void randombytes_init(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength) { - randombytes_init_aes_ni(entropy_input, personalization_string, - security_strength); -} -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_system.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_system.c deleted file mode 100644 index 689c29b242..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_system.c +++ /dev/null @@ -1,431 +0,0 @@ -// SPDX-License-Identifier: MIT - -/* -The MIT License -Copyright (c) 2017 Daan Sprenkels -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. -*/ - -#include - -#ifdef ENABLE_CT_TESTING -#include -#endif - -// In the case that are compiling on linux, we need to define _GNU_SOURCE -// *before* randombytes.h is included. Otherwise SYS_getrandom will not be -// declared. -#if defined(__linux__) || defined(__GNU__) -#define _GNU_SOURCE -#endif /* defined(__linux__) || defined(__GNU__) */ - -#if defined(_WIN32) -/* Windows */ -#include -#include /* CryptAcquireContext, CryptGenRandom */ -#endif /* defined(_WIN32) */ - -/* wasi */ -#if defined(__wasi__) -#include -#endif - -/* kFreeBSD */ -#if defined(__FreeBSD_kernel__) && defined(__GLIBC__) -#define GNU_KFREEBSD -#endif - -#if defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) -/* Linux */ -// We would need to include , but not every target has access -// to the linux headers. 
We only need RNDGETENTCNT, so we instead inline it. -// RNDGETENTCNT is originally defined in `include/uapi/linux/random.h` in the -// linux repo. -#define RNDGETENTCNT 0x80045200 - -#include -#include -#include -#include -#include -#include -#include -#if (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && \ - ((__GLIBC__ > 2) || (__GLIBC_MINOR__ > 24)) -#define USE_GLIBC -#include -#endif /* (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && ((__GLIBC__ > 2) || \ - (__GLIBC_MINOR__ > 24)) */ -#include -#include -#include -#include - -// We need SSIZE_MAX as the maximum read len from /dev/urandom -#if !defined(SSIZE_MAX) -#define SSIZE_MAX (SIZE_MAX / 2 - 1) -#endif /* defined(SSIZE_MAX) */ - -#endif /* defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) */ - -#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) -/* Dragonfly, FreeBSD, NetBSD, OpenBSD (has arc4random) */ -#include -#if defined(BSD) -#include -#endif -/* GNU/Hurd defines BSD in sys/param.h which causes problems later */ -#if defined(__GNU__) -#undef BSD -#endif -#endif - -#if defined(__EMSCRIPTEN__) -#include -#include -#include -#include -#endif /* defined(__EMSCRIPTEN__) */ - -#if defined(_WIN32) -static int -randombytes_win32_randombytes(void *buf, size_t n) -{ - HCRYPTPROV ctx; - BOOL tmp; - DWORD to_read = 0; - const size_t MAX_DWORD = 0xFFFFFFFF; - - tmp = CryptAcquireContext(&ctx, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT); - if (tmp == FALSE) - return -1; - - while (n > 0) { - to_read = (DWORD)(n < MAX_DWORD ? n : MAX_DWORD); - tmp = CryptGenRandom(ctx, to_read, (BYTE *)buf); - if (tmp == FALSE) - return -1; - buf = ((char *)buf) + to_read; - n -= to_read; - } - - tmp = CryptReleaseContext(ctx, 0); - if (tmp == FALSE) - return -1; - - return 0; -} -#endif /* defined(_WIN32) */ - -#if defined(__wasi__) -static int -randombytes_wasi_randombytes(void *buf, size_t n) -{ - arc4random_buf(buf, n); - return 0; -} -#endif /* defined(__wasi__) */ - -#if (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || defined(SYS_getrandom)) -#if defined(USE_GLIBC) -// getrandom is declared in glibc. -#elif defined(SYS_getrandom) -static ssize_t -getrandom(void *buf, size_t buflen, unsigned int flags) -{ - return syscall(SYS_getrandom, buf, buflen, flags); -} -#endif - -static int -randombytes_linux_randombytes_getrandom(void *buf, size_t n) -{ - /* I have thought about using a separate PRF, seeded by getrandom, but - * it turns out that the performance of getrandom is good enough - * (250 MB/s on my laptop). - */ - size_t offset = 0, chunk; - int ret; - while (n > 0) { - /* getrandom does not allow chunks larger than 33554431 */ - chunk = n <= 33554431 ? 
n : 33554431; - do { - ret = getrandom((char *)buf + offset, chunk, 0); - } while (ret == -1 && errno == EINTR); - if (ret < 0) - return ret; - offset += ret; - n -= ret; - } - assert(n == 0); - return 0; -} -#endif /* (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || \ - defined(SYS_getrandom)) */ - -#if (defined(__linux__) || defined(GNU_KFREEBSD)) && !defined(SYS_getrandom) - -#if defined(__linux__) -static int -randombytes_linux_read_entropy_ioctl(int device, int *entropy) -{ - return ioctl(device, RNDGETENTCNT, entropy); -} - -static int -randombytes_linux_read_entropy_proc(FILE *stream, int *entropy) -{ - int retcode; - do { - rewind(stream); - retcode = fscanf(stream, "%d", entropy); - } while (retcode != 1 && errno == EINTR); - if (retcode != 1) { - return -1; - } - return 0; -} - -static int -randombytes_linux_wait_for_entropy(int device) -{ - /* We will block on /dev/random, because any increase in the OS' entropy - * level will unblock the request. I use poll here (as does libsodium), - * because we don't *actually* want to read from the device. */ - enum - { - IOCTL, - PROC - } strategy = IOCTL; - const int bits = 128; - struct pollfd pfd; - int fd; - FILE *proc_file; - int retcode, retcode_error = 0; // Used as return codes throughout this function - int entropy = 0; - - /* If the device has enough entropy already, we will want to return early */ - retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); - // printf("errno: %d (%s)\n", errno, strerror(errno)); - if (retcode != 0 && (errno == ENOTTY || errno == ENOSYS)) { - // The ioctl call on /dev/urandom has failed due to a - // - ENOTTY (unsupported action), or - // - ENOSYS (invalid ioctl; this happens on MIPS, see #22). - // - // We will fall back to reading from - // `/proc/sys/kernel/random/entropy_avail`. This less ideal, - // because it allocates a file descriptor, and it may not work - // in a chroot. But at this point it seems we have no better - // options left. 
- strategy = PROC; - // Open the entropy count file - proc_file = fopen("/proc/sys/kernel/random/entropy_avail", "r"); - if (proc_file == NULL) { - return -1; - } - } else if (retcode != 0) { - // Unrecoverable ioctl error - return -1; - } - if (entropy >= bits) { - return 0; - } - - do { - fd = open("/dev/random", O_RDONLY); - } while (fd == -1 && errno == EINTR); /* EAGAIN will not occur */ - if (fd == -1) { - /* Unrecoverable IO error */ - return -1; - } - - pfd.fd = fd; - pfd.events = POLLIN; - for (;;) { - retcode = poll(&pfd, 1, -1); - if (retcode == -1 && (errno == EINTR || errno == EAGAIN)) { - continue; - } else if (retcode == 1) { - if (strategy == IOCTL) { - retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); - } else if (strategy == PROC) { - retcode = randombytes_linux_read_entropy_proc(proc_file, &entropy); - } else { - return -1; // Unreachable - } - - if (retcode != 0) { - // Unrecoverable I/O error - retcode_error = retcode; - break; - } - if (entropy >= bits) { - break; - } - } else { - // Unreachable: poll() should only return -1 or 1 - retcode_error = -1; - break; - } - } - do { - retcode = close(fd); - } while (retcode == -1 && errno == EINTR); - if (strategy == PROC) { - do { - retcode = fclose(proc_file); - } while (retcode == -1 && errno == EINTR); - } - if (retcode_error != 0) { - return retcode_error; - } - return retcode; -} -#endif /* defined(__linux__) */ - -static int -randombytes_linux_randombytes_urandom(void *buf, size_t n) -{ - int fd; - size_t offset = 0, count; - ssize_t tmp; - do { - fd = open("/dev/urandom", O_RDONLY); - } while (fd == -1 && errno == EINTR); - if (fd == -1) - return -1; -#if defined(__linux__) - if (randombytes_linux_wait_for_entropy(fd) == -1) - return -1; -#endif - - while (n > 0) { - count = n <= SSIZE_MAX ? 
n : SSIZE_MAX; - tmp = read(fd, (char *)buf + offset, count); - if (tmp == -1 && (errno == EAGAIN || errno == EINTR)) { - continue; - } - if (tmp == -1) - return -1; /* Unrecoverable IO error */ - offset += tmp; - n -= tmp; - } - close(fd); - assert(n == 0); - return 0; -} -#endif /* defined(__linux__) && !defined(SYS_getrandom) */ - -#if defined(BSD) -static int -randombytes_bsd_randombytes(void *buf, size_t n) -{ - arc4random_buf(buf, n); - return 0; -} -#endif /* defined(BSD) */ - -#if defined(__EMSCRIPTEN__) -static int -randombytes_js_randombytes_nodejs(void *buf, size_t n) -{ - const int ret = EM_ASM_INT( - { - var crypto; - try { - crypto = require('crypto'); - } catch (error) { - return -2; - } - try { - writeArrayToMemory(crypto.randomBytes($1), $0); - return 0; - } catch (error) { - return -1; - } - }, - buf, - n); - switch (ret) { - case 0: - return 0; - case -1: - errno = EINVAL; - return -1; - case -2: - errno = ENOSYS; - return -1; - } - assert(false); // Unreachable -} -#endif /* defined(__EMSCRIPTEN__) */ - -SQISIGN_API -int -randombytes_select(unsigned char *buf, unsigned long long n) -{ -#if defined(__EMSCRIPTEN__) - return randombytes_js_randombytes_nodejs(buf, n); -#elif defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) -#if defined(USE_GLIBC) - /* Use getrandom system call */ - return randombytes_linux_randombytes_getrandom(buf, n); -#elif defined(SYS_getrandom) - /* Use getrandom system call */ - return randombytes_linux_randombytes_getrandom(buf, n); -#else - /* When we have enough entropy, we can read from /dev/urandom */ - return randombytes_linux_randombytes_urandom(buf, n); -#endif -#elif defined(BSD) - /* Use arc4random system call */ - return randombytes_bsd_randombytes(buf, n); -#elif defined(_WIN32) - /* Use windows API */ - return randombytes_win32_randombytes(buf, n); -#elif defined(__wasi__) - /* Use WASI */ - return randombytes_wasi_randombytes(buf, n); -#else -#error "randombytes(...) is not supported on this platform" -#endif -} - -#ifdef RANDOMBYTES_SYSTEM -SQISIGN_API -int -randombytes(unsigned char *x, unsigned long long xlen) -{ - - int ret = randombytes_select(x, (size_t)xlen); -#ifdef ENABLE_CT_TESTING - VALGRIND_MAKE_MEM_UNDEFINED(x, xlen); -#endif - return ret; -} - -SQISIGN_API -void -randombytes_init(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength) -{ - (void)entropy_input; - (void)personalization_string; - (void)security_strength; -} -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/rng.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/rng.h index 0a9ca0e465..d0861ac036 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/rng.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/rng.h @@ -3,41 +3,11 @@ #ifndef rng_h #define rng_h -#include +#include -/** - * Randombytes initialization. - * Initialization may be needed for some random number generators (e.g. CTR-DRBG). - * - * @param[in] entropy_input 48 bytes entropy input - * @param[in] personalization_string Personalization string - * @param[in] security_strength Security string - */ -SQISIGN_API -void randombytes_init(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength); - -/** - * Random byte generation using /dev/urandom. - * The caller is responsible to allocate sufficient memory to hold x. - * - * @param[out] x Memory to hold the random bytes. 
- * @param[in] xlen Number of random bytes to be generated - * @return int 0 on success, -1 otherwise - */ -SQISIGN_API -int randombytes_select(unsigned char *x, unsigned long long xlen); - -/** - * Random byte generation. - * The caller is responsible to allocate sufficient memory to hold x. - * - * @param[out] x Memory to hold the random bytes. - * @param[in] xlen Number of random bytes to be generated - * @return int 0 on success, -1 otherwise - */ -SQISIGN_API -int randombytes(unsigned char *x, unsigned long long xlen); +static int randombytes(unsigned char *x, unsigned long long xlen){ + OQS_randombytes(x, xlen); + return 0; +} #endif /* rng_h */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sig.h index 4c33510084..a5bc04e6e4 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sig.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sig.h @@ -17,7 +17,7 @@ * @param[out] sk SQIsign secret key * @return int status code */ -SQISIGN_API +SQISIGN_API int sqisign_keypair(unsigned char *pk, unsigned char *sk); /** @@ -34,12 +34,34 @@ int sqisign_keypair(unsigned char *pk, unsigned char *sk); * @param[in] sk Compacted secret key * @return int status code */ -SQISIGN_API +SQISIGN_API int sqisign_sign(unsigned char *sm, unsigned long long *smlen, const unsigned char *m, unsigned long long mlen, const unsigned char *sk); + +/** + * Alternate SQIsign signature generation. Used for liboqs compatibility. + * + * The implementation performs SQIsign.expandSK() + SQIsign.sign() in the SQIsign spec. + * Keys provided is a compacted secret keys. + * The caller is responsible to allocate sufficient memory to hold sm. + * + * @param[out] s Signature + * @param[out] slen Pointer to the length of s + * @param[in] m Message to be signed + * @param[in] mlen Message length + * @param[in] sk Compacted secret key + * @return int status code + */ +SQISIGN_API +int +sqisign_sign_signature(unsigned char *s, + unsigned long long *slen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *sk); #endif /** @@ -75,11 +97,30 @@ int sqisign_open(unsigned char *m, * @param[in] pk Compacted public key * @return int 0 if verification succeeded, 1 otherwise. */ -SQISIGN_API +SQISIGN_API int sqisign_verify(const unsigned char *m, unsigned long long mlen, const unsigned char *sig, unsigned long long siglen, const unsigned char *pk); +/** + * Alternate SQIsign verify signature. Used for liboqs compatibility. + * + * If the signature verification succeeded, returns 0, otherwise 1. + * + * @param[in] sig Signature + * @param[in] siglen Length of sig + * @param[out] m Message stored if verification succeeds + * @param[out] mlen Pointer to the length of m + * @param[in] pk Compacted public key + * @return int 0 if verification succeeded, 1 otherwise. 
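/*
 * [Editorial note, not part of the upstream patch] Taken together, the rng.h
 * shim above and the sqisign_sign_signature / sqisign_verify_signature
 * declarations added to sig.h here wire the vendored SQIsign code into
 * liboqs: the standalone randombytes providers (randombytes_system.c and
 * randombytes_ctrdrbg_aesni.c, deleted earlier in this patch) are dropped
 * because randomness now flows through OQS_randombytes, and the new sig.h
 * entry points work with detached signatures rather than the sm/smlen
 * signed-message format. A rough sketch of how a liboqs-side wrapper might
 * call them; the OQS_SIG_sqisign_lvl5_* names and the assumption that
 * sqisign_sign_signature returns 0 on success are illustrative, not taken
 * from this patch.
 */
#include <oqs/oqs.h>

int sqisign_sign_signature(unsigned char *s, unsigned long long *slen,
                           const unsigned char *m, unsigned long long mlen,
                           const unsigned char *sk);
int sqisign_verify_signature(const unsigned char *sig, unsigned long long siglen,
                             const unsigned char *m, unsigned long long mlen,
                             const unsigned char *pk);

OQS_STATUS OQS_SIG_sqisign_lvl5_sign(uint8_t *signature, size_t *signature_len,
                                     const uint8_t *message, size_t message_len,
                                     const uint8_t *secret_key)
{
    unsigned long long sig_len_ull = 0;
    /* Any randomness consumed during signing goes through the rng.h shim,
     * i.e. through OQS_randombytes, so no separate RNG setup is needed. */
    if (sqisign_sign_signature(signature, &sig_len_ull,
                               message, (unsigned long long)message_len,
                               secret_key) != 0) {
        return OQS_ERROR;
    }
    *signature_len = (size_t)sig_len_ull;
    return OQS_SUCCESS;
}

OQS_STATUS OQS_SIG_sqisign_lvl5_verify(const uint8_t *message, size_t message_len,
                                       const uint8_t *signature, size_t signature_len,
                                       const uint8_t *public_key)
{
    /* sig.h documents 0 for a valid signature, 1 otherwise. */
    if (sqisign_verify_signature(signature, (unsigned long long)signature_len,
                                 message, (unsigned long long)message_len,
                                 public_key) != 0) {
        return OQS_ERROR;
    }
    return OQS_SUCCESS;
}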
+ */ +SQISIGN_API +int +sqisign_verify_signature(const unsigned char *sig, + unsigned long long siglen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *pk); #endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign_namespace.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign_namespace.h index 007d2572b9..bbfe72c13b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign_namespace.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign_namespace.h @@ -18,12 +18,6 @@ #define PARAM_JOIN2(a, b) PARAM_JOIN2_(a, b) #define PARAM_NAME2(end, s) PARAM_JOIN2(end, s) -#ifndef DISABLE_NAMESPACING -#define SQISIGN_NAMESPACE_GENERIC(s) PARAM_NAME2(gen, s) -#else -#define SQISIGN_NAMESPACE_GENERIC(s) s -#endif - #if defined(SQISIGN_VARIANT) && !defined(DISABLE_NAMESPACING) #if defined(SQISIGN_BUILD_TYPE_REF) #define SQISIGN_NAMESPACE(s) PARAM_NAME3(ref, s) @@ -60,23 +54,23 @@ #undef quat_alg_scalar #undef quat_alg_sub -#define quat_alg_add SQISIGN_NAMESPACE_GENERIC(quat_alg_add) -#define quat_alg_conj SQISIGN_NAMESPACE_GENERIC(quat_alg_conj) -#define quat_alg_coord_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_coord_mul) -#define quat_alg_elem_copy SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy) -#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy_ibz) -#define quat_alg_elem_equal SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_equal) -#define quat_alg_elem_is_zero SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_is_zero) -#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_mul_by_scalar) -#define quat_alg_elem_set SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_set) -#define quat_alg_equal_denom SQISIGN_NAMESPACE_GENERIC(quat_alg_equal_denom) -#define quat_alg_init_set_ui SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set_ui) -#define quat_alg_make_primitive SQISIGN_NAMESPACE_GENERIC(quat_alg_make_primitive) -#define quat_alg_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_mul) -#define quat_alg_norm SQISIGN_NAMESPACE_GENERIC(quat_alg_norm) -#define quat_alg_normalize SQISIGN_NAMESPACE_GENERIC(quat_alg_normalize) -#define quat_alg_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_scalar) -#define quat_alg_sub SQISIGN_NAMESPACE_GENERIC(quat_alg_sub) +#define quat_alg_add SQISIGN_NAMESPACE(quat_alg_add) +#define quat_alg_conj SQISIGN_NAMESPACE(quat_alg_conj) +#define quat_alg_coord_mul SQISIGN_NAMESPACE(quat_alg_coord_mul) +#define quat_alg_elem_copy SQISIGN_NAMESPACE(quat_alg_elem_copy) +#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE(quat_alg_elem_copy_ibz) +#define quat_alg_elem_equal SQISIGN_NAMESPACE(quat_alg_elem_equal) +#define quat_alg_elem_is_zero SQISIGN_NAMESPACE(quat_alg_elem_is_zero) +#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE(quat_alg_elem_mul_by_scalar) +#define quat_alg_elem_set SQISIGN_NAMESPACE(quat_alg_elem_set) +#define quat_alg_equal_denom SQISIGN_NAMESPACE(quat_alg_equal_denom) +#define quat_alg_init_set_ui SQISIGN_NAMESPACE(quat_alg_init_set_ui) +#define quat_alg_make_primitive SQISIGN_NAMESPACE(quat_alg_make_primitive) +#define quat_alg_mul SQISIGN_NAMESPACE(quat_alg_mul) +#define quat_alg_norm SQISIGN_NAMESPACE(quat_alg_norm) +#define quat_alg_normalize SQISIGN_NAMESPACE(quat_alg_normalize) +#define quat_alg_scalar SQISIGN_NAMESPACE(quat_alg_scalar) +#define quat_alg_sub SQISIGN_NAMESPACE(quat_alg_sub) // Namespacing symbols exported from api.c: #undef crypto_sign @@ -134,14 +128,14 @@ #undef ibz_mat_2x2_set #undef ibz_vec_2_set -#define ibz_2x2_mul_mod 
SQISIGN_NAMESPACE_GENERIC(ibz_2x2_mul_mod) -#define ibz_mat_2x2_add SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_add) -#define ibz_mat_2x2_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_copy) -#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_det_from_ibz) -#define ibz_mat_2x2_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_eval) -#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_inv_mod) -#define ibz_mat_2x2_set SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_set) -#define ibz_vec_2_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_set) +#define ibz_2x2_mul_mod SQISIGN_NAMESPACE(ibz_2x2_mul_mod) +#define ibz_mat_2x2_add SQISIGN_NAMESPACE(ibz_mat_2x2_add) +#define ibz_mat_2x2_copy SQISIGN_NAMESPACE(ibz_mat_2x2_copy) +#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE(ibz_mat_2x2_det_from_ibz) +#define ibz_mat_2x2_eval SQISIGN_NAMESPACE(ibz_mat_2x2_eval) +#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE(ibz_mat_2x2_inv_mod) +#define ibz_mat_2x2_set SQISIGN_NAMESPACE(ibz_mat_2x2_set) +#define ibz_vec_2_set SQISIGN_NAMESPACE(ibz_vec_2_set) // Namespacing symbols exported from dim2id2iso.c: #undef dim2id2iso_arbitrary_isogeny_evaluation @@ -184,34 +178,34 @@ #undef ibz_vec_4_sub #undef quat_qf_eval -#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_mpm) -#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_pmp) -#define ibz_mat_4x4_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_copy) -#define ibz_mat_4x4_equal SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_equal) -#define ibz_mat_4x4_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval) -#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval_t) -#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_gcd) -#define ibz_mat_4x4_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_identity) -#define ibz_mat_4x4_inv_with_det_as_denom SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_inv_with_det_as_denom) -#define ibz_mat_4x4_is_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_identity) -#define ibz_mat_4x4_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_mul) -#define ibz_mat_4x4_negate SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_negate) -#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_div) -#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_mul) -#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_transpose) -#define ibz_mat_4x4_zero SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_zero) -#define ibz_vec_4_add SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_add) -#define ibz_vec_4_content SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_content) -#define ibz_vec_4_copy SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy) -#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_ibz) -#define ibz_vec_4_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_is_zero) -#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination) -#define ibz_vec_4_negate SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_negate) -#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_div) -#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul) -#define ibz_vec_4_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_set) -#define ibz_vec_4_sub SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_sub) -#define quat_qf_eval SQISIGN_NAMESPACE_GENERIC(quat_qf_eval) +#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE(ibz_inv_dim4_make_coeff_mpm) +#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE(ibz_inv_dim4_make_coeff_pmp) +#define ibz_mat_4x4_copy 
SQISIGN_NAMESPACE(ibz_mat_4x4_copy) +#define ibz_mat_4x4_equal SQISIGN_NAMESPACE(ibz_mat_4x4_equal) +#define ibz_mat_4x4_eval SQISIGN_NAMESPACE(ibz_mat_4x4_eval) +#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE(ibz_mat_4x4_eval_t) +#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE(ibz_mat_4x4_gcd) +#define ibz_mat_4x4_identity SQISIGN_NAMESPACE(ibz_mat_4x4_identity) +#define ibz_mat_4x4_inv_with_det_as_denom SQISIGN_NAMESPACE(ibz_mat_4x4_inv_with_det_as_denom) +#define ibz_mat_4x4_is_identity SQISIGN_NAMESPACE(ibz_mat_4x4_is_identity) +#define ibz_mat_4x4_mul SQISIGN_NAMESPACE(ibz_mat_4x4_mul) +#define ibz_mat_4x4_negate SQISIGN_NAMESPACE(ibz_mat_4x4_negate) +#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE(ibz_mat_4x4_scalar_div) +#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE(ibz_mat_4x4_scalar_mul) +#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE(ibz_mat_4x4_transpose) +#define ibz_mat_4x4_zero SQISIGN_NAMESPACE(ibz_mat_4x4_zero) +#define ibz_vec_4_add SQISIGN_NAMESPACE(ibz_vec_4_add) +#define ibz_vec_4_content SQISIGN_NAMESPACE(ibz_vec_4_content) +#define ibz_vec_4_copy SQISIGN_NAMESPACE(ibz_vec_4_copy) +#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE(ibz_vec_4_copy_ibz) +#define ibz_vec_4_is_zero SQISIGN_NAMESPACE(ibz_vec_4_is_zero) +#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE(ibz_vec_4_linear_combination) +#define ibz_vec_4_negate SQISIGN_NAMESPACE(ibz_vec_4_negate) +#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE(ibz_vec_4_scalar_div) +#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE(ibz_vec_4_scalar_mul) +#define ibz_vec_4_set SQISIGN_NAMESPACE(ibz_vec_4_set) +#define ibz_vec_4_sub SQISIGN_NAMESPACE(ibz_vec_4_sub) +#define quat_qf_eval SQISIGN_NAMESPACE(quat_qf_eval) // Namespacing symbols exported from ec.c: #undef cswap_points @@ -339,22 +333,22 @@ #undef quat_left_ideal_finalize #undef quat_left_ideal_init -#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_finalize) -#define ibz_mat_2x2_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_init) -#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_finalize) -#define ibz_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_init) -#define ibz_vec_2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_finalize) -#define ibz_vec_2_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_init) -#define ibz_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_finalize) -#define ibz_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_init) -#define quat_alg_elem_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_finalize) -#define quat_alg_elem_init SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_init) -#define quat_alg_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_finalize) -#define quat_alg_init_set SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set) -#define quat_lattice_finalize SQISIGN_NAMESPACE_GENERIC(quat_lattice_finalize) -#define quat_lattice_init SQISIGN_NAMESPACE_GENERIC(quat_lattice_init) -#define quat_left_ideal_finalize SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_finalize) -#define quat_left_ideal_init SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_init) +#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE(ibz_mat_2x2_finalize) +#define ibz_mat_2x2_init SQISIGN_NAMESPACE(ibz_mat_2x2_init) +#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE(ibz_mat_4x4_finalize) +#define ibz_mat_4x4_init SQISIGN_NAMESPACE(ibz_mat_4x4_init) +#define ibz_vec_2_finalize SQISIGN_NAMESPACE(ibz_vec_2_finalize) +#define ibz_vec_2_init SQISIGN_NAMESPACE(ibz_vec_2_init) +#define ibz_vec_4_finalize SQISIGN_NAMESPACE(ibz_vec_4_finalize) +#define ibz_vec_4_init SQISIGN_NAMESPACE(ibz_vec_4_init) 
+#define quat_alg_elem_finalize SQISIGN_NAMESPACE(quat_alg_elem_finalize) +#define quat_alg_elem_init SQISIGN_NAMESPACE(quat_alg_elem_init) +#define quat_alg_finalize SQISIGN_NAMESPACE(quat_alg_finalize) +#define quat_alg_init_set SQISIGN_NAMESPACE(quat_alg_init_set) +#define quat_lattice_finalize SQISIGN_NAMESPACE(quat_lattice_finalize) +#define quat_lattice_init SQISIGN_NAMESPACE(quat_lattice_init) +#define quat_left_ideal_finalize SQISIGN_NAMESPACE(quat_left_ideal_finalize) +#define quat_left_ideal_init SQISIGN_NAMESPACE(quat_left_ideal_init) // Namespacing symbols exported from fp.c: #undef fp_select @@ -567,11 +561,11 @@ #undef ibz_vec_4_linear_combination_mod #undef ibz_vec_4_scalar_mul_mod -#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_hnf) -#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE_GENERIC(ibz_mat_4xn_hnf_mod_core) -#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_mod) -#define ibz_vec_4_linear_combination_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination_mod) -#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul_mod) +#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE(ibz_mat_4x4_is_hnf) +#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE(ibz_mat_4xn_hnf_mod_core) +#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE(ibz_vec_4_copy_mod) +#define ibz_vec_4_linear_combination_mod SQISIGN_NAMESPACE(ibz_vec_4_linear_combination_mod) +#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE(ibz_vec_4_scalar_mul_mod) // Namespacing symbols exported from hnf_internal.c: #undef ibz_centered_mod @@ -579,15 +573,15 @@ #undef ibz_mod_not_zero #undef ibz_xgcd_with_u_not_0 -#define ibz_centered_mod SQISIGN_NAMESPACE_GENERIC(ibz_centered_mod) -#define ibz_conditional_assign SQISIGN_NAMESPACE_GENERIC(ibz_conditional_assign) -#define ibz_mod_not_zero SQISIGN_NAMESPACE_GENERIC(ibz_mod_not_zero) -#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE_GENERIC(ibz_xgcd_with_u_not_0) +#define ibz_centered_mod SQISIGN_NAMESPACE(ibz_centered_mod) +#define ibz_conditional_assign SQISIGN_NAMESPACE(ibz_conditional_assign) +#define ibz_mod_not_zero SQISIGN_NAMESPACE(ibz_mod_not_zero) +#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE(ibz_xgcd_with_u_not_0) // Namespacing symbols exported from ibz_division.c: #undef ibz_xgcd -#define ibz_xgcd SQISIGN_NAMESPACE_GENERIC(ibz_xgcd) +#define ibz_xgcd SQISIGN_NAMESPACE(ibz_xgcd) // Namespacing symbols exported from id2iso.c: #undef change_of_basis_matrix_tate @@ -624,22 +618,22 @@ #undef quat_order_discriminant #undef quat_order_is_maximal -#define quat_lideal_add SQISIGN_NAMESPACE_GENERIC(quat_lideal_add) -#define quat_lideal_class_gram SQISIGN_NAMESPACE_GENERIC(quat_lideal_class_gram) -#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_conjugate_without_hnf) -#define quat_lideal_copy SQISIGN_NAMESPACE_GENERIC(quat_lideal_copy) -#define quat_lideal_create SQISIGN_NAMESPACE_GENERIC(quat_lideal_create) -#define quat_lideal_create_principal SQISIGN_NAMESPACE_GENERIC(quat_lideal_create_principal) -#define quat_lideal_equals SQISIGN_NAMESPACE_GENERIC(quat_lideal_equals) -#define quat_lideal_generator SQISIGN_NAMESPACE_GENERIC(quat_lideal_generator) -#define quat_lideal_inter SQISIGN_NAMESPACE_GENERIC(quat_lideal_inter) -#define quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_inverse_lattice_without_hnf) -#define quat_lideal_mul SQISIGN_NAMESPACE_GENERIC(quat_lideal_mul) -#define quat_lideal_norm SQISIGN_NAMESPACE_GENERIC(quat_lideal_norm) -#define 
quat_lideal_right_order SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_order) -#define quat_lideal_right_transporter SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_transporter) -#define quat_order_discriminant SQISIGN_NAMESPACE_GENERIC(quat_order_discriminant) -#define quat_order_is_maximal SQISIGN_NAMESPACE_GENERIC(quat_order_is_maximal) +#define quat_lideal_add SQISIGN_NAMESPACE(quat_lideal_add) +#define quat_lideal_class_gram SQISIGN_NAMESPACE(quat_lideal_class_gram) +#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE(quat_lideal_conjugate_without_hnf) +#define quat_lideal_copy SQISIGN_NAMESPACE(quat_lideal_copy) +#define quat_lideal_create SQISIGN_NAMESPACE(quat_lideal_create) +#define quat_lideal_create_principal SQISIGN_NAMESPACE(quat_lideal_create_principal) +#define quat_lideal_equals SQISIGN_NAMESPACE(quat_lideal_equals) +#define quat_lideal_generator SQISIGN_NAMESPACE(quat_lideal_generator) +#define quat_lideal_inter SQISIGN_NAMESPACE(quat_lideal_inter) +#define quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE(quat_lideal_inverse_lattice_without_hnf) +#define quat_lideal_mul SQISIGN_NAMESPACE(quat_lideal_mul) +#define quat_lideal_norm SQISIGN_NAMESPACE(quat_lideal_norm) +#define quat_lideal_right_order SQISIGN_NAMESPACE(quat_lideal_right_order) +#define quat_lideal_right_transporter SQISIGN_NAMESPACE(quat_lideal_right_transporter) +#define quat_order_discriminant SQISIGN_NAMESPACE(quat_order_discriminant) +#define quat_order_is_maximal SQISIGN_NAMESPACE(quat_order_is_maximal) // Namespacing symbols exported from intbig.c: #undef ibz_abs @@ -647,6 +641,10 @@ #undef ibz_bitsize #undef ibz_cmp #undef ibz_cmp_int32 +#undef ibz_const_one +#undef ibz_const_three +#undef ibz_const_two +#undef ibz_const_zero #undef ibz_convert_to_str #undef ibz_copy #undef ibz_copy_digits @@ -687,57 +685,61 @@ #undef ibz_to_digits #undef ibz_two_adic -#define ibz_abs SQISIGN_NAMESPACE_GENERIC(ibz_abs) -#define ibz_add SQISIGN_NAMESPACE_GENERIC(ibz_add) -#define ibz_bitsize SQISIGN_NAMESPACE_GENERIC(ibz_bitsize) -#define ibz_cmp SQISIGN_NAMESPACE_GENERIC(ibz_cmp) -#define ibz_cmp_int32 SQISIGN_NAMESPACE_GENERIC(ibz_cmp_int32) -#define ibz_convert_to_str SQISIGN_NAMESPACE_GENERIC(ibz_convert_to_str) -#define ibz_copy SQISIGN_NAMESPACE_GENERIC(ibz_copy) -#define ibz_copy_digits SQISIGN_NAMESPACE_GENERIC(ibz_copy_digits) -#define ibz_div SQISIGN_NAMESPACE_GENERIC(ibz_div) -#define ibz_div_2exp SQISIGN_NAMESPACE_GENERIC(ibz_div_2exp) -#define ibz_div_floor SQISIGN_NAMESPACE_GENERIC(ibz_div_floor) -#define ibz_divides SQISIGN_NAMESPACE_GENERIC(ibz_divides) -#define ibz_finalize SQISIGN_NAMESPACE_GENERIC(ibz_finalize) -#define ibz_gcd SQISIGN_NAMESPACE_GENERIC(ibz_gcd) -#define ibz_get SQISIGN_NAMESPACE_GENERIC(ibz_get) -#define ibz_init SQISIGN_NAMESPACE_GENERIC(ibz_init) -#define ibz_invmod SQISIGN_NAMESPACE_GENERIC(ibz_invmod) -#define ibz_is_even SQISIGN_NAMESPACE_GENERIC(ibz_is_even) -#define ibz_is_odd SQISIGN_NAMESPACE_GENERIC(ibz_is_odd) -#define ibz_is_one SQISIGN_NAMESPACE_GENERIC(ibz_is_one) -#define ibz_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_is_zero) -#define ibz_legendre SQISIGN_NAMESPACE_GENERIC(ibz_legendre) -#define ibz_mod SQISIGN_NAMESPACE_GENERIC(ibz_mod) -#define ibz_mod_ui SQISIGN_NAMESPACE_GENERIC(ibz_mod_ui) -#define ibz_mul SQISIGN_NAMESPACE_GENERIC(ibz_mul) -#define ibz_neg SQISIGN_NAMESPACE_GENERIC(ibz_neg) -#define ibz_pow SQISIGN_NAMESPACE_GENERIC(ibz_pow) -#define ibz_pow_mod SQISIGN_NAMESPACE_GENERIC(ibz_pow_mod) -#define ibz_print SQISIGN_NAMESPACE_GENERIC(ibz_print) 
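/*
 * [Editorial note, not part of the upstream patch] Besides the function
 * renames, the intbig.c hunk around this point adds the
 * ibz_const_zero/one/two/three globals to the namespaced list. Exported
 * constant objects need the same per-variant prefix as functions; otherwise
 * every SQIsign variant linked into liboqs would define the same global and
 * clash at link time. A tiny sketch of the effect, with an assumed prefix
 * and with int standing in for the real ibz_t type:
 */
#define EXAMPLE_NAMESPACE(s) sqisign_lvl5_broadwell_##s
#define ibz_const_zero EXAMPLE_NAMESPACE(ibz_const_zero)

/* After preprocessing, this object is really named
 * sqisign_lvl5_broadwell_ibz_const_zero, unique to this variant. */
const int ibz_const_zero = 0;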
-#define ibz_probab_prime SQISIGN_NAMESPACE_GENERIC(ibz_probab_prime) -#define ibz_rand_interval SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval) -#define ibz_rand_interval_bits SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_bits) -#define ibz_rand_interval_i SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_i) -#define ibz_rand_interval_minm_m SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_minm_m) -#define ibz_set SQISIGN_NAMESPACE_GENERIC(ibz_set) -#define ibz_set_from_str SQISIGN_NAMESPACE_GENERIC(ibz_set_from_str) -#define ibz_size_in_base SQISIGN_NAMESPACE_GENERIC(ibz_size_in_base) -#define ibz_sqrt SQISIGN_NAMESPACE_GENERIC(ibz_sqrt) -#define ibz_sqrt_floor SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_floor) -#define ibz_sqrt_mod_p SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_mod_p) -#define ibz_sub SQISIGN_NAMESPACE_GENERIC(ibz_sub) -#define ibz_swap SQISIGN_NAMESPACE_GENERIC(ibz_swap) -#define ibz_to_digits SQISIGN_NAMESPACE_GENERIC(ibz_to_digits) -#define ibz_two_adic SQISIGN_NAMESPACE_GENERIC(ibz_two_adic) +#define ibz_abs SQISIGN_NAMESPACE(ibz_abs) +#define ibz_add SQISIGN_NAMESPACE(ibz_add) +#define ibz_bitsize SQISIGN_NAMESPACE(ibz_bitsize) +#define ibz_cmp SQISIGN_NAMESPACE(ibz_cmp) +#define ibz_cmp_int32 SQISIGN_NAMESPACE(ibz_cmp_int32) +#define ibz_const_one SQISIGN_NAMESPACE(ibz_const_one) +#define ibz_const_three SQISIGN_NAMESPACE(ibz_const_three) +#define ibz_const_two SQISIGN_NAMESPACE(ibz_const_two) +#define ibz_const_zero SQISIGN_NAMESPACE(ibz_const_zero) +#define ibz_convert_to_str SQISIGN_NAMESPACE(ibz_convert_to_str) +#define ibz_copy SQISIGN_NAMESPACE(ibz_copy) +#define ibz_copy_digits SQISIGN_NAMESPACE(ibz_copy_digits) +#define ibz_div SQISIGN_NAMESPACE(ibz_div) +#define ibz_div_2exp SQISIGN_NAMESPACE(ibz_div_2exp) +#define ibz_div_floor SQISIGN_NAMESPACE(ibz_div_floor) +#define ibz_divides SQISIGN_NAMESPACE(ibz_divides) +#define ibz_finalize SQISIGN_NAMESPACE(ibz_finalize) +#define ibz_gcd SQISIGN_NAMESPACE(ibz_gcd) +#define ibz_get SQISIGN_NAMESPACE(ibz_get) +#define ibz_init SQISIGN_NAMESPACE(ibz_init) +#define ibz_invmod SQISIGN_NAMESPACE(ibz_invmod) +#define ibz_is_even SQISIGN_NAMESPACE(ibz_is_even) +#define ibz_is_odd SQISIGN_NAMESPACE(ibz_is_odd) +#define ibz_is_one SQISIGN_NAMESPACE(ibz_is_one) +#define ibz_is_zero SQISIGN_NAMESPACE(ibz_is_zero) +#define ibz_legendre SQISIGN_NAMESPACE(ibz_legendre) +#define ibz_mod SQISIGN_NAMESPACE(ibz_mod) +#define ibz_mod_ui SQISIGN_NAMESPACE(ibz_mod_ui) +#define ibz_mul SQISIGN_NAMESPACE(ibz_mul) +#define ibz_neg SQISIGN_NAMESPACE(ibz_neg) +#define ibz_pow SQISIGN_NAMESPACE(ibz_pow) +#define ibz_pow_mod SQISIGN_NAMESPACE(ibz_pow_mod) +#define ibz_print SQISIGN_NAMESPACE(ibz_print) +#define ibz_probab_prime SQISIGN_NAMESPACE(ibz_probab_prime) +#define ibz_rand_interval SQISIGN_NAMESPACE(ibz_rand_interval) +#define ibz_rand_interval_bits SQISIGN_NAMESPACE(ibz_rand_interval_bits) +#define ibz_rand_interval_i SQISIGN_NAMESPACE(ibz_rand_interval_i) +#define ibz_rand_interval_minm_m SQISIGN_NAMESPACE(ibz_rand_interval_minm_m) +#define ibz_set SQISIGN_NAMESPACE(ibz_set) +#define ibz_set_from_str SQISIGN_NAMESPACE(ibz_set_from_str) +#define ibz_size_in_base SQISIGN_NAMESPACE(ibz_size_in_base) +#define ibz_sqrt SQISIGN_NAMESPACE(ibz_sqrt) +#define ibz_sqrt_floor SQISIGN_NAMESPACE(ibz_sqrt_floor) +#define ibz_sqrt_mod_p SQISIGN_NAMESPACE(ibz_sqrt_mod_p) +#define ibz_sub SQISIGN_NAMESPACE(ibz_sub) +#define ibz_swap SQISIGN_NAMESPACE(ibz_swap) +#define ibz_to_digits SQISIGN_NAMESPACE(ibz_to_digits) +#define ibz_two_adic SQISIGN_NAMESPACE(ibz_two_adic) // 
Namespacing symbols exported from integers.c: #undef ibz_cornacchia_prime #undef ibz_generate_random_prime -#define ibz_cornacchia_prime SQISIGN_NAMESPACE_GENERIC(ibz_cornacchia_prime) -#define ibz_generate_random_prime SQISIGN_NAMESPACE_GENERIC(ibz_generate_random_prime) +#define ibz_cornacchia_prime SQISIGN_NAMESPACE(ibz_cornacchia_prime) +#define ibz_generate_random_prime SQISIGN_NAMESPACE(ibz_generate_random_prime) // Namespacing symbols exported from isog_chains.c: #undef ec_eval_even @@ -763,15 +765,15 @@ #undef quat_lattice_lll #undef quat_lll_core -#define quat_lattice_lll SQISIGN_NAMESPACE_GENERIC(quat_lattice_lll) -#define quat_lll_core SQISIGN_NAMESPACE_GENERIC(quat_lll_core) +#define quat_lattice_lll SQISIGN_NAMESPACE(quat_lattice_lll) +#define quat_lll_core SQISIGN_NAMESPACE(quat_lll_core) // Namespacing symbols exported from lat_ball.c: #undef quat_lattice_bound_parallelogram #undef quat_lattice_sample_from_ball -#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE_GENERIC(quat_lattice_bound_parallelogram) -#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE_GENERIC(quat_lattice_sample_from_ball) +#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE(quat_lattice_bound_parallelogram) +#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE(quat_lattice_sample_from_ball) // Namespacing symbols exported from lattice.c: #undef quat_lattice_add @@ -789,29 +791,29 @@ #undef quat_lattice_mul #undef quat_lattice_reduce_denom -#define quat_lattice_add SQISIGN_NAMESPACE_GENERIC(quat_lattice_add) -#define quat_lattice_alg_elem_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_alg_elem_mul) -#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_conjugate_without_hnf) -#define quat_lattice_contains SQISIGN_NAMESPACE_GENERIC(quat_lattice_contains) -#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_dual_without_hnf) -#define quat_lattice_equal SQISIGN_NAMESPACE_GENERIC(quat_lattice_equal) -#define quat_lattice_gram SQISIGN_NAMESPACE_GENERIC(quat_lattice_gram) -#define quat_lattice_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_hnf) -#define quat_lattice_inclusion SQISIGN_NAMESPACE_GENERIC(quat_lattice_inclusion) -#define quat_lattice_index SQISIGN_NAMESPACE_GENERIC(quat_lattice_index) -#define quat_lattice_intersect SQISIGN_NAMESPACE_GENERIC(quat_lattice_intersect) -#define quat_lattice_mat_alg_coord_mul_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_mat_alg_coord_mul_without_hnf) -#define quat_lattice_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_mul) -#define quat_lattice_reduce_denom SQISIGN_NAMESPACE_GENERIC(quat_lattice_reduce_denom) +#define quat_lattice_add SQISIGN_NAMESPACE(quat_lattice_add) +#define quat_lattice_alg_elem_mul SQISIGN_NAMESPACE(quat_lattice_alg_elem_mul) +#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE(quat_lattice_conjugate_without_hnf) +#define quat_lattice_contains SQISIGN_NAMESPACE(quat_lattice_contains) +#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE(quat_lattice_dual_without_hnf) +#define quat_lattice_equal SQISIGN_NAMESPACE(quat_lattice_equal) +#define quat_lattice_gram SQISIGN_NAMESPACE(quat_lattice_gram) +#define quat_lattice_hnf SQISIGN_NAMESPACE(quat_lattice_hnf) +#define quat_lattice_inclusion SQISIGN_NAMESPACE(quat_lattice_inclusion) +#define quat_lattice_index SQISIGN_NAMESPACE(quat_lattice_index) +#define quat_lattice_intersect SQISIGN_NAMESPACE(quat_lattice_intersect) +#define quat_lattice_mat_alg_coord_mul_without_hnf 
SQISIGN_NAMESPACE(quat_lattice_mat_alg_coord_mul_without_hnf) +#define quat_lattice_mul SQISIGN_NAMESPACE(quat_lattice_mul) +#define quat_lattice_reduce_denom SQISIGN_NAMESPACE(quat_lattice_reduce_denom) // Namespacing symbols exported from lll_applications.c: #undef quat_lideal_lideal_mul_reduced #undef quat_lideal_prime_norm_reduced_equivalent #undef quat_lideal_reduce_basis -#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE_GENERIC(quat_lideal_lideal_mul_reduced) -#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE_GENERIC(quat_lideal_prime_norm_reduced_equivalent) -#define quat_lideal_reduce_basis SQISIGN_NAMESPACE_GENERIC(quat_lideal_reduce_basis) +#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE(quat_lideal_lideal_mul_reduced) +#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE(quat_lideal_prime_norm_reduced_equivalent) +#define quat_lideal_reduce_basis SQISIGN_NAMESPACE(quat_lideal_reduce_basis) // Namespacing symbols exported from lll_verification.c: #undef ibq_vec_4_copy_ibz @@ -820,18 +822,18 @@ #undef quat_lll_set_ibq_parameters #undef quat_lll_verify -#define ibq_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_copy_ibz) -#define quat_lll_bilinear SQISIGN_NAMESPACE_GENERIC(quat_lll_bilinear) -#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE_GENERIC(quat_lll_gram_schmidt_transposed_with_ibq) -#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE_GENERIC(quat_lll_set_ibq_parameters) -#define quat_lll_verify SQISIGN_NAMESPACE_GENERIC(quat_lll_verify) +#define ibq_vec_4_copy_ibz SQISIGN_NAMESPACE(ibq_vec_4_copy_ibz) +#define quat_lll_bilinear SQISIGN_NAMESPACE(quat_lll_bilinear) +#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE(quat_lll_gram_schmidt_transposed_with_ibq) +#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE(quat_lll_set_ibq_parameters) +#define quat_lll_verify SQISIGN_NAMESPACE(quat_lll_verify) // Namespacing symbols exported from mem.c: #undef sqisign_secure_clear #undef sqisign_secure_free -#define sqisign_secure_clear SQISIGN_NAMESPACE_GENERIC(sqisign_secure_clear) -#define sqisign_secure_free SQISIGN_NAMESPACE_GENERIC(sqisign_secure_free) +#define sqisign_secure_clear SQISIGN_NAMESPACE(sqisign_secure_clear) +#define sqisign_secure_free SQISIGN_NAMESPACE(sqisign_secure_free) // Namespacing symbols exported from mp.c: #undef MUL @@ -854,25 +856,25 @@ #undef select_ct #undef swap_ct -#define MUL SQISIGN_NAMESPACE_GENERIC(MUL) -#define mp_add SQISIGN_NAMESPACE_GENERIC(mp_add) -#define mp_compare SQISIGN_NAMESPACE_GENERIC(mp_compare) -#define mp_copy SQISIGN_NAMESPACE_GENERIC(mp_copy) -#define mp_inv_2e SQISIGN_NAMESPACE_GENERIC(mp_inv_2e) -#define mp_invert_matrix SQISIGN_NAMESPACE_GENERIC(mp_invert_matrix) -#define mp_is_one SQISIGN_NAMESPACE_GENERIC(mp_is_one) -#define mp_is_zero SQISIGN_NAMESPACE_GENERIC(mp_is_zero) -#define mp_mod_2exp SQISIGN_NAMESPACE_GENERIC(mp_mod_2exp) -#define mp_mul SQISIGN_NAMESPACE_GENERIC(mp_mul) -#define mp_mul2 SQISIGN_NAMESPACE_GENERIC(mp_mul2) -#define mp_neg SQISIGN_NAMESPACE_GENERIC(mp_neg) -#define mp_print SQISIGN_NAMESPACE_GENERIC(mp_print) -#define mp_shiftl SQISIGN_NAMESPACE_GENERIC(mp_shiftl) -#define mp_shiftr SQISIGN_NAMESPACE_GENERIC(mp_shiftr) -#define mp_sub SQISIGN_NAMESPACE_GENERIC(mp_sub) -#define multiple_mp_shiftl SQISIGN_NAMESPACE_GENERIC(multiple_mp_shiftl) -#define select_ct SQISIGN_NAMESPACE_GENERIC(select_ct) -#define swap_ct SQISIGN_NAMESPACE_GENERIC(swap_ct) +#define MUL SQISIGN_NAMESPACE(MUL) +#define mp_add 
SQISIGN_NAMESPACE(mp_add) +#define mp_compare SQISIGN_NAMESPACE(mp_compare) +#define mp_copy SQISIGN_NAMESPACE(mp_copy) +#define mp_inv_2e SQISIGN_NAMESPACE(mp_inv_2e) +#define mp_invert_matrix SQISIGN_NAMESPACE(mp_invert_matrix) +#define mp_is_one SQISIGN_NAMESPACE(mp_is_one) +#define mp_is_zero SQISIGN_NAMESPACE(mp_is_zero) +#define mp_mod_2exp SQISIGN_NAMESPACE(mp_mod_2exp) +#define mp_mul SQISIGN_NAMESPACE(mp_mul) +#define mp_mul2 SQISIGN_NAMESPACE(mp_mul2) +#define mp_neg SQISIGN_NAMESPACE(mp_neg) +#define mp_print SQISIGN_NAMESPACE(mp_print) +#define mp_shiftl SQISIGN_NAMESPACE(mp_shiftl) +#define mp_shiftr SQISIGN_NAMESPACE(mp_shiftr) +#define mp_sub SQISIGN_NAMESPACE(mp_sub) +#define multiple_mp_shiftl SQISIGN_NAMESPACE(multiple_mp_shiftl) +#define select_ct SQISIGN_NAMESPACE(select_ct) +#define swap_ct SQISIGN_NAMESPACE(swap_ct) // Namespacing symbols exported from normeq.c: #undef quat_change_to_O0_basis @@ -882,12 +884,12 @@ #undef quat_represent_integer #undef quat_sampling_random_ideal_O0_given_norm -#define quat_change_to_O0_basis SQISIGN_NAMESPACE_GENERIC(quat_change_to_O0_basis) -#define quat_lattice_O0_set SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set) -#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set_extremal) -#define quat_order_elem_create SQISIGN_NAMESPACE_GENERIC(quat_order_elem_create) -#define quat_represent_integer SQISIGN_NAMESPACE_GENERIC(quat_represent_integer) -#define quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE_GENERIC(quat_sampling_random_ideal_O0_given_norm) +#define quat_change_to_O0_basis SQISIGN_NAMESPACE(quat_change_to_O0_basis) +#define quat_lattice_O0_set SQISIGN_NAMESPACE(quat_lattice_O0_set) +#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE(quat_lattice_O0_set_extremal) +#define quat_order_elem_create SQISIGN_NAMESPACE(quat_order_elem_create) +#define quat_represent_integer SQISIGN_NAMESPACE(quat_represent_integer) +#define quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE(quat_sampling_random_ideal_O0_given_norm) // Namespacing symbols exported from printer.c: #undef ibz_mat_2x2_print @@ -899,23 +901,23 @@ #undef quat_lattice_print #undef quat_left_ideal_print -#define ibz_mat_2x2_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_print) -#define ibz_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_print) -#define ibz_vec_2_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_print) -#define ibz_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_print) -#define quat_alg_elem_print SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_print) -#define quat_alg_print SQISIGN_NAMESPACE_GENERIC(quat_alg_print) -#define quat_lattice_print SQISIGN_NAMESPACE_GENERIC(quat_lattice_print) -#define quat_left_ideal_print SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_print) +#define ibz_mat_2x2_print SQISIGN_NAMESPACE(ibz_mat_2x2_print) +#define ibz_mat_4x4_print SQISIGN_NAMESPACE(ibz_mat_4x4_print) +#define ibz_vec_2_print SQISIGN_NAMESPACE(ibz_vec_2_print) +#define ibz_vec_4_print SQISIGN_NAMESPACE(ibz_vec_4_print) +#define quat_alg_elem_print SQISIGN_NAMESPACE(quat_alg_elem_print) +#define quat_alg_print SQISIGN_NAMESPACE(quat_alg_print) +#define quat_lattice_print SQISIGN_NAMESPACE(quat_lattice_print) +#define quat_left_ideal_print SQISIGN_NAMESPACE(quat_left_ideal_print) // Namespacing symbols exported from random_input_generation.c: #undef quat_test_input_random_ideal_generation #undef quat_test_input_random_ideal_lattice_generation #undef quat_test_input_random_lattice_generation -#define quat_test_input_random_ideal_generation 
SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_generation) -#define quat_test_input_random_ideal_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_lattice_generation) -#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_lattice_generation) +#define quat_test_input_random_ideal_generation SQISIGN_NAMESPACE(quat_test_input_random_ideal_generation) +#define quat_test_input_random_ideal_lattice_generation SQISIGN_NAMESPACE(quat_test_input_random_ideal_lattice_generation) +#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE(quat_test_input_random_lattice_generation) // Namespacing symbols exported from rationals.c: #undef ibq_abs @@ -941,28 +943,28 @@ #undef ibq_vec_4_init #undef ibq_vec_4_print -#define ibq_abs SQISIGN_NAMESPACE_GENERIC(ibq_abs) -#define ibq_add SQISIGN_NAMESPACE_GENERIC(ibq_add) -#define ibq_cmp SQISIGN_NAMESPACE_GENERIC(ibq_cmp) -#define ibq_copy SQISIGN_NAMESPACE_GENERIC(ibq_copy) -#define ibq_finalize SQISIGN_NAMESPACE_GENERIC(ibq_finalize) -#define ibq_init SQISIGN_NAMESPACE_GENERIC(ibq_init) -#define ibq_inv SQISIGN_NAMESPACE_GENERIC(ibq_inv) -#define ibq_is_ibz SQISIGN_NAMESPACE_GENERIC(ibq_is_ibz) -#define ibq_is_one SQISIGN_NAMESPACE_GENERIC(ibq_is_one) -#define ibq_is_zero SQISIGN_NAMESPACE_GENERIC(ibq_is_zero) -#define ibq_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_finalize) -#define ibq_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_init) -#define ibq_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_print) -#define ibq_mul SQISIGN_NAMESPACE_GENERIC(ibq_mul) -#define ibq_neg SQISIGN_NAMESPACE_GENERIC(ibq_neg) -#define ibq_reduce SQISIGN_NAMESPACE_GENERIC(ibq_reduce) -#define ibq_set SQISIGN_NAMESPACE_GENERIC(ibq_set) -#define ibq_sub SQISIGN_NAMESPACE_GENERIC(ibq_sub) -#define ibq_to_ibz SQISIGN_NAMESPACE_GENERIC(ibq_to_ibz) -#define ibq_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_finalize) -#define ibq_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_init) -#define ibq_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_print) +#define ibq_abs SQISIGN_NAMESPACE(ibq_abs) +#define ibq_add SQISIGN_NAMESPACE(ibq_add) +#define ibq_cmp SQISIGN_NAMESPACE(ibq_cmp) +#define ibq_copy SQISIGN_NAMESPACE(ibq_copy) +#define ibq_finalize SQISIGN_NAMESPACE(ibq_finalize) +#define ibq_init SQISIGN_NAMESPACE(ibq_init) +#define ibq_inv SQISIGN_NAMESPACE(ibq_inv) +#define ibq_is_ibz SQISIGN_NAMESPACE(ibq_is_ibz) +#define ibq_is_one SQISIGN_NAMESPACE(ibq_is_one) +#define ibq_is_zero SQISIGN_NAMESPACE(ibq_is_zero) +#define ibq_mat_4x4_finalize SQISIGN_NAMESPACE(ibq_mat_4x4_finalize) +#define ibq_mat_4x4_init SQISIGN_NAMESPACE(ibq_mat_4x4_init) +#define ibq_mat_4x4_print SQISIGN_NAMESPACE(ibq_mat_4x4_print) +#define ibq_mul SQISIGN_NAMESPACE(ibq_mul) +#define ibq_neg SQISIGN_NAMESPACE(ibq_neg) +#define ibq_reduce SQISIGN_NAMESPACE(ibq_reduce) +#define ibq_set SQISIGN_NAMESPACE(ibq_set) +#define ibq_sub SQISIGN_NAMESPACE(ibq_sub) +#define ibq_to_ibz SQISIGN_NAMESPACE(ibq_to_ibz) +#define ibq_vec_4_finalize SQISIGN_NAMESPACE(ibq_vec_4_finalize) +#define ibq_vec_4_init SQISIGN_NAMESPACE(ibq_vec_4_init) +#define ibq_vec_4_print SQISIGN_NAMESPACE(ibq_vec_4_print) // Namespacing symbols exported from sign.c: #undef protocols_sign diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/tools.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/tools.c deleted file mode 100644 index 242ea08fe2..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/tools.c +++ 
/dev/null @@ -1,75 +0,0 @@ -#include -#include - -static clock_t global_timer; - -clock_t -tic(void) -{ - global_timer = clock(); - return global_timer; -} - -float -tac(void) -{ - float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); - return ms; -} - -float -TAC(const char *str) -{ - float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); -#ifndef NDEBUG - printf("%s [%d ms]\n", str, (int)ms); -#endif - return ms; -} - -float -toc(const clock_t t) -{ - float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); - return ms; -} - -float -TOC(const clock_t t, const char *str) -{ - float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); - printf("%s [%d ms]\n", str, (int)ms); - return ms; - // printf("%s [%ld]\n",str,clock()-t); - // return (float) (clock()-t); -} - -float -TOC_clock(const clock_t t, const char *str) -{ - printf("%s [%ld]\n", str, clock() - t); - return (float)(clock() - t); -} - -clock_t -dclock(const clock_t t) -{ - return (clock() - t); -} - -float -clock_to_time(const clock_t t, const char *str) -{ - float ms = (1000. * (float)(t) / CLOCKS_PER_SEC); - printf("%s [%d ms]\n", str, (int)ms); - return ms; - // printf("%s [%ld]\n",str,t); - // return (float) (t); -} - -float -clock_print(const clock_t t, const char *str) -{ - printf("%s [%ld]\n", str, t); - return (float)(t); -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/randombytes_ctrdrbg.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/randombytes_ctrdrbg.c deleted file mode 100644 index 372cc0de81..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/randombytes_ctrdrbg.c +++ /dev/null @@ -1,161 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 and Unknown -// -/* -NIST-developed software is provided by NIST as a public service. You may use, -copy, and distribute copies of the software in any medium, provided that you -keep intact this entire notice. You may improve, modify, and create derivative -works of the software or any portion of the software, and you may copy and -distribute such modifications or works. Modified works should carry a notice -stating that you changed the software and should note the date and nature of any -such change. Please explicitly acknowledge the National Institute of Standards -and Technology as the source of the software. - -NIST-developed software is expressly provided "AS IS." NIST MAKES NO WARRANTY OF -ANY KIND, EXPRESS, IMPLIED, IN FACT, OR ARISING BY OPERATION OF LAW, INCLUDING, -WITHOUT LIMITATION, THE IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE, NON-INFRINGEMENT, AND DATA ACCURACY. NIST NEITHER REPRESENTS -NOR WARRANTS THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR -ERROR-FREE, OR THAT ANY DEFECTS WILL BE CORRECTED. NIST DOES NOT WARRANT OR MAKE -ANY REPRESENTATIONS REGARDING THE USE OF THE SOFTWARE OR THE RESULTS THEREOF, -INCLUDING BUT NOT LIMITED TO THE CORRECTNESS, ACCURACY, RELIABILITY, OR -USEFULNESS OF THE SOFTWARE. - -You are solely responsible for determining the appropriateness of using and -distributing the software and you assume all risks associated with its use, -including but not limited to the risks and costs of program errors, compliance -with applicable laws, damage to or loss of data, programs or equipment, and the -unavailability or interruption of operation. This software is not intended to be -used in any situation where a failure could cause risk of injury or damage to -property. 
The software developed by NIST employees is not subject to copyright -protection within the United States. -*/ - -#include -#include - -#include - -#ifdef ENABLE_CT_TESTING -#include -#endif - -#define RNG_SUCCESS 0 -#define RNG_BAD_MAXLEN -1 -#define RNG_BAD_OUTBUF -2 -#define RNG_BAD_REQ_LEN -3 - -static inline void AES256_ECB(const unsigned char *key, - const unsigned char *ctr, unsigned char *buffer) { - AES_ECB_encrypt(ctr, key, buffer); -} - -typedef struct { - unsigned char Key[32]; - unsigned char V[16]; - int reseed_counter; -} AES256_CTR_DRBG_struct; - -void AES256_CTR_DRBG_Update(const unsigned char *provided_data, - unsigned char *Key, unsigned char *V); - -AES256_CTR_DRBG_struct DRBG_ctx; - -#ifndef CTRDRBG_TEST_BENCH -static -#endif - void - randombytes_init_nist(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength) { - unsigned char seed_material[48]; - - (void)security_strength; // Unused parameter - memcpy(seed_material, entropy_input, 48); - if (personalization_string) - for (int i = 0; i < 48; i++) { - seed_material[i] ^= personalization_string[i]; - } - memset(DRBG_ctx.Key, 0x00, 32); - memset(DRBG_ctx.V, 0x00, 16); - AES256_CTR_DRBG_Update(seed_material, DRBG_ctx.Key, DRBG_ctx.V); - DRBG_ctx.reseed_counter = 1; -} - -#ifndef CTRDRBG_TEST_BENCH -static -#endif - int - randombytes_nist(unsigned char *x, size_t xlen) { - unsigned char block[16]; - size_t i = 0; - - while (xlen > 0) { - // increment V - for (int j = 15; j >= 0; j--) { - if (DRBG_ctx.V[j] == 0xff) { - DRBG_ctx.V[j] = 0x00; - } else { - DRBG_ctx.V[j]++; - break; - } - } - AES256_ECB(DRBG_ctx.Key, DRBG_ctx.V, block); - if (xlen > 15) { - memcpy(x + i, block, 16); - i += 16; - xlen -= 16; - } else { - memcpy(x + i, block, xlen); - i += xlen; - xlen = 0; - } - } - AES256_CTR_DRBG_Update(NULL, DRBG_ctx.Key, DRBG_ctx.V); - DRBG_ctx.reseed_counter++; - - return 0; -} - -void AES256_CTR_DRBG_Update(const unsigned char *provided_data, - unsigned char *Key, unsigned char *V) { - unsigned char temp[48]; - - for (int i = 0; i < 3; i++) { - // increment V - for (int j = 15; j >= 0; j--) { - if (V[j] == 0xff) { - V[j] = 0x00; - } else { - V[j]++; - break; - } - } - - AES256_ECB(Key, V, temp + 16 * i); - } - if (provided_data != NULL) - for (int i = 0; i < 48; i++) { - temp[i] ^= provided_data[i]; - } - memcpy(Key, temp, 32); - memcpy(V, temp + 32, 16); -} - -#ifdef RANDOMBYTES_C -SQISIGN_API -int randombytes(unsigned char *random_array, unsigned long long nbytes) { - int ret = randombytes_nist(random_array, nbytes); -#ifdef ENABLE_CT_TESTING - VALGRIND_MAKE_MEM_UNDEFINED(random_array, ret); -#endif - return ret; -} - -SQISIGN_API -void randombytes_init(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength) { - randombytes_init_nist(entropy_input, personalization_string, - security_strength); -} -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/randombytes_system.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/randombytes_system.c deleted file mode 100644 index 689c29b242..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/randombytes_system.c +++ /dev/null @@ -1,431 +0,0 @@ -// SPDX-License-Identifier: MIT - -/* -The MIT License -Copyright (c) 2017 Daan Sprenkels -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, 
modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. -*/ - -#include - -#ifdef ENABLE_CT_TESTING -#include -#endif - -// In the case that are compiling on linux, we need to define _GNU_SOURCE -// *before* randombytes.h is included. Otherwise SYS_getrandom will not be -// declared. -#if defined(__linux__) || defined(__GNU__) -#define _GNU_SOURCE -#endif /* defined(__linux__) || defined(__GNU__) */ - -#if defined(_WIN32) -/* Windows */ -#include -#include /* CryptAcquireContext, CryptGenRandom */ -#endif /* defined(_WIN32) */ - -/* wasi */ -#if defined(__wasi__) -#include -#endif - -/* kFreeBSD */ -#if defined(__FreeBSD_kernel__) && defined(__GLIBC__) -#define GNU_KFREEBSD -#endif - -#if defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) -/* Linux */ -// We would need to include , but not every target has access -// to the linux headers. We only need RNDGETENTCNT, so we instead inline it. -// RNDGETENTCNT is originally defined in `include/uapi/linux/random.h` in the -// linux repo. -#define RNDGETENTCNT 0x80045200 - -#include -#include -#include -#include -#include -#include -#include -#if (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && \ - ((__GLIBC__ > 2) || (__GLIBC_MINOR__ > 24)) -#define USE_GLIBC -#include -#endif /* (defined(__linux__) || defined(__GNU__)) && defined(__GLIBC__) && ((__GLIBC__ > 2) || \ - (__GLIBC_MINOR__ > 24)) */ -#include -#include -#include -#include - -// We need SSIZE_MAX as the maximum read len from /dev/urandom -#if !defined(SSIZE_MAX) -#define SSIZE_MAX (SIZE_MAX / 2 - 1) -#endif /* defined(SSIZE_MAX) */ - -#endif /* defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) */ - -#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) -/* Dragonfly, FreeBSD, NetBSD, OpenBSD (has arc4random) */ -#include -#if defined(BSD) -#include -#endif -/* GNU/Hurd defines BSD in sys/param.h which causes problems later */ -#if defined(__GNU__) -#undef BSD -#endif -#endif - -#if defined(__EMSCRIPTEN__) -#include -#include -#include -#include -#endif /* defined(__EMSCRIPTEN__) */ - -#if defined(_WIN32) -static int -randombytes_win32_randombytes(void *buf, size_t n) -{ - HCRYPTPROV ctx; - BOOL tmp; - DWORD to_read = 0; - const size_t MAX_DWORD = 0xFFFFFFFF; - - tmp = CryptAcquireContext(&ctx, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT); - if (tmp == FALSE) - return -1; - - while (n > 0) { - to_read = (DWORD)(n < MAX_DWORD ? 
n : MAX_DWORD); - tmp = CryptGenRandom(ctx, to_read, (BYTE *)buf); - if (tmp == FALSE) - return -1; - buf = ((char *)buf) + to_read; - n -= to_read; - } - - tmp = CryptReleaseContext(ctx, 0); - if (tmp == FALSE) - return -1; - - return 0; -} -#endif /* defined(_WIN32) */ - -#if defined(__wasi__) -static int -randombytes_wasi_randombytes(void *buf, size_t n) -{ - arc4random_buf(buf, n); - return 0; -} -#endif /* defined(__wasi__) */ - -#if (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || defined(SYS_getrandom)) -#if defined(USE_GLIBC) -// getrandom is declared in glibc. -#elif defined(SYS_getrandom) -static ssize_t -getrandom(void *buf, size_t buflen, unsigned int flags) -{ - return syscall(SYS_getrandom, buf, buflen, flags); -} -#endif - -static int -randombytes_linux_randombytes_getrandom(void *buf, size_t n) -{ - /* I have thought about using a separate PRF, seeded by getrandom, but - * it turns out that the performance of getrandom is good enough - * (250 MB/s on my laptop). - */ - size_t offset = 0, chunk; - int ret; - while (n > 0) { - /* getrandom does not allow chunks larger than 33554431 */ - chunk = n <= 33554431 ? n : 33554431; - do { - ret = getrandom((char *)buf + offset, chunk, 0); - } while (ret == -1 && errno == EINTR); - if (ret < 0) - return ret; - offset += ret; - n -= ret; - } - assert(n == 0); - return 0; -} -#endif /* (defined(__linux__) || defined(__GNU__)) && (defined(USE_GLIBC) || \ - defined(SYS_getrandom)) */ - -#if (defined(__linux__) || defined(GNU_KFREEBSD)) && !defined(SYS_getrandom) - -#if defined(__linux__) -static int -randombytes_linux_read_entropy_ioctl(int device, int *entropy) -{ - return ioctl(device, RNDGETENTCNT, entropy); -} - -static int -randombytes_linux_read_entropy_proc(FILE *stream, int *entropy) -{ - int retcode; - do { - rewind(stream); - retcode = fscanf(stream, "%d", entropy); - } while (retcode != 1 && errno == EINTR); - if (retcode != 1) { - return -1; - } - return 0; -} - -static int -randombytes_linux_wait_for_entropy(int device) -{ - /* We will block on /dev/random, because any increase in the OS' entropy - * level will unblock the request. I use poll here (as does libsodium), - * because we don't *actually* want to read from the device. */ - enum - { - IOCTL, - PROC - } strategy = IOCTL; - const int bits = 128; - struct pollfd pfd; - int fd; - FILE *proc_file; - int retcode, retcode_error = 0; // Used as return codes throughout this function - int entropy = 0; - - /* If the device has enough entropy already, we will want to return early */ - retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); - // printf("errno: %d (%s)\n", errno, strerror(errno)); - if (retcode != 0 && (errno == ENOTTY || errno == ENOSYS)) { - // The ioctl call on /dev/urandom has failed due to a - // - ENOTTY (unsupported action), or - // - ENOSYS (invalid ioctl; this happens on MIPS, see #22). - // - // We will fall back to reading from - // `/proc/sys/kernel/random/entropy_avail`. This less ideal, - // because it allocates a file descriptor, and it may not work - // in a chroot. But at this point it seems we have no better - // options left. 
- strategy = PROC; - // Open the entropy count file - proc_file = fopen("/proc/sys/kernel/random/entropy_avail", "r"); - if (proc_file == NULL) { - return -1; - } - } else if (retcode != 0) { - // Unrecoverable ioctl error - return -1; - } - if (entropy >= bits) { - return 0; - } - - do { - fd = open("/dev/random", O_RDONLY); - } while (fd == -1 && errno == EINTR); /* EAGAIN will not occur */ - if (fd == -1) { - /* Unrecoverable IO error */ - return -1; - } - - pfd.fd = fd; - pfd.events = POLLIN; - for (;;) { - retcode = poll(&pfd, 1, -1); - if (retcode == -1 && (errno == EINTR || errno == EAGAIN)) { - continue; - } else if (retcode == 1) { - if (strategy == IOCTL) { - retcode = randombytes_linux_read_entropy_ioctl(device, &entropy); - } else if (strategy == PROC) { - retcode = randombytes_linux_read_entropy_proc(proc_file, &entropy); - } else { - return -1; // Unreachable - } - - if (retcode != 0) { - // Unrecoverable I/O error - retcode_error = retcode; - break; - } - if (entropy >= bits) { - break; - } - } else { - // Unreachable: poll() should only return -1 or 1 - retcode_error = -1; - break; - } - } - do { - retcode = close(fd); - } while (retcode == -1 && errno == EINTR); - if (strategy == PROC) { - do { - retcode = fclose(proc_file); - } while (retcode == -1 && errno == EINTR); - } - if (retcode_error != 0) { - return retcode_error; - } - return retcode; -} -#endif /* defined(__linux__) */ - -static int -randombytes_linux_randombytes_urandom(void *buf, size_t n) -{ - int fd; - size_t offset = 0, count; - ssize_t tmp; - do { - fd = open("/dev/urandom", O_RDONLY); - } while (fd == -1 && errno == EINTR); - if (fd == -1) - return -1; -#if defined(__linux__) - if (randombytes_linux_wait_for_entropy(fd) == -1) - return -1; -#endif - - while (n > 0) { - count = n <= SSIZE_MAX ? 
n : SSIZE_MAX; - tmp = read(fd, (char *)buf + offset, count); - if (tmp == -1 && (errno == EAGAIN || errno == EINTR)) { - continue; - } - if (tmp == -1) - return -1; /* Unrecoverable IO error */ - offset += tmp; - n -= tmp; - } - close(fd); - assert(n == 0); - return 0; -} -#endif /* defined(__linux__) && !defined(SYS_getrandom) */ - -#if defined(BSD) -static int -randombytes_bsd_randombytes(void *buf, size_t n) -{ - arc4random_buf(buf, n); - return 0; -} -#endif /* defined(BSD) */ - -#if defined(__EMSCRIPTEN__) -static int -randombytes_js_randombytes_nodejs(void *buf, size_t n) -{ - const int ret = EM_ASM_INT( - { - var crypto; - try { - crypto = require('crypto'); - } catch (error) { - return -2; - } - try { - writeArrayToMemory(crypto.randomBytes($1), $0); - return 0; - } catch (error) { - return -1; - } - }, - buf, - n); - switch (ret) { - case 0: - return 0; - case -1: - errno = EINVAL; - return -1; - case -2: - errno = ENOSYS; - return -1; - } - assert(false); // Unreachable -} -#endif /* defined(__EMSCRIPTEN__) */ - -SQISIGN_API -int -randombytes_select(unsigned char *buf, unsigned long long n) -{ -#if defined(__EMSCRIPTEN__) - return randombytes_js_randombytes_nodejs(buf, n); -#elif defined(__linux__) || defined(__GNU__) || defined(GNU_KFREEBSD) -#if defined(USE_GLIBC) - /* Use getrandom system call */ - return randombytes_linux_randombytes_getrandom(buf, n); -#elif defined(SYS_getrandom) - /* Use getrandom system call */ - return randombytes_linux_randombytes_getrandom(buf, n); -#else - /* When we have enough entropy, we can read from /dev/urandom */ - return randombytes_linux_randombytes_urandom(buf, n); -#endif -#elif defined(BSD) - /* Use arc4random system call */ - return randombytes_bsd_randombytes(buf, n); -#elif defined(_WIN32) - /* Use windows API */ - return randombytes_win32_randombytes(buf, n); -#elif defined(__wasi__) - /* Use WASI */ - return randombytes_wasi_randombytes(buf, n); -#else -#error "randombytes(...) is not supported on this platform" -#endif -} - -#ifdef RANDOMBYTES_SYSTEM -SQISIGN_API -int -randombytes(unsigned char *x, unsigned long long xlen) -{ - - int ret = randombytes_select(x, (size_t)xlen); -#ifdef ENABLE_CT_TESTING - VALGRIND_MAKE_MEM_UNDEFINED(x, xlen); -#endif - return ret; -} - -SQISIGN_API -void -randombytes_init(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength) -{ - (void)entropy_input; - (void)personalization_string; - (void)security_strength; -} -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/rng.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/rng.h index 0a9ca0e465..d0861ac036 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/rng.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/rng.h @@ -3,41 +3,11 @@ #ifndef rng_h #define rng_h -#include +#include -/** - * Randombytes initialization. - * Initialization may be needed for some random number generators (e.g. CTR-DRBG). - * - * @param[in] entropy_input 48 bytes entropy input - * @param[in] personalization_string Personalization string - * @param[in] security_strength Security string - */ -SQISIGN_API -void randombytes_init(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength); - -/** - * Random byte generation using /dev/urandom. - * The caller is responsible to allocate sufficient memory to hold x. - * - * @param[out] x Memory to hold the random bytes. 
- * @param[in] xlen Number of random bytes to be generated - * @return int 0 on success, -1 otherwise - */ -SQISIGN_API -int randombytes_select(unsigned char *x, unsigned long long xlen); - -/** - * Random byte generation. - * The caller is responsible to allocate sufficient memory to hold x. - * - * @param[out] x Memory to hold the random bytes. - * @param[in] xlen Number of random bytes to be generated - * @return int 0 on success, -1 otherwise - */ -SQISIGN_API -int randombytes(unsigned char *x, unsigned long long xlen); +static int randombytes(unsigned char *x, unsigned long long xlen){ + OQS_randombytes(x, xlen); + return 0; +} #endif /* rng_h */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sig.h index 4c33510084..a5bc04e6e4 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sig.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sig.h @@ -17,7 +17,7 @@ * @param[out] sk SQIsign secret key * @return int status code */ -SQISIGN_API +SQISIGN_API int sqisign_keypair(unsigned char *pk, unsigned char *sk); /** @@ -34,12 +34,34 @@ int sqisign_keypair(unsigned char *pk, unsigned char *sk); * @param[in] sk Compacted secret key * @return int status code */ -SQISIGN_API +SQISIGN_API int sqisign_sign(unsigned char *sm, unsigned long long *smlen, const unsigned char *m, unsigned long long mlen, const unsigned char *sk); + +/** + * Alternate SQIsign signature generation. Used for liboqs compatibility. + * + * The implementation performs SQIsign.expandSK() + SQIsign.sign() in the SQIsign spec. + * The key provided is a compacted secret key. + * The caller is responsible to allocate sufficient memory to hold s. + * + * @param[out] s Signature + * @param[out] slen Pointer to the length of s + * @param[in] m Message to be signed + * @param[in] mlen Message length + * @param[in] sk Compacted secret key + * @return int status code + */ +SQISIGN_API +int +sqisign_sign_signature(unsigned char *s, + unsigned long long *slen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *sk); #endif /** @@ -75,11 +97,30 @@ int sqisign_open(unsigned char *m, * @param[in] pk Compacted public key * @return int 0 if verification succeeded, 1 otherwise. */ -SQISIGN_API +SQISIGN_API int sqisign_verify(const unsigned char *m, unsigned long long mlen, const unsigned char *sig, unsigned long long siglen, const unsigned char *pk); +/** + * Alternate SQIsign signature verification. Used for liboqs compatibility. + * + * If the signature verification succeeded, returns 0, otherwise 1. + * + * @param[in] sig Signature + * @param[in] siglen Length of sig + * @param[in] m Message whose signature is verified + * @param[in] mlen Length of m + * @param[in] pk Compacted public key + * @return int 0 if verification succeeded, 1 otherwise. 
+ */ +SQISIGN_API +int +sqisign_verify_signature(const unsigned char *sig, + unsigned long long siglen, + const unsigned char *m, + unsigned long long mlen, + const unsigned char *pk); #endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign_namespace.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign_namespace.h index 007d2572b9..bbfe72c13b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign_namespace.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign_namespace.h @@ -18,12 +18,6 @@ #define PARAM_JOIN2(a, b) PARAM_JOIN2_(a, b) #define PARAM_NAME2(end, s) PARAM_JOIN2(end, s) -#ifndef DISABLE_NAMESPACING -#define SQISIGN_NAMESPACE_GENERIC(s) PARAM_NAME2(gen, s) -#else -#define SQISIGN_NAMESPACE_GENERIC(s) s -#endif - #if defined(SQISIGN_VARIANT) && !defined(DISABLE_NAMESPACING) #if defined(SQISIGN_BUILD_TYPE_REF) #define SQISIGN_NAMESPACE(s) PARAM_NAME3(ref, s) @@ -60,23 +54,23 @@ #undef quat_alg_scalar #undef quat_alg_sub -#define quat_alg_add SQISIGN_NAMESPACE_GENERIC(quat_alg_add) -#define quat_alg_conj SQISIGN_NAMESPACE_GENERIC(quat_alg_conj) -#define quat_alg_coord_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_coord_mul) -#define quat_alg_elem_copy SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy) -#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy_ibz) -#define quat_alg_elem_equal SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_equal) -#define quat_alg_elem_is_zero SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_is_zero) -#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_mul_by_scalar) -#define quat_alg_elem_set SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_set) -#define quat_alg_equal_denom SQISIGN_NAMESPACE_GENERIC(quat_alg_equal_denom) -#define quat_alg_init_set_ui SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set_ui) -#define quat_alg_make_primitive SQISIGN_NAMESPACE_GENERIC(quat_alg_make_primitive) -#define quat_alg_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_mul) -#define quat_alg_norm SQISIGN_NAMESPACE_GENERIC(quat_alg_norm) -#define quat_alg_normalize SQISIGN_NAMESPACE_GENERIC(quat_alg_normalize) -#define quat_alg_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_scalar) -#define quat_alg_sub SQISIGN_NAMESPACE_GENERIC(quat_alg_sub) +#define quat_alg_add SQISIGN_NAMESPACE(quat_alg_add) +#define quat_alg_conj SQISIGN_NAMESPACE(quat_alg_conj) +#define quat_alg_coord_mul SQISIGN_NAMESPACE(quat_alg_coord_mul) +#define quat_alg_elem_copy SQISIGN_NAMESPACE(quat_alg_elem_copy) +#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE(quat_alg_elem_copy_ibz) +#define quat_alg_elem_equal SQISIGN_NAMESPACE(quat_alg_elem_equal) +#define quat_alg_elem_is_zero SQISIGN_NAMESPACE(quat_alg_elem_is_zero) +#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE(quat_alg_elem_mul_by_scalar) +#define quat_alg_elem_set SQISIGN_NAMESPACE(quat_alg_elem_set) +#define quat_alg_equal_denom SQISIGN_NAMESPACE(quat_alg_equal_denom) +#define quat_alg_init_set_ui SQISIGN_NAMESPACE(quat_alg_init_set_ui) +#define quat_alg_make_primitive SQISIGN_NAMESPACE(quat_alg_make_primitive) +#define quat_alg_mul SQISIGN_NAMESPACE(quat_alg_mul) +#define quat_alg_norm SQISIGN_NAMESPACE(quat_alg_norm) +#define quat_alg_normalize SQISIGN_NAMESPACE(quat_alg_normalize) +#define quat_alg_scalar SQISIGN_NAMESPACE(quat_alg_scalar) +#define quat_alg_sub SQISIGN_NAMESPACE(quat_alg_sub) // Namespacing symbols exported from api.c: #undef crypto_sign @@ -134,14 +128,14 @@ #undef ibz_mat_2x2_set #undef ibz_vec_2_set -#define ibz_2x2_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_2x2_mul_mod) -#define 
ibz_mat_2x2_add SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_add) -#define ibz_mat_2x2_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_copy) -#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_det_from_ibz) -#define ibz_mat_2x2_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_eval) -#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_inv_mod) -#define ibz_mat_2x2_set SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_set) -#define ibz_vec_2_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_set) +#define ibz_2x2_mul_mod SQISIGN_NAMESPACE(ibz_2x2_mul_mod) +#define ibz_mat_2x2_add SQISIGN_NAMESPACE(ibz_mat_2x2_add) +#define ibz_mat_2x2_copy SQISIGN_NAMESPACE(ibz_mat_2x2_copy) +#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE(ibz_mat_2x2_det_from_ibz) +#define ibz_mat_2x2_eval SQISIGN_NAMESPACE(ibz_mat_2x2_eval) +#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE(ibz_mat_2x2_inv_mod) +#define ibz_mat_2x2_set SQISIGN_NAMESPACE(ibz_mat_2x2_set) +#define ibz_vec_2_set SQISIGN_NAMESPACE(ibz_vec_2_set) // Namespacing symbols exported from dim2id2iso.c: #undef dim2id2iso_arbitrary_isogeny_evaluation @@ -184,34 +178,34 @@ #undef ibz_vec_4_sub #undef quat_qf_eval -#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_mpm) -#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_pmp) -#define ibz_mat_4x4_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_copy) -#define ibz_mat_4x4_equal SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_equal) -#define ibz_mat_4x4_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval) -#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval_t) -#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_gcd) -#define ibz_mat_4x4_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_identity) -#define ibz_mat_4x4_inv_with_det_as_denom SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_inv_with_det_as_denom) -#define ibz_mat_4x4_is_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_identity) -#define ibz_mat_4x4_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_mul) -#define ibz_mat_4x4_negate SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_negate) -#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_div) -#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_mul) -#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_transpose) -#define ibz_mat_4x4_zero SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_zero) -#define ibz_vec_4_add SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_add) -#define ibz_vec_4_content SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_content) -#define ibz_vec_4_copy SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy) -#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_ibz) -#define ibz_vec_4_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_is_zero) -#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination) -#define ibz_vec_4_negate SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_negate) -#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_div) -#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul) -#define ibz_vec_4_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_set) -#define ibz_vec_4_sub SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_sub) -#define quat_qf_eval SQISIGN_NAMESPACE_GENERIC(quat_qf_eval) +#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE(ibz_inv_dim4_make_coeff_mpm) +#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE(ibz_inv_dim4_make_coeff_pmp) +#define ibz_mat_4x4_copy SQISIGN_NAMESPACE(ibz_mat_4x4_copy) +#define ibz_mat_4x4_equal 
SQISIGN_NAMESPACE(ibz_mat_4x4_equal) +#define ibz_mat_4x4_eval SQISIGN_NAMESPACE(ibz_mat_4x4_eval) +#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE(ibz_mat_4x4_eval_t) +#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE(ibz_mat_4x4_gcd) +#define ibz_mat_4x4_identity SQISIGN_NAMESPACE(ibz_mat_4x4_identity) +#define ibz_mat_4x4_inv_with_det_as_denom SQISIGN_NAMESPACE(ibz_mat_4x4_inv_with_det_as_denom) +#define ibz_mat_4x4_is_identity SQISIGN_NAMESPACE(ibz_mat_4x4_is_identity) +#define ibz_mat_4x4_mul SQISIGN_NAMESPACE(ibz_mat_4x4_mul) +#define ibz_mat_4x4_negate SQISIGN_NAMESPACE(ibz_mat_4x4_negate) +#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE(ibz_mat_4x4_scalar_div) +#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE(ibz_mat_4x4_scalar_mul) +#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE(ibz_mat_4x4_transpose) +#define ibz_mat_4x4_zero SQISIGN_NAMESPACE(ibz_mat_4x4_zero) +#define ibz_vec_4_add SQISIGN_NAMESPACE(ibz_vec_4_add) +#define ibz_vec_4_content SQISIGN_NAMESPACE(ibz_vec_4_content) +#define ibz_vec_4_copy SQISIGN_NAMESPACE(ibz_vec_4_copy) +#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE(ibz_vec_4_copy_ibz) +#define ibz_vec_4_is_zero SQISIGN_NAMESPACE(ibz_vec_4_is_zero) +#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE(ibz_vec_4_linear_combination) +#define ibz_vec_4_negate SQISIGN_NAMESPACE(ibz_vec_4_negate) +#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE(ibz_vec_4_scalar_div) +#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE(ibz_vec_4_scalar_mul) +#define ibz_vec_4_set SQISIGN_NAMESPACE(ibz_vec_4_set) +#define ibz_vec_4_sub SQISIGN_NAMESPACE(ibz_vec_4_sub) +#define quat_qf_eval SQISIGN_NAMESPACE(quat_qf_eval) // Namespacing symbols exported from ec.c: #undef cswap_points @@ -339,22 +333,22 @@ #undef quat_left_ideal_finalize #undef quat_left_ideal_init -#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_finalize) -#define ibz_mat_2x2_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_init) -#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_finalize) -#define ibz_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_init) -#define ibz_vec_2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_finalize) -#define ibz_vec_2_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_init) -#define ibz_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_finalize) -#define ibz_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_init) -#define quat_alg_elem_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_finalize) -#define quat_alg_elem_init SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_init) -#define quat_alg_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_finalize) -#define quat_alg_init_set SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set) -#define quat_lattice_finalize SQISIGN_NAMESPACE_GENERIC(quat_lattice_finalize) -#define quat_lattice_init SQISIGN_NAMESPACE_GENERIC(quat_lattice_init) -#define quat_left_ideal_finalize SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_finalize) -#define quat_left_ideal_init SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_init) +#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE(ibz_mat_2x2_finalize) +#define ibz_mat_2x2_init SQISIGN_NAMESPACE(ibz_mat_2x2_init) +#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE(ibz_mat_4x4_finalize) +#define ibz_mat_4x4_init SQISIGN_NAMESPACE(ibz_mat_4x4_init) +#define ibz_vec_2_finalize SQISIGN_NAMESPACE(ibz_vec_2_finalize) +#define ibz_vec_2_init SQISIGN_NAMESPACE(ibz_vec_2_init) +#define ibz_vec_4_finalize SQISIGN_NAMESPACE(ibz_vec_4_finalize) +#define ibz_vec_4_init SQISIGN_NAMESPACE(ibz_vec_4_init) +#define quat_alg_elem_finalize 
SQISIGN_NAMESPACE(quat_alg_elem_finalize) +#define quat_alg_elem_init SQISIGN_NAMESPACE(quat_alg_elem_init) +#define quat_alg_finalize SQISIGN_NAMESPACE(quat_alg_finalize) +#define quat_alg_init_set SQISIGN_NAMESPACE(quat_alg_init_set) +#define quat_lattice_finalize SQISIGN_NAMESPACE(quat_lattice_finalize) +#define quat_lattice_init SQISIGN_NAMESPACE(quat_lattice_init) +#define quat_left_ideal_finalize SQISIGN_NAMESPACE(quat_left_ideal_finalize) +#define quat_left_ideal_init SQISIGN_NAMESPACE(quat_left_ideal_init) // Namespacing symbols exported from fp.c: #undef fp_select @@ -567,11 +561,11 @@ #undef ibz_vec_4_linear_combination_mod #undef ibz_vec_4_scalar_mul_mod -#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_hnf) -#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE_GENERIC(ibz_mat_4xn_hnf_mod_core) -#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_mod) -#define ibz_vec_4_linear_combination_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination_mod) -#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul_mod) +#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE(ibz_mat_4x4_is_hnf) +#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE(ibz_mat_4xn_hnf_mod_core) +#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE(ibz_vec_4_copy_mod) +#define ibz_vec_4_linear_combination_mod SQISIGN_NAMESPACE(ibz_vec_4_linear_combination_mod) +#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE(ibz_vec_4_scalar_mul_mod) // Namespacing symbols exported from hnf_internal.c: #undef ibz_centered_mod @@ -579,15 +573,15 @@ #undef ibz_mod_not_zero #undef ibz_xgcd_with_u_not_0 -#define ibz_centered_mod SQISIGN_NAMESPACE_GENERIC(ibz_centered_mod) -#define ibz_conditional_assign SQISIGN_NAMESPACE_GENERIC(ibz_conditional_assign) -#define ibz_mod_not_zero SQISIGN_NAMESPACE_GENERIC(ibz_mod_not_zero) -#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE_GENERIC(ibz_xgcd_with_u_not_0) +#define ibz_centered_mod SQISIGN_NAMESPACE(ibz_centered_mod) +#define ibz_conditional_assign SQISIGN_NAMESPACE(ibz_conditional_assign) +#define ibz_mod_not_zero SQISIGN_NAMESPACE(ibz_mod_not_zero) +#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE(ibz_xgcd_with_u_not_0) // Namespacing symbols exported from ibz_division.c: #undef ibz_xgcd -#define ibz_xgcd SQISIGN_NAMESPACE_GENERIC(ibz_xgcd) +#define ibz_xgcd SQISIGN_NAMESPACE(ibz_xgcd) // Namespacing symbols exported from id2iso.c: #undef change_of_basis_matrix_tate @@ -624,22 +618,22 @@ #undef quat_order_discriminant #undef quat_order_is_maximal -#define quat_lideal_add SQISIGN_NAMESPACE_GENERIC(quat_lideal_add) -#define quat_lideal_class_gram SQISIGN_NAMESPACE_GENERIC(quat_lideal_class_gram) -#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_conjugate_without_hnf) -#define quat_lideal_copy SQISIGN_NAMESPACE_GENERIC(quat_lideal_copy) -#define quat_lideal_create SQISIGN_NAMESPACE_GENERIC(quat_lideal_create) -#define quat_lideal_create_principal SQISIGN_NAMESPACE_GENERIC(quat_lideal_create_principal) -#define quat_lideal_equals SQISIGN_NAMESPACE_GENERIC(quat_lideal_equals) -#define quat_lideal_generator SQISIGN_NAMESPACE_GENERIC(quat_lideal_generator) -#define quat_lideal_inter SQISIGN_NAMESPACE_GENERIC(quat_lideal_inter) -#define quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_inverse_lattice_without_hnf) -#define quat_lideal_mul SQISIGN_NAMESPACE_GENERIC(quat_lideal_mul) -#define quat_lideal_norm SQISIGN_NAMESPACE_GENERIC(quat_lideal_norm) -#define quat_lideal_right_order 
SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_order) -#define quat_lideal_right_transporter SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_transporter) -#define quat_order_discriminant SQISIGN_NAMESPACE_GENERIC(quat_order_discriminant) -#define quat_order_is_maximal SQISIGN_NAMESPACE_GENERIC(quat_order_is_maximal) +#define quat_lideal_add SQISIGN_NAMESPACE(quat_lideal_add) +#define quat_lideal_class_gram SQISIGN_NAMESPACE(quat_lideal_class_gram) +#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE(quat_lideal_conjugate_without_hnf) +#define quat_lideal_copy SQISIGN_NAMESPACE(quat_lideal_copy) +#define quat_lideal_create SQISIGN_NAMESPACE(quat_lideal_create) +#define quat_lideal_create_principal SQISIGN_NAMESPACE(quat_lideal_create_principal) +#define quat_lideal_equals SQISIGN_NAMESPACE(quat_lideal_equals) +#define quat_lideal_generator SQISIGN_NAMESPACE(quat_lideal_generator) +#define quat_lideal_inter SQISIGN_NAMESPACE(quat_lideal_inter) +#define quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE(quat_lideal_inverse_lattice_without_hnf) +#define quat_lideal_mul SQISIGN_NAMESPACE(quat_lideal_mul) +#define quat_lideal_norm SQISIGN_NAMESPACE(quat_lideal_norm) +#define quat_lideal_right_order SQISIGN_NAMESPACE(quat_lideal_right_order) +#define quat_lideal_right_transporter SQISIGN_NAMESPACE(quat_lideal_right_transporter) +#define quat_order_discriminant SQISIGN_NAMESPACE(quat_order_discriminant) +#define quat_order_is_maximal SQISIGN_NAMESPACE(quat_order_is_maximal) // Namespacing symbols exported from intbig.c: #undef ibz_abs @@ -647,6 +641,10 @@ #undef ibz_bitsize #undef ibz_cmp #undef ibz_cmp_int32 +#undef ibz_const_one +#undef ibz_const_three +#undef ibz_const_two +#undef ibz_const_zero #undef ibz_convert_to_str #undef ibz_copy #undef ibz_copy_digits @@ -687,57 +685,61 @@ #undef ibz_to_digits #undef ibz_two_adic -#define ibz_abs SQISIGN_NAMESPACE_GENERIC(ibz_abs) -#define ibz_add SQISIGN_NAMESPACE_GENERIC(ibz_add) -#define ibz_bitsize SQISIGN_NAMESPACE_GENERIC(ibz_bitsize) -#define ibz_cmp SQISIGN_NAMESPACE_GENERIC(ibz_cmp) -#define ibz_cmp_int32 SQISIGN_NAMESPACE_GENERIC(ibz_cmp_int32) -#define ibz_convert_to_str SQISIGN_NAMESPACE_GENERIC(ibz_convert_to_str) -#define ibz_copy SQISIGN_NAMESPACE_GENERIC(ibz_copy) -#define ibz_copy_digits SQISIGN_NAMESPACE_GENERIC(ibz_copy_digits) -#define ibz_div SQISIGN_NAMESPACE_GENERIC(ibz_div) -#define ibz_div_2exp SQISIGN_NAMESPACE_GENERIC(ibz_div_2exp) -#define ibz_div_floor SQISIGN_NAMESPACE_GENERIC(ibz_div_floor) -#define ibz_divides SQISIGN_NAMESPACE_GENERIC(ibz_divides) -#define ibz_finalize SQISIGN_NAMESPACE_GENERIC(ibz_finalize) -#define ibz_gcd SQISIGN_NAMESPACE_GENERIC(ibz_gcd) -#define ibz_get SQISIGN_NAMESPACE_GENERIC(ibz_get) -#define ibz_init SQISIGN_NAMESPACE_GENERIC(ibz_init) -#define ibz_invmod SQISIGN_NAMESPACE_GENERIC(ibz_invmod) -#define ibz_is_even SQISIGN_NAMESPACE_GENERIC(ibz_is_even) -#define ibz_is_odd SQISIGN_NAMESPACE_GENERIC(ibz_is_odd) -#define ibz_is_one SQISIGN_NAMESPACE_GENERIC(ibz_is_one) -#define ibz_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_is_zero) -#define ibz_legendre SQISIGN_NAMESPACE_GENERIC(ibz_legendre) -#define ibz_mod SQISIGN_NAMESPACE_GENERIC(ibz_mod) -#define ibz_mod_ui SQISIGN_NAMESPACE_GENERIC(ibz_mod_ui) -#define ibz_mul SQISIGN_NAMESPACE_GENERIC(ibz_mul) -#define ibz_neg SQISIGN_NAMESPACE_GENERIC(ibz_neg) -#define ibz_pow SQISIGN_NAMESPACE_GENERIC(ibz_pow) -#define ibz_pow_mod SQISIGN_NAMESPACE_GENERIC(ibz_pow_mod) -#define ibz_print SQISIGN_NAMESPACE_GENERIC(ibz_print) -#define ibz_probab_prime 
SQISIGN_NAMESPACE_GENERIC(ibz_probab_prime) -#define ibz_rand_interval SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval) -#define ibz_rand_interval_bits SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_bits) -#define ibz_rand_interval_i SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_i) -#define ibz_rand_interval_minm_m SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_minm_m) -#define ibz_set SQISIGN_NAMESPACE_GENERIC(ibz_set) -#define ibz_set_from_str SQISIGN_NAMESPACE_GENERIC(ibz_set_from_str) -#define ibz_size_in_base SQISIGN_NAMESPACE_GENERIC(ibz_size_in_base) -#define ibz_sqrt SQISIGN_NAMESPACE_GENERIC(ibz_sqrt) -#define ibz_sqrt_floor SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_floor) -#define ibz_sqrt_mod_p SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_mod_p) -#define ibz_sub SQISIGN_NAMESPACE_GENERIC(ibz_sub) -#define ibz_swap SQISIGN_NAMESPACE_GENERIC(ibz_swap) -#define ibz_to_digits SQISIGN_NAMESPACE_GENERIC(ibz_to_digits) -#define ibz_two_adic SQISIGN_NAMESPACE_GENERIC(ibz_two_adic) +#define ibz_abs SQISIGN_NAMESPACE(ibz_abs) +#define ibz_add SQISIGN_NAMESPACE(ibz_add) +#define ibz_bitsize SQISIGN_NAMESPACE(ibz_bitsize) +#define ibz_cmp SQISIGN_NAMESPACE(ibz_cmp) +#define ibz_cmp_int32 SQISIGN_NAMESPACE(ibz_cmp_int32) +#define ibz_const_one SQISIGN_NAMESPACE(ibz_const_one) +#define ibz_const_three SQISIGN_NAMESPACE(ibz_const_three) +#define ibz_const_two SQISIGN_NAMESPACE(ibz_const_two) +#define ibz_const_zero SQISIGN_NAMESPACE(ibz_const_zero) +#define ibz_convert_to_str SQISIGN_NAMESPACE(ibz_convert_to_str) +#define ibz_copy SQISIGN_NAMESPACE(ibz_copy) +#define ibz_copy_digits SQISIGN_NAMESPACE(ibz_copy_digits) +#define ibz_div SQISIGN_NAMESPACE(ibz_div) +#define ibz_div_2exp SQISIGN_NAMESPACE(ibz_div_2exp) +#define ibz_div_floor SQISIGN_NAMESPACE(ibz_div_floor) +#define ibz_divides SQISIGN_NAMESPACE(ibz_divides) +#define ibz_finalize SQISIGN_NAMESPACE(ibz_finalize) +#define ibz_gcd SQISIGN_NAMESPACE(ibz_gcd) +#define ibz_get SQISIGN_NAMESPACE(ibz_get) +#define ibz_init SQISIGN_NAMESPACE(ibz_init) +#define ibz_invmod SQISIGN_NAMESPACE(ibz_invmod) +#define ibz_is_even SQISIGN_NAMESPACE(ibz_is_even) +#define ibz_is_odd SQISIGN_NAMESPACE(ibz_is_odd) +#define ibz_is_one SQISIGN_NAMESPACE(ibz_is_one) +#define ibz_is_zero SQISIGN_NAMESPACE(ibz_is_zero) +#define ibz_legendre SQISIGN_NAMESPACE(ibz_legendre) +#define ibz_mod SQISIGN_NAMESPACE(ibz_mod) +#define ibz_mod_ui SQISIGN_NAMESPACE(ibz_mod_ui) +#define ibz_mul SQISIGN_NAMESPACE(ibz_mul) +#define ibz_neg SQISIGN_NAMESPACE(ibz_neg) +#define ibz_pow SQISIGN_NAMESPACE(ibz_pow) +#define ibz_pow_mod SQISIGN_NAMESPACE(ibz_pow_mod) +#define ibz_print SQISIGN_NAMESPACE(ibz_print) +#define ibz_probab_prime SQISIGN_NAMESPACE(ibz_probab_prime) +#define ibz_rand_interval SQISIGN_NAMESPACE(ibz_rand_interval) +#define ibz_rand_interval_bits SQISIGN_NAMESPACE(ibz_rand_interval_bits) +#define ibz_rand_interval_i SQISIGN_NAMESPACE(ibz_rand_interval_i) +#define ibz_rand_interval_minm_m SQISIGN_NAMESPACE(ibz_rand_interval_minm_m) +#define ibz_set SQISIGN_NAMESPACE(ibz_set) +#define ibz_set_from_str SQISIGN_NAMESPACE(ibz_set_from_str) +#define ibz_size_in_base SQISIGN_NAMESPACE(ibz_size_in_base) +#define ibz_sqrt SQISIGN_NAMESPACE(ibz_sqrt) +#define ibz_sqrt_floor SQISIGN_NAMESPACE(ibz_sqrt_floor) +#define ibz_sqrt_mod_p SQISIGN_NAMESPACE(ibz_sqrt_mod_p) +#define ibz_sub SQISIGN_NAMESPACE(ibz_sub) +#define ibz_swap SQISIGN_NAMESPACE(ibz_swap) +#define ibz_to_digits SQISIGN_NAMESPACE(ibz_to_digits) +#define ibz_two_adic SQISIGN_NAMESPACE(ibz_two_adic) // Namespacing symbols exported from 
integers.c: #undef ibz_cornacchia_prime #undef ibz_generate_random_prime -#define ibz_cornacchia_prime SQISIGN_NAMESPACE_GENERIC(ibz_cornacchia_prime) -#define ibz_generate_random_prime SQISIGN_NAMESPACE_GENERIC(ibz_generate_random_prime) +#define ibz_cornacchia_prime SQISIGN_NAMESPACE(ibz_cornacchia_prime) +#define ibz_generate_random_prime SQISIGN_NAMESPACE(ibz_generate_random_prime) // Namespacing symbols exported from isog_chains.c: #undef ec_eval_even @@ -763,15 +765,15 @@ #undef quat_lattice_lll #undef quat_lll_core -#define quat_lattice_lll SQISIGN_NAMESPACE_GENERIC(quat_lattice_lll) -#define quat_lll_core SQISIGN_NAMESPACE_GENERIC(quat_lll_core) +#define quat_lattice_lll SQISIGN_NAMESPACE(quat_lattice_lll) +#define quat_lll_core SQISIGN_NAMESPACE(quat_lll_core) // Namespacing symbols exported from lat_ball.c: #undef quat_lattice_bound_parallelogram #undef quat_lattice_sample_from_ball -#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE_GENERIC(quat_lattice_bound_parallelogram) -#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE_GENERIC(quat_lattice_sample_from_ball) +#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE(quat_lattice_bound_parallelogram) +#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE(quat_lattice_sample_from_ball) // Namespacing symbols exported from lattice.c: #undef quat_lattice_add @@ -789,29 +791,29 @@ #undef quat_lattice_mul #undef quat_lattice_reduce_denom -#define quat_lattice_add SQISIGN_NAMESPACE_GENERIC(quat_lattice_add) -#define quat_lattice_alg_elem_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_alg_elem_mul) -#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_conjugate_without_hnf) -#define quat_lattice_contains SQISIGN_NAMESPACE_GENERIC(quat_lattice_contains) -#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_dual_without_hnf) -#define quat_lattice_equal SQISIGN_NAMESPACE_GENERIC(quat_lattice_equal) -#define quat_lattice_gram SQISIGN_NAMESPACE_GENERIC(quat_lattice_gram) -#define quat_lattice_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_hnf) -#define quat_lattice_inclusion SQISIGN_NAMESPACE_GENERIC(quat_lattice_inclusion) -#define quat_lattice_index SQISIGN_NAMESPACE_GENERIC(quat_lattice_index) -#define quat_lattice_intersect SQISIGN_NAMESPACE_GENERIC(quat_lattice_intersect) -#define quat_lattice_mat_alg_coord_mul_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_mat_alg_coord_mul_without_hnf) -#define quat_lattice_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_mul) -#define quat_lattice_reduce_denom SQISIGN_NAMESPACE_GENERIC(quat_lattice_reduce_denom) +#define quat_lattice_add SQISIGN_NAMESPACE(quat_lattice_add) +#define quat_lattice_alg_elem_mul SQISIGN_NAMESPACE(quat_lattice_alg_elem_mul) +#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE(quat_lattice_conjugate_without_hnf) +#define quat_lattice_contains SQISIGN_NAMESPACE(quat_lattice_contains) +#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE(quat_lattice_dual_without_hnf) +#define quat_lattice_equal SQISIGN_NAMESPACE(quat_lattice_equal) +#define quat_lattice_gram SQISIGN_NAMESPACE(quat_lattice_gram) +#define quat_lattice_hnf SQISIGN_NAMESPACE(quat_lattice_hnf) +#define quat_lattice_inclusion SQISIGN_NAMESPACE(quat_lattice_inclusion) +#define quat_lattice_index SQISIGN_NAMESPACE(quat_lattice_index) +#define quat_lattice_intersect SQISIGN_NAMESPACE(quat_lattice_intersect) +#define quat_lattice_mat_alg_coord_mul_without_hnf SQISIGN_NAMESPACE(quat_lattice_mat_alg_coord_mul_without_hnf) +#define 
quat_lattice_mul SQISIGN_NAMESPACE(quat_lattice_mul) +#define quat_lattice_reduce_denom SQISIGN_NAMESPACE(quat_lattice_reduce_denom) // Namespacing symbols exported from lll_applications.c: #undef quat_lideal_lideal_mul_reduced #undef quat_lideal_prime_norm_reduced_equivalent #undef quat_lideal_reduce_basis -#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE_GENERIC(quat_lideal_lideal_mul_reduced) -#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE_GENERIC(quat_lideal_prime_norm_reduced_equivalent) -#define quat_lideal_reduce_basis SQISIGN_NAMESPACE_GENERIC(quat_lideal_reduce_basis) +#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE(quat_lideal_lideal_mul_reduced) +#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE(quat_lideal_prime_norm_reduced_equivalent) +#define quat_lideal_reduce_basis SQISIGN_NAMESPACE(quat_lideal_reduce_basis) // Namespacing symbols exported from lll_verification.c: #undef ibq_vec_4_copy_ibz @@ -820,18 +822,18 @@ #undef quat_lll_set_ibq_parameters #undef quat_lll_verify -#define ibq_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_copy_ibz) -#define quat_lll_bilinear SQISIGN_NAMESPACE_GENERIC(quat_lll_bilinear) -#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE_GENERIC(quat_lll_gram_schmidt_transposed_with_ibq) -#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE_GENERIC(quat_lll_set_ibq_parameters) -#define quat_lll_verify SQISIGN_NAMESPACE_GENERIC(quat_lll_verify) +#define ibq_vec_4_copy_ibz SQISIGN_NAMESPACE(ibq_vec_4_copy_ibz) +#define quat_lll_bilinear SQISIGN_NAMESPACE(quat_lll_bilinear) +#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE(quat_lll_gram_schmidt_transposed_with_ibq) +#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE(quat_lll_set_ibq_parameters) +#define quat_lll_verify SQISIGN_NAMESPACE(quat_lll_verify) // Namespacing symbols exported from mem.c: #undef sqisign_secure_clear #undef sqisign_secure_free -#define sqisign_secure_clear SQISIGN_NAMESPACE_GENERIC(sqisign_secure_clear) -#define sqisign_secure_free SQISIGN_NAMESPACE_GENERIC(sqisign_secure_free) +#define sqisign_secure_clear SQISIGN_NAMESPACE(sqisign_secure_clear) +#define sqisign_secure_free SQISIGN_NAMESPACE(sqisign_secure_free) // Namespacing symbols exported from mp.c: #undef MUL @@ -854,25 +856,25 @@ #undef select_ct #undef swap_ct -#define MUL SQISIGN_NAMESPACE_GENERIC(MUL) -#define mp_add SQISIGN_NAMESPACE_GENERIC(mp_add) -#define mp_compare SQISIGN_NAMESPACE_GENERIC(mp_compare) -#define mp_copy SQISIGN_NAMESPACE_GENERIC(mp_copy) -#define mp_inv_2e SQISIGN_NAMESPACE_GENERIC(mp_inv_2e) -#define mp_invert_matrix SQISIGN_NAMESPACE_GENERIC(mp_invert_matrix) -#define mp_is_one SQISIGN_NAMESPACE_GENERIC(mp_is_one) -#define mp_is_zero SQISIGN_NAMESPACE_GENERIC(mp_is_zero) -#define mp_mod_2exp SQISIGN_NAMESPACE_GENERIC(mp_mod_2exp) -#define mp_mul SQISIGN_NAMESPACE_GENERIC(mp_mul) -#define mp_mul2 SQISIGN_NAMESPACE_GENERIC(mp_mul2) -#define mp_neg SQISIGN_NAMESPACE_GENERIC(mp_neg) -#define mp_print SQISIGN_NAMESPACE_GENERIC(mp_print) -#define mp_shiftl SQISIGN_NAMESPACE_GENERIC(mp_shiftl) -#define mp_shiftr SQISIGN_NAMESPACE_GENERIC(mp_shiftr) -#define mp_sub SQISIGN_NAMESPACE_GENERIC(mp_sub) -#define multiple_mp_shiftl SQISIGN_NAMESPACE_GENERIC(multiple_mp_shiftl) -#define select_ct SQISIGN_NAMESPACE_GENERIC(select_ct) -#define swap_ct SQISIGN_NAMESPACE_GENERIC(swap_ct) +#define MUL SQISIGN_NAMESPACE(MUL) +#define mp_add SQISIGN_NAMESPACE(mp_add) +#define mp_compare SQISIGN_NAMESPACE(mp_compare) 
+#define mp_copy SQISIGN_NAMESPACE(mp_copy) +#define mp_inv_2e SQISIGN_NAMESPACE(mp_inv_2e) +#define mp_invert_matrix SQISIGN_NAMESPACE(mp_invert_matrix) +#define mp_is_one SQISIGN_NAMESPACE(mp_is_one) +#define mp_is_zero SQISIGN_NAMESPACE(mp_is_zero) +#define mp_mod_2exp SQISIGN_NAMESPACE(mp_mod_2exp) +#define mp_mul SQISIGN_NAMESPACE(mp_mul) +#define mp_mul2 SQISIGN_NAMESPACE(mp_mul2) +#define mp_neg SQISIGN_NAMESPACE(mp_neg) +#define mp_print SQISIGN_NAMESPACE(mp_print) +#define mp_shiftl SQISIGN_NAMESPACE(mp_shiftl) +#define mp_shiftr SQISIGN_NAMESPACE(mp_shiftr) +#define mp_sub SQISIGN_NAMESPACE(mp_sub) +#define multiple_mp_shiftl SQISIGN_NAMESPACE(multiple_mp_shiftl) +#define select_ct SQISIGN_NAMESPACE(select_ct) +#define swap_ct SQISIGN_NAMESPACE(swap_ct) // Namespacing symbols exported from normeq.c: #undef quat_change_to_O0_basis @@ -882,12 +884,12 @@ #undef quat_represent_integer #undef quat_sampling_random_ideal_O0_given_norm -#define quat_change_to_O0_basis SQISIGN_NAMESPACE_GENERIC(quat_change_to_O0_basis) -#define quat_lattice_O0_set SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set) -#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set_extremal) -#define quat_order_elem_create SQISIGN_NAMESPACE_GENERIC(quat_order_elem_create) -#define quat_represent_integer SQISIGN_NAMESPACE_GENERIC(quat_represent_integer) -#define quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE_GENERIC(quat_sampling_random_ideal_O0_given_norm) +#define quat_change_to_O0_basis SQISIGN_NAMESPACE(quat_change_to_O0_basis) +#define quat_lattice_O0_set SQISIGN_NAMESPACE(quat_lattice_O0_set) +#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE(quat_lattice_O0_set_extremal) +#define quat_order_elem_create SQISIGN_NAMESPACE(quat_order_elem_create) +#define quat_represent_integer SQISIGN_NAMESPACE(quat_represent_integer) +#define quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE(quat_sampling_random_ideal_O0_given_norm) // Namespacing symbols exported from printer.c: #undef ibz_mat_2x2_print @@ -899,23 +901,23 @@ #undef quat_lattice_print #undef quat_left_ideal_print -#define ibz_mat_2x2_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_print) -#define ibz_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_print) -#define ibz_vec_2_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_print) -#define ibz_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_print) -#define quat_alg_elem_print SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_print) -#define quat_alg_print SQISIGN_NAMESPACE_GENERIC(quat_alg_print) -#define quat_lattice_print SQISIGN_NAMESPACE_GENERIC(quat_lattice_print) -#define quat_left_ideal_print SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_print) +#define ibz_mat_2x2_print SQISIGN_NAMESPACE(ibz_mat_2x2_print) +#define ibz_mat_4x4_print SQISIGN_NAMESPACE(ibz_mat_4x4_print) +#define ibz_vec_2_print SQISIGN_NAMESPACE(ibz_vec_2_print) +#define ibz_vec_4_print SQISIGN_NAMESPACE(ibz_vec_4_print) +#define quat_alg_elem_print SQISIGN_NAMESPACE(quat_alg_elem_print) +#define quat_alg_print SQISIGN_NAMESPACE(quat_alg_print) +#define quat_lattice_print SQISIGN_NAMESPACE(quat_lattice_print) +#define quat_left_ideal_print SQISIGN_NAMESPACE(quat_left_ideal_print) // Namespacing symbols exported from random_input_generation.c: #undef quat_test_input_random_ideal_generation #undef quat_test_input_random_ideal_lattice_generation #undef quat_test_input_random_lattice_generation -#define quat_test_input_random_ideal_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_generation) -#define 
quat_test_input_random_ideal_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_lattice_generation) -#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_lattice_generation) +#define quat_test_input_random_ideal_generation SQISIGN_NAMESPACE(quat_test_input_random_ideal_generation) +#define quat_test_input_random_ideal_lattice_generation SQISIGN_NAMESPACE(quat_test_input_random_ideal_lattice_generation) +#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE(quat_test_input_random_lattice_generation) // Namespacing symbols exported from rationals.c: #undef ibq_abs @@ -941,28 +943,28 @@ #undef ibq_vec_4_init #undef ibq_vec_4_print -#define ibq_abs SQISIGN_NAMESPACE_GENERIC(ibq_abs) -#define ibq_add SQISIGN_NAMESPACE_GENERIC(ibq_add) -#define ibq_cmp SQISIGN_NAMESPACE_GENERIC(ibq_cmp) -#define ibq_copy SQISIGN_NAMESPACE_GENERIC(ibq_copy) -#define ibq_finalize SQISIGN_NAMESPACE_GENERIC(ibq_finalize) -#define ibq_init SQISIGN_NAMESPACE_GENERIC(ibq_init) -#define ibq_inv SQISIGN_NAMESPACE_GENERIC(ibq_inv) -#define ibq_is_ibz SQISIGN_NAMESPACE_GENERIC(ibq_is_ibz) -#define ibq_is_one SQISIGN_NAMESPACE_GENERIC(ibq_is_one) -#define ibq_is_zero SQISIGN_NAMESPACE_GENERIC(ibq_is_zero) -#define ibq_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_finalize) -#define ibq_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_init) -#define ibq_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_print) -#define ibq_mul SQISIGN_NAMESPACE_GENERIC(ibq_mul) -#define ibq_neg SQISIGN_NAMESPACE_GENERIC(ibq_neg) -#define ibq_reduce SQISIGN_NAMESPACE_GENERIC(ibq_reduce) -#define ibq_set SQISIGN_NAMESPACE_GENERIC(ibq_set) -#define ibq_sub SQISIGN_NAMESPACE_GENERIC(ibq_sub) -#define ibq_to_ibz SQISIGN_NAMESPACE_GENERIC(ibq_to_ibz) -#define ibq_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_finalize) -#define ibq_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_init) -#define ibq_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_print) +#define ibq_abs SQISIGN_NAMESPACE(ibq_abs) +#define ibq_add SQISIGN_NAMESPACE(ibq_add) +#define ibq_cmp SQISIGN_NAMESPACE(ibq_cmp) +#define ibq_copy SQISIGN_NAMESPACE(ibq_copy) +#define ibq_finalize SQISIGN_NAMESPACE(ibq_finalize) +#define ibq_init SQISIGN_NAMESPACE(ibq_init) +#define ibq_inv SQISIGN_NAMESPACE(ibq_inv) +#define ibq_is_ibz SQISIGN_NAMESPACE(ibq_is_ibz) +#define ibq_is_one SQISIGN_NAMESPACE(ibq_is_one) +#define ibq_is_zero SQISIGN_NAMESPACE(ibq_is_zero) +#define ibq_mat_4x4_finalize SQISIGN_NAMESPACE(ibq_mat_4x4_finalize) +#define ibq_mat_4x4_init SQISIGN_NAMESPACE(ibq_mat_4x4_init) +#define ibq_mat_4x4_print SQISIGN_NAMESPACE(ibq_mat_4x4_print) +#define ibq_mul SQISIGN_NAMESPACE(ibq_mul) +#define ibq_neg SQISIGN_NAMESPACE(ibq_neg) +#define ibq_reduce SQISIGN_NAMESPACE(ibq_reduce) +#define ibq_set SQISIGN_NAMESPACE(ibq_set) +#define ibq_sub SQISIGN_NAMESPACE(ibq_sub) +#define ibq_to_ibz SQISIGN_NAMESPACE(ibq_to_ibz) +#define ibq_vec_4_finalize SQISIGN_NAMESPACE(ibq_vec_4_finalize) +#define ibq_vec_4_init SQISIGN_NAMESPACE(ibq_vec_4_init) +#define ibq_vec_4_print SQISIGN_NAMESPACE(ibq_vec_4_print) // Namespacing symbols exported from sign.c: #undef protocols_sign diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/tools.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/tools.c deleted file mode 100644 index 242ea08fe2..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/tools.c +++ /dev/null @@ -1,75 +0,0 @@ -#include -#include - -static clock_t global_timer; - -clock_t 
-tic(void) -{ - global_timer = clock(); - return global_timer; -} - -float -tac(void) -{ - float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); - return ms; -} - -float -TAC(const char *str) -{ - float ms = (1000. * (float)(clock() - global_timer) / CLOCKS_PER_SEC); -#ifndef NDEBUG - printf("%s [%d ms]\n", str, (int)ms); -#endif - return ms; -} - -float -toc(const clock_t t) -{ - float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); - return ms; -} - -float -TOC(const clock_t t, const char *str) -{ - float ms = (1000. * (float)(clock() - t) / CLOCKS_PER_SEC); - printf("%s [%d ms]\n", str, (int)ms); - return ms; - // printf("%s [%ld]\n",str,clock()-t); - // return (float) (clock()-t); -} - -float -TOC_clock(const clock_t t, const char *str) -{ - printf("%s [%ld]\n", str, clock() - t); - return (float)(clock() - t); -} - -clock_t -dclock(const clock_t t) -{ - return (clock() - t); -} - -float -clock_to_time(const clock_t t, const char *str) -{ - float ms = (1000. * (float)(t) / CLOCKS_PER_SEC); - printf("%s [%d ms]\n", str, (int)ms); - return ms; - // printf("%s [%ld]\n",str,t); - // return (float) (t); -} - -float -clock_print(const clock_t t, const char *str) -{ - printf("%s [%ld]\n", str, t); - return (float)(t); -} diff --git a/tests/test_sig.c b/tests/test_sig.c index 107f26e933..9338a03e1f 100644 --- a/tests/test_sig.c +++ b/tests/test_sig.c @@ -322,7 +322,7 @@ int main(int argc, char **argv) { #if OQS_USE_PTHREADS #define MAX_LEN_SIG_NAME_ 64 // don't run algorithms with large stack usage in threads - char no_thread_sig_patterns[][MAX_LEN_SIG_NAME_] = {"MAYO-5", "cross-rsdp-128-small", "cross-rsdp-192-small", "cross-rsdp-256-balanced", "cross-rsdp-256-small", "cross-rsdpg-192-small", "cross-rsdpg-256-small", "SNOVA_37_17_2", "SNOVA_56_25_2", "SNOVA_49_11_3", "SNOVA_37_8_4", "SNOVA_24_5_5", "SNOVA_60_10_4", "SNOVA_29_6_5"}; + char no_thread_sig_patterns[][MAX_LEN_SIG_NAME_] = {"MAYO-5", "cross-rsdp-128-small", "cross-rsdp-192-small", "cross-rsdp-256-balanced", "cross-rsdp-256-small", "cross-rsdpg-192-small", "cross-rsdpg-256-small", "SNOVA_37_17_2", "SNOVA_56_25_2", "SNOVA_49_11_3", "SNOVA_37_8_4", "SNOVA_24_5_5", "SNOVA_60_10_4", "SNOVA_29_6_5", "SQIsign-lvl1", "SQIsign-lvl3", "SQIsign-lvl5"}; int test_in_thread = 1; for (size_t i = 0 ; i < sizeof(no_thread_sig_patterns) / MAX_LEN_SIG_NAME_; ++i) { if (strstr(alg_name, no_thread_sig_patterns[i]) != NULL) { From 307012ac3cfbfff4946b9cb695078501f9b5a3cf Mon Sep 17 00:00:00 2001 From: Shane Date: Tue, 24 Jun 2025 14:09:40 -0400 Subject: [PATCH 03/19] Get full-tests running locally on arm MacOS. 
Signed-off-by: Shane --- README.md | 1 + docs/algorithms/sig/sqisign.md | 52 ++++++ docs/algorithms/sig/sqisign.yml | 132 ++++++++++++++ docs/cbom.json | 161 +++++++++++++++++- .../copy_from_upstream/copy_from_upstream.yml | 4 +- src/sig/sqisign/CMakeLists.txt | 12 +- .../the-sqisign_sqisign_lvl1_broadwell/mem.c | 23 --- .../the-sqisign_sqisign_lvl1_broadwell/mem.h | 24 --- .../the-sqisign_sqisign_lvl1_ref/mem.c | 23 --- .../the-sqisign_sqisign_lvl1_ref/mem.h | 24 --- .../the-sqisign_sqisign_lvl3_broadwell/mem.c | 23 --- .../the-sqisign_sqisign_lvl3_ref/mem.c | 23 --- .../the-sqisign_sqisign_lvl3_ref/mem.h | 24 --- .../the-sqisign_sqisign_lvl5_broadwell/mem.c | 23 --- .../the-sqisign_sqisign_lvl5_broadwell/mem.h | 24 --- .../the-sqisign_sqisign_lvl5_ref/mem.c | 23 --- .../the-sqisign_sqisign_lvl5_ref/mem.h | 24 --- 17 files changed, 347 insertions(+), 273 deletions(-) create mode 100644 docs/algorithms/sig/sqisign.md create mode 100644 docs/algorithms/sig/sqisign.yml delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mem.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mem.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mem.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mem.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mem.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mem.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mem.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mem.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mem.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mem.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mem.h diff --git a/README.md b/README.md index a94b563d57..6768780d08 100644 --- a/README.md +++ b/README.md @@ -76,6 +76,7 @@ All names other than `ML-KEM` and `ML-DSA` are subject to change. 
`liboqs` makes - **SNOVA**: SNOVA\_24\_5\_4, SNOVA\_24\_5\_4\_SHAKE, SNOVA\_24\_5\_4\_esk, SNOVA\_24\_5\_4\_SHAKE\_esk, SNOVA\_37\_17\_2†, SNOVA\_25\_8\_3, SNOVA\_56\_25\_2†, SNOVA\_49\_11\_3†, SNOVA\_37\_8\_4†, SNOVA\_24\_5\_5†, SNOVA\_60\_10\_4†, SNOVA\_29\_6\_5† - **SPHINCS+-SHA2**: SPHINCS+-SHA2-128f-simple, SPHINCS+-SHA2-128s-simple, SPHINCS+-SHA2-192f-simple, SPHINCS+-SHA2-192s-simple, SPHINCS+-SHA2-256f-simple, SPHINCS+-SHA2-256s-simple - **SPHINCS+-SHAKE**: SPHINCS+-SHAKE-128f-simple, SPHINCS+-SHAKE-128s-simple, SPHINCS+-SHAKE-192f-simple, SPHINCS+-SHAKE-192s-simple, SPHINCS+-SHAKE-256f-simple, SPHINCS+-SHAKE-256s-simple +- **SQIsign**: SQIsign-lvl1, SQIsign-lvl3, SQIsign-lvl5 - **UOV**: OV-Is, OV-Ip, OV-III, OV-V, OV-Is-pkc, OV-Ip-pkc, OV-III-pkc, OV-V-pkc, OV-Is-pkc-skc, OV-Ip-pkc-skc, OV-III-pkc-skc, OV-V-pkc-skc - **XMSS**: XMSS-SHA2_10_256, XMSS-SHA2_16_256, XMSS-SHA2_20_256, XMSS-SHAKE_10_256, XMSS-SHAKE_16_256, XMSS-SHAKE_20_256, XMSS-SHA2_10_512, XMSS-SHA2_16_512, XMSS-SHA2_20_512, XMSS-SHAKE_10_512, XMSS-SHAKE_16_512, XMSS-SHAKE_20_512, XMSS-SHA2_10_192, XMSS-SHA2_16_192, XMSS-SHA2_20_192, XMSS-SHAKE256_10_192, XMSS-SHAKE256_16_192, XMSS-SHAKE256_20_192, SHAKE256_10_256, SHAKE256_16_256, SHAKE256_20_256, XMSSMT-SHA2_20/2_256, XMSSMT-SHA2_20/4_256, XMSSMT-SHA2_40/2_256, XMSSMT-SHA2_40/4_256, XMSSMT-SHA2_40/8_256, XMSSMT-SHA2_60/3_256, XMSSMT-SHA2_60/6_256, XMSSMT-SHA2_60/12_256, XMSSMT-SHAKE_20/2_256, XMSSMT-SHAKE_20/4_256, XMSSMT-SHAKE_40/2_256, XMSSMT-SHAKE_40/4_256, XMSSMT-SHAKE_40/8_256, XMSSMT-SHAKE_60/3_256, XMSSMT-SHAKE_60/6_256, XMSSMT-SHAKE_60/12_256 diff --git a/docs/algorithms/sig/sqisign.md b/docs/algorithms/sig/sqisign.md new file mode 100644 index 0000000000..cefc85849a --- /dev/null +++ b/docs/algorithms/sig/sqisign.md @@ -0,0 +1,52 @@ +# SQIsign + +- **Algorithm type**: Digital signature scheme. +- **Main cryptographic assumption**: Computing the endomorphism ring of a supersingular elliptic curve. +- **Principal submitters**: Marius A. Aardal, Gora Adj, Diego F. Aranha, Andrea Basso, Isaac Andrés Canales Martínez, Jorge Chávez-Saab, Maria Corte-Real Santos, Pierrick Dartois, Luca De Feo, Max Duparc, Jonathan Komada Eriksen, Tako Boris Fouotsa, Décio Luiz Gazzoni Filho, Basil Hess, David Kohel, Antonin Leroux, Patrick Longa, Luciano Maino, Michael Meyer, Kohei Nakagawa, Hiroshi Onuki, Lorenz Panny, Sikhar Patranabis, Christophe Petit, Giacomo Pope, Krijn Reijnders, Damien Robert, Francisco Rodríguez Henríquez, Sina Schaeffler, Benjamin Wesolowski. +- **Authors' website**: https://sqisign.org/ +- **Specification version**: Round 2.
+- **Primary Source**: + - **Source**: https://github.com/shane-digi/the-sqisign/commit/a8c8a3d5acbeaecec179d9088c8eab7216db4db5 with copy_from_upstream patches + - **Implementation license (SPDX-Identifier)**: Apache-2.0 + + +## Parameter set summary + +| Parameter set | Parameter set alias | Security model | Claimed NIST Level | Public key size (bytes) | Secret key size (bytes) | Signature size (bytes) | +|:---------------:|:----------------------|:-----------------|---------------------:|--------------------------:|--------------------------:|-------------------------:| +| SQIsign-lvl1 | NA | EUF-CMA | 1 | 65 | 353 | 148 | +| SQIsign-lvl3 | NA | EUF-CMA | 3 | 97 | 529 | 224 | +| SQIsign-lvl5 | NA | EUF-CMA | 5 | 129 | 701 | 292 | + +## SQIsign-lvl1 implementation characteristics + +| Implementation source | Identifier in upstream | Supported architecture(s) | Supported operating system(s) | CPU extension(s) used | No branching-on-secrets claimed? | No branching-on-secrets checked by valgrind? | Large stack usage?‡ | +|:---------------------------------:|:-------------------------|:----------------------------|:--------------------------------|:------------------------|:-----------------------------------|:-----------------------------------------------|:----------------------| +| [Primary Source](#primary-source) | ref | All | All | None | True | True | False | +| [Primary Source](#primary-source) | broadwell | x86\_64 | Darwin,Linux | AVX2 | True | True | False | + +Are implementations chosen based on runtime CPU feature detection? **Yes**. + + ‡For an explanation of what this denotes, consult the [Explanation of Terms](#explanation-of-terms) section at the end of this file. + +## SQIsign-lvl3 implementation characteristics + +| Implementation source | Identifier in upstream | Supported architecture(s) | Supported operating system(s) | CPU extension(s) used | No branching-on-secrets claimed? | No branching-on-secrets checked by valgrind? | Large stack usage? | +|:---------------------------------:|:-------------------------|:----------------------------|:--------------------------------|:------------------------|:-----------------------------------|:-----------------------------------------------|:---------------------| +| [Primary Source](#primary-source) | ref | All | All | None | True | True | False | +| [Primary Source](#primary-source) | broadwell | x86\_64 | Darwin,Linux | AVX2 | True | True | False | + +Are implementations chosen based on runtime CPU feature detection? **Yes**. + +## SQIsign-lvl5 implementation characteristics + +| Implementation source | Identifier in upstream | Supported architecture(s) | Supported operating system(s) | CPU extension(s) used | No branching-on-secrets claimed? | No branching-on-secrets checked by valgrind? | Large stack usage? | +|:---------------------------------:|:-------------------------|:----------------------------|:--------------------------------|:------------------------|:-----------------------------------|:-----------------------------------------------|:---------------------| +| [Primary Source](#primary-source) | ref | All | All | None | True | True | False | +| [Primary Source](#primary-source) | broadwell | x86\_64 | Darwin,Linux | AVX2 | True | True | False | + +Are implementations chosen based on runtime CPU feature detection? **Yes**. + +## Explanation of Terms + +- **Large Stack Usage**: Implementations identified as having such may cause failures when running in threads or in constrained environments. 
\ No newline at end of file diff --git a/docs/algorithms/sig/sqisign.yml b/docs/algorithms/sig/sqisign.yml new file mode 100644 index 0000000000..c1cc6ea380 --- /dev/null +++ b/docs/algorithms/sig/sqisign.yml @@ -0,0 +1,132 @@ +name: SQIsign +type: signature +principal-submitters: +- Marius A. Aardal +- Gora Adj +- Diego F.Aranha +- Andrea Basso +- Isaac Andrés Canales Martínez +- Jorge Chávez-Saab +- Maria Corte-Real Santos +- Pierrick Dartois +- Luca De Feo +- Max Duparc +- Jonathan Komada Eriksen +- Tako Boris Fouotsa +- Décio Luiz Gazzoni Filho +- Basil Hess +- David Kohel +- Antonin Leroux +- Patrick Longa +- Luciano Maino +- Michael Meyer +- Kohei Nakagawa +- Hiroshi Onuki +- Lorenz Panny +- Sikhar Patranabis +- Christophe Petit +- Giacomo Pope +- Krijn Reijnders +- Damien Robert +- Francisco Rodríguez Henríquez +- Sina Schaeffler +- Benjamin Wesolowski +crypto-assumption: Computing the endomorphism ring of a supersingular elliptic curve. +website: https://sqisign.org/ +nist-round: 2 +spec-version: Round 2 +primary-upstream: + source: https://github.com/shane-digi/the-sqisign/commit/a8c8a3d5acbeaecec179d9088c8eab7216db4db5 + with copy_from_upstream patches + spdx-license-identifier: Apache-2.0 +parameter-sets: +- name: SQIsign-lvl1 + oqs_alg: OQS_SIG_alg_sqisign_lvl1 + claimed-nist-level: 1 + claimed-security: EUF-CMA + length-public-key: 65 + length-secret-key: 353 + length-signature: 148 + implementations-switch-on-runtime-cpu-features: true + implementations: + - upstream: primary-upstream + upstream-id: ref + supported-platforms: all + no-secret-dependent-branching-claimed: true + no-secret-dependent-branching-checked-by-valgrind: true + large-stack-usage: false + - upstream: primary-upstream + upstream-id: broadwell + supported-platforms: + - architecture: x86_64 + operating_systems: + - Darwin + - Linux + required_flags: + - avx2 + common-crypto: + - SHA3: liboqs + - AES: liboqs + no-secret-dependent-branching-claimed: true + no-secret-dependent-branching-checked-by-valgrind: true + large-stack-usage: false +- name: SQIsign-lvl3 + oqs_alg: OQS_SIG_alg_sqisign_lvl3 + claimed-nist-level: 3 + claimed-security: EUF-CMA + length-public-key: 97 + length-secret-key: 529 + length-signature: 224 + implementations-switch-on-runtime-cpu-features: true + implementations: + - upstream: primary-upstream + upstream-id: ref + supported-platforms: all + no-secret-dependent-branching-claimed: true + no-secret-dependent-branching-checked-by-valgrind: true + large-stack-usage: false + - upstream: primary-upstream + upstream-id: broadwell + supported-platforms: + - architecture: x86_64 + operating_systems: + - Darwin + - Linux + required_flags: + - avx2 + common-crypto: + - SHA3: liboqs + - AES: liboqs + no-secret-dependent-branching-claimed: true + no-secret-dependent-branching-checked-by-valgrind: true + large-stack-usage: false +- name: SQIsign-lvl5 + oqs_alg: OQS_SIG_alg_sqisign_lvl5 + claimed-nist-level: 5 + claimed-security: EUF-CMA + length-public-key: 129 + length-secret-key: 701 + length-signature: 292 + implementations-switch-on-runtime-cpu-features: true + implementations: + - upstream: primary-upstream + upstream-id: ref + supported-platforms: all + no-secret-dependent-branching-claimed: true + no-secret-dependent-branching-checked-by-valgrind: true + large-stack-usage: false + - upstream: primary-upstream + upstream-id: broadwell + supported-platforms: + - architecture: x86_64 + operating_systems: + - Darwin + - Linux + required_flags: + - avx2 + common-crypto: + - SHA3: liboqs + - 
AES: liboqs + no-secret-dependent-branching-claimed: true + no-secret-dependent-branching-checked-by-valgrind: true + large-stack-usage: false diff --git a/docs/cbom.json b/docs/cbom.json index 49960e61bf..6e0226c5e7 100644 --- a/docs/cbom.json +++ b/docs/cbom.json @@ -2,23 +2,23 @@ "$schema": "https://raw.githubusercontent.com/CycloneDX/specification/1.6/schema/bom-1.6.schema.json", "bomFormat": "CycloneDX", "specVersion": "1.6", - "serialNumber": "urn:uuid:c419036f-6fb1-4cf6-a6b4-a473e3c40e78", + "serialNumber": "urn:uuid:987f564e-99f0-4235-9897-8d8642143c75", "version": 1, "metadata": { - "timestamp": "2025-03-30T14:09:19.533630+00:00", + "timestamp": "2025-06-24T18:01:41.103680+00:00", "component": { "type": "library", - "bom-ref": "pkg:github/open-quantum-safe/liboqs@630b96f49ccba9557add3e8826fb072c00d18407", + "bom-ref": "pkg:github/open-quantum-safe/liboqs@1a4912bbbd71ef1ca1707f22aa987fd5ab4a38bf", "name": "liboqs", - "version": "630b96f49ccba9557add3e8826fb072c00d18407" + "version": "1a4912bbbd71ef1ca1707f22aa987fd5ab4a38bf" } }, "components": [ { "type": "library", - "bom-ref": "pkg:github/open-quantum-safe/liboqs@630b96f49ccba9557add3e8826fb072c00d18407", + "bom-ref": "pkg:github/open-quantum-safe/liboqs@1a4912bbbd71ef1ca1707f22aa987fd5ab4a38bf", "name": "liboqs", - "version": "630b96f49ccba9557add3e8826fb072c00d18407" + "version": "1a4912bbbd71ef1ca1707f22aa987fd5ab4a38bf" }, { "type": "cryptographic-asset", @@ -3960,6 +3960,126 @@ } } }, + { + "type": "cryptographic-asset", + "bom-ref": "alg:SQIsign-lvl1:generic", + "name": "SQIsign", + "cryptoProperties": { + "assetType": "algorithm", + "algorithmProperties": { + "parameterSetIdentifier": "SQIsign-lvl1", + "primitive": "signature", + "executionEnvironment": "software-plain-ram", + "cryptoFunctions": [ + "keygen", + "sign", + "verify" + ], + "nistQuantumSecurityLevel": 1, + "implementationPlatform": "generic" + } + } + }, + { + "type": "cryptographic-asset", + "bom-ref": "alg:SQIsign-lvl1:x86_64", + "name": "SQIsign", + "cryptoProperties": { + "assetType": "algorithm", + "algorithmProperties": { + "parameterSetIdentifier": "SQIsign-lvl1", + "primitive": "signature", + "executionEnvironment": "software-plain-ram", + "cryptoFunctions": [ + "keygen", + "sign", + "verify" + ], + "nistQuantumSecurityLevel": 1, + "implementationPlatform": "x86_64" + } + } + }, + { + "type": "cryptographic-asset", + "bom-ref": "alg:SQIsign-lvl3:generic", + "name": "SQIsign", + "cryptoProperties": { + "assetType": "algorithm", + "algorithmProperties": { + "parameterSetIdentifier": "SQIsign-lvl3", + "primitive": "signature", + "executionEnvironment": "software-plain-ram", + "cryptoFunctions": [ + "keygen", + "sign", + "verify" + ], + "nistQuantumSecurityLevel": 3, + "implementationPlatform": "generic" + } + } + }, + { + "type": "cryptographic-asset", + "bom-ref": "alg:SQIsign-lvl3:x86_64", + "name": "SQIsign", + "cryptoProperties": { + "assetType": "algorithm", + "algorithmProperties": { + "parameterSetIdentifier": "SQIsign-lvl3", + "primitive": "signature", + "executionEnvironment": "software-plain-ram", + "cryptoFunctions": [ + "keygen", + "sign", + "verify" + ], + "nistQuantumSecurityLevel": 3, + "implementationPlatform": "x86_64" + } + } + }, + { + "type": "cryptographic-asset", + "bom-ref": "alg:SQIsign-lvl5:generic", + "name": "SQIsign", + "cryptoProperties": { + "assetType": "algorithm", + "algorithmProperties": { + "parameterSetIdentifier": "SQIsign-lvl5", + "primitive": "signature", + "executionEnvironment": "software-plain-ram", + 
"cryptoFunctions": [ + "keygen", + "sign", + "verify" + ], + "nistQuantumSecurityLevel": 5, + "implementationPlatform": "generic" + } + } + }, + { + "type": "cryptographic-asset", + "bom-ref": "alg:SQIsign-lvl5:x86_64", + "name": "SQIsign", + "cryptoProperties": { + "assetType": "algorithm", + "algorithmProperties": { + "parameterSetIdentifier": "SQIsign-lvl5", + "primitive": "signature", + "executionEnvironment": "software-plain-ram", + "cryptoFunctions": [ + "keygen", + "sign", + "verify" + ], + "nistQuantumSecurityLevel": 5, + "implementationPlatform": "x86_64" + } + } + }, { "type": "cryptographic-asset", "bom-ref": "alg:OV-Is:generic", @@ -4707,7 +4827,7 @@ ], "dependencies": [ { - "ref": "pkg:github/open-quantum-safe/liboqs@630b96f49ccba9557add3e8826fb072c00d18407", + "ref": "pkg:github/open-quantum-safe/liboqs@1a4912bbbd71ef1ca1707f22aa987fd5ab4a38bf", "provides": [ "alg:BIKE-L1:x86_64", "alg:BIKE-L3:x86_64", @@ -4906,6 +5026,12 @@ "alg:SPHINCS+-SHAKE-256f-simple:x86_64", "alg:SPHINCS+-SHAKE-256s-simple:generic", "alg:SPHINCS+-SHAKE-256s-simple:x86_64", + "alg:SQIsign-lvl1:generic", + "alg:SQIsign-lvl1:x86_64", + "alg:SQIsign-lvl3:generic", + "alg:SQIsign-lvl3:x86_64", + "alg:SQIsign-lvl5:generic", + "alg:SQIsign-lvl5:x86_64", "alg:OV-Is:generic", "alg:OV-Is:armv8-a", "alg:OV-Is:x86_64", @@ -6108,6 +6234,27 @@ "alg:sha3" ] }, + { + "ref": "alg:SQIsign-lvl1:x86_64", + "dependsOn": [ + "alg:sha3", + "alg:aes" + ] + }, + { + "ref": "alg:SQIsign-lvl3:x86_64", + "dependsOn": [ + "alg:sha3", + "alg:aes" + ] + }, + { + "ref": "alg:SQIsign-lvl5:x86_64", + "dependsOn": [ + "alg:sha3", + "alg:aes" + ] + }, { "ref": "alg:OV-Is:generic", "dependsOn": [ diff --git a/scripts/copy_from_upstream/copy_from_upstream.yml b/scripts/copy_from_upstream/copy_from_upstream.yml index a1aba095e8..6f99a472bb 100644 --- a/scripts/copy_from_upstream/copy_from_upstream.yml +++ b/scripts/copy_from_upstream/copy_from_upstream.yml @@ -95,8 +95,8 @@ upstreams: - name: the-sqisign git_url: https://github.com/shane-digi/the-sqisign.git - git_branch: dev_oqs - git_commit: 080f0b5b7fca0a19a6a2cfb3c1eae3c0ddccc4c3 + git_branch: oqs + git_commit: a8c8a3d5acbeaecec179d9088c8eab7216db4db5 sig_scheme_path: '.' 
sig_meta_path: 'META/{pqclean_scheme}.yml' patches: [sqisign_fp.patch, sqisign_namespace.patch] diff --git a/src/sig/sqisign/CMakeLists.txt b/src/sig/sqisign/CMakeLists.txt index 6d33b75fe1..0b344b054c 100644 --- a/src/sig/sqisign/CMakeLists.txt +++ b/src/sig/sqisign/CMakeLists.txt @@ -6,7 +6,7 @@ set(_SQISIGN_OBJS "") if(OQS_ENABLE_SIG_sqisign_lvl1) - add_library(sqisign_lvl1_ref OBJECT sig_sqisign_lvl1.c the-sqisign_sqisign_lvl1_ref/aes_c.c the-sqisign_sqisign_lvl1_ref/algebra.c the-sqisign_sqisign_lvl1_ref/api.c the-sqisign_sqisign_lvl1_ref/basis.c the-sqisign_sqisign_lvl1_ref/biextension.c the-sqisign_sqisign_lvl1_ref/common.c the-sqisign_sqisign_lvl1_ref/dim2.c the-sqisign_sqisign_lvl1_ref/dim2id2iso.c the-sqisign_sqisign_lvl1_ref/dim4.c the-sqisign_sqisign_lvl1_ref/e0_basis.c the-sqisign_sqisign_lvl1_ref/ec.c the-sqisign_sqisign_lvl1_ref/ec_jac.c the-sqisign_sqisign_lvl1_ref/ec_params.c the-sqisign_sqisign_lvl1_ref/encode_signature.c the-sqisign_sqisign_lvl1_ref/encode_verification.c the-sqisign_sqisign_lvl1_ref/endomorphism_action.c the-sqisign_sqisign_lvl1_ref/finit.c the-sqisign_sqisign_lvl1_ref/fips202.c the-sqisign_sqisign_lvl1_ref/fp.c the-sqisign_sqisign_lvl1_ref/fp2.c the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c the-sqisign_sqisign_lvl1_ref/hd.c the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl1_ref/hnf.c the-sqisign_sqisign_lvl1_ref/hnf_internal.c the-sqisign_sqisign_lvl1_ref/ibz_division.c the-sqisign_sqisign_lvl1_ref/id2iso.c the-sqisign_sqisign_lvl1_ref/ideal.c the-sqisign_sqisign_lvl1_ref/intbig.c the-sqisign_sqisign_lvl1_ref/integers.c the-sqisign_sqisign_lvl1_ref/isog_chains.c the-sqisign_sqisign_lvl1_ref/keygen.c the-sqisign_sqisign_lvl1_ref/l2.c the-sqisign_sqisign_lvl1_ref/lat_ball.c the-sqisign_sqisign_lvl1_ref/lattice.c the-sqisign_sqisign_lvl1_ref/lll_applications.c the-sqisign_sqisign_lvl1_ref/mem.c the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl1_ref/mini-gmp.c the-sqisign_sqisign_lvl1_ref/mp.c the-sqisign_sqisign_lvl1_ref/normeq.c the-sqisign_sqisign_lvl1_ref/printer.c the-sqisign_sqisign_lvl1_ref/quaternion_data.c the-sqisign_sqisign_lvl1_ref/randombytes_ctrdrbg.c the-sqisign_sqisign_lvl1_ref/rationals.c the-sqisign_sqisign_lvl1_ref/sign.c the-sqisign_sqisign_lvl1_ref/sqisign.c the-sqisign_sqisign_lvl1_ref/theta_isogenies.c the-sqisign_sqisign_lvl1_ref/theta_structure.c the-sqisign_sqisign_lvl1_ref/torsion_constants.c the-sqisign_sqisign_lvl1_ref/verify.c the-sqisign_sqisign_lvl1_ref/xeval.c the-sqisign_sqisign_lvl1_ref/xisog.c) + add_library(sqisign_lvl1_ref OBJECT sig_sqisign_lvl1.c the-sqisign_sqisign_lvl1_ref/aes_c.c the-sqisign_sqisign_lvl1_ref/algebra.c the-sqisign_sqisign_lvl1_ref/api.c the-sqisign_sqisign_lvl1_ref/basis.c the-sqisign_sqisign_lvl1_ref/biextension.c the-sqisign_sqisign_lvl1_ref/common.c the-sqisign_sqisign_lvl1_ref/dim2.c the-sqisign_sqisign_lvl1_ref/dim2id2iso.c the-sqisign_sqisign_lvl1_ref/dim4.c the-sqisign_sqisign_lvl1_ref/e0_basis.c the-sqisign_sqisign_lvl1_ref/ec.c the-sqisign_sqisign_lvl1_ref/ec_jac.c the-sqisign_sqisign_lvl1_ref/ec_params.c the-sqisign_sqisign_lvl1_ref/encode_signature.c the-sqisign_sqisign_lvl1_ref/encode_verification.c the-sqisign_sqisign_lvl1_ref/endomorphism_action.c the-sqisign_sqisign_lvl1_ref/finit.c the-sqisign_sqisign_lvl1_ref/fips202.c the-sqisign_sqisign_lvl1_ref/fp.c the-sqisign_sqisign_lvl1_ref/fp2.c the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c 
the-sqisign_sqisign_lvl1_ref/hd.c the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl1_ref/hnf.c the-sqisign_sqisign_lvl1_ref/hnf_internal.c the-sqisign_sqisign_lvl1_ref/ibz_division.c the-sqisign_sqisign_lvl1_ref/id2iso.c the-sqisign_sqisign_lvl1_ref/ideal.c the-sqisign_sqisign_lvl1_ref/intbig.c the-sqisign_sqisign_lvl1_ref/integers.c the-sqisign_sqisign_lvl1_ref/isog_chains.c the-sqisign_sqisign_lvl1_ref/keygen.c the-sqisign_sqisign_lvl1_ref/l2.c the-sqisign_sqisign_lvl1_ref/lat_ball.c the-sqisign_sqisign_lvl1_ref/lattice.c the-sqisign_sqisign_lvl1_ref/lll_applications.c the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl1_ref/mini-gmp.c the-sqisign_sqisign_lvl1_ref/mp.c the-sqisign_sqisign_lvl1_ref/normeq.c the-sqisign_sqisign_lvl1_ref/printer.c the-sqisign_sqisign_lvl1_ref/quaternion_data.c the-sqisign_sqisign_lvl1_ref/randombytes_ctrdrbg.c the-sqisign_sqisign_lvl1_ref/rationals.c the-sqisign_sqisign_lvl1_ref/sign.c the-sqisign_sqisign_lvl1_ref/sqisign.c the-sqisign_sqisign_lvl1_ref/theta_isogenies.c the-sqisign_sqisign_lvl1_ref/theta_structure.c the-sqisign_sqisign_lvl1_ref/torsion_constants.c the-sqisign_sqisign_lvl1_ref/verify.c the-sqisign_sqisign_lvl1_ref/xeval.c the-sqisign_sqisign_lvl1_ref/xisog.c) target_compile_options(sqisign_lvl1_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl1) target_include_directories(sqisign_lvl1_ref PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl1_ref) target_include_directories(sqisign_lvl1_ref PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) @@ -15,7 +15,7 @@ if(OQS_ENABLE_SIG_sqisign_lvl1) endif() if(OQS_ENABLE_SIG_sqisign_lvl1_broadwell) - add_library(sqisign_lvl1_broadwell OBJECT the-sqisign_sqisign_lvl1_broadwell/aes_ni.c the-sqisign_sqisign_lvl1_broadwell/api.c the-sqisign_sqisign_lvl1_broadwell/basis.c the-sqisign_sqisign_lvl1_broadwell/biextension.c the-sqisign_sqisign_lvl1_broadwell/common.c the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.c the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl1_broadwell/e0_basis.c the-sqisign_sqisign_lvl1_broadwell/ec.c the-sqisign_sqisign_lvl1_broadwell/ec_jac.c the-sqisign_sqisign_lvl1_broadwell/ec_params.c the-sqisign_sqisign_lvl1_broadwell/encode_signature.c the-sqisign_sqisign_lvl1_broadwell/encode_verification.c the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl1_broadwell/fips202.c the-sqisign_sqisign_lvl1_broadwell/fp.c the-sqisign_sqisign_lvl1_broadwell/fp2.c the-sqisign_sqisign_lvl1_broadwell/fp_asm.S the-sqisign_sqisign_lvl1_broadwell/gf5248.c the-sqisign_sqisign_lvl1_broadwell/hd.c the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl1_broadwell/id2iso.c the-sqisign_sqisign_lvl1_broadwell/isog_chains.c the-sqisign_sqisign_lvl1_broadwell/keygen.c the-sqisign_sqisign_lvl1_broadwell/mem.c the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl1_broadwell/mini-gmp.c the-sqisign_sqisign_lvl1_broadwell/quaternion_data.c the-sqisign_sqisign_lvl1_broadwell/sign.c the-sqisign_sqisign_lvl1_broadwell/sqisign.c the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl1_broadwell/theta_structure.c the-sqisign_sqisign_lvl1_broadwell/torsion_constants.c the-sqisign_sqisign_lvl1_broadwell/vaes256_key_expansion.S the-sqisign_sqisign_lvl1_broadwell/verify.c the-sqisign_sqisign_lvl1_broadwell/xeval.c the-sqisign_sqisign_lvl1_broadwell/xisog.c) + add_library(sqisign_lvl1_broadwell OBJECT 
the-sqisign_sqisign_lvl1_broadwell/aes_ni.c the-sqisign_sqisign_lvl1_broadwell/api.c the-sqisign_sqisign_lvl1_broadwell/basis.c the-sqisign_sqisign_lvl1_broadwell/biextension.c the-sqisign_sqisign_lvl1_broadwell/common.c the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.c the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl1_broadwell/e0_basis.c the-sqisign_sqisign_lvl1_broadwell/ec.c the-sqisign_sqisign_lvl1_broadwell/ec_jac.c the-sqisign_sqisign_lvl1_broadwell/ec_params.c the-sqisign_sqisign_lvl1_broadwell/encode_signature.c the-sqisign_sqisign_lvl1_broadwell/encode_verification.c the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl1_broadwell/fips202.c the-sqisign_sqisign_lvl1_broadwell/fp.c the-sqisign_sqisign_lvl1_broadwell/fp2.c the-sqisign_sqisign_lvl1_broadwell/fp_asm.S the-sqisign_sqisign_lvl1_broadwell/gf5248.c the-sqisign_sqisign_lvl1_broadwell/hd.c the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl1_broadwell/id2iso.c the-sqisign_sqisign_lvl1_broadwell/isog_chains.c the-sqisign_sqisign_lvl1_broadwell/keygen.c the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl1_broadwell/mini-gmp.c the-sqisign_sqisign_lvl1_broadwell/quaternion_data.c the-sqisign_sqisign_lvl1_broadwell/sign.c the-sqisign_sqisign_lvl1_broadwell/sqisign.c the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl1_broadwell/theta_structure.c the-sqisign_sqisign_lvl1_broadwell/torsion_constants.c the-sqisign_sqisign_lvl1_broadwell/vaes256_key_expansion.S the-sqisign_sqisign_lvl1_broadwell/verify.c the-sqisign_sqisign_lvl1_broadwell/xeval.c the-sqisign_sqisign_lvl1_broadwell/xisog.c) target_include_directories(sqisign_lvl1_broadwell PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl1_broadwell) target_include_directories(sqisign_lvl1_broadwell PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) target_compile_options(sqisign_lvl1_broadwell PRIVATE -mavx2) @@ -24,7 +24,7 @@ if(OQS_ENABLE_SIG_sqisign_lvl1_broadwell) endif() if(OQS_ENABLE_SIG_sqisign_lvl3) - add_library(sqisign_lvl3_ref OBJECT sig_sqisign_lvl3.c the-sqisign_sqisign_lvl3_ref/aes_c.c the-sqisign_sqisign_lvl3_ref/algebra.c the-sqisign_sqisign_lvl3_ref/api.c the-sqisign_sqisign_lvl3_ref/basis.c the-sqisign_sqisign_lvl3_ref/biextension.c the-sqisign_sqisign_lvl3_ref/common.c the-sqisign_sqisign_lvl3_ref/dim2.c the-sqisign_sqisign_lvl3_ref/dim2id2iso.c the-sqisign_sqisign_lvl3_ref/dim4.c the-sqisign_sqisign_lvl3_ref/e0_basis.c the-sqisign_sqisign_lvl3_ref/ec.c the-sqisign_sqisign_lvl3_ref/ec_jac.c the-sqisign_sqisign_lvl3_ref/ec_params.c the-sqisign_sqisign_lvl3_ref/encode_signature.c the-sqisign_sqisign_lvl3_ref/encode_verification.c the-sqisign_sqisign_lvl3_ref/endomorphism_action.c the-sqisign_sqisign_lvl3_ref/finit.c the-sqisign_sqisign_lvl3_ref/fips202.c the-sqisign_sqisign_lvl3_ref/fp.c the-sqisign_sqisign_lvl3_ref/fp2.c the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c the-sqisign_sqisign_lvl3_ref/hd.c the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl3_ref/hnf.c the-sqisign_sqisign_lvl3_ref/hnf_internal.c the-sqisign_sqisign_lvl3_ref/ibz_division.c the-sqisign_sqisign_lvl3_ref/id2iso.c the-sqisign_sqisign_lvl3_ref/ideal.c the-sqisign_sqisign_lvl3_ref/intbig.c the-sqisign_sqisign_lvl3_ref/integers.c the-sqisign_sqisign_lvl3_ref/isog_chains.c the-sqisign_sqisign_lvl3_ref/keygen.c the-sqisign_sqisign_lvl3_ref/l2.c the-sqisign_sqisign_lvl3_ref/lat_ball.c 
the-sqisign_sqisign_lvl3_ref/lattice.c the-sqisign_sqisign_lvl3_ref/lll_applications.c the-sqisign_sqisign_lvl3_ref/mem.c the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl3_ref/mini-gmp.c the-sqisign_sqisign_lvl3_ref/mp.c the-sqisign_sqisign_lvl3_ref/normeq.c the-sqisign_sqisign_lvl3_ref/printer.c the-sqisign_sqisign_lvl3_ref/quaternion_data.c the-sqisign_sqisign_lvl3_ref/rationals.c the-sqisign_sqisign_lvl3_ref/sign.c the-sqisign_sqisign_lvl3_ref/sqisign.c the-sqisign_sqisign_lvl3_ref/theta_isogenies.c the-sqisign_sqisign_lvl3_ref/theta_structure.c the-sqisign_sqisign_lvl3_ref/torsion_constants.c the-sqisign_sqisign_lvl3_ref/verify.c the-sqisign_sqisign_lvl3_ref/xeval.c the-sqisign_sqisign_lvl3_ref/xisog.c) + add_library(sqisign_lvl3_ref OBJECT sig_sqisign_lvl3.c the-sqisign_sqisign_lvl3_ref/aes_c.c the-sqisign_sqisign_lvl3_ref/algebra.c the-sqisign_sqisign_lvl3_ref/api.c the-sqisign_sqisign_lvl3_ref/basis.c the-sqisign_sqisign_lvl3_ref/biextension.c the-sqisign_sqisign_lvl3_ref/common.c the-sqisign_sqisign_lvl3_ref/dim2.c the-sqisign_sqisign_lvl3_ref/dim2id2iso.c the-sqisign_sqisign_lvl3_ref/dim4.c the-sqisign_sqisign_lvl3_ref/e0_basis.c the-sqisign_sqisign_lvl3_ref/ec.c the-sqisign_sqisign_lvl3_ref/ec_jac.c the-sqisign_sqisign_lvl3_ref/ec_params.c the-sqisign_sqisign_lvl3_ref/encode_signature.c the-sqisign_sqisign_lvl3_ref/encode_verification.c the-sqisign_sqisign_lvl3_ref/endomorphism_action.c the-sqisign_sqisign_lvl3_ref/finit.c the-sqisign_sqisign_lvl3_ref/fips202.c the-sqisign_sqisign_lvl3_ref/fp.c the-sqisign_sqisign_lvl3_ref/fp2.c the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c the-sqisign_sqisign_lvl3_ref/hd.c the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl3_ref/hnf.c the-sqisign_sqisign_lvl3_ref/hnf_internal.c the-sqisign_sqisign_lvl3_ref/ibz_division.c the-sqisign_sqisign_lvl3_ref/id2iso.c the-sqisign_sqisign_lvl3_ref/ideal.c the-sqisign_sqisign_lvl3_ref/intbig.c the-sqisign_sqisign_lvl3_ref/integers.c the-sqisign_sqisign_lvl3_ref/isog_chains.c the-sqisign_sqisign_lvl3_ref/keygen.c the-sqisign_sqisign_lvl3_ref/l2.c the-sqisign_sqisign_lvl3_ref/lat_ball.c the-sqisign_sqisign_lvl3_ref/lattice.c the-sqisign_sqisign_lvl3_ref/lll_applications.c the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl3_ref/mini-gmp.c the-sqisign_sqisign_lvl3_ref/mp.c the-sqisign_sqisign_lvl3_ref/normeq.c the-sqisign_sqisign_lvl3_ref/printer.c the-sqisign_sqisign_lvl3_ref/quaternion_data.c the-sqisign_sqisign_lvl3_ref/rationals.c the-sqisign_sqisign_lvl3_ref/sign.c the-sqisign_sqisign_lvl3_ref/sqisign.c the-sqisign_sqisign_lvl3_ref/theta_isogenies.c the-sqisign_sqisign_lvl3_ref/theta_structure.c the-sqisign_sqisign_lvl3_ref/torsion_constants.c the-sqisign_sqisign_lvl3_ref/verify.c the-sqisign_sqisign_lvl3_ref/xeval.c the-sqisign_sqisign_lvl3_ref/xisog.c) target_compile_options(sqisign_lvl3_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl3) target_include_directories(sqisign_lvl3_ref PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl3_ref) target_include_directories(sqisign_lvl3_ref PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) @@ -33,7 +33,7 @@ if(OQS_ENABLE_SIG_sqisign_lvl3) endif() if(OQS_ENABLE_SIG_sqisign_lvl3_broadwell) - add_library(sqisign_lvl3_broadwell OBJECT the-sqisign_sqisign_lvl3_broadwell/aes_ni.c the-sqisign_sqisign_lvl3_broadwell/api.c the-sqisign_sqisign_lvl3_broadwell/basis.c 
the-sqisign_sqisign_lvl3_broadwell/biextension.c the-sqisign_sqisign_lvl3_broadwell/common.c the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.c the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl3_broadwell/e0_basis.c the-sqisign_sqisign_lvl3_broadwell/ec.c the-sqisign_sqisign_lvl3_broadwell/ec_jac.c the-sqisign_sqisign_lvl3_broadwell/ec_params.c the-sqisign_sqisign_lvl3_broadwell/encode_signature.c the-sqisign_sqisign_lvl3_broadwell/encode_verification.c the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl3_broadwell/fips202.c the-sqisign_sqisign_lvl3_broadwell/fp.c the-sqisign_sqisign_lvl3_broadwell/fp2.c the-sqisign_sqisign_lvl3_broadwell/fp_asm.S the-sqisign_sqisign_lvl3_broadwell/gf65376.c the-sqisign_sqisign_lvl3_broadwell/hd.c the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl3_broadwell/id2iso.c the-sqisign_sqisign_lvl3_broadwell/isog_chains.c the-sqisign_sqisign_lvl3_broadwell/keygen.c the-sqisign_sqisign_lvl3_broadwell/mem.c the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl3_broadwell/mini-gmp.c the-sqisign_sqisign_lvl3_broadwell/quaternion_data.c the-sqisign_sqisign_lvl3_broadwell/randombytes_ctrdrbg_aesni.c the-sqisign_sqisign_lvl3_broadwell/sign.c the-sqisign_sqisign_lvl3_broadwell/sqisign.c the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl3_broadwell/theta_structure.c the-sqisign_sqisign_lvl3_broadwell/torsion_constants.c the-sqisign_sqisign_lvl3_broadwell/vaes256_key_expansion.S the-sqisign_sqisign_lvl3_broadwell/verify.c the-sqisign_sqisign_lvl3_broadwell/xeval.c the-sqisign_sqisign_lvl3_broadwell/xisog.c) + add_library(sqisign_lvl3_broadwell OBJECT the-sqisign_sqisign_lvl3_broadwell/aes_ni.c the-sqisign_sqisign_lvl3_broadwell/api.c the-sqisign_sqisign_lvl3_broadwell/basis.c the-sqisign_sqisign_lvl3_broadwell/biextension.c the-sqisign_sqisign_lvl3_broadwell/common.c the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.c the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl3_broadwell/e0_basis.c the-sqisign_sqisign_lvl3_broadwell/ec.c the-sqisign_sqisign_lvl3_broadwell/ec_jac.c the-sqisign_sqisign_lvl3_broadwell/ec_params.c the-sqisign_sqisign_lvl3_broadwell/encode_signature.c the-sqisign_sqisign_lvl3_broadwell/encode_verification.c the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl3_broadwell/fips202.c the-sqisign_sqisign_lvl3_broadwell/fp.c the-sqisign_sqisign_lvl3_broadwell/fp2.c the-sqisign_sqisign_lvl3_broadwell/fp_asm.S the-sqisign_sqisign_lvl3_broadwell/gf65376.c the-sqisign_sqisign_lvl3_broadwell/hd.c the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl3_broadwell/id2iso.c the-sqisign_sqisign_lvl3_broadwell/isog_chains.c the-sqisign_sqisign_lvl3_broadwell/keygen.c the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl3_broadwell/mini-gmp.c the-sqisign_sqisign_lvl3_broadwell/quaternion_data.c the-sqisign_sqisign_lvl3_broadwell/randombytes_ctrdrbg_aesni.c the-sqisign_sqisign_lvl3_broadwell/sign.c the-sqisign_sqisign_lvl3_broadwell/sqisign.c the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl3_broadwell/theta_structure.c the-sqisign_sqisign_lvl3_broadwell/torsion_constants.c the-sqisign_sqisign_lvl3_broadwell/vaes256_key_expansion.S the-sqisign_sqisign_lvl3_broadwell/verify.c the-sqisign_sqisign_lvl3_broadwell/xeval.c the-sqisign_sqisign_lvl3_broadwell/xisog.c) target_include_directories(sqisign_lvl3_broadwell PRIVATE 
${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl3_broadwell) target_include_directories(sqisign_lvl3_broadwell PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) target_compile_options(sqisign_lvl3_broadwell PRIVATE -mavx2) @@ -42,7 +42,7 @@ if(OQS_ENABLE_SIG_sqisign_lvl3_broadwell) endif() if(OQS_ENABLE_SIG_sqisign_lvl5) - add_library(sqisign_lvl5_ref OBJECT sig_sqisign_lvl5.c the-sqisign_sqisign_lvl5_ref/aes_c.c the-sqisign_sqisign_lvl5_ref/algebra.c the-sqisign_sqisign_lvl5_ref/api.c the-sqisign_sqisign_lvl5_ref/basis.c the-sqisign_sqisign_lvl5_ref/biextension.c the-sqisign_sqisign_lvl5_ref/common.c the-sqisign_sqisign_lvl5_ref/dim2.c the-sqisign_sqisign_lvl5_ref/dim2id2iso.c the-sqisign_sqisign_lvl5_ref/dim4.c the-sqisign_sqisign_lvl5_ref/e0_basis.c the-sqisign_sqisign_lvl5_ref/ec.c the-sqisign_sqisign_lvl5_ref/ec_jac.c the-sqisign_sqisign_lvl5_ref/ec_params.c the-sqisign_sqisign_lvl5_ref/encode_signature.c the-sqisign_sqisign_lvl5_ref/encode_verification.c the-sqisign_sqisign_lvl5_ref/endomorphism_action.c the-sqisign_sqisign_lvl5_ref/finit.c the-sqisign_sqisign_lvl5_ref/fips202.c the-sqisign_sqisign_lvl5_ref/fp.c the-sqisign_sqisign_lvl5_ref/fp2.c the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c the-sqisign_sqisign_lvl5_ref/hd.c the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl5_ref/hnf.c the-sqisign_sqisign_lvl5_ref/hnf_internal.c the-sqisign_sqisign_lvl5_ref/ibz_division.c the-sqisign_sqisign_lvl5_ref/id2iso.c the-sqisign_sqisign_lvl5_ref/ideal.c the-sqisign_sqisign_lvl5_ref/intbig.c the-sqisign_sqisign_lvl5_ref/integers.c the-sqisign_sqisign_lvl5_ref/isog_chains.c the-sqisign_sqisign_lvl5_ref/keygen.c the-sqisign_sqisign_lvl5_ref/l2.c the-sqisign_sqisign_lvl5_ref/lat_ball.c the-sqisign_sqisign_lvl5_ref/lattice.c the-sqisign_sqisign_lvl5_ref/lll_applications.c the-sqisign_sqisign_lvl5_ref/mem.c the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl5_ref/mini-gmp.c the-sqisign_sqisign_lvl5_ref/mp.c the-sqisign_sqisign_lvl5_ref/normeq.c the-sqisign_sqisign_lvl5_ref/printer.c the-sqisign_sqisign_lvl5_ref/quaternion_data.c the-sqisign_sqisign_lvl5_ref/rationals.c the-sqisign_sqisign_lvl5_ref/sign.c the-sqisign_sqisign_lvl5_ref/sqisign.c the-sqisign_sqisign_lvl5_ref/theta_isogenies.c the-sqisign_sqisign_lvl5_ref/theta_structure.c the-sqisign_sqisign_lvl5_ref/torsion_constants.c the-sqisign_sqisign_lvl5_ref/verify.c the-sqisign_sqisign_lvl5_ref/xeval.c the-sqisign_sqisign_lvl5_ref/xisog.c) + add_library(sqisign_lvl5_ref OBJECT sig_sqisign_lvl5.c the-sqisign_sqisign_lvl5_ref/aes_c.c the-sqisign_sqisign_lvl5_ref/algebra.c the-sqisign_sqisign_lvl5_ref/api.c the-sqisign_sqisign_lvl5_ref/basis.c the-sqisign_sqisign_lvl5_ref/biextension.c the-sqisign_sqisign_lvl5_ref/common.c the-sqisign_sqisign_lvl5_ref/dim2.c the-sqisign_sqisign_lvl5_ref/dim2id2iso.c the-sqisign_sqisign_lvl5_ref/dim4.c the-sqisign_sqisign_lvl5_ref/e0_basis.c the-sqisign_sqisign_lvl5_ref/ec.c the-sqisign_sqisign_lvl5_ref/ec_jac.c the-sqisign_sqisign_lvl5_ref/ec_params.c the-sqisign_sqisign_lvl5_ref/encode_signature.c the-sqisign_sqisign_lvl5_ref/encode_verification.c the-sqisign_sqisign_lvl5_ref/endomorphism_action.c the-sqisign_sqisign_lvl5_ref/finit.c the-sqisign_sqisign_lvl5_ref/fips202.c the-sqisign_sqisign_lvl5_ref/fp.c the-sqisign_sqisign_lvl5_ref/fp2.c the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c the-sqisign_sqisign_lvl5_ref/hd.c the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.c 
the-sqisign_sqisign_lvl5_ref/hnf.c the-sqisign_sqisign_lvl5_ref/hnf_internal.c the-sqisign_sqisign_lvl5_ref/ibz_division.c the-sqisign_sqisign_lvl5_ref/id2iso.c the-sqisign_sqisign_lvl5_ref/ideal.c the-sqisign_sqisign_lvl5_ref/intbig.c the-sqisign_sqisign_lvl5_ref/integers.c the-sqisign_sqisign_lvl5_ref/isog_chains.c the-sqisign_sqisign_lvl5_ref/keygen.c the-sqisign_sqisign_lvl5_ref/l2.c the-sqisign_sqisign_lvl5_ref/lat_ball.c the-sqisign_sqisign_lvl5_ref/lattice.c the-sqisign_sqisign_lvl5_ref/lll_applications.c the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl5_ref/mini-gmp.c the-sqisign_sqisign_lvl5_ref/mp.c the-sqisign_sqisign_lvl5_ref/normeq.c the-sqisign_sqisign_lvl5_ref/printer.c the-sqisign_sqisign_lvl5_ref/quaternion_data.c the-sqisign_sqisign_lvl5_ref/rationals.c the-sqisign_sqisign_lvl5_ref/sign.c the-sqisign_sqisign_lvl5_ref/sqisign.c the-sqisign_sqisign_lvl5_ref/theta_isogenies.c the-sqisign_sqisign_lvl5_ref/theta_structure.c the-sqisign_sqisign_lvl5_ref/torsion_constants.c the-sqisign_sqisign_lvl5_ref/verify.c the-sqisign_sqisign_lvl5_ref/xeval.c the-sqisign_sqisign_lvl5_ref/xisog.c) target_compile_options(sqisign_lvl5_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl5) target_include_directories(sqisign_lvl5_ref PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl5_ref) target_include_directories(sqisign_lvl5_ref PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) @@ -51,7 +51,7 @@ if(OQS_ENABLE_SIG_sqisign_lvl5) endif() if(OQS_ENABLE_SIG_sqisign_lvl5_broadwell) - add_library(sqisign_lvl5_broadwell OBJECT the-sqisign_sqisign_lvl5_broadwell/aes_ni.c the-sqisign_sqisign_lvl5_broadwell/api.c the-sqisign_sqisign_lvl5_broadwell/basis.c the-sqisign_sqisign_lvl5_broadwell/biextension.c the-sqisign_sqisign_lvl5_broadwell/common.c the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.c the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl5_broadwell/e0_basis.c the-sqisign_sqisign_lvl5_broadwell/ec.c the-sqisign_sqisign_lvl5_broadwell/ec_jac.c the-sqisign_sqisign_lvl5_broadwell/ec_params.c the-sqisign_sqisign_lvl5_broadwell/encode_signature.c the-sqisign_sqisign_lvl5_broadwell/encode_verification.c the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl5_broadwell/fips202.c the-sqisign_sqisign_lvl5_broadwell/fp.c the-sqisign_sqisign_lvl5_broadwell/fp2.c the-sqisign_sqisign_lvl5_broadwell/fp_asm.S the-sqisign_sqisign_lvl5_broadwell/gf27500.c the-sqisign_sqisign_lvl5_broadwell/hd.c the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl5_broadwell/id2iso.c the-sqisign_sqisign_lvl5_broadwell/isog_chains.c the-sqisign_sqisign_lvl5_broadwell/keygen.c the-sqisign_sqisign_lvl5_broadwell/mem.c the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl5_broadwell/mini-gmp.c the-sqisign_sqisign_lvl5_broadwell/quaternion_data.c the-sqisign_sqisign_lvl5_broadwell/sign.c the-sqisign_sqisign_lvl5_broadwell/sqisign.c the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl5_broadwell/theta_structure.c the-sqisign_sqisign_lvl5_broadwell/torsion_constants.c the-sqisign_sqisign_lvl5_broadwell/vaes256_key_expansion.S the-sqisign_sqisign_lvl5_broadwell/verify.c the-sqisign_sqisign_lvl5_broadwell/xeval.c the-sqisign_sqisign_lvl5_broadwell/xisog.c) + add_library(sqisign_lvl5_broadwell OBJECT the-sqisign_sqisign_lvl5_broadwell/aes_ni.c the-sqisign_sqisign_lvl5_broadwell/api.c the-sqisign_sqisign_lvl5_broadwell/basis.c 
the-sqisign_sqisign_lvl5_broadwell/biextension.c the-sqisign_sqisign_lvl5_broadwell/common.c the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.c the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl5_broadwell/e0_basis.c the-sqisign_sqisign_lvl5_broadwell/ec.c the-sqisign_sqisign_lvl5_broadwell/ec_jac.c the-sqisign_sqisign_lvl5_broadwell/ec_params.c the-sqisign_sqisign_lvl5_broadwell/encode_signature.c the-sqisign_sqisign_lvl5_broadwell/encode_verification.c the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl5_broadwell/fips202.c the-sqisign_sqisign_lvl5_broadwell/fp.c the-sqisign_sqisign_lvl5_broadwell/fp2.c the-sqisign_sqisign_lvl5_broadwell/fp_asm.S the-sqisign_sqisign_lvl5_broadwell/gf27500.c the-sqisign_sqisign_lvl5_broadwell/hd.c the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl5_broadwell/id2iso.c the-sqisign_sqisign_lvl5_broadwell/isog_chains.c the-sqisign_sqisign_lvl5_broadwell/keygen.c the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl5_broadwell/mini-gmp.c the-sqisign_sqisign_lvl5_broadwell/quaternion_data.c the-sqisign_sqisign_lvl5_broadwell/sign.c the-sqisign_sqisign_lvl5_broadwell/sqisign.c the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl5_broadwell/theta_structure.c the-sqisign_sqisign_lvl5_broadwell/torsion_constants.c the-sqisign_sqisign_lvl5_broadwell/vaes256_key_expansion.S the-sqisign_sqisign_lvl5_broadwell/verify.c the-sqisign_sqisign_lvl5_broadwell/xeval.c the-sqisign_sqisign_lvl5_broadwell/xisog.c) target_include_directories(sqisign_lvl5_broadwell PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl5_broadwell) target_include_directories(sqisign_lvl5_broadwell PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) target_compile_options(sqisign_lvl5_broadwell PRIVATE -mavx2) diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mem.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mem.c deleted file mode 100644 index 4956beda50..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mem.c +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -#include -#include -#include - -void -sqisign_secure_free(void *mem, size_t size) -{ - if (mem) { - typedef void *(*memset_t)(void *, int, size_t); - static volatile memset_t memset_func = memset; - memset_func(mem, 0, size); - free(mem); - } -} -void -sqisign_secure_clear(void *mem, size_t size) -{ - typedef void *(*memset_t)(void *, int, size_t); - static volatile memset_t memset_func = memset; - memset_func(mem, 0, size); -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mem.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mem.h deleted file mode 100644 index ab8f6c6481..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mem.h +++ /dev/null @@ -1,24 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -#ifndef MEM_H -#define MEM_H -#include -#include - -/** - * Clears and frees allocated memory. - * - * @param[out] mem Memory to be cleared and freed. - * @param size Size of memory to be cleared and freed. - */ -void sqisign_secure_free(void *mem, size_t size); - -/** - * Clears memory. - * - * @param[out] mem Memory to be cleared. - * @param size Size of memory to be cleared. 
- */ -void sqisign_secure_clear(void *mem, size_t size); - -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mem.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mem.c deleted file mode 100644 index 4956beda50..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mem.c +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -#include -#include -#include - -void -sqisign_secure_free(void *mem, size_t size) -{ - if (mem) { - typedef void *(*memset_t)(void *, int, size_t); - static volatile memset_t memset_func = memset; - memset_func(mem, 0, size); - free(mem); - } -} -void -sqisign_secure_clear(void *mem, size_t size) -{ - typedef void *(*memset_t)(void *, int, size_t); - static volatile memset_t memset_func = memset; - memset_func(mem, 0, size); -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mem.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mem.h deleted file mode 100644 index ab8f6c6481..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mem.h +++ /dev/null @@ -1,24 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -#ifndef MEM_H -#define MEM_H -#include -#include - -/** - * Clears and frees allocated memory. - * - * @param[out] mem Memory to be cleared and freed. - * @param size Size of memory to be cleared and freed. - */ -void sqisign_secure_free(void *mem, size_t size); - -/** - * Clears memory. - * - * @param[out] mem Memory to be cleared. - * @param size Size of memory to be cleared. - */ -void sqisign_secure_clear(void *mem, size_t size); - -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mem.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mem.c deleted file mode 100644 index 4956beda50..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mem.c +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -#include -#include -#include - -void -sqisign_secure_free(void *mem, size_t size) -{ - if (mem) { - typedef void *(*memset_t)(void *, int, size_t); - static volatile memset_t memset_func = memset; - memset_func(mem, 0, size); - free(mem); - } -} -void -sqisign_secure_clear(void *mem, size_t size) -{ - typedef void *(*memset_t)(void *, int, size_t); - static volatile memset_t memset_func = memset; - memset_func(mem, 0, size); -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mem.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mem.c deleted file mode 100644 index 4956beda50..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mem.c +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -#include -#include -#include - -void -sqisign_secure_free(void *mem, size_t size) -{ - if (mem) { - typedef void *(*memset_t)(void *, int, size_t); - static volatile memset_t memset_func = memset; - memset_func(mem, 0, size); - free(mem); - } -} -void -sqisign_secure_clear(void *mem, size_t size) -{ - typedef void *(*memset_t)(void *, int, size_t); - static volatile memset_t memset_func = memset; - memset_func(mem, 0, size); -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mem.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mem.h deleted file mode 100644 index ab8f6c6481..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mem.h +++ /dev/null @@ -1,24 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -#ifndef MEM_H -#define MEM_H -#include -#include - -/** - * Clears and frees allocated memory. - * - * @param[out] mem Memory to be cleared and freed. - * @param size Size of memory to be cleared and freed. 
- */ -void sqisign_secure_free(void *mem, size_t size); - -/** - * Clears memory. - * - * @param[out] mem Memory to be cleared. - * @param size Size of memory to be cleared. - */ -void sqisign_secure_clear(void *mem, size_t size); - -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mem.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mem.c deleted file mode 100644 index 4956beda50..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mem.c +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -#include -#include -#include - -void -sqisign_secure_free(void *mem, size_t size) -{ - if (mem) { - typedef void *(*memset_t)(void *, int, size_t); - static volatile memset_t memset_func = memset; - memset_func(mem, 0, size); - free(mem); - } -} -void -sqisign_secure_clear(void *mem, size_t size) -{ - typedef void *(*memset_t)(void *, int, size_t); - static volatile memset_t memset_func = memset; - memset_func(mem, 0, size); -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mem.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mem.h deleted file mode 100644 index ab8f6c6481..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mem.h +++ /dev/null @@ -1,24 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -#ifndef MEM_H -#define MEM_H -#include -#include - -/** - * Clears and frees allocated memory. - * - * @param[out] mem Memory to be cleared and freed. - * @param size Size of memory to be cleared and freed. - */ -void sqisign_secure_free(void *mem, size_t size); - -/** - * Clears memory. - * - * @param[out] mem Memory to be cleared. - * @param size Size of memory to be cleared. - */ -void sqisign_secure_clear(void *mem, size_t size); - -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mem.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mem.c deleted file mode 100644 index 4956beda50..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mem.c +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -#include -#include -#include - -void -sqisign_secure_free(void *mem, size_t size) -{ - if (mem) { - typedef void *(*memset_t)(void *, int, size_t); - static volatile memset_t memset_func = memset; - memset_func(mem, 0, size); - free(mem); - } -} -void -sqisign_secure_clear(void *mem, size_t size) -{ - typedef void *(*memset_t)(void *, int, size_t); - static volatile memset_t memset_func = memset; - memset_func(mem, 0, size); -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mem.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mem.h deleted file mode 100644 index ab8f6c6481..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mem.h +++ /dev/null @@ -1,24 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -#ifndef MEM_H -#define MEM_H -#include -#include - -/** - * Clears and frees allocated memory. - * - * @param[out] mem Memory to be cleared and freed. - * @param size Size of memory to be cleared and freed. - */ -void sqisign_secure_free(void *mem, size_t size); - -/** - * Clears memory. - * - * @param[out] mem Memory to be cleared. - * @param size Size of memory to be cleared. - */ -void sqisign_secure_clear(void *mem, size_t size); - -#endif From 28d23716acbb341436ebe4190216caa2583a5762 Mon Sep 17 00:00:00 2001 From: Shane Date: Wed, 25 Jun 2025 14:13:32 -0400 Subject: [PATCH 04/19] Use the OQS version of SHAKE and remove internal AES usage. 
Signed-off-by: Shane --- docs/algorithms/sig/sqisign.md | 2 +- docs/algorithms/sig/sqisign.yml | 2 +- .../copy_from_upstream/copy_from_upstream.yml | 2 +- src/sig/sqisign/CMakeLists.txt | 12 +- .../the-sqisign_sqisign_lvl1_broadwell/aes.h | 29 - .../aes_ni.c | 258 ------ .../aes_ni.h | 85 -- .../algebra.c | 280 ++++++ .../ctr_drbg.c | 201 ---- .../ctr_drbg.h | 78 -- .../the-sqisign_sqisign_lvl1_broadwell/dim2.c | 132 +++ .../the-sqisign_sqisign_lvl1_broadwell/dim4.c | 470 ++++++++++ .../the-sqisign_sqisign_lvl1_broadwell/dpe.h | 743 +++++++++++++++ .../finit.c | 122 +++ .../fips202.c | 876 ------------------ .../fips202.h | 169 +--- .../the-sqisign_sqisign_lvl1_broadwell/hnf.c | 210 +++++ .../hnf_internal.c | 182 ++++ .../hnf_internal.h | 94 ++ .../ibz_division.c | 12 + .../ideal.c | 323 +++++++ .../intbig.c | 791 ++++++++++++++++ .../intbig_internal.h | 123 +++ .../integers.c | 116 +++ .../internal.h | 812 ++++++++++++++++ .../the-sqisign_sqisign_lvl1_broadwell/l2.c | 190 ++++ .../lat_ball.c | 139 +++ .../lattice.c | 328 +++++++ .../lll_applications.c | 127 +++ .../lll_internals.h | 238 +++++ .../the-sqisign_sqisign_lvl1_broadwell/mp.c | 357 +++++++ .../normeq.c | 369 ++++++++ .../randombytes_arm64crypto.h | 27 - .../rationals.c | 233 +++++ .../vaes256_key_expansion.S | 122 --- .../the-sqisign_sqisign_lvl1_ref/aes.h | 29 - .../the-sqisign_sqisign_lvl1_ref/aes_c.c | 783 ---------------- .../the-sqisign_sqisign_lvl1_ref/fips202.c | 876 ------------------ .../the-sqisign_sqisign_lvl1_ref/fips202.h | 169 +--- .../randombytes_ctrdrbg.c | 161 ---- .../the-sqisign_sqisign_lvl3_broadwell/aes.h | 29 - .../aes_ni.c | 258 ------ .../aes_ni.h | 85 -- .../algebra.c | 280 ++++++ .../ctr_drbg.c | 201 ---- .../ctr_drbg.h | 78 -- .../the-sqisign_sqisign_lvl3_broadwell/dim2.c | 132 +++ .../the-sqisign_sqisign_lvl3_broadwell/dim4.c | 470 ++++++++++ .../the-sqisign_sqisign_lvl3_broadwell/dpe.h | 743 +++++++++++++++ .../finit.c | 122 +++ .../fips202.c | 876 ------------------ .../fips202.h | 169 +--- .../the-sqisign_sqisign_lvl3_broadwell/hnf.c | 210 +++++ .../hnf_internal.c | 182 ++++ .../hnf_internal.h | 94 ++ .../ibz_division.c | 12 + .../ideal.c | 323 +++++++ .../intbig.c | 791 ++++++++++++++++ .../intbig_internal.h | 123 +++ .../integers.c | 116 +++ .../internal.h | 812 ++++++++++++++++ .../the-sqisign_sqisign_lvl3_broadwell/l2.c | 190 ++++ .../lat_ball.c | 139 +++ .../lattice.c | 328 +++++++ .../lll_applications.c | 127 +++ .../lll_internals.h | 238 +++++ .../the-sqisign_sqisign_lvl3_broadwell/mp.c | 357 +++++++ .../normeq.c | 369 ++++++++ .../randombytes_arm64crypto.h | 27 - .../randombytes_ctrdrbg_aesni.c | 87 -- .../rationals.c | 233 +++++ .../vaes256_key_expansion.S | 122 --- .../the-sqisign_sqisign_lvl3_ref/aes.h | 29 - .../the-sqisign_sqisign_lvl3_ref/aes_c.c | 783 ---------------- .../the-sqisign_sqisign_lvl3_ref/fips202.c | 876 ------------------ .../the-sqisign_sqisign_lvl3_ref/fips202.h | 169 +--- .../the-sqisign_sqisign_lvl5_broadwell/aes.h | 29 - .../aes_ni.c | 258 ------ .../aes_ni.h | 85 -- .../algebra.c | 280 ++++++ .../ctr_drbg.c | 201 ---- .../ctr_drbg.h | 78 -- .../the-sqisign_sqisign_lvl5_broadwell/dim2.c | 132 +++ .../the-sqisign_sqisign_lvl5_broadwell/dim4.c | 470 ++++++++++ .../the-sqisign_sqisign_lvl5_broadwell/dpe.h | 743 +++++++++++++++ .../finit.c | 122 +++ .../fips202.c | 876 ------------------ .../fips202.h | 169 +--- .../the-sqisign_sqisign_lvl5_broadwell/hnf.c | 210 +++++ .../hnf_internal.c | 182 ++++ .../hnf_internal.h | 94 ++ .../ibz_division.c | 12 + .../ideal.c | 323 
+++++++ .../intbig.c | 791 ++++++++++++++++ .../intbig_internal.h | 123 +++ .../integers.c | 116 +++ .../internal.h | 812 ++++++++++++++++ .../the-sqisign_sqisign_lvl5_broadwell/l2.c | 190 ++++ .../lat_ball.c | 139 +++ .../lattice.c | 328 +++++++ .../lll_applications.c | 127 +++ .../lll_internals.h | 238 +++++ .../the-sqisign_sqisign_lvl5_broadwell/mp.c | 357 +++++++ .../normeq.c | 369 ++++++++ .../randombytes_arm64crypto.h | 27 - .../rationals.c | 233 +++++ .../vaes256_key_expansion.S | 122 --- .../the-sqisign_sqisign_lvl5_ref/aes.h | 29 - .../the-sqisign_sqisign_lvl5_ref/aes_c.c | 783 ---------------- .../the-sqisign_sqisign_lvl5_ref/fips202.c | 876 ------------------ .../the-sqisign_sqisign_lvl5_ref/fips202.h | 169 +--- tests/KATs/sig/kats.json | 6 +- tests/test_binary.py | 2 +- 113 files changed, 19222 insertions(+), 11331 deletions(-) delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes_ni.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes_ni.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/algebra.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim2.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim4.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dpe.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/finit.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fips202.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf_internal.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf_internal.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ibz_division.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ideal.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/intbig.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/intbig_internal.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/integers.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/internal.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/l2.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lat_ball.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lattice.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lll_applications.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lll_internals.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mp.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/normeq.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_arm64crypto.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/rationals.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/vaes256_key_expansion.S delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/aes.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/aes_c.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fips202.c delete mode 100644 
src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/randombytes_ctrdrbg.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes_ni.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes_ni.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/algebra.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim2.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim4.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dpe.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/finit.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fips202.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf_internal.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf_internal.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ibz_division.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ideal.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/intbig.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/intbig_internal.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/integers.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/internal.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/l2.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lat_ball.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lattice.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lll_applications.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lll_internals.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mp.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/normeq.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_arm64crypto.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_ctrdrbg_aesni.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/rationals.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/vaes256_key_expansion.S delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/aes.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/aes_c.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fips202.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes_ni.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes_ni.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/algebra.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim2.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim4.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dpe.h create mode 100644 
src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/finit.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fips202.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf_internal.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf_internal.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ibz_division.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ideal.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/intbig.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/intbig_internal.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/integers.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/internal.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/l2.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lat_ball.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lattice.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lll_applications.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lll_internals.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mp.c create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/normeq.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_arm64crypto.h create mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/rationals.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/vaes256_key_expansion.S delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/aes.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/aes_c.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fips202.c diff --git a/docs/algorithms/sig/sqisign.md b/docs/algorithms/sig/sqisign.md index cefc85849a..efd0ec5b39 100644 --- a/docs/algorithms/sig/sqisign.md +++ b/docs/algorithms/sig/sqisign.md @@ -6,7 +6,7 @@ - **Authors' website**: https://sqisign.org/ - **Specification version**: Round 2. 
- **Primary Source**: - - **Source**: https://github.com/shane-digi/the-sqisign/commit/a8c8a3d5acbeaecec179d9088c8eab7216db4db5 with copy_from_upstream patches + - **Source**: https://github.com/shane-digi/the-sqisign/commit/09bce2f0244bd11caa90c4eaef2150a759bd945d with copy_from_upstream patches - **Implementation license (SPDX-Identifier)**: Apache-2.0 diff --git a/docs/algorithms/sig/sqisign.yml b/docs/algorithms/sig/sqisign.yml index c1cc6ea380..4fb1092aef 100644 --- a/docs/algorithms/sig/sqisign.yml +++ b/docs/algorithms/sig/sqisign.yml @@ -36,7 +36,7 @@ website: https://sqisign.org/ nist-round: 2 spec-version: Round 2 primary-upstream: - source: https://github.com/shane-digi/the-sqisign/commit/a8c8a3d5acbeaecec179d9088c8eab7216db4db5 + source: https://github.com/shane-digi/the-sqisign/commit/09bce2f0244bd11caa90c4eaef2150a759bd945d with copy_from_upstream patches spdx-license-identifier: Apache-2.0 parameter-sets: diff --git a/scripts/copy_from_upstream/copy_from_upstream.yml b/scripts/copy_from_upstream/copy_from_upstream.yml index 6f99a472bb..4cfcfc616c 100644 --- a/scripts/copy_from_upstream/copy_from_upstream.yml +++ b/scripts/copy_from_upstream/copy_from_upstream.yml @@ -96,7 +96,7 @@ upstreams: name: the-sqisign git_url: https://github.com/shane-digi/the-sqisign.git git_branch: oqs - git_commit: a8c8a3d5acbeaecec179d9088c8eab7216db4db5 + git_commit: 09bce2f0244bd11caa90c4eaef2150a759bd945d sig_scheme_path: '.' sig_meta_path: 'META/{pqclean_scheme}.yml' patches: [sqisign_fp.patch, sqisign_namespace.patch] diff --git a/src/sig/sqisign/CMakeLists.txt b/src/sig/sqisign/CMakeLists.txt index 0b344b054c..c53de94f18 100644 --- a/src/sig/sqisign/CMakeLists.txt +++ b/src/sig/sqisign/CMakeLists.txt @@ -6,7 +6,7 @@ set(_SQISIGN_OBJS "") if(OQS_ENABLE_SIG_sqisign_lvl1) - add_library(sqisign_lvl1_ref OBJECT sig_sqisign_lvl1.c the-sqisign_sqisign_lvl1_ref/aes_c.c the-sqisign_sqisign_lvl1_ref/algebra.c the-sqisign_sqisign_lvl1_ref/api.c the-sqisign_sqisign_lvl1_ref/basis.c the-sqisign_sqisign_lvl1_ref/biextension.c the-sqisign_sqisign_lvl1_ref/common.c the-sqisign_sqisign_lvl1_ref/dim2.c the-sqisign_sqisign_lvl1_ref/dim2id2iso.c the-sqisign_sqisign_lvl1_ref/dim4.c the-sqisign_sqisign_lvl1_ref/e0_basis.c the-sqisign_sqisign_lvl1_ref/ec.c the-sqisign_sqisign_lvl1_ref/ec_jac.c the-sqisign_sqisign_lvl1_ref/ec_params.c the-sqisign_sqisign_lvl1_ref/encode_signature.c the-sqisign_sqisign_lvl1_ref/encode_verification.c the-sqisign_sqisign_lvl1_ref/endomorphism_action.c the-sqisign_sqisign_lvl1_ref/finit.c the-sqisign_sqisign_lvl1_ref/fips202.c the-sqisign_sqisign_lvl1_ref/fp.c the-sqisign_sqisign_lvl1_ref/fp2.c the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c the-sqisign_sqisign_lvl1_ref/hd.c the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl1_ref/hnf.c the-sqisign_sqisign_lvl1_ref/hnf_internal.c the-sqisign_sqisign_lvl1_ref/ibz_division.c the-sqisign_sqisign_lvl1_ref/id2iso.c the-sqisign_sqisign_lvl1_ref/ideal.c the-sqisign_sqisign_lvl1_ref/intbig.c the-sqisign_sqisign_lvl1_ref/integers.c the-sqisign_sqisign_lvl1_ref/isog_chains.c the-sqisign_sqisign_lvl1_ref/keygen.c the-sqisign_sqisign_lvl1_ref/l2.c the-sqisign_sqisign_lvl1_ref/lat_ball.c the-sqisign_sqisign_lvl1_ref/lattice.c the-sqisign_sqisign_lvl1_ref/lll_applications.c the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl1_ref/mini-gmp.c the-sqisign_sqisign_lvl1_ref/mp.c the-sqisign_sqisign_lvl1_ref/normeq.c the-sqisign_sqisign_lvl1_ref/printer.c 
the-sqisign_sqisign_lvl1_ref/quaternion_data.c the-sqisign_sqisign_lvl1_ref/randombytes_ctrdrbg.c the-sqisign_sqisign_lvl1_ref/rationals.c the-sqisign_sqisign_lvl1_ref/sign.c the-sqisign_sqisign_lvl1_ref/sqisign.c the-sqisign_sqisign_lvl1_ref/theta_isogenies.c the-sqisign_sqisign_lvl1_ref/theta_structure.c the-sqisign_sqisign_lvl1_ref/torsion_constants.c the-sqisign_sqisign_lvl1_ref/verify.c the-sqisign_sqisign_lvl1_ref/xeval.c the-sqisign_sqisign_lvl1_ref/xisog.c) + add_library(sqisign_lvl1_ref OBJECT sig_sqisign_lvl1.c the-sqisign_sqisign_lvl1_ref/algebra.c the-sqisign_sqisign_lvl1_ref/api.c the-sqisign_sqisign_lvl1_ref/basis.c the-sqisign_sqisign_lvl1_ref/biextension.c the-sqisign_sqisign_lvl1_ref/common.c the-sqisign_sqisign_lvl1_ref/dim2.c the-sqisign_sqisign_lvl1_ref/dim2id2iso.c the-sqisign_sqisign_lvl1_ref/dim4.c the-sqisign_sqisign_lvl1_ref/e0_basis.c the-sqisign_sqisign_lvl1_ref/ec.c the-sqisign_sqisign_lvl1_ref/ec_jac.c the-sqisign_sqisign_lvl1_ref/ec_params.c the-sqisign_sqisign_lvl1_ref/encode_signature.c the-sqisign_sqisign_lvl1_ref/encode_verification.c the-sqisign_sqisign_lvl1_ref/endomorphism_action.c the-sqisign_sqisign_lvl1_ref/finit.c the-sqisign_sqisign_lvl1_ref/fp.c the-sqisign_sqisign_lvl1_ref/fp2.c the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c the-sqisign_sqisign_lvl1_ref/hd.c the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl1_ref/hnf.c the-sqisign_sqisign_lvl1_ref/hnf_internal.c the-sqisign_sqisign_lvl1_ref/ibz_division.c the-sqisign_sqisign_lvl1_ref/id2iso.c the-sqisign_sqisign_lvl1_ref/ideal.c the-sqisign_sqisign_lvl1_ref/intbig.c the-sqisign_sqisign_lvl1_ref/integers.c the-sqisign_sqisign_lvl1_ref/isog_chains.c the-sqisign_sqisign_lvl1_ref/keygen.c the-sqisign_sqisign_lvl1_ref/l2.c the-sqisign_sqisign_lvl1_ref/lat_ball.c the-sqisign_sqisign_lvl1_ref/lattice.c the-sqisign_sqisign_lvl1_ref/lll_applications.c the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl1_ref/mini-gmp.c the-sqisign_sqisign_lvl1_ref/mp.c the-sqisign_sqisign_lvl1_ref/normeq.c the-sqisign_sqisign_lvl1_ref/printer.c the-sqisign_sqisign_lvl1_ref/quaternion_data.c the-sqisign_sqisign_lvl1_ref/rationals.c the-sqisign_sqisign_lvl1_ref/sign.c the-sqisign_sqisign_lvl1_ref/sqisign.c the-sqisign_sqisign_lvl1_ref/theta_isogenies.c the-sqisign_sqisign_lvl1_ref/theta_structure.c the-sqisign_sqisign_lvl1_ref/torsion_constants.c the-sqisign_sqisign_lvl1_ref/verify.c the-sqisign_sqisign_lvl1_ref/xeval.c the-sqisign_sqisign_lvl1_ref/xisog.c) target_compile_options(sqisign_lvl1_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl1) target_include_directories(sqisign_lvl1_ref PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl1_ref) target_include_directories(sqisign_lvl1_ref PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) @@ -15,7 +15,7 @@ if(OQS_ENABLE_SIG_sqisign_lvl1) endif() if(OQS_ENABLE_SIG_sqisign_lvl1_broadwell) - add_library(sqisign_lvl1_broadwell OBJECT the-sqisign_sqisign_lvl1_broadwell/aes_ni.c the-sqisign_sqisign_lvl1_broadwell/api.c the-sqisign_sqisign_lvl1_broadwell/basis.c the-sqisign_sqisign_lvl1_broadwell/biextension.c the-sqisign_sqisign_lvl1_broadwell/common.c the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.c the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl1_broadwell/e0_basis.c the-sqisign_sqisign_lvl1_broadwell/ec.c the-sqisign_sqisign_lvl1_broadwell/ec_jac.c the-sqisign_sqisign_lvl1_broadwell/ec_params.c 
the-sqisign_sqisign_lvl1_broadwell/encode_signature.c the-sqisign_sqisign_lvl1_broadwell/encode_verification.c the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl1_broadwell/fips202.c the-sqisign_sqisign_lvl1_broadwell/fp.c the-sqisign_sqisign_lvl1_broadwell/fp2.c the-sqisign_sqisign_lvl1_broadwell/fp_asm.S the-sqisign_sqisign_lvl1_broadwell/gf5248.c the-sqisign_sqisign_lvl1_broadwell/hd.c the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl1_broadwell/id2iso.c the-sqisign_sqisign_lvl1_broadwell/isog_chains.c the-sqisign_sqisign_lvl1_broadwell/keygen.c the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl1_broadwell/mini-gmp.c the-sqisign_sqisign_lvl1_broadwell/quaternion_data.c the-sqisign_sqisign_lvl1_broadwell/sign.c the-sqisign_sqisign_lvl1_broadwell/sqisign.c the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl1_broadwell/theta_structure.c the-sqisign_sqisign_lvl1_broadwell/torsion_constants.c the-sqisign_sqisign_lvl1_broadwell/vaes256_key_expansion.S the-sqisign_sqisign_lvl1_broadwell/verify.c the-sqisign_sqisign_lvl1_broadwell/xeval.c the-sqisign_sqisign_lvl1_broadwell/xisog.c) + add_library(sqisign_lvl1_broadwell OBJECT the-sqisign_sqisign_lvl1_broadwell/algebra.c the-sqisign_sqisign_lvl1_broadwell/api.c the-sqisign_sqisign_lvl1_broadwell/basis.c the-sqisign_sqisign_lvl1_broadwell/biextension.c the-sqisign_sqisign_lvl1_broadwell/common.c the-sqisign_sqisign_lvl1_broadwell/dim2.c the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl1_broadwell/dim4.c the-sqisign_sqisign_lvl1_broadwell/e0_basis.c the-sqisign_sqisign_lvl1_broadwell/ec.c the-sqisign_sqisign_lvl1_broadwell/ec_jac.c the-sqisign_sqisign_lvl1_broadwell/ec_params.c the-sqisign_sqisign_lvl1_broadwell/encode_signature.c the-sqisign_sqisign_lvl1_broadwell/encode_verification.c the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl1_broadwell/finit.c the-sqisign_sqisign_lvl1_broadwell/fp.c the-sqisign_sqisign_lvl1_broadwell/fp2.c the-sqisign_sqisign_lvl1_broadwell/fp_asm.S the-sqisign_sqisign_lvl1_broadwell/gf5248.c the-sqisign_sqisign_lvl1_broadwell/hd.c the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl1_broadwell/hnf.c the-sqisign_sqisign_lvl1_broadwell/hnf_internal.c the-sqisign_sqisign_lvl1_broadwell/ibz_division.c the-sqisign_sqisign_lvl1_broadwell/id2iso.c the-sqisign_sqisign_lvl1_broadwell/ideal.c the-sqisign_sqisign_lvl1_broadwell/intbig.c the-sqisign_sqisign_lvl1_broadwell/integers.c the-sqisign_sqisign_lvl1_broadwell/isog_chains.c the-sqisign_sqisign_lvl1_broadwell/keygen.c the-sqisign_sqisign_lvl1_broadwell/l2.c the-sqisign_sqisign_lvl1_broadwell/lat_ball.c the-sqisign_sqisign_lvl1_broadwell/lattice.c the-sqisign_sqisign_lvl1_broadwell/lll_applications.c the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl1_broadwell/mini-gmp.c the-sqisign_sqisign_lvl1_broadwell/mp.c the-sqisign_sqisign_lvl1_broadwell/normeq.c the-sqisign_sqisign_lvl1_broadwell/quaternion_data.c the-sqisign_sqisign_lvl1_broadwell/rationals.c the-sqisign_sqisign_lvl1_broadwell/sign.c the-sqisign_sqisign_lvl1_broadwell/sqisign.c the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl1_broadwell/theta_structure.c the-sqisign_sqisign_lvl1_broadwell/torsion_constants.c the-sqisign_sqisign_lvl1_broadwell/verify.c the-sqisign_sqisign_lvl1_broadwell/xeval.c the-sqisign_sqisign_lvl1_broadwell/xisog.c) 
target_include_directories(sqisign_lvl1_broadwell PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl1_broadwell) target_include_directories(sqisign_lvl1_broadwell PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) target_compile_options(sqisign_lvl1_broadwell PRIVATE -mavx2) @@ -24,7 +24,7 @@ if(OQS_ENABLE_SIG_sqisign_lvl1_broadwell) endif() if(OQS_ENABLE_SIG_sqisign_lvl3) - add_library(sqisign_lvl3_ref OBJECT sig_sqisign_lvl3.c the-sqisign_sqisign_lvl3_ref/aes_c.c the-sqisign_sqisign_lvl3_ref/algebra.c the-sqisign_sqisign_lvl3_ref/api.c the-sqisign_sqisign_lvl3_ref/basis.c the-sqisign_sqisign_lvl3_ref/biextension.c the-sqisign_sqisign_lvl3_ref/common.c the-sqisign_sqisign_lvl3_ref/dim2.c the-sqisign_sqisign_lvl3_ref/dim2id2iso.c the-sqisign_sqisign_lvl3_ref/dim4.c the-sqisign_sqisign_lvl3_ref/e0_basis.c the-sqisign_sqisign_lvl3_ref/ec.c the-sqisign_sqisign_lvl3_ref/ec_jac.c the-sqisign_sqisign_lvl3_ref/ec_params.c the-sqisign_sqisign_lvl3_ref/encode_signature.c the-sqisign_sqisign_lvl3_ref/encode_verification.c the-sqisign_sqisign_lvl3_ref/endomorphism_action.c the-sqisign_sqisign_lvl3_ref/finit.c the-sqisign_sqisign_lvl3_ref/fips202.c the-sqisign_sqisign_lvl3_ref/fp.c the-sqisign_sqisign_lvl3_ref/fp2.c the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c the-sqisign_sqisign_lvl3_ref/hd.c the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl3_ref/hnf.c the-sqisign_sqisign_lvl3_ref/hnf_internal.c the-sqisign_sqisign_lvl3_ref/ibz_division.c the-sqisign_sqisign_lvl3_ref/id2iso.c the-sqisign_sqisign_lvl3_ref/ideal.c the-sqisign_sqisign_lvl3_ref/intbig.c the-sqisign_sqisign_lvl3_ref/integers.c the-sqisign_sqisign_lvl3_ref/isog_chains.c the-sqisign_sqisign_lvl3_ref/keygen.c the-sqisign_sqisign_lvl3_ref/l2.c the-sqisign_sqisign_lvl3_ref/lat_ball.c the-sqisign_sqisign_lvl3_ref/lattice.c the-sqisign_sqisign_lvl3_ref/lll_applications.c the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl3_ref/mini-gmp.c the-sqisign_sqisign_lvl3_ref/mp.c the-sqisign_sqisign_lvl3_ref/normeq.c the-sqisign_sqisign_lvl3_ref/printer.c the-sqisign_sqisign_lvl3_ref/quaternion_data.c the-sqisign_sqisign_lvl3_ref/rationals.c the-sqisign_sqisign_lvl3_ref/sign.c the-sqisign_sqisign_lvl3_ref/sqisign.c the-sqisign_sqisign_lvl3_ref/theta_isogenies.c the-sqisign_sqisign_lvl3_ref/theta_structure.c the-sqisign_sqisign_lvl3_ref/torsion_constants.c the-sqisign_sqisign_lvl3_ref/verify.c the-sqisign_sqisign_lvl3_ref/xeval.c the-sqisign_sqisign_lvl3_ref/xisog.c) + add_library(sqisign_lvl3_ref OBJECT sig_sqisign_lvl3.c the-sqisign_sqisign_lvl3_ref/algebra.c the-sqisign_sqisign_lvl3_ref/api.c the-sqisign_sqisign_lvl3_ref/basis.c the-sqisign_sqisign_lvl3_ref/biextension.c the-sqisign_sqisign_lvl3_ref/common.c the-sqisign_sqisign_lvl3_ref/dim2.c the-sqisign_sqisign_lvl3_ref/dim2id2iso.c the-sqisign_sqisign_lvl3_ref/dim4.c the-sqisign_sqisign_lvl3_ref/e0_basis.c the-sqisign_sqisign_lvl3_ref/ec.c the-sqisign_sqisign_lvl3_ref/ec_jac.c the-sqisign_sqisign_lvl3_ref/ec_params.c the-sqisign_sqisign_lvl3_ref/encode_signature.c the-sqisign_sqisign_lvl3_ref/encode_verification.c the-sqisign_sqisign_lvl3_ref/endomorphism_action.c the-sqisign_sqisign_lvl3_ref/finit.c the-sqisign_sqisign_lvl3_ref/fp.c the-sqisign_sqisign_lvl3_ref/fp2.c the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c the-sqisign_sqisign_lvl3_ref/hd.c the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl3_ref/hnf.c 
the-sqisign_sqisign_lvl3_ref/hnf_internal.c the-sqisign_sqisign_lvl3_ref/ibz_division.c the-sqisign_sqisign_lvl3_ref/id2iso.c the-sqisign_sqisign_lvl3_ref/ideal.c the-sqisign_sqisign_lvl3_ref/intbig.c the-sqisign_sqisign_lvl3_ref/integers.c the-sqisign_sqisign_lvl3_ref/isog_chains.c the-sqisign_sqisign_lvl3_ref/keygen.c the-sqisign_sqisign_lvl3_ref/l2.c the-sqisign_sqisign_lvl3_ref/lat_ball.c the-sqisign_sqisign_lvl3_ref/lattice.c the-sqisign_sqisign_lvl3_ref/lll_applications.c the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl3_ref/mini-gmp.c the-sqisign_sqisign_lvl3_ref/mp.c the-sqisign_sqisign_lvl3_ref/normeq.c the-sqisign_sqisign_lvl3_ref/printer.c the-sqisign_sqisign_lvl3_ref/quaternion_data.c the-sqisign_sqisign_lvl3_ref/rationals.c the-sqisign_sqisign_lvl3_ref/sign.c the-sqisign_sqisign_lvl3_ref/sqisign.c the-sqisign_sqisign_lvl3_ref/theta_isogenies.c the-sqisign_sqisign_lvl3_ref/theta_structure.c the-sqisign_sqisign_lvl3_ref/torsion_constants.c the-sqisign_sqisign_lvl3_ref/verify.c the-sqisign_sqisign_lvl3_ref/xeval.c the-sqisign_sqisign_lvl3_ref/xisog.c) target_compile_options(sqisign_lvl3_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl3) target_include_directories(sqisign_lvl3_ref PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl3_ref) target_include_directories(sqisign_lvl3_ref PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) @@ -33,7 +33,7 @@ if(OQS_ENABLE_SIG_sqisign_lvl3) endif() if(OQS_ENABLE_SIG_sqisign_lvl3_broadwell) - add_library(sqisign_lvl3_broadwell OBJECT the-sqisign_sqisign_lvl3_broadwell/aes_ni.c the-sqisign_sqisign_lvl3_broadwell/api.c the-sqisign_sqisign_lvl3_broadwell/basis.c the-sqisign_sqisign_lvl3_broadwell/biextension.c the-sqisign_sqisign_lvl3_broadwell/common.c the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.c the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl3_broadwell/e0_basis.c the-sqisign_sqisign_lvl3_broadwell/ec.c the-sqisign_sqisign_lvl3_broadwell/ec_jac.c the-sqisign_sqisign_lvl3_broadwell/ec_params.c the-sqisign_sqisign_lvl3_broadwell/encode_signature.c the-sqisign_sqisign_lvl3_broadwell/encode_verification.c the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl3_broadwell/fips202.c the-sqisign_sqisign_lvl3_broadwell/fp.c the-sqisign_sqisign_lvl3_broadwell/fp2.c the-sqisign_sqisign_lvl3_broadwell/fp_asm.S the-sqisign_sqisign_lvl3_broadwell/gf65376.c the-sqisign_sqisign_lvl3_broadwell/hd.c the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl3_broadwell/id2iso.c the-sqisign_sqisign_lvl3_broadwell/isog_chains.c the-sqisign_sqisign_lvl3_broadwell/keygen.c the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl3_broadwell/mini-gmp.c the-sqisign_sqisign_lvl3_broadwell/quaternion_data.c the-sqisign_sqisign_lvl3_broadwell/randombytes_ctrdrbg_aesni.c the-sqisign_sqisign_lvl3_broadwell/sign.c the-sqisign_sqisign_lvl3_broadwell/sqisign.c the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl3_broadwell/theta_structure.c the-sqisign_sqisign_lvl3_broadwell/torsion_constants.c the-sqisign_sqisign_lvl3_broadwell/vaes256_key_expansion.S the-sqisign_sqisign_lvl3_broadwell/verify.c the-sqisign_sqisign_lvl3_broadwell/xeval.c the-sqisign_sqisign_lvl3_broadwell/xisog.c) + add_library(sqisign_lvl3_broadwell OBJECT the-sqisign_sqisign_lvl3_broadwell/algebra.c the-sqisign_sqisign_lvl3_broadwell/api.c the-sqisign_sqisign_lvl3_broadwell/basis.c 
the-sqisign_sqisign_lvl3_broadwell/biextension.c the-sqisign_sqisign_lvl3_broadwell/common.c the-sqisign_sqisign_lvl3_broadwell/dim2.c the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl3_broadwell/dim4.c the-sqisign_sqisign_lvl3_broadwell/e0_basis.c the-sqisign_sqisign_lvl3_broadwell/ec.c the-sqisign_sqisign_lvl3_broadwell/ec_jac.c the-sqisign_sqisign_lvl3_broadwell/ec_params.c the-sqisign_sqisign_lvl3_broadwell/encode_signature.c the-sqisign_sqisign_lvl3_broadwell/encode_verification.c the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl3_broadwell/finit.c the-sqisign_sqisign_lvl3_broadwell/fp.c the-sqisign_sqisign_lvl3_broadwell/fp2.c the-sqisign_sqisign_lvl3_broadwell/fp_asm.S the-sqisign_sqisign_lvl3_broadwell/gf65376.c the-sqisign_sqisign_lvl3_broadwell/hd.c the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl3_broadwell/hnf.c the-sqisign_sqisign_lvl3_broadwell/hnf_internal.c the-sqisign_sqisign_lvl3_broadwell/ibz_division.c the-sqisign_sqisign_lvl3_broadwell/id2iso.c the-sqisign_sqisign_lvl3_broadwell/ideal.c the-sqisign_sqisign_lvl3_broadwell/intbig.c the-sqisign_sqisign_lvl3_broadwell/integers.c the-sqisign_sqisign_lvl3_broadwell/isog_chains.c the-sqisign_sqisign_lvl3_broadwell/keygen.c the-sqisign_sqisign_lvl3_broadwell/l2.c the-sqisign_sqisign_lvl3_broadwell/lat_ball.c the-sqisign_sqisign_lvl3_broadwell/lattice.c the-sqisign_sqisign_lvl3_broadwell/lll_applications.c the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl3_broadwell/mini-gmp.c the-sqisign_sqisign_lvl3_broadwell/mp.c the-sqisign_sqisign_lvl3_broadwell/normeq.c the-sqisign_sqisign_lvl3_broadwell/quaternion_data.c the-sqisign_sqisign_lvl3_broadwell/rationals.c the-sqisign_sqisign_lvl3_broadwell/sign.c the-sqisign_sqisign_lvl3_broadwell/sqisign.c the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl3_broadwell/theta_structure.c the-sqisign_sqisign_lvl3_broadwell/torsion_constants.c the-sqisign_sqisign_lvl3_broadwell/verify.c the-sqisign_sqisign_lvl3_broadwell/xeval.c the-sqisign_sqisign_lvl3_broadwell/xisog.c) target_include_directories(sqisign_lvl3_broadwell PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl3_broadwell) target_include_directories(sqisign_lvl3_broadwell PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) target_compile_options(sqisign_lvl3_broadwell PRIVATE -mavx2) @@ -42,7 +42,7 @@ if(OQS_ENABLE_SIG_sqisign_lvl3_broadwell) endif() if(OQS_ENABLE_SIG_sqisign_lvl5) - add_library(sqisign_lvl5_ref OBJECT sig_sqisign_lvl5.c the-sqisign_sqisign_lvl5_ref/aes_c.c the-sqisign_sqisign_lvl5_ref/algebra.c the-sqisign_sqisign_lvl5_ref/api.c the-sqisign_sqisign_lvl5_ref/basis.c the-sqisign_sqisign_lvl5_ref/biextension.c the-sqisign_sqisign_lvl5_ref/common.c the-sqisign_sqisign_lvl5_ref/dim2.c the-sqisign_sqisign_lvl5_ref/dim2id2iso.c the-sqisign_sqisign_lvl5_ref/dim4.c the-sqisign_sqisign_lvl5_ref/e0_basis.c the-sqisign_sqisign_lvl5_ref/ec.c the-sqisign_sqisign_lvl5_ref/ec_jac.c the-sqisign_sqisign_lvl5_ref/ec_params.c the-sqisign_sqisign_lvl5_ref/encode_signature.c the-sqisign_sqisign_lvl5_ref/encode_verification.c the-sqisign_sqisign_lvl5_ref/endomorphism_action.c the-sqisign_sqisign_lvl5_ref/finit.c the-sqisign_sqisign_lvl5_ref/fips202.c the-sqisign_sqisign_lvl5_ref/fp.c the-sqisign_sqisign_lvl5_ref/fp2.c the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c the-sqisign_sqisign_lvl5_ref/hd.c the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.c 
the-sqisign_sqisign_lvl5_ref/hnf.c the-sqisign_sqisign_lvl5_ref/hnf_internal.c the-sqisign_sqisign_lvl5_ref/ibz_division.c the-sqisign_sqisign_lvl5_ref/id2iso.c the-sqisign_sqisign_lvl5_ref/ideal.c the-sqisign_sqisign_lvl5_ref/intbig.c the-sqisign_sqisign_lvl5_ref/integers.c the-sqisign_sqisign_lvl5_ref/isog_chains.c the-sqisign_sqisign_lvl5_ref/keygen.c the-sqisign_sqisign_lvl5_ref/l2.c the-sqisign_sqisign_lvl5_ref/lat_ball.c the-sqisign_sqisign_lvl5_ref/lattice.c the-sqisign_sqisign_lvl5_ref/lll_applications.c the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl5_ref/mini-gmp.c the-sqisign_sqisign_lvl5_ref/mp.c the-sqisign_sqisign_lvl5_ref/normeq.c the-sqisign_sqisign_lvl5_ref/printer.c the-sqisign_sqisign_lvl5_ref/quaternion_data.c the-sqisign_sqisign_lvl5_ref/rationals.c the-sqisign_sqisign_lvl5_ref/sign.c the-sqisign_sqisign_lvl5_ref/sqisign.c the-sqisign_sqisign_lvl5_ref/theta_isogenies.c the-sqisign_sqisign_lvl5_ref/theta_structure.c the-sqisign_sqisign_lvl5_ref/torsion_constants.c the-sqisign_sqisign_lvl5_ref/verify.c the-sqisign_sqisign_lvl5_ref/xeval.c the-sqisign_sqisign_lvl5_ref/xisog.c) + add_library(sqisign_lvl5_ref OBJECT sig_sqisign_lvl5.c the-sqisign_sqisign_lvl5_ref/algebra.c the-sqisign_sqisign_lvl5_ref/api.c the-sqisign_sqisign_lvl5_ref/basis.c the-sqisign_sqisign_lvl5_ref/biextension.c the-sqisign_sqisign_lvl5_ref/common.c the-sqisign_sqisign_lvl5_ref/dim2.c the-sqisign_sqisign_lvl5_ref/dim2id2iso.c the-sqisign_sqisign_lvl5_ref/dim4.c the-sqisign_sqisign_lvl5_ref/e0_basis.c the-sqisign_sqisign_lvl5_ref/ec.c the-sqisign_sqisign_lvl5_ref/ec_jac.c the-sqisign_sqisign_lvl5_ref/ec_params.c the-sqisign_sqisign_lvl5_ref/encode_signature.c the-sqisign_sqisign_lvl5_ref/encode_verification.c the-sqisign_sqisign_lvl5_ref/endomorphism_action.c the-sqisign_sqisign_lvl5_ref/finit.c the-sqisign_sqisign_lvl5_ref/fp.c the-sqisign_sqisign_lvl5_ref/fp2.c the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c the-sqisign_sqisign_lvl5_ref/hd.c the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl5_ref/hnf.c the-sqisign_sqisign_lvl5_ref/hnf_internal.c the-sqisign_sqisign_lvl5_ref/ibz_division.c the-sqisign_sqisign_lvl5_ref/id2iso.c the-sqisign_sqisign_lvl5_ref/ideal.c the-sqisign_sqisign_lvl5_ref/intbig.c the-sqisign_sqisign_lvl5_ref/integers.c the-sqisign_sqisign_lvl5_ref/isog_chains.c the-sqisign_sqisign_lvl5_ref/keygen.c the-sqisign_sqisign_lvl5_ref/l2.c the-sqisign_sqisign_lvl5_ref/lat_ball.c the-sqisign_sqisign_lvl5_ref/lattice.c the-sqisign_sqisign_lvl5_ref/lll_applications.c the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl5_ref/mini-gmp.c the-sqisign_sqisign_lvl5_ref/mp.c the-sqisign_sqisign_lvl5_ref/normeq.c the-sqisign_sqisign_lvl5_ref/printer.c the-sqisign_sqisign_lvl5_ref/quaternion_data.c the-sqisign_sqisign_lvl5_ref/rationals.c the-sqisign_sqisign_lvl5_ref/sign.c the-sqisign_sqisign_lvl5_ref/sqisign.c the-sqisign_sqisign_lvl5_ref/theta_isogenies.c the-sqisign_sqisign_lvl5_ref/theta_structure.c the-sqisign_sqisign_lvl5_ref/torsion_constants.c the-sqisign_sqisign_lvl5_ref/verify.c the-sqisign_sqisign_lvl5_ref/xeval.c the-sqisign_sqisign_lvl5_ref/xisog.c) target_compile_options(sqisign_lvl5_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl5) target_include_directories(sqisign_lvl5_ref PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl5_ref) target_include_directories(sqisign_lvl5_ref PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) 
@@ -51,7 +51,7 @@ if(OQS_ENABLE_SIG_sqisign_lvl5) endif() if(OQS_ENABLE_SIG_sqisign_lvl5_broadwell) - add_library(sqisign_lvl5_broadwell OBJECT the-sqisign_sqisign_lvl5_broadwell/aes_ni.c the-sqisign_sqisign_lvl5_broadwell/api.c the-sqisign_sqisign_lvl5_broadwell/basis.c the-sqisign_sqisign_lvl5_broadwell/biextension.c the-sqisign_sqisign_lvl5_broadwell/common.c the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.c the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl5_broadwell/e0_basis.c the-sqisign_sqisign_lvl5_broadwell/ec.c the-sqisign_sqisign_lvl5_broadwell/ec_jac.c the-sqisign_sqisign_lvl5_broadwell/ec_params.c the-sqisign_sqisign_lvl5_broadwell/encode_signature.c the-sqisign_sqisign_lvl5_broadwell/encode_verification.c the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl5_broadwell/fips202.c the-sqisign_sqisign_lvl5_broadwell/fp.c the-sqisign_sqisign_lvl5_broadwell/fp2.c the-sqisign_sqisign_lvl5_broadwell/fp_asm.S the-sqisign_sqisign_lvl5_broadwell/gf27500.c the-sqisign_sqisign_lvl5_broadwell/hd.c the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl5_broadwell/id2iso.c the-sqisign_sqisign_lvl5_broadwell/isog_chains.c the-sqisign_sqisign_lvl5_broadwell/keygen.c the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl5_broadwell/mini-gmp.c the-sqisign_sqisign_lvl5_broadwell/quaternion_data.c the-sqisign_sqisign_lvl5_broadwell/sign.c the-sqisign_sqisign_lvl5_broadwell/sqisign.c the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl5_broadwell/theta_structure.c the-sqisign_sqisign_lvl5_broadwell/torsion_constants.c the-sqisign_sqisign_lvl5_broadwell/vaes256_key_expansion.S the-sqisign_sqisign_lvl5_broadwell/verify.c the-sqisign_sqisign_lvl5_broadwell/xeval.c the-sqisign_sqisign_lvl5_broadwell/xisog.c) + add_library(sqisign_lvl5_broadwell OBJECT the-sqisign_sqisign_lvl5_broadwell/algebra.c the-sqisign_sqisign_lvl5_broadwell/api.c the-sqisign_sqisign_lvl5_broadwell/basis.c the-sqisign_sqisign_lvl5_broadwell/biextension.c the-sqisign_sqisign_lvl5_broadwell/common.c the-sqisign_sqisign_lvl5_broadwell/dim2.c the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl5_broadwell/dim4.c the-sqisign_sqisign_lvl5_broadwell/e0_basis.c the-sqisign_sqisign_lvl5_broadwell/ec.c the-sqisign_sqisign_lvl5_broadwell/ec_jac.c the-sqisign_sqisign_lvl5_broadwell/ec_params.c the-sqisign_sqisign_lvl5_broadwell/encode_signature.c the-sqisign_sqisign_lvl5_broadwell/encode_verification.c the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl5_broadwell/finit.c the-sqisign_sqisign_lvl5_broadwell/fp.c the-sqisign_sqisign_lvl5_broadwell/fp2.c the-sqisign_sqisign_lvl5_broadwell/fp_asm.S the-sqisign_sqisign_lvl5_broadwell/gf27500.c the-sqisign_sqisign_lvl5_broadwell/hd.c the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl5_broadwell/hnf.c the-sqisign_sqisign_lvl5_broadwell/hnf_internal.c the-sqisign_sqisign_lvl5_broadwell/ibz_division.c the-sqisign_sqisign_lvl5_broadwell/id2iso.c the-sqisign_sqisign_lvl5_broadwell/ideal.c the-sqisign_sqisign_lvl5_broadwell/intbig.c the-sqisign_sqisign_lvl5_broadwell/integers.c the-sqisign_sqisign_lvl5_broadwell/isog_chains.c the-sqisign_sqisign_lvl5_broadwell/keygen.c the-sqisign_sqisign_lvl5_broadwell/l2.c the-sqisign_sqisign_lvl5_broadwell/lat_ball.c the-sqisign_sqisign_lvl5_broadwell/lattice.c the-sqisign_sqisign_lvl5_broadwell/lll_applications.c the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.c 
the-sqisign_sqisign_lvl5_broadwell/mini-gmp.c the-sqisign_sqisign_lvl5_broadwell/mp.c the-sqisign_sqisign_lvl5_broadwell/normeq.c the-sqisign_sqisign_lvl5_broadwell/quaternion_data.c the-sqisign_sqisign_lvl5_broadwell/rationals.c the-sqisign_sqisign_lvl5_broadwell/sign.c the-sqisign_sqisign_lvl5_broadwell/sqisign.c the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl5_broadwell/theta_structure.c the-sqisign_sqisign_lvl5_broadwell/torsion_constants.c the-sqisign_sqisign_lvl5_broadwell/verify.c the-sqisign_sqisign_lvl5_broadwell/xeval.c the-sqisign_sqisign_lvl5_broadwell/xisog.c) target_include_directories(sqisign_lvl5_broadwell PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl5_broadwell) target_include_directories(sqisign_lvl5_broadwell PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) target_compile_options(sqisign_lvl5_broadwell PRIVATE -mavx2) diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes.h deleted file mode 100644 index e35ec3705b..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes.h +++ /dev/null @@ -1,29 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -#ifndef AES_H -#define AES_H - -#include -#include - -void AES_256_ECB(const uint8_t *input, const uint8_t *key, uint8_t *output); -#define AES_ECB_encrypt AES_256_ECB - -#ifdef ENABLE_AESNI -int AES_128_CTR_NI(unsigned char *output, - size_t outputByteLen, - const unsigned char *input, - size_t inputByteLen); -int AES_128_CTR_4R_NI(unsigned char *output, - size_t outputByteLen, - const unsigned char *input, - size_t inputByteLen); -#define AES_128_CTR AES_128_CTR_NI -#else -int AES_128_CTR(unsigned char *output, - size_t outputByteLen, - const unsigned char *input, - size_t inputByteLen); -#endif - -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes_ni.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes_ni.c deleted file mode 100644 index dc778fc9b6..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes_ni.c +++ /dev/null @@ -1,258 +0,0 @@ -/*************************************************************************** -* This implementation is a modified version of the code, -* written by Nir Drucker and Shay Gueron -* AWS Cryptographic Algorithms Group -* (ndrucker@amazon.com, gueron@amazon.com) -* -* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. -* -* Licensed under the Apache License, Version 2.0 (the "License"). -* You may not use this file except in compliance with the License. -* A copy of the License is located at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* or in the "license" file accompanying this file. This file is distributed -* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -* express or implied. See the License for the specific language governing -* permissions and limitations under the License. -* The license is detailed in the file LICENSE.txt, and applies to this file. 
-* ***************************************************************************/ - -#include "aes_ni.h" -#include - -#include -#include - -#define AESENC(m, key) _mm_aesenc_si128(m, key) -#define AESENCLAST(m, key) _mm_aesenclast_si128(m, key) -#define XOR(a, b) _mm_xor_si128(a, b) -#define ADD32(a, b) _mm_add_epi32(a, b) -#define SHUF8(a, mask) _mm_shuffle_epi8(a, mask) - -#define ZERO256 _mm256_zeroall - -#define BSWAP_MASK 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f - -#ifdef VAES256 -#define VAESENC(a, key) _mm256_aesenc_epi128(a, key) -#define VAESENCLAST(a, key) _mm256_aesenclast_epi128(a, key) -#define EXTRACT128(a, imm) _mm256_extracti128_si256(a, imm) -#define XOR256(a, b) _mm256_xor_si256(a,b) -#define ADD32_256(a, b) _mm256_add_epi32(a,b) -#define SHUF8_256(a, mask) _mm256_shuffle_epi8(a, mask) -#endif - -#ifdef VAES512 -#define VAESENC(a, key) _mm512_aesenc_epi128(a, key) -#define VAESENCLAST(a, key) _mm512_aesenclast_epi128(a, key) -#define EXTRACT128(a, imm) _mm512_extracti64x2_epi64(a, imm) -#define XOR512(a, b) _mm512_xor_si512(a,b) -#define ADD32_512(a, b) _mm512_add_epi32(a,b) -#define SHUF8_512(a, mask) _mm512_shuffle_epi8(a, mask) -#endif - -_INLINE_ __m128i load_m128i(IN const uint8_t *ctr) -{ - return _mm_set_epi8(ctr[0], ctr[1], ctr[2], ctr[3], - ctr[4], ctr[5], ctr[6], ctr[7], - ctr[8], ctr[9], ctr[10], ctr[11], - ctr[12], ctr[13], ctr[14], ctr[15]); -} - -_INLINE_ __m128i loadr_m128i(IN const uint8_t *ctr) -{ - return _mm_setr_epi8(ctr[0], ctr[1], ctr[2], ctr[3], - ctr[4], ctr[5], ctr[6], ctr[7], - ctr[8], ctr[9], ctr[10], ctr[11], - ctr[12], ctr[13], ctr[14], ctr[15]); -} - -void aes256_enc(OUT uint8_t *ct, - IN const uint8_t *pt, - IN const aes256_ks_t *ks) { - uint32_t i = 0; - __m128i block = loadr_m128i(pt); - - block = XOR(block, ks->keys[0]); - for (i = 1; i < AES256_ROUNDS; i++) { - block = AESENC(block, ks->keys[i]); - } - block = AESENCLAST(block, ks->keys[AES256_ROUNDS]); - - _mm_storeu_si128((void*)ct, block); - - // Delete secrets from registers if any. - ZERO256(); -} - -void aes256_ctr_enc(OUT uint8_t *ct, - IN const uint8_t *ctr, - IN const uint32_t num_blocks, - IN const aes256_ks_t *ks) -{ - __m128i ctr_block = load_m128i(ctr); - - const __m128i bswap_mask = _mm_set_epi32(BSWAP_MASK); - const __m128i one = _mm_set_epi32(0,0,0,1); - - __m128i block = SHUF8(ctr_block, bswap_mask); - - for (uint32_t bidx = 0; bidx < num_blocks; bidx++) - { - block = XOR(block, ks->keys[0]); - for (uint32_t i = 1; i < AES256_ROUNDS; i++) { - block = AESENC(block, ks->keys[i]); - } - block = AESENCLAST(block, ks->keys[AES256_ROUNDS]); - - //We use memcpy to avoid align casting. - _mm_storeu_si128((void*)&ct[16*bidx], block); - - ctr_block = ADD32(ctr_block, one); - block = SHUF8(ctr_block, bswap_mask); - } - - // Delete secrets from registers if any. - ZERO256(); -} - -#ifdef VAES256 -_INLINE_ void load_ks(OUT __m256i ks256[AES256_ROUNDS + 1], - IN const aes256_ks_t *ks) -{ - for(uint32_t i = 0; i < AES256_ROUNDS + 1; i++) - { - ks256[i] = _mm256_broadcastsi128_si256(ks->keys[i]); - } -} - -// NIST 800-90A Table 3, Section 10.2.1 (no derivation function) states that -// max_number_of_bits_per_request is min((2^ctr_len - 4) x block_len, 2^19) <= 2^19 -// Therefore the maximal number of blocks (16 bytes) is 2^19/128 = 2^19/2^7 = 2^12 < 2^32 -// Here num_blocks is assumed to be less then 2^32. -// It is the caller responsiblity to ensure it. 
-void aes256_ctr_enc256(OUT uint8_t *ct, - IN const uint8_t *ctr, - IN const uint32_t num_blocks, - IN const aes256_ks_t *ks) -{ - const uint64_t num_par_blocks = num_blocks/2; - const uint64_t blocks_rem = num_blocks - (2*(num_par_blocks)); - - __m256i ks256[AES256_ROUNDS + 1]; - load_ks(ks256, ks); - - __m128i single_block = load_m128i(ctr); - __m256i ctr_blocks = _mm256_broadcastsi128_si256(single_block); - - // Preparing the masks - const __m256i bswap_mask = _mm256_set_epi32(BSWAP_MASK, BSWAP_MASK); - const __m256i two = _mm256_set_epi32(0,0,0,2,0,0,0,2); - const __m256i init = _mm256_set_epi32(0,0,0,1,0,0,0,0); - - // Initialize two parallel counters - ctr_blocks = ADD32_256(ctr_blocks, init); - __m256i p = SHUF8_256(ctr_blocks, bswap_mask); - - for (uint32_t block_idx = 0; block_idx < num_par_blocks; block_idx++) - { - p = XOR256(p, ks256[0]); - for (uint32_t i = 1; i < AES256_ROUNDS; i++) - { - p = VAESENC(p, ks256[i]); - } - p = VAESENCLAST(p, ks256[AES256_ROUNDS]); - - // We use memcpy to avoid align casting. - _mm256_storeu_si256((__m256i *)&ct[PAR_AES_BLOCK_SIZE * block_idx], p); - - // Increase the two counters in parallel - ctr_blocks = ADD32_256(ctr_blocks, two); - p = SHUF8_256(ctr_blocks, bswap_mask); - } - - if(0 != blocks_rem) - { - single_block = EXTRACT128(p, 0); - aes256_ctr_enc(&ct[PAR_AES_BLOCK_SIZE * num_par_blocks], - (const uint8_t*)&single_block, blocks_rem, ks); - } - - // Delete secrets from registers if any. - ZERO256(); -} - -#endif //VAES256 - -#ifdef VAES512 - -_INLINE_ void load_ks(OUT __m512i ks512[AES256_ROUNDS + 1], - IN const aes256_ks_t *ks) -{ - for(uint32_t i = 0; i < AES256_ROUNDS + 1; i++) - { - ks512[i] = _mm512_broadcast_i32x4(ks->keys[i]); - } -} - -// NIST 800-90A Table 3, Section 10.2.1 (no derivation function) states that -// max_number_of_bits_per_request is min((2^ctr_len - 4) x block_len, 2^19) <= 2^19 -// Therefore the maximal number of blocks (16 bytes) is 2^19/128 = 2^19/2^7 = 2^12 < 2^32 -// Here num_blocks is assumed to be less then 2^32. -// It is the caller responsiblity to ensure it. -void aes256_ctr_enc512(OUT uint8_t *ct, - IN const uint8_t *ctr, - IN const uint32_t num_blocks, - IN const aes256_ks_t *ks) -{ - const uint64_t num_par_blocks = num_blocks/4; - const uint64_t blocks_rem = num_blocks - (4*(num_par_blocks)); - - __m512i ks512[AES256_ROUNDS + 1]; - load_ks(ks512, ks); - - __m128i single_block = load_m128i(ctr); - __m512i ctr_blocks = _mm512_broadcast_i32x4(single_block); - - // Preparing the masks - const __m512i bswap_mask = _mm512_set_epi32(BSWAP_MASK, BSWAP_MASK, - BSWAP_MASK, BSWAP_MASK); - const __m512i four = _mm512_set_epi32(0,0,0,4,0,0,0,4,0,0,0,4,0,0,0,4); - const __m512i init = _mm512_set_epi32(0,0,0,3,0,0,0,2,0,0,0,1,0,0,0,0); - - // Initialize four parallel counters - ctr_blocks = ADD32_512(ctr_blocks, init); - __m512i p = SHUF8_512(ctr_blocks, bswap_mask); - - for (uint32_t block_idx = 0; block_idx < num_par_blocks; block_idx++) - { - p = XOR512(p, ks512[0]); - for (uint32_t i = 1; i < AES256_ROUNDS; i++) - { - p = VAESENC(p, ks512[i]); - } - p = VAESENCLAST(p, ks512[AES256_ROUNDS]); - - - // We use memcpy to avoid align casting. 
- _mm512_storeu_si512(&ct[PAR_AES_BLOCK_SIZE * block_idx], p); - - // Increase the four counters in parallel - ctr_blocks = ADD32_512(ctr_blocks, four); - p = SHUF8_512(ctr_blocks, bswap_mask); - } - - if(0 != blocks_rem) - { - single_block = EXTRACT128(p, 0); - aes256_ctr_enc(&ct[PAR_AES_BLOCK_SIZE * num_par_blocks], - (const uint8_t*)&single_block, blocks_rem, ks); - } - - // Delete secrets from registers if any. - ZERO256(); -} - -#endif //VAES512 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes_ni.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes_ni.h deleted file mode 100644 index 3d2b21ecf5..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/aes_ni.h +++ /dev/null @@ -1,85 +0,0 @@ -/*************************************************************************** -* Written by Nir Drucker and Shay Gueron -* AWS Cryptographic Algorithms Group -* (ndrucker@amazon.com, gueron@amazon.com) -* -* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. -* -* Licensed under the Apache License, Version 2.0 (the "License"). -* You may not use this file except in compliance with the License. -* A copy of the License is located at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* or in the "license" file accompanying this file. This file is distributed -* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -* express or implied. See the License for the specific language governing -* permissions and limitations under the License. -* The license is detailed in the file LICENSE.txt, and applies to this file. -* ***************************************************************************/ - -#pragma once - -#include -#include -#include "defs.h" - -#define MAX_AES_INVOKATION (MASK(32)) - -#define AES256_KEY_SIZE (32ULL) -#define AES256_KEY_BITS (AES256_KEY_SIZE * 8) -#define AES_BLOCK_SIZE (16ULL) -#define AES256_ROUNDS (14ULL) - -#ifdef VAES256 -#define PAR_AES_BLOCK_SIZE (AES_BLOCK_SIZE*2) -#elif defined(VAES512) -#define PAR_AES_BLOCK_SIZE (AES_BLOCK_SIZE*4) -#endif - -typedef ALIGN(16) struct aes256_key_s { - uint8_t raw[AES256_KEY_SIZE]; -} aes256_key_t; - -typedef ALIGN(16) struct aes256_ks_s { - __m128i keys[AES256_ROUNDS + 1]; -} aes256_ks_t; - -// The ks parameter must be 16 bytes aligned! -EXTERNC void aes256_key_expansion(OUT aes256_ks_t *ks, - IN const aes256_key_t *key); - -// Encrypt one 128-bit block ct = E(pt,ks) -void aes256_enc(OUT uint8_t *ct, - IN const uint8_t *pt, - IN const aes256_ks_t *ks); - -// Encrypt num_blocks 128-bit blocks -// ct[15:0] = E(pt[15:0],ks) -// ct[31:16] = E(pt[15:0] + 1,ks) -// ... -// ct[16*num_blocks - 1:16*(num_blocks-1)] = E(pt[15:0] + num_blocks,ks) -void aes256_ctr_enc(OUT uint8_t *ct, - IN const uint8_t *pt, - IN const uint32_t num_blocks, - IN const aes256_ks_t *ks); - -// Encrypt num_blocks 128-bit blocks using VAES (AVX-2) -// ct[15:0] = E(pt[15:0],ks) -// ct[31:16] = E(pt[15:0] + 1,ks) -// ... -// ct[16*num_blocks - 1:16*(num_blocks-1)] = E(pt[15:0] + num_blocks,ks) -void aes256_ctr_enc256(OUT uint8_t *ct, - IN const uint8_t *ctr, - IN const uint32_t num_blocks, - IN const aes256_ks_t *ks); - -// Encrypt num_blocks 128-bit blocks using VAES (AVX512) -// ct[15:0] = E(pt[15:0],ks) -// ct[31:16] = E(pt[15:0] + 1,ks) -// ... 
-// ct[16*num_blocks - 1:16*(num_blocks-1)] = E(pt[15:0] + num_blocks,ks) -void aes256_ctr_enc512(OUT uint8_t *ct, - IN const uint8_t *ctr, - IN const uint32_t num_blocks, - IN const aes256_ks_t *ks); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/algebra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/algebra.c new file mode 100644 index 0000000000..50629f9fec --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/algebra.c @@ -0,0 +1,280 @@ +#include +#include "internal.h" + +// Internal helper functions + +void +quat_alg_init_set_ui(quat_alg_t *alg, unsigned int p) +{ + ibz_t bp; + ibz_init(&bp); + ibz_set(&bp, p); + quat_alg_init_set(alg, &bp); + ibz_finalize(&bp); +} + +void +quat_alg_coord_mul(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b, const quat_alg_t *alg) +{ + ibz_t prod; + ibz_vec_4_t sum; + ibz_init(&prod); + ibz_vec_4_init(&sum); + + ibz_set(&(sum[0]), 0); + ibz_set(&(sum[1]), 0); + ibz_set(&(sum[2]), 0); + ibz_set(&(sum[3]), 0); + + // compute 1 coordinate + ibz_mul(&prod, &((*a)[2]), &((*b)[2])); + ibz_sub(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[3])); + ibz_sub(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&(sum[0]), &(sum[0]), &(alg->p)); + ibz_mul(&prod, &((*a)[0]), &((*b)[0])); + ibz_add(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[1])); + ibz_sub(&(sum[0]), &(sum[0]), &prod); + // compute i coordiante + ibz_mul(&prod, &((*a)[2]), &((*b)[3])); + ibz_add(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[2])); + ibz_sub(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&(sum[1]), &(sum[1]), &(alg->p)); + ibz_mul(&prod, &((*a)[0]), &((*b)[1])); + ibz_add(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[0])); + ibz_add(&(sum[1]), &(sum[1]), &prod); + // compute j coordiante + ibz_mul(&prod, &((*a)[0]), &((*b)[2])); + ibz_add(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &((*a)[2]), &((*b)[0])); + ibz_add(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[3])); + ibz_sub(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[1])); + ibz_add(&(sum[2]), &(sum[2]), &prod); + // compute ij coordiante + ibz_mul(&prod, &((*a)[0]), &((*b)[3])); + ibz_add(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[0])); + ibz_add(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &((*a)[2]), &((*b)[1])); + ibz_sub(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[2])); + ibz_add(&(sum[3]), &(sum[3]), &prod); + + ibz_copy(&((*res)[0]), &(sum[0])); + ibz_copy(&((*res)[1]), &(sum[1])); + ibz_copy(&((*res)[2]), &(sum[2])); + ibz_copy(&((*res)[3]), &(sum[3])); + + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} + +void +quat_alg_equal_denom(quat_alg_elem_t *res_a, quat_alg_elem_t *res_b, const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + ibz_t gcd, r; + ibz_init(&gcd); + ibz_init(&r); + ibz_gcd(&gcd, &(a->denom), &(b->denom)); + // temporarily set res_a.denom to a.denom/gcd, and res_b.denom to b.denom/gcd + ibz_div(&(res_a->denom), &r, &(a->denom), &gcd); + ibz_div(&(res_b->denom), &r, &(b->denom), &gcd); + for (int i = 0; i < 4; i++) { + // multiply coordiates by reduced denominators from the other element + ibz_mul(&(res_a->coord[i]), &(a->coord[i]), &(res_b->denom)); + ibz_mul(&(res_b->coord[i]), &(b->coord[i]), &(res_a->denom)); + } + // multiply both reduced denominators + ibz_mul(&(res_a->denom), &(res_a->denom), &(res_b->denom)); + // multiply them by the gcd to get the new common denominator + 
ibz_mul(&(res_b->denom), &(res_a->denom), &gcd); + ibz_mul(&(res_a->denom), &(res_a->denom), &gcd); + ibz_finalize(&gcd); + ibz_finalize(&r); +} + +// Public Functions + +void +quat_alg_add(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + quat_alg_elem_t res_a, res_b; + quat_alg_elem_init(&res_a); + quat_alg_elem_init(&res_b); + // put both on the same denominator + quat_alg_equal_denom(&res_a, &res_b, a, b); + // then add + ibz_copy(&(res->denom), &(res_a.denom)); + ibz_vec_4_add(&(res->coord), &(res_a.coord), &(res_b.coord)); + quat_alg_elem_finalize(&res_a); + quat_alg_elem_finalize(&res_b); +} + +void +quat_alg_sub(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + quat_alg_elem_t res_a, res_b; + quat_alg_elem_init(&res_a); + quat_alg_elem_init(&res_b); + // put both on the same denominator + quat_alg_equal_denom(&res_a, &res_b, a, b); + // then substract + ibz_copy(&res->denom, &res_a.denom); + ibz_vec_4_sub(&res->coord, &res_a.coord, &res_b.coord); + quat_alg_elem_finalize(&res_a); + quat_alg_elem_finalize(&res_b); +} + +void +quat_alg_mul(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b, const quat_alg_t *alg) +{ + // denominator: product of denominators + ibz_mul(&(res->denom), &(a->denom), &(b->denom)); + quat_alg_coord_mul(&(res->coord), &(a->coord), &(b->coord), alg); +} + +void +quat_alg_norm(ibz_t *res_num, ibz_t *res_denom, const quat_alg_elem_t *a, const quat_alg_t *alg) +{ + ibz_t r, g; + quat_alg_elem_t norm; + ibz_init(&r); + ibz_init(&g); + quat_alg_elem_init(&norm); + + quat_alg_conj(&norm, a); + quat_alg_mul(&norm, a, &norm, alg); + ibz_gcd(&g, &(norm.coord[0]), &(norm.denom)); + ibz_div(res_num, &r, &(norm.coord[0]), &g); + ibz_div(res_denom, &r, &(norm.denom), &g); + ibz_abs(res_denom, res_denom); + ibz_abs(res_num, res_num); + assert(ibz_cmp(res_denom, &ibz_const_zero) > 0); + + quat_alg_elem_finalize(&norm); + ibz_finalize(&r); + ibz_finalize(&g); +} + +void +quat_alg_scalar(quat_alg_elem_t *elem, const ibz_t *numerator, const ibz_t *denominator) +{ + ibz_copy(&(elem->denom), denominator); + ibz_copy(&(elem->coord[0]), numerator); + ibz_set(&(elem->coord[1]), 0); + ibz_set(&(elem->coord[2]), 0); + ibz_set(&(elem->coord[3]), 0); +} + +void +quat_alg_conj(quat_alg_elem_t *conj, const quat_alg_elem_t *x) +{ + ibz_copy(&(conj->denom), &(x->denom)); + ibz_copy(&(conj->coord[0]), &(x->coord[0])); + ibz_neg(&(conj->coord[1]), &(x->coord[1])); + ibz_neg(&(conj->coord[2]), &(x->coord[2])); + ibz_neg(&(conj->coord[3]), &(x->coord[3])); +} + +void +quat_alg_make_primitive(ibz_vec_4_t *primitive_x, ibz_t *content, const quat_alg_elem_t *x, const quat_lattice_t *order) +{ + int ok UNUSED = quat_lattice_contains(primitive_x, order, x); + assert(ok); + ibz_vec_4_content(content, primitive_x); + ibz_t r; + ibz_init(&r); + for (int i = 0; i < 4; i++) { + ibz_div(*primitive_x + i, &r, *primitive_x + i, content); + } + ibz_finalize(&r); +} + +void +quat_alg_normalize(quat_alg_elem_t *x) +{ + ibz_t gcd, sign, r; + ibz_init(&gcd); + ibz_init(&sign); + ibz_init(&r); + ibz_vec_4_content(&gcd, &(x->coord)); + ibz_gcd(&gcd, &gcd, &(x->denom)); + ibz_div(&(x->denom), &r, &(x->denom), &gcd); + ibz_vec_4_scalar_div(&(x->coord), &gcd, &(x->coord)); + ibz_set(&sign, 2 * (0 > ibz_cmp(&ibz_const_zero, &(x->denom))) - 1); + ibz_vec_4_scalar_mul(&(x->coord), &sign, &(x->coord)); + ibz_mul(&(x->denom), &sign, &(x->denom)); + ibz_finalize(&gcd); + ibz_finalize(&sign); + ibz_finalize(&r); +} + +int 
+quat_alg_elem_equal(const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + quat_alg_elem_t diff; + quat_alg_elem_init(&diff); + quat_alg_sub(&diff, a, b); + int res = quat_alg_elem_is_zero(&diff); + quat_alg_elem_finalize(&diff); + return (res); +} + +int +quat_alg_elem_is_zero(const quat_alg_elem_t *x) +{ + int res = ibz_vec_4_is_zero(&(x->coord)); + return (res); +} + +void +quat_alg_elem_set(quat_alg_elem_t *elem, int32_t denom, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) +{ + ibz_set(&(elem->coord[0]), coord0); + ibz_set(&(elem->coord[1]), coord1); + ibz_set(&(elem->coord[2]), coord2); + ibz_set(&(elem->coord[3]), coord3); + + ibz_set(&(elem->denom), denom); +} + +void +quat_alg_elem_copy(quat_alg_elem_t *copy, const quat_alg_elem_t *copied) +{ + ibz_copy(©->denom, &copied->denom); + ibz_copy(©->coord[0], &copied->coord[0]); + ibz_copy(©->coord[1], &copied->coord[1]); + ibz_copy(©->coord[2], &copied->coord[2]); + ibz_copy(©->coord[3], &copied->coord[3]); +} + +// helper functions for lattices +void +quat_alg_elem_copy_ibz(quat_alg_elem_t *elem, + const ibz_t *denom, + const ibz_t *coord0, + const ibz_t *coord1, + const ibz_t *coord2, + const ibz_t *coord3) +{ + ibz_copy(&(elem->coord[0]), coord0); + ibz_copy(&(elem->coord[1]), coord1); + ibz_copy(&(elem->coord[2]), coord2); + ibz_copy(&(elem->coord[3]), coord3); + + ibz_copy(&(elem->denom), denom); +} + +void +quat_alg_elem_mul_by_scalar(quat_alg_elem_t *res, const ibz_t *scalar, const quat_alg_elem_t *elem) +{ + for (int i = 0; i < 4; i++) { + ibz_mul(&(res->coord[i]), &(elem->coord[i]), scalar); + } + ibz_copy(&(res->denom), &(elem->denom)); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.c deleted file mode 100644 index 983ba49adf..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.c +++ /dev/null @@ -1,201 +0,0 @@ -/* Copyright (c) 2017, Google Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ - -/*************************************************************************** - * Small modification by Nir Drucker and Shay Gueron - * AWS Cryptographic Algorithms Group - * (ndrucker@amazon.com, gueron@amazon.com) - * include: - * 1) Use memcpy/memset instead of OPENSSL_memcpy/memset - * 2) Include aes.h as the underlying aes code - * 3) Modifying the drbg structure - * ***************************************************************************/ - -#include "ctr_drbg.h" -#include - - -// Section references in this file refer to SP 800-90Ar1: -// http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-90Ar1.pdf - -int CTR_DRBG_init(CTR_DRBG_STATE *drbg, - const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], - const uint8_t *personalization, size_t personalization_len) { - // Section 10.2.1.3.1 - if (personalization_len > CTR_DRBG_ENTROPY_LEN) { - return 0; - } - - uint8_t seed_material[CTR_DRBG_ENTROPY_LEN]; - memcpy(seed_material, entropy, CTR_DRBG_ENTROPY_LEN); - - for (size_t i = 0; i < personalization_len; i++) { - seed_material[i] ^= personalization[i]; - } - - // Section 10.2.1.2 - // kInitMask is the result of encrypting blocks with big-endian value 1, 2 - // and 3 with the all-zero AES-256 key. - static const uint8_t kInitMask[CTR_DRBG_ENTROPY_LEN] = { - 0x53, 0x0f, 0x8a, 0xfb, 0xc7, 0x45, 0x36, 0xb9, 0xa9, 0x63, 0xb4, 0xf1, - 0xc4, 0xcb, 0x73, 0x8b, 0xce, 0xa7, 0x40, 0x3d, 0x4d, 0x60, 0x6b, 0x6e, - 0x07, 0x4e, 0xc5, 0xd3, 0xba, 0xf3, 0x9d, 0x18, 0x72, 0x60, 0x03, 0xca, - 0x37, 0xa6, 0x2a, 0x74, 0xd1, 0xa2, 0xf5, 0x8e, 0x75, 0x06, 0x35, 0x8e, - }; - - for (size_t i = 0; i < sizeof(kInitMask); i++) { - seed_material[i] ^= kInitMask[i]; - } - - aes256_key_t key; - memcpy(key.raw, seed_material, 32); - memcpy(drbg->counter.bytes, seed_material + 32, 16); - - aes256_key_expansion(&drbg->ks, &key); - drbg->reseed_counter = 1; - - return 1; -} - -// ctr_inc adds |n| to the last four bytes of |drbg->counter|, treated as a -// big-endian number. -static void ctr32_add(CTR_DRBG_STATE *drbg, uint32_t n) { - drbg->counter.words[3] = - CRYPTO_bswap4(CRYPTO_bswap4(drbg->counter.words[3]) + n); -} - -static int ctr_drbg_update(CTR_DRBG_STATE *drbg, const uint8_t *data, - size_t data_len) { - // Per section 10.2.1.2, |data_len| must be |CTR_DRBG_ENTROPY_LEN|. Here, we - // allow shorter inputs and right-pad them with zeros. This is equivalent to - // the specified algorithm but saves a copy in |CTR_DRBG_generate|. 
- if (data_len > CTR_DRBG_ENTROPY_LEN) { - return 0; - } - - uint8_t temp[CTR_DRBG_ENTROPY_LEN]; - for (size_t i = 0; i < CTR_DRBG_ENTROPY_LEN; i += AES_BLOCK_SIZE) { - ctr32_add(drbg, 1); - aes256_enc(temp + i, drbg->counter.bytes, &drbg->ks); - } - - for (size_t i = 0; i < data_len; i++) { - temp[i] ^= data[i]; - } - - aes256_key_t key; - memcpy(key.raw, temp, 32); - memcpy(drbg->counter.bytes, temp + 32, 16); - aes256_key_expansion(&drbg->ks, &key); - - return 1; -} - -int CTR_DRBG_reseed(CTR_DRBG_STATE *drbg, - const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], - const uint8_t *additional_data, - size_t additional_data_len) { - // Section 10.2.1.4 - uint8_t entropy_copy[CTR_DRBG_ENTROPY_LEN]; - - if (additional_data_len > 0) { - if (additional_data_len > CTR_DRBG_ENTROPY_LEN) { - return 0; - } - - memcpy(entropy_copy, entropy, CTR_DRBG_ENTROPY_LEN); - for (size_t i = 0; i < additional_data_len; i++) { - entropy_copy[i] ^= additional_data[i]; - } - - entropy = entropy_copy; - } - - if (!ctr_drbg_update(drbg, entropy, CTR_DRBG_ENTROPY_LEN)) { - return 0; - } - - drbg->reseed_counter = 1; - - return 1; -} - -int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out, size_t out_len, - const uint8_t *additional_data, - size_t additional_data_len) { - if (additional_data_len != 0 && - !ctr_drbg_update(drbg, additional_data, additional_data_len)) { - return 0; - } - - // kChunkSize is used to interact better with the cache. Since the AES-CTR - // code assumes that it's encrypting rather than just writing keystream, the - // buffer has to be zeroed first. Without chunking, large reads would zero - // the whole buffer, flushing the L1 cache, and then do another pass (missing - // the cache every time) to “encrypt” it. The code can avoid this by - // chunking. - static const size_t kChunkSize = 8 * 1024; - - while (out_len >= AES_BLOCK_SIZE) { - size_t todo = kChunkSize; - if (todo > out_len) { - todo = out_len; - } - - todo &= ~(AES_BLOCK_SIZE - 1); - - const size_t num_blocks = todo / AES_BLOCK_SIZE; - if (1) { - memset(out, 0, todo); - ctr32_add(drbg, 1); -#ifdef VAES512 - aes256_ctr_enc512(out, drbg->counter.bytes, num_blocks, &drbg->ks); -#elif defined(VAES256) - aes256_ctr_enc256(out, drbg->counter.bytes, num_blocks, &drbg->ks); -#else - aes256_ctr_enc(out, drbg->counter.bytes, num_blocks, &drbg->ks); -#endif - ctr32_add(drbg, num_blocks - 1); - } else { - for (size_t i = 0; i < todo; i += AES_BLOCK_SIZE) { - ctr32_add(drbg, 1); - aes256_enc(&out[i], drbg->counter.bytes, &drbg->ks); - } - } - - out += todo; - out_len -= todo; - } - - if (out_len > 0) { - uint8_t block[AES_BLOCK_SIZE]; - ctr32_add(drbg, 1); - aes256_enc(block, drbg->counter.bytes, &drbg->ks); - - memcpy(out, block, out_len); - } - - // Right-padding |additional_data| in step 2.2 is handled implicitly by - // |ctr_drbg_update|, to save a copy. - if (!ctr_drbg_update(drbg, additional_data, additional_data_len)) { - return 0; - } - - drbg->reseed_counter++; - return 1; -} - -void CTR_DRBG_clear(CTR_DRBG_STATE *drbg) { - secure_clean((uint8_t *)drbg, sizeof(CTR_DRBG_STATE)); -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.h deleted file mode 100644 index 2d1b1f3f0c..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ctr_drbg.h +++ /dev/null @@ -1,78 +0,0 @@ -/* Copyright (c) 2017, Google Inc. 
- * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -/*************************************************************************** -* Small modification by Nir Drucker and Shay Gueron -* AWS Cryptographic Algorithms Group -* (ndrucker@amazon.com, gueron@amazon.com) -* include: -* 1) Use memcpy/memset instead of OPENSSL_memcpy/memset -* 2) Include aes.h as the underlying aes code -* 3) Modifying the drbg structure -* ***************************************************************************/ - -#pragma once - -#if defined(__cplusplus) -extern "C" { -#endif - -#include "aes_ni.h" - -// CTR_DRBG_STATE contains the state of a CTR_DRBG based on AES-256. See SP -// 800-90Ar1. -typedef struct { - aes256_ks_t ks; - union { - uint8_t bytes[16]; - uint32_t words[4]; - } counter; - uint64_t reseed_counter; -} CTR_DRBG_STATE; - -// See SP 800-90Ar1, table 3. -#define CTR_DRBG_ENTROPY_LEN 48 - -// CTR_DRBG_init initialises |*drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of -// entropy in |entropy| and, optionally, a personalization string up to -// |CTR_DRBG_ENTROPY_LEN| bytes in length. It returns one on success and zero -// on error. -int CTR_DRBG_init(CTR_DRBG_STATE *drbg, - const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], - const uint8_t *personalization, - size_t personalization_len); - -// CTR_DRBG_reseed reseeds |drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of entropy -// in |entropy| and, optionally, up to |CTR_DRBG_ENTROPY_LEN| bytes of -// additional data. It returns one on success or zero on error. -int CTR_DRBG_reseed(CTR_DRBG_STATE *drbg, - const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], - const uint8_t *additional_data, - size_t additional_data_len); - -// CTR_DRBG_generate processes to up |CTR_DRBG_ENTROPY_LEN| bytes of additional -// data (if any) and then writes |out_len| random bytes to |out|. It returns one on success or -// zero on error. -int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out, - size_t out_len, - const uint8_t *additional_data, - size_t additional_data_len); - -// CTR_DRBG_clear zeroises the state of |drbg|. 
-void CTR_DRBG_clear(CTR_DRBG_STATE *drbg); - - -#if defined(__cplusplus) -} // extern C -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim2.c new file mode 100644 index 0000000000..b31ae7771a --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim2.c @@ -0,0 +1,132 @@ +#include +#include "internal.h" + +// internal helpers, also for other files +void +ibz_vec_2_set(ibz_vec_2_t *vec, int a0, int a1) +{ + ibz_set(&((*vec)[0]), a0); + ibz_set(&((*vec)[1]), a1); +} +void +ibz_mat_2x2_set(ibz_mat_2x2_t *mat, int a00, int a01, int a10, int a11) +{ + ibz_set(&((*mat)[0][0]), a00); + ibz_set(&((*mat)[0][1]), a01); + ibz_set(&((*mat)[1][0]), a10); + ibz_set(&((*mat)[1][1]), a11); +} + +void +ibz_mat_2x2_copy(ibz_mat_2x2_t *copy, const ibz_mat_2x2_t *copied) +{ + ibz_copy(&((*copy)[0][0]), &((*copied)[0][0])); + ibz_copy(&((*copy)[0][1]), &((*copied)[0][1])); + ibz_copy(&((*copy)[1][0]), &((*copied)[1][0])); + ibz_copy(&((*copy)[1][1]), &((*copied)[1][1])); +} + +void +ibz_mat_2x2_add(ibz_mat_2x2_t *sum, const ibz_mat_2x2_t *a, const ibz_mat_2x2_t *b) +{ + ibz_add(&((*sum)[0][0]), &((*a)[0][0]), &((*b)[0][0])); + ibz_add(&((*sum)[0][1]), &((*a)[0][1]), &((*b)[0][1])); + ibz_add(&((*sum)[1][0]), &((*a)[1][0]), &((*b)[1][0])); + ibz_add(&((*sum)[1][1]), &((*a)[1][1]), &((*b)[1][1])); +} + +void +ibz_mat_2x2_det_from_ibz(ibz_t *det, const ibz_t *a11, const ibz_t *a12, const ibz_t *a21, const ibz_t *a22) +{ + ibz_t prod; + ibz_init(&prod); + ibz_mul(&prod, a12, a21); + ibz_mul(det, a11, a22); + ibz_sub(det, det, &prod); + ibz_finalize(&prod); +} + +void +ibz_mat_2x2_eval(ibz_vec_2_t *res, const ibz_mat_2x2_t *mat, const ibz_vec_2_t *vec) +{ + ibz_t prod; + ibz_vec_2_t matvec; + ibz_init(&prod); + ibz_vec_2_init(&matvec); + ibz_mul(&prod, &((*mat)[0][0]), &((*vec)[0])); + ibz_copy(&(matvec[0]), &prod); + ibz_mul(&prod, &((*mat)[0][1]), &((*vec)[1])); + ibz_add(&(matvec[0]), &(matvec[0]), &prod); + ibz_mul(&prod, &((*mat)[1][0]), &((*vec)[0])); + ibz_copy(&(matvec[1]), &prod); + ibz_mul(&prod, &((*mat)[1][1]), &((*vec)[1])); + ibz_add(&(matvec[1]), &(matvec[1]), &prod); + ibz_copy(&((*res)[0]), &(matvec[0])); + ibz_copy(&((*res)[1]), &(matvec[1])); + ibz_finalize(&prod); + ibz_vec_2_finalize(&matvec); +} + +// modular 2x2 operations + +void +ibz_2x2_mul_mod(ibz_mat_2x2_t *prod, const ibz_mat_2x2_t *mat_a, const ibz_mat_2x2_t *mat_b, const ibz_t *m) +{ + ibz_t mul; + ibz_mat_2x2_t sums; + ibz_init(&mul); + ibz_mat_2x2_init(&sums); + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_set(&(sums[i][j]), 0); + } + } + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + for (int k = 0; k < 2; k++) { + ibz_mul(&mul, &((*mat_a)[i][k]), &((*mat_b)[k][j])); + ibz_add(&(sums[i][j]), &(sums[i][j]), &mul); + ibz_mod(&(sums[i][j]), &(sums[i][j]), m); + } + } + } + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_copy(&((*prod)[i][j]), &(sums[i][j])); + } + } + ibz_finalize(&mul); + ibz_mat_2x2_finalize(&sums); +} + +int +ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m) +{ + ibz_t det, prod; + ibz_init(&det); + ibz_init(&prod); + ibz_mul(&det, &((*mat)[0][0]), &((*mat)[1][1])); + ibz_mod(&det, &det, m); + ibz_mul(&prod, &((*mat)[0][1]), &((*mat)[1][0])); + ibz_sub(&det, &det, &prod); + ibz_mod(&det, &det, m); + int res = ibz_invmod(&det, &det, m); + // return 0 matrix if non invertible determinant + ibz_set(&prod, res); + ibz_mul(&det, 
&det, &prod); + // compute inverse + ibz_copy(&prod, &((*mat)[0][0])); + ibz_copy(&((*inv)[0][0]), &((*mat)[1][1])); + ibz_copy(&((*inv)[1][1]), &prod); + ibz_neg(&((*inv)[1][0]), &((*mat)[1][0])); + ibz_neg(&((*inv)[0][1]), &((*mat)[0][1])); + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_mul(&((*inv)[i][j]), &((*inv)[i][j]), &det); + ibz_mod(&((*inv)[i][j]), &((*inv)[i][j]), m); + } + } + ibz_finalize(&det); + ibz_finalize(&prod); + return (res); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim4.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim4.c new file mode 100644 index 0000000000..495dc2dcb2 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim4.c @@ -0,0 +1,470 @@ +#include +#include "internal.h" + +// internal helper functions +void +ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t *b) +{ + ibz_mat_4x4_t mat; + ibz_t prod; + ibz_init(&prod); + ibz_mat_4x4_init(&mat); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&(mat[i][j]), 0); + for (int k = 0; k < 4; k++) { + ibz_mul(&prod, &((*a)[i][k]), &((*b)[k][j])); + ibz_add(&(mat[i][j]), &(mat[i][j]), &prod); + } + } + } + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&((*res)[i][j]), &(mat[i][j])); + } + } + ibz_mat_4x4_finalize(&mat); + ibz_finalize(&prod); +} + +// helper functions for lattices +void +ibz_vec_4_set(ibz_vec_4_t *vec, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) +{ + ibz_set(&((*vec)[0]), coord0); + ibz_set(&((*vec)[1]), coord1); + ibz_set(&((*vec)[2]), coord2); + ibz_set(&((*vec)[3]), coord3); +} + +void +ibz_vec_4_copy(ibz_vec_4_t *new, const ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_copy(&((*new)[i]), &((*vec)[i])); + } +} + +void +ibz_vec_4_copy_ibz(ibz_vec_4_t *res, const ibz_t *coord0, const ibz_t *coord1, const ibz_t *coord2, const ibz_t *coord3) +{ + ibz_copy(&((*res)[0]), coord0); + ibz_copy(&((*res)[1]), coord1); + ibz_copy(&((*res)[2]), coord2); + ibz_copy(&((*res)[3]), coord3); +} + +void +ibz_vec_4_content(ibz_t *content, const ibz_vec_4_t *v) +{ + ibz_gcd(content, &((*v)[0]), &((*v)[1])); + ibz_gcd(content, &((*v)[2]), content); + ibz_gcd(content, &((*v)[3]), content); +} + +void +ibz_vec_4_negate(ibz_vec_4_t *neg, const ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_neg(&((*neg)[i]), &((*vec)[i])); + } +} + +void +ibz_vec_4_add(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) +{ + ibz_add(&((*res)[0]), &((*a)[0]), &((*b)[0])); + ibz_add(&((*res)[1]), &((*a)[1]), &((*b)[1])); + ibz_add(&((*res)[2]), &((*a)[2]), &((*b)[2])); + ibz_add(&((*res)[3]), &((*a)[3]), &((*b)[3])); +} + +void +ibz_vec_4_sub(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) +{ + ibz_sub(&((*res)[0]), &((*a)[0]), &((*b)[0])); + ibz_sub(&((*res)[1]), &((*a)[1]), &((*b)[1])); + ibz_sub(&((*res)[2]), &((*a)[2]), &((*b)[2])); + ibz_sub(&((*res)[3]), &((*a)[3]), &((*b)[3])); +} + +int +ibz_vec_4_is_zero(const ibz_vec_4_t *x) +{ + int res = 1; + for (int i = 0; i < 4; i++) { + res &= ibz_is_zero(&((*x)[i])); + } + return (res); +} + +void +ibz_vec_4_linear_combination(ibz_vec_4_t *lc, + const ibz_t *coeff_a, + const ibz_vec_4_t *vec_a, + const ibz_t *coeff_b, + const ibz_vec_4_t *vec_b) +{ + ibz_t prod; + ibz_vec_4_t sums; + ibz_vec_4_init(&sums); + ibz_init(&prod); + for (int i = 0; i < 4; i++) { + ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); + ibz_mul(&prod, coeff_b, &((*vec_b)[i])); + ibz_add(&(sums[i]), 
&(sums[i]), &prod); + } + for (int i = 0; i < 4; i++) { + ibz_copy(&((*lc)[i]), &(sums[i])); + } + ibz_finalize(&prod); + ibz_vec_4_finalize(&sums); +} + +void +ibz_vec_4_scalar_mul(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_mul(&((*prod)[i]), &((*vec)[i]), scalar); + } +} + +int +ibz_vec_4_scalar_div(ibz_vec_4_t *quot, const ibz_t *scalar, const ibz_vec_4_t *vec) +{ + int res = 1; + ibz_t r; + ibz_init(&r); + for (int i = 0; i < 4; i++) { + ibz_div(&((*quot)[i]), &r, &((*vec)[i]), scalar); + res = res && ibz_is_zero(&r); + } + ibz_finalize(&r); + return (res); +} + +void +ibz_mat_4x4_copy(ibz_mat_4x4_t *new, const ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&((*new)[i][j]), &((*mat)[i][j])); + } + } +} + +void +ibz_mat_4x4_negate(ibz_mat_4x4_t *neg, const ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_neg(&((*neg)[i][j]), &((*mat)[i][j])); + } + } +} + +void +ibz_mat_4x4_transpose(ibz_mat_4x4_t *transposed, const ibz_mat_4x4_t *mat) +{ + ibz_mat_4x4_t work; + ibz_mat_4x4_init(&work); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(work[i][j]), &((*mat)[j][i])); + } + } + ibz_mat_4x4_copy(transposed, &work); + ibz_mat_4x4_finalize(&work); +} + +void +ibz_mat_4x4_zero(ibz_mat_4x4_t *zero) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&((*zero)[i][j]), 0); + } + } +} + +void +ibz_mat_4x4_identity(ibz_mat_4x4_t *id) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&((*id)[i][j]), 0); + } + ibz_set(&((*id)[i][i]), 1); + } +} + +int +ibz_mat_4x4_is_identity(const ibz_mat_4x4_t *mat) +{ + int res = 1; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + res = res && ibz_is_one(&((*mat)[i][j])) == (i == j); + } + } + return (res); +} + +int +ibz_mat_4x4_equal(const ibz_mat_4x4_t *mat1, const ibz_mat_4x4_t *mat2) +{ + int res = 0; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + res = res | ibz_cmp(&((*mat1)[i][j]), &((*mat2)[i][j])); + } + } + return (!res); +} + +void +ibz_mat_4x4_scalar_mul(ibz_mat_4x4_t *prod, const ibz_t *scalar, const ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_mul(&((*prod)[i][j]), &((*mat)[i][j]), scalar); + } + } +} + +void +ibz_mat_4x4_gcd(ibz_t *gcd, const ibz_mat_4x4_t *mat) +{ + ibz_t d; + ibz_init(&d); + ibz_copy(&d, &((*mat)[0][0])); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_gcd(&d, &d, &((*mat)[i][j])); + } + } + ibz_copy(gcd, &d); + ibz_finalize(&d); +} + +int +ibz_mat_4x4_scalar_div(ibz_mat_4x4_t *quot, const ibz_t *scalar, const ibz_mat_4x4_t *mat) +{ + int res = 1; + ibz_t r; + ibz_init(&r); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_div(&((*quot)[i][j]), &r, &((*mat)[i][j]), scalar); + res = res && ibz_is_zero(&r); + } + } + ibz_finalize(&r); + return (res); +} + +// 4x4 inversion helper functions +void +ibz_inv_dim4_make_coeff_pmp(ibz_t *coeff, + const ibz_t *a1, + const ibz_t *a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2) +{ + ibz_t prod, sum; + ibz_init(&prod); + ibz_init(&sum); + ibz_mul(&sum, a1, a2); + ibz_mul(&prod, b1, b2); + ibz_sub(&sum, &sum, &prod); + ibz_mul(&prod, c1, c2); + ibz_add(coeff, &sum, &prod); + ibz_finalize(&prod); + ibz_finalize(&sum); +} + +void +ibz_inv_dim4_make_coeff_mpm(ibz_t *coeff, + const ibz_t *a1, + const ibz_t 
*a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2) +{ + ibz_t prod, sum; + ibz_init(&prod); + ibz_init(&sum); + ibz_mul(&sum, b1, b2); + ibz_mul(&prod, a1, a2); + ibz_sub(&sum, &sum, &prod); + ibz_mul(&prod, c1, c2); + ibz_sub(coeff, &sum, &prod); + ibz_finalize(&prod); + ibz_finalize(&sum); +} + +// Method from https://www.geometrictools.com/Documentation/LaplaceExpansionTheorem.pdf 3rd of May +// 2023, 16h15 CEST +int +ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_4x4_t *mat) +{ + ibz_t prod, work_det; + ibz_mat_4x4_t work; + ibz_t s[6]; + ibz_t c[6]; + for (int i = 0; i < 6; i++) { + ibz_init(&(s[i])); + ibz_init(&(c[i])); + } + ibz_mat_4x4_init(&work); + ibz_init(&prod); + ibz_init(&work_det); + + // compute some 2x2 minors, store them in s and c + for (int i = 0; i < 3; i++) { + ibz_mat_2x2_det_from_ibz(&(s[i]), &((*mat)[0][0]), &((*mat)[0][i + 1]), &((*mat)[1][0]), &((*mat)[1][i + 1])); + ibz_mat_2x2_det_from_ibz(&(c[i]), &((*mat)[2][0]), &((*mat)[2][i + 1]), &((*mat)[3][0]), &((*mat)[3][i + 1])); + } + for (int i = 0; i < 2; i++) { + ibz_mat_2x2_det_from_ibz( + &(s[3 + i]), &((*mat)[0][1]), &((*mat)[0][2 + i]), &((*mat)[1][1]), &((*mat)[1][2 + i])); + ibz_mat_2x2_det_from_ibz( + &(c[3 + i]), &((*mat)[2][1]), &((*mat)[2][2 + i]), &((*mat)[3][1]), &((*mat)[3][2 + i])); + } + ibz_mat_2x2_det_from_ibz(&(s[5]), &((*mat)[0][2]), &((*mat)[0][3]), &((*mat)[1][2]), &((*mat)[1][3])); + ibz_mat_2x2_det_from_ibz(&(c[5]), &((*mat)[2][2]), &((*mat)[2][3]), &((*mat)[3][2]), &((*mat)[3][3])); + + // compute det + ibz_set(&work_det, 0); + for (int i = 0; i < 6; i++) { + ibz_mul(&prod, &(s[i]), &(c[5 - i])); + if ((i != 1) && (i != 4)) { + ibz_add(&work_det, &work_det, &prod); + } else { + ibz_sub(&work_det, &work_det, &prod); + } + } + // compute transposed adjugate + for (int j = 0; j < 4; j++) { + for (int k = 0; k < 2; k++) { + if ((k + j + 1) % 2 == 1) { + ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), + &((*mat)[1 - k][(j == 0)]), + &(c[6 - j - (j == 0)]), + &((*mat)[1 - k][2 - (j > 1)]), + &(c[4 - j - (j == 1)]), + &((*mat)[1 - k][3 - (j == 3)]), + &(c[3 - j - (j == 1) - (j == 2)])); + } else { + ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), + &((*mat)[1 - k][(j == 0)]), + &(c[6 - j - (j == 0)]), + &((*mat)[1 - k][2 - (j > 1)]), + &(c[4 - j - (j == 1)]), + &((*mat)[1 - k][3 - (j == 3)]), + &(c[3 - j - (j == 1) - (j == 2)])); + } + } + for (int k = 2; k < 4; k++) { + if ((k + j + 1) % 2 == 1) { + ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), + &((*mat)[3 - (k == 3)][(j == 0)]), + &(s[6 - j - (j == 0)]), + &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(s[4 - j - (j == 1)]), + &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(s[3 - j - (j == 1) - (j == 2)])); + } else { + ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), + &((*mat)[3 - (k == 3)][(j == 0)]), + &(s[6 - j - (j == 0)]), + &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(s[4 - j - (j == 1)]), + &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(s[3 - j - (j == 1) - (j == 2)])); + } + } + } + if (inv != NULL) { + // put transposed adjugate in result, or 0 if no inverse + ibz_set(&prod, !ibz_is_zero(&work_det)); + ibz_mat_4x4_scalar_mul(inv, &prod, &work); + } + // output det + if (det != NULL) + ibz_copy(det, &work_det); + for (int i = 0; i < 6; i++) { + ibz_finalize(&s[i]); + ibz_finalize(&c[i]); + } + ibz_mat_4x4_finalize(&work); + ibz_finalize(&work_det); + ibz_finalize(&prod); + return (!ibz_is_zero(det)); +} + +// matrix evaluation + +void +ibz_mat_4x4_eval(ibz_vec_4_t *res, const ibz_mat_4x4_t *mat, const 
ibz_vec_4_t *vec) +{ + ibz_vec_4_t sum; + ibz_t prod; + ibz_init(&prod); + ibz_vec_4_init(&sum); + // assume initialization to 0 + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_mul(&prod, &(*mat)[i][j], &(*vec)[j]); + ibz_add(&(sum[i]), &(sum[i]), &prod); + } + } + ibz_vec_4_copy(res, &sum); + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} + +void +ibz_mat_4x4_eval_t(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_mat_4x4_t *mat) +{ + ibz_vec_4_t sum; + ibz_t prod; + ibz_init(&prod); + ibz_vec_4_init(&sum); + // assume initialization to 0 + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_mul(&prod, &(*mat)[j][i], &(*vec)[j]); + ibz_add(&(sum[i]), &(sum[i]), &prod); + } + } + ibz_vec_4_copy(res, &sum); + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} + +// quadratic forms + +void +quat_qf_eval(ibz_t *res, const ibz_mat_4x4_t *qf, const ibz_vec_4_t *coord) +{ + ibz_vec_4_t sum; + ibz_t prod; + ibz_init(&prod); + ibz_vec_4_init(&sum); + ibz_mat_4x4_eval(&sum, qf, coord); + for (int i = 0; i < 4; i++) { + ibz_mul(&prod, &(sum[i]), &(*coord)[i]); + if (i > 0) { + ibz_add(&(sum[0]), &(sum[0]), &prod); + } else { + ibz_copy(&sum[0], &prod); + } + } + ibz_copy(res, &sum[0]); + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dpe.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dpe.h new file mode 100644 index 0000000000..b9a7a35e0b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dpe.h @@ -0,0 +1,743 @@ +/* Copyright (C) 2004-2024 Patrick Pelissier, Paul Zimmermann, LORIA/INRIA. + +This file is part of the DPE Library. + +The DPE Library is free software; you can redistribute it and/or modify +it under the terms of the GNU Lesser General Public License as published by +the Free Software Foundation; either version 3 of the License, or (at your +option) any later version. + +The DPE Library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +License for more details. + +You should have received a copy of the GNU Lesser General Public License +along with the DPE Library; see the file COPYING.LIB. +If not, see . */ + +#ifndef __DPE +#define __DPE + +#include /* For abort */ +#include /* For fprintf */ +#include /* for round, floor, ceil */ +#include + +/* if you change the version, please change it in Makefile too */ +#define DPE_VERSION_MAJOR 1 +#define DPE_VERSION_MINOR 7 + +#if defined(__GNUC__) && (__GNUC__ >= 3) +# define DPE_LIKELY(x) (__builtin_expect(!!(x),1)) +# define DPE_UNLIKELY(x) (__builtin_expect((x),0)) +# define DPE_UNUSED_ATTR __attribute__((unused)) +#else +# define DPE_LIKELY(x) (x) +# define DPE_UNLIKELY(x) (x) +# define DPE_UNUSED_ATTR +#endif + +/* If no user defined mode, define it to double */ +#if !defined(DPE_USE_DOUBLE) && !defined(DPE_USE_LONGDOUBLE) && !defined(DPE_USE_FLOAT128) +# define DPE_USE_DOUBLE +#endif + +#if defined(DPE_USE_DOUBLE) && defined(DPE_USE_LONGDOUBLE) +# error "Either DPE_USE_DOUBLE or DPE_USE_LONGDOUBLE shall be defined." +#elif defined(DPE_USE_DOUBLE) && defined(DPE_USE_USE_FLOAT128) +# error "Either DPE_USE_DOUBLE or DPE_USE_FLOAT128 shall be defined." +#elif defined(DPE_USE_LONG_DOUBLE) && defined(DPE_USE_USE_FLOAT128) +# error "Either DPE_USE_LONG_DOUBLE or DPE_USE_FLOAT128 shall be defined." 
+#endif + +#if (defined(__i386) || defined (__x86_64)) && !defined(DPE_LITTLEENDIAN32) && defined(DPE_USE_DOUBLE) +# define DPE_LITTLEENDIAN32 +#endif + +#if (defined(__STDC_VERSION__) && (__STDC_VERSION__>=199901L)) || defined (__GLIBC__) +# define DPE_DEFINE_ROUND_TRUNC +#endif + +#if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 43 +# define DPE_ISFINITE __builtin_isfinite +#elif defined(isfinite) +# define DPE_ISFINITE isfinite /* new C99 function */ +#else +# define DPE_ISFINITE finite /* obsolete BSD function */ +#endif + +/* DPE_LDEXP(DPE_DOUBLE m, DPEEXP e) return x = m * 2^e */ +/* DPE_FREXP(DPE_DOUBLE x, DPEEXP *e) returns m, e such that x = m * 2^e with + 1/2 <= m < 1 */ +/* DPE_ROUND(DPE_DOUBLE x) returns the nearest integer to x */ +#if defined(DPE_USE_DOUBLE) +# define DPE_DOUBLE double /* mantissa type */ +# define DPE_BITSIZE 53 /* bitsize of DPE_DOUBLE */ +# define DPE_2_POW_BITSIZE 0x1P53 +# if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 40 +# define DPE_LDEXP __builtin_ldexp +# define DPE_FREXP __builtin_frexp +# define DPE_FLOOR __builtin_floor +# define DPE_CEIL __builtin_ceil +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND __builtin_round +# define DPE_TRUNC __builtin_trunc +# endif +# else +# define DPE_LDEXP ldexp +# define DPE_FREXP frexp +# define DPE_FLOOR floor +# define DPE_CEIL ceil +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND round +# define DPE_TRUNC trunc +# endif +# endif + +#elif defined(DPE_USE_LONGDOUBLE) +# define DPE_DOUBLE long double +# define DPE_BITSIZE 64 +# define DPE_2_POW_BITSIZE 0x1P64 +# define DPE_LDEXP ldexpl +# define DPE_FREXP frexpl +# define DPE_FLOOR floorl +# define DPE_CEIL ceill +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND roundl +# define DPE_TRUNC truncl +# endif + +#elif defined(DPE_USE_FLOAT128) +# include "quadmath.h" +# define DPE_DOUBLE __float128 +# define DPE_BITSIZE 113 +# define DPE_2_POW_BITSIZE 0x1P113 +# define DPE_LDEXP ldexpq +# define DPE_FLOOR floorq +# define DPE_CEIL ceilq +# define DPE_FREXP frexpq +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND roundq +# define DPE_TRUNC truncq +# endif + +#else +# error "neither DPE_USE_DOUBLE, nor DPE_USE_LONGDOUBLE, nor DPE_USE_FLOAT128 is defined" +#endif + +/* If no C99, do what we can */ +#ifndef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND(x) ((DPE_DOUBLE) ((long long) ((x) + ((x) >= 0.0 ? 0.5 : -0.5)))) +# define DPE_TRUNC(x) ((DPE_DOUBLE) ((long long) ((x) + 0.0))) +#endif + +#if defined(DPE_USE_LONG) +# define DPE_EXP_T long /* exponent type */ +# define DPE_EXPMIN LONG_MIN /* smallest possible exponent */ +#elif defined(DPE_USE_LONGLONG) +# define DPE_EXP_T long long +# define DPE_EXPMIN LLONG_MIN +#else +# define DPE_EXP_T int /* exponent type */ +# define DPE_EXPMIN INT_MIN /* smallest possible exponent */ +#endif + +#ifdef DPE_LITTLEENDIAN32 +typedef union +{ + double d; +#if INT_MAX == 0x7FFFFFFFL + int i[2]; +#elif LONG_MAX == 0x7FFFFFFFL + long i[2]; +#elif SHRT_MAX == 0x7FFFFFFFL + short i[2]; +#else +# error Cannot find a 32 bits integer type. +#endif +} dpe_double_words; +#endif + +typedef struct +{ + DPE_DOUBLE d; /* significand */ + DPE_EXP_T exp; /* exponent */ +} dpe_struct; + +typedef dpe_struct dpe_t[1]; + +#define DPE_MANT(x) ((x)->d) +#define DPE_EXP(x) ((x)->exp) +#define DPE_SIGN(x) ((DPE_MANT(x) < 0.0) ? 
-1 : (DPE_MANT(x) > 0.0)) + +#define DPE_INLINE static inline + +/* initialize */ +DPE_INLINE void +dpe_init (dpe_t x DPE_UNUSED_ATTR) +{ +} + +/* clear */ +DPE_INLINE void +dpe_clear (dpe_t x DPE_UNUSED_ATTR) +{ +} + +/* set x to y */ +DPE_INLINE void +dpe_set (dpe_t x, dpe_t y) +{ + DPE_MANT(x) = DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y); +} + +/* set x to -y */ +DPE_INLINE void +dpe_neg (dpe_t x, dpe_t y) +{ + DPE_MANT(x) = -DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y); +} + +/* set x to |y| */ +DPE_INLINE void +dpe_abs (dpe_t x, dpe_t y) +{ + DPE_MANT(x) = (DPE_MANT(y) >= 0) ? DPE_MANT(y) : -DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y); +} + +/* set mantissa in [1/2, 1), except for 0 which has minimum exponent */ +/* FIXME: don't inline this function yet ? */ +static void +dpe_normalize (dpe_t x) +{ + if (DPE_UNLIKELY (DPE_MANT(x) == 0.0 || DPE_ISFINITE (DPE_MANT(x)) == 0)) + { + if (DPE_MANT(x) == 0.0) + DPE_EXP(x) = DPE_EXPMIN; + /* otherwise let the exponent of NaN, Inf unchanged */ + } + else + { + DPE_EXP_T e; +#ifdef DPE_LITTLEENDIAN32 /* 32-bit little endian */ + dpe_double_words dw; + dw.d = DPE_MANT(x); + e = (dw.i[1] >> 20) & 0x7FF; /* unbiased exponent, 1022 for m=1/2 */ + DPE_EXP(x) += e - 1022; + dw.i[1] = (dw.i[1] & 0x800FFFFF) | 0x3FE00000; + DPE_MANT(x) = dw.d; +#else /* portable code */ + double m = DPE_MANT(x); + DPE_MANT(x) = DPE_FREXP (m, &e); + DPE_EXP(x) += e; +#endif + } +} + +#if defined(DPE_USE_DOUBLE) +static const double dpe_scale_tab[54] = { + 0x1P0, 0x1P-1, 0x1P-2, 0x1P-3, 0x1P-4, 0x1P-5, 0x1P-6, 0x1P-7, 0x1P-8, + 0x1P-9, 0x1P-10, 0x1P-11, 0x1P-12, 0x1P-13, 0x1P-14, 0x1P-15, 0x1P-16, + 0x1P-17, 0x1P-18, 0x1P-19, 0x1P-20, 0x1P-21, 0x1P-22, 0x1P-23, 0x1P-24, + 0x1P-25, 0x1P-26, 0x1P-27, 0x1P-28, 0x1P-29, 0x1P-30, 0x1P-31, 0x1P-32, + 0x1P-33, 0x1P-34, 0x1P-35, 0x1P-36, 0x1P-37, 0x1P-38, 0x1P-39, 0x1P-40, + 0x1P-41, 0x1P-42, 0x1P-43, 0x1P-44, 0x1P-45, 0x1P-46, 0x1P-47, 0x1P-48, + 0x1P-49, 0x1P-50, 0x1P-51, 0x1P-52, 0x1P-53}; +#endif + +DPE_INLINE DPE_DOUBLE +dpe_scale (DPE_DOUBLE d, int s) +{ + /* -DPE_BITSIZE < s <= 0 and 1/2 <= d < 1 */ +#if defined(DPE_USE_DOUBLE) + return d * dpe_scale_tab [-s]; +#else /* portable code */ + return DPE_LDEXP (d, s); +#endif +} + +/* set x to y */ +DPE_INLINE void +dpe_set_d (dpe_t x, double y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +/* set x to y */ +DPE_INLINE void +dpe_set_ld (dpe_t x, long double y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +/* set x to y */ +DPE_INLINE void +dpe_set_ui (dpe_t x, unsigned long y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +/* set x to y */ +DPE_INLINE void +dpe_set_si (dpe_t x, long y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +DPE_INLINE long +dpe_get_si (dpe_t x) +{ + DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); + return (long) d; +} + +DPE_INLINE unsigned long +dpe_get_ui (dpe_t x) +{ + DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); + return (d < 0.0) ? 
0 : (unsigned long) d; +} + +DPE_INLINE double +dpe_get_d (dpe_t x) +{ + return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); +} + +DPE_INLINE long double +dpe_get_ld (dpe_t x) +{ + return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); +} + +#if defined(__GMP_H__) || defined(__MINI_GMP_H__) +/* set x to y */ +DPE_INLINE void +dpe_set_z (dpe_t x, mpz_t y) +{ + long e; + DPE_MANT(x) = mpz_get_d_2exp (&e, y); + DPE_EXP(x) = (DPE_EXP_T) e; +} + +/* set x to y, rounded to nearest */ +DPE_INLINE void +dpe_get_z (mpz_t x, dpe_t y) +{ + DPE_EXP_T ey = DPE_EXP(y); + if (ey >= DPE_BITSIZE) /* y is an integer */ + { + DPE_DOUBLE d = DPE_MANT(y) * DPE_2_POW_BITSIZE; /* d is an integer */ + mpz_set_d (x, d); /* should be exact */ + mpz_mul_2exp (x, x, (unsigned long) ey - DPE_BITSIZE); + } + else /* DPE_EXP(y) < DPE_BITSIZE */ + { + if (DPE_UNLIKELY (ey < 0)) /* |y| < 1/2 */ + mpz_set_ui (x, 0); + else + { + DPE_DOUBLE d = DPE_LDEXP(DPE_MANT(y), ey); + mpz_set_d (x, (double) DPE_ROUND(d)); + } + } +} + +/* return e and x such that y = x*2^e */ +DPE_INLINE mp_exp_t +dpe_get_z_exp (mpz_t x, dpe_t y) +{ + mpz_set_d (x, DPE_MANT (y) * DPE_2_POW_BITSIZE); + return DPE_EXP(y) - DPE_BITSIZE; +} +#endif + +/* x <- y + z, assuming y and z are normalized, returns x normalized */ +DPE_INLINE void +dpe_add (dpe_t x, dpe_t y, dpe_t z) +{ + if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) + /* |z| < 1/2*ulp(y), thus o(y+z) = y */ + dpe_set (x, y); + else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) + dpe_set (x, z); + else + { + DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ + + if (d >= 0) + { + DPE_MANT(x) = DPE_MANT(y) + dpe_scale (DPE_MANT(z), -d); + DPE_EXP(x) = DPE_EXP(y); + } + else + { + DPE_MANT(x) = DPE_MANT(z) + dpe_scale (DPE_MANT(y), d); + DPE_EXP(x) = DPE_EXP(z); + } + dpe_normalize (x); + } +} + +/* x <- y - z, assuming y and z are normalized, returns x normalized */ +DPE_INLINE void +dpe_sub (dpe_t x, dpe_t y, dpe_t z) +{ + if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) + /* |z| < 1/2*ulp(y), thus o(y-z) = y */ + dpe_set (x, y); + else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) + dpe_neg (x, z); + else + { + DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ + + if (d >= 0) + { + DPE_MANT(x) = DPE_MANT(y) - dpe_scale (DPE_MANT(z), -d); + DPE_EXP(x) = DPE_EXP(y); + } + else + { + DPE_MANT(x) = dpe_scale (DPE_MANT(y), d) - DPE_MANT(z); + DPE_EXP(x) = DPE_EXP(z); + } + dpe_normalize (x); + } +} + +/* x <- y * z, assuming y and z are normalized, returns x normalized */ +DPE_INLINE void +dpe_mul (dpe_t x, dpe_t y, dpe_t z) +{ + DPE_MANT(x) = DPE_MANT(y) * DPE_MANT(z); + DPE_EXP(x) = DPE_EXP(y) + DPE_EXP(z); + dpe_normalize (x); +} + +/* x <- sqrt(y), assuming y is normalized, returns x normalized */ +DPE_INLINE void +dpe_sqrt (dpe_t x, dpe_t y) +{ + DPE_EXP_T ey = DPE_EXP(y); + if (ey % 2) + { + /* since 1/2 <= my < 1, 1/4 <= my/2 < 1 */ + DPE_MANT(x) = sqrt (0.5 * DPE_MANT(y)); + DPE_EXP(x) = (ey + 1) / 2; + } + else + { + DPE_MANT(x) = sqrt (DPE_MANT(y)); + DPE_EXP(x) = ey / 2; + } +} + +/* x <- y / z, assuming y and z are normalized, returns x normalized. + Assumes z is not zero. 
*/ +DPE_INLINE void +dpe_div (dpe_t x, dpe_t y, dpe_t z) +{ + DPE_MANT(x) = DPE_MANT(y) / DPE_MANT(z); + DPE_EXP(x) = DPE_EXP(y) - DPE_EXP(z); + dpe_normalize (x); +} + +/* x <- y * z, assuming y normalized, returns x normalized */ +DPE_INLINE void +dpe_mul_ui (dpe_t x, dpe_t y, unsigned long z) +{ + DPE_MANT(x) = DPE_MANT(y) * (DPE_DOUBLE) z; + DPE_EXP(x) = DPE_EXP(y); + dpe_normalize (x); +} + +/* x <- y / z, assuming y normalized, z non-zero, returns x normalized */ +DPE_INLINE void +dpe_div_ui (dpe_t x, dpe_t y, unsigned long z) +{ + DPE_MANT(x) = DPE_MANT(y) / (DPE_DOUBLE) z; + DPE_EXP(x) = DPE_EXP(y); + dpe_normalize (x); +} + +/* x <- y * 2^e */ +DPE_INLINE void +dpe_mul_2exp (dpe_t x, dpe_t y, unsigned long e) +{ + DPE_MANT(x) = DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y) + (DPE_EXP_T) e; +} + +/* x <- y / 2^e */ +DPE_INLINE void +dpe_div_2exp (dpe_t x, dpe_t y, unsigned long e) +{ + DPE_MANT(x) = DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y) - (DPE_EXP_T) e; +} + +/* return e and x such that y = x*2^e (equality is not guaranteed if the 'long' + type has fewer bits than the significand in dpe_t) */ +DPE_INLINE DPE_EXP_T +dpe_get_si_exp (long *x, dpe_t y) +{ + if (sizeof(long) == 4) /* 32-bit word: long has 31 bits */ + { + *x = (long) (DPE_MANT(y) * 2147483648.0); + return DPE_EXP(y) - 31; + } + else if (sizeof(long) == 8) /* 64-bit word: long has 63 bits */ + { + *x = (long) (DPE_MANT (y) * 9223372036854775808.0); + return DPE_EXP(y) - 63; + } + else + { + fprintf (stderr, "Error, neither 32-bit nor 64-bit word\n"); + exit (1); + } +} + +static DPE_UNUSED_ATTR int dpe_str_prec = 16; +static int dpe_out_str (FILE *s, int base, dpe_t x) DPE_UNUSED_ATTR; + +static int +dpe_out_str (FILE *s, int base, dpe_t x) +{ + DPE_DOUBLE d = DPE_MANT(x); + DPE_EXP_T e2 = DPE_EXP(x); + int e10 = 0; + char sign = ' '; + if (DPE_UNLIKELY (base != 10)) + { + fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); + exit (1); + } + if (d == 0.0) +#ifdef DPE_USE_DOUBLE + return fprintf (s, "%1.*f", dpe_str_prec, d); +#else + return fprintf (s, "%1.*Lf", dpe_str_prec, (long double) d); +#endif + if (d < 0) + { + d = -d; + sign = '-'; + } + if (e2 > 0) + { + while (e2 > 0) + { + e2 --; + d *= 2.0; + if (d >= 10.0) + { + d /= 10.0; + e10 ++; + } + } + } + else /* e2 <= 0 */ + { + while (e2 < 0) + { + e2 ++; + d /= 2.0; + if (d < 1.0) + { + d *= 10.0; + e10 --; + } + } + } +#ifdef DPE_USE_DOUBLE + return fprintf (s, "%c%1.*f*10^%d", sign, dpe_str_prec, d, e10); +#else + return fprintf (s, "%c%1.*Lf*10^%d", sign, dpe_str_prec, (long double) d, e10); +#endif +} + +static size_t dpe_inp_str (dpe_t x, FILE *s, int base) DPE_UNUSED_ATTR; + +static size_t +dpe_inp_str (dpe_t x, FILE *s, int base) +{ + size_t res; + DPE_DOUBLE d; + if (DPE_UNLIKELY (base != 10)) + { + fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); + exit (1); + } +#ifdef DPE_USE_DOUBLE + res = fscanf (s, "%lf", &d); +#elif defined(DPE_USE_LONGDOUBLE) + res = fscanf (s, "%Lf", &d); +#else + { + long double d_ld; + res = fscanf (s, "%Lf", &d_ld); + d = d_ld; + } +#endif + dpe_set_d (x, d); + return res; +} + +DPE_INLINE void +dpe_dump (dpe_t x) +{ + dpe_out_str (stdout, 10, x); + putchar ('\n'); +} + +DPE_INLINE int +dpe_zero_p (dpe_t x) +{ + return DPE_MANT (x) == 0; +} + +/* return a positive value if x > y + a negative value if x < y + and 0 otherwise (x=y). 
*/ +DPE_INLINE int +dpe_cmp (dpe_t x, dpe_t y) +{ + int sx = DPE_SIGN(x); + int d = sx - DPE_SIGN(y); + + if (d != 0) + return d; + else if (DPE_EXP(x) > DPE_EXP(y)) + return (sx > 0) ? 1 : -1; + else if (DPE_EXP(y) > DPE_EXP(x)) + return (sx > 0) ? -1 : 1; + else /* DPE_EXP(x) = DPE_EXP(y) */ + return (DPE_MANT(x) < DPE_MANT(y)) ? -1 : (DPE_MANT(x) > DPE_MANT(y)); +} + +DPE_INLINE int +dpe_cmp_d (dpe_t x, double d) +{ + dpe_t y; + dpe_set_d (y, d); + return dpe_cmp (x, y); +} + +DPE_INLINE int +dpe_cmp_ui (dpe_t x, unsigned long d) +{ + dpe_t y; + dpe_set_ui (y, d); + return dpe_cmp (x, y); +} + +DPE_INLINE int +dpe_cmp_si (dpe_t x, long d) +{ + dpe_t y; + dpe_set_si (y, d); + return dpe_cmp (x, y); +} + +/* set x to integer nearest to y */ +DPE_INLINE void +dpe_round (dpe_t x, dpe_t y) +{ + if (DPE_EXP(y) < 0) /* |y| < 1/2 */ + dpe_set_ui (x, 0); + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set (x, y); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, DPE_ROUND(d)); + } +} + +/* set x to the fractional part of y, defined as y - trunc(y), thus the + fractional part has absolute value in [0, 1), and same sign as y */ +DPE_INLINE void +dpe_frac (dpe_t x, dpe_t y) +{ + /* If |y| is smaller than 1, keep it */ + if (DPE_EXP(y) <= 0) + dpe_set (x, y); + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set_ui (x, 0); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, d - DPE_TRUNC(d)); + } +} + +/* set x to largest integer <= y */ +DPE_INLINE void +dpe_floor (dpe_t x, dpe_t y) +{ + if (DPE_EXP(y) <= 0) /* |y| < 1 */ + { + if (DPE_SIGN(y) >= 0) /* 0 <= y < 1 */ + dpe_set_ui (x, 0); + else /* -1 < y < 0 */ + dpe_set_si (x, -1); + } + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set (x, y); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, DPE_FLOOR(d)); + } +} + +/* set x to smallest integer >= y */ +DPE_INLINE void +dpe_ceil (dpe_t x, dpe_t y) +{ + if (DPE_EXP(y) <= 0) /* |y| < 1 */ + { + if (DPE_SIGN(y) > 0) /* 0 < y < 1 */ + dpe_set_ui (x, 1); + else /* -1 < y <= 0 */ + dpe_set_si (x, 0); + } + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set (x, y); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, DPE_CEIL(d)); + } +} + +DPE_INLINE void +dpe_swap (dpe_t x, dpe_t y) +{ + DPE_EXP_T i = DPE_EXP (x); + DPE_DOUBLE d = DPE_MANT (x); + DPE_EXP (x) = DPE_EXP (y); + DPE_MANT (x) = DPE_MANT (y); + DPE_EXP (y) = i; + DPE_MANT (y) = d; +} + +#endif /* __DPE */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/finit.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/finit.c new file mode 100644 index 0000000000..b3808edf07 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/finit.c @@ -0,0 +1,122 @@ +#include "internal.h" + +void +quat_alg_init_set(quat_alg_t *alg, const ibz_t *p) +{ + ibz_init(&(*alg).p); + ibz_copy(&(*alg).p, p); +} +void +quat_alg_finalize(quat_alg_t *alg) +{ + ibz_finalize(&(*alg).p); +} + +void +quat_alg_elem_init(quat_alg_elem_t *elem) +{ + ibz_vec_4_init(&(*elem).coord); + ibz_init(&(*elem).denom); + ibz_set(&(*elem).denom, 1); +} +void +quat_alg_elem_finalize(quat_alg_elem_t *elem) +{ + ibz_vec_4_finalize(&(*elem).coord); + ibz_finalize(&(*elem).denom); +} + +void +ibz_vec_2_init(ibz_vec_2_t *vec) +{ + ibz_init(&((*vec)[0])); + ibz_init(&((*vec)[1])); +} + +void +ibz_vec_2_finalize(ibz_vec_2_t *vec) +{ + ibz_finalize(&((*vec)[0])); + 
ibz_finalize(&((*vec)[1])); +} + +void +ibz_vec_4_init(ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_init(&(*vec)[i]); + } +} +void +ibz_vec_4_finalize(ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_finalize(&(*vec)[i]); + } +} + +void +ibz_mat_2x2_init(ibz_mat_2x2_t *mat) +{ + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_init(&(*mat)[i][j]); + } + } +} +void +ibz_mat_2x2_finalize(ibz_mat_2x2_t *mat) +{ + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_finalize(&(*mat)[i][j]); + } + } +} + +void +ibz_mat_4x4_init(ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_init(&(*mat)[i][j]); + } + } +} +void +ibz_mat_4x4_finalize(ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_finalize(&(*mat)[i][j]); + } + } +} + +void +quat_lattice_init(quat_lattice_t *lat) +{ + ibz_mat_4x4_init(&(*lat).basis); + ibz_init(&(*lat).denom); + ibz_set(&(*lat).denom, 1); +} +void +quat_lattice_finalize(quat_lattice_t *lat) +{ + ibz_finalize(&(*lat).denom); + ibz_mat_4x4_finalize(&(*lat).basis); +} + +void +quat_left_ideal_init(quat_left_ideal_t *lideal) +{ + quat_lattice_init(&(*lideal).lattice); + ibz_init(&(*lideal).norm); + (*lideal).parent_order = NULL; +} +void +quat_left_ideal_finalize(quat_left_ideal_t *lideal) +{ + ibz_finalize(&(*lideal).norm); + quat_lattice_finalize(&(*lideal).lattice); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fips202.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fips202.c deleted file mode 100644 index f2992d8c7f..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fips202.c +++ /dev/null @@ -1,876 +0,0 @@ -// SPDX-License-Identifier: PD and Apache-2.0 - -/* FIPS202 implementation based on code from PQClean, - * which is in turn based based on the public domain implementation in - * crypto_hash/keccakc512/simple/ from http://bench.cr.yp.to/supercop.html - * by Ronny Van Keer - * and the public domain "TweetFips202" implementation - * from https://twitter.com/tweetfips202 - * by Gilles Van Assche, Daniel J. 
Bernstein, and Peter Schwabe */ - -#include -#include -#include -#include - -#include "fips202.h" - -#define NROUNDS 24 -#define ROL(a, offset) (((a) << (offset)) ^ ((a) >> (64 - (offset)))) - -/************************************************* - * Name: load64 - * - * Description: Load 8 bytes into uint64_t in little-endian order - * - * Arguments: - const uint8_t *x: pointer to input byte array - * - * Returns the loaded 64-bit unsigned integer - **************************************************/ -static uint64_t load64(const uint8_t *x) { - uint64_t r = 0; - for (size_t i = 0; i < 8; ++i) { - r |= (uint64_t)x[i] << 8 * i; - } - - return r; -} - -/************************************************* - * Name: store64 - * - * Description: Store a 64-bit integer to a byte array in little-endian order - * - * Arguments: - uint8_t *x: pointer to the output byte array - * - uint64_t u: input 64-bit unsigned integer - **************************************************/ -static void store64(uint8_t *x, uint64_t u) { - for (size_t i = 0; i < 8; ++i) { - x[i] = (uint8_t) (u >> 8 * i); - } -} - -/* Keccak round constants */ -static const uint64_t KeccakF_RoundConstants[NROUNDS] = { - 0x0000000000000001ULL, 0x0000000000008082ULL, - 0x800000000000808aULL, 0x8000000080008000ULL, - 0x000000000000808bULL, 0x0000000080000001ULL, - 0x8000000080008081ULL, 0x8000000000008009ULL, - 0x000000000000008aULL, 0x0000000000000088ULL, - 0x0000000080008009ULL, 0x000000008000000aULL, - 0x000000008000808bULL, 0x800000000000008bULL, - 0x8000000000008089ULL, 0x8000000000008003ULL, - 0x8000000000008002ULL, 0x8000000000000080ULL, - 0x000000000000800aULL, 0x800000008000000aULL, - 0x8000000080008081ULL, 0x8000000000008080ULL, - 0x0000000080000001ULL, 0x8000000080008008ULL -}; - -/************************************************* - * Name: KeccakF1600_StatePermute - * - * Description: The Keccak F1600 Permutation - * - * Arguments: - uint64_t *state: pointer to input/output Keccak state - **************************************************/ -static void KeccakF1600_StatePermute(uint64_t *state) { - int round; - - uint64_t Aba, Abe, Abi, Abo, Abu; - uint64_t Aga, Age, Agi, Ago, Agu; - uint64_t Aka, Ake, Aki, Ako, Aku; - uint64_t Ama, Ame, Ami, Amo, Amu; - uint64_t Asa, Ase, Asi, Aso, Asu; - uint64_t BCa, BCe, BCi, BCo, BCu; - uint64_t Da, De, Di, Do, Du; - uint64_t Eba, Ebe, Ebi, Ebo, Ebu; - uint64_t Ega, Ege, Egi, Ego, Egu; - uint64_t Eka, Eke, Eki, Eko, Eku; - uint64_t Ema, Eme, Emi, Emo, Emu; - uint64_t Esa, Ese, Esi, Eso, Esu; - - // copyFromState(A, state) - Aba = state[0]; - Abe = state[1]; - Abi = state[2]; - Abo = state[3]; - Abu = state[4]; - Aga = state[5]; - Age = state[6]; - Agi = state[7]; - Ago = state[8]; - Agu = state[9]; - Aka = state[10]; - Ake = state[11]; - Aki = state[12]; - Ako = state[13]; - Aku = state[14]; - Ama = state[15]; - Ame = state[16]; - Ami = state[17]; - Amo = state[18]; - Amu = state[19]; - Asa = state[20]; - Ase = state[21]; - Asi = state[22]; - Aso = state[23]; - Asu = state[24]; - - for (round = 0; round < NROUNDS; round += 2) { - // prepareTheta - BCa = Aba ^ Aga ^ Aka ^ Ama ^ Asa; - BCe = Abe ^ Age ^ Ake ^ Ame ^ Ase; - BCi = Abi ^ Agi ^ Aki ^ Ami ^ Asi; - BCo = Abo ^ Ago ^ Ako ^ Amo ^ Aso; - BCu = Abu ^ Agu ^ Aku ^ Amu ^ Asu; - - // thetaRhoPiChiIotaPrepareTheta(round , A, E) - Da = BCu ^ ROL(BCe, 1); - De = BCa ^ ROL(BCi, 1); - Di = BCe ^ ROL(BCo, 1); - Do = BCi ^ ROL(BCu, 1); - Du = BCo ^ ROL(BCa, 1); - - Aba ^= Da; - BCa = Aba; - Age ^= De; - BCe = ROL(Age, 44); - Aki ^= Di; - BCi = 
ROL(Aki, 43); - Amo ^= Do; - BCo = ROL(Amo, 21); - Asu ^= Du; - BCu = ROL(Asu, 14); - Eba = BCa ^ ((~BCe) & BCi); - Eba ^= KeccakF_RoundConstants[round]; - Ebe = BCe ^ ((~BCi) & BCo); - Ebi = BCi ^ ((~BCo) & BCu); - Ebo = BCo ^ ((~BCu) & BCa); - Ebu = BCu ^ ((~BCa) & BCe); - - Abo ^= Do; - BCa = ROL(Abo, 28); - Agu ^= Du; - BCe = ROL(Agu, 20); - Aka ^= Da; - BCi = ROL(Aka, 3); - Ame ^= De; - BCo = ROL(Ame, 45); - Asi ^= Di; - BCu = ROL(Asi, 61); - Ega = BCa ^ ((~BCe) & BCi); - Ege = BCe ^ ((~BCi) & BCo); - Egi = BCi ^ ((~BCo) & BCu); - Ego = BCo ^ ((~BCu) & BCa); - Egu = BCu ^ ((~BCa) & BCe); - - Abe ^= De; - BCa = ROL(Abe, 1); - Agi ^= Di; - BCe = ROL(Agi, 6); - Ako ^= Do; - BCi = ROL(Ako, 25); - Amu ^= Du; - BCo = ROL(Amu, 8); - Asa ^= Da; - BCu = ROL(Asa, 18); - Eka = BCa ^ ((~BCe) & BCi); - Eke = BCe ^ ((~BCi) & BCo); - Eki = BCi ^ ((~BCo) & BCu); - Eko = BCo ^ ((~BCu) & BCa); - Eku = BCu ^ ((~BCa) & BCe); - - Abu ^= Du; - BCa = ROL(Abu, 27); - Aga ^= Da; - BCe = ROL(Aga, 36); - Ake ^= De; - BCi = ROL(Ake, 10); - Ami ^= Di; - BCo = ROL(Ami, 15); - Aso ^= Do; - BCu = ROL(Aso, 56); - Ema = BCa ^ ((~BCe) & BCi); - Eme = BCe ^ ((~BCi) & BCo); - Emi = BCi ^ ((~BCo) & BCu); - Emo = BCo ^ ((~BCu) & BCa); - Emu = BCu ^ ((~BCa) & BCe); - - Abi ^= Di; - BCa = ROL(Abi, 62); - Ago ^= Do; - BCe = ROL(Ago, 55); - Aku ^= Du; - BCi = ROL(Aku, 39); - Ama ^= Da; - BCo = ROL(Ama, 41); - Ase ^= De; - BCu = ROL(Ase, 2); - Esa = BCa ^ ((~BCe) & BCi); - Ese = BCe ^ ((~BCi) & BCo); - Esi = BCi ^ ((~BCo) & BCu); - Eso = BCo ^ ((~BCu) & BCa); - Esu = BCu ^ ((~BCa) & BCe); - - // prepareTheta - BCa = Eba ^ Ega ^ Eka ^ Ema ^ Esa; - BCe = Ebe ^ Ege ^ Eke ^ Eme ^ Ese; - BCi = Ebi ^ Egi ^ Eki ^ Emi ^ Esi; - BCo = Ebo ^ Ego ^ Eko ^ Emo ^ Eso; - BCu = Ebu ^ Egu ^ Eku ^ Emu ^ Esu; - - // thetaRhoPiChiIotaPrepareTheta(round+1, E, A) - Da = BCu ^ ROL(BCe, 1); - De = BCa ^ ROL(BCi, 1); - Di = BCe ^ ROL(BCo, 1); - Do = BCi ^ ROL(BCu, 1); - Du = BCo ^ ROL(BCa, 1); - - Eba ^= Da; - BCa = Eba; - Ege ^= De; - BCe = ROL(Ege, 44); - Eki ^= Di; - BCi = ROL(Eki, 43); - Emo ^= Do; - BCo = ROL(Emo, 21); - Esu ^= Du; - BCu = ROL(Esu, 14); - Aba = BCa ^ ((~BCe) & BCi); - Aba ^= KeccakF_RoundConstants[round + 1]; - Abe = BCe ^ ((~BCi) & BCo); - Abi = BCi ^ ((~BCo) & BCu); - Abo = BCo ^ ((~BCu) & BCa); - Abu = BCu ^ ((~BCa) & BCe); - - Ebo ^= Do; - BCa = ROL(Ebo, 28); - Egu ^= Du; - BCe = ROL(Egu, 20); - Eka ^= Da; - BCi = ROL(Eka, 3); - Eme ^= De; - BCo = ROL(Eme, 45); - Esi ^= Di; - BCu = ROL(Esi, 61); - Aga = BCa ^ ((~BCe) & BCi); - Age = BCe ^ ((~BCi) & BCo); - Agi = BCi ^ ((~BCo) & BCu); - Ago = BCo ^ ((~BCu) & BCa); - Agu = BCu ^ ((~BCa) & BCe); - - Ebe ^= De; - BCa = ROL(Ebe, 1); - Egi ^= Di; - BCe = ROL(Egi, 6); - Eko ^= Do; - BCi = ROL(Eko, 25); - Emu ^= Du; - BCo = ROL(Emu, 8); - Esa ^= Da; - BCu = ROL(Esa, 18); - Aka = BCa ^ ((~BCe) & BCi); - Ake = BCe ^ ((~BCi) & BCo); - Aki = BCi ^ ((~BCo) & BCu); - Ako = BCo ^ ((~BCu) & BCa); - Aku = BCu ^ ((~BCa) & BCe); - - Ebu ^= Du; - BCa = ROL(Ebu, 27); - Ega ^= Da; - BCe = ROL(Ega, 36); - Eke ^= De; - BCi = ROL(Eke, 10); - Emi ^= Di; - BCo = ROL(Emi, 15); - Eso ^= Do; - BCu = ROL(Eso, 56); - Ama = BCa ^ ((~BCe) & BCi); - Ame = BCe ^ ((~BCi) & BCo); - Ami = BCi ^ ((~BCo) & BCu); - Amo = BCo ^ ((~BCu) & BCa); - Amu = BCu ^ ((~BCa) & BCe); - - Ebi ^= Di; - BCa = ROL(Ebi, 62); - Ego ^= Do; - BCe = ROL(Ego, 55); - Eku ^= Du; - BCi = ROL(Eku, 39); - Ema ^= Da; - BCo = ROL(Ema, 41); - Ese ^= De; - BCu = ROL(Ese, 2); - Asa = BCa ^ ((~BCe) & BCi); - Ase = BCe ^ ((~BCi) & BCo); - Asi = BCi ^ 
((~BCo) & BCu); - Aso = BCo ^ ((~BCu) & BCa); - Asu = BCu ^ ((~BCa) & BCe); - } - - // copyToState(state, A) - state[0] = Aba; - state[1] = Abe; - state[2] = Abi; - state[3] = Abo; - state[4] = Abu; - state[5] = Aga; - state[6] = Age; - state[7] = Agi; - state[8] = Ago; - state[9] = Agu; - state[10] = Aka; - state[11] = Ake; - state[12] = Aki; - state[13] = Ako; - state[14] = Aku; - state[15] = Ama; - state[16] = Ame; - state[17] = Ami; - state[18] = Amo; - state[19] = Amu; - state[20] = Asa; - state[21] = Ase; - state[22] = Asi; - state[23] = Aso; - state[24] = Asu; -} - -/************************************************* - * Name: keccak_absorb - * - * Description: Absorb step of Keccak; - * non-incremental, starts by zeroeing the state. - * - * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - * - const uint8_t *m: pointer to input to be absorbed into s - * - size_t mlen: length of input in bytes - * - uint8_t p: domain-separation byte for different - * Keccak-derived functions - **************************************************/ -static void keccak_absorb(uint64_t *s, uint32_t r, const uint8_t *m, - size_t mlen, uint8_t p) { - size_t i; - uint8_t t[200]; - - /* Zero state */ - for (i = 0; i < 25; ++i) { - s[i] = 0; - } - - while (mlen >= r) { - for (i = 0; i < r / 8; ++i) { - s[i] ^= load64(m + 8 * i); - } - - KeccakF1600_StatePermute(s); - mlen -= r; - m += r; - } - - for (i = 0; i < r; ++i) { - t[i] = 0; - } - for (i = 0; i < mlen; ++i) { - t[i] = m[i]; - } - t[i] = p; - t[r - 1] |= 128; - for (i = 0; i < r / 8; ++i) { - s[i] ^= load64(t + 8 * i); - } -} - -/************************************************* - * Name: keccak_squeezeblocks - * - * Description: Squeeze step of Keccak. Squeezes full blocks of r bytes each. - * Modifies the state. Can be called multiple times to keep - * squeezing, i.e., is incremental. - * - * Arguments: - uint8_t *h: pointer to output blocks - * - size_t nblocks: number of blocks to be - * squeezed (written to h) - * - uint64_t *s: pointer to input/output Keccak state - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - **************************************************/ -static void keccak_squeezeblocks(uint8_t *h, size_t nblocks, - uint64_t *s, uint32_t r) { - while (nblocks > 0) { - KeccakF1600_StatePermute(s); - for (size_t i = 0; i < (r >> 3); i++) { - store64(h + 8 * i, s[i]); - } - h += r; - nblocks--; - } -} - -/************************************************* - * Name: keccak_inc_init - * - * Description: Initializes the incremental Keccak state to zero. - * - * Arguments: - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. - **************************************************/ -static void keccak_inc_init(uint64_t *s_inc) { - size_t i; - - for (i = 0; i < 25; ++i) { - s_inc[i] = 0; - } - s_inc[25] = 0; -} - -/************************************************* - * Name: keccak_inc_absorb - * - * Description: Incremental keccak absorb - * Preceded by keccak_inc_init, succeeded by keccak_inc_finalize - * - * Arguments: - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. 
- * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - * - const uint8_t *m: pointer to input to be absorbed into s - * - size_t mlen: length of input in bytes - **************************************************/ -static void keccak_inc_absorb(uint64_t *s_inc, uint32_t r, const uint8_t *m, - size_t mlen) { - size_t i; - - /* Recall that s_inc[25] is the non-absorbed bytes xored into the state */ - while (mlen + s_inc[25] >= r) { - for (i = 0; i < r - (uint32_t)s_inc[25]; i++) { - /* Take the i'th byte from message - xor with the s_inc[25] + i'th byte of the state; little-endian */ - s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); - } - mlen -= (size_t)(r - s_inc[25]); - m += r - s_inc[25]; - s_inc[25] = 0; - - KeccakF1600_StatePermute(s_inc); - } - - for (i = 0; i < mlen; i++) { - s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); - } - s_inc[25] += mlen; -} - -/************************************************* - * Name: keccak_inc_finalize - * - * Description: Finalizes Keccak absorb phase, prepares for squeezing - * - * Arguments: - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - * - uint8_t p: domain-separation byte for different - * Keccak-derived functions - **************************************************/ -static void keccak_inc_finalize(uint64_t *s_inc, uint32_t r, uint8_t p) { - /* After keccak_inc_absorb, we are guaranteed that s_inc[25] < r, - so we can always use one more byte for p in the current state. */ - s_inc[s_inc[25] >> 3] ^= (uint64_t)p << (8 * (s_inc[25] & 0x07)); - s_inc[(r - 1) >> 3] ^= (uint64_t)128 << (8 * ((r - 1) & 0x07)); - s_inc[25] = 0; -} - -/************************************************* - * Name: keccak_inc_squeeze - * - * Description: Incremental Keccak squeeze; can be called on byte-level - * - * Arguments: - uint8_t *h: pointer to output bytes - * - size_t outlen: number of bytes to be squeezed - * - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - **************************************************/ -static void keccak_inc_squeeze(uint8_t *h, size_t outlen, - uint64_t *s_inc, uint32_t r) { - size_t i; - - /* First consume any bytes we still have sitting around */ - for (i = 0; i < outlen && i < s_inc[25]; i++) { - /* There are s_inc[25] bytes left, so r - s_inc[25] is the first - available byte. We consume from there, i.e., up to r. 
*/ - h[i] = (uint8_t)(s_inc[(r - s_inc[25] + i) >> 3] >> (8 * ((r - s_inc[25] + i) & 0x07))); - } - h += i; - outlen -= i; - s_inc[25] -= i; - - /* Then squeeze the remaining necessary blocks */ - while (outlen > 0) { - KeccakF1600_StatePermute(s_inc); - - for (i = 0; i < outlen && i < r; i++) { - h[i] = (uint8_t)(s_inc[i >> 3] >> (8 * (i & 0x07))); - } - h += i; - outlen -= i; - s_inc[25] = r - i; - } -} - -void shake128_inc_init(shake128incctx *state) { - keccak_inc_init(state->ctx); -} - -void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHAKE128_RATE, input, inlen); -} - -void shake128_inc_finalize(shake128incctx *state) { - keccak_inc_finalize(state->ctx, SHAKE128_RATE, 0x1F); -} - -void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state) { - keccak_inc_squeeze(output, outlen, state->ctx, SHAKE128_RATE); -} - -void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void shake128_inc_ctx_release(shake128incctx *state) { - (void)state; -} - -void shake256_inc_init(shake256incctx *state) { - keccak_inc_init(state->ctx); -} - -void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHAKE256_RATE, input, inlen); -} - -void shake256_inc_finalize(shake256incctx *state) { - keccak_inc_finalize(state->ctx, SHAKE256_RATE, 0x1F); -} - -void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state) { - keccak_inc_squeeze(output, outlen, state->ctx, SHAKE256_RATE); -} - -void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void shake256_inc_ctx_release(shake256incctx *state) { - (void)state; -} - - -/************************************************* - * Name: shake128_absorb - * - * Description: Absorb step of the SHAKE128 XOF. - * non-incremental, starts by zeroeing the state. - * - * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state - * - const uint8_t *input: pointer to input to be absorbed - * into s - * - size_t inlen: length of input in bytes - **************************************************/ -void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen) { - keccak_absorb(state->ctx, SHAKE128_RATE, input, inlen, 0x1F); -} - -/************************************************* - * Name: shake128_squeezeblocks - * - * Description: Squeeze step of SHAKE128 XOF. Squeezes full blocks of - * SHAKE128_RATE bytes each. Modifies the state. Can be called - * multiple times to keep squeezing, i.e., is incremental. - * - * Arguments: - uint8_t *output: pointer to output blocks - * - size_t nblocks: number of blocks to be squeezed - * (written to output) - * - shake128ctx *state: pointer to input/output Keccak state - **************************************************/ -void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state) { - keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE128_RATE); -} - -void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); -} - -/** Release the allocated state. Call only once. */ -void shake128_ctx_release(shake128ctx *state) { - (void)state; -} - -/************************************************* - * Name: shake256_absorb - * - * Description: Absorb step of the SHAKE256 XOF. 
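/* Illustrative sketch, not part of the upstream sources: of the API declared
 * in this file, only the SHAKE256 incremental calls are kept by the rewritten
 * fips202.h later in this patch, which maps shake256_inc_* onto liboqs's
 * OQS_SHA3_shake256_inc_* functions.  Typical usage, with placeholder message
 * chunks, looks like this: */

#include <stddef.h>
#include <stdint.h>
#include "fips202.h"

static void
example_shake256_inc(const uint8_t *chunk1, size_t len1,
                     const uint8_t *chunk2, size_t len2,
                     uint8_t digest[64])
{
    shake256incctx state;
    shake256_inc_init(&state);
    shake256_inc_absorb(&state, chunk1, len1);
    shake256_inc_absorb(&state, chunk2, len2);  /* absorb may be called repeatedly */
    shake256_inc_finalize(&state);              /* applies the SHAKE padding byte 0x1F */
    shake256_inc_squeeze(digest, 64, &state);   /* output can also be squeezed in pieces */
    /* The liboqs backend also provides OQS_SHA3_shake256_inc_ctx_release() to
     * free any backing state; it is not aliased by the rewritten header, so it
     * is omitted from this sketch. */
}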
- * non-incremental, starts by zeroeing the state. - * - * Arguments: - shake256ctx *state: pointer to (uninitialized) output Keccak state - * - const uint8_t *input: pointer to input to be absorbed - * into s - * - size_t inlen: length of input in bytes - **************************************************/ -void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen) { - keccak_absorb(state->ctx, SHAKE256_RATE, input, inlen, 0x1F); -} - -/************************************************* - * Name: shake256_squeezeblocks - * - * Description: Squeeze step of SHAKE256 XOF. Squeezes full blocks of - * SHAKE256_RATE bytes each. Modifies the state. Can be called - * multiple times to keep squeezing, i.e., is incremental. - * - * Arguments: - uint8_t *output: pointer to output blocks - * - size_t nblocks: number of blocks to be squeezed - * (written to output) - * - shake256ctx *state: pointer to input/output Keccak state - **************************************************/ -void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state) { - keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE256_RATE); -} - -void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); -} - -/** Release the allocated state. Call only once. */ -void shake256_ctx_release(shake256ctx *state) { - (void)state; -} - -/************************************************* - * Name: shake128 - * - * Description: SHAKE128 XOF with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - size_t outlen: requested output length in bytes - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void shake128(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen) { - size_t nblocks = outlen / SHAKE128_RATE; - uint8_t t[SHAKE128_RATE]; - shake128ctx s; - - shake128_absorb(&s, input, inlen); - shake128_squeezeblocks(output, nblocks, &s); - - output += nblocks * SHAKE128_RATE; - outlen -= nblocks * SHAKE128_RATE; - - if (outlen) { - shake128_squeezeblocks(t, 1, &s); - for (size_t i = 0; i < outlen; ++i) { - output[i] = t[i]; - } - } - shake128_ctx_release(&s); -} - -/************************************************* - * Name: shake256 - * - * Description: SHAKE256 XOF with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - size_t outlen: requested output length in bytes - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void shake256(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen) { - size_t nblocks = outlen / SHAKE256_RATE; - uint8_t t[SHAKE256_RATE]; - shake256ctx s; - - shake256_absorb(&s, input, inlen); - shake256_squeezeblocks(output, nblocks, &s); - - output += nblocks * SHAKE256_RATE; - outlen -= nblocks * SHAKE256_RATE; - - if (outlen) { - shake256_squeezeblocks(t, 1, &s); - for (size_t i = 0; i < outlen; ++i) { - output[i] = t[i]; - } - } - shake256_ctx_release(&s); -} - -void sha3_256_inc_init(sha3_256incctx *state) { - keccak_inc_init(state->ctx); -} - -void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void sha3_256_inc_ctx_release(sha3_256incctx *state) { - (void)state; -} - -void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen) { - 
keccak_inc_absorb(state->ctx, SHA3_256_RATE, input, inlen); -} - -void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state) { - uint8_t t[SHA3_256_RATE]; - keccak_inc_finalize(state->ctx, SHA3_256_RATE, 0x06); - - keccak_squeezeblocks(t, 1, state->ctx, SHA3_256_RATE); - - sha3_256_inc_ctx_release(state); - - for (size_t i = 0; i < 32; i++) { - output[i] = t[i]; - } -} - -/************************************************* - * Name: sha3_256 - * - * Description: SHA3-256 with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen) { - uint64_t s[25]; - uint8_t t[SHA3_256_RATE]; - - /* Absorb input */ - keccak_absorb(s, SHA3_256_RATE, input, inlen, 0x06); - - /* Squeeze output */ - keccak_squeezeblocks(t, 1, s, SHA3_256_RATE); - - for (size_t i = 0; i < 32; i++) { - output[i] = t[i]; - } -} - -void sha3_384_inc_init(sha3_384incctx *state) { - keccak_inc_init(state->ctx); -} - -void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHA3_384_RATE, input, inlen); -} - -void sha3_384_inc_ctx_release(sha3_384incctx *state) { - (void)state; -} - -void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state) { - uint8_t t[SHA3_384_RATE]; - keccak_inc_finalize(state->ctx, SHA3_384_RATE, 0x06); - - keccak_squeezeblocks(t, 1, state->ctx, SHA3_384_RATE); - - sha3_384_inc_ctx_release(state); - - for (size_t i = 0; i < 48; i++) { - output[i] = t[i]; - } -} - -/************************************************* - * Name: sha3_384 - * - * Description: SHA3-256 with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen) { - uint64_t s[25]; - uint8_t t[SHA3_384_RATE]; - - /* Absorb input */ - keccak_absorb(s, SHA3_384_RATE, input, inlen, 0x06); - - /* Squeeze output */ - keccak_squeezeblocks(t, 1, s, SHA3_384_RATE); - - for (size_t i = 0; i < 48; i++) { - output[i] = t[i]; - } -} - -void sha3_512_inc_init(sha3_512incctx *state) { - keccak_inc_init(state->ctx); -} - -void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHA3_512_RATE, input, inlen); -} - -void sha3_512_inc_ctx_release(sha3_512incctx *state) { - (void)state; -} - -void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state) { - uint8_t t[SHA3_512_RATE]; - keccak_inc_finalize(state->ctx, SHA3_512_RATE, 0x06); - - keccak_squeezeblocks(t, 1, state->ctx, SHA3_512_RATE); - - sha3_512_inc_ctx_release(state); - - for (size_t i = 0; i < 64; i++) { - output[i] = t[i]; - } -} - -/************************************************* - * Name: sha3_512 - * - * Description: SHA3-512 with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - 
**************************************************/ -void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen) { - uint64_t s[25]; - uint8_t t[SHA3_512_RATE]; - - /* Absorb input */ - keccak_absorb(s, SHA3_512_RATE, input, inlen, 0x06); - - /* Squeeze output */ - keccak_squeezeblocks(t, 1, s, SHA3_512_RATE); - - for (size_t i = 0; i < 64; i++) { - output[i] = t[i]; - } -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fips202.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fips202.h index c29ebd8f9d..21bc0c3f79 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fips202.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fips202.h @@ -3,169 +3,12 @@ #ifndef FIPS202_H #define FIPS202_H -#include -#include +#include -#define SHAKE128_RATE 168 -#define SHAKE256_RATE 136 -#define SHA3_256_RATE 136 -#define SHA3_384_RATE 104 -#define SHA3_512_RATE 72 - -#define PQC_SHAKEINCCTX_U64WORDS 26 -#define PQC_SHAKECTX_U64WORDS 25 - -#define PQC_SHAKEINCCTX_BYTES (sizeof(uint64_t) * 26) -#define PQC_SHAKECTX_BYTES (sizeof(uint64_t) * 25) - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} shake128incctx; - -// Context for non-incremental API -typedef struct { - uint64_t ctx[PQC_SHAKECTX_U64WORDS]; -} shake128ctx; - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} shake256incctx; - -// Context for non-incremental API -typedef struct { - uint64_t ctx[PQC_SHAKECTX_U64WORDS]; -} shake256ctx; - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} sha3_256incctx; - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} sha3_384incctx; - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} sha3_512incctx; - -/* Initialize the state and absorb the provided input. - * - * This function does not support being called multiple times - * with the same state. - */ -void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen); -/* Squeeze output out of the sponge. - * - * Supports being called multiple times - */ -void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state); -/* Free the state */ -void shake128_ctx_release(shake128ctx *state); -/* Copy the state. */ -void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src); - -/* Initialize incremental hashing API */ -void shake128_inc_init(shake128incctx *state); -/* Absorb more information into the XOF. - * - * Can be called multiple times. - */ -void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen); -/* Finalize the XOF for squeezing */ -void shake128_inc_finalize(shake128incctx *state); -/* Squeeze output out of the sponge. - * - * Supports being called multiple times - */ -void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state); -/* Copy the context of the SHAKE128 XOF */ -void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src); -/* Free the context of the SHAKE128 XOF */ -void shake128_inc_ctx_release(shake128incctx *state); - -/* Initialize the state and absorb the provided input. - * - * This function does not support being called multiple times - * with the same state. - */ -void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen); -/* Squeeze output out of the sponge. 
- * - * Supports being called multiple times - */ -void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state); -/* Free the context held by this XOF */ -void shake256_ctx_release(shake256ctx *state); -/* Copy the context held by this XOF */ -void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src); - -/* Initialize incremental hashing API */ -void shake256_inc_init(shake256incctx *state); -void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen); -/* Prepares for squeeze phase */ -void shake256_inc_finalize(shake256incctx *state); -/* Squeeze output out of the sponge. - * - * Supports being called multiple times - */ -void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state); -/* Copy the state */ -void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src); -/* Free the state */ -void shake256_inc_ctx_release(shake256incctx *state); - -/* One-stop SHAKE128 call */ -void shake128(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen); - -/* One-stop SHAKE256 call */ -void shake256(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen); - -/* Initialize the incremental hashing state */ -void sha3_256_inc_init(sha3_256incctx *state); -/* Absorb blocks into SHA3 */ -void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen); -/* Obtain the output of the function and free `state` */ -void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state); -/* Copy the context */ -void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src); -/* Release the state, don't use if `_finalize` has been used */ -void sha3_256_inc_ctx_release(sha3_256incctx *state); - -void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen); - -/* Initialize the incremental hashing state */ -void sha3_384_inc_init(sha3_384incctx *state); -/* Absorb blocks into SHA3 */ -void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, size_t inlen); -/* Obtain the output of the function and free `state` */ -void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state); -/* Copy the context */ -void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src); -/* Release the state, don't use if `_finalize` has been used */ -void sha3_384_inc_ctx_release(sha3_384incctx *state); - -/* One-stop SHA3-384 shop */ -void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen); - -/* Initialize the incremental hashing state */ -void sha3_512_inc_init(sha3_512incctx *state); -/* Absorb blocks into SHA3 */ -void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen); -/* Obtain the output of the function and free `state` */ -void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state); -/* Copy the context */ -void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src); -/* Release the state, don't use if `_finalize` has been used */ -void sha3_512_inc_ctx_release(sha3_512incctx *state); - -/* One-stop SHA3-512 shop */ -void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen); +#define shake256incctx OQS_SHA3_shake256_inc_ctx +#define shake256_inc_init OQS_SHA3_shake256_inc_init +#define shake256_inc_absorb OQS_SHA3_shake256_inc_absorb +#define shake256_inc_finalize OQS_SHA3_shake256_inc_finalize +#define shake256_inc_squeeze OQS_SHA3_shake256_inc_squeeze #endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf.c 
b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf.c new file mode 100644 index 0000000000..1fb4c0f139 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf.c @@ -0,0 +1,210 @@ +#include "hnf_internal.h" +#include "internal.h" + +// HNF test function +int +ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat) +{ + int res = 1; + int found; + int ind = 0; + ibz_t zero; + ibz_init(&zero); + // upper triangular + for (int i = 0; i < 4; i++) { + // upper triangular + for (int j = 0; j < i; j++) { + res = res && ibz_is_zero(&((*mat)[i][j])); + } + // find first non 0 element of line + found = 0; + for (int j = i; j < 4; j++) { + if (found) { + // all values are positive, and first non-0 is the largest of that line + res = res && (ibz_cmp(&((*mat)[i][j]), &zero) >= 0); + res = res && (ibz_cmp(&((*mat)[i][ind]), &((*mat)[i][j])) > 0); + } else { + if (!ibz_is_zero(&((*mat)[i][j]))) { + found = 1; + ind = j; + // mustbe non-negative + res = res && (ibz_cmp(&((*mat)[i][j]), &zero) > 0); + } + } + } + } + // check that first nom-zero elements ndex per column is strictly increasing + int linestart = -1; + int i = 0; + for (int j = 0; j < 4; j++) { + while ((i < 4) && (ibz_is_zero(&((*mat)[i][j])))) { + i = i + 1; + } + if (i != 4) { + res = res && (linestart < i); + } + i = 0; + } + ibz_finalize(&zero); + return res; +} + +// Untested HNF helpers +// centered mod +void +ibz_vec_4_linear_combination_mod(ibz_vec_4_t *lc, + const ibz_t *coeff_a, + const ibz_vec_4_t *vec_a, + const ibz_t *coeff_b, + const ibz_vec_4_t *vec_b, + const ibz_t *mod) +{ + ibz_t prod, m; + ibz_vec_4_t sums; + ibz_vec_4_init(&sums); + ibz_init(&prod); + ibz_init(&m); + ibz_copy(&m, mod); + for (int i = 0; i < 4; i++) { + ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); + ibz_mul(&prod, coeff_b, &((*vec_b)[i])); + ibz_add(&(sums[i]), &(sums[i]), &prod); + ibz_centered_mod(&(sums[i]), &(sums[i]), &m); + } + for (int i = 0; i < 4; i++) { + ibz_copy(&((*lc)[i]), &(sums[i])); + } + ibz_finalize(&prod); + ibz_finalize(&m); + ibz_vec_4_finalize(&sums); +} + +void +ibz_vec_4_copy_mod(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_t *mod) +{ + ibz_t m; + ibz_init(&m); + ibz_copy(&m, mod); + for (int i = 0; i < 4; i++) { + ibz_centered_mod(&((*res)[i]), &((*vec)[i]), &m); + } + ibz_finalize(&m); +} + +// no need to center this, and not 0 +void +ibz_vec_4_scalar_mul_mod(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec, const ibz_t *mod) +{ + ibz_t m, s; + ibz_init(&m); + ibz_init(&s); + ibz_copy(&s, scalar); + ibz_copy(&m, mod); + for (int i = 0; i < 4; i++) { + ibz_mul(&((*prod)[i]), &((*vec)[i]), &s); + ibz_mod(&((*prod)[i]), &((*prod)[i]), &m); + } + ibz_finalize(&m); + ibz_finalize(&s); +} + +// Algorithm used is the one at number 2.4.8 in Henri Cohen's "A Course in Computational Algebraic +// Number Theory" (Springer Verlag, in series "Graduate texts in Mathematics") from 1993 +// assumes ibz_xgcd outputs u,v which are small in absolute value (as described in the +// book) +void +ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec_4_t *generators, const ibz_t *mod) +{ + int i = 3; + assert(generator_number > 3); + int n = generator_number; + int j = n - 1; + int k = n - 1; + ibz_t b, u, v, d, q, m, coeff_1, coeff_2, r; + ibz_vec_4_t c; + ibz_vec_4_t a[generator_number]; + ibz_vec_4_t w[4]; + ibz_init(&b); + ibz_init(&d); + ibz_init(&u); + ibz_init(&v); + ibz_init(&r); + ibz_init(&m); + ibz_init(&q); + ibz_init(&coeff_1); + ibz_init(&coeff_2); + ibz_vec_4_init(&c); + for 
(int h = 0; h < n; h++) { + if (h < 4) + ibz_vec_4_init(&(w[h])); + ibz_vec_4_init(&(a[h])); + ibz_copy(&(a[h][0]), &(generators[h][0])); + ibz_copy(&(a[h][1]), &(generators[h][1])); + ibz_copy(&(a[h][2]), &(generators[h][2])); + ibz_copy(&(a[h][3]), &(generators[h][3])); + } + assert(ibz_cmp(mod, &ibz_const_zero) > 0); + ibz_copy(&m, mod); + while (i != -1) { + while (j != 0) { + j = j - 1; + if (!ibz_is_zero(&(a[j][i]))) { + // assumtion that ibz_xgcd outputs u,v which are small in absolute + // value is needed here also, needs u non 0, but v can be 0 if needed + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &(a[j][i])); + ibz_vec_4_linear_combination(&c, &u, &(a[k]), &v, &(a[j])); + ibz_div(&coeff_1, &r, &(a[k][i]), &d); + ibz_div(&coeff_2, &r, &(a[j][i]), &d); + ibz_neg(&coeff_2, &coeff_2); + ibz_vec_4_linear_combination_mod( + &(a[j]), &coeff_1, &(a[j]), &coeff_2, &(a[k]), &m); // do lin comb mod m + ibz_vec_4_copy_mod(&(a[k]), &c, &m); // mod m in copy + } + } + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &m); + ibz_vec_4_scalar_mul_mod(&(w[i]), &u, &(a[k]), &m); // mod m in scalar mult + if (ibz_is_zero(&(w[i][i]))) { + ibz_copy(&(w[i][i]), &m); + } + for (int h = i + 1; h < 4; h++) { + ibz_div_floor(&q, &r, &(w[h][i]), &(w[i][i])); + ibz_neg(&q, &q); + ibz_vec_4_linear_combination(&(w[h]), &ibz_const_one, &(w[h]), &q, &(w[i])); + } + ibz_div(&m, &r, &m, &d); + assert(ibz_is_zero(&r)); + if (i != 0) { + k = k - 1; + i = i - 1; + j = k; + if (ibz_is_zero(&(a[k][i]))) + ibz_copy(&(a[k][i]), &m); + + } else { + k = k - 1; + i = i - 1; + j = k; + } + } + for (j = 0; j < 4; j++) { + for (i = 0; i < 4; i++) { + ibz_copy(&((*hnf)[i][j]), &(w[j][i])); + } + } + + ibz_finalize(&b); + ibz_finalize(&d); + ibz_finalize(&u); + ibz_finalize(&v); + ibz_finalize(&r); + ibz_finalize(&q); + ibz_finalize(&coeff_1); + ibz_finalize(&coeff_2); + ibz_finalize(&m); + ibz_vec_4_finalize(&c); + for (int h = 0; h < n; h++) { + if (h < 4) + ibz_vec_4_finalize(&(w[h])); + ibz_vec_4_finalize(&(a[h])); + } +} \ No newline at end of file diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf_internal.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf_internal.c new file mode 100644 index 0000000000..b2db5b54c9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf_internal.c @@ -0,0 +1,182 @@ +#include "hnf_internal.h" +#include "internal.h" + +// Small helper for integers +void +ibz_mod_not_zero(ibz_t *res, const ibz_t *x, const ibz_t *mod) +{ + ibz_t m, t; + ibz_init(&m); + ibz_init(&t); + ibz_mod(&m, x, mod); + ibz_set(&t, ibz_is_zero(&m)); + ibz_mul(&t, &t, mod); + ibz_add(res, &m, &t); + ibz_finalize(&m); + ibz_finalize(&t); +} + +// centered and rather positive then negative +void +ibz_centered_mod(ibz_t *remainder, const ibz_t *a, const ibz_t *mod) +{ + assert(ibz_cmp(mod, &ibz_const_zero) > 0); + ibz_t tmp, d, t; + ibz_init(&tmp); + ibz_init(&d); + ibz_init(&t); + ibz_div_floor(&d, &tmp, mod, &ibz_const_two); + ibz_mod_not_zero(&tmp, a, mod); + ibz_set(&t, ibz_cmp(&tmp, &d) > 0); + ibz_mul(&t, &t, mod); + ibz_sub(remainder, &tmp, &t); + ibz_finalize(&tmp); + ibz_finalize(&d); + ibz_finalize(&t); +} + +// if c, res = x, else res = y +void +ibz_conditional_assign(ibz_t *res, const ibz_t *x, const ibz_t *y, int c) +{ + ibz_t s, t, r; + ibz_init(&r); + ibz_init(&s); + ibz_init(&t); + ibz_set(&s, c != 0); + ibz_sub(&t, &ibz_const_one, &s); + ibz_mul(&r, &s, x); + ibz_mul(res, &t, y); + ibz_add(res, &r, res); + ibz_finalize(&r); + ibz_finalize(&s); + ibz_finalize(&t); 
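    /* Illustrative note: the selection above is branch-free at the ibz level.
     * With s = (c != 0) and t = 1 - s, the function computes res = s*x + t*y,
     * which is x when c is nonzero and y otherwise.  For example, x = 7 and
     * y = -3 give res = 7 when c = 1 and res = -3 when c = 0. */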
+} + +// mpz_gcdext specification specifies unique outputs used here +void +ibz_xgcd_with_u_not_0(ibz_t *d, ibz_t *u, ibz_t *v, const ibz_t *x, const ibz_t *y) +{ + if (ibz_is_zero(x) & ibz_is_zero(y)) { + ibz_set(d, 1); + ibz_set(u, 1); + ibz_set(v, 0); + return; + } + ibz_t q, r, x1, y1; + ibz_init(&q); + ibz_init(&r); + ibz_init(&x1); + ibz_init(&y1); + ibz_copy(&x1, x); + ibz_copy(&y1, y); + + // xgcd + ibz_xgcd(d, u, v, &x1, &y1); + + // make sure u!=0 (v can be 0 if needed) + // following GMP specification, u == 0 implies y|x + if (ibz_is_zero(u)) { + if (!ibz_is_zero(&x1)) { + if (ibz_is_zero(&y1)) { + ibz_set(&y1, 1); + } + ibz_div(&q, &r, &x1, &y1); + assert(ibz_is_zero(&r)); + ibz_sub(v, v, &q); + } + ibz_set(u, 1); + } + if (!ibz_is_zero(&x1)) { + // Make sure ux > 0 (and as small as possible) + assert(ibz_cmp(d, &ibz_const_zero) > 0); + ibz_mul(&r, &x1, &y1); + int neg = ibz_cmp(&r, &ibz_const_zero) < 0; + ibz_mul(&q, &x1, u); + while (ibz_cmp(&q, &ibz_const_zero) <= 0) { + ibz_div(&q, &r, &y1, d); + assert(ibz_is_zero(&r)); + if (neg) { + ibz_neg(&q, &q); + } + ibz_add(u, u, &q); + ibz_div(&q, &r, &x1, d); + assert(ibz_is_zero(&r)); + if (neg) { + ibz_neg(&q, &q); + } + ibz_sub(v, v, &q); + + ibz_mul(&q, &x1, u); + } + } + +#ifndef NDEBUG + int res = 0; + ibz_t sum, prod, test, cmp; + ibz_init(&sum); + ibz_init(&prod); + ibz_init(&cmp); + ibz_init(&test); + // sign correct + res = res | !(ibz_cmp(d, &ibz_const_zero) >= 0); + if (ibz_is_zero(&x1) && ibz_is_zero(&y1)) { + res = res | !(ibz_is_zero(v) && ibz_is_one(u) && ibz_is_one(d)); + } else { + if (!ibz_is_zero(&x1) && !ibz_is_zero(&y1)) { + // GCD divides x + ibz_div(&sum, &prod, &x1, d); + res = res | !ibz_is_zero(&prod); + // Small enough + ibz_mul(&prod, &x1, u); + res = res | !(ibz_cmp(&prod, &ibz_const_zero) > 0); + ibz_mul(&sum, &sum, &y1); + ibz_abs(&sum, &sum); + res = res | !(ibz_cmp(&prod, &sum) <= 0); + + // GCD divides y + ibz_div(&sum, &prod, &y1, d); + res = res | !ibz_is_zero(&prod); + // Small enough + ibz_mul(&prod, &y1, v); + res = res | !(ibz_cmp(&prod, &ibz_const_zero) <= 0); + ibz_mul(&sum, &sum, &x1); + ibz_abs(&sum, &sum); + res = res | !(ibz_cmp(&prod, &sum) < 0); + } else { + // GCD divides x + ibz_div(&sum, &prod, &x1, d); + res = res | !ibz_is_zero(&prod); + // GCD divides y + ibz_div(&sum, &prod, &y1, d); + res = res | !ibz_is_zero(&prod); + if (ibz_is_zero(&x1) && !ibz_is_zero(&y1)) { + ibz_abs(&prod, v); + res = res | !(ibz_is_one(&prod)); + res = res | !(ibz_is_one(u)); + } else { + ibz_abs(&prod, u); + res = res | !(ibz_is_one(&prod)); + res = res | !(ibz_is_zero(v)); + } + } + + // Bezout coeffs + ibz_mul(&sum, &x1, u); + ibz_mul(&prod, &y1, v); + ibz_add(&sum, &sum, &prod); + res = res | !(ibz_cmp(&sum, d) == 0); + } + assert(!res); + ibz_finalize(&sum); + ibz_finalize(&prod); + ibz_finalize(&cmp); + ibz_finalize(&test); + +#endif + + ibz_finalize(&x1); + ibz_finalize(&y1); + ibz_finalize(&q); + ibz_finalize(&r); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf_internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf_internal.h new file mode 100644 index 0000000000..5ecc871bb4 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf_internal.h @@ -0,0 +1,94 @@ +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations for functions internal to the HNF computation and its tests + */ + +#ifndef QUAT_HNF_HELPERS_H +#define QUAT_HNF_HELPERS_H + +#include + +/** @internal + * @ingroup quat_helpers + * @defgroup quat_hnf_helpers 
Internal functions for the HNF computation and tests + */ + +/** @internal + * @ingroup quat_hnf_helpers + * @defgroup quat_hnf_helpers_ibz Internal renamed GMP functions for the HNF computation + */ + +/** + * @brief GCD and Bézout coefficients u, v such that ua + bv = gcd + * + * @param gcd Output: Set to the gcd of a and b + * @param u Output: integer such that ua+bv=gcd + * @param v Output: Integer such that ua+bv=gcd + * @param a + * @param b + */ +void ibz_xgcd(ibz_t *gcd, + ibz_t *u, + ibz_t *v, + const ibz_t *a, + const ibz_t *b); // integers, dim4, test/integers, test/dim4 + +/** @} + */ + +/** @internal + * @ingroup quat_hnf_helpers + * @defgroup quat_hnf_integer_helpers Integer functions internal to the HNF computation and tests + * @{ + */ + +/** @brief x mod mod, with x in [1,mod] + * + * @param res Output: res = x [mod] and 0 0 + */ +void ibz_mod_not_zero(ibz_t *res, const ibz_t *x, const ibz_t *mod); + +/** @brief x mod mod, with x in ]-mod/2,mod/2] + * + * Centered and rather positive then negative. + * + * @param remainder Output: remainder = x [mod] and -mod/2 0 + */ +void ibz_centered_mod(ibz_t *remainder, const ibz_t *a, const ibz_t *mod); + +/** @brief if c then x else y + * + * @param res Output: if c, res = x, else res = y + * @param x + * @param y + * @param c condition: must be 0 or 1 + */ +void ibz_conditional_assign(ibz_t *res, const ibz_t *x, const ibz_t *y, int c); + +/** @brief d = gcd(x,y)>0 and d = ux+vy and u!= 0 and d>0 and u, v of small absolute value, u not 0 + * + * More precisely: + * If x and y are both non 0, -|xy|/d +#else +#include +#endif + +void +ibz_xgcd(ibz_t *gcd, ibz_t *u, ibz_t *v, const ibz_t *a, const ibz_t *b) +{ + mpz_gcdext(*gcd, *u, *v, *a, *b); +} \ No newline at end of file diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ideal.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ideal.c new file mode 100644 index 0000000000..9cf863a104 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ideal.c @@ -0,0 +1,323 @@ +#include +#include +#include "internal.h" + +// assumes parent order and lattice correctly set, computes and sets the norm +void +quat_lideal_norm(quat_left_ideal_t *lideal) +{ + quat_lattice_index(&(lideal->norm), &(lideal->lattice), (lideal->parent_order)); + int ok UNUSED = ibz_sqrt(&(lideal->norm), &(lideal->norm)); + assert(ok); +} + +// assumes parent order and lattice correctly set, recomputes and verifies its norm +static int +quat_lideal_norm_verify(const quat_left_ideal_t *lideal) +{ + int res; + ibz_t index; + ibz_init(&index); + quat_lattice_index(&index, &(lideal->lattice), (lideal->parent_order)); + ibz_sqrt(&index, &index); + res = (ibz_cmp(&(lideal->norm), &index) == 0); + ibz_finalize(&index); + return (res); +} + +void +quat_lideal_copy(quat_left_ideal_t *copy, const quat_left_ideal_t *copied) +{ + copy->parent_order = copied->parent_order; + ibz_copy(©->norm, &copied->norm); + ibz_copy(©->lattice.denom, &copied->lattice.denom); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(©->lattice.basis[i][j], &copied->lattice.basis[i][j]); + } + } +} + +void +quat_lideal_create_principal(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const quat_lattice_t *order, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal(order, alg)); + assert(quat_lattice_contains(NULL, order, x)); + ibz_t norm_n, norm_d; + ibz_init(&norm_n); + ibz_init(&norm_d); + + // Multiply order on the right by x + quat_lattice_alg_elem_mul(&(lideal->lattice), order, x, 
alg); + + // Reduce denominator. This conserves HNF + quat_lattice_reduce_denom(&lideal->lattice, &lideal->lattice); + + // Compute norm and check it's integral + quat_alg_norm(&norm_n, &norm_d, x, alg); + assert(ibz_is_one(&norm_d)); + ibz_copy(&lideal->norm, &norm_n); + + // Set order + lideal->parent_order = order; + ibz_finalize(&norm_n); + ibz_finalize(&norm_d); +} + +void +quat_lideal_create(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const ibz_t *N, + const quat_lattice_t *order, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal(order, alg)); + assert(!quat_alg_elem_is_zero(x)); + + quat_lattice_t ON; + quat_lattice_init(&ON); + + // Compute ideal generated by x + quat_lideal_create_principal(lideal, x, order, alg); + + // Compute ideal generated by N (without reducing denominator) + ibz_mat_4x4_scalar_mul(&ON.basis, N, &order->basis); + ibz_copy(&ON.denom, &order->denom); + + // Add lattices (reduces denominators) + quat_lattice_add(&lideal->lattice, &lideal->lattice, &ON); + // Set order + lideal->parent_order = order; + // Compute norm + quat_lideal_norm(lideal); + + quat_lattice_finalize(&ON); +} + +int +quat_lideal_generator(quat_alg_elem_t *gen, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + ibz_t norm_int, norm_n, gcd, r, q, norm_denom; + ibz_vec_4_t vec; + ibz_vec_4_init(&vec); + ibz_init(&norm_denom); + ibz_init(&norm_int); + ibz_init(&norm_n); + ibz_init(&r); + ibz_init(&q); + ibz_init(&gcd); + int a, b, c, d; + int found = 0; + int int_norm = 0; + while (1) { + int_norm++; + for (a = -int_norm; a <= int_norm; a++) { + for (b = -int_norm + abs(a); b <= int_norm - abs(a); b++) { + for (c = -int_norm + abs(a) + abs(b); c <= int_norm - abs(a) - abs(b); c++) { + d = int_norm - abs(a) - abs(b) - abs(c); + ibz_vec_4_set(&vec, a, b, c, d); + ibz_vec_4_content(&gcd, &vec); + if (ibz_is_one(&gcd)) { + ibz_mat_4x4_eval(&(gen->coord), &(lideal->lattice.basis), &vec); + ibz_copy(&(gen->denom), &(lideal->lattice.denom)); + quat_alg_norm(&norm_int, &norm_denom, gen, alg); + assert(ibz_is_one(&norm_denom)); + ibz_div(&q, &r, &norm_int, &(lideal->norm)); + assert(ibz_is_zero(&r)); + ibz_gcd(&gcd, &(lideal->norm), &q); + found = (0 == ibz_cmp(&gcd, &ibz_const_one)); + if (found) + goto fin; + } + } + } + } + } +fin:; + ibz_finalize(&r); + ibz_finalize(&q); + ibz_finalize(&norm_denom); + ibz_finalize(&norm_int); + ibz_finalize(&norm_n); + ibz_vec_4_finalize(&vec); + ibz_finalize(&gcd); + return (found); +} + +void +quat_lideal_mul(quat_left_ideal_t *product, + const quat_left_ideal_t *lideal, + const quat_alg_elem_t *alpha, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + ibz_t norm, norm_d; + ibz_init(&norm); + ibz_init(&norm_d); + quat_lattice_alg_elem_mul(&(product->lattice), &(lideal->lattice), alpha, alg); + product->parent_order = lideal->parent_order; + quat_alg_norm(&norm, &norm_d, alpha, alg); + ibz_mul(&(product->norm), &(lideal->norm), &norm); + assert(ibz_divides(&(product->norm), &norm_d)); + ibz_div(&(product->norm), &norm, &(product->norm), &norm_d); + assert(quat_lideal_norm_verify(lideal)); + ibz_finalize(&norm_d); + ibz_finalize(&norm); +} + +void +quat_lideal_add(quat_left_ideal_t *sum, const quat_left_ideal_t *I1, const quat_left_ideal_t *I2, const quat_alg_t *alg) +{ + assert(I1->parent_order == I2->parent_order); + assert(quat_order_is_maximal((I2->parent_order), alg)); + quat_lattice_add(&sum->lattice, &I1->lattice, &I2->lattice); + sum->parent_order = I1->parent_order; + 
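    /* Illustrative note: unlike quat_lideal_mul() above, the norm of a sum of
     * ideals is not determined by the operands' norms alone, so it is
     * recomputed from the lattice.  quat_lideal_norm() takes the index
     * [parent_order : lattice] and extracts its integer square root, since a
     * left ideal of reduced norm N has index N^2 in its parent order. */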
quat_lideal_norm(sum); +} + +void +quat_lideal_inter(quat_left_ideal_t *inter, + const quat_left_ideal_t *I1, + const quat_left_ideal_t *I2, + const quat_alg_t *alg) +{ + assert(I1->parent_order == I2->parent_order); + assert(quat_order_is_maximal((I2->parent_order), alg)); + quat_lattice_intersect(&inter->lattice, &I1->lattice, &I2->lattice); + inter->parent_order = I1->parent_order; + quat_lideal_norm(inter); +} + +int +quat_lideal_equals(const quat_left_ideal_t *I1, const quat_left_ideal_t *I2, const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((I2->parent_order), alg)); + assert(quat_order_is_maximal((I1->parent_order), alg)); + return (I1->parent_order == I2->parent_order) & (ibz_cmp(&I1->norm, &I2->norm) == 0) & + quat_lattice_equal(&I1->lattice, &I2->lattice); +} + +void +quat_lideal_inverse_lattice_without_hnf(quat_lattice_t *inv, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + quat_lattice_conjugate_without_hnf(inv, &(lideal->lattice)); + ibz_mul(&(inv->denom), &(inv->denom), &(lideal->norm)); +} + +// following the implementation of ideal isomorphisms in the code of LearningToSQI's sage +// implementation of SQIsign +void +quat_lideal_right_transporter(quat_lattice_t *trans, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal1->parent_order), alg)); + assert(quat_order_is_maximal((lideal2->parent_order), alg)); + assert(lideal1->parent_order == lideal2->parent_order); + quat_lattice_t inv; + quat_lattice_init(&inv); + quat_lideal_inverse_lattice_without_hnf(&inv, lideal1, alg); + quat_lattice_mul(trans, &inv, &(lideal2->lattice), alg); + quat_lattice_finalize(&inv); +} + +void +quat_lideal_right_order(quat_lattice_t *order, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + quat_lideal_right_transporter(order, lideal, lideal, alg); +} + +void +quat_lideal_class_gram(ibz_mat_4x4_t *G, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + quat_lattice_gram(G, &(lideal->lattice), alg); + + // divide by norm · denominator² + ibz_t divisor, rmd; + ibz_init(&divisor); + ibz_init(&rmd); + + ibz_mul(&divisor, &(lideal->lattice.denom), &(lideal->lattice.denom)); + ibz_mul(&divisor, &divisor, &(lideal->norm)); + + for (int i = 0; i < 4; i++) { + for (int j = 0; j <= i; j++) { + ibz_div(&(*G)[i][j], &rmd, &(*G)[i][j], &divisor); + assert(ibz_is_zero(&rmd)); + } + } + for (int i = 0; i < 4; i++) { + for (int j = 0; j <= i - 1; j++) { + ibz_copy(&(*G)[j][i], &(*G)[i][j]); + } + } + + ibz_finalize(&rmd); + ibz_finalize(&divisor); +} + +void +quat_lideal_conjugate_without_hnf(quat_left_ideal_t *conj, + quat_lattice_t *new_parent_order, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg) +{ + quat_lideal_right_order(new_parent_order, lideal, alg); + quat_lattice_conjugate_without_hnf(&(conj->lattice), &(lideal->lattice)); + conj->parent_order = new_parent_order; + ibz_copy(&(conj->norm), &(lideal->norm)); +} + +int +quat_order_discriminant(ibz_t *disc, const quat_lattice_t *order, const quat_alg_t *alg) +{ + int ok = 0; + ibz_t det, sqr, div; + ibz_mat_4x4_t transposed, norm, prod; + ibz_init(&det); + ibz_init(&sqr); + ibz_init(&div); + ibz_mat_4x4_init(&transposed); + ibz_mat_4x4_init(&norm); + ibz_mat_4x4_init(&prod); + ibz_mat_4x4_transpose(&transposed, &(order->basis)); + // multiply gram matrix by 2 because of reduced trace + 
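    /* Illustrative note, assuming the quaternion algebra ramified at p and
     * infinity with i^2 = -1 and j^2 = -p used by SQIsign: on the basis
     * (1, i, j, k) the bilinear form Trd(x * conj(y)) has Gram matrix
     * 2 * diag(1, 1, p, p), which is exactly the matrix built below.  The
     * discriminant is then the square root of det(B^T * G * B), corrected by
     * powers of the order's denominator, and quat_order_is_maximal() accepts
     * the order precisely when this value equals p. */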
ibz_mat_4x4_identity(&norm); + ibz_copy(&(norm[2][2]), &(alg->p)); + ibz_copy(&(norm[3][3]), &(alg->p)); + ibz_mat_4x4_scalar_mul(&norm, &ibz_const_two, &norm); + ibz_mat_4x4_mul(&prod, &transposed, &norm); + ibz_mat_4x4_mul(&prod, &prod, &(order->basis)); + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &prod); + ibz_mul(&div, &(order->denom), &(order->denom)); + ibz_mul(&div, &div, &div); + ibz_mul(&div, &div, &div); + ibz_div(&sqr, &div, &det, &div); + ok = ibz_is_zero(&div); + ok = ok & ibz_sqrt(disc, &sqr); + ibz_finalize(&det); + ibz_finalize(&div); + ibz_finalize(&sqr); + ibz_mat_4x4_finalize(&transposed); + ibz_mat_4x4_finalize(&norm); + ibz_mat_4x4_finalize(&prod); + return (ok); +} + +int +quat_order_is_maximal(const quat_lattice_t *order, const quat_alg_t *alg) +{ + int res; + ibz_t disc; + ibz_init(&disc); + quat_order_discriminant(&disc, order, alg); + res = (ibz_cmp(&disc, &(alg->p)) == 0); + ibz_finalize(&disc); + return (res); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/intbig.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/intbig.c new file mode 100644 index 0000000000..b0462dc8b5 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/intbig.c @@ -0,0 +1,791 @@ +#include "intbig_internal.h" +#include +#include +#include +#include +#include +#include + +// #define DEBUG_VERBOSE + +#ifdef DEBUG_VERBOSE +#define DEBUG_STR_PRINTF(x) printf("%s\n", (x)); + +static void +DEBUG_STR_FUN_INT_MP(const char *op, int arg1, const ibz_t *arg2) +{ + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + printf("%s,%x,%s\n", op, arg1, arg2_str); +} + +static void +DEBUG_STR_FUN_3(const char *op, const ibz_t *arg1, const ibz_t *arg2, const ibz_t *arg3) +{ + int arg1_size = ibz_size_in_base(arg1, 16); + char arg1_str[arg1_size + 2]; + ibz_convert_to_str(arg1, arg1_str, 16); + + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + int arg3_size = ibz_size_in_base(arg3, 16); + char arg3_str[arg3_size + 2]; + ibz_convert_to_str(arg3, arg3_str, 16); + + printf("%s,%s,%s,%s\n", op, arg1_str, arg2_str, arg3_str); +} + +static void +DEBUG_STR_FUN_MP2_INT(const char *op, const ibz_t *arg1, const ibz_t *arg2, int arg3) +{ + int arg1_size = ibz_size_in_base(arg1, 16); + char arg1_str[arg1_size + 2]; + ibz_convert_to_str(arg1, arg1_str, 16); + + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + printf("%s,%s,%s,%x\n", op, arg1_str, arg2_str, arg3); +} + +static void +DEBUG_STR_FUN_INT_MP2(const char *op, int arg1, const ibz_t *arg2, const ibz_t *arg3) +{ + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + int arg3_size = ibz_size_in_base(arg3, 16); + char arg3_str[arg3_size + 2]; + ibz_convert_to_str(arg3, arg3_str, 16); + + if (arg1 >= 0) + printf("%s,%x,%s,%s\n", op, arg1, arg2_str, arg3_str); + else + printf("%s,-%x,%s,%s\n", op, -arg1, arg2_str, arg3_str); +} + +static void +DEBUG_STR_FUN_INT_MP_INT(const char *op, int arg1, const ibz_t *arg2, int arg3) +{ + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + printf("%s,%x,%s,%x\n", op, arg1, arg2_str, arg3); +} + +static void +DEBUG_STR_FUN_4(const char *op, const ibz_t *arg1, const ibz_t *arg2, const ibz_t *arg3, const ibz_t *arg4) +{ + int arg1_size = 
ibz_size_in_base(arg1, 16); + char arg1_str[arg1_size + 2]; + ibz_convert_to_str(arg1, arg1_str, 16); + + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + int arg3_size = ibz_size_in_base(arg3, 16); + char arg3_str[arg3_size + 2]; + ibz_convert_to_str(arg3, arg3_str, 16); + + int arg4_size = ibz_size_in_base(arg4, 16); + char arg4_str[arg4_size + 2]; + ibz_convert_to_str(arg4, arg4_str, 16); + + printf("%s,%s,%s,%s,%s\n", op, arg1_str, arg2_str, arg3_str, arg4_str); +} +#else +#define DEBUG_STR_PRINTF(x) +#define DEBUG_STR_FUN_INT_MP(op, arg1, arg2) +#define DEBUG_STR_FUN_3(op, arg1, arg2, arg3) +#define DEBUG_STR_FUN_INT_MP2(op, arg1, arg2, arg3) +#define DEBUG_STR_FUN_INT_MP_INT(op, arg1, arg2, arg3) +#define DEBUG_STR_FUN_4(op, arg1, arg2, arg3, arg4) +#endif + +/** @defgroup ibz_t Constants + * @{ + */ + +const __mpz_struct ibz_const_zero[1] = { + { + ._mp_alloc = 0, + ._mp_size = 0, + ._mp_d = (mp_limb_t[]){ 0 }, + } +}; + +const __mpz_struct ibz_const_one[1] = { + { + ._mp_alloc = 0, + ._mp_size = 1, + ._mp_d = (mp_limb_t[]){ 1 }, + } +}; + +const __mpz_struct ibz_const_two[1] = { + { + ._mp_alloc = 0, + ._mp_size = 1, + ._mp_d = (mp_limb_t[]){ 2 }, + } +}; + +const __mpz_struct ibz_const_three[1] = { + { + ._mp_alloc = 0, + ._mp_size = 1, + ._mp_d = (mp_limb_t[]){ 3 }, + } +}; + +void +ibz_init(ibz_t *x) +{ + mpz_init(*x); +} + +void +ibz_finalize(ibz_t *x) +{ + mpz_clear(*x); +} + +void +ibz_add(ibz_t *sum, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_add(*sum, *a, *b); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_add", sum, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_sub(ibz_t *diff, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_sub(*diff, *a, *b); + +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_sub", diff, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_mul(*prod, *a, *b); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_mul", prod, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_neg(ibz_t *neg, const ibz_t *a) +{ + mpz_neg(*neg, *a); +} + +void +ibz_abs(ibz_t *abs, const ibz_t *a) +{ + mpz_abs(*abs, *a); +} + +void +ibz_div(ibz_t *quotient, ibz_t *remainder, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_tdiv_qr(*quotient, *remainder, *a, *b); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_4("ibz_div", quotient, remainder, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp; + ibz_init(&a_cp); + ibz_copy(&a_cp, a); +#endif + mpz_tdiv_q_2exp(*quotient, *a, exp); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_MP2_INT("ibz_div_2exp,%Zx,%Zx,%x\n", quotient, &a_cp, exp); + ibz_finalize(&a_cp); +#endif +} + +void +ibz_div_floor(ibz_t *q, ibz_t *r, const ibz_t *n, const ibz_t *d) +{ + mpz_fdiv_qr(*q, *r, *n, *d); +} + +void +ibz_mod(ibz_t 
*r, const ibz_t *a, const ibz_t *b) +{ + mpz_mod(*r, *a, *b); +} + +unsigned long int +ibz_mod_ui(const mpz_t *n, unsigned long int d) +{ + return mpz_fdiv_ui(*n, d); +} + +int +ibz_divides(const ibz_t *a, const ibz_t *b) +{ + return mpz_divisible_p(*a, *b); +} + +void +ibz_pow(ibz_t *pow, const ibz_t *x, uint32_t e) +{ + mpz_pow_ui(*pow, *x, e); +} + +void +ibz_pow_mod(ibz_t *pow, const ibz_t *x, const ibz_t *e, const ibz_t *m) +{ + mpz_powm(*pow, *x, *e, *m); + DEBUG_STR_FUN_4("ibz_pow_mod", pow, x, e, m); +} + +int +ibz_two_adic(ibz_t *pow) +{ + return mpz_scan1(*pow, 0); +} + +int +ibz_cmp(const ibz_t *a, const ibz_t *b) +{ + int ret = mpz_cmp(*a, *b); + DEBUG_STR_FUN_INT_MP2("ibz_cmp", ret, a, b); + return ret; +} + +int +ibz_is_zero(const ibz_t *x) +{ + int ret = !mpz_cmp_ui(*x, 0); + DEBUG_STR_FUN_INT_MP("ibz_is_zero", ret, x); + return ret; +} + +int +ibz_is_one(const ibz_t *x) +{ + int ret = !mpz_cmp_ui(*x, 1); + DEBUG_STR_FUN_INT_MP("ibz_is_one", ret, x); + return ret; +} + +int +ibz_cmp_int32(const ibz_t *x, int32_t y) +{ + int ret = mpz_cmp_si(*x, (signed long int)y); + DEBUG_STR_FUN_INT_MP_INT("ibz_cmp_int32", ret, x, y); + return ret; +} + +int +ibz_is_even(const ibz_t *x) +{ + int ret = !mpz_tstbit(*x, 0); + DEBUG_STR_FUN_INT_MP("ibz_is_even", ret, x); + return ret; +} + +int +ibz_is_odd(const ibz_t *x) +{ + int ret = mpz_tstbit(*x, 0); + DEBUG_STR_FUN_INT_MP("ibz_is_odd", ret, x); + return ret; +} + +void +ibz_set(ibz_t *i, int32_t x) +{ + mpz_set_si(*i, x); +} + +int +ibz_convert_to_str(const ibz_t *i, char *str, int base) +{ + if (!str || (base != 10 && base != 16)) + return 0; + + mpz_get_str(str, base, *i); + + return 1; +} + +void +ibz_print(const ibz_t *num, int base) +{ + assert(base == 10 || base == 16); + + int num_size = ibz_size_in_base(num, base); + char num_str[num_size + 2]; + ibz_convert_to_str(num, num_str, base); + printf("%s", num_str); +} + +int +ibz_set_from_str(ibz_t *i, const char *str, int base) +{ + return (1 + mpz_set_str(*i, str, base)); +} + +void +ibz_copy(ibz_t *target, const ibz_t *value) +{ + mpz_set(*target, *value); +} + +void +ibz_swap(ibz_t *a, ibz_t *b) +{ + mpz_swap(*a, *b); +} + +int32_t +ibz_get(const ibz_t *i) +{ +#if LONG_MAX == INT32_MAX + return (int32_t)mpz_get_si(*i); +#elif LONG_MAX > INT32_MAX + // Extracts the sign bit and the 31 least significant bits + signed long int t = mpz_get_si(*i); + return (int32_t)((t >> (sizeof(signed long int) * 8 - 32)) & INT32_C(0x80000000)) | (t & INT32_C(0x7FFFFFFF)); +#else +#error Unsupported configuration: LONG_MAX must be >= INT32_MAX +#endif +} + +int +ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b) +{ + int randret; + int ret = 1; + mpz_t tmp; + mpz_t bmina; + mpz_init(bmina); + mpz_sub(bmina, *b, *a); + + if (mpz_sgn(bmina) == 0) { + mpz_set(*rand, *a); + mpz_clear(bmina); + return 1; + } + + size_t len_bits = mpz_sizeinbase(bmina, 2); + size_t len_bytes = (len_bits + 7) / 8; + size_t sizeof_limb = sizeof(mp_limb_t); + size_t sizeof_limb_bits = sizeof_limb * 8; + size_t len_limbs = (len_bytes + sizeof_limb - 1) / sizeof_limb; + + mp_limb_t mask = ((mp_limb_t)-1) >> (sizeof_limb_bits - len_bits) % sizeof_limb_bits; + mp_limb_t r[len_limbs]; + +#ifndef NDEBUG + { + for (size_t i = 0; i < len_limbs; ++i) + r[i] = (mp_limb_t)-1; + r[len_limbs - 1] = mask; + mpz_t check; + mpz_roinit_n(check, r, len_limbs); + assert(mpz_cmp(check, bmina) >= 0); // max sampled value >= b - a + mpz_t bmina2; + mpz_init(bmina2); + mpz_add(bmina2, bmina, bmina); + assert(mpz_cmp(check, bmina2) < 
0); // max sampled value < 2 * (b - a) + mpz_clear(bmina2); + } +#endif + + do { + randret = randombytes((unsigned char *)r, len_bytes); + if (randret != 0) { + ret = 0; + goto err; + } +#ifdef TARGET_BIG_ENDIAN + for (size_t i = 0; i < len_limbs; ++i) + r[i] = BSWAP_DIGIT(r[i]); +#endif + r[len_limbs - 1] &= mask; + mpz_roinit_n(tmp, r, len_limbs); + if (mpz_cmp(tmp, bmina) <= 0) + break; + } while (1); + + mpz_add(*rand, tmp, *a); +err: + mpz_clear(bmina); + return ret; +} + +int +ibz_rand_interval_i(ibz_t *rand, int32_t a, int32_t b) +{ + uint32_t diff, mask; + int32_t rand32; + + if (!(a >= 0 && b >= 0 && b > a)) { + printf("a = %d b = %d\n", a, b); + } + assert(a >= 0 && b >= 0 && b > a); + + diff = b - a; + + // Create a mask with 1 + ceil(log2(diff)) least significant bits set +#if (defined(__GNUC__) || defined(__clang__)) && INT_MAX == INT32_MAX + mask = (1 << (32 - __builtin_clz((uint32_t)diff))) - 1; +#else + uint32_t diff2 = diff, tmp; + + mask = (diff2 > 0xFFFF) << 4; + diff2 >>= mask; + + tmp = (diff2 > 0xFF) << 3; + diff2 >>= tmp; + mask |= tmp; + + tmp = (diff2 > 0xF) << 2; + diff2 >>= tmp; + mask |= tmp; + + tmp = (diff2 > 0x3) << 1; + diff2 >>= tmp; + mask |= tmp; + + mask |= diff2 >> 1; + + mask = (1 << (mask + 1)) - 1; +#endif + + assert(mask >= diff && mask < 2 * diff); + + // Rejection sampling + do { + randombytes((unsigned char *)&rand32, sizeof(rand32)); + +#ifdef TARGET_BIG_ENDIAN + rand32 = BSWAP32(rand32); +#endif + + rand32 &= mask; + } while (rand32 > (int32_t)diff); + + rand32 += a; + ibz_set(rand, rand32); + + return 1; +} + +int +ibz_rand_interval_minm_m(ibz_t *rand, int32_t m) +{ + int ret = 1; + mpz_t m_big; + + // m_big = 2 * m + mpz_init_set_si(m_big, m); + mpz_add(m_big, m_big, m_big); + + // Sample in [0, 2*m] + ret = ibz_rand_interval(rand, &ibz_const_zero, &m_big); + + // Adjust to range [-m, m] + mpz_sub_ui(*rand, *rand, m); + + mpz_clear(m_big); + + return ret; +} + +int +ibz_rand_interval_bits(ibz_t *rand, uint32_t m) +{ + int ret = 1; + mpz_t tmp; + mpz_t low; + mpz_init_set_ui(tmp, 1); + mpz_mul_2exp(tmp, tmp, m); + mpz_init(low); + mpz_neg(low, tmp); + ret = ibz_rand_interval(rand, &low, &tmp); + mpz_clear(tmp); + mpz_clear(low); + if (ret != 1) + goto err; + mpz_sub_ui(*rand, *rand, (unsigned long int)m); + return ret; +err: + mpz_clear(tmp); + mpz_clear(low); + return ret; +} + +int +ibz_bitsize(const ibz_t *a) +{ + return (int)mpz_sizeinbase(*a, 2); +} + +int +ibz_size_in_base(const ibz_t *a, int base) +{ + return (int)mpz_sizeinbase(*a, base); +} + +void +ibz_copy_digits(ibz_t *target, const digit_t *dig, int dig_len) +{ + mpz_import(*target, dig_len, -1, sizeof(digit_t), 0, 0, dig); +} + +void +ibz_to_digits(digit_t *target, const ibz_t *ibz) +{ + // From the GMP documentation: + // "If op is zero then the count returned will be zero and nothing written to rop." + // The next line ensures zero is written to the first limb of target if ibz is zero; + // target is then overwritten by the actual value if it is not. + target[0] = 0; + mpz_export(target, NULL, -1, sizeof(digit_t), 0, 0, *ibz); +} + +int +ibz_probab_prime(const ibz_t *n, int reps) +{ + int ret = mpz_probab_prime_p(*n, reps); + DEBUG_STR_FUN_INT_MP_INT("ibz_probab_prime", ret, n, reps); + return ret; +} + +void +ibz_gcd(ibz_t *gcd, const ibz_t *a, const ibz_t *b) +{ + mpz_gcd(*gcd, *a, *b); +} + +int +ibz_invmod(ibz_t *inv, const ibz_t *a, const ibz_t *mod) +{ + return (mpz_invert(*inv, *a, *mod) ? 
1 : 0); +} + +int +ibz_legendre(const ibz_t *a, const ibz_t *p) +{ + return mpz_legendre(*a, *p); +} + +int +ibz_sqrt(ibz_t *sqrt, const ibz_t *a) +{ + if (mpz_perfect_square_p(*a)) { + mpz_sqrt(*sqrt, *a); + return 1; + } else { + return 0; + } +} + +void +ibz_sqrt_floor(ibz_t *sqrt, const ibz_t *a) +{ + mpz_sqrt(*sqrt, *a); +} + +int +ibz_sqrt_mod_p(ibz_t *sqrt, const ibz_t *a, const ibz_t *p) +{ +#ifndef NDEBUG + assert(ibz_probab_prime(p, 100)); +#endif + // Case a = 0 + { + ibz_t test; + ibz_init(&test); + ibz_mod(&test, a, p); + if (ibz_is_zero(&test)) { + ibz_set(sqrt, 0); + } + ibz_finalize(&test); + } +#ifdef DEBUG_VERBOSE + ibz_t a_cp, p_cp; + ibz_init(&a_cp); + ibz_init(&p_cp); + ibz_copy(&a_cp, a); + ibz_copy(&p_cp, p); +#endif + + mpz_t amod, tmp, exp, a4, a2, q, z, qnr, x, y, b, pm1; + mpz_init(amod); + mpz_init(tmp); + mpz_init(exp); + mpz_init(a4); + mpz_init(a2); + mpz_init(q); + mpz_init(z); + mpz_init(qnr); + mpz_init(x); + mpz_init(y); + mpz_init(b); + mpz_init(pm1); + + int ret = 1; + + mpz_mod(amod, *a, *p); + if (mpz_cmp_ui(amod, 0) < 0) { + mpz_add(amod, *p, amod); + } + + if (mpz_legendre(amod, *p) != 1) { + ret = 0; + goto end; + } + + mpz_sub_ui(pm1, *p, 1); + + if (mpz_mod_ui(tmp, *p, 4) == 3) { + // p % 4 == 3 + mpz_add_ui(tmp, *p, 1); + mpz_fdiv_q_2exp(tmp, tmp, 2); + mpz_powm(*sqrt, amod, tmp, *p); + } else if (mpz_mod_ui(tmp, *p, 8) == 5) { + // p % 8 == 5 + mpz_sub_ui(tmp, *p, 1); + mpz_fdiv_q_2exp(tmp, tmp, 2); + mpz_powm(tmp, amod, tmp, *p); // a^{(p-1)/4} mod p + if (!mpz_cmp_ui(tmp, 1)) { + mpz_add_ui(tmp, *p, 3); + mpz_fdiv_q_2exp(tmp, tmp, 3); + mpz_powm(*sqrt, amod, tmp, *p); // a^{(p+3)/8} mod p + } else { + mpz_sub_ui(tmp, *p, 5); + mpz_fdiv_q_2exp(tmp, tmp, 3); // (p - 5) / 8 + mpz_mul_2exp(a4, amod, 2); // 4*a + mpz_powm(tmp, a4, tmp, *p); + + mpz_mul_2exp(a2, amod, 1); + mpz_mul(tmp, a2, tmp); + mpz_mod(*sqrt, tmp, *p); + } + } else { + // p % 8 == 1 -> Shanks-Tonelli + int e = 0; + mpz_sub_ui(q, *p, 1); + while (mpz_tstbit(q, e) == 0) + e++; + mpz_fdiv_q_2exp(q, q, e); + + // 1. find generator - non-quadratic residue + mpz_set_ui(qnr, 2); + while (mpz_legendre(qnr, *p) != -1) + mpz_add_ui(qnr, qnr, 1); + mpz_powm(z, qnr, q, *p); + + // 2. 
Initialize + mpz_set(y, z); + mpz_powm(y, amod, q, *p); // y = a^q mod p + + mpz_add_ui(tmp, q, 1); // tmp = (q + 1) / 2 + mpz_fdiv_q_2exp(tmp, tmp, 1); + + mpz_powm(x, amod, tmp, *p); // x = a^(q + 1)/2 mod p + + mpz_set_ui(exp, 1); + mpz_mul_2exp(exp, exp, e - 2); + + for (int i = 0; i < e; ++i) { + mpz_powm(b, y, exp, *p); + + if (!mpz_cmp(b, pm1)) { + mpz_mul(x, x, z); + mpz_mod(x, x, *p); + + mpz_mul(y, y, z); + mpz_mul(y, y, z); + mpz_mod(y, y, *p); + } + + mpz_powm_ui(z, z, 2, *p); + mpz_fdiv_q_2exp(exp, exp, 1); + } + + mpz_set(*sqrt, x); + } + +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_sqrt_mod_p", sqrt, &a_cp, &p_cp); + ibz_finalize(&a_cp); + ibz_finalize(&p_cp); +#endif + +end: + mpz_clear(amod); + mpz_clear(tmp); + mpz_clear(exp); + mpz_clear(a4); + mpz_clear(a2); + mpz_clear(q); + mpz_clear(z); + mpz_clear(qnr); + mpz_clear(x); + mpz_clear(y); + mpz_clear(b); + mpz_clear(pm1); + + return ret; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/intbig_internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/intbig_internal.h new file mode 100644 index 0000000000..de4762a6d3 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/intbig_internal.h @@ -0,0 +1,123 @@ +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations for big integer functions only used in quaternion functions + */ + +#ifndef INTBIG_INTERNAL_H +#define INTBIG_INTERNAL_H + +#include "intbig.h" + +/** @internal + * @ingroup quat_helpers + * @defgroup ibz_helper Internal integer functions (gmp-based) + * @{ + */ + +/********************************************************************/ + +/** @brief Euclidean division of a by b + * + * Computes quotient, remainder so that remainder+quotient*b = a where 0<=|remainder|<|b| + * The quotient is rounded towards minus infinity. + */ +void ibz_div_floor(ibz_t *q, ibz_t *r, const ibz_t *n, const ibz_t *d); + +/** @brief generate random value in [a, b] + * assumed that a >= 0, b >= 0 and a < b + * @returns 1 on success, 0 on failiure + */ +int ibz_rand_interval_i(ibz_t *rand, int32_t a, int32_t b); + +/** @brief generate random value in [-2^m, 2^m] + * assumed that m > 0 and bitlength of m < 32 bit + * @returns 1 on success, 0 on failiure + */ +int ibz_rand_interval_bits(ibz_t *rand, uint32_t m); + +/** @brief set str to a string containing the representation of i in base + * + * Base should be 10 or 16 + * + * str should be an array of length enough to store the representation of in + * in base, which can be obtained by ibz_sizeinbase(i, base) + 2, where the 2 + * is for the sign and the null terminator + * + * Case for base 16 does not matter + * + * @returns 1 if the integer could be converted to a string, 0 otherwise + */ +int ibz_convert_to_str(const ibz_t *i, char *str, int base); + +/** @brief print num in base to stdout + * + * Base should be 10 or 16 + */ +void ibz_print(const ibz_t *num, int base); + +/** @brief set i to integer contained in string when read as number in base + * + * Base should be 10 or 16, and the number should be written without ponctuation or whitespaces + * + * Case for base 16 does not matter + * + * @returns 1 if the string could be converted to an integer, 0 otherwise + */ +int ibz_set_from_str(ibz_t *i, const char *str, int base); + +/** + * @brief Probabilistic primality test + * + * @param n The number to test + * @param reps Number of Miller-Rabin repetitions. 
The more, the slower and the less likely are + * false positives + * @return 1 if probably prime, 0 if certainly not prime, 2 if certainly prime + * + * Using GMP's implementation: + * + * From GMP's documentation: "This function performs some trial divisions, a Baillie-PSW probable + * prime test, then reps-24 Miller-Rabin probabilistic primality tests." + */ +int ibz_probab_prime(const ibz_t *n, int reps); + +/** + * @brief Square root modulo a prime + * + * @returns 1 if square root of a mod p exists and was computed, 0 otherwise + * @param sqrt Output: Set to a square root of a mod p if any exist + * @param a number of which a square root mod p is searched + * @param p assumed prime + */ +int ibz_sqrt_mod_p(ibz_t *sqrt, const ibz_t *a, const ibz_t *p); + +/** + * @brief Integer square root of a perfect square + * + * @returns 1 if an integer square root of a exists and was computed, 0 otherwise + * @param sqrt Output: Set to a integer square root of a if any exist + * @param a number of which an integer square root is searched + */ +int ibz_sqrt(ibz_t *sqrt, const ibz_t *a); + +/** + * @brief Legendre symbol of a mod p + * + * @returns Legendre symbol of a mod p + * @param a + * @param p assumed prime + * + * Uses GMP's implementation + * + * If output is 1, a is a square mod p, if -1, not. If 0, it is divisible by p + */ +int ibz_legendre(const ibz_t *a, const ibz_t *p); + +/** @} + */ + +// end of ibz_all +/** @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/integers.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/integers.c new file mode 100644 index 0000000000..ec7cda05eb --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/integers.c @@ -0,0 +1,116 @@ +#include +#include "internal.h" +#include +#include +#include + +// Random prime generation for tests +int +ibz_generate_random_prime(ibz_t *p, int is3mod4, int bitsize, int probability_test_iterations) +{ + assert(bitsize != 0); + int found = 0; + ibz_t two_pow, two_powp; + + ibz_init(&two_pow); + ibz_init(&two_powp); + ibz_pow(&two_pow, &ibz_const_two, (bitsize - 1) - (0 != is3mod4)); + ibz_pow(&two_powp, &ibz_const_two, bitsize - (0 != is3mod4)); + + int cnt = 0; + while (!found) { + cnt++; + if (cnt % 100000 == 0) { + printf("Random prime generation is still running after %d attempts, this is not " + "normal! 
The expected number of attempts is %d \n", + cnt, + bitsize); + } + ibz_rand_interval(p, &two_pow, &two_powp); + ibz_add(p, p, p); + if (is3mod4) { + ibz_add(p, p, p); + ibz_add(p, &ibz_const_two, p); + } + ibz_add(p, &ibz_const_one, p); + + found = ibz_probab_prime(p, probability_test_iterations); + } + ibz_finalize(&two_pow); + ibz_finalize(&two_powp); + return found; +} + +// solves x^2 + n y^2 == p for positive integers x, y +// assumes that p is prime and -n mod p is a square +int +ibz_cornacchia_prime(ibz_t *x, ibz_t *y, const ibz_t *n, const ibz_t *p) +{ + ibz_t r0, r1, r2, a, prod; + ibz_init(&r0); + ibz_init(&r1); + ibz_init(&r2); + ibz_init(&a); + ibz_init(&prod); + + int res = 0; + + // manage case p = 2 separately + if (!ibz_cmp(p, &ibz_const_two)) { + if (ibz_is_one(n)) { + ibz_set(x, 1); + ibz_set(y, 1); + res = 1; + } + goto done; + } + // manage case p = n separately + if (!ibz_cmp(p, n)) { + ibz_set(x, 0); + ibz_set(y, 1); + res = 1; + goto done; + } + + // test coprimality (should always be ok in our cases) + ibz_gcd(&r2, p, n); + if (!ibz_is_one(&r2)) + goto done; + + // get sqrt of -n mod p + ibz_neg(&r2, n); + if (!ibz_sqrt_mod_p(&r2, &r2, p)) + goto done; + + // run loop + ibz_copy(&prod, p); + ibz_copy(&r1, p); + ibz_copy(&r0, p); + while (ibz_cmp(&prod, p) >= 0) { + ibz_div(&a, &r0, &r2, &r1); + ibz_mul(&prod, &r0, &r0); + ibz_copy(&r2, &r1); + ibz_copy(&r1, &r0); + } + // test if result is solution + ibz_sub(&a, p, &prod); + ibz_div(&a, &r2, &a, n); + if (!ibz_is_zero(&r2)) + goto done; + if (!ibz_sqrt(y, &a)) + goto done; + + ibz_copy(x, &r0); + ibz_mul(&a, y, y); + ibz_mul(&a, &a, n); + ibz_add(&prod, &prod, &a); + res = !ibz_cmp(&prod, p); + +done: + ibz_finalize(&r0); + ibz_finalize(&r1); + ibz_finalize(&r2); + ibz_finalize(&a); + ibz_finalize(&prod); + return res; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/internal.h new file mode 100644 index 0000000000..edbba345f9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/internal.h @@ -0,0 +1,812 @@ +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations for helper functions for quaternion algebra implementation + */ + +#ifndef QUAT_HELPER_H +#define QUAT_HELPER_H + +#include +#include +#include "intbig_internal.h" + +/** @internal + * @ingroup quat_quat + * @defgroup quat_helpers Quaternion module internal functions + * @{ + */ + +/** @internal + * @defgroup quat_alg_helpers Helper functions for the alg library + * @{ + */ + +/** @internal + * @brief helper function for initializing small quaternion algebras. 
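+ *
+ * @param alg Output: the quaternion algebra, with the basis convention
+ * i^2 = -1, j^2 = -p used throughout this module
+ * @param p Small value of the algebra parameter p (fits in an unsigned int)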
+ */ +void quat_alg_init_set_ui(quat_alg_t *alg, + unsigned int p); // test/lattice, test/ideal, test/algebra + +/** @brief a*b + * + * Multiply two coordinate vectors as elements of the algebra in basis (1,i,j,ij) with i^2 = -1, j^2 + * = -p + * + * @param res Output: Will contain product + * @param a + * @param b + * @param alg The quaternion algebra + */ +void quat_alg_coord_mul(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b, const quat_alg_t *alg); + +/** @brief a=b + * + * Test if a and b represent the same quaternion algebra element + * + * @param a + * @param b + * @returns 1 if a=b, 0 otherwise + */ +int quat_alg_elem_equal(const quat_alg_elem_t *a, const quat_alg_elem_t *b); + +/** @brief Test if x is 0 + * + * @returns 1 if x=0, 0 otherwise + * + * x is 0 iff all coordinates in x->coord are 0 + */ +int quat_alg_elem_is_zero(const quat_alg_elem_t *x); + +/** @brief Compute same denominator form of two quaternion algebra elements + * + * res_a=a and res_b=b (representing the same element) and res_a.denom = res_b.denom + * + * @param res_a + * @param res_b + * @param a + * @param b + */ +void quat_alg_equal_denom(quat_alg_elem_t *res_a, + quat_alg_elem_t *res_b, + const quat_alg_elem_t *a, + const quat_alg_elem_t *b); + +/** @brief Copies the given values into an algebra element, without normalizing it + * + * @param elem Output: algebra element of coordinates [coord0,coord1,coord2,coord3] and denominator + * denom + * @param denom Denominator, must be non zero + * @param coord0 Coordinate on 1 (0th vector of standard algebra basis) + * @param coord1 Coordinate on i (1st vector of standard algebra basis) + * @param coord2 Coordinate on j (2nd vector of standard algebra basis) + * @param coord3 Coordinate on ij (3rd vector of standard algebra basis) + */ +void quat_alg_elem_copy_ibz(quat_alg_elem_t *elem, + const ibz_t *denom, + const ibz_t *coord0, + const ibz_t *coord1, + const ibz_t *coord2, + const ibz_t *coord3); + +/** @brief Sets an algebra element to the given integer values, without normalizing it + * + * @param elem Output: algebra element of coordinates [coord0,coord1,coord2,coord3] and denominator + * denom + * @param denom Denominator, must be non zero + * @param coord0 Coordinate on 1 (0th vector of standard algebra basis) + * @param coord1 Coordinate on i (1st vector of standard algebra basis) + * @param coord2 Coordinate on j (2nd vector of standard algebra basis) + * @param coord3 Coordinate on ij (3rd vector of standard algebra basis) + */ +void quat_alg_elem_set(quat_alg_elem_t *elem, + int32_t denom, + int32_t coord0, + int32_t coord1, + int32_t coord2, + int32_t coord3); + +/** + * @brief Creates algebra element from scalar + * + * Resulting element has 1-coordinate equal to numerator/denominator + * + * @param elem Output: algebra element with numerator/denominator as first coordiante + * (1-coordinate), 0 elsewhere (i,j,ij coordinates) + * @param numerator + * @param denominator Assumed non zero + */ +void quat_alg_scalar(quat_alg_elem_t *elem, const ibz_t *numerator, const ibz_t *denominator); + +/** @brief a+b for algebra elements + * + * @param res Output + * @param a Algebra element + * @param b Algebra element + */ +void quat_alg_add(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b); + +/** @brief a-b for algebra elements + * + * @param res Output + * @param a Algebra element + * @param b Algebra element + */ +void quat_alg_sub(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b); + +/** @brief 
Multiplies algebra element by integer scalar, without normalizing it + * + * @param res Output + * @param scalar Integer + * @param elem Algebra element + */ +void quat_alg_elem_mul_by_scalar(quat_alg_elem_t *res, const ibz_t *scalar, const quat_alg_elem_t *elem); + +/** @} + */ + +/** @internal + * @defgroup quat_dim4_helpers Helper functions for functions for matrices or vectors in dimension 4 + * @{ + */ + +/** @internal + * @defgroup quat_inv_helpers Helper functions for the integer matrix inversion function + * @{ + */ + +/** @brief a1a2+b1b2+c1c2 + * + * @param coeff Output: The coefficien which was computed as a1a2+b1b2-c1c2 + * @param a1 + * @param a2 + * @param b1 + * @param b2 + * @param c1 + * @param c2 + */ +void ibz_inv_dim4_make_coeff_pmp(ibz_t *coeff, + const ibz_t *a1, + const ibz_t *a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2); + +/** @brief -a1a2+b1b2-c1c2 + * + * @param coeff Output: The coefficien which was computed as -a1a2+b1b2-c1c2 + * @param a1 + * @param a2 + * @param b1 + * @param b2 + * @param c1 + * @param c2 + */ +void ibz_inv_dim4_make_coeff_mpm(ibz_t *coeff, + const ibz_t *a1, + const ibz_t *a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2); + +/** @brief Matrix determinant and a matrix inv such that inv/det is the inverse matrix of the input + * + * Implemented following the methof of 2x2 minors explained at Method from + * https://www.geometrictools.com/Documentation/LaplaceExpansionTheorem.pdf (visited on 3rd of May + * 2023, 16h15 CEST) + * + * @returns 1 if the determinant of mat is not 0 and an inverse was computed, 0 otherwise + * @param inv Output: Will contain an integer matrix which, dividet by det, will yield the rational + * inverse of the matrix if it exists, can be NULL + * @param det Output: Will contain the determinant of the input matrix, can be NULL + * @param mat Matrix of which the inverse will be computed + */ +int ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_4x4_t *mat); + +/** @} + */ + +/** @internal + * @defgroup quat_dim4_lat_helpers Helper functions on vectors and matrices used mainly for lattices + * @{ + */ + +/** @brief Copy all values from one vector to another + * + * @param new Output: is set to same values as vec + * @param vec + */ +void ibz_vec_4_copy(ibz_vec_4_t *new, const ibz_vec_4_t *vec); + +/** @brief set res to values coord0,coord1,coord2,coord3 + * + * @param res Output: Will contain vector (coord0,coord1,coord2,coord3) + * @param coord0 + * @param coord1 + * @param coord2 + * @param coord3 + */ +void ibz_vec_4_copy_ibz(ibz_vec_4_t *res, + const ibz_t *coord0, + const ibz_t *coord1, + const ibz_t *coord2, + const ibz_t *coord3); + +/** @brief Set a vector of 4 integers to given values + * + * @param vec Output: is set to given coordinates + * @param coord0 + * @param coord1 + * @param coord2 + * @param coord3 + */ +void ibz_vec_4_set(ibz_vec_4_t *vec, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3); + +/** @brief a+b + * + * Add two integer 4-vectors + * + * @param res Output: Will contain sum + * @param a + * @param b + */ +void ibz_vec_4_add(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b); + +/** @brief a-b + * + * Substract two integer 4-vectors + * + * @param res Output: Will contain difference + * @param a + * @param b + */ +void ibz_vec_4_sub(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b); + +/** @brief x=0 + * + * Test if a vector x has only zero coordinates + * + * 
@returns 0 if x has at least one non-zero coordinates, 1 otherwise + * @param x + */ +int ibz_vec_4_is_zero(const ibz_vec_4_t *x); + +/** @brief Compute the linear combination lc = coeff_a vec_a + coeff_b vec_b + * + * @param lc Output: linear combination lc = coeff_a vec_a + coeff_b vec_b + * @param coeff_a Scalar multiplied to vec_a + * @param vec_a + * @param coeff_b Scalar multiplied to vec_b + * @param vec_b + */ +void ibz_vec_4_linear_combination(ibz_vec_4_t *lc, + const ibz_t *coeff_a, + const ibz_vec_4_t *vec_a, + const ibz_t *coeff_b, + const ibz_vec_4_t *vec_b); + +/** @brief multiplies all values in vector by same scalar + * + * @param prod Output + * @param scalar + * @param vec + */ +void ibz_vec_4_scalar_mul(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec); + +/** @brief divides all values in vector by same scalar + * + * @returns 1 if scalar divided all values in mat, 0 otherwise (division is performed in both cases) + * @param quot Output + * @param scalar + * @param vec + */ +int ibz_vec_4_scalar_div(ibz_vec_4_t *quot, const ibz_t *scalar, const ibz_vec_4_t *vec); + +/** @brief Negation for vectors of 4 integers + * + * @param neg Output: is set to -vec + * @param vec + */ +void ibz_vec_4_negate(ibz_vec_4_t *neg, const ibz_vec_4_t *vec); + +/** + * @brief content of a 4-vector of integers + * + * The content is the GCD of all entries. + * + * @param v A 4-vector of integers + * @param content Output: the resulting gcd + */ +void ibz_vec_4_content(ibz_t *content, const ibz_vec_4_t *v); + +/** @brief -mat for mat a 4x4 integer matrix + * + * @param neg Output: is set to -mat + * @param mat Input matrix + */ +void ibz_mat_4x4_negate(ibz_mat_4x4_t *neg, const ibz_mat_4x4_t *mat); + +/** @brief Set all coefficients of a matrix to zero for 4x4 integer matrices + * + * @param zero + */ +void ibz_mat_4x4_zero(ibz_mat_4x4_t *zero); + +/** @brief Set a matrix to the identity for 4x4 integer matrices + * + * @param id + */ +void ibz_mat_4x4_identity(ibz_mat_4x4_t *id); + +/** @brief Test equality to identity for 4x4 integer matrices + * + * @returns 1 if mat is the identity matrix, 0 otherwise + * @param mat + */ +int ibz_mat_4x4_is_identity(const ibz_mat_4x4_t *mat); + +/** @brief Equality test for 4x4 integer matrices + * + * @returns 1 if equal, 0 otherwise + * @param mat1 + * @param mat2 + */ +int ibz_mat_4x4_equal(const ibz_mat_4x4_t *mat1, const ibz_mat_4x4_t *mat2); + +/** @brief Copies all values from a 4x4 integer matrix to another one + * + * @param new Output: matrix which will have its entries set to mat's entries + * @param mat Input matrix + */ +void ibz_mat_4x4_copy(ibz_mat_4x4_t *new, const ibz_mat_4x4_t *mat); + +/** @brief Matrix by integer multiplication + * + * @param prod Output + * @param scalar + * @param mat + */ +void ibz_mat_4x4_scalar_mul(ibz_mat_4x4_t *prod, const ibz_t *scalar, const ibz_mat_4x4_t *mat); + +/** @brief gcd of all values in matrix + * + * @param gcd Output + * @param mat + */ +void ibz_mat_4x4_gcd(ibz_t *gcd, const ibz_mat_4x4_t *mat); + +/** @brief Verifies whether the 4x4 input matrix is in Hermite Normal Form + * + * @returns 1 if mat is in HNF, 0 otherwise + * @param mat Matrix to be tested + */ +int ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat); + +/** @brief Hermite Normal Form of a matrix of 8 integer vectors, computed using a multiple of its + * determinant as modulo + * + * Algorithm used is the one at number 2.4.8 in Henri Cohen's "A Course in Computational Algebraic + * Number Theory" (Springer Verlag, in series 
"Graduate texts in Mathematics") from 1993 + * + * @param hnf Output: Matrix in Hermite Normal Form generating the same lattice as generators + * @param generators matrix whose colums generate the same lattice than the output + * @param generator_number number of generators given + * @param mod integer, must be a multiple of the volume of the lattice generated by the columns of + * generators + */ +void ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, + int generator_number, + const ibz_vec_4_t *generators, + const ibz_t *mod); + +/** @} + */ +/** @} + */ + +/** @internal + * @defgroup quat_dim2_helpers Helper functions for dimension 2 + * @{ + */ + +/** @brief Set vector coefficients to the given integers + * + * @param vec Output: Vector + * @param a0 + * @param a1 + */ +void ibz_vec_2_set(ibz_vec_2_t *vec, int a0, int a1); // test/dim2 + +/** @brief Set matrix coefficients to the given integers + * + * @param mat Output: Matrix + * @param a00 + * @param a01 + * @param a10 + * @param a11 + */ +void ibz_mat_2x2_set(ibz_mat_2x2_t *mat, int a00, int a01, int a10, int a11); // test/dim2 + +void ibz_mat_2x2_add(ibz_mat_2x2_t *sum, const ibz_mat_2x2_t *a, + const ibz_mat_2x2_t *b); // unused + +/** @brief Determinant of a 2x2 integer matrix given as 4 integers + * + * @param det Output: Determinant of the matrix + * @param a11 matrix coefficient (upper left corner) + * @param a12 matrix coefficient (upper right corner) + * @param a21 matrix coefficient (lower left corner) + * @param a22 matrix coefficient (lower right corner) + */ +void ibz_mat_2x2_det_from_ibz(ibz_t *det, + const ibz_t *a11, + const ibz_t *a12, + const ibz_t *a21, + const ibz_t *a22); // dim4 + +/** + * @brief a*b for 2x2 integer matrices modulo m + * + * @param prod Output matrix + * @param mat_a Input matrix + * @param mat_b Input matrix + * @param m Integer modulo + */ +void ibz_2x2_mul_mod(ibz_mat_2x2_t *prod, + const ibz_mat_2x2_t *mat_a, + const ibz_mat_2x2_t *mat_b, + const ibz_t *m); // test/dim2 +/** @} + */ + +/** @internal + * @defgroup quat_lattice_helper Helper functions for the lattice library (dimension 4) + * @{ + */ + +/** + * @brief Modifies a lattice to put it in hermite normal form + * + * In-place modification of the lattice. + * + * @param lat input lattice + * + * On a correct lattice this function changes nothing (since it is already in HNF), but it can be + * used to put a handmade one in correct form in order to use the other lattice functions. + */ +void quat_lattice_hnf(quat_lattice_t *lat); // lattice, test/lattice, test/algebra, + +/** + * @brief Lattice equality + * + * Lattice bases are assumed to be under HNF, but denominators are free. + * + * @returns 1 if both lattices are equal, 0 otherwise + * @param lat1 + * @param lat2 + */ +int quat_lattice_equal(const quat_lattice_t *lat1, + const quat_lattice_t *lat2); // ideal, lattice, test/lattice, test/ideal + +/** + * @brief Lattice inclusion test + * + * Lattice bases are assumed to be under HNF, but denominators are free. 
+ * + * @returns 1 if sublat is included in overlat, 0 otherwise + * @param sublat Lattice whose inclusion in overlat will be testes + * @param overlat + */ +int quat_lattice_inclusion(const quat_lattice_t *sublat, + const quat_lattice_t *overlat); // test/lattice, test/ideal + +/** @brief Divides basis and denominator of a lattice by their gcd + * + * @param reduced Output + * @param lat Lattice + */ +void quat_lattice_reduce_denom(quat_lattice_t *reduced, + const quat_lattice_t *lat); // lattice, ideal, + +/** @brief a+b for lattices + * + * @param res Output + * @param lat1 Lattice + * @param lat2 Lattice + */ +void quat_lattice_add(quat_lattice_t *res, + const quat_lattice_t *lat1, + const quat_lattice_t *lat2); // ideal, lattice, test/lattice + +/** @brief a*b for lattices + * + * @param res Output + * @param lat1 Lattice + * @param lat2 Lattice + * @param alg The quaternion algebra + */ +void quat_lattice_mul(quat_lattice_t *res, + const quat_lattice_t *lat1, + const quat_lattice_t *lat2, + const quat_alg_t *alg); // ideal, lattie, test/ideal, test/lattice + +/** + * @brief Computes the dual lattice of lat, without putting its basis in HNF + * + * This function returns a lattice not under HNF. For careful internal use only. + * + * Computation method described in https://cseweb.ucsd.edu/classes/sp14/cse206A-a/lec4.pdf consulted + * on 19 of May 2023, 12h40 CEST + * + * @param dual Output: The dual lattice of lat. ATTENTION: is not under HNF. hnf computation must be + * applied before using lattice functions on it + * @param lat lattice, the dual of it will be computed + */ +void quat_lattice_dual_without_hnf(quat_lattice_t *dual, + const quat_lattice_t *lat); // lattice, ideal + +/** + * @brief Multiply all columns of lat with coord (as algebra elements) + * + * The columns and coord are seen as algebra elements in basis 1,i,j,ij, i^2 = -1, j^2 = -p). Coord + * is multiplied to the right of lat. + * + * The output matrix is not under HNF. + * + * @param prod Output: Matrix not under HND whose columns represent the algebra elements obtained as + * L*coord for L column of lat. + * @param lat Matrix whose columns are algebra elements in basis (1,i,j,ij) + * @param coord Integer coordinate algebra element in basis (1,i,j,ij) + * @param alg The quaternion algebra + */ +void quat_lattice_mat_alg_coord_mul_without_hnf(ibz_mat_4x4_t *prod, + const ibz_mat_4x4_t *lat, + const ibz_vec_4_t *coord, + const quat_alg_t *alg); // lattice + +/** @brief The index of sublat into overlat + * + * Assumes inputs are in HNF. + * + * @param index Output + * @param sublat A lattice in HNF, must be sublattice of overlat + * @param overlat A lattice in HNF, must be overlattice of sublat + */ +void quat_lattice_index(ibz_t *index, const quat_lattice_t *sublat, + const quat_lattice_t *overlat); // ideal + +/** @brief Compute the Gram matrix of the quaternion trace bilinear form + * + * Given a lattice of the quaternion algebra, computes the Gram matrix + * of the bilinear form + * + * 〈a,b〉 := [lattice->denom^2] Tr(a·conj(b)) + * + * multiplied by the square of the denominator of the lattice. + * + * This matrix always has integer entries. 
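+ *
+ * Concretely, for two basis columns a = (a0,a1,a2,a3) and b = (b0,b1,b2,b3)
+ * written in the basis (1, i, j, ij) with i^2 = -1 and j^2 = -p, the entry
+ * computed in lattice.c is
+ *
+ *     2*(a0*b0 + a1*b1 + p*(a2*b2 + a3*b3)),
+ *
+ * evaluated on the integer basis matrix, which coincides with
+ * lattice->denom^2 * Tr(x*conj(y)) for the corresponding rational basis
+ * vectors x = a/denom, y = b/denom.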
+ * + * @param G Output: Gram matrix of the trace bilinear form on the lattice, multiplied by the square + * of the denominator of the lattice + * @param lattice A lattice + * @param alg The quaternion algebra + */ +void quat_lattice_gram(ibz_mat_4x4_t *G, const quat_lattice_t *lattice, const quat_alg_t *alg); + +/** + * @brief Compute an integer parallelogram containing the ball of + * given radius for the positive definite quadratic form defined by + * the Gram matrix G. + * + * The computed parallelogram is defined by the vectors + * + * (x₁ x₂ x₃ x₄) · U + * + * with x_i ∈ [ -box[i], box[i] ]. + * + * @param box Output: bounds of the parallelogram + * @param U Output: Unimodular transformation defining the parallelogram + * @param G Gram matrix of the quadratic form, must be full rank + * @param radius Radius of the ball, must be non-negative + * @returns 0 if the box only contains the origin, 1 otherwise + */ +int quat_lattice_bound_parallelogram(ibz_vec_4_t *box, ibz_mat_4x4_t *U, const ibz_mat_4x4_t *G, const ibz_t *radius); + +/** @} + */ + +/** @internal + * @defgroup quat_lideal_helper Helper functions for ideals and orders + * @{ + */ +/** @brief Set norm of an ideal given its lattice and parent order + * + * @param lideal In/Output: Ideal which has lattice and parent_order correctly set, but not + * necessarily the norm. Will have norm correctly set too. + */ +void quat_lideal_norm(quat_left_ideal_t *lideal); // ideal + +/** + * @brief Left principal ideal of order, generated by x + * + * @param lideal Output: left ideal + * @param alg quaternion algebra + * @param order maximal order of alg whose left ideal is searched + * @param x generating element + * + * Creates the left ideal in 'order' generated by the element 'x' + */ +void quat_lideal_create_principal(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const quat_lattice_t *order, + const quat_alg_t *alg); // ideal, test/ideal + +/** + * @brief Equality test for left ideals + * + * @returns 1 if both left ideals are equal, 0 otherwise + * @param lideal1 left ideal + * @param lideal2 left ideal + * @param alg the quaternion algebra + */ +int quat_lideal_equals(const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); // test/ideal + +/** + * @brief Sum of two left ideals + * + * @param sum Output: Left ideal which is the sum of the 2 inputs + * @param lideal1 left ideal + * @param lideal2 left ideal + * @param alg the quaternion algebra + */ +void quat_lideal_add(quat_left_ideal_t *sum, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); // Not used outside + +/** + * @brief Left ideal product of left ideal I and element alpha + * + * @param product Output: lideal I*alpha, must have integer norm + * @param lideal left ideal + * @param alpha element multiplied to lideal to get the product ideal + * @param alg the quaternion algebra + * + * I*alpha where I is a left-ideal and alpha an element of the algebra + * + * The resulting ideal must have an integer norm + * + */ +void quat_lideal_mul(quat_left_ideal_t *product, + const quat_left_ideal_t *lideal, + const quat_alg_elem_t *alpha, + const quat_alg_t *alg); // test/ideal + +/** @brief Computes the inverse ideal (for a left ideal of a maximal order) without putting it under + * HNF + * + * This function returns a lattice not under HNF. 
For careful internal use only + * + * Computes the inverse ideal for lideal as conjugate(lideal)/norm(lideal) + * + * @param inv Output: lattice which is lattice representation of the inverse ideal of lideal + * ATTENTION: is not under HNF. hnf computation must be applied before using lattice functions on it + * @param lideal Left ideal of a maximal order in alg + * @param alg The quaternion algebra + */ +void quat_lideal_inverse_lattice_without_hnf(quat_lattice_t *inv, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg); // ideal + +/** @brief Computes the right transporter of two left ideals of the same maximal order + * + * Following the implementation of ideal isomorphisms in the code of LearningToSQI's sage + * implementation of SQIsign. Computes the right transporter of (J:I) as inverse(I)J. + * + * @param trans Output: lattice which is right transporter from lideal1 to lideal2 (lideal2:lideal1) + * @param lideal1 Left ideal of the same maximal order than lideal1 in alg + * @param lideal2 Left ideal of the same maximal order than lideal1 in alg + * @param alg The quaternion algebra + */ +void quat_lideal_right_transporter(quat_lattice_t *trans, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); + +/** + * @brief Right order of a left ideal + * + * @param order Output: right order of the given ideal + * @param lideal left ideal + * @param alg the quaternion algebra + */ +void quat_lideal_right_order(quat_lattice_t *order, const quat_left_ideal_t *lideal, + const quat_alg_t *alg); // ideal + +/** + * @brief Gram matrix of the trace map of the ideal class + * + * Compute the Gram matrix of the bilinear form + * + * 〈a, b〉 := Tr(a·conj(b)) / norm(lideal) + * + * on the basis of the ideal. This matrix has integer entries and its + * integer congruence class only depends on the ideal class. + * + * @param G Output: Gram matrix of the trace map + * @param lideal left ideal + * @param alg the quaternion algebra + */ +void quat_lideal_class_gram(ibz_mat_4x4_t *G, const quat_left_ideal_t *lideal, const quat_alg_t *alg); + +/** @brief Test if order is maximal + * + * Checks if the discriminant of the order equals the prime p defining the quaternion algebra. + * + * It is not verified whether the order is really an order. The output 1 only means that if it is an + * order, then it is maximal. 
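+ * (This criterion suffices because any order is contained in a maximal order,
+ * and for orders of the quaternion algebra ramified exactly at p and infinity
+ * the reduced discriminant equals p times the index in a maximal order, hence
+ * equals p precisely for maximal orders.)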
+ * + * @returns 1 if order is maximal (assuming it is an order), 0 otherwise + * @param order An order of the quaternion algebra (assumes to be an order, this is not tested) + * @param alg The quaternion algebra + */ +int quat_order_is_maximal(const quat_lattice_t *order, + const quat_alg_t *alg); // ideal (only in asserts) + +/** @brief Compute the discriminant of an order as sqrt(det(gram(reduced_norm))) + * + * @param disc: Output: The discriminant sqrt(det(gram(reduced_norm))) + * @param order An order of the quaternion algebra + * @param alg The quaternion algebra + */ +int quat_order_discriminant(ibz_t *disc, const quat_lattice_t *order, + const quat_alg_t *alg); // ideal + +/** @} + */ + +/** @internal + * @ingroup quat_normeq + * @{ + */ + +/** @brief Set lattice to O0 + * + * @param O0 Lattice to be set to (1,i,(i+j)/2,(1+ij)/2) + */ +void quat_lattice_O0_set(quat_lattice_t *O0); + +/** @brief Set p-extremal maximal order to O0 + * + * @param O0 p-extremal order to be set to (1,i,(i+j)/2,(1+ij)/2) + */ +void quat_lattice_O0_set_extremal(quat_p_extremal_maximal_order_t *O0); + +/** + * @brief Create an element of a extremal maximal order from its coefficients + * + * @param elem Output: the quaternion element + * @param order the order + * @param coeffs the vector of 4 ibz coefficients + * @param Bpoo quaternion algebra + * + * elem = x + z*y + z*u + t*z*v + * where coeffs = [x,y,u,v] and t = order.t z = order.z + * + */ +void quat_order_elem_create(quat_alg_elem_t *elem, + const quat_p_extremal_maximal_order_t *order, + const ibz_vec_4_t *coeffs, + const quat_alg_t *Bpoo); // normeq, untested + +/** @} + */ +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/l2.c new file mode 100644 index 0000000000..8c49b21d20 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/l2.c @@ -0,0 +1,190 @@ +#include +#include "lll_internals.h" +#include "internal.h" + +#include "dpe.h" + +// Access entry of symmetric matrix +#define SYM(M, i, j) (i < j ? 
&M[j][i] : &M[i][j]) + +void +quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) +{ + dpe_t dpe_const_one, dpe_const_DELTABAR; + + dpe_init(dpe_const_one); + dpe_set_ui(dpe_const_one, 1); + + dpe_init(dpe_const_DELTABAR); + dpe_set_d(dpe_const_DELTABAR, DELTABAR); + + // fp variables for Gram-Schmidt orthogonalization and Lovasz' conditions + dpe_t r[4][4], u[4][4], lovasz[4]; + for (int i = 0; i < 4; i++) { + dpe_init(lovasz[i]); + for (int j = 0; j <= i; j++) { + dpe_init(r[i][j]); + dpe_init(u[i][j]); + } + } + + // threshold for swaps + dpe_t delta_bar; + dpe_init(delta_bar); + dpe_set_d(delta_bar, DELTABAR); + + // Other work variables + dpe_t Xf, tmpF; + dpe_init(Xf); + dpe_init(tmpF); + ibz_t X, tmpI; + ibz_init(&X); + ibz_init(&tmpI); + + // Main L² loop + dpe_set_z(r[0][0], (*G)[0][0]); + int kappa = 1; + while (kappa < 4) { + // size reduce b_κ + int done = 0; + while (!done) { + // Recompute the κ-th row of the Choleski Factorisation + // Loop invariant: + // r[κ][j] ≈ u[κ][j] ‖b_j*‖² ≈ 〈b_κ, b_j*〉 + for (int j = 0; j <= kappa; j++) { + dpe_set_z(r[kappa][j], (*G)[kappa][j]); + for (int k = 0; k < j; k++) { + dpe_mul(tmpF, r[kappa][k], u[j][k]); + dpe_sub(r[kappa][j], r[kappa][j], tmpF); + } + if (j < kappa) + dpe_div(u[kappa][j], r[kappa][j], r[j][j]); + } + + done = 1; + // size reduce + for (int i = kappa - 1; i >= 0; i--) { + if (dpe_cmp_d(u[kappa][i], ETABAR) > 0 || dpe_cmp_d(u[kappa][i], -ETABAR) < 0) { + done = 0; + dpe_set(Xf, u[kappa][i]); + dpe_round(Xf, Xf); + dpe_get_z(X, Xf); + // Update basis: b_κ ← b_κ - X·b_i + for (int j = 0; j < 4; j++) { + ibz_mul(&tmpI, &X, &(*basis)[j][i]); + ibz_sub(&(*basis)[j][kappa], &(*basis)[j][kappa], &tmpI); + } + // Update lower half of the Gram matrix + // = - 2X + X² = + // - X - X( - X·) + //// 〈b_κ, b_κ〉 ← 〈b_κ, b_κ〉 - X·〈b_κ, b_i〉 + ibz_mul(&tmpI, &X, &(*G)[kappa][i]); + ibz_sub(&(*G)[kappa][kappa], &(*G)[kappa][kappa], &tmpI); + for (int j = 0; j < 4; j++) { // works because i < κ + // 〈b_κ, b_j〉 ← 〈b_κ, b_j〉 - X·〈b_i, b_j〉 + ibz_mul(&tmpI, &X, SYM((*G), i, j)); + ibz_sub(SYM((*G), kappa, j), SYM((*G), kappa, j), &tmpI); + } + // After the loop: + //// 〈b_κ,b_κ〉 ← 〈b_κ,b_κ〉 - X·〈b_κ,b_i〉 - X·(〈b_κ,b_i〉 - X·〈b_i, + /// b_i〉) = 〈b_κ - X·b_i, b_κ - X·b_i〉 + // + // Update u[kappa][j] + for (int j = 0; j < i; j++) { + dpe_mul(tmpF, Xf, u[i][j]); + dpe_sub(u[kappa][j], u[kappa][j], tmpF); + } + } + } + } + + // Check Lovasz' conditions + // lovasz[0] = ‖b_κ‖² + dpe_set_z(lovasz[0], (*G)[kappa][kappa]); + // lovasz[i] = lovasz[i-1] - u[κ][i-1]·r[κ][i-1] + for (int i = 1; i < kappa; i++) { + dpe_mul(tmpF, u[kappa][i - 1], r[kappa][i - 1]); + dpe_sub(lovasz[i], lovasz[i - 1], tmpF); + } + int swap; + for (swap = kappa; swap > 0; swap--) { + dpe_mul(tmpF, delta_bar, r[swap - 1][swap - 1]); + if (dpe_cmp(tmpF, lovasz[swap - 1]) < 0) + break; + } + + // Insert b_κ before b_swap + if (kappa != swap) { + // Insert b_κ before b_swap in the basis and in the lower half Gram matrix + for (int j = kappa; j > swap; j--) { + for (int i = 0; i < 4; i++) { + ibz_swap(&(*basis)[i][j], &(*basis)[i][j - 1]); + if (i == j - 1) + ibz_swap(&(*G)[i][i], &(*G)[j][j]); + else if (i != j) + ibz_swap(SYM((*G), i, j), SYM((*G), i, j - 1)); + } + } + // Copy row u[κ] and r[κ] in swap position, ignore what follows + for (int i = 0; i < swap; i++) { + dpe_set(u[swap][i], u[kappa][i]); + dpe_set(r[swap][i], r[kappa][i]); + } + dpe_set(r[swap][swap], lovasz[swap]); + // swap complete + kappa = swap; + } + + kappa += 1; + } + +#ifndef NDEBUG + // Check 
size-reducedness + for (int i = 0; i < 4; i++) + for (int j = 0; j < i; j++) { + dpe_abs(u[i][j], u[i][j]); + assert(dpe_cmp_d(u[i][j], ETABAR) <= 0); + } + // Check Lovasz' conditions + for (int i = 1; i < 4; i++) { + dpe_mul(tmpF, u[i][i - 1], u[i][i - 1]); + dpe_sub(tmpF, dpe_const_DELTABAR, tmpF); + dpe_mul(tmpF, tmpF, r[i - 1][i - 1]); + assert(dpe_cmp(tmpF, r[i][i]) <= 0); + } +#endif + + // Fill in the upper half of the Gram matrix + for (int i = 0; i < 4; i++) { + for (int j = i + 1; j < 4; j++) + ibz_copy(&(*G)[i][j], &(*G)[j][i]); + } + + // Clearinghouse + ibz_finalize(&X); + ibz_finalize(&tmpI); + dpe_clear(dpe_const_one); + dpe_clear(dpe_const_DELTABAR); + dpe_clear(Xf); + dpe_clear(tmpF); + dpe_clear(delta_bar); + for (int i = 0; i < 4; i++) { + dpe_clear(lovasz[i]); + for (int j = 0; j <= i; j++) { + dpe_clear(r[i][j]); + dpe_clear(u[i][j]); + } + } +} + +int +quat_lattice_lll(ibz_mat_4x4_t *red, const quat_lattice_t *lattice, const quat_alg_t *alg) +{ + ibz_mat_4x4_t G; // Gram Matrix + ibz_mat_4x4_init(&G); + quat_lattice_gram(&G, lattice, alg); + ibz_mat_4x4_copy(red, &lattice->basis); + quat_lll_core(&G, red); + ibz_mat_4x4_finalize(&G); + return 0; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lat_ball.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lat_ball.c new file mode 100644 index 0000000000..c7bbb9682f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lat_ball.c @@ -0,0 +1,139 @@ +#include +#include +#include +#include "internal.h" +#include "lll_internals.h" + +int +quat_lattice_bound_parallelogram(ibz_vec_4_t *box, ibz_mat_4x4_t *U, const ibz_mat_4x4_t *G, const ibz_t *radius) +{ + ibz_t denom, rem; + ibz_init(&denom); + ibz_init(&rem); + ibz_mat_4x4_t dualG; + ibz_mat_4x4_init(&dualG); + +// Compute the Gram matrix of the dual lattice +#ifndef NDEBUG + int inv_check = ibz_mat_4x4_inv_with_det_as_denom(&dualG, &denom, G); + assert(inv_check); +#else + (void)ibz_mat_4x4_inv_with_det_as_denom(&dualG, &denom, G); +#endif + // Initialize the dual lattice basis to the identity matrix + ibz_mat_4x4_identity(U); + // Reduce the dual lattice + quat_lll_core(&dualG, U); + + // Compute the parallelogram's bounds + int trivial = 1; + for (int i = 0; i < 4; i++) { + ibz_mul(&(*box)[i], &dualG[i][i], radius); + ibz_div(&(*box)[i], &rem, &(*box)[i], &denom); + ibz_sqrt_floor(&(*box)[i], &(*box)[i]); + trivial &= ibz_is_zero(&(*box)[i]); + } + + // Compute the transpose transformation matrix +#ifndef NDEBUG + int inv = ibz_mat_4x4_inv_with_det_as_denom(U, &denom, U); +#else + (void)ibz_mat_4x4_inv_with_det_as_denom(U, &denom, U); +#endif + // U is unitary, det(U) = ± 1 + ibz_mat_4x4_scalar_mul(U, &denom, U); +#ifndef NDEBUG + assert(inv); + ibz_abs(&denom, &denom); + assert(ibz_is_one(&denom)); +#endif + + ibz_mat_4x4_finalize(&dualG); + ibz_finalize(&denom); + ibz_finalize(&rem); + return !trivial; +} + +int +quat_lattice_sample_from_ball(quat_alg_elem_t *res, + const quat_lattice_t *lattice, + const quat_alg_t *alg, + const ibz_t *radius) +{ + assert(ibz_cmp(radius, &ibz_const_zero) > 0); + + ibz_vec_4_t box; + ibz_vec_4_init(&box); + ibz_mat_4x4_t U, G; + ibz_mat_4x4_init(&U); + ibz_mat_4x4_init(&G); + ibz_vec_4_t x; + ibz_vec_4_init(&x); + ibz_t rad, tmp; + ibz_init(&rad); + ibz_init(&tmp); + + // Compute the Gram matrix of the lattice + quat_lattice_gram(&G, lattice, alg); + + // Correct ball radius by the denominator + ibz_mul(&rad, radius, &lattice->denom); + ibz_mul(&rad, &rad, &lattice->denom); + // Correct by 2 (Gram 
matrix corresponds to twice the norm) + ibz_mul(&rad, &rad, &ibz_const_two); + + // Compute a bounding parallelogram for the ball, stop if it only + // contains the origin + int ok = quat_lattice_bound_parallelogram(&box, &U, &G, &rad); + if (!ok) + goto err; + + // Rejection sampling from the parallelogram +#ifndef NDEBUG + int cnt = 0; +#endif + do { + // Sample vector + for (int i = 0; i < 4; i++) { + if (ibz_is_zero(&box[i])) { + ibz_copy(&x[i], &ibz_const_zero); + } else { + ibz_add(&tmp, &box[i], &box[i]); + ok &= ibz_rand_interval(&x[i], &ibz_const_zero, &tmp); + ibz_sub(&x[i], &x[i], &box[i]); + if (!ok) + goto err; + } + } + // Map to parallelogram + ibz_mat_4x4_eval_t(&x, &x, &U); + // Evaluate quadratic form + quat_qf_eval(&tmp, &G, &x); +#ifndef NDEBUG + cnt++; + if (cnt % 100 == 0) + printf("Lattice sampling rejected %d times", cnt - 1); +#endif + } while (ibz_is_zero(&tmp) || (ibz_cmp(&tmp, &rad) > 0)); + + // Evaluate linear combination + ibz_mat_4x4_eval(&(res->coord), &(lattice->basis), &x); + ibz_copy(&(res->denom), &(lattice->denom)); + quat_alg_normalize(res); + +#ifndef NDEBUG + // Check norm is smaller than radius + quat_alg_norm(&tmp, &rad, res, alg); + ibz_mul(&rad, &rad, radius); + assert(ibz_cmp(&tmp, &rad) <= 0); +#endif + +err: + ibz_finalize(&rad); + ibz_finalize(&tmp); + ibz_vec_4_finalize(&x); + ibz_mat_4x4_finalize(&U); + ibz_mat_4x4_finalize(&G); + ibz_vec_4_finalize(&box); + return ok; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lattice.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lattice.c new file mode 100644 index 0000000000..c98bae9499 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lattice.c @@ -0,0 +1,328 @@ +#include +#include +#include "internal.h" + +// helper functions +int +quat_lattice_equal(const quat_lattice_t *lat1, const quat_lattice_t *lat2) +{ + int equal = 1; + quat_lattice_t a, b; + quat_lattice_init(&a); + quat_lattice_init(&b); + quat_lattice_reduce_denom(&a, lat1); + quat_lattice_reduce_denom(&b, lat2); + ibz_abs(&(a.denom), &(a.denom)); + ibz_abs(&(b.denom), &(b.denom)); + quat_lattice_hnf(&a); + quat_lattice_hnf(&b); + equal = equal && (ibz_cmp(&(a.denom), &(b.denom)) == 0); + equal = equal && ibz_mat_4x4_equal(&(a.basis), &(b.basis)); + quat_lattice_finalize(&a); + quat_lattice_finalize(&b); + return (equal); +} + +// sublattice test +int +quat_lattice_inclusion(const quat_lattice_t *sublat, const quat_lattice_t *overlat) +{ + int res; + quat_lattice_t sum; + quat_lattice_init(&sum); + quat_lattice_add(&sum, overlat, sublat); + res = quat_lattice_equal(&sum, overlat); + quat_lattice_finalize(&sum); + return (res); +} + +void +quat_lattice_reduce_denom(quat_lattice_t *reduced, const quat_lattice_t *lat) +{ + ibz_t gcd; + ibz_init(&gcd); + ibz_mat_4x4_gcd(&gcd, &(lat->basis)); + ibz_gcd(&gcd, &gcd, &(lat->denom)); + ibz_mat_4x4_scalar_div(&(reduced->basis), &gcd, &(lat->basis)); + ibz_div(&(reduced->denom), &gcd, &(lat->denom), &gcd); + ibz_abs(&(reduced->denom), &(reduced->denom)); + ibz_finalize(&gcd); +} + +void +quat_lattice_conjugate_without_hnf(quat_lattice_t *conj, const quat_lattice_t *lat) +{ + ibz_mat_4x4_copy(&(conj->basis), &(lat->basis)); + ibz_copy(&(conj->denom), &(lat->denom)); + + for (int row = 1; row < 4; ++row) { + for (int col = 0; col < 4; ++col) { + ibz_neg(&(conj->basis[row][col]), &(conj->basis[row][col])); + } + } +} + +// Method described in https://cseweb.ucsd.edu/classes/sp14/cse206A-a/lec4.pdf consulted on 19 of +// May 2023, 12h40 CEST +void 
+quat_lattice_dual_without_hnf(quat_lattice_t *dual, const quat_lattice_t *lat) +{ + ibz_mat_4x4_t inv; + ibz_t det; + ibz_init(&det); + ibz_mat_4x4_init(&inv); + ibz_mat_4x4_inv_with_det_as_denom(&inv, &det, &(lat->basis)); + ibz_mat_4x4_transpose(&inv, &inv); + // dual_denom = det/lat_denom + ibz_mat_4x4_scalar_mul(&(dual->basis), &(lat->denom), &inv); + ibz_copy(&(dual->denom), &det); + + ibz_finalize(&det); + ibz_mat_4x4_finalize(&inv); +} + +void +quat_lattice_add(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2) +{ + ibz_vec_4_t generators[8]; + ibz_mat_4x4_t tmp; + ibz_t det1, det2, detprod; + ibz_init(&det1); + ibz_init(&det2); + ibz_init(&detprod); + for (int i = 0; i < 8; i++) + ibz_vec_4_init(&(generators[i])); + ibz_mat_4x4_init(&tmp); + ibz_mat_4x4_scalar_mul(&tmp, &(lat1->denom), &(lat2->basis)); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(generators[j][i]), &(tmp[i][j])); + } + } + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det1, &tmp); + ibz_mat_4x4_scalar_mul(&tmp, &(lat2->denom), &(lat1->basis)); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(generators[4 + j][i]), &(tmp[i][j])); + } + } + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det2, &tmp); + assert(!ibz_is_zero(&det1)); + assert(!ibz_is_zero(&det2)); + ibz_gcd(&detprod, &det1, &det2); + ibz_mat_4xn_hnf_mod_core(&(res->basis), 8, generators, &detprod); + ibz_mul(&(res->denom), &(lat1->denom), &(lat2->denom)); + quat_lattice_reduce_denom(res, res); + ibz_mat_4x4_finalize(&tmp); + ibz_finalize(&det1); + ibz_finalize(&det2); + ibz_finalize(&detprod); + for (int i = 0; i < 8; i++) + ibz_vec_4_finalize(&(generators[i])); +} + +// method described in https://cseweb.ucsd.edu/classes/sp14/cse206A-a/lec4.pdf consulted on 19 of +// May 2023, 12h40 CEST +void +quat_lattice_intersect(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2) +{ + quat_lattice_t dual1, dual2, dual_res; + quat_lattice_init(&dual1); + quat_lattice_init(&dual2); + quat_lattice_init(&dual_res); + quat_lattice_dual_without_hnf(&dual1, lat1); + + quat_lattice_dual_without_hnf(&dual2, lat2); + quat_lattice_add(&dual_res, &dual1, &dual2); + quat_lattice_dual_without_hnf(res, &dual_res); + quat_lattice_hnf(res); // could be removed if we do not expect HNF any more + quat_lattice_finalize(&dual1); + quat_lattice_finalize(&dual2); + quat_lattice_finalize(&dual_res); +} + +void +quat_lattice_mat_alg_coord_mul_without_hnf(ibz_mat_4x4_t *prod, + const ibz_mat_4x4_t *lat, + const ibz_vec_4_t *coord, + const quat_alg_t *alg) +{ + ibz_vec_4_t p, a; + ibz_vec_4_init(&p); + ibz_vec_4_init(&a); + for (int i = 0; i < 4; i++) { + ibz_vec_4_copy_ibz(&a, &((*lat)[0][i]), &((*lat)[1][i]), &((*lat)[2][i]), &((*lat)[3][i])); + quat_alg_coord_mul(&p, &a, coord, alg); + ibz_copy(&((*prod)[0][i]), &(p[0])); + ibz_copy(&((*prod)[1][i]), &(p[1])); + ibz_copy(&((*prod)[2][i]), &(p[2])); + ibz_copy(&((*prod)[3][i]), &(p[3])); + } + ibz_vec_4_finalize(&p); + ibz_vec_4_finalize(&a); +} + +void +quat_lattice_alg_elem_mul(quat_lattice_t *prod, + const quat_lattice_t *lat, + const quat_alg_elem_t *elem, + const quat_alg_t *alg) +{ + quat_lattice_mat_alg_coord_mul_without_hnf(&(prod->basis), &(lat->basis), &(elem->coord), alg); + ibz_mul(&(prod->denom), &(lat->denom), &(elem->denom)); + quat_lattice_hnf(prod); +} + +void +quat_lattice_mul(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2, const quat_alg_t *alg) +{ + ibz_vec_4_t elem1, elem2, elem_res; + 
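+    // Method note: the product lattice is spanned by the 16 pairwise products
+    // of the basis vectors of lat1 and lat2, multiplied as quaternions with
+    // quat_alg_coord_mul and collected in `generators`. The determinant of the
+    // four products involving the first basis vector of lat1 is a multiple of
+    // the product lattice's determinant and serves as the modulus for
+    // ibz_mat_4xn_hnf_mod_core, which reduces the 16 generators to a 4x4 basis
+    // in Hermite normal form.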
ibz_vec_4_t generators[16]; + ibz_mat_4x4_t detmat; + ibz_t det; + quat_lattice_t lat_res; + ibz_init(&det); + ibz_mat_4x4_init(&detmat); + quat_lattice_init(&lat_res); + ibz_vec_4_init(&elem1); + ibz_vec_4_init(&elem2); + ibz_vec_4_init(&elem_res); + for (int i = 0; i < 16; i++) + ibz_vec_4_init(&(generators[i])); + for (int k = 0; k < 4; k++) { + ibz_vec_4_copy_ibz( + &elem1, &(lat1->basis[0][k]), &(lat1->basis[1][k]), &(lat1->basis[2][k]), &(lat1->basis[3][k])); + for (int i = 0; i < 4; i++) { + ibz_vec_4_copy_ibz( + &elem2, &(lat2->basis[0][i]), &(lat2->basis[1][i]), &(lat2->basis[2][i]), &(lat2->basis[3][i])); + quat_alg_coord_mul(&elem_res, &elem1, &elem2, alg); + for (int j = 0; j < 4; j++) { + if (k == 0) + ibz_copy(&(detmat[i][j]), &(elem_res[j])); + ibz_copy(&(generators[4 * k + i][j]), &(elem_res[j])); + } + } + } + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &detmat); + ibz_abs(&det, &det); + ibz_mat_4xn_hnf_mod_core(&(res->basis), 16, generators, &det); + ibz_mul(&(res->denom), &(lat1->denom), &(lat2->denom)); + quat_lattice_reduce_denom(res, res); + ibz_vec_4_finalize(&elem1); + ibz_vec_4_finalize(&elem2); + ibz_vec_4_finalize(&elem_res); + quat_lattice_finalize(&lat_res); + ibz_finalize(&det); + ibz_mat_4x4_finalize(&(detmat)); + for (int i = 0; i < 16; i++) + ibz_vec_4_finalize(&(generators[i])); +} + +// lattice assumed of full rank +int +quat_lattice_contains(ibz_vec_4_t *coord, const quat_lattice_t *lat, const quat_alg_elem_t *x) +{ + int divisible = 0; + ibz_vec_4_t work_coord; + ibz_mat_4x4_t inv; + ibz_t det, prod; + ibz_init(&prod); + ibz_init(&det); + ibz_vec_4_init(&work_coord); + ibz_mat_4x4_init(&inv); + ibz_mat_4x4_inv_with_det_as_denom(&inv, &det, &(lat->basis)); + assert(!ibz_is_zero(&det)); + ibz_mat_4x4_eval(&work_coord, &inv, &(x->coord)); + ibz_vec_4_scalar_mul(&(work_coord), &(lat->denom), &work_coord); + ibz_mul(&prod, &(x->denom), &det); + divisible = ibz_vec_4_scalar_div(&work_coord, &prod, &work_coord); + // copy result + if (divisible && (coord != NULL)) { + for (int i = 0; i < 4; i++) { + ibz_copy(&((*coord)[i]), &(work_coord[i])); + } + } + ibz_finalize(&prod); + ibz_finalize(&det); + ibz_mat_4x4_finalize(&inv); + ibz_vec_4_finalize(&work_coord); + return (divisible); +} + +void +quat_lattice_index(ibz_t *index, const quat_lattice_t *sublat, const quat_lattice_t *overlat) +{ + ibz_t tmp, det; + ibz_init(&tmp); + ibz_init(&det); + + // det = det(sublat->basis) + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &sublat->basis); + // tmp = (overlat->denom)⁴ + ibz_mul(&tmp, &overlat->denom, &overlat->denom); + ibz_mul(&tmp, &tmp, &tmp); + // index = (overlat->denom)⁴ · det(sublat->basis) + ibz_mul(index, &det, &tmp); + // tmp = (sublat->denom)⁴ + ibz_mul(&tmp, &sublat->denom, &sublat->denom); + ibz_mul(&tmp, &tmp, &tmp); + // det = det(overlat->basis) + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &overlat->basis); + // tmp = (sublat->denom)⁴ · det(overlat->basis) + ibz_mul(&tmp, &tmp, &det); + // index = index / tmp + ibz_div(index, &tmp, index, &tmp); + assert(ibz_is_zero(&tmp)); + // index = |index| + ibz_abs(index, index); + + ibz_finalize(&tmp); + ibz_finalize(&det); +} + +void +quat_lattice_hnf(quat_lattice_t *lat) +{ + ibz_t mod; + ibz_vec_4_t generators[4]; + ibz_init(&mod); + ibz_mat_4x4_inv_with_det_as_denom(NULL, &mod, &(lat->basis)); + ibz_abs(&mod, &mod); + for (int i = 0; i < 4; i++) + ibz_vec_4_init(&(generators[i])); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(generators[j][i]), &(lat->basis[i][j])); + } 
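+        // generators[j] receives column j of the current basis (the j-th
+        // lattice vector), one coordinate per iteration of the outer loop.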
+ } + ibz_mat_4xn_hnf_mod_core(&(lat->basis), 4, generators, &mod); + quat_lattice_reduce_denom(lat, lat); + ibz_finalize(&mod); + for (int i = 0; i < 4; i++) + ibz_vec_4_finalize(&(generators[i])); +} + +void +quat_lattice_gram(ibz_mat_4x4_t *G, const quat_lattice_t *lattice, const quat_alg_t *alg) +{ + ibz_t tmp; + ibz_init(&tmp); + for (int i = 0; i < 4; i++) { + for (int j = 0; j <= i; j++) { + ibz_set(&(*G)[i][j], 0); + for (int k = 0; k < 4; k++) { + ibz_mul(&tmp, &(lattice->basis)[k][i], &(lattice->basis)[k][j]); + if (k >= 2) + ibz_mul(&tmp, &tmp, &alg->p); + ibz_add(&(*G)[i][j], &(*G)[i][j], &tmp); + } + ibz_mul(&(*G)[i][j], &(*G)[i][j], &ibz_const_two); + } + } + for (int i = 0; i < 4; i++) { + for (int j = i + 1; j < 4; j++) { + ibz_copy(&(*G)[i][j], &(*G)[j][i]); + } + } + ibz_finalize(&tmp); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lll_applications.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lll_applications.c new file mode 100644 index 0000000000..6c763b8c04 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lll_applications.c @@ -0,0 +1,127 @@ +#include +#include +#include "lll_internals.h" + +void +quat_lideal_reduce_basis(ibz_mat_4x4_t *reduced, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + ibz_t gram_corrector; + ibz_init(&gram_corrector); + ibz_mul(&gram_corrector, &(lideal->lattice.denom), &(lideal->lattice.denom)); + quat_lideal_class_gram(gram, lideal, alg); + ibz_mat_4x4_copy(reduced, &(lideal->lattice.basis)); + quat_lll_core(gram, reduced); + ibz_mat_4x4_scalar_mul(gram, &gram_corrector, gram); + for (int i = 0; i < 4; i++) { + ibz_div_2exp(&((*gram)[i][i]), &((*gram)[i][i]), 1); + for (int j = i + 1; j < 4; j++) { + ibz_set(&((*gram)[i][j]), 0); + } + } + ibz_finalize(&gram_corrector); +} + +void +quat_lideal_lideal_mul_reduced(quat_left_ideal_t *prod, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg) +{ + ibz_mat_4x4_t red; + ibz_mat_4x4_init(&red); + + quat_lattice_mul(&(prod->lattice), &(lideal1->lattice), &(lideal2->lattice), alg); + prod->parent_order = lideal1->parent_order; + quat_lideal_norm(prod); + quat_lideal_reduce_basis(&red, gram, prod, alg); + ibz_mat_4x4_copy(&(prod->lattice.basis), &red); + + ibz_mat_4x4_finalize(&red); +} + +int +quat_lideal_prime_norm_reduced_equivalent(quat_left_ideal_t *lideal, + const quat_alg_t *alg, + const int primality_num_iter, + const int equiv_bound_coeff) +{ + ibz_mat_4x4_t gram, red; + ibz_mat_4x4_init(&gram); + ibz_mat_4x4_init(&red); + + int found = 0; + + // computing the reduced basis + quat_lideal_reduce_basis(&red, &gram, lideal, alg); + + quat_alg_elem_t new_alpha; + quat_alg_elem_init(&new_alpha); + ibz_t tmp, remainder, adjusted_norm; + ibz_init(&tmp); + ibz_init(&remainder); + ibz_init(&adjusted_norm); + + ibz_mul(&adjusted_norm, &lideal->lattice.denom, &lideal->lattice.denom); + + int ctr = 0; + + // equiv_num_iter = (2 * equiv_bound_coeff + 1)^4 + assert(equiv_bound_coeff < (1 << 20)); + int equiv_num_iter = (2 * equiv_bound_coeff + 1); + equiv_num_iter = equiv_num_iter * equiv_num_iter; + equiv_num_iter = equiv_num_iter * equiv_num_iter; + + while (!found && ctr < equiv_num_iter) { + ctr++; + // we select our linear combination at random + ibz_rand_interval_minm_m(&new_alpha.coord[0], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord[1], equiv_bound_coeff); + 
ibz_rand_interval_minm_m(&new_alpha.coord[2], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord[3], equiv_bound_coeff); + + // computation of the norm of the vector sampled + quat_qf_eval(&tmp, &gram, &new_alpha.coord); + + // compute the norm of the equivalent ideal + // can be improved by removing the power of two first and the odd part only if the trial + // division failed (this should always be called on an ideal of norm 2^x * N for some + // big prime N ) + ibz_div(&tmp, &remainder, &tmp, &adjusted_norm); + + // debug : check that the remainder is zero + assert(ibz_is_zero(&remainder)); + + // pseudo-primality test + if (ibz_probab_prime(&tmp, primality_num_iter)) { + + // computes the generator using a matrix multiplication + ibz_mat_4x4_eval(&new_alpha.coord, &red, &new_alpha.coord); + ibz_copy(&new_alpha.denom, &lideal->lattice.denom); + assert(quat_lattice_contains(NULL, &lideal->lattice, &new_alpha)); + + quat_alg_conj(&new_alpha, &new_alpha); + ibz_mul(&new_alpha.denom, &new_alpha.denom, &lideal->norm); + quat_lideal_mul(lideal, lideal, &new_alpha, alg); + assert(ibz_probab_prime(&lideal->norm, primality_num_iter)); + + found = 1; + break; + } + } + assert(found); + + ibz_finalize(&tmp); + ibz_finalize(&remainder); + ibz_finalize(&adjusted_norm); + quat_alg_elem_finalize(&new_alpha); + + ibz_mat_4x4_finalize(&gram); + ibz_mat_4x4_finalize(&red); + + return found; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lll_internals.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lll_internals.h new file mode 100644 index 0000000000..e8d90141ac --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lll_internals.h @@ -0,0 +1,238 @@ +#ifndef LLL_INTERNALS_H +#define LLL_INTERNALS_H + +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations of functions only used for the LLL tets + */ + +#include + +/** @internal + * @ingroup quat_helpers + * @defgroup lll_internal Functions only used for LLL or its tests + * @{ + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_params Parameters used by the L2 implementation (floats) and its tests (ints) + * @{ + */ + +#define DELTABAR 0.995 +#define DELTA_NUM 99 +#define DELTA_DENOM 100 + +#define ETABAR 0.505 +#define EPSILON_NUM 1 +#define EPSILON_DENOM 100 + +#define PREC 64 +/** + * @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup ibq_t Types for rationals + * @{ + */ + +/** @brief Type for fractions of integers + * + * @typedef ibq_t + * + * For fractions of integers of arbitrary size, used by intbig module, using gmp + */ +typedef ibz_t ibq_t[2]; +typedef ibq_t ibq_vec_4_t[4]; +typedef ibq_t ibq_mat_4x4_t[4][4]; + +/**@} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_ibq_c Constructors and Destructors and Printers + * @{ + */ + +void ibq_init(ibq_t *x); +void ibq_finalize(ibq_t *x); + +void ibq_mat_4x4_init(ibq_mat_4x4_t *mat); +void ibq_mat_4x4_finalize(ibq_mat_4x4_t *mat); + +void ibq_vec_4_init(ibq_vec_4_t *vec); +void ibq_vec_4_finalize(ibq_vec_4_t *vec); + +void ibq_mat_4x4_print(const ibq_mat_4x4_t *mat); +void ibq_vec_4_print(const ibq_vec_4_t *vec); + +/** @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_qa Basic fraction arithmetic + * @{ + */ + +/** @brief sum=a+b + */ +void ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b); + +/** @brief diff=a-b + */ +void ibq_sub(ibq_t *diff, const ibq_t *a, const ibq_t *b); + +/** @brief neg=-x + */ +void ibq_neg(ibq_t *neg, const ibq_t *x); + +/** @brief 
abs=|x| + */ +void ibq_abs(ibq_t *abs, const ibq_t *x); + +/** @brief prod=a*b + */ +void ibq_mul(ibq_t *prod, const ibq_t *a, const ibq_t *b); + +/** @brief inv=1/x + * + * @returns 0 if x is 0, 1 if inverse exists and was computed + */ +int ibq_inv(ibq_t *inv, const ibq_t *x); + +/** @brief Compare a and b + * + * @returns a positive value if a > b, zero if a = b, and a negative value if a < b + */ +int ibq_cmp(const ibq_t *a, const ibq_t *b); + +/** @brief Test if x is 0 + * + * @returns 1 if x=0, 0 otherwise + */ +int ibq_is_zero(const ibq_t *x); + +/** @brief Test if x is 1 + * + * @returns 1 if x=1, 0 otherwise + */ +int ibq_is_one(const ibq_t *x); + +/** @brief Set q to a/b if b not 0 + * + * @returns 1 if b not 0 and q is set, 0 otherwise + */ +int ibq_set(ibq_t *q, const ibz_t *a, const ibz_t *b); + +/** @brief Copy value into target + */ +void ibq_copy(ibq_t *target, const ibq_t *value); + +/** @brief Checks if q is an integer + * + * @returns 1 if yes, 0 if not + */ +int ibq_is_ibz(const ibq_t *q); + +/** + * @brief Converts a fraction q to an integer y, if q is an integer. + * + * @returns 1 if z is an integer, 0 if not + */ +int ibq_to_ibz(ibz_t *z, const ibq_t *q); +/** @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup quat_lll_verify_helpers Helper functions for lll verification in dimension 4 + * @{ + */ + +/** @brief Set ibq to parameters delta and eta = 1/2 + epsilon using L2 constants + */ +void quat_lll_set_ibq_parameters(ibq_t *delta, ibq_t *eta); + +/** @brief Set an ibq vector to 4 given integer coefficients + */ +void ibq_vec_4_copy_ibz(ibq_vec_4_t *vec, + const ibz_t *coeff0, + const ibz_t *coeff1, + const ibz_t *coeff2, + const ibz_t *coeff3); // dim4, test/dim4 + +/** @brief Bilinear form vec00*vec10+vec01*vec11+q*vec02*vec12+q*vec03*vec13 for ibz_q + */ +void quat_lll_bilinear(ibq_t *b, const ibq_vec_4_t *vec0, const ibq_vec_4_t *vec1, + const ibz_t *q); // dim4, test/dim4 + +/** @brief Outputs the transposition of the orthogonalised matrix of mat (as fractions) + * + * For the bilinear form vec00*vec10+vec01*vec11+q*vec02*vec12+q*vec03*vec13 + */ +void quat_lll_gram_schmidt_transposed_with_ibq(ibq_mat_4x4_t *orthogonalised_transposed, + const ibz_mat_4x4_t *mat, + const ibz_t *q); // dim4 + +/** @brief Verifies if mat is lll-reduced for parameter coeff and norm defined by q + * + * For the bilinear form vec00*vec10+vec01*vec11+q*vec02*vec12+q*vec03*vec13 + */ +int quat_lll_verify(const ibz_mat_4x4_t *mat, + const ibq_t *delta, + const ibq_t *eta, + const quat_alg_t *alg); // test/lattice, test/dim4 + /** @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_internal_gram Internal LLL function + * @{ + */ + +/** @brief In-place L2 reduction core function + * + * Given a lattice basis represented by the columns of a 4x4 matrix + * and the Gram matrix of its bilinear form, L2-reduces the basis + * in-place and updates the Gram matrix accordingly. 
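/*
 * Sketch (hypothetical helper, not upstream code) of how the integer L2 parameters above
 * map onto ibq_t fractions on the test/verification side: delta = DELTA_NUM/DELTA_DENOM
 * = 99/100 and eta = 1/2 + EPSILON_NUM/EPSILON_DENOM = 51/100, alongside the floating-point
 * DELTABAR/ETABAR constants used by the reduction itself.  Assumes <assert.h> and the
 * ibq_*/ibz_* declarations this header relies on.
 */
static void
lll_params_sketch(void)
{
    ibz_t num, den;
    ibq_t delta, eta, half, eps;

    ibz_init(&num);
    ibz_init(&den);
    ibq_init(&delta);
    ibq_init(&eta);
    ibq_init(&half);
    ibq_init(&eps);

    ibz_set(&num, DELTA_NUM);
    ibz_set(&den, DELTA_DENOM);
    ibq_set(&delta, &num, &den);        // delta = 99/100

    ibz_set(&num, 1);
    ibz_set(&den, 2);
    ibq_set(&half, &num, &den);         // 1/2
    ibz_set(&num, EPSILON_NUM);
    ibz_set(&den, EPSILON_DENOM);
    ibq_set(&eps, &num, &den);          // epsilon = 1/100
    ibq_add(&eta, &half, &eps);         // eta = 1/2 + epsilon = 51/100

    assert(ibq_cmp(&eta, &delta) < 0);  // sanity check: 51/100 lies below 99/100

    ibq_finalize(&eps);
    ibq_finalize(&half);
    ibq_finalize(&eta);
    ibq_finalize(&delta);
    ibz_finalize(&den);
    ibz_finalize(&num);
}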
+ * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param G In/Output: Gram matrix of the lattice basis + * @param basis In/Output: lattice basis + */ +void quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis); + +/** + * @brief LLL reduction on 4-dimensional lattice + * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param red Output: LLL reduced basis + * @param lattice In/Output: lattice with 4-dimensional basis + * @param alg The quaternion algebra + */ +int quat_lattice_lll(ibz_mat_4x4_t *red, const quat_lattice_t *lattice, const quat_alg_t *alg); + +/** + * @} + */ + +// end of lll_internal +/** @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mp.c new file mode 100644 index 0000000000..27f4a963db --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mp.c @@ -0,0 +1,357 @@ +#include +#include +#include +#include + +// double-wide multiplication +void +MUL(digit_t *out, const digit_t a, const digit_t b) +{ +#ifdef RADIX_32 + uint64_t r = (uint64_t)a * b; + out[0] = r & 0xFFFFFFFFUL; + out[1] = r >> 32; + +#elif defined(RADIX_64) && defined(_MSC_VER) + uint64_t umul_hi; + out[0] = _umul128(a, b, &umul_hi); + out[1] = umul_hi; + +#elif defined(RADIX_64) && defined(HAVE_UINT128) + unsigned __int128 umul_tmp; + umul_tmp = (unsigned __int128)(a) * (unsigned __int128)(b); + out[0] = (uint64_t)umul_tmp; + out[1] = (uint64_t)(umul_tmp >> 64); + +#else + register digit_t al, ah, bl, bh, temp; + digit_t albl, albh, ahbl, ahbh, res1, res2, res3, carry; + digit_t mask_low = (digit_t)(-1) >> (sizeof(digit_t) * 4), mask_high = (digit_t)(-1) << (sizeof(digit_t) * 4); + al = a & mask_low; // Low part + ah = a >> (sizeof(digit_t) * 4); // High part + bl = b & mask_low; + bh = b >> (sizeof(digit_t) * 4); + + albl = al * bl; + albh = al * bh; + ahbl = ah * bl; + ahbh = ah * bh; + out[0] = albl & mask_low; // out00 + + res1 = albl >> (sizeof(digit_t) * 4); + res2 = ahbl & mask_low; + res3 = albh & mask_low; + temp = res1 + res2 + res3; + carry = temp >> (sizeof(digit_t) * 4); + out[0] ^= temp << (sizeof(digit_t) * 4); // out01 + + res1 = ahbl >> (sizeof(digit_t) * 4); + res2 = albh >> (sizeof(digit_t) * 4); + res3 = ahbh & mask_low; + temp = res1 + res2 + res3 + carry; + out[1] = temp & mask_low; // out10 + carry = temp & mask_high; + out[1] ^= (ahbh & mask_high) + carry; // out11 + +#endif +} + +void +mp_add(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords) +{ // Multiprecision addition + unsigned int i, carry = 0; + + for (i = 0; i < nwords; i++) { + ADDC(c[i], carry, a[i], b[i], carry); + } +} + +digit_t +mp_shiftr(digit_t *x, const unsigned int shift, const unsigned int nwords) +{ // Multiprecision right shift by 1...RADIX-1 + digit_t bit_out = x[0] & 1; + + for (unsigned int i = 0; i < nwords - 1; i++) { + SHIFTR(x[i + 1], x[i], shift, x[i], RADIX); + } + x[nwords - 1] >>= shift; + return bit_out; +} + +void +mp_shiftl(digit_t *x, const unsigned int shift, const unsigned int nwords) +{ // Multiprecision left shift by 1...RADIX-1 + + for (int i = nwords - 1; i > 0; i--) { + SHIFTL(x[i], x[i - 1], shift, x[i], RADIX); + } + x[0] <<= shift; +} + +void +multiple_mp_shiftl(digit_t *x, const 
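/*
 * Self-contained sketch (not upstream code) of the high/low-half splitting used by the
 * portable MUL() fallback above, specialised to 64-bit digits:
 * a*b = ah*bh*2^64 + (ah*bl + al*bh)*2^32 + al*bl, reassembled with explicit carry handling.
 * The check against unsigned __int128 only runs where the compiler provides that type.
 */
#include <assert.h>
#include <stdint.h>

static void
mul64_wide_sketch(uint64_t out[2], uint64_t a, uint64_t b)
{
    uint64_t al = a & 0xFFFFFFFFULL, ah = a >> 32;
    uint64_t bl = b & 0xFFFFFFFFULL, bh = b >> 32;
    uint64_t albl = al * bl, albh = al * bh, ahbl = ah * bl, ahbh = ah * bh;

    // middle column: high half of albl plus the low halves of both cross products
    uint64_t mid = (albl >> 32) + (ahbl & 0xFFFFFFFFULL) + (albh & 0xFFFFFFFFULL);

    out[0] = (albl & 0xFFFFFFFFULL) | (mid << 32);             // low 64 bits
    out[1] = ahbh + (ahbl >> 32) + (albh >> 32) + (mid >> 32); // high 64 bits
}

static void
mul64_wide_check(void)
{
#ifdef __SIZEOF_INT128__
    uint64_t a = 0xDEADBEEFCAFEF00DULL, b = 0x0123456789ABCDEFULL, r[2];
    unsigned __int128 ref = (unsigned __int128)a * b;
    mul64_wide_sketch(r, a, b);
    assert(r[0] == (uint64_t)ref && r[1] == (uint64_t)(ref >> 64));
#endif
}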
unsigned int shift, const unsigned int nwords) +{ + int t = shift; + while (t > RADIX - 1) { + mp_shiftl(x, RADIX - 1, nwords); + t = t - (RADIX - 1); + } + mp_shiftl(x, t, nwords); +} + +// The below functions were taken from the EC module + +void +mp_sub(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords) +{ // Multiprecision subtraction, assuming a > b + unsigned int i, borrow = 0; + + for (i = 0; i < nwords; i++) { + SUBC(c[i], borrow, a[i], b[i], borrow); + } +} + +void +select_ct(digit_t *c, const digit_t *a, const digit_t *b, const digit_t mask, const int nwords) +{ // Select c <- a if mask = 0, select c <- b if mask = 1...1 + + for (int i = 0; i < nwords; i++) { + c[i] = ((a[i] ^ b[i]) & mask) ^ a[i]; + } +} + +void +swap_ct(digit_t *a, digit_t *b, const digit_t option, const int nwords) +{ // Swap entries + // If option = 0 then a <- a and b <- b, else if option = 0xFF...FF then a <- b and b <- a + digit_t temp; + + for (int i = 0; i < nwords; i++) { + temp = option & (a[i] ^ b[i]); + a[i] = temp ^ a[i]; + b[i] = temp ^ b[i]; + } +} + +int +mp_compare(const digit_t *a, const digit_t *b, unsigned int nwords) +{ // Multiprecision comparison: (1) a>b, (0) a=b, (-1) a<b + for (int i = nwords - 1; i >= 0; i--) { + if (a[i] > b[i]) + return 1; + else if (a[i] < b[i]) + return -1; + } + return 0; +} + +bool +mp_is_zero(const digit_t *a, unsigned int nwords) +{ // Is a multiprecision element zero? + // Returns 1 (true) if a=0, 0 (false) otherwise + digit_t r = 0; + + for (unsigned int i = 0; i < nwords; i++) + r |= a[i] ^ 0; + + return (bool)is_digit_zero_ct(r); +} + +void +mp_mul2(digit_t *c, const digit_t *a, const digit_t *b) +{ // Multiprecision multiplication fixed to two-digit operands + unsigned int carry = 0; + digit_t t0[2], t1[2], t2[2]; + + MUL(t0, a[0], b[0]); + MUL(t1, a[0], b[1]); + ADDC(t0[1], carry, t0[1], t1[0], carry); + ADDC(t1[1], carry, 0, t1[1], carry); + MUL(t2, a[1], b[1]); + ADDC(t2[0], carry, t2[0], t1[1], carry); + ADDC(t2[1], carry, 0, t2[1], carry); + c[0] = t0[0]; + c[1] = t0[1]; + c[2] = t2[0]; + c[3] = t2[1]; +} + +void +mp_print(const digit_t *a, size_t nwords) +{ + printf("0x"); + for (size_t i = 0; i < nwords; i++) { +#ifdef RADIX_32 + printf("%08" PRIx32, a[nwords - i - 1]); // Print each word with 8 hex digits +#elif defined(RADIX_64) + printf("%016" PRIx64, a[nwords - i - 1]); // Print each word with 16 hex digits +#endif + } +} + +void +mp_copy(digit_t *b, const digit_t *a, size_t nwords) +{ + for (size_t i = 0; i < nwords; i++) { + b[i] = a[i]; + } +} + +void +mp_mul(digit_t *c, const digit_t *a, const digit_t *b, size_t nwords) +{ + // Multiprecision multiplication, c = a*b, for nwords-digit inputs, with nwords-digit output + // explicitly does not use the higher half of c, as we do not need it in our applications + digit_t carry, UV[2], t[nwords], cc[nwords]; + + for (size_t i = 0; i < nwords; i++) { + cc[i] = 0; + } + + for (size_t i = 0; i < nwords; i++) { + + MUL(t, a[i], b[0]); + + for (size_t j = 1; j < nwords - 1; j++) { + MUL(UV, a[i], b[j]); + ADDC(t[j], carry, t[j], UV[0], 0); + t[j + 1] = UV[1] + carry; + } + + int j = nwords - 1; + MUL(UV, a[i], b[j]); + ADDC(t[j], carry, t[j], UV[0], 0); + + mp_add(&cc[i], &cc[i], t, nwords - i); + } + + mp_copy(c, cc, nwords); +} + +void +mp_mod_2exp(digit_t *a, unsigned int e, unsigned int nwords) +{ // Multiprecision modulo 2^e, with 0 <= a < 2^(e) + unsigned int i, q = e >> LOG2RADIX, r = e & (RADIX - 1); + + if (q < nwords) { + a[q] &= ((digit_t)1 << r) - 1; + + for (i = q + 1; i < nwords; i++) { + a[i] 
= 0; + } + } +} + +void +mp_neg(digit_t *a, unsigned int nwords) +{ // negates a + for (size_t i = 0; i < nwords; i++) { + a[i] ^= -1; + } + + a[0] += 1; +} + +bool +mp_is_one(const digit_t *x, unsigned int nwords) +{ // returns true if x represents 1, and false otherwise + if (x[0] != 1) { + return false; + } + + for (size_t i = 1; i < nwords; i++) { + if (x[i] != 0) { + return false; + } + } + return true; +} + +void +mp_inv_2e(digit_t *b, const digit_t *a, int e, unsigned int nwords) +{ // Inversion modulo 2^e, using Newton's method and Hensel lifting + // we take the first power of 2 larger than e to use + // requires a to be odd, of course + // returns b such that a*b = 1 mod 2^e + assert((a[0] & 1) == 1); + + digit_t x[nwords], y[nwords], aa[nwords], mp_one[nwords], tmp[nwords]; + mp_copy(aa, a, nwords); + + mp_one[0] = 1; + for (unsigned int i = 1; i < nwords; i++) { + mp_one[i] = 0; + } + + int p = 1; + while ((1 << p) < e) { + p++; + } + p -= 2; // using k = 4 for initial inverse + int w = (1 << (p + 2)); + + mp_mod_2exp(aa, w, nwords); + mp_add(x, aa, aa, nwords); + mp_add(x, x, aa, nwords); // should be 3a + x[0] ^= (1 << 1); // so that x equals (3a)^2 xor 2 + mp_mod_2exp(x, w, nwords); // now x*a = 1 mod 2^4, which we lift + + mp_mul(tmp, aa, x, nwords); + mp_neg(tmp, nwords); + mp_add(y, mp_one, tmp, nwords); + + // Hensel lifting for p rounds + for (int i = 0; i < p; i++) { + mp_add(tmp, mp_one, y, nwords); + mp_mul(x, x, tmp, nwords); + mp_mul(y, y, y, nwords); + } + + mp_mod_2exp(x, w, nwords); + mp_copy(b, x, nwords); + + // verify results + mp_mul(x, x, aa, nwords); + mp_mod_2exp(x, w, nwords); + assert(mp_is_one(x, nwords)); +} + +void +mp_invert_matrix(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, int e, unsigned int nwords) +{ + // given a matrix ( ( a, b ), (c, d) ) of values mod 2^e + // returns the inverse matrix gamma ( (d, -b), (-c, a) ) + // where gamma is the inverse of the determinant a*d - b*c + // assumes the matrix is invertible, otherwises, inversion of determinant fails + + int p = 1; + while ((1 << p) < e) { + p++; + } + int w = (1 << (p)); + + digit_t det[nwords], tmp[nwords], resa[nwords], resb[nwords], resc[nwords], resd[nwords]; + mp_mul(tmp, r1, s2, nwords); + mp_mul(det, r2, s1, nwords); + mp_sub(det, tmp, det, nwords); + mp_inv_2e(det, det, e, nwords); + + mp_mul(resa, det, s2, nwords); + mp_mul(resb, det, r2, nwords); + mp_mul(resc, det, s1, nwords); + mp_mul(resd, det, r1, nwords); + + mp_neg(resb, nwords); + mp_neg(resc, nwords); + + mp_mod_2exp(resa, w, nwords); + mp_mod_2exp(resb, w, nwords); + mp_mod_2exp(resc, w, nwords); + mp_mod_2exp(resd, w, nwords); + + mp_copy(r1, resa, nwords); + mp_copy(r2, resb, nwords); + mp_copy(s1, resc, nwords); + mp_copy(s2, resd, nwords); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/normeq.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/normeq.c new file mode 100644 index 0000000000..8c133dd095 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/normeq.c @@ -0,0 +1,369 @@ +#include +#include "internal.h" + +/** @file + * + * @authors Antonin Leroux + * + * @brief Functions related to norm equation solving or special extremal orders + */ + +void +quat_lattice_O0_set(quat_lattice_t *O0) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&(O0->basis[i][j]), 0); + } + } + ibz_set(&(O0->denom), 2); + ibz_set(&(O0->basis[0][0]), 2); + ibz_set(&(O0->basis[1][1]), 2); + ibz_set(&(O0->basis[2][2]), 1); + ibz_set(&(O0->basis[1][2]), 1); + 
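/*
 * Single-word sketch (not upstream code) of the Newton/Hensel iteration used by
 * mp_inv_2e() above, specialised to one 64-bit digit: x = (3*a) ^ 2 is a correct
 * inverse of an odd a modulo 2^4, and each step x *= 2 - a*x doubles the number of
 * correct low bits, so four steps reach 2^64.  The multiprecision version expresses
 * the same recurrence through y = 1 - a*x.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t
inv64_sketch(uint64_t a)
{
    assert(a & 1);              // only odd values are invertible mod 2^64
    uint64_t x = (3 * a) ^ 2;   // correct to 4 bits
    for (int i = 0; i < 4; i++) // 4 -> 8 -> 16 -> 32 -> 64 bits
        x *= 2 - a * x;         // Newton step; wraps modulo 2^64
    return x;
}

static void
inv64_sketch_check(void)
{
    uint64_t a = 0x1234567890ABCDEFULL; // odd
    assert(a * inv64_sketch(a) == 1);   // product wraps to 1 mod 2^64
}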
ibz_set(&(O0->basis[3][3]), 1); + ibz_set(&(O0->basis[0][3]), 1); +} + +void +quat_lattice_O0_set_extremal(quat_p_extremal_maximal_order_t *O0) +{ + ibz_set(&O0->z.coord[1], 1); + ibz_set(&O0->t.coord[2], 1); + ibz_set(&O0->z.denom, 1); + ibz_set(&O0->t.denom, 1); + O0->q = 1; + quat_lattice_O0_set(&(O0->order)); +} + +void +quat_order_elem_create(quat_alg_elem_t *elem, + const quat_p_extremal_maximal_order_t *order, + const ibz_vec_4_t *coeffs, + const quat_alg_t *Bpoo) +{ + + // var dec + quat_alg_elem_t quat_temp; + + // var init + quat_alg_elem_init(&quat_temp); + + // elem = x + quat_alg_scalar(elem, &(*coeffs)[0], &ibz_const_one); + + // quat_temp = i*y + quat_alg_scalar(&quat_temp, &((*coeffs)[1]), &ibz_const_one); + quat_alg_mul(&quat_temp, &order->z, &quat_temp, Bpoo); + + // elem = x + i*y + quat_alg_add(elem, elem, &quat_temp); + + // quat_temp = z * j + quat_alg_scalar(&quat_temp, &(*coeffs)[2], &ibz_const_one); + quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); + + // elem = x + i* + z*j + quat_alg_add(elem, elem, &quat_temp); + + // quat_temp = t * j * i + quat_alg_scalar(&quat_temp, &(*coeffs)[3], &ibz_const_one); + quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); + quat_alg_mul(&quat_temp, &quat_temp, &order->z, Bpoo); + + // elem = x + i*y + j*z + j*i*t + quat_alg_add(elem, elem, &quat_temp); + + quat_alg_elem_finalize(&quat_temp); +} + +int +quat_represent_integer(quat_alg_elem_t *gamma, + const ibz_t *n_gamma, + int non_diag, + const quat_represent_integer_params_t *params) +{ + + if (ibz_is_even(n_gamma)) { + return 0; + } + // var dec + int found; + ibz_t cornacchia_target; + ibz_t adjusted_n_gamma, q; + ibz_t bound, sq_bound, temp; + ibz_t test; + ibz_vec_4_t coeffs; // coeffs = [x,y,z,t] + quat_alg_elem_t quat_temp; + + if (non_diag) + assert(params->order->q % 4 == 1); + + // var init + found = 0; + ibz_init(&bound); + ibz_init(&test); + ibz_init(&temp); + ibz_init(&q); + ibz_init(&sq_bound); + ibz_vec_4_init(&coeffs); + quat_alg_elem_init(&quat_temp); + ibz_init(&adjusted_n_gamma); + ibz_init(&cornacchia_target); + + ibz_set(&q, params->order->q); + + // this could be removed in the current state + int standard_order = (params->order->q == 1); + + // adjusting the norm of gamma (multiplying by 4 to find a solution in an order of odd level) + if (non_diag || standard_order) { + ibz_mul(&adjusted_n_gamma, n_gamma, &ibz_const_two); + ibz_mul(&adjusted_n_gamma, &adjusted_n_gamma, &ibz_const_two); + } else { + ibz_copy(&adjusted_n_gamma, n_gamma); + } + // computation of the first bound = sqrt (adjust_n_gamma / p - q) + ibz_div(&sq_bound, &bound, &adjusted_n_gamma, &((params->algebra)->p)); + ibz_set(&temp, params->order->q); + ibz_sub(&sq_bound, &sq_bound, &temp); + ibz_sqrt_floor(&bound, &sq_bound); + + // the size of the search space is roughly n_gamma / (p√q) + ibz_t counter; + ibz_init(&counter); + ibz_mul(&temp, &temp, &((params->algebra)->p)); + ibz_mul(&temp, &temp, &((params->algebra)->p)); + ibz_sqrt_floor(&temp, &temp); + ibz_div(&counter, &temp, &adjusted_n_gamma, &temp); + + // entering the main loop + while (!found && ibz_cmp(&counter, &ibz_const_zero) != 0) { + // decreasing the counter + ibz_sub(&counter, &counter, &ibz_const_one); + + // we start by sampling the first coordinate + ibz_rand_interval(&coeffs[2], &ibz_const_one, &bound); + + // then, we sample the second coordinate + // computing the second bound in temp as sqrt( (adjust_n_gamma - p*coeffs[2]²)/qp ) + ibz_mul(&cornacchia_target, &coeffs[2], &coeffs[2]); + ibz_mul(&temp, 
&cornacchia_target, &(params->algebra->p)); + ibz_sub(&temp, &adjusted_n_gamma, &temp); + ibz_mul(&sq_bound, &q, &(params->algebra->p)); + ibz_div(&temp, &sq_bound, &temp, &sq_bound); + ibz_sqrt_floor(&temp, &temp); + + if (ibz_cmp(&temp, &ibz_const_zero) == 0) { + continue; + } + // sampling the second value + ibz_rand_interval(&coeffs[3], &ibz_const_one, &temp); + + // compute cornacchia_target = n_gamma - p * (z² + q*t²) + ibz_mul(&temp, &coeffs[3], &coeffs[3]); + ibz_mul(&temp, &q, &temp); + ibz_add(&cornacchia_target, &cornacchia_target, &temp); + ibz_mul(&cornacchia_target, &cornacchia_target, &((params->algebra)->p)); + ibz_sub(&cornacchia_target, &adjusted_n_gamma, &cornacchia_target); + assert(ibz_cmp(&cornacchia_target, &ibz_const_zero) > 0); + + // applying cornacchia + if (ibz_probab_prime(&cornacchia_target, params->primality_test_iterations)) + found = ibz_cornacchia_prime(&(coeffs[0]), &(coeffs[1]), &q, &cornacchia_target); + else + found = 0; + + if (found && non_diag && standard_order) { + // check that we can divide by two at least once + // the treatmeat depends if the basis contains (1+j)/2 or (1+k)/2 + // we must have x = t mod 2 and y = z mod 2 + // if q=1 we can simply swap x and y + if (ibz_is_odd(&coeffs[0]) != ibz_is_odd(&coeffs[3])) { + ibz_swap(&coeffs[1], &coeffs[0]); + } + // we further check that (x-t)/2 = 1 mod 2 and (y-z)/2 = 1 mod 2 to ensure that the + // resulting endomorphism will behave well for dim 2 computations + found = found && ((ibz_get(&coeffs[0]) - ibz_get(&coeffs[3])) % 4 == 2) && + ((ibz_get(&coeffs[1]) - ibz_get(&coeffs[2])) % 4 == 2); + } + if (found) { + +#ifndef NDEBUG + ibz_set(&temp, (params->order->q)); + ibz_mul(&temp, &temp, &(coeffs[1])); + ibz_mul(&temp, &temp, &(coeffs[1])); + ibz_mul(&test, &(coeffs[0]), &(coeffs[0])); + ibz_add(&temp, &temp, &test); + assert(0 == ibz_cmp(&temp, &cornacchia_target)); + + ibz_mul(&cornacchia_target, &(coeffs[3]), &(coeffs[3])); + ibz_mul(&cornacchia_target, &cornacchia_target, &(params->algebra->p)); + ibz_mul(&temp, &(coeffs[1]), &(coeffs[1])); + ibz_add(&cornacchia_target, &cornacchia_target, &temp); + ibz_set(&temp, (params->order->q)); + ibz_mul(&cornacchia_target, &cornacchia_target, &temp); + ibz_mul(&temp, &(coeffs[0]), &coeffs[0]); + ibz_add(&cornacchia_target, &cornacchia_target, &temp); + ibz_mul(&temp, &(coeffs[2]), &coeffs[2]); + ibz_mul(&temp, &temp, &(params->algebra->p)); + ibz_add(&cornacchia_target, &cornacchia_target, &temp); + assert(0 == ibz_cmp(&cornacchia_target, &adjusted_n_gamma)); +#endif + // translate x,y,z,t into the quaternion element gamma + quat_order_elem_create(gamma, (params->order), &coeffs, (params->algebra)); +#ifndef NDEBUG + quat_alg_norm(&temp, &(coeffs[0]), gamma, (params->algebra)); + assert(ibz_is_one(&(coeffs[0]))); + assert(0 == ibz_cmp(&temp, &adjusted_n_gamma)); + assert(quat_lattice_contains(NULL, &((params->order)->order), gamma)); +#endif + // making gamma primitive + // coeffs contains the coefficients of primitivized gamma in the basis of order + quat_alg_make_primitive(&coeffs, &temp, gamma, &((params->order)->order)); + + if (non_diag || standard_order) + found = (ibz_cmp(&temp, &ibz_const_two) == 0); + else + found = (ibz_cmp(&temp, &ibz_const_one) == 0); + } + } + + if (found) { + // new gamma + ibz_mat_4x4_eval(&coeffs, &(((params->order)->order).basis), &coeffs); + ibz_copy(&gamma->coord[0], &coeffs[0]); + ibz_copy(&gamma->coord[1], &coeffs[1]); + ibz_copy(&gamma->coord[2], &coeffs[2]); + ibz_copy(&gamma->coord[3], &coeffs[3]); + 
ibz_copy(&gamma->denom, &(((params->order)->order).denom)); + } + // var finalize + ibz_finalize(&counter); + ibz_finalize(&bound); + ibz_finalize(&temp); + ibz_finalize(&sq_bound); + ibz_vec_4_finalize(&coeffs); + quat_alg_elem_finalize(&quat_temp); + ibz_finalize(&adjusted_n_gamma); + ibz_finalize(&cornacchia_target); + ibz_finalize(&q); + ibz_finalize(&test); + + return found; +} + +int +quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, + const ibz_t *norm, + int is_prime, + const quat_represent_integer_params_t *params, + const ibz_t *prime_cofactor) +{ + + ibz_t n_temp, norm_d; + ibz_t disc; + quat_alg_elem_t gen, gen_rerand; + int found = 0; + ibz_init(&n_temp); + ibz_init(&norm_d); + ibz_init(&disc); + quat_alg_elem_init(&gen); + quat_alg_elem_init(&gen_rerand); + + // when the norm is prime we can be quite efficient + // by avoiding to run represent integer + // the first step is to generate one ideal of the correct norm + if (is_prime) { + + // we find a quaternion element of norm divisible by norm + while (!found) { + // generating a trace-zero element at random + ibz_set(&gen.coord[0], 0); + ibz_sub(&n_temp, norm, &ibz_const_one); + for (int i = 1; i < 4; i++) + ibz_rand_interval(&gen.coord[i], &ibz_const_zero, &n_temp); + + // first, we compute the norm of the gen + quat_alg_norm(&n_temp, &norm_d, &gen, (params->algebra)); + assert(ibz_is_one(&norm_d)); + + // and finally the negation mod norm + ibz_neg(&disc, &n_temp); + ibz_mod(&disc, &disc, norm); + // now we check that -n is a square mod norm + // and if the square root exists we compute it + found = ibz_sqrt_mod_p(&gen.coord[0], &disc, norm); + found = found && !quat_alg_elem_is_zero(&gen); + } + } else { + assert(prime_cofactor != NULL); + // if it is not prime or we don't know if it is prime, we may just use represent integer + // and use a precomputed prime as cofactor + assert(!ibz_is_zero(norm)); + ibz_mul(&n_temp, prime_cofactor, norm); + found = quat_represent_integer(&gen, &n_temp, 0, params); + found = found && !quat_alg_elem_is_zero(&gen); + } +#ifndef NDEBUG + if (found) { + // first, we compute the norm of the gen + quat_alg_norm(&n_temp, &norm_d, &gen, (params->algebra)); + assert(ibz_is_one(&norm_d)); + ibz_mod(&n_temp, &n_temp, norm); + assert(ibz_cmp(&n_temp, &ibz_const_zero) == 0); + } +#endif + + // now we just have to rerandomize the class of the ideal generated by gen + found = 0; + while (!found) { + for (int i = 0; i < 4; i++) { + ibz_rand_interval(&gen_rerand.coord[i], &ibz_const_one, norm); + } + quat_alg_norm(&n_temp, &norm_d, &gen_rerand, (params->algebra)); + assert(ibz_is_one(&norm_d)); + ibz_gcd(&disc, &n_temp, norm); + found = ibz_is_one(&disc); + found = found && !quat_alg_elem_is_zero(&gen_rerand); + } + + quat_alg_mul(&gen, &gen, &gen_rerand, (params->algebra)); + // in both cases, whether norm is prime or not prime, + // gen is not divisible by any integer factor of the target norm + // therefore the call below will yield an ideal of the correct norm + quat_lideal_create(lideal, &gen, norm, &((params->order)->order), (params->algebra)); + assert(ibz_cmp(norm, &(lideal->norm)) == 0); + + ibz_finalize(&n_temp); + quat_alg_elem_finalize(&gen); + quat_alg_elem_finalize(&gen_rerand); + ibz_finalize(&norm_d); + ibz_finalize(&disc); + return (found); +} + +void +quat_change_to_O0_basis(ibz_vec_4_t *vec, const quat_alg_elem_t *el) +{ + ibz_t tmp; + ibz_init(&tmp); + ibz_copy(&(*vec)[2], &el->coord[2]); + ibz_add(&(*vec)[2], &(*vec)[2], &(*vec)[2]); // double (not optimal if 
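/*
 * Toy version (plain int arithmetic, hypothetical small values, not upstream code) of the
 * prime-norm case above: with the norm form of weights (1, 1, p, p) used by
 * quat_lattice_gram() earlier in this patch, a trace-zero element b*i + c*j + d*k has
 * reduced norm n0 = b^2 + p*(c^2 + d^2), and x + b*i + c*j + d*k has norm divisible by the
 * target prime N exactly when x^2 = -n0 (mod N), which is what ibz_sqrt_mod_p solves.
 */
#include <assert.h>

static void
prime_norm_toy(void)
{
    const int p = 5, N = 13;        // hypothetical small parameters
    const int b = 2, c = 1, d = 1;  // stand-ins for the randomly sampled coordinates 1..3
    int n0 = (b * b + p * (c * c + d * d)) % N; // 14 mod 13 = 1

    int x = -1;
    for (int t = 0; t < N; t++)     // brute-force stand-in for ibz_sqrt_mod_p
        if ((t * t + n0) % N == 0) {
            x = t;
            break;
        }

    assert(x == 5);                 // 5^2 + 1 = 26 is divisible by 13
    assert((x * x + n0) % N == 0);
}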
el->denom is even...) + ibz_copy(&(*vec)[3], &el->coord[3]); // double (not optimal if el->denom is even...) + ibz_add(&(*vec)[3], &(*vec)[3], &(*vec)[3]); + ibz_sub(&(*vec)[0], &el->coord[0], &el->coord[3]); + ibz_sub(&(*vec)[1], &el->coord[1], &el->coord[2]); + + assert(ibz_divides(&(*vec)[0], &el->denom)); + assert(ibz_divides(&(*vec)[1], &el->denom)); + assert(ibz_divides(&(*vec)[2], &el->denom)); + assert(ibz_divides(&(*vec)[3], &el->denom)); + + ibz_div(&(*vec)[0], &tmp, &(*vec)[0], &el->denom); + ibz_div(&(*vec)[1], &tmp, &(*vec)[1], &el->denom); + ibz_div(&(*vec)[2], &tmp, &(*vec)[2], &el->denom); + ibz_div(&(*vec)[3], &tmp, &(*vec)[3], &el->denom); + + ibz_finalize(&tmp); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_arm64crypto.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_arm64crypto.h deleted file mode 100644 index 88c4bf48d0..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/randombytes_arm64crypto.h +++ /dev/null @@ -1,27 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -#ifndef RANDOMBYTES_ARM64CRYPTO_H -#define RANDOMBYTES_ARM64CRYPTO_H - -#include - -#define RNG_SUCCESS 0 -#define RNG_BAD_MAXLEN -1 -#define RNG_BAD_OUTBUF -2 -#define RNG_BAD_REQ_LEN -3 - -typedef struct { - unsigned char buffer[16]; - int buffer_pos; - unsigned long length_remaining; - unsigned char key[32]; - unsigned char ctr[16]; -} AES_XOF_struct; - -typedef struct { - unsigned char Key[32]; - unsigned char V[16]; - int reseed_counter; -} AES256_CTR_DRBG_struct; - -#endif /* RANDOMBYTES_ARM64CRYPTO_H */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/rationals.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/rationals.c new file mode 100644 index 0000000000..0c5387e5e8 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/rationals.c @@ -0,0 +1,233 @@ +#include +#include "internal.h" +#include "lll_internals.h" + +void +ibq_init(ibq_t *x) +{ + ibz_init(&((*x)[0])); + ibz_init(&((*x)[1])); + ibz_set(&((*x)[1]), 1); +} + +void +ibq_finalize(ibq_t *x) +{ + ibz_finalize(&((*x)[0])); + ibz_finalize(&((*x)[1])); +} + +void +ibq_mat_4x4_init(ibq_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibq_init(&(*mat)[i][j]); + } + } +} +void +ibq_mat_4x4_finalize(ibq_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibq_finalize(&(*mat)[i][j]); + } + } +} + +void +ibq_vec_4_init(ibq_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibq_init(&(*vec)[i]); + } +} +void +ibq_vec_4_finalize(ibq_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibq_finalize(&(*vec)[i]); + } +} + +void +ibq_mat_4x4_print(const ibq_mat_4x4_t *mat) +{ + printf("matrix: "); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_print(&((*mat)[i][j][0]), 10); + printf("/"); + ibz_print(&((*mat)[i][j][1]), 10); + printf(" "); + } + printf("\n "); + } + printf("\n"); +} + +void +ibq_vec_4_print(const ibq_vec_4_t *vec) +{ + printf("vector: "); + for (int i = 0; i < 4; i++) { + ibz_print(&((*vec)[i][0]), 10); + printf("/"); + ibz_print(&((*vec)[i][1]), 10); + printf(" "); + } + printf("\n\n"); +} + +void +ibq_reduce(ibq_t *x) +{ + ibz_t gcd, r; + ibz_init(&gcd); + ibz_init(&r); + ibz_gcd(&gcd, &((*x)[0]), &((*x)[1])); + ibz_div(&((*x)[0]), &r, &((*x)[0]), &gcd); + assert(ibz_is_zero(&r)); + ibz_div(&((*x)[1]), &r, &((*x)[1]), &gcd); + assert(ibz_is_zero(&r)); + ibz_finalize(&gcd); + ibz_finalize(&r); +} + +void +ibq_add(ibq_t *sum, const ibq_t *a, 
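/*
 * Worked check (plain int arithmetic, not upstream code) of the change of basis performed
 * by quat_change_to_O0_basis() above: an element (a + b*i + c*j + d*k)/denom is rewritten
 * on the O0 basis {1, i, (i+j)/2, (1+k)/2} with coordinates
 * ((a-d)/denom, (b-c)/denom, 2c/denom, 2d/denom).  For (1+k)/2 this gives (0, 0, 0, 1),
 * i.e. the fourth basis vector itself.
 */
#include <assert.h>

static void
o0_basis_change_sketch(void)
{
    // (1 + k)/2 corresponds to (a, b, c, d, denom) = (1, 0, 0, 1, 2)
    int a = 1, b = 0, c = 0, d = 1, denom = 2;

    int v0 = (a - d) / denom; // coefficient of 1
    int v1 = (b - c) / denom; // coefficient of i
    int v2 = (2 * c) / denom; // coefficient of (i+j)/2
    int v3 = (2 * d) / denom; // coefficient of (1+k)/2

    assert(v0 == 0 && v1 == 0 && v2 == 0 && v3 == 1);
}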
const ibq_t *b) +{ + ibz_t add, prod; + ibz_init(&add); + ibz_init(&prod); + + ibz_mul(&add, &((*a)[0]), &((*b)[1])); + ibz_mul(&prod, &((*b)[0]), &((*a)[1])); + ibz_add(&((*sum)[0]), &add, &prod); + ibz_mul(&((*sum)[1]), &((*a)[1]), &((*b)[1])); + ibz_finalize(&add); + ibz_finalize(&prod); +} + +void +ibq_neg(ibq_t *neg, const ibq_t *x) +{ + ibz_copy(&((*neg)[1]), &((*x)[1])); + ibz_neg(&((*neg)[0]), &((*x)[0])); +} + +void +ibq_sub(ibq_t *diff, const ibq_t *a, const ibq_t *b) +{ + ibq_t neg; + ibq_init(&neg); + ibq_neg(&neg, b); + ibq_add(diff, a, &neg); + ibq_finalize(&neg); +} + +void +ibq_abs(ibq_t *abs, const ibq_t *x) // once +{ + ibq_t neg; + ibq_init(&neg); + ibq_neg(&neg, x); + if (ibq_cmp(x, &neg) < 0) + ibq_copy(abs, &neg); + else + ibq_copy(abs, x); + ibq_finalize(&neg); +} + +void +ibq_mul(ibq_t *prod, const ibq_t *a, const ibq_t *b) +{ + ibz_mul(&((*prod)[0]), &((*a)[0]), &((*b)[0])); + ibz_mul(&((*prod)[1]), &((*a)[1]), &((*b)[1])); +} + +int +ibq_inv(ibq_t *inv, const ibq_t *x) +{ + int res = !ibq_is_zero(x); + if (res) { + ibz_copy(&((*inv)[0]), &((*x)[0])); + ibz_copy(&((*inv)[1]), &((*x)[1])); + ibz_swap(&((*inv)[1]), &((*inv)[0])); + } + return (res); +} + +int +ibq_cmp(const ibq_t *a, const ibq_t *b) +{ + ibz_t x, y; + ibz_init(&x); + ibz_init(&y); + ibz_copy(&x, &((*a)[0])); + ibz_copy(&y, &((*b)[0])); + ibz_mul(&y, &y, &((*a)[1])); + ibz_mul(&x, &x, &((*b)[1])); + if (ibz_cmp(&((*a)[1]), &ibz_const_zero) > 0) { + ibz_neg(&y, &y); + ibz_neg(&x, &x); + } + if (ibz_cmp(&((*b)[1]), &ibz_const_zero) > 0) { + ibz_neg(&y, &y); + ibz_neg(&x, &x); + } + int res = ibz_cmp(&x, &y); + ibz_finalize(&x); + ibz_finalize(&y); + return (res); +} + +int +ibq_is_zero(const ibq_t *x) +{ + return ibz_is_zero(&((*x)[0])); +} + +int +ibq_is_one(const ibq_t *x) +{ + return (0 == ibz_cmp(&((*x)[0]), &((*x)[1]))); +} + +int +ibq_set(ibq_t *q, const ibz_t *a, const ibz_t *b) +{ + ibz_copy(&((*q)[0]), a); + ibz_copy(&((*q)[1]), b); + return !ibz_is_zero(b); +} + +void +ibq_copy(ibq_t *target, const ibq_t *value) // once +{ + ibz_copy(&((*target)[0]), &((*value)[0])); + ibz_copy(&((*target)[1]), &((*value)[1])); +} + +int +ibq_is_ibz(const ibq_t *q) +{ + ibz_t r; + ibz_init(&r); + ibz_mod(&r, &((*q)[0]), &((*q)[1])); + int res = ibz_is_zero(&r); + ibz_finalize(&r); + return (res); +} + +int +ibq_to_ibz(ibz_t *z, const ibq_t *q) +{ + ibz_t r; + ibz_init(&r); + ibz_div(z, &r, &((*q)[0]), &((*q)[1])); + int res = ibz_is_zero(&r); + ibz_finalize(&r); + return (res); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/vaes256_key_expansion.S b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/vaes256_key_expansion.S deleted file mode 100644 index 2311fa9bc8..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/vaes256_key_expansion.S +++ /dev/null @@ -1,122 +0,0 @@ -#*************************************************************************** -# This implementation is a modified version of the code, -# written by Nir Drucker and Shay Gueron -# AWS Cryptographic Algorithms Group -# (ndrucker@amazon.com, gueron@amazon.com) -# -# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). -# You may not use this file except in compliance with the License. -# A copy of the License is located at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# or in the "license" file accompanying this file. 
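/*
 * Sketch (hypothetical helper, not upstream code) of a property worth keeping in mind when
 * using the ibq_t arithmetic above: ibq_add() and ibq_mul() do not put results in lowest
 * terms, and the predicates are written so that an unreduced fraction still answers
 * correctly; ibq_reduce(), defined in rationals.c above, normalises when that is wanted.
 * Assumes <assert.h> and the ibq_*/ibz_* declarations from lll_internals.h are in scope.
 */
static void
ibq_unreduced_sketch(void)
{
    ibz_t one, two;
    ibq_t half, sum;

    ibz_init(&one);
    ibz_init(&two);
    ibq_init(&half);
    ibq_init(&sum);

    ibz_set(&one, 1);
    ibz_set(&two, 2);
    ibq_set(&half, &one, &two);  // 1/2
    ibq_add(&sum, &half, &half); // stored as 4/4, not 1/1

    assert(ibq_is_one(&sum));    // compares numerator against denominator
    assert(ibq_is_ibz(&sum));    // 4/4 is still recognised as an integer
    ibq_reduce(&sum);            // now 1/1

    ibq_finalize(&sum);
    ibq_finalize(&half);
    ibz_finalize(&two);
    ibz_finalize(&one);
}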
This file is distributed -# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -# express or implied. See the License for the specific language governing -# permissions and limitations under the License. -# The license is detailed in the file LICENSE.txt, and applies to this file. -#*************************************************************************** - -.intel_syntax noprefix -.data - -.p2align 4, 0x90 -MASK1: -.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d -CON1: -.long 1,1,1,1 - -.set k256_size, 32 - -#if defined(__linux__) && defined(__ELF__) -.section .note.GNU-stack,"",@progbits -#endif -.text - -################################################################################ -# void aes256_key_expansion(OUT aes256_ks_t* ks, IN const uint8_t* key); -# The output parameter must be 16 bytes aligned! -# -#Linux ABI -#define out rdi -#define in rsi - -#define CON xmm0 -#define MASK_REG xmm1 - -#define IN0 xmm2 -#define IN1 xmm3 - -#define TMP1 xmm4 -#define TMP2 xmm5 - -#define ZERO xmm15 - -.macro ROUND1 in0 in1 - add out, k256_size - vpshufb TMP2, \in1, MASK_REG - aesenclast TMP2, CON - vpslld CON, CON, 1 - vpslldq TMP1, \in0, 4 - vpxor \in0, \in0, TMP1 - vpslldq TMP1, TMP1, 4 - vpxor \in0, \in0, TMP1 - vpslldq TMP1, TMP1, 4 - vpxor \in0, \in0, TMP1 - vpxor \in0, \in0, TMP2 - vmovdqa [out], \in0 - -.endm - -.macro ROUND2 - vpshufd TMP2, IN0, 0xff - aesenclast TMP2, ZERO - vpslldq TMP1, IN1, 4 - vpxor IN1, IN1, TMP1 - vpslldq TMP1, TMP1, 4 - vpxor IN1, IN1, TMP1 - vpslldq TMP1, TMP1, 4 - vpxor IN1, IN1, TMP1 - vpxor IN1, IN1, TMP2 - vmovdqa [out+16], IN1 -.endm - -#ifdef __APPLE__ -#define AES256_KEY_EXPANSION _aes256_key_expansion -#else -#define AES256_KEY_EXPANSION aes256_key_expansion -#endif - -#ifndef __APPLE__ -.type AES256_KEY_EXPANSION,@function -.hidden AES256_KEY_EXPANSION -#endif -.globl AES256_KEY_EXPANSION -AES256_KEY_EXPANSION: - vmovdqu IN0, [in] - vmovdqu IN1, [in+16] - vmovdqa [out], IN0 - vmovdqa [out+16], IN1 - - vmovdqa CON, [rip+CON1] - vmovdqa MASK_REG, [rip+MASK1] - - vpxor ZERO, ZERO, ZERO - - mov ax, 6 -.loop256: - - ROUND1 IN0, IN1 - dec ax - ROUND2 - jne .loop256 - - ROUND1 IN0, IN1 - - ret -#ifndef __APPLE__ -.size AES256_KEY_EXPANSION, .-AES256_KEY_EXPANSION -#endif - diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/aes.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/aes.h deleted file mode 100644 index e35ec3705b..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/aes.h +++ /dev/null @@ -1,29 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -#ifndef AES_H -#define AES_H - -#include -#include - -void AES_256_ECB(const uint8_t *input, const uint8_t *key, uint8_t *output); -#define AES_ECB_encrypt AES_256_ECB - -#ifdef ENABLE_AESNI -int AES_128_CTR_NI(unsigned char *output, - size_t outputByteLen, - const unsigned char *input, - size_t inputByteLen); -int AES_128_CTR_4R_NI(unsigned char *output, - size_t outputByteLen, - const unsigned char *input, - size_t inputByteLen); -#define AES_128_CTR AES_128_CTR_NI -#else -int AES_128_CTR(unsigned char *output, - size_t outputByteLen, - const unsigned char *input, - size_t inputByteLen); -#endif - -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/aes_c.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/aes_c.c deleted file mode 100644 index 5e2d7d6161..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/aes_c.c +++ /dev/null @@ -1,783 +0,0 @@ -// SPDX-License-Identifier: MIT and Apache-2.0 - -/* - * AES implementation based on code from 
PQClean, - * which is in turn based on BearSSL (https://bearssl.org/) - * by Thomas Pornin. - * - * - * Copyright (c) 2016 Thomas Pornin - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include -#include -#include - -#define AES128_KEYBYTES 16 -#define AES192_KEYBYTES 24 -#define AES256_KEYBYTES 32 -#define AESCTR_NONCEBYTES 12 -#define AES_BLOCKBYTES 16 - -#define PQC_AES128_STATESIZE 88 -typedef struct -{ - uint64_t sk_exp[PQC_AES128_STATESIZE]; -} aes128ctx; - -#define PQC_AES192_STATESIZE 104 -typedef struct -{ - uint64_t sk_exp[PQC_AES192_STATESIZE]; -} aes192ctx; - -#define PQC_AES256_STATESIZE 120 -typedef struct -{ - uint64_t sk_exp[PQC_AES256_STATESIZE]; -} aes256ctx; - -/** Initializes the context **/ -void aes128_ecb_keyexp(aes128ctx *r, const unsigned char *key); - -void aes128_ctr_keyexp(aes128ctx *r, const unsigned char *key); - -void aes128_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes128ctx *ctx); - -void aes128_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes128ctx *ctx); - -/** Frees the context **/ -void aes128_ctx_release(aes128ctx *r); - -/** Initializes the context **/ -void aes192_ecb_keyexp(aes192ctx *r, const unsigned char *key); - -void aes192_ctr_keyexp(aes192ctx *r, const unsigned char *key); - -void aes192_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes192ctx *ctx); - -void aes192_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes192ctx *ctx); - -void aes192_ctx_release(aes192ctx *r); - -/** Initializes the context **/ -void aes256_ecb_keyexp(aes256ctx *r, const unsigned char *key); - -void aes256_ctr_keyexp(aes256ctx *r, const unsigned char *key); - -void aes256_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes256ctx *ctx); - -void aes256_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes256ctx *ctx); - -/** Frees the context **/ -void aes256_ctx_release(aes256ctx *r); - -static inline uint32_t -br_dec32le(const unsigned char *src) -{ - return (uint32_t)src[0] | ((uint32_t)src[1] << 8) | ((uint32_t)src[2] << 16) | - ((uint32_t)src[3] << 24); -} - -static void -br_range_dec32le(uint32_t *v, size_t num, const unsigned char *src) -{ - while (num-- > 0) { - *v++ = br_dec32le(src); - src += 4; - } -} - -static inline uint32_t -br_swap32(uint32_t x) -{ - x = ((x & (uint32_t)0x00FF00FF) << 8) | ((x >> 8) & (uint32_t)0x00FF00FF); - return (x << 16) | (x >> 16); -} - -static 
inline void -br_enc32le(unsigned char *dst, uint32_t x) -{ - dst[0] = (unsigned char)x; - dst[1] = (unsigned char)(x >> 8); - dst[2] = (unsigned char)(x >> 16); - dst[3] = (unsigned char)(x >> 24); -} - -static void -br_range_enc32le(unsigned char *dst, const uint32_t *v, size_t num) -{ - while (num-- > 0) { - br_enc32le(dst, *v++); - dst += 4; - } -} - -static void -br_aes_ct64_bitslice_Sbox(uint64_t *q) -{ - /* - * This S-box implementation is a straightforward translation of - * the circuit described by Boyar and Peralta in "A new - * combinational logic minimization technique with applications - * to cryptology" (https://eprint.iacr.org/2009/191.pdf). - * - * Note that variables x* (input) and s* (output) are numbered - * in "reverse" order (x0 is the high bit, x7 is the low bit). - */ - - uint64_t x0, x1, x2, x3, x4, x5, x6, x7; - uint64_t y1, y2, y3, y4, y5, y6, y7, y8, y9; - uint64_t y10, y11, y12, y13, y14, y15, y16, y17, y18, y19; - uint64_t y20, y21; - uint64_t z0, z1, z2, z3, z4, z5, z6, z7, z8, z9; - uint64_t z10, z11, z12, z13, z14, z15, z16, z17; - uint64_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; - uint64_t t10, t11, t12, t13, t14, t15, t16, t17, t18, t19; - uint64_t t20, t21, t22, t23, t24, t25, t26, t27, t28, t29; - uint64_t t30, t31, t32, t33, t34, t35, t36, t37, t38, t39; - uint64_t t40, t41, t42, t43, t44, t45, t46, t47, t48, t49; - uint64_t t50, t51, t52, t53, t54, t55, t56, t57, t58, t59; - uint64_t t60, t61, t62, t63, t64, t65, t66, t67; - uint64_t s0, s1, s2, s3, s4, s5, s6, s7; - - x0 = q[7]; - x1 = q[6]; - x2 = q[5]; - x3 = q[4]; - x4 = q[3]; - x5 = q[2]; - x6 = q[1]; - x7 = q[0]; - - /* - * Top linear transformation. - */ - y14 = x3 ^ x5; - y13 = x0 ^ x6; - y9 = x0 ^ x3; - y8 = x0 ^ x5; - t0 = x1 ^ x2; - y1 = t0 ^ x7; - y4 = y1 ^ x3; - y12 = y13 ^ y14; - y2 = y1 ^ x0; - y5 = y1 ^ x6; - y3 = y5 ^ y8; - t1 = x4 ^ y12; - y15 = t1 ^ x5; - y20 = t1 ^ x1; - y6 = y15 ^ x7; - y10 = y15 ^ t0; - y11 = y20 ^ y9; - y7 = x7 ^ y11; - y17 = y10 ^ y11; - y19 = y10 ^ y8; - y16 = t0 ^ y11; - y21 = y13 ^ y16; - y18 = x0 ^ y16; - - /* - * Non-linear section. - */ - t2 = y12 & y15; - t3 = y3 & y6; - t4 = t3 ^ t2; - t5 = y4 & x7; - t6 = t5 ^ t2; - t7 = y13 & y16; - t8 = y5 & y1; - t9 = t8 ^ t7; - t10 = y2 & y7; - t11 = t10 ^ t7; - t12 = y9 & y11; - t13 = y14 & y17; - t14 = t13 ^ t12; - t15 = y8 & y10; - t16 = t15 ^ t12; - t17 = t4 ^ t14; - t18 = t6 ^ t16; - t19 = t9 ^ t14; - t20 = t11 ^ t16; - t21 = t17 ^ y20; - t22 = t18 ^ y19; - t23 = t19 ^ y21; - t24 = t20 ^ y18; - - t25 = t21 ^ t22; - t26 = t21 & t23; - t27 = t24 ^ t26; - t28 = t25 & t27; - t29 = t28 ^ t22; - t30 = t23 ^ t24; - t31 = t22 ^ t26; - t32 = t31 & t30; - t33 = t32 ^ t24; - t34 = t23 ^ t33; - t35 = t27 ^ t33; - t36 = t24 & t35; - t37 = t36 ^ t34; - t38 = t27 ^ t36; - t39 = t29 & t38; - t40 = t25 ^ t39; - - t41 = t40 ^ t37; - t42 = t29 ^ t33; - t43 = t29 ^ t40; - t44 = t33 ^ t37; - t45 = t42 ^ t41; - z0 = t44 & y15; - z1 = t37 & y6; - z2 = t33 & x7; - z3 = t43 & y16; - z4 = t40 & y1; - z5 = t29 & y7; - z6 = t42 & y11; - z7 = t45 & y17; - z8 = t41 & y10; - z9 = t44 & y12; - z10 = t37 & y3; - z11 = t33 & y4; - z12 = t43 & y13; - z13 = t40 & y5; - z14 = t29 & y2; - z15 = t42 & y9; - z16 = t45 & y14; - z17 = t41 & y8; - - /* - * Bottom linear transformation. 
- */ - t46 = z15 ^ z16; - t47 = z10 ^ z11; - t48 = z5 ^ z13; - t49 = z9 ^ z10; - t50 = z2 ^ z12; - t51 = z2 ^ z5; - t52 = z7 ^ z8; - t53 = z0 ^ z3; - t54 = z6 ^ z7; - t55 = z16 ^ z17; - t56 = z12 ^ t48; - t57 = t50 ^ t53; - t58 = z4 ^ t46; - t59 = z3 ^ t54; - t60 = t46 ^ t57; - t61 = z14 ^ t57; - t62 = t52 ^ t58; - t63 = t49 ^ t58; - t64 = z4 ^ t59; - t65 = t61 ^ t62; - t66 = z1 ^ t63; - s0 = t59 ^ t63; - s6 = t56 ^ ~t62; - s7 = t48 ^ ~t60; - t67 = t64 ^ t65; - s3 = t53 ^ t66; - s4 = t51 ^ t66; - s5 = t47 ^ t65; - s1 = t64 ^ ~s3; - s2 = t55 ^ ~t67; - - q[7] = s0; - q[6] = s1; - q[5] = s2; - q[4] = s3; - q[3] = s4; - q[2] = s5; - q[1] = s6; - q[0] = s7; -} - -static void -br_aes_ct64_ortho(uint64_t *q) -{ -#define SWAPN(cl, ch, s, x, y) \ - do { \ - uint64_t a, b; \ - a = (x); \ - b = (y); \ - (x) = (a & (uint64_t)(cl)) | ((b & (uint64_t)(cl)) << (s)); \ - (y) = ((a & (uint64_t)(ch)) >> (s)) | (b & (uint64_t)(ch)); \ - } while (0) - -#define SWAP2(x, y) SWAPN(0x5555555555555555, 0xAAAAAAAAAAAAAAAA, 1, x, y) -#define SWAP4(x, y) SWAPN(0x3333333333333333, 0xCCCCCCCCCCCCCCCC, 2, x, y) -#define SWAP8(x, y) SWAPN(0x0F0F0F0F0F0F0F0F, 0xF0F0F0F0F0F0F0F0, 4, x, y) - - SWAP2(q[0], q[1]); - SWAP2(q[2], q[3]); - SWAP2(q[4], q[5]); - SWAP2(q[6], q[7]); - - SWAP4(q[0], q[2]); - SWAP4(q[1], q[3]); - SWAP4(q[4], q[6]); - SWAP4(q[5], q[7]); - - SWAP8(q[0], q[4]); - SWAP8(q[1], q[5]); - SWAP8(q[2], q[6]); - SWAP8(q[3], q[7]); -} - -static void -br_aes_ct64_interleave_in(uint64_t *q0, uint64_t *q1, const uint32_t *w) -{ - uint64_t x0, x1, x2, x3; - - x0 = w[0]; - x1 = w[1]; - x2 = w[2]; - x3 = w[3]; - x0 |= (x0 << 16); - x1 |= (x1 << 16); - x2 |= (x2 << 16); - x3 |= (x3 << 16); - x0 &= (uint64_t)0x0000FFFF0000FFFF; - x1 &= (uint64_t)0x0000FFFF0000FFFF; - x2 &= (uint64_t)0x0000FFFF0000FFFF; - x3 &= (uint64_t)0x0000FFFF0000FFFF; - x0 |= (x0 << 8); - x1 |= (x1 << 8); - x2 |= (x2 << 8); - x3 |= (x3 << 8); - x0 &= (uint64_t)0x00FF00FF00FF00FF; - x1 &= (uint64_t)0x00FF00FF00FF00FF; - x2 &= (uint64_t)0x00FF00FF00FF00FF; - x3 &= (uint64_t)0x00FF00FF00FF00FF; - *q0 = x0 | (x2 << 8); - *q1 = x1 | (x3 << 8); -} - -static void -br_aes_ct64_interleave_out(uint32_t *w, uint64_t q0, uint64_t q1) -{ - uint64_t x0, x1, x2, x3; - - x0 = q0 & (uint64_t)0x00FF00FF00FF00FF; - x1 = q1 & (uint64_t)0x00FF00FF00FF00FF; - x2 = (q0 >> 8) & (uint64_t)0x00FF00FF00FF00FF; - x3 = (q1 >> 8) & (uint64_t)0x00FF00FF00FF00FF; - x0 |= (x0 >> 8); - x1 |= (x1 >> 8); - x2 |= (x2 >> 8); - x3 |= (x3 >> 8); - x0 &= (uint64_t)0x0000FFFF0000FFFF; - x1 &= (uint64_t)0x0000FFFF0000FFFF; - x2 &= (uint64_t)0x0000FFFF0000FFFF; - x3 &= (uint64_t)0x0000FFFF0000FFFF; - w[0] = (uint32_t)x0 | (uint32_t)(x0 >> 16); - w[1] = (uint32_t)x1 | (uint32_t)(x1 >> 16); - w[2] = (uint32_t)x2 | (uint32_t)(x2 >> 16); - w[3] = (uint32_t)x3 | (uint32_t)(x3 >> 16); -} - -static const unsigned char Rcon[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36 }; - -static uint32_t -sub_word(uint32_t x) -{ - uint64_t q[8]; - - memset(q, 0, sizeof q); - q[0] = x; - br_aes_ct64_ortho(q); - br_aes_ct64_bitslice_Sbox(q); - br_aes_ct64_ortho(q); - return (uint32_t)q[0]; -} - -static void -br_aes_ct64_keysched(uint64_t *comp_skey, const unsigned char *key, unsigned int key_len) -{ - unsigned int i, j, k, nk, nkf; - uint32_t tmp; - uint32_t skey[60]; - unsigned nrounds = 10 + ((key_len - 16) >> 2); - - nk = (key_len >> 2); - nkf = ((nrounds + 1) << 2); - br_range_dec32le(skey, (key_len >> 2), key); - tmp = skey[(key_len >> 2) - 1]; - for (i = nk, j = 0, k = 0; i < nkf; i++) { - 
if (j == 0) { - tmp = (tmp << 24) | (tmp >> 8); - tmp = sub_word(tmp) ^ Rcon[k]; - } else if (nk > 6 && j == 4) { - tmp = sub_word(tmp); - } - tmp ^= skey[i - nk]; - skey[i] = tmp; - if (++j == nk) { - j = 0; - k++; - } - } - - for (i = 0, j = 0; i < nkf; i += 4, j += 2) { - uint64_t q[8]; - - br_aes_ct64_interleave_in(&q[0], &q[4], skey + i); - q[1] = q[0]; - q[2] = q[0]; - q[3] = q[0]; - q[5] = q[4]; - q[6] = q[4]; - q[7] = q[4]; - br_aes_ct64_ortho(q); - comp_skey[j + 0] = - (q[0] & (uint64_t)0x1111111111111111) | (q[1] & (uint64_t)0x2222222222222222) | - (q[2] & (uint64_t)0x4444444444444444) | (q[3] & (uint64_t)0x8888888888888888); - comp_skey[j + 1] = - (q[4] & (uint64_t)0x1111111111111111) | (q[5] & (uint64_t)0x2222222222222222) | - (q[6] & (uint64_t)0x4444444444444444) | (q[7] & (uint64_t)0x8888888888888888); - } -} - -static void -br_aes_ct64_skey_expand(uint64_t *skey, const uint64_t *comp_skey, unsigned int nrounds) -{ - unsigned u, v, n; - - n = (nrounds + 1) << 1; - for (u = 0, v = 0; u < n; u++, v += 4) { - uint64_t x0, x1, x2, x3; - - x0 = x1 = x2 = x3 = comp_skey[u]; - x0 &= (uint64_t)0x1111111111111111; - x1 &= (uint64_t)0x2222222222222222; - x2 &= (uint64_t)0x4444444444444444; - x3 &= (uint64_t)0x8888888888888888; - x1 >>= 1; - x2 >>= 2; - x3 >>= 3; - skey[v + 0] = (x0 << 4) - x0; - skey[v + 1] = (x1 << 4) - x1; - skey[v + 2] = (x2 << 4) - x2; - skey[v + 3] = (x3 << 4) - x3; - } -} - -static inline void -add_round_key(uint64_t *q, const uint64_t *sk) -{ - q[0] ^= sk[0]; - q[1] ^= sk[1]; - q[2] ^= sk[2]; - q[3] ^= sk[3]; - q[4] ^= sk[4]; - q[5] ^= sk[5]; - q[6] ^= sk[6]; - q[7] ^= sk[7]; -} - -static inline void -shift_rows(uint64_t *q) -{ - int i; - - for (i = 0; i < 8; i++) { - uint64_t x; - - x = q[i]; - q[i] = - (x & (uint64_t)0x000000000000FFFF) | ((x & (uint64_t)0x00000000FFF00000) >> 4) | - ((x & (uint64_t)0x00000000000F0000) << 12) | ((x & (uint64_t)0x0000FF0000000000) >> 8) | - ((x & (uint64_t)0x000000FF00000000) << 8) | ((x & (uint64_t)0xF000000000000000) >> 12) | - ((x & (uint64_t)0x0FFF000000000000) << 4); - } -} - -static inline uint64_t -rotr32(uint64_t x) -{ - return (x << 32) | (x >> 32); -} - -static inline void -mix_columns(uint64_t *q) -{ - uint64_t q0, q1, q2, q3, q4, q5, q6, q7; - uint64_t r0, r1, r2, r3, r4, r5, r6, r7; - - q0 = q[0]; - q1 = q[1]; - q2 = q[2]; - q3 = q[3]; - q4 = q[4]; - q5 = q[5]; - q6 = q[6]; - q7 = q[7]; - r0 = (q0 >> 16) | (q0 << 48); - r1 = (q1 >> 16) | (q1 << 48); - r2 = (q2 >> 16) | (q2 << 48); - r3 = (q3 >> 16) | (q3 << 48); - r4 = (q4 >> 16) | (q4 << 48); - r5 = (q5 >> 16) | (q5 << 48); - r6 = (q6 >> 16) | (q6 << 48); - r7 = (q7 >> 16) | (q7 << 48); - - q[0] = q7 ^ r7 ^ r0 ^ rotr32(q0 ^ r0); - q[1] = q0 ^ r0 ^ q7 ^ r7 ^ r1 ^ rotr32(q1 ^ r1); - q[2] = q1 ^ r1 ^ r2 ^ rotr32(q2 ^ r2); - q[3] = q2 ^ r2 ^ q7 ^ r7 ^ r3 ^ rotr32(q3 ^ r3); - q[4] = q3 ^ r3 ^ q7 ^ r7 ^ r4 ^ rotr32(q4 ^ r4); - q[5] = q4 ^ r4 ^ r5 ^ rotr32(q5 ^ r5); - q[6] = q5 ^ r5 ^ r6 ^ rotr32(q6 ^ r6); - q[7] = q6 ^ r6 ^ r7 ^ rotr32(q7 ^ r7); -} - -static void -inc4_be(uint32_t *x) -{ - uint32_t t = br_swap32(*x) + 4; - *x = br_swap32(t); -} - -static void -aes_ecb4x(unsigned char out[64], - const uint32_t ivw[16], - const uint64_t *sk_exp, - unsigned int nrounds) -{ - uint32_t w[16]; - uint64_t q[8]; - unsigned int i; - - memcpy(w, ivw, sizeof(w)); - for (i = 0; i < 4; i++) { - br_aes_ct64_interleave_in(&q[i], &q[i + 4], w + (i << 2)); - } - br_aes_ct64_ortho(q); - - add_round_key(q, sk_exp); - for (i = 1; i < nrounds; i++) { - br_aes_ct64_bitslice_Sbox(q); - 
shift_rows(q); - mix_columns(q); - add_round_key(q, sk_exp + (i << 3)); - } - br_aes_ct64_bitslice_Sbox(q); - shift_rows(q); - add_round_key(q, sk_exp + 8 * nrounds); - - br_aes_ct64_ortho(q); - for (i = 0; i < 4; i++) { - br_aes_ct64_interleave_out(w + (i << 2), q[i], q[i + 4]); - } - br_range_enc32le(out, w, 16); -} - -static void -aes_ctr4x(unsigned char out[64], uint32_t ivw[16], const uint64_t *sk_exp, unsigned int nrounds) -{ - aes_ecb4x(out, ivw, sk_exp, nrounds); - - /* Increase counter for next 4 blocks */ - inc4_be(ivw + 3); - inc4_be(ivw + 7); - inc4_be(ivw + 11); - inc4_be(ivw + 15); -} - -static void -aes_ecb(unsigned char *out, - const unsigned char *in, - size_t nblocks, - const uint64_t *rkeys, - unsigned int nrounds) -{ - uint32_t blocks[16]; - unsigned char t[64]; - - while (nblocks >= 4) { - br_range_dec32le(blocks, 16, in); - aes_ecb4x(out, blocks, rkeys, nrounds); - nblocks -= 4; - in += 64; - out += 64; - } - - if (nblocks) { - br_range_dec32le(blocks, nblocks * 4, in); - aes_ecb4x(t, blocks, rkeys, nrounds); - memcpy(out, t, nblocks * 16); - } -} - -static void -aes_ctr(unsigned char *out, - size_t outlen, - const unsigned char *iv, - const uint64_t *rkeys, - unsigned int nrounds) -{ - uint32_t ivw[16]; - size_t i; - uint32_t cc = 0; - - br_range_dec32le(ivw, 3, iv); - memcpy(ivw + 4, ivw, 3 * sizeof(uint32_t)); - memcpy(ivw + 8, ivw, 3 * sizeof(uint32_t)); - memcpy(ivw + 12, ivw, 3 * sizeof(uint32_t)); - ivw[3] = br_swap32(cc); - ivw[7] = br_swap32(cc + 1); - ivw[11] = br_swap32(cc + 2); - ivw[15] = br_swap32(cc + 3); - - while (outlen > 64) { - aes_ctr4x(out, ivw, rkeys, nrounds); - out += 64; - outlen -= 64; - } - if (outlen > 0) { - unsigned char tmp[64]; - aes_ctr4x(tmp, ivw, rkeys, nrounds); - for (i = 0; i < outlen; i++) { - out[i] = tmp[i]; - } - } -} - -void -aes128_ecb_keyexp(aes128ctx *r, const unsigned char *key) -{ - uint64_t skey[22]; - - br_aes_ct64_keysched(skey, key, 16); - br_aes_ct64_skey_expand(r->sk_exp, skey, 10); -} - -void -aes128_ctr_keyexp(aes128ctx *r, const unsigned char *key) -{ - aes128_ecb_keyexp(r, key); -} - -void -aes192_ecb_keyexp(aes192ctx *r, const unsigned char *key) -{ - uint64_t skey[26]; - - br_aes_ct64_keysched(skey, key, 24); - br_aes_ct64_skey_expand(r->sk_exp, skey, 12); -} - -void -aes192_ctr_keyexp(aes192ctx *r, const unsigned char *key) -{ - aes192_ecb_keyexp(r, key); -} - -void -aes256_ecb_keyexp(aes256ctx *r, const unsigned char *key) -{ - uint64_t skey[30]; - - br_aes_ct64_keysched(skey, key, 32); - br_aes_ct64_skey_expand(r->sk_exp, skey, 14); -} - -void -aes256_ctr_keyexp(aes256ctx *r, const unsigned char *key) -{ - aes256_ecb_keyexp(r, key); -} - -void -aes128_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes128ctx *ctx) -{ - aes_ecb(out, in, nblocks, ctx->sk_exp, 10); -} - -void -aes128_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes128ctx *ctx) -{ - aes_ctr(out, outlen, iv, ctx->sk_exp, 10); -} - -void -aes192_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes192ctx *ctx) -{ - aes_ecb(out, in, nblocks, ctx->sk_exp, 12); -} - -void -aes192_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes192ctx *ctx) -{ - aes_ctr(out, outlen, iv, ctx->sk_exp, 12); -} - -void -aes256_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes256ctx *ctx) -{ - aes_ecb(out, in, nblocks, ctx->sk_exp, 14); -} - -void -aes256_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes256ctx *ctx) -{ - 
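/*
 * Sketch (not upstream code) of the counter-block convention used by aes_ctr() above: each
 * 16-byte AES input block is the 12-byte nonce followed by a 32-bit big-endian block
 * counter starting at 0, and the routine above advances four such counters per batch.
 */
#include <stdint.h>
#include <string.h>

static void
ctr_block_sketch(uint8_t block[16], const uint8_t nonce[12], uint32_t ctr)
{
    memcpy(block, nonce, 12);
    block[12] = (uint8_t)(ctr >> 24);
    block[13] = (uint8_t)(ctr >> 16);
    block[14] = (uint8_t)(ctr >> 8);
    block[15] = (uint8_t)ctr;
}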
aes_ctr(out, outlen, iv, ctx->sk_exp, 14); -} - -void -aes128_ctx_release(aes128ctx *r) -{ -} - -void -aes192_ctx_release(aes192ctx *r) -{ -} - -void -aes256_ctx_release(aes256ctx *r) -{ -} - -int -AES_128_CTR(unsigned char *output, - size_t outputByteLen, - const unsigned char *input, - size_t inputByteLen) -{ - aes128ctx ctx; - const unsigned char iv[16] = { 0 }; - - aes128_ctr_keyexp(&ctx, input); - aes128_ctr(output, outputByteLen, iv, &ctx); - aes128_ctx_release(&ctx); - - return (int)outputByteLen; -} - -void -AES_256_ECB(const uint8_t *input, const unsigned char *key, unsigned char *output) -{ - aes256ctx ctx; - - aes256_ecb_keyexp(&ctx, key); - aes256_ecb(output, input, 1, &ctx); - aes256_ctx_release(&ctx); -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fips202.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fips202.c deleted file mode 100644 index f2992d8c7f..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fips202.c +++ /dev/null @@ -1,876 +0,0 @@ -// SPDX-License-Identifier: PD and Apache-2.0 - -/* FIPS202 implementation based on code from PQClean, - * which is in turn based based on the public domain implementation in - * crypto_hash/keccakc512/simple/ from http://bench.cr.yp.to/supercop.html - * by Ronny Van Keer - * and the public domain "TweetFips202" implementation - * from https://twitter.com/tweetfips202 - * by Gilles Van Assche, Daniel J. Bernstein, and Peter Schwabe */ - -#include -#include -#include -#include - -#include "fips202.h" - -#define NROUNDS 24 -#define ROL(a, offset) (((a) << (offset)) ^ ((a) >> (64 - (offset)))) - -/************************************************* - * Name: load64 - * - * Description: Load 8 bytes into uint64_t in little-endian order - * - * Arguments: - const uint8_t *x: pointer to input byte array - * - * Returns the loaded 64-bit unsigned integer - **************************************************/ -static uint64_t load64(const uint8_t *x) { - uint64_t r = 0; - for (size_t i = 0; i < 8; ++i) { - r |= (uint64_t)x[i] << 8 * i; - } - - return r; -} - -/************************************************* - * Name: store64 - * - * Description: Store a 64-bit integer to a byte array in little-endian order - * - * Arguments: - uint8_t *x: pointer to the output byte array - * - uint64_t u: input 64-bit unsigned integer - **************************************************/ -static void store64(uint8_t *x, uint64_t u) { - for (size_t i = 0; i < 8; ++i) { - x[i] = (uint8_t) (u >> 8 * i); - } -} - -/* Keccak round constants */ -static const uint64_t KeccakF_RoundConstants[NROUNDS] = { - 0x0000000000000001ULL, 0x0000000000008082ULL, - 0x800000000000808aULL, 0x8000000080008000ULL, - 0x000000000000808bULL, 0x0000000080000001ULL, - 0x8000000080008081ULL, 0x8000000000008009ULL, - 0x000000000000008aULL, 0x0000000000000088ULL, - 0x0000000080008009ULL, 0x000000008000000aULL, - 0x000000008000808bULL, 0x800000000000008bULL, - 0x8000000000008089ULL, 0x8000000000008003ULL, - 0x8000000000008002ULL, 0x8000000000000080ULL, - 0x000000000000800aULL, 0x800000008000000aULL, - 0x8000000080008081ULL, 0x8000000000008080ULL, - 0x0000000080000001ULL, 0x8000000080008008ULL -}; - -/************************************************* - * Name: KeccakF1600_StatePermute - * - * Description: The Keccak F1600 Permutation - * - * Arguments: - uint64_t *state: pointer to input/output Keccak state - **************************************************/ -static void KeccakF1600_StatePermute(uint64_t *state) { - int round; - - uint64_t Aba, Abe, Abi, Abo, Abu; 
- uint64_t Aga, Age, Agi, Ago, Agu; - uint64_t Aka, Ake, Aki, Ako, Aku; - uint64_t Ama, Ame, Ami, Amo, Amu; - uint64_t Asa, Ase, Asi, Aso, Asu; - uint64_t BCa, BCe, BCi, BCo, BCu; - uint64_t Da, De, Di, Do, Du; - uint64_t Eba, Ebe, Ebi, Ebo, Ebu; - uint64_t Ega, Ege, Egi, Ego, Egu; - uint64_t Eka, Eke, Eki, Eko, Eku; - uint64_t Ema, Eme, Emi, Emo, Emu; - uint64_t Esa, Ese, Esi, Eso, Esu; - - // copyFromState(A, state) - Aba = state[0]; - Abe = state[1]; - Abi = state[2]; - Abo = state[3]; - Abu = state[4]; - Aga = state[5]; - Age = state[6]; - Agi = state[7]; - Ago = state[8]; - Agu = state[9]; - Aka = state[10]; - Ake = state[11]; - Aki = state[12]; - Ako = state[13]; - Aku = state[14]; - Ama = state[15]; - Ame = state[16]; - Ami = state[17]; - Amo = state[18]; - Amu = state[19]; - Asa = state[20]; - Ase = state[21]; - Asi = state[22]; - Aso = state[23]; - Asu = state[24]; - - for (round = 0; round < NROUNDS; round += 2) { - // prepareTheta - BCa = Aba ^ Aga ^ Aka ^ Ama ^ Asa; - BCe = Abe ^ Age ^ Ake ^ Ame ^ Ase; - BCi = Abi ^ Agi ^ Aki ^ Ami ^ Asi; - BCo = Abo ^ Ago ^ Ako ^ Amo ^ Aso; - BCu = Abu ^ Agu ^ Aku ^ Amu ^ Asu; - - // thetaRhoPiChiIotaPrepareTheta(round , A, E) - Da = BCu ^ ROL(BCe, 1); - De = BCa ^ ROL(BCi, 1); - Di = BCe ^ ROL(BCo, 1); - Do = BCi ^ ROL(BCu, 1); - Du = BCo ^ ROL(BCa, 1); - - Aba ^= Da; - BCa = Aba; - Age ^= De; - BCe = ROL(Age, 44); - Aki ^= Di; - BCi = ROL(Aki, 43); - Amo ^= Do; - BCo = ROL(Amo, 21); - Asu ^= Du; - BCu = ROL(Asu, 14); - Eba = BCa ^ ((~BCe) & BCi); - Eba ^= KeccakF_RoundConstants[round]; - Ebe = BCe ^ ((~BCi) & BCo); - Ebi = BCi ^ ((~BCo) & BCu); - Ebo = BCo ^ ((~BCu) & BCa); - Ebu = BCu ^ ((~BCa) & BCe); - - Abo ^= Do; - BCa = ROL(Abo, 28); - Agu ^= Du; - BCe = ROL(Agu, 20); - Aka ^= Da; - BCi = ROL(Aka, 3); - Ame ^= De; - BCo = ROL(Ame, 45); - Asi ^= Di; - BCu = ROL(Asi, 61); - Ega = BCa ^ ((~BCe) & BCi); - Ege = BCe ^ ((~BCi) & BCo); - Egi = BCi ^ ((~BCo) & BCu); - Ego = BCo ^ ((~BCu) & BCa); - Egu = BCu ^ ((~BCa) & BCe); - - Abe ^= De; - BCa = ROL(Abe, 1); - Agi ^= Di; - BCe = ROL(Agi, 6); - Ako ^= Do; - BCi = ROL(Ako, 25); - Amu ^= Du; - BCo = ROL(Amu, 8); - Asa ^= Da; - BCu = ROL(Asa, 18); - Eka = BCa ^ ((~BCe) & BCi); - Eke = BCe ^ ((~BCi) & BCo); - Eki = BCi ^ ((~BCo) & BCu); - Eko = BCo ^ ((~BCu) & BCa); - Eku = BCu ^ ((~BCa) & BCe); - - Abu ^= Du; - BCa = ROL(Abu, 27); - Aga ^= Da; - BCe = ROL(Aga, 36); - Ake ^= De; - BCi = ROL(Ake, 10); - Ami ^= Di; - BCo = ROL(Ami, 15); - Aso ^= Do; - BCu = ROL(Aso, 56); - Ema = BCa ^ ((~BCe) & BCi); - Eme = BCe ^ ((~BCi) & BCo); - Emi = BCi ^ ((~BCo) & BCu); - Emo = BCo ^ ((~BCu) & BCa); - Emu = BCu ^ ((~BCa) & BCe); - - Abi ^= Di; - BCa = ROL(Abi, 62); - Ago ^= Do; - BCe = ROL(Ago, 55); - Aku ^= Du; - BCi = ROL(Aku, 39); - Ama ^= Da; - BCo = ROL(Ama, 41); - Ase ^= De; - BCu = ROL(Ase, 2); - Esa = BCa ^ ((~BCe) & BCi); - Ese = BCe ^ ((~BCi) & BCo); - Esi = BCi ^ ((~BCo) & BCu); - Eso = BCo ^ ((~BCu) & BCa); - Esu = BCu ^ ((~BCa) & BCe); - - // prepareTheta - BCa = Eba ^ Ega ^ Eka ^ Ema ^ Esa; - BCe = Ebe ^ Ege ^ Eke ^ Eme ^ Ese; - BCi = Ebi ^ Egi ^ Eki ^ Emi ^ Esi; - BCo = Ebo ^ Ego ^ Eko ^ Emo ^ Eso; - BCu = Ebu ^ Egu ^ Eku ^ Emu ^ Esu; - - // thetaRhoPiChiIotaPrepareTheta(round+1, E, A) - Da = BCu ^ ROL(BCe, 1); - De = BCa ^ ROL(BCi, 1); - Di = BCe ^ ROL(BCo, 1); - Do = BCi ^ ROL(BCu, 1); - Du = BCo ^ ROL(BCa, 1); - - Eba ^= Da; - BCa = Eba; - Ege ^= De; - BCe = ROL(Ege, 44); - Eki ^= Di; - BCi = ROL(Eki, 43); - Emo ^= Do; - BCo = ROL(Emo, 21); - Esu ^= Du; - BCu = ROL(Esu, 14); - Aba = BCa ^ 
((~BCe) & BCi); - Aba ^= KeccakF_RoundConstants[round + 1]; - Abe = BCe ^ ((~BCi) & BCo); - Abi = BCi ^ ((~BCo) & BCu); - Abo = BCo ^ ((~BCu) & BCa); - Abu = BCu ^ ((~BCa) & BCe); - - Ebo ^= Do; - BCa = ROL(Ebo, 28); - Egu ^= Du; - BCe = ROL(Egu, 20); - Eka ^= Da; - BCi = ROL(Eka, 3); - Eme ^= De; - BCo = ROL(Eme, 45); - Esi ^= Di; - BCu = ROL(Esi, 61); - Aga = BCa ^ ((~BCe) & BCi); - Age = BCe ^ ((~BCi) & BCo); - Agi = BCi ^ ((~BCo) & BCu); - Ago = BCo ^ ((~BCu) & BCa); - Agu = BCu ^ ((~BCa) & BCe); - - Ebe ^= De; - BCa = ROL(Ebe, 1); - Egi ^= Di; - BCe = ROL(Egi, 6); - Eko ^= Do; - BCi = ROL(Eko, 25); - Emu ^= Du; - BCo = ROL(Emu, 8); - Esa ^= Da; - BCu = ROL(Esa, 18); - Aka = BCa ^ ((~BCe) & BCi); - Ake = BCe ^ ((~BCi) & BCo); - Aki = BCi ^ ((~BCo) & BCu); - Ako = BCo ^ ((~BCu) & BCa); - Aku = BCu ^ ((~BCa) & BCe); - - Ebu ^= Du; - BCa = ROL(Ebu, 27); - Ega ^= Da; - BCe = ROL(Ega, 36); - Eke ^= De; - BCi = ROL(Eke, 10); - Emi ^= Di; - BCo = ROL(Emi, 15); - Eso ^= Do; - BCu = ROL(Eso, 56); - Ama = BCa ^ ((~BCe) & BCi); - Ame = BCe ^ ((~BCi) & BCo); - Ami = BCi ^ ((~BCo) & BCu); - Amo = BCo ^ ((~BCu) & BCa); - Amu = BCu ^ ((~BCa) & BCe); - - Ebi ^= Di; - BCa = ROL(Ebi, 62); - Ego ^= Do; - BCe = ROL(Ego, 55); - Eku ^= Du; - BCi = ROL(Eku, 39); - Ema ^= Da; - BCo = ROL(Ema, 41); - Ese ^= De; - BCu = ROL(Ese, 2); - Asa = BCa ^ ((~BCe) & BCi); - Ase = BCe ^ ((~BCi) & BCo); - Asi = BCi ^ ((~BCo) & BCu); - Aso = BCo ^ ((~BCu) & BCa); - Asu = BCu ^ ((~BCa) & BCe); - } - - // copyToState(state, A) - state[0] = Aba; - state[1] = Abe; - state[2] = Abi; - state[3] = Abo; - state[4] = Abu; - state[5] = Aga; - state[6] = Age; - state[7] = Agi; - state[8] = Ago; - state[9] = Agu; - state[10] = Aka; - state[11] = Ake; - state[12] = Aki; - state[13] = Ako; - state[14] = Aku; - state[15] = Ama; - state[16] = Ame; - state[17] = Ami; - state[18] = Amo; - state[19] = Amu; - state[20] = Asa; - state[21] = Ase; - state[22] = Asi; - state[23] = Aso; - state[24] = Asu; -} - -/************************************************* - * Name: keccak_absorb - * - * Description: Absorb step of Keccak; - * non-incremental, starts by zeroeing the state. - * - * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - * - const uint8_t *m: pointer to input to be absorbed into s - * - size_t mlen: length of input in bytes - * - uint8_t p: domain-separation byte for different - * Keccak-derived functions - **************************************************/ -static void keccak_absorb(uint64_t *s, uint32_t r, const uint8_t *m, - size_t mlen, uint8_t p) { - size_t i; - uint8_t t[200]; - - /* Zero state */ - for (i = 0; i < 25; ++i) { - s[i] = 0; - } - - while (mlen >= r) { - for (i = 0; i < r / 8; ++i) { - s[i] ^= load64(m + 8 * i); - } - - KeccakF1600_StatePermute(s); - mlen -= r; - m += r; - } - - for (i = 0; i < r; ++i) { - t[i] = 0; - } - for (i = 0; i < mlen; ++i) { - t[i] = m[i]; - } - t[i] = p; - t[r - 1] |= 128; - for (i = 0; i < r / 8; ++i) { - s[i] ^= load64(t + 8 * i); - } -} - -/************************************************* - * Name: keccak_squeezeblocks - * - * Description: Squeeze step of Keccak. Squeezes full blocks of r bytes each. - * Modifies the state. Can be called multiple times to keep - * squeezing, i.e., is incremental. 
- * - * Arguments: - uint8_t *h: pointer to output blocks - * - size_t nblocks: number of blocks to be - * squeezed (written to h) - * - uint64_t *s: pointer to input/output Keccak state - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - **************************************************/ -static void keccak_squeezeblocks(uint8_t *h, size_t nblocks, - uint64_t *s, uint32_t r) { - while (nblocks > 0) { - KeccakF1600_StatePermute(s); - for (size_t i = 0; i < (r >> 3); i++) { - store64(h + 8 * i, s[i]); - } - h += r; - nblocks--; - } -} - -/************************************************* - * Name: keccak_inc_init - * - * Description: Initializes the incremental Keccak state to zero. - * - * Arguments: - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. - **************************************************/ -static void keccak_inc_init(uint64_t *s_inc) { - size_t i; - - for (i = 0; i < 25; ++i) { - s_inc[i] = 0; - } - s_inc[25] = 0; -} - -/************************************************* - * Name: keccak_inc_absorb - * - * Description: Incremental keccak absorb - * Preceded by keccak_inc_init, succeeded by keccak_inc_finalize - * - * Arguments: - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - * - const uint8_t *m: pointer to input to be absorbed into s - * - size_t mlen: length of input in bytes - **************************************************/ -static void keccak_inc_absorb(uint64_t *s_inc, uint32_t r, const uint8_t *m, - size_t mlen) { - size_t i; - - /* Recall that s_inc[25] is the non-absorbed bytes xored into the state */ - while (mlen + s_inc[25] >= r) { - for (i = 0; i < r - (uint32_t)s_inc[25]; i++) { - /* Take the i'th byte from message - xor with the s_inc[25] + i'th byte of the state; little-endian */ - s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); - } - mlen -= (size_t)(r - s_inc[25]); - m += r - s_inc[25]; - s_inc[25] = 0; - - KeccakF1600_StatePermute(s_inc); - } - - for (i = 0; i < mlen; i++) { - s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); - } - s_inc[25] += mlen; -} - -/************************************************* - * Name: keccak_inc_finalize - * - * Description: Finalizes Keccak absorb phase, prepares for squeezing - * - * Arguments: - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - * - uint8_t p: domain-separation byte for different - * Keccak-derived functions - **************************************************/ -static void keccak_inc_finalize(uint64_t *s_inc, uint32_t r, uint8_t p) { - /* After keccak_inc_absorb, we are guaranteed that s_inc[25] < r, - so we can always use one more byte for p in the current state. 
*/ - s_inc[s_inc[25] >> 3] ^= (uint64_t)p << (8 * (s_inc[25] & 0x07)); - s_inc[(r - 1) >> 3] ^= (uint64_t)128 << (8 * ((r - 1) & 0x07)); - s_inc[25] = 0; -} - -/************************************************* - * Name: keccak_inc_squeeze - * - * Description: Incremental Keccak squeeze; can be called on byte-level - * - * Arguments: - uint8_t *h: pointer to output bytes - * - size_t outlen: number of bytes to be squeezed - * - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - **************************************************/ -static void keccak_inc_squeeze(uint8_t *h, size_t outlen, - uint64_t *s_inc, uint32_t r) { - size_t i; - - /* First consume any bytes we still have sitting around */ - for (i = 0; i < outlen && i < s_inc[25]; i++) { - /* There are s_inc[25] bytes left, so r - s_inc[25] is the first - available byte. We consume from there, i.e., up to r. */ - h[i] = (uint8_t)(s_inc[(r - s_inc[25] + i) >> 3] >> (8 * ((r - s_inc[25] + i) & 0x07))); - } - h += i; - outlen -= i; - s_inc[25] -= i; - - /* Then squeeze the remaining necessary blocks */ - while (outlen > 0) { - KeccakF1600_StatePermute(s_inc); - - for (i = 0; i < outlen && i < r; i++) { - h[i] = (uint8_t)(s_inc[i >> 3] >> (8 * (i & 0x07))); - } - h += i; - outlen -= i; - s_inc[25] = r - i; - } -} - -void shake128_inc_init(shake128incctx *state) { - keccak_inc_init(state->ctx); -} - -void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHAKE128_RATE, input, inlen); -} - -void shake128_inc_finalize(shake128incctx *state) { - keccak_inc_finalize(state->ctx, SHAKE128_RATE, 0x1F); -} - -void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state) { - keccak_inc_squeeze(output, outlen, state->ctx, SHAKE128_RATE); -} - -void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void shake128_inc_ctx_release(shake128incctx *state) { - (void)state; -} - -void shake256_inc_init(shake256incctx *state) { - keccak_inc_init(state->ctx); -} - -void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHAKE256_RATE, input, inlen); -} - -void shake256_inc_finalize(shake256incctx *state) { - keccak_inc_finalize(state->ctx, SHAKE256_RATE, 0x1F); -} - -void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state) { - keccak_inc_squeeze(output, outlen, state->ctx, SHAKE256_RATE); -} - -void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void shake256_inc_ctx_release(shake256incctx *state) { - (void)state; -} - - -/************************************************* - * Name: shake128_absorb - * - * Description: Absorb step of the SHAKE128 XOF. - * non-incremental, starts by zeroeing the state. 
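As a point of reference, the incremental SHAKE256 interface implemented by this (now removed) file is driven as one init, any number of absorb calls, a single finalize, and then any number of squeeze calls; the replacement fips202.h later in this patch keeps exactly that shape by mapping the shake256incctx type and the init/absorb/finalize/squeeze names onto liboqs' OQS_SHA3_shake256_inc_* API. The sketch below is illustrative only and not part of the sources: derive_two_keys is a hypothetical caller, and the ctx_release call belongs to the bundled implementation shown here rather than to the remapped header.

```c
#include <stdint.h>
#include <stddef.h>

#include "fips202.h"

/* Hypothetical helper: derive two independent 32-byte keys from one seed
 * using the incremental SHAKE256 API declared in this file. */
static void derive_two_keys(uint8_t k1[32], uint8_t k2[32],
                            const uint8_t *seed, size_t seed_len)
{
    shake256incctx ctx;

    shake256_inc_init(&ctx);
    shake256_inc_absorb(&ctx, seed, seed_len); /* absorb may be called repeatedly */
    shake256_inc_finalize(&ctx);               /* pad, then switch to the squeeze phase */
    shake256_inc_squeeze(k1, 32, &ctx);        /* squeezing is incremental as well */
    shake256_inc_squeeze(k2, 32, &ctx);
    shake256_inc_ctx_release(&ctx);            /* a no-op in this implementation */
}
```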
- * - * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state - * - const uint8_t *input: pointer to input to be absorbed - * into s - * - size_t inlen: length of input in bytes - **************************************************/ -void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen) { - keccak_absorb(state->ctx, SHAKE128_RATE, input, inlen, 0x1F); -} - -/************************************************* - * Name: shake128_squeezeblocks - * - * Description: Squeeze step of SHAKE128 XOF. Squeezes full blocks of - * SHAKE128_RATE bytes each. Modifies the state. Can be called - * multiple times to keep squeezing, i.e., is incremental. - * - * Arguments: - uint8_t *output: pointer to output blocks - * - size_t nblocks: number of blocks to be squeezed - * (written to output) - * - shake128ctx *state: pointer to input/output Keccak state - **************************************************/ -void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state) { - keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE128_RATE); -} - -void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); -} - -/** Release the allocated state. Call only once. */ -void shake128_ctx_release(shake128ctx *state) { - (void)state; -} - -/************************************************* - * Name: shake256_absorb - * - * Description: Absorb step of the SHAKE256 XOF. - * non-incremental, starts by zeroeing the state. - * - * Arguments: - shake256ctx *state: pointer to (uninitialized) output Keccak state - * - const uint8_t *input: pointer to input to be absorbed - * into s - * - size_t inlen: length of input in bytes - **************************************************/ -void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen) { - keccak_absorb(state->ctx, SHAKE256_RATE, input, inlen, 0x1F); -} - -/************************************************* - * Name: shake256_squeezeblocks - * - * Description: Squeeze step of SHAKE256 XOF. Squeezes full blocks of - * SHAKE256_RATE bytes each. Modifies the state. Can be called - * multiple times to keep squeezing, i.e., is incremental. - * - * Arguments: - uint8_t *output: pointer to output blocks - * - size_t nblocks: number of blocks to be squeezed - * (written to output) - * - shake256ctx *state: pointer to input/output Keccak state - **************************************************/ -void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state) { - keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE256_RATE); -} - -void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); -} - -/** Release the allocated state. Call only once. 
*/ -void shake256_ctx_release(shake256ctx *state) { - (void)state; -} - -/************************************************* - * Name: shake128 - * - * Description: SHAKE128 XOF with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - size_t outlen: requested output length in bytes - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void shake128(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen) { - size_t nblocks = outlen / SHAKE128_RATE; - uint8_t t[SHAKE128_RATE]; - shake128ctx s; - - shake128_absorb(&s, input, inlen); - shake128_squeezeblocks(output, nblocks, &s); - - output += nblocks * SHAKE128_RATE; - outlen -= nblocks * SHAKE128_RATE; - - if (outlen) { - shake128_squeezeblocks(t, 1, &s); - for (size_t i = 0; i < outlen; ++i) { - output[i] = t[i]; - } - } - shake128_ctx_release(&s); -} - -/************************************************* - * Name: shake256 - * - * Description: SHAKE256 XOF with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - size_t outlen: requested output length in bytes - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void shake256(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen) { - size_t nblocks = outlen / SHAKE256_RATE; - uint8_t t[SHAKE256_RATE]; - shake256ctx s; - - shake256_absorb(&s, input, inlen); - shake256_squeezeblocks(output, nblocks, &s); - - output += nblocks * SHAKE256_RATE; - outlen -= nblocks * SHAKE256_RATE; - - if (outlen) { - shake256_squeezeblocks(t, 1, &s); - for (size_t i = 0; i < outlen; ++i) { - output[i] = t[i]; - } - } - shake256_ctx_release(&s); -} - -void sha3_256_inc_init(sha3_256incctx *state) { - keccak_inc_init(state->ctx); -} - -void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void sha3_256_inc_ctx_release(sha3_256incctx *state) { - (void)state; -} - -void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHA3_256_RATE, input, inlen); -} - -void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state) { - uint8_t t[SHA3_256_RATE]; - keccak_inc_finalize(state->ctx, SHA3_256_RATE, 0x06); - - keccak_squeezeblocks(t, 1, state->ctx, SHA3_256_RATE); - - sha3_256_inc_ctx_release(state); - - for (size_t i = 0; i < 32; i++) { - output[i] = t[i]; - } -} - -/************************************************* - * Name: sha3_256 - * - * Description: SHA3-256 with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen) { - uint64_t s[25]; - uint8_t t[SHA3_256_RATE]; - - /* Absorb input */ - keccak_absorb(s, SHA3_256_RATE, input, inlen, 0x06); - - /* Squeeze output */ - keccak_squeezeblocks(t, 1, s, SHA3_256_RATE); - - for (size_t i = 0; i < 32; i++) { - output[i] = t[i]; - } -} - -void sha3_384_inc_init(sha3_384incctx *state) { - keccak_inc_init(state->ctx); -} - -void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, 
size_t inlen) { - keccak_inc_absorb(state->ctx, SHA3_384_RATE, input, inlen); -} - -void sha3_384_inc_ctx_release(sha3_384incctx *state) { - (void)state; -} - -void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state) { - uint8_t t[SHA3_384_RATE]; - keccak_inc_finalize(state->ctx, SHA3_384_RATE, 0x06); - - keccak_squeezeblocks(t, 1, state->ctx, SHA3_384_RATE); - - sha3_384_inc_ctx_release(state); - - for (size_t i = 0; i < 48; i++) { - output[i] = t[i]; - } -} - -/************************************************* - * Name: sha3_384 - * - * Description: SHA3-256 with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen) { - uint64_t s[25]; - uint8_t t[SHA3_384_RATE]; - - /* Absorb input */ - keccak_absorb(s, SHA3_384_RATE, input, inlen, 0x06); - - /* Squeeze output */ - keccak_squeezeblocks(t, 1, s, SHA3_384_RATE); - - for (size_t i = 0; i < 48; i++) { - output[i] = t[i]; - } -} - -void sha3_512_inc_init(sha3_512incctx *state) { - keccak_inc_init(state->ctx); -} - -void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHA3_512_RATE, input, inlen); -} - -void sha3_512_inc_ctx_release(sha3_512incctx *state) { - (void)state; -} - -void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state) { - uint8_t t[SHA3_512_RATE]; - keccak_inc_finalize(state->ctx, SHA3_512_RATE, 0x06); - - keccak_squeezeblocks(t, 1, state->ctx, SHA3_512_RATE); - - sha3_512_inc_ctx_release(state); - - for (size_t i = 0; i < 64; i++) { - output[i] = t[i]; - } -} - -/************************************************* - * Name: sha3_512 - * - * Description: SHA3-512 with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen) { - uint64_t s[25]; - uint8_t t[SHA3_512_RATE]; - - /* Absorb input */ - keccak_absorb(s, SHA3_512_RATE, input, inlen, 0x06); - - /* Squeeze output */ - keccak_squeezeblocks(t, 1, s, SHA3_512_RATE); - - for (size_t i = 0; i < 64; i++) { - output[i] = t[i]; - } -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fips202.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fips202.h index c29ebd8f9d..21bc0c3f79 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fips202.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fips202.h @@ -3,169 +3,12 @@ #ifndef FIPS202_H #define FIPS202_H -#include -#include +#include -#define SHAKE128_RATE 168 -#define SHAKE256_RATE 136 -#define SHA3_256_RATE 136 -#define SHA3_384_RATE 104 -#define SHA3_512_RATE 72 - -#define PQC_SHAKEINCCTX_U64WORDS 26 -#define PQC_SHAKECTX_U64WORDS 25 - -#define PQC_SHAKEINCCTX_BYTES (sizeof(uint64_t) * 26) -#define PQC_SHAKECTX_BYTES (sizeof(uint64_t) * 25) - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} shake128incctx; - -// Context for non-incremental API -typedef struct { - uint64_t ctx[PQC_SHAKECTX_U64WORDS]; -} shake128ctx; - -// Context for incremental API -typedef struct { - uint64_t 
ctx[PQC_SHAKEINCCTX_U64WORDS]; -} shake256incctx; - -// Context for non-incremental API -typedef struct { - uint64_t ctx[PQC_SHAKECTX_U64WORDS]; -} shake256ctx; - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} sha3_256incctx; - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} sha3_384incctx; - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} sha3_512incctx; - -/* Initialize the state and absorb the provided input. - * - * This function does not support being called multiple times - * with the same state. - */ -void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen); -/* Squeeze output out of the sponge. - * - * Supports being called multiple times - */ -void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state); -/* Free the state */ -void shake128_ctx_release(shake128ctx *state); -/* Copy the state. */ -void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src); - -/* Initialize incremental hashing API */ -void shake128_inc_init(shake128incctx *state); -/* Absorb more information into the XOF. - * - * Can be called multiple times. - */ -void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen); -/* Finalize the XOF for squeezing */ -void shake128_inc_finalize(shake128incctx *state); -/* Squeeze output out of the sponge. - * - * Supports being called multiple times - */ -void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state); -/* Copy the context of the SHAKE128 XOF */ -void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src); -/* Free the context of the SHAKE128 XOF */ -void shake128_inc_ctx_release(shake128incctx *state); - -/* Initialize the state and absorb the provided input. - * - * This function does not support being called multiple times - * with the same state. - */ -void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen); -/* Squeeze output out of the sponge. - * - * Supports being called multiple times - */ -void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state); -/* Free the context held by this XOF */ -void shake256_ctx_release(shake256ctx *state); -/* Copy the context held by this XOF */ -void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src); - -/* Initialize incremental hashing API */ -void shake256_inc_init(shake256incctx *state); -void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen); -/* Prepares for squeeze phase */ -void shake256_inc_finalize(shake256incctx *state); -/* Squeeze output out of the sponge. 
- * - * Supports being called multiple times - */ -void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state); -/* Copy the state */ -void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src); -/* Free the state */ -void shake256_inc_ctx_release(shake256incctx *state); - -/* One-stop SHAKE128 call */ -void shake128(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen); - -/* One-stop SHAKE256 call */ -void shake256(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen); - -/* Initialize the incremental hashing state */ -void sha3_256_inc_init(sha3_256incctx *state); -/* Absorb blocks into SHA3 */ -void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen); -/* Obtain the output of the function and free `state` */ -void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state); -/* Copy the context */ -void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src); -/* Release the state, don't use if `_finalize` has been used */ -void sha3_256_inc_ctx_release(sha3_256incctx *state); - -void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen); - -/* Initialize the incremental hashing state */ -void sha3_384_inc_init(sha3_384incctx *state); -/* Absorb blocks into SHA3 */ -void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, size_t inlen); -/* Obtain the output of the function and free `state` */ -void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state); -/* Copy the context */ -void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src); -/* Release the state, don't use if `_finalize` has been used */ -void sha3_384_inc_ctx_release(sha3_384incctx *state); - -/* One-stop SHA3-384 shop */ -void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen); - -/* Initialize the incremental hashing state */ -void sha3_512_inc_init(sha3_512incctx *state); -/* Absorb blocks into SHA3 */ -void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen); -/* Obtain the output of the function and free `state` */ -void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state); -/* Copy the context */ -void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src); -/* Release the state, don't use if `_finalize` has been used */ -void sha3_512_inc_ctx_release(sha3_512incctx *state); - -/* One-stop SHA3-512 shop */ -void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen); +#define shake256incctx OQS_SHA3_shake256_inc_ctx +#define shake256_inc_init OQS_SHA3_shake256_inc_init +#define shake256_inc_absorb OQS_SHA3_shake256_inc_absorb +#define shake256_inc_finalize OQS_SHA3_shake256_inc_finalize +#define shake256_inc_squeeze OQS_SHA3_shake256_inc_squeeze #endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/randombytes_ctrdrbg.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/randombytes_ctrdrbg.c deleted file mode 100644 index 372cc0de81..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/randombytes_ctrdrbg.c +++ /dev/null @@ -1,161 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 and Unknown -// -/* -NIST-developed software is provided by NIST as a public service. You may use, -copy, and distribute copies of the software in any medium, provided that you -keep intact this entire notice. You may improve, modify, and create derivative -works of the software or any portion of the software, and you may copy and -distribute such modifications or works. 
Modified works should carry a notice -stating that you changed the software and should note the date and nature of any -such change. Please explicitly acknowledge the National Institute of Standards -and Technology as the source of the software. - -NIST-developed software is expressly provided "AS IS." NIST MAKES NO WARRANTY OF -ANY KIND, EXPRESS, IMPLIED, IN FACT, OR ARISING BY OPERATION OF LAW, INCLUDING, -WITHOUT LIMITATION, THE IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE, NON-INFRINGEMENT, AND DATA ACCURACY. NIST NEITHER REPRESENTS -NOR WARRANTS THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR -ERROR-FREE, OR THAT ANY DEFECTS WILL BE CORRECTED. NIST DOES NOT WARRANT OR MAKE -ANY REPRESENTATIONS REGARDING THE USE OF THE SOFTWARE OR THE RESULTS THEREOF, -INCLUDING BUT NOT LIMITED TO THE CORRECTNESS, ACCURACY, RELIABILITY, OR -USEFULNESS OF THE SOFTWARE. - -You are solely responsible for determining the appropriateness of using and -distributing the software and you assume all risks associated with its use, -including but not limited to the risks and costs of program errors, compliance -with applicable laws, damage to or loss of data, programs or equipment, and the -unavailability or interruption of operation. This software is not intended to be -used in any situation where a failure could cause risk of injury or damage to -property. The software developed by NIST employees is not subject to copyright -protection within the United States. -*/ - -#include -#include - -#include - -#ifdef ENABLE_CT_TESTING -#include -#endif - -#define RNG_SUCCESS 0 -#define RNG_BAD_MAXLEN -1 -#define RNG_BAD_OUTBUF -2 -#define RNG_BAD_REQ_LEN -3 - -static inline void AES256_ECB(const unsigned char *key, - const unsigned char *ctr, unsigned char *buffer) { - AES_ECB_encrypt(ctr, key, buffer); -} - -typedef struct { - unsigned char Key[32]; - unsigned char V[16]; - int reseed_counter; -} AES256_CTR_DRBG_struct; - -void AES256_CTR_DRBG_Update(const unsigned char *provided_data, - unsigned char *Key, unsigned char *V); - -AES256_CTR_DRBG_struct DRBG_ctx; - -#ifndef CTRDRBG_TEST_BENCH -static -#endif - void - randombytes_init_nist(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength) { - unsigned char seed_material[48]; - - (void)security_strength; // Unused parameter - memcpy(seed_material, entropy_input, 48); - if (personalization_string) - for (int i = 0; i < 48; i++) { - seed_material[i] ^= personalization_string[i]; - } - memset(DRBG_ctx.Key, 0x00, 32); - memset(DRBG_ctx.V, 0x00, 16); - AES256_CTR_DRBG_Update(seed_material, DRBG_ctx.Key, DRBG_ctx.V); - DRBG_ctx.reseed_counter = 1; -} - -#ifndef CTRDRBG_TEST_BENCH -static -#endif - int - randombytes_nist(unsigned char *x, size_t xlen) { - unsigned char block[16]; - size_t i = 0; - - while (xlen > 0) { - // increment V - for (int j = 15; j >= 0; j--) { - if (DRBG_ctx.V[j] == 0xff) { - DRBG_ctx.V[j] = 0x00; - } else { - DRBG_ctx.V[j]++; - break; - } - } - AES256_ECB(DRBG_ctx.Key, DRBG_ctx.V, block); - if (xlen > 15) { - memcpy(x + i, block, 16); - i += 16; - xlen -= 16; - } else { - memcpy(x + i, block, xlen); - i += xlen; - xlen = 0; - } - } - AES256_CTR_DRBG_Update(NULL, DRBG_ctx.Key, DRBG_ctx.V); - DRBG_ctx.reseed_counter++; - - return 0; -} - -void AES256_CTR_DRBG_Update(const unsigned char *provided_data, - unsigned char *Key, unsigned char *V) { - unsigned char temp[48]; - - for (int i = 0; i < 3; i++) { - // increment V - for (int j = 15; j >= 0; j--) { - if (V[j] == 0xff) { 
- V[j] = 0x00; - } else { - V[j]++; - break; - } - } - - AES256_ECB(Key, V, temp + 16 * i); - } - if (provided_data != NULL) - for (int i = 0; i < 48; i++) { - temp[i] ^= provided_data[i]; - } - memcpy(Key, temp, 32); - memcpy(V, temp + 32, 16); -} - -#ifdef RANDOMBYTES_C -SQISIGN_API -int randombytes(unsigned char *random_array, unsigned long long nbytes) { - int ret = randombytes_nist(random_array, nbytes); -#ifdef ENABLE_CT_TESTING - VALGRIND_MAKE_MEM_UNDEFINED(random_array, ret); -#endif - return ret; -} - -SQISIGN_API -void randombytes_init(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength) { - randombytes_init_nist(entropy_input, personalization_string, - security_strength); -} -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes.h deleted file mode 100644 index e35ec3705b..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes.h +++ /dev/null @@ -1,29 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -#ifndef AES_H -#define AES_H - -#include -#include - -void AES_256_ECB(const uint8_t *input, const uint8_t *key, uint8_t *output); -#define AES_ECB_encrypt AES_256_ECB - -#ifdef ENABLE_AESNI -int AES_128_CTR_NI(unsigned char *output, - size_t outputByteLen, - const unsigned char *input, - size_t inputByteLen); -int AES_128_CTR_4R_NI(unsigned char *output, - size_t outputByteLen, - const unsigned char *input, - size_t inputByteLen); -#define AES_128_CTR AES_128_CTR_NI -#else -int AES_128_CTR(unsigned char *output, - size_t outputByteLen, - const unsigned char *input, - size_t inputByteLen); -#endif - -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes_ni.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes_ni.c deleted file mode 100644 index dc778fc9b6..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes_ni.c +++ /dev/null @@ -1,258 +0,0 @@ -/*************************************************************************** -* This implementation is a modified version of the code, -* written by Nir Drucker and Shay Gueron -* AWS Cryptographic Algorithms Group -* (ndrucker@amazon.com, gueron@amazon.com) -* -* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. -* -* Licensed under the Apache License, Version 2.0 (the "License"). -* You may not use this file except in compliance with the License. -* A copy of the License is located at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* or in the "license" file accompanying this file. This file is distributed -* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -* express or implied. See the License for the specific language governing -* permissions and limitations under the License. -* The license is detailed in the file LICENSE.txt, and applies to this file. 
-* ***************************************************************************/ - -#include "aes_ni.h" -#include - -#include -#include - -#define AESENC(m, key) _mm_aesenc_si128(m, key) -#define AESENCLAST(m, key) _mm_aesenclast_si128(m, key) -#define XOR(a, b) _mm_xor_si128(a, b) -#define ADD32(a, b) _mm_add_epi32(a, b) -#define SHUF8(a, mask) _mm_shuffle_epi8(a, mask) - -#define ZERO256 _mm256_zeroall - -#define BSWAP_MASK 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f - -#ifdef VAES256 -#define VAESENC(a, key) _mm256_aesenc_epi128(a, key) -#define VAESENCLAST(a, key) _mm256_aesenclast_epi128(a, key) -#define EXTRACT128(a, imm) _mm256_extracti128_si256(a, imm) -#define XOR256(a, b) _mm256_xor_si256(a,b) -#define ADD32_256(a, b) _mm256_add_epi32(a,b) -#define SHUF8_256(a, mask) _mm256_shuffle_epi8(a, mask) -#endif - -#ifdef VAES512 -#define VAESENC(a, key) _mm512_aesenc_epi128(a, key) -#define VAESENCLAST(a, key) _mm512_aesenclast_epi128(a, key) -#define EXTRACT128(a, imm) _mm512_extracti64x2_epi64(a, imm) -#define XOR512(a, b) _mm512_xor_si512(a,b) -#define ADD32_512(a, b) _mm512_add_epi32(a,b) -#define SHUF8_512(a, mask) _mm512_shuffle_epi8(a, mask) -#endif - -_INLINE_ __m128i load_m128i(IN const uint8_t *ctr) -{ - return _mm_set_epi8(ctr[0], ctr[1], ctr[2], ctr[3], - ctr[4], ctr[5], ctr[6], ctr[7], - ctr[8], ctr[9], ctr[10], ctr[11], - ctr[12], ctr[13], ctr[14], ctr[15]); -} - -_INLINE_ __m128i loadr_m128i(IN const uint8_t *ctr) -{ - return _mm_setr_epi8(ctr[0], ctr[1], ctr[2], ctr[3], - ctr[4], ctr[5], ctr[6], ctr[7], - ctr[8], ctr[9], ctr[10], ctr[11], - ctr[12], ctr[13], ctr[14], ctr[15]); -} - -void aes256_enc(OUT uint8_t *ct, - IN const uint8_t *pt, - IN const aes256_ks_t *ks) { - uint32_t i = 0; - __m128i block = loadr_m128i(pt); - - block = XOR(block, ks->keys[0]); - for (i = 1; i < AES256_ROUNDS; i++) { - block = AESENC(block, ks->keys[i]); - } - block = AESENCLAST(block, ks->keys[AES256_ROUNDS]); - - _mm_storeu_si128((void*)ct, block); - - // Delete secrets from registers if any. - ZERO256(); -} - -void aes256_ctr_enc(OUT uint8_t *ct, - IN const uint8_t *ctr, - IN const uint32_t num_blocks, - IN const aes256_ks_t *ks) -{ - __m128i ctr_block = load_m128i(ctr); - - const __m128i bswap_mask = _mm_set_epi32(BSWAP_MASK); - const __m128i one = _mm_set_epi32(0,0,0,1); - - __m128i block = SHUF8(ctr_block, bswap_mask); - - for (uint32_t bidx = 0; bidx < num_blocks; bidx++) - { - block = XOR(block, ks->keys[0]); - for (uint32_t i = 1; i < AES256_ROUNDS; i++) { - block = AESENC(block, ks->keys[i]); - } - block = AESENCLAST(block, ks->keys[AES256_ROUNDS]); - - //We use memcpy to avoid align casting. - _mm_storeu_si128((void*)&ct[16*bidx], block); - - ctr_block = ADD32(ctr_block, one); - block = SHUF8(ctr_block, bswap_mask); - } - - // Delete secrets from registers if any. - ZERO256(); -} - -#ifdef VAES256 -_INLINE_ void load_ks(OUT __m256i ks256[AES256_ROUNDS + 1], - IN const aes256_ks_t *ks) -{ - for(uint32_t i = 0; i < AES256_ROUNDS + 1; i++) - { - ks256[i] = _mm256_broadcastsi128_si256(ks->keys[i]); - } -} - -// NIST 800-90A Table 3, Section 10.2.1 (no derivation function) states that -// max_number_of_bits_per_request is min((2^ctr_len - 4) x block_len, 2^19) <= 2^19 -// Therefore the maximal number of blocks (16 bytes) is 2^19/128 = 2^19/2^7 = 2^12 < 2^32 -// Here num_blocks is assumed to be less then 2^32. -// It is the caller responsiblity to ensure it. 
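To make the bound in the comment above concrete: SP 800-90A caps a single generate request at 2^19 bits, and with 128-bit (2^7-bit) AES blocks that is 2^19 / 2^7 = 2^12 = 4096 blocks, far below the num_blocks < 2^32 assumption the comment places on the caller. The snippet below only illustrates what such a caller-side check could look like; drbg_request_blocks_ok is a hypothetical name and not part of the sources.

```c
#include <stdint.h>

/* Illustrative only: a compliant SP 800-90A generate request is at most
 * 2^19 bits = 2^19 / 2^7 = 2^12 = 4096 AES blocks (64 KiB), so it also
 * satisfies the num_blocks < 2^32 precondition documented above. */
static inline int drbg_request_blocks_ok(uint32_t num_blocks)
{
    return num_blocks <= (UINT32_C(1) << 12);
}
```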
-void aes256_ctr_enc256(OUT uint8_t *ct, - IN const uint8_t *ctr, - IN const uint32_t num_blocks, - IN const aes256_ks_t *ks) -{ - const uint64_t num_par_blocks = num_blocks/2; - const uint64_t blocks_rem = num_blocks - (2*(num_par_blocks)); - - __m256i ks256[AES256_ROUNDS + 1]; - load_ks(ks256, ks); - - __m128i single_block = load_m128i(ctr); - __m256i ctr_blocks = _mm256_broadcastsi128_si256(single_block); - - // Preparing the masks - const __m256i bswap_mask = _mm256_set_epi32(BSWAP_MASK, BSWAP_MASK); - const __m256i two = _mm256_set_epi32(0,0,0,2,0,0,0,2); - const __m256i init = _mm256_set_epi32(0,0,0,1,0,0,0,0); - - // Initialize two parallel counters - ctr_blocks = ADD32_256(ctr_blocks, init); - __m256i p = SHUF8_256(ctr_blocks, bswap_mask); - - for (uint32_t block_idx = 0; block_idx < num_par_blocks; block_idx++) - { - p = XOR256(p, ks256[0]); - for (uint32_t i = 1; i < AES256_ROUNDS; i++) - { - p = VAESENC(p, ks256[i]); - } - p = VAESENCLAST(p, ks256[AES256_ROUNDS]); - - // We use memcpy to avoid align casting. - _mm256_storeu_si256((__m256i *)&ct[PAR_AES_BLOCK_SIZE * block_idx], p); - - // Increase the two counters in parallel - ctr_blocks = ADD32_256(ctr_blocks, two); - p = SHUF8_256(ctr_blocks, bswap_mask); - } - - if(0 != blocks_rem) - { - single_block = EXTRACT128(p, 0); - aes256_ctr_enc(&ct[PAR_AES_BLOCK_SIZE * num_par_blocks], - (const uint8_t*)&single_block, blocks_rem, ks); - } - - // Delete secrets from registers if any. - ZERO256(); -} - -#endif //VAES256 - -#ifdef VAES512 - -_INLINE_ void load_ks(OUT __m512i ks512[AES256_ROUNDS + 1], - IN const aes256_ks_t *ks) -{ - for(uint32_t i = 0; i < AES256_ROUNDS + 1; i++) - { - ks512[i] = _mm512_broadcast_i32x4(ks->keys[i]); - } -} - -// NIST 800-90A Table 3, Section 10.2.1 (no derivation function) states that -// max_number_of_bits_per_request is min((2^ctr_len - 4) x block_len, 2^19) <= 2^19 -// Therefore the maximal number of blocks (16 bytes) is 2^19/128 = 2^19/2^7 = 2^12 < 2^32 -// Here num_blocks is assumed to be less then 2^32. -// It is the caller responsiblity to ensure it. -void aes256_ctr_enc512(OUT uint8_t *ct, - IN const uint8_t *ctr, - IN const uint32_t num_blocks, - IN const aes256_ks_t *ks) -{ - const uint64_t num_par_blocks = num_blocks/4; - const uint64_t blocks_rem = num_blocks - (4*(num_par_blocks)); - - __m512i ks512[AES256_ROUNDS + 1]; - load_ks(ks512, ks); - - __m128i single_block = load_m128i(ctr); - __m512i ctr_blocks = _mm512_broadcast_i32x4(single_block); - - // Preparing the masks - const __m512i bswap_mask = _mm512_set_epi32(BSWAP_MASK, BSWAP_MASK, - BSWAP_MASK, BSWAP_MASK); - const __m512i four = _mm512_set_epi32(0,0,0,4,0,0,0,4,0,0,0,4,0,0,0,4); - const __m512i init = _mm512_set_epi32(0,0,0,3,0,0,0,2,0,0,0,1,0,0,0,0); - - // Initialize four parallel counters - ctr_blocks = ADD32_512(ctr_blocks, init); - __m512i p = SHUF8_512(ctr_blocks, bswap_mask); - - for (uint32_t block_idx = 0; block_idx < num_par_blocks; block_idx++) - { - p = XOR512(p, ks512[0]); - for (uint32_t i = 1; i < AES256_ROUNDS; i++) - { - p = VAESENC(p, ks512[i]); - } - p = VAESENCLAST(p, ks512[AES256_ROUNDS]); - - - // We use memcpy to avoid align casting. 
- _mm512_storeu_si512(&ct[PAR_AES_BLOCK_SIZE * block_idx], p); - - // Increase the four counters in parallel - ctr_blocks = ADD32_512(ctr_blocks, four); - p = SHUF8_512(ctr_blocks, bswap_mask); - } - - if(0 != blocks_rem) - { - single_block = EXTRACT128(p, 0); - aes256_ctr_enc(&ct[PAR_AES_BLOCK_SIZE * num_par_blocks], - (const uint8_t*)&single_block, blocks_rem, ks); - } - - // Delete secrets from registers if any. - ZERO256(); -} - -#endif //VAES512 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes_ni.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes_ni.h deleted file mode 100644 index 3d2b21ecf5..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/aes_ni.h +++ /dev/null @@ -1,85 +0,0 @@ -/*************************************************************************** -* Written by Nir Drucker and Shay Gueron -* AWS Cryptographic Algorithms Group -* (ndrucker@amazon.com, gueron@amazon.com) -* -* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. -* -* Licensed under the Apache License, Version 2.0 (the "License"). -* You may not use this file except in compliance with the License. -* A copy of the License is located at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* or in the "license" file accompanying this file. This file is distributed -* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -* express or implied. See the License for the specific language governing -* permissions and limitations under the License. -* The license is detailed in the file LICENSE.txt, and applies to this file. -* ***************************************************************************/ - -#pragma once - -#include -#include -#include "defs.h" - -#define MAX_AES_INVOKATION (MASK(32)) - -#define AES256_KEY_SIZE (32ULL) -#define AES256_KEY_BITS (AES256_KEY_SIZE * 8) -#define AES_BLOCK_SIZE (16ULL) -#define AES256_ROUNDS (14ULL) - -#ifdef VAES256 -#define PAR_AES_BLOCK_SIZE (AES_BLOCK_SIZE*2) -#elif defined(VAES512) -#define PAR_AES_BLOCK_SIZE (AES_BLOCK_SIZE*4) -#endif - -typedef ALIGN(16) struct aes256_key_s { - uint8_t raw[AES256_KEY_SIZE]; -} aes256_key_t; - -typedef ALIGN(16) struct aes256_ks_s { - __m128i keys[AES256_ROUNDS + 1]; -} aes256_ks_t; - -// The ks parameter must be 16 bytes aligned! -EXTERNC void aes256_key_expansion(OUT aes256_ks_t *ks, - IN const aes256_key_t *key); - -// Encrypt one 128-bit block ct = E(pt,ks) -void aes256_enc(OUT uint8_t *ct, - IN const uint8_t *pt, - IN const aes256_ks_t *ks); - -// Encrypt num_blocks 128-bit blocks -// ct[15:0] = E(pt[15:0],ks) -// ct[31:16] = E(pt[15:0] + 1,ks) -// ... -// ct[16*num_blocks - 1:16*(num_blocks-1)] = E(pt[15:0] + num_blocks,ks) -void aes256_ctr_enc(OUT uint8_t *ct, - IN const uint8_t *pt, - IN const uint32_t num_blocks, - IN const aes256_ks_t *ks); - -// Encrypt num_blocks 128-bit blocks using VAES (AVX-2) -// ct[15:0] = E(pt[15:0],ks) -// ct[31:16] = E(pt[15:0] + 1,ks) -// ... -// ct[16*num_blocks - 1:16*(num_blocks-1)] = E(pt[15:0] + num_blocks,ks) -void aes256_ctr_enc256(OUT uint8_t *ct, - IN const uint8_t *ctr, - IN const uint32_t num_blocks, - IN const aes256_ks_t *ks); - -// Encrypt num_blocks 128-bit blocks using VAES (AVX512) -// ct[15:0] = E(pt[15:0],ks) -// ct[31:16] = E(pt[15:0] + 1,ks) -// ... 
-// ct[16*num_blocks - 1:16*(num_blocks-1)] = E(pt[15:0] + num_blocks,ks) -void aes256_ctr_enc512(OUT uint8_t *ct, - IN const uint8_t *ctr, - IN const uint32_t num_blocks, - IN const aes256_ks_t *ks); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/algebra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/algebra.c new file mode 100644 index 0000000000..50629f9fec --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/algebra.c @@ -0,0 +1,280 @@ +#include +#include "internal.h" + +// Internal helper functions + +void +quat_alg_init_set_ui(quat_alg_t *alg, unsigned int p) +{ + ibz_t bp; + ibz_init(&bp); + ibz_set(&bp, p); + quat_alg_init_set(alg, &bp); + ibz_finalize(&bp); +} + +void +quat_alg_coord_mul(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b, const quat_alg_t *alg) +{ + ibz_t prod; + ibz_vec_4_t sum; + ibz_init(&prod); + ibz_vec_4_init(&sum); + + ibz_set(&(sum[0]), 0); + ibz_set(&(sum[1]), 0); + ibz_set(&(sum[2]), 0); + ibz_set(&(sum[3]), 0); + + // compute 1 coordinate + ibz_mul(&prod, &((*a)[2]), &((*b)[2])); + ibz_sub(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[3])); + ibz_sub(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&(sum[0]), &(sum[0]), &(alg->p)); + ibz_mul(&prod, &((*a)[0]), &((*b)[0])); + ibz_add(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[1])); + ibz_sub(&(sum[0]), &(sum[0]), &prod); + // compute i coordiante + ibz_mul(&prod, &((*a)[2]), &((*b)[3])); + ibz_add(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[2])); + ibz_sub(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&(sum[1]), &(sum[1]), &(alg->p)); + ibz_mul(&prod, &((*a)[0]), &((*b)[1])); + ibz_add(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[0])); + ibz_add(&(sum[1]), &(sum[1]), &prod); + // compute j coordiante + ibz_mul(&prod, &((*a)[0]), &((*b)[2])); + ibz_add(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &((*a)[2]), &((*b)[0])); + ibz_add(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[3])); + ibz_sub(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[1])); + ibz_add(&(sum[2]), &(sum[2]), &prod); + // compute ij coordiante + ibz_mul(&prod, &((*a)[0]), &((*b)[3])); + ibz_add(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[0])); + ibz_add(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &((*a)[2]), &((*b)[1])); + ibz_sub(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[2])); + ibz_add(&(sum[3]), &(sum[3]), &prod); + + ibz_copy(&((*res)[0]), &(sum[0])); + ibz_copy(&((*res)[1]), &(sum[1])); + ibz_copy(&((*res)[2]), &(sum[2])); + ibz_copy(&((*res)[3]), &(sum[3])); + + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} + +void +quat_alg_equal_denom(quat_alg_elem_t *res_a, quat_alg_elem_t *res_b, const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + ibz_t gcd, r; + ibz_init(&gcd); + ibz_init(&r); + ibz_gcd(&gcd, &(a->denom), &(b->denom)); + // temporarily set res_a.denom to a.denom/gcd, and res_b.denom to b.denom/gcd + ibz_div(&(res_a->denom), &r, &(a->denom), &gcd); + ibz_div(&(res_b->denom), &r, &(b->denom), &gcd); + for (int i = 0; i < 4; i++) { + // multiply coordiates by reduced denominators from the other element + ibz_mul(&(res_a->coord[i]), &(a->coord[i]), &(res_b->denom)); + ibz_mul(&(res_b->coord[i]), &(b->coord[i]), &(res_a->denom)); + } + // multiply both reduced denominators + ibz_mul(&(res_a->denom), &(res_a->denom), &(res_b->denom)); + // multiply them by the gcd to get the new common denominator + 
ibz_mul(&(res_b->denom), &(res_a->denom), &gcd); + ibz_mul(&(res_a->denom), &(res_a->denom), &gcd); + ibz_finalize(&gcd); + ibz_finalize(&r); +} + +// Public Functions + +void +quat_alg_add(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + quat_alg_elem_t res_a, res_b; + quat_alg_elem_init(&res_a); + quat_alg_elem_init(&res_b); + // put both on the same denominator + quat_alg_equal_denom(&res_a, &res_b, a, b); + // then add + ibz_copy(&(res->denom), &(res_a.denom)); + ibz_vec_4_add(&(res->coord), &(res_a.coord), &(res_b.coord)); + quat_alg_elem_finalize(&res_a); + quat_alg_elem_finalize(&res_b); +} + +void +quat_alg_sub(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + quat_alg_elem_t res_a, res_b; + quat_alg_elem_init(&res_a); + quat_alg_elem_init(&res_b); + // put both on the same denominator + quat_alg_equal_denom(&res_a, &res_b, a, b); + // then substract + ibz_copy(&res->denom, &res_a.denom); + ibz_vec_4_sub(&res->coord, &res_a.coord, &res_b.coord); + quat_alg_elem_finalize(&res_a); + quat_alg_elem_finalize(&res_b); +} + +void +quat_alg_mul(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b, const quat_alg_t *alg) +{ + // denominator: product of denominators + ibz_mul(&(res->denom), &(a->denom), &(b->denom)); + quat_alg_coord_mul(&(res->coord), &(a->coord), &(b->coord), alg); +} + +void +quat_alg_norm(ibz_t *res_num, ibz_t *res_denom, const quat_alg_elem_t *a, const quat_alg_t *alg) +{ + ibz_t r, g; + quat_alg_elem_t norm; + ibz_init(&r); + ibz_init(&g); + quat_alg_elem_init(&norm); + + quat_alg_conj(&norm, a); + quat_alg_mul(&norm, a, &norm, alg); + ibz_gcd(&g, &(norm.coord[0]), &(norm.denom)); + ibz_div(res_num, &r, &(norm.coord[0]), &g); + ibz_div(res_denom, &r, &(norm.denom), &g); + ibz_abs(res_denom, res_denom); + ibz_abs(res_num, res_num); + assert(ibz_cmp(res_denom, &ibz_const_zero) > 0); + + quat_alg_elem_finalize(&norm); + ibz_finalize(&r); + ibz_finalize(&g); +} + +void +quat_alg_scalar(quat_alg_elem_t *elem, const ibz_t *numerator, const ibz_t *denominator) +{ + ibz_copy(&(elem->denom), denominator); + ibz_copy(&(elem->coord[0]), numerator); + ibz_set(&(elem->coord[1]), 0); + ibz_set(&(elem->coord[2]), 0); + ibz_set(&(elem->coord[3]), 0); +} + +void +quat_alg_conj(quat_alg_elem_t *conj, const quat_alg_elem_t *x) +{ + ibz_copy(&(conj->denom), &(x->denom)); + ibz_copy(&(conj->coord[0]), &(x->coord[0])); + ibz_neg(&(conj->coord[1]), &(x->coord[1])); + ibz_neg(&(conj->coord[2]), &(x->coord[2])); + ibz_neg(&(conj->coord[3]), &(x->coord[3])); +} + +void +quat_alg_make_primitive(ibz_vec_4_t *primitive_x, ibz_t *content, const quat_alg_elem_t *x, const quat_lattice_t *order) +{ + int ok UNUSED = quat_lattice_contains(primitive_x, order, x); + assert(ok); + ibz_vec_4_content(content, primitive_x); + ibz_t r; + ibz_init(&r); + for (int i = 0; i < 4; i++) { + ibz_div(*primitive_x + i, &r, *primitive_x + i, content); + } + ibz_finalize(&r); +} + +void +quat_alg_normalize(quat_alg_elem_t *x) +{ + ibz_t gcd, sign, r; + ibz_init(&gcd); + ibz_init(&sign); + ibz_init(&r); + ibz_vec_4_content(&gcd, &(x->coord)); + ibz_gcd(&gcd, &gcd, &(x->denom)); + ibz_div(&(x->denom), &r, &(x->denom), &gcd); + ibz_vec_4_scalar_div(&(x->coord), &gcd, &(x->coord)); + ibz_set(&sign, 2 * (0 > ibz_cmp(&ibz_const_zero, &(x->denom))) - 1); + ibz_vec_4_scalar_mul(&(x->coord), &sign, &(x->coord)); + ibz_mul(&(x->denom), &sign, &(x->denom)); + ibz_finalize(&gcd); + ibz_finalize(&sign); + ibz_finalize(&r); +} + +int 
+quat_alg_elem_equal(const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + quat_alg_elem_t diff; + quat_alg_elem_init(&diff); + quat_alg_sub(&diff, a, b); + int res = quat_alg_elem_is_zero(&diff); + quat_alg_elem_finalize(&diff); + return (res); +} + +int +quat_alg_elem_is_zero(const quat_alg_elem_t *x) +{ + int res = ibz_vec_4_is_zero(&(x->coord)); + return (res); +} + +void +quat_alg_elem_set(quat_alg_elem_t *elem, int32_t denom, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) +{ + ibz_set(&(elem->coord[0]), coord0); + ibz_set(&(elem->coord[1]), coord1); + ibz_set(&(elem->coord[2]), coord2); + ibz_set(&(elem->coord[3]), coord3); + + ibz_set(&(elem->denom), denom); +} + +void +quat_alg_elem_copy(quat_alg_elem_t *copy, const quat_alg_elem_t *copied) +{ + ibz_copy(©->denom, &copied->denom); + ibz_copy(©->coord[0], &copied->coord[0]); + ibz_copy(©->coord[1], &copied->coord[1]); + ibz_copy(©->coord[2], &copied->coord[2]); + ibz_copy(©->coord[3], &copied->coord[3]); +} + +// helper functions for lattices +void +quat_alg_elem_copy_ibz(quat_alg_elem_t *elem, + const ibz_t *denom, + const ibz_t *coord0, + const ibz_t *coord1, + const ibz_t *coord2, + const ibz_t *coord3) +{ + ibz_copy(&(elem->coord[0]), coord0); + ibz_copy(&(elem->coord[1]), coord1); + ibz_copy(&(elem->coord[2]), coord2); + ibz_copy(&(elem->coord[3]), coord3); + + ibz_copy(&(elem->denom), denom); +} + +void +quat_alg_elem_mul_by_scalar(quat_alg_elem_t *res, const ibz_t *scalar, const quat_alg_elem_t *elem) +{ + for (int i = 0; i < 4; i++) { + ibz_mul(&(res->coord[i]), &(elem->coord[i]), scalar); + } + ibz_copy(&(res->denom), &(elem->denom)); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.c deleted file mode 100644 index 983ba49adf..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.c +++ /dev/null @@ -1,201 +0,0 @@ -/* Copyright (c) 2017, Google Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ - -/*************************************************************************** - * Small modification by Nir Drucker and Shay Gueron - * AWS Cryptographic Algorithms Group - * (ndrucker@amazon.com, gueron@amazon.com) - * include: - * 1) Use memcpy/memset instead of OPENSSL_memcpy/memset - * 2) Include aes.h as the underlying aes code - * 3) Modifying the drbg structure - * ***************************************************************************/ - -#include "ctr_drbg.h" -#include - - -// Section references in this file refer to SP 800-90Ar1: -// http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-90Ar1.pdf - -int CTR_DRBG_init(CTR_DRBG_STATE *drbg, - const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], - const uint8_t *personalization, size_t personalization_len) { - // Section 10.2.1.3.1 - if (personalization_len > CTR_DRBG_ENTROPY_LEN) { - return 0; - } - - uint8_t seed_material[CTR_DRBG_ENTROPY_LEN]; - memcpy(seed_material, entropy, CTR_DRBG_ENTROPY_LEN); - - for (size_t i = 0; i < personalization_len; i++) { - seed_material[i] ^= personalization[i]; - } - - // Section 10.2.1.2 - // kInitMask is the result of encrypting blocks with big-endian value 1, 2 - // and 3 with the all-zero AES-256 key. - static const uint8_t kInitMask[CTR_DRBG_ENTROPY_LEN] = { - 0x53, 0x0f, 0x8a, 0xfb, 0xc7, 0x45, 0x36, 0xb9, 0xa9, 0x63, 0xb4, 0xf1, - 0xc4, 0xcb, 0x73, 0x8b, 0xce, 0xa7, 0x40, 0x3d, 0x4d, 0x60, 0x6b, 0x6e, - 0x07, 0x4e, 0xc5, 0xd3, 0xba, 0xf3, 0x9d, 0x18, 0x72, 0x60, 0x03, 0xca, - 0x37, 0xa6, 0x2a, 0x74, 0xd1, 0xa2, 0xf5, 0x8e, 0x75, 0x06, 0x35, 0x8e, - }; - - for (size_t i = 0; i < sizeof(kInitMask); i++) { - seed_material[i] ^= kInitMask[i]; - } - - aes256_key_t key; - memcpy(key.raw, seed_material, 32); - memcpy(drbg->counter.bytes, seed_material + 32, 16); - - aes256_key_expansion(&drbg->ks, &key); - drbg->reseed_counter = 1; - - return 1; -} - -// ctr_inc adds |n| to the last four bytes of |drbg->counter|, treated as a -// big-endian number. -static void ctr32_add(CTR_DRBG_STATE *drbg, uint32_t n) { - drbg->counter.words[3] = - CRYPTO_bswap4(CRYPTO_bswap4(drbg->counter.words[3]) + n); -} - -static int ctr_drbg_update(CTR_DRBG_STATE *drbg, const uint8_t *data, - size_t data_len) { - // Per section 10.2.1.2, |data_len| must be |CTR_DRBG_ENTROPY_LEN|. Here, we - // allow shorter inputs and right-pad them with zeros. This is equivalent to - // the specified algorithm but saves a copy in |CTR_DRBG_generate|. 
- if (data_len > CTR_DRBG_ENTROPY_LEN) { - return 0; - } - - uint8_t temp[CTR_DRBG_ENTROPY_LEN]; - for (size_t i = 0; i < CTR_DRBG_ENTROPY_LEN; i += AES_BLOCK_SIZE) { - ctr32_add(drbg, 1); - aes256_enc(temp + i, drbg->counter.bytes, &drbg->ks); - } - - for (size_t i = 0; i < data_len; i++) { - temp[i] ^= data[i]; - } - - aes256_key_t key; - memcpy(key.raw, temp, 32); - memcpy(drbg->counter.bytes, temp + 32, 16); - aes256_key_expansion(&drbg->ks, &key); - - return 1; -} - -int CTR_DRBG_reseed(CTR_DRBG_STATE *drbg, - const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], - const uint8_t *additional_data, - size_t additional_data_len) { - // Section 10.2.1.4 - uint8_t entropy_copy[CTR_DRBG_ENTROPY_LEN]; - - if (additional_data_len > 0) { - if (additional_data_len > CTR_DRBG_ENTROPY_LEN) { - return 0; - } - - memcpy(entropy_copy, entropy, CTR_DRBG_ENTROPY_LEN); - for (size_t i = 0; i < additional_data_len; i++) { - entropy_copy[i] ^= additional_data[i]; - } - - entropy = entropy_copy; - } - - if (!ctr_drbg_update(drbg, entropy, CTR_DRBG_ENTROPY_LEN)) { - return 0; - } - - drbg->reseed_counter = 1; - - return 1; -} - -int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out, size_t out_len, - const uint8_t *additional_data, - size_t additional_data_len) { - if (additional_data_len != 0 && - !ctr_drbg_update(drbg, additional_data, additional_data_len)) { - return 0; - } - - // kChunkSize is used to interact better with the cache. Since the AES-CTR - // code assumes that it's encrypting rather than just writing keystream, the - // buffer has to be zeroed first. Without chunking, large reads would zero - // the whole buffer, flushing the L1 cache, and then do another pass (missing - // the cache every time) to “encrypt” it. The code can avoid this by - // chunking. - static const size_t kChunkSize = 8 * 1024; - - while (out_len >= AES_BLOCK_SIZE) { - size_t todo = kChunkSize; - if (todo > out_len) { - todo = out_len; - } - - todo &= ~(AES_BLOCK_SIZE - 1); - - const size_t num_blocks = todo / AES_BLOCK_SIZE; - if (1) { - memset(out, 0, todo); - ctr32_add(drbg, 1); -#ifdef VAES512 - aes256_ctr_enc512(out, drbg->counter.bytes, num_blocks, &drbg->ks); -#elif defined(VAES256) - aes256_ctr_enc256(out, drbg->counter.bytes, num_blocks, &drbg->ks); -#else - aes256_ctr_enc(out, drbg->counter.bytes, num_blocks, &drbg->ks); -#endif - ctr32_add(drbg, num_blocks - 1); - } else { - for (size_t i = 0; i < todo; i += AES_BLOCK_SIZE) { - ctr32_add(drbg, 1); - aes256_enc(&out[i], drbg->counter.bytes, &drbg->ks); - } - } - - out += todo; - out_len -= todo; - } - - if (out_len > 0) { - uint8_t block[AES_BLOCK_SIZE]; - ctr32_add(drbg, 1); - aes256_enc(block, drbg->counter.bytes, &drbg->ks); - - memcpy(out, block, out_len); - } - - // Right-padding |additional_data| in step 2.2 is handled implicitly by - // |ctr_drbg_update|, to save a copy. - if (!ctr_drbg_update(drbg, additional_data, additional_data_len)) { - return 0; - } - - drbg->reseed_counter++; - return 1; -} - -void CTR_DRBG_clear(CTR_DRBG_STATE *drbg) { - secure_clean((uint8_t *)drbg, sizeof(CTR_DRBG_STATE)); -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.h deleted file mode 100644 index 2d1b1f3f0c..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ctr_drbg.h +++ /dev/null @@ -1,78 +0,0 @@ -/* Copyright (c) 2017, Google Inc. 
- * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -/*************************************************************************** -* Small modification by Nir Drucker and Shay Gueron -* AWS Cryptographic Algorithms Group -* (ndrucker@amazon.com, gueron@amazon.com) -* include: -* 1) Use memcpy/memset instead of OPENSSL_memcpy/memset -* 2) Include aes.h as the underlying aes code -* 3) Modifying the drbg structure -* ***************************************************************************/ - -#pragma once - -#if defined(__cplusplus) -extern "C" { -#endif - -#include "aes_ni.h" - -// CTR_DRBG_STATE contains the state of a CTR_DRBG based on AES-256. See SP -// 800-90Ar1. -typedef struct { - aes256_ks_t ks; - union { - uint8_t bytes[16]; - uint32_t words[4]; - } counter; - uint64_t reseed_counter; -} CTR_DRBG_STATE; - -// See SP 800-90Ar1, table 3. -#define CTR_DRBG_ENTROPY_LEN 48 - -// CTR_DRBG_init initialises |*drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of -// entropy in |entropy| and, optionally, a personalization string up to -// |CTR_DRBG_ENTROPY_LEN| bytes in length. It returns one on success and zero -// on error. -int CTR_DRBG_init(CTR_DRBG_STATE *drbg, - const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], - const uint8_t *personalization, - size_t personalization_len); - -// CTR_DRBG_reseed reseeds |drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of entropy -// in |entropy| and, optionally, up to |CTR_DRBG_ENTROPY_LEN| bytes of -// additional data. It returns one on success or zero on error. -int CTR_DRBG_reseed(CTR_DRBG_STATE *drbg, - const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], - const uint8_t *additional_data, - size_t additional_data_len); - -// CTR_DRBG_generate processes to up |CTR_DRBG_ENTROPY_LEN| bytes of additional -// data (if any) and then writes |out_len| random bytes to |out|. It returns one on success or -// zero on error. -int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out, - size_t out_len, - const uint8_t *additional_data, - size_t additional_data_len); - -// CTR_DRBG_clear zeroises the state of |drbg|. 
-void CTR_DRBG_clear(CTR_DRBG_STATE *drbg); - - -#if defined(__cplusplus) -} // extern C -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim2.c new file mode 100644 index 0000000000..b31ae7771a --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim2.c @@ -0,0 +1,132 @@ +#include +#include "internal.h" + +// internal helpers, also for other files +void +ibz_vec_2_set(ibz_vec_2_t *vec, int a0, int a1) +{ + ibz_set(&((*vec)[0]), a0); + ibz_set(&((*vec)[1]), a1); +} +void +ibz_mat_2x2_set(ibz_mat_2x2_t *mat, int a00, int a01, int a10, int a11) +{ + ibz_set(&((*mat)[0][0]), a00); + ibz_set(&((*mat)[0][1]), a01); + ibz_set(&((*mat)[1][0]), a10); + ibz_set(&((*mat)[1][1]), a11); +} + +void +ibz_mat_2x2_copy(ibz_mat_2x2_t *copy, const ibz_mat_2x2_t *copied) +{ + ibz_copy(&((*copy)[0][0]), &((*copied)[0][0])); + ibz_copy(&((*copy)[0][1]), &((*copied)[0][1])); + ibz_copy(&((*copy)[1][0]), &((*copied)[1][0])); + ibz_copy(&((*copy)[1][1]), &((*copied)[1][1])); +} + +void +ibz_mat_2x2_add(ibz_mat_2x2_t *sum, const ibz_mat_2x2_t *a, const ibz_mat_2x2_t *b) +{ + ibz_add(&((*sum)[0][0]), &((*a)[0][0]), &((*b)[0][0])); + ibz_add(&((*sum)[0][1]), &((*a)[0][1]), &((*b)[0][1])); + ibz_add(&((*sum)[1][0]), &((*a)[1][0]), &((*b)[1][0])); + ibz_add(&((*sum)[1][1]), &((*a)[1][1]), &((*b)[1][1])); +} + +void +ibz_mat_2x2_det_from_ibz(ibz_t *det, const ibz_t *a11, const ibz_t *a12, const ibz_t *a21, const ibz_t *a22) +{ + ibz_t prod; + ibz_init(&prod); + ibz_mul(&prod, a12, a21); + ibz_mul(det, a11, a22); + ibz_sub(det, det, &prod); + ibz_finalize(&prod); +} + +void +ibz_mat_2x2_eval(ibz_vec_2_t *res, const ibz_mat_2x2_t *mat, const ibz_vec_2_t *vec) +{ + ibz_t prod; + ibz_vec_2_t matvec; + ibz_init(&prod); + ibz_vec_2_init(&matvec); + ibz_mul(&prod, &((*mat)[0][0]), &((*vec)[0])); + ibz_copy(&(matvec[0]), &prod); + ibz_mul(&prod, &((*mat)[0][1]), &((*vec)[1])); + ibz_add(&(matvec[0]), &(matvec[0]), &prod); + ibz_mul(&prod, &((*mat)[1][0]), &((*vec)[0])); + ibz_copy(&(matvec[1]), &prod); + ibz_mul(&prod, &((*mat)[1][1]), &((*vec)[1])); + ibz_add(&(matvec[1]), &(matvec[1]), &prod); + ibz_copy(&((*res)[0]), &(matvec[0])); + ibz_copy(&((*res)[1]), &(matvec[1])); + ibz_finalize(&prod); + ibz_vec_2_finalize(&matvec); +} + +// modular 2x2 operations + +void +ibz_2x2_mul_mod(ibz_mat_2x2_t *prod, const ibz_mat_2x2_t *mat_a, const ibz_mat_2x2_t *mat_b, const ibz_t *m) +{ + ibz_t mul; + ibz_mat_2x2_t sums; + ibz_init(&mul); + ibz_mat_2x2_init(&sums); + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_set(&(sums[i][j]), 0); + } + } + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + for (int k = 0; k < 2; k++) { + ibz_mul(&mul, &((*mat_a)[i][k]), &((*mat_b)[k][j])); + ibz_add(&(sums[i][j]), &(sums[i][j]), &mul); + ibz_mod(&(sums[i][j]), &(sums[i][j]), m); + } + } + } + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_copy(&((*prod)[i][j]), &(sums[i][j])); + } + } + ibz_finalize(&mul); + ibz_mat_2x2_finalize(&sums); +} + +int +ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m) +{ + ibz_t det, prod; + ibz_init(&det); + ibz_init(&prod); + ibz_mul(&det, &((*mat)[0][0]), &((*mat)[1][1])); + ibz_mod(&det, &det, m); + ibz_mul(&prod, &((*mat)[0][1]), &((*mat)[1][0])); + ibz_sub(&det, &det, &prod); + ibz_mod(&det, &det, m); + int res = ibz_invmod(&det, &det, m); + // return 0 matrix if non invertible determinant + ibz_set(&prod, res); + ibz_mul(&det, 
&det, &prod); + // compute inverse + ibz_copy(&prod, &((*mat)[0][0])); + ibz_copy(&((*inv)[0][0]), &((*mat)[1][1])); + ibz_copy(&((*inv)[1][1]), &prod); + ibz_neg(&((*inv)[1][0]), &((*mat)[1][0])); + ibz_neg(&((*inv)[0][1]), &((*mat)[0][1])); + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_mul(&((*inv)[i][j]), &((*inv)[i][j]), &det); + ibz_mod(&((*inv)[i][j]), &((*inv)[i][j]), m); + } + } + ibz_finalize(&det); + ibz_finalize(&prod); + return (res); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim4.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim4.c new file mode 100644 index 0000000000..495dc2dcb2 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim4.c @@ -0,0 +1,470 @@ +#include +#include "internal.h" + +// internal helper functions +void +ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t *b) +{ + ibz_mat_4x4_t mat; + ibz_t prod; + ibz_init(&prod); + ibz_mat_4x4_init(&mat); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&(mat[i][j]), 0); + for (int k = 0; k < 4; k++) { + ibz_mul(&prod, &((*a)[i][k]), &((*b)[k][j])); + ibz_add(&(mat[i][j]), &(mat[i][j]), &prod); + } + } + } + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&((*res)[i][j]), &(mat[i][j])); + } + } + ibz_mat_4x4_finalize(&mat); + ibz_finalize(&prod); +} + +// helper functions for lattices +void +ibz_vec_4_set(ibz_vec_4_t *vec, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) +{ + ibz_set(&((*vec)[0]), coord0); + ibz_set(&((*vec)[1]), coord1); + ibz_set(&((*vec)[2]), coord2); + ibz_set(&((*vec)[3]), coord3); +} + +void +ibz_vec_4_copy(ibz_vec_4_t *new, const ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_copy(&((*new)[i]), &((*vec)[i])); + } +} + +void +ibz_vec_4_copy_ibz(ibz_vec_4_t *res, const ibz_t *coord0, const ibz_t *coord1, const ibz_t *coord2, const ibz_t *coord3) +{ + ibz_copy(&((*res)[0]), coord0); + ibz_copy(&((*res)[1]), coord1); + ibz_copy(&((*res)[2]), coord2); + ibz_copy(&((*res)[3]), coord3); +} + +void +ibz_vec_4_content(ibz_t *content, const ibz_vec_4_t *v) +{ + ibz_gcd(content, &((*v)[0]), &((*v)[1])); + ibz_gcd(content, &((*v)[2]), content); + ibz_gcd(content, &((*v)[3]), content); +} + +void +ibz_vec_4_negate(ibz_vec_4_t *neg, const ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_neg(&((*neg)[i]), &((*vec)[i])); + } +} + +void +ibz_vec_4_add(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) +{ + ibz_add(&((*res)[0]), &((*a)[0]), &((*b)[0])); + ibz_add(&((*res)[1]), &((*a)[1]), &((*b)[1])); + ibz_add(&((*res)[2]), &((*a)[2]), &((*b)[2])); + ibz_add(&((*res)[3]), &((*a)[3]), &((*b)[3])); +} + +void +ibz_vec_4_sub(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) +{ + ibz_sub(&((*res)[0]), &((*a)[0]), &((*b)[0])); + ibz_sub(&((*res)[1]), &((*a)[1]), &((*b)[1])); + ibz_sub(&((*res)[2]), &((*a)[2]), &((*b)[2])); + ibz_sub(&((*res)[3]), &((*a)[3]), &((*b)[3])); +} + +int +ibz_vec_4_is_zero(const ibz_vec_4_t *x) +{ + int res = 1; + for (int i = 0; i < 4; i++) { + res &= ibz_is_zero(&((*x)[i])); + } + return (res); +} + +void +ibz_vec_4_linear_combination(ibz_vec_4_t *lc, + const ibz_t *coeff_a, + const ibz_vec_4_t *vec_a, + const ibz_t *coeff_b, + const ibz_vec_4_t *vec_b) +{ + ibz_t prod; + ibz_vec_4_t sums; + ibz_vec_4_init(&sums); + ibz_init(&prod); + for (int i = 0; i < 4; i++) { + ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); + ibz_mul(&prod, coeff_b, &((*vec_b)[i])); + ibz_add(&(sums[i]), 
&(sums[i]), &prod); + } + for (int i = 0; i < 4; i++) { + ibz_copy(&((*lc)[i]), &(sums[i])); + } + ibz_finalize(&prod); + ibz_vec_4_finalize(&sums); +} + +void +ibz_vec_4_scalar_mul(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_mul(&((*prod)[i]), &((*vec)[i]), scalar); + } +} + +int +ibz_vec_4_scalar_div(ibz_vec_4_t *quot, const ibz_t *scalar, const ibz_vec_4_t *vec) +{ + int res = 1; + ibz_t r; + ibz_init(&r); + for (int i = 0; i < 4; i++) { + ibz_div(&((*quot)[i]), &r, &((*vec)[i]), scalar); + res = res && ibz_is_zero(&r); + } + ibz_finalize(&r); + return (res); +} + +void +ibz_mat_4x4_copy(ibz_mat_4x4_t *new, const ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&((*new)[i][j]), &((*mat)[i][j])); + } + } +} + +void +ibz_mat_4x4_negate(ibz_mat_4x4_t *neg, const ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_neg(&((*neg)[i][j]), &((*mat)[i][j])); + } + } +} + +void +ibz_mat_4x4_transpose(ibz_mat_4x4_t *transposed, const ibz_mat_4x4_t *mat) +{ + ibz_mat_4x4_t work; + ibz_mat_4x4_init(&work); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(work[i][j]), &((*mat)[j][i])); + } + } + ibz_mat_4x4_copy(transposed, &work); + ibz_mat_4x4_finalize(&work); +} + +void +ibz_mat_4x4_zero(ibz_mat_4x4_t *zero) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&((*zero)[i][j]), 0); + } + } +} + +void +ibz_mat_4x4_identity(ibz_mat_4x4_t *id) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&((*id)[i][j]), 0); + } + ibz_set(&((*id)[i][i]), 1); + } +} + +int +ibz_mat_4x4_is_identity(const ibz_mat_4x4_t *mat) +{ + int res = 1; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + res = res && ibz_is_one(&((*mat)[i][j])) == (i == j); + } + } + return (res); +} + +int +ibz_mat_4x4_equal(const ibz_mat_4x4_t *mat1, const ibz_mat_4x4_t *mat2) +{ + int res = 0; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + res = res | ibz_cmp(&((*mat1)[i][j]), &((*mat2)[i][j])); + } + } + return (!res); +} + +void +ibz_mat_4x4_scalar_mul(ibz_mat_4x4_t *prod, const ibz_t *scalar, const ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_mul(&((*prod)[i][j]), &((*mat)[i][j]), scalar); + } + } +} + +void +ibz_mat_4x4_gcd(ibz_t *gcd, const ibz_mat_4x4_t *mat) +{ + ibz_t d; + ibz_init(&d); + ibz_copy(&d, &((*mat)[0][0])); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_gcd(&d, &d, &((*mat)[i][j])); + } + } + ibz_copy(gcd, &d); + ibz_finalize(&d); +} + +int +ibz_mat_4x4_scalar_div(ibz_mat_4x4_t *quot, const ibz_t *scalar, const ibz_mat_4x4_t *mat) +{ + int res = 1; + ibz_t r; + ibz_init(&r); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_div(&((*quot)[i][j]), &r, &((*mat)[i][j]), scalar); + res = res && ibz_is_zero(&r); + } + } + ibz_finalize(&r); + return (res); +} + +// 4x4 inversion helper functions +void +ibz_inv_dim4_make_coeff_pmp(ibz_t *coeff, + const ibz_t *a1, + const ibz_t *a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2) +{ + ibz_t prod, sum; + ibz_init(&prod); + ibz_init(&sum); + ibz_mul(&sum, a1, a2); + ibz_mul(&prod, b1, b2); + ibz_sub(&sum, &sum, &prod); + ibz_mul(&prod, c1, c2); + ibz_add(coeff, &sum, &prod); + ibz_finalize(&prod); + ibz_finalize(&sum); +} + +void +ibz_inv_dim4_make_coeff_mpm(ibz_t *coeff, + const ibz_t *a1, + const ibz_t 
*a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2) +{ + ibz_t prod, sum; + ibz_init(&prod); + ibz_init(&sum); + ibz_mul(&sum, b1, b2); + ibz_mul(&prod, a1, a2); + ibz_sub(&sum, &sum, &prod); + ibz_mul(&prod, c1, c2); + ibz_sub(coeff, &sum, &prod); + ibz_finalize(&prod); + ibz_finalize(&sum); +} + +// Method from https://www.geometrictools.com/Documentation/LaplaceExpansionTheorem.pdf 3rd of May +// 2023, 16h15 CEST +int +ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_4x4_t *mat) +{ + ibz_t prod, work_det; + ibz_mat_4x4_t work; + ibz_t s[6]; + ibz_t c[6]; + for (int i = 0; i < 6; i++) { + ibz_init(&(s[i])); + ibz_init(&(c[i])); + } + ibz_mat_4x4_init(&work); + ibz_init(&prod); + ibz_init(&work_det); + + // compute some 2x2 minors, store them in s and c + for (int i = 0; i < 3; i++) { + ibz_mat_2x2_det_from_ibz(&(s[i]), &((*mat)[0][0]), &((*mat)[0][i + 1]), &((*mat)[1][0]), &((*mat)[1][i + 1])); + ibz_mat_2x2_det_from_ibz(&(c[i]), &((*mat)[2][0]), &((*mat)[2][i + 1]), &((*mat)[3][0]), &((*mat)[3][i + 1])); + } + for (int i = 0; i < 2; i++) { + ibz_mat_2x2_det_from_ibz( + &(s[3 + i]), &((*mat)[0][1]), &((*mat)[0][2 + i]), &((*mat)[1][1]), &((*mat)[1][2 + i])); + ibz_mat_2x2_det_from_ibz( + &(c[3 + i]), &((*mat)[2][1]), &((*mat)[2][2 + i]), &((*mat)[3][1]), &((*mat)[3][2 + i])); + } + ibz_mat_2x2_det_from_ibz(&(s[5]), &((*mat)[0][2]), &((*mat)[0][3]), &((*mat)[1][2]), &((*mat)[1][3])); + ibz_mat_2x2_det_from_ibz(&(c[5]), &((*mat)[2][2]), &((*mat)[2][3]), &((*mat)[3][2]), &((*mat)[3][3])); + + // compute det + ibz_set(&work_det, 0); + for (int i = 0; i < 6; i++) { + ibz_mul(&prod, &(s[i]), &(c[5 - i])); + if ((i != 1) && (i != 4)) { + ibz_add(&work_det, &work_det, &prod); + } else { + ibz_sub(&work_det, &work_det, &prod); + } + } + // compute transposed adjugate + for (int j = 0; j < 4; j++) { + for (int k = 0; k < 2; k++) { + if ((k + j + 1) % 2 == 1) { + ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), + &((*mat)[1 - k][(j == 0)]), + &(c[6 - j - (j == 0)]), + &((*mat)[1 - k][2 - (j > 1)]), + &(c[4 - j - (j == 1)]), + &((*mat)[1 - k][3 - (j == 3)]), + &(c[3 - j - (j == 1) - (j == 2)])); + } else { + ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), + &((*mat)[1 - k][(j == 0)]), + &(c[6 - j - (j == 0)]), + &((*mat)[1 - k][2 - (j > 1)]), + &(c[4 - j - (j == 1)]), + &((*mat)[1 - k][3 - (j == 3)]), + &(c[3 - j - (j == 1) - (j == 2)])); + } + } + for (int k = 2; k < 4; k++) { + if ((k + j + 1) % 2 == 1) { + ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), + &((*mat)[3 - (k == 3)][(j == 0)]), + &(s[6 - j - (j == 0)]), + &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(s[4 - j - (j == 1)]), + &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(s[3 - j - (j == 1) - (j == 2)])); + } else { + ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), + &((*mat)[3 - (k == 3)][(j == 0)]), + &(s[6 - j - (j == 0)]), + &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(s[4 - j - (j == 1)]), + &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(s[3 - j - (j == 1) - (j == 2)])); + } + } + } + if (inv != NULL) { + // put transposed adjugate in result, or 0 if no inverse + ibz_set(&prod, !ibz_is_zero(&work_det)); + ibz_mat_4x4_scalar_mul(inv, &prod, &work); + } + // output det + if (det != NULL) + ibz_copy(det, &work_det); + for (int i = 0; i < 6; i++) { + ibz_finalize(&s[i]); + ibz_finalize(&c[i]); + } + ibz_mat_4x4_finalize(&work); + ibz_finalize(&work_det); + ibz_finalize(&prod); + return (!ibz_is_zero(det)); +} + +// matrix evaluation + +void +ibz_mat_4x4_eval(ibz_vec_4_t *res, const ibz_mat_4x4_t *mat, const 
ibz_vec_4_t *vec) +{ + ibz_vec_4_t sum; + ibz_t prod; + ibz_init(&prod); + ibz_vec_4_init(&sum); + // assume initialization to 0 + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_mul(&prod, &(*mat)[i][j], &(*vec)[j]); + ibz_add(&(sum[i]), &(sum[i]), &prod); + } + } + ibz_vec_4_copy(res, &sum); + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} + +void +ibz_mat_4x4_eval_t(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_mat_4x4_t *mat) +{ + ibz_vec_4_t sum; + ibz_t prod; + ibz_init(&prod); + ibz_vec_4_init(&sum); + // assume initialization to 0 + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_mul(&prod, &(*mat)[j][i], &(*vec)[j]); + ibz_add(&(sum[i]), &(sum[i]), &prod); + } + } + ibz_vec_4_copy(res, &sum); + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} + +// quadratic forms + +void +quat_qf_eval(ibz_t *res, const ibz_mat_4x4_t *qf, const ibz_vec_4_t *coord) +{ + ibz_vec_4_t sum; + ibz_t prod; + ibz_init(&prod); + ibz_vec_4_init(&sum); + ibz_mat_4x4_eval(&sum, qf, coord); + for (int i = 0; i < 4; i++) { + ibz_mul(&prod, &(sum[i]), &(*coord)[i]); + if (i > 0) { + ibz_add(&(sum[0]), &(sum[0]), &prod); + } else { + ibz_copy(&sum[0], &prod); + } + } + ibz_copy(res, &sum[0]); + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dpe.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dpe.h new file mode 100644 index 0000000000..b9a7a35e0b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dpe.h @@ -0,0 +1,743 @@ +/* Copyright (C) 2004-2024 Patrick Pelissier, Paul Zimmermann, LORIA/INRIA. + +This file is part of the DPE Library. + +The DPE Library is free software; you can redistribute it and/or modify +it under the terms of the GNU Lesser General Public License as published by +the Free Software Foundation; either version 3 of the License, or (at your +option) any later version. + +The DPE Library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +License for more details. + +You should have received a copy of the GNU Lesser General Public License +along with the DPE Library; see the file COPYING.LIB. +If not, see . */ + +#ifndef __DPE +#define __DPE + +#include /* For abort */ +#include /* For fprintf */ +#include /* for round, floor, ceil */ +#include + +/* if you change the version, please change it in Makefile too */ +#define DPE_VERSION_MAJOR 1 +#define DPE_VERSION_MINOR 7 + +#if defined(__GNUC__) && (__GNUC__ >= 3) +# define DPE_LIKELY(x) (__builtin_expect(!!(x),1)) +# define DPE_UNLIKELY(x) (__builtin_expect((x),0)) +# define DPE_UNUSED_ATTR __attribute__((unused)) +#else +# define DPE_LIKELY(x) (x) +# define DPE_UNLIKELY(x) (x) +# define DPE_UNUSED_ATTR +#endif + +/* If no user defined mode, define it to double */ +#if !defined(DPE_USE_DOUBLE) && !defined(DPE_USE_LONGDOUBLE) && !defined(DPE_USE_FLOAT128) +# define DPE_USE_DOUBLE +#endif + +#if defined(DPE_USE_DOUBLE) && defined(DPE_USE_LONGDOUBLE) +# error "Either DPE_USE_DOUBLE or DPE_USE_LONGDOUBLE shall be defined." +#elif defined(DPE_USE_DOUBLE) && defined(DPE_USE_USE_FLOAT128) +# error "Either DPE_USE_DOUBLE or DPE_USE_FLOAT128 shall be defined." +#elif defined(DPE_USE_LONG_DOUBLE) && defined(DPE_USE_USE_FLOAT128) +# error "Either DPE_USE_LONG_DOUBLE or DPE_USE_FLOAT128 shall be defined." 
+#endif + +#if (defined(__i386) || defined (__x86_64)) && !defined(DPE_LITTLEENDIAN32) && defined(DPE_USE_DOUBLE) +# define DPE_LITTLEENDIAN32 +#endif + +#if (defined(__STDC_VERSION__) && (__STDC_VERSION__>=199901L)) || defined (__GLIBC__) +# define DPE_DEFINE_ROUND_TRUNC +#endif + +#if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 43 +# define DPE_ISFINITE __builtin_isfinite +#elif defined(isfinite) +# define DPE_ISFINITE isfinite /* new C99 function */ +#else +# define DPE_ISFINITE finite /* obsolete BSD function */ +#endif + +/* DPE_LDEXP(DPE_DOUBLE m, DPEEXP e) return x = m * 2^e */ +/* DPE_FREXP(DPE_DOUBLE x, DPEEXP *e) returns m, e such that x = m * 2^e with + 1/2 <= m < 1 */ +/* DPE_ROUND(DPE_DOUBLE x) returns the nearest integer to x */ +#if defined(DPE_USE_DOUBLE) +# define DPE_DOUBLE double /* mantissa type */ +# define DPE_BITSIZE 53 /* bitsize of DPE_DOUBLE */ +# define DPE_2_POW_BITSIZE 0x1P53 +# if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 40 +# define DPE_LDEXP __builtin_ldexp +# define DPE_FREXP __builtin_frexp +# define DPE_FLOOR __builtin_floor +# define DPE_CEIL __builtin_ceil +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND __builtin_round +# define DPE_TRUNC __builtin_trunc +# endif +# else +# define DPE_LDEXP ldexp +# define DPE_FREXP frexp +# define DPE_FLOOR floor +# define DPE_CEIL ceil +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND round +# define DPE_TRUNC trunc +# endif +# endif + +#elif defined(DPE_USE_LONGDOUBLE) +# define DPE_DOUBLE long double +# define DPE_BITSIZE 64 +# define DPE_2_POW_BITSIZE 0x1P64 +# define DPE_LDEXP ldexpl +# define DPE_FREXP frexpl +# define DPE_FLOOR floorl +# define DPE_CEIL ceill +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND roundl +# define DPE_TRUNC truncl +# endif + +#elif defined(DPE_USE_FLOAT128) +# include "quadmath.h" +# define DPE_DOUBLE __float128 +# define DPE_BITSIZE 113 +# define DPE_2_POW_BITSIZE 0x1P113 +# define DPE_LDEXP ldexpq +# define DPE_FLOOR floorq +# define DPE_CEIL ceilq +# define DPE_FREXP frexpq +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND roundq +# define DPE_TRUNC truncq +# endif + +#else +# error "neither DPE_USE_DOUBLE, nor DPE_USE_LONGDOUBLE, nor DPE_USE_FLOAT128 is defined" +#endif + +/* If no C99, do what we can */ +#ifndef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND(x) ((DPE_DOUBLE) ((long long) ((x) + ((x) >= 0.0 ? 0.5 : -0.5)))) +# define DPE_TRUNC(x) ((DPE_DOUBLE) ((long long) ((x) + 0.0))) +#endif + +#if defined(DPE_USE_LONG) +# define DPE_EXP_T long /* exponent type */ +# define DPE_EXPMIN LONG_MIN /* smallest possible exponent */ +#elif defined(DPE_USE_LONGLONG) +# define DPE_EXP_T long long +# define DPE_EXPMIN LLONG_MIN +#else +# define DPE_EXP_T int /* exponent type */ +# define DPE_EXPMIN INT_MIN /* smallest possible exponent */ +#endif + +#ifdef DPE_LITTLEENDIAN32 +typedef union +{ + double d; +#if INT_MAX == 0x7FFFFFFFL + int i[2]; +#elif LONG_MAX == 0x7FFFFFFFL + long i[2]; +#elif SHRT_MAX == 0x7FFFFFFFL + short i[2]; +#else +# error Cannot find a 32 bits integer type. +#endif +} dpe_double_words; +#endif + +typedef struct +{ + DPE_DOUBLE d; /* significand */ + DPE_EXP_T exp; /* exponent */ +} dpe_struct; + +typedef dpe_struct dpe_t[1]; + +#define DPE_MANT(x) ((x)->d) +#define DPE_EXP(x) ((x)->exp) +#define DPE_SIGN(x) ((DPE_MANT(x) < 0.0) ? 
-1 : (DPE_MANT(x) > 0.0)) + +#define DPE_INLINE static inline + +/* initialize */ +DPE_INLINE void +dpe_init (dpe_t x DPE_UNUSED_ATTR) +{ +} + +/* clear */ +DPE_INLINE void +dpe_clear (dpe_t x DPE_UNUSED_ATTR) +{ +} + +/* set x to y */ +DPE_INLINE void +dpe_set (dpe_t x, dpe_t y) +{ + DPE_MANT(x) = DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y); +} + +/* set x to -y */ +DPE_INLINE void +dpe_neg (dpe_t x, dpe_t y) +{ + DPE_MANT(x) = -DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y); +} + +/* set x to |y| */ +DPE_INLINE void +dpe_abs (dpe_t x, dpe_t y) +{ + DPE_MANT(x) = (DPE_MANT(y) >= 0) ? DPE_MANT(y) : -DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y); +} + +/* set mantissa in [1/2, 1), except for 0 which has minimum exponent */ +/* FIXME: don't inline this function yet ? */ +static void +dpe_normalize (dpe_t x) +{ + if (DPE_UNLIKELY (DPE_MANT(x) == 0.0 || DPE_ISFINITE (DPE_MANT(x)) == 0)) + { + if (DPE_MANT(x) == 0.0) + DPE_EXP(x) = DPE_EXPMIN; + /* otherwise let the exponent of NaN, Inf unchanged */ + } + else + { + DPE_EXP_T e; +#ifdef DPE_LITTLEENDIAN32 /* 32-bit little endian */ + dpe_double_words dw; + dw.d = DPE_MANT(x); + e = (dw.i[1] >> 20) & 0x7FF; /* unbiased exponent, 1022 for m=1/2 */ + DPE_EXP(x) += e - 1022; + dw.i[1] = (dw.i[1] & 0x800FFFFF) | 0x3FE00000; + DPE_MANT(x) = dw.d; +#else /* portable code */ + double m = DPE_MANT(x); + DPE_MANT(x) = DPE_FREXP (m, &e); + DPE_EXP(x) += e; +#endif + } +} + +#if defined(DPE_USE_DOUBLE) +static const double dpe_scale_tab[54] = { + 0x1P0, 0x1P-1, 0x1P-2, 0x1P-3, 0x1P-4, 0x1P-5, 0x1P-6, 0x1P-7, 0x1P-8, + 0x1P-9, 0x1P-10, 0x1P-11, 0x1P-12, 0x1P-13, 0x1P-14, 0x1P-15, 0x1P-16, + 0x1P-17, 0x1P-18, 0x1P-19, 0x1P-20, 0x1P-21, 0x1P-22, 0x1P-23, 0x1P-24, + 0x1P-25, 0x1P-26, 0x1P-27, 0x1P-28, 0x1P-29, 0x1P-30, 0x1P-31, 0x1P-32, + 0x1P-33, 0x1P-34, 0x1P-35, 0x1P-36, 0x1P-37, 0x1P-38, 0x1P-39, 0x1P-40, + 0x1P-41, 0x1P-42, 0x1P-43, 0x1P-44, 0x1P-45, 0x1P-46, 0x1P-47, 0x1P-48, + 0x1P-49, 0x1P-50, 0x1P-51, 0x1P-52, 0x1P-53}; +#endif + +DPE_INLINE DPE_DOUBLE +dpe_scale (DPE_DOUBLE d, int s) +{ + /* -DPE_BITSIZE < s <= 0 and 1/2 <= d < 1 */ +#if defined(DPE_USE_DOUBLE) + return d * dpe_scale_tab [-s]; +#else /* portable code */ + return DPE_LDEXP (d, s); +#endif +} + +/* set x to y */ +DPE_INLINE void +dpe_set_d (dpe_t x, double y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +/* set x to y */ +DPE_INLINE void +dpe_set_ld (dpe_t x, long double y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +/* set x to y */ +DPE_INLINE void +dpe_set_ui (dpe_t x, unsigned long y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +/* set x to y */ +DPE_INLINE void +dpe_set_si (dpe_t x, long y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +DPE_INLINE long +dpe_get_si (dpe_t x) +{ + DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); + return (long) d; +} + +DPE_INLINE unsigned long +dpe_get_ui (dpe_t x) +{ + DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); + return (d < 0.0) ? 
0 : (unsigned long) d; +} + +DPE_INLINE double +dpe_get_d (dpe_t x) +{ + return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); +} + +DPE_INLINE long double +dpe_get_ld (dpe_t x) +{ + return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); +} + +#if defined(__GMP_H__) || defined(__MINI_GMP_H__) +/* set x to y */ +DPE_INLINE void +dpe_set_z (dpe_t x, mpz_t y) +{ + long e; + DPE_MANT(x) = mpz_get_d_2exp (&e, y); + DPE_EXP(x) = (DPE_EXP_T) e; +} + +/* set x to y, rounded to nearest */ +DPE_INLINE void +dpe_get_z (mpz_t x, dpe_t y) +{ + DPE_EXP_T ey = DPE_EXP(y); + if (ey >= DPE_BITSIZE) /* y is an integer */ + { + DPE_DOUBLE d = DPE_MANT(y) * DPE_2_POW_BITSIZE; /* d is an integer */ + mpz_set_d (x, d); /* should be exact */ + mpz_mul_2exp (x, x, (unsigned long) ey - DPE_BITSIZE); + } + else /* DPE_EXP(y) < DPE_BITSIZE */ + { + if (DPE_UNLIKELY (ey < 0)) /* |y| < 1/2 */ + mpz_set_ui (x, 0); + else + { + DPE_DOUBLE d = DPE_LDEXP(DPE_MANT(y), ey); + mpz_set_d (x, (double) DPE_ROUND(d)); + } + } +} + +/* return e and x such that y = x*2^e */ +DPE_INLINE mp_exp_t +dpe_get_z_exp (mpz_t x, dpe_t y) +{ + mpz_set_d (x, DPE_MANT (y) * DPE_2_POW_BITSIZE); + return DPE_EXP(y) - DPE_BITSIZE; +} +#endif + +/* x <- y + z, assuming y and z are normalized, returns x normalized */ +DPE_INLINE void +dpe_add (dpe_t x, dpe_t y, dpe_t z) +{ + if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) + /* |z| < 1/2*ulp(y), thus o(y+z) = y */ + dpe_set (x, y); + else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) + dpe_set (x, z); + else + { + DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ + + if (d >= 0) + { + DPE_MANT(x) = DPE_MANT(y) + dpe_scale (DPE_MANT(z), -d); + DPE_EXP(x) = DPE_EXP(y); + } + else + { + DPE_MANT(x) = DPE_MANT(z) + dpe_scale (DPE_MANT(y), d); + DPE_EXP(x) = DPE_EXP(z); + } + dpe_normalize (x); + } +} + +/* x <- y - z, assuming y and z are normalized, returns x normalized */ +DPE_INLINE void +dpe_sub (dpe_t x, dpe_t y, dpe_t z) +{ + if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) + /* |z| < 1/2*ulp(y), thus o(y-z) = y */ + dpe_set (x, y); + else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) + dpe_neg (x, z); + else + { + DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ + + if (d >= 0) + { + DPE_MANT(x) = DPE_MANT(y) - dpe_scale (DPE_MANT(z), -d); + DPE_EXP(x) = DPE_EXP(y); + } + else + { + DPE_MANT(x) = dpe_scale (DPE_MANT(y), d) - DPE_MANT(z); + DPE_EXP(x) = DPE_EXP(z); + } + dpe_normalize (x); + } +} + +/* x <- y * z, assuming y and z are normalized, returns x normalized */ +DPE_INLINE void +dpe_mul (dpe_t x, dpe_t y, dpe_t z) +{ + DPE_MANT(x) = DPE_MANT(y) * DPE_MANT(z); + DPE_EXP(x) = DPE_EXP(y) + DPE_EXP(z); + dpe_normalize (x); +} + +/* x <- sqrt(y), assuming y is normalized, returns x normalized */ +DPE_INLINE void +dpe_sqrt (dpe_t x, dpe_t y) +{ + DPE_EXP_T ey = DPE_EXP(y); + if (ey % 2) + { + /* since 1/2 <= my < 1, 1/4 <= my/2 < 1 */ + DPE_MANT(x) = sqrt (0.5 * DPE_MANT(y)); + DPE_EXP(x) = (ey + 1) / 2; + } + else + { + DPE_MANT(x) = sqrt (DPE_MANT(y)); + DPE_EXP(x) = ey / 2; + } +} + +/* x <- y / z, assuming y and z are normalized, returns x normalized. + Assumes z is not zero. 
*/ +DPE_INLINE void +dpe_div (dpe_t x, dpe_t y, dpe_t z) +{ + DPE_MANT(x) = DPE_MANT(y) / DPE_MANT(z); + DPE_EXP(x) = DPE_EXP(y) - DPE_EXP(z); + dpe_normalize (x); +} + +/* x <- y * z, assuming y normalized, returns x normalized */ +DPE_INLINE void +dpe_mul_ui (dpe_t x, dpe_t y, unsigned long z) +{ + DPE_MANT(x) = DPE_MANT(y) * (DPE_DOUBLE) z; + DPE_EXP(x) = DPE_EXP(y); + dpe_normalize (x); +} + +/* x <- y / z, assuming y normalized, z non-zero, returns x normalized */ +DPE_INLINE void +dpe_div_ui (dpe_t x, dpe_t y, unsigned long z) +{ + DPE_MANT(x) = DPE_MANT(y) / (DPE_DOUBLE) z; + DPE_EXP(x) = DPE_EXP(y); + dpe_normalize (x); +} + +/* x <- y * 2^e */ +DPE_INLINE void +dpe_mul_2exp (dpe_t x, dpe_t y, unsigned long e) +{ + DPE_MANT(x) = DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y) + (DPE_EXP_T) e; +} + +/* x <- y / 2^e */ +DPE_INLINE void +dpe_div_2exp (dpe_t x, dpe_t y, unsigned long e) +{ + DPE_MANT(x) = DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y) - (DPE_EXP_T) e; +} + +/* return e and x such that y = x*2^e (equality is not guaranteed if the 'long' + type has fewer bits than the significand in dpe_t) */ +DPE_INLINE DPE_EXP_T +dpe_get_si_exp (long *x, dpe_t y) +{ + if (sizeof(long) == 4) /* 32-bit word: long has 31 bits */ + { + *x = (long) (DPE_MANT(y) * 2147483648.0); + return DPE_EXP(y) - 31; + } + else if (sizeof(long) == 8) /* 64-bit word: long has 63 bits */ + { + *x = (long) (DPE_MANT (y) * 9223372036854775808.0); + return DPE_EXP(y) - 63; + } + else + { + fprintf (stderr, "Error, neither 32-bit nor 64-bit word\n"); + exit (1); + } +} + +static DPE_UNUSED_ATTR int dpe_str_prec = 16; +static int dpe_out_str (FILE *s, int base, dpe_t x) DPE_UNUSED_ATTR; + +static int +dpe_out_str (FILE *s, int base, dpe_t x) +{ + DPE_DOUBLE d = DPE_MANT(x); + DPE_EXP_T e2 = DPE_EXP(x); + int e10 = 0; + char sign = ' '; + if (DPE_UNLIKELY (base != 10)) + { + fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); + exit (1); + } + if (d == 0.0) +#ifdef DPE_USE_DOUBLE + return fprintf (s, "%1.*f", dpe_str_prec, d); +#else + return fprintf (s, "%1.*Lf", dpe_str_prec, (long double) d); +#endif + if (d < 0) + { + d = -d; + sign = '-'; + } + if (e2 > 0) + { + while (e2 > 0) + { + e2 --; + d *= 2.0; + if (d >= 10.0) + { + d /= 10.0; + e10 ++; + } + } + } + else /* e2 <= 0 */ + { + while (e2 < 0) + { + e2 ++; + d /= 2.0; + if (d < 1.0) + { + d *= 10.0; + e10 --; + } + } + } +#ifdef DPE_USE_DOUBLE + return fprintf (s, "%c%1.*f*10^%d", sign, dpe_str_prec, d, e10); +#else + return fprintf (s, "%c%1.*Lf*10^%d", sign, dpe_str_prec, (long double) d, e10); +#endif +} + +static size_t dpe_inp_str (dpe_t x, FILE *s, int base) DPE_UNUSED_ATTR; + +static size_t +dpe_inp_str (dpe_t x, FILE *s, int base) +{ + size_t res; + DPE_DOUBLE d; + if (DPE_UNLIKELY (base != 10)) + { + fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); + exit (1); + } +#ifdef DPE_USE_DOUBLE + res = fscanf (s, "%lf", &d); +#elif defined(DPE_USE_LONGDOUBLE) + res = fscanf (s, "%Lf", &d); +#else + { + long double d_ld; + res = fscanf (s, "%Lf", &d_ld); + d = d_ld; + } +#endif + dpe_set_d (x, d); + return res; +} + +DPE_INLINE void +dpe_dump (dpe_t x) +{ + dpe_out_str (stdout, 10, x); + putchar ('\n'); +} + +DPE_INLINE int +dpe_zero_p (dpe_t x) +{ + return DPE_MANT (x) == 0; +} + +/* return a positive value if x > y + a negative value if x < y + and 0 otherwise (x=y). 
*/ +DPE_INLINE int +dpe_cmp (dpe_t x, dpe_t y) +{ + int sx = DPE_SIGN(x); + int d = sx - DPE_SIGN(y); + + if (d != 0) + return d; + else if (DPE_EXP(x) > DPE_EXP(y)) + return (sx > 0) ? 1 : -1; + else if (DPE_EXP(y) > DPE_EXP(x)) + return (sx > 0) ? -1 : 1; + else /* DPE_EXP(x) = DPE_EXP(y) */ + return (DPE_MANT(x) < DPE_MANT(y)) ? -1 : (DPE_MANT(x) > DPE_MANT(y)); +} + +DPE_INLINE int +dpe_cmp_d (dpe_t x, double d) +{ + dpe_t y; + dpe_set_d (y, d); + return dpe_cmp (x, y); +} + +DPE_INLINE int +dpe_cmp_ui (dpe_t x, unsigned long d) +{ + dpe_t y; + dpe_set_ui (y, d); + return dpe_cmp (x, y); +} + +DPE_INLINE int +dpe_cmp_si (dpe_t x, long d) +{ + dpe_t y; + dpe_set_si (y, d); + return dpe_cmp (x, y); +} + +/* set x to integer nearest to y */ +DPE_INLINE void +dpe_round (dpe_t x, dpe_t y) +{ + if (DPE_EXP(y) < 0) /* |y| < 1/2 */ + dpe_set_ui (x, 0); + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set (x, y); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, DPE_ROUND(d)); + } +} + +/* set x to the fractional part of y, defined as y - trunc(y), thus the + fractional part has absolute value in [0, 1), and same sign as y */ +DPE_INLINE void +dpe_frac (dpe_t x, dpe_t y) +{ + /* If |y| is smaller than 1, keep it */ + if (DPE_EXP(y) <= 0) + dpe_set (x, y); + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set_ui (x, 0); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, d - DPE_TRUNC(d)); + } +} + +/* set x to largest integer <= y */ +DPE_INLINE void +dpe_floor (dpe_t x, dpe_t y) +{ + if (DPE_EXP(y) <= 0) /* |y| < 1 */ + { + if (DPE_SIGN(y) >= 0) /* 0 <= y < 1 */ + dpe_set_ui (x, 0); + else /* -1 < y < 0 */ + dpe_set_si (x, -1); + } + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set (x, y); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, DPE_FLOOR(d)); + } +} + +/* set x to smallest integer >= y */ +DPE_INLINE void +dpe_ceil (dpe_t x, dpe_t y) +{ + if (DPE_EXP(y) <= 0) /* |y| < 1 */ + { + if (DPE_SIGN(y) > 0) /* 0 < y < 1 */ + dpe_set_ui (x, 1); + else /* -1 < y <= 0 */ + dpe_set_si (x, 0); + } + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set (x, y); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, DPE_CEIL(d)); + } +} + +DPE_INLINE void +dpe_swap (dpe_t x, dpe_t y) +{ + DPE_EXP_T i = DPE_EXP (x); + DPE_DOUBLE d = DPE_MANT (x); + DPE_EXP (x) = DPE_EXP (y); + DPE_MANT (x) = DPE_MANT (y); + DPE_EXP (y) = i; + DPE_MANT (y) = d; +} + +#endif /* __DPE */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/finit.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/finit.c new file mode 100644 index 0000000000..b3808edf07 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/finit.c @@ -0,0 +1,122 @@ +#include "internal.h" + +void +quat_alg_init_set(quat_alg_t *alg, const ibz_t *p) +{ + ibz_init(&(*alg).p); + ibz_copy(&(*alg).p, p); +} +void +quat_alg_finalize(quat_alg_t *alg) +{ + ibz_finalize(&(*alg).p); +} + +void +quat_alg_elem_init(quat_alg_elem_t *elem) +{ + ibz_vec_4_init(&(*elem).coord); + ibz_init(&(*elem).denom); + ibz_set(&(*elem).denom, 1); +} +void +quat_alg_elem_finalize(quat_alg_elem_t *elem) +{ + ibz_vec_4_finalize(&(*elem).coord); + ibz_finalize(&(*elem).denom); +} + +void +ibz_vec_2_init(ibz_vec_2_t *vec) +{ + ibz_init(&((*vec)[0])); + ibz_init(&((*vec)[1])); +} + +void +ibz_vec_2_finalize(ibz_vec_2_t *vec) +{ + ibz_finalize(&((*vec)[0])); + 
ibz_finalize(&((*vec)[1])); +} + +void +ibz_vec_4_init(ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_init(&(*vec)[i]); + } +} +void +ibz_vec_4_finalize(ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_finalize(&(*vec)[i]); + } +} + +void +ibz_mat_2x2_init(ibz_mat_2x2_t *mat) +{ + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_init(&(*mat)[i][j]); + } + } +} +void +ibz_mat_2x2_finalize(ibz_mat_2x2_t *mat) +{ + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_finalize(&(*mat)[i][j]); + } + } +} + +void +ibz_mat_4x4_init(ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_init(&(*mat)[i][j]); + } + } +} +void +ibz_mat_4x4_finalize(ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_finalize(&(*mat)[i][j]); + } + } +} + +void +quat_lattice_init(quat_lattice_t *lat) +{ + ibz_mat_4x4_init(&(*lat).basis); + ibz_init(&(*lat).denom); + ibz_set(&(*lat).denom, 1); +} +void +quat_lattice_finalize(quat_lattice_t *lat) +{ + ibz_finalize(&(*lat).denom); + ibz_mat_4x4_finalize(&(*lat).basis); +} + +void +quat_left_ideal_init(quat_left_ideal_t *lideal) +{ + quat_lattice_init(&(*lideal).lattice); + ibz_init(&(*lideal).norm); + (*lideal).parent_order = NULL; +} +void +quat_left_ideal_finalize(quat_left_ideal_t *lideal) +{ + ibz_finalize(&(*lideal).norm); + quat_lattice_finalize(&(*lideal).lattice); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fips202.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fips202.c deleted file mode 100644 index f2992d8c7f..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fips202.c +++ /dev/null @@ -1,876 +0,0 @@ -// SPDX-License-Identifier: PD and Apache-2.0 - -/* FIPS202 implementation based on code from PQClean, - * which is in turn based based on the public domain implementation in - * crypto_hash/keccakc512/simple/ from http://bench.cr.yp.to/supercop.html - * by Ronny Van Keer - * and the public domain "TweetFips202" implementation - * from https://twitter.com/tweetfips202 - * by Gilles Van Assche, Daniel J. 
Bernstein, and Peter Schwabe */ - -#include -#include -#include -#include - -#include "fips202.h" - -#define NROUNDS 24 -#define ROL(a, offset) (((a) << (offset)) ^ ((a) >> (64 - (offset)))) - -/************************************************* - * Name: load64 - * - * Description: Load 8 bytes into uint64_t in little-endian order - * - * Arguments: - const uint8_t *x: pointer to input byte array - * - * Returns the loaded 64-bit unsigned integer - **************************************************/ -static uint64_t load64(const uint8_t *x) { - uint64_t r = 0; - for (size_t i = 0; i < 8; ++i) { - r |= (uint64_t)x[i] << 8 * i; - } - - return r; -} - -/************************************************* - * Name: store64 - * - * Description: Store a 64-bit integer to a byte array in little-endian order - * - * Arguments: - uint8_t *x: pointer to the output byte array - * - uint64_t u: input 64-bit unsigned integer - **************************************************/ -static void store64(uint8_t *x, uint64_t u) { - for (size_t i = 0; i < 8; ++i) { - x[i] = (uint8_t) (u >> 8 * i); - } -} - -/* Keccak round constants */ -static const uint64_t KeccakF_RoundConstants[NROUNDS] = { - 0x0000000000000001ULL, 0x0000000000008082ULL, - 0x800000000000808aULL, 0x8000000080008000ULL, - 0x000000000000808bULL, 0x0000000080000001ULL, - 0x8000000080008081ULL, 0x8000000000008009ULL, - 0x000000000000008aULL, 0x0000000000000088ULL, - 0x0000000080008009ULL, 0x000000008000000aULL, - 0x000000008000808bULL, 0x800000000000008bULL, - 0x8000000000008089ULL, 0x8000000000008003ULL, - 0x8000000000008002ULL, 0x8000000000000080ULL, - 0x000000000000800aULL, 0x800000008000000aULL, - 0x8000000080008081ULL, 0x8000000000008080ULL, - 0x0000000080000001ULL, 0x8000000080008008ULL -}; - -/************************************************* - * Name: KeccakF1600_StatePermute - * - * Description: The Keccak F1600 Permutation - * - * Arguments: - uint64_t *state: pointer to input/output Keccak state - **************************************************/ -static void KeccakF1600_StatePermute(uint64_t *state) { - int round; - - uint64_t Aba, Abe, Abi, Abo, Abu; - uint64_t Aga, Age, Agi, Ago, Agu; - uint64_t Aka, Ake, Aki, Ako, Aku; - uint64_t Ama, Ame, Ami, Amo, Amu; - uint64_t Asa, Ase, Asi, Aso, Asu; - uint64_t BCa, BCe, BCi, BCo, BCu; - uint64_t Da, De, Di, Do, Du; - uint64_t Eba, Ebe, Ebi, Ebo, Ebu; - uint64_t Ega, Ege, Egi, Ego, Egu; - uint64_t Eka, Eke, Eki, Eko, Eku; - uint64_t Ema, Eme, Emi, Emo, Emu; - uint64_t Esa, Ese, Esi, Eso, Esu; - - // copyFromState(A, state) - Aba = state[0]; - Abe = state[1]; - Abi = state[2]; - Abo = state[3]; - Abu = state[4]; - Aga = state[5]; - Age = state[6]; - Agi = state[7]; - Ago = state[8]; - Agu = state[9]; - Aka = state[10]; - Ake = state[11]; - Aki = state[12]; - Ako = state[13]; - Aku = state[14]; - Ama = state[15]; - Ame = state[16]; - Ami = state[17]; - Amo = state[18]; - Amu = state[19]; - Asa = state[20]; - Ase = state[21]; - Asi = state[22]; - Aso = state[23]; - Asu = state[24]; - - for (round = 0; round < NROUNDS; round += 2) { - // prepareTheta - BCa = Aba ^ Aga ^ Aka ^ Ama ^ Asa; - BCe = Abe ^ Age ^ Ake ^ Ame ^ Ase; - BCi = Abi ^ Agi ^ Aki ^ Ami ^ Asi; - BCo = Abo ^ Ago ^ Ako ^ Amo ^ Aso; - BCu = Abu ^ Agu ^ Aku ^ Amu ^ Asu; - - // thetaRhoPiChiIotaPrepareTheta(round , A, E) - Da = BCu ^ ROL(BCe, 1); - De = BCa ^ ROL(BCi, 1); - Di = BCe ^ ROL(BCo, 1); - Do = BCi ^ ROL(BCu, 1); - Du = BCo ^ ROL(BCa, 1); - - Aba ^= Da; - BCa = Aba; - Age ^= De; - BCe = ROL(Age, 44); - Aki ^= Di; - BCi = 
ROL(Aki, 43); - Amo ^= Do; - BCo = ROL(Amo, 21); - Asu ^= Du; - BCu = ROL(Asu, 14); - Eba = BCa ^ ((~BCe) & BCi); - Eba ^= KeccakF_RoundConstants[round]; - Ebe = BCe ^ ((~BCi) & BCo); - Ebi = BCi ^ ((~BCo) & BCu); - Ebo = BCo ^ ((~BCu) & BCa); - Ebu = BCu ^ ((~BCa) & BCe); - - Abo ^= Do; - BCa = ROL(Abo, 28); - Agu ^= Du; - BCe = ROL(Agu, 20); - Aka ^= Da; - BCi = ROL(Aka, 3); - Ame ^= De; - BCo = ROL(Ame, 45); - Asi ^= Di; - BCu = ROL(Asi, 61); - Ega = BCa ^ ((~BCe) & BCi); - Ege = BCe ^ ((~BCi) & BCo); - Egi = BCi ^ ((~BCo) & BCu); - Ego = BCo ^ ((~BCu) & BCa); - Egu = BCu ^ ((~BCa) & BCe); - - Abe ^= De; - BCa = ROL(Abe, 1); - Agi ^= Di; - BCe = ROL(Agi, 6); - Ako ^= Do; - BCi = ROL(Ako, 25); - Amu ^= Du; - BCo = ROL(Amu, 8); - Asa ^= Da; - BCu = ROL(Asa, 18); - Eka = BCa ^ ((~BCe) & BCi); - Eke = BCe ^ ((~BCi) & BCo); - Eki = BCi ^ ((~BCo) & BCu); - Eko = BCo ^ ((~BCu) & BCa); - Eku = BCu ^ ((~BCa) & BCe); - - Abu ^= Du; - BCa = ROL(Abu, 27); - Aga ^= Da; - BCe = ROL(Aga, 36); - Ake ^= De; - BCi = ROL(Ake, 10); - Ami ^= Di; - BCo = ROL(Ami, 15); - Aso ^= Do; - BCu = ROL(Aso, 56); - Ema = BCa ^ ((~BCe) & BCi); - Eme = BCe ^ ((~BCi) & BCo); - Emi = BCi ^ ((~BCo) & BCu); - Emo = BCo ^ ((~BCu) & BCa); - Emu = BCu ^ ((~BCa) & BCe); - - Abi ^= Di; - BCa = ROL(Abi, 62); - Ago ^= Do; - BCe = ROL(Ago, 55); - Aku ^= Du; - BCi = ROL(Aku, 39); - Ama ^= Da; - BCo = ROL(Ama, 41); - Ase ^= De; - BCu = ROL(Ase, 2); - Esa = BCa ^ ((~BCe) & BCi); - Ese = BCe ^ ((~BCi) & BCo); - Esi = BCi ^ ((~BCo) & BCu); - Eso = BCo ^ ((~BCu) & BCa); - Esu = BCu ^ ((~BCa) & BCe); - - // prepareTheta - BCa = Eba ^ Ega ^ Eka ^ Ema ^ Esa; - BCe = Ebe ^ Ege ^ Eke ^ Eme ^ Ese; - BCi = Ebi ^ Egi ^ Eki ^ Emi ^ Esi; - BCo = Ebo ^ Ego ^ Eko ^ Emo ^ Eso; - BCu = Ebu ^ Egu ^ Eku ^ Emu ^ Esu; - - // thetaRhoPiChiIotaPrepareTheta(round+1, E, A) - Da = BCu ^ ROL(BCe, 1); - De = BCa ^ ROL(BCi, 1); - Di = BCe ^ ROL(BCo, 1); - Do = BCi ^ ROL(BCu, 1); - Du = BCo ^ ROL(BCa, 1); - - Eba ^= Da; - BCa = Eba; - Ege ^= De; - BCe = ROL(Ege, 44); - Eki ^= Di; - BCi = ROL(Eki, 43); - Emo ^= Do; - BCo = ROL(Emo, 21); - Esu ^= Du; - BCu = ROL(Esu, 14); - Aba = BCa ^ ((~BCe) & BCi); - Aba ^= KeccakF_RoundConstants[round + 1]; - Abe = BCe ^ ((~BCi) & BCo); - Abi = BCi ^ ((~BCo) & BCu); - Abo = BCo ^ ((~BCu) & BCa); - Abu = BCu ^ ((~BCa) & BCe); - - Ebo ^= Do; - BCa = ROL(Ebo, 28); - Egu ^= Du; - BCe = ROL(Egu, 20); - Eka ^= Da; - BCi = ROL(Eka, 3); - Eme ^= De; - BCo = ROL(Eme, 45); - Esi ^= Di; - BCu = ROL(Esi, 61); - Aga = BCa ^ ((~BCe) & BCi); - Age = BCe ^ ((~BCi) & BCo); - Agi = BCi ^ ((~BCo) & BCu); - Ago = BCo ^ ((~BCu) & BCa); - Agu = BCu ^ ((~BCa) & BCe); - - Ebe ^= De; - BCa = ROL(Ebe, 1); - Egi ^= Di; - BCe = ROL(Egi, 6); - Eko ^= Do; - BCi = ROL(Eko, 25); - Emu ^= Du; - BCo = ROL(Emu, 8); - Esa ^= Da; - BCu = ROL(Esa, 18); - Aka = BCa ^ ((~BCe) & BCi); - Ake = BCe ^ ((~BCi) & BCo); - Aki = BCi ^ ((~BCo) & BCu); - Ako = BCo ^ ((~BCu) & BCa); - Aku = BCu ^ ((~BCa) & BCe); - - Ebu ^= Du; - BCa = ROL(Ebu, 27); - Ega ^= Da; - BCe = ROL(Ega, 36); - Eke ^= De; - BCi = ROL(Eke, 10); - Emi ^= Di; - BCo = ROL(Emi, 15); - Eso ^= Do; - BCu = ROL(Eso, 56); - Ama = BCa ^ ((~BCe) & BCi); - Ame = BCe ^ ((~BCi) & BCo); - Ami = BCi ^ ((~BCo) & BCu); - Amo = BCo ^ ((~BCu) & BCa); - Amu = BCu ^ ((~BCa) & BCe); - - Ebi ^= Di; - BCa = ROL(Ebi, 62); - Ego ^= Do; - BCe = ROL(Ego, 55); - Eku ^= Du; - BCi = ROL(Eku, 39); - Ema ^= Da; - BCo = ROL(Ema, 41); - Ese ^= De; - BCu = ROL(Ese, 2); - Asa = BCa ^ ((~BCe) & BCi); - Ase = BCe ^ ((~BCi) & BCo); - Asi = BCi ^ 
((~BCo) & BCu); - Aso = BCo ^ ((~BCu) & BCa); - Asu = BCu ^ ((~BCa) & BCe); - } - - // copyToState(state, A) - state[0] = Aba; - state[1] = Abe; - state[2] = Abi; - state[3] = Abo; - state[4] = Abu; - state[5] = Aga; - state[6] = Age; - state[7] = Agi; - state[8] = Ago; - state[9] = Agu; - state[10] = Aka; - state[11] = Ake; - state[12] = Aki; - state[13] = Ako; - state[14] = Aku; - state[15] = Ama; - state[16] = Ame; - state[17] = Ami; - state[18] = Amo; - state[19] = Amu; - state[20] = Asa; - state[21] = Ase; - state[22] = Asi; - state[23] = Aso; - state[24] = Asu; -} - -/************************************************* - * Name: keccak_absorb - * - * Description: Absorb step of Keccak; - * non-incremental, starts by zeroeing the state. - * - * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - * - const uint8_t *m: pointer to input to be absorbed into s - * - size_t mlen: length of input in bytes - * - uint8_t p: domain-separation byte for different - * Keccak-derived functions - **************************************************/ -static void keccak_absorb(uint64_t *s, uint32_t r, const uint8_t *m, - size_t mlen, uint8_t p) { - size_t i; - uint8_t t[200]; - - /* Zero state */ - for (i = 0; i < 25; ++i) { - s[i] = 0; - } - - while (mlen >= r) { - for (i = 0; i < r / 8; ++i) { - s[i] ^= load64(m + 8 * i); - } - - KeccakF1600_StatePermute(s); - mlen -= r; - m += r; - } - - for (i = 0; i < r; ++i) { - t[i] = 0; - } - for (i = 0; i < mlen; ++i) { - t[i] = m[i]; - } - t[i] = p; - t[r - 1] |= 128; - for (i = 0; i < r / 8; ++i) { - s[i] ^= load64(t + 8 * i); - } -} - -/************************************************* - * Name: keccak_squeezeblocks - * - * Description: Squeeze step of Keccak. Squeezes full blocks of r bytes each. - * Modifies the state. Can be called multiple times to keep - * squeezing, i.e., is incremental. - * - * Arguments: - uint8_t *h: pointer to output blocks - * - size_t nblocks: number of blocks to be - * squeezed (written to h) - * - uint64_t *s: pointer to input/output Keccak state - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - **************************************************/ -static void keccak_squeezeblocks(uint8_t *h, size_t nblocks, - uint64_t *s, uint32_t r) { - while (nblocks > 0) { - KeccakF1600_StatePermute(s); - for (size_t i = 0; i < (r >> 3); i++) { - store64(h + 8 * i, s[i]); - } - h += r; - nblocks--; - } -} - -/************************************************* - * Name: keccak_inc_init - * - * Description: Initializes the incremental Keccak state to zero. - * - * Arguments: - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. - **************************************************/ -static void keccak_inc_init(uint64_t *s_inc) { - size_t i; - - for (i = 0; i < 25; ++i) { - s_inc[i] = 0; - } - s_inc[25] = 0; -} - -/************************************************* - * Name: keccak_inc_absorb - * - * Description: Incremental keccak absorb - * Preceded by keccak_inc_init, succeeded by keccak_inc_finalize - * - * Arguments: - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. 
- * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - * - const uint8_t *m: pointer to input to be absorbed into s - * - size_t mlen: length of input in bytes - **************************************************/ -static void keccak_inc_absorb(uint64_t *s_inc, uint32_t r, const uint8_t *m, - size_t mlen) { - size_t i; - - /* Recall that s_inc[25] is the non-absorbed bytes xored into the state */ - while (mlen + s_inc[25] >= r) { - for (i = 0; i < r - (uint32_t)s_inc[25]; i++) { - /* Take the i'th byte from message - xor with the s_inc[25] + i'th byte of the state; little-endian */ - s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); - } - mlen -= (size_t)(r - s_inc[25]); - m += r - s_inc[25]; - s_inc[25] = 0; - - KeccakF1600_StatePermute(s_inc); - } - - for (i = 0; i < mlen; i++) { - s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); - } - s_inc[25] += mlen; -} - -/************************************************* - * Name: keccak_inc_finalize - * - * Description: Finalizes Keccak absorb phase, prepares for squeezing - * - * Arguments: - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - * - uint8_t p: domain-separation byte for different - * Keccak-derived functions - **************************************************/ -static void keccak_inc_finalize(uint64_t *s_inc, uint32_t r, uint8_t p) { - /* After keccak_inc_absorb, we are guaranteed that s_inc[25] < r, - so we can always use one more byte for p in the current state. */ - s_inc[s_inc[25] >> 3] ^= (uint64_t)p << (8 * (s_inc[25] & 0x07)); - s_inc[(r - 1) >> 3] ^= (uint64_t)128 << (8 * ((r - 1) & 0x07)); - s_inc[25] = 0; -} - -/************************************************* - * Name: keccak_inc_squeeze - * - * Description: Incremental Keccak squeeze; can be called on byte-level - * - * Arguments: - uint8_t *h: pointer to output bytes - * - size_t outlen: number of bytes to be squeezed - * - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - **************************************************/ -static void keccak_inc_squeeze(uint8_t *h, size_t outlen, - uint64_t *s_inc, uint32_t r) { - size_t i; - - /* First consume any bytes we still have sitting around */ - for (i = 0; i < outlen && i < s_inc[25]; i++) { - /* There are s_inc[25] bytes left, so r - s_inc[25] is the first - available byte. We consume from there, i.e., up to r. 
*/ - h[i] = (uint8_t)(s_inc[(r - s_inc[25] + i) >> 3] >> (8 * ((r - s_inc[25] + i) & 0x07))); - } - h += i; - outlen -= i; - s_inc[25] -= i; - - /* Then squeeze the remaining necessary blocks */ - while (outlen > 0) { - KeccakF1600_StatePermute(s_inc); - - for (i = 0; i < outlen && i < r; i++) { - h[i] = (uint8_t)(s_inc[i >> 3] >> (8 * (i & 0x07))); - } - h += i; - outlen -= i; - s_inc[25] = r - i; - } -} - -void shake128_inc_init(shake128incctx *state) { - keccak_inc_init(state->ctx); -} - -void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHAKE128_RATE, input, inlen); -} - -void shake128_inc_finalize(shake128incctx *state) { - keccak_inc_finalize(state->ctx, SHAKE128_RATE, 0x1F); -} - -void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state) { - keccak_inc_squeeze(output, outlen, state->ctx, SHAKE128_RATE); -} - -void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void shake128_inc_ctx_release(shake128incctx *state) { - (void)state; -} - -void shake256_inc_init(shake256incctx *state) { - keccak_inc_init(state->ctx); -} - -void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHAKE256_RATE, input, inlen); -} - -void shake256_inc_finalize(shake256incctx *state) { - keccak_inc_finalize(state->ctx, SHAKE256_RATE, 0x1F); -} - -void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state) { - keccak_inc_squeeze(output, outlen, state->ctx, SHAKE256_RATE); -} - -void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void shake256_inc_ctx_release(shake256incctx *state) { - (void)state; -} - - -/************************************************* - * Name: shake128_absorb - * - * Description: Absorb step of the SHAKE128 XOF. - * non-incremental, starts by zeroeing the state. - * - * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state - * - const uint8_t *input: pointer to input to be absorbed - * into s - * - size_t inlen: length of input in bytes - **************************************************/ -void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen) { - keccak_absorb(state->ctx, SHAKE128_RATE, input, inlen, 0x1F); -} - -/************************************************* - * Name: shake128_squeezeblocks - * - * Description: Squeeze step of SHAKE128 XOF. Squeezes full blocks of - * SHAKE128_RATE bytes each. Modifies the state. Can be called - * multiple times to keep squeezing, i.e., is incremental. - * - * Arguments: - uint8_t *output: pointer to output blocks - * - size_t nblocks: number of blocks to be squeezed - * (written to output) - * - shake128ctx *state: pointer to input/output Keccak state - **************************************************/ -void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state) { - keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE128_RATE); -} - -void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); -} - -/** Release the allocated state. Call only once. */ -void shake128_ctx_release(shake128ctx *state) { - (void)state; -} - -/************************************************* - * Name: shake256_absorb - * - * Description: Absorb step of the SHAKE256 XOF. 
- * non-incremental, starts by zeroeing the state. - * - * Arguments: - shake256ctx *state: pointer to (uninitialized) output Keccak state - * - const uint8_t *input: pointer to input to be absorbed - * into s - * - size_t inlen: length of input in bytes - **************************************************/ -void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen) { - keccak_absorb(state->ctx, SHAKE256_RATE, input, inlen, 0x1F); -} - -/************************************************* - * Name: shake256_squeezeblocks - * - * Description: Squeeze step of SHAKE256 XOF. Squeezes full blocks of - * SHAKE256_RATE bytes each. Modifies the state. Can be called - * multiple times to keep squeezing, i.e., is incremental. - * - * Arguments: - uint8_t *output: pointer to output blocks - * - size_t nblocks: number of blocks to be squeezed - * (written to output) - * - shake256ctx *state: pointer to input/output Keccak state - **************************************************/ -void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state) { - keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE256_RATE); -} - -void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); -} - -/** Release the allocated state. Call only once. */ -void shake256_ctx_release(shake256ctx *state) { - (void)state; -} - -/************************************************* - * Name: shake128 - * - * Description: SHAKE128 XOF with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - size_t outlen: requested output length in bytes - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void shake128(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen) { - size_t nblocks = outlen / SHAKE128_RATE; - uint8_t t[SHAKE128_RATE]; - shake128ctx s; - - shake128_absorb(&s, input, inlen); - shake128_squeezeblocks(output, nblocks, &s); - - output += nblocks * SHAKE128_RATE; - outlen -= nblocks * SHAKE128_RATE; - - if (outlen) { - shake128_squeezeblocks(t, 1, &s); - for (size_t i = 0; i < outlen; ++i) { - output[i] = t[i]; - } - } - shake128_ctx_release(&s); -} - -/************************************************* - * Name: shake256 - * - * Description: SHAKE256 XOF with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - size_t outlen: requested output length in bytes - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void shake256(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen) { - size_t nblocks = outlen / SHAKE256_RATE; - uint8_t t[SHAKE256_RATE]; - shake256ctx s; - - shake256_absorb(&s, input, inlen); - shake256_squeezeblocks(output, nblocks, &s); - - output += nblocks * SHAKE256_RATE; - outlen -= nblocks * SHAKE256_RATE; - - if (outlen) { - shake256_squeezeblocks(t, 1, &s); - for (size_t i = 0; i < outlen; ++i) { - output[i] = t[i]; - } - } - shake256_ctx_release(&s); -} - -void sha3_256_inc_init(sha3_256incctx *state) { - keccak_inc_init(state->ctx); -} - -void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void sha3_256_inc_ctx_release(sha3_256incctx *state) { - (void)state; -} - -void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen) { - 
keccak_inc_absorb(state->ctx, SHA3_256_RATE, input, inlen); -} - -void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state) { - uint8_t t[SHA3_256_RATE]; - keccak_inc_finalize(state->ctx, SHA3_256_RATE, 0x06); - - keccak_squeezeblocks(t, 1, state->ctx, SHA3_256_RATE); - - sha3_256_inc_ctx_release(state); - - for (size_t i = 0; i < 32; i++) { - output[i] = t[i]; - } -} - -/************************************************* - * Name: sha3_256 - * - * Description: SHA3-256 with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen) { - uint64_t s[25]; - uint8_t t[SHA3_256_RATE]; - - /* Absorb input */ - keccak_absorb(s, SHA3_256_RATE, input, inlen, 0x06); - - /* Squeeze output */ - keccak_squeezeblocks(t, 1, s, SHA3_256_RATE); - - for (size_t i = 0; i < 32; i++) { - output[i] = t[i]; - } -} - -void sha3_384_inc_init(sha3_384incctx *state) { - keccak_inc_init(state->ctx); -} - -void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHA3_384_RATE, input, inlen); -} - -void sha3_384_inc_ctx_release(sha3_384incctx *state) { - (void)state; -} - -void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state) { - uint8_t t[SHA3_384_RATE]; - keccak_inc_finalize(state->ctx, SHA3_384_RATE, 0x06); - - keccak_squeezeblocks(t, 1, state->ctx, SHA3_384_RATE); - - sha3_384_inc_ctx_release(state); - - for (size_t i = 0; i < 48; i++) { - output[i] = t[i]; - } -} - -/************************************************* - * Name: sha3_384 - * - * Description: SHA3-256 with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen) { - uint64_t s[25]; - uint8_t t[SHA3_384_RATE]; - - /* Absorb input */ - keccak_absorb(s, SHA3_384_RATE, input, inlen, 0x06); - - /* Squeeze output */ - keccak_squeezeblocks(t, 1, s, SHA3_384_RATE); - - for (size_t i = 0; i < 48; i++) { - output[i] = t[i]; - } -} - -void sha3_512_inc_init(sha3_512incctx *state) { - keccak_inc_init(state->ctx); -} - -void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHA3_512_RATE, input, inlen); -} - -void sha3_512_inc_ctx_release(sha3_512incctx *state) { - (void)state; -} - -void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state) { - uint8_t t[SHA3_512_RATE]; - keccak_inc_finalize(state->ctx, SHA3_512_RATE, 0x06); - - keccak_squeezeblocks(t, 1, state->ctx, SHA3_512_RATE); - - sha3_512_inc_ctx_release(state); - - for (size_t i = 0; i < 64; i++) { - output[i] = t[i]; - } -} - -/************************************************* - * Name: sha3_512 - * - * Description: SHA3-512 with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - 
**************************************************/ -void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen) { - uint64_t s[25]; - uint8_t t[SHA3_512_RATE]; - - /* Absorb input */ - keccak_absorb(s, SHA3_512_RATE, input, inlen, 0x06); - - /* Squeeze output */ - keccak_squeezeblocks(t, 1, s, SHA3_512_RATE); - - for (size_t i = 0; i < 64; i++) { - output[i] = t[i]; - } -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fips202.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fips202.h index c29ebd8f9d..21bc0c3f79 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fips202.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fips202.h @@ -3,169 +3,12 @@ #ifndef FIPS202_H #define FIPS202_H -#include -#include +#include -#define SHAKE128_RATE 168 -#define SHAKE256_RATE 136 -#define SHA3_256_RATE 136 -#define SHA3_384_RATE 104 -#define SHA3_512_RATE 72 - -#define PQC_SHAKEINCCTX_U64WORDS 26 -#define PQC_SHAKECTX_U64WORDS 25 - -#define PQC_SHAKEINCCTX_BYTES (sizeof(uint64_t) * 26) -#define PQC_SHAKECTX_BYTES (sizeof(uint64_t) * 25) - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} shake128incctx; - -// Context for non-incremental API -typedef struct { - uint64_t ctx[PQC_SHAKECTX_U64WORDS]; -} shake128ctx; - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} shake256incctx; - -// Context for non-incremental API -typedef struct { - uint64_t ctx[PQC_SHAKECTX_U64WORDS]; -} shake256ctx; - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} sha3_256incctx; - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} sha3_384incctx; - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} sha3_512incctx; - -/* Initialize the state and absorb the provided input. - * - * This function does not support being called multiple times - * with the same state. - */ -void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen); -/* Squeeze output out of the sponge. - * - * Supports being called multiple times - */ -void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state); -/* Free the state */ -void shake128_ctx_release(shake128ctx *state); -/* Copy the state. */ -void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src); - -/* Initialize incremental hashing API */ -void shake128_inc_init(shake128incctx *state); -/* Absorb more information into the XOF. - * - * Can be called multiple times. - */ -void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen); -/* Finalize the XOF for squeezing */ -void shake128_inc_finalize(shake128incctx *state); -/* Squeeze output out of the sponge. - * - * Supports being called multiple times - */ -void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state); -/* Copy the context of the SHAKE128 XOF */ -void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src); -/* Free the context of the SHAKE128 XOF */ -void shake128_inc_ctx_release(shake128incctx *state); - -/* Initialize the state and absorb the provided input. - * - * This function does not support being called multiple times - * with the same state. - */ -void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen); -/* Squeeze output out of the sponge. 
- * - * Supports being called multiple times - */ -void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state); -/* Free the context held by this XOF */ -void shake256_ctx_release(shake256ctx *state); -/* Copy the context held by this XOF */ -void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src); - -/* Initialize incremental hashing API */ -void shake256_inc_init(shake256incctx *state); -void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen); -/* Prepares for squeeze phase */ -void shake256_inc_finalize(shake256incctx *state); -/* Squeeze output out of the sponge. - * - * Supports being called multiple times - */ -void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state); -/* Copy the state */ -void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src); -/* Free the state */ -void shake256_inc_ctx_release(shake256incctx *state); - -/* One-stop SHAKE128 call */ -void shake128(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen); - -/* One-stop SHAKE256 call */ -void shake256(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen); - -/* Initialize the incremental hashing state */ -void sha3_256_inc_init(sha3_256incctx *state); -/* Absorb blocks into SHA3 */ -void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen); -/* Obtain the output of the function and free `state` */ -void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state); -/* Copy the context */ -void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src); -/* Release the state, don't use if `_finalize` has been used */ -void sha3_256_inc_ctx_release(sha3_256incctx *state); - -void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen); - -/* Initialize the incremental hashing state */ -void sha3_384_inc_init(sha3_384incctx *state); -/* Absorb blocks into SHA3 */ -void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, size_t inlen); -/* Obtain the output of the function and free `state` */ -void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state); -/* Copy the context */ -void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src); -/* Release the state, don't use if `_finalize` has been used */ -void sha3_384_inc_ctx_release(sha3_384incctx *state); - -/* One-stop SHA3-384 shop */ -void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen); - -/* Initialize the incremental hashing state */ -void sha3_512_inc_init(sha3_512incctx *state); -/* Absorb blocks into SHA3 */ -void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen); -/* Obtain the output of the function and free `state` */ -void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state); -/* Copy the context */ -void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src); -/* Release the state, don't use if `_finalize` has been used */ -void sha3_512_inc_ctx_release(sha3_512incctx *state); - -/* One-stop SHA3-512 shop */ -void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen); +#define shake256incctx OQS_SHA3_shake256_inc_ctx +#define shake256_inc_init OQS_SHA3_shake256_inc_init +#define shake256_inc_absorb OQS_SHA3_shake256_inc_absorb +#define shake256_inc_finalize OQS_SHA3_shake256_inc_finalize +#define shake256_inc_squeeze OQS_SHA3_shake256_inc_squeeze #endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf.c 
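/*
 * The defines above route the SHAKE256 incremental API used by the upstream
 * SQIsign sources to liboqs's SHA-3 layer instead of the bundled fips202.c.
 * A minimal usage sketch, assuming the liboqs OQS_SHA3 incremental API is in
 * scope via the include above (msg/out and their lengths are placeholders):
 *
 *   shake256incctx ctx;                       // expands to OQS_SHA3_shake256_inc_ctx
 *   shake256_inc_init(&ctx);                  // OQS_SHA3_shake256_inc_init
 *   shake256_inc_absorb(&ctx, msg, msg_len);
 *   shake256_inc_finalize(&ctx);
 *   shake256_inc_squeeze(out, out_len, &ctx);
 *   OQS_SHA3_shake256_inc_ctx_release(&ctx);  // liboqs contexts should be released
 */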
b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf.c new file mode 100644 index 0000000000..1fb4c0f139 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf.c @@ -0,0 +1,210 @@ +#include "hnf_internal.h" +#include "internal.h" + +// HNF test function +int +ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat) +{ + int res = 1; + int found; + int ind = 0; + ibz_t zero; + ibz_init(&zero); + // upper triangular + for (int i = 0; i < 4; i++) { + // upper triangular + for (int j = 0; j < i; j++) { + res = res && ibz_is_zero(&((*mat)[i][j])); + } + // find first non 0 element of line + found = 0; + for (int j = i; j < 4; j++) { + if (found) { + // all values are positive, and first non-0 is the largest of that line + res = res && (ibz_cmp(&((*mat)[i][j]), &zero) >= 0); + res = res && (ibz_cmp(&((*mat)[i][ind]), &((*mat)[i][j])) > 0); + } else { + if (!ibz_is_zero(&((*mat)[i][j]))) { + found = 1; + ind = j; + // mustbe non-negative + res = res && (ibz_cmp(&((*mat)[i][j]), &zero) > 0); + } + } + } + } + // check that first nom-zero elements ndex per column is strictly increasing + int linestart = -1; + int i = 0; + for (int j = 0; j < 4; j++) { + while ((i < 4) && (ibz_is_zero(&((*mat)[i][j])))) { + i = i + 1; + } + if (i != 4) { + res = res && (linestart < i); + } + i = 0; + } + ibz_finalize(&zero); + return res; +} + +// Untested HNF helpers +// centered mod +void +ibz_vec_4_linear_combination_mod(ibz_vec_4_t *lc, + const ibz_t *coeff_a, + const ibz_vec_4_t *vec_a, + const ibz_t *coeff_b, + const ibz_vec_4_t *vec_b, + const ibz_t *mod) +{ + ibz_t prod, m; + ibz_vec_4_t sums; + ibz_vec_4_init(&sums); + ibz_init(&prod); + ibz_init(&m); + ibz_copy(&m, mod); + for (int i = 0; i < 4; i++) { + ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); + ibz_mul(&prod, coeff_b, &((*vec_b)[i])); + ibz_add(&(sums[i]), &(sums[i]), &prod); + ibz_centered_mod(&(sums[i]), &(sums[i]), &m); + } + for (int i = 0; i < 4; i++) { + ibz_copy(&((*lc)[i]), &(sums[i])); + } + ibz_finalize(&prod); + ibz_finalize(&m); + ibz_vec_4_finalize(&sums); +} + +void +ibz_vec_4_copy_mod(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_t *mod) +{ + ibz_t m; + ibz_init(&m); + ibz_copy(&m, mod); + for (int i = 0; i < 4; i++) { + ibz_centered_mod(&((*res)[i]), &((*vec)[i]), &m); + } + ibz_finalize(&m); +} + +// no need to center this, and not 0 +void +ibz_vec_4_scalar_mul_mod(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec, const ibz_t *mod) +{ + ibz_t m, s; + ibz_init(&m); + ibz_init(&s); + ibz_copy(&s, scalar); + ibz_copy(&m, mod); + for (int i = 0; i < 4; i++) { + ibz_mul(&((*prod)[i]), &((*vec)[i]), &s); + ibz_mod(&((*prod)[i]), &((*prod)[i]), &m); + } + ibz_finalize(&m); + ibz_finalize(&s); +} + +// Algorithm used is the one at number 2.4.8 in Henri Cohen's "A Course in Computational Algebraic +// Number Theory" (Springer Verlag, in series "Graduate texts in Mathematics") from 1993 +// assumes ibz_xgcd outputs u,v which are small in absolute value (as described in the +// book) +void +ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec_4_t *generators, const ibz_t *mod) +{ + int i = 3; + assert(generator_number > 3); + int n = generator_number; + int j = n - 1; + int k = n - 1; + ibz_t b, u, v, d, q, m, coeff_1, coeff_2, r; + ibz_vec_4_t c; + ibz_vec_4_t a[generator_number]; + ibz_vec_4_t w[4]; + ibz_init(&b); + ibz_init(&d); + ibz_init(&u); + ibz_init(&v); + ibz_init(&r); + ibz_init(&m); + ibz_init(&q); + ibz_init(&coeff_1); + ibz_init(&coeff_2); + ibz_vec_4_init(&c); + for 
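/*
 * Example of the shape accepted by ibz_mat_4x4_is_hnf above: the matrix must be
 * upper triangular, each row's first non-zero entry must be positive, and the
 * remaining entries of that row must be non-negative and strictly smaller than
 * it. One matrix passing those checks:
 *
 *   [ 5 2 1 3 ]
 *   [ 0 4 0 2 ]
 *   [ 0 0 3 1 ]
 *   [ 0 0 0 1 ]
 */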
(int h = 0; h < n; h++) { + if (h < 4) + ibz_vec_4_init(&(w[h])); + ibz_vec_4_init(&(a[h])); + ibz_copy(&(a[h][0]), &(generators[h][0])); + ibz_copy(&(a[h][1]), &(generators[h][1])); + ibz_copy(&(a[h][2]), &(generators[h][2])); + ibz_copy(&(a[h][3]), &(generators[h][3])); + } + assert(ibz_cmp(mod, &ibz_const_zero) > 0); + ibz_copy(&m, mod); + while (i != -1) { + while (j != 0) { + j = j - 1; + if (!ibz_is_zero(&(a[j][i]))) { + // assumtion that ibz_xgcd outputs u,v which are small in absolute + // value is needed here also, needs u non 0, but v can be 0 if needed + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &(a[j][i])); + ibz_vec_4_linear_combination(&c, &u, &(a[k]), &v, &(a[j])); + ibz_div(&coeff_1, &r, &(a[k][i]), &d); + ibz_div(&coeff_2, &r, &(a[j][i]), &d); + ibz_neg(&coeff_2, &coeff_2); + ibz_vec_4_linear_combination_mod( + &(a[j]), &coeff_1, &(a[j]), &coeff_2, &(a[k]), &m); // do lin comb mod m + ibz_vec_4_copy_mod(&(a[k]), &c, &m); // mod m in copy + } + } + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &m); + ibz_vec_4_scalar_mul_mod(&(w[i]), &u, &(a[k]), &m); // mod m in scalar mult + if (ibz_is_zero(&(w[i][i]))) { + ibz_copy(&(w[i][i]), &m); + } + for (int h = i + 1; h < 4; h++) { + ibz_div_floor(&q, &r, &(w[h][i]), &(w[i][i])); + ibz_neg(&q, &q); + ibz_vec_4_linear_combination(&(w[h]), &ibz_const_one, &(w[h]), &q, &(w[i])); + } + ibz_div(&m, &r, &m, &d); + assert(ibz_is_zero(&r)); + if (i != 0) { + k = k - 1; + i = i - 1; + j = k; + if (ibz_is_zero(&(a[k][i]))) + ibz_copy(&(a[k][i]), &m); + + } else { + k = k - 1; + i = i - 1; + j = k; + } + } + for (j = 0; j < 4; j++) { + for (i = 0; i < 4; i++) { + ibz_copy(&((*hnf)[i][j]), &(w[j][i])); + } + } + + ibz_finalize(&b); + ibz_finalize(&d); + ibz_finalize(&u); + ibz_finalize(&v); + ibz_finalize(&r); + ibz_finalize(&q); + ibz_finalize(&coeff_1); + ibz_finalize(&coeff_2); + ibz_finalize(&m); + ibz_vec_4_finalize(&c); + for (int h = 0; h < n; h++) { + if (h < 4) + ibz_vec_4_finalize(&(w[h])); + ibz_vec_4_finalize(&(a[h])); + } +} \ No newline at end of file diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf_internal.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf_internal.c new file mode 100644 index 0000000000..b2db5b54c9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf_internal.c @@ -0,0 +1,182 @@ +#include "hnf_internal.h" +#include "internal.h" + +// Small helper for integers +void +ibz_mod_not_zero(ibz_t *res, const ibz_t *x, const ibz_t *mod) +{ + ibz_t m, t; + ibz_init(&m); + ibz_init(&t); + ibz_mod(&m, x, mod); + ibz_set(&t, ibz_is_zero(&m)); + ibz_mul(&t, &t, mod); + ibz_add(res, &m, &t); + ibz_finalize(&m); + ibz_finalize(&t); +} + +// centered and rather positive then negative +void +ibz_centered_mod(ibz_t *remainder, const ibz_t *a, const ibz_t *mod) +{ + assert(ibz_cmp(mod, &ibz_const_zero) > 0); + ibz_t tmp, d, t; + ibz_init(&tmp); + ibz_init(&d); + ibz_init(&t); + ibz_div_floor(&d, &tmp, mod, &ibz_const_two); + ibz_mod_not_zero(&tmp, a, mod); + ibz_set(&t, ibz_cmp(&tmp, &d) > 0); + ibz_mul(&t, &t, mod); + ibz_sub(remainder, &tmp, &t); + ibz_finalize(&tmp); + ibz_finalize(&d); + ibz_finalize(&t); +} + +// if c, res = x, else res = y +void +ibz_conditional_assign(ibz_t *res, const ibz_t *x, const ibz_t *y, int c) +{ + ibz_t s, t, r; + ibz_init(&r); + ibz_init(&s); + ibz_init(&t); + ibz_set(&s, c != 0); + ibz_sub(&t, &ibz_const_one, &s); + ibz_mul(&r, &s, x); + ibz_mul(res, &t, y); + ibz_add(res, &r, res); + ibz_finalize(&r); + ibz_finalize(&s); + ibz_finalize(&t); 
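/*
 * Worked example for the two reductions above, with mod = 8:
 *   ibz_mod_not_zero:   5 -> 5,   8 -> 8,  16 -> 8,  -3 -> 5    (range [1, mod])
 *   ibz_centered_mod:   5 -> -3,  4 -> 4,  12 -> 4,  15 -> -1   (range ]-mod/2, mod/2])
 * "Rather positive than negative" refers to the boundary case: a residue of
 * mod/2 is kept as +mod/2 instead of being mapped to -mod/2.
 */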
+} + +// mpz_gcdext specification specifies unique outputs used here +void +ibz_xgcd_with_u_not_0(ibz_t *d, ibz_t *u, ibz_t *v, const ibz_t *x, const ibz_t *y) +{ + if (ibz_is_zero(x) & ibz_is_zero(y)) { + ibz_set(d, 1); + ibz_set(u, 1); + ibz_set(v, 0); + return; + } + ibz_t q, r, x1, y1; + ibz_init(&q); + ibz_init(&r); + ibz_init(&x1); + ibz_init(&y1); + ibz_copy(&x1, x); + ibz_copy(&y1, y); + + // xgcd + ibz_xgcd(d, u, v, &x1, &y1); + + // make sure u!=0 (v can be 0 if needed) + // following GMP specification, u == 0 implies y|x + if (ibz_is_zero(u)) { + if (!ibz_is_zero(&x1)) { + if (ibz_is_zero(&y1)) { + ibz_set(&y1, 1); + } + ibz_div(&q, &r, &x1, &y1); + assert(ibz_is_zero(&r)); + ibz_sub(v, v, &q); + } + ibz_set(u, 1); + } + if (!ibz_is_zero(&x1)) { + // Make sure ux > 0 (and as small as possible) + assert(ibz_cmp(d, &ibz_const_zero) > 0); + ibz_mul(&r, &x1, &y1); + int neg = ibz_cmp(&r, &ibz_const_zero) < 0; + ibz_mul(&q, &x1, u); + while (ibz_cmp(&q, &ibz_const_zero) <= 0) { + ibz_div(&q, &r, &y1, d); + assert(ibz_is_zero(&r)); + if (neg) { + ibz_neg(&q, &q); + } + ibz_add(u, u, &q); + ibz_div(&q, &r, &x1, d); + assert(ibz_is_zero(&r)); + if (neg) { + ibz_neg(&q, &q); + } + ibz_sub(v, v, &q); + + ibz_mul(&q, &x1, u); + } + } + +#ifndef NDEBUG + int res = 0; + ibz_t sum, prod, test, cmp; + ibz_init(&sum); + ibz_init(&prod); + ibz_init(&cmp); + ibz_init(&test); + // sign correct + res = res | !(ibz_cmp(d, &ibz_const_zero) >= 0); + if (ibz_is_zero(&x1) && ibz_is_zero(&y1)) { + res = res | !(ibz_is_zero(v) && ibz_is_one(u) && ibz_is_one(d)); + } else { + if (!ibz_is_zero(&x1) && !ibz_is_zero(&y1)) { + // GCD divides x + ibz_div(&sum, &prod, &x1, d); + res = res | !ibz_is_zero(&prod); + // Small enough + ibz_mul(&prod, &x1, u); + res = res | !(ibz_cmp(&prod, &ibz_const_zero) > 0); + ibz_mul(&sum, &sum, &y1); + ibz_abs(&sum, &sum); + res = res | !(ibz_cmp(&prod, &sum) <= 0); + + // GCD divides y + ibz_div(&sum, &prod, &y1, d); + res = res | !ibz_is_zero(&prod); + // Small enough + ibz_mul(&prod, &y1, v); + res = res | !(ibz_cmp(&prod, &ibz_const_zero) <= 0); + ibz_mul(&sum, &sum, &x1); + ibz_abs(&sum, &sum); + res = res | !(ibz_cmp(&prod, &sum) < 0); + } else { + // GCD divides x + ibz_div(&sum, &prod, &x1, d); + res = res | !ibz_is_zero(&prod); + // GCD divides y + ibz_div(&sum, &prod, &y1, d); + res = res | !ibz_is_zero(&prod); + if (ibz_is_zero(&x1) && !ibz_is_zero(&y1)) { + ibz_abs(&prod, v); + res = res | !(ibz_is_one(&prod)); + res = res | !(ibz_is_one(u)); + } else { + ibz_abs(&prod, u); + res = res | !(ibz_is_one(&prod)); + res = res | !(ibz_is_zero(v)); + } + } + + // Bezout coeffs + ibz_mul(&sum, &x1, u); + ibz_mul(&prod, &y1, v); + ibz_add(&sum, &sum, &prod); + res = res | !(ibz_cmp(&sum, d) == 0); + } + assert(!res); + ibz_finalize(&sum); + ibz_finalize(&prod); + ibz_finalize(&cmp); + ibz_finalize(&test); + +#endif + + ibz_finalize(&x1); + ibz_finalize(&y1); + ibz_finalize(&q); + ibz_finalize(&r); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf_internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf_internal.h new file mode 100644 index 0000000000..5ecc871bb4 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf_internal.h @@ -0,0 +1,94 @@ +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations for functions internal to the HNF computation and its tests + */ + +#ifndef QUAT_HNF_HELPERS_H +#define QUAT_HNF_HELPERS_H + +#include + +/** @internal + * @ingroup quat_helpers + * @defgroup quat_hnf_helpers 
Internal functions for the HNF computation and tests + */ + +/** @internal + * @ingroup quat_hnf_helpers + * @defgroup quat_hnf_helpers_ibz Internal renamed GMP functions for the HNF computation + */ + +/** + * @brief GCD and Bézout coefficients u, v such that ua + bv = gcd + * + * @param gcd Output: Set to the gcd of a and b + * @param u Output: integer such that ua+bv=gcd + * @param v Output: Integer such that ua+bv=gcd + * @param a + * @param b + */ +void ibz_xgcd(ibz_t *gcd, + ibz_t *u, + ibz_t *v, + const ibz_t *a, + const ibz_t *b); // integers, dim4, test/integers, test/dim4 + +/** @} + */ + +/** @internal + * @ingroup quat_hnf_helpers + * @defgroup quat_hnf_integer_helpers Integer functions internal to the HNF computation and tests + * @{ + */ + +/** @brief x mod mod, with x in [1,mod] + * + * @param res Output: res = x [mod] and 0 0 + */ +void ibz_mod_not_zero(ibz_t *res, const ibz_t *x, const ibz_t *mod); + +/** @brief x mod mod, with x in ]-mod/2,mod/2] + * + * Centered and rather positive then negative. + * + * @param remainder Output: remainder = x [mod] and -mod/2 0 + */ +void ibz_centered_mod(ibz_t *remainder, const ibz_t *a, const ibz_t *mod); + +/** @brief if c then x else y + * + * @param res Output: if c, res = x, else res = y + * @param x + * @param y + * @param c condition: must be 0 or 1 + */ +void ibz_conditional_assign(ibz_t *res, const ibz_t *x, const ibz_t *y, int c); + +/** @brief d = gcd(x,y)>0 and d = ux+vy and u!= 0 and d>0 and u, v of small absolute value, u not 0 + * + * More precisely: + * If x and y are both non 0, -|xy|/d +#else +#include +#endif + +void +ibz_xgcd(ibz_t *gcd, ibz_t *u, ibz_t *v, const ibz_t *a, const ibz_t *b) +{ + mpz_gcdext(*gcd, *u, *v, *a, *b); +} \ No newline at end of file diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ideal.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ideal.c new file mode 100644 index 0000000000..9cf863a104 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ideal.c @@ -0,0 +1,323 @@ +#include +#include +#include "internal.h" + +// assumes parent order and lattice correctly set, computes and sets the norm +void +quat_lideal_norm(quat_left_ideal_t *lideal) +{ + quat_lattice_index(&(lideal->norm), &(lideal->lattice), (lideal->parent_order)); + int ok UNUSED = ibz_sqrt(&(lideal->norm), &(lideal->norm)); + assert(ok); +} + +// assumes parent order and lattice correctly set, recomputes and verifies its norm +static int +quat_lideal_norm_verify(const quat_left_ideal_t *lideal) +{ + int res; + ibz_t index; + ibz_init(&index); + quat_lattice_index(&index, &(lideal->lattice), (lideal->parent_order)); + ibz_sqrt(&index, &index); + res = (ibz_cmp(&(lideal->norm), &index) == 0); + ibz_finalize(&index); + return (res); +} + +void +quat_lideal_copy(quat_left_ideal_t *copy, const quat_left_ideal_t *copied) +{ + copy->parent_order = copied->parent_order; + ibz_copy(©->norm, &copied->norm); + ibz_copy(©->lattice.denom, &copied->lattice.denom); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(©->lattice.basis[i][j], &copied->lattice.basis[i][j]); + } + } +} + +void +quat_lideal_create_principal(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const quat_lattice_t *order, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal(order, alg)); + assert(quat_lattice_contains(NULL, order, x)); + ibz_t norm_n, norm_d; + ibz_init(&norm_n); + ibz_init(&norm_d); + + // Multiply order on the right by x + quat_lattice_alg_elem_mul(&(lideal->lattice), order, x, 
alg); + + // Reduce denominator. This conserves HNF + quat_lattice_reduce_denom(&lideal->lattice, &lideal->lattice); + + // Compute norm and check it's integral + quat_alg_norm(&norm_n, &norm_d, x, alg); + assert(ibz_is_one(&norm_d)); + ibz_copy(&lideal->norm, &norm_n); + + // Set order + lideal->parent_order = order; + ibz_finalize(&norm_n); + ibz_finalize(&norm_d); +} + +void +quat_lideal_create(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const ibz_t *N, + const quat_lattice_t *order, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal(order, alg)); + assert(!quat_alg_elem_is_zero(x)); + + quat_lattice_t ON; + quat_lattice_init(&ON); + + // Compute ideal generated by x + quat_lideal_create_principal(lideal, x, order, alg); + + // Compute ideal generated by N (without reducing denominator) + ibz_mat_4x4_scalar_mul(&ON.basis, N, &order->basis); + ibz_copy(&ON.denom, &order->denom); + + // Add lattices (reduces denominators) + quat_lattice_add(&lideal->lattice, &lideal->lattice, &ON); + // Set order + lideal->parent_order = order; + // Compute norm + quat_lideal_norm(lideal); + + quat_lattice_finalize(&ON); +} + +int +quat_lideal_generator(quat_alg_elem_t *gen, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + ibz_t norm_int, norm_n, gcd, r, q, norm_denom; + ibz_vec_4_t vec; + ibz_vec_4_init(&vec); + ibz_init(&norm_denom); + ibz_init(&norm_int); + ibz_init(&norm_n); + ibz_init(&r); + ibz_init(&q); + ibz_init(&gcd); + int a, b, c, d; + int found = 0; + int int_norm = 0; + while (1) { + int_norm++; + for (a = -int_norm; a <= int_norm; a++) { + for (b = -int_norm + abs(a); b <= int_norm - abs(a); b++) { + for (c = -int_norm + abs(a) + abs(b); c <= int_norm - abs(a) - abs(b); c++) { + d = int_norm - abs(a) - abs(b) - abs(c); + ibz_vec_4_set(&vec, a, b, c, d); + ibz_vec_4_content(&gcd, &vec); + if (ibz_is_one(&gcd)) { + ibz_mat_4x4_eval(&(gen->coord), &(lideal->lattice.basis), &vec); + ibz_copy(&(gen->denom), &(lideal->lattice.denom)); + quat_alg_norm(&norm_int, &norm_denom, gen, alg); + assert(ibz_is_one(&norm_denom)); + ibz_div(&q, &r, &norm_int, &(lideal->norm)); + assert(ibz_is_zero(&r)); + ibz_gcd(&gcd, &(lideal->norm), &q); + found = (0 == ibz_cmp(&gcd, &ibz_const_one)); + if (found) + goto fin; + } + } + } + } + } +fin:; + ibz_finalize(&r); + ibz_finalize(&q); + ibz_finalize(&norm_denom); + ibz_finalize(&norm_int); + ibz_finalize(&norm_n); + ibz_vec_4_finalize(&vec); + ibz_finalize(&gcd); + return (found); +} + +void +quat_lideal_mul(quat_left_ideal_t *product, + const quat_left_ideal_t *lideal, + const quat_alg_elem_t *alpha, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + ibz_t norm, norm_d; + ibz_init(&norm); + ibz_init(&norm_d); + quat_lattice_alg_elem_mul(&(product->lattice), &(lideal->lattice), alpha, alg); + product->parent_order = lideal->parent_order; + quat_alg_norm(&norm, &norm_d, alpha, alg); + ibz_mul(&(product->norm), &(lideal->norm), &norm); + assert(ibz_divides(&(product->norm), &norm_d)); + ibz_div(&(product->norm), &norm, &(product->norm), &norm_d); + assert(quat_lideal_norm_verify(lideal)); + ibz_finalize(&norm_d); + ibz_finalize(&norm); +} + +void +quat_lideal_add(quat_left_ideal_t *sum, const quat_left_ideal_t *I1, const quat_left_ideal_t *I2, const quat_alg_t *alg) +{ + assert(I1->parent_order == I2->parent_order); + assert(quat_order_is_maximal((I2->parent_order), alg)); + quat_lattice_add(&sum->lattice, &I1->lattice, &I2->lattice); + sum->parent_order = I1->parent_order; + 
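/*
 * Usage sketch for the left-ideal constructors above: build I = O*x + O*N for
 * an element x of a maximal order O and an integer N, then recover a generator
 * gen with nrd(gen)/nrd(I) coprime to nrd(I). This sketch assumes the usual
 * init/finalize helpers declared in quaternion.h (quat_left_ideal_init,
 * quat_alg_elem_init, ...) and an already set-up order O0, element x, integer N
 * and algebra alg:
 *
 *   quat_left_ideal_t I;
 *   quat_alg_elem_t gen;
 *   quat_left_ideal_init(&I);
 *   quat_alg_elem_init(&gen);
 *   quat_lideal_create(&I, &x, &N, &O0, &alg);   // lattice, norm and parent order set
 *   if (quat_lideal_generator(&gen, &I, &alg)) {
 *       // gen generates I together with its norm: I = O0*gen + O0*nrd(I)
 *   }
 *   quat_alg_elem_finalize(&gen);
 *   quat_left_ideal_finalize(&I);
 */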
quat_lideal_norm(sum); +} + +void +quat_lideal_inter(quat_left_ideal_t *inter, + const quat_left_ideal_t *I1, + const quat_left_ideal_t *I2, + const quat_alg_t *alg) +{ + assert(I1->parent_order == I2->parent_order); + assert(quat_order_is_maximal((I2->parent_order), alg)); + quat_lattice_intersect(&inter->lattice, &I1->lattice, &I2->lattice); + inter->parent_order = I1->parent_order; + quat_lideal_norm(inter); +} + +int +quat_lideal_equals(const quat_left_ideal_t *I1, const quat_left_ideal_t *I2, const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((I2->parent_order), alg)); + assert(quat_order_is_maximal((I1->parent_order), alg)); + return (I1->parent_order == I2->parent_order) & (ibz_cmp(&I1->norm, &I2->norm) == 0) & + quat_lattice_equal(&I1->lattice, &I2->lattice); +} + +void +quat_lideal_inverse_lattice_without_hnf(quat_lattice_t *inv, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + quat_lattice_conjugate_without_hnf(inv, &(lideal->lattice)); + ibz_mul(&(inv->denom), &(inv->denom), &(lideal->norm)); +} + +// following the implementation of ideal isomorphisms in the code of LearningToSQI's sage +// implementation of SQIsign +void +quat_lideal_right_transporter(quat_lattice_t *trans, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal1->parent_order), alg)); + assert(quat_order_is_maximal((lideal2->parent_order), alg)); + assert(lideal1->parent_order == lideal2->parent_order); + quat_lattice_t inv; + quat_lattice_init(&inv); + quat_lideal_inverse_lattice_without_hnf(&inv, lideal1, alg); + quat_lattice_mul(trans, &inv, &(lideal2->lattice), alg); + quat_lattice_finalize(&inv); +} + +void +quat_lideal_right_order(quat_lattice_t *order, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + quat_lideal_right_transporter(order, lideal, lideal, alg); +} + +void +quat_lideal_class_gram(ibz_mat_4x4_t *G, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + quat_lattice_gram(G, &(lideal->lattice), alg); + + // divide by norm · denominator² + ibz_t divisor, rmd; + ibz_init(&divisor); + ibz_init(&rmd); + + ibz_mul(&divisor, &(lideal->lattice.denom), &(lideal->lattice.denom)); + ibz_mul(&divisor, &divisor, &(lideal->norm)); + + for (int i = 0; i < 4; i++) { + for (int j = 0; j <= i; j++) { + ibz_div(&(*G)[i][j], &rmd, &(*G)[i][j], &divisor); + assert(ibz_is_zero(&rmd)); + } + } + for (int i = 0; i < 4; i++) { + for (int j = 0; j <= i - 1; j++) { + ibz_copy(&(*G)[j][i], &(*G)[i][j]); + } + } + + ibz_finalize(&rmd); + ibz_finalize(&divisor); +} + +void +quat_lideal_conjugate_without_hnf(quat_left_ideal_t *conj, + quat_lattice_t *new_parent_order, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg) +{ + quat_lideal_right_order(new_parent_order, lideal, alg); + quat_lattice_conjugate_without_hnf(&(conj->lattice), &(lideal->lattice)); + conj->parent_order = new_parent_order; + ibz_copy(&(conj->norm), &(lideal->norm)); +} + +int +quat_order_discriminant(ibz_t *disc, const quat_lattice_t *order, const quat_alg_t *alg) +{ + int ok = 0; + ibz_t det, sqr, div; + ibz_mat_4x4_t transposed, norm, prod; + ibz_init(&det); + ibz_init(&sqr); + ibz_init(&div); + ibz_mat_4x4_init(&transposed); + ibz_mat_4x4_init(&norm); + ibz_mat_4x4_init(&prod); + ibz_mat_4x4_transpose(&transposed, &(order->basis)); + // multiply gram matrix by 2 because of reduced trace + 
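/*
 * Background for the maximality test below: in the quaternion algebra
 * B_{p,infinity} used by SQIsign (ramified exactly at p and at infinity), an
 * order is maximal if and only if its reduced discriminant equals p;
 * quat_order_is_maximal implements that criterion via quat_order_discriminant.
 */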
ibz_mat_4x4_identity(&norm); + ibz_copy(&(norm[2][2]), &(alg->p)); + ibz_copy(&(norm[3][3]), &(alg->p)); + ibz_mat_4x4_scalar_mul(&norm, &ibz_const_two, &norm); + ibz_mat_4x4_mul(&prod, &transposed, &norm); + ibz_mat_4x4_mul(&prod, &prod, &(order->basis)); + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &prod); + ibz_mul(&div, &(order->denom), &(order->denom)); + ibz_mul(&div, &div, &div); + ibz_mul(&div, &div, &div); + ibz_div(&sqr, &div, &det, &div); + ok = ibz_is_zero(&div); + ok = ok & ibz_sqrt(disc, &sqr); + ibz_finalize(&det); + ibz_finalize(&div); + ibz_finalize(&sqr); + ibz_mat_4x4_finalize(&transposed); + ibz_mat_4x4_finalize(&norm); + ibz_mat_4x4_finalize(&prod); + return (ok); +} + +int +quat_order_is_maximal(const quat_lattice_t *order, const quat_alg_t *alg) +{ + int res; + ibz_t disc; + ibz_init(&disc); + quat_order_discriminant(&disc, order, alg); + res = (ibz_cmp(&disc, &(alg->p)) == 0); + ibz_finalize(&disc); + return (res); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/intbig.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/intbig.c new file mode 100644 index 0000000000..b0462dc8b5 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/intbig.c @@ -0,0 +1,791 @@ +#include "intbig_internal.h" +#include +#include +#include +#include +#include +#include + +// #define DEBUG_VERBOSE + +#ifdef DEBUG_VERBOSE +#define DEBUG_STR_PRINTF(x) printf("%s\n", (x)); + +static void +DEBUG_STR_FUN_INT_MP(const char *op, int arg1, const ibz_t *arg2) +{ + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + printf("%s,%x,%s\n", op, arg1, arg2_str); +} + +static void +DEBUG_STR_FUN_3(const char *op, const ibz_t *arg1, const ibz_t *arg2, const ibz_t *arg3) +{ + int arg1_size = ibz_size_in_base(arg1, 16); + char arg1_str[arg1_size + 2]; + ibz_convert_to_str(arg1, arg1_str, 16); + + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + int arg3_size = ibz_size_in_base(arg3, 16); + char arg3_str[arg3_size + 2]; + ibz_convert_to_str(arg3, arg3_str, 16); + + printf("%s,%s,%s,%s\n", op, arg1_str, arg2_str, arg3_str); +} + +static void +DEBUG_STR_FUN_MP2_INT(const char *op, const ibz_t *arg1, const ibz_t *arg2, int arg3) +{ + int arg1_size = ibz_size_in_base(arg1, 16); + char arg1_str[arg1_size + 2]; + ibz_convert_to_str(arg1, arg1_str, 16); + + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + printf("%s,%s,%s,%x\n", op, arg1_str, arg2_str, arg3); +} + +static void +DEBUG_STR_FUN_INT_MP2(const char *op, int arg1, const ibz_t *arg2, const ibz_t *arg3) +{ + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + int arg3_size = ibz_size_in_base(arg3, 16); + char arg3_str[arg3_size + 2]; + ibz_convert_to_str(arg3, arg3_str, 16); + + if (arg1 >= 0) + printf("%s,%x,%s,%s\n", op, arg1, arg2_str, arg3_str); + else + printf("%s,-%x,%s,%s\n", op, -arg1, arg2_str, arg3_str); +} + +static void +DEBUG_STR_FUN_INT_MP_INT(const char *op, int arg1, const ibz_t *arg2, int arg3) +{ + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + printf("%s,%x,%s,%x\n", op, arg1, arg2_str, arg3); +} + +static void +DEBUG_STR_FUN_4(const char *op, const ibz_t *arg1, const ibz_t *arg2, const ibz_t *arg3, const ibz_t *arg4) +{ + int arg1_size = 
ibz_size_in_base(arg1, 16); + char arg1_str[arg1_size + 2]; + ibz_convert_to_str(arg1, arg1_str, 16); + + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + int arg3_size = ibz_size_in_base(arg3, 16); + char arg3_str[arg3_size + 2]; + ibz_convert_to_str(arg3, arg3_str, 16); + + int arg4_size = ibz_size_in_base(arg4, 16); + char arg4_str[arg4_size + 2]; + ibz_convert_to_str(arg4, arg4_str, 16); + + printf("%s,%s,%s,%s,%s\n", op, arg1_str, arg2_str, arg3_str, arg4_str); +} +#else +#define DEBUG_STR_PRINTF(x) +#define DEBUG_STR_FUN_INT_MP(op, arg1, arg2) +#define DEBUG_STR_FUN_3(op, arg1, arg2, arg3) +#define DEBUG_STR_FUN_INT_MP2(op, arg1, arg2, arg3) +#define DEBUG_STR_FUN_INT_MP_INT(op, arg1, arg2, arg3) +#define DEBUG_STR_FUN_4(op, arg1, arg2, arg3, arg4) +#endif + +/** @defgroup ibz_t Constants + * @{ + */ + +const __mpz_struct ibz_const_zero[1] = { + { + ._mp_alloc = 0, + ._mp_size = 0, + ._mp_d = (mp_limb_t[]){ 0 }, + } +}; + +const __mpz_struct ibz_const_one[1] = { + { + ._mp_alloc = 0, + ._mp_size = 1, + ._mp_d = (mp_limb_t[]){ 1 }, + } +}; + +const __mpz_struct ibz_const_two[1] = { + { + ._mp_alloc = 0, + ._mp_size = 1, + ._mp_d = (mp_limb_t[]){ 2 }, + } +}; + +const __mpz_struct ibz_const_three[1] = { + { + ._mp_alloc = 0, + ._mp_size = 1, + ._mp_d = (mp_limb_t[]){ 3 }, + } +}; + +void +ibz_init(ibz_t *x) +{ + mpz_init(*x); +} + +void +ibz_finalize(ibz_t *x) +{ + mpz_clear(*x); +} + +void +ibz_add(ibz_t *sum, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_add(*sum, *a, *b); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_add", sum, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_sub(ibz_t *diff, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_sub(*diff, *a, *b); + +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_sub", diff, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_mul(*prod, *a, *b); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_mul", prod, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_neg(ibz_t *neg, const ibz_t *a) +{ + mpz_neg(*neg, *a); +} + +void +ibz_abs(ibz_t *abs, const ibz_t *a) +{ + mpz_abs(*abs, *a); +} + +void +ibz_div(ibz_t *quotient, ibz_t *remainder, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_tdiv_qr(*quotient, *remainder, *a, *b); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_4("ibz_div", quotient, remainder, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp; + ibz_init(&a_cp); + ibz_copy(&a_cp, a); +#endif + mpz_tdiv_q_2exp(*quotient, *a, exp); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_MP2_INT("ibz_div_2exp,%Zx,%Zx,%x\n", quotient, &a_cp, exp); + ibz_finalize(&a_cp); +#endif +} + +void +ibz_div_floor(ibz_t *q, ibz_t *r, const ibz_t *n, const ibz_t *d) +{ + mpz_fdiv_qr(*q, *r, *n, *d); +} + +void +ibz_mod(ibz_t 
*r, const ibz_t *a, const ibz_t *b) +{ + mpz_mod(*r, *a, *b); +} + +unsigned long int +ibz_mod_ui(const mpz_t *n, unsigned long int d) +{ + return mpz_fdiv_ui(*n, d); +} + +int +ibz_divides(const ibz_t *a, const ibz_t *b) +{ + return mpz_divisible_p(*a, *b); +} + +void +ibz_pow(ibz_t *pow, const ibz_t *x, uint32_t e) +{ + mpz_pow_ui(*pow, *x, e); +} + +void +ibz_pow_mod(ibz_t *pow, const ibz_t *x, const ibz_t *e, const ibz_t *m) +{ + mpz_powm(*pow, *x, *e, *m); + DEBUG_STR_FUN_4("ibz_pow_mod", pow, x, e, m); +} + +int +ibz_two_adic(ibz_t *pow) +{ + return mpz_scan1(*pow, 0); +} + +int +ibz_cmp(const ibz_t *a, const ibz_t *b) +{ + int ret = mpz_cmp(*a, *b); + DEBUG_STR_FUN_INT_MP2("ibz_cmp", ret, a, b); + return ret; +} + +int +ibz_is_zero(const ibz_t *x) +{ + int ret = !mpz_cmp_ui(*x, 0); + DEBUG_STR_FUN_INT_MP("ibz_is_zero", ret, x); + return ret; +} + +int +ibz_is_one(const ibz_t *x) +{ + int ret = !mpz_cmp_ui(*x, 1); + DEBUG_STR_FUN_INT_MP("ibz_is_one", ret, x); + return ret; +} + +int +ibz_cmp_int32(const ibz_t *x, int32_t y) +{ + int ret = mpz_cmp_si(*x, (signed long int)y); + DEBUG_STR_FUN_INT_MP_INT("ibz_cmp_int32", ret, x, y); + return ret; +} + +int +ibz_is_even(const ibz_t *x) +{ + int ret = !mpz_tstbit(*x, 0); + DEBUG_STR_FUN_INT_MP("ibz_is_even", ret, x); + return ret; +} + +int +ibz_is_odd(const ibz_t *x) +{ + int ret = mpz_tstbit(*x, 0); + DEBUG_STR_FUN_INT_MP("ibz_is_odd", ret, x); + return ret; +} + +void +ibz_set(ibz_t *i, int32_t x) +{ + mpz_set_si(*i, x); +} + +int +ibz_convert_to_str(const ibz_t *i, char *str, int base) +{ + if (!str || (base != 10 && base != 16)) + return 0; + + mpz_get_str(str, base, *i); + + return 1; +} + +void +ibz_print(const ibz_t *num, int base) +{ + assert(base == 10 || base == 16); + + int num_size = ibz_size_in_base(num, base); + char num_str[num_size + 2]; + ibz_convert_to_str(num, num_str, base); + printf("%s", num_str); +} + +int +ibz_set_from_str(ibz_t *i, const char *str, int base) +{ + return (1 + mpz_set_str(*i, str, base)); +} + +void +ibz_copy(ibz_t *target, const ibz_t *value) +{ + mpz_set(*target, *value); +} + +void +ibz_swap(ibz_t *a, ibz_t *b) +{ + mpz_swap(*a, *b); +} + +int32_t +ibz_get(const ibz_t *i) +{ +#if LONG_MAX == INT32_MAX + return (int32_t)mpz_get_si(*i); +#elif LONG_MAX > INT32_MAX + // Extracts the sign bit and the 31 least significant bits + signed long int t = mpz_get_si(*i); + return (int32_t)((t >> (sizeof(signed long int) * 8 - 32)) & INT32_C(0x80000000)) | (t & INT32_C(0x7FFFFFFF)); +#else +#error Unsupported configuration: LONG_MAX must be >= INT32_MAX +#endif +} + +int +ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b) +{ + int randret; + int ret = 1; + mpz_t tmp; + mpz_t bmina; + mpz_init(bmina); + mpz_sub(bmina, *b, *a); + + if (mpz_sgn(bmina) == 0) { + mpz_set(*rand, *a); + mpz_clear(bmina); + return 1; + } + + size_t len_bits = mpz_sizeinbase(bmina, 2); + size_t len_bytes = (len_bits + 7) / 8; + size_t sizeof_limb = sizeof(mp_limb_t); + size_t sizeof_limb_bits = sizeof_limb * 8; + size_t len_limbs = (len_bytes + sizeof_limb - 1) / sizeof_limb; + + mp_limb_t mask = ((mp_limb_t)-1) >> (sizeof_limb_bits - len_bits) % sizeof_limb_bits; + mp_limb_t r[len_limbs]; + +#ifndef NDEBUG + { + for (size_t i = 0; i < len_limbs; ++i) + r[i] = (mp_limb_t)-1; + r[len_limbs - 1] = mask; + mpz_t check; + mpz_roinit_n(check, r, len_limbs); + assert(mpz_cmp(check, bmina) >= 0); // max sampled value >= b - a + mpz_t bmina2; + mpz_init(bmina2); + mpz_add(bmina2, bmina, bmina); + assert(mpz_cmp(check, bmina2) < 
0); // max sampled value < 2 * (b - a) + mpz_clear(bmina2); + } +#endif + + do { + randret = randombytes((unsigned char *)r, len_bytes); + if (randret != 0) { + ret = 0; + goto err; + } +#ifdef TARGET_BIG_ENDIAN + for (size_t i = 0; i < len_limbs; ++i) + r[i] = BSWAP_DIGIT(r[i]); +#endif + r[len_limbs - 1] &= mask; + mpz_roinit_n(tmp, r, len_limbs); + if (mpz_cmp(tmp, bmina) <= 0) + break; + } while (1); + + mpz_add(*rand, tmp, *a); +err: + mpz_clear(bmina); + return ret; +} + +int +ibz_rand_interval_i(ibz_t *rand, int32_t a, int32_t b) +{ + uint32_t diff, mask; + int32_t rand32; + + if (!(a >= 0 && b >= 0 && b > a)) { + printf("a = %d b = %d\n", a, b); + } + assert(a >= 0 && b >= 0 && b > a); + + diff = b - a; + + // Create a mask with 1 + ceil(log2(diff)) least significant bits set +#if (defined(__GNUC__) || defined(__clang__)) && INT_MAX == INT32_MAX + mask = (1 << (32 - __builtin_clz((uint32_t)diff))) - 1; +#else + uint32_t diff2 = diff, tmp; + + mask = (diff2 > 0xFFFF) << 4; + diff2 >>= mask; + + tmp = (diff2 > 0xFF) << 3; + diff2 >>= tmp; + mask |= tmp; + + tmp = (diff2 > 0xF) << 2; + diff2 >>= tmp; + mask |= tmp; + + tmp = (diff2 > 0x3) << 1; + diff2 >>= tmp; + mask |= tmp; + + mask |= diff2 >> 1; + + mask = (1 << (mask + 1)) - 1; +#endif + + assert(mask >= diff && mask < 2 * diff); + + // Rejection sampling + do { + randombytes((unsigned char *)&rand32, sizeof(rand32)); + +#ifdef TARGET_BIG_ENDIAN + rand32 = BSWAP32(rand32); +#endif + + rand32 &= mask; + } while (rand32 > (int32_t)diff); + + rand32 += a; + ibz_set(rand, rand32); + + return 1; +} + +int +ibz_rand_interval_minm_m(ibz_t *rand, int32_t m) +{ + int ret = 1; + mpz_t m_big; + + // m_big = 2 * m + mpz_init_set_si(m_big, m); + mpz_add(m_big, m_big, m_big); + + // Sample in [0, 2*m] + ret = ibz_rand_interval(rand, &ibz_const_zero, &m_big); + + // Adjust to range [-m, m] + mpz_sub_ui(*rand, *rand, m); + + mpz_clear(m_big); + + return ret; +} + +int +ibz_rand_interval_bits(ibz_t *rand, uint32_t m) +{ + int ret = 1; + mpz_t tmp; + mpz_t low; + mpz_init_set_ui(tmp, 1); + mpz_mul_2exp(tmp, tmp, m); + mpz_init(low); + mpz_neg(low, tmp); + ret = ibz_rand_interval(rand, &low, &tmp); + mpz_clear(tmp); + mpz_clear(low); + if (ret != 1) + goto err; + mpz_sub_ui(*rand, *rand, (unsigned long int)m); + return ret; +err: + mpz_clear(tmp); + mpz_clear(low); + return ret; +} + +int +ibz_bitsize(const ibz_t *a) +{ + return (int)mpz_sizeinbase(*a, 2); +} + +int +ibz_size_in_base(const ibz_t *a, int base) +{ + return (int)mpz_sizeinbase(*a, base); +} + +void +ibz_copy_digits(ibz_t *target, const digit_t *dig, int dig_len) +{ + mpz_import(*target, dig_len, -1, sizeof(digit_t), 0, 0, dig); +} + +void +ibz_to_digits(digit_t *target, const ibz_t *ibz) +{ + // From the GMP documentation: + // "If op is zero then the count returned will be zero and nothing written to rop." + // The next line ensures zero is written to the first limb of target if ibz is zero; + // target is then overwritten by the actual value if it is not. + target[0] = 0; + mpz_export(target, NULL, -1, sizeof(digit_t), 0, 0, *ibz); +} + +int +ibz_probab_prime(const ibz_t *n, int reps) +{ + int ret = mpz_probab_prime_p(*n, reps); + DEBUG_STR_FUN_INT_MP_INT("ibz_probab_prime", ret, n, reps); + return ret; +} + +void +ibz_gcd(ibz_t *gcd, const ibz_t *a, const ibz_t *b) +{ + mpz_gcd(*gcd, *a, *b); +} + +int +ibz_invmod(ibz_t *inv, const ibz_t *a, const ibz_t *mod) +{ + return (mpz_invert(*inv, *a, *mod) ? 
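/*
 * Note on the two rejection samplers above (ibz_rand_interval and
 * ibz_rand_interval_i): the mask keeps exactly the low bits needed to cover the
 * interval width, so each masked candidate is uniform on [0, 2^len_bits) and is
 * kept only if it does not exceed the width. Example with b - a = 5:
 * len_bits = 3, mask = 0b111, candidates 0..5 are accepted (and shifted by a),
 * 6 and 7 are rejected, so the acceptance probability is 6/8 and is always
 * above 1/2.
 */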
1 : 0); +} + +int +ibz_legendre(const ibz_t *a, const ibz_t *p) +{ + return mpz_legendre(*a, *p); +} + +int +ibz_sqrt(ibz_t *sqrt, const ibz_t *a) +{ + if (mpz_perfect_square_p(*a)) { + mpz_sqrt(*sqrt, *a); + return 1; + } else { + return 0; + } +} + +void +ibz_sqrt_floor(ibz_t *sqrt, const ibz_t *a) +{ + mpz_sqrt(*sqrt, *a); +} + +int +ibz_sqrt_mod_p(ibz_t *sqrt, const ibz_t *a, const ibz_t *p) +{ +#ifndef NDEBUG + assert(ibz_probab_prime(p, 100)); +#endif + // Case a = 0 + { + ibz_t test; + ibz_init(&test); + ibz_mod(&test, a, p); + if (ibz_is_zero(&test)) { + ibz_set(sqrt, 0); + } + ibz_finalize(&test); + } +#ifdef DEBUG_VERBOSE + ibz_t a_cp, p_cp; + ibz_init(&a_cp); + ibz_init(&p_cp); + ibz_copy(&a_cp, a); + ibz_copy(&p_cp, p); +#endif + + mpz_t amod, tmp, exp, a4, a2, q, z, qnr, x, y, b, pm1; + mpz_init(amod); + mpz_init(tmp); + mpz_init(exp); + mpz_init(a4); + mpz_init(a2); + mpz_init(q); + mpz_init(z); + mpz_init(qnr); + mpz_init(x); + mpz_init(y); + mpz_init(b); + mpz_init(pm1); + + int ret = 1; + + mpz_mod(amod, *a, *p); + if (mpz_cmp_ui(amod, 0) < 0) { + mpz_add(amod, *p, amod); + } + + if (mpz_legendre(amod, *p) != 1) { + ret = 0; + goto end; + } + + mpz_sub_ui(pm1, *p, 1); + + if (mpz_mod_ui(tmp, *p, 4) == 3) { + // p % 4 == 3 + mpz_add_ui(tmp, *p, 1); + mpz_fdiv_q_2exp(tmp, tmp, 2); + mpz_powm(*sqrt, amod, tmp, *p); + } else if (mpz_mod_ui(tmp, *p, 8) == 5) { + // p % 8 == 5 + mpz_sub_ui(tmp, *p, 1); + mpz_fdiv_q_2exp(tmp, tmp, 2); + mpz_powm(tmp, amod, tmp, *p); // a^{(p-1)/4} mod p + if (!mpz_cmp_ui(tmp, 1)) { + mpz_add_ui(tmp, *p, 3); + mpz_fdiv_q_2exp(tmp, tmp, 3); + mpz_powm(*sqrt, amod, tmp, *p); // a^{(p+3)/8} mod p + } else { + mpz_sub_ui(tmp, *p, 5); + mpz_fdiv_q_2exp(tmp, tmp, 3); // (p - 5) / 8 + mpz_mul_2exp(a4, amod, 2); // 4*a + mpz_powm(tmp, a4, tmp, *p); + + mpz_mul_2exp(a2, amod, 1); + mpz_mul(tmp, a2, tmp); + mpz_mod(*sqrt, tmp, *p); + } + } else { + // p % 8 == 1 -> Shanks-Tonelli + int e = 0; + mpz_sub_ui(q, *p, 1); + while (mpz_tstbit(q, e) == 0) + e++; + mpz_fdiv_q_2exp(q, q, e); + + // 1. find generator - non-quadratic residue + mpz_set_ui(qnr, 2); + while (mpz_legendre(qnr, *p) != -1) + mpz_add_ui(qnr, qnr, 1); + mpz_powm(z, qnr, q, *p); + + // 2. 
Initialize
+ mpz_set(y, z);
+ mpz_powm(y, amod, q, *p); // y = a^q mod p
+
+ mpz_add_ui(tmp, q, 1); // tmp = (q + 1) / 2
+ mpz_fdiv_q_2exp(tmp, tmp, 1);
+
+ mpz_powm(x, amod, tmp, *p); // x = a^(q + 1)/2 mod p
+
+ mpz_set_ui(exp, 1);
+ mpz_mul_2exp(exp, exp, e - 2);
+
+ for (int i = 0; i < e; ++i) {
+ mpz_powm(b, y, exp, *p);
+
+ if (!mpz_cmp(b, pm1)) {
+ mpz_mul(x, x, z);
+ mpz_mod(x, x, *p);
+
+ mpz_mul(y, y, z);
+ mpz_mul(y, y, z);
+ mpz_mod(y, y, *p);
+ }
+
+ mpz_powm_ui(z, z, 2, *p);
+ mpz_fdiv_q_2exp(exp, exp, 1);
+ }
+
+ mpz_set(*sqrt, x);
+ }
+
+#ifdef DEBUG_VERBOSE
+ DEBUG_STR_FUN_3("ibz_sqrt_mod_p", sqrt, &a_cp, &p_cp);
+ ibz_finalize(&a_cp);
+ ibz_finalize(&p_cp);
+#endif
+
+end:
+ mpz_clear(amod);
+ mpz_clear(tmp);
+ mpz_clear(exp);
+ mpz_clear(a4);
+ mpz_clear(a2);
+ mpz_clear(q);
+ mpz_clear(z);
+ mpz_clear(qnr);
+ mpz_clear(x);
+ mpz_clear(y);
+ mpz_clear(b);
+ mpz_clear(pm1);
+
+ return ret;
+}
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/intbig_internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/intbig_internal.h
new file mode 100644
index 0000000000..de4762a6d3
--- /dev/null
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/intbig_internal.h
@@ -0,0 +1,123 @@
+/** @file
+ *
+ * @authors Sina Schaeffler
+ *
+ * @brief Declarations for big integer functions only used in quaternion functions
+ */
+
+#ifndef INTBIG_INTERNAL_H
+#define INTBIG_INTERNAL_H
+
+#include "intbig.h"
+
+/** @internal
+ * @ingroup quat_helpers
+ * @defgroup ibz_helper Internal integer functions (gmp-based)
+ * @{
+ */
+
+/********************************************************************/
+
+/** @brief Euclidean division of a by b
+ *
+ * Computes quotient, remainder so that remainder+quotient*b = a where 0<=|remainder|<|b|
+ * The quotient is rounded towards minus infinity.
+ */
+void ibz_div_floor(ibz_t *q, ibz_t *r, const ibz_t *n, const ibz_t *d);
+
+/** @brief generate random value in [a, b]
+ * assumed that a >= 0, b >= 0 and a < b
+ * @returns 1 on success, 0 on failure
+ */
+int ibz_rand_interval_i(ibz_t *rand, int32_t a, int32_t b);
+
+/** @brief generate random value in [-2^m, 2^m]
+ * assumed that m > 0 and bitlength of m < 32 bit
+ * @returns 1 on success, 0 on failure
+ */
+int ibz_rand_interval_bits(ibz_t *rand, uint32_t m);
+
+/** @brief set str to a string containing the representation of i in base
+ *
+ * Base should be 10 or 16
+ *
+ * str should be an array of length enough to store the representation of i
+ * in base, which can be obtained by ibz_size_in_base(i, base) + 2, where the 2
+ * is for the sign and the null terminator
+ *
+ * Case for base 16 does not matter
+ *
+ * @returns 1 if the integer could be converted to a string, 0 otherwise
+ */
+int ibz_convert_to_str(const ibz_t *i, char *str, int base);
+
+/** @brief print num in base to stdout
+ *
+ * Base should be 10 or 16
+ */
+void ibz_print(const ibz_t *num, int base);
+
+/** @brief set i to integer contained in string when read as number in base
+ *
+ * Base should be 10 or 16, and the number should be written without punctuation or whitespace
+ *
+ * Case for base 16 does not matter
+ *
+ * @returns 1 if the string could be converted to an integer, 0 otherwise
+ */
+int ibz_set_from_str(ibz_t *i, const char *str, int base);
+
+/**
+ * @brief Probabilistic primality test
+ *
+ * @param n The number to test
+ * @param reps Number of Miller-Rabin repetitions.
The more, the slower and the less likely are + * false positives + * @return 1 if probably prime, 0 if certainly not prime, 2 if certainly prime + * + * Using GMP's implementation: + * + * From GMP's documentation: "This function performs some trial divisions, a Baillie-PSW probable + * prime test, then reps-24 Miller-Rabin probabilistic primality tests." + */ +int ibz_probab_prime(const ibz_t *n, int reps); + +/** + * @brief Square root modulo a prime + * + * @returns 1 if square root of a mod p exists and was computed, 0 otherwise + * @param sqrt Output: Set to a square root of a mod p if any exist + * @param a number of which a square root mod p is searched + * @param p assumed prime + */ +int ibz_sqrt_mod_p(ibz_t *sqrt, const ibz_t *a, const ibz_t *p); + +/** + * @brief Integer square root of a perfect square + * + * @returns 1 if an integer square root of a exists and was computed, 0 otherwise + * @param sqrt Output: Set to a integer square root of a if any exist + * @param a number of which an integer square root is searched + */ +int ibz_sqrt(ibz_t *sqrt, const ibz_t *a); + +/** + * @brief Legendre symbol of a mod p + * + * @returns Legendre symbol of a mod p + * @param a + * @param p assumed prime + * + * Uses GMP's implementation + * + * If output is 1, a is a square mod p, if -1, not. If 0, it is divisible by p + */ +int ibz_legendre(const ibz_t *a, const ibz_t *p); + +/** @} + */ + +// end of ibz_all +/** @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/integers.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/integers.c new file mode 100644 index 0000000000..ec7cda05eb --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/integers.c @@ -0,0 +1,116 @@ +#include +#include "internal.h" +#include +#include +#include + +// Random prime generation for tests +int +ibz_generate_random_prime(ibz_t *p, int is3mod4, int bitsize, int probability_test_iterations) +{ + assert(bitsize != 0); + int found = 0; + ibz_t two_pow, two_powp; + + ibz_init(&two_pow); + ibz_init(&two_powp); + ibz_pow(&two_pow, &ibz_const_two, (bitsize - 1) - (0 != is3mod4)); + ibz_pow(&two_powp, &ibz_const_two, bitsize - (0 != is3mod4)); + + int cnt = 0; + while (!found) { + cnt++; + if (cnt % 100000 == 0) { + printf("Random prime generation is still running after %d attempts, this is not " + "normal! 
The expected number of attempts is %d \n", + cnt, + bitsize); + } + ibz_rand_interval(p, &two_pow, &two_powp); + ibz_add(p, p, p); + if (is3mod4) { + ibz_add(p, p, p); + ibz_add(p, &ibz_const_two, p); + } + ibz_add(p, &ibz_const_one, p); + + found = ibz_probab_prime(p, probability_test_iterations); + } + ibz_finalize(&two_pow); + ibz_finalize(&two_powp); + return found; +} + +// solves x^2 + n y^2 == p for positive integers x, y +// assumes that p is prime and -n mod p is a square +int +ibz_cornacchia_prime(ibz_t *x, ibz_t *y, const ibz_t *n, const ibz_t *p) +{ + ibz_t r0, r1, r2, a, prod; + ibz_init(&r0); + ibz_init(&r1); + ibz_init(&r2); + ibz_init(&a); + ibz_init(&prod); + + int res = 0; + + // manage case p = 2 separately + if (!ibz_cmp(p, &ibz_const_two)) { + if (ibz_is_one(n)) { + ibz_set(x, 1); + ibz_set(y, 1); + res = 1; + } + goto done; + } + // manage case p = n separately + if (!ibz_cmp(p, n)) { + ibz_set(x, 0); + ibz_set(y, 1); + res = 1; + goto done; + } + + // test coprimality (should always be ok in our cases) + ibz_gcd(&r2, p, n); + if (!ibz_is_one(&r2)) + goto done; + + // get sqrt of -n mod p + ibz_neg(&r2, n); + if (!ibz_sqrt_mod_p(&r2, &r2, p)) + goto done; + + // run loop + ibz_copy(&prod, p); + ibz_copy(&r1, p); + ibz_copy(&r0, p); + while (ibz_cmp(&prod, p) >= 0) { + ibz_div(&a, &r0, &r2, &r1); + ibz_mul(&prod, &r0, &r0); + ibz_copy(&r2, &r1); + ibz_copy(&r1, &r0); + } + // test if result is solution + ibz_sub(&a, p, &prod); + ibz_div(&a, &r2, &a, n); + if (!ibz_is_zero(&r2)) + goto done; + if (!ibz_sqrt(y, &a)) + goto done; + + ibz_copy(x, &r0); + ibz_mul(&a, y, y); + ibz_mul(&a, &a, n); + ibz_add(&prod, &prod, &a); + res = !ibz_cmp(&prod, p); + +done: + ibz_finalize(&r0); + ibz_finalize(&r1); + ibz_finalize(&r2); + ibz_finalize(&a); + ibz_finalize(&prod); + return res; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/internal.h new file mode 100644 index 0000000000..edbba345f9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/internal.h @@ -0,0 +1,812 @@ +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations for helper functions for quaternion algebra implementation + */ + +#ifndef QUAT_HELPER_H +#define QUAT_HELPER_H + +#include +#include +#include "intbig_internal.h" + +/** @internal + * @ingroup quat_quat + * @defgroup quat_helpers Quaternion module internal functions + * @{ + */ + +/** @internal + * @defgroup quat_alg_helpers Helper functions for the alg library + * @{ + */ + +/** @internal + * @brief helper function for initializing small quaternion algebras. 
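ibz_cornacchia_prime() above follows Cornacchia's algorithm: take a square root r of -n mod p, run a Euclidean remainder loop on (p, r) until the remainder drops below sqrt(p), and accept the candidate x only if (p - x^2)/n is a perfect square. A toy standalone version over uint64_t is sketched below; the function name cornacchia_small and the example 41 = 3^2 + 2*4^2 are illustrative only, the square root is brute-forced, and the p = 2 and p = n corner cases handled explicitly by the real function are omitted.

#include <stdint.h>
#include <stdio.h>

/* Toy Cornacchia: solve x^2 + n*y^2 == p for a small odd prime p. */
static int cornacchia_small(uint64_t *x, uint64_t *y, uint64_t n, uint64_t p)
{
    /* square root of -n mod p by exhaustive search (fine for tiny p) */
    uint64_t r = 0, target = (p - n % p) % p;
    for (uint64_t i = 1; i < p; i++)
        if ((i * i) % p == target) { r = i; break; }
    if (r == 0)
        return 0;                     /* -n is not a square mod p */
    if (2 * r < p)
        r = p - r;                    /* take the root in (p/2, p), as in Cohen Alg. 1.5.2 */

    /* Euclidean remainder loop until the remainder drops below sqrt(p) */
    uint64_t a = p, b = r;
    while (b * b >= p) {
        uint64_t t = a % b;
        a = b;
        b = t;
    }

    /* candidate x = b; a solution exists iff (p - x^2)/n is a perfect square */
    uint64_t c = p - b * b;
    if (c % n != 0)
        return 0;
    c /= n;
    uint64_t s = 0;
    while (s * s < c)
        s++;
    if (s * s != c)
        return 0;
    *x = b;
    *y = s;
    return 1;
}

int main(void)
{
    uint64_t x, y;
    if (cornacchia_small(&x, &y, 2, 41))   /* 41 = 3^2 + 2*4^2 */
        printf("41 = %llu^2 + 2*%llu^2\n", (unsigned long long)x, (unsigned long long)y);
    return 0;
}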
+ */ +void quat_alg_init_set_ui(quat_alg_t *alg, + unsigned int p); // test/lattice, test/ideal, test/algebra + +/** @brief a*b + * + * Multiply two coordinate vectors as elements of the algebra in basis (1,i,j,ij) with i^2 = -1, j^2 + * = -p + * + * @param res Output: Will contain product + * @param a + * @param b + * @param alg The quaternion algebra + */ +void quat_alg_coord_mul(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b, const quat_alg_t *alg); + +/** @brief a=b + * + * Test if a and b represent the same quaternion algebra element + * + * @param a + * @param b + * @returns 1 if a=b, 0 otherwise + */ +int quat_alg_elem_equal(const quat_alg_elem_t *a, const quat_alg_elem_t *b); + +/** @brief Test if x is 0 + * + * @returns 1 if x=0, 0 otherwise + * + * x is 0 iff all coordinates in x->coord are 0 + */ +int quat_alg_elem_is_zero(const quat_alg_elem_t *x); + +/** @brief Compute same denominator form of two quaternion algebra elements + * + * res_a=a and res_b=b (representing the same element) and res_a.denom = res_b.denom + * + * @param res_a + * @param res_b + * @param a + * @param b + */ +void quat_alg_equal_denom(quat_alg_elem_t *res_a, + quat_alg_elem_t *res_b, + const quat_alg_elem_t *a, + const quat_alg_elem_t *b); + +/** @brief Copies the given values into an algebra element, without normalizing it + * + * @param elem Output: algebra element of coordinates [coord0,coord1,coord2,coord3] and denominator + * denom + * @param denom Denominator, must be non zero + * @param coord0 Coordinate on 1 (0th vector of standard algebra basis) + * @param coord1 Coordinate on i (1st vector of standard algebra basis) + * @param coord2 Coordinate on j (2nd vector of standard algebra basis) + * @param coord3 Coordinate on ij (3rd vector of standard algebra basis) + */ +void quat_alg_elem_copy_ibz(quat_alg_elem_t *elem, + const ibz_t *denom, + const ibz_t *coord0, + const ibz_t *coord1, + const ibz_t *coord2, + const ibz_t *coord3); + +/** @brief Sets an algebra element to the given integer values, without normalizing it + * + * @param elem Output: algebra element of coordinates [coord0,coord1,coord2,coord3] and denominator + * denom + * @param denom Denominator, must be non zero + * @param coord0 Coordinate on 1 (0th vector of standard algebra basis) + * @param coord1 Coordinate on i (1st vector of standard algebra basis) + * @param coord2 Coordinate on j (2nd vector of standard algebra basis) + * @param coord3 Coordinate on ij (3rd vector of standard algebra basis) + */ +void quat_alg_elem_set(quat_alg_elem_t *elem, + int32_t denom, + int32_t coord0, + int32_t coord1, + int32_t coord2, + int32_t coord3); + +/** + * @brief Creates algebra element from scalar + * + * Resulting element has 1-coordinate equal to numerator/denominator + * + * @param elem Output: algebra element with numerator/denominator as first coordiante + * (1-coordinate), 0 elsewhere (i,j,ij coordinates) + * @param numerator + * @param denominator Assumed non zero + */ +void quat_alg_scalar(quat_alg_elem_t *elem, const ibz_t *numerator, const ibz_t *denominator); + +/** @brief a+b for algebra elements + * + * @param res Output + * @param a Algebra element + * @param b Algebra element + */ +void quat_alg_add(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b); + +/** @brief a-b for algebra elements + * + * @param res Output + * @param a Algebra element + * @param b Algebra element + */ +void quat_alg_sub(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b); + +/** @brief 
Multiplies algebra element by integer scalar, without normalizing it + * + * @param res Output + * @param scalar Integer + * @param elem Algebra element + */ +void quat_alg_elem_mul_by_scalar(quat_alg_elem_t *res, const ibz_t *scalar, const quat_alg_elem_t *elem); + +/** @} + */ + +/** @internal + * @defgroup quat_dim4_helpers Helper functions for functions for matrices or vectors in dimension 4 + * @{ + */ + +/** @internal + * @defgroup quat_inv_helpers Helper functions for the integer matrix inversion function + * @{ + */ + +/** @brief a1a2+b1b2+c1c2 + * + * @param coeff Output: The coefficien which was computed as a1a2+b1b2-c1c2 + * @param a1 + * @param a2 + * @param b1 + * @param b2 + * @param c1 + * @param c2 + */ +void ibz_inv_dim4_make_coeff_pmp(ibz_t *coeff, + const ibz_t *a1, + const ibz_t *a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2); + +/** @brief -a1a2+b1b2-c1c2 + * + * @param coeff Output: The coefficien which was computed as -a1a2+b1b2-c1c2 + * @param a1 + * @param a2 + * @param b1 + * @param b2 + * @param c1 + * @param c2 + */ +void ibz_inv_dim4_make_coeff_mpm(ibz_t *coeff, + const ibz_t *a1, + const ibz_t *a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2); + +/** @brief Matrix determinant and a matrix inv such that inv/det is the inverse matrix of the input + * + * Implemented following the methof of 2x2 minors explained at Method from + * https://www.geometrictools.com/Documentation/LaplaceExpansionTheorem.pdf (visited on 3rd of May + * 2023, 16h15 CEST) + * + * @returns 1 if the determinant of mat is not 0 and an inverse was computed, 0 otherwise + * @param inv Output: Will contain an integer matrix which, dividet by det, will yield the rational + * inverse of the matrix if it exists, can be NULL + * @param det Output: Will contain the determinant of the input matrix, can be NULL + * @param mat Matrix of which the inverse will be computed + */ +int ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_4x4_t *mat); + +/** @} + */ + +/** @internal + * @defgroup quat_dim4_lat_helpers Helper functions on vectors and matrices used mainly for lattices + * @{ + */ + +/** @brief Copy all values from one vector to another + * + * @param new Output: is set to same values as vec + * @param vec + */ +void ibz_vec_4_copy(ibz_vec_4_t *new, const ibz_vec_4_t *vec); + +/** @brief set res to values coord0,coord1,coord2,coord3 + * + * @param res Output: Will contain vector (coord0,coord1,coord2,coord3) + * @param coord0 + * @param coord1 + * @param coord2 + * @param coord3 + */ +void ibz_vec_4_copy_ibz(ibz_vec_4_t *res, + const ibz_t *coord0, + const ibz_t *coord1, + const ibz_t *coord2, + const ibz_t *coord3); + +/** @brief Set a vector of 4 integers to given values + * + * @param vec Output: is set to given coordinates + * @param coord0 + * @param coord1 + * @param coord2 + * @param coord3 + */ +void ibz_vec_4_set(ibz_vec_4_t *vec, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3); + +/** @brief a+b + * + * Add two integer 4-vectors + * + * @param res Output: Will contain sum + * @param a + * @param b + */ +void ibz_vec_4_add(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b); + +/** @brief a-b + * + * Substract two integer 4-vectors + * + * @param res Output: Will contain difference + * @param a + * @param b + */ +void ibz_vec_4_sub(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b); + +/** @brief x=0 + * + * Test if a vector x has only zero coordinates + * + * 
@returns 0 if x has at least one non-zero coordinates, 1 otherwise + * @param x + */ +int ibz_vec_4_is_zero(const ibz_vec_4_t *x); + +/** @brief Compute the linear combination lc = coeff_a vec_a + coeff_b vec_b + * + * @param lc Output: linear combination lc = coeff_a vec_a + coeff_b vec_b + * @param coeff_a Scalar multiplied to vec_a + * @param vec_a + * @param coeff_b Scalar multiplied to vec_b + * @param vec_b + */ +void ibz_vec_4_linear_combination(ibz_vec_4_t *lc, + const ibz_t *coeff_a, + const ibz_vec_4_t *vec_a, + const ibz_t *coeff_b, + const ibz_vec_4_t *vec_b); + +/** @brief multiplies all values in vector by same scalar + * + * @param prod Output + * @param scalar + * @param vec + */ +void ibz_vec_4_scalar_mul(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec); + +/** @brief divides all values in vector by same scalar + * + * @returns 1 if scalar divided all values in mat, 0 otherwise (division is performed in both cases) + * @param quot Output + * @param scalar + * @param vec + */ +int ibz_vec_4_scalar_div(ibz_vec_4_t *quot, const ibz_t *scalar, const ibz_vec_4_t *vec); + +/** @brief Negation for vectors of 4 integers + * + * @param neg Output: is set to -vec + * @param vec + */ +void ibz_vec_4_negate(ibz_vec_4_t *neg, const ibz_vec_4_t *vec); + +/** + * @brief content of a 4-vector of integers + * + * The content is the GCD of all entries. + * + * @param v A 4-vector of integers + * @param content Output: the resulting gcd + */ +void ibz_vec_4_content(ibz_t *content, const ibz_vec_4_t *v); + +/** @brief -mat for mat a 4x4 integer matrix + * + * @param neg Output: is set to -mat + * @param mat Input matrix + */ +void ibz_mat_4x4_negate(ibz_mat_4x4_t *neg, const ibz_mat_4x4_t *mat); + +/** @brief Set all coefficients of a matrix to zero for 4x4 integer matrices + * + * @param zero + */ +void ibz_mat_4x4_zero(ibz_mat_4x4_t *zero); + +/** @brief Set a matrix to the identity for 4x4 integer matrices + * + * @param id + */ +void ibz_mat_4x4_identity(ibz_mat_4x4_t *id); + +/** @brief Test equality to identity for 4x4 integer matrices + * + * @returns 1 if mat is the identity matrix, 0 otherwise + * @param mat + */ +int ibz_mat_4x4_is_identity(const ibz_mat_4x4_t *mat); + +/** @brief Equality test for 4x4 integer matrices + * + * @returns 1 if equal, 0 otherwise + * @param mat1 + * @param mat2 + */ +int ibz_mat_4x4_equal(const ibz_mat_4x4_t *mat1, const ibz_mat_4x4_t *mat2); + +/** @brief Copies all values from a 4x4 integer matrix to another one + * + * @param new Output: matrix which will have its entries set to mat's entries + * @param mat Input matrix + */ +void ibz_mat_4x4_copy(ibz_mat_4x4_t *new, const ibz_mat_4x4_t *mat); + +/** @brief Matrix by integer multiplication + * + * @param prod Output + * @param scalar + * @param mat + */ +void ibz_mat_4x4_scalar_mul(ibz_mat_4x4_t *prod, const ibz_t *scalar, const ibz_mat_4x4_t *mat); + +/** @brief gcd of all values in matrix + * + * @param gcd Output + * @param mat + */ +void ibz_mat_4x4_gcd(ibz_t *gcd, const ibz_mat_4x4_t *mat); + +/** @brief Verifies whether the 4x4 input matrix is in Hermite Normal Form + * + * @returns 1 if mat is in HNF, 0 otherwise + * @param mat Matrix to be tested + */ +int ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat); + +/** @brief Hermite Normal Form of a matrix of 8 integer vectors, computed using a multiple of its + * determinant as modulo + * + * Algorithm used is the one at number 2.4.8 in Henri Cohen's "A Course in Computational Algebraic + * Number Theory" (Springer Verlag, in series 
"Graduate texts in Mathematics") from 1993 + * + * @param hnf Output: Matrix in Hermite Normal Form generating the same lattice as generators + * @param generators matrix whose colums generate the same lattice than the output + * @param generator_number number of generators given + * @param mod integer, must be a multiple of the volume of the lattice generated by the columns of + * generators + */ +void ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, + int generator_number, + const ibz_vec_4_t *generators, + const ibz_t *mod); + +/** @} + */ +/** @} + */ + +/** @internal + * @defgroup quat_dim2_helpers Helper functions for dimension 2 + * @{ + */ + +/** @brief Set vector coefficients to the given integers + * + * @param vec Output: Vector + * @param a0 + * @param a1 + */ +void ibz_vec_2_set(ibz_vec_2_t *vec, int a0, int a1); // test/dim2 + +/** @brief Set matrix coefficients to the given integers + * + * @param mat Output: Matrix + * @param a00 + * @param a01 + * @param a10 + * @param a11 + */ +void ibz_mat_2x2_set(ibz_mat_2x2_t *mat, int a00, int a01, int a10, int a11); // test/dim2 + +void ibz_mat_2x2_add(ibz_mat_2x2_t *sum, const ibz_mat_2x2_t *a, + const ibz_mat_2x2_t *b); // unused + +/** @brief Determinant of a 2x2 integer matrix given as 4 integers + * + * @param det Output: Determinant of the matrix + * @param a11 matrix coefficient (upper left corner) + * @param a12 matrix coefficient (upper right corner) + * @param a21 matrix coefficient (lower left corner) + * @param a22 matrix coefficient (lower right corner) + */ +void ibz_mat_2x2_det_from_ibz(ibz_t *det, + const ibz_t *a11, + const ibz_t *a12, + const ibz_t *a21, + const ibz_t *a22); // dim4 + +/** + * @brief a*b for 2x2 integer matrices modulo m + * + * @param prod Output matrix + * @param mat_a Input matrix + * @param mat_b Input matrix + * @param m Integer modulo + */ +void ibz_2x2_mul_mod(ibz_mat_2x2_t *prod, + const ibz_mat_2x2_t *mat_a, + const ibz_mat_2x2_t *mat_b, + const ibz_t *m); // test/dim2 +/** @} + */ + +/** @internal + * @defgroup quat_lattice_helper Helper functions for the lattice library (dimension 4) + * @{ + */ + +/** + * @brief Modifies a lattice to put it in hermite normal form + * + * In-place modification of the lattice. + * + * @param lat input lattice + * + * On a correct lattice this function changes nothing (since it is already in HNF), but it can be + * used to put a handmade one in correct form in order to use the other lattice functions. + */ +void quat_lattice_hnf(quat_lattice_t *lat); // lattice, test/lattice, test/algebra, + +/** + * @brief Lattice equality + * + * Lattice bases are assumed to be under HNF, but denominators are free. + * + * @returns 1 if both lattices are equal, 0 otherwise + * @param lat1 + * @param lat2 + */ +int quat_lattice_equal(const quat_lattice_t *lat1, + const quat_lattice_t *lat2); // ideal, lattice, test/lattice, test/ideal + +/** + * @brief Lattice inclusion test + * + * Lattice bases are assumed to be under HNF, but denominators are free. 
+ * + * @returns 1 if sublat is included in overlat, 0 otherwise + * @param sublat Lattice whose inclusion in overlat will be testes + * @param overlat + */ +int quat_lattice_inclusion(const quat_lattice_t *sublat, + const quat_lattice_t *overlat); // test/lattice, test/ideal + +/** @brief Divides basis and denominator of a lattice by their gcd + * + * @param reduced Output + * @param lat Lattice + */ +void quat_lattice_reduce_denom(quat_lattice_t *reduced, + const quat_lattice_t *lat); // lattice, ideal, + +/** @brief a+b for lattices + * + * @param res Output + * @param lat1 Lattice + * @param lat2 Lattice + */ +void quat_lattice_add(quat_lattice_t *res, + const quat_lattice_t *lat1, + const quat_lattice_t *lat2); // ideal, lattice, test/lattice + +/** @brief a*b for lattices + * + * @param res Output + * @param lat1 Lattice + * @param lat2 Lattice + * @param alg The quaternion algebra + */ +void quat_lattice_mul(quat_lattice_t *res, + const quat_lattice_t *lat1, + const quat_lattice_t *lat2, + const quat_alg_t *alg); // ideal, lattie, test/ideal, test/lattice + +/** + * @brief Computes the dual lattice of lat, without putting its basis in HNF + * + * This function returns a lattice not under HNF. For careful internal use only. + * + * Computation method described in https://cseweb.ucsd.edu/classes/sp14/cse206A-a/lec4.pdf consulted + * on 19 of May 2023, 12h40 CEST + * + * @param dual Output: The dual lattice of lat. ATTENTION: is not under HNF. hnf computation must be + * applied before using lattice functions on it + * @param lat lattice, the dual of it will be computed + */ +void quat_lattice_dual_without_hnf(quat_lattice_t *dual, + const quat_lattice_t *lat); // lattice, ideal + +/** + * @brief Multiply all columns of lat with coord (as algebra elements) + * + * The columns and coord are seen as algebra elements in basis 1,i,j,ij, i^2 = -1, j^2 = -p). Coord + * is multiplied to the right of lat. + * + * The output matrix is not under HNF. + * + * @param prod Output: Matrix not under HND whose columns represent the algebra elements obtained as + * L*coord for L column of lat. + * @param lat Matrix whose columns are algebra elements in basis (1,i,j,ij) + * @param coord Integer coordinate algebra element in basis (1,i,j,ij) + * @param alg The quaternion algebra + */ +void quat_lattice_mat_alg_coord_mul_without_hnf(ibz_mat_4x4_t *prod, + const ibz_mat_4x4_t *lat, + const ibz_vec_4_t *coord, + const quat_alg_t *alg); // lattice + +/** @brief The index of sublat into overlat + * + * Assumes inputs are in HNF. + * + * @param index Output + * @param sublat A lattice in HNF, must be sublattice of overlat + * @param overlat A lattice in HNF, must be overlattice of sublat + */ +void quat_lattice_index(ibz_t *index, const quat_lattice_t *sublat, + const quat_lattice_t *overlat); // ideal + +/** @brief Compute the Gram matrix of the quaternion trace bilinear form + * + * Given a lattice of the quaternion algebra, computes the Gram matrix + * of the bilinear form + * + * 〈a,b〉 := [lattice->denom^2] Tr(a·conj(b)) + * + * multiplied by the square of the denominator of the lattice. + * + * This matrix always has integer entries. 
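The comment on quat_lattice_hnf() above notes that it can bring a handmade basis into the normalized form the other lattice helpers expect (bases in HNF, denominators free). A hypothetical test-style sketch of that workflow follows; it assumes quat_lattice_init()/quat_lattice_finalize() initialize and release all members, as the lattice code later in this patch suggests, and it is not itself part of the patch.

#include <assert.h>
#include "internal.h"

/* Hypothetical usage sketch, illustration only. */
static void lattice_hnf_usage_sketch(void)
{
    quat_lattice_t lat, order;
    quat_lattice_init(&lat);
    quat_lattice_init(&order);

    /* order = Z + Zi + Zj + Zij: identity basis, denominator 1 */
    ibz_mat_4x4_identity(&order.basis);
    ibz_set(&order.denom, 1);

    /* handmade description of the same lattice: basis 2*I, denominator 2 */
    ibz_mat_4x4_zero(&lat.basis);
    for (int i = 0; i < 4; i++)
        ibz_set(&lat.basis[i][i], 2);
    ibz_set(&lat.denom, 2);

    /* normalize before using the comparison helpers */
    quat_lattice_hnf(&lat);

    assert(quat_lattice_equal(&lat, &order));
    assert(quat_lattice_inclusion(&lat, &order));

    quat_lattice_finalize(&lat);
    quat_lattice_finalize(&order);
}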
+ * + * @param G Output: Gram matrix of the trace bilinear form on the lattice, multiplied by the square + * of the denominator of the lattice + * @param lattice A lattice + * @param alg The quaternion algebra + */ +void quat_lattice_gram(ibz_mat_4x4_t *G, const quat_lattice_t *lattice, const quat_alg_t *alg); + +/** + * @brief Compute an integer parallelogram containing the ball of + * given radius for the positive definite quadratic form defined by + * the Gram matrix G. + * + * The computed parallelogram is defined by the vectors + * + * (x₁ x₂ x₃ x₄) · U + * + * with x_i ∈ [ -box[i], box[i] ]. + * + * @param box Output: bounds of the parallelogram + * @param U Output: Unimodular transformation defining the parallelogram + * @param G Gram matrix of the quadratic form, must be full rank + * @param radius Radius of the ball, must be non-negative + * @returns 0 if the box only contains the origin, 1 otherwise + */ +int quat_lattice_bound_parallelogram(ibz_vec_4_t *box, ibz_mat_4x4_t *U, const ibz_mat_4x4_t *G, const ibz_t *radius); + +/** @} + */ + +/** @internal + * @defgroup quat_lideal_helper Helper functions for ideals and orders + * @{ + */ +/** @brief Set norm of an ideal given its lattice and parent order + * + * @param lideal In/Output: Ideal which has lattice and parent_order correctly set, but not + * necessarily the norm. Will have norm correctly set too. + */ +void quat_lideal_norm(quat_left_ideal_t *lideal); // ideal + +/** + * @brief Left principal ideal of order, generated by x + * + * @param lideal Output: left ideal + * @param alg quaternion algebra + * @param order maximal order of alg whose left ideal is searched + * @param x generating element + * + * Creates the left ideal in 'order' generated by the element 'x' + */ +void quat_lideal_create_principal(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const quat_lattice_t *order, + const quat_alg_t *alg); // ideal, test/ideal + +/** + * @brief Equality test for left ideals + * + * @returns 1 if both left ideals are equal, 0 otherwise + * @param lideal1 left ideal + * @param lideal2 left ideal + * @param alg the quaternion algebra + */ +int quat_lideal_equals(const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); // test/ideal + +/** + * @brief Sum of two left ideals + * + * @param sum Output: Left ideal which is the sum of the 2 inputs + * @param lideal1 left ideal + * @param lideal2 left ideal + * @param alg the quaternion algebra + */ +void quat_lideal_add(quat_left_ideal_t *sum, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); // Not used outside + +/** + * @brief Left ideal product of left ideal I and element alpha + * + * @param product Output: lideal I*alpha, must have integer norm + * @param lideal left ideal + * @param alpha element multiplied to lideal to get the product ideal + * @param alg the quaternion algebra + * + * I*alpha where I is a left-ideal and alpha an element of the algebra + * + * The resulting ideal must have an integer norm + * + */ +void quat_lideal_mul(quat_left_ideal_t *product, + const quat_left_ideal_t *lideal, + const quat_alg_elem_t *alpha, + const quat_alg_t *alg); // test/ideal + +/** @brief Computes the inverse ideal (for a left ideal of a maximal order) without putting it under + * HNF + * + * This function returns a lattice not under HNF. 
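quat_lattice_bound_parallelogram() above, together with quat_lattice_sample_from_ball() in lat_ball.c later in this patch, implements a bound-then-reject strategy: enclose the ball q(x) <= R of the quadratic form in an integer box derived from the dual (inverse) Gram matrix, sample uniformly from the box, and reject until the form value is nonzero and within the radius. Below is a two-dimensional toy version of that idea, without the LLL reduction of the dual and with plain rand() instead of the library's randomness; illustration only, not cryptographically sound.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const long G[2][2] = { { 2, 1 }, { 1, 4 } };   /* positive definite Gram matrix */
    const long R = 40;                             /* squared radius */

    /* dual Gram matrix: inv(G) = adj(G)/det(G); only its diagonal is needed */
    long det = G[0][0] * G[1][1] - G[0][1] * G[1][0];
    long adj_diag[2] = { G[1][1], G[0][0] };

    /* box[i] = floor(sqrt(R * inv(G)[i][i])) bounds |x_i| over the ball q(x) <= R */
    long box[2];
    for (int i = 0; i < 2; i++) {
        long t = R * adj_diag[i] / det;
        long b = 0;
        while ((b + 1) * (b + 1) <= t)
            b++;
        box[i] = b;
    }

    /* rejection sampling: uniform in the box, reject 0 and points outside the ball */
    srand(1);
    long x[2], q;
    do {
        for (int i = 0; i < 2; i++)
            x[i] = rand() % (2 * box[i] + 1) - box[i];
        q = 0;
        for (int i = 0; i < 2; i++)
            for (int j = 0; j < 2; j++)
                q += G[i][j] * x[i] * x[j];
    } while (q == 0 || q > R);

    printf("sampled (%ld, %ld) with q(x) = %ld <= %ld\n", x[0], x[1], q, R);
    return 0;
}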
For careful internal use only + * + * Computes the inverse ideal for lideal as conjugate(lideal)/norm(lideal) + * + * @param inv Output: lattice which is lattice representation of the inverse ideal of lideal + * ATTENTION: is not under HNF. hnf computation must be applied before using lattice functions on it + * @param lideal Left ideal of a maximal order in alg + * @param alg The quaternion algebra + */ +void quat_lideal_inverse_lattice_without_hnf(quat_lattice_t *inv, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg); // ideal + +/** @brief Computes the right transporter of two left ideals of the same maximal order + * + * Following the implementation of ideal isomorphisms in the code of LearningToSQI's sage + * implementation of SQIsign. Computes the right transporter of (J:I) as inverse(I)J. + * + * @param trans Output: lattice which is right transporter from lideal1 to lideal2 (lideal2:lideal1) + * @param lideal1 Left ideal of the same maximal order than lideal1 in alg + * @param lideal2 Left ideal of the same maximal order than lideal1 in alg + * @param alg The quaternion algebra + */ +void quat_lideal_right_transporter(quat_lattice_t *trans, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); + +/** + * @brief Right order of a left ideal + * + * @param order Output: right order of the given ideal + * @param lideal left ideal + * @param alg the quaternion algebra + */ +void quat_lideal_right_order(quat_lattice_t *order, const quat_left_ideal_t *lideal, + const quat_alg_t *alg); // ideal + +/** + * @brief Gram matrix of the trace map of the ideal class + * + * Compute the Gram matrix of the bilinear form + * + * 〈a, b〉 := Tr(a·conj(b)) / norm(lideal) + * + * on the basis of the ideal. This matrix has integer entries and its + * integer congruence class only depends on the ideal class. + * + * @param G Output: Gram matrix of the trace map + * @param lideal left ideal + * @param alg the quaternion algebra + */ +void quat_lideal_class_gram(ibz_mat_4x4_t *G, const quat_left_ideal_t *lideal, const quat_alg_t *alg); + +/** @brief Test if order is maximal + * + * Checks if the discriminant of the order equals the prime p defining the quaternion algebra. + * + * It is not verified whether the order is really an order. The output 1 only means that if it is an + * order, then it is maximal. 
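The trace bilinear forms used by quat_lattice_gram() and quat_lideal_class_gram() are easy to see concretely on the standard basis (1, i, j, ij) with i^2 = -1 and j^2 = -p: there the form 〈a,b〉 = Tr(a·conj(b)) is 2*(x0^2 + x1^2 + p*x2^2 + p*x3^2), so the Gram matrix is diag(2, 2, 2p, 2p). The sketch below mirrors the accumulation pattern of quat_lattice_gram() (defined in lattice.c later in this patch) on plain int64_t and checks that diagonal shape; it is an illustration, not library code.

#include <assert.h>
#include <stdint.h>

static void gram_identity_sketch(int64_t p)
{
    int64_t basis[4][4] = { { 1, 0, 0, 0 }, { 0, 1, 0, 0 }, { 0, 0, 1, 0 }, { 0, 0, 0, 1 } };
    int64_t G[4][4] = { 0 };

    for (int i = 0; i < 4; i++)
        for (int j = 0; j < 4; j++) {
            /* rows 2 and 3 (the j and ij components) pick up the factor p */
            for (int k = 0; k < 4; k++)
                G[i][j] += basis[k][i] * basis[k][j] * (k >= 2 ? p : 1);
            G[i][j] *= 2;   /* Tr(a*conj(a)) = 2*Nrd(a) on the diagonal */
        }

    /* On the standard basis the form is 2*(x0^2 + x1^2 + p*x2^2 + p*x3^2),
     * i.e. G = diag(2, 2, 2p, 2p). */
    for (int i = 0; i < 4; i++)
        for (int j = 0; j < 4; j++)
            assert(G[i][j] == (i != j ? 0 : (i < 2 ? 2 : 2 * p)));
}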
+ * + * @returns 1 if order is maximal (assuming it is an order), 0 otherwise + * @param order An order of the quaternion algebra (assumes to be an order, this is not tested) + * @param alg The quaternion algebra + */ +int quat_order_is_maximal(const quat_lattice_t *order, + const quat_alg_t *alg); // ideal (only in asserts) + +/** @brief Compute the discriminant of an order as sqrt(det(gram(reduced_norm))) + * + * @param disc: Output: The discriminant sqrt(det(gram(reduced_norm))) + * @param order An order of the quaternion algebra + * @param alg The quaternion algebra + */ +int quat_order_discriminant(ibz_t *disc, const quat_lattice_t *order, + const quat_alg_t *alg); // ideal + +/** @} + */ + +/** @internal + * @ingroup quat_normeq + * @{ + */ + +/** @brief Set lattice to O0 + * + * @param O0 Lattice to be set to (1,i,(i+j)/2,(1+ij)/2) + */ +void quat_lattice_O0_set(quat_lattice_t *O0); + +/** @brief Set p-extremal maximal order to O0 + * + * @param O0 p-extremal order to be set to (1,i,(i+j)/2,(1+ij)/2) + */ +void quat_lattice_O0_set_extremal(quat_p_extremal_maximal_order_t *O0); + +/** + * @brief Create an element of a extremal maximal order from its coefficients + * + * @param elem Output: the quaternion element + * @param order the order + * @param coeffs the vector of 4 ibz coefficients + * @param Bpoo quaternion algebra + * + * elem = x + z*y + z*u + t*z*v + * where coeffs = [x,y,u,v] and t = order.t z = order.z + * + */ +void quat_order_elem_create(quat_alg_elem_t *elem, + const quat_p_extremal_maximal_order_t *order, + const ibz_vec_4_t *coeffs, + const quat_alg_t *Bpoo); // normeq, untested + +/** @} + */ +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/l2.c new file mode 100644 index 0000000000..8c49b21d20 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/l2.c @@ -0,0 +1,190 @@ +#include +#include "lll_internals.h" +#include "internal.h" + +#include "dpe.h" + +// Access entry of symmetric matrix +#define SYM(M, i, j) (i < j ? 
&M[j][i] : &M[i][j]) + +void +quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) +{ + dpe_t dpe_const_one, dpe_const_DELTABAR; + + dpe_init(dpe_const_one); + dpe_set_ui(dpe_const_one, 1); + + dpe_init(dpe_const_DELTABAR); + dpe_set_d(dpe_const_DELTABAR, DELTABAR); + + // fp variables for Gram-Schmidt orthogonalization and Lovasz' conditions + dpe_t r[4][4], u[4][4], lovasz[4]; + for (int i = 0; i < 4; i++) { + dpe_init(lovasz[i]); + for (int j = 0; j <= i; j++) { + dpe_init(r[i][j]); + dpe_init(u[i][j]); + } + } + + // threshold for swaps + dpe_t delta_bar; + dpe_init(delta_bar); + dpe_set_d(delta_bar, DELTABAR); + + // Other work variables + dpe_t Xf, tmpF; + dpe_init(Xf); + dpe_init(tmpF); + ibz_t X, tmpI; + ibz_init(&X); + ibz_init(&tmpI); + + // Main L² loop + dpe_set_z(r[0][0], (*G)[0][0]); + int kappa = 1; + while (kappa < 4) { + // size reduce b_κ + int done = 0; + while (!done) { + // Recompute the κ-th row of the Choleski Factorisation + // Loop invariant: + // r[κ][j] ≈ u[κ][j] ‖b_j*‖² ≈ 〈b_κ, b_j*〉 + for (int j = 0; j <= kappa; j++) { + dpe_set_z(r[kappa][j], (*G)[kappa][j]); + for (int k = 0; k < j; k++) { + dpe_mul(tmpF, r[kappa][k], u[j][k]); + dpe_sub(r[kappa][j], r[kappa][j], tmpF); + } + if (j < kappa) + dpe_div(u[kappa][j], r[kappa][j], r[j][j]); + } + + done = 1; + // size reduce + for (int i = kappa - 1; i >= 0; i--) { + if (dpe_cmp_d(u[kappa][i], ETABAR) > 0 || dpe_cmp_d(u[kappa][i], -ETABAR) < 0) { + done = 0; + dpe_set(Xf, u[kappa][i]); + dpe_round(Xf, Xf); + dpe_get_z(X, Xf); + // Update basis: b_κ ← b_κ - X·b_i + for (int j = 0; j < 4; j++) { + ibz_mul(&tmpI, &X, &(*basis)[j][i]); + ibz_sub(&(*basis)[j][kappa], &(*basis)[j][kappa], &tmpI); + } + // Update lower half of the Gram matrix + // = - 2X + X² = + // - X - X( - X·) + //// 〈b_κ, b_κ〉 ← 〈b_κ, b_κ〉 - X·〈b_κ, b_i〉 + ibz_mul(&tmpI, &X, &(*G)[kappa][i]); + ibz_sub(&(*G)[kappa][kappa], &(*G)[kappa][kappa], &tmpI); + for (int j = 0; j < 4; j++) { // works because i < κ + // 〈b_κ, b_j〉 ← 〈b_κ, b_j〉 - X·〈b_i, b_j〉 + ibz_mul(&tmpI, &X, SYM((*G), i, j)); + ibz_sub(SYM((*G), kappa, j), SYM((*G), kappa, j), &tmpI); + } + // After the loop: + //// 〈b_κ,b_κ〉 ← 〈b_κ,b_κ〉 - X·〈b_κ,b_i〉 - X·(〈b_κ,b_i〉 - X·〈b_i, + /// b_i〉) = 〈b_κ - X·b_i, b_κ - X·b_i〉 + // + // Update u[kappa][j] + for (int j = 0; j < i; j++) { + dpe_mul(tmpF, Xf, u[i][j]); + dpe_sub(u[kappa][j], u[kappa][j], tmpF); + } + } + } + } + + // Check Lovasz' conditions + // lovasz[0] = ‖b_κ‖² + dpe_set_z(lovasz[0], (*G)[kappa][kappa]); + // lovasz[i] = lovasz[i-1] - u[κ][i-1]·r[κ][i-1] + for (int i = 1; i < kappa; i++) { + dpe_mul(tmpF, u[kappa][i - 1], r[kappa][i - 1]); + dpe_sub(lovasz[i], lovasz[i - 1], tmpF); + } + int swap; + for (swap = kappa; swap > 0; swap--) { + dpe_mul(tmpF, delta_bar, r[swap - 1][swap - 1]); + if (dpe_cmp(tmpF, lovasz[swap - 1]) < 0) + break; + } + + // Insert b_κ before b_swap + if (kappa != swap) { + // Insert b_κ before b_swap in the basis and in the lower half Gram matrix + for (int j = kappa; j > swap; j--) { + for (int i = 0; i < 4; i++) { + ibz_swap(&(*basis)[i][j], &(*basis)[i][j - 1]); + if (i == j - 1) + ibz_swap(&(*G)[i][i], &(*G)[j][j]); + else if (i != j) + ibz_swap(SYM((*G), i, j), SYM((*G), i, j - 1)); + } + } + // Copy row u[κ] and r[κ] in swap position, ignore what follows + for (int i = 0; i < swap; i++) { + dpe_set(u[swap][i], u[kappa][i]); + dpe_set(r[swap][i], r[kappa][i]); + } + dpe_set(r[swap][swap], lovasz[swap]); + // swap complete + kappa = swap; + } + + kappa += 1; + } + +#ifndef NDEBUG + // Check 
size-reducedness + for (int i = 0; i < 4; i++) + for (int j = 0; j < i; j++) { + dpe_abs(u[i][j], u[i][j]); + assert(dpe_cmp_d(u[i][j], ETABAR) <= 0); + } + // Check Lovasz' conditions + for (int i = 1; i < 4; i++) { + dpe_mul(tmpF, u[i][i - 1], u[i][i - 1]); + dpe_sub(tmpF, dpe_const_DELTABAR, tmpF); + dpe_mul(tmpF, tmpF, r[i - 1][i - 1]); + assert(dpe_cmp(tmpF, r[i][i]) <= 0); + } +#endif + + // Fill in the upper half of the Gram matrix + for (int i = 0; i < 4; i++) { + for (int j = i + 1; j < 4; j++) + ibz_copy(&(*G)[i][j], &(*G)[j][i]); + } + + // Clearinghouse + ibz_finalize(&X); + ibz_finalize(&tmpI); + dpe_clear(dpe_const_one); + dpe_clear(dpe_const_DELTABAR); + dpe_clear(Xf); + dpe_clear(tmpF); + dpe_clear(delta_bar); + for (int i = 0; i < 4; i++) { + dpe_clear(lovasz[i]); + for (int j = 0; j <= i; j++) { + dpe_clear(r[i][j]); + dpe_clear(u[i][j]); + } + } +} + +int +quat_lattice_lll(ibz_mat_4x4_t *red, const quat_lattice_t *lattice, const quat_alg_t *alg) +{ + ibz_mat_4x4_t G; // Gram Matrix + ibz_mat_4x4_init(&G); + quat_lattice_gram(&G, lattice, alg); + ibz_mat_4x4_copy(red, &lattice->basis); + quat_lll_core(&G, red); + ibz_mat_4x4_finalize(&G); + return 0; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lat_ball.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lat_ball.c new file mode 100644 index 0000000000..c7bbb9682f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lat_ball.c @@ -0,0 +1,139 @@ +#include +#include +#include +#include "internal.h" +#include "lll_internals.h" + +int +quat_lattice_bound_parallelogram(ibz_vec_4_t *box, ibz_mat_4x4_t *U, const ibz_mat_4x4_t *G, const ibz_t *radius) +{ + ibz_t denom, rem; + ibz_init(&denom); + ibz_init(&rem); + ibz_mat_4x4_t dualG; + ibz_mat_4x4_init(&dualG); + +// Compute the Gram matrix of the dual lattice +#ifndef NDEBUG + int inv_check = ibz_mat_4x4_inv_with_det_as_denom(&dualG, &denom, G); + assert(inv_check); +#else + (void)ibz_mat_4x4_inv_with_det_as_denom(&dualG, &denom, G); +#endif + // Initialize the dual lattice basis to the identity matrix + ibz_mat_4x4_identity(U); + // Reduce the dual lattice + quat_lll_core(&dualG, U); + + // Compute the parallelogram's bounds + int trivial = 1; + for (int i = 0; i < 4; i++) { + ibz_mul(&(*box)[i], &dualG[i][i], radius); + ibz_div(&(*box)[i], &rem, &(*box)[i], &denom); + ibz_sqrt_floor(&(*box)[i], &(*box)[i]); + trivial &= ibz_is_zero(&(*box)[i]); + } + + // Compute the transpose transformation matrix +#ifndef NDEBUG + int inv = ibz_mat_4x4_inv_with_det_as_denom(U, &denom, U); +#else + (void)ibz_mat_4x4_inv_with_det_as_denom(U, &denom, U); +#endif + // U is unitary, det(U) = ± 1 + ibz_mat_4x4_scalar_mul(U, &denom, U); +#ifndef NDEBUG + assert(inv); + ibz_abs(&denom, &denom); + assert(ibz_is_one(&denom)); +#endif + + ibz_mat_4x4_finalize(&dualG); + ibz_finalize(&denom); + ibz_finalize(&rem); + return !trivial; +} + +int +quat_lattice_sample_from_ball(quat_alg_elem_t *res, + const quat_lattice_t *lattice, + const quat_alg_t *alg, + const ibz_t *radius) +{ + assert(ibz_cmp(radius, &ibz_const_zero) > 0); + + ibz_vec_4_t box; + ibz_vec_4_init(&box); + ibz_mat_4x4_t U, G; + ibz_mat_4x4_init(&U); + ibz_mat_4x4_init(&G); + ibz_vec_4_t x; + ibz_vec_4_init(&x); + ibz_t rad, tmp; + ibz_init(&rad); + ibz_init(&tmp); + + // Compute the Gram matrix of the lattice + quat_lattice_gram(&G, lattice, alg); + + // Correct ball radius by the denominator + ibz_mul(&rad, radius, &lattice->denom); + ibz_mul(&rad, &rad, &lattice->denom); + // Correct by 2 (Gram 
matrix corresponds to twice the norm) + ibz_mul(&rad, &rad, &ibz_const_two); + + // Compute a bounding parallelogram for the ball, stop if it only + // contains the origin + int ok = quat_lattice_bound_parallelogram(&box, &U, &G, &rad); + if (!ok) + goto err; + + // Rejection sampling from the parallelogram +#ifndef NDEBUG + int cnt = 0; +#endif + do { + // Sample vector + for (int i = 0; i < 4; i++) { + if (ibz_is_zero(&box[i])) { + ibz_copy(&x[i], &ibz_const_zero); + } else { + ibz_add(&tmp, &box[i], &box[i]); + ok &= ibz_rand_interval(&x[i], &ibz_const_zero, &tmp); + ibz_sub(&x[i], &x[i], &box[i]); + if (!ok) + goto err; + } + } + // Map to parallelogram + ibz_mat_4x4_eval_t(&x, &x, &U); + // Evaluate quadratic form + quat_qf_eval(&tmp, &G, &x); +#ifndef NDEBUG + cnt++; + if (cnt % 100 == 0) + printf("Lattice sampling rejected %d times", cnt - 1); +#endif + } while (ibz_is_zero(&tmp) || (ibz_cmp(&tmp, &rad) > 0)); + + // Evaluate linear combination + ibz_mat_4x4_eval(&(res->coord), &(lattice->basis), &x); + ibz_copy(&(res->denom), &(lattice->denom)); + quat_alg_normalize(res); + +#ifndef NDEBUG + // Check norm is smaller than radius + quat_alg_norm(&tmp, &rad, res, alg); + ibz_mul(&rad, &rad, radius); + assert(ibz_cmp(&tmp, &rad) <= 0); +#endif + +err: + ibz_finalize(&rad); + ibz_finalize(&tmp); + ibz_vec_4_finalize(&x); + ibz_mat_4x4_finalize(&U); + ibz_mat_4x4_finalize(&G); + ibz_vec_4_finalize(&box); + return ok; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lattice.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lattice.c new file mode 100644 index 0000000000..c98bae9499 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lattice.c @@ -0,0 +1,328 @@ +#include +#include +#include "internal.h" + +// helper functions +int +quat_lattice_equal(const quat_lattice_t *lat1, const quat_lattice_t *lat2) +{ + int equal = 1; + quat_lattice_t a, b; + quat_lattice_init(&a); + quat_lattice_init(&b); + quat_lattice_reduce_denom(&a, lat1); + quat_lattice_reduce_denom(&b, lat2); + ibz_abs(&(a.denom), &(a.denom)); + ibz_abs(&(b.denom), &(b.denom)); + quat_lattice_hnf(&a); + quat_lattice_hnf(&b); + equal = equal && (ibz_cmp(&(a.denom), &(b.denom)) == 0); + equal = equal && ibz_mat_4x4_equal(&(a.basis), &(b.basis)); + quat_lattice_finalize(&a); + quat_lattice_finalize(&b); + return (equal); +} + +// sublattice test +int +quat_lattice_inclusion(const quat_lattice_t *sublat, const quat_lattice_t *overlat) +{ + int res; + quat_lattice_t sum; + quat_lattice_init(&sum); + quat_lattice_add(&sum, overlat, sublat); + res = quat_lattice_equal(&sum, overlat); + quat_lattice_finalize(&sum); + return (res); +} + +void +quat_lattice_reduce_denom(quat_lattice_t *reduced, const quat_lattice_t *lat) +{ + ibz_t gcd; + ibz_init(&gcd); + ibz_mat_4x4_gcd(&gcd, &(lat->basis)); + ibz_gcd(&gcd, &gcd, &(lat->denom)); + ibz_mat_4x4_scalar_div(&(reduced->basis), &gcd, &(lat->basis)); + ibz_div(&(reduced->denom), &gcd, &(lat->denom), &gcd); + ibz_abs(&(reduced->denom), &(reduced->denom)); + ibz_finalize(&gcd); +} + +void +quat_lattice_conjugate_without_hnf(quat_lattice_t *conj, const quat_lattice_t *lat) +{ + ibz_mat_4x4_copy(&(conj->basis), &(lat->basis)); + ibz_copy(&(conj->denom), &(lat->denom)); + + for (int row = 1; row < 4; ++row) { + for (int col = 0; col < 4; ++col) { + ibz_neg(&(conj->basis[row][col]), &(conj->basis[row][col])); + } + } +} + +// Method described in https://cseweb.ucsd.edu/classes/sp14/cse206A-a/lec4.pdf consulted on 19 of +// May 2023, 12h40 CEST +void 
+quat_lattice_dual_without_hnf(quat_lattice_t *dual, const quat_lattice_t *lat) +{ + ibz_mat_4x4_t inv; + ibz_t det; + ibz_init(&det); + ibz_mat_4x4_init(&inv); + ibz_mat_4x4_inv_with_det_as_denom(&inv, &det, &(lat->basis)); + ibz_mat_4x4_transpose(&inv, &inv); + // dual_denom = det/lat_denom + ibz_mat_4x4_scalar_mul(&(dual->basis), &(lat->denom), &inv); + ibz_copy(&(dual->denom), &det); + + ibz_finalize(&det); + ibz_mat_4x4_finalize(&inv); +} + +void +quat_lattice_add(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2) +{ + ibz_vec_4_t generators[8]; + ibz_mat_4x4_t tmp; + ibz_t det1, det2, detprod; + ibz_init(&det1); + ibz_init(&det2); + ibz_init(&detprod); + for (int i = 0; i < 8; i++) + ibz_vec_4_init(&(generators[i])); + ibz_mat_4x4_init(&tmp); + ibz_mat_4x4_scalar_mul(&tmp, &(lat1->denom), &(lat2->basis)); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(generators[j][i]), &(tmp[i][j])); + } + } + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det1, &tmp); + ibz_mat_4x4_scalar_mul(&tmp, &(lat2->denom), &(lat1->basis)); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(generators[4 + j][i]), &(tmp[i][j])); + } + } + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det2, &tmp); + assert(!ibz_is_zero(&det1)); + assert(!ibz_is_zero(&det2)); + ibz_gcd(&detprod, &det1, &det2); + ibz_mat_4xn_hnf_mod_core(&(res->basis), 8, generators, &detprod); + ibz_mul(&(res->denom), &(lat1->denom), &(lat2->denom)); + quat_lattice_reduce_denom(res, res); + ibz_mat_4x4_finalize(&tmp); + ibz_finalize(&det1); + ibz_finalize(&det2); + ibz_finalize(&detprod); + for (int i = 0; i < 8; i++) + ibz_vec_4_finalize(&(generators[i])); +} + +// method described in https://cseweb.ucsd.edu/classes/sp14/cse206A-a/lec4.pdf consulted on 19 of +// May 2023, 12h40 CEST +void +quat_lattice_intersect(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2) +{ + quat_lattice_t dual1, dual2, dual_res; + quat_lattice_init(&dual1); + quat_lattice_init(&dual2); + quat_lattice_init(&dual_res); + quat_lattice_dual_without_hnf(&dual1, lat1); + + quat_lattice_dual_without_hnf(&dual2, lat2); + quat_lattice_add(&dual_res, &dual1, &dual2); + quat_lattice_dual_without_hnf(res, &dual_res); + quat_lattice_hnf(res); // could be removed if we do not expect HNF any more + quat_lattice_finalize(&dual1); + quat_lattice_finalize(&dual2); + quat_lattice_finalize(&dual_res); +} + +void +quat_lattice_mat_alg_coord_mul_without_hnf(ibz_mat_4x4_t *prod, + const ibz_mat_4x4_t *lat, + const ibz_vec_4_t *coord, + const quat_alg_t *alg) +{ + ibz_vec_4_t p, a; + ibz_vec_4_init(&p); + ibz_vec_4_init(&a); + for (int i = 0; i < 4; i++) { + ibz_vec_4_copy_ibz(&a, &((*lat)[0][i]), &((*lat)[1][i]), &((*lat)[2][i]), &((*lat)[3][i])); + quat_alg_coord_mul(&p, &a, coord, alg); + ibz_copy(&((*prod)[0][i]), &(p[0])); + ibz_copy(&((*prod)[1][i]), &(p[1])); + ibz_copy(&((*prod)[2][i]), &(p[2])); + ibz_copy(&((*prod)[3][i]), &(p[3])); + } + ibz_vec_4_finalize(&p); + ibz_vec_4_finalize(&a); +} + +void +quat_lattice_alg_elem_mul(quat_lattice_t *prod, + const quat_lattice_t *lat, + const quat_alg_elem_t *elem, + const quat_alg_t *alg) +{ + quat_lattice_mat_alg_coord_mul_without_hnf(&(prod->basis), &(lat->basis), &(elem->coord), alg); + ibz_mul(&(prod->denom), &(lat->denom), &(elem->denom)); + quat_lattice_hnf(prod); +} + +void +quat_lattice_mul(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2, const quat_alg_t *alg) +{ + ibz_vec_4_t elem1, elem2, elem_res; + 
ibz_vec_4_t generators[16]; + ibz_mat_4x4_t detmat; + ibz_t det; + quat_lattice_t lat_res; + ibz_init(&det); + ibz_mat_4x4_init(&detmat); + quat_lattice_init(&lat_res); + ibz_vec_4_init(&elem1); + ibz_vec_4_init(&elem2); + ibz_vec_4_init(&elem_res); + for (int i = 0; i < 16; i++) + ibz_vec_4_init(&(generators[i])); + for (int k = 0; k < 4; k++) { + ibz_vec_4_copy_ibz( + &elem1, &(lat1->basis[0][k]), &(lat1->basis[1][k]), &(lat1->basis[2][k]), &(lat1->basis[3][k])); + for (int i = 0; i < 4; i++) { + ibz_vec_4_copy_ibz( + &elem2, &(lat2->basis[0][i]), &(lat2->basis[1][i]), &(lat2->basis[2][i]), &(lat2->basis[3][i])); + quat_alg_coord_mul(&elem_res, &elem1, &elem2, alg); + for (int j = 0; j < 4; j++) { + if (k == 0) + ibz_copy(&(detmat[i][j]), &(elem_res[j])); + ibz_copy(&(generators[4 * k + i][j]), &(elem_res[j])); + } + } + } + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &detmat); + ibz_abs(&det, &det); + ibz_mat_4xn_hnf_mod_core(&(res->basis), 16, generators, &det); + ibz_mul(&(res->denom), &(lat1->denom), &(lat2->denom)); + quat_lattice_reduce_denom(res, res); + ibz_vec_4_finalize(&elem1); + ibz_vec_4_finalize(&elem2); + ibz_vec_4_finalize(&elem_res); + quat_lattice_finalize(&lat_res); + ibz_finalize(&det); + ibz_mat_4x4_finalize(&(detmat)); + for (int i = 0; i < 16; i++) + ibz_vec_4_finalize(&(generators[i])); +} + +// lattice assumed of full rank +int +quat_lattice_contains(ibz_vec_4_t *coord, const quat_lattice_t *lat, const quat_alg_elem_t *x) +{ + int divisible = 0; + ibz_vec_4_t work_coord; + ibz_mat_4x4_t inv; + ibz_t det, prod; + ibz_init(&prod); + ibz_init(&det); + ibz_vec_4_init(&work_coord); + ibz_mat_4x4_init(&inv); + ibz_mat_4x4_inv_with_det_as_denom(&inv, &det, &(lat->basis)); + assert(!ibz_is_zero(&det)); + ibz_mat_4x4_eval(&work_coord, &inv, &(x->coord)); + ibz_vec_4_scalar_mul(&(work_coord), &(lat->denom), &work_coord); + ibz_mul(&prod, &(x->denom), &det); + divisible = ibz_vec_4_scalar_div(&work_coord, &prod, &work_coord); + // copy result + if (divisible && (coord != NULL)) { + for (int i = 0; i < 4; i++) { + ibz_copy(&((*coord)[i]), &(work_coord[i])); + } + } + ibz_finalize(&prod); + ibz_finalize(&det); + ibz_mat_4x4_finalize(&inv); + ibz_vec_4_finalize(&work_coord); + return (divisible); +} + +void +quat_lattice_index(ibz_t *index, const quat_lattice_t *sublat, const quat_lattice_t *overlat) +{ + ibz_t tmp, det; + ibz_init(&tmp); + ibz_init(&det); + + // det = det(sublat->basis) + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &sublat->basis); + // tmp = (overlat->denom)⁴ + ibz_mul(&tmp, &overlat->denom, &overlat->denom); + ibz_mul(&tmp, &tmp, &tmp); + // index = (overlat->denom)⁴ · det(sublat->basis) + ibz_mul(index, &det, &tmp); + // tmp = (sublat->denom)⁴ + ibz_mul(&tmp, &sublat->denom, &sublat->denom); + ibz_mul(&tmp, &tmp, &tmp); + // det = det(overlat->basis) + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &overlat->basis); + // tmp = (sublat->denom)⁴ · det(overlat->basis) + ibz_mul(&tmp, &tmp, &det); + // index = index / tmp + ibz_div(index, &tmp, index, &tmp); + assert(ibz_is_zero(&tmp)); + // index = |index| + ibz_abs(index, index); + + ibz_finalize(&tmp); + ibz_finalize(&det); +} + +void +quat_lattice_hnf(quat_lattice_t *lat) +{ + ibz_t mod; + ibz_vec_4_t generators[4]; + ibz_init(&mod); + ibz_mat_4x4_inv_with_det_as_denom(NULL, &mod, &(lat->basis)); + ibz_abs(&mod, &mod); + for (int i = 0; i < 4; i++) + ibz_vec_4_init(&(generators[i])); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(generators[j][i]), &(lat->basis[i][j])); + } 
+ } + ibz_mat_4xn_hnf_mod_core(&(lat->basis), 4, generators, &mod); + quat_lattice_reduce_denom(lat, lat); + ibz_finalize(&mod); + for (int i = 0; i < 4; i++) + ibz_vec_4_finalize(&(generators[i])); +} + +void +quat_lattice_gram(ibz_mat_4x4_t *G, const quat_lattice_t *lattice, const quat_alg_t *alg) +{ + ibz_t tmp; + ibz_init(&tmp); + for (int i = 0; i < 4; i++) { + for (int j = 0; j <= i; j++) { + ibz_set(&(*G)[i][j], 0); + for (int k = 0; k < 4; k++) { + ibz_mul(&tmp, &(lattice->basis)[k][i], &(lattice->basis)[k][j]); + if (k >= 2) + ibz_mul(&tmp, &tmp, &alg->p); + ibz_add(&(*G)[i][j], &(*G)[i][j], &tmp); + } + ibz_mul(&(*G)[i][j], &(*G)[i][j], &ibz_const_two); + } + } + for (int i = 0; i < 4; i++) { + for (int j = i + 1; j < 4; j++) { + ibz_copy(&(*G)[i][j], &(*G)[j][i]); + } + } + ibz_finalize(&tmp); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lll_applications.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lll_applications.c new file mode 100644 index 0000000000..6c763b8c04 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lll_applications.c @@ -0,0 +1,127 @@ +#include +#include +#include "lll_internals.h" + +void +quat_lideal_reduce_basis(ibz_mat_4x4_t *reduced, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + ibz_t gram_corrector; + ibz_init(&gram_corrector); + ibz_mul(&gram_corrector, &(lideal->lattice.denom), &(lideal->lattice.denom)); + quat_lideal_class_gram(gram, lideal, alg); + ibz_mat_4x4_copy(reduced, &(lideal->lattice.basis)); + quat_lll_core(gram, reduced); + ibz_mat_4x4_scalar_mul(gram, &gram_corrector, gram); + for (int i = 0; i < 4; i++) { + ibz_div_2exp(&((*gram)[i][i]), &((*gram)[i][i]), 1); + for (int j = i + 1; j < 4; j++) { + ibz_set(&((*gram)[i][j]), 0); + } + } + ibz_finalize(&gram_corrector); +} + +void +quat_lideal_lideal_mul_reduced(quat_left_ideal_t *prod, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg) +{ + ibz_mat_4x4_t red; + ibz_mat_4x4_init(&red); + + quat_lattice_mul(&(prod->lattice), &(lideal1->lattice), &(lideal2->lattice), alg); + prod->parent_order = lideal1->parent_order; + quat_lideal_norm(prod); + quat_lideal_reduce_basis(&red, gram, prod, alg); + ibz_mat_4x4_copy(&(prod->lattice.basis), &red); + + ibz_mat_4x4_finalize(&red); +} + +int +quat_lideal_prime_norm_reduced_equivalent(quat_left_ideal_t *lideal, + const quat_alg_t *alg, + const int primality_num_iter, + const int equiv_bound_coeff) +{ + ibz_mat_4x4_t gram, red; + ibz_mat_4x4_init(&gram); + ibz_mat_4x4_init(&red); + + int found = 0; + + // computing the reduced basis + quat_lideal_reduce_basis(&red, &gram, lideal, alg); + + quat_alg_elem_t new_alpha; + quat_alg_elem_init(&new_alpha); + ibz_t tmp, remainder, adjusted_norm; + ibz_init(&tmp); + ibz_init(&remainder); + ibz_init(&adjusted_norm); + + ibz_mul(&adjusted_norm, &lideal->lattice.denom, &lideal->lattice.denom); + + int ctr = 0; + + // equiv_num_iter = (2 * equiv_bound_coeff + 1)^4 + assert(equiv_bound_coeff < (1 << 20)); + int equiv_num_iter = (2 * equiv_bound_coeff + 1); + equiv_num_iter = equiv_num_iter * equiv_num_iter; + equiv_num_iter = equiv_num_iter * equiv_num_iter; + + while (!found && ctr < equiv_num_iter) { + ctr++; + // we select our linear combination at random + ibz_rand_interval_minm_m(&new_alpha.coord[0], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord[1], equiv_bound_coeff); + 
ibz_rand_interval_minm_m(&new_alpha.coord[2], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord[3], equiv_bound_coeff); + + // computation of the norm of the vector sampled + quat_qf_eval(&tmp, &gram, &new_alpha.coord); + + // compute the norm of the equivalent ideal + // can be improved by removing the power of two first and the odd part only if the trial + // division failed (this should always be called on an ideal of norm 2^x * N for some + // big prime N ) + ibz_div(&tmp, &remainder, &tmp, &adjusted_norm); + + // debug : check that the remainder is zero + assert(ibz_is_zero(&remainder)); + + // pseudo-primality test + if (ibz_probab_prime(&tmp, primality_num_iter)) { + + // computes the generator using a matrix multiplication + ibz_mat_4x4_eval(&new_alpha.coord, &red, &new_alpha.coord); + ibz_copy(&new_alpha.denom, &lideal->lattice.denom); + assert(quat_lattice_contains(NULL, &lideal->lattice, &new_alpha)); + + quat_alg_conj(&new_alpha, &new_alpha); + ibz_mul(&new_alpha.denom, &new_alpha.denom, &lideal->norm); + quat_lideal_mul(lideal, lideal, &new_alpha, alg); + assert(ibz_probab_prime(&lideal->norm, primality_num_iter)); + + found = 1; + break; + } + } + assert(found); + + ibz_finalize(&tmp); + ibz_finalize(&remainder); + ibz_finalize(&adjusted_norm); + quat_alg_elem_finalize(&new_alpha); + + ibz_mat_4x4_finalize(&gram); + ibz_mat_4x4_finalize(&red); + + return found; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lll_internals.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lll_internals.h new file mode 100644 index 0000000000..e8d90141ac --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lll_internals.h @@ -0,0 +1,238 @@ +#ifndef LLL_INTERNALS_H +#define LLL_INTERNALS_H + +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations of functions only used for the LLL tets + */ + +#include + +/** @internal + * @ingroup quat_helpers + * @defgroup lll_internal Functions only used for LLL or its tests + * @{ + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_params Parameters used by the L2 implementation (floats) and its tests (ints) + * @{ + */ + +#define DELTABAR 0.995 +#define DELTA_NUM 99 +#define DELTA_DENOM 100 + +#define ETABAR 0.505 +#define EPSILON_NUM 1 +#define EPSILON_DENOM 100 + +#define PREC 64 +/** + * @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup ibq_t Types for rationals + * @{ + */ + +/** @brief Type for fractions of integers + * + * @typedef ibq_t + * + * For fractions of integers of arbitrary size, used by intbig module, using gmp + */ +typedef ibz_t ibq_t[2]; +typedef ibq_t ibq_vec_4_t[4]; +typedef ibq_t ibq_mat_4x4_t[4][4]; + +/**@} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_ibq_c Constructors and Destructors and Printers + * @{ + */ + +void ibq_init(ibq_t *x); +void ibq_finalize(ibq_t *x); + +void ibq_mat_4x4_init(ibq_mat_4x4_t *mat); +void ibq_mat_4x4_finalize(ibq_mat_4x4_t *mat); + +void ibq_vec_4_init(ibq_vec_4_t *vec); +void ibq_vec_4_finalize(ibq_vec_4_t *vec); + +void ibq_mat_4x4_print(const ibq_mat_4x4_t *mat); +void ibq_vec_4_print(const ibq_vec_4_t *vec); + +/** @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_qa Basic fraction arithmetic + * @{ + */ + +/** @brief sum=a+b + */ +void ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b); + +/** @brief diff=a-b + */ +void ibq_sub(ibq_t *diff, const ibq_t *a, const ibq_t *b); + +/** @brief neg=-x + */ +void ibq_neg(ibq_t *neg, const ibq_t *x); + +/** @brief 
abs=|x| + */ +void ibq_abs(ibq_t *abs, const ibq_t *x); + +/** @brief prod=a*b + */ +void ibq_mul(ibq_t *prod, const ibq_t *a, const ibq_t *b); + +/** @brief inv=1/x + * + * @returns 0 if x is 0, 1 if inverse exists and was computed + */ +int ibq_inv(ibq_t *inv, const ibq_t *x); + +/** @brief Compare a and b + * + * @returns a positive value if a > b, zero if a = b, and a negative value if a < b + */ +int ibq_cmp(const ibq_t *a, const ibq_t *b); + +/** @brief Test if x is 0 + * + * @returns 1 if x=0, 0 otherwise + */ +int ibq_is_zero(const ibq_t *x); + +/** @brief Test if x is 1 + * + * @returns 1 if x=1, 0 otherwise + */ +int ibq_is_one(const ibq_t *x); + +/** @brief Set q to a/b if b not 0 + * + * @returns 1 if b not 0 and q is set, 0 otherwise + */ +int ibq_set(ibq_t *q, const ibz_t *a, const ibz_t *b); + +/** @brief Copy value into target + */ +void ibq_copy(ibq_t *target, const ibq_t *value); + +/** @brief Checks if q is an integer + * + * @returns 1 if yes, 0 if not + */ +int ibq_is_ibz(const ibq_t *q); + +/** + * @brief Converts a fraction q to an integer y, if q is an integer. + * + * @returns 1 if z is an integer, 0 if not + */ +int ibq_to_ibz(ibz_t *z, const ibq_t *q); +/** @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup quat_lll_verify_helpers Helper functions for lll verification in dimension 4 + * @{ + */ + +/** @brief Set ibq to parameters delta and eta = 1/2 + epsilon using L2 constants + */ +void quat_lll_set_ibq_parameters(ibq_t *delta, ibq_t *eta); + +/** @brief Set an ibq vector to 4 given integer coefficients + */ +void ibq_vec_4_copy_ibz(ibq_vec_4_t *vec, + const ibz_t *coeff0, + const ibz_t *coeff1, + const ibz_t *coeff2, + const ibz_t *coeff3); // dim4, test/dim4 + +/** @brief Bilinear form vec00*vec10+vec01*vec11+q*vec02*vec12+q*vec03*vec13 for ibz_q + */ +void quat_lll_bilinear(ibq_t *b, const ibq_vec_4_t *vec0, const ibq_vec_4_t *vec1, + const ibz_t *q); // dim4, test/dim4 + +/** @brief Outputs the transposition of the orthogonalised matrix of mat (as fractions) + * + * For the bilinear form vec00*vec10+vec01*vec11+q*vec02*vec12+q*vec03*vec13 + */ +void quat_lll_gram_schmidt_transposed_with_ibq(ibq_mat_4x4_t *orthogonalised_transposed, + const ibz_mat_4x4_t *mat, + const ibz_t *q); // dim4 + +/** @brief Verifies if mat is lll-reduced for parameter coeff and norm defined by q + * + * For the bilinear form vec00*vec10+vec01*vec11+q*vec02*vec12+q*vec03*vec13 + */ +int quat_lll_verify(const ibz_mat_4x4_t *mat, + const ibq_t *delta, + const ibq_t *eta, + const quat_alg_t *alg); // test/lattice, test/dim4 + /** @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_internal_gram Internal LLL function + * @{ + */ + +/** @brief In-place L2 reduction core function + * + * Given a lattice basis represented by the columns of a 4x4 matrix + * and the Gram matrix of its bilinear form, L2-reduces the basis + * in-place and updates the Gram matrix accordingly. 
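quat_lll_core() implements the floating-point L2 variant with the thresholds DELTABAR = 0.995 and ETABAR = 0.505 defined above. The underlying LLL invariants are easiest to see in two dimensions: size-reduce b2 against b1 until |mu| <= eta, and swap whenever the Lovasz condition delta*||b1*||^2 <= ||b2*||^2 + mu^2*||b1*||^2 fails. The double-precision toy below uses the same thresholds; it is a sketch of classical LLL on a hand-picked basis, not of the Gram-matrix-driven L2 loop in l2.c.

#include <math.h>
#include <stdio.h>

int main(void)
{
    double b1[2] = { 201.0, 37.0 };
    double b2[2] = { 1648.0, 297.0 };
    const double delta = 0.995, eta = 0.505;

    for (;;) {
        /* size-reduce: b2 <- b2 - round(mu)*b1, mu = <b2,b1>/<b1,b1> */
        double B1 = b1[0] * b1[0] + b1[1] * b1[1];
        double mu = (b2[0] * b1[0] + b2[1] * b1[1]) / B1;
        if (fabs(mu) > eta) {
            double r = round(mu);
            b2[0] -= r * b1[0];
            b2[1] -= r * b1[1];
            mu -= r;
        }
        /* Lovasz condition: ||b2*||^2 >= (delta - mu^2)*||b1*||^2, else swap */
        double B2 = b2[0] * b2[0] + b2[1] * b2[1];
        double Bstar2 = B2 - mu * mu * B1;
        if (Bstar2 >= (delta - mu * mu) * B1)
            break;
        double t0 = b1[0], t1 = b1[1];
        b1[0] = b2[0]; b1[1] = b2[1];
        b2[0] = t0;    b2[1] = t1;
    }
    /* ends with the short basis (1, 32), (40, 1); the determinant -1279 is preserved */
    printf("reduced basis: (%.0f, %.0f), (%.0f, %.0f)\n", b1[0], b1[1], b2[0], b2[1]);
    return 0;
}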
+ * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param G In/Output: Gram matrix of the lattice basis + * @param basis In/Output: lattice basis + */ +void quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis); + +/** + * @brief LLL reduction on 4-dimensional lattice + * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param red Output: LLL reduced basis + * @param lattice In/Output: lattice with 4-dimensional basis + * @param alg The quaternion algebra + */ +int quat_lattice_lll(ibz_mat_4x4_t *red, const quat_lattice_t *lattice, const quat_alg_t *alg); + +/** + * @} + */ + +// end of lll_internal +/** @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mp.c new file mode 100644 index 0000000000..27f4a963db --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mp.c @@ -0,0 +1,357 @@ +#include +#include +#include +#include + +// double-wide multiplication +void +MUL(digit_t *out, const digit_t a, const digit_t b) +{ +#ifdef RADIX_32 + uint64_t r = (uint64_t)a * b; + out[0] = r & 0xFFFFFFFFUL; + out[1] = r >> 32; + +#elif defined(RADIX_64) && defined(_MSC_VER) + uint64_t umul_hi; + out[0] = _umul128(a, b, &umul_hi); + out[1] = umul_hi; + +#elif defined(RADIX_64) && defined(HAVE_UINT128) + unsigned __int128 umul_tmp; + umul_tmp = (unsigned __int128)(a) * (unsigned __int128)(b); + out[0] = (uint64_t)umul_tmp; + out[1] = (uint64_t)(umul_tmp >> 64); + +#else + register digit_t al, ah, bl, bh, temp; + digit_t albl, albh, ahbl, ahbh, res1, res2, res3, carry; + digit_t mask_low = (digit_t)(-1) >> (sizeof(digit_t) * 4), mask_high = (digit_t)(-1) << (sizeof(digit_t) * 4); + al = a & mask_low; // Low part + ah = a >> (sizeof(digit_t) * 4); // High part + bl = b & mask_low; + bh = b >> (sizeof(digit_t) * 4); + + albl = al * bl; + albh = al * bh; + ahbl = ah * bl; + ahbh = ah * bh; + out[0] = albl & mask_low; // out00 + + res1 = albl >> (sizeof(digit_t) * 4); + res2 = ahbl & mask_low; + res3 = albh & mask_low; + temp = res1 + res2 + res3; + carry = temp >> (sizeof(digit_t) * 4); + out[0] ^= temp << (sizeof(digit_t) * 4); // out01 + + res1 = ahbl >> (sizeof(digit_t) * 4); + res2 = albh >> (sizeof(digit_t) * 4); + res3 = ahbh & mask_low; + temp = res1 + res2 + res3 + carry; + out[1] = temp & mask_low; // out10 + carry = temp & mask_high; + out[1] ^= (ahbh & mask_high) + carry; // out11 + +#endif +} + +void +mp_add(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords) +{ // Multiprecision addition + unsigned int i, carry = 0; + + for (i = 0; i < nwords; i++) { + ADDC(c[i], carry, a[i], b[i], carry); + } +} + +digit_t +mp_shiftr(digit_t *x, const unsigned int shift, const unsigned int nwords) +{ // Multiprecision right shift by 1...RADIX-1 + digit_t bit_out = x[0] & 1; + + for (unsigned int i = 0; i < nwords - 1; i++) { + SHIFTR(x[i + 1], x[i], shift, x[i], RADIX); + } + x[nwords - 1] >>= shift; + return bit_out; +} + +void +mp_shiftl(digit_t *x, const unsigned int shift, const unsigned int nwords) +{ // Multiprecision left shift by 1...RADIX-1 + + for (int i = nwords - 1; i > 0; i--) { + SHIFTL(x[i], x[i - 1], shift, x[i], RADIX); + } + x[0] <<= shift; +} + +void +multiple_mp_shiftl(digit_t *x, const 
unsigned int shift, const unsigned int nwords)
+{
+    int t = shift;
+    while (t > RADIX - 1) {
+        mp_shiftl(x, RADIX - 1, nwords);
+        t = t - (RADIX - 1);
+    }
+    mp_shiftl(x, t, nwords);
+}
+
+// The below functions were taken from the EC module
+
+void
+mp_sub(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords)
+{ // Multiprecision subtraction, assuming a > b
+    unsigned int i, borrow = 0;
+
+    for (i = 0; i < nwords; i++) {
+        SUBC(c[i], borrow, a[i], b[i], borrow);
+    }
+}
+
+void
+select_ct(digit_t *c, const digit_t *a, const digit_t *b, const digit_t mask, const int nwords)
+{ // Select c <- a if mask = 0, select c <- b if mask = 1...1
+
+    for (int i = 0; i < nwords; i++) {
+        c[i] = ((a[i] ^ b[i]) & mask) ^ a[i];
+    }
+}
+
+void
+swap_ct(digit_t *a, digit_t *b, const digit_t option, const int nwords)
+{ // Swap entries
+  // If option = 0 then a <- a and b <- b, else if option = 0xFF...FF then a <- b and b <- a
+    digit_t temp;
+
+    for (int i = 0; i < nwords; i++) {
+        temp = option & (a[i] ^ b[i]);
+        a[i] = temp ^ a[i];
+        b[i] = temp ^ b[i];
+    }
+}
+
+int
+mp_compare(const digit_t *a, const digit_t *b, unsigned int nwords)
+{ // Multiprecision comparison: returns (1) if a>b, (0) if a=b, (-1) if a<b
+    for (int i = nwords - 1; i >= 0; i--) {
+        if (a[i] > b[i])
+            return 1;
+        else if (a[i] < b[i])
+            return -1;
+    }
+    return 0;
+}
+
+bool
+mp_is_zero(const digit_t *a, unsigned int nwords)
+{ // Is a multiprecision element zero?
+  // Returns 1 (true) if a=0, 0 (false) otherwise
+    digit_t r = 0;
+
+    for (unsigned int i = 0; i < nwords; i++)
+        r |= a[i] ^ 0;
+
+    return (bool)is_digit_zero_ct(r);
+}
+
+void
+mp_mul2(digit_t *c, const digit_t *a, const digit_t *b)
+{ // Multiprecision multiplication fixed to two-digit operands
+    unsigned int carry = 0;
+    digit_t t0[2], t1[2], t2[2];
+
+    MUL(t0, a[0], b[0]);
+    MUL(t1, a[0], b[1]);
+    ADDC(t0[1], carry, t0[1], t1[0], carry);
+    ADDC(t1[1], carry, 0, t1[1], carry);
+    MUL(t2, a[1], b[1]);
+    ADDC(t2[0], carry, t2[0], t1[1], carry);
+    ADDC(t2[1], carry, 0, t2[1], carry);
+    c[0] = t0[0];
+    c[1] = t0[1];
+    c[2] = t2[0];
+    c[3] = t2[1];
+}
+
+void
+mp_print(const digit_t *a, size_t nwords)
+{
+    printf("0x");
+    for (size_t i = 0; i < nwords; i++) {
+#ifdef RADIX_32
+        printf("%08" PRIx32, a[nwords - i - 1]); // Print each word with 8 hex digits
+#elif defined(RADIX_64)
+        printf("%016" PRIx64, a[nwords - i - 1]); // Print each word with 16 hex digits
+#endif
+    }
+}
+
+void
+mp_copy(digit_t *b, const digit_t *a, size_t nwords)
+{
+    for (size_t i = 0; i < nwords; i++) {
+        b[i] = a[i];
+    }
+}
+
+void
+mp_mul(digit_t *c, const digit_t *a, const digit_t *b, size_t nwords)
+{
+    // Multiprecision multiplication, c = a*b, for nwords-digit inputs, with nwords-digit output
+    // explicitly does not use the higher half of c, as we do not need it in our applications
+    digit_t carry, UV[2], t[nwords], cc[nwords];
+
+    for (size_t i = 0; i < nwords; i++) {
+        cc[i] = 0;
+    }
+
+    for (size_t i = 0; i < nwords; i++) {
+
+        MUL(t, a[i], b[0]);
+
+        for (size_t j = 1; j < nwords - 1; j++) {
+            MUL(UV, a[i], b[j]);
+            ADDC(t[j], carry, t[j], UV[0], 0);
+            t[j + 1] = UV[1] + carry;
+        }
+
+        int j = nwords - 1;
+        MUL(UV, a[i], b[j]);
+        ADDC(t[j], carry, t[j], UV[0], 0);
+
+        mp_add(&cc[i], &cc[i], t, nwords - i);
+    }
+
+    mp_copy(c, cc, nwords);
+}
+
+void
+mp_mod_2exp(digit_t *a, unsigned int e, unsigned int nwords)
+{ // Multiprecision modulo 2^e, with 0 <= a < 2^(e)
+    unsigned int i, q = e >> LOG2RADIX, r = e & (RADIX - 1);
+
+    if (q < nwords) {
+        a[q] &= ((digit_t)1 << r) - 1;
+
+        for (i = q + 1; i < nwords; i++) {
+            a[i] = 0;
+        }
+    }
+}
+
+void
+mp_neg(digit_t *a, unsigned int nwords)
+{ // negates a
+    for (size_t i = 0; i < nwords; i++) {
+        a[i] ^= -1;
+    }
+
+    a[0] += 1;
+}
+
+bool
+mp_is_one(const digit_t *x, unsigned int nwords)
+{ // returns true if x represents 1, and false otherwise
+    if (x[0] != 1) {
+        return false;
+    }
+
+    for (size_t i = 1; i < nwords; i++) {
+        if (x[i] != 0) {
+            return false;
+        }
+    }
+    return true;
+}
+
+void
+mp_inv_2e(digit_t *b, const digit_t *a, int e, unsigned int nwords)
+{ // Inversion modulo 2^e, using Newton's method and Hensel lifting
+  // we lift modulo w, the smallest power of two with w >= e
+  // requires a to be odd
+  // returns b such that a*b = 1 mod 2^e
+    assert((a[0] & 1) == 1);
+
+    digit_t x[nwords], y[nwords], aa[nwords], mp_one[nwords], tmp[nwords];
+    mp_copy(aa, a, nwords);
+
+    mp_one[0] = 1;
+    for (unsigned int i = 1; i < nwords; i++) {
+        mp_one[i] = 0;
+    }
+
+    int p = 1;
+    while ((1 << p) < e) {
+        p++;
+    }
+    p -= 2; // using k = 4 for initial inverse
+    int w = (1 << (p + 2));
+
+    mp_mod_2exp(aa, w, nwords);
+    mp_add(x, aa, aa, nwords);
+    mp_add(x, x, aa, nwords);  // should be 3a
+    x[0] ^= (1 << 1);          // so that x equals (3a) xor 2
+    mp_mod_2exp(x, w, nwords); // now x*a = 1 mod 2^4, which we lift
+
+    mp_mul(tmp, aa, x, nwords);
+    mp_neg(tmp, nwords);
+    mp_add(y, mp_one, tmp, nwords);
+
+    // Hensel lifting for p rounds
+    for (int i = 0; i < p; i++) {
+        mp_add(tmp, mp_one, y, nwords);
+        mp_mul(x, x, tmp, nwords);
+        mp_mul(y, y, y, nwords);
+    }
+
+    mp_mod_2exp(x, w, nwords);
+    mp_copy(b, x, nwords);
+
+    // verify results
+    mp_mul(x, x, aa, nwords);
+    mp_mod_2exp(x, w, nwords);
+    assert(mp_is_one(x, nwords));
+}
+
+void
+mp_invert_matrix(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, int e, unsigned int nwords)
+{
+    // given a matrix ( ( a, b ), (c, d) ) of values mod 2^e
+    // returns the inverse matrix gamma ( (d, -b), (-c, a) )
+    // where gamma is the inverse of the determinant a*d - b*c
+    // assumes the matrix is invertible; otherwise, inversion of the determinant fails
+
+    int p = 1;
+    while ((1 << p) < e) {
+        p++;
+    }
+    int w = (1 << (p));
+
+    digit_t det[nwords], tmp[nwords], resa[nwords], resb[nwords], resc[nwords], resd[nwords];
+    mp_mul(tmp, r1, s2, nwords);
+    mp_mul(det, r2, s1, nwords);
+    mp_sub(det, tmp, det, nwords);
+    mp_inv_2e(det, det, e, nwords);
+
+    mp_mul(resa, det, s2, nwords);
+    mp_mul(resb, det, r2, nwords);
+    mp_mul(resc, det, s1, nwords);
+    mp_mul(resd, det, r1, nwords);
+
+    mp_neg(resb, nwords);
+    mp_neg(resc, nwords);
+
+    mp_mod_2exp(resa, w, nwords);
+    mp_mod_2exp(resb, w, nwords);
+    mp_mod_2exp(resc, w, nwords);
+    mp_mod_2exp(resd, w, nwords);
+
+    mp_copy(r1, resa, nwords);
+    mp_copy(r2, resb, nwords);
+    mp_copy(s1, resc, nwords);
+    mp_copy(s2, resd, nwords);
+}
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/normeq.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/normeq.c
new file mode 100644
index 0000000000..8c133dd095
--- /dev/null
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/normeq.c
@@ -0,0 +1,369 @@
+#include
+#include "internal.h"
+
+/** @file
+ *
+ * @authors Antonin Leroux
+ *
+ * @brief Functions related to norm equation solving or special extremal orders
+ */
+
+void
+quat_lattice_O0_set(quat_lattice_t *O0)
+{
+    for (int i = 0; i < 4; i++) {
+        for (int j = 0; j < 4; j++) {
+            ibz_set(&(O0->basis[i][j]), 0);
+        }
+    }
+    ibz_set(&(O0->denom), 2);
+    ibz_set(&(O0->basis[0][0]), 2);
+    ibz_set(&(O0->basis[1][1]), 2);
+    ibz_set(&(O0->basis[2][2]), 1);
+    ibz_set(&(O0->basis[1][2]), 1);
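+    // (Descriptive note, assuming rows are ordered (1, i, j, k) and that the
+    //  columns of basis, scaled by denom, generate the order: together with the
+    //  two entries set below, this encodes the special extremal order
+    //  O0 = Z<1, i, (i + j)/2, (1 + k)/2>.)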
ibz_set(&(O0->basis[3][3]), 1); + ibz_set(&(O0->basis[0][3]), 1); +} + +void +quat_lattice_O0_set_extremal(quat_p_extremal_maximal_order_t *O0) +{ + ibz_set(&O0->z.coord[1], 1); + ibz_set(&O0->t.coord[2], 1); + ibz_set(&O0->z.denom, 1); + ibz_set(&O0->t.denom, 1); + O0->q = 1; + quat_lattice_O0_set(&(O0->order)); +} + +void +quat_order_elem_create(quat_alg_elem_t *elem, + const quat_p_extremal_maximal_order_t *order, + const ibz_vec_4_t *coeffs, + const quat_alg_t *Bpoo) +{ + + // var dec + quat_alg_elem_t quat_temp; + + // var init + quat_alg_elem_init(&quat_temp); + + // elem = x + quat_alg_scalar(elem, &(*coeffs)[0], &ibz_const_one); + + // quat_temp = i*y + quat_alg_scalar(&quat_temp, &((*coeffs)[1]), &ibz_const_one); + quat_alg_mul(&quat_temp, &order->z, &quat_temp, Bpoo); + + // elem = x + i*y + quat_alg_add(elem, elem, &quat_temp); + + // quat_temp = z * j + quat_alg_scalar(&quat_temp, &(*coeffs)[2], &ibz_const_one); + quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); + + // elem = x + i* + z*j + quat_alg_add(elem, elem, &quat_temp); + + // quat_temp = t * j * i + quat_alg_scalar(&quat_temp, &(*coeffs)[3], &ibz_const_one); + quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); + quat_alg_mul(&quat_temp, &quat_temp, &order->z, Bpoo); + + // elem = x + i*y + j*z + j*i*t + quat_alg_add(elem, elem, &quat_temp); + + quat_alg_elem_finalize(&quat_temp); +} + +int +quat_represent_integer(quat_alg_elem_t *gamma, + const ibz_t *n_gamma, + int non_diag, + const quat_represent_integer_params_t *params) +{ + + if (ibz_is_even(n_gamma)) { + return 0; + } + // var dec + int found; + ibz_t cornacchia_target; + ibz_t adjusted_n_gamma, q; + ibz_t bound, sq_bound, temp; + ibz_t test; + ibz_vec_4_t coeffs; // coeffs = [x,y,z,t] + quat_alg_elem_t quat_temp; + + if (non_diag) + assert(params->order->q % 4 == 1); + + // var init + found = 0; + ibz_init(&bound); + ibz_init(&test); + ibz_init(&temp); + ibz_init(&q); + ibz_init(&sq_bound); + ibz_vec_4_init(&coeffs); + quat_alg_elem_init(&quat_temp); + ibz_init(&adjusted_n_gamma); + ibz_init(&cornacchia_target); + + ibz_set(&q, params->order->q); + + // this could be removed in the current state + int standard_order = (params->order->q == 1); + + // adjusting the norm of gamma (multiplying by 4 to find a solution in an order of odd level) + if (non_diag || standard_order) { + ibz_mul(&adjusted_n_gamma, n_gamma, &ibz_const_two); + ibz_mul(&adjusted_n_gamma, &adjusted_n_gamma, &ibz_const_two); + } else { + ibz_copy(&adjusted_n_gamma, n_gamma); + } + // computation of the first bound = sqrt (adjust_n_gamma / p - q) + ibz_div(&sq_bound, &bound, &adjusted_n_gamma, &((params->algebra)->p)); + ibz_set(&temp, params->order->q); + ibz_sub(&sq_bound, &sq_bound, &temp); + ibz_sqrt_floor(&bound, &sq_bound); + + // the size of the search space is roughly n_gamma / (p√q) + ibz_t counter; + ibz_init(&counter); + ibz_mul(&temp, &temp, &((params->algebra)->p)); + ibz_mul(&temp, &temp, &((params->algebra)->p)); + ibz_sqrt_floor(&temp, &temp); + ibz_div(&counter, &temp, &adjusted_n_gamma, &temp); + + // entering the main loop + while (!found && ibz_cmp(&counter, &ibz_const_zero) != 0) { + // decreasing the counter + ibz_sub(&counter, &counter, &ibz_const_one); + + // we start by sampling the first coordinate + ibz_rand_interval(&coeffs[2], &ibz_const_one, &bound); + + // then, we sample the second coordinate + // computing the second bound in temp as sqrt( (adjust_n_gamma - p*coeffs[2]²)/qp ) + ibz_mul(&cornacchia_target, &coeffs[2], &coeffs[2]); + ibz_mul(&temp, 
&cornacchia_target, &(params->algebra->p));
+        ibz_sub(&temp, &adjusted_n_gamma, &temp);
+        ibz_mul(&sq_bound, &q, &(params->algebra->p));
+        ibz_div(&temp, &sq_bound, &temp, &sq_bound);
+        ibz_sqrt_floor(&temp, &temp);
+
+        if (ibz_cmp(&temp, &ibz_const_zero) == 0) {
+            continue;
+        }
+        // sampling the second value
+        ibz_rand_interval(&coeffs[3], &ibz_const_one, &temp);
+
+        // compute cornacchia_target = n_gamma - p * (z² + q*t²)
+        ibz_mul(&temp, &coeffs[3], &coeffs[3]);
+        ibz_mul(&temp, &q, &temp);
+        ibz_add(&cornacchia_target, &cornacchia_target, &temp);
+        ibz_mul(&cornacchia_target, &cornacchia_target, &((params->algebra)->p));
+        ibz_sub(&cornacchia_target, &adjusted_n_gamma, &cornacchia_target);
+        assert(ibz_cmp(&cornacchia_target, &ibz_const_zero) > 0);
+
+        // applying Cornacchia's algorithm
+        if (ibz_probab_prime(&cornacchia_target, params->primality_test_iterations))
+            found = ibz_cornacchia_prime(&(coeffs[0]), &(coeffs[1]), &q, &cornacchia_target);
+        else
+            found = 0;
+
+        if (found && non_diag && standard_order) {
+            // check that we can divide by two at least once
+            // the treatment depends on whether the basis contains (1+j)/2 or (1+k)/2
+            // we must have x = t mod 2 and y = z mod 2
+            // if q=1 we can simply swap x and y
+            if (ibz_is_odd(&coeffs[0]) != ibz_is_odd(&coeffs[3])) {
+                ibz_swap(&coeffs[1], &coeffs[0]);
+            }
+            // we further check that (x-t)/2 = 1 mod 2 and (y-z)/2 = 1 mod 2 to ensure that the
+            // resulting endomorphism will behave well for dim 2 computations
+            found = found && ((ibz_get(&coeffs[0]) - ibz_get(&coeffs[3])) % 4 == 2) &&
+                    ((ibz_get(&coeffs[1]) - ibz_get(&coeffs[2])) % 4 == 2);
+        }
+        if (found) {
+
+#ifndef NDEBUG
+            ibz_set(&temp, (params->order->q));
+            ibz_mul(&temp, &temp, &(coeffs[1]));
+            ibz_mul(&temp, &temp, &(coeffs[1]));
+            ibz_mul(&test, &(coeffs[0]), &(coeffs[0]));
+            ibz_add(&temp, &temp, &test);
+            assert(0 == ibz_cmp(&temp, &cornacchia_target));
+
+            ibz_mul(&cornacchia_target, &(coeffs[3]), &(coeffs[3]));
+            ibz_mul(&cornacchia_target, &cornacchia_target, &(params->algebra->p));
+            ibz_mul(&temp, &(coeffs[1]), &(coeffs[1]));
+            ibz_add(&cornacchia_target, &cornacchia_target, &temp);
+            ibz_set(&temp, (params->order->q));
+            ibz_mul(&cornacchia_target, &cornacchia_target, &temp);
+            ibz_mul(&temp, &(coeffs[0]), &coeffs[0]);
+            ibz_add(&cornacchia_target, &cornacchia_target, &temp);
+            ibz_mul(&temp, &(coeffs[2]), &coeffs[2]);
+            ibz_mul(&temp, &temp, &(params->algebra->p));
+            ibz_add(&cornacchia_target, &cornacchia_target, &temp);
+            assert(0 == ibz_cmp(&cornacchia_target, &adjusted_n_gamma));
+#endif
+            // translate x,y,z,t into the quaternion element gamma
+            quat_order_elem_create(gamma, (params->order), &coeffs, (params->algebra));
+#ifndef NDEBUG
+            quat_alg_norm(&temp, &(coeffs[0]), gamma, (params->algebra));
+            assert(ibz_is_one(&(coeffs[0])));
+            assert(0 == ibz_cmp(&temp, &adjusted_n_gamma));
+            assert(quat_lattice_contains(NULL, &((params->order)->order), gamma));
+#endif
+            // making gamma primitive
+            // coeffs contains the coefficients of the primitivized gamma in the basis of the order
+            quat_alg_make_primitive(&coeffs, &temp, gamma, &((params->order)->order));
+
+            if (non_diag || standard_order)
+                found = (ibz_cmp(&temp, &ibz_const_two) == 0);
+            else
+                found = (ibz_cmp(&temp, &ibz_const_one) == 0);
+        }
+    }
+
+    if (found) {
+        // new gamma
+        ibz_mat_4x4_eval(&coeffs, &(((params->order)->order).basis), &coeffs);
+        ibz_copy(&gamma->coord[0], &coeffs[0]);
+        ibz_copy(&gamma->coord[1], &coeffs[1]);
+        ibz_copy(&gamma->coord[2], &coeffs[2]);
+        ibz_copy(&gamma->coord[3], &coeffs[3]);
ibz_copy(&gamma->denom, &(((params->order)->order).denom)); + } + // var finalize + ibz_finalize(&counter); + ibz_finalize(&bound); + ibz_finalize(&temp); + ibz_finalize(&sq_bound); + ibz_vec_4_finalize(&coeffs); + quat_alg_elem_finalize(&quat_temp); + ibz_finalize(&adjusted_n_gamma); + ibz_finalize(&cornacchia_target); + ibz_finalize(&q); + ibz_finalize(&test); + + return found; +} + +int +quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, + const ibz_t *norm, + int is_prime, + const quat_represent_integer_params_t *params, + const ibz_t *prime_cofactor) +{ + + ibz_t n_temp, norm_d; + ibz_t disc; + quat_alg_elem_t gen, gen_rerand; + int found = 0; + ibz_init(&n_temp); + ibz_init(&norm_d); + ibz_init(&disc); + quat_alg_elem_init(&gen); + quat_alg_elem_init(&gen_rerand); + + // when the norm is prime we can be quite efficient + // by avoiding to run represent integer + // the first step is to generate one ideal of the correct norm + if (is_prime) { + + // we find a quaternion element of norm divisible by norm + while (!found) { + // generating a trace-zero element at random + ibz_set(&gen.coord[0], 0); + ibz_sub(&n_temp, norm, &ibz_const_one); + for (int i = 1; i < 4; i++) + ibz_rand_interval(&gen.coord[i], &ibz_const_zero, &n_temp); + + // first, we compute the norm of the gen + quat_alg_norm(&n_temp, &norm_d, &gen, (params->algebra)); + assert(ibz_is_one(&norm_d)); + + // and finally the negation mod norm + ibz_neg(&disc, &n_temp); + ibz_mod(&disc, &disc, norm); + // now we check that -n is a square mod norm + // and if the square root exists we compute it + found = ibz_sqrt_mod_p(&gen.coord[0], &disc, norm); + found = found && !quat_alg_elem_is_zero(&gen); + } + } else { + assert(prime_cofactor != NULL); + // if it is not prime or we don't know if it is prime, we may just use represent integer + // and use a precomputed prime as cofactor + assert(!ibz_is_zero(norm)); + ibz_mul(&n_temp, prime_cofactor, norm); + found = quat_represent_integer(&gen, &n_temp, 0, params); + found = found && !quat_alg_elem_is_zero(&gen); + } +#ifndef NDEBUG + if (found) { + // first, we compute the norm of the gen + quat_alg_norm(&n_temp, &norm_d, &gen, (params->algebra)); + assert(ibz_is_one(&norm_d)); + ibz_mod(&n_temp, &n_temp, norm); + assert(ibz_cmp(&n_temp, &ibz_const_zero) == 0); + } +#endif + + // now we just have to rerandomize the class of the ideal generated by gen + found = 0; + while (!found) { + for (int i = 0; i < 4; i++) { + ibz_rand_interval(&gen_rerand.coord[i], &ibz_const_one, norm); + } + quat_alg_norm(&n_temp, &norm_d, &gen_rerand, (params->algebra)); + assert(ibz_is_one(&norm_d)); + ibz_gcd(&disc, &n_temp, norm); + found = ibz_is_one(&disc); + found = found && !quat_alg_elem_is_zero(&gen_rerand); + } + + quat_alg_mul(&gen, &gen, &gen_rerand, (params->algebra)); + // in both cases, whether norm is prime or not prime, + // gen is not divisible by any integer factor of the target norm + // therefore the call below will yield an ideal of the correct norm + quat_lideal_create(lideal, &gen, norm, &((params->order)->order), (params->algebra)); + assert(ibz_cmp(norm, &(lideal->norm)) == 0); + + ibz_finalize(&n_temp); + quat_alg_elem_finalize(&gen); + quat_alg_elem_finalize(&gen_rerand); + ibz_finalize(&norm_d); + ibz_finalize(&disc); + return (found); +} + +void +quat_change_to_O0_basis(ibz_vec_4_t *vec, const quat_alg_elem_t *el) +{ + ibz_t tmp; + ibz_init(&tmp); + ibz_copy(&(*vec)[2], &el->coord[2]); + ibz_add(&(*vec)[2], &(*vec)[2], &(*vec)[2]); // double (not optimal if 
el->denom is even...) + ibz_copy(&(*vec)[3], &el->coord[3]); // double (not optimal if el->denom is even...) + ibz_add(&(*vec)[3], &(*vec)[3], &(*vec)[3]); + ibz_sub(&(*vec)[0], &el->coord[0], &el->coord[3]); + ibz_sub(&(*vec)[1], &el->coord[1], &el->coord[2]); + + assert(ibz_divides(&(*vec)[0], &el->denom)); + assert(ibz_divides(&(*vec)[1], &el->denom)); + assert(ibz_divides(&(*vec)[2], &el->denom)); + assert(ibz_divides(&(*vec)[3], &el->denom)); + + ibz_div(&(*vec)[0], &tmp, &(*vec)[0], &el->denom); + ibz_div(&(*vec)[1], &tmp, &(*vec)[1], &el->denom); + ibz_div(&(*vec)[2], &tmp, &(*vec)[2], &el->denom); + ibz_div(&(*vec)[3], &tmp, &(*vec)[3], &el->denom); + + ibz_finalize(&tmp); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_arm64crypto.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_arm64crypto.h deleted file mode 100644 index 88c4bf48d0..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_arm64crypto.h +++ /dev/null @@ -1,27 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -#ifndef RANDOMBYTES_ARM64CRYPTO_H -#define RANDOMBYTES_ARM64CRYPTO_H - -#include - -#define RNG_SUCCESS 0 -#define RNG_BAD_MAXLEN -1 -#define RNG_BAD_OUTBUF -2 -#define RNG_BAD_REQ_LEN -3 - -typedef struct { - unsigned char buffer[16]; - int buffer_pos; - unsigned long length_remaining; - unsigned char key[32]; - unsigned char ctr[16]; -} AES_XOF_struct; - -typedef struct { - unsigned char Key[32]; - unsigned char V[16]; - int reseed_counter; -} AES256_CTR_DRBG_struct; - -#endif /* RANDOMBYTES_ARM64CRYPTO_H */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_ctrdrbg_aesni.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_ctrdrbg_aesni.c deleted file mode 100644 index 3fc67acfb6..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/randombytes_ctrdrbg_aesni.c +++ /dev/null @@ -1,87 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 and Unknown -// -/* -NIST-developed software is provided by NIST as a public service. You may use, -copy, and distribute copies of the software in any medium, provided that you -keep intact this entire notice. You may improve, modify, and create derivative -works of the software or any portion of the software, and you may copy and -distribute such modifications or works. Modified works should carry a notice -stating that you changed the software and should note the date and nature of any -such change. Please explicitly acknowledge the National Institute of Standards -and Technology as the source of the software. - -NIST-developed software is expressly provided "AS IS." NIST MAKES NO WARRANTY OF -ANY KIND, EXPRESS, IMPLIED, IN FACT, OR ARISING BY OPERATION OF LAW, INCLUDING, -WITHOUT LIMITATION, THE IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE, NON-INFRINGEMENT, AND DATA ACCURACY. NIST NEITHER REPRESENTS -NOR WARRANTS THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR -ERROR-FREE, OR THAT ANY DEFECTS WILL BE CORRECTED. NIST DOES NOT WARRANT OR MAKE -ANY REPRESENTATIONS REGARDING THE USE OF THE SOFTWARE OR THE RESULTS THEREOF, -INCLUDING BUT NOT LIMITED TO THE CORRECTNESS, ACCURACY, RELIABILITY, OR -USEFULNESS OF THE SOFTWARE. 
- -You are solely responsible for determining the appropriateness of using and -distributing the software and you assume all risks associated with its use, -including but not limited to the risks and costs of program errors, compliance -with applicable laws, damage to or loss of data, programs or equipment, and the -unavailability or interruption of operation. This software is not intended to be -used in any situation where a failure could cause risk of injury or damage to -property. The software developed by NIST employees is not subject to copyright -protection within the United States. -*/ - -#include - -#include -#include "ctr_drbg.h" - -#ifdef ENABLE_CT_TESTING -#include -#endif - -#define RNG_SUCCESS 0 -#define RNG_BAD_MAXLEN -1 -#define RNG_BAD_OUTBUF -2 -#define RNG_BAD_REQ_LEN -3 - -CTR_DRBG_STATE drbg; - -#ifndef CTRDRBG_TEST_BENCH -static -#endif -void -randombytes_init_aes_ni(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength) { - (void)security_strength; // fixed to 256 - CTR_DRBG_init(&drbg, entropy_input, personalization_string, - (personalization_string == NULL) ? 0 : CTR_DRBG_ENTROPY_LEN); -} - -#ifndef CTRDRBG_TEST_BENCH -static -#endif -int -randombytes_aes_ni(unsigned char *x, size_t xlen) { - CTR_DRBG_generate(&drbg, x, xlen, NULL, 0); - return RNG_SUCCESS; -} - -#ifdef RANDOMBYTES_AES_NI -SQISIGN_API -int randombytes(unsigned char *random_array, unsigned long long nbytes) { - int ret = randombytes_aes_ni(random_array, nbytes); -#ifdef ENABLE_CT_TESTING - VALGRIND_MAKE_MEM_UNDEFINED(random_array, ret); -#endif - return ret; -} - -SQISIGN_API -void randombytes_init(unsigned char *entropy_input, - unsigned char *personalization_string, - int security_strength) { - randombytes_init_aes_ni(entropy_input, personalization_string, - security_strength); -} -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/rationals.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/rationals.c new file mode 100644 index 0000000000..0c5387e5e8 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/rationals.c @@ -0,0 +1,233 @@ +#include +#include "internal.h" +#include "lll_internals.h" + +void +ibq_init(ibq_t *x) +{ + ibz_init(&((*x)[0])); + ibz_init(&((*x)[1])); + ibz_set(&((*x)[1]), 1); +} + +void +ibq_finalize(ibq_t *x) +{ + ibz_finalize(&((*x)[0])); + ibz_finalize(&((*x)[1])); +} + +void +ibq_mat_4x4_init(ibq_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibq_init(&(*mat)[i][j]); + } + } +} +void +ibq_mat_4x4_finalize(ibq_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibq_finalize(&(*mat)[i][j]); + } + } +} + +void +ibq_vec_4_init(ibq_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibq_init(&(*vec)[i]); + } +} +void +ibq_vec_4_finalize(ibq_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibq_finalize(&(*vec)[i]); + } +} + +void +ibq_mat_4x4_print(const ibq_mat_4x4_t *mat) +{ + printf("matrix: "); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_print(&((*mat)[i][j][0]), 10); + printf("/"); + ibz_print(&((*mat)[i][j][1]), 10); + printf(" "); + } + printf("\n "); + } + printf("\n"); +} + +void +ibq_vec_4_print(const ibq_vec_4_t *vec) +{ + printf("vector: "); + for (int i = 0; i < 4; i++) { + ibz_print(&((*vec)[i][0]), 10); + printf("/"); + ibz_print(&((*vec)[i][1]), 10); + printf(" "); + } + printf("\n\n"); +} + +void +ibq_reduce(ibq_t *x) +{ + ibz_t gcd, r; + ibz_init(&gcd); + ibz_init(&r); + ibz_gcd(&gcd, 
&((*x)[0]), &((*x)[1])); + ibz_div(&((*x)[0]), &r, &((*x)[0]), &gcd); + assert(ibz_is_zero(&r)); + ibz_div(&((*x)[1]), &r, &((*x)[1]), &gcd); + assert(ibz_is_zero(&r)); + ibz_finalize(&gcd); + ibz_finalize(&r); +} + +void +ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b) +{ + ibz_t add, prod; + ibz_init(&add); + ibz_init(&prod); + + ibz_mul(&add, &((*a)[0]), &((*b)[1])); + ibz_mul(&prod, &((*b)[0]), &((*a)[1])); + ibz_add(&((*sum)[0]), &add, &prod); + ibz_mul(&((*sum)[1]), &((*a)[1]), &((*b)[1])); + ibz_finalize(&add); + ibz_finalize(&prod); +} + +void +ibq_neg(ibq_t *neg, const ibq_t *x) +{ + ibz_copy(&((*neg)[1]), &((*x)[1])); + ibz_neg(&((*neg)[0]), &((*x)[0])); +} + +void +ibq_sub(ibq_t *diff, const ibq_t *a, const ibq_t *b) +{ + ibq_t neg; + ibq_init(&neg); + ibq_neg(&neg, b); + ibq_add(diff, a, &neg); + ibq_finalize(&neg); +} + +void +ibq_abs(ibq_t *abs, const ibq_t *x) // once +{ + ibq_t neg; + ibq_init(&neg); + ibq_neg(&neg, x); + if (ibq_cmp(x, &neg) < 0) + ibq_copy(abs, &neg); + else + ibq_copy(abs, x); + ibq_finalize(&neg); +} + +void +ibq_mul(ibq_t *prod, const ibq_t *a, const ibq_t *b) +{ + ibz_mul(&((*prod)[0]), &((*a)[0]), &((*b)[0])); + ibz_mul(&((*prod)[1]), &((*a)[1]), &((*b)[1])); +} + +int +ibq_inv(ibq_t *inv, const ibq_t *x) +{ + int res = !ibq_is_zero(x); + if (res) { + ibz_copy(&((*inv)[0]), &((*x)[0])); + ibz_copy(&((*inv)[1]), &((*x)[1])); + ibz_swap(&((*inv)[1]), &((*inv)[0])); + } + return (res); +} + +int +ibq_cmp(const ibq_t *a, const ibq_t *b) +{ + ibz_t x, y; + ibz_init(&x); + ibz_init(&y); + ibz_copy(&x, &((*a)[0])); + ibz_copy(&y, &((*b)[0])); + ibz_mul(&y, &y, &((*a)[1])); + ibz_mul(&x, &x, &((*b)[1])); + if (ibz_cmp(&((*a)[1]), &ibz_const_zero) > 0) { + ibz_neg(&y, &y); + ibz_neg(&x, &x); + } + if (ibz_cmp(&((*b)[1]), &ibz_const_zero) > 0) { + ibz_neg(&y, &y); + ibz_neg(&x, &x); + } + int res = ibz_cmp(&x, &y); + ibz_finalize(&x); + ibz_finalize(&y); + return (res); +} + +int +ibq_is_zero(const ibq_t *x) +{ + return ibz_is_zero(&((*x)[0])); +} + +int +ibq_is_one(const ibq_t *x) +{ + return (0 == ibz_cmp(&((*x)[0]), &((*x)[1]))); +} + +int +ibq_set(ibq_t *q, const ibz_t *a, const ibz_t *b) +{ + ibz_copy(&((*q)[0]), a); + ibz_copy(&((*q)[1]), b); + return !ibz_is_zero(b); +} + +void +ibq_copy(ibq_t *target, const ibq_t *value) // once +{ + ibz_copy(&((*target)[0]), &((*value)[0])); + ibz_copy(&((*target)[1]), &((*value)[1])); +} + +int +ibq_is_ibz(const ibq_t *q) +{ + ibz_t r; + ibz_init(&r); + ibz_mod(&r, &((*q)[0]), &((*q)[1])); + int res = ibz_is_zero(&r); + ibz_finalize(&r); + return (res); +} + +int +ibq_to_ibz(ibz_t *z, const ibq_t *q) +{ + ibz_t r; + ibz_init(&r); + ibz_div(z, &r, &((*q)[0]), &((*q)[1])); + int res = ibz_is_zero(&r); + ibz_finalize(&r); + return (res); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/vaes256_key_expansion.S b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/vaes256_key_expansion.S deleted file mode 100644 index 2311fa9bc8..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/vaes256_key_expansion.S +++ /dev/null @@ -1,122 +0,0 @@ -#*************************************************************************** -# This implementation is a modified version of the code, -# written by Nir Drucker and Shay Gueron -# AWS Cryptographic Algorithms Group -# (ndrucker@amazon.com, gueron@amazon.com) -# -# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). 
-# You may not use this file except in compliance with the License. -# A copy of the License is located at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# or in the "license" file accompanying this file. This file is distributed -# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -# express or implied. See the License for the specific language governing -# permissions and limitations under the License. -# The license is detailed in the file LICENSE.txt, and applies to this file. -#*************************************************************************** - -.intel_syntax noprefix -.data - -.p2align 4, 0x90 -MASK1: -.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d -CON1: -.long 1,1,1,1 - -.set k256_size, 32 - -#if defined(__linux__) && defined(__ELF__) -.section .note.GNU-stack,"",@progbits -#endif -.text - -################################################################################ -# void aes256_key_expansion(OUT aes256_ks_t* ks, IN const uint8_t* key); -# The output parameter must be 16 bytes aligned! -# -#Linux ABI -#define out rdi -#define in rsi - -#define CON xmm0 -#define MASK_REG xmm1 - -#define IN0 xmm2 -#define IN1 xmm3 - -#define TMP1 xmm4 -#define TMP2 xmm5 - -#define ZERO xmm15 - -.macro ROUND1 in0 in1 - add out, k256_size - vpshufb TMP2, \in1, MASK_REG - aesenclast TMP2, CON - vpslld CON, CON, 1 - vpslldq TMP1, \in0, 4 - vpxor \in0, \in0, TMP1 - vpslldq TMP1, TMP1, 4 - vpxor \in0, \in0, TMP1 - vpslldq TMP1, TMP1, 4 - vpxor \in0, \in0, TMP1 - vpxor \in0, \in0, TMP2 - vmovdqa [out], \in0 - -.endm - -.macro ROUND2 - vpshufd TMP2, IN0, 0xff - aesenclast TMP2, ZERO - vpslldq TMP1, IN1, 4 - vpxor IN1, IN1, TMP1 - vpslldq TMP1, TMP1, 4 - vpxor IN1, IN1, TMP1 - vpslldq TMP1, TMP1, 4 - vpxor IN1, IN1, TMP1 - vpxor IN1, IN1, TMP2 - vmovdqa [out+16], IN1 -.endm - -#ifdef __APPLE__ -#define AES256_KEY_EXPANSION _aes256_key_expansion -#else -#define AES256_KEY_EXPANSION aes256_key_expansion -#endif - -#ifndef __APPLE__ -.type AES256_KEY_EXPANSION,@function -.hidden AES256_KEY_EXPANSION -#endif -.globl AES256_KEY_EXPANSION -AES256_KEY_EXPANSION: - vmovdqu IN0, [in] - vmovdqu IN1, [in+16] - vmovdqa [out], IN0 - vmovdqa [out+16], IN1 - - vmovdqa CON, [rip+CON1] - vmovdqa MASK_REG, [rip+MASK1] - - vpxor ZERO, ZERO, ZERO - - mov ax, 6 -.loop256: - - ROUND1 IN0, IN1 - dec ax - ROUND2 - jne .loop256 - - ROUND1 IN0, IN1 - - ret -#ifndef __APPLE__ -.size AES256_KEY_EXPANSION, .-AES256_KEY_EXPANSION -#endif - diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/aes.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/aes.h deleted file mode 100644 index e35ec3705b..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/aes.h +++ /dev/null @@ -1,29 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -#ifndef AES_H -#define AES_H - -#include -#include - -void AES_256_ECB(const uint8_t *input, const uint8_t *key, uint8_t *output); -#define AES_ECB_encrypt AES_256_ECB - -#ifdef ENABLE_AESNI -int AES_128_CTR_NI(unsigned char *output, - size_t outputByteLen, - const unsigned char *input, - size_t inputByteLen); -int AES_128_CTR_4R_NI(unsigned char *output, - size_t outputByteLen, - const unsigned char *input, - size_t inputByteLen); -#define AES_128_CTR AES_128_CTR_NI -#else -int AES_128_CTR(unsigned char *output, - size_t outputByteLen, - const unsigned char *input, - size_t inputByteLen); -#endif - -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/aes_c.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/aes_c.c deleted file mode 100644 index 
5e2d7d6161..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/aes_c.c +++ /dev/null @@ -1,783 +0,0 @@ -// SPDX-License-Identifier: MIT and Apache-2.0 - -/* - * AES implementation based on code from PQClean, - * which is in turn based on BearSSL (https://bearssl.org/) - * by Thomas Pornin. - * - * - * Copyright (c) 2016 Thomas Pornin - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include -#include -#include - -#define AES128_KEYBYTES 16 -#define AES192_KEYBYTES 24 -#define AES256_KEYBYTES 32 -#define AESCTR_NONCEBYTES 12 -#define AES_BLOCKBYTES 16 - -#define PQC_AES128_STATESIZE 88 -typedef struct -{ - uint64_t sk_exp[PQC_AES128_STATESIZE]; -} aes128ctx; - -#define PQC_AES192_STATESIZE 104 -typedef struct -{ - uint64_t sk_exp[PQC_AES192_STATESIZE]; -} aes192ctx; - -#define PQC_AES256_STATESIZE 120 -typedef struct -{ - uint64_t sk_exp[PQC_AES256_STATESIZE]; -} aes256ctx; - -/** Initializes the context **/ -void aes128_ecb_keyexp(aes128ctx *r, const unsigned char *key); - -void aes128_ctr_keyexp(aes128ctx *r, const unsigned char *key); - -void aes128_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes128ctx *ctx); - -void aes128_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes128ctx *ctx); - -/** Frees the context **/ -void aes128_ctx_release(aes128ctx *r); - -/** Initializes the context **/ -void aes192_ecb_keyexp(aes192ctx *r, const unsigned char *key); - -void aes192_ctr_keyexp(aes192ctx *r, const unsigned char *key); - -void aes192_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes192ctx *ctx); - -void aes192_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes192ctx *ctx); - -void aes192_ctx_release(aes192ctx *r); - -/** Initializes the context **/ -void aes256_ecb_keyexp(aes256ctx *r, const unsigned char *key); - -void aes256_ctr_keyexp(aes256ctx *r, const unsigned char *key); - -void aes256_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes256ctx *ctx); - -void aes256_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes256ctx *ctx); - -/** Frees the context **/ -void aes256_ctx_release(aes256ctx *r); - -static inline uint32_t -br_dec32le(const unsigned char *src) -{ - return (uint32_t)src[0] | ((uint32_t)src[1] << 8) | ((uint32_t)src[2] << 16) | - ((uint32_t)src[3] << 24); -} - -static void -br_range_dec32le(uint32_t *v, size_t num, const unsigned char *src) -{ - while (num-- > 0) { - *v++ = 
br_dec32le(src); - src += 4; - } -} - -static inline uint32_t -br_swap32(uint32_t x) -{ - x = ((x & (uint32_t)0x00FF00FF) << 8) | ((x >> 8) & (uint32_t)0x00FF00FF); - return (x << 16) | (x >> 16); -} - -static inline void -br_enc32le(unsigned char *dst, uint32_t x) -{ - dst[0] = (unsigned char)x; - dst[1] = (unsigned char)(x >> 8); - dst[2] = (unsigned char)(x >> 16); - dst[3] = (unsigned char)(x >> 24); -} - -static void -br_range_enc32le(unsigned char *dst, const uint32_t *v, size_t num) -{ - while (num-- > 0) { - br_enc32le(dst, *v++); - dst += 4; - } -} - -static void -br_aes_ct64_bitslice_Sbox(uint64_t *q) -{ - /* - * This S-box implementation is a straightforward translation of - * the circuit described by Boyar and Peralta in "A new - * combinational logic minimization technique with applications - * to cryptology" (https://eprint.iacr.org/2009/191.pdf). - * - * Note that variables x* (input) and s* (output) are numbered - * in "reverse" order (x0 is the high bit, x7 is the low bit). - */ - - uint64_t x0, x1, x2, x3, x4, x5, x6, x7; - uint64_t y1, y2, y3, y4, y5, y6, y7, y8, y9; - uint64_t y10, y11, y12, y13, y14, y15, y16, y17, y18, y19; - uint64_t y20, y21; - uint64_t z0, z1, z2, z3, z4, z5, z6, z7, z8, z9; - uint64_t z10, z11, z12, z13, z14, z15, z16, z17; - uint64_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; - uint64_t t10, t11, t12, t13, t14, t15, t16, t17, t18, t19; - uint64_t t20, t21, t22, t23, t24, t25, t26, t27, t28, t29; - uint64_t t30, t31, t32, t33, t34, t35, t36, t37, t38, t39; - uint64_t t40, t41, t42, t43, t44, t45, t46, t47, t48, t49; - uint64_t t50, t51, t52, t53, t54, t55, t56, t57, t58, t59; - uint64_t t60, t61, t62, t63, t64, t65, t66, t67; - uint64_t s0, s1, s2, s3, s4, s5, s6, s7; - - x0 = q[7]; - x1 = q[6]; - x2 = q[5]; - x3 = q[4]; - x4 = q[3]; - x5 = q[2]; - x6 = q[1]; - x7 = q[0]; - - /* - * Top linear transformation. - */ - y14 = x3 ^ x5; - y13 = x0 ^ x6; - y9 = x0 ^ x3; - y8 = x0 ^ x5; - t0 = x1 ^ x2; - y1 = t0 ^ x7; - y4 = y1 ^ x3; - y12 = y13 ^ y14; - y2 = y1 ^ x0; - y5 = y1 ^ x6; - y3 = y5 ^ y8; - t1 = x4 ^ y12; - y15 = t1 ^ x5; - y20 = t1 ^ x1; - y6 = y15 ^ x7; - y10 = y15 ^ t0; - y11 = y20 ^ y9; - y7 = x7 ^ y11; - y17 = y10 ^ y11; - y19 = y10 ^ y8; - y16 = t0 ^ y11; - y21 = y13 ^ y16; - y18 = x0 ^ y16; - - /* - * Non-linear section. - */ - t2 = y12 & y15; - t3 = y3 & y6; - t4 = t3 ^ t2; - t5 = y4 & x7; - t6 = t5 ^ t2; - t7 = y13 & y16; - t8 = y5 & y1; - t9 = t8 ^ t7; - t10 = y2 & y7; - t11 = t10 ^ t7; - t12 = y9 & y11; - t13 = y14 & y17; - t14 = t13 ^ t12; - t15 = y8 & y10; - t16 = t15 ^ t12; - t17 = t4 ^ t14; - t18 = t6 ^ t16; - t19 = t9 ^ t14; - t20 = t11 ^ t16; - t21 = t17 ^ y20; - t22 = t18 ^ y19; - t23 = t19 ^ y21; - t24 = t20 ^ y18; - - t25 = t21 ^ t22; - t26 = t21 & t23; - t27 = t24 ^ t26; - t28 = t25 & t27; - t29 = t28 ^ t22; - t30 = t23 ^ t24; - t31 = t22 ^ t26; - t32 = t31 & t30; - t33 = t32 ^ t24; - t34 = t23 ^ t33; - t35 = t27 ^ t33; - t36 = t24 & t35; - t37 = t36 ^ t34; - t38 = t27 ^ t36; - t39 = t29 & t38; - t40 = t25 ^ t39; - - t41 = t40 ^ t37; - t42 = t29 ^ t33; - t43 = t29 ^ t40; - t44 = t33 ^ t37; - t45 = t42 ^ t41; - z0 = t44 & y15; - z1 = t37 & y6; - z2 = t33 & x7; - z3 = t43 & y16; - z4 = t40 & y1; - z5 = t29 & y7; - z6 = t42 & y11; - z7 = t45 & y17; - z8 = t41 & y10; - z9 = t44 & y12; - z10 = t37 & y3; - z11 = t33 & y4; - z12 = t43 & y13; - z13 = t40 & y5; - z14 = t29 & y2; - z15 = t42 & y9; - z16 = t45 & y14; - z17 = t41 & y8; - - /* - * Bottom linear transformation. 
- */ - t46 = z15 ^ z16; - t47 = z10 ^ z11; - t48 = z5 ^ z13; - t49 = z9 ^ z10; - t50 = z2 ^ z12; - t51 = z2 ^ z5; - t52 = z7 ^ z8; - t53 = z0 ^ z3; - t54 = z6 ^ z7; - t55 = z16 ^ z17; - t56 = z12 ^ t48; - t57 = t50 ^ t53; - t58 = z4 ^ t46; - t59 = z3 ^ t54; - t60 = t46 ^ t57; - t61 = z14 ^ t57; - t62 = t52 ^ t58; - t63 = t49 ^ t58; - t64 = z4 ^ t59; - t65 = t61 ^ t62; - t66 = z1 ^ t63; - s0 = t59 ^ t63; - s6 = t56 ^ ~t62; - s7 = t48 ^ ~t60; - t67 = t64 ^ t65; - s3 = t53 ^ t66; - s4 = t51 ^ t66; - s5 = t47 ^ t65; - s1 = t64 ^ ~s3; - s2 = t55 ^ ~t67; - - q[7] = s0; - q[6] = s1; - q[5] = s2; - q[4] = s3; - q[3] = s4; - q[2] = s5; - q[1] = s6; - q[0] = s7; -} - -static void -br_aes_ct64_ortho(uint64_t *q) -{ -#define SWAPN(cl, ch, s, x, y) \ - do { \ - uint64_t a, b; \ - a = (x); \ - b = (y); \ - (x) = (a & (uint64_t)(cl)) | ((b & (uint64_t)(cl)) << (s)); \ - (y) = ((a & (uint64_t)(ch)) >> (s)) | (b & (uint64_t)(ch)); \ - } while (0) - -#define SWAP2(x, y) SWAPN(0x5555555555555555, 0xAAAAAAAAAAAAAAAA, 1, x, y) -#define SWAP4(x, y) SWAPN(0x3333333333333333, 0xCCCCCCCCCCCCCCCC, 2, x, y) -#define SWAP8(x, y) SWAPN(0x0F0F0F0F0F0F0F0F, 0xF0F0F0F0F0F0F0F0, 4, x, y) - - SWAP2(q[0], q[1]); - SWAP2(q[2], q[3]); - SWAP2(q[4], q[5]); - SWAP2(q[6], q[7]); - - SWAP4(q[0], q[2]); - SWAP4(q[1], q[3]); - SWAP4(q[4], q[6]); - SWAP4(q[5], q[7]); - - SWAP8(q[0], q[4]); - SWAP8(q[1], q[5]); - SWAP8(q[2], q[6]); - SWAP8(q[3], q[7]); -} - -static void -br_aes_ct64_interleave_in(uint64_t *q0, uint64_t *q1, const uint32_t *w) -{ - uint64_t x0, x1, x2, x3; - - x0 = w[0]; - x1 = w[1]; - x2 = w[2]; - x3 = w[3]; - x0 |= (x0 << 16); - x1 |= (x1 << 16); - x2 |= (x2 << 16); - x3 |= (x3 << 16); - x0 &= (uint64_t)0x0000FFFF0000FFFF; - x1 &= (uint64_t)0x0000FFFF0000FFFF; - x2 &= (uint64_t)0x0000FFFF0000FFFF; - x3 &= (uint64_t)0x0000FFFF0000FFFF; - x0 |= (x0 << 8); - x1 |= (x1 << 8); - x2 |= (x2 << 8); - x3 |= (x3 << 8); - x0 &= (uint64_t)0x00FF00FF00FF00FF; - x1 &= (uint64_t)0x00FF00FF00FF00FF; - x2 &= (uint64_t)0x00FF00FF00FF00FF; - x3 &= (uint64_t)0x00FF00FF00FF00FF; - *q0 = x0 | (x2 << 8); - *q1 = x1 | (x3 << 8); -} - -static void -br_aes_ct64_interleave_out(uint32_t *w, uint64_t q0, uint64_t q1) -{ - uint64_t x0, x1, x2, x3; - - x0 = q0 & (uint64_t)0x00FF00FF00FF00FF; - x1 = q1 & (uint64_t)0x00FF00FF00FF00FF; - x2 = (q0 >> 8) & (uint64_t)0x00FF00FF00FF00FF; - x3 = (q1 >> 8) & (uint64_t)0x00FF00FF00FF00FF; - x0 |= (x0 >> 8); - x1 |= (x1 >> 8); - x2 |= (x2 >> 8); - x3 |= (x3 >> 8); - x0 &= (uint64_t)0x0000FFFF0000FFFF; - x1 &= (uint64_t)0x0000FFFF0000FFFF; - x2 &= (uint64_t)0x0000FFFF0000FFFF; - x3 &= (uint64_t)0x0000FFFF0000FFFF; - w[0] = (uint32_t)x0 | (uint32_t)(x0 >> 16); - w[1] = (uint32_t)x1 | (uint32_t)(x1 >> 16); - w[2] = (uint32_t)x2 | (uint32_t)(x2 >> 16); - w[3] = (uint32_t)x3 | (uint32_t)(x3 >> 16); -} - -static const unsigned char Rcon[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36 }; - -static uint32_t -sub_word(uint32_t x) -{ - uint64_t q[8]; - - memset(q, 0, sizeof q); - q[0] = x; - br_aes_ct64_ortho(q); - br_aes_ct64_bitslice_Sbox(q); - br_aes_ct64_ortho(q); - return (uint32_t)q[0]; -} - -static void -br_aes_ct64_keysched(uint64_t *comp_skey, const unsigned char *key, unsigned int key_len) -{ - unsigned int i, j, k, nk, nkf; - uint32_t tmp; - uint32_t skey[60]; - unsigned nrounds = 10 + ((key_len - 16) >> 2); - - nk = (key_len >> 2); - nkf = ((nrounds + 1) << 2); - br_range_dec32le(skey, (key_len >> 2), key); - tmp = skey[(key_len >> 2) - 1]; - for (i = nk, j = 0, k = 0; i < nkf; i++) { - 
if (j == 0) { - tmp = (tmp << 24) | (tmp >> 8); - tmp = sub_word(tmp) ^ Rcon[k]; - } else if (nk > 6 && j == 4) { - tmp = sub_word(tmp); - } - tmp ^= skey[i - nk]; - skey[i] = tmp; - if (++j == nk) { - j = 0; - k++; - } - } - - for (i = 0, j = 0; i < nkf; i += 4, j += 2) { - uint64_t q[8]; - - br_aes_ct64_interleave_in(&q[0], &q[4], skey + i); - q[1] = q[0]; - q[2] = q[0]; - q[3] = q[0]; - q[5] = q[4]; - q[6] = q[4]; - q[7] = q[4]; - br_aes_ct64_ortho(q); - comp_skey[j + 0] = - (q[0] & (uint64_t)0x1111111111111111) | (q[1] & (uint64_t)0x2222222222222222) | - (q[2] & (uint64_t)0x4444444444444444) | (q[3] & (uint64_t)0x8888888888888888); - comp_skey[j + 1] = - (q[4] & (uint64_t)0x1111111111111111) | (q[5] & (uint64_t)0x2222222222222222) | - (q[6] & (uint64_t)0x4444444444444444) | (q[7] & (uint64_t)0x8888888888888888); - } -} - -static void -br_aes_ct64_skey_expand(uint64_t *skey, const uint64_t *comp_skey, unsigned int nrounds) -{ - unsigned u, v, n; - - n = (nrounds + 1) << 1; - for (u = 0, v = 0; u < n; u++, v += 4) { - uint64_t x0, x1, x2, x3; - - x0 = x1 = x2 = x3 = comp_skey[u]; - x0 &= (uint64_t)0x1111111111111111; - x1 &= (uint64_t)0x2222222222222222; - x2 &= (uint64_t)0x4444444444444444; - x3 &= (uint64_t)0x8888888888888888; - x1 >>= 1; - x2 >>= 2; - x3 >>= 3; - skey[v + 0] = (x0 << 4) - x0; - skey[v + 1] = (x1 << 4) - x1; - skey[v + 2] = (x2 << 4) - x2; - skey[v + 3] = (x3 << 4) - x3; - } -} - -static inline void -add_round_key(uint64_t *q, const uint64_t *sk) -{ - q[0] ^= sk[0]; - q[1] ^= sk[1]; - q[2] ^= sk[2]; - q[3] ^= sk[3]; - q[4] ^= sk[4]; - q[5] ^= sk[5]; - q[6] ^= sk[6]; - q[7] ^= sk[7]; -} - -static inline void -shift_rows(uint64_t *q) -{ - int i; - - for (i = 0; i < 8; i++) { - uint64_t x; - - x = q[i]; - q[i] = - (x & (uint64_t)0x000000000000FFFF) | ((x & (uint64_t)0x00000000FFF00000) >> 4) | - ((x & (uint64_t)0x00000000000F0000) << 12) | ((x & (uint64_t)0x0000FF0000000000) >> 8) | - ((x & (uint64_t)0x000000FF00000000) << 8) | ((x & (uint64_t)0xF000000000000000) >> 12) | - ((x & (uint64_t)0x0FFF000000000000) << 4); - } -} - -static inline uint64_t -rotr32(uint64_t x) -{ - return (x << 32) | (x >> 32); -} - -static inline void -mix_columns(uint64_t *q) -{ - uint64_t q0, q1, q2, q3, q4, q5, q6, q7; - uint64_t r0, r1, r2, r3, r4, r5, r6, r7; - - q0 = q[0]; - q1 = q[1]; - q2 = q[2]; - q3 = q[3]; - q4 = q[4]; - q5 = q[5]; - q6 = q[6]; - q7 = q[7]; - r0 = (q0 >> 16) | (q0 << 48); - r1 = (q1 >> 16) | (q1 << 48); - r2 = (q2 >> 16) | (q2 << 48); - r3 = (q3 >> 16) | (q3 << 48); - r4 = (q4 >> 16) | (q4 << 48); - r5 = (q5 >> 16) | (q5 << 48); - r6 = (q6 >> 16) | (q6 << 48); - r7 = (q7 >> 16) | (q7 << 48); - - q[0] = q7 ^ r7 ^ r0 ^ rotr32(q0 ^ r0); - q[1] = q0 ^ r0 ^ q7 ^ r7 ^ r1 ^ rotr32(q1 ^ r1); - q[2] = q1 ^ r1 ^ r2 ^ rotr32(q2 ^ r2); - q[3] = q2 ^ r2 ^ q7 ^ r7 ^ r3 ^ rotr32(q3 ^ r3); - q[4] = q3 ^ r3 ^ q7 ^ r7 ^ r4 ^ rotr32(q4 ^ r4); - q[5] = q4 ^ r4 ^ r5 ^ rotr32(q5 ^ r5); - q[6] = q5 ^ r5 ^ r6 ^ rotr32(q6 ^ r6); - q[7] = q6 ^ r6 ^ r7 ^ rotr32(q7 ^ r7); -} - -static void -inc4_be(uint32_t *x) -{ - uint32_t t = br_swap32(*x) + 4; - *x = br_swap32(t); -} - -static void -aes_ecb4x(unsigned char out[64], - const uint32_t ivw[16], - const uint64_t *sk_exp, - unsigned int nrounds) -{ - uint32_t w[16]; - uint64_t q[8]; - unsigned int i; - - memcpy(w, ivw, sizeof(w)); - for (i = 0; i < 4; i++) { - br_aes_ct64_interleave_in(&q[i], &q[i + 4], w + (i << 2)); - } - br_aes_ct64_ortho(q); - - add_round_key(q, sk_exp); - for (i = 1; i < nrounds; i++) { - br_aes_ct64_bitslice_Sbox(q); - 
shift_rows(q); - mix_columns(q); - add_round_key(q, sk_exp + (i << 3)); - } - br_aes_ct64_bitslice_Sbox(q); - shift_rows(q); - add_round_key(q, sk_exp + 8 * nrounds); - - br_aes_ct64_ortho(q); - for (i = 0; i < 4; i++) { - br_aes_ct64_interleave_out(w + (i << 2), q[i], q[i + 4]); - } - br_range_enc32le(out, w, 16); -} - -static void -aes_ctr4x(unsigned char out[64], uint32_t ivw[16], const uint64_t *sk_exp, unsigned int nrounds) -{ - aes_ecb4x(out, ivw, sk_exp, nrounds); - - /* Increase counter for next 4 blocks */ - inc4_be(ivw + 3); - inc4_be(ivw + 7); - inc4_be(ivw + 11); - inc4_be(ivw + 15); -} - -static void -aes_ecb(unsigned char *out, - const unsigned char *in, - size_t nblocks, - const uint64_t *rkeys, - unsigned int nrounds) -{ - uint32_t blocks[16]; - unsigned char t[64]; - - while (nblocks >= 4) { - br_range_dec32le(blocks, 16, in); - aes_ecb4x(out, blocks, rkeys, nrounds); - nblocks -= 4; - in += 64; - out += 64; - } - - if (nblocks) { - br_range_dec32le(blocks, nblocks * 4, in); - aes_ecb4x(t, blocks, rkeys, nrounds); - memcpy(out, t, nblocks * 16); - } -} - -static void -aes_ctr(unsigned char *out, - size_t outlen, - const unsigned char *iv, - const uint64_t *rkeys, - unsigned int nrounds) -{ - uint32_t ivw[16]; - size_t i; - uint32_t cc = 0; - - br_range_dec32le(ivw, 3, iv); - memcpy(ivw + 4, ivw, 3 * sizeof(uint32_t)); - memcpy(ivw + 8, ivw, 3 * sizeof(uint32_t)); - memcpy(ivw + 12, ivw, 3 * sizeof(uint32_t)); - ivw[3] = br_swap32(cc); - ivw[7] = br_swap32(cc + 1); - ivw[11] = br_swap32(cc + 2); - ivw[15] = br_swap32(cc + 3); - - while (outlen > 64) { - aes_ctr4x(out, ivw, rkeys, nrounds); - out += 64; - outlen -= 64; - } - if (outlen > 0) { - unsigned char tmp[64]; - aes_ctr4x(tmp, ivw, rkeys, nrounds); - for (i = 0; i < outlen; i++) { - out[i] = tmp[i]; - } - } -} - -void -aes128_ecb_keyexp(aes128ctx *r, const unsigned char *key) -{ - uint64_t skey[22]; - - br_aes_ct64_keysched(skey, key, 16); - br_aes_ct64_skey_expand(r->sk_exp, skey, 10); -} - -void -aes128_ctr_keyexp(aes128ctx *r, const unsigned char *key) -{ - aes128_ecb_keyexp(r, key); -} - -void -aes192_ecb_keyexp(aes192ctx *r, const unsigned char *key) -{ - uint64_t skey[26]; - - br_aes_ct64_keysched(skey, key, 24); - br_aes_ct64_skey_expand(r->sk_exp, skey, 12); -} - -void -aes192_ctr_keyexp(aes192ctx *r, const unsigned char *key) -{ - aes192_ecb_keyexp(r, key); -} - -void -aes256_ecb_keyexp(aes256ctx *r, const unsigned char *key) -{ - uint64_t skey[30]; - - br_aes_ct64_keysched(skey, key, 32); - br_aes_ct64_skey_expand(r->sk_exp, skey, 14); -} - -void -aes256_ctr_keyexp(aes256ctx *r, const unsigned char *key) -{ - aes256_ecb_keyexp(r, key); -} - -void -aes128_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes128ctx *ctx) -{ - aes_ecb(out, in, nblocks, ctx->sk_exp, 10); -} - -void -aes128_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes128ctx *ctx) -{ - aes_ctr(out, outlen, iv, ctx->sk_exp, 10); -} - -void -aes192_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes192ctx *ctx) -{ - aes_ecb(out, in, nblocks, ctx->sk_exp, 12); -} - -void -aes192_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes192ctx *ctx) -{ - aes_ctr(out, outlen, iv, ctx->sk_exp, 12); -} - -void -aes256_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes256ctx *ctx) -{ - aes_ecb(out, in, nblocks, ctx->sk_exp, 14); -} - -void -aes256_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes256ctx *ctx) -{ - 
aes_ctr(out, outlen, iv, ctx->sk_exp, 14); -} - -void -aes128_ctx_release(aes128ctx *r) -{ -} - -void -aes192_ctx_release(aes192ctx *r) -{ -} - -void -aes256_ctx_release(aes256ctx *r) -{ -} - -int -AES_128_CTR(unsigned char *output, - size_t outputByteLen, - const unsigned char *input, - size_t inputByteLen) -{ - aes128ctx ctx; - const unsigned char iv[16] = { 0 }; - - aes128_ctr_keyexp(&ctx, input); - aes128_ctr(output, outputByteLen, iv, &ctx); - aes128_ctx_release(&ctx); - - return (int)outputByteLen; -} - -void -AES_256_ECB(const uint8_t *input, const unsigned char *key, unsigned char *output) -{ - aes256ctx ctx; - - aes256_ecb_keyexp(&ctx, key); - aes256_ecb(output, input, 1, &ctx); - aes256_ctx_release(&ctx); -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fips202.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fips202.c deleted file mode 100644 index f2992d8c7f..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fips202.c +++ /dev/null @@ -1,876 +0,0 @@ -// SPDX-License-Identifier: PD and Apache-2.0 - -/* FIPS202 implementation based on code from PQClean, - * which is in turn based based on the public domain implementation in - * crypto_hash/keccakc512/simple/ from http://bench.cr.yp.to/supercop.html - * by Ronny Van Keer - * and the public domain "TweetFips202" implementation - * from https://twitter.com/tweetfips202 - * by Gilles Van Assche, Daniel J. Bernstein, and Peter Schwabe */ - -#include -#include -#include -#include - -#include "fips202.h" - -#define NROUNDS 24 -#define ROL(a, offset) (((a) << (offset)) ^ ((a) >> (64 - (offset)))) - -/************************************************* - * Name: load64 - * - * Description: Load 8 bytes into uint64_t in little-endian order - * - * Arguments: - const uint8_t *x: pointer to input byte array - * - * Returns the loaded 64-bit unsigned integer - **************************************************/ -static uint64_t load64(const uint8_t *x) { - uint64_t r = 0; - for (size_t i = 0; i < 8; ++i) { - r |= (uint64_t)x[i] << 8 * i; - } - - return r; -} - -/************************************************* - * Name: store64 - * - * Description: Store a 64-bit integer to a byte array in little-endian order - * - * Arguments: - uint8_t *x: pointer to the output byte array - * - uint64_t u: input 64-bit unsigned integer - **************************************************/ -static void store64(uint8_t *x, uint64_t u) { - for (size_t i = 0; i < 8; ++i) { - x[i] = (uint8_t) (u >> 8 * i); - } -} - -/* Keccak round constants */ -static const uint64_t KeccakF_RoundConstants[NROUNDS] = { - 0x0000000000000001ULL, 0x0000000000008082ULL, - 0x800000000000808aULL, 0x8000000080008000ULL, - 0x000000000000808bULL, 0x0000000080000001ULL, - 0x8000000080008081ULL, 0x8000000000008009ULL, - 0x000000000000008aULL, 0x0000000000000088ULL, - 0x0000000080008009ULL, 0x000000008000000aULL, - 0x000000008000808bULL, 0x800000000000008bULL, - 0x8000000000008089ULL, 0x8000000000008003ULL, - 0x8000000000008002ULL, 0x8000000000000080ULL, - 0x000000000000800aULL, 0x800000008000000aULL, - 0x8000000080008081ULL, 0x8000000000008080ULL, - 0x0000000080000001ULL, 0x8000000080008008ULL -}; - -/************************************************* - * Name: KeccakF1600_StatePermute - * - * Description: The Keccak F1600 Permutation - * - * Arguments: - uint64_t *state: pointer to input/output Keccak state - **************************************************/ -static void KeccakF1600_StatePermute(uint64_t *state) { - int round; - - uint64_t Aba, Abe, Abi, Abo, Abu; 
- uint64_t Aga, Age, Agi, Ago, Agu; - uint64_t Aka, Ake, Aki, Ako, Aku; - uint64_t Ama, Ame, Ami, Amo, Amu; - uint64_t Asa, Ase, Asi, Aso, Asu; - uint64_t BCa, BCe, BCi, BCo, BCu; - uint64_t Da, De, Di, Do, Du; - uint64_t Eba, Ebe, Ebi, Ebo, Ebu; - uint64_t Ega, Ege, Egi, Ego, Egu; - uint64_t Eka, Eke, Eki, Eko, Eku; - uint64_t Ema, Eme, Emi, Emo, Emu; - uint64_t Esa, Ese, Esi, Eso, Esu; - - // copyFromState(A, state) - Aba = state[0]; - Abe = state[1]; - Abi = state[2]; - Abo = state[3]; - Abu = state[4]; - Aga = state[5]; - Age = state[6]; - Agi = state[7]; - Ago = state[8]; - Agu = state[9]; - Aka = state[10]; - Ake = state[11]; - Aki = state[12]; - Ako = state[13]; - Aku = state[14]; - Ama = state[15]; - Ame = state[16]; - Ami = state[17]; - Amo = state[18]; - Amu = state[19]; - Asa = state[20]; - Ase = state[21]; - Asi = state[22]; - Aso = state[23]; - Asu = state[24]; - - for (round = 0; round < NROUNDS; round += 2) { - // prepareTheta - BCa = Aba ^ Aga ^ Aka ^ Ama ^ Asa; - BCe = Abe ^ Age ^ Ake ^ Ame ^ Ase; - BCi = Abi ^ Agi ^ Aki ^ Ami ^ Asi; - BCo = Abo ^ Ago ^ Ako ^ Amo ^ Aso; - BCu = Abu ^ Agu ^ Aku ^ Amu ^ Asu; - - // thetaRhoPiChiIotaPrepareTheta(round , A, E) - Da = BCu ^ ROL(BCe, 1); - De = BCa ^ ROL(BCi, 1); - Di = BCe ^ ROL(BCo, 1); - Do = BCi ^ ROL(BCu, 1); - Du = BCo ^ ROL(BCa, 1); - - Aba ^= Da; - BCa = Aba; - Age ^= De; - BCe = ROL(Age, 44); - Aki ^= Di; - BCi = ROL(Aki, 43); - Amo ^= Do; - BCo = ROL(Amo, 21); - Asu ^= Du; - BCu = ROL(Asu, 14); - Eba = BCa ^ ((~BCe) & BCi); - Eba ^= KeccakF_RoundConstants[round]; - Ebe = BCe ^ ((~BCi) & BCo); - Ebi = BCi ^ ((~BCo) & BCu); - Ebo = BCo ^ ((~BCu) & BCa); - Ebu = BCu ^ ((~BCa) & BCe); - - Abo ^= Do; - BCa = ROL(Abo, 28); - Agu ^= Du; - BCe = ROL(Agu, 20); - Aka ^= Da; - BCi = ROL(Aka, 3); - Ame ^= De; - BCo = ROL(Ame, 45); - Asi ^= Di; - BCu = ROL(Asi, 61); - Ega = BCa ^ ((~BCe) & BCi); - Ege = BCe ^ ((~BCi) & BCo); - Egi = BCi ^ ((~BCo) & BCu); - Ego = BCo ^ ((~BCu) & BCa); - Egu = BCu ^ ((~BCa) & BCe); - - Abe ^= De; - BCa = ROL(Abe, 1); - Agi ^= Di; - BCe = ROL(Agi, 6); - Ako ^= Do; - BCi = ROL(Ako, 25); - Amu ^= Du; - BCo = ROL(Amu, 8); - Asa ^= Da; - BCu = ROL(Asa, 18); - Eka = BCa ^ ((~BCe) & BCi); - Eke = BCe ^ ((~BCi) & BCo); - Eki = BCi ^ ((~BCo) & BCu); - Eko = BCo ^ ((~BCu) & BCa); - Eku = BCu ^ ((~BCa) & BCe); - - Abu ^= Du; - BCa = ROL(Abu, 27); - Aga ^= Da; - BCe = ROL(Aga, 36); - Ake ^= De; - BCi = ROL(Ake, 10); - Ami ^= Di; - BCo = ROL(Ami, 15); - Aso ^= Do; - BCu = ROL(Aso, 56); - Ema = BCa ^ ((~BCe) & BCi); - Eme = BCe ^ ((~BCi) & BCo); - Emi = BCi ^ ((~BCo) & BCu); - Emo = BCo ^ ((~BCu) & BCa); - Emu = BCu ^ ((~BCa) & BCe); - - Abi ^= Di; - BCa = ROL(Abi, 62); - Ago ^= Do; - BCe = ROL(Ago, 55); - Aku ^= Du; - BCi = ROL(Aku, 39); - Ama ^= Da; - BCo = ROL(Ama, 41); - Ase ^= De; - BCu = ROL(Ase, 2); - Esa = BCa ^ ((~BCe) & BCi); - Ese = BCe ^ ((~BCi) & BCo); - Esi = BCi ^ ((~BCo) & BCu); - Eso = BCo ^ ((~BCu) & BCa); - Esu = BCu ^ ((~BCa) & BCe); - - // prepareTheta - BCa = Eba ^ Ega ^ Eka ^ Ema ^ Esa; - BCe = Ebe ^ Ege ^ Eke ^ Eme ^ Ese; - BCi = Ebi ^ Egi ^ Eki ^ Emi ^ Esi; - BCo = Ebo ^ Ego ^ Eko ^ Emo ^ Eso; - BCu = Ebu ^ Egu ^ Eku ^ Emu ^ Esu; - - // thetaRhoPiChiIotaPrepareTheta(round+1, E, A) - Da = BCu ^ ROL(BCe, 1); - De = BCa ^ ROL(BCi, 1); - Di = BCe ^ ROL(BCo, 1); - Do = BCi ^ ROL(BCu, 1); - Du = BCo ^ ROL(BCa, 1); - - Eba ^= Da; - BCa = Eba; - Ege ^= De; - BCe = ROL(Ege, 44); - Eki ^= Di; - BCi = ROL(Eki, 43); - Emo ^= Do; - BCo = ROL(Emo, 21); - Esu ^= Du; - BCu = ROL(Esu, 14); - Aba = BCa ^ 
((~BCe) & BCi); - Aba ^= KeccakF_RoundConstants[round + 1]; - Abe = BCe ^ ((~BCi) & BCo); - Abi = BCi ^ ((~BCo) & BCu); - Abo = BCo ^ ((~BCu) & BCa); - Abu = BCu ^ ((~BCa) & BCe); - - Ebo ^= Do; - BCa = ROL(Ebo, 28); - Egu ^= Du; - BCe = ROL(Egu, 20); - Eka ^= Da; - BCi = ROL(Eka, 3); - Eme ^= De; - BCo = ROL(Eme, 45); - Esi ^= Di; - BCu = ROL(Esi, 61); - Aga = BCa ^ ((~BCe) & BCi); - Age = BCe ^ ((~BCi) & BCo); - Agi = BCi ^ ((~BCo) & BCu); - Ago = BCo ^ ((~BCu) & BCa); - Agu = BCu ^ ((~BCa) & BCe); - - Ebe ^= De; - BCa = ROL(Ebe, 1); - Egi ^= Di; - BCe = ROL(Egi, 6); - Eko ^= Do; - BCi = ROL(Eko, 25); - Emu ^= Du; - BCo = ROL(Emu, 8); - Esa ^= Da; - BCu = ROL(Esa, 18); - Aka = BCa ^ ((~BCe) & BCi); - Ake = BCe ^ ((~BCi) & BCo); - Aki = BCi ^ ((~BCo) & BCu); - Ako = BCo ^ ((~BCu) & BCa); - Aku = BCu ^ ((~BCa) & BCe); - - Ebu ^= Du; - BCa = ROL(Ebu, 27); - Ega ^= Da; - BCe = ROL(Ega, 36); - Eke ^= De; - BCi = ROL(Eke, 10); - Emi ^= Di; - BCo = ROL(Emi, 15); - Eso ^= Do; - BCu = ROL(Eso, 56); - Ama = BCa ^ ((~BCe) & BCi); - Ame = BCe ^ ((~BCi) & BCo); - Ami = BCi ^ ((~BCo) & BCu); - Amo = BCo ^ ((~BCu) & BCa); - Amu = BCu ^ ((~BCa) & BCe); - - Ebi ^= Di; - BCa = ROL(Ebi, 62); - Ego ^= Do; - BCe = ROL(Ego, 55); - Eku ^= Du; - BCi = ROL(Eku, 39); - Ema ^= Da; - BCo = ROL(Ema, 41); - Ese ^= De; - BCu = ROL(Ese, 2); - Asa = BCa ^ ((~BCe) & BCi); - Ase = BCe ^ ((~BCi) & BCo); - Asi = BCi ^ ((~BCo) & BCu); - Aso = BCo ^ ((~BCu) & BCa); - Asu = BCu ^ ((~BCa) & BCe); - } - - // copyToState(state, A) - state[0] = Aba; - state[1] = Abe; - state[2] = Abi; - state[3] = Abo; - state[4] = Abu; - state[5] = Aga; - state[6] = Age; - state[7] = Agi; - state[8] = Ago; - state[9] = Agu; - state[10] = Aka; - state[11] = Ake; - state[12] = Aki; - state[13] = Ako; - state[14] = Aku; - state[15] = Ama; - state[16] = Ame; - state[17] = Ami; - state[18] = Amo; - state[19] = Amu; - state[20] = Asa; - state[21] = Ase; - state[22] = Asi; - state[23] = Aso; - state[24] = Asu; -} - -/************************************************* - * Name: keccak_absorb - * - * Description: Absorb step of Keccak; - * non-incremental, starts by zeroeing the state. - * - * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - * - const uint8_t *m: pointer to input to be absorbed into s - * - size_t mlen: length of input in bytes - * - uint8_t p: domain-separation byte for different - * Keccak-derived functions - **************************************************/ -static void keccak_absorb(uint64_t *s, uint32_t r, const uint8_t *m, - size_t mlen, uint8_t p) { - size_t i; - uint8_t t[200]; - - /* Zero state */ - for (i = 0; i < 25; ++i) { - s[i] = 0; - } - - while (mlen >= r) { - for (i = 0; i < r / 8; ++i) { - s[i] ^= load64(m + 8 * i); - } - - KeccakF1600_StatePermute(s); - mlen -= r; - m += r; - } - - for (i = 0; i < r; ++i) { - t[i] = 0; - } - for (i = 0; i < mlen; ++i) { - t[i] = m[i]; - } - t[i] = p; - t[r - 1] |= 128; - for (i = 0; i < r / 8; ++i) { - s[i] ^= load64(t + 8 * i); - } -} - -/************************************************* - * Name: keccak_squeezeblocks - * - * Description: Squeeze step of Keccak. Squeezes full blocks of r bytes each. - * Modifies the state. Can be called multiple times to keep - * squeezing, i.e., is incremental. 
- * - * Arguments: - uint8_t *h: pointer to output blocks - * - size_t nblocks: number of blocks to be - * squeezed (written to h) - * - uint64_t *s: pointer to input/output Keccak state - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - **************************************************/ -static void keccak_squeezeblocks(uint8_t *h, size_t nblocks, - uint64_t *s, uint32_t r) { - while (nblocks > 0) { - KeccakF1600_StatePermute(s); - for (size_t i = 0; i < (r >> 3); i++) { - store64(h + 8 * i, s[i]); - } - h += r; - nblocks--; - } -} - -/************************************************* - * Name: keccak_inc_init - * - * Description: Initializes the incremental Keccak state to zero. - * - * Arguments: - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. - **************************************************/ -static void keccak_inc_init(uint64_t *s_inc) { - size_t i; - - for (i = 0; i < 25; ++i) { - s_inc[i] = 0; - } - s_inc[25] = 0; -} - -/************************************************* - * Name: keccak_inc_absorb - * - * Description: Incremental keccak absorb - * Preceded by keccak_inc_init, succeeded by keccak_inc_finalize - * - * Arguments: - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - * - const uint8_t *m: pointer to input to be absorbed into s - * - size_t mlen: length of input in bytes - **************************************************/ -static void keccak_inc_absorb(uint64_t *s_inc, uint32_t r, const uint8_t *m, - size_t mlen) { - size_t i; - - /* Recall that s_inc[25] is the non-absorbed bytes xored into the state */ - while (mlen + s_inc[25] >= r) { - for (i = 0; i < r - (uint32_t)s_inc[25]; i++) { - /* Take the i'th byte from message - xor with the s_inc[25] + i'th byte of the state; little-endian */ - s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); - } - mlen -= (size_t)(r - s_inc[25]); - m += r - s_inc[25]; - s_inc[25] = 0; - - KeccakF1600_StatePermute(s_inc); - } - - for (i = 0; i < mlen; i++) { - s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); - } - s_inc[25] += mlen; -} - -/************************************************* - * Name: keccak_inc_finalize - * - * Description: Finalizes Keccak absorb phase, prepares for squeezing - * - * Arguments: - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - * - uint8_t p: domain-separation byte for different - * Keccak-derived functions - **************************************************/ -static void keccak_inc_finalize(uint64_t *s_inc, uint32_t r, uint8_t p) { - /* After keccak_inc_absorb, we are guaranteed that s_inc[25] < r, - so we can always use one more byte for p in the current state. 
*/ - s_inc[s_inc[25] >> 3] ^= (uint64_t)p << (8 * (s_inc[25] & 0x07)); - s_inc[(r - 1) >> 3] ^= (uint64_t)128 << (8 * ((r - 1) & 0x07)); - s_inc[25] = 0; -} - -/************************************************* - * Name: keccak_inc_squeeze - * - * Description: Incremental Keccak squeeze; can be called on byte-level - * - * Arguments: - uint8_t *h: pointer to output bytes - * - size_t outlen: number of bytes to be squeezed - * - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - **************************************************/ -static void keccak_inc_squeeze(uint8_t *h, size_t outlen, - uint64_t *s_inc, uint32_t r) { - size_t i; - - /* First consume any bytes we still have sitting around */ - for (i = 0; i < outlen && i < s_inc[25]; i++) { - /* There are s_inc[25] bytes left, so r - s_inc[25] is the first - available byte. We consume from there, i.e., up to r. */ - h[i] = (uint8_t)(s_inc[(r - s_inc[25] + i) >> 3] >> (8 * ((r - s_inc[25] + i) & 0x07))); - } - h += i; - outlen -= i; - s_inc[25] -= i; - - /* Then squeeze the remaining necessary blocks */ - while (outlen > 0) { - KeccakF1600_StatePermute(s_inc); - - for (i = 0; i < outlen && i < r; i++) { - h[i] = (uint8_t)(s_inc[i >> 3] >> (8 * (i & 0x07))); - } - h += i; - outlen -= i; - s_inc[25] = r - i; - } -} - -void shake128_inc_init(shake128incctx *state) { - keccak_inc_init(state->ctx); -} - -void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHAKE128_RATE, input, inlen); -} - -void shake128_inc_finalize(shake128incctx *state) { - keccak_inc_finalize(state->ctx, SHAKE128_RATE, 0x1F); -} - -void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state) { - keccak_inc_squeeze(output, outlen, state->ctx, SHAKE128_RATE); -} - -void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void shake128_inc_ctx_release(shake128incctx *state) { - (void)state; -} - -void shake256_inc_init(shake256incctx *state) { - keccak_inc_init(state->ctx); -} - -void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHAKE256_RATE, input, inlen); -} - -void shake256_inc_finalize(shake256incctx *state) { - keccak_inc_finalize(state->ctx, SHAKE256_RATE, 0x1F); -} - -void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state) { - keccak_inc_squeeze(output, outlen, state->ctx, SHAKE256_RATE); -} - -void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void shake256_inc_ctx_release(shake256incctx *state) { - (void)state; -} - - -/************************************************* - * Name: shake128_absorb - * - * Description: Absorb step of the SHAKE128 XOF. - * non-incremental, starts by zeroeing the state. 
- * - * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state - * - const uint8_t *input: pointer to input to be absorbed - * into s - * - size_t inlen: length of input in bytes - **************************************************/ -void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen) { - keccak_absorb(state->ctx, SHAKE128_RATE, input, inlen, 0x1F); -} - -/************************************************* - * Name: shake128_squeezeblocks - * - * Description: Squeeze step of SHAKE128 XOF. Squeezes full blocks of - * SHAKE128_RATE bytes each. Modifies the state. Can be called - * multiple times to keep squeezing, i.e., is incremental. - * - * Arguments: - uint8_t *output: pointer to output blocks - * - size_t nblocks: number of blocks to be squeezed - * (written to output) - * - shake128ctx *state: pointer to input/output Keccak state - **************************************************/ -void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state) { - keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE128_RATE); -} - -void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); -} - -/** Release the allocated state. Call only once. */ -void shake128_ctx_release(shake128ctx *state) { - (void)state; -} - -/************************************************* - * Name: shake256_absorb - * - * Description: Absorb step of the SHAKE256 XOF. - * non-incremental, starts by zeroeing the state. - * - * Arguments: - shake256ctx *state: pointer to (uninitialized) output Keccak state - * - const uint8_t *input: pointer to input to be absorbed - * into s - * - size_t inlen: length of input in bytes - **************************************************/ -void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen) { - keccak_absorb(state->ctx, SHAKE256_RATE, input, inlen, 0x1F); -} - -/************************************************* - * Name: shake256_squeezeblocks - * - * Description: Squeeze step of SHAKE256 XOF. Squeezes full blocks of - * SHAKE256_RATE bytes each. Modifies the state. Can be called - * multiple times to keep squeezing, i.e., is incremental. - * - * Arguments: - uint8_t *output: pointer to output blocks - * - size_t nblocks: number of blocks to be squeezed - * (written to output) - * - shake256ctx *state: pointer to input/output Keccak state - **************************************************/ -void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state) { - keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE256_RATE); -} - -void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); -} - -/** Release the allocated state. Call only once. 
*/ -void shake256_ctx_release(shake256ctx *state) { - (void)state; -} - -/************************************************* - * Name: shake128 - * - * Description: SHAKE128 XOF with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - size_t outlen: requested output length in bytes - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void shake128(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen) { - size_t nblocks = outlen / SHAKE128_RATE; - uint8_t t[SHAKE128_RATE]; - shake128ctx s; - - shake128_absorb(&s, input, inlen); - shake128_squeezeblocks(output, nblocks, &s); - - output += nblocks * SHAKE128_RATE; - outlen -= nblocks * SHAKE128_RATE; - - if (outlen) { - shake128_squeezeblocks(t, 1, &s); - for (size_t i = 0; i < outlen; ++i) { - output[i] = t[i]; - } - } - shake128_ctx_release(&s); -} - -/************************************************* - * Name: shake256 - * - * Description: SHAKE256 XOF with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - size_t outlen: requested output length in bytes - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void shake256(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen) { - size_t nblocks = outlen / SHAKE256_RATE; - uint8_t t[SHAKE256_RATE]; - shake256ctx s; - - shake256_absorb(&s, input, inlen); - shake256_squeezeblocks(output, nblocks, &s); - - output += nblocks * SHAKE256_RATE; - outlen -= nblocks * SHAKE256_RATE; - - if (outlen) { - shake256_squeezeblocks(t, 1, &s); - for (size_t i = 0; i < outlen; ++i) { - output[i] = t[i]; - } - } - shake256_ctx_release(&s); -} - -void sha3_256_inc_init(sha3_256incctx *state) { - keccak_inc_init(state->ctx); -} - -void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void sha3_256_inc_ctx_release(sha3_256incctx *state) { - (void)state; -} - -void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHA3_256_RATE, input, inlen); -} - -void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state) { - uint8_t t[SHA3_256_RATE]; - keccak_inc_finalize(state->ctx, SHA3_256_RATE, 0x06); - - keccak_squeezeblocks(t, 1, state->ctx, SHA3_256_RATE); - - sha3_256_inc_ctx_release(state); - - for (size_t i = 0; i < 32; i++) { - output[i] = t[i]; - } -} - -/************************************************* - * Name: sha3_256 - * - * Description: SHA3-256 with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen) { - uint64_t s[25]; - uint8_t t[SHA3_256_RATE]; - - /* Absorb input */ - keccak_absorb(s, SHA3_256_RATE, input, inlen, 0x06); - - /* Squeeze output */ - keccak_squeezeblocks(t, 1, s, SHA3_256_RATE); - - for (size_t i = 0; i < 32; i++) { - output[i] = t[i]; - } -} - -void sha3_384_inc_init(sha3_384incctx *state) { - keccak_inc_init(state->ctx); -} - -void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, 
size_t inlen) { - keccak_inc_absorb(state->ctx, SHA3_384_RATE, input, inlen); -} - -void sha3_384_inc_ctx_release(sha3_384incctx *state) { - (void)state; -} - -void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state) { - uint8_t t[SHA3_384_RATE]; - keccak_inc_finalize(state->ctx, SHA3_384_RATE, 0x06); - - keccak_squeezeblocks(t, 1, state->ctx, SHA3_384_RATE); - - sha3_384_inc_ctx_release(state); - - for (size_t i = 0; i < 48; i++) { - output[i] = t[i]; - } -} - -/************************************************* - * Name: sha3_384 - * - * Description: SHA3-256 with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen) { - uint64_t s[25]; - uint8_t t[SHA3_384_RATE]; - - /* Absorb input */ - keccak_absorb(s, SHA3_384_RATE, input, inlen, 0x06); - - /* Squeeze output */ - keccak_squeezeblocks(t, 1, s, SHA3_384_RATE); - - for (size_t i = 0; i < 48; i++) { - output[i] = t[i]; - } -} - -void sha3_512_inc_init(sha3_512incctx *state) { - keccak_inc_init(state->ctx); -} - -void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHA3_512_RATE, input, inlen); -} - -void sha3_512_inc_ctx_release(sha3_512incctx *state) { - (void)state; -} - -void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state) { - uint8_t t[SHA3_512_RATE]; - keccak_inc_finalize(state->ctx, SHA3_512_RATE, 0x06); - - keccak_squeezeblocks(t, 1, state->ctx, SHA3_512_RATE); - - sha3_512_inc_ctx_release(state); - - for (size_t i = 0; i < 64; i++) { - output[i] = t[i]; - } -} - -/************************************************* - * Name: sha3_512 - * - * Description: SHA3-512 with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen) { - uint64_t s[25]; - uint8_t t[SHA3_512_RATE]; - - /* Absorb input */ - keccak_absorb(s, SHA3_512_RATE, input, inlen, 0x06); - - /* Squeeze output */ - keccak_squeezeblocks(t, 1, s, SHA3_512_RATE); - - for (size_t i = 0; i < 64; i++) { - output[i] = t[i]; - } -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fips202.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fips202.h index c29ebd8f9d..21bc0c3f79 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fips202.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fips202.h @@ -3,169 +3,12 @@ #ifndef FIPS202_H #define FIPS202_H -#include -#include +#include -#define SHAKE128_RATE 168 -#define SHAKE256_RATE 136 -#define SHA3_256_RATE 136 -#define SHA3_384_RATE 104 -#define SHA3_512_RATE 72 - -#define PQC_SHAKEINCCTX_U64WORDS 26 -#define PQC_SHAKECTX_U64WORDS 25 - -#define PQC_SHAKEINCCTX_BYTES (sizeof(uint64_t) * 26) -#define PQC_SHAKECTX_BYTES (sizeof(uint64_t) * 25) - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} shake128incctx; - -// Context for non-incremental API -typedef struct { - uint64_t ctx[PQC_SHAKECTX_U64WORDS]; -} shake128ctx; - -// Context for incremental API -typedef struct { - uint64_t 
ctx[PQC_SHAKEINCCTX_U64WORDS]; -} shake256incctx; - -// Context for non-incremental API -typedef struct { - uint64_t ctx[PQC_SHAKECTX_U64WORDS]; -} shake256ctx; - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} sha3_256incctx; - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} sha3_384incctx; - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} sha3_512incctx; - -/* Initialize the state and absorb the provided input. - * - * This function does not support being called multiple times - * with the same state. - */ -void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen); -/* Squeeze output out of the sponge. - * - * Supports being called multiple times - */ -void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state); -/* Free the state */ -void shake128_ctx_release(shake128ctx *state); -/* Copy the state. */ -void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src); - -/* Initialize incremental hashing API */ -void shake128_inc_init(shake128incctx *state); -/* Absorb more information into the XOF. - * - * Can be called multiple times. - */ -void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen); -/* Finalize the XOF for squeezing */ -void shake128_inc_finalize(shake128incctx *state); -/* Squeeze output out of the sponge. - * - * Supports being called multiple times - */ -void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state); -/* Copy the context of the SHAKE128 XOF */ -void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src); -/* Free the context of the SHAKE128 XOF */ -void shake128_inc_ctx_release(shake128incctx *state); - -/* Initialize the state and absorb the provided input. - * - * This function does not support being called multiple times - * with the same state. - */ -void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen); -/* Squeeze output out of the sponge. - * - * Supports being called multiple times - */ -void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state); -/* Free the context held by this XOF */ -void shake256_ctx_release(shake256ctx *state); -/* Copy the context held by this XOF */ -void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src); - -/* Initialize incremental hashing API */ -void shake256_inc_init(shake256incctx *state); -void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen); -/* Prepares for squeeze phase */ -void shake256_inc_finalize(shake256incctx *state); -/* Squeeze output out of the sponge. 
- * - * Supports being called multiple times - */ -void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state); -/* Copy the state */ -void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src); -/* Free the state */ -void shake256_inc_ctx_release(shake256incctx *state); - -/* One-stop SHAKE128 call */ -void shake128(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen); - -/* One-stop SHAKE256 call */ -void shake256(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen); - -/* Initialize the incremental hashing state */ -void sha3_256_inc_init(sha3_256incctx *state); -/* Absorb blocks into SHA3 */ -void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen); -/* Obtain the output of the function and free `state` */ -void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state); -/* Copy the context */ -void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src); -/* Release the state, don't use if `_finalize` has been used */ -void sha3_256_inc_ctx_release(sha3_256incctx *state); - -void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen); - -/* Initialize the incremental hashing state */ -void sha3_384_inc_init(sha3_384incctx *state); -/* Absorb blocks into SHA3 */ -void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, size_t inlen); -/* Obtain the output of the function and free `state` */ -void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state); -/* Copy the context */ -void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src); -/* Release the state, don't use if `_finalize` has been used */ -void sha3_384_inc_ctx_release(sha3_384incctx *state); - -/* One-stop SHA3-384 shop */ -void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen); - -/* Initialize the incremental hashing state */ -void sha3_512_inc_init(sha3_512incctx *state); -/* Absorb blocks into SHA3 */ -void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen); -/* Obtain the output of the function and free `state` */ -void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state); -/* Copy the context */ -void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src); -/* Release the state, don't use if `_finalize` has been used */ -void sha3_512_inc_ctx_release(sha3_512incctx *state); - -/* One-stop SHA3-512 shop */ -void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen); +#define shake256incctx OQS_SHA3_shake256_inc_ctx +#define shake256_inc_init OQS_SHA3_shake256_inc_init +#define shake256_inc_absorb OQS_SHA3_shake256_inc_absorb +#define shake256_inc_finalize OQS_SHA3_shake256_inc_finalize +#define shake256_inc_squeeze OQS_SHA3_shake256_inc_squeeze #endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes.h deleted file mode 100644 index e35ec3705b..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes.h +++ /dev/null @@ -1,29 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -#ifndef AES_H -#define AES_H - -#include -#include - -void AES_256_ECB(const uint8_t *input, const uint8_t *key, uint8_t *output); -#define AES_ECB_encrypt AES_256_ECB - -#ifdef ENABLE_AESNI -int AES_128_CTR_NI(unsigned char *output, - size_t outputByteLen, - const unsigned char *input, - size_t inputByteLen); -int AES_128_CTR_4R_NI(unsigned char *output, - size_t outputByteLen, - const unsigned char *input, - size_t 
inputByteLen); -#define AES_128_CTR AES_128_CTR_NI -#else -int AES_128_CTR(unsigned char *output, - size_t outputByteLen, - const unsigned char *input, - size_t inputByteLen); -#endif - -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes_ni.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes_ni.c deleted file mode 100644 index dc778fc9b6..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes_ni.c +++ /dev/null @@ -1,258 +0,0 @@ -/*************************************************************************** -* This implementation is a modified version of the code, -* written by Nir Drucker and Shay Gueron -* AWS Cryptographic Algorithms Group -* (ndrucker@amazon.com, gueron@amazon.com) -* -* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. -* -* Licensed under the Apache License, Version 2.0 (the "License"). -* You may not use this file except in compliance with the License. -* A copy of the License is located at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* or in the "license" file accompanying this file. This file is distributed -* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -* express or implied. See the License for the specific language governing -* permissions and limitations under the License. -* The license is detailed in the file LICENSE.txt, and applies to this file. -* ***************************************************************************/ - -#include "aes_ni.h" -#include - -#include -#include - -#define AESENC(m, key) _mm_aesenc_si128(m, key) -#define AESENCLAST(m, key) _mm_aesenclast_si128(m, key) -#define XOR(a, b) _mm_xor_si128(a, b) -#define ADD32(a, b) _mm_add_epi32(a, b) -#define SHUF8(a, mask) _mm_shuffle_epi8(a, mask) - -#define ZERO256 _mm256_zeroall - -#define BSWAP_MASK 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f - -#ifdef VAES256 -#define VAESENC(a, key) _mm256_aesenc_epi128(a, key) -#define VAESENCLAST(a, key) _mm256_aesenclast_epi128(a, key) -#define EXTRACT128(a, imm) _mm256_extracti128_si256(a, imm) -#define XOR256(a, b) _mm256_xor_si256(a,b) -#define ADD32_256(a, b) _mm256_add_epi32(a,b) -#define SHUF8_256(a, mask) _mm256_shuffle_epi8(a, mask) -#endif - -#ifdef VAES512 -#define VAESENC(a, key) _mm512_aesenc_epi128(a, key) -#define VAESENCLAST(a, key) _mm512_aesenclast_epi128(a, key) -#define EXTRACT128(a, imm) _mm512_extracti64x2_epi64(a, imm) -#define XOR512(a, b) _mm512_xor_si512(a,b) -#define ADD32_512(a, b) _mm512_add_epi32(a,b) -#define SHUF8_512(a, mask) _mm512_shuffle_epi8(a, mask) -#endif - -_INLINE_ __m128i load_m128i(IN const uint8_t *ctr) -{ - return _mm_set_epi8(ctr[0], ctr[1], ctr[2], ctr[3], - ctr[4], ctr[5], ctr[6], ctr[7], - ctr[8], ctr[9], ctr[10], ctr[11], - ctr[12], ctr[13], ctr[14], ctr[15]); -} - -_INLINE_ __m128i loadr_m128i(IN const uint8_t *ctr) -{ - return _mm_setr_epi8(ctr[0], ctr[1], ctr[2], ctr[3], - ctr[4], ctr[5], ctr[6], ctr[7], - ctr[8], ctr[9], ctr[10], ctr[11], - ctr[12], ctr[13], ctr[14], ctr[15]); -} - -void aes256_enc(OUT uint8_t *ct, - IN const uint8_t *pt, - IN const aes256_ks_t *ks) { - uint32_t i = 0; - __m128i block = loadr_m128i(pt); - - block = XOR(block, ks->keys[0]); - for (i = 1; i < AES256_ROUNDS; i++) { - block = AESENC(block, ks->keys[i]); - } - block = AESENCLAST(block, ks->keys[AES256_ROUNDS]); - - _mm_storeu_si128((void*)ct, block); - - // Delete secrets from registers if any. 
- ZERO256(); -} - -void aes256_ctr_enc(OUT uint8_t *ct, - IN const uint8_t *ctr, - IN const uint32_t num_blocks, - IN const aes256_ks_t *ks) -{ - __m128i ctr_block = load_m128i(ctr); - - const __m128i bswap_mask = _mm_set_epi32(BSWAP_MASK); - const __m128i one = _mm_set_epi32(0,0,0,1); - - __m128i block = SHUF8(ctr_block, bswap_mask); - - for (uint32_t bidx = 0; bidx < num_blocks; bidx++) - { - block = XOR(block, ks->keys[0]); - for (uint32_t i = 1; i < AES256_ROUNDS; i++) { - block = AESENC(block, ks->keys[i]); - } - block = AESENCLAST(block, ks->keys[AES256_ROUNDS]); - - //We use memcpy to avoid align casting. - _mm_storeu_si128((void*)&ct[16*bidx], block); - - ctr_block = ADD32(ctr_block, one); - block = SHUF8(ctr_block, bswap_mask); - } - - // Delete secrets from registers if any. - ZERO256(); -} - -#ifdef VAES256 -_INLINE_ void load_ks(OUT __m256i ks256[AES256_ROUNDS + 1], - IN const aes256_ks_t *ks) -{ - for(uint32_t i = 0; i < AES256_ROUNDS + 1; i++) - { - ks256[i] = _mm256_broadcastsi128_si256(ks->keys[i]); - } -} - -// NIST 800-90A Table 3, Section 10.2.1 (no derivation function) states that -// max_number_of_bits_per_request is min((2^ctr_len - 4) x block_len, 2^19) <= 2^19 -// Therefore the maximal number of blocks (16 bytes) is 2^19/128 = 2^19/2^7 = 2^12 < 2^32 -// Here num_blocks is assumed to be less then 2^32. -// It is the caller responsiblity to ensure it. -void aes256_ctr_enc256(OUT uint8_t *ct, - IN const uint8_t *ctr, - IN const uint32_t num_blocks, - IN const aes256_ks_t *ks) -{ - const uint64_t num_par_blocks = num_blocks/2; - const uint64_t blocks_rem = num_blocks - (2*(num_par_blocks)); - - __m256i ks256[AES256_ROUNDS + 1]; - load_ks(ks256, ks); - - __m128i single_block = load_m128i(ctr); - __m256i ctr_blocks = _mm256_broadcastsi128_si256(single_block); - - // Preparing the masks - const __m256i bswap_mask = _mm256_set_epi32(BSWAP_MASK, BSWAP_MASK); - const __m256i two = _mm256_set_epi32(0,0,0,2,0,0,0,2); - const __m256i init = _mm256_set_epi32(0,0,0,1,0,0,0,0); - - // Initialize two parallel counters - ctr_blocks = ADD32_256(ctr_blocks, init); - __m256i p = SHUF8_256(ctr_blocks, bswap_mask); - - for (uint32_t block_idx = 0; block_idx < num_par_blocks; block_idx++) - { - p = XOR256(p, ks256[0]); - for (uint32_t i = 1; i < AES256_ROUNDS; i++) - { - p = VAESENC(p, ks256[i]); - } - p = VAESENCLAST(p, ks256[AES256_ROUNDS]); - - // We use memcpy to avoid align casting. - _mm256_storeu_si256((__m256i *)&ct[PAR_AES_BLOCK_SIZE * block_idx], p); - - // Increase the two counters in parallel - ctr_blocks = ADD32_256(ctr_blocks, two); - p = SHUF8_256(ctr_blocks, bswap_mask); - } - - if(0 != blocks_rem) - { - single_block = EXTRACT128(p, 0); - aes256_ctr_enc(&ct[PAR_AES_BLOCK_SIZE * num_par_blocks], - (const uint8_t*)&single_block, blocks_rem, ks); - } - - // Delete secrets from registers if any. - ZERO256(); -} - -#endif //VAES256 - -#ifdef VAES512 - -_INLINE_ void load_ks(OUT __m512i ks512[AES256_ROUNDS + 1], - IN const aes256_ks_t *ks) -{ - for(uint32_t i = 0; i < AES256_ROUNDS + 1; i++) - { - ks512[i] = _mm512_broadcast_i32x4(ks->keys[i]); - } -} - -// NIST 800-90A Table 3, Section 10.2.1 (no derivation function) states that -// max_number_of_bits_per_request is min((2^ctr_len - 4) x block_len, 2^19) <= 2^19 -// Therefore the maximal number of blocks (16 bytes) is 2^19/128 = 2^19/2^7 = 2^12 < 2^32 -// Here num_blocks is assumed to be less then 2^32. -// It is the caller responsiblity to ensure it. 
-void aes256_ctr_enc512(OUT uint8_t *ct, - IN const uint8_t *ctr, - IN const uint32_t num_blocks, - IN const aes256_ks_t *ks) -{ - const uint64_t num_par_blocks = num_blocks/4; - const uint64_t blocks_rem = num_blocks - (4*(num_par_blocks)); - - __m512i ks512[AES256_ROUNDS + 1]; - load_ks(ks512, ks); - - __m128i single_block = load_m128i(ctr); - __m512i ctr_blocks = _mm512_broadcast_i32x4(single_block); - - // Preparing the masks - const __m512i bswap_mask = _mm512_set_epi32(BSWAP_MASK, BSWAP_MASK, - BSWAP_MASK, BSWAP_MASK); - const __m512i four = _mm512_set_epi32(0,0,0,4,0,0,0,4,0,0,0,4,0,0,0,4); - const __m512i init = _mm512_set_epi32(0,0,0,3,0,0,0,2,0,0,0,1,0,0,0,0); - - // Initialize four parallel counters - ctr_blocks = ADD32_512(ctr_blocks, init); - __m512i p = SHUF8_512(ctr_blocks, bswap_mask); - - for (uint32_t block_idx = 0; block_idx < num_par_blocks; block_idx++) - { - p = XOR512(p, ks512[0]); - for (uint32_t i = 1; i < AES256_ROUNDS; i++) - { - p = VAESENC(p, ks512[i]); - } - p = VAESENCLAST(p, ks512[AES256_ROUNDS]); - - - // We use memcpy to avoid align casting. - _mm512_storeu_si512(&ct[PAR_AES_BLOCK_SIZE * block_idx], p); - - // Increase the four counters in parallel - ctr_blocks = ADD32_512(ctr_blocks, four); - p = SHUF8_512(ctr_blocks, bswap_mask); - } - - if(0 != blocks_rem) - { - single_block = EXTRACT128(p, 0); - aes256_ctr_enc(&ct[PAR_AES_BLOCK_SIZE * num_par_blocks], - (const uint8_t*)&single_block, blocks_rem, ks); - } - - // Delete secrets from registers if any. - ZERO256(); -} - -#endif //VAES512 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes_ni.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes_ni.h deleted file mode 100644 index 3d2b21ecf5..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/aes_ni.h +++ /dev/null @@ -1,85 +0,0 @@ -/*************************************************************************** -* Written by Nir Drucker and Shay Gueron -* AWS Cryptographic Algorithms Group -* (ndrucker@amazon.com, gueron@amazon.com) -* -* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. -* -* Licensed under the Apache License, Version 2.0 (the "License"). -* You may not use this file except in compliance with the License. -* A copy of the License is located at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* or in the "license" file accompanying this file. This file is distributed -* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -* express or implied. See the License for the specific language governing -* permissions and limitations under the License. -* The license is detailed in the file LICENSE.txt, and applies to this file. -* ***************************************************************************/ - -#pragma once - -#include -#include -#include "defs.h" - -#define MAX_AES_INVOKATION (MASK(32)) - -#define AES256_KEY_SIZE (32ULL) -#define AES256_KEY_BITS (AES256_KEY_SIZE * 8) -#define AES_BLOCK_SIZE (16ULL) -#define AES256_ROUNDS (14ULL) - -#ifdef VAES256 -#define PAR_AES_BLOCK_SIZE (AES_BLOCK_SIZE*2) -#elif defined(VAES512) -#define PAR_AES_BLOCK_SIZE (AES_BLOCK_SIZE*4) -#endif - -typedef ALIGN(16) struct aes256_key_s { - uint8_t raw[AES256_KEY_SIZE]; -} aes256_key_t; - -typedef ALIGN(16) struct aes256_ks_s { - __m128i keys[AES256_ROUNDS + 1]; -} aes256_ks_t; - -// The ks parameter must be 16 bytes aligned! 
-EXTERNC void aes256_key_expansion(OUT aes256_ks_t *ks, - IN const aes256_key_t *key); - -// Encrypt one 128-bit block ct = E(pt,ks) -void aes256_enc(OUT uint8_t *ct, - IN const uint8_t *pt, - IN const aes256_ks_t *ks); - -// Encrypt num_blocks 128-bit blocks -// ct[15:0] = E(pt[15:0],ks) -// ct[31:16] = E(pt[15:0] + 1,ks) -// ... -// ct[16*num_blocks - 1:16*(num_blocks-1)] = E(pt[15:0] + num_blocks,ks) -void aes256_ctr_enc(OUT uint8_t *ct, - IN const uint8_t *pt, - IN const uint32_t num_blocks, - IN const aes256_ks_t *ks); - -// Encrypt num_blocks 128-bit blocks using VAES (AVX-2) -// ct[15:0] = E(pt[15:0],ks) -// ct[31:16] = E(pt[15:0] + 1,ks) -// ... -// ct[16*num_blocks - 1:16*(num_blocks-1)] = E(pt[15:0] + num_blocks,ks) -void aes256_ctr_enc256(OUT uint8_t *ct, - IN const uint8_t *ctr, - IN const uint32_t num_blocks, - IN const aes256_ks_t *ks); - -// Encrypt num_blocks 128-bit blocks using VAES (AVX512) -// ct[15:0] = E(pt[15:0],ks) -// ct[31:16] = E(pt[15:0] + 1,ks) -// ... -// ct[16*num_blocks - 1:16*(num_blocks-1)] = E(pt[15:0] + num_blocks,ks) -void aes256_ctr_enc512(OUT uint8_t *ct, - IN const uint8_t *ctr, - IN const uint32_t num_blocks, - IN const aes256_ks_t *ks); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/algebra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/algebra.c new file mode 100644 index 0000000000..50629f9fec --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/algebra.c @@ -0,0 +1,280 @@ +#include +#include "internal.h" + +// Internal helper functions + +void +quat_alg_init_set_ui(quat_alg_t *alg, unsigned int p) +{ + ibz_t bp; + ibz_init(&bp); + ibz_set(&bp, p); + quat_alg_init_set(alg, &bp); + ibz_finalize(&bp); +} + +void +quat_alg_coord_mul(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b, const quat_alg_t *alg) +{ + ibz_t prod; + ibz_vec_4_t sum; + ibz_init(&prod); + ibz_vec_4_init(&sum); + + ibz_set(&(sum[0]), 0); + ibz_set(&(sum[1]), 0); + ibz_set(&(sum[2]), 0); + ibz_set(&(sum[3]), 0); + + // compute 1 coordinate + ibz_mul(&prod, &((*a)[2]), &((*b)[2])); + ibz_sub(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[3])); + ibz_sub(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&(sum[0]), &(sum[0]), &(alg->p)); + ibz_mul(&prod, &((*a)[0]), &((*b)[0])); + ibz_add(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[1])); + ibz_sub(&(sum[0]), &(sum[0]), &prod); + // compute i coordiante + ibz_mul(&prod, &((*a)[2]), &((*b)[3])); + ibz_add(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[2])); + ibz_sub(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&(sum[1]), &(sum[1]), &(alg->p)); + ibz_mul(&prod, &((*a)[0]), &((*b)[1])); + ibz_add(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[0])); + ibz_add(&(sum[1]), &(sum[1]), &prod); + // compute j coordiante + ibz_mul(&prod, &((*a)[0]), &((*b)[2])); + ibz_add(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &((*a)[2]), &((*b)[0])); + ibz_add(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[3])); + ibz_sub(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[1])); + ibz_add(&(sum[2]), &(sum[2]), &prod); + // compute ij coordiante + ibz_mul(&prod, &((*a)[0]), &((*b)[3])); + ibz_add(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &((*a)[3]), &((*b)[0])); + ibz_add(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &((*a)[2]), &((*b)[1])); + ibz_sub(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &((*a)[1]), &((*b)[2])); + ibz_add(&(sum[3]), &(sum[3]), &prod); + + ibz_copy(&((*res)[0]), 
&(sum[0])); + ibz_copy(&((*res)[1]), &(sum[1])); + ibz_copy(&((*res)[2]), &(sum[2])); + ibz_copy(&((*res)[3]), &(sum[3])); + + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} + +void +quat_alg_equal_denom(quat_alg_elem_t *res_a, quat_alg_elem_t *res_b, const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + ibz_t gcd, r; + ibz_init(&gcd); + ibz_init(&r); + ibz_gcd(&gcd, &(a->denom), &(b->denom)); + // temporarily set res_a.denom to a.denom/gcd, and res_b.denom to b.denom/gcd + ibz_div(&(res_a->denom), &r, &(a->denom), &gcd); + ibz_div(&(res_b->denom), &r, &(b->denom), &gcd); + for (int i = 0; i < 4; i++) { + // multiply coordiates by reduced denominators from the other element + ibz_mul(&(res_a->coord[i]), &(a->coord[i]), &(res_b->denom)); + ibz_mul(&(res_b->coord[i]), &(b->coord[i]), &(res_a->denom)); + } + // multiply both reduced denominators + ibz_mul(&(res_a->denom), &(res_a->denom), &(res_b->denom)); + // multiply them by the gcd to get the new common denominator + ibz_mul(&(res_b->denom), &(res_a->denom), &gcd); + ibz_mul(&(res_a->denom), &(res_a->denom), &gcd); + ibz_finalize(&gcd); + ibz_finalize(&r); +} + +// Public Functions + +void +quat_alg_add(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + quat_alg_elem_t res_a, res_b; + quat_alg_elem_init(&res_a); + quat_alg_elem_init(&res_b); + // put both on the same denominator + quat_alg_equal_denom(&res_a, &res_b, a, b); + // then add + ibz_copy(&(res->denom), &(res_a.denom)); + ibz_vec_4_add(&(res->coord), &(res_a.coord), &(res_b.coord)); + quat_alg_elem_finalize(&res_a); + quat_alg_elem_finalize(&res_b); +} + +void +quat_alg_sub(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + quat_alg_elem_t res_a, res_b; + quat_alg_elem_init(&res_a); + quat_alg_elem_init(&res_b); + // put both on the same denominator + quat_alg_equal_denom(&res_a, &res_b, a, b); + // then substract + ibz_copy(&res->denom, &res_a.denom); + ibz_vec_4_sub(&res->coord, &res_a.coord, &res_b.coord); + quat_alg_elem_finalize(&res_a); + quat_alg_elem_finalize(&res_b); +} + +void +quat_alg_mul(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b, const quat_alg_t *alg) +{ + // denominator: product of denominators + ibz_mul(&(res->denom), &(a->denom), &(b->denom)); + quat_alg_coord_mul(&(res->coord), &(a->coord), &(b->coord), alg); +} + +void +quat_alg_norm(ibz_t *res_num, ibz_t *res_denom, const quat_alg_elem_t *a, const quat_alg_t *alg) +{ + ibz_t r, g; + quat_alg_elem_t norm; + ibz_init(&r); + ibz_init(&g); + quat_alg_elem_init(&norm); + + quat_alg_conj(&norm, a); + quat_alg_mul(&norm, a, &norm, alg); + ibz_gcd(&g, &(norm.coord[0]), &(norm.denom)); + ibz_div(res_num, &r, &(norm.coord[0]), &g); + ibz_div(res_denom, &r, &(norm.denom), &g); + ibz_abs(res_denom, res_denom); + ibz_abs(res_num, res_num); + assert(ibz_cmp(res_denom, &ibz_const_zero) > 0); + + quat_alg_elem_finalize(&norm); + ibz_finalize(&r); + ibz_finalize(&g); +} + +void +quat_alg_scalar(quat_alg_elem_t *elem, const ibz_t *numerator, const ibz_t *denominator) +{ + ibz_copy(&(elem->denom), denominator); + ibz_copy(&(elem->coord[0]), numerator); + ibz_set(&(elem->coord[1]), 0); + ibz_set(&(elem->coord[2]), 0); + ibz_set(&(elem->coord[3]), 0); +} + +void +quat_alg_conj(quat_alg_elem_t *conj, const quat_alg_elem_t *x) +{ + ibz_copy(&(conj->denom), &(x->denom)); + ibz_copy(&(conj->coord[0]), &(x->coord[0])); + ibz_neg(&(conj->coord[1]), &(x->coord[1])); + ibz_neg(&(conj->coord[2]), &(x->coord[2])); + ibz_neg(&(conj->coord[3]), 
&(x->coord[3])); +} + +void +quat_alg_make_primitive(ibz_vec_4_t *primitive_x, ibz_t *content, const quat_alg_elem_t *x, const quat_lattice_t *order) +{ + int ok UNUSED = quat_lattice_contains(primitive_x, order, x); + assert(ok); + ibz_vec_4_content(content, primitive_x); + ibz_t r; + ibz_init(&r); + for (int i = 0; i < 4; i++) { + ibz_div(*primitive_x + i, &r, *primitive_x + i, content); + } + ibz_finalize(&r); +} + +void +quat_alg_normalize(quat_alg_elem_t *x) +{ + ibz_t gcd, sign, r; + ibz_init(&gcd); + ibz_init(&sign); + ibz_init(&r); + ibz_vec_4_content(&gcd, &(x->coord)); + ibz_gcd(&gcd, &gcd, &(x->denom)); + ibz_div(&(x->denom), &r, &(x->denom), &gcd); + ibz_vec_4_scalar_div(&(x->coord), &gcd, &(x->coord)); + ibz_set(&sign, 2 * (0 > ibz_cmp(&ibz_const_zero, &(x->denom))) - 1); + ibz_vec_4_scalar_mul(&(x->coord), &sign, &(x->coord)); + ibz_mul(&(x->denom), &sign, &(x->denom)); + ibz_finalize(&gcd); + ibz_finalize(&sign); + ibz_finalize(&r); +} + +int +quat_alg_elem_equal(const quat_alg_elem_t *a, const quat_alg_elem_t *b) +{ + quat_alg_elem_t diff; + quat_alg_elem_init(&diff); + quat_alg_sub(&diff, a, b); + int res = quat_alg_elem_is_zero(&diff); + quat_alg_elem_finalize(&diff); + return (res); +} + +int +quat_alg_elem_is_zero(const quat_alg_elem_t *x) +{ + int res = ibz_vec_4_is_zero(&(x->coord)); + return (res); +} + +void +quat_alg_elem_set(quat_alg_elem_t *elem, int32_t denom, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) +{ + ibz_set(&(elem->coord[0]), coord0); + ibz_set(&(elem->coord[1]), coord1); + ibz_set(&(elem->coord[2]), coord2); + ibz_set(&(elem->coord[3]), coord3); + + ibz_set(&(elem->denom), denom); +} + +void +quat_alg_elem_copy(quat_alg_elem_t *copy, const quat_alg_elem_t *copied) +{ + ibz_copy(©->denom, &copied->denom); + ibz_copy(©->coord[0], &copied->coord[0]); + ibz_copy(©->coord[1], &copied->coord[1]); + ibz_copy(©->coord[2], &copied->coord[2]); + ibz_copy(©->coord[3], &copied->coord[3]); +} + +// helper functions for lattices +void +quat_alg_elem_copy_ibz(quat_alg_elem_t *elem, + const ibz_t *denom, + const ibz_t *coord0, + const ibz_t *coord1, + const ibz_t *coord2, + const ibz_t *coord3) +{ + ibz_copy(&(elem->coord[0]), coord0); + ibz_copy(&(elem->coord[1]), coord1); + ibz_copy(&(elem->coord[2]), coord2); + ibz_copy(&(elem->coord[3]), coord3); + + ibz_copy(&(elem->denom), denom); +} + +void +quat_alg_elem_mul_by_scalar(quat_alg_elem_t *res, const ibz_t *scalar, const quat_alg_elem_t *elem) +{ + for (int i = 0; i < 4; i++) { + ibz_mul(&(res->coord[i]), &(elem->coord[i]), scalar); + } + ibz_copy(&(res->denom), &(elem->denom)); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.c deleted file mode 100644 index 983ba49adf..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.c +++ /dev/null @@ -1,201 +0,0 @@ -/* Copyright (c) 2017, Google Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -/*************************************************************************** - * Small modification by Nir Drucker and Shay Gueron - * AWS Cryptographic Algorithms Group - * (ndrucker@amazon.com, gueron@amazon.com) - * include: - * 1) Use memcpy/memset instead of OPENSSL_memcpy/memset - * 2) Include aes.h as the underlying aes code - * 3) Modifying the drbg structure - * ***************************************************************************/ - -#include "ctr_drbg.h" -#include - - -// Section references in this file refer to SP 800-90Ar1: -// http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-90Ar1.pdf - -int CTR_DRBG_init(CTR_DRBG_STATE *drbg, - const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], - const uint8_t *personalization, size_t personalization_len) { - // Section 10.2.1.3.1 - if (personalization_len > CTR_DRBG_ENTROPY_LEN) { - return 0; - } - - uint8_t seed_material[CTR_DRBG_ENTROPY_LEN]; - memcpy(seed_material, entropy, CTR_DRBG_ENTROPY_LEN); - - for (size_t i = 0; i < personalization_len; i++) { - seed_material[i] ^= personalization[i]; - } - - // Section 10.2.1.2 - // kInitMask is the result of encrypting blocks with big-endian value 1, 2 - // and 3 with the all-zero AES-256 key. - static const uint8_t kInitMask[CTR_DRBG_ENTROPY_LEN] = { - 0x53, 0x0f, 0x8a, 0xfb, 0xc7, 0x45, 0x36, 0xb9, 0xa9, 0x63, 0xb4, 0xf1, - 0xc4, 0xcb, 0x73, 0x8b, 0xce, 0xa7, 0x40, 0x3d, 0x4d, 0x60, 0x6b, 0x6e, - 0x07, 0x4e, 0xc5, 0xd3, 0xba, 0xf3, 0x9d, 0x18, 0x72, 0x60, 0x03, 0xca, - 0x37, 0xa6, 0x2a, 0x74, 0xd1, 0xa2, 0xf5, 0x8e, 0x75, 0x06, 0x35, 0x8e, - }; - - for (size_t i = 0; i < sizeof(kInitMask); i++) { - seed_material[i] ^= kInitMask[i]; - } - - aes256_key_t key; - memcpy(key.raw, seed_material, 32); - memcpy(drbg->counter.bytes, seed_material + 32, 16); - - aes256_key_expansion(&drbg->ks, &key); - drbg->reseed_counter = 1; - - return 1; -} - -// ctr_inc adds |n| to the last four bytes of |drbg->counter|, treated as a -// big-endian number. -static void ctr32_add(CTR_DRBG_STATE *drbg, uint32_t n) { - drbg->counter.words[3] = - CRYPTO_bswap4(CRYPTO_bswap4(drbg->counter.words[3]) + n); -} - -static int ctr_drbg_update(CTR_DRBG_STATE *drbg, const uint8_t *data, - size_t data_len) { - // Per section 10.2.1.2, |data_len| must be |CTR_DRBG_ENTROPY_LEN|. Here, we - // allow shorter inputs and right-pad them with zeros. This is equivalent to - // the specified algorithm but saves a copy in |CTR_DRBG_generate|. 
- if (data_len > CTR_DRBG_ENTROPY_LEN) { - return 0; - } - - uint8_t temp[CTR_DRBG_ENTROPY_LEN]; - for (size_t i = 0; i < CTR_DRBG_ENTROPY_LEN; i += AES_BLOCK_SIZE) { - ctr32_add(drbg, 1); - aes256_enc(temp + i, drbg->counter.bytes, &drbg->ks); - } - - for (size_t i = 0; i < data_len; i++) { - temp[i] ^= data[i]; - } - - aes256_key_t key; - memcpy(key.raw, temp, 32); - memcpy(drbg->counter.bytes, temp + 32, 16); - aes256_key_expansion(&drbg->ks, &key); - - return 1; -} - -int CTR_DRBG_reseed(CTR_DRBG_STATE *drbg, - const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], - const uint8_t *additional_data, - size_t additional_data_len) { - // Section 10.2.1.4 - uint8_t entropy_copy[CTR_DRBG_ENTROPY_LEN]; - - if (additional_data_len > 0) { - if (additional_data_len > CTR_DRBG_ENTROPY_LEN) { - return 0; - } - - memcpy(entropy_copy, entropy, CTR_DRBG_ENTROPY_LEN); - for (size_t i = 0; i < additional_data_len; i++) { - entropy_copy[i] ^= additional_data[i]; - } - - entropy = entropy_copy; - } - - if (!ctr_drbg_update(drbg, entropy, CTR_DRBG_ENTROPY_LEN)) { - return 0; - } - - drbg->reseed_counter = 1; - - return 1; -} - -int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out, size_t out_len, - const uint8_t *additional_data, - size_t additional_data_len) { - if (additional_data_len != 0 && - !ctr_drbg_update(drbg, additional_data, additional_data_len)) { - return 0; - } - - // kChunkSize is used to interact better with the cache. Since the AES-CTR - // code assumes that it's encrypting rather than just writing keystream, the - // buffer has to be zeroed first. Without chunking, large reads would zero - // the whole buffer, flushing the L1 cache, and then do another pass (missing - // the cache every time) to “encrypt” it. The code can avoid this by - // chunking. - static const size_t kChunkSize = 8 * 1024; - - while (out_len >= AES_BLOCK_SIZE) { - size_t todo = kChunkSize; - if (todo > out_len) { - todo = out_len; - } - - todo &= ~(AES_BLOCK_SIZE - 1); - - const size_t num_blocks = todo / AES_BLOCK_SIZE; - if (1) { - memset(out, 0, todo); - ctr32_add(drbg, 1); -#ifdef VAES512 - aes256_ctr_enc512(out, drbg->counter.bytes, num_blocks, &drbg->ks); -#elif defined(VAES256) - aes256_ctr_enc256(out, drbg->counter.bytes, num_blocks, &drbg->ks); -#else - aes256_ctr_enc(out, drbg->counter.bytes, num_blocks, &drbg->ks); -#endif - ctr32_add(drbg, num_blocks - 1); - } else { - for (size_t i = 0; i < todo; i += AES_BLOCK_SIZE) { - ctr32_add(drbg, 1); - aes256_enc(&out[i], drbg->counter.bytes, &drbg->ks); - } - } - - out += todo; - out_len -= todo; - } - - if (out_len > 0) { - uint8_t block[AES_BLOCK_SIZE]; - ctr32_add(drbg, 1); - aes256_enc(block, drbg->counter.bytes, &drbg->ks); - - memcpy(out, block, out_len); - } - - // Right-padding |additional_data| in step 2.2 is handled implicitly by - // |ctr_drbg_update|, to save a copy. - if (!ctr_drbg_update(drbg, additional_data, additional_data_len)) { - return 0; - } - - drbg->reseed_counter++; - return 1; -} - -void CTR_DRBG_clear(CTR_DRBG_STATE *drbg) { - secure_clean((uint8_t *)drbg, sizeof(CTR_DRBG_STATE)); -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.h deleted file mode 100644 index 2d1b1f3f0c..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ctr_drbg.h +++ /dev/null @@ -1,78 +0,0 @@ -/* Copyright (c) 2017, Google Inc. 
- * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -/*************************************************************************** -* Small modification by Nir Drucker and Shay Gueron -* AWS Cryptographic Algorithms Group -* (ndrucker@amazon.com, gueron@amazon.com) -* include: -* 1) Use memcpy/memset instead of OPENSSL_memcpy/memset -* 2) Include aes.h as the underlying aes code -* 3) Modifying the drbg structure -* ***************************************************************************/ - -#pragma once - -#if defined(__cplusplus) -extern "C" { -#endif - -#include "aes_ni.h" - -// CTR_DRBG_STATE contains the state of a CTR_DRBG based on AES-256. See SP -// 800-90Ar1. -typedef struct { - aes256_ks_t ks; - union { - uint8_t bytes[16]; - uint32_t words[4]; - } counter; - uint64_t reseed_counter; -} CTR_DRBG_STATE; - -// See SP 800-90Ar1, table 3. -#define CTR_DRBG_ENTROPY_LEN 48 - -// CTR_DRBG_init initialises |*drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of -// entropy in |entropy| and, optionally, a personalization string up to -// |CTR_DRBG_ENTROPY_LEN| bytes in length. It returns one on success and zero -// on error. -int CTR_DRBG_init(CTR_DRBG_STATE *drbg, - const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], - const uint8_t *personalization, - size_t personalization_len); - -// CTR_DRBG_reseed reseeds |drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of entropy -// in |entropy| and, optionally, up to |CTR_DRBG_ENTROPY_LEN| bytes of -// additional data. It returns one on success or zero on error. -int CTR_DRBG_reseed(CTR_DRBG_STATE *drbg, - const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], - const uint8_t *additional_data, - size_t additional_data_len); - -// CTR_DRBG_generate processes to up |CTR_DRBG_ENTROPY_LEN| bytes of additional -// data (if any) and then writes |out_len| random bytes to |out|. It returns one on success or -// zero on error. -int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out, - size_t out_len, - const uint8_t *additional_data, - size_t additional_data_len); - -// CTR_DRBG_clear zeroises the state of |drbg|. 
-void CTR_DRBG_clear(CTR_DRBG_STATE *drbg); - - -#if defined(__cplusplus) -} // extern C -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim2.c new file mode 100644 index 0000000000..b31ae7771a --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim2.c @@ -0,0 +1,132 @@ +#include +#include "internal.h" + +// internal helpers, also for other files +void +ibz_vec_2_set(ibz_vec_2_t *vec, int a0, int a1) +{ + ibz_set(&((*vec)[0]), a0); + ibz_set(&((*vec)[1]), a1); +} +void +ibz_mat_2x2_set(ibz_mat_2x2_t *mat, int a00, int a01, int a10, int a11) +{ + ibz_set(&((*mat)[0][0]), a00); + ibz_set(&((*mat)[0][1]), a01); + ibz_set(&((*mat)[1][0]), a10); + ibz_set(&((*mat)[1][1]), a11); +} + +void +ibz_mat_2x2_copy(ibz_mat_2x2_t *copy, const ibz_mat_2x2_t *copied) +{ + ibz_copy(&((*copy)[0][0]), &((*copied)[0][0])); + ibz_copy(&((*copy)[0][1]), &((*copied)[0][1])); + ibz_copy(&((*copy)[1][0]), &((*copied)[1][0])); + ibz_copy(&((*copy)[1][1]), &((*copied)[1][1])); +} + +void +ibz_mat_2x2_add(ibz_mat_2x2_t *sum, const ibz_mat_2x2_t *a, const ibz_mat_2x2_t *b) +{ + ibz_add(&((*sum)[0][0]), &((*a)[0][0]), &((*b)[0][0])); + ibz_add(&((*sum)[0][1]), &((*a)[0][1]), &((*b)[0][1])); + ibz_add(&((*sum)[1][0]), &((*a)[1][0]), &((*b)[1][0])); + ibz_add(&((*sum)[1][1]), &((*a)[1][1]), &((*b)[1][1])); +} + +void +ibz_mat_2x2_det_from_ibz(ibz_t *det, const ibz_t *a11, const ibz_t *a12, const ibz_t *a21, const ibz_t *a22) +{ + ibz_t prod; + ibz_init(&prod); + ibz_mul(&prod, a12, a21); + ibz_mul(det, a11, a22); + ibz_sub(det, det, &prod); + ibz_finalize(&prod); +} + +void +ibz_mat_2x2_eval(ibz_vec_2_t *res, const ibz_mat_2x2_t *mat, const ibz_vec_2_t *vec) +{ + ibz_t prod; + ibz_vec_2_t matvec; + ibz_init(&prod); + ibz_vec_2_init(&matvec); + ibz_mul(&prod, &((*mat)[0][0]), &((*vec)[0])); + ibz_copy(&(matvec[0]), &prod); + ibz_mul(&prod, &((*mat)[0][1]), &((*vec)[1])); + ibz_add(&(matvec[0]), &(matvec[0]), &prod); + ibz_mul(&prod, &((*mat)[1][0]), &((*vec)[0])); + ibz_copy(&(matvec[1]), &prod); + ibz_mul(&prod, &((*mat)[1][1]), &((*vec)[1])); + ibz_add(&(matvec[1]), &(matvec[1]), &prod); + ibz_copy(&((*res)[0]), &(matvec[0])); + ibz_copy(&((*res)[1]), &(matvec[1])); + ibz_finalize(&prod); + ibz_vec_2_finalize(&matvec); +} + +// modular 2x2 operations + +void +ibz_2x2_mul_mod(ibz_mat_2x2_t *prod, const ibz_mat_2x2_t *mat_a, const ibz_mat_2x2_t *mat_b, const ibz_t *m) +{ + ibz_t mul; + ibz_mat_2x2_t sums; + ibz_init(&mul); + ibz_mat_2x2_init(&sums); + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_set(&(sums[i][j]), 0); + } + } + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + for (int k = 0; k < 2; k++) { + ibz_mul(&mul, &((*mat_a)[i][k]), &((*mat_b)[k][j])); + ibz_add(&(sums[i][j]), &(sums[i][j]), &mul); + ibz_mod(&(sums[i][j]), &(sums[i][j]), m); + } + } + } + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_copy(&((*prod)[i][j]), &(sums[i][j])); + } + } + ibz_finalize(&mul); + ibz_mat_2x2_finalize(&sums); +} + +int +ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m) +{ + ibz_t det, prod; + ibz_init(&det); + ibz_init(&prod); + ibz_mul(&det, &((*mat)[0][0]), &((*mat)[1][1])); + ibz_mod(&det, &det, m); + ibz_mul(&prod, &((*mat)[0][1]), &((*mat)[1][0])); + ibz_sub(&det, &det, &prod); + ibz_mod(&det, &det, m); + int res = ibz_invmod(&det, &det, m); + // return 0 matrix if non invertible determinant + ibz_set(&prod, res); + ibz_mul(&det, 
&det, &prod); + // compute inverse + ibz_copy(&prod, &((*mat)[0][0])); + ibz_copy(&((*inv)[0][0]), &((*mat)[1][1])); + ibz_copy(&((*inv)[1][1]), &prod); + ibz_neg(&((*inv)[1][0]), &((*mat)[1][0])); + ibz_neg(&((*inv)[0][1]), &((*mat)[0][1])); + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_mul(&((*inv)[i][j]), &((*inv)[i][j]), &det); + ibz_mod(&((*inv)[i][j]), &((*inv)[i][j]), m); + } + } + ibz_finalize(&det); + ibz_finalize(&prod); + return (res); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim4.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim4.c new file mode 100644 index 0000000000..495dc2dcb2 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim4.c @@ -0,0 +1,470 @@ +#include +#include "internal.h" + +// internal helper functions +void +ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t *b) +{ + ibz_mat_4x4_t mat; + ibz_t prod; + ibz_init(&prod); + ibz_mat_4x4_init(&mat); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&(mat[i][j]), 0); + for (int k = 0; k < 4; k++) { + ibz_mul(&prod, &((*a)[i][k]), &((*b)[k][j])); + ibz_add(&(mat[i][j]), &(mat[i][j]), &prod); + } + } + } + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&((*res)[i][j]), &(mat[i][j])); + } + } + ibz_mat_4x4_finalize(&mat); + ibz_finalize(&prod); +} + +// helper functions for lattices +void +ibz_vec_4_set(ibz_vec_4_t *vec, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) +{ + ibz_set(&((*vec)[0]), coord0); + ibz_set(&((*vec)[1]), coord1); + ibz_set(&((*vec)[2]), coord2); + ibz_set(&((*vec)[3]), coord3); +} + +void +ibz_vec_4_copy(ibz_vec_4_t *new, const ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_copy(&((*new)[i]), &((*vec)[i])); + } +} + +void +ibz_vec_4_copy_ibz(ibz_vec_4_t *res, const ibz_t *coord0, const ibz_t *coord1, const ibz_t *coord2, const ibz_t *coord3) +{ + ibz_copy(&((*res)[0]), coord0); + ibz_copy(&((*res)[1]), coord1); + ibz_copy(&((*res)[2]), coord2); + ibz_copy(&((*res)[3]), coord3); +} + +void +ibz_vec_4_content(ibz_t *content, const ibz_vec_4_t *v) +{ + ibz_gcd(content, &((*v)[0]), &((*v)[1])); + ibz_gcd(content, &((*v)[2]), content); + ibz_gcd(content, &((*v)[3]), content); +} + +void +ibz_vec_4_negate(ibz_vec_4_t *neg, const ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_neg(&((*neg)[i]), &((*vec)[i])); + } +} + +void +ibz_vec_4_add(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) +{ + ibz_add(&((*res)[0]), &((*a)[0]), &((*b)[0])); + ibz_add(&((*res)[1]), &((*a)[1]), &((*b)[1])); + ibz_add(&((*res)[2]), &((*a)[2]), &((*b)[2])); + ibz_add(&((*res)[3]), &((*a)[3]), &((*b)[3])); +} + +void +ibz_vec_4_sub(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) +{ + ibz_sub(&((*res)[0]), &((*a)[0]), &((*b)[0])); + ibz_sub(&((*res)[1]), &((*a)[1]), &((*b)[1])); + ibz_sub(&((*res)[2]), &((*a)[2]), &((*b)[2])); + ibz_sub(&((*res)[3]), &((*a)[3]), &((*b)[3])); +} + +int +ibz_vec_4_is_zero(const ibz_vec_4_t *x) +{ + int res = 1; + for (int i = 0; i < 4; i++) { + res &= ibz_is_zero(&((*x)[i])); + } + return (res); +} + +void +ibz_vec_4_linear_combination(ibz_vec_4_t *lc, + const ibz_t *coeff_a, + const ibz_vec_4_t *vec_a, + const ibz_t *coeff_b, + const ibz_vec_4_t *vec_b) +{ + ibz_t prod; + ibz_vec_4_t sums; + ibz_vec_4_init(&sums); + ibz_init(&prod); + for (int i = 0; i < 4; i++) { + ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); + ibz_mul(&prod, coeff_b, &((*vec_b)[i])); + ibz_add(&(sums[i]), 
&(sums[i]), &prod); + } + for (int i = 0; i < 4; i++) { + ibz_copy(&((*lc)[i]), &(sums[i])); + } + ibz_finalize(&prod); + ibz_vec_4_finalize(&sums); +} + +void +ibz_vec_4_scalar_mul(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_mul(&((*prod)[i]), &((*vec)[i]), scalar); + } +} + +int +ibz_vec_4_scalar_div(ibz_vec_4_t *quot, const ibz_t *scalar, const ibz_vec_4_t *vec) +{ + int res = 1; + ibz_t r; + ibz_init(&r); + for (int i = 0; i < 4; i++) { + ibz_div(&((*quot)[i]), &r, &((*vec)[i]), scalar); + res = res && ibz_is_zero(&r); + } + ibz_finalize(&r); + return (res); +} + +void +ibz_mat_4x4_copy(ibz_mat_4x4_t *new, const ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&((*new)[i][j]), &((*mat)[i][j])); + } + } +} + +void +ibz_mat_4x4_negate(ibz_mat_4x4_t *neg, const ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_neg(&((*neg)[i][j]), &((*mat)[i][j])); + } + } +} + +void +ibz_mat_4x4_transpose(ibz_mat_4x4_t *transposed, const ibz_mat_4x4_t *mat) +{ + ibz_mat_4x4_t work; + ibz_mat_4x4_init(&work); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(work[i][j]), &((*mat)[j][i])); + } + } + ibz_mat_4x4_copy(transposed, &work); + ibz_mat_4x4_finalize(&work); +} + +void +ibz_mat_4x4_zero(ibz_mat_4x4_t *zero) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&((*zero)[i][j]), 0); + } + } +} + +void +ibz_mat_4x4_identity(ibz_mat_4x4_t *id) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&((*id)[i][j]), 0); + } + ibz_set(&((*id)[i][i]), 1); + } +} + +int +ibz_mat_4x4_is_identity(const ibz_mat_4x4_t *mat) +{ + int res = 1; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + res = res && ibz_is_one(&((*mat)[i][j])) == (i == j); + } + } + return (res); +} + +int +ibz_mat_4x4_equal(const ibz_mat_4x4_t *mat1, const ibz_mat_4x4_t *mat2) +{ + int res = 0; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + res = res | ibz_cmp(&((*mat1)[i][j]), &((*mat2)[i][j])); + } + } + return (!res); +} + +void +ibz_mat_4x4_scalar_mul(ibz_mat_4x4_t *prod, const ibz_t *scalar, const ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_mul(&((*prod)[i][j]), &((*mat)[i][j]), scalar); + } + } +} + +void +ibz_mat_4x4_gcd(ibz_t *gcd, const ibz_mat_4x4_t *mat) +{ + ibz_t d; + ibz_init(&d); + ibz_copy(&d, &((*mat)[0][0])); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_gcd(&d, &d, &((*mat)[i][j])); + } + } + ibz_copy(gcd, &d); + ibz_finalize(&d); +} + +int +ibz_mat_4x4_scalar_div(ibz_mat_4x4_t *quot, const ibz_t *scalar, const ibz_mat_4x4_t *mat) +{ + int res = 1; + ibz_t r; + ibz_init(&r); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_div(&((*quot)[i][j]), &r, &((*mat)[i][j]), scalar); + res = res && ibz_is_zero(&r); + } + } + ibz_finalize(&r); + return (res); +} + +// 4x4 inversion helper functions +void +ibz_inv_dim4_make_coeff_pmp(ibz_t *coeff, + const ibz_t *a1, + const ibz_t *a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2) +{ + ibz_t prod, sum; + ibz_init(&prod); + ibz_init(&sum); + ibz_mul(&sum, a1, a2); + ibz_mul(&prod, b1, b2); + ibz_sub(&sum, &sum, &prod); + ibz_mul(&prod, c1, c2); + ibz_add(coeff, &sum, &prod); + ibz_finalize(&prod); + ibz_finalize(&sum); +} + +void +ibz_inv_dim4_make_coeff_mpm(ibz_t *coeff, + const ibz_t *a1, + const ibz_t 
*a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2) +{ + ibz_t prod, sum; + ibz_init(&prod); + ibz_init(&sum); + ibz_mul(&sum, b1, b2); + ibz_mul(&prod, a1, a2); + ibz_sub(&sum, &sum, &prod); + ibz_mul(&prod, c1, c2); + ibz_sub(coeff, &sum, &prod); + ibz_finalize(&prod); + ibz_finalize(&sum); +} + +// Method from https://www.geometrictools.com/Documentation/LaplaceExpansionTheorem.pdf 3rd of May +// 2023, 16h15 CEST +int +ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_4x4_t *mat) +{ + ibz_t prod, work_det; + ibz_mat_4x4_t work; + ibz_t s[6]; + ibz_t c[6]; + for (int i = 0; i < 6; i++) { + ibz_init(&(s[i])); + ibz_init(&(c[i])); + } + ibz_mat_4x4_init(&work); + ibz_init(&prod); + ibz_init(&work_det); + + // compute some 2x2 minors, store them in s and c + for (int i = 0; i < 3; i++) { + ibz_mat_2x2_det_from_ibz(&(s[i]), &((*mat)[0][0]), &((*mat)[0][i + 1]), &((*mat)[1][0]), &((*mat)[1][i + 1])); + ibz_mat_2x2_det_from_ibz(&(c[i]), &((*mat)[2][0]), &((*mat)[2][i + 1]), &((*mat)[3][0]), &((*mat)[3][i + 1])); + } + for (int i = 0; i < 2; i++) { + ibz_mat_2x2_det_from_ibz( + &(s[3 + i]), &((*mat)[0][1]), &((*mat)[0][2 + i]), &((*mat)[1][1]), &((*mat)[1][2 + i])); + ibz_mat_2x2_det_from_ibz( + &(c[3 + i]), &((*mat)[2][1]), &((*mat)[2][2 + i]), &((*mat)[3][1]), &((*mat)[3][2 + i])); + } + ibz_mat_2x2_det_from_ibz(&(s[5]), &((*mat)[0][2]), &((*mat)[0][3]), &((*mat)[1][2]), &((*mat)[1][3])); + ibz_mat_2x2_det_from_ibz(&(c[5]), &((*mat)[2][2]), &((*mat)[2][3]), &((*mat)[3][2]), &((*mat)[3][3])); + + // compute det + ibz_set(&work_det, 0); + for (int i = 0; i < 6; i++) { + ibz_mul(&prod, &(s[i]), &(c[5 - i])); + if ((i != 1) && (i != 4)) { + ibz_add(&work_det, &work_det, &prod); + } else { + ibz_sub(&work_det, &work_det, &prod); + } + } + // compute transposed adjugate + for (int j = 0; j < 4; j++) { + for (int k = 0; k < 2; k++) { + if ((k + j + 1) % 2 == 1) { + ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), + &((*mat)[1 - k][(j == 0)]), + &(c[6 - j - (j == 0)]), + &((*mat)[1 - k][2 - (j > 1)]), + &(c[4 - j - (j == 1)]), + &((*mat)[1 - k][3 - (j == 3)]), + &(c[3 - j - (j == 1) - (j == 2)])); + } else { + ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), + &((*mat)[1 - k][(j == 0)]), + &(c[6 - j - (j == 0)]), + &((*mat)[1 - k][2 - (j > 1)]), + &(c[4 - j - (j == 1)]), + &((*mat)[1 - k][3 - (j == 3)]), + &(c[3 - j - (j == 1) - (j == 2)])); + } + } + for (int k = 2; k < 4; k++) { + if ((k + j + 1) % 2 == 1) { + ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), + &((*mat)[3 - (k == 3)][(j == 0)]), + &(s[6 - j - (j == 0)]), + &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(s[4 - j - (j == 1)]), + &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(s[3 - j - (j == 1) - (j == 2)])); + } else { + ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), + &((*mat)[3 - (k == 3)][(j == 0)]), + &(s[6 - j - (j == 0)]), + &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(s[4 - j - (j == 1)]), + &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(s[3 - j - (j == 1) - (j == 2)])); + } + } + } + if (inv != NULL) { + // put transposed adjugate in result, or 0 if no inverse + ibz_set(&prod, !ibz_is_zero(&work_det)); + ibz_mat_4x4_scalar_mul(inv, &prod, &work); + } + // output det + if (det != NULL) + ibz_copy(det, &work_det); + for (int i = 0; i < 6; i++) { + ibz_finalize(&s[i]); + ibz_finalize(&c[i]); + } + ibz_mat_4x4_finalize(&work); + ibz_finalize(&work_det); + ibz_finalize(&prod); + return (!ibz_is_zero(det)); +} + +// matrix evaluation + +void +ibz_mat_4x4_eval(ibz_vec_4_t *res, const ibz_mat_4x4_t *mat, const 
ibz_vec_4_t *vec) +{ + ibz_vec_4_t sum; + ibz_t prod; + ibz_init(&prod); + ibz_vec_4_init(&sum); + // assume initialization to 0 + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_mul(&prod, &(*mat)[i][j], &(*vec)[j]); + ibz_add(&(sum[i]), &(sum[i]), &prod); + } + } + ibz_vec_4_copy(res, &sum); + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} + +void +ibz_mat_4x4_eval_t(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_mat_4x4_t *mat) +{ + ibz_vec_4_t sum; + ibz_t prod; + ibz_init(&prod); + ibz_vec_4_init(&sum); + // assume initialization to 0 + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_mul(&prod, &(*mat)[j][i], &(*vec)[j]); + ibz_add(&(sum[i]), &(sum[i]), &prod); + } + } + ibz_vec_4_copy(res, &sum); + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} + +// quadratic forms + +void +quat_qf_eval(ibz_t *res, const ibz_mat_4x4_t *qf, const ibz_vec_4_t *coord) +{ + ibz_vec_4_t sum; + ibz_t prod; + ibz_init(&prod); + ibz_vec_4_init(&sum); + ibz_mat_4x4_eval(&sum, qf, coord); + for (int i = 0; i < 4; i++) { + ibz_mul(&prod, &(sum[i]), &(*coord)[i]); + if (i > 0) { + ibz_add(&(sum[0]), &(sum[0]), &prod); + } else { + ibz_copy(&sum[0], &prod); + } + } + ibz_copy(res, &sum[0]); + ibz_finalize(&prod); + ibz_vec_4_finalize(&sum); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dpe.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dpe.h new file mode 100644 index 0000000000..b9a7a35e0b --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dpe.h @@ -0,0 +1,743 @@ +/* Copyright (C) 2004-2024 Patrick Pelissier, Paul Zimmermann, LORIA/INRIA. + +This file is part of the DPE Library. + +The DPE Library is free software; you can redistribute it and/or modify +it under the terms of the GNU Lesser General Public License as published by +the Free Software Foundation; either version 3 of the License, or (at your +option) any later version. + +The DPE Library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +License for more details. + +You should have received a copy of the GNU Lesser General Public License +along with the DPE Library; see the file COPYING.LIB. +If not, see . */ + +#ifndef __DPE +#define __DPE + +#include /* For abort */ +#include /* For fprintf */ +#include /* for round, floor, ceil */ +#include + +/* if you change the version, please change it in Makefile too */ +#define DPE_VERSION_MAJOR 1 +#define DPE_VERSION_MINOR 7 + +#if defined(__GNUC__) && (__GNUC__ >= 3) +# define DPE_LIKELY(x) (__builtin_expect(!!(x),1)) +# define DPE_UNLIKELY(x) (__builtin_expect((x),0)) +# define DPE_UNUSED_ATTR __attribute__((unused)) +#else +# define DPE_LIKELY(x) (x) +# define DPE_UNLIKELY(x) (x) +# define DPE_UNUSED_ATTR +#endif + +/* If no user defined mode, define it to double */ +#if !defined(DPE_USE_DOUBLE) && !defined(DPE_USE_LONGDOUBLE) && !defined(DPE_USE_FLOAT128) +# define DPE_USE_DOUBLE +#endif + +#if defined(DPE_USE_DOUBLE) && defined(DPE_USE_LONGDOUBLE) +# error "Either DPE_USE_DOUBLE or DPE_USE_LONGDOUBLE shall be defined." +#elif defined(DPE_USE_DOUBLE) && defined(DPE_USE_USE_FLOAT128) +# error "Either DPE_USE_DOUBLE or DPE_USE_FLOAT128 shall be defined." +#elif defined(DPE_USE_LONG_DOUBLE) && defined(DPE_USE_USE_FLOAT128) +# error "Either DPE_USE_LONG_DOUBLE or DPE_USE_FLOAT128 shall be defined." 
+#endif + +#if (defined(__i386) || defined (__x86_64)) && !defined(DPE_LITTLEENDIAN32) && defined(DPE_USE_DOUBLE) +# define DPE_LITTLEENDIAN32 +#endif + +#if (defined(__STDC_VERSION__) && (__STDC_VERSION__>=199901L)) || defined (__GLIBC__) +# define DPE_DEFINE_ROUND_TRUNC +#endif + +#if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 43 +# define DPE_ISFINITE __builtin_isfinite +#elif defined(isfinite) +# define DPE_ISFINITE isfinite /* new C99 function */ +#else +# define DPE_ISFINITE finite /* obsolete BSD function */ +#endif + +/* DPE_LDEXP(DPE_DOUBLE m, DPEEXP e) return x = m * 2^e */ +/* DPE_FREXP(DPE_DOUBLE x, DPEEXP *e) returns m, e such that x = m * 2^e with + 1/2 <= m < 1 */ +/* DPE_ROUND(DPE_DOUBLE x) returns the nearest integer to x */ +#if defined(DPE_USE_DOUBLE) +# define DPE_DOUBLE double /* mantissa type */ +# define DPE_BITSIZE 53 /* bitsize of DPE_DOUBLE */ +# define DPE_2_POW_BITSIZE 0x1P53 +# if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 40 +# define DPE_LDEXP __builtin_ldexp +# define DPE_FREXP __builtin_frexp +# define DPE_FLOOR __builtin_floor +# define DPE_CEIL __builtin_ceil +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND __builtin_round +# define DPE_TRUNC __builtin_trunc +# endif +# else +# define DPE_LDEXP ldexp +# define DPE_FREXP frexp +# define DPE_FLOOR floor +# define DPE_CEIL ceil +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND round +# define DPE_TRUNC trunc +# endif +# endif + +#elif defined(DPE_USE_LONGDOUBLE) +# define DPE_DOUBLE long double +# define DPE_BITSIZE 64 +# define DPE_2_POW_BITSIZE 0x1P64 +# define DPE_LDEXP ldexpl +# define DPE_FREXP frexpl +# define DPE_FLOOR floorl +# define DPE_CEIL ceill +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND roundl +# define DPE_TRUNC truncl +# endif + +#elif defined(DPE_USE_FLOAT128) +# include "quadmath.h" +# define DPE_DOUBLE __float128 +# define DPE_BITSIZE 113 +# define DPE_2_POW_BITSIZE 0x1P113 +# define DPE_LDEXP ldexpq +# define DPE_FLOOR floorq +# define DPE_CEIL ceilq +# define DPE_FREXP frexpq +# ifdef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND roundq +# define DPE_TRUNC truncq +# endif + +#else +# error "neither DPE_USE_DOUBLE, nor DPE_USE_LONGDOUBLE, nor DPE_USE_FLOAT128 is defined" +#endif + +/* If no C99, do what we can */ +#ifndef DPE_DEFINE_ROUND_TRUNC +# define DPE_ROUND(x) ((DPE_DOUBLE) ((long long) ((x) + ((x) >= 0.0 ? 0.5 : -0.5)))) +# define DPE_TRUNC(x) ((DPE_DOUBLE) ((long long) ((x) + 0.0))) +#endif + +#if defined(DPE_USE_LONG) +# define DPE_EXP_T long /* exponent type */ +# define DPE_EXPMIN LONG_MIN /* smallest possible exponent */ +#elif defined(DPE_USE_LONGLONG) +# define DPE_EXP_T long long +# define DPE_EXPMIN LLONG_MIN +#else +# define DPE_EXP_T int /* exponent type */ +# define DPE_EXPMIN INT_MIN /* smallest possible exponent */ +#endif + +#ifdef DPE_LITTLEENDIAN32 +typedef union +{ + double d; +#if INT_MAX == 0x7FFFFFFFL + int i[2]; +#elif LONG_MAX == 0x7FFFFFFFL + long i[2]; +#elif SHRT_MAX == 0x7FFFFFFFL + short i[2]; +#else +# error Cannot find a 32 bits integer type. +#endif +} dpe_double_words; +#endif + +typedef struct +{ + DPE_DOUBLE d; /* significand */ + DPE_EXP_T exp; /* exponent */ +} dpe_struct; + +typedef dpe_struct dpe_t[1]; + +#define DPE_MANT(x) ((x)->d) +#define DPE_EXP(x) ((x)->exp) +#define DPE_SIGN(x) ((DPE_MANT(x) < 0.0) ? 
-1 : (DPE_MANT(x) > 0.0)) + +#define DPE_INLINE static inline + +/* initialize */ +DPE_INLINE void +dpe_init (dpe_t x DPE_UNUSED_ATTR) +{ +} + +/* clear */ +DPE_INLINE void +dpe_clear (dpe_t x DPE_UNUSED_ATTR) +{ +} + +/* set x to y */ +DPE_INLINE void +dpe_set (dpe_t x, dpe_t y) +{ + DPE_MANT(x) = DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y); +} + +/* set x to -y */ +DPE_INLINE void +dpe_neg (dpe_t x, dpe_t y) +{ + DPE_MANT(x) = -DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y); +} + +/* set x to |y| */ +DPE_INLINE void +dpe_abs (dpe_t x, dpe_t y) +{ + DPE_MANT(x) = (DPE_MANT(y) >= 0) ? DPE_MANT(y) : -DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y); +} + +/* set mantissa in [1/2, 1), except for 0 which has minimum exponent */ +/* FIXME: don't inline this function yet ? */ +static void +dpe_normalize (dpe_t x) +{ + if (DPE_UNLIKELY (DPE_MANT(x) == 0.0 || DPE_ISFINITE (DPE_MANT(x)) == 0)) + { + if (DPE_MANT(x) == 0.0) + DPE_EXP(x) = DPE_EXPMIN; + /* otherwise let the exponent of NaN, Inf unchanged */ + } + else + { + DPE_EXP_T e; +#ifdef DPE_LITTLEENDIAN32 /* 32-bit little endian */ + dpe_double_words dw; + dw.d = DPE_MANT(x); + e = (dw.i[1] >> 20) & 0x7FF; /* unbiased exponent, 1022 for m=1/2 */ + DPE_EXP(x) += e - 1022; + dw.i[1] = (dw.i[1] & 0x800FFFFF) | 0x3FE00000; + DPE_MANT(x) = dw.d; +#else /* portable code */ + double m = DPE_MANT(x); + DPE_MANT(x) = DPE_FREXP (m, &e); + DPE_EXP(x) += e; +#endif + } +} + +#if defined(DPE_USE_DOUBLE) +static const double dpe_scale_tab[54] = { + 0x1P0, 0x1P-1, 0x1P-2, 0x1P-3, 0x1P-4, 0x1P-5, 0x1P-6, 0x1P-7, 0x1P-8, + 0x1P-9, 0x1P-10, 0x1P-11, 0x1P-12, 0x1P-13, 0x1P-14, 0x1P-15, 0x1P-16, + 0x1P-17, 0x1P-18, 0x1P-19, 0x1P-20, 0x1P-21, 0x1P-22, 0x1P-23, 0x1P-24, + 0x1P-25, 0x1P-26, 0x1P-27, 0x1P-28, 0x1P-29, 0x1P-30, 0x1P-31, 0x1P-32, + 0x1P-33, 0x1P-34, 0x1P-35, 0x1P-36, 0x1P-37, 0x1P-38, 0x1P-39, 0x1P-40, + 0x1P-41, 0x1P-42, 0x1P-43, 0x1P-44, 0x1P-45, 0x1P-46, 0x1P-47, 0x1P-48, + 0x1P-49, 0x1P-50, 0x1P-51, 0x1P-52, 0x1P-53}; +#endif + +DPE_INLINE DPE_DOUBLE +dpe_scale (DPE_DOUBLE d, int s) +{ + /* -DPE_BITSIZE < s <= 0 and 1/2 <= d < 1 */ +#if defined(DPE_USE_DOUBLE) + return d * dpe_scale_tab [-s]; +#else /* portable code */ + return DPE_LDEXP (d, s); +#endif +} + +/* set x to y */ +DPE_INLINE void +dpe_set_d (dpe_t x, double y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +/* set x to y */ +DPE_INLINE void +dpe_set_ld (dpe_t x, long double y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +/* set x to y */ +DPE_INLINE void +dpe_set_ui (dpe_t x, unsigned long y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +/* set x to y */ +DPE_INLINE void +dpe_set_si (dpe_t x, long y) +{ + DPE_MANT(x) = (DPE_DOUBLE) y; + DPE_EXP(x) = 0; + dpe_normalize (x); +} + +DPE_INLINE long +dpe_get_si (dpe_t x) +{ + DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); + return (long) d; +} + +DPE_INLINE unsigned long +dpe_get_ui (dpe_t x) +{ + DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); + return (d < 0.0) ? 
0 : (unsigned long) d; +} + +DPE_INLINE double +dpe_get_d (dpe_t x) +{ + return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); +} + +DPE_INLINE long double +dpe_get_ld (dpe_t x) +{ + return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); +} + +#if defined(__GMP_H__) || defined(__MINI_GMP_H__) +/* set x to y */ +DPE_INLINE void +dpe_set_z (dpe_t x, mpz_t y) +{ + long e; + DPE_MANT(x) = mpz_get_d_2exp (&e, y); + DPE_EXP(x) = (DPE_EXP_T) e; +} + +/* set x to y, rounded to nearest */ +DPE_INLINE void +dpe_get_z (mpz_t x, dpe_t y) +{ + DPE_EXP_T ey = DPE_EXP(y); + if (ey >= DPE_BITSIZE) /* y is an integer */ + { + DPE_DOUBLE d = DPE_MANT(y) * DPE_2_POW_BITSIZE; /* d is an integer */ + mpz_set_d (x, d); /* should be exact */ + mpz_mul_2exp (x, x, (unsigned long) ey - DPE_BITSIZE); + } + else /* DPE_EXP(y) < DPE_BITSIZE */ + { + if (DPE_UNLIKELY (ey < 0)) /* |y| < 1/2 */ + mpz_set_ui (x, 0); + else + { + DPE_DOUBLE d = DPE_LDEXP(DPE_MANT(y), ey); + mpz_set_d (x, (double) DPE_ROUND(d)); + } + } +} + +/* return e and x such that y = x*2^e */ +DPE_INLINE mp_exp_t +dpe_get_z_exp (mpz_t x, dpe_t y) +{ + mpz_set_d (x, DPE_MANT (y) * DPE_2_POW_BITSIZE); + return DPE_EXP(y) - DPE_BITSIZE; +} +#endif + +/* x <- y + z, assuming y and z are normalized, returns x normalized */ +DPE_INLINE void +dpe_add (dpe_t x, dpe_t y, dpe_t z) +{ + if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) + /* |z| < 1/2*ulp(y), thus o(y+z) = y */ + dpe_set (x, y); + else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) + dpe_set (x, z); + else + { + DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ + + if (d >= 0) + { + DPE_MANT(x) = DPE_MANT(y) + dpe_scale (DPE_MANT(z), -d); + DPE_EXP(x) = DPE_EXP(y); + } + else + { + DPE_MANT(x) = DPE_MANT(z) + dpe_scale (DPE_MANT(y), d); + DPE_EXP(x) = DPE_EXP(z); + } + dpe_normalize (x); + } +} + +/* x <- y - z, assuming y and z are normalized, returns x normalized */ +DPE_INLINE void +dpe_sub (dpe_t x, dpe_t y, dpe_t z) +{ + if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) + /* |z| < 1/2*ulp(y), thus o(y-z) = y */ + dpe_set (x, y); + else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) + dpe_neg (x, z); + else + { + DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ + + if (d >= 0) + { + DPE_MANT(x) = DPE_MANT(y) - dpe_scale (DPE_MANT(z), -d); + DPE_EXP(x) = DPE_EXP(y); + } + else + { + DPE_MANT(x) = dpe_scale (DPE_MANT(y), d) - DPE_MANT(z); + DPE_EXP(x) = DPE_EXP(z); + } + dpe_normalize (x); + } +} + +/* x <- y * z, assuming y and z are normalized, returns x normalized */ +DPE_INLINE void +dpe_mul (dpe_t x, dpe_t y, dpe_t z) +{ + DPE_MANT(x) = DPE_MANT(y) * DPE_MANT(z); + DPE_EXP(x) = DPE_EXP(y) + DPE_EXP(z); + dpe_normalize (x); +} + +/* x <- sqrt(y), assuming y is normalized, returns x normalized */ +DPE_INLINE void +dpe_sqrt (dpe_t x, dpe_t y) +{ + DPE_EXP_T ey = DPE_EXP(y); + if (ey % 2) + { + /* since 1/2 <= my < 1, 1/4 <= my/2 < 1 */ + DPE_MANT(x) = sqrt (0.5 * DPE_MANT(y)); + DPE_EXP(x) = (ey + 1) / 2; + } + else + { + DPE_MANT(x) = sqrt (DPE_MANT(y)); + DPE_EXP(x) = ey / 2; + } +} + +/* x <- y / z, assuming y and z are normalized, returns x normalized. + Assumes z is not zero. 
*/ +DPE_INLINE void +dpe_div (dpe_t x, dpe_t y, dpe_t z) +{ + DPE_MANT(x) = DPE_MANT(y) / DPE_MANT(z); + DPE_EXP(x) = DPE_EXP(y) - DPE_EXP(z); + dpe_normalize (x); +} + +/* x <- y * z, assuming y normalized, returns x normalized */ +DPE_INLINE void +dpe_mul_ui (dpe_t x, dpe_t y, unsigned long z) +{ + DPE_MANT(x) = DPE_MANT(y) * (DPE_DOUBLE) z; + DPE_EXP(x) = DPE_EXP(y); + dpe_normalize (x); +} + +/* x <- y / z, assuming y normalized, z non-zero, returns x normalized */ +DPE_INLINE void +dpe_div_ui (dpe_t x, dpe_t y, unsigned long z) +{ + DPE_MANT(x) = DPE_MANT(y) / (DPE_DOUBLE) z; + DPE_EXP(x) = DPE_EXP(y); + dpe_normalize (x); +} + +/* x <- y * 2^e */ +DPE_INLINE void +dpe_mul_2exp (dpe_t x, dpe_t y, unsigned long e) +{ + DPE_MANT(x) = DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y) + (DPE_EXP_T) e; +} + +/* x <- y / 2^e */ +DPE_INLINE void +dpe_div_2exp (dpe_t x, dpe_t y, unsigned long e) +{ + DPE_MANT(x) = DPE_MANT(y); + DPE_EXP(x) = DPE_EXP(y) - (DPE_EXP_T) e; +} + +/* return e and x such that y = x*2^e (equality is not guaranteed if the 'long' + type has fewer bits than the significand in dpe_t) */ +DPE_INLINE DPE_EXP_T +dpe_get_si_exp (long *x, dpe_t y) +{ + if (sizeof(long) == 4) /* 32-bit word: long has 31 bits */ + { + *x = (long) (DPE_MANT(y) * 2147483648.0); + return DPE_EXP(y) - 31; + } + else if (sizeof(long) == 8) /* 64-bit word: long has 63 bits */ + { + *x = (long) (DPE_MANT (y) * 9223372036854775808.0); + return DPE_EXP(y) - 63; + } + else + { + fprintf (stderr, "Error, neither 32-bit nor 64-bit word\n"); + exit (1); + } +} + +static DPE_UNUSED_ATTR int dpe_str_prec = 16; +static int dpe_out_str (FILE *s, int base, dpe_t x) DPE_UNUSED_ATTR; + +static int +dpe_out_str (FILE *s, int base, dpe_t x) +{ + DPE_DOUBLE d = DPE_MANT(x); + DPE_EXP_T e2 = DPE_EXP(x); + int e10 = 0; + char sign = ' '; + if (DPE_UNLIKELY (base != 10)) + { + fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); + exit (1); + } + if (d == 0.0) +#ifdef DPE_USE_DOUBLE + return fprintf (s, "%1.*f", dpe_str_prec, d); +#else + return fprintf (s, "%1.*Lf", dpe_str_prec, (long double) d); +#endif + if (d < 0) + { + d = -d; + sign = '-'; + } + if (e2 > 0) + { + while (e2 > 0) + { + e2 --; + d *= 2.0; + if (d >= 10.0) + { + d /= 10.0; + e10 ++; + } + } + } + else /* e2 <= 0 */ + { + while (e2 < 0) + { + e2 ++; + d /= 2.0; + if (d < 1.0) + { + d *= 10.0; + e10 --; + } + } + } +#ifdef DPE_USE_DOUBLE + return fprintf (s, "%c%1.*f*10^%d", sign, dpe_str_prec, d, e10); +#else + return fprintf (s, "%c%1.*Lf*10^%d", sign, dpe_str_prec, (long double) d, e10); +#endif +} + +static size_t dpe_inp_str (dpe_t x, FILE *s, int base) DPE_UNUSED_ATTR; + +static size_t +dpe_inp_str (dpe_t x, FILE *s, int base) +{ + size_t res; + DPE_DOUBLE d; + if (DPE_UNLIKELY (base != 10)) + { + fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); + exit (1); + } +#ifdef DPE_USE_DOUBLE + res = fscanf (s, "%lf", &d); +#elif defined(DPE_USE_LONGDOUBLE) + res = fscanf (s, "%Lf", &d); +#else + { + long double d_ld; + res = fscanf (s, "%Lf", &d_ld); + d = d_ld; + } +#endif + dpe_set_d (x, d); + return res; +} + +DPE_INLINE void +dpe_dump (dpe_t x) +{ + dpe_out_str (stdout, 10, x); + putchar ('\n'); +} + +DPE_INLINE int +dpe_zero_p (dpe_t x) +{ + return DPE_MANT (x) == 0; +} + +/* return a positive value if x > y + a negative value if x < y + and 0 otherwise (x=y). 
*/ +DPE_INLINE int +dpe_cmp (dpe_t x, dpe_t y) +{ + int sx = DPE_SIGN(x); + int d = sx - DPE_SIGN(y); + + if (d != 0) + return d; + else if (DPE_EXP(x) > DPE_EXP(y)) + return (sx > 0) ? 1 : -1; + else if (DPE_EXP(y) > DPE_EXP(x)) + return (sx > 0) ? -1 : 1; + else /* DPE_EXP(x) = DPE_EXP(y) */ + return (DPE_MANT(x) < DPE_MANT(y)) ? -1 : (DPE_MANT(x) > DPE_MANT(y)); +} + +DPE_INLINE int +dpe_cmp_d (dpe_t x, double d) +{ + dpe_t y; + dpe_set_d (y, d); + return dpe_cmp (x, y); +} + +DPE_INLINE int +dpe_cmp_ui (dpe_t x, unsigned long d) +{ + dpe_t y; + dpe_set_ui (y, d); + return dpe_cmp (x, y); +} + +DPE_INLINE int +dpe_cmp_si (dpe_t x, long d) +{ + dpe_t y; + dpe_set_si (y, d); + return dpe_cmp (x, y); +} + +/* set x to integer nearest to y */ +DPE_INLINE void +dpe_round (dpe_t x, dpe_t y) +{ + if (DPE_EXP(y) < 0) /* |y| < 1/2 */ + dpe_set_ui (x, 0); + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set (x, y); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, DPE_ROUND(d)); + } +} + +/* set x to the fractional part of y, defined as y - trunc(y), thus the + fractional part has absolute value in [0, 1), and same sign as y */ +DPE_INLINE void +dpe_frac (dpe_t x, dpe_t y) +{ + /* If |y| is smaller than 1, keep it */ + if (DPE_EXP(y) <= 0) + dpe_set (x, y); + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set_ui (x, 0); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, d - DPE_TRUNC(d)); + } +} + +/* set x to largest integer <= y */ +DPE_INLINE void +dpe_floor (dpe_t x, dpe_t y) +{ + if (DPE_EXP(y) <= 0) /* |y| < 1 */ + { + if (DPE_SIGN(y) >= 0) /* 0 <= y < 1 */ + dpe_set_ui (x, 0); + else /* -1 < y < 0 */ + dpe_set_si (x, -1); + } + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set (x, y); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, DPE_FLOOR(d)); + } +} + +/* set x to smallest integer >= y */ +DPE_INLINE void +dpe_ceil (dpe_t x, dpe_t y) +{ + if (DPE_EXP(y) <= 0) /* |y| < 1 */ + { + if (DPE_SIGN(y) > 0) /* 0 < y < 1 */ + dpe_set_ui (x, 1); + else /* -1 < y <= 0 */ + dpe_set_si (x, 0); + } + else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ + dpe_set (x, y); + else + { + DPE_DOUBLE d; + d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); + dpe_set_d (x, DPE_CEIL(d)); + } +} + +DPE_INLINE void +dpe_swap (dpe_t x, dpe_t y) +{ + DPE_EXP_T i = DPE_EXP (x); + DPE_DOUBLE d = DPE_MANT (x); + DPE_EXP (x) = DPE_EXP (y); + DPE_MANT (x) = DPE_MANT (y); + DPE_EXP (y) = i; + DPE_MANT (y) = d; +} + +#endif /* __DPE */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/finit.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/finit.c new file mode 100644 index 0000000000..b3808edf07 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/finit.c @@ -0,0 +1,122 @@ +#include "internal.h" + +void +quat_alg_init_set(quat_alg_t *alg, const ibz_t *p) +{ + ibz_init(&(*alg).p); + ibz_copy(&(*alg).p, p); +} +void +quat_alg_finalize(quat_alg_t *alg) +{ + ibz_finalize(&(*alg).p); +} + +void +quat_alg_elem_init(quat_alg_elem_t *elem) +{ + ibz_vec_4_init(&(*elem).coord); + ibz_init(&(*elem).denom); + ibz_set(&(*elem).denom, 1); +} +void +quat_alg_elem_finalize(quat_alg_elem_t *elem) +{ + ibz_vec_4_finalize(&(*elem).coord); + ibz_finalize(&(*elem).denom); +} + +void +ibz_vec_2_init(ibz_vec_2_t *vec) +{ + ibz_init(&((*vec)[0])); + ibz_init(&((*vec)[1])); +} + +void +ibz_vec_2_finalize(ibz_vec_2_t *vec) +{ + ibz_finalize(&((*vec)[0])); + 
ibz_finalize(&((*vec)[1])); +} + +void +ibz_vec_4_init(ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_init(&(*vec)[i]); + } +} +void +ibz_vec_4_finalize(ibz_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibz_finalize(&(*vec)[i]); + } +} + +void +ibz_mat_2x2_init(ibz_mat_2x2_t *mat) +{ + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_init(&(*mat)[i][j]); + } + } +} +void +ibz_mat_2x2_finalize(ibz_mat_2x2_t *mat) +{ + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + ibz_finalize(&(*mat)[i][j]); + } + } +} + +void +ibz_mat_4x4_init(ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_init(&(*mat)[i][j]); + } + } +} +void +ibz_mat_4x4_finalize(ibz_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_finalize(&(*mat)[i][j]); + } + } +} + +void +quat_lattice_init(quat_lattice_t *lat) +{ + ibz_mat_4x4_init(&(*lat).basis); + ibz_init(&(*lat).denom); + ibz_set(&(*lat).denom, 1); +} +void +quat_lattice_finalize(quat_lattice_t *lat) +{ + ibz_finalize(&(*lat).denom); + ibz_mat_4x4_finalize(&(*lat).basis); +} + +void +quat_left_ideal_init(quat_left_ideal_t *lideal) +{ + quat_lattice_init(&(*lideal).lattice); + ibz_init(&(*lideal).norm); + (*lideal).parent_order = NULL; +} +void +quat_left_ideal_finalize(quat_left_ideal_t *lideal) +{ + ibz_finalize(&(*lideal).norm); + quat_lattice_finalize(&(*lideal).lattice); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fips202.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fips202.c deleted file mode 100644 index f2992d8c7f..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fips202.c +++ /dev/null @@ -1,876 +0,0 @@ -// SPDX-License-Identifier: PD and Apache-2.0 - -/* FIPS202 implementation based on code from PQClean, - * which is in turn based based on the public domain implementation in - * crypto_hash/keccakc512/simple/ from http://bench.cr.yp.to/supercop.html - * by Ronny Van Keer - * and the public domain "TweetFips202" implementation - * from https://twitter.com/tweetfips202 - * by Gilles Van Assche, Daniel J. 
Bernstein, and Peter Schwabe */ - -#include -#include -#include -#include - -#include "fips202.h" - -#define NROUNDS 24 -#define ROL(a, offset) (((a) << (offset)) ^ ((a) >> (64 - (offset)))) - -/************************************************* - * Name: load64 - * - * Description: Load 8 bytes into uint64_t in little-endian order - * - * Arguments: - const uint8_t *x: pointer to input byte array - * - * Returns the loaded 64-bit unsigned integer - **************************************************/ -static uint64_t load64(const uint8_t *x) { - uint64_t r = 0; - for (size_t i = 0; i < 8; ++i) { - r |= (uint64_t)x[i] << 8 * i; - } - - return r; -} - -/************************************************* - * Name: store64 - * - * Description: Store a 64-bit integer to a byte array in little-endian order - * - * Arguments: - uint8_t *x: pointer to the output byte array - * - uint64_t u: input 64-bit unsigned integer - **************************************************/ -static void store64(uint8_t *x, uint64_t u) { - for (size_t i = 0; i < 8; ++i) { - x[i] = (uint8_t) (u >> 8 * i); - } -} - -/* Keccak round constants */ -static const uint64_t KeccakF_RoundConstants[NROUNDS] = { - 0x0000000000000001ULL, 0x0000000000008082ULL, - 0x800000000000808aULL, 0x8000000080008000ULL, - 0x000000000000808bULL, 0x0000000080000001ULL, - 0x8000000080008081ULL, 0x8000000000008009ULL, - 0x000000000000008aULL, 0x0000000000000088ULL, - 0x0000000080008009ULL, 0x000000008000000aULL, - 0x000000008000808bULL, 0x800000000000008bULL, - 0x8000000000008089ULL, 0x8000000000008003ULL, - 0x8000000000008002ULL, 0x8000000000000080ULL, - 0x000000000000800aULL, 0x800000008000000aULL, - 0x8000000080008081ULL, 0x8000000000008080ULL, - 0x0000000080000001ULL, 0x8000000080008008ULL -}; - -/************************************************* - * Name: KeccakF1600_StatePermute - * - * Description: The Keccak F1600 Permutation - * - * Arguments: - uint64_t *state: pointer to input/output Keccak state - **************************************************/ -static void KeccakF1600_StatePermute(uint64_t *state) { - int round; - - uint64_t Aba, Abe, Abi, Abo, Abu; - uint64_t Aga, Age, Agi, Ago, Agu; - uint64_t Aka, Ake, Aki, Ako, Aku; - uint64_t Ama, Ame, Ami, Amo, Amu; - uint64_t Asa, Ase, Asi, Aso, Asu; - uint64_t BCa, BCe, BCi, BCo, BCu; - uint64_t Da, De, Di, Do, Du; - uint64_t Eba, Ebe, Ebi, Ebo, Ebu; - uint64_t Ega, Ege, Egi, Ego, Egu; - uint64_t Eka, Eke, Eki, Eko, Eku; - uint64_t Ema, Eme, Emi, Emo, Emu; - uint64_t Esa, Ese, Esi, Eso, Esu; - - // copyFromState(A, state) - Aba = state[0]; - Abe = state[1]; - Abi = state[2]; - Abo = state[3]; - Abu = state[4]; - Aga = state[5]; - Age = state[6]; - Agi = state[7]; - Ago = state[8]; - Agu = state[9]; - Aka = state[10]; - Ake = state[11]; - Aki = state[12]; - Ako = state[13]; - Aku = state[14]; - Ama = state[15]; - Ame = state[16]; - Ami = state[17]; - Amo = state[18]; - Amu = state[19]; - Asa = state[20]; - Ase = state[21]; - Asi = state[22]; - Aso = state[23]; - Asu = state[24]; - - for (round = 0; round < NROUNDS; round += 2) { - // prepareTheta - BCa = Aba ^ Aga ^ Aka ^ Ama ^ Asa; - BCe = Abe ^ Age ^ Ake ^ Ame ^ Ase; - BCi = Abi ^ Agi ^ Aki ^ Ami ^ Asi; - BCo = Abo ^ Ago ^ Ako ^ Amo ^ Aso; - BCu = Abu ^ Agu ^ Aku ^ Amu ^ Asu; - - // thetaRhoPiChiIotaPrepareTheta(round , A, E) - Da = BCu ^ ROL(BCe, 1); - De = BCa ^ ROL(BCi, 1); - Di = BCe ^ ROL(BCo, 1); - Do = BCi ^ ROL(BCu, 1); - Du = BCo ^ ROL(BCa, 1); - - Aba ^= Da; - BCa = Aba; - Age ^= De; - BCe = ROL(Age, 44); - Aki ^= Di; - BCi = 
ROL(Aki, 43); - Amo ^= Do; - BCo = ROL(Amo, 21); - Asu ^= Du; - BCu = ROL(Asu, 14); - Eba = BCa ^ ((~BCe) & BCi); - Eba ^= KeccakF_RoundConstants[round]; - Ebe = BCe ^ ((~BCi) & BCo); - Ebi = BCi ^ ((~BCo) & BCu); - Ebo = BCo ^ ((~BCu) & BCa); - Ebu = BCu ^ ((~BCa) & BCe); - - Abo ^= Do; - BCa = ROL(Abo, 28); - Agu ^= Du; - BCe = ROL(Agu, 20); - Aka ^= Da; - BCi = ROL(Aka, 3); - Ame ^= De; - BCo = ROL(Ame, 45); - Asi ^= Di; - BCu = ROL(Asi, 61); - Ega = BCa ^ ((~BCe) & BCi); - Ege = BCe ^ ((~BCi) & BCo); - Egi = BCi ^ ((~BCo) & BCu); - Ego = BCo ^ ((~BCu) & BCa); - Egu = BCu ^ ((~BCa) & BCe); - - Abe ^= De; - BCa = ROL(Abe, 1); - Agi ^= Di; - BCe = ROL(Agi, 6); - Ako ^= Do; - BCi = ROL(Ako, 25); - Amu ^= Du; - BCo = ROL(Amu, 8); - Asa ^= Da; - BCu = ROL(Asa, 18); - Eka = BCa ^ ((~BCe) & BCi); - Eke = BCe ^ ((~BCi) & BCo); - Eki = BCi ^ ((~BCo) & BCu); - Eko = BCo ^ ((~BCu) & BCa); - Eku = BCu ^ ((~BCa) & BCe); - - Abu ^= Du; - BCa = ROL(Abu, 27); - Aga ^= Da; - BCe = ROL(Aga, 36); - Ake ^= De; - BCi = ROL(Ake, 10); - Ami ^= Di; - BCo = ROL(Ami, 15); - Aso ^= Do; - BCu = ROL(Aso, 56); - Ema = BCa ^ ((~BCe) & BCi); - Eme = BCe ^ ((~BCi) & BCo); - Emi = BCi ^ ((~BCo) & BCu); - Emo = BCo ^ ((~BCu) & BCa); - Emu = BCu ^ ((~BCa) & BCe); - - Abi ^= Di; - BCa = ROL(Abi, 62); - Ago ^= Do; - BCe = ROL(Ago, 55); - Aku ^= Du; - BCi = ROL(Aku, 39); - Ama ^= Da; - BCo = ROL(Ama, 41); - Ase ^= De; - BCu = ROL(Ase, 2); - Esa = BCa ^ ((~BCe) & BCi); - Ese = BCe ^ ((~BCi) & BCo); - Esi = BCi ^ ((~BCo) & BCu); - Eso = BCo ^ ((~BCu) & BCa); - Esu = BCu ^ ((~BCa) & BCe); - - // prepareTheta - BCa = Eba ^ Ega ^ Eka ^ Ema ^ Esa; - BCe = Ebe ^ Ege ^ Eke ^ Eme ^ Ese; - BCi = Ebi ^ Egi ^ Eki ^ Emi ^ Esi; - BCo = Ebo ^ Ego ^ Eko ^ Emo ^ Eso; - BCu = Ebu ^ Egu ^ Eku ^ Emu ^ Esu; - - // thetaRhoPiChiIotaPrepareTheta(round+1, E, A) - Da = BCu ^ ROL(BCe, 1); - De = BCa ^ ROL(BCi, 1); - Di = BCe ^ ROL(BCo, 1); - Do = BCi ^ ROL(BCu, 1); - Du = BCo ^ ROL(BCa, 1); - - Eba ^= Da; - BCa = Eba; - Ege ^= De; - BCe = ROL(Ege, 44); - Eki ^= Di; - BCi = ROL(Eki, 43); - Emo ^= Do; - BCo = ROL(Emo, 21); - Esu ^= Du; - BCu = ROL(Esu, 14); - Aba = BCa ^ ((~BCe) & BCi); - Aba ^= KeccakF_RoundConstants[round + 1]; - Abe = BCe ^ ((~BCi) & BCo); - Abi = BCi ^ ((~BCo) & BCu); - Abo = BCo ^ ((~BCu) & BCa); - Abu = BCu ^ ((~BCa) & BCe); - - Ebo ^= Do; - BCa = ROL(Ebo, 28); - Egu ^= Du; - BCe = ROL(Egu, 20); - Eka ^= Da; - BCi = ROL(Eka, 3); - Eme ^= De; - BCo = ROL(Eme, 45); - Esi ^= Di; - BCu = ROL(Esi, 61); - Aga = BCa ^ ((~BCe) & BCi); - Age = BCe ^ ((~BCi) & BCo); - Agi = BCi ^ ((~BCo) & BCu); - Ago = BCo ^ ((~BCu) & BCa); - Agu = BCu ^ ((~BCa) & BCe); - - Ebe ^= De; - BCa = ROL(Ebe, 1); - Egi ^= Di; - BCe = ROL(Egi, 6); - Eko ^= Do; - BCi = ROL(Eko, 25); - Emu ^= Du; - BCo = ROL(Emu, 8); - Esa ^= Da; - BCu = ROL(Esa, 18); - Aka = BCa ^ ((~BCe) & BCi); - Ake = BCe ^ ((~BCi) & BCo); - Aki = BCi ^ ((~BCo) & BCu); - Ako = BCo ^ ((~BCu) & BCa); - Aku = BCu ^ ((~BCa) & BCe); - - Ebu ^= Du; - BCa = ROL(Ebu, 27); - Ega ^= Da; - BCe = ROL(Ega, 36); - Eke ^= De; - BCi = ROL(Eke, 10); - Emi ^= Di; - BCo = ROL(Emi, 15); - Eso ^= Do; - BCu = ROL(Eso, 56); - Ama = BCa ^ ((~BCe) & BCi); - Ame = BCe ^ ((~BCi) & BCo); - Ami = BCi ^ ((~BCo) & BCu); - Amo = BCo ^ ((~BCu) & BCa); - Amu = BCu ^ ((~BCa) & BCe); - - Ebi ^= Di; - BCa = ROL(Ebi, 62); - Ego ^= Do; - BCe = ROL(Ego, 55); - Eku ^= Du; - BCi = ROL(Eku, 39); - Ema ^= Da; - BCo = ROL(Ema, 41); - Ese ^= De; - BCu = ROL(Ese, 2); - Asa = BCa ^ ((~BCe) & BCi); - Ase = BCe ^ ((~BCi) & BCo); - Asi = BCi ^ 
((~BCo) & BCu); - Aso = BCo ^ ((~BCu) & BCa); - Asu = BCu ^ ((~BCa) & BCe); - } - - // copyToState(state, A) - state[0] = Aba; - state[1] = Abe; - state[2] = Abi; - state[3] = Abo; - state[4] = Abu; - state[5] = Aga; - state[6] = Age; - state[7] = Agi; - state[8] = Ago; - state[9] = Agu; - state[10] = Aka; - state[11] = Ake; - state[12] = Aki; - state[13] = Ako; - state[14] = Aku; - state[15] = Ama; - state[16] = Ame; - state[17] = Ami; - state[18] = Amo; - state[19] = Amu; - state[20] = Asa; - state[21] = Ase; - state[22] = Asi; - state[23] = Aso; - state[24] = Asu; -} - -/************************************************* - * Name: keccak_absorb - * - * Description: Absorb step of Keccak; - * non-incremental, starts by zeroeing the state. - * - * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - * - const uint8_t *m: pointer to input to be absorbed into s - * - size_t mlen: length of input in bytes - * - uint8_t p: domain-separation byte for different - * Keccak-derived functions - **************************************************/ -static void keccak_absorb(uint64_t *s, uint32_t r, const uint8_t *m, - size_t mlen, uint8_t p) { - size_t i; - uint8_t t[200]; - - /* Zero state */ - for (i = 0; i < 25; ++i) { - s[i] = 0; - } - - while (mlen >= r) { - for (i = 0; i < r / 8; ++i) { - s[i] ^= load64(m + 8 * i); - } - - KeccakF1600_StatePermute(s); - mlen -= r; - m += r; - } - - for (i = 0; i < r; ++i) { - t[i] = 0; - } - for (i = 0; i < mlen; ++i) { - t[i] = m[i]; - } - t[i] = p; - t[r - 1] |= 128; - for (i = 0; i < r / 8; ++i) { - s[i] ^= load64(t + 8 * i); - } -} - -/************************************************* - * Name: keccak_squeezeblocks - * - * Description: Squeeze step of Keccak. Squeezes full blocks of r bytes each. - * Modifies the state. Can be called multiple times to keep - * squeezing, i.e., is incremental. - * - * Arguments: - uint8_t *h: pointer to output blocks - * - size_t nblocks: number of blocks to be - * squeezed (written to h) - * - uint64_t *s: pointer to input/output Keccak state - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - **************************************************/ -static void keccak_squeezeblocks(uint8_t *h, size_t nblocks, - uint64_t *s, uint32_t r) { - while (nblocks > 0) { - KeccakF1600_StatePermute(s); - for (size_t i = 0; i < (r >> 3); i++) { - store64(h + 8 * i, s[i]); - } - h += r; - nblocks--; - } -} - -/************************************************* - * Name: keccak_inc_init - * - * Description: Initializes the incremental Keccak state to zero. - * - * Arguments: - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. - **************************************************/ -static void keccak_inc_init(uint64_t *s_inc) { - size_t i; - - for (i = 0; i < 25; ++i) { - s_inc[i] = 0; - } - s_inc[25] = 0; -} - -/************************************************* - * Name: keccak_inc_absorb - * - * Description: Incremental keccak absorb - * Preceded by keccak_inc_init, succeeded by keccak_inc_finalize - * - * Arguments: - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. 
- * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - * - const uint8_t *m: pointer to input to be absorbed into s - * - size_t mlen: length of input in bytes - **************************************************/ -static void keccak_inc_absorb(uint64_t *s_inc, uint32_t r, const uint8_t *m, - size_t mlen) { - size_t i; - - /* Recall that s_inc[25] is the non-absorbed bytes xored into the state */ - while (mlen + s_inc[25] >= r) { - for (i = 0; i < r - (uint32_t)s_inc[25]; i++) { - /* Take the i'th byte from message - xor with the s_inc[25] + i'th byte of the state; little-endian */ - s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); - } - mlen -= (size_t)(r - s_inc[25]); - m += r - s_inc[25]; - s_inc[25] = 0; - - KeccakF1600_StatePermute(s_inc); - } - - for (i = 0; i < mlen; i++) { - s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); - } - s_inc[25] += mlen; -} - -/************************************************* - * Name: keccak_inc_finalize - * - * Description: Finalizes Keccak absorb phase, prepares for squeezing - * - * Arguments: - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - * - uint8_t p: domain-separation byte for different - * Keccak-derived functions - **************************************************/ -static void keccak_inc_finalize(uint64_t *s_inc, uint32_t r, uint8_t p) { - /* After keccak_inc_absorb, we are guaranteed that s_inc[25] < r, - so we can always use one more byte for p in the current state. */ - s_inc[s_inc[25] >> 3] ^= (uint64_t)p << (8 * (s_inc[25] & 0x07)); - s_inc[(r - 1) >> 3] ^= (uint64_t)128 << (8 * ((r - 1) & 0x07)); - s_inc[25] = 0; -} - -/************************************************* - * Name: keccak_inc_squeeze - * - * Description: Incremental Keccak squeeze; can be called on byte-level - * - * Arguments: - uint8_t *h: pointer to output bytes - * - size_t outlen: number of bytes to be squeezed - * - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - **************************************************/ -static void keccak_inc_squeeze(uint8_t *h, size_t outlen, - uint64_t *s_inc, uint32_t r) { - size_t i; - - /* First consume any bytes we still have sitting around */ - for (i = 0; i < outlen && i < s_inc[25]; i++) { - /* There are s_inc[25] bytes left, so r - s_inc[25] is the first - available byte. We consume from there, i.e., up to r. 
*/ - h[i] = (uint8_t)(s_inc[(r - s_inc[25] + i) >> 3] >> (8 * ((r - s_inc[25] + i) & 0x07))); - } - h += i; - outlen -= i; - s_inc[25] -= i; - - /* Then squeeze the remaining necessary blocks */ - while (outlen > 0) { - KeccakF1600_StatePermute(s_inc); - - for (i = 0; i < outlen && i < r; i++) { - h[i] = (uint8_t)(s_inc[i >> 3] >> (8 * (i & 0x07))); - } - h += i; - outlen -= i; - s_inc[25] = r - i; - } -} - -void shake128_inc_init(shake128incctx *state) { - keccak_inc_init(state->ctx); -} - -void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHAKE128_RATE, input, inlen); -} - -void shake128_inc_finalize(shake128incctx *state) { - keccak_inc_finalize(state->ctx, SHAKE128_RATE, 0x1F); -} - -void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state) { - keccak_inc_squeeze(output, outlen, state->ctx, SHAKE128_RATE); -} - -void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void shake128_inc_ctx_release(shake128incctx *state) { - (void)state; -} - -void shake256_inc_init(shake256incctx *state) { - keccak_inc_init(state->ctx); -} - -void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHAKE256_RATE, input, inlen); -} - -void shake256_inc_finalize(shake256incctx *state) { - keccak_inc_finalize(state->ctx, SHAKE256_RATE, 0x1F); -} - -void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state) { - keccak_inc_squeeze(output, outlen, state->ctx, SHAKE256_RATE); -} - -void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void shake256_inc_ctx_release(shake256incctx *state) { - (void)state; -} - - -/************************************************* - * Name: shake128_absorb - * - * Description: Absorb step of the SHAKE128 XOF. - * non-incremental, starts by zeroeing the state. - * - * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state - * - const uint8_t *input: pointer to input to be absorbed - * into s - * - size_t inlen: length of input in bytes - **************************************************/ -void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen) { - keccak_absorb(state->ctx, SHAKE128_RATE, input, inlen, 0x1F); -} - -/************************************************* - * Name: shake128_squeezeblocks - * - * Description: Squeeze step of SHAKE128 XOF. Squeezes full blocks of - * SHAKE128_RATE bytes each. Modifies the state. Can be called - * multiple times to keep squeezing, i.e., is incremental. - * - * Arguments: - uint8_t *output: pointer to output blocks - * - size_t nblocks: number of blocks to be squeezed - * (written to output) - * - shake128ctx *state: pointer to input/output Keccak state - **************************************************/ -void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state) { - keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE128_RATE); -} - -void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); -} - -/** Release the allocated state. Call only once. */ -void shake128_ctx_release(shake128ctx *state) { - (void)state; -} - -/************************************************* - * Name: shake256_absorb - * - * Description: Absorb step of the SHAKE256 XOF. 
- * non-incremental, starts by zeroeing the state. - * - * Arguments: - shake256ctx *state: pointer to (uninitialized) output Keccak state - * - const uint8_t *input: pointer to input to be absorbed - * into s - * - size_t inlen: length of input in bytes - **************************************************/ -void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen) { - keccak_absorb(state->ctx, SHAKE256_RATE, input, inlen, 0x1F); -} - -/************************************************* - * Name: shake256_squeezeblocks - * - * Description: Squeeze step of SHAKE256 XOF. Squeezes full blocks of - * SHAKE256_RATE bytes each. Modifies the state. Can be called - * multiple times to keep squeezing, i.e., is incremental. - * - * Arguments: - uint8_t *output: pointer to output blocks - * - size_t nblocks: number of blocks to be squeezed - * (written to output) - * - shake256ctx *state: pointer to input/output Keccak state - **************************************************/ -void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state) { - keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE256_RATE); -} - -void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); -} - -/** Release the allocated state. Call only once. */ -void shake256_ctx_release(shake256ctx *state) { - (void)state; -} - -/************************************************* - * Name: shake128 - * - * Description: SHAKE128 XOF with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - size_t outlen: requested output length in bytes - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void shake128(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen) { - size_t nblocks = outlen / SHAKE128_RATE; - uint8_t t[SHAKE128_RATE]; - shake128ctx s; - - shake128_absorb(&s, input, inlen); - shake128_squeezeblocks(output, nblocks, &s); - - output += nblocks * SHAKE128_RATE; - outlen -= nblocks * SHAKE128_RATE; - - if (outlen) { - shake128_squeezeblocks(t, 1, &s); - for (size_t i = 0; i < outlen; ++i) { - output[i] = t[i]; - } - } - shake128_ctx_release(&s); -} - -/************************************************* - * Name: shake256 - * - * Description: SHAKE256 XOF with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - size_t outlen: requested output length in bytes - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void shake256(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen) { - size_t nblocks = outlen / SHAKE256_RATE; - uint8_t t[SHAKE256_RATE]; - shake256ctx s; - - shake256_absorb(&s, input, inlen); - shake256_squeezeblocks(output, nblocks, &s); - - output += nblocks * SHAKE256_RATE; - outlen -= nblocks * SHAKE256_RATE; - - if (outlen) { - shake256_squeezeblocks(t, 1, &s); - for (size_t i = 0; i < outlen; ++i) { - output[i] = t[i]; - } - } - shake256_ctx_release(&s); -} - -void sha3_256_inc_init(sha3_256incctx *state) { - keccak_inc_init(state->ctx); -} - -void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void sha3_256_inc_ctx_release(sha3_256incctx *state) { - (void)state; -} - -void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen) { - 
keccak_inc_absorb(state->ctx, SHA3_256_RATE, input, inlen); -} - -void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state) { - uint8_t t[SHA3_256_RATE]; - keccak_inc_finalize(state->ctx, SHA3_256_RATE, 0x06); - - keccak_squeezeblocks(t, 1, state->ctx, SHA3_256_RATE); - - sha3_256_inc_ctx_release(state); - - for (size_t i = 0; i < 32; i++) { - output[i] = t[i]; - } -} - -/************************************************* - * Name: sha3_256 - * - * Description: SHA3-256 with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen) { - uint64_t s[25]; - uint8_t t[SHA3_256_RATE]; - - /* Absorb input */ - keccak_absorb(s, SHA3_256_RATE, input, inlen, 0x06); - - /* Squeeze output */ - keccak_squeezeblocks(t, 1, s, SHA3_256_RATE); - - for (size_t i = 0; i < 32; i++) { - output[i] = t[i]; - } -} - -void sha3_384_inc_init(sha3_384incctx *state) { - keccak_inc_init(state->ctx); -} - -void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHA3_384_RATE, input, inlen); -} - -void sha3_384_inc_ctx_release(sha3_384incctx *state) { - (void)state; -} - -void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state) { - uint8_t t[SHA3_384_RATE]; - keccak_inc_finalize(state->ctx, SHA3_384_RATE, 0x06); - - keccak_squeezeblocks(t, 1, state->ctx, SHA3_384_RATE); - - sha3_384_inc_ctx_release(state); - - for (size_t i = 0; i < 48; i++) { - output[i] = t[i]; - } -} - -/************************************************* - * Name: sha3_384 - * - * Description: SHA3-256 with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen) { - uint64_t s[25]; - uint8_t t[SHA3_384_RATE]; - - /* Absorb input */ - keccak_absorb(s, SHA3_384_RATE, input, inlen, 0x06); - - /* Squeeze output */ - keccak_squeezeblocks(t, 1, s, SHA3_384_RATE); - - for (size_t i = 0; i < 48; i++) { - output[i] = t[i]; - } -} - -void sha3_512_inc_init(sha3_512incctx *state) { - keccak_inc_init(state->ctx); -} - -void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHA3_512_RATE, input, inlen); -} - -void sha3_512_inc_ctx_release(sha3_512incctx *state) { - (void)state; -} - -void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state) { - uint8_t t[SHA3_512_RATE]; - keccak_inc_finalize(state->ctx, SHA3_512_RATE, 0x06); - - keccak_squeezeblocks(t, 1, state->ctx, SHA3_512_RATE); - - sha3_512_inc_ctx_release(state); - - for (size_t i = 0; i < 64; i++) { - output[i] = t[i]; - } -} - -/************************************************* - * Name: sha3_512 - * - * Description: SHA3-512 with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - 
**************************************************/ -void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen) { - uint64_t s[25]; - uint8_t t[SHA3_512_RATE]; - - /* Absorb input */ - keccak_absorb(s, SHA3_512_RATE, input, inlen, 0x06); - - /* Squeeze output */ - keccak_squeezeblocks(t, 1, s, SHA3_512_RATE); - - for (size_t i = 0; i < 64; i++) { - output[i] = t[i]; - } -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fips202.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fips202.h index c29ebd8f9d..21bc0c3f79 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fips202.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fips202.h @@ -3,169 +3,12 @@ #ifndef FIPS202_H #define FIPS202_H -#include -#include +#include -#define SHAKE128_RATE 168 -#define SHAKE256_RATE 136 -#define SHA3_256_RATE 136 -#define SHA3_384_RATE 104 -#define SHA3_512_RATE 72 - -#define PQC_SHAKEINCCTX_U64WORDS 26 -#define PQC_SHAKECTX_U64WORDS 25 - -#define PQC_SHAKEINCCTX_BYTES (sizeof(uint64_t) * 26) -#define PQC_SHAKECTX_BYTES (sizeof(uint64_t) * 25) - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} shake128incctx; - -// Context for non-incremental API -typedef struct { - uint64_t ctx[PQC_SHAKECTX_U64WORDS]; -} shake128ctx; - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} shake256incctx; - -// Context for non-incremental API -typedef struct { - uint64_t ctx[PQC_SHAKECTX_U64WORDS]; -} shake256ctx; - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} sha3_256incctx; - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} sha3_384incctx; - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} sha3_512incctx; - -/* Initialize the state and absorb the provided input. - * - * This function does not support being called multiple times - * with the same state. - */ -void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen); -/* Squeeze output out of the sponge. - * - * Supports being called multiple times - */ -void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state); -/* Free the state */ -void shake128_ctx_release(shake128ctx *state); -/* Copy the state. */ -void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src); - -/* Initialize incremental hashing API */ -void shake128_inc_init(shake128incctx *state); -/* Absorb more information into the XOF. - * - * Can be called multiple times. - */ -void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen); -/* Finalize the XOF for squeezing */ -void shake128_inc_finalize(shake128incctx *state); -/* Squeeze output out of the sponge. - * - * Supports being called multiple times - */ -void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state); -/* Copy the context of the SHAKE128 XOF */ -void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src); -/* Free the context of the SHAKE128 XOF */ -void shake128_inc_ctx_release(shake128incctx *state); - -/* Initialize the state and absorb the provided input. - * - * This function does not support being called multiple times - * with the same state. - */ -void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen); -/* Squeeze output out of the sponge. 
- * - * Supports being called multiple times - */ -void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state); -/* Free the context held by this XOF */ -void shake256_ctx_release(shake256ctx *state); -/* Copy the context held by this XOF */ -void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src); - -/* Initialize incremental hashing API */ -void shake256_inc_init(shake256incctx *state); -void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen); -/* Prepares for squeeze phase */ -void shake256_inc_finalize(shake256incctx *state); -/* Squeeze output out of the sponge. - * - * Supports being called multiple times - */ -void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state); -/* Copy the state */ -void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src); -/* Free the state */ -void shake256_inc_ctx_release(shake256incctx *state); - -/* One-stop SHAKE128 call */ -void shake128(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen); - -/* One-stop SHAKE256 call */ -void shake256(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen); - -/* Initialize the incremental hashing state */ -void sha3_256_inc_init(sha3_256incctx *state); -/* Absorb blocks into SHA3 */ -void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen); -/* Obtain the output of the function and free `state` */ -void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state); -/* Copy the context */ -void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src); -/* Release the state, don't use if `_finalize` has been used */ -void sha3_256_inc_ctx_release(sha3_256incctx *state); - -void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen); - -/* Initialize the incremental hashing state */ -void sha3_384_inc_init(sha3_384incctx *state); -/* Absorb blocks into SHA3 */ -void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, size_t inlen); -/* Obtain the output of the function and free `state` */ -void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state); -/* Copy the context */ -void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src); -/* Release the state, don't use if `_finalize` has been used */ -void sha3_384_inc_ctx_release(sha3_384incctx *state); - -/* One-stop SHA3-384 shop */ -void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen); - -/* Initialize the incremental hashing state */ -void sha3_512_inc_init(sha3_512incctx *state); -/* Absorb blocks into SHA3 */ -void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen); -/* Obtain the output of the function and free `state` */ -void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state); -/* Copy the context */ -void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src); -/* Release the state, don't use if `_finalize` has been used */ -void sha3_512_inc_ctx_release(sha3_512incctx *state); - -/* One-stop SHA3-512 shop */ -void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen); +#define shake256incctx OQS_SHA3_shake256_inc_ctx +#define shake256_inc_init OQS_SHA3_shake256_inc_init +#define shake256_inc_absorb OQS_SHA3_shake256_inc_absorb +#define shake256_inc_finalize OQS_SHA3_shake256_inc_finalize +#define shake256_inc_squeeze OQS_SHA3_shake256_inc_squeeze #endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf.c 
b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf.c new file mode 100644 index 0000000000..1fb4c0f139 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf.c @@ -0,0 +1,210 @@ +#include "hnf_internal.h" +#include "internal.h" + +// HNF test function +int +ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat) +{ + int res = 1; + int found; + int ind = 0; + ibz_t zero; + ibz_init(&zero); + // upper triangular + for (int i = 0; i < 4; i++) { + // upper triangular + for (int j = 0; j < i; j++) { + res = res && ibz_is_zero(&((*mat)[i][j])); + } + // find first non 0 element of line + found = 0; + for (int j = i; j < 4; j++) { + if (found) { + // all values are positive, and first non-0 is the largest of that line + res = res && (ibz_cmp(&((*mat)[i][j]), &zero) >= 0); + res = res && (ibz_cmp(&((*mat)[i][ind]), &((*mat)[i][j])) > 0); + } else { + if (!ibz_is_zero(&((*mat)[i][j]))) { + found = 1; + ind = j; + // must be non-negative + res = res && (ibz_cmp(&((*mat)[i][j]), &zero) > 0); + } + } + } + } + // check that the first non-zero element's index per column is strictly increasing + int linestart = -1; + int i = 0; + for (int j = 0; j < 4; j++) { + while ((i < 4) && (ibz_is_zero(&((*mat)[i][j])))) { + i = i + 1; + } + if (i != 4) { + res = res && (linestart < i); + } + i = 0; + } + ibz_finalize(&zero); + return res; +} + +// Untested HNF helpers +// centered mod +void +ibz_vec_4_linear_combination_mod(ibz_vec_4_t *lc, + const ibz_t *coeff_a, + const ibz_vec_4_t *vec_a, + const ibz_t *coeff_b, + const ibz_vec_4_t *vec_b, + const ibz_t *mod) +{ + ibz_t prod, m; + ibz_vec_4_t sums; + ibz_vec_4_init(&sums); + ibz_init(&prod); + ibz_init(&m); + ibz_copy(&m, mod); + for (int i = 0; i < 4; i++) { + ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); + ibz_mul(&prod, coeff_b, &((*vec_b)[i])); + ibz_add(&(sums[i]), &(sums[i]), &prod); + ibz_centered_mod(&(sums[i]), &(sums[i]), &m); + } + for (int i = 0; i < 4; i++) { + ibz_copy(&((*lc)[i]), &(sums[i])); + } + ibz_finalize(&prod); + ibz_finalize(&m); + ibz_vec_4_finalize(&sums); +} + +void +ibz_vec_4_copy_mod(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_t *mod) +{ + ibz_t m; + ibz_init(&m); + ibz_copy(&m, mod); + for (int i = 0; i < 4; i++) { + ibz_centered_mod(&((*res)[i]), &((*vec)[i]), &m); + } + ibz_finalize(&m); +} + +// no need to center this, and not 0 +void +ibz_vec_4_scalar_mul_mod(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec, const ibz_t *mod) +{ + ibz_t m, s; + ibz_init(&m); + ibz_init(&s); + ibz_copy(&s, scalar); + ibz_copy(&m, mod); + for (int i = 0; i < 4; i++) { + ibz_mul(&((*prod)[i]), &((*vec)[i]), &s); + ibz_mod(&((*prod)[i]), &((*prod)[i]), &m); + } + ibz_finalize(&m); + ibz_finalize(&s); +} + +// Algorithm used is the one at number 2.4.8 in Henri Cohen's "A Course in Computational Algebraic +// Number Theory" (Springer Verlag, in series "Graduate texts in Mathematics") from 1993 +// assumes ibz_xgcd outputs u,v which are small in absolute value (as described in the +// book) +void +ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec_4_t *generators, const ibz_t *mod) +{ + int i = 3; + assert(generator_number > 3); + int n = generator_number; + int j = n - 1; + int k = n - 1; + ibz_t b, u, v, d, q, m, coeff_1, coeff_2, r; + ibz_vec_4_t c; + ibz_vec_4_t a[generator_number]; + ibz_vec_4_t w[4]; + ibz_init(&b); + ibz_init(&d); + ibz_init(&u); + ibz_init(&v); + ibz_init(&r); + ibz_init(&m); + ibz_init(&q); + ibz_init(&coeff_1); + ibz_init(&coeff_2); + ibz_vec_4_init(&c); + for
(int h = 0; h < n; h++) { + if (h < 4) + ibz_vec_4_init(&(w[h])); + ibz_vec_4_init(&(a[h])); + ibz_copy(&(a[h][0]), &(generators[h][0])); + ibz_copy(&(a[h][1]), &(generators[h][1])); + ibz_copy(&(a[h][2]), &(generators[h][2])); + ibz_copy(&(a[h][3]), &(generators[h][3])); + } + assert(ibz_cmp(mod, &ibz_const_zero) > 0); + ibz_copy(&m, mod); + while (i != -1) { + while (j != 0) { + j = j - 1; + if (!ibz_is_zero(&(a[j][i]))) { + // assumption that ibz_xgcd outputs u,v which are small in absolute + // value is needed here also, needs u non 0, but v can be 0 if needed + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &(a[j][i])); + ibz_vec_4_linear_combination(&c, &u, &(a[k]), &v, &(a[j])); + ibz_div(&coeff_1, &r, &(a[k][i]), &d); + ibz_div(&coeff_2, &r, &(a[j][i]), &d); + ibz_neg(&coeff_2, &coeff_2); + ibz_vec_4_linear_combination_mod( + &(a[j]), &coeff_1, &(a[j]), &coeff_2, &(a[k]), &m); // do lin comb mod m + ibz_vec_4_copy_mod(&(a[k]), &c, &m); // mod m in copy + } + } + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &m); + ibz_vec_4_scalar_mul_mod(&(w[i]), &u, &(a[k]), &m); // mod m in scalar mult + if (ibz_is_zero(&(w[i][i]))) { + ibz_copy(&(w[i][i]), &m); + } + for (int h = i + 1; h < 4; h++) { + ibz_div_floor(&q, &r, &(w[h][i]), &(w[i][i])); + ibz_neg(&q, &q); + ibz_vec_4_linear_combination(&(w[h]), &ibz_const_one, &(w[h]), &q, &(w[i])); + } + ibz_div(&m, &r, &m, &d); + assert(ibz_is_zero(&r)); + if (i != 0) { + k = k - 1; + i = i - 1; + j = k; + if (ibz_is_zero(&(a[k][i]))) + ibz_copy(&(a[k][i]), &m); + + } else { + k = k - 1; + i = i - 1; + j = k; + } + } + for (j = 0; j < 4; j++) { + for (i = 0; i < 4; i++) { + ibz_copy(&((*hnf)[i][j]), &(w[j][i])); + } + } + + ibz_finalize(&b); + ibz_finalize(&d); + ibz_finalize(&u); + ibz_finalize(&v); + ibz_finalize(&r); + ibz_finalize(&q); + ibz_finalize(&coeff_1); + ibz_finalize(&coeff_2); + ibz_finalize(&m); + ibz_vec_4_finalize(&c); + for (int h = 0; h < n; h++) { + if (h < 4) + ibz_vec_4_finalize(&(w[h])); + ibz_vec_4_finalize(&(a[h])); + } +} \ No newline at end of file diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf_internal.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf_internal.c new file mode 100644 index 0000000000..b2db5b54c9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf_internal.c @@ -0,0 +1,182 @@ +#include "hnf_internal.h" +#include "internal.h" + +// Small helper for integers +void +ibz_mod_not_zero(ibz_t *res, const ibz_t *x, const ibz_t *mod) +{ + ibz_t m, t; + ibz_init(&m); + ibz_init(&t); + ibz_mod(&m, x, mod); + ibz_set(&t, ibz_is_zero(&m)); + ibz_mul(&t, &t, mod); + ibz_add(res, &m, &t); + ibz_finalize(&m); + ibz_finalize(&t); +} + +// centered and rather positive than negative +void +ibz_centered_mod(ibz_t *remainder, const ibz_t *a, const ibz_t *mod) +{ + assert(ibz_cmp(mod, &ibz_const_zero) > 0); + ibz_t tmp, d, t; + ibz_init(&tmp); + ibz_init(&d); + ibz_init(&t); + ibz_div_floor(&d, &tmp, mod, &ibz_const_two); + ibz_mod_not_zero(&tmp, a, mod); + ibz_set(&t, ibz_cmp(&tmp, &d) > 0); + ibz_mul(&t, &t, mod); + ibz_sub(remainder, &tmp, &t); + ibz_finalize(&tmp); + ibz_finalize(&d); + ibz_finalize(&t); +} + +// if c, res = x, else res = y +void +ibz_conditional_assign(ibz_t *res, const ibz_t *x, const ibz_t *y, int c) +{ + ibz_t s, t, r; + ibz_init(&r); + ibz_init(&s); + ibz_init(&t); + ibz_set(&s, c != 0); + ibz_sub(&t, &ibz_const_one, &s); + ibz_mul(&r, &s, x); + ibz_mul(res, &t, y); + ibz_add(res, &r, res); + ibz_finalize(&r); + ibz_finalize(&s); + ibz_finalize(&t);
+} + +// mpz_gcdext specification specifies unique outputs used here +void +ibz_xgcd_with_u_not_0(ibz_t *d, ibz_t *u, ibz_t *v, const ibz_t *x, const ibz_t *y) +{ + if (ibz_is_zero(x) & ibz_is_zero(y)) { + ibz_set(d, 1); + ibz_set(u, 1); + ibz_set(v, 0); + return; + } + ibz_t q, r, x1, y1; + ibz_init(&q); + ibz_init(&r); + ibz_init(&x1); + ibz_init(&y1); + ibz_copy(&x1, x); + ibz_copy(&y1, y); + + // xgcd + ibz_xgcd(d, u, v, &x1, &y1); + + // make sure u!=0 (v can be 0 if needed) + // following GMP specification, u == 0 implies y|x + if (ibz_is_zero(u)) { + if (!ibz_is_zero(&x1)) { + if (ibz_is_zero(&y1)) { + ibz_set(&y1, 1); + } + ibz_div(&q, &r, &x1, &y1); + assert(ibz_is_zero(&r)); + ibz_sub(v, v, &q); + } + ibz_set(u, 1); + } + if (!ibz_is_zero(&x1)) { + // Make sure ux > 0 (and as small as possible) + assert(ibz_cmp(d, &ibz_const_zero) > 0); + ibz_mul(&r, &x1, &y1); + int neg = ibz_cmp(&r, &ibz_const_zero) < 0; + ibz_mul(&q, &x1, u); + while (ibz_cmp(&q, &ibz_const_zero) <= 0) { + ibz_div(&q, &r, &y1, d); + assert(ibz_is_zero(&r)); + if (neg) { + ibz_neg(&q, &q); + } + ibz_add(u, u, &q); + ibz_div(&q, &r, &x1, d); + assert(ibz_is_zero(&r)); + if (neg) { + ibz_neg(&q, &q); + } + ibz_sub(v, v, &q); + + ibz_mul(&q, &x1, u); + } + } + +#ifndef NDEBUG + int res = 0; + ibz_t sum, prod, test, cmp; + ibz_init(&sum); + ibz_init(&prod); + ibz_init(&cmp); + ibz_init(&test); + // sign correct + res = res | !(ibz_cmp(d, &ibz_const_zero) >= 0); + if (ibz_is_zero(&x1) && ibz_is_zero(&y1)) { + res = res | !(ibz_is_zero(v) && ibz_is_one(u) && ibz_is_one(d)); + } else { + if (!ibz_is_zero(&x1) && !ibz_is_zero(&y1)) { + // GCD divides x + ibz_div(&sum, &prod, &x1, d); + res = res | !ibz_is_zero(&prod); + // Small enough + ibz_mul(&prod, &x1, u); + res = res | !(ibz_cmp(&prod, &ibz_const_zero) > 0); + ibz_mul(&sum, &sum, &y1); + ibz_abs(&sum, &sum); + res = res | !(ibz_cmp(&prod, &sum) <= 0); + + // GCD divides y + ibz_div(&sum, &prod, &y1, d); + res = res | !ibz_is_zero(&prod); + // Small enough + ibz_mul(&prod, &y1, v); + res = res | !(ibz_cmp(&prod, &ibz_const_zero) <= 0); + ibz_mul(&sum, &sum, &x1); + ibz_abs(&sum, &sum); + res = res | !(ibz_cmp(&prod, &sum) < 0); + } else { + // GCD divides x + ibz_div(&sum, &prod, &x1, d); + res = res | !ibz_is_zero(&prod); + // GCD divides y + ibz_div(&sum, &prod, &y1, d); + res = res | !ibz_is_zero(&prod); + if (ibz_is_zero(&x1) && !ibz_is_zero(&y1)) { + ibz_abs(&prod, v); + res = res | !(ibz_is_one(&prod)); + res = res | !(ibz_is_one(u)); + } else { + ibz_abs(&prod, u); + res = res | !(ibz_is_one(&prod)); + res = res | !(ibz_is_zero(v)); + } + } + + // Bezout coeffs + ibz_mul(&sum, &x1, u); + ibz_mul(&prod, &y1, v); + ibz_add(&sum, &sum, &prod); + res = res | !(ibz_cmp(&sum, d) == 0); + } + assert(!res); + ibz_finalize(&sum); + ibz_finalize(&prod); + ibz_finalize(&cmp); + ibz_finalize(&test); + +#endif + + ibz_finalize(&x1); + ibz_finalize(&y1); + ibz_finalize(&q); + ibz_finalize(&r); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf_internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf_internal.h new file mode 100644 index 0000000000..5ecc871bb4 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf_internal.h @@ -0,0 +1,94 @@ +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations for functions internal to the HNF computation and its tests + */ + +#ifndef QUAT_HNF_HELPERS_H +#define QUAT_HNF_HELPERS_H + +#include + +/** @internal + * @ingroup quat_helpers + * @defgroup quat_hnf_helpers 
Internal functions for the HNF computation and tests + */ + +/** @internal + * @ingroup quat_hnf_helpers + * @defgroup quat_hnf_helpers_ibz Internal renamed GMP functions for the HNF computation + */ + +/** + * @brief GCD and Bézout coefficients u, v such that ua + bv = gcd + * + * @param gcd Output: Set to the gcd of a and b + * @param u Output: integer such that ua+bv=gcd + * @param v Output: Integer such that ua+bv=gcd + * @param a + * @param b + */ +void ibz_xgcd(ibz_t *gcd, + ibz_t *u, + ibz_t *v, + const ibz_t *a, + const ibz_t *b); // integers, dim4, test/integers, test/dim4 + +/** @} + */ + +/** @internal + * @ingroup quat_hnf_helpers + * @defgroup quat_hnf_integer_helpers Integer functions internal to the HNF computation and tests + * @{ + */ + +/** @brief x mod mod, with x in [1,mod] + * + * @param res Output: res = x [mod] and 0 0 + */ +void ibz_mod_not_zero(ibz_t *res, const ibz_t *x, const ibz_t *mod); + +/** @brief x mod mod, with x in ]-mod/2,mod/2] + * + * Centered and rather positive then negative. + * + * @param remainder Output: remainder = x [mod] and -mod/2 0 + */ +void ibz_centered_mod(ibz_t *remainder, const ibz_t *a, const ibz_t *mod); + +/** @brief if c then x else y + * + * @param res Output: if c, res = x, else res = y + * @param x + * @param y + * @param c condition: must be 0 or 1 + */ +void ibz_conditional_assign(ibz_t *res, const ibz_t *x, const ibz_t *y, int c); + +/** @brief d = gcd(x,y)>0 and d = ux+vy and u!= 0 and d>0 and u, v of small absolute value, u not 0 + * + * More precisely: + * If x and y are both non 0, -|xy|/d +#else +#include +#endif + +void +ibz_xgcd(ibz_t *gcd, ibz_t *u, ibz_t *v, const ibz_t *a, const ibz_t *b) +{ + mpz_gcdext(*gcd, *u, *v, *a, *b); +} \ No newline at end of file diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ideal.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ideal.c new file mode 100644 index 0000000000..9cf863a104 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ideal.c @@ -0,0 +1,323 @@ +#include +#include +#include "internal.h" + +// assumes parent order and lattice correctly set, computes and sets the norm +void +quat_lideal_norm(quat_left_ideal_t *lideal) +{ + quat_lattice_index(&(lideal->norm), &(lideal->lattice), (lideal->parent_order)); + int ok UNUSED = ibz_sqrt(&(lideal->norm), &(lideal->norm)); + assert(ok); +} + +// assumes parent order and lattice correctly set, recomputes and verifies its norm +static int +quat_lideal_norm_verify(const quat_left_ideal_t *lideal) +{ + int res; + ibz_t index; + ibz_init(&index); + quat_lattice_index(&index, &(lideal->lattice), (lideal->parent_order)); + ibz_sqrt(&index, &index); + res = (ibz_cmp(&(lideal->norm), &index) == 0); + ibz_finalize(&index); + return (res); +} + +void +quat_lideal_copy(quat_left_ideal_t *copy, const quat_left_ideal_t *copied) +{ + copy->parent_order = copied->parent_order; + ibz_copy(©->norm, &copied->norm); + ibz_copy(©->lattice.denom, &copied->lattice.denom); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(©->lattice.basis[i][j], &copied->lattice.basis[i][j]); + } + } +} + +void +quat_lideal_create_principal(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const quat_lattice_t *order, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal(order, alg)); + assert(quat_lattice_contains(NULL, order, x)); + ibz_t norm_n, norm_d; + ibz_init(&norm_n); + ibz_init(&norm_d); + + // Multiply order on the right by x + quat_lattice_alg_elem_mul(&(lideal->lattice), order, x, 
alg); + + // Reduce denominator. This conserves HNF + quat_lattice_reduce_denom(&lideal->lattice, &lideal->lattice); + + // Compute norm and check it's integral + quat_alg_norm(&norm_n, &norm_d, x, alg); + assert(ibz_is_one(&norm_d)); + ibz_copy(&lideal->norm, &norm_n); + + // Set order + lideal->parent_order = order; + ibz_finalize(&norm_n); + ibz_finalize(&norm_d); +} + +void +quat_lideal_create(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const ibz_t *N, + const quat_lattice_t *order, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal(order, alg)); + assert(!quat_alg_elem_is_zero(x)); + + quat_lattice_t ON; + quat_lattice_init(&ON); + + // Compute ideal generated by x + quat_lideal_create_principal(lideal, x, order, alg); + + // Compute ideal generated by N (without reducing denominator) + ibz_mat_4x4_scalar_mul(&ON.basis, N, &order->basis); + ibz_copy(&ON.denom, &order->denom); + + // Add lattices (reduces denominators) + quat_lattice_add(&lideal->lattice, &lideal->lattice, &ON); + // Set order + lideal->parent_order = order; + // Compute norm + quat_lideal_norm(lideal); + + quat_lattice_finalize(&ON); +} + +int +quat_lideal_generator(quat_alg_elem_t *gen, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + ibz_t norm_int, norm_n, gcd, r, q, norm_denom; + ibz_vec_4_t vec; + ibz_vec_4_init(&vec); + ibz_init(&norm_denom); + ibz_init(&norm_int); + ibz_init(&norm_n); + ibz_init(&r); + ibz_init(&q); + ibz_init(&gcd); + int a, b, c, d; + int found = 0; + int int_norm = 0; + while (1) { + int_norm++; + for (a = -int_norm; a <= int_norm; a++) { + for (b = -int_norm + abs(a); b <= int_norm - abs(a); b++) { + for (c = -int_norm + abs(a) + abs(b); c <= int_norm - abs(a) - abs(b); c++) { + d = int_norm - abs(a) - abs(b) - abs(c); + ibz_vec_4_set(&vec, a, b, c, d); + ibz_vec_4_content(&gcd, &vec); + if (ibz_is_one(&gcd)) { + ibz_mat_4x4_eval(&(gen->coord), &(lideal->lattice.basis), &vec); + ibz_copy(&(gen->denom), &(lideal->lattice.denom)); + quat_alg_norm(&norm_int, &norm_denom, gen, alg); + assert(ibz_is_one(&norm_denom)); + ibz_div(&q, &r, &norm_int, &(lideal->norm)); + assert(ibz_is_zero(&r)); + ibz_gcd(&gcd, &(lideal->norm), &q); + found = (0 == ibz_cmp(&gcd, &ibz_const_one)); + if (found) + goto fin; + } + } + } + } + } +fin:; + ibz_finalize(&r); + ibz_finalize(&q); + ibz_finalize(&norm_denom); + ibz_finalize(&norm_int); + ibz_finalize(&norm_n); + ibz_vec_4_finalize(&vec); + ibz_finalize(&gcd); + return (found); +} + +void +quat_lideal_mul(quat_left_ideal_t *product, + const quat_left_ideal_t *lideal, + const quat_alg_elem_t *alpha, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + ibz_t norm, norm_d; + ibz_init(&norm); + ibz_init(&norm_d); + quat_lattice_alg_elem_mul(&(product->lattice), &(lideal->lattice), alpha, alg); + product->parent_order = lideal->parent_order; + quat_alg_norm(&norm, &norm_d, alpha, alg); + ibz_mul(&(product->norm), &(lideal->norm), &norm); + assert(ibz_divides(&(product->norm), &norm_d)); + ibz_div(&(product->norm), &norm, &(product->norm), &norm_d); + assert(quat_lideal_norm_verify(lideal)); + ibz_finalize(&norm_d); + ibz_finalize(&norm); +} + +void +quat_lideal_add(quat_left_ideal_t *sum, const quat_left_ideal_t *I1, const quat_left_ideal_t *I2, const quat_alg_t *alg) +{ + assert(I1->parent_order == I2->parent_order); + assert(quat_order_is_maximal((I2->parent_order), alg)); + quat_lattice_add(&sum->lattice, &I1->lattice, &I2->lattice); + sum->parent_order = I1->parent_order; + 
quat_lideal_norm(sum); +} + +void +quat_lideal_inter(quat_left_ideal_t *inter, + const quat_left_ideal_t *I1, + const quat_left_ideal_t *I2, + const quat_alg_t *alg) +{ + assert(I1->parent_order == I2->parent_order); + assert(quat_order_is_maximal((I2->parent_order), alg)); + quat_lattice_intersect(&inter->lattice, &I1->lattice, &I2->lattice); + inter->parent_order = I1->parent_order; + quat_lideal_norm(inter); +} + +int +quat_lideal_equals(const quat_left_ideal_t *I1, const quat_left_ideal_t *I2, const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((I2->parent_order), alg)); + assert(quat_order_is_maximal((I1->parent_order), alg)); + return (I1->parent_order == I2->parent_order) & (ibz_cmp(&I1->norm, &I2->norm) == 0) & + quat_lattice_equal(&I1->lattice, &I2->lattice); +} + +void +quat_lideal_inverse_lattice_without_hnf(quat_lattice_t *inv, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + quat_lattice_conjugate_without_hnf(inv, &(lideal->lattice)); + ibz_mul(&(inv->denom), &(inv->denom), &(lideal->norm)); +} + +// following the implementation of ideal isomorphisms in the code of LearningToSQI's sage +// implementation of SQIsign +void +quat_lideal_right_transporter(quat_lattice_t *trans, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal1->parent_order), alg)); + assert(quat_order_is_maximal((lideal2->parent_order), alg)); + assert(lideal1->parent_order == lideal2->parent_order); + quat_lattice_t inv; + quat_lattice_init(&inv); + quat_lideal_inverse_lattice_without_hnf(&inv, lideal1, alg); + quat_lattice_mul(trans, &inv, &(lideal2->lattice), alg); + quat_lattice_finalize(&inv); +} + +void +quat_lideal_right_order(quat_lattice_t *order, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + quat_lideal_right_transporter(order, lideal, lideal, alg); +} + +void +quat_lideal_class_gram(ibz_mat_4x4_t *G, const quat_left_ideal_t *lideal, const quat_alg_t *alg) +{ + quat_lattice_gram(G, &(lideal->lattice), alg); + + // divide by norm · denominator² + ibz_t divisor, rmd; + ibz_init(&divisor); + ibz_init(&rmd); + + ibz_mul(&divisor, &(lideal->lattice.denom), &(lideal->lattice.denom)); + ibz_mul(&divisor, &divisor, &(lideal->norm)); + + for (int i = 0; i < 4; i++) { + for (int j = 0; j <= i; j++) { + ibz_div(&(*G)[i][j], &rmd, &(*G)[i][j], &divisor); + assert(ibz_is_zero(&rmd)); + } + } + for (int i = 0; i < 4; i++) { + for (int j = 0; j <= i - 1; j++) { + ibz_copy(&(*G)[j][i], &(*G)[i][j]); + } + } + + ibz_finalize(&rmd); + ibz_finalize(&divisor); +} + +void +quat_lideal_conjugate_without_hnf(quat_left_ideal_t *conj, + quat_lattice_t *new_parent_order, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg) +{ + quat_lideal_right_order(new_parent_order, lideal, alg); + quat_lattice_conjugate_without_hnf(&(conj->lattice), &(lideal->lattice)); + conj->parent_order = new_parent_order; + ibz_copy(&(conj->norm), &(lideal->norm)); +} + +int +quat_order_discriminant(ibz_t *disc, const quat_lattice_t *order, const quat_alg_t *alg) +{ + int ok = 0; + ibz_t det, sqr, div; + ibz_mat_4x4_t transposed, norm, prod; + ibz_init(&det); + ibz_init(&sqr); + ibz_init(&div); + ibz_mat_4x4_init(&transposed); + ibz_mat_4x4_init(&norm); + ibz_mat_4x4_init(&prod); + ibz_mat_4x4_transpose(&transposed, &(order->basis)); + // multiply gram matrix by 2 because of reduced trace + 
ibz_mat_4x4_identity(&norm); + ibz_copy(&(norm[2][2]), &(alg->p)); + ibz_copy(&(norm[3][3]), &(alg->p)); + ibz_mat_4x4_scalar_mul(&norm, &ibz_const_two, &norm); + ibz_mat_4x4_mul(&prod, &transposed, &norm); + ibz_mat_4x4_mul(&prod, &prod, &(order->basis)); + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &prod); + ibz_mul(&div, &(order->denom), &(order->denom)); + ibz_mul(&div, &div, &div); + ibz_mul(&div, &div, &div); + ibz_div(&sqr, &div, &det, &div); + ok = ibz_is_zero(&div); + ok = ok & ibz_sqrt(disc, &sqr); + ibz_finalize(&det); + ibz_finalize(&div); + ibz_finalize(&sqr); + ibz_mat_4x4_finalize(&transposed); + ibz_mat_4x4_finalize(&norm); + ibz_mat_4x4_finalize(&prod); + return (ok); +} + +int +quat_order_is_maximal(const quat_lattice_t *order, const quat_alg_t *alg) +{ + int res; + ibz_t disc; + ibz_init(&disc); + quat_order_discriminant(&disc, order, alg); + res = (ibz_cmp(&disc, &(alg->p)) == 0); + ibz_finalize(&disc); + return (res); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/intbig.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/intbig.c new file mode 100644 index 0000000000..b0462dc8b5 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/intbig.c @@ -0,0 +1,791 @@ +#include "intbig_internal.h" +#include +#include +#include +#include +#include +#include + +// #define DEBUG_VERBOSE + +#ifdef DEBUG_VERBOSE +#define DEBUG_STR_PRINTF(x) printf("%s\n", (x)); + +static void +DEBUG_STR_FUN_INT_MP(const char *op, int arg1, const ibz_t *arg2) +{ + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + printf("%s,%x,%s\n", op, arg1, arg2_str); +} + +static void +DEBUG_STR_FUN_3(const char *op, const ibz_t *arg1, const ibz_t *arg2, const ibz_t *arg3) +{ + int arg1_size = ibz_size_in_base(arg1, 16); + char arg1_str[arg1_size + 2]; + ibz_convert_to_str(arg1, arg1_str, 16); + + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + int arg3_size = ibz_size_in_base(arg3, 16); + char arg3_str[arg3_size + 2]; + ibz_convert_to_str(arg3, arg3_str, 16); + + printf("%s,%s,%s,%s\n", op, arg1_str, arg2_str, arg3_str); +} + +static void +DEBUG_STR_FUN_MP2_INT(const char *op, const ibz_t *arg1, const ibz_t *arg2, int arg3) +{ + int arg1_size = ibz_size_in_base(arg1, 16); + char arg1_str[arg1_size + 2]; + ibz_convert_to_str(arg1, arg1_str, 16); + + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + printf("%s,%s,%s,%x\n", op, arg1_str, arg2_str, arg3); +} + +static void +DEBUG_STR_FUN_INT_MP2(const char *op, int arg1, const ibz_t *arg2, const ibz_t *arg3) +{ + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + int arg3_size = ibz_size_in_base(arg3, 16); + char arg3_str[arg3_size + 2]; + ibz_convert_to_str(arg3, arg3_str, 16); + + if (arg1 >= 0) + printf("%s,%x,%s,%s\n", op, arg1, arg2_str, arg3_str); + else + printf("%s,-%x,%s,%s\n", op, -arg1, arg2_str, arg3_str); +} + +static void +DEBUG_STR_FUN_INT_MP_INT(const char *op, int arg1, const ibz_t *arg2, int arg3) +{ + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + printf("%s,%x,%s,%x\n", op, arg1, arg2_str, arg3); +} + +static void +DEBUG_STR_FUN_4(const char *op, const ibz_t *arg1, const ibz_t *arg2, const ibz_t *arg3, const ibz_t *arg4) +{ + int arg1_size = 
ibz_size_in_base(arg1, 16); + char arg1_str[arg1_size + 2]; + ibz_convert_to_str(arg1, arg1_str, 16); + + int arg2_size = ibz_size_in_base(arg2, 16); + char arg2_str[arg2_size + 2]; + ibz_convert_to_str(arg2, arg2_str, 16); + + int arg3_size = ibz_size_in_base(arg3, 16); + char arg3_str[arg3_size + 2]; + ibz_convert_to_str(arg3, arg3_str, 16); + + int arg4_size = ibz_size_in_base(arg4, 16); + char arg4_str[arg4_size + 2]; + ibz_convert_to_str(arg4, arg4_str, 16); + + printf("%s,%s,%s,%s,%s\n", op, arg1_str, arg2_str, arg3_str, arg4_str); +} +#else +#define DEBUG_STR_PRINTF(x) +#define DEBUG_STR_FUN_INT_MP(op, arg1, arg2) +#define DEBUG_STR_FUN_3(op, arg1, arg2, arg3) +#define DEBUG_STR_FUN_INT_MP2(op, arg1, arg2, arg3) +#define DEBUG_STR_FUN_INT_MP_INT(op, arg1, arg2, arg3) +#define DEBUG_STR_FUN_4(op, arg1, arg2, arg3, arg4) +#endif + +/** @defgroup ibz_t Constants + * @{ + */ + +const __mpz_struct ibz_const_zero[1] = { + { + ._mp_alloc = 0, + ._mp_size = 0, + ._mp_d = (mp_limb_t[]){ 0 }, + } +}; + +const __mpz_struct ibz_const_one[1] = { + { + ._mp_alloc = 0, + ._mp_size = 1, + ._mp_d = (mp_limb_t[]){ 1 }, + } +}; + +const __mpz_struct ibz_const_two[1] = { + { + ._mp_alloc = 0, + ._mp_size = 1, + ._mp_d = (mp_limb_t[]){ 2 }, + } +}; + +const __mpz_struct ibz_const_three[1] = { + { + ._mp_alloc = 0, + ._mp_size = 1, + ._mp_d = (mp_limb_t[]){ 3 }, + } +}; + +void +ibz_init(ibz_t *x) +{ + mpz_init(*x); +} + +void +ibz_finalize(ibz_t *x) +{ + mpz_clear(*x); +} + +void +ibz_add(ibz_t *sum, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_add(*sum, *a, *b); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_add", sum, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_sub(ibz_t *diff, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_sub(*diff, *a, *b); + +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_sub", diff, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_mul(*prod, *a, *b); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_mul", prod, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_neg(ibz_t *neg, const ibz_t *a) +{ + mpz_neg(*neg, *a); +} + +void +ibz_abs(ibz_t *abs, const ibz_t *a) +{ + mpz_abs(*abs, *a); +} + +void +ibz_div(ibz_t *quotient, ibz_t *remainder, const ibz_t *a, const ibz_t *b) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp, b_cp; + ibz_init(&a_cp); + ibz_init(&b_cp); + ibz_copy(&a_cp, a); + ibz_copy(&b_cp, b); +#endif + mpz_tdiv_qr(*quotient, *remainder, *a, *b); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_4("ibz_div", quotient, remainder, &a_cp, &b_cp); + ibz_finalize(&a_cp); + ibz_finalize(&b_cp); +#endif +} + +void +ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp) +{ +#ifdef DEBUG_VERBOSE + ibz_t a_cp; + ibz_init(&a_cp); + ibz_copy(&a_cp, a); +#endif + mpz_tdiv_q_2exp(*quotient, *a, exp); +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_MP2_INT("ibz_div_2exp,%Zx,%Zx,%x\n", quotient, &a_cp, exp); + ibz_finalize(&a_cp); +#endif +} + +void +ibz_div_floor(ibz_t *q, ibz_t *r, const ibz_t *n, const ibz_t *d) +{ + mpz_fdiv_qr(*q, *r, *n, *d); +} + +void +ibz_mod(ibz_t 
*r, const ibz_t *a, const ibz_t *b) +{ + mpz_mod(*r, *a, *b); +} + +unsigned long int +ibz_mod_ui(const mpz_t *n, unsigned long int d) +{ + return mpz_fdiv_ui(*n, d); +} + +int +ibz_divides(const ibz_t *a, const ibz_t *b) +{ + return mpz_divisible_p(*a, *b); +} + +void +ibz_pow(ibz_t *pow, const ibz_t *x, uint32_t e) +{ + mpz_pow_ui(*pow, *x, e); +} + +void +ibz_pow_mod(ibz_t *pow, const ibz_t *x, const ibz_t *e, const ibz_t *m) +{ + mpz_powm(*pow, *x, *e, *m); + DEBUG_STR_FUN_4("ibz_pow_mod", pow, x, e, m); +} + +int +ibz_two_adic(ibz_t *pow) +{ + return mpz_scan1(*pow, 0); +} + +int +ibz_cmp(const ibz_t *a, const ibz_t *b) +{ + int ret = mpz_cmp(*a, *b); + DEBUG_STR_FUN_INT_MP2("ibz_cmp", ret, a, b); + return ret; +} + +int +ibz_is_zero(const ibz_t *x) +{ + int ret = !mpz_cmp_ui(*x, 0); + DEBUG_STR_FUN_INT_MP("ibz_is_zero", ret, x); + return ret; +} + +int +ibz_is_one(const ibz_t *x) +{ + int ret = !mpz_cmp_ui(*x, 1); + DEBUG_STR_FUN_INT_MP("ibz_is_one", ret, x); + return ret; +} + +int +ibz_cmp_int32(const ibz_t *x, int32_t y) +{ + int ret = mpz_cmp_si(*x, (signed long int)y); + DEBUG_STR_FUN_INT_MP_INT("ibz_cmp_int32", ret, x, y); + return ret; +} + +int +ibz_is_even(const ibz_t *x) +{ + int ret = !mpz_tstbit(*x, 0); + DEBUG_STR_FUN_INT_MP("ibz_is_even", ret, x); + return ret; +} + +int +ibz_is_odd(const ibz_t *x) +{ + int ret = mpz_tstbit(*x, 0); + DEBUG_STR_FUN_INT_MP("ibz_is_odd", ret, x); + return ret; +} + +void +ibz_set(ibz_t *i, int32_t x) +{ + mpz_set_si(*i, x); +} + +int +ibz_convert_to_str(const ibz_t *i, char *str, int base) +{ + if (!str || (base != 10 && base != 16)) + return 0; + + mpz_get_str(str, base, *i); + + return 1; +} + +void +ibz_print(const ibz_t *num, int base) +{ + assert(base == 10 || base == 16); + + int num_size = ibz_size_in_base(num, base); + char num_str[num_size + 2]; + ibz_convert_to_str(num, num_str, base); + printf("%s", num_str); +} + +int +ibz_set_from_str(ibz_t *i, const char *str, int base) +{ + return (1 + mpz_set_str(*i, str, base)); +} + +void +ibz_copy(ibz_t *target, const ibz_t *value) +{ + mpz_set(*target, *value); +} + +void +ibz_swap(ibz_t *a, ibz_t *b) +{ + mpz_swap(*a, *b); +} + +int32_t +ibz_get(const ibz_t *i) +{ +#if LONG_MAX == INT32_MAX + return (int32_t)mpz_get_si(*i); +#elif LONG_MAX > INT32_MAX + // Extracts the sign bit and the 31 least significant bits + signed long int t = mpz_get_si(*i); + return (int32_t)((t >> (sizeof(signed long int) * 8 - 32)) & INT32_C(0x80000000)) | (t & INT32_C(0x7FFFFFFF)); +#else +#error Unsupported configuration: LONG_MAX must be >= INT32_MAX +#endif +} + +int +ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b) +{ + int randret; + int ret = 1; + mpz_t tmp; + mpz_t bmina; + mpz_init(bmina); + mpz_sub(bmina, *b, *a); + + if (mpz_sgn(bmina) == 0) { + mpz_set(*rand, *a); + mpz_clear(bmina); + return 1; + } + + size_t len_bits = mpz_sizeinbase(bmina, 2); + size_t len_bytes = (len_bits + 7) / 8; + size_t sizeof_limb = sizeof(mp_limb_t); + size_t sizeof_limb_bits = sizeof_limb * 8; + size_t len_limbs = (len_bytes + sizeof_limb - 1) / sizeof_limb; + + mp_limb_t mask = ((mp_limb_t)-1) >> (sizeof_limb_bits - len_bits) % sizeof_limb_bits; + mp_limb_t r[len_limbs]; + +#ifndef NDEBUG + { + for (size_t i = 0; i < len_limbs; ++i) + r[i] = (mp_limb_t)-1; + r[len_limbs - 1] = mask; + mpz_t check; + mpz_roinit_n(check, r, len_limbs); + assert(mpz_cmp(check, bmina) >= 0); // max sampled value >= b - a + mpz_t bmina2; + mpz_init(bmina2); + mpz_add(bmina2, bmina, bmina); + assert(mpz_cmp(check, bmina2) < 
0); // max sampled value < 2 * (b - a) + mpz_clear(bmina2); + } +#endif + + do { + randret = randombytes((unsigned char *)r, len_bytes); + if (randret != 0) { + ret = 0; + goto err; + } +#ifdef TARGET_BIG_ENDIAN + for (size_t i = 0; i < len_limbs; ++i) + r[i] = BSWAP_DIGIT(r[i]); +#endif + r[len_limbs - 1] &= mask; + mpz_roinit_n(tmp, r, len_limbs); + if (mpz_cmp(tmp, bmina) <= 0) + break; + } while (1); + + mpz_add(*rand, tmp, *a); +err: + mpz_clear(bmina); + return ret; +} + +int +ibz_rand_interval_i(ibz_t *rand, int32_t a, int32_t b) +{ + uint32_t diff, mask; + int32_t rand32; + + if (!(a >= 0 && b >= 0 && b > a)) { + printf("a = %d b = %d\n", a, b); + } + assert(a >= 0 && b >= 0 && b > a); + + diff = b - a; + + // Create a mask with 1 + ceil(log2(diff)) least significant bits set +#if (defined(__GNUC__) || defined(__clang__)) && INT_MAX == INT32_MAX + mask = (1 << (32 - __builtin_clz((uint32_t)diff))) - 1; +#else + uint32_t diff2 = diff, tmp; + + mask = (diff2 > 0xFFFF) << 4; + diff2 >>= mask; + + tmp = (diff2 > 0xFF) << 3; + diff2 >>= tmp; + mask |= tmp; + + tmp = (diff2 > 0xF) << 2; + diff2 >>= tmp; + mask |= tmp; + + tmp = (diff2 > 0x3) << 1; + diff2 >>= tmp; + mask |= tmp; + + mask |= diff2 >> 1; + + mask = (1 << (mask + 1)) - 1; +#endif + + assert(mask >= diff && mask < 2 * diff); + + // Rejection sampling + do { + randombytes((unsigned char *)&rand32, sizeof(rand32)); + +#ifdef TARGET_BIG_ENDIAN + rand32 = BSWAP32(rand32); +#endif + + rand32 &= mask; + } while (rand32 > (int32_t)diff); + + rand32 += a; + ibz_set(rand, rand32); + + return 1; +} + +int +ibz_rand_interval_minm_m(ibz_t *rand, int32_t m) +{ + int ret = 1; + mpz_t m_big; + + // m_big = 2 * m + mpz_init_set_si(m_big, m); + mpz_add(m_big, m_big, m_big); + + // Sample in [0, 2*m] + ret = ibz_rand_interval(rand, &ibz_const_zero, &m_big); + + // Adjust to range [-m, m] + mpz_sub_ui(*rand, *rand, m); + + mpz_clear(m_big); + + return ret; +} + +int +ibz_rand_interval_bits(ibz_t *rand, uint32_t m) +{ + int ret = 1; + mpz_t tmp; + mpz_t low; + mpz_init_set_ui(tmp, 1); + mpz_mul_2exp(tmp, tmp, m); + mpz_init(low); + mpz_neg(low, tmp); + ret = ibz_rand_interval(rand, &low, &tmp); + mpz_clear(tmp); + mpz_clear(low); + if (ret != 1) + goto err; + mpz_sub_ui(*rand, *rand, (unsigned long int)m); + return ret; +err: + mpz_clear(tmp); + mpz_clear(low); + return ret; +} + +int +ibz_bitsize(const ibz_t *a) +{ + return (int)mpz_sizeinbase(*a, 2); +} + +int +ibz_size_in_base(const ibz_t *a, int base) +{ + return (int)mpz_sizeinbase(*a, base); +} + +void +ibz_copy_digits(ibz_t *target, const digit_t *dig, int dig_len) +{ + mpz_import(*target, dig_len, -1, sizeof(digit_t), 0, 0, dig); +} + +void +ibz_to_digits(digit_t *target, const ibz_t *ibz) +{ + // From the GMP documentation: + // "If op is zero then the count returned will be zero and nothing written to rop." + // The next line ensures zero is written to the first limb of target if ibz is zero; + // target is then overwritten by the actual value if it is not. + target[0] = 0; + mpz_export(target, NULL, -1, sizeof(digit_t), 0, 0, *ibz); +} + +int +ibz_probab_prime(const ibz_t *n, int reps) +{ + int ret = mpz_probab_prime_p(*n, reps); + DEBUG_STR_FUN_INT_MP_INT("ibz_probab_prime", ret, n, reps); + return ret; +} + +void +ibz_gcd(ibz_t *gcd, const ibz_t *a, const ibz_t *b) +{ + mpz_gcd(*gcd, *a, *b); +} + +int +ibz_invmod(ibz_t *inv, const ibz_t *a, const ibz_t *mod) +{ + return (mpz_invert(*inv, *a, *mod) ? 
1 : 0); +} + +int +ibz_legendre(const ibz_t *a, const ibz_t *p) +{ + return mpz_legendre(*a, *p); +} + +int +ibz_sqrt(ibz_t *sqrt, const ibz_t *a) +{ + if (mpz_perfect_square_p(*a)) { + mpz_sqrt(*sqrt, *a); + return 1; + } else { + return 0; + } +} + +void +ibz_sqrt_floor(ibz_t *sqrt, const ibz_t *a) +{ + mpz_sqrt(*sqrt, *a); +} + +int +ibz_sqrt_mod_p(ibz_t *sqrt, const ibz_t *a, const ibz_t *p) +{ +#ifndef NDEBUG + assert(ibz_probab_prime(p, 100)); +#endif + // Case a = 0 + { + ibz_t test; + ibz_init(&test); + ibz_mod(&test, a, p); + if (ibz_is_zero(&test)) { + ibz_set(sqrt, 0); + } + ibz_finalize(&test); + } +#ifdef DEBUG_VERBOSE + ibz_t a_cp, p_cp; + ibz_init(&a_cp); + ibz_init(&p_cp); + ibz_copy(&a_cp, a); + ibz_copy(&p_cp, p); +#endif + + mpz_t amod, tmp, exp, a4, a2, q, z, qnr, x, y, b, pm1; + mpz_init(amod); + mpz_init(tmp); + mpz_init(exp); + mpz_init(a4); + mpz_init(a2); + mpz_init(q); + mpz_init(z); + mpz_init(qnr); + mpz_init(x); + mpz_init(y); + mpz_init(b); + mpz_init(pm1); + + int ret = 1; + + mpz_mod(amod, *a, *p); + if (mpz_cmp_ui(amod, 0) < 0) { + mpz_add(amod, *p, amod); + } + + if (mpz_legendre(amod, *p) != 1) { + ret = 0; + goto end; + } + + mpz_sub_ui(pm1, *p, 1); + + if (mpz_mod_ui(tmp, *p, 4) == 3) { + // p % 4 == 3 + mpz_add_ui(tmp, *p, 1); + mpz_fdiv_q_2exp(tmp, tmp, 2); + mpz_powm(*sqrt, amod, tmp, *p); + } else if (mpz_mod_ui(tmp, *p, 8) == 5) { + // p % 8 == 5 + mpz_sub_ui(tmp, *p, 1); + mpz_fdiv_q_2exp(tmp, tmp, 2); + mpz_powm(tmp, amod, tmp, *p); // a^{(p-1)/4} mod p + if (!mpz_cmp_ui(tmp, 1)) { + mpz_add_ui(tmp, *p, 3); + mpz_fdiv_q_2exp(tmp, tmp, 3); + mpz_powm(*sqrt, amod, tmp, *p); // a^{(p+3)/8} mod p + } else { + mpz_sub_ui(tmp, *p, 5); + mpz_fdiv_q_2exp(tmp, tmp, 3); // (p - 5) / 8 + mpz_mul_2exp(a4, amod, 2); // 4*a + mpz_powm(tmp, a4, tmp, *p); + + mpz_mul_2exp(a2, amod, 1); + mpz_mul(tmp, a2, tmp); + mpz_mod(*sqrt, tmp, *p); + } + } else { + // p % 8 == 1 -> Shanks-Tonelli + int e = 0; + mpz_sub_ui(q, *p, 1); + while (mpz_tstbit(q, e) == 0) + e++; + mpz_fdiv_q_2exp(q, q, e); + + // 1. find generator - non-quadratic residue + mpz_set_ui(qnr, 2); + while (mpz_legendre(qnr, *p) != -1) + mpz_add_ui(qnr, qnr, 1); + mpz_powm(z, qnr, q, *p); + + // 2. 
Initialize + mpz_set(y, z); + mpz_powm(y, amod, q, *p); // y = a^q mod p + + mpz_add_ui(tmp, q, 1); // tmp = (q + 1) / 2 + mpz_fdiv_q_2exp(tmp, tmp, 1); + + mpz_powm(x, amod, tmp, *p); // x = a^(q + 1)/2 mod p + + mpz_set_ui(exp, 1); + mpz_mul_2exp(exp, exp, e - 2); + + for (int i = 0; i < e; ++i) { + mpz_powm(b, y, exp, *p); + + if (!mpz_cmp(b, pm1)) { + mpz_mul(x, x, z); + mpz_mod(x, x, *p); + + mpz_mul(y, y, z); + mpz_mul(y, y, z); + mpz_mod(y, y, *p); + } + + mpz_powm_ui(z, z, 2, *p); + mpz_fdiv_q_2exp(exp, exp, 1); + } + + mpz_set(*sqrt, x); + } + +#ifdef DEBUG_VERBOSE + DEBUG_STR_FUN_3("ibz_sqrt_mod_p", sqrt, &a_cp, &p_cp); + ibz_finalize(&a_cp); + ibz_finalize(&p_cp); +#endif + +end: + mpz_clear(amod); + mpz_clear(tmp); + mpz_clear(exp); + mpz_clear(a4); + mpz_clear(a2); + mpz_clear(q); + mpz_clear(z); + mpz_clear(qnr); + mpz_clear(x); + mpz_clear(y); + mpz_clear(b); + mpz_clear(pm1); + + return ret; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/intbig_internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/intbig_internal.h new file mode 100644 index 0000000000..de4762a6d3 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/intbig_internal.h @@ -0,0 +1,123 @@ +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations for big integer functions only used in quaternion functions + */ + +#ifndef INTBIG_INTERNAL_H +#define INTBIG_INTERNAL_H + +#include "intbig.h" + +/** @internal + * @ingroup quat_helpers + * @defgroup ibz_helper Internal integer functions (gmp-based) + * @{ + */ + +/********************************************************************/ + +/** @brief Euclidean division of a by b + * + * Computes quotient, remainder so that remainder+quotient*b = a where 0<=|remainder|<|b| + * The quotient is rounded towards minus infinity. + */ +void ibz_div_floor(ibz_t *q, ibz_t *r, const ibz_t *n, const ibz_t *d); + +/** @brief generate random value in [a, b] + * assumed that a >= 0, b >= 0 and a < b + * @returns 1 on success, 0 on failure + */ +int ibz_rand_interval_i(ibz_t *rand, int32_t a, int32_t b); + +/** @brief generate random value in [-2^m, 2^m] + * assumed that m > 0 and bitlength of m < 32 bit + * @returns 1 on success, 0 on failure + */ +int ibz_rand_interval_bits(ibz_t *rand, uint32_t m); + +/** @brief set str to a string containing the representation of i in base + * + * Base should be 10 or 16 + * + * str should be an array of length enough to store the representation of i + * in base, which can be obtained by ibz_sizeinbase(i, base) + 2, where the 2 + * is for the sign and the null terminator + * + * Case for base 16 does not matter + * + * @returns 1 if the integer could be converted to a string, 0 otherwise + */ +int ibz_convert_to_str(const ibz_t *i, char *str, int base); + +/** @brief print num in base to stdout + * + * Base should be 10 or 16 + */ +void ibz_print(const ibz_t *num, int base); + +/** @brief set i to integer contained in string when read as number in base + * + * Base should be 10 or 16, and the number should be written without punctuation or whitespaces + * + * Case for base 16 does not matter + * + * @returns 1 if the string could be converted to an integer, 0 otherwise + */ +int ibz_set_from_str(ibz_t *i, const char *str, int base); + +/** + * @brief Probabilistic primality test + * + * @param n The number to test + * @param reps Number of Miller-Rabin repetitions.
The more, the slower and the less likely are + * false positives + * @return 1 if probably prime, 0 if certainly not prime, 2 if certainly prime + * + * Using GMP's implementation: + * + * From GMP's documentation: "This function performs some trial divisions, a Baillie-PSW probable + * prime test, then reps-24 Miller-Rabin probabilistic primality tests." + */ +int ibz_probab_prime(const ibz_t *n, int reps); + +/** + * @brief Square root modulo a prime + * + * @returns 1 if square root of a mod p exists and was computed, 0 otherwise + * @param sqrt Output: Set to a square root of a mod p if any exist + * @param a number of which a square root mod p is searched + * @param p assumed prime + */ +int ibz_sqrt_mod_p(ibz_t *sqrt, const ibz_t *a, const ibz_t *p); + +/** + * @brief Integer square root of a perfect square + * + * @returns 1 if an integer square root of a exists and was computed, 0 otherwise + * @param sqrt Output: Set to a integer square root of a if any exist + * @param a number of which an integer square root is searched + */ +int ibz_sqrt(ibz_t *sqrt, const ibz_t *a); + +/** + * @brief Legendre symbol of a mod p + * + * @returns Legendre symbol of a mod p + * @param a + * @param p assumed prime + * + * Uses GMP's implementation + * + * If output is 1, a is a square mod p, if -1, not. If 0, it is divisible by p + */ +int ibz_legendre(const ibz_t *a, const ibz_t *p); + +/** @} + */ + +// end of ibz_all +/** @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/integers.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/integers.c new file mode 100644 index 0000000000..ec7cda05eb --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/integers.c @@ -0,0 +1,116 @@ +#include +#include "internal.h" +#include +#include +#include + +// Random prime generation for tests +int +ibz_generate_random_prime(ibz_t *p, int is3mod4, int bitsize, int probability_test_iterations) +{ + assert(bitsize != 0); + int found = 0; + ibz_t two_pow, two_powp; + + ibz_init(&two_pow); + ibz_init(&two_powp); + ibz_pow(&two_pow, &ibz_const_two, (bitsize - 1) - (0 != is3mod4)); + ibz_pow(&two_powp, &ibz_const_two, bitsize - (0 != is3mod4)); + + int cnt = 0; + while (!found) { + cnt++; + if (cnt % 100000 == 0) { + printf("Random prime generation is still running after %d attempts, this is not " + "normal! 
The expected number of attempts is %d \n", + cnt, + bitsize); + } + ibz_rand_interval(p, &two_pow, &two_powp); + ibz_add(p, p, p); + if (is3mod4) { + ibz_add(p, p, p); + ibz_add(p, &ibz_const_two, p); + } + ibz_add(p, &ibz_const_one, p); + + found = ibz_probab_prime(p, probability_test_iterations); + } + ibz_finalize(&two_pow); + ibz_finalize(&two_powp); + return found; +} + +// solves x^2 + n y^2 == p for positive integers x, y +// assumes that p is prime and -n mod p is a square +int +ibz_cornacchia_prime(ibz_t *x, ibz_t *y, const ibz_t *n, const ibz_t *p) +{ + ibz_t r0, r1, r2, a, prod; + ibz_init(&r0); + ibz_init(&r1); + ibz_init(&r2); + ibz_init(&a); + ibz_init(&prod); + + int res = 0; + + // manage case p = 2 separately + if (!ibz_cmp(p, &ibz_const_two)) { + if (ibz_is_one(n)) { + ibz_set(x, 1); + ibz_set(y, 1); + res = 1; + } + goto done; + } + // manage case p = n separately + if (!ibz_cmp(p, n)) { + ibz_set(x, 0); + ibz_set(y, 1); + res = 1; + goto done; + } + + // test coprimality (should always be ok in our cases) + ibz_gcd(&r2, p, n); + if (!ibz_is_one(&r2)) + goto done; + + // get sqrt of -n mod p + ibz_neg(&r2, n); + if (!ibz_sqrt_mod_p(&r2, &r2, p)) + goto done; + + // run loop + ibz_copy(&prod, p); + ibz_copy(&r1, p); + ibz_copy(&r0, p); + while (ibz_cmp(&prod, p) >= 0) { + ibz_div(&a, &r0, &r2, &r1); + ibz_mul(&prod, &r0, &r0); + ibz_copy(&r2, &r1); + ibz_copy(&r1, &r0); + } + // test if result is solution + ibz_sub(&a, p, &prod); + ibz_div(&a, &r2, &a, n); + if (!ibz_is_zero(&r2)) + goto done; + if (!ibz_sqrt(y, &a)) + goto done; + + ibz_copy(x, &r0); + ibz_mul(&a, y, y); + ibz_mul(&a, &a, n); + ibz_add(&prod, &prod, &a); + res = !ibz_cmp(&prod, p); + +done: + ibz_finalize(&r0); + ibz_finalize(&r1); + ibz_finalize(&r2); + ibz_finalize(&a); + ibz_finalize(&prod); + return res; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/internal.h new file mode 100644 index 0000000000..edbba345f9 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/internal.h @@ -0,0 +1,812 @@ +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations for helper functions for quaternion algebra implementation + */ + +#ifndef QUAT_HELPER_H +#define QUAT_HELPER_H + +#include +#include +#include "intbig_internal.h" + +/** @internal + * @ingroup quat_quat + * @defgroup quat_helpers Quaternion module internal functions + * @{ + */ + +/** @internal + * @defgroup quat_alg_helpers Helper functions for the alg library + * @{ + */ + +/** @internal + * @brief helper function for initializing small quaternion algebras. 
+ */ +void quat_alg_init_set_ui(quat_alg_t *alg, + unsigned int p); // test/lattice, test/ideal, test/algebra + +/** @brief a*b + * + * Multiply two coordinate vectors as elements of the algebra in basis (1,i,j,ij) with i^2 = -1, j^2 + * = -p + * + * @param res Output: Will contain product + * @param a + * @param b + * @param alg The quaternion algebra + */ +void quat_alg_coord_mul(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b, const quat_alg_t *alg); + +/** @brief a=b + * + * Test if a and b represent the same quaternion algebra element + * + * @param a + * @param b + * @returns 1 if a=b, 0 otherwise + */ +int quat_alg_elem_equal(const quat_alg_elem_t *a, const quat_alg_elem_t *b); + +/** @brief Test if x is 0 + * + * @returns 1 if x=0, 0 otherwise + * + * x is 0 iff all coordinates in x->coord are 0 + */ +int quat_alg_elem_is_zero(const quat_alg_elem_t *x); + +/** @brief Compute same denominator form of two quaternion algebra elements + * + * res_a=a and res_b=b (representing the same element) and res_a.denom = res_b.denom + * + * @param res_a + * @param res_b + * @param a + * @param b + */ +void quat_alg_equal_denom(quat_alg_elem_t *res_a, + quat_alg_elem_t *res_b, + const quat_alg_elem_t *a, + const quat_alg_elem_t *b); + +/** @brief Copies the given values into an algebra element, without normalizing it + * + * @param elem Output: algebra element of coordinates [coord0,coord1,coord2,coord3] and denominator + * denom + * @param denom Denominator, must be non zero + * @param coord0 Coordinate on 1 (0th vector of standard algebra basis) + * @param coord1 Coordinate on i (1st vector of standard algebra basis) + * @param coord2 Coordinate on j (2nd vector of standard algebra basis) + * @param coord3 Coordinate on ij (3rd vector of standard algebra basis) + */ +void quat_alg_elem_copy_ibz(quat_alg_elem_t *elem, + const ibz_t *denom, + const ibz_t *coord0, + const ibz_t *coord1, + const ibz_t *coord2, + const ibz_t *coord3); + +/** @brief Sets an algebra element to the given integer values, without normalizing it + * + * @param elem Output: algebra element of coordinates [coord0,coord1,coord2,coord3] and denominator + * denom + * @param denom Denominator, must be non zero + * @param coord0 Coordinate on 1 (0th vector of standard algebra basis) + * @param coord1 Coordinate on i (1st vector of standard algebra basis) + * @param coord2 Coordinate on j (2nd vector of standard algebra basis) + * @param coord3 Coordinate on ij (3rd vector of standard algebra basis) + */ +void quat_alg_elem_set(quat_alg_elem_t *elem, + int32_t denom, + int32_t coord0, + int32_t coord1, + int32_t coord2, + int32_t coord3); + +/** + * @brief Creates algebra element from scalar + * + * Resulting element has 1-coordinate equal to numerator/denominator + * + * @param elem Output: algebra element with numerator/denominator as first coordiante + * (1-coordinate), 0 elsewhere (i,j,ij coordinates) + * @param numerator + * @param denominator Assumed non zero + */ +void quat_alg_scalar(quat_alg_elem_t *elem, const ibz_t *numerator, const ibz_t *denominator); + +/** @brief a+b for algebra elements + * + * @param res Output + * @param a Algebra element + * @param b Algebra element + */ +void quat_alg_add(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b); + +/** @brief a-b for algebra elements + * + * @param res Output + * @param a Algebra element + * @param b Algebra element + */ +void quat_alg_sub(quat_alg_elem_t *res, const quat_alg_elem_t *a, const quat_alg_elem_t *b); + +/** @brief 
Multiplies algebra element by integer scalar, without normalizing it + * + * @param res Output + * @param scalar Integer + * @param elem Algebra element + */ +void quat_alg_elem_mul_by_scalar(quat_alg_elem_t *res, const ibz_t *scalar, const quat_alg_elem_t *elem); + +/** @} + */ + +/** @internal + * @defgroup quat_dim4_helpers Helper functions for functions for matrices or vectors in dimension 4 + * @{ + */ + +/** @internal + * @defgroup quat_inv_helpers Helper functions for the integer matrix inversion function + * @{ + */ + +/** @brief a1a2+b1b2+c1c2 + * + * @param coeff Output: The coefficien which was computed as a1a2+b1b2-c1c2 + * @param a1 + * @param a2 + * @param b1 + * @param b2 + * @param c1 + * @param c2 + */ +void ibz_inv_dim4_make_coeff_pmp(ibz_t *coeff, + const ibz_t *a1, + const ibz_t *a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2); + +/** @brief -a1a2+b1b2-c1c2 + * + * @param coeff Output: The coefficien which was computed as -a1a2+b1b2-c1c2 + * @param a1 + * @param a2 + * @param b1 + * @param b2 + * @param c1 + * @param c2 + */ +void ibz_inv_dim4_make_coeff_mpm(ibz_t *coeff, + const ibz_t *a1, + const ibz_t *a2, + const ibz_t *b1, + const ibz_t *b2, + const ibz_t *c1, + const ibz_t *c2); + +/** @brief Matrix determinant and a matrix inv such that inv/det is the inverse matrix of the input + * + * Implemented following the methof of 2x2 minors explained at Method from + * https://www.geometrictools.com/Documentation/LaplaceExpansionTheorem.pdf (visited on 3rd of May + * 2023, 16h15 CEST) + * + * @returns 1 if the determinant of mat is not 0 and an inverse was computed, 0 otherwise + * @param inv Output: Will contain an integer matrix which, dividet by det, will yield the rational + * inverse of the matrix if it exists, can be NULL + * @param det Output: Will contain the determinant of the input matrix, can be NULL + * @param mat Matrix of which the inverse will be computed + */ +int ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_4x4_t *mat); + +/** @} + */ + +/** @internal + * @defgroup quat_dim4_lat_helpers Helper functions on vectors and matrices used mainly for lattices + * @{ + */ + +/** @brief Copy all values from one vector to another + * + * @param new Output: is set to same values as vec + * @param vec + */ +void ibz_vec_4_copy(ibz_vec_4_t *new, const ibz_vec_4_t *vec); + +/** @brief set res to values coord0,coord1,coord2,coord3 + * + * @param res Output: Will contain vector (coord0,coord1,coord2,coord3) + * @param coord0 + * @param coord1 + * @param coord2 + * @param coord3 + */ +void ibz_vec_4_copy_ibz(ibz_vec_4_t *res, + const ibz_t *coord0, + const ibz_t *coord1, + const ibz_t *coord2, + const ibz_t *coord3); + +/** @brief Set a vector of 4 integers to given values + * + * @param vec Output: is set to given coordinates + * @param coord0 + * @param coord1 + * @param coord2 + * @param coord3 + */ +void ibz_vec_4_set(ibz_vec_4_t *vec, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3); + +/** @brief a+b + * + * Add two integer 4-vectors + * + * @param res Output: Will contain sum + * @param a + * @param b + */ +void ibz_vec_4_add(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b); + +/** @brief a-b + * + * Substract two integer 4-vectors + * + * @param res Output: Will contain difference + * @param a + * @param b + */ +void ibz_vec_4_sub(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b); + +/** @brief x=0 + * + * Test if a vector x has only zero coordinates + * + * 
@returns 0 if x has at least one non-zero coordinates, 1 otherwise + * @param x + */ +int ibz_vec_4_is_zero(const ibz_vec_4_t *x); + +/** @brief Compute the linear combination lc = coeff_a vec_a + coeff_b vec_b + * + * @param lc Output: linear combination lc = coeff_a vec_a + coeff_b vec_b + * @param coeff_a Scalar multiplied to vec_a + * @param vec_a + * @param coeff_b Scalar multiplied to vec_b + * @param vec_b + */ +void ibz_vec_4_linear_combination(ibz_vec_4_t *lc, + const ibz_t *coeff_a, + const ibz_vec_4_t *vec_a, + const ibz_t *coeff_b, + const ibz_vec_4_t *vec_b); + +/** @brief multiplies all values in vector by same scalar + * + * @param prod Output + * @param scalar + * @param vec + */ +void ibz_vec_4_scalar_mul(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec); + +/** @brief divides all values in vector by same scalar + * + * @returns 1 if scalar divided all values in mat, 0 otherwise (division is performed in both cases) + * @param quot Output + * @param scalar + * @param vec + */ +int ibz_vec_4_scalar_div(ibz_vec_4_t *quot, const ibz_t *scalar, const ibz_vec_4_t *vec); + +/** @brief Negation for vectors of 4 integers + * + * @param neg Output: is set to -vec + * @param vec + */ +void ibz_vec_4_negate(ibz_vec_4_t *neg, const ibz_vec_4_t *vec); + +/** + * @brief content of a 4-vector of integers + * + * The content is the GCD of all entries. + * + * @param v A 4-vector of integers + * @param content Output: the resulting gcd + */ +void ibz_vec_4_content(ibz_t *content, const ibz_vec_4_t *v); + +/** @brief -mat for mat a 4x4 integer matrix + * + * @param neg Output: is set to -mat + * @param mat Input matrix + */ +void ibz_mat_4x4_negate(ibz_mat_4x4_t *neg, const ibz_mat_4x4_t *mat); + +/** @brief Set all coefficients of a matrix to zero for 4x4 integer matrices + * + * @param zero + */ +void ibz_mat_4x4_zero(ibz_mat_4x4_t *zero); + +/** @brief Set a matrix to the identity for 4x4 integer matrices + * + * @param id + */ +void ibz_mat_4x4_identity(ibz_mat_4x4_t *id); + +/** @brief Test equality to identity for 4x4 integer matrices + * + * @returns 1 if mat is the identity matrix, 0 otherwise + * @param mat + */ +int ibz_mat_4x4_is_identity(const ibz_mat_4x4_t *mat); + +/** @brief Equality test for 4x4 integer matrices + * + * @returns 1 if equal, 0 otherwise + * @param mat1 + * @param mat2 + */ +int ibz_mat_4x4_equal(const ibz_mat_4x4_t *mat1, const ibz_mat_4x4_t *mat2); + +/** @brief Copies all values from a 4x4 integer matrix to another one + * + * @param new Output: matrix which will have its entries set to mat's entries + * @param mat Input matrix + */ +void ibz_mat_4x4_copy(ibz_mat_4x4_t *new, const ibz_mat_4x4_t *mat); + +/** @brief Matrix by integer multiplication + * + * @param prod Output + * @param scalar + * @param mat + */ +void ibz_mat_4x4_scalar_mul(ibz_mat_4x4_t *prod, const ibz_t *scalar, const ibz_mat_4x4_t *mat); + +/** @brief gcd of all values in matrix + * + * @param gcd Output + * @param mat + */ +void ibz_mat_4x4_gcd(ibz_t *gcd, const ibz_mat_4x4_t *mat); + +/** @brief Verifies whether the 4x4 input matrix is in Hermite Normal Form + * + * @returns 1 if mat is in HNF, 0 otherwise + * @param mat Matrix to be tested + */ +int ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat); + +/** @brief Hermite Normal Form of a matrix of 8 integer vectors, computed using a multiple of its + * determinant as modulo + * + * Algorithm used is the one at number 2.4.8 in Henri Cohen's "A Course in Computational Algebraic + * Number Theory" (Springer Verlag, in series 
"Graduate texts in Mathematics") from 1993 + * + * @param hnf Output: Matrix in Hermite Normal Form generating the same lattice as generators + * @param generators matrix whose colums generate the same lattice than the output + * @param generator_number number of generators given + * @param mod integer, must be a multiple of the volume of the lattice generated by the columns of + * generators + */ +void ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, + int generator_number, + const ibz_vec_4_t *generators, + const ibz_t *mod); + +/** @} + */ +/** @} + */ + +/** @internal + * @defgroup quat_dim2_helpers Helper functions for dimension 2 + * @{ + */ + +/** @brief Set vector coefficients to the given integers + * + * @param vec Output: Vector + * @param a0 + * @param a1 + */ +void ibz_vec_2_set(ibz_vec_2_t *vec, int a0, int a1); // test/dim2 + +/** @brief Set matrix coefficients to the given integers + * + * @param mat Output: Matrix + * @param a00 + * @param a01 + * @param a10 + * @param a11 + */ +void ibz_mat_2x2_set(ibz_mat_2x2_t *mat, int a00, int a01, int a10, int a11); // test/dim2 + +void ibz_mat_2x2_add(ibz_mat_2x2_t *sum, const ibz_mat_2x2_t *a, + const ibz_mat_2x2_t *b); // unused + +/** @brief Determinant of a 2x2 integer matrix given as 4 integers + * + * @param det Output: Determinant of the matrix + * @param a11 matrix coefficient (upper left corner) + * @param a12 matrix coefficient (upper right corner) + * @param a21 matrix coefficient (lower left corner) + * @param a22 matrix coefficient (lower right corner) + */ +void ibz_mat_2x2_det_from_ibz(ibz_t *det, + const ibz_t *a11, + const ibz_t *a12, + const ibz_t *a21, + const ibz_t *a22); // dim4 + +/** + * @brief a*b for 2x2 integer matrices modulo m + * + * @param prod Output matrix + * @param mat_a Input matrix + * @param mat_b Input matrix + * @param m Integer modulo + */ +void ibz_2x2_mul_mod(ibz_mat_2x2_t *prod, + const ibz_mat_2x2_t *mat_a, + const ibz_mat_2x2_t *mat_b, + const ibz_t *m); // test/dim2 +/** @} + */ + +/** @internal + * @defgroup quat_lattice_helper Helper functions for the lattice library (dimension 4) + * @{ + */ + +/** + * @brief Modifies a lattice to put it in hermite normal form + * + * In-place modification of the lattice. + * + * @param lat input lattice + * + * On a correct lattice this function changes nothing (since it is already in HNF), but it can be + * used to put a handmade one in correct form in order to use the other lattice functions. + */ +void quat_lattice_hnf(quat_lattice_t *lat); // lattice, test/lattice, test/algebra, + +/** + * @brief Lattice equality + * + * Lattice bases are assumed to be under HNF, but denominators are free. + * + * @returns 1 if both lattices are equal, 0 otherwise + * @param lat1 + * @param lat2 + */ +int quat_lattice_equal(const quat_lattice_t *lat1, + const quat_lattice_t *lat2); // ideal, lattice, test/lattice, test/ideal + +/** + * @brief Lattice inclusion test + * + * Lattice bases are assumed to be under HNF, but denominators are free. 
+ *
+ * @returns 1 if sublat is included in overlat, 0 otherwise
+ * @param sublat Lattice whose inclusion in overlat will be tested
+ * @param overlat
+ */
+int quat_lattice_inclusion(const quat_lattice_t *sublat,
+                           const quat_lattice_t *overlat); // test/lattice, test/ideal
+
+/** @brief Divides basis and denominator of a lattice by their gcd
+ *
+ * @param reduced Output
+ * @param lat Lattice
+ */
+void quat_lattice_reduce_denom(quat_lattice_t *reduced,
+                               const quat_lattice_t *lat); // lattice, ideal,
+
+/** @brief a+b for lattices
+ *
+ * @param res Output
+ * @param lat1 Lattice
+ * @param lat2 Lattice
+ */
+void quat_lattice_add(quat_lattice_t *res,
+                      const quat_lattice_t *lat1,
+                      const quat_lattice_t *lat2); // ideal, lattice, test/lattice
+
+/** @brief a*b for lattices
+ *
+ * @param res Output
+ * @param lat1 Lattice
+ * @param lat2 Lattice
+ * @param alg The quaternion algebra
+ */
+void quat_lattice_mul(quat_lattice_t *res,
+                      const quat_lattice_t *lat1,
+                      const quat_lattice_t *lat2,
+                      const quat_alg_t *alg); // ideal, lattice, test/ideal, test/lattice
+
+/**
+ * @brief Computes the dual lattice of lat, without putting its basis in HNF
+ *
+ * This function returns a lattice not under HNF. For careful internal use only.
+ *
+ * Computation method described in https://cseweb.ucsd.edu/classes/sp14/cse206A-a/lec4.pdf consulted
+ * on 19 of May 2023, 12h40 CEST
+ *
+ * @param dual Output: The dual lattice of lat. ATTENTION: it is not under HNF. HNF computation must
+ * be applied before using lattice functions on it
+ * @param lat Lattice whose dual will be computed
+ */
+void quat_lattice_dual_without_hnf(quat_lattice_t *dual,
+                                   const quat_lattice_t *lat); // lattice, ideal
+
+/**
+ * @brief Multiply all columns of lat with coord (as algebra elements)
+ *
+ * The columns and coord are seen as algebra elements in basis (1,i,j,ij), where i^2 = -1 and
+ * j^2 = -p. Coord is multiplied to the right of lat.
+ *
+ * The output matrix is not under HNF.
+ *
+ * @param prod Output: Matrix not under HNF whose columns represent the algebra elements obtained as
+ * L*coord for each column L of lat.
+ * @param lat Matrix whose columns are algebra elements in basis (1,i,j,ij)
+ * @param coord Integer coordinate algebra element in basis (1,i,j,ij)
+ * @param alg The quaternion algebra
+ */
+void quat_lattice_mat_alg_coord_mul_without_hnf(ibz_mat_4x4_t *prod,
+                                                const ibz_mat_4x4_t *lat,
+                                                const ibz_vec_4_t *coord,
+                                                const quat_alg_t *alg); // lattice
+
+/** @brief The index of sublat in overlat
+ *
+ * Assumes inputs are in HNF.
+ *
+ * @param index Output
+ * @param sublat A lattice in HNF, must be sublattice of overlat
+ * @param overlat A lattice in HNF, must be overlattice of sublat
+ */
+void quat_lattice_index(ibz_t *index, const quat_lattice_t *sublat,
+                        const quat_lattice_t *overlat); // ideal
+
+/** @brief Compute the Gram matrix of the quaternion trace bilinear form
+ *
+ * Given a lattice of the quaternion algebra, computes the Gram matrix
+ * of the bilinear form
+ *
+ *     〈a,b〉 := [lattice->denom^2] Tr(a·conj(b))
+ *
+ * multiplied by the square of the denominator of the lattice.
+ *
+ * This matrix always has integer entries.
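A minimal sketch of how this Gram matrix is consumed (hypothetical helper name; quat_qf_eval is assumed to be the quadratic-form evaluation used later in this patch, e.g. in lat_ball.c): evaluating x^T G x at a coordinate vector x yields 2 * denom^2 times the reduced norm of the corresponding lattice element, which is exactly the scaling corrected for in quat_lattice_sample_from_ball.

static void element_qf_value(ibz_t *out, const quat_lattice_t *lat, const ibz_vec_4_t *x, const quat_alg_t *alg)
{
    ibz_mat_4x4_t G;
    ibz_mat_4x4_init(&G);
    quat_lattice_gram(&G, lat, alg); // scaled trace form on the basis columns
    quat_qf_eval(out, &G, x);        // out = x^T G x = 2 * denom^2 * Nrd(basis*x / denom)
    ibz_mat_4x4_finalize(&G);
}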
+ * + * @param G Output: Gram matrix of the trace bilinear form on the lattice, multiplied by the square + * of the denominator of the lattice + * @param lattice A lattice + * @param alg The quaternion algebra + */ +void quat_lattice_gram(ibz_mat_4x4_t *G, const quat_lattice_t *lattice, const quat_alg_t *alg); + +/** + * @brief Compute an integer parallelogram containing the ball of + * given radius for the positive definite quadratic form defined by + * the Gram matrix G. + * + * The computed parallelogram is defined by the vectors + * + * (x₁ x₂ x₃ x₄) · U + * + * with x_i ∈ [ -box[i], box[i] ]. + * + * @param box Output: bounds of the parallelogram + * @param U Output: Unimodular transformation defining the parallelogram + * @param G Gram matrix of the quadratic form, must be full rank + * @param radius Radius of the ball, must be non-negative + * @returns 0 if the box only contains the origin, 1 otherwise + */ +int quat_lattice_bound_parallelogram(ibz_vec_4_t *box, ibz_mat_4x4_t *U, const ibz_mat_4x4_t *G, const ibz_t *radius); + +/** @} + */ + +/** @internal + * @defgroup quat_lideal_helper Helper functions for ideals and orders + * @{ + */ +/** @brief Set norm of an ideal given its lattice and parent order + * + * @param lideal In/Output: Ideal which has lattice and parent_order correctly set, but not + * necessarily the norm. Will have norm correctly set too. + */ +void quat_lideal_norm(quat_left_ideal_t *lideal); // ideal + +/** + * @brief Left principal ideal of order, generated by x + * + * @param lideal Output: left ideal + * @param alg quaternion algebra + * @param order maximal order of alg whose left ideal is searched + * @param x generating element + * + * Creates the left ideal in 'order' generated by the element 'x' + */ +void quat_lideal_create_principal(quat_left_ideal_t *lideal, + const quat_alg_elem_t *x, + const quat_lattice_t *order, + const quat_alg_t *alg); // ideal, test/ideal + +/** + * @brief Equality test for left ideals + * + * @returns 1 if both left ideals are equal, 0 otherwise + * @param lideal1 left ideal + * @param lideal2 left ideal + * @param alg the quaternion algebra + */ +int quat_lideal_equals(const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); // test/ideal + +/** + * @brief Sum of two left ideals + * + * @param sum Output: Left ideal which is the sum of the 2 inputs + * @param lideal1 left ideal + * @param lideal2 left ideal + * @param alg the quaternion algebra + */ +void quat_lideal_add(quat_left_ideal_t *sum, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg); // Not used outside + +/** + * @brief Left ideal product of left ideal I and element alpha + * + * @param product Output: lideal I*alpha, must have integer norm + * @param lideal left ideal + * @param alpha element multiplied to lideal to get the product ideal + * @param alg the quaternion algebra + * + * I*alpha where I is a left-ideal and alpha an element of the algebra + * + * The resulting ideal must have an integer norm + * + */ +void quat_lideal_mul(quat_left_ideal_t *product, + const quat_left_ideal_t *lideal, + const quat_alg_elem_t *alpha, + const quat_alg_t *alg); // test/ideal + +/** @brief Computes the inverse ideal (for a left ideal of a maximal order) without putting it under + * HNF + * + * This function returns a lattice not under HNF. 
For careful internal use only.
+ *
+ * Computes the inverse ideal for lideal as conjugate(lideal)/norm(lideal)
+ *
+ * @param inv Output: lattice representation of the inverse ideal of lideal.
+ * ATTENTION: it is not under HNF. HNF computation must be applied before using lattice functions on it
+ * @param lideal Left ideal of a maximal order in alg
+ * @param alg The quaternion algebra
+ */
+void quat_lideal_inverse_lattice_without_hnf(quat_lattice_t *inv,
+                                             const quat_left_ideal_t *lideal,
+                                             const quat_alg_t *alg); // ideal
+
+/** @brief Computes the right transporter of two left ideals of the same maximal order
+ *
+ * Following the implementation of ideal isomorphisms in the code of LearningToSQI's Sage
+ * implementation of SQIsign. Computes the right transporter (J:I) as inverse(I)J.
+ *
+ * @param trans Output: lattice which is the right transporter from lideal1 to lideal2 (lideal2:lideal1)
+ * @param lideal1 Left ideal of a maximal order in alg
+ * @param lideal2 Left ideal of the same maximal order as lideal1
+ * @param alg The quaternion algebra
+ */
+void quat_lideal_right_transporter(quat_lattice_t *trans,
+                                   const quat_left_ideal_t *lideal1,
+                                   const quat_left_ideal_t *lideal2,
+                                   const quat_alg_t *alg);
+
+/**
+ * @brief Right order of a left ideal
+ *
+ * @param order Output: right order of the given ideal
+ * @param lideal left ideal
+ * @param alg the quaternion algebra
+ */
+void quat_lideal_right_order(quat_lattice_t *order, const quat_left_ideal_t *lideal,
+                             const quat_alg_t *alg); // ideal
+
+/**
+ * @brief Gram matrix of the trace map of the ideal class
+ *
+ * Compute the Gram matrix of the bilinear form
+ *
+ *     〈a, b〉 := Tr(a·conj(b)) / norm(lideal)
+ *
+ * on the basis of the ideal. This matrix has integer entries and its
+ * integer congruence class only depends on the ideal class.
+ *
+ * @param G Output: Gram matrix of the trace map
+ * @param lideal left ideal
+ * @param alg the quaternion algebra
+ */
+void quat_lideal_class_gram(ibz_mat_4x4_t *G, const quat_left_ideal_t *lideal, const quat_alg_t *alg);
+
+/** @brief Test if order is maximal
+ *
+ * Checks if the discriminant of the order equals the prime p defining the quaternion algebra.
+ *
+ * It is not verified whether the order is really an order. The output 1 only means that if it is an
+ * order, then it is maximal.
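A minimal combined-usage sketch (hypothetical helper name; only declarations from this header are used): the right order of a left ideal of a maximal order is again maximal, so composing quat_lideal_right_order with quat_order_is_maximal is a cheap sanity check.

static int right_order_is_maximal(quat_lattice_t *OR, const quat_left_ideal_t *lideal, const quat_alg_t *alg)
{
    quat_lideal_right_order(OR, lideal, alg); // OR = right order of lideal
    return quat_order_is_maximal(OR, alg);    // expected to be 1 when lideal->parent_order is maximal
}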
+ * + * @returns 1 if order is maximal (assuming it is an order), 0 otherwise + * @param order An order of the quaternion algebra (assumes to be an order, this is not tested) + * @param alg The quaternion algebra + */ +int quat_order_is_maximal(const quat_lattice_t *order, + const quat_alg_t *alg); // ideal (only in asserts) + +/** @brief Compute the discriminant of an order as sqrt(det(gram(reduced_norm))) + * + * @param disc: Output: The discriminant sqrt(det(gram(reduced_norm))) + * @param order An order of the quaternion algebra + * @param alg The quaternion algebra + */ +int quat_order_discriminant(ibz_t *disc, const quat_lattice_t *order, + const quat_alg_t *alg); // ideal + +/** @} + */ + +/** @internal + * @ingroup quat_normeq + * @{ + */ + +/** @brief Set lattice to O0 + * + * @param O0 Lattice to be set to (1,i,(i+j)/2,(1+ij)/2) + */ +void quat_lattice_O0_set(quat_lattice_t *O0); + +/** @brief Set p-extremal maximal order to O0 + * + * @param O0 p-extremal order to be set to (1,i,(i+j)/2,(1+ij)/2) + */ +void quat_lattice_O0_set_extremal(quat_p_extremal_maximal_order_t *O0); + +/** + * @brief Create an element of a extremal maximal order from its coefficients + * + * @param elem Output: the quaternion element + * @param order the order + * @param coeffs the vector of 4 ibz coefficients + * @param Bpoo quaternion algebra + * + * elem = x + z*y + z*u + t*z*v + * where coeffs = [x,y,u,v] and t = order.t z = order.z + * + */ +void quat_order_elem_create(quat_alg_elem_t *elem, + const quat_p_extremal_maximal_order_t *order, + const ibz_vec_4_t *coeffs, + const quat_alg_t *Bpoo); // normeq, untested + +/** @} + */ +/** @} + */ + +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/l2.c new file mode 100644 index 0000000000..8c49b21d20 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/l2.c @@ -0,0 +1,190 @@ +#include +#include "lll_internals.h" +#include "internal.h" + +#include "dpe.h" + +// Access entry of symmetric matrix +#define SYM(M, i, j) (i < j ? 
&M[j][i] : &M[i][j]) + +void +quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) +{ + dpe_t dpe_const_one, dpe_const_DELTABAR; + + dpe_init(dpe_const_one); + dpe_set_ui(dpe_const_one, 1); + + dpe_init(dpe_const_DELTABAR); + dpe_set_d(dpe_const_DELTABAR, DELTABAR); + + // fp variables for Gram-Schmidt orthogonalization and Lovasz' conditions + dpe_t r[4][4], u[4][4], lovasz[4]; + for (int i = 0; i < 4; i++) { + dpe_init(lovasz[i]); + for (int j = 0; j <= i; j++) { + dpe_init(r[i][j]); + dpe_init(u[i][j]); + } + } + + // threshold for swaps + dpe_t delta_bar; + dpe_init(delta_bar); + dpe_set_d(delta_bar, DELTABAR); + + // Other work variables + dpe_t Xf, tmpF; + dpe_init(Xf); + dpe_init(tmpF); + ibz_t X, tmpI; + ibz_init(&X); + ibz_init(&tmpI); + + // Main L² loop + dpe_set_z(r[0][0], (*G)[0][0]); + int kappa = 1; + while (kappa < 4) { + // size reduce b_κ + int done = 0; + while (!done) { + // Recompute the κ-th row of the Choleski Factorisation + // Loop invariant: + // r[κ][j] ≈ u[κ][j] ‖b_j*‖² ≈ 〈b_κ, b_j*〉 + for (int j = 0; j <= kappa; j++) { + dpe_set_z(r[kappa][j], (*G)[kappa][j]); + for (int k = 0; k < j; k++) { + dpe_mul(tmpF, r[kappa][k], u[j][k]); + dpe_sub(r[kappa][j], r[kappa][j], tmpF); + } + if (j < kappa) + dpe_div(u[kappa][j], r[kappa][j], r[j][j]); + } + + done = 1; + // size reduce + for (int i = kappa - 1; i >= 0; i--) { + if (dpe_cmp_d(u[kappa][i], ETABAR) > 0 || dpe_cmp_d(u[kappa][i], -ETABAR) < 0) { + done = 0; + dpe_set(Xf, u[kappa][i]); + dpe_round(Xf, Xf); + dpe_get_z(X, Xf); + // Update basis: b_κ ← b_κ - X·b_i + for (int j = 0; j < 4; j++) { + ibz_mul(&tmpI, &X, &(*basis)[j][i]); + ibz_sub(&(*basis)[j][kappa], &(*basis)[j][kappa], &tmpI); + } + // Update lower half of the Gram matrix + // = - 2X + X² = + // - X - X( - X·) + //// 〈b_κ, b_κ〉 ← 〈b_κ, b_κ〉 - X·〈b_κ, b_i〉 + ibz_mul(&tmpI, &X, &(*G)[kappa][i]); + ibz_sub(&(*G)[kappa][kappa], &(*G)[kappa][kappa], &tmpI); + for (int j = 0; j < 4; j++) { // works because i < κ + // 〈b_κ, b_j〉 ← 〈b_κ, b_j〉 - X·〈b_i, b_j〉 + ibz_mul(&tmpI, &X, SYM((*G), i, j)); + ibz_sub(SYM((*G), kappa, j), SYM((*G), kappa, j), &tmpI); + } + // After the loop: + //// 〈b_κ,b_κ〉 ← 〈b_κ,b_κ〉 - X·〈b_κ,b_i〉 - X·(〈b_κ,b_i〉 - X·〈b_i, + /// b_i〉) = 〈b_κ - X·b_i, b_κ - X·b_i〉 + // + // Update u[kappa][j] + for (int j = 0; j < i; j++) { + dpe_mul(tmpF, Xf, u[i][j]); + dpe_sub(u[kappa][j], u[kappa][j], tmpF); + } + } + } + } + + // Check Lovasz' conditions + // lovasz[0] = ‖b_κ‖² + dpe_set_z(lovasz[0], (*G)[kappa][kappa]); + // lovasz[i] = lovasz[i-1] - u[κ][i-1]·r[κ][i-1] + for (int i = 1; i < kappa; i++) { + dpe_mul(tmpF, u[kappa][i - 1], r[kappa][i - 1]); + dpe_sub(lovasz[i], lovasz[i - 1], tmpF); + } + int swap; + for (swap = kappa; swap > 0; swap--) { + dpe_mul(tmpF, delta_bar, r[swap - 1][swap - 1]); + if (dpe_cmp(tmpF, lovasz[swap - 1]) < 0) + break; + } + + // Insert b_κ before b_swap + if (kappa != swap) { + // Insert b_κ before b_swap in the basis and in the lower half Gram matrix + for (int j = kappa; j > swap; j--) { + for (int i = 0; i < 4; i++) { + ibz_swap(&(*basis)[i][j], &(*basis)[i][j - 1]); + if (i == j - 1) + ibz_swap(&(*G)[i][i], &(*G)[j][j]); + else if (i != j) + ibz_swap(SYM((*G), i, j), SYM((*G), i, j - 1)); + } + } + // Copy row u[κ] and r[κ] in swap position, ignore what follows + for (int i = 0; i < swap; i++) { + dpe_set(u[swap][i], u[kappa][i]); + dpe_set(r[swap][i], r[kappa][i]); + } + dpe_set(r[swap][swap], lovasz[swap]); + // swap complete + kappa = swap; + } + + kappa += 1; + } + +#ifndef NDEBUG + // Check 
size-reducedness + for (int i = 0; i < 4; i++) + for (int j = 0; j < i; j++) { + dpe_abs(u[i][j], u[i][j]); + assert(dpe_cmp_d(u[i][j], ETABAR) <= 0); + } + // Check Lovasz' conditions + for (int i = 1; i < 4; i++) { + dpe_mul(tmpF, u[i][i - 1], u[i][i - 1]); + dpe_sub(tmpF, dpe_const_DELTABAR, tmpF); + dpe_mul(tmpF, tmpF, r[i - 1][i - 1]); + assert(dpe_cmp(tmpF, r[i][i]) <= 0); + } +#endif + + // Fill in the upper half of the Gram matrix + for (int i = 0; i < 4; i++) { + for (int j = i + 1; j < 4; j++) + ibz_copy(&(*G)[i][j], &(*G)[j][i]); + } + + // Clearinghouse + ibz_finalize(&X); + ibz_finalize(&tmpI); + dpe_clear(dpe_const_one); + dpe_clear(dpe_const_DELTABAR); + dpe_clear(Xf); + dpe_clear(tmpF); + dpe_clear(delta_bar); + for (int i = 0; i < 4; i++) { + dpe_clear(lovasz[i]); + for (int j = 0; j <= i; j++) { + dpe_clear(r[i][j]); + dpe_clear(u[i][j]); + } + } +} + +int +quat_lattice_lll(ibz_mat_4x4_t *red, const quat_lattice_t *lattice, const quat_alg_t *alg) +{ + ibz_mat_4x4_t G; // Gram Matrix + ibz_mat_4x4_init(&G); + quat_lattice_gram(&G, lattice, alg); + ibz_mat_4x4_copy(red, &lattice->basis); + quat_lll_core(&G, red); + ibz_mat_4x4_finalize(&G); + return 0; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lat_ball.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lat_ball.c new file mode 100644 index 0000000000..c7bbb9682f --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lat_ball.c @@ -0,0 +1,139 @@ +#include +#include +#include +#include "internal.h" +#include "lll_internals.h" + +int +quat_lattice_bound_parallelogram(ibz_vec_4_t *box, ibz_mat_4x4_t *U, const ibz_mat_4x4_t *G, const ibz_t *radius) +{ + ibz_t denom, rem; + ibz_init(&denom); + ibz_init(&rem); + ibz_mat_4x4_t dualG; + ibz_mat_4x4_init(&dualG); + +// Compute the Gram matrix of the dual lattice +#ifndef NDEBUG + int inv_check = ibz_mat_4x4_inv_with_det_as_denom(&dualG, &denom, G); + assert(inv_check); +#else + (void)ibz_mat_4x4_inv_with_det_as_denom(&dualG, &denom, G); +#endif + // Initialize the dual lattice basis to the identity matrix + ibz_mat_4x4_identity(U); + // Reduce the dual lattice + quat_lll_core(&dualG, U); + + // Compute the parallelogram's bounds + int trivial = 1; + for (int i = 0; i < 4; i++) { + ibz_mul(&(*box)[i], &dualG[i][i], radius); + ibz_div(&(*box)[i], &rem, &(*box)[i], &denom); + ibz_sqrt_floor(&(*box)[i], &(*box)[i]); + trivial &= ibz_is_zero(&(*box)[i]); + } + + // Compute the transpose transformation matrix +#ifndef NDEBUG + int inv = ibz_mat_4x4_inv_with_det_as_denom(U, &denom, U); +#else + (void)ibz_mat_4x4_inv_with_det_as_denom(U, &denom, U); +#endif + // U is unitary, det(U) = ± 1 + ibz_mat_4x4_scalar_mul(U, &denom, U); +#ifndef NDEBUG + assert(inv); + ibz_abs(&denom, &denom); + assert(ibz_is_one(&denom)); +#endif + + ibz_mat_4x4_finalize(&dualG); + ibz_finalize(&denom); + ibz_finalize(&rem); + return !trivial; +} + +int +quat_lattice_sample_from_ball(quat_alg_elem_t *res, + const quat_lattice_t *lattice, + const quat_alg_t *alg, + const ibz_t *radius) +{ + assert(ibz_cmp(radius, &ibz_const_zero) > 0); + + ibz_vec_4_t box; + ibz_vec_4_init(&box); + ibz_mat_4x4_t U, G; + ibz_mat_4x4_init(&U); + ibz_mat_4x4_init(&G); + ibz_vec_4_t x; + ibz_vec_4_init(&x); + ibz_t rad, tmp; + ibz_init(&rad); + ibz_init(&tmp); + + // Compute the Gram matrix of the lattice + quat_lattice_gram(&G, lattice, alg); + + // Correct ball radius by the denominator + ibz_mul(&rad, radius, &lattice->denom); + ibz_mul(&rad, &rad, &lattice->denom); + // Correct by 2 (Gram 
matrix corresponds to twice the norm) + ibz_mul(&rad, &rad, &ibz_const_two); + + // Compute a bounding parallelogram for the ball, stop if it only + // contains the origin + int ok = quat_lattice_bound_parallelogram(&box, &U, &G, &rad); + if (!ok) + goto err; + + // Rejection sampling from the parallelogram +#ifndef NDEBUG + int cnt = 0; +#endif + do { + // Sample vector + for (int i = 0; i < 4; i++) { + if (ibz_is_zero(&box[i])) { + ibz_copy(&x[i], &ibz_const_zero); + } else { + ibz_add(&tmp, &box[i], &box[i]); + ok &= ibz_rand_interval(&x[i], &ibz_const_zero, &tmp); + ibz_sub(&x[i], &x[i], &box[i]); + if (!ok) + goto err; + } + } + // Map to parallelogram + ibz_mat_4x4_eval_t(&x, &x, &U); + // Evaluate quadratic form + quat_qf_eval(&tmp, &G, &x); +#ifndef NDEBUG + cnt++; + if (cnt % 100 == 0) + printf("Lattice sampling rejected %d times", cnt - 1); +#endif + } while (ibz_is_zero(&tmp) || (ibz_cmp(&tmp, &rad) > 0)); + + // Evaluate linear combination + ibz_mat_4x4_eval(&(res->coord), &(lattice->basis), &x); + ibz_copy(&(res->denom), &(lattice->denom)); + quat_alg_normalize(res); + +#ifndef NDEBUG + // Check norm is smaller than radius + quat_alg_norm(&tmp, &rad, res, alg); + ibz_mul(&rad, &rad, radius); + assert(ibz_cmp(&tmp, &rad) <= 0); +#endif + +err: + ibz_finalize(&rad); + ibz_finalize(&tmp); + ibz_vec_4_finalize(&x); + ibz_mat_4x4_finalize(&U); + ibz_mat_4x4_finalize(&G); + ibz_vec_4_finalize(&box); + return ok; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lattice.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lattice.c new file mode 100644 index 0000000000..c98bae9499 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lattice.c @@ -0,0 +1,328 @@ +#include +#include +#include "internal.h" + +// helper functions +int +quat_lattice_equal(const quat_lattice_t *lat1, const quat_lattice_t *lat2) +{ + int equal = 1; + quat_lattice_t a, b; + quat_lattice_init(&a); + quat_lattice_init(&b); + quat_lattice_reduce_denom(&a, lat1); + quat_lattice_reduce_denom(&b, lat2); + ibz_abs(&(a.denom), &(a.denom)); + ibz_abs(&(b.denom), &(b.denom)); + quat_lattice_hnf(&a); + quat_lattice_hnf(&b); + equal = equal && (ibz_cmp(&(a.denom), &(b.denom)) == 0); + equal = equal && ibz_mat_4x4_equal(&(a.basis), &(b.basis)); + quat_lattice_finalize(&a); + quat_lattice_finalize(&b); + return (equal); +} + +// sublattice test +int +quat_lattice_inclusion(const quat_lattice_t *sublat, const quat_lattice_t *overlat) +{ + int res; + quat_lattice_t sum; + quat_lattice_init(&sum); + quat_lattice_add(&sum, overlat, sublat); + res = quat_lattice_equal(&sum, overlat); + quat_lattice_finalize(&sum); + return (res); +} + +void +quat_lattice_reduce_denom(quat_lattice_t *reduced, const quat_lattice_t *lat) +{ + ibz_t gcd; + ibz_init(&gcd); + ibz_mat_4x4_gcd(&gcd, &(lat->basis)); + ibz_gcd(&gcd, &gcd, &(lat->denom)); + ibz_mat_4x4_scalar_div(&(reduced->basis), &gcd, &(lat->basis)); + ibz_div(&(reduced->denom), &gcd, &(lat->denom), &gcd); + ibz_abs(&(reduced->denom), &(reduced->denom)); + ibz_finalize(&gcd); +} + +void +quat_lattice_conjugate_without_hnf(quat_lattice_t *conj, const quat_lattice_t *lat) +{ + ibz_mat_4x4_copy(&(conj->basis), &(lat->basis)); + ibz_copy(&(conj->denom), &(lat->denom)); + + for (int row = 1; row < 4; ++row) { + for (int col = 0; col < 4; ++col) { + ibz_neg(&(conj->basis[row][col]), &(conj->basis[row][col])); + } + } +} + +// Method described in https://cseweb.ucsd.edu/classes/sp14/cse206A-a/lec4.pdf consulted on 19 of +// May 2023, 12h40 CEST +void 
+quat_lattice_dual_without_hnf(quat_lattice_t *dual, const quat_lattice_t *lat) +{ + ibz_mat_4x4_t inv; + ibz_t det; + ibz_init(&det); + ibz_mat_4x4_init(&inv); + ibz_mat_4x4_inv_with_det_as_denom(&inv, &det, &(lat->basis)); + ibz_mat_4x4_transpose(&inv, &inv); + // dual_denom = det/lat_denom + ibz_mat_4x4_scalar_mul(&(dual->basis), &(lat->denom), &inv); + ibz_copy(&(dual->denom), &det); + + ibz_finalize(&det); + ibz_mat_4x4_finalize(&inv); +} + +void +quat_lattice_add(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2) +{ + ibz_vec_4_t generators[8]; + ibz_mat_4x4_t tmp; + ibz_t det1, det2, detprod; + ibz_init(&det1); + ibz_init(&det2); + ibz_init(&detprod); + for (int i = 0; i < 8; i++) + ibz_vec_4_init(&(generators[i])); + ibz_mat_4x4_init(&tmp); + ibz_mat_4x4_scalar_mul(&tmp, &(lat1->denom), &(lat2->basis)); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(generators[j][i]), &(tmp[i][j])); + } + } + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det1, &tmp); + ibz_mat_4x4_scalar_mul(&tmp, &(lat2->denom), &(lat1->basis)); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(generators[4 + j][i]), &(tmp[i][j])); + } + } + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det2, &tmp); + assert(!ibz_is_zero(&det1)); + assert(!ibz_is_zero(&det2)); + ibz_gcd(&detprod, &det1, &det2); + ibz_mat_4xn_hnf_mod_core(&(res->basis), 8, generators, &detprod); + ibz_mul(&(res->denom), &(lat1->denom), &(lat2->denom)); + quat_lattice_reduce_denom(res, res); + ibz_mat_4x4_finalize(&tmp); + ibz_finalize(&det1); + ibz_finalize(&det2); + ibz_finalize(&detprod); + for (int i = 0; i < 8; i++) + ibz_vec_4_finalize(&(generators[i])); +} + +// method described in https://cseweb.ucsd.edu/classes/sp14/cse206A-a/lec4.pdf consulted on 19 of +// May 2023, 12h40 CEST +void +quat_lattice_intersect(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2) +{ + quat_lattice_t dual1, dual2, dual_res; + quat_lattice_init(&dual1); + quat_lattice_init(&dual2); + quat_lattice_init(&dual_res); + quat_lattice_dual_without_hnf(&dual1, lat1); + + quat_lattice_dual_without_hnf(&dual2, lat2); + quat_lattice_add(&dual_res, &dual1, &dual2); + quat_lattice_dual_without_hnf(res, &dual_res); + quat_lattice_hnf(res); // could be removed if we do not expect HNF any more + quat_lattice_finalize(&dual1); + quat_lattice_finalize(&dual2); + quat_lattice_finalize(&dual_res); +} + +void +quat_lattice_mat_alg_coord_mul_without_hnf(ibz_mat_4x4_t *prod, + const ibz_mat_4x4_t *lat, + const ibz_vec_4_t *coord, + const quat_alg_t *alg) +{ + ibz_vec_4_t p, a; + ibz_vec_4_init(&p); + ibz_vec_4_init(&a); + for (int i = 0; i < 4; i++) { + ibz_vec_4_copy_ibz(&a, &((*lat)[0][i]), &((*lat)[1][i]), &((*lat)[2][i]), &((*lat)[3][i])); + quat_alg_coord_mul(&p, &a, coord, alg); + ibz_copy(&((*prod)[0][i]), &(p[0])); + ibz_copy(&((*prod)[1][i]), &(p[1])); + ibz_copy(&((*prod)[2][i]), &(p[2])); + ibz_copy(&((*prod)[3][i]), &(p[3])); + } + ibz_vec_4_finalize(&p); + ibz_vec_4_finalize(&a); +} + +void +quat_lattice_alg_elem_mul(quat_lattice_t *prod, + const quat_lattice_t *lat, + const quat_alg_elem_t *elem, + const quat_alg_t *alg) +{ + quat_lattice_mat_alg_coord_mul_without_hnf(&(prod->basis), &(lat->basis), &(elem->coord), alg); + ibz_mul(&(prod->denom), &(lat->denom), &(elem->denom)); + quat_lattice_hnf(prod); +} + +void +quat_lattice_mul(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lattice_t *lat2, const quat_alg_t *alg) +{ + ibz_vec_4_t elem1, elem2, elem_res; + 
ibz_vec_4_t generators[16]; + ibz_mat_4x4_t detmat; + ibz_t det; + quat_lattice_t lat_res; + ibz_init(&det); + ibz_mat_4x4_init(&detmat); + quat_lattice_init(&lat_res); + ibz_vec_4_init(&elem1); + ibz_vec_4_init(&elem2); + ibz_vec_4_init(&elem_res); + for (int i = 0; i < 16; i++) + ibz_vec_4_init(&(generators[i])); + for (int k = 0; k < 4; k++) { + ibz_vec_4_copy_ibz( + &elem1, &(lat1->basis[0][k]), &(lat1->basis[1][k]), &(lat1->basis[2][k]), &(lat1->basis[3][k])); + for (int i = 0; i < 4; i++) { + ibz_vec_4_copy_ibz( + &elem2, &(lat2->basis[0][i]), &(lat2->basis[1][i]), &(lat2->basis[2][i]), &(lat2->basis[3][i])); + quat_alg_coord_mul(&elem_res, &elem1, &elem2, alg); + for (int j = 0; j < 4; j++) { + if (k == 0) + ibz_copy(&(detmat[i][j]), &(elem_res[j])); + ibz_copy(&(generators[4 * k + i][j]), &(elem_res[j])); + } + } + } + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &detmat); + ibz_abs(&det, &det); + ibz_mat_4xn_hnf_mod_core(&(res->basis), 16, generators, &det); + ibz_mul(&(res->denom), &(lat1->denom), &(lat2->denom)); + quat_lattice_reduce_denom(res, res); + ibz_vec_4_finalize(&elem1); + ibz_vec_4_finalize(&elem2); + ibz_vec_4_finalize(&elem_res); + quat_lattice_finalize(&lat_res); + ibz_finalize(&det); + ibz_mat_4x4_finalize(&(detmat)); + for (int i = 0; i < 16; i++) + ibz_vec_4_finalize(&(generators[i])); +} + +// lattice assumed of full rank +int +quat_lattice_contains(ibz_vec_4_t *coord, const quat_lattice_t *lat, const quat_alg_elem_t *x) +{ + int divisible = 0; + ibz_vec_4_t work_coord; + ibz_mat_4x4_t inv; + ibz_t det, prod; + ibz_init(&prod); + ibz_init(&det); + ibz_vec_4_init(&work_coord); + ibz_mat_4x4_init(&inv); + ibz_mat_4x4_inv_with_det_as_denom(&inv, &det, &(lat->basis)); + assert(!ibz_is_zero(&det)); + ibz_mat_4x4_eval(&work_coord, &inv, &(x->coord)); + ibz_vec_4_scalar_mul(&(work_coord), &(lat->denom), &work_coord); + ibz_mul(&prod, &(x->denom), &det); + divisible = ibz_vec_4_scalar_div(&work_coord, &prod, &work_coord); + // copy result + if (divisible && (coord != NULL)) { + for (int i = 0; i < 4; i++) { + ibz_copy(&((*coord)[i]), &(work_coord[i])); + } + } + ibz_finalize(&prod); + ibz_finalize(&det); + ibz_mat_4x4_finalize(&inv); + ibz_vec_4_finalize(&work_coord); + return (divisible); +} + +void +quat_lattice_index(ibz_t *index, const quat_lattice_t *sublat, const quat_lattice_t *overlat) +{ + ibz_t tmp, det; + ibz_init(&tmp); + ibz_init(&det); + + // det = det(sublat->basis) + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &sublat->basis); + // tmp = (overlat->denom)⁴ + ibz_mul(&tmp, &overlat->denom, &overlat->denom); + ibz_mul(&tmp, &tmp, &tmp); + // index = (overlat->denom)⁴ · det(sublat->basis) + ibz_mul(index, &det, &tmp); + // tmp = (sublat->denom)⁴ + ibz_mul(&tmp, &sublat->denom, &sublat->denom); + ibz_mul(&tmp, &tmp, &tmp); + // det = det(overlat->basis) + ibz_mat_4x4_inv_with_det_as_denom(NULL, &det, &overlat->basis); + // tmp = (sublat->denom)⁴ · det(overlat->basis) + ibz_mul(&tmp, &tmp, &det); + // index = index / tmp + ibz_div(index, &tmp, index, &tmp); + assert(ibz_is_zero(&tmp)); + // index = |index| + ibz_abs(index, index); + + ibz_finalize(&tmp); + ibz_finalize(&det); +} + +void +quat_lattice_hnf(quat_lattice_t *lat) +{ + ibz_t mod; + ibz_vec_4_t generators[4]; + ibz_init(&mod); + ibz_mat_4x4_inv_with_det_as_denom(NULL, &mod, &(lat->basis)); + ibz_abs(&mod, &mod); + for (int i = 0; i < 4; i++) + ibz_vec_4_init(&(generators[i])); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_copy(&(generators[j][i]), &(lat->basis[i][j])); + } 
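        // note: coordinate i of every generator is now set; after the outer loop,
        // generators[j] equals column j of the basis, ready for the HNF reduction below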
+ } + ibz_mat_4xn_hnf_mod_core(&(lat->basis), 4, generators, &mod); + quat_lattice_reduce_denom(lat, lat); + ibz_finalize(&mod); + for (int i = 0; i < 4; i++) + ibz_vec_4_finalize(&(generators[i])); +} + +void +quat_lattice_gram(ibz_mat_4x4_t *G, const quat_lattice_t *lattice, const quat_alg_t *alg) +{ + ibz_t tmp; + ibz_init(&tmp); + for (int i = 0; i < 4; i++) { + for (int j = 0; j <= i; j++) { + ibz_set(&(*G)[i][j], 0); + for (int k = 0; k < 4; k++) { + ibz_mul(&tmp, &(lattice->basis)[k][i], &(lattice->basis)[k][j]); + if (k >= 2) + ibz_mul(&tmp, &tmp, &alg->p); + ibz_add(&(*G)[i][j], &(*G)[i][j], &tmp); + } + ibz_mul(&(*G)[i][j], &(*G)[i][j], &ibz_const_two); + } + } + for (int i = 0; i < 4; i++) { + for (int j = i + 1; j < 4; j++) { + ibz_copy(&(*G)[i][j], &(*G)[j][i]); + } + } + ibz_finalize(&tmp); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lll_applications.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lll_applications.c new file mode 100644 index 0000000000..6c763b8c04 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lll_applications.c @@ -0,0 +1,127 @@ +#include +#include +#include "lll_internals.h" + +void +quat_lideal_reduce_basis(ibz_mat_4x4_t *reduced, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal, + const quat_alg_t *alg) +{ + assert(quat_order_is_maximal((lideal->parent_order), alg)); + ibz_t gram_corrector; + ibz_init(&gram_corrector); + ibz_mul(&gram_corrector, &(lideal->lattice.denom), &(lideal->lattice.denom)); + quat_lideal_class_gram(gram, lideal, alg); + ibz_mat_4x4_copy(reduced, &(lideal->lattice.basis)); + quat_lll_core(gram, reduced); + ibz_mat_4x4_scalar_mul(gram, &gram_corrector, gram); + for (int i = 0; i < 4; i++) { + ibz_div_2exp(&((*gram)[i][i]), &((*gram)[i][i]), 1); + for (int j = i + 1; j < 4; j++) { + ibz_set(&((*gram)[i][j]), 0); + } + } + ibz_finalize(&gram_corrector); +} + +void +quat_lideal_lideal_mul_reduced(quat_left_ideal_t *prod, + ibz_mat_4x4_t *gram, + const quat_left_ideal_t *lideal1, + const quat_left_ideal_t *lideal2, + const quat_alg_t *alg) +{ + ibz_mat_4x4_t red; + ibz_mat_4x4_init(&red); + + quat_lattice_mul(&(prod->lattice), &(lideal1->lattice), &(lideal2->lattice), alg); + prod->parent_order = lideal1->parent_order; + quat_lideal_norm(prod); + quat_lideal_reduce_basis(&red, gram, prod, alg); + ibz_mat_4x4_copy(&(prod->lattice.basis), &red); + + ibz_mat_4x4_finalize(&red); +} + +int +quat_lideal_prime_norm_reduced_equivalent(quat_left_ideal_t *lideal, + const quat_alg_t *alg, + const int primality_num_iter, + const int equiv_bound_coeff) +{ + ibz_mat_4x4_t gram, red; + ibz_mat_4x4_init(&gram); + ibz_mat_4x4_init(&red); + + int found = 0; + + // computing the reduced basis + quat_lideal_reduce_basis(&red, &gram, lideal, alg); + + quat_alg_elem_t new_alpha; + quat_alg_elem_init(&new_alpha); + ibz_t tmp, remainder, adjusted_norm; + ibz_init(&tmp); + ibz_init(&remainder); + ibz_init(&adjusted_norm); + + ibz_mul(&adjusted_norm, &lideal->lattice.denom, &lideal->lattice.denom); + + int ctr = 0; + + // equiv_num_iter = (2 * equiv_bound_coeff + 1)^4 + assert(equiv_bound_coeff < (1 << 20)); + int equiv_num_iter = (2 * equiv_bound_coeff + 1); + equiv_num_iter = equiv_num_iter * equiv_num_iter; + equiv_num_iter = equiv_num_iter * equiv_num_iter; + + while (!found && ctr < equiv_num_iter) { + ctr++; + // we select our linear combination at random + ibz_rand_interval_minm_m(&new_alpha.coord[0], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord[1], equiv_bound_coeff); + 
ibz_rand_interval_minm_m(&new_alpha.coord[2], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord[3], equiv_bound_coeff); + + // computation of the norm of the vector sampled + quat_qf_eval(&tmp, &gram, &new_alpha.coord); + + // compute the norm of the equivalent ideal + // can be improved by removing the power of two first and the odd part only if the trial + // division failed (this should always be called on an ideal of norm 2^x * N for some + // big prime N ) + ibz_div(&tmp, &remainder, &tmp, &adjusted_norm); + + // debug : check that the remainder is zero + assert(ibz_is_zero(&remainder)); + + // pseudo-primality test + if (ibz_probab_prime(&tmp, primality_num_iter)) { + + // computes the generator using a matrix multiplication + ibz_mat_4x4_eval(&new_alpha.coord, &red, &new_alpha.coord); + ibz_copy(&new_alpha.denom, &lideal->lattice.denom); + assert(quat_lattice_contains(NULL, &lideal->lattice, &new_alpha)); + + quat_alg_conj(&new_alpha, &new_alpha); + ibz_mul(&new_alpha.denom, &new_alpha.denom, &lideal->norm); + quat_lideal_mul(lideal, lideal, &new_alpha, alg); + assert(ibz_probab_prime(&lideal->norm, primality_num_iter)); + + found = 1; + break; + } + } + assert(found); + + ibz_finalize(&tmp); + ibz_finalize(&remainder); + ibz_finalize(&adjusted_norm); + quat_alg_elem_finalize(&new_alpha); + + ibz_mat_4x4_finalize(&gram); + ibz_mat_4x4_finalize(&red); + + return found; +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lll_internals.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lll_internals.h new file mode 100644 index 0000000000..e8d90141ac --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lll_internals.h @@ -0,0 +1,238 @@ +#ifndef LLL_INTERNALS_H +#define LLL_INTERNALS_H + +/** @file + * + * @authors Sina Schaeffler + * + * @brief Declarations of functions only used for the LLL tets + */ + +#include + +/** @internal + * @ingroup quat_helpers + * @defgroup lll_internal Functions only used for LLL or its tests + * @{ + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_params Parameters used by the L2 implementation (floats) and its tests (ints) + * @{ + */ + +#define DELTABAR 0.995 +#define DELTA_NUM 99 +#define DELTA_DENOM 100 + +#define ETABAR 0.505 +#define EPSILON_NUM 1 +#define EPSILON_DENOM 100 + +#define PREC 64 +/** + * @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup ibq_t Types for rationals + * @{ + */ + +/** @brief Type for fractions of integers + * + * @typedef ibq_t + * + * For fractions of integers of arbitrary size, used by intbig module, using gmp + */ +typedef ibz_t ibq_t[2]; +typedef ibq_t ibq_vec_4_t[4]; +typedef ibq_t ibq_mat_4x4_t[4][4]; + +/**@} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_ibq_c Constructors and Destructors and Printers + * @{ + */ + +void ibq_init(ibq_t *x); +void ibq_finalize(ibq_t *x); + +void ibq_mat_4x4_init(ibq_mat_4x4_t *mat); +void ibq_mat_4x4_finalize(ibq_mat_4x4_t *mat); + +void ibq_vec_4_init(ibq_vec_4_t *vec); +void ibq_vec_4_finalize(ibq_vec_4_t *vec); + +void ibq_mat_4x4_print(const ibq_mat_4x4_t *mat); +void ibq_vec_4_print(const ibq_vec_4_t *vec); + +/** @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_qa Basic fraction arithmetic + * @{ + */ + +/** @brief sum=a+b + */ +void ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b); + +/** @brief diff=a-b + */ +void ibq_sub(ibq_t *diff, const ibq_t *a, const ibq_t *b); + +/** @brief neg=-x + */ +void ibq_neg(ibq_t *neg, const ibq_t *x); + +/** @brief 
abs=|x| + */ +void ibq_abs(ibq_t *abs, const ibq_t *x); + +/** @brief prod=a*b + */ +void ibq_mul(ibq_t *prod, const ibq_t *a, const ibq_t *b); + +/** @brief inv=1/x + * + * @returns 0 if x is 0, 1 if inverse exists and was computed + */ +int ibq_inv(ibq_t *inv, const ibq_t *x); + +/** @brief Compare a and b + * + * @returns a positive value if a > b, zero if a = b, and a negative value if a < b + */ +int ibq_cmp(const ibq_t *a, const ibq_t *b); + +/** @brief Test if x is 0 + * + * @returns 1 if x=0, 0 otherwise + */ +int ibq_is_zero(const ibq_t *x); + +/** @brief Test if x is 1 + * + * @returns 1 if x=1, 0 otherwise + */ +int ibq_is_one(const ibq_t *x); + +/** @brief Set q to a/b if b not 0 + * + * @returns 1 if b not 0 and q is set, 0 otherwise + */ +int ibq_set(ibq_t *q, const ibz_t *a, const ibz_t *b); + +/** @brief Copy value into target + */ +void ibq_copy(ibq_t *target, const ibq_t *value); + +/** @brief Checks if q is an integer + * + * @returns 1 if yes, 0 if not + */ +int ibq_is_ibz(const ibq_t *q); + +/** + * @brief Converts a fraction q to an integer y, if q is an integer. + * + * @returns 1 if z is an integer, 0 if not + */ +int ibq_to_ibz(ibz_t *z, const ibq_t *q); +/** @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup quat_lll_verify_helpers Helper functions for lll verification in dimension 4 + * @{ + */ + +/** @brief Set ibq to parameters delta and eta = 1/2 + epsilon using L2 constants + */ +void quat_lll_set_ibq_parameters(ibq_t *delta, ibq_t *eta); + +/** @brief Set an ibq vector to 4 given integer coefficients + */ +void ibq_vec_4_copy_ibz(ibq_vec_4_t *vec, + const ibz_t *coeff0, + const ibz_t *coeff1, + const ibz_t *coeff2, + const ibz_t *coeff3); // dim4, test/dim4 + +/** @brief Bilinear form vec00*vec10+vec01*vec11+q*vec02*vec12+q*vec03*vec13 for ibz_q + */ +void quat_lll_bilinear(ibq_t *b, const ibq_vec_4_t *vec0, const ibq_vec_4_t *vec1, + const ibz_t *q); // dim4, test/dim4 + +/** @brief Outputs the transposition of the orthogonalised matrix of mat (as fractions) + * + * For the bilinear form vec00*vec10+vec01*vec11+q*vec02*vec12+q*vec03*vec13 + */ +void quat_lll_gram_schmidt_transposed_with_ibq(ibq_mat_4x4_t *orthogonalised_transposed, + const ibz_mat_4x4_t *mat, + const ibz_t *q); // dim4 + +/** @brief Verifies if mat is lll-reduced for parameter coeff and norm defined by q + * + * For the bilinear form vec00*vec10+vec01*vec11+q*vec02*vec12+q*vec03*vec13 + */ +int quat_lll_verify(const ibz_mat_4x4_t *mat, + const ibq_t *delta, + const ibq_t *eta, + const quat_alg_t *alg); // test/lattice, test/dim4 + /** @} + */ + +/** @internal + * @ingroup lll_internal + * @defgroup lll_internal_gram Internal LLL function + * @{ + */ + +/** @brief In-place L2 reduction core function + * + * Given a lattice basis represented by the columns of a 4x4 matrix + * and the Gram matrix of its bilinear form, L2-reduces the basis + * in-place and updates the Gram matrix accordingly. 
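A minimal sketch of the verification path (hypothetical helper name; only declarations from this header are used): the exact rational parameters corresponding to DELTABAR and ETABAR are produced by quat_lll_set_ibq_parameters and can then be fed to quat_lll_verify.

static int check_reduced(const ibz_mat_4x4_t *basis, const quat_alg_t *alg)
{
    ibq_t delta, eta;
    ibq_init(&delta);
    ibq_init(&eta);
    quat_lll_set_ibq_parameters(&delta, &eta); // delta = DELTA_NUM/DELTA_DENOM, eta = 1/2 + EPSILON_NUM/EPSILON_DENOM
    int ok = quat_lll_verify(basis, &delta, &eta, alg);
    ibq_finalize(&eta);
    ibq_finalize(&delta);
    return ok;
}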
+ * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param G In/Output: Gram matrix of the lattice basis + * @param basis In/Output: lattice basis + */ +void quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis); + +/** + * @brief LLL reduction on 4-dimensional lattice + * + * Implements the L2 Algorithm of Nguyen-Stehlé, also known as fplll: + * https://iacr.org/archive/eurocrypt2005/34940217/34940217.pdf + * + * Parameters are in lll/lll_internals.h + * + * @param red Output: LLL reduced basis + * @param lattice In/Output: lattice with 4-dimensional basis + * @param alg The quaternion algebra + */ +int quat_lattice_lll(ibz_mat_4x4_t *red, const quat_lattice_t *lattice, const quat_alg_t *alg); + +/** + * @} + */ + +// end of lll_internal +/** @} + */ +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mp.c new file mode 100644 index 0000000000..27f4a963db --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mp.c @@ -0,0 +1,357 @@ +#include +#include +#include +#include + +// double-wide multiplication +void +MUL(digit_t *out, const digit_t a, const digit_t b) +{ +#ifdef RADIX_32 + uint64_t r = (uint64_t)a * b; + out[0] = r & 0xFFFFFFFFUL; + out[1] = r >> 32; + +#elif defined(RADIX_64) && defined(_MSC_VER) + uint64_t umul_hi; + out[0] = _umul128(a, b, &umul_hi); + out[1] = umul_hi; + +#elif defined(RADIX_64) && defined(HAVE_UINT128) + unsigned __int128 umul_tmp; + umul_tmp = (unsigned __int128)(a) * (unsigned __int128)(b); + out[0] = (uint64_t)umul_tmp; + out[1] = (uint64_t)(umul_tmp >> 64); + +#else + register digit_t al, ah, bl, bh, temp; + digit_t albl, albh, ahbl, ahbh, res1, res2, res3, carry; + digit_t mask_low = (digit_t)(-1) >> (sizeof(digit_t) * 4), mask_high = (digit_t)(-1) << (sizeof(digit_t) * 4); + al = a & mask_low; // Low part + ah = a >> (sizeof(digit_t) * 4); // High part + bl = b & mask_low; + bh = b >> (sizeof(digit_t) * 4); + + albl = al * bl; + albh = al * bh; + ahbl = ah * bl; + ahbh = ah * bh; + out[0] = albl & mask_low; // out00 + + res1 = albl >> (sizeof(digit_t) * 4); + res2 = ahbl & mask_low; + res3 = albh & mask_low; + temp = res1 + res2 + res3; + carry = temp >> (sizeof(digit_t) * 4); + out[0] ^= temp << (sizeof(digit_t) * 4); // out01 + + res1 = ahbl >> (sizeof(digit_t) * 4); + res2 = albh >> (sizeof(digit_t) * 4); + res3 = ahbh & mask_low; + temp = res1 + res2 + res3 + carry; + out[1] = temp & mask_low; // out10 + carry = temp & mask_high; + out[1] ^= (ahbh & mask_high) + carry; // out11 + +#endif +} + +void +mp_add(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords) +{ // Multiprecision addition + unsigned int i, carry = 0; + + for (i = 0; i < nwords; i++) { + ADDC(c[i], carry, a[i], b[i], carry); + } +} + +digit_t +mp_shiftr(digit_t *x, const unsigned int shift, const unsigned int nwords) +{ // Multiprecision right shift by 1...RADIX-1 + digit_t bit_out = x[0] & 1; + + for (unsigned int i = 0; i < nwords - 1; i++) { + SHIFTR(x[i + 1], x[i], shift, x[i], RADIX); + } + x[nwords - 1] >>= shift; + return bit_out; +} + +void +mp_shiftl(digit_t *x, const unsigned int shift, const unsigned int nwords) +{ // Multiprecision left shift by 1...RADIX-1 + + for (int i = nwords - 1; i > 0; i--) { + SHIFTL(x[i], x[i - 1], shift, x[i], RADIX); + } + x[0] <<= shift; +} + +void +multiple_mp_shiftl(digit_t *x, const 
unsigned int shift, const unsigned int nwords)
+{
+    int t = shift;
+    while (t > RADIX - 1) {
+        mp_shiftl(x, RADIX - 1, nwords);
+        t = t - (RADIX - 1);
+    }
+    mp_shiftl(x, t, nwords);
+}
+
+// The below functions were taken from the EC module
+
+void
+mp_sub(digit_t *c, const digit_t *a, const digit_t *b, const unsigned int nwords)
+{ // Multiprecision subtraction, assuming a > b
+    unsigned int i, borrow = 0;
+
+    for (i = 0; i < nwords; i++) {
+        SUBC(c[i], borrow, a[i], b[i], borrow);
+    }
+}
+
+void
+select_ct(digit_t *c, const digit_t *a, const digit_t *b, const digit_t mask, const int nwords)
+{ // Select c <- a if mask = 0, select c <- b if mask = 1...1
+
+    for (int i = 0; i < nwords; i++) {
+        c[i] = ((a[i] ^ b[i]) & mask) ^ a[i];
+    }
+}
+
+void
+swap_ct(digit_t *a, digit_t *b, const digit_t option, const int nwords)
+{ // Swap entries
+  // If option = 0 then a <- a and b <- b, else if option = 0xFF...FF then a <- b and b <- a
+    digit_t temp;
+
+    for (int i = 0; i < nwords; i++) {
+        temp = option & (a[i] ^ b[i]);
+        a[i] = temp ^ a[i];
+        b[i] = temp ^ b[i];
+    }
+}
+
+int
+mp_compare(const digit_t *a, const digit_t *b, unsigned int nwords)
+{ // Multiprecision comparison: returns 1 if a > b, 0 if a = b, -1 if a < b
+    for (int i = nwords - 1; i >= 0; i--) {
+        if (a[i] > b[i])
+            return 1;
+        else if (a[i] < b[i])
+            return -1;
+    }
+    return 0;
+}
+
+bool
+mp_is_zero(const digit_t *a, unsigned int nwords)
+{ // Is a multiprecision element zero?
+  // Returns 1 (true) if a=0, 0 (false) otherwise
+    digit_t r = 0;
+
+    for (unsigned int i = 0; i < nwords; i++)
+        r |= a[i] ^ 0;
+
+    return (bool)is_digit_zero_ct(r);
+}
+
+void
+mp_mul2(digit_t *c, const digit_t *a, const digit_t *b)
+{ // Multiprecision multiplication fixed to two-digit operands
+    unsigned int carry = 0;
+    digit_t t0[2], t1[2], t2[2];
+
+    MUL(t0, a[0], b[0]);
+    MUL(t1, a[0], b[1]);
+    ADDC(t0[1], carry, t0[1], t1[0], carry);
+    ADDC(t1[1], carry, 0, t1[1], carry);
+    MUL(t2, a[1], b[1]);
+    ADDC(t2[0], carry, t2[0], t1[1], carry);
+    ADDC(t2[1], carry, 0, t2[1], carry);
+    c[0] = t0[0];
+    c[1] = t0[1];
+    c[2] = t2[0];
+    c[3] = t2[1];
+}
+
+void
+mp_print(const digit_t *a, size_t nwords)
+{
+    printf("0x");
+    for (size_t i = 0; i < nwords; i++) {
+#ifdef RADIX_32
+        printf("%08" PRIx32, a[nwords - i - 1]); // Print each word with 8 hex digits
+#elif defined(RADIX_64)
+        printf("%016" PRIx64, a[nwords - i - 1]); // Print each word with 16 hex digits
+#endif
+    }
+}
+
+void
+mp_copy(digit_t *b, const digit_t *a, size_t nwords)
+{
+    for (size_t i = 0; i < nwords; i++) {
+        b[i] = a[i];
+    }
+}
+
+void
+mp_mul(digit_t *c, const digit_t *a, const digit_t *b, size_t nwords)
+{
+    // Multiprecision multiplication, c = a*b, for nwords-digit inputs, with nwords-digit output
+    // explicitly does not use the higher half of c, as we do not need it in our applications
+    digit_t carry, UV[2], t[nwords], cc[nwords];
+
+    for (size_t i = 0; i < nwords; i++) {
+        cc[i] = 0;
+    }
+
+    for (size_t i = 0; i < nwords; i++) {
+
+        MUL(t, a[i], b[0]);
+
+        for (size_t j = 1; j < nwords - 1; j++) {
+            MUL(UV, a[i], b[j]);
+            ADDC(t[j], carry, t[j], UV[0], 0);
+            t[j + 1] = UV[1] + carry;
+        }
+
+        int j = nwords - 1;
+        MUL(UV, a[i], b[j]);
+        ADDC(t[j], carry, t[j], UV[0], 0);
+
+        mp_add(&cc[i], &cc[i], t, nwords - i);
+    }
+
+    mp_copy(c, cc, nwords);
+}
+
+void
+mp_mod_2exp(digit_t *a, unsigned int e, unsigned int nwords)
+{ // Multiprecision modulo 2^e, with 0 <= a < 2^(e)
+    unsigned int i, q = e >> LOG2RADIX, r = e & (RADIX - 1);
+
+    if (q < nwords) {
+        a[q] &= ((digit_t)1 << r) - 1;
+
+        for (i = q + 1; i < nwords; i++) {
+            a[i]
= 0; + } + } +} + +void +mp_neg(digit_t *a, unsigned int nwords) +{ // negates a + for (size_t i = 0; i < nwords; i++) { + a[i] ^= -1; + } + + a[0] += 1; +} + +bool +mp_is_one(const digit_t *x, unsigned int nwords) +{ // returns true if x represents 1, and false otherwise + if (x[0] != 1) { + return false; + } + + for (size_t i = 1; i < nwords; i++) { + if (x[i] != 0) { + return false; + } + } + return true; +} + +void +mp_inv_2e(digit_t *b, const digit_t *a, int e, unsigned int nwords) +{ // Inversion modulo 2^e, using Newton's method and Hensel lifting + // we take the first power of 2 larger than e to use + // requires a to be odd, of course + // returns b such that a*b = 1 mod 2^e + assert((a[0] & 1) == 1); + + digit_t x[nwords], y[nwords], aa[nwords], mp_one[nwords], tmp[nwords]; + mp_copy(aa, a, nwords); + + mp_one[0] = 1; + for (unsigned int i = 1; i < nwords; i++) { + mp_one[i] = 0; + } + + int p = 1; + while ((1 << p) < e) { + p++; + } + p -= 2; // using k = 4 for initial inverse + int w = (1 << (p + 2)); + + mp_mod_2exp(aa, w, nwords); + mp_add(x, aa, aa, nwords); + mp_add(x, x, aa, nwords); // should be 3a + x[0] ^= (1 << 1); // so that x equals (3a)^2 xor 2 + mp_mod_2exp(x, w, nwords); // now x*a = 1 mod 2^4, which we lift + + mp_mul(tmp, aa, x, nwords); + mp_neg(tmp, nwords); + mp_add(y, mp_one, tmp, nwords); + + // Hensel lifting for p rounds + for (int i = 0; i < p; i++) { + mp_add(tmp, mp_one, y, nwords); + mp_mul(x, x, tmp, nwords); + mp_mul(y, y, y, nwords); + } + + mp_mod_2exp(x, w, nwords); + mp_copy(b, x, nwords); + + // verify results + mp_mul(x, x, aa, nwords); + mp_mod_2exp(x, w, nwords); + assert(mp_is_one(x, nwords)); +} + +void +mp_invert_matrix(digit_t *r1, digit_t *r2, digit_t *s1, digit_t *s2, int e, unsigned int nwords) +{ + // given a matrix ( ( a, b ), (c, d) ) of values mod 2^e + // returns the inverse matrix gamma ( (d, -b), (-c, a) ) + // where gamma is the inverse of the determinant a*d - b*c + // assumes the matrix is invertible, otherwises, inversion of determinant fails + + int p = 1; + while ((1 << p) < e) { + p++; + } + int w = (1 << (p)); + + digit_t det[nwords], tmp[nwords], resa[nwords], resb[nwords], resc[nwords], resd[nwords]; + mp_mul(tmp, r1, s2, nwords); + mp_mul(det, r2, s1, nwords); + mp_sub(det, tmp, det, nwords); + mp_inv_2e(det, det, e, nwords); + + mp_mul(resa, det, s2, nwords); + mp_mul(resb, det, r2, nwords); + mp_mul(resc, det, s1, nwords); + mp_mul(resd, det, r1, nwords); + + mp_neg(resb, nwords); + mp_neg(resc, nwords); + + mp_mod_2exp(resa, w, nwords); + mp_mod_2exp(resb, w, nwords); + mp_mod_2exp(resc, w, nwords); + mp_mod_2exp(resd, w, nwords); + + mp_copy(r1, resa, nwords); + mp_copy(r2, resb, nwords); + mp_copy(s1, resc, nwords); + mp_copy(s2, resd, nwords); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/normeq.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/normeq.c new file mode 100644 index 0000000000..8c133dd095 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/normeq.c @@ -0,0 +1,369 @@ +#include +#include "internal.h" + +/** @file + * + * @authors Antonin Leroux + * + * @brief Functions related to norm equation solving or special extremal orders + */ + +void +quat_lattice_O0_set(quat_lattice_t *O0) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_set(&(O0->basis[i][j]), 0); + } + } + ibz_set(&(O0->denom), 2); + ibz_set(&(O0->basis[0][0]), 2); + ibz_set(&(O0->basis[1][1]), 2); + ibz_set(&(O0->basis[2][2]), 1); + ibz_set(&(O0->basis[1][2]), 1); + 
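    // note: with denominator 2, the columns set so far encode 2*1, 2*i and i+j; the two
    // remaining assignments add the column 1+ij, giving the basis (1, i, (i+j)/2, (1+ij)/2) of O0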
ibz_set(&(O0->basis[3][3]), 1); + ibz_set(&(O0->basis[0][3]), 1); +} + +void +quat_lattice_O0_set_extremal(quat_p_extremal_maximal_order_t *O0) +{ + ibz_set(&O0->z.coord[1], 1); + ibz_set(&O0->t.coord[2], 1); + ibz_set(&O0->z.denom, 1); + ibz_set(&O0->t.denom, 1); + O0->q = 1; + quat_lattice_O0_set(&(O0->order)); +} + +void +quat_order_elem_create(quat_alg_elem_t *elem, + const quat_p_extremal_maximal_order_t *order, + const ibz_vec_4_t *coeffs, + const quat_alg_t *Bpoo) +{ + + // var dec + quat_alg_elem_t quat_temp; + + // var init + quat_alg_elem_init(&quat_temp); + + // elem = x + quat_alg_scalar(elem, &(*coeffs)[0], &ibz_const_one); + + // quat_temp = i*y + quat_alg_scalar(&quat_temp, &((*coeffs)[1]), &ibz_const_one); + quat_alg_mul(&quat_temp, &order->z, &quat_temp, Bpoo); + + // elem = x + i*y + quat_alg_add(elem, elem, &quat_temp); + + // quat_temp = z * j + quat_alg_scalar(&quat_temp, &(*coeffs)[2], &ibz_const_one); + quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); + + // elem = x + i* + z*j + quat_alg_add(elem, elem, &quat_temp); + + // quat_temp = t * j * i + quat_alg_scalar(&quat_temp, &(*coeffs)[3], &ibz_const_one); + quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); + quat_alg_mul(&quat_temp, &quat_temp, &order->z, Bpoo); + + // elem = x + i*y + j*z + j*i*t + quat_alg_add(elem, elem, &quat_temp); + + quat_alg_elem_finalize(&quat_temp); +} + +int +quat_represent_integer(quat_alg_elem_t *gamma, + const ibz_t *n_gamma, + int non_diag, + const quat_represent_integer_params_t *params) +{ + + if (ibz_is_even(n_gamma)) { + return 0; + } + // var dec + int found; + ibz_t cornacchia_target; + ibz_t adjusted_n_gamma, q; + ibz_t bound, sq_bound, temp; + ibz_t test; + ibz_vec_4_t coeffs; // coeffs = [x,y,z,t] + quat_alg_elem_t quat_temp; + + if (non_diag) + assert(params->order->q % 4 == 1); + + // var init + found = 0; + ibz_init(&bound); + ibz_init(&test); + ibz_init(&temp); + ibz_init(&q); + ibz_init(&sq_bound); + ibz_vec_4_init(&coeffs); + quat_alg_elem_init(&quat_temp); + ibz_init(&adjusted_n_gamma); + ibz_init(&cornacchia_target); + + ibz_set(&q, params->order->q); + + // this could be removed in the current state + int standard_order = (params->order->q == 1); + + // adjusting the norm of gamma (multiplying by 4 to find a solution in an order of odd level) + if (non_diag || standard_order) { + ibz_mul(&adjusted_n_gamma, n_gamma, &ibz_const_two); + ibz_mul(&adjusted_n_gamma, &adjusted_n_gamma, &ibz_const_two); + } else { + ibz_copy(&adjusted_n_gamma, n_gamma); + } + // computation of the first bound = sqrt (adjust_n_gamma / p - q) + ibz_div(&sq_bound, &bound, &adjusted_n_gamma, &((params->algebra)->p)); + ibz_set(&temp, params->order->q); + ibz_sub(&sq_bound, &sq_bound, &temp); + ibz_sqrt_floor(&bound, &sq_bound); + + // the size of the search space is roughly n_gamma / (p√q) + ibz_t counter; + ibz_init(&counter); + ibz_mul(&temp, &temp, &((params->algebra)->p)); + ibz_mul(&temp, &temp, &((params->algebra)->p)); + ibz_sqrt_floor(&temp, &temp); + ibz_div(&counter, &temp, &adjusted_n_gamma, &temp); + + // entering the main loop + while (!found && ibz_cmp(&counter, &ibz_const_zero) != 0) { + // decreasing the counter + ibz_sub(&counter, &counter, &ibz_const_one); + + // we start by sampling the first coordinate + ibz_rand_interval(&coeffs[2], &ibz_const_one, &bound); + + // then, we sample the second coordinate + // computing the second bound in temp as sqrt( (adjust_n_gamma - p*coeffs[2]²)/qp ) + ibz_mul(&cornacchia_target, &coeffs[2], &coeffs[2]); + ibz_mul(&temp, 
&cornacchia_target, &(params->algebra->p)); + ibz_sub(&temp, &adjusted_n_gamma, &temp); + ibz_mul(&sq_bound, &q, &(params->algebra->p)); + ibz_div(&temp, &sq_bound, &temp, &sq_bound); + ibz_sqrt_floor(&temp, &temp); + + if (ibz_cmp(&temp, &ibz_const_zero) == 0) { + continue; + } + // sampling the second value + ibz_rand_interval(&coeffs[3], &ibz_const_one, &temp); + + // compute cornacchia_target = n_gamma - p * (z² + q*t²) + ibz_mul(&temp, &coeffs[3], &coeffs[3]); + ibz_mul(&temp, &q, &temp); + ibz_add(&cornacchia_target, &cornacchia_target, &temp); + ibz_mul(&cornacchia_target, &cornacchia_target, &((params->algebra)->p)); + ibz_sub(&cornacchia_target, &adjusted_n_gamma, &cornacchia_target); + assert(ibz_cmp(&cornacchia_target, &ibz_const_zero) > 0); + + // applying cornacchia + if (ibz_probab_prime(&cornacchia_target, params->primality_test_iterations)) + found = ibz_cornacchia_prime(&(coeffs[0]), &(coeffs[1]), &q, &cornacchia_target); + else + found = 0; + + if (found && non_diag && standard_order) { + // check that we can divide by two at least once + // the treatmeat depends if the basis contains (1+j)/2 or (1+k)/2 + // we must have x = t mod 2 and y = z mod 2 + // if q=1 we can simply swap x and y + if (ibz_is_odd(&coeffs[0]) != ibz_is_odd(&coeffs[3])) { + ibz_swap(&coeffs[1], &coeffs[0]); + } + // we further check that (x-t)/2 = 1 mod 2 and (y-z)/2 = 1 mod 2 to ensure that the + // resulting endomorphism will behave well for dim 2 computations + found = found && ((ibz_get(&coeffs[0]) - ibz_get(&coeffs[3])) % 4 == 2) && + ((ibz_get(&coeffs[1]) - ibz_get(&coeffs[2])) % 4 == 2); + } + if (found) { + +#ifndef NDEBUG + ibz_set(&temp, (params->order->q)); + ibz_mul(&temp, &temp, &(coeffs[1])); + ibz_mul(&temp, &temp, &(coeffs[1])); + ibz_mul(&test, &(coeffs[0]), &(coeffs[0])); + ibz_add(&temp, &temp, &test); + assert(0 == ibz_cmp(&temp, &cornacchia_target)); + + ibz_mul(&cornacchia_target, &(coeffs[3]), &(coeffs[3])); + ibz_mul(&cornacchia_target, &cornacchia_target, &(params->algebra->p)); + ibz_mul(&temp, &(coeffs[1]), &(coeffs[1])); + ibz_add(&cornacchia_target, &cornacchia_target, &temp); + ibz_set(&temp, (params->order->q)); + ibz_mul(&cornacchia_target, &cornacchia_target, &temp); + ibz_mul(&temp, &(coeffs[0]), &coeffs[0]); + ibz_add(&cornacchia_target, &cornacchia_target, &temp); + ibz_mul(&temp, &(coeffs[2]), &coeffs[2]); + ibz_mul(&temp, &temp, &(params->algebra->p)); + ibz_add(&cornacchia_target, &cornacchia_target, &temp); + assert(0 == ibz_cmp(&cornacchia_target, &adjusted_n_gamma)); +#endif + // translate x,y,z,t into the quaternion element gamma + quat_order_elem_create(gamma, (params->order), &coeffs, (params->algebra)); +#ifndef NDEBUG + quat_alg_norm(&temp, &(coeffs[0]), gamma, (params->algebra)); + assert(ibz_is_one(&(coeffs[0]))); + assert(0 == ibz_cmp(&temp, &adjusted_n_gamma)); + assert(quat_lattice_contains(NULL, &((params->order)->order), gamma)); +#endif + // making gamma primitive + // coeffs contains the coefficients of primitivized gamma in the basis of order + quat_alg_make_primitive(&coeffs, &temp, gamma, &((params->order)->order)); + + if (non_diag || standard_order) + found = (ibz_cmp(&temp, &ibz_const_two) == 0); + else + found = (ibz_cmp(&temp, &ibz_const_one) == 0); + } + } + + if (found) { + // new gamma + ibz_mat_4x4_eval(&coeffs, &(((params->order)->order).basis), &coeffs); + ibz_copy(&gamma->coord[0], &coeffs[0]); + ibz_copy(&gamma->coord[1], &coeffs[1]); + ibz_copy(&gamma->coord[2], &coeffs[2]); + ibz_copy(&gamma->coord[3], &coeffs[3]); + 
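+        // coeffs should now hold the numerator of the (primitivized) gamma in
+        // the canonical (1, i, j, k) coordinates; the matching denominator of
+        // the order is attached just below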
ibz_copy(&gamma->denom, &(((params->order)->order).denom)); + } + // var finalize + ibz_finalize(&counter); + ibz_finalize(&bound); + ibz_finalize(&temp); + ibz_finalize(&sq_bound); + ibz_vec_4_finalize(&coeffs); + quat_alg_elem_finalize(&quat_temp); + ibz_finalize(&adjusted_n_gamma); + ibz_finalize(&cornacchia_target); + ibz_finalize(&q); + ibz_finalize(&test); + + return found; +} + +int +quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, + const ibz_t *norm, + int is_prime, + const quat_represent_integer_params_t *params, + const ibz_t *prime_cofactor) +{ + + ibz_t n_temp, norm_d; + ibz_t disc; + quat_alg_elem_t gen, gen_rerand; + int found = 0; + ibz_init(&n_temp); + ibz_init(&norm_d); + ibz_init(&disc); + quat_alg_elem_init(&gen); + quat_alg_elem_init(&gen_rerand); + + // when the norm is prime we can be quite efficient + // by avoiding to run represent integer + // the first step is to generate one ideal of the correct norm + if (is_prime) { + + // we find a quaternion element of norm divisible by norm + while (!found) { + // generating a trace-zero element at random + ibz_set(&gen.coord[0], 0); + ibz_sub(&n_temp, norm, &ibz_const_one); + for (int i = 1; i < 4; i++) + ibz_rand_interval(&gen.coord[i], &ibz_const_zero, &n_temp); + + // first, we compute the norm of the gen + quat_alg_norm(&n_temp, &norm_d, &gen, (params->algebra)); + assert(ibz_is_one(&norm_d)); + + // and finally the negation mod norm + ibz_neg(&disc, &n_temp); + ibz_mod(&disc, &disc, norm); + // now we check that -n is a square mod norm + // and if the square root exists we compute it + found = ibz_sqrt_mod_p(&gen.coord[0], &disc, norm); + found = found && !quat_alg_elem_is_zero(&gen); + } + } else { + assert(prime_cofactor != NULL); + // if it is not prime or we don't know if it is prime, we may just use represent integer + // and use a precomputed prime as cofactor + assert(!ibz_is_zero(norm)); + ibz_mul(&n_temp, prime_cofactor, norm); + found = quat_represent_integer(&gen, &n_temp, 0, params); + found = found && !quat_alg_elem_is_zero(&gen); + } +#ifndef NDEBUG + if (found) { + // first, we compute the norm of the gen + quat_alg_norm(&n_temp, &norm_d, &gen, (params->algebra)); + assert(ibz_is_one(&norm_d)); + ibz_mod(&n_temp, &n_temp, norm); + assert(ibz_cmp(&n_temp, &ibz_const_zero) == 0); + } +#endif + + // now we just have to rerandomize the class of the ideal generated by gen + found = 0; + while (!found) { + for (int i = 0; i < 4; i++) { + ibz_rand_interval(&gen_rerand.coord[i], &ibz_const_one, norm); + } + quat_alg_norm(&n_temp, &norm_d, &gen_rerand, (params->algebra)); + assert(ibz_is_one(&norm_d)); + ibz_gcd(&disc, &n_temp, norm); + found = ibz_is_one(&disc); + found = found && !quat_alg_elem_is_zero(&gen_rerand); + } + + quat_alg_mul(&gen, &gen, &gen_rerand, (params->algebra)); + // in both cases, whether norm is prime or not prime, + // gen is not divisible by any integer factor of the target norm + // therefore the call below will yield an ideal of the correct norm + quat_lideal_create(lideal, &gen, norm, &((params->order)->order), (params->algebra)); + assert(ibz_cmp(norm, &(lideal->norm)) == 0); + + ibz_finalize(&n_temp); + quat_alg_elem_finalize(&gen); + quat_alg_elem_finalize(&gen_rerand); + ibz_finalize(&norm_d); + ibz_finalize(&disc); + return (found); +} + +void +quat_change_to_O0_basis(ibz_vec_4_t *vec, const quat_alg_elem_t *el) +{ + ibz_t tmp; + ibz_init(&tmp); + ibz_copy(&(*vec)[2], &el->coord[2]); + ibz_add(&(*vec)[2], &(*vec)[2], &(*vec)[2]); // double (not optimal if 
el->denom is even...) + ibz_copy(&(*vec)[3], &el->coord[3]); // double (not optimal if el->denom is even...) + ibz_add(&(*vec)[3], &(*vec)[3], &(*vec)[3]); + ibz_sub(&(*vec)[0], &el->coord[0], &el->coord[3]); + ibz_sub(&(*vec)[1], &el->coord[1], &el->coord[2]); + + assert(ibz_divides(&(*vec)[0], &el->denom)); + assert(ibz_divides(&(*vec)[1], &el->denom)); + assert(ibz_divides(&(*vec)[2], &el->denom)); + assert(ibz_divides(&(*vec)[3], &el->denom)); + + ibz_div(&(*vec)[0], &tmp, &(*vec)[0], &el->denom); + ibz_div(&(*vec)[1], &tmp, &(*vec)[1], &el->denom); + ibz_div(&(*vec)[2], &tmp, &(*vec)[2], &el->denom); + ibz_div(&(*vec)[3], &tmp, &(*vec)[3], &el->denom); + + ibz_finalize(&tmp); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_arm64crypto.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_arm64crypto.h deleted file mode 100644 index 88c4bf48d0..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/randombytes_arm64crypto.h +++ /dev/null @@ -1,27 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -#ifndef RANDOMBYTES_ARM64CRYPTO_H -#define RANDOMBYTES_ARM64CRYPTO_H - -#include - -#define RNG_SUCCESS 0 -#define RNG_BAD_MAXLEN -1 -#define RNG_BAD_OUTBUF -2 -#define RNG_BAD_REQ_LEN -3 - -typedef struct { - unsigned char buffer[16]; - int buffer_pos; - unsigned long length_remaining; - unsigned char key[32]; - unsigned char ctr[16]; -} AES_XOF_struct; - -typedef struct { - unsigned char Key[32]; - unsigned char V[16]; - int reseed_counter; -} AES256_CTR_DRBG_struct; - -#endif /* RANDOMBYTES_ARM64CRYPTO_H */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/rationals.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/rationals.c new file mode 100644 index 0000000000..0c5387e5e8 --- /dev/null +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/rationals.c @@ -0,0 +1,233 @@ +#include +#include "internal.h" +#include "lll_internals.h" + +void +ibq_init(ibq_t *x) +{ + ibz_init(&((*x)[0])); + ibz_init(&((*x)[1])); + ibz_set(&((*x)[1]), 1); +} + +void +ibq_finalize(ibq_t *x) +{ + ibz_finalize(&((*x)[0])); + ibz_finalize(&((*x)[1])); +} + +void +ibq_mat_4x4_init(ibq_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibq_init(&(*mat)[i][j]); + } + } +} +void +ibq_mat_4x4_finalize(ibq_mat_4x4_t *mat) +{ + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibq_finalize(&(*mat)[i][j]); + } + } +} + +void +ibq_vec_4_init(ibq_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibq_init(&(*vec)[i]); + } +} +void +ibq_vec_4_finalize(ibq_vec_4_t *vec) +{ + for (int i = 0; i < 4; i++) { + ibq_finalize(&(*vec)[i]); + } +} + +void +ibq_mat_4x4_print(const ibq_mat_4x4_t *mat) +{ + printf("matrix: "); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + ibz_print(&((*mat)[i][j][0]), 10); + printf("/"); + ibz_print(&((*mat)[i][j][1]), 10); + printf(" "); + } + printf("\n "); + } + printf("\n"); +} + +void +ibq_vec_4_print(const ibq_vec_4_t *vec) +{ + printf("vector: "); + for (int i = 0; i < 4; i++) { + ibz_print(&((*vec)[i][0]), 10); + printf("/"); + ibz_print(&((*vec)[i][1]), 10); + printf(" "); + } + printf("\n\n"); +} + +void +ibq_reduce(ibq_t *x) +{ + ibz_t gcd, r; + ibz_init(&gcd); + ibz_init(&r); + ibz_gcd(&gcd, &((*x)[0]), &((*x)[1])); + ibz_div(&((*x)[0]), &r, &((*x)[0]), &gcd); + assert(ibz_is_zero(&r)); + ibz_div(&((*x)[1]), &r, &((*x)[1]), &gcd); + assert(ibz_is_zero(&r)); + ibz_finalize(&gcd); + ibz_finalize(&r); +} + +void +ibq_add(ibq_t *sum, const ibq_t *a, 
const ibq_t *b) +{ + ibz_t add, prod; + ibz_init(&add); + ibz_init(&prod); + + ibz_mul(&add, &((*a)[0]), &((*b)[1])); + ibz_mul(&prod, &((*b)[0]), &((*a)[1])); + ibz_add(&((*sum)[0]), &add, &prod); + ibz_mul(&((*sum)[1]), &((*a)[1]), &((*b)[1])); + ibz_finalize(&add); + ibz_finalize(&prod); +} + +void +ibq_neg(ibq_t *neg, const ibq_t *x) +{ + ibz_copy(&((*neg)[1]), &((*x)[1])); + ibz_neg(&((*neg)[0]), &((*x)[0])); +} + +void +ibq_sub(ibq_t *diff, const ibq_t *a, const ibq_t *b) +{ + ibq_t neg; + ibq_init(&neg); + ibq_neg(&neg, b); + ibq_add(diff, a, &neg); + ibq_finalize(&neg); +} + +void +ibq_abs(ibq_t *abs, const ibq_t *x) // once +{ + ibq_t neg; + ibq_init(&neg); + ibq_neg(&neg, x); + if (ibq_cmp(x, &neg) < 0) + ibq_copy(abs, &neg); + else + ibq_copy(abs, x); + ibq_finalize(&neg); +} + +void +ibq_mul(ibq_t *prod, const ibq_t *a, const ibq_t *b) +{ + ibz_mul(&((*prod)[0]), &((*a)[0]), &((*b)[0])); + ibz_mul(&((*prod)[1]), &((*a)[1]), &((*b)[1])); +} + +int +ibq_inv(ibq_t *inv, const ibq_t *x) +{ + int res = !ibq_is_zero(x); + if (res) { + ibz_copy(&((*inv)[0]), &((*x)[0])); + ibz_copy(&((*inv)[1]), &((*x)[1])); + ibz_swap(&((*inv)[1]), &((*inv)[0])); + } + return (res); +} + +int +ibq_cmp(const ibq_t *a, const ibq_t *b) +{ + ibz_t x, y; + ibz_init(&x); + ibz_init(&y); + ibz_copy(&x, &((*a)[0])); + ibz_copy(&y, &((*b)[0])); + ibz_mul(&y, &y, &((*a)[1])); + ibz_mul(&x, &x, &((*b)[1])); + if (ibz_cmp(&((*a)[1]), &ibz_const_zero) > 0) { + ibz_neg(&y, &y); + ibz_neg(&x, &x); + } + if (ibz_cmp(&((*b)[1]), &ibz_const_zero) > 0) { + ibz_neg(&y, &y); + ibz_neg(&x, &x); + } + int res = ibz_cmp(&x, &y); + ibz_finalize(&x); + ibz_finalize(&y); + return (res); +} + +int +ibq_is_zero(const ibq_t *x) +{ + return ibz_is_zero(&((*x)[0])); +} + +int +ibq_is_one(const ibq_t *x) +{ + return (0 == ibz_cmp(&((*x)[0]), &((*x)[1]))); +} + +int +ibq_set(ibq_t *q, const ibz_t *a, const ibz_t *b) +{ + ibz_copy(&((*q)[0]), a); + ibz_copy(&((*q)[1]), b); + return !ibz_is_zero(b); +} + +void +ibq_copy(ibq_t *target, const ibq_t *value) // once +{ + ibz_copy(&((*target)[0]), &((*value)[0])); + ibz_copy(&((*target)[1]), &((*value)[1])); +} + +int +ibq_is_ibz(const ibq_t *q) +{ + ibz_t r; + ibz_init(&r); + ibz_mod(&r, &((*q)[0]), &((*q)[1])); + int res = ibz_is_zero(&r); + ibz_finalize(&r); + return (res); +} + +int +ibq_to_ibz(ibz_t *z, const ibq_t *q) +{ + ibz_t r; + ibz_init(&r); + ibz_div(z, &r, &((*q)[0]), &((*q)[1])); + int res = ibz_is_zero(&r); + ibz_finalize(&r); + return (res); +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/vaes256_key_expansion.S b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/vaes256_key_expansion.S deleted file mode 100644 index 2311fa9bc8..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/vaes256_key_expansion.S +++ /dev/null @@ -1,122 +0,0 @@ -#*************************************************************************** -# This implementation is a modified version of the code, -# written by Nir Drucker and Shay Gueron -# AWS Cryptographic Algorithms Group -# (ndrucker@amazon.com, gueron@amazon.com) -# -# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). -# You may not use this file except in compliance with the License. -# A copy of the License is located at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# or in the "license" file accompanying this file. 
This file is distributed -# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -# express or implied. See the License for the specific language governing -# permissions and limitations under the License. -# The license is detailed in the file LICENSE.txt, and applies to this file. -#*************************************************************************** - -.intel_syntax noprefix -.data - -.p2align 4, 0x90 -MASK1: -.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d -CON1: -.long 1,1,1,1 - -.set k256_size, 32 - -#if defined(__linux__) && defined(__ELF__) -.section .note.GNU-stack,"",@progbits -#endif -.text - -################################################################################ -# void aes256_key_expansion(OUT aes256_ks_t* ks, IN const uint8_t* key); -# The output parameter must be 16 bytes aligned! -# -#Linux ABI -#define out rdi -#define in rsi - -#define CON xmm0 -#define MASK_REG xmm1 - -#define IN0 xmm2 -#define IN1 xmm3 - -#define TMP1 xmm4 -#define TMP2 xmm5 - -#define ZERO xmm15 - -.macro ROUND1 in0 in1 - add out, k256_size - vpshufb TMP2, \in1, MASK_REG - aesenclast TMP2, CON - vpslld CON, CON, 1 - vpslldq TMP1, \in0, 4 - vpxor \in0, \in0, TMP1 - vpslldq TMP1, TMP1, 4 - vpxor \in0, \in0, TMP1 - vpslldq TMP1, TMP1, 4 - vpxor \in0, \in0, TMP1 - vpxor \in0, \in0, TMP2 - vmovdqa [out], \in0 - -.endm - -.macro ROUND2 - vpshufd TMP2, IN0, 0xff - aesenclast TMP2, ZERO - vpslldq TMP1, IN1, 4 - vpxor IN1, IN1, TMP1 - vpslldq TMP1, TMP1, 4 - vpxor IN1, IN1, TMP1 - vpslldq TMP1, TMP1, 4 - vpxor IN1, IN1, TMP1 - vpxor IN1, IN1, TMP2 - vmovdqa [out+16], IN1 -.endm - -#ifdef __APPLE__ -#define AES256_KEY_EXPANSION _aes256_key_expansion -#else -#define AES256_KEY_EXPANSION aes256_key_expansion -#endif - -#ifndef __APPLE__ -.type AES256_KEY_EXPANSION,@function -.hidden AES256_KEY_EXPANSION -#endif -.globl AES256_KEY_EXPANSION -AES256_KEY_EXPANSION: - vmovdqu IN0, [in] - vmovdqu IN1, [in+16] - vmovdqa [out], IN0 - vmovdqa [out+16], IN1 - - vmovdqa CON, [rip+CON1] - vmovdqa MASK_REG, [rip+MASK1] - - vpxor ZERO, ZERO, ZERO - - mov ax, 6 -.loop256: - - ROUND1 IN0, IN1 - dec ax - ROUND2 - jne .loop256 - - ROUND1 IN0, IN1 - - ret -#ifndef __APPLE__ -.size AES256_KEY_EXPANSION, .-AES256_KEY_EXPANSION -#endif - diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/aes.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/aes.h deleted file mode 100644 index e35ec3705b..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/aes.h +++ /dev/null @@ -1,29 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -#ifndef AES_H -#define AES_H - -#include -#include - -void AES_256_ECB(const uint8_t *input, const uint8_t *key, uint8_t *output); -#define AES_ECB_encrypt AES_256_ECB - -#ifdef ENABLE_AESNI -int AES_128_CTR_NI(unsigned char *output, - size_t outputByteLen, - const unsigned char *input, - size_t inputByteLen); -int AES_128_CTR_4R_NI(unsigned char *output, - size_t outputByteLen, - const unsigned char *input, - size_t inputByteLen); -#define AES_128_CTR AES_128_CTR_NI -#else -int AES_128_CTR(unsigned char *output, - size_t outputByteLen, - const unsigned char *input, - size_t inputByteLen); -#endif - -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/aes_c.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/aes_c.c deleted file mode 100644 index 5e2d7d6161..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/aes_c.c +++ /dev/null @@ -1,783 +0,0 @@ -// SPDX-License-Identifier: MIT and Apache-2.0 - -/* - * AES implementation based on code from 
PQClean, - * which is in turn based on BearSSL (https://bearssl.org/) - * by Thomas Pornin. - * - * - * Copyright (c) 2016 Thomas Pornin - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include -#include -#include - -#define AES128_KEYBYTES 16 -#define AES192_KEYBYTES 24 -#define AES256_KEYBYTES 32 -#define AESCTR_NONCEBYTES 12 -#define AES_BLOCKBYTES 16 - -#define PQC_AES128_STATESIZE 88 -typedef struct -{ - uint64_t sk_exp[PQC_AES128_STATESIZE]; -} aes128ctx; - -#define PQC_AES192_STATESIZE 104 -typedef struct -{ - uint64_t sk_exp[PQC_AES192_STATESIZE]; -} aes192ctx; - -#define PQC_AES256_STATESIZE 120 -typedef struct -{ - uint64_t sk_exp[PQC_AES256_STATESIZE]; -} aes256ctx; - -/** Initializes the context **/ -void aes128_ecb_keyexp(aes128ctx *r, const unsigned char *key); - -void aes128_ctr_keyexp(aes128ctx *r, const unsigned char *key); - -void aes128_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes128ctx *ctx); - -void aes128_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes128ctx *ctx); - -/** Frees the context **/ -void aes128_ctx_release(aes128ctx *r); - -/** Initializes the context **/ -void aes192_ecb_keyexp(aes192ctx *r, const unsigned char *key); - -void aes192_ctr_keyexp(aes192ctx *r, const unsigned char *key); - -void aes192_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes192ctx *ctx); - -void aes192_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes192ctx *ctx); - -void aes192_ctx_release(aes192ctx *r); - -/** Initializes the context **/ -void aes256_ecb_keyexp(aes256ctx *r, const unsigned char *key); - -void aes256_ctr_keyexp(aes256ctx *r, const unsigned char *key); - -void aes256_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes256ctx *ctx); - -void aes256_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes256ctx *ctx); - -/** Frees the context **/ -void aes256_ctx_release(aes256ctx *r); - -static inline uint32_t -br_dec32le(const unsigned char *src) -{ - return (uint32_t)src[0] | ((uint32_t)src[1] << 8) | ((uint32_t)src[2] << 16) | - ((uint32_t)src[3] << 24); -} - -static void -br_range_dec32le(uint32_t *v, size_t num, const unsigned char *src) -{ - while (num-- > 0) { - *v++ = br_dec32le(src); - src += 4; - } -} - -static inline uint32_t -br_swap32(uint32_t x) -{ - x = ((x & (uint32_t)0x00FF00FF) << 8) | ((x >> 8) & (uint32_t)0x00FF00FF); - return (x << 16) | (x >> 16); -} - -static 
inline void -br_enc32le(unsigned char *dst, uint32_t x) -{ - dst[0] = (unsigned char)x; - dst[1] = (unsigned char)(x >> 8); - dst[2] = (unsigned char)(x >> 16); - dst[3] = (unsigned char)(x >> 24); -} - -static void -br_range_enc32le(unsigned char *dst, const uint32_t *v, size_t num) -{ - while (num-- > 0) { - br_enc32le(dst, *v++); - dst += 4; - } -} - -static void -br_aes_ct64_bitslice_Sbox(uint64_t *q) -{ - /* - * This S-box implementation is a straightforward translation of - * the circuit described by Boyar and Peralta in "A new - * combinational logic minimization technique with applications - * to cryptology" (https://eprint.iacr.org/2009/191.pdf). - * - * Note that variables x* (input) and s* (output) are numbered - * in "reverse" order (x0 is the high bit, x7 is the low bit). - */ - - uint64_t x0, x1, x2, x3, x4, x5, x6, x7; - uint64_t y1, y2, y3, y4, y5, y6, y7, y8, y9; - uint64_t y10, y11, y12, y13, y14, y15, y16, y17, y18, y19; - uint64_t y20, y21; - uint64_t z0, z1, z2, z3, z4, z5, z6, z7, z8, z9; - uint64_t z10, z11, z12, z13, z14, z15, z16, z17; - uint64_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; - uint64_t t10, t11, t12, t13, t14, t15, t16, t17, t18, t19; - uint64_t t20, t21, t22, t23, t24, t25, t26, t27, t28, t29; - uint64_t t30, t31, t32, t33, t34, t35, t36, t37, t38, t39; - uint64_t t40, t41, t42, t43, t44, t45, t46, t47, t48, t49; - uint64_t t50, t51, t52, t53, t54, t55, t56, t57, t58, t59; - uint64_t t60, t61, t62, t63, t64, t65, t66, t67; - uint64_t s0, s1, s2, s3, s4, s5, s6, s7; - - x0 = q[7]; - x1 = q[6]; - x2 = q[5]; - x3 = q[4]; - x4 = q[3]; - x5 = q[2]; - x6 = q[1]; - x7 = q[0]; - - /* - * Top linear transformation. - */ - y14 = x3 ^ x5; - y13 = x0 ^ x6; - y9 = x0 ^ x3; - y8 = x0 ^ x5; - t0 = x1 ^ x2; - y1 = t0 ^ x7; - y4 = y1 ^ x3; - y12 = y13 ^ y14; - y2 = y1 ^ x0; - y5 = y1 ^ x6; - y3 = y5 ^ y8; - t1 = x4 ^ y12; - y15 = t1 ^ x5; - y20 = t1 ^ x1; - y6 = y15 ^ x7; - y10 = y15 ^ t0; - y11 = y20 ^ y9; - y7 = x7 ^ y11; - y17 = y10 ^ y11; - y19 = y10 ^ y8; - y16 = t0 ^ y11; - y21 = y13 ^ y16; - y18 = x0 ^ y16; - - /* - * Non-linear section. - */ - t2 = y12 & y15; - t3 = y3 & y6; - t4 = t3 ^ t2; - t5 = y4 & x7; - t6 = t5 ^ t2; - t7 = y13 & y16; - t8 = y5 & y1; - t9 = t8 ^ t7; - t10 = y2 & y7; - t11 = t10 ^ t7; - t12 = y9 & y11; - t13 = y14 & y17; - t14 = t13 ^ t12; - t15 = y8 & y10; - t16 = t15 ^ t12; - t17 = t4 ^ t14; - t18 = t6 ^ t16; - t19 = t9 ^ t14; - t20 = t11 ^ t16; - t21 = t17 ^ y20; - t22 = t18 ^ y19; - t23 = t19 ^ y21; - t24 = t20 ^ y18; - - t25 = t21 ^ t22; - t26 = t21 & t23; - t27 = t24 ^ t26; - t28 = t25 & t27; - t29 = t28 ^ t22; - t30 = t23 ^ t24; - t31 = t22 ^ t26; - t32 = t31 & t30; - t33 = t32 ^ t24; - t34 = t23 ^ t33; - t35 = t27 ^ t33; - t36 = t24 & t35; - t37 = t36 ^ t34; - t38 = t27 ^ t36; - t39 = t29 & t38; - t40 = t25 ^ t39; - - t41 = t40 ^ t37; - t42 = t29 ^ t33; - t43 = t29 ^ t40; - t44 = t33 ^ t37; - t45 = t42 ^ t41; - z0 = t44 & y15; - z1 = t37 & y6; - z2 = t33 & x7; - z3 = t43 & y16; - z4 = t40 & y1; - z5 = t29 & y7; - z6 = t42 & y11; - z7 = t45 & y17; - z8 = t41 & y10; - z9 = t44 & y12; - z10 = t37 & y3; - z11 = t33 & y4; - z12 = t43 & y13; - z13 = t40 & y5; - z14 = t29 & y2; - z15 = t42 & y9; - z16 = t45 & y14; - z17 = t41 & y8; - - /* - * Bottom linear transformation. 
- */ - t46 = z15 ^ z16; - t47 = z10 ^ z11; - t48 = z5 ^ z13; - t49 = z9 ^ z10; - t50 = z2 ^ z12; - t51 = z2 ^ z5; - t52 = z7 ^ z8; - t53 = z0 ^ z3; - t54 = z6 ^ z7; - t55 = z16 ^ z17; - t56 = z12 ^ t48; - t57 = t50 ^ t53; - t58 = z4 ^ t46; - t59 = z3 ^ t54; - t60 = t46 ^ t57; - t61 = z14 ^ t57; - t62 = t52 ^ t58; - t63 = t49 ^ t58; - t64 = z4 ^ t59; - t65 = t61 ^ t62; - t66 = z1 ^ t63; - s0 = t59 ^ t63; - s6 = t56 ^ ~t62; - s7 = t48 ^ ~t60; - t67 = t64 ^ t65; - s3 = t53 ^ t66; - s4 = t51 ^ t66; - s5 = t47 ^ t65; - s1 = t64 ^ ~s3; - s2 = t55 ^ ~t67; - - q[7] = s0; - q[6] = s1; - q[5] = s2; - q[4] = s3; - q[3] = s4; - q[2] = s5; - q[1] = s6; - q[0] = s7; -} - -static void -br_aes_ct64_ortho(uint64_t *q) -{ -#define SWAPN(cl, ch, s, x, y) \ - do { \ - uint64_t a, b; \ - a = (x); \ - b = (y); \ - (x) = (a & (uint64_t)(cl)) | ((b & (uint64_t)(cl)) << (s)); \ - (y) = ((a & (uint64_t)(ch)) >> (s)) | (b & (uint64_t)(ch)); \ - } while (0) - -#define SWAP2(x, y) SWAPN(0x5555555555555555, 0xAAAAAAAAAAAAAAAA, 1, x, y) -#define SWAP4(x, y) SWAPN(0x3333333333333333, 0xCCCCCCCCCCCCCCCC, 2, x, y) -#define SWAP8(x, y) SWAPN(0x0F0F0F0F0F0F0F0F, 0xF0F0F0F0F0F0F0F0, 4, x, y) - - SWAP2(q[0], q[1]); - SWAP2(q[2], q[3]); - SWAP2(q[4], q[5]); - SWAP2(q[6], q[7]); - - SWAP4(q[0], q[2]); - SWAP4(q[1], q[3]); - SWAP4(q[4], q[6]); - SWAP4(q[5], q[7]); - - SWAP8(q[0], q[4]); - SWAP8(q[1], q[5]); - SWAP8(q[2], q[6]); - SWAP8(q[3], q[7]); -} - -static void -br_aes_ct64_interleave_in(uint64_t *q0, uint64_t *q1, const uint32_t *w) -{ - uint64_t x0, x1, x2, x3; - - x0 = w[0]; - x1 = w[1]; - x2 = w[2]; - x3 = w[3]; - x0 |= (x0 << 16); - x1 |= (x1 << 16); - x2 |= (x2 << 16); - x3 |= (x3 << 16); - x0 &= (uint64_t)0x0000FFFF0000FFFF; - x1 &= (uint64_t)0x0000FFFF0000FFFF; - x2 &= (uint64_t)0x0000FFFF0000FFFF; - x3 &= (uint64_t)0x0000FFFF0000FFFF; - x0 |= (x0 << 8); - x1 |= (x1 << 8); - x2 |= (x2 << 8); - x3 |= (x3 << 8); - x0 &= (uint64_t)0x00FF00FF00FF00FF; - x1 &= (uint64_t)0x00FF00FF00FF00FF; - x2 &= (uint64_t)0x00FF00FF00FF00FF; - x3 &= (uint64_t)0x00FF00FF00FF00FF; - *q0 = x0 | (x2 << 8); - *q1 = x1 | (x3 << 8); -} - -static void -br_aes_ct64_interleave_out(uint32_t *w, uint64_t q0, uint64_t q1) -{ - uint64_t x0, x1, x2, x3; - - x0 = q0 & (uint64_t)0x00FF00FF00FF00FF; - x1 = q1 & (uint64_t)0x00FF00FF00FF00FF; - x2 = (q0 >> 8) & (uint64_t)0x00FF00FF00FF00FF; - x3 = (q1 >> 8) & (uint64_t)0x00FF00FF00FF00FF; - x0 |= (x0 >> 8); - x1 |= (x1 >> 8); - x2 |= (x2 >> 8); - x3 |= (x3 >> 8); - x0 &= (uint64_t)0x0000FFFF0000FFFF; - x1 &= (uint64_t)0x0000FFFF0000FFFF; - x2 &= (uint64_t)0x0000FFFF0000FFFF; - x3 &= (uint64_t)0x0000FFFF0000FFFF; - w[0] = (uint32_t)x0 | (uint32_t)(x0 >> 16); - w[1] = (uint32_t)x1 | (uint32_t)(x1 >> 16); - w[2] = (uint32_t)x2 | (uint32_t)(x2 >> 16); - w[3] = (uint32_t)x3 | (uint32_t)(x3 >> 16); -} - -static const unsigned char Rcon[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36 }; - -static uint32_t -sub_word(uint32_t x) -{ - uint64_t q[8]; - - memset(q, 0, sizeof q); - q[0] = x; - br_aes_ct64_ortho(q); - br_aes_ct64_bitslice_Sbox(q); - br_aes_ct64_ortho(q); - return (uint32_t)q[0]; -} - -static void -br_aes_ct64_keysched(uint64_t *comp_skey, const unsigned char *key, unsigned int key_len) -{ - unsigned int i, j, k, nk, nkf; - uint32_t tmp; - uint32_t skey[60]; - unsigned nrounds = 10 + ((key_len - 16) >> 2); - - nk = (key_len >> 2); - nkf = ((nrounds + 1) << 2); - br_range_dec32le(skey, (key_len >> 2), key); - tmp = skey[(key_len >> 2) - 1]; - for (i = nk, j = 0, k = 0; i < nkf; i++) { - 
if (j == 0) { - tmp = (tmp << 24) | (tmp >> 8); - tmp = sub_word(tmp) ^ Rcon[k]; - } else if (nk > 6 && j == 4) { - tmp = sub_word(tmp); - } - tmp ^= skey[i - nk]; - skey[i] = tmp; - if (++j == nk) { - j = 0; - k++; - } - } - - for (i = 0, j = 0; i < nkf; i += 4, j += 2) { - uint64_t q[8]; - - br_aes_ct64_interleave_in(&q[0], &q[4], skey + i); - q[1] = q[0]; - q[2] = q[0]; - q[3] = q[0]; - q[5] = q[4]; - q[6] = q[4]; - q[7] = q[4]; - br_aes_ct64_ortho(q); - comp_skey[j + 0] = - (q[0] & (uint64_t)0x1111111111111111) | (q[1] & (uint64_t)0x2222222222222222) | - (q[2] & (uint64_t)0x4444444444444444) | (q[3] & (uint64_t)0x8888888888888888); - comp_skey[j + 1] = - (q[4] & (uint64_t)0x1111111111111111) | (q[5] & (uint64_t)0x2222222222222222) | - (q[6] & (uint64_t)0x4444444444444444) | (q[7] & (uint64_t)0x8888888888888888); - } -} - -static void -br_aes_ct64_skey_expand(uint64_t *skey, const uint64_t *comp_skey, unsigned int nrounds) -{ - unsigned u, v, n; - - n = (nrounds + 1) << 1; - for (u = 0, v = 0; u < n; u++, v += 4) { - uint64_t x0, x1, x2, x3; - - x0 = x1 = x2 = x3 = comp_skey[u]; - x0 &= (uint64_t)0x1111111111111111; - x1 &= (uint64_t)0x2222222222222222; - x2 &= (uint64_t)0x4444444444444444; - x3 &= (uint64_t)0x8888888888888888; - x1 >>= 1; - x2 >>= 2; - x3 >>= 3; - skey[v + 0] = (x0 << 4) - x0; - skey[v + 1] = (x1 << 4) - x1; - skey[v + 2] = (x2 << 4) - x2; - skey[v + 3] = (x3 << 4) - x3; - } -} - -static inline void -add_round_key(uint64_t *q, const uint64_t *sk) -{ - q[0] ^= sk[0]; - q[1] ^= sk[1]; - q[2] ^= sk[2]; - q[3] ^= sk[3]; - q[4] ^= sk[4]; - q[5] ^= sk[5]; - q[6] ^= sk[6]; - q[7] ^= sk[7]; -} - -static inline void -shift_rows(uint64_t *q) -{ - int i; - - for (i = 0; i < 8; i++) { - uint64_t x; - - x = q[i]; - q[i] = - (x & (uint64_t)0x000000000000FFFF) | ((x & (uint64_t)0x00000000FFF00000) >> 4) | - ((x & (uint64_t)0x00000000000F0000) << 12) | ((x & (uint64_t)0x0000FF0000000000) >> 8) | - ((x & (uint64_t)0x000000FF00000000) << 8) | ((x & (uint64_t)0xF000000000000000) >> 12) | - ((x & (uint64_t)0x0FFF000000000000) << 4); - } -} - -static inline uint64_t -rotr32(uint64_t x) -{ - return (x << 32) | (x >> 32); -} - -static inline void -mix_columns(uint64_t *q) -{ - uint64_t q0, q1, q2, q3, q4, q5, q6, q7; - uint64_t r0, r1, r2, r3, r4, r5, r6, r7; - - q0 = q[0]; - q1 = q[1]; - q2 = q[2]; - q3 = q[3]; - q4 = q[4]; - q5 = q[5]; - q6 = q[6]; - q7 = q[7]; - r0 = (q0 >> 16) | (q0 << 48); - r1 = (q1 >> 16) | (q1 << 48); - r2 = (q2 >> 16) | (q2 << 48); - r3 = (q3 >> 16) | (q3 << 48); - r4 = (q4 >> 16) | (q4 << 48); - r5 = (q5 >> 16) | (q5 << 48); - r6 = (q6 >> 16) | (q6 << 48); - r7 = (q7 >> 16) | (q7 << 48); - - q[0] = q7 ^ r7 ^ r0 ^ rotr32(q0 ^ r0); - q[1] = q0 ^ r0 ^ q7 ^ r7 ^ r1 ^ rotr32(q1 ^ r1); - q[2] = q1 ^ r1 ^ r2 ^ rotr32(q2 ^ r2); - q[3] = q2 ^ r2 ^ q7 ^ r7 ^ r3 ^ rotr32(q3 ^ r3); - q[4] = q3 ^ r3 ^ q7 ^ r7 ^ r4 ^ rotr32(q4 ^ r4); - q[5] = q4 ^ r4 ^ r5 ^ rotr32(q5 ^ r5); - q[6] = q5 ^ r5 ^ r6 ^ rotr32(q6 ^ r6); - q[7] = q6 ^ r6 ^ r7 ^ rotr32(q7 ^ r7); -} - -static void -inc4_be(uint32_t *x) -{ - uint32_t t = br_swap32(*x) + 4; - *x = br_swap32(t); -} - -static void -aes_ecb4x(unsigned char out[64], - const uint32_t ivw[16], - const uint64_t *sk_exp, - unsigned int nrounds) -{ - uint32_t w[16]; - uint64_t q[8]; - unsigned int i; - - memcpy(w, ivw, sizeof(w)); - for (i = 0; i < 4; i++) { - br_aes_ct64_interleave_in(&q[i], &q[i + 4], w + (i << 2)); - } - br_aes_ct64_ortho(q); - - add_round_key(q, sk_exp); - for (i = 1; i < nrounds; i++) { - br_aes_ct64_bitslice_Sbox(q); - 
shift_rows(q); - mix_columns(q); - add_round_key(q, sk_exp + (i << 3)); - } - br_aes_ct64_bitslice_Sbox(q); - shift_rows(q); - add_round_key(q, sk_exp + 8 * nrounds); - - br_aes_ct64_ortho(q); - for (i = 0; i < 4; i++) { - br_aes_ct64_interleave_out(w + (i << 2), q[i], q[i + 4]); - } - br_range_enc32le(out, w, 16); -} - -static void -aes_ctr4x(unsigned char out[64], uint32_t ivw[16], const uint64_t *sk_exp, unsigned int nrounds) -{ - aes_ecb4x(out, ivw, sk_exp, nrounds); - - /* Increase counter for next 4 blocks */ - inc4_be(ivw + 3); - inc4_be(ivw + 7); - inc4_be(ivw + 11); - inc4_be(ivw + 15); -} - -static void -aes_ecb(unsigned char *out, - const unsigned char *in, - size_t nblocks, - const uint64_t *rkeys, - unsigned int nrounds) -{ - uint32_t blocks[16]; - unsigned char t[64]; - - while (nblocks >= 4) { - br_range_dec32le(blocks, 16, in); - aes_ecb4x(out, blocks, rkeys, nrounds); - nblocks -= 4; - in += 64; - out += 64; - } - - if (nblocks) { - br_range_dec32le(blocks, nblocks * 4, in); - aes_ecb4x(t, blocks, rkeys, nrounds); - memcpy(out, t, nblocks * 16); - } -} - -static void -aes_ctr(unsigned char *out, - size_t outlen, - const unsigned char *iv, - const uint64_t *rkeys, - unsigned int nrounds) -{ - uint32_t ivw[16]; - size_t i; - uint32_t cc = 0; - - br_range_dec32le(ivw, 3, iv); - memcpy(ivw + 4, ivw, 3 * sizeof(uint32_t)); - memcpy(ivw + 8, ivw, 3 * sizeof(uint32_t)); - memcpy(ivw + 12, ivw, 3 * sizeof(uint32_t)); - ivw[3] = br_swap32(cc); - ivw[7] = br_swap32(cc + 1); - ivw[11] = br_swap32(cc + 2); - ivw[15] = br_swap32(cc + 3); - - while (outlen > 64) { - aes_ctr4x(out, ivw, rkeys, nrounds); - out += 64; - outlen -= 64; - } - if (outlen > 0) { - unsigned char tmp[64]; - aes_ctr4x(tmp, ivw, rkeys, nrounds); - for (i = 0; i < outlen; i++) { - out[i] = tmp[i]; - } - } -} - -void -aes128_ecb_keyexp(aes128ctx *r, const unsigned char *key) -{ - uint64_t skey[22]; - - br_aes_ct64_keysched(skey, key, 16); - br_aes_ct64_skey_expand(r->sk_exp, skey, 10); -} - -void -aes128_ctr_keyexp(aes128ctx *r, const unsigned char *key) -{ - aes128_ecb_keyexp(r, key); -} - -void -aes192_ecb_keyexp(aes192ctx *r, const unsigned char *key) -{ - uint64_t skey[26]; - - br_aes_ct64_keysched(skey, key, 24); - br_aes_ct64_skey_expand(r->sk_exp, skey, 12); -} - -void -aes192_ctr_keyexp(aes192ctx *r, const unsigned char *key) -{ - aes192_ecb_keyexp(r, key); -} - -void -aes256_ecb_keyexp(aes256ctx *r, const unsigned char *key) -{ - uint64_t skey[30]; - - br_aes_ct64_keysched(skey, key, 32); - br_aes_ct64_skey_expand(r->sk_exp, skey, 14); -} - -void -aes256_ctr_keyexp(aes256ctx *r, const unsigned char *key) -{ - aes256_ecb_keyexp(r, key); -} - -void -aes128_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes128ctx *ctx) -{ - aes_ecb(out, in, nblocks, ctx->sk_exp, 10); -} - -void -aes128_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes128ctx *ctx) -{ - aes_ctr(out, outlen, iv, ctx->sk_exp, 10); -} - -void -aes192_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes192ctx *ctx) -{ - aes_ecb(out, in, nblocks, ctx->sk_exp, 12); -} - -void -aes192_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes192ctx *ctx) -{ - aes_ctr(out, outlen, iv, ctx->sk_exp, 12); -} - -void -aes256_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes256ctx *ctx) -{ - aes_ecb(out, in, nblocks, ctx->sk_exp, 14); -} - -void -aes256_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes256ctx *ctx) -{ - 
aes_ctr(out, outlen, iv, ctx->sk_exp, 14); -} - -void -aes128_ctx_release(aes128ctx *r) -{ -} - -void -aes192_ctx_release(aes192ctx *r) -{ -} - -void -aes256_ctx_release(aes256ctx *r) -{ -} - -int -AES_128_CTR(unsigned char *output, - size_t outputByteLen, - const unsigned char *input, - size_t inputByteLen) -{ - aes128ctx ctx; - const unsigned char iv[16] = { 0 }; - - aes128_ctr_keyexp(&ctx, input); - aes128_ctr(output, outputByteLen, iv, &ctx); - aes128_ctx_release(&ctx); - - return (int)outputByteLen; -} - -void -AES_256_ECB(const uint8_t *input, const unsigned char *key, unsigned char *output) -{ - aes256ctx ctx; - - aes256_ecb_keyexp(&ctx, key); - aes256_ecb(output, input, 1, &ctx); - aes256_ctx_release(&ctx); -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fips202.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fips202.c deleted file mode 100644 index f2992d8c7f..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fips202.c +++ /dev/null @@ -1,876 +0,0 @@ -// SPDX-License-Identifier: PD and Apache-2.0 - -/* FIPS202 implementation based on code from PQClean, - * which is in turn based based on the public domain implementation in - * crypto_hash/keccakc512/simple/ from http://bench.cr.yp.to/supercop.html - * by Ronny Van Keer - * and the public domain "TweetFips202" implementation - * from https://twitter.com/tweetfips202 - * by Gilles Van Assche, Daniel J. Bernstein, and Peter Schwabe */ - -#include -#include -#include -#include - -#include "fips202.h" - -#define NROUNDS 24 -#define ROL(a, offset) (((a) << (offset)) ^ ((a) >> (64 - (offset)))) - -/************************************************* - * Name: load64 - * - * Description: Load 8 bytes into uint64_t in little-endian order - * - * Arguments: - const uint8_t *x: pointer to input byte array - * - * Returns the loaded 64-bit unsigned integer - **************************************************/ -static uint64_t load64(const uint8_t *x) { - uint64_t r = 0; - for (size_t i = 0; i < 8; ++i) { - r |= (uint64_t)x[i] << 8 * i; - } - - return r; -} - -/************************************************* - * Name: store64 - * - * Description: Store a 64-bit integer to a byte array in little-endian order - * - * Arguments: - uint8_t *x: pointer to the output byte array - * - uint64_t u: input 64-bit unsigned integer - **************************************************/ -static void store64(uint8_t *x, uint64_t u) { - for (size_t i = 0; i < 8; ++i) { - x[i] = (uint8_t) (u >> 8 * i); - } -} - -/* Keccak round constants */ -static const uint64_t KeccakF_RoundConstants[NROUNDS] = { - 0x0000000000000001ULL, 0x0000000000008082ULL, - 0x800000000000808aULL, 0x8000000080008000ULL, - 0x000000000000808bULL, 0x0000000080000001ULL, - 0x8000000080008081ULL, 0x8000000000008009ULL, - 0x000000000000008aULL, 0x0000000000000088ULL, - 0x0000000080008009ULL, 0x000000008000000aULL, - 0x000000008000808bULL, 0x800000000000008bULL, - 0x8000000000008089ULL, 0x8000000000008003ULL, - 0x8000000000008002ULL, 0x8000000000000080ULL, - 0x000000000000800aULL, 0x800000008000000aULL, - 0x8000000080008081ULL, 0x8000000000008080ULL, - 0x0000000080000001ULL, 0x8000000080008008ULL -}; - -/************************************************* - * Name: KeccakF1600_StatePermute - * - * Description: The Keccak F1600 Permutation - * - * Arguments: - uint64_t *state: pointer to input/output Keccak state - **************************************************/ -static void KeccakF1600_StatePermute(uint64_t *state) { - int round; - - uint64_t Aba, Abe, Abi, Abo, Abu; 
- uint64_t Aga, Age, Agi, Ago, Agu; - uint64_t Aka, Ake, Aki, Ako, Aku; - uint64_t Ama, Ame, Ami, Amo, Amu; - uint64_t Asa, Ase, Asi, Aso, Asu; - uint64_t BCa, BCe, BCi, BCo, BCu; - uint64_t Da, De, Di, Do, Du; - uint64_t Eba, Ebe, Ebi, Ebo, Ebu; - uint64_t Ega, Ege, Egi, Ego, Egu; - uint64_t Eka, Eke, Eki, Eko, Eku; - uint64_t Ema, Eme, Emi, Emo, Emu; - uint64_t Esa, Ese, Esi, Eso, Esu; - - // copyFromState(A, state) - Aba = state[0]; - Abe = state[1]; - Abi = state[2]; - Abo = state[3]; - Abu = state[4]; - Aga = state[5]; - Age = state[6]; - Agi = state[7]; - Ago = state[8]; - Agu = state[9]; - Aka = state[10]; - Ake = state[11]; - Aki = state[12]; - Ako = state[13]; - Aku = state[14]; - Ama = state[15]; - Ame = state[16]; - Ami = state[17]; - Amo = state[18]; - Amu = state[19]; - Asa = state[20]; - Ase = state[21]; - Asi = state[22]; - Aso = state[23]; - Asu = state[24]; - - for (round = 0; round < NROUNDS; round += 2) { - // prepareTheta - BCa = Aba ^ Aga ^ Aka ^ Ama ^ Asa; - BCe = Abe ^ Age ^ Ake ^ Ame ^ Ase; - BCi = Abi ^ Agi ^ Aki ^ Ami ^ Asi; - BCo = Abo ^ Ago ^ Ako ^ Amo ^ Aso; - BCu = Abu ^ Agu ^ Aku ^ Amu ^ Asu; - - // thetaRhoPiChiIotaPrepareTheta(round , A, E) - Da = BCu ^ ROL(BCe, 1); - De = BCa ^ ROL(BCi, 1); - Di = BCe ^ ROL(BCo, 1); - Do = BCi ^ ROL(BCu, 1); - Du = BCo ^ ROL(BCa, 1); - - Aba ^= Da; - BCa = Aba; - Age ^= De; - BCe = ROL(Age, 44); - Aki ^= Di; - BCi = ROL(Aki, 43); - Amo ^= Do; - BCo = ROL(Amo, 21); - Asu ^= Du; - BCu = ROL(Asu, 14); - Eba = BCa ^ ((~BCe) & BCi); - Eba ^= KeccakF_RoundConstants[round]; - Ebe = BCe ^ ((~BCi) & BCo); - Ebi = BCi ^ ((~BCo) & BCu); - Ebo = BCo ^ ((~BCu) & BCa); - Ebu = BCu ^ ((~BCa) & BCe); - - Abo ^= Do; - BCa = ROL(Abo, 28); - Agu ^= Du; - BCe = ROL(Agu, 20); - Aka ^= Da; - BCi = ROL(Aka, 3); - Ame ^= De; - BCo = ROL(Ame, 45); - Asi ^= Di; - BCu = ROL(Asi, 61); - Ega = BCa ^ ((~BCe) & BCi); - Ege = BCe ^ ((~BCi) & BCo); - Egi = BCi ^ ((~BCo) & BCu); - Ego = BCo ^ ((~BCu) & BCa); - Egu = BCu ^ ((~BCa) & BCe); - - Abe ^= De; - BCa = ROL(Abe, 1); - Agi ^= Di; - BCe = ROL(Agi, 6); - Ako ^= Do; - BCi = ROL(Ako, 25); - Amu ^= Du; - BCo = ROL(Amu, 8); - Asa ^= Da; - BCu = ROL(Asa, 18); - Eka = BCa ^ ((~BCe) & BCi); - Eke = BCe ^ ((~BCi) & BCo); - Eki = BCi ^ ((~BCo) & BCu); - Eko = BCo ^ ((~BCu) & BCa); - Eku = BCu ^ ((~BCa) & BCe); - - Abu ^= Du; - BCa = ROL(Abu, 27); - Aga ^= Da; - BCe = ROL(Aga, 36); - Ake ^= De; - BCi = ROL(Ake, 10); - Ami ^= Di; - BCo = ROL(Ami, 15); - Aso ^= Do; - BCu = ROL(Aso, 56); - Ema = BCa ^ ((~BCe) & BCi); - Eme = BCe ^ ((~BCi) & BCo); - Emi = BCi ^ ((~BCo) & BCu); - Emo = BCo ^ ((~BCu) & BCa); - Emu = BCu ^ ((~BCa) & BCe); - - Abi ^= Di; - BCa = ROL(Abi, 62); - Ago ^= Do; - BCe = ROL(Ago, 55); - Aku ^= Du; - BCi = ROL(Aku, 39); - Ama ^= Da; - BCo = ROL(Ama, 41); - Ase ^= De; - BCu = ROL(Ase, 2); - Esa = BCa ^ ((~BCe) & BCi); - Ese = BCe ^ ((~BCi) & BCo); - Esi = BCi ^ ((~BCo) & BCu); - Eso = BCo ^ ((~BCu) & BCa); - Esu = BCu ^ ((~BCa) & BCe); - - // prepareTheta - BCa = Eba ^ Ega ^ Eka ^ Ema ^ Esa; - BCe = Ebe ^ Ege ^ Eke ^ Eme ^ Ese; - BCi = Ebi ^ Egi ^ Eki ^ Emi ^ Esi; - BCo = Ebo ^ Ego ^ Eko ^ Emo ^ Eso; - BCu = Ebu ^ Egu ^ Eku ^ Emu ^ Esu; - - // thetaRhoPiChiIotaPrepareTheta(round+1, E, A) - Da = BCu ^ ROL(BCe, 1); - De = BCa ^ ROL(BCi, 1); - Di = BCe ^ ROL(BCo, 1); - Do = BCi ^ ROL(BCu, 1); - Du = BCo ^ ROL(BCa, 1); - - Eba ^= Da; - BCa = Eba; - Ege ^= De; - BCe = ROL(Ege, 44); - Eki ^= Di; - BCi = ROL(Eki, 43); - Emo ^= Do; - BCo = ROL(Emo, 21); - Esu ^= Du; - BCu = ROL(Esu, 14); - Aba = BCa ^ 
((~BCe) & BCi); - Aba ^= KeccakF_RoundConstants[round + 1]; - Abe = BCe ^ ((~BCi) & BCo); - Abi = BCi ^ ((~BCo) & BCu); - Abo = BCo ^ ((~BCu) & BCa); - Abu = BCu ^ ((~BCa) & BCe); - - Ebo ^= Do; - BCa = ROL(Ebo, 28); - Egu ^= Du; - BCe = ROL(Egu, 20); - Eka ^= Da; - BCi = ROL(Eka, 3); - Eme ^= De; - BCo = ROL(Eme, 45); - Esi ^= Di; - BCu = ROL(Esi, 61); - Aga = BCa ^ ((~BCe) & BCi); - Age = BCe ^ ((~BCi) & BCo); - Agi = BCi ^ ((~BCo) & BCu); - Ago = BCo ^ ((~BCu) & BCa); - Agu = BCu ^ ((~BCa) & BCe); - - Ebe ^= De; - BCa = ROL(Ebe, 1); - Egi ^= Di; - BCe = ROL(Egi, 6); - Eko ^= Do; - BCi = ROL(Eko, 25); - Emu ^= Du; - BCo = ROL(Emu, 8); - Esa ^= Da; - BCu = ROL(Esa, 18); - Aka = BCa ^ ((~BCe) & BCi); - Ake = BCe ^ ((~BCi) & BCo); - Aki = BCi ^ ((~BCo) & BCu); - Ako = BCo ^ ((~BCu) & BCa); - Aku = BCu ^ ((~BCa) & BCe); - - Ebu ^= Du; - BCa = ROL(Ebu, 27); - Ega ^= Da; - BCe = ROL(Ega, 36); - Eke ^= De; - BCi = ROL(Eke, 10); - Emi ^= Di; - BCo = ROL(Emi, 15); - Eso ^= Do; - BCu = ROL(Eso, 56); - Ama = BCa ^ ((~BCe) & BCi); - Ame = BCe ^ ((~BCi) & BCo); - Ami = BCi ^ ((~BCo) & BCu); - Amo = BCo ^ ((~BCu) & BCa); - Amu = BCu ^ ((~BCa) & BCe); - - Ebi ^= Di; - BCa = ROL(Ebi, 62); - Ego ^= Do; - BCe = ROL(Ego, 55); - Eku ^= Du; - BCi = ROL(Eku, 39); - Ema ^= Da; - BCo = ROL(Ema, 41); - Ese ^= De; - BCu = ROL(Ese, 2); - Asa = BCa ^ ((~BCe) & BCi); - Ase = BCe ^ ((~BCi) & BCo); - Asi = BCi ^ ((~BCo) & BCu); - Aso = BCo ^ ((~BCu) & BCa); - Asu = BCu ^ ((~BCa) & BCe); - } - - // copyToState(state, A) - state[0] = Aba; - state[1] = Abe; - state[2] = Abi; - state[3] = Abo; - state[4] = Abu; - state[5] = Aga; - state[6] = Age; - state[7] = Agi; - state[8] = Ago; - state[9] = Agu; - state[10] = Aka; - state[11] = Ake; - state[12] = Aki; - state[13] = Ako; - state[14] = Aku; - state[15] = Ama; - state[16] = Ame; - state[17] = Ami; - state[18] = Amo; - state[19] = Amu; - state[20] = Asa; - state[21] = Ase; - state[22] = Asi; - state[23] = Aso; - state[24] = Asu; -} - -/************************************************* - * Name: keccak_absorb - * - * Description: Absorb step of Keccak; - * non-incremental, starts by zeroeing the state. - * - * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - * - const uint8_t *m: pointer to input to be absorbed into s - * - size_t mlen: length of input in bytes - * - uint8_t p: domain-separation byte for different - * Keccak-derived functions - **************************************************/ -static void keccak_absorb(uint64_t *s, uint32_t r, const uint8_t *m, - size_t mlen, uint8_t p) { - size_t i; - uint8_t t[200]; - - /* Zero state */ - for (i = 0; i < 25; ++i) { - s[i] = 0; - } - - while (mlen >= r) { - for (i = 0; i < r / 8; ++i) { - s[i] ^= load64(m + 8 * i); - } - - KeccakF1600_StatePermute(s); - mlen -= r; - m += r; - } - - for (i = 0; i < r; ++i) { - t[i] = 0; - } - for (i = 0; i < mlen; ++i) { - t[i] = m[i]; - } - t[i] = p; - t[r - 1] |= 128; - for (i = 0; i < r / 8; ++i) { - s[i] ^= load64(t + 8 * i); - } -} - -/************************************************* - * Name: keccak_squeezeblocks - * - * Description: Squeeze step of Keccak. Squeezes full blocks of r bytes each. - * Modifies the state. Can be called multiple times to keep - * squeezing, i.e., is incremental. 
- * - * Arguments: - uint8_t *h: pointer to output blocks - * - size_t nblocks: number of blocks to be - * squeezed (written to h) - * - uint64_t *s: pointer to input/output Keccak state - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - **************************************************/ -static void keccak_squeezeblocks(uint8_t *h, size_t nblocks, - uint64_t *s, uint32_t r) { - while (nblocks > 0) { - KeccakF1600_StatePermute(s); - for (size_t i = 0; i < (r >> 3); i++) { - store64(h + 8 * i, s[i]); - } - h += r; - nblocks--; - } -} - -/************************************************* - * Name: keccak_inc_init - * - * Description: Initializes the incremental Keccak state to zero. - * - * Arguments: - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. - **************************************************/ -static void keccak_inc_init(uint64_t *s_inc) { - size_t i; - - for (i = 0; i < 25; ++i) { - s_inc[i] = 0; - } - s_inc[25] = 0; -} - -/************************************************* - * Name: keccak_inc_absorb - * - * Description: Incremental keccak absorb - * Preceded by keccak_inc_init, succeeded by keccak_inc_finalize - * - * Arguments: - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - * - const uint8_t *m: pointer to input to be absorbed into s - * - size_t mlen: length of input in bytes - **************************************************/ -static void keccak_inc_absorb(uint64_t *s_inc, uint32_t r, const uint8_t *m, - size_t mlen) { - size_t i; - - /* Recall that s_inc[25] is the non-absorbed bytes xored into the state */ - while (mlen + s_inc[25] >= r) { - for (i = 0; i < r - (uint32_t)s_inc[25]; i++) { - /* Take the i'th byte from message - xor with the s_inc[25] + i'th byte of the state; little-endian */ - s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); - } - mlen -= (size_t)(r - s_inc[25]); - m += r - s_inc[25]; - s_inc[25] = 0; - - KeccakF1600_StatePermute(s_inc); - } - - for (i = 0; i < mlen; i++) { - s_inc[(s_inc[25] + i) >> 3] ^= (uint64_t)m[i] << (8 * ((s_inc[25] + i) & 0x07)); - } - s_inc[25] += mlen; -} - -/************************************************* - * Name: keccak_inc_finalize - * - * Description: Finalizes Keccak absorb phase, prepares for squeezing - * - * Arguments: - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - * - uint8_t p: domain-separation byte for different - * Keccak-derived functions - **************************************************/ -static void keccak_inc_finalize(uint64_t *s_inc, uint32_t r, uint8_t p) { - /* After keccak_inc_absorb, we are guaranteed that s_inc[25] < r, - so we can always use one more byte for p in the current state. 
*/ - s_inc[s_inc[25] >> 3] ^= (uint64_t)p << (8 * (s_inc[25] & 0x07)); - s_inc[(r - 1) >> 3] ^= (uint64_t)128 << (8 * ((r - 1) & 0x07)); - s_inc[25] = 0; -} - -/************************************************* - * Name: keccak_inc_squeeze - * - * Description: Incremental Keccak squeeze; can be called on byte-level - * - * Arguments: - uint8_t *h: pointer to output bytes - * - size_t outlen: number of bytes to be squeezed - * - uint64_t *s_inc: pointer to input/output incremental state - * First 25 values represent Keccak state. - * 26th value represents either the number of absorbed bytes - * that have not been permuted, or not-yet-squeezed bytes. - * - uint32_t r: rate in bytes (e.g., 168 for SHAKE128) - **************************************************/ -static void keccak_inc_squeeze(uint8_t *h, size_t outlen, - uint64_t *s_inc, uint32_t r) { - size_t i; - - /* First consume any bytes we still have sitting around */ - for (i = 0; i < outlen && i < s_inc[25]; i++) { - /* There are s_inc[25] bytes left, so r - s_inc[25] is the first - available byte. We consume from there, i.e., up to r. */ - h[i] = (uint8_t)(s_inc[(r - s_inc[25] + i) >> 3] >> (8 * ((r - s_inc[25] + i) & 0x07))); - } - h += i; - outlen -= i; - s_inc[25] -= i; - - /* Then squeeze the remaining necessary blocks */ - while (outlen > 0) { - KeccakF1600_StatePermute(s_inc); - - for (i = 0; i < outlen && i < r; i++) { - h[i] = (uint8_t)(s_inc[i >> 3] >> (8 * (i & 0x07))); - } - h += i; - outlen -= i; - s_inc[25] = r - i; - } -} - -void shake128_inc_init(shake128incctx *state) { - keccak_inc_init(state->ctx); -} - -void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHAKE128_RATE, input, inlen); -} - -void shake128_inc_finalize(shake128incctx *state) { - keccak_inc_finalize(state->ctx, SHAKE128_RATE, 0x1F); -} - -void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state) { - keccak_inc_squeeze(output, outlen, state->ctx, SHAKE128_RATE); -} - -void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void shake128_inc_ctx_release(shake128incctx *state) { - (void)state; -} - -void shake256_inc_init(shake256incctx *state) { - keccak_inc_init(state->ctx); -} - -void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHAKE256_RATE, input, inlen); -} - -void shake256_inc_finalize(shake256incctx *state) { - keccak_inc_finalize(state->ctx, SHAKE256_RATE, 0x1F); -} - -void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state) { - keccak_inc_squeeze(output, outlen, state->ctx, SHAKE256_RATE); -} - -void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void shake256_inc_ctx_release(shake256incctx *state) { - (void)state; -} - - -/************************************************* - * Name: shake128_absorb - * - * Description: Absorb step of the SHAKE128 XOF. - * non-incremental, starts by zeroeing the state. 
- * - * Arguments: - uint64_t *s: pointer to (uninitialized) output Keccak state - * - const uint8_t *input: pointer to input to be absorbed - * into s - * - size_t inlen: length of input in bytes - **************************************************/ -void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen) { - keccak_absorb(state->ctx, SHAKE128_RATE, input, inlen, 0x1F); -} - -/************************************************* - * Name: shake128_squeezeblocks - * - * Description: Squeeze step of SHAKE128 XOF. Squeezes full blocks of - * SHAKE128_RATE bytes each. Modifies the state. Can be called - * multiple times to keep squeezing, i.e., is incremental. - * - * Arguments: - uint8_t *output: pointer to output blocks - * - size_t nblocks: number of blocks to be squeezed - * (written to output) - * - shake128ctx *state: pointer to input/output Keccak state - **************************************************/ -void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state) { - keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE128_RATE); -} - -void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); -} - -/** Release the allocated state. Call only once. */ -void shake128_ctx_release(shake128ctx *state) { - (void)state; -} - -/************************************************* - * Name: shake256_absorb - * - * Description: Absorb step of the SHAKE256 XOF. - * non-incremental, starts by zeroeing the state. - * - * Arguments: - shake256ctx *state: pointer to (uninitialized) output Keccak state - * - const uint8_t *input: pointer to input to be absorbed - * into s - * - size_t inlen: length of input in bytes - **************************************************/ -void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen) { - keccak_absorb(state->ctx, SHAKE256_RATE, input, inlen, 0x1F); -} - -/************************************************* - * Name: shake256_squeezeblocks - * - * Description: Squeeze step of SHAKE256 XOF. Squeezes full blocks of - * SHAKE256_RATE bytes each. Modifies the state. Can be called - * multiple times to keep squeezing, i.e., is incremental. - * - * Arguments: - uint8_t *output: pointer to output blocks - * - size_t nblocks: number of blocks to be squeezed - * (written to output) - * - shake256ctx *state: pointer to input/output Keccak state - **************************************************/ -void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state) { - keccak_squeezeblocks(output, nblocks, state->ctx, SHAKE256_RATE); -} - -void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKECTX_BYTES); -} - -/** Release the allocated state. Call only once. 
*/ -void shake256_ctx_release(shake256ctx *state) { - (void)state; -} - -/************************************************* - * Name: shake128 - * - * Description: SHAKE128 XOF with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - size_t outlen: requested output length in bytes - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void shake128(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen) { - size_t nblocks = outlen / SHAKE128_RATE; - uint8_t t[SHAKE128_RATE]; - shake128ctx s; - - shake128_absorb(&s, input, inlen); - shake128_squeezeblocks(output, nblocks, &s); - - output += nblocks * SHAKE128_RATE; - outlen -= nblocks * SHAKE128_RATE; - - if (outlen) { - shake128_squeezeblocks(t, 1, &s); - for (size_t i = 0; i < outlen; ++i) { - output[i] = t[i]; - } - } - shake128_ctx_release(&s); -} - -/************************************************* - * Name: shake256 - * - * Description: SHAKE256 XOF with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - size_t outlen: requested output length in bytes - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void shake256(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen) { - size_t nblocks = outlen / SHAKE256_RATE; - uint8_t t[SHAKE256_RATE]; - shake256ctx s; - - shake256_absorb(&s, input, inlen); - shake256_squeezeblocks(output, nblocks, &s); - - output += nblocks * SHAKE256_RATE; - outlen -= nblocks * SHAKE256_RATE; - - if (outlen) { - shake256_squeezeblocks(t, 1, &s); - for (size_t i = 0; i < outlen; ++i) { - output[i] = t[i]; - } - } - shake256_ctx_release(&s); -} - -void sha3_256_inc_init(sha3_256incctx *state) { - keccak_inc_init(state->ctx); -} - -void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void sha3_256_inc_ctx_release(sha3_256incctx *state) { - (void)state; -} - -void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHA3_256_RATE, input, inlen); -} - -void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state) { - uint8_t t[SHA3_256_RATE]; - keccak_inc_finalize(state->ctx, SHA3_256_RATE, 0x06); - - keccak_squeezeblocks(t, 1, state->ctx, SHA3_256_RATE); - - sha3_256_inc_ctx_release(state); - - for (size_t i = 0; i < 32; i++) { - output[i] = t[i]; - } -} - -/************************************************* - * Name: sha3_256 - * - * Description: SHA3-256 with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen) { - uint64_t s[25]; - uint8_t t[SHA3_256_RATE]; - - /* Absorb input */ - keccak_absorb(s, SHA3_256_RATE, input, inlen, 0x06); - - /* Squeeze output */ - keccak_squeezeblocks(t, 1, s, SHA3_256_RATE); - - for (size_t i = 0; i < 32; i++) { - output[i] = t[i]; - } -} - -void sha3_384_inc_init(sha3_384incctx *state) { - keccak_inc_init(state->ctx); -} - -void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, 
size_t inlen) { - keccak_inc_absorb(state->ctx, SHA3_384_RATE, input, inlen); -} - -void sha3_384_inc_ctx_release(sha3_384incctx *state) { - (void)state; -} - -void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state) { - uint8_t t[SHA3_384_RATE]; - keccak_inc_finalize(state->ctx, SHA3_384_RATE, 0x06); - - keccak_squeezeblocks(t, 1, state->ctx, SHA3_384_RATE); - - sha3_384_inc_ctx_release(state); - - for (size_t i = 0; i < 48; i++) { - output[i] = t[i]; - } -} - -/************************************************* - * Name: sha3_384 - * - * Description: SHA3-256 with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen) { - uint64_t s[25]; - uint8_t t[SHA3_384_RATE]; - - /* Absorb input */ - keccak_absorb(s, SHA3_384_RATE, input, inlen, 0x06); - - /* Squeeze output */ - keccak_squeezeblocks(t, 1, s, SHA3_384_RATE); - - for (size_t i = 0; i < 48; i++) { - output[i] = t[i]; - } -} - -void sha3_512_inc_init(sha3_512incctx *state) { - keccak_inc_init(state->ctx); -} - -void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src) { - memcpy(dest->ctx, src->ctx, PQC_SHAKEINCCTX_BYTES); -} - -void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen) { - keccak_inc_absorb(state->ctx, SHA3_512_RATE, input, inlen); -} - -void sha3_512_inc_ctx_release(sha3_512incctx *state) { - (void)state; -} - -void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state) { - uint8_t t[SHA3_512_RATE]; - keccak_inc_finalize(state->ctx, SHA3_512_RATE, 0x06); - - keccak_squeezeblocks(t, 1, state->ctx, SHA3_512_RATE); - - sha3_512_inc_ctx_release(state); - - for (size_t i = 0; i < 64; i++) { - output[i] = t[i]; - } -} - -/************************************************* - * Name: sha3_512 - * - * Description: SHA3-512 with non-incremental API - * - * Arguments: - uint8_t *output: pointer to output - * - const uint8_t *input: pointer to input - * - size_t inlen: length of input in bytes - **************************************************/ -void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen) { - uint64_t s[25]; - uint8_t t[SHA3_512_RATE]; - - /* Absorb input */ - keccak_absorb(s, SHA3_512_RATE, input, inlen, 0x06); - - /* Squeeze output */ - keccak_squeezeblocks(t, 1, s, SHA3_512_RATE); - - for (size_t i = 0; i < 64; i++) { - output[i] = t[i]; - } -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fips202.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fips202.h index c29ebd8f9d..21bc0c3f79 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fips202.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fips202.h @@ -3,169 +3,12 @@ #ifndef FIPS202_H #define FIPS202_H -#include -#include +#include -#define SHAKE128_RATE 168 -#define SHAKE256_RATE 136 -#define SHA3_256_RATE 136 -#define SHA3_384_RATE 104 -#define SHA3_512_RATE 72 - -#define PQC_SHAKEINCCTX_U64WORDS 26 -#define PQC_SHAKECTX_U64WORDS 25 - -#define PQC_SHAKEINCCTX_BYTES (sizeof(uint64_t) * 26) -#define PQC_SHAKECTX_BYTES (sizeof(uint64_t) * 25) - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} shake128incctx; - -// Context for non-incremental API -typedef struct { - uint64_t ctx[PQC_SHAKECTX_U64WORDS]; -} shake128ctx; - -// Context for incremental API -typedef struct { - uint64_t 
ctx[PQC_SHAKEINCCTX_U64WORDS]; -} shake256incctx; - -// Context for non-incremental API -typedef struct { - uint64_t ctx[PQC_SHAKECTX_U64WORDS]; -} shake256ctx; - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} sha3_256incctx; - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} sha3_384incctx; - -// Context for incremental API -typedef struct { - uint64_t ctx[PQC_SHAKEINCCTX_U64WORDS]; -} sha3_512incctx; - -/* Initialize the state and absorb the provided input. - * - * This function does not support being called multiple times - * with the same state. - */ -void shake128_absorb(shake128ctx *state, const uint8_t *input, size_t inlen); -/* Squeeze output out of the sponge. - * - * Supports being called multiple times - */ -void shake128_squeezeblocks(uint8_t *output, size_t nblocks, shake128ctx *state); -/* Free the state */ -void shake128_ctx_release(shake128ctx *state); -/* Copy the state. */ -void shake128_ctx_clone(shake128ctx *dest, const shake128ctx *src); - -/* Initialize incremental hashing API */ -void shake128_inc_init(shake128incctx *state); -/* Absorb more information into the XOF. - * - * Can be called multiple times. - */ -void shake128_inc_absorb(shake128incctx *state, const uint8_t *input, size_t inlen); -/* Finalize the XOF for squeezing */ -void shake128_inc_finalize(shake128incctx *state); -/* Squeeze output out of the sponge. - * - * Supports being called multiple times - */ -void shake128_inc_squeeze(uint8_t *output, size_t outlen, shake128incctx *state); -/* Copy the context of the SHAKE128 XOF */ -void shake128_inc_ctx_clone(shake128incctx *dest, const shake128incctx *src); -/* Free the context of the SHAKE128 XOF */ -void shake128_inc_ctx_release(shake128incctx *state); - -/* Initialize the state and absorb the provided input. - * - * This function does not support being called multiple times - * with the same state. - */ -void shake256_absorb(shake256ctx *state, const uint8_t *input, size_t inlen); -/* Squeeze output out of the sponge. - * - * Supports being called multiple times - */ -void shake256_squeezeblocks(uint8_t *output, size_t nblocks, shake256ctx *state); -/* Free the context held by this XOF */ -void shake256_ctx_release(shake256ctx *state); -/* Copy the context held by this XOF */ -void shake256_ctx_clone(shake256ctx *dest, const shake256ctx *src); - -/* Initialize incremental hashing API */ -void shake256_inc_init(shake256incctx *state); -void shake256_inc_absorb(shake256incctx *state, const uint8_t *input, size_t inlen); -/* Prepares for squeeze phase */ -void shake256_inc_finalize(shake256incctx *state); -/* Squeeze output out of the sponge. 
- * - * Supports being called multiple times - */ -void shake256_inc_squeeze(uint8_t *output, size_t outlen, shake256incctx *state); -/* Copy the state */ -void shake256_inc_ctx_clone(shake256incctx *dest, const shake256incctx *src); -/* Free the state */ -void shake256_inc_ctx_release(shake256incctx *state); - -/* One-stop SHAKE128 call */ -void shake128(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen); - -/* One-stop SHAKE256 call */ -void shake256(uint8_t *output, size_t outlen, - const uint8_t *input, size_t inlen); - -/* Initialize the incremental hashing state */ -void sha3_256_inc_init(sha3_256incctx *state); -/* Absorb blocks into SHA3 */ -void sha3_256_inc_absorb(sha3_256incctx *state, const uint8_t *input, size_t inlen); -/* Obtain the output of the function and free `state` */ -void sha3_256_inc_finalize(uint8_t *output, sha3_256incctx *state); -/* Copy the context */ -void sha3_256_inc_ctx_clone(sha3_256incctx *dest, const sha3_256incctx *src); -/* Release the state, don't use if `_finalize` has been used */ -void sha3_256_inc_ctx_release(sha3_256incctx *state); - -void sha3_256(uint8_t *output, const uint8_t *input, size_t inlen); - -/* Initialize the incremental hashing state */ -void sha3_384_inc_init(sha3_384incctx *state); -/* Absorb blocks into SHA3 */ -void sha3_384_inc_absorb(sha3_384incctx *state, const uint8_t *input, size_t inlen); -/* Obtain the output of the function and free `state` */ -void sha3_384_inc_finalize(uint8_t *output, sha3_384incctx *state); -/* Copy the context */ -void sha3_384_inc_ctx_clone(sha3_384incctx *dest, const sha3_384incctx *src); -/* Release the state, don't use if `_finalize` has been used */ -void sha3_384_inc_ctx_release(sha3_384incctx *state); - -/* One-stop SHA3-384 shop */ -void sha3_384(uint8_t *output, const uint8_t *input, size_t inlen); - -/* Initialize the incremental hashing state */ -void sha3_512_inc_init(sha3_512incctx *state); -/* Absorb blocks into SHA3 */ -void sha3_512_inc_absorb(sha3_512incctx *state, const uint8_t *input, size_t inlen); -/* Obtain the output of the function and free `state` */ -void sha3_512_inc_finalize(uint8_t *output, sha3_512incctx *state); -/* Copy the context */ -void sha3_512_inc_ctx_clone(sha3_512incctx *dest, const sha3_512incctx *src); -/* Release the state, don't use if `_finalize` has been used */ -void sha3_512_inc_ctx_release(sha3_512incctx *state); - -/* One-stop SHA3-512 shop */ -void sha3_512(uint8_t *output, const uint8_t *input, size_t inlen); +#define shake256incctx OQS_SHA3_shake256_inc_ctx +#define shake256_inc_init OQS_SHA3_shake256_inc_init +#define shake256_inc_absorb OQS_SHA3_shake256_inc_absorb +#define shake256_inc_finalize OQS_SHA3_shake256_inc_finalize +#define shake256_inc_squeeze OQS_SHA3_shake256_inc_squeeze #endif diff --git a/tests/KATs/sig/kats.json b/tests/KATs/sig/kats.json index 4839e3c6b7..ba6751ff80 100644 --- a/tests/KATs/sig/kats.json +++ b/tests/KATs/sig/kats.json @@ -200,13 +200,13 @@ "single": "37d37c9b43d71341b7dd5da7f8ebbe8bbae3d7bfc53f5378446023cbcf6e04f2" }, "SQIsign-lvl1": { - "single": "cce2e6a0e6aff1179eba6b8b5cb1b096e751f9aafdf8934a2df857164a902202" + "single": "beb20e99de64fe016008e9c6b117070a93a230fab481df87fe510d0e70206c5b" }, "SQIsign-lvl3": { - "single": "cce2e6a0e6aff1179eba6b8b5cb1b096e751f9aafdf8934a2df857164a902202" + "single": "67a6ec3595b7cf6ba7641b76ccf9aabb0eb9ca17eb375f7ffad3d23ce17f0a8c" }, "SQIsign-lvl5": { - "single": "cce2e6a0e6aff1179eba6b8b5cb1b096e751f9aafdf8934a2df857164a902202" + "single": 
"ab4c24835843b316d94ea60252b2d81941c90dfb7a4a56d993d37ae958f7e570" }, "cross-rsdp-128-balanced": { "all": "7b12a6f71166cde8289c732b3107eaa21edf59c2f336b0921a62faa93980de77", diff --git a/tests/test_binary.py b/tests/test_binary.py index 3b8a3ec6c4..f10c40138d 100644 --- a/tests/test_binary.py +++ b/tests/test_binary.py @@ -33,7 +33,7 @@ def test_namespace(): symbols.append(line) # ideally this would be just ['oqs', 'pqclean'], but contains exceptions (e.g., providing compat implementations of unavailable platform functions) - namespaces = ['oqs', 'pqclean', 'keccak', 'pqcrystals', 'pqmayo', 'init', 'fini', 'seedexpander', '__x86.get_pc_thunk', 'libjade', 'jade', '__jade', '__jasmin_syscall', 'pqcp', 'pqov', '_snova', 'sha3'] + namespaces = ['oqs', 'pqclean', 'keccak', 'pqcrystals', 'pqmayo', 'init', 'fini', 'seedexpander', '__x86.get_pc_thunk', 'libjade', 'jade', '__jade', '__jasmin_syscall', 'pqcp', 'pqov', '_snova', '_sqisign', 'sha3'] non_namespaced = [] for symbolstr in symbols: From 19eead5e8243216740cbca27cc657bc570829f0e Mon Sep 17 00:00:00 2001 From: Basil Hess Date: Fri, 11 Jul 2025 23:31:22 +0200 Subject: [PATCH 05/19] Update cmake logic, import Add gmp to nix [full tests] Signed-off-by: Basil Hess --- .CMake/alg_support.cmake | 14 - CMakeLists.txt | 33 + docs/algorithms/sig/sqisign.md | 4 +- docs/algorithms/sig/sqisign.yml | 5 +- flake.nix | 2 +- .../copy_from_upstream/copy_from_upstream.yml | 7 +- .../src/sig/family/CMakeLists.txt | 3 + src/CMakeLists.txt | 6 +- src/sig/sqisign/CMakeLists.txt | 36 +- .../mini-gmp-extra.c | 73 - .../mini-gmp-extra.h | 19 - .../mini-gmp.c | 4671 ----------------- .../mini-gmp.h | 311 -- .../sqisign_namespace.h | 168 +- .../fp_p5248_32.c | 2 +- .../fp_p5248_64.c | 2 +- .../mini-gmp-extra.c | 73 - .../mini-gmp-extra.h | 19 - .../the-sqisign_sqisign_lvl1_ref/mini-gmp.c | 4671 ----------------- .../the-sqisign_sqisign_lvl1_ref/mini-gmp.h | 311 -- .../sqisign_namespace.h | 168 +- .../mini-gmp-extra.c | 73 - .../mini-gmp-extra.h | 19 - .../mini-gmp.c | 4671 ----------------- .../mini-gmp.h | 311 -- .../sqisign_namespace.h | 168 +- .../fp_p65376_32.c | 2 +- .../fp_p65376_64.c | 2 +- .../mini-gmp-extra.c | 73 - .../mini-gmp-extra.h | 19 - .../the-sqisign_sqisign_lvl3_ref/mini-gmp.c | 4671 ----------------- .../the-sqisign_sqisign_lvl3_ref/mini-gmp.h | 311 -- .../sqisign_namespace.h | 168 +- .../mini-gmp-extra.c | 73 - .../mini-gmp-extra.h | 19 - .../mini-gmp.c | 4671 ----------------- .../mini-gmp.h | 311 -- .../sqisign_namespace.h | 168 +- .../fp_p27500_32.c | 2 +- .../fp_p27500_64.c | 2 +- .../mini-gmp-extra.c | 73 - .../mini-gmp-extra.h | 19 - .../the-sqisign_sqisign_lvl5_ref/mini-gmp.c | 4671 ----------------- .../the-sqisign_sqisign_lvl5_ref/mini-gmp.h | 311 -- .../sqisign_namespace.h | 168 +- 45 files changed, 778 insertions(+), 30796 deletions(-) delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp.h delete mode 100644 
src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp.c delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp.h diff --git a/.CMake/alg_support.cmake b/.CMake/alg_support.cmake index 83f94ae8d0..a1472d3324 100644 --- a/.CMake/alg_support.cmake +++ b/.CMake/alg_support.cmake @@ -230,20 +230,6 @@ cmake_dependent_option(OQS_ENABLE_SIG_sqisign_lvl5 "" ON "OQS_ENABLE_SIG_SQISIGN ##### OQS_COPY_FROM_UPSTREAM_FRAGMENT_ADD_ENABLE_BY_ALG_END -# TODO Don't know where to put this. We can just fix it so that only 64-bit systems are supported. -if(CMAKE_SIZEOF_VOID_P MATCHES "4") - # TODO Should also disable boradwell builds here. - add_compile_definitions(RADIX_32) - add_compile_definitions(GMP_LIMB_BITS=32) - message(STATUS "SQISign using 32 bit stuff") -else() - add_compile_definitions(RADIX_64) - # This is potentially an issues for a 64 bit system without uint128_t support. - add_compile_definitions(HAVE_UINT128) - add_compile_definitions(GMP_LIMB_BITS=64) - message(STATUS "SQISign using 64 bit stuff") -endif() - ##### OQS_COPY_FROM_LIBJADE_FRAGMENT_ADD_ENABLE_BY_ALG_START if ((OQS_LIBJADE_BUILD STREQUAL "ON")) diff --git a/CMakeLists.txt b/CMakeLists.txt index f231e797ac..cc8c9724da 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -28,6 +28,7 @@ option(OQS_PERMIT_UNSUPPORTED_ARCHITECTURE "Permit compilation on an an unsuppor option(OQS_STRICT_WARNINGS "Enable all compiler warnings." OFF) option(OQS_EMBEDDED_BUILD "Compile liboqs for an Embedded environment without a full standard library." OFF) option(OQS_USE_CUPQC "Utilize cuPQC as the backend for supported PQC algorithms." OFF) +option(OQS_USE_GMP "Utilize GMP for supported PQC algorithms." 
ON) # Libfuzzer isn't supported on gcc if('${CMAKE_C_COMPILER_ID}' STREQUAL 'Clang') @@ -150,6 +151,38 @@ if(${OQS_USE_CUPQC}) endif() find_package(cuPQC 0.2.0 REQUIRED) endif() +if(${OQS_USE_GMP}) + message(STATUS "Using system GMP") + + find_library(GMP gmp REQUIRED) + find_path(GMP_INCLUDE gmp.h) + + add_library(GMP SHARED IMPORTED) + set_target_properties(GMP PROPERTIES + IMPORTED_LOCATION ${GMP} + INTERFACE_INCLUDE_DIRECTORIES ${GMP_INCLUDE} + ) + + if(CMAKE_SIZEOF_VOID_P MATCHES "4") + add_compile_definitions(RADIX_32) + add_compile_definitions(GMP_LIMB_BITS=32) + else() + add_compile_definitions(RADIX_64) + include(CheckCSourceCompiles) + check_c_source_compiles(" + int main() { + __uint128_t x = 0; + (void)x; + return 0; + } + " HAVE_UINT128_T) + if (HAVE_UINT128_T) + add_compile_definitions(HAVE_UINT128) + endif() + add_compile_definitions(GMP_LIMB_BITS=64) + endif() + +endif() if (NOT ((CMAKE_SYSTEM_NAME MATCHES "Linux|Darwin") AND (ARCH_X86_64 STREQUAL "ON")) AND (OQS_LIBJADE_BUILD STREQUAL "ON")) message(FATAL_ERROR "Building liboqs with libjade implementations from libjade is only supported on Linux and Darwin on x86_64.") diff --git a/docs/algorithms/sig/sqisign.md b/docs/algorithms/sig/sqisign.md index efd0ec5b39..df6d95d2b6 100644 --- a/docs/algorithms/sig/sqisign.md +++ b/docs/algorithms/sig/sqisign.md @@ -2,11 +2,11 @@ - **Algorithm type**: Digital signature scheme. - **Main cryptographic assumption**: Computing the endomorphism ring of a supersingular elliptic curve.. -- **Principal submitters**: Marius A. Aardal, Gora Adj, Diego F.Aranha, Andrea Basso, Isaac Andrés Canales Martínez, Jorge Chávez-Saab, Maria Corte-Real Santos, Pierrick Dartois, Luca De Feo, Max Duparc, Jonathan Komada Eriksen, Tako Boris Fouotsa, Décio Luiz Gazzoni Filho, Basil Hess, David Kohel, Antonin Leroux, Patrick Longa, Luciano Maino, Michael Meyer, Kohei Nakagawa, Hiroshi Onuki, Lorenz Panny, Sikhar Patranabis, Christophe Petit, Giacomo Pope, Krijn Reijnders, Damien Robert, Francisco Rodríguez Henríquez, Sina Schaeffler, Benjamin Wesolowski. +- **Principal submitters**: Marius A. Aardal, Gora Adj, Diego F. Aranha, Andrea Basso, Isaac Andrés Canales Martínez, Jorge Chávez-Saab, Maria Corte-Real Santos, Pierrick Dartois, Luca De Feo, Max Duparc, Jonathan Komada Eriksen, Tako Boris Fouotsa, Décio Luiz Gazzoni Filho, Basil Hess, David Kohel, Antonin Leroux, Patrick Longa, Luciano Maino, Michael Meyer, Kohei Nakagawa, Hiroshi Onuki, Lorenz Panny, Sikhar Patranabis, Christophe Petit, Giacomo Pope, Krijn Reijnders, Damien Robert, Francisco Rodríguez Henríquez, Sina Schaeffler, Benjamin Wesolowski. - **Authors' website**: https://sqisign.org/ - **Specification version**: Round 2. - **Primary Source**: - - **Source**: https://github.com/shane-digi/the-sqisign/commit/09bce2f0244bd11caa90c4eaef2150a759bd945d with copy_from_upstream patches + - **Source**: https://github.com/bhess/the-sqisign/commit/39b09acd532c69e3fb1206b4502572479288df92 - **Implementation license (SPDX-Identifier)**: Apache-2.0 diff --git a/docs/algorithms/sig/sqisign.yml b/docs/algorithms/sig/sqisign.yml index 4fb1092aef..541018a944 100644 --- a/docs/algorithms/sig/sqisign.yml +++ b/docs/algorithms/sig/sqisign.yml @@ -3,7 +3,7 @@ type: signature principal-submitters: - Marius A. Aardal - Gora Adj -- Diego F.Aranha +- Diego F. 
Aranha - Andrea Basso - Isaac Andrés Canales Martínez - Jorge Chávez-Saab @@ -36,8 +36,7 @@ website: https://sqisign.org/ nist-round: 2 spec-version: Round 2 primary-upstream: - source: https://github.com/shane-digi/the-sqisign/commit/09bce2f0244bd11caa90c4eaef2150a759bd945d - with copy_from_upstream patches + source: https://github.com/bhess/the-sqisign/commit/39b09acd532c69e3fb1206b4502572479288df92 spdx-license-identifier: Apache-2.0 parameter-sets: - name: SQIsign-lvl1 diff --git a/flake.nix b/flake.nix index bbdae197e6..4fd23d54bc 100644 --- a/flake.nix +++ b/flake.nix @@ -35,7 +35,7 @@ else [pkgs.gcc] ); - buildInputs = with pkgs; [openssl]; + buildInputs = with pkgs; [openssl gmp]; cmakeFlags = [ "-GNinja" diff --git a/scripts/copy_from_upstream/copy_from_upstream.yml b/scripts/copy_from_upstream/copy_from_upstream.yml index 4cfcfc616c..0e49664ddb 100644 --- a/scripts/copy_from_upstream/copy_from_upstream.yml +++ b/scripts/copy_from_upstream/copy_from_upstream.yml @@ -94,12 +94,11 @@ upstreams: - name: the-sqisign - git_url: https://github.com/shane-digi/the-sqisign.git + git_url: https://github.com/bhess/the-sqisign.git git_branch: oqs - git_commit: 09bce2f0244bd11caa90c4eaef2150a759bd945d + git_commit: 39b09acd532c69e3fb1206b4502572479288df92 sig_scheme_path: '.' - sig_meta_path: 'META/{pqclean_scheme}.yml' - patches: [sqisign_fp.patch, sqisign_namespace.patch] + sig_meta_path: 'integration/liboqs/{pqclean_scheme}.yml' kems: - diff --git a/scripts/copy_from_upstream/src/sig/family/CMakeLists.txt b/scripts/copy_from_upstream/src/sig/family/CMakeLists.txt index b9103baa19..522a96cb03 100644 --- a/scripts/copy_from_upstream/src/sig/family/CMakeLists.txt +++ b/scripts/copy_from_upstream/src/sig/family/CMakeLists.txt @@ -40,6 +40,9 @@ if(OQS_ENABLE_SIG_{{ family }}_{{ scheme['scheme_c'] }}_{{ impl['name'] }}{%- if {%- endif %} target_include_directories({{ family }}_{{ scheme['scheme'] }}_{{ impl['name'] }} PRIVATE ${CMAKE_CURRENT_LIST_DIR}/{{ impl['upstream']['name'] }}_{{ scheme['pqclean_scheme'] }}_{{ impl['name'] }}) target_include_directories({{ family }}_{{ scheme['scheme'] }}_{{ impl['name'] }} PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) + {%- if impl['external'] and impl['external'] == 'gmp' %} + target_include_directories({{ family }}_{{ scheme['scheme'] }}_{{ impl['name'] }} PRIVATE ${GMP_INCLUDE}) + {%- endif -%} {%- if impl['name'] != scheme['default_implementation'] and impl['required_flags'] %} target_compile_options({{ family }}_{{ scheme['scheme'] }}_{{ impl['name'] }} PRIVATE {%- for flag in impl['required_flags'] %}{%- if flag == 'arm_sha3' %} -march=armv8-a+crypto+sha3 {%- else -%}{%- if flag != 'arm_neon' %} -m{%- if flag == 'bmi1' -%} bmi {%- elif flag == 'sse4_1' -%} sse4.1 {%- elif flag == 'pclmulqdq' -%} pclmul {%- else -%}{{ flag }}{%- endif -%}{%- endif -%}{%- endif -%}{%- endfor -%}) {%- endif %} diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index f743596496..97fca8f853 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -8,7 +8,7 @@ add_subdirectory(common) # initialize KEM|SIG_OBJS for --warn-uninitialized set(KEM_OBJS "") set(SIG_OBJS "") -set(SIG_STFL_OBJS "") +set(SIG_STFL_OBJS "") if(${OQS_ENABLE_KEM_BIKE}) add_subdirectory(kem/bike) @@ -116,6 +116,10 @@ if(${OQS_USE_CUPQC}) target_link_libraries(oqs PRIVATE cupqc) target_link_options(oqs PRIVATE $) endif() +if(${OQS_USE_GMP}) + target_link_libraries(oqs PRIVATE GMP) + target_link_libraries(oqs-internal PRIVATE GMP) +endif() target_include_directories(oqs PUBLIC diff --git 
a/src/sig/sqisign/CMakeLists.txt b/src/sig/sqisign/CMakeLists.txt index c53de94f18..beed24db74 100644 --- a/src/sig/sqisign/CMakeLists.txt +++ b/src/sig/sqisign/CMakeLists.txt @@ -6,56 +6,62 @@ set(_SQISIGN_OBJS "") if(OQS_ENABLE_SIG_sqisign_lvl1) - add_library(sqisign_lvl1_ref OBJECT sig_sqisign_lvl1.c the-sqisign_sqisign_lvl1_ref/algebra.c the-sqisign_sqisign_lvl1_ref/api.c the-sqisign_sqisign_lvl1_ref/basis.c the-sqisign_sqisign_lvl1_ref/biextension.c the-sqisign_sqisign_lvl1_ref/common.c the-sqisign_sqisign_lvl1_ref/dim2.c the-sqisign_sqisign_lvl1_ref/dim2id2iso.c the-sqisign_sqisign_lvl1_ref/dim4.c the-sqisign_sqisign_lvl1_ref/e0_basis.c the-sqisign_sqisign_lvl1_ref/ec.c the-sqisign_sqisign_lvl1_ref/ec_jac.c the-sqisign_sqisign_lvl1_ref/ec_params.c the-sqisign_sqisign_lvl1_ref/encode_signature.c the-sqisign_sqisign_lvl1_ref/encode_verification.c the-sqisign_sqisign_lvl1_ref/endomorphism_action.c the-sqisign_sqisign_lvl1_ref/finit.c the-sqisign_sqisign_lvl1_ref/fp.c the-sqisign_sqisign_lvl1_ref/fp2.c the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c the-sqisign_sqisign_lvl1_ref/hd.c the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl1_ref/hnf.c the-sqisign_sqisign_lvl1_ref/hnf_internal.c the-sqisign_sqisign_lvl1_ref/ibz_division.c the-sqisign_sqisign_lvl1_ref/id2iso.c the-sqisign_sqisign_lvl1_ref/ideal.c the-sqisign_sqisign_lvl1_ref/intbig.c the-sqisign_sqisign_lvl1_ref/integers.c the-sqisign_sqisign_lvl1_ref/isog_chains.c the-sqisign_sqisign_lvl1_ref/keygen.c the-sqisign_sqisign_lvl1_ref/l2.c the-sqisign_sqisign_lvl1_ref/lat_ball.c the-sqisign_sqisign_lvl1_ref/lattice.c the-sqisign_sqisign_lvl1_ref/lll_applications.c the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl1_ref/mini-gmp.c the-sqisign_sqisign_lvl1_ref/mp.c the-sqisign_sqisign_lvl1_ref/normeq.c the-sqisign_sqisign_lvl1_ref/printer.c the-sqisign_sqisign_lvl1_ref/quaternion_data.c the-sqisign_sqisign_lvl1_ref/rationals.c the-sqisign_sqisign_lvl1_ref/sign.c the-sqisign_sqisign_lvl1_ref/sqisign.c the-sqisign_sqisign_lvl1_ref/theta_isogenies.c the-sqisign_sqisign_lvl1_ref/theta_structure.c the-sqisign_sqisign_lvl1_ref/torsion_constants.c the-sqisign_sqisign_lvl1_ref/verify.c the-sqisign_sqisign_lvl1_ref/xeval.c the-sqisign_sqisign_lvl1_ref/xisog.c) - target_compile_options(sqisign_lvl1_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl1) + add_library(sqisign_lvl1_ref OBJECT sig_sqisign_lvl1.c the-sqisign_sqisign_lvl1_ref/algebra.c the-sqisign_sqisign_lvl1_ref/api.c the-sqisign_sqisign_lvl1_ref/basis.c the-sqisign_sqisign_lvl1_ref/biextension.c the-sqisign_sqisign_lvl1_ref/common.c the-sqisign_sqisign_lvl1_ref/dim2.c the-sqisign_sqisign_lvl1_ref/dim2id2iso.c the-sqisign_sqisign_lvl1_ref/dim4.c the-sqisign_sqisign_lvl1_ref/e0_basis.c the-sqisign_sqisign_lvl1_ref/ec.c the-sqisign_sqisign_lvl1_ref/ec_jac.c the-sqisign_sqisign_lvl1_ref/ec_params.c the-sqisign_sqisign_lvl1_ref/encode_signature.c the-sqisign_sqisign_lvl1_ref/encode_verification.c the-sqisign_sqisign_lvl1_ref/endomorphism_action.c the-sqisign_sqisign_lvl1_ref/finit.c the-sqisign_sqisign_lvl1_ref/fp.c the-sqisign_sqisign_lvl1_ref/fp2.c the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c the-sqisign_sqisign_lvl1_ref/hd.c the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl1_ref/hnf.c the-sqisign_sqisign_lvl1_ref/hnf_internal.c the-sqisign_sqisign_lvl1_ref/ibz_division.c 
the-sqisign_sqisign_lvl1_ref/id2iso.c the-sqisign_sqisign_lvl1_ref/ideal.c the-sqisign_sqisign_lvl1_ref/intbig.c the-sqisign_sqisign_lvl1_ref/integers.c the-sqisign_sqisign_lvl1_ref/isog_chains.c the-sqisign_sqisign_lvl1_ref/keygen.c the-sqisign_sqisign_lvl1_ref/l2.c the-sqisign_sqisign_lvl1_ref/lat_ball.c the-sqisign_sqisign_lvl1_ref/lattice.c the-sqisign_sqisign_lvl1_ref/lll_applications.c the-sqisign_sqisign_lvl1_ref/mp.c the-sqisign_sqisign_lvl1_ref/normeq.c the-sqisign_sqisign_lvl1_ref/printer.c the-sqisign_sqisign_lvl1_ref/quaternion_data.c the-sqisign_sqisign_lvl1_ref/rationals.c the-sqisign_sqisign_lvl1_ref/sign.c the-sqisign_sqisign_lvl1_ref/sqisign.c the-sqisign_sqisign_lvl1_ref/theta_isogenies.c the-sqisign_sqisign_lvl1_ref/theta_structure.c the-sqisign_sqisign_lvl1_ref/torsion_constants.c the-sqisign_sqisign_lvl1_ref/verify.c the-sqisign_sqisign_lvl1_ref/xeval.c the-sqisign_sqisign_lvl1_ref/xisog.c) + target_compile_options(sqisign_lvl1_ref PUBLIC -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl1) target_include_directories(sqisign_lvl1_ref PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl1_ref) target_include_directories(sqisign_lvl1_ref PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) - target_compile_options(sqisign_lvl1_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl1) + target_include_directories(sqisign_lvl1_ref PRIVATE ${GMP_INCLUDE}) + target_compile_options(sqisign_lvl1_ref PUBLIC -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl1) set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) endif() if(OQS_ENABLE_SIG_sqisign_lvl1_broadwell) - add_library(sqisign_lvl1_broadwell OBJECT the-sqisign_sqisign_lvl1_broadwell/algebra.c the-sqisign_sqisign_lvl1_broadwell/api.c the-sqisign_sqisign_lvl1_broadwell/basis.c the-sqisign_sqisign_lvl1_broadwell/biextension.c the-sqisign_sqisign_lvl1_broadwell/common.c the-sqisign_sqisign_lvl1_broadwell/dim2.c the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl1_broadwell/dim4.c the-sqisign_sqisign_lvl1_broadwell/e0_basis.c the-sqisign_sqisign_lvl1_broadwell/ec.c the-sqisign_sqisign_lvl1_broadwell/ec_jac.c the-sqisign_sqisign_lvl1_broadwell/ec_params.c the-sqisign_sqisign_lvl1_broadwell/encode_signature.c the-sqisign_sqisign_lvl1_broadwell/encode_verification.c the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl1_broadwell/finit.c the-sqisign_sqisign_lvl1_broadwell/fp.c the-sqisign_sqisign_lvl1_broadwell/fp2.c the-sqisign_sqisign_lvl1_broadwell/fp_asm.S the-sqisign_sqisign_lvl1_broadwell/gf5248.c the-sqisign_sqisign_lvl1_broadwell/hd.c the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl1_broadwell/hnf.c the-sqisign_sqisign_lvl1_broadwell/hnf_internal.c the-sqisign_sqisign_lvl1_broadwell/ibz_division.c the-sqisign_sqisign_lvl1_broadwell/id2iso.c the-sqisign_sqisign_lvl1_broadwell/ideal.c the-sqisign_sqisign_lvl1_broadwell/intbig.c the-sqisign_sqisign_lvl1_broadwell/integers.c the-sqisign_sqisign_lvl1_broadwell/isog_chains.c the-sqisign_sqisign_lvl1_broadwell/keygen.c the-sqisign_sqisign_lvl1_broadwell/l2.c the-sqisign_sqisign_lvl1_broadwell/lat_ball.c the-sqisign_sqisign_lvl1_broadwell/lattice.c the-sqisign_sqisign_lvl1_broadwell/lll_applications.c the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl1_broadwell/mini-gmp.c the-sqisign_sqisign_lvl1_broadwell/mp.c the-sqisign_sqisign_lvl1_broadwell/normeq.c the-sqisign_sqisign_lvl1_broadwell/quaternion_data.c 
the-sqisign_sqisign_lvl1_broadwell/rationals.c the-sqisign_sqisign_lvl1_broadwell/sign.c the-sqisign_sqisign_lvl1_broadwell/sqisign.c the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl1_broadwell/theta_structure.c the-sqisign_sqisign_lvl1_broadwell/torsion_constants.c the-sqisign_sqisign_lvl1_broadwell/verify.c the-sqisign_sqisign_lvl1_broadwell/xeval.c the-sqisign_sqisign_lvl1_broadwell/xisog.c) + add_library(sqisign_lvl1_broadwell OBJECT the-sqisign_sqisign_lvl1_broadwell/algebra.c the-sqisign_sqisign_lvl1_broadwell/api.c the-sqisign_sqisign_lvl1_broadwell/basis.c the-sqisign_sqisign_lvl1_broadwell/biextension.c the-sqisign_sqisign_lvl1_broadwell/common.c the-sqisign_sqisign_lvl1_broadwell/dim2.c the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl1_broadwell/dim4.c the-sqisign_sqisign_lvl1_broadwell/e0_basis.c the-sqisign_sqisign_lvl1_broadwell/ec.c the-sqisign_sqisign_lvl1_broadwell/ec_jac.c the-sqisign_sqisign_lvl1_broadwell/ec_params.c the-sqisign_sqisign_lvl1_broadwell/encode_signature.c the-sqisign_sqisign_lvl1_broadwell/encode_verification.c the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl1_broadwell/finit.c the-sqisign_sqisign_lvl1_broadwell/fp.c the-sqisign_sqisign_lvl1_broadwell/fp2.c the-sqisign_sqisign_lvl1_broadwell/fp_asm.S the-sqisign_sqisign_lvl1_broadwell/gf5248.c the-sqisign_sqisign_lvl1_broadwell/hd.c the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl1_broadwell/hnf.c the-sqisign_sqisign_lvl1_broadwell/hnf_internal.c the-sqisign_sqisign_lvl1_broadwell/ibz_division.c the-sqisign_sqisign_lvl1_broadwell/id2iso.c the-sqisign_sqisign_lvl1_broadwell/ideal.c the-sqisign_sqisign_lvl1_broadwell/intbig.c the-sqisign_sqisign_lvl1_broadwell/integers.c the-sqisign_sqisign_lvl1_broadwell/isog_chains.c the-sqisign_sqisign_lvl1_broadwell/keygen.c the-sqisign_sqisign_lvl1_broadwell/l2.c the-sqisign_sqisign_lvl1_broadwell/lat_ball.c the-sqisign_sqisign_lvl1_broadwell/lattice.c the-sqisign_sqisign_lvl1_broadwell/lll_applications.c the-sqisign_sqisign_lvl1_broadwell/mp.c the-sqisign_sqisign_lvl1_broadwell/normeq.c the-sqisign_sqisign_lvl1_broadwell/quaternion_data.c the-sqisign_sqisign_lvl1_broadwell/rationals.c the-sqisign_sqisign_lvl1_broadwell/sign.c the-sqisign_sqisign_lvl1_broadwell/sqisign.c the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl1_broadwell/theta_structure.c the-sqisign_sqisign_lvl1_broadwell/torsion_constants.c the-sqisign_sqisign_lvl1_broadwell/verify.c the-sqisign_sqisign_lvl1_broadwell/xeval.c the-sqisign_sqisign_lvl1_broadwell/xisog.c) target_include_directories(sqisign_lvl1_broadwell PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl1_broadwell) target_include_directories(sqisign_lvl1_broadwell PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) + target_include_directories(sqisign_lvl1_broadwell PRIVATE ${GMP_INCLUDE}) target_compile_options(sqisign_lvl1_broadwell PRIVATE -mavx2) - target_compile_options(sqisign_lvl1_broadwell PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_BROADWELL=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl1 -DSQISIGN_GF_IMPL_BROADWELL) + target_compile_options(sqisign_lvl1_broadwell PUBLIC -DSQISIGN_BUILD_TYPE_BROADWELL=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl1 -DSQISIGN_GF_IMPL_BROADWELL) set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) endif() if(OQS_ENABLE_SIG_sqisign_lvl3) - add_library(sqisign_lvl3_ref OBJECT sig_sqisign_lvl3.c the-sqisign_sqisign_lvl3_ref/algebra.c the-sqisign_sqisign_lvl3_ref/api.c 
the-sqisign_sqisign_lvl3_ref/basis.c the-sqisign_sqisign_lvl3_ref/biextension.c the-sqisign_sqisign_lvl3_ref/common.c the-sqisign_sqisign_lvl3_ref/dim2.c the-sqisign_sqisign_lvl3_ref/dim2id2iso.c the-sqisign_sqisign_lvl3_ref/dim4.c the-sqisign_sqisign_lvl3_ref/e0_basis.c the-sqisign_sqisign_lvl3_ref/ec.c the-sqisign_sqisign_lvl3_ref/ec_jac.c the-sqisign_sqisign_lvl3_ref/ec_params.c the-sqisign_sqisign_lvl3_ref/encode_signature.c the-sqisign_sqisign_lvl3_ref/encode_verification.c the-sqisign_sqisign_lvl3_ref/endomorphism_action.c the-sqisign_sqisign_lvl3_ref/finit.c the-sqisign_sqisign_lvl3_ref/fp.c the-sqisign_sqisign_lvl3_ref/fp2.c the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c the-sqisign_sqisign_lvl3_ref/hd.c the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl3_ref/hnf.c the-sqisign_sqisign_lvl3_ref/hnf_internal.c the-sqisign_sqisign_lvl3_ref/ibz_division.c the-sqisign_sqisign_lvl3_ref/id2iso.c the-sqisign_sqisign_lvl3_ref/ideal.c the-sqisign_sqisign_lvl3_ref/intbig.c the-sqisign_sqisign_lvl3_ref/integers.c the-sqisign_sqisign_lvl3_ref/isog_chains.c the-sqisign_sqisign_lvl3_ref/keygen.c the-sqisign_sqisign_lvl3_ref/l2.c the-sqisign_sqisign_lvl3_ref/lat_ball.c the-sqisign_sqisign_lvl3_ref/lattice.c the-sqisign_sqisign_lvl3_ref/lll_applications.c the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl3_ref/mini-gmp.c the-sqisign_sqisign_lvl3_ref/mp.c the-sqisign_sqisign_lvl3_ref/normeq.c the-sqisign_sqisign_lvl3_ref/printer.c the-sqisign_sqisign_lvl3_ref/quaternion_data.c the-sqisign_sqisign_lvl3_ref/rationals.c the-sqisign_sqisign_lvl3_ref/sign.c the-sqisign_sqisign_lvl3_ref/sqisign.c the-sqisign_sqisign_lvl3_ref/theta_isogenies.c the-sqisign_sqisign_lvl3_ref/theta_structure.c the-sqisign_sqisign_lvl3_ref/torsion_constants.c the-sqisign_sqisign_lvl3_ref/verify.c the-sqisign_sqisign_lvl3_ref/xeval.c the-sqisign_sqisign_lvl3_ref/xisog.c) - target_compile_options(sqisign_lvl3_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl3) + add_library(sqisign_lvl3_ref OBJECT sig_sqisign_lvl3.c the-sqisign_sqisign_lvl3_ref/algebra.c the-sqisign_sqisign_lvl3_ref/api.c the-sqisign_sqisign_lvl3_ref/basis.c the-sqisign_sqisign_lvl3_ref/biextension.c the-sqisign_sqisign_lvl3_ref/common.c the-sqisign_sqisign_lvl3_ref/dim2.c the-sqisign_sqisign_lvl3_ref/dim2id2iso.c the-sqisign_sqisign_lvl3_ref/dim4.c the-sqisign_sqisign_lvl3_ref/e0_basis.c the-sqisign_sqisign_lvl3_ref/ec.c the-sqisign_sqisign_lvl3_ref/ec_jac.c the-sqisign_sqisign_lvl3_ref/ec_params.c the-sqisign_sqisign_lvl3_ref/encode_signature.c the-sqisign_sqisign_lvl3_ref/encode_verification.c the-sqisign_sqisign_lvl3_ref/endomorphism_action.c the-sqisign_sqisign_lvl3_ref/finit.c the-sqisign_sqisign_lvl3_ref/fp.c the-sqisign_sqisign_lvl3_ref/fp2.c the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c the-sqisign_sqisign_lvl3_ref/hd.c the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl3_ref/hnf.c the-sqisign_sqisign_lvl3_ref/hnf_internal.c the-sqisign_sqisign_lvl3_ref/ibz_division.c the-sqisign_sqisign_lvl3_ref/id2iso.c the-sqisign_sqisign_lvl3_ref/ideal.c the-sqisign_sqisign_lvl3_ref/intbig.c the-sqisign_sqisign_lvl3_ref/integers.c the-sqisign_sqisign_lvl3_ref/isog_chains.c the-sqisign_sqisign_lvl3_ref/keygen.c the-sqisign_sqisign_lvl3_ref/l2.c the-sqisign_sqisign_lvl3_ref/lat_ball.c the-sqisign_sqisign_lvl3_ref/lattice.c the-sqisign_sqisign_lvl3_ref/lll_applications.c 
the-sqisign_sqisign_lvl3_ref/mp.c the-sqisign_sqisign_lvl3_ref/normeq.c the-sqisign_sqisign_lvl3_ref/printer.c the-sqisign_sqisign_lvl3_ref/quaternion_data.c the-sqisign_sqisign_lvl3_ref/rationals.c the-sqisign_sqisign_lvl3_ref/sign.c the-sqisign_sqisign_lvl3_ref/sqisign.c the-sqisign_sqisign_lvl3_ref/theta_isogenies.c the-sqisign_sqisign_lvl3_ref/theta_structure.c the-sqisign_sqisign_lvl3_ref/torsion_constants.c the-sqisign_sqisign_lvl3_ref/verify.c the-sqisign_sqisign_lvl3_ref/xeval.c the-sqisign_sqisign_lvl3_ref/xisog.c) + target_compile_options(sqisign_lvl3_ref PUBLIC -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl3) target_include_directories(sqisign_lvl3_ref PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl3_ref) target_include_directories(sqisign_lvl3_ref PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) - target_compile_options(sqisign_lvl3_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl3) + target_include_directories(sqisign_lvl3_ref PRIVATE ${GMP_INCLUDE}) + target_compile_options(sqisign_lvl3_ref PUBLIC -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl3) set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) endif() if(OQS_ENABLE_SIG_sqisign_lvl3_broadwell) - add_library(sqisign_lvl3_broadwell OBJECT the-sqisign_sqisign_lvl3_broadwell/algebra.c the-sqisign_sqisign_lvl3_broadwell/api.c the-sqisign_sqisign_lvl3_broadwell/basis.c the-sqisign_sqisign_lvl3_broadwell/biextension.c the-sqisign_sqisign_lvl3_broadwell/common.c the-sqisign_sqisign_lvl3_broadwell/dim2.c the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl3_broadwell/dim4.c the-sqisign_sqisign_lvl3_broadwell/e0_basis.c the-sqisign_sqisign_lvl3_broadwell/ec.c the-sqisign_sqisign_lvl3_broadwell/ec_jac.c the-sqisign_sqisign_lvl3_broadwell/ec_params.c the-sqisign_sqisign_lvl3_broadwell/encode_signature.c the-sqisign_sqisign_lvl3_broadwell/encode_verification.c the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl3_broadwell/finit.c the-sqisign_sqisign_lvl3_broadwell/fp.c the-sqisign_sqisign_lvl3_broadwell/fp2.c the-sqisign_sqisign_lvl3_broadwell/fp_asm.S the-sqisign_sqisign_lvl3_broadwell/gf65376.c the-sqisign_sqisign_lvl3_broadwell/hd.c the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl3_broadwell/hnf.c the-sqisign_sqisign_lvl3_broadwell/hnf_internal.c the-sqisign_sqisign_lvl3_broadwell/ibz_division.c the-sqisign_sqisign_lvl3_broadwell/id2iso.c the-sqisign_sqisign_lvl3_broadwell/ideal.c the-sqisign_sqisign_lvl3_broadwell/intbig.c the-sqisign_sqisign_lvl3_broadwell/integers.c the-sqisign_sqisign_lvl3_broadwell/isog_chains.c the-sqisign_sqisign_lvl3_broadwell/keygen.c the-sqisign_sqisign_lvl3_broadwell/l2.c the-sqisign_sqisign_lvl3_broadwell/lat_ball.c the-sqisign_sqisign_lvl3_broadwell/lattice.c the-sqisign_sqisign_lvl3_broadwell/lll_applications.c the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl3_broadwell/mini-gmp.c the-sqisign_sqisign_lvl3_broadwell/mp.c the-sqisign_sqisign_lvl3_broadwell/normeq.c the-sqisign_sqisign_lvl3_broadwell/quaternion_data.c the-sqisign_sqisign_lvl3_broadwell/rationals.c the-sqisign_sqisign_lvl3_broadwell/sign.c the-sqisign_sqisign_lvl3_broadwell/sqisign.c the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl3_broadwell/theta_structure.c the-sqisign_sqisign_lvl3_broadwell/torsion_constants.c the-sqisign_sqisign_lvl3_broadwell/verify.c the-sqisign_sqisign_lvl3_broadwell/xeval.c 
the-sqisign_sqisign_lvl3_broadwell/xisog.c) + add_library(sqisign_lvl3_broadwell OBJECT the-sqisign_sqisign_lvl3_broadwell/algebra.c the-sqisign_sqisign_lvl3_broadwell/api.c the-sqisign_sqisign_lvl3_broadwell/basis.c the-sqisign_sqisign_lvl3_broadwell/biextension.c the-sqisign_sqisign_lvl3_broadwell/common.c the-sqisign_sqisign_lvl3_broadwell/dim2.c the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl3_broadwell/dim4.c the-sqisign_sqisign_lvl3_broadwell/e0_basis.c the-sqisign_sqisign_lvl3_broadwell/ec.c the-sqisign_sqisign_lvl3_broadwell/ec_jac.c the-sqisign_sqisign_lvl3_broadwell/ec_params.c the-sqisign_sqisign_lvl3_broadwell/encode_signature.c the-sqisign_sqisign_lvl3_broadwell/encode_verification.c the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl3_broadwell/finit.c the-sqisign_sqisign_lvl3_broadwell/fp.c the-sqisign_sqisign_lvl3_broadwell/fp2.c the-sqisign_sqisign_lvl3_broadwell/fp_asm.S the-sqisign_sqisign_lvl3_broadwell/gf65376.c the-sqisign_sqisign_lvl3_broadwell/hd.c the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl3_broadwell/hnf.c the-sqisign_sqisign_lvl3_broadwell/hnf_internal.c the-sqisign_sqisign_lvl3_broadwell/ibz_division.c the-sqisign_sqisign_lvl3_broadwell/id2iso.c the-sqisign_sqisign_lvl3_broadwell/ideal.c the-sqisign_sqisign_lvl3_broadwell/intbig.c the-sqisign_sqisign_lvl3_broadwell/integers.c the-sqisign_sqisign_lvl3_broadwell/isog_chains.c the-sqisign_sqisign_lvl3_broadwell/keygen.c the-sqisign_sqisign_lvl3_broadwell/l2.c the-sqisign_sqisign_lvl3_broadwell/lat_ball.c the-sqisign_sqisign_lvl3_broadwell/lattice.c the-sqisign_sqisign_lvl3_broadwell/lll_applications.c the-sqisign_sqisign_lvl3_broadwell/mp.c the-sqisign_sqisign_lvl3_broadwell/normeq.c the-sqisign_sqisign_lvl3_broadwell/quaternion_data.c the-sqisign_sqisign_lvl3_broadwell/rationals.c the-sqisign_sqisign_lvl3_broadwell/sign.c the-sqisign_sqisign_lvl3_broadwell/sqisign.c the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl3_broadwell/theta_structure.c the-sqisign_sqisign_lvl3_broadwell/torsion_constants.c the-sqisign_sqisign_lvl3_broadwell/verify.c the-sqisign_sqisign_lvl3_broadwell/xeval.c the-sqisign_sqisign_lvl3_broadwell/xisog.c) target_include_directories(sqisign_lvl3_broadwell PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl3_broadwell) target_include_directories(sqisign_lvl3_broadwell PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) + target_include_directories(sqisign_lvl3_broadwell PRIVATE ${GMP_INCLUDE}) target_compile_options(sqisign_lvl3_broadwell PRIVATE -mavx2) - target_compile_options(sqisign_lvl3_broadwell PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_BROADWELL=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl3 -DSQISIGN_GF_IMPL_BROADWELL) + target_compile_options(sqisign_lvl3_broadwell PUBLIC -DSQISIGN_BUILD_TYPE_BROADWELL=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl3 -DSQISIGN_GF_IMPL_BROADWELL) set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) endif() if(OQS_ENABLE_SIG_sqisign_lvl5) - add_library(sqisign_lvl5_ref OBJECT sig_sqisign_lvl5.c the-sqisign_sqisign_lvl5_ref/algebra.c the-sqisign_sqisign_lvl5_ref/api.c the-sqisign_sqisign_lvl5_ref/basis.c the-sqisign_sqisign_lvl5_ref/biextension.c the-sqisign_sqisign_lvl5_ref/common.c the-sqisign_sqisign_lvl5_ref/dim2.c the-sqisign_sqisign_lvl5_ref/dim2id2iso.c the-sqisign_sqisign_lvl5_ref/dim4.c the-sqisign_sqisign_lvl5_ref/e0_basis.c the-sqisign_sqisign_lvl5_ref/ec.c the-sqisign_sqisign_lvl5_ref/ec_jac.c the-sqisign_sqisign_lvl5_ref/ec_params.c 
the-sqisign_sqisign_lvl5_ref/encode_signature.c the-sqisign_sqisign_lvl5_ref/encode_verification.c the-sqisign_sqisign_lvl5_ref/endomorphism_action.c the-sqisign_sqisign_lvl5_ref/finit.c the-sqisign_sqisign_lvl5_ref/fp.c the-sqisign_sqisign_lvl5_ref/fp2.c the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c the-sqisign_sqisign_lvl5_ref/hd.c the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl5_ref/hnf.c the-sqisign_sqisign_lvl5_ref/hnf_internal.c the-sqisign_sqisign_lvl5_ref/ibz_division.c the-sqisign_sqisign_lvl5_ref/id2iso.c the-sqisign_sqisign_lvl5_ref/ideal.c the-sqisign_sqisign_lvl5_ref/intbig.c the-sqisign_sqisign_lvl5_ref/integers.c the-sqisign_sqisign_lvl5_ref/isog_chains.c the-sqisign_sqisign_lvl5_ref/keygen.c the-sqisign_sqisign_lvl5_ref/l2.c the-sqisign_sqisign_lvl5_ref/lat_ball.c the-sqisign_sqisign_lvl5_ref/lattice.c the-sqisign_sqisign_lvl5_ref/lll_applications.c the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.c the-sqisign_sqisign_lvl5_ref/mini-gmp.c the-sqisign_sqisign_lvl5_ref/mp.c the-sqisign_sqisign_lvl5_ref/normeq.c the-sqisign_sqisign_lvl5_ref/printer.c the-sqisign_sqisign_lvl5_ref/quaternion_data.c the-sqisign_sqisign_lvl5_ref/rationals.c the-sqisign_sqisign_lvl5_ref/sign.c the-sqisign_sqisign_lvl5_ref/sqisign.c the-sqisign_sqisign_lvl5_ref/theta_isogenies.c the-sqisign_sqisign_lvl5_ref/theta_structure.c the-sqisign_sqisign_lvl5_ref/torsion_constants.c the-sqisign_sqisign_lvl5_ref/verify.c the-sqisign_sqisign_lvl5_ref/xeval.c the-sqisign_sqisign_lvl5_ref/xisog.c) - target_compile_options(sqisign_lvl5_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl5) + add_library(sqisign_lvl5_ref OBJECT sig_sqisign_lvl5.c the-sqisign_sqisign_lvl5_ref/algebra.c the-sqisign_sqisign_lvl5_ref/api.c the-sqisign_sqisign_lvl5_ref/basis.c the-sqisign_sqisign_lvl5_ref/biextension.c the-sqisign_sqisign_lvl5_ref/common.c the-sqisign_sqisign_lvl5_ref/dim2.c the-sqisign_sqisign_lvl5_ref/dim2id2iso.c the-sqisign_sqisign_lvl5_ref/dim4.c the-sqisign_sqisign_lvl5_ref/e0_basis.c the-sqisign_sqisign_lvl5_ref/ec.c the-sqisign_sqisign_lvl5_ref/ec_jac.c the-sqisign_sqisign_lvl5_ref/ec_params.c the-sqisign_sqisign_lvl5_ref/encode_signature.c the-sqisign_sqisign_lvl5_ref/encode_verification.c the-sqisign_sqisign_lvl5_ref/endomorphism_action.c the-sqisign_sqisign_lvl5_ref/finit.c the-sqisign_sqisign_lvl5_ref/fp.c the-sqisign_sqisign_lvl5_ref/fp2.c the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c the-sqisign_sqisign_lvl5_ref/hd.c the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl5_ref/hnf.c the-sqisign_sqisign_lvl5_ref/hnf_internal.c the-sqisign_sqisign_lvl5_ref/ibz_division.c the-sqisign_sqisign_lvl5_ref/id2iso.c the-sqisign_sqisign_lvl5_ref/ideal.c the-sqisign_sqisign_lvl5_ref/intbig.c the-sqisign_sqisign_lvl5_ref/integers.c the-sqisign_sqisign_lvl5_ref/isog_chains.c the-sqisign_sqisign_lvl5_ref/keygen.c the-sqisign_sqisign_lvl5_ref/l2.c the-sqisign_sqisign_lvl5_ref/lat_ball.c the-sqisign_sqisign_lvl5_ref/lattice.c the-sqisign_sqisign_lvl5_ref/lll_applications.c the-sqisign_sqisign_lvl5_ref/mp.c the-sqisign_sqisign_lvl5_ref/normeq.c the-sqisign_sqisign_lvl5_ref/printer.c the-sqisign_sqisign_lvl5_ref/quaternion_data.c the-sqisign_sqisign_lvl5_ref/rationals.c the-sqisign_sqisign_lvl5_ref/sign.c the-sqisign_sqisign_lvl5_ref/sqisign.c the-sqisign_sqisign_lvl5_ref/theta_isogenies.c the-sqisign_sqisign_lvl5_ref/theta_structure.c 
the-sqisign_sqisign_lvl5_ref/torsion_constants.c the-sqisign_sqisign_lvl5_ref/verify.c the-sqisign_sqisign_lvl5_ref/xeval.c the-sqisign_sqisign_lvl5_ref/xisog.c) + target_compile_options(sqisign_lvl5_ref PUBLIC -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl5) target_include_directories(sqisign_lvl5_ref PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl5_ref) target_include_directories(sqisign_lvl5_ref PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) - target_compile_options(sqisign_lvl5_ref PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl5) + target_include_directories(sqisign_lvl5_ref PRIVATE ${GMP_INCLUDE}) + target_compile_options(sqisign_lvl5_ref PUBLIC -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl5) set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) endif() if(OQS_ENABLE_SIG_sqisign_lvl5_broadwell) - add_library(sqisign_lvl5_broadwell OBJECT the-sqisign_sqisign_lvl5_broadwell/algebra.c the-sqisign_sqisign_lvl5_broadwell/api.c the-sqisign_sqisign_lvl5_broadwell/basis.c the-sqisign_sqisign_lvl5_broadwell/biextension.c the-sqisign_sqisign_lvl5_broadwell/common.c the-sqisign_sqisign_lvl5_broadwell/dim2.c the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl5_broadwell/dim4.c the-sqisign_sqisign_lvl5_broadwell/e0_basis.c the-sqisign_sqisign_lvl5_broadwell/ec.c the-sqisign_sqisign_lvl5_broadwell/ec_jac.c the-sqisign_sqisign_lvl5_broadwell/ec_params.c the-sqisign_sqisign_lvl5_broadwell/encode_signature.c the-sqisign_sqisign_lvl5_broadwell/encode_verification.c the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl5_broadwell/finit.c the-sqisign_sqisign_lvl5_broadwell/fp.c the-sqisign_sqisign_lvl5_broadwell/fp2.c the-sqisign_sqisign_lvl5_broadwell/fp_asm.S the-sqisign_sqisign_lvl5_broadwell/gf27500.c the-sqisign_sqisign_lvl5_broadwell/hd.c the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl5_broadwell/hnf.c the-sqisign_sqisign_lvl5_broadwell/hnf_internal.c the-sqisign_sqisign_lvl5_broadwell/ibz_division.c the-sqisign_sqisign_lvl5_broadwell/id2iso.c the-sqisign_sqisign_lvl5_broadwell/ideal.c the-sqisign_sqisign_lvl5_broadwell/intbig.c the-sqisign_sqisign_lvl5_broadwell/integers.c the-sqisign_sqisign_lvl5_broadwell/isog_chains.c the-sqisign_sqisign_lvl5_broadwell/keygen.c the-sqisign_sqisign_lvl5_broadwell/l2.c the-sqisign_sqisign_lvl5_broadwell/lat_ball.c the-sqisign_sqisign_lvl5_broadwell/lattice.c the-sqisign_sqisign_lvl5_broadwell/lll_applications.c the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.c the-sqisign_sqisign_lvl5_broadwell/mini-gmp.c the-sqisign_sqisign_lvl5_broadwell/mp.c the-sqisign_sqisign_lvl5_broadwell/normeq.c the-sqisign_sqisign_lvl5_broadwell/quaternion_data.c the-sqisign_sqisign_lvl5_broadwell/rationals.c the-sqisign_sqisign_lvl5_broadwell/sign.c the-sqisign_sqisign_lvl5_broadwell/sqisign.c the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl5_broadwell/theta_structure.c the-sqisign_sqisign_lvl5_broadwell/torsion_constants.c the-sqisign_sqisign_lvl5_broadwell/verify.c the-sqisign_sqisign_lvl5_broadwell/xeval.c the-sqisign_sqisign_lvl5_broadwell/xisog.c) + add_library(sqisign_lvl5_broadwell OBJECT the-sqisign_sqisign_lvl5_broadwell/algebra.c the-sqisign_sqisign_lvl5_broadwell/api.c the-sqisign_sqisign_lvl5_broadwell/basis.c the-sqisign_sqisign_lvl5_broadwell/biextension.c the-sqisign_sqisign_lvl5_broadwell/common.c the-sqisign_sqisign_lvl5_broadwell/dim2.c 
the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl5_broadwell/dim4.c the-sqisign_sqisign_lvl5_broadwell/e0_basis.c the-sqisign_sqisign_lvl5_broadwell/ec.c the-sqisign_sqisign_lvl5_broadwell/ec_jac.c the-sqisign_sqisign_lvl5_broadwell/ec_params.c the-sqisign_sqisign_lvl5_broadwell/encode_signature.c the-sqisign_sqisign_lvl5_broadwell/encode_verification.c the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl5_broadwell/finit.c the-sqisign_sqisign_lvl5_broadwell/fp.c the-sqisign_sqisign_lvl5_broadwell/fp2.c the-sqisign_sqisign_lvl5_broadwell/fp_asm.S the-sqisign_sqisign_lvl5_broadwell/gf27500.c the-sqisign_sqisign_lvl5_broadwell/hd.c the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl5_broadwell/hnf.c the-sqisign_sqisign_lvl5_broadwell/hnf_internal.c the-sqisign_sqisign_lvl5_broadwell/ibz_division.c the-sqisign_sqisign_lvl5_broadwell/id2iso.c the-sqisign_sqisign_lvl5_broadwell/ideal.c the-sqisign_sqisign_lvl5_broadwell/intbig.c the-sqisign_sqisign_lvl5_broadwell/integers.c the-sqisign_sqisign_lvl5_broadwell/isog_chains.c the-sqisign_sqisign_lvl5_broadwell/keygen.c the-sqisign_sqisign_lvl5_broadwell/l2.c the-sqisign_sqisign_lvl5_broadwell/lat_ball.c the-sqisign_sqisign_lvl5_broadwell/lattice.c the-sqisign_sqisign_lvl5_broadwell/lll_applications.c the-sqisign_sqisign_lvl5_broadwell/mp.c the-sqisign_sqisign_lvl5_broadwell/normeq.c the-sqisign_sqisign_lvl5_broadwell/quaternion_data.c the-sqisign_sqisign_lvl5_broadwell/rationals.c the-sqisign_sqisign_lvl5_broadwell/sign.c the-sqisign_sqisign_lvl5_broadwell/sqisign.c the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl5_broadwell/theta_structure.c the-sqisign_sqisign_lvl5_broadwell/torsion_constants.c the-sqisign_sqisign_lvl5_broadwell/verify.c the-sqisign_sqisign_lvl5_broadwell/xeval.c the-sqisign_sqisign_lvl5_broadwell/xisog.c) target_include_directories(sqisign_lvl5_broadwell PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl5_broadwell) target_include_directories(sqisign_lvl5_broadwell PRIVATE ${PROJECT_SOURCE_DIR}/src/common/pqclean_shims) + target_include_directories(sqisign_lvl5_broadwell PRIVATE ${GMP_INCLUDE}) target_compile_options(sqisign_lvl5_broadwell PRIVATE -mavx2) - target_compile_options(sqisign_lvl5_broadwell PUBLIC -DMINI_GMP=ON -DSQISIGN_BUILD_TYPE_BROADWELL=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl5 -DSQISIGN_GF_IMPL_BROADWELL) + target_compile_options(sqisign_lvl5_broadwell PUBLIC -DSQISIGN_BUILD_TYPE_BROADWELL=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl5 -DSQISIGN_GF_IMPL_BROADWELL) set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) endif() diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.c deleted file mode 100644 index 396d505aec..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.c +++ /dev/null @@ -1,73 +0,0 @@ -#include -#include -#if defined(MINI_GMP) -#include "mini-gmp.h" -#else -// This configuration is used only for testing -#include -#endif -#include - -// Exported for testing -int -mini_mpz_legendre(const mpz_t a, const mpz_t p) -{ - int res = 0; - mpz_t e; - mpz_init_set(e, p); - mpz_sub_ui(e, e, 1); - mpz_fdiv_q_2exp(e, e, 1); - mpz_powm(e, a, e, p); - - if (mpz_cmp_ui(e, 1) <= 0) { - res = mpz_get_si(e); - } else { - res = -1; - } - mpz_clear(e); - return res; -} - -#if defined(MINI_GMP) -int -mpz_legendre(const mpz_t a, const mpz_t p) -{ - return mini_mpz_legendre(a, p); -} -#endif - 
-// Exported for testing -double -mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op) -{ - double ret; - int tmp_exp; - mpz_t tmp; - - // Handle the case where op is 0 - if (mpz_cmp_ui(op, 0) == 0) { - *exp = 0; - return 0.0; - } - - *exp = mpz_sizeinbase(op, 2); - - mpz_init_set(tmp, op); - - if (*exp > DBL_MAX_EXP) { - mpz_fdiv_q_2exp(tmp, tmp, *exp - DBL_MAX_EXP); - } - - ret = frexp(mpz_get_d(tmp), &tmp_exp); - mpz_clear(tmp); - - return ret; -} - -#if defined(MINI_GMP) -double -mpz_get_d_2exp(signed long int *exp, const mpz_t op) -{ - return mini_mpz_get_d_2exp(exp, op); -} -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.h deleted file mode 100644 index 0113cfdfe6..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp-extra.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef MINI_GMP_EXTRA_H -#define MINI_GMP_EXTRA_H - -#if defined MINI_GMP -#include "mini-gmp.h" - -typedef long mp_exp_t; - -int mpz_legendre(const mpz_t a, const mpz_t p); -double mpz_get_d_2exp(signed long int *exp, const mpz_t op); -#else -// This configuration is used only for testing -#include -#endif - -int mini_mpz_legendre(const mpz_t a, const mpz_t p); -double mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op); - -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp.c deleted file mode 100644 index 3830ab2031..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp.c +++ /dev/null @@ -1,4671 +0,0 @@ -/* Note: The code from mini-gmp is modifed from the original by - commenting out the definition of GMP_LIMB_BITS */ - -/* - mini-gmp, a minimalistic implementation of a GNU GMP subset. - - Contributed to the GNU project by Niels Möller - Additional functionalities and improvements by Marco Bodrato. - -Copyright 1991-1997, 1999-2022 Free Software Foundation, Inc. - -This file is part of the GNU MP Library. - -The GNU MP Library is free software; you can redistribute it and/or modify -it under the terms of either: - - * the GNU Lesser General Public License as published by the Free - Software Foundation; either version 3 of the License, or (at your - option) any later version. - -or - - * the GNU General Public License as published by the Free Software - Foundation; either version 2 of the License, or (at your option) any - later version. - -or both in parallel, as here. - -The GNU MP Library is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -for more details. - -You should have received copies of the GNU General Public License and the -GNU Lesser General Public License along with the GNU MP Library. If not, -see https://www.gnu.org/licenses/. */ - -/* NOTE: All functions in this file which are not declared in - mini-gmp.h are internal, and are not intended to be compatible - with GMP or with future versions of mini-gmp. */ - -/* Much of the material copied from GMP files, including: gmp-impl.h, - longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c, - mpn/generic/lshift.c, mpn/generic/mul_1.c, - mpn/generic/mul_basecase.c, mpn/generic/rshift.c, - mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c, - mpn/generic/submul_1.c. 
*/ - -#include -#include -#include -#include -#include -#include - -#include "mini-gmp.h" - -#if !defined(MINI_GMP_DONT_USE_FLOAT_H) -#include -#endif - - -/* Macros */ -/* Removed from here as it is passed as a compiler command-line definition */ -/* #define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT) */ - -#define GMP_LIMB_MAX ((mp_limb_t) ~ (mp_limb_t) 0) -#define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1)) - -#define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2)) -#define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1) - -#define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT) -#define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1)) - -#define GMP_ABS(x) ((x) >= 0 ? (x) : -(x)) -#define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1)) - -#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b)) -#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b)) - -#define GMP_CMP(a,b) (((a) > (b)) - ((a) < (b))) - -#if defined(DBL_MANT_DIG) && FLT_RADIX == 2 -#define GMP_DBL_MANT_BITS DBL_MANT_DIG -#else -#define GMP_DBL_MANT_BITS (53) -#endif - -/* Return non-zero if xp,xsize and yp,ysize overlap. - If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no - overlap. If both these are false, there's an overlap. */ -#define GMP_MPN_OVERLAP_P(xp, xsize, yp, ysize) \ - ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp)) - -#define gmp_assert_nocarry(x) do { \ - mp_limb_t __cy = (x); \ - assert (__cy == 0); \ - (void) (__cy); \ - } while (0) - -#define gmp_clz(count, x) do { \ - mp_limb_t __clz_x = (x); \ - unsigned __clz_c = 0; \ - int LOCAL_SHIFT_BITS = 8; \ - if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) \ - for (; \ - (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \ - __clz_c += 8) \ - { __clz_x <<= LOCAL_SHIFT_BITS; } \ - for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++) \ - __clz_x <<= 1; \ - (count) = __clz_c; \ - } while (0) - -#define gmp_ctz(count, x) do { \ - mp_limb_t __ctz_x = (x); \ - unsigned __ctz_c = 0; \ - gmp_clz (__ctz_c, __ctz_x & - __ctz_x); \ - (count) = GMP_LIMB_BITS - 1 - __ctz_c; \ - } while (0) - -#define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \ - do { \ - mp_limb_t __x; \ - __x = (al) + (bl); \ - (sh) = (ah) + (bh) + (__x < (al)); \ - (sl) = __x; \ - } while (0) - -#define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \ - do { \ - mp_limb_t __x; \ - __x = (al) - (bl); \ - (sh) = (ah) - (bh) - ((al) < (bl)); \ - (sl) = __x; \ - } while (0) - -#define gmp_umul_ppmm(w1, w0, u, v) \ - do { \ - int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; \ - if (sizeof(unsigned int) * CHAR_BIT >= 2 * GMP_LIMB_BITS) \ - { \ - unsigned int __ww = (unsigned int) (u) * (v); \ - w0 = (mp_limb_t) __ww; \ - w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ - } \ - else if (GMP_ULONG_BITS >= 2 * GMP_LIMB_BITS) \ - { \ - unsigned long int __ww = (unsigned long int) (u) * (v); \ - w0 = (mp_limb_t) __ww; \ - w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ - } \ - else { \ - mp_limb_t __x0, __x1, __x2, __x3; \ - unsigned __ul, __vl, __uh, __vh; \ - mp_limb_t __u = (u), __v = (v); \ - assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); \ - \ - __ul = __u & GMP_LLIMB_MASK; \ - __uh = __u >> (GMP_LIMB_BITS / 2); \ - __vl = __v & GMP_LLIMB_MASK; \ - __vh = __v >> (GMP_LIMB_BITS / 2); \ - \ - __x0 = (mp_limb_t) __ul * __vl; \ - __x1 = (mp_limb_t) __ul * __vh; \ - __x2 = (mp_limb_t) __uh * __vl; \ - __x3 = (mp_limb_t) __uh * __vh; \ - \ - __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */ \ - __x1 += __x2; /* but this indeed can */ \ - if (__x1 < __x2) /* did we get it? 
*/ \ - __x3 += GMP_HLIMB_BIT; /* yes, add it in the proper pos. */ \ - \ - (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \ - (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \ - } \ - } while (0) - -/* If mp_limb_t is of size smaller than int, plain u*v implies - automatic promotion to *signed* int, and then multiply may overflow - and cause undefined behavior. Explicitly cast to unsigned int for - that case. */ -#define gmp_umullo_limb(u, v) \ - ((sizeof(mp_limb_t) >= sizeof(int)) ? (u)*(v) : (unsigned int)(u) * (v)) - -#define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di) \ - do { \ - mp_limb_t _qh, _ql, _r, _mask; \ - gmp_umul_ppmm (_qh, _ql, (nh), (di)); \ - gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \ - _r = (nl) - gmp_umullo_limb (_qh, (d)); \ - _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \ - _qh += _mask; \ - _r += _mask & (d); \ - if (_r >= (d)) \ - { \ - _r -= (d); \ - _qh++; \ - } \ - \ - (r) = _r; \ - (q) = _qh; \ - } while (0) - -#define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \ - do { \ - mp_limb_t _q0, _t1, _t0, _mask; \ - gmp_umul_ppmm ((q), _q0, (n2), (dinv)); \ - gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \ - \ - /* Compute the two most significant limbs of n - q'd */ \ - (r1) = (n1) - gmp_umullo_limb ((d1), (q)); \ - gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \ - gmp_umul_ppmm (_t1, _t0, (d0), (q)); \ - gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \ - (q)++; \ - \ - /* Conditionally adjust q and the remainders */ \ - _mask = - (mp_limb_t) ((r1) >= _q0); \ - (q) += _mask; \ - gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \ - if ((r1) >= (d1)) \ - { \ - if ((r1) > (d1) || (r0) >= (d0)) \ - { \ - (q)++; \ - gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \ - } \ - } \ - } while (0) - -/* Swap macros. */ -#define MP_LIMB_T_SWAP(x, y) \ - do { \ - mp_limb_t __mp_limb_t_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_limb_t_swap__tmp; \ - } while (0) -#define MP_SIZE_T_SWAP(x, y) \ - do { \ - mp_size_t __mp_size_t_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_size_t_swap__tmp; \ - } while (0) -#define MP_BITCNT_T_SWAP(x,y) \ - do { \ - mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_bitcnt_t_swap__tmp; \ - } while (0) -#define MP_PTR_SWAP(x, y) \ - do { \ - mp_ptr __mp_ptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_ptr_swap__tmp; \ - } while (0) -#define MP_SRCPTR_SWAP(x, y) \ - do { \ - mp_srcptr __mp_srcptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_srcptr_swap__tmp; \ - } while (0) - -#define MPN_PTR_SWAP(xp,xs, yp,ys) \ - do { \ - MP_PTR_SWAP (xp, yp); \ - MP_SIZE_T_SWAP (xs, ys); \ - } while(0) -#define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \ - do { \ - MP_SRCPTR_SWAP (xp, yp); \ - MP_SIZE_T_SWAP (xs, ys); \ - } while(0) - -#define MPZ_PTR_SWAP(x, y) \ - do { \ - mpz_ptr __mpz_ptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mpz_ptr_swap__tmp; \ - } while (0) -#define MPZ_SRCPTR_SWAP(x, y) \ - do { \ - mpz_srcptr __mpz_srcptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mpz_srcptr_swap__tmp; \ - } while (0) - -const int mp_bits_per_limb = GMP_LIMB_BITS; - - -/* Memory allocation and other helper functions. 
*/ -static void -gmp_die (const char *msg) -{ - fprintf (stderr, "%s\n", msg); - abort(); -} - -static void * -gmp_default_alloc (size_t size) -{ - void *p; - - assert (size > 0); - - p = malloc (size); - if (!p) - gmp_die("gmp_default_alloc: Virtual memory exhausted."); - - return p; -} - -static void * -gmp_default_realloc (void *old, size_t unused_old_size, size_t new_size) -{ - void * p; - - p = realloc (old, new_size); - - if (!p) - gmp_die("gmp_default_realloc: Virtual memory exhausted."); - - return p; -} - -static void -gmp_default_free (void *p, size_t unused_size) -{ - free (p); -} - -static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc; -static void * (*gmp_reallocate_func) (void *, size_t, size_t) = gmp_default_realloc; -static void (*gmp_free_func) (void *, size_t) = gmp_default_free; - -void -mp_get_memory_functions (void *(**alloc_func) (size_t), - void *(**realloc_func) (void *, size_t, size_t), - void (**free_func) (void *, size_t)) -{ - if (alloc_func) - *alloc_func = gmp_allocate_func; - - if (realloc_func) - *realloc_func = gmp_reallocate_func; - - if (free_func) - *free_func = gmp_free_func; -} - -void -mp_set_memory_functions (void *(*alloc_func) (size_t), - void *(*realloc_func) (void *, size_t, size_t), - void (*free_func) (void *, size_t)) -{ - if (!alloc_func) - alloc_func = gmp_default_alloc; - if (!realloc_func) - realloc_func = gmp_default_realloc; - if (!free_func) - free_func = gmp_default_free; - - gmp_allocate_func = alloc_func; - gmp_reallocate_func = realloc_func; - gmp_free_func = free_func; -} - -#define gmp_alloc(size) ((*gmp_allocate_func)((size))) -#define gmp_free(p, size) ((*gmp_free_func) ((p), (size))) -#define gmp_realloc(ptr, old_size, size) ((*gmp_reallocate_func)(ptr, old_size, size)) - -static mp_ptr -gmp_alloc_limbs (mp_size_t size) -{ - return (mp_ptr) gmp_alloc (size * sizeof (mp_limb_t)); -} - -static mp_ptr -gmp_realloc_limbs (mp_ptr old, mp_size_t old_size, mp_size_t size) -{ - assert (size > 0); - return (mp_ptr) gmp_realloc (old, old_size * sizeof (mp_limb_t), size * sizeof (mp_limb_t)); -} - -static void -gmp_free_limbs (mp_ptr old, mp_size_t size) -{ - gmp_free (old, size * sizeof (mp_limb_t)); -} - - -/* MPN interface */ - -void -mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n) -{ - mp_size_t i; - for (i = 0; i < n; i++) - d[i] = s[i]; -} - -void -mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n) -{ - while (--n >= 0) - d[n] = s[n]; -} - -int -mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - while (--n >= 0) - { - if (ap[n] != bp[n]) - return ap[n] > bp[n] ? 1 : -1; - } - return 0; -} - -static int -mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) -{ - if (an != bn) - return an < bn ? 
-1 : 1; - else - return mpn_cmp (ap, bp, an); -} - -static mp_size_t -mpn_normalized_size (mp_srcptr xp, mp_size_t n) -{ - while (n > 0 && xp[n-1] == 0) - --n; - return n; -} - -int -mpn_zero_p(mp_srcptr rp, mp_size_t n) -{ - return mpn_normalized_size (rp, n) == 0; -} - -void -mpn_zero (mp_ptr rp, mp_size_t n) -{ - while (--n >= 0) - rp[n] = 0; -} - -mp_limb_t -mpn_add_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) -{ - mp_size_t i; - - assert (n > 0); - i = 0; - do - { - mp_limb_t r = ap[i] + b; - /* Carry out */ - b = (r < b); - rp[i] = r; - } - while (++i < n); - - return b; -} - -mp_limb_t -mpn_add_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - mp_size_t i; - mp_limb_t cy; - - for (i = 0, cy = 0; i < n; i++) - { - mp_limb_t a, b, r; - a = ap[i]; b = bp[i]; - r = a + cy; - cy = (r < cy); - r += b; - cy += (r < b); - rp[i] = r; - } - return cy; -} - -mp_limb_t -mpn_add (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) -{ - mp_limb_t cy; - - assert (an >= bn); - - cy = mpn_add_n (rp, ap, bp, bn); - if (an > bn) - cy = mpn_add_1 (rp + bn, ap + bn, an - bn, cy); - return cy; -} - -mp_limb_t -mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) -{ - mp_size_t i; - - assert (n > 0); - - i = 0; - do - { - mp_limb_t a = ap[i]; - /* Carry out */ - mp_limb_t cy = a < b; - rp[i] = a - b; - b = cy; - } - while (++i < n); - - return b; -} - -mp_limb_t -mpn_sub_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - mp_size_t i; - mp_limb_t cy; - - for (i = 0, cy = 0; i < n; i++) - { - mp_limb_t a, b; - a = ap[i]; b = bp[i]; - b += cy; - cy = (b < cy); - cy += (a < b); - rp[i] = a - b; - } - return cy; -} - -mp_limb_t -mpn_sub (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) -{ - mp_limb_t cy; - - assert (an >= bn); - - cy = mpn_sub_n (rp, ap, bp, bn); - if (an > bn) - cy = mpn_sub_1 (rp + bn, ap + bn, an - bn, cy); - return cy; -} - -mp_limb_t -mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) -{ - mp_limb_t ul, cl, hpl, lpl; - - assert (n >= 1); - - cl = 0; - do - { - ul = *up++; - gmp_umul_ppmm (hpl, lpl, ul, vl); - - lpl += cl; - cl = (lpl < cl) + hpl; - - *rp++ = lpl; - } - while (--n != 0); - - return cl; -} - -mp_limb_t -mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) -{ - mp_limb_t ul, cl, hpl, lpl, rl; - - assert (n >= 1); - - cl = 0; - do - { - ul = *up++; - gmp_umul_ppmm (hpl, lpl, ul, vl); - - lpl += cl; - cl = (lpl < cl) + hpl; - - rl = *rp; - lpl = rl + lpl; - cl += lpl < rl; - *rp++ = lpl; - } - while (--n != 0); - - return cl; -} - -mp_limb_t -mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) -{ - mp_limb_t ul, cl, hpl, lpl, rl; - - assert (n >= 1); - - cl = 0; - do - { - ul = *up++; - gmp_umul_ppmm (hpl, lpl, ul, vl); - - lpl += cl; - cl = (lpl < cl) + hpl; - - rl = *rp; - lpl = rl - lpl; - cl += lpl > rl; - *rp++ = lpl; - } - while (--n != 0); - - return cl; -} - -mp_limb_t -mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn) -{ - assert (un >= vn); - assert (vn >= 1); - assert (!GMP_MPN_OVERLAP_P(rp, un + vn, up, un)); - assert (!GMP_MPN_OVERLAP_P(rp, un + vn, vp, vn)); - - /* We first multiply by the low order limb. This result can be - stored, not added, to rp. We also avoid a loop for zeroing this - way. */ - - rp[un] = mpn_mul_1 (rp, up, un, vp[0]); - - /* Now accumulate the product of up[] and the next higher limb from - vp[]. 
*/ - - while (--vn >= 1) - { - rp += 1, vp += 1; - rp[un] = mpn_addmul_1 (rp, up, un, vp[0]); - } - return rp[un]; -} - -void -mpn_mul_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - mpn_mul (rp, ap, n, bp, n); -} - -void -mpn_sqr (mp_ptr rp, mp_srcptr ap, mp_size_t n) -{ - mpn_mul (rp, ap, n, ap, n); -} - -mp_limb_t -mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) -{ - mp_limb_t high_limb, low_limb; - unsigned int tnc; - mp_limb_t retval; - - assert (n >= 1); - assert (cnt >= 1); - assert (cnt < GMP_LIMB_BITS); - - up += n; - rp += n; - - tnc = GMP_LIMB_BITS - cnt; - low_limb = *--up; - retval = low_limb >> tnc; - high_limb = (low_limb << cnt); - - while (--n != 0) - { - low_limb = *--up; - *--rp = high_limb | (low_limb >> tnc); - high_limb = (low_limb << cnt); - } - *--rp = high_limb; - - return retval; -} - -mp_limb_t -mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) -{ - mp_limb_t high_limb, low_limb; - unsigned int tnc; - mp_limb_t retval; - - assert (n >= 1); - assert (cnt >= 1); - assert (cnt < GMP_LIMB_BITS); - - tnc = GMP_LIMB_BITS - cnt; - high_limb = *up++; - retval = (high_limb << tnc); - low_limb = high_limb >> cnt; - - while (--n != 0) - { - high_limb = *up++; - *rp++ = low_limb | (high_limb << tnc); - low_limb = high_limb >> cnt; - } - *rp = low_limb; - - return retval; -} - -static mp_bitcnt_t -mpn_common_scan (mp_limb_t limb, mp_size_t i, mp_srcptr up, mp_size_t un, - mp_limb_t ux) -{ - unsigned cnt; - - assert (ux == 0 || ux == GMP_LIMB_MAX); - assert (0 <= i && i <= un ); - - while (limb == 0) - { - i++; - if (i == un) - return (ux == 0 ? ~(mp_bitcnt_t) 0 : un * GMP_LIMB_BITS); - limb = ux ^ up[i]; - } - gmp_ctz (cnt, limb); - return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt; -} - -mp_bitcnt_t -mpn_scan1 (mp_srcptr ptr, mp_bitcnt_t bit) -{ - mp_size_t i; - i = bit / GMP_LIMB_BITS; - - return mpn_common_scan ( ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), - i, ptr, i, 0); -} - -mp_bitcnt_t -mpn_scan0 (mp_srcptr ptr, mp_bitcnt_t bit) -{ - mp_size_t i; - i = bit / GMP_LIMB_BITS; - - return mpn_common_scan (~ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), - i, ptr, i, GMP_LIMB_MAX); -} - -void -mpn_com (mp_ptr rp, mp_srcptr up, mp_size_t n) -{ - while (--n >= 0) - *rp++ = ~ *up++; -} - -mp_limb_t -mpn_neg (mp_ptr rp, mp_srcptr up, mp_size_t n) -{ - while (*up == 0) - { - *rp = 0; - if (!--n) - return 0; - ++up; ++rp; - } - *rp = - *up; - mpn_com (++rp, ++up, --n); - return 1; -} - - -/* MPN division interface. */ - -/* The 3/2 inverse is defined as - - m = floor( (B^3-1) / (B u1 + u0)) - B -*/ -mp_limb_t -mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0) -{ - mp_limb_t r, m; - - { - mp_limb_t p, ql; - unsigned ul, uh, qh; - - assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); - /* For notation, let b denote the half-limb base, so that B = b^2. - Split u1 = b uh + ul. */ - ul = u1 & GMP_LLIMB_MASK; - uh = u1 >> (GMP_LIMB_BITS / 2); - - /* Approximation of the high half of quotient. Differs from the 2/1 - inverse of the half limb uh, since we have already subtracted - u0. */ - qh = (u1 ^ GMP_LIMB_MAX) / uh; - - /* Adjust to get a half-limb 3/2 inverse, i.e., we want - - qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u - = floor( (b (~u) + b-1) / u), - - and the remainder - - r = b (~u) + b-1 - qh (b uh + ul) - = b (~u - qh uh) + b-1 - qh ul - - Subtraction of qh ul may underflow, which implies adjustments. - But by normalization, 2 u >= B > qh ul, so we need to adjust by - at most 2. 
- */ - - r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK; - - p = (mp_limb_t) qh * ul; - /* Adjustment steps taken from udiv_qrnnd_c */ - if (r < p) - { - qh--; - r += u1; - if (r >= u1) /* i.e. we didn't get carry when adding to r */ - if (r < p) - { - qh--; - r += u1; - } - } - r -= p; - - /* Low half of the quotient is - - ql = floor ( (b r + b-1) / u1). - - This is a 3/2 division (on half-limbs), for which qh is a - suitable inverse. */ - - p = (r >> (GMP_LIMB_BITS / 2)) * qh + r; - /* Unlike full-limb 3/2, we can add 1 without overflow. For this to - work, it is essential that ql is a full mp_limb_t. */ - ql = (p >> (GMP_LIMB_BITS / 2)) + 1; - - /* By the 3/2 trick, we don't need the high half limb. */ - r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1; - - if (r >= (GMP_LIMB_MAX & (p << (GMP_LIMB_BITS / 2)))) - { - ql--; - r += u1; - } - m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql; - if (r >= u1) - { - m++; - r -= u1; - } - } - - /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a - 3/2 inverse. */ - if (u0 > 0) - { - mp_limb_t th, tl; - r = ~r; - r += u0; - if (r < u0) - { - m--; - if (r >= u1) - { - m--; - r -= u1; - } - r -= u1; - } - gmp_umul_ppmm (th, tl, u0, m); - r += th; - if (r < th) - { - m--; - m -= ((r > u1) | ((r == u1) & (tl > u0))); - } - } - - return m; -} - -struct gmp_div_inverse -{ - /* Normalization shift count. */ - unsigned shift; - /* Normalized divisor (d0 unused for mpn_div_qr_1) */ - mp_limb_t d1, d0; - /* Inverse, for 2/1 or 3/2. */ - mp_limb_t di; -}; - -static void -mpn_div_qr_1_invert (struct gmp_div_inverse *inv, mp_limb_t d) -{ - unsigned shift; - - assert (d > 0); - gmp_clz (shift, d); - inv->shift = shift; - inv->d1 = d << shift; - inv->di = mpn_invert_limb (inv->d1); -} - -static void -mpn_div_qr_2_invert (struct gmp_div_inverse *inv, - mp_limb_t d1, mp_limb_t d0) -{ - unsigned shift; - - assert (d1 > 0); - gmp_clz (shift, d1); - inv->shift = shift; - if (shift > 0) - { - d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); - d0 <<= shift; - } - inv->d1 = d1; - inv->d0 = d0; - inv->di = mpn_invert_3by2 (d1, d0); -} - -static void -mpn_div_qr_invert (struct gmp_div_inverse *inv, - mp_srcptr dp, mp_size_t dn) -{ - assert (dn > 0); - - if (dn == 1) - mpn_div_qr_1_invert (inv, dp[0]); - else if (dn == 2) - mpn_div_qr_2_invert (inv, dp[1], dp[0]); - else - { - unsigned shift; - mp_limb_t d1, d0; - - d1 = dp[dn-1]; - d0 = dp[dn-2]; - assert (d1 > 0); - gmp_clz (shift, d1); - inv->shift = shift; - if (shift > 0) - { - d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); - d0 = (d0 << shift) | (dp[dn-3] >> (GMP_LIMB_BITS - shift)); - } - inv->d1 = d1; - inv->d0 = d0; - inv->di = mpn_invert_3by2 (d1, d0); - } -} - -/* Not matching current public gmp interface, rather corresponding to - the sbpi1_div_* functions. */ -static mp_limb_t -mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn, - const struct gmp_div_inverse *inv) -{ - mp_limb_t d, di; - mp_limb_t r; - mp_ptr tp = NULL; - mp_size_t tn = 0; - - if (inv->shift > 0) - { - /* Shift, reusing qp area if possible. In-place shift if qp == np. 
*/ - tp = qp; - if (!tp) - { - tn = nn; - tp = gmp_alloc_limbs (tn); - } - r = mpn_lshift (tp, np, nn, inv->shift); - np = tp; - } - else - r = 0; - - d = inv->d1; - di = inv->di; - while (--nn >= 0) - { - mp_limb_t q; - - gmp_udiv_qrnnd_preinv (q, r, r, np[nn], d, di); - if (qp) - qp[nn] = q; - } - if (tn) - gmp_free_limbs (tp, tn); - - return r >> inv->shift; -} - -static void -mpn_div_qr_2_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, - const struct gmp_div_inverse *inv) -{ - unsigned shift; - mp_size_t i; - mp_limb_t d1, d0, di, r1, r0; - - assert (nn >= 2); - shift = inv->shift; - d1 = inv->d1; - d0 = inv->d0; - di = inv->di; - - if (shift > 0) - r1 = mpn_lshift (np, np, nn, shift); - else - r1 = 0; - - r0 = np[nn - 1]; - - i = nn - 2; - do - { - mp_limb_t n0, q; - n0 = np[i]; - gmp_udiv_qr_3by2 (q, r1, r0, r1, r0, n0, d1, d0, di); - - if (qp) - qp[i] = q; - } - while (--i >= 0); - - if (shift > 0) - { - assert ((r0 & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - shift))) == 0); - r0 = (r0 >> shift) | (r1 << (GMP_LIMB_BITS - shift)); - r1 >>= shift; - } - - np[1] = r1; - np[0] = r0; -} - -static void -mpn_div_qr_pi1 (mp_ptr qp, - mp_ptr np, mp_size_t nn, mp_limb_t n1, - mp_srcptr dp, mp_size_t dn, - mp_limb_t dinv) -{ - mp_size_t i; - - mp_limb_t d1, d0; - mp_limb_t cy, cy1; - mp_limb_t q; - - assert (dn > 2); - assert (nn >= dn); - - d1 = dp[dn - 1]; - d0 = dp[dn - 2]; - - assert ((d1 & GMP_LIMB_HIGHBIT) != 0); - /* Iteration variable is the index of the q limb. - * - * We divide - * by - */ - - i = nn - dn; - do - { - mp_limb_t n0 = np[dn-1+i]; - - if (n1 == d1 && n0 == d0) - { - q = GMP_LIMB_MAX; - mpn_submul_1 (np+i, dp, dn, q); - n1 = np[dn-1+i]; /* update n1, last loop's value will now be invalid */ - } - else - { - gmp_udiv_qr_3by2 (q, n1, n0, n1, n0, np[dn-2+i], d1, d0, dinv); - - cy = mpn_submul_1 (np + i, dp, dn-2, q); - - cy1 = n0 < cy; - n0 = n0 - cy; - cy = n1 < cy1; - n1 = n1 - cy1; - np[dn-2+i] = n0; - - if (cy != 0) - { - n1 += d1 + mpn_add_n (np + i, np + i, dp, dn - 1); - q--; - } - } - - if (qp) - qp[i] = q; - } - while (--i >= 0); - - np[dn - 1] = n1; -} - -static void -mpn_div_qr_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, - mp_srcptr dp, mp_size_t dn, - const struct gmp_div_inverse *inv) -{ - assert (dn > 0); - assert (nn >= dn); - - if (dn == 1) - np[0] = mpn_div_qr_1_preinv (qp, np, nn, inv); - else if (dn == 2) - mpn_div_qr_2_preinv (qp, np, nn, inv); - else - { - mp_limb_t nh; - unsigned shift; - - assert (inv->d1 == dp[dn-1]); - assert (inv->d0 == dp[dn-2]); - assert ((inv->d1 & GMP_LIMB_HIGHBIT) != 0); - - shift = inv->shift; - if (shift > 0) - nh = mpn_lshift (np, np, nn, shift); - else - nh = 0; - - mpn_div_qr_pi1 (qp, np, nn, nh, dp, dn, inv->di); - - if (shift > 0) - gmp_assert_nocarry (mpn_rshift (np, np, dn, shift)); - } -} - -static void -mpn_div_qr (mp_ptr qp, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn) -{ - struct gmp_div_inverse inv; - mp_ptr tp = NULL; - - assert (dn > 0); - assert (nn >= dn); - - mpn_div_qr_invert (&inv, dp, dn); - if (dn > 2 && inv.shift > 0) - { - tp = gmp_alloc_limbs (dn); - gmp_assert_nocarry (mpn_lshift (tp, dp, dn, inv.shift)); - dp = tp; - } - mpn_div_qr_preinv (qp, np, nn, dp, dn, &inv); - if (tp) - gmp_free_limbs (tp, dn); -} - - -/* MPN base conversion. 
*/ -static unsigned -mpn_base_power_of_two_p (unsigned b) -{ - switch (b) - { - case 2: return 1; - case 4: return 2; - case 8: return 3; - case 16: return 4; - case 32: return 5; - case 64: return 6; - case 128: return 7; - case 256: return 8; - default: return 0; - } -} - -struct mpn_base_info -{ - /* bb is the largest power of the base which fits in one limb, and - exp is the corresponding exponent. */ - unsigned exp; - mp_limb_t bb; -}; - -static void -mpn_get_base_info (struct mpn_base_info *info, mp_limb_t b) -{ - mp_limb_t m; - mp_limb_t p; - unsigned exp; - - m = GMP_LIMB_MAX / b; - for (exp = 1, p = b; p <= m; exp++) - p *= b; - - info->exp = exp; - info->bb = p; -} - -static mp_bitcnt_t -mpn_limb_size_in_base_2 (mp_limb_t u) -{ - unsigned shift; - - assert (u > 0); - gmp_clz (shift, u); - return GMP_LIMB_BITS - shift; -} - -static size_t -mpn_get_str_bits (unsigned char *sp, unsigned bits, mp_srcptr up, mp_size_t un) -{ - unsigned char mask; - size_t sn, j; - mp_size_t i; - unsigned shift; - - sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]) - + bits - 1) / bits; - - mask = (1U << bits) - 1; - - for (i = 0, j = sn, shift = 0; j-- > 0;) - { - unsigned char digit = up[i] >> shift; - - shift += bits; - - if (shift >= GMP_LIMB_BITS && ++i < un) - { - shift -= GMP_LIMB_BITS; - digit |= up[i] << (bits - shift); - } - sp[j] = digit & mask; - } - return sn; -} - -/* We generate digits from the least significant end, and reverse at - the end. */ -static size_t -mpn_limb_get_str (unsigned char *sp, mp_limb_t w, - const struct gmp_div_inverse *binv) -{ - mp_size_t i; - for (i = 0; w > 0; i++) - { - mp_limb_t h, l, r; - - h = w >> (GMP_LIMB_BITS - binv->shift); - l = w << binv->shift; - - gmp_udiv_qrnnd_preinv (w, r, h, l, binv->d1, binv->di); - assert ((r & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - binv->shift))) == 0); - r >>= binv->shift; - - sp[i] = r; - } - return i; -} - -static size_t -mpn_get_str_other (unsigned char *sp, - int base, const struct mpn_base_info *info, - mp_ptr up, mp_size_t un) -{ - struct gmp_div_inverse binv; - size_t sn; - size_t i; - - mpn_div_qr_1_invert (&binv, base); - - sn = 0; - - if (un > 1) - { - struct gmp_div_inverse bbinv; - mpn_div_qr_1_invert (&bbinv, info->bb); - - do - { - mp_limb_t w; - size_t done; - w = mpn_div_qr_1_preinv (up, up, un, &bbinv); - un -= (up[un-1] == 0); - done = mpn_limb_get_str (sp + sn, w, &binv); - - for (sn += done; done < info->exp; done++) - sp[sn++] = 0; - } - while (un > 1); - } - sn += mpn_limb_get_str (sp + sn, up[0], &binv); - - /* Reverse order */ - for (i = 0; 2*i + 1 < sn; i++) - { - unsigned char t = sp[i]; - sp[i] = sp[sn - i - 1]; - sp[sn - i - 1] = t; - } - - return sn; -} - -size_t -mpn_get_str (unsigned char *sp, int base, mp_ptr up, mp_size_t un) -{ - unsigned bits; - - assert (un > 0); - assert (up[un-1] > 0); - - bits = mpn_base_power_of_two_p (base); - if (bits) - return mpn_get_str_bits (sp, bits, up, un); - else - { - struct mpn_base_info info; - - mpn_get_base_info (&info, base); - return mpn_get_str_other (sp, base, &info, up, un); - } -} - -static mp_size_t -mpn_set_str_bits (mp_ptr rp, const unsigned char *sp, size_t sn, - unsigned bits) -{ - mp_size_t rn; - mp_limb_t limb; - unsigned shift; - - for (limb = 0, rn = 0, shift = 0; sn-- > 0; ) - { - limb |= (mp_limb_t) sp[sn] << shift; - shift += bits; - if (shift >= GMP_LIMB_BITS) - { - shift -= GMP_LIMB_BITS; - rp[rn++] = limb; - /* Next line is correct also if shift == 0, - bits == 8, and mp_limb_t == unsigned char. 
*/ - limb = (unsigned int) sp[sn] >> (bits - shift); - } - } - if (limb != 0) - rp[rn++] = limb; - else - rn = mpn_normalized_size (rp, rn); - return rn; -} - -/* Result is usually normalized, except for all-zero input, in which - case a single zero limb is written at *RP, and 1 is returned. */ -static mp_size_t -mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn, - mp_limb_t b, const struct mpn_base_info *info) -{ - mp_size_t rn; - mp_limb_t w; - unsigned k; - size_t j; - - assert (sn > 0); - - k = 1 + (sn - 1) % info->exp; - - j = 0; - w = sp[j++]; - while (--k != 0) - w = w * b + sp[j++]; - - rp[0] = w; - - for (rn = 1; j < sn;) - { - mp_limb_t cy; - - w = sp[j++]; - for (k = 1; k < info->exp; k++) - w = w * b + sp[j++]; - - cy = mpn_mul_1 (rp, rp, rn, info->bb); - cy += mpn_add_1 (rp, rp, rn, w); - if (cy > 0) - rp[rn++] = cy; - } - assert (j == sn); - - return rn; -} - -mp_size_t -mpn_set_str (mp_ptr rp, const unsigned char *sp, size_t sn, int base) -{ - unsigned bits; - - if (sn == 0) - return 0; - - bits = mpn_base_power_of_two_p (base); - if (bits) - return mpn_set_str_bits (rp, sp, sn, bits); - else - { - struct mpn_base_info info; - - mpn_get_base_info (&info, base); - return mpn_set_str_other (rp, sp, sn, base, &info); - } -} - - -/* MPZ interface */ -void -mpz_init (mpz_t r) -{ - static const mp_limb_t dummy_limb = GMP_LIMB_MAX & 0xc1a0; - - r->_mp_alloc = 0; - r->_mp_size = 0; - r->_mp_d = (mp_ptr) &dummy_limb; -} - -/* The utility of this function is a bit limited, since many functions - assigns the result variable using mpz_swap. */ -void -mpz_init2 (mpz_t r, mp_bitcnt_t bits) -{ - mp_size_t rn; - - bits -= (bits != 0); /* Round down, except if 0 */ - rn = 1 + bits / GMP_LIMB_BITS; - - r->_mp_alloc = rn; - r->_mp_size = 0; - r->_mp_d = gmp_alloc_limbs (rn); -} - -void -mpz_clear (mpz_t r) -{ - if (r->_mp_alloc) - gmp_free_limbs (r->_mp_d, r->_mp_alloc); -} - -static mp_ptr -mpz_realloc (mpz_t r, mp_size_t size) -{ - size = GMP_MAX (size, 1); - - if (r->_mp_alloc) - r->_mp_d = gmp_realloc_limbs (r->_mp_d, r->_mp_alloc, size); - else - r->_mp_d = gmp_alloc_limbs (size); - r->_mp_alloc = size; - - if (GMP_ABS (r->_mp_size) > size) - r->_mp_size = 0; - - return r->_mp_d; -} - -/* Realloc for an mpz_t WHAT if it has less than NEEDED limbs. */ -#define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc \ - ? mpz_realloc(z,n) \ - : (z)->_mp_d) - -/* MPZ assignment and basic conversions. 
*/ -void -mpz_set_si (mpz_t r, signed long int x) -{ - if (x >= 0) - mpz_set_ui (r, x); - else /* (x < 0) */ - if (GMP_LIMB_BITS < GMP_ULONG_BITS) - { - mpz_set_ui (r, GMP_NEG_CAST (unsigned long int, x)); - mpz_neg (r, r); - } - else - { - r->_mp_size = -1; - MPZ_REALLOC (r, 1)[0] = GMP_NEG_CAST (unsigned long int, x); - } -} - -void -mpz_set_ui (mpz_t r, unsigned long int x) -{ - if (x > 0) - { - r->_mp_size = 1; - MPZ_REALLOC (r, 1)[0] = x; - if (GMP_LIMB_BITS < GMP_ULONG_BITS) - { - int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; - while (x >>= LOCAL_GMP_LIMB_BITS) - { - ++ r->_mp_size; - MPZ_REALLOC (r, r->_mp_size)[r->_mp_size - 1] = x; - } - } - } - else - r->_mp_size = 0; -} - -void -mpz_set (mpz_t r, const mpz_t x) -{ - /* Allow the NOP r == x */ - if (r != x) - { - mp_size_t n; - mp_ptr rp; - - n = GMP_ABS (x->_mp_size); - rp = MPZ_REALLOC (r, n); - - mpn_copyi (rp, x->_mp_d, n); - r->_mp_size = x->_mp_size; - } -} - -void -mpz_init_set_si (mpz_t r, signed long int x) -{ - mpz_init (r); - mpz_set_si (r, x); -} - -void -mpz_init_set_ui (mpz_t r, unsigned long int x) -{ - mpz_init (r); - mpz_set_ui (r, x); -} - -void -mpz_init_set (mpz_t r, const mpz_t x) -{ - mpz_init (r); - mpz_set (r, x); -} - -int -mpz_fits_slong_p (const mpz_t u) -{ - return mpz_cmp_si (u, LONG_MAX) <= 0 && mpz_cmp_si (u, LONG_MIN) >= 0; -} - -static int -mpn_absfits_ulong_p (mp_srcptr up, mp_size_t un) -{ - int ulongsize = GMP_ULONG_BITS / GMP_LIMB_BITS; - mp_limb_t ulongrem = 0; - - if (GMP_ULONG_BITS % GMP_LIMB_BITS != 0) - ulongrem = (mp_limb_t) (ULONG_MAX >> GMP_LIMB_BITS * ulongsize) + 1; - - return un <= ulongsize || (up[ulongsize] < ulongrem && un == ulongsize + 1); -} - -int -mpz_fits_ulong_p (const mpz_t u) -{ - mp_size_t us = u->_mp_size; - - return us >= 0 && mpn_absfits_ulong_p (u->_mp_d, us); -} - -int -mpz_fits_sint_p (const mpz_t u) -{ - return mpz_cmp_si (u, INT_MAX) <= 0 && mpz_cmp_si (u, INT_MIN) >= 0; -} - -int -mpz_fits_uint_p (const mpz_t u) -{ - return u->_mp_size >= 0 && mpz_cmpabs_ui (u, UINT_MAX) <= 0; -} - -int -mpz_fits_sshort_p (const mpz_t u) -{ - return mpz_cmp_si (u, SHRT_MAX) <= 0 && mpz_cmp_si (u, SHRT_MIN) >= 0; -} - -int -mpz_fits_ushort_p (const mpz_t u) -{ - return u->_mp_size >= 0 && mpz_cmpabs_ui (u, USHRT_MAX) <= 0; -} - -long int -mpz_get_si (const mpz_t u) -{ - unsigned long r = mpz_get_ui (u); - unsigned long c = -LONG_MAX - LONG_MIN; - - if (u->_mp_size < 0) - /* This expression is necessary to properly handle -LONG_MIN */ - return -(long) c - (long) ((r - c) & LONG_MAX); - else - return (long) (r & LONG_MAX); -} - -unsigned long int -mpz_get_ui (const mpz_t u) -{ - if (GMP_LIMB_BITS < GMP_ULONG_BITS) - { - int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; - unsigned long r = 0; - mp_size_t n = GMP_ABS (u->_mp_size); - n = GMP_MIN (n, 1 + (mp_size_t) (GMP_ULONG_BITS - 1) / GMP_LIMB_BITS); - while (--n >= 0) - r = (r << LOCAL_GMP_LIMB_BITS) + u->_mp_d[n]; - return r; - } - - return u->_mp_size == 0 ? 
0 : u->_mp_d[0]; -} - -size_t -mpz_size (const mpz_t u) -{ - return GMP_ABS (u->_mp_size); -} - -mp_limb_t -mpz_getlimbn (const mpz_t u, mp_size_t n) -{ - if (n >= 0 && n < GMP_ABS (u->_mp_size)) - return u->_mp_d[n]; - else - return 0; -} - -void -mpz_realloc2 (mpz_t x, mp_bitcnt_t n) -{ - mpz_realloc (x, 1 + (n - (n != 0)) / GMP_LIMB_BITS); -} - -mp_srcptr -mpz_limbs_read (mpz_srcptr x) -{ - return x->_mp_d; -} - -mp_ptr -mpz_limbs_modify (mpz_t x, mp_size_t n) -{ - assert (n > 0); - return MPZ_REALLOC (x, n); -} - -mp_ptr -mpz_limbs_write (mpz_t x, mp_size_t n) -{ - return mpz_limbs_modify (x, n); -} - -void -mpz_limbs_finish (mpz_t x, mp_size_t xs) -{ - mp_size_t xn; - xn = mpn_normalized_size (x->_mp_d, GMP_ABS (xs)); - x->_mp_size = xs < 0 ? -xn : xn; -} - -static mpz_srcptr -mpz_roinit_normal_n (mpz_t x, mp_srcptr xp, mp_size_t xs) -{ - x->_mp_alloc = 0; - x->_mp_d = (mp_ptr) xp; - x->_mp_size = xs; - return x; -} - -mpz_srcptr -mpz_roinit_n (mpz_t x, mp_srcptr xp, mp_size_t xs) -{ - mpz_roinit_normal_n (x, xp, xs); - mpz_limbs_finish (x, xs); - return x; -} - - -/* Conversions and comparison to double. */ -void -mpz_set_d (mpz_t r, double x) -{ - int sign; - mp_ptr rp; - mp_size_t rn, i; - double B; - double Bi; - mp_limb_t f; - - /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is - zero or infinity. */ - if (x != x || x == x * 0.5) - { - r->_mp_size = 0; - return; - } - - sign = x < 0.0 ; - if (sign) - x = - x; - - if (x < 1.0) - { - r->_mp_size = 0; - return; - } - B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); - Bi = 1.0 / B; - for (rn = 1; x >= B; rn++) - x *= Bi; - - rp = MPZ_REALLOC (r, rn); - - f = (mp_limb_t) x; - x -= f; - assert (x < 1.0); - i = rn-1; - rp[i] = f; - while (--i >= 0) - { - x = B * x; - f = (mp_limb_t) x; - x -= f; - assert (x < 1.0); - rp[i] = f; - } - - r->_mp_size = sign ? - rn : rn; -} - -void -mpz_init_set_d (mpz_t r, double x) -{ - mpz_init (r); - mpz_set_d (r, x); -} - -double -mpz_get_d (const mpz_t u) -{ - int m; - mp_limb_t l; - mp_size_t un; - double x; - double B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); - - un = GMP_ABS (u->_mp_size); - - if (un == 0) - return 0.0; - - l = u->_mp_d[--un]; - gmp_clz (m, l); - m = m + GMP_DBL_MANT_BITS - GMP_LIMB_BITS; - if (m < 0) - l &= GMP_LIMB_MAX << -m; - - for (x = l; --un >= 0;) - { - x = B*x; - if (m > 0) { - l = u->_mp_d[un]; - m -= GMP_LIMB_BITS; - if (m < 0) - l &= GMP_LIMB_MAX << -m; - x += l; - } - } - - if (u->_mp_size < 0) - x = -x; - - return x; -} - -int -mpz_cmpabs_d (const mpz_t x, double d) -{ - mp_size_t xn; - double B, Bi; - mp_size_t i; - - xn = x->_mp_size; - d = GMP_ABS (d); - - if (xn != 0) - { - xn = GMP_ABS (xn); - - B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); - Bi = 1.0 / B; - - /* Scale d so it can be compared with the top limb. */ - for (i = 1; i < xn; i++) - d *= Bi; - - if (d >= B) - return -1; - - /* Compare floor(d) to top limb, subtract and cancel when equal. */ - for (i = xn; i-- > 0;) - { - mp_limb_t f, xl; - - f = (mp_limb_t) d; - xl = x->_mp_d[i]; - if (xl > f) - return 1; - else if (xl < f) - return -1; - d = B * (d - f); - } - } - return - (d > 0.0); -} - -int -mpz_cmp_d (const mpz_t x, double d) -{ - if (x->_mp_size < 0) - { - if (d >= 0.0) - return -1; - else - return -mpz_cmpabs_d (x, d); - } - else - { - if (d < 0.0) - return 1; - else - return mpz_cmpabs_d (x, d); - } -} - - -/* MPZ comparisons and the like. 
*/ -int -mpz_sgn (const mpz_t u) -{ - return GMP_CMP (u->_mp_size, 0); -} - -int -mpz_cmp_si (const mpz_t u, long v) -{ - mp_size_t usize = u->_mp_size; - - if (v >= 0) - return mpz_cmp_ui (u, v); - else if (usize >= 0) - return 1; - else - return - mpz_cmpabs_ui (u, GMP_NEG_CAST (unsigned long int, v)); -} - -int -mpz_cmp_ui (const mpz_t u, unsigned long v) -{ - mp_size_t usize = u->_mp_size; - - if (usize < 0) - return -1; - else - return mpz_cmpabs_ui (u, v); -} - -int -mpz_cmp (const mpz_t a, const mpz_t b) -{ - mp_size_t asize = a->_mp_size; - mp_size_t bsize = b->_mp_size; - - if (asize != bsize) - return (asize < bsize) ? -1 : 1; - else if (asize >= 0) - return mpn_cmp (a->_mp_d, b->_mp_d, asize); - else - return mpn_cmp (b->_mp_d, a->_mp_d, -asize); -} - -int -mpz_cmpabs_ui (const mpz_t u, unsigned long v) -{ - mp_size_t un = GMP_ABS (u->_mp_size); - - if (! mpn_absfits_ulong_p (u->_mp_d, un)) - return 1; - else - { - unsigned long uu = mpz_get_ui (u); - return GMP_CMP(uu, v); - } -} - -int -mpz_cmpabs (const mpz_t u, const mpz_t v) -{ - return mpn_cmp4 (u->_mp_d, GMP_ABS (u->_mp_size), - v->_mp_d, GMP_ABS (v->_mp_size)); -} - -void -mpz_abs (mpz_t r, const mpz_t u) -{ - mpz_set (r, u); - r->_mp_size = GMP_ABS (r->_mp_size); -} - -void -mpz_neg (mpz_t r, const mpz_t u) -{ - mpz_set (r, u); - r->_mp_size = -r->_mp_size; -} - -void -mpz_swap (mpz_t u, mpz_t v) -{ - MP_SIZE_T_SWAP (u->_mp_alloc, v->_mp_alloc); - MPN_PTR_SWAP (u->_mp_d, u->_mp_size, v->_mp_d, v->_mp_size); -} - - -/* MPZ addition and subtraction */ - - -void -mpz_add_ui (mpz_t r, const mpz_t a, unsigned long b) -{ - mpz_t bb; - mpz_init_set_ui (bb, b); - mpz_add (r, a, bb); - mpz_clear (bb); -} - -void -mpz_sub_ui (mpz_t r, const mpz_t a, unsigned long b) -{ - mpz_ui_sub (r, b, a); - mpz_neg (r, r); -} - -void -mpz_ui_sub (mpz_t r, unsigned long a, const mpz_t b) -{ - mpz_neg (r, b); - mpz_add_ui (r, r, a); -} - -static mp_size_t -mpz_abs_add (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t an = GMP_ABS (a->_mp_size); - mp_size_t bn = GMP_ABS (b->_mp_size); - mp_ptr rp; - mp_limb_t cy; - - if (an < bn) - { - MPZ_SRCPTR_SWAP (a, b); - MP_SIZE_T_SWAP (an, bn); - } - - rp = MPZ_REALLOC (r, an + 1); - cy = mpn_add (rp, a->_mp_d, an, b->_mp_d, bn); - - rp[an] = cy; - - return an + cy; -} - -static mp_size_t -mpz_abs_sub (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t an = GMP_ABS (a->_mp_size); - mp_size_t bn = GMP_ABS (b->_mp_size); - int cmp; - mp_ptr rp; - - cmp = mpn_cmp4 (a->_mp_d, an, b->_mp_d, bn); - if (cmp > 0) - { - rp = MPZ_REALLOC (r, an); - gmp_assert_nocarry (mpn_sub (rp, a->_mp_d, an, b->_mp_d, bn)); - return mpn_normalized_size (rp, an); - } - else if (cmp < 0) - { - rp = MPZ_REALLOC (r, bn); - gmp_assert_nocarry (mpn_sub (rp, b->_mp_d, bn, a->_mp_d, an)); - return -mpn_normalized_size (rp, bn); - } - else - return 0; -} - -void -mpz_add (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t rn; - - if ( (a->_mp_size ^ b->_mp_size) >= 0) - rn = mpz_abs_add (r, a, b); - else - rn = mpz_abs_sub (r, a, b); - - r->_mp_size = a->_mp_size >= 0 ? rn : - rn; -} - -void -mpz_sub (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t rn; - - if ( (a->_mp_size ^ b->_mp_size) >= 0) - rn = mpz_abs_sub (r, a, b); - else - rn = mpz_abs_add (r, a, b); - - r->_mp_size = a->_mp_size >= 0 ? 
rn : - rn; -} - - -/* MPZ multiplication */ -void -mpz_mul_si (mpz_t r, const mpz_t u, long int v) -{ - if (v < 0) - { - mpz_mul_ui (r, u, GMP_NEG_CAST (unsigned long int, v)); - mpz_neg (r, r); - } - else - mpz_mul_ui (r, u, v); -} - -void -mpz_mul_ui (mpz_t r, const mpz_t u, unsigned long int v) -{ - mpz_t vv; - mpz_init_set_ui (vv, v); - mpz_mul (r, u, vv); - mpz_clear (vv); - return; -} - -void -mpz_mul (mpz_t r, const mpz_t u, const mpz_t v) -{ - int sign; - mp_size_t un, vn, rn; - mpz_t t; - mp_ptr tp; - - un = u->_mp_size; - vn = v->_mp_size; - - if (un == 0 || vn == 0) - { - r->_mp_size = 0; - return; - } - - sign = (un ^ vn) < 0; - - un = GMP_ABS (un); - vn = GMP_ABS (vn); - - mpz_init2 (t, (un + vn) * GMP_LIMB_BITS); - - tp = t->_mp_d; - if (un >= vn) - mpn_mul (tp, u->_mp_d, un, v->_mp_d, vn); - else - mpn_mul (tp, v->_mp_d, vn, u->_mp_d, un); - - rn = un + vn; - rn -= tp[rn-1] == 0; - - t->_mp_size = sign ? - rn : rn; - mpz_swap (r, t); - mpz_clear (t); -} - -void -mpz_mul_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bits) -{ - mp_size_t un, rn; - mp_size_t limbs; - unsigned shift; - mp_ptr rp; - - un = GMP_ABS (u->_mp_size); - if (un == 0) - { - r->_mp_size = 0; - return; - } - - limbs = bits / GMP_LIMB_BITS; - shift = bits % GMP_LIMB_BITS; - - rn = un + limbs + (shift > 0); - rp = MPZ_REALLOC (r, rn); - if (shift > 0) - { - mp_limb_t cy = mpn_lshift (rp + limbs, u->_mp_d, un, shift); - rp[rn-1] = cy; - rn -= (cy == 0); - } - else - mpn_copyd (rp + limbs, u->_mp_d, un); - - mpn_zero (rp, limbs); - - r->_mp_size = (u->_mp_size < 0) ? - rn : rn; -} - -void -mpz_addmul_ui (mpz_t r, const mpz_t u, unsigned long int v) -{ - mpz_t t; - mpz_init_set_ui (t, v); - mpz_mul (t, u, t); - mpz_add (r, r, t); - mpz_clear (t); -} - -void -mpz_submul_ui (mpz_t r, const mpz_t u, unsigned long int v) -{ - mpz_t t; - mpz_init_set_ui (t, v); - mpz_mul (t, u, t); - mpz_sub (r, r, t); - mpz_clear (t); -} - -void -mpz_addmul (mpz_t r, const mpz_t u, const mpz_t v) -{ - mpz_t t; - mpz_init (t); - mpz_mul (t, u, v); - mpz_add (r, r, t); - mpz_clear (t); -} - -void -mpz_submul (mpz_t r, const mpz_t u, const mpz_t v) -{ - mpz_t t; - mpz_init (t); - mpz_mul (t, u, v); - mpz_sub (r, r, t); - mpz_clear (t); -} - - -/* MPZ division */ -enum mpz_div_round_mode { GMP_DIV_FLOOR, GMP_DIV_CEIL, GMP_DIV_TRUNC }; - -/* Allows q or r to be zero. Returns 1 iff remainder is non-zero. */ -static int -mpz_div_qr (mpz_t q, mpz_t r, - const mpz_t n, const mpz_t d, enum mpz_div_round_mode mode) -{ - mp_size_t ns, ds, nn, dn, qs; - ns = n->_mp_size; - ds = d->_mp_size; - - if (ds == 0) - gmp_die("mpz_div_qr: Divide by zero."); - - if (ns == 0) - { - if (q) - q->_mp_size = 0; - if (r) - r->_mp_size = 0; - return 0; - } - - nn = GMP_ABS (ns); - dn = GMP_ABS (ds); - - qs = ds ^ ns; - - if (nn < dn) - { - if (mode == GMP_DIV_CEIL && qs >= 0) - { - /* q = 1, r = n - d */ - if (r) - mpz_sub (r, n, d); - if (q) - mpz_set_ui (q, 1); - } - else if (mode == GMP_DIV_FLOOR && qs < 0) - { - /* q = -1, r = n + d */ - if (r) - mpz_add (r, n, d); - if (q) - mpz_set_si (q, -1); - } - else - { - /* q = 0, r = d */ - if (r) - mpz_set (r, n); - if (q) - q->_mp_size = 0; - } - return 1; - } - else - { - mp_ptr np, qp; - mp_size_t qn, rn; - mpz_t tq, tr; - - mpz_init_set (tr, n); - np = tr->_mp_d; - - qn = nn - dn + 1; - - if (q) - { - mpz_init2 (tq, qn * GMP_LIMB_BITS); - qp = tq->_mp_d; - } - else - qp = NULL; - - mpn_div_qr (qp, np, nn, d->_mp_d, dn); - - if (qp) - { - qn -= (qp[qn-1] == 0); - - tq->_mp_size = qs < 0 ? 
-qn : qn; - } - rn = mpn_normalized_size (np, dn); - tr->_mp_size = ns < 0 ? - rn : rn; - - if (mode == GMP_DIV_FLOOR && qs < 0 && rn != 0) - { - if (q) - mpz_sub_ui (tq, tq, 1); - if (r) - mpz_add (tr, tr, d); - } - else if (mode == GMP_DIV_CEIL && qs >= 0 && rn != 0) - { - if (q) - mpz_add_ui (tq, tq, 1); - if (r) - mpz_sub (tr, tr, d); - } - - if (q) - { - mpz_swap (tq, q); - mpz_clear (tq); - } - if (r) - mpz_swap (tr, r); - - mpz_clear (tr); - - return rn != 0; - } -} - -void -mpz_cdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, r, n, d, GMP_DIV_CEIL); -} - -void -mpz_fdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, r, n, d, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, r, n, d, GMP_DIV_TRUNC); -} - -void -mpz_cdiv_q (mpz_t q, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, NULL, n, d, GMP_DIV_CEIL); -} - -void -mpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, NULL, n, d, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_q (mpz_t q, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC); -} - -void -mpz_cdiv_r (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL); -} - -void -mpz_fdiv_r (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_r (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, GMP_DIV_TRUNC); -} - -void -mpz_mod (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, d->_mp_size >= 0 ? GMP_DIV_FLOOR : GMP_DIV_CEIL); -} - -static void -mpz_div_q_2exp (mpz_t q, const mpz_t u, mp_bitcnt_t bit_index, - enum mpz_div_round_mode mode) -{ - mp_size_t un, qn; - mp_size_t limb_cnt; - mp_ptr qp; - int adjust; - - un = u->_mp_size; - if (un == 0) - { - q->_mp_size = 0; - return; - } - limb_cnt = bit_index / GMP_LIMB_BITS; - qn = GMP_ABS (un) - limb_cnt; - bit_index %= GMP_LIMB_BITS; - - if (mode == ((un > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* un != 0 here. */ - /* Note: Below, the final indexing at limb_cnt is valid because at - that point we have qn > 0. */ - adjust = (qn <= 0 - || !mpn_zero_p (u->_mp_d, limb_cnt) - || (u->_mp_d[limb_cnt] - & (((mp_limb_t) 1 << bit_index) - 1))); - else - adjust = 0; - - if (qn <= 0) - qn = 0; - else - { - qp = MPZ_REALLOC (q, qn); - - if (bit_index != 0) - { - mpn_rshift (qp, u->_mp_d + limb_cnt, qn, bit_index); - qn -= qp[qn - 1] == 0; - } - else - { - mpn_copyi (qp, u->_mp_d + limb_cnt, qn); - } - } - - q->_mp_size = qn; - - if (adjust) - mpz_add_ui (q, q, 1); - if (un < 0) - mpz_neg (q, q); -} - -static void -mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index, - enum mpz_div_round_mode mode) -{ - mp_size_t us, un, rn; - mp_ptr rp; - mp_limb_t mask; - - us = u->_mp_size; - if (us == 0 || bit_index == 0) - { - r->_mp_size = 0; - return; - } - rn = (bit_index + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; - assert (rn > 0); - - rp = MPZ_REALLOC (r, rn); - un = GMP_ABS (us); - - mask = GMP_LIMB_MAX >> (rn * GMP_LIMB_BITS - bit_index); - - if (rn > un) - { - /* Quotient (with truncation) is zero, and remainder is - non-zero */ - if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ - { - /* Have to negate and sign extend. */ - mp_size_t i; - - gmp_assert_nocarry (! 
mpn_neg (rp, u->_mp_d, un)); - for (i = un; i < rn - 1; i++) - rp[i] = GMP_LIMB_MAX; - - rp[rn-1] = mask; - us = -us; - } - else - { - /* Just copy */ - if (r != u) - mpn_copyi (rp, u->_mp_d, un); - - rn = un; - } - } - else - { - if (r != u) - mpn_copyi (rp, u->_mp_d, rn - 1); - - rp[rn-1] = u->_mp_d[rn-1] & mask; - - if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ - { - /* If r != 0, compute 2^{bit_count} - r. */ - mpn_neg (rp, rp, rn); - - rp[rn-1] &= mask; - - /* us is not used for anything else, so we can modify it - here to indicate flipped sign. */ - us = -us; - } - } - rn = mpn_normalized_size (rp, rn); - r->_mp_size = us < 0 ? -rn : rn; -} - -void -mpz_cdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_q_2exp (r, u, cnt, GMP_DIV_CEIL); -} - -void -mpz_fdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_q_2exp (r, u, cnt, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_q_2exp (r, u, cnt, GMP_DIV_TRUNC); -} - -void -mpz_cdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_r_2exp (r, u, cnt, GMP_DIV_CEIL); -} - -void -mpz_fdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_r_2exp (r, u, cnt, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_r_2exp (r, u, cnt, GMP_DIV_TRUNC); -} - -void -mpz_divexact (mpz_t q, const mpz_t n, const mpz_t d) -{ - gmp_assert_nocarry (mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC)); -} - -int -mpz_divisible_p (const mpz_t n, const mpz_t d) -{ - return mpz_div_qr (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; -} - -int -mpz_congruent_p (const mpz_t a, const mpz_t b, const mpz_t m) -{ - mpz_t t; - int res; - - /* a == b (mod 0) iff a == b */ - if (mpz_sgn (m) == 0) - return (mpz_cmp (a, b) == 0); - - mpz_init (t); - mpz_sub (t, a, b); - res = mpz_divisible_p (t, m); - mpz_clear (t); - - return res; -} - -static unsigned long -mpz_div_qr_ui (mpz_t q, mpz_t r, - const mpz_t n, unsigned long d, enum mpz_div_round_mode mode) -{ - unsigned long ret; - mpz_t rr, dd; - - mpz_init (rr); - mpz_init_set_ui (dd, d); - mpz_div_qr (q, rr, n, dd, mode); - mpz_clear (dd); - ret = mpz_get_ui (rr); - - if (r) - mpz_swap (r, rr); - mpz_clear (rr); - - return ret; -} - -unsigned long -mpz_cdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, r, n, d, GMP_DIV_CEIL); -} - -unsigned long -mpz_fdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, r, n, d, GMP_DIV_FLOOR); -} - -unsigned long -mpz_tdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, r, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_cdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_CEIL); -} - -unsigned long -mpz_fdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_FLOOR); -} - -unsigned long -mpz_tdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_cdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_CEIL); -} -unsigned long -mpz_fdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); -} -unsigned long -mpz_tdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_cdiv_ui 
(const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_CEIL); -} - -unsigned long -mpz_fdiv_ui (const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_FLOOR); -} - -unsigned long -mpz_tdiv_ui (const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_mod_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); -} - -void -mpz_divexact_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - gmp_assert_nocarry (mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC)); -} - -int -mpz_divisible_ui_p (const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; -} - - -/* GCD */ -static mp_limb_t -mpn_gcd_11 (mp_limb_t u, mp_limb_t v) -{ - unsigned shift; - - assert ( (u | v) > 0); - - if (u == 0) - return v; - else if (v == 0) - return u; - - gmp_ctz (shift, u | v); - - u >>= shift; - v >>= shift; - - if ( (u & 1) == 0) - MP_LIMB_T_SWAP (u, v); - - while ( (v & 1) == 0) - v >>= 1; - - while (u != v) - { - if (u > v) - { - u -= v; - do - u >>= 1; - while ( (u & 1) == 0); - } - else - { - v -= u; - do - v >>= 1; - while ( (v & 1) == 0); - } - } - return u << shift; -} - -mp_size_t -mpn_gcd (mp_ptr rp, mp_ptr up, mp_size_t un, mp_ptr vp, mp_size_t vn) -{ - assert (un >= vn); - assert (vn > 0); - assert (!GMP_MPN_OVERLAP_P (up, un, vp, vn)); - assert (vp[vn-1] > 0); - assert ((up[0] | vp[0]) & 1); - - if (un > vn) - mpn_div_qr (NULL, up, un, vp, vn); - - un = mpn_normalized_size (up, vn); - if (un == 0) - { - mpn_copyi (rp, vp, vn); - return vn; - } - - if (!(vp[0] & 1)) - MPN_PTR_SWAP (up, un, vp, vn); - - while (un > 1 || vn > 1) - { - int shift; - assert (vp[0] & 1); - - while (up[0] == 0) - { - up++; - un--; - } - gmp_ctz (shift, up[0]); - if (shift > 0) - { - gmp_assert_nocarry (mpn_rshift(up, up, un, shift)); - un -= (up[un-1] == 0); - } - - if (un < vn) - MPN_PTR_SWAP (up, un, vp, vn); - else if (un == vn) - { - int c = mpn_cmp (up, vp, un); - if (c == 0) - { - mpn_copyi (rp, up, un); - return un; - } - else if (c < 0) - MP_PTR_SWAP (up, vp); - } - - gmp_assert_nocarry (mpn_sub (up, up, un, vp, vn)); - un = mpn_normalized_size (up, un); - } - rp[0] = mpn_gcd_11 (up[0], vp[0]); - return 1; -} - -unsigned long -mpz_gcd_ui (mpz_t g, const mpz_t u, unsigned long v) -{ - mpz_t t; - mpz_init_set_ui(t, v); - mpz_gcd (t, u, t); - if (v > 0) - v = mpz_get_ui (t); - - if (g) - mpz_swap (t, g); - - mpz_clear (t); - - return v; -} - -static mp_bitcnt_t -mpz_make_odd (mpz_t r) -{ - mp_bitcnt_t shift; - - assert (r->_mp_size > 0); - /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ - shift = mpn_scan1 (r->_mp_d, 0); - mpz_tdiv_q_2exp (r, r, shift); - - return shift; -} - -void -mpz_gcd (mpz_t g, const mpz_t u, const mpz_t v) -{ - mpz_t tu, tv; - mp_bitcnt_t uz, vz, gz; - - if (u->_mp_size == 0) - { - mpz_abs (g, v); - return; - } - if (v->_mp_size == 0) - { - mpz_abs (g, u); - return; - } - - mpz_init (tu); - mpz_init (tv); - - mpz_abs (tu, u); - uz = mpz_make_odd (tu); - mpz_abs (tv, v); - vz = mpz_make_odd (tv); - gz = GMP_MIN (uz, vz); - - if (tu->_mp_size < tv->_mp_size) - mpz_swap (tu, tv); - - tu->_mp_size = mpn_gcd (tu->_mp_d, tu->_mp_d, tu->_mp_size, tv->_mp_d, tv->_mp_size); - mpz_mul_2exp (g, tu, gz); - - mpz_clear (tu); - mpz_clear (tv); -} - -void -mpz_gcdext (mpz_t g, mpz_t s, mpz_t t, const mpz_t u, const mpz_t v) -{ - mpz_t tu, tv, s0, s1, t0, t1; - mp_bitcnt_t uz, vz, 
gz; - mp_bitcnt_t power; - int cmp; - - if (u->_mp_size == 0) - { - /* g = 0 u + sgn(v) v */ - signed long sign = mpz_sgn (v); - mpz_abs (g, v); - if (s) - s->_mp_size = 0; - if (t) - mpz_set_si (t, sign); - return; - } - - if (v->_mp_size == 0) - { - /* g = sgn(u) u + 0 v */ - signed long sign = mpz_sgn (u); - mpz_abs (g, u); - if (s) - mpz_set_si (s, sign); - if (t) - t->_mp_size = 0; - return; - } - - mpz_init (tu); - mpz_init (tv); - mpz_init (s0); - mpz_init (s1); - mpz_init (t0); - mpz_init (t1); - - mpz_abs (tu, u); - uz = mpz_make_odd (tu); - mpz_abs (tv, v); - vz = mpz_make_odd (tv); - gz = GMP_MIN (uz, vz); - - uz -= gz; - vz -= gz; - - /* Cofactors corresponding to odd gcd. gz handled later. */ - if (tu->_mp_size < tv->_mp_size) - { - mpz_swap (tu, tv); - MPZ_SRCPTR_SWAP (u, v); - MPZ_PTR_SWAP (s, t); - MP_BITCNT_T_SWAP (uz, vz); - } - - /* Maintain - * - * u = t0 tu + t1 tv - * v = s0 tu + s1 tv - * - * where u and v denote the inputs with common factors of two - * eliminated, and det (s0, t0; s1, t1) = 2^p. Then - * - * 2^p tu = s1 u - t1 v - * 2^p tv = -s0 u + t0 v - */ - - /* After initial division, tu = q tv + tu', we have - * - * u = 2^uz (tu' + q tv) - * v = 2^vz tv - * - * or - * - * t0 = 2^uz, t1 = 2^uz q - * s0 = 0, s1 = 2^vz - */ - - mpz_tdiv_qr (t1, tu, tu, tv); - mpz_mul_2exp (t1, t1, uz); - - mpz_setbit (s1, vz); - power = uz + vz; - - if (tu->_mp_size > 0) - { - mp_bitcnt_t shift; - shift = mpz_make_odd (tu); - mpz_setbit (t0, uz + shift); - power += shift; - - for (;;) - { - int c; - c = mpz_cmp (tu, tv); - if (c == 0) - break; - - if (c < 0) - { - /* tv = tv' + tu - * - * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv' - * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */ - - mpz_sub (tv, tv, tu); - mpz_add (t0, t0, t1); - mpz_add (s0, s0, s1); - - shift = mpz_make_odd (tv); - mpz_mul_2exp (t1, t1, shift); - mpz_mul_2exp (s1, s1, shift); - } - else - { - mpz_sub (tu, tu, tv); - mpz_add (t1, t0, t1); - mpz_add (s1, s0, s1); - - shift = mpz_make_odd (tu); - mpz_mul_2exp (t0, t0, shift); - mpz_mul_2exp (s0, s0, shift); - } - power += shift; - } - } - else - mpz_setbit (t0, uz); - - /* Now tv = odd part of gcd, and -s0 and t0 are corresponding - cofactors. */ - - mpz_mul_2exp (tv, tv, gz); - mpz_neg (s0, s0); - - /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To - adjust cofactors, we need u / g and v / g */ - - mpz_divexact (s1, v, tv); - mpz_abs (s1, s1); - mpz_divexact (t1, u, tv); - mpz_abs (t1, t1); - - while (power-- > 0) - { - /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */ - if (mpz_odd_p (s0) || mpz_odd_p (t0)) - { - mpz_sub (s0, s0, s1); - mpz_add (t0, t0, t1); - } - assert (mpz_even_p (t0) && mpz_even_p (s0)); - mpz_tdiv_q_2exp (s0, s0, 1); - mpz_tdiv_q_2exp (t0, t0, 1); - } - - /* Choose small cofactors (they should generally satify - - |s| < |u| / 2g and |t| < |v| / 2g, - - with some documented exceptions). Always choose the smallest s, - if there are two choices for s with same absolute value, choose - the one with smallest corresponding t (this asymmetric condition - is needed to prefer s = 0, |t| = 1 when g = |a| = |b|). 
*/ - mpz_add (s1, s0, s1); - mpz_sub (t1, t0, t1); - cmp = mpz_cmpabs (s0, s1); - if (cmp > 0 || (cmp == 0 && mpz_cmpabs (t0, t1) > 0)) - { - mpz_swap (s0, s1); - mpz_swap (t0, t1); - } - if (u->_mp_size < 0) - mpz_neg (s0, s0); - if (v->_mp_size < 0) - mpz_neg (t0, t0); - - mpz_swap (g, tv); - if (s) - mpz_swap (s, s0); - if (t) - mpz_swap (t, t0); - - mpz_clear (tu); - mpz_clear (tv); - mpz_clear (s0); - mpz_clear (s1); - mpz_clear (t0); - mpz_clear (t1); -} - -void -mpz_lcm (mpz_t r, const mpz_t u, const mpz_t v) -{ - mpz_t g; - - if (u->_mp_size == 0 || v->_mp_size == 0) - { - r->_mp_size = 0; - return; - } - - mpz_init (g); - - mpz_gcd (g, u, v); - mpz_divexact (g, u, g); - mpz_mul (r, g, v); - - mpz_clear (g); - mpz_abs (r, r); -} - -void -mpz_lcm_ui (mpz_t r, const mpz_t u, unsigned long v) -{ - if (v == 0 || u->_mp_size == 0) - { - r->_mp_size = 0; - return; - } - - v /= mpz_gcd_ui (NULL, u, v); - mpz_mul_ui (r, u, v); - - mpz_abs (r, r); -} - -int -mpz_invert (mpz_t r, const mpz_t u, const mpz_t m) -{ - mpz_t g, tr; - int invertible; - - if (u->_mp_size == 0 || mpz_cmpabs_ui (m, 1) <= 0) - return 0; - - mpz_init (g); - mpz_init (tr); - - mpz_gcdext (g, tr, NULL, u, m); - invertible = (mpz_cmp_ui (g, 1) == 0); - - if (invertible) - { - if (tr->_mp_size < 0) - { - if (m->_mp_size >= 0) - mpz_add (tr, tr, m); - else - mpz_sub (tr, tr, m); - } - mpz_swap (r, tr); - } - - mpz_clear (g); - mpz_clear (tr); - return invertible; -} - - -/* Higher level operations (sqrt, pow and root) */ - -void -mpz_pow_ui (mpz_t r, const mpz_t b, unsigned long e) -{ - unsigned long bit; - mpz_t tr; - mpz_init_set_ui (tr, 1); - - bit = GMP_ULONG_HIGHBIT; - do - { - mpz_mul (tr, tr, tr); - if (e & bit) - mpz_mul (tr, tr, b); - bit >>= 1; - } - while (bit > 0); - - mpz_swap (r, tr); - mpz_clear (tr); -} - -void -mpz_ui_pow_ui (mpz_t r, unsigned long blimb, unsigned long e) -{ - mpz_t b; - - mpz_init_set_ui (b, blimb); - mpz_pow_ui (r, b, e); - mpz_clear (b); -} - -void -mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m) -{ - mpz_t tr; - mpz_t base; - mp_size_t en, mn; - mp_srcptr mp; - struct gmp_div_inverse minv; - unsigned shift; - mp_ptr tp = NULL; - - en = GMP_ABS (e->_mp_size); - mn = GMP_ABS (m->_mp_size); - if (mn == 0) - gmp_die ("mpz_powm: Zero modulo."); - - if (en == 0) - { - mpz_set_ui (r, mpz_cmpabs_ui (m, 1)); - return; - } - - mp = m->_mp_d; - mpn_div_qr_invert (&minv, mp, mn); - shift = minv.shift; - - if (shift > 0) - { - /* To avoid shifts, we do all our reductions, except the final - one, using a *normalized* m. */ - minv.shift = 0; - - tp = gmp_alloc_limbs (mn); - gmp_assert_nocarry (mpn_lshift (tp, mp, mn, shift)); - mp = tp; - } - - mpz_init (base); - - if (e->_mp_size < 0) - { - if (!mpz_invert (base, b, m)) - gmp_die ("mpz_powm: Negative exponent and non-invertible base."); - } - else - { - mp_size_t bn; - mpz_abs (base, b); - - bn = base->_mp_size; - if (bn >= mn) - { - mpn_div_qr_preinv (NULL, base->_mp_d, base->_mp_size, mp, mn, &minv); - bn = mn; - } - - /* We have reduced the absolute value. Now take care of the - sign. Note that we get zero represented non-canonically as - m. 
*/ - if (b->_mp_size < 0) - { - mp_ptr bp = MPZ_REALLOC (base, mn); - gmp_assert_nocarry (mpn_sub (bp, mp, mn, bp, bn)); - bn = mn; - } - base->_mp_size = mpn_normalized_size (base->_mp_d, bn); - } - mpz_init_set_ui (tr, 1); - - while (--en >= 0) - { - mp_limb_t w = e->_mp_d[en]; - mp_limb_t bit; - - bit = GMP_LIMB_HIGHBIT; - do - { - mpz_mul (tr, tr, tr); - if (w & bit) - mpz_mul (tr, tr, base); - if (tr->_mp_size > mn) - { - mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); - tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); - } - bit >>= 1; - } - while (bit > 0); - } - - /* Final reduction */ - if (tr->_mp_size >= mn) - { - minv.shift = shift; - mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); - tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); - } - if (tp) - gmp_free_limbs (tp, mn); - - mpz_swap (r, tr); - mpz_clear (tr); - mpz_clear (base); -} - -void -mpz_powm_ui (mpz_t r, const mpz_t b, unsigned long elimb, const mpz_t m) -{ - mpz_t e; - - mpz_init_set_ui (e, elimb); - mpz_powm (r, b, e, m); - mpz_clear (e); -} - -/* x=trunc(y^(1/z)), r=y-x^z */ -void -mpz_rootrem (mpz_t x, mpz_t r, const mpz_t y, unsigned long z) -{ - int sgn; - mp_bitcnt_t bc; - mpz_t t, u; - - sgn = y->_mp_size < 0; - if ((~z & sgn) != 0) - gmp_die ("mpz_rootrem: Negative argument, with even root."); - if (z == 0) - gmp_die ("mpz_rootrem: Zeroth root."); - - if (mpz_cmpabs_ui (y, 1) <= 0) { - if (x) - mpz_set (x, y); - if (r) - r->_mp_size = 0; - return; - } - - mpz_init (u); - mpz_init (t); - bc = (mpz_sizeinbase (y, 2) - 1) / z + 1; - mpz_setbit (t, bc); - - if (z == 2) /* simplify sqrt loop: z-1 == 1 */ - do { - mpz_swap (u, t); /* u = x */ - mpz_tdiv_q (t, y, u); /* t = y/x */ - mpz_add (t, t, u); /* t = y/x + x */ - mpz_tdiv_q_2exp (t, t, 1); /* x'= (y/x + x)/2 */ - } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ - else /* z != 2 */ { - mpz_t v; - - mpz_init (v); - if (sgn) - mpz_neg (t, t); - - do { - mpz_swap (u, t); /* u = x */ - mpz_pow_ui (t, u, z - 1); /* t = x^(z-1) */ - mpz_tdiv_q (t, y, t); /* t = y/x^(z-1) */ - mpz_mul_ui (v, u, z - 1); /* v = x*(z-1) */ - mpz_add (t, t, v); /* t = y/x^(z-1) + x*(z-1) */ - mpz_tdiv_q_ui (t, t, z); /* x'=(y/x^(z-1) + x*(z-1))/z */ - } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ - - mpz_clear (v); - } - - if (r) { - mpz_pow_ui (t, u, z); - mpz_sub (r, y, t); - } - if (x) - mpz_swap (x, u); - mpz_clear (u); - mpz_clear (t); -} - -int -mpz_root (mpz_t x, const mpz_t y, unsigned long z) -{ - int res; - mpz_t r; - - mpz_init (r); - mpz_rootrem (x, r, y, z); - res = r->_mp_size == 0; - mpz_clear (r); - - return res; -} - -/* Compute s = floor(sqrt(u)) and r = u - s^2. 
Allows r == NULL */ -void -mpz_sqrtrem (mpz_t s, mpz_t r, const mpz_t u) -{ - mpz_rootrem (s, r, u, 2); -} - -void -mpz_sqrt (mpz_t s, const mpz_t u) -{ - mpz_rootrem (s, NULL, u, 2); -} - -int -mpz_perfect_square_p (const mpz_t u) -{ - if (u->_mp_size <= 0) - return (u->_mp_size == 0); - else - return mpz_root (NULL, u, 2); -} - -int -mpn_perfect_square_p (mp_srcptr p, mp_size_t n) -{ - mpz_t t; - - assert (n > 0); - assert (p [n-1] != 0); - return mpz_root (NULL, mpz_roinit_normal_n (t, p, n), 2); -} - -mp_size_t -mpn_sqrtrem (mp_ptr sp, mp_ptr rp, mp_srcptr p, mp_size_t n) -{ - mpz_t s, r, u; - mp_size_t res; - - assert (n > 0); - assert (p [n-1] != 0); - - mpz_init (r); - mpz_init (s); - mpz_rootrem (s, r, mpz_roinit_normal_n (u, p, n), 2); - - assert (s->_mp_size == (n+1)/2); - mpn_copyd (sp, s->_mp_d, s->_mp_size); - mpz_clear (s); - res = r->_mp_size; - if (rp) - mpn_copyd (rp, r->_mp_d, res); - mpz_clear (r); - return res; -} - -/* Combinatorics */ - -void -mpz_mfac_uiui (mpz_t x, unsigned long n, unsigned long m) -{ - mpz_set_ui (x, n + (n == 0)); - if (m + 1 < 2) return; - while (n > m + 1) - mpz_mul_ui (x, x, n -= m); -} - -void -mpz_2fac_ui (mpz_t x, unsigned long n) -{ - mpz_mfac_uiui (x, n, 2); -} - -void -mpz_fac_ui (mpz_t x, unsigned long n) -{ - mpz_mfac_uiui (x, n, 1); -} - -void -mpz_bin_uiui (mpz_t r, unsigned long n, unsigned long k) -{ - mpz_t t; - - mpz_set_ui (r, k <= n); - - if (k > (n >> 1)) - k = (k <= n) ? n - k : 0; - - mpz_init (t); - mpz_fac_ui (t, k); - - for (; k > 0; --k) - mpz_mul_ui (r, r, n--); - - mpz_divexact (r, r, t); - mpz_clear (t); -} - - -/* Primality testing */ - -/* Computes Kronecker (a/b) with odd b, a!=0 and GCD(a,b) = 1 */ -/* Adapted from JACOBI_BASE_METHOD==4 in mpn/generic/jacbase.c */ -static int -gmp_jacobi_coprime (mp_limb_t a, mp_limb_t b) -{ - int c, bit = 0; - - assert (b & 1); - assert (a != 0); - /* assert (mpn_gcd_11 (a, b) == 1); */ - - /* Below, we represent a and b shifted right so that the least - significant one bit is implicit. */ - b >>= 1; - - gmp_ctz(c, a); - a >>= 1; - - for (;;) - { - a >>= c; - /* (2/b) = -1 if b = 3 or 5 mod 8 */ - bit ^= c & (b ^ (b >> 1)); - if (a < b) - { - if (a == 0) - return bit & 1 ? -1 : 1; - bit ^= a & b; - a = b - a; - b -= a; - } - else - { - a -= b; - assert (a != 0); - } - - gmp_ctz(c, a); - ++c; - } -} - -static void -gmp_lucas_step_k_2k (mpz_t V, mpz_t Qk, const mpz_t n) -{ - mpz_mod (Qk, Qk, n); - /* V_{2k} <- V_k ^ 2 - 2Q^k */ - mpz_mul (V, V, V); - mpz_submul_ui (V, Qk, 2); - mpz_tdiv_r (V, V, n); - /* Q^{2k} = (Q^k)^2 */ - mpz_mul (Qk, Qk, Qk); -} - -/* Computes V_k, Q^k (mod n) for the Lucas' sequence */ -/* with P=1, Q=Q; k = (n>>b0)|1. */ -/* Requires an odd n > 4; b0 > 0; -2*Q must not overflow a long */ -/* Returns (U_k == 0) and sets V=V_k and Qk=Q^k. 
*/ -static int -gmp_lucas_mod (mpz_t V, mpz_t Qk, long Q, - mp_bitcnt_t b0, const mpz_t n) -{ - mp_bitcnt_t bs; - mpz_t U; - int res; - - assert (b0 > 0); - assert (Q <= - (LONG_MIN / 2)); - assert (Q >= - (LONG_MAX / 2)); - assert (mpz_cmp_ui (n, 4) > 0); - assert (mpz_odd_p (n)); - - mpz_init_set_ui (U, 1); /* U1 = 1 */ - mpz_set_ui (V, 1); /* V1 = 1 */ - mpz_set_si (Qk, Q); - - for (bs = mpz_sizeinbase (n, 2) - 1; --bs >= b0;) - { - /* U_{2k} <- U_k * V_k */ - mpz_mul (U, U, V); - /* V_{2k} <- V_k ^ 2 - 2Q^k */ - /* Q^{2k} = (Q^k)^2 */ - gmp_lucas_step_k_2k (V, Qk, n); - - /* A step k->k+1 is performed if the bit in $n$ is 1 */ - /* mpz_tstbit(n,bs) or the bit is 0 in $n$ but */ - /* should be 1 in $n+1$ (bs == b0) */ - if (b0 == bs || mpz_tstbit (n, bs)) - { - /* Q^{k+1} <- Q^k * Q */ - mpz_mul_si (Qk, Qk, Q); - /* U_{k+1} <- (U_k + V_k) / 2 */ - mpz_swap (U, V); /* Keep in V the old value of U_k */ - mpz_add (U, U, V); - /* We have to compute U/2, so we need an even value, */ - /* equivalent (mod n) */ - if (mpz_odd_p (U)) - mpz_add (U, U, n); - mpz_tdiv_q_2exp (U, U, 1); - /* V_{k+1} <-(D*U_k + V_k) / 2 = - U_{k+1} + (D-1)/2*U_k = U_{k+1} - 2Q*U_k */ - mpz_mul_si (V, V, -2*Q); - mpz_add (V, U, V); - mpz_tdiv_r (V, V, n); - } - mpz_tdiv_r (U, U, n); - } - - res = U->_mp_size == 0; - mpz_clear (U); - return res; -} - -/* Performs strong Lucas' test on x, with parameters suggested */ -/* for the BPSW test. Qk is only passed to recycle a variable. */ -/* Requires GCD (x,6) = 1.*/ -static int -gmp_stronglucas (const mpz_t x, mpz_t Qk) -{ - mp_bitcnt_t b0; - mpz_t V, n; - mp_limb_t maxD, D; /* The absolute value is stored. */ - long Q; - mp_limb_t tl; - - /* Test on the absolute value. */ - mpz_roinit_normal_n (n, x->_mp_d, GMP_ABS (x->_mp_size)); - - assert (mpz_odd_p (n)); - /* assert (mpz_gcd_ui (NULL, n, 6) == 1); */ - if (mpz_root (Qk, n, 2)) - return 0; /* A square is composite. */ - - /* Check Ds up to square root (in case, n is prime) - or avoid overflows */ - maxD = (Qk->_mp_size == 1) ? Qk->_mp_d [0] - 1 : GMP_LIMB_MAX; - - D = 3; - /* Search a D such that (D/n) = -1 in the sequence 5,-7,9,-11,.. */ - /* For those Ds we have (D/n) = (n/|D|) */ - do - { - if (D >= maxD) - return 1 + (D != GMP_LIMB_MAX); /* (1 + ! ~ D) */ - D += 2; - tl = mpz_tdiv_ui (n, D); - if (tl == 0) - return 0; - } - while (gmp_jacobi_coprime (tl, D) == 1); - - mpz_init (V); - - /* n-(D/n) = n+1 = d*2^{b0}, with d = (n>>b0) | 1 */ - b0 = mpn_common_scan (~ n->_mp_d[0], 0, n->_mp_d, n->_mp_size, GMP_LIMB_MAX); - /* b0 = mpz_scan0 (n, 0); */ - - /* D= P^2 - 4Q; P = 1; Q = (1-D)/4 */ - Q = (D & 2) ? (long) (D >> 2) + 1 : -(long) (D >> 2); - - if (! gmp_lucas_mod (V, Qk, Q, b0, n)) /* If Ud != 0 */ - while (V->_mp_size != 0 && --b0 != 0) /* while Vk != 0 */ - /* V <- V ^ 2 - 2Q^k */ - /* Q^{2k} = (Q^k)^2 */ - gmp_lucas_step_k_2k (V, Qk, n); - - mpz_clear (V); - return (b0 != 0); -} - -static int -gmp_millerrabin (const mpz_t n, const mpz_t nm1, mpz_t y, - const mpz_t q, mp_bitcnt_t k) -{ - assert (k > 0); - - /* Caller must initialize y to the base. */ - mpz_powm (y, y, q, n); - - if (mpz_cmp_ui (y, 1) == 0 || mpz_cmp (y, nm1) == 0) - return 1; - - while (--k > 0) - { - mpz_powm_ui (y, y, 2, n); - if (mpz_cmp (y, nm1) == 0) - return 1; - } - return 0; -} - -/* This product is 0xc0cfd797, and fits in 32 bits. 
*/ -#define GMP_PRIME_PRODUCT \ - (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL) - -/* Bit (p+1)/2 is set, for each odd prime <= 61 */ -#define GMP_PRIME_MASK 0xc96996dcUL - -int -mpz_probab_prime_p (const mpz_t n, int reps) -{ - mpz_t nm1; - mpz_t q; - mpz_t y; - mp_bitcnt_t k; - int is_prime; - int j; - - /* Note that we use the absolute value of n only, for compatibility - with the real GMP. */ - if (mpz_even_p (n)) - return (mpz_cmpabs_ui (n, 2) == 0) ? 2 : 0; - - /* Above test excludes n == 0 */ - assert (n->_mp_size != 0); - - if (mpz_cmpabs_ui (n, 64) < 0) - return (GMP_PRIME_MASK >> (n->_mp_d[0] >> 1)) & 2; - - if (mpz_gcd_ui (NULL, n, GMP_PRIME_PRODUCT) != 1) - return 0; - - /* All prime factors are >= 31. */ - if (mpz_cmpabs_ui (n, 31*31) < 0) - return 2; - - mpz_init (nm1); - mpz_init (q); - - /* Find q and k, where q is odd and n = 1 + 2**k * q. */ - mpz_abs (nm1, n); - nm1->_mp_d[0] -= 1; - /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ - k = mpn_scan1 (nm1->_mp_d, 0); - mpz_tdiv_q_2exp (q, nm1, k); - - /* BPSW test */ - mpz_init_set_ui (y, 2); - is_prime = gmp_millerrabin (n, nm1, y, q, k) && gmp_stronglucas (n, y); - reps -= 24; /* skip the first 24 repetitions */ - - /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] = - j^2 + j + 41 using Euler's polynomial. We potentially stop early, - if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps > - 30 (a[30] == 971 > 31*31 == 961). */ - - for (j = 0; is_prime & (j < reps); j++) - { - mpz_set_ui (y, (unsigned long) j*j+j+41); - if (mpz_cmp (y, nm1) >= 0) - { - /* Don't try any further bases. This "early" break does not affect - the result for any reasonable reps value (<=5000 was tested) */ - assert (j >= 30); - break; - } - is_prime = gmp_millerrabin (n, nm1, y, q, k); - } - mpz_clear (nm1); - mpz_clear (q); - mpz_clear (y); - - return is_prime; -} - - -/* Logical operations and bit manipulation. */ - -/* Numbers are treated as if represented in two's complement (and - infinitely sign extended). For a negative values we get the two's - complement from -x = ~x + 1, where ~ is bitwise complement. - Negation transforms - - xxxx10...0 - - into - - yyyy10...0 - - where yyyy is the bitwise complement of xxxx. So least significant - bits, up to and including the first one bit, are unchanged, and - the more significant bits are all complemented. - - To change a bit from zero to one in a negative number, subtract the - corresponding power of two from the absolute value. This can never - underflow. To change a bit from one to zero, add the corresponding - power of two, and this might overflow. E.g., if x = -001111, the - two's complement is 110001. Clearing the least significant bit, we - get two's complement 110000, and -010000. */ - -int -mpz_tstbit (const mpz_t d, mp_bitcnt_t bit_index) -{ - mp_size_t limb_index; - unsigned shift; - mp_size_t ds; - mp_size_t dn; - mp_limb_t w; - int bit; - - ds = d->_mp_size; - dn = GMP_ABS (ds); - limb_index = bit_index / GMP_LIMB_BITS; - if (limb_index >= dn) - return ds < 0; - - shift = bit_index % GMP_LIMB_BITS; - w = d->_mp_d[limb_index]; - bit = (w >> shift) & 1; - - if (ds < 0) - { - /* d < 0. Check if any of the bits below is set: If so, our bit - must be complemented. 
*/ - if (shift > 0 && (mp_limb_t) (w << (GMP_LIMB_BITS - shift)) > 0) - return bit ^ 1; - while (--limb_index >= 0) - if (d->_mp_d[limb_index] > 0) - return bit ^ 1; - } - return bit; -} - -static void -mpz_abs_add_bit (mpz_t d, mp_bitcnt_t bit_index) -{ - mp_size_t dn, limb_index; - mp_limb_t bit; - mp_ptr dp; - - dn = GMP_ABS (d->_mp_size); - - limb_index = bit_index / GMP_LIMB_BITS; - bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); - - if (limb_index >= dn) - { - mp_size_t i; - /* The bit should be set outside of the end of the number. - We have to increase the size of the number. */ - dp = MPZ_REALLOC (d, limb_index + 1); - - dp[limb_index] = bit; - for (i = dn; i < limb_index; i++) - dp[i] = 0; - dn = limb_index + 1; - } - else - { - mp_limb_t cy; - - dp = d->_mp_d; - - cy = mpn_add_1 (dp + limb_index, dp + limb_index, dn - limb_index, bit); - if (cy > 0) - { - dp = MPZ_REALLOC (d, dn + 1); - dp[dn++] = cy; - } - } - - d->_mp_size = (d->_mp_size < 0) ? - dn : dn; -} - -static void -mpz_abs_sub_bit (mpz_t d, mp_bitcnt_t bit_index) -{ - mp_size_t dn, limb_index; - mp_ptr dp; - mp_limb_t bit; - - dn = GMP_ABS (d->_mp_size); - dp = d->_mp_d; - - limb_index = bit_index / GMP_LIMB_BITS; - bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); - - assert (limb_index < dn); - - gmp_assert_nocarry (mpn_sub_1 (dp + limb_index, dp + limb_index, - dn - limb_index, bit)); - dn = mpn_normalized_size (dp, dn); - d->_mp_size = (d->_mp_size < 0) ? - dn : dn; -} - -void -mpz_setbit (mpz_t d, mp_bitcnt_t bit_index) -{ - if (!mpz_tstbit (d, bit_index)) - { - if (d->_mp_size >= 0) - mpz_abs_add_bit (d, bit_index); - else - mpz_abs_sub_bit (d, bit_index); - } -} - -void -mpz_clrbit (mpz_t d, mp_bitcnt_t bit_index) -{ - if (mpz_tstbit (d, bit_index)) - { - if (d->_mp_size >= 0) - mpz_abs_sub_bit (d, bit_index); - else - mpz_abs_add_bit (d, bit_index); - } -} - -void -mpz_combit (mpz_t d, mp_bitcnt_t bit_index) -{ - if (mpz_tstbit (d, bit_index) ^ (d->_mp_size < 0)) - mpz_abs_sub_bit (d, bit_index); - else - mpz_abs_add_bit (d, bit_index); -} - -void -mpz_com (mpz_t r, const mpz_t u) -{ - mpz_add_ui (r, u, 1); - mpz_neg (r, r); -} - -void -mpz_and (mpz_t r, const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, rn, i; - mp_ptr up, vp, rp; - - mp_limb_t ux, vx, rx; - mp_limb_t uc, vc, rc; - mp_limb_t ul, vl, rl; - - un = GMP_ABS (u->_mp_size); - vn = GMP_ABS (v->_mp_size); - if (un < vn) - { - MPZ_SRCPTR_SWAP (u, v); - MP_SIZE_T_SWAP (un, vn); - } - if (vn == 0) - { - r->_mp_size = 0; - return; - } - - uc = u->_mp_size < 0; - vc = v->_mp_size < 0; - rc = uc & vc; - - ux = -uc; - vx = -vc; - rx = -rc; - - /* If the smaller input is positive, higher limbs don't matter. */ - rn = vx ? un : vn; - - rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); - - up = u->_mp_d; - vp = v->_mp_d; - - i = 0; - do - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - vl = (vp[i] ^ vx) + vc; - vc = vl < vc; - - rl = ( (ul & vl) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - while (++i < vn); - assert (vc == 0); - - for (; i < rn; i++) - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - rl = ( (ul & vx) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - if (rc) - rp[rn++] = rc; - else - rn = mpn_normalized_size (rp, rn); - - r->_mp_size = rx ? 
-rn : rn; -} - -void -mpz_ior (mpz_t r, const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, rn, i; - mp_ptr up, vp, rp; - - mp_limb_t ux, vx, rx; - mp_limb_t uc, vc, rc; - mp_limb_t ul, vl, rl; - - un = GMP_ABS (u->_mp_size); - vn = GMP_ABS (v->_mp_size); - if (un < vn) - { - MPZ_SRCPTR_SWAP (u, v); - MP_SIZE_T_SWAP (un, vn); - } - if (vn == 0) - { - mpz_set (r, u); - return; - } - - uc = u->_mp_size < 0; - vc = v->_mp_size < 0; - rc = uc | vc; - - ux = -uc; - vx = -vc; - rx = -rc; - - /* If the smaller input is negative, by sign extension higher limbs - don't matter. */ - rn = vx ? vn : un; - - rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); - - up = u->_mp_d; - vp = v->_mp_d; - - i = 0; - do - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - vl = (vp[i] ^ vx) + vc; - vc = vl < vc; - - rl = ( (ul | vl) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - while (++i < vn); - assert (vc == 0); - - for (; i < rn; i++) - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - rl = ( (ul | vx) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - if (rc) - rp[rn++] = rc; - else - rn = mpn_normalized_size (rp, rn); - - r->_mp_size = rx ? -rn : rn; -} - -void -mpz_xor (mpz_t r, const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, i; - mp_ptr up, vp, rp; - - mp_limb_t ux, vx, rx; - mp_limb_t uc, vc, rc; - mp_limb_t ul, vl, rl; - - un = GMP_ABS (u->_mp_size); - vn = GMP_ABS (v->_mp_size); - if (un < vn) - { - MPZ_SRCPTR_SWAP (u, v); - MP_SIZE_T_SWAP (un, vn); - } - if (vn == 0) - { - mpz_set (r, u); - return; - } - - uc = u->_mp_size < 0; - vc = v->_mp_size < 0; - rc = uc ^ vc; - - ux = -uc; - vx = -vc; - rx = -rc; - - rp = MPZ_REALLOC (r, un + (mp_size_t) rc); - - up = u->_mp_d; - vp = v->_mp_d; - - i = 0; - do - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - vl = (vp[i] ^ vx) + vc; - vc = vl < vc; - - rl = (ul ^ vl ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - while (++i < vn); - assert (vc == 0); - - for (; i < un; i++) - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - rl = (ul ^ ux) + rc; - rc = rl < rc; - rp[i] = rl; - } - if (rc) - rp[un++] = rc; - else - un = mpn_normalized_size (rp, un); - - r->_mp_size = rx ? -un : un; -} - -static unsigned -gmp_popcount_limb (mp_limb_t x) -{ - unsigned c; - - /* Do 16 bits at a time, to avoid limb-sized constants. 
*/ - int LOCAL_SHIFT_BITS = 16; - for (c = 0; x > 0;) - { - unsigned w = x - ((x >> 1) & 0x5555); - w = ((w >> 2) & 0x3333) + (w & 0x3333); - w = (w >> 4) + w; - w = ((w >> 8) & 0x000f) + (w & 0x000f); - c += w; - if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) - x >>= LOCAL_SHIFT_BITS; - else - x = 0; - } - return c; -} - -mp_bitcnt_t -mpn_popcount (mp_srcptr p, mp_size_t n) -{ - mp_size_t i; - mp_bitcnt_t c; - - for (c = 0, i = 0; i < n; i++) - c += gmp_popcount_limb (p[i]); - - return c; -} - -mp_bitcnt_t -mpz_popcount (const mpz_t u) -{ - mp_size_t un; - - un = u->_mp_size; - - if (un < 0) - return ~(mp_bitcnt_t) 0; - - return mpn_popcount (u->_mp_d, un); -} - -mp_bitcnt_t -mpz_hamdist (const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, i; - mp_limb_t uc, vc, ul, vl, comp; - mp_srcptr up, vp; - mp_bitcnt_t c; - - un = u->_mp_size; - vn = v->_mp_size; - - if ( (un ^ vn) < 0) - return ~(mp_bitcnt_t) 0; - - comp = - (uc = vc = (un < 0)); - if (uc) - { - assert (vn < 0); - un = -un; - vn = -vn; - } - - up = u->_mp_d; - vp = v->_mp_d; - - if (un < vn) - MPN_SRCPTR_SWAP (up, un, vp, vn); - - for (i = 0, c = 0; i < vn; i++) - { - ul = (up[i] ^ comp) + uc; - uc = ul < uc; - - vl = (vp[i] ^ comp) + vc; - vc = vl < vc; - - c += gmp_popcount_limb (ul ^ vl); - } - assert (vc == 0); - - for (; i < un; i++) - { - ul = (up[i] ^ comp) + uc; - uc = ul < uc; - - c += gmp_popcount_limb (ul ^ comp); - } - - return c; -} - -mp_bitcnt_t -mpz_scan1 (const mpz_t u, mp_bitcnt_t starting_bit) -{ - mp_ptr up; - mp_size_t us, un, i; - mp_limb_t limb, ux; - - us = u->_mp_size; - un = GMP_ABS (us); - i = starting_bit / GMP_LIMB_BITS; - - /* Past the end there's no 1 bits for u>=0, or an immediate 1 bit - for u<0. Notice this test picks up any u==0 too. */ - if (i >= un) - return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit); - - up = u->_mp_d; - ux = 0; - limb = up[i]; - - if (starting_bit != 0) - { - if (us < 0) - { - ux = mpn_zero_p (up, i); - limb = ~ limb + ux; - ux = - (mp_limb_t) (limb >= ux); - } - - /* Mask to 0 all bits before starting_bit, thus ignoring them. */ - limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); - } - - return mpn_common_scan (limb, i, up, un, ux); -} - -mp_bitcnt_t -mpz_scan0 (const mpz_t u, mp_bitcnt_t starting_bit) -{ - mp_ptr up; - mp_size_t us, un, i; - mp_limb_t limb, ux; - - us = u->_mp_size; - ux = - (mp_limb_t) (us >= 0); - un = GMP_ABS (us); - i = starting_bit / GMP_LIMB_BITS; - - /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for - u<0. Notice this test picks up all cases of u==0 too. */ - if (i >= un) - return (ux ? starting_bit : ~(mp_bitcnt_t) 0); - - up = u->_mp_d; - limb = up[i] ^ ux; - - if (ux == 0) - limb -= mpn_zero_p (up, i); /* limb = ~(~limb + zero_p) */ - - /* Mask all bits before starting_bit, thus ignoring them. */ - limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); - - return mpn_common_scan (limb, i, up, un, ux); -} - - -/* MPZ base conversion. 
*/ - -size_t -mpz_sizeinbase (const mpz_t u, int base) -{ - mp_size_t un, tn; - mp_srcptr up; - mp_ptr tp; - mp_bitcnt_t bits; - struct gmp_div_inverse bi; - size_t ndigits; - - assert (base >= 2); - assert (base <= 62); - - un = GMP_ABS (u->_mp_size); - if (un == 0) - return 1; - - up = u->_mp_d; - - bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]); - switch (base) - { - case 2: - return bits; - case 4: - return (bits + 1) / 2; - case 8: - return (bits + 2) / 3; - case 16: - return (bits + 3) / 4; - case 32: - return (bits + 4) / 5; - /* FIXME: Do something more clever for the common case of base - 10. */ - } - - tp = gmp_alloc_limbs (un); - mpn_copyi (tp, up, un); - mpn_div_qr_1_invert (&bi, base); - - tn = un; - ndigits = 0; - do - { - ndigits++; - mpn_div_qr_1_preinv (tp, tp, tn, &bi); - tn -= (tp[tn-1] == 0); - } - while (tn > 0); - - gmp_free_limbs (tp, un); - return ndigits; -} - -char * -mpz_get_str (char *sp, int base, const mpz_t u) -{ - unsigned bits; - const char *digits; - mp_size_t un; - size_t i, sn, osn; - - digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; - if (base > 1) - { - if (base <= 36) - digits = "0123456789abcdefghijklmnopqrstuvwxyz"; - else if (base > 62) - return NULL; - } - else if (base >= -1) - base = 10; - else - { - base = -base; - if (base > 36) - return NULL; - } - - sn = 1 + mpz_sizeinbase (u, base); - if (!sp) - { - osn = 1 + sn; - sp = (char *) gmp_alloc (osn); - } - else - osn = 0; - un = GMP_ABS (u->_mp_size); - - if (un == 0) - { - sp[0] = '0'; - sn = 1; - goto ret; - } - - i = 0; - - if (u->_mp_size < 0) - sp[i++] = '-'; - - bits = mpn_base_power_of_two_p (base); - - if (bits) - /* Not modified in this case. */ - sn = i + mpn_get_str_bits ((unsigned char *) sp + i, bits, u->_mp_d, un); - else - { - struct mpn_base_info info; - mp_ptr tp; - - mpn_get_base_info (&info, base); - tp = gmp_alloc_limbs (un); - mpn_copyi (tp, u->_mp_d, un); - - sn = i + mpn_get_str_other ((unsigned char *) sp + i, base, &info, tp, un); - gmp_free_limbs (tp, un); - } - - for (; i < sn; i++) - sp[i] = digits[(unsigned char) sp[i]]; - -ret: - sp[sn] = '\0'; - if (osn && osn != sn + 1) - sp = (char*) gmp_realloc (sp, osn, sn + 1); - return sp; -} - -int -mpz_set_str (mpz_t r, const char *sp, int base) -{ - unsigned bits, value_of_a; - mp_size_t rn, alloc; - mp_ptr rp; - size_t dn, sn; - int sign; - unsigned char *dp; - - assert (base == 0 || (base >= 2 && base <= 62)); - - while (isspace( (unsigned char) *sp)) - sp++; - - sign = (*sp == '-'); - sp += sign; - - if (base == 0) - { - if (sp[0] == '0') - { - if (sp[1] == 'x' || sp[1] == 'X') - { - base = 16; - sp += 2; - } - else if (sp[1] == 'b' || sp[1] == 'B') - { - base = 2; - sp += 2; - } - else - base = 8; - } - else - base = 10; - } - - if (!*sp) - { - r->_mp_size = 0; - return -1; - } - sn = strlen(sp); - dp = (unsigned char *) gmp_alloc (sn); - - value_of_a = (base > 36) ? 
36 : 10; - for (dn = 0; *sp; sp++) - { - unsigned digit; - - if (isspace ((unsigned char) *sp)) - continue; - else if (*sp >= '0' && *sp <= '9') - digit = *sp - '0'; - else if (*sp >= 'a' && *sp <= 'z') - digit = *sp - 'a' + value_of_a; - else if (*sp >= 'A' && *sp <= 'Z') - digit = *sp - 'A' + 10; - else - digit = base; /* fail */ - - if (digit >= (unsigned) base) - { - gmp_free (dp, sn); - r->_mp_size = 0; - return -1; - } - - dp[dn++] = digit; - } - - if (!dn) - { - gmp_free (dp, sn); - r->_mp_size = 0; - return -1; - } - bits = mpn_base_power_of_two_p (base); - - if (bits > 0) - { - alloc = (dn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; - rp = MPZ_REALLOC (r, alloc); - rn = mpn_set_str_bits (rp, dp, dn, bits); - } - else - { - struct mpn_base_info info; - mpn_get_base_info (&info, base); - alloc = (dn + info.exp - 1) / info.exp; - rp = MPZ_REALLOC (r, alloc); - rn = mpn_set_str_other (rp, dp, dn, base, &info); - /* Normalization, needed for all-zero input. */ - assert (rn > 0); - rn -= rp[rn-1] == 0; - } - assert (rn <= alloc); - gmp_free (dp, sn); - - r->_mp_size = sign ? - rn : rn; - - return 0; -} - -int -mpz_init_set_str (mpz_t r, const char *sp, int base) -{ - mpz_init (r); - return mpz_set_str (r, sp, base); -} - -size_t -mpz_out_str (FILE *stream, int base, const mpz_t x) -{ - char *str; - size_t len, n; - - str = mpz_get_str (NULL, base, x); - if (!str) - return 0; - len = strlen (str); - n = fwrite (str, 1, len, stream); - gmp_free (str, len + 1); - return n; -} - - -static int -gmp_detect_endian (void) -{ - static const int i = 2; - const unsigned char *p = (const unsigned char *) &i; - return 1 - *p; -} - -/* Import and export. Does not support nails. */ -void -mpz_import (mpz_t r, size_t count, int order, size_t size, int endian, - size_t nails, const void *src) -{ - const unsigned char *p; - ptrdiff_t word_step; - mp_ptr rp; - mp_size_t rn; - - /* The current (partial) limb. */ - mp_limb_t limb; - /* The number of bytes already copied to this limb (starting from - the low end). */ - size_t bytes; - /* The index where the limb should be stored, when completed. */ - mp_size_t i; - - if (nails != 0) - gmp_die ("mpz_import: Nails not supported."); - - assert (order == 1 || order == -1); - assert (endian >= -1 && endian <= 1); - - if (endian == 0) - endian = gmp_detect_endian (); - - p = (unsigned char *) src; - - word_step = (order != endian) ? 2 * size : 0; - - /* Process bytes from the least significant end, so point p at the - least significant word. */ - if (order == 1) - { - p += size * (count - 1); - word_step = - word_step; - } - - /* And at least significant byte of that word. 
*/ - if (endian == 1) - p += (size - 1); - - rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t); - rp = MPZ_REALLOC (r, rn); - - for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step) - { - size_t j; - for (j = 0; j < size; j++, p -= (ptrdiff_t) endian) - { - limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT); - if (bytes == sizeof(mp_limb_t)) - { - rp[i++] = limb; - bytes = 0; - limb = 0; - } - } - } - assert (i + (bytes > 0) == rn); - if (limb != 0) - rp[i++] = limb; - else - i = mpn_normalized_size (rp, i); - - r->_mp_size = i; -} - -void * -mpz_export (void *r, size_t *countp, int order, size_t size, int endian, - size_t nails, const mpz_t u) -{ - size_t count; - mp_size_t un; - - if (nails != 0) - gmp_die ("mpz_export: Nails not supported."); - - assert (order == 1 || order == -1); - assert (endian >= -1 && endian <= 1); - assert (size > 0 || u->_mp_size == 0); - - un = u->_mp_size; - count = 0; - if (un != 0) - { - size_t k; - unsigned char *p; - ptrdiff_t word_step; - /* The current (partial) limb. */ - mp_limb_t limb; - /* The number of bytes left to do in this limb. */ - size_t bytes; - /* The index where the limb was read. */ - mp_size_t i; - - un = GMP_ABS (un); - - /* Count bytes in top limb. */ - limb = u->_mp_d[un-1]; - assert (limb != 0); - - k = (GMP_LIMB_BITS <= CHAR_BIT); - if (!k) - { - do { - int LOCAL_CHAR_BIT = CHAR_BIT; - k++; limb >>= LOCAL_CHAR_BIT; - } while (limb != 0); - } - /* else limb = 0; */ - - count = (k + (un-1) * sizeof (mp_limb_t) + size - 1) / size; - - if (!r) - r = gmp_alloc (count * size); - - if (endian == 0) - endian = gmp_detect_endian (); - - p = (unsigned char *) r; - - word_step = (order != endian) ? 2 * size : 0; - - /* Process bytes from the least significant end, so point p at the - least significant word. */ - if (order == 1) - { - p += size * (count - 1); - word_step = - word_step; - } - - /* And at least significant byte of that word. */ - if (endian == 1) - p += (size - 1); - - for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step) - { - size_t j; - for (j = 0; j < size; ++j, p -= (ptrdiff_t) endian) - { - if (sizeof (mp_limb_t) == 1) - { - if (i < un) - *p = u->_mp_d[i++]; - else - *p = 0; - } - else - { - int LOCAL_CHAR_BIT = CHAR_BIT; - if (bytes == 0) - { - if (i < un) - limb = u->_mp_d[i++]; - bytes = sizeof (mp_limb_t); - } - *p = limb; - limb >>= LOCAL_CHAR_BIT; - bytes--; - } - } - } - assert (i == un); - assert (k == count); - } - - if (countp) - *countp = count; - - return r; -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp.h deleted file mode 100644 index f28cb360ce..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mini-gmp.h +++ /dev/null @@ -1,311 +0,0 @@ -/* mini-gmp, a minimalistic implementation of a GNU GMP subset. - -Copyright 2011-2015, 2017, 2019-2021 Free Software Foundation, Inc. - -This file is part of the GNU MP Library. - -The GNU MP Library is free software; you can redistribute it and/or modify -it under the terms of either: - - * the GNU Lesser General Public License as published by the Free - Software Foundation; either version 3 of the License, or (at your - option) any later version. - -or - - * the GNU General Public License as published by the Free Software - Foundation; either version 2 of the License, or (at your option) any - later version. - -or both in parallel, as here. 
- -The GNU MP Library is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -for more details. - -You should have received copies of the GNU General Public License and the -GNU Lesser General Public License along with the GNU MP Library. If not, -see https://www.gnu.org/licenses/. */ - -/* About mini-gmp: This is a minimal implementation of a subset of the - GMP interface. It is intended for inclusion into applications which - have modest bignums needs, as a fallback when the real GMP library - is not installed. - - This file defines the public interface. */ - -#ifndef __MINI_GMP_H__ -#define __MINI_GMP_H__ - -/* For size_t */ -#include - -#if defined (__cplusplus) -extern "C" { -#endif - -void mp_set_memory_functions (void *(*) (size_t), - void *(*) (void *, size_t, size_t), - void (*) (void *, size_t)); - -void mp_get_memory_functions (void *(**) (size_t), - void *(**) (void *, size_t, size_t), - void (**) (void *, size_t)); - -#ifndef MINI_GMP_LIMB_TYPE -#define MINI_GMP_LIMB_TYPE long -#endif - -typedef unsigned MINI_GMP_LIMB_TYPE mp_limb_t; -typedef long mp_size_t; -typedef unsigned long mp_bitcnt_t; - -typedef mp_limb_t *mp_ptr; -typedef const mp_limb_t *mp_srcptr; - -typedef struct -{ - int _mp_alloc; /* Number of *limbs* allocated and pointed - to by the _mp_d field. */ - int _mp_size; /* abs(_mp_size) is the number of limbs the - last field points to. If _mp_size is - negative this is a negative number. */ - mp_limb_t *_mp_d; /* Pointer to the limbs. */ -} __mpz_struct; - -typedef __mpz_struct mpz_t[1]; - -typedef __mpz_struct *mpz_ptr; -typedef const __mpz_struct *mpz_srcptr; - -extern const int mp_bits_per_limb; - -void mpn_copyi (mp_ptr, mp_srcptr, mp_size_t); -void mpn_copyd (mp_ptr, mp_srcptr, mp_size_t); -void mpn_zero (mp_ptr, mp_size_t); - -int mpn_cmp (mp_srcptr, mp_srcptr, mp_size_t); -int mpn_zero_p (mp_srcptr, mp_size_t); - -mp_limb_t mpn_add_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_add_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); -mp_limb_t mpn_add (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); - -mp_limb_t mpn_sub_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_sub_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); -mp_limb_t mpn_sub (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); - -mp_limb_t mpn_mul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_addmul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_submul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); - -mp_limb_t mpn_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); -void mpn_mul_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); -void mpn_sqr (mp_ptr, mp_srcptr, mp_size_t); -int mpn_perfect_square_p (mp_srcptr, mp_size_t); -mp_size_t mpn_sqrtrem (mp_ptr, mp_ptr, mp_srcptr, mp_size_t); -mp_size_t mpn_gcd (mp_ptr, mp_ptr, mp_size_t, mp_ptr, mp_size_t); - -mp_limb_t mpn_lshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); -mp_limb_t mpn_rshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); - -mp_bitcnt_t mpn_scan0 (mp_srcptr, mp_bitcnt_t); -mp_bitcnt_t mpn_scan1 (mp_srcptr, mp_bitcnt_t); - -void mpn_com (mp_ptr, mp_srcptr, mp_size_t); -mp_limb_t mpn_neg (mp_ptr, mp_srcptr, mp_size_t); - -mp_bitcnt_t mpn_popcount (mp_srcptr, mp_size_t); - -mp_limb_t mpn_invert_3by2 (mp_limb_t, mp_limb_t); -#define mpn_invert_limb(x) mpn_invert_3by2 ((x), 0) - -size_t mpn_get_str (unsigned char *, int, mp_ptr, mp_size_t); 
-mp_size_t mpn_set_str (mp_ptr, const unsigned char *, size_t, int); - -void mpz_init (mpz_t); -void mpz_init2 (mpz_t, mp_bitcnt_t); -void mpz_clear (mpz_t); - -#define mpz_odd_p(z) (((z)->_mp_size != 0) & (int) (z)->_mp_d[0]) -#define mpz_even_p(z) (! mpz_odd_p (z)) - -int mpz_sgn (const mpz_t); -int mpz_cmp_si (const mpz_t, long); -int mpz_cmp_ui (const mpz_t, unsigned long); -int mpz_cmp (const mpz_t, const mpz_t); -int mpz_cmpabs_ui (const mpz_t, unsigned long); -int mpz_cmpabs (const mpz_t, const mpz_t); -int mpz_cmp_d (const mpz_t, double); -int mpz_cmpabs_d (const mpz_t, double); - -void mpz_abs (mpz_t, const mpz_t); -void mpz_neg (mpz_t, const mpz_t); -void mpz_swap (mpz_t, mpz_t); - -void mpz_add_ui (mpz_t, const mpz_t, unsigned long); -void mpz_add (mpz_t, const mpz_t, const mpz_t); -void mpz_sub_ui (mpz_t, const mpz_t, unsigned long); -void mpz_ui_sub (mpz_t, unsigned long, const mpz_t); -void mpz_sub (mpz_t, const mpz_t, const mpz_t); - -void mpz_mul_si (mpz_t, const mpz_t, long int); -void mpz_mul_ui (mpz_t, const mpz_t, unsigned long int); -void mpz_mul (mpz_t, const mpz_t, const mpz_t); -void mpz_mul_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_addmul_ui (mpz_t, const mpz_t, unsigned long int); -void mpz_addmul (mpz_t, const mpz_t, const mpz_t); -void mpz_submul_ui (mpz_t, const mpz_t, unsigned long int); -void mpz_submul (mpz_t, const mpz_t, const mpz_t); - -void mpz_cdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_fdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_tdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_cdiv_q (mpz_t, const mpz_t, const mpz_t); -void mpz_fdiv_q (mpz_t, const mpz_t, const mpz_t); -void mpz_tdiv_q (mpz_t, const mpz_t, const mpz_t); -void mpz_cdiv_r (mpz_t, const mpz_t, const mpz_t); -void mpz_fdiv_r (mpz_t, const mpz_t, const mpz_t); -void mpz_tdiv_r (mpz_t, const mpz_t, const mpz_t); - -void mpz_cdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_fdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_tdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_cdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_fdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_tdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); - -void mpz_mod (mpz_t, const mpz_t, const mpz_t); - -void mpz_divexact (mpz_t, const mpz_t, const mpz_t); - -int mpz_divisible_p (const mpz_t, const mpz_t); -int mpz_congruent_p (const mpz_t, const mpz_t, const mpz_t); - -unsigned long mpz_cdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); -unsigned long mpz_fdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); -unsigned long mpz_tdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); -unsigned long mpz_cdiv_q_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_fdiv_q_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_tdiv_q_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_cdiv_r_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_fdiv_r_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_tdiv_r_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_cdiv_ui (const mpz_t, unsigned long); -unsigned long mpz_fdiv_ui (const mpz_t, unsigned long); -unsigned long mpz_tdiv_ui (const mpz_t, unsigned long); - -unsigned long mpz_mod_ui (mpz_t, const mpz_t, unsigned long); - -void mpz_divexact_ui (mpz_t, const mpz_t, unsigned long); - -int mpz_divisible_ui_p (const mpz_t, unsigned long); - -unsigned long mpz_gcd_ui (mpz_t, const mpz_t, unsigned long); -void mpz_gcd (mpz_t, const mpz_t, const mpz_t); -void 
mpz_gcdext (mpz_t, mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_lcm_ui (mpz_t, const mpz_t, unsigned long); -void mpz_lcm (mpz_t, const mpz_t, const mpz_t); -int mpz_invert (mpz_t, const mpz_t, const mpz_t); - -void mpz_sqrtrem (mpz_t, mpz_t, const mpz_t); -void mpz_sqrt (mpz_t, const mpz_t); -int mpz_perfect_square_p (const mpz_t); - -void mpz_pow_ui (mpz_t, const mpz_t, unsigned long); -void mpz_ui_pow_ui (mpz_t, unsigned long, unsigned long); -void mpz_powm (mpz_t, const mpz_t, const mpz_t, const mpz_t); -void mpz_powm_ui (mpz_t, const mpz_t, unsigned long, const mpz_t); - -void mpz_rootrem (mpz_t, mpz_t, const mpz_t, unsigned long); -int mpz_root (mpz_t, const mpz_t, unsigned long); - -void mpz_fac_ui (mpz_t, unsigned long); -void mpz_2fac_ui (mpz_t, unsigned long); -void mpz_mfac_uiui (mpz_t, unsigned long, unsigned long); -void mpz_bin_uiui (mpz_t, unsigned long, unsigned long); - -int mpz_probab_prime_p (const mpz_t, int); - -int mpz_tstbit (const mpz_t, mp_bitcnt_t); -void mpz_setbit (mpz_t, mp_bitcnt_t); -void mpz_clrbit (mpz_t, mp_bitcnt_t); -void mpz_combit (mpz_t, mp_bitcnt_t); - -void mpz_com (mpz_t, const mpz_t); -void mpz_and (mpz_t, const mpz_t, const mpz_t); -void mpz_ior (mpz_t, const mpz_t, const mpz_t); -void mpz_xor (mpz_t, const mpz_t, const mpz_t); - -mp_bitcnt_t mpz_popcount (const mpz_t); -mp_bitcnt_t mpz_hamdist (const mpz_t, const mpz_t); -mp_bitcnt_t mpz_scan0 (const mpz_t, mp_bitcnt_t); -mp_bitcnt_t mpz_scan1 (const mpz_t, mp_bitcnt_t); - -int mpz_fits_slong_p (const mpz_t); -int mpz_fits_ulong_p (const mpz_t); -int mpz_fits_sint_p (const mpz_t); -int mpz_fits_uint_p (const mpz_t); -int mpz_fits_sshort_p (const mpz_t); -int mpz_fits_ushort_p (const mpz_t); -long int mpz_get_si (const mpz_t); -unsigned long int mpz_get_ui (const mpz_t); -double mpz_get_d (const mpz_t); -size_t mpz_size (const mpz_t); -mp_limb_t mpz_getlimbn (const mpz_t, mp_size_t); - -void mpz_realloc2 (mpz_t, mp_bitcnt_t); -mp_srcptr mpz_limbs_read (mpz_srcptr); -mp_ptr mpz_limbs_modify (mpz_t, mp_size_t); -mp_ptr mpz_limbs_write (mpz_t, mp_size_t); -void mpz_limbs_finish (mpz_t, mp_size_t); -mpz_srcptr mpz_roinit_n (mpz_t, mp_srcptr, mp_size_t); - -#define MPZ_ROINIT_N(xp, xs) {{0, (xs),(xp) }} - -void mpz_set_si (mpz_t, signed long int); -void mpz_set_ui (mpz_t, unsigned long int); -void mpz_set (mpz_t, const mpz_t); -void mpz_set_d (mpz_t, double); - -void mpz_init_set_si (mpz_t, signed long int); -void mpz_init_set_ui (mpz_t, unsigned long int); -void mpz_init_set (mpz_t, const mpz_t); -void mpz_init_set_d (mpz_t, double); - -size_t mpz_sizeinbase (const mpz_t, int); -char *mpz_get_str (char *, int, const mpz_t); -int mpz_set_str (mpz_t, const char *, int); -int mpz_init_set_str (mpz_t, const char *, int); - -/* This long list taken from gmp.h. */ -/* For reference, "defined(EOF)" cannot be used here. In g++ 2.95.4, - defines EOF but not FILE. 
*/ -#if defined (FILE) \ - || defined (H_STDIO) \ - || defined (_H_STDIO) /* AIX */ \ - || defined (_STDIO_H) /* glibc, Sun, SCO */ \ - || defined (_STDIO_H_) /* BSD, OSF */ \ - || defined (__STDIO_H) /* Borland */ \ - || defined (__STDIO_H__) /* IRIX */ \ - || defined (_STDIO_INCLUDED) /* HPUX */ \ - || defined (__dj_include_stdio_h_) /* DJGPP */ \ - || defined (_FILE_DEFINED) /* Microsoft */ \ - || defined (__STDIO__) /* Apple MPW MrC */ \ - || defined (_MSL_STDIO_H) /* Metrowerks */ \ - || defined (_STDIO_H_INCLUDED) /* QNX4 */ \ - || defined (_ISO_STDIO_ISO_H) /* Sun C++ */ \ - || defined (__STDIO_LOADED) /* VMS */ \ - || defined (_STDIO) /* HPE NonStop */ \ - || defined (__DEFINED_FILE) /* musl */ -size_t mpz_out_str (FILE *, int, const mpz_t); -#endif - -void mpz_import (mpz_t, size_t, int, size_t, int, size_t, const void *); -void *mpz_export (void *, size_t *, int, size_t, int, size_t, const mpz_t); - -#if defined (__cplusplus) -} -#endif -#endif /* __MINI_GMP_H__ */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign_namespace.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign_namespace.h index bbfe72c13b..54e90326be 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign_namespace.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign_namespace.h @@ -18,6 +18,12 @@ #define PARAM_JOIN2(a, b) PARAM_JOIN2_(a, b) #define PARAM_NAME2(end, s) PARAM_JOIN2(end, s) +#ifndef DISABLE_NAMESPACING +#define SQISIGN_NAMESPACE_GENERIC(s) PARAM_NAME2(gen, s) +#else +#define SQISIGN_NAMESPACE_GENERIC(s) s +#endif + #if defined(SQISIGN_VARIANT) && !defined(DISABLE_NAMESPACING) #if defined(SQISIGN_BUILD_TYPE_REF) #define SQISIGN_NAMESPACE(s) PARAM_NAME3(ref, s) @@ -94,6 +100,16 @@ #define lift_basis SQISIGN_NAMESPACE(lift_basis) #define lift_basis_normalized SQISIGN_NAMESPACE(lift_basis_normalized) +// Namespacing symbols exported from basis.c, ec.c: +#undef xDBL_E0 + +#define xDBL_E0 SQISIGN_NAMESPACE(xDBL_E0) + +// Namespacing symbols exported from basis.c, ec.c, isog_chains.c: +#undef xDBL_A24 + +#define xDBL_A24 SQISIGN_NAMESPACE(xDBL_A24) + // Namespacing symbols exported from biextension.c: #undef clear_cofac #undef ec_dlog_2_tate @@ -109,6 +125,11 @@ #define reduced_tate SQISIGN_NAMESPACE(reduced_tate) #define weil SQISIGN_NAMESPACE(weil) +// Namespacing symbols exported from biextension.c, ec_jac.c, hd.c: +#undef ADD + +#define ADD SQISIGN_NAMESPACE(ADD) + // Namespacing symbols exported from common.c: #undef hash_to_challenge #undef public_key_finalize @@ -148,6 +169,28 @@ #define find_uv SQISIGN_NAMESPACE(find_uv) #define fixed_degree_isogeny_and_eval SQISIGN_NAMESPACE(fixed_degree_isogeny_and_eval) +// Namespacing symbols exported from dim2id2iso.c, encode_signature.c, id2iso.c, keygen.c, quaternion_data.c, sign.c: +#undef EXTREMAL_ORDERS +#undef QUATALG_PINFTY + +#define EXTREMAL_ORDERS SQISIGN_NAMESPACE(EXTREMAL_ORDERS) +#define QUATALG_PINFTY SQISIGN_NAMESPACE(QUATALG_PINFTY) + +// Namespacing symbols exported from dim2id2iso.c, endomorphism_action.c, id2iso.c: +#undef CURVES_WITH_ENDOMORPHISMS + +#define CURVES_WITH_ENDOMORPHISMS SQISIGN_NAMESPACE(CURVES_WITH_ENDOMORPHISMS) + +// Namespacing symbols exported from dim2id2iso.c, id2iso.c, sign.c, torsion_constants.c: +#undef TORSION_PLUS_2POWER + +#define TORSION_PLUS_2POWER SQISIGN_NAMESPACE(TORSION_PLUS_2POWER) + +// Namespacing symbols exported from dim2id2iso.c, quaternion_data.c: +#undef CONNECTING_IDEALS + +#define CONNECTING_IDEALS SQISIGN_NAMESPACE(CONNECTING_IDEALS) + 
// Namespacing symbols exported from dim4.c: #undef ibz_inv_dim4_make_coeff_mpm #undef ibz_inv_dim4_make_coeff_pmp @@ -207,6 +250,13 @@ #define ibz_vec_4_sub SQISIGN_NAMESPACE(ibz_vec_4_sub) #define quat_qf_eval SQISIGN_NAMESPACE(quat_qf_eval) +// Namespacing symbols exported from e0_basis.c: +#undef BASIS_E0_PX +#undef BASIS_E0_QX + +#define BASIS_E0_PX SQISIGN_NAMESPACE(BASIS_E0_PX) +#define BASIS_E0_QX SQISIGN_NAMESPACE(BASIS_E0_QX) + // Namespacing symbols exported from ec.c: #undef cswap_points #undef ec_biscalar_mul @@ -235,8 +285,6 @@ #undef xDBL #undef xDBLADD #undef xDBLMUL -#undef xDBL_A24 -#undef xDBL_E0 #undef xMUL #define cswap_points SQISIGN_NAMESPACE(cswap_points) @@ -266,14 +314,9 @@ #define xDBL SQISIGN_NAMESPACE(xDBL) #define xDBLADD SQISIGN_NAMESPACE(xDBLADD) #define xDBLMUL SQISIGN_NAMESPACE(xDBLMUL) -#define xDBL_A24 SQISIGN_NAMESPACE(xDBL_A24) -#define xDBL_E0 SQISIGN_NAMESPACE(xDBL_E0) #define xMUL SQISIGN_NAMESPACE(xMUL) // Namespacing symbols exported from ec_jac.c: -#undef ADD -#undef DBL -#undef DBLW #undef copy_jac_point #undef jac_from_ws #undef jac_init @@ -284,9 +327,6 @@ #undef jac_to_xz_add_components #undef select_jac_point -#define ADD SQISIGN_NAMESPACE(ADD) -#define DBL SQISIGN_NAMESPACE(DBL) -#define DBLW SQISIGN_NAMESPACE(DBLW) #define copy_jac_point SQISIGN_NAMESPACE(copy_jac_point) #define jac_from_ws SQISIGN_NAMESPACE(jac_from_ws) #define jac_init SQISIGN_NAMESPACE(jac_init) @@ -297,6 +337,21 @@ #define jac_to_xz_add_components SQISIGN_NAMESPACE(jac_to_xz_add_components) #define select_jac_point SQISIGN_NAMESPACE(select_jac_point) +// Namespacing symbols exported from ec_jac.c, hd.c: +#undef DBLW + +#define DBLW SQISIGN_NAMESPACE(DBLW) + +// Namespacing symbols exported from ec_jac.c, hd.c, theta_isogenies.c: +#undef DBL + +#define DBL SQISIGN_NAMESPACE(DBL) + +// Namespacing symbols exported from ec_params.c: +#undef p_cofactor_for_2f + +#define p_cofactor_for_2f SQISIGN_NAMESPACE(p_cofactor_for_2f) + // Namespacing symbols exported from encode_signature.c: #undef secret_key_from_bytes #undef secret_key_to_bytes @@ -455,21 +510,24 @@ #define fp_set_one SQISIGN_NAMESPACE(fp_set_one) #define fp_set_small SQISIGN_NAMESPACE(fp_set_small) #define fp_set_zero SQISIGN_NAMESPACE(fp_set_zero) -#define ONE SQISIGN_NAMESPACE(ONE) -#define ZERO SQISIGN_NAMESPACE(ZERO) // Namespacing symbols exported from fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c, gf27500.c, gf5248.c, gf65376.c: +#undef ONE +#undef ZERO #undef fp_add #undef fp_mul #undef fp_sqr #undef fp_sub +#define ONE SQISIGN_NAMESPACE(ONE) +#define ZERO SQISIGN_NAMESPACE(ZERO) #define fp_add SQISIGN_NAMESPACE(fp_add) #define fp_mul SQISIGN_NAMESPACE(fp_mul) #define fp_sqr SQISIGN_NAMESPACE(fp_sqr) #define fp_sub SQISIGN_NAMESPACE(fp_sub) // Namespacing symbols exported from gf27500.c: +#undef gf27500_MINUS_ONE #undef gf27500_decode #undef gf27500_decode_reduce #undef gf27500_div @@ -479,6 +537,7 @@ #undef gf27500_legendre #undef gf27500_sqrt +#define gf27500_MINUS_ONE SQISIGN_NAMESPACE(gf27500_MINUS_ONE) #define gf27500_decode SQISIGN_NAMESPACE(gf27500_decode) #define gf27500_decode_reduce SQISIGN_NAMESPACE(gf27500_decode_reduce) #define gf27500_div SQISIGN_NAMESPACE(gf27500_div) @@ -500,6 +559,7 @@ #define fp2_sq_c1 SQISIGN_NAMESPACE(fp2_sq_c1) // Namespacing symbols exported from gf5248.c: +#undef gf5248_MINUS_ONE #undef gf5248_decode #undef gf5248_decode_reduce #undef gf5248_div @@ -509,6 +569,7 @@ #undef gf5248_legendre #undef gf5248_sqrt +#define gf5248_MINUS_ONE SQISIGN_NAMESPACE(gf5248_MINUS_ONE) 
#define gf5248_decode SQISIGN_NAMESPACE(gf5248_decode) #define gf5248_decode_reduce SQISIGN_NAMESPACE(gf5248_decode_reduce) #define gf5248_div SQISIGN_NAMESPACE(gf5248_div) @@ -519,6 +580,7 @@ #define gf5248_sqrt SQISIGN_NAMESPACE(gf5248_sqrt) // Namespacing symbols exported from gf65376.c: +#undef gf65376_MINUS_ONE #undef gf65376_decode #undef gf65376_decode_reduce #undef gf65376_div @@ -528,6 +590,7 @@ #undef gf65376_legendre #undef gf65376_sqrt +#define gf65376_MINUS_ONE SQISIGN_NAMESPACE(gf65376_MINUS_ONE) #define gf65376_decode SQISIGN_NAMESPACE(gf65376_decode) #define gf65376_decode_reduce SQISIGN_NAMESPACE(gf65376_decode_reduce) #define gf65376_div SQISIGN_NAMESPACE(gf65376_div) @@ -554,6 +617,22 @@ #define double_couple_point SQISIGN_NAMESPACE(double_couple_point) #define double_couple_point_iter SQISIGN_NAMESPACE(double_couple_point_iter) +// Namespacing symbols exported from hd_splitting_transforms.c: +#undef CHI_EVAL + +#define CHI_EVAL SQISIGN_NAMESPACE(CHI_EVAL) + +// Namespacing symbols exported from hd_splitting_transforms.c, theta_isogenies.c: +#undef EVEN_INDEX +#undef FP2_CONSTANTS +#undef NORMALIZATION_TRANSFORMS +#undef SPLITTING_TRANSFORMS + +#define EVEN_INDEX SQISIGN_NAMESPACE(EVEN_INDEX) +#define FP2_CONSTANTS SQISIGN_NAMESPACE(FP2_CONSTANTS) +#define NORMALIZATION_TRANSFORMS SQISIGN_NAMESPACE(NORMALIZATION_TRANSFORMS) +#define SPLITTING_TRANSFORMS SQISIGN_NAMESPACE(SPLITTING_TRANSFORMS) + // Namespacing symbols exported from hnf.c: #undef ibz_mat_4x4_is_hnf #undef ibz_mat_4xn_hnf_mod_core @@ -761,6 +840,11 @@ #define secret_key_finalize SQISIGN_NAMESPACE(secret_key_finalize) #define secret_key_init SQISIGN_NAMESPACE(secret_key_init) +// Namespacing symbols exported from keygen.c, torsion_constants.c: +#undef SEC_DEGREE + +#define SEC_DEGREE SQISIGN_NAMESPACE(SEC_DEGREE) + // Namespacing symbols exported from l2.c: #undef quat_lattice_lll #undef quat_lll_core @@ -910,6 +994,16 @@ #define quat_lattice_print SQISIGN_NAMESPACE(quat_lattice_print) #define quat_left_ideal_print SQISIGN_NAMESPACE(quat_left_ideal_print) +// Namespacing symbols exported from quaternion_data.c: +#undef CONJUGATING_ELEMENTS + +#define CONJUGATING_ELEMENTS SQISIGN_NAMESPACE(CONJUGATING_ELEMENTS) + +// Namespacing symbols exported from quaternion_data.c, sign.c: +#undef QUAT_prime_cofactor + +#define QUAT_prime_cofactor SQISIGN_NAMESPACE(QUAT_prime_cofactor) + // Namespacing symbols exported from random_input_generation.c: #undef quat_test_input_random_ideal_generation #undef quat_test_input_random_ideal_lattice_generation @@ -971,6 +1065,11 @@ #define protocols_sign SQISIGN_NAMESPACE(protocols_sign) +// Namespacing symbols exported from sign.c, torsion_constants.c: +#undef COM_DEGREE + +#define COM_DEGREE SQISIGN_NAMESPACE(COM_DEGREE) + // Namespacing symbols exported from sqisign.c: #undef sqisign_keypair #undef sqisign_open @@ -1006,6 +1105,11 @@ #define is_product_theta_point SQISIGN_NAMESPACE(is_product_theta_point) #define theta_precomputation SQISIGN_NAMESPACE(theta_precomputation) +// Namespacing symbols exported from torsion_constants.c: +#undef TWO_TO_SECURITY_BITS + +#define TWO_TO_SECURITY_BITS SQISIGN_NAMESPACE(TWO_TO_SECURITY_BITS) + // Namespacing symbols exported from verify.c: #undef protocols_verify @@ -1029,45 +1133,7 @@ #define xisog_2_singular SQISIGN_NAMESPACE(xisog_2_singular) #define xisog_4 SQISIGN_NAMESPACE(xisog_4) -// Namespacing symbols from precomp: -#undef BASIS_E0_PX -#undef BASIS_E0_QX -#undef p_cofactor_for_2f -#undef CURVES_WITH_ENDOMORPHISMS -#undef 
EVEN_INDEX -#undef CHI_EVAL -#undef FP2_CONSTANTS -#undef SPLITTING_TRANSFORMS -#undef NORMALIZATION_TRANSFORMS -#undef QUAT_prime_cofactor -#undef QUATALG_PINFTY -#undef EXTREMAL_ORDERS -#undef CONNECTING_IDEALS -#undef CONJUGATING_ELEMENTS -#undef TWO_TO_SECURITY_BITS -#undef TORSION_PLUS_2POWER -#undef SEC_DEGREE -#undef COM_DEGREE - -#define BASIS_E0_PX SQISIGN_NAMESPACE(BASIS_E0_PX) -#define BASIS_E0_QX SQISIGN_NAMESPACE(BASIS_E0_QX) -#define p_cofactor_for_2f SQISIGN_NAMESPACE(p_cofactor_for_2f) -#define CURVES_WITH_ENDOMORPHISMS SQISIGN_NAMESPACE(CURVES_WITH_ENDOMORPHISMS) -#define EVEN_INDEX SQISIGN_NAMESPACE(EVEN_INDEX) -#define CHI_EVAL SQISIGN_NAMESPACE(CHI_EVAL) -#define FP2_CONSTANTS SQISIGN_NAMESPACE(FP2_CONSTANTS) -#define SPLITTING_TRANSFORMS SQISIGN_NAMESPACE(SPLITTING_TRANSFORMS) -#define NORMALIZATION_TRANSFORMS SQISIGN_NAMESPACE(NORMALIZATION_TRANSFORMS) -#define QUAT_prime_cofactor SQISIGN_NAMESPACE(QUAT_prime_cofactor) -#define QUATALG_PINFTY SQISIGN_NAMESPACE(QUATALG_PINFTY) -#define EXTREMAL_ORDERS SQISIGN_NAMESPACE(EXTREMAL_ORDERS) -#define CONNECTING_IDEALS SQISIGN_NAMESPACE(CONNECTING_IDEALS) -#define CONJUGATING_ELEMENTS SQISIGN_NAMESPACE(CONJUGATING_ELEMENTS) -#define TWO_TO_SECURITY_BITS SQISIGN_NAMESPACE(TWO_TO_SECURITY_BITS) -#define TORSION_PLUS_2POWER SQISIGN_NAMESPACE(TORSION_PLUS_2POWER) -#define SEC_DEGREE SQISIGN_NAMESPACE(SEC_DEGREE) -#define COM_DEGREE SQISIGN_NAMESPACE(COM_DEGREE) - #endif +// This file is generated by scripts/Namespace.scala, do not edit it manually! diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c index 62e5491dc1..054ded92f0 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c @@ -942,4 +942,4 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) } } -#endif /* RADIX_32 */ +#endif /* RADIX_32 */ \ No newline at end of file diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c index 57c2131b60..d46d1c5d85 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c @@ -791,4 +791,4 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) } } -#endif /* RADIX_64 */ +#endif // RADIX_64 \ No newline at end of file diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.c deleted file mode 100644 index 396d505aec..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.c +++ /dev/null @@ -1,73 +0,0 @@ -#include -#include -#if defined(MINI_GMP) -#include "mini-gmp.h" -#else -// This configuration is used only for testing -#include -#endif -#include - -// Exported for testing -int -mini_mpz_legendre(const mpz_t a, const mpz_t p) -{ - int res = 0; - mpz_t e; - mpz_init_set(e, p); - mpz_sub_ui(e, e, 1); - mpz_fdiv_q_2exp(e, e, 1); - mpz_powm(e, a, e, p); - - if (mpz_cmp_ui(e, 1) <= 0) { - res = mpz_get_si(e); - } else { - res = -1; - } - mpz_clear(e); - return res; -} - -#if defined(MINI_GMP) -int -mpz_legendre(const mpz_t a, const mpz_t p) -{ - return mini_mpz_legendre(a, p); -} -#endif - -// Exported for testing -double -mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op) -{ - double ret; - int tmp_exp; - mpz_t tmp; - - // Handle the case where op is 0 - if (mpz_cmp_ui(op, 0) == 0) { - *exp = 0; - return 0.0; 
- } - - *exp = mpz_sizeinbase(op, 2); - - mpz_init_set(tmp, op); - - if (*exp > DBL_MAX_EXP) { - mpz_fdiv_q_2exp(tmp, tmp, *exp - DBL_MAX_EXP); - } - - ret = frexp(mpz_get_d(tmp), &tmp_exp); - mpz_clear(tmp); - - return ret; -} - -#if defined(MINI_GMP) -double -mpz_get_d_2exp(signed long int *exp, const mpz_t op) -{ - return mini_mpz_get_d_2exp(exp, op); -} -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.h deleted file mode 100644 index 0113cfdfe6..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp-extra.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef MINI_GMP_EXTRA_H -#define MINI_GMP_EXTRA_H - -#if defined MINI_GMP -#include "mini-gmp.h" - -typedef long mp_exp_t; - -int mpz_legendre(const mpz_t a, const mpz_t p); -double mpz_get_d_2exp(signed long int *exp, const mpz_t op); -#else -// This configuration is used only for testing -#include -#endif - -int mini_mpz_legendre(const mpz_t a, const mpz_t p); -double mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op); - -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp.c deleted file mode 100644 index 3830ab2031..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp.c +++ /dev/null @@ -1,4671 +0,0 @@ -/* Note: The code from mini-gmp is modifed from the original by - commenting out the definition of GMP_LIMB_BITS */ - -/* - mini-gmp, a minimalistic implementation of a GNU GMP subset. - - Contributed to the GNU project by Niels Möller - Additional functionalities and improvements by Marco Bodrato. - -Copyright 1991-1997, 1999-2022 Free Software Foundation, Inc. - -This file is part of the GNU MP Library. - -The GNU MP Library is free software; you can redistribute it and/or modify -it under the terms of either: - - * the GNU Lesser General Public License as published by the Free - Software Foundation; either version 3 of the License, or (at your - option) any later version. - -or - - * the GNU General Public License as published by the Free Software - Foundation; either version 2 of the License, or (at your option) any - later version. - -or both in parallel, as here. - -The GNU MP Library is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -for more details. - -You should have received copies of the GNU General Public License and the -GNU Lesser General Public License along with the GNU MP Library. If not, -see https://www.gnu.org/licenses/. */ - -/* NOTE: All functions in this file which are not declared in - mini-gmp.h are internal, and are not intended to be compatible - with GMP or with future versions of mini-gmp. */ - -/* Much of the material copied from GMP files, including: gmp-impl.h, - longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c, - mpn/generic/lshift.c, mpn/generic/mul_1.c, - mpn/generic/mul_basecase.c, mpn/generic/rshift.c, - mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c, - mpn/generic/submul_1.c. 
*/ - -#include -#include -#include -#include -#include -#include - -#include "mini-gmp.h" - -#if !defined(MINI_GMP_DONT_USE_FLOAT_H) -#include -#endif - - -/* Macros */ -/* Removed from here as it is passed as a compiler command-line definition */ -/* #define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT) */ - -#define GMP_LIMB_MAX ((mp_limb_t) ~ (mp_limb_t) 0) -#define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1)) - -#define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2)) -#define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1) - -#define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT) -#define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1)) - -#define GMP_ABS(x) ((x) >= 0 ? (x) : -(x)) -#define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1)) - -#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b)) -#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b)) - -#define GMP_CMP(a,b) (((a) > (b)) - ((a) < (b))) - -#if defined(DBL_MANT_DIG) && FLT_RADIX == 2 -#define GMP_DBL_MANT_BITS DBL_MANT_DIG -#else -#define GMP_DBL_MANT_BITS (53) -#endif - -/* Return non-zero if xp,xsize and yp,ysize overlap. - If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no - overlap. If both these are false, there's an overlap. */ -#define GMP_MPN_OVERLAP_P(xp, xsize, yp, ysize) \ - ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp)) - -#define gmp_assert_nocarry(x) do { \ - mp_limb_t __cy = (x); \ - assert (__cy == 0); \ - (void) (__cy); \ - } while (0) - -#define gmp_clz(count, x) do { \ - mp_limb_t __clz_x = (x); \ - unsigned __clz_c = 0; \ - int LOCAL_SHIFT_BITS = 8; \ - if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) \ - for (; \ - (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \ - __clz_c += 8) \ - { __clz_x <<= LOCAL_SHIFT_BITS; } \ - for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++) \ - __clz_x <<= 1; \ - (count) = __clz_c; \ - } while (0) - -#define gmp_ctz(count, x) do { \ - mp_limb_t __ctz_x = (x); \ - unsigned __ctz_c = 0; \ - gmp_clz (__ctz_c, __ctz_x & - __ctz_x); \ - (count) = GMP_LIMB_BITS - 1 - __ctz_c; \ - } while (0) - -#define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \ - do { \ - mp_limb_t __x; \ - __x = (al) + (bl); \ - (sh) = (ah) + (bh) + (__x < (al)); \ - (sl) = __x; \ - } while (0) - -#define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \ - do { \ - mp_limb_t __x; \ - __x = (al) - (bl); \ - (sh) = (ah) - (bh) - ((al) < (bl)); \ - (sl) = __x; \ - } while (0) - -#define gmp_umul_ppmm(w1, w0, u, v) \ - do { \ - int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; \ - if (sizeof(unsigned int) * CHAR_BIT >= 2 * GMP_LIMB_BITS) \ - { \ - unsigned int __ww = (unsigned int) (u) * (v); \ - w0 = (mp_limb_t) __ww; \ - w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ - } \ - else if (GMP_ULONG_BITS >= 2 * GMP_LIMB_BITS) \ - { \ - unsigned long int __ww = (unsigned long int) (u) * (v); \ - w0 = (mp_limb_t) __ww; \ - w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ - } \ - else { \ - mp_limb_t __x0, __x1, __x2, __x3; \ - unsigned __ul, __vl, __uh, __vh; \ - mp_limb_t __u = (u), __v = (v); \ - assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); \ - \ - __ul = __u & GMP_LLIMB_MASK; \ - __uh = __u >> (GMP_LIMB_BITS / 2); \ - __vl = __v & GMP_LLIMB_MASK; \ - __vh = __v >> (GMP_LIMB_BITS / 2); \ - \ - __x0 = (mp_limb_t) __ul * __vl; \ - __x1 = (mp_limb_t) __ul * __vh; \ - __x2 = (mp_limb_t) __uh * __vl; \ - __x3 = (mp_limb_t) __uh * __vh; \ - \ - __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */ \ - __x1 += __x2; /* but this indeed can */ \ - if (__x1 < __x2) /* did we get it? 
*/ \ - __x3 += GMP_HLIMB_BIT; /* yes, add it in the proper pos. */ \ - \ - (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \ - (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \ - } \ - } while (0) - -/* If mp_limb_t is of size smaller than int, plain u*v implies - automatic promotion to *signed* int, and then multiply may overflow - and cause undefined behavior. Explicitly cast to unsigned int for - that case. */ -#define gmp_umullo_limb(u, v) \ - ((sizeof(mp_limb_t) >= sizeof(int)) ? (u)*(v) : (unsigned int)(u) * (v)) - -#define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di) \ - do { \ - mp_limb_t _qh, _ql, _r, _mask; \ - gmp_umul_ppmm (_qh, _ql, (nh), (di)); \ - gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \ - _r = (nl) - gmp_umullo_limb (_qh, (d)); \ - _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \ - _qh += _mask; \ - _r += _mask & (d); \ - if (_r >= (d)) \ - { \ - _r -= (d); \ - _qh++; \ - } \ - \ - (r) = _r; \ - (q) = _qh; \ - } while (0) - -#define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \ - do { \ - mp_limb_t _q0, _t1, _t0, _mask; \ - gmp_umul_ppmm ((q), _q0, (n2), (dinv)); \ - gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \ - \ - /* Compute the two most significant limbs of n - q'd */ \ - (r1) = (n1) - gmp_umullo_limb ((d1), (q)); \ - gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \ - gmp_umul_ppmm (_t1, _t0, (d0), (q)); \ - gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \ - (q)++; \ - \ - /* Conditionally adjust q and the remainders */ \ - _mask = - (mp_limb_t) ((r1) >= _q0); \ - (q) += _mask; \ - gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \ - if ((r1) >= (d1)) \ - { \ - if ((r1) > (d1) || (r0) >= (d0)) \ - { \ - (q)++; \ - gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \ - } \ - } \ - } while (0) - -/* Swap macros. */ -#define MP_LIMB_T_SWAP(x, y) \ - do { \ - mp_limb_t __mp_limb_t_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_limb_t_swap__tmp; \ - } while (0) -#define MP_SIZE_T_SWAP(x, y) \ - do { \ - mp_size_t __mp_size_t_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_size_t_swap__tmp; \ - } while (0) -#define MP_BITCNT_T_SWAP(x,y) \ - do { \ - mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_bitcnt_t_swap__tmp; \ - } while (0) -#define MP_PTR_SWAP(x, y) \ - do { \ - mp_ptr __mp_ptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_ptr_swap__tmp; \ - } while (0) -#define MP_SRCPTR_SWAP(x, y) \ - do { \ - mp_srcptr __mp_srcptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_srcptr_swap__tmp; \ - } while (0) - -#define MPN_PTR_SWAP(xp,xs, yp,ys) \ - do { \ - MP_PTR_SWAP (xp, yp); \ - MP_SIZE_T_SWAP (xs, ys); \ - } while(0) -#define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \ - do { \ - MP_SRCPTR_SWAP (xp, yp); \ - MP_SIZE_T_SWAP (xs, ys); \ - } while(0) - -#define MPZ_PTR_SWAP(x, y) \ - do { \ - mpz_ptr __mpz_ptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mpz_ptr_swap__tmp; \ - } while (0) -#define MPZ_SRCPTR_SWAP(x, y) \ - do { \ - mpz_srcptr __mpz_srcptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mpz_srcptr_swap__tmp; \ - } while (0) - -const int mp_bits_per_limb = GMP_LIMB_BITS; - - -/* Memory allocation and other helper functions. 
*/ -static void -gmp_die (const char *msg) -{ - fprintf (stderr, "%s\n", msg); - abort(); -} - -static void * -gmp_default_alloc (size_t size) -{ - void *p; - - assert (size > 0); - - p = malloc (size); - if (!p) - gmp_die("gmp_default_alloc: Virtual memory exhausted."); - - return p; -} - -static void * -gmp_default_realloc (void *old, size_t unused_old_size, size_t new_size) -{ - void * p; - - p = realloc (old, new_size); - - if (!p) - gmp_die("gmp_default_realloc: Virtual memory exhausted."); - - return p; -} - -static void -gmp_default_free (void *p, size_t unused_size) -{ - free (p); -} - -static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc; -static void * (*gmp_reallocate_func) (void *, size_t, size_t) = gmp_default_realloc; -static void (*gmp_free_func) (void *, size_t) = gmp_default_free; - -void -mp_get_memory_functions (void *(**alloc_func) (size_t), - void *(**realloc_func) (void *, size_t, size_t), - void (**free_func) (void *, size_t)) -{ - if (alloc_func) - *alloc_func = gmp_allocate_func; - - if (realloc_func) - *realloc_func = gmp_reallocate_func; - - if (free_func) - *free_func = gmp_free_func; -} - -void -mp_set_memory_functions (void *(*alloc_func) (size_t), - void *(*realloc_func) (void *, size_t, size_t), - void (*free_func) (void *, size_t)) -{ - if (!alloc_func) - alloc_func = gmp_default_alloc; - if (!realloc_func) - realloc_func = gmp_default_realloc; - if (!free_func) - free_func = gmp_default_free; - - gmp_allocate_func = alloc_func; - gmp_reallocate_func = realloc_func; - gmp_free_func = free_func; -} - -#define gmp_alloc(size) ((*gmp_allocate_func)((size))) -#define gmp_free(p, size) ((*gmp_free_func) ((p), (size))) -#define gmp_realloc(ptr, old_size, size) ((*gmp_reallocate_func)(ptr, old_size, size)) - -static mp_ptr -gmp_alloc_limbs (mp_size_t size) -{ - return (mp_ptr) gmp_alloc (size * sizeof (mp_limb_t)); -} - -static mp_ptr -gmp_realloc_limbs (mp_ptr old, mp_size_t old_size, mp_size_t size) -{ - assert (size > 0); - return (mp_ptr) gmp_realloc (old, old_size * sizeof (mp_limb_t), size * sizeof (mp_limb_t)); -} - -static void -gmp_free_limbs (mp_ptr old, mp_size_t size) -{ - gmp_free (old, size * sizeof (mp_limb_t)); -} - - -/* MPN interface */ - -void -mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n) -{ - mp_size_t i; - for (i = 0; i < n; i++) - d[i] = s[i]; -} - -void -mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n) -{ - while (--n >= 0) - d[n] = s[n]; -} - -int -mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - while (--n >= 0) - { - if (ap[n] != bp[n]) - return ap[n] > bp[n] ? 1 : -1; - } - return 0; -} - -static int -mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) -{ - if (an != bn) - return an < bn ? 
-1 : 1; - else - return mpn_cmp (ap, bp, an); -} - -static mp_size_t -mpn_normalized_size (mp_srcptr xp, mp_size_t n) -{ - while (n > 0 && xp[n-1] == 0) - --n; - return n; -} - -int -mpn_zero_p(mp_srcptr rp, mp_size_t n) -{ - return mpn_normalized_size (rp, n) == 0; -} - -void -mpn_zero (mp_ptr rp, mp_size_t n) -{ - while (--n >= 0) - rp[n] = 0; -} - -mp_limb_t -mpn_add_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) -{ - mp_size_t i; - - assert (n > 0); - i = 0; - do - { - mp_limb_t r = ap[i] + b; - /* Carry out */ - b = (r < b); - rp[i] = r; - } - while (++i < n); - - return b; -} - -mp_limb_t -mpn_add_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - mp_size_t i; - mp_limb_t cy; - - for (i = 0, cy = 0; i < n; i++) - { - mp_limb_t a, b, r; - a = ap[i]; b = bp[i]; - r = a + cy; - cy = (r < cy); - r += b; - cy += (r < b); - rp[i] = r; - } - return cy; -} - -mp_limb_t -mpn_add (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) -{ - mp_limb_t cy; - - assert (an >= bn); - - cy = mpn_add_n (rp, ap, bp, bn); - if (an > bn) - cy = mpn_add_1 (rp + bn, ap + bn, an - bn, cy); - return cy; -} - -mp_limb_t -mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) -{ - mp_size_t i; - - assert (n > 0); - - i = 0; - do - { - mp_limb_t a = ap[i]; - /* Carry out */ - mp_limb_t cy = a < b; - rp[i] = a - b; - b = cy; - } - while (++i < n); - - return b; -} - -mp_limb_t -mpn_sub_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - mp_size_t i; - mp_limb_t cy; - - for (i = 0, cy = 0; i < n; i++) - { - mp_limb_t a, b; - a = ap[i]; b = bp[i]; - b += cy; - cy = (b < cy); - cy += (a < b); - rp[i] = a - b; - } - return cy; -} - -mp_limb_t -mpn_sub (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) -{ - mp_limb_t cy; - - assert (an >= bn); - - cy = mpn_sub_n (rp, ap, bp, bn); - if (an > bn) - cy = mpn_sub_1 (rp + bn, ap + bn, an - bn, cy); - return cy; -} - -mp_limb_t -mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) -{ - mp_limb_t ul, cl, hpl, lpl; - - assert (n >= 1); - - cl = 0; - do - { - ul = *up++; - gmp_umul_ppmm (hpl, lpl, ul, vl); - - lpl += cl; - cl = (lpl < cl) + hpl; - - *rp++ = lpl; - } - while (--n != 0); - - return cl; -} - -mp_limb_t -mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) -{ - mp_limb_t ul, cl, hpl, lpl, rl; - - assert (n >= 1); - - cl = 0; - do - { - ul = *up++; - gmp_umul_ppmm (hpl, lpl, ul, vl); - - lpl += cl; - cl = (lpl < cl) + hpl; - - rl = *rp; - lpl = rl + lpl; - cl += lpl < rl; - *rp++ = lpl; - } - while (--n != 0); - - return cl; -} - -mp_limb_t -mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) -{ - mp_limb_t ul, cl, hpl, lpl, rl; - - assert (n >= 1); - - cl = 0; - do - { - ul = *up++; - gmp_umul_ppmm (hpl, lpl, ul, vl); - - lpl += cl; - cl = (lpl < cl) + hpl; - - rl = *rp; - lpl = rl - lpl; - cl += lpl > rl; - *rp++ = lpl; - } - while (--n != 0); - - return cl; -} - -mp_limb_t -mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn) -{ - assert (un >= vn); - assert (vn >= 1); - assert (!GMP_MPN_OVERLAP_P(rp, un + vn, up, un)); - assert (!GMP_MPN_OVERLAP_P(rp, un + vn, vp, vn)); - - /* We first multiply by the low order limb. This result can be - stored, not added, to rp. We also avoid a loop for zeroing this - way. */ - - rp[un] = mpn_mul_1 (rp, up, un, vp[0]); - - /* Now accumulate the product of up[] and the next higher limb from - vp[]. 
*/ - - while (--vn >= 1) - { - rp += 1, vp += 1; - rp[un] = mpn_addmul_1 (rp, up, un, vp[0]); - } - return rp[un]; -} - -void -mpn_mul_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - mpn_mul (rp, ap, n, bp, n); -} - -void -mpn_sqr (mp_ptr rp, mp_srcptr ap, mp_size_t n) -{ - mpn_mul (rp, ap, n, ap, n); -} - -mp_limb_t -mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) -{ - mp_limb_t high_limb, low_limb; - unsigned int tnc; - mp_limb_t retval; - - assert (n >= 1); - assert (cnt >= 1); - assert (cnt < GMP_LIMB_BITS); - - up += n; - rp += n; - - tnc = GMP_LIMB_BITS - cnt; - low_limb = *--up; - retval = low_limb >> tnc; - high_limb = (low_limb << cnt); - - while (--n != 0) - { - low_limb = *--up; - *--rp = high_limb | (low_limb >> tnc); - high_limb = (low_limb << cnt); - } - *--rp = high_limb; - - return retval; -} - -mp_limb_t -mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) -{ - mp_limb_t high_limb, low_limb; - unsigned int tnc; - mp_limb_t retval; - - assert (n >= 1); - assert (cnt >= 1); - assert (cnt < GMP_LIMB_BITS); - - tnc = GMP_LIMB_BITS - cnt; - high_limb = *up++; - retval = (high_limb << tnc); - low_limb = high_limb >> cnt; - - while (--n != 0) - { - high_limb = *up++; - *rp++ = low_limb | (high_limb << tnc); - low_limb = high_limb >> cnt; - } - *rp = low_limb; - - return retval; -} - -static mp_bitcnt_t -mpn_common_scan (mp_limb_t limb, mp_size_t i, mp_srcptr up, mp_size_t un, - mp_limb_t ux) -{ - unsigned cnt; - - assert (ux == 0 || ux == GMP_LIMB_MAX); - assert (0 <= i && i <= un ); - - while (limb == 0) - { - i++; - if (i == un) - return (ux == 0 ? ~(mp_bitcnt_t) 0 : un * GMP_LIMB_BITS); - limb = ux ^ up[i]; - } - gmp_ctz (cnt, limb); - return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt; -} - -mp_bitcnt_t -mpn_scan1 (mp_srcptr ptr, mp_bitcnt_t bit) -{ - mp_size_t i; - i = bit / GMP_LIMB_BITS; - - return mpn_common_scan ( ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), - i, ptr, i, 0); -} - -mp_bitcnt_t -mpn_scan0 (mp_srcptr ptr, mp_bitcnt_t bit) -{ - mp_size_t i; - i = bit / GMP_LIMB_BITS; - - return mpn_common_scan (~ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), - i, ptr, i, GMP_LIMB_MAX); -} - -void -mpn_com (mp_ptr rp, mp_srcptr up, mp_size_t n) -{ - while (--n >= 0) - *rp++ = ~ *up++; -} - -mp_limb_t -mpn_neg (mp_ptr rp, mp_srcptr up, mp_size_t n) -{ - while (*up == 0) - { - *rp = 0; - if (!--n) - return 0; - ++up; ++rp; - } - *rp = - *up; - mpn_com (++rp, ++up, --n); - return 1; -} - - -/* MPN division interface. */ - -/* The 3/2 inverse is defined as - - m = floor( (B^3-1) / (B u1 + u0)) - B -*/ -mp_limb_t -mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0) -{ - mp_limb_t r, m; - - { - mp_limb_t p, ql; - unsigned ul, uh, qh; - - assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); - /* For notation, let b denote the half-limb base, so that B = b^2. - Split u1 = b uh + ul. */ - ul = u1 & GMP_LLIMB_MASK; - uh = u1 >> (GMP_LIMB_BITS / 2); - - /* Approximation of the high half of quotient. Differs from the 2/1 - inverse of the half limb uh, since we have already subtracted - u0. */ - qh = (u1 ^ GMP_LIMB_MAX) / uh; - - /* Adjust to get a half-limb 3/2 inverse, i.e., we want - - qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u - = floor( (b (~u) + b-1) / u), - - and the remainder - - r = b (~u) + b-1 - qh (b uh + ul) - = b (~u - qh uh) + b-1 - qh ul - - Subtraction of qh ul may underflow, which implies adjustments. - But by normalization, 2 u >= B > qh ul, so we need to adjust by - at most 2. 
- */ - - r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK; - - p = (mp_limb_t) qh * ul; - /* Adjustment steps taken from udiv_qrnnd_c */ - if (r < p) - { - qh--; - r += u1; - if (r >= u1) /* i.e. we didn't get carry when adding to r */ - if (r < p) - { - qh--; - r += u1; - } - } - r -= p; - - /* Low half of the quotient is - - ql = floor ( (b r + b-1) / u1). - - This is a 3/2 division (on half-limbs), for which qh is a - suitable inverse. */ - - p = (r >> (GMP_LIMB_BITS / 2)) * qh + r; - /* Unlike full-limb 3/2, we can add 1 without overflow. For this to - work, it is essential that ql is a full mp_limb_t. */ - ql = (p >> (GMP_LIMB_BITS / 2)) + 1; - - /* By the 3/2 trick, we don't need the high half limb. */ - r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1; - - if (r >= (GMP_LIMB_MAX & (p << (GMP_LIMB_BITS / 2)))) - { - ql--; - r += u1; - } - m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql; - if (r >= u1) - { - m++; - r -= u1; - } - } - - /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a - 3/2 inverse. */ - if (u0 > 0) - { - mp_limb_t th, tl; - r = ~r; - r += u0; - if (r < u0) - { - m--; - if (r >= u1) - { - m--; - r -= u1; - } - r -= u1; - } - gmp_umul_ppmm (th, tl, u0, m); - r += th; - if (r < th) - { - m--; - m -= ((r > u1) | ((r == u1) & (tl > u0))); - } - } - - return m; -} - -struct gmp_div_inverse -{ - /* Normalization shift count. */ - unsigned shift; - /* Normalized divisor (d0 unused for mpn_div_qr_1) */ - mp_limb_t d1, d0; - /* Inverse, for 2/1 or 3/2. */ - mp_limb_t di; -}; - -static void -mpn_div_qr_1_invert (struct gmp_div_inverse *inv, mp_limb_t d) -{ - unsigned shift; - - assert (d > 0); - gmp_clz (shift, d); - inv->shift = shift; - inv->d1 = d << shift; - inv->di = mpn_invert_limb (inv->d1); -} - -static void -mpn_div_qr_2_invert (struct gmp_div_inverse *inv, - mp_limb_t d1, mp_limb_t d0) -{ - unsigned shift; - - assert (d1 > 0); - gmp_clz (shift, d1); - inv->shift = shift; - if (shift > 0) - { - d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); - d0 <<= shift; - } - inv->d1 = d1; - inv->d0 = d0; - inv->di = mpn_invert_3by2 (d1, d0); -} - -static void -mpn_div_qr_invert (struct gmp_div_inverse *inv, - mp_srcptr dp, mp_size_t dn) -{ - assert (dn > 0); - - if (dn == 1) - mpn_div_qr_1_invert (inv, dp[0]); - else if (dn == 2) - mpn_div_qr_2_invert (inv, dp[1], dp[0]); - else - { - unsigned shift; - mp_limb_t d1, d0; - - d1 = dp[dn-1]; - d0 = dp[dn-2]; - assert (d1 > 0); - gmp_clz (shift, d1); - inv->shift = shift; - if (shift > 0) - { - d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); - d0 = (d0 << shift) | (dp[dn-3] >> (GMP_LIMB_BITS - shift)); - } - inv->d1 = d1; - inv->d0 = d0; - inv->di = mpn_invert_3by2 (d1, d0); - } -} - -/* Not matching current public gmp interface, rather corresponding to - the sbpi1_div_* functions. */ -static mp_limb_t -mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn, - const struct gmp_div_inverse *inv) -{ - mp_limb_t d, di; - mp_limb_t r; - mp_ptr tp = NULL; - mp_size_t tn = 0; - - if (inv->shift > 0) - { - /* Shift, reusing qp area if possible. In-place shift if qp == np. 
*/ - tp = qp; - if (!tp) - { - tn = nn; - tp = gmp_alloc_limbs (tn); - } - r = mpn_lshift (tp, np, nn, inv->shift); - np = tp; - } - else - r = 0; - - d = inv->d1; - di = inv->di; - while (--nn >= 0) - { - mp_limb_t q; - - gmp_udiv_qrnnd_preinv (q, r, r, np[nn], d, di); - if (qp) - qp[nn] = q; - } - if (tn) - gmp_free_limbs (tp, tn); - - return r >> inv->shift; -} - -static void -mpn_div_qr_2_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, - const struct gmp_div_inverse *inv) -{ - unsigned shift; - mp_size_t i; - mp_limb_t d1, d0, di, r1, r0; - - assert (nn >= 2); - shift = inv->shift; - d1 = inv->d1; - d0 = inv->d0; - di = inv->di; - - if (shift > 0) - r1 = mpn_lshift (np, np, nn, shift); - else - r1 = 0; - - r0 = np[nn - 1]; - - i = nn - 2; - do - { - mp_limb_t n0, q; - n0 = np[i]; - gmp_udiv_qr_3by2 (q, r1, r0, r1, r0, n0, d1, d0, di); - - if (qp) - qp[i] = q; - } - while (--i >= 0); - - if (shift > 0) - { - assert ((r0 & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - shift))) == 0); - r0 = (r0 >> shift) | (r1 << (GMP_LIMB_BITS - shift)); - r1 >>= shift; - } - - np[1] = r1; - np[0] = r0; -} - -static void -mpn_div_qr_pi1 (mp_ptr qp, - mp_ptr np, mp_size_t nn, mp_limb_t n1, - mp_srcptr dp, mp_size_t dn, - mp_limb_t dinv) -{ - mp_size_t i; - - mp_limb_t d1, d0; - mp_limb_t cy, cy1; - mp_limb_t q; - - assert (dn > 2); - assert (nn >= dn); - - d1 = dp[dn - 1]; - d0 = dp[dn - 2]; - - assert ((d1 & GMP_LIMB_HIGHBIT) != 0); - /* Iteration variable is the index of the q limb. - * - * We divide - * by - */ - - i = nn - dn; - do - { - mp_limb_t n0 = np[dn-1+i]; - - if (n1 == d1 && n0 == d0) - { - q = GMP_LIMB_MAX; - mpn_submul_1 (np+i, dp, dn, q); - n1 = np[dn-1+i]; /* update n1, last loop's value will now be invalid */ - } - else - { - gmp_udiv_qr_3by2 (q, n1, n0, n1, n0, np[dn-2+i], d1, d0, dinv); - - cy = mpn_submul_1 (np + i, dp, dn-2, q); - - cy1 = n0 < cy; - n0 = n0 - cy; - cy = n1 < cy1; - n1 = n1 - cy1; - np[dn-2+i] = n0; - - if (cy != 0) - { - n1 += d1 + mpn_add_n (np + i, np + i, dp, dn - 1); - q--; - } - } - - if (qp) - qp[i] = q; - } - while (--i >= 0); - - np[dn - 1] = n1; -} - -static void -mpn_div_qr_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, - mp_srcptr dp, mp_size_t dn, - const struct gmp_div_inverse *inv) -{ - assert (dn > 0); - assert (nn >= dn); - - if (dn == 1) - np[0] = mpn_div_qr_1_preinv (qp, np, nn, inv); - else if (dn == 2) - mpn_div_qr_2_preinv (qp, np, nn, inv); - else - { - mp_limb_t nh; - unsigned shift; - - assert (inv->d1 == dp[dn-1]); - assert (inv->d0 == dp[dn-2]); - assert ((inv->d1 & GMP_LIMB_HIGHBIT) != 0); - - shift = inv->shift; - if (shift > 0) - nh = mpn_lshift (np, np, nn, shift); - else - nh = 0; - - mpn_div_qr_pi1 (qp, np, nn, nh, dp, dn, inv->di); - - if (shift > 0) - gmp_assert_nocarry (mpn_rshift (np, np, dn, shift)); - } -} - -static void -mpn_div_qr (mp_ptr qp, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn) -{ - struct gmp_div_inverse inv; - mp_ptr tp = NULL; - - assert (dn > 0); - assert (nn >= dn); - - mpn_div_qr_invert (&inv, dp, dn); - if (dn > 2 && inv.shift > 0) - { - tp = gmp_alloc_limbs (dn); - gmp_assert_nocarry (mpn_lshift (tp, dp, dn, inv.shift)); - dp = tp; - } - mpn_div_qr_preinv (qp, np, nn, dp, dn, &inv); - if (tp) - gmp_free_limbs (tp, dn); -} - - -/* MPN base conversion. 
*/ -static unsigned -mpn_base_power_of_two_p (unsigned b) -{ - switch (b) - { - case 2: return 1; - case 4: return 2; - case 8: return 3; - case 16: return 4; - case 32: return 5; - case 64: return 6; - case 128: return 7; - case 256: return 8; - default: return 0; - } -} - -struct mpn_base_info -{ - /* bb is the largest power of the base which fits in one limb, and - exp is the corresponding exponent. */ - unsigned exp; - mp_limb_t bb; -}; - -static void -mpn_get_base_info (struct mpn_base_info *info, mp_limb_t b) -{ - mp_limb_t m; - mp_limb_t p; - unsigned exp; - - m = GMP_LIMB_MAX / b; - for (exp = 1, p = b; p <= m; exp++) - p *= b; - - info->exp = exp; - info->bb = p; -} - -static mp_bitcnt_t -mpn_limb_size_in_base_2 (mp_limb_t u) -{ - unsigned shift; - - assert (u > 0); - gmp_clz (shift, u); - return GMP_LIMB_BITS - shift; -} - -static size_t -mpn_get_str_bits (unsigned char *sp, unsigned bits, mp_srcptr up, mp_size_t un) -{ - unsigned char mask; - size_t sn, j; - mp_size_t i; - unsigned shift; - - sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]) - + bits - 1) / bits; - - mask = (1U << bits) - 1; - - for (i = 0, j = sn, shift = 0; j-- > 0;) - { - unsigned char digit = up[i] >> shift; - - shift += bits; - - if (shift >= GMP_LIMB_BITS && ++i < un) - { - shift -= GMP_LIMB_BITS; - digit |= up[i] << (bits - shift); - } - sp[j] = digit & mask; - } - return sn; -} - -/* We generate digits from the least significant end, and reverse at - the end. */ -static size_t -mpn_limb_get_str (unsigned char *sp, mp_limb_t w, - const struct gmp_div_inverse *binv) -{ - mp_size_t i; - for (i = 0; w > 0; i++) - { - mp_limb_t h, l, r; - - h = w >> (GMP_LIMB_BITS - binv->shift); - l = w << binv->shift; - - gmp_udiv_qrnnd_preinv (w, r, h, l, binv->d1, binv->di); - assert ((r & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - binv->shift))) == 0); - r >>= binv->shift; - - sp[i] = r; - } - return i; -} - -static size_t -mpn_get_str_other (unsigned char *sp, - int base, const struct mpn_base_info *info, - mp_ptr up, mp_size_t un) -{ - struct gmp_div_inverse binv; - size_t sn; - size_t i; - - mpn_div_qr_1_invert (&binv, base); - - sn = 0; - - if (un > 1) - { - struct gmp_div_inverse bbinv; - mpn_div_qr_1_invert (&bbinv, info->bb); - - do - { - mp_limb_t w; - size_t done; - w = mpn_div_qr_1_preinv (up, up, un, &bbinv); - un -= (up[un-1] == 0); - done = mpn_limb_get_str (sp + sn, w, &binv); - - for (sn += done; done < info->exp; done++) - sp[sn++] = 0; - } - while (un > 1); - } - sn += mpn_limb_get_str (sp + sn, up[0], &binv); - - /* Reverse order */ - for (i = 0; 2*i + 1 < sn; i++) - { - unsigned char t = sp[i]; - sp[i] = sp[sn - i - 1]; - sp[sn - i - 1] = t; - } - - return sn; -} - -size_t -mpn_get_str (unsigned char *sp, int base, mp_ptr up, mp_size_t un) -{ - unsigned bits; - - assert (un > 0); - assert (up[un-1] > 0); - - bits = mpn_base_power_of_two_p (base); - if (bits) - return mpn_get_str_bits (sp, bits, up, un); - else - { - struct mpn_base_info info; - - mpn_get_base_info (&info, base); - return mpn_get_str_other (sp, base, &info, up, un); - } -} - -static mp_size_t -mpn_set_str_bits (mp_ptr rp, const unsigned char *sp, size_t sn, - unsigned bits) -{ - mp_size_t rn; - mp_limb_t limb; - unsigned shift; - - for (limb = 0, rn = 0, shift = 0; sn-- > 0; ) - { - limb |= (mp_limb_t) sp[sn] << shift; - shift += bits; - if (shift >= GMP_LIMB_BITS) - { - shift -= GMP_LIMB_BITS; - rp[rn++] = limb; - /* Next line is correct also if shift == 0, - bits == 8, and mp_limb_t == unsigned char. 
*/ - limb = (unsigned int) sp[sn] >> (bits - shift); - } - } - if (limb != 0) - rp[rn++] = limb; - else - rn = mpn_normalized_size (rp, rn); - return rn; -} - -/* Result is usually normalized, except for all-zero input, in which - case a single zero limb is written at *RP, and 1 is returned. */ -static mp_size_t -mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn, - mp_limb_t b, const struct mpn_base_info *info) -{ - mp_size_t rn; - mp_limb_t w; - unsigned k; - size_t j; - - assert (sn > 0); - - k = 1 + (sn - 1) % info->exp; - - j = 0; - w = sp[j++]; - while (--k != 0) - w = w * b + sp[j++]; - - rp[0] = w; - - for (rn = 1; j < sn;) - { - mp_limb_t cy; - - w = sp[j++]; - for (k = 1; k < info->exp; k++) - w = w * b + sp[j++]; - - cy = mpn_mul_1 (rp, rp, rn, info->bb); - cy += mpn_add_1 (rp, rp, rn, w); - if (cy > 0) - rp[rn++] = cy; - } - assert (j == sn); - - return rn; -} - -mp_size_t -mpn_set_str (mp_ptr rp, const unsigned char *sp, size_t sn, int base) -{ - unsigned bits; - - if (sn == 0) - return 0; - - bits = mpn_base_power_of_two_p (base); - if (bits) - return mpn_set_str_bits (rp, sp, sn, bits); - else - { - struct mpn_base_info info; - - mpn_get_base_info (&info, base); - return mpn_set_str_other (rp, sp, sn, base, &info); - } -} - - -/* MPZ interface */ -void -mpz_init (mpz_t r) -{ - static const mp_limb_t dummy_limb = GMP_LIMB_MAX & 0xc1a0; - - r->_mp_alloc = 0; - r->_mp_size = 0; - r->_mp_d = (mp_ptr) &dummy_limb; -} - -/* The utility of this function is a bit limited, since many functions - assigns the result variable using mpz_swap. */ -void -mpz_init2 (mpz_t r, mp_bitcnt_t bits) -{ - mp_size_t rn; - - bits -= (bits != 0); /* Round down, except if 0 */ - rn = 1 + bits / GMP_LIMB_BITS; - - r->_mp_alloc = rn; - r->_mp_size = 0; - r->_mp_d = gmp_alloc_limbs (rn); -} - -void -mpz_clear (mpz_t r) -{ - if (r->_mp_alloc) - gmp_free_limbs (r->_mp_d, r->_mp_alloc); -} - -static mp_ptr -mpz_realloc (mpz_t r, mp_size_t size) -{ - size = GMP_MAX (size, 1); - - if (r->_mp_alloc) - r->_mp_d = gmp_realloc_limbs (r->_mp_d, r->_mp_alloc, size); - else - r->_mp_d = gmp_alloc_limbs (size); - r->_mp_alloc = size; - - if (GMP_ABS (r->_mp_size) > size) - r->_mp_size = 0; - - return r->_mp_d; -} - -/* Realloc for an mpz_t WHAT if it has less than NEEDED limbs. */ -#define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc \ - ? mpz_realloc(z,n) \ - : (z)->_mp_d) - -/* MPZ assignment and basic conversions. 
*/ -void -mpz_set_si (mpz_t r, signed long int x) -{ - if (x >= 0) - mpz_set_ui (r, x); - else /* (x < 0) */ - if (GMP_LIMB_BITS < GMP_ULONG_BITS) - { - mpz_set_ui (r, GMP_NEG_CAST (unsigned long int, x)); - mpz_neg (r, r); - } - else - { - r->_mp_size = -1; - MPZ_REALLOC (r, 1)[0] = GMP_NEG_CAST (unsigned long int, x); - } -} - -void -mpz_set_ui (mpz_t r, unsigned long int x) -{ - if (x > 0) - { - r->_mp_size = 1; - MPZ_REALLOC (r, 1)[0] = x; - if (GMP_LIMB_BITS < GMP_ULONG_BITS) - { - int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; - while (x >>= LOCAL_GMP_LIMB_BITS) - { - ++ r->_mp_size; - MPZ_REALLOC (r, r->_mp_size)[r->_mp_size - 1] = x; - } - } - } - else - r->_mp_size = 0; -} - -void -mpz_set (mpz_t r, const mpz_t x) -{ - /* Allow the NOP r == x */ - if (r != x) - { - mp_size_t n; - mp_ptr rp; - - n = GMP_ABS (x->_mp_size); - rp = MPZ_REALLOC (r, n); - - mpn_copyi (rp, x->_mp_d, n); - r->_mp_size = x->_mp_size; - } -} - -void -mpz_init_set_si (mpz_t r, signed long int x) -{ - mpz_init (r); - mpz_set_si (r, x); -} - -void -mpz_init_set_ui (mpz_t r, unsigned long int x) -{ - mpz_init (r); - mpz_set_ui (r, x); -} - -void -mpz_init_set (mpz_t r, const mpz_t x) -{ - mpz_init (r); - mpz_set (r, x); -} - -int -mpz_fits_slong_p (const mpz_t u) -{ - return mpz_cmp_si (u, LONG_MAX) <= 0 && mpz_cmp_si (u, LONG_MIN) >= 0; -} - -static int -mpn_absfits_ulong_p (mp_srcptr up, mp_size_t un) -{ - int ulongsize = GMP_ULONG_BITS / GMP_LIMB_BITS; - mp_limb_t ulongrem = 0; - - if (GMP_ULONG_BITS % GMP_LIMB_BITS != 0) - ulongrem = (mp_limb_t) (ULONG_MAX >> GMP_LIMB_BITS * ulongsize) + 1; - - return un <= ulongsize || (up[ulongsize] < ulongrem && un == ulongsize + 1); -} - -int -mpz_fits_ulong_p (const mpz_t u) -{ - mp_size_t us = u->_mp_size; - - return us >= 0 && mpn_absfits_ulong_p (u->_mp_d, us); -} - -int -mpz_fits_sint_p (const mpz_t u) -{ - return mpz_cmp_si (u, INT_MAX) <= 0 && mpz_cmp_si (u, INT_MIN) >= 0; -} - -int -mpz_fits_uint_p (const mpz_t u) -{ - return u->_mp_size >= 0 && mpz_cmpabs_ui (u, UINT_MAX) <= 0; -} - -int -mpz_fits_sshort_p (const mpz_t u) -{ - return mpz_cmp_si (u, SHRT_MAX) <= 0 && mpz_cmp_si (u, SHRT_MIN) >= 0; -} - -int -mpz_fits_ushort_p (const mpz_t u) -{ - return u->_mp_size >= 0 && mpz_cmpabs_ui (u, USHRT_MAX) <= 0; -} - -long int -mpz_get_si (const mpz_t u) -{ - unsigned long r = mpz_get_ui (u); - unsigned long c = -LONG_MAX - LONG_MIN; - - if (u->_mp_size < 0) - /* This expression is necessary to properly handle -LONG_MIN */ - return -(long) c - (long) ((r - c) & LONG_MAX); - else - return (long) (r & LONG_MAX); -} - -unsigned long int -mpz_get_ui (const mpz_t u) -{ - if (GMP_LIMB_BITS < GMP_ULONG_BITS) - { - int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; - unsigned long r = 0; - mp_size_t n = GMP_ABS (u->_mp_size); - n = GMP_MIN (n, 1 + (mp_size_t) (GMP_ULONG_BITS - 1) / GMP_LIMB_BITS); - while (--n >= 0) - r = (r << LOCAL_GMP_LIMB_BITS) + u->_mp_d[n]; - return r; - } - - return u->_mp_size == 0 ? 
0 : u->_mp_d[0]; -} - -size_t -mpz_size (const mpz_t u) -{ - return GMP_ABS (u->_mp_size); -} - -mp_limb_t -mpz_getlimbn (const mpz_t u, mp_size_t n) -{ - if (n >= 0 && n < GMP_ABS (u->_mp_size)) - return u->_mp_d[n]; - else - return 0; -} - -void -mpz_realloc2 (mpz_t x, mp_bitcnt_t n) -{ - mpz_realloc (x, 1 + (n - (n != 0)) / GMP_LIMB_BITS); -} - -mp_srcptr -mpz_limbs_read (mpz_srcptr x) -{ - return x->_mp_d; -} - -mp_ptr -mpz_limbs_modify (mpz_t x, mp_size_t n) -{ - assert (n > 0); - return MPZ_REALLOC (x, n); -} - -mp_ptr -mpz_limbs_write (mpz_t x, mp_size_t n) -{ - return mpz_limbs_modify (x, n); -} - -void -mpz_limbs_finish (mpz_t x, mp_size_t xs) -{ - mp_size_t xn; - xn = mpn_normalized_size (x->_mp_d, GMP_ABS (xs)); - x->_mp_size = xs < 0 ? -xn : xn; -} - -static mpz_srcptr -mpz_roinit_normal_n (mpz_t x, mp_srcptr xp, mp_size_t xs) -{ - x->_mp_alloc = 0; - x->_mp_d = (mp_ptr) xp; - x->_mp_size = xs; - return x; -} - -mpz_srcptr -mpz_roinit_n (mpz_t x, mp_srcptr xp, mp_size_t xs) -{ - mpz_roinit_normal_n (x, xp, xs); - mpz_limbs_finish (x, xs); - return x; -} - - -/* Conversions and comparison to double. */ -void -mpz_set_d (mpz_t r, double x) -{ - int sign; - mp_ptr rp; - mp_size_t rn, i; - double B; - double Bi; - mp_limb_t f; - - /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is - zero or infinity. */ - if (x != x || x == x * 0.5) - { - r->_mp_size = 0; - return; - } - - sign = x < 0.0 ; - if (sign) - x = - x; - - if (x < 1.0) - { - r->_mp_size = 0; - return; - } - B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); - Bi = 1.0 / B; - for (rn = 1; x >= B; rn++) - x *= Bi; - - rp = MPZ_REALLOC (r, rn); - - f = (mp_limb_t) x; - x -= f; - assert (x < 1.0); - i = rn-1; - rp[i] = f; - while (--i >= 0) - { - x = B * x; - f = (mp_limb_t) x; - x -= f; - assert (x < 1.0); - rp[i] = f; - } - - r->_mp_size = sign ? - rn : rn; -} - -void -mpz_init_set_d (mpz_t r, double x) -{ - mpz_init (r); - mpz_set_d (r, x); -} - -double -mpz_get_d (const mpz_t u) -{ - int m; - mp_limb_t l; - mp_size_t un; - double x; - double B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); - - un = GMP_ABS (u->_mp_size); - - if (un == 0) - return 0.0; - - l = u->_mp_d[--un]; - gmp_clz (m, l); - m = m + GMP_DBL_MANT_BITS - GMP_LIMB_BITS; - if (m < 0) - l &= GMP_LIMB_MAX << -m; - - for (x = l; --un >= 0;) - { - x = B*x; - if (m > 0) { - l = u->_mp_d[un]; - m -= GMP_LIMB_BITS; - if (m < 0) - l &= GMP_LIMB_MAX << -m; - x += l; - } - } - - if (u->_mp_size < 0) - x = -x; - - return x; -} - -int -mpz_cmpabs_d (const mpz_t x, double d) -{ - mp_size_t xn; - double B, Bi; - mp_size_t i; - - xn = x->_mp_size; - d = GMP_ABS (d); - - if (xn != 0) - { - xn = GMP_ABS (xn); - - B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); - Bi = 1.0 / B; - - /* Scale d so it can be compared with the top limb. */ - for (i = 1; i < xn; i++) - d *= Bi; - - if (d >= B) - return -1; - - /* Compare floor(d) to top limb, subtract and cancel when equal. */ - for (i = xn; i-- > 0;) - { - mp_limb_t f, xl; - - f = (mp_limb_t) d; - xl = x->_mp_d[i]; - if (xl > f) - return 1; - else if (xl < f) - return -1; - d = B * (d - f); - } - } - return - (d > 0.0); -} - -int -mpz_cmp_d (const mpz_t x, double d) -{ - if (x->_mp_size < 0) - { - if (d >= 0.0) - return -1; - else - return -mpz_cmpabs_d (x, d); - } - else - { - if (d < 0.0) - return 1; - else - return mpz_cmpabs_d (x, d); - } -} - - -/* MPZ comparisons and the like. 
*/ -int -mpz_sgn (const mpz_t u) -{ - return GMP_CMP (u->_mp_size, 0); -} - -int -mpz_cmp_si (const mpz_t u, long v) -{ - mp_size_t usize = u->_mp_size; - - if (v >= 0) - return mpz_cmp_ui (u, v); - else if (usize >= 0) - return 1; - else - return - mpz_cmpabs_ui (u, GMP_NEG_CAST (unsigned long int, v)); -} - -int -mpz_cmp_ui (const mpz_t u, unsigned long v) -{ - mp_size_t usize = u->_mp_size; - - if (usize < 0) - return -1; - else - return mpz_cmpabs_ui (u, v); -} - -int -mpz_cmp (const mpz_t a, const mpz_t b) -{ - mp_size_t asize = a->_mp_size; - mp_size_t bsize = b->_mp_size; - - if (asize != bsize) - return (asize < bsize) ? -1 : 1; - else if (asize >= 0) - return mpn_cmp (a->_mp_d, b->_mp_d, asize); - else - return mpn_cmp (b->_mp_d, a->_mp_d, -asize); -} - -int -mpz_cmpabs_ui (const mpz_t u, unsigned long v) -{ - mp_size_t un = GMP_ABS (u->_mp_size); - - if (! mpn_absfits_ulong_p (u->_mp_d, un)) - return 1; - else - { - unsigned long uu = mpz_get_ui (u); - return GMP_CMP(uu, v); - } -} - -int -mpz_cmpabs (const mpz_t u, const mpz_t v) -{ - return mpn_cmp4 (u->_mp_d, GMP_ABS (u->_mp_size), - v->_mp_d, GMP_ABS (v->_mp_size)); -} - -void -mpz_abs (mpz_t r, const mpz_t u) -{ - mpz_set (r, u); - r->_mp_size = GMP_ABS (r->_mp_size); -} - -void -mpz_neg (mpz_t r, const mpz_t u) -{ - mpz_set (r, u); - r->_mp_size = -r->_mp_size; -} - -void -mpz_swap (mpz_t u, mpz_t v) -{ - MP_SIZE_T_SWAP (u->_mp_alloc, v->_mp_alloc); - MPN_PTR_SWAP (u->_mp_d, u->_mp_size, v->_mp_d, v->_mp_size); -} - - -/* MPZ addition and subtraction */ - - -void -mpz_add_ui (mpz_t r, const mpz_t a, unsigned long b) -{ - mpz_t bb; - mpz_init_set_ui (bb, b); - mpz_add (r, a, bb); - mpz_clear (bb); -} - -void -mpz_sub_ui (mpz_t r, const mpz_t a, unsigned long b) -{ - mpz_ui_sub (r, b, a); - mpz_neg (r, r); -} - -void -mpz_ui_sub (mpz_t r, unsigned long a, const mpz_t b) -{ - mpz_neg (r, b); - mpz_add_ui (r, r, a); -} - -static mp_size_t -mpz_abs_add (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t an = GMP_ABS (a->_mp_size); - mp_size_t bn = GMP_ABS (b->_mp_size); - mp_ptr rp; - mp_limb_t cy; - - if (an < bn) - { - MPZ_SRCPTR_SWAP (a, b); - MP_SIZE_T_SWAP (an, bn); - } - - rp = MPZ_REALLOC (r, an + 1); - cy = mpn_add (rp, a->_mp_d, an, b->_mp_d, bn); - - rp[an] = cy; - - return an + cy; -} - -static mp_size_t -mpz_abs_sub (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t an = GMP_ABS (a->_mp_size); - mp_size_t bn = GMP_ABS (b->_mp_size); - int cmp; - mp_ptr rp; - - cmp = mpn_cmp4 (a->_mp_d, an, b->_mp_d, bn); - if (cmp > 0) - { - rp = MPZ_REALLOC (r, an); - gmp_assert_nocarry (mpn_sub (rp, a->_mp_d, an, b->_mp_d, bn)); - return mpn_normalized_size (rp, an); - } - else if (cmp < 0) - { - rp = MPZ_REALLOC (r, bn); - gmp_assert_nocarry (mpn_sub (rp, b->_mp_d, bn, a->_mp_d, an)); - return -mpn_normalized_size (rp, bn); - } - else - return 0; -} - -void -mpz_add (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t rn; - - if ( (a->_mp_size ^ b->_mp_size) >= 0) - rn = mpz_abs_add (r, a, b); - else - rn = mpz_abs_sub (r, a, b); - - r->_mp_size = a->_mp_size >= 0 ? rn : - rn; -} - -void -mpz_sub (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t rn; - - if ( (a->_mp_size ^ b->_mp_size) >= 0) - rn = mpz_abs_sub (r, a, b); - else - rn = mpz_abs_add (r, a, b); - - r->_mp_size = a->_mp_size >= 0 ? 
rn : - rn; -} - - -/* MPZ multiplication */ -void -mpz_mul_si (mpz_t r, const mpz_t u, long int v) -{ - if (v < 0) - { - mpz_mul_ui (r, u, GMP_NEG_CAST (unsigned long int, v)); - mpz_neg (r, r); - } - else - mpz_mul_ui (r, u, v); -} - -void -mpz_mul_ui (mpz_t r, const mpz_t u, unsigned long int v) -{ - mpz_t vv; - mpz_init_set_ui (vv, v); - mpz_mul (r, u, vv); - mpz_clear (vv); - return; -} - -void -mpz_mul (mpz_t r, const mpz_t u, const mpz_t v) -{ - int sign; - mp_size_t un, vn, rn; - mpz_t t; - mp_ptr tp; - - un = u->_mp_size; - vn = v->_mp_size; - - if (un == 0 || vn == 0) - { - r->_mp_size = 0; - return; - } - - sign = (un ^ vn) < 0; - - un = GMP_ABS (un); - vn = GMP_ABS (vn); - - mpz_init2 (t, (un + vn) * GMP_LIMB_BITS); - - tp = t->_mp_d; - if (un >= vn) - mpn_mul (tp, u->_mp_d, un, v->_mp_d, vn); - else - mpn_mul (tp, v->_mp_d, vn, u->_mp_d, un); - - rn = un + vn; - rn -= tp[rn-1] == 0; - - t->_mp_size = sign ? - rn : rn; - mpz_swap (r, t); - mpz_clear (t); -} - -void -mpz_mul_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bits) -{ - mp_size_t un, rn; - mp_size_t limbs; - unsigned shift; - mp_ptr rp; - - un = GMP_ABS (u->_mp_size); - if (un == 0) - { - r->_mp_size = 0; - return; - } - - limbs = bits / GMP_LIMB_BITS; - shift = bits % GMP_LIMB_BITS; - - rn = un + limbs + (shift > 0); - rp = MPZ_REALLOC (r, rn); - if (shift > 0) - { - mp_limb_t cy = mpn_lshift (rp + limbs, u->_mp_d, un, shift); - rp[rn-1] = cy; - rn -= (cy == 0); - } - else - mpn_copyd (rp + limbs, u->_mp_d, un); - - mpn_zero (rp, limbs); - - r->_mp_size = (u->_mp_size < 0) ? - rn : rn; -} - -void -mpz_addmul_ui (mpz_t r, const mpz_t u, unsigned long int v) -{ - mpz_t t; - mpz_init_set_ui (t, v); - mpz_mul (t, u, t); - mpz_add (r, r, t); - mpz_clear (t); -} - -void -mpz_submul_ui (mpz_t r, const mpz_t u, unsigned long int v) -{ - mpz_t t; - mpz_init_set_ui (t, v); - mpz_mul (t, u, t); - mpz_sub (r, r, t); - mpz_clear (t); -} - -void -mpz_addmul (mpz_t r, const mpz_t u, const mpz_t v) -{ - mpz_t t; - mpz_init (t); - mpz_mul (t, u, v); - mpz_add (r, r, t); - mpz_clear (t); -} - -void -mpz_submul (mpz_t r, const mpz_t u, const mpz_t v) -{ - mpz_t t; - mpz_init (t); - mpz_mul (t, u, v); - mpz_sub (r, r, t); - mpz_clear (t); -} - - -/* MPZ division */ -enum mpz_div_round_mode { GMP_DIV_FLOOR, GMP_DIV_CEIL, GMP_DIV_TRUNC }; - -/* Allows q or r to be zero. Returns 1 iff remainder is non-zero. */ -static int -mpz_div_qr (mpz_t q, mpz_t r, - const mpz_t n, const mpz_t d, enum mpz_div_round_mode mode) -{ - mp_size_t ns, ds, nn, dn, qs; - ns = n->_mp_size; - ds = d->_mp_size; - - if (ds == 0) - gmp_die("mpz_div_qr: Divide by zero."); - - if (ns == 0) - { - if (q) - q->_mp_size = 0; - if (r) - r->_mp_size = 0; - return 0; - } - - nn = GMP_ABS (ns); - dn = GMP_ABS (ds); - - qs = ds ^ ns; - - if (nn < dn) - { - if (mode == GMP_DIV_CEIL && qs >= 0) - { - /* q = 1, r = n - d */ - if (r) - mpz_sub (r, n, d); - if (q) - mpz_set_ui (q, 1); - } - else if (mode == GMP_DIV_FLOOR && qs < 0) - { - /* q = -1, r = n + d */ - if (r) - mpz_add (r, n, d); - if (q) - mpz_set_si (q, -1); - } - else - { - /* q = 0, r = d */ - if (r) - mpz_set (r, n); - if (q) - q->_mp_size = 0; - } - return 1; - } - else - { - mp_ptr np, qp; - mp_size_t qn, rn; - mpz_t tq, tr; - - mpz_init_set (tr, n); - np = tr->_mp_d; - - qn = nn - dn + 1; - - if (q) - { - mpz_init2 (tq, qn * GMP_LIMB_BITS); - qp = tq->_mp_d; - } - else - qp = NULL; - - mpn_div_qr (qp, np, nn, d->_mp_d, dn); - - if (qp) - { - qn -= (qp[qn-1] == 0); - - tq->_mp_size = qs < 0 ? 
-qn : qn; - } - rn = mpn_normalized_size (np, dn); - tr->_mp_size = ns < 0 ? - rn : rn; - - if (mode == GMP_DIV_FLOOR && qs < 0 && rn != 0) - { - if (q) - mpz_sub_ui (tq, tq, 1); - if (r) - mpz_add (tr, tr, d); - } - else if (mode == GMP_DIV_CEIL && qs >= 0 && rn != 0) - { - if (q) - mpz_add_ui (tq, tq, 1); - if (r) - mpz_sub (tr, tr, d); - } - - if (q) - { - mpz_swap (tq, q); - mpz_clear (tq); - } - if (r) - mpz_swap (tr, r); - - mpz_clear (tr); - - return rn != 0; - } -} - -void -mpz_cdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, r, n, d, GMP_DIV_CEIL); -} - -void -mpz_fdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, r, n, d, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, r, n, d, GMP_DIV_TRUNC); -} - -void -mpz_cdiv_q (mpz_t q, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, NULL, n, d, GMP_DIV_CEIL); -} - -void -mpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, NULL, n, d, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_q (mpz_t q, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC); -} - -void -mpz_cdiv_r (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL); -} - -void -mpz_fdiv_r (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_r (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, GMP_DIV_TRUNC); -} - -void -mpz_mod (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, d->_mp_size >= 0 ? GMP_DIV_FLOOR : GMP_DIV_CEIL); -} - -static void -mpz_div_q_2exp (mpz_t q, const mpz_t u, mp_bitcnt_t bit_index, - enum mpz_div_round_mode mode) -{ - mp_size_t un, qn; - mp_size_t limb_cnt; - mp_ptr qp; - int adjust; - - un = u->_mp_size; - if (un == 0) - { - q->_mp_size = 0; - return; - } - limb_cnt = bit_index / GMP_LIMB_BITS; - qn = GMP_ABS (un) - limb_cnt; - bit_index %= GMP_LIMB_BITS; - - if (mode == ((un > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* un != 0 here. */ - /* Note: Below, the final indexing at limb_cnt is valid because at - that point we have qn > 0. */ - adjust = (qn <= 0 - || !mpn_zero_p (u->_mp_d, limb_cnt) - || (u->_mp_d[limb_cnt] - & (((mp_limb_t) 1 << bit_index) - 1))); - else - adjust = 0; - - if (qn <= 0) - qn = 0; - else - { - qp = MPZ_REALLOC (q, qn); - - if (bit_index != 0) - { - mpn_rshift (qp, u->_mp_d + limb_cnt, qn, bit_index); - qn -= qp[qn - 1] == 0; - } - else - { - mpn_copyi (qp, u->_mp_d + limb_cnt, qn); - } - } - - q->_mp_size = qn; - - if (adjust) - mpz_add_ui (q, q, 1); - if (un < 0) - mpz_neg (q, q); -} - -static void -mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index, - enum mpz_div_round_mode mode) -{ - mp_size_t us, un, rn; - mp_ptr rp; - mp_limb_t mask; - - us = u->_mp_size; - if (us == 0 || bit_index == 0) - { - r->_mp_size = 0; - return; - } - rn = (bit_index + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; - assert (rn > 0); - - rp = MPZ_REALLOC (r, rn); - un = GMP_ABS (us); - - mask = GMP_LIMB_MAX >> (rn * GMP_LIMB_BITS - bit_index); - - if (rn > un) - { - /* Quotient (with truncation) is zero, and remainder is - non-zero */ - if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ - { - /* Have to negate and sign extend. */ - mp_size_t i; - - gmp_assert_nocarry (! 
mpn_neg (rp, u->_mp_d, un)); - for (i = un; i < rn - 1; i++) - rp[i] = GMP_LIMB_MAX; - - rp[rn-1] = mask; - us = -us; - } - else - { - /* Just copy */ - if (r != u) - mpn_copyi (rp, u->_mp_d, un); - - rn = un; - } - } - else - { - if (r != u) - mpn_copyi (rp, u->_mp_d, rn - 1); - - rp[rn-1] = u->_mp_d[rn-1] & mask; - - if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ - { - /* If r != 0, compute 2^{bit_count} - r. */ - mpn_neg (rp, rp, rn); - - rp[rn-1] &= mask; - - /* us is not used for anything else, so we can modify it - here to indicate flipped sign. */ - us = -us; - } - } - rn = mpn_normalized_size (rp, rn); - r->_mp_size = us < 0 ? -rn : rn; -} - -void -mpz_cdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_q_2exp (r, u, cnt, GMP_DIV_CEIL); -} - -void -mpz_fdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_q_2exp (r, u, cnt, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_q_2exp (r, u, cnt, GMP_DIV_TRUNC); -} - -void -mpz_cdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_r_2exp (r, u, cnt, GMP_DIV_CEIL); -} - -void -mpz_fdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_r_2exp (r, u, cnt, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_r_2exp (r, u, cnt, GMP_DIV_TRUNC); -} - -void -mpz_divexact (mpz_t q, const mpz_t n, const mpz_t d) -{ - gmp_assert_nocarry (mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC)); -} - -int -mpz_divisible_p (const mpz_t n, const mpz_t d) -{ - return mpz_div_qr (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; -} - -int -mpz_congruent_p (const mpz_t a, const mpz_t b, const mpz_t m) -{ - mpz_t t; - int res; - - /* a == b (mod 0) iff a == b */ - if (mpz_sgn (m) == 0) - return (mpz_cmp (a, b) == 0); - - mpz_init (t); - mpz_sub (t, a, b); - res = mpz_divisible_p (t, m); - mpz_clear (t); - - return res; -} - -static unsigned long -mpz_div_qr_ui (mpz_t q, mpz_t r, - const mpz_t n, unsigned long d, enum mpz_div_round_mode mode) -{ - unsigned long ret; - mpz_t rr, dd; - - mpz_init (rr); - mpz_init_set_ui (dd, d); - mpz_div_qr (q, rr, n, dd, mode); - mpz_clear (dd); - ret = mpz_get_ui (rr); - - if (r) - mpz_swap (r, rr); - mpz_clear (rr); - - return ret; -} - -unsigned long -mpz_cdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, r, n, d, GMP_DIV_CEIL); -} - -unsigned long -mpz_fdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, r, n, d, GMP_DIV_FLOOR); -} - -unsigned long -mpz_tdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, r, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_cdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_CEIL); -} - -unsigned long -mpz_fdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_FLOOR); -} - -unsigned long -mpz_tdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_cdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_CEIL); -} -unsigned long -mpz_fdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); -} -unsigned long -mpz_tdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_cdiv_ui 
(const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_CEIL); -} - -unsigned long -mpz_fdiv_ui (const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_FLOOR); -} - -unsigned long -mpz_tdiv_ui (const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_mod_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); -} - -void -mpz_divexact_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - gmp_assert_nocarry (mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC)); -} - -int -mpz_divisible_ui_p (const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; -} - - -/* GCD */ -static mp_limb_t -mpn_gcd_11 (mp_limb_t u, mp_limb_t v) -{ - unsigned shift; - - assert ( (u | v) > 0); - - if (u == 0) - return v; - else if (v == 0) - return u; - - gmp_ctz (shift, u | v); - - u >>= shift; - v >>= shift; - - if ( (u & 1) == 0) - MP_LIMB_T_SWAP (u, v); - - while ( (v & 1) == 0) - v >>= 1; - - while (u != v) - { - if (u > v) - { - u -= v; - do - u >>= 1; - while ( (u & 1) == 0); - } - else - { - v -= u; - do - v >>= 1; - while ( (v & 1) == 0); - } - } - return u << shift; -} - -mp_size_t -mpn_gcd (mp_ptr rp, mp_ptr up, mp_size_t un, mp_ptr vp, mp_size_t vn) -{ - assert (un >= vn); - assert (vn > 0); - assert (!GMP_MPN_OVERLAP_P (up, un, vp, vn)); - assert (vp[vn-1] > 0); - assert ((up[0] | vp[0]) & 1); - - if (un > vn) - mpn_div_qr (NULL, up, un, vp, vn); - - un = mpn_normalized_size (up, vn); - if (un == 0) - { - mpn_copyi (rp, vp, vn); - return vn; - } - - if (!(vp[0] & 1)) - MPN_PTR_SWAP (up, un, vp, vn); - - while (un > 1 || vn > 1) - { - int shift; - assert (vp[0] & 1); - - while (up[0] == 0) - { - up++; - un--; - } - gmp_ctz (shift, up[0]); - if (shift > 0) - { - gmp_assert_nocarry (mpn_rshift(up, up, un, shift)); - un -= (up[un-1] == 0); - } - - if (un < vn) - MPN_PTR_SWAP (up, un, vp, vn); - else if (un == vn) - { - int c = mpn_cmp (up, vp, un); - if (c == 0) - { - mpn_copyi (rp, up, un); - return un; - } - else if (c < 0) - MP_PTR_SWAP (up, vp); - } - - gmp_assert_nocarry (mpn_sub (up, up, un, vp, vn)); - un = mpn_normalized_size (up, un); - } - rp[0] = mpn_gcd_11 (up[0], vp[0]); - return 1; -} - -unsigned long -mpz_gcd_ui (mpz_t g, const mpz_t u, unsigned long v) -{ - mpz_t t; - mpz_init_set_ui(t, v); - mpz_gcd (t, u, t); - if (v > 0) - v = mpz_get_ui (t); - - if (g) - mpz_swap (t, g); - - mpz_clear (t); - - return v; -} - -static mp_bitcnt_t -mpz_make_odd (mpz_t r) -{ - mp_bitcnt_t shift; - - assert (r->_mp_size > 0); - /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ - shift = mpn_scan1 (r->_mp_d, 0); - mpz_tdiv_q_2exp (r, r, shift); - - return shift; -} - -void -mpz_gcd (mpz_t g, const mpz_t u, const mpz_t v) -{ - mpz_t tu, tv; - mp_bitcnt_t uz, vz, gz; - - if (u->_mp_size == 0) - { - mpz_abs (g, v); - return; - } - if (v->_mp_size == 0) - { - mpz_abs (g, u); - return; - } - - mpz_init (tu); - mpz_init (tv); - - mpz_abs (tu, u); - uz = mpz_make_odd (tu); - mpz_abs (tv, v); - vz = mpz_make_odd (tv); - gz = GMP_MIN (uz, vz); - - if (tu->_mp_size < tv->_mp_size) - mpz_swap (tu, tv); - - tu->_mp_size = mpn_gcd (tu->_mp_d, tu->_mp_d, tu->_mp_size, tv->_mp_d, tv->_mp_size); - mpz_mul_2exp (g, tu, gz); - - mpz_clear (tu); - mpz_clear (tv); -} - -void -mpz_gcdext (mpz_t g, mpz_t s, mpz_t t, const mpz_t u, const mpz_t v) -{ - mpz_t tu, tv, s0, s1, t0, t1; - mp_bitcnt_t uz, vz, 
gz; - mp_bitcnt_t power; - int cmp; - - if (u->_mp_size == 0) - { - /* g = 0 u + sgn(v) v */ - signed long sign = mpz_sgn (v); - mpz_abs (g, v); - if (s) - s->_mp_size = 0; - if (t) - mpz_set_si (t, sign); - return; - } - - if (v->_mp_size == 0) - { - /* g = sgn(u) u + 0 v */ - signed long sign = mpz_sgn (u); - mpz_abs (g, u); - if (s) - mpz_set_si (s, sign); - if (t) - t->_mp_size = 0; - return; - } - - mpz_init (tu); - mpz_init (tv); - mpz_init (s0); - mpz_init (s1); - mpz_init (t0); - mpz_init (t1); - - mpz_abs (tu, u); - uz = mpz_make_odd (tu); - mpz_abs (tv, v); - vz = mpz_make_odd (tv); - gz = GMP_MIN (uz, vz); - - uz -= gz; - vz -= gz; - - /* Cofactors corresponding to odd gcd. gz handled later. */ - if (tu->_mp_size < tv->_mp_size) - { - mpz_swap (tu, tv); - MPZ_SRCPTR_SWAP (u, v); - MPZ_PTR_SWAP (s, t); - MP_BITCNT_T_SWAP (uz, vz); - } - - /* Maintain - * - * u = t0 tu + t1 tv - * v = s0 tu + s1 tv - * - * where u and v denote the inputs with common factors of two - * eliminated, and det (s0, t0; s1, t1) = 2^p. Then - * - * 2^p tu = s1 u - t1 v - * 2^p tv = -s0 u + t0 v - */ - - /* After initial division, tu = q tv + tu', we have - * - * u = 2^uz (tu' + q tv) - * v = 2^vz tv - * - * or - * - * t0 = 2^uz, t1 = 2^uz q - * s0 = 0, s1 = 2^vz - */ - - mpz_tdiv_qr (t1, tu, tu, tv); - mpz_mul_2exp (t1, t1, uz); - - mpz_setbit (s1, vz); - power = uz + vz; - - if (tu->_mp_size > 0) - { - mp_bitcnt_t shift; - shift = mpz_make_odd (tu); - mpz_setbit (t0, uz + shift); - power += shift; - - for (;;) - { - int c; - c = mpz_cmp (tu, tv); - if (c == 0) - break; - - if (c < 0) - { - /* tv = tv' + tu - * - * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv' - * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */ - - mpz_sub (tv, tv, tu); - mpz_add (t0, t0, t1); - mpz_add (s0, s0, s1); - - shift = mpz_make_odd (tv); - mpz_mul_2exp (t1, t1, shift); - mpz_mul_2exp (s1, s1, shift); - } - else - { - mpz_sub (tu, tu, tv); - mpz_add (t1, t0, t1); - mpz_add (s1, s0, s1); - - shift = mpz_make_odd (tu); - mpz_mul_2exp (t0, t0, shift); - mpz_mul_2exp (s0, s0, shift); - } - power += shift; - } - } - else - mpz_setbit (t0, uz); - - /* Now tv = odd part of gcd, and -s0 and t0 are corresponding - cofactors. */ - - mpz_mul_2exp (tv, tv, gz); - mpz_neg (s0, s0); - - /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To - adjust cofactors, we need u / g and v / g */ - - mpz_divexact (s1, v, tv); - mpz_abs (s1, s1); - mpz_divexact (t1, u, tv); - mpz_abs (t1, t1); - - while (power-- > 0) - { - /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */ - if (mpz_odd_p (s0) || mpz_odd_p (t0)) - { - mpz_sub (s0, s0, s1); - mpz_add (t0, t0, t1); - } - assert (mpz_even_p (t0) && mpz_even_p (s0)); - mpz_tdiv_q_2exp (s0, s0, 1); - mpz_tdiv_q_2exp (t0, t0, 1); - } - - /* Choose small cofactors (they should generally satify - - |s| < |u| / 2g and |t| < |v| / 2g, - - with some documented exceptions). Always choose the smallest s, - if there are two choices for s with same absolute value, choose - the one with smallest corresponding t (this asymmetric condition - is needed to prefer s = 0, |t| = 1 when g = |a| = |b|). 
*/ - mpz_add (s1, s0, s1); - mpz_sub (t1, t0, t1); - cmp = mpz_cmpabs (s0, s1); - if (cmp > 0 || (cmp == 0 && mpz_cmpabs (t0, t1) > 0)) - { - mpz_swap (s0, s1); - mpz_swap (t0, t1); - } - if (u->_mp_size < 0) - mpz_neg (s0, s0); - if (v->_mp_size < 0) - mpz_neg (t0, t0); - - mpz_swap (g, tv); - if (s) - mpz_swap (s, s0); - if (t) - mpz_swap (t, t0); - - mpz_clear (tu); - mpz_clear (tv); - mpz_clear (s0); - mpz_clear (s1); - mpz_clear (t0); - mpz_clear (t1); -} - -void -mpz_lcm (mpz_t r, const mpz_t u, const mpz_t v) -{ - mpz_t g; - - if (u->_mp_size == 0 || v->_mp_size == 0) - { - r->_mp_size = 0; - return; - } - - mpz_init (g); - - mpz_gcd (g, u, v); - mpz_divexact (g, u, g); - mpz_mul (r, g, v); - - mpz_clear (g); - mpz_abs (r, r); -} - -void -mpz_lcm_ui (mpz_t r, const mpz_t u, unsigned long v) -{ - if (v == 0 || u->_mp_size == 0) - { - r->_mp_size = 0; - return; - } - - v /= mpz_gcd_ui (NULL, u, v); - mpz_mul_ui (r, u, v); - - mpz_abs (r, r); -} - -int -mpz_invert (mpz_t r, const mpz_t u, const mpz_t m) -{ - mpz_t g, tr; - int invertible; - - if (u->_mp_size == 0 || mpz_cmpabs_ui (m, 1) <= 0) - return 0; - - mpz_init (g); - mpz_init (tr); - - mpz_gcdext (g, tr, NULL, u, m); - invertible = (mpz_cmp_ui (g, 1) == 0); - - if (invertible) - { - if (tr->_mp_size < 0) - { - if (m->_mp_size >= 0) - mpz_add (tr, tr, m); - else - mpz_sub (tr, tr, m); - } - mpz_swap (r, tr); - } - - mpz_clear (g); - mpz_clear (tr); - return invertible; -} - - -/* Higher level operations (sqrt, pow and root) */ - -void -mpz_pow_ui (mpz_t r, const mpz_t b, unsigned long e) -{ - unsigned long bit; - mpz_t tr; - mpz_init_set_ui (tr, 1); - - bit = GMP_ULONG_HIGHBIT; - do - { - mpz_mul (tr, tr, tr); - if (e & bit) - mpz_mul (tr, tr, b); - bit >>= 1; - } - while (bit > 0); - - mpz_swap (r, tr); - mpz_clear (tr); -} - -void -mpz_ui_pow_ui (mpz_t r, unsigned long blimb, unsigned long e) -{ - mpz_t b; - - mpz_init_set_ui (b, blimb); - mpz_pow_ui (r, b, e); - mpz_clear (b); -} - -void -mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m) -{ - mpz_t tr; - mpz_t base; - mp_size_t en, mn; - mp_srcptr mp; - struct gmp_div_inverse minv; - unsigned shift; - mp_ptr tp = NULL; - - en = GMP_ABS (e->_mp_size); - mn = GMP_ABS (m->_mp_size); - if (mn == 0) - gmp_die ("mpz_powm: Zero modulo."); - - if (en == 0) - { - mpz_set_ui (r, mpz_cmpabs_ui (m, 1)); - return; - } - - mp = m->_mp_d; - mpn_div_qr_invert (&minv, mp, mn); - shift = minv.shift; - - if (shift > 0) - { - /* To avoid shifts, we do all our reductions, except the final - one, using a *normalized* m. */ - minv.shift = 0; - - tp = gmp_alloc_limbs (mn); - gmp_assert_nocarry (mpn_lshift (tp, mp, mn, shift)); - mp = tp; - } - - mpz_init (base); - - if (e->_mp_size < 0) - { - if (!mpz_invert (base, b, m)) - gmp_die ("mpz_powm: Negative exponent and non-invertible base."); - } - else - { - mp_size_t bn; - mpz_abs (base, b); - - bn = base->_mp_size; - if (bn >= mn) - { - mpn_div_qr_preinv (NULL, base->_mp_d, base->_mp_size, mp, mn, &minv); - bn = mn; - } - - /* We have reduced the absolute value. Now take care of the - sign. Note that we get zero represented non-canonically as - m. 
*/ - if (b->_mp_size < 0) - { - mp_ptr bp = MPZ_REALLOC (base, mn); - gmp_assert_nocarry (mpn_sub (bp, mp, mn, bp, bn)); - bn = mn; - } - base->_mp_size = mpn_normalized_size (base->_mp_d, bn); - } - mpz_init_set_ui (tr, 1); - - while (--en >= 0) - { - mp_limb_t w = e->_mp_d[en]; - mp_limb_t bit; - - bit = GMP_LIMB_HIGHBIT; - do - { - mpz_mul (tr, tr, tr); - if (w & bit) - mpz_mul (tr, tr, base); - if (tr->_mp_size > mn) - { - mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); - tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); - } - bit >>= 1; - } - while (bit > 0); - } - - /* Final reduction */ - if (tr->_mp_size >= mn) - { - minv.shift = shift; - mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); - tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); - } - if (tp) - gmp_free_limbs (tp, mn); - - mpz_swap (r, tr); - mpz_clear (tr); - mpz_clear (base); -} - -void -mpz_powm_ui (mpz_t r, const mpz_t b, unsigned long elimb, const mpz_t m) -{ - mpz_t e; - - mpz_init_set_ui (e, elimb); - mpz_powm (r, b, e, m); - mpz_clear (e); -} - -/* x=trunc(y^(1/z)), r=y-x^z */ -void -mpz_rootrem (mpz_t x, mpz_t r, const mpz_t y, unsigned long z) -{ - int sgn; - mp_bitcnt_t bc; - mpz_t t, u; - - sgn = y->_mp_size < 0; - if ((~z & sgn) != 0) - gmp_die ("mpz_rootrem: Negative argument, with even root."); - if (z == 0) - gmp_die ("mpz_rootrem: Zeroth root."); - - if (mpz_cmpabs_ui (y, 1) <= 0) { - if (x) - mpz_set (x, y); - if (r) - r->_mp_size = 0; - return; - } - - mpz_init (u); - mpz_init (t); - bc = (mpz_sizeinbase (y, 2) - 1) / z + 1; - mpz_setbit (t, bc); - - if (z == 2) /* simplify sqrt loop: z-1 == 1 */ - do { - mpz_swap (u, t); /* u = x */ - mpz_tdiv_q (t, y, u); /* t = y/x */ - mpz_add (t, t, u); /* t = y/x + x */ - mpz_tdiv_q_2exp (t, t, 1); /* x'= (y/x + x)/2 */ - } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ - else /* z != 2 */ { - mpz_t v; - - mpz_init (v); - if (sgn) - mpz_neg (t, t); - - do { - mpz_swap (u, t); /* u = x */ - mpz_pow_ui (t, u, z - 1); /* t = x^(z-1) */ - mpz_tdiv_q (t, y, t); /* t = y/x^(z-1) */ - mpz_mul_ui (v, u, z - 1); /* v = x*(z-1) */ - mpz_add (t, t, v); /* t = y/x^(z-1) + x*(z-1) */ - mpz_tdiv_q_ui (t, t, z); /* x'=(y/x^(z-1) + x*(z-1))/z */ - } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ - - mpz_clear (v); - } - - if (r) { - mpz_pow_ui (t, u, z); - mpz_sub (r, y, t); - } - if (x) - mpz_swap (x, u); - mpz_clear (u); - mpz_clear (t); -} - -int -mpz_root (mpz_t x, const mpz_t y, unsigned long z) -{ - int res; - mpz_t r; - - mpz_init (r); - mpz_rootrem (x, r, y, z); - res = r->_mp_size == 0; - mpz_clear (r); - - return res; -} - -/* Compute s = floor(sqrt(u)) and r = u - s^2. 
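The main loop of mpz_powm above is plain left-to-right square-and-multiply over the exponent limbs, with a reduction by the precomputed divisor inverse whenever the intermediate grows past mn limbs. The same scan on 32-bit operands, using 64-bit intermediates so the products cannot overflow, is a one-screen illustration (powmod_u32 is an illustrative helper, not part of the patch):

    #include <stdint.h>

    /* Left-to-right binary exponentiation: square every step, multiply in the
       base when the exponent bit is set, reduce modulo m each time. */
    static uint32_t powmod_u32(uint32_t b, uint32_t e, uint32_t m)
    {
        uint64_t result = 1 % m;
        uint64_t base = b % m;

        for (int bit = 31; bit >= 0; bit--) {
            result = (result * result) % m;        /* always square */
            if ((e >> bit) & 1)
                result = (result * base) % m;      /* multiply when the bit is set */
        }
        return (uint32_t) result;
    }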
Allows r == NULL */ -void -mpz_sqrtrem (mpz_t s, mpz_t r, const mpz_t u) -{ - mpz_rootrem (s, r, u, 2); -} - -void -mpz_sqrt (mpz_t s, const mpz_t u) -{ - mpz_rootrem (s, NULL, u, 2); -} - -int -mpz_perfect_square_p (const mpz_t u) -{ - if (u->_mp_size <= 0) - return (u->_mp_size == 0); - else - return mpz_root (NULL, u, 2); -} - -int -mpn_perfect_square_p (mp_srcptr p, mp_size_t n) -{ - mpz_t t; - - assert (n > 0); - assert (p [n-1] != 0); - return mpz_root (NULL, mpz_roinit_normal_n (t, p, n), 2); -} - -mp_size_t -mpn_sqrtrem (mp_ptr sp, mp_ptr rp, mp_srcptr p, mp_size_t n) -{ - mpz_t s, r, u; - mp_size_t res; - - assert (n > 0); - assert (p [n-1] != 0); - - mpz_init (r); - mpz_init (s); - mpz_rootrem (s, r, mpz_roinit_normal_n (u, p, n), 2); - - assert (s->_mp_size == (n+1)/2); - mpn_copyd (sp, s->_mp_d, s->_mp_size); - mpz_clear (s); - res = r->_mp_size; - if (rp) - mpn_copyd (rp, r->_mp_d, res); - mpz_clear (r); - return res; -} - -/* Combinatorics */ - -void -mpz_mfac_uiui (mpz_t x, unsigned long n, unsigned long m) -{ - mpz_set_ui (x, n + (n == 0)); - if (m + 1 < 2) return; - while (n > m + 1) - mpz_mul_ui (x, x, n -= m); -} - -void -mpz_2fac_ui (mpz_t x, unsigned long n) -{ - mpz_mfac_uiui (x, n, 2); -} - -void -mpz_fac_ui (mpz_t x, unsigned long n) -{ - mpz_mfac_uiui (x, n, 1); -} - -void -mpz_bin_uiui (mpz_t r, unsigned long n, unsigned long k) -{ - mpz_t t; - - mpz_set_ui (r, k <= n); - - if (k > (n >> 1)) - k = (k <= n) ? n - k : 0; - - mpz_init (t); - mpz_fac_ui (t, k); - - for (; k > 0; --k) - mpz_mul_ui (r, r, n--); - - mpz_divexact (r, r, t); - mpz_clear (t); -} - - -/* Primality testing */ - -/* Computes Kronecker (a/b) with odd b, a!=0 and GCD(a,b) = 1 */ -/* Adapted from JACOBI_BASE_METHOD==4 in mpn/generic/jacbase.c */ -static int -gmp_jacobi_coprime (mp_limb_t a, mp_limb_t b) -{ - int c, bit = 0; - - assert (b & 1); - assert (a != 0); - /* assert (mpn_gcd_11 (a, b) == 1); */ - - /* Below, we represent a and b shifted right so that the least - significant one bit is implicit. */ - b >>= 1; - - gmp_ctz(c, a); - a >>= 1; - - for (;;) - { - a >>= c; - /* (2/b) = -1 if b = 3 or 5 mod 8 */ - bit ^= c & (b ^ (b >> 1)); - if (a < b) - { - if (a == 0) - return bit & 1 ? -1 : 1; - bit ^= a & b; - a = b - a; - b -= a; - } - else - { - a -= b; - assert (a != 0); - } - - gmp_ctz(c, a); - ++c; - } -} - -static void -gmp_lucas_step_k_2k (mpz_t V, mpz_t Qk, const mpz_t n) -{ - mpz_mod (Qk, Qk, n); - /* V_{2k} <- V_k ^ 2 - 2Q^k */ - mpz_mul (V, V, V); - mpz_submul_ui (V, Qk, 2); - mpz_tdiv_r (V, V, n); - /* Q^{2k} = (Q^k)^2 */ - mpz_mul (Qk, Qk, Qk); -} - -/* Computes V_k, Q^k (mod n) for the Lucas' sequence */ -/* with P=1, Q=Q; k = (n>>b0)|1. */ -/* Requires an odd n > 4; b0 > 0; -2*Q must not overflow a long */ -/* Returns (U_k == 0) and sets V=V_k and Qk=Q^k. 
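mpz_sqrtrem and mpz_sqrt above simply call mpz_rootrem with z = 2, whose square-root branch is the Newton iteration x' = (y/x + x)/2 run until it stops decreasing. The same iteration on a 64-bit word, with a start value chosen to avoid overflow, is sketched below (isqrt_u64 is an illustrative name, not code from the patch):

    #include <stdint.h>

    /* Integer square root by the same Newton iteration as the z == 2 branch
       of mpz_rootrem above. */
    static uint64_t isqrt_u64(uint64_t y)
    {
        if (y < 2)
            return y;

        uint64_t x = y / 2 + 1;             /* any start >= sqrt(y); avoids overflow in x + y/x */
        uint64_t next = (x + y / x) / 2;
        while (next < x) {                  /* |x'| < |x|, as in the loop above */
            x = next;
            next = (x + y / x) / 2;
        }
        return x;                           /* floor(sqrt(y)); the remainder is y - x*x */
    }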
*/ -static int -gmp_lucas_mod (mpz_t V, mpz_t Qk, long Q, - mp_bitcnt_t b0, const mpz_t n) -{ - mp_bitcnt_t bs; - mpz_t U; - int res; - - assert (b0 > 0); - assert (Q <= - (LONG_MIN / 2)); - assert (Q >= - (LONG_MAX / 2)); - assert (mpz_cmp_ui (n, 4) > 0); - assert (mpz_odd_p (n)); - - mpz_init_set_ui (U, 1); /* U1 = 1 */ - mpz_set_ui (V, 1); /* V1 = 1 */ - mpz_set_si (Qk, Q); - - for (bs = mpz_sizeinbase (n, 2) - 1; --bs >= b0;) - { - /* U_{2k} <- U_k * V_k */ - mpz_mul (U, U, V); - /* V_{2k} <- V_k ^ 2 - 2Q^k */ - /* Q^{2k} = (Q^k)^2 */ - gmp_lucas_step_k_2k (V, Qk, n); - - /* A step k->k+1 is performed if the bit in $n$ is 1 */ - /* mpz_tstbit(n,bs) or the bit is 0 in $n$ but */ - /* should be 1 in $n+1$ (bs == b0) */ - if (b0 == bs || mpz_tstbit (n, bs)) - { - /* Q^{k+1} <- Q^k * Q */ - mpz_mul_si (Qk, Qk, Q); - /* U_{k+1} <- (U_k + V_k) / 2 */ - mpz_swap (U, V); /* Keep in V the old value of U_k */ - mpz_add (U, U, V); - /* We have to compute U/2, so we need an even value, */ - /* equivalent (mod n) */ - if (mpz_odd_p (U)) - mpz_add (U, U, n); - mpz_tdiv_q_2exp (U, U, 1); - /* V_{k+1} <-(D*U_k + V_k) / 2 = - U_{k+1} + (D-1)/2*U_k = U_{k+1} - 2Q*U_k */ - mpz_mul_si (V, V, -2*Q); - mpz_add (V, U, V); - mpz_tdiv_r (V, V, n); - } - mpz_tdiv_r (U, U, n); - } - - res = U->_mp_size == 0; - mpz_clear (U); - return res; -} - -/* Performs strong Lucas' test on x, with parameters suggested */ -/* for the BPSW test. Qk is only passed to recycle a variable. */ -/* Requires GCD (x,6) = 1.*/ -static int -gmp_stronglucas (const mpz_t x, mpz_t Qk) -{ - mp_bitcnt_t b0; - mpz_t V, n; - mp_limb_t maxD, D; /* The absolute value is stored. */ - long Q; - mp_limb_t tl; - - /* Test on the absolute value. */ - mpz_roinit_normal_n (n, x->_mp_d, GMP_ABS (x->_mp_size)); - - assert (mpz_odd_p (n)); - /* assert (mpz_gcd_ui (NULL, n, 6) == 1); */ - if (mpz_root (Qk, n, 2)) - return 0; /* A square is composite. */ - - /* Check Ds up to square root (in case, n is prime) - or avoid overflows */ - maxD = (Qk->_mp_size == 1) ? Qk->_mp_d [0] - 1 : GMP_LIMB_MAX; - - D = 3; - /* Search a D such that (D/n) = -1 in the sequence 5,-7,9,-11,.. */ - /* For those Ds we have (D/n) = (n/|D|) */ - do - { - if (D >= maxD) - return 1 + (D != GMP_LIMB_MAX); /* (1 + ! ~ D) */ - D += 2; - tl = mpz_tdiv_ui (n, D); - if (tl == 0) - return 0; - } - while (gmp_jacobi_coprime (tl, D) == 1); - - mpz_init (V); - - /* n-(D/n) = n+1 = d*2^{b0}, with d = (n>>b0) | 1 */ - b0 = mpn_common_scan (~ n->_mp_d[0], 0, n->_mp_d, n->_mp_size, GMP_LIMB_MAX); - /* b0 = mpz_scan0 (n, 0); */ - - /* D= P^2 - 4Q; P = 1; Q = (1-D)/4 */ - Q = (D & 2) ? (long) (D >> 2) + 1 : -(long) (D >> 2); - - if (! gmp_lucas_mod (V, Qk, Q, b0, n)) /* If Ud != 0 */ - while (V->_mp_size != 0 && --b0 != 0) /* while Vk != 0 */ - /* V <- V ^ 2 - 2Q^k */ - /* Q^{2k} = (Q^k)^2 */ - gmp_lucas_step_k_2k (V, Qk, n); - - mpz_clear (V); - return (b0 != 0); -} - -static int -gmp_millerrabin (const mpz_t n, const mpz_t nm1, mpz_t y, - const mpz_t q, mp_bitcnt_t k) -{ - assert (k > 0); - - /* Caller must initialize y to the base. */ - mpz_powm (y, y, q, n); - - if (mpz_cmp_ui (y, 1) == 0 || mpz_cmp (y, nm1) == 0) - return 1; - - while (--k > 0) - { - mpz_powm_ui (y, y, 2, n); - if (mpz_cmp (y, nm1) == 0) - return 1; - } - return 0; -} - -/* This product is 0xc0cfd797, and fits in 32 bits. 
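gmp_millerrabin above is one round of the classic strong pseudoprime test: with n - 1 = q * 2^k and q odd, the base passes if a^q is 1 or if some repeated squaring of it hits n - 1. A self-contained 32-bit version of the same round (miller_rabin_round_u32 is an illustrative helper, not code from the patch) reads:

    #include <stdint.h>

    /* One Miller-Rabin round for an odd n > 2 and a base 1 < a < n - 1. */
    static int miller_rabin_round_u32(uint32_t n, uint32_t a)
    {
        uint32_t q = n - 1;
        unsigned k = 0;
        while ((q & 1) == 0) { q >>= 1; k++; }     /* n - 1 = q * 2^k, q odd */

        uint64_t y = 1, base = a % n;
        for (int bit = 31; bit >= 0; bit--) {      /* y = a^q mod n */
            y = (y * y) % n;
            if ((q >> bit) & 1)
                y = (y * base) % n;
        }
        if (y == 1 || y == n - 1)
            return 1;                              /* strong probable prime for this base */

        while (--k > 0) {                          /* square k - 1 more times */
            y = (y * y) % n;
            if (y == n - 1)
                return 1;
        }
        return 0;                                  /* definitely composite */
    }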
*/ -#define GMP_PRIME_PRODUCT \ - (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL) - -/* Bit (p+1)/2 is set, for each odd prime <= 61 */ -#define GMP_PRIME_MASK 0xc96996dcUL - -int -mpz_probab_prime_p (const mpz_t n, int reps) -{ - mpz_t nm1; - mpz_t q; - mpz_t y; - mp_bitcnt_t k; - int is_prime; - int j; - - /* Note that we use the absolute value of n only, for compatibility - with the real GMP. */ - if (mpz_even_p (n)) - return (mpz_cmpabs_ui (n, 2) == 0) ? 2 : 0; - - /* Above test excludes n == 0 */ - assert (n->_mp_size != 0); - - if (mpz_cmpabs_ui (n, 64) < 0) - return (GMP_PRIME_MASK >> (n->_mp_d[0] >> 1)) & 2; - - if (mpz_gcd_ui (NULL, n, GMP_PRIME_PRODUCT) != 1) - return 0; - - /* All prime factors are >= 31. */ - if (mpz_cmpabs_ui (n, 31*31) < 0) - return 2; - - mpz_init (nm1); - mpz_init (q); - - /* Find q and k, where q is odd and n = 1 + 2**k * q. */ - mpz_abs (nm1, n); - nm1->_mp_d[0] -= 1; - /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ - k = mpn_scan1 (nm1->_mp_d, 0); - mpz_tdiv_q_2exp (q, nm1, k); - - /* BPSW test */ - mpz_init_set_ui (y, 2); - is_prime = gmp_millerrabin (n, nm1, y, q, k) && gmp_stronglucas (n, y); - reps -= 24; /* skip the first 24 repetitions */ - - /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] = - j^2 + j + 41 using Euler's polynomial. We potentially stop early, - if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps > - 30 (a[30] == 971 > 31*31 == 961). */ - - for (j = 0; is_prime & (j < reps); j++) - { - mpz_set_ui (y, (unsigned long) j*j+j+41); - if (mpz_cmp (y, nm1) >= 0) - { - /* Don't try any further bases. This "early" break does not affect - the result for any reasonable reps value (<=5000 was tested) */ - assert (j >= 30); - break; - } - is_prime = gmp_millerrabin (n, nm1, y, q, k); - } - mpz_clear (nm1); - mpz_clear (q); - mpz_clear (y); - - return is_prime; -} - - -/* Logical operations and bit manipulation. */ - -/* Numbers are treated as if represented in two's complement (and - infinitely sign extended). For a negative values we get the two's - complement from -x = ~x + 1, where ~ is bitwise complement. - Negation transforms - - xxxx10...0 - - into - - yyyy10...0 - - where yyyy is the bitwise complement of xxxx. So least significant - bits, up to and including the first one bit, are unchanged, and - the more significant bits are all complemented. - - To change a bit from zero to one in a negative number, subtract the - corresponding power of two from the absolute value. This can never - underflow. To change a bit from one to zero, add the corresponding - power of two, and this might overflow. E.g., if x = -001111, the - two's complement is 110001. Clearing the least significant bit, we - get two's complement 110000, and -010000. */ - -int -mpz_tstbit (const mpz_t d, mp_bitcnt_t bit_index) -{ - mp_size_t limb_index; - unsigned shift; - mp_size_t ds; - mp_size_t dn; - mp_limb_t w; - int bit; - - ds = d->_mp_size; - dn = GMP_ABS (ds); - limb_index = bit_index / GMP_LIMB_BITS; - if (limb_index >= dn) - return ds < 0; - - shift = bit_index % GMP_LIMB_BITS; - w = d->_mp_d[limb_index]; - bit = (w >> shift) & 1; - - if (ds < 0) - { - /* d < 0. Check if any of the bits below is set: If so, our bit - must be complemented. 
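mpz_probab_prime_p above runs a BPSW test (one base-2 Miller-Rabin round plus the strong Lucas test) followed by extra Miller-Rabin rounds with the deterministic bases j^2 + j + 41. It returns 2 for definitely prime, 1 for probably prime and 0 for composite. A hedged usage sketch, not part of the patch:

    #include <stdio.h>
    #include "mini-gmp.h"

    int main(void)
    {
        mpz_t n;

        /* 2^127 - 1 is prime, so this prints 1 ("probably prime"). */
        mpz_init_set_str(n, "170141183460469231731687303715884105727", 10);
        printf("%d\n", mpz_probab_prime_p(n, 30));

        mpz_clear(n);
        return 0;
    }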
*/ - if (shift > 0 && (mp_limb_t) (w << (GMP_LIMB_BITS - shift)) > 0) - return bit ^ 1; - while (--limb_index >= 0) - if (d->_mp_d[limb_index] > 0) - return bit ^ 1; - } - return bit; -} - -static void -mpz_abs_add_bit (mpz_t d, mp_bitcnt_t bit_index) -{ - mp_size_t dn, limb_index; - mp_limb_t bit; - mp_ptr dp; - - dn = GMP_ABS (d->_mp_size); - - limb_index = bit_index / GMP_LIMB_BITS; - bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); - - if (limb_index >= dn) - { - mp_size_t i; - /* The bit should be set outside of the end of the number. - We have to increase the size of the number. */ - dp = MPZ_REALLOC (d, limb_index + 1); - - dp[limb_index] = bit; - for (i = dn; i < limb_index; i++) - dp[i] = 0; - dn = limb_index + 1; - } - else - { - mp_limb_t cy; - - dp = d->_mp_d; - - cy = mpn_add_1 (dp + limb_index, dp + limb_index, dn - limb_index, bit); - if (cy > 0) - { - dp = MPZ_REALLOC (d, dn + 1); - dp[dn++] = cy; - } - } - - d->_mp_size = (d->_mp_size < 0) ? - dn : dn; -} - -static void -mpz_abs_sub_bit (mpz_t d, mp_bitcnt_t bit_index) -{ - mp_size_t dn, limb_index; - mp_ptr dp; - mp_limb_t bit; - - dn = GMP_ABS (d->_mp_size); - dp = d->_mp_d; - - limb_index = bit_index / GMP_LIMB_BITS; - bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); - - assert (limb_index < dn); - - gmp_assert_nocarry (mpn_sub_1 (dp + limb_index, dp + limb_index, - dn - limb_index, bit)); - dn = mpn_normalized_size (dp, dn); - d->_mp_size = (d->_mp_size < 0) ? - dn : dn; -} - -void -mpz_setbit (mpz_t d, mp_bitcnt_t bit_index) -{ - if (!mpz_tstbit (d, bit_index)) - { - if (d->_mp_size >= 0) - mpz_abs_add_bit (d, bit_index); - else - mpz_abs_sub_bit (d, bit_index); - } -} - -void -mpz_clrbit (mpz_t d, mp_bitcnt_t bit_index) -{ - if (mpz_tstbit (d, bit_index)) - { - if (d->_mp_size >= 0) - mpz_abs_sub_bit (d, bit_index); - else - mpz_abs_add_bit (d, bit_index); - } -} - -void -mpz_combit (mpz_t d, mp_bitcnt_t bit_index) -{ - if (mpz_tstbit (d, bit_index) ^ (d->_mp_size < 0)) - mpz_abs_sub_bit (d, bit_index); - else - mpz_abs_add_bit (d, bit_index); -} - -void -mpz_com (mpz_t r, const mpz_t u) -{ - mpz_add_ui (r, u, 1); - mpz_neg (r, r); -} - -void -mpz_and (mpz_t r, const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, rn, i; - mp_ptr up, vp, rp; - - mp_limb_t ux, vx, rx; - mp_limb_t uc, vc, rc; - mp_limb_t ul, vl, rl; - - un = GMP_ABS (u->_mp_size); - vn = GMP_ABS (v->_mp_size); - if (un < vn) - { - MPZ_SRCPTR_SWAP (u, v); - MP_SIZE_T_SWAP (un, vn); - } - if (vn == 0) - { - r->_mp_size = 0; - return; - } - - uc = u->_mp_size < 0; - vc = v->_mp_size < 0; - rc = uc & vc; - - ux = -uc; - vx = -vc; - rx = -rc; - - /* If the smaller input is positive, higher limbs don't matter. */ - rn = vx ? un : vn; - - rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); - - up = u->_mp_d; - vp = v->_mp_d; - - i = 0; - do - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - vl = (vp[i] ^ vx) + vc; - vc = vl < vc; - - rl = ( (ul & vl) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - while (++i < vn); - assert (vc == 0); - - for (; i < rn; i++) - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - rl = ( (ul & vx) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - if (rc) - rp[rn++] = rc; - else - rn = mpn_normalized_size (rp, rn); - - r->_mp_size = rx ? 
-rn : rn; -} - -void -mpz_ior (mpz_t r, const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, rn, i; - mp_ptr up, vp, rp; - - mp_limb_t ux, vx, rx; - mp_limb_t uc, vc, rc; - mp_limb_t ul, vl, rl; - - un = GMP_ABS (u->_mp_size); - vn = GMP_ABS (v->_mp_size); - if (un < vn) - { - MPZ_SRCPTR_SWAP (u, v); - MP_SIZE_T_SWAP (un, vn); - } - if (vn == 0) - { - mpz_set (r, u); - return; - } - - uc = u->_mp_size < 0; - vc = v->_mp_size < 0; - rc = uc | vc; - - ux = -uc; - vx = -vc; - rx = -rc; - - /* If the smaller input is negative, by sign extension higher limbs - don't matter. */ - rn = vx ? vn : un; - - rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); - - up = u->_mp_d; - vp = v->_mp_d; - - i = 0; - do - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - vl = (vp[i] ^ vx) + vc; - vc = vl < vc; - - rl = ( (ul | vl) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - while (++i < vn); - assert (vc == 0); - - for (; i < rn; i++) - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - rl = ( (ul | vx) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - if (rc) - rp[rn++] = rc; - else - rn = mpn_normalized_size (rp, rn); - - r->_mp_size = rx ? -rn : rn; -} - -void -mpz_xor (mpz_t r, const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, i; - mp_ptr up, vp, rp; - - mp_limb_t ux, vx, rx; - mp_limb_t uc, vc, rc; - mp_limb_t ul, vl, rl; - - un = GMP_ABS (u->_mp_size); - vn = GMP_ABS (v->_mp_size); - if (un < vn) - { - MPZ_SRCPTR_SWAP (u, v); - MP_SIZE_T_SWAP (un, vn); - } - if (vn == 0) - { - mpz_set (r, u); - return; - } - - uc = u->_mp_size < 0; - vc = v->_mp_size < 0; - rc = uc ^ vc; - - ux = -uc; - vx = -vc; - rx = -rc; - - rp = MPZ_REALLOC (r, un + (mp_size_t) rc); - - up = u->_mp_d; - vp = v->_mp_d; - - i = 0; - do - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - vl = (vp[i] ^ vx) + vc; - vc = vl < vc; - - rl = (ul ^ vl ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - while (++i < vn); - assert (vc == 0); - - for (; i < un; i++) - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - rl = (ul ^ ux) + rc; - rc = rl < rc; - rp[i] = rl; - } - if (rc) - rp[un++] = rc; - else - un = mpn_normalized_size (rp, un); - - r->_mp_size = rx ? -un : un; -} - -static unsigned -gmp_popcount_limb (mp_limb_t x) -{ - unsigned c; - - /* Do 16 bits at a time, to avoid limb-sized constants. 
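The carry juggling in mpz_and, mpz_ior and mpz_xor above implements ordinary two's complement semantics for negative operands, so the results agree with the C operators on machine integers. For example (a standalone sketch, not part of the patch):

    #include <stdio.h>
    #include "mini-gmp.h"

    int main(void)
    {
        mpz_t a, b, r;
        mpz_init_set_si(a, -6);               /* ...11111010 in two's complement */
        mpz_init_set_si(b, 12);               /* ...00001100                     */
        mpz_init(r);

        mpz_and(r, a, b); printf("%ld\n", mpz_get_si(r));   /*   8, same as -6 & 12 */
        mpz_ior(r, a, b); printf("%ld\n", mpz_get_si(r));   /*  -2, same as -6 | 12 */
        mpz_xor(r, a, b); printf("%ld\n", mpz_get_si(r));   /* -10, same as -6 ^ 12 */

        mpz_clear(a); mpz_clear(b); mpz_clear(r);
        return 0;
    }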
*/ - int LOCAL_SHIFT_BITS = 16; - for (c = 0; x > 0;) - { - unsigned w = x - ((x >> 1) & 0x5555); - w = ((w >> 2) & 0x3333) + (w & 0x3333); - w = (w >> 4) + w; - w = ((w >> 8) & 0x000f) + (w & 0x000f); - c += w; - if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) - x >>= LOCAL_SHIFT_BITS; - else - x = 0; - } - return c; -} - -mp_bitcnt_t -mpn_popcount (mp_srcptr p, mp_size_t n) -{ - mp_size_t i; - mp_bitcnt_t c; - - for (c = 0, i = 0; i < n; i++) - c += gmp_popcount_limb (p[i]); - - return c; -} - -mp_bitcnt_t -mpz_popcount (const mpz_t u) -{ - mp_size_t un; - - un = u->_mp_size; - - if (un < 0) - return ~(mp_bitcnt_t) 0; - - return mpn_popcount (u->_mp_d, un); -} - -mp_bitcnt_t -mpz_hamdist (const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, i; - mp_limb_t uc, vc, ul, vl, comp; - mp_srcptr up, vp; - mp_bitcnt_t c; - - un = u->_mp_size; - vn = v->_mp_size; - - if ( (un ^ vn) < 0) - return ~(mp_bitcnt_t) 0; - - comp = - (uc = vc = (un < 0)); - if (uc) - { - assert (vn < 0); - un = -un; - vn = -vn; - } - - up = u->_mp_d; - vp = v->_mp_d; - - if (un < vn) - MPN_SRCPTR_SWAP (up, un, vp, vn); - - for (i = 0, c = 0; i < vn; i++) - { - ul = (up[i] ^ comp) + uc; - uc = ul < uc; - - vl = (vp[i] ^ comp) + vc; - vc = vl < vc; - - c += gmp_popcount_limb (ul ^ vl); - } - assert (vc == 0); - - for (; i < un; i++) - { - ul = (up[i] ^ comp) + uc; - uc = ul < uc; - - c += gmp_popcount_limb (ul ^ comp); - } - - return c; -} - -mp_bitcnt_t -mpz_scan1 (const mpz_t u, mp_bitcnt_t starting_bit) -{ - mp_ptr up; - mp_size_t us, un, i; - mp_limb_t limb, ux; - - us = u->_mp_size; - un = GMP_ABS (us); - i = starting_bit / GMP_LIMB_BITS; - - /* Past the end there's no 1 bits for u>=0, or an immediate 1 bit - for u<0. Notice this test picks up any u==0 too. */ - if (i >= un) - return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit); - - up = u->_mp_d; - ux = 0; - limb = up[i]; - - if (starting_bit != 0) - { - if (us < 0) - { - ux = mpn_zero_p (up, i); - limb = ~ limb + ux; - ux = - (mp_limb_t) (limb >= ux); - } - - /* Mask to 0 all bits before starting_bit, thus ignoring them. */ - limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); - } - - return mpn_common_scan (limb, i, up, un, ux); -} - -mp_bitcnt_t -mpz_scan0 (const mpz_t u, mp_bitcnt_t starting_bit) -{ - mp_ptr up; - mp_size_t us, un, i; - mp_limb_t limb, ux; - - us = u->_mp_size; - ux = - (mp_limb_t) (us >= 0); - un = GMP_ABS (us); - i = starting_bit / GMP_LIMB_BITS; - - /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for - u<0. Notice this test picks up all cases of u==0 too. */ - if (i >= un) - return (ux ? starting_bit : ~(mp_bitcnt_t) 0); - - up = u->_mp_d; - limb = up[i] ^ ux; - - if (ux == 0) - limb -= mpn_zero_p (up, i); /* limb = ~(~limb + zero_p) */ - - /* Mask all bits before starting_bit, thus ignoring them. */ - limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); - - return mpn_common_scan (limb, i, up, un, ux); -} - - -/* MPZ base conversion. 
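gmp_popcount_limb above is the standard shift-and-mask population count, processed 16 bits at a time so that no limb-sized constants are needed. Written out for a full 32-bit word the trick looks like this (a sketch, not code from the patch):

    #include <stdint.h>

    /* SWAR population count: fold pairs, nibbles and bytes, then sum the bytes. */
    static unsigned popcount32(uint32_t x)
    {
        x = x - ((x >> 1) & 0x55555555u);                  /* 2-bit partial sums */
        x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);  /* 4-bit partial sums */
        x = (x + (x >> 4)) & 0x0f0f0f0fu;                  /* 8-bit partial sums */
        return (x * 0x01010101u) >> 24;                    /* add the four bytes */
    }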
*/ - -size_t -mpz_sizeinbase (const mpz_t u, int base) -{ - mp_size_t un, tn; - mp_srcptr up; - mp_ptr tp; - mp_bitcnt_t bits; - struct gmp_div_inverse bi; - size_t ndigits; - - assert (base >= 2); - assert (base <= 62); - - un = GMP_ABS (u->_mp_size); - if (un == 0) - return 1; - - up = u->_mp_d; - - bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]); - switch (base) - { - case 2: - return bits; - case 4: - return (bits + 1) / 2; - case 8: - return (bits + 2) / 3; - case 16: - return (bits + 3) / 4; - case 32: - return (bits + 4) / 5; - /* FIXME: Do something more clever for the common case of base - 10. */ - } - - tp = gmp_alloc_limbs (un); - mpn_copyi (tp, up, un); - mpn_div_qr_1_invert (&bi, base); - - tn = un; - ndigits = 0; - do - { - ndigits++; - mpn_div_qr_1_preinv (tp, tp, tn, &bi); - tn -= (tp[tn-1] == 0); - } - while (tn > 0); - - gmp_free_limbs (tp, un); - return ndigits; -} - -char * -mpz_get_str (char *sp, int base, const mpz_t u) -{ - unsigned bits; - const char *digits; - mp_size_t un; - size_t i, sn, osn; - - digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; - if (base > 1) - { - if (base <= 36) - digits = "0123456789abcdefghijklmnopqrstuvwxyz"; - else if (base > 62) - return NULL; - } - else if (base >= -1) - base = 10; - else - { - base = -base; - if (base > 36) - return NULL; - } - - sn = 1 + mpz_sizeinbase (u, base); - if (!sp) - { - osn = 1 + sn; - sp = (char *) gmp_alloc (osn); - } - else - osn = 0; - un = GMP_ABS (u->_mp_size); - - if (un == 0) - { - sp[0] = '0'; - sn = 1; - goto ret; - } - - i = 0; - - if (u->_mp_size < 0) - sp[i++] = '-'; - - bits = mpn_base_power_of_two_p (base); - - if (bits) - /* Not modified in this case. */ - sn = i + mpn_get_str_bits ((unsigned char *) sp + i, bits, u->_mp_d, un); - else - { - struct mpn_base_info info; - mp_ptr tp; - - mpn_get_base_info (&info, base); - tp = gmp_alloc_limbs (un); - mpn_copyi (tp, u->_mp_d, un); - - sn = i + mpn_get_str_other ((unsigned char *) sp + i, base, &info, tp, un); - gmp_free_limbs (tp, un); - } - - for (; i < sn; i++) - sp[i] = digits[(unsigned char) sp[i]]; - -ret: - sp[sn] = '\0'; - if (osn && osn != sn + 1) - sp = (char*) gmp_realloc (sp, osn, sn + 1); - return sp; -} - -int -mpz_set_str (mpz_t r, const char *sp, int base) -{ - unsigned bits, value_of_a; - mp_size_t rn, alloc; - mp_ptr rp; - size_t dn, sn; - int sign; - unsigned char *dp; - - assert (base == 0 || (base >= 2 && base <= 62)); - - while (isspace( (unsigned char) *sp)) - sp++; - - sign = (*sp == '-'); - sp += sign; - - if (base == 0) - { - if (sp[0] == '0') - { - if (sp[1] == 'x' || sp[1] == 'X') - { - base = 16; - sp += 2; - } - else if (sp[1] == 'b' || sp[1] == 'B') - { - base = 2; - sp += 2; - } - else - base = 8; - } - else - base = 10; - } - - if (!*sp) - { - r->_mp_size = 0; - return -1; - } - sn = strlen(sp); - dp = (unsigned char *) gmp_alloc (sn); - - value_of_a = (base > 36) ? 
36 : 10; - for (dn = 0; *sp; sp++) - { - unsigned digit; - - if (isspace ((unsigned char) *sp)) - continue; - else if (*sp >= '0' && *sp <= '9') - digit = *sp - '0'; - else if (*sp >= 'a' && *sp <= 'z') - digit = *sp - 'a' + value_of_a; - else if (*sp >= 'A' && *sp <= 'Z') - digit = *sp - 'A' + 10; - else - digit = base; /* fail */ - - if (digit >= (unsigned) base) - { - gmp_free (dp, sn); - r->_mp_size = 0; - return -1; - } - - dp[dn++] = digit; - } - - if (!dn) - { - gmp_free (dp, sn); - r->_mp_size = 0; - return -1; - } - bits = mpn_base_power_of_two_p (base); - - if (bits > 0) - { - alloc = (dn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; - rp = MPZ_REALLOC (r, alloc); - rn = mpn_set_str_bits (rp, dp, dn, bits); - } - else - { - struct mpn_base_info info; - mpn_get_base_info (&info, base); - alloc = (dn + info.exp - 1) / info.exp; - rp = MPZ_REALLOC (r, alloc); - rn = mpn_set_str_other (rp, dp, dn, base, &info); - /* Normalization, needed for all-zero input. */ - assert (rn > 0); - rn -= rp[rn-1] == 0; - } - assert (rn <= alloc); - gmp_free (dp, sn); - - r->_mp_size = sign ? - rn : rn; - - return 0; -} - -int -mpz_init_set_str (mpz_t r, const char *sp, int base) -{ - mpz_init (r); - return mpz_set_str (r, sp, base); -} - -size_t -mpz_out_str (FILE *stream, int base, const mpz_t x) -{ - char *str; - size_t len, n; - - str = mpz_get_str (NULL, base, x); - if (!str) - return 0; - len = strlen (str); - n = fwrite (str, 1, len, stream); - gmp_free (str, len + 1); - return n; -} - - -static int -gmp_detect_endian (void) -{ - static const int i = 2; - const unsigned char *p = (const unsigned char *) &i; - return 1 - *p; -} - -/* Import and export. Does not support nails. */ -void -mpz_import (mpz_t r, size_t count, int order, size_t size, int endian, - size_t nails, const void *src) -{ - const unsigned char *p; - ptrdiff_t word_step; - mp_ptr rp; - mp_size_t rn; - - /* The current (partial) limb. */ - mp_limb_t limb; - /* The number of bytes already copied to this limb (starting from - the low end). */ - size_t bytes; - /* The index where the limb should be stored, when completed. */ - mp_size_t i; - - if (nails != 0) - gmp_die ("mpz_import: Nails not supported."); - - assert (order == 1 || order == -1); - assert (endian >= -1 && endian <= 1); - - if (endian == 0) - endian = gmp_detect_endian (); - - p = (unsigned char *) src; - - word_step = (order != endian) ? 2 * size : 0; - - /* Process bytes from the least significant end, so point p at the - least significant word. */ - if (order == 1) - { - p += size * (count - 1); - word_step = - word_step; - } - - /* And at least significant byte of that word. 
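With base 0, mpz_set_str above sniffs the prefix exactly as the code shows: "0x"/"0X" selects 16, "0b"/"0B" selects 2, a bare leading "0" selects 8, anything else selects 10, and the sign is consumed before the prefix. mpz_get_str is the inverse direction. A usage sketch, assuming the default malloc-based allocator when freeing the returned string:

    #include <stdio.h>
    #include <stdlib.h>
    #include "mini-gmp.h"

    int main(void)
    {
        mpz_t n;
        mpz_init(n);

        mpz_set_str(n, "0x1f", 0);  printf("%ld\n", mpz_get_si(n));  /* 31  */
        mpz_set_str(n, "0b101", 0); printf("%ld\n", mpz_get_si(n));  /*  5  */
        mpz_set_str(n, "017", 0);   printf("%ld\n", mpz_get_si(n));  /* 15  */
        mpz_set_str(n, "-42", 0);   printf("%ld\n", mpz_get_si(n));  /* -42 */

        char *hex = mpz_get_str(NULL, 16, n);    /* "-2a" */
        printf("%s\n", hex);
        free(hex);                               /* fine with the default allocator */

        mpz_clear(n);
        return 0;
    }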
*/ - if (endian == 1) - p += (size - 1); - - rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t); - rp = MPZ_REALLOC (r, rn); - - for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step) - { - size_t j; - for (j = 0; j < size; j++, p -= (ptrdiff_t) endian) - { - limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT); - if (bytes == sizeof(mp_limb_t)) - { - rp[i++] = limb; - bytes = 0; - limb = 0; - } - } - } - assert (i + (bytes > 0) == rn); - if (limb != 0) - rp[i++] = limb; - else - i = mpn_normalized_size (rp, i); - - r->_mp_size = i; -} - -void * -mpz_export (void *r, size_t *countp, int order, size_t size, int endian, - size_t nails, const mpz_t u) -{ - size_t count; - mp_size_t un; - - if (nails != 0) - gmp_die ("mpz_export: Nails not supported."); - - assert (order == 1 || order == -1); - assert (endian >= -1 && endian <= 1); - assert (size > 0 || u->_mp_size == 0); - - un = u->_mp_size; - count = 0; - if (un != 0) - { - size_t k; - unsigned char *p; - ptrdiff_t word_step; - /* The current (partial) limb. */ - mp_limb_t limb; - /* The number of bytes left to do in this limb. */ - size_t bytes; - /* The index where the limb was read. */ - mp_size_t i; - - un = GMP_ABS (un); - - /* Count bytes in top limb. */ - limb = u->_mp_d[un-1]; - assert (limb != 0); - - k = (GMP_LIMB_BITS <= CHAR_BIT); - if (!k) - { - do { - int LOCAL_CHAR_BIT = CHAR_BIT; - k++; limb >>= LOCAL_CHAR_BIT; - } while (limb != 0); - } - /* else limb = 0; */ - - count = (k + (un-1) * sizeof (mp_limb_t) + size - 1) / size; - - if (!r) - r = gmp_alloc (count * size); - - if (endian == 0) - endian = gmp_detect_endian (); - - p = (unsigned char *) r; - - word_step = (order != endian) ? 2 * size : 0; - - /* Process bytes from the least significant end, so point p at the - least significant word. */ - if (order == 1) - { - p += size * (count - 1); - word_step = - word_step; - } - - /* And at least significant byte of that word. */ - if (endian == 1) - p += (size - 1); - - for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step) - { - size_t j; - for (j = 0; j < size; ++j, p -= (ptrdiff_t) endian) - { - if (sizeof (mp_limb_t) == 1) - { - if (i < un) - *p = u->_mp_d[i++]; - else - *p = 0; - } - else - { - int LOCAL_CHAR_BIT = CHAR_BIT; - if (bytes == 0) - { - if (i < un) - limb = u->_mp_d[i++]; - bytes = sizeof (mp_limb_t); - } - *p = limb; - limb >>= LOCAL_CHAR_BIT; - bytes--; - } - } - } - assert (i == un); - assert (k == count); - } - - if (countp) - *countp = count; - - return r; -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp.h deleted file mode 100644 index f28cb360ce..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mini-gmp.h +++ /dev/null @@ -1,311 +0,0 @@ -/* mini-gmp, a minimalistic implementation of a GNU GMP subset. - -Copyright 2011-2015, 2017, 2019-2021 Free Software Foundation, Inc. - -This file is part of the GNU MP Library. - -The GNU MP Library is free software; you can redistribute it and/or modify -it under the terms of either: - - * the GNU Lesser General Public License as published by the Free - Software Foundation; either version 3 of the License, or (at your - option) any later version. - -or - - * the GNU General Public License as published by the Free Software - Foundation; either version 2 of the License, or (at your option) any - later version. - -or both in parallel, as here. 
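mpz_import and mpz_export above walk the word array from the least significant end, honouring word order, word size and byte order; nails are not supported. A round-trip through a big-endian byte string (order = 1, one-byte words) is a convenient sanity check; this sketch is not part of the patch:

    #include <stdio.h>
    #include <string.h>
    #include "mini-gmp.h"

    int main(void)
    {
        const unsigned char in[4] = { 0x12, 0x34, 0x56, 0x78 };
        unsigned char out[4] = { 0 };
        size_t count;
        mpz_t n;

        mpz_init(n);
        mpz_import(n, sizeof in, 1, 1, 0, 0, in);      /* most significant byte first */
        printf("%lx\n", mpz_get_ui(n));                /* 12345678 */

        mpz_export(out, &count, 1, 1, 0, 0, n);
        printf("%zu bytes, match: %d\n", count, memcmp(in, out, sizeof in) == 0);

        mpz_clear(n);
        return 0;
    }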
- -The GNU MP Library is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -for more details. - -You should have received copies of the GNU General Public License and the -GNU Lesser General Public License along with the GNU MP Library. If not, -see https://www.gnu.org/licenses/. */ - -/* About mini-gmp: This is a minimal implementation of a subset of the - GMP interface. It is intended for inclusion into applications which - have modest bignums needs, as a fallback when the real GMP library - is not installed. - - This file defines the public interface. */ - -#ifndef __MINI_GMP_H__ -#define __MINI_GMP_H__ - -/* For size_t */ -#include - -#if defined (__cplusplus) -extern "C" { -#endif - -void mp_set_memory_functions (void *(*) (size_t), - void *(*) (void *, size_t, size_t), - void (*) (void *, size_t)); - -void mp_get_memory_functions (void *(**) (size_t), - void *(**) (void *, size_t, size_t), - void (**) (void *, size_t)); - -#ifndef MINI_GMP_LIMB_TYPE -#define MINI_GMP_LIMB_TYPE long -#endif - -typedef unsigned MINI_GMP_LIMB_TYPE mp_limb_t; -typedef long mp_size_t; -typedef unsigned long mp_bitcnt_t; - -typedef mp_limb_t *mp_ptr; -typedef const mp_limb_t *mp_srcptr; - -typedef struct -{ - int _mp_alloc; /* Number of *limbs* allocated and pointed - to by the _mp_d field. */ - int _mp_size; /* abs(_mp_size) is the number of limbs the - last field points to. If _mp_size is - negative this is a negative number. */ - mp_limb_t *_mp_d; /* Pointer to the limbs. */ -} __mpz_struct; - -typedef __mpz_struct mpz_t[1]; - -typedef __mpz_struct *mpz_ptr; -typedef const __mpz_struct *mpz_srcptr; - -extern const int mp_bits_per_limb; - -void mpn_copyi (mp_ptr, mp_srcptr, mp_size_t); -void mpn_copyd (mp_ptr, mp_srcptr, mp_size_t); -void mpn_zero (mp_ptr, mp_size_t); - -int mpn_cmp (mp_srcptr, mp_srcptr, mp_size_t); -int mpn_zero_p (mp_srcptr, mp_size_t); - -mp_limb_t mpn_add_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_add_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); -mp_limb_t mpn_add (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); - -mp_limb_t mpn_sub_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_sub_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); -mp_limb_t mpn_sub (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); - -mp_limb_t mpn_mul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_addmul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_submul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); - -mp_limb_t mpn_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); -void mpn_mul_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); -void mpn_sqr (mp_ptr, mp_srcptr, mp_size_t); -int mpn_perfect_square_p (mp_srcptr, mp_size_t); -mp_size_t mpn_sqrtrem (mp_ptr, mp_ptr, mp_srcptr, mp_size_t); -mp_size_t mpn_gcd (mp_ptr, mp_ptr, mp_size_t, mp_ptr, mp_size_t); - -mp_limb_t mpn_lshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); -mp_limb_t mpn_rshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); - -mp_bitcnt_t mpn_scan0 (mp_srcptr, mp_bitcnt_t); -mp_bitcnt_t mpn_scan1 (mp_srcptr, mp_bitcnt_t); - -void mpn_com (mp_ptr, mp_srcptr, mp_size_t); -mp_limb_t mpn_neg (mp_ptr, mp_srcptr, mp_size_t); - -mp_bitcnt_t mpn_popcount (mp_srcptr, mp_size_t); - -mp_limb_t mpn_invert_3by2 (mp_limb_t, mp_limb_t); -#define mpn_invert_limb(x) mpn_invert_3by2 ((x), 0) - -size_t mpn_get_str (unsigned char *, int, mp_ptr, mp_size_t); 
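For readers new to this interface: the sign lives in _mp_size (a negative size means a negative value stored in |_mp_size| limbs), and every mpz_t must be initialized before use and cleared afterwards. A minimal usage sketch against this header, not taken from the patch:

    #include <stdio.h>
    #include "mini-gmp.h"

    int main(void)
    {
        mpz_t a, b, p, q, r;
        mpz_init_set_ui(a, 1000000007);
        mpz_init_set_ui(b, 998244353);
        mpz_init(p);
        mpz_init(q);
        mpz_init(r);

        mpz_mul(p, a, b);              /* p = a * b    */
        mpz_tdiv_qr(q, r, p, a);       /* q = b, r = 0 */
        printf("%d %d\n", mpz_cmp(q, b) == 0, mpz_sgn(r) == 0);   /* 1 1 */

        mpz_clear(a); mpz_clear(b); mpz_clear(p); mpz_clear(q); mpz_clear(r);
        return 0;
    }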
-mp_size_t mpn_set_str (mp_ptr, const unsigned char *, size_t, int); - -void mpz_init (mpz_t); -void mpz_init2 (mpz_t, mp_bitcnt_t); -void mpz_clear (mpz_t); - -#define mpz_odd_p(z) (((z)->_mp_size != 0) & (int) (z)->_mp_d[0]) -#define mpz_even_p(z) (! mpz_odd_p (z)) - -int mpz_sgn (const mpz_t); -int mpz_cmp_si (const mpz_t, long); -int mpz_cmp_ui (const mpz_t, unsigned long); -int mpz_cmp (const mpz_t, const mpz_t); -int mpz_cmpabs_ui (const mpz_t, unsigned long); -int mpz_cmpabs (const mpz_t, const mpz_t); -int mpz_cmp_d (const mpz_t, double); -int mpz_cmpabs_d (const mpz_t, double); - -void mpz_abs (mpz_t, const mpz_t); -void mpz_neg (mpz_t, const mpz_t); -void mpz_swap (mpz_t, mpz_t); - -void mpz_add_ui (mpz_t, const mpz_t, unsigned long); -void mpz_add (mpz_t, const mpz_t, const mpz_t); -void mpz_sub_ui (mpz_t, const mpz_t, unsigned long); -void mpz_ui_sub (mpz_t, unsigned long, const mpz_t); -void mpz_sub (mpz_t, const mpz_t, const mpz_t); - -void mpz_mul_si (mpz_t, const mpz_t, long int); -void mpz_mul_ui (mpz_t, const mpz_t, unsigned long int); -void mpz_mul (mpz_t, const mpz_t, const mpz_t); -void mpz_mul_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_addmul_ui (mpz_t, const mpz_t, unsigned long int); -void mpz_addmul (mpz_t, const mpz_t, const mpz_t); -void mpz_submul_ui (mpz_t, const mpz_t, unsigned long int); -void mpz_submul (mpz_t, const mpz_t, const mpz_t); - -void mpz_cdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_fdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_tdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_cdiv_q (mpz_t, const mpz_t, const mpz_t); -void mpz_fdiv_q (mpz_t, const mpz_t, const mpz_t); -void mpz_tdiv_q (mpz_t, const mpz_t, const mpz_t); -void mpz_cdiv_r (mpz_t, const mpz_t, const mpz_t); -void mpz_fdiv_r (mpz_t, const mpz_t, const mpz_t); -void mpz_tdiv_r (mpz_t, const mpz_t, const mpz_t); - -void mpz_cdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_fdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_tdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_cdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_fdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_tdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); - -void mpz_mod (mpz_t, const mpz_t, const mpz_t); - -void mpz_divexact (mpz_t, const mpz_t, const mpz_t); - -int mpz_divisible_p (const mpz_t, const mpz_t); -int mpz_congruent_p (const mpz_t, const mpz_t, const mpz_t); - -unsigned long mpz_cdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); -unsigned long mpz_fdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); -unsigned long mpz_tdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); -unsigned long mpz_cdiv_q_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_fdiv_q_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_tdiv_q_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_cdiv_r_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_fdiv_r_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_tdiv_r_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_cdiv_ui (const mpz_t, unsigned long); -unsigned long mpz_fdiv_ui (const mpz_t, unsigned long); -unsigned long mpz_tdiv_ui (const mpz_t, unsigned long); - -unsigned long mpz_mod_ui (mpz_t, const mpz_t, unsigned long); - -void mpz_divexact_ui (mpz_t, const mpz_t, unsigned long); - -int mpz_divisible_ui_p (const mpz_t, unsigned long); - -unsigned long mpz_gcd_ui (mpz_t, const mpz_t, unsigned long); -void mpz_gcd (mpz_t, const mpz_t, const mpz_t); -void 
mpz_gcdext (mpz_t, mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_lcm_ui (mpz_t, const mpz_t, unsigned long); -void mpz_lcm (mpz_t, const mpz_t, const mpz_t); -int mpz_invert (mpz_t, const mpz_t, const mpz_t); - -void mpz_sqrtrem (mpz_t, mpz_t, const mpz_t); -void mpz_sqrt (mpz_t, const mpz_t); -int mpz_perfect_square_p (const mpz_t); - -void mpz_pow_ui (mpz_t, const mpz_t, unsigned long); -void mpz_ui_pow_ui (mpz_t, unsigned long, unsigned long); -void mpz_powm (mpz_t, const mpz_t, const mpz_t, const mpz_t); -void mpz_powm_ui (mpz_t, const mpz_t, unsigned long, const mpz_t); - -void mpz_rootrem (mpz_t, mpz_t, const mpz_t, unsigned long); -int mpz_root (mpz_t, const mpz_t, unsigned long); - -void mpz_fac_ui (mpz_t, unsigned long); -void mpz_2fac_ui (mpz_t, unsigned long); -void mpz_mfac_uiui (mpz_t, unsigned long, unsigned long); -void mpz_bin_uiui (mpz_t, unsigned long, unsigned long); - -int mpz_probab_prime_p (const mpz_t, int); - -int mpz_tstbit (const mpz_t, mp_bitcnt_t); -void mpz_setbit (mpz_t, mp_bitcnt_t); -void mpz_clrbit (mpz_t, mp_bitcnt_t); -void mpz_combit (mpz_t, mp_bitcnt_t); - -void mpz_com (mpz_t, const mpz_t); -void mpz_and (mpz_t, const mpz_t, const mpz_t); -void mpz_ior (mpz_t, const mpz_t, const mpz_t); -void mpz_xor (mpz_t, const mpz_t, const mpz_t); - -mp_bitcnt_t mpz_popcount (const mpz_t); -mp_bitcnt_t mpz_hamdist (const mpz_t, const mpz_t); -mp_bitcnt_t mpz_scan0 (const mpz_t, mp_bitcnt_t); -mp_bitcnt_t mpz_scan1 (const mpz_t, mp_bitcnt_t); - -int mpz_fits_slong_p (const mpz_t); -int mpz_fits_ulong_p (const mpz_t); -int mpz_fits_sint_p (const mpz_t); -int mpz_fits_uint_p (const mpz_t); -int mpz_fits_sshort_p (const mpz_t); -int mpz_fits_ushort_p (const mpz_t); -long int mpz_get_si (const mpz_t); -unsigned long int mpz_get_ui (const mpz_t); -double mpz_get_d (const mpz_t); -size_t mpz_size (const mpz_t); -mp_limb_t mpz_getlimbn (const mpz_t, mp_size_t); - -void mpz_realloc2 (mpz_t, mp_bitcnt_t); -mp_srcptr mpz_limbs_read (mpz_srcptr); -mp_ptr mpz_limbs_modify (mpz_t, mp_size_t); -mp_ptr mpz_limbs_write (mpz_t, mp_size_t); -void mpz_limbs_finish (mpz_t, mp_size_t); -mpz_srcptr mpz_roinit_n (mpz_t, mp_srcptr, mp_size_t); - -#define MPZ_ROINIT_N(xp, xs) {{0, (xs),(xp) }} - -void mpz_set_si (mpz_t, signed long int); -void mpz_set_ui (mpz_t, unsigned long int); -void mpz_set (mpz_t, const mpz_t); -void mpz_set_d (mpz_t, double); - -void mpz_init_set_si (mpz_t, signed long int); -void mpz_init_set_ui (mpz_t, unsigned long int); -void mpz_init_set (mpz_t, const mpz_t); -void mpz_init_set_d (mpz_t, double); - -size_t mpz_sizeinbase (const mpz_t, int); -char *mpz_get_str (char *, int, const mpz_t); -int mpz_set_str (mpz_t, const char *, int); -int mpz_init_set_str (mpz_t, const char *, int); - -/* This long list taken from gmp.h. */ -/* For reference, "defined(EOF)" cannot be used here. In g++ 2.95.4, - defines EOF but not FILE. 
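mpz_roinit_n and the MPZ_ROINIT_N initializer declared above wrap an existing limb array as a read-only mpz without copying; the result may only appear as a source operand and must not be cleared. A small sketch of that pattern (not code from the patch):

    #include <stdio.h>
    #include "mini-gmp.h"

    int main(void)
    {
        mp_limb_t limbs[2] = { 1, 1 };      /* value = B + 1 with B = 2^GMP_LIMB_BITS */
        mpz_t view, sum;

        mpz_srcptr ro = mpz_roinit_n(view, limbs, 2);    /* no mpz_init, no mpz_clear */

        mpz_init(sum);
        mpz_add(sum, ro, ro);                            /* 2*B + 2 */
        printf("%zu\n", mpz_sizeinbase(sum, 2));         /* GMP_LIMB_BITS + 2 */

        mpz_clear(sum);
        return 0;
    }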
*/ -#if defined (FILE) \ - || defined (H_STDIO) \ - || defined (_H_STDIO) /* AIX */ \ - || defined (_STDIO_H) /* glibc, Sun, SCO */ \ - || defined (_STDIO_H_) /* BSD, OSF */ \ - || defined (__STDIO_H) /* Borland */ \ - || defined (__STDIO_H__) /* IRIX */ \ - || defined (_STDIO_INCLUDED) /* HPUX */ \ - || defined (__dj_include_stdio_h_) /* DJGPP */ \ - || defined (_FILE_DEFINED) /* Microsoft */ \ - || defined (__STDIO__) /* Apple MPW MrC */ \ - || defined (_MSL_STDIO_H) /* Metrowerks */ \ - || defined (_STDIO_H_INCLUDED) /* QNX4 */ \ - || defined (_ISO_STDIO_ISO_H) /* Sun C++ */ \ - || defined (__STDIO_LOADED) /* VMS */ \ - || defined (_STDIO) /* HPE NonStop */ \ - || defined (__DEFINED_FILE) /* musl */ -size_t mpz_out_str (FILE *, int, const mpz_t); -#endif - -void mpz_import (mpz_t, size_t, int, size_t, int, size_t, const void *); -void *mpz_export (void *, size_t *, int, size_t, int, size_t, const mpz_t); - -#if defined (__cplusplus) -} -#endif -#endif /* __MINI_GMP_H__ */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign_namespace.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign_namespace.h index bbfe72c13b..54e90326be 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign_namespace.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign_namespace.h @@ -18,6 +18,12 @@ #define PARAM_JOIN2(a, b) PARAM_JOIN2_(a, b) #define PARAM_NAME2(end, s) PARAM_JOIN2(end, s) +#ifndef DISABLE_NAMESPACING +#define SQISIGN_NAMESPACE_GENERIC(s) PARAM_NAME2(gen, s) +#else +#define SQISIGN_NAMESPACE_GENERIC(s) s +#endif + #if defined(SQISIGN_VARIANT) && !defined(DISABLE_NAMESPACING) #if defined(SQISIGN_BUILD_TYPE_REF) #define SQISIGN_NAMESPACE(s) PARAM_NAME3(ref, s) @@ -94,6 +100,16 @@ #define lift_basis SQISIGN_NAMESPACE(lift_basis) #define lift_basis_normalized SQISIGN_NAMESPACE(lift_basis_normalized) +// Namespacing symbols exported from basis.c, ec.c: +#undef xDBL_E0 + +#define xDBL_E0 SQISIGN_NAMESPACE(xDBL_E0) + +// Namespacing symbols exported from basis.c, ec.c, isog_chains.c: +#undef xDBL_A24 + +#define xDBL_A24 SQISIGN_NAMESPACE(xDBL_A24) + // Namespacing symbols exported from biextension.c: #undef clear_cofac #undef ec_dlog_2_tate @@ -109,6 +125,11 @@ #define reduced_tate SQISIGN_NAMESPACE(reduced_tate) #define weil SQISIGN_NAMESPACE(weil) +// Namespacing symbols exported from biextension.c, ec_jac.c, hd.c: +#undef ADD + +#define ADD SQISIGN_NAMESPACE(ADD) + // Namespacing symbols exported from common.c: #undef hash_to_challenge #undef public_key_finalize @@ -148,6 +169,28 @@ #define find_uv SQISIGN_NAMESPACE(find_uv) #define fixed_degree_isogeny_and_eval SQISIGN_NAMESPACE(fixed_degree_isogeny_and_eval) +// Namespacing symbols exported from dim2id2iso.c, encode_signature.c, id2iso.c, keygen.c, quaternion_data.c, sign.c: +#undef EXTREMAL_ORDERS +#undef QUATALG_PINFTY + +#define EXTREMAL_ORDERS SQISIGN_NAMESPACE(EXTREMAL_ORDERS) +#define QUATALG_PINFTY SQISIGN_NAMESPACE(QUATALG_PINFTY) + +// Namespacing symbols exported from dim2id2iso.c, endomorphism_action.c, id2iso.c: +#undef CURVES_WITH_ENDOMORPHISMS + +#define CURVES_WITH_ENDOMORPHISMS SQISIGN_NAMESPACE(CURVES_WITH_ENDOMORPHISMS) + +// Namespacing symbols exported from dim2id2iso.c, id2iso.c, sign.c, torsion_constants.c: +#undef TORSION_PLUS_2POWER + +#define TORSION_PLUS_2POWER SQISIGN_NAMESPACE(TORSION_PLUS_2POWER) + +// Namespacing symbols exported from dim2id2iso.c, quaternion_data.c: +#undef CONNECTING_IDEALS + +#define CONNECTING_IDEALS SQISIGN_NAMESPACE(CONNECTING_IDEALS) + // Namespacing symbols 
exported from dim4.c: #undef ibz_inv_dim4_make_coeff_mpm #undef ibz_inv_dim4_make_coeff_pmp @@ -207,6 +250,13 @@ #define ibz_vec_4_sub SQISIGN_NAMESPACE(ibz_vec_4_sub) #define quat_qf_eval SQISIGN_NAMESPACE(quat_qf_eval) +// Namespacing symbols exported from e0_basis.c: +#undef BASIS_E0_PX +#undef BASIS_E0_QX + +#define BASIS_E0_PX SQISIGN_NAMESPACE(BASIS_E0_PX) +#define BASIS_E0_QX SQISIGN_NAMESPACE(BASIS_E0_QX) + // Namespacing symbols exported from ec.c: #undef cswap_points #undef ec_biscalar_mul @@ -235,8 +285,6 @@ #undef xDBL #undef xDBLADD #undef xDBLMUL -#undef xDBL_A24 -#undef xDBL_E0 #undef xMUL #define cswap_points SQISIGN_NAMESPACE(cswap_points) @@ -266,14 +314,9 @@ #define xDBL SQISIGN_NAMESPACE(xDBL) #define xDBLADD SQISIGN_NAMESPACE(xDBLADD) #define xDBLMUL SQISIGN_NAMESPACE(xDBLMUL) -#define xDBL_A24 SQISIGN_NAMESPACE(xDBL_A24) -#define xDBL_E0 SQISIGN_NAMESPACE(xDBL_E0) #define xMUL SQISIGN_NAMESPACE(xMUL) // Namespacing symbols exported from ec_jac.c: -#undef ADD -#undef DBL -#undef DBLW #undef copy_jac_point #undef jac_from_ws #undef jac_init @@ -284,9 +327,6 @@ #undef jac_to_xz_add_components #undef select_jac_point -#define ADD SQISIGN_NAMESPACE(ADD) -#define DBL SQISIGN_NAMESPACE(DBL) -#define DBLW SQISIGN_NAMESPACE(DBLW) #define copy_jac_point SQISIGN_NAMESPACE(copy_jac_point) #define jac_from_ws SQISIGN_NAMESPACE(jac_from_ws) #define jac_init SQISIGN_NAMESPACE(jac_init) @@ -297,6 +337,21 @@ #define jac_to_xz_add_components SQISIGN_NAMESPACE(jac_to_xz_add_components) #define select_jac_point SQISIGN_NAMESPACE(select_jac_point) +// Namespacing symbols exported from ec_jac.c, hd.c: +#undef DBLW + +#define DBLW SQISIGN_NAMESPACE(DBLW) + +// Namespacing symbols exported from ec_jac.c, hd.c, theta_isogenies.c: +#undef DBL + +#define DBL SQISIGN_NAMESPACE(DBL) + +// Namespacing symbols exported from ec_params.c: +#undef p_cofactor_for_2f + +#define p_cofactor_for_2f SQISIGN_NAMESPACE(p_cofactor_for_2f) + // Namespacing symbols exported from encode_signature.c: #undef secret_key_from_bytes #undef secret_key_to_bytes @@ -455,21 +510,24 @@ #define fp_set_one SQISIGN_NAMESPACE(fp_set_one) #define fp_set_small SQISIGN_NAMESPACE(fp_set_small) #define fp_set_zero SQISIGN_NAMESPACE(fp_set_zero) -#define ONE SQISIGN_NAMESPACE(ONE) -#define ZERO SQISIGN_NAMESPACE(ZERO) // Namespacing symbols exported from fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c, gf27500.c, gf5248.c, gf65376.c: +#undef ONE +#undef ZERO #undef fp_add #undef fp_mul #undef fp_sqr #undef fp_sub +#define ONE SQISIGN_NAMESPACE(ONE) +#define ZERO SQISIGN_NAMESPACE(ZERO) #define fp_add SQISIGN_NAMESPACE(fp_add) #define fp_mul SQISIGN_NAMESPACE(fp_mul) #define fp_sqr SQISIGN_NAMESPACE(fp_sqr) #define fp_sub SQISIGN_NAMESPACE(fp_sub) // Namespacing symbols exported from gf27500.c: +#undef gf27500_MINUS_ONE #undef gf27500_decode #undef gf27500_decode_reduce #undef gf27500_div @@ -479,6 +537,7 @@ #undef gf27500_legendre #undef gf27500_sqrt +#define gf27500_MINUS_ONE SQISIGN_NAMESPACE(gf27500_MINUS_ONE) #define gf27500_decode SQISIGN_NAMESPACE(gf27500_decode) #define gf27500_decode_reduce SQISIGN_NAMESPACE(gf27500_decode_reduce) #define gf27500_div SQISIGN_NAMESPACE(gf27500_div) @@ -500,6 +559,7 @@ #define fp2_sq_c1 SQISIGN_NAMESPACE(fp2_sq_c1) // Namespacing symbols exported from gf5248.c: +#undef gf5248_MINUS_ONE #undef gf5248_decode #undef gf5248_decode_reduce #undef gf5248_div @@ -509,6 +569,7 @@ #undef gf5248_legendre #undef gf5248_sqrt +#define gf5248_MINUS_ONE SQISIGN_NAMESPACE(gf5248_MINUS_ONE) #define gf5248_decode 
SQISIGN_NAMESPACE(gf5248_decode) #define gf5248_decode_reduce SQISIGN_NAMESPACE(gf5248_decode_reduce) #define gf5248_div SQISIGN_NAMESPACE(gf5248_div) @@ -519,6 +580,7 @@ #define gf5248_sqrt SQISIGN_NAMESPACE(gf5248_sqrt) // Namespacing symbols exported from gf65376.c: +#undef gf65376_MINUS_ONE #undef gf65376_decode #undef gf65376_decode_reduce #undef gf65376_div @@ -528,6 +590,7 @@ #undef gf65376_legendre #undef gf65376_sqrt +#define gf65376_MINUS_ONE SQISIGN_NAMESPACE(gf65376_MINUS_ONE) #define gf65376_decode SQISIGN_NAMESPACE(gf65376_decode) #define gf65376_decode_reduce SQISIGN_NAMESPACE(gf65376_decode_reduce) #define gf65376_div SQISIGN_NAMESPACE(gf65376_div) @@ -554,6 +617,22 @@ #define double_couple_point SQISIGN_NAMESPACE(double_couple_point) #define double_couple_point_iter SQISIGN_NAMESPACE(double_couple_point_iter) +// Namespacing symbols exported from hd_splitting_transforms.c: +#undef CHI_EVAL + +#define CHI_EVAL SQISIGN_NAMESPACE(CHI_EVAL) + +// Namespacing symbols exported from hd_splitting_transforms.c, theta_isogenies.c: +#undef EVEN_INDEX +#undef FP2_CONSTANTS +#undef NORMALIZATION_TRANSFORMS +#undef SPLITTING_TRANSFORMS + +#define EVEN_INDEX SQISIGN_NAMESPACE(EVEN_INDEX) +#define FP2_CONSTANTS SQISIGN_NAMESPACE(FP2_CONSTANTS) +#define NORMALIZATION_TRANSFORMS SQISIGN_NAMESPACE(NORMALIZATION_TRANSFORMS) +#define SPLITTING_TRANSFORMS SQISIGN_NAMESPACE(SPLITTING_TRANSFORMS) + // Namespacing symbols exported from hnf.c: #undef ibz_mat_4x4_is_hnf #undef ibz_mat_4xn_hnf_mod_core @@ -761,6 +840,11 @@ #define secret_key_finalize SQISIGN_NAMESPACE(secret_key_finalize) #define secret_key_init SQISIGN_NAMESPACE(secret_key_init) +// Namespacing symbols exported from keygen.c, torsion_constants.c: +#undef SEC_DEGREE + +#define SEC_DEGREE SQISIGN_NAMESPACE(SEC_DEGREE) + // Namespacing symbols exported from l2.c: #undef quat_lattice_lll #undef quat_lll_core @@ -910,6 +994,16 @@ #define quat_lattice_print SQISIGN_NAMESPACE(quat_lattice_print) #define quat_left_ideal_print SQISIGN_NAMESPACE(quat_left_ideal_print) +// Namespacing symbols exported from quaternion_data.c: +#undef CONJUGATING_ELEMENTS + +#define CONJUGATING_ELEMENTS SQISIGN_NAMESPACE(CONJUGATING_ELEMENTS) + +// Namespacing symbols exported from quaternion_data.c, sign.c: +#undef QUAT_prime_cofactor + +#define QUAT_prime_cofactor SQISIGN_NAMESPACE(QUAT_prime_cofactor) + // Namespacing symbols exported from random_input_generation.c: #undef quat_test_input_random_ideal_generation #undef quat_test_input_random_ideal_lattice_generation @@ -971,6 +1065,11 @@ #define protocols_sign SQISIGN_NAMESPACE(protocols_sign) +// Namespacing symbols exported from sign.c, torsion_constants.c: +#undef COM_DEGREE + +#define COM_DEGREE SQISIGN_NAMESPACE(COM_DEGREE) + // Namespacing symbols exported from sqisign.c: #undef sqisign_keypair #undef sqisign_open @@ -1006,6 +1105,11 @@ #define is_product_theta_point SQISIGN_NAMESPACE(is_product_theta_point) #define theta_precomputation SQISIGN_NAMESPACE(theta_precomputation) +// Namespacing symbols exported from torsion_constants.c: +#undef TWO_TO_SECURITY_BITS + +#define TWO_TO_SECURITY_BITS SQISIGN_NAMESPACE(TWO_TO_SECURITY_BITS) + // Namespacing symbols exported from verify.c: #undef protocols_verify @@ -1029,45 +1133,7 @@ #define xisog_2_singular SQISIGN_NAMESPACE(xisog_2_singular) #define xisog_4 SQISIGN_NAMESPACE(xisog_4) -// Namespacing symbols from precomp: -#undef BASIS_E0_PX -#undef BASIS_E0_QX -#undef p_cofactor_for_2f -#undef CURVES_WITH_ENDOMORPHISMS -#undef EVEN_INDEX -#undef CHI_EVAL 
-#undef FP2_CONSTANTS -#undef SPLITTING_TRANSFORMS -#undef NORMALIZATION_TRANSFORMS -#undef QUAT_prime_cofactor -#undef QUATALG_PINFTY -#undef EXTREMAL_ORDERS -#undef CONNECTING_IDEALS -#undef CONJUGATING_ELEMENTS -#undef TWO_TO_SECURITY_BITS -#undef TORSION_PLUS_2POWER -#undef SEC_DEGREE -#undef COM_DEGREE - -#define BASIS_E0_PX SQISIGN_NAMESPACE(BASIS_E0_PX) -#define BASIS_E0_QX SQISIGN_NAMESPACE(BASIS_E0_QX) -#define p_cofactor_for_2f SQISIGN_NAMESPACE(p_cofactor_for_2f) -#define CURVES_WITH_ENDOMORPHISMS SQISIGN_NAMESPACE(CURVES_WITH_ENDOMORPHISMS) -#define EVEN_INDEX SQISIGN_NAMESPACE(EVEN_INDEX) -#define CHI_EVAL SQISIGN_NAMESPACE(CHI_EVAL) -#define FP2_CONSTANTS SQISIGN_NAMESPACE(FP2_CONSTANTS) -#define SPLITTING_TRANSFORMS SQISIGN_NAMESPACE(SPLITTING_TRANSFORMS) -#define NORMALIZATION_TRANSFORMS SQISIGN_NAMESPACE(NORMALIZATION_TRANSFORMS) -#define QUAT_prime_cofactor SQISIGN_NAMESPACE(QUAT_prime_cofactor) -#define QUATALG_PINFTY SQISIGN_NAMESPACE(QUATALG_PINFTY) -#define EXTREMAL_ORDERS SQISIGN_NAMESPACE(EXTREMAL_ORDERS) -#define CONNECTING_IDEALS SQISIGN_NAMESPACE(CONNECTING_IDEALS) -#define CONJUGATING_ELEMENTS SQISIGN_NAMESPACE(CONJUGATING_ELEMENTS) -#define TWO_TO_SECURITY_BITS SQISIGN_NAMESPACE(TWO_TO_SECURITY_BITS) -#define TORSION_PLUS_2POWER SQISIGN_NAMESPACE(TORSION_PLUS_2POWER) -#define SEC_DEGREE SQISIGN_NAMESPACE(SEC_DEGREE) -#define COM_DEGREE SQISIGN_NAMESPACE(COM_DEGREE) - #endif +// This file is generated by scripts/Namespace.scala, do not edit it manually! diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.c deleted file mode 100644 index 396d505aec..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.c +++ /dev/null @@ -1,73 +0,0 @@ -#include -#include -#if defined(MINI_GMP) -#include "mini-gmp.h" -#else -// This configuration is used only for testing -#include -#endif -#include - -// Exported for testing -int -mini_mpz_legendre(const mpz_t a, const mpz_t p) -{ - int res = 0; - mpz_t e; - mpz_init_set(e, p); - mpz_sub_ui(e, e, 1); - mpz_fdiv_q_2exp(e, e, 1); - mpz_powm(e, a, e, p); - - if (mpz_cmp_ui(e, 1) <= 0) { - res = mpz_get_si(e); - } else { - res = -1; - } - mpz_clear(e); - return res; -} - -#if defined(MINI_GMP) -int -mpz_legendre(const mpz_t a, const mpz_t p) -{ - return mini_mpz_legendre(a, p); -} -#endif - -// Exported for testing -double -mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op) -{ - double ret; - int tmp_exp; - mpz_t tmp; - - // Handle the case where op is 0 - if (mpz_cmp_ui(op, 0) == 0) { - *exp = 0; - return 0.0; - } - - *exp = mpz_sizeinbase(op, 2); - - mpz_init_set(tmp, op); - - if (*exp > DBL_MAX_EXP) { - mpz_fdiv_q_2exp(tmp, tmp, *exp - DBL_MAX_EXP); - } - - ret = frexp(mpz_get_d(tmp), &tmp_exp); - mpz_clear(tmp); - - return ret; -} - -#if defined(MINI_GMP) -double -mpz_get_d_2exp(signed long int *exp, const mpz_t op) -{ - return mini_mpz_get_d_2exp(exp, op); -} -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.h deleted file mode 100644 index 0113cfdfe6..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp-extra.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef MINI_GMP_EXTRA_H -#define MINI_GMP_EXTRA_H - -#if defined MINI_GMP -#include "mini-gmp.h" - -typedef long mp_exp_t; - -int mpz_legendre(const mpz_t a, const mpz_t p); -double mpz_get_d_2exp(signed long int *exp, const mpz_t 
op); -#else -// This configuration is used only for testing -#include -#endif - -int mini_mpz_legendre(const mpz_t a, const mpz_t p); -double mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op); - -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp.c deleted file mode 100644 index 3830ab2031..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp.c +++ /dev/null @@ -1,4671 +0,0 @@ -/* Note: The code from mini-gmp is modifed from the original by - commenting out the definition of GMP_LIMB_BITS */ - -/* - mini-gmp, a minimalistic implementation of a GNU GMP subset. - - Contributed to the GNU project by Niels Möller - Additional functionalities and improvements by Marco Bodrato. - -Copyright 1991-1997, 1999-2022 Free Software Foundation, Inc. - -This file is part of the GNU MP Library. - -The GNU MP Library is free software; you can redistribute it and/or modify -it under the terms of either: - - * the GNU Lesser General Public License as published by the Free - Software Foundation; either version 3 of the License, or (at your - option) any later version. - -or - - * the GNU General Public License as published by the Free Software - Foundation; either version 2 of the License, or (at your option) any - later version. - -or both in parallel, as here. - -The GNU MP Library is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -for more details. - -You should have received copies of the GNU General Public License and the -GNU Lesser General Public License along with the GNU MP Library. If not, -see https://www.gnu.org/licenses/. */ - -/* NOTE: All functions in this file which are not declared in - mini-gmp.h are internal, and are not intended to be compatible - with GMP or with future versions of mini-gmp. */ - -/* Much of the material copied from GMP files, including: gmp-impl.h, - longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c, - mpn/generic/lshift.c, mpn/generic/mul_1.c, - mpn/generic/mul_basecase.c, mpn/generic/rshift.c, - mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c, - mpn/generic/submul_1.c. */ - -#include -#include -#include -#include -#include -#include - -#include "mini-gmp.h" - -#if !defined(MINI_GMP_DONT_USE_FLOAT_H) -#include -#endif - - -/* Macros */ -/* Removed from here as it is passed as a compiler command-line definition */ -/* #define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT) */ - -#define GMP_LIMB_MAX ((mp_limb_t) ~ (mp_limb_t) 0) -#define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1)) - -#define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2)) -#define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1) - -#define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT) -#define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1)) - -#define GMP_ABS(x) ((x) >= 0 ? (x) : -(x)) -#define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1)) - -#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b)) -#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b)) - -#define GMP_CMP(a,b) (((a) > (b)) - ((a) < (b))) - -#if defined(DBL_MANT_DIG) && FLT_RADIX == 2 -#define GMP_DBL_MANT_BITS DBL_MANT_DIG -#else -#define GMP_DBL_MANT_BITS (53) -#endif - -/* Return non-zero if xp,xsize and yp,ysize overlap. - If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no - overlap. If both these are false, there's an overlap. 
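The mpz_legendre fallback declared above (and implemented in the deleted mini-gmp-extra.c) relies on Euler's criterion: for an odd prime p, a^((p-1)/2) mod p is 1 for a nonzero square, p - 1 for a non-square, and 0 when p divides a. On word-sized inputs the same rule reads as follows (legendre_u32 is an illustrative helper, not part of the patch):

    #include <stdint.h>

    /* Legendre symbol (a/p) for an odd prime p, via Euler's criterion. */
    static int legendre_u32(uint32_t a, uint32_t p)
    {
        uint32_t e = (p - 1) / 2;
        uint64_t r = 1, base = a % p;

        while (e > 0) {                     /* right-to-left square and multiply */
            if (e & 1)
                r = (r * base) % p;
            base = (base * base) % p;
            e >>= 1;
        }
        if (r == 0)
            return 0;                       /* p divides a */
        return (r == 1) ? 1 : -1;           /* r == p - 1 means non-square */
    }

For instance, legendre_u32(2, 7) is 1 because 3^2 = 2 (mod 7), while legendre_u32(3, 7) is -1.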
*/ -#define GMP_MPN_OVERLAP_P(xp, xsize, yp, ysize) \ - ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp)) - -#define gmp_assert_nocarry(x) do { \ - mp_limb_t __cy = (x); \ - assert (__cy == 0); \ - (void) (__cy); \ - } while (0) - -#define gmp_clz(count, x) do { \ - mp_limb_t __clz_x = (x); \ - unsigned __clz_c = 0; \ - int LOCAL_SHIFT_BITS = 8; \ - if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) \ - for (; \ - (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \ - __clz_c += 8) \ - { __clz_x <<= LOCAL_SHIFT_BITS; } \ - for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++) \ - __clz_x <<= 1; \ - (count) = __clz_c; \ - } while (0) - -#define gmp_ctz(count, x) do { \ - mp_limb_t __ctz_x = (x); \ - unsigned __ctz_c = 0; \ - gmp_clz (__ctz_c, __ctz_x & - __ctz_x); \ - (count) = GMP_LIMB_BITS - 1 - __ctz_c; \ - } while (0) - -#define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \ - do { \ - mp_limb_t __x; \ - __x = (al) + (bl); \ - (sh) = (ah) + (bh) + (__x < (al)); \ - (sl) = __x; \ - } while (0) - -#define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \ - do { \ - mp_limb_t __x; \ - __x = (al) - (bl); \ - (sh) = (ah) - (bh) - ((al) < (bl)); \ - (sl) = __x; \ - } while (0) - -#define gmp_umul_ppmm(w1, w0, u, v) \ - do { \ - int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; \ - if (sizeof(unsigned int) * CHAR_BIT >= 2 * GMP_LIMB_BITS) \ - { \ - unsigned int __ww = (unsigned int) (u) * (v); \ - w0 = (mp_limb_t) __ww; \ - w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ - } \ - else if (GMP_ULONG_BITS >= 2 * GMP_LIMB_BITS) \ - { \ - unsigned long int __ww = (unsigned long int) (u) * (v); \ - w0 = (mp_limb_t) __ww; \ - w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ - } \ - else { \ - mp_limb_t __x0, __x1, __x2, __x3; \ - unsigned __ul, __vl, __uh, __vh; \ - mp_limb_t __u = (u), __v = (v); \ - assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); \ - \ - __ul = __u & GMP_LLIMB_MASK; \ - __uh = __u >> (GMP_LIMB_BITS / 2); \ - __vl = __v & GMP_LLIMB_MASK; \ - __vh = __v >> (GMP_LIMB_BITS / 2); \ - \ - __x0 = (mp_limb_t) __ul * __vl; \ - __x1 = (mp_limb_t) __ul * __vh; \ - __x2 = (mp_limb_t) __uh * __vl; \ - __x3 = (mp_limb_t) __uh * __vh; \ - \ - __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */ \ - __x1 += __x2; /* but this indeed can */ \ - if (__x1 < __x2) /* did we get it? */ \ - __x3 += GMP_HLIMB_BIT; /* yes, add it in the proper pos. */ \ - \ - (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \ - (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \ - } \ - } while (0) - -/* If mp_limb_t is of size smaller than int, plain u*v implies - automatic promotion to *signed* int, and then multiply may overflow - and cause undefined behavior. Explicitly cast to unsigned int for - that case. */ -#define gmp_umullo_limb(u, v) \ - ((sizeof(mp_limb_t) >= sizeof(int)) ? 
(u)*(v) : (unsigned int)(u) * (v)) - -#define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di) \ - do { \ - mp_limb_t _qh, _ql, _r, _mask; \ - gmp_umul_ppmm (_qh, _ql, (nh), (di)); \ - gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \ - _r = (nl) - gmp_umullo_limb (_qh, (d)); \ - _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \ - _qh += _mask; \ - _r += _mask & (d); \ - if (_r >= (d)) \ - { \ - _r -= (d); \ - _qh++; \ - } \ - \ - (r) = _r; \ - (q) = _qh; \ - } while (0) - -#define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \ - do { \ - mp_limb_t _q0, _t1, _t0, _mask; \ - gmp_umul_ppmm ((q), _q0, (n2), (dinv)); \ - gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \ - \ - /* Compute the two most significant limbs of n - q'd */ \ - (r1) = (n1) - gmp_umullo_limb ((d1), (q)); \ - gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \ - gmp_umul_ppmm (_t1, _t0, (d0), (q)); \ - gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \ - (q)++; \ - \ - /* Conditionally adjust q and the remainders */ \ - _mask = - (mp_limb_t) ((r1) >= _q0); \ - (q) += _mask; \ - gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \ - if ((r1) >= (d1)) \ - { \ - if ((r1) > (d1) || (r0) >= (d0)) \ - { \ - (q)++; \ - gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \ - } \ - } \ - } while (0) - -/* Swap macros. */ -#define MP_LIMB_T_SWAP(x, y) \ - do { \ - mp_limb_t __mp_limb_t_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_limb_t_swap__tmp; \ - } while (0) -#define MP_SIZE_T_SWAP(x, y) \ - do { \ - mp_size_t __mp_size_t_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_size_t_swap__tmp; \ - } while (0) -#define MP_BITCNT_T_SWAP(x,y) \ - do { \ - mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_bitcnt_t_swap__tmp; \ - } while (0) -#define MP_PTR_SWAP(x, y) \ - do { \ - mp_ptr __mp_ptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_ptr_swap__tmp; \ - } while (0) -#define MP_SRCPTR_SWAP(x, y) \ - do { \ - mp_srcptr __mp_srcptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_srcptr_swap__tmp; \ - } while (0) - -#define MPN_PTR_SWAP(xp,xs, yp,ys) \ - do { \ - MP_PTR_SWAP (xp, yp); \ - MP_SIZE_T_SWAP (xs, ys); \ - } while(0) -#define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \ - do { \ - MP_SRCPTR_SWAP (xp, yp); \ - MP_SIZE_T_SWAP (xs, ys); \ - } while(0) - -#define MPZ_PTR_SWAP(x, y) \ - do { \ - mpz_ptr __mpz_ptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mpz_ptr_swap__tmp; \ - } while (0) -#define MPZ_SRCPTR_SWAP(x, y) \ - do { \ - mpz_srcptr __mpz_srcptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mpz_srcptr_swap__tmp; \ - } while (0) - -const int mp_bits_per_limb = GMP_LIMB_BITS; - - -/* Memory allocation and other helper functions. 
*/ -static void -gmp_die (const char *msg) -{ - fprintf (stderr, "%s\n", msg); - abort(); -} - -static void * -gmp_default_alloc (size_t size) -{ - void *p; - - assert (size > 0); - - p = malloc (size); - if (!p) - gmp_die("gmp_default_alloc: Virtual memory exhausted."); - - return p; -} - -static void * -gmp_default_realloc (void *old, size_t unused_old_size, size_t new_size) -{ - void * p; - - p = realloc (old, new_size); - - if (!p) - gmp_die("gmp_default_realloc: Virtual memory exhausted."); - - return p; -} - -static void -gmp_default_free (void *p, size_t unused_size) -{ - free (p); -} - -static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc; -static void * (*gmp_reallocate_func) (void *, size_t, size_t) = gmp_default_realloc; -static void (*gmp_free_func) (void *, size_t) = gmp_default_free; - -void -mp_get_memory_functions (void *(**alloc_func) (size_t), - void *(**realloc_func) (void *, size_t, size_t), - void (**free_func) (void *, size_t)) -{ - if (alloc_func) - *alloc_func = gmp_allocate_func; - - if (realloc_func) - *realloc_func = gmp_reallocate_func; - - if (free_func) - *free_func = gmp_free_func; -} - -void -mp_set_memory_functions (void *(*alloc_func) (size_t), - void *(*realloc_func) (void *, size_t, size_t), - void (*free_func) (void *, size_t)) -{ - if (!alloc_func) - alloc_func = gmp_default_alloc; - if (!realloc_func) - realloc_func = gmp_default_realloc; - if (!free_func) - free_func = gmp_default_free; - - gmp_allocate_func = alloc_func; - gmp_reallocate_func = realloc_func; - gmp_free_func = free_func; -} - -#define gmp_alloc(size) ((*gmp_allocate_func)((size))) -#define gmp_free(p, size) ((*gmp_free_func) ((p), (size))) -#define gmp_realloc(ptr, old_size, size) ((*gmp_reallocate_func)(ptr, old_size, size)) - -static mp_ptr -gmp_alloc_limbs (mp_size_t size) -{ - return (mp_ptr) gmp_alloc (size * sizeof (mp_limb_t)); -} - -static mp_ptr -gmp_realloc_limbs (mp_ptr old, mp_size_t old_size, mp_size_t size) -{ - assert (size > 0); - return (mp_ptr) gmp_realloc (old, old_size * sizeof (mp_limb_t), size * sizeof (mp_limb_t)); -} - -static void -gmp_free_limbs (mp_ptr old, mp_size_t size) -{ - gmp_free (old, size * sizeof (mp_limb_t)); -} - - -/* MPN interface */ - -void -mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n) -{ - mp_size_t i; - for (i = 0; i < n; i++) - d[i] = s[i]; -} - -void -mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n) -{ - while (--n >= 0) - d[n] = s[n]; -} - -int -mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - while (--n >= 0) - { - if (ap[n] != bp[n]) - return ap[n] > bp[n] ? 1 : -1; - } - return 0; -} - -static int -mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) -{ - if (an != bn) - return an < bn ? 
-1 : 1; - else - return mpn_cmp (ap, bp, an); -} - -static mp_size_t -mpn_normalized_size (mp_srcptr xp, mp_size_t n) -{ - while (n > 0 && xp[n-1] == 0) - --n; - return n; -} - -int -mpn_zero_p(mp_srcptr rp, mp_size_t n) -{ - return mpn_normalized_size (rp, n) == 0; -} - -void -mpn_zero (mp_ptr rp, mp_size_t n) -{ - while (--n >= 0) - rp[n] = 0; -} - -mp_limb_t -mpn_add_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) -{ - mp_size_t i; - - assert (n > 0); - i = 0; - do - { - mp_limb_t r = ap[i] + b; - /* Carry out */ - b = (r < b); - rp[i] = r; - } - while (++i < n); - - return b; -} - -mp_limb_t -mpn_add_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - mp_size_t i; - mp_limb_t cy; - - for (i = 0, cy = 0; i < n; i++) - { - mp_limb_t a, b, r; - a = ap[i]; b = bp[i]; - r = a + cy; - cy = (r < cy); - r += b; - cy += (r < b); - rp[i] = r; - } - return cy; -} - -mp_limb_t -mpn_add (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) -{ - mp_limb_t cy; - - assert (an >= bn); - - cy = mpn_add_n (rp, ap, bp, bn); - if (an > bn) - cy = mpn_add_1 (rp + bn, ap + bn, an - bn, cy); - return cy; -} - -mp_limb_t -mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) -{ - mp_size_t i; - - assert (n > 0); - - i = 0; - do - { - mp_limb_t a = ap[i]; - /* Carry out */ - mp_limb_t cy = a < b; - rp[i] = a - b; - b = cy; - } - while (++i < n); - - return b; -} - -mp_limb_t -mpn_sub_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - mp_size_t i; - mp_limb_t cy; - - for (i = 0, cy = 0; i < n; i++) - { - mp_limb_t a, b; - a = ap[i]; b = bp[i]; - b += cy; - cy = (b < cy); - cy += (a < b); - rp[i] = a - b; - } - return cy; -} - -mp_limb_t -mpn_sub (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) -{ - mp_limb_t cy; - - assert (an >= bn); - - cy = mpn_sub_n (rp, ap, bp, bn); - if (an > bn) - cy = mpn_sub_1 (rp + bn, ap + bn, an - bn, cy); - return cy; -} - -mp_limb_t -mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) -{ - mp_limb_t ul, cl, hpl, lpl; - - assert (n >= 1); - - cl = 0; - do - { - ul = *up++; - gmp_umul_ppmm (hpl, lpl, ul, vl); - - lpl += cl; - cl = (lpl < cl) + hpl; - - *rp++ = lpl; - } - while (--n != 0); - - return cl; -} - -mp_limb_t -mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) -{ - mp_limb_t ul, cl, hpl, lpl, rl; - - assert (n >= 1); - - cl = 0; - do - { - ul = *up++; - gmp_umul_ppmm (hpl, lpl, ul, vl); - - lpl += cl; - cl = (lpl < cl) + hpl; - - rl = *rp; - lpl = rl + lpl; - cl += lpl < rl; - *rp++ = lpl; - } - while (--n != 0); - - return cl; -} - -mp_limb_t -mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) -{ - mp_limb_t ul, cl, hpl, lpl, rl; - - assert (n >= 1); - - cl = 0; - do - { - ul = *up++; - gmp_umul_ppmm (hpl, lpl, ul, vl); - - lpl += cl; - cl = (lpl < cl) + hpl; - - rl = *rp; - lpl = rl - lpl; - cl += lpl > rl; - *rp++ = lpl; - } - while (--n != 0); - - return cl; -} - -mp_limb_t -mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn) -{ - assert (un >= vn); - assert (vn >= 1); - assert (!GMP_MPN_OVERLAP_P(rp, un + vn, up, un)); - assert (!GMP_MPN_OVERLAP_P(rp, un + vn, vp, vn)); - - /* We first multiply by the low order limb. This result can be - stored, not added, to rp. We also avoid a loop for zeroing this - way. */ - - rp[un] = mpn_mul_1 (rp, up, un, vp[0]); - - /* Now accumulate the product of up[] and the next higher limb from - vp[]. 
*/ - - while (--vn >= 1) - { - rp += 1, vp += 1; - rp[un] = mpn_addmul_1 (rp, up, un, vp[0]); - } - return rp[un]; -} - -void -mpn_mul_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - mpn_mul (rp, ap, n, bp, n); -} - -void -mpn_sqr (mp_ptr rp, mp_srcptr ap, mp_size_t n) -{ - mpn_mul (rp, ap, n, ap, n); -} - -mp_limb_t -mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) -{ - mp_limb_t high_limb, low_limb; - unsigned int tnc; - mp_limb_t retval; - - assert (n >= 1); - assert (cnt >= 1); - assert (cnt < GMP_LIMB_BITS); - - up += n; - rp += n; - - tnc = GMP_LIMB_BITS - cnt; - low_limb = *--up; - retval = low_limb >> tnc; - high_limb = (low_limb << cnt); - - while (--n != 0) - { - low_limb = *--up; - *--rp = high_limb | (low_limb >> tnc); - high_limb = (low_limb << cnt); - } - *--rp = high_limb; - - return retval; -} - -mp_limb_t -mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) -{ - mp_limb_t high_limb, low_limb; - unsigned int tnc; - mp_limb_t retval; - - assert (n >= 1); - assert (cnt >= 1); - assert (cnt < GMP_LIMB_BITS); - - tnc = GMP_LIMB_BITS - cnt; - high_limb = *up++; - retval = (high_limb << tnc); - low_limb = high_limb >> cnt; - - while (--n != 0) - { - high_limb = *up++; - *rp++ = low_limb | (high_limb << tnc); - low_limb = high_limb >> cnt; - } - *rp = low_limb; - - return retval; -} - -static mp_bitcnt_t -mpn_common_scan (mp_limb_t limb, mp_size_t i, mp_srcptr up, mp_size_t un, - mp_limb_t ux) -{ - unsigned cnt; - - assert (ux == 0 || ux == GMP_LIMB_MAX); - assert (0 <= i && i <= un ); - - while (limb == 0) - { - i++; - if (i == un) - return (ux == 0 ? ~(mp_bitcnt_t) 0 : un * GMP_LIMB_BITS); - limb = ux ^ up[i]; - } - gmp_ctz (cnt, limb); - return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt; -} - -mp_bitcnt_t -mpn_scan1 (mp_srcptr ptr, mp_bitcnt_t bit) -{ - mp_size_t i; - i = bit / GMP_LIMB_BITS; - - return mpn_common_scan ( ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), - i, ptr, i, 0); -} - -mp_bitcnt_t -mpn_scan0 (mp_srcptr ptr, mp_bitcnt_t bit) -{ - mp_size_t i; - i = bit / GMP_LIMB_BITS; - - return mpn_common_scan (~ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), - i, ptr, i, GMP_LIMB_MAX); -} - -void -mpn_com (mp_ptr rp, mp_srcptr up, mp_size_t n) -{ - while (--n >= 0) - *rp++ = ~ *up++; -} - -mp_limb_t -mpn_neg (mp_ptr rp, mp_srcptr up, mp_size_t n) -{ - while (*up == 0) - { - *rp = 0; - if (!--n) - return 0; - ++up; ++rp; - } - *rp = - *up; - mpn_com (++rp, ++up, --n); - return 1; -} - - -/* MPN division interface. */ - -/* The 3/2 inverse is defined as - - m = floor( (B^3-1) / (B u1 + u0)) - B -*/ -mp_limb_t -mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0) -{ - mp_limb_t r, m; - - { - mp_limb_t p, ql; - unsigned ul, uh, qh; - - assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); - /* For notation, let b denote the half-limb base, so that B = b^2. - Split u1 = b uh + ul. */ - ul = u1 & GMP_LLIMB_MASK; - uh = u1 >> (GMP_LIMB_BITS / 2); - - /* Approximation of the high half of quotient. Differs from the 2/1 - inverse of the half limb uh, since we have already subtracted - u0. */ - qh = (u1 ^ GMP_LIMB_MAX) / uh; - - /* Adjust to get a half-limb 3/2 inverse, i.e., we want - - qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u - = floor( (b (~u) + b-1) / u), - - and the remainder - - r = b (~u) + b-1 - qh (b uh + ul) - = b (~u - qh uh) + b-1 - qh ul - - Subtraction of qh ul may underflow, which implies adjustments. - But by normalization, 2 u >= B > qh ul, so we need to adjust by - at most 2. 
- */ - - r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK; - - p = (mp_limb_t) qh * ul; - /* Adjustment steps taken from udiv_qrnnd_c */ - if (r < p) - { - qh--; - r += u1; - if (r >= u1) /* i.e. we didn't get carry when adding to r */ - if (r < p) - { - qh--; - r += u1; - } - } - r -= p; - - /* Low half of the quotient is - - ql = floor ( (b r + b-1) / u1). - - This is a 3/2 division (on half-limbs), for which qh is a - suitable inverse. */ - - p = (r >> (GMP_LIMB_BITS / 2)) * qh + r; - /* Unlike full-limb 3/2, we can add 1 without overflow. For this to - work, it is essential that ql is a full mp_limb_t. */ - ql = (p >> (GMP_LIMB_BITS / 2)) + 1; - - /* By the 3/2 trick, we don't need the high half limb. */ - r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1; - - if (r >= (GMP_LIMB_MAX & (p << (GMP_LIMB_BITS / 2)))) - { - ql--; - r += u1; - } - m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql; - if (r >= u1) - { - m++; - r -= u1; - } - } - - /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a - 3/2 inverse. */ - if (u0 > 0) - { - mp_limb_t th, tl; - r = ~r; - r += u0; - if (r < u0) - { - m--; - if (r >= u1) - { - m--; - r -= u1; - } - r -= u1; - } - gmp_umul_ppmm (th, tl, u0, m); - r += th; - if (r < th) - { - m--; - m -= ((r > u1) | ((r == u1) & (tl > u0))); - } - } - - return m; -} - -struct gmp_div_inverse -{ - /* Normalization shift count. */ - unsigned shift; - /* Normalized divisor (d0 unused for mpn_div_qr_1) */ - mp_limb_t d1, d0; - /* Inverse, for 2/1 or 3/2. */ - mp_limb_t di; -}; - -static void -mpn_div_qr_1_invert (struct gmp_div_inverse *inv, mp_limb_t d) -{ - unsigned shift; - - assert (d > 0); - gmp_clz (shift, d); - inv->shift = shift; - inv->d1 = d << shift; - inv->di = mpn_invert_limb (inv->d1); -} - -static void -mpn_div_qr_2_invert (struct gmp_div_inverse *inv, - mp_limb_t d1, mp_limb_t d0) -{ - unsigned shift; - - assert (d1 > 0); - gmp_clz (shift, d1); - inv->shift = shift; - if (shift > 0) - { - d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); - d0 <<= shift; - } - inv->d1 = d1; - inv->d0 = d0; - inv->di = mpn_invert_3by2 (d1, d0); -} - -static void -mpn_div_qr_invert (struct gmp_div_inverse *inv, - mp_srcptr dp, mp_size_t dn) -{ - assert (dn > 0); - - if (dn == 1) - mpn_div_qr_1_invert (inv, dp[0]); - else if (dn == 2) - mpn_div_qr_2_invert (inv, dp[1], dp[0]); - else - { - unsigned shift; - mp_limb_t d1, d0; - - d1 = dp[dn-1]; - d0 = dp[dn-2]; - assert (d1 > 0); - gmp_clz (shift, d1); - inv->shift = shift; - if (shift > 0) - { - d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); - d0 = (d0 << shift) | (dp[dn-3] >> (GMP_LIMB_BITS - shift)); - } - inv->d1 = d1; - inv->d0 = d0; - inv->di = mpn_invert_3by2 (d1, d0); - } -} - -/* Not matching current public gmp interface, rather corresponding to - the sbpi1_div_* functions. */ -static mp_limb_t -mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn, - const struct gmp_div_inverse *inv) -{ - mp_limb_t d, di; - mp_limb_t r; - mp_ptr tp = NULL; - mp_size_t tn = 0; - - if (inv->shift > 0) - { - /* Shift, reusing qp area if possible. In-place shift if qp == np. 
*/ - tp = qp; - if (!tp) - { - tn = nn; - tp = gmp_alloc_limbs (tn); - } - r = mpn_lshift (tp, np, nn, inv->shift); - np = tp; - } - else - r = 0; - - d = inv->d1; - di = inv->di; - while (--nn >= 0) - { - mp_limb_t q; - - gmp_udiv_qrnnd_preinv (q, r, r, np[nn], d, di); - if (qp) - qp[nn] = q; - } - if (tn) - gmp_free_limbs (tp, tn); - - return r >> inv->shift; -} - -static void -mpn_div_qr_2_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, - const struct gmp_div_inverse *inv) -{ - unsigned shift; - mp_size_t i; - mp_limb_t d1, d0, di, r1, r0; - - assert (nn >= 2); - shift = inv->shift; - d1 = inv->d1; - d0 = inv->d0; - di = inv->di; - - if (shift > 0) - r1 = mpn_lshift (np, np, nn, shift); - else - r1 = 0; - - r0 = np[nn - 1]; - - i = nn - 2; - do - { - mp_limb_t n0, q; - n0 = np[i]; - gmp_udiv_qr_3by2 (q, r1, r0, r1, r0, n0, d1, d0, di); - - if (qp) - qp[i] = q; - } - while (--i >= 0); - - if (shift > 0) - { - assert ((r0 & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - shift))) == 0); - r0 = (r0 >> shift) | (r1 << (GMP_LIMB_BITS - shift)); - r1 >>= shift; - } - - np[1] = r1; - np[0] = r0; -} - -static void -mpn_div_qr_pi1 (mp_ptr qp, - mp_ptr np, mp_size_t nn, mp_limb_t n1, - mp_srcptr dp, mp_size_t dn, - mp_limb_t dinv) -{ - mp_size_t i; - - mp_limb_t d1, d0; - mp_limb_t cy, cy1; - mp_limb_t q; - - assert (dn > 2); - assert (nn >= dn); - - d1 = dp[dn - 1]; - d0 = dp[dn - 2]; - - assert ((d1 & GMP_LIMB_HIGHBIT) != 0); - /* Iteration variable is the index of the q limb. - * - * We divide - * by - */ - - i = nn - dn; - do - { - mp_limb_t n0 = np[dn-1+i]; - - if (n1 == d1 && n0 == d0) - { - q = GMP_LIMB_MAX; - mpn_submul_1 (np+i, dp, dn, q); - n1 = np[dn-1+i]; /* update n1, last loop's value will now be invalid */ - } - else - { - gmp_udiv_qr_3by2 (q, n1, n0, n1, n0, np[dn-2+i], d1, d0, dinv); - - cy = mpn_submul_1 (np + i, dp, dn-2, q); - - cy1 = n0 < cy; - n0 = n0 - cy; - cy = n1 < cy1; - n1 = n1 - cy1; - np[dn-2+i] = n0; - - if (cy != 0) - { - n1 += d1 + mpn_add_n (np + i, np + i, dp, dn - 1); - q--; - } - } - - if (qp) - qp[i] = q; - } - while (--i >= 0); - - np[dn - 1] = n1; -} - -static void -mpn_div_qr_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, - mp_srcptr dp, mp_size_t dn, - const struct gmp_div_inverse *inv) -{ - assert (dn > 0); - assert (nn >= dn); - - if (dn == 1) - np[0] = mpn_div_qr_1_preinv (qp, np, nn, inv); - else if (dn == 2) - mpn_div_qr_2_preinv (qp, np, nn, inv); - else - { - mp_limb_t nh; - unsigned shift; - - assert (inv->d1 == dp[dn-1]); - assert (inv->d0 == dp[dn-2]); - assert ((inv->d1 & GMP_LIMB_HIGHBIT) != 0); - - shift = inv->shift; - if (shift > 0) - nh = mpn_lshift (np, np, nn, shift); - else - nh = 0; - - mpn_div_qr_pi1 (qp, np, nn, nh, dp, dn, inv->di); - - if (shift > 0) - gmp_assert_nocarry (mpn_rshift (np, np, dn, shift)); - } -} - -static void -mpn_div_qr (mp_ptr qp, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn) -{ - struct gmp_div_inverse inv; - mp_ptr tp = NULL; - - assert (dn > 0); - assert (nn >= dn); - - mpn_div_qr_invert (&inv, dp, dn); - if (dn > 2 && inv.shift > 0) - { - tp = gmp_alloc_limbs (dn); - gmp_assert_nocarry (mpn_lshift (tp, dp, dn, inv.shift)); - dp = tp; - } - mpn_div_qr_preinv (qp, np, nn, dp, dn, &inv); - if (tp) - gmp_free_limbs (tp, dn); -} - - -/* MPN base conversion. 
*/ -static unsigned -mpn_base_power_of_two_p (unsigned b) -{ - switch (b) - { - case 2: return 1; - case 4: return 2; - case 8: return 3; - case 16: return 4; - case 32: return 5; - case 64: return 6; - case 128: return 7; - case 256: return 8; - default: return 0; - } -} - -struct mpn_base_info -{ - /* bb is the largest power of the base which fits in one limb, and - exp is the corresponding exponent. */ - unsigned exp; - mp_limb_t bb; -}; - -static void -mpn_get_base_info (struct mpn_base_info *info, mp_limb_t b) -{ - mp_limb_t m; - mp_limb_t p; - unsigned exp; - - m = GMP_LIMB_MAX / b; - for (exp = 1, p = b; p <= m; exp++) - p *= b; - - info->exp = exp; - info->bb = p; -} - -static mp_bitcnt_t -mpn_limb_size_in_base_2 (mp_limb_t u) -{ - unsigned shift; - - assert (u > 0); - gmp_clz (shift, u); - return GMP_LIMB_BITS - shift; -} - -static size_t -mpn_get_str_bits (unsigned char *sp, unsigned bits, mp_srcptr up, mp_size_t un) -{ - unsigned char mask; - size_t sn, j; - mp_size_t i; - unsigned shift; - - sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]) - + bits - 1) / bits; - - mask = (1U << bits) - 1; - - for (i = 0, j = sn, shift = 0; j-- > 0;) - { - unsigned char digit = up[i] >> shift; - - shift += bits; - - if (shift >= GMP_LIMB_BITS && ++i < un) - { - shift -= GMP_LIMB_BITS; - digit |= up[i] << (bits - shift); - } - sp[j] = digit & mask; - } - return sn; -} - -/* We generate digits from the least significant end, and reverse at - the end. */ -static size_t -mpn_limb_get_str (unsigned char *sp, mp_limb_t w, - const struct gmp_div_inverse *binv) -{ - mp_size_t i; - for (i = 0; w > 0; i++) - { - mp_limb_t h, l, r; - - h = w >> (GMP_LIMB_BITS - binv->shift); - l = w << binv->shift; - - gmp_udiv_qrnnd_preinv (w, r, h, l, binv->d1, binv->di); - assert ((r & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - binv->shift))) == 0); - r >>= binv->shift; - - sp[i] = r; - } - return i; -} - -static size_t -mpn_get_str_other (unsigned char *sp, - int base, const struct mpn_base_info *info, - mp_ptr up, mp_size_t un) -{ - struct gmp_div_inverse binv; - size_t sn; - size_t i; - - mpn_div_qr_1_invert (&binv, base); - - sn = 0; - - if (un > 1) - { - struct gmp_div_inverse bbinv; - mpn_div_qr_1_invert (&bbinv, info->bb); - - do - { - mp_limb_t w; - size_t done; - w = mpn_div_qr_1_preinv (up, up, un, &bbinv); - un -= (up[un-1] == 0); - done = mpn_limb_get_str (sp + sn, w, &binv); - - for (sn += done; done < info->exp; done++) - sp[sn++] = 0; - } - while (un > 1); - } - sn += mpn_limb_get_str (sp + sn, up[0], &binv); - - /* Reverse order */ - for (i = 0; 2*i + 1 < sn; i++) - { - unsigned char t = sp[i]; - sp[i] = sp[sn - i - 1]; - sp[sn - i - 1] = t; - } - - return sn; -} - -size_t -mpn_get_str (unsigned char *sp, int base, mp_ptr up, mp_size_t un) -{ - unsigned bits; - - assert (un > 0); - assert (up[un-1] > 0); - - bits = mpn_base_power_of_two_p (base); - if (bits) - return mpn_get_str_bits (sp, bits, up, un); - else - { - struct mpn_base_info info; - - mpn_get_base_info (&info, base); - return mpn_get_str_other (sp, base, &info, up, un); - } -} - -static mp_size_t -mpn_set_str_bits (mp_ptr rp, const unsigned char *sp, size_t sn, - unsigned bits) -{ - mp_size_t rn; - mp_limb_t limb; - unsigned shift; - - for (limb = 0, rn = 0, shift = 0; sn-- > 0; ) - { - limb |= (mp_limb_t) sp[sn] << shift; - shift += bits; - if (shift >= GMP_LIMB_BITS) - { - shift -= GMP_LIMB_BITS; - rp[rn++] = limb; - /* Next line is correct also if shift == 0, - bits == 8, and mp_limb_t == unsigned char. 
*/ - limb = (unsigned int) sp[sn] >> (bits - shift); - } - } - if (limb != 0) - rp[rn++] = limb; - else - rn = mpn_normalized_size (rp, rn); - return rn; -} - -/* Result is usually normalized, except for all-zero input, in which - case a single zero limb is written at *RP, and 1 is returned. */ -static mp_size_t -mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn, - mp_limb_t b, const struct mpn_base_info *info) -{ - mp_size_t rn; - mp_limb_t w; - unsigned k; - size_t j; - - assert (sn > 0); - - k = 1 + (sn - 1) % info->exp; - - j = 0; - w = sp[j++]; - while (--k != 0) - w = w * b + sp[j++]; - - rp[0] = w; - - for (rn = 1; j < sn;) - { - mp_limb_t cy; - - w = sp[j++]; - for (k = 1; k < info->exp; k++) - w = w * b + sp[j++]; - - cy = mpn_mul_1 (rp, rp, rn, info->bb); - cy += mpn_add_1 (rp, rp, rn, w); - if (cy > 0) - rp[rn++] = cy; - } - assert (j == sn); - - return rn; -} - -mp_size_t -mpn_set_str (mp_ptr rp, const unsigned char *sp, size_t sn, int base) -{ - unsigned bits; - - if (sn == 0) - return 0; - - bits = mpn_base_power_of_two_p (base); - if (bits) - return mpn_set_str_bits (rp, sp, sn, bits); - else - { - struct mpn_base_info info; - - mpn_get_base_info (&info, base); - return mpn_set_str_other (rp, sp, sn, base, &info); - } -} - - -/* MPZ interface */ -void -mpz_init (mpz_t r) -{ - static const mp_limb_t dummy_limb = GMP_LIMB_MAX & 0xc1a0; - - r->_mp_alloc = 0; - r->_mp_size = 0; - r->_mp_d = (mp_ptr) &dummy_limb; -} - -/* The utility of this function is a bit limited, since many functions - assigns the result variable using mpz_swap. */ -void -mpz_init2 (mpz_t r, mp_bitcnt_t bits) -{ - mp_size_t rn; - - bits -= (bits != 0); /* Round down, except if 0 */ - rn = 1 + bits / GMP_LIMB_BITS; - - r->_mp_alloc = rn; - r->_mp_size = 0; - r->_mp_d = gmp_alloc_limbs (rn); -} - -void -mpz_clear (mpz_t r) -{ - if (r->_mp_alloc) - gmp_free_limbs (r->_mp_d, r->_mp_alloc); -} - -static mp_ptr -mpz_realloc (mpz_t r, mp_size_t size) -{ - size = GMP_MAX (size, 1); - - if (r->_mp_alloc) - r->_mp_d = gmp_realloc_limbs (r->_mp_d, r->_mp_alloc, size); - else - r->_mp_d = gmp_alloc_limbs (size); - r->_mp_alloc = size; - - if (GMP_ABS (r->_mp_size) > size) - r->_mp_size = 0; - - return r->_mp_d; -} - -/* Realloc for an mpz_t WHAT if it has less than NEEDED limbs. */ -#define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc \ - ? mpz_realloc(z,n) \ - : (z)->_mp_d) - -/* MPZ assignment and basic conversions. 
*/ -void -mpz_set_si (mpz_t r, signed long int x) -{ - if (x >= 0) - mpz_set_ui (r, x); - else /* (x < 0) */ - if (GMP_LIMB_BITS < GMP_ULONG_BITS) - { - mpz_set_ui (r, GMP_NEG_CAST (unsigned long int, x)); - mpz_neg (r, r); - } - else - { - r->_mp_size = -1; - MPZ_REALLOC (r, 1)[0] = GMP_NEG_CAST (unsigned long int, x); - } -} - -void -mpz_set_ui (mpz_t r, unsigned long int x) -{ - if (x > 0) - { - r->_mp_size = 1; - MPZ_REALLOC (r, 1)[0] = x; - if (GMP_LIMB_BITS < GMP_ULONG_BITS) - { - int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; - while (x >>= LOCAL_GMP_LIMB_BITS) - { - ++ r->_mp_size; - MPZ_REALLOC (r, r->_mp_size)[r->_mp_size - 1] = x; - } - } - } - else - r->_mp_size = 0; -} - -void -mpz_set (mpz_t r, const mpz_t x) -{ - /* Allow the NOP r == x */ - if (r != x) - { - mp_size_t n; - mp_ptr rp; - - n = GMP_ABS (x->_mp_size); - rp = MPZ_REALLOC (r, n); - - mpn_copyi (rp, x->_mp_d, n); - r->_mp_size = x->_mp_size; - } -} - -void -mpz_init_set_si (mpz_t r, signed long int x) -{ - mpz_init (r); - mpz_set_si (r, x); -} - -void -mpz_init_set_ui (mpz_t r, unsigned long int x) -{ - mpz_init (r); - mpz_set_ui (r, x); -} - -void -mpz_init_set (mpz_t r, const mpz_t x) -{ - mpz_init (r); - mpz_set (r, x); -} - -int -mpz_fits_slong_p (const mpz_t u) -{ - return mpz_cmp_si (u, LONG_MAX) <= 0 && mpz_cmp_si (u, LONG_MIN) >= 0; -} - -static int -mpn_absfits_ulong_p (mp_srcptr up, mp_size_t un) -{ - int ulongsize = GMP_ULONG_BITS / GMP_LIMB_BITS; - mp_limb_t ulongrem = 0; - - if (GMP_ULONG_BITS % GMP_LIMB_BITS != 0) - ulongrem = (mp_limb_t) (ULONG_MAX >> GMP_LIMB_BITS * ulongsize) + 1; - - return un <= ulongsize || (up[ulongsize] < ulongrem && un == ulongsize + 1); -} - -int -mpz_fits_ulong_p (const mpz_t u) -{ - mp_size_t us = u->_mp_size; - - return us >= 0 && mpn_absfits_ulong_p (u->_mp_d, us); -} - -int -mpz_fits_sint_p (const mpz_t u) -{ - return mpz_cmp_si (u, INT_MAX) <= 0 && mpz_cmp_si (u, INT_MIN) >= 0; -} - -int -mpz_fits_uint_p (const mpz_t u) -{ - return u->_mp_size >= 0 && mpz_cmpabs_ui (u, UINT_MAX) <= 0; -} - -int -mpz_fits_sshort_p (const mpz_t u) -{ - return mpz_cmp_si (u, SHRT_MAX) <= 0 && mpz_cmp_si (u, SHRT_MIN) >= 0; -} - -int -mpz_fits_ushort_p (const mpz_t u) -{ - return u->_mp_size >= 0 && mpz_cmpabs_ui (u, USHRT_MAX) <= 0; -} - -long int -mpz_get_si (const mpz_t u) -{ - unsigned long r = mpz_get_ui (u); - unsigned long c = -LONG_MAX - LONG_MIN; - - if (u->_mp_size < 0) - /* This expression is necessary to properly handle -LONG_MIN */ - return -(long) c - (long) ((r - c) & LONG_MAX); - else - return (long) (r & LONG_MAX); -} - -unsigned long int -mpz_get_ui (const mpz_t u) -{ - if (GMP_LIMB_BITS < GMP_ULONG_BITS) - { - int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; - unsigned long r = 0; - mp_size_t n = GMP_ABS (u->_mp_size); - n = GMP_MIN (n, 1 + (mp_size_t) (GMP_ULONG_BITS - 1) / GMP_LIMB_BITS); - while (--n >= 0) - r = (r << LOCAL_GMP_LIMB_BITS) + u->_mp_d[n]; - return r; - } - - return u->_mp_size == 0 ? 
0 : u->_mp_d[0]; -} - -size_t -mpz_size (const mpz_t u) -{ - return GMP_ABS (u->_mp_size); -} - -mp_limb_t -mpz_getlimbn (const mpz_t u, mp_size_t n) -{ - if (n >= 0 && n < GMP_ABS (u->_mp_size)) - return u->_mp_d[n]; - else - return 0; -} - -void -mpz_realloc2 (mpz_t x, mp_bitcnt_t n) -{ - mpz_realloc (x, 1 + (n - (n != 0)) / GMP_LIMB_BITS); -} - -mp_srcptr -mpz_limbs_read (mpz_srcptr x) -{ - return x->_mp_d; -} - -mp_ptr -mpz_limbs_modify (mpz_t x, mp_size_t n) -{ - assert (n > 0); - return MPZ_REALLOC (x, n); -} - -mp_ptr -mpz_limbs_write (mpz_t x, mp_size_t n) -{ - return mpz_limbs_modify (x, n); -} - -void -mpz_limbs_finish (mpz_t x, mp_size_t xs) -{ - mp_size_t xn; - xn = mpn_normalized_size (x->_mp_d, GMP_ABS (xs)); - x->_mp_size = xs < 0 ? -xn : xn; -} - -static mpz_srcptr -mpz_roinit_normal_n (mpz_t x, mp_srcptr xp, mp_size_t xs) -{ - x->_mp_alloc = 0; - x->_mp_d = (mp_ptr) xp; - x->_mp_size = xs; - return x; -} - -mpz_srcptr -mpz_roinit_n (mpz_t x, mp_srcptr xp, mp_size_t xs) -{ - mpz_roinit_normal_n (x, xp, xs); - mpz_limbs_finish (x, xs); - return x; -} - - -/* Conversions and comparison to double. */ -void -mpz_set_d (mpz_t r, double x) -{ - int sign; - mp_ptr rp; - mp_size_t rn, i; - double B; - double Bi; - mp_limb_t f; - - /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is - zero or infinity. */ - if (x != x || x == x * 0.5) - { - r->_mp_size = 0; - return; - } - - sign = x < 0.0 ; - if (sign) - x = - x; - - if (x < 1.0) - { - r->_mp_size = 0; - return; - } - B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); - Bi = 1.0 / B; - for (rn = 1; x >= B; rn++) - x *= Bi; - - rp = MPZ_REALLOC (r, rn); - - f = (mp_limb_t) x; - x -= f; - assert (x < 1.0); - i = rn-1; - rp[i] = f; - while (--i >= 0) - { - x = B * x; - f = (mp_limb_t) x; - x -= f; - assert (x < 1.0); - rp[i] = f; - } - - r->_mp_size = sign ? - rn : rn; -} - -void -mpz_init_set_d (mpz_t r, double x) -{ - mpz_init (r); - mpz_set_d (r, x); -} - -double -mpz_get_d (const mpz_t u) -{ - int m; - mp_limb_t l; - mp_size_t un; - double x; - double B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); - - un = GMP_ABS (u->_mp_size); - - if (un == 0) - return 0.0; - - l = u->_mp_d[--un]; - gmp_clz (m, l); - m = m + GMP_DBL_MANT_BITS - GMP_LIMB_BITS; - if (m < 0) - l &= GMP_LIMB_MAX << -m; - - for (x = l; --un >= 0;) - { - x = B*x; - if (m > 0) { - l = u->_mp_d[un]; - m -= GMP_LIMB_BITS; - if (m < 0) - l &= GMP_LIMB_MAX << -m; - x += l; - } - } - - if (u->_mp_size < 0) - x = -x; - - return x; -} - -int -mpz_cmpabs_d (const mpz_t x, double d) -{ - mp_size_t xn; - double B, Bi; - mp_size_t i; - - xn = x->_mp_size; - d = GMP_ABS (d); - - if (xn != 0) - { - xn = GMP_ABS (xn); - - B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); - Bi = 1.0 / B; - - /* Scale d so it can be compared with the top limb. */ - for (i = 1; i < xn; i++) - d *= Bi; - - if (d >= B) - return -1; - - /* Compare floor(d) to top limb, subtract and cancel when equal. */ - for (i = xn; i-- > 0;) - { - mp_limb_t f, xl; - - f = (mp_limb_t) d; - xl = x->_mp_d[i]; - if (xl > f) - return 1; - else if (xl < f) - return -1; - d = B * (d - f); - } - } - return - (d > 0.0); -} - -int -mpz_cmp_d (const mpz_t x, double d) -{ - if (x->_mp_size < 0) - { - if (d >= 0.0) - return -1; - else - return -mpz_cmpabs_d (x, d); - } - else - { - if (d < 0.0) - return 1; - else - return mpz_cmpabs_d (x, d); - } -} - - -/* MPZ comparisons and the like. 
*/ -int -mpz_sgn (const mpz_t u) -{ - return GMP_CMP (u->_mp_size, 0); -} - -int -mpz_cmp_si (const mpz_t u, long v) -{ - mp_size_t usize = u->_mp_size; - - if (v >= 0) - return mpz_cmp_ui (u, v); - else if (usize >= 0) - return 1; - else - return - mpz_cmpabs_ui (u, GMP_NEG_CAST (unsigned long int, v)); -} - -int -mpz_cmp_ui (const mpz_t u, unsigned long v) -{ - mp_size_t usize = u->_mp_size; - - if (usize < 0) - return -1; - else - return mpz_cmpabs_ui (u, v); -} - -int -mpz_cmp (const mpz_t a, const mpz_t b) -{ - mp_size_t asize = a->_mp_size; - mp_size_t bsize = b->_mp_size; - - if (asize != bsize) - return (asize < bsize) ? -1 : 1; - else if (asize >= 0) - return mpn_cmp (a->_mp_d, b->_mp_d, asize); - else - return mpn_cmp (b->_mp_d, a->_mp_d, -asize); -} - -int -mpz_cmpabs_ui (const mpz_t u, unsigned long v) -{ - mp_size_t un = GMP_ABS (u->_mp_size); - - if (! mpn_absfits_ulong_p (u->_mp_d, un)) - return 1; - else - { - unsigned long uu = mpz_get_ui (u); - return GMP_CMP(uu, v); - } -} - -int -mpz_cmpabs (const mpz_t u, const mpz_t v) -{ - return mpn_cmp4 (u->_mp_d, GMP_ABS (u->_mp_size), - v->_mp_d, GMP_ABS (v->_mp_size)); -} - -void -mpz_abs (mpz_t r, const mpz_t u) -{ - mpz_set (r, u); - r->_mp_size = GMP_ABS (r->_mp_size); -} - -void -mpz_neg (mpz_t r, const mpz_t u) -{ - mpz_set (r, u); - r->_mp_size = -r->_mp_size; -} - -void -mpz_swap (mpz_t u, mpz_t v) -{ - MP_SIZE_T_SWAP (u->_mp_alloc, v->_mp_alloc); - MPN_PTR_SWAP (u->_mp_d, u->_mp_size, v->_mp_d, v->_mp_size); -} - - -/* MPZ addition and subtraction */ - - -void -mpz_add_ui (mpz_t r, const mpz_t a, unsigned long b) -{ - mpz_t bb; - mpz_init_set_ui (bb, b); - mpz_add (r, a, bb); - mpz_clear (bb); -} - -void -mpz_sub_ui (mpz_t r, const mpz_t a, unsigned long b) -{ - mpz_ui_sub (r, b, a); - mpz_neg (r, r); -} - -void -mpz_ui_sub (mpz_t r, unsigned long a, const mpz_t b) -{ - mpz_neg (r, b); - mpz_add_ui (r, r, a); -} - -static mp_size_t -mpz_abs_add (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t an = GMP_ABS (a->_mp_size); - mp_size_t bn = GMP_ABS (b->_mp_size); - mp_ptr rp; - mp_limb_t cy; - - if (an < bn) - { - MPZ_SRCPTR_SWAP (a, b); - MP_SIZE_T_SWAP (an, bn); - } - - rp = MPZ_REALLOC (r, an + 1); - cy = mpn_add (rp, a->_mp_d, an, b->_mp_d, bn); - - rp[an] = cy; - - return an + cy; -} - -static mp_size_t -mpz_abs_sub (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t an = GMP_ABS (a->_mp_size); - mp_size_t bn = GMP_ABS (b->_mp_size); - int cmp; - mp_ptr rp; - - cmp = mpn_cmp4 (a->_mp_d, an, b->_mp_d, bn); - if (cmp > 0) - { - rp = MPZ_REALLOC (r, an); - gmp_assert_nocarry (mpn_sub (rp, a->_mp_d, an, b->_mp_d, bn)); - return mpn_normalized_size (rp, an); - } - else if (cmp < 0) - { - rp = MPZ_REALLOC (r, bn); - gmp_assert_nocarry (mpn_sub (rp, b->_mp_d, bn, a->_mp_d, an)); - return -mpn_normalized_size (rp, bn); - } - else - return 0; -} - -void -mpz_add (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t rn; - - if ( (a->_mp_size ^ b->_mp_size) >= 0) - rn = mpz_abs_add (r, a, b); - else - rn = mpz_abs_sub (r, a, b); - - r->_mp_size = a->_mp_size >= 0 ? rn : - rn; -} - -void -mpz_sub (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t rn; - - if ( (a->_mp_size ^ b->_mp_size) >= 0) - rn = mpz_abs_sub (r, a, b); - else - rn = mpz_abs_add (r, a, b); - - r->_mp_size = a->_mp_size >= 0 ? 
rn : - rn; -} - - -/* MPZ multiplication */ -void -mpz_mul_si (mpz_t r, const mpz_t u, long int v) -{ - if (v < 0) - { - mpz_mul_ui (r, u, GMP_NEG_CAST (unsigned long int, v)); - mpz_neg (r, r); - } - else - mpz_mul_ui (r, u, v); -} - -void -mpz_mul_ui (mpz_t r, const mpz_t u, unsigned long int v) -{ - mpz_t vv; - mpz_init_set_ui (vv, v); - mpz_mul (r, u, vv); - mpz_clear (vv); - return; -} - -void -mpz_mul (mpz_t r, const mpz_t u, const mpz_t v) -{ - int sign; - mp_size_t un, vn, rn; - mpz_t t; - mp_ptr tp; - - un = u->_mp_size; - vn = v->_mp_size; - - if (un == 0 || vn == 0) - { - r->_mp_size = 0; - return; - } - - sign = (un ^ vn) < 0; - - un = GMP_ABS (un); - vn = GMP_ABS (vn); - - mpz_init2 (t, (un + vn) * GMP_LIMB_BITS); - - tp = t->_mp_d; - if (un >= vn) - mpn_mul (tp, u->_mp_d, un, v->_mp_d, vn); - else - mpn_mul (tp, v->_mp_d, vn, u->_mp_d, un); - - rn = un + vn; - rn -= tp[rn-1] == 0; - - t->_mp_size = sign ? - rn : rn; - mpz_swap (r, t); - mpz_clear (t); -} - -void -mpz_mul_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bits) -{ - mp_size_t un, rn; - mp_size_t limbs; - unsigned shift; - mp_ptr rp; - - un = GMP_ABS (u->_mp_size); - if (un == 0) - { - r->_mp_size = 0; - return; - } - - limbs = bits / GMP_LIMB_BITS; - shift = bits % GMP_LIMB_BITS; - - rn = un + limbs + (shift > 0); - rp = MPZ_REALLOC (r, rn); - if (shift > 0) - { - mp_limb_t cy = mpn_lshift (rp + limbs, u->_mp_d, un, shift); - rp[rn-1] = cy; - rn -= (cy == 0); - } - else - mpn_copyd (rp + limbs, u->_mp_d, un); - - mpn_zero (rp, limbs); - - r->_mp_size = (u->_mp_size < 0) ? - rn : rn; -} - -void -mpz_addmul_ui (mpz_t r, const mpz_t u, unsigned long int v) -{ - mpz_t t; - mpz_init_set_ui (t, v); - mpz_mul (t, u, t); - mpz_add (r, r, t); - mpz_clear (t); -} - -void -mpz_submul_ui (mpz_t r, const mpz_t u, unsigned long int v) -{ - mpz_t t; - mpz_init_set_ui (t, v); - mpz_mul (t, u, t); - mpz_sub (r, r, t); - mpz_clear (t); -} - -void -mpz_addmul (mpz_t r, const mpz_t u, const mpz_t v) -{ - mpz_t t; - mpz_init (t); - mpz_mul (t, u, v); - mpz_add (r, r, t); - mpz_clear (t); -} - -void -mpz_submul (mpz_t r, const mpz_t u, const mpz_t v) -{ - mpz_t t; - mpz_init (t); - mpz_mul (t, u, v); - mpz_sub (r, r, t); - mpz_clear (t); -} - - -/* MPZ division */ -enum mpz_div_round_mode { GMP_DIV_FLOOR, GMP_DIV_CEIL, GMP_DIV_TRUNC }; - -/* Allows q or r to be zero. Returns 1 iff remainder is non-zero. */ -static int -mpz_div_qr (mpz_t q, mpz_t r, - const mpz_t n, const mpz_t d, enum mpz_div_round_mode mode) -{ - mp_size_t ns, ds, nn, dn, qs; - ns = n->_mp_size; - ds = d->_mp_size; - - if (ds == 0) - gmp_die("mpz_div_qr: Divide by zero."); - - if (ns == 0) - { - if (q) - q->_mp_size = 0; - if (r) - r->_mp_size = 0; - return 0; - } - - nn = GMP_ABS (ns); - dn = GMP_ABS (ds); - - qs = ds ^ ns; - - if (nn < dn) - { - if (mode == GMP_DIV_CEIL && qs >= 0) - { - /* q = 1, r = n - d */ - if (r) - mpz_sub (r, n, d); - if (q) - mpz_set_ui (q, 1); - } - else if (mode == GMP_DIV_FLOOR && qs < 0) - { - /* q = -1, r = n + d */ - if (r) - mpz_add (r, n, d); - if (q) - mpz_set_si (q, -1); - } - else - { - /* q = 0, r = d */ - if (r) - mpz_set (r, n); - if (q) - q->_mp_size = 0; - } - return 1; - } - else - { - mp_ptr np, qp; - mp_size_t qn, rn; - mpz_t tq, tr; - - mpz_init_set (tr, n); - np = tr->_mp_d; - - qn = nn - dn + 1; - - if (q) - { - mpz_init2 (tq, qn * GMP_LIMB_BITS); - qp = tq->_mp_d; - } - else - qp = NULL; - - mpn_div_qr (qp, np, nn, d->_mp_d, dn); - - if (qp) - { - qn -= (qp[qn-1] == 0); - - tq->_mp_size = qs < 0 ? 
-qn : qn; - } - rn = mpn_normalized_size (np, dn); - tr->_mp_size = ns < 0 ? - rn : rn; - - if (mode == GMP_DIV_FLOOR && qs < 0 && rn != 0) - { - if (q) - mpz_sub_ui (tq, tq, 1); - if (r) - mpz_add (tr, tr, d); - } - else if (mode == GMP_DIV_CEIL && qs >= 0 && rn != 0) - { - if (q) - mpz_add_ui (tq, tq, 1); - if (r) - mpz_sub (tr, tr, d); - } - - if (q) - { - mpz_swap (tq, q); - mpz_clear (tq); - } - if (r) - mpz_swap (tr, r); - - mpz_clear (tr); - - return rn != 0; - } -} - -void -mpz_cdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, r, n, d, GMP_DIV_CEIL); -} - -void -mpz_fdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, r, n, d, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, r, n, d, GMP_DIV_TRUNC); -} - -void -mpz_cdiv_q (mpz_t q, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, NULL, n, d, GMP_DIV_CEIL); -} - -void -mpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, NULL, n, d, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_q (mpz_t q, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC); -} - -void -mpz_cdiv_r (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL); -} - -void -mpz_fdiv_r (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_r (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, GMP_DIV_TRUNC); -} - -void -mpz_mod (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, d->_mp_size >= 0 ? GMP_DIV_FLOOR : GMP_DIV_CEIL); -} - -static void -mpz_div_q_2exp (mpz_t q, const mpz_t u, mp_bitcnt_t bit_index, - enum mpz_div_round_mode mode) -{ - mp_size_t un, qn; - mp_size_t limb_cnt; - mp_ptr qp; - int adjust; - - un = u->_mp_size; - if (un == 0) - { - q->_mp_size = 0; - return; - } - limb_cnt = bit_index / GMP_LIMB_BITS; - qn = GMP_ABS (un) - limb_cnt; - bit_index %= GMP_LIMB_BITS; - - if (mode == ((un > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* un != 0 here. */ - /* Note: Below, the final indexing at limb_cnt is valid because at - that point we have qn > 0. */ - adjust = (qn <= 0 - || !mpn_zero_p (u->_mp_d, limb_cnt) - || (u->_mp_d[limb_cnt] - & (((mp_limb_t) 1 << bit_index) - 1))); - else - adjust = 0; - - if (qn <= 0) - qn = 0; - else - { - qp = MPZ_REALLOC (q, qn); - - if (bit_index != 0) - { - mpn_rshift (qp, u->_mp_d + limb_cnt, qn, bit_index); - qn -= qp[qn - 1] == 0; - } - else - { - mpn_copyi (qp, u->_mp_d + limb_cnt, qn); - } - } - - q->_mp_size = qn; - - if (adjust) - mpz_add_ui (q, q, 1); - if (un < 0) - mpz_neg (q, q); -} - -static void -mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index, - enum mpz_div_round_mode mode) -{ - mp_size_t us, un, rn; - mp_ptr rp; - mp_limb_t mask; - - us = u->_mp_size; - if (us == 0 || bit_index == 0) - { - r->_mp_size = 0; - return; - } - rn = (bit_index + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; - assert (rn > 0); - - rp = MPZ_REALLOC (r, rn); - un = GMP_ABS (us); - - mask = GMP_LIMB_MAX >> (rn * GMP_LIMB_BITS - bit_index); - - if (rn > un) - { - /* Quotient (with truncation) is zero, and remainder is - non-zero */ - if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ - { - /* Have to negate and sign extend. */ - mp_size_t i; - - gmp_assert_nocarry (! 
mpn_neg (rp, u->_mp_d, un)); - for (i = un; i < rn - 1; i++) - rp[i] = GMP_LIMB_MAX; - - rp[rn-1] = mask; - us = -us; - } - else - { - /* Just copy */ - if (r != u) - mpn_copyi (rp, u->_mp_d, un); - - rn = un; - } - } - else - { - if (r != u) - mpn_copyi (rp, u->_mp_d, rn - 1); - - rp[rn-1] = u->_mp_d[rn-1] & mask; - - if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ - { - /* If r != 0, compute 2^{bit_count} - r. */ - mpn_neg (rp, rp, rn); - - rp[rn-1] &= mask; - - /* us is not used for anything else, so we can modify it - here to indicate flipped sign. */ - us = -us; - } - } - rn = mpn_normalized_size (rp, rn); - r->_mp_size = us < 0 ? -rn : rn; -} - -void -mpz_cdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_q_2exp (r, u, cnt, GMP_DIV_CEIL); -} - -void -mpz_fdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_q_2exp (r, u, cnt, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_q_2exp (r, u, cnt, GMP_DIV_TRUNC); -} - -void -mpz_cdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_r_2exp (r, u, cnt, GMP_DIV_CEIL); -} - -void -mpz_fdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_r_2exp (r, u, cnt, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_r_2exp (r, u, cnt, GMP_DIV_TRUNC); -} - -void -mpz_divexact (mpz_t q, const mpz_t n, const mpz_t d) -{ - gmp_assert_nocarry (mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC)); -} - -int -mpz_divisible_p (const mpz_t n, const mpz_t d) -{ - return mpz_div_qr (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; -} - -int -mpz_congruent_p (const mpz_t a, const mpz_t b, const mpz_t m) -{ - mpz_t t; - int res; - - /* a == b (mod 0) iff a == b */ - if (mpz_sgn (m) == 0) - return (mpz_cmp (a, b) == 0); - - mpz_init (t); - mpz_sub (t, a, b); - res = mpz_divisible_p (t, m); - mpz_clear (t); - - return res; -} - -static unsigned long -mpz_div_qr_ui (mpz_t q, mpz_t r, - const mpz_t n, unsigned long d, enum mpz_div_round_mode mode) -{ - unsigned long ret; - mpz_t rr, dd; - - mpz_init (rr); - mpz_init_set_ui (dd, d); - mpz_div_qr (q, rr, n, dd, mode); - mpz_clear (dd); - ret = mpz_get_ui (rr); - - if (r) - mpz_swap (r, rr); - mpz_clear (rr); - - return ret; -} - -unsigned long -mpz_cdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, r, n, d, GMP_DIV_CEIL); -} - -unsigned long -mpz_fdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, r, n, d, GMP_DIV_FLOOR); -} - -unsigned long -mpz_tdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, r, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_cdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_CEIL); -} - -unsigned long -mpz_fdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_FLOOR); -} - -unsigned long -mpz_tdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_cdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_CEIL); -} -unsigned long -mpz_fdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); -} -unsigned long -mpz_tdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_cdiv_ui 
(const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_CEIL); -} - -unsigned long -mpz_fdiv_ui (const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_FLOOR); -} - -unsigned long -mpz_tdiv_ui (const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_mod_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); -} - -void -mpz_divexact_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - gmp_assert_nocarry (mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC)); -} - -int -mpz_divisible_ui_p (const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; -} - - -/* GCD */ -static mp_limb_t -mpn_gcd_11 (mp_limb_t u, mp_limb_t v) -{ - unsigned shift; - - assert ( (u | v) > 0); - - if (u == 0) - return v; - else if (v == 0) - return u; - - gmp_ctz (shift, u | v); - - u >>= shift; - v >>= shift; - - if ( (u & 1) == 0) - MP_LIMB_T_SWAP (u, v); - - while ( (v & 1) == 0) - v >>= 1; - - while (u != v) - { - if (u > v) - { - u -= v; - do - u >>= 1; - while ( (u & 1) == 0); - } - else - { - v -= u; - do - v >>= 1; - while ( (v & 1) == 0); - } - } - return u << shift; -} - -mp_size_t -mpn_gcd (mp_ptr rp, mp_ptr up, mp_size_t un, mp_ptr vp, mp_size_t vn) -{ - assert (un >= vn); - assert (vn > 0); - assert (!GMP_MPN_OVERLAP_P (up, un, vp, vn)); - assert (vp[vn-1] > 0); - assert ((up[0] | vp[0]) & 1); - - if (un > vn) - mpn_div_qr (NULL, up, un, vp, vn); - - un = mpn_normalized_size (up, vn); - if (un == 0) - { - mpn_copyi (rp, vp, vn); - return vn; - } - - if (!(vp[0] & 1)) - MPN_PTR_SWAP (up, un, vp, vn); - - while (un > 1 || vn > 1) - { - int shift; - assert (vp[0] & 1); - - while (up[0] == 0) - { - up++; - un--; - } - gmp_ctz (shift, up[0]); - if (shift > 0) - { - gmp_assert_nocarry (mpn_rshift(up, up, un, shift)); - un -= (up[un-1] == 0); - } - - if (un < vn) - MPN_PTR_SWAP (up, un, vp, vn); - else if (un == vn) - { - int c = mpn_cmp (up, vp, un); - if (c == 0) - { - mpn_copyi (rp, up, un); - return un; - } - else if (c < 0) - MP_PTR_SWAP (up, vp); - } - - gmp_assert_nocarry (mpn_sub (up, up, un, vp, vn)); - un = mpn_normalized_size (up, un); - } - rp[0] = mpn_gcd_11 (up[0], vp[0]); - return 1; -} - -unsigned long -mpz_gcd_ui (mpz_t g, const mpz_t u, unsigned long v) -{ - mpz_t t; - mpz_init_set_ui(t, v); - mpz_gcd (t, u, t); - if (v > 0) - v = mpz_get_ui (t); - - if (g) - mpz_swap (t, g); - - mpz_clear (t); - - return v; -} - -static mp_bitcnt_t -mpz_make_odd (mpz_t r) -{ - mp_bitcnt_t shift; - - assert (r->_mp_size > 0); - /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ - shift = mpn_scan1 (r->_mp_d, 0); - mpz_tdiv_q_2exp (r, r, shift); - - return shift; -} - -void -mpz_gcd (mpz_t g, const mpz_t u, const mpz_t v) -{ - mpz_t tu, tv; - mp_bitcnt_t uz, vz, gz; - - if (u->_mp_size == 0) - { - mpz_abs (g, v); - return; - } - if (v->_mp_size == 0) - { - mpz_abs (g, u); - return; - } - - mpz_init (tu); - mpz_init (tv); - - mpz_abs (tu, u); - uz = mpz_make_odd (tu); - mpz_abs (tv, v); - vz = mpz_make_odd (tv); - gz = GMP_MIN (uz, vz); - - if (tu->_mp_size < tv->_mp_size) - mpz_swap (tu, tv); - - tu->_mp_size = mpn_gcd (tu->_mp_d, tu->_mp_d, tu->_mp_size, tv->_mp_d, tv->_mp_size); - mpz_mul_2exp (g, tu, gz); - - mpz_clear (tu); - mpz_clear (tv); -} - -void -mpz_gcdext (mpz_t g, mpz_t s, mpz_t t, const mpz_t u, const mpz_t v) -{ - mpz_t tu, tv, s0, s1, t0, t1; - mp_bitcnt_t uz, vz, 
gz; - mp_bitcnt_t power; - int cmp; - - if (u->_mp_size == 0) - { - /* g = 0 u + sgn(v) v */ - signed long sign = mpz_sgn (v); - mpz_abs (g, v); - if (s) - s->_mp_size = 0; - if (t) - mpz_set_si (t, sign); - return; - } - - if (v->_mp_size == 0) - { - /* g = sgn(u) u + 0 v */ - signed long sign = mpz_sgn (u); - mpz_abs (g, u); - if (s) - mpz_set_si (s, sign); - if (t) - t->_mp_size = 0; - return; - } - - mpz_init (tu); - mpz_init (tv); - mpz_init (s0); - mpz_init (s1); - mpz_init (t0); - mpz_init (t1); - - mpz_abs (tu, u); - uz = mpz_make_odd (tu); - mpz_abs (tv, v); - vz = mpz_make_odd (tv); - gz = GMP_MIN (uz, vz); - - uz -= gz; - vz -= gz; - - /* Cofactors corresponding to odd gcd. gz handled later. */ - if (tu->_mp_size < tv->_mp_size) - { - mpz_swap (tu, tv); - MPZ_SRCPTR_SWAP (u, v); - MPZ_PTR_SWAP (s, t); - MP_BITCNT_T_SWAP (uz, vz); - } - - /* Maintain - * - * u = t0 tu + t1 tv - * v = s0 tu + s1 tv - * - * where u and v denote the inputs with common factors of two - * eliminated, and det (s0, t0; s1, t1) = 2^p. Then - * - * 2^p tu = s1 u - t1 v - * 2^p tv = -s0 u + t0 v - */ - - /* After initial division, tu = q tv + tu', we have - * - * u = 2^uz (tu' + q tv) - * v = 2^vz tv - * - * or - * - * t0 = 2^uz, t1 = 2^uz q - * s0 = 0, s1 = 2^vz - */ - - mpz_tdiv_qr (t1, tu, tu, tv); - mpz_mul_2exp (t1, t1, uz); - - mpz_setbit (s1, vz); - power = uz + vz; - - if (tu->_mp_size > 0) - { - mp_bitcnt_t shift; - shift = mpz_make_odd (tu); - mpz_setbit (t0, uz + shift); - power += shift; - - for (;;) - { - int c; - c = mpz_cmp (tu, tv); - if (c == 0) - break; - - if (c < 0) - { - /* tv = tv' + tu - * - * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv' - * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */ - - mpz_sub (tv, tv, tu); - mpz_add (t0, t0, t1); - mpz_add (s0, s0, s1); - - shift = mpz_make_odd (tv); - mpz_mul_2exp (t1, t1, shift); - mpz_mul_2exp (s1, s1, shift); - } - else - { - mpz_sub (tu, tu, tv); - mpz_add (t1, t0, t1); - mpz_add (s1, s0, s1); - - shift = mpz_make_odd (tu); - mpz_mul_2exp (t0, t0, shift); - mpz_mul_2exp (s0, s0, shift); - } - power += shift; - } - } - else - mpz_setbit (t0, uz); - - /* Now tv = odd part of gcd, and -s0 and t0 are corresponding - cofactors. */ - - mpz_mul_2exp (tv, tv, gz); - mpz_neg (s0, s0); - - /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To - adjust cofactors, we need u / g and v / g */ - - mpz_divexact (s1, v, tv); - mpz_abs (s1, s1); - mpz_divexact (t1, u, tv); - mpz_abs (t1, t1); - - while (power-- > 0) - { - /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */ - if (mpz_odd_p (s0) || mpz_odd_p (t0)) - { - mpz_sub (s0, s0, s1); - mpz_add (t0, t0, t1); - } - assert (mpz_even_p (t0) && mpz_even_p (s0)); - mpz_tdiv_q_2exp (s0, s0, 1); - mpz_tdiv_q_2exp (t0, t0, 1); - } - - /* Choose small cofactors (they should generally satify - - |s| < |u| / 2g and |t| < |v| / 2g, - - with some documented exceptions). Always choose the smallest s, - if there are two choices for s with same absolute value, choose - the one with smallest corresponding t (this asymmetric condition - is needed to prefer s = 0, |t| = 1 when g = |a| = |b|). 
*/ - mpz_add (s1, s0, s1); - mpz_sub (t1, t0, t1); - cmp = mpz_cmpabs (s0, s1); - if (cmp > 0 || (cmp == 0 && mpz_cmpabs (t0, t1) > 0)) - { - mpz_swap (s0, s1); - mpz_swap (t0, t1); - } - if (u->_mp_size < 0) - mpz_neg (s0, s0); - if (v->_mp_size < 0) - mpz_neg (t0, t0); - - mpz_swap (g, tv); - if (s) - mpz_swap (s, s0); - if (t) - mpz_swap (t, t0); - - mpz_clear (tu); - mpz_clear (tv); - mpz_clear (s0); - mpz_clear (s1); - mpz_clear (t0); - mpz_clear (t1); -} - -void -mpz_lcm (mpz_t r, const mpz_t u, const mpz_t v) -{ - mpz_t g; - - if (u->_mp_size == 0 || v->_mp_size == 0) - { - r->_mp_size = 0; - return; - } - - mpz_init (g); - - mpz_gcd (g, u, v); - mpz_divexact (g, u, g); - mpz_mul (r, g, v); - - mpz_clear (g); - mpz_abs (r, r); -} - -void -mpz_lcm_ui (mpz_t r, const mpz_t u, unsigned long v) -{ - if (v == 0 || u->_mp_size == 0) - { - r->_mp_size = 0; - return; - } - - v /= mpz_gcd_ui (NULL, u, v); - mpz_mul_ui (r, u, v); - - mpz_abs (r, r); -} - -int -mpz_invert (mpz_t r, const mpz_t u, const mpz_t m) -{ - mpz_t g, tr; - int invertible; - - if (u->_mp_size == 0 || mpz_cmpabs_ui (m, 1) <= 0) - return 0; - - mpz_init (g); - mpz_init (tr); - - mpz_gcdext (g, tr, NULL, u, m); - invertible = (mpz_cmp_ui (g, 1) == 0); - - if (invertible) - { - if (tr->_mp_size < 0) - { - if (m->_mp_size >= 0) - mpz_add (tr, tr, m); - else - mpz_sub (tr, tr, m); - } - mpz_swap (r, tr); - } - - mpz_clear (g); - mpz_clear (tr); - return invertible; -} - - -/* Higher level operations (sqrt, pow and root) */ - -void -mpz_pow_ui (mpz_t r, const mpz_t b, unsigned long e) -{ - unsigned long bit; - mpz_t tr; - mpz_init_set_ui (tr, 1); - - bit = GMP_ULONG_HIGHBIT; - do - { - mpz_mul (tr, tr, tr); - if (e & bit) - mpz_mul (tr, tr, b); - bit >>= 1; - } - while (bit > 0); - - mpz_swap (r, tr); - mpz_clear (tr); -} - -void -mpz_ui_pow_ui (mpz_t r, unsigned long blimb, unsigned long e) -{ - mpz_t b; - - mpz_init_set_ui (b, blimb); - mpz_pow_ui (r, b, e); - mpz_clear (b); -} - -void -mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m) -{ - mpz_t tr; - mpz_t base; - mp_size_t en, mn; - mp_srcptr mp; - struct gmp_div_inverse minv; - unsigned shift; - mp_ptr tp = NULL; - - en = GMP_ABS (e->_mp_size); - mn = GMP_ABS (m->_mp_size); - if (mn == 0) - gmp_die ("mpz_powm: Zero modulo."); - - if (en == 0) - { - mpz_set_ui (r, mpz_cmpabs_ui (m, 1)); - return; - } - - mp = m->_mp_d; - mpn_div_qr_invert (&minv, mp, mn); - shift = minv.shift; - - if (shift > 0) - { - /* To avoid shifts, we do all our reductions, except the final - one, using a *normalized* m. */ - minv.shift = 0; - - tp = gmp_alloc_limbs (mn); - gmp_assert_nocarry (mpn_lshift (tp, mp, mn, shift)); - mp = tp; - } - - mpz_init (base); - - if (e->_mp_size < 0) - { - if (!mpz_invert (base, b, m)) - gmp_die ("mpz_powm: Negative exponent and non-invertible base."); - } - else - { - mp_size_t bn; - mpz_abs (base, b); - - bn = base->_mp_size; - if (bn >= mn) - { - mpn_div_qr_preinv (NULL, base->_mp_d, base->_mp_size, mp, mn, &minv); - bn = mn; - } - - /* We have reduced the absolute value. Now take care of the - sign. Note that we get zero represented non-canonically as - m. 
*/ - if (b->_mp_size < 0) - { - mp_ptr bp = MPZ_REALLOC (base, mn); - gmp_assert_nocarry (mpn_sub (bp, mp, mn, bp, bn)); - bn = mn; - } - base->_mp_size = mpn_normalized_size (base->_mp_d, bn); - } - mpz_init_set_ui (tr, 1); - - while (--en >= 0) - { - mp_limb_t w = e->_mp_d[en]; - mp_limb_t bit; - - bit = GMP_LIMB_HIGHBIT; - do - { - mpz_mul (tr, tr, tr); - if (w & bit) - mpz_mul (tr, tr, base); - if (tr->_mp_size > mn) - { - mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); - tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); - } - bit >>= 1; - } - while (bit > 0); - } - - /* Final reduction */ - if (tr->_mp_size >= mn) - { - minv.shift = shift; - mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); - tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); - } - if (tp) - gmp_free_limbs (tp, mn); - - mpz_swap (r, tr); - mpz_clear (tr); - mpz_clear (base); -} - -void -mpz_powm_ui (mpz_t r, const mpz_t b, unsigned long elimb, const mpz_t m) -{ - mpz_t e; - - mpz_init_set_ui (e, elimb); - mpz_powm (r, b, e, m); - mpz_clear (e); -} - -/* x=trunc(y^(1/z)), r=y-x^z */ -void -mpz_rootrem (mpz_t x, mpz_t r, const mpz_t y, unsigned long z) -{ - int sgn; - mp_bitcnt_t bc; - mpz_t t, u; - - sgn = y->_mp_size < 0; - if ((~z & sgn) != 0) - gmp_die ("mpz_rootrem: Negative argument, with even root."); - if (z == 0) - gmp_die ("mpz_rootrem: Zeroth root."); - - if (mpz_cmpabs_ui (y, 1) <= 0) { - if (x) - mpz_set (x, y); - if (r) - r->_mp_size = 0; - return; - } - - mpz_init (u); - mpz_init (t); - bc = (mpz_sizeinbase (y, 2) - 1) / z + 1; - mpz_setbit (t, bc); - - if (z == 2) /* simplify sqrt loop: z-1 == 1 */ - do { - mpz_swap (u, t); /* u = x */ - mpz_tdiv_q (t, y, u); /* t = y/x */ - mpz_add (t, t, u); /* t = y/x + x */ - mpz_tdiv_q_2exp (t, t, 1); /* x'= (y/x + x)/2 */ - } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ - else /* z != 2 */ { - mpz_t v; - - mpz_init (v); - if (sgn) - mpz_neg (t, t); - - do { - mpz_swap (u, t); /* u = x */ - mpz_pow_ui (t, u, z - 1); /* t = x^(z-1) */ - mpz_tdiv_q (t, y, t); /* t = y/x^(z-1) */ - mpz_mul_ui (v, u, z - 1); /* v = x*(z-1) */ - mpz_add (t, t, v); /* t = y/x^(z-1) + x*(z-1) */ - mpz_tdiv_q_ui (t, t, z); /* x'=(y/x^(z-1) + x*(z-1))/z */ - } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ - - mpz_clear (v); - } - - if (r) { - mpz_pow_ui (t, u, z); - mpz_sub (r, y, t); - } - if (x) - mpz_swap (x, u); - mpz_clear (u); - mpz_clear (t); -} - -int -mpz_root (mpz_t x, const mpz_t y, unsigned long z) -{ - int res; - mpz_t r; - - mpz_init (r); - mpz_rootrem (x, r, y, z); - res = r->_mp_size == 0; - mpz_clear (r); - - return res; -} - -/* Compute s = floor(sqrt(u)) and r = u - s^2. 
Allows r == NULL */ -void -mpz_sqrtrem (mpz_t s, mpz_t r, const mpz_t u) -{ - mpz_rootrem (s, r, u, 2); -} - -void -mpz_sqrt (mpz_t s, const mpz_t u) -{ - mpz_rootrem (s, NULL, u, 2); -} - -int -mpz_perfect_square_p (const mpz_t u) -{ - if (u->_mp_size <= 0) - return (u->_mp_size == 0); - else - return mpz_root (NULL, u, 2); -} - -int -mpn_perfect_square_p (mp_srcptr p, mp_size_t n) -{ - mpz_t t; - - assert (n > 0); - assert (p [n-1] != 0); - return mpz_root (NULL, mpz_roinit_normal_n (t, p, n), 2); -} - -mp_size_t -mpn_sqrtrem (mp_ptr sp, mp_ptr rp, mp_srcptr p, mp_size_t n) -{ - mpz_t s, r, u; - mp_size_t res; - - assert (n > 0); - assert (p [n-1] != 0); - - mpz_init (r); - mpz_init (s); - mpz_rootrem (s, r, mpz_roinit_normal_n (u, p, n), 2); - - assert (s->_mp_size == (n+1)/2); - mpn_copyd (sp, s->_mp_d, s->_mp_size); - mpz_clear (s); - res = r->_mp_size; - if (rp) - mpn_copyd (rp, r->_mp_d, res); - mpz_clear (r); - return res; -} - -/* Combinatorics */ - -void -mpz_mfac_uiui (mpz_t x, unsigned long n, unsigned long m) -{ - mpz_set_ui (x, n + (n == 0)); - if (m + 1 < 2) return; - while (n > m + 1) - mpz_mul_ui (x, x, n -= m); -} - -void -mpz_2fac_ui (mpz_t x, unsigned long n) -{ - mpz_mfac_uiui (x, n, 2); -} - -void -mpz_fac_ui (mpz_t x, unsigned long n) -{ - mpz_mfac_uiui (x, n, 1); -} - -void -mpz_bin_uiui (mpz_t r, unsigned long n, unsigned long k) -{ - mpz_t t; - - mpz_set_ui (r, k <= n); - - if (k > (n >> 1)) - k = (k <= n) ? n - k : 0; - - mpz_init (t); - mpz_fac_ui (t, k); - - for (; k > 0; --k) - mpz_mul_ui (r, r, n--); - - mpz_divexact (r, r, t); - mpz_clear (t); -} - - -/* Primality testing */ - -/* Computes Kronecker (a/b) with odd b, a!=0 and GCD(a,b) = 1 */ -/* Adapted from JACOBI_BASE_METHOD==4 in mpn/generic/jacbase.c */ -static int -gmp_jacobi_coprime (mp_limb_t a, mp_limb_t b) -{ - int c, bit = 0; - - assert (b & 1); - assert (a != 0); - /* assert (mpn_gcd_11 (a, b) == 1); */ - - /* Below, we represent a and b shifted right so that the least - significant one bit is implicit. */ - b >>= 1; - - gmp_ctz(c, a); - a >>= 1; - - for (;;) - { - a >>= c; - /* (2/b) = -1 if b = 3 or 5 mod 8 */ - bit ^= c & (b ^ (b >> 1)); - if (a < b) - { - if (a == 0) - return bit & 1 ? -1 : 1; - bit ^= a & b; - a = b - a; - b -= a; - } - else - { - a -= b; - assert (a != 0); - } - - gmp_ctz(c, a); - ++c; - } -} - -static void -gmp_lucas_step_k_2k (mpz_t V, mpz_t Qk, const mpz_t n) -{ - mpz_mod (Qk, Qk, n); - /* V_{2k} <- V_k ^ 2 - 2Q^k */ - mpz_mul (V, V, V); - mpz_submul_ui (V, Qk, 2); - mpz_tdiv_r (V, V, n); - /* Q^{2k} = (Q^k)^2 */ - mpz_mul (Qk, Qk, Qk); -} - -/* Computes V_k, Q^k (mod n) for the Lucas' sequence */ -/* with P=1, Q=Q; k = (n>>b0)|1. */ -/* Requires an odd n > 4; b0 > 0; -2*Q must not overflow a long */ -/* Returns (U_k == 0) and sets V=V_k and Qk=Q^k. 
*/ -static int -gmp_lucas_mod (mpz_t V, mpz_t Qk, long Q, - mp_bitcnt_t b0, const mpz_t n) -{ - mp_bitcnt_t bs; - mpz_t U; - int res; - - assert (b0 > 0); - assert (Q <= - (LONG_MIN / 2)); - assert (Q >= - (LONG_MAX / 2)); - assert (mpz_cmp_ui (n, 4) > 0); - assert (mpz_odd_p (n)); - - mpz_init_set_ui (U, 1); /* U1 = 1 */ - mpz_set_ui (V, 1); /* V1 = 1 */ - mpz_set_si (Qk, Q); - - for (bs = mpz_sizeinbase (n, 2) - 1; --bs >= b0;) - { - /* U_{2k} <- U_k * V_k */ - mpz_mul (U, U, V); - /* V_{2k} <- V_k ^ 2 - 2Q^k */ - /* Q^{2k} = (Q^k)^2 */ - gmp_lucas_step_k_2k (V, Qk, n); - - /* A step k->k+1 is performed if the bit in $n$ is 1 */ - /* mpz_tstbit(n,bs) or the bit is 0 in $n$ but */ - /* should be 1 in $n+1$ (bs == b0) */ - if (b0 == bs || mpz_tstbit (n, bs)) - { - /* Q^{k+1} <- Q^k * Q */ - mpz_mul_si (Qk, Qk, Q); - /* U_{k+1} <- (U_k + V_k) / 2 */ - mpz_swap (U, V); /* Keep in V the old value of U_k */ - mpz_add (U, U, V); - /* We have to compute U/2, so we need an even value, */ - /* equivalent (mod n) */ - if (mpz_odd_p (U)) - mpz_add (U, U, n); - mpz_tdiv_q_2exp (U, U, 1); - /* V_{k+1} <-(D*U_k + V_k) / 2 = - U_{k+1} + (D-1)/2*U_k = U_{k+1} - 2Q*U_k */ - mpz_mul_si (V, V, -2*Q); - mpz_add (V, U, V); - mpz_tdiv_r (V, V, n); - } - mpz_tdiv_r (U, U, n); - } - - res = U->_mp_size == 0; - mpz_clear (U); - return res; -} - -/* Performs strong Lucas' test on x, with parameters suggested */ -/* for the BPSW test. Qk is only passed to recycle a variable. */ -/* Requires GCD (x,6) = 1.*/ -static int -gmp_stronglucas (const mpz_t x, mpz_t Qk) -{ - mp_bitcnt_t b0; - mpz_t V, n; - mp_limb_t maxD, D; /* The absolute value is stored. */ - long Q; - mp_limb_t tl; - - /* Test on the absolute value. */ - mpz_roinit_normal_n (n, x->_mp_d, GMP_ABS (x->_mp_size)); - - assert (mpz_odd_p (n)); - /* assert (mpz_gcd_ui (NULL, n, 6) == 1); */ - if (mpz_root (Qk, n, 2)) - return 0; /* A square is composite. */ - - /* Check Ds up to square root (in case, n is prime) - or avoid overflows */ - maxD = (Qk->_mp_size == 1) ? Qk->_mp_d [0] - 1 : GMP_LIMB_MAX; - - D = 3; - /* Search a D such that (D/n) = -1 in the sequence 5,-7,9,-11,.. */ - /* For those Ds we have (D/n) = (n/|D|) */ - do - { - if (D >= maxD) - return 1 + (D != GMP_LIMB_MAX); /* (1 + ! ~ D) */ - D += 2; - tl = mpz_tdiv_ui (n, D); - if (tl == 0) - return 0; - } - while (gmp_jacobi_coprime (tl, D) == 1); - - mpz_init (V); - - /* n-(D/n) = n+1 = d*2^{b0}, with d = (n>>b0) | 1 */ - b0 = mpn_common_scan (~ n->_mp_d[0], 0, n->_mp_d, n->_mp_size, GMP_LIMB_MAX); - /* b0 = mpz_scan0 (n, 0); */ - - /* D= P^2 - 4Q; P = 1; Q = (1-D)/4 */ - Q = (D & 2) ? (long) (D >> 2) + 1 : -(long) (D >> 2); - - if (! gmp_lucas_mod (V, Qk, Q, b0, n)) /* If Ud != 0 */ - while (V->_mp_size != 0 && --b0 != 0) /* while Vk != 0 */ - /* V <- V ^ 2 - 2Q^k */ - /* Q^{2k} = (Q^k)^2 */ - gmp_lucas_step_k_2k (V, Qk, n); - - mpz_clear (V); - return (b0 != 0); -} - -static int -gmp_millerrabin (const mpz_t n, const mpz_t nm1, mpz_t y, - const mpz_t q, mp_bitcnt_t k) -{ - assert (k > 0); - - /* Caller must initialize y to the base. */ - mpz_powm (y, y, q, n); - - if (mpz_cmp_ui (y, 1) == 0 || mpz_cmp (y, nm1) == 0) - return 1; - - while (--k > 0) - { - mpz_powm_ui (y, y, 2, n); - if (mpz_cmp (y, nm1) == 0) - return 1; - } - return 0; -} - -/* This product is 0xc0cfd797, and fits in 32 bits. 
*/ -#define GMP_PRIME_PRODUCT \ - (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL) - -/* Bit (p+1)/2 is set, for each odd prime <= 61 */ -#define GMP_PRIME_MASK 0xc96996dcUL - -int -mpz_probab_prime_p (const mpz_t n, int reps) -{ - mpz_t nm1; - mpz_t q; - mpz_t y; - mp_bitcnt_t k; - int is_prime; - int j; - - /* Note that we use the absolute value of n only, for compatibility - with the real GMP. */ - if (mpz_even_p (n)) - return (mpz_cmpabs_ui (n, 2) == 0) ? 2 : 0; - - /* Above test excludes n == 0 */ - assert (n->_mp_size != 0); - - if (mpz_cmpabs_ui (n, 64) < 0) - return (GMP_PRIME_MASK >> (n->_mp_d[0] >> 1)) & 2; - - if (mpz_gcd_ui (NULL, n, GMP_PRIME_PRODUCT) != 1) - return 0; - - /* All prime factors are >= 31. */ - if (mpz_cmpabs_ui (n, 31*31) < 0) - return 2; - - mpz_init (nm1); - mpz_init (q); - - /* Find q and k, where q is odd and n = 1 + 2**k * q. */ - mpz_abs (nm1, n); - nm1->_mp_d[0] -= 1; - /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ - k = mpn_scan1 (nm1->_mp_d, 0); - mpz_tdiv_q_2exp (q, nm1, k); - - /* BPSW test */ - mpz_init_set_ui (y, 2); - is_prime = gmp_millerrabin (n, nm1, y, q, k) && gmp_stronglucas (n, y); - reps -= 24; /* skip the first 24 repetitions */ - - /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] = - j^2 + j + 41 using Euler's polynomial. We potentially stop early, - if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps > - 30 (a[30] == 971 > 31*31 == 961). */ - - for (j = 0; is_prime & (j < reps); j++) - { - mpz_set_ui (y, (unsigned long) j*j+j+41); - if (mpz_cmp (y, nm1) >= 0) - { - /* Don't try any further bases. This "early" break does not affect - the result for any reasonable reps value (<=5000 was tested) */ - assert (j >= 30); - break; - } - is_prime = gmp_millerrabin (n, nm1, y, q, k); - } - mpz_clear (nm1); - mpz_clear (q); - mpz_clear (y); - - return is_prime; -} - - -/* Logical operations and bit manipulation. */ - -/* Numbers are treated as if represented in two's complement (and - infinitely sign extended). For a negative values we get the two's - complement from -x = ~x + 1, where ~ is bitwise complement. - Negation transforms - - xxxx10...0 - - into - - yyyy10...0 - - where yyyy is the bitwise complement of xxxx. So least significant - bits, up to and including the first one bit, are unchanged, and - the more significant bits are all complemented. - - To change a bit from zero to one in a negative number, subtract the - corresponding power of two from the absolute value. This can never - underflow. To change a bit from one to zero, add the corresponding - power of two, and this might overflow. E.g., if x = -001111, the - two's complement is 110001. Clearing the least significant bit, we - get two's complement 110000, and -010000. */ - -int -mpz_tstbit (const mpz_t d, mp_bitcnt_t bit_index) -{ - mp_size_t limb_index; - unsigned shift; - mp_size_t ds; - mp_size_t dn; - mp_limb_t w; - int bit; - - ds = d->_mp_size; - dn = GMP_ABS (ds); - limb_index = bit_index / GMP_LIMB_BITS; - if (limb_index >= dn) - return ds < 0; - - shift = bit_index % GMP_LIMB_BITS; - w = d->_mp_d[limb_index]; - bit = (w >> shift) & 1; - - if (ds < 0) - { - /* d < 0. Check if any of the bits below is set: If so, our bit - must be complemented. 
*/ - if (shift > 0 && (mp_limb_t) (w << (GMP_LIMB_BITS - shift)) > 0) - return bit ^ 1; - while (--limb_index >= 0) - if (d->_mp_d[limb_index] > 0) - return bit ^ 1; - } - return bit; -} - -static void -mpz_abs_add_bit (mpz_t d, mp_bitcnt_t bit_index) -{ - mp_size_t dn, limb_index; - mp_limb_t bit; - mp_ptr dp; - - dn = GMP_ABS (d->_mp_size); - - limb_index = bit_index / GMP_LIMB_BITS; - bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); - - if (limb_index >= dn) - { - mp_size_t i; - /* The bit should be set outside of the end of the number. - We have to increase the size of the number. */ - dp = MPZ_REALLOC (d, limb_index + 1); - - dp[limb_index] = bit; - for (i = dn; i < limb_index; i++) - dp[i] = 0; - dn = limb_index + 1; - } - else - { - mp_limb_t cy; - - dp = d->_mp_d; - - cy = mpn_add_1 (dp + limb_index, dp + limb_index, dn - limb_index, bit); - if (cy > 0) - { - dp = MPZ_REALLOC (d, dn + 1); - dp[dn++] = cy; - } - } - - d->_mp_size = (d->_mp_size < 0) ? - dn : dn; -} - -static void -mpz_abs_sub_bit (mpz_t d, mp_bitcnt_t bit_index) -{ - mp_size_t dn, limb_index; - mp_ptr dp; - mp_limb_t bit; - - dn = GMP_ABS (d->_mp_size); - dp = d->_mp_d; - - limb_index = bit_index / GMP_LIMB_BITS; - bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); - - assert (limb_index < dn); - - gmp_assert_nocarry (mpn_sub_1 (dp + limb_index, dp + limb_index, - dn - limb_index, bit)); - dn = mpn_normalized_size (dp, dn); - d->_mp_size = (d->_mp_size < 0) ? - dn : dn; -} - -void -mpz_setbit (mpz_t d, mp_bitcnt_t bit_index) -{ - if (!mpz_tstbit (d, bit_index)) - { - if (d->_mp_size >= 0) - mpz_abs_add_bit (d, bit_index); - else - mpz_abs_sub_bit (d, bit_index); - } -} - -void -mpz_clrbit (mpz_t d, mp_bitcnt_t bit_index) -{ - if (mpz_tstbit (d, bit_index)) - { - if (d->_mp_size >= 0) - mpz_abs_sub_bit (d, bit_index); - else - mpz_abs_add_bit (d, bit_index); - } -} - -void -mpz_combit (mpz_t d, mp_bitcnt_t bit_index) -{ - if (mpz_tstbit (d, bit_index) ^ (d->_mp_size < 0)) - mpz_abs_sub_bit (d, bit_index); - else - mpz_abs_add_bit (d, bit_index); -} - -void -mpz_com (mpz_t r, const mpz_t u) -{ - mpz_add_ui (r, u, 1); - mpz_neg (r, r); -} - -void -mpz_and (mpz_t r, const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, rn, i; - mp_ptr up, vp, rp; - - mp_limb_t ux, vx, rx; - mp_limb_t uc, vc, rc; - mp_limb_t ul, vl, rl; - - un = GMP_ABS (u->_mp_size); - vn = GMP_ABS (v->_mp_size); - if (un < vn) - { - MPZ_SRCPTR_SWAP (u, v); - MP_SIZE_T_SWAP (un, vn); - } - if (vn == 0) - { - r->_mp_size = 0; - return; - } - - uc = u->_mp_size < 0; - vc = v->_mp_size < 0; - rc = uc & vc; - - ux = -uc; - vx = -vc; - rx = -rc; - - /* If the smaller input is positive, higher limbs don't matter. */ - rn = vx ? un : vn; - - rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); - - up = u->_mp_d; - vp = v->_mp_d; - - i = 0; - do - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - vl = (vp[i] ^ vx) + vc; - vc = vl < vc; - - rl = ( (ul & vl) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - while (++i < vn); - assert (vc == 0); - - for (; i < rn; i++) - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - rl = ( (ul & vx) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - if (rc) - rp[rn++] = rc; - else - rn = mpn_normalized_size (rp, rn); - - r->_mp_size = rx ? 
-rn : rn; -} - -void -mpz_ior (mpz_t r, const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, rn, i; - mp_ptr up, vp, rp; - - mp_limb_t ux, vx, rx; - mp_limb_t uc, vc, rc; - mp_limb_t ul, vl, rl; - - un = GMP_ABS (u->_mp_size); - vn = GMP_ABS (v->_mp_size); - if (un < vn) - { - MPZ_SRCPTR_SWAP (u, v); - MP_SIZE_T_SWAP (un, vn); - } - if (vn == 0) - { - mpz_set (r, u); - return; - } - - uc = u->_mp_size < 0; - vc = v->_mp_size < 0; - rc = uc | vc; - - ux = -uc; - vx = -vc; - rx = -rc; - - /* If the smaller input is negative, by sign extension higher limbs - don't matter. */ - rn = vx ? vn : un; - - rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); - - up = u->_mp_d; - vp = v->_mp_d; - - i = 0; - do - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - vl = (vp[i] ^ vx) + vc; - vc = vl < vc; - - rl = ( (ul | vl) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - while (++i < vn); - assert (vc == 0); - - for (; i < rn; i++) - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - rl = ( (ul | vx) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - if (rc) - rp[rn++] = rc; - else - rn = mpn_normalized_size (rp, rn); - - r->_mp_size = rx ? -rn : rn; -} - -void -mpz_xor (mpz_t r, const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, i; - mp_ptr up, vp, rp; - - mp_limb_t ux, vx, rx; - mp_limb_t uc, vc, rc; - mp_limb_t ul, vl, rl; - - un = GMP_ABS (u->_mp_size); - vn = GMP_ABS (v->_mp_size); - if (un < vn) - { - MPZ_SRCPTR_SWAP (u, v); - MP_SIZE_T_SWAP (un, vn); - } - if (vn == 0) - { - mpz_set (r, u); - return; - } - - uc = u->_mp_size < 0; - vc = v->_mp_size < 0; - rc = uc ^ vc; - - ux = -uc; - vx = -vc; - rx = -rc; - - rp = MPZ_REALLOC (r, un + (mp_size_t) rc); - - up = u->_mp_d; - vp = v->_mp_d; - - i = 0; - do - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - vl = (vp[i] ^ vx) + vc; - vc = vl < vc; - - rl = (ul ^ vl ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - while (++i < vn); - assert (vc == 0); - - for (; i < un; i++) - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - rl = (ul ^ ux) + rc; - rc = rl < rc; - rp[i] = rl; - } - if (rc) - rp[un++] = rc; - else - un = mpn_normalized_size (rp, un); - - r->_mp_size = rx ? -un : un; -} - -static unsigned -gmp_popcount_limb (mp_limb_t x) -{ - unsigned c; - - /* Do 16 bits at a time, to avoid limb-sized constants. 
*/ - int LOCAL_SHIFT_BITS = 16; - for (c = 0; x > 0;) - { - unsigned w = x - ((x >> 1) & 0x5555); - w = ((w >> 2) & 0x3333) + (w & 0x3333); - w = (w >> 4) + w; - w = ((w >> 8) & 0x000f) + (w & 0x000f); - c += w; - if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) - x >>= LOCAL_SHIFT_BITS; - else - x = 0; - } - return c; -} - -mp_bitcnt_t -mpn_popcount (mp_srcptr p, mp_size_t n) -{ - mp_size_t i; - mp_bitcnt_t c; - - for (c = 0, i = 0; i < n; i++) - c += gmp_popcount_limb (p[i]); - - return c; -} - -mp_bitcnt_t -mpz_popcount (const mpz_t u) -{ - mp_size_t un; - - un = u->_mp_size; - - if (un < 0) - return ~(mp_bitcnt_t) 0; - - return mpn_popcount (u->_mp_d, un); -} - -mp_bitcnt_t -mpz_hamdist (const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, i; - mp_limb_t uc, vc, ul, vl, comp; - mp_srcptr up, vp; - mp_bitcnt_t c; - - un = u->_mp_size; - vn = v->_mp_size; - - if ( (un ^ vn) < 0) - return ~(mp_bitcnt_t) 0; - - comp = - (uc = vc = (un < 0)); - if (uc) - { - assert (vn < 0); - un = -un; - vn = -vn; - } - - up = u->_mp_d; - vp = v->_mp_d; - - if (un < vn) - MPN_SRCPTR_SWAP (up, un, vp, vn); - - for (i = 0, c = 0; i < vn; i++) - { - ul = (up[i] ^ comp) + uc; - uc = ul < uc; - - vl = (vp[i] ^ comp) + vc; - vc = vl < vc; - - c += gmp_popcount_limb (ul ^ vl); - } - assert (vc == 0); - - for (; i < un; i++) - { - ul = (up[i] ^ comp) + uc; - uc = ul < uc; - - c += gmp_popcount_limb (ul ^ comp); - } - - return c; -} - -mp_bitcnt_t -mpz_scan1 (const mpz_t u, mp_bitcnt_t starting_bit) -{ - mp_ptr up; - mp_size_t us, un, i; - mp_limb_t limb, ux; - - us = u->_mp_size; - un = GMP_ABS (us); - i = starting_bit / GMP_LIMB_BITS; - - /* Past the end there's no 1 bits for u>=0, or an immediate 1 bit - for u<0. Notice this test picks up any u==0 too. */ - if (i >= un) - return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit); - - up = u->_mp_d; - ux = 0; - limb = up[i]; - - if (starting_bit != 0) - { - if (us < 0) - { - ux = mpn_zero_p (up, i); - limb = ~ limb + ux; - ux = - (mp_limb_t) (limb >= ux); - } - - /* Mask to 0 all bits before starting_bit, thus ignoring them. */ - limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); - } - - return mpn_common_scan (limb, i, up, un, ux); -} - -mp_bitcnt_t -mpz_scan0 (const mpz_t u, mp_bitcnt_t starting_bit) -{ - mp_ptr up; - mp_size_t us, un, i; - mp_limb_t limb, ux; - - us = u->_mp_size; - ux = - (mp_limb_t) (us >= 0); - un = GMP_ABS (us); - i = starting_bit / GMP_LIMB_BITS; - - /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for - u<0. Notice this test picks up all cases of u==0 too. */ - if (i >= un) - return (ux ? starting_bit : ~(mp_bitcnt_t) 0); - - up = u->_mp_d; - limb = up[i] ^ ux; - - if (ux == 0) - limb -= mpn_zero_p (up, i); /* limb = ~(~limb + zero_p) */ - - /* Mask all bits before starting_bit, thus ignoring them. */ - limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); - - return mpn_common_scan (limb, i, up, un, ux); -} - - -/* MPZ base conversion. 
*/ - -size_t -mpz_sizeinbase (const mpz_t u, int base) -{ - mp_size_t un, tn; - mp_srcptr up; - mp_ptr tp; - mp_bitcnt_t bits; - struct gmp_div_inverse bi; - size_t ndigits; - - assert (base >= 2); - assert (base <= 62); - - un = GMP_ABS (u->_mp_size); - if (un == 0) - return 1; - - up = u->_mp_d; - - bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]); - switch (base) - { - case 2: - return bits; - case 4: - return (bits + 1) / 2; - case 8: - return (bits + 2) / 3; - case 16: - return (bits + 3) / 4; - case 32: - return (bits + 4) / 5; - /* FIXME: Do something more clever for the common case of base - 10. */ - } - - tp = gmp_alloc_limbs (un); - mpn_copyi (tp, up, un); - mpn_div_qr_1_invert (&bi, base); - - tn = un; - ndigits = 0; - do - { - ndigits++; - mpn_div_qr_1_preinv (tp, tp, tn, &bi); - tn -= (tp[tn-1] == 0); - } - while (tn > 0); - - gmp_free_limbs (tp, un); - return ndigits; -} - -char * -mpz_get_str (char *sp, int base, const mpz_t u) -{ - unsigned bits; - const char *digits; - mp_size_t un; - size_t i, sn, osn; - - digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; - if (base > 1) - { - if (base <= 36) - digits = "0123456789abcdefghijklmnopqrstuvwxyz"; - else if (base > 62) - return NULL; - } - else if (base >= -1) - base = 10; - else - { - base = -base; - if (base > 36) - return NULL; - } - - sn = 1 + mpz_sizeinbase (u, base); - if (!sp) - { - osn = 1 + sn; - sp = (char *) gmp_alloc (osn); - } - else - osn = 0; - un = GMP_ABS (u->_mp_size); - - if (un == 0) - { - sp[0] = '0'; - sn = 1; - goto ret; - } - - i = 0; - - if (u->_mp_size < 0) - sp[i++] = '-'; - - bits = mpn_base_power_of_two_p (base); - - if (bits) - /* Not modified in this case. */ - sn = i + mpn_get_str_bits ((unsigned char *) sp + i, bits, u->_mp_d, un); - else - { - struct mpn_base_info info; - mp_ptr tp; - - mpn_get_base_info (&info, base); - tp = gmp_alloc_limbs (un); - mpn_copyi (tp, u->_mp_d, un); - - sn = i + mpn_get_str_other ((unsigned char *) sp + i, base, &info, tp, un); - gmp_free_limbs (tp, un); - } - - for (; i < sn; i++) - sp[i] = digits[(unsigned char) sp[i]]; - -ret: - sp[sn] = '\0'; - if (osn && osn != sn + 1) - sp = (char*) gmp_realloc (sp, osn, sn + 1); - return sp; -} - -int -mpz_set_str (mpz_t r, const char *sp, int base) -{ - unsigned bits, value_of_a; - mp_size_t rn, alloc; - mp_ptr rp; - size_t dn, sn; - int sign; - unsigned char *dp; - - assert (base == 0 || (base >= 2 && base <= 62)); - - while (isspace( (unsigned char) *sp)) - sp++; - - sign = (*sp == '-'); - sp += sign; - - if (base == 0) - { - if (sp[0] == '0') - { - if (sp[1] == 'x' || sp[1] == 'X') - { - base = 16; - sp += 2; - } - else if (sp[1] == 'b' || sp[1] == 'B') - { - base = 2; - sp += 2; - } - else - base = 8; - } - else - base = 10; - } - - if (!*sp) - { - r->_mp_size = 0; - return -1; - } - sn = strlen(sp); - dp = (unsigned char *) gmp_alloc (sn); - - value_of_a = (base > 36) ? 
36 : 10; - for (dn = 0; *sp; sp++) - { - unsigned digit; - - if (isspace ((unsigned char) *sp)) - continue; - else if (*sp >= '0' && *sp <= '9') - digit = *sp - '0'; - else if (*sp >= 'a' && *sp <= 'z') - digit = *sp - 'a' + value_of_a; - else if (*sp >= 'A' && *sp <= 'Z') - digit = *sp - 'A' + 10; - else - digit = base; /* fail */ - - if (digit >= (unsigned) base) - { - gmp_free (dp, sn); - r->_mp_size = 0; - return -1; - } - - dp[dn++] = digit; - } - - if (!dn) - { - gmp_free (dp, sn); - r->_mp_size = 0; - return -1; - } - bits = mpn_base_power_of_two_p (base); - - if (bits > 0) - { - alloc = (dn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; - rp = MPZ_REALLOC (r, alloc); - rn = mpn_set_str_bits (rp, dp, dn, bits); - } - else - { - struct mpn_base_info info; - mpn_get_base_info (&info, base); - alloc = (dn + info.exp - 1) / info.exp; - rp = MPZ_REALLOC (r, alloc); - rn = mpn_set_str_other (rp, dp, dn, base, &info); - /* Normalization, needed for all-zero input. */ - assert (rn > 0); - rn -= rp[rn-1] == 0; - } - assert (rn <= alloc); - gmp_free (dp, sn); - - r->_mp_size = sign ? - rn : rn; - - return 0; -} - -int -mpz_init_set_str (mpz_t r, const char *sp, int base) -{ - mpz_init (r); - return mpz_set_str (r, sp, base); -} - -size_t -mpz_out_str (FILE *stream, int base, const mpz_t x) -{ - char *str; - size_t len, n; - - str = mpz_get_str (NULL, base, x); - if (!str) - return 0; - len = strlen (str); - n = fwrite (str, 1, len, stream); - gmp_free (str, len + 1); - return n; -} - - -static int -gmp_detect_endian (void) -{ - static const int i = 2; - const unsigned char *p = (const unsigned char *) &i; - return 1 - *p; -} - -/* Import and export. Does not support nails. */ -void -mpz_import (mpz_t r, size_t count, int order, size_t size, int endian, - size_t nails, const void *src) -{ - const unsigned char *p; - ptrdiff_t word_step; - mp_ptr rp; - mp_size_t rn; - - /* The current (partial) limb. */ - mp_limb_t limb; - /* The number of bytes already copied to this limb (starting from - the low end). */ - size_t bytes; - /* The index where the limb should be stored, when completed. */ - mp_size_t i; - - if (nails != 0) - gmp_die ("mpz_import: Nails not supported."); - - assert (order == 1 || order == -1); - assert (endian >= -1 && endian <= 1); - - if (endian == 0) - endian = gmp_detect_endian (); - - p = (unsigned char *) src; - - word_step = (order != endian) ? 2 * size : 0; - - /* Process bytes from the least significant end, so point p at the - least significant word. */ - if (order == 1) - { - p += size * (count - 1); - word_step = - word_step; - } - - /* And at least significant byte of that word. 
*/ - if (endian == 1) - p += (size - 1); - - rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t); - rp = MPZ_REALLOC (r, rn); - - for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step) - { - size_t j; - for (j = 0; j < size; j++, p -= (ptrdiff_t) endian) - { - limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT); - if (bytes == sizeof(mp_limb_t)) - { - rp[i++] = limb; - bytes = 0; - limb = 0; - } - } - } - assert (i + (bytes > 0) == rn); - if (limb != 0) - rp[i++] = limb; - else - i = mpn_normalized_size (rp, i); - - r->_mp_size = i; -} - -void * -mpz_export (void *r, size_t *countp, int order, size_t size, int endian, - size_t nails, const mpz_t u) -{ - size_t count; - mp_size_t un; - - if (nails != 0) - gmp_die ("mpz_export: Nails not supported."); - - assert (order == 1 || order == -1); - assert (endian >= -1 && endian <= 1); - assert (size > 0 || u->_mp_size == 0); - - un = u->_mp_size; - count = 0; - if (un != 0) - { - size_t k; - unsigned char *p; - ptrdiff_t word_step; - /* The current (partial) limb. */ - mp_limb_t limb; - /* The number of bytes left to do in this limb. */ - size_t bytes; - /* The index where the limb was read. */ - mp_size_t i; - - un = GMP_ABS (un); - - /* Count bytes in top limb. */ - limb = u->_mp_d[un-1]; - assert (limb != 0); - - k = (GMP_LIMB_BITS <= CHAR_BIT); - if (!k) - { - do { - int LOCAL_CHAR_BIT = CHAR_BIT; - k++; limb >>= LOCAL_CHAR_BIT; - } while (limb != 0); - } - /* else limb = 0; */ - - count = (k + (un-1) * sizeof (mp_limb_t) + size - 1) / size; - - if (!r) - r = gmp_alloc (count * size); - - if (endian == 0) - endian = gmp_detect_endian (); - - p = (unsigned char *) r; - - word_step = (order != endian) ? 2 * size : 0; - - /* Process bytes from the least significant end, so point p at the - least significant word. */ - if (order == 1) - { - p += size * (count - 1); - word_step = - word_step; - } - - /* And at least significant byte of that word. */ - if (endian == 1) - p += (size - 1); - - for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step) - { - size_t j; - for (j = 0; j < size; ++j, p -= (ptrdiff_t) endian) - { - if (sizeof (mp_limb_t) == 1) - { - if (i < un) - *p = u->_mp_d[i++]; - else - *p = 0; - } - else - { - int LOCAL_CHAR_BIT = CHAR_BIT; - if (bytes == 0) - { - if (i < un) - limb = u->_mp_d[i++]; - bytes = sizeof (mp_limb_t); - } - *p = limb; - limb >>= LOCAL_CHAR_BIT; - bytes--; - } - } - } - assert (i == un); - assert (k == count); - } - - if (countp) - *countp = count; - - return r; -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp.h deleted file mode 100644 index f28cb360ce..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mini-gmp.h +++ /dev/null @@ -1,311 +0,0 @@ -/* mini-gmp, a minimalistic implementation of a GNU GMP subset. - -Copyright 2011-2015, 2017, 2019-2021 Free Software Foundation, Inc. - -This file is part of the GNU MP Library. - -The GNU MP Library is free software; you can redistribute it and/or modify -it under the terms of either: - - * the GNU Lesser General Public License as published by the Free - Software Foundation; either version 3 of the License, or (at your - option) any later version. - -or - - * the GNU General Public License as published by the Free Software - Foundation; either version 2 of the License, or (at your option) any - later version. - -or both in parallel, as here. 
- -The GNU MP Library is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -for more details. - -You should have received copies of the GNU General Public License and the -GNU Lesser General Public License along with the GNU MP Library. If not, -see https://www.gnu.org/licenses/. */ - -/* About mini-gmp: This is a minimal implementation of a subset of the - GMP interface. It is intended for inclusion into applications which - have modest bignums needs, as a fallback when the real GMP library - is not installed. - - This file defines the public interface. */ - -#ifndef __MINI_GMP_H__ -#define __MINI_GMP_H__ - -/* For size_t */ -#include - -#if defined (__cplusplus) -extern "C" { -#endif - -void mp_set_memory_functions (void *(*) (size_t), - void *(*) (void *, size_t, size_t), - void (*) (void *, size_t)); - -void mp_get_memory_functions (void *(**) (size_t), - void *(**) (void *, size_t, size_t), - void (**) (void *, size_t)); - -#ifndef MINI_GMP_LIMB_TYPE -#define MINI_GMP_LIMB_TYPE long -#endif - -typedef unsigned MINI_GMP_LIMB_TYPE mp_limb_t; -typedef long mp_size_t; -typedef unsigned long mp_bitcnt_t; - -typedef mp_limb_t *mp_ptr; -typedef const mp_limb_t *mp_srcptr; - -typedef struct -{ - int _mp_alloc; /* Number of *limbs* allocated and pointed - to by the _mp_d field. */ - int _mp_size; /* abs(_mp_size) is the number of limbs the - last field points to. If _mp_size is - negative this is a negative number. */ - mp_limb_t *_mp_d; /* Pointer to the limbs. */ -} __mpz_struct; - -typedef __mpz_struct mpz_t[1]; - -typedef __mpz_struct *mpz_ptr; -typedef const __mpz_struct *mpz_srcptr; - -extern const int mp_bits_per_limb; - -void mpn_copyi (mp_ptr, mp_srcptr, mp_size_t); -void mpn_copyd (mp_ptr, mp_srcptr, mp_size_t); -void mpn_zero (mp_ptr, mp_size_t); - -int mpn_cmp (mp_srcptr, mp_srcptr, mp_size_t); -int mpn_zero_p (mp_srcptr, mp_size_t); - -mp_limb_t mpn_add_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_add_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); -mp_limb_t mpn_add (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); - -mp_limb_t mpn_sub_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_sub_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); -mp_limb_t mpn_sub (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); - -mp_limb_t mpn_mul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_addmul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_submul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); - -mp_limb_t mpn_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); -void mpn_mul_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); -void mpn_sqr (mp_ptr, mp_srcptr, mp_size_t); -int mpn_perfect_square_p (mp_srcptr, mp_size_t); -mp_size_t mpn_sqrtrem (mp_ptr, mp_ptr, mp_srcptr, mp_size_t); -mp_size_t mpn_gcd (mp_ptr, mp_ptr, mp_size_t, mp_ptr, mp_size_t); - -mp_limb_t mpn_lshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); -mp_limb_t mpn_rshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); - -mp_bitcnt_t mpn_scan0 (mp_srcptr, mp_bitcnt_t); -mp_bitcnt_t mpn_scan1 (mp_srcptr, mp_bitcnt_t); - -void mpn_com (mp_ptr, mp_srcptr, mp_size_t); -mp_limb_t mpn_neg (mp_ptr, mp_srcptr, mp_size_t); - -mp_bitcnt_t mpn_popcount (mp_srcptr, mp_size_t); - -mp_limb_t mpn_invert_3by2 (mp_limb_t, mp_limb_t); -#define mpn_invert_limb(x) mpn_invert_3by2 ((x), 0) - -size_t mpn_get_str (unsigned char *, int, mp_ptr, mp_size_t); 
-mp_size_t mpn_set_str (mp_ptr, const unsigned char *, size_t, int); - -void mpz_init (mpz_t); -void mpz_init2 (mpz_t, mp_bitcnt_t); -void mpz_clear (mpz_t); - -#define mpz_odd_p(z) (((z)->_mp_size != 0) & (int) (z)->_mp_d[0]) -#define mpz_even_p(z) (! mpz_odd_p (z)) - -int mpz_sgn (const mpz_t); -int mpz_cmp_si (const mpz_t, long); -int mpz_cmp_ui (const mpz_t, unsigned long); -int mpz_cmp (const mpz_t, const mpz_t); -int mpz_cmpabs_ui (const mpz_t, unsigned long); -int mpz_cmpabs (const mpz_t, const mpz_t); -int mpz_cmp_d (const mpz_t, double); -int mpz_cmpabs_d (const mpz_t, double); - -void mpz_abs (mpz_t, const mpz_t); -void mpz_neg (mpz_t, const mpz_t); -void mpz_swap (mpz_t, mpz_t); - -void mpz_add_ui (mpz_t, const mpz_t, unsigned long); -void mpz_add (mpz_t, const mpz_t, const mpz_t); -void mpz_sub_ui (mpz_t, const mpz_t, unsigned long); -void mpz_ui_sub (mpz_t, unsigned long, const mpz_t); -void mpz_sub (mpz_t, const mpz_t, const mpz_t); - -void mpz_mul_si (mpz_t, const mpz_t, long int); -void mpz_mul_ui (mpz_t, const mpz_t, unsigned long int); -void mpz_mul (mpz_t, const mpz_t, const mpz_t); -void mpz_mul_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_addmul_ui (mpz_t, const mpz_t, unsigned long int); -void mpz_addmul (mpz_t, const mpz_t, const mpz_t); -void mpz_submul_ui (mpz_t, const mpz_t, unsigned long int); -void mpz_submul (mpz_t, const mpz_t, const mpz_t); - -void mpz_cdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_fdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_tdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_cdiv_q (mpz_t, const mpz_t, const mpz_t); -void mpz_fdiv_q (mpz_t, const mpz_t, const mpz_t); -void mpz_tdiv_q (mpz_t, const mpz_t, const mpz_t); -void mpz_cdiv_r (mpz_t, const mpz_t, const mpz_t); -void mpz_fdiv_r (mpz_t, const mpz_t, const mpz_t); -void mpz_tdiv_r (mpz_t, const mpz_t, const mpz_t); - -void mpz_cdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_fdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_tdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_cdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_fdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_tdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); - -void mpz_mod (mpz_t, const mpz_t, const mpz_t); - -void mpz_divexact (mpz_t, const mpz_t, const mpz_t); - -int mpz_divisible_p (const mpz_t, const mpz_t); -int mpz_congruent_p (const mpz_t, const mpz_t, const mpz_t); - -unsigned long mpz_cdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); -unsigned long mpz_fdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); -unsigned long mpz_tdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); -unsigned long mpz_cdiv_q_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_fdiv_q_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_tdiv_q_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_cdiv_r_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_fdiv_r_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_tdiv_r_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_cdiv_ui (const mpz_t, unsigned long); -unsigned long mpz_fdiv_ui (const mpz_t, unsigned long); -unsigned long mpz_tdiv_ui (const mpz_t, unsigned long); - -unsigned long mpz_mod_ui (mpz_t, const mpz_t, unsigned long); - -void mpz_divexact_ui (mpz_t, const mpz_t, unsigned long); - -int mpz_divisible_ui_p (const mpz_t, unsigned long); - -unsigned long mpz_gcd_ui (mpz_t, const mpz_t, unsigned long); -void mpz_gcd (mpz_t, const mpz_t, const mpz_t); -void 
mpz_gcdext (mpz_t, mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_lcm_ui (mpz_t, const mpz_t, unsigned long); -void mpz_lcm (mpz_t, const mpz_t, const mpz_t); -int mpz_invert (mpz_t, const mpz_t, const mpz_t); - -void mpz_sqrtrem (mpz_t, mpz_t, const mpz_t); -void mpz_sqrt (mpz_t, const mpz_t); -int mpz_perfect_square_p (const mpz_t); - -void mpz_pow_ui (mpz_t, const mpz_t, unsigned long); -void mpz_ui_pow_ui (mpz_t, unsigned long, unsigned long); -void mpz_powm (mpz_t, const mpz_t, const mpz_t, const mpz_t); -void mpz_powm_ui (mpz_t, const mpz_t, unsigned long, const mpz_t); - -void mpz_rootrem (mpz_t, mpz_t, const mpz_t, unsigned long); -int mpz_root (mpz_t, const mpz_t, unsigned long); - -void mpz_fac_ui (mpz_t, unsigned long); -void mpz_2fac_ui (mpz_t, unsigned long); -void mpz_mfac_uiui (mpz_t, unsigned long, unsigned long); -void mpz_bin_uiui (mpz_t, unsigned long, unsigned long); - -int mpz_probab_prime_p (const mpz_t, int); - -int mpz_tstbit (const mpz_t, mp_bitcnt_t); -void mpz_setbit (mpz_t, mp_bitcnt_t); -void mpz_clrbit (mpz_t, mp_bitcnt_t); -void mpz_combit (mpz_t, mp_bitcnt_t); - -void mpz_com (mpz_t, const mpz_t); -void mpz_and (mpz_t, const mpz_t, const mpz_t); -void mpz_ior (mpz_t, const mpz_t, const mpz_t); -void mpz_xor (mpz_t, const mpz_t, const mpz_t); - -mp_bitcnt_t mpz_popcount (const mpz_t); -mp_bitcnt_t mpz_hamdist (const mpz_t, const mpz_t); -mp_bitcnt_t mpz_scan0 (const mpz_t, mp_bitcnt_t); -mp_bitcnt_t mpz_scan1 (const mpz_t, mp_bitcnt_t); - -int mpz_fits_slong_p (const mpz_t); -int mpz_fits_ulong_p (const mpz_t); -int mpz_fits_sint_p (const mpz_t); -int mpz_fits_uint_p (const mpz_t); -int mpz_fits_sshort_p (const mpz_t); -int mpz_fits_ushort_p (const mpz_t); -long int mpz_get_si (const mpz_t); -unsigned long int mpz_get_ui (const mpz_t); -double mpz_get_d (const mpz_t); -size_t mpz_size (const mpz_t); -mp_limb_t mpz_getlimbn (const mpz_t, mp_size_t); - -void mpz_realloc2 (mpz_t, mp_bitcnt_t); -mp_srcptr mpz_limbs_read (mpz_srcptr); -mp_ptr mpz_limbs_modify (mpz_t, mp_size_t); -mp_ptr mpz_limbs_write (mpz_t, mp_size_t); -void mpz_limbs_finish (mpz_t, mp_size_t); -mpz_srcptr mpz_roinit_n (mpz_t, mp_srcptr, mp_size_t); - -#define MPZ_ROINIT_N(xp, xs) {{0, (xs),(xp) }} - -void mpz_set_si (mpz_t, signed long int); -void mpz_set_ui (mpz_t, unsigned long int); -void mpz_set (mpz_t, const mpz_t); -void mpz_set_d (mpz_t, double); - -void mpz_init_set_si (mpz_t, signed long int); -void mpz_init_set_ui (mpz_t, unsigned long int); -void mpz_init_set (mpz_t, const mpz_t); -void mpz_init_set_d (mpz_t, double); - -size_t mpz_sizeinbase (const mpz_t, int); -char *mpz_get_str (char *, int, const mpz_t); -int mpz_set_str (mpz_t, const char *, int); -int mpz_init_set_str (mpz_t, const char *, int); - -/* This long list taken from gmp.h. */ -/* For reference, "defined(EOF)" cannot be used here. In g++ 2.95.4, - defines EOF but not FILE. 
*/ -#if defined (FILE) \ - || defined (H_STDIO) \ - || defined (_H_STDIO) /* AIX */ \ - || defined (_STDIO_H) /* glibc, Sun, SCO */ \ - || defined (_STDIO_H_) /* BSD, OSF */ \ - || defined (__STDIO_H) /* Borland */ \ - || defined (__STDIO_H__) /* IRIX */ \ - || defined (_STDIO_INCLUDED) /* HPUX */ \ - || defined (__dj_include_stdio_h_) /* DJGPP */ \ - || defined (_FILE_DEFINED) /* Microsoft */ \ - || defined (__STDIO__) /* Apple MPW MrC */ \ - || defined (_MSL_STDIO_H) /* Metrowerks */ \ - || defined (_STDIO_H_INCLUDED) /* QNX4 */ \ - || defined (_ISO_STDIO_ISO_H) /* Sun C++ */ \ - || defined (__STDIO_LOADED) /* VMS */ \ - || defined (_STDIO) /* HPE NonStop */ \ - || defined (__DEFINED_FILE) /* musl */ -size_t mpz_out_str (FILE *, int, const mpz_t); -#endif - -void mpz_import (mpz_t, size_t, int, size_t, int, size_t, const void *); -void *mpz_export (void *, size_t *, int, size_t, int, size_t, const mpz_t); - -#if defined (__cplusplus) -} -#endif -#endif /* __MINI_GMP_H__ */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign_namespace.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign_namespace.h index bbfe72c13b..54e90326be 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign_namespace.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign_namespace.h @@ -18,6 +18,12 @@ #define PARAM_JOIN2(a, b) PARAM_JOIN2_(a, b) #define PARAM_NAME2(end, s) PARAM_JOIN2(end, s) +#ifndef DISABLE_NAMESPACING +#define SQISIGN_NAMESPACE_GENERIC(s) PARAM_NAME2(gen, s) +#else +#define SQISIGN_NAMESPACE_GENERIC(s) s +#endif + #if defined(SQISIGN_VARIANT) && !defined(DISABLE_NAMESPACING) #if defined(SQISIGN_BUILD_TYPE_REF) #define SQISIGN_NAMESPACE(s) PARAM_NAME3(ref, s) @@ -94,6 +100,16 @@ #define lift_basis SQISIGN_NAMESPACE(lift_basis) #define lift_basis_normalized SQISIGN_NAMESPACE(lift_basis_normalized) +// Namespacing symbols exported from basis.c, ec.c: +#undef xDBL_E0 + +#define xDBL_E0 SQISIGN_NAMESPACE(xDBL_E0) + +// Namespacing symbols exported from basis.c, ec.c, isog_chains.c: +#undef xDBL_A24 + +#define xDBL_A24 SQISIGN_NAMESPACE(xDBL_A24) + // Namespacing symbols exported from biextension.c: #undef clear_cofac #undef ec_dlog_2_tate @@ -109,6 +125,11 @@ #define reduced_tate SQISIGN_NAMESPACE(reduced_tate) #define weil SQISIGN_NAMESPACE(weil) +// Namespacing symbols exported from biextension.c, ec_jac.c, hd.c: +#undef ADD + +#define ADD SQISIGN_NAMESPACE(ADD) + // Namespacing symbols exported from common.c: #undef hash_to_challenge #undef public_key_finalize @@ -148,6 +169,28 @@ #define find_uv SQISIGN_NAMESPACE(find_uv) #define fixed_degree_isogeny_and_eval SQISIGN_NAMESPACE(fixed_degree_isogeny_and_eval) +// Namespacing symbols exported from dim2id2iso.c, encode_signature.c, id2iso.c, keygen.c, quaternion_data.c, sign.c: +#undef EXTREMAL_ORDERS +#undef QUATALG_PINFTY + +#define EXTREMAL_ORDERS SQISIGN_NAMESPACE(EXTREMAL_ORDERS) +#define QUATALG_PINFTY SQISIGN_NAMESPACE(QUATALG_PINFTY) + +// Namespacing symbols exported from dim2id2iso.c, endomorphism_action.c, id2iso.c: +#undef CURVES_WITH_ENDOMORPHISMS + +#define CURVES_WITH_ENDOMORPHISMS SQISIGN_NAMESPACE(CURVES_WITH_ENDOMORPHISMS) + +// Namespacing symbols exported from dim2id2iso.c, id2iso.c, sign.c, torsion_constants.c: +#undef TORSION_PLUS_2POWER + +#define TORSION_PLUS_2POWER SQISIGN_NAMESPACE(TORSION_PLUS_2POWER) + +// Namespacing symbols exported from dim2id2iso.c, quaternion_data.c: +#undef CONNECTING_IDEALS + +#define CONNECTING_IDEALS SQISIGN_NAMESPACE(CONNECTING_IDEALS) + 
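Each group in this generated header follows the same pattern as the CONNECTING_IDEALS block above: a global exported by one or more translation units is first #undef'd and then redefined through SQISIGN_NAMESPACE, so that differently configured builds of the same sources (the lvl1/lvl3/lvl5 ref and broadwell variants) do not collide at link time. As a minimal, self-contained sketch of the token-pasting mechanism these lines rely on — with a hypothetical prefix "sqisign_lvl1_ref" and stand-in macro names NS_JOIN/MY_NAMESPACE, since the real PARAM_NAME3/SQISIGN_VARIANT expansion is defined earlier in this header and not shown in this hunk:

/* Illustrative sketch only, not part of the patch or of the upstream header. */
#include <stdio.h>

#define NS_JOIN_(a, b) a##_##b
#define NS_JOIN(a, b) NS_JOIN_(a, b)                 /* extra level so arguments expand before pasting */
#define MY_NAMESPACE(s) NS_JOIN(sqisign_lvl1_ref, s) /* hypothetical per-variant prefix */

#define CONNECTING_IDEALS MY_NAMESPACE(CONNECTING_IDEALS)

/* This definition actually emits the symbol sqisign_lvl1_ref_CONNECTING_IDEALS,
   so a second variant built with a different prefix cannot clash with it. */
const int CONNECTING_IDEALS = 1;

int main(void)
{
    printf("%d\n", sqisign_lvl1_ref_CONNECTING_IDEALS); /* refers to the same object as above */
    return 0;
}

The two-level NS_JOIN/NS_JOIN_ indirection mirrors the PARAM_JOIN2_/PARAM_JOIN2 pair visible at the top of this hunk; the extra expansion step is what allows the prefix itself to be a macro and still paste into a single identifier.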
// Namespacing symbols exported from dim4.c: #undef ibz_inv_dim4_make_coeff_mpm #undef ibz_inv_dim4_make_coeff_pmp @@ -207,6 +250,13 @@ #define ibz_vec_4_sub SQISIGN_NAMESPACE(ibz_vec_4_sub) #define quat_qf_eval SQISIGN_NAMESPACE(quat_qf_eval) +// Namespacing symbols exported from e0_basis.c: +#undef BASIS_E0_PX +#undef BASIS_E0_QX + +#define BASIS_E0_PX SQISIGN_NAMESPACE(BASIS_E0_PX) +#define BASIS_E0_QX SQISIGN_NAMESPACE(BASIS_E0_QX) + // Namespacing symbols exported from ec.c: #undef cswap_points #undef ec_biscalar_mul @@ -235,8 +285,6 @@ #undef xDBL #undef xDBLADD #undef xDBLMUL -#undef xDBL_A24 -#undef xDBL_E0 #undef xMUL #define cswap_points SQISIGN_NAMESPACE(cswap_points) @@ -266,14 +314,9 @@ #define xDBL SQISIGN_NAMESPACE(xDBL) #define xDBLADD SQISIGN_NAMESPACE(xDBLADD) #define xDBLMUL SQISIGN_NAMESPACE(xDBLMUL) -#define xDBL_A24 SQISIGN_NAMESPACE(xDBL_A24) -#define xDBL_E0 SQISIGN_NAMESPACE(xDBL_E0) #define xMUL SQISIGN_NAMESPACE(xMUL) // Namespacing symbols exported from ec_jac.c: -#undef ADD -#undef DBL -#undef DBLW #undef copy_jac_point #undef jac_from_ws #undef jac_init @@ -284,9 +327,6 @@ #undef jac_to_xz_add_components #undef select_jac_point -#define ADD SQISIGN_NAMESPACE(ADD) -#define DBL SQISIGN_NAMESPACE(DBL) -#define DBLW SQISIGN_NAMESPACE(DBLW) #define copy_jac_point SQISIGN_NAMESPACE(copy_jac_point) #define jac_from_ws SQISIGN_NAMESPACE(jac_from_ws) #define jac_init SQISIGN_NAMESPACE(jac_init) @@ -297,6 +337,21 @@ #define jac_to_xz_add_components SQISIGN_NAMESPACE(jac_to_xz_add_components) #define select_jac_point SQISIGN_NAMESPACE(select_jac_point) +// Namespacing symbols exported from ec_jac.c, hd.c: +#undef DBLW + +#define DBLW SQISIGN_NAMESPACE(DBLW) + +// Namespacing symbols exported from ec_jac.c, hd.c, theta_isogenies.c: +#undef DBL + +#define DBL SQISIGN_NAMESPACE(DBL) + +// Namespacing symbols exported from ec_params.c: +#undef p_cofactor_for_2f + +#define p_cofactor_for_2f SQISIGN_NAMESPACE(p_cofactor_for_2f) + // Namespacing symbols exported from encode_signature.c: #undef secret_key_from_bytes #undef secret_key_to_bytes @@ -455,21 +510,24 @@ #define fp_set_one SQISIGN_NAMESPACE(fp_set_one) #define fp_set_small SQISIGN_NAMESPACE(fp_set_small) #define fp_set_zero SQISIGN_NAMESPACE(fp_set_zero) -#define ONE SQISIGN_NAMESPACE(ONE) -#define ZERO SQISIGN_NAMESPACE(ZERO) // Namespacing symbols exported from fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c, gf27500.c, gf5248.c, gf65376.c: +#undef ONE +#undef ZERO #undef fp_add #undef fp_mul #undef fp_sqr #undef fp_sub +#define ONE SQISIGN_NAMESPACE(ONE) +#define ZERO SQISIGN_NAMESPACE(ZERO) #define fp_add SQISIGN_NAMESPACE(fp_add) #define fp_mul SQISIGN_NAMESPACE(fp_mul) #define fp_sqr SQISIGN_NAMESPACE(fp_sqr) #define fp_sub SQISIGN_NAMESPACE(fp_sub) // Namespacing symbols exported from gf27500.c: +#undef gf27500_MINUS_ONE #undef gf27500_decode #undef gf27500_decode_reduce #undef gf27500_div @@ -479,6 +537,7 @@ #undef gf27500_legendre #undef gf27500_sqrt +#define gf27500_MINUS_ONE SQISIGN_NAMESPACE(gf27500_MINUS_ONE) #define gf27500_decode SQISIGN_NAMESPACE(gf27500_decode) #define gf27500_decode_reduce SQISIGN_NAMESPACE(gf27500_decode_reduce) #define gf27500_div SQISIGN_NAMESPACE(gf27500_div) @@ -500,6 +559,7 @@ #define fp2_sq_c1 SQISIGN_NAMESPACE(fp2_sq_c1) // Namespacing symbols exported from gf5248.c: +#undef gf5248_MINUS_ONE #undef gf5248_decode #undef gf5248_decode_reduce #undef gf5248_div @@ -509,6 +569,7 @@ #undef gf5248_legendre #undef gf5248_sqrt +#define gf5248_MINUS_ONE SQISIGN_NAMESPACE(gf5248_MINUS_ONE) 
#define gf5248_decode SQISIGN_NAMESPACE(gf5248_decode) #define gf5248_decode_reduce SQISIGN_NAMESPACE(gf5248_decode_reduce) #define gf5248_div SQISIGN_NAMESPACE(gf5248_div) @@ -519,6 +580,7 @@ #define gf5248_sqrt SQISIGN_NAMESPACE(gf5248_sqrt) // Namespacing symbols exported from gf65376.c: +#undef gf65376_MINUS_ONE #undef gf65376_decode #undef gf65376_decode_reduce #undef gf65376_div @@ -528,6 +590,7 @@ #undef gf65376_legendre #undef gf65376_sqrt +#define gf65376_MINUS_ONE SQISIGN_NAMESPACE(gf65376_MINUS_ONE) #define gf65376_decode SQISIGN_NAMESPACE(gf65376_decode) #define gf65376_decode_reduce SQISIGN_NAMESPACE(gf65376_decode_reduce) #define gf65376_div SQISIGN_NAMESPACE(gf65376_div) @@ -554,6 +617,22 @@ #define double_couple_point SQISIGN_NAMESPACE(double_couple_point) #define double_couple_point_iter SQISIGN_NAMESPACE(double_couple_point_iter) +// Namespacing symbols exported from hd_splitting_transforms.c: +#undef CHI_EVAL + +#define CHI_EVAL SQISIGN_NAMESPACE(CHI_EVAL) + +// Namespacing symbols exported from hd_splitting_transforms.c, theta_isogenies.c: +#undef EVEN_INDEX +#undef FP2_CONSTANTS +#undef NORMALIZATION_TRANSFORMS +#undef SPLITTING_TRANSFORMS + +#define EVEN_INDEX SQISIGN_NAMESPACE(EVEN_INDEX) +#define FP2_CONSTANTS SQISIGN_NAMESPACE(FP2_CONSTANTS) +#define NORMALIZATION_TRANSFORMS SQISIGN_NAMESPACE(NORMALIZATION_TRANSFORMS) +#define SPLITTING_TRANSFORMS SQISIGN_NAMESPACE(SPLITTING_TRANSFORMS) + // Namespacing symbols exported from hnf.c: #undef ibz_mat_4x4_is_hnf #undef ibz_mat_4xn_hnf_mod_core @@ -761,6 +840,11 @@ #define secret_key_finalize SQISIGN_NAMESPACE(secret_key_finalize) #define secret_key_init SQISIGN_NAMESPACE(secret_key_init) +// Namespacing symbols exported from keygen.c, torsion_constants.c: +#undef SEC_DEGREE + +#define SEC_DEGREE SQISIGN_NAMESPACE(SEC_DEGREE) + // Namespacing symbols exported from l2.c: #undef quat_lattice_lll #undef quat_lll_core @@ -910,6 +994,16 @@ #define quat_lattice_print SQISIGN_NAMESPACE(quat_lattice_print) #define quat_left_ideal_print SQISIGN_NAMESPACE(quat_left_ideal_print) +// Namespacing symbols exported from quaternion_data.c: +#undef CONJUGATING_ELEMENTS + +#define CONJUGATING_ELEMENTS SQISIGN_NAMESPACE(CONJUGATING_ELEMENTS) + +// Namespacing symbols exported from quaternion_data.c, sign.c: +#undef QUAT_prime_cofactor + +#define QUAT_prime_cofactor SQISIGN_NAMESPACE(QUAT_prime_cofactor) + // Namespacing symbols exported from random_input_generation.c: #undef quat_test_input_random_ideal_generation #undef quat_test_input_random_ideal_lattice_generation @@ -971,6 +1065,11 @@ #define protocols_sign SQISIGN_NAMESPACE(protocols_sign) +// Namespacing symbols exported from sign.c, torsion_constants.c: +#undef COM_DEGREE + +#define COM_DEGREE SQISIGN_NAMESPACE(COM_DEGREE) + // Namespacing symbols exported from sqisign.c: #undef sqisign_keypair #undef sqisign_open @@ -1006,6 +1105,11 @@ #define is_product_theta_point SQISIGN_NAMESPACE(is_product_theta_point) #define theta_precomputation SQISIGN_NAMESPACE(theta_precomputation) +// Namespacing symbols exported from torsion_constants.c: +#undef TWO_TO_SECURITY_BITS + +#define TWO_TO_SECURITY_BITS SQISIGN_NAMESPACE(TWO_TO_SECURITY_BITS) + // Namespacing symbols exported from verify.c: #undef protocols_verify @@ -1029,45 +1133,7 @@ #define xisog_2_singular SQISIGN_NAMESPACE(xisog_2_singular) #define xisog_4 SQISIGN_NAMESPACE(xisog_4) -// Namespacing symbols from precomp: -#undef BASIS_E0_PX -#undef BASIS_E0_QX -#undef p_cofactor_for_2f -#undef CURVES_WITH_ENDOMORPHISMS -#undef 
EVEN_INDEX -#undef CHI_EVAL -#undef FP2_CONSTANTS -#undef SPLITTING_TRANSFORMS -#undef NORMALIZATION_TRANSFORMS -#undef QUAT_prime_cofactor -#undef QUATALG_PINFTY -#undef EXTREMAL_ORDERS -#undef CONNECTING_IDEALS -#undef CONJUGATING_ELEMENTS -#undef TWO_TO_SECURITY_BITS -#undef TORSION_PLUS_2POWER -#undef SEC_DEGREE -#undef COM_DEGREE - -#define BASIS_E0_PX SQISIGN_NAMESPACE(BASIS_E0_PX) -#define BASIS_E0_QX SQISIGN_NAMESPACE(BASIS_E0_QX) -#define p_cofactor_for_2f SQISIGN_NAMESPACE(p_cofactor_for_2f) -#define CURVES_WITH_ENDOMORPHISMS SQISIGN_NAMESPACE(CURVES_WITH_ENDOMORPHISMS) -#define EVEN_INDEX SQISIGN_NAMESPACE(EVEN_INDEX) -#define CHI_EVAL SQISIGN_NAMESPACE(CHI_EVAL) -#define FP2_CONSTANTS SQISIGN_NAMESPACE(FP2_CONSTANTS) -#define SPLITTING_TRANSFORMS SQISIGN_NAMESPACE(SPLITTING_TRANSFORMS) -#define NORMALIZATION_TRANSFORMS SQISIGN_NAMESPACE(NORMALIZATION_TRANSFORMS) -#define QUAT_prime_cofactor SQISIGN_NAMESPACE(QUAT_prime_cofactor) -#define QUATALG_PINFTY SQISIGN_NAMESPACE(QUATALG_PINFTY) -#define EXTREMAL_ORDERS SQISIGN_NAMESPACE(EXTREMAL_ORDERS) -#define CONNECTING_IDEALS SQISIGN_NAMESPACE(CONNECTING_IDEALS) -#define CONJUGATING_ELEMENTS SQISIGN_NAMESPACE(CONJUGATING_ELEMENTS) -#define TWO_TO_SECURITY_BITS SQISIGN_NAMESPACE(TWO_TO_SECURITY_BITS) -#define TORSION_PLUS_2POWER SQISIGN_NAMESPACE(TORSION_PLUS_2POWER) -#define SEC_DEGREE SQISIGN_NAMESPACE(SEC_DEGREE) -#define COM_DEGREE SQISIGN_NAMESPACE(COM_DEGREE) - #endif +// This file is generated by scripts/Namespace.scala, do not edit it manually! diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c index 2aaad84dc1..eacf6e28eb 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c @@ -1231,4 +1231,4 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) } } -#endif +#endif /* RADIX_32 */ \ No newline at end of file diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c index 9ac5fc5495..b5947aaac0 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c @@ -872,4 +872,4 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) } } -#endif +#endif /* RADIX_64 */ \ No newline at end of file diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.c deleted file mode 100644 index 396d505aec..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.c +++ /dev/null @@ -1,73 +0,0 @@ -#include -#include -#if defined(MINI_GMP) -#include "mini-gmp.h" -#else -// This configuration is used only for testing -#include -#endif -#include - -// Exported for testing -int -mini_mpz_legendre(const mpz_t a, const mpz_t p) -{ - int res = 0; - mpz_t e; - mpz_init_set(e, p); - mpz_sub_ui(e, e, 1); - mpz_fdiv_q_2exp(e, e, 1); - mpz_powm(e, a, e, p); - - if (mpz_cmp_ui(e, 1) <= 0) { - res = mpz_get_si(e); - } else { - res = -1; - } - mpz_clear(e); - return res; -} - -#if defined(MINI_GMP) -int -mpz_legendre(const mpz_t a, const mpz_t p) -{ - return mini_mpz_legendre(a, p); -} -#endif - -// Exported for testing -double -mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op) -{ - double ret; - int tmp_exp; - mpz_t tmp; - - // Handle the case where op is 0 - if (mpz_cmp_ui(op, 0) == 0) { - *exp = 0; - return 0.0; - } - - *exp = 
mpz_sizeinbase(op, 2); - - mpz_init_set(tmp, op); - - if (*exp > DBL_MAX_EXP) { - mpz_fdiv_q_2exp(tmp, tmp, *exp - DBL_MAX_EXP); - } - - ret = frexp(mpz_get_d(tmp), &tmp_exp); - mpz_clear(tmp); - - return ret; -} - -#if defined(MINI_GMP) -double -mpz_get_d_2exp(signed long int *exp, const mpz_t op) -{ - return mini_mpz_get_d_2exp(exp, op); -} -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.h deleted file mode 100644 index 0113cfdfe6..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp-extra.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef MINI_GMP_EXTRA_H -#define MINI_GMP_EXTRA_H - -#if defined MINI_GMP -#include "mini-gmp.h" - -typedef long mp_exp_t; - -int mpz_legendre(const mpz_t a, const mpz_t p); -double mpz_get_d_2exp(signed long int *exp, const mpz_t op); -#else -// This configuration is used only for testing -#include -#endif - -int mini_mpz_legendre(const mpz_t a, const mpz_t p); -double mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op); - -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp.c deleted file mode 100644 index 3830ab2031..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp.c +++ /dev/null @@ -1,4671 +0,0 @@ -/* Note: The code from mini-gmp is modifed from the original by - commenting out the definition of GMP_LIMB_BITS */ - -/* - mini-gmp, a minimalistic implementation of a GNU GMP subset. - - Contributed to the GNU project by Niels Möller - Additional functionalities and improvements by Marco Bodrato. - -Copyright 1991-1997, 1999-2022 Free Software Foundation, Inc. - -This file is part of the GNU MP Library. - -The GNU MP Library is free software; you can redistribute it and/or modify -it under the terms of either: - - * the GNU Lesser General Public License as published by the Free - Software Foundation; either version 3 of the License, or (at your - option) any later version. - -or - - * the GNU General Public License as published by the Free Software - Foundation; either version 2 of the License, or (at your option) any - later version. - -or both in parallel, as here. - -The GNU MP Library is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -for more details. - -You should have received copies of the GNU General Public License and the -GNU Lesser General Public License along with the GNU MP Library. If not, -see https://www.gnu.org/licenses/. */ - -/* NOTE: All functions in this file which are not declared in - mini-gmp.h are internal, and are not intended to be compatible - with GMP or with future versions of mini-gmp. */ - -/* Much of the material copied from GMP files, including: gmp-impl.h, - longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c, - mpn/generic/lshift.c, mpn/generic/mul_1.c, - mpn/generic/mul_basecase.c, mpn/generic/rshift.c, - mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c, - mpn/generic/submul_1.c. 
*/ - -#include -#include -#include -#include -#include -#include - -#include "mini-gmp.h" - -#if !defined(MINI_GMP_DONT_USE_FLOAT_H) -#include -#endif - - -/* Macros */ -/* Removed from here as it is passed as a compiler command-line definition */ -/* #define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT) */ - -#define GMP_LIMB_MAX ((mp_limb_t) ~ (mp_limb_t) 0) -#define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1)) - -#define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2)) -#define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1) - -#define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT) -#define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1)) - -#define GMP_ABS(x) ((x) >= 0 ? (x) : -(x)) -#define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1)) - -#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b)) -#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b)) - -#define GMP_CMP(a,b) (((a) > (b)) - ((a) < (b))) - -#if defined(DBL_MANT_DIG) && FLT_RADIX == 2 -#define GMP_DBL_MANT_BITS DBL_MANT_DIG -#else -#define GMP_DBL_MANT_BITS (53) -#endif - -/* Return non-zero if xp,xsize and yp,ysize overlap. - If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no - overlap. If both these are false, there's an overlap. */ -#define GMP_MPN_OVERLAP_P(xp, xsize, yp, ysize) \ - ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp)) - -#define gmp_assert_nocarry(x) do { \ - mp_limb_t __cy = (x); \ - assert (__cy == 0); \ - (void) (__cy); \ - } while (0) - -#define gmp_clz(count, x) do { \ - mp_limb_t __clz_x = (x); \ - unsigned __clz_c = 0; \ - int LOCAL_SHIFT_BITS = 8; \ - if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) \ - for (; \ - (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \ - __clz_c += 8) \ - { __clz_x <<= LOCAL_SHIFT_BITS; } \ - for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++) \ - __clz_x <<= 1; \ - (count) = __clz_c; \ - } while (0) - -#define gmp_ctz(count, x) do { \ - mp_limb_t __ctz_x = (x); \ - unsigned __ctz_c = 0; \ - gmp_clz (__ctz_c, __ctz_x & - __ctz_x); \ - (count) = GMP_LIMB_BITS - 1 - __ctz_c; \ - } while (0) - -#define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \ - do { \ - mp_limb_t __x; \ - __x = (al) + (bl); \ - (sh) = (ah) + (bh) + (__x < (al)); \ - (sl) = __x; \ - } while (0) - -#define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \ - do { \ - mp_limb_t __x; \ - __x = (al) - (bl); \ - (sh) = (ah) - (bh) - ((al) < (bl)); \ - (sl) = __x; \ - } while (0) - -#define gmp_umul_ppmm(w1, w0, u, v) \ - do { \ - int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; \ - if (sizeof(unsigned int) * CHAR_BIT >= 2 * GMP_LIMB_BITS) \ - { \ - unsigned int __ww = (unsigned int) (u) * (v); \ - w0 = (mp_limb_t) __ww; \ - w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ - } \ - else if (GMP_ULONG_BITS >= 2 * GMP_LIMB_BITS) \ - { \ - unsigned long int __ww = (unsigned long int) (u) * (v); \ - w0 = (mp_limb_t) __ww; \ - w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ - } \ - else { \ - mp_limb_t __x0, __x1, __x2, __x3; \ - unsigned __ul, __vl, __uh, __vh; \ - mp_limb_t __u = (u), __v = (v); \ - assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); \ - \ - __ul = __u & GMP_LLIMB_MASK; \ - __uh = __u >> (GMP_LIMB_BITS / 2); \ - __vl = __v & GMP_LLIMB_MASK; \ - __vh = __v >> (GMP_LIMB_BITS / 2); \ - \ - __x0 = (mp_limb_t) __ul * __vl; \ - __x1 = (mp_limb_t) __ul * __vh; \ - __x2 = (mp_limb_t) __uh * __vl; \ - __x3 = (mp_limb_t) __uh * __vh; \ - \ - __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */ \ - __x1 += __x2; /* but this indeed can */ \ - if (__x1 < __x2) /* did we get it? 
*/ \ - __x3 += GMP_HLIMB_BIT; /* yes, add it in the proper pos. */ \ - \ - (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \ - (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \ - } \ - } while (0) - -/* If mp_limb_t is of size smaller than int, plain u*v implies - automatic promotion to *signed* int, and then multiply may overflow - and cause undefined behavior. Explicitly cast to unsigned int for - that case. */ -#define gmp_umullo_limb(u, v) \ - ((sizeof(mp_limb_t) >= sizeof(int)) ? (u)*(v) : (unsigned int)(u) * (v)) - -#define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di) \ - do { \ - mp_limb_t _qh, _ql, _r, _mask; \ - gmp_umul_ppmm (_qh, _ql, (nh), (di)); \ - gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \ - _r = (nl) - gmp_umullo_limb (_qh, (d)); \ - _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \ - _qh += _mask; \ - _r += _mask & (d); \ - if (_r >= (d)) \ - { \ - _r -= (d); \ - _qh++; \ - } \ - \ - (r) = _r; \ - (q) = _qh; \ - } while (0) - -#define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \ - do { \ - mp_limb_t _q0, _t1, _t0, _mask; \ - gmp_umul_ppmm ((q), _q0, (n2), (dinv)); \ - gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \ - \ - /* Compute the two most significant limbs of n - q'd */ \ - (r1) = (n1) - gmp_umullo_limb ((d1), (q)); \ - gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \ - gmp_umul_ppmm (_t1, _t0, (d0), (q)); \ - gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \ - (q)++; \ - \ - /* Conditionally adjust q and the remainders */ \ - _mask = - (mp_limb_t) ((r1) >= _q0); \ - (q) += _mask; \ - gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \ - if ((r1) >= (d1)) \ - { \ - if ((r1) > (d1) || (r0) >= (d0)) \ - { \ - (q)++; \ - gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \ - } \ - } \ - } while (0) - -/* Swap macros. */ -#define MP_LIMB_T_SWAP(x, y) \ - do { \ - mp_limb_t __mp_limb_t_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_limb_t_swap__tmp; \ - } while (0) -#define MP_SIZE_T_SWAP(x, y) \ - do { \ - mp_size_t __mp_size_t_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_size_t_swap__tmp; \ - } while (0) -#define MP_BITCNT_T_SWAP(x,y) \ - do { \ - mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_bitcnt_t_swap__tmp; \ - } while (0) -#define MP_PTR_SWAP(x, y) \ - do { \ - mp_ptr __mp_ptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_ptr_swap__tmp; \ - } while (0) -#define MP_SRCPTR_SWAP(x, y) \ - do { \ - mp_srcptr __mp_srcptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_srcptr_swap__tmp; \ - } while (0) - -#define MPN_PTR_SWAP(xp,xs, yp,ys) \ - do { \ - MP_PTR_SWAP (xp, yp); \ - MP_SIZE_T_SWAP (xs, ys); \ - } while(0) -#define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \ - do { \ - MP_SRCPTR_SWAP (xp, yp); \ - MP_SIZE_T_SWAP (xs, ys); \ - } while(0) - -#define MPZ_PTR_SWAP(x, y) \ - do { \ - mpz_ptr __mpz_ptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mpz_ptr_swap__tmp; \ - } while (0) -#define MPZ_SRCPTR_SWAP(x, y) \ - do { \ - mpz_srcptr __mpz_srcptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mpz_srcptr_swap__tmp; \ - } while (0) - -const int mp_bits_per_limb = GMP_LIMB_BITS; - - -/* Memory allocation and other helper functions. 
*/ -static void -gmp_die (const char *msg) -{ - fprintf (stderr, "%s\n", msg); - abort(); -} - -static void * -gmp_default_alloc (size_t size) -{ - void *p; - - assert (size > 0); - - p = malloc (size); - if (!p) - gmp_die("gmp_default_alloc: Virtual memory exhausted."); - - return p; -} - -static void * -gmp_default_realloc (void *old, size_t unused_old_size, size_t new_size) -{ - void * p; - - p = realloc (old, new_size); - - if (!p) - gmp_die("gmp_default_realloc: Virtual memory exhausted."); - - return p; -} - -static void -gmp_default_free (void *p, size_t unused_size) -{ - free (p); -} - -static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc; -static void * (*gmp_reallocate_func) (void *, size_t, size_t) = gmp_default_realloc; -static void (*gmp_free_func) (void *, size_t) = gmp_default_free; - -void -mp_get_memory_functions (void *(**alloc_func) (size_t), - void *(**realloc_func) (void *, size_t, size_t), - void (**free_func) (void *, size_t)) -{ - if (alloc_func) - *alloc_func = gmp_allocate_func; - - if (realloc_func) - *realloc_func = gmp_reallocate_func; - - if (free_func) - *free_func = gmp_free_func; -} - -void -mp_set_memory_functions (void *(*alloc_func) (size_t), - void *(*realloc_func) (void *, size_t, size_t), - void (*free_func) (void *, size_t)) -{ - if (!alloc_func) - alloc_func = gmp_default_alloc; - if (!realloc_func) - realloc_func = gmp_default_realloc; - if (!free_func) - free_func = gmp_default_free; - - gmp_allocate_func = alloc_func; - gmp_reallocate_func = realloc_func; - gmp_free_func = free_func; -} - -#define gmp_alloc(size) ((*gmp_allocate_func)((size))) -#define gmp_free(p, size) ((*gmp_free_func) ((p), (size))) -#define gmp_realloc(ptr, old_size, size) ((*gmp_reallocate_func)(ptr, old_size, size)) - -static mp_ptr -gmp_alloc_limbs (mp_size_t size) -{ - return (mp_ptr) gmp_alloc (size * sizeof (mp_limb_t)); -} - -static mp_ptr -gmp_realloc_limbs (mp_ptr old, mp_size_t old_size, mp_size_t size) -{ - assert (size > 0); - return (mp_ptr) gmp_realloc (old, old_size * sizeof (mp_limb_t), size * sizeof (mp_limb_t)); -} - -static void -gmp_free_limbs (mp_ptr old, mp_size_t size) -{ - gmp_free (old, size * sizeof (mp_limb_t)); -} - - -/* MPN interface */ - -void -mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n) -{ - mp_size_t i; - for (i = 0; i < n; i++) - d[i] = s[i]; -} - -void -mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n) -{ - while (--n >= 0) - d[n] = s[n]; -} - -int -mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - while (--n >= 0) - { - if (ap[n] != bp[n]) - return ap[n] > bp[n] ? 1 : -1; - } - return 0; -} - -static int -mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) -{ - if (an != bn) - return an < bn ? 
-1 : 1; - else - return mpn_cmp (ap, bp, an); -} - -static mp_size_t -mpn_normalized_size (mp_srcptr xp, mp_size_t n) -{ - while (n > 0 && xp[n-1] == 0) - --n; - return n; -} - -int -mpn_zero_p(mp_srcptr rp, mp_size_t n) -{ - return mpn_normalized_size (rp, n) == 0; -} - -void -mpn_zero (mp_ptr rp, mp_size_t n) -{ - while (--n >= 0) - rp[n] = 0; -} - -mp_limb_t -mpn_add_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) -{ - mp_size_t i; - - assert (n > 0); - i = 0; - do - { - mp_limb_t r = ap[i] + b; - /* Carry out */ - b = (r < b); - rp[i] = r; - } - while (++i < n); - - return b; -} - -mp_limb_t -mpn_add_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - mp_size_t i; - mp_limb_t cy; - - for (i = 0, cy = 0; i < n; i++) - { - mp_limb_t a, b, r; - a = ap[i]; b = bp[i]; - r = a + cy; - cy = (r < cy); - r += b; - cy += (r < b); - rp[i] = r; - } - return cy; -} - -mp_limb_t -mpn_add (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) -{ - mp_limb_t cy; - - assert (an >= bn); - - cy = mpn_add_n (rp, ap, bp, bn); - if (an > bn) - cy = mpn_add_1 (rp + bn, ap + bn, an - bn, cy); - return cy; -} - -mp_limb_t -mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) -{ - mp_size_t i; - - assert (n > 0); - - i = 0; - do - { - mp_limb_t a = ap[i]; - /* Carry out */ - mp_limb_t cy = a < b; - rp[i] = a - b; - b = cy; - } - while (++i < n); - - return b; -} - -mp_limb_t -mpn_sub_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - mp_size_t i; - mp_limb_t cy; - - for (i = 0, cy = 0; i < n; i++) - { - mp_limb_t a, b; - a = ap[i]; b = bp[i]; - b += cy; - cy = (b < cy); - cy += (a < b); - rp[i] = a - b; - } - return cy; -} - -mp_limb_t -mpn_sub (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) -{ - mp_limb_t cy; - - assert (an >= bn); - - cy = mpn_sub_n (rp, ap, bp, bn); - if (an > bn) - cy = mpn_sub_1 (rp + bn, ap + bn, an - bn, cy); - return cy; -} - -mp_limb_t -mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) -{ - mp_limb_t ul, cl, hpl, lpl; - - assert (n >= 1); - - cl = 0; - do - { - ul = *up++; - gmp_umul_ppmm (hpl, lpl, ul, vl); - - lpl += cl; - cl = (lpl < cl) + hpl; - - *rp++ = lpl; - } - while (--n != 0); - - return cl; -} - -mp_limb_t -mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) -{ - mp_limb_t ul, cl, hpl, lpl, rl; - - assert (n >= 1); - - cl = 0; - do - { - ul = *up++; - gmp_umul_ppmm (hpl, lpl, ul, vl); - - lpl += cl; - cl = (lpl < cl) + hpl; - - rl = *rp; - lpl = rl + lpl; - cl += lpl < rl; - *rp++ = lpl; - } - while (--n != 0); - - return cl; -} - -mp_limb_t -mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) -{ - mp_limb_t ul, cl, hpl, lpl, rl; - - assert (n >= 1); - - cl = 0; - do - { - ul = *up++; - gmp_umul_ppmm (hpl, lpl, ul, vl); - - lpl += cl; - cl = (lpl < cl) + hpl; - - rl = *rp; - lpl = rl - lpl; - cl += lpl > rl; - *rp++ = lpl; - } - while (--n != 0); - - return cl; -} - -mp_limb_t -mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn) -{ - assert (un >= vn); - assert (vn >= 1); - assert (!GMP_MPN_OVERLAP_P(rp, un + vn, up, un)); - assert (!GMP_MPN_OVERLAP_P(rp, un + vn, vp, vn)); - - /* We first multiply by the low order limb. This result can be - stored, not added, to rp. We also avoid a loop for zeroing this - way. */ - - rp[un] = mpn_mul_1 (rp, up, un, vp[0]); - - /* Now accumulate the product of up[] and the next higher limb from - vp[]. 
*/ - - while (--vn >= 1) - { - rp += 1, vp += 1; - rp[un] = mpn_addmul_1 (rp, up, un, vp[0]); - } - return rp[un]; -} - -void -mpn_mul_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - mpn_mul (rp, ap, n, bp, n); -} - -void -mpn_sqr (mp_ptr rp, mp_srcptr ap, mp_size_t n) -{ - mpn_mul (rp, ap, n, ap, n); -} - -mp_limb_t -mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) -{ - mp_limb_t high_limb, low_limb; - unsigned int tnc; - mp_limb_t retval; - - assert (n >= 1); - assert (cnt >= 1); - assert (cnt < GMP_LIMB_BITS); - - up += n; - rp += n; - - tnc = GMP_LIMB_BITS - cnt; - low_limb = *--up; - retval = low_limb >> tnc; - high_limb = (low_limb << cnt); - - while (--n != 0) - { - low_limb = *--up; - *--rp = high_limb | (low_limb >> tnc); - high_limb = (low_limb << cnt); - } - *--rp = high_limb; - - return retval; -} - -mp_limb_t -mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) -{ - mp_limb_t high_limb, low_limb; - unsigned int tnc; - mp_limb_t retval; - - assert (n >= 1); - assert (cnt >= 1); - assert (cnt < GMP_LIMB_BITS); - - tnc = GMP_LIMB_BITS - cnt; - high_limb = *up++; - retval = (high_limb << tnc); - low_limb = high_limb >> cnt; - - while (--n != 0) - { - high_limb = *up++; - *rp++ = low_limb | (high_limb << tnc); - low_limb = high_limb >> cnt; - } - *rp = low_limb; - - return retval; -} - -static mp_bitcnt_t -mpn_common_scan (mp_limb_t limb, mp_size_t i, mp_srcptr up, mp_size_t un, - mp_limb_t ux) -{ - unsigned cnt; - - assert (ux == 0 || ux == GMP_LIMB_MAX); - assert (0 <= i && i <= un ); - - while (limb == 0) - { - i++; - if (i == un) - return (ux == 0 ? ~(mp_bitcnt_t) 0 : un * GMP_LIMB_BITS); - limb = ux ^ up[i]; - } - gmp_ctz (cnt, limb); - return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt; -} - -mp_bitcnt_t -mpn_scan1 (mp_srcptr ptr, mp_bitcnt_t bit) -{ - mp_size_t i; - i = bit / GMP_LIMB_BITS; - - return mpn_common_scan ( ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), - i, ptr, i, 0); -} - -mp_bitcnt_t -mpn_scan0 (mp_srcptr ptr, mp_bitcnt_t bit) -{ - mp_size_t i; - i = bit / GMP_LIMB_BITS; - - return mpn_common_scan (~ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), - i, ptr, i, GMP_LIMB_MAX); -} - -void -mpn_com (mp_ptr rp, mp_srcptr up, mp_size_t n) -{ - while (--n >= 0) - *rp++ = ~ *up++; -} - -mp_limb_t -mpn_neg (mp_ptr rp, mp_srcptr up, mp_size_t n) -{ - while (*up == 0) - { - *rp = 0; - if (!--n) - return 0; - ++up; ++rp; - } - *rp = - *up; - mpn_com (++rp, ++up, --n); - return 1; -} - - -/* MPN division interface. */ - -/* The 3/2 inverse is defined as - - m = floor( (B^3-1) / (B u1 + u0)) - B -*/ -mp_limb_t -mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0) -{ - mp_limb_t r, m; - - { - mp_limb_t p, ql; - unsigned ul, uh, qh; - - assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); - /* For notation, let b denote the half-limb base, so that B = b^2. - Split u1 = b uh + ul. */ - ul = u1 & GMP_LLIMB_MASK; - uh = u1 >> (GMP_LIMB_BITS / 2); - - /* Approximation of the high half of quotient. Differs from the 2/1 - inverse of the half limb uh, since we have already subtracted - u0. */ - qh = (u1 ^ GMP_LIMB_MAX) / uh; - - /* Adjust to get a half-limb 3/2 inverse, i.e., we want - - qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u - = floor( (b (~u) + b-1) / u), - - and the remainder - - r = b (~u) + b-1 - qh (b uh + ul) - = b (~u - qh uh) + b-1 - qh ul - - Subtraction of qh ul may underflow, which implies adjustments. - But by normalization, 2 u >= B > qh ul, so we need to adjust by - at most 2. 
- */ - - r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK; - - p = (mp_limb_t) qh * ul; - /* Adjustment steps taken from udiv_qrnnd_c */ - if (r < p) - { - qh--; - r += u1; - if (r >= u1) /* i.e. we didn't get carry when adding to r */ - if (r < p) - { - qh--; - r += u1; - } - } - r -= p; - - /* Low half of the quotient is - - ql = floor ( (b r + b-1) / u1). - - This is a 3/2 division (on half-limbs), for which qh is a - suitable inverse. */ - - p = (r >> (GMP_LIMB_BITS / 2)) * qh + r; - /* Unlike full-limb 3/2, we can add 1 without overflow. For this to - work, it is essential that ql is a full mp_limb_t. */ - ql = (p >> (GMP_LIMB_BITS / 2)) + 1; - - /* By the 3/2 trick, we don't need the high half limb. */ - r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1; - - if (r >= (GMP_LIMB_MAX & (p << (GMP_LIMB_BITS / 2)))) - { - ql--; - r += u1; - } - m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql; - if (r >= u1) - { - m++; - r -= u1; - } - } - - /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a - 3/2 inverse. */ - if (u0 > 0) - { - mp_limb_t th, tl; - r = ~r; - r += u0; - if (r < u0) - { - m--; - if (r >= u1) - { - m--; - r -= u1; - } - r -= u1; - } - gmp_umul_ppmm (th, tl, u0, m); - r += th; - if (r < th) - { - m--; - m -= ((r > u1) | ((r == u1) & (tl > u0))); - } - } - - return m; -} - -struct gmp_div_inverse -{ - /* Normalization shift count. */ - unsigned shift; - /* Normalized divisor (d0 unused for mpn_div_qr_1) */ - mp_limb_t d1, d0; - /* Inverse, for 2/1 or 3/2. */ - mp_limb_t di; -}; - -static void -mpn_div_qr_1_invert (struct gmp_div_inverse *inv, mp_limb_t d) -{ - unsigned shift; - - assert (d > 0); - gmp_clz (shift, d); - inv->shift = shift; - inv->d1 = d << shift; - inv->di = mpn_invert_limb (inv->d1); -} - -static void -mpn_div_qr_2_invert (struct gmp_div_inverse *inv, - mp_limb_t d1, mp_limb_t d0) -{ - unsigned shift; - - assert (d1 > 0); - gmp_clz (shift, d1); - inv->shift = shift; - if (shift > 0) - { - d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); - d0 <<= shift; - } - inv->d1 = d1; - inv->d0 = d0; - inv->di = mpn_invert_3by2 (d1, d0); -} - -static void -mpn_div_qr_invert (struct gmp_div_inverse *inv, - mp_srcptr dp, mp_size_t dn) -{ - assert (dn > 0); - - if (dn == 1) - mpn_div_qr_1_invert (inv, dp[0]); - else if (dn == 2) - mpn_div_qr_2_invert (inv, dp[1], dp[0]); - else - { - unsigned shift; - mp_limb_t d1, d0; - - d1 = dp[dn-1]; - d0 = dp[dn-2]; - assert (d1 > 0); - gmp_clz (shift, d1); - inv->shift = shift; - if (shift > 0) - { - d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); - d0 = (d0 << shift) | (dp[dn-3] >> (GMP_LIMB_BITS - shift)); - } - inv->d1 = d1; - inv->d0 = d0; - inv->di = mpn_invert_3by2 (d1, d0); - } -} - -/* Not matching current public gmp interface, rather corresponding to - the sbpi1_div_* functions. */ -static mp_limb_t -mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn, - const struct gmp_div_inverse *inv) -{ - mp_limb_t d, di; - mp_limb_t r; - mp_ptr tp = NULL; - mp_size_t tn = 0; - - if (inv->shift > 0) - { - /* Shift, reusing qp area if possible. In-place shift if qp == np. 
*/ - tp = qp; - if (!tp) - { - tn = nn; - tp = gmp_alloc_limbs (tn); - } - r = mpn_lshift (tp, np, nn, inv->shift); - np = tp; - } - else - r = 0; - - d = inv->d1; - di = inv->di; - while (--nn >= 0) - { - mp_limb_t q; - - gmp_udiv_qrnnd_preinv (q, r, r, np[nn], d, di); - if (qp) - qp[nn] = q; - } - if (tn) - gmp_free_limbs (tp, tn); - - return r >> inv->shift; -} - -static void -mpn_div_qr_2_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, - const struct gmp_div_inverse *inv) -{ - unsigned shift; - mp_size_t i; - mp_limb_t d1, d0, di, r1, r0; - - assert (nn >= 2); - shift = inv->shift; - d1 = inv->d1; - d0 = inv->d0; - di = inv->di; - - if (shift > 0) - r1 = mpn_lshift (np, np, nn, shift); - else - r1 = 0; - - r0 = np[nn - 1]; - - i = nn - 2; - do - { - mp_limb_t n0, q; - n0 = np[i]; - gmp_udiv_qr_3by2 (q, r1, r0, r1, r0, n0, d1, d0, di); - - if (qp) - qp[i] = q; - } - while (--i >= 0); - - if (shift > 0) - { - assert ((r0 & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - shift))) == 0); - r0 = (r0 >> shift) | (r1 << (GMP_LIMB_BITS - shift)); - r1 >>= shift; - } - - np[1] = r1; - np[0] = r0; -} - -static void -mpn_div_qr_pi1 (mp_ptr qp, - mp_ptr np, mp_size_t nn, mp_limb_t n1, - mp_srcptr dp, mp_size_t dn, - mp_limb_t dinv) -{ - mp_size_t i; - - mp_limb_t d1, d0; - mp_limb_t cy, cy1; - mp_limb_t q; - - assert (dn > 2); - assert (nn >= dn); - - d1 = dp[dn - 1]; - d0 = dp[dn - 2]; - - assert ((d1 & GMP_LIMB_HIGHBIT) != 0); - /* Iteration variable is the index of the q limb. - * - * We divide - * by - */ - - i = nn - dn; - do - { - mp_limb_t n0 = np[dn-1+i]; - - if (n1 == d1 && n0 == d0) - { - q = GMP_LIMB_MAX; - mpn_submul_1 (np+i, dp, dn, q); - n1 = np[dn-1+i]; /* update n1, last loop's value will now be invalid */ - } - else - { - gmp_udiv_qr_3by2 (q, n1, n0, n1, n0, np[dn-2+i], d1, d0, dinv); - - cy = mpn_submul_1 (np + i, dp, dn-2, q); - - cy1 = n0 < cy; - n0 = n0 - cy; - cy = n1 < cy1; - n1 = n1 - cy1; - np[dn-2+i] = n0; - - if (cy != 0) - { - n1 += d1 + mpn_add_n (np + i, np + i, dp, dn - 1); - q--; - } - } - - if (qp) - qp[i] = q; - } - while (--i >= 0); - - np[dn - 1] = n1; -} - -static void -mpn_div_qr_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, - mp_srcptr dp, mp_size_t dn, - const struct gmp_div_inverse *inv) -{ - assert (dn > 0); - assert (nn >= dn); - - if (dn == 1) - np[0] = mpn_div_qr_1_preinv (qp, np, nn, inv); - else if (dn == 2) - mpn_div_qr_2_preinv (qp, np, nn, inv); - else - { - mp_limb_t nh; - unsigned shift; - - assert (inv->d1 == dp[dn-1]); - assert (inv->d0 == dp[dn-2]); - assert ((inv->d1 & GMP_LIMB_HIGHBIT) != 0); - - shift = inv->shift; - if (shift > 0) - nh = mpn_lshift (np, np, nn, shift); - else - nh = 0; - - mpn_div_qr_pi1 (qp, np, nn, nh, dp, dn, inv->di); - - if (shift > 0) - gmp_assert_nocarry (mpn_rshift (np, np, dn, shift)); - } -} - -static void -mpn_div_qr (mp_ptr qp, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn) -{ - struct gmp_div_inverse inv; - mp_ptr tp = NULL; - - assert (dn > 0); - assert (nn >= dn); - - mpn_div_qr_invert (&inv, dp, dn); - if (dn > 2 && inv.shift > 0) - { - tp = gmp_alloc_limbs (dn); - gmp_assert_nocarry (mpn_lshift (tp, dp, dn, inv.shift)); - dp = tp; - } - mpn_div_qr_preinv (qp, np, nn, dp, dn, &inv); - if (tp) - gmp_free_limbs (tp, dn); -} - - -/* MPN base conversion. 
*/ -static unsigned -mpn_base_power_of_two_p (unsigned b) -{ - switch (b) - { - case 2: return 1; - case 4: return 2; - case 8: return 3; - case 16: return 4; - case 32: return 5; - case 64: return 6; - case 128: return 7; - case 256: return 8; - default: return 0; - } -} - -struct mpn_base_info -{ - /* bb is the largest power of the base which fits in one limb, and - exp is the corresponding exponent. */ - unsigned exp; - mp_limb_t bb; -}; - -static void -mpn_get_base_info (struct mpn_base_info *info, mp_limb_t b) -{ - mp_limb_t m; - mp_limb_t p; - unsigned exp; - - m = GMP_LIMB_MAX / b; - for (exp = 1, p = b; p <= m; exp++) - p *= b; - - info->exp = exp; - info->bb = p; -} - -static mp_bitcnt_t -mpn_limb_size_in_base_2 (mp_limb_t u) -{ - unsigned shift; - - assert (u > 0); - gmp_clz (shift, u); - return GMP_LIMB_BITS - shift; -} - -static size_t -mpn_get_str_bits (unsigned char *sp, unsigned bits, mp_srcptr up, mp_size_t un) -{ - unsigned char mask; - size_t sn, j; - mp_size_t i; - unsigned shift; - - sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]) - + bits - 1) / bits; - - mask = (1U << bits) - 1; - - for (i = 0, j = sn, shift = 0; j-- > 0;) - { - unsigned char digit = up[i] >> shift; - - shift += bits; - - if (shift >= GMP_LIMB_BITS && ++i < un) - { - shift -= GMP_LIMB_BITS; - digit |= up[i] << (bits - shift); - } - sp[j] = digit & mask; - } - return sn; -} - -/* We generate digits from the least significant end, and reverse at - the end. */ -static size_t -mpn_limb_get_str (unsigned char *sp, mp_limb_t w, - const struct gmp_div_inverse *binv) -{ - mp_size_t i; - for (i = 0; w > 0; i++) - { - mp_limb_t h, l, r; - - h = w >> (GMP_LIMB_BITS - binv->shift); - l = w << binv->shift; - - gmp_udiv_qrnnd_preinv (w, r, h, l, binv->d1, binv->di); - assert ((r & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - binv->shift))) == 0); - r >>= binv->shift; - - sp[i] = r; - } - return i; -} - -static size_t -mpn_get_str_other (unsigned char *sp, - int base, const struct mpn_base_info *info, - mp_ptr up, mp_size_t un) -{ - struct gmp_div_inverse binv; - size_t sn; - size_t i; - - mpn_div_qr_1_invert (&binv, base); - - sn = 0; - - if (un > 1) - { - struct gmp_div_inverse bbinv; - mpn_div_qr_1_invert (&bbinv, info->bb); - - do - { - mp_limb_t w; - size_t done; - w = mpn_div_qr_1_preinv (up, up, un, &bbinv); - un -= (up[un-1] == 0); - done = mpn_limb_get_str (sp + sn, w, &binv); - - for (sn += done; done < info->exp; done++) - sp[sn++] = 0; - } - while (un > 1); - } - sn += mpn_limb_get_str (sp + sn, up[0], &binv); - - /* Reverse order */ - for (i = 0; 2*i + 1 < sn; i++) - { - unsigned char t = sp[i]; - sp[i] = sp[sn - i - 1]; - sp[sn - i - 1] = t; - } - - return sn; -} - -size_t -mpn_get_str (unsigned char *sp, int base, mp_ptr up, mp_size_t un) -{ - unsigned bits; - - assert (un > 0); - assert (up[un-1] > 0); - - bits = mpn_base_power_of_two_p (base); - if (bits) - return mpn_get_str_bits (sp, bits, up, un); - else - { - struct mpn_base_info info; - - mpn_get_base_info (&info, base); - return mpn_get_str_other (sp, base, &info, up, un); - } -} - -static mp_size_t -mpn_set_str_bits (mp_ptr rp, const unsigned char *sp, size_t sn, - unsigned bits) -{ - mp_size_t rn; - mp_limb_t limb; - unsigned shift; - - for (limb = 0, rn = 0, shift = 0; sn-- > 0; ) - { - limb |= (mp_limb_t) sp[sn] << shift; - shift += bits; - if (shift >= GMP_LIMB_BITS) - { - shift -= GMP_LIMB_BITS; - rp[rn++] = limb; - /* Next line is correct also if shift == 0, - bits == 8, and mp_limb_t == unsigned char. 
*/ - limb = (unsigned int) sp[sn] >> (bits - shift); - } - } - if (limb != 0) - rp[rn++] = limb; - else - rn = mpn_normalized_size (rp, rn); - return rn; -} - -/* Result is usually normalized, except for all-zero input, in which - case a single zero limb is written at *RP, and 1 is returned. */ -static mp_size_t -mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn, - mp_limb_t b, const struct mpn_base_info *info) -{ - mp_size_t rn; - mp_limb_t w; - unsigned k; - size_t j; - - assert (sn > 0); - - k = 1 + (sn - 1) % info->exp; - - j = 0; - w = sp[j++]; - while (--k != 0) - w = w * b + sp[j++]; - - rp[0] = w; - - for (rn = 1; j < sn;) - { - mp_limb_t cy; - - w = sp[j++]; - for (k = 1; k < info->exp; k++) - w = w * b + sp[j++]; - - cy = mpn_mul_1 (rp, rp, rn, info->bb); - cy += mpn_add_1 (rp, rp, rn, w); - if (cy > 0) - rp[rn++] = cy; - } - assert (j == sn); - - return rn; -} - -mp_size_t -mpn_set_str (mp_ptr rp, const unsigned char *sp, size_t sn, int base) -{ - unsigned bits; - - if (sn == 0) - return 0; - - bits = mpn_base_power_of_two_p (base); - if (bits) - return mpn_set_str_bits (rp, sp, sn, bits); - else - { - struct mpn_base_info info; - - mpn_get_base_info (&info, base); - return mpn_set_str_other (rp, sp, sn, base, &info); - } -} - - -/* MPZ interface */ -void -mpz_init (mpz_t r) -{ - static const mp_limb_t dummy_limb = GMP_LIMB_MAX & 0xc1a0; - - r->_mp_alloc = 0; - r->_mp_size = 0; - r->_mp_d = (mp_ptr) &dummy_limb; -} - -/* The utility of this function is a bit limited, since many functions - assigns the result variable using mpz_swap. */ -void -mpz_init2 (mpz_t r, mp_bitcnt_t bits) -{ - mp_size_t rn; - - bits -= (bits != 0); /* Round down, except if 0 */ - rn = 1 + bits / GMP_LIMB_BITS; - - r->_mp_alloc = rn; - r->_mp_size = 0; - r->_mp_d = gmp_alloc_limbs (rn); -} - -void -mpz_clear (mpz_t r) -{ - if (r->_mp_alloc) - gmp_free_limbs (r->_mp_d, r->_mp_alloc); -} - -static mp_ptr -mpz_realloc (mpz_t r, mp_size_t size) -{ - size = GMP_MAX (size, 1); - - if (r->_mp_alloc) - r->_mp_d = gmp_realloc_limbs (r->_mp_d, r->_mp_alloc, size); - else - r->_mp_d = gmp_alloc_limbs (size); - r->_mp_alloc = size; - - if (GMP_ABS (r->_mp_size) > size) - r->_mp_size = 0; - - return r->_mp_d; -} - -/* Realloc for an mpz_t WHAT if it has less than NEEDED limbs. */ -#define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc \ - ? mpz_realloc(z,n) \ - : (z)->_mp_d) - -/* MPZ assignment and basic conversions. 
*/ -void -mpz_set_si (mpz_t r, signed long int x) -{ - if (x >= 0) - mpz_set_ui (r, x); - else /* (x < 0) */ - if (GMP_LIMB_BITS < GMP_ULONG_BITS) - { - mpz_set_ui (r, GMP_NEG_CAST (unsigned long int, x)); - mpz_neg (r, r); - } - else - { - r->_mp_size = -1; - MPZ_REALLOC (r, 1)[0] = GMP_NEG_CAST (unsigned long int, x); - } -} - -void -mpz_set_ui (mpz_t r, unsigned long int x) -{ - if (x > 0) - { - r->_mp_size = 1; - MPZ_REALLOC (r, 1)[0] = x; - if (GMP_LIMB_BITS < GMP_ULONG_BITS) - { - int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; - while (x >>= LOCAL_GMP_LIMB_BITS) - { - ++ r->_mp_size; - MPZ_REALLOC (r, r->_mp_size)[r->_mp_size - 1] = x; - } - } - } - else - r->_mp_size = 0; -} - -void -mpz_set (mpz_t r, const mpz_t x) -{ - /* Allow the NOP r == x */ - if (r != x) - { - mp_size_t n; - mp_ptr rp; - - n = GMP_ABS (x->_mp_size); - rp = MPZ_REALLOC (r, n); - - mpn_copyi (rp, x->_mp_d, n); - r->_mp_size = x->_mp_size; - } -} - -void -mpz_init_set_si (mpz_t r, signed long int x) -{ - mpz_init (r); - mpz_set_si (r, x); -} - -void -mpz_init_set_ui (mpz_t r, unsigned long int x) -{ - mpz_init (r); - mpz_set_ui (r, x); -} - -void -mpz_init_set (mpz_t r, const mpz_t x) -{ - mpz_init (r); - mpz_set (r, x); -} - -int -mpz_fits_slong_p (const mpz_t u) -{ - return mpz_cmp_si (u, LONG_MAX) <= 0 && mpz_cmp_si (u, LONG_MIN) >= 0; -} - -static int -mpn_absfits_ulong_p (mp_srcptr up, mp_size_t un) -{ - int ulongsize = GMP_ULONG_BITS / GMP_LIMB_BITS; - mp_limb_t ulongrem = 0; - - if (GMP_ULONG_BITS % GMP_LIMB_BITS != 0) - ulongrem = (mp_limb_t) (ULONG_MAX >> GMP_LIMB_BITS * ulongsize) + 1; - - return un <= ulongsize || (up[ulongsize] < ulongrem && un == ulongsize + 1); -} - -int -mpz_fits_ulong_p (const mpz_t u) -{ - mp_size_t us = u->_mp_size; - - return us >= 0 && mpn_absfits_ulong_p (u->_mp_d, us); -} - -int -mpz_fits_sint_p (const mpz_t u) -{ - return mpz_cmp_si (u, INT_MAX) <= 0 && mpz_cmp_si (u, INT_MIN) >= 0; -} - -int -mpz_fits_uint_p (const mpz_t u) -{ - return u->_mp_size >= 0 && mpz_cmpabs_ui (u, UINT_MAX) <= 0; -} - -int -mpz_fits_sshort_p (const mpz_t u) -{ - return mpz_cmp_si (u, SHRT_MAX) <= 0 && mpz_cmp_si (u, SHRT_MIN) >= 0; -} - -int -mpz_fits_ushort_p (const mpz_t u) -{ - return u->_mp_size >= 0 && mpz_cmpabs_ui (u, USHRT_MAX) <= 0; -} - -long int -mpz_get_si (const mpz_t u) -{ - unsigned long r = mpz_get_ui (u); - unsigned long c = -LONG_MAX - LONG_MIN; - - if (u->_mp_size < 0) - /* This expression is necessary to properly handle -LONG_MIN */ - return -(long) c - (long) ((r - c) & LONG_MAX); - else - return (long) (r & LONG_MAX); -} - -unsigned long int -mpz_get_ui (const mpz_t u) -{ - if (GMP_LIMB_BITS < GMP_ULONG_BITS) - { - int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; - unsigned long r = 0; - mp_size_t n = GMP_ABS (u->_mp_size); - n = GMP_MIN (n, 1 + (mp_size_t) (GMP_ULONG_BITS - 1) / GMP_LIMB_BITS); - while (--n >= 0) - r = (r << LOCAL_GMP_LIMB_BITS) + u->_mp_d[n]; - return r; - } - - return u->_mp_size == 0 ? 
0 : u->_mp_d[0]; -} - -size_t -mpz_size (const mpz_t u) -{ - return GMP_ABS (u->_mp_size); -} - -mp_limb_t -mpz_getlimbn (const mpz_t u, mp_size_t n) -{ - if (n >= 0 && n < GMP_ABS (u->_mp_size)) - return u->_mp_d[n]; - else - return 0; -} - -void -mpz_realloc2 (mpz_t x, mp_bitcnt_t n) -{ - mpz_realloc (x, 1 + (n - (n != 0)) / GMP_LIMB_BITS); -} - -mp_srcptr -mpz_limbs_read (mpz_srcptr x) -{ - return x->_mp_d; -} - -mp_ptr -mpz_limbs_modify (mpz_t x, mp_size_t n) -{ - assert (n > 0); - return MPZ_REALLOC (x, n); -} - -mp_ptr -mpz_limbs_write (mpz_t x, mp_size_t n) -{ - return mpz_limbs_modify (x, n); -} - -void -mpz_limbs_finish (mpz_t x, mp_size_t xs) -{ - mp_size_t xn; - xn = mpn_normalized_size (x->_mp_d, GMP_ABS (xs)); - x->_mp_size = xs < 0 ? -xn : xn; -} - -static mpz_srcptr -mpz_roinit_normal_n (mpz_t x, mp_srcptr xp, mp_size_t xs) -{ - x->_mp_alloc = 0; - x->_mp_d = (mp_ptr) xp; - x->_mp_size = xs; - return x; -} - -mpz_srcptr -mpz_roinit_n (mpz_t x, mp_srcptr xp, mp_size_t xs) -{ - mpz_roinit_normal_n (x, xp, xs); - mpz_limbs_finish (x, xs); - return x; -} - - -/* Conversions and comparison to double. */ -void -mpz_set_d (mpz_t r, double x) -{ - int sign; - mp_ptr rp; - mp_size_t rn, i; - double B; - double Bi; - mp_limb_t f; - - /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is - zero or infinity. */ - if (x != x || x == x * 0.5) - { - r->_mp_size = 0; - return; - } - - sign = x < 0.0 ; - if (sign) - x = - x; - - if (x < 1.0) - { - r->_mp_size = 0; - return; - } - B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); - Bi = 1.0 / B; - for (rn = 1; x >= B; rn++) - x *= Bi; - - rp = MPZ_REALLOC (r, rn); - - f = (mp_limb_t) x; - x -= f; - assert (x < 1.0); - i = rn-1; - rp[i] = f; - while (--i >= 0) - { - x = B * x; - f = (mp_limb_t) x; - x -= f; - assert (x < 1.0); - rp[i] = f; - } - - r->_mp_size = sign ? - rn : rn; -} - -void -mpz_init_set_d (mpz_t r, double x) -{ - mpz_init (r); - mpz_set_d (r, x); -} - -double -mpz_get_d (const mpz_t u) -{ - int m; - mp_limb_t l; - mp_size_t un; - double x; - double B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); - - un = GMP_ABS (u->_mp_size); - - if (un == 0) - return 0.0; - - l = u->_mp_d[--un]; - gmp_clz (m, l); - m = m + GMP_DBL_MANT_BITS - GMP_LIMB_BITS; - if (m < 0) - l &= GMP_LIMB_MAX << -m; - - for (x = l; --un >= 0;) - { - x = B*x; - if (m > 0) { - l = u->_mp_d[un]; - m -= GMP_LIMB_BITS; - if (m < 0) - l &= GMP_LIMB_MAX << -m; - x += l; - } - } - - if (u->_mp_size < 0) - x = -x; - - return x; -} - -int -mpz_cmpabs_d (const mpz_t x, double d) -{ - mp_size_t xn; - double B, Bi; - mp_size_t i; - - xn = x->_mp_size; - d = GMP_ABS (d); - - if (xn != 0) - { - xn = GMP_ABS (xn); - - B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); - Bi = 1.0 / B; - - /* Scale d so it can be compared with the top limb. */ - for (i = 1; i < xn; i++) - d *= Bi; - - if (d >= B) - return -1; - - /* Compare floor(d) to top limb, subtract and cancel when equal. */ - for (i = xn; i-- > 0;) - { - mp_limb_t f, xl; - - f = (mp_limb_t) d; - xl = x->_mp_d[i]; - if (xl > f) - return 1; - else if (xl < f) - return -1; - d = B * (d - f); - } - } - return - (d > 0.0); -} - -int -mpz_cmp_d (const mpz_t x, double d) -{ - if (x->_mp_size < 0) - { - if (d >= 0.0) - return -1; - else - return -mpz_cmpabs_d (x, d); - } - else - { - if (d < 0.0) - return 1; - else - return mpz_cmpabs_d (x, d); - } -} - - -/* MPZ comparisons and the like. 
*/ -int -mpz_sgn (const mpz_t u) -{ - return GMP_CMP (u->_mp_size, 0); -} - -int -mpz_cmp_si (const mpz_t u, long v) -{ - mp_size_t usize = u->_mp_size; - - if (v >= 0) - return mpz_cmp_ui (u, v); - else if (usize >= 0) - return 1; - else - return - mpz_cmpabs_ui (u, GMP_NEG_CAST (unsigned long int, v)); -} - -int -mpz_cmp_ui (const mpz_t u, unsigned long v) -{ - mp_size_t usize = u->_mp_size; - - if (usize < 0) - return -1; - else - return mpz_cmpabs_ui (u, v); -} - -int -mpz_cmp (const mpz_t a, const mpz_t b) -{ - mp_size_t asize = a->_mp_size; - mp_size_t bsize = b->_mp_size; - - if (asize != bsize) - return (asize < bsize) ? -1 : 1; - else if (asize >= 0) - return mpn_cmp (a->_mp_d, b->_mp_d, asize); - else - return mpn_cmp (b->_mp_d, a->_mp_d, -asize); -} - -int -mpz_cmpabs_ui (const mpz_t u, unsigned long v) -{ - mp_size_t un = GMP_ABS (u->_mp_size); - - if (! mpn_absfits_ulong_p (u->_mp_d, un)) - return 1; - else - { - unsigned long uu = mpz_get_ui (u); - return GMP_CMP(uu, v); - } -} - -int -mpz_cmpabs (const mpz_t u, const mpz_t v) -{ - return mpn_cmp4 (u->_mp_d, GMP_ABS (u->_mp_size), - v->_mp_d, GMP_ABS (v->_mp_size)); -} - -void -mpz_abs (mpz_t r, const mpz_t u) -{ - mpz_set (r, u); - r->_mp_size = GMP_ABS (r->_mp_size); -} - -void -mpz_neg (mpz_t r, const mpz_t u) -{ - mpz_set (r, u); - r->_mp_size = -r->_mp_size; -} - -void -mpz_swap (mpz_t u, mpz_t v) -{ - MP_SIZE_T_SWAP (u->_mp_alloc, v->_mp_alloc); - MPN_PTR_SWAP (u->_mp_d, u->_mp_size, v->_mp_d, v->_mp_size); -} - - -/* MPZ addition and subtraction */ - - -void -mpz_add_ui (mpz_t r, const mpz_t a, unsigned long b) -{ - mpz_t bb; - mpz_init_set_ui (bb, b); - mpz_add (r, a, bb); - mpz_clear (bb); -} - -void -mpz_sub_ui (mpz_t r, const mpz_t a, unsigned long b) -{ - mpz_ui_sub (r, b, a); - mpz_neg (r, r); -} - -void -mpz_ui_sub (mpz_t r, unsigned long a, const mpz_t b) -{ - mpz_neg (r, b); - mpz_add_ui (r, r, a); -} - -static mp_size_t -mpz_abs_add (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t an = GMP_ABS (a->_mp_size); - mp_size_t bn = GMP_ABS (b->_mp_size); - mp_ptr rp; - mp_limb_t cy; - - if (an < bn) - { - MPZ_SRCPTR_SWAP (a, b); - MP_SIZE_T_SWAP (an, bn); - } - - rp = MPZ_REALLOC (r, an + 1); - cy = mpn_add (rp, a->_mp_d, an, b->_mp_d, bn); - - rp[an] = cy; - - return an + cy; -} - -static mp_size_t -mpz_abs_sub (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t an = GMP_ABS (a->_mp_size); - mp_size_t bn = GMP_ABS (b->_mp_size); - int cmp; - mp_ptr rp; - - cmp = mpn_cmp4 (a->_mp_d, an, b->_mp_d, bn); - if (cmp > 0) - { - rp = MPZ_REALLOC (r, an); - gmp_assert_nocarry (mpn_sub (rp, a->_mp_d, an, b->_mp_d, bn)); - return mpn_normalized_size (rp, an); - } - else if (cmp < 0) - { - rp = MPZ_REALLOC (r, bn); - gmp_assert_nocarry (mpn_sub (rp, b->_mp_d, bn, a->_mp_d, an)); - return -mpn_normalized_size (rp, bn); - } - else - return 0; -} - -void -mpz_add (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t rn; - - if ( (a->_mp_size ^ b->_mp_size) >= 0) - rn = mpz_abs_add (r, a, b); - else - rn = mpz_abs_sub (r, a, b); - - r->_mp_size = a->_mp_size >= 0 ? rn : - rn; -} - -void -mpz_sub (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t rn; - - if ( (a->_mp_size ^ b->_mp_size) >= 0) - rn = mpz_abs_sub (r, a, b); - else - rn = mpz_abs_add (r, a, b); - - r->_mp_size = a->_mp_size >= 0 ? 
rn : - rn; -} - - -/* MPZ multiplication */ -void -mpz_mul_si (mpz_t r, const mpz_t u, long int v) -{ - if (v < 0) - { - mpz_mul_ui (r, u, GMP_NEG_CAST (unsigned long int, v)); - mpz_neg (r, r); - } - else - mpz_mul_ui (r, u, v); -} - -void -mpz_mul_ui (mpz_t r, const mpz_t u, unsigned long int v) -{ - mpz_t vv; - mpz_init_set_ui (vv, v); - mpz_mul (r, u, vv); - mpz_clear (vv); - return; -} - -void -mpz_mul (mpz_t r, const mpz_t u, const mpz_t v) -{ - int sign; - mp_size_t un, vn, rn; - mpz_t t; - mp_ptr tp; - - un = u->_mp_size; - vn = v->_mp_size; - - if (un == 0 || vn == 0) - { - r->_mp_size = 0; - return; - } - - sign = (un ^ vn) < 0; - - un = GMP_ABS (un); - vn = GMP_ABS (vn); - - mpz_init2 (t, (un + vn) * GMP_LIMB_BITS); - - tp = t->_mp_d; - if (un >= vn) - mpn_mul (tp, u->_mp_d, un, v->_mp_d, vn); - else - mpn_mul (tp, v->_mp_d, vn, u->_mp_d, un); - - rn = un + vn; - rn -= tp[rn-1] == 0; - - t->_mp_size = sign ? - rn : rn; - mpz_swap (r, t); - mpz_clear (t); -} - -void -mpz_mul_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bits) -{ - mp_size_t un, rn; - mp_size_t limbs; - unsigned shift; - mp_ptr rp; - - un = GMP_ABS (u->_mp_size); - if (un == 0) - { - r->_mp_size = 0; - return; - } - - limbs = bits / GMP_LIMB_BITS; - shift = bits % GMP_LIMB_BITS; - - rn = un + limbs + (shift > 0); - rp = MPZ_REALLOC (r, rn); - if (shift > 0) - { - mp_limb_t cy = mpn_lshift (rp + limbs, u->_mp_d, un, shift); - rp[rn-1] = cy; - rn -= (cy == 0); - } - else - mpn_copyd (rp + limbs, u->_mp_d, un); - - mpn_zero (rp, limbs); - - r->_mp_size = (u->_mp_size < 0) ? - rn : rn; -} - -void -mpz_addmul_ui (mpz_t r, const mpz_t u, unsigned long int v) -{ - mpz_t t; - mpz_init_set_ui (t, v); - mpz_mul (t, u, t); - mpz_add (r, r, t); - mpz_clear (t); -} - -void -mpz_submul_ui (mpz_t r, const mpz_t u, unsigned long int v) -{ - mpz_t t; - mpz_init_set_ui (t, v); - mpz_mul (t, u, t); - mpz_sub (r, r, t); - mpz_clear (t); -} - -void -mpz_addmul (mpz_t r, const mpz_t u, const mpz_t v) -{ - mpz_t t; - mpz_init (t); - mpz_mul (t, u, v); - mpz_add (r, r, t); - mpz_clear (t); -} - -void -mpz_submul (mpz_t r, const mpz_t u, const mpz_t v) -{ - mpz_t t; - mpz_init (t); - mpz_mul (t, u, v); - mpz_sub (r, r, t); - mpz_clear (t); -} - - -/* MPZ division */ -enum mpz_div_round_mode { GMP_DIV_FLOOR, GMP_DIV_CEIL, GMP_DIV_TRUNC }; - -/* Allows q or r to be zero. Returns 1 iff remainder is non-zero. */ -static int -mpz_div_qr (mpz_t q, mpz_t r, - const mpz_t n, const mpz_t d, enum mpz_div_round_mode mode) -{ - mp_size_t ns, ds, nn, dn, qs; - ns = n->_mp_size; - ds = d->_mp_size; - - if (ds == 0) - gmp_die("mpz_div_qr: Divide by zero."); - - if (ns == 0) - { - if (q) - q->_mp_size = 0; - if (r) - r->_mp_size = 0; - return 0; - } - - nn = GMP_ABS (ns); - dn = GMP_ABS (ds); - - qs = ds ^ ns; - - if (nn < dn) - { - if (mode == GMP_DIV_CEIL && qs >= 0) - { - /* q = 1, r = n - d */ - if (r) - mpz_sub (r, n, d); - if (q) - mpz_set_ui (q, 1); - } - else if (mode == GMP_DIV_FLOOR && qs < 0) - { - /* q = -1, r = n + d */ - if (r) - mpz_add (r, n, d); - if (q) - mpz_set_si (q, -1); - } - else - { - /* q = 0, r = d */ - if (r) - mpz_set (r, n); - if (q) - q->_mp_size = 0; - } - return 1; - } - else - { - mp_ptr np, qp; - mp_size_t qn, rn; - mpz_t tq, tr; - - mpz_init_set (tr, n); - np = tr->_mp_d; - - qn = nn - dn + 1; - - if (q) - { - mpz_init2 (tq, qn * GMP_LIMB_BITS); - qp = tq->_mp_d; - } - else - qp = NULL; - - mpn_div_qr (qp, np, nn, d->_mp_d, dn); - - if (qp) - { - qn -= (qp[qn-1] == 0); - - tq->_mp_size = qs < 0 ? 
-qn : qn; - } - rn = mpn_normalized_size (np, dn); - tr->_mp_size = ns < 0 ? - rn : rn; - - if (mode == GMP_DIV_FLOOR && qs < 0 && rn != 0) - { - if (q) - mpz_sub_ui (tq, tq, 1); - if (r) - mpz_add (tr, tr, d); - } - else if (mode == GMP_DIV_CEIL && qs >= 0 && rn != 0) - { - if (q) - mpz_add_ui (tq, tq, 1); - if (r) - mpz_sub (tr, tr, d); - } - - if (q) - { - mpz_swap (tq, q); - mpz_clear (tq); - } - if (r) - mpz_swap (tr, r); - - mpz_clear (tr); - - return rn != 0; - } -} - -void -mpz_cdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, r, n, d, GMP_DIV_CEIL); -} - -void -mpz_fdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, r, n, d, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, r, n, d, GMP_DIV_TRUNC); -} - -void -mpz_cdiv_q (mpz_t q, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, NULL, n, d, GMP_DIV_CEIL); -} - -void -mpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, NULL, n, d, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_q (mpz_t q, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC); -} - -void -mpz_cdiv_r (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL); -} - -void -mpz_fdiv_r (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_r (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, GMP_DIV_TRUNC); -} - -void -mpz_mod (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, d->_mp_size >= 0 ? GMP_DIV_FLOOR : GMP_DIV_CEIL); -} - -static void -mpz_div_q_2exp (mpz_t q, const mpz_t u, mp_bitcnt_t bit_index, - enum mpz_div_round_mode mode) -{ - mp_size_t un, qn; - mp_size_t limb_cnt; - mp_ptr qp; - int adjust; - - un = u->_mp_size; - if (un == 0) - { - q->_mp_size = 0; - return; - } - limb_cnt = bit_index / GMP_LIMB_BITS; - qn = GMP_ABS (un) - limb_cnt; - bit_index %= GMP_LIMB_BITS; - - if (mode == ((un > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* un != 0 here. */ - /* Note: Below, the final indexing at limb_cnt is valid because at - that point we have qn > 0. */ - adjust = (qn <= 0 - || !mpn_zero_p (u->_mp_d, limb_cnt) - || (u->_mp_d[limb_cnt] - & (((mp_limb_t) 1 << bit_index) - 1))); - else - adjust = 0; - - if (qn <= 0) - qn = 0; - else - { - qp = MPZ_REALLOC (q, qn); - - if (bit_index != 0) - { - mpn_rshift (qp, u->_mp_d + limb_cnt, qn, bit_index); - qn -= qp[qn - 1] == 0; - } - else - { - mpn_copyi (qp, u->_mp_d + limb_cnt, qn); - } - } - - q->_mp_size = qn; - - if (adjust) - mpz_add_ui (q, q, 1); - if (un < 0) - mpz_neg (q, q); -} - -static void -mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index, - enum mpz_div_round_mode mode) -{ - mp_size_t us, un, rn; - mp_ptr rp; - mp_limb_t mask; - - us = u->_mp_size; - if (us == 0 || bit_index == 0) - { - r->_mp_size = 0; - return; - } - rn = (bit_index + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; - assert (rn > 0); - - rp = MPZ_REALLOC (r, rn); - un = GMP_ABS (us); - - mask = GMP_LIMB_MAX >> (rn * GMP_LIMB_BITS - bit_index); - - if (rn > un) - { - /* Quotient (with truncation) is zero, and remainder is - non-zero */ - if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ - { - /* Have to negate and sign extend. */ - mp_size_t i; - - gmp_assert_nocarry (! 
mpn_neg (rp, u->_mp_d, un)); - for (i = un; i < rn - 1; i++) - rp[i] = GMP_LIMB_MAX; - - rp[rn-1] = mask; - us = -us; - } - else - { - /* Just copy */ - if (r != u) - mpn_copyi (rp, u->_mp_d, un); - - rn = un; - } - } - else - { - if (r != u) - mpn_copyi (rp, u->_mp_d, rn - 1); - - rp[rn-1] = u->_mp_d[rn-1] & mask; - - if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ - { - /* If r != 0, compute 2^{bit_count} - r. */ - mpn_neg (rp, rp, rn); - - rp[rn-1] &= mask; - - /* us is not used for anything else, so we can modify it - here to indicate flipped sign. */ - us = -us; - } - } - rn = mpn_normalized_size (rp, rn); - r->_mp_size = us < 0 ? -rn : rn; -} - -void -mpz_cdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_q_2exp (r, u, cnt, GMP_DIV_CEIL); -} - -void -mpz_fdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_q_2exp (r, u, cnt, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_q_2exp (r, u, cnt, GMP_DIV_TRUNC); -} - -void -mpz_cdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_r_2exp (r, u, cnt, GMP_DIV_CEIL); -} - -void -mpz_fdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_r_2exp (r, u, cnt, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_r_2exp (r, u, cnt, GMP_DIV_TRUNC); -} - -void -mpz_divexact (mpz_t q, const mpz_t n, const mpz_t d) -{ - gmp_assert_nocarry (mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC)); -} - -int -mpz_divisible_p (const mpz_t n, const mpz_t d) -{ - return mpz_div_qr (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; -} - -int -mpz_congruent_p (const mpz_t a, const mpz_t b, const mpz_t m) -{ - mpz_t t; - int res; - - /* a == b (mod 0) iff a == b */ - if (mpz_sgn (m) == 0) - return (mpz_cmp (a, b) == 0); - - mpz_init (t); - mpz_sub (t, a, b); - res = mpz_divisible_p (t, m); - mpz_clear (t); - - return res; -} - -static unsigned long -mpz_div_qr_ui (mpz_t q, mpz_t r, - const mpz_t n, unsigned long d, enum mpz_div_round_mode mode) -{ - unsigned long ret; - mpz_t rr, dd; - - mpz_init (rr); - mpz_init_set_ui (dd, d); - mpz_div_qr (q, rr, n, dd, mode); - mpz_clear (dd); - ret = mpz_get_ui (rr); - - if (r) - mpz_swap (r, rr); - mpz_clear (rr); - - return ret; -} - -unsigned long -mpz_cdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, r, n, d, GMP_DIV_CEIL); -} - -unsigned long -mpz_fdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, r, n, d, GMP_DIV_FLOOR); -} - -unsigned long -mpz_tdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, r, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_cdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_CEIL); -} - -unsigned long -mpz_fdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_FLOOR); -} - -unsigned long -mpz_tdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_cdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_CEIL); -} -unsigned long -mpz_fdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); -} -unsigned long -mpz_tdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_cdiv_ui 
(const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_CEIL); -} - -unsigned long -mpz_fdiv_ui (const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_FLOOR); -} - -unsigned long -mpz_tdiv_ui (const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_mod_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); -} - -void -mpz_divexact_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - gmp_assert_nocarry (mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC)); -} - -int -mpz_divisible_ui_p (const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; -} - - -/* GCD */ -static mp_limb_t -mpn_gcd_11 (mp_limb_t u, mp_limb_t v) -{ - unsigned shift; - - assert ( (u | v) > 0); - - if (u == 0) - return v; - else if (v == 0) - return u; - - gmp_ctz (shift, u | v); - - u >>= shift; - v >>= shift; - - if ( (u & 1) == 0) - MP_LIMB_T_SWAP (u, v); - - while ( (v & 1) == 0) - v >>= 1; - - while (u != v) - { - if (u > v) - { - u -= v; - do - u >>= 1; - while ( (u & 1) == 0); - } - else - { - v -= u; - do - v >>= 1; - while ( (v & 1) == 0); - } - } - return u << shift; -} - -mp_size_t -mpn_gcd (mp_ptr rp, mp_ptr up, mp_size_t un, mp_ptr vp, mp_size_t vn) -{ - assert (un >= vn); - assert (vn > 0); - assert (!GMP_MPN_OVERLAP_P (up, un, vp, vn)); - assert (vp[vn-1] > 0); - assert ((up[0] | vp[0]) & 1); - - if (un > vn) - mpn_div_qr (NULL, up, un, vp, vn); - - un = mpn_normalized_size (up, vn); - if (un == 0) - { - mpn_copyi (rp, vp, vn); - return vn; - } - - if (!(vp[0] & 1)) - MPN_PTR_SWAP (up, un, vp, vn); - - while (un > 1 || vn > 1) - { - int shift; - assert (vp[0] & 1); - - while (up[0] == 0) - { - up++; - un--; - } - gmp_ctz (shift, up[0]); - if (shift > 0) - { - gmp_assert_nocarry (mpn_rshift(up, up, un, shift)); - un -= (up[un-1] == 0); - } - - if (un < vn) - MPN_PTR_SWAP (up, un, vp, vn); - else if (un == vn) - { - int c = mpn_cmp (up, vp, un); - if (c == 0) - { - mpn_copyi (rp, up, un); - return un; - } - else if (c < 0) - MP_PTR_SWAP (up, vp); - } - - gmp_assert_nocarry (mpn_sub (up, up, un, vp, vn)); - un = mpn_normalized_size (up, un); - } - rp[0] = mpn_gcd_11 (up[0], vp[0]); - return 1; -} - -unsigned long -mpz_gcd_ui (mpz_t g, const mpz_t u, unsigned long v) -{ - mpz_t t; - mpz_init_set_ui(t, v); - mpz_gcd (t, u, t); - if (v > 0) - v = mpz_get_ui (t); - - if (g) - mpz_swap (t, g); - - mpz_clear (t); - - return v; -} - -static mp_bitcnt_t -mpz_make_odd (mpz_t r) -{ - mp_bitcnt_t shift; - - assert (r->_mp_size > 0); - /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ - shift = mpn_scan1 (r->_mp_d, 0); - mpz_tdiv_q_2exp (r, r, shift); - - return shift; -} - -void -mpz_gcd (mpz_t g, const mpz_t u, const mpz_t v) -{ - mpz_t tu, tv; - mp_bitcnt_t uz, vz, gz; - - if (u->_mp_size == 0) - { - mpz_abs (g, v); - return; - } - if (v->_mp_size == 0) - { - mpz_abs (g, u); - return; - } - - mpz_init (tu); - mpz_init (tv); - - mpz_abs (tu, u); - uz = mpz_make_odd (tu); - mpz_abs (tv, v); - vz = mpz_make_odd (tv); - gz = GMP_MIN (uz, vz); - - if (tu->_mp_size < tv->_mp_size) - mpz_swap (tu, tv); - - tu->_mp_size = mpn_gcd (tu->_mp_d, tu->_mp_d, tu->_mp_size, tv->_mp_d, tv->_mp_size); - mpz_mul_2exp (g, tu, gz); - - mpz_clear (tu); - mpz_clear (tv); -} - -void -mpz_gcdext (mpz_t g, mpz_t s, mpz_t t, const mpz_t u, const mpz_t v) -{ - mpz_t tu, tv, s0, s1, t0, t1; - mp_bitcnt_t uz, vz, 
gz; - mp_bitcnt_t power; - int cmp; - - if (u->_mp_size == 0) - { - /* g = 0 u + sgn(v) v */ - signed long sign = mpz_sgn (v); - mpz_abs (g, v); - if (s) - s->_mp_size = 0; - if (t) - mpz_set_si (t, sign); - return; - } - - if (v->_mp_size == 0) - { - /* g = sgn(u) u + 0 v */ - signed long sign = mpz_sgn (u); - mpz_abs (g, u); - if (s) - mpz_set_si (s, sign); - if (t) - t->_mp_size = 0; - return; - } - - mpz_init (tu); - mpz_init (tv); - mpz_init (s0); - mpz_init (s1); - mpz_init (t0); - mpz_init (t1); - - mpz_abs (tu, u); - uz = mpz_make_odd (tu); - mpz_abs (tv, v); - vz = mpz_make_odd (tv); - gz = GMP_MIN (uz, vz); - - uz -= gz; - vz -= gz; - - /* Cofactors corresponding to odd gcd. gz handled later. */ - if (tu->_mp_size < tv->_mp_size) - { - mpz_swap (tu, tv); - MPZ_SRCPTR_SWAP (u, v); - MPZ_PTR_SWAP (s, t); - MP_BITCNT_T_SWAP (uz, vz); - } - - /* Maintain - * - * u = t0 tu + t1 tv - * v = s0 tu + s1 tv - * - * where u and v denote the inputs with common factors of two - * eliminated, and det (s0, t0; s1, t1) = 2^p. Then - * - * 2^p tu = s1 u - t1 v - * 2^p tv = -s0 u + t0 v - */ - - /* After initial division, tu = q tv + tu', we have - * - * u = 2^uz (tu' + q tv) - * v = 2^vz tv - * - * or - * - * t0 = 2^uz, t1 = 2^uz q - * s0 = 0, s1 = 2^vz - */ - - mpz_tdiv_qr (t1, tu, tu, tv); - mpz_mul_2exp (t1, t1, uz); - - mpz_setbit (s1, vz); - power = uz + vz; - - if (tu->_mp_size > 0) - { - mp_bitcnt_t shift; - shift = mpz_make_odd (tu); - mpz_setbit (t0, uz + shift); - power += shift; - - for (;;) - { - int c; - c = mpz_cmp (tu, tv); - if (c == 0) - break; - - if (c < 0) - { - /* tv = tv' + tu - * - * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv' - * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */ - - mpz_sub (tv, tv, tu); - mpz_add (t0, t0, t1); - mpz_add (s0, s0, s1); - - shift = mpz_make_odd (tv); - mpz_mul_2exp (t1, t1, shift); - mpz_mul_2exp (s1, s1, shift); - } - else - { - mpz_sub (tu, tu, tv); - mpz_add (t1, t0, t1); - mpz_add (s1, s0, s1); - - shift = mpz_make_odd (tu); - mpz_mul_2exp (t0, t0, shift); - mpz_mul_2exp (s0, s0, shift); - } - power += shift; - } - } - else - mpz_setbit (t0, uz); - - /* Now tv = odd part of gcd, and -s0 and t0 are corresponding - cofactors. */ - - mpz_mul_2exp (tv, tv, gz); - mpz_neg (s0, s0); - - /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To - adjust cofactors, we need u / g and v / g */ - - mpz_divexact (s1, v, tv); - mpz_abs (s1, s1); - mpz_divexact (t1, u, tv); - mpz_abs (t1, t1); - - while (power-- > 0) - { - /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */ - if (mpz_odd_p (s0) || mpz_odd_p (t0)) - { - mpz_sub (s0, s0, s1); - mpz_add (t0, t0, t1); - } - assert (mpz_even_p (t0) && mpz_even_p (s0)); - mpz_tdiv_q_2exp (s0, s0, 1); - mpz_tdiv_q_2exp (t0, t0, 1); - } - - /* Choose small cofactors (they should generally satify - - |s| < |u| / 2g and |t| < |v| / 2g, - - with some documented exceptions). Always choose the smallest s, - if there are two choices for s with same absolute value, choose - the one with smallest corresponding t (this asymmetric condition - is needed to prefer s = 0, |t| = 1 when g = |a| = |b|). 
*/ - mpz_add (s1, s0, s1); - mpz_sub (t1, t0, t1); - cmp = mpz_cmpabs (s0, s1); - if (cmp > 0 || (cmp == 0 && mpz_cmpabs (t0, t1) > 0)) - { - mpz_swap (s0, s1); - mpz_swap (t0, t1); - } - if (u->_mp_size < 0) - mpz_neg (s0, s0); - if (v->_mp_size < 0) - mpz_neg (t0, t0); - - mpz_swap (g, tv); - if (s) - mpz_swap (s, s0); - if (t) - mpz_swap (t, t0); - - mpz_clear (tu); - mpz_clear (tv); - mpz_clear (s0); - mpz_clear (s1); - mpz_clear (t0); - mpz_clear (t1); -} - -void -mpz_lcm (mpz_t r, const mpz_t u, const mpz_t v) -{ - mpz_t g; - - if (u->_mp_size == 0 || v->_mp_size == 0) - { - r->_mp_size = 0; - return; - } - - mpz_init (g); - - mpz_gcd (g, u, v); - mpz_divexact (g, u, g); - mpz_mul (r, g, v); - - mpz_clear (g); - mpz_abs (r, r); -} - -void -mpz_lcm_ui (mpz_t r, const mpz_t u, unsigned long v) -{ - if (v == 0 || u->_mp_size == 0) - { - r->_mp_size = 0; - return; - } - - v /= mpz_gcd_ui (NULL, u, v); - mpz_mul_ui (r, u, v); - - mpz_abs (r, r); -} - -int -mpz_invert (mpz_t r, const mpz_t u, const mpz_t m) -{ - mpz_t g, tr; - int invertible; - - if (u->_mp_size == 0 || mpz_cmpabs_ui (m, 1) <= 0) - return 0; - - mpz_init (g); - mpz_init (tr); - - mpz_gcdext (g, tr, NULL, u, m); - invertible = (mpz_cmp_ui (g, 1) == 0); - - if (invertible) - { - if (tr->_mp_size < 0) - { - if (m->_mp_size >= 0) - mpz_add (tr, tr, m); - else - mpz_sub (tr, tr, m); - } - mpz_swap (r, tr); - } - - mpz_clear (g); - mpz_clear (tr); - return invertible; -} - - -/* Higher level operations (sqrt, pow and root) */ - -void -mpz_pow_ui (mpz_t r, const mpz_t b, unsigned long e) -{ - unsigned long bit; - mpz_t tr; - mpz_init_set_ui (tr, 1); - - bit = GMP_ULONG_HIGHBIT; - do - { - mpz_mul (tr, tr, tr); - if (e & bit) - mpz_mul (tr, tr, b); - bit >>= 1; - } - while (bit > 0); - - mpz_swap (r, tr); - mpz_clear (tr); -} - -void -mpz_ui_pow_ui (mpz_t r, unsigned long blimb, unsigned long e) -{ - mpz_t b; - - mpz_init_set_ui (b, blimb); - mpz_pow_ui (r, b, e); - mpz_clear (b); -} - -void -mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m) -{ - mpz_t tr; - mpz_t base; - mp_size_t en, mn; - mp_srcptr mp; - struct gmp_div_inverse minv; - unsigned shift; - mp_ptr tp = NULL; - - en = GMP_ABS (e->_mp_size); - mn = GMP_ABS (m->_mp_size); - if (mn == 0) - gmp_die ("mpz_powm: Zero modulo."); - - if (en == 0) - { - mpz_set_ui (r, mpz_cmpabs_ui (m, 1)); - return; - } - - mp = m->_mp_d; - mpn_div_qr_invert (&minv, mp, mn); - shift = minv.shift; - - if (shift > 0) - { - /* To avoid shifts, we do all our reductions, except the final - one, using a *normalized* m. */ - minv.shift = 0; - - tp = gmp_alloc_limbs (mn); - gmp_assert_nocarry (mpn_lshift (tp, mp, mn, shift)); - mp = tp; - } - - mpz_init (base); - - if (e->_mp_size < 0) - { - if (!mpz_invert (base, b, m)) - gmp_die ("mpz_powm: Negative exponent and non-invertible base."); - } - else - { - mp_size_t bn; - mpz_abs (base, b); - - bn = base->_mp_size; - if (bn >= mn) - { - mpn_div_qr_preinv (NULL, base->_mp_d, base->_mp_size, mp, mn, &minv); - bn = mn; - } - - /* We have reduced the absolute value. Now take care of the - sign. Note that we get zero represented non-canonically as - m. 
*/ - if (b->_mp_size < 0) - { - mp_ptr bp = MPZ_REALLOC (base, mn); - gmp_assert_nocarry (mpn_sub (bp, mp, mn, bp, bn)); - bn = mn; - } - base->_mp_size = mpn_normalized_size (base->_mp_d, bn); - } - mpz_init_set_ui (tr, 1); - - while (--en >= 0) - { - mp_limb_t w = e->_mp_d[en]; - mp_limb_t bit; - - bit = GMP_LIMB_HIGHBIT; - do - { - mpz_mul (tr, tr, tr); - if (w & bit) - mpz_mul (tr, tr, base); - if (tr->_mp_size > mn) - { - mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); - tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); - } - bit >>= 1; - } - while (bit > 0); - } - - /* Final reduction */ - if (tr->_mp_size >= mn) - { - minv.shift = shift; - mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); - tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); - } - if (tp) - gmp_free_limbs (tp, mn); - - mpz_swap (r, tr); - mpz_clear (tr); - mpz_clear (base); -} - -void -mpz_powm_ui (mpz_t r, const mpz_t b, unsigned long elimb, const mpz_t m) -{ - mpz_t e; - - mpz_init_set_ui (e, elimb); - mpz_powm (r, b, e, m); - mpz_clear (e); -} - -/* x=trunc(y^(1/z)), r=y-x^z */ -void -mpz_rootrem (mpz_t x, mpz_t r, const mpz_t y, unsigned long z) -{ - int sgn; - mp_bitcnt_t bc; - mpz_t t, u; - - sgn = y->_mp_size < 0; - if ((~z & sgn) != 0) - gmp_die ("mpz_rootrem: Negative argument, with even root."); - if (z == 0) - gmp_die ("mpz_rootrem: Zeroth root."); - - if (mpz_cmpabs_ui (y, 1) <= 0) { - if (x) - mpz_set (x, y); - if (r) - r->_mp_size = 0; - return; - } - - mpz_init (u); - mpz_init (t); - bc = (mpz_sizeinbase (y, 2) - 1) / z + 1; - mpz_setbit (t, bc); - - if (z == 2) /* simplify sqrt loop: z-1 == 1 */ - do { - mpz_swap (u, t); /* u = x */ - mpz_tdiv_q (t, y, u); /* t = y/x */ - mpz_add (t, t, u); /* t = y/x + x */ - mpz_tdiv_q_2exp (t, t, 1); /* x'= (y/x + x)/2 */ - } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ - else /* z != 2 */ { - mpz_t v; - - mpz_init (v); - if (sgn) - mpz_neg (t, t); - - do { - mpz_swap (u, t); /* u = x */ - mpz_pow_ui (t, u, z - 1); /* t = x^(z-1) */ - mpz_tdiv_q (t, y, t); /* t = y/x^(z-1) */ - mpz_mul_ui (v, u, z - 1); /* v = x*(z-1) */ - mpz_add (t, t, v); /* t = y/x^(z-1) + x*(z-1) */ - mpz_tdiv_q_ui (t, t, z); /* x'=(y/x^(z-1) + x*(z-1))/z */ - } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ - - mpz_clear (v); - } - - if (r) { - mpz_pow_ui (t, u, z); - mpz_sub (r, y, t); - } - if (x) - mpz_swap (x, u); - mpz_clear (u); - mpz_clear (t); -} - -int -mpz_root (mpz_t x, const mpz_t y, unsigned long z) -{ - int res; - mpz_t r; - - mpz_init (r); - mpz_rootrem (x, r, y, z); - res = r->_mp_size == 0; - mpz_clear (r); - - return res; -} - -/* Compute s = floor(sqrt(u)) and r = u - s^2. 
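mpz_rootrem above computes x = trunc(y^(1/z)) with the Newton-style iteration x' = (y/x^(z-1) + (z-1)*x)/z, starting from a power of two at least as large as the root and stopping once the iterate stops shrinking; for z = 2 this collapses to x' = (y/x + x)/2. A word-sized sketch of the square-root case (illustrative only, not the deleted code):

#include <assert.h>
#include <stdint.h>

/* Floor square root by the Newton iteration x' = (y/x + x)/2, starting
   from a power of two >= sqrt(y), as the deleted loop does. */
static uint64_t isqrt_u64(uint64_t y)
{
    if (y < 2)
        return y;
    uint64_t x = 1;
    while (x < (UINT64_C(1) << 32) && x * x < y)
        x <<= 1;                            /* x is now >= sqrt(y) */
    for (;;) {
        uint64_t next = (y / x + x) / 2;
        if (next >= x)                      /* stopped decreasing: x is the floor */
            return x;
        x = next;
    }
}

int main(void)
{
    assert(isqrt_u64(0) == 0 && isqrt_u64(1) == 1);
    assert(isqrt_u64(15) == 3 && isqrt_u64(16) == 4);
    assert(isqrt_u64(999999999999ULL) == 999999);
    return 0;
}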
Allows r == NULL */ -void -mpz_sqrtrem (mpz_t s, mpz_t r, const mpz_t u) -{ - mpz_rootrem (s, r, u, 2); -} - -void -mpz_sqrt (mpz_t s, const mpz_t u) -{ - mpz_rootrem (s, NULL, u, 2); -} - -int -mpz_perfect_square_p (const mpz_t u) -{ - if (u->_mp_size <= 0) - return (u->_mp_size == 0); - else - return mpz_root (NULL, u, 2); -} - -int -mpn_perfect_square_p (mp_srcptr p, mp_size_t n) -{ - mpz_t t; - - assert (n > 0); - assert (p [n-1] != 0); - return mpz_root (NULL, mpz_roinit_normal_n (t, p, n), 2); -} - -mp_size_t -mpn_sqrtrem (mp_ptr sp, mp_ptr rp, mp_srcptr p, mp_size_t n) -{ - mpz_t s, r, u; - mp_size_t res; - - assert (n > 0); - assert (p [n-1] != 0); - - mpz_init (r); - mpz_init (s); - mpz_rootrem (s, r, mpz_roinit_normal_n (u, p, n), 2); - - assert (s->_mp_size == (n+1)/2); - mpn_copyd (sp, s->_mp_d, s->_mp_size); - mpz_clear (s); - res = r->_mp_size; - if (rp) - mpn_copyd (rp, r->_mp_d, res); - mpz_clear (r); - return res; -} - -/* Combinatorics */ - -void -mpz_mfac_uiui (mpz_t x, unsigned long n, unsigned long m) -{ - mpz_set_ui (x, n + (n == 0)); - if (m + 1 < 2) return; - while (n > m + 1) - mpz_mul_ui (x, x, n -= m); -} - -void -mpz_2fac_ui (mpz_t x, unsigned long n) -{ - mpz_mfac_uiui (x, n, 2); -} - -void -mpz_fac_ui (mpz_t x, unsigned long n) -{ - mpz_mfac_uiui (x, n, 1); -} - -void -mpz_bin_uiui (mpz_t r, unsigned long n, unsigned long k) -{ - mpz_t t; - - mpz_set_ui (r, k <= n); - - if (k > (n >> 1)) - k = (k <= n) ? n - k : 0; - - mpz_init (t); - mpz_fac_ui (t, k); - - for (; k > 0; --k) - mpz_mul_ui (r, r, n--); - - mpz_divexact (r, r, t); - mpz_clear (t); -} - - -/* Primality testing */ - -/* Computes Kronecker (a/b) with odd b, a!=0 and GCD(a,b) = 1 */ -/* Adapted from JACOBI_BASE_METHOD==4 in mpn/generic/jacbase.c */ -static int -gmp_jacobi_coprime (mp_limb_t a, mp_limb_t b) -{ - int c, bit = 0; - - assert (b & 1); - assert (a != 0); - /* assert (mpn_gcd_11 (a, b) == 1); */ - - /* Below, we represent a and b shifted right so that the least - significant one bit is implicit. */ - b >>= 1; - - gmp_ctz(c, a); - a >>= 1; - - for (;;) - { - a >>= c; - /* (2/b) = -1 if b = 3 or 5 mod 8 */ - bit ^= c & (b ^ (b >> 1)); - if (a < b) - { - if (a == 0) - return bit & 1 ? -1 : 1; - bit ^= a & b; - a = b - a; - b -= a; - } - else - { - a -= b; - assert (a != 0); - } - - gmp_ctz(c, a); - ++c; - } -} - -static void -gmp_lucas_step_k_2k (mpz_t V, mpz_t Qk, const mpz_t n) -{ - mpz_mod (Qk, Qk, n); - /* V_{2k} <- V_k ^ 2 - 2Q^k */ - mpz_mul (V, V, V); - mpz_submul_ui (V, Qk, 2); - mpz_tdiv_r (V, V, n); - /* Q^{2k} = (Q^k)^2 */ - mpz_mul (Qk, Qk, Qk); -} - -/* Computes V_k, Q^k (mod n) for the Lucas' sequence */ -/* with P=1, Q=Q; k = (n>>b0)|1. */ -/* Requires an odd n > 4; b0 > 0; -2*Q must not overflow a long */ -/* Returns (U_k == 0) and sets V=V_k and Qk=Q^k. 
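mpz_bin_uiui above computes C(n,k) by switching to the smaller of k and n-k, multiplying the k descending factors starting at n, and dividing exactly by k! with mpz_divexact. The same plan on 64-bit words (fine for small inputs; the intermediate product overflows quickly for large ones):

#include <assert.h>
#include <stdint.h>

static uint64_t binom_u64(uint64_t n, uint64_t k)
{
    if (k > n)
        return 0;
    if (k > n / 2)
        k = n - k;                     /* symmetry, as in the deleted code */
    uint64_t num = 1, fact = 1;
    for (uint64_t i = 0; i < k; i++) {
        num *= n - i;                  /* n * (n-1) * ... * (n-k+1) */
        fact *= i + 1;                 /* k! */
    }
    return num / fact;                 /* exact division, like mpz_divexact */
}

int main(void)
{
    assert(binom_u64(10, 3) == 120);
    assert(binom_u64(20, 10) == 184756);
    assert(binom_u64(5, 7) == 0);
    return 0;
}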
*/ -static int -gmp_lucas_mod (mpz_t V, mpz_t Qk, long Q, - mp_bitcnt_t b0, const mpz_t n) -{ - mp_bitcnt_t bs; - mpz_t U; - int res; - - assert (b0 > 0); - assert (Q <= - (LONG_MIN / 2)); - assert (Q >= - (LONG_MAX / 2)); - assert (mpz_cmp_ui (n, 4) > 0); - assert (mpz_odd_p (n)); - - mpz_init_set_ui (U, 1); /* U1 = 1 */ - mpz_set_ui (V, 1); /* V1 = 1 */ - mpz_set_si (Qk, Q); - - for (bs = mpz_sizeinbase (n, 2) - 1; --bs >= b0;) - { - /* U_{2k} <- U_k * V_k */ - mpz_mul (U, U, V); - /* V_{2k} <- V_k ^ 2 - 2Q^k */ - /* Q^{2k} = (Q^k)^2 */ - gmp_lucas_step_k_2k (V, Qk, n); - - /* A step k->k+1 is performed if the bit in $n$ is 1 */ - /* mpz_tstbit(n,bs) or the bit is 0 in $n$ but */ - /* should be 1 in $n+1$ (bs == b0) */ - if (b0 == bs || mpz_tstbit (n, bs)) - { - /* Q^{k+1} <- Q^k * Q */ - mpz_mul_si (Qk, Qk, Q); - /* U_{k+1} <- (U_k + V_k) / 2 */ - mpz_swap (U, V); /* Keep in V the old value of U_k */ - mpz_add (U, U, V); - /* We have to compute U/2, so we need an even value, */ - /* equivalent (mod n) */ - if (mpz_odd_p (U)) - mpz_add (U, U, n); - mpz_tdiv_q_2exp (U, U, 1); - /* V_{k+1} <-(D*U_k + V_k) / 2 = - U_{k+1} + (D-1)/2*U_k = U_{k+1} - 2Q*U_k */ - mpz_mul_si (V, V, -2*Q); - mpz_add (V, U, V); - mpz_tdiv_r (V, V, n); - } - mpz_tdiv_r (U, U, n); - } - - res = U->_mp_size == 0; - mpz_clear (U); - return res; -} - -/* Performs strong Lucas' test on x, with parameters suggested */ -/* for the BPSW test. Qk is only passed to recycle a variable. */ -/* Requires GCD (x,6) = 1.*/ -static int -gmp_stronglucas (const mpz_t x, mpz_t Qk) -{ - mp_bitcnt_t b0; - mpz_t V, n; - mp_limb_t maxD, D; /* The absolute value is stored. */ - long Q; - mp_limb_t tl; - - /* Test on the absolute value. */ - mpz_roinit_normal_n (n, x->_mp_d, GMP_ABS (x->_mp_size)); - - assert (mpz_odd_p (n)); - /* assert (mpz_gcd_ui (NULL, n, 6) == 1); */ - if (mpz_root (Qk, n, 2)) - return 0; /* A square is composite. */ - - /* Check Ds up to square root (in case, n is prime) - or avoid overflows */ - maxD = (Qk->_mp_size == 1) ? Qk->_mp_d [0] - 1 : GMP_LIMB_MAX; - - D = 3; - /* Search a D such that (D/n) = -1 in the sequence 5,-7,9,-11,.. */ - /* For those Ds we have (D/n) = (n/|D|) */ - do - { - if (D >= maxD) - return 1 + (D != GMP_LIMB_MAX); /* (1 + ! ~ D) */ - D += 2; - tl = mpz_tdiv_ui (n, D); - if (tl == 0) - return 0; - } - while (gmp_jacobi_coprime (tl, D) == 1); - - mpz_init (V); - - /* n-(D/n) = n+1 = d*2^{b0}, with d = (n>>b0) | 1 */ - b0 = mpn_common_scan (~ n->_mp_d[0], 0, n->_mp_d, n->_mp_size, GMP_LIMB_MAX); - /* b0 = mpz_scan0 (n, 0); */ - - /* D= P^2 - 4Q; P = 1; Q = (1-D)/4 */ - Q = (D & 2) ? (long) (D >> 2) + 1 : -(long) (D >> 2); - - if (! gmp_lucas_mod (V, Qk, Q, b0, n)) /* If Ud != 0 */ - while (V->_mp_size != 0 && --b0 != 0) /* while Vk != 0 */ - /* V <- V ^ 2 - 2Q^k */ - /* Q^{2k} = (Q^k)^2 */ - gmp_lucas_step_k_2k (V, Qk, n); - - mpz_clear (V); - return (b0 != 0); -} - -static int -gmp_millerrabin (const mpz_t n, const mpz_t nm1, mpz_t y, - const mpz_t q, mp_bitcnt_t k) -{ - assert (k > 0); - - /* Caller must initialize y to the base. */ - mpz_powm (y, y, q, n); - - if (mpz_cmp_ui (y, 1) == 0 || mpz_cmp (y, nm1) == 0) - return 1; - - while (--k > 0) - { - mpz_powm_ui (y, y, 2, n); - if (mpz_cmp (y, nm1) == 0) - return 1; - } - return 0; -} - -/* This product is 0xc0cfd797, and fits in 32 bits. 
*/ -#define GMP_PRIME_PRODUCT \ - (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL) - -/* Bit (p+1)/2 is set, for each odd prime <= 61 */ -#define GMP_PRIME_MASK 0xc96996dcUL - -int -mpz_probab_prime_p (const mpz_t n, int reps) -{ - mpz_t nm1; - mpz_t q; - mpz_t y; - mp_bitcnt_t k; - int is_prime; - int j; - - /* Note that we use the absolute value of n only, for compatibility - with the real GMP. */ - if (mpz_even_p (n)) - return (mpz_cmpabs_ui (n, 2) == 0) ? 2 : 0; - - /* Above test excludes n == 0 */ - assert (n->_mp_size != 0); - - if (mpz_cmpabs_ui (n, 64) < 0) - return (GMP_PRIME_MASK >> (n->_mp_d[0] >> 1)) & 2; - - if (mpz_gcd_ui (NULL, n, GMP_PRIME_PRODUCT) != 1) - return 0; - - /* All prime factors are >= 31. */ - if (mpz_cmpabs_ui (n, 31*31) < 0) - return 2; - - mpz_init (nm1); - mpz_init (q); - - /* Find q and k, where q is odd and n = 1 + 2**k * q. */ - mpz_abs (nm1, n); - nm1->_mp_d[0] -= 1; - /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ - k = mpn_scan1 (nm1->_mp_d, 0); - mpz_tdiv_q_2exp (q, nm1, k); - - /* BPSW test */ - mpz_init_set_ui (y, 2); - is_prime = gmp_millerrabin (n, nm1, y, q, k) && gmp_stronglucas (n, y); - reps -= 24; /* skip the first 24 repetitions */ - - /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] = - j^2 + j + 41 using Euler's polynomial. We potentially stop early, - if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps > - 30 (a[30] == 971 > 31*31 == 961). */ - - for (j = 0; is_prime & (j < reps); j++) - { - mpz_set_ui (y, (unsigned long) j*j+j+41); - if (mpz_cmp (y, nm1) >= 0) - { - /* Don't try any further bases. This "early" break does not affect - the result for any reasonable reps value (<=5000 was tested) */ - assert (j >= 30); - break; - } - is_prime = gmp_millerrabin (n, nm1, y, q, k); - } - mpz_clear (nm1); - mpz_clear (q); - mpz_clear (y); - - return is_prime; -} - - -/* Logical operations and bit manipulation. */ - -/* Numbers are treated as if represented in two's complement (and - infinitely sign extended). For a negative values we get the two's - complement from -x = ~x + 1, where ~ is bitwise complement. - Negation transforms - - xxxx10...0 - - into - - yyyy10...0 - - where yyyy is the bitwise complement of xxxx. So least significant - bits, up to and including the first one bit, are unchanged, and - the more significant bits are all complemented. - - To change a bit from zero to one in a negative number, subtract the - corresponding power of two from the absolute value. This can never - underflow. To change a bit from one to zero, add the corresponding - power of two, and this might overflow. E.g., if x = -001111, the - two's complement is 110001. Clearing the least significant bit, we - get two's complement 110000, and -010000. */ - -int -mpz_tstbit (const mpz_t d, mp_bitcnt_t bit_index) -{ - mp_size_t limb_index; - unsigned shift; - mp_size_t ds; - mp_size_t dn; - mp_limb_t w; - int bit; - - ds = d->_mp_size; - dn = GMP_ABS (ds); - limb_index = bit_index / GMP_LIMB_BITS; - if (limb_index >= dn) - return ds < 0; - - shift = bit_index % GMP_LIMB_BITS; - w = d->_mp_d[limb_index]; - bit = (w >> shift) & 1; - - if (ds < 0) - { - /* d < 0. Check if any of the bits below is set: If so, our bit - must be complemented. 
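mpz_probab_prime_p above first runs a BPSW test (one base-2 Miller-Rabin round plus the strong Lucas test) and then extra Miller-Rabin rounds with the deterministic bases j^2 + j + 41. A single Miller-Rabin round expressed through the public interface, assuming a build linked against mini-gmp or GMP proper (the helper name mr_round is illustrative):

#include <assert.h>
#include "mini-gmp.h"

/* One Miller-Rabin round for odd n > 3 with base a: returns 1 when a is
   not a witness for compositeness (n is a probable prime for this base). */
static int mr_round(const mpz_t n, unsigned long a)
{
    mpz_t nm1, q, y;
    mp_bitcnt_t k, i;
    int ok = 0;

    mpz_init(nm1); mpz_init(q); mpz_init(y);
    mpz_sub_ui(nm1, n, 1);
    k = mpz_scan1(nm1, 0);             /* n - 1 = q * 2^k, q odd */
    mpz_tdiv_q_2exp(q, nm1, k);

    mpz_set_ui(y, a);
    mpz_powm(y, y, q, n);              /* y = a^q mod n */
    if (mpz_cmp_ui(y, 1) == 0 || mpz_cmp(y, nm1) == 0)
        ok = 1;
    for (i = 1; !ok && i < k; i++) {   /* square up to k-1 more times */
        mpz_powm_ui(y, y, 2, n);
        if (mpz_cmp(y, nm1) == 0)
            ok = 1;
    }
    mpz_clear(nm1); mpz_clear(q); mpz_clear(y);
    return ok;
}

int main(void)
{
    mpz_t n;
    mpz_init_set_ui(n, 104729);        /* prime */
    assert(mr_round(n, 2) && mr_round(n, 3));
    mpz_set_ui(n, 104727);             /* 3 * 7 * 4987: composite */
    assert(!mr_round(n, 2));
    mpz_clear(n);
    return 0;
}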
*/ - if (shift > 0 && (mp_limb_t) (w << (GMP_LIMB_BITS - shift)) > 0) - return bit ^ 1; - while (--limb_index >= 0) - if (d->_mp_d[limb_index] > 0) - return bit ^ 1; - } - return bit; -} - -static void -mpz_abs_add_bit (mpz_t d, mp_bitcnt_t bit_index) -{ - mp_size_t dn, limb_index; - mp_limb_t bit; - mp_ptr dp; - - dn = GMP_ABS (d->_mp_size); - - limb_index = bit_index / GMP_LIMB_BITS; - bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); - - if (limb_index >= dn) - { - mp_size_t i; - /* The bit should be set outside of the end of the number. - We have to increase the size of the number. */ - dp = MPZ_REALLOC (d, limb_index + 1); - - dp[limb_index] = bit; - for (i = dn; i < limb_index; i++) - dp[i] = 0; - dn = limb_index + 1; - } - else - { - mp_limb_t cy; - - dp = d->_mp_d; - - cy = mpn_add_1 (dp + limb_index, dp + limb_index, dn - limb_index, bit); - if (cy > 0) - { - dp = MPZ_REALLOC (d, dn + 1); - dp[dn++] = cy; - } - } - - d->_mp_size = (d->_mp_size < 0) ? - dn : dn; -} - -static void -mpz_abs_sub_bit (mpz_t d, mp_bitcnt_t bit_index) -{ - mp_size_t dn, limb_index; - mp_ptr dp; - mp_limb_t bit; - - dn = GMP_ABS (d->_mp_size); - dp = d->_mp_d; - - limb_index = bit_index / GMP_LIMB_BITS; - bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); - - assert (limb_index < dn); - - gmp_assert_nocarry (mpn_sub_1 (dp + limb_index, dp + limb_index, - dn - limb_index, bit)); - dn = mpn_normalized_size (dp, dn); - d->_mp_size = (d->_mp_size < 0) ? - dn : dn; -} - -void -mpz_setbit (mpz_t d, mp_bitcnt_t bit_index) -{ - if (!mpz_tstbit (d, bit_index)) - { - if (d->_mp_size >= 0) - mpz_abs_add_bit (d, bit_index); - else - mpz_abs_sub_bit (d, bit_index); - } -} - -void -mpz_clrbit (mpz_t d, mp_bitcnt_t bit_index) -{ - if (mpz_tstbit (d, bit_index)) - { - if (d->_mp_size >= 0) - mpz_abs_sub_bit (d, bit_index); - else - mpz_abs_add_bit (d, bit_index); - } -} - -void -mpz_combit (mpz_t d, mp_bitcnt_t bit_index) -{ - if (mpz_tstbit (d, bit_index) ^ (d->_mp_size < 0)) - mpz_abs_sub_bit (d, bit_index); - else - mpz_abs_add_bit (d, bit_index); -} - -void -mpz_com (mpz_t r, const mpz_t u) -{ - mpz_add_ui (r, u, 1); - mpz_neg (r, r); -} - -void -mpz_and (mpz_t r, const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, rn, i; - mp_ptr up, vp, rp; - - mp_limb_t ux, vx, rx; - mp_limb_t uc, vc, rc; - mp_limb_t ul, vl, rl; - - un = GMP_ABS (u->_mp_size); - vn = GMP_ABS (v->_mp_size); - if (un < vn) - { - MPZ_SRCPTR_SWAP (u, v); - MP_SIZE_T_SWAP (un, vn); - } - if (vn == 0) - { - r->_mp_size = 0; - return; - } - - uc = u->_mp_size < 0; - vc = v->_mp_size < 0; - rc = uc & vc; - - ux = -uc; - vx = -vc; - rx = -rc; - - /* If the smaller input is positive, higher limbs don't matter. */ - rn = vx ? un : vn; - - rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); - - up = u->_mp_d; - vp = v->_mp_d; - - i = 0; - do - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - vl = (vp[i] ^ vx) + vc; - vc = vl < vc; - - rl = ( (ul & vl) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - while (++i < vn); - assert (vc == 0); - - for (; i < rn; i++) - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - rl = ( (ul & vx) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - if (rc) - rp[rn++] = rc; - else - rn = mpn_normalized_size (rp, rn); - - r->_mp_size = rx ? 
-rn : rn; -} - -void -mpz_ior (mpz_t r, const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, rn, i; - mp_ptr up, vp, rp; - - mp_limb_t ux, vx, rx; - mp_limb_t uc, vc, rc; - mp_limb_t ul, vl, rl; - - un = GMP_ABS (u->_mp_size); - vn = GMP_ABS (v->_mp_size); - if (un < vn) - { - MPZ_SRCPTR_SWAP (u, v); - MP_SIZE_T_SWAP (un, vn); - } - if (vn == 0) - { - mpz_set (r, u); - return; - } - - uc = u->_mp_size < 0; - vc = v->_mp_size < 0; - rc = uc | vc; - - ux = -uc; - vx = -vc; - rx = -rc; - - /* If the smaller input is negative, by sign extension higher limbs - don't matter. */ - rn = vx ? vn : un; - - rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); - - up = u->_mp_d; - vp = v->_mp_d; - - i = 0; - do - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - vl = (vp[i] ^ vx) + vc; - vc = vl < vc; - - rl = ( (ul | vl) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - while (++i < vn); - assert (vc == 0); - - for (; i < rn; i++) - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - rl = ( (ul | vx) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - if (rc) - rp[rn++] = rc; - else - rn = mpn_normalized_size (rp, rn); - - r->_mp_size = rx ? -rn : rn; -} - -void -mpz_xor (mpz_t r, const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, i; - mp_ptr up, vp, rp; - - mp_limb_t ux, vx, rx; - mp_limb_t uc, vc, rc; - mp_limb_t ul, vl, rl; - - un = GMP_ABS (u->_mp_size); - vn = GMP_ABS (v->_mp_size); - if (un < vn) - { - MPZ_SRCPTR_SWAP (u, v); - MP_SIZE_T_SWAP (un, vn); - } - if (vn == 0) - { - mpz_set (r, u); - return; - } - - uc = u->_mp_size < 0; - vc = v->_mp_size < 0; - rc = uc ^ vc; - - ux = -uc; - vx = -vc; - rx = -rc; - - rp = MPZ_REALLOC (r, un + (mp_size_t) rc); - - up = u->_mp_d; - vp = v->_mp_d; - - i = 0; - do - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - vl = (vp[i] ^ vx) + vc; - vc = vl < vc; - - rl = (ul ^ vl ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - while (++i < vn); - assert (vc == 0); - - for (; i < un; i++) - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - rl = (ul ^ ux) + rc; - rc = rl < rc; - rp[i] = rl; - } - if (rc) - rp[un++] = rc; - else - un = mpn_normalized_size (rp, un); - - r->_mp_size = rx ? -un : un; -} - -static unsigned -gmp_popcount_limb (mp_limb_t x) -{ - unsigned c; - - /* Do 16 bits at a time, to avoid limb-sized constants. 
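mpz_and, mpz_ior and mpz_xor above emulate infinitely sign-extended two's-complement operands by complementing and carrying on the stored absolute values, so their results agree with ordinary machine integers. A minimal check of that semantics on a two's-complement host (assuming a build linked against mini-gmp or GMP):

#include <assert.h>
#include "mini-gmp.h"

int main(void)
{
    mpz_t a, b, r;
    mpz_init_set_si(a, -6);            /* ...11111010 in two's complement */
    mpz_init_set_si(b, 5);             /* ...00000101                     */
    mpz_init(r);

    mpz_and(r, a, b);
    assert(mpz_get_si(r) == (-6 & 5)); /* == 0  */
    mpz_ior(r, a, b);
    assert(mpz_get_si(r) == (-6 | 5)); /* == -1 */
    mpz_xor(r, a, b);
    assert(mpz_get_si(r) == (-6 ^ 5)); /* == -1 */

    mpz_clear(a); mpz_clear(b); mpz_clear(r);
    return 0;
}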
*/ - int LOCAL_SHIFT_BITS = 16; - for (c = 0; x > 0;) - { - unsigned w = x - ((x >> 1) & 0x5555); - w = ((w >> 2) & 0x3333) + (w & 0x3333); - w = (w >> 4) + w; - w = ((w >> 8) & 0x000f) + (w & 0x000f); - c += w; - if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) - x >>= LOCAL_SHIFT_BITS; - else - x = 0; - } - return c; -} - -mp_bitcnt_t -mpn_popcount (mp_srcptr p, mp_size_t n) -{ - mp_size_t i; - mp_bitcnt_t c; - - for (c = 0, i = 0; i < n; i++) - c += gmp_popcount_limb (p[i]); - - return c; -} - -mp_bitcnt_t -mpz_popcount (const mpz_t u) -{ - mp_size_t un; - - un = u->_mp_size; - - if (un < 0) - return ~(mp_bitcnt_t) 0; - - return mpn_popcount (u->_mp_d, un); -} - -mp_bitcnt_t -mpz_hamdist (const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, i; - mp_limb_t uc, vc, ul, vl, comp; - mp_srcptr up, vp; - mp_bitcnt_t c; - - un = u->_mp_size; - vn = v->_mp_size; - - if ( (un ^ vn) < 0) - return ~(mp_bitcnt_t) 0; - - comp = - (uc = vc = (un < 0)); - if (uc) - { - assert (vn < 0); - un = -un; - vn = -vn; - } - - up = u->_mp_d; - vp = v->_mp_d; - - if (un < vn) - MPN_SRCPTR_SWAP (up, un, vp, vn); - - for (i = 0, c = 0; i < vn; i++) - { - ul = (up[i] ^ comp) + uc; - uc = ul < uc; - - vl = (vp[i] ^ comp) + vc; - vc = vl < vc; - - c += gmp_popcount_limb (ul ^ vl); - } - assert (vc == 0); - - for (; i < un; i++) - { - ul = (up[i] ^ comp) + uc; - uc = ul < uc; - - c += gmp_popcount_limb (ul ^ comp); - } - - return c; -} - -mp_bitcnt_t -mpz_scan1 (const mpz_t u, mp_bitcnt_t starting_bit) -{ - mp_ptr up; - mp_size_t us, un, i; - mp_limb_t limb, ux; - - us = u->_mp_size; - un = GMP_ABS (us); - i = starting_bit / GMP_LIMB_BITS; - - /* Past the end there's no 1 bits for u>=0, or an immediate 1 bit - for u<0. Notice this test picks up any u==0 too. */ - if (i >= un) - return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit); - - up = u->_mp_d; - ux = 0; - limb = up[i]; - - if (starting_bit != 0) - { - if (us < 0) - { - ux = mpn_zero_p (up, i); - limb = ~ limb + ux; - ux = - (mp_limb_t) (limb >= ux); - } - - /* Mask to 0 all bits before starting_bit, thus ignoring them. */ - limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); - } - - return mpn_common_scan (limb, i, up, un, ux); -} - -mp_bitcnt_t -mpz_scan0 (const mpz_t u, mp_bitcnt_t starting_bit) -{ - mp_ptr up; - mp_size_t us, un, i; - mp_limb_t limb, ux; - - us = u->_mp_size; - ux = - (mp_limb_t) (us >= 0); - un = GMP_ABS (us); - i = starting_bit / GMP_LIMB_BITS; - - /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for - u<0. Notice this test picks up all cases of u==0 too. */ - if (i >= un) - return (ux ? starting_bit : ~(mp_bitcnt_t) 0); - - up = u->_mp_d; - limb = up[i] ^ ux; - - if (ux == 0) - limb -= mpn_zero_p (up, i); /* limb = ~(~limb + zero_p) */ - - /* Mask all bits before starting_bit, thus ignoring them. */ - limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); - - return mpn_common_scan (limb, i, up, un, ux); -} - - -/* MPZ base conversion. 
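gmp_popcount_limb above counts bits 16 at a time with the classic subtract-and-mask (SWAR) reduction, so it never needs a limb-sized constant and works for any limb width. The same reduction applied to a 64-bit word in 16-bit chunks (illustrative only, not the deleted code):

#include <assert.h>
#include <stdint.h>

static unsigned popcount16(unsigned x)         /* expects x < 0x10000 */
{
    unsigned w = x - ((x >> 1) & 0x5555);      /* 2-bit field sums */
    w = ((w >> 2) & 0x3333) + (w & 0x3333);    /* 4-bit field sums */
    w = (w >> 4) + w;                          /* byte sums (high nibbles dirty) */
    return ((w >> 8) & 0x0f) + (w & 0x0f);     /* total, 0..16 */
}

static unsigned popcount64(uint64_t x)
{
    unsigned c = 0;
    while (x != 0) {                           /* 16 bits per round, as above */
        c += popcount16((unsigned) (x & 0xffff));
        x >>= 16;
    }
    return c;
}

int main(void)
{
    assert(popcount64(0) == 0);
    assert(popcount64(0xffffULL) == 16);
    assert(popcount64(UINT64_MAX) == 64);
    assert(popcount64(0x8000000000000001ULL) == 2);
    return 0;
}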
*/ - -size_t -mpz_sizeinbase (const mpz_t u, int base) -{ - mp_size_t un, tn; - mp_srcptr up; - mp_ptr tp; - mp_bitcnt_t bits; - struct gmp_div_inverse bi; - size_t ndigits; - - assert (base >= 2); - assert (base <= 62); - - un = GMP_ABS (u->_mp_size); - if (un == 0) - return 1; - - up = u->_mp_d; - - bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]); - switch (base) - { - case 2: - return bits; - case 4: - return (bits + 1) / 2; - case 8: - return (bits + 2) / 3; - case 16: - return (bits + 3) / 4; - case 32: - return (bits + 4) / 5; - /* FIXME: Do something more clever for the common case of base - 10. */ - } - - tp = gmp_alloc_limbs (un); - mpn_copyi (tp, up, un); - mpn_div_qr_1_invert (&bi, base); - - tn = un; - ndigits = 0; - do - { - ndigits++; - mpn_div_qr_1_preinv (tp, tp, tn, &bi); - tn -= (tp[tn-1] == 0); - } - while (tn > 0); - - gmp_free_limbs (tp, un); - return ndigits; -} - -char * -mpz_get_str (char *sp, int base, const mpz_t u) -{ - unsigned bits; - const char *digits; - mp_size_t un; - size_t i, sn, osn; - - digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; - if (base > 1) - { - if (base <= 36) - digits = "0123456789abcdefghijklmnopqrstuvwxyz"; - else if (base > 62) - return NULL; - } - else if (base >= -1) - base = 10; - else - { - base = -base; - if (base > 36) - return NULL; - } - - sn = 1 + mpz_sizeinbase (u, base); - if (!sp) - { - osn = 1 + sn; - sp = (char *) gmp_alloc (osn); - } - else - osn = 0; - un = GMP_ABS (u->_mp_size); - - if (un == 0) - { - sp[0] = '0'; - sn = 1; - goto ret; - } - - i = 0; - - if (u->_mp_size < 0) - sp[i++] = '-'; - - bits = mpn_base_power_of_two_p (base); - - if (bits) - /* Not modified in this case. */ - sn = i + mpn_get_str_bits ((unsigned char *) sp + i, bits, u->_mp_d, un); - else - { - struct mpn_base_info info; - mp_ptr tp; - - mpn_get_base_info (&info, base); - tp = gmp_alloc_limbs (un); - mpn_copyi (tp, u->_mp_d, un); - - sn = i + mpn_get_str_other ((unsigned char *) sp + i, base, &info, tp, un); - gmp_free_limbs (tp, un); - } - - for (; i < sn; i++) - sp[i] = digits[(unsigned char) sp[i]]; - -ret: - sp[sn] = '\0'; - if (osn && osn != sn + 1) - sp = (char*) gmp_realloc (sp, osn, sn + 1); - return sp; -} - -int -mpz_set_str (mpz_t r, const char *sp, int base) -{ - unsigned bits, value_of_a; - mp_size_t rn, alloc; - mp_ptr rp; - size_t dn, sn; - int sign; - unsigned char *dp; - - assert (base == 0 || (base >= 2 && base <= 62)); - - while (isspace( (unsigned char) *sp)) - sp++; - - sign = (*sp == '-'); - sp += sign; - - if (base == 0) - { - if (sp[0] == '0') - { - if (sp[1] == 'x' || sp[1] == 'X') - { - base = 16; - sp += 2; - } - else if (sp[1] == 'b' || sp[1] == 'B') - { - base = 2; - sp += 2; - } - else - base = 8; - } - else - base = 10; - } - - if (!*sp) - { - r->_mp_size = 0; - return -1; - } - sn = strlen(sp); - dp = (unsigned char *) gmp_alloc (sn); - - value_of_a = (base > 36) ? 
36 : 10; - for (dn = 0; *sp; sp++) - { - unsigned digit; - - if (isspace ((unsigned char) *sp)) - continue; - else if (*sp >= '0' && *sp <= '9') - digit = *sp - '0'; - else if (*sp >= 'a' && *sp <= 'z') - digit = *sp - 'a' + value_of_a; - else if (*sp >= 'A' && *sp <= 'Z') - digit = *sp - 'A' + 10; - else - digit = base; /* fail */ - - if (digit >= (unsigned) base) - { - gmp_free (dp, sn); - r->_mp_size = 0; - return -1; - } - - dp[dn++] = digit; - } - - if (!dn) - { - gmp_free (dp, sn); - r->_mp_size = 0; - return -1; - } - bits = mpn_base_power_of_two_p (base); - - if (bits > 0) - { - alloc = (dn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; - rp = MPZ_REALLOC (r, alloc); - rn = mpn_set_str_bits (rp, dp, dn, bits); - } - else - { - struct mpn_base_info info; - mpn_get_base_info (&info, base); - alloc = (dn + info.exp - 1) / info.exp; - rp = MPZ_REALLOC (r, alloc); - rn = mpn_set_str_other (rp, dp, dn, base, &info); - /* Normalization, needed for all-zero input. */ - assert (rn > 0); - rn -= rp[rn-1] == 0; - } - assert (rn <= alloc); - gmp_free (dp, sn); - - r->_mp_size = sign ? - rn : rn; - - return 0; -} - -int -mpz_init_set_str (mpz_t r, const char *sp, int base) -{ - mpz_init (r); - return mpz_set_str (r, sp, base); -} - -size_t -mpz_out_str (FILE *stream, int base, const mpz_t x) -{ - char *str; - size_t len, n; - - str = mpz_get_str (NULL, base, x); - if (!str) - return 0; - len = strlen (str); - n = fwrite (str, 1, len, stream); - gmp_free (str, len + 1); - return n; -} - - -static int -gmp_detect_endian (void) -{ - static const int i = 2; - const unsigned char *p = (const unsigned char *) &i; - return 1 - *p; -} - -/* Import and export. Does not support nails. */ -void -mpz_import (mpz_t r, size_t count, int order, size_t size, int endian, - size_t nails, const void *src) -{ - const unsigned char *p; - ptrdiff_t word_step; - mp_ptr rp; - mp_size_t rn; - - /* The current (partial) limb. */ - mp_limb_t limb; - /* The number of bytes already copied to this limb (starting from - the low end). */ - size_t bytes; - /* The index where the limb should be stored, when completed. */ - mp_size_t i; - - if (nails != 0) - gmp_die ("mpz_import: Nails not supported."); - - assert (order == 1 || order == -1); - assert (endian >= -1 && endian <= 1); - - if (endian == 0) - endian = gmp_detect_endian (); - - p = (unsigned char *) src; - - word_step = (order != endian) ? 2 * size : 0; - - /* Process bytes from the least significant end, so point p at the - least significant word. */ - if (order == 1) - { - p += size * (count - 1); - word_step = - word_step; - } - - /* And at least significant byte of that word. 
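mpz_set_str and mpz_get_str above handle bases 2 through 62 (upper- and lower-case digits become distinct above base 36) and, for base 0, pick the base from a 0x/0b/leading-0 prefix. A minimal round trip through the public interface; it assumes mini-gmp's default malloc-based allocator, so the buffer returned by mpz_get_str(NULL, ...) may be released with free:

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include "mini-gmp.h"

int main(void)
{
    mpz_t x;
    char *s;

    mpz_init(x);
    assert(mpz_set_str(x, "0xff", 0) == 0);   /* base 0: prefix selects base 16 */
    assert(mpz_cmp_ui(x, 255) == 0);

    assert(mpz_set_str(x, "-101", 2) == 0);   /* binary, negative */
    assert(mpz_get_si(x) == -5);

    s = mpz_get_str(NULL, 10, x);             /* mini-gmp allocates the buffer */
    assert(strcmp(s, "-5") == 0);
    free(s);                                  /* default allocator is malloc/free */

    mpz_clear(x);
    return 0;
}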
*/ - if (endian == 1) - p += (size - 1); - - rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t); - rp = MPZ_REALLOC (r, rn); - - for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step) - { - size_t j; - for (j = 0; j < size; j++, p -= (ptrdiff_t) endian) - { - limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT); - if (bytes == sizeof(mp_limb_t)) - { - rp[i++] = limb; - bytes = 0; - limb = 0; - } - } - } - assert (i + (bytes > 0) == rn); - if (limb != 0) - rp[i++] = limb; - else - i = mpn_normalized_size (rp, i); - - r->_mp_size = i; -} - -void * -mpz_export (void *r, size_t *countp, int order, size_t size, int endian, - size_t nails, const mpz_t u) -{ - size_t count; - mp_size_t un; - - if (nails != 0) - gmp_die ("mpz_export: Nails not supported."); - - assert (order == 1 || order == -1); - assert (endian >= -1 && endian <= 1); - assert (size > 0 || u->_mp_size == 0); - - un = u->_mp_size; - count = 0; - if (un != 0) - { - size_t k; - unsigned char *p; - ptrdiff_t word_step; - /* The current (partial) limb. */ - mp_limb_t limb; - /* The number of bytes left to do in this limb. */ - size_t bytes; - /* The index where the limb was read. */ - mp_size_t i; - - un = GMP_ABS (un); - - /* Count bytes in top limb. */ - limb = u->_mp_d[un-1]; - assert (limb != 0); - - k = (GMP_LIMB_BITS <= CHAR_BIT); - if (!k) - { - do { - int LOCAL_CHAR_BIT = CHAR_BIT; - k++; limb >>= LOCAL_CHAR_BIT; - } while (limb != 0); - } - /* else limb = 0; */ - - count = (k + (un-1) * sizeof (mp_limb_t) + size - 1) / size; - - if (!r) - r = gmp_alloc (count * size); - - if (endian == 0) - endian = gmp_detect_endian (); - - p = (unsigned char *) r; - - word_step = (order != endian) ? 2 * size : 0; - - /* Process bytes from the least significant end, so point p at the - least significant word. */ - if (order == 1) - { - p += size * (count - 1); - word_step = - word_step; - } - - /* And at least significant byte of that word. */ - if (endian == 1) - p += (size - 1); - - for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step) - { - size_t j; - for (j = 0; j < size; ++j, p -= (ptrdiff_t) endian) - { - if (sizeof (mp_limb_t) == 1) - { - if (i < un) - *p = u->_mp_d[i++]; - else - *p = 0; - } - else - { - int LOCAL_CHAR_BIT = CHAR_BIT; - if (bytes == 0) - { - if (i < un) - limb = u->_mp_d[i++]; - bytes = sizeof (mp_limb_t); - } - *p = limb; - limb >>= LOCAL_CHAR_BIT; - bytes--; - } - } - } - assert (i == un); - assert (k == count); - } - - if (countp) - *countp = count; - - return r; -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp.h deleted file mode 100644 index f28cb360ce..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mini-gmp.h +++ /dev/null @@ -1,311 +0,0 @@ -/* mini-gmp, a minimalistic implementation of a GNU GMP subset. - -Copyright 2011-2015, 2017, 2019-2021 Free Software Foundation, Inc. - -This file is part of the GNU MP Library. - -The GNU MP Library is free software; you can redistribute it and/or modify -it under the terms of either: - - * the GNU Lesser General Public License as published by the Free - Software Foundation; either version 3 of the License, or (at your - option) any later version. - -or - - * the GNU General Public License as published by the Free Software - Foundation; either version 2 of the License, or (at your option) any - later version. - -or both in parallel, as here. 
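mpz_import and mpz_export above (nails are not supported) are the byte-string conversions callers use to serialize big integers; the order, size and endian arguments select word order, word size and byte order within a word. A minimal big-endian round trip, assuming a build linked against mini-gmp or GMP:

#include <assert.h>
#include <string.h>
#include "mini-gmp.h"

int main(void)
{
    const unsigned char in[4] = { 0x01, 0x02, 0x03, 0x04 };  /* big-endian bytes */
    unsigned char out[4] = { 0 };
    size_t count = 0;
    mpz_t x;

    mpz_init(x);
    /* order=1 (most significant word first), size=1 byte, endian=1, nails=0 */
    mpz_import(x, sizeof in, 1, 1, 1, 0, in);
    assert(mpz_cmp_ui(x, 0x01020304UL) == 0);

    mpz_export(out, &count, 1, 1, 1, 0, x);
    assert(count == 4 && memcmp(out, in, 4) == 0);

    mpz_clear(x);
    return 0;
}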
- -The GNU MP Library is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -for more details. - -You should have received copies of the GNU General Public License and the -GNU Lesser General Public License along with the GNU MP Library. If not, -see https://www.gnu.org/licenses/. */ - -/* About mini-gmp: This is a minimal implementation of a subset of the - GMP interface. It is intended for inclusion into applications which - have modest bignums needs, as a fallback when the real GMP library - is not installed. - - This file defines the public interface. */ - -#ifndef __MINI_GMP_H__ -#define __MINI_GMP_H__ - -/* For size_t */ -#include - -#if defined (__cplusplus) -extern "C" { -#endif - -void mp_set_memory_functions (void *(*) (size_t), - void *(*) (void *, size_t, size_t), - void (*) (void *, size_t)); - -void mp_get_memory_functions (void *(**) (size_t), - void *(**) (void *, size_t, size_t), - void (**) (void *, size_t)); - -#ifndef MINI_GMP_LIMB_TYPE -#define MINI_GMP_LIMB_TYPE long -#endif - -typedef unsigned MINI_GMP_LIMB_TYPE mp_limb_t; -typedef long mp_size_t; -typedef unsigned long mp_bitcnt_t; - -typedef mp_limb_t *mp_ptr; -typedef const mp_limb_t *mp_srcptr; - -typedef struct -{ - int _mp_alloc; /* Number of *limbs* allocated and pointed - to by the _mp_d field. */ - int _mp_size; /* abs(_mp_size) is the number of limbs the - last field points to. If _mp_size is - negative this is a negative number. */ - mp_limb_t *_mp_d; /* Pointer to the limbs. */ -} __mpz_struct; - -typedef __mpz_struct mpz_t[1]; - -typedef __mpz_struct *mpz_ptr; -typedef const __mpz_struct *mpz_srcptr; - -extern const int mp_bits_per_limb; - -void mpn_copyi (mp_ptr, mp_srcptr, mp_size_t); -void mpn_copyd (mp_ptr, mp_srcptr, mp_size_t); -void mpn_zero (mp_ptr, mp_size_t); - -int mpn_cmp (mp_srcptr, mp_srcptr, mp_size_t); -int mpn_zero_p (mp_srcptr, mp_size_t); - -mp_limb_t mpn_add_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_add_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); -mp_limb_t mpn_add (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); - -mp_limb_t mpn_sub_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_sub_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); -mp_limb_t mpn_sub (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); - -mp_limb_t mpn_mul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_addmul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_submul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); - -mp_limb_t mpn_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); -void mpn_mul_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); -void mpn_sqr (mp_ptr, mp_srcptr, mp_size_t); -int mpn_perfect_square_p (mp_srcptr, mp_size_t); -mp_size_t mpn_sqrtrem (mp_ptr, mp_ptr, mp_srcptr, mp_size_t); -mp_size_t mpn_gcd (mp_ptr, mp_ptr, mp_size_t, mp_ptr, mp_size_t); - -mp_limb_t mpn_lshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); -mp_limb_t mpn_rshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); - -mp_bitcnt_t mpn_scan0 (mp_srcptr, mp_bitcnt_t); -mp_bitcnt_t mpn_scan1 (mp_srcptr, mp_bitcnt_t); - -void mpn_com (mp_ptr, mp_srcptr, mp_size_t); -mp_limb_t mpn_neg (mp_ptr, mp_srcptr, mp_size_t); - -mp_bitcnt_t mpn_popcount (mp_srcptr, mp_size_t); - -mp_limb_t mpn_invert_3by2 (mp_limb_t, mp_limb_t); -#define mpn_invert_limb(x) mpn_invert_3by2 ((x), 0) - -size_t mpn_get_str (unsigned char *, int, mp_ptr, mp_size_t); 
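The header deleted above exposes mp_set_memory_functions / mp_get_memory_functions, so an embedding project can route every limb allocation through its own allocator. A minimal sketch (the wrapper names are illustrative; the reallocate and free hooks also receive the old block size, which this sketch simply ignores):

#include <stdlib.h>
#include "mini-gmp.h"

static void *my_alloc(size_t n) { return malloc(n); }

static void *my_realloc(void *p, size_t old_n, size_t new_n)
{
    (void) old_n;                         /* old size not needed by realloc */
    return realloc(p, new_n);
}

static void my_free(void *p, size_t n)
{
    (void) n;                             /* size not needed by free */
    free(p);
}

int main(void)
{
    mp_set_memory_functions(my_alloc, my_realloc, my_free);

    mpz_t x;
    mpz_init_set_ui(x, 123456789);
    mpz_mul(x, x, x);                     /* limb allocations now use the hooks */
    mpz_clear(x);
    return 0;
}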
-mp_size_t mpn_set_str (mp_ptr, const unsigned char *, size_t, int); - -void mpz_init (mpz_t); -void mpz_init2 (mpz_t, mp_bitcnt_t); -void mpz_clear (mpz_t); - -#define mpz_odd_p(z) (((z)->_mp_size != 0) & (int) (z)->_mp_d[0]) -#define mpz_even_p(z) (! mpz_odd_p (z)) - -int mpz_sgn (const mpz_t); -int mpz_cmp_si (const mpz_t, long); -int mpz_cmp_ui (const mpz_t, unsigned long); -int mpz_cmp (const mpz_t, const mpz_t); -int mpz_cmpabs_ui (const mpz_t, unsigned long); -int mpz_cmpabs (const mpz_t, const mpz_t); -int mpz_cmp_d (const mpz_t, double); -int mpz_cmpabs_d (const mpz_t, double); - -void mpz_abs (mpz_t, const mpz_t); -void mpz_neg (mpz_t, const mpz_t); -void mpz_swap (mpz_t, mpz_t); - -void mpz_add_ui (mpz_t, const mpz_t, unsigned long); -void mpz_add (mpz_t, const mpz_t, const mpz_t); -void mpz_sub_ui (mpz_t, const mpz_t, unsigned long); -void mpz_ui_sub (mpz_t, unsigned long, const mpz_t); -void mpz_sub (mpz_t, const mpz_t, const mpz_t); - -void mpz_mul_si (mpz_t, const mpz_t, long int); -void mpz_mul_ui (mpz_t, const mpz_t, unsigned long int); -void mpz_mul (mpz_t, const mpz_t, const mpz_t); -void mpz_mul_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_addmul_ui (mpz_t, const mpz_t, unsigned long int); -void mpz_addmul (mpz_t, const mpz_t, const mpz_t); -void mpz_submul_ui (mpz_t, const mpz_t, unsigned long int); -void mpz_submul (mpz_t, const mpz_t, const mpz_t); - -void mpz_cdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_fdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_tdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_cdiv_q (mpz_t, const mpz_t, const mpz_t); -void mpz_fdiv_q (mpz_t, const mpz_t, const mpz_t); -void mpz_tdiv_q (mpz_t, const mpz_t, const mpz_t); -void mpz_cdiv_r (mpz_t, const mpz_t, const mpz_t); -void mpz_fdiv_r (mpz_t, const mpz_t, const mpz_t); -void mpz_tdiv_r (mpz_t, const mpz_t, const mpz_t); - -void mpz_cdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_fdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_tdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_cdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_fdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_tdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); - -void mpz_mod (mpz_t, const mpz_t, const mpz_t); - -void mpz_divexact (mpz_t, const mpz_t, const mpz_t); - -int mpz_divisible_p (const mpz_t, const mpz_t); -int mpz_congruent_p (const mpz_t, const mpz_t, const mpz_t); - -unsigned long mpz_cdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); -unsigned long mpz_fdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); -unsigned long mpz_tdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); -unsigned long mpz_cdiv_q_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_fdiv_q_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_tdiv_q_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_cdiv_r_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_fdiv_r_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_tdiv_r_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_cdiv_ui (const mpz_t, unsigned long); -unsigned long mpz_fdiv_ui (const mpz_t, unsigned long); -unsigned long mpz_tdiv_ui (const mpz_t, unsigned long); - -unsigned long mpz_mod_ui (mpz_t, const mpz_t, unsigned long); - -void mpz_divexact_ui (mpz_t, const mpz_t, unsigned long); - -int mpz_divisible_ui_p (const mpz_t, unsigned long); - -unsigned long mpz_gcd_ui (mpz_t, const mpz_t, unsigned long); -void mpz_gcd (mpz_t, const mpz_t, const mpz_t); -void 
mpz_gcdext (mpz_t, mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_lcm_ui (mpz_t, const mpz_t, unsigned long); -void mpz_lcm (mpz_t, const mpz_t, const mpz_t); -int mpz_invert (mpz_t, const mpz_t, const mpz_t); - -void mpz_sqrtrem (mpz_t, mpz_t, const mpz_t); -void mpz_sqrt (mpz_t, const mpz_t); -int mpz_perfect_square_p (const mpz_t); - -void mpz_pow_ui (mpz_t, const mpz_t, unsigned long); -void mpz_ui_pow_ui (mpz_t, unsigned long, unsigned long); -void mpz_powm (mpz_t, const mpz_t, const mpz_t, const mpz_t); -void mpz_powm_ui (mpz_t, const mpz_t, unsigned long, const mpz_t); - -void mpz_rootrem (mpz_t, mpz_t, const mpz_t, unsigned long); -int mpz_root (mpz_t, const mpz_t, unsigned long); - -void mpz_fac_ui (mpz_t, unsigned long); -void mpz_2fac_ui (mpz_t, unsigned long); -void mpz_mfac_uiui (mpz_t, unsigned long, unsigned long); -void mpz_bin_uiui (mpz_t, unsigned long, unsigned long); - -int mpz_probab_prime_p (const mpz_t, int); - -int mpz_tstbit (const mpz_t, mp_bitcnt_t); -void mpz_setbit (mpz_t, mp_bitcnt_t); -void mpz_clrbit (mpz_t, mp_bitcnt_t); -void mpz_combit (mpz_t, mp_bitcnt_t); - -void mpz_com (mpz_t, const mpz_t); -void mpz_and (mpz_t, const mpz_t, const mpz_t); -void mpz_ior (mpz_t, const mpz_t, const mpz_t); -void mpz_xor (mpz_t, const mpz_t, const mpz_t); - -mp_bitcnt_t mpz_popcount (const mpz_t); -mp_bitcnt_t mpz_hamdist (const mpz_t, const mpz_t); -mp_bitcnt_t mpz_scan0 (const mpz_t, mp_bitcnt_t); -mp_bitcnt_t mpz_scan1 (const mpz_t, mp_bitcnt_t); - -int mpz_fits_slong_p (const mpz_t); -int mpz_fits_ulong_p (const mpz_t); -int mpz_fits_sint_p (const mpz_t); -int mpz_fits_uint_p (const mpz_t); -int mpz_fits_sshort_p (const mpz_t); -int mpz_fits_ushort_p (const mpz_t); -long int mpz_get_si (const mpz_t); -unsigned long int mpz_get_ui (const mpz_t); -double mpz_get_d (const mpz_t); -size_t mpz_size (const mpz_t); -mp_limb_t mpz_getlimbn (const mpz_t, mp_size_t); - -void mpz_realloc2 (mpz_t, mp_bitcnt_t); -mp_srcptr mpz_limbs_read (mpz_srcptr); -mp_ptr mpz_limbs_modify (mpz_t, mp_size_t); -mp_ptr mpz_limbs_write (mpz_t, mp_size_t); -void mpz_limbs_finish (mpz_t, mp_size_t); -mpz_srcptr mpz_roinit_n (mpz_t, mp_srcptr, mp_size_t); - -#define MPZ_ROINIT_N(xp, xs) {{0, (xs),(xp) }} - -void mpz_set_si (mpz_t, signed long int); -void mpz_set_ui (mpz_t, unsigned long int); -void mpz_set (mpz_t, const mpz_t); -void mpz_set_d (mpz_t, double); - -void mpz_init_set_si (mpz_t, signed long int); -void mpz_init_set_ui (mpz_t, unsigned long int); -void mpz_init_set (mpz_t, const mpz_t); -void mpz_init_set_d (mpz_t, double); - -size_t mpz_sizeinbase (const mpz_t, int); -char *mpz_get_str (char *, int, const mpz_t); -int mpz_set_str (mpz_t, const char *, int); -int mpz_init_set_str (mpz_t, const char *, int); - -/* This long list taken from gmp.h. */ -/* For reference, "defined(EOF)" cannot be used here. In g++ 2.95.4, - defines EOF but not FILE. 
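MPZ_ROINIT_N and mpz_roinit_n above let callers wrap an existing, normalized limb array as a read-only mpz_t with no allocation (the _mp_alloc field stays 0); the deleted mpn_perfect_square_p and gmp_stronglucas use the internal variant of the same idea. A minimal sketch, assuming a build linked against mini-gmp or GMP:

#include <assert.h>
#include "mini-gmp.h"

int main(void)
{
    /* One normalized limb; the wrapped value is 5. */
    mp_limb_t limbs[1] = { 5 };
    mpz_t ro = MPZ_ROINIT_N(limbs, 1);   /* read-only view: _mp_alloc == 0 */

    mpz_t r;
    mpz_init(r);
    mpz_mul_ui(r, ro, 7);                /* ro is used as a source operand only */
    assert(mpz_cmp_ui(r, 35) == 0);

    mpz_clear(r);
    return 0;
}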
*/ -#if defined (FILE) \ - || defined (H_STDIO) \ - || defined (_H_STDIO) /* AIX */ \ - || defined (_STDIO_H) /* glibc, Sun, SCO */ \ - || defined (_STDIO_H_) /* BSD, OSF */ \ - || defined (__STDIO_H) /* Borland */ \ - || defined (__STDIO_H__) /* IRIX */ \ - || defined (_STDIO_INCLUDED) /* HPUX */ \ - || defined (__dj_include_stdio_h_) /* DJGPP */ \ - || defined (_FILE_DEFINED) /* Microsoft */ \ - || defined (__STDIO__) /* Apple MPW MrC */ \ - || defined (_MSL_STDIO_H) /* Metrowerks */ \ - || defined (_STDIO_H_INCLUDED) /* QNX4 */ \ - || defined (_ISO_STDIO_ISO_H) /* Sun C++ */ \ - || defined (__STDIO_LOADED) /* VMS */ \ - || defined (_STDIO) /* HPE NonStop */ \ - || defined (__DEFINED_FILE) /* musl */ -size_t mpz_out_str (FILE *, int, const mpz_t); -#endif - -void mpz_import (mpz_t, size_t, int, size_t, int, size_t, const void *); -void *mpz_export (void *, size_t *, int, size_t, int, size_t, const mpz_t); - -#if defined (__cplusplus) -} -#endif -#endif /* __MINI_GMP_H__ */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign_namespace.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign_namespace.h index bbfe72c13b..54e90326be 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign_namespace.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign_namespace.h @@ -18,6 +18,12 @@ #define PARAM_JOIN2(a, b) PARAM_JOIN2_(a, b) #define PARAM_NAME2(end, s) PARAM_JOIN2(end, s) +#ifndef DISABLE_NAMESPACING +#define SQISIGN_NAMESPACE_GENERIC(s) PARAM_NAME2(gen, s) +#else +#define SQISIGN_NAMESPACE_GENERIC(s) s +#endif + #if defined(SQISIGN_VARIANT) && !defined(DISABLE_NAMESPACING) #if defined(SQISIGN_BUILD_TYPE_REF) #define SQISIGN_NAMESPACE(s) PARAM_NAME3(ref, s) @@ -94,6 +100,16 @@ #define lift_basis SQISIGN_NAMESPACE(lift_basis) #define lift_basis_normalized SQISIGN_NAMESPACE(lift_basis_normalized) +// Namespacing symbols exported from basis.c, ec.c: +#undef xDBL_E0 + +#define xDBL_E0 SQISIGN_NAMESPACE(xDBL_E0) + +// Namespacing symbols exported from basis.c, ec.c, isog_chains.c: +#undef xDBL_A24 + +#define xDBL_A24 SQISIGN_NAMESPACE(xDBL_A24) + // Namespacing symbols exported from biextension.c: #undef clear_cofac #undef ec_dlog_2_tate @@ -109,6 +125,11 @@ #define reduced_tate SQISIGN_NAMESPACE(reduced_tate) #define weil SQISIGN_NAMESPACE(weil) +// Namespacing symbols exported from biextension.c, ec_jac.c, hd.c: +#undef ADD + +#define ADD SQISIGN_NAMESPACE(ADD) + // Namespacing symbols exported from common.c: #undef hash_to_challenge #undef public_key_finalize @@ -148,6 +169,28 @@ #define find_uv SQISIGN_NAMESPACE(find_uv) #define fixed_degree_isogeny_and_eval SQISIGN_NAMESPACE(fixed_degree_isogeny_and_eval) +// Namespacing symbols exported from dim2id2iso.c, encode_signature.c, id2iso.c, keygen.c, quaternion_data.c, sign.c: +#undef EXTREMAL_ORDERS +#undef QUATALG_PINFTY + +#define EXTREMAL_ORDERS SQISIGN_NAMESPACE(EXTREMAL_ORDERS) +#define QUATALG_PINFTY SQISIGN_NAMESPACE(QUATALG_PINFTY) + +// Namespacing symbols exported from dim2id2iso.c, endomorphism_action.c, id2iso.c: +#undef CURVES_WITH_ENDOMORPHISMS + +#define CURVES_WITH_ENDOMORPHISMS SQISIGN_NAMESPACE(CURVES_WITH_ENDOMORPHISMS) + +// Namespacing symbols exported from dim2id2iso.c, id2iso.c, sign.c, torsion_constants.c: +#undef TORSION_PLUS_2POWER + +#define TORSION_PLUS_2POWER SQISIGN_NAMESPACE(TORSION_PLUS_2POWER) + +// Namespacing symbols exported from dim2id2iso.c, quaternion_data.c: +#undef CONNECTING_IDEALS + +#define CONNECTING_IDEALS SQISIGN_NAMESPACE(CONNECTING_IDEALS) + // Namespacing symbols 
exported from dim4.c: #undef ibz_inv_dim4_make_coeff_mpm #undef ibz_inv_dim4_make_coeff_pmp @@ -207,6 +250,13 @@ #define ibz_vec_4_sub SQISIGN_NAMESPACE(ibz_vec_4_sub) #define quat_qf_eval SQISIGN_NAMESPACE(quat_qf_eval) +// Namespacing symbols exported from e0_basis.c: +#undef BASIS_E0_PX +#undef BASIS_E0_QX + +#define BASIS_E0_PX SQISIGN_NAMESPACE(BASIS_E0_PX) +#define BASIS_E0_QX SQISIGN_NAMESPACE(BASIS_E0_QX) + // Namespacing symbols exported from ec.c: #undef cswap_points #undef ec_biscalar_mul @@ -235,8 +285,6 @@ #undef xDBL #undef xDBLADD #undef xDBLMUL -#undef xDBL_A24 -#undef xDBL_E0 #undef xMUL #define cswap_points SQISIGN_NAMESPACE(cswap_points) @@ -266,14 +314,9 @@ #define xDBL SQISIGN_NAMESPACE(xDBL) #define xDBLADD SQISIGN_NAMESPACE(xDBLADD) #define xDBLMUL SQISIGN_NAMESPACE(xDBLMUL) -#define xDBL_A24 SQISIGN_NAMESPACE(xDBL_A24) -#define xDBL_E0 SQISIGN_NAMESPACE(xDBL_E0) #define xMUL SQISIGN_NAMESPACE(xMUL) // Namespacing symbols exported from ec_jac.c: -#undef ADD -#undef DBL -#undef DBLW #undef copy_jac_point #undef jac_from_ws #undef jac_init @@ -284,9 +327,6 @@ #undef jac_to_xz_add_components #undef select_jac_point -#define ADD SQISIGN_NAMESPACE(ADD) -#define DBL SQISIGN_NAMESPACE(DBL) -#define DBLW SQISIGN_NAMESPACE(DBLW) #define copy_jac_point SQISIGN_NAMESPACE(copy_jac_point) #define jac_from_ws SQISIGN_NAMESPACE(jac_from_ws) #define jac_init SQISIGN_NAMESPACE(jac_init) @@ -297,6 +337,21 @@ #define jac_to_xz_add_components SQISIGN_NAMESPACE(jac_to_xz_add_components) #define select_jac_point SQISIGN_NAMESPACE(select_jac_point) +// Namespacing symbols exported from ec_jac.c, hd.c: +#undef DBLW + +#define DBLW SQISIGN_NAMESPACE(DBLW) + +// Namespacing symbols exported from ec_jac.c, hd.c, theta_isogenies.c: +#undef DBL + +#define DBL SQISIGN_NAMESPACE(DBL) + +// Namespacing symbols exported from ec_params.c: +#undef p_cofactor_for_2f + +#define p_cofactor_for_2f SQISIGN_NAMESPACE(p_cofactor_for_2f) + // Namespacing symbols exported from encode_signature.c: #undef secret_key_from_bytes #undef secret_key_to_bytes @@ -455,21 +510,24 @@ #define fp_set_one SQISIGN_NAMESPACE(fp_set_one) #define fp_set_small SQISIGN_NAMESPACE(fp_set_small) #define fp_set_zero SQISIGN_NAMESPACE(fp_set_zero) -#define ONE SQISIGN_NAMESPACE(ONE) -#define ZERO SQISIGN_NAMESPACE(ZERO) // Namespacing symbols exported from fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c, gf27500.c, gf5248.c, gf65376.c: +#undef ONE +#undef ZERO #undef fp_add #undef fp_mul #undef fp_sqr #undef fp_sub +#define ONE SQISIGN_NAMESPACE(ONE) +#define ZERO SQISIGN_NAMESPACE(ZERO) #define fp_add SQISIGN_NAMESPACE(fp_add) #define fp_mul SQISIGN_NAMESPACE(fp_mul) #define fp_sqr SQISIGN_NAMESPACE(fp_sqr) #define fp_sub SQISIGN_NAMESPACE(fp_sub) // Namespacing symbols exported from gf27500.c: +#undef gf27500_MINUS_ONE #undef gf27500_decode #undef gf27500_decode_reduce #undef gf27500_div @@ -479,6 +537,7 @@ #undef gf27500_legendre #undef gf27500_sqrt +#define gf27500_MINUS_ONE SQISIGN_NAMESPACE(gf27500_MINUS_ONE) #define gf27500_decode SQISIGN_NAMESPACE(gf27500_decode) #define gf27500_decode_reduce SQISIGN_NAMESPACE(gf27500_decode_reduce) #define gf27500_div SQISIGN_NAMESPACE(gf27500_div) @@ -500,6 +559,7 @@ #define fp2_sq_c1 SQISIGN_NAMESPACE(fp2_sq_c1) // Namespacing symbols exported from gf5248.c: +#undef gf5248_MINUS_ONE #undef gf5248_decode #undef gf5248_decode_reduce #undef gf5248_div @@ -509,6 +569,7 @@ #undef gf5248_legendre #undef gf5248_sqrt +#define gf5248_MINUS_ONE SQISIGN_NAMESPACE(gf5248_MINUS_ONE) #define gf5248_decode 
SQISIGN_NAMESPACE(gf5248_decode) #define gf5248_decode_reduce SQISIGN_NAMESPACE(gf5248_decode_reduce) #define gf5248_div SQISIGN_NAMESPACE(gf5248_div) @@ -519,6 +580,7 @@ #define gf5248_sqrt SQISIGN_NAMESPACE(gf5248_sqrt) // Namespacing symbols exported from gf65376.c: +#undef gf65376_MINUS_ONE #undef gf65376_decode #undef gf65376_decode_reduce #undef gf65376_div @@ -528,6 +590,7 @@ #undef gf65376_legendre #undef gf65376_sqrt +#define gf65376_MINUS_ONE SQISIGN_NAMESPACE(gf65376_MINUS_ONE) #define gf65376_decode SQISIGN_NAMESPACE(gf65376_decode) #define gf65376_decode_reduce SQISIGN_NAMESPACE(gf65376_decode_reduce) #define gf65376_div SQISIGN_NAMESPACE(gf65376_div) @@ -554,6 +617,22 @@ #define double_couple_point SQISIGN_NAMESPACE(double_couple_point) #define double_couple_point_iter SQISIGN_NAMESPACE(double_couple_point_iter) +// Namespacing symbols exported from hd_splitting_transforms.c: +#undef CHI_EVAL + +#define CHI_EVAL SQISIGN_NAMESPACE(CHI_EVAL) + +// Namespacing symbols exported from hd_splitting_transforms.c, theta_isogenies.c: +#undef EVEN_INDEX +#undef FP2_CONSTANTS +#undef NORMALIZATION_TRANSFORMS +#undef SPLITTING_TRANSFORMS + +#define EVEN_INDEX SQISIGN_NAMESPACE(EVEN_INDEX) +#define FP2_CONSTANTS SQISIGN_NAMESPACE(FP2_CONSTANTS) +#define NORMALIZATION_TRANSFORMS SQISIGN_NAMESPACE(NORMALIZATION_TRANSFORMS) +#define SPLITTING_TRANSFORMS SQISIGN_NAMESPACE(SPLITTING_TRANSFORMS) + // Namespacing symbols exported from hnf.c: #undef ibz_mat_4x4_is_hnf #undef ibz_mat_4xn_hnf_mod_core @@ -761,6 +840,11 @@ #define secret_key_finalize SQISIGN_NAMESPACE(secret_key_finalize) #define secret_key_init SQISIGN_NAMESPACE(secret_key_init) +// Namespacing symbols exported from keygen.c, torsion_constants.c: +#undef SEC_DEGREE + +#define SEC_DEGREE SQISIGN_NAMESPACE(SEC_DEGREE) + // Namespacing symbols exported from l2.c: #undef quat_lattice_lll #undef quat_lll_core @@ -910,6 +994,16 @@ #define quat_lattice_print SQISIGN_NAMESPACE(quat_lattice_print) #define quat_left_ideal_print SQISIGN_NAMESPACE(quat_left_ideal_print) +// Namespacing symbols exported from quaternion_data.c: +#undef CONJUGATING_ELEMENTS + +#define CONJUGATING_ELEMENTS SQISIGN_NAMESPACE(CONJUGATING_ELEMENTS) + +// Namespacing symbols exported from quaternion_data.c, sign.c: +#undef QUAT_prime_cofactor + +#define QUAT_prime_cofactor SQISIGN_NAMESPACE(QUAT_prime_cofactor) + // Namespacing symbols exported from random_input_generation.c: #undef quat_test_input_random_ideal_generation #undef quat_test_input_random_ideal_lattice_generation @@ -971,6 +1065,11 @@ #define protocols_sign SQISIGN_NAMESPACE(protocols_sign) +// Namespacing symbols exported from sign.c, torsion_constants.c: +#undef COM_DEGREE + +#define COM_DEGREE SQISIGN_NAMESPACE(COM_DEGREE) + // Namespacing symbols exported from sqisign.c: #undef sqisign_keypair #undef sqisign_open @@ -1006,6 +1105,11 @@ #define is_product_theta_point SQISIGN_NAMESPACE(is_product_theta_point) #define theta_precomputation SQISIGN_NAMESPACE(theta_precomputation) +// Namespacing symbols exported from torsion_constants.c: +#undef TWO_TO_SECURITY_BITS + +#define TWO_TO_SECURITY_BITS SQISIGN_NAMESPACE(TWO_TO_SECURITY_BITS) + // Namespacing symbols exported from verify.c: #undef protocols_verify @@ -1029,45 +1133,7 @@ #define xisog_2_singular SQISIGN_NAMESPACE(xisog_2_singular) #define xisog_4 SQISIGN_NAMESPACE(xisog_4) -// Namespacing symbols from precomp: -#undef BASIS_E0_PX -#undef BASIS_E0_QX -#undef p_cofactor_for_2f -#undef CURVES_WITH_ENDOMORPHISMS -#undef EVEN_INDEX -#undef CHI_EVAL 
-#undef FP2_CONSTANTS -#undef SPLITTING_TRANSFORMS -#undef NORMALIZATION_TRANSFORMS -#undef QUAT_prime_cofactor -#undef QUATALG_PINFTY -#undef EXTREMAL_ORDERS -#undef CONNECTING_IDEALS -#undef CONJUGATING_ELEMENTS -#undef TWO_TO_SECURITY_BITS -#undef TORSION_PLUS_2POWER -#undef SEC_DEGREE -#undef COM_DEGREE - -#define BASIS_E0_PX SQISIGN_NAMESPACE(BASIS_E0_PX) -#define BASIS_E0_QX SQISIGN_NAMESPACE(BASIS_E0_QX) -#define p_cofactor_for_2f SQISIGN_NAMESPACE(p_cofactor_for_2f) -#define CURVES_WITH_ENDOMORPHISMS SQISIGN_NAMESPACE(CURVES_WITH_ENDOMORPHISMS) -#define EVEN_INDEX SQISIGN_NAMESPACE(EVEN_INDEX) -#define CHI_EVAL SQISIGN_NAMESPACE(CHI_EVAL) -#define FP2_CONSTANTS SQISIGN_NAMESPACE(FP2_CONSTANTS) -#define SPLITTING_TRANSFORMS SQISIGN_NAMESPACE(SPLITTING_TRANSFORMS) -#define NORMALIZATION_TRANSFORMS SQISIGN_NAMESPACE(NORMALIZATION_TRANSFORMS) -#define QUAT_prime_cofactor SQISIGN_NAMESPACE(QUAT_prime_cofactor) -#define QUATALG_PINFTY SQISIGN_NAMESPACE(QUATALG_PINFTY) -#define EXTREMAL_ORDERS SQISIGN_NAMESPACE(EXTREMAL_ORDERS) -#define CONNECTING_IDEALS SQISIGN_NAMESPACE(CONNECTING_IDEALS) -#define CONJUGATING_ELEMENTS SQISIGN_NAMESPACE(CONJUGATING_ELEMENTS) -#define TWO_TO_SECURITY_BITS SQISIGN_NAMESPACE(TWO_TO_SECURITY_BITS) -#define TORSION_PLUS_2POWER SQISIGN_NAMESPACE(TORSION_PLUS_2POWER) -#define SEC_DEGREE SQISIGN_NAMESPACE(SEC_DEGREE) -#define COM_DEGREE SQISIGN_NAMESPACE(COM_DEGREE) - #endif +// This file is generated by scripts/Namespace.scala, do not edit it manually! diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.c deleted file mode 100644 index 396d505aec..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.c +++ /dev/null @@ -1,73 +0,0 @@ -#include -#include -#if defined(MINI_GMP) -#include "mini-gmp.h" -#else -// This configuration is used only for testing -#include -#endif -#include - -// Exported for testing -int -mini_mpz_legendre(const mpz_t a, const mpz_t p) -{ - int res = 0; - mpz_t e; - mpz_init_set(e, p); - mpz_sub_ui(e, e, 1); - mpz_fdiv_q_2exp(e, e, 1); - mpz_powm(e, a, e, p); - - if (mpz_cmp_ui(e, 1) <= 0) { - res = mpz_get_si(e); - } else { - res = -1; - } - mpz_clear(e); - return res; -} - -#if defined(MINI_GMP) -int -mpz_legendre(const mpz_t a, const mpz_t p) -{ - return mini_mpz_legendre(a, p); -} -#endif - -// Exported for testing -double -mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op) -{ - double ret; - int tmp_exp; - mpz_t tmp; - - // Handle the case where op is 0 - if (mpz_cmp_ui(op, 0) == 0) { - *exp = 0; - return 0.0; - } - - *exp = mpz_sizeinbase(op, 2); - - mpz_init_set(tmp, op); - - if (*exp > DBL_MAX_EXP) { - mpz_fdiv_q_2exp(tmp, tmp, *exp - DBL_MAX_EXP); - } - - ret = frexp(mpz_get_d(tmp), &tmp_exp); - mpz_clear(tmp); - - return ret; -} - -#if defined(MINI_GMP) -double -mpz_get_d_2exp(signed long int *exp, const mpz_t op) -{ - return mini_mpz_get_d_2exp(exp, op); -} -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.h deleted file mode 100644 index 0113cfdfe6..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp-extra.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef MINI_GMP_EXTRA_H -#define MINI_GMP_EXTRA_H - -#if defined MINI_GMP -#include "mini-gmp.h" - -typedef long mp_exp_t; - -int mpz_legendre(const mpz_t a, const mpz_t p); -double mpz_get_d_2exp(signed long int *exp, const mpz_t 
op); -#else -// This configuration is used only for testing -#include -#endif - -int mini_mpz_legendre(const mpz_t a, const mpz_t p); -double mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op); - -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp.c deleted file mode 100644 index 3830ab2031..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp.c +++ /dev/null @@ -1,4671 +0,0 @@ -/* Note: The code from mini-gmp is modifed from the original by - commenting out the definition of GMP_LIMB_BITS */ - -/* - mini-gmp, a minimalistic implementation of a GNU GMP subset. - - Contributed to the GNU project by Niels Möller - Additional functionalities and improvements by Marco Bodrato. - -Copyright 1991-1997, 1999-2022 Free Software Foundation, Inc. - -This file is part of the GNU MP Library. - -The GNU MP Library is free software; you can redistribute it and/or modify -it under the terms of either: - - * the GNU Lesser General Public License as published by the Free - Software Foundation; either version 3 of the License, or (at your - option) any later version. - -or - - * the GNU General Public License as published by the Free Software - Foundation; either version 2 of the License, or (at your option) any - later version. - -or both in parallel, as here. - -The GNU MP Library is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -for more details. - -You should have received copies of the GNU General Public License and the -GNU Lesser General Public License along with the GNU MP Library. If not, -see https://www.gnu.org/licenses/. */ - -/* NOTE: All functions in this file which are not declared in - mini-gmp.h are internal, and are not intended to be compatible - with GMP or with future versions of mini-gmp. */ - -/* Much of the material copied from GMP files, including: gmp-impl.h, - longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c, - mpn/generic/lshift.c, mpn/generic/mul_1.c, - mpn/generic/mul_basecase.c, mpn/generic/rshift.c, - mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c, - mpn/generic/submul_1.c. */ - -#include -#include -#include -#include -#include -#include - -#include "mini-gmp.h" - -#if !defined(MINI_GMP_DONT_USE_FLOAT_H) -#include -#endif - - -/* Macros */ -/* Removed from here as it is passed as a compiler command-line definition */ -/* #define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT) */ - -#define GMP_LIMB_MAX ((mp_limb_t) ~ (mp_limb_t) 0) -#define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1)) - -#define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2)) -#define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1) - -#define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT) -#define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1)) - -#define GMP_ABS(x) ((x) >= 0 ? (x) : -(x)) -#define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1)) - -#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b)) -#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b)) - -#define GMP_CMP(a,b) (((a) > (b)) - ((a) < (b))) - -#if defined(DBL_MANT_DIG) && FLT_RADIX == 2 -#define GMP_DBL_MANT_BITS DBL_MANT_DIG -#else -#define GMP_DBL_MANT_BITS (53) -#endif - -/* Return non-zero if xp,xsize and yp,ysize overlap. - If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no - overlap. If both these are false, there's an overlap. 
*/ -#define GMP_MPN_OVERLAP_P(xp, xsize, yp, ysize) \ - ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp)) - -#define gmp_assert_nocarry(x) do { \ - mp_limb_t __cy = (x); \ - assert (__cy == 0); \ - (void) (__cy); \ - } while (0) - -#define gmp_clz(count, x) do { \ - mp_limb_t __clz_x = (x); \ - unsigned __clz_c = 0; \ - int LOCAL_SHIFT_BITS = 8; \ - if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) \ - for (; \ - (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \ - __clz_c += 8) \ - { __clz_x <<= LOCAL_SHIFT_BITS; } \ - for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++) \ - __clz_x <<= 1; \ - (count) = __clz_c; \ - } while (0) - -#define gmp_ctz(count, x) do { \ - mp_limb_t __ctz_x = (x); \ - unsigned __ctz_c = 0; \ - gmp_clz (__ctz_c, __ctz_x & - __ctz_x); \ - (count) = GMP_LIMB_BITS - 1 - __ctz_c; \ - } while (0) - -#define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \ - do { \ - mp_limb_t __x; \ - __x = (al) + (bl); \ - (sh) = (ah) + (bh) + (__x < (al)); \ - (sl) = __x; \ - } while (0) - -#define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \ - do { \ - mp_limb_t __x; \ - __x = (al) - (bl); \ - (sh) = (ah) - (bh) - ((al) < (bl)); \ - (sl) = __x; \ - } while (0) - -#define gmp_umul_ppmm(w1, w0, u, v) \ - do { \ - int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; \ - if (sizeof(unsigned int) * CHAR_BIT >= 2 * GMP_LIMB_BITS) \ - { \ - unsigned int __ww = (unsigned int) (u) * (v); \ - w0 = (mp_limb_t) __ww; \ - w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ - } \ - else if (GMP_ULONG_BITS >= 2 * GMP_LIMB_BITS) \ - { \ - unsigned long int __ww = (unsigned long int) (u) * (v); \ - w0 = (mp_limb_t) __ww; \ - w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ - } \ - else { \ - mp_limb_t __x0, __x1, __x2, __x3; \ - unsigned __ul, __vl, __uh, __vh; \ - mp_limb_t __u = (u), __v = (v); \ - assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); \ - \ - __ul = __u & GMP_LLIMB_MASK; \ - __uh = __u >> (GMP_LIMB_BITS / 2); \ - __vl = __v & GMP_LLIMB_MASK; \ - __vh = __v >> (GMP_LIMB_BITS / 2); \ - \ - __x0 = (mp_limb_t) __ul * __vl; \ - __x1 = (mp_limb_t) __ul * __vh; \ - __x2 = (mp_limb_t) __uh * __vl; \ - __x3 = (mp_limb_t) __uh * __vh; \ - \ - __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */ \ - __x1 += __x2; /* but this indeed can */ \ - if (__x1 < __x2) /* did we get it? */ \ - __x3 += GMP_HLIMB_BIT; /* yes, add it in the proper pos. */ \ - \ - (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \ - (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \ - } \ - } while (0) - -/* If mp_limb_t is of size smaller than int, plain u*v implies - automatic promotion to *signed* int, and then multiply may overflow - and cause undefined behavior. Explicitly cast to unsigned int for - that case. */ -#define gmp_umullo_limb(u, v) \ - ((sizeof(mp_limb_t) >= sizeof(int)) ? 
(u)*(v) : (unsigned int)(u) * (v)) - -#define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di) \ - do { \ - mp_limb_t _qh, _ql, _r, _mask; \ - gmp_umul_ppmm (_qh, _ql, (nh), (di)); \ - gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \ - _r = (nl) - gmp_umullo_limb (_qh, (d)); \ - _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \ - _qh += _mask; \ - _r += _mask & (d); \ - if (_r >= (d)) \ - { \ - _r -= (d); \ - _qh++; \ - } \ - \ - (r) = _r; \ - (q) = _qh; \ - } while (0) - -#define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \ - do { \ - mp_limb_t _q0, _t1, _t0, _mask; \ - gmp_umul_ppmm ((q), _q0, (n2), (dinv)); \ - gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \ - \ - /* Compute the two most significant limbs of n - q'd */ \ - (r1) = (n1) - gmp_umullo_limb ((d1), (q)); \ - gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \ - gmp_umul_ppmm (_t1, _t0, (d0), (q)); \ - gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \ - (q)++; \ - \ - /* Conditionally adjust q and the remainders */ \ - _mask = - (mp_limb_t) ((r1) >= _q0); \ - (q) += _mask; \ - gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \ - if ((r1) >= (d1)) \ - { \ - if ((r1) > (d1) || (r0) >= (d0)) \ - { \ - (q)++; \ - gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \ - } \ - } \ - } while (0) - -/* Swap macros. */ -#define MP_LIMB_T_SWAP(x, y) \ - do { \ - mp_limb_t __mp_limb_t_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_limb_t_swap__tmp; \ - } while (0) -#define MP_SIZE_T_SWAP(x, y) \ - do { \ - mp_size_t __mp_size_t_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_size_t_swap__tmp; \ - } while (0) -#define MP_BITCNT_T_SWAP(x,y) \ - do { \ - mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_bitcnt_t_swap__tmp; \ - } while (0) -#define MP_PTR_SWAP(x, y) \ - do { \ - mp_ptr __mp_ptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_ptr_swap__tmp; \ - } while (0) -#define MP_SRCPTR_SWAP(x, y) \ - do { \ - mp_srcptr __mp_srcptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_srcptr_swap__tmp; \ - } while (0) - -#define MPN_PTR_SWAP(xp,xs, yp,ys) \ - do { \ - MP_PTR_SWAP (xp, yp); \ - MP_SIZE_T_SWAP (xs, ys); \ - } while(0) -#define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \ - do { \ - MP_SRCPTR_SWAP (xp, yp); \ - MP_SIZE_T_SWAP (xs, ys); \ - } while(0) - -#define MPZ_PTR_SWAP(x, y) \ - do { \ - mpz_ptr __mpz_ptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mpz_ptr_swap__tmp; \ - } while (0) -#define MPZ_SRCPTR_SWAP(x, y) \ - do { \ - mpz_srcptr __mpz_srcptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mpz_srcptr_swap__tmp; \ - } while (0) - -const int mp_bits_per_limb = GMP_LIMB_BITS; - - -/* Memory allocation and other helper functions. 
*/ -static void -gmp_die (const char *msg) -{ - fprintf (stderr, "%s\n", msg); - abort(); -} - -static void * -gmp_default_alloc (size_t size) -{ - void *p; - - assert (size > 0); - - p = malloc (size); - if (!p) - gmp_die("gmp_default_alloc: Virtual memory exhausted."); - - return p; -} - -static void * -gmp_default_realloc (void *old, size_t unused_old_size, size_t new_size) -{ - void * p; - - p = realloc (old, new_size); - - if (!p) - gmp_die("gmp_default_realloc: Virtual memory exhausted."); - - return p; -} - -static void -gmp_default_free (void *p, size_t unused_size) -{ - free (p); -} - -static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc; -static void * (*gmp_reallocate_func) (void *, size_t, size_t) = gmp_default_realloc; -static void (*gmp_free_func) (void *, size_t) = gmp_default_free; - -void -mp_get_memory_functions (void *(**alloc_func) (size_t), - void *(**realloc_func) (void *, size_t, size_t), - void (**free_func) (void *, size_t)) -{ - if (alloc_func) - *alloc_func = gmp_allocate_func; - - if (realloc_func) - *realloc_func = gmp_reallocate_func; - - if (free_func) - *free_func = gmp_free_func; -} - -void -mp_set_memory_functions (void *(*alloc_func) (size_t), - void *(*realloc_func) (void *, size_t, size_t), - void (*free_func) (void *, size_t)) -{ - if (!alloc_func) - alloc_func = gmp_default_alloc; - if (!realloc_func) - realloc_func = gmp_default_realloc; - if (!free_func) - free_func = gmp_default_free; - - gmp_allocate_func = alloc_func; - gmp_reallocate_func = realloc_func; - gmp_free_func = free_func; -} - -#define gmp_alloc(size) ((*gmp_allocate_func)((size))) -#define gmp_free(p, size) ((*gmp_free_func) ((p), (size))) -#define gmp_realloc(ptr, old_size, size) ((*gmp_reallocate_func)(ptr, old_size, size)) - -static mp_ptr -gmp_alloc_limbs (mp_size_t size) -{ - return (mp_ptr) gmp_alloc (size * sizeof (mp_limb_t)); -} - -static mp_ptr -gmp_realloc_limbs (mp_ptr old, mp_size_t old_size, mp_size_t size) -{ - assert (size > 0); - return (mp_ptr) gmp_realloc (old, old_size * sizeof (mp_limb_t), size * sizeof (mp_limb_t)); -} - -static void -gmp_free_limbs (mp_ptr old, mp_size_t size) -{ - gmp_free (old, size * sizeof (mp_limb_t)); -} - - -/* MPN interface */ - -void -mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n) -{ - mp_size_t i; - for (i = 0; i < n; i++) - d[i] = s[i]; -} - -void -mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n) -{ - while (--n >= 0) - d[n] = s[n]; -} - -int -mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - while (--n >= 0) - { - if (ap[n] != bp[n]) - return ap[n] > bp[n] ? 1 : -1; - } - return 0; -} - -static int -mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) -{ - if (an != bn) - return an < bn ? 
-1 : 1; - else - return mpn_cmp (ap, bp, an); -} - -static mp_size_t -mpn_normalized_size (mp_srcptr xp, mp_size_t n) -{ - while (n > 0 && xp[n-1] == 0) - --n; - return n; -} - -int -mpn_zero_p(mp_srcptr rp, mp_size_t n) -{ - return mpn_normalized_size (rp, n) == 0; -} - -void -mpn_zero (mp_ptr rp, mp_size_t n) -{ - while (--n >= 0) - rp[n] = 0; -} - -mp_limb_t -mpn_add_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) -{ - mp_size_t i; - - assert (n > 0); - i = 0; - do - { - mp_limb_t r = ap[i] + b; - /* Carry out */ - b = (r < b); - rp[i] = r; - } - while (++i < n); - - return b; -} - -mp_limb_t -mpn_add_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - mp_size_t i; - mp_limb_t cy; - - for (i = 0, cy = 0; i < n; i++) - { - mp_limb_t a, b, r; - a = ap[i]; b = bp[i]; - r = a + cy; - cy = (r < cy); - r += b; - cy += (r < b); - rp[i] = r; - } - return cy; -} - -mp_limb_t -mpn_add (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) -{ - mp_limb_t cy; - - assert (an >= bn); - - cy = mpn_add_n (rp, ap, bp, bn); - if (an > bn) - cy = mpn_add_1 (rp + bn, ap + bn, an - bn, cy); - return cy; -} - -mp_limb_t -mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) -{ - mp_size_t i; - - assert (n > 0); - - i = 0; - do - { - mp_limb_t a = ap[i]; - /* Carry out */ - mp_limb_t cy = a < b; - rp[i] = a - b; - b = cy; - } - while (++i < n); - - return b; -} - -mp_limb_t -mpn_sub_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - mp_size_t i; - mp_limb_t cy; - - for (i = 0, cy = 0; i < n; i++) - { - mp_limb_t a, b; - a = ap[i]; b = bp[i]; - b += cy; - cy = (b < cy); - cy += (a < b); - rp[i] = a - b; - } - return cy; -} - -mp_limb_t -mpn_sub (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) -{ - mp_limb_t cy; - - assert (an >= bn); - - cy = mpn_sub_n (rp, ap, bp, bn); - if (an > bn) - cy = mpn_sub_1 (rp + bn, ap + bn, an - bn, cy); - return cy; -} - -mp_limb_t -mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) -{ - mp_limb_t ul, cl, hpl, lpl; - - assert (n >= 1); - - cl = 0; - do - { - ul = *up++; - gmp_umul_ppmm (hpl, lpl, ul, vl); - - lpl += cl; - cl = (lpl < cl) + hpl; - - *rp++ = lpl; - } - while (--n != 0); - - return cl; -} - -mp_limb_t -mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) -{ - mp_limb_t ul, cl, hpl, lpl, rl; - - assert (n >= 1); - - cl = 0; - do - { - ul = *up++; - gmp_umul_ppmm (hpl, lpl, ul, vl); - - lpl += cl; - cl = (lpl < cl) + hpl; - - rl = *rp; - lpl = rl + lpl; - cl += lpl < rl; - *rp++ = lpl; - } - while (--n != 0); - - return cl; -} - -mp_limb_t -mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) -{ - mp_limb_t ul, cl, hpl, lpl, rl; - - assert (n >= 1); - - cl = 0; - do - { - ul = *up++; - gmp_umul_ppmm (hpl, lpl, ul, vl); - - lpl += cl; - cl = (lpl < cl) + hpl; - - rl = *rp; - lpl = rl - lpl; - cl += lpl > rl; - *rp++ = lpl; - } - while (--n != 0); - - return cl; -} - -mp_limb_t -mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn) -{ - assert (un >= vn); - assert (vn >= 1); - assert (!GMP_MPN_OVERLAP_P(rp, un + vn, up, un)); - assert (!GMP_MPN_OVERLAP_P(rp, un + vn, vp, vn)); - - /* We first multiply by the low order limb. This result can be - stored, not added, to rp. We also avoid a loop for zeroing this - way. */ - - rp[un] = mpn_mul_1 (rp, up, un, vp[0]); - - /* Now accumulate the product of up[] and the next higher limb from - vp[]. 
*/ - - while (--vn >= 1) - { - rp += 1, vp += 1; - rp[un] = mpn_addmul_1 (rp, up, un, vp[0]); - } - return rp[un]; -} - -void -mpn_mul_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - mpn_mul (rp, ap, n, bp, n); -} - -void -mpn_sqr (mp_ptr rp, mp_srcptr ap, mp_size_t n) -{ - mpn_mul (rp, ap, n, ap, n); -} - -mp_limb_t -mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) -{ - mp_limb_t high_limb, low_limb; - unsigned int tnc; - mp_limb_t retval; - - assert (n >= 1); - assert (cnt >= 1); - assert (cnt < GMP_LIMB_BITS); - - up += n; - rp += n; - - tnc = GMP_LIMB_BITS - cnt; - low_limb = *--up; - retval = low_limb >> tnc; - high_limb = (low_limb << cnt); - - while (--n != 0) - { - low_limb = *--up; - *--rp = high_limb | (low_limb >> tnc); - high_limb = (low_limb << cnt); - } - *--rp = high_limb; - - return retval; -} - -mp_limb_t -mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) -{ - mp_limb_t high_limb, low_limb; - unsigned int tnc; - mp_limb_t retval; - - assert (n >= 1); - assert (cnt >= 1); - assert (cnt < GMP_LIMB_BITS); - - tnc = GMP_LIMB_BITS - cnt; - high_limb = *up++; - retval = (high_limb << tnc); - low_limb = high_limb >> cnt; - - while (--n != 0) - { - high_limb = *up++; - *rp++ = low_limb | (high_limb << tnc); - low_limb = high_limb >> cnt; - } - *rp = low_limb; - - return retval; -} - -static mp_bitcnt_t -mpn_common_scan (mp_limb_t limb, mp_size_t i, mp_srcptr up, mp_size_t un, - mp_limb_t ux) -{ - unsigned cnt; - - assert (ux == 0 || ux == GMP_LIMB_MAX); - assert (0 <= i && i <= un ); - - while (limb == 0) - { - i++; - if (i == un) - return (ux == 0 ? ~(mp_bitcnt_t) 0 : un * GMP_LIMB_BITS); - limb = ux ^ up[i]; - } - gmp_ctz (cnt, limb); - return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt; -} - -mp_bitcnt_t -mpn_scan1 (mp_srcptr ptr, mp_bitcnt_t bit) -{ - mp_size_t i; - i = bit / GMP_LIMB_BITS; - - return mpn_common_scan ( ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), - i, ptr, i, 0); -} - -mp_bitcnt_t -mpn_scan0 (mp_srcptr ptr, mp_bitcnt_t bit) -{ - mp_size_t i; - i = bit / GMP_LIMB_BITS; - - return mpn_common_scan (~ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), - i, ptr, i, GMP_LIMB_MAX); -} - -void -mpn_com (mp_ptr rp, mp_srcptr up, mp_size_t n) -{ - while (--n >= 0) - *rp++ = ~ *up++; -} - -mp_limb_t -mpn_neg (mp_ptr rp, mp_srcptr up, mp_size_t n) -{ - while (*up == 0) - { - *rp = 0; - if (!--n) - return 0; - ++up; ++rp; - } - *rp = - *up; - mpn_com (++rp, ++up, --n); - return 1; -} - - -/* MPN division interface. */ - -/* The 3/2 inverse is defined as - - m = floor( (B^3-1) / (B u1 + u0)) - B -*/ -mp_limb_t -mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0) -{ - mp_limb_t r, m; - - { - mp_limb_t p, ql; - unsigned ul, uh, qh; - - assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); - /* For notation, let b denote the half-limb base, so that B = b^2. - Split u1 = b uh + ul. */ - ul = u1 & GMP_LLIMB_MASK; - uh = u1 >> (GMP_LIMB_BITS / 2); - - /* Approximation of the high half of quotient. Differs from the 2/1 - inverse of the half limb uh, since we have already subtracted - u0. */ - qh = (u1 ^ GMP_LIMB_MAX) / uh; - - /* Adjust to get a half-limb 3/2 inverse, i.e., we want - - qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u - = floor( (b (~u) + b-1) / u), - - and the remainder - - r = b (~u) + b-1 - qh (b uh + ul) - = b (~u - qh uh) + b-1 - qh ul - - Subtraction of qh ul may underflow, which implies adjustments. - But by normalization, 2 u >= B > qh ul, so we need to adjust by - at most 2. 
- */ - - r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK; - - p = (mp_limb_t) qh * ul; - /* Adjustment steps taken from udiv_qrnnd_c */ - if (r < p) - { - qh--; - r += u1; - if (r >= u1) /* i.e. we didn't get carry when adding to r */ - if (r < p) - { - qh--; - r += u1; - } - } - r -= p; - - /* Low half of the quotient is - - ql = floor ( (b r + b-1) / u1). - - This is a 3/2 division (on half-limbs), for which qh is a - suitable inverse. */ - - p = (r >> (GMP_LIMB_BITS / 2)) * qh + r; - /* Unlike full-limb 3/2, we can add 1 without overflow. For this to - work, it is essential that ql is a full mp_limb_t. */ - ql = (p >> (GMP_LIMB_BITS / 2)) + 1; - - /* By the 3/2 trick, we don't need the high half limb. */ - r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1; - - if (r >= (GMP_LIMB_MAX & (p << (GMP_LIMB_BITS / 2)))) - { - ql--; - r += u1; - } - m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql; - if (r >= u1) - { - m++; - r -= u1; - } - } - - /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a - 3/2 inverse. */ - if (u0 > 0) - { - mp_limb_t th, tl; - r = ~r; - r += u0; - if (r < u0) - { - m--; - if (r >= u1) - { - m--; - r -= u1; - } - r -= u1; - } - gmp_umul_ppmm (th, tl, u0, m); - r += th; - if (r < th) - { - m--; - m -= ((r > u1) | ((r == u1) & (tl > u0))); - } - } - - return m; -} - -struct gmp_div_inverse -{ - /* Normalization shift count. */ - unsigned shift; - /* Normalized divisor (d0 unused for mpn_div_qr_1) */ - mp_limb_t d1, d0; - /* Inverse, for 2/1 or 3/2. */ - mp_limb_t di; -}; - -static void -mpn_div_qr_1_invert (struct gmp_div_inverse *inv, mp_limb_t d) -{ - unsigned shift; - - assert (d > 0); - gmp_clz (shift, d); - inv->shift = shift; - inv->d1 = d << shift; - inv->di = mpn_invert_limb (inv->d1); -} - -static void -mpn_div_qr_2_invert (struct gmp_div_inverse *inv, - mp_limb_t d1, mp_limb_t d0) -{ - unsigned shift; - - assert (d1 > 0); - gmp_clz (shift, d1); - inv->shift = shift; - if (shift > 0) - { - d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); - d0 <<= shift; - } - inv->d1 = d1; - inv->d0 = d0; - inv->di = mpn_invert_3by2 (d1, d0); -} - -static void -mpn_div_qr_invert (struct gmp_div_inverse *inv, - mp_srcptr dp, mp_size_t dn) -{ - assert (dn > 0); - - if (dn == 1) - mpn_div_qr_1_invert (inv, dp[0]); - else if (dn == 2) - mpn_div_qr_2_invert (inv, dp[1], dp[0]); - else - { - unsigned shift; - mp_limb_t d1, d0; - - d1 = dp[dn-1]; - d0 = dp[dn-2]; - assert (d1 > 0); - gmp_clz (shift, d1); - inv->shift = shift; - if (shift > 0) - { - d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); - d0 = (d0 << shift) | (dp[dn-3] >> (GMP_LIMB_BITS - shift)); - } - inv->d1 = d1; - inv->d0 = d0; - inv->di = mpn_invert_3by2 (d1, d0); - } -} - -/* Not matching current public gmp interface, rather corresponding to - the sbpi1_div_* functions. */ -static mp_limb_t -mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn, - const struct gmp_div_inverse *inv) -{ - mp_limb_t d, di; - mp_limb_t r; - mp_ptr tp = NULL; - mp_size_t tn = 0; - - if (inv->shift > 0) - { - /* Shift, reusing qp area if possible. In-place shift if qp == np. 
*/ - tp = qp; - if (!tp) - { - tn = nn; - tp = gmp_alloc_limbs (tn); - } - r = mpn_lshift (tp, np, nn, inv->shift); - np = tp; - } - else - r = 0; - - d = inv->d1; - di = inv->di; - while (--nn >= 0) - { - mp_limb_t q; - - gmp_udiv_qrnnd_preinv (q, r, r, np[nn], d, di); - if (qp) - qp[nn] = q; - } - if (tn) - gmp_free_limbs (tp, tn); - - return r >> inv->shift; -} - -static void -mpn_div_qr_2_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, - const struct gmp_div_inverse *inv) -{ - unsigned shift; - mp_size_t i; - mp_limb_t d1, d0, di, r1, r0; - - assert (nn >= 2); - shift = inv->shift; - d1 = inv->d1; - d0 = inv->d0; - di = inv->di; - - if (shift > 0) - r1 = mpn_lshift (np, np, nn, shift); - else - r1 = 0; - - r0 = np[nn - 1]; - - i = nn - 2; - do - { - mp_limb_t n0, q; - n0 = np[i]; - gmp_udiv_qr_3by2 (q, r1, r0, r1, r0, n0, d1, d0, di); - - if (qp) - qp[i] = q; - } - while (--i >= 0); - - if (shift > 0) - { - assert ((r0 & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - shift))) == 0); - r0 = (r0 >> shift) | (r1 << (GMP_LIMB_BITS - shift)); - r1 >>= shift; - } - - np[1] = r1; - np[0] = r0; -} - -static void -mpn_div_qr_pi1 (mp_ptr qp, - mp_ptr np, mp_size_t nn, mp_limb_t n1, - mp_srcptr dp, mp_size_t dn, - mp_limb_t dinv) -{ - mp_size_t i; - - mp_limb_t d1, d0; - mp_limb_t cy, cy1; - mp_limb_t q; - - assert (dn > 2); - assert (nn >= dn); - - d1 = dp[dn - 1]; - d0 = dp[dn - 2]; - - assert ((d1 & GMP_LIMB_HIGHBIT) != 0); - /* Iteration variable is the index of the q limb. - * - * We divide - * by - */ - - i = nn - dn; - do - { - mp_limb_t n0 = np[dn-1+i]; - - if (n1 == d1 && n0 == d0) - { - q = GMP_LIMB_MAX; - mpn_submul_1 (np+i, dp, dn, q); - n1 = np[dn-1+i]; /* update n1, last loop's value will now be invalid */ - } - else - { - gmp_udiv_qr_3by2 (q, n1, n0, n1, n0, np[dn-2+i], d1, d0, dinv); - - cy = mpn_submul_1 (np + i, dp, dn-2, q); - - cy1 = n0 < cy; - n0 = n0 - cy; - cy = n1 < cy1; - n1 = n1 - cy1; - np[dn-2+i] = n0; - - if (cy != 0) - { - n1 += d1 + mpn_add_n (np + i, np + i, dp, dn - 1); - q--; - } - } - - if (qp) - qp[i] = q; - } - while (--i >= 0); - - np[dn - 1] = n1; -} - -static void -mpn_div_qr_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, - mp_srcptr dp, mp_size_t dn, - const struct gmp_div_inverse *inv) -{ - assert (dn > 0); - assert (nn >= dn); - - if (dn == 1) - np[0] = mpn_div_qr_1_preinv (qp, np, nn, inv); - else if (dn == 2) - mpn_div_qr_2_preinv (qp, np, nn, inv); - else - { - mp_limb_t nh; - unsigned shift; - - assert (inv->d1 == dp[dn-1]); - assert (inv->d0 == dp[dn-2]); - assert ((inv->d1 & GMP_LIMB_HIGHBIT) != 0); - - shift = inv->shift; - if (shift > 0) - nh = mpn_lshift (np, np, nn, shift); - else - nh = 0; - - mpn_div_qr_pi1 (qp, np, nn, nh, dp, dn, inv->di); - - if (shift > 0) - gmp_assert_nocarry (mpn_rshift (np, np, dn, shift)); - } -} - -static void -mpn_div_qr (mp_ptr qp, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn) -{ - struct gmp_div_inverse inv; - mp_ptr tp = NULL; - - assert (dn > 0); - assert (nn >= dn); - - mpn_div_qr_invert (&inv, dp, dn); - if (dn > 2 && inv.shift > 0) - { - tp = gmp_alloc_limbs (dn); - gmp_assert_nocarry (mpn_lshift (tp, dp, dn, inv.shift)); - dp = tp; - } - mpn_div_qr_preinv (qp, np, nn, dp, dn, &inv); - if (tp) - gmp_free_limbs (tp, dn); -} - - -/* MPN base conversion. 
*/ -static unsigned -mpn_base_power_of_two_p (unsigned b) -{ - switch (b) - { - case 2: return 1; - case 4: return 2; - case 8: return 3; - case 16: return 4; - case 32: return 5; - case 64: return 6; - case 128: return 7; - case 256: return 8; - default: return 0; - } -} - -struct mpn_base_info -{ - /* bb is the largest power of the base which fits in one limb, and - exp is the corresponding exponent. */ - unsigned exp; - mp_limb_t bb; -}; - -static void -mpn_get_base_info (struct mpn_base_info *info, mp_limb_t b) -{ - mp_limb_t m; - mp_limb_t p; - unsigned exp; - - m = GMP_LIMB_MAX / b; - for (exp = 1, p = b; p <= m; exp++) - p *= b; - - info->exp = exp; - info->bb = p; -} - -static mp_bitcnt_t -mpn_limb_size_in_base_2 (mp_limb_t u) -{ - unsigned shift; - - assert (u > 0); - gmp_clz (shift, u); - return GMP_LIMB_BITS - shift; -} - -static size_t -mpn_get_str_bits (unsigned char *sp, unsigned bits, mp_srcptr up, mp_size_t un) -{ - unsigned char mask; - size_t sn, j; - mp_size_t i; - unsigned shift; - - sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]) - + bits - 1) / bits; - - mask = (1U << bits) - 1; - - for (i = 0, j = sn, shift = 0; j-- > 0;) - { - unsigned char digit = up[i] >> shift; - - shift += bits; - - if (shift >= GMP_LIMB_BITS && ++i < un) - { - shift -= GMP_LIMB_BITS; - digit |= up[i] << (bits - shift); - } - sp[j] = digit & mask; - } - return sn; -} - -/* We generate digits from the least significant end, and reverse at - the end. */ -static size_t -mpn_limb_get_str (unsigned char *sp, mp_limb_t w, - const struct gmp_div_inverse *binv) -{ - mp_size_t i; - for (i = 0; w > 0; i++) - { - mp_limb_t h, l, r; - - h = w >> (GMP_LIMB_BITS - binv->shift); - l = w << binv->shift; - - gmp_udiv_qrnnd_preinv (w, r, h, l, binv->d1, binv->di); - assert ((r & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - binv->shift))) == 0); - r >>= binv->shift; - - sp[i] = r; - } - return i; -} - -static size_t -mpn_get_str_other (unsigned char *sp, - int base, const struct mpn_base_info *info, - mp_ptr up, mp_size_t un) -{ - struct gmp_div_inverse binv; - size_t sn; - size_t i; - - mpn_div_qr_1_invert (&binv, base); - - sn = 0; - - if (un > 1) - { - struct gmp_div_inverse bbinv; - mpn_div_qr_1_invert (&bbinv, info->bb); - - do - { - mp_limb_t w; - size_t done; - w = mpn_div_qr_1_preinv (up, up, un, &bbinv); - un -= (up[un-1] == 0); - done = mpn_limb_get_str (sp + sn, w, &binv); - - for (sn += done; done < info->exp; done++) - sp[sn++] = 0; - } - while (un > 1); - } - sn += mpn_limb_get_str (sp + sn, up[0], &binv); - - /* Reverse order */ - for (i = 0; 2*i + 1 < sn; i++) - { - unsigned char t = sp[i]; - sp[i] = sp[sn - i - 1]; - sp[sn - i - 1] = t; - } - - return sn; -} - -size_t -mpn_get_str (unsigned char *sp, int base, mp_ptr up, mp_size_t un) -{ - unsigned bits; - - assert (un > 0); - assert (up[un-1] > 0); - - bits = mpn_base_power_of_two_p (base); - if (bits) - return mpn_get_str_bits (sp, bits, up, un); - else - { - struct mpn_base_info info; - - mpn_get_base_info (&info, base); - return mpn_get_str_other (sp, base, &info, up, un); - } -} - -static mp_size_t -mpn_set_str_bits (mp_ptr rp, const unsigned char *sp, size_t sn, - unsigned bits) -{ - mp_size_t rn; - mp_limb_t limb; - unsigned shift; - - for (limb = 0, rn = 0, shift = 0; sn-- > 0; ) - { - limb |= (mp_limb_t) sp[sn] << shift; - shift += bits; - if (shift >= GMP_LIMB_BITS) - { - shift -= GMP_LIMB_BITS; - rp[rn++] = limb; - /* Next line is correct also if shift == 0, - bits == 8, and mp_limb_t == unsigned char. 
*/ - limb = (unsigned int) sp[sn] >> (bits - shift); - } - } - if (limb != 0) - rp[rn++] = limb; - else - rn = mpn_normalized_size (rp, rn); - return rn; -} - -/* Result is usually normalized, except for all-zero input, in which - case a single zero limb is written at *RP, and 1 is returned. */ -static mp_size_t -mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn, - mp_limb_t b, const struct mpn_base_info *info) -{ - mp_size_t rn; - mp_limb_t w; - unsigned k; - size_t j; - - assert (sn > 0); - - k = 1 + (sn - 1) % info->exp; - - j = 0; - w = sp[j++]; - while (--k != 0) - w = w * b + sp[j++]; - - rp[0] = w; - - for (rn = 1; j < sn;) - { - mp_limb_t cy; - - w = sp[j++]; - for (k = 1; k < info->exp; k++) - w = w * b + sp[j++]; - - cy = mpn_mul_1 (rp, rp, rn, info->bb); - cy += mpn_add_1 (rp, rp, rn, w); - if (cy > 0) - rp[rn++] = cy; - } - assert (j == sn); - - return rn; -} - -mp_size_t -mpn_set_str (mp_ptr rp, const unsigned char *sp, size_t sn, int base) -{ - unsigned bits; - - if (sn == 0) - return 0; - - bits = mpn_base_power_of_two_p (base); - if (bits) - return mpn_set_str_bits (rp, sp, sn, bits); - else - { - struct mpn_base_info info; - - mpn_get_base_info (&info, base); - return mpn_set_str_other (rp, sp, sn, base, &info); - } -} - - -/* MPZ interface */ -void -mpz_init (mpz_t r) -{ - static const mp_limb_t dummy_limb = GMP_LIMB_MAX & 0xc1a0; - - r->_mp_alloc = 0; - r->_mp_size = 0; - r->_mp_d = (mp_ptr) &dummy_limb; -} - -/* The utility of this function is a bit limited, since many functions - assigns the result variable using mpz_swap. */ -void -mpz_init2 (mpz_t r, mp_bitcnt_t bits) -{ - mp_size_t rn; - - bits -= (bits != 0); /* Round down, except if 0 */ - rn = 1 + bits / GMP_LIMB_BITS; - - r->_mp_alloc = rn; - r->_mp_size = 0; - r->_mp_d = gmp_alloc_limbs (rn); -} - -void -mpz_clear (mpz_t r) -{ - if (r->_mp_alloc) - gmp_free_limbs (r->_mp_d, r->_mp_alloc); -} - -static mp_ptr -mpz_realloc (mpz_t r, mp_size_t size) -{ - size = GMP_MAX (size, 1); - - if (r->_mp_alloc) - r->_mp_d = gmp_realloc_limbs (r->_mp_d, r->_mp_alloc, size); - else - r->_mp_d = gmp_alloc_limbs (size); - r->_mp_alloc = size; - - if (GMP_ABS (r->_mp_size) > size) - r->_mp_size = 0; - - return r->_mp_d; -} - -/* Realloc for an mpz_t WHAT if it has less than NEEDED limbs. */ -#define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc \ - ? mpz_realloc(z,n) \ - : (z)->_mp_d) - -/* MPZ assignment and basic conversions. 
*/ -void -mpz_set_si (mpz_t r, signed long int x) -{ - if (x >= 0) - mpz_set_ui (r, x); - else /* (x < 0) */ - if (GMP_LIMB_BITS < GMP_ULONG_BITS) - { - mpz_set_ui (r, GMP_NEG_CAST (unsigned long int, x)); - mpz_neg (r, r); - } - else - { - r->_mp_size = -1; - MPZ_REALLOC (r, 1)[0] = GMP_NEG_CAST (unsigned long int, x); - } -} - -void -mpz_set_ui (mpz_t r, unsigned long int x) -{ - if (x > 0) - { - r->_mp_size = 1; - MPZ_REALLOC (r, 1)[0] = x; - if (GMP_LIMB_BITS < GMP_ULONG_BITS) - { - int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; - while (x >>= LOCAL_GMP_LIMB_BITS) - { - ++ r->_mp_size; - MPZ_REALLOC (r, r->_mp_size)[r->_mp_size - 1] = x; - } - } - } - else - r->_mp_size = 0; -} - -void -mpz_set (mpz_t r, const mpz_t x) -{ - /* Allow the NOP r == x */ - if (r != x) - { - mp_size_t n; - mp_ptr rp; - - n = GMP_ABS (x->_mp_size); - rp = MPZ_REALLOC (r, n); - - mpn_copyi (rp, x->_mp_d, n); - r->_mp_size = x->_mp_size; - } -} - -void -mpz_init_set_si (mpz_t r, signed long int x) -{ - mpz_init (r); - mpz_set_si (r, x); -} - -void -mpz_init_set_ui (mpz_t r, unsigned long int x) -{ - mpz_init (r); - mpz_set_ui (r, x); -} - -void -mpz_init_set (mpz_t r, const mpz_t x) -{ - mpz_init (r); - mpz_set (r, x); -} - -int -mpz_fits_slong_p (const mpz_t u) -{ - return mpz_cmp_si (u, LONG_MAX) <= 0 && mpz_cmp_si (u, LONG_MIN) >= 0; -} - -static int -mpn_absfits_ulong_p (mp_srcptr up, mp_size_t un) -{ - int ulongsize = GMP_ULONG_BITS / GMP_LIMB_BITS; - mp_limb_t ulongrem = 0; - - if (GMP_ULONG_BITS % GMP_LIMB_BITS != 0) - ulongrem = (mp_limb_t) (ULONG_MAX >> GMP_LIMB_BITS * ulongsize) + 1; - - return un <= ulongsize || (up[ulongsize] < ulongrem && un == ulongsize + 1); -} - -int -mpz_fits_ulong_p (const mpz_t u) -{ - mp_size_t us = u->_mp_size; - - return us >= 0 && mpn_absfits_ulong_p (u->_mp_d, us); -} - -int -mpz_fits_sint_p (const mpz_t u) -{ - return mpz_cmp_si (u, INT_MAX) <= 0 && mpz_cmp_si (u, INT_MIN) >= 0; -} - -int -mpz_fits_uint_p (const mpz_t u) -{ - return u->_mp_size >= 0 && mpz_cmpabs_ui (u, UINT_MAX) <= 0; -} - -int -mpz_fits_sshort_p (const mpz_t u) -{ - return mpz_cmp_si (u, SHRT_MAX) <= 0 && mpz_cmp_si (u, SHRT_MIN) >= 0; -} - -int -mpz_fits_ushort_p (const mpz_t u) -{ - return u->_mp_size >= 0 && mpz_cmpabs_ui (u, USHRT_MAX) <= 0; -} - -long int -mpz_get_si (const mpz_t u) -{ - unsigned long r = mpz_get_ui (u); - unsigned long c = -LONG_MAX - LONG_MIN; - - if (u->_mp_size < 0) - /* This expression is necessary to properly handle -LONG_MIN */ - return -(long) c - (long) ((r - c) & LONG_MAX); - else - return (long) (r & LONG_MAX); -} - -unsigned long int -mpz_get_ui (const mpz_t u) -{ - if (GMP_LIMB_BITS < GMP_ULONG_BITS) - { - int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; - unsigned long r = 0; - mp_size_t n = GMP_ABS (u->_mp_size); - n = GMP_MIN (n, 1 + (mp_size_t) (GMP_ULONG_BITS - 1) / GMP_LIMB_BITS); - while (--n >= 0) - r = (r << LOCAL_GMP_LIMB_BITS) + u->_mp_d[n]; - return r; - } - - return u->_mp_size == 0 ? 
0 : u->_mp_d[0]; -} - -size_t -mpz_size (const mpz_t u) -{ - return GMP_ABS (u->_mp_size); -} - -mp_limb_t -mpz_getlimbn (const mpz_t u, mp_size_t n) -{ - if (n >= 0 && n < GMP_ABS (u->_mp_size)) - return u->_mp_d[n]; - else - return 0; -} - -void -mpz_realloc2 (mpz_t x, mp_bitcnt_t n) -{ - mpz_realloc (x, 1 + (n - (n != 0)) / GMP_LIMB_BITS); -} - -mp_srcptr -mpz_limbs_read (mpz_srcptr x) -{ - return x->_mp_d; -} - -mp_ptr -mpz_limbs_modify (mpz_t x, mp_size_t n) -{ - assert (n > 0); - return MPZ_REALLOC (x, n); -} - -mp_ptr -mpz_limbs_write (mpz_t x, mp_size_t n) -{ - return mpz_limbs_modify (x, n); -} - -void -mpz_limbs_finish (mpz_t x, mp_size_t xs) -{ - mp_size_t xn; - xn = mpn_normalized_size (x->_mp_d, GMP_ABS (xs)); - x->_mp_size = xs < 0 ? -xn : xn; -} - -static mpz_srcptr -mpz_roinit_normal_n (mpz_t x, mp_srcptr xp, mp_size_t xs) -{ - x->_mp_alloc = 0; - x->_mp_d = (mp_ptr) xp; - x->_mp_size = xs; - return x; -} - -mpz_srcptr -mpz_roinit_n (mpz_t x, mp_srcptr xp, mp_size_t xs) -{ - mpz_roinit_normal_n (x, xp, xs); - mpz_limbs_finish (x, xs); - return x; -} - - -/* Conversions and comparison to double. */ -void -mpz_set_d (mpz_t r, double x) -{ - int sign; - mp_ptr rp; - mp_size_t rn, i; - double B; - double Bi; - mp_limb_t f; - - /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is - zero or infinity. */ - if (x != x || x == x * 0.5) - { - r->_mp_size = 0; - return; - } - - sign = x < 0.0 ; - if (sign) - x = - x; - - if (x < 1.0) - { - r->_mp_size = 0; - return; - } - B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); - Bi = 1.0 / B; - for (rn = 1; x >= B; rn++) - x *= Bi; - - rp = MPZ_REALLOC (r, rn); - - f = (mp_limb_t) x; - x -= f; - assert (x < 1.0); - i = rn-1; - rp[i] = f; - while (--i >= 0) - { - x = B * x; - f = (mp_limb_t) x; - x -= f; - assert (x < 1.0); - rp[i] = f; - } - - r->_mp_size = sign ? - rn : rn; -} - -void -mpz_init_set_d (mpz_t r, double x) -{ - mpz_init (r); - mpz_set_d (r, x); -} - -double -mpz_get_d (const mpz_t u) -{ - int m; - mp_limb_t l; - mp_size_t un; - double x; - double B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); - - un = GMP_ABS (u->_mp_size); - - if (un == 0) - return 0.0; - - l = u->_mp_d[--un]; - gmp_clz (m, l); - m = m + GMP_DBL_MANT_BITS - GMP_LIMB_BITS; - if (m < 0) - l &= GMP_LIMB_MAX << -m; - - for (x = l; --un >= 0;) - { - x = B*x; - if (m > 0) { - l = u->_mp_d[un]; - m -= GMP_LIMB_BITS; - if (m < 0) - l &= GMP_LIMB_MAX << -m; - x += l; - } - } - - if (u->_mp_size < 0) - x = -x; - - return x; -} - -int -mpz_cmpabs_d (const mpz_t x, double d) -{ - mp_size_t xn; - double B, Bi; - mp_size_t i; - - xn = x->_mp_size; - d = GMP_ABS (d); - - if (xn != 0) - { - xn = GMP_ABS (xn); - - B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); - Bi = 1.0 / B; - - /* Scale d so it can be compared with the top limb. */ - for (i = 1; i < xn; i++) - d *= Bi; - - if (d >= B) - return -1; - - /* Compare floor(d) to top limb, subtract and cancel when equal. */ - for (i = xn; i-- > 0;) - { - mp_limb_t f, xl; - - f = (mp_limb_t) d; - xl = x->_mp_d[i]; - if (xl > f) - return 1; - else if (xl < f) - return -1; - d = B * (d - f); - } - } - return - (d > 0.0); -} - -int -mpz_cmp_d (const mpz_t x, double d) -{ - if (x->_mp_size < 0) - { - if (d >= 0.0) - return -1; - else - return -mpz_cmpabs_d (x, d); - } - else - { - if (d < 0.0) - return 1; - else - return mpz_cmpabs_d (x, d); - } -} - - -/* MPZ comparisons and the like. 
*/ -int -mpz_sgn (const mpz_t u) -{ - return GMP_CMP (u->_mp_size, 0); -} - -int -mpz_cmp_si (const mpz_t u, long v) -{ - mp_size_t usize = u->_mp_size; - - if (v >= 0) - return mpz_cmp_ui (u, v); - else if (usize >= 0) - return 1; - else - return - mpz_cmpabs_ui (u, GMP_NEG_CAST (unsigned long int, v)); -} - -int -mpz_cmp_ui (const mpz_t u, unsigned long v) -{ - mp_size_t usize = u->_mp_size; - - if (usize < 0) - return -1; - else - return mpz_cmpabs_ui (u, v); -} - -int -mpz_cmp (const mpz_t a, const mpz_t b) -{ - mp_size_t asize = a->_mp_size; - mp_size_t bsize = b->_mp_size; - - if (asize != bsize) - return (asize < bsize) ? -1 : 1; - else if (asize >= 0) - return mpn_cmp (a->_mp_d, b->_mp_d, asize); - else - return mpn_cmp (b->_mp_d, a->_mp_d, -asize); -} - -int -mpz_cmpabs_ui (const mpz_t u, unsigned long v) -{ - mp_size_t un = GMP_ABS (u->_mp_size); - - if (! mpn_absfits_ulong_p (u->_mp_d, un)) - return 1; - else - { - unsigned long uu = mpz_get_ui (u); - return GMP_CMP(uu, v); - } -} - -int -mpz_cmpabs (const mpz_t u, const mpz_t v) -{ - return mpn_cmp4 (u->_mp_d, GMP_ABS (u->_mp_size), - v->_mp_d, GMP_ABS (v->_mp_size)); -} - -void -mpz_abs (mpz_t r, const mpz_t u) -{ - mpz_set (r, u); - r->_mp_size = GMP_ABS (r->_mp_size); -} - -void -mpz_neg (mpz_t r, const mpz_t u) -{ - mpz_set (r, u); - r->_mp_size = -r->_mp_size; -} - -void -mpz_swap (mpz_t u, mpz_t v) -{ - MP_SIZE_T_SWAP (u->_mp_alloc, v->_mp_alloc); - MPN_PTR_SWAP (u->_mp_d, u->_mp_size, v->_mp_d, v->_mp_size); -} - - -/* MPZ addition and subtraction */ - - -void -mpz_add_ui (mpz_t r, const mpz_t a, unsigned long b) -{ - mpz_t bb; - mpz_init_set_ui (bb, b); - mpz_add (r, a, bb); - mpz_clear (bb); -} - -void -mpz_sub_ui (mpz_t r, const mpz_t a, unsigned long b) -{ - mpz_ui_sub (r, b, a); - mpz_neg (r, r); -} - -void -mpz_ui_sub (mpz_t r, unsigned long a, const mpz_t b) -{ - mpz_neg (r, b); - mpz_add_ui (r, r, a); -} - -static mp_size_t -mpz_abs_add (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t an = GMP_ABS (a->_mp_size); - mp_size_t bn = GMP_ABS (b->_mp_size); - mp_ptr rp; - mp_limb_t cy; - - if (an < bn) - { - MPZ_SRCPTR_SWAP (a, b); - MP_SIZE_T_SWAP (an, bn); - } - - rp = MPZ_REALLOC (r, an + 1); - cy = mpn_add (rp, a->_mp_d, an, b->_mp_d, bn); - - rp[an] = cy; - - return an + cy; -} - -static mp_size_t -mpz_abs_sub (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t an = GMP_ABS (a->_mp_size); - mp_size_t bn = GMP_ABS (b->_mp_size); - int cmp; - mp_ptr rp; - - cmp = mpn_cmp4 (a->_mp_d, an, b->_mp_d, bn); - if (cmp > 0) - { - rp = MPZ_REALLOC (r, an); - gmp_assert_nocarry (mpn_sub (rp, a->_mp_d, an, b->_mp_d, bn)); - return mpn_normalized_size (rp, an); - } - else if (cmp < 0) - { - rp = MPZ_REALLOC (r, bn); - gmp_assert_nocarry (mpn_sub (rp, b->_mp_d, bn, a->_mp_d, an)); - return -mpn_normalized_size (rp, bn); - } - else - return 0; -} - -void -mpz_add (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t rn; - - if ( (a->_mp_size ^ b->_mp_size) >= 0) - rn = mpz_abs_add (r, a, b); - else - rn = mpz_abs_sub (r, a, b); - - r->_mp_size = a->_mp_size >= 0 ? rn : - rn; -} - -void -mpz_sub (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t rn; - - if ( (a->_mp_size ^ b->_mp_size) >= 0) - rn = mpz_abs_sub (r, a, b); - else - rn = mpz_abs_add (r, a, b); - - r->_mp_size = a->_mp_size >= 0 ? 
rn : - rn; -} - - -/* MPZ multiplication */ -void -mpz_mul_si (mpz_t r, const mpz_t u, long int v) -{ - if (v < 0) - { - mpz_mul_ui (r, u, GMP_NEG_CAST (unsigned long int, v)); - mpz_neg (r, r); - } - else - mpz_mul_ui (r, u, v); -} - -void -mpz_mul_ui (mpz_t r, const mpz_t u, unsigned long int v) -{ - mpz_t vv; - mpz_init_set_ui (vv, v); - mpz_mul (r, u, vv); - mpz_clear (vv); - return; -} - -void -mpz_mul (mpz_t r, const mpz_t u, const mpz_t v) -{ - int sign; - mp_size_t un, vn, rn; - mpz_t t; - mp_ptr tp; - - un = u->_mp_size; - vn = v->_mp_size; - - if (un == 0 || vn == 0) - { - r->_mp_size = 0; - return; - } - - sign = (un ^ vn) < 0; - - un = GMP_ABS (un); - vn = GMP_ABS (vn); - - mpz_init2 (t, (un + vn) * GMP_LIMB_BITS); - - tp = t->_mp_d; - if (un >= vn) - mpn_mul (tp, u->_mp_d, un, v->_mp_d, vn); - else - mpn_mul (tp, v->_mp_d, vn, u->_mp_d, un); - - rn = un + vn; - rn -= tp[rn-1] == 0; - - t->_mp_size = sign ? - rn : rn; - mpz_swap (r, t); - mpz_clear (t); -} - -void -mpz_mul_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bits) -{ - mp_size_t un, rn; - mp_size_t limbs; - unsigned shift; - mp_ptr rp; - - un = GMP_ABS (u->_mp_size); - if (un == 0) - { - r->_mp_size = 0; - return; - } - - limbs = bits / GMP_LIMB_BITS; - shift = bits % GMP_LIMB_BITS; - - rn = un + limbs + (shift > 0); - rp = MPZ_REALLOC (r, rn); - if (shift > 0) - { - mp_limb_t cy = mpn_lshift (rp + limbs, u->_mp_d, un, shift); - rp[rn-1] = cy; - rn -= (cy == 0); - } - else - mpn_copyd (rp + limbs, u->_mp_d, un); - - mpn_zero (rp, limbs); - - r->_mp_size = (u->_mp_size < 0) ? - rn : rn; -} - -void -mpz_addmul_ui (mpz_t r, const mpz_t u, unsigned long int v) -{ - mpz_t t; - mpz_init_set_ui (t, v); - mpz_mul (t, u, t); - mpz_add (r, r, t); - mpz_clear (t); -} - -void -mpz_submul_ui (mpz_t r, const mpz_t u, unsigned long int v) -{ - mpz_t t; - mpz_init_set_ui (t, v); - mpz_mul (t, u, t); - mpz_sub (r, r, t); - mpz_clear (t); -} - -void -mpz_addmul (mpz_t r, const mpz_t u, const mpz_t v) -{ - mpz_t t; - mpz_init (t); - mpz_mul (t, u, v); - mpz_add (r, r, t); - mpz_clear (t); -} - -void -mpz_submul (mpz_t r, const mpz_t u, const mpz_t v) -{ - mpz_t t; - mpz_init (t); - mpz_mul (t, u, v); - mpz_sub (r, r, t); - mpz_clear (t); -} - - -/* MPZ division */ -enum mpz_div_round_mode { GMP_DIV_FLOOR, GMP_DIV_CEIL, GMP_DIV_TRUNC }; - -/* Allows q or r to be zero. Returns 1 iff remainder is non-zero. */ -static int -mpz_div_qr (mpz_t q, mpz_t r, - const mpz_t n, const mpz_t d, enum mpz_div_round_mode mode) -{ - mp_size_t ns, ds, nn, dn, qs; - ns = n->_mp_size; - ds = d->_mp_size; - - if (ds == 0) - gmp_die("mpz_div_qr: Divide by zero."); - - if (ns == 0) - { - if (q) - q->_mp_size = 0; - if (r) - r->_mp_size = 0; - return 0; - } - - nn = GMP_ABS (ns); - dn = GMP_ABS (ds); - - qs = ds ^ ns; - - if (nn < dn) - { - if (mode == GMP_DIV_CEIL && qs >= 0) - { - /* q = 1, r = n - d */ - if (r) - mpz_sub (r, n, d); - if (q) - mpz_set_ui (q, 1); - } - else if (mode == GMP_DIV_FLOOR && qs < 0) - { - /* q = -1, r = n + d */ - if (r) - mpz_add (r, n, d); - if (q) - mpz_set_si (q, -1); - } - else - { - /* q = 0, r = d */ - if (r) - mpz_set (r, n); - if (q) - q->_mp_size = 0; - } - return 1; - } - else - { - mp_ptr np, qp; - mp_size_t qn, rn; - mpz_t tq, tr; - - mpz_init_set (tr, n); - np = tr->_mp_d; - - qn = nn - dn + 1; - - if (q) - { - mpz_init2 (tq, qn * GMP_LIMB_BITS); - qp = tq->_mp_d; - } - else - qp = NULL; - - mpn_div_qr (qp, np, nn, d->_mp_d, dn); - - if (qp) - { - qn -= (qp[qn-1] == 0); - - tq->_mp_size = qs < 0 ? 
-qn : qn; - } - rn = mpn_normalized_size (np, dn); - tr->_mp_size = ns < 0 ? - rn : rn; - - if (mode == GMP_DIV_FLOOR && qs < 0 && rn != 0) - { - if (q) - mpz_sub_ui (tq, tq, 1); - if (r) - mpz_add (tr, tr, d); - } - else if (mode == GMP_DIV_CEIL && qs >= 0 && rn != 0) - { - if (q) - mpz_add_ui (tq, tq, 1); - if (r) - mpz_sub (tr, tr, d); - } - - if (q) - { - mpz_swap (tq, q); - mpz_clear (tq); - } - if (r) - mpz_swap (tr, r); - - mpz_clear (tr); - - return rn != 0; - } -} - -void -mpz_cdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, r, n, d, GMP_DIV_CEIL); -} - -void -mpz_fdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, r, n, d, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, r, n, d, GMP_DIV_TRUNC); -} - -void -mpz_cdiv_q (mpz_t q, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, NULL, n, d, GMP_DIV_CEIL); -} - -void -mpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, NULL, n, d, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_q (mpz_t q, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC); -} - -void -mpz_cdiv_r (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL); -} - -void -mpz_fdiv_r (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_r (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, GMP_DIV_TRUNC); -} - -void -mpz_mod (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, d->_mp_size >= 0 ? GMP_DIV_FLOOR : GMP_DIV_CEIL); -} - -static void -mpz_div_q_2exp (mpz_t q, const mpz_t u, mp_bitcnt_t bit_index, - enum mpz_div_round_mode mode) -{ - mp_size_t un, qn; - mp_size_t limb_cnt; - mp_ptr qp; - int adjust; - - un = u->_mp_size; - if (un == 0) - { - q->_mp_size = 0; - return; - } - limb_cnt = bit_index / GMP_LIMB_BITS; - qn = GMP_ABS (un) - limb_cnt; - bit_index %= GMP_LIMB_BITS; - - if (mode == ((un > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* un != 0 here. */ - /* Note: Below, the final indexing at limb_cnt is valid because at - that point we have qn > 0. */ - adjust = (qn <= 0 - || !mpn_zero_p (u->_mp_d, limb_cnt) - || (u->_mp_d[limb_cnt] - & (((mp_limb_t) 1 << bit_index) - 1))); - else - adjust = 0; - - if (qn <= 0) - qn = 0; - else - { - qp = MPZ_REALLOC (q, qn); - - if (bit_index != 0) - { - mpn_rshift (qp, u->_mp_d + limb_cnt, qn, bit_index); - qn -= qp[qn - 1] == 0; - } - else - { - mpn_copyi (qp, u->_mp_d + limb_cnt, qn); - } - } - - q->_mp_size = qn; - - if (adjust) - mpz_add_ui (q, q, 1); - if (un < 0) - mpz_neg (q, q); -} - -static void -mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index, - enum mpz_div_round_mode mode) -{ - mp_size_t us, un, rn; - mp_ptr rp; - mp_limb_t mask; - - us = u->_mp_size; - if (us == 0 || bit_index == 0) - { - r->_mp_size = 0; - return; - } - rn = (bit_index + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; - assert (rn > 0); - - rp = MPZ_REALLOC (r, rn); - un = GMP_ABS (us); - - mask = GMP_LIMB_MAX >> (rn * GMP_LIMB_BITS - bit_index); - - if (rn > un) - { - /* Quotient (with truncation) is zero, and remainder is - non-zero */ - if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ - { - /* Have to negate and sign extend. */ - mp_size_t i; - - gmp_assert_nocarry (! 
mpn_neg (rp, u->_mp_d, un)); - for (i = un; i < rn - 1; i++) - rp[i] = GMP_LIMB_MAX; - - rp[rn-1] = mask; - us = -us; - } - else - { - /* Just copy */ - if (r != u) - mpn_copyi (rp, u->_mp_d, un); - - rn = un; - } - } - else - { - if (r != u) - mpn_copyi (rp, u->_mp_d, rn - 1); - - rp[rn-1] = u->_mp_d[rn-1] & mask; - - if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ - { - /* If r != 0, compute 2^{bit_count} - r. */ - mpn_neg (rp, rp, rn); - - rp[rn-1] &= mask; - - /* us is not used for anything else, so we can modify it - here to indicate flipped sign. */ - us = -us; - } - } - rn = mpn_normalized_size (rp, rn); - r->_mp_size = us < 0 ? -rn : rn; -} - -void -mpz_cdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_q_2exp (r, u, cnt, GMP_DIV_CEIL); -} - -void -mpz_fdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_q_2exp (r, u, cnt, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_q_2exp (r, u, cnt, GMP_DIV_TRUNC); -} - -void -mpz_cdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_r_2exp (r, u, cnt, GMP_DIV_CEIL); -} - -void -mpz_fdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_r_2exp (r, u, cnt, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_r_2exp (r, u, cnt, GMP_DIV_TRUNC); -} - -void -mpz_divexact (mpz_t q, const mpz_t n, const mpz_t d) -{ - gmp_assert_nocarry (mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC)); -} - -int -mpz_divisible_p (const mpz_t n, const mpz_t d) -{ - return mpz_div_qr (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; -} - -int -mpz_congruent_p (const mpz_t a, const mpz_t b, const mpz_t m) -{ - mpz_t t; - int res; - - /* a == b (mod 0) iff a == b */ - if (mpz_sgn (m) == 0) - return (mpz_cmp (a, b) == 0); - - mpz_init (t); - mpz_sub (t, a, b); - res = mpz_divisible_p (t, m); - mpz_clear (t); - - return res; -} - -static unsigned long -mpz_div_qr_ui (mpz_t q, mpz_t r, - const mpz_t n, unsigned long d, enum mpz_div_round_mode mode) -{ - unsigned long ret; - mpz_t rr, dd; - - mpz_init (rr); - mpz_init_set_ui (dd, d); - mpz_div_qr (q, rr, n, dd, mode); - mpz_clear (dd); - ret = mpz_get_ui (rr); - - if (r) - mpz_swap (r, rr); - mpz_clear (rr); - - return ret; -} - -unsigned long -mpz_cdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, r, n, d, GMP_DIV_CEIL); -} - -unsigned long -mpz_fdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, r, n, d, GMP_DIV_FLOOR); -} - -unsigned long -mpz_tdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, r, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_cdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_CEIL); -} - -unsigned long -mpz_fdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_FLOOR); -} - -unsigned long -mpz_tdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_cdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_CEIL); -} -unsigned long -mpz_fdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); -} -unsigned long -mpz_tdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_cdiv_ui 
(const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_CEIL); -} - -unsigned long -mpz_fdiv_ui (const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_FLOOR); -} - -unsigned long -mpz_tdiv_ui (const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_mod_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); -} - -void -mpz_divexact_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - gmp_assert_nocarry (mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC)); -} - -int -mpz_divisible_ui_p (const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; -} - - -/* GCD */ -static mp_limb_t -mpn_gcd_11 (mp_limb_t u, mp_limb_t v) -{ - unsigned shift; - - assert ( (u | v) > 0); - - if (u == 0) - return v; - else if (v == 0) - return u; - - gmp_ctz (shift, u | v); - - u >>= shift; - v >>= shift; - - if ( (u & 1) == 0) - MP_LIMB_T_SWAP (u, v); - - while ( (v & 1) == 0) - v >>= 1; - - while (u != v) - { - if (u > v) - { - u -= v; - do - u >>= 1; - while ( (u & 1) == 0); - } - else - { - v -= u; - do - v >>= 1; - while ( (v & 1) == 0); - } - } - return u << shift; -} - -mp_size_t -mpn_gcd (mp_ptr rp, mp_ptr up, mp_size_t un, mp_ptr vp, mp_size_t vn) -{ - assert (un >= vn); - assert (vn > 0); - assert (!GMP_MPN_OVERLAP_P (up, un, vp, vn)); - assert (vp[vn-1] > 0); - assert ((up[0] | vp[0]) & 1); - - if (un > vn) - mpn_div_qr (NULL, up, un, vp, vn); - - un = mpn_normalized_size (up, vn); - if (un == 0) - { - mpn_copyi (rp, vp, vn); - return vn; - } - - if (!(vp[0] & 1)) - MPN_PTR_SWAP (up, un, vp, vn); - - while (un > 1 || vn > 1) - { - int shift; - assert (vp[0] & 1); - - while (up[0] == 0) - { - up++; - un--; - } - gmp_ctz (shift, up[0]); - if (shift > 0) - { - gmp_assert_nocarry (mpn_rshift(up, up, un, shift)); - un -= (up[un-1] == 0); - } - - if (un < vn) - MPN_PTR_SWAP (up, un, vp, vn); - else if (un == vn) - { - int c = mpn_cmp (up, vp, un); - if (c == 0) - { - mpn_copyi (rp, up, un); - return un; - } - else if (c < 0) - MP_PTR_SWAP (up, vp); - } - - gmp_assert_nocarry (mpn_sub (up, up, un, vp, vn)); - un = mpn_normalized_size (up, un); - } - rp[0] = mpn_gcd_11 (up[0], vp[0]); - return 1; -} - -unsigned long -mpz_gcd_ui (mpz_t g, const mpz_t u, unsigned long v) -{ - mpz_t t; - mpz_init_set_ui(t, v); - mpz_gcd (t, u, t); - if (v > 0) - v = mpz_get_ui (t); - - if (g) - mpz_swap (t, g); - - mpz_clear (t); - - return v; -} - -static mp_bitcnt_t -mpz_make_odd (mpz_t r) -{ - mp_bitcnt_t shift; - - assert (r->_mp_size > 0); - /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ - shift = mpn_scan1 (r->_mp_d, 0); - mpz_tdiv_q_2exp (r, r, shift); - - return shift; -} - -void -mpz_gcd (mpz_t g, const mpz_t u, const mpz_t v) -{ - mpz_t tu, tv; - mp_bitcnt_t uz, vz, gz; - - if (u->_mp_size == 0) - { - mpz_abs (g, v); - return; - } - if (v->_mp_size == 0) - { - mpz_abs (g, u); - return; - } - - mpz_init (tu); - mpz_init (tv); - - mpz_abs (tu, u); - uz = mpz_make_odd (tu); - mpz_abs (tv, v); - vz = mpz_make_odd (tv); - gz = GMP_MIN (uz, vz); - - if (tu->_mp_size < tv->_mp_size) - mpz_swap (tu, tv); - - tu->_mp_size = mpn_gcd (tu->_mp_d, tu->_mp_d, tu->_mp_size, tv->_mp_d, tv->_mp_size); - mpz_mul_2exp (g, tu, gz); - - mpz_clear (tu); - mpz_clear (tv); -} - -void -mpz_gcdext (mpz_t g, mpz_t s, mpz_t t, const mpz_t u, const mpz_t v) -{ - mpz_t tu, tv, s0, s1, t0, t1; - mp_bitcnt_t uz, vz, 
gz; - mp_bitcnt_t power; - int cmp; - - if (u->_mp_size == 0) - { - /* g = 0 u + sgn(v) v */ - signed long sign = mpz_sgn (v); - mpz_abs (g, v); - if (s) - s->_mp_size = 0; - if (t) - mpz_set_si (t, sign); - return; - } - - if (v->_mp_size == 0) - { - /* g = sgn(u) u + 0 v */ - signed long sign = mpz_sgn (u); - mpz_abs (g, u); - if (s) - mpz_set_si (s, sign); - if (t) - t->_mp_size = 0; - return; - } - - mpz_init (tu); - mpz_init (tv); - mpz_init (s0); - mpz_init (s1); - mpz_init (t0); - mpz_init (t1); - - mpz_abs (tu, u); - uz = mpz_make_odd (tu); - mpz_abs (tv, v); - vz = mpz_make_odd (tv); - gz = GMP_MIN (uz, vz); - - uz -= gz; - vz -= gz; - - /* Cofactors corresponding to odd gcd. gz handled later. */ - if (tu->_mp_size < tv->_mp_size) - { - mpz_swap (tu, tv); - MPZ_SRCPTR_SWAP (u, v); - MPZ_PTR_SWAP (s, t); - MP_BITCNT_T_SWAP (uz, vz); - } - - /* Maintain - * - * u = t0 tu + t1 tv - * v = s0 tu + s1 tv - * - * where u and v denote the inputs with common factors of two - * eliminated, and det (s0, t0; s1, t1) = 2^p. Then - * - * 2^p tu = s1 u - t1 v - * 2^p tv = -s0 u + t0 v - */ - - /* After initial division, tu = q tv + tu', we have - * - * u = 2^uz (tu' + q tv) - * v = 2^vz tv - * - * or - * - * t0 = 2^uz, t1 = 2^uz q - * s0 = 0, s1 = 2^vz - */ - - mpz_tdiv_qr (t1, tu, tu, tv); - mpz_mul_2exp (t1, t1, uz); - - mpz_setbit (s1, vz); - power = uz + vz; - - if (tu->_mp_size > 0) - { - mp_bitcnt_t shift; - shift = mpz_make_odd (tu); - mpz_setbit (t0, uz + shift); - power += shift; - - for (;;) - { - int c; - c = mpz_cmp (tu, tv); - if (c == 0) - break; - - if (c < 0) - { - /* tv = tv' + tu - * - * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv' - * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */ - - mpz_sub (tv, tv, tu); - mpz_add (t0, t0, t1); - mpz_add (s0, s0, s1); - - shift = mpz_make_odd (tv); - mpz_mul_2exp (t1, t1, shift); - mpz_mul_2exp (s1, s1, shift); - } - else - { - mpz_sub (tu, tu, tv); - mpz_add (t1, t0, t1); - mpz_add (s1, s0, s1); - - shift = mpz_make_odd (tu); - mpz_mul_2exp (t0, t0, shift); - mpz_mul_2exp (s0, s0, shift); - } - power += shift; - } - } - else - mpz_setbit (t0, uz); - - /* Now tv = odd part of gcd, and -s0 and t0 are corresponding - cofactors. */ - - mpz_mul_2exp (tv, tv, gz); - mpz_neg (s0, s0); - - /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To - adjust cofactors, we need u / g and v / g */ - - mpz_divexact (s1, v, tv); - mpz_abs (s1, s1); - mpz_divexact (t1, u, tv); - mpz_abs (t1, t1); - - while (power-- > 0) - { - /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */ - if (mpz_odd_p (s0) || mpz_odd_p (t0)) - { - mpz_sub (s0, s0, s1); - mpz_add (t0, t0, t1); - } - assert (mpz_even_p (t0) && mpz_even_p (s0)); - mpz_tdiv_q_2exp (s0, s0, 1); - mpz_tdiv_q_2exp (t0, t0, 1); - } - - /* Choose small cofactors (they should generally satify - - |s| < |u| / 2g and |t| < |v| / 2g, - - with some documented exceptions). Always choose the smallest s, - if there are two choices for s with same absolute value, choose - the one with smallest corresponding t (this asymmetric condition - is needed to prefer s = 0, |t| = 1 when g = |a| = |b|). 
*/ - mpz_add (s1, s0, s1); - mpz_sub (t1, t0, t1); - cmp = mpz_cmpabs (s0, s1); - if (cmp > 0 || (cmp == 0 && mpz_cmpabs (t0, t1) > 0)) - { - mpz_swap (s0, s1); - mpz_swap (t0, t1); - } - if (u->_mp_size < 0) - mpz_neg (s0, s0); - if (v->_mp_size < 0) - mpz_neg (t0, t0); - - mpz_swap (g, tv); - if (s) - mpz_swap (s, s0); - if (t) - mpz_swap (t, t0); - - mpz_clear (tu); - mpz_clear (tv); - mpz_clear (s0); - mpz_clear (s1); - mpz_clear (t0); - mpz_clear (t1); -} - -void -mpz_lcm (mpz_t r, const mpz_t u, const mpz_t v) -{ - mpz_t g; - - if (u->_mp_size == 0 || v->_mp_size == 0) - { - r->_mp_size = 0; - return; - } - - mpz_init (g); - - mpz_gcd (g, u, v); - mpz_divexact (g, u, g); - mpz_mul (r, g, v); - - mpz_clear (g); - mpz_abs (r, r); -} - -void -mpz_lcm_ui (mpz_t r, const mpz_t u, unsigned long v) -{ - if (v == 0 || u->_mp_size == 0) - { - r->_mp_size = 0; - return; - } - - v /= mpz_gcd_ui (NULL, u, v); - mpz_mul_ui (r, u, v); - - mpz_abs (r, r); -} - -int -mpz_invert (mpz_t r, const mpz_t u, const mpz_t m) -{ - mpz_t g, tr; - int invertible; - - if (u->_mp_size == 0 || mpz_cmpabs_ui (m, 1) <= 0) - return 0; - - mpz_init (g); - mpz_init (tr); - - mpz_gcdext (g, tr, NULL, u, m); - invertible = (mpz_cmp_ui (g, 1) == 0); - - if (invertible) - { - if (tr->_mp_size < 0) - { - if (m->_mp_size >= 0) - mpz_add (tr, tr, m); - else - mpz_sub (tr, tr, m); - } - mpz_swap (r, tr); - } - - mpz_clear (g); - mpz_clear (tr); - return invertible; -} - - -/* Higher level operations (sqrt, pow and root) */ - -void -mpz_pow_ui (mpz_t r, const mpz_t b, unsigned long e) -{ - unsigned long bit; - mpz_t tr; - mpz_init_set_ui (tr, 1); - - bit = GMP_ULONG_HIGHBIT; - do - { - mpz_mul (tr, tr, tr); - if (e & bit) - mpz_mul (tr, tr, b); - bit >>= 1; - } - while (bit > 0); - - mpz_swap (r, tr); - mpz_clear (tr); -} - -void -mpz_ui_pow_ui (mpz_t r, unsigned long blimb, unsigned long e) -{ - mpz_t b; - - mpz_init_set_ui (b, blimb); - mpz_pow_ui (r, b, e); - mpz_clear (b); -} - -void -mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m) -{ - mpz_t tr; - mpz_t base; - mp_size_t en, mn; - mp_srcptr mp; - struct gmp_div_inverse minv; - unsigned shift; - mp_ptr tp = NULL; - - en = GMP_ABS (e->_mp_size); - mn = GMP_ABS (m->_mp_size); - if (mn == 0) - gmp_die ("mpz_powm: Zero modulo."); - - if (en == 0) - { - mpz_set_ui (r, mpz_cmpabs_ui (m, 1)); - return; - } - - mp = m->_mp_d; - mpn_div_qr_invert (&minv, mp, mn); - shift = minv.shift; - - if (shift > 0) - { - /* To avoid shifts, we do all our reductions, except the final - one, using a *normalized* m. */ - minv.shift = 0; - - tp = gmp_alloc_limbs (mn); - gmp_assert_nocarry (mpn_lshift (tp, mp, mn, shift)); - mp = tp; - } - - mpz_init (base); - - if (e->_mp_size < 0) - { - if (!mpz_invert (base, b, m)) - gmp_die ("mpz_powm: Negative exponent and non-invertible base."); - } - else - { - mp_size_t bn; - mpz_abs (base, b); - - bn = base->_mp_size; - if (bn >= mn) - { - mpn_div_qr_preinv (NULL, base->_mp_d, base->_mp_size, mp, mn, &minv); - bn = mn; - } - - /* We have reduced the absolute value. Now take care of the - sign. Note that we get zero represented non-canonically as - m. 
*/ - if (b->_mp_size < 0) - { - mp_ptr bp = MPZ_REALLOC (base, mn); - gmp_assert_nocarry (mpn_sub (bp, mp, mn, bp, bn)); - bn = mn; - } - base->_mp_size = mpn_normalized_size (base->_mp_d, bn); - } - mpz_init_set_ui (tr, 1); - - while (--en >= 0) - { - mp_limb_t w = e->_mp_d[en]; - mp_limb_t bit; - - bit = GMP_LIMB_HIGHBIT; - do - { - mpz_mul (tr, tr, tr); - if (w & bit) - mpz_mul (tr, tr, base); - if (tr->_mp_size > mn) - { - mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); - tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); - } - bit >>= 1; - } - while (bit > 0); - } - - /* Final reduction */ - if (tr->_mp_size >= mn) - { - minv.shift = shift; - mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); - tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); - } - if (tp) - gmp_free_limbs (tp, mn); - - mpz_swap (r, tr); - mpz_clear (tr); - mpz_clear (base); -} - -void -mpz_powm_ui (mpz_t r, const mpz_t b, unsigned long elimb, const mpz_t m) -{ - mpz_t e; - - mpz_init_set_ui (e, elimb); - mpz_powm (r, b, e, m); - mpz_clear (e); -} - -/* x=trunc(y^(1/z)), r=y-x^z */ -void -mpz_rootrem (mpz_t x, mpz_t r, const mpz_t y, unsigned long z) -{ - int sgn; - mp_bitcnt_t bc; - mpz_t t, u; - - sgn = y->_mp_size < 0; - if ((~z & sgn) != 0) - gmp_die ("mpz_rootrem: Negative argument, with even root."); - if (z == 0) - gmp_die ("mpz_rootrem: Zeroth root."); - - if (mpz_cmpabs_ui (y, 1) <= 0) { - if (x) - mpz_set (x, y); - if (r) - r->_mp_size = 0; - return; - } - - mpz_init (u); - mpz_init (t); - bc = (mpz_sizeinbase (y, 2) - 1) / z + 1; - mpz_setbit (t, bc); - - if (z == 2) /* simplify sqrt loop: z-1 == 1 */ - do { - mpz_swap (u, t); /* u = x */ - mpz_tdiv_q (t, y, u); /* t = y/x */ - mpz_add (t, t, u); /* t = y/x + x */ - mpz_tdiv_q_2exp (t, t, 1); /* x'= (y/x + x)/2 */ - } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ - else /* z != 2 */ { - mpz_t v; - - mpz_init (v); - if (sgn) - mpz_neg (t, t); - - do { - mpz_swap (u, t); /* u = x */ - mpz_pow_ui (t, u, z - 1); /* t = x^(z-1) */ - mpz_tdiv_q (t, y, t); /* t = y/x^(z-1) */ - mpz_mul_ui (v, u, z - 1); /* v = x*(z-1) */ - mpz_add (t, t, v); /* t = y/x^(z-1) + x*(z-1) */ - mpz_tdiv_q_ui (t, t, z); /* x'=(y/x^(z-1) + x*(z-1))/z */ - } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ - - mpz_clear (v); - } - - if (r) { - mpz_pow_ui (t, u, z); - mpz_sub (r, y, t); - } - if (x) - mpz_swap (x, u); - mpz_clear (u); - mpz_clear (t); -} - -int -mpz_root (mpz_t x, const mpz_t y, unsigned long z) -{ - int res; - mpz_t r; - - mpz_init (r); - mpz_rootrem (x, r, y, z); - res = r->_mp_size == 0; - mpz_clear (r); - - return res; -} - -/* Compute s = floor(sqrt(u)) and r = u - s^2. 
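The k-th root routine above is Newton's iteration x' = ((z-1)*x + y/x^(z-1))/z seeded from a power of two just above the root, and the square-root wrappers introduced in the comment above are its z = 2 specialisation. A caller-side check of the s/r contract (s^2 + r == y; the input value is arbitrary):

#include <assert.h>
#include "mini-gmp.h"

int main(void)
{
    mpz_t y, s, r, sq;
    mpz_init_set_ui(y, 123456789UL);
    mpz_init(s); mpz_init(r); mpz_init(sq);

    mpz_sqrtrem(s, r, y);              /* s = floor(sqrt(y)), r = y - s^2 */

    mpz_mul(sq, s, s);
    mpz_add(sq, sq, r);
    assert(mpz_cmp(sq, y) == 0);       /* s^2 + r == y */
    assert(mpz_get_ui(s) == 11111UL && mpz_get_ui(r) == 2468UL);

    mpz_clear(y); mpz_clear(s); mpz_clear(r); mpz_clear(sq);
    return 0;
}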
Allows r == NULL */ -void -mpz_sqrtrem (mpz_t s, mpz_t r, const mpz_t u) -{ - mpz_rootrem (s, r, u, 2); -} - -void -mpz_sqrt (mpz_t s, const mpz_t u) -{ - mpz_rootrem (s, NULL, u, 2); -} - -int -mpz_perfect_square_p (const mpz_t u) -{ - if (u->_mp_size <= 0) - return (u->_mp_size == 0); - else - return mpz_root (NULL, u, 2); -} - -int -mpn_perfect_square_p (mp_srcptr p, mp_size_t n) -{ - mpz_t t; - - assert (n > 0); - assert (p [n-1] != 0); - return mpz_root (NULL, mpz_roinit_normal_n (t, p, n), 2); -} - -mp_size_t -mpn_sqrtrem (mp_ptr sp, mp_ptr rp, mp_srcptr p, mp_size_t n) -{ - mpz_t s, r, u; - mp_size_t res; - - assert (n > 0); - assert (p [n-1] != 0); - - mpz_init (r); - mpz_init (s); - mpz_rootrem (s, r, mpz_roinit_normal_n (u, p, n), 2); - - assert (s->_mp_size == (n+1)/2); - mpn_copyd (sp, s->_mp_d, s->_mp_size); - mpz_clear (s); - res = r->_mp_size; - if (rp) - mpn_copyd (rp, r->_mp_d, res); - mpz_clear (r); - return res; -} - -/* Combinatorics */ - -void -mpz_mfac_uiui (mpz_t x, unsigned long n, unsigned long m) -{ - mpz_set_ui (x, n + (n == 0)); - if (m + 1 < 2) return; - while (n > m + 1) - mpz_mul_ui (x, x, n -= m); -} - -void -mpz_2fac_ui (mpz_t x, unsigned long n) -{ - mpz_mfac_uiui (x, n, 2); -} - -void -mpz_fac_ui (mpz_t x, unsigned long n) -{ - mpz_mfac_uiui (x, n, 1); -} - -void -mpz_bin_uiui (mpz_t r, unsigned long n, unsigned long k) -{ - mpz_t t; - - mpz_set_ui (r, k <= n); - - if (k > (n >> 1)) - k = (k <= n) ? n - k : 0; - - mpz_init (t); - mpz_fac_ui (t, k); - - for (; k > 0; --k) - mpz_mul_ui (r, r, n--); - - mpz_divexact (r, r, t); - mpz_clear (t); -} - - -/* Primality testing */ - -/* Computes Kronecker (a/b) with odd b, a!=0 and GCD(a,b) = 1 */ -/* Adapted from JACOBI_BASE_METHOD==4 in mpn/generic/jacbase.c */ -static int -gmp_jacobi_coprime (mp_limb_t a, mp_limb_t b) -{ - int c, bit = 0; - - assert (b & 1); - assert (a != 0); - /* assert (mpn_gcd_11 (a, b) == 1); */ - - /* Below, we represent a and b shifted right so that the least - significant one bit is implicit. */ - b >>= 1; - - gmp_ctz(c, a); - a >>= 1; - - for (;;) - { - a >>= c; - /* (2/b) = -1 if b = 3 or 5 mod 8 */ - bit ^= c & (b ^ (b >> 1)); - if (a < b) - { - if (a == 0) - return bit & 1 ? -1 : 1; - bit ^= a & b; - a = b - a; - b -= a; - } - else - { - a -= b; - assert (a != 0); - } - - gmp_ctz(c, a); - ++c; - } -} - -static void -gmp_lucas_step_k_2k (mpz_t V, mpz_t Qk, const mpz_t n) -{ - mpz_mod (Qk, Qk, n); - /* V_{2k} <- V_k ^ 2 - 2Q^k */ - mpz_mul (V, V, V); - mpz_submul_ui (V, Qk, 2); - mpz_tdiv_r (V, V, n); - /* Q^{2k} = (Q^k)^2 */ - mpz_mul (Qk, Qk, Qk); -} - -/* Computes V_k, Q^k (mod n) for the Lucas' sequence */ -/* with P=1, Q=Q; k = (n>>b0)|1. */ -/* Requires an odd n > 4; b0 > 0; -2*Q must not overflow a long */ -/* Returns (U_k == 0) and sets V=V_k and Qk=Q^k. 
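The factorial, double-factorial and binomial helpers above all reduce to mpz_mfac_uiui plus an exact division. A few spot checks of the expected values, purely for illustration:

#include <assert.h>
#include "mini-gmp.h"

int main(void)
{
    mpz_t x, b;
    mpz_init(x); mpz_init(b);

    mpz_fac_ui(x, 10);                 /* 10! via the multifactorial helper */
    assert(mpz_get_ui(x) == 3628800UL);

    mpz_bin_uiui(b, 10, 3);            /* C(10,3) = 10*9*8 / 3!             */
    assert(mpz_get_ui(b) == 120UL);

    mpz_2fac_ui(x, 9);                 /* 9!! = 9*7*5*3*1                   */
    assert(mpz_get_ui(x) == 945UL);

    mpz_clear(x); mpz_clear(b);
    return 0;
}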
*/ -static int -gmp_lucas_mod (mpz_t V, mpz_t Qk, long Q, - mp_bitcnt_t b0, const mpz_t n) -{ - mp_bitcnt_t bs; - mpz_t U; - int res; - - assert (b0 > 0); - assert (Q <= - (LONG_MIN / 2)); - assert (Q >= - (LONG_MAX / 2)); - assert (mpz_cmp_ui (n, 4) > 0); - assert (mpz_odd_p (n)); - - mpz_init_set_ui (U, 1); /* U1 = 1 */ - mpz_set_ui (V, 1); /* V1 = 1 */ - mpz_set_si (Qk, Q); - - for (bs = mpz_sizeinbase (n, 2) - 1; --bs >= b0;) - { - /* U_{2k} <- U_k * V_k */ - mpz_mul (U, U, V); - /* V_{2k} <- V_k ^ 2 - 2Q^k */ - /* Q^{2k} = (Q^k)^2 */ - gmp_lucas_step_k_2k (V, Qk, n); - - /* A step k->k+1 is performed if the bit in $n$ is 1 */ - /* mpz_tstbit(n,bs) or the bit is 0 in $n$ but */ - /* should be 1 in $n+1$ (bs == b0) */ - if (b0 == bs || mpz_tstbit (n, bs)) - { - /* Q^{k+1} <- Q^k * Q */ - mpz_mul_si (Qk, Qk, Q); - /* U_{k+1} <- (U_k + V_k) / 2 */ - mpz_swap (U, V); /* Keep in V the old value of U_k */ - mpz_add (U, U, V); - /* We have to compute U/2, so we need an even value, */ - /* equivalent (mod n) */ - if (mpz_odd_p (U)) - mpz_add (U, U, n); - mpz_tdiv_q_2exp (U, U, 1); - /* V_{k+1} <-(D*U_k + V_k) / 2 = - U_{k+1} + (D-1)/2*U_k = U_{k+1} - 2Q*U_k */ - mpz_mul_si (V, V, -2*Q); - mpz_add (V, U, V); - mpz_tdiv_r (V, V, n); - } - mpz_tdiv_r (U, U, n); - } - - res = U->_mp_size == 0; - mpz_clear (U); - return res; -} - -/* Performs strong Lucas' test on x, with parameters suggested */ -/* for the BPSW test. Qk is only passed to recycle a variable. */ -/* Requires GCD (x,6) = 1.*/ -static int -gmp_stronglucas (const mpz_t x, mpz_t Qk) -{ - mp_bitcnt_t b0; - mpz_t V, n; - mp_limb_t maxD, D; /* The absolute value is stored. */ - long Q; - mp_limb_t tl; - - /* Test on the absolute value. */ - mpz_roinit_normal_n (n, x->_mp_d, GMP_ABS (x->_mp_size)); - - assert (mpz_odd_p (n)); - /* assert (mpz_gcd_ui (NULL, n, 6) == 1); */ - if (mpz_root (Qk, n, 2)) - return 0; /* A square is composite. */ - - /* Check Ds up to square root (in case, n is prime) - or avoid overflows */ - maxD = (Qk->_mp_size == 1) ? Qk->_mp_d [0] - 1 : GMP_LIMB_MAX; - - D = 3; - /* Search a D such that (D/n) = -1 in the sequence 5,-7,9,-11,.. */ - /* For those Ds we have (D/n) = (n/|D|) */ - do - { - if (D >= maxD) - return 1 + (D != GMP_LIMB_MAX); /* (1 + ! ~ D) */ - D += 2; - tl = mpz_tdiv_ui (n, D); - if (tl == 0) - return 0; - } - while (gmp_jacobi_coprime (tl, D) == 1); - - mpz_init (V); - - /* n-(D/n) = n+1 = d*2^{b0}, with d = (n>>b0) | 1 */ - b0 = mpn_common_scan (~ n->_mp_d[0], 0, n->_mp_d, n->_mp_size, GMP_LIMB_MAX); - /* b0 = mpz_scan0 (n, 0); */ - - /* D= P^2 - 4Q; P = 1; Q = (1-D)/4 */ - Q = (D & 2) ? (long) (D >> 2) + 1 : -(long) (D >> 2); - - if (! gmp_lucas_mod (V, Qk, Q, b0, n)) /* If Ud != 0 */ - while (V->_mp_size != 0 && --b0 != 0) /* while Vk != 0 */ - /* V <- V ^ 2 - 2Q^k */ - /* Q^{2k} = (Q^k)^2 */ - gmp_lucas_step_k_2k (V, Qk, n); - - mpz_clear (V); - return (b0 != 0); -} - -static int -gmp_millerrabin (const mpz_t n, const mpz_t nm1, mpz_t y, - const mpz_t q, mp_bitcnt_t k) -{ - assert (k > 0); - - /* Caller must initialize y to the base. */ - mpz_powm (y, y, q, n); - - if (mpz_cmp_ui (y, 1) == 0 || mpz_cmp (y, nm1) == 0) - return 1; - - while (--k > 0) - { - mpz_powm_ui (y, y, 2, n); - if (mpz_cmp (y, nm1) == 0) - return 1; - } - return 0; -} - -/* This product is 0xc0cfd797, and fits in 32 bits. 
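The strong Lucas and Miller-Rabin helpers above are the two halves of the BPSW combination driven by mpz_probab_prime_p; from a caller's point of view only the return value matters (0 composite, 1 probably prime, 2 definitely prime). A short usage sketch:

#include <assert.h>
#include "mini-gmp.h"

int main(void)
{
    mpz_t n;
    mpz_init_set_ui(n, 61);
    assert(mpz_probab_prime_p(n, 25) == 2);   /* small primes are proven prime */

    mpz_set_ui(n, 1);
    mpz_mul_2exp(n, n, 61);                   /* n = 2^61                      */
    mpz_sub_ui(n, n, 1);                      /* 2^61 - 1, the Mersenne prime M61 */
    assert(mpz_probab_prime_p(n, 25) >= 1);   /* at least "probably prime"     */

    mpz_add_ui(n, n, 2);                      /* 2^61 + 1, divisible by 3      */
    assert(mpz_probab_prime_p(n, 25) == 0);   /* composite                     */

    mpz_clear(n);
    return 0;
}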
*/ -#define GMP_PRIME_PRODUCT \ - (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL) - -/* Bit (p+1)/2 is set, for each odd prime <= 61 */ -#define GMP_PRIME_MASK 0xc96996dcUL - -int -mpz_probab_prime_p (const mpz_t n, int reps) -{ - mpz_t nm1; - mpz_t q; - mpz_t y; - mp_bitcnt_t k; - int is_prime; - int j; - - /* Note that we use the absolute value of n only, for compatibility - with the real GMP. */ - if (mpz_even_p (n)) - return (mpz_cmpabs_ui (n, 2) == 0) ? 2 : 0; - - /* Above test excludes n == 0 */ - assert (n->_mp_size != 0); - - if (mpz_cmpabs_ui (n, 64) < 0) - return (GMP_PRIME_MASK >> (n->_mp_d[0] >> 1)) & 2; - - if (mpz_gcd_ui (NULL, n, GMP_PRIME_PRODUCT) != 1) - return 0; - - /* All prime factors are >= 31. */ - if (mpz_cmpabs_ui (n, 31*31) < 0) - return 2; - - mpz_init (nm1); - mpz_init (q); - - /* Find q and k, where q is odd and n = 1 + 2**k * q. */ - mpz_abs (nm1, n); - nm1->_mp_d[0] -= 1; - /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ - k = mpn_scan1 (nm1->_mp_d, 0); - mpz_tdiv_q_2exp (q, nm1, k); - - /* BPSW test */ - mpz_init_set_ui (y, 2); - is_prime = gmp_millerrabin (n, nm1, y, q, k) && gmp_stronglucas (n, y); - reps -= 24; /* skip the first 24 repetitions */ - - /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] = - j^2 + j + 41 using Euler's polynomial. We potentially stop early, - if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps > - 30 (a[30] == 971 > 31*31 == 961). */ - - for (j = 0; is_prime & (j < reps); j++) - { - mpz_set_ui (y, (unsigned long) j*j+j+41); - if (mpz_cmp (y, nm1) >= 0) - { - /* Don't try any further bases. This "early" break does not affect - the result for any reasonable reps value (<=5000 was tested) */ - assert (j >= 30); - break; - } - is_prime = gmp_millerrabin (n, nm1, y, q, k); - } - mpz_clear (nm1); - mpz_clear (q); - mpz_clear (y); - - return is_prime; -} - - -/* Logical operations and bit manipulation. */ - -/* Numbers are treated as if represented in two's complement (and - infinitely sign extended). For a negative values we get the two's - complement from -x = ~x + 1, where ~ is bitwise complement. - Negation transforms - - xxxx10...0 - - into - - yyyy10...0 - - where yyyy is the bitwise complement of xxxx. So least significant - bits, up to and including the first one bit, are unchanged, and - the more significant bits are all complemented. - - To change a bit from zero to one in a negative number, subtract the - corresponding power of two from the absolute value. This can never - underflow. To change a bit from one to zero, add the corresponding - power of two, and this might overflow. E.g., if x = -001111, the - two's complement is 110001. Clearing the least significant bit, we - get two's complement 110000, and -010000. */ - -int -mpz_tstbit (const mpz_t d, mp_bitcnt_t bit_index) -{ - mp_size_t limb_index; - unsigned shift; - mp_size_t ds; - mp_size_t dn; - mp_limb_t w; - int bit; - - ds = d->_mp_size; - dn = GMP_ABS (ds); - limb_index = bit_index / GMP_LIMB_BITS; - if (limb_index >= dn) - return ds < 0; - - shift = bit_index % GMP_LIMB_BITS; - w = d->_mp_d[limb_index]; - bit = (w >> shift) & 1; - - if (ds < 0) - { - /* d < 0. Check if any of the bits below is set: If so, our bit - must be complemented. 
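The sign-extension rule described above is observable through mpz_tstbit: for a negative operand the bits up to and including the lowest set bit of the absolute value are returned unchanged, and every higher bit reads as 1. A minimal check (the value -15 is arbitrary):

#include <assert.h>
#include "mini-gmp.h"

int main(void)
{
    mpz_t x;
    mpz_init_set_si(x, -15);        /* ...11110001 in infinite two's complement */

    assert(mpz_tstbit(x, 0) == 1);  /* low bits up to the first set bit of |x|  */
    assert(mpz_tstbit(x, 1) == 0);
    assert(mpz_tstbit(x, 2) == 0);
    assert(mpz_tstbit(x, 3) == 0);
    assert(mpz_tstbit(x, 4) == 1);  /* all higher bits read as 1 (sign extension) */
    assert(mpz_tstbit(x, 1000) == 1);

    mpz_clear(x);
    return 0;
}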
*/ - if (shift > 0 && (mp_limb_t) (w << (GMP_LIMB_BITS - shift)) > 0) - return bit ^ 1; - while (--limb_index >= 0) - if (d->_mp_d[limb_index] > 0) - return bit ^ 1; - } - return bit; -} - -static void -mpz_abs_add_bit (mpz_t d, mp_bitcnt_t bit_index) -{ - mp_size_t dn, limb_index; - mp_limb_t bit; - mp_ptr dp; - - dn = GMP_ABS (d->_mp_size); - - limb_index = bit_index / GMP_LIMB_BITS; - bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); - - if (limb_index >= dn) - { - mp_size_t i; - /* The bit should be set outside of the end of the number. - We have to increase the size of the number. */ - dp = MPZ_REALLOC (d, limb_index + 1); - - dp[limb_index] = bit; - for (i = dn; i < limb_index; i++) - dp[i] = 0; - dn = limb_index + 1; - } - else - { - mp_limb_t cy; - - dp = d->_mp_d; - - cy = mpn_add_1 (dp + limb_index, dp + limb_index, dn - limb_index, bit); - if (cy > 0) - { - dp = MPZ_REALLOC (d, dn + 1); - dp[dn++] = cy; - } - } - - d->_mp_size = (d->_mp_size < 0) ? - dn : dn; -} - -static void -mpz_abs_sub_bit (mpz_t d, mp_bitcnt_t bit_index) -{ - mp_size_t dn, limb_index; - mp_ptr dp; - mp_limb_t bit; - - dn = GMP_ABS (d->_mp_size); - dp = d->_mp_d; - - limb_index = bit_index / GMP_LIMB_BITS; - bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); - - assert (limb_index < dn); - - gmp_assert_nocarry (mpn_sub_1 (dp + limb_index, dp + limb_index, - dn - limb_index, bit)); - dn = mpn_normalized_size (dp, dn); - d->_mp_size = (d->_mp_size < 0) ? - dn : dn; -} - -void -mpz_setbit (mpz_t d, mp_bitcnt_t bit_index) -{ - if (!mpz_tstbit (d, bit_index)) - { - if (d->_mp_size >= 0) - mpz_abs_add_bit (d, bit_index); - else - mpz_abs_sub_bit (d, bit_index); - } -} - -void -mpz_clrbit (mpz_t d, mp_bitcnt_t bit_index) -{ - if (mpz_tstbit (d, bit_index)) - { - if (d->_mp_size >= 0) - mpz_abs_sub_bit (d, bit_index); - else - mpz_abs_add_bit (d, bit_index); - } -} - -void -mpz_combit (mpz_t d, mp_bitcnt_t bit_index) -{ - if (mpz_tstbit (d, bit_index) ^ (d->_mp_size < 0)) - mpz_abs_sub_bit (d, bit_index); - else - mpz_abs_add_bit (d, bit_index); -} - -void -mpz_com (mpz_t r, const mpz_t u) -{ - mpz_add_ui (r, u, 1); - mpz_neg (r, r); -} - -void -mpz_and (mpz_t r, const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, rn, i; - mp_ptr up, vp, rp; - - mp_limb_t ux, vx, rx; - mp_limb_t uc, vc, rc; - mp_limb_t ul, vl, rl; - - un = GMP_ABS (u->_mp_size); - vn = GMP_ABS (v->_mp_size); - if (un < vn) - { - MPZ_SRCPTR_SWAP (u, v); - MP_SIZE_T_SWAP (un, vn); - } - if (vn == 0) - { - r->_mp_size = 0; - return; - } - - uc = u->_mp_size < 0; - vc = v->_mp_size < 0; - rc = uc & vc; - - ux = -uc; - vx = -vc; - rx = -rc; - - /* If the smaller input is positive, higher limbs don't matter. */ - rn = vx ? un : vn; - - rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); - - up = u->_mp_d; - vp = v->_mp_d; - - i = 0; - do - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - vl = (vp[i] ^ vx) + vc; - vc = vl < vc; - - rl = ( (ul & vl) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - while (++i < vn); - assert (vc == 0); - - for (; i < rn; i++) - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - rl = ( (ul & vx) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - if (rc) - rp[rn++] = rc; - else - rn = mpn_normalized_size (rp, rn); - - r->_mp_size = rx ? 
-rn : rn; -} - -void -mpz_ior (mpz_t r, const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, rn, i; - mp_ptr up, vp, rp; - - mp_limb_t ux, vx, rx; - mp_limb_t uc, vc, rc; - mp_limb_t ul, vl, rl; - - un = GMP_ABS (u->_mp_size); - vn = GMP_ABS (v->_mp_size); - if (un < vn) - { - MPZ_SRCPTR_SWAP (u, v); - MP_SIZE_T_SWAP (un, vn); - } - if (vn == 0) - { - mpz_set (r, u); - return; - } - - uc = u->_mp_size < 0; - vc = v->_mp_size < 0; - rc = uc | vc; - - ux = -uc; - vx = -vc; - rx = -rc; - - /* If the smaller input is negative, by sign extension higher limbs - don't matter. */ - rn = vx ? vn : un; - - rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); - - up = u->_mp_d; - vp = v->_mp_d; - - i = 0; - do - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - vl = (vp[i] ^ vx) + vc; - vc = vl < vc; - - rl = ( (ul | vl) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - while (++i < vn); - assert (vc == 0); - - for (; i < rn; i++) - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - rl = ( (ul | vx) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - if (rc) - rp[rn++] = rc; - else - rn = mpn_normalized_size (rp, rn); - - r->_mp_size = rx ? -rn : rn; -} - -void -mpz_xor (mpz_t r, const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, i; - mp_ptr up, vp, rp; - - mp_limb_t ux, vx, rx; - mp_limb_t uc, vc, rc; - mp_limb_t ul, vl, rl; - - un = GMP_ABS (u->_mp_size); - vn = GMP_ABS (v->_mp_size); - if (un < vn) - { - MPZ_SRCPTR_SWAP (u, v); - MP_SIZE_T_SWAP (un, vn); - } - if (vn == 0) - { - mpz_set (r, u); - return; - } - - uc = u->_mp_size < 0; - vc = v->_mp_size < 0; - rc = uc ^ vc; - - ux = -uc; - vx = -vc; - rx = -rc; - - rp = MPZ_REALLOC (r, un + (mp_size_t) rc); - - up = u->_mp_d; - vp = v->_mp_d; - - i = 0; - do - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - vl = (vp[i] ^ vx) + vc; - vc = vl < vc; - - rl = (ul ^ vl ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - while (++i < vn); - assert (vc == 0); - - for (; i < un; i++) - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - rl = (ul ^ ux) + rc; - rc = rl < rc; - rp[i] = rl; - } - if (rc) - rp[un++] = rc; - else - un = mpn_normalized_size (rp, un); - - r->_mp_size = rx ? -un : un; -} - -static unsigned -gmp_popcount_limb (mp_limb_t x) -{ - unsigned c; - - /* Do 16 bits at a time, to avoid limb-sized constants. 
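The popcount helper introduced above processes each limb sixteen bits at a time with the usual mask-and-add reduction so that no limb-sized constants are needed. The same steps on a single 16-bit value, written out as a standalone function for illustration:

#include <assert.h>
#include <stdint.h>

/* Same mask-and-add reduction as gmp_popcount_limb, on one 16-bit chunk. */
static unsigned popcount16(uint16_t x)
{
    unsigned w = x - ((x >> 1) & 0x5555);      /* 2-bit partial counts        */
    w = ((w >> 2) & 0x3333) + (w & 0x3333);    /* 4-bit partial counts        */
    w = (w >> 4) + w;                          /* byte-wide sums in low nibbles */
    w = ((w >> 8) & 0x000f) + (w & 0x000f);    /* final sum, at most 16       */
    return w;
}

int main(void)
{
    assert(popcount16(0x0000) == 0);
    assert(popcount16(0xffff) == 16);
    assert(popcount16(0xa5a5) == 8);
    return 0;
}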
*/ - int LOCAL_SHIFT_BITS = 16; - for (c = 0; x > 0;) - { - unsigned w = x - ((x >> 1) & 0x5555); - w = ((w >> 2) & 0x3333) + (w & 0x3333); - w = (w >> 4) + w; - w = ((w >> 8) & 0x000f) + (w & 0x000f); - c += w; - if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) - x >>= LOCAL_SHIFT_BITS; - else - x = 0; - } - return c; -} - -mp_bitcnt_t -mpn_popcount (mp_srcptr p, mp_size_t n) -{ - mp_size_t i; - mp_bitcnt_t c; - - for (c = 0, i = 0; i < n; i++) - c += gmp_popcount_limb (p[i]); - - return c; -} - -mp_bitcnt_t -mpz_popcount (const mpz_t u) -{ - mp_size_t un; - - un = u->_mp_size; - - if (un < 0) - return ~(mp_bitcnt_t) 0; - - return mpn_popcount (u->_mp_d, un); -} - -mp_bitcnt_t -mpz_hamdist (const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, i; - mp_limb_t uc, vc, ul, vl, comp; - mp_srcptr up, vp; - mp_bitcnt_t c; - - un = u->_mp_size; - vn = v->_mp_size; - - if ( (un ^ vn) < 0) - return ~(mp_bitcnt_t) 0; - - comp = - (uc = vc = (un < 0)); - if (uc) - { - assert (vn < 0); - un = -un; - vn = -vn; - } - - up = u->_mp_d; - vp = v->_mp_d; - - if (un < vn) - MPN_SRCPTR_SWAP (up, un, vp, vn); - - for (i = 0, c = 0; i < vn; i++) - { - ul = (up[i] ^ comp) + uc; - uc = ul < uc; - - vl = (vp[i] ^ comp) + vc; - vc = vl < vc; - - c += gmp_popcount_limb (ul ^ vl); - } - assert (vc == 0); - - for (; i < un; i++) - { - ul = (up[i] ^ comp) + uc; - uc = ul < uc; - - c += gmp_popcount_limb (ul ^ comp); - } - - return c; -} - -mp_bitcnt_t -mpz_scan1 (const mpz_t u, mp_bitcnt_t starting_bit) -{ - mp_ptr up; - mp_size_t us, un, i; - mp_limb_t limb, ux; - - us = u->_mp_size; - un = GMP_ABS (us); - i = starting_bit / GMP_LIMB_BITS; - - /* Past the end there's no 1 bits for u>=0, or an immediate 1 bit - for u<0. Notice this test picks up any u==0 too. */ - if (i >= un) - return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit); - - up = u->_mp_d; - ux = 0; - limb = up[i]; - - if (starting_bit != 0) - { - if (us < 0) - { - ux = mpn_zero_p (up, i); - limb = ~ limb + ux; - ux = - (mp_limb_t) (limb >= ux); - } - - /* Mask to 0 all bits before starting_bit, thus ignoring them. */ - limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); - } - - return mpn_common_scan (limb, i, up, un, ux); -} - -mp_bitcnt_t -mpz_scan0 (const mpz_t u, mp_bitcnt_t starting_bit) -{ - mp_ptr up; - mp_size_t us, un, i; - mp_limb_t limb, ux; - - us = u->_mp_size; - ux = - (mp_limb_t) (us >= 0); - un = GMP_ABS (us); - i = starting_bit / GMP_LIMB_BITS; - - /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for - u<0. Notice this test picks up all cases of u==0 too. */ - if (i >= un) - return (ux ? starting_bit : ~(mp_bitcnt_t) 0); - - up = u->_mp_d; - limb = up[i] ^ ux; - - if (ux == 0) - limb -= mpn_zero_p (up, i); /* limb = ~(~limb + zero_p) */ - - /* Mask all bits before starting_bit, thus ignoring them. */ - limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); - - return mpn_common_scan (limb, i, up, un, ux); -} - - -/* MPZ base conversion. 
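mpz_scan1 returns the index of the lowest set bit, which for a non-zero value is its 2-adic valuation; combined with mpz_tdiv_q_2exp this is the caller-level version of the make-odd step used throughout the gcd code. A small check (input chosen arbitrarily):

#include <assert.h>
#include "mini-gmp.h"

int main(void)
{
    mpz_t n, odd;
    mp_bitcnt_t v;

    mpz_init_set_ui(n, 48);             /* 48 = 2^4 * 3 */
    mpz_init(odd);

    v = mpz_scan1(n, 0);                /* lowest set bit = 2-adic valuation */
    assert(v == 4);

    mpz_tdiv_q_2exp(odd, n, v);         /* strip the factor 2^v */
    assert(mpz_get_ui(odd) == 3 && mpz_odd_p(odd));

    assert(mpz_scan0(n, 0) == 0);       /* bit 0 of 48 is clear */

    mpz_clear(n); mpz_clear(odd);
    return 0;
}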
*/ - -size_t -mpz_sizeinbase (const mpz_t u, int base) -{ - mp_size_t un, tn; - mp_srcptr up; - mp_ptr tp; - mp_bitcnt_t bits; - struct gmp_div_inverse bi; - size_t ndigits; - - assert (base >= 2); - assert (base <= 62); - - un = GMP_ABS (u->_mp_size); - if (un == 0) - return 1; - - up = u->_mp_d; - - bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]); - switch (base) - { - case 2: - return bits; - case 4: - return (bits + 1) / 2; - case 8: - return (bits + 2) / 3; - case 16: - return (bits + 3) / 4; - case 32: - return (bits + 4) / 5; - /* FIXME: Do something more clever for the common case of base - 10. */ - } - - tp = gmp_alloc_limbs (un); - mpn_copyi (tp, up, un); - mpn_div_qr_1_invert (&bi, base); - - tn = un; - ndigits = 0; - do - { - ndigits++; - mpn_div_qr_1_preinv (tp, tp, tn, &bi); - tn -= (tp[tn-1] == 0); - } - while (tn > 0); - - gmp_free_limbs (tp, un); - return ndigits; -} - -char * -mpz_get_str (char *sp, int base, const mpz_t u) -{ - unsigned bits; - const char *digits; - mp_size_t un; - size_t i, sn, osn; - - digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; - if (base > 1) - { - if (base <= 36) - digits = "0123456789abcdefghijklmnopqrstuvwxyz"; - else if (base > 62) - return NULL; - } - else if (base >= -1) - base = 10; - else - { - base = -base; - if (base > 36) - return NULL; - } - - sn = 1 + mpz_sizeinbase (u, base); - if (!sp) - { - osn = 1 + sn; - sp = (char *) gmp_alloc (osn); - } - else - osn = 0; - un = GMP_ABS (u->_mp_size); - - if (un == 0) - { - sp[0] = '0'; - sn = 1; - goto ret; - } - - i = 0; - - if (u->_mp_size < 0) - sp[i++] = '-'; - - bits = mpn_base_power_of_two_p (base); - - if (bits) - /* Not modified in this case. */ - sn = i + mpn_get_str_bits ((unsigned char *) sp + i, bits, u->_mp_d, un); - else - { - struct mpn_base_info info; - mp_ptr tp; - - mpn_get_base_info (&info, base); - tp = gmp_alloc_limbs (un); - mpn_copyi (tp, u->_mp_d, un); - - sn = i + mpn_get_str_other ((unsigned char *) sp + i, base, &info, tp, un); - gmp_free_limbs (tp, un); - } - - for (; i < sn; i++) - sp[i] = digits[(unsigned char) sp[i]]; - -ret: - sp[sn] = '\0'; - if (osn && osn != sn + 1) - sp = (char*) gmp_realloc (sp, osn, sn + 1); - return sp; -} - -int -mpz_set_str (mpz_t r, const char *sp, int base) -{ - unsigned bits, value_of_a; - mp_size_t rn, alloc; - mp_ptr rp; - size_t dn, sn; - int sign; - unsigned char *dp; - - assert (base == 0 || (base >= 2 && base <= 62)); - - while (isspace( (unsigned char) *sp)) - sp++; - - sign = (*sp == '-'); - sp += sign; - - if (base == 0) - { - if (sp[0] == '0') - { - if (sp[1] == 'x' || sp[1] == 'X') - { - base = 16; - sp += 2; - } - else if (sp[1] == 'b' || sp[1] == 'B') - { - base = 2; - sp += 2; - } - else - base = 8; - } - else - base = 10; - } - - if (!*sp) - { - r->_mp_size = 0; - return -1; - } - sn = strlen(sp); - dp = (unsigned char *) gmp_alloc (sn); - - value_of_a = (base > 36) ? 
36 : 10; - for (dn = 0; *sp; sp++) - { - unsigned digit; - - if (isspace ((unsigned char) *sp)) - continue; - else if (*sp >= '0' && *sp <= '9') - digit = *sp - '0'; - else if (*sp >= 'a' && *sp <= 'z') - digit = *sp - 'a' + value_of_a; - else if (*sp >= 'A' && *sp <= 'Z') - digit = *sp - 'A' + 10; - else - digit = base; /* fail */ - - if (digit >= (unsigned) base) - { - gmp_free (dp, sn); - r->_mp_size = 0; - return -1; - } - - dp[dn++] = digit; - } - - if (!dn) - { - gmp_free (dp, sn); - r->_mp_size = 0; - return -1; - } - bits = mpn_base_power_of_two_p (base); - - if (bits > 0) - { - alloc = (dn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; - rp = MPZ_REALLOC (r, alloc); - rn = mpn_set_str_bits (rp, dp, dn, bits); - } - else - { - struct mpn_base_info info; - mpn_get_base_info (&info, base); - alloc = (dn + info.exp - 1) / info.exp; - rp = MPZ_REALLOC (r, alloc); - rn = mpn_set_str_other (rp, dp, dn, base, &info); - /* Normalization, needed for all-zero input. */ - assert (rn > 0); - rn -= rp[rn-1] == 0; - } - assert (rn <= alloc); - gmp_free (dp, sn); - - r->_mp_size = sign ? - rn : rn; - - return 0; -} - -int -mpz_init_set_str (mpz_t r, const char *sp, int base) -{ - mpz_init (r); - return mpz_set_str (r, sp, base); -} - -size_t -mpz_out_str (FILE *stream, int base, const mpz_t x) -{ - char *str; - size_t len, n; - - str = mpz_get_str (NULL, base, x); - if (!str) - return 0; - len = strlen (str); - n = fwrite (str, 1, len, stream); - gmp_free (str, len + 1); - return n; -} - - -static int -gmp_detect_endian (void) -{ - static const int i = 2; - const unsigned char *p = (const unsigned char *) &i; - return 1 - *p; -} - -/* Import and export. Does not support nails. */ -void -mpz_import (mpz_t r, size_t count, int order, size_t size, int endian, - size_t nails, const void *src) -{ - const unsigned char *p; - ptrdiff_t word_step; - mp_ptr rp; - mp_size_t rn; - - /* The current (partial) limb. */ - mp_limb_t limb; - /* The number of bytes already copied to this limb (starting from - the low end). */ - size_t bytes; - /* The index where the limb should be stored, when completed. */ - mp_size_t i; - - if (nails != 0) - gmp_die ("mpz_import: Nails not supported."); - - assert (order == 1 || order == -1); - assert (endian >= -1 && endian <= 1); - - if (endian == 0) - endian = gmp_detect_endian (); - - p = (unsigned char *) src; - - word_step = (order != endian) ? 2 * size : 0; - - /* Process bytes from the least significant end, so point p at the - least significant word. */ - if (order == 1) - { - p += size * (count - 1); - word_step = - word_step; - } - - /* And at least significant byte of that word. 
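mpz_import above walks the input words from the least significant end, honouring the order, size and endian parameters; mpz_export, which follows, is its inverse. A round-trip sketch with one-byte words, most significant first (buffer contents arbitrary):

#include <assert.h>
#include <string.h>
#include "mini-gmp.h"

int main(void)
{
    const unsigned char in[4] = { 0x01, 0x02, 0x03, 0x04 };
    unsigned char out[4] = { 0 };
    size_t count = 0;
    mpz_t x;

    mpz_init(x);

    /* order=1: most significant word first; size=1: one byte per word;
       endian=0: native (irrelevant for 1-byte words); nails=0. */
    mpz_import(x, 4, 1, 1, 0, 0, in);
    assert(mpz_get_ui(x) == 0x01020304UL);

    mpz_export(out, &count, 1, 1, 0, 0, x);
    assert(count == 4 && memcmp(in, out, 4) == 0);

    mpz_clear(x);
    return 0;
}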
*/ - if (endian == 1) - p += (size - 1); - - rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t); - rp = MPZ_REALLOC (r, rn); - - for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step) - { - size_t j; - for (j = 0; j < size; j++, p -= (ptrdiff_t) endian) - { - limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT); - if (bytes == sizeof(mp_limb_t)) - { - rp[i++] = limb; - bytes = 0; - limb = 0; - } - } - } - assert (i + (bytes > 0) == rn); - if (limb != 0) - rp[i++] = limb; - else - i = mpn_normalized_size (rp, i); - - r->_mp_size = i; -} - -void * -mpz_export (void *r, size_t *countp, int order, size_t size, int endian, - size_t nails, const mpz_t u) -{ - size_t count; - mp_size_t un; - - if (nails != 0) - gmp_die ("mpz_export: Nails not supported."); - - assert (order == 1 || order == -1); - assert (endian >= -1 && endian <= 1); - assert (size > 0 || u->_mp_size == 0); - - un = u->_mp_size; - count = 0; - if (un != 0) - { - size_t k; - unsigned char *p; - ptrdiff_t word_step; - /* The current (partial) limb. */ - mp_limb_t limb; - /* The number of bytes left to do in this limb. */ - size_t bytes; - /* The index where the limb was read. */ - mp_size_t i; - - un = GMP_ABS (un); - - /* Count bytes in top limb. */ - limb = u->_mp_d[un-1]; - assert (limb != 0); - - k = (GMP_LIMB_BITS <= CHAR_BIT); - if (!k) - { - do { - int LOCAL_CHAR_BIT = CHAR_BIT; - k++; limb >>= LOCAL_CHAR_BIT; - } while (limb != 0); - } - /* else limb = 0; */ - - count = (k + (un-1) * sizeof (mp_limb_t) + size - 1) / size; - - if (!r) - r = gmp_alloc (count * size); - - if (endian == 0) - endian = gmp_detect_endian (); - - p = (unsigned char *) r; - - word_step = (order != endian) ? 2 * size : 0; - - /* Process bytes from the least significant end, so point p at the - least significant word. */ - if (order == 1) - { - p += size * (count - 1); - word_step = - word_step; - } - - /* And at least significant byte of that word. */ - if (endian == 1) - p += (size - 1); - - for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step) - { - size_t j; - for (j = 0; j < size; ++j, p -= (ptrdiff_t) endian) - { - if (sizeof (mp_limb_t) == 1) - { - if (i < un) - *p = u->_mp_d[i++]; - else - *p = 0; - } - else - { - int LOCAL_CHAR_BIT = CHAR_BIT; - if (bytes == 0) - { - if (i < un) - limb = u->_mp_d[i++]; - bytes = sizeof (mp_limb_t); - } - *p = limb; - limb >>= LOCAL_CHAR_BIT; - bytes--; - } - } - } - assert (i == un); - assert (k == count); - } - - if (countp) - *countp = count; - - return r; -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp.h deleted file mode 100644 index f28cb360ce..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mini-gmp.h +++ /dev/null @@ -1,311 +0,0 @@ -/* mini-gmp, a minimalistic implementation of a GNU GMP subset. - -Copyright 2011-2015, 2017, 2019-2021 Free Software Foundation, Inc. - -This file is part of the GNU MP Library. - -The GNU MP Library is free software; you can redistribute it and/or modify -it under the terms of either: - - * the GNU Lesser General Public License as published by the Free - Software Foundation; either version 3 of the License, or (at your - option) any later version. - -or - - * the GNU General Public License as published by the Free Software - Foundation; either version 2 of the License, or (at your option) any - later version. - -or both in parallel, as here. 
- -The GNU MP Library is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -for more details. - -You should have received copies of the GNU General Public License and the -GNU Lesser General Public License along with the GNU MP Library. If not, -see https://www.gnu.org/licenses/. */ - -/* About mini-gmp: This is a minimal implementation of a subset of the - GMP interface. It is intended for inclusion into applications which - have modest bignums needs, as a fallback when the real GMP library - is not installed. - - This file defines the public interface. */ - -#ifndef __MINI_GMP_H__ -#define __MINI_GMP_H__ - -/* For size_t */ -#include - -#if defined (__cplusplus) -extern "C" { -#endif - -void mp_set_memory_functions (void *(*) (size_t), - void *(*) (void *, size_t, size_t), - void (*) (void *, size_t)); - -void mp_get_memory_functions (void *(**) (size_t), - void *(**) (void *, size_t, size_t), - void (**) (void *, size_t)); - -#ifndef MINI_GMP_LIMB_TYPE -#define MINI_GMP_LIMB_TYPE long -#endif - -typedef unsigned MINI_GMP_LIMB_TYPE mp_limb_t; -typedef long mp_size_t; -typedef unsigned long mp_bitcnt_t; - -typedef mp_limb_t *mp_ptr; -typedef const mp_limb_t *mp_srcptr; - -typedef struct -{ - int _mp_alloc; /* Number of *limbs* allocated and pointed - to by the _mp_d field. */ - int _mp_size; /* abs(_mp_size) is the number of limbs the - last field points to. If _mp_size is - negative this is a negative number. */ - mp_limb_t *_mp_d; /* Pointer to the limbs. */ -} __mpz_struct; - -typedef __mpz_struct mpz_t[1]; - -typedef __mpz_struct *mpz_ptr; -typedef const __mpz_struct *mpz_srcptr; - -extern const int mp_bits_per_limb; - -void mpn_copyi (mp_ptr, mp_srcptr, mp_size_t); -void mpn_copyd (mp_ptr, mp_srcptr, mp_size_t); -void mpn_zero (mp_ptr, mp_size_t); - -int mpn_cmp (mp_srcptr, mp_srcptr, mp_size_t); -int mpn_zero_p (mp_srcptr, mp_size_t); - -mp_limb_t mpn_add_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_add_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); -mp_limb_t mpn_add (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); - -mp_limb_t mpn_sub_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_sub_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); -mp_limb_t mpn_sub (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); - -mp_limb_t mpn_mul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_addmul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_submul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); - -mp_limb_t mpn_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); -void mpn_mul_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); -void mpn_sqr (mp_ptr, mp_srcptr, mp_size_t); -int mpn_perfect_square_p (mp_srcptr, mp_size_t); -mp_size_t mpn_sqrtrem (mp_ptr, mp_ptr, mp_srcptr, mp_size_t); -mp_size_t mpn_gcd (mp_ptr, mp_ptr, mp_size_t, mp_ptr, mp_size_t); - -mp_limb_t mpn_lshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); -mp_limb_t mpn_rshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); - -mp_bitcnt_t mpn_scan0 (mp_srcptr, mp_bitcnt_t); -mp_bitcnt_t mpn_scan1 (mp_srcptr, mp_bitcnt_t); - -void mpn_com (mp_ptr, mp_srcptr, mp_size_t); -mp_limb_t mpn_neg (mp_ptr, mp_srcptr, mp_size_t); - -mp_bitcnt_t mpn_popcount (mp_srcptr, mp_size_t); - -mp_limb_t mpn_invert_3by2 (mp_limb_t, mp_limb_t); -#define mpn_invert_limb(x) mpn_invert_3by2 ((x), 0) - -size_t mpn_get_str (unsigned char *, int, mp_ptr, mp_size_t); 
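mp_set_memory_functions, declared above, installs the allocator triple used for all limb storage; note that the reallocation and deallocation hooks also receive the old size. A minimal sketch that simply forwards to the C library allocator (no failure handling, for illustration only):

#include <stdlib.h>
#include "mini-gmp.h"

/* Thin wrappers with the size-aware signatures mini-gmp expects. */
static void *my_alloc(size_t n)                        { return malloc(n); }
static void *my_realloc(void *p, size_t old, size_t n) { (void) old; return realloc(p, n); }
static void  my_free(void *p, size_t n)                { (void) n; free(p); }

int main(void)
{
    mpz_t x;

    /* Install the allocator before any mpz_t is created. */
    mp_set_memory_functions(my_alloc, my_realloc, my_free);

    mpz_init_set_ui(x, 1);
    mpz_mul_2exp(x, x, 1000);       /* 2^1000 forces a few (re)allocations */
    mpz_clear(x);
    return 0;
}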
-mp_size_t mpn_set_str (mp_ptr, const unsigned char *, size_t, int); - -void mpz_init (mpz_t); -void mpz_init2 (mpz_t, mp_bitcnt_t); -void mpz_clear (mpz_t); - -#define mpz_odd_p(z) (((z)->_mp_size != 0) & (int) (z)->_mp_d[0]) -#define mpz_even_p(z) (! mpz_odd_p (z)) - -int mpz_sgn (const mpz_t); -int mpz_cmp_si (const mpz_t, long); -int mpz_cmp_ui (const mpz_t, unsigned long); -int mpz_cmp (const mpz_t, const mpz_t); -int mpz_cmpabs_ui (const mpz_t, unsigned long); -int mpz_cmpabs (const mpz_t, const mpz_t); -int mpz_cmp_d (const mpz_t, double); -int mpz_cmpabs_d (const mpz_t, double); - -void mpz_abs (mpz_t, const mpz_t); -void mpz_neg (mpz_t, const mpz_t); -void mpz_swap (mpz_t, mpz_t); - -void mpz_add_ui (mpz_t, const mpz_t, unsigned long); -void mpz_add (mpz_t, const mpz_t, const mpz_t); -void mpz_sub_ui (mpz_t, const mpz_t, unsigned long); -void mpz_ui_sub (mpz_t, unsigned long, const mpz_t); -void mpz_sub (mpz_t, const mpz_t, const mpz_t); - -void mpz_mul_si (mpz_t, const mpz_t, long int); -void mpz_mul_ui (mpz_t, const mpz_t, unsigned long int); -void mpz_mul (mpz_t, const mpz_t, const mpz_t); -void mpz_mul_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_addmul_ui (mpz_t, const mpz_t, unsigned long int); -void mpz_addmul (mpz_t, const mpz_t, const mpz_t); -void mpz_submul_ui (mpz_t, const mpz_t, unsigned long int); -void mpz_submul (mpz_t, const mpz_t, const mpz_t); - -void mpz_cdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_fdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_tdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_cdiv_q (mpz_t, const mpz_t, const mpz_t); -void mpz_fdiv_q (mpz_t, const mpz_t, const mpz_t); -void mpz_tdiv_q (mpz_t, const mpz_t, const mpz_t); -void mpz_cdiv_r (mpz_t, const mpz_t, const mpz_t); -void mpz_fdiv_r (mpz_t, const mpz_t, const mpz_t); -void mpz_tdiv_r (mpz_t, const mpz_t, const mpz_t); - -void mpz_cdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_fdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_tdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_cdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_fdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_tdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); - -void mpz_mod (mpz_t, const mpz_t, const mpz_t); - -void mpz_divexact (mpz_t, const mpz_t, const mpz_t); - -int mpz_divisible_p (const mpz_t, const mpz_t); -int mpz_congruent_p (const mpz_t, const mpz_t, const mpz_t); - -unsigned long mpz_cdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); -unsigned long mpz_fdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); -unsigned long mpz_tdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); -unsigned long mpz_cdiv_q_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_fdiv_q_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_tdiv_q_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_cdiv_r_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_fdiv_r_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_tdiv_r_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_cdiv_ui (const mpz_t, unsigned long); -unsigned long mpz_fdiv_ui (const mpz_t, unsigned long); -unsigned long mpz_tdiv_ui (const mpz_t, unsigned long); - -unsigned long mpz_mod_ui (mpz_t, const mpz_t, unsigned long); - -void mpz_divexact_ui (mpz_t, const mpz_t, unsigned long); - -int mpz_divisible_ui_p (const mpz_t, unsigned long); - -unsigned long mpz_gcd_ui (mpz_t, const mpz_t, unsigned long); -void mpz_gcd (mpz_t, const mpz_t, const mpz_t); -void 
mpz_gcdext (mpz_t, mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_lcm_ui (mpz_t, const mpz_t, unsigned long); -void mpz_lcm (mpz_t, const mpz_t, const mpz_t); -int mpz_invert (mpz_t, const mpz_t, const mpz_t); - -void mpz_sqrtrem (mpz_t, mpz_t, const mpz_t); -void mpz_sqrt (mpz_t, const mpz_t); -int mpz_perfect_square_p (const mpz_t); - -void mpz_pow_ui (mpz_t, const mpz_t, unsigned long); -void mpz_ui_pow_ui (mpz_t, unsigned long, unsigned long); -void mpz_powm (mpz_t, const mpz_t, const mpz_t, const mpz_t); -void mpz_powm_ui (mpz_t, const mpz_t, unsigned long, const mpz_t); - -void mpz_rootrem (mpz_t, mpz_t, const mpz_t, unsigned long); -int mpz_root (mpz_t, const mpz_t, unsigned long); - -void mpz_fac_ui (mpz_t, unsigned long); -void mpz_2fac_ui (mpz_t, unsigned long); -void mpz_mfac_uiui (mpz_t, unsigned long, unsigned long); -void mpz_bin_uiui (mpz_t, unsigned long, unsigned long); - -int mpz_probab_prime_p (const mpz_t, int); - -int mpz_tstbit (const mpz_t, mp_bitcnt_t); -void mpz_setbit (mpz_t, mp_bitcnt_t); -void mpz_clrbit (mpz_t, mp_bitcnt_t); -void mpz_combit (mpz_t, mp_bitcnt_t); - -void mpz_com (mpz_t, const mpz_t); -void mpz_and (mpz_t, const mpz_t, const mpz_t); -void mpz_ior (mpz_t, const mpz_t, const mpz_t); -void mpz_xor (mpz_t, const mpz_t, const mpz_t); - -mp_bitcnt_t mpz_popcount (const mpz_t); -mp_bitcnt_t mpz_hamdist (const mpz_t, const mpz_t); -mp_bitcnt_t mpz_scan0 (const mpz_t, mp_bitcnt_t); -mp_bitcnt_t mpz_scan1 (const mpz_t, mp_bitcnt_t); - -int mpz_fits_slong_p (const mpz_t); -int mpz_fits_ulong_p (const mpz_t); -int mpz_fits_sint_p (const mpz_t); -int mpz_fits_uint_p (const mpz_t); -int mpz_fits_sshort_p (const mpz_t); -int mpz_fits_ushort_p (const mpz_t); -long int mpz_get_si (const mpz_t); -unsigned long int mpz_get_ui (const mpz_t); -double mpz_get_d (const mpz_t); -size_t mpz_size (const mpz_t); -mp_limb_t mpz_getlimbn (const mpz_t, mp_size_t); - -void mpz_realloc2 (mpz_t, mp_bitcnt_t); -mp_srcptr mpz_limbs_read (mpz_srcptr); -mp_ptr mpz_limbs_modify (mpz_t, mp_size_t); -mp_ptr mpz_limbs_write (mpz_t, mp_size_t); -void mpz_limbs_finish (mpz_t, mp_size_t); -mpz_srcptr mpz_roinit_n (mpz_t, mp_srcptr, mp_size_t); - -#define MPZ_ROINIT_N(xp, xs) {{0, (xs),(xp) }} - -void mpz_set_si (mpz_t, signed long int); -void mpz_set_ui (mpz_t, unsigned long int); -void mpz_set (mpz_t, const mpz_t); -void mpz_set_d (mpz_t, double); - -void mpz_init_set_si (mpz_t, signed long int); -void mpz_init_set_ui (mpz_t, unsigned long int); -void mpz_init_set (mpz_t, const mpz_t); -void mpz_init_set_d (mpz_t, double); - -size_t mpz_sizeinbase (const mpz_t, int); -char *mpz_get_str (char *, int, const mpz_t); -int mpz_set_str (mpz_t, const char *, int); -int mpz_init_set_str (mpz_t, const char *, int); - -/* This long list taken from gmp.h. */ -/* For reference, "defined(EOF)" cannot be used here. In g++ 2.95.4, - defines EOF but not FILE. 
*/ -#if defined (FILE) \ - || defined (H_STDIO) \ - || defined (_H_STDIO) /* AIX */ \ - || defined (_STDIO_H) /* glibc, Sun, SCO */ \ - || defined (_STDIO_H_) /* BSD, OSF */ \ - || defined (__STDIO_H) /* Borland */ \ - || defined (__STDIO_H__) /* IRIX */ \ - || defined (_STDIO_INCLUDED) /* HPUX */ \ - || defined (__dj_include_stdio_h_) /* DJGPP */ \ - || defined (_FILE_DEFINED) /* Microsoft */ \ - || defined (__STDIO__) /* Apple MPW MrC */ \ - || defined (_MSL_STDIO_H) /* Metrowerks */ \ - || defined (_STDIO_H_INCLUDED) /* QNX4 */ \ - || defined (_ISO_STDIO_ISO_H) /* Sun C++ */ \ - || defined (__STDIO_LOADED) /* VMS */ \ - || defined (_STDIO) /* HPE NonStop */ \ - || defined (__DEFINED_FILE) /* musl */ -size_t mpz_out_str (FILE *, int, const mpz_t); -#endif - -void mpz_import (mpz_t, size_t, int, size_t, int, size_t, const void *); -void *mpz_export (void *, size_t *, int, size_t, int, size_t, const mpz_t); - -#if defined (__cplusplus) -} -#endif -#endif /* __MINI_GMP_H__ */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign_namespace.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign_namespace.h index bbfe72c13b..54e90326be 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign_namespace.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign_namespace.h @@ -18,6 +18,12 @@ #define PARAM_JOIN2(a, b) PARAM_JOIN2_(a, b) #define PARAM_NAME2(end, s) PARAM_JOIN2(end, s) +#ifndef DISABLE_NAMESPACING +#define SQISIGN_NAMESPACE_GENERIC(s) PARAM_NAME2(gen, s) +#else +#define SQISIGN_NAMESPACE_GENERIC(s) s +#endif + #if defined(SQISIGN_VARIANT) && !defined(DISABLE_NAMESPACING) #if defined(SQISIGN_BUILD_TYPE_REF) #define SQISIGN_NAMESPACE(s) PARAM_NAME3(ref, s) @@ -94,6 +100,16 @@ #define lift_basis SQISIGN_NAMESPACE(lift_basis) #define lift_basis_normalized SQISIGN_NAMESPACE(lift_basis_normalized) +// Namespacing symbols exported from basis.c, ec.c: +#undef xDBL_E0 + +#define xDBL_E0 SQISIGN_NAMESPACE(xDBL_E0) + +// Namespacing symbols exported from basis.c, ec.c, isog_chains.c: +#undef xDBL_A24 + +#define xDBL_A24 SQISIGN_NAMESPACE(xDBL_A24) + // Namespacing symbols exported from biextension.c: #undef clear_cofac #undef ec_dlog_2_tate @@ -109,6 +125,11 @@ #define reduced_tate SQISIGN_NAMESPACE(reduced_tate) #define weil SQISIGN_NAMESPACE(weil) +// Namespacing symbols exported from biextension.c, ec_jac.c, hd.c: +#undef ADD + +#define ADD SQISIGN_NAMESPACE(ADD) + // Namespacing symbols exported from common.c: #undef hash_to_challenge #undef public_key_finalize @@ -148,6 +169,28 @@ #define find_uv SQISIGN_NAMESPACE(find_uv) #define fixed_degree_isogeny_and_eval SQISIGN_NAMESPACE(fixed_degree_isogeny_and_eval) +// Namespacing symbols exported from dim2id2iso.c, encode_signature.c, id2iso.c, keygen.c, quaternion_data.c, sign.c: +#undef EXTREMAL_ORDERS +#undef QUATALG_PINFTY + +#define EXTREMAL_ORDERS SQISIGN_NAMESPACE(EXTREMAL_ORDERS) +#define QUATALG_PINFTY SQISIGN_NAMESPACE(QUATALG_PINFTY) + +// Namespacing symbols exported from dim2id2iso.c, endomorphism_action.c, id2iso.c: +#undef CURVES_WITH_ENDOMORPHISMS + +#define CURVES_WITH_ENDOMORPHISMS SQISIGN_NAMESPACE(CURVES_WITH_ENDOMORPHISMS) + +// Namespacing symbols exported from dim2id2iso.c, id2iso.c, sign.c, torsion_constants.c: +#undef TORSION_PLUS_2POWER + +#define TORSION_PLUS_2POWER SQISIGN_NAMESPACE(TORSION_PLUS_2POWER) + +// Namespacing symbols exported from dim2id2iso.c, quaternion_data.c: +#undef CONNECTING_IDEALS + +#define CONNECTING_IDEALS SQISIGN_NAMESPACE(CONNECTING_IDEALS) + 
// Namespacing symbols exported from dim4.c: #undef ibz_inv_dim4_make_coeff_mpm #undef ibz_inv_dim4_make_coeff_pmp @@ -207,6 +250,13 @@ #define ibz_vec_4_sub SQISIGN_NAMESPACE(ibz_vec_4_sub) #define quat_qf_eval SQISIGN_NAMESPACE(quat_qf_eval) +// Namespacing symbols exported from e0_basis.c: +#undef BASIS_E0_PX +#undef BASIS_E0_QX + +#define BASIS_E0_PX SQISIGN_NAMESPACE(BASIS_E0_PX) +#define BASIS_E0_QX SQISIGN_NAMESPACE(BASIS_E0_QX) + // Namespacing symbols exported from ec.c: #undef cswap_points #undef ec_biscalar_mul @@ -235,8 +285,6 @@ #undef xDBL #undef xDBLADD #undef xDBLMUL -#undef xDBL_A24 -#undef xDBL_E0 #undef xMUL #define cswap_points SQISIGN_NAMESPACE(cswap_points) @@ -266,14 +314,9 @@ #define xDBL SQISIGN_NAMESPACE(xDBL) #define xDBLADD SQISIGN_NAMESPACE(xDBLADD) #define xDBLMUL SQISIGN_NAMESPACE(xDBLMUL) -#define xDBL_A24 SQISIGN_NAMESPACE(xDBL_A24) -#define xDBL_E0 SQISIGN_NAMESPACE(xDBL_E0) #define xMUL SQISIGN_NAMESPACE(xMUL) // Namespacing symbols exported from ec_jac.c: -#undef ADD -#undef DBL -#undef DBLW #undef copy_jac_point #undef jac_from_ws #undef jac_init @@ -284,9 +327,6 @@ #undef jac_to_xz_add_components #undef select_jac_point -#define ADD SQISIGN_NAMESPACE(ADD) -#define DBL SQISIGN_NAMESPACE(DBL) -#define DBLW SQISIGN_NAMESPACE(DBLW) #define copy_jac_point SQISIGN_NAMESPACE(copy_jac_point) #define jac_from_ws SQISIGN_NAMESPACE(jac_from_ws) #define jac_init SQISIGN_NAMESPACE(jac_init) @@ -297,6 +337,21 @@ #define jac_to_xz_add_components SQISIGN_NAMESPACE(jac_to_xz_add_components) #define select_jac_point SQISIGN_NAMESPACE(select_jac_point) +// Namespacing symbols exported from ec_jac.c, hd.c: +#undef DBLW + +#define DBLW SQISIGN_NAMESPACE(DBLW) + +// Namespacing symbols exported from ec_jac.c, hd.c, theta_isogenies.c: +#undef DBL + +#define DBL SQISIGN_NAMESPACE(DBL) + +// Namespacing symbols exported from ec_params.c: +#undef p_cofactor_for_2f + +#define p_cofactor_for_2f SQISIGN_NAMESPACE(p_cofactor_for_2f) + // Namespacing symbols exported from encode_signature.c: #undef secret_key_from_bytes #undef secret_key_to_bytes @@ -455,21 +510,24 @@ #define fp_set_one SQISIGN_NAMESPACE(fp_set_one) #define fp_set_small SQISIGN_NAMESPACE(fp_set_small) #define fp_set_zero SQISIGN_NAMESPACE(fp_set_zero) -#define ONE SQISIGN_NAMESPACE(ONE) -#define ZERO SQISIGN_NAMESPACE(ZERO) // Namespacing symbols exported from fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c, gf27500.c, gf5248.c, gf65376.c: +#undef ONE +#undef ZERO #undef fp_add #undef fp_mul #undef fp_sqr #undef fp_sub +#define ONE SQISIGN_NAMESPACE(ONE) +#define ZERO SQISIGN_NAMESPACE(ZERO) #define fp_add SQISIGN_NAMESPACE(fp_add) #define fp_mul SQISIGN_NAMESPACE(fp_mul) #define fp_sqr SQISIGN_NAMESPACE(fp_sqr) #define fp_sub SQISIGN_NAMESPACE(fp_sub) // Namespacing symbols exported from gf27500.c: +#undef gf27500_MINUS_ONE #undef gf27500_decode #undef gf27500_decode_reduce #undef gf27500_div @@ -479,6 +537,7 @@ #undef gf27500_legendre #undef gf27500_sqrt +#define gf27500_MINUS_ONE SQISIGN_NAMESPACE(gf27500_MINUS_ONE) #define gf27500_decode SQISIGN_NAMESPACE(gf27500_decode) #define gf27500_decode_reduce SQISIGN_NAMESPACE(gf27500_decode_reduce) #define gf27500_div SQISIGN_NAMESPACE(gf27500_div) @@ -500,6 +559,7 @@ #define fp2_sq_c1 SQISIGN_NAMESPACE(fp2_sq_c1) // Namespacing symbols exported from gf5248.c: +#undef gf5248_MINUS_ONE #undef gf5248_decode #undef gf5248_decode_reduce #undef gf5248_div @@ -509,6 +569,7 @@ #undef gf5248_legendre #undef gf5248_sqrt +#define gf5248_MINUS_ONE SQISIGN_NAMESPACE(gf5248_MINUS_ONE) 
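These hunks only regroup the #undef / #define pairs by the set of translation units that export each symbol; the renaming itself is plain token pasting, so the same sources can be compiled once per SQISIGN variant without symbol clashes. A simplified, hypothetical sketch of the pattern (the real header derives the prefix from SQISIGN_VARIANT and the build type rather than using a fixed string):

#include <stdio.h>

/* Simplified illustration of prefix-based namespacing via token pasting.
   The project's header builds the prefix from the variant and build type
   (ref/broadwell/...); a fixed "lvl1ref" prefix stands in here. */
#define NS_JOIN_(a, b) a##_##b
#define NS_JOIN(a, b)  NS_JOIN_(a, b)
#define NAMESPACE(s)   NS_JOIN(lvl1ref, s)

#define xDBL_A24 NAMESPACE(xDBL_A24)    /* expands to lvl1ref_xDBL_A24 */

void xDBL_A24(void) { puts("lvl1ref_xDBL_A24 called"); }

int main(void)
{
    xDBL_A24();         /* the linker only ever sees the prefixed symbol */
    return 0;
}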
#define gf5248_decode SQISIGN_NAMESPACE(gf5248_decode) #define gf5248_decode_reduce SQISIGN_NAMESPACE(gf5248_decode_reduce) #define gf5248_div SQISIGN_NAMESPACE(gf5248_div) @@ -519,6 +580,7 @@ #define gf5248_sqrt SQISIGN_NAMESPACE(gf5248_sqrt) // Namespacing symbols exported from gf65376.c: +#undef gf65376_MINUS_ONE #undef gf65376_decode #undef gf65376_decode_reduce #undef gf65376_div @@ -528,6 +590,7 @@ #undef gf65376_legendre #undef gf65376_sqrt +#define gf65376_MINUS_ONE SQISIGN_NAMESPACE(gf65376_MINUS_ONE) #define gf65376_decode SQISIGN_NAMESPACE(gf65376_decode) #define gf65376_decode_reduce SQISIGN_NAMESPACE(gf65376_decode_reduce) #define gf65376_div SQISIGN_NAMESPACE(gf65376_div) @@ -554,6 +617,22 @@ #define double_couple_point SQISIGN_NAMESPACE(double_couple_point) #define double_couple_point_iter SQISIGN_NAMESPACE(double_couple_point_iter) +// Namespacing symbols exported from hd_splitting_transforms.c: +#undef CHI_EVAL + +#define CHI_EVAL SQISIGN_NAMESPACE(CHI_EVAL) + +// Namespacing symbols exported from hd_splitting_transforms.c, theta_isogenies.c: +#undef EVEN_INDEX +#undef FP2_CONSTANTS +#undef NORMALIZATION_TRANSFORMS +#undef SPLITTING_TRANSFORMS + +#define EVEN_INDEX SQISIGN_NAMESPACE(EVEN_INDEX) +#define FP2_CONSTANTS SQISIGN_NAMESPACE(FP2_CONSTANTS) +#define NORMALIZATION_TRANSFORMS SQISIGN_NAMESPACE(NORMALIZATION_TRANSFORMS) +#define SPLITTING_TRANSFORMS SQISIGN_NAMESPACE(SPLITTING_TRANSFORMS) + // Namespacing symbols exported from hnf.c: #undef ibz_mat_4x4_is_hnf #undef ibz_mat_4xn_hnf_mod_core @@ -761,6 +840,11 @@ #define secret_key_finalize SQISIGN_NAMESPACE(secret_key_finalize) #define secret_key_init SQISIGN_NAMESPACE(secret_key_init) +// Namespacing symbols exported from keygen.c, torsion_constants.c: +#undef SEC_DEGREE + +#define SEC_DEGREE SQISIGN_NAMESPACE(SEC_DEGREE) + // Namespacing symbols exported from l2.c: #undef quat_lattice_lll #undef quat_lll_core @@ -910,6 +994,16 @@ #define quat_lattice_print SQISIGN_NAMESPACE(quat_lattice_print) #define quat_left_ideal_print SQISIGN_NAMESPACE(quat_left_ideal_print) +// Namespacing symbols exported from quaternion_data.c: +#undef CONJUGATING_ELEMENTS + +#define CONJUGATING_ELEMENTS SQISIGN_NAMESPACE(CONJUGATING_ELEMENTS) + +// Namespacing symbols exported from quaternion_data.c, sign.c: +#undef QUAT_prime_cofactor + +#define QUAT_prime_cofactor SQISIGN_NAMESPACE(QUAT_prime_cofactor) + // Namespacing symbols exported from random_input_generation.c: #undef quat_test_input_random_ideal_generation #undef quat_test_input_random_ideal_lattice_generation @@ -971,6 +1065,11 @@ #define protocols_sign SQISIGN_NAMESPACE(protocols_sign) +// Namespacing symbols exported from sign.c, torsion_constants.c: +#undef COM_DEGREE + +#define COM_DEGREE SQISIGN_NAMESPACE(COM_DEGREE) + // Namespacing symbols exported from sqisign.c: #undef sqisign_keypair #undef sqisign_open @@ -1006,6 +1105,11 @@ #define is_product_theta_point SQISIGN_NAMESPACE(is_product_theta_point) #define theta_precomputation SQISIGN_NAMESPACE(theta_precomputation) +// Namespacing symbols exported from torsion_constants.c: +#undef TWO_TO_SECURITY_BITS + +#define TWO_TO_SECURITY_BITS SQISIGN_NAMESPACE(TWO_TO_SECURITY_BITS) + // Namespacing symbols exported from verify.c: #undef protocols_verify @@ -1029,45 +1133,7 @@ #define xisog_2_singular SQISIGN_NAMESPACE(xisog_2_singular) #define xisog_4 SQISIGN_NAMESPACE(xisog_4) -// Namespacing symbols from precomp: -#undef BASIS_E0_PX -#undef BASIS_E0_QX -#undef p_cofactor_for_2f -#undef CURVES_WITH_ENDOMORPHISMS -#undef 
EVEN_INDEX -#undef CHI_EVAL -#undef FP2_CONSTANTS -#undef SPLITTING_TRANSFORMS -#undef NORMALIZATION_TRANSFORMS -#undef QUAT_prime_cofactor -#undef QUATALG_PINFTY -#undef EXTREMAL_ORDERS -#undef CONNECTING_IDEALS -#undef CONJUGATING_ELEMENTS -#undef TWO_TO_SECURITY_BITS -#undef TORSION_PLUS_2POWER -#undef SEC_DEGREE -#undef COM_DEGREE - -#define BASIS_E0_PX SQISIGN_NAMESPACE(BASIS_E0_PX) -#define BASIS_E0_QX SQISIGN_NAMESPACE(BASIS_E0_QX) -#define p_cofactor_for_2f SQISIGN_NAMESPACE(p_cofactor_for_2f) -#define CURVES_WITH_ENDOMORPHISMS SQISIGN_NAMESPACE(CURVES_WITH_ENDOMORPHISMS) -#define EVEN_INDEX SQISIGN_NAMESPACE(EVEN_INDEX) -#define CHI_EVAL SQISIGN_NAMESPACE(CHI_EVAL) -#define FP2_CONSTANTS SQISIGN_NAMESPACE(FP2_CONSTANTS) -#define SPLITTING_TRANSFORMS SQISIGN_NAMESPACE(SPLITTING_TRANSFORMS) -#define NORMALIZATION_TRANSFORMS SQISIGN_NAMESPACE(NORMALIZATION_TRANSFORMS) -#define QUAT_prime_cofactor SQISIGN_NAMESPACE(QUAT_prime_cofactor) -#define QUATALG_PINFTY SQISIGN_NAMESPACE(QUATALG_PINFTY) -#define EXTREMAL_ORDERS SQISIGN_NAMESPACE(EXTREMAL_ORDERS) -#define CONNECTING_IDEALS SQISIGN_NAMESPACE(CONNECTING_IDEALS) -#define CONJUGATING_ELEMENTS SQISIGN_NAMESPACE(CONJUGATING_ELEMENTS) -#define TWO_TO_SECURITY_BITS SQISIGN_NAMESPACE(TWO_TO_SECURITY_BITS) -#define TORSION_PLUS_2POWER SQISIGN_NAMESPACE(TORSION_PLUS_2POWER) -#define SEC_DEGREE SQISIGN_NAMESPACE(SEC_DEGREE) -#define COM_DEGREE SQISIGN_NAMESPACE(COM_DEGREE) - #endif +// This file is generated by scripts/Namespace.scala, do not edit it manually! diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c index f002495c59..491b052b7b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c @@ -1514,4 +1514,4 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) } } -#endif +#endif /* RADIX_32 */ \ No newline at end of file diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c index c187e878eb..cc1f136321 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c @@ -970,4 +970,4 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) } } -#endif +#endif /* RADIX_64 */ \ No newline at end of file diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.c deleted file mode 100644 index 396d505aec..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.c +++ /dev/null @@ -1,73 +0,0 @@ -#include -#include -#if defined(MINI_GMP) -#include "mini-gmp.h" -#else -// This configuration is used only for testing -#include -#endif -#include - -// Exported for testing -int -mini_mpz_legendre(const mpz_t a, const mpz_t p) -{ - int res = 0; - mpz_t e; - mpz_init_set(e, p); - mpz_sub_ui(e, e, 1); - mpz_fdiv_q_2exp(e, e, 1); - mpz_powm(e, a, e, p); - - if (mpz_cmp_ui(e, 1) <= 0) { - res = mpz_get_si(e); - } else { - res = -1; - } - mpz_clear(e); - return res; -} - -#if defined(MINI_GMP) -int -mpz_legendre(const mpz_t a, const mpz_t p) -{ - return mini_mpz_legendre(a, p); -} -#endif - -// Exported for testing -double -mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op) -{ - double ret; - int tmp_exp; - mpz_t tmp; - - // Handle the case where op is 0 - if (mpz_cmp_ui(op, 0) == 0) { - *exp = 0; - return 0.0; - } - - *exp = 
mpz_sizeinbase(op, 2); - - mpz_init_set(tmp, op); - - if (*exp > DBL_MAX_EXP) { - mpz_fdiv_q_2exp(tmp, tmp, *exp - DBL_MAX_EXP); - } - - ret = frexp(mpz_get_d(tmp), &tmp_exp); - mpz_clear(tmp); - - return ret; -} - -#if defined(MINI_GMP) -double -mpz_get_d_2exp(signed long int *exp, const mpz_t op) -{ - return mini_mpz_get_d_2exp(exp, op); -} -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.h deleted file mode 100644 index 0113cfdfe6..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp-extra.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef MINI_GMP_EXTRA_H -#define MINI_GMP_EXTRA_H - -#if defined MINI_GMP -#include "mini-gmp.h" - -typedef long mp_exp_t; - -int mpz_legendre(const mpz_t a, const mpz_t p); -double mpz_get_d_2exp(signed long int *exp, const mpz_t op); -#else -// This configuration is used only for testing -#include -#endif - -int mini_mpz_legendre(const mpz_t a, const mpz_t p); -double mini_mpz_get_d_2exp(signed long int *exp, const mpz_t op); - -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp.c deleted file mode 100644 index 3830ab2031..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp.c +++ /dev/null @@ -1,4671 +0,0 @@ -/* Note: The code from mini-gmp is modifed from the original by - commenting out the definition of GMP_LIMB_BITS */ - -/* - mini-gmp, a minimalistic implementation of a GNU GMP subset. - - Contributed to the GNU project by Niels Möller - Additional functionalities and improvements by Marco Bodrato. - -Copyright 1991-1997, 1999-2022 Free Software Foundation, Inc. - -This file is part of the GNU MP Library. - -The GNU MP Library is free software; you can redistribute it and/or modify -it under the terms of either: - - * the GNU Lesser General Public License as published by the Free - Software Foundation; either version 3 of the License, or (at your - option) any later version. - -or - - * the GNU General Public License as published by the Free Software - Foundation; either version 2 of the License, or (at your option) any - later version. - -or both in parallel, as here. - -The GNU MP Library is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -for more details. - -You should have received copies of the GNU General Public License and the -GNU Lesser General Public License along with the GNU MP Library. If not, -see https://www.gnu.org/licenses/. */ - -/* NOTE: All functions in this file which are not declared in - mini-gmp.h are internal, and are not intended to be compatible - with GMP or with future versions of mini-gmp. */ - -/* Much of the material copied from GMP files, including: gmp-impl.h, - longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c, - mpn/generic/lshift.c, mpn/generic/mul_1.c, - mpn/generic/mul_basecase.c, mpn/generic/rshift.c, - mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c, - mpn/generic/submul_1.c. 
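The removed mini-gmp-extra helpers implement the Legendre symbol through Euler's criterion: for an odd prime p, a^((p-1)/2) mod p is 0, 1 or p-1, and the last case is reported as -1. The same criterion as a standalone sketch over the public mpz interface:

#include <assert.h>
#include "mini-gmp.h"

/* Euler's criterion, mirroring mini_mpz_legendre() above. */
static int legendre_euler(const mpz_t a, const mpz_t p)
{
    mpz_t e;
    int res;

    mpz_init_set(e, p);
    mpz_sub_ui(e, e, 1);
    mpz_fdiv_q_2exp(e, e, 1);          /* e = (p - 1) / 2 */
    mpz_powm(e, a, e, p);

    res = (mpz_cmp_ui(e, 1) <= 0) ? (int) mpz_get_si(e) : -1;
    mpz_clear(e);
    return res;
}

int main(void)
{
    mpz_t a, p;
    mpz_init_set_ui(a, 2);
    mpz_init_set_ui(p, 7);
    assert(legendre_euler(a, p) == 1);     /* 2 = 3^2 (mod 7) is a square */

    mpz_set_ui(a, 3);
    assert(legendre_euler(a, p) == -1);    /* 3 is a non-residue mod 7    */

    mpz_set_ui(a, 14);
    assert(legendre_euler(a, p) == 0);     /* p divides a                 */

    mpz_clear(a); mpz_clear(p);
    return 0;
}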
*/ - -#include -#include -#include -#include -#include -#include - -#include "mini-gmp.h" - -#if !defined(MINI_GMP_DONT_USE_FLOAT_H) -#include -#endif - - -/* Macros */ -/* Removed from here as it is passed as a compiler command-line definition */ -/* #define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT) */ - -#define GMP_LIMB_MAX ((mp_limb_t) ~ (mp_limb_t) 0) -#define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1)) - -#define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2)) -#define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1) - -#define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT) -#define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1)) - -#define GMP_ABS(x) ((x) >= 0 ? (x) : -(x)) -#define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1)) - -#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b)) -#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b)) - -#define GMP_CMP(a,b) (((a) > (b)) - ((a) < (b))) - -#if defined(DBL_MANT_DIG) && FLT_RADIX == 2 -#define GMP_DBL_MANT_BITS DBL_MANT_DIG -#else -#define GMP_DBL_MANT_BITS (53) -#endif - -/* Return non-zero if xp,xsize and yp,ysize overlap. - If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no - overlap. If both these are false, there's an overlap. */ -#define GMP_MPN_OVERLAP_P(xp, xsize, yp, ysize) \ - ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp)) - -#define gmp_assert_nocarry(x) do { \ - mp_limb_t __cy = (x); \ - assert (__cy == 0); \ - (void) (__cy); \ - } while (0) - -#define gmp_clz(count, x) do { \ - mp_limb_t __clz_x = (x); \ - unsigned __clz_c = 0; \ - int LOCAL_SHIFT_BITS = 8; \ - if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) \ - for (; \ - (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \ - __clz_c += 8) \ - { __clz_x <<= LOCAL_SHIFT_BITS; } \ - for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++) \ - __clz_x <<= 1; \ - (count) = __clz_c; \ - } while (0) - -#define gmp_ctz(count, x) do { \ - mp_limb_t __ctz_x = (x); \ - unsigned __ctz_c = 0; \ - gmp_clz (__ctz_c, __ctz_x & - __ctz_x); \ - (count) = GMP_LIMB_BITS - 1 - __ctz_c; \ - } while (0) - -#define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \ - do { \ - mp_limb_t __x; \ - __x = (al) + (bl); \ - (sh) = (ah) + (bh) + (__x < (al)); \ - (sl) = __x; \ - } while (0) - -#define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \ - do { \ - mp_limb_t __x; \ - __x = (al) - (bl); \ - (sh) = (ah) - (bh) - ((al) < (bl)); \ - (sl) = __x; \ - } while (0) - -#define gmp_umul_ppmm(w1, w0, u, v) \ - do { \ - int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; \ - if (sizeof(unsigned int) * CHAR_BIT >= 2 * GMP_LIMB_BITS) \ - { \ - unsigned int __ww = (unsigned int) (u) * (v); \ - w0 = (mp_limb_t) __ww; \ - w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ - } \ - else if (GMP_ULONG_BITS >= 2 * GMP_LIMB_BITS) \ - { \ - unsigned long int __ww = (unsigned long int) (u) * (v); \ - w0 = (mp_limb_t) __ww; \ - w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \ - } \ - else { \ - mp_limb_t __x0, __x1, __x2, __x3; \ - unsigned __ul, __vl, __uh, __vh; \ - mp_limb_t __u = (u), __v = (v); \ - assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); \ - \ - __ul = __u & GMP_LLIMB_MASK; \ - __uh = __u >> (GMP_LIMB_BITS / 2); \ - __vl = __v & GMP_LLIMB_MASK; \ - __vh = __v >> (GMP_LIMB_BITS / 2); \ - \ - __x0 = (mp_limb_t) __ul * __vl; \ - __x1 = (mp_limb_t) __ul * __vh; \ - __x2 = (mp_limb_t) __uh * __vl; \ - __x3 = (mp_limb_t) __uh * __vh; \ - \ - __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */ \ - __x1 += __x2; /* but this indeed can */ \ - if (__x1 < __x2) /* did we get it? 
*/ \ - __x3 += GMP_HLIMB_BIT; /* yes, add it in the proper pos. */ \ - \ - (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \ - (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \ - } \ - } while (0) - -/* If mp_limb_t is of size smaller than int, plain u*v implies - automatic promotion to *signed* int, and then multiply may overflow - and cause undefined behavior. Explicitly cast to unsigned int for - that case. */ -#define gmp_umullo_limb(u, v) \ - ((sizeof(mp_limb_t) >= sizeof(int)) ? (u)*(v) : (unsigned int)(u) * (v)) - -#define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di) \ - do { \ - mp_limb_t _qh, _ql, _r, _mask; \ - gmp_umul_ppmm (_qh, _ql, (nh), (di)); \ - gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \ - _r = (nl) - gmp_umullo_limb (_qh, (d)); \ - _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \ - _qh += _mask; \ - _r += _mask & (d); \ - if (_r >= (d)) \ - { \ - _r -= (d); \ - _qh++; \ - } \ - \ - (r) = _r; \ - (q) = _qh; \ - } while (0) - -#define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \ - do { \ - mp_limb_t _q0, _t1, _t0, _mask; \ - gmp_umul_ppmm ((q), _q0, (n2), (dinv)); \ - gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \ - \ - /* Compute the two most significant limbs of n - q'd */ \ - (r1) = (n1) - gmp_umullo_limb ((d1), (q)); \ - gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \ - gmp_umul_ppmm (_t1, _t0, (d0), (q)); \ - gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \ - (q)++; \ - \ - /* Conditionally adjust q and the remainders */ \ - _mask = - (mp_limb_t) ((r1) >= _q0); \ - (q) += _mask; \ - gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \ - if ((r1) >= (d1)) \ - { \ - if ((r1) > (d1) || (r0) >= (d0)) \ - { \ - (q)++; \ - gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \ - } \ - } \ - } while (0) - -/* Swap macros. */ -#define MP_LIMB_T_SWAP(x, y) \ - do { \ - mp_limb_t __mp_limb_t_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_limb_t_swap__tmp; \ - } while (0) -#define MP_SIZE_T_SWAP(x, y) \ - do { \ - mp_size_t __mp_size_t_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_size_t_swap__tmp; \ - } while (0) -#define MP_BITCNT_T_SWAP(x,y) \ - do { \ - mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_bitcnt_t_swap__tmp; \ - } while (0) -#define MP_PTR_SWAP(x, y) \ - do { \ - mp_ptr __mp_ptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_ptr_swap__tmp; \ - } while (0) -#define MP_SRCPTR_SWAP(x, y) \ - do { \ - mp_srcptr __mp_srcptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mp_srcptr_swap__tmp; \ - } while (0) - -#define MPN_PTR_SWAP(xp,xs, yp,ys) \ - do { \ - MP_PTR_SWAP (xp, yp); \ - MP_SIZE_T_SWAP (xs, ys); \ - } while(0) -#define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \ - do { \ - MP_SRCPTR_SWAP (xp, yp); \ - MP_SIZE_T_SWAP (xs, ys); \ - } while(0) - -#define MPZ_PTR_SWAP(x, y) \ - do { \ - mpz_ptr __mpz_ptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mpz_ptr_swap__tmp; \ - } while (0) -#define MPZ_SRCPTR_SWAP(x, y) \ - do { \ - mpz_srcptr __mpz_srcptr_swap__tmp = (x); \ - (x) = (y); \ - (y) = __mpz_srcptr_swap__tmp; \ - } while (0) - -const int mp_bits_per_limb = GMP_LIMB_BITS; - - -/* Memory allocation and other helper functions. 
*/ -static void -gmp_die (const char *msg) -{ - fprintf (stderr, "%s\n", msg); - abort(); -} - -static void * -gmp_default_alloc (size_t size) -{ - void *p; - - assert (size > 0); - - p = malloc (size); - if (!p) - gmp_die("gmp_default_alloc: Virtual memory exhausted."); - - return p; -} - -static void * -gmp_default_realloc (void *old, size_t unused_old_size, size_t new_size) -{ - void * p; - - p = realloc (old, new_size); - - if (!p) - gmp_die("gmp_default_realloc: Virtual memory exhausted."); - - return p; -} - -static void -gmp_default_free (void *p, size_t unused_size) -{ - free (p); -} - -static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc; -static void * (*gmp_reallocate_func) (void *, size_t, size_t) = gmp_default_realloc; -static void (*gmp_free_func) (void *, size_t) = gmp_default_free; - -void -mp_get_memory_functions (void *(**alloc_func) (size_t), - void *(**realloc_func) (void *, size_t, size_t), - void (**free_func) (void *, size_t)) -{ - if (alloc_func) - *alloc_func = gmp_allocate_func; - - if (realloc_func) - *realloc_func = gmp_reallocate_func; - - if (free_func) - *free_func = gmp_free_func; -} - -void -mp_set_memory_functions (void *(*alloc_func) (size_t), - void *(*realloc_func) (void *, size_t, size_t), - void (*free_func) (void *, size_t)) -{ - if (!alloc_func) - alloc_func = gmp_default_alloc; - if (!realloc_func) - realloc_func = gmp_default_realloc; - if (!free_func) - free_func = gmp_default_free; - - gmp_allocate_func = alloc_func; - gmp_reallocate_func = realloc_func; - gmp_free_func = free_func; -} - -#define gmp_alloc(size) ((*gmp_allocate_func)((size))) -#define gmp_free(p, size) ((*gmp_free_func) ((p), (size))) -#define gmp_realloc(ptr, old_size, size) ((*gmp_reallocate_func)(ptr, old_size, size)) - -static mp_ptr -gmp_alloc_limbs (mp_size_t size) -{ - return (mp_ptr) gmp_alloc (size * sizeof (mp_limb_t)); -} - -static mp_ptr -gmp_realloc_limbs (mp_ptr old, mp_size_t old_size, mp_size_t size) -{ - assert (size > 0); - return (mp_ptr) gmp_realloc (old, old_size * sizeof (mp_limb_t), size * sizeof (mp_limb_t)); -} - -static void -gmp_free_limbs (mp_ptr old, mp_size_t size) -{ - gmp_free (old, size * sizeof (mp_limb_t)); -} - - -/* MPN interface */ - -void -mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n) -{ - mp_size_t i; - for (i = 0; i < n; i++) - d[i] = s[i]; -} - -void -mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n) -{ - while (--n >= 0) - d[n] = s[n]; -} - -int -mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - while (--n >= 0) - { - if (ap[n] != bp[n]) - return ap[n] > bp[n] ? 1 : -1; - } - return 0; -} - -static int -mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) -{ - if (an != bn) - return an < bn ? 
-1 : 1; - else - return mpn_cmp (ap, bp, an); -} - -static mp_size_t -mpn_normalized_size (mp_srcptr xp, mp_size_t n) -{ - while (n > 0 && xp[n-1] == 0) - --n; - return n; -} - -int -mpn_zero_p(mp_srcptr rp, mp_size_t n) -{ - return mpn_normalized_size (rp, n) == 0; -} - -void -mpn_zero (mp_ptr rp, mp_size_t n) -{ - while (--n >= 0) - rp[n] = 0; -} - -mp_limb_t -mpn_add_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) -{ - mp_size_t i; - - assert (n > 0); - i = 0; - do - { - mp_limb_t r = ap[i] + b; - /* Carry out */ - b = (r < b); - rp[i] = r; - } - while (++i < n); - - return b; -} - -mp_limb_t -mpn_add_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - mp_size_t i; - mp_limb_t cy; - - for (i = 0, cy = 0; i < n; i++) - { - mp_limb_t a, b, r; - a = ap[i]; b = bp[i]; - r = a + cy; - cy = (r < cy); - r += b; - cy += (r < b); - rp[i] = r; - } - return cy; -} - -mp_limb_t -mpn_add (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) -{ - mp_limb_t cy; - - assert (an >= bn); - - cy = mpn_add_n (rp, ap, bp, bn); - if (an > bn) - cy = mpn_add_1 (rp + bn, ap + bn, an - bn, cy); - return cy; -} - -mp_limb_t -mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b) -{ - mp_size_t i; - - assert (n > 0); - - i = 0; - do - { - mp_limb_t a = ap[i]; - /* Carry out */ - mp_limb_t cy = a < b; - rp[i] = a - b; - b = cy; - } - while (++i < n); - - return b; -} - -mp_limb_t -mpn_sub_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - mp_size_t i; - mp_limb_t cy; - - for (i = 0, cy = 0; i < n; i++) - { - mp_limb_t a, b; - a = ap[i]; b = bp[i]; - b += cy; - cy = (b < cy); - cy += (a < b); - rp[i] = a - b; - } - return cy; -} - -mp_limb_t -mpn_sub (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn) -{ - mp_limb_t cy; - - assert (an >= bn); - - cy = mpn_sub_n (rp, ap, bp, bn); - if (an > bn) - cy = mpn_sub_1 (rp + bn, ap + bn, an - bn, cy); - return cy; -} - -mp_limb_t -mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) -{ - mp_limb_t ul, cl, hpl, lpl; - - assert (n >= 1); - - cl = 0; - do - { - ul = *up++; - gmp_umul_ppmm (hpl, lpl, ul, vl); - - lpl += cl; - cl = (lpl < cl) + hpl; - - *rp++ = lpl; - } - while (--n != 0); - - return cl; -} - -mp_limb_t -mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) -{ - mp_limb_t ul, cl, hpl, lpl, rl; - - assert (n >= 1); - - cl = 0; - do - { - ul = *up++; - gmp_umul_ppmm (hpl, lpl, ul, vl); - - lpl += cl; - cl = (lpl < cl) + hpl; - - rl = *rp; - lpl = rl + lpl; - cl += lpl < rl; - *rp++ = lpl; - } - while (--n != 0); - - return cl; -} - -mp_limb_t -mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) -{ - mp_limb_t ul, cl, hpl, lpl, rl; - - assert (n >= 1); - - cl = 0; - do - { - ul = *up++; - gmp_umul_ppmm (hpl, lpl, ul, vl); - - lpl += cl; - cl = (lpl < cl) + hpl; - - rl = *rp; - lpl = rl - lpl; - cl += lpl > rl; - *rp++ = lpl; - } - while (--n != 0); - - return cl; -} - -mp_limb_t -mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn) -{ - assert (un >= vn); - assert (vn >= 1); - assert (!GMP_MPN_OVERLAP_P(rp, un + vn, up, un)); - assert (!GMP_MPN_OVERLAP_P(rp, un + vn, vp, vn)); - - /* We first multiply by the low order limb. This result can be - stored, not added, to rp. We also avoid a loop for zeroing this - way. */ - - rp[un] = mpn_mul_1 (rp, up, un, vp[0]); - - /* Now accumulate the product of up[] and the next higher limb from - vp[]. 
*/ - - while (--vn >= 1) - { - rp += 1, vp += 1; - rp[un] = mpn_addmul_1 (rp, up, un, vp[0]); - } - return rp[un]; -} - -void -mpn_mul_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n) -{ - mpn_mul (rp, ap, n, bp, n); -} - -void -mpn_sqr (mp_ptr rp, mp_srcptr ap, mp_size_t n) -{ - mpn_mul (rp, ap, n, ap, n); -} - -mp_limb_t -mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) -{ - mp_limb_t high_limb, low_limb; - unsigned int tnc; - mp_limb_t retval; - - assert (n >= 1); - assert (cnt >= 1); - assert (cnt < GMP_LIMB_BITS); - - up += n; - rp += n; - - tnc = GMP_LIMB_BITS - cnt; - low_limb = *--up; - retval = low_limb >> tnc; - high_limb = (low_limb << cnt); - - while (--n != 0) - { - low_limb = *--up; - *--rp = high_limb | (low_limb >> tnc); - high_limb = (low_limb << cnt); - } - *--rp = high_limb; - - return retval; -} - -mp_limb_t -mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt) -{ - mp_limb_t high_limb, low_limb; - unsigned int tnc; - mp_limb_t retval; - - assert (n >= 1); - assert (cnt >= 1); - assert (cnt < GMP_LIMB_BITS); - - tnc = GMP_LIMB_BITS - cnt; - high_limb = *up++; - retval = (high_limb << tnc); - low_limb = high_limb >> cnt; - - while (--n != 0) - { - high_limb = *up++; - *rp++ = low_limb | (high_limb << tnc); - low_limb = high_limb >> cnt; - } - *rp = low_limb; - - return retval; -} - -static mp_bitcnt_t -mpn_common_scan (mp_limb_t limb, mp_size_t i, mp_srcptr up, mp_size_t un, - mp_limb_t ux) -{ - unsigned cnt; - - assert (ux == 0 || ux == GMP_LIMB_MAX); - assert (0 <= i && i <= un ); - - while (limb == 0) - { - i++; - if (i == un) - return (ux == 0 ? ~(mp_bitcnt_t) 0 : un * GMP_LIMB_BITS); - limb = ux ^ up[i]; - } - gmp_ctz (cnt, limb); - return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt; -} - -mp_bitcnt_t -mpn_scan1 (mp_srcptr ptr, mp_bitcnt_t bit) -{ - mp_size_t i; - i = bit / GMP_LIMB_BITS; - - return mpn_common_scan ( ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), - i, ptr, i, 0); -} - -mp_bitcnt_t -mpn_scan0 (mp_srcptr ptr, mp_bitcnt_t bit) -{ - mp_size_t i; - i = bit / GMP_LIMB_BITS; - - return mpn_common_scan (~ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)), - i, ptr, i, GMP_LIMB_MAX); -} - -void -mpn_com (mp_ptr rp, mp_srcptr up, mp_size_t n) -{ - while (--n >= 0) - *rp++ = ~ *up++; -} - -mp_limb_t -mpn_neg (mp_ptr rp, mp_srcptr up, mp_size_t n) -{ - while (*up == 0) - { - *rp = 0; - if (!--n) - return 0; - ++up; ++rp; - } - *rp = - *up; - mpn_com (++rp, ++up, --n); - return 1; -} - - -/* MPN division interface. */ - -/* The 3/2 inverse is defined as - - m = floor( (B^3-1) / (B u1 + u0)) - B -*/ -mp_limb_t -mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0) -{ - mp_limb_t r, m; - - { - mp_limb_t p, ql; - unsigned ul, uh, qh; - - assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t)); - /* For notation, let b denote the half-limb base, so that B = b^2. - Split u1 = b uh + ul. */ - ul = u1 & GMP_LLIMB_MASK; - uh = u1 >> (GMP_LIMB_BITS / 2); - - /* Approximation of the high half of quotient. Differs from the 2/1 - inverse of the half limb uh, since we have already subtracted - u0. */ - qh = (u1 ^ GMP_LIMB_MAX) / uh; - - /* Adjust to get a half-limb 3/2 inverse, i.e., we want - - qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u - = floor( (b (~u) + b-1) / u), - - and the remainder - - r = b (~u) + b-1 - qh (b uh + ul) - = b (~u - qh uh) + b-1 - qh ul - - Subtraction of qh ul may underflow, which implies adjustments. - But by normalization, 2 u >= B > qh ul, so we need to adjust by - at most 2. 
- */ - - r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK; - - p = (mp_limb_t) qh * ul; - /* Adjustment steps taken from udiv_qrnnd_c */ - if (r < p) - { - qh--; - r += u1; - if (r >= u1) /* i.e. we didn't get carry when adding to r */ - if (r < p) - { - qh--; - r += u1; - } - } - r -= p; - - /* Low half of the quotient is - - ql = floor ( (b r + b-1) / u1). - - This is a 3/2 division (on half-limbs), for which qh is a - suitable inverse. */ - - p = (r >> (GMP_LIMB_BITS / 2)) * qh + r; - /* Unlike full-limb 3/2, we can add 1 without overflow. For this to - work, it is essential that ql is a full mp_limb_t. */ - ql = (p >> (GMP_LIMB_BITS / 2)) + 1; - - /* By the 3/2 trick, we don't need the high half limb. */ - r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1; - - if (r >= (GMP_LIMB_MAX & (p << (GMP_LIMB_BITS / 2)))) - { - ql--; - r += u1; - } - m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql; - if (r >= u1) - { - m++; - r -= u1; - } - } - - /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a - 3/2 inverse. */ - if (u0 > 0) - { - mp_limb_t th, tl; - r = ~r; - r += u0; - if (r < u0) - { - m--; - if (r >= u1) - { - m--; - r -= u1; - } - r -= u1; - } - gmp_umul_ppmm (th, tl, u0, m); - r += th; - if (r < th) - { - m--; - m -= ((r > u1) | ((r == u1) & (tl > u0))); - } - } - - return m; -} - -struct gmp_div_inverse -{ - /* Normalization shift count. */ - unsigned shift; - /* Normalized divisor (d0 unused for mpn_div_qr_1) */ - mp_limb_t d1, d0; - /* Inverse, for 2/1 or 3/2. */ - mp_limb_t di; -}; - -static void -mpn_div_qr_1_invert (struct gmp_div_inverse *inv, mp_limb_t d) -{ - unsigned shift; - - assert (d > 0); - gmp_clz (shift, d); - inv->shift = shift; - inv->d1 = d << shift; - inv->di = mpn_invert_limb (inv->d1); -} - -static void -mpn_div_qr_2_invert (struct gmp_div_inverse *inv, - mp_limb_t d1, mp_limb_t d0) -{ - unsigned shift; - - assert (d1 > 0); - gmp_clz (shift, d1); - inv->shift = shift; - if (shift > 0) - { - d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); - d0 <<= shift; - } - inv->d1 = d1; - inv->d0 = d0; - inv->di = mpn_invert_3by2 (d1, d0); -} - -static void -mpn_div_qr_invert (struct gmp_div_inverse *inv, - mp_srcptr dp, mp_size_t dn) -{ - assert (dn > 0); - - if (dn == 1) - mpn_div_qr_1_invert (inv, dp[0]); - else if (dn == 2) - mpn_div_qr_2_invert (inv, dp[1], dp[0]); - else - { - unsigned shift; - mp_limb_t d1, d0; - - d1 = dp[dn-1]; - d0 = dp[dn-2]; - assert (d1 > 0); - gmp_clz (shift, d1); - inv->shift = shift; - if (shift > 0) - { - d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift)); - d0 = (d0 << shift) | (dp[dn-3] >> (GMP_LIMB_BITS - shift)); - } - inv->d1 = d1; - inv->d0 = d0; - inv->di = mpn_invert_3by2 (d1, d0); - } -} - -/* Not matching current public gmp interface, rather corresponding to - the sbpi1_div_* functions. */ -static mp_limb_t -mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn, - const struct gmp_div_inverse *inv) -{ - mp_limb_t d, di; - mp_limb_t r; - mp_ptr tp = NULL; - mp_size_t tn = 0; - - if (inv->shift > 0) - { - /* Shift, reusing qp area if possible. In-place shift if qp == np. 
*/ - tp = qp; - if (!tp) - { - tn = nn; - tp = gmp_alloc_limbs (tn); - } - r = mpn_lshift (tp, np, nn, inv->shift); - np = tp; - } - else - r = 0; - - d = inv->d1; - di = inv->di; - while (--nn >= 0) - { - mp_limb_t q; - - gmp_udiv_qrnnd_preinv (q, r, r, np[nn], d, di); - if (qp) - qp[nn] = q; - } - if (tn) - gmp_free_limbs (tp, tn); - - return r >> inv->shift; -} - -static void -mpn_div_qr_2_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, - const struct gmp_div_inverse *inv) -{ - unsigned shift; - mp_size_t i; - mp_limb_t d1, d0, di, r1, r0; - - assert (nn >= 2); - shift = inv->shift; - d1 = inv->d1; - d0 = inv->d0; - di = inv->di; - - if (shift > 0) - r1 = mpn_lshift (np, np, nn, shift); - else - r1 = 0; - - r0 = np[nn - 1]; - - i = nn - 2; - do - { - mp_limb_t n0, q; - n0 = np[i]; - gmp_udiv_qr_3by2 (q, r1, r0, r1, r0, n0, d1, d0, di); - - if (qp) - qp[i] = q; - } - while (--i >= 0); - - if (shift > 0) - { - assert ((r0 & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - shift))) == 0); - r0 = (r0 >> shift) | (r1 << (GMP_LIMB_BITS - shift)); - r1 >>= shift; - } - - np[1] = r1; - np[0] = r0; -} - -static void -mpn_div_qr_pi1 (mp_ptr qp, - mp_ptr np, mp_size_t nn, mp_limb_t n1, - mp_srcptr dp, mp_size_t dn, - mp_limb_t dinv) -{ - mp_size_t i; - - mp_limb_t d1, d0; - mp_limb_t cy, cy1; - mp_limb_t q; - - assert (dn > 2); - assert (nn >= dn); - - d1 = dp[dn - 1]; - d0 = dp[dn - 2]; - - assert ((d1 & GMP_LIMB_HIGHBIT) != 0); - /* Iteration variable is the index of the q limb. - * - * We divide - * by - */ - - i = nn - dn; - do - { - mp_limb_t n0 = np[dn-1+i]; - - if (n1 == d1 && n0 == d0) - { - q = GMP_LIMB_MAX; - mpn_submul_1 (np+i, dp, dn, q); - n1 = np[dn-1+i]; /* update n1, last loop's value will now be invalid */ - } - else - { - gmp_udiv_qr_3by2 (q, n1, n0, n1, n0, np[dn-2+i], d1, d0, dinv); - - cy = mpn_submul_1 (np + i, dp, dn-2, q); - - cy1 = n0 < cy; - n0 = n0 - cy; - cy = n1 < cy1; - n1 = n1 - cy1; - np[dn-2+i] = n0; - - if (cy != 0) - { - n1 += d1 + mpn_add_n (np + i, np + i, dp, dn - 1); - q--; - } - } - - if (qp) - qp[i] = q; - } - while (--i >= 0); - - np[dn - 1] = n1; -} - -static void -mpn_div_qr_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn, - mp_srcptr dp, mp_size_t dn, - const struct gmp_div_inverse *inv) -{ - assert (dn > 0); - assert (nn >= dn); - - if (dn == 1) - np[0] = mpn_div_qr_1_preinv (qp, np, nn, inv); - else if (dn == 2) - mpn_div_qr_2_preinv (qp, np, nn, inv); - else - { - mp_limb_t nh; - unsigned shift; - - assert (inv->d1 == dp[dn-1]); - assert (inv->d0 == dp[dn-2]); - assert ((inv->d1 & GMP_LIMB_HIGHBIT) != 0); - - shift = inv->shift; - if (shift > 0) - nh = mpn_lshift (np, np, nn, shift); - else - nh = 0; - - mpn_div_qr_pi1 (qp, np, nn, nh, dp, dn, inv->di); - - if (shift > 0) - gmp_assert_nocarry (mpn_rshift (np, np, dn, shift)); - } -} - -static void -mpn_div_qr (mp_ptr qp, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn) -{ - struct gmp_div_inverse inv; - mp_ptr tp = NULL; - - assert (dn > 0); - assert (nn >= dn); - - mpn_div_qr_invert (&inv, dp, dn); - if (dn > 2 && inv.shift > 0) - { - tp = gmp_alloc_limbs (dn); - gmp_assert_nocarry (mpn_lshift (tp, dp, dn, inv.shift)); - dp = tp; - } - mpn_div_qr_preinv (qp, np, nn, dp, dn, &inv); - if (tp) - gmp_free_limbs (tp, dn); -} - - -/* MPN base conversion. 
*/ -static unsigned -mpn_base_power_of_two_p (unsigned b) -{ - switch (b) - { - case 2: return 1; - case 4: return 2; - case 8: return 3; - case 16: return 4; - case 32: return 5; - case 64: return 6; - case 128: return 7; - case 256: return 8; - default: return 0; - } -} - -struct mpn_base_info -{ - /* bb is the largest power of the base which fits in one limb, and - exp is the corresponding exponent. */ - unsigned exp; - mp_limb_t bb; -}; - -static void -mpn_get_base_info (struct mpn_base_info *info, mp_limb_t b) -{ - mp_limb_t m; - mp_limb_t p; - unsigned exp; - - m = GMP_LIMB_MAX / b; - for (exp = 1, p = b; p <= m; exp++) - p *= b; - - info->exp = exp; - info->bb = p; -} - -static mp_bitcnt_t -mpn_limb_size_in_base_2 (mp_limb_t u) -{ - unsigned shift; - - assert (u > 0); - gmp_clz (shift, u); - return GMP_LIMB_BITS - shift; -} - -static size_t -mpn_get_str_bits (unsigned char *sp, unsigned bits, mp_srcptr up, mp_size_t un) -{ - unsigned char mask; - size_t sn, j; - mp_size_t i; - unsigned shift; - - sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]) - + bits - 1) / bits; - - mask = (1U << bits) - 1; - - for (i = 0, j = sn, shift = 0; j-- > 0;) - { - unsigned char digit = up[i] >> shift; - - shift += bits; - - if (shift >= GMP_LIMB_BITS && ++i < un) - { - shift -= GMP_LIMB_BITS; - digit |= up[i] << (bits - shift); - } - sp[j] = digit & mask; - } - return sn; -} - -/* We generate digits from the least significant end, and reverse at - the end. */ -static size_t -mpn_limb_get_str (unsigned char *sp, mp_limb_t w, - const struct gmp_div_inverse *binv) -{ - mp_size_t i; - for (i = 0; w > 0; i++) - { - mp_limb_t h, l, r; - - h = w >> (GMP_LIMB_BITS - binv->shift); - l = w << binv->shift; - - gmp_udiv_qrnnd_preinv (w, r, h, l, binv->d1, binv->di); - assert ((r & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - binv->shift))) == 0); - r >>= binv->shift; - - sp[i] = r; - } - return i; -} - -static size_t -mpn_get_str_other (unsigned char *sp, - int base, const struct mpn_base_info *info, - mp_ptr up, mp_size_t un) -{ - struct gmp_div_inverse binv; - size_t sn; - size_t i; - - mpn_div_qr_1_invert (&binv, base); - - sn = 0; - - if (un > 1) - { - struct gmp_div_inverse bbinv; - mpn_div_qr_1_invert (&bbinv, info->bb); - - do - { - mp_limb_t w; - size_t done; - w = mpn_div_qr_1_preinv (up, up, un, &bbinv); - un -= (up[un-1] == 0); - done = mpn_limb_get_str (sp + sn, w, &binv); - - for (sn += done; done < info->exp; done++) - sp[sn++] = 0; - } - while (un > 1); - } - sn += mpn_limb_get_str (sp + sn, up[0], &binv); - - /* Reverse order */ - for (i = 0; 2*i + 1 < sn; i++) - { - unsigned char t = sp[i]; - sp[i] = sp[sn - i - 1]; - sp[sn - i - 1] = t; - } - - return sn; -} - -size_t -mpn_get_str (unsigned char *sp, int base, mp_ptr up, mp_size_t un) -{ - unsigned bits; - - assert (un > 0); - assert (up[un-1] > 0); - - bits = mpn_base_power_of_two_p (base); - if (bits) - return mpn_get_str_bits (sp, bits, up, un); - else - { - struct mpn_base_info info; - - mpn_get_base_info (&info, base); - return mpn_get_str_other (sp, base, &info, up, un); - } -} - -static mp_size_t -mpn_set_str_bits (mp_ptr rp, const unsigned char *sp, size_t sn, - unsigned bits) -{ - mp_size_t rn; - mp_limb_t limb; - unsigned shift; - - for (limb = 0, rn = 0, shift = 0; sn-- > 0; ) - { - limb |= (mp_limb_t) sp[sn] << shift; - shift += bits; - if (shift >= GMP_LIMB_BITS) - { - shift -= GMP_LIMB_BITS; - rp[rn++] = limb; - /* Next line is correct also if shift == 0, - bits == 8, and mp_limb_t == unsigned char. 
*/ - limb = (unsigned int) sp[sn] >> (bits - shift); - } - } - if (limb != 0) - rp[rn++] = limb; - else - rn = mpn_normalized_size (rp, rn); - return rn; -} - -/* Result is usually normalized, except for all-zero input, in which - case a single zero limb is written at *RP, and 1 is returned. */ -static mp_size_t -mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn, - mp_limb_t b, const struct mpn_base_info *info) -{ - mp_size_t rn; - mp_limb_t w; - unsigned k; - size_t j; - - assert (sn > 0); - - k = 1 + (sn - 1) % info->exp; - - j = 0; - w = sp[j++]; - while (--k != 0) - w = w * b + sp[j++]; - - rp[0] = w; - - for (rn = 1; j < sn;) - { - mp_limb_t cy; - - w = sp[j++]; - for (k = 1; k < info->exp; k++) - w = w * b + sp[j++]; - - cy = mpn_mul_1 (rp, rp, rn, info->bb); - cy += mpn_add_1 (rp, rp, rn, w); - if (cy > 0) - rp[rn++] = cy; - } - assert (j == sn); - - return rn; -} - -mp_size_t -mpn_set_str (mp_ptr rp, const unsigned char *sp, size_t sn, int base) -{ - unsigned bits; - - if (sn == 0) - return 0; - - bits = mpn_base_power_of_two_p (base); - if (bits) - return mpn_set_str_bits (rp, sp, sn, bits); - else - { - struct mpn_base_info info; - - mpn_get_base_info (&info, base); - return mpn_set_str_other (rp, sp, sn, base, &info); - } -} - - -/* MPZ interface */ -void -mpz_init (mpz_t r) -{ - static const mp_limb_t dummy_limb = GMP_LIMB_MAX & 0xc1a0; - - r->_mp_alloc = 0; - r->_mp_size = 0; - r->_mp_d = (mp_ptr) &dummy_limb; -} - -/* The utility of this function is a bit limited, since many functions - assigns the result variable using mpz_swap. */ -void -mpz_init2 (mpz_t r, mp_bitcnt_t bits) -{ - mp_size_t rn; - - bits -= (bits != 0); /* Round down, except if 0 */ - rn = 1 + bits / GMP_LIMB_BITS; - - r->_mp_alloc = rn; - r->_mp_size = 0; - r->_mp_d = gmp_alloc_limbs (rn); -} - -void -mpz_clear (mpz_t r) -{ - if (r->_mp_alloc) - gmp_free_limbs (r->_mp_d, r->_mp_alloc); -} - -static mp_ptr -mpz_realloc (mpz_t r, mp_size_t size) -{ - size = GMP_MAX (size, 1); - - if (r->_mp_alloc) - r->_mp_d = gmp_realloc_limbs (r->_mp_d, r->_mp_alloc, size); - else - r->_mp_d = gmp_alloc_limbs (size); - r->_mp_alloc = size; - - if (GMP_ABS (r->_mp_size) > size) - r->_mp_size = 0; - - return r->_mp_d; -} - -/* Realloc for an mpz_t WHAT if it has less than NEEDED limbs. */ -#define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc \ - ? mpz_realloc(z,n) \ - : (z)->_mp_d) - -/* MPZ assignment and basic conversions. 
*/ -void -mpz_set_si (mpz_t r, signed long int x) -{ - if (x >= 0) - mpz_set_ui (r, x); - else /* (x < 0) */ - if (GMP_LIMB_BITS < GMP_ULONG_BITS) - { - mpz_set_ui (r, GMP_NEG_CAST (unsigned long int, x)); - mpz_neg (r, r); - } - else - { - r->_mp_size = -1; - MPZ_REALLOC (r, 1)[0] = GMP_NEG_CAST (unsigned long int, x); - } -} - -void -mpz_set_ui (mpz_t r, unsigned long int x) -{ - if (x > 0) - { - r->_mp_size = 1; - MPZ_REALLOC (r, 1)[0] = x; - if (GMP_LIMB_BITS < GMP_ULONG_BITS) - { - int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; - while (x >>= LOCAL_GMP_LIMB_BITS) - { - ++ r->_mp_size; - MPZ_REALLOC (r, r->_mp_size)[r->_mp_size - 1] = x; - } - } - } - else - r->_mp_size = 0; -} - -void -mpz_set (mpz_t r, const mpz_t x) -{ - /* Allow the NOP r == x */ - if (r != x) - { - mp_size_t n; - mp_ptr rp; - - n = GMP_ABS (x->_mp_size); - rp = MPZ_REALLOC (r, n); - - mpn_copyi (rp, x->_mp_d, n); - r->_mp_size = x->_mp_size; - } -} - -void -mpz_init_set_si (mpz_t r, signed long int x) -{ - mpz_init (r); - mpz_set_si (r, x); -} - -void -mpz_init_set_ui (mpz_t r, unsigned long int x) -{ - mpz_init (r); - mpz_set_ui (r, x); -} - -void -mpz_init_set (mpz_t r, const mpz_t x) -{ - mpz_init (r); - mpz_set (r, x); -} - -int -mpz_fits_slong_p (const mpz_t u) -{ - return mpz_cmp_si (u, LONG_MAX) <= 0 && mpz_cmp_si (u, LONG_MIN) >= 0; -} - -static int -mpn_absfits_ulong_p (mp_srcptr up, mp_size_t un) -{ - int ulongsize = GMP_ULONG_BITS / GMP_LIMB_BITS; - mp_limb_t ulongrem = 0; - - if (GMP_ULONG_BITS % GMP_LIMB_BITS != 0) - ulongrem = (mp_limb_t) (ULONG_MAX >> GMP_LIMB_BITS * ulongsize) + 1; - - return un <= ulongsize || (up[ulongsize] < ulongrem && un == ulongsize + 1); -} - -int -mpz_fits_ulong_p (const mpz_t u) -{ - mp_size_t us = u->_mp_size; - - return us >= 0 && mpn_absfits_ulong_p (u->_mp_d, us); -} - -int -mpz_fits_sint_p (const mpz_t u) -{ - return mpz_cmp_si (u, INT_MAX) <= 0 && mpz_cmp_si (u, INT_MIN) >= 0; -} - -int -mpz_fits_uint_p (const mpz_t u) -{ - return u->_mp_size >= 0 && mpz_cmpabs_ui (u, UINT_MAX) <= 0; -} - -int -mpz_fits_sshort_p (const mpz_t u) -{ - return mpz_cmp_si (u, SHRT_MAX) <= 0 && mpz_cmp_si (u, SHRT_MIN) >= 0; -} - -int -mpz_fits_ushort_p (const mpz_t u) -{ - return u->_mp_size >= 0 && mpz_cmpabs_ui (u, USHRT_MAX) <= 0; -} - -long int -mpz_get_si (const mpz_t u) -{ - unsigned long r = mpz_get_ui (u); - unsigned long c = -LONG_MAX - LONG_MIN; - - if (u->_mp_size < 0) - /* This expression is necessary to properly handle -LONG_MIN */ - return -(long) c - (long) ((r - c) & LONG_MAX); - else - return (long) (r & LONG_MAX); -} - -unsigned long int -mpz_get_ui (const mpz_t u) -{ - if (GMP_LIMB_BITS < GMP_ULONG_BITS) - { - int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; - unsigned long r = 0; - mp_size_t n = GMP_ABS (u->_mp_size); - n = GMP_MIN (n, 1 + (mp_size_t) (GMP_ULONG_BITS - 1) / GMP_LIMB_BITS); - while (--n >= 0) - r = (r << LOCAL_GMP_LIMB_BITS) + u->_mp_d[n]; - return r; - } - - return u->_mp_size == 0 ? 
0 : u->_mp_d[0]; -} - -size_t -mpz_size (const mpz_t u) -{ - return GMP_ABS (u->_mp_size); -} - -mp_limb_t -mpz_getlimbn (const mpz_t u, mp_size_t n) -{ - if (n >= 0 && n < GMP_ABS (u->_mp_size)) - return u->_mp_d[n]; - else - return 0; -} - -void -mpz_realloc2 (mpz_t x, mp_bitcnt_t n) -{ - mpz_realloc (x, 1 + (n - (n != 0)) / GMP_LIMB_BITS); -} - -mp_srcptr -mpz_limbs_read (mpz_srcptr x) -{ - return x->_mp_d; -} - -mp_ptr -mpz_limbs_modify (mpz_t x, mp_size_t n) -{ - assert (n > 0); - return MPZ_REALLOC (x, n); -} - -mp_ptr -mpz_limbs_write (mpz_t x, mp_size_t n) -{ - return mpz_limbs_modify (x, n); -} - -void -mpz_limbs_finish (mpz_t x, mp_size_t xs) -{ - mp_size_t xn; - xn = mpn_normalized_size (x->_mp_d, GMP_ABS (xs)); - x->_mp_size = xs < 0 ? -xn : xn; -} - -static mpz_srcptr -mpz_roinit_normal_n (mpz_t x, mp_srcptr xp, mp_size_t xs) -{ - x->_mp_alloc = 0; - x->_mp_d = (mp_ptr) xp; - x->_mp_size = xs; - return x; -} - -mpz_srcptr -mpz_roinit_n (mpz_t x, mp_srcptr xp, mp_size_t xs) -{ - mpz_roinit_normal_n (x, xp, xs); - mpz_limbs_finish (x, xs); - return x; -} - - -/* Conversions and comparison to double. */ -void -mpz_set_d (mpz_t r, double x) -{ - int sign; - mp_ptr rp; - mp_size_t rn, i; - double B; - double Bi; - mp_limb_t f; - - /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is - zero or infinity. */ - if (x != x || x == x * 0.5) - { - r->_mp_size = 0; - return; - } - - sign = x < 0.0 ; - if (sign) - x = - x; - - if (x < 1.0) - { - r->_mp_size = 0; - return; - } - B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); - Bi = 1.0 / B; - for (rn = 1; x >= B; rn++) - x *= Bi; - - rp = MPZ_REALLOC (r, rn); - - f = (mp_limb_t) x; - x -= f; - assert (x < 1.0); - i = rn-1; - rp[i] = f; - while (--i >= 0) - { - x = B * x; - f = (mp_limb_t) x; - x -= f; - assert (x < 1.0); - rp[i] = f; - } - - r->_mp_size = sign ? - rn : rn; -} - -void -mpz_init_set_d (mpz_t r, double x) -{ - mpz_init (r); - mpz_set_d (r, x); -} - -double -mpz_get_d (const mpz_t u) -{ - int m; - mp_limb_t l; - mp_size_t un; - double x; - double B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); - - un = GMP_ABS (u->_mp_size); - - if (un == 0) - return 0.0; - - l = u->_mp_d[--un]; - gmp_clz (m, l); - m = m + GMP_DBL_MANT_BITS - GMP_LIMB_BITS; - if (m < 0) - l &= GMP_LIMB_MAX << -m; - - for (x = l; --un >= 0;) - { - x = B*x; - if (m > 0) { - l = u->_mp_d[un]; - m -= GMP_LIMB_BITS; - if (m < 0) - l &= GMP_LIMB_MAX << -m; - x += l; - } - } - - if (u->_mp_size < 0) - x = -x; - - return x; -} - -int -mpz_cmpabs_d (const mpz_t x, double d) -{ - mp_size_t xn; - double B, Bi; - mp_size_t i; - - xn = x->_mp_size; - d = GMP_ABS (d); - - if (xn != 0) - { - xn = GMP_ABS (xn); - - B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1); - Bi = 1.0 / B; - - /* Scale d so it can be compared with the top limb. */ - for (i = 1; i < xn; i++) - d *= Bi; - - if (d >= B) - return -1; - - /* Compare floor(d) to top limb, subtract and cancel when equal. */ - for (i = xn; i-- > 0;) - { - mp_limb_t f, xl; - - f = (mp_limb_t) d; - xl = x->_mp_d[i]; - if (xl > f) - return 1; - else if (xl < f) - return -1; - d = B * (d - f); - } - } - return - (d > 0.0); -} - -int -mpz_cmp_d (const mpz_t x, double d) -{ - if (x->_mp_size < 0) - { - if (d >= 0.0) - return -1; - else - return -mpz_cmpabs_d (x, d); - } - else - { - if (d < 0.0) - return 1; - else - return mpz_cmpabs_d (x, d); - } -} - - -/* MPZ comparisons and the like. 
*/ -int -mpz_sgn (const mpz_t u) -{ - return GMP_CMP (u->_mp_size, 0); -} - -int -mpz_cmp_si (const mpz_t u, long v) -{ - mp_size_t usize = u->_mp_size; - - if (v >= 0) - return mpz_cmp_ui (u, v); - else if (usize >= 0) - return 1; - else - return - mpz_cmpabs_ui (u, GMP_NEG_CAST (unsigned long int, v)); -} - -int -mpz_cmp_ui (const mpz_t u, unsigned long v) -{ - mp_size_t usize = u->_mp_size; - - if (usize < 0) - return -1; - else - return mpz_cmpabs_ui (u, v); -} - -int -mpz_cmp (const mpz_t a, const mpz_t b) -{ - mp_size_t asize = a->_mp_size; - mp_size_t bsize = b->_mp_size; - - if (asize != bsize) - return (asize < bsize) ? -1 : 1; - else if (asize >= 0) - return mpn_cmp (a->_mp_d, b->_mp_d, asize); - else - return mpn_cmp (b->_mp_d, a->_mp_d, -asize); -} - -int -mpz_cmpabs_ui (const mpz_t u, unsigned long v) -{ - mp_size_t un = GMP_ABS (u->_mp_size); - - if (! mpn_absfits_ulong_p (u->_mp_d, un)) - return 1; - else - { - unsigned long uu = mpz_get_ui (u); - return GMP_CMP(uu, v); - } -} - -int -mpz_cmpabs (const mpz_t u, const mpz_t v) -{ - return mpn_cmp4 (u->_mp_d, GMP_ABS (u->_mp_size), - v->_mp_d, GMP_ABS (v->_mp_size)); -} - -void -mpz_abs (mpz_t r, const mpz_t u) -{ - mpz_set (r, u); - r->_mp_size = GMP_ABS (r->_mp_size); -} - -void -mpz_neg (mpz_t r, const mpz_t u) -{ - mpz_set (r, u); - r->_mp_size = -r->_mp_size; -} - -void -mpz_swap (mpz_t u, mpz_t v) -{ - MP_SIZE_T_SWAP (u->_mp_alloc, v->_mp_alloc); - MPN_PTR_SWAP (u->_mp_d, u->_mp_size, v->_mp_d, v->_mp_size); -} - - -/* MPZ addition and subtraction */ - - -void -mpz_add_ui (mpz_t r, const mpz_t a, unsigned long b) -{ - mpz_t bb; - mpz_init_set_ui (bb, b); - mpz_add (r, a, bb); - mpz_clear (bb); -} - -void -mpz_sub_ui (mpz_t r, const mpz_t a, unsigned long b) -{ - mpz_ui_sub (r, b, a); - mpz_neg (r, r); -} - -void -mpz_ui_sub (mpz_t r, unsigned long a, const mpz_t b) -{ - mpz_neg (r, b); - mpz_add_ui (r, r, a); -} - -static mp_size_t -mpz_abs_add (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t an = GMP_ABS (a->_mp_size); - mp_size_t bn = GMP_ABS (b->_mp_size); - mp_ptr rp; - mp_limb_t cy; - - if (an < bn) - { - MPZ_SRCPTR_SWAP (a, b); - MP_SIZE_T_SWAP (an, bn); - } - - rp = MPZ_REALLOC (r, an + 1); - cy = mpn_add (rp, a->_mp_d, an, b->_mp_d, bn); - - rp[an] = cy; - - return an + cy; -} - -static mp_size_t -mpz_abs_sub (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t an = GMP_ABS (a->_mp_size); - mp_size_t bn = GMP_ABS (b->_mp_size); - int cmp; - mp_ptr rp; - - cmp = mpn_cmp4 (a->_mp_d, an, b->_mp_d, bn); - if (cmp > 0) - { - rp = MPZ_REALLOC (r, an); - gmp_assert_nocarry (mpn_sub (rp, a->_mp_d, an, b->_mp_d, bn)); - return mpn_normalized_size (rp, an); - } - else if (cmp < 0) - { - rp = MPZ_REALLOC (r, bn); - gmp_assert_nocarry (mpn_sub (rp, b->_mp_d, bn, a->_mp_d, an)); - return -mpn_normalized_size (rp, bn); - } - else - return 0; -} - -void -mpz_add (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t rn; - - if ( (a->_mp_size ^ b->_mp_size) >= 0) - rn = mpz_abs_add (r, a, b); - else - rn = mpz_abs_sub (r, a, b); - - r->_mp_size = a->_mp_size >= 0 ? rn : - rn; -} - -void -mpz_sub (mpz_t r, const mpz_t a, const mpz_t b) -{ - mp_size_t rn; - - if ( (a->_mp_size ^ b->_mp_size) >= 0) - rn = mpz_abs_sub (r, a, b); - else - rn = mpz_abs_add (r, a, b); - - r->_mp_size = a->_mp_size >= 0 ? 
rn : - rn; -} - - -/* MPZ multiplication */ -void -mpz_mul_si (mpz_t r, const mpz_t u, long int v) -{ - if (v < 0) - { - mpz_mul_ui (r, u, GMP_NEG_CAST (unsigned long int, v)); - mpz_neg (r, r); - } - else - mpz_mul_ui (r, u, v); -} - -void -mpz_mul_ui (mpz_t r, const mpz_t u, unsigned long int v) -{ - mpz_t vv; - mpz_init_set_ui (vv, v); - mpz_mul (r, u, vv); - mpz_clear (vv); - return; -} - -void -mpz_mul (mpz_t r, const mpz_t u, const mpz_t v) -{ - int sign; - mp_size_t un, vn, rn; - mpz_t t; - mp_ptr tp; - - un = u->_mp_size; - vn = v->_mp_size; - - if (un == 0 || vn == 0) - { - r->_mp_size = 0; - return; - } - - sign = (un ^ vn) < 0; - - un = GMP_ABS (un); - vn = GMP_ABS (vn); - - mpz_init2 (t, (un + vn) * GMP_LIMB_BITS); - - tp = t->_mp_d; - if (un >= vn) - mpn_mul (tp, u->_mp_d, un, v->_mp_d, vn); - else - mpn_mul (tp, v->_mp_d, vn, u->_mp_d, un); - - rn = un + vn; - rn -= tp[rn-1] == 0; - - t->_mp_size = sign ? - rn : rn; - mpz_swap (r, t); - mpz_clear (t); -} - -void -mpz_mul_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bits) -{ - mp_size_t un, rn; - mp_size_t limbs; - unsigned shift; - mp_ptr rp; - - un = GMP_ABS (u->_mp_size); - if (un == 0) - { - r->_mp_size = 0; - return; - } - - limbs = bits / GMP_LIMB_BITS; - shift = bits % GMP_LIMB_BITS; - - rn = un + limbs + (shift > 0); - rp = MPZ_REALLOC (r, rn); - if (shift > 0) - { - mp_limb_t cy = mpn_lshift (rp + limbs, u->_mp_d, un, shift); - rp[rn-1] = cy; - rn -= (cy == 0); - } - else - mpn_copyd (rp + limbs, u->_mp_d, un); - - mpn_zero (rp, limbs); - - r->_mp_size = (u->_mp_size < 0) ? - rn : rn; -} - -void -mpz_addmul_ui (mpz_t r, const mpz_t u, unsigned long int v) -{ - mpz_t t; - mpz_init_set_ui (t, v); - mpz_mul (t, u, t); - mpz_add (r, r, t); - mpz_clear (t); -} - -void -mpz_submul_ui (mpz_t r, const mpz_t u, unsigned long int v) -{ - mpz_t t; - mpz_init_set_ui (t, v); - mpz_mul (t, u, t); - mpz_sub (r, r, t); - mpz_clear (t); -} - -void -mpz_addmul (mpz_t r, const mpz_t u, const mpz_t v) -{ - mpz_t t; - mpz_init (t); - mpz_mul (t, u, v); - mpz_add (r, r, t); - mpz_clear (t); -} - -void -mpz_submul (mpz_t r, const mpz_t u, const mpz_t v) -{ - mpz_t t; - mpz_init (t); - mpz_mul (t, u, v); - mpz_sub (r, r, t); - mpz_clear (t); -} - - -/* MPZ division */ -enum mpz_div_round_mode { GMP_DIV_FLOOR, GMP_DIV_CEIL, GMP_DIV_TRUNC }; - -/* Allows q or r to be zero. Returns 1 iff remainder is non-zero. */ -static int -mpz_div_qr (mpz_t q, mpz_t r, - const mpz_t n, const mpz_t d, enum mpz_div_round_mode mode) -{ - mp_size_t ns, ds, nn, dn, qs; - ns = n->_mp_size; - ds = d->_mp_size; - - if (ds == 0) - gmp_die("mpz_div_qr: Divide by zero."); - - if (ns == 0) - { - if (q) - q->_mp_size = 0; - if (r) - r->_mp_size = 0; - return 0; - } - - nn = GMP_ABS (ns); - dn = GMP_ABS (ds); - - qs = ds ^ ns; - - if (nn < dn) - { - if (mode == GMP_DIV_CEIL && qs >= 0) - { - /* q = 1, r = n - d */ - if (r) - mpz_sub (r, n, d); - if (q) - mpz_set_ui (q, 1); - } - else if (mode == GMP_DIV_FLOOR && qs < 0) - { - /* q = -1, r = n + d */ - if (r) - mpz_add (r, n, d); - if (q) - mpz_set_si (q, -1); - } - else - { - /* q = 0, r = d */ - if (r) - mpz_set (r, n); - if (q) - q->_mp_size = 0; - } - return 1; - } - else - { - mp_ptr np, qp; - mp_size_t qn, rn; - mpz_t tq, tr; - - mpz_init_set (tr, n); - np = tr->_mp_d; - - qn = nn - dn + 1; - - if (q) - { - mpz_init2 (tq, qn * GMP_LIMB_BITS); - qp = tq->_mp_d; - } - else - qp = NULL; - - mpn_div_qr (qp, np, nn, d->_mp_d, dn); - - if (qp) - { - qn -= (qp[qn-1] == 0); - - tq->_mp_size = qs < 0 ? 
-qn : qn; - } - rn = mpn_normalized_size (np, dn); - tr->_mp_size = ns < 0 ? - rn : rn; - - if (mode == GMP_DIV_FLOOR && qs < 0 && rn != 0) - { - if (q) - mpz_sub_ui (tq, tq, 1); - if (r) - mpz_add (tr, tr, d); - } - else if (mode == GMP_DIV_CEIL && qs >= 0 && rn != 0) - { - if (q) - mpz_add_ui (tq, tq, 1); - if (r) - mpz_sub (tr, tr, d); - } - - if (q) - { - mpz_swap (tq, q); - mpz_clear (tq); - } - if (r) - mpz_swap (tr, r); - - mpz_clear (tr); - - return rn != 0; - } -} - -void -mpz_cdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, r, n, d, GMP_DIV_CEIL); -} - -void -mpz_fdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, r, n, d, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, r, n, d, GMP_DIV_TRUNC); -} - -void -mpz_cdiv_q (mpz_t q, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, NULL, n, d, GMP_DIV_CEIL); -} - -void -mpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, NULL, n, d, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_q (mpz_t q, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC); -} - -void -mpz_cdiv_r (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL); -} - -void -mpz_fdiv_r (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_r (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, GMP_DIV_TRUNC); -} - -void -mpz_mod (mpz_t r, const mpz_t n, const mpz_t d) -{ - mpz_div_qr (NULL, r, n, d, d->_mp_size >= 0 ? GMP_DIV_FLOOR : GMP_DIV_CEIL); -} - -static void -mpz_div_q_2exp (mpz_t q, const mpz_t u, mp_bitcnt_t bit_index, - enum mpz_div_round_mode mode) -{ - mp_size_t un, qn; - mp_size_t limb_cnt; - mp_ptr qp; - int adjust; - - un = u->_mp_size; - if (un == 0) - { - q->_mp_size = 0; - return; - } - limb_cnt = bit_index / GMP_LIMB_BITS; - qn = GMP_ABS (un) - limb_cnt; - bit_index %= GMP_LIMB_BITS; - - if (mode == ((un > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* un != 0 here. */ - /* Note: Below, the final indexing at limb_cnt is valid because at - that point we have qn > 0. */ - adjust = (qn <= 0 - || !mpn_zero_p (u->_mp_d, limb_cnt) - || (u->_mp_d[limb_cnt] - & (((mp_limb_t) 1 << bit_index) - 1))); - else - adjust = 0; - - if (qn <= 0) - qn = 0; - else - { - qp = MPZ_REALLOC (q, qn); - - if (bit_index != 0) - { - mpn_rshift (qp, u->_mp_d + limb_cnt, qn, bit_index); - qn -= qp[qn - 1] == 0; - } - else - { - mpn_copyi (qp, u->_mp_d + limb_cnt, qn); - } - } - - q->_mp_size = qn; - - if (adjust) - mpz_add_ui (q, q, 1); - if (un < 0) - mpz_neg (q, q); -} - -static void -mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index, - enum mpz_div_round_mode mode) -{ - mp_size_t us, un, rn; - mp_ptr rp; - mp_limb_t mask; - - us = u->_mp_size; - if (us == 0 || bit_index == 0) - { - r->_mp_size = 0; - return; - } - rn = (bit_index + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; - assert (rn > 0); - - rp = MPZ_REALLOC (r, rn); - un = GMP_ABS (us); - - mask = GMP_LIMB_MAX >> (rn * GMP_LIMB_BITS - bit_index); - - if (rn > un) - { - /* Quotient (with truncation) is zero, and remainder is - non-zero */ - if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ - { - /* Have to negate and sign extend. */ - mp_size_t i; - - gmp_assert_nocarry (! 
mpn_neg (rp, u->_mp_d, un)); - for (i = un; i < rn - 1; i++) - rp[i] = GMP_LIMB_MAX; - - rp[rn-1] = mask; - us = -us; - } - else - { - /* Just copy */ - if (r != u) - mpn_copyi (rp, u->_mp_d, un); - - rn = un; - } - } - else - { - if (r != u) - mpn_copyi (rp, u->_mp_d, rn - 1); - - rp[rn-1] = u->_mp_d[rn-1] & mask; - - if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */ - { - /* If r != 0, compute 2^{bit_count} - r. */ - mpn_neg (rp, rp, rn); - - rp[rn-1] &= mask; - - /* us is not used for anything else, so we can modify it - here to indicate flipped sign. */ - us = -us; - } - } - rn = mpn_normalized_size (rp, rn); - r->_mp_size = us < 0 ? -rn : rn; -} - -void -mpz_cdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_q_2exp (r, u, cnt, GMP_DIV_CEIL); -} - -void -mpz_fdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_q_2exp (r, u, cnt, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_q_2exp (r, u, cnt, GMP_DIV_TRUNC); -} - -void -mpz_cdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_r_2exp (r, u, cnt, GMP_DIV_CEIL); -} - -void -mpz_fdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_r_2exp (r, u, cnt, GMP_DIV_FLOOR); -} - -void -mpz_tdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt) -{ - mpz_div_r_2exp (r, u, cnt, GMP_DIV_TRUNC); -} - -void -mpz_divexact (mpz_t q, const mpz_t n, const mpz_t d) -{ - gmp_assert_nocarry (mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC)); -} - -int -mpz_divisible_p (const mpz_t n, const mpz_t d) -{ - return mpz_div_qr (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; -} - -int -mpz_congruent_p (const mpz_t a, const mpz_t b, const mpz_t m) -{ - mpz_t t; - int res; - - /* a == b (mod 0) iff a == b */ - if (mpz_sgn (m) == 0) - return (mpz_cmp (a, b) == 0); - - mpz_init (t); - mpz_sub (t, a, b); - res = mpz_divisible_p (t, m); - mpz_clear (t); - - return res; -} - -static unsigned long -mpz_div_qr_ui (mpz_t q, mpz_t r, - const mpz_t n, unsigned long d, enum mpz_div_round_mode mode) -{ - unsigned long ret; - mpz_t rr, dd; - - mpz_init (rr); - mpz_init_set_ui (dd, d); - mpz_div_qr (q, rr, n, dd, mode); - mpz_clear (dd); - ret = mpz_get_ui (rr); - - if (r) - mpz_swap (r, rr); - mpz_clear (rr); - - return ret; -} - -unsigned long -mpz_cdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, r, n, d, GMP_DIV_CEIL); -} - -unsigned long -mpz_fdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, r, n, d, GMP_DIV_FLOOR); -} - -unsigned long -mpz_tdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, r, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_cdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_CEIL); -} - -unsigned long -mpz_fdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_FLOOR); -} - -unsigned long -mpz_tdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_cdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_CEIL); -} -unsigned long -mpz_fdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); -} -unsigned long -mpz_tdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_cdiv_ui 
(const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_CEIL); -} - -unsigned long -mpz_fdiv_ui (const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_FLOOR); -} - -unsigned long -mpz_tdiv_ui (const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC); -} - -unsigned long -mpz_mod_ui (mpz_t r, const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR); -} - -void -mpz_divexact_ui (mpz_t q, const mpz_t n, unsigned long d) -{ - gmp_assert_nocarry (mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC)); -} - -int -mpz_divisible_ui_p (const mpz_t n, unsigned long d) -{ - return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0; -} - - -/* GCD */ -static mp_limb_t -mpn_gcd_11 (mp_limb_t u, mp_limb_t v) -{ - unsigned shift; - - assert ( (u | v) > 0); - - if (u == 0) - return v; - else if (v == 0) - return u; - - gmp_ctz (shift, u | v); - - u >>= shift; - v >>= shift; - - if ( (u & 1) == 0) - MP_LIMB_T_SWAP (u, v); - - while ( (v & 1) == 0) - v >>= 1; - - while (u != v) - { - if (u > v) - { - u -= v; - do - u >>= 1; - while ( (u & 1) == 0); - } - else - { - v -= u; - do - v >>= 1; - while ( (v & 1) == 0); - } - } - return u << shift; -} - -mp_size_t -mpn_gcd (mp_ptr rp, mp_ptr up, mp_size_t un, mp_ptr vp, mp_size_t vn) -{ - assert (un >= vn); - assert (vn > 0); - assert (!GMP_MPN_OVERLAP_P (up, un, vp, vn)); - assert (vp[vn-1] > 0); - assert ((up[0] | vp[0]) & 1); - - if (un > vn) - mpn_div_qr (NULL, up, un, vp, vn); - - un = mpn_normalized_size (up, vn); - if (un == 0) - { - mpn_copyi (rp, vp, vn); - return vn; - } - - if (!(vp[0] & 1)) - MPN_PTR_SWAP (up, un, vp, vn); - - while (un > 1 || vn > 1) - { - int shift; - assert (vp[0] & 1); - - while (up[0] == 0) - { - up++; - un--; - } - gmp_ctz (shift, up[0]); - if (shift > 0) - { - gmp_assert_nocarry (mpn_rshift(up, up, un, shift)); - un -= (up[un-1] == 0); - } - - if (un < vn) - MPN_PTR_SWAP (up, un, vp, vn); - else if (un == vn) - { - int c = mpn_cmp (up, vp, un); - if (c == 0) - { - mpn_copyi (rp, up, un); - return un; - } - else if (c < 0) - MP_PTR_SWAP (up, vp); - } - - gmp_assert_nocarry (mpn_sub (up, up, un, vp, vn)); - un = mpn_normalized_size (up, un); - } - rp[0] = mpn_gcd_11 (up[0], vp[0]); - return 1; -} - -unsigned long -mpz_gcd_ui (mpz_t g, const mpz_t u, unsigned long v) -{ - mpz_t t; - mpz_init_set_ui(t, v); - mpz_gcd (t, u, t); - if (v > 0) - v = mpz_get_ui (t); - - if (g) - mpz_swap (t, g); - - mpz_clear (t); - - return v; -} - -static mp_bitcnt_t -mpz_make_odd (mpz_t r) -{ - mp_bitcnt_t shift; - - assert (r->_mp_size > 0); - /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ - shift = mpn_scan1 (r->_mp_d, 0); - mpz_tdiv_q_2exp (r, r, shift); - - return shift; -} - -void -mpz_gcd (mpz_t g, const mpz_t u, const mpz_t v) -{ - mpz_t tu, tv; - mp_bitcnt_t uz, vz, gz; - - if (u->_mp_size == 0) - { - mpz_abs (g, v); - return; - } - if (v->_mp_size == 0) - { - mpz_abs (g, u); - return; - } - - mpz_init (tu); - mpz_init (tv); - - mpz_abs (tu, u); - uz = mpz_make_odd (tu); - mpz_abs (tv, v); - vz = mpz_make_odd (tv); - gz = GMP_MIN (uz, vz); - - if (tu->_mp_size < tv->_mp_size) - mpz_swap (tu, tv); - - tu->_mp_size = mpn_gcd (tu->_mp_d, tu->_mp_d, tu->_mp_size, tv->_mp_d, tv->_mp_size); - mpz_mul_2exp (g, tu, gz); - - mpz_clear (tu); - mpz_clear (tv); -} - -void -mpz_gcdext (mpz_t g, mpz_t s, mpz_t t, const mpz_t u, const mpz_t v) -{ - mpz_t tu, tv, s0, s1, t0, t1; - mp_bitcnt_t uz, vz, 
gz; - mp_bitcnt_t power; - int cmp; - - if (u->_mp_size == 0) - { - /* g = 0 u + sgn(v) v */ - signed long sign = mpz_sgn (v); - mpz_abs (g, v); - if (s) - s->_mp_size = 0; - if (t) - mpz_set_si (t, sign); - return; - } - - if (v->_mp_size == 0) - { - /* g = sgn(u) u + 0 v */ - signed long sign = mpz_sgn (u); - mpz_abs (g, u); - if (s) - mpz_set_si (s, sign); - if (t) - t->_mp_size = 0; - return; - } - - mpz_init (tu); - mpz_init (tv); - mpz_init (s0); - mpz_init (s1); - mpz_init (t0); - mpz_init (t1); - - mpz_abs (tu, u); - uz = mpz_make_odd (tu); - mpz_abs (tv, v); - vz = mpz_make_odd (tv); - gz = GMP_MIN (uz, vz); - - uz -= gz; - vz -= gz; - - /* Cofactors corresponding to odd gcd. gz handled later. */ - if (tu->_mp_size < tv->_mp_size) - { - mpz_swap (tu, tv); - MPZ_SRCPTR_SWAP (u, v); - MPZ_PTR_SWAP (s, t); - MP_BITCNT_T_SWAP (uz, vz); - } - - /* Maintain - * - * u = t0 tu + t1 tv - * v = s0 tu + s1 tv - * - * where u and v denote the inputs with common factors of two - * eliminated, and det (s0, t0; s1, t1) = 2^p. Then - * - * 2^p tu = s1 u - t1 v - * 2^p tv = -s0 u + t0 v - */ - - /* After initial division, tu = q tv + tu', we have - * - * u = 2^uz (tu' + q tv) - * v = 2^vz tv - * - * or - * - * t0 = 2^uz, t1 = 2^uz q - * s0 = 0, s1 = 2^vz - */ - - mpz_tdiv_qr (t1, tu, tu, tv); - mpz_mul_2exp (t1, t1, uz); - - mpz_setbit (s1, vz); - power = uz + vz; - - if (tu->_mp_size > 0) - { - mp_bitcnt_t shift; - shift = mpz_make_odd (tu); - mpz_setbit (t0, uz + shift); - power += shift; - - for (;;) - { - int c; - c = mpz_cmp (tu, tv); - if (c == 0) - break; - - if (c < 0) - { - /* tv = tv' + tu - * - * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv' - * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */ - - mpz_sub (tv, tv, tu); - mpz_add (t0, t0, t1); - mpz_add (s0, s0, s1); - - shift = mpz_make_odd (tv); - mpz_mul_2exp (t1, t1, shift); - mpz_mul_2exp (s1, s1, shift); - } - else - { - mpz_sub (tu, tu, tv); - mpz_add (t1, t0, t1); - mpz_add (s1, s0, s1); - - shift = mpz_make_odd (tu); - mpz_mul_2exp (t0, t0, shift); - mpz_mul_2exp (s0, s0, shift); - } - power += shift; - } - } - else - mpz_setbit (t0, uz); - - /* Now tv = odd part of gcd, and -s0 and t0 are corresponding - cofactors. */ - - mpz_mul_2exp (tv, tv, gz); - mpz_neg (s0, s0); - - /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To - adjust cofactors, we need u / g and v / g */ - - mpz_divexact (s1, v, tv); - mpz_abs (s1, s1); - mpz_divexact (t1, u, tv); - mpz_abs (t1, t1); - - while (power-- > 0) - { - /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */ - if (mpz_odd_p (s0) || mpz_odd_p (t0)) - { - mpz_sub (s0, s0, s1); - mpz_add (t0, t0, t1); - } - assert (mpz_even_p (t0) && mpz_even_p (s0)); - mpz_tdiv_q_2exp (s0, s0, 1); - mpz_tdiv_q_2exp (t0, t0, 1); - } - - /* Choose small cofactors (they should generally satify - - |s| < |u| / 2g and |t| < |v| / 2g, - - with some documented exceptions). Always choose the smallest s, - if there are two choices for s with same absolute value, choose - the one with smallest corresponding t (this asymmetric condition - is needed to prefer s = 0, |t| = 1 when g = |a| = |b|). 
*/ - mpz_add (s1, s0, s1); - mpz_sub (t1, t0, t1); - cmp = mpz_cmpabs (s0, s1); - if (cmp > 0 || (cmp == 0 && mpz_cmpabs (t0, t1) > 0)) - { - mpz_swap (s0, s1); - mpz_swap (t0, t1); - } - if (u->_mp_size < 0) - mpz_neg (s0, s0); - if (v->_mp_size < 0) - mpz_neg (t0, t0); - - mpz_swap (g, tv); - if (s) - mpz_swap (s, s0); - if (t) - mpz_swap (t, t0); - - mpz_clear (tu); - mpz_clear (tv); - mpz_clear (s0); - mpz_clear (s1); - mpz_clear (t0); - mpz_clear (t1); -} - -void -mpz_lcm (mpz_t r, const mpz_t u, const mpz_t v) -{ - mpz_t g; - - if (u->_mp_size == 0 || v->_mp_size == 0) - { - r->_mp_size = 0; - return; - } - - mpz_init (g); - - mpz_gcd (g, u, v); - mpz_divexact (g, u, g); - mpz_mul (r, g, v); - - mpz_clear (g); - mpz_abs (r, r); -} - -void -mpz_lcm_ui (mpz_t r, const mpz_t u, unsigned long v) -{ - if (v == 0 || u->_mp_size == 0) - { - r->_mp_size = 0; - return; - } - - v /= mpz_gcd_ui (NULL, u, v); - mpz_mul_ui (r, u, v); - - mpz_abs (r, r); -} - -int -mpz_invert (mpz_t r, const mpz_t u, const mpz_t m) -{ - mpz_t g, tr; - int invertible; - - if (u->_mp_size == 0 || mpz_cmpabs_ui (m, 1) <= 0) - return 0; - - mpz_init (g); - mpz_init (tr); - - mpz_gcdext (g, tr, NULL, u, m); - invertible = (mpz_cmp_ui (g, 1) == 0); - - if (invertible) - { - if (tr->_mp_size < 0) - { - if (m->_mp_size >= 0) - mpz_add (tr, tr, m); - else - mpz_sub (tr, tr, m); - } - mpz_swap (r, tr); - } - - mpz_clear (g); - mpz_clear (tr); - return invertible; -} - - -/* Higher level operations (sqrt, pow and root) */ - -void -mpz_pow_ui (mpz_t r, const mpz_t b, unsigned long e) -{ - unsigned long bit; - mpz_t tr; - mpz_init_set_ui (tr, 1); - - bit = GMP_ULONG_HIGHBIT; - do - { - mpz_mul (tr, tr, tr); - if (e & bit) - mpz_mul (tr, tr, b); - bit >>= 1; - } - while (bit > 0); - - mpz_swap (r, tr); - mpz_clear (tr); -} - -void -mpz_ui_pow_ui (mpz_t r, unsigned long blimb, unsigned long e) -{ - mpz_t b; - - mpz_init_set_ui (b, blimb); - mpz_pow_ui (r, b, e); - mpz_clear (b); -} - -void -mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m) -{ - mpz_t tr; - mpz_t base; - mp_size_t en, mn; - mp_srcptr mp; - struct gmp_div_inverse minv; - unsigned shift; - mp_ptr tp = NULL; - - en = GMP_ABS (e->_mp_size); - mn = GMP_ABS (m->_mp_size); - if (mn == 0) - gmp_die ("mpz_powm: Zero modulo."); - - if (en == 0) - { - mpz_set_ui (r, mpz_cmpabs_ui (m, 1)); - return; - } - - mp = m->_mp_d; - mpn_div_qr_invert (&minv, mp, mn); - shift = minv.shift; - - if (shift > 0) - { - /* To avoid shifts, we do all our reductions, except the final - one, using a *normalized* m. */ - minv.shift = 0; - - tp = gmp_alloc_limbs (mn); - gmp_assert_nocarry (mpn_lshift (tp, mp, mn, shift)); - mp = tp; - } - - mpz_init (base); - - if (e->_mp_size < 0) - { - if (!mpz_invert (base, b, m)) - gmp_die ("mpz_powm: Negative exponent and non-invertible base."); - } - else - { - mp_size_t bn; - mpz_abs (base, b); - - bn = base->_mp_size; - if (bn >= mn) - { - mpn_div_qr_preinv (NULL, base->_mp_d, base->_mp_size, mp, mn, &minv); - bn = mn; - } - - /* We have reduced the absolute value. Now take care of the - sign. Note that we get zero represented non-canonically as - m. 
*/ - if (b->_mp_size < 0) - { - mp_ptr bp = MPZ_REALLOC (base, mn); - gmp_assert_nocarry (mpn_sub (bp, mp, mn, bp, bn)); - bn = mn; - } - base->_mp_size = mpn_normalized_size (base->_mp_d, bn); - } - mpz_init_set_ui (tr, 1); - - while (--en >= 0) - { - mp_limb_t w = e->_mp_d[en]; - mp_limb_t bit; - - bit = GMP_LIMB_HIGHBIT; - do - { - mpz_mul (tr, tr, tr); - if (w & bit) - mpz_mul (tr, tr, base); - if (tr->_mp_size > mn) - { - mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); - tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); - } - bit >>= 1; - } - while (bit > 0); - } - - /* Final reduction */ - if (tr->_mp_size >= mn) - { - minv.shift = shift; - mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv); - tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn); - } - if (tp) - gmp_free_limbs (tp, mn); - - mpz_swap (r, tr); - mpz_clear (tr); - mpz_clear (base); -} - -void -mpz_powm_ui (mpz_t r, const mpz_t b, unsigned long elimb, const mpz_t m) -{ - mpz_t e; - - mpz_init_set_ui (e, elimb); - mpz_powm (r, b, e, m); - mpz_clear (e); -} - -/* x=trunc(y^(1/z)), r=y-x^z */ -void -mpz_rootrem (mpz_t x, mpz_t r, const mpz_t y, unsigned long z) -{ - int sgn; - mp_bitcnt_t bc; - mpz_t t, u; - - sgn = y->_mp_size < 0; - if ((~z & sgn) != 0) - gmp_die ("mpz_rootrem: Negative argument, with even root."); - if (z == 0) - gmp_die ("mpz_rootrem: Zeroth root."); - - if (mpz_cmpabs_ui (y, 1) <= 0) { - if (x) - mpz_set (x, y); - if (r) - r->_mp_size = 0; - return; - } - - mpz_init (u); - mpz_init (t); - bc = (mpz_sizeinbase (y, 2) - 1) / z + 1; - mpz_setbit (t, bc); - - if (z == 2) /* simplify sqrt loop: z-1 == 1 */ - do { - mpz_swap (u, t); /* u = x */ - mpz_tdiv_q (t, y, u); /* t = y/x */ - mpz_add (t, t, u); /* t = y/x + x */ - mpz_tdiv_q_2exp (t, t, 1); /* x'= (y/x + x)/2 */ - } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ - else /* z != 2 */ { - mpz_t v; - - mpz_init (v); - if (sgn) - mpz_neg (t, t); - - do { - mpz_swap (u, t); /* u = x */ - mpz_pow_ui (t, u, z - 1); /* t = x^(z-1) */ - mpz_tdiv_q (t, y, t); /* t = y/x^(z-1) */ - mpz_mul_ui (v, u, z - 1); /* v = x*(z-1) */ - mpz_add (t, t, v); /* t = y/x^(z-1) + x*(z-1) */ - mpz_tdiv_q_ui (t, t, z); /* x'=(y/x^(z-1) + x*(z-1))/z */ - } while (mpz_cmpabs (t, u) < 0); /* |x'| < |x| */ - - mpz_clear (v); - } - - if (r) { - mpz_pow_ui (t, u, z); - mpz_sub (r, y, t); - } - if (x) - mpz_swap (x, u); - mpz_clear (u); - mpz_clear (t); -} - -int -mpz_root (mpz_t x, const mpz_t y, unsigned long z) -{ - int res; - mpz_t r; - - mpz_init (r); - mpz_rootrem (x, r, y, z); - res = r->_mp_size == 0; - mpz_clear (r); - - return res; -} - -/* Compute s = floor(sqrt(u)) and r = u - s^2. 
Allows r == NULL */ -void -mpz_sqrtrem (mpz_t s, mpz_t r, const mpz_t u) -{ - mpz_rootrem (s, r, u, 2); -} - -void -mpz_sqrt (mpz_t s, const mpz_t u) -{ - mpz_rootrem (s, NULL, u, 2); -} - -int -mpz_perfect_square_p (const mpz_t u) -{ - if (u->_mp_size <= 0) - return (u->_mp_size == 0); - else - return mpz_root (NULL, u, 2); -} - -int -mpn_perfect_square_p (mp_srcptr p, mp_size_t n) -{ - mpz_t t; - - assert (n > 0); - assert (p [n-1] != 0); - return mpz_root (NULL, mpz_roinit_normal_n (t, p, n), 2); -} - -mp_size_t -mpn_sqrtrem (mp_ptr sp, mp_ptr rp, mp_srcptr p, mp_size_t n) -{ - mpz_t s, r, u; - mp_size_t res; - - assert (n > 0); - assert (p [n-1] != 0); - - mpz_init (r); - mpz_init (s); - mpz_rootrem (s, r, mpz_roinit_normal_n (u, p, n), 2); - - assert (s->_mp_size == (n+1)/2); - mpn_copyd (sp, s->_mp_d, s->_mp_size); - mpz_clear (s); - res = r->_mp_size; - if (rp) - mpn_copyd (rp, r->_mp_d, res); - mpz_clear (r); - return res; -} - -/* Combinatorics */ - -void -mpz_mfac_uiui (mpz_t x, unsigned long n, unsigned long m) -{ - mpz_set_ui (x, n + (n == 0)); - if (m + 1 < 2) return; - while (n > m + 1) - mpz_mul_ui (x, x, n -= m); -} - -void -mpz_2fac_ui (mpz_t x, unsigned long n) -{ - mpz_mfac_uiui (x, n, 2); -} - -void -mpz_fac_ui (mpz_t x, unsigned long n) -{ - mpz_mfac_uiui (x, n, 1); -} - -void -mpz_bin_uiui (mpz_t r, unsigned long n, unsigned long k) -{ - mpz_t t; - - mpz_set_ui (r, k <= n); - - if (k > (n >> 1)) - k = (k <= n) ? n - k : 0; - - mpz_init (t); - mpz_fac_ui (t, k); - - for (; k > 0; --k) - mpz_mul_ui (r, r, n--); - - mpz_divexact (r, r, t); - mpz_clear (t); -} - - -/* Primality testing */ - -/* Computes Kronecker (a/b) with odd b, a!=0 and GCD(a,b) = 1 */ -/* Adapted from JACOBI_BASE_METHOD==4 in mpn/generic/jacbase.c */ -static int -gmp_jacobi_coprime (mp_limb_t a, mp_limb_t b) -{ - int c, bit = 0; - - assert (b & 1); - assert (a != 0); - /* assert (mpn_gcd_11 (a, b) == 1); */ - - /* Below, we represent a and b shifted right so that the least - significant one bit is implicit. */ - b >>= 1; - - gmp_ctz(c, a); - a >>= 1; - - for (;;) - { - a >>= c; - /* (2/b) = -1 if b = 3 or 5 mod 8 */ - bit ^= c & (b ^ (b >> 1)); - if (a < b) - { - if (a == 0) - return bit & 1 ? -1 : 1; - bit ^= a & b; - a = b - a; - b -= a; - } - else - { - a -= b; - assert (a != 0); - } - - gmp_ctz(c, a); - ++c; - } -} - -static void -gmp_lucas_step_k_2k (mpz_t V, mpz_t Qk, const mpz_t n) -{ - mpz_mod (Qk, Qk, n); - /* V_{2k} <- V_k ^ 2 - 2Q^k */ - mpz_mul (V, V, V); - mpz_submul_ui (V, Qk, 2); - mpz_tdiv_r (V, V, n); - /* Q^{2k} = (Q^k)^2 */ - mpz_mul (Qk, Qk, Qk); -} - -/* Computes V_k, Q^k (mod n) for the Lucas' sequence */ -/* with P=1, Q=Q; k = (n>>b0)|1. */ -/* Requires an odd n > 4; b0 > 0; -2*Q must not overflow a long */ -/* Returns (U_k == 0) and sets V=V_k and Qk=Q^k. 
*/ -static int -gmp_lucas_mod (mpz_t V, mpz_t Qk, long Q, - mp_bitcnt_t b0, const mpz_t n) -{ - mp_bitcnt_t bs; - mpz_t U; - int res; - - assert (b0 > 0); - assert (Q <= - (LONG_MIN / 2)); - assert (Q >= - (LONG_MAX / 2)); - assert (mpz_cmp_ui (n, 4) > 0); - assert (mpz_odd_p (n)); - - mpz_init_set_ui (U, 1); /* U1 = 1 */ - mpz_set_ui (V, 1); /* V1 = 1 */ - mpz_set_si (Qk, Q); - - for (bs = mpz_sizeinbase (n, 2) - 1; --bs >= b0;) - { - /* U_{2k} <- U_k * V_k */ - mpz_mul (U, U, V); - /* V_{2k} <- V_k ^ 2 - 2Q^k */ - /* Q^{2k} = (Q^k)^2 */ - gmp_lucas_step_k_2k (V, Qk, n); - - /* A step k->k+1 is performed if the bit in $n$ is 1 */ - /* mpz_tstbit(n,bs) or the bit is 0 in $n$ but */ - /* should be 1 in $n+1$ (bs == b0) */ - if (b0 == bs || mpz_tstbit (n, bs)) - { - /* Q^{k+1} <- Q^k * Q */ - mpz_mul_si (Qk, Qk, Q); - /* U_{k+1} <- (U_k + V_k) / 2 */ - mpz_swap (U, V); /* Keep in V the old value of U_k */ - mpz_add (U, U, V); - /* We have to compute U/2, so we need an even value, */ - /* equivalent (mod n) */ - if (mpz_odd_p (U)) - mpz_add (U, U, n); - mpz_tdiv_q_2exp (U, U, 1); - /* V_{k+1} <-(D*U_k + V_k) / 2 = - U_{k+1} + (D-1)/2*U_k = U_{k+1} - 2Q*U_k */ - mpz_mul_si (V, V, -2*Q); - mpz_add (V, U, V); - mpz_tdiv_r (V, V, n); - } - mpz_tdiv_r (U, U, n); - } - - res = U->_mp_size == 0; - mpz_clear (U); - return res; -} - -/* Performs strong Lucas' test on x, with parameters suggested */ -/* for the BPSW test. Qk is only passed to recycle a variable. */ -/* Requires GCD (x,6) = 1.*/ -static int -gmp_stronglucas (const mpz_t x, mpz_t Qk) -{ - mp_bitcnt_t b0; - mpz_t V, n; - mp_limb_t maxD, D; /* The absolute value is stored. */ - long Q; - mp_limb_t tl; - - /* Test on the absolute value. */ - mpz_roinit_normal_n (n, x->_mp_d, GMP_ABS (x->_mp_size)); - - assert (mpz_odd_p (n)); - /* assert (mpz_gcd_ui (NULL, n, 6) == 1); */ - if (mpz_root (Qk, n, 2)) - return 0; /* A square is composite. */ - - /* Check Ds up to square root (in case, n is prime) - or avoid overflows */ - maxD = (Qk->_mp_size == 1) ? Qk->_mp_d [0] - 1 : GMP_LIMB_MAX; - - D = 3; - /* Search a D such that (D/n) = -1 in the sequence 5,-7,9,-11,.. */ - /* For those Ds we have (D/n) = (n/|D|) */ - do - { - if (D >= maxD) - return 1 + (D != GMP_LIMB_MAX); /* (1 + ! ~ D) */ - D += 2; - tl = mpz_tdiv_ui (n, D); - if (tl == 0) - return 0; - } - while (gmp_jacobi_coprime (tl, D) == 1); - - mpz_init (V); - - /* n-(D/n) = n+1 = d*2^{b0}, with d = (n>>b0) | 1 */ - b0 = mpn_common_scan (~ n->_mp_d[0], 0, n->_mp_d, n->_mp_size, GMP_LIMB_MAX); - /* b0 = mpz_scan0 (n, 0); */ - - /* D= P^2 - 4Q; P = 1; Q = (1-D)/4 */ - Q = (D & 2) ? (long) (D >> 2) + 1 : -(long) (D >> 2); - - if (! gmp_lucas_mod (V, Qk, Q, b0, n)) /* If Ud != 0 */ - while (V->_mp_size != 0 && --b0 != 0) /* while Vk != 0 */ - /* V <- V ^ 2 - 2Q^k */ - /* Q^{2k} = (Q^k)^2 */ - gmp_lucas_step_k_2k (V, Qk, n); - - mpz_clear (V); - return (b0 != 0); -} - -static int -gmp_millerrabin (const mpz_t n, const mpz_t nm1, mpz_t y, - const mpz_t q, mp_bitcnt_t k) -{ - assert (k > 0); - - /* Caller must initialize y to the base. */ - mpz_powm (y, y, q, n); - - if (mpz_cmp_ui (y, 1) == 0 || mpz_cmp (y, nm1) == 0) - return 1; - - while (--k > 0) - { - mpz_powm_ui (y, y, 2, n); - if (mpz_cmp (y, nm1) == 0) - return 1; - } - return 0; -} - -/* This product is 0xc0cfd797, and fits in 32 bits. 
*/ -#define GMP_PRIME_PRODUCT \ - (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL) - -/* Bit (p+1)/2 is set, for each odd prime <= 61 */ -#define GMP_PRIME_MASK 0xc96996dcUL - -int -mpz_probab_prime_p (const mpz_t n, int reps) -{ - mpz_t nm1; - mpz_t q; - mpz_t y; - mp_bitcnt_t k; - int is_prime; - int j; - - /* Note that we use the absolute value of n only, for compatibility - with the real GMP. */ - if (mpz_even_p (n)) - return (mpz_cmpabs_ui (n, 2) == 0) ? 2 : 0; - - /* Above test excludes n == 0 */ - assert (n->_mp_size != 0); - - if (mpz_cmpabs_ui (n, 64) < 0) - return (GMP_PRIME_MASK >> (n->_mp_d[0] >> 1)) & 2; - - if (mpz_gcd_ui (NULL, n, GMP_PRIME_PRODUCT) != 1) - return 0; - - /* All prime factors are >= 31. */ - if (mpz_cmpabs_ui (n, 31*31) < 0) - return 2; - - mpz_init (nm1); - mpz_init (q); - - /* Find q and k, where q is odd and n = 1 + 2**k * q. */ - mpz_abs (nm1, n); - nm1->_mp_d[0] -= 1; - /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */ - k = mpn_scan1 (nm1->_mp_d, 0); - mpz_tdiv_q_2exp (q, nm1, k); - - /* BPSW test */ - mpz_init_set_ui (y, 2); - is_prime = gmp_millerrabin (n, nm1, y, q, k) && gmp_stronglucas (n, y); - reps -= 24; /* skip the first 24 repetitions */ - - /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] = - j^2 + j + 41 using Euler's polynomial. We potentially stop early, - if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps > - 30 (a[30] == 971 > 31*31 == 961). */ - - for (j = 0; is_prime & (j < reps); j++) - { - mpz_set_ui (y, (unsigned long) j*j+j+41); - if (mpz_cmp (y, nm1) >= 0) - { - /* Don't try any further bases. This "early" break does not affect - the result for any reasonable reps value (<=5000 was tested) */ - assert (j >= 30); - break; - } - is_prime = gmp_millerrabin (n, nm1, y, q, k); - } - mpz_clear (nm1); - mpz_clear (q); - mpz_clear (y); - - return is_prime; -} - - -/* Logical operations and bit manipulation. */ - -/* Numbers are treated as if represented in two's complement (and - infinitely sign extended). For a negative values we get the two's - complement from -x = ~x + 1, where ~ is bitwise complement. - Negation transforms - - xxxx10...0 - - into - - yyyy10...0 - - where yyyy is the bitwise complement of xxxx. So least significant - bits, up to and including the first one bit, are unchanged, and - the more significant bits are all complemented. - - To change a bit from zero to one in a negative number, subtract the - corresponding power of two from the absolute value. This can never - underflow. To change a bit from one to zero, add the corresponding - power of two, and this might overflow. E.g., if x = -001111, the - two's complement is 110001. Clearing the least significant bit, we - get two's complement 110000, and -010000. */ - -int -mpz_tstbit (const mpz_t d, mp_bitcnt_t bit_index) -{ - mp_size_t limb_index; - unsigned shift; - mp_size_t ds; - mp_size_t dn; - mp_limb_t w; - int bit; - - ds = d->_mp_size; - dn = GMP_ABS (ds); - limb_index = bit_index / GMP_LIMB_BITS; - if (limb_index >= dn) - return ds < 0; - - shift = bit_index % GMP_LIMB_BITS; - w = d->_mp_d[limb_index]; - bit = (w >> shift) & 1; - - if (ds < 0) - { - /* d < 0. Check if any of the bits below is set: If so, our bit - must be complemented. 
*/ - if (shift > 0 && (mp_limb_t) (w << (GMP_LIMB_BITS - shift)) > 0) - return bit ^ 1; - while (--limb_index >= 0) - if (d->_mp_d[limb_index] > 0) - return bit ^ 1; - } - return bit; -} - -static void -mpz_abs_add_bit (mpz_t d, mp_bitcnt_t bit_index) -{ - mp_size_t dn, limb_index; - mp_limb_t bit; - mp_ptr dp; - - dn = GMP_ABS (d->_mp_size); - - limb_index = bit_index / GMP_LIMB_BITS; - bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); - - if (limb_index >= dn) - { - mp_size_t i; - /* The bit should be set outside of the end of the number. - We have to increase the size of the number. */ - dp = MPZ_REALLOC (d, limb_index + 1); - - dp[limb_index] = bit; - for (i = dn; i < limb_index; i++) - dp[i] = 0; - dn = limb_index + 1; - } - else - { - mp_limb_t cy; - - dp = d->_mp_d; - - cy = mpn_add_1 (dp + limb_index, dp + limb_index, dn - limb_index, bit); - if (cy > 0) - { - dp = MPZ_REALLOC (d, dn + 1); - dp[dn++] = cy; - } - } - - d->_mp_size = (d->_mp_size < 0) ? - dn : dn; -} - -static void -mpz_abs_sub_bit (mpz_t d, mp_bitcnt_t bit_index) -{ - mp_size_t dn, limb_index; - mp_ptr dp; - mp_limb_t bit; - - dn = GMP_ABS (d->_mp_size); - dp = d->_mp_d; - - limb_index = bit_index / GMP_LIMB_BITS; - bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS); - - assert (limb_index < dn); - - gmp_assert_nocarry (mpn_sub_1 (dp + limb_index, dp + limb_index, - dn - limb_index, bit)); - dn = mpn_normalized_size (dp, dn); - d->_mp_size = (d->_mp_size < 0) ? - dn : dn; -} - -void -mpz_setbit (mpz_t d, mp_bitcnt_t bit_index) -{ - if (!mpz_tstbit (d, bit_index)) - { - if (d->_mp_size >= 0) - mpz_abs_add_bit (d, bit_index); - else - mpz_abs_sub_bit (d, bit_index); - } -} - -void -mpz_clrbit (mpz_t d, mp_bitcnt_t bit_index) -{ - if (mpz_tstbit (d, bit_index)) - { - if (d->_mp_size >= 0) - mpz_abs_sub_bit (d, bit_index); - else - mpz_abs_add_bit (d, bit_index); - } -} - -void -mpz_combit (mpz_t d, mp_bitcnt_t bit_index) -{ - if (mpz_tstbit (d, bit_index) ^ (d->_mp_size < 0)) - mpz_abs_sub_bit (d, bit_index); - else - mpz_abs_add_bit (d, bit_index); -} - -void -mpz_com (mpz_t r, const mpz_t u) -{ - mpz_add_ui (r, u, 1); - mpz_neg (r, r); -} - -void -mpz_and (mpz_t r, const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, rn, i; - mp_ptr up, vp, rp; - - mp_limb_t ux, vx, rx; - mp_limb_t uc, vc, rc; - mp_limb_t ul, vl, rl; - - un = GMP_ABS (u->_mp_size); - vn = GMP_ABS (v->_mp_size); - if (un < vn) - { - MPZ_SRCPTR_SWAP (u, v); - MP_SIZE_T_SWAP (un, vn); - } - if (vn == 0) - { - r->_mp_size = 0; - return; - } - - uc = u->_mp_size < 0; - vc = v->_mp_size < 0; - rc = uc & vc; - - ux = -uc; - vx = -vc; - rx = -rc; - - /* If the smaller input is positive, higher limbs don't matter. */ - rn = vx ? un : vn; - - rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); - - up = u->_mp_d; - vp = v->_mp_d; - - i = 0; - do - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - vl = (vp[i] ^ vx) + vc; - vc = vl < vc; - - rl = ( (ul & vl) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - while (++i < vn); - assert (vc == 0); - - for (; i < rn; i++) - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - rl = ( (ul & vx) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - if (rc) - rp[rn++] = rc; - else - rn = mpn_normalized_size (rp, rn); - - r->_mp_size = rx ? 
-rn : rn; -} - -void -mpz_ior (mpz_t r, const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, rn, i; - mp_ptr up, vp, rp; - - mp_limb_t ux, vx, rx; - mp_limb_t uc, vc, rc; - mp_limb_t ul, vl, rl; - - un = GMP_ABS (u->_mp_size); - vn = GMP_ABS (v->_mp_size); - if (un < vn) - { - MPZ_SRCPTR_SWAP (u, v); - MP_SIZE_T_SWAP (un, vn); - } - if (vn == 0) - { - mpz_set (r, u); - return; - } - - uc = u->_mp_size < 0; - vc = v->_mp_size < 0; - rc = uc | vc; - - ux = -uc; - vx = -vc; - rx = -rc; - - /* If the smaller input is negative, by sign extension higher limbs - don't matter. */ - rn = vx ? vn : un; - - rp = MPZ_REALLOC (r, rn + (mp_size_t) rc); - - up = u->_mp_d; - vp = v->_mp_d; - - i = 0; - do - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - vl = (vp[i] ^ vx) + vc; - vc = vl < vc; - - rl = ( (ul | vl) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - while (++i < vn); - assert (vc == 0); - - for (; i < rn; i++) - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - rl = ( (ul | vx) ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - if (rc) - rp[rn++] = rc; - else - rn = mpn_normalized_size (rp, rn); - - r->_mp_size = rx ? -rn : rn; -} - -void -mpz_xor (mpz_t r, const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, i; - mp_ptr up, vp, rp; - - mp_limb_t ux, vx, rx; - mp_limb_t uc, vc, rc; - mp_limb_t ul, vl, rl; - - un = GMP_ABS (u->_mp_size); - vn = GMP_ABS (v->_mp_size); - if (un < vn) - { - MPZ_SRCPTR_SWAP (u, v); - MP_SIZE_T_SWAP (un, vn); - } - if (vn == 0) - { - mpz_set (r, u); - return; - } - - uc = u->_mp_size < 0; - vc = v->_mp_size < 0; - rc = uc ^ vc; - - ux = -uc; - vx = -vc; - rx = -rc; - - rp = MPZ_REALLOC (r, un + (mp_size_t) rc); - - up = u->_mp_d; - vp = v->_mp_d; - - i = 0; - do - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - vl = (vp[i] ^ vx) + vc; - vc = vl < vc; - - rl = (ul ^ vl ^ rx) + rc; - rc = rl < rc; - rp[i] = rl; - } - while (++i < vn); - assert (vc == 0); - - for (; i < un; i++) - { - ul = (up[i] ^ ux) + uc; - uc = ul < uc; - - rl = (ul ^ ux) + rc; - rc = rl < rc; - rp[i] = rl; - } - if (rc) - rp[un++] = rc; - else - un = mpn_normalized_size (rp, un); - - r->_mp_size = rx ? -un : un; -} - -static unsigned -gmp_popcount_limb (mp_limb_t x) -{ - unsigned c; - - /* Do 16 bits at a time, to avoid limb-sized constants. 
*/ - int LOCAL_SHIFT_BITS = 16; - for (c = 0; x > 0;) - { - unsigned w = x - ((x >> 1) & 0x5555); - w = ((w >> 2) & 0x3333) + (w & 0x3333); - w = (w >> 4) + w; - w = ((w >> 8) & 0x000f) + (w & 0x000f); - c += w; - if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) - x >>= LOCAL_SHIFT_BITS; - else - x = 0; - } - return c; -} - -mp_bitcnt_t -mpn_popcount (mp_srcptr p, mp_size_t n) -{ - mp_size_t i; - mp_bitcnt_t c; - - for (c = 0, i = 0; i < n; i++) - c += gmp_popcount_limb (p[i]); - - return c; -} - -mp_bitcnt_t -mpz_popcount (const mpz_t u) -{ - mp_size_t un; - - un = u->_mp_size; - - if (un < 0) - return ~(mp_bitcnt_t) 0; - - return mpn_popcount (u->_mp_d, un); -} - -mp_bitcnt_t -mpz_hamdist (const mpz_t u, const mpz_t v) -{ - mp_size_t un, vn, i; - mp_limb_t uc, vc, ul, vl, comp; - mp_srcptr up, vp; - mp_bitcnt_t c; - - un = u->_mp_size; - vn = v->_mp_size; - - if ( (un ^ vn) < 0) - return ~(mp_bitcnt_t) 0; - - comp = - (uc = vc = (un < 0)); - if (uc) - { - assert (vn < 0); - un = -un; - vn = -vn; - } - - up = u->_mp_d; - vp = v->_mp_d; - - if (un < vn) - MPN_SRCPTR_SWAP (up, un, vp, vn); - - for (i = 0, c = 0; i < vn; i++) - { - ul = (up[i] ^ comp) + uc; - uc = ul < uc; - - vl = (vp[i] ^ comp) + vc; - vc = vl < vc; - - c += gmp_popcount_limb (ul ^ vl); - } - assert (vc == 0); - - for (; i < un; i++) - { - ul = (up[i] ^ comp) + uc; - uc = ul < uc; - - c += gmp_popcount_limb (ul ^ comp); - } - - return c; -} - -mp_bitcnt_t -mpz_scan1 (const mpz_t u, mp_bitcnt_t starting_bit) -{ - mp_ptr up; - mp_size_t us, un, i; - mp_limb_t limb, ux; - - us = u->_mp_size; - un = GMP_ABS (us); - i = starting_bit / GMP_LIMB_BITS; - - /* Past the end there's no 1 bits for u>=0, or an immediate 1 bit - for u<0. Notice this test picks up any u==0 too. */ - if (i >= un) - return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit); - - up = u->_mp_d; - ux = 0; - limb = up[i]; - - if (starting_bit != 0) - { - if (us < 0) - { - ux = mpn_zero_p (up, i); - limb = ~ limb + ux; - ux = - (mp_limb_t) (limb >= ux); - } - - /* Mask to 0 all bits before starting_bit, thus ignoring them. */ - limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); - } - - return mpn_common_scan (limb, i, up, un, ux); -} - -mp_bitcnt_t -mpz_scan0 (const mpz_t u, mp_bitcnt_t starting_bit) -{ - mp_ptr up; - mp_size_t us, un, i; - mp_limb_t limb, ux; - - us = u->_mp_size; - ux = - (mp_limb_t) (us >= 0); - un = GMP_ABS (us); - i = starting_bit / GMP_LIMB_BITS; - - /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for - u<0. Notice this test picks up all cases of u==0 too. */ - if (i >= un) - return (ux ? starting_bit : ~(mp_bitcnt_t) 0); - - up = u->_mp_d; - limb = up[i] ^ ux; - - if (ux == 0) - limb -= mpn_zero_p (up, i); /* limb = ~(~limb + zero_p) */ - - /* Mask all bits before starting_bit, thus ignoring them. */ - limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS); - - return mpn_common_scan (limb, i, up, un, ux); -} - - -/* MPZ base conversion. 
*/ - -size_t -mpz_sizeinbase (const mpz_t u, int base) -{ - mp_size_t un, tn; - mp_srcptr up; - mp_ptr tp; - mp_bitcnt_t bits; - struct gmp_div_inverse bi; - size_t ndigits; - - assert (base >= 2); - assert (base <= 62); - - un = GMP_ABS (u->_mp_size); - if (un == 0) - return 1; - - up = u->_mp_d; - - bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]); - switch (base) - { - case 2: - return bits; - case 4: - return (bits + 1) / 2; - case 8: - return (bits + 2) / 3; - case 16: - return (bits + 3) / 4; - case 32: - return (bits + 4) / 5; - /* FIXME: Do something more clever for the common case of base - 10. */ - } - - tp = gmp_alloc_limbs (un); - mpn_copyi (tp, up, un); - mpn_div_qr_1_invert (&bi, base); - - tn = un; - ndigits = 0; - do - { - ndigits++; - mpn_div_qr_1_preinv (tp, tp, tn, &bi); - tn -= (tp[tn-1] == 0); - } - while (tn > 0); - - gmp_free_limbs (tp, un); - return ndigits; -} - -char * -mpz_get_str (char *sp, int base, const mpz_t u) -{ - unsigned bits; - const char *digits; - mp_size_t un; - size_t i, sn, osn; - - digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; - if (base > 1) - { - if (base <= 36) - digits = "0123456789abcdefghijklmnopqrstuvwxyz"; - else if (base > 62) - return NULL; - } - else if (base >= -1) - base = 10; - else - { - base = -base; - if (base > 36) - return NULL; - } - - sn = 1 + mpz_sizeinbase (u, base); - if (!sp) - { - osn = 1 + sn; - sp = (char *) gmp_alloc (osn); - } - else - osn = 0; - un = GMP_ABS (u->_mp_size); - - if (un == 0) - { - sp[0] = '0'; - sn = 1; - goto ret; - } - - i = 0; - - if (u->_mp_size < 0) - sp[i++] = '-'; - - bits = mpn_base_power_of_two_p (base); - - if (bits) - /* Not modified in this case. */ - sn = i + mpn_get_str_bits ((unsigned char *) sp + i, bits, u->_mp_d, un); - else - { - struct mpn_base_info info; - mp_ptr tp; - - mpn_get_base_info (&info, base); - tp = gmp_alloc_limbs (un); - mpn_copyi (tp, u->_mp_d, un); - - sn = i + mpn_get_str_other ((unsigned char *) sp + i, base, &info, tp, un); - gmp_free_limbs (tp, un); - } - - for (; i < sn; i++) - sp[i] = digits[(unsigned char) sp[i]]; - -ret: - sp[sn] = '\0'; - if (osn && osn != sn + 1) - sp = (char*) gmp_realloc (sp, osn, sn + 1); - return sp; -} - -int -mpz_set_str (mpz_t r, const char *sp, int base) -{ - unsigned bits, value_of_a; - mp_size_t rn, alloc; - mp_ptr rp; - size_t dn, sn; - int sign; - unsigned char *dp; - - assert (base == 0 || (base >= 2 && base <= 62)); - - while (isspace( (unsigned char) *sp)) - sp++; - - sign = (*sp == '-'); - sp += sign; - - if (base == 0) - { - if (sp[0] == '0') - { - if (sp[1] == 'x' || sp[1] == 'X') - { - base = 16; - sp += 2; - } - else if (sp[1] == 'b' || sp[1] == 'B') - { - base = 2; - sp += 2; - } - else - base = 8; - } - else - base = 10; - } - - if (!*sp) - { - r->_mp_size = 0; - return -1; - } - sn = strlen(sp); - dp = (unsigned char *) gmp_alloc (sn); - - value_of_a = (base > 36) ? 
36 : 10; - for (dn = 0; *sp; sp++) - { - unsigned digit; - - if (isspace ((unsigned char) *sp)) - continue; - else if (*sp >= '0' && *sp <= '9') - digit = *sp - '0'; - else if (*sp >= 'a' && *sp <= 'z') - digit = *sp - 'a' + value_of_a; - else if (*sp >= 'A' && *sp <= 'Z') - digit = *sp - 'A' + 10; - else - digit = base; /* fail */ - - if (digit >= (unsigned) base) - { - gmp_free (dp, sn); - r->_mp_size = 0; - return -1; - } - - dp[dn++] = digit; - } - - if (!dn) - { - gmp_free (dp, sn); - r->_mp_size = 0; - return -1; - } - bits = mpn_base_power_of_two_p (base); - - if (bits > 0) - { - alloc = (dn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS; - rp = MPZ_REALLOC (r, alloc); - rn = mpn_set_str_bits (rp, dp, dn, bits); - } - else - { - struct mpn_base_info info; - mpn_get_base_info (&info, base); - alloc = (dn + info.exp - 1) / info.exp; - rp = MPZ_REALLOC (r, alloc); - rn = mpn_set_str_other (rp, dp, dn, base, &info); - /* Normalization, needed for all-zero input. */ - assert (rn > 0); - rn -= rp[rn-1] == 0; - } - assert (rn <= alloc); - gmp_free (dp, sn); - - r->_mp_size = sign ? - rn : rn; - - return 0; -} - -int -mpz_init_set_str (mpz_t r, const char *sp, int base) -{ - mpz_init (r); - return mpz_set_str (r, sp, base); -} - -size_t -mpz_out_str (FILE *stream, int base, const mpz_t x) -{ - char *str; - size_t len, n; - - str = mpz_get_str (NULL, base, x); - if (!str) - return 0; - len = strlen (str); - n = fwrite (str, 1, len, stream); - gmp_free (str, len + 1); - return n; -} - - -static int -gmp_detect_endian (void) -{ - static const int i = 2; - const unsigned char *p = (const unsigned char *) &i; - return 1 - *p; -} - -/* Import and export. Does not support nails. */ -void -mpz_import (mpz_t r, size_t count, int order, size_t size, int endian, - size_t nails, const void *src) -{ - const unsigned char *p; - ptrdiff_t word_step; - mp_ptr rp; - mp_size_t rn; - - /* The current (partial) limb. */ - mp_limb_t limb; - /* The number of bytes already copied to this limb (starting from - the low end). */ - size_t bytes; - /* The index where the limb should be stored, when completed. */ - mp_size_t i; - - if (nails != 0) - gmp_die ("mpz_import: Nails not supported."); - - assert (order == 1 || order == -1); - assert (endian >= -1 && endian <= 1); - - if (endian == 0) - endian = gmp_detect_endian (); - - p = (unsigned char *) src; - - word_step = (order != endian) ? 2 * size : 0; - - /* Process bytes from the least significant end, so point p at the - least significant word. */ - if (order == 1) - { - p += size * (count - 1); - word_step = - word_step; - } - - /* And at least significant byte of that word. 
*/ - if (endian == 1) - p += (size - 1); - - rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t); - rp = MPZ_REALLOC (r, rn); - - for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step) - { - size_t j; - for (j = 0; j < size; j++, p -= (ptrdiff_t) endian) - { - limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT); - if (bytes == sizeof(mp_limb_t)) - { - rp[i++] = limb; - bytes = 0; - limb = 0; - } - } - } - assert (i + (bytes > 0) == rn); - if (limb != 0) - rp[i++] = limb; - else - i = mpn_normalized_size (rp, i); - - r->_mp_size = i; -} - -void * -mpz_export (void *r, size_t *countp, int order, size_t size, int endian, - size_t nails, const mpz_t u) -{ - size_t count; - mp_size_t un; - - if (nails != 0) - gmp_die ("mpz_export: Nails not supported."); - - assert (order == 1 || order == -1); - assert (endian >= -1 && endian <= 1); - assert (size > 0 || u->_mp_size == 0); - - un = u->_mp_size; - count = 0; - if (un != 0) - { - size_t k; - unsigned char *p; - ptrdiff_t word_step; - /* The current (partial) limb. */ - mp_limb_t limb; - /* The number of bytes left to do in this limb. */ - size_t bytes; - /* The index where the limb was read. */ - mp_size_t i; - - un = GMP_ABS (un); - - /* Count bytes in top limb. */ - limb = u->_mp_d[un-1]; - assert (limb != 0); - - k = (GMP_LIMB_BITS <= CHAR_BIT); - if (!k) - { - do { - int LOCAL_CHAR_BIT = CHAR_BIT; - k++; limb >>= LOCAL_CHAR_BIT; - } while (limb != 0); - } - /* else limb = 0; */ - - count = (k + (un-1) * sizeof (mp_limb_t) + size - 1) / size; - - if (!r) - r = gmp_alloc (count * size); - - if (endian == 0) - endian = gmp_detect_endian (); - - p = (unsigned char *) r; - - word_step = (order != endian) ? 2 * size : 0; - - /* Process bytes from the least significant end, so point p at the - least significant word. */ - if (order == 1) - { - p += size * (count - 1); - word_step = - word_step; - } - - /* And at least significant byte of that word. */ - if (endian == 1) - p += (size - 1); - - for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step) - { - size_t j; - for (j = 0; j < size; ++j, p -= (ptrdiff_t) endian) - { - if (sizeof (mp_limb_t) == 1) - { - if (i < un) - *p = u->_mp_d[i++]; - else - *p = 0; - } - else - { - int LOCAL_CHAR_BIT = CHAR_BIT; - if (bytes == 0) - { - if (i < un) - limb = u->_mp_d[i++]; - bytes = sizeof (mp_limb_t); - } - *p = limb; - limb >>= LOCAL_CHAR_BIT; - bytes--; - } - } - } - assert (i == un); - assert (k == count); - } - - if (countp) - *countp = count; - - return r; -} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp.h deleted file mode 100644 index f28cb360ce..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mini-gmp.h +++ /dev/null @@ -1,311 +0,0 @@ -/* mini-gmp, a minimalistic implementation of a GNU GMP subset. - -Copyright 2011-2015, 2017, 2019-2021 Free Software Foundation, Inc. - -This file is part of the GNU MP Library. - -The GNU MP Library is free software; you can redistribute it and/or modify -it under the terms of either: - - * the GNU Lesser General Public License as published by the Free - Software Foundation; either version 3 of the License, or (at your - option) any later version. - -or - - * the GNU General Public License as published by the Free Software - Foundation; either version 2 of the License, or (at your option) any - later version. - -or both in parallel, as here. 
- -The GNU MP Library is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -for more details. - -You should have received copies of the GNU General Public License and the -GNU Lesser General Public License along with the GNU MP Library. If not, -see https://www.gnu.org/licenses/. */ - -/* About mini-gmp: This is a minimal implementation of a subset of the - GMP interface. It is intended for inclusion into applications which - have modest bignums needs, as a fallback when the real GMP library - is not installed. - - This file defines the public interface. */ - -#ifndef __MINI_GMP_H__ -#define __MINI_GMP_H__ - -/* For size_t */ -#include <stddef.h> - -#if defined (__cplusplus) -extern "C" { -#endif - -void mp_set_memory_functions (void *(*) (size_t), - void *(*) (void *, size_t, size_t), - void (*) (void *, size_t)); - -void mp_get_memory_functions (void *(**) (size_t), - void *(**) (void *, size_t, size_t), - void (**) (void *, size_t)); - -#ifndef MINI_GMP_LIMB_TYPE -#define MINI_GMP_LIMB_TYPE long -#endif - -typedef unsigned MINI_GMP_LIMB_TYPE mp_limb_t; -typedef long mp_size_t; -typedef unsigned long mp_bitcnt_t; - -typedef mp_limb_t *mp_ptr; -typedef const mp_limb_t *mp_srcptr; - -typedef struct -{ - int _mp_alloc; /* Number of *limbs* allocated and pointed - to by the _mp_d field. */ - int _mp_size; /* abs(_mp_size) is the number of limbs the - last field points to. If _mp_size is - negative this is a negative number. */ - mp_limb_t *_mp_d; /* Pointer to the limbs. */ -} __mpz_struct; - -typedef __mpz_struct mpz_t[1]; - -typedef __mpz_struct *mpz_ptr; -typedef const __mpz_struct *mpz_srcptr; - -extern const int mp_bits_per_limb; - -void mpn_copyi (mp_ptr, mp_srcptr, mp_size_t); -void mpn_copyd (mp_ptr, mp_srcptr, mp_size_t); -void mpn_zero (mp_ptr, mp_size_t); - -int mpn_cmp (mp_srcptr, mp_srcptr, mp_size_t); -int mpn_zero_p (mp_srcptr, mp_size_t); - -mp_limb_t mpn_add_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_add_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); -mp_limb_t mpn_add (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); - -mp_limb_t mpn_sub_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_sub_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); -mp_limb_t mpn_sub (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); - -mp_limb_t mpn_mul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_addmul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); -mp_limb_t mpn_submul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); - -mp_limb_t mpn_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); -void mpn_mul_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); -void mpn_sqr (mp_ptr, mp_srcptr, mp_size_t); -int mpn_perfect_square_p (mp_srcptr, mp_size_t); -mp_size_t mpn_sqrtrem (mp_ptr, mp_ptr, mp_srcptr, mp_size_t); -mp_size_t mpn_gcd (mp_ptr, mp_ptr, mp_size_t, mp_ptr, mp_size_t); - -mp_limb_t mpn_lshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); -mp_limb_t mpn_rshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); - -mp_bitcnt_t mpn_scan0 (mp_srcptr, mp_bitcnt_t); -mp_bitcnt_t mpn_scan1 (mp_srcptr, mp_bitcnt_t); - -void mpn_com (mp_ptr, mp_srcptr, mp_size_t); -mp_limb_t mpn_neg (mp_ptr, mp_srcptr, mp_size_t); - -mp_bitcnt_t mpn_popcount (mp_srcptr, mp_size_t); - -mp_limb_t mpn_invert_3by2 (mp_limb_t, mp_limb_t); -#define mpn_invert_limb(x) mpn_invert_3by2 ((x), 0) - -size_t mpn_get_str (unsigned char *, int, mp_ptr, mp_size_t);
-mp_size_t mpn_set_str (mp_ptr, const unsigned char *, size_t, int); - -void mpz_init (mpz_t); -void mpz_init2 (mpz_t, mp_bitcnt_t); -void mpz_clear (mpz_t); - -#define mpz_odd_p(z) (((z)->_mp_size != 0) & (int) (z)->_mp_d[0]) -#define mpz_even_p(z) (! mpz_odd_p (z)) - -int mpz_sgn (const mpz_t); -int mpz_cmp_si (const mpz_t, long); -int mpz_cmp_ui (const mpz_t, unsigned long); -int mpz_cmp (const mpz_t, const mpz_t); -int mpz_cmpabs_ui (const mpz_t, unsigned long); -int mpz_cmpabs (const mpz_t, const mpz_t); -int mpz_cmp_d (const mpz_t, double); -int mpz_cmpabs_d (const mpz_t, double); - -void mpz_abs (mpz_t, const mpz_t); -void mpz_neg (mpz_t, const mpz_t); -void mpz_swap (mpz_t, mpz_t); - -void mpz_add_ui (mpz_t, const mpz_t, unsigned long); -void mpz_add (mpz_t, const mpz_t, const mpz_t); -void mpz_sub_ui (mpz_t, const mpz_t, unsigned long); -void mpz_ui_sub (mpz_t, unsigned long, const mpz_t); -void mpz_sub (mpz_t, const mpz_t, const mpz_t); - -void mpz_mul_si (mpz_t, const mpz_t, long int); -void mpz_mul_ui (mpz_t, const mpz_t, unsigned long int); -void mpz_mul (mpz_t, const mpz_t, const mpz_t); -void mpz_mul_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_addmul_ui (mpz_t, const mpz_t, unsigned long int); -void mpz_addmul (mpz_t, const mpz_t, const mpz_t); -void mpz_submul_ui (mpz_t, const mpz_t, unsigned long int); -void mpz_submul (mpz_t, const mpz_t, const mpz_t); - -void mpz_cdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_fdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_tdiv_qr (mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_cdiv_q (mpz_t, const mpz_t, const mpz_t); -void mpz_fdiv_q (mpz_t, const mpz_t, const mpz_t); -void mpz_tdiv_q (mpz_t, const mpz_t, const mpz_t); -void mpz_cdiv_r (mpz_t, const mpz_t, const mpz_t); -void mpz_fdiv_r (mpz_t, const mpz_t, const mpz_t); -void mpz_tdiv_r (mpz_t, const mpz_t, const mpz_t); - -void mpz_cdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_fdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_tdiv_q_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_cdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_fdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); -void mpz_tdiv_r_2exp (mpz_t, const mpz_t, mp_bitcnt_t); - -void mpz_mod (mpz_t, const mpz_t, const mpz_t); - -void mpz_divexact (mpz_t, const mpz_t, const mpz_t); - -int mpz_divisible_p (const mpz_t, const mpz_t); -int mpz_congruent_p (const mpz_t, const mpz_t, const mpz_t); - -unsigned long mpz_cdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); -unsigned long mpz_fdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); -unsigned long mpz_tdiv_qr_ui (mpz_t, mpz_t, const mpz_t, unsigned long); -unsigned long mpz_cdiv_q_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_fdiv_q_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_tdiv_q_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_cdiv_r_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_fdiv_r_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_tdiv_r_ui (mpz_t, const mpz_t, unsigned long); -unsigned long mpz_cdiv_ui (const mpz_t, unsigned long); -unsigned long mpz_fdiv_ui (const mpz_t, unsigned long); -unsigned long mpz_tdiv_ui (const mpz_t, unsigned long); - -unsigned long mpz_mod_ui (mpz_t, const mpz_t, unsigned long); - -void mpz_divexact_ui (mpz_t, const mpz_t, unsigned long); - -int mpz_divisible_ui_p (const mpz_t, unsigned long); - -unsigned long mpz_gcd_ui (mpz_t, const mpz_t, unsigned long); -void mpz_gcd (mpz_t, const mpz_t, const mpz_t); -void 
mpz_gcdext (mpz_t, mpz_t, mpz_t, const mpz_t, const mpz_t); -void mpz_lcm_ui (mpz_t, const mpz_t, unsigned long); -void mpz_lcm (mpz_t, const mpz_t, const mpz_t); -int mpz_invert (mpz_t, const mpz_t, const mpz_t); - -void mpz_sqrtrem (mpz_t, mpz_t, const mpz_t); -void mpz_sqrt (mpz_t, const mpz_t); -int mpz_perfect_square_p (const mpz_t); - -void mpz_pow_ui (mpz_t, const mpz_t, unsigned long); -void mpz_ui_pow_ui (mpz_t, unsigned long, unsigned long); -void mpz_powm (mpz_t, const mpz_t, const mpz_t, const mpz_t); -void mpz_powm_ui (mpz_t, const mpz_t, unsigned long, const mpz_t); - -void mpz_rootrem (mpz_t, mpz_t, const mpz_t, unsigned long); -int mpz_root (mpz_t, const mpz_t, unsigned long); - -void mpz_fac_ui (mpz_t, unsigned long); -void mpz_2fac_ui (mpz_t, unsigned long); -void mpz_mfac_uiui (mpz_t, unsigned long, unsigned long); -void mpz_bin_uiui (mpz_t, unsigned long, unsigned long); - -int mpz_probab_prime_p (const mpz_t, int); - -int mpz_tstbit (const mpz_t, mp_bitcnt_t); -void mpz_setbit (mpz_t, mp_bitcnt_t); -void mpz_clrbit (mpz_t, mp_bitcnt_t); -void mpz_combit (mpz_t, mp_bitcnt_t); - -void mpz_com (mpz_t, const mpz_t); -void mpz_and (mpz_t, const mpz_t, const mpz_t); -void mpz_ior (mpz_t, const mpz_t, const mpz_t); -void mpz_xor (mpz_t, const mpz_t, const mpz_t); - -mp_bitcnt_t mpz_popcount (const mpz_t); -mp_bitcnt_t mpz_hamdist (const mpz_t, const mpz_t); -mp_bitcnt_t mpz_scan0 (const mpz_t, mp_bitcnt_t); -mp_bitcnt_t mpz_scan1 (const mpz_t, mp_bitcnt_t); - -int mpz_fits_slong_p (const mpz_t); -int mpz_fits_ulong_p (const mpz_t); -int mpz_fits_sint_p (const mpz_t); -int mpz_fits_uint_p (const mpz_t); -int mpz_fits_sshort_p (const mpz_t); -int mpz_fits_ushort_p (const mpz_t); -long int mpz_get_si (const mpz_t); -unsigned long int mpz_get_ui (const mpz_t); -double mpz_get_d (const mpz_t); -size_t mpz_size (const mpz_t); -mp_limb_t mpz_getlimbn (const mpz_t, mp_size_t); - -void mpz_realloc2 (mpz_t, mp_bitcnt_t); -mp_srcptr mpz_limbs_read (mpz_srcptr); -mp_ptr mpz_limbs_modify (mpz_t, mp_size_t); -mp_ptr mpz_limbs_write (mpz_t, mp_size_t); -void mpz_limbs_finish (mpz_t, mp_size_t); -mpz_srcptr mpz_roinit_n (mpz_t, mp_srcptr, mp_size_t); - -#define MPZ_ROINIT_N(xp, xs) {{0, (xs),(xp) }} - -void mpz_set_si (mpz_t, signed long int); -void mpz_set_ui (mpz_t, unsigned long int); -void mpz_set (mpz_t, const mpz_t); -void mpz_set_d (mpz_t, double); - -void mpz_init_set_si (mpz_t, signed long int); -void mpz_init_set_ui (mpz_t, unsigned long int); -void mpz_init_set (mpz_t, const mpz_t); -void mpz_init_set_d (mpz_t, double); - -size_t mpz_sizeinbase (const mpz_t, int); -char *mpz_get_str (char *, int, const mpz_t); -int mpz_set_str (mpz_t, const char *, int); -int mpz_init_set_str (mpz_t, const char *, int); - -/* This long list taken from gmp.h. */ -/* For reference, "defined(EOF)" cannot be used here. In g++ 2.95.4, - <iostream> defines EOF but not FILE.
*/ -#if defined (FILE) \ - || defined (H_STDIO) \ - || defined (_H_STDIO) /* AIX */ \ - || defined (_STDIO_H) /* glibc, Sun, SCO */ \ - || defined (_STDIO_H_) /* BSD, OSF */ \ - || defined (__STDIO_H) /* Borland */ \ - || defined (__STDIO_H__) /* IRIX */ \ - || defined (_STDIO_INCLUDED) /* HPUX */ \ - || defined (__dj_include_stdio_h_) /* DJGPP */ \ - || defined (_FILE_DEFINED) /* Microsoft */ \ - || defined (__STDIO__) /* Apple MPW MrC */ \ - || defined (_MSL_STDIO_H) /* Metrowerks */ \ - || defined (_STDIO_H_INCLUDED) /* QNX4 */ \ - || defined (_ISO_STDIO_ISO_H) /* Sun C++ */ \ - || defined (__STDIO_LOADED) /* VMS */ \ - || defined (_STDIO) /* HPE NonStop */ \ - || defined (__DEFINED_FILE) /* musl */ -size_t mpz_out_str (FILE *, int, const mpz_t); -#endif - -void mpz_import (mpz_t, size_t, int, size_t, int, size_t, const void *); -void *mpz_export (void *, size_t *, int, size_t, int, size_t, const mpz_t); - -#if defined (__cplusplus) -} -#endif -#endif /* __MINI_GMP_H__ */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign_namespace.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign_namespace.h index bbfe72c13b..54e90326be 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign_namespace.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign_namespace.h @@ -18,6 +18,12 @@ #define PARAM_JOIN2(a, b) PARAM_JOIN2_(a, b) #define PARAM_NAME2(end, s) PARAM_JOIN2(end, s) +#ifndef DISABLE_NAMESPACING +#define SQISIGN_NAMESPACE_GENERIC(s) PARAM_NAME2(gen, s) +#else +#define SQISIGN_NAMESPACE_GENERIC(s) s +#endif + #if defined(SQISIGN_VARIANT) && !defined(DISABLE_NAMESPACING) #if defined(SQISIGN_BUILD_TYPE_REF) #define SQISIGN_NAMESPACE(s) PARAM_NAME3(ref, s) @@ -94,6 +100,16 @@ #define lift_basis SQISIGN_NAMESPACE(lift_basis) #define lift_basis_normalized SQISIGN_NAMESPACE(lift_basis_normalized) +// Namespacing symbols exported from basis.c, ec.c: +#undef xDBL_E0 + +#define xDBL_E0 SQISIGN_NAMESPACE(xDBL_E0) + +// Namespacing symbols exported from basis.c, ec.c, isog_chains.c: +#undef xDBL_A24 + +#define xDBL_A24 SQISIGN_NAMESPACE(xDBL_A24) + // Namespacing symbols exported from biextension.c: #undef clear_cofac #undef ec_dlog_2_tate @@ -109,6 +125,11 @@ #define reduced_tate SQISIGN_NAMESPACE(reduced_tate) #define weil SQISIGN_NAMESPACE(weil) +// Namespacing symbols exported from biextension.c, ec_jac.c, hd.c: +#undef ADD + +#define ADD SQISIGN_NAMESPACE(ADD) + // Namespacing symbols exported from common.c: #undef hash_to_challenge #undef public_key_finalize @@ -148,6 +169,28 @@ #define find_uv SQISIGN_NAMESPACE(find_uv) #define fixed_degree_isogeny_and_eval SQISIGN_NAMESPACE(fixed_degree_isogeny_and_eval) +// Namespacing symbols exported from dim2id2iso.c, encode_signature.c, id2iso.c, keygen.c, quaternion_data.c, sign.c: +#undef EXTREMAL_ORDERS +#undef QUATALG_PINFTY + +#define EXTREMAL_ORDERS SQISIGN_NAMESPACE(EXTREMAL_ORDERS) +#define QUATALG_PINFTY SQISIGN_NAMESPACE(QUATALG_PINFTY) + +// Namespacing symbols exported from dim2id2iso.c, endomorphism_action.c, id2iso.c: +#undef CURVES_WITH_ENDOMORPHISMS + +#define CURVES_WITH_ENDOMORPHISMS SQISIGN_NAMESPACE(CURVES_WITH_ENDOMORPHISMS) + +// Namespacing symbols exported from dim2id2iso.c, id2iso.c, sign.c, torsion_constants.c: +#undef TORSION_PLUS_2POWER + +#define TORSION_PLUS_2POWER SQISIGN_NAMESPACE(TORSION_PLUS_2POWER) + +// Namespacing symbols exported from dim2id2iso.c, quaternion_data.c: +#undef CONNECTING_IDEALS + +#define CONNECTING_IDEALS SQISIGN_NAMESPACE(CONNECTING_IDEALS) + // Namespacing symbols 
exported from dim4.c: #undef ibz_inv_dim4_make_coeff_mpm #undef ibz_inv_dim4_make_coeff_pmp @@ -207,6 +250,13 @@ #define ibz_vec_4_sub SQISIGN_NAMESPACE(ibz_vec_4_sub) #define quat_qf_eval SQISIGN_NAMESPACE(quat_qf_eval) +// Namespacing symbols exported from e0_basis.c: +#undef BASIS_E0_PX +#undef BASIS_E0_QX + +#define BASIS_E0_PX SQISIGN_NAMESPACE(BASIS_E0_PX) +#define BASIS_E0_QX SQISIGN_NAMESPACE(BASIS_E0_QX) + // Namespacing symbols exported from ec.c: #undef cswap_points #undef ec_biscalar_mul @@ -235,8 +285,6 @@ #undef xDBL #undef xDBLADD #undef xDBLMUL -#undef xDBL_A24 -#undef xDBL_E0 #undef xMUL #define cswap_points SQISIGN_NAMESPACE(cswap_points) @@ -266,14 +314,9 @@ #define xDBL SQISIGN_NAMESPACE(xDBL) #define xDBLADD SQISIGN_NAMESPACE(xDBLADD) #define xDBLMUL SQISIGN_NAMESPACE(xDBLMUL) -#define xDBL_A24 SQISIGN_NAMESPACE(xDBL_A24) -#define xDBL_E0 SQISIGN_NAMESPACE(xDBL_E0) #define xMUL SQISIGN_NAMESPACE(xMUL) // Namespacing symbols exported from ec_jac.c: -#undef ADD -#undef DBL -#undef DBLW #undef copy_jac_point #undef jac_from_ws #undef jac_init @@ -284,9 +327,6 @@ #undef jac_to_xz_add_components #undef select_jac_point -#define ADD SQISIGN_NAMESPACE(ADD) -#define DBL SQISIGN_NAMESPACE(DBL) -#define DBLW SQISIGN_NAMESPACE(DBLW) #define copy_jac_point SQISIGN_NAMESPACE(copy_jac_point) #define jac_from_ws SQISIGN_NAMESPACE(jac_from_ws) #define jac_init SQISIGN_NAMESPACE(jac_init) @@ -297,6 +337,21 @@ #define jac_to_xz_add_components SQISIGN_NAMESPACE(jac_to_xz_add_components) #define select_jac_point SQISIGN_NAMESPACE(select_jac_point) +// Namespacing symbols exported from ec_jac.c, hd.c: +#undef DBLW + +#define DBLW SQISIGN_NAMESPACE(DBLW) + +// Namespacing symbols exported from ec_jac.c, hd.c, theta_isogenies.c: +#undef DBL + +#define DBL SQISIGN_NAMESPACE(DBL) + +// Namespacing symbols exported from ec_params.c: +#undef p_cofactor_for_2f + +#define p_cofactor_for_2f SQISIGN_NAMESPACE(p_cofactor_for_2f) + // Namespacing symbols exported from encode_signature.c: #undef secret_key_from_bytes #undef secret_key_to_bytes @@ -455,21 +510,24 @@ #define fp_set_one SQISIGN_NAMESPACE(fp_set_one) #define fp_set_small SQISIGN_NAMESPACE(fp_set_small) #define fp_set_zero SQISIGN_NAMESPACE(fp_set_zero) -#define ONE SQISIGN_NAMESPACE(ONE) -#define ZERO SQISIGN_NAMESPACE(ZERO) // Namespacing symbols exported from fp_p27500_64.c, fp_p5248_64.c, fp_p65376_64.c, gf27500.c, gf5248.c, gf65376.c: +#undef ONE +#undef ZERO #undef fp_add #undef fp_mul #undef fp_sqr #undef fp_sub +#define ONE SQISIGN_NAMESPACE(ONE) +#define ZERO SQISIGN_NAMESPACE(ZERO) #define fp_add SQISIGN_NAMESPACE(fp_add) #define fp_mul SQISIGN_NAMESPACE(fp_mul) #define fp_sqr SQISIGN_NAMESPACE(fp_sqr) #define fp_sub SQISIGN_NAMESPACE(fp_sub) // Namespacing symbols exported from gf27500.c: +#undef gf27500_MINUS_ONE #undef gf27500_decode #undef gf27500_decode_reduce #undef gf27500_div @@ -479,6 +537,7 @@ #undef gf27500_legendre #undef gf27500_sqrt +#define gf27500_MINUS_ONE SQISIGN_NAMESPACE(gf27500_MINUS_ONE) #define gf27500_decode SQISIGN_NAMESPACE(gf27500_decode) #define gf27500_decode_reduce SQISIGN_NAMESPACE(gf27500_decode_reduce) #define gf27500_div SQISIGN_NAMESPACE(gf27500_div) @@ -500,6 +559,7 @@ #define fp2_sq_c1 SQISIGN_NAMESPACE(fp2_sq_c1) // Namespacing symbols exported from gf5248.c: +#undef gf5248_MINUS_ONE #undef gf5248_decode #undef gf5248_decode_reduce #undef gf5248_div @@ -509,6 +569,7 @@ #undef gf5248_legendre #undef gf5248_sqrt +#define gf5248_MINUS_ONE SQISIGN_NAMESPACE(gf5248_MINUS_ONE) #define gf5248_decode 
SQISIGN_NAMESPACE(gf5248_decode) #define gf5248_decode_reduce SQISIGN_NAMESPACE(gf5248_decode_reduce) #define gf5248_div SQISIGN_NAMESPACE(gf5248_div) @@ -519,6 +580,7 @@ #define gf5248_sqrt SQISIGN_NAMESPACE(gf5248_sqrt) // Namespacing symbols exported from gf65376.c: +#undef gf65376_MINUS_ONE #undef gf65376_decode #undef gf65376_decode_reduce #undef gf65376_div @@ -528,6 +590,7 @@ #undef gf65376_legendre #undef gf65376_sqrt +#define gf65376_MINUS_ONE SQISIGN_NAMESPACE(gf65376_MINUS_ONE) #define gf65376_decode SQISIGN_NAMESPACE(gf65376_decode) #define gf65376_decode_reduce SQISIGN_NAMESPACE(gf65376_decode_reduce) #define gf65376_div SQISIGN_NAMESPACE(gf65376_div) @@ -554,6 +617,22 @@ #define double_couple_point SQISIGN_NAMESPACE(double_couple_point) #define double_couple_point_iter SQISIGN_NAMESPACE(double_couple_point_iter) +// Namespacing symbols exported from hd_splitting_transforms.c: +#undef CHI_EVAL + +#define CHI_EVAL SQISIGN_NAMESPACE(CHI_EVAL) + +// Namespacing symbols exported from hd_splitting_transforms.c, theta_isogenies.c: +#undef EVEN_INDEX +#undef FP2_CONSTANTS +#undef NORMALIZATION_TRANSFORMS +#undef SPLITTING_TRANSFORMS + +#define EVEN_INDEX SQISIGN_NAMESPACE(EVEN_INDEX) +#define FP2_CONSTANTS SQISIGN_NAMESPACE(FP2_CONSTANTS) +#define NORMALIZATION_TRANSFORMS SQISIGN_NAMESPACE(NORMALIZATION_TRANSFORMS) +#define SPLITTING_TRANSFORMS SQISIGN_NAMESPACE(SPLITTING_TRANSFORMS) + // Namespacing symbols exported from hnf.c: #undef ibz_mat_4x4_is_hnf #undef ibz_mat_4xn_hnf_mod_core @@ -761,6 +840,11 @@ #define secret_key_finalize SQISIGN_NAMESPACE(secret_key_finalize) #define secret_key_init SQISIGN_NAMESPACE(secret_key_init) +// Namespacing symbols exported from keygen.c, torsion_constants.c: +#undef SEC_DEGREE + +#define SEC_DEGREE SQISIGN_NAMESPACE(SEC_DEGREE) + // Namespacing symbols exported from l2.c: #undef quat_lattice_lll #undef quat_lll_core @@ -910,6 +994,16 @@ #define quat_lattice_print SQISIGN_NAMESPACE(quat_lattice_print) #define quat_left_ideal_print SQISIGN_NAMESPACE(quat_left_ideal_print) +// Namespacing symbols exported from quaternion_data.c: +#undef CONJUGATING_ELEMENTS + +#define CONJUGATING_ELEMENTS SQISIGN_NAMESPACE(CONJUGATING_ELEMENTS) + +// Namespacing symbols exported from quaternion_data.c, sign.c: +#undef QUAT_prime_cofactor + +#define QUAT_prime_cofactor SQISIGN_NAMESPACE(QUAT_prime_cofactor) + // Namespacing symbols exported from random_input_generation.c: #undef quat_test_input_random_ideal_generation #undef quat_test_input_random_ideal_lattice_generation @@ -971,6 +1065,11 @@ #define protocols_sign SQISIGN_NAMESPACE(protocols_sign) +// Namespacing symbols exported from sign.c, torsion_constants.c: +#undef COM_DEGREE + +#define COM_DEGREE SQISIGN_NAMESPACE(COM_DEGREE) + // Namespacing symbols exported from sqisign.c: #undef sqisign_keypair #undef sqisign_open @@ -1006,6 +1105,11 @@ #define is_product_theta_point SQISIGN_NAMESPACE(is_product_theta_point) #define theta_precomputation SQISIGN_NAMESPACE(theta_precomputation) +// Namespacing symbols exported from torsion_constants.c: +#undef TWO_TO_SECURITY_BITS + +#define TWO_TO_SECURITY_BITS SQISIGN_NAMESPACE(TWO_TO_SECURITY_BITS) + // Namespacing symbols exported from verify.c: #undef protocols_verify @@ -1029,45 +1133,7 @@ #define xisog_2_singular SQISIGN_NAMESPACE(xisog_2_singular) #define xisog_4 SQISIGN_NAMESPACE(xisog_4) -// Namespacing symbols from precomp: -#undef BASIS_E0_PX -#undef BASIS_E0_QX -#undef p_cofactor_for_2f -#undef CURVES_WITH_ENDOMORPHISMS -#undef EVEN_INDEX -#undef CHI_EVAL 
-#undef FP2_CONSTANTS -#undef SPLITTING_TRANSFORMS -#undef NORMALIZATION_TRANSFORMS -#undef QUAT_prime_cofactor -#undef QUATALG_PINFTY -#undef EXTREMAL_ORDERS -#undef CONNECTING_IDEALS -#undef CONJUGATING_ELEMENTS -#undef TWO_TO_SECURITY_BITS -#undef TORSION_PLUS_2POWER -#undef SEC_DEGREE -#undef COM_DEGREE - -#define BASIS_E0_PX SQISIGN_NAMESPACE(BASIS_E0_PX) -#define BASIS_E0_QX SQISIGN_NAMESPACE(BASIS_E0_QX) -#define p_cofactor_for_2f SQISIGN_NAMESPACE(p_cofactor_for_2f) -#define CURVES_WITH_ENDOMORPHISMS SQISIGN_NAMESPACE(CURVES_WITH_ENDOMORPHISMS) -#define EVEN_INDEX SQISIGN_NAMESPACE(EVEN_INDEX) -#define CHI_EVAL SQISIGN_NAMESPACE(CHI_EVAL) -#define FP2_CONSTANTS SQISIGN_NAMESPACE(FP2_CONSTANTS) -#define SPLITTING_TRANSFORMS SQISIGN_NAMESPACE(SPLITTING_TRANSFORMS) -#define NORMALIZATION_TRANSFORMS SQISIGN_NAMESPACE(NORMALIZATION_TRANSFORMS) -#define QUAT_prime_cofactor SQISIGN_NAMESPACE(QUAT_prime_cofactor) -#define QUATALG_PINFTY SQISIGN_NAMESPACE(QUATALG_PINFTY) -#define EXTREMAL_ORDERS SQISIGN_NAMESPACE(EXTREMAL_ORDERS) -#define CONNECTING_IDEALS SQISIGN_NAMESPACE(CONNECTING_IDEALS) -#define CONJUGATING_ELEMENTS SQISIGN_NAMESPACE(CONJUGATING_ELEMENTS) -#define TWO_TO_SECURITY_BITS SQISIGN_NAMESPACE(TWO_TO_SECURITY_BITS) -#define TORSION_PLUS_2POWER SQISIGN_NAMESPACE(TORSION_PLUS_2POWER) -#define SEC_DEGREE SQISIGN_NAMESPACE(SEC_DEGREE) -#define COM_DEGREE SQISIGN_NAMESPACE(COM_DEGREE) - #endif +// This file is generated by scripts/Namespace.scala, do not edit it manually! From 6bde07d535537737421d2d4eafb8038b58b48d7b Mon Sep 17 00:00:00 2001 From: Basil Hess Date: Fri, 15 Aug 2025 11:32:27 +0200 Subject: [PATCH 06/19] pull Signed-off-by: Basil Hess --- .CMake/alg_support.cmake | 2 +- docs/algorithms/sig/sqisign.md | 2 +- docs/algorithms/sig/sqisign.yml | 2 +- .../copy_from_upstream/copy_from_upstream.yml | 2 +- .../the-sqisign_sqisign_lvl1_broadwell/dpe.h | 743 ------------------ .../the-sqisign_sqisign_lvl1_broadwell/l2.c | 271 +++++-- .../the-sqisign_sqisign_lvl1_ref/COPYING.LGPL | 165 ---- .../the-sqisign_sqisign_lvl1_ref/dpe.h | 743 ------------------ .../sqisign/the-sqisign_sqisign_lvl1_ref/l2.c | 271 +++++-- .../COPYING.LGPL | 165 ---- .../the-sqisign_sqisign_lvl3_broadwell/dpe.h | 743 ------------------ .../the-sqisign_sqisign_lvl3_broadwell/l2.c | 271 +++++-- .../the-sqisign_sqisign_lvl3_broadwell/mem.h | 24 - .../the-sqisign_sqisign_lvl3_ref/COPYING.LGPL | 165 ---- .../the-sqisign_sqisign_lvl3_ref/dpe.h | 743 ------------------ .../sqisign/the-sqisign_sqisign_lvl3_ref/l2.c | 271 +++++-- .../COPYING.LGPL | 165 ---- .../the-sqisign_sqisign_lvl5_broadwell/dpe.h | 743 ------------------ .../the-sqisign_sqisign_lvl5_broadwell/l2.c | 271 +++++-- .../the-sqisign_sqisign_lvl5_ref/COPYING.LGPL | 165 ---- .../the-sqisign_sqisign_lvl5_ref/dpe.h | 743 ------------------ .../sqisign/the-sqisign_sqisign_lvl5_ref/l2.c | 271 +++++-- 22 files changed, 1264 insertions(+), 5677 deletions(-) delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dpe.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/COPYING.LGPL delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dpe.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/COPYING.LGPL delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dpe.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mem.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/COPYING.LGPL delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dpe.h delete 
mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/COPYING.LGPL delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dpe.h delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/COPYING.LGPL delete mode 100644 src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dpe.h diff --git a/.CMake/alg_support.cmake b/.CMake/alg_support.cmake index a1472d3324..3fe002f406 100644 --- a/.CMake/alg_support.cmake +++ b/.CMake/alg_support.cmake @@ -256,7 +256,7 @@ elseif (${OQS_ALGS_ENABLED} STREQUAL "STD") elseif(${OQS_ALGS_ENABLED} STREQUAL "NIST_R4") filter_algs("KEM_classic_mceliece_348864;KEM_classic_mceliece_348864f;KEM_classic_mceliece_460896;KEM_classic_mceliece_460896f;KEM_classic_mceliece_6688128;KEM_classic_mceliece_6688128f;KEM_classic_mceliece_6960119;KEM_classic_mceliece_6960119f;KEM_classic_mceliece_8192128;KEM_classic_mceliece_8192128f;KEM_hqc_128;KEM_hqc_192;KEM_hqc_256;KEM_bike_l1;KEM_bike_l3;KEM_bike_l5") elseif(${OQS_ALGS_ENABLED} STREQUAL "NIST_SIG_ONRAMP") - filter_algs("SIG_mayo_1;SIG_mayo_2;SIG_mayo_3;SIG_mayo_5;SIG_cross_rsdp_128_balanced;SIG_cross_rsdp_128_fast;SIG_cross_rsdp_128_small;SIG_cross_rsdp_192_balanced;SIG_cross_rsdp_192_fast;SIG_cross_rsdp_192_small;SIG_cross_rsdp_256_balanced;SIG_cross_rsdp_256_fast;SIG_cross_rsdp_256_small;SIG_cross_rsdpg_128_balanced;SIG_cross_rsdpg_128_fast;SIG_cross_rsdpg_128_small;SIG_cross_rsdpg_192_balanced;SIG_cross_rsdpg_192_fast;SIG_cross_rsdpg_192_small;SIG_cross_rsdpg_256_balanced;SIG_cross_rsdpg_256_fast;SIG_cross_rsdpg_256_small;SIG_uov_ov_Ip;SIG_uov_ov_Is;SIG_uov_ov_III;SIG_uov_ov_V;SIG_uov_ov_Ip_pkc;SIG_uov_ov_Is_pkc;SIG_uov_ov_III_pkc;SIG_uov_ov_V_pkc;SIG_uov_ov_Ip_pkc_skc;SIG_uov_ov_Is_pkc_skc;SIG_uov_ov_III_pkc_skc;SIG_uov_ov_V_pkc_skc;SNOVA_24_5_4;SNOVA_24_5_4_SHAKE;SNOVA_24_5_4_esk;SNOVA_24_5_4_SHAKE_esk;SNOVA_37_17_2;SNOVA_25_8_3;SNOVA_56_25_2;SNOVA_49_11_3;SNOVA_37_8_4;SNOVA_24_5_5;SNOVA_60_10_4;SNOVA_29_6_5") + filter_algs("SIG_mayo_1;SIG_mayo_2;SIG_mayo_3;SIG_mayo_5;SIG_cross_rsdp_128_balanced;SIG_cross_rsdp_128_fast;SIG_cross_rsdp_128_small;SIG_cross_rsdp_192_balanced;SIG_cross_rsdp_192_fast;SIG_cross_rsdp_192_small;SIG_cross_rsdp_256_balanced;SIG_cross_rsdp_256_fast;SIG_cross_rsdp_256_small;SIG_cross_rsdpg_128_balanced;SIG_cross_rsdpg_128_fast;SIG_cross_rsdpg_128_small;SIG_cross_rsdpg_192_balanced;SIG_cross_rsdpg_192_fast;SIG_cross_rsdpg_192_small;SIG_cross_rsdpg_256_balanced;SIG_cross_rsdpg_256_fast;SIG_cross_rsdpg_256_small;SIG_uov_ov_Ip;SIG_uov_ov_Is;SIG_uov_ov_III;SIG_uov_ov_V;SIG_uov_ov_Ip_pkc;SIG_uov_ov_Is_pkc;SIG_uov_ov_III_pkc;SIG_uov_ov_V_pkc;SIG_uov_ov_Ip_pkc_skc;SIG_uov_ov_Is_pkc_skc;SIG_uov_ov_III_pkc_skc;SIG_uov_ov_V_pkc_skc;SNOVA_24_5_4;SNOVA_24_5_4_SHAKE;SNOVA_24_5_4_esk;SNOVA_24_5_4_SHAKE_esk;SNOVA_37_17_2;SNOVA_25_8_3;SNOVA_56_25_2;SNOVA_49_11_3;SNOVA_37_8_4;SNOVA_24_5_5;SNOVA_60_10_4;SNOVA_29_6_5;SIG_sqisign_lvl1;SIG_sqisign_lvl3;SIG_sqisign_lvl5") else() message(STATUS "Alg enablement unchanged") endif() diff --git a/docs/algorithms/sig/sqisign.md b/docs/algorithms/sig/sqisign.md index df6d95d2b6..3a5b213015 100644 --- a/docs/algorithms/sig/sqisign.md +++ b/docs/algorithms/sig/sqisign.md @@ -6,7 +6,7 @@ - **Authors' website**: https://sqisign.org/ - **Specification version**: Round 2. 
- **Primary Source**: - - **Source**: https://github.com/bhess/the-sqisign/commit/39b09acd532c69e3fb1206b4502572479288df92 + - **Source**: https://github.com/bhess/the-sqisign/commit/323648fa9c28c69b3f24d3cc22986530ffe8e8d7 - **Implementation license (SPDX-Identifier)**: Apache-2.0 diff --git a/docs/algorithms/sig/sqisign.yml b/docs/algorithms/sig/sqisign.yml index 541018a944..0dc5498412 100644 --- a/docs/algorithms/sig/sqisign.yml +++ b/docs/algorithms/sig/sqisign.yml @@ -36,7 +36,7 @@ website: https://sqisign.org/ nist-round: 2 spec-version: Round 2 primary-upstream: - source: https://github.com/bhess/the-sqisign/commit/39b09acd532c69e3fb1206b4502572479288df92 + source: https://github.com/bhess/the-sqisign/commit/323648fa9c28c69b3f24d3cc22986530ffe8e8d7 spdx-license-identifier: Apache-2.0 parameter-sets: - name: SQIsign-lvl1 diff --git a/scripts/copy_from_upstream/copy_from_upstream.yml b/scripts/copy_from_upstream/copy_from_upstream.yml index 0e49664ddb..f378ffc6a7 100644 --- a/scripts/copy_from_upstream/copy_from_upstream.yml +++ b/scripts/copy_from_upstream/copy_from_upstream.yml @@ -96,7 +96,7 @@ upstreams: name: the-sqisign git_url: https://github.com/bhess/the-sqisign.git git_branch: oqs - git_commit: 39b09acd532c69e3fb1206b4502572479288df92 + git_commit: 323648fa9c28c69b3f24d3cc22986530ffe8e8d7 sig_scheme_path: '.' sig_meta_path: 'integration/liboqs/{pqclean_scheme}.yml' diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dpe.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dpe.h deleted file mode 100644 index b9a7a35e0b..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dpe.h +++ /dev/null @@ -1,743 +0,0 @@ -/* Copyright (C) 2004-2024 Patrick Pelissier, Paul Zimmermann, LORIA/INRIA. - -This file is part of the DPE Library. - -The DPE Library is free software; you can redistribute it and/or modify -it under the terms of the GNU Lesser General Public License as published by -the Free Software Foundation; either version 3 of the License, or (at your -option) any later version. - -The DPE Library is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public -License for more details. - -You should have received a copy of the GNU Lesser General Public License -along with the DPE Library; see the file COPYING.LIB. -If not, see <https://www.gnu.org/licenses/>. */ - -#ifndef __DPE -#define __DPE - -#include <stdlib.h> /* For abort */ -#include <stdio.h> /* For fprintf */ -#include <math.h> /* for round, floor, ceil */ -#include <limits.h> - -/* if you change the version, please change it in Makefile too */ -#define DPE_VERSION_MAJOR 1 -#define DPE_VERSION_MINOR 7 - -#if defined(__GNUC__) && (__GNUC__ >= 3) -# define DPE_LIKELY(x) (__builtin_expect(!!(x),1)) -# define DPE_UNLIKELY(x) (__builtin_expect((x),0)) -# define DPE_UNUSED_ATTR __attribute__((unused)) -#else -# define DPE_LIKELY(x) (x) -# define DPE_UNLIKELY(x) (x) -# define DPE_UNUSED_ATTR -#endif - -/* If no user defined mode, define it to double */ -#if !defined(DPE_USE_DOUBLE) && !defined(DPE_USE_LONGDOUBLE) && !defined(DPE_USE_FLOAT128) -# define DPE_USE_DOUBLE -#endif - -#if defined(DPE_USE_DOUBLE) && defined(DPE_USE_LONGDOUBLE) -# error "Either DPE_USE_DOUBLE or DPE_USE_LONGDOUBLE shall be defined." -#elif defined(DPE_USE_DOUBLE) && defined(DPE_USE_USE_FLOAT128) -# error "Either DPE_USE_DOUBLE or DPE_USE_FLOAT128 shall be defined."
-#elif defined(DPE_USE_LONG_DOUBLE) && defined(DPE_USE_USE_FLOAT128) -# error "Either DPE_USE_LONG_DOUBLE or DPE_USE_FLOAT128 shall be defined." -#endif - -#if (defined(__i386) || defined (__x86_64)) && !defined(DPE_LITTLEENDIAN32) && defined(DPE_USE_DOUBLE) -# define DPE_LITTLEENDIAN32 -#endif - -#if (defined(__STDC_VERSION__) && (__STDC_VERSION__>=199901L)) || defined (__GLIBC__) -# define DPE_DEFINE_ROUND_TRUNC -#endif - -#if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 43 -# define DPE_ISFINITE __builtin_isfinite -#elif defined(isfinite) -# define DPE_ISFINITE isfinite /* new C99 function */ -#else -# define DPE_ISFINITE finite /* obsolete BSD function */ -#endif - -/* DPE_LDEXP(DPE_DOUBLE m, DPEEXP e) return x = m * 2^e */ -/* DPE_FREXP(DPE_DOUBLE x, DPEEXP *e) returns m, e such that x = m * 2^e with - 1/2 <= m < 1 */ -/* DPE_ROUND(DPE_DOUBLE x) returns the nearest integer to x */ -#if defined(DPE_USE_DOUBLE) -# define DPE_DOUBLE double /* mantissa type */ -# define DPE_BITSIZE 53 /* bitsize of DPE_DOUBLE */ -# define DPE_2_POW_BITSIZE 0x1P53 -# if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 40 -# define DPE_LDEXP __builtin_ldexp -# define DPE_FREXP __builtin_frexp -# define DPE_FLOOR __builtin_floor -# define DPE_CEIL __builtin_ceil -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND __builtin_round -# define DPE_TRUNC __builtin_trunc -# endif -# else -# define DPE_LDEXP ldexp -# define DPE_FREXP frexp -# define DPE_FLOOR floor -# define DPE_CEIL ceil -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND round -# define DPE_TRUNC trunc -# endif -# endif - -#elif defined(DPE_USE_LONGDOUBLE) -# define DPE_DOUBLE long double -# define DPE_BITSIZE 64 -# define DPE_2_POW_BITSIZE 0x1P64 -# define DPE_LDEXP ldexpl -# define DPE_FREXP frexpl -# define DPE_FLOOR floorl -# define DPE_CEIL ceill -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND roundl -# define DPE_TRUNC truncl -# endif - -#elif defined(DPE_USE_FLOAT128) -# include "quadmath.h" -# define DPE_DOUBLE __float128 -# define DPE_BITSIZE 113 -# define DPE_2_POW_BITSIZE 0x1P113 -# define DPE_LDEXP ldexpq -# define DPE_FLOOR floorq -# define DPE_CEIL ceilq -# define DPE_FREXP frexpq -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND roundq -# define DPE_TRUNC truncq -# endif - -#else -# error "neither DPE_USE_DOUBLE, nor DPE_USE_LONGDOUBLE, nor DPE_USE_FLOAT128 is defined" -#endif - -/* If no C99, do what we can */ -#ifndef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND(x) ((DPE_DOUBLE) ((long long) ((x) + ((x) >= 0.0 ? 0.5 : -0.5)))) -# define DPE_TRUNC(x) ((DPE_DOUBLE) ((long long) ((x) + 0.0))) -#endif - -#if defined(DPE_USE_LONG) -# define DPE_EXP_T long /* exponent type */ -# define DPE_EXPMIN LONG_MIN /* smallest possible exponent */ -#elif defined(DPE_USE_LONGLONG) -# define DPE_EXP_T long long -# define DPE_EXPMIN LLONG_MIN -#else -# define DPE_EXP_T int /* exponent type */ -# define DPE_EXPMIN INT_MIN /* smallest possible exponent */ -#endif - -#ifdef DPE_LITTLEENDIAN32 -typedef union -{ - double d; -#if INT_MAX == 0x7FFFFFFFL - int i[2]; -#elif LONG_MAX == 0x7FFFFFFFL - long i[2]; -#elif SHRT_MAX == 0x7FFFFFFFL - short i[2]; -#else -# error Cannot find a 32 bits integer type. -#endif -} dpe_double_words; -#endif - -typedef struct -{ - DPE_DOUBLE d; /* significand */ - DPE_EXP_T exp; /* exponent */ -} dpe_struct; - -typedef dpe_struct dpe_t[1]; - -#define DPE_MANT(x) ((x)->d) -#define DPE_EXP(x) ((x)->exp) -#define DPE_SIGN(x) ((DPE_MANT(x) < 0.0) ? 
-1 : (DPE_MANT(x) > 0.0)) - -#define DPE_INLINE static inline - -/* initialize */ -DPE_INLINE void -dpe_init (dpe_t x DPE_UNUSED_ATTR) -{ -} - -/* clear */ -DPE_INLINE void -dpe_clear (dpe_t x DPE_UNUSED_ATTR) -{ -} - -/* set x to y */ -DPE_INLINE void -dpe_set (dpe_t x, dpe_t y) -{ - DPE_MANT(x) = DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y); -} - -/* set x to -y */ -DPE_INLINE void -dpe_neg (dpe_t x, dpe_t y) -{ - DPE_MANT(x) = -DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y); -} - -/* set x to |y| */ -DPE_INLINE void -dpe_abs (dpe_t x, dpe_t y) -{ - DPE_MANT(x) = (DPE_MANT(y) >= 0) ? DPE_MANT(y) : -DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y); -} - -/* set mantissa in [1/2, 1), except for 0 which has minimum exponent */ -/* FIXME: don't inline this function yet ? */ -static void -dpe_normalize (dpe_t x) -{ - if (DPE_UNLIKELY (DPE_MANT(x) == 0.0 || DPE_ISFINITE (DPE_MANT(x)) == 0)) - { - if (DPE_MANT(x) == 0.0) - DPE_EXP(x) = DPE_EXPMIN; - /* otherwise let the exponent of NaN, Inf unchanged */ - } - else - { - DPE_EXP_T e; -#ifdef DPE_LITTLEENDIAN32 /* 32-bit little endian */ - dpe_double_words dw; - dw.d = DPE_MANT(x); - e = (dw.i[1] >> 20) & 0x7FF; /* unbiased exponent, 1022 for m=1/2 */ - DPE_EXP(x) += e - 1022; - dw.i[1] = (dw.i[1] & 0x800FFFFF) | 0x3FE00000; - DPE_MANT(x) = dw.d; -#else /* portable code */ - double m = DPE_MANT(x); - DPE_MANT(x) = DPE_FREXP (m, &e); - DPE_EXP(x) += e; -#endif - } -} - -#if defined(DPE_USE_DOUBLE) -static const double dpe_scale_tab[54] = { - 0x1P0, 0x1P-1, 0x1P-2, 0x1P-3, 0x1P-4, 0x1P-5, 0x1P-6, 0x1P-7, 0x1P-8, - 0x1P-9, 0x1P-10, 0x1P-11, 0x1P-12, 0x1P-13, 0x1P-14, 0x1P-15, 0x1P-16, - 0x1P-17, 0x1P-18, 0x1P-19, 0x1P-20, 0x1P-21, 0x1P-22, 0x1P-23, 0x1P-24, - 0x1P-25, 0x1P-26, 0x1P-27, 0x1P-28, 0x1P-29, 0x1P-30, 0x1P-31, 0x1P-32, - 0x1P-33, 0x1P-34, 0x1P-35, 0x1P-36, 0x1P-37, 0x1P-38, 0x1P-39, 0x1P-40, - 0x1P-41, 0x1P-42, 0x1P-43, 0x1P-44, 0x1P-45, 0x1P-46, 0x1P-47, 0x1P-48, - 0x1P-49, 0x1P-50, 0x1P-51, 0x1P-52, 0x1P-53}; -#endif - -DPE_INLINE DPE_DOUBLE -dpe_scale (DPE_DOUBLE d, int s) -{ - /* -DPE_BITSIZE < s <= 0 and 1/2 <= d < 1 */ -#if defined(DPE_USE_DOUBLE) - return d * dpe_scale_tab [-s]; -#else /* portable code */ - return DPE_LDEXP (d, s); -#endif -} - -/* set x to y */ -DPE_INLINE void -dpe_set_d (dpe_t x, double y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -/* set x to y */ -DPE_INLINE void -dpe_set_ld (dpe_t x, long double y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -/* set x to y */ -DPE_INLINE void -dpe_set_ui (dpe_t x, unsigned long y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -/* set x to y */ -DPE_INLINE void -dpe_set_si (dpe_t x, long y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -DPE_INLINE long -dpe_get_si (dpe_t x) -{ - DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); - return (long) d; -} - -DPE_INLINE unsigned long -dpe_get_ui (dpe_t x) -{ - DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); - return (d < 0.0) ? 
0 : (unsigned long) d; -} - -DPE_INLINE double -dpe_get_d (dpe_t x) -{ - return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); -} - -DPE_INLINE long double -dpe_get_ld (dpe_t x) -{ - return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); -} - -#if defined(__GMP_H__) || defined(__MINI_GMP_H__) -/* set x to y */ -DPE_INLINE void -dpe_set_z (dpe_t x, mpz_t y) -{ - long e; - DPE_MANT(x) = mpz_get_d_2exp (&e, y); - DPE_EXP(x) = (DPE_EXP_T) e; -} - -/* set x to y, rounded to nearest */ -DPE_INLINE void -dpe_get_z (mpz_t x, dpe_t y) -{ - DPE_EXP_T ey = DPE_EXP(y); - if (ey >= DPE_BITSIZE) /* y is an integer */ - { - DPE_DOUBLE d = DPE_MANT(y) * DPE_2_POW_BITSIZE; /* d is an integer */ - mpz_set_d (x, d); /* should be exact */ - mpz_mul_2exp (x, x, (unsigned long) ey - DPE_BITSIZE); - } - else /* DPE_EXP(y) < DPE_BITSIZE */ - { - if (DPE_UNLIKELY (ey < 0)) /* |y| < 1/2 */ - mpz_set_ui (x, 0); - else - { - DPE_DOUBLE d = DPE_LDEXP(DPE_MANT(y), ey); - mpz_set_d (x, (double) DPE_ROUND(d)); - } - } -} - -/* return e and x such that y = x*2^e */ -DPE_INLINE mp_exp_t -dpe_get_z_exp (mpz_t x, dpe_t y) -{ - mpz_set_d (x, DPE_MANT (y) * DPE_2_POW_BITSIZE); - return DPE_EXP(y) - DPE_BITSIZE; -} -#endif - -/* x <- y + z, assuming y and z are normalized, returns x normalized */ -DPE_INLINE void -dpe_add (dpe_t x, dpe_t y, dpe_t z) -{ - if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) - /* |z| < 1/2*ulp(y), thus o(y+z) = y */ - dpe_set (x, y); - else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) - dpe_set (x, z); - else - { - DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ - - if (d >= 0) - { - DPE_MANT(x) = DPE_MANT(y) + dpe_scale (DPE_MANT(z), -d); - DPE_EXP(x) = DPE_EXP(y); - } - else - { - DPE_MANT(x) = DPE_MANT(z) + dpe_scale (DPE_MANT(y), d); - DPE_EXP(x) = DPE_EXP(z); - } - dpe_normalize (x); - } -} - -/* x <- y - z, assuming y and z are normalized, returns x normalized */ -DPE_INLINE void -dpe_sub (dpe_t x, dpe_t y, dpe_t z) -{ - if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) - /* |z| < 1/2*ulp(y), thus o(y-z) = y */ - dpe_set (x, y); - else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) - dpe_neg (x, z); - else - { - DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ - - if (d >= 0) - { - DPE_MANT(x) = DPE_MANT(y) - dpe_scale (DPE_MANT(z), -d); - DPE_EXP(x) = DPE_EXP(y); - } - else - { - DPE_MANT(x) = dpe_scale (DPE_MANT(y), d) - DPE_MANT(z); - DPE_EXP(x) = DPE_EXP(z); - } - dpe_normalize (x); - } -} - -/* x <- y * z, assuming y and z are normalized, returns x normalized */ -DPE_INLINE void -dpe_mul (dpe_t x, dpe_t y, dpe_t z) -{ - DPE_MANT(x) = DPE_MANT(y) * DPE_MANT(z); - DPE_EXP(x) = DPE_EXP(y) + DPE_EXP(z); - dpe_normalize (x); -} - -/* x <- sqrt(y), assuming y is normalized, returns x normalized */ -DPE_INLINE void -dpe_sqrt (dpe_t x, dpe_t y) -{ - DPE_EXP_T ey = DPE_EXP(y); - if (ey % 2) - { - /* since 1/2 <= my < 1, 1/4 <= my/2 < 1 */ - DPE_MANT(x) = sqrt (0.5 * DPE_MANT(y)); - DPE_EXP(x) = (ey + 1) / 2; - } - else - { - DPE_MANT(x) = sqrt (DPE_MANT(y)); - DPE_EXP(x) = ey / 2; - } -} - -/* x <- y / z, assuming y and z are normalized, returns x normalized. - Assumes z is not zero. 
*/ -DPE_INLINE void -dpe_div (dpe_t x, dpe_t y, dpe_t z) -{ - DPE_MANT(x) = DPE_MANT(y) / DPE_MANT(z); - DPE_EXP(x) = DPE_EXP(y) - DPE_EXP(z); - dpe_normalize (x); -} - -/* x <- y * z, assuming y normalized, returns x normalized */ -DPE_INLINE void -dpe_mul_ui (dpe_t x, dpe_t y, unsigned long z) -{ - DPE_MANT(x) = DPE_MANT(y) * (DPE_DOUBLE) z; - DPE_EXP(x) = DPE_EXP(y); - dpe_normalize (x); -} - -/* x <- y / z, assuming y normalized, z non-zero, returns x normalized */ -DPE_INLINE void -dpe_div_ui (dpe_t x, dpe_t y, unsigned long z) -{ - DPE_MANT(x) = DPE_MANT(y) / (DPE_DOUBLE) z; - DPE_EXP(x) = DPE_EXP(y); - dpe_normalize (x); -} - -/* x <- y * 2^e */ -DPE_INLINE void -dpe_mul_2exp (dpe_t x, dpe_t y, unsigned long e) -{ - DPE_MANT(x) = DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y) + (DPE_EXP_T) e; -} - -/* x <- y / 2^e */ -DPE_INLINE void -dpe_div_2exp (dpe_t x, dpe_t y, unsigned long e) -{ - DPE_MANT(x) = DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y) - (DPE_EXP_T) e; -} - -/* return e and x such that y = x*2^e (equality is not guaranteed if the 'long' - type has fewer bits than the significand in dpe_t) */ -DPE_INLINE DPE_EXP_T -dpe_get_si_exp (long *x, dpe_t y) -{ - if (sizeof(long) == 4) /* 32-bit word: long has 31 bits */ - { - *x = (long) (DPE_MANT(y) * 2147483648.0); - return DPE_EXP(y) - 31; - } - else if (sizeof(long) == 8) /* 64-bit word: long has 63 bits */ - { - *x = (long) (DPE_MANT (y) * 9223372036854775808.0); - return DPE_EXP(y) - 63; - } - else - { - fprintf (stderr, "Error, neither 32-bit nor 64-bit word\n"); - exit (1); - } -} - -static DPE_UNUSED_ATTR int dpe_str_prec = 16; -static int dpe_out_str (FILE *s, int base, dpe_t x) DPE_UNUSED_ATTR; - -static int -dpe_out_str (FILE *s, int base, dpe_t x) -{ - DPE_DOUBLE d = DPE_MANT(x); - DPE_EXP_T e2 = DPE_EXP(x); - int e10 = 0; - char sign = ' '; - if (DPE_UNLIKELY (base != 10)) - { - fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); - exit (1); - } - if (d == 0.0) -#ifdef DPE_USE_DOUBLE - return fprintf (s, "%1.*f", dpe_str_prec, d); -#else - return fprintf (s, "%1.*Lf", dpe_str_prec, (long double) d); -#endif - if (d < 0) - { - d = -d; - sign = '-'; - } - if (e2 > 0) - { - while (e2 > 0) - { - e2 --; - d *= 2.0; - if (d >= 10.0) - { - d /= 10.0; - e10 ++; - } - } - } - else /* e2 <= 0 */ - { - while (e2 < 0) - { - e2 ++; - d /= 2.0; - if (d < 1.0) - { - d *= 10.0; - e10 --; - } - } - } -#ifdef DPE_USE_DOUBLE - return fprintf (s, "%c%1.*f*10^%d", sign, dpe_str_prec, d, e10); -#else - return fprintf (s, "%c%1.*Lf*10^%d", sign, dpe_str_prec, (long double) d, e10); -#endif -} - -static size_t dpe_inp_str (dpe_t x, FILE *s, int base) DPE_UNUSED_ATTR; - -static size_t -dpe_inp_str (dpe_t x, FILE *s, int base) -{ - size_t res; - DPE_DOUBLE d; - if (DPE_UNLIKELY (base != 10)) - { - fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); - exit (1); - } -#ifdef DPE_USE_DOUBLE - res = fscanf (s, "%lf", &d); -#elif defined(DPE_USE_LONGDOUBLE) - res = fscanf (s, "%Lf", &d); -#else - { - long double d_ld; - res = fscanf (s, "%Lf", &d_ld); - d = d_ld; - } -#endif - dpe_set_d (x, d); - return res; -} - -DPE_INLINE void -dpe_dump (dpe_t x) -{ - dpe_out_str (stdout, 10, x); - putchar ('\n'); -} - -DPE_INLINE int -dpe_zero_p (dpe_t x) -{ - return DPE_MANT (x) == 0; -} - -/* return a positive value if x > y - a negative value if x < y - and 0 otherwise (x=y). 
*/ -DPE_INLINE int -dpe_cmp (dpe_t x, dpe_t y) -{ - int sx = DPE_SIGN(x); - int d = sx - DPE_SIGN(y); - - if (d != 0) - return d; - else if (DPE_EXP(x) > DPE_EXP(y)) - return (sx > 0) ? 1 : -1; - else if (DPE_EXP(y) > DPE_EXP(x)) - return (sx > 0) ? -1 : 1; - else /* DPE_EXP(x) = DPE_EXP(y) */ - return (DPE_MANT(x) < DPE_MANT(y)) ? -1 : (DPE_MANT(x) > DPE_MANT(y)); -} - -DPE_INLINE int -dpe_cmp_d (dpe_t x, double d) -{ - dpe_t y; - dpe_set_d (y, d); - return dpe_cmp (x, y); -} - -DPE_INLINE int -dpe_cmp_ui (dpe_t x, unsigned long d) -{ - dpe_t y; - dpe_set_ui (y, d); - return dpe_cmp (x, y); -} - -DPE_INLINE int -dpe_cmp_si (dpe_t x, long d) -{ - dpe_t y; - dpe_set_si (y, d); - return dpe_cmp (x, y); -} - -/* set x to integer nearest to y */ -DPE_INLINE void -dpe_round (dpe_t x, dpe_t y) -{ - if (DPE_EXP(y) < 0) /* |y| < 1/2 */ - dpe_set_ui (x, 0); - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set (x, y); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, DPE_ROUND(d)); - } -} - -/* set x to the fractional part of y, defined as y - trunc(y), thus the - fractional part has absolute value in [0, 1), and same sign as y */ -DPE_INLINE void -dpe_frac (dpe_t x, dpe_t y) -{ - /* If |y| is smaller than 1, keep it */ - if (DPE_EXP(y) <= 0) - dpe_set (x, y); - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set_ui (x, 0); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, d - DPE_TRUNC(d)); - } -} - -/* set x to largest integer <= y */ -DPE_INLINE void -dpe_floor (dpe_t x, dpe_t y) -{ - if (DPE_EXP(y) <= 0) /* |y| < 1 */ - { - if (DPE_SIGN(y) >= 0) /* 0 <= y < 1 */ - dpe_set_ui (x, 0); - else /* -1 < y < 0 */ - dpe_set_si (x, -1); - } - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set (x, y); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, DPE_FLOOR(d)); - } -} - -/* set x to smallest integer >= y */ -DPE_INLINE void -dpe_ceil (dpe_t x, dpe_t y) -{ - if (DPE_EXP(y) <= 0) /* |y| < 1 */ - { - if (DPE_SIGN(y) > 0) /* 0 < y < 1 */ - dpe_set_ui (x, 1); - else /* -1 < y <= 0 */ - dpe_set_si (x, 0); - } - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set (x, y); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, DPE_CEIL(d)); - } -} - -DPE_INLINE void -dpe_swap (dpe_t x, dpe_t y) -{ - DPE_EXP_T i = DPE_EXP (x); - DPE_DOUBLE d = DPE_MANT (x); - DPE_EXP (x) = DPE_EXP (y); - DPE_MANT (x) = DPE_MANT (y); - DPE_EXP (y) = i; - DPE_MANT (y) = d; -} - -#endif /* __DPE */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/l2.c index 8c49b21d20..5491ee44d0 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/l2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/l2.c @@ -2,47 +2,208 @@ #include "lll_internals.h" #include "internal.h" -#include "dpe.h" +#include +#include + // Access entry of symmetric matrix #define SYM(M, i, j) (i < j ? 
&M[j][i] : &M[i][j]) -void -quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) +typedef struct fp_num { + double s; + int e; +} fp_num; + +static void +copy(fp_num *x, fp_num *r) { - dpe_t dpe_const_one, dpe_const_DELTABAR; + r->s = x->s; + r->e = x->e; +} - dpe_init(dpe_const_one); - dpe_set_ui(dpe_const_one, 1); +static void +normalize(fp_num *x) +{ + if (x->s == 0.0 || isfinite(x->s) == 0) { + if (x->s == 0.0) { + x->e = INT_MIN; + } + } else { + int e; + x->s = frexp(x->s, &e); + x->e += e; + } +} - dpe_init(dpe_const_DELTABAR); - dpe_set_d(dpe_const_DELTABAR, DELTABAR); +static void +to_one(fp_num *x) +{ + x->s = 1; + x->e = 0; +} - // fp variables for Gram-Schmidt orthogonalization and Lovasz' conditions - dpe_t r[4][4], u[4][4], lovasz[4]; - for (int i = 0; i < 4; i++) { - dpe_init(lovasz[i]); - for (int j = 0; j <= i; j++) { - dpe_init(r[i][j]); - dpe_init(u[i][j]); - } +static void +to_deltabar(fp_num *x) +{ + x->s = DELTABAR; + x->e = 0; +} + +static void +to_etabar(fp_num *x) +{ + x->s = ETABAR; + x->e = 0; +} + +static void +from_mpz(const mpz_t x, fp_num *r) +{ + long exp = 0; + r->s = mpz_get_d_2exp(&exp, x); + r->e = exp; +} + +static void +to_mpz(const fp_num *x, mpz_t r) +{ + if (x->e >= DBL_MANT_DIG) { + double s = x->s * 0x1P53; + mpz_set_d(r, s); + mpz_mul_2exp(r, r, x->e - DBL_MANT_DIG); + } else if (x->e < 0) { + mpz_set_ui(r, 0); + } else { + double s = ldexp(x->s, x->e); + mpz_set_d(r, round(s)); } +} - // threshold for swaps - dpe_t delta_bar; - dpe_init(delta_bar); - dpe_set_d(delta_bar, DELTABAR); +static void +fp_mul(const fp_num *x, const fp_num *y, fp_num *r) +{ + r->s = x->s * y->s; + r->e = x->e + y->e; + normalize(r); + +} + +static void +fp_div(const fp_num *x, const fp_num *y, fp_num *r) +{ + r->s = x->s / y->s; + r->e = x->e - y->e; + normalize(r); +} + +static void +fp_sub(const fp_num *x, const fp_num *y, fp_num *r) +{ + if (x->e > y->e + DBL_MANT_DIG) { + r->s = x->s; + r->e = x->e; + } else if (y->e > x->e + DBL_MANT_DIG) { + r->s = -y->s; + r->e = y->e; + } else { + int e = x->e - y->e; + + if (e >= 0) { + r->s = x->s - ldexp(y->s, -e); + r->e = x->e; + } else { + r->s = ldexp(x->s, e) - y->s; + r->e = y->e; + } + + normalize(r); + } +} + +static inline int +sign(const fp_num *x) +{ + if (x->s < 0.0) + return -1; + return 1; +} + +static int +fp_cmp(const fp_num *x, const fp_num *y) +{ + int sign_x = sign(x); + int sign_y = sign(y); + + if (sign_x != sign_y) + return sign_x - sign_y; + else if (x->e > y->e) + return sign_x; + else if (y->e > x->e) + return -sign_x; + else if (x->s > y->s) + return 1; + else if (x->s < y->s) + return -1; + else + return 0; +} + +static void +fp_round(fp_num *x) +{ + if (x->e < 0) { + x->s = 0; + x->e = 0; + } else if (x->e >= DBL_MANT_DIG) { + return; + } else { + double tmp; + tmp = ldexp(x->s, x->e); + x->s = round(tmp); + x->e = 0; + normalize(x); + } +} + +static void +fp_abs(const fp_num *x, fp_num *y) { + if (x->s < 0.0) { + y->s = -x->s; + } else { + y->s = x->s; + } + y->e = x->e; +} + +void +quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) +{ + fp_num const_one = {0}; + fp_num delta_bar = {0}; + fp_num eta_bar = {0}; + fp_num neg_eta_bar = {0}; + to_one(&const_one); + to_deltabar(&delta_bar); + eta_bar.s = ETABAR; + eta_bar.e = 0; + neg_eta_bar.s = -ETABAR; + neg_eta_bar.e = 0; + normalize(&eta_bar); + normalize(&neg_eta_bar); + + fp_num r[4][4] = {0}; + fp_num u[4][4] = {0}; + fp_num lovasz[4] = {0}; + + fp_num Xf = {0}; + fp_num tmpF = {0}; - // Other work variables - dpe_t Xf, tmpF; - 
dpe_init(Xf); - dpe_init(tmpF); ibz_t X, tmpI; ibz_init(&X); ibz_init(&tmpI); // Main L² loop - dpe_set_z(r[0][0], (*G)[0][0]); + from_mpz((*G)[0][0], &r[0][0]); int kappa = 1; while (kappa < 4) { // size reduce b_κ @@ -52,23 +213,23 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Loop invariant: // r[κ][j] ≈ u[κ][j] ‖b_j*‖² ≈ 〈b_κ, b_j*〉 for (int j = 0; j <= kappa; j++) { - dpe_set_z(r[kappa][j], (*G)[kappa][j]); + from_mpz((*G)[kappa][j], &r[kappa][j]); for (int k = 0; k < j; k++) { - dpe_mul(tmpF, r[kappa][k], u[j][k]); - dpe_sub(r[kappa][j], r[kappa][j], tmpF); + fp_mul(&r[kappa][k], &u[j][k], &tmpF); + fp_sub(&r[kappa][j], &tmpF, &r[kappa][j]); } if (j < kappa) - dpe_div(u[kappa][j], r[kappa][j], r[j][j]); + fp_div(&r[kappa][j], &r[j][j], &u[kappa][j]); } done = 1; // size reduce for (int i = kappa - 1; i >= 0; i--) { - if (dpe_cmp_d(u[kappa][i], ETABAR) > 0 || dpe_cmp_d(u[kappa][i], -ETABAR) < 0) { + if (fp_cmp(&u[kappa][i], &eta_bar) > 0 || fp_cmp(&u[kappa][i], &neg_eta_bar) < 0) { done = 0; - dpe_set(Xf, u[kappa][i]); - dpe_round(Xf, Xf); - dpe_get_z(X, Xf); + copy(&u[kappa][i], &Xf); + fp_round(&Xf); + to_mpz(&Xf, X); // Update basis: b_κ ← b_κ - X·b_i for (int j = 0; j < 4; j++) { ibz_mul(&tmpI, &X, &(*basis)[j][i]); @@ -91,8 +252,8 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // // Update u[kappa][j] for (int j = 0; j < i; j++) { - dpe_mul(tmpF, Xf, u[i][j]); - dpe_sub(u[kappa][j], u[kappa][j], tmpF); + fp_mul(&Xf, &u[i][j], &tmpF); + fp_sub(&u[kappa][j], &tmpF, &u[kappa][j]); } } } @@ -100,16 +261,16 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Check Lovasz' conditions // lovasz[0] = ‖b_κ‖² - dpe_set_z(lovasz[0], (*G)[kappa][kappa]); + from_mpz((*G)[kappa][kappa], &lovasz[0]); // lovasz[i] = lovasz[i-1] - u[κ][i-1]·r[κ][i-1] for (int i = 1; i < kappa; i++) { - dpe_mul(tmpF, u[kappa][i - 1], r[kappa][i - 1]); - dpe_sub(lovasz[i], lovasz[i - 1], tmpF); + fp_mul(&u[kappa][i - 1], &r[kappa][i - 1], &tmpF); + fp_sub(&lovasz[i - 1], &tmpF, &lovasz[i]); } int swap; for (swap = kappa; swap > 0; swap--) { - dpe_mul(tmpF, delta_bar, r[swap - 1][swap - 1]); - if (dpe_cmp(tmpF, lovasz[swap - 1]) < 0) + fp_mul(&delta_bar, &r[swap - 1][swap - 1], &tmpF); + if (fp_cmp(&tmpF, &lovasz[swap - 1]) < 0) break; } @@ -127,10 +288,10 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) } // Copy row u[κ] and r[κ] in swap position, ignore what follows for (int i = 0; i < swap; i++) { - dpe_set(u[swap][i], u[kappa][i]); - dpe_set(r[swap][i], r[kappa][i]); + copy(&u[kappa][i], &u[swap][i]); + copy(&r[kappa][i], &r[swap][i]); } - dpe_set(r[swap][swap], lovasz[swap]); + copy(&lovasz[swap], &r[swap][swap]); // swap complete kappa = swap; } @@ -142,15 +303,15 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Check size-reducedness for (int i = 0; i < 4; i++) for (int j = 0; j < i; j++) { - dpe_abs(u[i][j], u[i][j]); - assert(dpe_cmp_d(u[i][j], ETABAR) <= 0); + fp_abs(&u[i][j], &u[i][j]); + assert(fp_cmp(&u[i][j], &eta_bar) <= 0); } // Check Lovasz' conditions for (int i = 1; i < 4; i++) { - dpe_mul(tmpF, u[i][i - 1], u[i][i - 1]); - dpe_sub(tmpF, dpe_const_DELTABAR, tmpF); - dpe_mul(tmpF, tmpF, r[i - 1][i - 1]); - assert(dpe_cmp(tmpF, r[i][i]) <= 0); + fp_mul(&u[i][i - 1], &u[i][i - 1], &tmpF); + fp_sub(&delta_bar, &tmpF, &tmpF); + fp_mul(&tmpF, &r[i - 1][i - 1], &tmpF); + assert(fp_cmp(&tmpF, &r[i][i]) <= 0); } #endif @@ -163,18 +324,6 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Clearinghouse ibz_finalize(&X); ibz_finalize(&tmpI); - 
dpe_clear(dpe_const_one); - dpe_clear(dpe_const_DELTABAR); - dpe_clear(Xf); - dpe_clear(tmpF); - dpe_clear(delta_bar); - for (int i = 0; i < 4; i++) { - dpe_clear(lovasz[i]); - for (int j = 0; j <= i; j++) { - dpe_clear(r[i][j]); - dpe_clear(u[i][j]); - } - } } int diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/COPYING.LGPL b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/COPYING.LGPL deleted file mode 100644 index 0a041280bd..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/COPYING.LGPL +++ /dev/null @@ -1,165 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. 
You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. 
Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dpe.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dpe.h deleted file mode 100644 index b9a7a35e0b..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dpe.h +++ /dev/null @@ -1,743 +0,0 @@ -/* Copyright (C) 2004-2024 Patrick Pelissier, Paul Zimmermann, LORIA/INRIA. - -This file is part of the DPE Library. - -The DPE Library is free software; you can redistribute it and/or modify -it under the terms of the GNU Lesser General Public License as published by -the Free Software Foundation; either version 3 of the License, or (at your -option) any later version. - -The DPE Library is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public -License for more details. - -You should have received a copy of the GNU Lesser General Public License -along with the DPE Library; see the file COPYING.LIB. -If not, see <https://www.gnu.org/licenses/>. */ - -#ifndef __DPE -#define __DPE - -#include <stdlib.h> /* For abort */ -#include <stdio.h> /* For fprintf */ -#include <math.h> /* for round, floor, ceil */ -#include <limits.h> - -/* if you change the version, please change it in Makefile too */ -#define DPE_VERSION_MAJOR 1 -#define DPE_VERSION_MINOR 7 - -#if defined(__GNUC__) && (__GNUC__ >= 3) -# define DPE_LIKELY(x) (__builtin_expect(!!(x),1)) -# define DPE_UNLIKELY(x) (__builtin_expect((x),0)) -# define DPE_UNUSED_ATTR __attribute__((unused)) -#else -# define DPE_LIKELY(x) (x) -# define DPE_UNLIKELY(x) (x) -# define DPE_UNUSED_ATTR -#endif - -/* If no user defined mode, define it to double */ -#if !defined(DPE_USE_DOUBLE) && !defined(DPE_USE_LONGDOUBLE) && !defined(DPE_USE_FLOAT128) -# define DPE_USE_DOUBLE -#endif - -#if defined(DPE_USE_DOUBLE) && defined(DPE_USE_LONGDOUBLE) -# error "Either DPE_USE_DOUBLE or DPE_USE_LONGDOUBLE shall be defined." -#elif defined(DPE_USE_DOUBLE) && defined(DPE_USE_USE_FLOAT128) -# error "Either DPE_USE_DOUBLE or DPE_USE_FLOAT128 shall be defined."
-#endif - -#if (defined(__i386) || defined (__x86_64)) && !defined(DPE_LITTLEENDIAN32) && defined(DPE_USE_DOUBLE) -# define DPE_LITTLEENDIAN32 -#endif - -#if (defined(__STDC_VERSION__) && (__STDC_VERSION__>=199901L)) || defined (__GLIBC__) -# define DPE_DEFINE_ROUND_TRUNC -#endif - -#if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 43 -# define DPE_ISFINITE __builtin_isfinite -#elif defined(isfinite) -# define DPE_ISFINITE isfinite /* new C99 function */ -#else -# define DPE_ISFINITE finite /* obsolete BSD function */ -#endif - -/* DPE_LDEXP(DPE_DOUBLE m, DPEEXP e) return x = m * 2^e */ -/* DPE_FREXP(DPE_DOUBLE x, DPEEXP *e) returns m, e such that x = m * 2^e with - 1/2 <= m < 1 */ -/* DPE_ROUND(DPE_DOUBLE x) returns the nearest integer to x */ -#if defined(DPE_USE_DOUBLE) -# define DPE_DOUBLE double /* mantissa type */ -# define DPE_BITSIZE 53 /* bitsize of DPE_DOUBLE */ -# define DPE_2_POW_BITSIZE 0x1P53 -# if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 40 -# define DPE_LDEXP __builtin_ldexp -# define DPE_FREXP __builtin_frexp -# define DPE_FLOOR __builtin_floor -# define DPE_CEIL __builtin_ceil -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND __builtin_round -# define DPE_TRUNC __builtin_trunc -# endif -# else -# define DPE_LDEXP ldexp -# define DPE_FREXP frexp -# define DPE_FLOOR floor -# define DPE_CEIL ceil -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND round -# define DPE_TRUNC trunc -# endif -# endif - -#elif defined(DPE_USE_LONGDOUBLE) -# define DPE_DOUBLE long double -# define DPE_BITSIZE 64 -# define DPE_2_POW_BITSIZE 0x1P64 -# define DPE_LDEXP ldexpl -# define DPE_FREXP frexpl -# define DPE_FLOOR floorl -# define DPE_CEIL ceill -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND roundl -# define DPE_TRUNC truncl -# endif - -#elif defined(DPE_USE_FLOAT128) -# include "quadmath.h" -# define DPE_DOUBLE __float128 -# define DPE_BITSIZE 113 -# define DPE_2_POW_BITSIZE 0x1P113 -# define DPE_LDEXP ldexpq -# define DPE_FLOOR floorq -# define DPE_CEIL ceilq -# define DPE_FREXP frexpq -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND roundq -# define DPE_TRUNC truncq -# endif - -#else -# error "neither DPE_USE_DOUBLE, nor DPE_USE_LONGDOUBLE, nor DPE_USE_FLOAT128 is defined" -#endif - -/* If no C99, do what we can */ -#ifndef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND(x) ((DPE_DOUBLE) ((long long) ((x) + ((x) >= 0.0 ? 0.5 : -0.5)))) -# define DPE_TRUNC(x) ((DPE_DOUBLE) ((long long) ((x) + 0.0))) -#endif - -#if defined(DPE_USE_LONG) -# define DPE_EXP_T long /* exponent type */ -# define DPE_EXPMIN LONG_MIN /* smallest possible exponent */ -#elif defined(DPE_USE_LONGLONG) -# define DPE_EXP_T long long -# define DPE_EXPMIN LLONG_MIN -#else -# define DPE_EXP_T int /* exponent type */ -# define DPE_EXPMIN INT_MIN /* smallest possible exponent */ -#endif - -#ifdef DPE_LITTLEENDIAN32 -typedef union -{ - double d; -#if INT_MAX == 0x7FFFFFFFL - int i[2]; -#elif LONG_MAX == 0x7FFFFFFFL - long i[2]; -#elif SHRT_MAX == 0x7FFFFFFFL - short i[2]; -#else -# error Cannot find a 32 bits integer type. -#endif -} dpe_double_words; -#endif - -typedef struct -{ - DPE_DOUBLE d; /* significand */ - DPE_EXP_T exp; /* exponent */ -} dpe_struct; - -typedef dpe_struct dpe_t[1]; - -#define DPE_MANT(x) ((x)->d) -#define DPE_EXP(x) ((x)->exp) -#define DPE_SIGN(x) ((DPE_MANT(x) < 0.0) ? 
-1 : (DPE_MANT(x) > 0.0)) - -#define DPE_INLINE static inline - -/* initialize */ -DPE_INLINE void -dpe_init (dpe_t x DPE_UNUSED_ATTR) -{ -} - -/* clear */ -DPE_INLINE void -dpe_clear (dpe_t x DPE_UNUSED_ATTR) -{ -} - -/* set x to y */ -DPE_INLINE void -dpe_set (dpe_t x, dpe_t y) -{ - DPE_MANT(x) = DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y); -} - -/* set x to -y */ -DPE_INLINE void -dpe_neg (dpe_t x, dpe_t y) -{ - DPE_MANT(x) = -DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y); -} - -/* set x to |y| */ -DPE_INLINE void -dpe_abs (dpe_t x, dpe_t y) -{ - DPE_MANT(x) = (DPE_MANT(y) >= 0) ? DPE_MANT(y) : -DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y); -} - -/* set mantissa in [1/2, 1), except for 0 which has minimum exponent */ -/* FIXME: don't inline this function yet ? */ -static void -dpe_normalize (dpe_t x) -{ - if (DPE_UNLIKELY (DPE_MANT(x) == 0.0 || DPE_ISFINITE (DPE_MANT(x)) == 0)) - { - if (DPE_MANT(x) == 0.0) - DPE_EXP(x) = DPE_EXPMIN; - /* otherwise let the exponent of NaN, Inf unchanged */ - } - else - { - DPE_EXP_T e; -#ifdef DPE_LITTLEENDIAN32 /* 32-bit little endian */ - dpe_double_words dw; - dw.d = DPE_MANT(x); - e = (dw.i[1] >> 20) & 0x7FF; /* unbiased exponent, 1022 for m=1/2 */ - DPE_EXP(x) += e - 1022; - dw.i[1] = (dw.i[1] & 0x800FFFFF) | 0x3FE00000; - DPE_MANT(x) = dw.d; -#else /* portable code */ - double m = DPE_MANT(x); - DPE_MANT(x) = DPE_FREXP (m, &e); - DPE_EXP(x) += e; -#endif - } -} - -#if defined(DPE_USE_DOUBLE) -static const double dpe_scale_tab[54] = { - 0x1P0, 0x1P-1, 0x1P-2, 0x1P-3, 0x1P-4, 0x1P-5, 0x1P-6, 0x1P-7, 0x1P-8, - 0x1P-9, 0x1P-10, 0x1P-11, 0x1P-12, 0x1P-13, 0x1P-14, 0x1P-15, 0x1P-16, - 0x1P-17, 0x1P-18, 0x1P-19, 0x1P-20, 0x1P-21, 0x1P-22, 0x1P-23, 0x1P-24, - 0x1P-25, 0x1P-26, 0x1P-27, 0x1P-28, 0x1P-29, 0x1P-30, 0x1P-31, 0x1P-32, - 0x1P-33, 0x1P-34, 0x1P-35, 0x1P-36, 0x1P-37, 0x1P-38, 0x1P-39, 0x1P-40, - 0x1P-41, 0x1P-42, 0x1P-43, 0x1P-44, 0x1P-45, 0x1P-46, 0x1P-47, 0x1P-48, - 0x1P-49, 0x1P-50, 0x1P-51, 0x1P-52, 0x1P-53}; -#endif - -DPE_INLINE DPE_DOUBLE -dpe_scale (DPE_DOUBLE d, int s) -{ - /* -DPE_BITSIZE < s <= 0 and 1/2 <= d < 1 */ -#if defined(DPE_USE_DOUBLE) - return d * dpe_scale_tab [-s]; -#else /* portable code */ - return DPE_LDEXP (d, s); -#endif -} - -/* set x to y */ -DPE_INLINE void -dpe_set_d (dpe_t x, double y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -/* set x to y */ -DPE_INLINE void -dpe_set_ld (dpe_t x, long double y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -/* set x to y */ -DPE_INLINE void -dpe_set_ui (dpe_t x, unsigned long y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -/* set x to y */ -DPE_INLINE void -dpe_set_si (dpe_t x, long y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -DPE_INLINE long -dpe_get_si (dpe_t x) -{ - DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); - return (long) d; -} - -DPE_INLINE unsigned long -dpe_get_ui (dpe_t x) -{ - DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); - return (d < 0.0) ? 
0 : (unsigned long) d; -} - -DPE_INLINE double -dpe_get_d (dpe_t x) -{ - return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); -} - -DPE_INLINE long double -dpe_get_ld (dpe_t x) -{ - return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); -} - -#if defined(__GMP_H__) || defined(__MINI_GMP_H__) -/* set x to y */ -DPE_INLINE void -dpe_set_z (dpe_t x, mpz_t y) -{ - long e; - DPE_MANT(x) = mpz_get_d_2exp (&e, y); - DPE_EXP(x) = (DPE_EXP_T) e; -} - -/* set x to y, rounded to nearest */ -DPE_INLINE void -dpe_get_z (mpz_t x, dpe_t y) -{ - DPE_EXP_T ey = DPE_EXP(y); - if (ey >= DPE_BITSIZE) /* y is an integer */ - { - DPE_DOUBLE d = DPE_MANT(y) * DPE_2_POW_BITSIZE; /* d is an integer */ - mpz_set_d (x, d); /* should be exact */ - mpz_mul_2exp (x, x, (unsigned long) ey - DPE_BITSIZE); - } - else /* DPE_EXP(y) < DPE_BITSIZE */ - { - if (DPE_UNLIKELY (ey < 0)) /* |y| < 1/2 */ - mpz_set_ui (x, 0); - else - { - DPE_DOUBLE d = DPE_LDEXP(DPE_MANT(y), ey); - mpz_set_d (x, (double) DPE_ROUND(d)); - } - } -} - -/* return e and x such that y = x*2^e */ -DPE_INLINE mp_exp_t -dpe_get_z_exp (mpz_t x, dpe_t y) -{ - mpz_set_d (x, DPE_MANT (y) * DPE_2_POW_BITSIZE); - return DPE_EXP(y) - DPE_BITSIZE; -} -#endif - -/* x <- y + z, assuming y and z are normalized, returns x normalized */ -DPE_INLINE void -dpe_add (dpe_t x, dpe_t y, dpe_t z) -{ - if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) - /* |z| < 1/2*ulp(y), thus o(y+z) = y */ - dpe_set (x, y); - else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) - dpe_set (x, z); - else - { - DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ - - if (d >= 0) - { - DPE_MANT(x) = DPE_MANT(y) + dpe_scale (DPE_MANT(z), -d); - DPE_EXP(x) = DPE_EXP(y); - } - else - { - DPE_MANT(x) = DPE_MANT(z) + dpe_scale (DPE_MANT(y), d); - DPE_EXP(x) = DPE_EXP(z); - } - dpe_normalize (x); - } -} - -/* x <- y - z, assuming y and z are normalized, returns x normalized */ -DPE_INLINE void -dpe_sub (dpe_t x, dpe_t y, dpe_t z) -{ - if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) - /* |z| < 1/2*ulp(y), thus o(y-z) = y */ - dpe_set (x, y); - else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) - dpe_neg (x, z); - else - { - DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ - - if (d >= 0) - { - DPE_MANT(x) = DPE_MANT(y) - dpe_scale (DPE_MANT(z), -d); - DPE_EXP(x) = DPE_EXP(y); - } - else - { - DPE_MANT(x) = dpe_scale (DPE_MANT(y), d) - DPE_MANT(z); - DPE_EXP(x) = DPE_EXP(z); - } - dpe_normalize (x); - } -} - -/* x <- y * z, assuming y and z are normalized, returns x normalized */ -DPE_INLINE void -dpe_mul (dpe_t x, dpe_t y, dpe_t z) -{ - DPE_MANT(x) = DPE_MANT(y) * DPE_MANT(z); - DPE_EXP(x) = DPE_EXP(y) + DPE_EXP(z); - dpe_normalize (x); -} - -/* x <- sqrt(y), assuming y is normalized, returns x normalized */ -DPE_INLINE void -dpe_sqrt (dpe_t x, dpe_t y) -{ - DPE_EXP_T ey = DPE_EXP(y); - if (ey % 2) - { - /* since 1/2 <= my < 1, 1/4 <= my/2 < 1 */ - DPE_MANT(x) = sqrt (0.5 * DPE_MANT(y)); - DPE_EXP(x) = (ey + 1) / 2; - } - else - { - DPE_MANT(x) = sqrt (DPE_MANT(y)); - DPE_EXP(x) = ey / 2; - } -} - -/* x <- y / z, assuming y and z are normalized, returns x normalized. - Assumes z is not zero. 
*/ -DPE_INLINE void -dpe_div (dpe_t x, dpe_t y, dpe_t z) -{ - DPE_MANT(x) = DPE_MANT(y) / DPE_MANT(z); - DPE_EXP(x) = DPE_EXP(y) - DPE_EXP(z); - dpe_normalize (x); -} - -/* x <- y * z, assuming y normalized, returns x normalized */ -DPE_INLINE void -dpe_mul_ui (dpe_t x, dpe_t y, unsigned long z) -{ - DPE_MANT(x) = DPE_MANT(y) * (DPE_DOUBLE) z; - DPE_EXP(x) = DPE_EXP(y); - dpe_normalize (x); -} - -/* x <- y / z, assuming y normalized, z non-zero, returns x normalized */ -DPE_INLINE void -dpe_div_ui (dpe_t x, dpe_t y, unsigned long z) -{ - DPE_MANT(x) = DPE_MANT(y) / (DPE_DOUBLE) z; - DPE_EXP(x) = DPE_EXP(y); - dpe_normalize (x); -} - -/* x <- y * 2^e */ -DPE_INLINE void -dpe_mul_2exp (dpe_t x, dpe_t y, unsigned long e) -{ - DPE_MANT(x) = DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y) + (DPE_EXP_T) e; -} - -/* x <- y / 2^e */ -DPE_INLINE void -dpe_div_2exp (dpe_t x, dpe_t y, unsigned long e) -{ - DPE_MANT(x) = DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y) - (DPE_EXP_T) e; -} - -/* return e and x such that y = x*2^e (equality is not guaranteed if the 'long' - type has fewer bits than the significand in dpe_t) */ -DPE_INLINE DPE_EXP_T -dpe_get_si_exp (long *x, dpe_t y) -{ - if (sizeof(long) == 4) /* 32-bit word: long has 31 bits */ - { - *x = (long) (DPE_MANT(y) * 2147483648.0); - return DPE_EXP(y) - 31; - } - else if (sizeof(long) == 8) /* 64-bit word: long has 63 bits */ - { - *x = (long) (DPE_MANT (y) * 9223372036854775808.0); - return DPE_EXP(y) - 63; - } - else - { - fprintf (stderr, "Error, neither 32-bit nor 64-bit word\n"); - exit (1); - } -} - -static DPE_UNUSED_ATTR int dpe_str_prec = 16; -static int dpe_out_str (FILE *s, int base, dpe_t x) DPE_UNUSED_ATTR; - -static int -dpe_out_str (FILE *s, int base, dpe_t x) -{ - DPE_DOUBLE d = DPE_MANT(x); - DPE_EXP_T e2 = DPE_EXP(x); - int e10 = 0; - char sign = ' '; - if (DPE_UNLIKELY (base != 10)) - { - fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); - exit (1); - } - if (d == 0.0) -#ifdef DPE_USE_DOUBLE - return fprintf (s, "%1.*f", dpe_str_prec, d); -#else - return fprintf (s, "%1.*Lf", dpe_str_prec, (long double) d); -#endif - if (d < 0) - { - d = -d; - sign = '-'; - } - if (e2 > 0) - { - while (e2 > 0) - { - e2 --; - d *= 2.0; - if (d >= 10.0) - { - d /= 10.0; - e10 ++; - } - } - } - else /* e2 <= 0 */ - { - while (e2 < 0) - { - e2 ++; - d /= 2.0; - if (d < 1.0) - { - d *= 10.0; - e10 --; - } - } - } -#ifdef DPE_USE_DOUBLE - return fprintf (s, "%c%1.*f*10^%d", sign, dpe_str_prec, d, e10); -#else - return fprintf (s, "%c%1.*Lf*10^%d", sign, dpe_str_prec, (long double) d, e10); -#endif -} - -static size_t dpe_inp_str (dpe_t x, FILE *s, int base) DPE_UNUSED_ATTR; - -static size_t -dpe_inp_str (dpe_t x, FILE *s, int base) -{ - size_t res; - DPE_DOUBLE d; - if (DPE_UNLIKELY (base != 10)) - { - fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); - exit (1); - } -#ifdef DPE_USE_DOUBLE - res = fscanf (s, "%lf", &d); -#elif defined(DPE_USE_LONGDOUBLE) - res = fscanf (s, "%Lf", &d); -#else - { - long double d_ld; - res = fscanf (s, "%Lf", &d_ld); - d = d_ld; - } -#endif - dpe_set_d (x, d); - return res; -} - -DPE_INLINE void -dpe_dump (dpe_t x) -{ - dpe_out_str (stdout, 10, x); - putchar ('\n'); -} - -DPE_INLINE int -dpe_zero_p (dpe_t x) -{ - return DPE_MANT (x) == 0; -} - -/* return a positive value if x > y - a negative value if x < y - and 0 otherwise (x=y). 
*/ -DPE_INLINE int -dpe_cmp (dpe_t x, dpe_t y) -{ - int sx = DPE_SIGN(x); - int d = sx - DPE_SIGN(y); - - if (d != 0) - return d; - else if (DPE_EXP(x) > DPE_EXP(y)) - return (sx > 0) ? 1 : -1; - else if (DPE_EXP(y) > DPE_EXP(x)) - return (sx > 0) ? -1 : 1; - else /* DPE_EXP(x) = DPE_EXP(y) */ - return (DPE_MANT(x) < DPE_MANT(y)) ? -1 : (DPE_MANT(x) > DPE_MANT(y)); -} - -DPE_INLINE int -dpe_cmp_d (dpe_t x, double d) -{ - dpe_t y; - dpe_set_d (y, d); - return dpe_cmp (x, y); -} - -DPE_INLINE int -dpe_cmp_ui (dpe_t x, unsigned long d) -{ - dpe_t y; - dpe_set_ui (y, d); - return dpe_cmp (x, y); -} - -DPE_INLINE int -dpe_cmp_si (dpe_t x, long d) -{ - dpe_t y; - dpe_set_si (y, d); - return dpe_cmp (x, y); -} - -/* set x to integer nearest to y */ -DPE_INLINE void -dpe_round (dpe_t x, dpe_t y) -{ - if (DPE_EXP(y) < 0) /* |y| < 1/2 */ - dpe_set_ui (x, 0); - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set (x, y); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, DPE_ROUND(d)); - } -} - -/* set x to the fractional part of y, defined as y - trunc(y), thus the - fractional part has absolute value in [0, 1), and same sign as y */ -DPE_INLINE void -dpe_frac (dpe_t x, dpe_t y) -{ - /* If |y| is smaller than 1, keep it */ - if (DPE_EXP(y) <= 0) - dpe_set (x, y); - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set_ui (x, 0); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, d - DPE_TRUNC(d)); - } -} - -/* set x to largest integer <= y */ -DPE_INLINE void -dpe_floor (dpe_t x, dpe_t y) -{ - if (DPE_EXP(y) <= 0) /* |y| < 1 */ - { - if (DPE_SIGN(y) >= 0) /* 0 <= y < 1 */ - dpe_set_ui (x, 0); - else /* -1 < y < 0 */ - dpe_set_si (x, -1); - } - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set (x, y); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, DPE_FLOOR(d)); - } -} - -/* set x to smallest integer >= y */ -DPE_INLINE void -dpe_ceil (dpe_t x, dpe_t y) -{ - if (DPE_EXP(y) <= 0) /* |y| < 1 */ - { - if (DPE_SIGN(y) > 0) /* 0 < y < 1 */ - dpe_set_ui (x, 1); - else /* -1 < y <= 0 */ - dpe_set_si (x, 0); - } - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set (x, y); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, DPE_CEIL(d)); - } -} - -DPE_INLINE void -dpe_swap (dpe_t x, dpe_t y) -{ - DPE_EXP_T i = DPE_EXP (x); - DPE_DOUBLE d = DPE_MANT (x); - DPE_EXP (x) = DPE_EXP (y); - DPE_MANT (x) = DPE_MANT (y); - DPE_EXP (y) = i; - DPE_MANT (y) = d; -} - -#endif /* __DPE */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/l2.c index 8c49b21d20..5491ee44d0 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/l2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/l2.c @@ -2,47 +2,208 @@ #include "lll_internals.h" #include "internal.h" -#include "dpe.h" +#include +#include + // Access entry of symmetric matrix #define SYM(M, i, j) (i < j ? 
&M[j][i] : &M[i][j]) -void -quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) +typedef struct fp_num { + double s; + int e; +} fp_num; + +static void +copy(fp_num *x, fp_num *r) { - dpe_t dpe_const_one, dpe_const_DELTABAR; + r->s = x->s; + r->e = x->e; +} - dpe_init(dpe_const_one); - dpe_set_ui(dpe_const_one, 1); +static void +normalize(fp_num *x) +{ + if (x->s == 0.0 || isfinite(x->s) == 0) { + if (x->s == 0.0) { + x->e = INT_MIN; + } + } else { + int e; + x->s = frexp(x->s, &e); + x->e += e; + } +} - dpe_init(dpe_const_DELTABAR); - dpe_set_d(dpe_const_DELTABAR, DELTABAR); +static void +to_one(fp_num *x) +{ + x->s = 1; + x->e = 0; +} - // fp variables for Gram-Schmidt orthogonalization and Lovasz' conditions - dpe_t r[4][4], u[4][4], lovasz[4]; - for (int i = 0; i < 4; i++) { - dpe_init(lovasz[i]); - for (int j = 0; j <= i; j++) { - dpe_init(r[i][j]); - dpe_init(u[i][j]); - } +static void +to_deltabar(fp_num *x) +{ + x->s = DELTABAR; + x->e = 0; +} + +static void +to_etabar(fp_num *x) +{ + x->s = ETABAR; + x->e = 0; +} + +static void +from_mpz(const mpz_t x, fp_num *r) +{ + long exp = 0; + r->s = mpz_get_d_2exp(&exp, x); + r->e = exp; +} + +static void +to_mpz(const fp_num *x, mpz_t r) +{ + if (x->e >= DBL_MANT_DIG) { + double s = x->s * 0x1P53; + mpz_set_d(r, s); + mpz_mul_2exp(r, r, x->e - DBL_MANT_DIG); + } else if (x->e < 0) { + mpz_set_ui(r, 0); + } else { + double s = ldexp(x->s, x->e); + mpz_set_d(r, round(s)); } +} - // threshold for swaps - dpe_t delta_bar; - dpe_init(delta_bar); - dpe_set_d(delta_bar, DELTABAR); +static void +fp_mul(const fp_num *x, const fp_num *y, fp_num *r) +{ + r->s = x->s * y->s; + r->e = x->e + y->e; + normalize(r); + +} + +static void +fp_div(const fp_num *x, const fp_num *y, fp_num *r) +{ + r->s = x->s / y->s; + r->e = x->e - y->e; + normalize(r); +} + +static void +fp_sub(const fp_num *x, const fp_num *y, fp_num *r) +{ + if (x->e > y->e + DBL_MANT_DIG) { + r->s = x->s; + r->e = x->e; + } else if (y->e > x->e + DBL_MANT_DIG) { + r->s = -y->s; + r->e = y->e; + } else { + int e = x->e - y->e; + + if (e >= 0) { + r->s = x->s - ldexp(y->s, -e); + r->e = x->e; + } else { + r->s = ldexp(x->s, e) - y->s; + r->e = y->e; + } + + normalize(r); + } +} + +static inline int +sign(const fp_num *x) +{ + if (x->s < 0.0) + return -1; + return 1; +} + +static int +fp_cmp(const fp_num *x, const fp_num *y) +{ + int sign_x = sign(x); + int sign_y = sign(y); + + if (sign_x != sign_y) + return sign_x - sign_y; + else if (x->e > y->e) + return sign_x; + else if (y->e > x->e) + return -sign_x; + else if (x->s > y->s) + return 1; + else if (x->s < y->s) + return -1; + else + return 0; +} + +static void +fp_round(fp_num *x) +{ + if (x->e < 0) { + x->s = 0; + x->e = 0; + } else if (x->e >= DBL_MANT_DIG) { + return; + } else { + double tmp; + tmp = ldexp(x->s, x->e); + x->s = round(tmp); + x->e = 0; + normalize(x); + } +} + +static void +fp_abs(const fp_num *x, fp_num *y) { + if (x->s < 0.0) { + y->s = -x->s; + } else { + y->s = x->s; + } + y->e = x->e; +} + +void +quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) +{ + fp_num const_one = {0}; + fp_num delta_bar = {0}; + fp_num eta_bar = {0}; + fp_num neg_eta_bar = {0}; + to_one(&const_one); + to_deltabar(&delta_bar); + eta_bar.s = ETABAR; + eta_bar.e = 0; + neg_eta_bar.s = -ETABAR; + neg_eta_bar.e = 0; + normalize(&eta_bar); + normalize(&neg_eta_bar); + + fp_num r[4][4] = {0}; + fp_num u[4][4] = {0}; + fp_num lovasz[4] = {0}; + + fp_num Xf = {0}; + fp_num tmpF = {0}; - // Other work variables - dpe_t Xf, tmpF; - 
dpe_init(Xf); - dpe_init(tmpF); ibz_t X, tmpI; ibz_init(&X); ibz_init(&tmpI); // Main L² loop - dpe_set_z(r[0][0], (*G)[0][0]); + from_mpz((*G)[0][0], &r[0][0]); int kappa = 1; while (kappa < 4) { // size reduce b_κ @@ -52,23 +213,23 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Loop invariant: // r[κ][j] ≈ u[κ][j] ‖b_j*‖² ≈ 〈b_κ, b_j*〉 for (int j = 0; j <= kappa; j++) { - dpe_set_z(r[kappa][j], (*G)[kappa][j]); + from_mpz((*G)[kappa][j], &r[kappa][j]); for (int k = 0; k < j; k++) { - dpe_mul(tmpF, r[kappa][k], u[j][k]); - dpe_sub(r[kappa][j], r[kappa][j], tmpF); + fp_mul(&r[kappa][k], &u[j][k], &tmpF); + fp_sub(&r[kappa][j], &tmpF, &r[kappa][j]); } if (j < kappa) - dpe_div(u[kappa][j], r[kappa][j], r[j][j]); + fp_div(&r[kappa][j], &r[j][j], &u[kappa][j]); } done = 1; // size reduce for (int i = kappa - 1; i >= 0; i--) { - if (dpe_cmp_d(u[kappa][i], ETABAR) > 0 || dpe_cmp_d(u[kappa][i], -ETABAR) < 0) { + if (fp_cmp(&u[kappa][i], &eta_bar) > 0 || fp_cmp(&u[kappa][i], &neg_eta_bar) < 0) { done = 0; - dpe_set(Xf, u[kappa][i]); - dpe_round(Xf, Xf); - dpe_get_z(X, Xf); + copy(&u[kappa][i], &Xf); + fp_round(&Xf); + to_mpz(&Xf, X); // Update basis: b_κ ← b_κ - X·b_i for (int j = 0; j < 4; j++) { ibz_mul(&tmpI, &X, &(*basis)[j][i]); @@ -91,8 +252,8 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // // Update u[kappa][j] for (int j = 0; j < i; j++) { - dpe_mul(tmpF, Xf, u[i][j]); - dpe_sub(u[kappa][j], u[kappa][j], tmpF); + fp_mul(&Xf, &u[i][j], &tmpF); + fp_sub(&u[kappa][j], &tmpF, &u[kappa][j]); } } } @@ -100,16 +261,16 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Check Lovasz' conditions // lovasz[0] = ‖b_κ‖² - dpe_set_z(lovasz[0], (*G)[kappa][kappa]); + from_mpz((*G)[kappa][kappa], &lovasz[0]); // lovasz[i] = lovasz[i-1] - u[κ][i-1]·r[κ][i-1] for (int i = 1; i < kappa; i++) { - dpe_mul(tmpF, u[kappa][i - 1], r[kappa][i - 1]); - dpe_sub(lovasz[i], lovasz[i - 1], tmpF); + fp_mul(&u[kappa][i - 1], &r[kappa][i - 1], &tmpF); + fp_sub(&lovasz[i - 1], &tmpF, &lovasz[i]); } int swap; for (swap = kappa; swap > 0; swap--) { - dpe_mul(tmpF, delta_bar, r[swap - 1][swap - 1]); - if (dpe_cmp(tmpF, lovasz[swap - 1]) < 0) + fp_mul(&delta_bar, &r[swap - 1][swap - 1], &tmpF); + if (fp_cmp(&tmpF, &lovasz[swap - 1]) < 0) break; } @@ -127,10 +288,10 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) } // Copy row u[κ] and r[κ] in swap position, ignore what follows for (int i = 0; i < swap; i++) { - dpe_set(u[swap][i], u[kappa][i]); - dpe_set(r[swap][i], r[kappa][i]); + copy(&u[kappa][i], &u[swap][i]); + copy(&r[kappa][i], &r[swap][i]); } - dpe_set(r[swap][swap], lovasz[swap]); + copy(&lovasz[swap], &r[swap][swap]); // swap complete kappa = swap; } @@ -142,15 +303,15 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Check size-reducedness for (int i = 0; i < 4; i++) for (int j = 0; j < i; j++) { - dpe_abs(u[i][j], u[i][j]); - assert(dpe_cmp_d(u[i][j], ETABAR) <= 0); + fp_abs(&u[i][j], &u[i][j]); + assert(fp_cmp(&u[i][j], &eta_bar) <= 0); } // Check Lovasz' conditions for (int i = 1; i < 4; i++) { - dpe_mul(tmpF, u[i][i - 1], u[i][i - 1]); - dpe_sub(tmpF, dpe_const_DELTABAR, tmpF); - dpe_mul(tmpF, tmpF, r[i - 1][i - 1]); - assert(dpe_cmp(tmpF, r[i][i]) <= 0); + fp_mul(&u[i][i - 1], &u[i][i - 1], &tmpF); + fp_sub(&delta_bar, &tmpF, &tmpF); + fp_mul(&tmpF, &r[i - 1][i - 1], &tmpF); + assert(fp_cmp(&tmpF, &r[i][i]) <= 0); } #endif @@ -163,18 +324,6 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Clearinghouse ibz_finalize(&X); ibz_finalize(&tmpI); - 
dpe_clear(dpe_const_one); - dpe_clear(dpe_const_DELTABAR); - dpe_clear(Xf); - dpe_clear(tmpF); - dpe_clear(delta_bar); - for (int i = 0; i < 4; i++) { - dpe_clear(lovasz[i]); - for (int j = 0; j <= i; j++) { - dpe_clear(r[i][j]); - dpe_clear(u[i][j]); - } - } } int diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/COPYING.LGPL b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/COPYING.LGPL deleted file mode 100644 index 0a041280bd..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/COPYING.LGPL +++ /dev/null @@ -1,165 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. 
You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. 
Revised Versions of the GNU Lesser General Public License.
-
-  The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
-  Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
-  If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dpe.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dpe.h
deleted file mode 100644
index b9a7a35e0b..0000000000
--- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dpe.h
+++ /dev/null
@@ -1,743 +0,0 @@
-/* Copyright (C) 2004-2024 Patrick Pelissier, Paul Zimmermann, LORIA/INRIA.
-
-This file is part of the DPE Library.
-
-The DPE Library is free software; you can redistribute it and/or modify
-it under the terms of the GNU Lesser General Public License as published by
-the Free Software Foundation; either version 3 of the License, or (at your
-option) any later version.
-
-The DPE Library is distributed in the hope that it will be useful, but
-WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
-License for more details.
-
-You should have received a copy of the GNU Lesser General Public License
-along with the DPE Library; see the file COPYING.LIB.
-If not, see <https://www.gnu.org/licenses/>. */
-
-#ifndef __DPE
-#define __DPE
-
-#include <stdlib.h> /* For abort */
-#include <stdio.h> /* For fprintf */
-#include <math.h> /* for round, floor, ceil */
-#include <limits.h>
-
-/* if you change the version, please change it in Makefile too */
-#define DPE_VERSION_MAJOR 1
-#define DPE_VERSION_MINOR 7
-
-#if defined(__GNUC__) && (__GNUC__ >= 3)
-# define DPE_LIKELY(x) (__builtin_expect(!!(x),1))
-# define DPE_UNLIKELY(x) (__builtin_expect((x),0))
-# define DPE_UNUSED_ATTR __attribute__((unused))
-#else
-# define DPE_LIKELY(x) (x)
-# define DPE_UNLIKELY(x) (x)
-# define DPE_UNUSED_ATTR
-#endif
-
-/* If no user defined mode, define it to double */
-#if !defined(DPE_USE_DOUBLE) && !defined(DPE_USE_LONGDOUBLE) && !defined(DPE_USE_FLOAT128)
-# define DPE_USE_DOUBLE
-#endif
-
-#if defined(DPE_USE_DOUBLE) && defined(DPE_USE_LONGDOUBLE)
-# error "Either DPE_USE_DOUBLE or DPE_USE_LONGDOUBLE shall be defined."
-#elif defined(DPE_USE_DOUBLE) && defined(DPE_USE_USE_FLOAT128)
-# error "Either DPE_USE_DOUBLE or DPE_USE_FLOAT128 shall be defined."
-#elif defined(DPE_USE_LONG_DOUBLE) && defined(DPE_USE_USE_FLOAT128)
-# error "Either DPE_USE_LONG_DOUBLE or DPE_USE_FLOAT128 shall be defined."
-#endif - -#if (defined(__i386) || defined (__x86_64)) && !defined(DPE_LITTLEENDIAN32) && defined(DPE_USE_DOUBLE) -# define DPE_LITTLEENDIAN32 -#endif - -#if (defined(__STDC_VERSION__) && (__STDC_VERSION__>=199901L)) || defined (__GLIBC__) -# define DPE_DEFINE_ROUND_TRUNC -#endif - -#if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 43 -# define DPE_ISFINITE __builtin_isfinite -#elif defined(isfinite) -# define DPE_ISFINITE isfinite /* new C99 function */ -#else -# define DPE_ISFINITE finite /* obsolete BSD function */ -#endif - -/* DPE_LDEXP(DPE_DOUBLE m, DPEEXP e) return x = m * 2^e */ -/* DPE_FREXP(DPE_DOUBLE x, DPEEXP *e) returns m, e such that x = m * 2^e with - 1/2 <= m < 1 */ -/* DPE_ROUND(DPE_DOUBLE x) returns the nearest integer to x */ -#if defined(DPE_USE_DOUBLE) -# define DPE_DOUBLE double /* mantissa type */ -# define DPE_BITSIZE 53 /* bitsize of DPE_DOUBLE */ -# define DPE_2_POW_BITSIZE 0x1P53 -# if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 40 -# define DPE_LDEXP __builtin_ldexp -# define DPE_FREXP __builtin_frexp -# define DPE_FLOOR __builtin_floor -# define DPE_CEIL __builtin_ceil -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND __builtin_round -# define DPE_TRUNC __builtin_trunc -# endif -# else -# define DPE_LDEXP ldexp -# define DPE_FREXP frexp -# define DPE_FLOOR floor -# define DPE_CEIL ceil -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND round -# define DPE_TRUNC trunc -# endif -# endif - -#elif defined(DPE_USE_LONGDOUBLE) -# define DPE_DOUBLE long double -# define DPE_BITSIZE 64 -# define DPE_2_POW_BITSIZE 0x1P64 -# define DPE_LDEXP ldexpl -# define DPE_FREXP frexpl -# define DPE_FLOOR floorl -# define DPE_CEIL ceill -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND roundl -# define DPE_TRUNC truncl -# endif - -#elif defined(DPE_USE_FLOAT128) -# include "quadmath.h" -# define DPE_DOUBLE __float128 -# define DPE_BITSIZE 113 -# define DPE_2_POW_BITSIZE 0x1P113 -# define DPE_LDEXP ldexpq -# define DPE_FLOOR floorq -# define DPE_CEIL ceilq -# define DPE_FREXP frexpq -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND roundq -# define DPE_TRUNC truncq -# endif - -#else -# error "neither DPE_USE_DOUBLE, nor DPE_USE_LONGDOUBLE, nor DPE_USE_FLOAT128 is defined" -#endif - -/* If no C99, do what we can */ -#ifndef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND(x) ((DPE_DOUBLE) ((long long) ((x) + ((x) >= 0.0 ? 0.5 : -0.5)))) -# define DPE_TRUNC(x) ((DPE_DOUBLE) ((long long) ((x) + 0.0))) -#endif - -#if defined(DPE_USE_LONG) -# define DPE_EXP_T long /* exponent type */ -# define DPE_EXPMIN LONG_MIN /* smallest possible exponent */ -#elif defined(DPE_USE_LONGLONG) -# define DPE_EXP_T long long -# define DPE_EXPMIN LLONG_MIN -#else -# define DPE_EXP_T int /* exponent type */ -# define DPE_EXPMIN INT_MIN /* smallest possible exponent */ -#endif - -#ifdef DPE_LITTLEENDIAN32 -typedef union -{ - double d; -#if INT_MAX == 0x7FFFFFFFL - int i[2]; -#elif LONG_MAX == 0x7FFFFFFFL - long i[2]; -#elif SHRT_MAX == 0x7FFFFFFFL - short i[2]; -#else -# error Cannot find a 32 bits integer type. -#endif -} dpe_double_words; -#endif - -typedef struct -{ - DPE_DOUBLE d; /* significand */ - DPE_EXP_T exp; /* exponent */ -} dpe_struct; - -typedef dpe_struct dpe_t[1]; - -#define DPE_MANT(x) ((x)->d) -#define DPE_EXP(x) ((x)->exp) -#define DPE_SIGN(x) ((DPE_MANT(x) < 0.0) ? 
-1 : (DPE_MANT(x) > 0.0)) - -#define DPE_INLINE static inline - -/* initialize */ -DPE_INLINE void -dpe_init (dpe_t x DPE_UNUSED_ATTR) -{ -} - -/* clear */ -DPE_INLINE void -dpe_clear (dpe_t x DPE_UNUSED_ATTR) -{ -} - -/* set x to y */ -DPE_INLINE void -dpe_set (dpe_t x, dpe_t y) -{ - DPE_MANT(x) = DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y); -} - -/* set x to -y */ -DPE_INLINE void -dpe_neg (dpe_t x, dpe_t y) -{ - DPE_MANT(x) = -DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y); -} - -/* set x to |y| */ -DPE_INLINE void -dpe_abs (dpe_t x, dpe_t y) -{ - DPE_MANT(x) = (DPE_MANT(y) >= 0) ? DPE_MANT(y) : -DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y); -} - -/* set mantissa in [1/2, 1), except for 0 which has minimum exponent */ -/* FIXME: don't inline this function yet ? */ -static void -dpe_normalize (dpe_t x) -{ - if (DPE_UNLIKELY (DPE_MANT(x) == 0.0 || DPE_ISFINITE (DPE_MANT(x)) == 0)) - { - if (DPE_MANT(x) == 0.0) - DPE_EXP(x) = DPE_EXPMIN; - /* otherwise let the exponent of NaN, Inf unchanged */ - } - else - { - DPE_EXP_T e; -#ifdef DPE_LITTLEENDIAN32 /* 32-bit little endian */ - dpe_double_words dw; - dw.d = DPE_MANT(x); - e = (dw.i[1] >> 20) & 0x7FF; /* unbiased exponent, 1022 for m=1/2 */ - DPE_EXP(x) += e - 1022; - dw.i[1] = (dw.i[1] & 0x800FFFFF) | 0x3FE00000; - DPE_MANT(x) = dw.d; -#else /* portable code */ - double m = DPE_MANT(x); - DPE_MANT(x) = DPE_FREXP (m, &e); - DPE_EXP(x) += e; -#endif - } -} - -#if defined(DPE_USE_DOUBLE) -static const double dpe_scale_tab[54] = { - 0x1P0, 0x1P-1, 0x1P-2, 0x1P-3, 0x1P-4, 0x1P-5, 0x1P-6, 0x1P-7, 0x1P-8, - 0x1P-9, 0x1P-10, 0x1P-11, 0x1P-12, 0x1P-13, 0x1P-14, 0x1P-15, 0x1P-16, - 0x1P-17, 0x1P-18, 0x1P-19, 0x1P-20, 0x1P-21, 0x1P-22, 0x1P-23, 0x1P-24, - 0x1P-25, 0x1P-26, 0x1P-27, 0x1P-28, 0x1P-29, 0x1P-30, 0x1P-31, 0x1P-32, - 0x1P-33, 0x1P-34, 0x1P-35, 0x1P-36, 0x1P-37, 0x1P-38, 0x1P-39, 0x1P-40, - 0x1P-41, 0x1P-42, 0x1P-43, 0x1P-44, 0x1P-45, 0x1P-46, 0x1P-47, 0x1P-48, - 0x1P-49, 0x1P-50, 0x1P-51, 0x1P-52, 0x1P-53}; -#endif - -DPE_INLINE DPE_DOUBLE -dpe_scale (DPE_DOUBLE d, int s) -{ - /* -DPE_BITSIZE < s <= 0 and 1/2 <= d < 1 */ -#if defined(DPE_USE_DOUBLE) - return d * dpe_scale_tab [-s]; -#else /* portable code */ - return DPE_LDEXP (d, s); -#endif -} - -/* set x to y */ -DPE_INLINE void -dpe_set_d (dpe_t x, double y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -/* set x to y */ -DPE_INLINE void -dpe_set_ld (dpe_t x, long double y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -/* set x to y */ -DPE_INLINE void -dpe_set_ui (dpe_t x, unsigned long y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -/* set x to y */ -DPE_INLINE void -dpe_set_si (dpe_t x, long y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -DPE_INLINE long -dpe_get_si (dpe_t x) -{ - DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); - return (long) d; -} - -DPE_INLINE unsigned long -dpe_get_ui (dpe_t x) -{ - DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); - return (d < 0.0) ? 
0 : (unsigned long) d; -} - -DPE_INLINE double -dpe_get_d (dpe_t x) -{ - return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); -} - -DPE_INLINE long double -dpe_get_ld (dpe_t x) -{ - return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); -} - -#if defined(__GMP_H__) || defined(__MINI_GMP_H__) -/* set x to y */ -DPE_INLINE void -dpe_set_z (dpe_t x, mpz_t y) -{ - long e; - DPE_MANT(x) = mpz_get_d_2exp (&e, y); - DPE_EXP(x) = (DPE_EXP_T) e; -} - -/* set x to y, rounded to nearest */ -DPE_INLINE void -dpe_get_z (mpz_t x, dpe_t y) -{ - DPE_EXP_T ey = DPE_EXP(y); - if (ey >= DPE_BITSIZE) /* y is an integer */ - { - DPE_DOUBLE d = DPE_MANT(y) * DPE_2_POW_BITSIZE; /* d is an integer */ - mpz_set_d (x, d); /* should be exact */ - mpz_mul_2exp (x, x, (unsigned long) ey - DPE_BITSIZE); - } - else /* DPE_EXP(y) < DPE_BITSIZE */ - { - if (DPE_UNLIKELY (ey < 0)) /* |y| < 1/2 */ - mpz_set_ui (x, 0); - else - { - DPE_DOUBLE d = DPE_LDEXP(DPE_MANT(y), ey); - mpz_set_d (x, (double) DPE_ROUND(d)); - } - } -} - -/* return e and x such that y = x*2^e */ -DPE_INLINE mp_exp_t -dpe_get_z_exp (mpz_t x, dpe_t y) -{ - mpz_set_d (x, DPE_MANT (y) * DPE_2_POW_BITSIZE); - return DPE_EXP(y) - DPE_BITSIZE; -} -#endif - -/* x <- y + z, assuming y and z are normalized, returns x normalized */ -DPE_INLINE void -dpe_add (dpe_t x, dpe_t y, dpe_t z) -{ - if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) - /* |z| < 1/2*ulp(y), thus o(y+z) = y */ - dpe_set (x, y); - else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) - dpe_set (x, z); - else - { - DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ - - if (d >= 0) - { - DPE_MANT(x) = DPE_MANT(y) + dpe_scale (DPE_MANT(z), -d); - DPE_EXP(x) = DPE_EXP(y); - } - else - { - DPE_MANT(x) = DPE_MANT(z) + dpe_scale (DPE_MANT(y), d); - DPE_EXP(x) = DPE_EXP(z); - } - dpe_normalize (x); - } -} - -/* x <- y - z, assuming y and z are normalized, returns x normalized */ -DPE_INLINE void -dpe_sub (dpe_t x, dpe_t y, dpe_t z) -{ - if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) - /* |z| < 1/2*ulp(y), thus o(y-z) = y */ - dpe_set (x, y); - else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) - dpe_neg (x, z); - else - { - DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ - - if (d >= 0) - { - DPE_MANT(x) = DPE_MANT(y) - dpe_scale (DPE_MANT(z), -d); - DPE_EXP(x) = DPE_EXP(y); - } - else - { - DPE_MANT(x) = dpe_scale (DPE_MANT(y), d) - DPE_MANT(z); - DPE_EXP(x) = DPE_EXP(z); - } - dpe_normalize (x); - } -} - -/* x <- y * z, assuming y and z are normalized, returns x normalized */ -DPE_INLINE void -dpe_mul (dpe_t x, dpe_t y, dpe_t z) -{ - DPE_MANT(x) = DPE_MANT(y) * DPE_MANT(z); - DPE_EXP(x) = DPE_EXP(y) + DPE_EXP(z); - dpe_normalize (x); -} - -/* x <- sqrt(y), assuming y is normalized, returns x normalized */ -DPE_INLINE void -dpe_sqrt (dpe_t x, dpe_t y) -{ - DPE_EXP_T ey = DPE_EXP(y); - if (ey % 2) - { - /* since 1/2 <= my < 1, 1/4 <= my/2 < 1 */ - DPE_MANT(x) = sqrt (0.5 * DPE_MANT(y)); - DPE_EXP(x) = (ey + 1) / 2; - } - else - { - DPE_MANT(x) = sqrt (DPE_MANT(y)); - DPE_EXP(x) = ey / 2; - } -} - -/* x <- y / z, assuming y and z are normalized, returns x normalized. - Assumes z is not zero. 
*/ -DPE_INLINE void -dpe_div (dpe_t x, dpe_t y, dpe_t z) -{ - DPE_MANT(x) = DPE_MANT(y) / DPE_MANT(z); - DPE_EXP(x) = DPE_EXP(y) - DPE_EXP(z); - dpe_normalize (x); -} - -/* x <- y * z, assuming y normalized, returns x normalized */ -DPE_INLINE void -dpe_mul_ui (dpe_t x, dpe_t y, unsigned long z) -{ - DPE_MANT(x) = DPE_MANT(y) * (DPE_DOUBLE) z; - DPE_EXP(x) = DPE_EXP(y); - dpe_normalize (x); -} - -/* x <- y / z, assuming y normalized, z non-zero, returns x normalized */ -DPE_INLINE void -dpe_div_ui (dpe_t x, dpe_t y, unsigned long z) -{ - DPE_MANT(x) = DPE_MANT(y) / (DPE_DOUBLE) z; - DPE_EXP(x) = DPE_EXP(y); - dpe_normalize (x); -} - -/* x <- y * 2^e */ -DPE_INLINE void -dpe_mul_2exp (dpe_t x, dpe_t y, unsigned long e) -{ - DPE_MANT(x) = DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y) + (DPE_EXP_T) e; -} - -/* x <- y / 2^e */ -DPE_INLINE void -dpe_div_2exp (dpe_t x, dpe_t y, unsigned long e) -{ - DPE_MANT(x) = DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y) - (DPE_EXP_T) e; -} - -/* return e and x such that y = x*2^e (equality is not guaranteed if the 'long' - type has fewer bits than the significand in dpe_t) */ -DPE_INLINE DPE_EXP_T -dpe_get_si_exp (long *x, dpe_t y) -{ - if (sizeof(long) == 4) /* 32-bit word: long has 31 bits */ - { - *x = (long) (DPE_MANT(y) * 2147483648.0); - return DPE_EXP(y) - 31; - } - else if (sizeof(long) == 8) /* 64-bit word: long has 63 bits */ - { - *x = (long) (DPE_MANT (y) * 9223372036854775808.0); - return DPE_EXP(y) - 63; - } - else - { - fprintf (stderr, "Error, neither 32-bit nor 64-bit word\n"); - exit (1); - } -} - -static DPE_UNUSED_ATTR int dpe_str_prec = 16; -static int dpe_out_str (FILE *s, int base, dpe_t x) DPE_UNUSED_ATTR; - -static int -dpe_out_str (FILE *s, int base, dpe_t x) -{ - DPE_DOUBLE d = DPE_MANT(x); - DPE_EXP_T e2 = DPE_EXP(x); - int e10 = 0; - char sign = ' '; - if (DPE_UNLIKELY (base != 10)) - { - fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); - exit (1); - } - if (d == 0.0) -#ifdef DPE_USE_DOUBLE - return fprintf (s, "%1.*f", dpe_str_prec, d); -#else - return fprintf (s, "%1.*Lf", dpe_str_prec, (long double) d); -#endif - if (d < 0) - { - d = -d; - sign = '-'; - } - if (e2 > 0) - { - while (e2 > 0) - { - e2 --; - d *= 2.0; - if (d >= 10.0) - { - d /= 10.0; - e10 ++; - } - } - } - else /* e2 <= 0 */ - { - while (e2 < 0) - { - e2 ++; - d /= 2.0; - if (d < 1.0) - { - d *= 10.0; - e10 --; - } - } - } -#ifdef DPE_USE_DOUBLE - return fprintf (s, "%c%1.*f*10^%d", sign, dpe_str_prec, d, e10); -#else - return fprintf (s, "%c%1.*Lf*10^%d", sign, dpe_str_prec, (long double) d, e10); -#endif -} - -static size_t dpe_inp_str (dpe_t x, FILE *s, int base) DPE_UNUSED_ATTR; - -static size_t -dpe_inp_str (dpe_t x, FILE *s, int base) -{ - size_t res; - DPE_DOUBLE d; - if (DPE_UNLIKELY (base != 10)) - { - fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); - exit (1); - } -#ifdef DPE_USE_DOUBLE - res = fscanf (s, "%lf", &d); -#elif defined(DPE_USE_LONGDOUBLE) - res = fscanf (s, "%Lf", &d); -#else - { - long double d_ld; - res = fscanf (s, "%Lf", &d_ld); - d = d_ld; - } -#endif - dpe_set_d (x, d); - return res; -} - -DPE_INLINE void -dpe_dump (dpe_t x) -{ - dpe_out_str (stdout, 10, x); - putchar ('\n'); -} - -DPE_INLINE int -dpe_zero_p (dpe_t x) -{ - return DPE_MANT (x) == 0; -} - -/* return a positive value if x > y - a negative value if x < y - and 0 otherwise (x=y). 
*/ -DPE_INLINE int -dpe_cmp (dpe_t x, dpe_t y) -{ - int sx = DPE_SIGN(x); - int d = sx - DPE_SIGN(y); - - if (d != 0) - return d; - else if (DPE_EXP(x) > DPE_EXP(y)) - return (sx > 0) ? 1 : -1; - else if (DPE_EXP(y) > DPE_EXP(x)) - return (sx > 0) ? -1 : 1; - else /* DPE_EXP(x) = DPE_EXP(y) */ - return (DPE_MANT(x) < DPE_MANT(y)) ? -1 : (DPE_MANT(x) > DPE_MANT(y)); -} - -DPE_INLINE int -dpe_cmp_d (dpe_t x, double d) -{ - dpe_t y; - dpe_set_d (y, d); - return dpe_cmp (x, y); -} - -DPE_INLINE int -dpe_cmp_ui (dpe_t x, unsigned long d) -{ - dpe_t y; - dpe_set_ui (y, d); - return dpe_cmp (x, y); -} - -DPE_INLINE int -dpe_cmp_si (dpe_t x, long d) -{ - dpe_t y; - dpe_set_si (y, d); - return dpe_cmp (x, y); -} - -/* set x to integer nearest to y */ -DPE_INLINE void -dpe_round (dpe_t x, dpe_t y) -{ - if (DPE_EXP(y) < 0) /* |y| < 1/2 */ - dpe_set_ui (x, 0); - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set (x, y); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, DPE_ROUND(d)); - } -} - -/* set x to the fractional part of y, defined as y - trunc(y), thus the - fractional part has absolute value in [0, 1), and same sign as y */ -DPE_INLINE void -dpe_frac (dpe_t x, dpe_t y) -{ - /* If |y| is smaller than 1, keep it */ - if (DPE_EXP(y) <= 0) - dpe_set (x, y); - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set_ui (x, 0); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, d - DPE_TRUNC(d)); - } -} - -/* set x to largest integer <= y */ -DPE_INLINE void -dpe_floor (dpe_t x, dpe_t y) -{ - if (DPE_EXP(y) <= 0) /* |y| < 1 */ - { - if (DPE_SIGN(y) >= 0) /* 0 <= y < 1 */ - dpe_set_ui (x, 0); - else /* -1 < y < 0 */ - dpe_set_si (x, -1); - } - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set (x, y); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, DPE_FLOOR(d)); - } -} - -/* set x to smallest integer >= y */ -DPE_INLINE void -dpe_ceil (dpe_t x, dpe_t y) -{ - if (DPE_EXP(y) <= 0) /* |y| < 1 */ - { - if (DPE_SIGN(y) > 0) /* 0 < y < 1 */ - dpe_set_ui (x, 1); - else /* -1 < y <= 0 */ - dpe_set_si (x, 0); - } - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set (x, y); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, DPE_CEIL(d)); - } -} - -DPE_INLINE void -dpe_swap (dpe_t x, dpe_t y) -{ - DPE_EXP_T i = DPE_EXP (x); - DPE_DOUBLE d = DPE_MANT (x); - DPE_EXP (x) = DPE_EXP (y); - DPE_MANT (x) = DPE_MANT (y); - DPE_EXP (y) = i; - DPE_MANT (y) = d; -} - -#endif /* __DPE */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/l2.c index 8c49b21d20..5491ee44d0 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/l2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/l2.c @@ -2,47 +2,208 @@ #include "lll_internals.h" #include "internal.h" -#include "dpe.h" +#include +#include + // Access entry of symmetric matrix #define SYM(M, i, j) (i < j ? 
&M[j][i] : &M[i][j]) -void -quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) +typedef struct fp_num { + double s; + int e; +} fp_num; + +static void +copy(fp_num *x, fp_num *r) { - dpe_t dpe_const_one, dpe_const_DELTABAR; + r->s = x->s; + r->e = x->e; +} - dpe_init(dpe_const_one); - dpe_set_ui(dpe_const_one, 1); +static void +normalize(fp_num *x) +{ + if (x->s == 0.0 || isfinite(x->s) == 0) { + if (x->s == 0.0) { + x->e = INT_MIN; + } + } else { + int e; + x->s = frexp(x->s, &e); + x->e += e; + } +} - dpe_init(dpe_const_DELTABAR); - dpe_set_d(dpe_const_DELTABAR, DELTABAR); +static void +to_one(fp_num *x) +{ + x->s = 1; + x->e = 0; +} - // fp variables for Gram-Schmidt orthogonalization and Lovasz' conditions - dpe_t r[4][4], u[4][4], lovasz[4]; - for (int i = 0; i < 4; i++) { - dpe_init(lovasz[i]); - for (int j = 0; j <= i; j++) { - dpe_init(r[i][j]); - dpe_init(u[i][j]); - } +static void +to_deltabar(fp_num *x) +{ + x->s = DELTABAR; + x->e = 0; +} + +static void +to_etabar(fp_num *x) +{ + x->s = ETABAR; + x->e = 0; +} + +static void +from_mpz(const mpz_t x, fp_num *r) +{ + long exp = 0; + r->s = mpz_get_d_2exp(&exp, x); + r->e = exp; +} + +static void +to_mpz(const fp_num *x, mpz_t r) +{ + if (x->e >= DBL_MANT_DIG) { + double s = x->s * 0x1P53; + mpz_set_d(r, s); + mpz_mul_2exp(r, r, x->e - DBL_MANT_DIG); + } else if (x->e < 0) { + mpz_set_ui(r, 0); + } else { + double s = ldexp(x->s, x->e); + mpz_set_d(r, round(s)); } +} - // threshold for swaps - dpe_t delta_bar; - dpe_init(delta_bar); - dpe_set_d(delta_bar, DELTABAR); +static void +fp_mul(const fp_num *x, const fp_num *y, fp_num *r) +{ + r->s = x->s * y->s; + r->e = x->e + y->e; + normalize(r); + +} + +static void +fp_div(const fp_num *x, const fp_num *y, fp_num *r) +{ + r->s = x->s / y->s; + r->e = x->e - y->e; + normalize(r); +} + +static void +fp_sub(const fp_num *x, const fp_num *y, fp_num *r) +{ + if (x->e > y->e + DBL_MANT_DIG) { + r->s = x->s; + r->e = x->e; + } else if (y->e > x->e + DBL_MANT_DIG) { + r->s = -y->s; + r->e = y->e; + } else { + int e = x->e - y->e; + + if (e >= 0) { + r->s = x->s - ldexp(y->s, -e); + r->e = x->e; + } else { + r->s = ldexp(x->s, e) - y->s; + r->e = y->e; + } + + normalize(r); + } +} + +static inline int +sign(const fp_num *x) +{ + if (x->s < 0.0) + return -1; + return 1; +} + +static int +fp_cmp(const fp_num *x, const fp_num *y) +{ + int sign_x = sign(x); + int sign_y = sign(y); + + if (sign_x != sign_y) + return sign_x - sign_y; + else if (x->e > y->e) + return sign_x; + else if (y->e > x->e) + return -sign_x; + else if (x->s > y->s) + return 1; + else if (x->s < y->s) + return -1; + else + return 0; +} + +static void +fp_round(fp_num *x) +{ + if (x->e < 0) { + x->s = 0; + x->e = 0; + } else if (x->e >= DBL_MANT_DIG) { + return; + } else { + double tmp; + tmp = ldexp(x->s, x->e); + x->s = round(tmp); + x->e = 0; + normalize(x); + } +} + +static void +fp_abs(const fp_num *x, fp_num *y) { + if (x->s < 0.0) { + y->s = -x->s; + } else { + y->s = x->s; + } + y->e = x->e; +} + +void +quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) +{ + fp_num const_one = {0}; + fp_num delta_bar = {0}; + fp_num eta_bar = {0}; + fp_num neg_eta_bar = {0}; + to_one(&const_one); + to_deltabar(&delta_bar); + eta_bar.s = ETABAR; + eta_bar.e = 0; + neg_eta_bar.s = -ETABAR; + neg_eta_bar.e = 0; + normalize(&eta_bar); + normalize(&neg_eta_bar); + + fp_num r[4][4] = {0}; + fp_num u[4][4] = {0}; + fp_num lovasz[4] = {0}; + + fp_num Xf = {0}; + fp_num tmpF = {0}; - // Other work variables - dpe_t Xf, tmpF; - 
dpe_init(Xf); - dpe_init(tmpF); ibz_t X, tmpI; ibz_init(&X); ibz_init(&tmpI); // Main L² loop - dpe_set_z(r[0][0], (*G)[0][0]); + from_mpz((*G)[0][0], &r[0][0]); int kappa = 1; while (kappa < 4) { // size reduce b_κ @@ -52,23 +213,23 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Loop invariant: // r[κ][j] ≈ u[κ][j] ‖b_j*‖² ≈ 〈b_κ, b_j*〉 for (int j = 0; j <= kappa; j++) { - dpe_set_z(r[kappa][j], (*G)[kappa][j]); + from_mpz((*G)[kappa][j], &r[kappa][j]); for (int k = 0; k < j; k++) { - dpe_mul(tmpF, r[kappa][k], u[j][k]); - dpe_sub(r[kappa][j], r[kappa][j], tmpF); + fp_mul(&r[kappa][k], &u[j][k], &tmpF); + fp_sub(&r[kappa][j], &tmpF, &r[kappa][j]); } if (j < kappa) - dpe_div(u[kappa][j], r[kappa][j], r[j][j]); + fp_div(&r[kappa][j], &r[j][j], &u[kappa][j]); } done = 1; // size reduce for (int i = kappa - 1; i >= 0; i--) { - if (dpe_cmp_d(u[kappa][i], ETABAR) > 0 || dpe_cmp_d(u[kappa][i], -ETABAR) < 0) { + if (fp_cmp(&u[kappa][i], &eta_bar) > 0 || fp_cmp(&u[kappa][i], &neg_eta_bar) < 0) { done = 0; - dpe_set(Xf, u[kappa][i]); - dpe_round(Xf, Xf); - dpe_get_z(X, Xf); + copy(&u[kappa][i], &Xf); + fp_round(&Xf); + to_mpz(&Xf, X); // Update basis: b_κ ← b_κ - X·b_i for (int j = 0; j < 4; j++) { ibz_mul(&tmpI, &X, &(*basis)[j][i]); @@ -91,8 +252,8 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // // Update u[kappa][j] for (int j = 0; j < i; j++) { - dpe_mul(tmpF, Xf, u[i][j]); - dpe_sub(u[kappa][j], u[kappa][j], tmpF); + fp_mul(&Xf, &u[i][j], &tmpF); + fp_sub(&u[kappa][j], &tmpF, &u[kappa][j]); } } } @@ -100,16 +261,16 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Check Lovasz' conditions // lovasz[0] = ‖b_κ‖² - dpe_set_z(lovasz[0], (*G)[kappa][kappa]); + from_mpz((*G)[kappa][kappa], &lovasz[0]); // lovasz[i] = lovasz[i-1] - u[κ][i-1]·r[κ][i-1] for (int i = 1; i < kappa; i++) { - dpe_mul(tmpF, u[kappa][i - 1], r[kappa][i - 1]); - dpe_sub(lovasz[i], lovasz[i - 1], tmpF); + fp_mul(&u[kappa][i - 1], &r[kappa][i - 1], &tmpF); + fp_sub(&lovasz[i - 1], &tmpF, &lovasz[i]); } int swap; for (swap = kappa; swap > 0; swap--) { - dpe_mul(tmpF, delta_bar, r[swap - 1][swap - 1]); - if (dpe_cmp(tmpF, lovasz[swap - 1]) < 0) + fp_mul(&delta_bar, &r[swap - 1][swap - 1], &tmpF); + if (fp_cmp(&tmpF, &lovasz[swap - 1]) < 0) break; } @@ -127,10 +288,10 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) } // Copy row u[κ] and r[κ] in swap position, ignore what follows for (int i = 0; i < swap; i++) { - dpe_set(u[swap][i], u[kappa][i]); - dpe_set(r[swap][i], r[kappa][i]); + copy(&u[kappa][i], &u[swap][i]); + copy(&r[kappa][i], &r[swap][i]); } - dpe_set(r[swap][swap], lovasz[swap]); + copy(&lovasz[swap], &r[swap][swap]); // swap complete kappa = swap; } @@ -142,15 +303,15 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Check size-reducedness for (int i = 0; i < 4; i++) for (int j = 0; j < i; j++) { - dpe_abs(u[i][j], u[i][j]); - assert(dpe_cmp_d(u[i][j], ETABAR) <= 0); + fp_abs(&u[i][j], &u[i][j]); + assert(fp_cmp(&u[i][j], &eta_bar) <= 0); } // Check Lovasz' conditions for (int i = 1; i < 4; i++) { - dpe_mul(tmpF, u[i][i - 1], u[i][i - 1]); - dpe_sub(tmpF, dpe_const_DELTABAR, tmpF); - dpe_mul(tmpF, tmpF, r[i - 1][i - 1]); - assert(dpe_cmp(tmpF, r[i][i]) <= 0); + fp_mul(&u[i][i - 1], &u[i][i - 1], &tmpF); + fp_sub(&delta_bar, &tmpF, &tmpF); + fp_mul(&tmpF, &r[i - 1][i - 1], &tmpF); + assert(fp_cmp(&tmpF, &r[i][i]) <= 0); } #endif @@ -163,18 +324,6 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Clearinghouse ibz_finalize(&X); ibz_finalize(&tmpI); - 
dpe_clear(dpe_const_one); - dpe_clear(dpe_const_DELTABAR); - dpe_clear(Xf); - dpe_clear(tmpF); - dpe_clear(delta_bar); - for (int i = 0; i < 4; i++) { - dpe_clear(lovasz[i]); - for (int j = 0; j <= i; j++) { - dpe_clear(r[i][j]); - dpe_clear(u[i][j]); - } - } } int diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mem.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mem.h deleted file mode 100644 index ab8f6c6481..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mem.h +++ /dev/null @@ -1,24 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -#ifndef MEM_H -#define MEM_H -#include -#include - -/** - * Clears and frees allocated memory. - * - * @param[out] mem Memory to be cleared and freed. - * @param size Size of memory to be cleared and freed. - */ -void sqisign_secure_free(void *mem, size_t size); - -/** - * Clears memory. - * - * @param[out] mem Memory to be cleared. - * @param size Size of memory to be cleared. - */ -void sqisign_secure_clear(void *mem, size_t size); - -#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/COPYING.LGPL b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/COPYING.LGPL deleted file mode 100644 index 0a041280bd..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/COPYING.LGPL +++ /dev/null @@ -1,165 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. 
- - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. 
If you use option 4d1, you must provide the Installation
-   Information in the manner specified by section 6 of the GNU GPL
-   for conveying Corresponding Source.)
-
-  5. Combined Libraries.
-
-  You may place library facilities that are a work based on the
-Library side by side in a single library together with other library
-facilities that are not Applications and are not covered by this
-License, and convey such a combined library under terms of your
-choice, if you do both of the following:
-
-   a) Accompany the combined library with a copy of the same work based
-   on the Library, uncombined with any other library facilities,
-   conveyed under the terms of this License.
-
-   b) Give prominent notice with the combined library that part of it
-   is a work based on the Library, and explaining where to find the
-   accompanying uncombined form of the same work.
-
-  6. Revised Versions of the GNU Lesser General Public License.
-
-  The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
-  Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
-  If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dpe.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dpe.h
deleted file mode 100644
index b9a7a35e0b..0000000000
--- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dpe.h
+++ /dev/null
@@ -1,743 +0,0 @@
-/* Copyright (C) 2004-2024 Patrick Pelissier, Paul Zimmermann, LORIA/INRIA.
-
-This file is part of the DPE Library.
-
-The DPE Library is free software; you can redistribute it and/or modify
-it under the terms of the GNU Lesser General Public License as published by
-the Free Software Foundation; either version 3 of the License, or (at your
-option) any later version.
-
-The DPE Library is distributed in the hope that it will be useful, but
-WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
-License for more details.
-
-You should have received a copy of the GNU Lesser General Public License
-along with the DPE Library; see the file COPYING.LIB.
-If not, see <https://www.gnu.org/licenses/>.
*/ - -#ifndef __DPE -#define __DPE - -#include /* For abort */ -#include /* For fprintf */ -#include /* for round, floor, ceil */ -#include - -/* if you change the version, please change it in Makefile too */ -#define DPE_VERSION_MAJOR 1 -#define DPE_VERSION_MINOR 7 - -#if defined(__GNUC__) && (__GNUC__ >= 3) -# define DPE_LIKELY(x) (__builtin_expect(!!(x),1)) -# define DPE_UNLIKELY(x) (__builtin_expect((x),0)) -# define DPE_UNUSED_ATTR __attribute__((unused)) -#else -# define DPE_LIKELY(x) (x) -# define DPE_UNLIKELY(x) (x) -# define DPE_UNUSED_ATTR -#endif - -/* If no user defined mode, define it to double */ -#if !defined(DPE_USE_DOUBLE) && !defined(DPE_USE_LONGDOUBLE) && !defined(DPE_USE_FLOAT128) -# define DPE_USE_DOUBLE -#endif - -#if defined(DPE_USE_DOUBLE) && defined(DPE_USE_LONGDOUBLE) -# error "Either DPE_USE_DOUBLE or DPE_USE_LONGDOUBLE shall be defined." -#elif defined(DPE_USE_DOUBLE) && defined(DPE_USE_USE_FLOAT128) -# error "Either DPE_USE_DOUBLE or DPE_USE_FLOAT128 shall be defined." -#elif defined(DPE_USE_LONG_DOUBLE) && defined(DPE_USE_USE_FLOAT128) -# error "Either DPE_USE_LONG_DOUBLE or DPE_USE_FLOAT128 shall be defined." -#endif - -#if (defined(__i386) || defined (__x86_64)) && !defined(DPE_LITTLEENDIAN32) && defined(DPE_USE_DOUBLE) -# define DPE_LITTLEENDIAN32 -#endif - -#if (defined(__STDC_VERSION__) && (__STDC_VERSION__>=199901L)) || defined (__GLIBC__) -# define DPE_DEFINE_ROUND_TRUNC -#endif - -#if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 43 -# define DPE_ISFINITE __builtin_isfinite -#elif defined(isfinite) -# define DPE_ISFINITE isfinite /* new C99 function */ -#else -# define DPE_ISFINITE finite /* obsolete BSD function */ -#endif - -/* DPE_LDEXP(DPE_DOUBLE m, DPEEXP e) return x = m * 2^e */ -/* DPE_FREXP(DPE_DOUBLE x, DPEEXP *e) returns m, e such that x = m * 2^e with - 1/2 <= m < 1 */ -/* DPE_ROUND(DPE_DOUBLE x) returns the nearest integer to x */ -#if defined(DPE_USE_DOUBLE) -# define DPE_DOUBLE double /* mantissa type */ -# define DPE_BITSIZE 53 /* bitsize of DPE_DOUBLE */ -# define DPE_2_POW_BITSIZE 0x1P53 -# if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 40 -# define DPE_LDEXP __builtin_ldexp -# define DPE_FREXP __builtin_frexp -# define DPE_FLOOR __builtin_floor -# define DPE_CEIL __builtin_ceil -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND __builtin_round -# define DPE_TRUNC __builtin_trunc -# endif -# else -# define DPE_LDEXP ldexp -# define DPE_FREXP frexp -# define DPE_FLOOR floor -# define DPE_CEIL ceil -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND round -# define DPE_TRUNC trunc -# endif -# endif - -#elif defined(DPE_USE_LONGDOUBLE) -# define DPE_DOUBLE long double -# define DPE_BITSIZE 64 -# define DPE_2_POW_BITSIZE 0x1P64 -# define DPE_LDEXP ldexpl -# define DPE_FREXP frexpl -# define DPE_FLOOR floorl -# define DPE_CEIL ceill -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND roundl -# define DPE_TRUNC truncl -# endif - -#elif defined(DPE_USE_FLOAT128) -# include "quadmath.h" -# define DPE_DOUBLE __float128 -# define DPE_BITSIZE 113 -# define DPE_2_POW_BITSIZE 0x1P113 -# define DPE_LDEXP ldexpq -# define DPE_FLOOR floorq -# define DPE_CEIL ceilq -# define DPE_FREXP frexpq -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND roundq -# define DPE_TRUNC truncq -# endif - -#else -# error "neither DPE_USE_DOUBLE, nor DPE_USE_LONGDOUBLE, nor DPE_USE_FLOAT128 is defined" -#endif - -/* If no C99, do what we can */ -#ifndef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND(x) ((DPE_DOUBLE) ((long long) ((x) + ((x) >= 0.0 ? 
0.5 : -0.5)))) -# define DPE_TRUNC(x) ((DPE_DOUBLE) ((long long) ((x) + 0.0))) -#endif - -#if defined(DPE_USE_LONG) -# define DPE_EXP_T long /* exponent type */ -# define DPE_EXPMIN LONG_MIN /* smallest possible exponent */ -#elif defined(DPE_USE_LONGLONG) -# define DPE_EXP_T long long -# define DPE_EXPMIN LLONG_MIN -#else -# define DPE_EXP_T int /* exponent type */ -# define DPE_EXPMIN INT_MIN /* smallest possible exponent */ -#endif - -#ifdef DPE_LITTLEENDIAN32 -typedef union -{ - double d; -#if INT_MAX == 0x7FFFFFFFL - int i[2]; -#elif LONG_MAX == 0x7FFFFFFFL - long i[2]; -#elif SHRT_MAX == 0x7FFFFFFFL - short i[2]; -#else -# error Cannot find a 32 bits integer type. -#endif -} dpe_double_words; -#endif - -typedef struct -{ - DPE_DOUBLE d; /* significand */ - DPE_EXP_T exp; /* exponent */ -} dpe_struct; - -typedef dpe_struct dpe_t[1]; - -#define DPE_MANT(x) ((x)->d) -#define DPE_EXP(x) ((x)->exp) -#define DPE_SIGN(x) ((DPE_MANT(x) < 0.0) ? -1 : (DPE_MANT(x) > 0.0)) - -#define DPE_INLINE static inline - -/* initialize */ -DPE_INLINE void -dpe_init (dpe_t x DPE_UNUSED_ATTR) -{ -} - -/* clear */ -DPE_INLINE void -dpe_clear (dpe_t x DPE_UNUSED_ATTR) -{ -} - -/* set x to y */ -DPE_INLINE void -dpe_set (dpe_t x, dpe_t y) -{ - DPE_MANT(x) = DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y); -} - -/* set x to -y */ -DPE_INLINE void -dpe_neg (dpe_t x, dpe_t y) -{ - DPE_MANT(x) = -DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y); -} - -/* set x to |y| */ -DPE_INLINE void -dpe_abs (dpe_t x, dpe_t y) -{ - DPE_MANT(x) = (DPE_MANT(y) >= 0) ? DPE_MANT(y) : -DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y); -} - -/* set mantissa in [1/2, 1), except for 0 which has minimum exponent */ -/* FIXME: don't inline this function yet ? */ -static void -dpe_normalize (dpe_t x) -{ - if (DPE_UNLIKELY (DPE_MANT(x) == 0.0 || DPE_ISFINITE (DPE_MANT(x)) == 0)) - { - if (DPE_MANT(x) == 0.0) - DPE_EXP(x) = DPE_EXPMIN; - /* otherwise let the exponent of NaN, Inf unchanged */ - } - else - { - DPE_EXP_T e; -#ifdef DPE_LITTLEENDIAN32 /* 32-bit little endian */ - dpe_double_words dw; - dw.d = DPE_MANT(x); - e = (dw.i[1] >> 20) & 0x7FF; /* unbiased exponent, 1022 for m=1/2 */ - DPE_EXP(x) += e - 1022; - dw.i[1] = (dw.i[1] & 0x800FFFFF) | 0x3FE00000; - DPE_MANT(x) = dw.d; -#else /* portable code */ - double m = DPE_MANT(x); - DPE_MANT(x) = DPE_FREXP (m, &e); - DPE_EXP(x) += e; -#endif - } -} - -#if defined(DPE_USE_DOUBLE) -static const double dpe_scale_tab[54] = { - 0x1P0, 0x1P-1, 0x1P-2, 0x1P-3, 0x1P-4, 0x1P-5, 0x1P-6, 0x1P-7, 0x1P-8, - 0x1P-9, 0x1P-10, 0x1P-11, 0x1P-12, 0x1P-13, 0x1P-14, 0x1P-15, 0x1P-16, - 0x1P-17, 0x1P-18, 0x1P-19, 0x1P-20, 0x1P-21, 0x1P-22, 0x1P-23, 0x1P-24, - 0x1P-25, 0x1P-26, 0x1P-27, 0x1P-28, 0x1P-29, 0x1P-30, 0x1P-31, 0x1P-32, - 0x1P-33, 0x1P-34, 0x1P-35, 0x1P-36, 0x1P-37, 0x1P-38, 0x1P-39, 0x1P-40, - 0x1P-41, 0x1P-42, 0x1P-43, 0x1P-44, 0x1P-45, 0x1P-46, 0x1P-47, 0x1P-48, - 0x1P-49, 0x1P-50, 0x1P-51, 0x1P-52, 0x1P-53}; -#endif - -DPE_INLINE DPE_DOUBLE -dpe_scale (DPE_DOUBLE d, int s) -{ - /* -DPE_BITSIZE < s <= 0 and 1/2 <= d < 1 */ -#if defined(DPE_USE_DOUBLE) - return d * dpe_scale_tab [-s]; -#else /* portable code */ - return DPE_LDEXP (d, s); -#endif -} - -/* set x to y */ -DPE_INLINE void -dpe_set_d (dpe_t x, double y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -/* set x to y */ -DPE_INLINE void -dpe_set_ld (dpe_t x, long double y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -/* set x to y */ -DPE_INLINE void -dpe_set_ui (dpe_t x, unsigned long y) -{ - 
DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -/* set x to y */ -DPE_INLINE void -dpe_set_si (dpe_t x, long y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -DPE_INLINE long -dpe_get_si (dpe_t x) -{ - DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); - return (long) d; -} - -DPE_INLINE unsigned long -dpe_get_ui (dpe_t x) -{ - DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); - return (d < 0.0) ? 0 : (unsigned long) d; -} - -DPE_INLINE double -dpe_get_d (dpe_t x) -{ - return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); -} - -DPE_INLINE long double -dpe_get_ld (dpe_t x) -{ - return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); -} - -#if defined(__GMP_H__) || defined(__MINI_GMP_H__) -/* set x to y */ -DPE_INLINE void -dpe_set_z (dpe_t x, mpz_t y) -{ - long e; - DPE_MANT(x) = mpz_get_d_2exp (&e, y); - DPE_EXP(x) = (DPE_EXP_T) e; -} - -/* set x to y, rounded to nearest */ -DPE_INLINE void -dpe_get_z (mpz_t x, dpe_t y) -{ - DPE_EXP_T ey = DPE_EXP(y); - if (ey >= DPE_BITSIZE) /* y is an integer */ - { - DPE_DOUBLE d = DPE_MANT(y) * DPE_2_POW_BITSIZE; /* d is an integer */ - mpz_set_d (x, d); /* should be exact */ - mpz_mul_2exp (x, x, (unsigned long) ey - DPE_BITSIZE); - } - else /* DPE_EXP(y) < DPE_BITSIZE */ - { - if (DPE_UNLIKELY (ey < 0)) /* |y| < 1/2 */ - mpz_set_ui (x, 0); - else - { - DPE_DOUBLE d = DPE_LDEXP(DPE_MANT(y), ey); - mpz_set_d (x, (double) DPE_ROUND(d)); - } - } -} - -/* return e and x such that y = x*2^e */ -DPE_INLINE mp_exp_t -dpe_get_z_exp (mpz_t x, dpe_t y) -{ - mpz_set_d (x, DPE_MANT (y) * DPE_2_POW_BITSIZE); - return DPE_EXP(y) - DPE_BITSIZE; -} -#endif - -/* x <- y + z, assuming y and z are normalized, returns x normalized */ -DPE_INLINE void -dpe_add (dpe_t x, dpe_t y, dpe_t z) -{ - if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) - /* |z| < 1/2*ulp(y), thus o(y+z) = y */ - dpe_set (x, y); - else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) - dpe_set (x, z); - else - { - DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ - - if (d >= 0) - { - DPE_MANT(x) = DPE_MANT(y) + dpe_scale (DPE_MANT(z), -d); - DPE_EXP(x) = DPE_EXP(y); - } - else - { - DPE_MANT(x) = DPE_MANT(z) + dpe_scale (DPE_MANT(y), d); - DPE_EXP(x) = DPE_EXP(z); - } - dpe_normalize (x); - } -} - -/* x <- y - z, assuming y and z are normalized, returns x normalized */ -DPE_INLINE void -dpe_sub (dpe_t x, dpe_t y, dpe_t z) -{ - if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) - /* |z| < 1/2*ulp(y), thus o(y-z) = y */ - dpe_set (x, y); - else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) - dpe_neg (x, z); - else - { - DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ - - if (d >= 0) - { - DPE_MANT(x) = DPE_MANT(y) - dpe_scale (DPE_MANT(z), -d); - DPE_EXP(x) = DPE_EXP(y); - } - else - { - DPE_MANT(x) = dpe_scale (DPE_MANT(y), d) - DPE_MANT(z); - DPE_EXP(x) = DPE_EXP(z); - } - dpe_normalize (x); - } -} - -/* x <- y * z, assuming y and z are normalized, returns x normalized */ -DPE_INLINE void -dpe_mul (dpe_t x, dpe_t y, dpe_t z) -{ - DPE_MANT(x) = DPE_MANT(y) * DPE_MANT(z); - DPE_EXP(x) = DPE_EXP(y) + DPE_EXP(z); - dpe_normalize (x); -} - -/* x <- sqrt(y), assuming y is normalized, returns x normalized */ -DPE_INLINE void -dpe_sqrt (dpe_t x, dpe_t y) -{ - DPE_EXP_T ey = DPE_EXP(y); - if (ey % 2) - { - /* since 1/2 <= my < 1, 1/4 <= my/2 < 1 */ - DPE_MANT(x) = sqrt (0.5 * DPE_MANT(y)); - DPE_EXP(x) = (ey + 1) / 2; - } - else - { - DPE_MANT(x) = sqrt (DPE_MANT(y)); - DPE_EXP(x) = ey / 2; - } 
-} - -/* x <- y / z, assuming y and z are normalized, returns x normalized. - Assumes z is not zero. */ -DPE_INLINE void -dpe_div (dpe_t x, dpe_t y, dpe_t z) -{ - DPE_MANT(x) = DPE_MANT(y) / DPE_MANT(z); - DPE_EXP(x) = DPE_EXP(y) - DPE_EXP(z); - dpe_normalize (x); -} - -/* x <- y * z, assuming y normalized, returns x normalized */ -DPE_INLINE void -dpe_mul_ui (dpe_t x, dpe_t y, unsigned long z) -{ - DPE_MANT(x) = DPE_MANT(y) * (DPE_DOUBLE) z; - DPE_EXP(x) = DPE_EXP(y); - dpe_normalize (x); -} - -/* x <- y / z, assuming y normalized, z non-zero, returns x normalized */ -DPE_INLINE void -dpe_div_ui (dpe_t x, dpe_t y, unsigned long z) -{ - DPE_MANT(x) = DPE_MANT(y) / (DPE_DOUBLE) z; - DPE_EXP(x) = DPE_EXP(y); - dpe_normalize (x); -} - -/* x <- y * 2^e */ -DPE_INLINE void -dpe_mul_2exp (dpe_t x, dpe_t y, unsigned long e) -{ - DPE_MANT(x) = DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y) + (DPE_EXP_T) e; -} - -/* x <- y / 2^e */ -DPE_INLINE void -dpe_div_2exp (dpe_t x, dpe_t y, unsigned long e) -{ - DPE_MANT(x) = DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y) - (DPE_EXP_T) e; -} - -/* return e and x such that y = x*2^e (equality is not guaranteed if the 'long' - type has fewer bits than the significand in dpe_t) */ -DPE_INLINE DPE_EXP_T -dpe_get_si_exp (long *x, dpe_t y) -{ - if (sizeof(long) == 4) /* 32-bit word: long has 31 bits */ - { - *x = (long) (DPE_MANT(y) * 2147483648.0); - return DPE_EXP(y) - 31; - } - else if (sizeof(long) == 8) /* 64-bit word: long has 63 bits */ - { - *x = (long) (DPE_MANT (y) * 9223372036854775808.0); - return DPE_EXP(y) - 63; - } - else - { - fprintf (stderr, "Error, neither 32-bit nor 64-bit word\n"); - exit (1); - } -} - -static DPE_UNUSED_ATTR int dpe_str_prec = 16; -static int dpe_out_str (FILE *s, int base, dpe_t x) DPE_UNUSED_ATTR; - -static int -dpe_out_str (FILE *s, int base, dpe_t x) -{ - DPE_DOUBLE d = DPE_MANT(x); - DPE_EXP_T e2 = DPE_EXP(x); - int e10 = 0; - char sign = ' '; - if (DPE_UNLIKELY (base != 10)) - { - fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); - exit (1); - } - if (d == 0.0) -#ifdef DPE_USE_DOUBLE - return fprintf (s, "%1.*f", dpe_str_prec, d); -#else - return fprintf (s, "%1.*Lf", dpe_str_prec, (long double) d); -#endif - if (d < 0) - { - d = -d; - sign = '-'; - } - if (e2 > 0) - { - while (e2 > 0) - { - e2 --; - d *= 2.0; - if (d >= 10.0) - { - d /= 10.0; - e10 ++; - } - } - } - else /* e2 <= 0 */ - { - while (e2 < 0) - { - e2 ++; - d /= 2.0; - if (d < 1.0) - { - d *= 10.0; - e10 --; - } - } - } -#ifdef DPE_USE_DOUBLE - return fprintf (s, "%c%1.*f*10^%d", sign, dpe_str_prec, d, e10); -#else - return fprintf (s, "%c%1.*Lf*10^%d", sign, dpe_str_prec, (long double) d, e10); -#endif -} - -static size_t dpe_inp_str (dpe_t x, FILE *s, int base) DPE_UNUSED_ATTR; - -static size_t -dpe_inp_str (dpe_t x, FILE *s, int base) -{ - size_t res; - DPE_DOUBLE d; - if (DPE_UNLIKELY (base != 10)) - { - fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); - exit (1); - } -#ifdef DPE_USE_DOUBLE - res = fscanf (s, "%lf", &d); -#elif defined(DPE_USE_LONGDOUBLE) - res = fscanf (s, "%Lf", &d); -#else - { - long double d_ld; - res = fscanf (s, "%Lf", &d_ld); - d = d_ld; - } -#endif - dpe_set_d (x, d); - return res; -} - -DPE_INLINE void -dpe_dump (dpe_t x) -{ - dpe_out_str (stdout, 10, x); - putchar ('\n'); -} - -DPE_INLINE int -dpe_zero_p (dpe_t x) -{ - return DPE_MANT (x) == 0; -} - -/* return a positive value if x > y - a negative value if x < y - and 0 otherwise (x=y). 
*/ -DPE_INLINE int -dpe_cmp (dpe_t x, dpe_t y) -{ - int sx = DPE_SIGN(x); - int d = sx - DPE_SIGN(y); - - if (d != 0) - return d; - else if (DPE_EXP(x) > DPE_EXP(y)) - return (sx > 0) ? 1 : -1; - else if (DPE_EXP(y) > DPE_EXP(x)) - return (sx > 0) ? -1 : 1; - else /* DPE_EXP(x) = DPE_EXP(y) */ - return (DPE_MANT(x) < DPE_MANT(y)) ? -1 : (DPE_MANT(x) > DPE_MANT(y)); -} - -DPE_INLINE int -dpe_cmp_d (dpe_t x, double d) -{ - dpe_t y; - dpe_set_d (y, d); - return dpe_cmp (x, y); -} - -DPE_INLINE int -dpe_cmp_ui (dpe_t x, unsigned long d) -{ - dpe_t y; - dpe_set_ui (y, d); - return dpe_cmp (x, y); -} - -DPE_INLINE int -dpe_cmp_si (dpe_t x, long d) -{ - dpe_t y; - dpe_set_si (y, d); - return dpe_cmp (x, y); -} - -/* set x to integer nearest to y */ -DPE_INLINE void -dpe_round (dpe_t x, dpe_t y) -{ - if (DPE_EXP(y) < 0) /* |y| < 1/2 */ - dpe_set_ui (x, 0); - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set (x, y); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, DPE_ROUND(d)); - } -} - -/* set x to the fractional part of y, defined as y - trunc(y), thus the - fractional part has absolute value in [0, 1), and same sign as y */ -DPE_INLINE void -dpe_frac (dpe_t x, dpe_t y) -{ - /* If |y| is smaller than 1, keep it */ - if (DPE_EXP(y) <= 0) - dpe_set (x, y); - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set_ui (x, 0); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, d - DPE_TRUNC(d)); - } -} - -/* set x to largest integer <= y */ -DPE_INLINE void -dpe_floor (dpe_t x, dpe_t y) -{ - if (DPE_EXP(y) <= 0) /* |y| < 1 */ - { - if (DPE_SIGN(y) >= 0) /* 0 <= y < 1 */ - dpe_set_ui (x, 0); - else /* -1 < y < 0 */ - dpe_set_si (x, -1); - } - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set (x, y); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, DPE_FLOOR(d)); - } -} - -/* set x to smallest integer >= y */ -DPE_INLINE void -dpe_ceil (dpe_t x, dpe_t y) -{ - if (DPE_EXP(y) <= 0) /* |y| < 1 */ - { - if (DPE_SIGN(y) > 0) /* 0 < y < 1 */ - dpe_set_ui (x, 1); - else /* -1 < y <= 0 */ - dpe_set_si (x, 0); - } - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set (x, y); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, DPE_CEIL(d)); - } -} - -DPE_INLINE void -dpe_swap (dpe_t x, dpe_t y) -{ - DPE_EXP_T i = DPE_EXP (x); - DPE_DOUBLE d = DPE_MANT (x); - DPE_EXP (x) = DPE_EXP (y); - DPE_MANT (x) = DPE_MANT (y); - DPE_EXP (y) = i; - DPE_MANT (y) = d; -} - -#endif /* __DPE */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/l2.c index 8c49b21d20..5491ee44d0 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/l2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/l2.c @@ -2,47 +2,208 @@ #include "lll_internals.h" #include "internal.h" -#include "dpe.h" +#include +#include + // Access entry of symmetric matrix #define SYM(M, i, j) (i < j ? 
&M[j][i] : &M[i][j]) -void -quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) +typedef struct fp_num { + double s; + int e; +} fp_num; + +static void +copy(fp_num *x, fp_num *r) { - dpe_t dpe_const_one, dpe_const_DELTABAR; + r->s = x->s; + r->e = x->e; +} - dpe_init(dpe_const_one); - dpe_set_ui(dpe_const_one, 1); +static void +normalize(fp_num *x) +{ + if (x->s == 0.0 || isfinite(x->s) == 0) { + if (x->s == 0.0) { + x->e = INT_MIN; + } + } else { + int e; + x->s = frexp(x->s, &e); + x->e += e; + } +} - dpe_init(dpe_const_DELTABAR); - dpe_set_d(dpe_const_DELTABAR, DELTABAR); +static void +to_one(fp_num *x) +{ + x->s = 1; + x->e = 0; +} - // fp variables for Gram-Schmidt orthogonalization and Lovasz' conditions - dpe_t r[4][4], u[4][4], lovasz[4]; - for (int i = 0; i < 4; i++) { - dpe_init(lovasz[i]); - for (int j = 0; j <= i; j++) { - dpe_init(r[i][j]); - dpe_init(u[i][j]); - } +static void +to_deltabar(fp_num *x) +{ + x->s = DELTABAR; + x->e = 0; +} + +static void +to_etabar(fp_num *x) +{ + x->s = ETABAR; + x->e = 0; +} + +static void +from_mpz(const mpz_t x, fp_num *r) +{ + long exp = 0; + r->s = mpz_get_d_2exp(&exp, x); + r->e = exp; +} + +static void +to_mpz(const fp_num *x, mpz_t r) +{ + if (x->e >= DBL_MANT_DIG) { + double s = x->s * 0x1P53; + mpz_set_d(r, s); + mpz_mul_2exp(r, r, x->e - DBL_MANT_DIG); + } else if (x->e < 0) { + mpz_set_ui(r, 0); + } else { + double s = ldexp(x->s, x->e); + mpz_set_d(r, round(s)); } +} - // threshold for swaps - dpe_t delta_bar; - dpe_init(delta_bar); - dpe_set_d(delta_bar, DELTABAR); +static void +fp_mul(const fp_num *x, const fp_num *y, fp_num *r) +{ + r->s = x->s * y->s; + r->e = x->e + y->e; + normalize(r); + +} + +static void +fp_div(const fp_num *x, const fp_num *y, fp_num *r) +{ + r->s = x->s / y->s; + r->e = x->e - y->e; + normalize(r); +} + +static void +fp_sub(const fp_num *x, const fp_num *y, fp_num *r) +{ + if (x->e > y->e + DBL_MANT_DIG) { + r->s = x->s; + r->e = x->e; + } else if (y->e > x->e + DBL_MANT_DIG) { + r->s = -y->s; + r->e = y->e; + } else { + int e = x->e - y->e; + + if (e >= 0) { + r->s = x->s - ldexp(y->s, -e); + r->e = x->e; + } else { + r->s = ldexp(x->s, e) - y->s; + r->e = y->e; + } + + normalize(r); + } +} + +static inline int +sign(const fp_num *x) +{ + if (x->s < 0.0) + return -1; + return 1; +} + +static int +fp_cmp(const fp_num *x, const fp_num *y) +{ + int sign_x = sign(x); + int sign_y = sign(y); + + if (sign_x != sign_y) + return sign_x - sign_y; + else if (x->e > y->e) + return sign_x; + else if (y->e > x->e) + return -sign_x; + else if (x->s > y->s) + return 1; + else if (x->s < y->s) + return -1; + else + return 0; +} + +static void +fp_round(fp_num *x) +{ + if (x->e < 0) { + x->s = 0; + x->e = 0; + } else if (x->e >= DBL_MANT_DIG) { + return; + } else { + double tmp; + tmp = ldexp(x->s, x->e); + x->s = round(tmp); + x->e = 0; + normalize(x); + } +} + +static void +fp_abs(const fp_num *x, fp_num *y) { + if (x->s < 0.0) { + y->s = -x->s; + } else { + y->s = x->s; + } + y->e = x->e; +} + +void +quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) +{ + fp_num const_one = {0}; + fp_num delta_bar = {0}; + fp_num eta_bar = {0}; + fp_num neg_eta_bar = {0}; + to_one(&const_one); + to_deltabar(&delta_bar); + eta_bar.s = ETABAR; + eta_bar.e = 0; + neg_eta_bar.s = -ETABAR; + neg_eta_bar.e = 0; + normalize(&eta_bar); + normalize(&neg_eta_bar); + + fp_num r[4][4] = {0}; + fp_num u[4][4] = {0}; + fp_num lovasz[4] = {0}; + + fp_num Xf = {0}; + fp_num tmpF = {0}; - // Other work variables - dpe_t Xf, tmpF; - 
dpe_init(Xf); - dpe_init(tmpF); ibz_t X, tmpI; ibz_init(&X); ibz_init(&tmpI); // Main L² loop - dpe_set_z(r[0][0], (*G)[0][0]); + from_mpz((*G)[0][0], &r[0][0]); int kappa = 1; while (kappa < 4) { // size reduce b_κ @@ -52,23 +213,23 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Loop invariant: // r[κ][j] ≈ u[κ][j] ‖b_j*‖² ≈ 〈b_κ, b_j*〉 for (int j = 0; j <= kappa; j++) { - dpe_set_z(r[kappa][j], (*G)[kappa][j]); + from_mpz((*G)[kappa][j], &r[kappa][j]); for (int k = 0; k < j; k++) { - dpe_mul(tmpF, r[kappa][k], u[j][k]); - dpe_sub(r[kappa][j], r[kappa][j], tmpF); + fp_mul(&r[kappa][k], &u[j][k], &tmpF); + fp_sub(&r[kappa][j], &tmpF, &r[kappa][j]); } if (j < kappa) - dpe_div(u[kappa][j], r[kappa][j], r[j][j]); + fp_div(&r[kappa][j], &r[j][j], &u[kappa][j]); } done = 1; // size reduce for (int i = kappa - 1; i >= 0; i--) { - if (dpe_cmp_d(u[kappa][i], ETABAR) > 0 || dpe_cmp_d(u[kappa][i], -ETABAR) < 0) { + if (fp_cmp(&u[kappa][i], &eta_bar) > 0 || fp_cmp(&u[kappa][i], &neg_eta_bar) < 0) { done = 0; - dpe_set(Xf, u[kappa][i]); - dpe_round(Xf, Xf); - dpe_get_z(X, Xf); + copy(&u[kappa][i], &Xf); + fp_round(&Xf); + to_mpz(&Xf, X); // Update basis: b_κ ← b_κ - X·b_i for (int j = 0; j < 4; j++) { ibz_mul(&tmpI, &X, &(*basis)[j][i]); @@ -91,8 +252,8 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // // Update u[kappa][j] for (int j = 0; j < i; j++) { - dpe_mul(tmpF, Xf, u[i][j]); - dpe_sub(u[kappa][j], u[kappa][j], tmpF); + fp_mul(&Xf, &u[i][j], &tmpF); + fp_sub(&u[kappa][j], &tmpF, &u[kappa][j]); } } } @@ -100,16 +261,16 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Check Lovasz' conditions // lovasz[0] = ‖b_κ‖² - dpe_set_z(lovasz[0], (*G)[kappa][kappa]); + from_mpz((*G)[kappa][kappa], &lovasz[0]); // lovasz[i] = lovasz[i-1] - u[κ][i-1]·r[κ][i-1] for (int i = 1; i < kappa; i++) { - dpe_mul(tmpF, u[kappa][i - 1], r[kappa][i - 1]); - dpe_sub(lovasz[i], lovasz[i - 1], tmpF); + fp_mul(&u[kappa][i - 1], &r[kappa][i - 1], &tmpF); + fp_sub(&lovasz[i - 1], &tmpF, &lovasz[i]); } int swap; for (swap = kappa; swap > 0; swap--) { - dpe_mul(tmpF, delta_bar, r[swap - 1][swap - 1]); - if (dpe_cmp(tmpF, lovasz[swap - 1]) < 0) + fp_mul(&delta_bar, &r[swap - 1][swap - 1], &tmpF); + if (fp_cmp(&tmpF, &lovasz[swap - 1]) < 0) break; } @@ -127,10 +288,10 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) } // Copy row u[κ] and r[κ] in swap position, ignore what follows for (int i = 0; i < swap; i++) { - dpe_set(u[swap][i], u[kappa][i]); - dpe_set(r[swap][i], r[kappa][i]); + copy(&u[kappa][i], &u[swap][i]); + copy(&r[kappa][i], &r[swap][i]); } - dpe_set(r[swap][swap], lovasz[swap]); + copy(&lovasz[swap], &r[swap][swap]); // swap complete kappa = swap; } @@ -142,15 +303,15 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Check size-reducedness for (int i = 0; i < 4; i++) for (int j = 0; j < i; j++) { - dpe_abs(u[i][j], u[i][j]); - assert(dpe_cmp_d(u[i][j], ETABAR) <= 0); + fp_abs(&u[i][j], &u[i][j]); + assert(fp_cmp(&u[i][j], &eta_bar) <= 0); } // Check Lovasz' conditions for (int i = 1; i < 4; i++) { - dpe_mul(tmpF, u[i][i - 1], u[i][i - 1]); - dpe_sub(tmpF, dpe_const_DELTABAR, tmpF); - dpe_mul(tmpF, tmpF, r[i - 1][i - 1]); - assert(dpe_cmp(tmpF, r[i][i]) <= 0); + fp_mul(&u[i][i - 1], &u[i][i - 1], &tmpF); + fp_sub(&delta_bar, &tmpF, &tmpF); + fp_mul(&tmpF, &r[i - 1][i - 1], &tmpF); + assert(fp_cmp(&tmpF, &r[i][i]) <= 0); } #endif @@ -163,18 +324,6 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Clearinghouse ibz_finalize(&X); ibz_finalize(&tmpI); - 
dpe_clear(dpe_const_one); - dpe_clear(dpe_const_DELTABAR); - dpe_clear(Xf); - dpe_clear(tmpF); - dpe_clear(delta_bar); - for (int i = 0; i < 4; i++) { - dpe_clear(lovasz[i]); - for (int j = 0; j <= i; j++) { - dpe_clear(r[i][j]); - dpe_clear(u[i][j]); - } - } } int diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/COPYING.LGPL b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/COPYING.LGPL deleted file mode 100644 index 0a041280bd..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/COPYING.LGPL +++ /dev/null @@ -1,165 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. 
You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. 
Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dpe.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dpe.h deleted file mode 100644 index b9a7a35e0b..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dpe.h +++ /dev/null @@ -1,743 +0,0 @@ -/* Copyright (C) 2004-2024 Patrick Pelissier, Paul Zimmermann, LORIA/INRIA. - -This file is part of the DPE Library. - -The DPE Library is free software; you can redistribute it and/or modify -it under the terms of the GNU Lesser General Public License as published by -the Free Software Foundation; either version 3 of the License, or (at your -option) any later version. - -The DPE Library is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public -License for more details. - -You should have received a copy of the GNU Lesser General Public License -along with the DPE Library; see the file COPYING.LIB. -If not, see . */ - -#ifndef __DPE -#define __DPE - -#include /* For abort */ -#include /* For fprintf */ -#include /* for round, floor, ceil */ -#include - -/* if you change the version, please change it in Makefile too */ -#define DPE_VERSION_MAJOR 1 -#define DPE_VERSION_MINOR 7 - -#if defined(__GNUC__) && (__GNUC__ >= 3) -# define DPE_LIKELY(x) (__builtin_expect(!!(x),1)) -# define DPE_UNLIKELY(x) (__builtin_expect((x),0)) -# define DPE_UNUSED_ATTR __attribute__((unused)) -#else -# define DPE_LIKELY(x) (x) -# define DPE_UNLIKELY(x) (x) -# define DPE_UNUSED_ATTR -#endif - -/* If no user defined mode, define it to double */ -#if !defined(DPE_USE_DOUBLE) && !defined(DPE_USE_LONGDOUBLE) && !defined(DPE_USE_FLOAT128) -# define DPE_USE_DOUBLE -#endif - -#if defined(DPE_USE_DOUBLE) && defined(DPE_USE_LONGDOUBLE) -# error "Either DPE_USE_DOUBLE or DPE_USE_LONGDOUBLE shall be defined." -#elif defined(DPE_USE_DOUBLE) && defined(DPE_USE_USE_FLOAT128) -# error "Either DPE_USE_DOUBLE or DPE_USE_FLOAT128 shall be defined." -#elif defined(DPE_USE_LONG_DOUBLE) && defined(DPE_USE_USE_FLOAT128) -# error "Either DPE_USE_LONG_DOUBLE or DPE_USE_FLOAT128 shall be defined." 
-#endif - -#if (defined(__i386) || defined (__x86_64)) && !defined(DPE_LITTLEENDIAN32) && defined(DPE_USE_DOUBLE) -# define DPE_LITTLEENDIAN32 -#endif - -#if (defined(__STDC_VERSION__) && (__STDC_VERSION__>=199901L)) || defined (__GLIBC__) -# define DPE_DEFINE_ROUND_TRUNC -#endif - -#if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 43 -# define DPE_ISFINITE __builtin_isfinite -#elif defined(isfinite) -# define DPE_ISFINITE isfinite /* new C99 function */ -#else -# define DPE_ISFINITE finite /* obsolete BSD function */ -#endif - -/* DPE_LDEXP(DPE_DOUBLE m, DPEEXP e) return x = m * 2^e */ -/* DPE_FREXP(DPE_DOUBLE x, DPEEXP *e) returns m, e such that x = m * 2^e with - 1/2 <= m < 1 */ -/* DPE_ROUND(DPE_DOUBLE x) returns the nearest integer to x */ -#if defined(DPE_USE_DOUBLE) -# define DPE_DOUBLE double /* mantissa type */ -# define DPE_BITSIZE 53 /* bitsize of DPE_DOUBLE */ -# define DPE_2_POW_BITSIZE 0x1P53 -# if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 40 -# define DPE_LDEXP __builtin_ldexp -# define DPE_FREXP __builtin_frexp -# define DPE_FLOOR __builtin_floor -# define DPE_CEIL __builtin_ceil -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND __builtin_round -# define DPE_TRUNC __builtin_trunc -# endif -# else -# define DPE_LDEXP ldexp -# define DPE_FREXP frexp -# define DPE_FLOOR floor -# define DPE_CEIL ceil -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND round -# define DPE_TRUNC trunc -# endif -# endif - -#elif defined(DPE_USE_LONGDOUBLE) -# define DPE_DOUBLE long double -# define DPE_BITSIZE 64 -# define DPE_2_POW_BITSIZE 0x1P64 -# define DPE_LDEXP ldexpl -# define DPE_FREXP frexpl -# define DPE_FLOOR floorl -# define DPE_CEIL ceill -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND roundl -# define DPE_TRUNC truncl -# endif - -#elif defined(DPE_USE_FLOAT128) -# include "quadmath.h" -# define DPE_DOUBLE __float128 -# define DPE_BITSIZE 113 -# define DPE_2_POW_BITSIZE 0x1P113 -# define DPE_LDEXP ldexpq -# define DPE_FLOOR floorq -# define DPE_CEIL ceilq -# define DPE_FREXP frexpq -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND roundq -# define DPE_TRUNC truncq -# endif - -#else -# error "neither DPE_USE_DOUBLE, nor DPE_USE_LONGDOUBLE, nor DPE_USE_FLOAT128 is defined" -#endif - -/* If no C99, do what we can */ -#ifndef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND(x) ((DPE_DOUBLE) ((long long) ((x) + ((x) >= 0.0 ? 0.5 : -0.5)))) -# define DPE_TRUNC(x) ((DPE_DOUBLE) ((long long) ((x) + 0.0))) -#endif - -#if defined(DPE_USE_LONG) -# define DPE_EXP_T long /* exponent type */ -# define DPE_EXPMIN LONG_MIN /* smallest possible exponent */ -#elif defined(DPE_USE_LONGLONG) -# define DPE_EXP_T long long -# define DPE_EXPMIN LLONG_MIN -#else -# define DPE_EXP_T int /* exponent type */ -# define DPE_EXPMIN INT_MIN /* smallest possible exponent */ -#endif - -#ifdef DPE_LITTLEENDIAN32 -typedef union -{ - double d; -#if INT_MAX == 0x7FFFFFFFL - int i[2]; -#elif LONG_MAX == 0x7FFFFFFFL - long i[2]; -#elif SHRT_MAX == 0x7FFFFFFFL - short i[2]; -#else -# error Cannot find a 32 bits integer type. -#endif -} dpe_double_words; -#endif - -typedef struct -{ - DPE_DOUBLE d; /* significand */ - DPE_EXP_T exp; /* exponent */ -} dpe_struct; - -typedef dpe_struct dpe_t[1]; - -#define DPE_MANT(x) ((x)->d) -#define DPE_EXP(x) ((x)->exp) -#define DPE_SIGN(x) ((DPE_MANT(x) < 0.0) ? 
-1 : (DPE_MANT(x) > 0.0)) - -#define DPE_INLINE static inline - -/* initialize */ -DPE_INLINE void -dpe_init (dpe_t x DPE_UNUSED_ATTR) -{ -} - -/* clear */ -DPE_INLINE void -dpe_clear (dpe_t x DPE_UNUSED_ATTR) -{ -} - -/* set x to y */ -DPE_INLINE void -dpe_set (dpe_t x, dpe_t y) -{ - DPE_MANT(x) = DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y); -} - -/* set x to -y */ -DPE_INLINE void -dpe_neg (dpe_t x, dpe_t y) -{ - DPE_MANT(x) = -DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y); -} - -/* set x to |y| */ -DPE_INLINE void -dpe_abs (dpe_t x, dpe_t y) -{ - DPE_MANT(x) = (DPE_MANT(y) >= 0) ? DPE_MANT(y) : -DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y); -} - -/* set mantissa in [1/2, 1), except for 0 which has minimum exponent */ -/* FIXME: don't inline this function yet ? */ -static void -dpe_normalize (dpe_t x) -{ - if (DPE_UNLIKELY (DPE_MANT(x) == 0.0 || DPE_ISFINITE (DPE_MANT(x)) == 0)) - { - if (DPE_MANT(x) == 0.0) - DPE_EXP(x) = DPE_EXPMIN; - /* otherwise let the exponent of NaN, Inf unchanged */ - } - else - { - DPE_EXP_T e; -#ifdef DPE_LITTLEENDIAN32 /* 32-bit little endian */ - dpe_double_words dw; - dw.d = DPE_MANT(x); - e = (dw.i[1] >> 20) & 0x7FF; /* unbiased exponent, 1022 for m=1/2 */ - DPE_EXP(x) += e - 1022; - dw.i[1] = (dw.i[1] & 0x800FFFFF) | 0x3FE00000; - DPE_MANT(x) = dw.d; -#else /* portable code */ - double m = DPE_MANT(x); - DPE_MANT(x) = DPE_FREXP (m, &e); - DPE_EXP(x) += e; -#endif - } -} - -#if defined(DPE_USE_DOUBLE) -static const double dpe_scale_tab[54] = { - 0x1P0, 0x1P-1, 0x1P-2, 0x1P-3, 0x1P-4, 0x1P-5, 0x1P-6, 0x1P-7, 0x1P-8, - 0x1P-9, 0x1P-10, 0x1P-11, 0x1P-12, 0x1P-13, 0x1P-14, 0x1P-15, 0x1P-16, - 0x1P-17, 0x1P-18, 0x1P-19, 0x1P-20, 0x1P-21, 0x1P-22, 0x1P-23, 0x1P-24, - 0x1P-25, 0x1P-26, 0x1P-27, 0x1P-28, 0x1P-29, 0x1P-30, 0x1P-31, 0x1P-32, - 0x1P-33, 0x1P-34, 0x1P-35, 0x1P-36, 0x1P-37, 0x1P-38, 0x1P-39, 0x1P-40, - 0x1P-41, 0x1P-42, 0x1P-43, 0x1P-44, 0x1P-45, 0x1P-46, 0x1P-47, 0x1P-48, - 0x1P-49, 0x1P-50, 0x1P-51, 0x1P-52, 0x1P-53}; -#endif - -DPE_INLINE DPE_DOUBLE -dpe_scale (DPE_DOUBLE d, int s) -{ - /* -DPE_BITSIZE < s <= 0 and 1/2 <= d < 1 */ -#if defined(DPE_USE_DOUBLE) - return d * dpe_scale_tab [-s]; -#else /* portable code */ - return DPE_LDEXP (d, s); -#endif -} - -/* set x to y */ -DPE_INLINE void -dpe_set_d (dpe_t x, double y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -/* set x to y */ -DPE_INLINE void -dpe_set_ld (dpe_t x, long double y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -/* set x to y */ -DPE_INLINE void -dpe_set_ui (dpe_t x, unsigned long y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -/* set x to y */ -DPE_INLINE void -dpe_set_si (dpe_t x, long y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -DPE_INLINE long -dpe_get_si (dpe_t x) -{ - DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); - return (long) d; -} - -DPE_INLINE unsigned long -dpe_get_ui (dpe_t x) -{ - DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); - return (d < 0.0) ? 
0 : (unsigned long) d; -} - -DPE_INLINE double -dpe_get_d (dpe_t x) -{ - return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); -} - -DPE_INLINE long double -dpe_get_ld (dpe_t x) -{ - return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); -} - -#if defined(__GMP_H__) || defined(__MINI_GMP_H__) -/* set x to y */ -DPE_INLINE void -dpe_set_z (dpe_t x, mpz_t y) -{ - long e; - DPE_MANT(x) = mpz_get_d_2exp (&e, y); - DPE_EXP(x) = (DPE_EXP_T) e; -} - -/* set x to y, rounded to nearest */ -DPE_INLINE void -dpe_get_z (mpz_t x, dpe_t y) -{ - DPE_EXP_T ey = DPE_EXP(y); - if (ey >= DPE_BITSIZE) /* y is an integer */ - { - DPE_DOUBLE d = DPE_MANT(y) * DPE_2_POW_BITSIZE; /* d is an integer */ - mpz_set_d (x, d); /* should be exact */ - mpz_mul_2exp (x, x, (unsigned long) ey - DPE_BITSIZE); - } - else /* DPE_EXP(y) < DPE_BITSIZE */ - { - if (DPE_UNLIKELY (ey < 0)) /* |y| < 1/2 */ - mpz_set_ui (x, 0); - else - { - DPE_DOUBLE d = DPE_LDEXP(DPE_MANT(y), ey); - mpz_set_d (x, (double) DPE_ROUND(d)); - } - } -} - -/* return e and x such that y = x*2^e */ -DPE_INLINE mp_exp_t -dpe_get_z_exp (mpz_t x, dpe_t y) -{ - mpz_set_d (x, DPE_MANT (y) * DPE_2_POW_BITSIZE); - return DPE_EXP(y) - DPE_BITSIZE; -} -#endif - -/* x <- y + z, assuming y and z are normalized, returns x normalized */ -DPE_INLINE void -dpe_add (dpe_t x, dpe_t y, dpe_t z) -{ - if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) - /* |z| < 1/2*ulp(y), thus o(y+z) = y */ - dpe_set (x, y); - else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) - dpe_set (x, z); - else - { - DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ - - if (d >= 0) - { - DPE_MANT(x) = DPE_MANT(y) + dpe_scale (DPE_MANT(z), -d); - DPE_EXP(x) = DPE_EXP(y); - } - else - { - DPE_MANT(x) = DPE_MANT(z) + dpe_scale (DPE_MANT(y), d); - DPE_EXP(x) = DPE_EXP(z); - } - dpe_normalize (x); - } -} - -/* x <- y - z, assuming y and z are normalized, returns x normalized */ -DPE_INLINE void -dpe_sub (dpe_t x, dpe_t y, dpe_t z) -{ - if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) - /* |z| < 1/2*ulp(y), thus o(y-z) = y */ - dpe_set (x, y); - else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) - dpe_neg (x, z); - else - { - DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ - - if (d >= 0) - { - DPE_MANT(x) = DPE_MANT(y) - dpe_scale (DPE_MANT(z), -d); - DPE_EXP(x) = DPE_EXP(y); - } - else - { - DPE_MANT(x) = dpe_scale (DPE_MANT(y), d) - DPE_MANT(z); - DPE_EXP(x) = DPE_EXP(z); - } - dpe_normalize (x); - } -} - -/* x <- y * z, assuming y and z are normalized, returns x normalized */ -DPE_INLINE void -dpe_mul (dpe_t x, dpe_t y, dpe_t z) -{ - DPE_MANT(x) = DPE_MANT(y) * DPE_MANT(z); - DPE_EXP(x) = DPE_EXP(y) + DPE_EXP(z); - dpe_normalize (x); -} - -/* x <- sqrt(y), assuming y is normalized, returns x normalized */ -DPE_INLINE void -dpe_sqrt (dpe_t x, dpe_t y) -{ - DPE_EXP_T ey = DPE_EXP(y); - if (ey % 2) - { - /* since 1/2 <= my < 1, 1/4 <= my/2 < 1 */ - DPE_MANT(x) = sqrt (0.5 * DPE_MANT(y)); - DPE_EXP(x) = (ey + 1) / 2; - } - else - { - DPE_MANT(x) = sqrt (DPE_MANT(y)); - DPE_EXP(x) = ey / 2; - } -} - -/* x <- y / z, assuming y and z are normalized, returns x normalized. - Assumes z is not zero. 
*/ -DPE_INLINE void -dpe_div (dpe_t x, dpe_t y, dpe_t z) -{ - DPE_MANT(x) = DPE_MANT(y) / DPE_MANT(z); - DPE_EXP(x) = DPE_EXP(y) - DPE_EXP(z); - dpe_normalize (x); -} - -/* x <- y * z, assuming y normalized, returns x normalized */ -DPE_INLINE void -dpe_mul_ui (dpe_t x, dpe_t y, unsigned long z) -{ - DPE_MANT(x) = DPE_MANT(y) * (DPE_DOUBLE) z; - DPE_EXP(x) = DPE_EXP(y); - dpe_normalize (x); -} - -/* x <- y / z, assuming y normalized, z non-zero, returns x normalized */ -DPE_INLINE void -dpe_div_ui (dpe_t x, dpe_t y, unsigned long z) -{ - DPE_MANT(x) = DPE_MANT(y) / (DPE_DOUBLE) z; - DPE_EXP(x) = DPE_EXP(y); - dpe_normalize (x); -} - -/* x <- y * 2^e */ -DPE_INLINE void -dpe_mul_2exp (dpe_t x, dpe_t y, unsigned long e) -{ - DPE_MANT(x) = DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y) + (DPE_EXP_T) e; -} - -/* x <- y / 2^e */ -DPE_INLINE void -dpe_div_2exp (dpe_t x, dpe_t y, unsigned long e) -{ - DPE_MANT(x) = DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y) - (DPE_EXP_T) e; -} - -/* return e and x such that y = x*2^e (equality is not guaranteed if the 'long' - type has fewer bits than the significand in dpe_t) */ -DPE_INLINE DPE_EXP_T -dpe_get_si_exp (long *x, dpe_t y) -{ - if (sizeof(long) == 4) /* 32-bit word: long has 31 bits */ - { - *x = (long) (DPE_MANT(y) * 2147483648.0); - return DPE_EXP(y) - 31; - } - else if (sizeof(long) == 8) /* 64-bit word: long has 63 bits */ - { - *x = (long) (DPE_MANT (y) * 9223372036854775808.0); - return DPE_EXP(y) - 63; - } - else - { - fprintf (stderr, "Error, neither 32-bit nor 64-bit word\n"); - exit (1); - } -} - -static DPE_UNUSED_ATTR int dpe_str_prec = 16; -static int dpe_out_str (FILE *s, int base, dpe_t x) DPE_UNUSED_ATTR; - -static int -dpe_out_str (FILE *s, int base, dpe_t x) -{ - DPE_DOUBLE d = DPE_MANT(x); - DPE_EXP_T e2 = DPE_EXP(x); - int e10 = 0; - char sign = ' '; - if (DPE_UNLIKELY (base != 10)) - { - fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); - exit (1); - } - if (d == 0.0) -#ifdef DPE_USE_DOUBLE - return fprintf (s, "%1.*f", dpe_str_prec, d); -#else - return fprintf (s, "%1.*Lf", dpe_str_prec, (long double) d); -#endif - if (d < 0) - { - d = -d; - sign = '-'; - } - if (e2 > 0) - { - while (e2 > 0) - { - e2 --; - d *= 2.0; - if (d >= 10.0) - { - d /= 10.0; - e10 ++; - } - } - } - else /* e2 <= 0 */ - { - while (e2 < 0) - { - e2 ++; - d /= 2.0; - if (d < 1.0) - { - d *= 10.0; - e10 --; - } - } - } -#ifdef DPE_USE_DOUBLE - return fprintf (s, "%c%1.*f*10^%d", sign, dpe_str_prec, d, e10); -#else - return fprintf (s, "%c%1.*Lf*10^%d", sign, dpe_str_prec, (long double) d, e10); -#endif -} - -static size_t dpe_inp_str (dpe_t x, FILE *s, int base) DPE_UNUSED_ATTR; - -static size_t -dpe_inp_str (dpe_t x, FILE *s, int base) -{ - size_t res; - DPE_DOUBLE d; - if (DPE_UNLIKELY (base != 10)) - { - fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); - exit (1); - } -#ifdef DPE_USE_DOUBLE - res = fscanf (s, "%lf", &d); -#elif defined(DPE_USE_LONGDOUBLE) - res = fscanf (s, "%Lf", &d); -#else - { - long double d_ld; - res = fscanf (s, "%Lf", &d_ld); - d = d_ld; - } -#endif - dpe_set_d (x, d); - return res; -} - -DPE_INLINE void -dpe_dump (dpe_t x) -{ - dpe_out_str (stdout, 10, x); - putchar ('\n'); -} - -DPE_INLINE int -dpe_zero_p (dpe_t x) -{ - return DPE_MANT (x) == 0; -} - -/* return a positive value if x > y - a negative value if x < y - and 0 otherwise (x=y). 
*/ -DPE_INLINE int -dpe_cmp (dpe_t x, dpe_t y) -{ - int sx = DPE_SIGN(x); - int d = sx - DPE_SIGN(y); - - if (d != 0) - return d; - else if (DPE_EXP(x) > DPE_EXP(y)) - return (sx > 0) ? 1 : -1; - else if (DPE_EXP(y) > DPE_EXP(x)) - return (sx > 0) ? -1 : 1; - else /* DPE_EXP(x) = DPE_EXP(y) */ - return (DPE_MANT(x) < DPE_MANT(y)) ? -1 : (DPE_MANT(x) > DPE_MANT(y)); -} - -DPE_INLINE int -dpe_cmp_d (dpe_t x, double d) -{ - dpe_t y; - dpe_set_d (y, d); - return dpe_cmp (x, y); -} - -DPE_INLINE int -dpe_cmp_ui (dpe_t x, unsigned long d) -{ - dpe_t y; - dpe_set_ui (y, d); - return dpe_cmp (x, y); -} - -DPE_INLINE int -dpe_cmp_si (dpe_t x, long d) -{ - dpe_t y; - dpe_set_si (y, d); - return dpe_cmp (x, y); -} - -/* set x to integer nearest to y */ -DPE_INLINE void -dpe_round (dpe_t x, dpe_t y) -{ - if (DPE_EXP(y) < 0) /* |y| < 1/2 */ - dpe_set_ui (x, 0); - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set (x, y); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, DPE_ROUND(d)); - } -} - -/* set x to the fractional part of y, defined as y - trunc(y), thus the - fractional part has absolute value in [0, 1), and same sign as y */ -DPE_INLINE void -dpe_frac (dpe_t x, dpe_t y) -{ - /* If |y| is smaller than 1, keep it */ - if (DPE_EXP(y) <= 0) - dpe_set (x, y); - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set_ui (x, 0); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, d - DPE_TRUNC(d)); - } -} - -/* set x to largest integer <= y */ -DPE_INLINE void -dpe_floor (dpe_t x, dpe_t y) -{ - if (DPE_EXP(y) <= 0) /* |y| < 1 */ - { - if (DPE_SIGN(y) >= 0) /* 0 <= y < 1 */ - dpe_set_ui (x, 0); - else /* -1 < y < 0 */ - dpe_set_si (x, -1); - } - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set (x, y); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, DPE_FLOOR(d)); - } -} - -/* set x to smallest integer >= y */ -DPE_INLINE void -dpe_ceil (dpe_t x, dpe_t y) -{ - if (DPE_EXP(y) <= 0) /* |y| < 1 */ - { - if (DPE_SIGN(y) > 0) /* 0 < y < 1 */ - dpe_set_ui (x, 1); - else /* -1 < y <= 0 */ - dpe_set_si (x, 0); - } - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set (x, y); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, DPE_CEIL(d)); - } -} - -DPE_INLINE void -dpe_swap (dpe_t x, dpe_t y) -{ - DPE_EXP_T i = DPE_EXP (x); - DPE_DOUBLE d = DPE_MANT (x); - DPE_EXP (x) = DPE_EXP (y); - DPE_MANT (x) = DPE_MANT (y); - DPE_EXP (y) = i; - DPE_MANT (y) = d; -} - -#endif /* __DPE */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/l2.c index 8c49b21d20..5491ee44d0 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/l2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/l2.c @@ -2,47 +2,208 @@ #include "lll_internals.h" #include "internal.h" -#include "dpe.h" +#include +#include + // Access entry of symmetric matrix #define SYM(M, i, j) (i < j ? 
&M[j][i] : &M[i][j]) -void -quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) +typedef struct fp_num { + double s; + int e; +} fp_num; + +static void +copy(fp_num *x, fp_num *r) { - dpe_t dpe_const_one, dpe_const_DELTABAR; + r->s = x->s; + r->e = x->e; +} - dpe_init(dpe_const_one); - dpe_set_ui(dpe_const_one, 1); +static void +normalize(fp_num *x) +{ + if (x->s == 0.0 || isfinite(x->s) == 0) { + if (x->s == 0.0) { + x->e = INT_MIN; + } + } else { + int e; + x->s = frexp(x->s, &e); + x->e += e; + } +} - dpe_init(dpe_const_DELTABAR); - dpe_set_d(dpe_const_DELTABAR, DELTABAR); +static void +to_one(fp_num *x) +{ + x->s = 1; + x->e = 0; +} - // fp variables for Gram-Schmidt orthogonalization and Lovasz' conditions - dpe_t r[4][4], u[4][4], lovasz[4]; - for (int i = 0; i < 4; i++) { - dpe_init(lovasz[i]); - for (int j = 0; j <= i; j++) { - dpe_init(r[i][j]); - dpe_init(u[i][j]); - } +static void +to_deltabar(fp_num *x) +{ + x->s = DELTABAR; + x->e = 0; +} + +static void +to_etabar(fp_num *x) +{ + x->s = ETABAR; + x->e = 0; +} + +static void +from_mpz(const mpz_t x, fp_num *r) +{ + long exp = 0; + r->s = mpz_get_d_2exp(&exp, x); + r->e = exp; +} + +static void +to_mpz(const fp_num *x, mpz_t r) +{ + if (x->e >= DBL_MANT_DIG) { + double s = x->s * 0x1P53; + mpz_set_d(r, s); + mpz_mul_2exp(r, r, x->e - DBL_MANT_DIG); + } else if (x->e < 0) { + mpz_set_ui(r, 0); + } else { + double s = ldexp(x->s, x->e); + mpz_set_d(r, round(s)); } +} - // threshold for swaps - dpe_t delta_bar; - dpe_init(delta_bar); - dpe_set_d(delta_bar, DELTABAR); +static void +fp_mul(const fp_num *x, const fp_num *y, fp_num *r) +{ + r->s = x->s * y->s; + r->e = x->e + y->e; + normalize(r); + +} + +static void +fp_div(const fp_num *x, const fp_num *y, fp_num *r) +{ + r->s = x->s / y->s; + r->e = x->e - y->e; + normalize(r); +} + +static void +fp_sub(const fp_num *x, const fp_num *y, fp_num *r) +{ + if (x->e > y->e + DBL_MANT_DIG) { + r->s = x->s; + r->e = x->e; + } else if (y->e > x->e + DBL_MANT_DIG) { + r->s = -y->s; + r->e = y->e; + } else { + int e = x->e - y->e; + + if (e >= 0) { + r->s = x->s - ldexp(y->s, -e); + r->e = x->e; + } else { + r->s = ldexp(x->s, e) - y->s; + r->e = y->e; + } + + normalize(r); + } +} + +static inline int +sign(const fp_num *x) +{ + if (x->s < 0.0) + return -1; + return 1; +} + +static int +fp_cmp(const fp_num *x, const fp_num *y) +{ + int sign_x = sign(x); + int sign_y = sign(y); + + if (sign_x != sign_y) + return sign_x - sign_y; + else if (x->e > y->e) + return sign_x; + else if (y->e > x->e) + return -sign_x; + else if (x->s > y->s) + return 1; + else if (x->s < y->s) + return -1; + else + return 0; +} + +static void +fp_round(fp_num *x) +{ + if (x->e < 0) { + x->s = 0; + x->e = 0; + } else if (x->e >= DBL_MANT_DIG) { + return; + } else { + double tmp; + tmp = ldexp(x->s, x->e); + x->s = round(tmp); + x->e = 0; + normalize(x); + } +} + +static void +fp_abs(const fp_num *x, fp_num *y) { + if (x->s < 0.0) { + y->s = -x->s; + } else { + y->s = x->s; + } + y->e = x->e; +} + +void +quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) +{ + fp_num const_one = {0}; + fp_num delta_bar = {0}; + fp_num eta_bar = {0}; + fp_num neg_eta_bar = {0}; + to_one(&const_one); + to_deltabar(&delta_bar); + eta_bar.s = ETABAR; + eta_bar.e = 0; + neg_eta_bar.s = -ETABAR; + neg_eta_bar.e = 0; + normalize(&eta_bar); + normalize(&neg_eta_bar); + + fp_num r[4][4] = {0}; + fp_num u[4][4] = {0}; + fp_num lovasz[4] = {0}; + + fp_num Xf = {0}; + fp_num tmpF = {0}; - // Other work variables - dpe_t Xf, tmpF; - 
dpe_init(Xf); - dpe_init(tmpF); ibz_t X, tmpI; ibz_init(&X); ibz_init(&tmpI); // Main L² loop - dpe_set_z(r[0][0], (*G)[0][0]); + from_mpz((*G)[0][0], &r[0][0]); int kappa = 1; while (kappa < 4) { // size reduce b_κ @@ -52,23 +213,23 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Loop invariant: // r[κ][j] ≈ u[κ][j] ‖b_j*‖² ≈ 〈b_κ, b_j*〉 for (int j = 0; j <= kappa; j++) { - dpe_set_z(r[kappa][j], (*G)[kappa][j]); + from_mpz((*G)[kappa][j], &r[kappa][j]); for (int k = 0; k < j; k++) { - dpe_mul(tmpF, r[kappa][k], u[j][k]); - dpe_sub(r[kappa][j], r[kappa][j], tmpF); + fp_mul(&r[kappa][k], &u[j][k], &tmpF); + fp_sub(&r[kappa][j], &tmpF, &r[kappa][j]); } if (j < kappa) - dpe_div(u[kappa][j], r[kappa][j], r[j][j]); + fp_div(&r[kappa][j], &r[j][j], &u[kappa][j]); } done = 1; // size reduce for (int i = kappa - 1; i >= 0; i--) { - if (dpe_cmp_d(u[kappa][i], ETABAR) > 0 || dpe_cmp_d(u[kappa][i], -ETABAR) < 0) { + if (fp_cmp(&u[kappa][i], &eta_bar) > 0 || fp_cmp(&u[kappa][i], &neg_eta_bar) < 0) { done = 0; - dpe_set(Xf, u[kappa][i]); - dpe_round(Xf, Xf); - dpe_get_z(X, Xf); + copy(&u[kappa][i], &Xf); + fp_round(&Xf); + to_mpz(&Xf, X); // Update basis: b_κ ← b_κ - X·b_i for (int j = 0; j < 4; j++) { ibz_mul(&tmpI, &X, &(*basis)[j][i]); @@ -91,8 +252,8 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // // Update u[kappa][j] for (int j = 0; j < i; j++) { - dpe_mul(tmpF, Xf, u[i][j]); - dpe_sub(u[kappa][j], u[kappa][j], tmpF); + fp_mul(&Xf, &u[i][j], &tmpF); + fp_sub(&u[kappa][j], &tmpF, &u[kappa][j]); } } } @@ -100,16 +261,16 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Check Lovasz' conditions // lovasz[0] = ‖b_κ‖² - dpe_set_z(lovasz[0], (*G)[kappa][kappa]); + from_mpz((*G)[kappa][kappa], &lovasz[0]); // lovasz[i] = lovasz[i-1] - u[κ][i-1]·r[κ][i-1] for (int i = 1; i < kappa; i++) { - dpe_mul(tmpF, u[kappa][i - 1], r[kappa][i - 1]); - dpe_sub(lovasz[i], lovasz[i - 1], tmpF); + fp_mul(&u[kappa][i - 1], &r[kappa][i - 1], &tmpF); + fp_sub(&lovasz[i - 1], &tmpF, &lovasz[i]); } int swap; for (swap = kappa; swap > 0; swap--) { - dpe_mul(tmpF, delta_bar, r[swap - 1][swap - 1]); - if (dpe_cmp(tmpF, lovasz[swap - 1]) < 0) + fp_mul(&delta_bar, &r[swap - 1][swap - 1], &tmpF); + if (fp_cmp(&tmpF, &lovasz[swap - 1]) < 0) break; } @@ -127,10 +288,10 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) } // Copy row u[κ] and r[κ] in swap position, ignore what follows for (int i = 0; i < swap; i++) { - dpe_set(u[swap][i], u[kappa][i]); - dpe_set(r[swap][i], r[kappa][i]); + copy(&u[kappa][i], &u[swap][i]); + copy(&r[kappa][i], &r[swap][i]); } - dpe_set(r[swap][swap], lovasz[swap]); + copy(&lovasz[swap], &r[swap][swap]); // swap complete kappa = swap; } @@ -142,15 +303,15 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Check size-reducedness for (int i = 0; i < 4; i++) for (int j = 0; j < i; j++) { - dpe_abs(u[i][j], u[i][j]); - assert(dpe_cmp_d(u[i][j], ETABAR) <= 0); + fp_abs(&u[i][j], &u[i][j]); + assert(fp_cmp(&u[i][j], &eta_bar) <= 0); } // Check Lovasz' conditions for (int i = 1; i < 4; i++) { - dpe_mul(tmpF, u[i][i - 1], u[i][i - 1]); - dpe_sub(tmpF, dpe_const_DELTABAR, tmpF); - dpe_mul(tmpF, tmpF, r[i - 1][i - 1]); - assert(dpe_cmp(tmpF, r[i][i]) <= 0); + fp_mul(&u[i][i - 1], &u[i][i - 1], &tmpF); + fp_sub(&delta_bar, &tmpF, &tmpF); + fp_mul(&tmpF, &r[i - 1][i - 1], &tmpF); + assert(fp_cmp(&tmpF, &r[i][i]) <= 0); } #endif @@ -163,18 +324,6 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Clearinghouse ibz_finalize(&X); ibz_finalize(&tmpI); - 
dpe_clear(dpe_const_one); - dpe_clear(dpe_const_DELTABAR); - dpe_clear(Xf); - dpe_clear(tmpF); - dpe_clear(delta_bar); - for (int i = 0; i < 4; i++) { - dpe_clear(lovasz[i]); - for (int j = 0; j <= i; j++) { - dpe_clear(r[i][j]); - dpe_clear(u[i][j]); - } - } } int diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/COPYING.LGPL b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/COPYING.LGPL deleted file mode 100644 index 0a041280bd..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/COPYING.LGPL +++ /dev/null @@ -1,165 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. 
You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. 
Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dpe.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dpe.h deleted file mode 100644 index b9a7a35e0b..0000000000 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dpe.h +++ /dev/null @@ -1,743 +0,0 @@ -/* Copyright (C) 2004-2024 Patrick Pelissier, Paul Zimmermann, LORIA/INRIA. - -This file is part of the DPE Library. - -The DPE Library is free software; you can redistribute it and/or modify -it under the terms of the GNU Lesser General Public License as published by -the Free Software Foundation; either version 3 of the License, or (at your -option) any later version. - -The DPE Library is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public -License for more details. - -You should have received a copy of the GNU Lesser General Public License -along with the DPE Library; see the file COPYING.LIB. -If not, see . */ - -#ifndef __DPE -#define __DPE - -#include /* For abort */ -#include /* For fprintf */ -#include /* for round, floor, ceil */ -#include - -/* if you change the version, please change it in Makefile too */ -#define DPE_VERSION_MAJOR 1 -#define DPE_VERSION_MINOR 7 - -#if defined(__GNUC__) && (__GNUC__ >= 3) -# define DPE_LIKELY(x) (__builtin_expect(!!(x),1)) -# define DPE_UNLIKELY(x) (__builtin_expect((x),0)) -# define DPE_UNUSED_ATTR __attribute__((unused)) -#else -# define DPE_LIKELY(x) (x) -# define DPE_UNLIKELY(x) (x) -# define DPE_UNUSED_ATTR -#endif - -/* If no user defined mode, define it to double */ -#if !defined(DPE_USE_DOUBLE) && !defined(DPE_USE_LONGDOUBLE) && !defined(DPE_USE_FLOAT128) -# define DPE_USE_DOUBLE -#endif - -#if defined(DPE_USE_DOUBLE) && defined(DPE_USE_LONGDOUBLE) -# error "Either DPE_USE_DOUBLE or DPE_USE_LONGDOUBLE shall be defined." -#elif defined(DPE_USE_DOUBLE) && defined(DPE_USE_USE_FLOAT128) -# error "Either DPE_USE_DOUBLE or DPE_USE_FLOAT128 shall be defined." -#elif defined(DPE_USE_LONG_DOUBLE) && defined(DPE_USE_USE_FLOAT128) -# error "Either DPE_USE_LONG_DOUBLE or DPE_USE_FLOAT128 shall be defined." 
-#endif - -#if (defined(__i386) || defined (__x86_64)) && !defined(DPE_LITTLEENDIAN32) && defined(DPE_USE_DOUBLE) -# define DPE_LITTLEENDIAN32 -#endif - -#if (defined(__STDC_VERSION__) && (__STDC_VERSION__>=199901L)) || defined (__GLIBC__) -# define DPE_DEFINE_ROUND_TRUNC -#endif - -#if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 43 -# define DPE_ISFINITE __builtin_isfinite -#elif defined(isfinite) -# define DPE_ISFINITE isfinite /* new C99 function */ -#else -# define DPE_ISFINITE finite /* obsolete BSD function */ -#endif - -/* DPE_LDEXP(DPE_DOUBLE m, DPEEXP e) return x = m * 2^e */ -/* DPE_FREXP(DPE_DOUBLE x, DPEEXP *e) returns m, e such that x = m * 2^e with - 1/2 <= m < 1 */ -/* DPE_ROUND(DPE_DOUBLE x) returns the nearest integer to x */ -#if defined(DPE_USE_DOUBLE) -# define DPE_DOUBLE double /* mantissa type */ -# define DPE_BITSIZE 53 /* bitsize of DPE_DOUBLE */ -# define DPE_2_POW_BITSIZE 0x1P53 -# if defined(__GNUC__) && (__GNUC__ *10 + __GNUC_MINOR__) >= 40 -# define DPE_LDEXP __builtin_ldexp -# define DPE_FREXP __builtin_frexp -# define DPE_FLOOR __builtin_floor -# define DPE_CEIL __builtin_ceil -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND __builtin_round -# define DPE_TRUNC __builtin_trunc -# endif -# else -# define DPE_LDEXP ldexp -# define DPE_FREXP frexp -# define DPE_FLOOR floor -# define DPE_CEIL ceil -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND round -# define DPE_TRUNC trunc -# endif -# endif - -#elif defined(DPE_USE_LONGDOUBLE) -# define DPE_DOUBLE long double -# define DPE_BITSIZE 64 -# define DPE_2_POW_BITSIZE 0x1P64 -# define DPE_LDEXP ldexpl -# define DPE_FREXP frexpl -# define DPE_FLOOR floorl -# define DPE_CEIL ceill -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND roundl -# define DPE_TRUNC truncl -# endif - -#elif defined(DPE_USE_FLOAT128) -# include "quadmath.h" -# define DPE_DOUBLE __float128 -# define DPE_BITSIZE 113 -# define DPE_2_POW_BITSIZE 0x1P113 -# define DPE_LDEXP ldexpq -# define DPE_FLOOR floorq -# define DPE_CEIL ceilq -# define DPE_FREXP frexpq -# ifdef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND roundq -# define DPE_TRUNC truncq -# endif - -#else -# error "neither DPE_USE_DOUBLE, nor DPE_USE_LONGDOUBLE, nor DPE_USE_FLOAT128 is defined" -#endif - -/* If no C99, do what we can */ -#ifndef DPE_DEFINE_ROUND_TRUNC -# define DPE_ROUND(x) ((DPE_DOUBLE) ((long long) ((x) + ((x) >= 0.0 ? 0.5 : -0.5)))) -# define DPE_TRUNC(x) ((DPE_DOUBLE) ((long long) ((x) + 0.0))) -#endif - -#if defined(DPE_USE_LONG) -# define DPE_EXP_T long /* exponent type */ -# define DPE_EXPMIN LONG_MIN /* smallest possible exponent */ -#elif defined(DPE_USE_LONGLONG) -# define DPE_EXP_T long long -# define DPE_EXPMIN LLONG_MIN -#else -# define DPE_EXP_T int /* exponent type */ -# define DPE_EXPMIN INT_MIN /* smallest possible exponent */ -#endif - -#ifdef DPE_LITTLEENDIAN32 -typedef union -{ - double d; -#if INT_MAX == 0x7FFFFFFFL - int i[2]; -#elif LONG_MAX == 0x7FFFFFFFL - long i[2]; -#elif SHRT_MAX == 0x7FFFFFFFL - short i[2]; -#else -# error Cannot find a 32 bits integer type. -#endif -} dpe_double_words; -#endif - -typedef struct -{ - DPE_DOUBLE d; /* significand */ - DPE_EXP_T exp; /* exponent */ -} dpe_struct; - -typedef dpe_struct dpe_t[1]; - -#define DPE_MANT(x) ((x)->d) -#define DPE_EXP(x) ((x)->exp) -#define DPE_SIGN(x) ((DPE_MANT(x) < 0.0) ? 
-1 : (DPE_MANT(x) > 0.0)) - -#define DPE_INLINE static inline - -/* initialize */ -DPE_INLINE void -dpe_init (dpe_t x DPE_UNUSED_ATTR) -{ -} - -/* clear */ -DPE_INLINE void -dpe_clear (dpe_t x DPE_UNUSED_ATTR) -{ -} - -/* set x to y */ -DPE_INLINE void -dpe_set (dpe_t x, dpe_t y) -{ - DPE_MANT(x) = DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y); -} - -/* set x to -y */ -DPE_INLINE void -dpe_neg (dpe_t x, dpe_t y) -{ - DPE_MANT(x) = -DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y); -} - -/* set x to |y| */ -DPE_INLINE void -dpe_abs (dpe_t x, dpe_t y) -{ - DPE_MANT(x) = (DPE_MANT(y) >= 0) ? DPE_MANT(y) : -DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y); -} - -/* set mantissa in [1/2, 1), except for 0 which has minimum exponent */ -/* FIXME: don't inline this function yet ? */ -static void -dpe_normalize (dpe_t x) -{ - if (DPE_UNLIKELY (DPE_MANT(x) == 0.0 || DPE_ISFINITE (DPE_MANT(x)) == 0)) - { - if (DPE_MANT(x) == 0.0) - DPE_EXP(x) = DPE_EXPMIN; - /* otherwise let the exponent of NaN, Inf unchanged */ - } - else - { - DPE_EXP_T e; -#ifdef DPE_LITTLEENDIAN32 /* 32-bit little endian */ - dpe_double_words dw; - dw.d = DPE_MANT(x); - e = (dw.i[1] >> 20) & 0x7FF; /* unbiased exponent, 1022 for m=1/2 */ - DPE_EXP(x) += e - 1022; - dw.i[1] = (dw.i[1] & 0x800FFFFF) | 0x3FE00000; - DPE_MANT(x) = dw.d; -#else /* portable code */ - double m = DPE_MANT(x); - DPE_MANT(x) = DPE_FREXP (m, &e); - DPE_EXP(x) += e; -#endif - } -} - -#if defined(DPE_USE_DOUBLE) -static const double dpe_scale_tab[54] = { - 0x1P0, 0x1P-1, 0x1P-2, 0x1P-3, 0x1P-4, 0x1P-5, 0x1P-6, 0x1P-7, 0x1P-8, - 0x1P-9, 0x1P-10, 0x1P-11, 0x1P-12, 0x1P-13, 0x1P-14, 0x1P-15, 0x1P-16, - 0x1P-17, 0x1P-18, 0x1P-19, 0x1P-20, 0x1P-21, 0x1P-22, 0x1P-23, 0x1P-24, - 0x1P-25, 0x1P-26, 0x1P-27, 0x1P-28, 0x1P-29, 0x1P-30, 0x1P-31, 0x1P-32, - 0x1P-33, 0x1P-34, 0x1P-35, 0x1P-36, 0x1P-37, 0x1P-38, 0x1P-39, 0x1P-40, - 0x1P-41, 0x1P-42, 0x1P-43, 0x1P-44, 0x1P-45, 0x1P-46, 0x1P-47, 0x1P-48, - 0x1P-49, 0x1P-50, 0x1P-51, 0x1P-52, 0x1P-53}; -#endif - -DPE_INLINE DPE_DOUBLE -dpe_scale (DPE_DOUBLE d, int s) -{ - /* -DPE_BITSIZE < s <= 0 and 1/2 <= d < 1 */ -#if defined(DPE_USE_DOUBLE) - return d * dpe_scale_tab [-s]; -#else /* portable code */ - return DPE_LDEXP (d, s); -#endif -} - -/* set x to y */ -DPE_INLINE void -dpe_set_d (dpe_t x, double y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -/* set x to y */ -DPE_INLINE void -dpe_set_ld (dpe_t x, long double y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -/* set x to y */ -DPE_INLINE void -dpe_set_ui (dpe_t x, unsigned long y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -/* set x to y */ -DPE_INLINE void -dpe_set_si (dpe_t x, long y) -{ - DPE_MANT(x) = (DPE_DOUBLE) y; - DPE_EXP(x) = 0; - dpe_normalize (x); -} - -DPE_INLINE long -dpe_get_si (dpe_t x) -{ - DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); - return (long) d; -} - -DPE_INLINE unsigned long -dpe_get_ui (dpe_t x) -{ - DPE_DOUBLE d = DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); - return (d < 0.0) ? 
0 : (unsigned long) d; -} - -DPE_INLINE double -dpe_get_d (dpe_t x) -{ - return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); -} - -DPE_INLINE long double -dpe_get_ld (dpe_t x) -{ - return DPE_LDEXP (DPE_MANT (x), DPE_EXP (x)); -} - -#if defined(__GMP_H__) || defined(__MINI_GMP_H__) -/* set x to y */ -DPE_INLINE void -dpe_set_z (dpe_t x, mpz_t y) -{ - long e; - DPE_MANT(x) = mpz_get_d_2exp (&e, y); - DPE_EXP(x) = (DPE_EXP_T) e; -} - -/* set x to y, rounded to nearest */ -DPE_INLINE void -dpe_get_z (mpz_t x, dpe_t y) -{ - DPE_EXP_T ey = DPE_EXP(y); - if (ey >= DPE_BITSIZE) /* y is an integer */ - { - DPE_DOUBLE d = DPE_MANT(y) * DPE_2_POW_BITSIZE; /* d is an integer */ - mpz_set_d (x, d); /* should be exact */ - mpz_mul_2exp (x, x, (unsigned long) ey - DPE_BITSIZE); - } - else /* DPE_EXP(y) < DPE_BITSIZE */ - { - if (DPE_UNLIKELY (ey < 0)) /* |y| < 1/2 */ - mpz_set_ui (x, 0); - else - { - DPE_DOUBLE d = DPE_LDEXP(DPE_MANT(y), ey); - mpz_set_d (x, (double) DPE_ROUND(d)); - } - } -} - -/* return e and x such that y = x*2^e */ -DPE_INLINE mp_exp_t -dpe_get_z_exp (mpz_t x, dpe_t y) -{ - mpz_set_d (x, DPE_MANT (y) * DPE_2_POW_BITSIZE); - return DPE_EXP(y) - DPE_BITSIZE; -} -#endif - -/* x <- y + z, assuming y and z are normalized, returns x normalized */ -DPE_INLINE void -dpe_add (dpe_t x, dpe_t y, dpe_t z) -{ - if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) - /* |z| < 1/2*ulp(y), thus o(y+z) = y */ - dpe_set (x, y); - else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) - dpe_set (x, z); - else - { - DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ - - if (d >= 0) - { - DPE_MANT(x) = DPE_MANT(y) + dpe_scale (DPE_MANT(z), -d); - DPE_EXP(x) = DPE_EXP(y); - } - else - { - DPE_MANT(x) = DPE_MANT(z) + dpe_scale (DPE_MANT(y), d); - DPE_EXP(x) = DPE_EXP(z); - } - dpe_normalize (x); - } -} - -/* x <- y - z, assuming y and z are normalized, returns x normalized */ -DPE_INLINE void -dpe_sub (dpe_t x, dpe_t y, dpe_t z) -{ - if (DPE_UNLIKELY (DPE_EXP(y) > DPE_EXP(z) + DPE_BITSIZE)) - /* |z| < 1/2*ulp(y), thus o(y-z) = y */ - dpe_set (x, y); - else if (DPE_UNLIKELY (DPE_EXP(z) > DPE_EXP(y) + DPE_BITSIZE)) - dpe_neg (x, z); - else - { - DPE_EXP_T d = DPE_EXP(y) - DPE_EXP(z); /* |d| <= DPE_BITSIZE */ - - if (d >= 0) - { - DPE_MANT(x) = DPE_MANT(y) - dpe_scale (DPE_MANT(z), -d); - DPE_EXP(x) = DPE_EXP(y); - } - else - { - DPE_MANT(x) = dpe_scale (DPE_MANT(y), d) - DPE_MANT(z); - DPE_EXP(x) = DPE_EXP(z); - } - dpe_normalize (x); - } -} - -/* x <- y * z, assuming y and z are normalized, returns x normalized */ -DPE_INLINE void -dpe_mul (dpe_t x, dpe_t y, dpe_t z) -{ - DPE_MANT(x) = DPE_MANT(y) * DPE_MANT(z); - DPE_EXP(x) = DPE_EXP(y) + DPE_EXP(z); - dpe_normalize (x); -} - -/* x <- sqrt(y), assuming y is normalized, returns x normalized */ -DPE_INLINE void -dpe_sqrt (dpe_t x, dpe_t y) -{ - DPE_EXP_T ey = DPE_EXP(y); - if (ey % 2) - { - /* since 1/2 <= my < 1, 1/4 <= my/2 < 1 */ - DPE_MANT(x) = sqrt (0.5 * DPE_MANT(y)); - DPE_EXP(x) = (ey + 1) / 2; - } - else - { - DPE_MANT(x) = sqrt (DPE_MANT(y)); - DPE_EXP(x) = ey / 2; - } -} - -/* x <- y / z, assuming y and z are normalized, returns x normalized. - Assumes z is not zero. 
*/ -DPE_INLINE void -dpe_div (dpe_t x, dpe_t y, dpe_t z) -{ - DPE_MANT(x) = DPE_MANT(y) / DPE_MANT(z); - DPE_EXP(x) = DPE_EXP(y) - DPE_EXP(z); - dpe_normalize (x); -} - -/* x <- y * z, assuming y normalized, returns x normalized */ -DPE_INLINE void -dpe_mul_ui (dpe_t x, dpe_t y, unsigned long z) -{ - DPE_MANT(x) = DPE_MANT(y) * (DPE_DOUBLE) z; - DPE_EXP(x) = DPE_EXP(y); - dpe_normalize (x); -} - -/* x <- y / z, assuming y normalized, z non-zero, returns x normalized */ -DPE_INLINE void -dpe_div_ui (dpe_t x, dpe_t y, unsigned long z) -{ - DPE_MANT(x) = DPE_MANT(y) / (DPE_DOUBLE) z; - DPE_EXP(x) = DPE_EXP(y); - dpe_normalize (x); -} - -/* x <- y * 2^e */ -DPE_INLINE void -dpe_mul_2exp (dpe_t x, dpe_t y, unsigned long e) -{ - DPE_MANT(x) = DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y) + (DPE_EXP_T) e; -} - -/* x <- y / 2^e */ -DPE_INLINE void -dpe_div_2exp (dpe_t x, dpe_t y, unsigned long e) -{ - DPE_MANT(x) = DPE_MANT(y); - DPE_EXP(x) = DPE_EXP(y) - (DPE_EXP_T) e; -} - -/* return e and x such that y = x*2^e (equality is not guaranteed if the 'long' - type has fewer bits than the significand in dpe_t) */ -DPE_INLINE DPE_EXP_T -dpe_get_si_exp (long *x, dpe_t y) -{ - if (sizeof(long) == 4) /* 32-bit word: long has 31 bits */ - { - *x = (long) (DPE_MANT(y) * 2147483648.0); - return DPE_EXP(y) - 31; - } - else if (sizeof(long) == 8) /* 64-bit word: long has 63 bits */ - { - *x = (long) (DPE_MANT (y) * 9223372036854775808.0); - return DPE_EXP(y) - 63; - } - else - { - fprintf (stderr, "Error, neither 32-bit nor 64-bit word\n"); - exit (1); - } -} - -static DPE_UNUSED_ATTR int dpe_str_prec = 16; -static int dpe_out_str (FILE *s, int base, dpe_t x) DPE_UNUSED_ATTR; - -static int -dpe_out_str (FILE *s, int base, dpe_t x) -{ - DPE_DOUBLE d = DPE_MANT(x); - DPE_EXP_T e2 = DPE_EXP(x); - int e10 = 0; - char sign = ' '; - if (DPE_UNLIKELY (base != 10)) - { - fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); - exit (1); - } - if (d == 0.0) -#ifdef DPE_USE_DOUBLE - return fprintf (s, "%1.*f", dpe_str_prec, d); -#else - return fprintf (s, "%1.*Lf", dpe_str_prec, (long double) d); -#endif - if (d < 0) - { - d = -d; - sign = '-'; - } - if (e2 > 0) - { - while (e2 > 0) - { - e2 --; - d *= 2.0; - if (d >= 10.0) - { - d /= 10.0; - e10 ++; - } - } - } - else /* e2 <= 0 */ - { - while (e2 < 0) - { - e2 ++; - d /= 2.0; - if (d < 1.0) - { - d *= 10.0; - e10 --; - } - } - } -#ifdef DPE_USE_DOUBLE - return fprintf (s, "%c%1.*f*10^%d", sign, dpe_str_prec, d, e10); -#else - return fprintf (s, "%c%1.*Lf*10^%d", sign, dpe_str_prec, (long double) d, e10); -#endif -} - -static size_t dpe_inp_str (dpe_t x, FILE *s, int base) DPE_UNUSED_ATTR; - -static size_t -dpe_inp_str (dpe_t x, FILE *s, int base) -{ - size_t res; - DPE_DOUBLE d; - if (DPE_UNLIKELY (base != 10)) - { - fprintf (stderr, "Error in dpe_out_str, only base 10 allowed\n"); - exit (1); - } -#ifdef DPE_USE_DOUBLE - res = fscanf (s, "%lf", &d); -#elif defined(DPE_USE_LONGDOUBLE) - res = fscanf (s, "%Lf", &d); -#else - { - long double d_ld; - res = fscanf (s, "%Lf", &d_ld); - d = d_ld; - } -#endif - dpe_set_d (x, d); - return res; -} - -DPE_INLINE void -dpe_dump (dpe_t x) -{ - dpe_out_str (stdout, 10, x); - putchar ('\n'); -} - -DPE_INLINE int -dpe_zero_p (dpe_t x) -{ - return DPE_MANT (x) == 0; -} - -/* return a positive value if x > y - a negative value if x < y - and 0 otherwise (x=y). 
*/ -DPE_INLINE int -dpe_cmp (dpe_t x, dpe_t y) -{ - int sx = DPE_SIGN(x); - int d = sx - DPE_SIGN(y); - - if (d != 0) - return d; - else if (DPE_EXP(x) > DPE_EXP(y)) - return (sx > 0) ? 1 : -1; - else if (DPE_EXP(y) > DPE_EXP(x)) - return (sx > 0) ? -1 : 1; - else /* DPE_EXP(x) = DPE_EXP(y) */ - return (DPE_MANT(x) < DPE_MANT(y)) ? -1 : (DPE_MANT(x) > DPE_MANT(y)); -} - -DPE_INLINE int -dpe_cmp_d (dpe_t x, double d) -{ - dpe_t y; - dpe_set_d (y, d); - return dpe_cmp (x, y); -} - -DPE_INLINE int -dpe_cmp_ui (dpe_t x, unsigned long d) -{ - dpe_t y; - dpe_set_ui (y, d); - return dpe_cmp (x, y); -} - -DPE_INLINE int -dpe_cmp_si (dpe_t x, long d) -{ - dpe_t y; - dpe_set_si (y, d); - return dpe_cmp (x, y); -} - -/* set x to integer nearest to y */ -DPE_INLINE void -dpe_round (dpe_t x, dpe_t y) -{ - if (DPE_EXP(y) < 0) /* |y| < 1/2 */ - dpe_set_ui (x, 0); - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set (x, y); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, DPE_ROUND(d)); - } -} - -/* set x to the fractional part of y, defined as y - trunc(y), thus the - fractional part has absolute value in [0, 1), and same sign as y */ -DPE_INLINE void -dpe_frac (dpe_t x, dpe_t y) -{ - /* If |y| is smaller than 1, keep it */ - if (DPE_EXP(y) <= 0) - dpe_set (x, y); - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set_ui (x, 0); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, d - DPE_TRUNC(d)); - } -} - -/* set x to largest integer <= y */ -DPE_INLINE void -dpe_floor (dpe_t x, dpe_t y) -{ - if (DPE_EXP(y) <= 0) /* |y| < 1 */ - { - if (DPE_SIGN(y) >= 0) /* 0 <= y < 1 */ - dpe_set_ui (x, 0); - else /* -1 < y < 0 */ - dpe_set_si (x, -1); - } - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set (x, y); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, DPE_FLOOR(d)); - } -} - -/* set x to smallest integer >= y */ -DPE_INLINE void -dpe_ceil (dpe_t x, dpe_t y) -{ - if (DPE_EXP(y) <= 0) /* |y| < 1 */ - { - if (DPE_SIGN(y) > 0) /* 0 < y < 1 */ - dpe_set_ui (x, 1); - else /* -1 < y <= 0 */ - dpe_set_si (x, 0); - } - else if (DPE_EXP(y) >= DPE_BITSIZE) /* y is an integer */ - dpe_set (x, y); - else - { - DPE_DOUBLE d; - d = DPE_LDEXP(DPE_MANT(y), DPE_EXP(y)); - dpe_set_d (x, DPE_CEIL(d)); - } -} - -DPE_INLINE void -dpe_swap (dpe_t x, dpe_t y) -{ - DPE_EXP_T i = DPE_EXP (x); - DPE_DOUBLE d = DPE_MANT (x); - DPE_EXP (x) = DPE_EXP (y); - DPE_MANT (x) = DPE_MANT (y); - DPE_EXP (y) = i; - DPE_MANT (y) = d; -} - -#endif /* __DPE */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/l2.c index 8c49b21d20..5491ee44d0 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/l2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/l2.c @@ -2,47 +2,208 @@ #include "lll_internals.h" #include "internal.h" -#include "dpe.h" +#include +#include + // Access entry of symmetric matrix #define SYM(M, i, j) (i < j ? 
&M[j][i] : &M[i][j]) -void -quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) +typedef struct fp_num { + double s; + int e; +} fp_num; + +static void +copy(fp_num *x, fp_num *r) { - dpe_t dpe_const_one, dpe_const_DELTABAR; + r->s = x->s; + r->e = x->e; +} - dpe_init(dpe_const_one); - dpe_set_ui(dpe_const_one, 1); +static void +normalize(fp_num *x) +{ + if (x->s == 0.0 || isfinite(x->s) == 0) { + if (x->s == 0.0) { + x->e = INT_MIN; + } + } else { + int e; + x->s = frexp(x->s, &e); + x->e += e; + } +} - dpe_init(dpe_const_DELTABAR); - dpe_set_d(dpe_const_DELTABAR, DELTABAR); +static void +to_one(fp_num *x) +{ + x->s = 1; + x->e = 0; +} - // fp variables for Gram-Schmidt orthogonalization and Lovasz' conditions - dpe_t r[4][4], u[4][4], lovasz[4]; - for (int i = 0; i < 4; i++) { - dpe_init(lovasz[i]); - for (int j = 0; j <= i; j++) { - dpe_init(r[i][j]); - dpe_init(u[i][j]); - } +static void +to_deltabar(fp_num *x) +{ + x->s = DELTABAR; + x->e = 0; +} + +static void +to_etabar(fp_num *x) +{ + x->s = ETABAR; + x->e = 0; +} + +static void +from_mpz(const mpz_t x, fp_num *r) +{ + long exp = 0; + r->s = mpz_get_d_2exp(&exp, x); + r->e = exp; +} + +static void +to_mpz(const fp_num *x, mpz_t r) +{ + if (x->e >= DBL_MANT_DIG) { + double s = x->s * 0x1P53; + mpz_set_d(r, s); + mpz_mul_2exp(r, r, x->e - DBL_MANT_DIG); + } else if (x->e < 0) { + mpz_set_ui(r, 0); + } else { + double s = ldexp(x->s, x->e); + mpz_set_d(r, round(s)); } +} - // threshold for swaps - dpe_t delta_bar; - dpe_init(delta_bar); - dpe_set_d(delta_bar, DELTABAR); +static void +fp_mul(const fp_num *x, const fp_num *y, fp_num *r) +{ + r->s = x->s * y->s; + r->e = x->e + y->e; + normalize(r); + +} + +static void +fp_div(const fp_num *x, const fp_num *y, fp_num *r) +{ + r->s = x->s / y->s; + r->e = x->e - y->e; + normalize(r); +} + +static void +fp_sub(const fp_num *x, const fp_num *y, fp_num *r) +{ + if (x->e > y->e + DBL_MANT_DIG) { + r->s = x->s; + r->e = x->e; + } else if (y->e > x->e + DBL_MANT_DIG) { + r->s = -y->s; + r->e = y->e; + } else { + int e = x->e - y->e; + + if (e >= 0) { + r->s = x->s - ldexp(y->s, -e); + r->e = x->e; + } else { + r->s = ldexp(x->s, e) - y->s; + r->e = y->e; + } + + normalize(r); + } +} + +static inline int +sign(const fp_num *x) +{ + if (x->s < 0.0) + return -1; + return 1; +} + +static int +fp_cmp(const fp_num *x, const fp_num *y) +{ + int sign_x = sign(x); + int sign_y = sign(y); + + if (sign_x != sign_y) + return sign_x - sign_y; + else if (x->e > y->e) + return sign_x; + else if (y->e > x->e) + return -sign_x; + else if (x->s > y->s) + return 1; + else if (x->s < y->s) + return -1; + else + return 0; +} + +static void +fp_round(fp_num *x) +{ + if (x->e < 0) { + x->s = 0; + x->e = 0; + } else if (x->e >= DBL_MANT_DIG) { + return; + } else { + double tmp; + tmp = ldexp(x->s, x->e); + x->s = round(tmp); + x->e = 0; + normalize(x); + } +} + +static void +fp_abs(const fp_num *x, fp_num *y) { + if (x->s < 0.0) { + y->s = -x->s; + } else { + y->s = x->s; + } + y->e = x->e; +} + +void +quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) +{ + fp_num const_one = {0}; + fp_num delta_bar = {0}; + fp_num eta_bar = {0}; + fp_num neg_eta_bar = {0}; + to_one(&const_one); + to_deltabar(&delta_bar); + eta_bar.s = ETABAR; + eta_bar.e = 0; + neg_eta_bar.s = -ETABAR; + neg_eta_bar.e = 0; + normalize(&eta_bar); + normalize(&neg_eta_bar); + + fp_num r[4][4] = {0}; + fp_num u[4][4] = {0}; + fp_num lovasz[4] = {0}; + + fp_num Xf = {0}; + fp_num tmpF = {0}; - // Other work variables - dpe_t Xf, tmpF; - 
dpe_init(Xf); - dpe_init(tmpF); ibz_t X, tmpI; ibz_init(&X); ibz_init(&tmpI); // Main L² loop - dpe_set_z(r[0][0], (*G)[0][0]); + from_mpz((*G)[0][0], &r[0][0]); int kappa = 1; while (kappa < 4) { // size reduce b_κ @@ -52,23 +213,23 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Loop invariant: // r[κ][j] ≈ u[κ][j] ‖b_j*‖² ≈ 〈b_κ, b_j*〉 for (int j = 0; j <= kappa; j++) { - dpe_set_z(r[kappa][j], (*G)[kappa][j]); + from_mpz((*G)[kappa][j], &r[kappa][j]); for (int k = 0; k < j; k++) { - dpe_mul(tmpF, r[kappa][k], u[j][k]); - dpe_sub(r[kappa][j], r[kappa][j], tmpF); + fp_mul(&r[kappa][k], &u[j][k], &tmpF); + fp_sub(&r[kappa][j], &tmpF, &r[kappa][j]); } if (j < kappa) - dpe_div(u[kappa][j], r[kappa][j], r[j][j]); + fp_div(&r[kappa][j], &r[j][j], &u[kappa][j]); } done = 1; // size reduce for (int i = kappa - 1; i >= 0; i--) { - if (dpe_cmp_d(u[kappa][i], ETABAR) > 0 || dpe_cmp_d(u[kappa][i], -ETABAR) < 0) { + if (fp_cmp(&u[kappa][i], &eta_bar) > 0 || fp_cmp(&u[kappa][i], &neg_eta_bar) < 0) { done = 0; - dpe_set(Xf, u[kappa][i]); - dpe_round(Xf, Xf); - dpe_get_z(X, Xf); + copy(&u[kappa][i], &Xf); + fp_round(&Xf); + to_mpz(&Xf, X); // Update basis: b_κ ← b_κ - X·b_i for (int j = 0; j < 4; j++) { ibz_mul(&tmpI, &X, &(*basis)[j][i]); @@ -91,8 +252,8 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // // Update u[kappa][j] for (int j = 0; j < i; j++) { - dpe_mul(tmpF, Xf, u[i][j]); - dpe_sub(u[kappa][j], u[kappa][j], tmpF); + fp_mul(&Xf, &u[i][j], &tmpF); + fp_sub(&u[kappa][j], &tmpF, &u[kappa][j]); } } } @@ -100,16 +261,16 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Check Lovasz' conditions // lovasz[0] = ‖b_κ‖² - dpe_set_z(lovasz[0], (*G)[kappa][kappa]); + from_mpz((*G)[kappa][kappa], &lovasz[0]); // lovasz[i] = lovasz[i-1] - u[κ][i-1]·r[κ][i-1] for (int i = 1; i < kappa; i++) { - dpe_mul(tmpF, u[kappa][i - 1], r[kappa][i - 1]); - dpe_sub(lovasz[i], lovasz[i - 1], tmpF); + fp_mul(&u[kappa][i - 1], &r[kappa][i - 1], &tmpF); + fp_sub(&lovasz[i - 1], &tmpF, &lovasz[i]); } int swap; for (swap = kappa; swap > 0; swap--) { - dpe_mul(tmpF, delta_bar, r[swap - 1][swap - 1]); - if (dpe_cmp(tmpF, lovasz[swap - 1]) < 0) + fp_mul(&delta_bar, &r[swap - 1][swap - 1], &tmpF); + if (fp_cmp(&tmpF, &lovasz[swap - 1]) < 0) break; } @@ -127,10 +288,10 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) } // Copy row u[κ] and r[κ] in swap position, ignore what follows for (int i = 0; i < swap; i++) { - dpe_set(u[swap][i], u[kappa][i]); - dpe_set(r[swap][i], r[kappa][i]); + copy(&u[kappa][i], &u[swap][i]); + copy(&r[kappa][i], &r[swap][i]); } - dpe_set(r[swap][swap], lovasz[swap]); + copy(&lovasz[swap], &r[swap][swap]); // swap complete kappa = swap; } @@ -142,15 +303,15 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Check size-reducedness for (int i = 0; i < 4; i++) for (int j = 0; j < i; j++) { - dpe_abs(u[i][j], u[i][j]); - assert(dpe_cmp_d(u[i][j], ETABAR) <= 0); + fp_abs(&u[i][j], &u[i][j]); + assert(fp_cmp(&u[i][j], &eta_bar) <= 0); } // Check Lovasz' conditions for (int i = 1; i < 4; i++) { - dpe_mul(tmpF, u[i][i - 1], u[i][i - 1]); - dpe_sub(tmpF, dpe_const_DELTABAR, tmpF); - dpe_mul(tmpF, tmpF, r[i - 1][i - 1]); - assert(dpe_cmp(tmpF, r[i][i]) <= 0); + fp_mul(&u[i][i - 1], &u[i][i - 1], &tmpF); + fp_sub(&delta_bar, &tmpF, &tmpF); + fp_mul(&tmpF, &r[i - 1][i - 1], &tmpF); + assert(fp_cmp(&tmpF, &r[i][i]) <= 0); } #endif @@ -163,18 +324,6 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Clearinghouse ibz_finalize(&X); ibz_finalize(&tmpI); - 
dpe_clear(dpe_const_one); - dpe_clear(dpe_const_DELTABAR); - dpe_clear(Xf); - dpe_clear(tmpF); - dpe_clear(delta_bar); - for (int i = 0; i < 4; i++) { - dpe_clear(lovasz[i]); - for (int j = 0; j <= i; j++) { - dpe_clear(r[i][j]); - dpe_clear(u[i][j]); - } - } } int From 0275b6282993241f7f900d290b574a6d44db2b2e Mon Sep 17 00:00:00 2001 From: Basil Hess Date: Fri, 15 Aug 2025 13:09:01 +0200 Subject: [PATCH 07/19] guards around GMP dependencies Signed-off-by: Basil Hess --- CMakeLists.txt | 26 +++++++++++-------- .../src/sig/family/CMakeLists.txt | 11 ++++++-- src/sig/sqisign/CMakeLists.txt | 12 +++++++++ 3 files changed, 36 insertions(+), 13 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index cc8c9724da..87bd4d28a6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -152,21 +152,22 @@ if(${OQS_USE_CUPQC}) find_package(cuPQC 0.2.0 REQUIRED) endif() if(${OQS_USE_GMP}) - message(STATUS "Using system GMP") - - find_library(GMP gmp REQUIRED) + message(STATUS "Looking for GMP") + find_library(GMP gmp) find_path(GMP_INCLUDE gmp.h) - add_library(GMP SHARED IMPORTED) - set_target_properties(GMP PROPERTIES - IMPORTED_LOCATION ${GMP} - INTERFACE_INCLUDE_DIRECTORIES ${GMP_INCLUDE} - ) + if(GMP AND GMP_INCLUDE) + message(STATUS "GMP found, enabling GMP support") + add_library(GMP SHARED IMPORTED) + set_target_properties(GMP PROPERTIES + IMPORTED_LOCATION ${GMP} + INTERFACE_INCLUDE_DIRECTORIES ${GMP_INCLUDE} + ) - if(CMAKE_SIZEOF_VOID_P MATCHES "4") + if(CMAKE_SIZEOF_VOID_P MATCHES "4") add_compile_definitions(RADIX_32) add_compile_definitions(GMP_LIMB_BITS=32) - else() + else() add_compile_definitions(RADIX_64) include(CheckCSourceCompiles) check_c_source_compiles(" @@ -180,8 +181,11 @@ if(${OQS_USE_GMP}) add_compile_definitions(HAVE_UINT128) endif() add_compile_definitions(GMP_LIMB_BITS=64) + endif() + else() + message(STATUS "GMP not found, disabling GMP support") + set(OQS_USE_GMP OFF) endif() - endif() if (NOT ((CMAKE_SYSTEM_NAME MATCHES "Linux|Darwin") AND (ARCH_X86_64 STREQUAL "ON")) AND (OQS_LIBJADE_BUILD STREQUAL "ON")) diff --git a/scripts/copy_from_upstream/src/sig/family/CMakeLists.txt b/scripts/copy_from_upstream/src/sig/family/CMakeLists.txt index 522a96cb03..2148b2acd4 100644 --- a/scripts/copy_from_upstream/src/sig/family/CMakeLists.txt +++ b/scripts/copy_from_upstream/src/sig/family/CMakeLists.txt @@ -26,7 +26,9 @@ endif() {%- for scheme in schemes -%} {%- for impl in scheme['metadata']['implementations'] -%} {%- if impl['name'] == scheme['default_implementation'] %} - +{% if impl['external'] and impl['external'] == 'gmp' %} +if(OQS_USE_GMP) +{%- endif %} if(OQS_ENABLE_SIG_{{ family }}_{{ scheme['scheme_c'] }}{%- if 'alias_scheme' in scheme %} OR OQS_ENABLE_SIG_{{ family }}_{{ scheme['alias_scheme'] }}{%- endif %}) add_library({{ family }}_{{ scheme['scheme'] }}_{{ impl['name'] }} OBJECT sig_{{ family }}_{{ scheme['scheme'] }}.c {% for source_file in impl['sources']|sort -%}{{ impl['upstream']['name'] }}_{{ scheme['pqclean_scheme'] }}_{{ impl['name'] }}/{{ source_file }}{%- if not loop.last %} {% endif -%}{%- endfor -%}) {%- if impl['compile_opts'] %} @@ -34,7 +36,9 @@ if(OQS_ENABLE_SIG_{{ family }}_{{ scheme['scheme_c'] }}{%- if 'alias_scheme' in {%- endif -%} {%- else %} - +{% if impl['external'] and impl['external'] == 'gmp' %} +if(OQS_USE_GMP) +{%- endif %} if(OQS_ENABLE_SIG_{{ family }}_{{ scheme['scheme_c'] }}_{{ impl['name'] }}{%- if 'alias_scheme' in scheme %} OR OQS_ENABLE_SIG_{{ family }}_{{ scheme['alias_scheme'] }}_{{ impl['name'] }}{%- endif %}) add_library({{ 
family }}_{{ scheme['scheme'] }}_{{ impl['name'] }} OBJECT {% for source_file in impl['sources']|sort -%}{{ impl['upstream']['name'] }}_{{ scheme['pqclean_scheme'] }}_{{ impl['name'] }}/{{ source_file }}{%- if not loop.last %} {% endif -%}{%- endfor -%}) {%- endif %} @@ -51,6 +55,9 @@ if(OQS_ENABLE_SIG_{{ family }}_{{ scheme['scheme_c'] }}_{{ impl['name'] }}{%- if {%- endif %} set(_{{ family|upper }}_OBJS ${_{{ family|upper }}_OBJS} $) endif() +{%- if impl['external'] and impl['external'] == 'gmp' %} +endif() +{%- endif %} {%- endfor -%} {%- endfor %} diff --git a/src/sig/sqisign/CMakeLists.txt b/src/sig/sqisign/CMakeLists.txt index beed24db74..587e899eab 100644 --- a/src/sig/sqisign/CMakeLists.txt +++ b/src/sig/sqisign/CMakeLists.txt @@ -5,6 +5,7 @@ set(_SQISIGN_OBJS "") +if(OQS_USE_GMP) if(OQS_ENABLE_SIG_sqisign_lvl1) add_library(sqisign_lvl1_ref OBJECT sig_sqisign_lvl1.c the-sqisign_sqisign_lvl1_ref/algebra.c the-sqisign_sqisign_lvl1_ref/api.c the-sqisign_sqisign_lvl1_ref/basis.c the-sqisign_sqisign_lvl1_ref/biextension.c the-sqisign_sqisign_lvl1_ref/common.c the-sqisign_sqisign_lvl1_ref/dim2.c the-sqisign_sqisign_lvl1_ref/dim2id2iso.c the-sqisign_sqisign_lvl1_ref/dim4.c the-sqisign_sqisign_lvl1_ref/e0_basis.c the-sqisign_sqisign_lvl1_ref/ec.c the-sqisign_sqisign_lvl1_ref/ec_jac.c the-sqisign_sqisign_lvl1_ref/ec_params.c the-sqisign_sqisign_lvl1_ref/encode_signature.c the-sqisign_sqisign_lvl1_ref/encode_verification.c the-sqisign_sqisign_lvl1_ref/endomorphism_action.c the-sqisign_sqisign_lvl1_ref/finit.c the-sqisign_sqisign_lvl1_ref/fp.c the-sqisign_sqisign_lvl1_ref/fp2.c the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c the-sqisign_sqisign_lvl1_ref/hd.c the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl1_ref/hnf.c the-sqisign_sqisign_lvl1_ref/hnf_internal.c the-sqisign_sqisign_lvl1_ref/ibz_division.c the-sqisign_sqisign_lvl1_ref/id2iso.c the-sqisign_sqisign_lvl1_ref/ideal.c the-sqisign_sqisign_lvl1_ref/intbig.c the-sqisign_sqisign_lvl1_ref/integers.c the-sqisign_sqisign_lvl1_ref/isog_chains.c the-sqisign_sqisign_lvl1_ref/keygen.c the-sqisign_sqisign_lvl1_ref/l2.c the-sqisign_sqisign_lvl1_ref/lat_ball.c the-sqisign_sqisign_lvl1_ref/lattice.c the-sqisign_sqisign_lvl1_ref/lll_applications.c the-sqisign_sqisign_lvl1_ref/mp.c the-sqisign_sqisign_lvl1_ref/normeq.c the-sqisign_sqisign_lvl1_ref/printer.c the-sqisign_sqisign_lvl1_ref/quaternion_data.c the-sqisign_sqisign_lvl1_ref/rationals.c the-sqisign_sqisign_lvl1_ref/sign.c the-sqisign_sqisign_lvl1_ref/sqisign.c the-sqisign_sqisign_lvl1_ref/theta_isogenies.c the-sqisign_sqisign_lvl1_ref/theta_structure.c the-sqisign_sqisign_lvl1_ref/torsion_constants.c the-sqisign_sqisign_lvl1_ref/verify.c the-sqisign_sqisign_lvl1_ref/xeval.c the-sqisign_sqisign_lvl1_ref/xisog.c) target_compile_options(sqisign_lvl1_ref PUBLIC -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl1) @@ -14,7 +15,9 @@ if(OQS_ENABLE_SIG_sqisign_lvl1) target_compile_options(sqisign_lvl1_ref PUBLIC -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl1) set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) endif() +endif() +if(OQS_USE_GMP) if(OQS_ENABLE_SIG_sqisign_lvl1_broadwell) add_library(sqisign_lvl1_broadwell OBJECT the-sqisign_sqisign_lvl1_broadwell/algebra.c the-sqisign_sqisign_lvl1_broadwell/api.c the-sqisign_sqisign_lvl1_broadwell/basis.c the-sqisign_sqisign_lvl1_broadwell/biextension.c the-sqisign_sqisign_lvl1_broadwell/common.c the-sqisign_sqisign_lvl1_broadwell/dim2.c 
the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl1_broadwell/dim4.c the-sqisign_sqisign_lvl1_broadwell/e0_basis.c the-sqisign_sqisign_lvl1_broadwell/ec.c the-sqisign_sqisign_lvl1_broadwell/ec_jac.c the-sqisign_sqisign_lvl1_broadwell/ec_params.c the-sqisign_sqisign_lvl1_broadwell/encode_signature.c the-sqisign_sqisign_lvl1_broadwell/encode_verification.c the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl1_broadwell/finit.c the-sqisign_sqisign_lvl1_broadwell/fp.c the-sqisign_sqisign_lvl1_broadwell/fp2.c the-sqisign_sqisign_lvl1_broadwell/fp_asm.S the-sqisign_sqisign_lvl1_broadwell/gf5248.c the-sqisign_sqisign_lvl1_broadwell/hd.c the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl1_broadwell/hnf.c the-sqisign_sqisign_lvl1_broadwell/hnf_internal.c the-sqisign_sqisign_lvl1_broadwell/ibz_division.c the-sqisign_sqisign_lvl1_broadwell/id2iso.c the-sqisign_sqisign_lvl1_broadwell/ideal.c the-sqisign_sqisign_lvl1_broadwell/intbig.c the-sqisign_sqisign_lvl1_broadwell/integers.c the-sqisign_sqisign_lvl1_broadwell/isog_chains.c the-sqisign_sqisign_lvl1_broadwell/keygen.c the-sqisign_sqisign_lvl1_broadwell/l2.c the-sqisign_sqisign_lvl1_broadwell/lat_ball.c the-sqisign_sqisign_lvl1_broadwell/lattice.c the-sqisign_sqisign_lvl1_broadwell/lll_applications.c the-sqisign_sqisign_lvl1_broadwell/mp.c the-sqisign_sqisign_lvl1_broadwell/normeq.c the-sqisign_sqisign_lvl1_broadwell/quaternion_data.c the-sqisign_sqisign_lvl1_broadwell/rationals.c the-sqisign_sqisign_lvl1_broadwell/sign.c the-sqisign_sqisign_lvl1_broadwell/sqisign.c the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl1_broadwell/theta_structure.c the-sqisign_sqisign_lvl1_broadwell/torsion_constants.c the-sqisign_sqisign_lvl1_broadwell/verify.c the-sqisign_sqisign_lvl1_broadwell/xeval.c the-sqisign_sqisign_lvl1_broadwell/xisog.c) target_include_directories(sqisign_lvl1_broadwell PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl1_broadwell) @@ -24,7 +27,9 @@ if(OQS_ENABLE_SIG_sqisign_lvl1_broadwell) target_compile_options(sqisign_lvl1_broadwell PUBLIC -DSQISIGN_BUILD_TYPE_BROADWELL=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl1 -DSQISIGN_GF_IMPL_BROADWELL) set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) endif() +endif() +if(OQS_USE_GMP) if(OQS_ENABLE_SIG_sqisign_lvl3) add_library(sqisign_lvl3_ref OBJECT sig_sqisign_lvl3.c the-sqisign_sqisign_lvl3_ref/algebra.c the-sqisign_sqisign_lvl3_ref/api.c the-sqisign_sqisign_lvl3_ref/basis.c the-sqisign_sqisign_lvl3_ref/biextension.c the-sqisign_sqisign_lvl3_ref/common.c the-sqisign_sqisign_lvl3_ref/dim2.c the-sqisign_sqisign_lvl3_ref/dim2id2iso.c the-sqisign_sqisign_lvl3_ref/dim4.c the-sqisign_sqisign_lvl3_ref/e0_basis.c the-sqisign_sqisign_lvl3_ref/ec.c the-sqisign_sqisign_lvl3_ref/ec_jac.c the-sqisign_sqisign_lvl3_ref/ec_params.c the-sqisign_sqisign_lvl3_ref/encode_signature.c the-sqisign_sqisign_lvl3_ref/encode_verification.c the-sqisign_sqisign_lvl3_ref/endomorphism_action.c the-sqisign_sqisign_lvl3_ref/finit.c the-sqisign_sqisign_lvl3_ref/fp.c the-sqisign_sqisign_lvl3_ref/fp2.c the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c the-sqisign_sqisign_lvl3_ref/hd.c the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl3_ref/hnf.c the-sqisign_sqisign_lvl3_ref/hnf_internal.c the-sqisign_sqisign_lvl3_ref/ibz_division.c the-sqisign_sqisign_lvl3_ref/id2iso.c the-sqisign_sqisign_lvl3_ref/ideal.c the-sqisign_sqisign_lvl3_ref/intbig.c 
the-sqisign_sqisign_lvl3_ref/integers.c the-sqisign_sqisign_lvl3_ref/isog_chains.c the-sqisign_sqisign_lvl3_ref/keygen.c the-sqisign_sqisign_lvl3_ref/l2.c the-sqisign_sqisign_lvl3_ref/lat_ball.c the-sqisign_sqisign_lvl3_ref/lattice.c the-sqisign_sqisign_lvl3_ref/lll_applications.c the-sqisign_sqisign_lvl3_ref/mp.c the-sqisign_sqisign_lvl3_ref/normeq.c the-sqisign_sqisign_lvl3_ref/printer.c the-sqisign_sqisign_lvl3_ref/quaternion_data.c the-sqisign_sqisign_lvl3_ref/rationals.c the-sqisign_sqisign_lvl3_ref/sign.c the-sqisign_sqisign_lvl3_ref/sqisign.c the-sqisign_sqisign_lvl3_ref/theta_isogenies.c the-sqisign_sqisign_lvl3_ref/theta_structure.c the-sqisign_sqisign_lvl3_ref/torsion_constants.c the-sqisign_sqisign_lvl3_ref/verify.c the-sqisign_sqisign_lvl3_ref/xeval.c the-sqisign_sqisign_lvl3_ref/xisog.c) target_compile_options(sqisign_lvl3_ref PUBLIC -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl3) @@ -34,7 +39,9 @@ if(OQS_ENABLE_SIG_sqisign_lvl3) target_compile_options(sqisign_lvl3_ref PUBLIC -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl3) set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) endif() +endif() +if(OQS_USE_GMP) if(OQS_ENABLE_SIG_sqisign_lvl3_broadwell) add_library(sqisign_lvl3_broadwell OBJECT the-sqisign_sqisign_lvl3_broadwell/algebra.c the-sqisign_sqisign_lvl3_broadwell/api.c the-sqisign_sqisign_lvl3_broadwell/basis.c the-sqisign_sqisign_lvl3_broadwell/biextension.c the-sqisign_sqisign_lvl3_broadwell/common.c the-sqisign_sqisign_lvl3_broadwell/dim2.c the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl3_broadwell/dim4.c the-sqisign_sqisign_lvl3_broadwell/e0_basis.c the-sqisign_sqisign_lvl3_broadwell/ec.c the-sqisign_sqisign_lvl3_broadwell/ec_jac.c the-sqisign_sqisign_lvl3_broadwell/ec_params.c the-sqisign_sqisign_lvl3_broadwell/encode_signature.c the-sqisign_sqisign_lvl3_broadwell/encode_verification.c the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl3_broadwell/finit.c the-sqisign_sqisign_lvl3_broadwell/fp.c the-sqisign_sqisign_lvl3_broadwell/fp2.c the-sqisign_sqisign_lvl3_broadwell/fp_asm.S the-sqisign_sqisign_lvl3_broadwell/gf65376.c the-sqisign_sqisign_lvl3_broadwell/hd.c the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl3_broadwell/hnf.c the-sqisign_sqisign_lvl3_broadwell/hnf_internal.c the-sqisign_sqisign_lvl3_broadwell/ibz_division.c the-sqisign_sqisign_lvl3_broadwell/id2iso.c the-sqisign_sqisign_lvl3_broadwell/ideal.c the-sqisign_sqisign_lvl3_broadwell/intbig.c the-sqisign_sqisign_lvl3_broadwell/integers.c the-sqisign_sqisign_lvl3_broadwell/isog_chains.c the-sqisign_sqisign_lvl3_broadwell/keygen.c the-sqisign_sqisign_lvl3_broadwell/l2.c the-sqisign_sqisign_lvl3_broadwell/lat_ball.c the-sqisign_sqisign_lvl3_broadwell/lattice.c the-sqisign_sqisign_lvl3_broadwell/lll_applications.c the-sqisign_sqisign_lvl3_broadwell/mp.c the-sqisign_sqisign_lvl3_broadwell/normeq.c the-sqisign_sqisign_lvl3_broadwell/quaternion_data.c the-sqisign_sqisign_lvl3_broadwell/rationals.c the-sqisign_sqisign_lvl3_broadwell/sign.c the-sqisign_sqisign_lvl3_broadwell/sqisign.c the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl3_broadwell/theta_structure.c the-sqisign_sqisign_lvl3_broadwell/torsion_constants.c the-sqisign_sqisign_lvl3_broadwell/verify.c the-sqisign_sqisign_lvl3_broadwell/xeval.c the-sqisign_sqisign_lvl3_broadwell/xisog.c) target_include_directories(sqisign_lvl3_broadwell PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl3_broadwell) @@ -44,7 
+51,9 @@ if(OQS_ENABLE_SIG_sqisign_lvl3_broadwell) target_compile_options(sqisign_lvl3_broadwell PUBLIC -DSQISIGN_BUILD_TYPE_BROADWELL=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl3 -DSQISIGN_GF_IMPL_BROADWELL) set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) endif() +endif() +if(OQS_USE_GMP) if(OQS_ENABLE_SIG_sqisign_lvl5) add_library(sqisign_lvl5_ref OBJECT sig_sqisign_lvl5.c the-sqisign_sqisign_lvl5_ref/algebra.c the-sqisign_sqisign_lvl5_ref/api.c the-sqisign_sqisign_lvl5_ref/basis.c the-sqisign_sqisign_lvl5_ref/biextension.c the-sqisign_sqisign_lvl5_ref/common.c the-sqisign_sqisign_lvl5_ref/dim2.c the-sqisign_sqisign_lvl5_ref/dim2id2iso.c the-sqisign_sqisign_lvl5_ref/dim4.c the-sqisign_sqisign_lvl5_ref/e0_basis.c the-sqisign_sqisign_lvl5_ref/ec.c the-sqisign_sqisign_lvl5_ref/ec_jac.c the-sqisign_sqisign_lvl5_ref/ec_params.c the-sqisign_sqisign_lvl5_ref/encode_signature.c the-sqisign_sqisign_lvl5_ref/encode_verification.c the-sqisign_sqisign_lvl5_ref/endomorphism_action.c the-sqisign_sqisign_lvl5_ref/finit.c the-sqisign_sqisign_lvl5_ref/fp.c the-sqisign_sqisign_lvl5_ref/fp2.c the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c the-sqisign_sqisign_lvl5_ref/hd.c the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.c the-sqisign_sqisign_lvl5_ref/hnf.c the-sqisign_sqisign_lvl5_ref/hnf_internal.c the-sqisign_sqisign_lvl5_ref/ibz_division.c the-sqisign_sqisign_lvl5_ref/id2iso.c the-sqisign_sqisign_lvl5_ref/ideal.c the-sqisign_sqisign_lvl5_ref/intbig.c the-sqisign_sqisign_lvl5_ref/integers.c the-sqisign_sqisign_lvl5_ref/isog_chains.c the-sqisign_sqisign_lvl5_ref/keygen.c the-sqisign_sqisign_lvl5_ref/l2.c the-sqisign_sqisign_lvl5_ref/lat_ball.c the-sqisign_sqisign_lvl5_ref/lattice.c the-sqisign_sqisign_lvl5_ref/lll_applications.c the-sqisign_sqisign_lvl5_ref/mp.c the-sqisign_sqisign_lvl5_ref/normeq.c the-sqisign_sqisign_lvl5_ref/printer.c the-sqisign_sqisign_lvl5_ref/quaternion_data.c the-sqisign_sqisign_lvl5_ref/rationals.c the-sqisign_sqisign_lvl5_ref/sign.c the-sqisign_sqisign_lvl5_ref/sqisign.c the-sqisign_sqisign_lvl5_ref/theta_isogenies.c the-sqisign_sqisign_lvl5_ref/theta_structure.c the-sqisign_sqisign_lvl5_ref/torsion_constants.c the-sqisign_sqisign_lvl5_ref/verify.c the-sqisign_sqisign_lvl5_ref/xeval.c the-sqisign_sqisign_lvl5_ref/xisog.c) target_compile_options(sqisign_lvl5_ref PUBLIC -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl5) @@ -54,7 +63,9 @@ if(OQS_ENABLE_SIG_sqisign_lvl5) target_compile_options(sqisign_lvl5_ref PUBLIC -DSQISIGN_BUILD_TYPE_REF=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl5) set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) endif() +endif() +if(OQS_USE_GMP) if(OQS_ENABLE_SIG_sqisign_lvl5_broadwell) add_library(sqisign_lvl5_broadwell OBJECT the-sqisign_sqisign_lvl5_broadwell/algebra.c the-sqisign_sqisign_lvl5_broadwell/api.c the-sqisign_sqisign_lvl5_broadwell/basis.c the-sqisign_sqisign_lvl5_broadwell/biextension.c the-sqisign_sqisign_lvl5_broadwell/common.c the-sqisign_sqisign_lvl5_broadwell/dim2.c the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c the-sqisign_sqisign_lvl5_broadwell/dim4.c the-sqisign_sqisign_lvl5_broadwell/e0_basis.c the-sqisign_sqisign_lvl5_broadwell/ec.c the-sqisign_sqisign_lvl5_broadwell/ec_jac.c the-sqisign_sqisign_lvl5_broadwell/ec_params.c the-sqisign_sqisign_lvl5_broadwell/encode_signature.c the-sqisign_sqisign_lvl5_broadwell/encode_verification.c the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c the-sqisign_sqisign_lvl5_broadwell/finit.c the-sqisign_sqisign_lvl5_broadwell/fp.c 
the-sqisign_sqisign_lvl5_broadwell/fp2.c the-sqisign_sqisign_lvl5_broadwell/fp_asm.S the-sqisign_sqisign_lvl5_broadwell/gf27500.c the-sqisign_sqisign_lvl5_broadwell/hd.c the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.c the-sqisign_sqisign_lvl5_broadwell/hnf.c the-sqisign_sqisign_lvl5_broadwell/hnf_internal.c the-sqisign_sqisign_lvl5_broadwell/ibz_division.c the-sqisign_sqisign_lvl5_broadwell/id2iso.c the-sqisign_sqisign_lvl5_broadwell/ideal.c the-sqisign_sqisign_lvl5_broadwell/intbig.c the-sqisign_sqisign_lvl5_broadwell/integers.c the-sqisign_sqisign_lvl5_broadwell/isog_chains.c the-sqisign_sqisign_lvl5_broadwell/keygen.c the-sqisign_sqisign_lvl5_broadwell/l2.c the-sqisign_sqisign_lvl5_broadwell/lat_ball.c the-sqisign_sqisign_lvl5_broadwell/lattice.c the-sqisign_sqisign_lvl5_broadwell/lll_applications.c the-sqisign_sqisign_lvl5_broadwell/mp.c the-sqisign_sqisign_lvl5_broadwell/normeq.c the-sqisign_sqisign_lvl5_broadwell/quaternion_data.c the-sqisign_sqisign_lvl5_broadwell/rationals.c the-sqisign_sqisign_lvl5_broadwell/sign.c the-sqisign_sqisign_lvl5_broadwell/sqisign.c the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.c the-sqisign_sqisign_lvl5_broadwell/theta_structure.c the-sqisign_sqisign_lvl5_broadwell/torsion_constants.c the-sqisign_sqisign_lvl5_broadwell/verify.c the-sqisign_sqisign_lvl5_broadwell/xeval.c the-sqisign_sqisign_lvl5_broadwell/xisog.c) target_include_directories(sqisign_lvl5_broadwell PRIVATE ${CMAKE_CURRENT_LIST_DIR}/the-sqisign_sqisign_lvl5_broadwell) @@ -64,5 +75,6 @@ if(OQS_ENABLE_SIG_sqisign_lvl5_broadwell) target_compile_options(sqisign_lvl5_broadwell PUBLIC -DSQISIGN_BUILD_TYPE_BROADWELL=ON -DENABLE_SIGN=ON -DSQISIGN_VARIANT=lvl5 -DSQISIGN_GF_IMPL_BROADWELL) set(_SQISIGN_OBJS ${_SQISIGN_OBJS} $) endif() +endif() set(SQISIGN_OBJS ${_SQISIGN_OBJS} PARENT_SCOPE) From 3854af8c7ad1d644aa922aa6587914f837dbf1ca Mon Sep 17 00:00:00 2001 From: Basil Hess Date: Fri, 15 Aug 2025 14:02:32 +0200 Subject: [PATCH 08/19] more guards around gmp dependency Signed-off-by: Basil Hess --- .CMake/alg_support.cmake | 4 ++++ .../.CMake/alg_support.cmake/add_enable_by_alg.fragment | 8 ++++++++ scripts/copy_from_upstream/copy_from_upstream.yml | 1 + 3 files changed, 13 insertions(+) diff --git a/.CMake/alg_support.cmake b/.CMake/alg_support.cmake index 3fe002f406..32eb0d6253 100644 --- a/.CMake/alg_support.cmake +++ b/.CMake/alg_support.cmake @@ -223,7 +223,11 @@ cmake_dependent_option(OQS_ENABLE_SIG_snova_SNOVA_24_5_5 "" ON "OQS_ENABLE_SIG_S cmake_dependent_option(OQS_ENABLE_SIG_snova_SNOVA_60_10_4 "" ON "OQS_ENABLE_SIG_SNOVA" OFF) cmake_dependent_option(OQS_ENABLE_SIG_snova_SNOVA_29_6_5 "" ON "OQS_ENABLE_SIG_SNOVA" OFF) +if (OQS_USE_GMP) option(OQS_ENABLE_SIG_SQISIGN "Enable sqisign algorithm family" ON) +else() +option(OQS_ENABLE_SIG_SQISIGN "Enable sqisign algorithm family" OFF) +endif() cmake_dependent_option(OQS_ENABLE_SIG_sqisign_lvl1 "" ON "OQS_ENABLE_SIG_SQISIGN" OFF) cmake_dependent_option(OQS_ENABLE_SIG_sqisign_lvl3 "" ON "OQS_ENABLE_SIG_SQISIGN" OFF) cmake_dependent_option(OQS_ENABLE_SIG_sqisign_lvl5 "" ON "OQS_ENABLE_SIG_SQISIGN" OFF) diff --git a/scripts/copy_from_upstream/.CMake/alg_support.cmake/add_enable_by_alg.fragment b/scripts/copy_from_upstream/.CMake/alg_support.cmake/add_enable_by_alg.fragment index 62135d9d43..7ce4ed3e1d 100644 --- a/scripts/copy_from_upstream/.CMake/alg_support.cmake/add_enable_by_alg.fragment +++ b/scripts/copy_from_upstream/.CMake/alg_support.cmake/add_enable_by_alg.fragment @@ -16,7 +16,15 @@ 
cmake_dependent_option(OQS_ENABLE_KEM_{{ family['name'] }}_{{ scheme['alias_sche {%- if 'disable_by_default' in family and family['disable_by_default'] %} option(OQS_ENABLE_SIG_{{ family['name']|upper }} "Enable {{ family['name'] }} algorithm family" OFF) {%- else %} +{%- if 'external' in family and family['external'] == 'gmp' %} +if (OQS_USE_GMP) +{%- endif %} option(OQS_ENABLE_SIG_{{ family['name']|upper }} "Enable {{ family['name'] }} algorithm family" ON) +{%- if 'external' in family and family['external'] == 'gmp' %} +else() +option(OQS_ENABLE_SIG_{{ family['name']|upper }} "Enable {{ family['name'] }} algorithm family" OFF) +endif() +{%- endif %} {%- endif %} {%- for scheme in family['schemes'] %} cmake_dependent_option(OQS_ENABLE_SIG_{{ family['name'] }}_{{ scheme['scheme'] }} "" ON "OQS_ENABLE_SIG_{{ family['name']|upper }}" OFF) diff --git a/scripts/copy_from_upstream/copy_from_upstream.yml b/scripts/copy_from_upstream/copy_from_upstream.yml index f378ffc6a7..e55d9865ab 100644 --- a/scripts/copy_from_upstream/copy_from_upstream.yml +++ b/scripts/copy_from_upstream/copy_from_upstream.yml @@ -606,6 +606,7 @@ sigs: name: sqisign default_implementation: ref upstream_location: the-sqisign + external: gmp schemes: - scheme: "lvl1" From 7d2efb56ef549bb8560952026781709fd237c389 Mon Sep 17 00:00:00 2001 From: Basil Hess Date: Fri, 15 Aug 2025 14:18:50 +0200 Subject: [PATCH 09/19] Add sqisign namespace to allowlist Signed-off-by: Basil Hess --- tests/test_binary.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_binary.py b/tests/test_binary.py index f10c40138d..736719f33d 100644 --- a/tests/test_binary.py +++ b/tests/test_binary.py @@ -33,7 +33,7 @@ def test_namespace(): symbols.append(line) # ideally this would be just ['oqs', 'pqclean'], but contains exceptions (e.g., providing compat implementations of unavailable platform functions) - namespaces = ['oqs', 'pqclean', 'keccak', 'pqcrystals', 'pqmayo', 'init', 'fini', 'seedexpander', '__x86.get_pc_thunk', 'libjade', 'jade', '__jade', '__jasmin_syscall', 'pqcp', 'pqov', '_snova', '_sqisign', 'sha3'] + namespaces = ['oqs', 'pqclean', 'keccak', 'pqcrystals', 'pqmayo', 'init', 'fini', 'seedexpander', '__x86.get_pc_thunk', 'libjade', 'jade', '__jade', '__jasmin_syscall', 'pqcp', 'pqov', '_snova', '_sqisign', 'sqisign', 'sha3'] non_namespaced = [] for symbolstr in symbols: From 09d07cf59209f3709b193680afff9ca58475ce9f Mon Sep 17 00:00:00 2001 From: Basil Hess Date: Fri, 15 Aug 2025 14:25:54 +0200 Subject: [PATCH 10/19] Edit README Signed-off-by: Basil Hess --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 6768780d08..0dd4b0dc0d 100644 --- a/README.md +++ b/README.md @@ -115,12 +115,12 @@ This project is not commercially supported. 
All guidelines and goals for liboqs On Ubuntu: - sudo apt install astyle cmake gcc ninja-build libssl-dev python3-pytest python3-pytest-xdist unzip xsltproc doxygen graphviz python3-yaml valgrind + sudo apt install astyle cmake gcc ninja-build libssl-dev python3-pytest python3-pytest-xdist unzip xsltproc doxygen graphviz python3-yaml valgrind libgmp-dev On macOS, using a package manager of your choice (we've picked Homebrew): brew install cmake ninja openssl@3 wget doxygen graphviz astyle valgrind - pip3 install pytest pytest-xdist pyyaml + pip3 install pytest pytest-xdist pyyaml gmp Using Nix: From 65ce9ac04eaa84c9a1509dfa7d66310ff6338db1 Mon Sep 17 00:00:00 2001 From: Basil Hess Date: Fri, 15 Aug 2025 14:44:07 +0200 Subject: [PATCH 11/19] Add dependency to travis [full tests] Signed-off-by: Basil Hess --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index f16d17c0bf..5b5d55d11e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,6 +1,6 @@ language: c before_script: - - sudo apt update && sudo apt -y install astyle cmake gcc ninja-build libssl-dev python3-pytest python3-pytest-xdist unzip xsltproc doxygen graphviz valgrind + - sudo apt update && sudo apt -y install astyle cmake gcc ninja-build libssl-dev python3-pytest python3-pytest-xdist unzip xsltproc doxygen graphviz valgrind libgmp-dev jobs: include: - arch: ppc64le # The IBM Power LXD container based build for OSS only From 7c75a17a4d6855f9fa5ebfa8c548023703e8d773 Mon Sep 17 00:00:00 2001 From: Basil Hess Date: Fri, 15 Aug 2025 16:09:04 +0200 Subject: [PATCH 12/19] new pull [full tests] Signed-off-by: Basil Hess --- docs/algorithms/sig/sqisign.md | 2 +- docs/algorithms/sig/sqisign.yml | 2 +- .../copy_from_upstream/copy_from_upstream.yml | 2 +- .../algebra.c | 2 +- .../common.c | 5 +- .../the-sqisign_sqisign_lvl1_broadwell/ec.h | 51 ------------------- .../fips202.h | 2 + .../the-sqisign_sqisign_lvl1_broadwell/hnf.c | 2 +- .../hnf_internal.h | 2 +- .../ibz_division.c | 2 +- .../theta_isogenies.c | 27 ++++++++++ .../the-sqisign_sqisign_lvl1_ref/algebra.c | 2 +- .../the-sqisign_sqisign_lvl1_ref/common.c | 5 +- .../sqisign/the-sqisign_sqisign_lvl1_ref/ec.h | 51 ------------------- .../the-sqisign_sqisign_lvl1_ref/fips202.h | 2 + .../fp_p5248_64.c | 2 +- .../the-sqisign_sqisign_lvl1_ref/hnf.c | 2 +- .../hnf_internal.h | 2 +- .../ibz_division.c | 2 +- .../theta_isogenies.c | 27 ++++++++++ .../algebra.c | 2 +- .../common.c | 5 +- .../the-sqisign_sqisign_lvl3_broadwell/ec.h | 51 ------------------- .../fips202.h | 2 + .../the-sqisign_sqisign_lvl3_broadwell/hnf.c | 2 +- .../hnf_internal.h | 2 +- .../ibz_division.c | 2 +- .../theta_isogenies.c | 27 ++++++++++ .../the-sqisign_sqisign_lvl3_ref/algebra.c | 2 +- .../the-sqisign_sqisign_lvl3_ref/common.c | 5 +- .../sqisign/the-sqisign_sqisign_lvl3_ref/ec.h | 51 ------------------- .../the-sqisign_sqisign_lvl3_ref/fips202.h | 2 + .../fp_p65376_64.c | 2 +- .../the-sqisign_sqisign_lvl3_ref/hnf.c | 2 +- .../hnf_internal.h | 2 +- .../ibz_division.c | 2 +- .../theta_isogenies.c | 27 ++++++++++ .../algebra.c | 2 +- .../common.c | 5 +- .../the-sqisign_sqisign_lvl5_broadwell/ec.h | 51 ------------------- .../fips202.h | 2 + .../the-sqisign_sqisign_lvl5_broadwell/hnf.c | 2 +- .../hnf_internal.h | 2 +- .../ibz_division.c | 2 +- .../theta_isogenies.c | 27 ++++++++++ .../the-sqisign_sqisign_lvl5_ref/algebra.c | 2 +- .../the-sqisign_sqisign_lvl5_ref/common.c | 5 +- .../sqisign/the-sqisign_sqisign_lvl5_ref/ec.h | 51 ------------------- 
.../the-sqisign_sqisign_lvl5_ref/fips202.h | 2 + .../fp_p27500_64.c | 2 +- .../the-sqisign_sqisign_lvl5_ref/hnf.c | 2 +- .../hnf_internal.h | 2 +- .../ibz_division.c | 2 +- .../theta_isogenies.c | 27 ++++++++++ 54 files changed, 222 insertions(+), 348 deletions(-) diff --git a/docs/algorithms/sig/sqisign.md b/docs/algorithms/sig/sqisign.md index 3a5b213015..55646fbab8 100644 --- a/docs/algorithms/sig/sqisign.md +++ b/docs/algorithms/sig/sqisign.md @@ -6,7 +6,7 @@ - **Authors' website**: https://sqisign.org/ - **Specification version**: Round 2. - **Primary Source**: - - **Source**: https://github.com/bhess/the-sqisign/commit/323648fa9c28c69b3f24d3cc22986530ffe8e8d7 + - **Source**: https://github.com/bhess/the-sqisign/commit/74e2fbff9159377b99a09da91dac3afd9179d426 - **Implementation license (SPDX-Identifier)**: Apache-2.0 diff --git a/docs/algorithms/sig/sqisign.yml b/docs/algorithms/sig/sqisign.yml index 0dc5498412..83f7ab4b70 100644 --- a/docs/algorithms/sig/sqisign.yml +++ b/docs/algorithms/sig/sqisign.yml @@ -36,7 +36,7 @@ website: https://sqisign.org/ nist-round: 2 spec-version: Round 2 primary-upstream: - source: https://github.com/bhess/the-sqisign/commit/323648fa9c28c69b3f24d3cc22986530ffe8e8d7 + source: https://github.com/bhess/the-sqisign/commit/74e2fbff9159377b99a09da91dac3afd9179d426 spdx-license-identifier: Apache-2.0 parameter-sets: - name: SQIsign-lvl1 diff --git a/scripts/copy_from_upstream/copy_from_upstream.yml b/scripts/copy_from_upstream/copy_from_upstream.yml index e55d9865ab..a70491fcd2 100644 --- a/scripts/copy_from_upstream/copy_from_upstream.yml +++ b/scripts/copy_from_upstream/copy_from_upstream.yml @@ -96,7 +96,7 @@ upstreams: name: the-sqisign git_url: https://github.com/bhess/the-sqisign.git git_branch: oqs - git_commit: 323648fa9c28c69b3f24d3cc22986530ffe8e8d7 + git_commit: 74e2fbff9159377b99a09da91dac3afd9179d426 sig_scheme_path: '.' 
sig_meta_path: 'integration/liboqs/{pqclean_scheme}.yml' diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/algebra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/algebra.c index 50629f9fec..f4b4260755 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/algebra.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/algebra.c @@ -9,7 +9,7 @@ quat_alg_init_set_ui(quat_alg_t *alg, unsigned int p) ibz_t bp; ibz_init(&bp); ibz_set(&bp, p); - quat_alg_init_set(alg, &bp); + quat_alg_init_set(alg, (const ibz_t *)&bp); ibz_finalize(&bp); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/common.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/common.c index d393e9cb11..1df7755a29 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/common.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/common.c @@ -56,13 +56,13 @@ hash_to_challenge(scalar_t *scalar, shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); (*scalar)[limbs - 1] &= mask; for (int i = 2; i < HASH_ITERATIONS; i++) { - shake256_inc_init(&ctx); + shake256_inc_ctx_reset(&ctx); shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); shake256_inc_finalize(&ctx); shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); (*scalar)[limbs - 1] &= mask; } - shake256_inc_init(&ctx); + shake256_inc_ctx_reset(&ctx); shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); shake256_inc_finalize(&ctx); @@ -76,6 +76,7 @@ hash_to_challenge(scalar_t *scalar, memset(*scalar, 0, NWORDS_ORDER * sizeof(digit_t)); shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + shake256_inc_ctx_release(&ctx); (*scalar)[limbs - 1] &= mask; #ifdef TARGET_BIG_ENDIAN diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec.h index ee2be38060..e609c93a08 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec.h @@ -605,57 +605,6 @@ test_basis_order_twof(const ec_basis_t *B, const ec_curve_t *E, int t) return check_P & check_Q & check_PmQ; } -/** - * @brief Check if a Jacobian point (X : Y : Z) has order exactly 2^f - * - * @param P: a point - * @param E: an elliptic curve - * @param t: an integer - * - * @return 0xFFFFFFFF if the order is correct, 0 otherwise - */ -static int -test_jac_order_twof(const jac_point_t *P, const ec_curve_t *E, int t) -{ - jac_point_t test; - test = *P; - if (fp2_is_zero(&test.z)) - return 0; - for (int i = 0; i < t - 1; i++) { - DBL(&test, &test, E); - } - if (fp2_is_zero(&test.z)) - return 0; - DBL(&test, &test, E); - return (fp2_is_zero(&test.z)); -} - -// Prints the x-coordinate of the point (X : 1) -static void -ec_point_print(const char *name, ec_point_t P) -{ - fp2_t a; - if (fp2_is_zero(&P.z)) { - printf("%s = INF\n", name); - } else { - fp2_copy(&a, &P.z); - fp2_inv(&a); - fp2_mul(&a, &a, &P.x); - fp2_print(name, &a); - } -} - -// Prints the Montgomery coefficient A -static void -ec_curve_print(const char *name, ec_curve_t E) -{ - fp2_t a; - fp2_copy(&a, &E.C); - fp2_inv(&a); - fp2_mul(&a, &a, &E.A); - fp2_print(name, &a); -} - #endif // end isogeny computations /** diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fips202.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fips202.h index 21bc0c3f79..d4bc8ac727 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fips202.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fips202.h @@ -10,5 +10,7 @@ #define 
shake256_inc_absorb OQS_SHA3_shake256_inc_absorb #define shake256_inc_finalize OQS_SHA3_shake256_inc_finalize #define shake256_inc_squeeze OQS_SHA3_shake256_inc_squeeze +#define shake256_inc_ctx_release OQS_SHA3_shake256_inc_ctx_release +#define shake256_inc_ctx_reset OQS_SHA3_shake256_inc_ctx_reset #endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf.c index 1fb4c0f139..511a0a5d38 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf.c @@ -207,4 +207,4 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec ibz_vec_4_finalize(&(w[h])); ibz_vec_4_finalize(&(a[h])); } -} \ No newline at end of file +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf_internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf_internal.h index 5ecc871bb4..2302bbc0c6 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf_internal.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf_internal.h @@ -91,4 +91,4 @@ void ibz_xgcd_with_u_not_0(ibz_t *d, ibz_t *u, ibz_t *v, const ibz_t *x, const i */ /** @} */ -#endif \ No newline at end of file +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ibz_division.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ibz_division.c index 622361d466..0fd35b5c65 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ibz_division.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ibz_division.c @@ -9,4 +9,4 @@ void ibz_xgcd(ibz_t *gcd, ibz_t *u, ibz_t *v, const ibz_t *a, const ibz_t *b) { mpz_gcdext(*gcd, *u, *v, *a, *b); -} \ No newline at end of file +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.c index 478a9ab25b..e4fcf3caf0 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/theta_isogenies.c @@ -381,6 +381,33 @@ gluing_change_of_basis(basis_change_matrix_t *M, return 1; } +#ifndef NDEBUG +/** + * @brief Check if a Jacobian point (X : Y : Z) has order exactly 2^f + * + * @param P: a point + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_jac_order_twof(const jac_point_t *P, const ec_curve_t *E, int t) +{ + jac_point_t test; + test = *P; + if (fp2_is_zero(&test.z)) + return 0; + for (int i = 0; i < t - 1; i++) { + DBL(&test, &test, E); + } + if (fp2_is_zero(&test.z)) + return 0; + DBL(&test, &test, E); + return (fp2_is_zero(&test.z)); +} +#endif + /** * @brief Compute the gluing isogeny from an elliptic product * diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/algebra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/algebra.c index 50629f9fec..f4b4260755 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/algebra.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/algebra.c @@ -9,7 +9,7 @@ quat_alg_init_set_ui(quat_alg_t *alg, unsigned int p) ibz_t bp; ibz_init(&bp); ibz_set(&bp, p); - quat_alg_init_set(alg, &bp); + quat_alg_init_set(alg, (const ibz_t *)&bp); ibz_finalize(&bp); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/common.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/common.c index d393e9cb11..1df7755a29 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/common.c +++ 
b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/common.c @@ -56,13 +56,13 @@ hash_to_challenge(scalar_t *scalar, shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); (*scalar)[limbs - 1] &= mask; for (int i = 2; i < HASH_ITERATIONS; i++) { - shake256_inc_init(&ctx); + shake256_inc_ctx_reset(&ctx); shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); shake256_inc_finalize(&ctx); shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); (*scalar)[limbs - 1] &= mask; } - shake256_inc_init(&ctx); + shake256_inc_ctx_reset(&ctx); shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); shake256_inc_finalize(&ctx); @@ -76,6 +76,7 @@ hash_to_challenge(scalar_t *scalar, memset(*scalar, 0, NWORDS_ORDER * sizeof(digit_t)); shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + shake256_inc_ctx_release(&ctx); (*scalar)[limbs - 1] &= mask; #ifdef TARGET_BIG_ENDIAN diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec.h index ee2be38060..e609c93a08 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec.h @@ -605,57 +605,6 @@ test_basis_order_twof(const ec_basis_t *B, const ec_curve_t *E, int t) return check_P & check_Q & check_PmQ; } -/** - * @brief Check if a Jacobian point (X : Y : Z) has order exactly 2^f - * - * @param P: a point - * @param E: an elliptic curve - * @param t: an integer - * - * @return 0xFFFFFFFF if the order is correct, 0 otherwise - */ -static int -test_jac_order_twof(const jac_point_t *P, const ec_curve_t *E, int t) -{ - jac_point_t test; - test = *P; - if (fp2_is_zero(&test.z)) - return 0; - for (int i = 0; i < t - 1; i++) { - DBL(&test, &test, E); - } - if (fp2_is_zero(&test.z)) - return 0; - DBL(&test, &test, E); - return (fp2_is_zero(&test.z)); -} - -// Prints the x-coordinate of the point (X : 1) -static void -ec_point_print(const char *name, ec_point_t P) -{ - fp2_t a; - if (fp2_is_zero(&P.z)) { - printf("%s = INF\n", name); - } else { - fp2_copy(&a, &P.z); - fp2_inv(&a); - fp2_mul(&a, &a, &P.x); - fp2_print(name, &a); - } -} - -// Prints the Montgomery coefficient A -static void -ec_curve_print(const char *name, ec_curve_t E) -{ - fp2_t a; - fp2_copy(&a, &E.C); - fp2_inv(&a); - fp2_mul(&a, &a, &E.A); - fp2_print(name, &a); -} - #endif // end isogeny computations /** diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fips202.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fips202.h index 21bc0c3f79..d4bc8ac727 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fips202.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fips202.h @@ -10,5 +10,7 @@ #define shake256_inc_absorb OQS_SHA3_shake256_inc_absorb #define shake256_inc_finalize OQS_SHA3_shake256_inc_finalize #define shake256_inc_squeeze OQS_SHA3_shake256_inc_squeeze +#define shake256_inc_ctx_release OQS_SHA3_shake256_inc_ctx_release +#define shake256_inc_ctx_reset OQS_SHA3_shake256_inc_ctx_reset #endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c index d46d1c5d85..aa2c7d4ede 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c @@ -791,4 +791,4 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) } } -#endif // RADIX_64 \ No newline at end of file +#endif // RADIX_64 diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf.c index 
1fb4c0f139..511a0a5d38 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf.c @@ -207,4 +207,4 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec ibz_vec_4_finalize(&(w[h])); ibz_vec_4_finalize(&(a[h])); } -} \ No newline at end of file +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf_internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf_internal.h index 5ecc871bb4..2302bbc0c6 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf_internal.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf_internal.h @@ -91,4 +91,4 @@ void ibz_xgcd_with_u_not_0(ibz_t *d, ibz_t *u, ibz_t *v, const ibz_t *x, const i */ /** @} */ -#endif \ No newline at end of file +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ibz_division.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ibz_division.c index 622361d466..0fd35b5c65 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ibz_division.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ibz_division.c @@ -9,4 +9,4 @@ void ibz_xgcd(ibz_t *gcd, ibz_t *u, ibz_t *v, const ibz_t *a, const ibz_t *b) { mpz_gcdext(*gcd, *u, *v, *a, *b); -} \ No newline at end of file +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/theta_isogenies.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/theta_isogenies.c index 478a9ab25b..e4fcf3caf0 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/theta_isogenies.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/theta_isogenies.c @@ -381,6 +381,33 @@ gluing_change_of_basis(basis_change_matrix_t *M, return 1; } +#ifndef NDEBUG +/** + * @brief Check if a Jacobian point (X : Y : Z) has order exactly 2^f + * + * @param P: a point + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_jac_order_twof(const jac_point_t *P, const ec_curve_t *E, int t) +{ + jac_point_t test; + test = *P; + if (fp2_is_zero(&test.z)) + return 0; + for (int i = 0; i < t - 1; i++) { + DBL(&test, &test, E); + } + if (fp2_is_zero(&test.z)) + return 0; + DBL(&test, &test, E); + return (fp2_is_zero(&test.z)); +} +#endif + /** * @brief Compute the gluing isogeny from an elliptic product * diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/algebra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/algebra.c index 50629f9fec..f4b4260755 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/algebra.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/algebra.c @@ -9,7 +9,7 @@ quat_alg_init_set_ui(quat_alg_t *alg, unsigned int p) ibz_t bp; ibz_init(&bp); ibz_set(&bp, p); - quat_alg_init_set(alg, &bp); + quat_alg_init_set(alg, (const ibz_t *)&bp); ibz_finalize(&bp); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/common.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/common.c index d393e9cb11..1df7755a29 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/common.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/common.c @@ -56,13 +56,13 @@ hash_to_challenge(scalar_t *scalar, shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); (*scalar)[limbs - 1] &= mask; for (int i = 2; i < HASH_ITERATIONS; i++) { - shake256_inc_init(&ctx); + shake256_inc_ctx_reset(&ctx); shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); shake256_inc_finalize(&ctx); shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); (*scalar)[limbs - 1] &= mask; } - 
shake256_inc_init(&ctx); + shake256_inc_ctx_reset(&ctx); shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); shake256_inc_finalize(&ctx); @@ -76,6 +76,7 @@ hash_to_challenge(scalar_t *scalar, memset(*scalar, 0, NWORDS_ORDER * sizeof(digit_t)); shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + shake256_inc_ctx_release(&ctx); (*scalar)[limbs - 1] &= mask; #ifdef TARGET_BIG_ENDIAN diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec.h index ee2be38060..e609c93a08 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec.h @@ -605,57 +605,6 @@ test_basis_order_twof(const ec_basis_t *B, const ec_curve_t *E, int t) return check_P & check_Q & check_PmQ; } -/** - * @brief Check if a Jacobian point (X : Y : Z) has order exactly 2^f - * - * @param P: a point - * @param E: an elliptic curve - * @param t: an integer - * - * @return 0xFFFFFFFF if the order is correct, 0 otherwise - */ -static int -test_jac_order_twof(const jac_point_t *P, const ec_curve_t *E, int t) -{ - jac_point_t test; - test = *P; - if (fp2_is_zero(&test.z)) - return 0; - for (int i = 0; i < t - 1; i++) { - DBL(&test, &test, E); - } - if (fp2_is_zero(&test.z)) - return 0; - DBL(&test, &test, E); - return (fp2_is_zero(&test.z)); -} - -// Prints the x-coordinate of the point (X : 1) -static void -ec_point_print(const char *name, ec_point_t P) -{ - fp2_t a; - if (fp2_is_zero(&P.z)) { - printf("%s = INF\n", name); - } else { - fp2_copy(&a, &P.z); - fp2_inv(&a); - fp2_mul(&a, &a, &P.x); - fp2_print(name, &a); - } -} - -// Prints the Montgomery coefficient A -static void -ec_curve_print(const char *name, ec_curve_t E) -{ - fp2_t a; - fp2_copy(&a, &E.C); - fp2_inv(&a); - fp2_mul(&a, &a, &E.A); - fp2_print(name, &a); -} - #endif // end isogeny computations /** diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fips202.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fips202.h index 21bc0c3f79..d4bc8ac727 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fips202.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fips202.h @@ -10,5 +10,7 @@ #define shake256_inc_absorb OQS_SHA3_shake256_inc_absorb #define shake256_inc_finalize OQS_SHA3_shake256_inc_finalize #define shake256_inc_squeeze OQS_SHA3_shake256_inc_squeeze +#define shake256_inc_ctx_release OQS_SHA3_shake256_inc_ctx_release +#define shake256_inc_ctx_reset OQS_SHA3_shake256_inc_ctx_reset #endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf.c index 1fb4c0f139..511a0a5d38 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf.c @@ -207,4 +207,4 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec ibz_vec_4_finalize(&(w[h])); ibz_vec_4_finalize(&(a[h])); } -} \ No newline at end of file +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf_internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf_internal.h index 5ecc871bb4..2302bbc0c6 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf_internal.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf_internal.h @@ -91,4 +91,4 @@ void ibz_xgcd_with_u_not_0(ibz_t *d, ibz_t *u, ibz_t *v, const ibz_t *x, const i */ /** @} */ -#endif \ No newline at end of file +#endif diff --git 
a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ibz_division.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ibz_division.c index 622361d466..0fd35b5c65 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ibz_division.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ibz_division.c @@ -9,4 +9,4 @@ void ibz_xgcd(ibz_t *gcd, ibz_t *u, ibz_t *v, const ibz_t *a, const ibz_t *b) { mpz_gcdext(*gcd, *u, *v, *a, *b); -} \ No newline at end of file +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.c index 478a9ab25b..e4fcf3caf0 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/theta_isogenies.c @@ -381,6 +381,33 @@ gluing_change_of_basis(basis_change_matrix_t *M, return 1; } +#ifndef NDEBUG +/** + * @brief Check if a Jacobian point (X : Y : Z) has order exactly 2^f + * + * @param P: a point + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_jac_order_twof(const jac_point_t *P, const ec_curve_t *E, int t) +{ + jac_point_t test; + test = *P; + if (fp2_is_zero(&test.z)) + return 0; + for (int i = 0; i < t - 1; i++) { + DBL(&test, &test, E); + } + if (fp2_is_zero(&test.z)) + return 0; + DBL(&test, &test, E); + return (fp2_is_zero(&test.z)); +} +#endif + /** * @brief Compute the gluing isogeny from an elliptic product * diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/algebra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/algebra.c index 50629f9fec..f4b4260755 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/algebra.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/algebra.c @@ -9,7 +9,7 @@ quat_alg_init_set_ui(quat_alg_t *alg, unsigned int p) ibz_t bp; ibz_init(&bp); ibz_set(&bp, p); - quat_alg_init_set(alg, &bp); + quat_alg_init_set(alg, (const ibz_t *)&bp); ibz_finalize(&bp); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/common.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/common.c index d393e9cb11..1df7755a29 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/common.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/common.c @@ -56,13 +56,13 @@ hash_to_challenge(scalar_t *scalar, shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); (*scalar)[limbs - 1] &= mask; for (int i = 2; i < HASH_ITERATIONS; i++) { - shake256_inc_init(&ctx); + shake256_inc_ctx_reset(&ctx); shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); shake256_inc_finalize(&ctx); shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); (*scalar)[limbs - 1] &= mask; } - shake256_inc_init(&ctx); + shake256_inc_ctx_reset(&ctx); shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); shake256_inc_finalize(&ctx); @@ -76,6 +76,7 @@ hash_to_challenge(scalar_t *scalar, memset(*scalar, 0, NWORDS_ORDER * sizeof(digit_t)); shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + shake256_inc_ctx_release(&ctx); (*scalar)[limbs - 1] &= mask; #ifdef TARGET_BIG_ENDIAN diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec.h index ee2be38060..e609c93a08 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec.h @@ -605,57 +605,6 @@ test_basis_order_twof(const ec_basis_t *B, const ec_curve_t *E, int t) return check_P & check_Q & check_PmQ; } -/** - * @brief 
Check if a Jacobian point (X : Y : Z) has order exactly 2^f - * - * @param P: a point - * @param E: an elliptic curve - * @param t: an integer - * - * @return 0xFFFFFFFF if the order is correct, 0 otherwise - */ -static int -test_jac_order_twof(const jac_point_t *P, const ec_curve_t *E, int t) -{ - jac_point_t test; - test = *P; - if (fp2_is_zero(&test.z)) - return 0; - for (int i = 0; i < t - 1; i++) { - DBL(&test, &test, E); - } - if (fp2_is_zero(&test.z)) - return 0; - DBL(&test, &test, E); - return (fp2_is_zero(&test.z)); -} - -// Prints the x-coordinate of the point (X : 1) -static void -ec_point_print(const char *name, ec_point_t P) -{ - fp2_t a; - if (fp2_is_zero(&P.z)) { - printf("%s = INF\n", name); - } else { - fp2_copy(&a, &P.z); - fp2_inv(&a); - fp2_mul(&a, &a, &P.x); - fp2_print(name, &a); - } -} - -// Prints the Montgomery coefficient A -static void -ec_curve_print(const char *name, ec_curve_t E) -{ - fp2_t a; - fp2_copy(&a, &E.C); - fp2_inv(&a); - fp2_mul(&a, &a, &E.A); - fp2_print(name, &a); -} - #endif // end isogeny computations /** diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fips202.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fips202.h index 21bc0c3f79..d4bc8ac727 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fips202.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fips202.h @@ -10,5 +10,7 @@ #define shake256_inc_absorb OQS_SHA3_shake256_inc_absorb #define shake256_inc_finalize OQS_SHA3_shake256_inc_finalize #define shake256_inc_squeeze OQS_SHA3_shake256_inc_squeeze +#define shake256_inc_ctx_release OQS_SHA3_shake256_inc_ctx_release +#define shake256_inc_ctx_reset OQS_SHA3_shake256_inc_ctx_reset #endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c index b5947aaac0..fa363c65fd 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c @@ -872,4 +872,4 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) } } -#endif /* RADIX_64 */ \ No newline at end of file +#endif /* RADIX_64 */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf.c index 1fb4c0f139..511a0a5d38 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf.c @@ -207,4 +207,4 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec ibz_vec_4_finalize(&(w[h])); ibz_vec_4_finalize(&(a[h])); } -} \ No newline at end of file +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf_internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf_internal.h index 5ecc871bb4..2302bbc0c6 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf_internal.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf_internal.h @@ -91,4 +91,4 @@ void ibz_xgcd_with_u_not_0(ibz_t *d, ibz_t *u, ibz_t *v, const ibz_t *x, const i */ /** @} */ -#endif \ No newline at end of file +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ibz_division.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ibz_division.c index 622361d466..0fd35b5c65 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ibz_division.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ibz_division.c @@ -9,4 +9,4 @@ void ibz_xgcd(ibz_t *gcd, ibz_t *u, ibz_t *v, const ibz_t *a, const ibz_t *b) { mpz_gcdext(*gcd, *u, *v, *a, *b); -} \ No newline at end of file +} diff --git 
a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/theta_isogenies.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/theta_isogenies.c index 478a9ab25b..e4fcf3caf0 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/theta_isogenies.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/theta_isogenies.c @@ -381,6 +381,33 @@ gluing_change_of_basis(basis_change_matrix_t *M, return 1; } +#ifndef NDEBUG +/** + * @brief Check if a Jacobian point (X : Y : Z) has order exactly 2^f + * + * @param P: a point + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_jac_order_twof(const jac_point_t *P, const ec_curve_t *E, int t) +{ + jac_point_t test; + test = *P; + if (fp2_is_zero(&test.z)) + return 0; + for (int i = 0; i < t - 1; i++) { + DBL(&test, &test, E); + } + if (fp2_is_zero(&test.z)) + return 0; + DBL(&test, &test, E); + return (fp2_is_zero(&test.z)); +} +#endif + /** * @brief Compute the gluing isogeny from an elliptic product * diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/algebra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/algebra.c index 50629f9fec..f4b4260755 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/algebra.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/algebra.c @@ -9,7 +9,7 @@ quat_alg_init_set_ui(quat_alg_t *alg, unsigned int p) ibz_t bp; ibz_init(&bp); ibz_set(&bp, p); - quat_alg_init_set(alg, &bp); + quat_alg_init_set(alg, (const ibz_t *)&bp); ibz_finalize(&bp); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/common.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/common.c index d393e9cb11..1df7755a29 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/common.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/common.c @@ -56,13 +56,13 @@ hash_to_challenge(scalar_t *scalar, shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); (*scalar)[limbs - 1] &= mask; for (int i = 2; i < HASH_ITERATIONS; i++) { - shake256_inc_init(&ctx); + shake256_inc_ctx_reset(&ctx); shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); shake256_inc_finalize(&ctx); shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); (*scalar)[limbs - 1] &= mask; } - shake256_inc_init(&ctx); + shake256_inc_ctx_reset(&ctx); shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); shake256_inc_finalize(&ctx); @@ -76,6 +76,7 @@ hash_to_challenge(scalar_t *scalar, memset(*scalar, 0, NWORDS_ORDER * sizeof(digit_t)); shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + shake256_inc_ctx_release(&ctx); (*scalar)[limbs - 1] &= mask; #ifdef TARGET_BIG_ENDIAN diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec.h index ee2be38060..e609c93a08 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec.h @@ -605,57 +605,6 @@ test_basis_order_twof(const ec_basis_t *B, const ec_curve_t *E, int t) return check_P & check_Q & check_PmQ; } -/** - * @brief Check if a Jacobian point (X : Y : Z) has order exactly 2^f - * - * @param P: a point - * @param E: an elliptic curve - * @param t: an integer - * - * @return 0xFFFFFFFF if the order is correct, 0 otherwise - */ -static int -test_jac_order_twof(const jac_point_t *P, const ec_curve_t *E, int t) -{ - jac_point_t test; - test = *P; - if (fp2_is_zero(&test.z)) - return 0; - for (int i = 0; i < t - 1; i++) { - DBL(&test, &test, E); - } - if 
(fp2_is_zero(&test.z)) - return 0; - DBL(&test, &test, E); - return (fp2_is_zero(&test.z)); -} - -// Prints the x-coordinate of the point (X : 1) -static void -ec_point_print(const char *name, ec_point_t P) -{ - fp2_t a; - if (fp2_is_zero(&P.z)) { - printf("%s = INF\n", name); - } else { - fp2_copy(&a, &P.z); - fp2_inv(&a); - fp2_mul(&a, &a, &P.x); - fp2_print(name, &a); - } -} - -// Prints the Montgomery coefficient A -static void -ec_curve_print(const char *name, ec_curve_t E) -{ - fp2_t a; - fp2_copy(&a, &E.C); - fp2_inv(&a); - fp2_mul(&a, &a, &E.A); - fp2_print(name, &a); -} - #endif // end isogeny computations /** diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fips202.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fips202.h index 21bc0c3f79..d4bc8ac727 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fips202.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fips202.h @@ -10,5 +10,7 @@ #define shake256_inc_absorb OQS_SHA3_shake256_inc_absorb #define shake256_inc_finalize OQS_SHA3_shake256_inc_finalize #define shake256_inc_squeeze OQS_SHA3_shake256_inc_squeeze +#define shake256_inc_ctx_release OQS_SHA3_shake256_inc_ctx_release +#define shake256_inc_ctx_reset OQS_SHA3_shake256_inc_ctx_reset #endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf.c index 1fb4c0f139..511a0a5d38 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf.c @@ -207,4 +207,4 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec ibz_vec_4_finalize(&(w[h])); ibz_vec_4_finalize(&(a[h])); } -} \ No newline at end of file +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf_internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf_internal.h index 5ecc871bb4..2302bbc0c6 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf_internal.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf_internal.h @@ -91,4 +91,4 @@ void ibz_xgcd_with_u_not_0(ibz_t *d, ibz_t *u, ibz_t *v, const ibz_t *x, const i */ /** @} */ -#endif \ No newline at end of file +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ibz_division.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ibz_division.c index 622361d466..0fd35b5c65 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ibz_division.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ibz_division.c @@ -9,4 +9,4 @@ void ibz_xgcd(ibz_t *gcd, ibz_t *u, ibz_t *v, const ibz_t *a, const ibz_t *b) { mpz_gcdext(*gcd, *u, *v, *a, *b); -} \ No newline at end of file +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.c index 478a9ab25b..e4fcf3caf0 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/theta_isogenies.c @@ -381,6 +381,33 @@ gluing_change_of_basis(basis_change_matrix_t *M, return 1; } +#ifndef NDEBUG +/** + * @brief Check if a Jacobian point (X : Y : Z) has order exactly 2^f + * + * @param P: a point + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_jac_order_twof(const jac_point_t *P, const ec_curve_t *E, int t) +{ + jac_point_t test; + test = *P; + if (fp2_is_zero(&test.z)) + return 0; + 
for (int i = 0; i < t - 1; i++) { + DBL(&test, &test, E); + } + if (fp2_is_zero(&test.z)) + return 0; + DBL(&test, &test, E); + return (fp2_is_zero(&test.z)); +} +#endif + /** * @brief Compute the gluing isogeny from an elliptic product * diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/algebra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/algebra.c index 50629f9fec..f4b4260755 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/algebra.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/algebra.c @@ -9,7 +9,7 @@ quat_alg_init_set_ui(quat_alg_t *alg, unsigned int p) ibz_t bp; ibz_init(&bp); ibz_set(&bp, p); - quat_alg_init_set(alg, &bp); + quat_alg_init_set(alg, (const ibz_t *)&bp); ibz_finalize(&bp); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/common.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/common.c index d393e9cb11..1df7755a29 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/common.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/common.c @@ -56,13 +56,13 @@ hash_to_challenge(scalar_t *scalar, shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); (*scalar)[limbs - 1] &= mask; for (int i = 2; i < HASH_ITERATIONS; i++) { - shake256_inc_init(&ctx); + shake256_inc_ctx_reset(&ctx); shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); shake256_inc_finalize(&ctx); shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); (*scalar)[limbs - 1] &= mask; } - shake256_inc_init(&ctx); + shake256_inc_ctx_reset(&ctx); shake256_inc_absorb(&ctx, (void *)(*scalar), hash_bytes); shake256_inc_finalize(&ctx); @@ -76,6 +76,7 @@ hash_to_challenge(scalar_t *scalar, memset(*scalar, 0, NWORDS_ORDER * sizeof(digit_t)); shake256_inc_squeeze((void *)(*scalar), hash_bytes, &ctx); + shake256_inc_ctx_release(&ctx); (*scalar)[limbs - 1] &= mask; #ifdef TARGET_BIG_ENDIAN diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec.h index ee2be38060..e609c93a08 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec.h @@ -605,57 +605,6 @@ test_basis_order_twof(const ec_basis_t *B, const ec_curve_t *E, int t) return check_P & check_Q & check_PmQ; } -/** - * @brief Check if a Jacobian point (X : Y : Z) has order exactly 2^f - * - * @param P: a point - * @param E: an elliptic curve - * @param t: an integer - * - * @return 0xFFFFFFFF if the order is correct, 0 otherwise - */ -static int -test_jac_order_twof(const jac_point_t *P, const ec_curve_t *E, int t) -{ - jac_point_t test; - test = *P; - if (fp2_is_zero(&test.z)) - return 0; - for (int i = 0; i < t - 1; i++) { - DBL(&test, &test, E); - } - if (fp2_is_zero(&test.z)) - return 0; - DBL(&test, &test, E); - return (fp2_is_zero(&test.z)); -} - -// Prints the x-coordinate of the point (X : 1) -static void -ec_point_print(const char *name, ec_point_t P) -{ - fp2_t a; - if (fp2_is_zero(&P.z)) { - printf("%s = INF\n", name); - } else { - fp2_copy(&a, &P.z); - fp2_inv(&a); - fp2_mul(&a, &a, &P.x); - fp2_print(name, &a); - } -} - -// Prints the Montgomery coefficient A -static void -ec_curve_print(const char *name, ec_curve_t E) -{ - fp2_t a; - fp2_copy(&a, &E.C); - fp2_inv(&a); - fp2_mul(&a, &a, &E.A); - fp2_print(name, &a); -} - #endif // end isogeny computations /** diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fips202.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fips202.h index 21bc0c3f79..d4bc8ac727 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fips202.h +++ 
b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fips202.h @@ -10,5 +10,7 @@ #define shake256_inc_absorb OQS_SHA3_shake256_inc_absorb #define shake256_inc_finalize OQS_SHA3_shake256_inc_finalize #define shake256_inc_squeeze OQS_SHA3_shake256_inc_squeeze +#define shake256_inc_ctx_release OQS_SHA3_shake256_inc_ctx_release +#define shake256_inc_ctx_reset OQS_SHA3_shake256_inc_ctx_reset #endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c index cc1f136321..67b3b9ba54 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c @@ -970,4 +970,4 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) } } -#endif /* RADIX_64 */ \ No newline at end of file +#endif /* RADIX_64 */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf.c index 1fb4c0f139..511a0a5d38 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf.c @@ -207,4 +207,4 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec ibz_vec_4_finalize(&(w[h])); ibz_vec_4_finalize(&(a[h])); } -} \ No newline at end of file +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf_internal.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf_internal.h index 5ecc871bb4..2302bbc0c6 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf_internal.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf_internal.h @@ -91,4 +91,4 @@ void ibz_xgcd_with_u_not_0(ibz_t *d, ibz_t *u, ibz_t *v, const ibz_t *x, const i */ /** @} */ -#endif \ No newline at end of file +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ibz_division.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ibz_division.c index 622361d466..0fd35b5c65 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ibz_division.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ibz_division.c @@ -9,4 +9,4 @@ void ibz_xgcd(ibz_t *gcd, ibz_t *u, ibz_t *v, const ibz_t *a, const ibz_t *b) { mpz_gcdext(*gcd, *u, *v, *a, *b); -} \ No newline at end of file +} diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/theta_isogenies.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/theta_isogenies.c index 478a9ab25b..e4fcf3caf0 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/theta_isogenies.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/theta_isogenies.c @@ -381,6 +381,33 @@ gluing_change_of_basis(basis_change_matrix_t *M, return 1; } +#ifndef NDEBUG +/** + * @brief Check if a Jacobian point (X : Y : Z) has order exactly 2^f + * + * @param P: a point + * @param E: an elliptic curve + * @param t: an integer + * + * @return 0xFFFFFFFF if the order is correct, 0 otherwise + */ +static int +test_jac_order_twof(const jac_point_t *P, const ec_curve_t *E, int t) +{ + jac_point_t test; + test = *P; + if (fp2_is_zero(&test.z)) + return 0; + for (int i = 0; i < t - 1; i++) { + DBL(&test, &test, E); + } + if (fp2_is_zero(&test.z)) + return 0; + DBL(&test, &test, E); + return (fp2_is_zero(&test.z)); +} +#endif + /** * @brief Compute the gluing isogeny from an elliptic product * From e5c9c17c4bf9ebdd73adb908c2cc0e203feb7349 Mon Sep 17 00:00:00 2001 From: Basil Hess Date: Mon, 18 Aug 2025 13:55:01 +0200 Subject: [PATCH 13/19] new sqisign pull [full tests] Signed-off-by: Basil Hess --- docs/algorithms/sig/sqisign.md | 2 +- docs/algorithms/sig/sqisign.yml 
| 2 +- .../copy_from_upstream/copy_from_upstream.yml | 2 +- .../algebra.c | 137 +- .../common.c | 1 + .../the-sqisign_sqisign_lvl1_broadwell/dim2.c | 76 +- .../dim2id2iso.c | 112 +- .../the-sqisign_sqisign_lvl1_broadwell/dim4.c | 148 +- .../encode_signature.c | 32 +- .../endomorphism_action.c | 1106 +++---- .../finit.c | 20 +- .../the-sqisign_sqisign_lvl1_broadwell/hnf.c | 58 +- .../ibz_division.c | 2 +- .../id2iso.c | 110 +- .../ideal.c | 10 +- .../intbig.c | 186 +- .../intbig.h | 6 +- .../the-sqisign_sqisign_lvl1_broadwell/l2.c | 42 +- .../lat_ball.c | 18 +- .../lattice.c | 38 +- .../lll_applications.c | 12 +- .../normeq.c | 110 +- .../quaternion.h | 16 +- .../quaternion_data.c | 2252 +++++++-------- .../the-sqisign_sqisign_lvl1_broadwell/sign.c | 22 +- .../torsion_constants.c | 24 +- .../the-sqisign_sqisign_lvl1_ref/algebra.c | 137 +- .../the-sqisign_sqisign_lvl1_ref/common.c | 1 + .../the-sqisign_sqisign_lvl1_ref/dim2.c | 76 +- .../the-sqisign_sqisign_lvl1_ref/dim2id2iso.c | 112 +- .../the-sqisign_sqisign_lvl1_ref/dim4.c | 148 +- .../encode_signature.c | 32 +- .../endomorphism_action.c | 1106 +++---- .../the-sqisign_sqisign_lvl1_ref/finit.c | 20 +- .../the-sqisign_sqisign_lvl1_ref/hnf.c | 58 +- .../ibz_division.c | 2 +- .../the-sqisign_sqisign_lvl1_ref/id2iso.c | 110 +- .../the-sqisign_sqisign_lvl1_ref/ideal.c | 10 +- .../the-sqisign_sqisign_lvl1_ref/intbig.c | 186 +- .../the-sqisign_sqisign_lvl1_ref/intbig.h | 6 +- .../sqisign/the-sqisign_sqisign_lvl1_ref/l2.c | 42 +- .../the-sqisign_sqisign_lvl1_ref/lat_ball.c | 18 +- .../the-sqisign_sqisign_lvl1_ref/lattice.c | 38 +- .../lll_applications.c | 12 +- .../the-sqisign_sqisign_lvl1_ref/normeq.c | 110 +- .../the-sqisign_sqisign_lvl1_ref/printer.c | 16 +- .../the-sqisign_sqisign_lvl1_ref/quaternion.h | 16 +- .../quaternion_data.c | 2252 +++++++-------- .../the-sqisign_sqisign_lvl1_ref/sign.c | 22 +- .../torsion_constants.c | 24 +- .../algebra.c | 137 +- .../common.c | 1 + .../the-sqisign_sqisign_lvl3_broadwell/dim2.c | 76 +- .../dim2id2iso.c | 112 +- .../the-sqisign_sqisign_lvl3_broadwell/dim4.c | 148 +- .../encode_signature.c | 32 +- .../endomorphism_action.c | 1264 ++++---- .../finit.c | 20 +- .../the-sqisign_sqisign_lvl3_broadwell/hnf.c | 58 +- .../ibz_division.c | 2 +- .../id2iso.c | 110 +- .../ideal.c | 10 +- .../intbig.c | 186 +- .../intbig.h | 6 +- .../the-sqisign_sqisign_lvl3_broadwell/l2.c | 42 +- .../lat_ball.c | 18 +- .../lattice.c | 38 +- .../lll_applications.c | 12 +- .../normeq.c | 110 +- .../quaternion.h | 16 +- .../quaternion_data.c | 2572 ++++++++--------- .../the-sqisign_sqisign_lvl3_broadwell/sign.c | 22 +- .../torsion_constants.c | 24 +- .../the-sqisign_sqisign_lvl3_ref/algebra.c | 137 +- .../the-sqisign_sqisign_lvl3_ref/common.c | 1 + .../the-sqisign_sqisign_lvl3_ref/dim2.c | 76 +- .../the-sqisign_sqisign_lvl3_ref/dim2id2iso.c | 112 +- .../the-sqisign_sqisign_lvl3_ref/dim4.c | 148 +- .../encode_signature.c | 32 +- .../endomorphism_action.c | 1264 ++++---- .../the-sqisign_sqisign_lvl3_ref/finit.c | 20 +- .../the-sqisign_sqisign_lvl3_ref/hnf.c | 58 +- .../ibz_division.c | 2 +- .../the-sqisign_sqisign_lvl3_ref/id2iso.c | 110 +- .../the-sqisign_sqisign_lvl3_ref/ideal.c | 10 +- .../the-sqisign_sqisign_lvl3_ref/intbig.c | 186 +- .../the-sqisign_sqisign_lvl3_ref/intbig.h | 6 +- .../sqisign/the-sqisign_sqisign_lvl3_ref/l2.c | 42 +- .../the-sqisign_sqisign_lvl3_ref/lat_ball.c | 18 +- .../the-sqisign_sqisign_lvl3_ref/lattice.c | 38 +- .../lll_applications.c | 12 +- .../the-sqisign_sqisign_lvl3_ref/normeq.c | 110 +- 
.../the-sqisign_sqisign_lvl3_ref/printer.c | 16 +- .../the-sqisign_sqisign_lvl3_ref/quaternion.h | 16 +- .../quaternion_data.c | 2572 ++++++++--------- .../the-sqisign_sqisign_lvl3_ref/sign.c | 22 +- .../torsion_constants.c | 24 +- .../algebra.c | 137 +- .../common.c | 1 + .../the-sqisign_sqisign_lvl5_broadwell/dim2.c | 76 +- .../dim2id2iso.c | 112 +- .../the-sqisign_sqisign_lvl5_broadwell/dim4.c | 148 +- .../encode_signature.c | 32 +- .../endomorphism_action.c | 1154 ++++---- .../finit.c | 20 +- .../the-sqisign_sqisign_lvl5_broadwell/hnf.c | 58 +- .../ibz_division.c | 2 +- .../id2iso.c | 110 +- .../ideal.c | 10 +- .../intbig.c | 186 +- .../intbig.h | 6 +- .../the-sqisign_sqisign_lvl5_broadwell/l2.c | 42 +- .../lat_ball.c | 18 +- .../lattice.c | 38 +- .../lll_applications.c | 12 +- .../normeq.c | 110 +- .../quaternion.h | 16 +- .../quaternion_data.c | 2252 +++++++-------- .../the-sqisign_sqisign_lvl5_broadwell/sign.c | 22 +- .../torsion_constants.c | 24 +- .../the-sqisign_sqisign_lvl5_ref/algebra.c | 137 +- .../the-sqisign_sqisign_lvl5_ref/common.c | 1 + .../the-sqisign_sqisign_lvl5_ref/dim2.c | 76 +- .../the-sqisign_sqisign_lvl5_ref/dim2id2iso.c | 112 +- .../the-sqisign_sqisign_lvl5_ref/dim4.c | 148 +- .../encode_signature.c | 32 +- .../endomorphism_action.c | 1154 ++++---- .../the-sqisign_sqisign_lvl5_ref/finit.c | 20 +- .../the-sqisign_sqisign_lvl5_ref/hnf.c | 58 +- .../ibz_division.c | 2 +- .../the-sqisign_sqisign_lvl5_ref/id2iso.c | 110 +- .../the-sqisign_sqisign_lvl5_ref/ideal.c | 10 +- .../the-sqisign_sqisign_lvl5_ref/intbig.c | 186 +- .../the-sqisign_sqisign_lvl5_ref/intbig.h | 6 +- .../sqisign/the-sqisign_sqisign_lvl5_ref/l2.c | 42 +- .../the-sqisign_sqisign_lvl5_ref/lat_ball.c | 18 +- .../the-sqisign_sqisign_lvl5_ref/lattice.c | 38 +- .../lll_applications.c | 12 +- .../the-sqisign_sqisign_lvl5_ref/normeq.c | 110 +- .../the-sqisign_sqisign_lvl5_ref/printer.c | 16 +- .../the-sqisign_sqisign_lvl5_ref/quaternion.h | 16 +- .../quaternion_data.c | 2252 +++++++-------- .../the-sqisign_sqisign_lvl5_ref/sign.c | 22 +- .../torsion_constants.c | 24 +- tests/test_leaks.py | 2 +- 145 files changed, 14204 insertions(+), 14132 deletions(-) diff --git a/docs/algorithms/sig/sqisign.md b/docs/algorithms/sig/sqisign.md index 55646fbab8..1fd0517a55 100644 --- a/docs/algorithms/sig/sqisign.md +++ b/docs/algorithms/sig/sqisign.md @@ -6,7 +6,7 @@ - **Authors' website**: https://sqisign.org/ - **Specification version**: Round 2. 
- **Primary Source**: - - **Source**: https://github.com/bhess/the-sqisign/commit/74e2fbff9159377b99a09da91dac3afd9179d426 + - **Source**: https://github.com/bhess/the-sqisign/commit/f86bf0851967e6a1daf3ced46af22c9e92f08913 - **Implementation license (SPDX-Identifier)**: Apache-2.0 diff --git a/docs/algorithms/sig/sqisign.yml b/docs/algorithms/sig/sqisign.yml index 83f7ab4b70..827179b2fe 100644 --- a/docs/algorithms/sig/sqisign.yml +++ b/docs/algorithms/sig/sqisign.yml @@ -36,7 +36,7 @@ website: https://sqisign.org/ nist-round: 2 spec-version: Round 2 primary-upstream: - source: https://github.com/bhess/the-sqisign/commit/74e2fbff9159377b99a09da91dac3afd9179d426 + source: https://github.com/bhess/the-sqisign/commit/f86bf0851967e6a1daf3ced46af22c9e92f08913 spdx-license-identifier: Apache-2.0 parameter-sets: - name: SQIsign-lvl1 diff --git a/scripts/copy_from_upstream/copy_from_upstream.yml b/scripts/copy_from_upstream/copy_from_upstream.yml index a70491fcd2..f3b86e99c1 100644 --- a/scripts/copy_from_upstream/copy_from_upstream.yml +++ b/scripts/copy_from_upstream/copy_from_upstream.yml @@ -96,7 +96,7 @@ upstreams: name: the-sqisign git_url: https://github.com/bhess/the-sqisign.git git_branch: oqs - git_commit: 74e2fbff9159377b99a09da91dac3afd9179d426 + git_commit: f86bf0851967e6a1daf3ced46af22c9e92f08913 sig_scheme_path: '.' sig_meta_path: 'integration/liboqs/{pqclean_scheme}.yml' diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/algebra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/algebra.c index f4b4260755..a6298acf77 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/algebra.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/algebra.c @@ -21,54 +21,54 @@ quat_alg_coord_mul(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b, ibz_init(&prod); ibz_vec_4_init(&sum); - ibz_set(&(sum[0]), 0); - ibz_set(&(sum[1]), 0); - ibz_set(&(sum[2]), 0); - ibz_set(&(sum[3]), 0); + ibz_set(&(sum.v[0]), 0); + ibz_set(&(sum.v[1]), 0); + ibz_set(&(sum.v[2]), 0); + ibz_set(&(sum.v[3]), 0); // compute 1 coordinate - ibz_mul(&prod, &((*a)[2]), &((*b)[2])); - ibz_sub(&(sum[0]), &(sum[0]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[3])); - ibz_sub(&(sum[0]), &(sum[0]), &prod); - ibz_mul(&(sum[0]), &(sum[0]), &(alg->p)); - ibz_mul(&prod, &((*a)[0]), &((*b)[0])); - ibz_add(&(sum[0]), &(sum[0]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[1])); - ibz_sub(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[2])); + ibz_sub(&(sum.v[0]), &(sum.v[0]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[3])); + ibz_sub(&(sum.v[0]), &(sum.v[0]), &prod); + ibz_mul(&(sum.v[0]), &(sum.v[0]), &(alg->p)); + ibz_mul(&prod, &(a->v[0]), &(b->v[0])); + ibz_add(&(sum.v[0]), &(sum.v[0]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[1])); + ibz_sub(&(sum.v[0]), &(sum.v[0]), &prod); // compute i coordiante - ibz_mul(&prod, &((*a)[2]), &((*b)[3])); - ibz_add(&(sum[1]), &(sum[1]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[2])); - ibz_sub(&(sum[1]), &(sum[1]), &prod); - ibz_mul(&(sum[1]), &(sum[1]), &(alg->p)); - ibz_mul(&prod, &((*a)[0]), &((*b)[1])); - ibz_add(&(sum[1]), &(sum[1]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[0])); - ibz_add(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[3])); + ibz_add(&(sum.v[1]), &(sum.v[1]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[2])); + ibz_sub(&(sum.v[1]), &(sum.v[1]), &prod); + ibz_mul(&(sum.v[1]), &(sum.v[1]), &(alg->p)); + ibz_mul(&prod, &(a->v[0]), &(b->v[1])); + ibz_add(&(sum.v[1]), &(sum.v[1]), &prod); + 
ibz_mul(&prod, &(a->v[1]), &(b->v[0])); + ibz_add(&(sum.v[1]), &(sum.v[1]), &prod); // compute j coordiante - ibz_mul(&prod, &((*a)[0]), &((*b)[2])); - ibz_add(&(sum[2]), &(sum[2]), &prod); - ibz_mul(&prod, &((*a)[2]), &((*b)[0])); - ibz_add(&(sum[2]), &(sum[2]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[3])); - ibz_sub(&(sum[2]), &(sum[2]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[1])); - ibz_add(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &(a->v[0]), &(b->v[2])); + ibz_add(&(sum.v[2]), &(sum.v[2]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[0])); + ibz_add(&(sum.v[2]), &(sum.v[2]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[3])); + ibz_sub(&(sum.v[2]), &(sum.v[2]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[1])); + ibz_add(&(sum.v[2]), &(sum.v[2]), &prod); // compute ij coordiante - ibz_mul(&prod, &((*a)[0]), &((*b)[3])); - ibz_add(&(sum[3]), &(sum[3]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[0])); - ibz_add(&(sum[3]), &(sum[3]), &prod); - ibz_mul(&prod, &((*a)[2]), &((*b)[1])); - ibz_sub(&(sum[3]), &(sum[3]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[2])); - ibz_add(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &(a->v[0]), &(b->v[3])); + ibz_add(&(sum.v[3]), &(sum.v[3]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[0])); + ibz_add(&(sum.v[3]), &(sum.v[3]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[1])); + ibz_sub(&(sum.v[3]), &(sum.v[3]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[2])); + ibz_add(&(sum.v[3]), &(sum.v[3]), &prod); - ibz_copy(&((*res)[0]), &(sum[0])); - ibz_copy(&((*res)[1]), &(sum[1])); - ibz_copy(&((*res)[2]), &(sum[2])); - ibz_copy(&((*res)[3]), &(sum[3])); + ibz_copy(&(res->v[0]), &(sum.v[0])); + ibz_copy(&(res->v[1]), &(sum.v[1])); + ibz_copy(&(res->v[2]), &(sum.v[2])); + ibz_copy(&(res->v[3]), &(sum.v[3])); ibz_finalize(&prod); ibz_vec_4_finalize(&sum); @@ -86,8 +86,8 @@ quat_alg_equal_denom(quat_alg_elem_t *res_a, quat_alg_elem_t *res_b, const quat_ ibz_div(&(res_b->denom), &r, &(b->denom), &gcd); for (int i = 0; i < 4; i++) { // multiply coordiates by reduced denominators from the other element - ibz_mul(&(res_a->coord[i]), &(a->coord[i]), &(res_b->denom)); - ibz_mul(&(res_b->coord[i]), &(b->coord[i]), &(res_a->denom)); + ibz_mul(&(res_a->coord.v[i]), &(a->coord.v[i]), &(res_b->denom)); + ibz_mul(&(res_b->coord.v[i]), &(b->coord.v[i]), &(res_a->denom)); } // multiply both reduced denominators ibz_mul(&(res_a->denom), &(res_a->denom), &(res_b->denom)); @@ -149,8 +149,8 @@ quat_alg_norm(ibz_t *res_num, ibz_t *res_denom, const quat_alg_elem_t *a, const quat_alg_conj(&norm, a); quat_alg_mul(&norm, a, &norm, alg); - ibz_gcd(&g, &(norm.coord[0]), &(norm.denom)); - ibz_div(res_num, &r, &(norm.coord[0]), &g); + ibz_gcd(&g, &(norm.coord.v[0]), &(norm.denom)); + ibz_div(res_num, &r, &(norm.coord.v[0]), &g); ibz_div(res_denom, &r, &(norm.denom), &g); ibz_abs(res_denom, res_denom); ibz_abs(res_num, res_num); @@ -165,20 +165,20 @@ void quat_alg_scalar(quat_alg_elem_t *elem, const ibz_t *numerator, const ibz_t *denominator) { ibz_copy(&(elem->denom), denominator); - ibz_copy(&(elem->coord[0]), numerator); - ibz_set(&(elem->coord[1]), 0); - ibz_set(&(elem->coord[2]), 0); - ibz_set(&(elem->coord[3]), 0); + ibz_copy(&(elem->coord.v[0]), numerator); + ibz_set(&(elem->coord.v[1]), 0); + ibz_set(&(elem->coord.v[2]), 0); + ibz_set(&(elem->coord.v[3]), 0); } void quat_alg_conj(quat_alg_elem_t *conj, const quat_alg_elem_t *x) { ibz_copy(&(conj->denom), &(x->denom)); - ibz_copy(&(conj->coord[0]), &(x->coord[0])); - ibz_neg(&(conj->coord[1]), &(x->coord[1])); - 
ibz_neg(&(conj->coord[2]), &(x->coord[2])); - ibz_neg(&(conj->coord[3]), &(x->coord[3])); + ibz_copy(&(conj->coord.v[0]), &(x->coord.v[0])); + ibz_neg(&(conj->coord.v[1]), &(x->coord.v[1])); + ibz_neg(&(conj->coord.v[2]), &(x->coord.v[2])); + ibz_neg(&(conj->coord.v[3]), &(x->coord.v[3])); } void @@ -190,7 +190,8 @@ quat_alg_make_primitive(ibz_vec_4_t *primitive_x, ibz_t *content, const quat_alg ibz_t r; ibz_init(&r); for (int i = 0; i < 4; i++) { - ibz_div(*primitive_x + i, &r, *primitive_x + i, content); + // TODO: check if this is correct + ibz_div(primitive_x->v + i, &r, primitive_x->v + i, content); } ibz_finalize(&r); } @@ -235,10 +236,10 @@ quat_alg_elem_is_zero(const quat_alg_elem_t *x) void quat_alg_elem_set(quat_alg_elem_t *elem, int32_t denom, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) { - ibz_set(&(elem->coord[0]), coord0); - ibz_set(&(elem->coord[1]), coord1); - ibz_set(&(elem->coord[2]), coord2); - ibz_set(&(elem->coord[3]), coord3); + ibz_set(&(elem->coord.v[0]), coord0); + ibz_set(&(elem->coord.v[1]), coord1); + ibz_set(&(elem->coord.v[2]), coord2); + ibz_set(&(elem->coord.v[3]), coord3); ibz_set(&(elem->denom), denom); } @@ -247,10 +248,10 @@ void quat_alg_elem_copy(quat_alg_elem_t *copy, const quat_alg_elem_t *copied) { ibz_copy(©->denom, &copied->denom); - ibz_copy(©->coord[0], &copied->coord[0]); - ibz_copy(©->coord[1], &copied->coord[1]); - ibz_copy(©->coord[2], &copied->coord[2]); - ibz_copy(©->coord[3], &copied->coord[3]); + ibz_copy(©->coord.v[0], &copied->coord.v[0]); + ibz_copy(©->coord.v[1], &copied->coord.v[1]); + ibz_copy(©->coord.v[2], &copied->coord.v[2]); + ibz_copy(©->coord.v[3], &copied->coord.v[3]); } // helper functions for lattices @@ -262,10 +263,10 @@ quat_alg_elem_copy_ibz(quat_alg_elem_t *elem, const ibz_t *coord2, const ibz_t *coord3) { - ibz_copy(&(elem->coord[0]), coord0); - ibz_copy(&(elem->coord[1]), coord1); - ibz_copy(&(elem->coord[2]), coord2); - ibz_copy(&(elem->coord[3]), coord3); + ibz_copy(&(elem->coord.v[0]), coord0); + ibz_copy(&(elem->coord.v[1]), coord1); + ibz_copy(&(elem->coord.v[2]), coord2); + ibz_copy(&(elem->coord.v[3]), coord3); ibz_copy(&(elem->denom), denom); } @@ -274,7 +275,7 @@ void quat_alg_elem_mul_by_scalar(quat_alg_elem_t *res, const ibz_t *scalar, const quat_alg_elem_t *elem) { for (int i = 0; i < 4; i++) { - ibz_mul(&(res->coord[i]), &(elem->coord[i]), scalar); + ibz_mul(&(res->coord.v[i]), &(elem->coord.v[i]), scalar); } ibz_copy(&(res->denom), &(elem->denom)); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/common.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/common.c index 1df7755a29..e051ac340a 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/common.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/common.c @@ -14,6 +14,7 @@ public_key_init(public_key_t *pk) void public_key_finalize(public_key_t *pk) { + (void) pk; } // compute the challenge as the hash of the message and the commitment curve and public key diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim2.c index b31ae7771a..5bf214c4e2 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim2.c @@ -5,34 +5,34 @@ void ibz_vec_2_set(ibz_vec_2_t *vec, int a0, int a1) { - ibz_set(&((*vec)[0]), a0); - ibz_set(&((*vec)[1]), a1); + ibz_set(&(vec->v[0]), a0); + ibz_set(&(vec->v[1]), a1); } void ibz_mat_2x2_set(ibz_mat_2x2_t *mat, int a00, int a01, 
int a10, int a11) { - ibz_set(&((*mat)[0][0]), a00); - ibz_set(&((*mat)[0][1]), a01); - ibz_set(&((*mat)[1][0]), a10); - ibz_set(&((*mat)[1][1]), a11); + ibz_set(&(mat->m[0][0]), a00); + ibz_set(&(mat->m[0][1]), a01); + ibz_set(&(mat->m[1][0]), a10); + ibz_set(&(mat->m[1][1]), a11); } void ibz_mat_2x2_copy(ibz_mat_2x2_t *copy, const ibz_mat_2x2_t *copied) { - ibz_copy(&((*copy)[0][0]), &((*copied)[0][0])); - ibz_copy(&((*copy)[0][1]), &((*copied)[0][1])); - ibz_copy(&((*copy)[1][0]), &((*copied)[1][0])); - ibz_copy(&((*copy)[1][1]), &((*copied)[1][1])); + ibz_copy(&(copy->m[0][0]), &(copied->m[0][0])); + ibz_copy(&(copy->m[0][1]), &(copied->m[0][1])); + ibz_copy(&(copy->m[1][0]), &(copied->m[1][0])); + ibz_copy(&(copy->m[1][1]), &(copied->m[1][1])); } void ibz_mat_2x2_add(ibz_mat_2x2_t *sum, const ibz_mat_2x2_t *a, const ibz_mat_2x2_t *b) { - ibz_add(&((*sum)[0][0]), &((*a)[0][0]), &((*b)[0][0])); - ibz_add(&((*sum)[0][1]), &((*a)[0][1]), &((*b)[0][1])); - ibz_add(&((*sum)[1][0]), &((*a)[1][0]), &((*b)[1][0])); - ibz_add(&((*sum)[1][1]), &((*a)[1][1]), &((*b)[1][1])); + ibz_add(&(sum->m[0][0]), &(a->m[0][0]), &(b->m[0][0])); + ibz_add(&(sum->m[0][1]), &(a->m[0][1]), &(b->m[0][1])); + ibz_add(&(sum->m[1][0]), &(a->m[1][0]), &(b->m[1][0])); + ibz_add(&(sum->m[1][1]), &(a->m[1][1]), &(b->m[1][1])); } void @@ -53,16 +53,16 @@ ibz_mat_2x2_eval(ibz_vec_2_t *res, const ibz_mat_2x2_t *mat, const ibz_vec_2_t * ibz_vec_2_t matvec; ibz_init(&prod); ibz_vec_2_init(&matvec); - ibz_mul(&prod, &((*mat)[0][0]), &((*vec)[0])); - ibz_copy(&(matvec[0]), &prod); - ibz_mul(&prod, &((*mat)[0][1]), &((*vec)[1])); - ibz_add(&(matvec[0]), &(matvec[0]), &prod); - ibz_mul(&prod, &((*mat)[1][0]), &((*vec)[0])); - ibz_copy(&(matvec[1]), &prod); - ibz_mul(&prod, &((*mat)[1][1]), &((*vec)[1])); - ibz_add(&(matvec[1]), &(matvec[1]), &prod); - ibz_copy(&((*res)[0]), &(matvec[0])); - ibz_copy(&((*res)[1]), &(matvec[1])); + ibz_mul(&prod, &(mat->m[0][0]), &(vec->v[0])); + ibz_copy(&(matvec.v[0]), &prod); + ibz_mul(&prod, &(mat->m[0][1]), &(vec->v[1])); + ibz_add(&(matvec.v[0]), &(matvec.v[0]), &prod); + ibz_mul(&prod, &(mat->m[1][0]), &(vec->v[0])); + ibz_copy(&(matvec.v[1]), &prod); + ibz_mul(&prod, &(mat->m[1][1]), &(vec->v[1])); + ibz_add(&(matvec.v[1]), &(matvec.v[1]), &prod); + ibz_copy(&(res->v[0]), &(matvec.v[0])); + ibz_copy(&(res->v[1]), &(matvec.v[1])); ibz_finalize(&prod); ibz_vec_2_finalize(&matvec); } @@ -78,21 +78,21 @@ ibz_2x2_mul_mod(ibz_mat_2x2_t *prod, const ibz_mat_2x2_t *mat_a, const ibz_mat_2 ibz_mat_2x2_init(&sums); for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_set(&(sums[i][j]), 0); + ibz_set(&(sums.m[i][j]), 0); } } for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { for (int k = 0; k < 2; k++) { - ibz_mul(&mul, &((*mat_a)[i][k]), &((*mat_b)[k][j])); - ibz_add(&(sums[i][j]), &(sums[i][j]), &mul); - ibz_mod(&(sums[i][j]), &(sums[i][j]), m); + ibz_mul(&mul, &(mat_a->m[i][k]), &(mat_b->m[k][j])); + ibz_add(&(sums.m[i][j]), &(sums.m[i][j]), &mul); + ibz_mod(&(sums.m[i][j]), &(sums.m[i][j]), m); } } } for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_copy(&((*prod)[i][j]), &(sums[i][j])); + ibz_copy(&(prod->m[i][j]), &(sums.m[i][j])); } } ibz_finalize(&mul); @@ -105,9 +105,9 @@ ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m ibz_t det, prod; ibz_init(&det); ibz_init(&prod); - ibz_mul(&det, &((*mat)[0][0]), &((*mat)[1][1])); + ibz_mul(&det, &(mat->m[0][0]), &(mat->m[1][1])); ibz_mod(&det, &det, m); - ibz_mul(&prod, 
&((*mat)[0][1]), &((*mat)[1][0])); + ibz_mul(&prod, &(mat->m[0][1]), &(mat->m[1][0])); ibz_sub(&det, &det, &prod); ibz_mod(&det, &det, m); int res = ibz_invmod(&det, &det, m); @@ -115,15 +115,15 @@ ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m ibz_set(&prod, res); ibz_mul(&det, &det, &prod); // compute inverse - ibz_copy(&prod, &((*mat)[0][0])); - ibz_copy(&((*inv)[0][0]), &((*mat)[1][1])); - ibz_copy(&((*inv)[1][1]), &prod); - ibz_neg(&((*inv)[1][0]), &((*mat)[1][0])); - ibz_neg(&((*inv)[0][1]), &((*mat)[0][1])); + ibz_copy(&prod, &(mat->m[0][0])); + ibz_copy(&(inv->m[0][0]), &(mat->m[1][1])); + ibz_copy(&(inv->m[1][1]), &prod); + ibz_neg(&(inv->m[1][0]), &(mat->m[1][0])); + ibz_neg(&(inv->m[0][1]), &(mat->m[0][1])); for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_mul(&((*inv)[i][j]), &((*inv)[i][j]), &det); - ibz_mod(&((*inv)[i][j]), &((*inv)[i][j]), m); + ibz_mul(&(inv->m[i][j]), &(inv->m[i][j]), &det); + ibz_mod(&(inv->m[i][j]), &(inv->m[i][j]), m); } } ibz_finalize(&det); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c index 171473d481..143060e2c3 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c @@ -137,10 +137,10 @@ _fixed_degree_isogeny_impl(quat_left_ideal_t *lideal, ibz_invmod(&tmp, &tmp, &two_pow); assert(!ibz_is_even(&tmp)); - ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); - ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); - ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); - ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + ibz_mul(&theta.coord.v[0], &theta.coord.v[0], &tmp); + ibz_mul(&theta.coord.v[1], &theta.coord.v[1], &tmp); + ibz_mul(&theta.coord.v[2], &theta.coord.v[2], &tmp); + ibz_mul(&theta.coord.v[3], &theta.coord.v[3], &tmp); // applying theta to the basis ec_basis_t B0_two_theta; @@ -197,53 +197,53 @@ post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, const ibz_ // treatment if (is_special_order) { // reordering the basis if needed - if (ibz_cmp(&(*gram)[0][0], &(*gram)[2][2]) == 0) { + if (ibz_cmp(&gram->m[0][0], &gram->m[2][2]) == 0) { for (int i = 0; i < 4; i++) { - ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + ibz_swap(&reduced->m[i][1], &reduced->m[i][2]); } - ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); - ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); - ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); - ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); - ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); - } else if (ibz_cmp(&(*gram)[0][0], &(*gram)[3][3]) == 0) { + ibz_swap(&gram->m[0][2], &gram->m[0][1]); + ibz_swap(&gram->m[2][0], &gram->m[1][0]); + ibz_swap(&gram->m[3][2], &gram->m[3][1]); + ibz_swap(&gram->m[2][3], &gram->m[1][3]); + ibz_swap(&gram->m[2][2], &gram->m[1][1]); + } else if (ibz_cmp(&gram->m[0][0], &gram->m[3][3]) == 0) { for (int i = 0; i < 4; i++) { - ibz_swap(&(*reduced)[i][1], &(*reduced)[i][3]); + ibz_swap(&reduced->m[i][1], &reduced->m[i][3]); } - ibz_swap(&(*gram)[0][3], &(*gram)[0][1]); - ibz_swap(&(*gram)[3][0], &(*gram)[1][0]); - ibz_swap(&(*gram)[2][3], &(*gram)[2][1]); - ibz_swap(&(*gram)[3][2], &(*gram)[1][2]); - ibz_swap(&(*gram)[3][3], &(*gram)[1][1]); - } else if (ibz_cmp(&(*gram)[1][1], &(*gram)[3][3]) == 0) { + ibz_swap(&gram->m[0][3], &gram->m[0][1]); + ibz_swap(&gram->m[3][0], &gram->m[1][0]); + ibz_swap(&gram->m[2][3], &gram->m[2][1]); + ibz_swap(&gram->m[3][2], &gram->m[1][2]); + 
ibz_swap(&gram->m[3][3], &gram->m[1][1]); + } else if (ibz_cmp(&gram->m[1][1], &gram->m[3][3]) == 0) { // in this case it seems that we need to swap the second and third // element, and then recompute entirely the second element from the first // first we swap the second and third element for (int i = 0; i < 4; i++) { - ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + ibz_swap(&reduced->m[i][1], &reduced->m[i][2]); } - ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); - ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); - ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); - ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); - ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); + ibz_swap(&gram->m[0][2], &gram->m[0][1]); + ibz_swap(&gram->m[2][0], &gram->m[1][0]); + ibz_swap(&gram->m[3][2], &gram->m[3][1]); + ibz_swap(&gram->m[2][3], &gram->m[1][3]); + ibz_swap(&gram->m[2][2], &gram->m[1][1]); } // adjusting the sign if needed - if (ibz_cmp(&(*reduced)[0][0], &(*reduced)[1][1]) != 0) { + if (ibz_cmp(&reduced->m[0][0], &reduced->m[1][1]) != 0) { for (int i = 0; i < 4; i++) { - ibz_neg(&(*reduced)[i][1], &(*reduced)[i][1]); - ibz_neg(&(*gram)[i][1], &(*gram)[i][1]); - ibz_neg(&(*gram)[1][i], &(*gram)[1][i]); + ibz_neg(&reduced->m[i][1], &reduced->m[i][1]); + ibz_neg(&gram->m[i][1], &gram->m[i][1]); + ibz_neg(&gram->m[1][i], &gram->m[1][i]); } } - if (ibz_cmp(&(*reduced)[0][2], &(*reduced)[1][3]) != 0) { + if (ibz_cmp(&reduced->m[0][2], &reduced->m[1][3]) != 0) { for (int i = 0; i < 4; i++) { - ibz_neg(&(*reduced)[i][3], &(*reduced)[i][3]); - ibz_neg(&(*gram)[i][3], &(*gram)[i][3]); - ibz_neg(&(*gram)[3][i], &(*gram)[3][i]); + ibz_neg(&reduced->m[i][3], &reduced->m[i][3]); + ibz_neg(&gram->m[i][3], &gram->m[i][3]); + ibz_neg(&gram->m[3][i], &gram->m[3][i]); } - // assert(ibz_cmp(&(*reduced)[0][2],&(*reduced)[1][3])==0); + // assert(ibz_cmp(&reduced->m[0][2],&reduced->m[1][3])==0); } } } @@ -273,7 +273,7 @@ enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t // if the basis is of the form alpha, i*alpha, beta, i*beta // we can remove some values due to symmetry of the basis that bool need_remove_symmetry = - (ibz_cmp(&(*gram)[0][0], &(*gram)[1][1]) == 0 && ibz_cmp(&(*gram)[3][3], &(*gram)[2][2]) == 0); + (ibz_cmp(&gram->m[0][0], &gram->m[1][1]) == 0 && ibz_cmp(&gram->m[3][3], &gram->m[2][2]) == 0); int check1, check2, check3; @@ -324,10 +324,10 @@ enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t // and we ensure that we don't record the same norm in the list if (!need_remove_symmetry || (check1 <= check2 && check1 <= check3)) { // Set the point as a vector (x, y, z, w) - ibz_set(&point[0], x); - ibz_set(&point[1], y); - ibz_set(&point[2], z); - ibz_set(&point[3], w); + ibz_set(&point.v[0], x); + ibz_set(&point.v[1], y); + ibz_set(&point.v[2], z); + ibz_set(&point.v[3], w); // Evaluate this through the gram matrix and divide out by the // adjusted_norm @@ -336,10 +336,10 @@ enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t assert(ibz_is_zero(&remain)); if (ibz_mod_ui(&norm, 2) == 1) { - ibz_set(&vecs[count][0], x); - ibz_set(&vecs[count][1], y); - ibz_set(&vecs[count][2], z); - ibz_set(&vecs[count][3], w); + ibz_set(&vecs[count].v[0], x); + ibz_set(&vecs[count].v[1], y); + ibz_set(&vecs[count].v[2], z); + ibz_set(&vecs[count].v[3], w); ibz_copy(&norms[count], &norm); count++; } @@ -530,10 +530,10 @@ find_uv(ibz_t *u, quat_alg_elem_t delta; // delta will be the element of smallest norm quat_alg_elem_init(&delta); - ibz_set(&delta.coord[0], 1); - 
ibz_set(&delta.coord[1], 0); - ibz_set(&delta.coord[2], 0); - ibz_set(&delta.coord[3], 0); + ibz_set(&delta.coord.v[0], 1); + ibz_set(&delta.coord.v[1], 0); + ibz_set(&delta.coord.v[2], 0); + ibz_set(&delta.coord.v[3], 0); ibz_copy(&delta.denom, &reduced_id.lattice.denom); ibz_mat_4x4_eval(&delta.coord, &reduced[0], &delta.coord); assert(quat_lattice_contains(NULL, &reduced_id.lattice, &delta)); @@ -542,7 +542,7 @@ find_uv(ibz_t *u, quat_alg_conj(&delta, &delta); ibz_mul(&delta.denom, &delta.denom, &ideal[0].norm); quat_lattice_alg_elem_mul(&reduced_id.lattice, &reduced_id.lattice, &delta, Bpoo); - ibz_copy(&reduced_id.norm, &gram[0][0][0]); + ibz_copy(&reduced_id.norm, &gram[0].m[0][0]); ibz_div(&reduced_id.norm, &remain, &reduced_id.norm, &adjusted_norm[0]); assert(ibz_cmp(&remain, &ibz_const_zero) == 0); @@ -989,10 +989,10 @@ dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, } ibz_invmod(&tmp, &tmp, &two_pow); - ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); - ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); - ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); - ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + ibz_mul(&theta.coord.v[0], &theta.coord.v[0], &tmp); + ibz_mul(&theta.coord.v[1], &theta.coord.v[1], &tmp); + ibz_mul(&theta.coord.v[2], &theta.coord.v[2], &tmp); + ibz_mul(&theta.coord.v[3], &theta.coord.v[3], &tmp); // applying theta endomorphism_application_even_basis(&bas2, 0, &Fv_codomain.E1, &theta, TORSION_EVEN_POWER); @@ -1092,10 +1092,10 @@ dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, ibz_mul(&tmp, &tmp, &CONNECTING_IDEALS[index_order1].norm); } ibz_invmod(&tmp, &tmp, &TORSION_PLUS_2POWER); - ibz_mul(&beta1->coord[0], &beta1->coord[0], &tmp); - ibz_mul(&beta1->coord[1], &beta1->coord[1], &tmp); - ibz_mul(&beta1->coord[2], &beta1->coord[2], &tmp); - ibz_mul(&beta1->coord[3], &beta1->coord[3], &tmp); + ibz_mul(&beta1->coord.v[0], &beta1->coord.v[0], &tmp); + ibz_mul(&beta1->coord.v[1], &beta1->coord.v[1], &tmp); + ibz_mul(&beta1->coord.v[2], &beta1->coord.v[2], &tmp); + ibz_mul(&beta1->coord.v[3], &beta1->coord.v[3], &tmp); endomorphism_application_even_basis(basis, 0, codomain, beta1, TORSION_EVEN_POWER); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim4.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim4.c index 495dc2dcb2..b024a7d46e 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim4.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim4.c @@ -11,16 +11,16 @@ ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t ibz_mat_4x4_init(&mat); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&(mat[i][j]), 0); + ibz_set(&(mat.m[i][j]), 0); for (int k = 0; k < 4; k++) { - ibz_mul(&prod, &((*a)[i][k]), &((*b)[k][j])); - ibz_add(&(mat[i][j]), &(mat[i][j]), &prod); + ibz_mul(&prod, &(a->m[i][k]), &(b->m[k][j])); + ibz_add(&(mat.m[i][j]), &(mat.m[i][j]), &prod); } } } for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&((*res)[i][j]), &(mat[i][j])); + ibz_copy(&(res->m[i][j]), &(mat.m[i][j])); } } ibz_mat_4x4_finalize(&mat); @@ -31,61 +31,61 @@ ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t void ibz_vec_4_set(ibz_vec_4_t *vec, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) { - ibz_set(&((*vec)[0]), coord0); - ibz_set(&((*vec)[1]), coord1); - ibz_set(&((*vec)[2]), coord2); - ibz_set(&((*vec)[3]), coord3); + ibz_set(&(vec->v[0]), coord0); + ibz_set(&(vec->v[1]), coord1); + 
ibz_set(&(vec->v[2]), coord2); + ibz_set(&(vec->v[3]), coord3); } void ibz_vec_4_copy(ibz_vec_4_t *new, const ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_copy(&((*new)[i]), &((*vec)[i])); + ibz_copy(&(new->v[i]), &(vec->v[i])); } } void ibz_vec_4_copy_ibz(ibz_vec_4_t *res, const ibz_t *coord0, const ibz_t *coord1, const ibz_t *coord2, const ibz_t *coord3) { - ibz_copy(&((*res)[0]), coord0); - ibz_copy(&((*res)[1]), coord1); - ibz_copy(&((*res)[2]), coord2); - ibz_copy(&((*res)[3]), coord3); + ibz_copy(&(res->v[0]), coord0); + ibz_copy(&(res->v[1]), coord1); + ibz_copy(&(res->v[2]), coord2); + ibz_copy(&(res->v[3]), coord3); } void ibz_vec_4_content(ibz_t *content, const ibz_vec_4_t *v) { - ibz_gcd(content, &((*v)[0]), &((*v)[1])); - ibz_gcd(content, &((*v)[2]), content); - ibz_gcd(content, &((*v)[3]), content); + ibz_gcd(content, &(v->v[0]), &(v->v[1])); + ibz_gcd(content, &(v->v[2]), content); + ibz_gcd(content, &(v->v[3]), content); } void ibz_vec_4_negate(ibz_vec_4_t *neg, const ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_neg(&((*neg)[i]), &((*vec)[i])); + ibz_neg(&(neg->v[i]), &(vec->v[i])); } } void ibz_vec_4_add(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) { - ibz_add(&((*res)[0]), &((*a)[0]), &((*b)[0])); - ibz_add(&((*res)[1]), &((*a)[1]), &((*b)[1])); - ibz_add(&((*res)[2]), &((*a)[2]), &((*b)[2])); - ibz_add(&((*res)[3]), &((*a)[3]), &((*b)[3])); + ibz_add(&(res->v[0]), &(a->v[0]), &(b->v[0])); + ibz_add(&(res->v[1]), &(a->v[1]), &(b->v[1])); + ibz_add(&(res->v[2]), &(a->v[2]), &(b->v[2])); + ibz_add(&(res->v[3]), &(a->v[3]), &(b->v[3])); } void ibz_vec_4_sub(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) { - ibz_sub(&((*res)[0]), &((*a)[0]), &((*b)[0])); - ibz_sub(&((*res)[1]), &((*a)[1]), &((*b)[1])); - ibz_sub(&((*res)[2]), &((*a)[2]), &((*b)[2])); - ibz_sub(&((*res)[3]), &((*a)[3]), &((*b)[3])); + ibz_sub(&(res->v[0]), &(a->v[0]), &(b->v[0])); + ibz_sub(&(res->v[1]), &(a->v[1]), &(b->v[1])); + ibz_sub(&(res->v[2]), &(a->v[2]), &(b->v[2])); + ibz_sub(&(res->v[3]), &(a->v[3]), &(b->v[3])); } int @@ -93,7 +93,7 @@ ibz_vec_4_is_zero(const ibz_vec_4_t *x) { int res = 1; for (int i = 0; i < 4; i++) { - res &= ibz_is_zero(&((*x)[i])); + res &= ibz_is_zero(&(x->v[i])); } return (res); } @@ -110,12 +110,12 @@ ibz_vec_4_linear_combination(ibz_vec_4_t *lc, ibz_vec_4_init(&sums); ibz_init(&prod); for (int i = 0; i < 4; i++) { - ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); - ibz_mul(&prod, coeff_b, &((*vec_b)[i])); - ibz_add(&(sums[i]), &(sums[i]), &prod); + ibz_mul(&(sums.v[i]), coeff_a, &(vec_a->v[i])); + ibz_mul(&prod, coeff_b, &(vec_b->v[i])); + ibz_add(&(sums.v[i]), &(sums.v[i]), &prod); } for (int i = 0; i < 4; i++) { - ibz_copy(&((*lc)[i]), &(sums[i])); + ibz_copy(&(lc->v[i]), &(sums.v[i])); } ibz_finalize(&prod); ibz_vec_4_finalize(&sums); @@ -125,7 +125,7 @@ void ibz_vec_4_scalar_mul(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_mul(&((*prod)[i]), &((*vec)[i]), scalar); + ibz_mul(&(prod->v[i]), &(vec->v[i]), scalar); } } @@ -136,7 +136,7 @@ ibz_vec_4_scalar_div(ibz_vec_4_t *quot, const ibz_t *scalar, const ibz_vec_4_t * ibz_t r; ibz_init(&r); for (int i = 0; i < 4; i++) { - ibz_div(&((*quot)[i]), &r, &((*vec)[i]), scalar); + ibz_div(&(quot->v[i]), &r, &(vec->v[i]), scalar); res = res && ibz_is_zero(&r); } ibz_finalize(&r); @@ -148,7 +148,7 @@ ibz_mat_4x4_copy(ibz_mat_4x4_t *new, const ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - 
ibz_copy(&((*new)[i][j]), &((*mat)[i][j])); + ibz_copy(&(new->m[i][j]), &(mat->m[i][j])); } } } @@ -158,7 +158,7 @@ ibz_mat_4x4_negate(ibz_mat_4x4_t *neg, const ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_neg(&((*neg)[i][j]), &((*mat)[i][j])); + ibz_neg(&(neg->m[i][j]), &(mat->m[i][j])); } } } @@ -170,7 +170,7 @@ ibz_mat_4x4_transpose(ibz_mat_4x4_t *transposed, const ibz_mat_4x4_t *mat) ibz_mat_4x4_init(&work); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(work[i][j]), &((*mat)[j][i])); + ibz_copy(&(work.m[i][j]), &(mat->m[j][i])); } } ibz_mat_4x4_copy(transposed, &work); @@ -182,7 +182,7 @@ ibz_mat_4x4_zero(ibz_mat_4x4_t *zero) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&((*zero)[i][j]), 0); + ibz_set(&(zero->m[i][j]), 0); } } } @@ -192,9 +192,9 @@ ibz_mat_4x4_identity(ibz_mat_4x4_t *id) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&((*id)[i][j]), 0); + ibz_set(&(id->m[i][j]), 0); } - ibz_set(&((*id)[i][i]), 1); + ibz_set(&(id->m[i][i]), 1); } } @@ -204,7 +204,7 @@ ibz_mat_4x4_is_identity(const ibz_mat_4x4_t *mat) int res = 1; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - res = res && ibz_is_one(&((*mat)[i][j])) == (i == j); + res = res && ibz_is_one(&(mat->m[i][j])) == (i == j); } } return (res); @@ -216,7 +216,7 @@ ibz_mat_4x4_equal(const ibz_mat_4x4_t *mat1, const ibz_mat_4x4_t *mat2) int res = 0; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - res = res | ibz_cmp(&((*mat1)[i][j]), &((*mat2)[i][j])); + res = res | ibz_cmp(&(mat1->m[i][j]), &(mat2->m[i][j])); } } return (!res); @@ -227,7 +227,7 @@ ibz_mat_4x4_scalar_mul(ibz_mat_4x4_t *prod, const ibz_t *scalar, const ibz_mat_4 { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_mul(&((*prod)[i][j]), &((*mat)[i][j]), scalar); + ibz_mul(&(prod->m[i][j]), &(mat->m[i][j]), scalar); } } } @@ -237,10 +237,10 @@ ibz_mat_4x4_gcd(ibz_t *gcd, const ibz_mat_4x4_t *mat) { ibz_t d; ibz_init(&d); - ibz_copy(&d, &((*mat)[0][0])); + ibz_copy(&d, &(mat->m[0][0])); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_gcd(&d, &d, &((*mat)[i][j])); + ibz_gcd(&d, &d, &(mat->m[i][j])); } } ibz_copy(gcd, &d); @@ -255,7 +255,7 @@ ibz_mat_4x4_scalar_div(ibz_mat_4x4_t *quot, const ibz_t *scalar, const ibz_mat_4 ibz_init(&r); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_div(&((*quot)[i][j]), &r, &((*mat)[i][j]), scalar); + ibz_div(&(quot->m[i][j]), &r, &(mat->m[i][j]), scalar); res = res && ibz_is_zero(&r); } } @@ -325,17 +325,17 @@ ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_ // compute some 2x2 minors, store them in s and c for (int i = 0; i < 3; i++) { - ibz_mat_2x2_det_from_ibz(&(s[i]), &((*mat)[0][0]), &((*mat)[0][i + 1]), &((*mat)[1][0]), &((*mat)[1][i + 1])); - ibz_mat_2x2_det_from_ibz(&(c[i]), &((*mat)[2][0]), &((*mat)[2][i + 1]), &((*mat)[3][0]), &((*mat)[3][i + 1])); + ibz_mat_2x2_det_from_ibz(&(s[i]), &(mat->m[0][0]), &(mat->m[0][i + 1]), &(mat->m[1][0]), &(mat->m[1][i + 1])); + ibz_mat_2x2_det_from_ibz(&(c[i]), &(mat->m[2][0]), &(mat->m[2][i + 1]), &(mat->m[3][0]), &(mat->m[3][i + 1])); } for (int i = 0; i < 2; i++) { ibz_mat_2x2_det_from_ibz( - &(s[3 + i]), &((*mat)[0][1]), &((*mat)[0][2 + i]), &((*mat)[1][1]), &((*mat)[1][2 + i])); + &(s[3 + i]), &(mat->m[0][1]), &(mat->m[0][2 + i]), &(mat->m[1][1]), &(mat->m[1][2 + i])); ibz_mat_2x2_det_from_ibz( - &(c[3 + i]), &((*mat)[2][1]), &((*mat)[2][2 + i]), 
&((*mat)[3][1]), &((*mat)[3][2 + i])); + &(c[3 + i]), &(mat->m[2][1]), &(mat->m[2][2 + i]), &(mat->m[3][1]), &(mat->m[3][2 + i])); } - ibz_mat_2x2_det_from_ibz(&(s[5]), &((*mat)[0][2]), &((*mat)[0][3]), &((*mat)[1][2]), &((*mat)[1][3])); - ibz_mat_2x2_det_from_ibz(&(c[5]), &((*mat)[2][2]), &((*mat)[2][3]), &((*mat)[3][2]), &((*mat)[3][3])); + ibz_mat_2x2_det_from_ibz(&(s[5]), &(mat->m[0][2]), &(mat->m[0][3]), &(mat->m[1][2]), &(mat->m[1][3])); + ibz_mat_2x2_det_from_ibz(&(c[5]), &(mat->m[2][2]), &(mat->m[2][3]), &(mat->m[3][2]), &(mat->m[3][3])); // compute det ibz_set(&work_det, 0); @@ -351,39 +351,39 @@ ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_ for (int j = 0; j < 4; j++) { for (int k = 0; k < 2; k++) { if ((k + j + 1) % 2 == 1) { - ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), - &((*mat)[1 - k][(j == 0)]), + ibz_inv_dim4_make_coeff_pmp(&(work.m[j][k]), + &(mat->m[1 - k][(j == 0)]), &(c[6 - j - (j == 0)]), - &((*mat)[1 - k][2 - (j > 1)]), + &(mat->m[1 - k][2 - (j > 1)]), &(c[4 - j - (j == 1)]), - &((*mat)[1 - k][3 - (j == 3)]), + &(mat->m[1 - k][3 - (j == 3)]), &(c[3 - j - (j == 1) - (j == 2)])); } else { - ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), - &((*mat)[1 - k][(j == 0)]), + ibz_inv_dim4_make_coeff_mpm(&(work.m[j][k]), + &(mat->m[1 - k][(j == 0)]), &(c[6 - j - (j == 0)]), - &((*mat)[1 - k][2 - (j > 1)]), + &(mat->m[1 - k][2 - (j > 1)]), &(c[4 - j - (j == 1)]), - &((*mat)[1 - k][3 - (j == 3)]), + &(mat->m[1 - k][3 - (j == 3)]), &(c[3 - j - (j == 1) - (j == 2)])); } } for (int k = 2; k < 4; k++) { if ((k + j + 1) % 2 == 1) { - ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), - &((*mat)[3 - (k == 3)][(j == 0)]), + ibz_inv_dim4_make_coeff_pmp(&(work.m[j][k]), + &(mat->m[3 - (k == 3)][(j == 0)]), &(s[6 - j - (j == 0)]), - &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(mat->m[3 - (k == 3)][2 - (j > 1)]), &(s[4 - j - (j == 1)]), - &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(mat->m[3 - (k == 3)][3 - (j == 3)]), &(s[3 - j - (j == 1) - (j == 2)])); } else { - ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), - &((*mat)[3 - (k == 3)][(j == 0)]), + ibz_inv_dim4_make_coeff_mpm(&(work.m[j][k]), + &(mat->m[3 - (k == 3)][(j == 0)]), &(s[6 - j - (j == 0)]), - &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(mat->m[3 - (k == 3)][2 - (j > 1)]), &(s[4 - j - (j == 1)]), - &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(mat->m[3 - (k == 3)][3 - (j == 3)]), &(s[3 - j - (j == 1) - (j == 2)])); } } @@ -418,8 +418,8 @@ ibz_mat_4x4_eval(ibz_vec_4_t *res, const ibz_mat_4x4_t *mat, const ibz_vec_4_t * // assume initialization to 0 for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_mul(&prod, &(*mat)[i][j], &(*vec)[j]); - ibz_add(&(sum[i]), &(sum[i]), &prod); + ibz_mul(&prod, &mat->m[i][j], &vec->v[j]); + ibz_add(&(sum.v[i]), &(sum.v[i]), &prod); } } ibz_vec_4_copy(res, &sum); @@ -437,8 +437,8 @@ ibz_mat_4x4_eval_t(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_mat_4x4_t // assume initialization to 0 for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_mul(&prod, &(*mat)[j][i], &(*vec)[j]); - ibz_add(&(sum[i]), &(sum[i]), &prod); + ibz_mul(&prod, &mat->m[j][i], &vec->v[j]); + ibz_add(&(sum.v[i]), &(sum.v[i]), &prod); } } ibz_vec_4_copy(res, &sum); @@ -457,14 +457,14 @@ quat_qf_eval(ibz_t *res, const ibz_mat_4x4_t *qf, const ibz_vec_4_t *coord) ibz_vec_4_init(&sum); ibz_mat_4x4_eval(&sum, qf, coord); for (int i = 0; i < 4; i++) { - ibz_mul(&prod, &(sum[i]), &(*coord)[i]); + ibz_mul(&prod, &(sum.v[i]), &coord->v[i]); if (i > 0) { - ibz_add(&(sum[0]), &(sum[0]), &prod); + 
ibz_add(&(sum.v[0]), &(sum.v[0]), &prod); } else { - ibz_copy(&sum[0], &prod); + ibz_copy(&sum.v[0], &prod); } } - ibz_copy(res, &sum[0]); + ibz_copy(res, &sum.v[0]); ibz_finalize(&prod); ibz_vec_4_finalize(&sum); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/encode_signature.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/encode_signature.c index 112c695941..3a630cfd58 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/encode_signature.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/encode_signature.c @@ -157,17 +157,17 @@ secret_key_to_bytes(byte_t *enc, const secret_key_t *sk, const public_key_t *pk) ibz_finalize(&gcd); } #endif - enc = ibz_to_bytes(enc, &gen.coord[0], FP_ENCODED_BYTES, true); - enc = ibz_to_bytes(enc, &gen.coord[1], FP_ENCODED_BYTES, true); - enc = ibz_to_bytes(enc, &gen.coord[2], FP_ENCODED_BYTES, true); - enc = ibz_to_bytes(enc, &gen.coord[3], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[0], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[1], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[2], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[3], FP_ENCODED_BYTES, true); quat_alg_elem_finalize(&gen); } - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][0], TORSION_2POWER_BYTES, false); - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][1], TORSION_2POWER_BYTES, false); - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][0], TORSION_2POWER_BYTES, false); - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][1], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[0][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[0][1], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[1][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[1][1], TORSION_2POWER_BYTES, false); assert(enc - start == SECRETKEY_BYTES); } @@ -187,19 +187,19 @@ secret_key_from_bytes(secret_key_t *sk, public_key_t *pk, const byte_t *enc) quat_alg_elem_t gen; quat_alg_elem_init(&gen); enc = ibz_from_bytes(&norm, enc, FP_ENCODED_BYTES, false); - enc = ibz_from_bytes(&gen.coord[0], enc, FP_ENCODED_BYTES, true); - enc = ibz_from_bytes(&gen.coord[1], enc, FP_ENCODED_BYTES, true); - enc = ibz_from_bytes(&gen.coord[2], enc, FP_ENCODED_BYTES, true); - enc = ibz_from_bytes(&gen.coord[3], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[0], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[1], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[2], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[3], enc, FP_ENCODED_BYTES, true); quat_lideal_create(&sk->secret_ideal, &gen, &norm, &MAXORD_O0, &QUATALG_PINFTY); ibz_finalize(&norm); quat_alg_elem_finalize(&gen); } - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][0], enc, TORSION_2POWER_BYTES, false); - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][1], enc, TORSION_2POWER_BYTES, false); - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][0], enc, TORSION_2POWER_BYTES, false); - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][1], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[0][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[0][1], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[1][0], enc, TORSION_2POWER_BYTES, false); 
+ enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[1][1], enc, TORSION_2POWER_BYTES, false); assert(enc - start == SECRETKEY_BYTES); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c index abeddc30a7..1a93e36455 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c @@ -261,223 +261,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x56db,0x1b54,0xbda2,0xc5d3,0xdd06,0x861d,0x9780,0x7475,0x33d1,0x41af,0x34b2,0x7f9d,0x7f8c,0xaa8c,0xb471,0xca}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x56db,0x1b54,0xbda2,0xc5d3,0xdd06,0x861d,0x9780,0x7475,0x33d1,0x41af,0x34b2,0x7f9d,0x7f8c,0xaa8c,0xb471,0xca}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1b5456db,0xc5d3bda2,0x861ddd06,0x74759780,0x41af33d1,0x7f9d34b2,0xaa8c7f8c,0xcab471}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1b5456db,0xc5d3bda2,0x861ddd06,0x74759780,0x41af33d1,0x7f9d34b2,0xaa8c7f8c,0xcab471}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc5d3bda21b5456db,0x74759780861ddd06,0x7f9d34b241af33d1,0xcab471aa8c7f8c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc5d3bda21b5456db,0x74759780861ddd06,0x7f9d34b241af33d1,0xcab471aa8c7f8c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7d7a,0x48b,0x7d32,0x7bfb,0x9bd3,0x63d8,0x9182,0xa955,0x3e1,0x344,0x6861,0x76bf,0x5cd0,0xeeb4,0x4ae3,0x57}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7d7a,0x48b,0x7d32,0x7bfb,0x9bd3,0x63d8,0x9182,0xa955,0x3e1,0x344,0x6861,0x76bf,0x5cd0,0xeeb4,0x4ae3,0x57}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x48b7d7a,0x7bfb7d32,0x63d89bd3,0xa9559182,0x34403e1,0x76bf6861,0xeeb45cd0,0x574ae3}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x48b7d7a,0x7bfb7d32,0x63d89bd3,0xa9559182,0x34403e1,0x76bf6861,0xeeb45cd0,0x574ae3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7bfb7d32048b7d7a,0xa955918263d89bd3,0x76bf6861034403e1,0x574ae3eeb45cd0}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7bfb7d32048b7d7a,0xa955918263d89bd3,0x76bf6861034403e1,0x574ae3eeb45cd0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x444f,0x3698,0xd649,0x856f,0x41db,0x498f,0xafdf,0x189c,0xcb5b,0xe50b,0xbff,0xf7e0,0x47f9,0xa88b,0x35da,0x15}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x444f,0x3698,0xd649,0x856f,0x41db,0x498f,0xafdf,0x189c,0xcb5b,0xe50b,0xbff,0xf7e0,0x47f9,0xa88b,0x35da,0x15}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3698444f,0x856fd649,0x498f41db,0x189cafdf,0xe50bcb5b,0xf7e00bff,0xa88b47f9,0x1535da}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3698444f,0x856fd649,0x498f41db,0x189cafdf,0xe50bcb5b,0xf7e00bff,0xa88b47f9,0x1535da}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x856fd6493698444f,0x189cafdf498f41db,0xf7e00bffe50bcb5b,0x1535daa88b47f9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = 
(mp_limb_t[]) {0x856fd6493698444f,0x189cafdf498f41db,0xf7e00bffe50bcb5b,0x1535daa88b47f9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa925,0xe4ab,0x425d,0x3a2c,0x22f9,0x79e2,0x687f,0x8b8a,0xcc2e,0xbe50,0xcb4d,0x8062,0x8073,0x5573,0x4b8e,0x35}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa925,0xe4ab,0x425d,0x3a2c,0x22f9,0x79e2,0x687f,0x8b8a,0xcc2e,0xbe50,0xcb4d,0x8062,0x8073,0x5573,0x4b8e,0x35}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe4aba925,0x3a2c425d,0x79e222f9,0x8b8a687f,0xbe50cc2e,0x8062cb4d,0x55738073,0x354b8e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe4aba925,0x3a2c425d,0x79e222f9,0x8b8a687f,0xbe50cc2e,0x8062cb4d,0x55738073,0x354b8e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3a2c425de4aba925,0x8b8a687f79e222f9,0x8062cb4dbe50cc2e,0x354b8e55738073}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3a2c425de4aba925,0x8b8a687f79e222f9,0x8062cb4dbe50cc2e,0x354b8e55738073}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xabf,0x5490,0xd5fd,0x36ba,0xda0f,0x4a59,0x4eea,0xd1,0xa3f0,0xa7ae,0x6f6,0x9146,0x5004,0xcde6,0xa2d2,0x7d}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xabf,0x5490,0xd5fd,0x36ba,0xda0f,0x4a59,0x4eea,0xd1,0xa3f0,0xa7ae,0x6f6,0x9146,0x5004,0xcde6,0xa2d2,0x7d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x54900abf,0x36bad5fd,0x4a59da0f,0xd14eea,0xa7aea3f0,0x914606f6,0xcde65004,0x7da2d2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x54900abf,0x36bad5fd,0x4a59da0f,0xd14eea,0xa7aea3f0,0x914606f6,0xcde65004,0x7da2d2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x36bad5fd54900abf,0xd14eea4a59da0f,0x914606f6a7aea3f0,0x7da2d2cde65004}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x36bad5fd54900abf,0xd14eea4a59da0f,0x914606f6a7aea3f0,0x7da2d2cde65004}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8680,0xb787,0xbde3,0x611d,0xa95f,0x8b68,0xc9ec,0x819,0x2361,0xf73e,0x5e31,0xbd7b,0x2b45,0x40d7,0x2400,0x68}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8680,0xb787,0xbde3,0x611d,0xa95f,0x8b68,0xc9ec,0x819,0x2361,0xf73e,0x5e31,0xbd7b,0x2b45,0x40d7,0x2400,0x68}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb7878680,0x611dbde3,0x8b68a95f,0x819c9ec,0xf73e2361,0xbd7b5e31,0x40d72b45,0x682400}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb7878680,0x611dbde3,0x8b68a95f,0x819c9ec,0xf73e2361,0xbd7b5e31,0x40d72b45,0x682400}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x611dbde3b7878680,0x819c9ec8b68a95f,0xbd7b5e31f73e2361,0x68240040d72b45}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x611dbde3b7878680,0x819c9ec8b68a95f,0xbd7b5e31f73e2361,0x68240040d72b45}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4277,0x6d20,0x9e12,0x1f0c,0x977f,0xf854,0x9d1c,0x563f,0xdb,0xc2ed,0xaf54,0xe829,0x4fb,0xd83,0x7be8,0xca}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4277,0x6d20,0x9e12,0x1f0c,0x977f,0xf854,0x9d1c,0x563f,0xdb,0xc2ed,0xaf54,0xe829,0x4fb,0xd83,0x7be8,0xca}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, 
._mp_d = (mp_limb_t[]) {0x6d204277,0x1f0c9e12,0xf854977f,0x563f9d1c,0xc2ed00db,0xe829af54,0xd8304fb,0xca7be8}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6d204277,0x1f0c9e12,0xf854977f,0x563f9d1c,0xc2ed00db,0xe829af54,0xd8304fb,0xca7be8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1f0c9e126d204277,0x563f9d1cf854977f,0xe829af54c2ed00db,0xca7be80d8304fb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1f0c9e126d204277,0x563f9d1cf854977f,0xe829af54c2ed00db,0xca7be80d8304fb}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf541,0xab6f,0x2a02,0xc945,0x25f0,0xb5a6,0xb115,0xff2e,0x5c0f,0x5851,0xf909,0x6eb9,0xaffb,0x3219,0x5d2d,0x82}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf541,0xab6f,0x2a02,0xc945,0x25f0,0xb5a6,0xb115,0xff2e,0x5c0f,0x5851,0xf909,0x6eb9,0xaffb,0x3219,0x5d2d,0x82}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xab6ff541,0xc9452a02,0xb5a625f0,0xff2eb115,0x58515c0f,0x6eb9f909,0x3219affb,0x825d2d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xab6ff541,0xc9452a02,0xb5a625f0,0xff2eb115,0x58515c0f,0x6eb9f909,0x3219affb,0x825d2d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc9452a02ab6ff541,0xff2eb115b5a625f0,0x6eb9f90958515c0f,0x825d2d3219affb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc9452a02ab6ff541,0xff2eb115b5a625f0,0x6eb9f90958515c0f,0x825d2d3219affb}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x56db,0x1b54,0xbda2,0xc5d3,0xdd06,0x861d,0x9780,0x7475,0x33d1,0x41af,0x34b2,0x7f9d,0x7f8c,0xaa8c,0xb471,0xca}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x56db,0x1b54,0xbda2,0xc5d3,0xdd06,0x861d,0x9780,0x7475,0x33d1,0x41af,0x34b2,0x7f9d,0x7f8c,0xaa8c,0xb471,0xca}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1b5456db,0xc5d3bda2,0x861ddd06,0x74759780,0x41af33d1,0x7f9d34b2,0xaa8c7f8c,0xcab471}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1b5456db,0xc5d3bda2,0x861ddd06,0x74759780,0x41af33d1,0x7f9d34b2,0xaa8c7f8c,0xcab471}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc5d3bda21b5456db,0x74759780861ddd06,0x7f9d34b241af33d1,0xcab471aa8c7f8c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc5d3bda21b5456db,0x74759780861ddd06,0x7f9d34b241af33d1,0xcab471aa8c7f8c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7d7a,0x48b,0x7d32,0x7bfb,0x9bd3,0x63d8,0x9182,0xa955,0x3e1,0x344,0x6861,0x76bf,0x5cd0,0xeeb4,0x4ae3,0x57}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7d7a,0x48b,0x7d32,0x7bfb,0x9bd3,0x63d8,0x9182,0xa955,0x3e1,0x344,0x6861,0x76bf,0x5cd0,0xeeb4,0x4ae3,0x57}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, 
._mp_d = (mp_limb_t[]) {0x48b7d7a,0x7bfb7d32,0x63d89bd3,0xa9559182,0x34403e1,0x76bf6861,0xeeb45cd0,0x574ae3}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x48b7d7a,0x7bfb7d32,0x63d89bd3,0xa9559182,0x34403e1,0x76bf6861,0xeeb45cd0,0x574ae3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7bfb7d32048b7d7a,0xa955918263d89bd3,0x76bf6861034403e1,0x574ae3eeb45cd0}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7bfb7d32048b7d7a,0xa955918263d89bd3,0x76bf6861034403e1,0x574ae3eeb45cd0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x444f,0x3698,0xd649,0x856f,0x41db,0x498f,0xafdf,0x189c,0xcb5b,0xe50b,0xbff,0xf7e0,0x47f9,0xa88b,0x35da,0x15}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x444f,0x3698,0xd649,0x856f,0x41db,0x498f,0xafdf,0x189c,0xcb5b,0xe50b,0xbff,0xf7e0,0x47f9,0xa88b,0x35da,0x15}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3698444f,0x856fd649,0x498f41db,0x189cafdf,0xe50bcb5b,0xf7e00bff,0xa88b47f9,0x1535da}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3698444f,0x856fd649,0x498f41db,0x189cafdf,0xe50bcb5b,0xf7e00bff,0xa88b47f9,0x1535da}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x856fd6493698444f,0x189cafdf498f41db,0xf7e00bffe50bcb5b,0x1535daa88b47f9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x856fd6493698444f,0x189cafdf498f41db,0xf7e00bffe50bcb5b,0x1535daa88b47f9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa925,0xe4ab,0x425d,0x3a2c,0x22f9,0x79e2,0x687f,0x8b8a,0xcc2e,0xbe50,0xcb4d,0x8062,0x8073,0x5573,0x4b8e,0x35}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa925,0xe4ab,0x425d,0x3a2c,0x22f9,0x79e2,0x687f,0x8b8a,0xcc2e,0xbe50,0xcb4d,0x8062,0x8073,0x5573,0x4b8e,0x35}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe4aba925,0x3a2c425d,0x79e222f9,0x8b8a687f,0xbe50cc2e,0x8062cb4d,0x55738073,0x354b8e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe4aba925,0x3a2c425d,0x79e222f9,0x8b8a687f,0xbe50cc2e,0x8062cb4d,0x55738073,0x354b8e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3a2c425de4aba925,0x8b8a687f79e222f9,0x8062cb4dbe50cc2e,0x354b8e55738073}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3a2c425de4aba925,0x8b8a687f79e222f9,0x8062cb4dbe50cc2e,0x354b8e55738073}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x30cd,0xb7f2,0x49cf,0xfe47,0xdb8a,0x683b,0x7335,0xbaa3,0xebe0,0x74ae,0x9dd4,0x8871,0x67c8,0x3c39,0x2ba2,0x24}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x30cd,0xb7f2,0x49cf,0xfe47,0xdb8a,0x683b,0x7335,0xbaa3,0xebe0,0x74ae,0x9dd4,0x8871,0x67c8,0x3c39,0x2ba2,0x24}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb7f230cd,0xfe4749cf,0x683bdb8a,0xbaa37335,0x74aeebe0,0x88719dd4,0x3c3967c8,0x242ba2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb7f230cd,0xfe4749cf,0x683bdb8a,0xbaa37335,0x74aeebe0,0x88719dd4,0x3c3967c8,0x242ba2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfe4749cfb7f230cd,0xbaa37335683bdb8a,0x88719dd474aeebe0,0x242ba23c3967c8}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0xfe4749cfb7f230cd,0xbaa37335683bdb8a,0x88719dd474aeebe0,0x242ba23c3967c8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81fd,0xde09,0x9d8a,0x6e8c,0xa299,0x77a0,0xadb7,0x58b7,0x13a1,0x7d41,0x6349,0x1a1d,0xc40b,0x17c5,0xb772,0xdf}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81fd,0xde09,0x9d8a,0x6e8c,0xa299,0x77a0,0xadb7,0x58b7,0x13a1,0x7d41,0x6349,0x1a1d,0xc40b,0x17c5,0xb772,0xdf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xde0981fd,0x6e8c9d8a,0x77a0a299,0x58b7adb7,0x7d4113a1,0x1a1d6349,0x17c5c40b,0xdfb772}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xde0981fd,0x6e8c9d8a,0x77a0a299,0x58b7adb7,0x7d4113a1,0x1a1d6349,0x17c5c40b,0xdfb772}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6e8c9d8ade0981fd,0x58b7adb777a0a299,0x1a1d63497d4113a1,0xdfb77217c5c40b}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6e8c9d8ade0981fd,0x58b7adb777a0a299,0x1a1d63497d4113a1,0xdfb77217c5c40b}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4363,0xd1dc,0x3a2d,0x523e,0xecad,0x20f1,0x267e,0x376e,0x661b,0x53fc,0xddaa,0xf004,0x267a,0x5b07,0xd8e1,0x6f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4363,0xd1dc,0x3a2d,0x523e,0xecad,0x20f1,0x267e,0x376e,0x661b,0x53fc,0xddaa,0xf004,0x267a,0x5b07,0xd8e1,0x6f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd1dc4363,0x523e3a2d,0x20f1ecad,0x376e267e,0x53fc661b,0xf004ddaa,0x5b07267a,0x6fd8e1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd1dc4363,0x523e3a2d,0x20f1ecad,0x376e267e,0x53fc661b,0xf004ddaa,0x5b07267a,0x6fd8e1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x523e3a2dd1dc4363,0x376e267e20f1ecad,0xf004ddaa53fc661b,0x6fd8e15b07267a}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x523e3a2dd1dc4363,0x376e267e20f1ecad,0xf004ddaa53fc661b,0x6fd8e15b07267a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcf33,0x480d,0xb630,0x1b8,0x2475,0x97c4,0x8cca,0x455c,0x141f,0x8b51,0x622b,0x778e,0x9837,0xc3c6,0xd45d,0xdb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcf33,0x480d,0xb630,0x1b8,0x2475,0x97c4,0x8cca,0x455c,0x141f,0x8b51,0x622b,0x778e,0x9837,0xc3c6,0xd45d,0xdb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x480dcf33,0x1b8b630,0x97c42475,0x455c8cca,0x8b51141f,0x778e622b,0xc3c69837,0xdbd45d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x480dcf33,0x1b8b630,0x97c42475,0x455c8cca,0x8b51141f,0x778e622b,0xc3c69837,0xdbd45d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1b8b630480dcf33,0x455c8cca97c42475,0x778e622b8b51141f,0xdbd45dc3c69837}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1b8b630480dcf33,0x455c8cca97c42475,0x778e622b8b51141f,0xdbd45dc3c69837}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 
8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x19e1, 0x1d98, 0x1de9, 0x1dfc, 0x922, 0x1fb8, 0x476, 0xd05, 0xc85, 0x1788, 0x1967, 0x155d, 0x1f93, 0x629, 0x188f, 0x119, 0x1f6f, 0x241, 0x1378, 0x0} @@ -737,223 +737,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xef13,0xa8dc,0x8ceb,0xe405,0xe2f5,0xfda5,0x28ac,0x3bbe,0x41e5,0xee91,0xb0ff,0x5f5c,0x1920,0x1e33,0xef67,0x95}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xef13,0xa8dc,0x8ceb,0xe405,0xe2f5,0xfda5,0x28ac,0x3bbe,0x41e5,0xee91,0xb0ff,0x5f5c,0x1920,0x1e33,0xef67,0x95}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa8dcef13,0xe4058ceb,0xfda5e2f5,0x3bbe28ac,0xee9141e5,0x5f5cb0ff,0x1e331920,0x95ef67}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa8dcef13,0xe4058ceb,0xfda5e2f5,0x3bbe28ac,0xee9141e5,0x5f5cb0ff,0x1e331920,0x95ef67}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe4058ceba8dcef13,0x3bbe28acfda5e2f5,0x5f5cb0ffee9141e5,0x95ef671e331920}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe4058ceba8dcef13,0x3bbe28acfda5e2f5,0x5f5cb0ffee9141e5,0x95ef671e331920}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6b6e,0x9e93,0xfbce,0xb1b6,0x80bb,0x14b8,0x20ae,0x6bcd,0xb7f4,0xfeff,0xc4a7,0xceb3,0xd874,0x65bf,0xe003,0xe9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6b6e,0x9e93,0xfbce,0xb1b6,0x80bb,0x14b8,0x20ae,0x6bcd,0xb7f4,0xfeff,0xc4a7,0xceb3,0xd874,0x65bf,0xe003,0xe9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9e936b6e,0xb1b6fbce,0x14b880bb,0x6bcd20ae,0xfeffb7f4,0xceb3c4a7,0x65bfd874,0xe9e003}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9e936b6e,0xb1b6fbce,0x14b880bb,0x6bcd20ae,0xfeffb7f4,0xceb3c4a7,0x65bfd874,0xe9e003}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb1b6fbce9e936b6e,0x6bcd20ae14b880bb,0xceb3c4a7feffb7f4,0xe9e00365bfd874}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb1b6fbce9e936b6e,0x6bcd20ae14b880bb,0xceb3c4a7feffb7f4,0xe9e00365bfd874}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x47ff,0xc988,0x46b6,0x5236,0x9694,0xec04,0xd563,0x7d56,0x6833,0xc48f,0x8b0a,0xe195,0xb64e,0xe957,0x58b2,0xdb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x47ff,0xc988,0x46b6,0x5236,0x9694,0xec04,0xd563,0x7d56,0x6833,0xc48f,0x8b0a,0xe195,0xb64e,0xe957,0x58b2,0xdb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc98847ff,0x523646b6,0xec049694,0x7d56d563,0xc48f6833,0xe1958b0a,0xe957b64e,0xdb58b2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc98847ff,0x523646b6,0xec049694,0x7d56d563,0xc48f6833,0xe1958b0a,0xe957b64e,0xdb58b2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x523646b6c98847ff,0x7d56d563ec049694,0xe1958b0ac48f6833,0xdb58b2e957b64e}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x523646b6c98847ff,0x7d56d563ec049694,0xe1958b0ac48f6833,0xdb58b2e957b64e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x10ed,0x5723,0x7314,0x1bfa,0x1d0a,0x25a,0xd753,0xc441,0xbe1a,0x116e,0x4f00,0xa0a3,0xe6df,0xe1cc,0x1098,0x6a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x10ed,0x5723,0x7314,0x1bfa,0x1d0a,0x25a,0xd753,0xc441,0xbe1a,0x116e,0x4f00,0xa0a3,0xe6df,0xe1cc,0x1098,0x6a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x572310ed,0x1bfa7314,0x25a1d0a,0xc441d753,0x116ebe1a,0xa0a34f00,0xe1cce6df,0x6a1098}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x572310ed,0x1bfa7314,0x25a1d0a,0xc441d753,0x116ebe1a,0xa0a34f00,0xe1cce6df,0x6a1098}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1bfa7314572310ed,0xc441d753025a1d0a,0xa0a34f00116ebe1a,0x6a1098e1cce6df}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1bfa7314572310ed,0xc441d753025a1d0a,0xa0a34f00116ebe1a,0x6a1098e1cce6df}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8d79,0x38f8,0xf94c,0xe776,0x2bdf,0x2d2e,0x4242,0x8677,0xddf0,0x1736,0xa2e3,0x8ee7,0x52ac,0x4bb1,0xbb55,0xa4}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8d79,0x38f8,0xf94c,0xe776,0x2bdf,0x2d2e,0x4242,0x8677,0xddf0,0x1736,0xa2e3,0x8ee7,0x52ac,0x4bb1,0xbb55,0xa4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x38f88d79,0xe776f94c,0x2d2e2bdf,0x86774242,0x1736ddf0,0x8ee7a2e3,0x4bb152ac,0xa4bb55}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x38f88d79,0xe776f94c,0x2d2e2bdf,0x86774242,0x1736ddf0,0x8ee7a2e3,0x4bb152ac,0xa4bb55}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe776f94c38f88d79,0x867742422d2e2bdf,0x8ee7a2e31736ddf0,0xa4bb554bb152ac}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe776f94c38f88d79,0x867742422d2e2bdf,0x8ee7a2e31736ddf0,0xa4bb554bb152ac}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6774,0xe280,0xc0b8,0xd49d,0x3b88,0x2577,0xc53f,0x7a5d,0x3032,0x4cfb,0xd6b2,0x3ed5,0x27b8,0x584c,0x85b1,0xfc}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6774,0xe280,0xc0b8,0xd49d,0x3b88,0x2577,0xc53f,0x7a5d,0x3032,0x4cfb,0xd6b2,0x3ed5,0x27b8,0x584c,0x85b1,0xfc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe2806774,0xd49dc0b8,0x25773b88,0x7a5dc53f,0x4cfb3032,0x3ed5d6b2,0x584c27b8,0xfc85b1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe2806774,0xd49dc0b8,0x25773b88,0x7a5dc53f,0x4cfb3032,0x3ed5d6b2,0x584c27b8,0xfc85b1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd49dc0b8e2806774,0x7a5dc53f25773b88,0x3ed5d6b24cfb3032,0xfc85b1584c27b8}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd49dc0b8e2806774,0x7a5dc53f25773b88,0x3ed5d6b24cfb3032,0xfc85b1584c27b8}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xc139,0x25cf,0xd25b,0xadb9,0xbd39,0xaa20,0x8867,0x4e7a,0x8b24,0xa81f,0x412a,0xacfc,0xee2d,0xab0c,0x1d50,0x20}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc139,0x25cf,0xd25b,0xadb9,0xbd39,0xaa20,0x8867,0x4e7a,0x8b24,0xa81f,0x412a,0xacfc,0xee2d,0xab0c,0x1d50,0x20}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x25cfc139,0xadb9d25b,0xaa20bd39,0x4e7a8867,0xa81f8b24,0xacfc412a,0xab0cee2d,0x201d50}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x25cfc139,0xadb9d25b,0xaa20bd39,0x4e7a8867,0xa81f8b24,0xacfc412a,0xab0cee2d,0x201d50}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xadb9d25b25cfc139,0x4e7a8867aa20bd39,0xacfc412aa81f8b24,0x201d50ab0cee2d}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xadb9d25b25cfc139,0x4e7a8867aa20bd39,0xacfc412aa81f8b24,0x201d50ab0cee2d}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7287,0xc707,0x6b3,0x1889,0xd420,0xd2d1,0xbdbd,0x7988,0x220f,0xe8c9,0x5d1c,0x7118,0xad53,0xb44e,0x44aa,0x5b}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7287,0xc707,0x6b3,0x1889,0xd420,0xd2d1,0xbdbd,0x7988,0x220f,0xe8c9,0x5d1c,0x7118,0xad53,0xb44e,0x44aa,0x5b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc7077287,0x188906b3,0xd2d1d420,0x7988bdbd,0xe8c9220f,0x71185d1c,0xb44ead53,0x5b44aa}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc7077287,0x188906b3,0xd2d1d420,0x7988bdbd,0xe8c9220f,0x71185d1c,0xb44ead53,0x5b44aa}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x188906b3c7077287,0x7988bdbdd2d1d420,0x71185d1ce8c9220f,0x5b44aab44ead53}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x188906b3c7077287,0x7988bdbdd2d1d420,0x71185d1ce8c9220f,0x5b44aab44ead53}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xef13,0xa8dc,0x8ceb,0xe405,0xe2f5,0xfda5,0x28ac,0x3bbe,0x41e5,0xee91,0xb0ff,0x5f5c,0x1920,0x1e33,0xef67,0x95}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xef13,0xa8dc,0x8ceb,0xe405,0xe2f5,0xfda5,0x28ac,0x3bbe,0x41e5,0xee91,0xb0ff,0x5f5c,0x1920,0x1e33,0xef67,0x95}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa8dcef13,0xe4058ceb,0xfda5e2f5,0x3bbe28ac,0xee9141e5,0x5f5cb0ff,0x1e331920,0x95ef67}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa8dcef13,0xe4058ceb,0xfda5e2f5,0x3bbe28ac,0xee9141e5,0x5f5cb0ff,0x1e331920,0x95ef67}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe4058ceba8dcef13,0x3bbe28acfda5e2f5,0x5f5cb0ffee9141e5,0x95ef671e331920}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe4058ceba8dcef13,0x3bbe28acfda5e2f5,0x5f5cb0ffee9141e5,0x95ef671e331920}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6b6e,0x9e93,0xfbce,0xb1b6,0x80bb,0x14b8,0x20ae,0x6bcd,0xb7f4,0xfeff,0xc4a7,0xceb3,0xd874,0x65bf,0xe003,0xe9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6b6e,0x9e93,0xfbce,0xb1b6,0x80bb,0x14b8,0x20ae,0x6bcd,0xb7f4,0xfeff,0xc4a7,0xceb3,0xd874,0x65bf,0xe003,0xe9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9e936b6e,0xb1b6fbce,0x14b880bb,0x6bcd20ae,0xfeffb7f4,0xceb3c4a7,0x65bfd874,0xe9e003}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0x9e936b6e,0xb1b6fbce,0x14b880bb,0x6bcd20ae,0xfeffb7f4,0xceb3c4a7,0x65bfd874,0xe9e003}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb1b6fbce9e936b6e,0x6bcd20ae14b880bb,0xceb3c4a7feffb7f4,0xe9e00365bfd874}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb1b6fbce9e936b6e,0x6bcd20ae14b880bb,0xceb3c4a7feffb7f4,0xe9e00365bfd874}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x47ff,0xc988,0x46b6,0x5236,0x9694,0xec04,0xd563,0x7d56,0x6833,0xc48f,0x8b0a,0xe195,0xb64e,0xe957,0x58b2,0xdb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x47ff,0xc988,0x46b6,0x5236,0x9694,0xec04,0xd563,0x7d56,0x6833,0xc48f,0x8b0a,0xe195,0xb64e,0xe957,0x58b2,0xdb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc98847ff,0x523646b6,0xec049694,0x7d56d563,0xc48f6833,0xe1958b0a,0xe957b64e,0xdb58b2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc98847ff,0x523646b6,0xec049694,0x7d56d563,0xc48f6833,0xe1958b0a,0xe957b64e,0xdb58b2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x523646b6c98847ff,0x7d56d563ec049694,0xe1958b0ac48f6833,0xdb58b2e957b64e}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x523646b6c98847ff,0x7d56d563ec049694,0xe1958b0ac48f6833,0xdb58b2e957b64e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x10ed,0x5723,0x7314,0x1bfa,0x1d0a,0x25a,0xd753,0xc441,0xbe1a,0x116e,0x4f00,0xa0a3,0xe6df,0xe1cc,0x1098,0x6a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x10ed,0x5723,0x7314,0x1bfa,0x1d0a,0x25a,0xd753,0xc441,0xbe1a,0x116e,0x4f00,0xa0a3,0xe6df,0xe1cc,0x1098,0x6a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x572310ed,0x1bfa7314,0x25a1d0a,0xc441d753,0x116ebe1a,0xa0a34f00,0xe1cce6df,0x6a1098}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x572310ed,0x1bfa7314,0x25a1d0a,0xc441d753,0x116ebe1a,0xa0a34f00,0xe1cce6df,0x6a1098}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1bfa7314572310ed,0xc441d753025a1d0a,0xa0a34f00116ebe1a,0x6a1098e1cce6df}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1bfa7314572310ed,0xc441d753025a1d0a,0xa0a34f00116ebe1a,0x6a1098e1cce6df}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7029,0x8b30,0x7529,0x9941,0x2be8,0x7b3f,0xe3d7,0x4553,0x7065,0x7bef,0xb49c,0xc80b,0xfa3e,0x950c,0x1ece,0x18}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7029,0x8b30,0x7529,0x9941,0x2be8,0x7b3f,0xe3d7,0x4553,0x7065,0x7bef,0xb49c,0xc80b,0xfa3e,0x950c,0x1ece,0x18}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8b307029,0x99417529,0x7b3f2be8,0x4553e3d7,0x7bef7065,0xc80bb49c,0x950cfa3e,0x181ece}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0x8b307029,0x99417529,0x7b3f2be8,0x4553e3d7,0x7bef7065,0xc80bb49c,0x950cfa3e,0x181ece}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x994175298b307029,0x4553e3d77b3f2be8,0xc80bb49c7bef7065,0x181ece950cfa3e}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x994175298b307029,0x4553e3d77b3f2be8,0xc80bb49c7bef7065,0x181ece950cfa3e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb399,0x92ce,0x85e8,0x7c82,0x86eb,0xb186,0x8924,0x64f1,0xd93,0x5e9a,0x3165,0x4196,0x5e79,0x158,0x55d5,0x31}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb399,0x92ce,0x85e8,0x7c82,0x86eb,0xb186,0x8924,0x64f1,0xd93,0x5e9a,0x3165,0x4196,0x5e79,0x158,0x55d5,0x31}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x92ceb399,0x7c8285e8,0xb18686eb,0x64f18924,0x5e9a0d93,0x41963165,0x1585e79,0x3155d5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x92ceb399,0x7c8285e8,0xb18686eb,0x64f18924,0x5e9a0d93,0x41963165,0x1585e79,0x3155d5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7c8285e892ceb399,0x64f18924b18686eb,0x419631655e9a0d93,0x3155d501585e79}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7c8285e892ceb399,0x64f18924b18686eb,0x419631655e9a0d93,0x3155d501585e79}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xda47,0x29f8,0x7209,0xaa0c,0xfc22,0x39c9,0x6e19,0x517c,0xc94e,0xcfa4,0x20fc,0x1edc,0xe0d0,0x396d,0x85f0,0xdf}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xda47,0x29f8,0x7209,0xaa0c,0xfc22,0x39c9,0x6e19,0x517c,0xc94e,0xcfa4,0x20fc,0x1edc,0xe0d0,0x396d,0x85f0,0xdf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x29f8da47,0xaa0c7209,0x39c9fc22,0x517c6e19,0xcfa4c94e,0x1edc20fc,0x396de0d0,0xdf85f0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x29f8da47,0xaa0c7209,0x39c9fc22,0x517c6e19,0xcfa4c94e,0x1edc20fc,0x396de0d0,0xdf85f0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaa0c720929f8da47,0x517c6e1939c9fc22,0x1edc20fccfa4c94e,0xdf85f0396de0d0}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaa0c720929f8da47,0x517c6e1939c9fc22,0x1edc20fccfa4c94e,0xdf85f0396de0d0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8fd7,0x74cf,0x8ad6,0x66be,0xd417,0x84c0,0x1c28,0xbaac,0x8f9a,0x8410,0x4b63,0x37f4,0x5c1,0x6af3,0xe131,0xe7}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8fd7,0x74cf,0x8ad6,0x66be,0xd417,0x84c0,0x1c28,0xbaac,0x8f9a,0x8410,0x4b63,0x37f4,0x5c1,0x6af3,0xe131,0xe7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x74cf8fd7,0x66be8ad6,0x84c0d417,0xbaac1c28,0x84108f9a,0x37f44b63,0x6af305c1,0xe7e131}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x74cf8fd7,0x66be8ad6,0x84c0d417,0xbaac1c28,0x84108f9a,0x37f44b63,0x6af305c1,0xe7e131}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x66be8ad674cf8fd7,0xbaac1c2884c0d417,0x37f44b6384108f9a,0xe7e1316af305c1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x66be8ad674cf8fd7,0xbaac1c2884c0d417,0x37f44b6384108f9a,0xe7e1316af305c1}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x13f5, 0x8b7, 0x1873, 0x144a, 0x1a89, 0x13f9, 0xd49, 0x6f2, 0x3bb, 0x102, 0x1ecb, 0x180b, 
0x4d5, 0x608, 0x119, 0x5e6, 0x751, 0x961, 0xd37, 0x5} @@ -1213,223 +1213,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5ff1,0xa594,0x52b3,0xe75d,0xdd09,0xd267,0x7d25,0xd976,0xbc5,0xc1a8,0x9aae,0x10bf,0xe894,0x8de3,0xae84,0x70}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5ff1,0xa594,0x52b3,0xe75d,0xdd09,0xd267,0x7d25,0xd976,0xbc5,0xc1a8,0x9aae,0x10bf,0xe894,0x8de3,0xae84,0x70}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5945ff1,0xe75d52b3,0xd267dd09,0xd9767d25,0xc1a80bc5,0x10bf9aae,0x8de3e894,0x70ae84}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5945ff1,0xe75d52b3,0xd267dd09,0xd9767d25,0xc1a80bc5,0x10bf9aae,0x8de3e894,0x70ae84}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe75d52b3a5945ff1,0xd9767d25d267dd09,0x10bf9aaec1a80bc5,0x70ae848de3e894}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe75d52b3a5945ff1,0xd9767d25d267dd09,0x10bf9aaec1a80bc5,0x70ae848de3e894}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x11e6,0xb9e0,0x96c0,0xa6d7,0xee81,0x4b6,0x2f44,0xf4c5,0x4597,0xe75d,0x5b93,0xebb6,0x59c6,0xc08e,0x3084,0x16}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x11e6,0xb9e0,0x96c0,0xa6d7,0xee81,0x4b6,0x2f44,0xf4c5,0x4597,0xe75d,0x5b93,0xebb6,0x59c6,0xc08e,0x3084,0x16}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb9e011e6,0xa6d796c0,0x4b6ee81,0xf4c52f44,0xe75d4597,0xebb65b93,0xc08e59c6,0x163084}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb9e011e6,0xa6d796c0,0x4b6ee81,0xf4c52f44,0xe75d4597,0xebb65b93,0xc08e59c6,0x163084}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa6d796c0b9e011e6,0xf4c52f4404b6ee81,0xebb65b93e75d4597,0x163084c08e59c6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa6d796c0b9e011e6,0xf4c52f4404b6ee81,0xebb65b93e75d4597,0x163084c08e59c6}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf41d,0x3463,0x6031,0x479f,0x6fe7,0x159f,0x1e6b,0x404e,0xe302,0x88a8,0xc1f7,0xad84,0x8b50,0x3175,0x3d66,0xab}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf41d,0x3463,0x6031,0x479f,0x6fe7,0x159f,0x1e6b,0x404e,0xe302,0x88a8,0xc1f7,0xad84,0x8b50,0x3175,0x3d66,0xab}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3463f41d,0x479f6031,0x159f6fe7,0x404e1e6b,0x88a8e302,0xad84c1f7,0x31758b50,0xab3d66}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3463f41d,0x479f6031,0x159f6fe7,0x404e1e6b,0x88a8e302,0xad84c1f7,0x31758b50,0xab3d66}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x479f60313463f41d,0x404e1e6b159f6fe7,0xad84c1f788a8e302,0xab3d6631758b50}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x479f60313463f41d,0x404e1e6b159f6fe7,0xad84c1f788a8e302,0xab3d6631758b50}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa00f,0x5a6b,0xad4c,0x18a2,0x22f6,0x2d98,0x82da,0x2689,0xf43a,0x3e57,0x6551,0xef40,0x176b,0x721c,0x517b,0x8f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xa00f,0x5a6b,0xad4c,0x18a2,0x22f6,0x2d98,0x82da,0x2689,0xf43a,0x3e57,0x6551,0xef40,0x176b,0x721c,0x517b,0x8f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a6ba00f,0x18a2ad4c,0x2d9822f6,0x268982da,0x3e57f43a,0xef406551,0x721c176b,0x8f517b}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a6ba00f,0x18a2ad4c,0x2d9822f6,0x268982da,0x3e57f43a,0xef406551,0x721c176b,0x8f517b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x18a2ad4c5a6ba00f,0x268982da2d9822f6,0xef4065513e57f43a,0x8f517b721c176b}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x18a2ad4c5a6ba00f,0x268982da2d9822f6,0xef4065513e57f43a,0x8f517b721c176b}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = 
(mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xffc3,0x1fbe,0xc7ef,0x56c4,0x2834,0xfa5c,0x36aa,0x1ced,0x9076,0xa31d,0x8890,0xe52,0x87d2,0xef68,0x98bc,0xc2}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xffc3,0x1fbe,0xc7ef,0x56c4,0x2834,0xfa5c,0x36aa,0x1ced,0x9076,0xa31d,0x8890,0xe52,0x87d2,0xef68,0x98bc,0xc2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1fbeffc3,0x56c4c7ef,0xfa5c2834,0x1ced36aa,0xa31d9076,0xe528890,0xef6887d2,0xc298bc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1fbeffc3,0x56c4c7ef,0xfa5c2834,0x1ced36aa,0xa31d9076,0xe528890,0xef6887d2,0xc298bc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x56c4c7ef1fbeffc3,0x1ced36aafa5c2834,0xe528890a31d9076,0xc298bcef6887d2}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x56c4c7ef1fbeffc3,0x1ced36aafa5c2834,0xe528890a31d9076,0xc298bcef6887d2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4098,0xd740,0xb5c6,0x8109,0x299,0x3a8c,0x81c2,0xc0d0,0xe848,0x9243,0x8996,0x656a,0x8c87,0x6c99,0xb9f5,0x4c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4098,0xd740,0xb5c6,0x8109,0x299,0x3a8c,0x81c2,0xc0d0,0xe848,0x9243,0x8996,0x656a,0x8c87,0x6c99,0xb9f5,0x4c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd7404098,0x8109b5c6,0x3a8c0299,0xc0d081c2,0x9243e848,0x656a8996,0x6c998c87,0x4cb9f5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd7404098,0x8109b5c6,0x3a8c0299,0xc0d081c2,0x9243e848,0x656a8996,0x6c998c87,0x4cb9f5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8109b5c6d7404098,0xc0d081c23a8c0299,0x656a89969243e848,0x4cb9f56c998c87}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8109b5c6d7404098,0xc0d081c23a8c0299,0x656a89969243e848,0x4cb9f56c998c87}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x712b,0xfeed,0x55b5,0xc5fe,0xe867,0x77a9,0x1775,0x7814,0x4780,0x73b1,0x86b1,0x3973,0x797a,0x7f0b,0x1fa,0xb0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x712b,0xfeed,0x55b5,0xc5fe,0xe867,0x77a9,0x1775,0x7814,0x4780,0x73b1,0x86b1,0x3973,0x797a,0x7f0b,0x1fa,0xb0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfeed712b,0xc5fe55b5,0x77a9e867,0x78141775,0x73b14780,0x397386b1,0x7f0b797a,0xb001fa}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfeed712b,0xc5fe55b5,0x77a9e867,0x78141775,0x73b14780,0x397386b1,0x7f0b797a,0xb001fa}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc5fe55b5feed712b,0x7814177577a9e867,0x397386b173b14780,0xb001fa7f0b797a}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc5fe55b5feed712b,0x7814177577a9e867,0x397386b173b14780,0xb001fa7f0b797a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3d,0xe041,0x3810,0xa93b,0xd7cb,0x5a3,0xc955,0xe312,0x6f89,0x5ce2,0x776f,0xf1ad,0x782d,0x1097,0x6743,0x3d}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3d,0xe041,0x3810,0xa93b,0xd7cb,0x5a3,0xc955,0xe312,0x6f89,0x5ce2,0x776f,0xf1ad,0x782d,0x1097,0x6743,0x3d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe041003d,0xa93b3810,0x5a3d7cb,0xe312c955,0x5ce26f89,0xf1ad776f,0x1097782d,0x3d6743}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe041003d,0xa93b3810,0x5a3d7cb,0xe312c955,0x5ce26f89,0xf1ad776f,0x1097782d,0x3d6743}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa93b3810e041003d,0xe312c95505a3d7cb,0xf1ad776f5ce26f89,0x3d67431097782d}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa93b3810e041003d,0xe312c95505a3d7cb,0xf1ad776f5ce26f89,0x3d67431097782d}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5ff1,0xa594,0x52b3,0xe75d,0xdd09,0xd267,0x7d25,0xd976,0xbc5,0xc1a8,0x9aae,0x10bf,0xe894,0x8de3,0xae84,0x70}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5ff1,0xa594,0x52b3,0xe75d,0xdd09,0xd267,0x7d25,0xd976,0xbc5,0xc1a8,0x9aae,0x10bf,0xe894,0x8de3,0xae84,0x70}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5945ff1,0xe75d52b3,0xd267dd09,0xd9767d25,0xc1a80bc5,0x10bf9aae,0x8de3e894,0x70ae84}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5945ff1,0xe75d52b3,0xd267dd09,0xd9767d25,0xc1a80bc5,0x10bf9aae,0x8de3e894,0x70ae84}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe75d52b3a5945ff1,0xd9767d25d267dd09,0x10bf9aaec1a80bc5,0x70ae848de3e894}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe75d52b3a5945ff1,0xd9767d25d267dd09,0x10bf9aaec1a80bc5,0x70ae848de3e894}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x11e6,0xb9e0,0x96c0,0xa6d7,0xee81,0x4b6,0x2f44,0xf4c5,0x4597,0xe75d,0x5b93,0xebb6,0x59c6,0xc08e,0x3084,0x16}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x11e6,0xb9e0,0x96c0,0xa6d7,0xee81,0x4b6,0x2f44,0xf4c5,0x4597,0xe75d,0x5b93,0xebb6,0x59c6,0xc08e,0x3084,0x16}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb9e011e6,0xa6d796c0,0x4b6ee81,0xf4c52f44,0xe75d4597,0xebb65b93,0xc08e59c6,0x163084}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb9e011e6,0xa6d796c0,0x4b6ee81,0xf4c52f44,0xe75d4597,0xebb65b93,0xc08e59c6,0x163084}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0xa6d796c0b9e011e6,0xf4c52f4404b6ee81,0xebb65b93e75d4597,0x163084c08e59c6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa6d796c0b9e011e6,0xf4c52f4404b6ee81,0xebb65b93e75d4597,0x163084c08e59c6}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf41d,0x3463,0x6031,0x479f,0x6fe7,0x159f,0x1e6b,0x404e,0xe302,0x88a8,0xc1f7,0xad84,0x8b50,0x3175,0x3d66,0xab}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf41d,0x3463,0x6031,0x479f,0x6fe7,0x159f,0x1e6b,0x404e,0xe302,0x88a8,0xc1f7,0xad84,0x8b50,0x3175,0x3d66,0xab}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3463f41d,0x479f6031,0x159f6fe7,0x404e1e6b,0x88a8e302,0xad84c1f7,0x31758b50,0xab3d66}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3463f41d,0x479f6031,0x159f6fe7,0x404e1e6b,0x88a8e302,0xad84c1f7,0x31758b50,0xab3d66}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x479f60313463f41d,0x404e1e6b159f6fe7,0xad84c1f788a8e302,0xab3d6631758b50}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x479f60313463f41d,0x404e1e6b159f6fe7,0xad84c1f788a8e302,0xab3d6631758b50}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa00f,0x5a6b,0xad4c,0x18a2,0x22f6,0x2d98,0x82da,0x2689,0xf43a,0x3e57,0x6551,0xef40,0x176b,0x721c,0x517b,0x8f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa00f,0x5a6b,0xad4c,0x18a2,0x22f6,0x2d98,0x82da,0x2689,0xf43a,0x3e57,0x6551,0xef40,0x176b,0x721c,0x517b,0x8f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a6ba00f,0x18a2ad4c,0x2d9822f6,0x268982da,0x3e57f43a,0xef406551,0x721c176b,0x8f517b}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a6ba00f,0x18a2ad4c,0x2d9822f6,0x268982da,0x3e57f43a,0xef406551,0x721c176b,0x8f517b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x18a2ad4c5a6ba00f,0x268982da2d9822f6,0xef4065513e57f43a,0x8f517b721c176b}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x18a2ad4c5a6ba00f,0x268982da2d9822f6,0xef4065513e57f43a,0x8f517b721c176b}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5d2b,0x1bd6,0xcc3f,0x7e74,0x4fea,0xfba0,0x9f84,0xd6d4,0x42a1,0x88d1,0x68b1,0x4f4e,0x13ec,0xa60c,0xb13b,0x2e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5d2b,0x1bd6,0xcc3f,0x7e74,0x4fea,0xfba0,0x9f84,0xd6d4,0x42a1,0x88d1,0x68b1,0x4f4e,0x13ec,0xa60c,0xb13b,0x2e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1bd65d2b,0x7e74cc3f,0xfba04fea,0xd6d49f84,0x88d142a1,0x4f4e68b1,0xa60c13ec,0x2eb13b}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1bd65d2b,0x7e74cc3f,0xfba04fea,0xd6d49f84,0x88d142a1,0x4f4e68b1,0xa60c13ec,0x2eb13b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x7e74cc3f1bd65d2b,0xd6d49f84fba04fea,0x4f4e68b188d142a1,0x2eb13ba60c13ec}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7e74cc3f1bd65d2b,0xd6d49f84fba04fea,0x4f4e68b188d142a1,0x2eb13ba60c13ec}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7b4f,0x9448,0xaa16,0x649a,0xe4b4,0x3bc2,0xd3fd,0x8df1,0x931e,0x4078,0x8caa,0xe896,0xdeec,0xbed5,0x166e,0x7c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7b4f,0x9448,0xaa16,0x649a,0xe4b4,0x3bc2,0xd3fd,0x8df1,0x931e,0x4078,0x8caa,0xe896,0xdeec,0xbed5,0x166e,0x7c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x94487b4f,0x649aaa16,0x3bc2e4b4,0x8df1d3fd,0x4078931e,0xe8968caa,0xbed5deec,0x7c166e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x94487b4f,0x649aaa16,0x3bc2e4b4,0x8df1d3fd,0x4078931e,0xe8968caa,0xbed5deec,0x7c166e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x649aaa1694487b4f,0x8df1d3fd3bc2e4b4,0xe8968caa4078931e,0x7c166ebed5deec}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x649aaa1694487b4f,0x8df1d3fd3bc2e4b4,0xe8968caa4078931e,0x7c166ebed5deec}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x101d,0x51aa,0xd32d,0x2b40,0x7ba,0xc5f8,0x257a,0xb323,0x9bde,0x20c5,0xdc8f,0x2c3d,0x4e7b,0x54a6,0x17b9,0x99}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x101d,0x51aa,0xd32d,0x2b40,0x7ba,0xc5f8,0x257a,0xb323,0x9bde,0x20c5,0xdc8f,0x2c3d,0x4e7b,0x54a6,0x17b9,0x99}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x51aa101d,0x2b40d32d,0xc5f807ba,0xb323257a,0x20c59bde,0x2c3ddc8f,0x54a64e7b,0x9917b9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x51aa101d,0x2b40d32d,0xc5f807ba,0xb323257a,0x20c59bde,0x2c3ddc8f,0x54a64e7b,0x9917b9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2b40d32d51aa101d,0xb323257ac5f807ba,0x2c3ddc8f20c59bde,0x9917b954a64e7b}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2b40d32d51aa101d,0xb323257ac5f807ba,0x2c3ddc8f20c59bde,0x9917b954a64e7b}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa2d5,0xe429,0x33c0,0x818b,0xb015,0x45f,0x607b,0x292b,0xbd5e,0x772e,0x974e,0xb0b1,0xec13,0x59f3,0x4ec4,0xd1}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa2d5,0xe429,0x33c0,0x818b,0xb015,0x45f,0x607b,0x292b,0xbd5e,0x772e,0x974e,0xb0b1,0xec13,0x59f3,0x4ec4,0xd1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe429a2d5,0x818b33c0,0x45fb015,0x292b607b,0x772ebd5e,0xb0b1974e,0x59f3ec13,0xd14ec4}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe429a2d5,0x818b33c0,0x45fb015,0x292b607b,0x772ebd5e,0xb0b1974e,0x59f3ec13,0xd14ec4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x818b33c0e429a2d5,0x292b607b045fb015,0xb0b1974e772ebd5e,0xd14ec459f3ec13}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x818b33c0e429a2d5,0x292b607b045fb015,0xb0b1974e772ebd5e,0xd14ec459f3ec13}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0xa72, 0x186f, 0x81c, 0x105c, 0x151, 0x1451, 0x1b96, 0x4d1, 0x12be, 0x3bf, 0x996, 0x1130, 0x1460, 0x1ed8, 0xc2a, 0x1c23, 0x1ee, 0x3f4, 0xbe2, 0x9} @@ -1689,223 +1689,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 
0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x323f,0x7ed2,0x4455,0x415c,0x5876,0x4282,0x76ef,0xcc11,0xbdab,0x1142,0x4729,0x3405,0x1155,0x1779,0x7c1f,0xc5}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x323f,0x7ed2,0x4455,0x415c,0x5876,0x4282,0x76ef,0xcc11,0xbdab,0x1142,0x4729,0x3405,0x1155,0x1779,0x7c1f,0xc5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7ed2323f,0x415c4455,0x42825876,0xcc1176ef,0x1142bdab,0x34054729,0x17791155,0xc57c1f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7ed2323f,0x415c4455,0x42825876,0xcc1176ef,0x1142bdab,0x34054729,0x17791155,0xc57c1f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x415c44557ed2323f,0xcc1176ef42825876,0x340547291142bdab,0xc57c1f17791155}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x415c44557ed2323f,0xcc1176ef42825876,0x340547291142bdab,0xc57c1f17791155}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc9ce,0xa958,0x4fac,0x8a69,0x31e1,0x9997,0x1a17,0x8c19,0xd118,0x68c7,0xc0eb,0x8113,0x62fd,0xf8c8,0xc94e,0x3f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc9ce,0xa958,0x4fac,0x8a69,0x31e1,0x9997,0x1a17,0x8c19,0xd118,0x68c7,0xc0eb,0x8113,0x62fd,0xf8c8,0xc94e,0x3f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa958c9ce,0x8a694fac,0x999731e1,0x8c191a17,0x68c7d118,0x8113c0eb,0xf8c862fd,0x3fc94e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa958c9ce,0x8a694fac,0x999731e1,0x8c191a17,0x68c7d118,0x8113c0eb,0xf8c862fd,0x3fc94e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8a694faca958c9ce,0x8c191a17999731e1,0x8113c0eb68c7d118,0x3fc94ef8c862fd}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8a694faca958c9ce,0x8c191a17999731e1,0x8113c0eb68c7d118,0x3fc94ef8c862fd}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x41cb,0xed47,0x8ea0,0x127c,0x69d5,0xf74c,0xce8c,0x3826,0x3da2,0x6bf3,0x56b,0xe695,0xc45c,0x84ac,0x5817,0xd0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x41cb,0xed47,0x8ea0,0x127c,0x69d5,0xf74c,0xce8c,0x3826,0x3da2,0x6bf3,0x56b,0xe695,0xc45c,0x84ac,0x5817,0xd0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xed4741cb,0x127c8ea0,0xf74c69d5,0x3826ce8c,0x6bf33da2,0xe695056b,0x84acc45c,0xd05817}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xed4741cb,0x127c8ea0,0xf74c69d5,0x3826ce8c,0x6bf33da2,0xe695056b,0x84acc45c,0xd05817}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x127c8ea0ed4741cb,0x3826ce8cf74c69d5,0xe695056b6bf33da2,0xd0581784acc45c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x127c8ea0ed4741cb,0x3826ce8cf74c69d5,0xe695056b6bf33da2,0xd0581784acc45c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcdc1,0x812d,0xbbaa,0xbea3,0xa789,0xbd7d,0x8910,0x33ee,0x4254,0xeebd,0xb8d6,0xcbfa,0xeeaa,0xe886,0x83e0,0x3a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcdc1,0x812d,0xbbaa,0xbea3,0xa789,0xbd7d,0x8910,0x33ee,0x4254,0xeebd,0xb8d6,0xcbfa,0xeeaa,0xe886,0x83e0,0x3a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0x812dcdc1,0xbea3bbaa,0xbd7da789,0x33ee8910,0xeebd4254,0xcbfab8d6,0xe886eeaa,0x3a83e0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x812dcdc1,0xbea3bbaa,0xbd7da789,0x33ee8910,0xeebd4254,0xcbfab8d6,0xe886eeaa,0x3a83e0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbea3bbaa812dcdc1,0x33ee8910bd7da789,0xcbfab8d6eebd4254,0x3a83e0e886eeaa}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbea3bbaa812dcdc1,0x33ee8910bd7da789,0xcbfab8d6eebd4254,0x3a83e0e886eeaa}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5286,0x497a,0x2196,0xaa2d,0xa98a,0x7b17,0xa6e9,0x6da0,0x73f5,0xe349,0x83ae,0x7c6e,0x31,0xee2e,0x6931,0x32}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5286,0x497a,0x2196,0xaa2d,0xa98a,0x7b17,0xa6e9,0x6da0,0x73f5,0xe349,0x83ae,0x7c6e,0x31,0xee2e,0x6931,0x32}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x497a5286,0xaa2d2196,0x7b17a98a,0x6da0a6e9,0xe34973f5,0x7c6e83ae,0xee2e0031,0x326931}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x497a5286,0xaa2d2196,0x7b17a98a,0x6da0a6e9,0xe34973f5,0x7c6e83ae,0xee2e0031,0x326931}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaa2d2196497a5286,0x6da0a6e97b17a98a,0x7c6e83aee34973f5,0x326931ee2e0031}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaa2d2196497a5286,0x6da0a6e97b17a98a,0x7c6e83aee34973f5,0x326931ee2e0031}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc8dc,0x8642,0xe46d,0x6a64,0x4c13,0x90e7,0xd82b,0x9b58,0x64b4,0x7381,0x5218,0x99b4,0x5926,0x5d78,0x95e2,0xb7}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc8dc,0x8642,0xe46d,0x6a64,0x4c13,0x90e7,0xd82b,0x9b58,0x64b4,0x7381,0x5218,0x99b4,0x5926,0x5d78,0x95e2,0xb7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8642c8dc,0x6a64e46d,0x90e74c13,0x9b58d82b,0x738164b4,0x99b45218,0x5d785926,0xb795e2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8642c8dc,0x6a64e46d,0x90e74c13,0x9b58d82b,0x738164b4,0x99b45218,0x5d785926,0xb795e2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6a64e46d8642c8dc,0x9b58d82b90e74c13,0x99b45218738164b4,0xb795e25d785926}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x6a64e46d8642c8dc,0x9b58d82b90e74c13,0x99b45218738164b4,0xb795e25d785926}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7363,0xbe7a,0xc901,0xb6e0,0x6a56,0x779d,0xbc42,0xd659,0x3476,0x3868,0x12f4,0x923a,0x6fa8,0x5412,0xd5f9,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7363,0xbe7a,0xc901,0xb6e0,0x6a56,0x779d,0xbc42,0xd659,0x3476,0x3868,0x12f4,0x923a,0x6fa8,0x5412,0xd5f9,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbe7a7363,0xb6e0c901,0x779d6a56,0xd659bc42,0x38683476,0x923a12f4,0x54126fa8,0x3d5f9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbe7a7363,0xb6e0c901,0x779d6a56,0xd659bc42,0x38683476,0x923a12f4,0x54126fa8,0x3d5f9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6e0c901be7a7363,0xd659bc42779d6a56,0x923a12f438683476,0x3d5f954126fa8}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6e0c901be7a7363,0xd659bc42779d6a56,0x923a12f438683476,0x3d5f954126fa8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xedb4,0x4fd4,0x5c14,0x14b,0xf702,0xd6be,0x9c11,0x4bb,0x9f10,0xde25,0xb159,0x5085,0xb0a9,0x6f42,0xc4d3,0x1d}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xedb4,0x4fd4,0x5c14,0x14b,0xf702,0xd6be,0x9c11,0x4bb,0x9f10,0xde25,0xb159,0x5085,0xb0a9,0x6f42,0xc4d3,0x1d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4fd4edb4,0x14b5c14,0xd6bef702,0x4bb9c11,0xde259f10,0x5085b159,0x6f42b0a9,0x1dc4d3}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4fd4edb4,0x14b5c14,0xd6bef702,0x4bb9c11,0xde259f10,0x5085b159,0x6f42b0a9,0x1dc4d3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x14b5c144fd4edb4,0x4bb9c11d6bef702,0x5085b159de259f10,0x1dc4d36f42b0a9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x14b5c144fd4edb4,0x4bb9c11d6bef702,0x5085b159de259f10,0x1dc4d36f42b0a9}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe873,0x4974,0xc7ed,0x6b01,0xaffb,0xf3d4,0xc641,0x20d6,0xca22,0x2d69,0x9f01,0x451e,0xfa05,0xef65,0xb43b,0xde}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe873,0x4974,0xc7ed,0x6b01,0xaffb,0xf3d4,0xc641,0x20d6,0xca22,0x2d69,0x9f01,0x451e,0xfa05,0xef65,0xb43b,0xde}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d 
= (mp_limb_t[]) {0x4974e873,0x6b01c7ed,0xf3d4affb,0x20d6c641,0x2d69ca22,0x451e9f01,0xef65fa05,0xdeb43b}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4974e873,0x6b01c7ed,0xf3d4affb,0x20d6c641,0x2d69ca22,0x451e9f01,0xef65fa05,0xdeb43b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6b01c7ed4974e873,0x20d6c641f3d4affb,0x451e9f012d69ca22,0xdeb43bef65fa05}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6b01c7ed4974e873,0x20d6c641f3d4affb,0x451e9f012d69ca22,0xdeb43bef65fa05}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8c9d,0x4185,0x36fe,0x491f,0x95a9,0x8862,0x43bd,0x29a6,0xcb89,0xc797,0xed0b,0x6dc5,0x9057,0xabed,0x2a06,0xfc}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8c9d,0x4185,0x36fe,0x491f,0x95a9,0x8862,0x43bd,0x29a6,0xcb89,0xc797,0xed0b,0x6dc5,0x9057,0xabed,0x2a06,0xfc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x41858c9d,0x491f36fe,0x886295a9,0x29a643bd,0xc797cb89,0x6dc5ed0b,0xabed9057,0xfc2a06}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x41858c9d,0x491f36fe,0x886295a9,0x29a643bd,0xc797cb89,0x6dc5ed0b,0xabed9057,0xfc2a06}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x491f36fe41858c9d,0x29a643bd886295a9,0x6dc5ed0bc797cb89,0xfc2a06abed9057}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x491f36fe41858c9d,0x29a643bd886295a9,0x6dc5ed0bc797cb89,0xfc2a06abed9057}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x323f,0x7ed2,0x4455,0x415c,0x5876,0x4282,0x76ef,0xcc11,0xbdab,0x1142,0x4729,0x3405,0x1155,0x1779,0x7c1f,0xc5}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x323f,0x7ed2,0x4455,0x415c,0x5876,0x4282,0x76ef,0xcc11,0xbdab,0x1142,0x4729,0x3405,0x1155,0x1779,0x7c1f,0xc5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7ed2323f,0x415c4455,0x42825876,0xcc1176ef,0x1142bdab,0x34054729,0x17791155,0xc57c1f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7ed2323f,0x415c4455,0x42825876,0xcc1176ef,0x1142bdab,0x34054729,0x17791155,0xc57c1f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x415c44557ed2323f,0xcc1176ef42825876,0x340547291142bdab,0xc57c1f17791155}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x415c44557ed2323f,0xcc1176ef42825876,0x340547291142bdab,0xc57c1f17791155}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc9ce,0xa958,0x4fac,0x8a69,0x31e1,0x9997,0x1a17,0x8c19,0xd118,0x68c7,0xc0eb,0x8113,0x62fd,0xf8c8,0xc94e,0x3f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc9ce,0xa958,0x4fac,0x8a69,0x31e1,0x9997,0x1a17,0x8c19,0xd118,0x68c7,0xc0eb,0x8113,0x62fd,0xf8c8,0xc94e,0x3f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa958c9ce,0x8a694fac,0x999731e1,0x8c191a17,0x68c7d118,0x8113c0eb,0xf8c862fd,0x3fc94e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa958c9ce,0x8a694fac,0x999731e1,0x8c191a17,0x68c7d118,0x8113c0eb,0xf8c862fd,0x3fc94e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8a694faca958c9ce,0x8c191a17999731e1,0x8113c0eb68c7d118,0x3fc94ef8c862fd}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x8a694faca958c9ce,0x8c191a17999731e1,0x8113c0eb68c7d118,0x3fc94ef8c862fd}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x41cb,0xed47,0x8ea0,0x127c,0x69d5,0xf74c,0xce8c,0x3826,0x3da2,0x6bf3,0x56b,0xe695,0xc45c,0x84ac,0x5817,0xd0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x41cb,0xed47,0x8ea0,0x127c,0x69d5,0xf74c,0xce8c,0x3826,0x3da2,0x6bf3,0x56b,0xe695,0xc45c,0x84ac,0x5817,0xd0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xed4741cb,0x127c8ea0,0xf74c69d5,0x3826ce8c,0x6bf33da2,0xe695056b,0x84acc45c,0xd05817}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xed4741cb,0x127c8ea0,0xf74c69d5,0x3826ce8c,0x6bf33da2,0xe695056b,0x84acc45c,0xd05817}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x127c8ea0ed4741cb,0x3826ce8cf74c69d5,0xe695056b6bf33da2,0xd0581784acc45c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x127c8ea0ed4741cb,0x3826ce8cf74c69d5,0xe695056b6bf33da2,0xd0581784acc45c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcdc1,0x812d,0xbbaa,0xbea3,0xa789,0xbd7d,0x8910,0x33ee,0x4254,0xeebd,0xb8d6,0xcbfa,0xeeaa,0xe886,0x83e0,0x3a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcdc1,0x812d,0xbbaa,0xbea3,0xa789,0xbd7d,0x8910,0x33ee,0x4254,0xeebd,0xb8d6,0xcbfa,0xeeaa,0xe886,0x83e0,0x3a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x812dcdc1,0xbea3bbaa,0xbd7da789,0x33ee8910,0xeebd4254,0xcbfab8d6,0xe886eeaa,0x3a83e0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x812dcdc1,0xbea3bbaa,0xbd7da789,0x33ee8910,0xeebd4254,0xcbfab8d6,0xe886eeaa,0x3a83e0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbea3bbaa812dcdc1,0x33ee8910bd7da789,0xcbfab8d6eebd4254,0x3a83e0e886eeaa}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbea3bbaa812dcdc1,0x33ee8910bd7da789,0xcbfab8d6eebd4254,0x3a83e0e886eeaa}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2943,0x24bd,0x90cb,0x5516,0xd4c5,0xbd8b,0x5374,0xb6d0,0xb9fa,0x71a4,0x41d7,0xbe37,0x18,0xf717,0x3498,0x99}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2943,0x24bd,0x90cb,0x5516,0xd4c5,0xbd8b,0x5374,0xb6d0,0xb9fa,0x71a4,0x41d7,0xbe37,0x18,0xf717,0x3498,0x99}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, 
._mp_d = (mp_limb_t[]) {0x24bd2943,0x551690cb,0xbd8bd4c5,0xb6d05374,0x71a4b9fa,0xbe3741d7,0xf7170018,0x993498}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x24bd2943,0x551690cb,0xbd8bd4c5,0xb6d05374,0x71a4b9fa,0xbe3741d7,0xf7170018,0x993498}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x551690cb24bd2943,0xb6d05374bd8bd4c5,0xbe3741d771a4b9fa,0x993498f7170018}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x551690cb24bd2943,0xb6d05374bd8bd4c5,0xbe3741d771a4b9fa,0x993498f7170018}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x646e,0xc321,0x7236,0xb532,0xa609,0xc873,0x6c15,0x4dac,0xb25a,0x39c0,0x290c,0x4cda,0x2c93,0x2ebc,0xcaf1,0xdb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x646e,0xc321,0x7236,0xb532,0xa609,0xc873,0x6c15,0x4dac,0xb25a,0x39c0,0x290c,0x4cda,0x2c93,0x2ebc,0xcaf1,0xdb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc321646e,0xb5327236,0xc873a609,0x4dac6c15,0x39c0b25a,0x4cda290c,0x2ebc2c93,0xdbcaf1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc321646e,0xb5327236,0xc873a609,0x4dac6c15,0x39c0b25a,0x4cda290c,0x2ebc2c93,0xdbcaf1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb5327236c321646e,0x4dac6c15c873a609,0x4cda290c39c0b25a,0xdbcaf12ebc2c93}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb5327236c321646e,0x4dac6c15c873a609,0x4cda290c39c0b25a,0xdbcaf12ebc2c93}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca5b,0x1036,0x34a6,0x490c,0xc0ed,0x771b,0x1590,0x1c17,0x4855,0x977e,0x8054,0xdb98,0xb26f,0x1175,0x7722,0xfe}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca5b,0x1036,0x34a6,0x490c,0xc0ed,0x771b,0x1590,0x1c17,0x4855,0x977e,0x8054,0xdb98,0xb26f,0x1175,0x7722,0xfe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1036ca5b,0x490c34a6,0x771bc0ed,0x1c171590,0x977e4855,0xdb988054,0x1175b26f,0xfe7722}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1036ca5b,0x490c34a6,0x771bc0ed,0x1c171590,0x977e4855,0xdb988054,0x1175b26f,0xfe7722}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x490c34a61036ca5b,0x1c171590771bc0ed,0xdb988054977e4855,0xfe77221175b26f}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x490c34a61036ca5b,0x1c171590771bc0ed,0xdb988054977e4855,0xfe77221175b26f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf543,0x821c,0xae0a,0xb0cb,0x642d,0x5a80,0xd2bf,0x2340,0xc8f,0xe1ce,0x4e38,0xdace,0x3445,0x807e,0x9bc4,0x5}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf543,0x821c,0xae0a,0xb0cb,0x642d,0x5a80,0xd2bf,0x2340,0xc8f,0xe1ce,0x4e38,0xdace,0x3445,0x807e,0x9bc4,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x821cf543,0xb0cbae0a,0x5a80642d,0x2340d2bf,0xe1ce0c8f,0xdace4e38,0x807e3445,0x59bc4}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x821cf543,0xb0cbae0a,0x5a80642d,0x2340d2bf,0xe1ce0c8f,0xdace4e38,0x807e3445,0x59bc4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb0cbae0a821cf543,0x2340d2bf5a80642d,0xdace4e38e1ce0c8f,0x59bc4807e3445}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb0cbae0a821cf543,0x2340d2bf5a80642d,0xdace4e38e1ce0c8f,0x59bc4807e3445}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6e85,0xc3dc,0xfd4,0x39a7,0x5158,0x777b,0xb83,0xb0fe,0x55de,0x45b3,0x103f,0x53dc,0x27e2,0xb6cb,0x2b18,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6e85,0xc3dc,0xfd4,0x39a7,0x5158,0x777b,0xb83,0xb0fe,0x55de,0x45b3,0x103f,0x53dc,0x27e2,0xb6cb,0x2b18,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc3dc6e85,0x39a70fd4,0x777b5158,0xb0fe0b83,0x45b355de,0x53dc103f,0xb6cb27e2,0x12b18}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc3dc6e85,0x39a70fd4,0x777b5158,0xb0fe0b83,0x45b355de,0x53dc103f,0xb6cb27e2,0x12b18}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x39a70fd4c3dc6e85,0xb0fe0b83777b5158,0x53dc103f45b355de,0x12b18b6cb27e2}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x39a70fd4c3dc6e85,0xb0fe0b83777b5158,0x53dc103f45b355de,0x12b18b6cb27e2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x35a5,0xefc9,0xcb59,0xb6f3,0x3f12,0x88e4,0xea6f,0xe3e8,0xb7aa,0x6881,0x7fab,0x2467,0x4d90,0xee8a,0x88dd,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x35a5,0xefc9,0xcb59,0xb6f3,0x3f12,0x88e4,0xea6f,0xe3e8,0xb7aa,0x6881,0x7fab,0x2467,0x4d90,0xee8a,0x88dd,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xefc935a5,0xb6f3cb59,0x88e43f12,0xe3e8ea6f,0x6881b7aa,0x24677fab,0xee8a4d90,0x188dd}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xefc935a5,0xb6f3cb59,0x88e43f12,0xe3e8ea6f,0x6881b7aa,0x24677fab,0xee8a4d90,0x188dd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f3cb59efc935a5,0xe3e8ea6f88e43f12,0x24677fab6881b7aa,0x188ddee8a4d90}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f3cb59efc935a5,0xe3e8ea6f88e43f12,0x24677fab6881b7aa,0x188ddee8a4d90}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x102e, 0x4c4, 0x1586, 0x1184, 0xa12, 0x9ce, 0x1866, 0x433, 0x5ef, 0x82a, 0x13a5, 0xbc6, 0x591, 0x175b, 0x10bc, 0xfa5, 0x109d, 0x8d4, 0x1325, 0x9} @@ -2165,223 +2165,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x900d,0xd052,0xb453,0x206a,0xe61d,0x31f2,0xc579,0xfb21,0xc870,0x2bb,0xf38f,0xf9c1,0x83aa,0x47f1,0x58d1,0xeb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x900d,0xd052,0xb453,0x206a,0xe61d,0x31f2,0xc579,0xfb21,0xc870,0x2bb,0xf38f,0xf9c1,0x83aa,0x47f1,0x58d1,0xeb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd052900d,0x206ab453,0x31f2e61d,0xfb21c579,0x2bbc870,0xf9c1f38f,0x47f183aa,0xeb58d1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd052900d,0x206ab453,0x31f2e61d,0xfb21c579,0x2bbc870,0xf9c1f38f,0x47f183aa,0xeb58d1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x206ab453d052900d,0xfb21c57931f2e61d,0xf9c1f38f02bbc870,0xeb58d147f183aa}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x206ab453d052900d,0xfb21c57931f2e61d,0xf9c1f38f02bbc870,0xeb58d147f183aa}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x727e,0xa5e8,0xebc3,0x9e04,0xf1eb,0x38d7,0x68e0,0x8ea9,0x8f77,0x8331,0x48eb,0x82c0,0xb0a3,0x3583,0xf221,0x54}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x727e,0xa5e8,0xebc3,0x9e04,0xf1eb,0x38d7,0x68e0,0x8ea9,0x8f77,0x8331,0x48eb,0x82c0,0xb0a3,0x3583,0xf221,0x54}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5e8727e,0x9e04ebc3,0x38d7f1eb,0x8ea968e0,0x83318f77,0x82c048eb,0x3583b0a3,0x54f221}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5e8727e,0x9e04ebc3,0x38d7f1eb,0x8ea968e0,0x83318f77,0x82c048eb,0x3583b0a3,0x54f221}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9e04ebc3a5e8727e,0x8ea968e038d7f1eb,0x82c048eb83318f77,0x54f2213583b0a3}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9e04ebc3a5e8727e,0x8ea968e038d7f1eb,0x82c048eb83318f77,0x54f2213583b0a3}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdba9,0x49e4,0xc9b3,0x34e9,0x7ab9,0xd876,0xcae0,0xcfb0,0x6177,0x26b3,0x2c98,0x1e30,0x4a38,0x53cc,0x3bdc,0x71}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdba9,0x49e4,0xc9b3,0x34e9,0x7ab9,0xd876,0xcae0,0xcfb0,0x6177,0x26b3,0x2c98,0x1e30,0x4a38,0x53cc,0x3bdc,0x71}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x49e4dba9,0x34e9c9b3,0xd8767ab9,0xcfb0cae0,0x26b36177,0x1e302c98,0x53cc4a38,0x713bdc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x49e4dba9,0x34e9c9b3,0xd8767ab9,0xcfb0cae0,0x26b36177,0x1e302c98,0x53cc4a38,0x713bdc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x34e9c9b349e4dba9,0xcfb0cae0d8767ab9,0x1e302c9826b36177,0x713bdc53cc4a38}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x34e9c9b349e4dba9,0xcfb0cae0d8767ab9,0x1e302c9826b36177,0x713bdc53cc4a38}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6ff3,0x2fad,0x4bac,0xdf95,0x19e2,0xce0d,0x3a86,0x4de,0x378f,0xfd44,0xc70,0x63e,0x7c55,0xb80e,0xa72e,0x14}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6ff3,0x2fad,0x4bac,0xdf95,0x19e2,0xce0d,0x3a86,0x4de,0x378f,0xfd44,0xc70,0x63e,0x7c55,0xb80e,0xa72e,0x14}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2fad6ff3,0xdf954bac,0xce0d19e2,0x4de3a86,0xfd44378f,0x63e0c70,0xb80e7c55,0x14a72e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0x2fad6ff3,0xdf954bac,0xce0d19e2,0x4de3a86,0xfd44378f,0x63e0c70,0xb80e7c55,0x14a72e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdf954bac2fad6ff3,0x4de3a86ce0d19e2,0x63e0c70fd44378f,0x14a72eb80e7c55}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdf954bac2fad6ff3,0x4de3a86ce0d19e2,0x63e0c70fd44378f,0x14a72eb80e7c55}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2417,0x1b00,0xcfe,0x8960,0x662e,0x42d2,0xc00f,0x222c,0x7671,0x278b,0x863f,0xbcac,0xdb9c,0x6e5e,0x4c5a,0x1b}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2417,0x1b00,0xcfe,0x8960,0x662e,0x42d2,0xc00f,0x222c,0x7671,0x278b,0x863f,0xbcac,0xdb9c,0x6e5e,0x4c5a,0x1b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1b002417,0x89600cfe,0x42d2662e,0x222cc00f,0x278b7671,0xbcac863f,0x6e5edb9c,0x1b4c5a}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1b002417,0x89600cfe,0x42d2662e,0x222cc00f,0x278b7671,0xbcac863f,0x6e5edb9c,0x1b4c5a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x89600cfe1b002417,0x222cc00f42d2662e,0xbcac863f278b7671,0x1b4c5a6e5edb9c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x89600cfe1b002417,0x222cc00f42d2662e,0xbcac863f278b7671,0x1b4c5a6e5edb9c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x21e8,0xd92b,0x5a2d,0xef86,0xf492,0x1483,0x8ae0,0x6b37,0x7f78,0x7b90,0x69c5,0xf4ec,0x2fb9,0x1660,0x8296,0xf8}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x21e8,0xd92b,0x5a2d,0xef86,0xf492,0x1483,0x8ae0,0x6b37,0x7f78,0x7b90,0x69c5,0xf4ec,0x2fb9,0x1660,0x8296,0xf8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd92b21e8,0xef865a2d,0x1483f492,0x6b378ae0,0x7b907f78,0xf4ec69c5,0x16602fb9,0xf88296}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd92b21e8,0xef865a2d,0x1483f492,0x6b378ae0,0x7b907f78,0xf4ec69c5,0x16602fb9,0xf88296}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xef865a2dd92b21e8,0x6b378ae01483f492,0xf4ec69c57b907f78,0xf8829616602fb9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xef865a2dd92b21e8,0x6b378ae01483f492,0xf4ec69c57b907f78,0xf8829616602fb9}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x38ff,0x5dc5,0x9aea,0xbc0e,0xbea5,0x775d,0x447b,0xc311,0xf01c,0xb63a,0x15fd,0x162a,0xab76,0x9def,0x2a0d,0xc5}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x38ff,0x5dc5,0x9aea,0xbc0e,0xbea5,0x775d,0x447b,0xc311,0xf01c,0xb63a,0x15fd,0x162a,0xab76,0x9def,0x2a0d,0xc5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5dc538ff,0xbc0e9aea,0x775dbea5,0xc311447b,0xb63af01c,0x162a15fd,0x9defab76,0xc52a0d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0x5dc538ff,0xbc0e9aea,0x775dbea5,0xc311447b,0xb63af01c,0x162a15fd,0x9defab76,0xc52a0d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbc0e9aea5dc538ff,0xc311447b775dbea5,0x162a15fdb63af01c,0xc52a0d9defab76}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbc0e9aea5dc538ff,0xc311447b775dbea5,0x162a15fdb63af01c,0xc52a0d9defab76}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdbe9,0xe4ff,0xf301,0x769f,0x99d1,0xbd2d,0x3ff0,0xddd3,0x898e,0xd874,0x79c0,0x4353,0x2463,0x91a1,0xb3a5,0xe4}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdbe9,0xe4ff,0xf301,0x769f,0x99d1,0xbd2d,0x3ff0,0xddd3,0x898e,0xd874,0x79c0,0x4353,0x2463,0x91a1,0xb3a5,0xe4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe4ffdbe9,0x769ff301,0xbd2d99d1,0xddd33ff0,0xd874898e,0x435379c0,0x91a12463,0xe4b3a5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe4ffdbe9,0x769ff301,0xbd2d99d1,0xddd33ff0,0xd874898e,0x435379c0,0x91a12463,0xe4b3a5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x769ff301e4ffdbe9,0xddd33ff0bd2d99d1,0x435379c0d874898e,0xe4b3a591a12463}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x769ff301e4ffdbe9,0xddd33ff0bd2d99d1,0x435379c0d874898e,0xe4b3a591a12463}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x900d,0xd052,0xb453,0x206a,0xe61d,0x31f2,0xc579,0xfb21,0xc870,0x2bb,0xf38f,0xf9c1,0x83aa,0x47f1,0x58d1,0xeb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x900d,0xd052,0xb453,0x206a,0xe61d,0x31f2,0xc579,0xfb21,0xc870,0x2bb,0xf38f,0xf9c1,0x83aa,0x47f1,0x58d1,0xeb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd052900d,0x206ab453,0x31f2e61d,0xfb21c579,0x2bbc870,0xf9c1f38f,0x47f183aa,0xeb58d1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd052900d,0x206ab453,0x31f2e61d,0xfb21c579,0x2bbc870,0xf9c1f38f,0x47f183aa,0xeb58d1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x206ab453d052900d,0xfb21c57931f2e61d,0xf9c1f38f02bbc870,0xeb58d147f183aa}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x206ab453d052900d,0xfb21c57931f2e61d,0xf9c1f38f02bbc870,0xeb58d147f183aa}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x727e,0xa5e8,0xebc3,0x9e04,0xf1eb,0x38d7,0x68e0,0x8ea9,0x8f77,0x8331,0x48eb,0x82c0,0xb0a3,0x3583,0xf221,0x54}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x727e,0xa5e8,0xebc3,0x9e04,0xf1eb,0x38d7,0x68e0,0x8ea9,0x8f77,0x8331,0x48eb,0x82c0,0xb0a3,0x3583,0xf221,0x54}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5e8727e,0x9e04ebc3,0x38d7f1eb,0x8ea968e0,0x83318f77,0x82c048eb,0x3583b0a3,0x54f221}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5e8727e,0x9e04ebc3,0x38d7f1eb,0x8ea968e0,0x83318f77,0x82c048eb,0x3583b0a3,0x54f221}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9e04ebc3a5e8727e,0x8ea968e038d7f1eb,0x82c048eb83318f77,0x54f2213583b0a3}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9e04ebc3a5e8727e,0x8ea968e038d7f1eb,0x82c048eb83318f77,0x54f2213583b0a3}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xdba9,0x49e4,0xc9b3,0x34e9,0x7ab9,0xd876,0xcae0,0xcfb0,0x6177,0x26b3,0x2c98,0x1e30,0x4a38,0x53cc,0x3bdc,0x71}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdba9,0x49e4,0xc9b3,0x34e9,0x7ab9,0xd876,0xcae0,0xcfb0,0x6177,0x26b3,0x2c98,0x1e30,0x4a38,0x53cc,0x3bdc,0x71}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x49e4dba9,0x34e9c9b3,0xd8767ab9,0xcfb0cae0,0x26b36177,0x1e302c98,0x53cc4a38,0x713bdc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x49e4dba9,0x34e9c9b3,0xd8767ab9,0xcfb0cae0,0x26b36177,0x1e302c98,0x53cc4a38,0x713bdc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x34e9c9b349e4dba9,0xcfb0cae0d8767ab9,0x1e302c9826b36177,0x713bdc53cc4a38}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x34e9c9b349e4dba9,0xcfb0cae0d8767ab9,0x1e302c9826b36177,0x713bdc53cc4a38}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6ff3,0x2fad,0x4bac,0xdf95,0x19e2,0xce0d,0x3a86,0x4de,0x378f,0xfd44,0xc70,0x63e,0x7c55,0xb80e,0xa72e,0x14}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6ff3,0x2fad,0x4bac,0xdf95,0x19e2,0xce0d,0x3a86,0x4de,0x378f,0xfd44,0xc70,0x63e,0x7c55,0xb80e,0xa72e,0x14}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2fad6ff3,0xdf954bac,0xce0d19e2,0x4de3a86,0xfd44378f,0x63e0c70,0xb80e7c55,0x14a72e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2fad6ff3,0xdf954bac,0xce0d19e2,0x4de3a86,0xfd44378f,0x63e0c70,0xb80e7c55,0x14a72e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdf954bac2fad6ff3,0x4de3a86ce0d19e2,0x63e0c70fd44378f,0x14a72eb80e7c55}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdf954bac2fad6ff3,0x4de3a86ce0d19e2,0x63e0c70fd44378f,0x14a72eb80e7c55}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x20f0,0x2693,0xacbf,0x731a,0xb0f3,0xd8ce,0x1bcd,0xf836,0x8469,0x44d5,0xd604,0xd3aa,0x4aa8,0xcdc3,0x9086,0x3f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x20f0,0x2693,0xacbf,0x731a,0xb0f3,0xd8ce,0x1bcd,0xf836,0x8469,0x44d5,0xd604,0xd3aa,0x4aa8,0xcdc3,0x9086,0x3f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x269320f0,0x731aacbf,0xd8ceb0f3,0xf8361bcd,0x44d58469,0xd3aad604,0xcdc34aa8,0x3f9086}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x269320f0,0x731aacbf,0xd8ceb0f3,0xf8361bcd,0x44d58469,0xd3aad604,0xcdc34aa8,0x3f9086}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x731aacbf269320f0,0xf8361bcdd8ceb0f3,0xd3aad60444d58469,0x3f9086cdc34aa8}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x731aacbf269320f0,0xf8361bcdd8ceb0f3,0xd3aad60444d58469,0x3f9086cdc34aa8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xcc11,0xe55a,0x932f,0x9534,0x2895,0xaf43,0x2956,0x614f,0x4e84,0xe4b2,0x60c6,0x255,0xbb14,0xd70d,0xc61e,0x13}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcc11,0xe55a,0x932f,0x9534,0x2895,0xaf43,0x2956,0x614f,0x4e84,0xe4b2,0x60c6,0x255,0xbb14,0xd70d,0xc61e,0x13}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe55acc11,0x9534932f,0xaf432895,0x614f2956,0xe4b24e84,0x25560c6,0xd70dbb14,0x13c61e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe55acc11,0x9534932f,0xaf432895,0x614f2956,0xe4b24e84,0x25560c6,0xd70dbb14,0x13c61e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9534932fe55acc11,0x614f2956af432895,0x25560c6e4b24e84,0x13c61ed70dbb14}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9534932fe55acc11,0x614f2956af432895,0x25560c6e4b24e84,0x13c61ed70dbb14}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x28d6,0x450d,0xd24f,0x54e4,0x6e67,0x81d,0x9b71,0xadbe,0x1088,0x6148,0x4ebf,0x4b68,0x829e,0x65c8,0xe1a6,0xe5}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x28d6,0x450d,0xd24f,0x54e4,0x6e67,0x81d,0x9b71,0xadbe,0x1088,0x6148,0x4ebf,0x4b68,0x829e,0x65c8,0xe1a6,0xe5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450d28d6,0x54e4d24f,0x81d6e67,0xadbe9b71,0x61481088,0x4b684ebf,0x65c8829e,0xe5e1a6}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450d28d6,0x54e4d24f,0x81d6e67,0xadbe9b71,0x61481088,0x4b684ebf,0x65c8829e,0xe5e1a6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x54e4d24f450d28d6,0xadbe9b71081d6e67,0x4b684ebf61481088,0xe5e1a665c8829e}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x54e4d24f450d28d6,0xadbe9b71081d6e67,0x4b684ebf61481088,0xe5e1a665c8829e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf10,0xd96c,0x5340,0x8ce5,0x4f0c,0x2731,0xe432,0x7c9,0x7b96,0xbb2a,0x29fb,0x2c55,0xb557,0x323c,0x6f79,0xc0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf10,0xd96c,0x5340,0x8ce5,0x4f0c,0x2731,0xe432,0x7c9,0x7b96,0xbb2a,0x29fb,0x2c55,0xb557,0x323c,0x6f79,0xc0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd96cdf10,0x8ce55340,0x27314f0c,0x7c9e432,0xbb2a7b96,0x2c5529fb,0x323cb557,0xc06f79}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd96cdf10,0x8ce55340,0x27314f0c,0x7c9e432,0xbb2a7b96,0x2c5529fb,0x323cb557,0xc06f79}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8ce55340d96cdf10,0x7c9e43227314f0c,0x2c5529fbbb2a7b96,0xc06f79323cb557}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8ce55340d96cdf10,0x7c9e43227314f0c,0x2c5529fbbb2a7b96,0xc06f79323cb557}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x6b9, 0xd4c, 0x1d8d, 0x1f99, 0x1be4, 0x1f53, 0x5c1, 0x1937, 0x9c, 0xe68, 0x61e, 0x14e8, 0x352, 0x3d6, 0x174, 0x133, 0x18a6, 0x1b19, 0x94b, 0x9} @@ -2641,223 +2641,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27cb,0x4931,0x13e0,0xcd75,0x6846,0x3de7,0x5a91,0x9ff9,0xa270,0xa6d6,0x26ec,0xb972,0xb44,0xe4b8,0x52fc,0x3f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x27cb,0x4931,0x13e0,0xcd75,0x6846,0x3de7,0x5a91,0x9ff9,0xa270,0xa6d6,0x26ec,0xb972,0xb44,0xe4b8,0x52fc,0x3f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x493127cb,0xcd7513e0,0x3de76846,0x9ff95a91,0xa6d6a270,0xb97226ec,0xe4b80b44,0x3f52fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x493127cb,0xcd7513e0,0x3de76846,0x9ff95a91,0xa6d6a270,0xb97226ec,0xe4b80b44,0x3f52fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcd7513e0493127cb,0x9ff95a913de76846,0xb97226eca6d6a270,0x3f52fce4b80b44}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcd7513e0493127cb,0x9ff95a913de76846,0xb97226eca6d6a270,0x3f52fce4b80b44}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2d7e,0x5a38,0xca74,0x1d16,0x2547,0x1674,0x8c29,0xafc2,0x2349,0x4856,0x2c73,0x7957,0x67e1,0x3c3e,0x4d3,0xad}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2d7e,0x5a38,0xca74,0x1d16,0x2547,0x1674,0x8c29,0xafc2,0x2349,0x4856,0x2c73,0x7957,0x67e1,0x3c3e,0x4d3,0xad}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a382d7e,0x1d16ca74,0x16742547,0xafc28c29,0x48562349,0x79572c73,0x3c3e67e1,0xad04d3}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a382d7e,0x1d16ca74,0x16742547,0xafc28c29,0x48562349,0x79572c73,0x3c3e67e1,0xad04d3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1d16ca745a382d7e,0xafc28c2916742547,0x79572c7348562349,0xad04d33c3e67e1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1d16ca745a382d7e,0xafc28c2916742547,0x79572c7348562349,0xad04d33c3e67e1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8d7,0x27fb,0x321e,0xcf15,0xfbd3,0x6f8e,0x5fbd,0x7ed7,0xf394,0x58d6,0x5937,0xb73c,0xbfb,0xe027,0x64ac,0x22}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8d7,0x27fb,0x321e,0xcf15,0xfbd3,0x6f8e,0x5fbd,0x7ed7,0xf394,0x58d6,0x5937,0xb73c,0xbfb,0xe027,0x64ac,0x22}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x27fbd8d7,0xcf15321e,0x6f8efbd3,0x7ed75fbd,0x58d6f394,0xb73c5937,0xe0270bfb,0x2264ac}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x27fbd8d7,0xcf15321e,0x6f8efbd3,0x7ed75fbd,0x58d6f394,0xb73c5937,0xe0270bfb,0x2264ac}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcf15321e27fbd8d7,0x7ed75fbd6f8efbd3,0xb73c593758d6f394,0x2264ace0270bfb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcf15321e27fbd8d7,0x7ed75fbd6f8efbd3,0xb73c593758d6f394,0x2264ace0270bfb}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd835,0xb6ce,0xec1f,0x328a,0x97b9,0xc218,0xa56e,0x6006,0x5d8f,0x5929,0xd913,0x468d,0xf4bb,0x1b47,0xad03,0xc0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd835,0xb6ce,0xec1f,0x328a,0x97b9,0xc218,0xa56e,0x6006,0x5d8f,0x5929,0xd913,0x468d,0xf4bb,0x1b47,0xad03,0xc0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb6ced835,0x328aec1f,0xc21897b9,0x6006a56e,0x59295d8f,0x468dd913,0x1b47f4bb,0xc0ad03}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb6ced835,0x328aec1f,0xc21897b9,0x6006a56e,0x59295d8f,0x468dd913,0x1b47f4bb,0xc0ad03}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x328aec1fb6ced835,0x6006a56ec21897b9,0x468dd91359295d8f,0xc0ad031b47f4bb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x328aec1fb6ced835,0x6006a56ec21897b9,0x468dd91359295d8f,0xc0ad031b47f4bb}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5286,0x497a,0x2196,0xaa2d,0xa98a,0x7b17,0xa6e9,0x6da0,0x73f5,0xe349,0x83ae,0x7c6e,0x31,0xee2e,0x6931,0x32}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5286,0x497a,0x2196,0xaa2d,0xa98a,0x7b17,0xa6e9,0x6da0,0x73f5,0xe349,0x83ae,0x7c6e,0x31,0xee2e,0x6931,0x32}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x497a5286,0xaa2d2196,0x7b17a98a,0x6da0a6e9,0xe34973f5,0x7c6e83ae,0xee2e0031,0x326931}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x497a5286,0xaa2d2196,0x7b17a98a,0x6da0a6e9,0xe34973f5,0x7c6e83ae,0xee2e0031,0x326931}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaa2d2196497a5286,0x6da0a6e97b17a98a,0x7c6e83aee34973f5,0x326931ee2e0031}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaa2d2196497a5286,0x6da0a6e97b17a98a,0x7c6e83aee34973f5,0x326931ee2e0031}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc8dc,0x8642,0xe46d,0x6a64,0x4c13,0x90e7,0xd82b,0x9b58,0x64b4,0x7381,0x5218,0x99b4,0x5926,0x5d78,0x95e2,0xb7}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc8dc,0x8642,0xe46d,0x6a64,0x4c13,0x90e7,0xd82b,0x9b58,0x64b4,0x7381,0x5218,0x99b4,0x5926,0x5d78,0x95e2,0xb7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8642c8dc,0x6a64e46d,0x90e74c13,0x9b58d82b,0x738164b4,0x99b45218,0x5d785926,0xb795e2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8642c8dc,0x6a64e46d,0x90e74c13,0x9b58d82b,0x738164b4,0x99b45218,0x5d785926,0xb795e2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6a64e46d8642c8dc,0x9b58d82b90e74c13,0x99b45218738164b4,0xb795e25d785926}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6a64e46d8642c8dc,0x9b58d82b90e74c13,0x99b45218738164b4,0xb795e25d785926}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x94df,0x6dc7,0xcd7f,0xebb2,0xb290,0x811d,0x2825,0xc88,0xd514,0x959a,0x7d64,0xc8c3,0x16a9,0x106a,0x1eea,0x32}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x94df,0x6dc7,0xcd7f,0xebb2,0xb290,0x811d,0x2825,0xc88,0xd514,0x959a,0x7d64,0xc8c3,0x16a9,0x106a,0x1eea,0x32}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6dc794df,0xebb2cd7f,0x811db290,0xc882825,0x959ad514,0xc8c37d64,0x106a16a9,0x321eea}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6dc794df,0xebb2cd7f,0x811db290,0xc882825,0x959ad514,0xc8c37d64,0x106a16a9,0x321eea}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xebb2cd7f6dc794df,0xc882825811db290,0xc8c37d64959ad514,0x321eea106a16a9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xebb2cd7f6dc794df,0xc882825811db290,0xc8c37d64959ad514,0x321eea106a16a9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe08c,0xe778,0x1464,0x19fe,0xef25,0x1d24,0xa98f,0x4af0,0x70d3,0x8e4d,0x2b82,0x95ea,0x3277,0xc267,0x1695,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe08c,0xe778,0x1464,0x19fe,0xef25,0x1d24,0xa98f,0x4af0,0x70d3,0x8e4d,0x2b82,0x95ea,0x3277,0xc267,0x1695,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe778e08c,0x19fe1464,0x1d24ef25,0x4af0a98f,0x8e4d70d3,0x95ea2b82,0xc2673277,0xf1695}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe778e08c,0x19fe1464,0x1d24ef25,0x4af0a98f,0x8e4d70d3,0x95ea2b82,0xc2673277,0xf1695}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x19fe1464e778e08c,0x4af0a98f1d24ef25,0x95ea2b828e4d70d3,0xf1695c2673277}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x19fe1464e778e08c,0x4af0a98f1d24ef25,0x95ea2b828e4d70d3,0xf1695c2673277}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1df,0xb6e1,0xe2a4,0x4bc9,0xdc85,0x6365,0x3fca,0x9a38,0xee2,0xed03,0xca7f,0x1984,0xe709,0x1efe,0xc173,0x8b}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1df,0xb6e1,0xe2a4,0x4bc9,0xdc85,0x6365,0x3fca,0x9a38,0xee2,0xed03,0xca7f,0x1984,0xe709,0x1efe,0xc173,0x8b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb6e1f1df,0x4bc9e2a4,0x6365dc85,0x9a383fca,0xed030ee2,0x1984ca7f,0x1efee709,0x8bc173}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb6e1f1df,0x4bc9e2a4,0x6365dc85,0x9a383fca,0xed030ee2,0x1984ca7f,0x1efee709,0x8bc173}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x4bc9e2a4b6e1f1df,0x9a383fca6365dc85,0x1984ca7fed030ee2,0x8bc1731efee709}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4bc9e2a4b6e1f1df,0x9a383fca6365dc85,0x1984ca7fed030ee2,0x8bc1731efee709}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6b21,0x9238,0x3280,0x144d,0x4d6f,0x7ee2,0xd7da,0xf377,0x2aeb,0x6a65,0x829b,0x373c,0xe956,0xef95,0xe115,0xcd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6b21,0x9238,0x3280,0x144d,0x4d6f,0x7ee2,0xd7da,0xf377,0x2aeb,0x6a65,0x829b,0x373c,0xe956,0xef95,0xe115,0xcd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x92386b21,0x144d3280,0x7ee24d6f,0xf377d7da,0x6a652aeb,0x373c829b,0xef95e956,0xcde115}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x92386b21,0x144d3280,0x7ee24d6f,0xf377d7da,0x6a652aeb,0x373c829b,0xef95e956,0xcde115}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x144d328092386b21,0xf377d7da7ee24d6f,0x373c829b6a652aeb,0xcde115ef95e956}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x144d328092386b21,0xf377d7da7ee24d6f,0x373c829b6a652aeb,0xcde115ef95e956}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27cb,0x4931,0x13e0,0xcd75,0x6846,0x3de7,0x5a91,0x9ff9,0xa270,0xa6d6,0x26ec,0xb972,0xb44,0xe4b8,0x52fc,0x3f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27cb,0x4931,0x13e0,0xcd75,0x6846,0x3de7,0x5a91,0x9ff9,0xa270,0xa6d6,0x26ec,0xb972,0xb44,0xe4b8,0x52fc,0x3f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x493127cb,0xcd7513e0,0x3de76846,0x9ff95a91,0xa6d6a270,0xb97226ec,0xe4b80b44,0x3f52fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x493127cb,0xcd7513e0,0x3de76846,0x9ff95a91,0xa6d6a270,0xb97226ec,0xe4b80b44,0x3f52fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcd7513e0493127cb,0x9ff95a913de76846,0xb97226eca6d6a270,0x3f52fce4b80b44}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcd7513e0493127cb,0x9ff95a913de76846,0xb97226eca6d6a270,0x3f52fce4b80b44}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2d7e,0x5a38,0xca74,0x1d16,0x2547,0x1674,0x8c29,0xafc2,0x2349,0x4856,0x2c73,0x7957,0x67e1,0x3c3e,0x4d3,0xad}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2d7e,0x5a38,0xca74,0x1d16,0x2547,0x1674,0x8c29,0xafc2,0x2349,0x4856,0x2c73,0x7957,0x67e1,0x3c3e,0x4d3,0xad}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a382d7e,0x1d16ca74,0x16742547,0xafc28c29,0x48562349,0x79572c73,0x3c3e67e1,0xad04d3}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a382d7e,0x1d16ca74,0x16742547,0xafc28c29,0x48562349,0x79572c73,0x3c3e67e1,0xad04d3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1d16ca745a382d7e,0xafc28c2916742547,0x79572c7348562349,0xad04d33c3e67e1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1d16ca745a382d7e,0xafc28c2916742547,0x79572c7348562349,0xad04d33c3e67e1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8d7,0x27fb,0x321e,0xcf15,0xfbd3,0x6f8e,0x5fbd,0x7ed7,0xf394,0x58d6,0x5937,0xb73c,0xbfb,0xe027,0x64ac,0x22}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xd8d7,0x27fb,0x321e,0xcf15,0xfbd3,0x6f8e,0x5fbd,0x7ed7,0xf394,0x58d6,0x5937,0xb73c,0xbfb,0xe027,0x64ac,0x22}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x27fbd8d7,0xcf15321e,0x6f8efbd3,0x7ed75fbd,0x58d6f394,0xb73c5937,0xe0270bfb,0x2264ac}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x27fbd8d7,0xcf15321e,0x6f8efbd3,0x7ed75fbd,0x58d6f394,0xb73c5937,0xe0270bfb,0x2264ac}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcf15321e27fbd8d7,0x7ed75fbd6f8efbd3,0xb73c593758d6f394,0x2264ace0270bfb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcf15321e27fbd8d7,0x7ed75fbd6f8efbd3,0xb73c593758d6f394,0x2264ace0270bfb}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd835,0xb6ce,0xec1f,0x328a,0x97b9,0xc218,0xa56e,0x6006,0x5d8f,0x5929,0xd913,0x468d,0xf4bb,0x1b47,0xad03,0xc0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd835,0xb6ce,0xec1f,0x328a,0x97b9,0xc218,0xa56e,0x6006,0x5d8f,0x5929,0xd913,0x468d,0xf4bb,0x1b47,0xad03,0xc0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb6ced835,0x328aec1f,0xc21897b9,0x6006a56e,0x59295d8f,0x468dd913,0x1b47f4bb,0xc0ad03}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb6ced835,0x328aec1f,0xc21897b9,0x6006a56e,0x59295d8f,0x468dd913,0x1b47f4bb,0xc0ad03}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x328aec1fb6ced835,0x6006a56ec21897b9,0x468dd91359295d8f,0xc0ad031b47f4bb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x328aec1fb6ced835,0x6006a56ec21897b9,0x468dd91359295d8f,0xc0ad031b47f4bb}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2943,0x24bd,0x90cb,0x5516,0xd4c5,0xbd8b,0x5374,0xb6d0,0xb9fa,0x71a4,0x41d7,0xbe37,0x18,0xf717,0x3498,0x99}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2943,0x24bd,0x90cb,0x5516,0xd4c5,0xbd8b,0x5374,0xb6d0,0xb9fa,0x71a4,0x41d7,0xbe37,0x18,0xf717,0x3498,0x99}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x24bd2943,0x551690cb,0xbd8bd4c5,0xb6d05374,0x71a4b9fa,0xbe3741d7,0xf7170018,0x993498}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x24bd2943,0x551690cb,0xbd8bd4c5,0xb6d05374,0x71a4b9fa,0xbe3741d7,0xf7170018,0x993498}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x551690cb24bd2943,0xb6d05374bd8bd4c5,0xbe3741d771a4b9fa,0x993498f7170018}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x551690cb24bd2943,0xb6d05374bd8bd4c5,0xbe3741d771a4b9fa,0x993498f7170018}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x646e,0xc321,0x7236,0xb532,0xa609,0xc873,0x6c15,0x4dac,0xb25a,0x39c0,0x290c,0x4cda,0x2c93,0x2ebc,0xcaf1,0xdb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x646e,0xc321,0x7236,0xb532,0xa609,0xc873,0x6c15,0x4dac,0xb25a,0x39c0,0x290c,0x4cda,0x2c93,0x2ebc,0xcaf1,0xdb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc321646e,0xb5327236,0xc873a609,0x4dac6c15,0x39c0b25a,0x4cda290c,0x2ebc2c93,0xdbcaf1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc321646e,0xb5327236,0xc873a609,0x4dac6c15,0x39c0b25a,0x4cda290c,0x2ebc2c93,0xdbcaf1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb5327236c321646e,0x4dac6c15c873a609,0x4cda290c39c0b25a,0xdbcaf12ebc2c93}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb5327236c321646e,0x4dac6c15c873a609,0x4cda290c39c0b25a,0xdbcaf12ebc2c93}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd647,0xf187,0x9a31,0x1ee,0x193b,0xeec2,0xbfed,0x9418,0x15b6,0xe9a,0x4c74,0xae85,0x3ebe,0x2677,0x3f12,0x42}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd647,0xf187,0x9a31,0x1ee,0x193b,0xeec2,0xbfed,0x9418,0x15b6,0xe9a,0x4c74,0xae85,0x3ebe,0x2677,0x3f12,0x42}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf187d647,0x1ee9a31,0xeec2193b,0x9418bfed,0xe9a15b6,0xae854c74,0x26773ebe,0x423f12}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf187d647,0x1ee9a31,0xeec2193b,0x9418bfed,0xe9a15b6,0xae854c74,0x26773ebe,0x423f12}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1ee9a31f187d647,0x9418bfedeec2193b,0xae854c740e9a15b6,0x423f1226773ebe}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1ee9a31f187d647,0x9418bfedeec2193b,0xae854c740e9a15b6,0x423f1226773ebe}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x68ff,0x99be,0x416c,0x7bbf,0xd44f,0x609f,0x7682,0xa8ff,0xa6bb,0xec03,0x8e77,0xc076,0x7873,0x9676,0xa152,0xf5}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x68ff,0x99be,0x416c,0x7bbf,0xd44f,0x609f,0x7682,0xa8ff,0xa6bb,0xec03,0x8e77,0xc076,0x7873,0x9676,0xa152,0xf5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x99be68ff,0x7bbf416c,0x609fd44f,0xa8ff7682,0xec03a6bb,0xc0768e77,0x96767873,0xf5a152}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x99be68ff,0x7bbf416c,0x609fd44f,0xa8ff7682,0xec03a6bb,0xc0768e77,0x96767873,0xf5a152}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7bbf416c99be68ff,0xa8ff7682609fd44f,0xc0768e77ec03a6bb,0xf5a15296767873}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7bbf416c99be68ff,0xa8ff7682609fd44f,0xc0768e77ec03a6bb,0xf5a15296767873}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3739,0xf7da,0xbd23,0xa38e,0x8cf9,0x7690,0x6b0e,0x1a7,0x77f0,0xa2bd,0x5ac7,0x5101,0x3aae,0xa922,0x2d3a,0x95}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3739,0xf7da,0xbd23,0xa38e,0x8cf9,0x7690,0x6b0e,0x1a7,0x77f0,0xa2bd,0x5ac7,0x5101,0x3aae,0xa922,0x2d3a,0x95}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf7da3739,0xa38ebd23,0x76908cf9,0x1a76b0e,0xa2bd77f0,0x51015ac7,0xa9223aae,0x952d3a}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf7da3739,0xa38ebd23,0x76908cf9,0x1a76b0e,0xa2bd77f0,0x51015ac7,0xa9223aae,0x952d3a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa38ebd23f7da3739,0x1a76b0e76908cf9,0x51015ac7a2bd77f0,0x952d3aa9223aae}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa38ebd23f7da3739,0x1a76b0e76908cf9,0x51015ac7a2bd77f0,0x952d3aa9223aae}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x29b9,0xe78,0x65ce,0xfe11,0xe6c4,0x113d,0x4012,0x6be7,0xea49,0xf165,0xb38b,0x517a,0xc141,0xd988,0xc0ed,0xbd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x29b9,0xe78,0x65ce,0xfe11,0xe6c4,0x113d,0x4012,0x6be7,0xea49,0xf165,0xb38b,0x517a,0xc141,0xd988,0xc0ed,0xbd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe7829b9,0xfe1165ce,0x113de6c4,0x6be74012,0xf165ea49,0x517ab38b,0xd988c141,0xbdc0ed}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe7829b9,0xfe1165ce,0x113de6c4,0x6be74012,0xf165ea49,0x517ab38b,0xd988c141,0xbdc0ed}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfe1165ce0e7829b9,0x6be74012113de6c4,0x517ab38bf165ea49,0xbdc0edd988c141}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfe1165ce0e7829b9,0x6be74012113de6c4,0x517ab38bf165ea49,0xbdc0edd988c141}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x1068, 0xf2c, 0x1ada, 0x1f58, 0x1343, 0x5cc, 0x90, 0x1bba, 0x50b, 0x1a35, 0x35f, 0x1388, 0x5ce, 0xc4f, 0x1462, 0x191f, 0x665, 0x843, 0x1cb1, 0x1} @@ -3117,220 +3117,220 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7177,0xb186,0x73de,0xc572,0x9802,0xc0ee,0x7031,0xfe17,0xbc2e,0x41c5,0xe2a7,0xed41,0x1cbf,0x9ff9,0xf5bc,0x1e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7177,0xb186,0x73de,0xc572,0x9802,0xc0ee,0x7031,0xfe17,0xbc2e,0x41c5,0xe2a7,0xed41,0x1cbf,0x9ff9,0xf5bc,0x1e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = 
(mp_limb_t[]) {0xb1867177,0xc57273de,0xc0ee9802,0xfe177031,0x41c5bc2e,0xed41e2a7,0x9ff91cbf,0x1ef5bc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb1867177,0xc57273de,0xc0ee9802,0xfe177031,0x41c5bc2e,0xed41e2a7,0x9ff91cbf,0x1ef5bc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc57273deb1867177,0xfe177031c0ee9802,0xed41e2a741c5bc2e,0x1ef5bc9ff91cbf}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc57273deb1867177,0xfe177031c0ee9802,0xed41e2a741c5bc2e,0x1ef5bc9ff91cbf}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x726a,0xbae3,0x32b6,0x75c2,0xe003,0x6e79,0xd172,0x382a,0x8a51,0x7962,0xa563,0x6a39,0x9cdd,0xc910,0xf6f0,0xa0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x726a,0xbae3,0x32b6,0x75c2,0xe003,0x6e79,0xd172,0x382a,0x8a51,0x7962,0xa563,0x6a39,0x9cdd,0xc910,0xf6f0,0xa0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbae3726a,0x75c232b6,0x6e79e003,0x382ad172,0x79628a51,0x6a39a563,0xc9109cdd,0xa0f6f0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbae3726a,0x75c232b6,0x6e79e003,0x382ad172,0x79628a51,0x6a39a563,0xc9109cdd,0xa0f6f0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x75c232b6bae3726a,0x382ad1726e79e003,0x6a39a56379628a51,0xa0f6f0c9109cdd}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x75c232b6bae3726a,0x382ad1726e79e003,0x6a39a56379628a51,0xa0f6f0c9109cdd}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x246b,0x9fa9,0x54e6,0xbd87,0x15b5,0x1c70,0x6470,0x25fa,0x3f5c,0x8940,0xa6e9,0x7eb5,0x6109,0x54a1,0xa8df,0x16}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x246b,0x9fa9,0x54e6,0xbd87,0x15b5,0x1c70,0x6470,0x25fa,0x3f5c,0x8940,0xa6e9,0x7eb5,0x6109,0x54a1,0xa8df,0x16}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9fa9246b,0xbd8754e6,0x1c7015b5,0x25fa6470,0x89403f5c,0x7eb5a6e9,0x54a16109,0x16a8df}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9fa9246b,0xbd8754e6,0x1c7015b5,0x25fa6470,0x89403f5c,0x7eb5a6e9,0x54a16109,0x16a8df}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbd8754e69fa9246b,0x25fa64701c7015b5,0x7eb5a6e989403f5c,0x16a8df54a16109}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbd8754e69fa9246b,0x25fa64701c7015b5,0x7eb5a6e989403f5c,0x16a8df54a16109}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8e89,0x4e79,0x8c21,0x3a8d,0x67fd,0x3f11,0x8fce,0x1e8,0x43d1,0xbe3a,0x1d58,0x12be,0xe340,0x6006,0xa43,0xe1}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8e89,0x4e79,0x8c21,0x3a8d,0x67fd,0x3f11,0x8fce,0x1e8,0x43d1,0xbe3a,0x1d58,0x12be,0xe340,0x6006,0xa43,0xe1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4e798e89,0x3a8d8c21,0x3f1167fd,0x1e88fce,0xbe3a43d1,0x12be1d58,0x6006e340,0xe10a43}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4e798e89,0x3a8d8c21,0x3f1167fd,0x1e88fce,0xbe3a43d1,0x12be1d58,0x6006e340,0xe10a43}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3a8d8c214e798e89,0x1e88fce3f1167fd,0x12be1d58be3a43d1,0xe10a436006e340}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x3a8d8c214e798e89,0x1e88fce3f1167fd,0x12be1d58be3a43d1,0xe10a436006e340}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, 
._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1975,0x2b02,0x86c,0x9cbe,0x7576,0xb1c3,0xd9a7,0x737e,0x4de1,0xa245,0x7652,0xf9bf,0x4bf8,0xdc2c,0xeaa1,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1975,0x2b02,0x86c,0x9cbe,0x7576,0xb1c3,0xd9a7,0x737e,0x4de1,0xa245,0x7652,0xf9bf,0x4bf8,0xdc2c,0xeaa1,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2b021975,0x9cbe086c,0xb1c37576,0x737ed9a7,0xa2454de1,0xf9bf7652,0xdc2c4bf8,0x8eaa1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2b021975,0x9cbe086c,0xb1c37576,0x737ed9a7,0xa2454de1,0xf9bf7652,0xdc2c4bf8,0x8eaa1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9cbe086c2b021975,0x737ed9a7b1c37576,0xf9bf7652a2454de1,0x8eaa1dc2c4bf8}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9cbe086c2b021975,0x737ed9a7b1c37576,0xf9bf7652a2454de1,0x8eaa1dc2c4bf8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee88,0x46bc,0x7177,0x337c,0x92b6,0x40dc,0xb657,0x3366,0x6c8a,0x2b98,0x40eb,0x1146,0xe116,0xb00a,0xa22f,0xe3}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee88,0x46bc,0x7177,0x337c,0x92b6,0x40dc,0xb657,0x3366,0x6c8a,0x2b98,0x40eb,0x1146,0xe116,0xb00a,0xa22f,0xe3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x46bcee88,0x337c7177,0x40dc92b6,0x3366b657,0x2b986c8a,0x114640eb,0xb00ae116,0xe3a22f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x46bcee88,0x337c7177,0x40dc92b6,0x3366b657,0x2b986c8a,0x114640eb,0xb00ae116,0xe3a22f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x337c717746bcee88,0x3366b65740dc92b6,0x114640eb2b986c8a,0xe3a22fb00ae116}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x337c717746bcee88,0x3366b65740dc92b6,0x114640eb2b986c8a,0xe3a22fb00ae116}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf28d,0x64d3,0xe248,0x40b9,0x5141,0x82bb,0x82ea,0xcf35,0xfaf0,0x3,0xd71f,0x6e88,0x7ac9,0xf4c9,0x6b9e,0xcc}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf28d,0x64d3,0xe248,0x40b9,0x5141,0x82bb,0x82ea,0xcf35,0xfaf0,0x3,0xd71f,0x6e88,0x7ac9,0xf4c9,0x6b9e,0xcc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x64d3f28d,0x40b9e248,0x82bb5141,0xcf3582ea,0x3faf0,0x6e88d71f,0xf4c97ac9,0xcc6b9e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x64d3f28d,0x40b9e248,0x82bb5141,0xcf3582ea,0x3faf0,0x6e88d71f,0xf4c97ac9,0xcc6b9e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x40b9e24864d3f28d,0xcf3582ea82bb5141,0x6e88d71f0003faf0,0xcc6b9ef4c97ac9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x40b9e24864d3f28d,0xcf3582ea82bb5141,0x6e88d71f0003faf0,0xcc6b9ef4c97ac9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe68b,0xd4fd,0xf793,0x6341,0x8a89,0x4e3c,0x2658,0x8c81,0xb21e,0x5dba,0x89ad,0x640,0xb407,0x23d3,0x155e,0xf7}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe68b,0xd4fd,0xf793,0x6341,0x8a89,0x4e3c,0x2658,0x8c81,0xb21e,0x5dba,0x89ad,0x640,0xb407,0x23d3,0x155e,0xf7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd4fde68b,0x6341f793,0x4e3c8a89,0x8c812658,0x5dbab21e,0x64089ad,0x23d3b407,0xf7155e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd4fde68b,0x6341f793,0x4e3c8a89,0x8c812658,0x5dbab21e,0x64089ad,0x23d3b407,0xf7155e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6341f793d4fde68b,0x8c8126584e3c8a89,0x64089ad5dbab21e,0xf7155e23d3b407}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6341f793d4fde68b,0x8c8126584e3c8a89,0x64089ad5dbab21e,0xf7155e23d3b407}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7177,0xb186,0x73de,0xc572,0x9802,0xc0ee,0x7031,0xfe17,0xbc2e,0x41c5,0xe2a7,0xed41,0x1cbf,0x9ff9,0xf5bc,0x1e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7177,0xb186,0x73de,0xc572,0x9802,0xc0ee,0x7031,0xfe17,0xbc2e,0x41c5,0xe2a7,0xed41,0x1cbf,0x9ff9,0xf5bc,0x1e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb1867177,0xc57273de,0xc0ee9802,0xfe177031,0x41c5bc2e,0xed41e2a7,0x9ff91cbf,0x1ef5bc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb1867177,0xc57273de,0xc0ee9802,0xfe177031,0x41c5bc2e,0xed41e2a7,0x9ff91cbf,0x1ef5bc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc57273deb1867177,0xfe177031c0ee9802,0xed41e2a741c5bc2e,0x1ef5bc9ff91cbf}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc57273deb1867177,0xfe177031c0ee9802,0xed41e2a741c5bc2e,0x1ef5bc9ff91cbf}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x726a,0xbae3,0x32b6,0x75c2,0xe003,0x6e79,0xd172,0x382a,0x8a51,0x7962,0xa563,0x6a39,0x9cdd,0xc910,0xf6f0,0xa0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x726a,0xbae3,0x32b6,0x75c2,0xe003,0x6e79,0xd172,0x382a,0x8a51,0x7962,0xa563,0x6a39,0x9cdd,0xc910,0xf6f0,0xa0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbae3726a,0x75c232b6,0x6e79e003,0x382ad172,0x79628a51,0x6a39a563,0xc9109cdd,0xa0f6f0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbae3726a,0x75c232b6,0x6e79e003,0x382ad172,0x79628a51,0x6a39a563,0xc9109cdd,0xa0f6f0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x75c232b6bae3726a,0x382ad1726e79e003,0x6a39a56379628a51,0xa0f6f0c9109cdd}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x75c232b6bae3726a,0x382ad1726e79e003,0x6a39a56379628a51,0xa0f6f0c9109cdd}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x246b,0x9fa9,0x54e6,0xbd87,0x15b5,0x1c70,0x6470,0x25fa,0x3f5c,0x8940,0xa6e9,0x7eb5,0x6109,0x54a1,0xa8df,0x16}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x246b,0x9fa9,0x54e6,0xbd87,0x15b5,0x1c70,0x6470,0x25fa,0x3f5c,0x8940,0xa6e9,0x7eb5,0x6109,0x54a1,0xa8df,0x16}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size 
= 8, ._mp_d = (mp_limb_t[]) {0x9fa9246b,0xbd8754e6,0x1c7015b5,0x25fa6470,0x89403f5c,0x7eb5a6e9,0x54a16109,0x16a8df}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9fa9246b,0xbd8754e6,0x1c7015b5,0x25fa6470,0x89403f5c,0x7eb5a6e9,0x54a16109,0x16a8df}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbd8754e69fa9246b,0x25fa64701c7015b5,0x7eb5a6e989403f5c,0x16a8df54a16109}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbd8754e69fa9246b,0x25fa64701c7015b5,0x7eb5a6e989403f5c,0x16a8df54a16109}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8e89,0x4e79,0x8c21,0x3a8d,0x67fd,0x3f11,0x8fce,0x1e8,0x43d1,0xbe3a,0x1d58,0x12be,0xe340,0x6006,0xa43,0xe1}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8e89,0x4e79,0x8c21,0x3a8d,0x67fd,0x3f11,0x8fce,0x1e8,0x43d1,0xbe3a,0x1d58,0x12be,0xe340,0x6006,0xa43,0xe1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4e798e89,0x3a8d8c21,0x3f1167fd,0x1e88fce,0xbe3a43d1,0x12be1d58,0x6006e340,0xe10a43}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4e798e89,0x3a8d8c21,0x3f1167fd,0x1e88fce,0xbe3a43d1,0x12be1d58,0x6006e340,0xe10a43}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3a8d8c214e798e89,0x1e88fce3f1167fd,0x12be1d58be3a43d1,0xe10a436006e340}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3a8d8c214e798e89,0x1e88fce3f1167fd,0x12be1d58be3a43d1,0xe10a436006e340}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x84a0,0x8ad1,0xbcc4,0xc440,0x94e1,0x46ea,0x15c6,0x784e,0x190,0xd26f,0x630,0x2bee,0x74b1,0x93ce,0xe061,0x3c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x84a0,0x8ad1,0xbcc4,0xc440,0x94e1,0x46ea,0x15c6,0x784e,0x190,0xd26f,0x630,0x2bee,0x74b1,0x93ce,0xe061,0x3c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8ad184a0,0xc440bcc4,0x46ea94e1,0x784e15c6,0xd26f0190,0x2bee0630,0x93ce74b1,0x3ce061}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8ad184a0,0xc440bcc4,0x46ea94e1,0x784e15c6,0xd26f0190,0x2bee0630,0x93ce74b1,0x3ce061}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc440bcc48ad184a0,0x784e15c646ea94e1,0x2bee0630d26f0190,0x3ce06193ce74b1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc440bcc48ad184a0,0x784e15c646ea94e1,0x2bee0630d26f0190,0x3ce06193ce74b1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1e2b,0xdafe,0xfa45,0xa69b,0xb77e,0xf670,0x927d,0xa0f9,0xccb5,0xc897,0x9607,0x5f22,0x47bf,0x867,0xf781,0xd9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1e2b,0xdafe,0xfa45,0xa69b,0xb77e,0xf670,0x927d,0xa0f9,0xccb5,0xc897,0x9607,0x5f22,0x47bf,0x867,0xf781,0xd9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, 
._mp_d = (mp_limb_t[]) {0xdafe1e2b,0xa69bfa45,0xf670b77e,0xa0f9927d,0xc897ccb5,0x5f229607,0x86747bf,0xd9f781}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdafe1e2b,0xa69bfa45,0xf670b77e,0xa0f9927d,0xc897ccb5,0x5f229607,0x86747bf,0xd9f781}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa69bfa45dafe1e2b,0xa0f9927df670b77e,0x5f229607c897ccb5,0xd9f781086747bf}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa69bfa45dafe1e2b,0xa0f9927df670b77e,0x5f229607c897ccb5,0xd9f781086747bf}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2aa2,0xbd3f,0x2ad,0x19bd,0xe6f0,0x3b95,0x3fff,0xd17e,0xf3a6,0x7888,0xda46,0x3b21,0xcc57,0x5301,0x3e50,0xc4}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2aa2,0xbd3f,0x2ad,0x19bd,0xe6f0,0x3b95,0x3fff,0xd17e,0xf3a6,0x7888,0xda46,0x3b21,0xcc57,0x5301,0x3e50,0xc4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbd3f2aa2,0x19bd02ad,0x3b95e6f0,0xd17e3fff,0x7888f3a6,0x3b21da46,0x5301cc57,0xc43e50}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbd3f2aa2,0x19bd02ad,0x3b95e6f0,0xd17e3fff,0x7888f3a6,0x3b21da46,0x5301cc57,0xc43e50}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x19bd02adbd3f2aa2,0xd17e3fff3b95e6f0,0x3b21da467888f3a6,0xc43e505301cc57}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x19bd02adbd3f2aa2,0xd17e3fff3b95e6f0,0x3b21da467888f3a6,0xc43e505301cc57}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7b60,0x752e,0x433b,0x3bbf,0x6b1e,0xb915,0xea39,0x87b1,0xfe6f,0x2d90,0xf9cf,0xd411,0x8b4e,0x6c31,0x1f9e,0xc3}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7b60,0x752e,0x433b,0x3bbf,0x6b1e,0xb915,0xea39,0x87b1,0xfe6f,0x2d90,0xf9cf,0xd411,0x8b4e,0x6c31,0x1f9e,0xc3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x752e7b60,0x3bbf433b,0xb9156b1e,0x87b1ea39,0x2d90fe6f,0xd411f9cf,0x6c318b4e,0xc31f9e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x752e7b60,0x3bbf433b,0xb9156b1e,0x87b1ea39,0x2d90fe6f,0xd411f9cf,0x6c318b4e,0xc31f9e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3bbf433b752e7b60,0x87b1ea39b9156b1e,0xd411f9cf2d90fe6f,0xc31f9e6c318b4e}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3bbf433b752e7b60,0x87b1ea39b9156b1e,0xd411f9cf2d90fe6f,0xc31f9e6c318b4e}}}} #endif -}}}}; +}}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/finit.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/finit.c index b3808edf07..c9a3687282 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/finit.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/finit.c @@ -29,29 +29,29 @@ quat_alg_elem_finalize(quat_alg_elem_t *elem) void ibz_vec_2_init(ibz_vec_2_t *vec) { - ibz_init(&((*vec)[0])); - ibz_init(&((*vec)[1])); + ibz_init(&(vec->v[0])); + ibz_init(&(vec->v[1])); } void ibz_vec_2_finalize(ibz_vec_2_t *vec) { - ibz_finalize(&((*vec)[0])); - ibz_finalize(&((*vec)[1])); + ibz_finalize(&(vec->v[0])); + ibz_finalize(&(vec->v[1])); } void ibz_vec_4_init(ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_init(&(*vec)[i]); + ibz_init(&vec->v[i]); } } void ibz_vec_4_finalize(ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_finalize(&(*vec)[i]); + ibz_finalize(&vec->v[i]); } } @@ -60,7 +60,7 @@ 
ibz_mat_2x2_init(ibz_mat_2x2_t *mat) { for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_init(&(*mat)[i][j]); + ibz_init(&(mat->m)[i][j]); } } } @@ -69,7 +69,7 @@ ibz_mat_2x2_finalize(ibz_mat_2x2_t *mat) { for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_finalize(&(*mat)[i][j]); + ibz_finalize(&(mat->m)[i][j]); } } } @@ -79,7 +79,7 @@ ibz_mat_4x4_init(ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_init(&(*mat)[i][j]); + ibz_init(&(mat->m)[i][j]); } } } @@ -88,7 +88,7 @@ ibz_mat_4x4_finalize(ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_finalize(&(*mat)[i][j]); + ibz_finalize(&(mat->m)[i][j]); } } } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf.c index 511a0a5d38..5edff425c8 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hnf.c @@ -14,21 +14,21 @@ ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat) for (int i = 0; i < 4; i++) { // upper triangular for (int j = 0; j < i; j++) { - res = res && ibz_is_zero(&((*mat)[i][j])); + res = res && ibz_is_zero(&(mat->m[i][j])); } // find first non 0 element of line found = 0; for (int j = i; j < 4; j++) { if (found) { // all values are positive, and first non-0 is the largest of that line - res = res && (ibz_cmp(&((*mat)[i][j]), &zero) >= 0); - res = res && (ibz_cmp(&((*mat)[i][ind]), &((*mat)[i][j])) > 0); + res = res && (ibz_cmp(&(mat->m[i][j]), &zero) >= 0); + res = res && (ibz_cmp(&(mat->m[i][ind]), &(mat->m[i][j])) > 0); } else { - if (!ibz_is_zero(&((*mat)[i][j]))) { + if (!ibz_is_zero(&(mat->m[i][j]))) { found = 1; ind = j; // mustbe non-negative - res = res && (ibz_cmp(&((*mat)[i][j]), &zero) > 0); + res = res && (ibz_cmp(&(mat->m[i][j]), &zero) > 0); } } } @@ -37,7 +37,7 @@ ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat) int linestart = -1; int i = 0; for (int j = 0; j < 4; j++) { - while ((i < 4) && (ibz_is_zero(&((*mat)[i][j])))) { + while ((i < 4) && (ibz_is_zero(&(mat->m[i][j])))) { i = i + 1; } if (i != 4) { @@ -66,13 +66,13 @@ ibz_vec_4_linear_combination_mod(ibz_vec_4_t *lc, ibz_init(&m); ibz_copy(&m, mod); for (int i = 0; i < 4; i++) { - ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); - ibz_mul(&prod, coeff_b, &((*vec_b)[i])); - ibz_add(&(sums[i]), &(sums[i]), &prod); - ibz_centered_mod(&(sums[i]), &(sums[i]), &m); + ibz_mul(&(sums.v[i]), coeff_a, &(vec_a->v[i])); + ibz_mul(&prod, coeff_b, &(vec_b->v[i])); + ibz_add(&(sums.v[i]), &(sums.v[i]), &prod); + ibz_centered_mod(&(sums.v[i]), &(sums.v[i]), &m); } for (int i = 0; i < 4; i++) { - ibz_copy(&((*lc)[i]), &(sums[i])); + ibz_copy(&(lc->v[i]), &(sums.v[i])); } ibz_finalize(&prod); ibz_finalize(&m); @@ -86,7 +86,7 @@ ibz_vec_4_copy_mod(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_t *mod) ibz_init(&m); ibz_copy(&m, mod); for (int i = 0; i < 4; i++) { - ibz_centered_mod(&((*res)[i]), &((*vec)[i]), &m); + ibz_centered_mod(&(res->v[i]), &(vec->v[i]), &m); } ibz_finalize(&m); } @@ -101,8 +101,8 @@ ibz_vec_4_scalar_mul_mod(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4 ibz_copy(&s, scalar); ibz_copy(&m, mod); for (int i = 0; i < 4; i++) { - ibz_mul(&((*prod)[i]), &((*vec)[i]), &s); - ibz_mod(&((*prod)[i]), &((*prod)[i]), &m); + ibz_mul(&(prod->v[i]), &(vec->v[i]), &s); + ibz_mod(&(prod->v[i]), &(prod->v[i]), &m); } ibz_finalize(&m); ibz_finalize(&s); @@ -138,36 +138,36 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, 
int generator_number, const ibz_vec if (h < 4) ibz_vec_4_init(&(w[h])); ibz_vec_4_init(&(a[h])); - ibz_copy(&(a[h][0]), &(generators[h][0])); - ibz_copy(&(a[h][1]), &(generators[h][1])); - ibz_copy(&(a[h][2]), &(generators[h][2])); - ibz_copy(&(a[h][3]), &(generators[h][3])); + ibz_copy(&(a[h].v[0]), &(generators[h].v[0])); + ibz_copy(&(a[h].v[1]), &(generators[h].v[1])); + ibz_copy(&(a[h].v[2]), &(generators[h].v[2])); + ibz_copy(&(a[h].v[3]), &(generators[h].v[3])); } assert(ibz_cmp(mod, &ibz_const_zero) > 0); ibz_copy(&m, mod); while (i != -1) { while (j != 0) { j = j - 1; - if (!ibz_is_zero(&(a[j][i]))) { + if (!ibz_is_zero(&(a[j].v[i]))) { // assumtion that ibz_xgcd outputs u,v which are small in absolute // value is needed here also, needs u non 0, but v can be 0 if needed - ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &(a[j][i])); + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k].v[i]), &(a[j].v[i])); ibz_vec_4_linear_combination(&c, &u, &(a[k]), &v, &(a[j])); - ibz_div(&coeff_1, &r, &(a[k][i]), &d); - ibz_div(&coeff_2, &r, &(a[j][i]), &d); + ibz_div(&coeff_1, &r, &(a[k].v[i]), &d); + ibz_div(&coeff_2, &r, &(a[j].v[i]), &d); ibz_neg(&coeff_2, &coeff_2); ibz_vec_4_linear_combination_mod( &(a[j]), &coeff_1, &(a[j]), &coeff_2, &(a[k]), &m); // do lin comb mod m ibz_vec_4_copy_mod(&(a[k]), &c, &m); // mod m in copy } } - ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &m); + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k].v[i]), &m); ibz_vec_4_scalar_mul_mod(&(w[i]), &u, &(a[k]), &m); // mod m in scalar mult - if (ibz_is_zero(&(w[i][i]))) { - ibz_copy(&(w[i][i]), &m); + if (ibz_is_zero(&(w[i].v[i]))) { + ibz_copy(&(w[i].v[i]), &m); } for (int h = i + 1; h < 4; h++) { - ibz_div_floor(&q, &r, &(w[h][i]), &(w[i][i])); + ibz_div_floor(&q, &r, &(w[h].v[i]), &(w[i].v[i])); ibz_neg(&q, &q); ibz_vec_4_linear_combination(&(w[h]), &ibz_const_one, &(w[h]), &q, &(w[i])); } @@ -177,8 +177,8 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec k = k - 1; i = i - 1; j = k; - if (ibz_is_zero(&(a[k][i]))) - ibz_copy(&(a[k][i]), &m); + if (ibz_is_zero(&(a[k].v[i]))) + ibz_copy(&(a[k].v[i]), &m); } else { k = k - 1; @@ -188,7 +188,7 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec } for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { - ibz_copy(&((*hnf)[i][j]), &(w[j][i])); + ibz_copy(&((hnf->m)[i][j]), &(w[j].v[i])); } } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ibz_division.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ibz_division.c index 0fd35b5c65..f630f5a9fe 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ibz_division.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ibz_division.c @@ -8,5 +8,5 @@ void ibz_xgcd(ibz_t *gcd, ibz_t *u, ibz_t *v, const ibz_t *a, const ibz_t *b) { - mpz_gcdext(*gcd, *u, *v, *a, *b); + mpz_gcdext(gcd->i, u->i, v->i, a->i, b->i); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/id2iso.c index 0743974345..1be9d87e71 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/id2iso.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/id2iso.c @@ -18,8 +18,8 @@ ec_biscalar_mul_ibz_vec(ec_point_t *res, const ec_curve_t *curve) { digit_t scalars[2][NWORDS_ORDER]; - ibz_to_digit_array(scalars[0], &(*scalar_vec)[0]); - ibz_to_digit_array(scalars[1], &(*scalar_vec)[1]); + ibz_to_digit_array(scalars[0], &scalar_vec->v[0]); + ibz_to_digit_array(scalars[1], &scalar_vec->v[1]); 
ec_biscalar_mul(res, scalars[0], scalars[1], f, PQ, curve); } @@ -48,14 +48,14 @@ id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *vec, const quat_left_ideal_t *lid quat_change_to_O0_basis(&coeffs, &alpha); for (unsigned i = 0; i < 2; ++i) { - ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + ibz_add(&mat.m[i][i], &mat.m[i][i], &coeffs.v[0]); for (unsigned j = 0; j < 2; ++j) { - ibz_mul(&tmp, &ACTION_GEN2[i][j], &coeffs[1]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &ACTION_GEN3[i][j], &coeffs[2]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &ACTION_GEN4[i][j], &coeffs[3]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN2.m[i][j], &coeffs.v[1]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN3.m[i][j], &coeffs.v[2]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN4.m[i][j], &coeffs.v[3]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); } } @@ -67,16 +67,16 @@ id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *vec, const quat_left_ideal_t *lid { const ibz_t *const norm = &lideal->norm; - ibz_mod(&(*vec)[0], &mat[0][0], norm); - ibz_mod(&(*vec)[1], &mat[1][0], norm); - ibz_gcd(&tmp, &(*vec)[0], &(*vec)[1]); + ibz_mod(&vec->v[0], &mat.m[0][0], norm); + ibz_mod(&vec->v[1], &mat.m[1][0], norm); + ibz_gcd(&tmp, &vec->v[0], &vec->v[1]); if (ibz_is_even(&tmp)) { - ibz_mod(&(*vec)[0], &mat[0][1], norm); - ibz_mod(&(*vec)[1], &mat[1][1], norm); + ibz_mod(&vec->v[0], &mat.m[0][1], norm); + ibz_mod(&vec->v[1], &mat.m[1][1], norm); } #ifndef NDEBUG - ibz_gcd(&tmp, &(*vec)[0], norm); - ibz_gcd(&tmp, &(*vec)[1], &tmp); + ibz_gcd(&tmp, &vec->v[0], norm); + ibz_gcd(&tmp, &vec->v[1], &tmp); assert(!ibz_cmp(&tmp, &ibz_const_one)); #endif } @@ -102,28 +102,28 @@ matrix_application_even_basis(ec_basis_t *bas, const ec_curve_t *E, ibz_mat_2x2_ copy_basis(&tmp_bas, bas); // reduction mod 2f - ibz_mod(&(*mat)[0][0], &(*mat)[0][0], &pow_two); - ibz_mod(&(*mat)[0][1], &(*mat)[0][1], &pow_two); - ibz_mod(&(*mat)[1][0], &(*mat)[1][0], &pow_two); - ibz_mod(&(*mat)[1][1], &(*mat)[1][1], &pow_two); + ibz_mod(&mat->m[0][0], &mat->m[0][0], &pow_two); + ibz_mod(&mat->m[0][1], &mat->m[0][1], &pow_two); + ibz_mod(&mat->m[1][0], &mat->m[1][0], &pow_two); + ibz_mod(&mat->m[1][1], &mat->m[1][1], &pow_two); // For a matrix [[a, c], [b, d]] we compute: // // first basis element R = [a]P + [b]Q - ibz_to_digit_array(scalars[0], &(*mat)[0][0]); - ibz_to_digit_array(scalars[1], &(*mat)[1][0]); + ibz_to_digit_array(scalars[0], &mat->m[0][0]); + ibz_to_digit_array(scalars[1], &mat->m[1][0]); ec_biscalar_mul(&bas->P, scalars[0], scalars[1], f, &tmp_bas, E); // second basis element S = [c]P + [d]Q - ibz_to_digit_array(scalars[0], &(*mat)[0][1]); - ibz_to_digit_array(scalars[1], &(*mat)[1][1]); + ibz_to_digit_array(scalars[0], &mat->m[0][1]); + ibz_to_digit_array(scalars[1], &mat->m[1][1]); ec_biscalar_mul(&bas->Q, scalars[0], scalars[1], f, &tmp_bas, E); // Their difference R - S = [a - c]P + [b - d]Q - ibz_sub(&tmp, &(*mat)[0][0], &(*mat)[0][1]); + ibz_sub(&tmp, &mat->m[0][0], &mat->m[0][1]); ibz_mod(&tmp, &tmp, &pow_two); ibz_to_digit_array(scalars[0], &tmp); - ibz_sub(&tmp, &(*mat)[1][0], &(*mat)[1][1]); + ibz_sub(&tmp, &mat->m[1][0], &mat->m[1][1]); ibz_mod(&tmp, &tmp, &pow_two); ibz_to_digit_array(scalars[1], &tmp); ret = ec_biscalar_mul(&bas->PmQ, scalars[0], scalars[1], f, &tmp_bas, E); @@ -157,23 +157,23 @@ endomorphism_application_even_basis(ec_basis_t *bas, quat_alg_make_primitive(&coeffs, &content, theta, 
&EXTREMAL_ORDERS[index_alternate_curve].order); assert(ibz_is_odd(&content)); - ibz_set(&mat[0][0], 0); - ibz_set(&mat[0][1], 0); - ibz_set(&mat[1][0], 0); - ibz_set(&mat[1][1], 0); + ibz_set(&mat.m[0][0], 0); + ibz_set(&mat.m[0][1], 0); + ibz_set(&mat.m[1][0], 0); + ibz_set(&mat.m[1][1], 0); // computing the matrix for (unsigned i = 0; i < 2; ++i) { - ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + ibz_add(&mat.m[i][i], &mat.m[i][i], &coeffs.v[0]); for (unsigned j = 0; j < 2; ++j) { - ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen2[i][j], &coeffs[1]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen3[i][j], &coeffs[2]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen4[i][j], &coeffs[3]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&mat[i][j], &mat[i][j], &content); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen2.m[i][j], &coeffs.v[1]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen3.m[i][j], &coeffs.v[2]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen4.m[i][j], &coeffs.v[3]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&mat.m[i][j], &mat.m[i][j], &content); } } @@ -215,19 +215,19 @@ id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t * ibz_mat_2x2_t mat; ibz_mat_2x2_init(&mat); - ibz_copy(&mat[0][0], &(*vec2)[0]); - ibz_copy(&mat[1][0], &(*vec2)[1]); + ibz_copy(&mat.m[0][0], &vec2->v[0]); + ibz_copy(&mat.m[1][0], &vec2->v[1]); ibz_mat_2x2_eval(&vec, &ACTION_J, vec2); - ibz_copy(&mat[0][1], &vec[0]); - ibz_copy(&mat[1][1], &vec[1]); + ibz_copy(&mat.m[0][1], &vec.v[0]); + ibz_copy(&mat.m[1][1], &vec.v[1]); ibz_mat_2x2_eval(&vec, &ACTION_GEN4, vec2); - ibz_add(&mat[0][1], &mat[0][1], &vec[0]); - ibz_add(&mat[1][1], &mat[1][1], &vec[1]); + ibz_add(&mat.m[0][1], &mat.m[0][1], &vec.v[0]); + ibz_add(&mat.m[1][1], &mat.m[1][1], &vec.v[1]); - ibz_mod(&mat[0][1], &mat[0][1], &two_pow); - ibz_mod(&mat[1][1], &mat[1][1], &two_pow); + ibz_mod(&mat.m[0][1], &mat.m[0][1], &two_pow); + ibz_mod(&mat.m[1][1], &mat.m[1][1], &two_pow); ibz_mat_2x2_t inv; ibz_mat_2x2_init(&inv); @@ -247,11 +247,11 @@ id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t * quat_alg_elem_t gen; quat_alg_elem_init(&gen); ibz_set(&gen.denom, 2); - ibz_add(&gen.coord[0], &vec[0], &vec[0]); - ibz_set(&gen.coord[1], -2); - ibz_add(&gen.coord[2], &vec[1], &vec[1]); - ibz_copy(&gen.coord[3], &vec[1]); - ibz_add(&gen.coord[0], &gen.coord[0], &vec[1]); + ibz_add(&gen.coord.v[0], &vec.v[0], &vec.v[0]); + ibz_set(&gen.coord.v[1], -2); + ibz_add(&gen.coord.v[2], &vec.v[1], &vec.v[1]); + ibz_copy(&gen.coord.v[3], &vec.v[1]); + ibz_add(&gen.coord.v[0], &gen.coord.v[0], &vec.v[1]); ibz_vec_2_finalize(&vec); quat_lideal_create(lideal, &gen, &two_pow, &MAXORD_O0, &QUATALG_PINFTY); @@ -319,10 +319,10 @@ _change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, #endif // Copy the results into the matrix - ibz_copy_digit_array(&((*mat)[0][0]), x1); - ibz_copy_digit_array(&((*mat)[1][0]), x2); - ibz_copy_digit_array(&((*mat)[0][1]), x3); - ibz_copy_digit_array(&((*mat)[1][1]), x4); + ibz_copy_digit_array(&(mat->m[0][0]), x1); + ibz_copy_digit_array(&(mat->m[1][0]), x2); + ibz_copy_digit_array(&(mat->m[0][1]), x3); + 
ibz_copy_digit_array(&(mat->m[1][1]), x4); } void diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ideal.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ideal.c index 9cf863a104..8634143941 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ideal.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ideal.c @@ -33,7 +33,7 @@ quat_lideal_copy(quat_left_ideal_t *copy, const quat_left_ideal_t *copied) ibz_copy(©->lattice.denom, &copied->lattice.denom); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(©->lattice.basis[i][j], &copied->lattice.basis[i][j]); + ibz_copy(©->lattice.basis.m[i][j], &copied->lattice.basis.m[i][j]); } } } @@ -248,13 +248,13 @@ quat_lideal_class_gram(ibz_mat_4x4_t *G, const quat_left_ideal_t *lideal, const for (int i = 0; i < 4; i++) { for (int j = 0; j <= i; j++) { - ibz_div(&(*G)[i][j], &rmd, &(*G)[i][j], &divisor); + ibz_div(&G->m[i][j], &rmd, &G->m[i][j], &divisor); assert(ibz_is_zero(&rmd)); } } for (int i = 0; i < 4; i++) { for (int j = 0; j <= i - 1; j++) { - ibz_copy(&(*G)[j][i], &(*G)[i][j]); + ibz_copy(&G->m[j][i], &G->m[i][j]); } } @@ -289,8 +289,8 @@ quat_order_discriminant(ibz_t *disc, const quat_lattice_t *order, const quat_alg ibz_mat_4x4_transpose(&transposed, &(order->basis)); // multiply gram matrix by 2 because of reduced trace ibz_mat_4x4_identity(&norm); - ibz_copy(&(norm[2][2]), &(alg->p)); - ibz_copy(&(norm[3][3]), &(alg->p)); + ibz_copy(&(norm.m[2][2]), &(alg->p)); + ibz_copy(&(norm.m[3][3]), &(alg->p)); ibz_mat_4x4_scalar_mul(&norm, &ibz_const_two, &norm); ibz_mat_4x4_mul(&prod, &transposed, &norm); ibz_mat_4x4_mul(&prod, &prod, &(order->basis)); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/intbig.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/intbig.c index b0462dc8b5..e219bf3d96 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/intbig.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/intbig.c @@ -114,48 +114,48 @@ DEBUG_STR_FUN_4(const char *op, const ibz_t *arg1, const ibz_t *arg2, const ibz_ * @{ */ -const __mpz_struct ibz_const_zero[1] = { +const ibz_t ibz_const_zero = {{ { ._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]){ 0 }, } -}; +}}; -const __mpz_struct ibz_const_one[1] = { +const ibz_t ibz_const_one = {{ { ._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]){ 1 }, } -}; +}}; -const __mpz_struct ibz_const_two[1] = { +const ibz_t ibz_const_two = {{ { ._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]){ 2 }, } -}; +}}; -const __mpz_struct ibz_const_three[1] = { +const ibz_t ibz_const_three = {{ { ._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]){ 3 }, } -}; +}}; void ibz_init(ibz_t *x) { - mpz_init(*x); + mpz_init(x->i); } void ibz_finalize(ibz_t *x) { - mpz_clear(*x); + mpz_clear(x->i); } void @@ -168,7 +168,7 @@ ibz_add(ibz_t *sum, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_add(*sum, *a, *b); + mpz_add(sum->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_3("ibz_add", sum, &a_cp, &b_cp); ibz_finalize(&a_cp); @@ -186,7 +186,7 @@ ibz_sub(ibz_t *diff, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_sub(*diff, *a, *b); + mpz_sub(diff->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_3("ibz_sub", diff, &a_cp, &b_cp); @@ -205,7 +205,7 @@ ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_mul(*prod, *a, *b); + mpz_mul(prod->i, a->i, b->i); #ifdef DEBUG_VERBOSE 
DEBUG_STR_FUN_3("ibz_mul", prod, &a_cp, &b_cp); ibz_finalize(&a_cp); @@ -216,13 +216,13 @@ ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b) void ibz_neg(ibz_t *neg, const ibz_t *a) { - mpz_neg(*neg, *a); + mpz_neg(neg->i, a->i); } void ibz_abs(ibz_t *abs, const ibz_t *a) { - mpz_abs(*abs, *a); + mpz_abs(abs->i, a->i); } void @@ -235,7 +235,7 @@ ibz_div(ibz_t *quotient, ibz_t *remainder, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_tdiv_qr(*quotient, *remainder, *a, *b); + mpz_tdiv_qr(quotient->i, remainder->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_4("ibz_div", quotient, remainder, &a_cp, &b_cp); ibz_finalize(&a_cp); @@ -251,7 +251,7 @@ ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp) ibz_init(&a_cp); ibz_copy(&a_cp, a); #endif - mpz_tdiv_q_2exp(*quotient, *a, exp); + mpz_tdiv_q_2exp(quotient->i, a->i, exp); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_MP2_INT("ibz_div_2exp,%Zx,%Zx,%x\n", quotient, &a_cp, exp); ibz_finalize(&a_cp); @@ -261,50 +261,50 @@ ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp) void ibz_div_floor(ibz_t *q, ibz_t *r, const ibz_t *n, const ibz_t *d) { - mpz_fdiv_qr(*q, *r, *n, *d); + mpz_fdiv_qr(q->i, r->i, n->i, d->i); } void ibz_mod(ibz_t *r, const ibz_t *a, const ibz_t *b) { - mpz_mod(*r, *a, *b); + mpz_mod(r->i, a->i, b->i); } unsigned long int -ibz_mod_ui(const mpz_t *n, unsigned long int d) +ibz_mod_ui(const ibz_t *n, unsigned long int d) { - return mpz_fdiv_ui(*n, d); + return mpz_fdiv_ui(n->i, d); } int ibz_divides(const ibz_t *a, const ibz_t *b) { - return mpz_divisible_p(*a, *b); + return mpz_divisible_p(a->i, b->i); } void ibz_pow(ibz_t *pow, const ibz_t *x, uint32_t e) { - mpz_pow_ui(*pow, *x, e); + mpz_pow_ui(pow->i, x->i, e); } void ibz_pow_mod(ibz_t *pow, const ibz_t *x, const ibz_t *e, const ibz_t *m) { - mpz_powm(*pow, *x, *e, *m); + mpz_powm(pow->i, x->i, e->i, m->i); DEBUG_STR_FUN_4("ibz_pow_mod", pow, x, e, m); } int ibz_two_adic(ibz_t *pow) { - return mpz_scan1(*pow, 0); + return mpz_scan1(pow->i, 0); } int ibz_cmp(const ibz_t *a, const ibz_t *b) { - int ret = mpz_cmp(*a, *b); + int ret = mpz_cmp(a->i, b->i); DEBUG_STR_FUN_INT_MP2("ibz_cmp", ret, a, b); return ret; } @@ -312,7 +312,7 @@ ibz_cmp(const ibz_t *a, const ibz_t *b) int ibz_is_zero(const ibz_t *x) { - int ret = !mpz_cmp_ui(*x, 0); + int ret = !mpz_cmp_ui(x->i, 0); DEBUG_STR_FUN_INT_MP("ibz_is_zero", ret, x); return ret; } @@ -320,7 +320,7 @@ ibz_is_zero(const ibz_t *x) int ibz_is_one(const ibz_t *x) { - int ret = !mpz_cmp_ui(*x, 1); + int ret = !mpz_cmp_ui(x->i, 1); DEBUG_STR_FUN_INT_MP("ibz_is_one", ret, x); return ret; } @@ -328,7 +328,7 @@ ibz_is_one(const ibz_t *x) int ibz_cmp_int32(const ibz_t *x, int32_t y) { - int ret = mpz_cmp_si(*x, (signed long int)y); + int ret = mpz_cmp_si(x->i, (signed long int)y); DEBUG_STR_FUN_INT_MP_INT("ibz_cmp_int32", ret, x, y); return ret; } @@ -336,7 +336,7 @@ ibz_cmp_int32(const ibz_t *x, int32_t y) int ibz_is_even(const ibz_t *x) { - int ret = !mpz_tstbit(*x, 0); + int ret = !mpz_tstbit(x->i, 0); DEBUG_STR_FUN_INT_MP("ibz_is_even", ret, x); return ret; } @@ -344,7 +344,7 @@ ibz_is_even(const ibz_t *x) int ibz_is_odd(const ibz_t *x) { - int ret = mpz_tstbit(*x, 0); + int ret = mpz_tstbit(x->i, 0); DEBUG_STR_FUN_INT_MP("ibz_is_odd", ret, x); return ret; } @@ -352,7 +352,7 @@ ibz_is_odd(const ibz_t *x) void ibz_set(ibz_t *i, int32_t x) { - mpz_set_si(*i, x); + mpz_set_si(i->i, x); } int @@ -361,7 +361,7 @@ ibz_convert_to_str(const ibz_t *i, char *str, int base) if (!str || (base != 10 && 
base != 16)) return 0; - mpz_get_str(str, base, *i); + mpz_get_str(str, base, i->i); return 1; } @@ -380,29 +380,29 @@ ibz_print(const ibz_t *num, int base) int ibz_set_from_str(ibz_t *i, const char *str, int base) { - return (1 + mpz_set_str(*i, str, base)); + return (1 + mpz_set_str(i->i, str, base)); } void ibz_copy(ibz_t *target, const ibz_t *value) { - mpz_set(*target, *value); + mpz_set(target->i, value->i); } void ibz_swap(ibz_t *a, ibz_t *b) { - mpz_swap(*a, *b); + mpz_swap(a->i, b->i); } int32_t ibz_get(const ibz_t *i) { #if LONG_MAX == INT32_MAX - return (int32_t)mpz_get_si(*i); + return (int32_t)mpz_get_si(i->i); #elif LONG_MAX > INT32_MAX // Extracts the sign bit and the 31 least significant bits - signed long int t = mpz_get_si(*i); + signed long int t = mpz_get_si(i->i); return (int32_t)((t >> (sizeof(signed long int) * 8 - 32)) & INT32_C(0x80000000)) | (t & INT32_C(0x7FFFFFFF)); #else #error Unsupported configuration: LONG_MAX must be >= INT32_MAX @@ -417,10 +417,10 @@ ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b) mpz_t tmp; mpz_t bmina; mpz_init(bmina); - mpz_sub(bmina, *b, *a); + mpz_sub(bmina, b->i, a->i); if (mpz_sgn(bmina) == 0) { - mpz_set(*rand, *a); + mpz_set(rand->i, a->i); mpz_clear(bmina); return 1; } @@ -466,7 +466,7 @@ ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b) break; } while (1); - mpz_add(*rand, tmp, *a); + mpz_add(rand->i, tmp, a->i); err: mpz_clear(bmina); return ret; @@ -534,19 +534,19 @@ int ibz_rand_interval_minm_m(ibz_t *rand, int32_t m) { int ret = 1; - mpz_t m_big; + ibz_t m_big; // m_big = 2 * m - mpz_init_set_si(m_big, m); - mpz_add(m_big, m_big, m_big); + mpz_init_set_si(m_big.i, m); + mpz_add(m_big.i, m_big.i, m_big.i); // Sample in [0, 2*m] ret = ibz_rand_interval(rand, &ibz_const_zero, &m_big); // Adjust to range [-m, m] - mpz_sub_ui(*rand, *rand, m); + mpz_sub_ui(rand->i, rand->i, m); - mpz_clear(m_big); + mpz_clear(m_big.i); return ret; } @@ -555,41 +555,41 @@ int ibz_rand_interval_bits(ibz_t *rand, uint32_t m) { int ret = 1; - mpz_t tmp; - mpz_t low; - mpz_init_set_ui(tmp, 1); - mpz_mul_2exp(tmp, tmp, m); - mpz_init(low); - mpz_neg(low, tmp); + ibz_t tmp; + ibz_t low; + mpz_init_set_ui(tmp.i, 1); + mpz_mul_2exp(tmp.i, tmp.i, m); + mpz_init(low.i); + mpz_neg(low.i, tmp.i); ret = ibz_rand_interval(rand, &low, &tmp); - mpz_clear(tmp); - mpz_clear(low); + mpz_clear(tmp.i); + mpz_clear(low.i); if (ret != 1) goto err; - mpz_sub_ui(*rand, *rand, (unsigned long int)m); + mpz_sub_ui(rand->i, rand->i, (unsigned long int)m); return ret; err: - mpz_clear(tmp); - mpz_clear(low); + mpz_clear(tmp.i); + mpz_clear(low.i); return ret; } int ibz_bitsize(const ibz_t *a) { - return (int)mpz_sizeinbase(*a, 2); + return (int)mpz_sizeinbase(a->i, 2); } int ibz_size_in_base(const ibz_t *a, int base) { - return (int)mpz_sizeinbase(*a, base); + return (int)mpz_sizeinbase(a->i, base); } void ibz_copy_digits(ibz_t *target, const digit_t *dig, int dig_len) { - mpz_import(*target, dig_len, -1, sizeof(digit_t), 0, 0, dig); + mpz_import(target->i, dig_len, -1, sizeof(digit_t), 0, 0, dig); } void @@ -600,13 +600,13 @@ ibz_to_digits(digit_t *target, const ibz_t *ibz) // The next line ensures zero is written to the first limb of target if ibz is zero; // target is then overwritten by the actual value if it is not. 
target[0] = 0; - mpz_export(target, NULL, -1, sizeof(digit_t), 0, 0, *ibz); + mpz_export(target, NULL, -1, sizeof(digit_t), 0, 0, ibz->i); } int ibz_probab_prime(const ibz_t *n, int reps) { - int ret = mpz_probab_prime_p(*n, reps); + int ret = mpz_probab_prime_p(n->i, reps); DEBUG_STR_FUN_INT_MP_INT("ibz_probab_prime", ret, n, reps); return ret; } @@ -614,26 +614,26 @@ ibz_probab_prime(const ibz_t *n, int reps) void ibz_gcd(ibz_t *gcd, const ibz_t *a, const ibz_t *b) { - mpz_gcd(*gcd, *a, *b); + mpz_gcd(gcd->i, a->i, b->i); } int ibz_invmod(ibz_t *inv, const ibz_t *a, const ibz_t *mod) { - return (mpz_invert(*inv, *a, *mod) ? 1 : 0); + return (mpz_invert(inv->i, a->i, mod->i) ? 1 : 0); } int ibz_legendre(const ibz_t *a, const ibz_t *p) { - return mpz_legendre(*a, *p); + return mpz_legendre(a->i, p->i); } int ibz_sqrt(ibz_t *sqrt, const ibz_t *a) { - if (mpz_perfect_square_p(*a)) { - mpz_sqrt(*sqrt, *a); + if (mpz_perfect_square_p(a->i)) { + mpz_sqrt(sqrt->i, a->i); return 1; } else { return 0; @@ -643,7 +643,7 @@ ibz_sqrt(ibz_t *sqrt, const ibz_t *a) void ibz_sqrt_floor(ibz_t *sqrt, const ibz_t *a) { - mpz_sqrt(*sqrt, *a); + mpz_sqrt(sqrt->i, a->i); } int @@ -686,85 +686,85 @@ ibz_sqrt_mod_p(ibz_t *sqrt, const ibz_t *a, const ibz_t *p) int ret = 1; - mpz_mod(amod, *a, *p); + mpz_mod(amod, a->i, p->i); if (mpz_cmp_ui(amod, 0) < 0) { - mpz_add(amod, *p, amod); + mpz_add(amod, p->i, amod); } - if (mpz_legendre(amod, *p) != 1) { + if (mpz_legendre(amod, p->i) != 1) { ret = 0; goto end; } - mpz_sub_ui(pm1, *p, 1); + mpz_sub_ui(pm1, p->i, 1); - if (mpz_mod_ui(tmp, *p, 4) == 3) { + if (mpz_mod_ui(tmp, p->i, 4) == 3) { // p % 4 == 3 - mpz_add_ui(tmp, *p, 1); + mpz_add_ui(tmp, p->i, 1); mpz_fdiv_q_2exp(tmp, tmp, 2); - mpz_powm(*sqrt, amod, tmp, *p); - } else if (mpz_mod_ui(tmp, *p, 8) == 5) { + mpz_powm(sqrt->i, amod, tmp, p->i); + } else if (mpz_mod_ui(tmp, p->i, 8) == 5) { // p % 8 == 5 - mpz_sub_ui(tmp, *p, 1); + mpz_sub_ui(tmp, p->i, 1); mpz_fdiv_q_2exp(tmp, tmp, 2); - mpz_powm(tmp, amod, tmp, *p); // a^{(p-1)/4} mod p + mpz_powm(tmp, amod, tmp, p->i); // a^{(p-1)/4} mod p if (!mpz_cmp_ui(tmp, 1)) { - mpz_add_ui(tmp, *p, 3); + mpz_add_ui(tmp, p->i, 3); mpz_fdiv_q_2exp(tmp, tmp, 3); - mpz_powm(*sqrt, amod, tmp, *p); // a^{(p+3)/8} mod p + mpz_powm(sqrt->i, amod, tmp, p->i); // a^{(p+3)/8} mod p } else { - mpz_sub_ui(tmp, *p, 5); + mpz_sub_ui(tmp, p->i, 5); mpz_fdiv_q_2exp(tmp, tmp, 3); // (p - 5) / 8 mpz_mul_2exp(a4, amod, 2); // 4*a - mpz_powm(tmp, a4, tmp, *p); + mpz_powm(tmp, a4, tmp, p->i); mpz_mul_2exp(a2, amod, 1); mpz_mul(tmp, a2, tmp); - mpz_mod(*sqrt, tmp, *p); + mpz_mod(sqrt->i, tmp, p->i); } } else { // p % 8 == 1 -> Shanks-Tonelli int e = 0; - mpz_sub_ui(q, *p, 1); + mpz_sub_ui(q, p->i, 1); while (mpz_tstbit(q, e) == 0) e++; mpz_fdiv_q_2exp(q, q, e); // 1. find generator - non-quadratic residue mpz_set_ui(qnr, 2); - while (mpz_legendre(qnr, *p) != -1) + while (mpz_legendre(qnr, p->i) != -1) mpz_add_ui(qnr, qnr, 1); - mpz_powm(z, qnr, q, *p); + mpz_powm(z, qnr, q, p->i); // 2. 
Initialize mpz_set(y, z); - mpz_powm(y, amod, q, *p); // y = a^q mod p + mpz_powm(y, amod, q, p->i); // y = a^q mod p mpz_add_ui(tmp, q, 1); // tmp = (q + 1) / 2 mpz_fdiv_q_2exp(tmp, tmp, 1); - mpz_powm(x, amod, tmp, *p); // x = a^(q + 1)/2 mod p + mpz_powm(x, amod, tmp, p->i); // x = a^(q + 1)/2 mod p mpz_set_ui(exp, 1); mpz_mul_2exp(exp, exp, e - 2); for (int i = 0; i < e; ++i) { - mpz_powm(b, y, exp, *p); + mpz_powm(b, y, exp, p->i); if (!mpz_cmp(b, pm1)) { mpz_mul(x, x, z); - mpz_mod(x, x, *p); + mpz_mod(x, x, p->i); mpz_mul(y, y, z); mpz_mul(y, y, z); - mpz_mod(y, y, *p); + mpz_mod(y, y, p->i); } - mpz_powm_ui(z, z, 2, *p); + mpz_powm_ui(z, z, 2, p->i); mpz_fdiv_q_2exp(exp, exp, 1); } - mpz_set(*sqrt, x); + mpz_set(sqrt->i, x); } #ifdef DEBUG_VERBOSE diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/intbig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/intbig.h index a0c2c02477..28e478ff7f 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/intbig.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/intbig.h @@ -33,7 +33,9 @@ * * For integers of arbitrary size, used by intbig module, using gmp */ -typedef mpz_t ibz_t; +typedef struct { + mpz_t i; +} ibz_t; /** @} */ @@ -129,7 +131,7 @@ int ibz_two_adic(ibz_t *pow); */ void ibz_mod(ibz_t *r, const ibz_t *a, const ibz_t *b); -unsigned long int ibz_mod_ui(const mpz_t *n, unsigned long int d); +unsigned long int ibz_mod_ui(const ibz_t *n, unsigned long int d); /** @brief Test if a = 0 mod b */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/l2.c index 5491ee44d0..ea32213c75 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/l2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/l2.c @@ -57,25 +57,25 @@ to_etabar(fp_num *x) } static void -from_mpz(const mpz_t x, fp_num *r) +from_mpz(const ibz_t *x, fp_num *r) { long exp = 0; - r->s = mpz_get_d_2exp(&exp, x); + r->s = mpz_get_d_2exp(&exp, x->i); r->e = exp; } static void -to_mpz(const fp_num *x, mpz_t r) +to_mpz(const fp_num *x, ibz_t *r) { if (x->e >= DBL_MANT_DIG) { double s = x->s * 0x1P53; - mpz_set_d(r, s); - mpz_mul_2exp(r, r, x->e - DBL_MANT_DIG); + mpz_set_d(r->i, s); + mpz_mul_2exp(r->i, r->i, x->e - DBL_MANT_DIG); } else if (x->e < 0) { - mpz_set_ui(r, 0); + mpz_set_ui(r->i, 0); } else { double s = ldexp(x->s, x->e); - mpz_set_d(r, round(s)); + mpz_set_d(r->i, round(s)); } } @@ -203,7 +203,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) ibz_init(&tmpI); // Main L² loop - from_mpz((*G)[0][0], &r[0][0]); + from_mpz(&G->m[0][0], &r[0][0]); int kappa = 1; while (kappa < 4) { // size reduce b_κ @@ -213,7 +213,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Loop invariant: // r[κ][j] ≈ u[κ][j] ‖b_j*‖² ≈ 〈b_κ, b_j*〉 for (int j = 0; j <= kappa; j++) { - from_mpz((*G)[kappa][j], &r[kappa][j]); + from_mpz(&G->m[kappa][j], &r[kappa][j]); for (int k = 0; k < j; k++) { fp_mul(&r[kappa][k], &u[j][k], &tmpF); fp_sub(&r[kappa][j], &tmpF, &r[kappa][j]); @@ -229,22 +229,22 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) done = 0; copy(&u[kappa][i], &Xf); fp_round(&Xf); - to_mpz(&Xf, X); + to_mpz(&Xf, &X); // Update basis: b_κ ← b_κ - X·b_i for (int j = 0; j < 4; j++) { - ibz_mul(&tmpI, &X, &(*basis)[j][i]); - ibz_sub(&(*basis)[j][kappa], &(*basis)[j][kappa], &tmpI); + ibz_mul(&tmpI, &X, &basis->m[j][i]); + ibz_sub(&basis->m[j][kappa], &basis->m[j][kappa], &tmpI); } // Update lower half of the Gram matrix // = - 2X + X² = // - X - X( 
- X·) //// 〈b_κ, b_κ〉 ← 〈b_κ, b_κ〉 - X·〈b_κ, b_i〉 - ibz_mul(&tmpI, &X, &(*G)[kappa][i]); - ibz_sub(&(*G)[kappa][kappa], &(*G)[kappa][kappa], &tmpI); + ibz_mul(&tmpI, &X, &G->m[kappa][i]); + ibz_sub(&G->m[kappa][kappa], &G->m[kappa][kappa], &tmpI); for (int j = 0; j < 4; j++) { // works because i < κ // 〈b_κ, b_j〉 ← 〈b_κ, b_j〉 - X·〈b_i, b_j〉 - ibz_mul(&tmpI, &X, SYM((*G), i, j)); - ibz_sub(SYM((*G), kappa, j), SYM((*G), kappa, j), &tmpI); + ibz_mul(&tmpI, &X, SYM(G->m, i, j)); + ibz_sub(SYM(G->m, kappa, j), SYM(G->m, kappa, j), &tmpI); } // After the loop: //// 〈b_κ,b_κ〉 ← 〈b_κ,b_κ〉 - X·〈b_κ,b_i〉 - X·(〈b_κ,b_i〉 - X·〈b_i, @@ -261,7 +261,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Check Lovasz' conditions // lovasz[0] = ‖b_κ‖² - from_mpz((*G)[kappa][kappa], &lovasz[0]); + from_mpz(&G->m[kappa][kappa], &lovasz[0]); // lovasz[i] = lovasz[i-1] - u[κ][i-1]·r[κ][i-1] for (int i = 1; i < kappa; i++) { fp_mul(&u[kappa][i - 1], &r[kappa][i - 1], &tmpF); @@ -279,11 +279,11 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Insert b_κ before b_swap in the basis and in the lower half Gram matrix for (int j = kappa; j > swap; j--) { for (int i = 0; i < 4; i++) { - ibz_swap(&(*basis)[i][j], &(*basis)[i][j - 1]); + ibz_swap(&basis->m[i][j], &basis->m[i][j - 1]); if (i == j - 1) - ibz_swap(&(*G)[i][i], &(*G)[j][j]); + ibz_swap(&G->m[i][i], &G->m[j][j]); else if (i != j) - ibz_swap(SYM((*G), i, j), SYM((*G), i, j - 1)); + ibz_swap(SYM(G->m, i, j), SYM(G->m, i, j - 1)); } } // Copy row u[κ] and r[κ] in swap position, ignore what follows @@ -318,7 +318,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Fill in the upper half of the Gram matrix for (int i = 0; i < 4; i++) { for (int j = i + 1; j < 4; j++) - ibz_copy(&(*G)[i][j], &(*G)[j][i]); + ibz_copy(&G->m[i][j], &G->m[j][i]); } // Clearinghouse diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lat_ball.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lat_ball.c index c7bbb9682f..3f7476988c 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lat_ball.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lat_ball.c @@ -28,10 +28,10 @@ quat_lattice_bound_parallelogram(ibz_vec_4_t *box, ibz_mat_4x4_t *U, const ibz_m // Compute the parallelogram's bounds int trivial = 1; for (int i = 0; i < 4; i++) { - ibz_mul(&(*box)[i], &dualG[i][i], radius); - ibz_div(&(*box)[i], &rem, &(*box)[i], &denom); - ibz_sqrt_floor(&(*box)[i], &(*box)[i]); - trivial &= ibz_is_zero(&(*box)[i]); + ibz_mul(&box->v[i], &dualG.m[i][i], radius); + ibz_div(&box->v[i], &rem, &box->v[i], &denom); + ibz_sqrt_floor(&box->v[i], &box->v[i]); + trivial &= ibz_is_zero(&box->v[i]); } // Compute the transpose transformation matrix @@ -95,12 +95,12 @@ quat_lattice_sample_from_ball(quat_alg_elem_t *res, do { // Sample vector for (int i = 0; i < 4; i++) { - if (ibz_is_zero(&box[i])) { - ibz_copy(&x[i], &ibz_const_zero); + if (ibz_is_zero(&box.v[i])) { + ibz_copy(&x.v[i], &ibz_const_zero); } else { - ibz_add(&tmp, &box[i], &box[i]); - ok &= ibz_rand_interval(&x[i], &ibz_const_zero, &tmp); - ibz_sub(&x[i], &x[i], &box[i]); + ibz_add(&tmp, &box.v[i], &box.v[i]); + ok &= ibz_rand_interval(&x.v[i], &ibz_const_zero, &tmp); + ibz_sub(&x.v[i], &x.v[i], &box.v[i]); if (!ok) goto err; } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lattice.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lattice.c index c98bae9499..ef7b9ccdcc 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lattice.c +++ 
b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lattice.c @@ -57,7 +57,7 @@ quat_lattice_conjugate_without_hnf(quat_lattice_t *conj, const quat_lattice_t *l for (int row = 1; row < 4; ++row) { for (int col = 0; col < 4; ++col) { - ibz_neg(&(conj->basis[row][col]), &(conj->basis[row][col])); + ibz_neg(&(conj->basis.m[row][col]), &(conj->basis.m[row][col])); } } } @@ -96,14 +96,14 @@ quat_lattice_add(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lat ibz_mat_4x4_scalar_mul(&tmp, &(lat1->denom), &(lat2->basis)); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(generators[j][i]), &(tmp[i][j])); + ibz_copy(&(generators[j].v[i]), &(tmp.m[i][j])); } } ibz_mat_4x4_inv_with_det_as_denom(NULL, &det1, &tmp); ibz_mat_4x4_scalar_mul(&tmp, &(lat2->denom), &(lat1->basis)); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(generators[4 + j][i]), &(tmp[i][j])); + ibz_copy(&(generators[4 + j].v[i]), &(tmp.m[i][j])); } } ibz_mat_4x4_inv_with_det_as_denom(NULL, &det2, &tmp); @@ -151,12 +151,12 @@ quat_lattice_mat_alg_coord_mul_without_hnf(ibz_mat_4x4_t *prod, ibz_vec_4_init(&p); ibz_vec_4_init(&a); for (int i = 0; i < 4; i++) { - ibz_vec_4_copy_ibz(&a, &((*lat)[0][i]), &((*lat)[1][i]), &((*lat)[2][i]), &((*lat)[3][i])); + ibz_vec_4_copy_ibz(&a, &(lat->m[0][i]), &(lat->m[1][i]), &(lat->m[2][i]), &(lat->m[3][i])); quat_alg_coord_mul(&p, &a, coord, alg); - ibz_copy(&((*prod)[0][i]), &(p[0])); - ibz_copy(&((*prod)[1][i]), &(p[1])); - ibz_copy(&((*prod)[2][i]), &(p[2])); - ibz_copy(&((*prod)[3][i]), &(p[3])); + ibz_copy(&(prod->m[0][i]), &(p.v[0])); + ibz_copy(&(prod->m[1][i]), &(p.v[1])); + ibz_copy(&(prod->m[2][i]), &(p.v[2])); + ibz_copy(&(prod->m[3][i]), &(p.v[3])); } ibz_vec_4_finalize(&p); ibz_vec_4_finalize(&a); @@ -191,15 +191,15 @@ quat_lattice_mul(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lat ibz_vec_4_init(&(generators[i])); for (int k = 0; k < 4; k++) { ibz_vec_4_copy_ibz( - &elem1, &(lat1->basis[0][k]), &(lat1->basis[1][k]), &(lat1->basis[2][k]), &(lat1->basis[3][k])); + &elem1, &(lat1->basis.m[0][k]), &(lat1->basis.m[1][k]), &(lat1->basis.m[2][k]), &(lat1->basis.m[3][k])); for (int i = 0; i < 4; i++) { ibz_vec_4_copy_ibz( - &elem2, &(lat2->basis[0][i]), &(lat2->basis[1][i]), &(lat2->basis[2][i]), &(lat2->basis[3][i])); + &elem2, &(lat2->basis.m[0][i]), &(lat2->basis.m[1][i]), &(lat2->basis.m[2][i]), &(lat2->basis.m[3][i])); quat_alg_coord_mul(&elem_res, &elem1, &elem2, alg); for (int j = 0; j < 4; j++) { if (k == 0) - ibz_copy(&(detmat[i][j]), &(elem_res[j])); - ibz_copy(&(generators[4 * k + i][j]), &(elem_res[j])); + ibz_copy(&(detmat.m[i][j]), &(elem_res.v[j])); + ibz_copy(&(generators[4 * k + i].v[j]), &(elem_res.v[j])); } } } @@ -239,7 +239,7 @@ quat_lattice_contains(ibz_vec_4_t *coord, const quat_lattice_t *lat, const quat_ // copy result if (divisible && (coord != NULL)) { for (int i = 0; i < 4; i++) { - ibz_copy(&((*coord)[i]), &(work_coord[i])); + ibz_copy(&(coord->v[i]), &(work_coord.v[i])); } } ibz_finalize(&prod); @@ -292,7 +292,7 @@ quat_lattice_hnf(quat_lattice_t *lat) ibz_vec_4_init(&(generators[i])); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(generators[j][i]), &(lat->basis[i][j])); + ibz_copy(&(generators[j].v[i]), &(lat->basis.m[i][j])); } } ibz_mat_4xn_hnf_mod_core(&(lat->basis), 4, generators, &mod); @@ -309,19 +309,19 @@ quat_lattice_gram(ibz_mat_4x4_t *G, const quat_lattice_t *lattice, const quat_al ibz_init(&tmp); for (int i = 0; i < 4; i++) { for (int j = 0; j <= i; 
j++) { - ibz_set(&(*G)[i][j], 0); + ibz_set(&G->m[i][j], 0); for (int k = 0; k < 4; k++) { - ibz_mul(&tmp, &(lattice->basis)[k][i], &(lattice->basis)[k][j]); + ibz_mul(&tmp, &(lattice->basis.m)[k][i], &(lattice->basis.m)[k][j]); if (k >= 2) ibz_mul(&tmp, &tmp, &alg->p); - ibz_add(&(*G)[i][j], &(*G)[i][j], &tmp); + ibz_add(&G->m[i][j], &G->m[i][j], &tmp); } - ibz_mul(&(*G)[i][j], &(*G)[i][j], &ibz_const_two); + ibz_mul(&G->m[i][j], &G->m[i][j], &ibz_const_two); } } for (int i = 0; i < 4; i++) { for (int j = i + 1; j < 4; j++) { - ibz_copy(&(*G)[i][j], &(*G)[j][i]); + ibz_copy(&G->m[i][j], &G->m[j][i]); } } ibz_finalize(&tmp); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lll_applications.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lll_applications.c index 6c763b8c04..f5e9af922b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lll_applications.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lll_applications.c @@ -17,9 +17,9 @@ quat_lideal_reduce_basis(ibz_mat_4x4_t *reduced, quat_lll_core(gram, reduced); ibz_mat_4x4_scalar_mul(gram, &gram_corrector, gram); for (int i = 0; i < 4; i++) { - ibz_div_2exp(&((*gram)[i][i]), &((*gram)[i][i]), 1); + ibz_div_2exp(&(gram->m[i][i]), &(gram->m[i][i]), 1); for (int j = i + 1; j < 4; j++) { - ibz_set(&((*gram)[i][j]), 0); + ibz_set(&(gram->m[i][j]), 0); } } ibz_finalize(&gram_corrector); @@ -79,10 +79,10 @@ quat_lideal_prime_norm_reduced_equivalent(quat_left_ideal_t *lideal, while (!found && ctr < equiv_num_iter) { ctr++; // we select our linear combination at random - ibz_rand_interval_minm_m(&new_alpha.coord[0], equiv_bound_coeff); - ibz_rand_interval_minm_m(&new_alpha.coord[1], equiv_bound_coeff); - ibz_rand_interval_minm_m(&new_alpha.coord[2], equiv_bound_coeff); - ibz_rand_interval_minm_m(&new_alpha.coord[3], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[0], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[1], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[2], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[3], equiv_bound_coeff); // computation of the norm of the vector sampled quat_qf_eval(&tmp, &gram, &new_alpha.coord); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/normeq.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/normeq.c index 8c133dd095..aadbbe06c7 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/normeq.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/normeq.c @@ -13,23 +13,23 @@ quat_lattice_O0_set(quat_lattice_t *O0) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&(O0->basis[i][j]), 0); + ibz_set(&(O0->basis.m[i][j]), 0); } } ibz_set(&(O0->denom), 2); - ibz_set(&(O0->basis[0][0]), 2); - ibz_set(&(O0->basis[1][1]), 2); - ibz_set(&(O0->basis[2][2]), 1); - ibz_set(&(O0->basis[1][2]), 1); - ibz_set(&(O0->basis[3][3]), 1); - ibz_set(&(O0->basis[0][3]), 1); + ibz_set(&(O0->basis.m[0][0]), 2); + ibz_set(&(O0->basis.m[1][1]), 2); + ibz_set(&(O0->basis.m[2][2]), 1); + ibz_set(&(O0->basis.m[1][2]), 1); + ibz_set(&(O0->basis.m[3][3]), 1); + ibz_set(&(O0->basis.m[0][3]), 1); } void quat_lattice_O0_set_extremal(quat_p_extremal_maximal_order_t *O0) { - ibz_set(&O0->z.coord[1], 1); - ibz_set(&O0->t.coord[2], 1); + ibz_set(&O0->z.coord.v[1], 1); + ibz_set(&O0->t.coord.v[2], 1); ibz_set(&O0->z.denom, 1); ibz_set(&O0->t.denom, 1); O0->q = 1; @@ -50,24 +50,24 @@ quat_order_elem_create(quat_alg_elem_t *elem, quat_alg_elem_init(&quat_temp); // elem = x - 
quat_alg_scalar(elem, &(*coeffs)[0], &ibz_const_one); + quat_alg_scalar(elem, &coeffs->v[0], &ibz_const_one); // quat_temp = i*y - quat_alg_scalar(&quat_temp, &((*coeffs)[1]), &ibz_const_one); + quat_alg_scalar(&quat_temp, &(coeffs->v[1]), &ibz_const_one); quat_alg_mul(&quat_temp, &order->z, &quat_temp, Bpoo); // elem = x + i*y quat_alg_add(elem, elem, &quat_temp); // quat_temp = z * j - quat_alg_scalar(&quat_temp, &(*coeffs)[2], &ibz_const_one); + quat_alg_scalar(&quat_temp, &coeffs->v[2], &ibz_const_one); quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); // elem = x + i* + z*j quat_alg_add(elem, elem, &quat_temp); // quat_temp = t * j * i - quat_alg_scalar(&quat_temp, &(*coeffs)[3], &ibz_const_one); + quat_alg_scalar(&quat_temp, &coeffs->v[3], &ibz_const_one); quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); quat_alg_mul(&quat_temp, &quat_temp, &order->z, Bpoo); @@ -143,11 +143,11 @@ quat_represent_integer(quat_alg_elem_t *gamma, ibz_sub(&counter, &counter, &ibz_const_one); // we start by sampling the first coordinate - ibz_rand_interval(&coeffs[2], &ibz_const_one, &bound); + ibz_rand_interval(&coeffs.v[2], &ibz_const_one, &bound); // then, we sample the second coordinate // computing the second bound in temp as sqrt( (adjust_n_gamma - p*coeffs[2]²)/qp ) - ibz_mul(&cornacchia_target, &coeffs[2], &coeffs[2]); + ibz_mul(&cornacchia_target, &coeffs.v[2], &coeffs.v[2]); ibz_mul(&temp, &cornacchia_target, &(params->algebra->p)); ibz_sub(&temp, &adjusted_n_gamma, &temp); ibz_mul(&sq_bound, &q, &(params->algebra->p)); @@ -158,10 +158,10 @@ quat_represent_integer(quat_alg_elem_t *gamma, continue; } // sampling the second value - ibz_rand_interval(&coeffs[3], &ibz_const_one, &temp); + ibz_rand_interval(&coeffs.v[3], &ibz_const_one, &temp); // compute cornacchia_target = n_gamma - p * (z² + q*t²) - ibz_mul(&temp, &coeffs[3], &coeffs[3]); + ibz_mul(&temp, &coeffs.v[3], &coeffs.v[3]); ibz_mul(&temp, &q, &temp); ibz_add(&cornacchia_target, &cornacchia_target, &temp); ibz_mul(&cornacchia_target, &cornacchia_target, &((params->algebra)->p)); @@ -170,7 +170,7 @@ quat_represent_integer(quat_alg_elem_t *gamma, // applying cornacchia if (ibz_probab_prime(&cornacchia_target, params->primality_test_iterations)) - found = ibz_cornacchia_prime(&(coeffs[0]), &(coeffs[1]), &q, &cornacchia_target); + found = ibz_cornacchia_prime(&(coeffs.v[0]), &(coeffs.v[1]), &q, &cornacchia_target); else found = 0; @@ -179,33 +179,33 @@ quat_represent_integer(quat_alg_elem_t *gamma, // the treatmeat depends if the basis contains (1+j)/2 or (1+k)/2 // we must have x = t mod 2 and y = z mod 2 // if q=1 we can simply swap x and y - if (ibz_is_odd(&coeffs[0]) != ibz_is_odd(&coeffs[3])) { - ibz_swap(&coeffs[1], &coeffs[0]); + if (ibz_is_odd(&coeffs.v[0]) != ibz_is_odd(&coeffs.v[3])) { + ibz_swap(&coeffs.v[1], &coeffs.v[0]); } // we further check that (x-t)/2 = 1 mod 2 and (y-z)/2 = 1 mod 2 to ensure that the // resulting endomorphism will behave well for dim 2 computations - found = found && ((ibz_get(&coeffs[0]) - ibz_get(&coeffs[3])) % 4 == 2) && - ((ibz_get(&coeffs[1]) - ibz_get(&coeffs[2])) % 4 == 2); + found = found && ((ibz_get(&coeffs.v[0]) - ibz_get(&coeffs.v[3])) % 4 == 2) && + ((ibz_get(&coeffs.v[1]) - ibz_get(&coeffs.v[2])) % 4 == 2); } if (found) { #ifndef NDEBUG ibz_set(&temp, (params->order->q)); - ibz_mul(&temp, &temp, &(coeffs[1])); - ibz_mul(&temp, &temp, &(coeffs[1])); - ibz_mul(&test, &(coeffs[0]), &(coeffs[0])); + ibz_mul(&temp, &temp, &(coeffs.v[1])); + ibz_mul(&temp, &temp, &(coeffs.v[1])); + 
ibz_mul(&test, &(coeffs.v[0]), &(coeffs.v[0])); ibz_add(&temp, &temp, &test); assert(0 == ibz_cmp(&temp, &cornacchia_target)); - ibz_mul(&cornacchia_target, &(coeffs[3]), &(coeffs[3])); + ibz_mul(&cornacchia_target, &(coeffs.v[3]), &(coeffs.v[3])); ibz_mul(&cornacchia_target, &cornacchia_target, &(params->algebra->p)); - ibz_mul(&temp, &(coeffs[1]), &(coeffs[1])); + ibz_mul(&temp, &(coeffs.v[1]), &(coeffs.v[1])); ibz_add(&cornacchia_target, &cornacchia_target, &temp); ibz_set(&temp, (params->order->q)); ibz_mul(&cornacchia_target, &cornacchia_target, &temp); - ibz_mul(&temp, &(coeffs[0]), &coeffs[0]); + ibz_mul(&temp, &(coeffs.v[0]), &coeffs.v[0]); ibz_add(&cornacchia_target, &cornacchia_target, &temp); - ibz_mul(&temp, &(coeffs[2]), &coeffs[2]); + ibz_mul(&temp, &(coeffs.v[2]), &coeffs.v[2]); ibz_mul(&temp, &temp, &(params->algebra->p)); ibz_add(&cornacchia_target, &cornacchia_target, &temp); assert(0 == ibz_cmp(&cornacchia_target, &adjusted_n_gamma)); @@ -213,8 +213,8 @@ quat_represent_integer(quat_alg_elem_t *gamma, // translate x,y,z,t into the quaternion element gamma quat_order_elem_create(gamma, (params->order), &coeffs, (params->algebra)); #ifndef NDEBUG - quat_alg_norm(&temp, &(coeffs[0]), gamma, (params->algebra)); - assert(ibz_is_one(&(coeffs[0]))); + quat_alg_norm(&temp, &(coeffs.v[0]), gamma, (params->algebra)); + assert(ibz_is_one(&(coeffs.v[0]))); assert(0 == ibz_cmp(&temp, &adjusted_n_gamma)); assert(quat_lattice_contains(NULL, &((params->order)->order), gamma)); #endif @@ -232,10 +232,10 @@ quat_represent_integer(quat_alg_elem_t *gamma, if (found) { // new gamma ibz_mat_4x4_eval(&coeffs, &(((params->order)->order).basis), &coeffs); - ibz_copy(&gamma->coord[0], &coeffs[0]); - ibz_copy(&gamma->coord[1], &coeffs[1]); - ibz_copy(&gamma->coord[2], &coeffs[2]); - ibz_copy(&gamma->coord[3], &coeffs[3]); + ibz_copy(&gamma->coord.v[0], &coeffs.v[0]); + ibz_copy(&gamma->coord.v[1], &coeffs.v[1]); + ibz_copy(&gamma->coord.v[2], &coeffs.v[2]); + ibz_copy(&gamma->coord.v[3], &coeffs.v[3]); ibz_copy(&gamma->denom, &(((params->order)->order).denom)); } // var finalize @@ -279,10 +279,10 @@ quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, // we find a quaternion element of norm divisible by norm while (!found) { // generating a trace-zero element at random - ibz_set(&gen.coord[0], 0); + ibz_set(&gen.coord.v[0], 0); ibz_sub(&n_temp, norm, &ibz_const_one); for (int i = 1; i < 4; i++) - ibz_rand_interval(&gen.coord[i], &ibz_const_zero, &n_temp); + ibz_rand_interval(&gen.coord.v[i], &ibz_const_zero, &n_temp); // first, we compute the norm of the gen quat_alg_norm(&n_temp, &norm_d, &gen, (params->algebra)); @@ -293,7 +293,7 @@ quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, ibz_mod(&disc, &disc, norm); // now we check that -n is a square mod norm // and if the square root exists we compute it - found = ibz_sqrt_mod_p(&gen.coord[0], &disc, norm); + found = ibz_sqrt_mod_p(&gen.coord.v[0], &disc, norm); found = found && !quat_alg_elem_is_zero(&gen); } } else { @@ -319,7 +319,7 @@ quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, found = 0; while (!found) { for (int i = 0; i < 4; i++) { - ibz_rand_interval(&gen_rerand.coord[i], &ibz_const_one, norm); + ibz_rand_interval(&gen_rerand.coord.v[i], &ibz_const_one, norm); } quat_alg_norm(&n_temp, &norm_d, &gen_rerand, (params->algebra)); assert(ibz_is_one(&norm_d)); @@ -348,22 +348,22 @@ quat_change_to_O0_basis(ibz_vec_4_t *vec, const quat_alg_elem_t *el) { ibz_t tmp; ibz_init(&tmp); - 
ibz_copy(&(*vec)[2], &el->coord[2]); - ibz_add(&(*vec)[2], &(*vec)[2], &(*vec)[2]); // double (not optimal if el->denom is even...) - ibz_copy(&(*vec)[3], &el->coord[3]); // double (not optimal if el->denom is even...) - ibz_add(&(*vec)[3], &(*vec)[3], &(*vec)[3]); - ibz_sub(&(*vec)[0], &el->coord[0], &el->coord[3]); - ibz_sub(&(*vec)[1], &el->coord[1], &el->coord[2]); - - assert(ibz_divides(&(*vec)[0], &el->denom)); - assert(ibz_divides(&(*vec)[1], &el->denom)); - assert(ibz_divides(&(*vec)[2], &el->denom)); - assert(ibz_divides(&(*vec)[3], &el->denom)); - - ibz_div(&(*vec)[0], &tmp, &(*vec)[0], &el->denom); - ibz_div(&(*vec)[1], &tmp, &(*vec)[1], &el->denom); - ibz_div(&(*vec)[2], &tmp, &(*vec)[2], &el->denom); - ibz_div(&(*vec)[3], &tmp, &(*vec)[3], &el->denom); + ibz_copy(&vec->v[2], &el->coord.v[2]); + ibz_add(&vec->v[2], &vec->v[2], &vec->v[2]); // double (not optimal if el->denom is even...) + ibz_copy(&vec->v[3], &el->coord.v[3]); // double (not optimal if el->denom is even...) + ibz_add(&vec->v[3], &vec->v[3], &vec->v[3]); + ibz_sub(&vec->v[0], &el->coord.v[0], &el->coord.v[3]); + ibz_sub(&vec->v[1], &el->coord.v[1], &el->coord.v[2]); + + assert(ibz_divides(&vec->v[0], &el->denom)); + assert(ibz_divides(&vec->v[1], &el->denom)); + assert(ibz_divides(&vec->v[2], &el->denom)); + assert(ibz_divides(&vec->v[3], &el->denom)); + + ibz_div(&vec->v[0], &tmp, &vec->v[0], &el->denom); + ibz_div(&vec->v[1], &tmp, &vec->v[1], &el->denom); + ibz_div(&vec->v[2], &tmp, &vec->v[2], &el->denom); + ibz_div(&vec->v[3], &tmp, &vec->v[3], &el->denom); ibz_finalize(&tmp); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion.h index a567657464..2dd70a8c19 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion.h @@ -25,7 +25,9 @@ * * @typedef ibz_vec_2_t */ -typedef ibz_t ibz_vec_2_t[2]; +typedef struct { + ibz_t v[2]; +} ibz_vec_2_t; /** @brief Type for vectors of 4 integers * @@ -33,7 +35,9 @@ typedef ibz_t ibz_vec_2_t[2]; * * Represented as a vector of 4 ibz_t (big integer) elements */ -typedef ibz_t ibz_vec_4_t[4]; +typedef struct { + ibz_t v[4]; +} ibz_vec_4_t; /** @brief Type for 2 by 2 matrices of integers * @@ -41,7 +45,9 @@ typedef ibz_t ibz_vec_4_t[4]; * * Represented as a matrix of 2 vectors of 2 ibz_t (big integer) elements */ -typedef ibz_t ibz_mat_2x2_t[2][2]; +typedef struct { + ibz_t m[2][2]; +} ibz_mat_2x2_t; /** @brief Type for 4 by 4 matrices of integers * @@ -49,7 +55,9 @@ typedef ibz_t ibz_mat_2x2_t[2][2]; * * Represented as a matrix of 4 vectors of 4 ibz_t (big integer) elements */ -typedef ibz_t ibz_mat_4x4_t[4][4]; +typedef struct { + ibz_t m[4][4]; +} ibz_mat_4x4_t; /** * @} */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion_data.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion_data.c index baf3da0059..f9de8b4a4e 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion_data.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/quaternion_data.c @@ -4,3173 +4,3173 @@ const ibz_t QUAT_prime_cofactor = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x41,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x41,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, 
._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x41,0x0,0x0,0x0,0x0,0x0,0x0,0x8000000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x41,0x0,0x0,0x0,0x0,0x0,0x0,0x8000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x41,0x0,0x0,0x800000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x41,0x0,0x0,0x800000000000000}}}} #endif ; const quat_alg_t QUATALG_PINFTY = { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x4ff}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x4ff}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x4ffffff}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x4ffffff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0x4ffffffffffffff}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0x4ffffffffffffff}}}} #endif }; const quat_p_extremal_maximal_order_t EXTREMAL_ORDERS[7] = {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, 
._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 
-{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, 
._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = 
(mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 1}, {{ +}}}, 1}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x10000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x10000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x1000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x1000000000000000}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x10000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x10000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x1000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x1000000000000000}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x8000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x8000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x800000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x800000000000000}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x80}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x80}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800000}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x80000000000000}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x80000000000000}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x8000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x8000000}}}} #elif GMP_LIMB_BITS == 
64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x800000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x800000000000000}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x10000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x10000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x1000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x1000000000000000}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc 
= 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, 
._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 5}, {{ +}}}, 5}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x78d4,0x7b85,0x7a64,0xf5f2,0x29b9,0x3696,0x6101,0xb874}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x78d4,0x7b85,0x7a64,0xf5f2,0x29b9,0x3696,0x6101,0xb874}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7b8578d4,0xf5f27a64,0x369629b9,0xb8746101}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7b8578d4,0xf5f27a64,0x369629b9,0xb8746101}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xf5f27a647b8578d4,0xb8746101369629b9}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xf5f27a647b8578d4,0xb8746101369629b9}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x78d4,0x7b85,0x7a64,0xf5f2,0x29b9,0x3696,0x6101,0xb874}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x78d4,0x7b85,0x7a64,0xf5f2,0x29b9,0x3696,0x6101,0xb874}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7b8578d4,0xf5f27a64,0x369629b9,0xb8746101}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7b8578d4,0xf5f27a64,0x369629b9,0xb8746101}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xf5f27a647b8578d4,0xb8746101369629b9}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xf5f27a647b8578d4,0xb8746101369629b9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbc6a,0x3dc2,0x3d32,0xfaf9,0x14dc,0x9b4b,0x3080,0x5c3a}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbc6a,0x3dc2,0x3d32,0xfaf9,0x14dc,0x9b4b,0x3080,0x5c3a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3dc2bc6a,0xfaf93d32,0x9b4b14dc,0x5c3a3080}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3dc2bc6a,0xfaf93d32,0x9b4b14dc,0x5c3a3080}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xfaf93d323dc2bc6a,0x5c3a30809b4b14dc}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xfaf93d323dc2bc6a,0x5c3a30809b4b14dc}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 
-{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7d47,0x6fa4,0x2ad5,0x95ad,0x8a4b,0x49be,0x77e7,0xc898,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7d47,0x6fa4,0x2ad5,0x95ad,0x8a4b,0x49be,0x77e7,0xc898,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x6fa47d47,0x95ad2ad5,0x49be8a4b,0xc89877e7,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x6fa47d47,0x95ad2ad5,0x49be8a4b,0xc89877e7,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x95ad2ad56fa47d47,0xc89877e749be8a4b,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x95ad2ad56fa47d47,0xc89877e749be8a4b,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3f47,0x7060,0x5e29,0x3e35,0xd950,0x2a1b,0x10ae,0x78dd,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x280}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3f47,0x7060,0x5e29,0x3e35,0xd950,0x2a1b,0x10ae,0x78dd,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x280}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x70603f47,0x3e355e29,0x2a1bd950,0x78dd10ae,0x0,0x0,0x0,0x2800000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x70603f47,0x3e355e29,0x2a1bd950,0x78dd10ae,0x0,0x0,0x0,0x2800000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3e355e2970603f47,0x78dd10ae2a1bd950,0x0,0x280000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3e355e2970603f47,0x78dd10ae2a1bd950,0x0,0x280000000000000}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbc6a,0x3dc2,0x3d32,0xfaf9,0x14dc,0x9b4b,0x3080,0x5c3a}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbc6a,0x3dc2,0x3d32,0xfaf9,0x14dc,0x9b4b,0x3080,0x5c3a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3dc2bc6a,0xfaf93d32,0x9b4b14dc,0x5c3a3080}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3dc2bc6a,0xfaf93d32,0x9b4b14dc,0x5c3a3080}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xfaf93d323dc2bc6a,0x5c3a30809b4b14dc}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xfaf93d323dc2bc6a,0x5c3a30809b4b14dc}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x3fe7,0x28ee,0x26e8,0xb194,0x6d7a,0xaf58,0xe568,0xd6d}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x3fe7,0x28ee,0x26e8,0xb194,0x6d7a,0xaf58,0xe568,0xd6d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x28ee3fe7,0xb19426e8,0xaf586d7a,0xd6de568}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) 
{0x28ee3fe7,0xb19426e8,0xaf586d7a,0xd6de568}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xb19426e828ee3fe7,0xd6de568af586d7a}}} +{{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xb19426e828ee3fe7,0xd6de568af586d7a}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x78d4,0x7b85,0x7a64,0xf5f2,0x29b9,0x3696,0x6101,0xb874}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x78d4,0x7b85,0x7a64,0xf5f2,0x29b9,0x3696,0x6101,0xb874}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7b8578d4,0xf5f27a64,0x369629b9,0xb8746101}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7b8578d4,0xf5f27a64,0x369629b9,0xb8746101}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xf5f27a647b8578d4,0xb8746101369629b9}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xf5f27a647b8578d4,0xb8746101369629b9}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7d47,0x6fa4,0x2ad5,0x95ad,0x8a4b,0x49be,0x77e7,0xc898,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7d47,0x6fa4,0x2ad5,0x95ad,0x8a4b,0x49be,0x77e7,0xc898,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x6fa47d47,0x95ad2ad5,0x49be8a4b,0xc89877e7,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x6fa47d47,0x95ad2ad5,0x49be8a4b,0xc89877e7,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x95ad2ad56fa47d47,0xc89877e749be8a4b,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x95ad2ad56fa47d47,0xc89877e749be8a4b,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} 
+{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 17}, {{ +}}}, 17}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = 
(mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x307a,0x74c8,0x8082,0xb034,0x4e8a,0xc43a,0x399a,0x9ab}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x307a,0x74c8,0x8082,0xb034,0x4e8a,0xc43a,0x399a,0x9ab}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x74c8307a,0xb0348082,0xc43a4e8a,0x9ab399a}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x74c8307a,0xb0348082,0xc43a4e8a,0x9ab399a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xb034808274c8307a,0x9ab399ac43a4e8a}}} +{{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xb034808274c8307a,0x9ab399ac43a4e8a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size 
= 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x954f,0x6bc9,0xca46,0x3d25,0x431b,0x46ed,0x8229,0x4f5,0xe453,0x6eb3,0x4530,0xeb3e,0x5306,0xb3e4,0x306e,0x45}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x954f,0x6bc9,0xca46,0x3d25,0x431b,0x46ed,0x8229,0x4f5,0xe453,0x6eb3,0x4530,0xeb3e,0x5306,0xb3e4,0x306e,0x45}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6bc9954f,0x3d25ca46,0x46ed431b,0x4f58229,0x6eb3e453,0xeb3e4530,0xb3e45306,0x45306e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6bc9954f,0x3d25ca46,0x46ed431b,0x4f58229,0x6eb3e453,0xeb3e4530,0xb3e45306,0x45306e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3d25ca466bc9954f,0x4f5822946ed431b,0xeb3e45306eb3e453,0x45306eb3e45306}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3d25ca466bc9954f,0x4f5822946ed431b,0xeb3e45306eb3e453,0x45306eb3e45306}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe7f,0xca3a,0x2454,0xbd31,0xe562,0xcb4c,0x72f0,0x21}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe7f,0xca3a,0x2454,0xbd31,0xe562,0xcb4c,0x72f0,0x21}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xca3a0e7f,0xbd312454,0xcb4ce562,0x2172f0}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xca3a0e7f,0xbd312454,0xcb4ce562,0x2172f0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbd312454ca3a0e7f,0x2172f0cb4ce562}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbd312454ca3a0e7f,0x2172f0cb4ce562}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x307a,0x74c8,0x8082,0xb034,0x4e8a,0xc43a,0x399a,0x9ab}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x307a,0x74c8,0x8082,0xb034,0x4e8a,0xc43a,0x399a,0x9ab}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x74c8307a,0xb0348082,0xc43a4e8a,0x9ab399a}}} 
+{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x74c8307a,0xb0348082,0xc43a4e8a,0x9ab399a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xb034808274c8307a,0x9ab399ac43a4e8a}}} +{{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xb034808274c8307a,0x9ab399ac43a4e8a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} 
+{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 37}, {{ +}}}, 37}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdd36,0xda6b,0xa943,0xd17a,0xe307,0x564c,0x4b0c,0x44d4}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdd36,0xda6b,0xa943,0xd17a,0xe307,0x564c,0x4b0c,0x44d4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xda6bdd36,0xd17aa943,0x564ce307,0x44d44b0c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xda6bdd36,0xd17aa943,0x564ce307,0x44d44b0c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd17aa943da6bdd36,0x44d44b0c564ce307}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd17aa943da6bdd36,0x44d44b0c564ce307}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x3a03,0xc406,0x47c,0xa0a2,0x6dbc,0x1df4,0x796,0x6cee,0xce0c,0xe0c7,0xc7c,0xc7ce,0x7ce0,0xce0c,0xe0c7,0x7c}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x3a03,0xc406,0x47c,0xa0a2,0x6dbc,0x1df4,0x796,0x6cee,0xce0c,0xe0c7,0xc7c,0xc7ce,0x7ce0,0xce0c,0xe0c7,0x7c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xc4063a03,0xa0a2047c,0x1df46dbc,0x6cee0796,0xe0c7ce0c,0xc7ce0c7c,0xce0c7ce0,0x7ce0c7}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xc4063a03,0xa0a2047c,0x1df46dbc,0x6cee0796,0xe0c7ce0c,0xc7ce0c7c,0xce0c7ce0,0x7ce0c7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa0a2047cc4063a03,0x6cee07961df46dbc,0xc7ce0c7ce0c7ce0c,0x7ce0c7ce0c7ce0}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa0a2047cc4063a03,0x6cee07961df46dbc,0xc7ce0c7ce0c7ce0c,0x7ce0c7ce0c7ce0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x188f,0xa1e2,0x2148,0xd9f8,0x2e79,0x1a07,0xe1b2,0xd6}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x188f,0xa1e2,0x2148,0xd9f8,0x2e79,0x1a07,0xe1b2,0xd6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa1e2188f,0xd9f82148,0x1a072e79,0xd6e1b2}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa1e2188f,0xd9f82148,0x1a072e79,0xd6e1b2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xd9f82148a1e2188f,0xd6e1b21a072e79}}} +{{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xd9f82148a1e2188f,0xd6e1b21a072e79}}}} #endif -}}}, { +}}}}, { #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdd36,0xda6b,0xa943,0xd17a,0xe307,0x564c,0x4b0c,0x44d4}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdd36,0xda6b,0xa943,0xd17a,0xe307,0x564c,0x4b0c,0x44d4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xda6bdd36,0xd17aa943,0x564ce307,0x44d44b0c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xda6bdd36,0xd17aa943,0x564ce307,0x44d44b0c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd17aa943da6bdd36,0x44d44b0c564ce307}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd17aa943da6bdd36,0x44d44b0c564ce307}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 41}, {{ +}}}, 41}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x96a4,0x25b,0x14f2,0x3800,0x4e7c,0x7958,0xab7f,0x7bbe,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x96a4,0x25b,0x14f2,0x3800,0x4e7c,0x7958,0xab7f,0x7bbe,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25b96a4,0x380014f2,0x79584e7c,0x7bbeab7f,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25b96a4,0x380014f2,0x79584e7c,0x7bbeab7f,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x380014f2025b96a4,0x7bbeab7f79584e7c,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x380014f2025b96a4,0x7bbeab7f79584e7c,0x1}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x96a4,0x25b,0x14f2,0x3800,0x4e7c,0x7958,0xab7f,0x7bbe,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x96a4,0x25b,0x14f2,0x3800,0x4e7c,0x7958,0xab7f,0x7bbe,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25b96a4,0x380014f2,0x79584e7c,0x7bbeab7f,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25b96a4,0x380014f2,0x79584e7c,0x7bbeab7f,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x380014f2025b96a4,0x7bbeab7f79584e7c,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x380014f2025b96a4,0x7bbeab7f79584e7c,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 
0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcb52,0x12d,0xa79,0x1c00,0x273e,0xbcac,0x55bf,0xbddf}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcb52,0x12d,0xa79,0x1c00,0x273e,0xbcac,0x55bf,0xbddf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x12dcb52,0x1c000a79,0xbcac273e,0xbddf55bf}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x12dcb52,0x1c000a79,0xbcac273e,0xbddf55bf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1c000a79012dcb52,0xbddf55bfbcac273e}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1c000a79012dcb52,0xbddf55bfbcac273e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x73e3,0x3339,0x19e7,0x4ba1,0x6ebc,0x2702,0xee62,0xdbd0,0x7}}} +{{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x73e3,0x3339,0x19e7,0x4ba1,0x6ebc,0x2702,0xee62,0xdbd0,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x333973e3,0x4ba119e7,0x27026ebc,0xdbd0ee62,0x7}}} +{{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x333973e3,0x4ba119e7,0x27026ebc,0xdbd0ee62,0x7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x4ba119e7333973e3,0xdbd0ee6227026ebc,0x7}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x4ba119e7333973e3,0xdbd0ee6227026ebc,0x7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca33,0x3dd0,0x1d92,0x9f0,0x2f81,0xafe9,0xe395,0x83f7,0xfffc,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x27f}}} +{{{._mp_alloc 
= 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca33,0x3dd0,0x1d92,0x9f0,0x2f81,0xafe9,0xe395,0x83f7,0xfffc,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x27f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3dd0ca33,0x9f01d92,0xafe92f81,0x83f7e395,0xfffffffc,0xffffffff,0xffffffff,0x27fffff}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3dd0ca33,0x9f01d92,0xafe92f81,0x83f7e395,0xfffffffc,0xffffffff,0xffffffff,0x27fffff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9f01d923dd0ca33,0x83f7e395afe92f81,0xfffffffffffffffc,0x27fffffffffffff}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9f01d923dd0ca33,0x83f7e395afe92f81,0xfffffffffffffffc,0x27fffffffffffff}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcb52,0x12d,0xa79,0x1c00,0x273e,0xbcac,0x55bf,0xbddf}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcb52,0x12d,0xa79,0x1c00,0x273e,0xbcac,0x55bf,0xbddf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x12dcb52,0x1c000a79,0xbcac273e,0xbddf55bf}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x12dcb52,0x1c000a79,0xbcac273e,0xbddf55bf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1c000a79012dcb52,0xbddf55bfbcac273e}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1c000a79012dcb52,0xbddf55bfbcac273e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, 
._mp_d = (mp_limb_t[]) {0x35}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xeb73,0xf93c,0x71c0,0x87f5,0x667a,0xcb3c,0xb9cb,0x12fa}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xeb73,0xf93c,0x71c0,0x87f5,0x667a,0xcb3c,0xb9cb,0x12fa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf93ceb73,0x87f571c0,0xcb3c667a,0x12fab9cb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf93ceb73,0x87f571c0,0xcb3c667a,0x12fab9cb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x87f571c0f93ceb73,0x12fab9cbcb3c667a}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x87f571c0f93ceb73,0x12fab9cbcb3c667a}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x96a4,0x25b,0x14f2,0x3800,0x4e7c,0x7958,0xab7f,0x7bbe,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x96a4,0x25b,0x14f2,0x3800,0x4e7c,0x7958,0xab7f,0x7bbe,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25b96a4,0x380014f2,0x79584e7c,0x7bbeab7f,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25b96a4,0x380014f2,0x79584e7c,0x7bbeab7f,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x380014f2025b96a4,0x7bbeab7f79584e7c,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x380014f2025b96a4,0x7bbeab7f79584e7c,0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x73e3,0x3339,0x19e7,0x4ba1,0x6ebc,0x2702,0xee62,0xdbd0,0x7}}} +{{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x73e3,0x3339,0x19e7,0x4ba1,0x6ebc,0x2702,0xee62,0xdbd0,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x333973e3,0x4ba119e7,0x27026ebc,0xdbd0ee62,0x7}}} +{{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x333973e3,0x4ba119e7,0x27026ebc,0xdbd0ee62,0x7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) 
{0x4ba119e7333973e3,0xdbd0ee6227026ebc,0x7}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x4ba119e7333973e3,0xdbd0ee6227026ebc,0x7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 
0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 53}, {{ +}}}, 53}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7ffa,0x55af,0x7b9e,0xe2b9,0xa7af,0x578c,0xf76b,0xc227,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7ffa,0x55af,0x7b9e,0xe2b9,0xa7af,0x578c,0xf76b,0xc227,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x55af7ffa,0xe2b97b9e,0x578ca7af,0xc227f76b,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x55af7ffa,0xe2b97b9e,0x578ca7af,0xc227f76b,0xf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe2b97b9e55af7ffa,0xc227f76b578ca7af,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe2b97b9e55af7ffa,0xc227f76b578ca7af,0xf}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7ffa,0x55af,0x7b9e,0xe2b9,0xa7af,0x578c,0xf76b,0xc227,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7ffa,0x55af,0x7b9e,0xe2b9,0xa7af,0x578c,0xf76b,0xc227,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x55af7ffa,0xe2b97b9e,0x578ca7af,0xc227f76b,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x55af7ffa,0xe2b97b9e,0x578ca7af,0xc227f76b,0xf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe2b97b9e55af7ffa,0xc227f76b578ca7af,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe2b97b9e55af7ffa,0xc227f76b578ca7af,0xf}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xbffd,0x2ad7,0xbdcf,0xf15c,0x53d7,0xabc6,0xfbb5,0xe113,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xbffd,0x2ad7,0xbdcf,0xf15c,0x53d7,0xabc6,0xfbb5,0xe113,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2ad7bffd,0xf15cbdcf,0xabc653d7,0xe113fbb5,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2ad7bffd,0xf15cbdcf,0xabc653d7,0xe113fbb5,0x7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xf15cbdcf2ad7bffd,0xe113fbb5abc653d7,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xf15cbdcf2ad7bffd,0xe113fbb5abc653d7,0x7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS 
== 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xd16,0xf02b,0x1ce7,0xa2ef,0x54b,0x2c56,0x5963,0x667,0x6f}}} +{{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xd16,0xf02b,0x1ce7,0xa2ef,0x54b,0x2c56,0x5963,0x667,0x6f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xf02b0d16,0xa2ef1ce7,0x2c56054b,0x6675963,0x6f}}} +{{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xf02b0d16,0xa2ef1ce7,0x2c56054b,0x6675963,0x6f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xa2ef1ce7f02b0d16,0x66759632c56054b,0x6f}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xa2ef1ce7f02b0d16,0x66759632c56054b,0x6f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf0ab,0x9d3b,0x6ea,0x84ac,0x62e5,0xdde9,0x882b,0xd021,0xffe2,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x13ff}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf0ab,0x9d3b,0x6ea,0x84ac,0x62e5,0xdde9,0x882b,0xd021,0xffe2,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x13ff}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d3bf0ab,0x84ac06ea,0xdde962e5,0xd021882b,0xffffffe2,0xffffffff,0xffffffff,0x13ffffff}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d3bf0ab,0x84ac06ea,0xdde962e5,0xd021882b,0xffffffe2,0xffffffff,0xffffffff,0x13ffffff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x84ac06ea9d3bf0ab,0xd021882bdde962e5,0xffffffffffffffe2,0x13ffffffffffffff}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x84ac06ea9d3bf0ab,0xd021882bdde962e5,0xffffffffffffffe2,0x13ffffffffffffff}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} 
#endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xbffd,0x2ad7,0xbdcf,0xf15c,0x53d7,0xabc6,0xfbb5,0xe113,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xbffd,0x2ad7,0xbdcf,0xf15c,0x53d7,0xabc6,0xfbb5,0xe113,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2ad7bffd,0xf15cbdcf,0xabc653d7,0xe113fbb5,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2ad7bffd,0xf15cbdcf,0xabc653d7,0xe113fbb5,0x7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xf15cbdcf2ad7bffd,0xe113fbb5abc653d7,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xf15cbdcf2ad7bffd,0xe113fbb5abc653d7,0x7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1f37,0x5c4a,0x13f1,0x770,0x7183,0x5600,0xda31,0x9281}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1f37,0x5c4a,0x13f1,0x770,0x7183,0x5600,0xda31,0x9281}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5c4a1f37,0x77013f1,0x56007183,0x9281da31}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5c4a1f37,0x77013f1,0x56007183,0x9281da31}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x77013f15c4a1f37,0x9281da3156007183}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x77013f15c4a1f37,0x9281da3156007183}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, 
._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7ffa,0x55af,0x7b9e,0xe2b9,0xa7af,0x578c,0xf76b,0xc227,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7ffa,0x55af,0x7b9e,0xe2b9,0xa7af,0x578c,0xf76b,0xc227,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x55af7ffa,0xe2b97b9e,0x578ca7af,0xc227f76b,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x55af7ffa,0xe2b97b9e,0x578ca7af,0xc227f76b,0xf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe2b97b9e55af7ffa,0xc227f76b578ca7af,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe2b97b9e55af7ffa,0xc227f76b578ca7af,0xf}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xd16,0xf02b,0x1ce7,0xa2ef,0x54b,0x2c56,0x5963,0x667,0x6f}}} +{{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xd16,0xf02b,0x1ce7,0xa2ef,0x54b,0x2c56,0x5963,0x667,0x6f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xf02b0d16,0xa2ef1ce7,0x2c56054b,0x6675963,0x6f}}} +{{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xf02b0d16,0xa2ef1ce7,0x2c56054b,0x6675963,0x6f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xa2ef1ce7f02b0d16,0x66759632c56054b,0x6f}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xa2ef1ce7f02b0d16,0x66759632c56054b,0x6f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 97}}; +}}}, 97}}; const quat_left_ideal_t CONNECTING_IDEALS[7] = {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x0,0x0,0x0,0x0,0x6000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x0,0x0,0x0,0x0,0x6000}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x60000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x60000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x2,0x6000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x2,0x6000000000000000}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x10000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x10000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x1000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x1000000000000000}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x0,0x0,0x0,0x0,0x6000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = 
(mp_limb_t[]) {0x2,0x0,0x0,0x0,0x0,0x0,0x0,0x6000}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x60000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x60000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x2,0x6000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x2,0x6000000000000000}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x5000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x5000}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x50000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x50000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x5000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x5000000000000000}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x3000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x3000}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x30000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x30000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x3000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x3000000000000000}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5fe,0x8673,0x157b,0x7f90,0xd2c5,0xd00b,0xa646,0x78f4}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5fe,0x8673,0x157b,0x7f90,0xd2c5,0xd00b,0xa646,0x78f4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8673f5fe,0x7f90157b,0xd00bd2c5,0x78f4a646}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x8673f5fe,0x7f90157b,0xd00bd2c5,0x78f4a646}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x7f90157b8673f5fe,0x78f4a646d00bd2c5}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x7f90157b8673f5fe,0x78f4a646d00bd2c5}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfee5,0x2b,0xd6d8,0xe65c,0x68a3,0xe72d,0x373d,0x5b1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfee5,0x2b,0xd6d8,0xe65c,0x68a3,0xe72d,0x373d,0x5b1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2bfee5,0xe65cd6d8,0xe72d68a3,0x5b1373d}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2bfee5,0xe65cd6d8,0xe72d68a3,0x5b1373d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xe65cd6d8002bfee5,0x5b1373de72d68a3}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xe65cd6d8002bfee5,0x5b1373de72d68a3}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5fe,0x8673,0x157b,0x7f90,0xd2c5,0xd00b,0xa646,0x78f4}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5fe,0x8673,0x157b,0x7f90,0xd2c5,0xd00b,0xa646,0x78f4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8673f5fe,0x7f90157b,0xd00bd2c5,0x78f4a646}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8673f5fe,0x7f90157b,0xd00bd2c5,0x78f4a646}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x7f90157b8673f5fe,0x78f4a646d00bd2c5}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x7f90157b8673f5fe,0x78f4a646d00bd2c5}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf719,0x8647,0x3ea3,0x9933,0x6a21,0xe8de,0x6f08,0x7343}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf719,0x8647,0x3ea3,0x9933,0x6a21,0xe8de,0x6f08,0x7343}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, 
._mp_d = (mp_limb_t[]) {0x8647f719,0x99333ea3,0xe8de6a21,0x73436f08}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8647f719,0x99333ea3,0xe8de6a21,0x73436f08}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x99333ea38647f719,0x73436f08e8de6a21}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x99333ea38647f719,0x73436f08e8de6a21}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 
-{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfaff,0xc339,0xabd,0xbfc8,0xe962,0x6805,0x5323,0x3c7a}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfaff,0xc339,0xabd,0xbfc8,0xe962,0x6805,0x5323,0x3c7a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc339faff,0xbfc80abd,0x6805e962,0x3c7a5323}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc339faff,0xbfc80abd,0x6805e962,0x3c7a5323}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbfc80abdc339faff,0x3c7a53236805e962}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbfc80abdc339faff,0x3c7a53236805e962}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc 
= 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8597,0x3af7,0xa5a,0xbb29,0x77c0,0xd2d9,0xf561,0x84f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8597,0x3af7,0xa5a,0xbb29,0x77c0,0xd2d9,0xf561,0x84f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3af78597,0xbb290a5a,0xd2d977c0,0x84ff561}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3af78597,0xbb290a5a,0xd2d977c0,0x84ff561}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbb290a5a3af78597,0x84ff561d2d977c0}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbb290a5a3af78597,0x84ff561d2d977c0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x604b,0x3c1e,0x9e8c,0x8146,0x18b7,0xb452,0xa68a,0xf44}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x604b,0x3c1e,0x9e8c,0x8146,0x18b7,0xb452,0xa68a,0xf44}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3c1e604b,0x81469e8c,0xb45218b7,0xf44a68a}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3c1e604b,0x81469e8c,0xb45218b7,0xf44a68a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x81469e8c3c1e604b,0xf44a68ab45218b7}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x81469e8c3c1e604b,0xf44a68ab45218b7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} 
+{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif 
GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x519b,0xa90b,0xcdca,0xd5f5,0x757a,0x83dd,0xb354,0xe59}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x519b,0xa90b,0xcdca,0xd5f5,0x757a,0x83dd,0xb354,0xe59}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa90b519b,0xd5f5cdca,0x83dd757a,0xe59b354}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa90b519b,0xd5f5cdca,0x83dd757a,0xe59b354}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd5f5cdcaa90b519b,0xe59b35483dd757a}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd5f5cdcaa90b519b,0xe59b35483dd757a}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5e07,0xc4e3,0xf746,0x83d,0x5354,0x44c1,0x9c43,0x1f9f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5e07,0xc4e3,0xf746,0x83d,0x5354,0x44c1,0x9c43,0x1f9f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc4e35e07,0x83df746,0x44c15354,0x1f9f9c43}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc4e35e07,0x83df746,0x44c15354,0x1f9f9c43}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x83df746c4e35e07,0x1f9f9c4344c15354}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x83df746c4e35e07,0x1f9f9c4344c15354}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 
-{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) 
{0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x19f2,0x5594,0xee77,0x52a2,0xf459,0x45c9,0x2187,0xb348}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x19f2,0x5594,0xee77,0x52a2,0xf459,0x45c9,0x2187,0xb348}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x559419f2,0x52a2ee77,0x45c9f459,0xb3482187}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x559419f2,0x52a2ee77,0x45c9f459,0xb3482187}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x52a2ee77559419f2,0xb348218745c9f459}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x52a2ee77559419f2,0xb348218745c9f459}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdbd3,0x967a,0x8a96,0x1df4,0x7845,0xd70,0x419a,0x222}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdbd3,0x967a,0x8a96,0x1df4,0x7845,0xd70,0x419a,0x222}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x967adbd3,0x1df48a96,0xd707845,0x222419a}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x967adbd3,0x1df48a96,0xd707845,0x222419a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1df48a96967adbd3,0x222419a0d707845}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1df48a96967adbd3,0x222419a0d707845}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x19f2,0x5594,0xee77,0x52a2,0xf459,0x45c9,0x2187,0xb348}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x19f2,0x5594,0xee77,0x52a2,0xf459,0x45c9,0x2187,0xb348}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x559419f2,0x52a2ee77,0x45c9f459,0xb3482187}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x559419f2,0x52a2ee77,0x45c9f459,0xb3482187}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x52a2ee77559419f2,0xb348218745c9f459}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x52a2ee77559419f2,0xb348218745c9f459}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e1f,0xbf19,0x63e0,0x34ae,0x7c14,0x3859,0xdfed,0xb125}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e1f,0xbf19,0x63e0,0x34ae,0x7c14,0x3859,0xdfed,0xb125}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbf193e1f,0x34ae63e0,0x38597c14,0xb125dfed}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbf193e1f,0x34ae63e0,0x38597c14,0xb125dfed}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x34ae63e0bf193e1f,0xb125dfed38597c14}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x34ae63e0bf193e1f,0xb125dfed38597c14}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcf9,0xaaca,0x773b,0xa951,0xfa2c,0xa2e4,0x10c3,0x59a4}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcf9,0xaaca,0x773b,0xa951,0xfa2c,0xa2e4,0x10c3,0x59a4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, 
._mp_d = (mp_limb_t[]) {0xaaca0cf9,0xa951773b,0xa2e4fa2c,0x59a410c3}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaaca0cf9,0xa951773b,0xa2e4fa2c,0x59a410c3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xa951773baaca0cf9,0x59a410c3a2e4fa2c}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xa951773baaca0cf9,0x59a410c3a2e4fa2c}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa3a,0x67cf,0x6ad7,0xd031,0x701,0xebca,0xd852,0x2996}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa3a,0x67cf,0x6ad7,0xd031,0x701,0xebca,0xd852,0x2996}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x67cfaa3a,0xd0316ad7,0xebca0701,0x2996d852}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x67cfaa3a,0xd0316ad7,0xebca0701,0x2996d852}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd0316ad767cfaa3a,0x2996d852ebca0701}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd0316ad767cfaa3a,0x2996d852ebca0701}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x275,0xd7ab,0xedeb,0xbc67,0xad41,0xaeb5,0xf2e5,0x148e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x275,0xd7ab,0xedeb,0xbc67,0xad41,0xaeb5,0xf2e5,0x148e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd7ab0275,0xbc67edeb,0xaeb5ad41,0x148ef2e5}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd7ab0275,0xbc67edeb,0xaeb5ad41,0x148ef2e5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbc67edebd7ab0275,0x148ef2e5aeb5ad41}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbc67edebd7ab0275,0x148ef2e5aeb5ad41}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} 
#elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa3a,0x67cf,0x6ad7,0xd031,0x701,0xebca,0xd852,0x2996}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa3a,0x67cf,0x6ad7,0xd031,0x701,0xebca,0xd852,0x2996}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x67cfaa3a,0xd0316ad7,0xebca0701,0x2996d852}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x67cfaa3a,0xd0316ad7,0xebca0701,0x2996d852}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd0316ad767cfaa3a,0x2996d852ebca0701}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd0316ad767cfaa3a,0x2996d852ebca0701}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa7c5,0x9024,0x7ceb,0x13c9,0x59c0,0x3d14,0xe56d,0x1507}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa7c5,0x9024,0x7ceb,0x13c9,0x59c0,0x3d14,0xe56d,0x1507}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9024a7c5,0x13c97ceb,0x3d1459c0,0x1507e56d}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9024a7c5,0x13c97ceb,0x3d1459c0,0x1507e56d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x13c97ceb9024a7c5,0x1507e56d3d1459c0}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x13c97ceb9024a7c5,0x1507e56d3d1459c0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, 
._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd51d,0xb3e7,0xb56b,0xe818,0x380,0x75e5,0x6c29,0x14cb}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd51d,0xb3e7,0xb56b,0xe818,0x380,0x75e5,0x6c29,0x14cb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb3e7d51d,0xe818b56b,0x75e50380,0x14cb6c29}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb3e7d51d,0xe818b56b,0x75e50380,0x14cb6c29}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xe818b56bb3e7d51d,0x14cb6c2975e50380}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xe818b56bb3e7d51d,0x14cb6c2975e50380}}}} #endif , &MAXORD_O0}}; const quat_alg_elem_t CONJUGATING_ELEMENTS[7] = {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = 
(mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x10000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x10000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x1000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x1000000000000000}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 
-{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x10000000}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x10000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x1,0x1000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x1,0x1000000000000000}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf94f,0x85ef,0x90f3,0xcc79,0x98d9,0x1a83,0x8d,0x67e1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf94f,0x85ef,0x90f3,0xcc79,0x98d9,0x1a83,0x8d,0x67e1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x85eff94f,0xcc7990f3,0x1a8398d9,0x67e1008d}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x85eff94f,0xcc7990f3,0x1a8398d9,0x67e1008d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xcc7990f385eff94f,0x67e1008d1a8398d9}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xcc7990f385eff94f,0x67e1008d1a8398d9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xf94f,0x85ef,0x90f3,0xcc79,0x98d9,0x1a83,0x8d,0x67e1}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xf94f,0x85ef,0x90f3,0xcc79,0x98d9,0x1a83,0x8d,0x67e1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x85eff94f,0xcc7990f3,0x1a8398d9,0x67e1008d}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x85eff94f,0xcc7990f3,0x1a8398d9,0x67e1008d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xcc7990f385eff94f,0x67e1008d1a8398d9}}} +{{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) 
{0xcc7990f385eff94f,0x67e1008d1a8398d9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x3}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x3}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x3}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x3}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x3}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x3}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x3}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}} +{{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 
-{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}} +{{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} 
+{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6511,0x45fa,0xa368,0xe869,0x4db2,0x88fc,0x6989,0xbdf3}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6511,0x45fa,0xa368,0xe869,0x4db2,0x88fc,0x6989,0xbdf3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x45fa6511,0xe869a368,0x88fc4db2,0xbdf36989}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x45fa6511,0xe869a368,0x88fc4db2,0xbdf36989}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xe869a36845fa6511,0xbdf3698988fc4db2}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xe869a36845fa6511,0xbdf3698988fc4db2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x6511,0x45fa,0xa368,0xe869,0x4db2,0x88fc,0x6989,0xbdf3}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x6511,0x45fa,0xa368,0xe869,0x4db2,0x88fc,0x6989,0xbdf3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x45fa6511,0xe869a368,0x88fc4db2,0xbdf36989}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x45fa6511,0xe869a368,0x88fc4db2,0xbdf36989}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xe869a36845fa6511,0xbdf3698988fc4db2}}} +{{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xe869a36845fa6511,0xbdf3698988fc4db2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x5}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1e6b,0x5c4a,0x13f1,0x770,0x7183,0x5600,0xda31,0x9281}}} +{{{._mp_alloc = 0, 
._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1e6b,0x5c4a,0x13f1,0x770,0x7183,0x5600,0xda31,0x9281}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5c4a1e6b,0x77013f1,0x56007183,0x9281da31}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5c4a1e6b,0x77013f1,0x56007183,0x9281da31}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x77013f15c4a1e6b,0x9281da3156007183}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x77013f15c4a1e6b,0x9281da3156007183}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x1e6b,0x5c4a,0x13f1,0x770,0x7183,0x5600,0xda31,0x9281}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x1e6b,0x5c4a,0x13f1,0x770,0x7183,0x5600,0xda31,0x9281}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x5c4a1e6b,0x77013f1,0x56007183,0x9281da31}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x5c4a1e6b,0x77013f1,0x56007183,0x9281da31}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x77013f15c4a1e6b,0x9281da3156007183}}} +{{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x77013f15c4a1e6b,0x9281da3156007183}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}}} #endif -}}}; +}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sign.c index 9216bbe4d3..9520a6f7fd 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sign.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sign.c @@ -31,12 +31,12 @@ compute_challenge_ideal_signature(quat_left_ideal_t *lideal_chall_two, const sig // vec is a vector [1, chall_coeff] coefficients encoding the kernel of the challenge // isogeny as B[0] + chall_coeff*B[1] where B is the canonical basis of the // 2^TORSION_EVEN_POWER torsion of EA - ibz_set(&vec[0], 1); - ibz_copy_digit_array(&vec[1], sig->chall_coeff); + ibz_set(&vec.v[0], 1); + ibz_copy_digit_array(&vec.v[1], sig->chall_coeff); // now we compute the ideal associated to the challenge // for that, we need to find vec such that - // the kernel of the challenge isogeny is generated by vec[0]*B0[0] + vec[1]*B0[1] where B0 + // the kernel of the challenge isogeny is generated by vec.v[0]*B0[0] + vec.v[1]*B0[1] where B0 // is the image through the secret key isogeny of the canonical basis E0 ibz_mat_2x2_eval(&vec, &(sk->mat_BAcan_to_BA0_two), &vec); @@ -459,16 +459,16 @@ compute_and_set_basis_change_matrix(signature_t *sig, 
change_of_basis_matrix_tate(&mat_Bchall_can_to_Bchall, B_chall_2, &B_can_chall, E_chall, f); // Assert all values in the matrix are of the expected size for packing - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][0]) <= SQIsign_response_length + HD_extra_torsion); - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][1]) <= SQIsign_response_length + HD_extra_torsion); - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][0]) <= SQIsign_response_length + HD_extra_torsion); - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][1]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[0][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[0][1]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[1][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[1][1]) <= SQIsign_response_length + HD_extra_torsion); // Set the basis change matrix to signature - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][0], &(mat_Bchall_can_to_Bchall[0][0])); - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][1], &(mat_Bchall_can_to_Bchall[0][1])); - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][0], &(mat_Bchall_can_to_Bchall[1][0])); - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][1], &(mat_Bchall_can_to_Bchall[1][1])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][0], &(mat_Bchall_can_to_Bchall.m[0][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][1], &(mat_Bchall_can_to_Bchall.m[0][1])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][0], &(mat_Bchall_can_to_Bchall.m[1][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][1], &(mat_Bchall_can_to_Bchall.m[1][1])); // Finalise the matrices ibz_mat_2x2_finalize(&mat_Bchall_can_to_Bchall); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/torsion_constants.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/torsion_constants.c index d7a42bcbe9..55743c1989 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/torsion_constants.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/torsion_constants.c @@ -4,40 +4,40 @@ const ibz_t TWO_TO_SECURITY_BITS = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x1}}}} #endif ; const ibz_t TORSION_PLUS_2POWER = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x100}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x100}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x100000000000000}}} 
+{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x100000000000000}}}} #endif ; const ibz_t SEC_DEGREE = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #endif ; const ibz_t COM_DEGREE = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #endif ; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/algebra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/algebra.c index f4b4260755..a6298acf77 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/algebra.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/algebra.c @@ -21,54 +21,54 @@ quat_alg_coord_mul(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b, ibz_init(&prod); ibz_vec_4_init(&sum); - ibz_set(&(sum[0]), 0); - ibz_set(&(sum[1]), 0); - ibz_set(&(sum[2]), 0); - ibz_set(&(sum[3]), 0); + ibz_set(&(sum.v[0]), 0); + ibz_set(&(sum.v[1]), 0); + ibz_set(&(sum.v[2]), 0); + ibz_set(&(sum.v[3]), 0); // compute 1 coordinate - ibz_mul(&prod, &((*a)[2]), &((*b)[2])); - ibz_sub(&(sum[0]), &(sum[0]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[3])); - ibz_sub(&(sum[0]), &(sum[0]), &prod); - ibz_mul(&(sum[0]), &(sum[0]), &(alg->p)); - ibz_mul(&prod, &((*a)[0]), &((*b)[0])); - ibz_add(&(sum[0]), &(sum[0]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[1])); - ibz_sub(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[2])); + ibz_sub(&(sum.v[0]), &(sum.v[0]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[3])); + ibz_sub(&(sum.v[0]), &(sum.v[0]), &prod); + ibz_mul(&(sum.v[0]), &(sum.v[0]), &(alg->p)); + ibz_mul(&prod, &(a->v[0]), &(b->v[0])); + ibz_add(&(sum.v[0]), &(sum.v[0]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[1])); + ibz_sub(&(sum.v[0]), &(sum.v[0]), &prod); // compute i coordiante - ibz_mul(&prod, &((*a)[2]), &((*b)[3])); - ibz_add(&(sum[1]), 
&(sum[1]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[2])); - ibz_sub(&(sum[1]), &(sum[1]), &prod); - ibz_mul(&(sum[1]), &(sum[1]), &(alg->p)); - ibz_mul(&prod, &((*a)[0]), &((*b)[1])); - ibz_add(&(sum[1]), &(sum[1]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[0])); - ibz_add(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[3])); + ibz_add(&(sum.v[1]), &(sum.v[1]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[2])); + ibz_sub(&(sum.v[1]), &(sum.v[1]), &prod); + ibz_mul(&(sum.v[1]), &(sum.v[1]), &(alg->p)); + ibz_mul(&prod, &(a->v[0]), &(b->v[1])); + ibz_add(&(sum.v[1]), &(sum.v[1]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[0])); + ibz_add(&(sum.v[1]), &(sum.v[1]), &prod); // compute j coordiante - ibz_mul(&prod, &((*a)[0]), &((*b)[2])); - ibz_add(&(sum[2]), &(sum[2]), &prod); - ibz_mul(&prod, &((*a)[2]), &((*b)[0])); - ibz_add(&(sum[2]), &(sum[2]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[3])); - ibz_sub(&(sum[2]), &(sum[2]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[1])); - ibz_add(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &(a->v[0]), &(b->v[2])); + ibz_add(&(sum.v[2]), &(sum.v[2]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[0])); + ibz_add(&(sum.v[2]), &(sum.v[2]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[3])); + ibz_sub(&(sum.v[2]), &(sum.v[2]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[1])); + ibz_add(&(sum.v[2]), &(sum.v[2]), &prod); // compute ij coordiante - ibz_mul(&prod, &((*a)[0]), &((*b)[3])); - ibz_add(&(sum[3]), &(sum[3]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[0])); - ibz_add(&(sum[3]), &(sum[3]), &prod); - ibz_mul(&prod, &((*a)[2]), &((*b)[1])); - ibz_sub(&(sum[3]), &(sum[3]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[2])); - ibz_add(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &(a->v[0]), &(b->v[3])); + ibz_add(&(sum.v[3]), &(sum.v[3]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[0])); + ibz_add(&(sum.v[3]), &(sum.v[3]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[1])); + ibz_sub(&(sum.v[3]), &(sum.v[3]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[2])); + ibz_add(&(sum.v[3]), &(sum.v[3]), &prod); - ibz_copy(&((*res)[0]), &(sum[0])); - ibz_copy(&((*res)[1]), &(sum[1])); - ibz_copy(&((*res)[2]), &(sum[2])); - ibz_copy(&((*res)[3]), &(sum[3])); + ibz_copy(&(res->v[0]), &(sum.v[0])); + ibz_copy(&(res->v[1]), &(sum.v[1])); + ibz_copy(&(res->v[2]), &(sum.v[2])); + ibz_copy(&(res->v[3]), &(sum.v[3])); ibz_finalize(&prod); ibz_vec_4_finalize(&sum); @@ -86,8 +86,8 @@ quat_alg_equal_denom(quat_alg_elem_t *res_a, quat_alg_elem_t *res_b, const quat_ ibz_div(&(res_b->denom), &r, &(b->denom), &gcd); for (int i = 0; i < 4; i++) { // multiply coordiates by reduced denominators from the other element - ibz_mul(&(res_a->coord[i]), &(a->coord[i]), &(res_b->denom)); - ibz_mul(&(res_b->coord[i]), &(b->coord[i]), &(res_a->denom)); + ibz_mul(&(res_a->coord.v[i]), &(a->coord.v[i]), &(res_b->denom)); + ibz_mul(&(res_b->coord.v[i]), &(b->coord.v[i]), &(res_a->denom)); } // multiply both reduced denominators ibz_mul(&(res_a->denom), &(res_a->denom), &(res_b->denom)); @@ -149,8 +149,8 @@ quat_alg_norm(ibz_t *res_num, ibz_t *res_denom, const quat_alg_elem_t *a, const quat_alg_conj(&norm, a); quat_alg_mul(&norm, a, &norm, alg); - ibz_gcd(&g, &(norm.coord[0]), &(norm.denom)); - ibz_div(res_num, &r, &(norm.coord[0]), &g); + ibz_gcd(&g, &(norm.coord.v[0]), &(norm.denom)); + ibz_div(res_num, &r, &(norm.coord.v[0]), &g); ibz_div(res_denom, &r, &(norm.denom), &g); ibz_abs(res_denom, res_denom); ibz_abs(res_num, res_num); @@ -165,20 +165,20 @@ void 
quat_alg_scalar(quat_alg_elem_t *elem, const ibz_t *numerator, const ibz_t *denominator) { ibz_copy(&(elem->denom), denominator); - ibz_copy(&(elem->coord[0]), numerator); - ibz_set(&(elem->coord[1]), 0); - ibz_set(&(elem->coord[2]), 0); - ibz_set(&(elem->coord[3]), 0); + ibz_copy(&(elem->coord.v[0]), numerator); + ibz_set(&(elem->coord.v[1]), 0); + ibz_set(&(elem->coord.v[2]), 0); + ibz_set(&(elem->coord.v[3]), 0); } void quat_alg_conj(quat_alg_elem_t *conj, const quat_alg_elem_t *x) { ibz_copy(&(conj->denom), &(x->denom)); - ibz_copy(&(conj->coord[0]), &(x->coord[0])); - ibz_neg(&(conj->coord[1]), &(x->coord[1])); - ibz_neg(&(conj->coord[2]), &(x->coord[2])); - ibz_neg(&(conj->coord[3]), &(x->coord[3])); + ibz_copy(&(conj->coord.v[0]), &(x->coord.v[0])); + ibz_neg(&(conj->coord.v[1]), &(x->coord.v[1])); + ibz_neg(&(conj->coord.v[2]), &(x->coord.v[2])); + ibz_neg(&(conj->coord.v[3]), &(x->coord.v[3])); } void @@ -190,7 +190,8 @@ quat_alg_make_primitive(ibz_vec_4_t *primitive_x, ibz_t *content, const quat_alg ibz_t r; ibz_init(&r); for (int i = 0; i < 4; i++) { - ibz_div(*primitive_x + i, &r, *primitive_x + i, content); + // TODO: check if this is correct + ibz_div(primitive_x->v + i, &r, primitive_x->v + i, content); } ibz_finalize(&r); } @@ -235,10 +236,10 @@ quat_alg_elem_is_zero(const quat_alg_elem_t *x) void quat_alg_elem_set(quat_alg_elem_t *elem, int32_t denom, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) { - ibz_set(&(elem->coord[0]), coord0); - ibz_set(&(elem->coord[1]), coord1); - ibz_set(&(elem->coord[2]), coord2); - ibz_set(&(elem->coord[3]), coord3); + ibz_set(&(elem->coord.v[0]), coord0); + ibz_set(&(elem->coord.v[1]), coord1); + ibz_set(&(elem->coord.v[2]), coord2); + ibz_set(&(elem->coord.v[3]), coord3); ibz_set(&(elem->denom), denom); } @@ -247,10 +248,10 @@ void quat_alg_elem_copy(quat_alg_elem_t *copy, const quat_alg_elem_t *copied) { ibz_copy(©->denom, &copied->denom); - ibz_copy(©->coord[0], &copied->coord[0]); - ibz_copy(©->coord[1], &copied->coord[1]); - ibz_copy(©->coord[2], &copied->coord[2]); - ibz_copy(©->coord[3], &copied->coord[3]); + ibz_copy(©->coord.v[0], &copied->coord.v[0]); + ibz_copy(©->coord.v[1], &copied->coord.v[1]); + ibz_copy(©->coord.v[2], &copied->coord.v[2]); + ibz_copy(©->coord.v[3], &copied->coord.v[3]); } // helper functions for lattices @@ -262,10 +263,10 @@ quat_alg_elem_copy_ibz(quat_alg_elem_t *elem, const ibz_t *coord2, const ibz_t *coord3) { - ibz_copy(&(elem->coord[0]), coord0); - ibz_copy(&(elem->coord[1]), coord1); - ibz_copy(&(elem->coord[2]), coord2); - ibz_copy(&(elem->coord[3]), coord3); + ibz_copy(&(elem->coord.v[0]), coord0); + ibz_copy(&(elem->coord.v[1]), coord1); + ibz_copy(&(elem->coord.v[2]), coord2); + ibz_copy(&(elem->coord.v[3]), coord3); ibz_copy(&(elem->denom), denom); } @@ -274,7 +275,7 @@ void quat_alg_elem_mul_by_scalar(quat_alg_elem_t *res, const ibz_t *scalar, const quat_alg_elem_t *elem) { for (int i = 0; i < 4; i++) { - ibz_mul(&(res->coord[i]), &(elem->coord[i]), scalar); + ibz_mul(&(res->coord.v[i]), &(elem->coord.v[i]), scalar); } ibz_copy(&(res->denom), &(elem->denom)); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/common.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/common.c index 1df7755a29..e051ac340a 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/common.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/common.c @@ -14,6 +14,7 @@ public_key_init(public_key_t *pk) void public_key_finalize(public_key_t *pk) { + (void) pk; } // compute the challenge as the 
hash of the message and the commitment curve and public key diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim2.c index b31ae7771a..5bf214c4e2 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim2.c @@ -5,34 +5,34 @@ void ibz_vec_2_set(ibz_vec_2_t *vec, int a0, int a1) { - ibz_set(&((*vec)[0]), a0); - ibz_set(&((*vec)[1]), a1); + ibz_set(&(vec->v[0]), a0); + ibz_set(&(vec->v[1]), a1); } void ibz_mat_2x2_set(ibz_mat_2x2_t *mat, int a00, int a01, int a10, int a11) { - ibz_set(&((*mat)[0][0]), a00); - ibz_set(&((*mat)[0][1]), a01); - ibz_set(&((*mat)[1][0]), a10); - ibz_set(&((*mat)[1][1]), a11); + ibz_set(&(mat->m[0][0]), a00); + ibz_set(&(mat->m[0][1]), a01); + ibz_set(&(mat->m[1][0]), a10); + ibz_set(&(mat->m[1][1]), a11); } void ibz_mat_2x2_copy(ibz_mat_2x2_t *copy, const ibz_mat_2x2_t *copied) { - ibz_copy(&((*copy)[0][0]), &((*copied)[0][0])); - ibz_copy(&((*copy)[0][1]), &((*copied)[0][1])); - ibz_copy(&((*copy)[1][0]), &((*copied)[1][0])); - ibz_copy(&((*copy)[1][1]), &((*copied)[1][1])); + ibz_copy(&(copy->m[0][0]), &(copied->m[0][0])); + ibz_copy(&(copy->m[0][1]), &(copied->m[0][1])); + ibz_copy(&(copy->m[1][0]), &(copied->m[1][0])); + ibz_copy(&(copy->m[1][1]), &(copied->m[1][1])); } void ibz_mat_2x2_add(ibz_mat_2x2_t *sum, const ibz_mat_2x2_t *a, const ibz_mat_2x2_t *b) { - ibz_add(&((*sum)[0][0]), &((*a)[0][0]), &((*b)[0][0])); - ibz_add(&((*sum)[0][1]), &((*a)[0][1]), &((*b)[0][1])); - ibz_add(&((*sum)[1][0]), &((*a)[1][0]), &((*b)[1][0])); - ibz_add(&((*sum)[1][1]), &((*a)[1][1]), &((*b)[1][1])); + ibz_add(&(sum->m[0][0]), &(a->m[0][0]), &(b->m[0][0])); + ibz_add(&(sum->m[0][1]), &(a->m[0][1]), &(b->m[0][1])); + ibz_add(&(sum->m[1][0]), &(a->m[1][0]), &(b->m[1][0])); + ibz_add(&(sum->m[1][1]), &(a->m[1][1]), &(b->m[1][1])); } void @@ -53,16 +53,16 @@ ibz_mat_2x2_eval(ibz_vec_2_t *res, const ibz_mat_2x2_t *mat, const ibz_vec_2_t * ibz_vec_2_t matvec; ibz_init(&prod); ibz_vec_2_init(&matvec); - ibz_mul(&prod, &((*mat)[0][0]), &((*vec)[0])); - ibz_copy(&(matvec[0]), &prod); - ibz_mul(&prod, &((*mat)[0][1]), &((*vec)[1])); - ibz_add(&(matvec[0]), &(matvec[0]), &prod); - ibz_mul(&prod, &((*mat)[1][0]), &((*vec)[0])); - ibz_copy(&(matvec[1]), &prod); - ibz_mul(&prod, &((*mat)[1][1]), &((*vec)[1])); - ibz_add(&(matvec[1]), &(matvec[1]), &prod); - ibz_copy(&((*res)[0]), &(matvec[0])); - ibz_copy(&((*res)[1]), &(matvec[1])); + ibz_mul(&prod, &(mat->m[0][0]), &(vec->v[0])); + ibz_copy(&(matvec.v[0]), &prod); + ibz_mul(&prod, &(mat->m[0][1]), &(vec->v[1])); + ibz_add(&(matvec.v[0]), &(matvec.v[0]), &prod); + ibz_mul(&prod, &(mat->m[1][0]), &(vec->v[0])); + ibz_copy(&(matvec.v[1]), &prod); + ibz_mul(&prod, &(mat->m[1][1]), &(vec->v[1])); + ibz_add(&(matvec.v[1]), &(matvec.v[1]), &prod); + ibz_copy(&(res->v[0]), &(matvec.v[0])); + ibz_copy(&(res->v[1]), &(matvec.v[1])); ibz_finalize(&prod); ibz_vec_2_finalize(&matvec); } @@ -78,21 +78,21 @@ ibz_2x2_mul_mod(ibz_mat_2x2_t *prod, const ibz_mat_2x2_t *mat_a, const ibz_mat_2 ibz_mat_2x2_init(&sums); for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_set(&(sums[i][j]), 0); + ibz_set(&(sums.m[i][j]), 0); } } for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { for (int k = 0; k < 2; k++) { - ibz_mul(&mul, &((*mat_a)[i][k]), &((*mat_b)[k][j])); - ibz_add(&(sums[i][j]), &(sums[i][j]), &mul); - ibz_mod(&(sums[i][j]), &(sums[i][j]), m); + ibz_mul(&mul, &(mat_a->m[i][k]), &(mat_b->m[k][j])); + 
ibz_add(&(sums.m[i][j]), &(sums.m[i][j]), &mul); + ibz_mod(&(sums.m[i][j]), &(sums.m[i][j]), m); } } } for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_copy(&((*prod)[i][j]), &(sums[i][j])); + ibz_copy(&(prod->m[i][j]), &(sums.m[i][j])); } } ibz_finalize(&mul); @@ -105,9 +105,9 @@ ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m ibz_t det, prod; ibz_init(&det); ibz_init(&prod); - ibz_mul(&det, &((*mat)[0][0]), &((*mat)[1][1])); + ibz_mul(&det, &(mat->m[0][0]), &(mat->m[1][1])); ibz_mod(&det, &det, m); - ibz_mul(&prod, &((*mat)[0][1]), &((*mat)[1][0])); + ibz_mul(&prod, &(mat->m[0][1]), &(mat->m[1][0])); ibz_sub(&det, &det, &prod); ibz_mod(&det, &det, m); int res = ibz_invmod(&det, &det, m); @@ -115,15 +115,15 @@ ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m ibz_set(&prod, res); ibz_mul(&det, &det, &prod); // compute inverse - ibz_copy(&prod, &((*mat)[0][0])); - ibz_copy(&((*inv)[0][0]), &((*mat)[1][1])); - ibz_copy(&((*inv)[1][1]), &prod); - ibz_neg(&((*inv)[1][0]), &((*mat)[1][0])); - ibz_neg(&((*inv)[0][1]), &((*mat)[0][1])); + ibz_copy(&prod, &(mat->m[0][0])); + ibz_copy(&(inv->m[0][0]), &(mat->m[1][1])); + ibz_copy(&(inv->m[1][1]), &prod); + ibz_neg(&(inv->m[1][0]), &(mat->m[1][0])); + ibz_neg(&(inv->m[0][1]), &(mat->m[0][1])); for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_mul(&((*inv)[i][j]), &((*inv)[i][j]), &det); - ibz_mod(&((*inv)[i][j]), &((*inv)[i][j]), m); + ibz_mul(&(inv->m[i][j]), &(inv->m[i][j]), &det); + ibz_mod(&(inv->m[i][j]), &(inv->m[i][j]), m); } } ibz_finalize(&det); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim2id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim2id2iso.c index 171473d481..143060e2c3 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim2id2iso.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim2id2iso.c @@ -137,10 +137,10 @@ _fixed_degree_isogeny_impl(quat_left_ideal_t *lideal, ibz_invmod(&tmp, &tmp, &two_pow); assert(!ibz_is_even(&tmp)); - ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); - ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); - ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); - ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + ibz_mul(&theta.coord.v[0], &theta.coord.v[0], &tmp); + ibz_mul(&theta.coord.v[1], &theta.coord.v[1], &tmp); + ibz_mul(&theta.coord.v[2], &theta.coord.v[2], &tmp); + ibz_mul(&theta.coord.v[3], &theta.coord.v[3], &tmp); // applying theta to the basis ec_basis_t B0_two_theta; @@ -197,53 +197,53 @@ post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, const ibz_ // treatment if (is_special_order) { // reordering the basis if needed - if (ibz_cmp(&(*gram)[0][0], &(*gram)[2][2]) == 0) { + if (ibz_cmp(&gram->m[0][0], &gram->m[2][2]) == 0) { for (int i = 0; i < 4; i++) { - ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + ibz_swap(&reduced->m[i][1], &reduced->m[i][2]); } - ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); - ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); - ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); - ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); - ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); - } else if (ibz_cmp(&(*gram)[0][0], &(*gram)[3][3]) == 0) { + ibz_swap(&gram->m[0][2], &gram->m[0][1]); + ibz_swap(&gram->m[2][0], &gram->m[1][0]); + ibz_swap(&gram->m[3][2], &gram->m[3][1]); + ibz_swap(&gram->m[2][3], &gram->m[1][3]); + ibz_swap(&gram->m[2][2], &gram->m[1][1]); + } else if (ibz_cmp(&gram->m[0][0], &gram->m[3][3]) == 0) { for (int i = 0; i < 4; i++) { - 
ibz_swap(&(*reduced)[i][1], &(*reduced)[i][3]); + ibz_swap(&reduced->m[i][1], &reduced->m[i][3]); } - ibz_swap(&(*gram)[0][3], &(*gram)[0][1]); - ibz_swap(&(*gram)[3][0], &(*gram)[1][0]); - ibz_swap(&(*gram)[2][3], &(*gram)[2][1]); - ibz_swap(&(*gram)[3][2], &(*gram)[1][2]); - ibz_swap(&(*gram)[3][3], &(*gram)[1][1]); - } else if (ibz_cmp(&(*gram)[1][1], &(*gram)[3][3]) == 0) { + ibz_swap(&gram->m[0][3], &gram->m[0][1]); + ibz_swap(&gram->m[3][0], &gram->m[1][0]); + ibz_swap(&gram->m[2][3], &gram->m[2][1]); + ibz_swap(&gram->m[3][2], &gram->m[1][2]); + ibz_swap(&gram->m[3][3], &gram->m[1][1]); + } else if (ibz_cmp(&gram->m[1][1], &gram->m[3][3]) == 0) { // in this case it seems that we need to swap the second and third // element, and then recompute entirely the second element from the first // first we swap the second and third element for (int i = 0; i < 4; i++) { - ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + ibz_swap(&reduced->m[i][1], &reduced->m[i][2]); } - ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); - ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); - ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); - ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); - ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); + ibz_swap(&gram->m[0][2], &gram->m[0][1]); + ibz_swap(&gram->m[2][0], &gram->m[1][0]); + ibz_swap(&gram->m[3][2], &gram->m[3][1]); + ibz_swap(&gram->m[2][3], &gram->m[1][3]); + ibz_swap(&gram->m[2][2], &gram->m[1][1]); } // adjusting the sign if needed - if (ibz_cmp(&(*reduced)[0][0], &(*reduced)[1][1]) != 0) { + if (ibz_cmp(&reduced->m[0][0], &reduced->m[1][1]) != 0) { for (int i = 0; i < 4; i++) { - ibz_neg(&(*reduced)[i][1], &(*reduced)[i][1]); - ibz_neg(&(*gram)[i][1], &(*gram)[i][1]); - ibz_neg(&(*gram)[1][i], &(*gram)[1][i]); + ibz_neg(&reduced->m[i][1], &reduced->m[i][1]); + ibz_neg(&gram->m[i][1], &gram->m[i][1]); + ibz_neg(&gram->m[1][i], &gram->m[1][i]); } } - if (ibz_cmp(&(*reduced)[0][2], &(*reduced)[1][3]) != 0) { + if (ibz_cmp(&reduced->m[0][2], &reduced->m[1][3]) != 0) { for (int i = 0; i < 4; i++) { - ibz_neg(&(*reduced)[i][3], &(*reduced)[i][3]); - ibz_neg(&(*gram)[i][3], &(*gram)[i][3]); - ibz_neg(&(*gram)[3][i], &(*gram)[3][i]); + ibz_neg(&reduced->m[i][3], &reduced->m[i][3]); + ibz_neg(&gram->m[i][3], &gram->m[i][3]); + ibz_neg(&gram->m[3][i], &gram->m[3][i]); } - // assert(ibz_cmp(&(*reduced)[0][2],&(*reduced)[1][3])==0); + // assert(ibz_cmp(&reduced->m[0][2],&reduced->m[1][3])==0); } } } @@ -273,7 +273,7 @@ enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t // if the basis is of the form alpha, i*alpha, beta, i*beta // we can remove some values due to symmetry of the basis that bool need_remove_symmetry = - (ibz_cmp(&(*gram)[0][0], &(*gram)[1][1]) == 0 && ibz_cmp(&(*gram)[3][3], &(*gram)[2][2]) == 0); + (ibz_cmp(&gram->m[0][0], &gram->m[1][1]) == 0 && ibz_cmp(&gram->m[3][3], &gram->m[2][2]) == 0); int check1, check2, check3; @@ -324,10 +324,10 @@ enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t // and we ensure that we don't record the same norm in the list if (!need_remove_symmetry || (check1 <= check2 && check1 <= check3)) { // Set the point as a vector (x, y, z, w) - ibz_set(&point[0], x); - ibz_set(&point[1], y); - ibz_set(&point[2], z); - ibz_set(&point[3], w); + ibz_set(&point.v[0], x); + ibz_set(&point.v[1], y); + ibz_set(&point.v[2], z); + ibz_set(&point.v[3], w); // Evaluate this through the gram matrix and divide out by the // adjusted_norm @@ -336,10 +336,10 @@ enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, 
const ibz_mat_4x4_t assert(ibz_is_zero(&remain)); if (ibz_mod_ui(&norm, 2) == 1) { - ibz_set(&vecs[count][0], x); - ibz_set(&vecs[count][1], y); - ibz_set(&vecs[count][2], z); - ibz_set(&vecs[count][3], w); + ibz_set(&vecs[count].v[0], x); + ibz_set(&vecs[count].v[1], y); + ibz_set(&vecs[count].v[2], z); + ibz_set(&vecs[count].v[3], w); ibz_copy(&norms[count], &norm); count++; } @@ -530,10 +530,10 @@ find_uv(ibz_t *u, quat_alg_elem_t delta; // delta will be the element of smallest norm quat_alg_elem_init(&delta); - ibz_set(&delta.coord[0], 1); - ibz_set(&delta.coord[1], 0); - ibz_set(&delta.coord[2], 0); - ibz_set(&delta.coord[3], 0); + ibz_set(&delta.coord.v[0], 1); + ibz_set(&delta.coord.v[1], 0); + ibz_set(&delta.coord.v[2], 0); + ibz_set(&delta.coord.v[3], 0); ibz_copy(&delta.denom, &reduced_id.lattice.denom); ibz_mat_4x4_eval(&delta.coord, &reduced[0], &delta.coord); assert(quat_lattice_contains(NULL, &reduced_id.lattice, &delta)); @@ -542,7 +542,7 @@ find_uv(ibz_t *u, quat_alg_conj(&delta, &delta); ibz_mul(&delta.denom, &delta.denom, &ideal[0].norm); quat_lattice_alg_elem_mul(&reduced_id.lattice, &reduced_id.lattice, &delta, Bpoo); - ibz_copy(&reduced_id.norm, &gram[0][0][0]); + ibz_copy(&reduced_id.norm, &gram[0].m[0][0]); ibz_div(&reduced_id.norm, &remain, &reduced_id.norm, &adjusted_norm[0]); assert(ibz_cmp(&remain, &ibz_const_zero) == 0); @@ -989,10 +989,10 @@ dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, } ibz_invmod(&tmp, &tmp, &two_pow); - ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); - ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); - ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); - ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + ibz_mul(&theta.coord.v[0], &theta.coord.v[0], &tmp); + ibz_mul(&theta.coord.v[1], &theta.coord.v[1], &tmp); + ibz_mul(&theta.coord.v[2], &theta.coord.v[2], &tmp); + ibz_mul(&theta.coord.v[3], &theta.coord.v[3], &tmp); // applying theta endomorphism_application_even_basis(&bas2, 0, &Fv_codomain.E1, &theta, TORSION_EVEN_POWER); @@ -1092,10 +1092,10 @@ dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, ibz_mul(&tmp, &tmp, &CONNECTING_IDEALS[index_order1].norm); } ibz_invmod(&tmp, &tmp, &TORSION_PLUS_2POWER); - ibz_mul(&beta1->coord[0], &beta1->coord[0], &tmp); - ibz_mul(&beta1->coord[1], &beta1->coord[1], &tmp); - ibz_mul(&beta1->coord[2], &beta1->coord[2], &tmp); - ibz_mul(&beta1->coord[3], &beta1->coord[3], &tmp); + ibz_mul(&beta1->coord.v[0], &beta1->coord.v[0], &tmp); + ibz_mul(&beta1->coord.v[1], &beta1->coord.v[1], &tmp); + ibz_mul(&beta1->coord.v[2], &beta1->coord.v[2], &tmp); + ibz_mul(&beta1->coord.v[3], &beta1->coord.v[3], &tmp); endomorphism_application_even_basis(basis, 0, codomain, beta1, TORSION_EVEN_POWER); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim4.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim4.c index 495dc2dcb2..b024a7d46e 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim4.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim4.c @@ -11,16 +11,16 @@ ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t ibz_mat_4x4_init(&mat); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&(mat[i][j]), 0); + ibz_set(&(mat.m[i][j]), 0); for (int k = 0; k < 4; k++) { - ibz_mul(&prod, &((*a)[i][k]), &((*b)[k][j])); - ibz_add(&(mat[i][j]), &(mat[i][j]), &prod); + ibz_mul(&prod, &(a->m[i][k]), &(b->m[k][j])); + ibz_add(&(mat.m[i][j]), &(mat.m[i][j]), &prod); } } } for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - 
ibz_copy(&((*res)[i][j]), &(mat[i][j])); + ibz_copy(&(res->m[i][j]), &(mat.m[i][j])); } } ibz_mat_4x4_finalize(&mat); @@ -31,61 +31,61 @@ ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t void ibz_vec_4_set(ibz_vec_4_t *vec, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) { - ibz_set(&((*vec)[0]), coord0); - ibz_set(&((*vec)[1]), coord1); - ibz_set(&((*vec)[2]), coord2); - ibz_set(&((*vec)[3]), coord3); + ibz_set(&(vec->v[0]), coord0); + ibz_set(&(vec->v[1]), coord1); + ibz_set(&(vec->v[2]), coord2); + ibz_set(&(vec->v[3]), coord3); } void ibz_vec_4_copy(ibz_vec_4_t *new, const ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_copy(&((*new)[i]), &((*vec)[i])); + ibz_copy(&(new->v[i]), &(vec->v[i])); } } void ibz_vec_4_copy_ibz(ibz_vec_4_t *res, const ibz_t *coord0, const ibz_t *coord1, const ibz_t *coord2, const ibz_t *coord3) { - ibz_copy(&((*res)[0]), coord0); - ibz_copy(&((*res)[1]), coord1); - ibz_copy(&((*res)[2]), coord2); - ibz_copy(&((*res)[3]), coord3); + ibz_copy(&(res->v[0]), coord0); + ibz_copy(&(res->v[1]), coord1); + ibz_copy(&(res->v[2]), coord2); + ibz_copy(&(res->v[3]), coord3); } void ibz_vec_4_content(ibz_t *content, const ibz_vec_4_t *v) { - ibz_gcd(content, &((*v)[0]), &((*v)[1])); - ibz_gcd(content, &((*v)[2]), content); - ibz_gcd(content, &((*v)[3]), content); + ibz_gcd(content, &(v->v[0]), &(v->v[1])); + ibz_gcd(content, &(v->v[2]), content); + ibz_gcd(content, &(v->v[3]), content); } void ibz_vec_4_negate(ibz_vec_4_t *neg, const ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_neg(&((*neg)[i]), &((*vec)[i])); + ibz_neg(&(neg->v[i]), &(vec->v[i])); } } void ibz_vec_4_add(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) { - ibz_add(&((*res)[0]), &((*a)[0]), &((*b)[0])); - ibz_add(&((*res)[1]), &((*a)[1]), &((*b)[1])); - ibz_add(&((*res)[2]), &((*a)[2]), &((*b)[2])); - ibz_add(&((*res)[3]), &((*a)[3]), &((*b)[3])); + ibz_add(&(res->v[0]), &(a->v[0]), &(b->v[0])); + ibz_add(&(res->v[1]), &(a->v[1]), &(b->v[1])); + ibz_add(&(res->v[2]), &(a->v[2]), &(b->v[2])); + ibz_add(&(res->v[3]), &(a->v[3]), &(b->v[3])); } void ibz_vec_4_sub(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) { - ibz_sub(&((*res)[0]), &((*a)[0]), &((*b)[0])); - ibz_sub(&((*res)[1]), &((*a)[1]), &((*b)[1])); - ibz_sub(&((*res)[2]), &((*a)[2]), &((*b)[2])); - ibz_sub(&((*res)[3]), &((*a)[3]), &((*b)[3])); + ibz_sub(&(res->v[0]), &(a->v[0]), &(b->v[0])); + ibz_sub(&(res->v[1]), &(a->v[1]), &(b->v[1])); + ibz_sub(&(res->v[2]), &(a->v[2]), &(b->v[2])); + ibz_sub(&(res->v[3]), &(a->v[3]), &(b->v[3])); } int @@ -93,7 +93,7 @@ ibz_vec_4_is_zero(const ibz_vec_4_t *x) { int res = 1; for (int i = 0; i < 4; i++) { - res &= ibz_is_zero(&((*x)[i])); + res &= ibz_is_zero(&(x->v[i])); } return (res); } @@ -110,12 +110,12 @@ ibz_vec_4_linear_combination(ibz_vec_4_t *lc, ibz_vec_4_init(&sums); ibz_init(&prod); for (int i = 0; i < 4; i++) { - ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); - ibz_mul(&prod, coeff_b, &((*vec_b)[i])); - ibz_add(&(sums[i]), &(sums[i]), &prod); + ibz_mul(&(sums.v[i]), coeff_a, &(vec_a->v[i])); + ibz_mul(&prod, coeff_b, &(vec_b->v[i])); + ibz_add(&(sums.v[i]), &(sums.v[i]), &prod); } for (int i = 0; i < 4; i++) { - ibz_copy(&((*lc)[i]), &(sums[i])); + ibz_copy(&(lc->v[i]), &(sums.v[i])); } ibz_finalize(&prod); ibz_vec_4_finalize(&sums); @@ -125,7 +125,7 @@ void ibz_vec_4_scalar_mul(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_mul(&((*prod)[i]), 
&((*vec)[i]), scalar); + ibz_mul(&(prod->v[i]), &(vec->v[i]), scalar); } } @@ -136,7 +136,7 @@ ibz_vec_4_scalar_div(ibz_vec_4_t *quot, const ibz_t *scalar, const ibz_vec_4_t * ibz_t r; ibz_init(&r); for (int i = 0; i < 4; i++) { - ibz_div(&((*quot)[i]), &r, &((*vec)[i]), scalar); + ibz_div(&(quot->v[i]), &r, &(vec->v[i]), scalar); res = res && ibz_is_zero(&r); } ibz_finalize(&r); @@ -148,7 +148,7 @@ ibz_mat_4x4_copy(ibz_mat_4x4_t *new, const ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&((*new)[i][j]), &((*mat)[i][j])); + ibz_copy(&(new->m[i][j]), &(mat->m[i][j])); } } } @@ -158,7 +158,7 @@ ibz_mat_4x4_negate(ibz_mat_4x4_t *neg, const ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_neg(&((*neg)[i][j]), &((*mat)[i][j])); + ibz_neg(&(neg->m[i][j]), &(mat->m[i][j])); } } } @@ -170,7 +170,7 @@ ibz_mat_4x4_transpose(ibz_mat_4x4_t *transposed, const ibz_mat_4x4_t *mat) ibz_mat_4x4_init(&work); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(work[i][j]), &((*mat)[j][i])); + ibz_copy(&(work.m[i][j]), &(mat->m[j][i])); } } ibz_mat_4x4_copy(transposed, &work); @@ -182,7 +182,7 @@ ibz_mat_4x4_zero(ibz_mat_4x4_t *zero) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&((*zero)[i][j]), 0); + ibz_set(&(zero->m[i][j]), 0); } } } @@ -192,9 +192,9 @@ ibz_mat_4x4_identity(ibz_mat_4x4_t *id) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&((*id)[i][j]), 0); + ibz_set(&(id->m[i][j]), 0); } - ibz_set(&((*id)[i][i]), 1); + ibz_set(&(id->m[i][i]), 1); } } @@ -204,7 +204,7 @@ ibz_mat_4x4_is_identity(const ibz_mat_4x4_t *mat) int res = 1; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - res = res && ibz_is_one(&((*mat)[i][j])) == (i == j); + res = res && ibz_is_one(&(mat->m[i][j])) == (i == j); } } return (res); @@ -216,7 +216,7 @@ ibz_mat_4x4_equal(const ibz_mat_4x4_t *mat1, const ibz_mat_4x4_t *mat2) int res = 0; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - res = res | ibz_cmp(&((*mat1)[i][j]), &((*mat2)[i][j])); + res = res | ibz_cmp(&(mat1->m[i][j]), &(mat2->m[i][j])); } } return (!res); @@ -227,7 +227,7 @@ ibz_mat_4x4_scalar_mul(ibz_mat_4x4_t *prod, const ibz_t *scalar, const ibz_mat_4 { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_mul(&((*prod)[i][j]), &((*mat)[i][j]), scalar); + ibz_mul(&(prod->m[i][j]), &(mat->m[i][j]), scalar); } } } @@ -237,10 +237,10 @@ ibz_mat_4x4_gcd(ibz_t *gcd, const ibz_mat_4x4_t *mat) { ibz_t d; ibz_init(&d); - ibz_copy(&d, &((*mat)[0][0])); + ibz_copy(&d, &(mat->m[0][0])); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_gcd(&d, &d, &((*mat)[i][j])); + ibz_gcd(&d, &d, &(mat->m[i][j])); } } ibz_copy(gcd, &d); @@ -255,7 +255,7 @@ ibz_mat_4x4_scalar_div(ibz_mat_4x4_t *quot, const ibz_t *scalar, const ibz_mat_4 ibz_init(&r); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_div(&((*quot)[i][j]), &r, &((*mat)[i][j]), scalar); + ibz_div(&(quot->m[i][j]), &r, &(mat->m[i][j]), scalar); res = res && ibz_is_zero(&r); } } @@ -325,17 +325,17 @@ ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_ // compute some 2x2 minors, store them in s and c for (int i = 0; i < 3; i++) { - ibz_mat_2x2_det_from_ibz(&(s[i]), &((*mat)[0][0]), &((*mat)[0][i + 1]), &((*mat)[1][0]), &((*mat)[1][i + 1])); - ibz_mat_2x2_det_from_ibz(&(c[i]), &((*mat)[2][0]), &((*mat)[2][i + 1]), &((*mat)[3][0]), &((*mat)[3][i + 1])); + 
ibz_mat_2x2_det_from_ibz(&(s[i]), &(mat->m[0][0]), &(mat->m[0][i + 1]), &(mat->m[1][0]), &(mat->m[1][i + 1])); + ibz_mat_2x2_det_from_ibz(&(c[i]), &(mat->m[2][0]), &(mat->m[2][i + 1]), &(mat->m[3][0]), &(mat->m[3][i + 1])); } for (int i = 0; i < 2; i++) { ibz_mat_2x2_det_from_ibz( - &(s[3 + i]), &((*mat)[0][1]), &((*mat)[0][2 + i]), &((*mat)[1][1]), &((*mat)[1][2 + i])); + &(s[3 + i]), &(mat->m[0][1]), &(mat->m[0][2 + i]), &(mat->m[1][1]), &(mat->m[1][2 + i])); ibz_mat_2x2_det_from_ibz( - &(c[3 + i]), &((*mat)[2][1]), &((*mat)[2][2 + i]), &((*mat)[3][1]), &((*mat)[3][2 + i])); + &(c[3 + i]), &(mat->m[2][1]), &(mat->m[2][2 + i]), &(mat->m[3][1]), &(mat->m[3][2 + i])); } - ibz_mat_2x2_det_from_ibz(&(s[5]), &((*mat)[0][2]), &((*mat)[0][3]), &((*mat)[1][2]), &((*mat)[1][3])); - ibz_mat_2x2_det_from_ibz(&(c[5]), &((*mat)[2][2]), &((*mat)[2][3]), &((*mat)[3][2]), &((*mat)[3][3])); + ibz_mat_2x2_det_from_ibz(&(s[5]), &(mat->m[0][2]), &(mat->m[0][3]), &(mat->m[1][2]), &(mat->m[1][3])); + ibz_mat_2x2_det_from_ibz(&(c[5]), &(mat->m[2][2]), &(mat->m[2][3]), &(mat->m[3][2]), &(mat->m[3][3])); // compute det ibz_set(&work_det, 0); @@ -351,39 +351,39 @@ ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_ for (int j = 0; j < 4; j++) { for (int k = 0; k < 2; k++) { if ((k + j + 1) % 2 == 1) { - ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), - &((*mat)[1 - k][(j == 0)]), + ibz_inv_dim4_make_coeff_pmp(&(work.m[j][k]), + &(mat->m[1 - k][(j == 0)]), &(c[6 - j - (j == 0)]), - &((*mat)[1 - k][2 - (j > 1)]), + &(mat->m[1 - k][2 - (j > 1)]), &(c[4 - j - (j == 1)]), - &((*mat)[1 - k][3 - (j == 3)]), + &(mat->m[1 - k][3 - (j == 3)]), &(c[3 - j - (j == 1) - (j == 2)])); } else { - ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), - &((*mat)[1 - k][(j == 0)]), + ibz_inv_dim4_make_coeff_mpm(&(work.m[j][k]), + &(mat->m[1 - k][(j == 0)]), &(c[6 - j - (j == 0)]), - &((*mat)[1 - k][2 - (j > 1)]), + &(mat->m[1 - k][2 - (j > 1)]), &(c[4 - j - (j == 1)]), - &((*mat)[1 - k][3 - (j == 3)]), + &(mat->m[1 - k][3 - (j == 3)]), &(c[3 - j - (j == 1) - (j == 2)])); } } for (int k = 2; k < 4; k++) { if ((k + j + 1) % 2 == 1) { - ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), - &((*mat)[3 - (k == 3)][(j == 0)]), + ibz_inv_dim4_make_coeff_pmp(&(work.m[j][k]), + &(mat->m[3 - (k == 3)][(j == 0)]), &(s[6 - j - (j == 0)]), - &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(mat->m[3 - (k == 3)][2 - (j > 1)]), &(s[4 - j - (j == 1)]), - &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(mat->m[3 - (k == 3)][3 - (j == 3)]), &(s[3 - j - (j == 1) - (j == 2)])); } else { - ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), - &((*mat)[3 - (k == 3)][(j == 0)]), + ibz_inv_dim4_make_coeff_mpm(&(work.m[j][k]), + &(mat->m[3 - (k == 3)][(j == 0)]), &(s[6 - j - (j == 0)]), - &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(mat->m[3 - (k == 3)][2 - (j > 1)]), &(s[4 - j - (j == 1)]), - &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(mat->m[3 - (k == 3)][3 - (j == 3)]), &(s[3 - j - (j == 1) - (j == 2)])); } } @@ -418,8 +418,8 @@ ibz_mat_4x4_eval(ibz_vec_4_t *res, const ibz_mat_4x4_t *mat, const ibz_vec_4_t * // assume initialization to 0 for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_mul(&prod, &(*mat)[i][j], &(*vec)[j]); - ibz_add(&(sum[i]), &(sum[i]), &prod); + ibz_mul(&prod, &mat->m[i][j], &vec->v[j]); + ibz_add(&(sum.v[i]), &(sum.v[i]), &prod); } } ibz_vec_4_copy(res, &sum); @@ -437,8 +437,8 @@ ibz_mat_4x4_eval_t(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_mat_4x4_t // assume initialization to 0 for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; 
j++) { - ibz_mul(&prod, &(*mat)[j][i], &(*vec)[j]); - ibz_add(&(sum[i]), &(sum[i]), &prod); + ibz_mul(&prod, &mat->m[j][i], &vec->v[j]); + ibz_add(&(sum.v[i]), &(sum.v[i]), &prod); } } ibz_vec_4_copy(res, &sum); @@ -457,14 +457,14 @@ quat_qf_eval(ibz_t *res, const ibz_mat_4x4_t *qf, const ibz_vec_4_t *coord) ibz_vec_4_init(&sum); ibz_mat_4x4_eval(&sum, qf, coord); for (int i = 0; i < 4; i++) { - ibz_mul(&prod, &(sum[i]), &(*coord)[i]); + ibz_mul(&prod, &(sum.v[i]), &coord->v[i]); if (i > 0) { - ibz_add(&(sum[0]), &(sum[0]), &prod); + ibz_add(&(sum.v[0]), &(sum.v[0]), &prod); } else { - ibz_copy(&sum[0], &prod); + ibz_copy(&sum.v[0], &prod); } } - ibz_copy(res, &sum[0]); + ibz_copy(res, &sum.v[0]); ibz_finalize(&prod); ibz_vec_4_finalize(&sum); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/encode_signature.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/encode_signature.c index 112c695941..3a630cfd58 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/encode_signature.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/encode_signature.c @@ -157,17 +157,17 @@ secret_key_to_bytes(byte_t *enc, const secret_key_t *sk, const public_key_t *pk) ibz_finalize(&gcd); } #endif - enc = ibz_to_bytes(enc, &gen.coord[0], FP_ENCODED_BYTES, true); - enc = ibz_to_bytes(enc, &gen.coord[1], FP_ENCODED_BYTES, true); - enc = ibz_to_bytes(enc, &gen.coord[2], FP_ENCODED_BYTES, true); - enc = ibz_to_bytes(enc, &gen.coord[3], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[0], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[1], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[2], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[3], FP_ENCODED_BYTES, true); quat_alg_elem_finalize(&gen); } - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][0], TORSION_2POWER_BYTES, false); - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][1], TORSION_2POWER_BYTES, false); - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][0], TORSION_2POWER_BYTES, false); - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][1], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[0][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[0][1], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[1][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[1][1], TORSION_2POWER_BYTES, false); assert(enc - start == SECRETKEY_BYTES); } @@ -187,19 +187,19 @@ secret_key_from_bytes(secret_key_t *sk, public_key_t *pk, const byte_t *enc) quat_alg_elem_t gen; quat_alg_elem_init(&gen); enc = ibz_from_bytes(&norm, enc, FP_ENCODED_BYTES, false); - enc = ibz_from_bytes(&gen.coord[0], enc, FP_ENCODED_BYTES, true); - enc = ibz_from_bytes(&gen.coord[1], enc, FP_ENCODED_BYTES, true); - enc = ibz_from_bytes(&gen.coord[2], enc, FP_ENCODED_BYTES, true); - enc = ibz_from_bytes(&gen.coord[3], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[0], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[1], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[2], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[3], enc, FP_ENCODED_BYTES, true); quat_lideal_create(&sk->secret_ideal, &gen, &norm, &MAXORD_O0, &QUATALG_PINFTY); ibz_finalize(&norm); quat_alg_elem_finalize(&gen); } - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][0], enc, TORSION_2POWER_BYTES, false); - enc = 
ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][1], enc, TORSION_2POWER_BYTES, false); - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][0], enc, TORSION_2POWER_BYTES, false); - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][1], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[0][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[0][1], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[1][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[1][1], enc, TORSION_2POWER_BYTES, false); assert(enc - start == SECRETKEY_BYTES); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/endomorphism_action.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/endomorphism_action.c index abeddc30a7..1a93e36455 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/endomorphism_action.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/endomorphism_action.c @@ -261,223 +261,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x56db,0x1b54,0xbda2,0xc5d3,0xdd06,0x861d,0x9780,0x7475,0x33d1,0x41af,0x34b2,0x7f9d,0x7f8c,0xaa8c,0xb471,0xca}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x56db,0x1b54,0xbda2,0xc5d3,0xdd06,0x861d,0x9780,0x7475,0x33d1,0x41af,0x34b2,0x7f9d,0x7f8c,0xaa8c,0xb471,0xca}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1b5456db,0xc5d3bda2,0x861ddd06,0x74759780,0x41af33d1,0x7f9d34b2,0xaa8c7f8c,0xcab471}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1b5456db,0xc5d3bda2,0x861ddd06,0x74759780,0x41af33d1,0x7f9d34b2,0xaa8c7f8c,0xcab471}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc5d3bda21b5456db,0x74759780861ddd06,0x7f9d34b241af33d1,0xcab471aa8c7f8c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc5d3bda21b5456db,0x74759780861ddd06,0x7f9d34b241af33d1,0xcab471aa8c7f8c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7d7a,0x48b,0x7d32,0x7bfb,0x9bd3,0x63d8,0x9182,0xa955,0x3e1,0x344,0x6861,0x76bf,0x5cd0,0xeeb4,0x4ae3,0x57}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7d7a,0x48b,0x7d32,0x7bfb,0x9bd3,0x63d8,0x9182,0xa955,0x3e1,0x344,0x6861,0x76bf,0x5cd0,0xeeb4,0x4ae3,0x57}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x48b7d7a,0x7bfb7d32,0x63d89bd3,0xa9559182,0x34403e1,0x76bf6861,0xeeb45cd0,0x574ae3}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x48b7d7a,0x7bfb7d32,0x63d89bd3,0xa9559182,0x34403e1,0x76bf6861,0xeeb45cd0,0x574ae3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7bfb7d32048b7d7a,0xa955918263d89bd3,0x76bf6861034403e1,0x574ae3eeb45cd0}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7bfb7d32048b7d7a,0xa955918263d89bd3,0x76bf6861034403e1,0x574ae3eeb45cd0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x444f,0x3698,0xd649,0x856f,0x41db,0x498f,0xafdf,0x189c,0xcb5b,0xe50b,0xbff,0xf7e0,0x47f9,0xa88b,0x35da,0x15}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x444f,0x3698,0xd649,0x856f,0x41db,0x498f,0xafdf,0x189c,0xcb5b,0xe50b,0xbff,0xf7e0,0x47f9,0xa88b,0x35da,0x15}}}} #elif 
GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3698444f,0x856fd649,0x498f41db,0x189cafdf,0xe50bcb5b,0xf7e00bff,0xa88b47f9,0x1535da}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3698444f,0x856fd649,0x498f41db,0x189cafdf,0xe50bcb5b,0xf7e00bff,0xa88b47f9,0x1535da}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x856fd6493698444f,0x189cafdf498f41db,0xf7e00bffe50bcb5b,0x1535daa88b47f9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x856fd6493698444f,0x189cafdf498f41db,0xf7e00bffe50bcb5b,0x1535daa88b47f9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa925,0xe4ab,0x425d,0x3a2c,0x22f9,0x79e2,0x687f,0x8b8a,0xcc2e,0xbe50,0xcb4d,0x8062,0x8073,0x5573,0x4b8e,0x35}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa925,0xe4ab,0x425d,0x3a2c,0x22f9,0x79e2,0x687f,0x8b8a,0xcc2e,0xbe50,0xcb4d,0x8062,0x8073,0x5573,0x4b8e,0x35}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe4aba925,0x3a2c425d,0x79e222f9,0x8b8a687f,0xbe50cc2e,0x8062cb4d,0x55738073,0x354b8e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe4aba925,0x3a2c425d,0x79e222f9,0x8b8a687f,0xbe50cc2e,0x8062cb4d,0x55738073,0x354b8e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3a2c425de4aba925,0x8b8a687f79e222f9,0x8062cb4dbe50cc2e,0x354b8e55738073}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3a2c425de4aba925,0x8b8a687f79e222f9,0x8062cb4dbe50cc2e,0x354b8e55738073}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xabf,0x5490,0xd5fd,0x36ba,0xda0f,0x4a59,0x4eea,0xd1,0xa3f0,0xa7ae,0x6f6,0x9146,0x5004,0xcde6,0xa2d2,0x7d}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xabf,0x5490,0xd5fd,0x36ba,0xda0f,0x4a59,0x4eea,0xd1,0xa3f0,0xa7ae,0x6f6,0x9146,0x5004,0xcde6,0xa2d2,0x7d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x54900abf,0x36bad5fd,0x4a59da0f,0xd14eea,0xa7aea3f0,0x914606f6,0xcde65004,0x7da2d2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x54900abf,0x36bad5fd,0x4a59da0f,0xd14eea,0xa7aea3f0,0x914606f6,0xcde65004,0x7da2d2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x36bad5fd54900abf,0xd14eea4a59da0f,0x914606f6a7aea3f0,0x7da2d2cde65004}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x36bad5fd54900abf,0xd14eea4a59da0f,0x914606f6a7aea3f0,0x7da2d2cde65004}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8680,0xb787,0xbde3,0x611d,0xa95f,0x8b68,0xc9ec,0x819,0x2361,0xf73e,0x5e31,0xbd7b,0x2b45,0x40d7,0x2400,0x68}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8680,0xb787,0xbde3,0x611d,0xa95f,0x8b68,0xc9ec,0x819,0x2361,0xf73e,0x5e31,0xbd7b,0x2b45,0x40d7,0x2400,0x68}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb7878680,0x611dbde3,0x8b68a95f,0x819c9ec,0xf73e2361,0xbd7b5e31,0x40d72b45,0x682400}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb7878680,0x611dbde3,0x8b68a95f,0x819c9ec,0xf73e2361,0xbd7b5e31,0x40d72b45,0x682400}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x611dbde3b7878680,0x819c9ec8b68a95f,0xbd7b5e31f73e2361,0x68240040d72b45}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x611dbde3b7878680,0x819c9ec8b68a95f,0xbd7b5e31f73e2361,0x68240040d72b45}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4277,0x6d20,0x9e12,0x1f0c,0x977f,0xf854,0x9d1c,0x563f,0xdb,0xc2ed,0xaf54,0xe829,0x4fb,0xd83,0x7be8,0xca}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4277,0x6d20,0x9e12,0x1f0c,0x977f,0xf854,0x9d1c,0x563f,0xdb,0xc2ed,0xaf54,0xe829,0x4fb,0xd83,0x7be8,0xca}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6d204277,0x1f0c9e12,0xf854977f,0x563f9d1c,0xc2ed00db,0xe829af54,0xd8304fb,0xca7be8}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6d204277,0x1f0c9e12,0xf854977f,0x563f9d1c,0xc2ed00db,0xe829af54,0xd8304fb,0xca7be8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1f0c9e126d204277,0x563f9d1cf854977f,0xe829af54c2ed00db,0xca7be80d8304fb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1f0c9e126d204277,0x563f9d1cf854977f,0xe829af54c2ed00db,0xca7be80d8304fb}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf541,0xab6f,0x2a02,0xc945,0x25f0,0xb5a6,0xb115,0xff2e,0x5c0f,0x5851,0xf909,0x6eb9,0xaffb,0x3219,0x5d2d,0x82}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf541,0xab6f,0x2a02,0xc945,0x25f0,0xb5a6,0xb115,0xff2e,0x5c0f,0x5851,0xf909,0x6eb9,0xaffb,0x3219,0x5d2d,0x82}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xab6ff541,0xc9452a02,0xb5a625f0,0xff2eb115,0x58515c0f,0x6eb9f909,0x3219affb,0x825d2d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xab6ff541,0xc9452a02,0xb5a625f0,0xff2eb115,0x58515c0f,0x6eb9f909,0x3219affb,0x825d2d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc9452a02ab6ff541,0xff2eb115b5a625f0,0x6eb9f90958515c0f,0x825d2d3219affb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc9452a02ab6ff541,0xff2eb115b5a625f0,0x6eb9f90958515c0f,0x825d2d3219affb}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, 
._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x56db,0x1b54,0xbda2,0xc5d3,0xdd06,0x861d,0x9780,0x7475,0x33d1,0x41af,0x34b2,0x7f9d,0x7f8c,0xaa8c,0xb471,0xca}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x56db,0x1b54,0xbda2,0xc5d3,0xdd06,0x861d,0x9780,0x7475,0x33d1,0x41af,0x34b2,0x7f9d,0x7f8c,0xaa8c,0xb471,0xca}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1b5456db,0xc5d3bda2,0x861ddd06,0x74759780,0x41af33d1,0x7f9d34b2,0xaa8c7f8c,0xcab471}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1b5456db,0xc5d3bda2,0x861ddd06,0x74759780,0x41af33d1,0x7f9d34b2,0xaa8c7f8c,0xcab471}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc5d3bda21b5456db,0x74759780861ddd06,0x7f9d34b241af33d1,0xcab471aa8c7f8c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0xc5d3bda21b5456db,0x74759780861ddd06,0x7f9d34b241af33d1,0xcab471aa8c7f8c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7d7a,0x48b,0x7d32,0x7bfb,0x9bd3,0x63d8,0x9182,0xa955,0x3e1,0x344,0x6861,0x76bf,0x5cd0,0xeeb4,0x4ae3,0x57}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7d7a,0x48b,0x7d32,0x7bfb,0x9bd3,0x63d8,0x9182,0xa955,0x3e1,0x344,0x6861,0x76bf,0x5cd0,0xeeb4,0x4ae3,0x57}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x48b7d7a,0x7bfb7d32,0x63d89bd3,0xa9559182,0x34403e1,0x76bf6861,0xeeb45cd0,0x574ae3}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x48b7d7a,0x7bfb7d32,0x63d89bd3,0xa9559182,0x34403e1,0x76bf6861,0xeeb45cd0,0x574ae3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7bfb7d32048b7d7a,0xa955918263d89bd3,0x76bf6861034403e1,0x574ae3eeb45cd0}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7bfb7d32048b7d7a,0xa955918263d89bd3,0x76bf6861034403e1,0x574ae3eeb45cd0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x444f,0x3698,0xd649,0x856f,0x41db,0x498f,0xafdf,0x189c,0xcb5b,0xe50b,0xbff,0xf7e0,0x47f9,0xa88b,0x35da,0x15}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x444f,0x3698,0xd649,0x856f,0x41db,0x498f,0xafdf,0x189c,0xcb5b,0xe50b,0xbff,0xf7e0,0x47f9,0xa88b,0x35da,0x15}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3698444f,0x856fd649,0x498f41db,0x189cafdf,0xe50bcb5b,0xf7e00bff,0xa88b47f9,0x1535da}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3698444f,0x856fd649,0x498f41db,0x189cafdf,0xe50bcb5b,0xf7e00bff,0xa88b47f9,0x1535da}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x856fd6493698444f,0x189cafdf498f41db,0xf7e00bffe50bcb5b,0x1535daa88b47f9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x856fd6493698444f,0x189cafdf498f41db,0xf7e00bffe50bcb5b,0x1535daa88b47f9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa925,0xe4ab,0x425d,0x3a2c,0x22f9,0x79e2,0x687f,0x8b8a,0xcc2e,0xbe50,0xcb4d,0x8062,0x8073,0x5573,0x4b8e,0x35}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa925,0xe4ab,0x425d,0x3a2c,0x22f9,0x79e2,0x687f,0x8b8a,0xcc2e,0xbe50,0xcb4d,0x8062,0x8073,0x5573,0x4b8e,0x35}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe4aba925,0x3a2c425d,0x79e222f9,0x8b8a687f,0xbe50cc2e,0x8062cb4d,0x55738073,0x354b8e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe4aba925,0x3a2c425d,0x79e222f9,0x8b8a687f,0xbe50cc2e,0x8062cb4d,0x55738073,0x354b8e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3a2c425de4aba925,0x8b8a687f79e222f9,0x8062cb4dbe50cc2e,0x354b8e55738073}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3a2c425de4aba925,0x8b8a687f79e222f9,0x8062cb4dbe50cc2e,0x354b8e55738073}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x30cd,0xb7f2,0x49cf,0xfe47,0xdb8a,0x683b,0x7335,0xbaa3,0xebe0,0x74ae,0x9dd4,0x8871,0x67c8,0x3c39,0x2ba2,0x24}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x30cd,0xb7f2,0x49cf,0xfe47,0xdb8a,0x683b,0x7335,0xbaa3,0xebe0,0x74ae,0x9dd4,0x8871,0x67c8,0x3c39,0x2ba2,0x24}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, 
._mp_d = (mp_limb_t[]) {0xb7f230cd,0xfe4749cf,0x683bdb8a,0xbaa37335,0x74aeebe0,0x88719dd4,0x3c3967c8,0x242ba2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb7f230cd,0xfe4749cf,0x683bdb8a,0xbaa37335,0x74aeebe0,0x88719dd4,0x3c3967c8,0x242ba2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfe4749cfb7f230cd,0xbaa37335683bdb8a,0x88719dd474aeebe0,0x242ba23c3967c8}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfe4749cfb7f230cd,0xbaa37335683bdb8a,0x88719dd474aeebe0,0x242ba23c3967c8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81fd,0xde09,0x9d8a,0x6e8c,0xa299,0x77a0,0xadb7,0x58b7,0x13a1,0x7d41,0x6349,0x1a1d,0xc40b,0x17c5,0xb772,0xdf}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81fd,0xde09,0x9d8a,0x6e8c,0xa299,0x77a0,0xadb7,0x58b7,0x13a1,0x7d41,0x6349,0x1a1d,0xc40b,0x17c5,0xb772,0xdf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xde0981fd,0x6e8c9d8a,0x77a0a299,0x58b7adb7,0x7d4113a1,0x1a1d6349,0x17c5c40b,0xdfb772}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xde0981fd,0x6e8c9d8a,0x77a0a299,0x58b7adb7,0x7d4113a1,0x1a1d6349,0x17c5c40b,0xdfb772}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6e8c9d8ade0981fd,0x58b7adb777a0a299,0x1a1d63497d4113a1,0xdfb77217c5c40b}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6e8c9d8ade0981fd,0x58b7adb777a0a299,0x1a1d63497d4113a1,0xdfb77217c5c40b}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4363,0xd1dc,0x3a2d,0x523e,0xecad,0x20f1,0x267e,0x376e,0x661b,0x53fc,0xddaa,0xf004,0x267a,0x5b07,0xd8e1,0x6f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4363,0xd1dc,0x3a2d,0x523e,0xecad,0x20f1,0x267e,0x376e,0x661b,0x53fc,0xddaa,0xf004,0x267a,0x5b07,0xd8e1,0x6f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd1dc4363,0x523e3a2d,0x20f1ecad,0x376e267e,0x53fc661b,0xf004ddaa,0x5b07267a,0x6fd8e1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd1dc4363,0x523e3a2d,0x20f1ecad,0x376e267e,0x53fc661b,0xf004ddaa,0x5b07267a,0x6fd8e1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x523e3a2dd1dc4363,0x376e267e20f1ecad,0xf004ddaa53fc661b,0x6fd8e15b07267a}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x523e3a2dd1dc4363,0x376e267e20f1ecad,0xf004ddaa53fc661b,0x6fd8e15b07267a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcf33,0x480d,0xb630,0x1b8,0x2475,0x97c4,0x8cca,0x455c,0x141f,0x8b51,0x622b,0x778e,0x9837,0xc3c6,0xd45d,0xdb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcf33,0x480d,0xb630,0x1b8,0x2475,0x97c4,0x8cca,0x455c,0x141f,0x8b51,0x622b,0x778e,0x9837,0xc3c6,0xd45d,0xdb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x480dcf33,0x1b8b630,0x97c42475,0x455c8cca,0x8b51141f,0x778e622b,0xc3c69837,0xdbd45d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x480dcf33,0x1b8b630,0x97c42475,0x455c8cca,0x8b51141f,0x778e622b,0xc3c69837,0xdbd45d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1b8b630480dcf33,0x455c8cca97c42475,0x778e622b8b51141f,0xdbd45dc3c69837}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x1b8b630480dcf33,0x455c8cca97c42475,0x778e622b8b51141f,0xdbd45dc3c69837}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, 
._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x19e1, 0x1d98, 0x1de9, 0x1dfc, 0x922, 0x1fb8, 0x476, 0xd05, 0xc85, 0x1788, 0x1967, 0x155d, 0x1f93, 0x629, 0x188f, 0x119, 0x1f6f, 0x241, 0x1378, 0x0} @@ -737,223 +737,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xef13,0xa8dc,0x8ceb,0xe405,0xe2f5,0xfda5,0x28ac,0x3bbe,0x41e5,0xee91,0xb0ff,0x5f5c,0x1920,0x1e33,0xef67,0x95}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xef13,0xa8dc,0x8ceb,0xe405,0xe2f5,0xfda5,0x28ac,0x3bbe,0x41e5,0xee91,0xb0ff,0x5f5c,0x1920,0x1e33,0xef67,0x95}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa8dcef13,0xe4058ceb,0xfda5e2f5,0x3bbe28ac,0xee9141e5,0x5f5cb0ff,0x1e331920,0x95ef67}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa8dcef13,0xe4058ceb,0xfda5e2f5,0x3bbe28ac,0xee9141e5,0x5f5cb0ff,0x1e331920,0x95ef67}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe4058ceba8dcef13,0x3bbe28acfda5e2f5,0x5f5cb0ffee9141e5,0x95ef671e331920}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe4058ceba8dcef13,0x3bbe28acfda5e2f5,0x5f5cb0ffee9141e5,0x95ef671e331920}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6b6e,0x9e93,0xfbce,0xb1b6,0x80bb,0x14b8,0x20ae,0x6bcd,0xb7f4,0xfeff,0xc4a7,0xceb3,0xd874,0x65bf,0xe003,0xe9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6b6e,0x9e93,0xfbce,0xb1b6,0x80bb,0x14b8,0x20ae,0x6bcd,0xb7f4,0xfeff,0xc4a7,0xceb3,0xd874,0x65bf,0xe003,0xe9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9e936b6e,0xb1b6fbce,0x14b880bb,0x6bcd20ae,0xfeffb7f4,0xceb3c4a7,0x65bfd874,0xe9e003}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9e936b6e,0xb1b6fbce,0x14b880bb,0x6bcd20ae,0xfeffb7f4,0xceb3c4a7,0x65bfd874,0xe9e003}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb1b6fbce9e936b6e,0x6bcd20ae14b880bb,0xceb3c4a7feffb7f4,0xe9e00365bfd874}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb1b6fbce9e936b6e,0x6bcd20ae14b880bb,0xceb3c4a7feffb7f4,0xe9e00365bfd874}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x47ff,0xc988,0x46b6,0x5236,0x9694,0xec04,0xd563,0x7d56,0x6833,0xc48f,0x8b0a,0xe195,0xb64e,0xe957,0x58b2,0xdb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x47ff,0xc988,0x46b6,0x5236,0x9694,0xec04,0xd563,0x7d56,0x6833,0xc48f,0x8b0a,0xe195,0xb64e,0xe957,0x58b2,0xdb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc98847ff,0x523646b6,0xec049694,0x7d56d563,0xc48f6833,0xe1958b0a,0xe957b64e,0xdb58b2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0xc98847ff,0x523646b6,0xec049694,0x7d56d563,0xc48f6833,0xe1958b0a,0xe957b64e,0xdb58b2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x523646b6c98847ff,0x7d56d563ec049694,0xe1958b0ac48f6833,0xdb58b2e957b64e}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x523646b6c98847ff,0x7d56d563ec049694,0xe1958b0ac48f6833,0xdb58b2e957b64e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x10ed,0x5723,0x7314,0x1bfa,0x1d0a,0x25a,0xd753,0xc441,0xbe1a,0x116e,0x4f00,0xa0a3,0xe6df,0xe1cc,0x1098,0x6a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x10ed,0x5723,0x7314,0x1bfa,0x1d0a,0x25a,0xd753,0xc441,0xbe1a,0x116e,0x4f00,0xa0a3,0xe6df,0xe1cc,0x1098,0x6a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x572310ed,0x1bfa7314,0x25a1d0a,0xc441d753,0x116ebe1a,0xa0a34f00,0xe1cce6df,0x6a1098}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x572310ed,0x1bfa7314,0x25a1d0a,0xc441d753,0x116ebe1a,0xa0a34f00,0xe1cce6df,0x6a1098}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1bfa7314572310ed,0xc441d753025a1d0a,0xa0a34f00116ebe1a,0x6a1098e1cce6df}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1bfa7314572310ed,0xc441d753025a1d0a,0xa0a34f00116ebe1a,0x6a1098e1cce6df}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8d79,0x38f8,0xf94c,0xe776,0x2bdf,0x2d2e,0x4242,0x8677,0xddf0,0x1736,0xa2e3,0x8ee7,0x52ac,0x4bb1,0xbb55,0xa4}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8d79,0x38f8,0xf94c,0xe776,0x2bdf,0x2d2e,0x4242,0x8677,0xddf0,0x1736,0xa2e3,0x8ee7,0x52ac,0x4bb1,0xbb55,0xa4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x38f88d79,0xe776f94c,0x2d2e2bdf,0x86774242,0x1736ddf0,0x8ee7a2e3,0x4bb152ac,0xa4bb55}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x38f88d79,0xe776f94c,0x2d2e2bdf,0x86774242,0x1736ddf0,0x8ee7a2e3,0x4bb152ac,0xa4bb55}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe776f94c38f88d79,0x867742422d2e2bdf,0x8ee7a2e31736ddf0,0xa4bb554bb152ac}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe776f94c38f88d79,0x867742422d2e2bdf,0x8ee7a2e31736ddf0,0xa4bb554bb152ac}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6774,0xe280,0xc0b8,0xd49d,0x3b88,0x2577,0xc53f,0x7a5d,0x3032,0x4cfb,0xd6b2,0x3ed5,0x27b8,0x584c,0x85b1,0xfc}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6774,0xe280,0xc0b8,0xd49d,0x3b88,0x2577,0xc53f,0x7a5d,0x3032,0x4cfb,0xd6b2,0x3ed5,0x27b8,0x584c,0x85b1,0xfc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe2806774,0xd49dc0b8,0x25773b88,0x7a5dc53f,0x4cfb3032,0x3ed5d6b2,0x584c27b8,0xfc85b1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0xe2806774,0xd49dc0b8,0x25773b88,0x7a5dc53f,0x4cfb3032,0x3ed5d6b2,0x584c27b8,0xfc85b1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd49dc0b8e2806774,0x7a5dc53f25773b88,0x3ed5d6b24cfb3032,0xfc85b1584c27b8}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd49dc0b8e2806774,0x7a5dc53f25773b88,0x3ed5d6b24cfb3032,0xfc85b1584c27b8}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc139,0x25cf,0xd25b,0xadb9,0xbd39,0xaa20,0x8867,0x4e7a,0x8b24,0xa81f,0x412a,0xacfc,0xee2d,0xab0c,0x1d50,0x20}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc139,0x25cf,0xd25b,0xadb9,0xbd39,0xaa20,0x8867,0x4e7a,0x8b24,0xa81f,0x412a,0xacfc,0xee2d,0xab0c,0x1d50,0x20}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x25cfc139,0xadb9d25b,0xaa20bd39,0x4e7a8867,0xa81f8b24,0xacfc412a,0xab0cee2d,0x201d50}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x25cfc139,0xadb9d25b,0xaa20bd39,0x4e7a8867,0xa81f8b24,0xacfc412a,0xab0cee2d,0x201d50}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xadb9d25b25cfc139,0x4e7a8867aa20bd39,0xacfc412aa81f8b24,0x201d50ab0cee2d}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xadb9d25b25cfc139,0x4e7a8867aa20bd39,0xacfc412aa81f8b24,0x201d50ab0cee2d}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7287,0xc707,0x6b3,0x1889,0xd420,0xd2d1,0xbdbd,0x7988,0x220f,0xe8c9,0x5d1c,0x7118,0xad53,0xb44e,0x44aa,0x5b}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7287,0xc707,0x6b3,0x1889,0xd420,0xd2d1,0xbdbd,0x7988,0x220f,0xe8c9,0x5d1c,0x7118,0xad53,0xb44e,0x44aa,0x5b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc7077287,0x188906b3,0xd2d1d420,0x7988bdbd,0xe8c9220f,0x71185d1c,0xb44ead53,0x5b44aa}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc7077287,0x188906b3,0xd2d1d420,0x7988bdbd,0xe8c9220f,0x71185d1c,0xb44ead53,0x5b44aa}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x188906b3c7077287,0x7988bdbdd2d1d420,0x71185d1ce8c9220f,0x5b44aab44ead53}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x188906b3c7077287,0x7988bdbdd2d1d420,0x71185d1ce8c9220f,0x5b44aab44ead53}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xef13,0xa8dc,0x8ceb,0xe405,0xe2f5,0xfda5,0x28ac,0x3bbe,0x41e5,0xee91,0xb0ff,0x5f5c,0x1920,0x1e33,0xef67,0x95}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xef13,0xa8dc,0x8ceb,0xe405,0xe2f5,0xfda5,0x28ac,0x3bbe,0x41e5,0xee91,0xb0ff,0x5f5c,0x1920,0x1e33,0xef67,0x95}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa8dcef13,0xe4058ceb,0xfda5e2f5,0x3bbe28ac,0xee9141e5,0x5f5cb0ff,0x1e331920,0x95ef67}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa8dcef13,0xe4058ceb,0xfda5e2f5,0x3bbe28ac,0xee9141e5,0x5f5cb0ff,0x1e331920,0x95ef67}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe4058ceba8dcef13,0x3bbe28acfda5e2f5,0x5f5cb0ffee9141e5,0x95ef671e331920}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe4058ceba8dcef13,0x3bbe28acfda5e2f5,0x5f5cb0ffee9141e5,0x95ef671e331920}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x6b6e,0x9e93,0xfbce,0xb1b6,0x80bb,0x14b8,0x20ae,0x6bcd,0xb7f4,0xfeff,0xc4a7,0xceb3,0xd874,0x65bf,0xe003,0xe9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6b6e,0x9e93,0xfbce,0xb1b6,0x80bb,0x14b8,0x20ae,0x6bcd,0xb7f4,0xfeff,0xc4a7,0xceb3,0xd874,0x65bf,0xe003,0xe9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9e936b6e,0xb1b6fbce,0x14b880bb,0x6bcd20ae,0xfeffb7f4,0xceb3c4a7,0x65bfd874,0xe9e003}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9e936b6e,0xb1b6fbce,0x14b880bb,0x6bcd20ae,0xfeffb7f4,0xceb3c4a7,0x65bfd874,0xe9e003}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb1b6fbce9e936b6e,0x6bcd20ae14b880bb,0xceb3c4a7feffb7f4,0xe9e00365bfd874}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb1b6fbce9e936b6e,0x6bcd20ae14b880bb,0xceb3c4a7feffb7f4,0xe9e00365bfd874}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x47ff,0xc988,0x46b6,0x5236,0x9694,0xec04,0xd563,0x7d56,0x6833,0xc48f,0x8b0a,0xe195,0xb64e,0xe957,0x58b2,0xdb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x47ff,0xc988,0x46b6,0x5236,0x9694,0xec04,0xd563,0x7d56,0x6833,0xc48f,0x8b0a,0xe195,0xb64e,0xe957,0x58b2,0xdb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc98847ff,0x523646b6,0xec049694,0x7d56d563,0xc48f6833,0xe1958b0a,0xe957b64e,0xdb58b2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc98847ff,0x523646b6,0xec049694,0x7d56d563,0xc48f6833,0xe1958b0a,0xe957b64e,0xdb58b2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x523646b6c98847ff,0x7d56d563ec049694,0xe1958b0ac48f6833,0xdb58b2e957b64e}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x523646b6c98847ff,0x7d56d563ec049694,0xe1958b0ac48f6833,0xdb58b2e957b64e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x10ed,0x5723,0x7314,0x1bfa,0x1d0a,0x25a,0xd753,0xc441,0xbe1a,0x116e,0x4f00,0xa0a3,0xe6df,0xe1cc,0x1098,0x6a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x10ed,0x5723,0x7314,0x1bfa,0x1d0a,0x25a,0xd753,0xc441,0xbe1a,0x116e,0x4f00,0xa0a3,0xe6df,0xe1cc,0x1098,0x6a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x572310ed,0x1bfa7314,0x25a1d0a,0xc441d753,0x116ebe1a,0xa0a34f00,0xe1cce6df,0x6a1098}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x572310ed,0x1bfa7314,0x25a1d0a,0xc441d753,0x116ebe1a,0xa0a34f00,0xe1cce6df,0x6a1098}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1bfa7314572310ed,0xc441d753025a1d0a,0xa0a34f00116ebe1a,0x6a1098e1cce6df}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1bfa7314572310ed,0xc441d753025a1d0a,0xa0a34f00116ebe1a,0x6a1098e1cce6df}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x7029,0x8b30,0x7529,0x9941,0x2be8,0x7b3f,0xe3d7,0x4553,0x7065,0x7bef,0xb49c,0xc80b,0xfa3e,0x950c,0x1ece,0x18}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7029,0x8b30,0x7529,0x9941,0x2be8,0x7b3f,0xe3d7,0x4553,0x7065,0x7bef,0xb49c,0xc80b,0xfa3e,0x950c,0x1ece,0x18}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8b307029,0x99417529,0x7b3f2be8,0x4553e3d7,0x7bef7065,0xc80bb49c,0x950cfa3e,0x181ece}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8b307029,0x99417529,0x7b3f2be8,0x4553e3d7,0x7bef7065,0xc80bb49c,0x950cfa3e,0x181ece}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x994175298b307029,0x4553e3d77b3f2be8,0xc80bb49c7bef7065,0x181ece950cfa3e}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x994175298b307029,0x4553e3d77b3f2be8,0xc80bb49c7bef7065,0x181ece950cfa3e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb399,0x92ce,0x85e8,0x7c82,0x86eb,0xb186,0x8924,0x64f1,0xd93,0x5e9a,0x3165,0x4196,0x5e79,0x158,0x55d5,0x31}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb399,0x92ce,0x85e8,0x7c82,0x86eb,0xb186,0x8924,0x64f1,0xd93,0x5e9a,0x3165,0x4196,0x5e79,0x158,0x55d5,0x31}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x92ceb399,0x7c8285e8,0xb18686eb,0x64f18924,0x5e9a0d93,0x41963165,0x1585e79,0x3155d5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x92ceb399,0x7c8285e8,0xb18686eb,0x64f18924,0x5e9a0d93,0x41963165,0x1585e79,0x3155d5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7c8285e892ceb399,0x64f18924b18686eb,0x419631655e9a0d93,0x3155d501585e79}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7c8285e892ceb399,0x64f18924b18686eb,0x419631655e9a0d93,0x3155d501585e79}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xda47,0x29f8,0x7209,0xaa0c,0xfc22,0x39c9,0x6e19,0x517c,0xc94e,0xcfa4,0x20fc,0x1edc,0xe0d0,0x396d,0x85f0,0xdf}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xda47,0x29f8,0x7209,0xaa0c,0xfc22,0x39c9,0x6e19,0x517c,0xc94e,0xcfa4,0x20fc,0x1edc,0xe0d0,0x396d,0x85f0,0xdf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x29f8da47,0xaa0c7209,0x39c9fc22,0x517c6e19,0xcfa4c94e,0x1edc20fc,0x396de0d0,0xdf85f0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x29f8da47,0xaa0c7209,0x39c9fc22,0x517c6e19,0xcfa4c94e,0x1edc20fc,0x396de0d0,0xdf85f0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaa0c720929f8da47,0x517c6e1939c9fc22,0x1edc20fccfa4c94e,0xdf85f0396de0d0}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaa0c720929f8da47,0x517c6e1939c9fc22,0x1edc20fccfa4c94e,0xdf85f0396de0d0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8fd7,0x74cf,0x8ad6,0x66be,0xd417,0x84c0,0x1c28,0xbaac,0x8f9a,0x8410,0x4b63,0x37f4,0x5c1,0x6af3,0xe131,0xe7}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8fd7,0x74cf,0x8ad6,0x66be,0xd417,0x84c0,0x1c28,0xbaac,0x8f9a,0x8410,0x4b63,0x37f4,0x5c1,0x6af3,0xe131,0xe7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x74cf8fd7,0x66be8ad6,0x84c0d417,0xbaac1c28,0x84108f9a,0x37f44b63,0x6af305c1,0xe7e131}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0x74cf8fd7,0x66be8ad6,0x84c0d417,0xbaac1c28,0x84108f9a,0x37f44b63,0x6af305c1,0xe7e131}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x66be8ad674cf8fd7,0xbaac1c2884c0d417,0x37f44b6384108f9a,0xe7e1316af305c1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x66be8ad674cf8fd7,0xbaac1c2884c0d417,0x37f44b6384108f9a,0xe7e1316af305c1}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x13f5, 0x8b7, 0x1873, 0x144a, 0x1a89, 0x13f9, 0xd49, 0x6f2, 0x3bb, 0x102, 0x1ecb, 0x180b, 0x4d5, 0x608, 0x119, 0x5e6, 0x751, 0x961, 0xd37, 0x5} @@ -1213,223 +1213,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5ff1,0xa594,0x52b3,0xe75d,0xdd09,0xd267,0x7d25,0xd976,0xbc5,0xc1a8,0x9aae,0x10bf,0xe894,0x8de3,0xae84,0x70}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5ff1,0xa594,0x52b3,0xe75d,0xdd09,0xd267,0x7d25,0xd976,0xbc5,0xc1a8,0x9aae,0x10bf,0xe894,0x8de3,0xae84,0x70}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5945ff1,0xe75d52b3,0xd267dd09,0xd9767d25,0xc1a80bc5,0x10bf9aae,0x8de3e894,0x70ae84}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5945ff1,0xe75d52b3,0xd267dd09,0xd9767d25,0xc1a80bc5,0x10bf9aae,0x8de3e894,0x70ae84}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe75d52b3a5945ff1,0xd9767d25d267dd09,0x10bf9aaec1a80bc5,0x70ae848de3e894}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe75d52b3a5945ff1,0xd9767d25d267dd09,0x10bf9aaec1a80bc5,0x70ae848de3e894}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x11e6,0xb9e0,0x96c0,0xa6d7,0xee81,0x4b6,0x2f44,0xf4c5,0x4597,0xe75d,0x5b93,0xebb6,0x59c6,0xc08e,0x3084,0x16}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x11e6,0xb9e0,0x96c0,0xa6d7,0xee81,0x4b6,0x2f44,0xf4c5,0x4597,0xe75d,0x5b93,0xebb6,0x59c6,0xc08e,0x3084,0x16}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb9e011e6,0xa6d796c0,0x4b6ee81,0xf4c52f44,0xe75d4597,0xebb65b93,0xc08e59c6,0x163084}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb9e011e6,0xa6d796c0,0x4b6ee81,0xf4c52f44,0xe75d4597,0xebb65b93,0xc08e59c6,0x163084}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa6d796c0b9e011e6,0xf4c52f4404b6ee81,0xebb65b93e75d4597,0x163084c08e59c6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa6d796c0b9e011e6,0xf4c52f4404b6ee81,0xebb65b93e75d4597,0x163084c08e59c6}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf41d,0x3463,0x6031,0x479f,0x6fe7,0x159f,0x1e6b,0x404e,0xe302,0x88a8,0xc1f7,0xad84,0x8b50,0x3175,0x3d66,0xab}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf41d,0x3463,0x6031,0x479f,0x6fe7,0x159f,0x1e6b,0x404e,0xe302,0x88a8,0xc1f7,0xad84,0x8b50,0x3175,0x3d66,0xab}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3463f41d,0x479f6031,0x159f6fe7,0x404e1e6b,0x88a8e302,0xad84c1f7,0x31758b50,0xab3d66}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3463f41d,0x479f6031,0x159f6fe7,0x404e1e6b,0x88a8e302,0xad84c1f7,0x31758b50,0xab3d66}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x479f60313463f41d,0x404e1e6b159f6fe7,0xad84c1f788a8e302,0xab3d6631758b50}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x479f60313463f41d,0x404e1e6b159f6fe7,0xad84c1f788a8e302,0xab3d6631758b50}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa00f,0x5a6b,0xad4c,0x18a2,0x22f6,0x2d98,0x82da,0x2689,0xf43a,0x3e57,0x6551,0xef40,0x176b,0x721c,0x517b,0x8f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa00f,0x5a6b,0xad4c,0x18a2,0x22f6,0x2d98,0x82da,0x2689,0xf43a,0x3e57,0x6551,0xef40,0x176b,0x721c,0x517b,0x8f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a6ba00f,0x18a2ad4c,0x2d9822f6,0x268982da,0x3e57f43a,0xef406551,0x721c176b,0x8f517b}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a6ba00f,0x18a2ad4c,0x2d9822f6,0x268982da,0x3e57f43a,0xef406551,0x721c176b,0x8f517b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x18a2ad4c5a6ba00f,0x268982da2d9822f6,0xef4065513e57f43a,0x8f517b721c176b}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x18a2ad4c5a6ba00f,0x268982da2d9822f6,0xef4065513e57f43a,0x8f517b721c176b}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xffc3,0x1fbe,0xc7ef,0x56c4,0x2834,0xfa5c,0x36aa,0x1ced,0x9076,0xa31d,0x8890,0xe52,0x87d2,0xef68,0x98bc,0xc2}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xffc3,0x1fbe,0xc7ef,0x56c4,0x2834,0xfa5c,0x36aa,0x1ced,0x9076,0xa31d,0x8890,0xe52,0x87d2,0xef68,0x98bc,0xc2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1fbeffc3,0x56c4c7ef,0xfa5c2834,0x1ced36aa,0xa31d9076,0xe528890,0xef6887d2,0xc298bc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1fbeffc3,0x56c4c7ef,0xfa5c2834,0x1ced36aa,0xa31d9076,0xe528890,0xef6887d2,0xc298bc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x56c4c7ef1fbeffc3,0x1ced36aafa5c2834,0xe528890a31d9076,0xc298bcef6887d2}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x56c4c7ef1fbeffc3,0x1ced36aafa5c2834,0xe528890a31d9076,0xc298bcef6887d2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4098,0xd740,0xb5c6,0x8109,0x299,0x3a8c,0x81c2,0xc0d0,0xe848,0x9243,0x8996,0x656a,0x8c87,0x6c99,0xb9f5,0x4c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4098,0xd740,0xb5c6,0x8109,0x299,0x3a8c,0x81c2,0xc0d0,0xe848,0x9243,0x8996,0x656a,0x8c87,0x6c99,0xb9f5,0x4c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd7404098,0x8109b5c6,0x3a8c0299,0xc0d081c2,0x9243e848,0x656a8996,0x6c998c87,0x4cb9f5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd7404098,0x8109b5c6,0x3a8c0299,0xc0d081c2,0x9243e848,0x656a8996,0x6c998c87,0x4cb9f5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x8109b5c6d7404098,0xc0d081c23a8c0299,0x656a89969243e848,0x4cb9f56c998c87}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8109b5c6d7404098,0xc0d081c23a8c0299,0x656a89969243e848,0x4cb9f56c998c87}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x712b,0xfeed,0x55b5,0xc5fe,0xe867,0x77a9,0x1775,0x7814,0x4780,0x73b1,0x86b1,0x3973,0x797a,0x7f0b,0x1fa,0xb0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x712b,0xfeed,0x55b5,0xc5fe,0xe867,0x77a9,0x1775,0x7814,0x4780,0x73b1,0x86b1,0x3973,0x797a,0x7f0b,0x1fa,0xb0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfeed712b,0xc5fe55b5,0x77a9e867,0x78141775,0x73b14780,0x397386b1,0x7f0b797a,0xb001fa}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfeed712b,0xc5fe55b5,0x77a9e867,0x78141775,0x73b14780,0x397386b1,0x7f0b797a,0xb001fa}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc5fe55b5feed712b,0x7814177577a9e867,0x397386b173b14780,0xb001fa7f0b797a}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc5fe55b5feed712b,0x7814177577a9e867,0x397386b173b14780,0xb001fa7f0b797a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3d,0xe041,0x3810,0xa93b,0xd7cb,0x5a3,0xc955,0xe312,0x6f89,0x5ce2,0x776f,0xf1ad,0x782d,0x1097,0x6743,0x3d}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3d,0xe041,0x3810,0xa93b,0xd7cb,0x5a3,0xc955,0xe312,0x6f89,0x5ce2,0x776f,0xf1ad,0x782d,0x1097,0x6743,0x3d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe041003d,0xa93b3810,0x5a3d7cb,0xe312c955,0x5ce26f89,0xf1ad776f,0x1097782d,0x3d6743}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe041003d,0xa93b3810,0x5a3d7cb,0xe312c955,0x5ce26f89,0xf1ad776f,0x1097782d,0x3d6743}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa93b3810e041003d,0xe312c95505a3d7cb,0xf1ad776f5ce26f89,0x3d67431097782d}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa93b3810e041003d,0xe312c95505a3d7cb,0xf1ad776f5ce26f89,0x3d67431097782d}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5ff1,0xa594,0x52b3,0xe75d,0xdd09,0xd267,0x7d25,0xd976,0xbc5,0xc1a8,0x9aae,0x10bf,0xe894,0x8de3,0xae84,0x70}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5ff1,0xa594,0x52b3,0xe75d,0xdd09,0xd267,0x7d25,0xd976,0xbc5,0xc1a8,0x9aae,0x10bf,0xe894,0x8de3,0xae84,0x70}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5945ff1,0xe75d52b3,0xd267dd09,0xd9767d25,0xc1a80bc5,0x10bf9aae,0x8de3e894,0x70ae84}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5945ff1,0xe75d52b3,0xd267dd09,0xd9767d25,0xc1a80bc5,0x10bf9aae,0x8de3e894,0x70ae84}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe75d52b3a5945ff1,0xd9767d25d267dd09,0x10bf9aaec1a80bc5,0x70ae848de3e894}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe75d52b3a5945ff1,0xd9767d25d267dd09,0x10bf9aaec1a80bc5,0x70ae848de3e894}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x11e6,0xb9e0,0x96c0,0xa6d7,0xee81,0x4b6,0x2f44,0xf4c5,0x4597,0xe75d,0x5b93,0xebb6,0x59c6,0xc08e,0x3084,0x16}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x11e6,0xb9e0,0x96c0,0xa6d7,0xee81,0x4b6,0x2f44,0xf4c5,0x4597,0xe75d,0x5b93,0xebb6,0x59c6,0xc08e,0x3084,0x16}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb9e011e6,0xa6d796c0,0x4b6ee81,0xf4c52f44,0xe75d4597,0xebb65b93,0xc08e59c6,0x163084}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb9e011e6,0xa6d796c0,0x4b6ee81,0xf4c52f44,0xe75d4597,0xebb65b93,0xc08e59c6,0x163084}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa6d796c0b9e011e6,0xf4c52f4404b6ee81,0xebb65b93e75d4597,0x163084c08e59c6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa6d796c0b9e011e6,0xf4c52f4404b6ee81,0xebb65b93e75d4597,0x163084c08e59c6}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf41d,0x3463,0x6031,0x479f,0x6fe7,0x159f,0x1e6b,0x404e,0xe302,0x88a8,0xc1f7,0xad84,0x8b50,0x3175,0x3d66,0xab}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf41d,0x3463,0x6031,0x479f,0x6fe7,0x159f,0x1e6b,0x404e,0xe302,0x88a8,0xc1f7,0xad84,0x8b50,0x3175,0x3d66,0xab}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3463f41d,0x479f6031,0x159f6fe7,0x404e1e6b,0x88a8e302,0xad84c1f7,0x31758b50,0xab3d66}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3463f41d,0x479f6031,0x159f6fe7,0x404e1e6b,0x88a8e302,0xad84c1f7,0x31758b50,0xab3d66}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x479f60313463f41d,0x404e1e6b159f6fe7,0xad84c1f788a8e302,0xab3d6631758b50}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x479f60313463f41d,0x404e1e6b159f6fe7,0xad84c1f788a8e302,0xab3d6631758b50}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa00f,0x5a6b,0xad4c,0x18a2,0x22f6,0x2d98,0x82da,0x2689,0xf43a,0x3e57,0x6551,0xef40,0x176b,0x721c,0x517b,0x8f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa00f,0x5a6b,0xad4c,0x18a2,0x22f6,0x2d98,0x82da,0x2689,0xf43a,0x3e57,0x6551,0xef40,0x176b,0x721c,0x517b,0x8f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a6ba00f,0x18a2ad4c,0x2d9822f6,0x268982da,0x3e57f43a,0xef406551,0x721c176b,0x8f517b}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a6ba00f,0x18a2ad4c,0x2d9822f6,0x268982da,0x3e57f43a,0xef406551,0x721c176b,0x8f517b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x18a2ad4c5a6ba00f,0x268982da2d9822f6,0xef4065513e57f43a,0x8f517b721c176b}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x18a2ad4c5a6ba00f,0x268982da2d9822f6,0xef4065513e57f43a,0x8f517b721c176b}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5d2b,0x1bd6,0xcc3f,0x7e74,0x4fea,0xfba0,0x9f84,0xd6d4,0x42a1,0x88d1,0x68b1,0x4f4e,0x13ec,0xa60c,0xb13b,0x2e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x5d2b,0x1bd6,0xcc3f,0x7e74,0x4fea,0xfba0,0x9f84,0xd6d4,0x42a1,0x88d1,0x68b1,0x4f4e,0x13ec,0xa60c,0xb13b,0x2e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1bd65d2b,0x7e74cc3f,0xfba04fea,0xd6d49f84,0x88d142a1,0x4f4e68b1,0xa60c13ec,0x2eb13b}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1bd65d2b,0x7e74cc3f,0xfba04fea,0xd6d49f84,0x88d142a1,0x4f4e68b1,0xa60c13ec,0x2eb13b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7e74cc3f1bd65d2b,0xd6d49f84fba04fea,0x4f4e68b188d142a1,0x2eb13ba60c13ec}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7e74cc3f1bd65d2b,0xd6d49f84fba04fea,0x4f4e68b188d142a1,0x2eb13ba60c13ec}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7b4f,0x9448,0xaa16,0x649a,0xe4b4,0x3bc2,0xd3fd,0x8df1,0x931e,0x4078,0x8caa,0xe896,0xdeec,0xbed5,0x166e,0x7c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7b4f,0x9448,0xaa16,0x649a,0xe4b4,0x3bc2,0xd3fd,0x8df1,0x931e,0x4078,0x8caa,0xe896,0xdeec,0xbed5,0x166e,0x7c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x94487b4f,0x649aaa16,0x3bc2e4b4,0x8df1d3fd,0x4078931e,0xe8968caa,0xbed5deec,0x7c166e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x94487b4f,0x649aaa16,0x3bc2e4b4,0x8df1d3fd,0x4078931e,0xe8968caa,0xbed5deec,0x7c166e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x649aaa1694487b4f,0x8df1d3fd3bc2e4b4,0xe8968caa4078931e,0x7c166ebed5deec}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x649aaa1694487b4f,0x8df1d3fd3bc2e4b4,0xe8968caa4078931e,0x7c166ebed5deec}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x101d,0x51aa,0xd32d,0x2b40,0x7ba,0xc5f8,0x257a,0xb323,0x9bde,0x20c5,0xdc8f,0x2c3d,0x4e7b,0x54a6,0x17b9,0x99}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x101d,0x51aa,0xd32d,0x2b40,0x7ba,0xc5f8,0x257a,0xb323,0x9bde,0x20c5,0xdc8f,0x2c3d,0x4e7b,0x54a6,0x17b9,0x99}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x51aa101d,0x2b40d32d,0xc5f807ba,0xb323257a,0x20c59bde,0x2c3ddc8f,0x54a64e7b,0x9917b9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x51aa101d,0x2b40d32d,0xc5f807ba,0xb323257a,0x20c59bde,0x2c3ddc8f,0x54a64e7b,0x9917b9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2b40d32d51aa101d,0xb323257ac5f807ba,0x2c3ddc8f20c59bde,0x9917b954a64e7b}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2b40d32d51aa101d,0xb323257ac5f807ba,0x2c3ddc8f20c59bde,0x9917b954a64e7b}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa2d5,0xe429,0x33c0,0x818b,0xb015,0x45f,0x607b,0x292b,0xbd5e,0x772e,0x974e,0xb0b1,0xec13,0x59f3,0x4ec4,0xd1}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa2d5,0xe429,0x33c0,0x818b,0xb015,0x45f,0x607b,0x292b,0xbd5e,0x772e,0x974e,0xb0b1,0xec13,0x59f3,0x4ec4,0xd1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe429a2d5,0x818b33c0,0x45fb015,0x292b607b,0x772ebd5e,0xb0b1974e,0x59f3ec13,0xd14ec4}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe429a2d5,0x818b33c0,0x45fb015,0x292b607b,0x772ebd5e,0xb0b1974e,0x59f3ec13,0xd14ec4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x818b33c0e429a2d5,0x292b607b045fb015,0xb0b1974e772ebd5e,0xd14ec459f3ec13}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x818b33c0e429a2d5,0x292b607b045fb015,0xb0b1974e772ebd5e,0xd14ec459f3ec13}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0xa72, 0x186f, 0x81c, 0x105c, 0x151, 0x1451, 0x1b96, 0x4d1, 0x12be, 0x3bf, 0x996, 0x1130, 0x1460, 0x1ed8, 0xc2a, 0x1c23, 0x1ee, 0x3f4, 0xbe2, 0x9} @@ -1689,223 +1689,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x323f,0x7ed2,0x4455,0x415c,0x5876,0x4282,0x76ef,0xcc11,0xbdab,0x1142,0x4729,0x3405,0x1155,0x1779,0x7c1f,0xc5}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x323f,0x7ed2,0x4455,0x415c,0x5876,0x4282,0x76ef,0xcc11,0xbdab,0x1142,0x4729,0x3405,0x1155,0x1779,0x7c1f,0xc5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7ed2323f,0x415c4455,0x42825876,0xcc1176ef,0x1142bdab,0x34054729,0x17791155,0xc57c1f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7ed2323f,0x415c4455,0x42825876,0xcc1176ef,0x1142bdab,0x34054729,0x17791155,0xc57c1f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x415c44557ed2323f,0xcc1176ef42825876,0x340547291142bdab,0xc57c1f17791155}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x415c44557ed2323f,0xcc1176ef42825876,0x340547291142bdab,0xc57c1f17791155}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc9ce,0xa958,0x4fac,0x8a69,0x31e1,0x9997,0x1a17,0x8c19,0xd118,0x68c7,0xc0eb,0x8113,0x62fd,0xf8c8,0xc94e,0x3f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc9ce,0xa958,0x4fac,0x8a69,0x31e1,0x9997,0x1a17,0x8c19,0xd118,0x68c7,0xc0eb,0x8113,0x62fd,0xf8c8,0xc94e,0x3f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa958c9ce,0x8a694fac,0x999731e1,0x8c191a17,0x68c7d118,0x8113c0eb,0xf8c862fd,0x3fc94e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa958c9ce,0x8a694fac,0x999731e1,0x8c191a17,0x68c7d118,0x8113c0eb,0xf8c862fd,0x3fc94e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8a694faca958c9ce,0x8c191a17999731e1,0x8113c0eb68c7d118,0x3fc94ef8c862fd}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8a694faca958c9ce,0x8c191a17999731e1,0x8113c0eb68c7d118,0x3fc94ef8c862fd}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x41cb,0xed47,0x8ea0,0x127c,0x69d5,0xf74c,0xce8c,0x3826,0x3da2,0x6bf3,0x56b,0xe695,0xc45c,0x84ac,0x5817,0xd0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x41cb,0xed47,0x8ea0,0x127c,0x69d5,0xf74c,0xce8c,0x3826,0x3da2,0x6bf3,0x56b,0xe695,0xc45c,0x84ac,0x5817,0xd0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xed4741cb,0x127c8ea0,0xf74c69d5,0x3826ce8c,0x6bf33da2,0xe695056b,0x84acc45c,0xd05817}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xed4741cb,0x127c8ea0,0xf74c69d5,0x3826ce8c,0x6bf33da2,0xe695056b,0x84acc45c,0xd05817}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x127c8ea0ed4741cb,0x3826ce8cf74c69d5,0xe695056b6bf33da2,0xd0581784acc45c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x127c8ea0ed4741cb,0x3826ce8cf74c69d5,0xe695056b6bf33da2,0xd0581784acc45c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcdc1,0x812d,0xbbaa,0xbea3,0xa789,0xbd7d,0x8910,0x33ee,0x4254,0xeebd,0xb8d6,0xcbfa,0xeeaa,0xe886,0x83e0,0x3a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcdc1,0x812d,0xbbaa,0xbea3,0xa789,0xbd7d,0x8910,0x33ee,0x4254,0xeebd,0xb8d6,0xcbfa,0xeeaa,0xe886,0x83e0,0x3a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x812dcdc1,0xbea3bbaa,0xbd7da789,0x33ee8910,0xeebd4254,0xcbfab8d6,0xe886eeaa,0x3a83e0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x812dcdc1,0xbea3bbaa,0xbd7da789,0x33ee8910,0xeebd4254,0xcbfab8d6,0xe886eeaa,0x3a83e0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbea3bbaa812dcdc1,0x33ee8910bd7da789,0xcbfab8d6eebd4254,0x3a83e0e886eeaa}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbea3bbaa812dcdc1,0x33ee8910bd7da789,0xcbfab8d6eebd4254,0x3a83e0e886eeaa}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5286,0x497a,0x2196,0xaa2d,0xa98a,0x7b17,0xa6e9,0x6da0,0x73f5,0xe349,0x83ae,0x7c6e,0x31,0xee2e,0x6931,0x32}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5286,0x497a,0x2196,0xaa2d,0xa98a,0x7b17,0xa6e9,0x6da0,0x73f5,0xe349,0x83ae,0x7c6e,0x31,0xee2e,0x6931,0x32}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x497a5286,0xaa2d2196,0x7b17a98a,0x6da0a6e9,0xe34973f5,0x7c6e83ae,0xee2e0031,0x326931}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x497a5286,0xaa2d2196,0x7b17a98a,0x6da0a6e9,0xe34973f5,0x7c6e83ae,0xee2e0031,0x326931}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaa2d2196497a5286,0x6da0a6e97b17a98a,0x7c6e83aee34973f5,0x326931ee2e0031}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaa2d2196497a5286,0x6da0a6e97b17a98a,0x7c6e83aee34973f5,0x326931ee2e0031}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc8dc,0x8642,0xe46d,0x6a64,0x4c13,0x90e7,0xd82b,0x9b58,0x64b4,0x7381,0x5218,0x99b4,0x5926,0x5d78,0x95e2,0xb7}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc8dc,0x8642,0xe46d,0x6a64,0x4c13,0x90e7,0xd82b,0x9b58,0x64b4,0x7381,0x5218,0x99b4,0x5926,0x5d78,0x95e2,0xb7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, 
._mp_d = (mp_limb_t[]) {0x8642c8dc,0x6a64e46d,0x90e74c13,0x9b58d82b,0x738164b4,0x99b45218,0x5d785926,0xb795e2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8642c8dc,0x6a64e46d,0x90e74c13,0x9b58d82b,0x738164b4,0x99b45218,0x5d785926,0xb795e2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6a64e46d8642c8dc,0x9b58d82b90e74c13,0x99b45218738164b4,0xb795e25d785926}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6a64e46d8642c8dc,0x9b58d82b90e74c13,0x99b45218738164b4,0xb795e25d785926}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7363,0xbe7a,0xc901,0xb6e0,0x6a56,0x779d,0xbc42,0xd659,0x3476,0x3868,0x12f4,0x923a,0x6fa8,0x5412,0xd5f9,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7363,0xbe7a,0xc901,0xb6e0,0x6a56,0x779d,0xbc42,0xd659,0x3476,0x3868,0x12f4,0x923a,0x6fa8,0x5412,0xd5f9,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbe7a7363,0xb6e0c901,0x779d6a56,0xd659bc42,0x38683476,0x923a12f4,0x54126fa8,0x3d5f9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbe7a7363,0xb6e0c901,0x779d6a56,0xd659bc42,0x38683476,0x923a12f4,0x54126fa8,0x3d5f9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6e0c901be7a7363,0xd659bc42779d6a56,0x923a12f438683476,0x3d5f954126fa8}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6e0c901be7a7363,0xd659bc42779d6a56,0x923a12f438683476,0x3d5f954126fa8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xedb4,0x4fd4,0x5c14,0x14b,0xf702,0xd6be,0x9c11,0x4bb,0x9f10,0xde25,0xb159,0x5085,0xb0a9,0x6f42,0xc4d3,0x1d}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xedb4,0x4fd4,0x5c14,0x14b,0xf702,0xd6be,0x9c11,0x4bb,0x9f10,0xde25,0xb159,0x5085,0xb0a9,0x6f42,0xc4d3,0x1d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4fd4edb4,0x14b5c14,0xd6bef702,0x4bb9c11,0xde259f10,0x5085b159,0x6f42b0a9,0x1dc4d3}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4fd4edb4,0x14b5c14,0xd6bef702,0x4bb9c11,0xde259f10,0x5085b159,0x6f42b0a9,0x1dc4d3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x14b5c144fd4edb4,0x4bb9c11d6bef702,0x5085b159de259f10,0x1dc4d36f42b0a9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x14b5c144fd4edb4,0x4bb9c11d6bef702,0x5085b159de259f10,0x1dc4d36f42b0a9}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe873,0x4974,0xc7ed,0x6b01,0xaffb,0xf3d4,0xc641,0x20d6,0xca22,0x2d69,0x9f01,0x451e,0xfa05,0xef65,0xb43b,0xde}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe873,0x4974,0xc7ed,0x6b01,0xaffb,0xf3d4,0xc641,0x20d6,0xca22,0x2d69,0x9f01,0x451e,0xfa05,0xef65,0xb43b,0xde}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4974e873,0x6b01c7ed,0xf3d4affb,0x20d6c641,0x2d69ca22,0x451e9f01,0xef65fa05,0xdeb43b}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4974e873,0x6b01c7ed,0xf3d4affb,0x20d6c641,0x2d69ca22,0x451e9f01,0xef65fa05,0xdeb43b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6b01c7ed4974e873,0x20d6c641f3d4affb,0x451e9f012d69ca22,0xdeb43bef65fa05}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6b01c7ed4974e873,0x20d6c641f3d4affb,0x451e9f012d69ca22,0xdeb43bef65fa05}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8c9d,0x4185,0x36fe,0x491f,0x95a9,0x8862,0x43bd,0x29a6,0xcb89,0xc797,0xed0b,0x6dc5,0x9057,0xabed,0x2a06,0xfc}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8c9d,0x4185,0x36fe,0x491f,0x95a9,0x8862,0x43bd,0x29a6,0xcb89,0xc797,0xed0b,0x6dc5,0x9057,0xabed,0x2a06,0xfc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x41858c9d,0x491f36fe,0x886295a9,0x29a643bd,0xc797cb89,0x6dc5ed0b,0xabed9057,0xfc2a06}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x41858c9d,0x491f36fe,0x886295a9,0x29a643bd,0xc797cb89,0x6dc5ed0b,0xabed9057,0xfc2a06}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x491f36fe41858c9d,0x29a643bd886295a9,0x6dc5ed0bc797cb89,0xfc2a06abed9057}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x491f36fe41858c9d,0x29a643bd886295a9,0x6dc5ed0bc797cb89,0xfc2a06abed9057}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x323f,0x7ed2,0x4455,0x415c,0x5876,0x4282,0x76ef,0xcc11,0xbdab,0x1142,0x4729,0x3405,0x1155,0x1779,0x7c1f,0xc5}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x323f,0x7ed2,0x4455,0x415c,0x5876,0x4282,0x76ef,0xcc11,0xbdab,0x1142,0x4729,0x3405,0x1155,0x1779,0x7c1f,0xc5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7ed2323f,0x415c4455,0x42825876,0xcc1176ef,0x1142bdab,0x34054729,0x17791155,0xc57c1f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7ed2323f,0x415c4455,0x42825876,0xcc1176ef,0x1142bdab,0x34054729,0x17791155,0xc57c1f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x415c44557ed2323f,0xcc1176ef42825876,0x340547291142bdab,0xc57c1f17791155}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x415c44557ed2323f,0xcc1176ef42825876,0x340547291142bdab,0xc57c1f17791155}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc9ce,0xa958,0x4fac,0x8a69,0x31e1,0x9997,0x1a17,0x8c19,0xd118,0x68c7,0xc0eb,0x8113,0x62fd,0xf8c8,0xc94e,0x3f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc9ce,0xa958,0x4fac,0x8a69,0x31e1,0x9997,0x1a17,0x8c19,0xd118,0x68c7,0xc0eb,0x8113,0x62fd,0xf8c8,0xc94e,0x3f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, 
._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa958c9ce,0x8a694fac,0x999731e1,0x8c191a17,0x68c7d118,0x8113c0eb,0xf8c862fd,0x3fc94e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa958c9ce,0x8a694fac,0x999731e1,0x8c191a17,0x68c7d118,0x8113c0eb,0xf8c862fd,0x3fc94e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8a694faca958c9ce,0x8c191a17999731e1,0x8113c0eb68c7d118,0x3fc94ef8c862fd}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8a694faca958c9ce,0x8c191a17999731e1,0x8113c0eb68c7d118,0x3fc94ef8c862fd}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x41cb,0xed47,0x8ea0,0x127c,0x69d5,0xf74c,0xce8c,0x3826,0x3da2,0x6bf3,0x56b,0xe695,0xc45c,0x84ac,0x5817,0xd0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x41cb,0xed47,0x8ea0,0x127c,0x69d5,0xf74c,0xce8c,0x3826,0x3da2,0x6bf3,0x56b,0xe695,0xc45c,0x84ac,0x5817,0xd0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xed4741cb,0x127c8ea0,0xf74c69d5,0x3826ce8c,0x6bf33da2,0xe695056b,0x84acc45c,0xd05817}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xed4741cb,0x127c8ea0,0xf74c69d5,0x3826ce8c,0x6bf33da2,0xe695056b,0x84acc45c,0xd05817}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x127c8ea0ed4741cb,0x3826ce8cf74c69d5,0xe695056b6bf33da2,0xd0581784acc45c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x127c8ea0ed4741cb,0x3826ce8cf74c69d5,0xe695056b6bf33da2,0xd0581784acc45c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcdc1,0x812d,0xbbaa,0xbea3,0xa789,0xbd7d,0x8910,0x33ee,0x4254,0xeebd,0xb8d6,0xcbfa,0xeeaa,0xe886,0x83e0,0x3a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcdc1,0x812d,0xbbaa,0xbea3,0xa789,0xbd7d,0x8910,0x33ee,0x4254,0xeebd,0xb8d6,0xcbfa,0xeeaa,0xe886,0x83e0,0x3a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x812dcdc1,0xbea3bbaa,0xbd7da789,0x33ee8910,0xeebd4254,0xcbfab8d6,0xe886eeaa,0x3a83e0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x812dcdc1,0xbea3bbaa,0xbd7da789,0x33ee8910,0xeebd4254,0xcbfab8d6,0xe886eeaa,0x3a83e0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbea3bbaa812dcdc1,0x33ee8910bd7da789,0xcbfab8d6eebd4254,0x3a83e0e886eeaa}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbea3bbaa812dcdc1,0x33ee8910bd7da789,0xcbfab8d6eebd4254,0x3a83e0e886eeaa}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2943,0x24bd,0x90cb,0x5516,0xd4c5,0xbd8b,0x5374,0xb6d0,0xb9fa,0x71a4,0x41d7,0xbe37,0x18,0xf717,0x3498,0x99}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2943,0x24bd,0x90cb,0x5516,0xd4c5,0xbd8b,0x5374,0xb6d0,0xb9fa,0x71a4,0x41d7,0xbe37,0x18,0xf717,0x3498,0x99}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x24bd2943,0x551690cb,0xbd8bd4c5,0xb6d05374,0x71a4b9fa,0xbe3741d7,0xf7170018,0x993498}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x24bd2943,0x551690cb,0xbd8bd4c5,0xb6d05374,0x71a4b9fa,0xbe3741d7,0xf7170018,0x993498}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x551690cb24bd2943,0xb6d05374bd8bd4c5,0xbe3741d771a4b9fa,0x993498f7170018}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x551690cb24bd2943,0xb6d05374bd8bd4c5,0xbe3741d771a4b9fa,0x993498f7170018}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x646e,0xc321,0x7236,0xb532,0xa609,0xc873,0x6c15,0x4dac,0xb25a,0x39c0,0x290c,0x4cda,0x2c93,0x2ebc,0xcaf1,0xdb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x646e,0xc321,0x7236,0xb532,0xa609,0xc873,0x6c15,0x4dac,0xb25a,0x39c0,0x290c,0x4cda,0x2c93,0x2ebc,0xcaf1,0xdb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc321646e,0xb5327236,0xc873a609,0x4dac6c15,0x39c0b25a,0x4cda290c,0x2ebc2c93,0xdbcaf1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc321646e,0xb5327236,0xc873a609,0x4dac6c15,0x39c0b25a,0x4cda290c,0x2ebc2c93,0xdbcaf1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb5327236c321646e,0x4dac6c15c873a609,0x4cda290c39c0b25a,0xdbcaf12ebc2c93}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb5327236c321646e,0x4dac6c15c873a609,0x4cda290c39c0b25a,0xdbcaf12ebc2c93}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca5b,0x1036,0x34a6,0x490c,0xc0ed,0x771b,0x1590,0x1c17,0x4855,0x977e,0x8054,0xdb98,0xb26f,0x1175,0x7722,0xfe}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca5b,0x1036,0x34a6,0x490c,0xc0ed,0x771b,0x1590,0x1c17,0x4855,0x977e,0x8054,0xdb98,0xb26f,0x1175,0x7722,0xfe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, 
._mp_d = (mp_limb_t[]) {0x1036ca5b,0x490c34a6,0x771bc0ed,0x1c171590,0x977e4855,0xdb988054,0x1175b26f,0xfe7722}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1036ca5b,0x490c34a6,0x771bc0ed,0x1c171590,0x977e4855,0xdb988054,0x1175b26f,0xfe7722}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x490c34a61036ca5b,0x1c171590771bc0ed,0xdb988054977e4855,0xfe77221175b26f}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x490c34a61036ca5b,0x1c171590771bc0ed,0xdb988054977e4855,0xfe77221175b26f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf543,0x821c,0xae0a,0xb0cb,0x642d,0x5a80,0xd2bf,0x2340,0xc8f,0xe1ce,0x4e38,0xdace,0x3445,0x807e,0x9bc4,0x5}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf543,0x821c,0xae0a,0xb0cb,0x642d,0x5a80,0xd2bf,0x2340,0xc8f,0xe1ce,0x4e38,0xdace,0x3445,0x807e,0x9bc4,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x821cf543,0xb0cbae0a,0x5a80642d,0x2340d2bf,0xe1ce0c8f,0xdace4e38,0x807e3445,0x59bc4}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x821cf543,0xb0cbae0a,0x5a80642d,0x2340d2bf,0xe1ce0c8f,0xdace4e38,0x807e3445,0x59bc4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb0cbae0a821cf543,0x2340d2bf5a80642d,0xdace4e38e1ce0c8f,0x59bc4807e3445}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb0cbae0a821cf543,0x2340d2bf5a80642d,0xdace4e38e1ce0c8f,0x59bc4807e3445}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6e85,0xc3dc,0xfd4,0x39a7,0x5158,0x777b,0xb83,0xb0fe,0x55de,0x45b3,0x103f,0x53dc,0x27e2,0xb6cb,0x2b18,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6e85,0xc3dc,0xfd4,0x39a7,0x5158,0x777b,0xb83,0xb0fe,0x55de,0x45b3,0x103f,0x53dc,0x27e2,0xb6cb,0x2b18,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc3dc6e85,0x39a70fd4,0x777b5158,0xb0fe0b83,0x45b355de,0x53dc103f,0xb6cb27e2,0x12b18}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc3dc6e85,0x39a70fd4,0x777b5158,0xb0fe0b83,0x45b355de,0x53dc103f,0xb6cb27e2,0x12b18}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x39a70fd4c3dc6e85,0xb0fe0b83777b5158,0x53dc103f45b355de,0x12b18b6cb27e2}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x39a70fd4c3dc6e85,0xb0fe0b83777b5158,0x53dc103f45b355de,0x12b18b6cb27e2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x35a5,0xefc9,0xcb59,0xb6f3,0x3f12,0x88e4,0xea6f,0xe3e8,0xb7aa,0x6881,0x7fab,0x2467,0x4d90,0xee8a,0x88dd,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x35a5,0xefc9,0xcb59,0xb6f3,0x3f12,0x88e4,0xea6f,0xe3e8,0xb7aa,0x6881,0x7fab,0x2467,0x4d90,0xee8a,0x88dd,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xefc935a5,0xb6f3cb59,0x88e43f12,0xe3e8ea6f,0x6881b7aa,0x24677fab,0xee8a4d90,0x188dd}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xefc935a5,0xb6f3cb59,0x88e43f12,0xe3e8ea6f,0x6881b7aa,0x24677fab,0xee8a4d90,0x188dd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f3cb59efc935a5,0xe3e8ea6f88e43f12,0x24677fab6881b7aa,0x188ddee8a4d90}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0xb6f3cb59efc935a5,0xe3e8ea6f88e43f12,0x24677fab6881b7aa,0x188ddee8a4d90}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x102e, 0x4c4, 0x1586, 0x1184, 0xa12, 0x9ce, 0x1866, 0x433, 0x5ef, 0x82a, 0x13a5, 0xbc6, 0x591, 0x175b, 0x10bc, 0xfa5, 0x109d, 0x8d4, 0x1325, 0x9} @@ -2165,223 +2165,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x900d,0xd052,0xb453,0x206a,0xe61d,0x31f2,0xc579,0xfb21,0xc870,0x2bb,0xf38f,0xf9c1,0x83aa,0x47f1,0x58d1,0xeb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x900d,0xd052,0xb453,0x206a,0xe61d,0x31f2,0xc579,0xfb21,0xc870,0x2bb,0xf38f,0xf9c1,0x83aa,0x47f1,0x58d1,0xeb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd052900d,0x206ab453,0x31f2e61d,0xfb21c579,0x2bbc870,0xf9c1f38f,0x47f183aa,0xeb58d1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd052900d,0x206ab453,0x31f2e61d,0xfb21c579,0x2bbc870,0xf9c1f38f,0x47f183aa,0xeb58d1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x206ab453d052900d,0xfb21c57931f2e61d,0xf9c1f38f02bbc870,0xeb58d147f183aa}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x206ab453d052900d,0xfb21c57931f2e61d,0xf9c1f38f02bbc870,0xeb58d147f183aa}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x727e,0xa5e8,0xebc3,0x9e04,0xf1eb,0x38d7,0x68e0,0x8ea9,0x8f77,0x8331,0x48eb,0x82c0,0xb0a3,0x3583,0xf221,0x54}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x727e,0xa5e8,0xebc3,0x9e04,0xf1eb,0x38d7,0x68e0,0x8ea9,0x8f77,0x8331,0x48eb,0x82c0,0xb0a3,0x3583,0xf221,0x54}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5e8727e,0x9e04ebc3,0x38d7f1eb,0x8ea968e0,0x83318f77,0x82c048eb,0x3583b0a3,0x54f221}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5e8727e,0x9e04ebc3,0x38d7f1eb,0x8ea968e0,0x83318f77,0x82c048eb,0x3583b0a3,0x54f221}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9e04ebc3a5e8727e,0x8ea968e038d7f1eb,0x82c048eb83318f77,0x54f2213583b0a3}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9e04ebc3a5e8727e,0x8ea968e038d7f1eb,0x82c048eb83318f77,0x54f2213583b0a3}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdba9,0x49e4,0xc9b3,0x34e9,0x7ab9,0xd876,0xcae0,0xcfb0,0x6177,0x26b3,0x2c98,0x1e30,0x4a38,0x53cc,0x3bdc,0x71}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdba9,0x49e4,0xc9b3,0x34e9,0x7ab9,0xd876,0xcae0,0xcfb0,0x6177,0x26b3,0x2c98,0x1e30,0x4a38,0x53cc,0x3bdc,0x71}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x49e4dba9,0x34e9c9b3,0xd8767ab9,0xcfb0cae0,0x26b36177,0x1e302c98,0x53cc4a38,0x713bdc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x49e4dba9,0x34e9c9b3,0xd8767ab9,0xcfb0cae0,0x26b36177,0x1e302c98,0x53cc4a38,0x713bdc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x34e9c9b349e4dba9,0xcfb0cae0d8767ab9,0x1e302c9826b36177,0x713bdc53cc4a38}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x34e9c9b349e4dba9,0xcfb0cae0d8767ab9,0x1e302c9826b36177,0x713bdc53cc4a38}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x6ff3,0x2fad,0x4bac,0xdf95,0x19e2,0xce0d,0x3a86,0x4de,0x378f,0xfd44,0xc70,0x63e,0x7c55,0xb80e,0xa72e,0x14}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6ff3,0x2fad,0x4bac,0xdf95,0x19e2,0xce0d,0x3a86,0x4de,0x378f,0xfd44,0xc70,0x63e,0x7c55,0xb80e,0xa72e,0x14}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2fad6ff3,0xdf954bac,0xce0d19e2,0x4de3a86,0xfd44378f,0x63e0c70,0xb80e7c55,0x14a72e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2fad6ff3,0xdf954bac,0xce0d19e2,0x4de3a86,0xfd44378f,0x63e0c70,0xb80e7c55,0x14a72e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdf954bac2fad6ff3,0x4de3a86ce0d19e2,0x63e0c70fd44378f,0x14a72eb80e7c55}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdf954bac2fad6ff3,0x4de3a86ce0d19e2,0x63e0c70fd44378f,0x14a72eb80e7c55}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2417,0x1b00,0xcfe,0x8960,0x662e,0x42d2,0xc00f,0x222c,0x7671,0x278b,0x863f,0xbcac,0xdb9c,0x6e5e,0x4c5a,0x1b}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2417,0x1b00,0xcfe,0x8960,0x662e,0x42d2,0xc00f,0x222c,0x7671,0x278b,0x863f,0xbcac,0xdb9c,0x6e5e,0x4c5a,0x1b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1b002417,0x89600cfe,0x42d2662e,0x222cc00f,0x278b7671,0xbcac863f,0x6e5edb9c,0x1b4c5a}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1b002417,0x89600cfe,0x42d2662e,0x222cc00f,0x278b7671,0xbcac863f,0x6e5edb9c,0x1b4c5a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x89600cfe1b002417,0x222cc00f42d2662e,0xbcac863f278b7671,0x1b4c5a6e5edb9c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x89600cfe1b002417,0x222cc00f42d2662e,0xbcac863f278b7671,0x1b4c5a6e5edb9c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x21e8,0xd92b,0x5a2d,0xef86,0xf492,0x1483,0x8ae0,0x6b37,0x7f78,0x7b90,0x69c5,0xf4ec,0x2fb9,0x1660,0x8296,0xf8}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x21e8,0xd92b,0x5a2d,0xef86,0xf492,0x1483,0x8ae0,0x6b37,0x7f78,0x7b90,0x69c5,0xf4ec,0x2fb9,0x1660,0x8296,0xf8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd92b21e8,0xef865a2d,0x1483f492,0x6b378ae0,0x7b907f78,0xf4ec69c5,0x16602fb9,0xf88296}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd92b21e8,0xef865a2d,0x1483f492,0x6b378ae0,0x7b907f78,0xf4ec69c5,0x16602fb9,0xf88296}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xef865a2dd92b21e8,0x6b378ae01483f492,0xf4ec69c57b907f78,0xf8829616602fb9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xef865a2dd92b21e8,0x6b378ae01483f492,0xf4ec69c57b907f78,0xf8829616602fb9}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x38ff,0x5dc5,0x9aea,0xbc0e,0xbea5,0x775d,0x447b,0xc311,0xf01c,0xb63a,0x15fd,0x162a,0xab76,0x9def,0x2a0d,0xc5}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x38ff,0x5dc5,0x9aea,0xbc0e,0xbea5,0x775d,0x447b,0xc311,0xf01c,0xb63a,0x15fd,0x162a,0xab76,0x9def,0x2a0d,0xc5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5dc538ff,0xbc0e9aea,0x775dbea5,0xc311447b,0xb63af01c,0x162a15fd,0x9defab76,0xc52a0d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5dc538ff,0xbc0e9aea,0x775dbea5,0xc311447b,0xb63af01c,0x162a15fd,0x9defab76,0xc52a0d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbc0e9aea5dc538ff,0xc311447b775dbea5,0x162a15fdb63af01c,0xc52a0d9defab76}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbc0e9aea5dc538ff,0xc311447b775dbea5,0x162a15fdb63af01c,0xc52a0d9defab76}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdbe9,0xe4ff,0xf301,0x769f,0x99d1,0xbd2d,0x3ff0,0xddd3,0x898e,0xd874,0x79c0,0x4353,0x2463,0x91a1,0xb3a5,0xe4}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdbe9,0xe4ff,0xf301,0x769f,0x99d1,0xbd2d,0x3ff0,0xddd3,0x898e,0xd874,0x79c0,0x4353,0x2463,0x91a1,0xb3a5,0xe4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe4ffdbe9,0x769ff301,0xbd2d99d1,0xddd33ff0,0xd874898e,0x435379c0,0x91a12463,0xe4b3a5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe4ffdbe9,0x769ff301,0xbd2d99d1,0xddd33ff0,0xd874898e,0x435379c0,0x91a12463,0xe4b3a5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x769ff301e4ffdbe9,0xddd33ff0bd2d99d1,0x435379c0d874898e,0xe4b3a591a12463}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x769ff301e4ffdbe9,0xddd33ff0bd2d99d1,0x435379c0d874898e,0xe4b3a591a12463}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x900d,0xd052,0xb453,0x206a,0xe61d,0x31f2,0xc579,0xfb21,0xc870,0x2bb,0xf38f,0xf9c1,0x83aa,0x47f1,0x58d1,0xeb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x900d,0xd052,0xb453,0x206a,0xe61d,0x31f2,0xc579,0xfb21,0xc870,0x2bb,0xf38f,0xf9c1,0x83aa,0x47f1,0x58d1,0xeb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd052900d,0x206ab453,0x31f2e61d,0xfb21c579,0x2bbc870,0xf9c1f38f,0x47f183aa,0xeb58d1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd052900d,0x206ab453,0x31f2e61d,0xfb21c579,0x2bbc870,0xf9c1f38f,0x47f183aa,0xeb58d1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x206ab453d052900d,0xfb21c57931f2e61d,0xf9c1f38f02bbc870,0xeb58d147f183aa}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x206ab453d052900d,0xfb21c57931f2e61d,0xf9c1f38f02bbc870,0xeb58d147f183aa}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x727e,0xa5e8,0xebc3,0x9e04,0xf1eb,0x38d7,0x68e0,0x8ea9,0x8f77,0x8331,0x48eb,0x82c0,0xb0a3,0x3583,0xf221,0x54}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x727e,0xa5e8,0xebc3,0x9e04,0xf1eb,0x38d7,0x68e0,0x8ea9,0x8f77,0x8331,0x48eb,0x82c0,0xb0a3,0x3583,0xf221,0x54}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa5e8727e,0x9e04ebc3,0x38d7f1eb,0x8ea968e0,0x83318f77,0x82c048eb,0x3583b0a3,0x54f221}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0xa5e8727e,0x9e04ebc3,0x38d7f1eb,0x8ea968e0,0x83318f77,0x82c048eb,0x3583b0a3,0x54f221}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9e04ebc3a5e8727e,0x8ea968e038d7f1eb,0x82c048eb83318f77,0x54f2213583b0a3}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9e04ebc3a5e8727e,0x8ea968e038d7f1eb,0x82c048eb83318f77,0x54f2213583b0a3}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdba9,0x49e4,0xc9b3,0x34e9,0x7ab9,0xd876,0xcae0,0xcfb0,0x6177,0x26b3,0x2c98,0x1e30,0x4a38,0x53cc,0x3bdc,0x71}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdba9,0x49e4,0xc9b3,0x34e9,0x7ab9,0xd876,0xcae0,0xcfb0,0x6177,0x26b3,0x2c98,0x1e30,0x4a38,0x53cc,0x3bdc,0x71}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x49e4dba9,0x34e9c9b3,0xd8767ab9,0xcfb0cae0,0x26b36177,0x1e302c98,0x53cc4a38,0x713bdc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x49e4dba9,0x34e9c9b3,0xd8767ab9,0xcfb0cae0,0x26b36177,0x1e302c98,0x53cc4a38,0x713bdc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x34e9c9b349e4dba9,0xcfb0cae0d8767ab9,0x1e302c9826b36177,0x713bdc53cc4a38}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x34e9c9b349e4dba9,0xcfb0cae0d8767ab9,0x1e302c9826b36177,0x713bdc53cc4a38}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6ff3,0x2fad,0x4bac,0xdf95,0x19e2,0xce0d,0x3a86,0x4de,0x378f,0xfd44,0xc70,0x63e,0x7c55,0xb80e,0xa72e,0x14}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6ff3,0x2fad,0x4bac,0xdf95,0x19e2,0xce0d,0x3a86,0x4de,0x378f,0xfd44,0xc70,0x63e,0x7c55,0xb80e,0xa72e,0x14}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2fad6ff3,0xdf954bac,0xce0d19e2,0x4de3a86,0xfd44378f,0x63e0c70,0xb80e7c55,0x14a72e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2fad6ff3,0xdf954bac,0xce0d19e2,0x4de3a86,0xfd44378f,0x63e0c70,0xb80e7c55,0x14a72e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdf954bac2fad6ff3,0x4de3a86ce0d19e2,0x63e0c70fd44378f,0x14a72eb80e7c55}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdf954bac2fad6ff3,0x4de3a86ce0d19e2,0x63e0c70fd44378f,0x14a72eb80e7c55}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x20f0,0x2693,0xacbf,0x731a,0xb0f3,0xd8ce,0x1bcd,0xf836,0x8469,0x44d5,0xd604,0xd3aa,0x4aa8,0xcdc3,0x9086,0x3f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x20f0,0x2693,0xacbf,0x731a,0xb0f3,0xd8ce,0x1bcd,0xf836,0x8469,0x44d5,0xd604,0xd3aa,0x4aa8,0xcdc3,0x9086,0x3f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x269320f0,0x731aacbf,0xd8ceb0f3,0xf8361bcd,0x44d58469,0xd3aad604,0xcdc34aa8,0x3f9086}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0x269320f0,0x731aacbf,0xd8ceb0f3,0xf8361bcd,0x44d58469,0xd3aad604,0xcdc34aa8,0x3f9086}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x731aacbf269320f0,0xf8361bcdd8ceb0f3,0xd3aad60444d58469,0x3f9086cdc34aa8}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x731aacbf269320f0,0xf8361bcdd8ceb0f3,0xd3aad60444d58469,0x3f9086cdc34aa8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcc11,0xe55a,0x932f,0x9534,0x2895,0xaf43,0x2956,0x614f,0x4e84,0xe4b2,0x60c6,0x255,0xbb14,0xd70d,0xc61e,0x13}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcc11,0xe55a,0x932f,0x9534,0x2895,0xaf43,0x2956,0x614f,0x4e84,0xe4b2,0x60c6,0x255,0xbb14,0xd70d,0xc61e,0x13}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe55acc11,0x9534932f,0xaf432895,0x614f2956,0xe4b24e84,0x25560c6,0xd70dbb14,0x13c61e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe55acc11,0x9534932f,0xaf432895,0x614f2956,0xe4b24e84,0x25560c6,0xd70dbb14,0x13c61e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9534932fe55acc11,0x614f2956af432895,0x25560c6e4b24e84,0x13c61ed70dbb14}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9534932fe55acc11,0x614f2956af432895,0x25560c6e4b24e84,0x13c61ed70dbb14}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x28d6,0x450d,0xd24f,0x54e4,0x6e67,0x81d,0x9b71,0xadbe,0x1088,0x6148,0x4ebf,0x4b68,0x829e,0x65c8,0xe1a6,0xe5}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x28d6,0x450d,0xd24f,0x54e4,0x6e67,0x81d,0x9b71,0xadbe,0x1088,0x6148,0x4ebf,0x4b68,0x829e,0x65c8,0xe1a6,0xe5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450d28d6,0x54e4d24f,0x81d6e67,0xadbe9b71,0x61481088,0x4b684ebf,0x65c8829e,0xe5e1a6}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450d28d6,0x54e4d24f,0x81d6e67,0xadbe9b71,0x61481088,0x4b684ebf,0x65c8829e,0xe5e1a6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x54e4d24f450d28d6,0xadbe9b71081d6e67,0x4b684ebf61481088,0xe5e1a665c8829e}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x54e4d24f450d28d6,0xadbe9b71081d6e67,0x4b684ebf61481088,0xe5e1a665c8829e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf10,0xd96c,0x5340,0x8ce5,0x4f0c,0x2731,0xe432,0x7c9,0x7b96,0xbb2a,0x29fb,0x2c55,0xb557,0x323c,0x6f79,0xc0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf10,0xd96c,0x5340,0x8ce5,0x4f0c,0x2731,0xe432,0x7c9,0x7b96,0xbb2a,0x29fb,0x2c55,0xb557,0x323c,0x6f79,0xc0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd96cdf10,0x8ce55340,0x27314f0c,0x7c9e432,0xbb2a7b96,0x2c5529fb,0x323cb557,0xc06f79}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd96cdf10,0x8ce55340,0x27314f0c,0x7c9e432,0xbb2a7b96,0x2c5529fb,0x323cb557,0xc06f79}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8ce55340d96cdf10,0x7c9e43227314f0c,0x2c5529fbbb2a7b96,0xc06f79323cb557}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8ce55340d96cdf10,0x7c9e43227314f0c,0x2c5529fbbb2a7b96,0xc06f79323cb557}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x6b9, 0xd4c, 0x1d8d, 0x1f99, 0x1be4, 0x1f53, 0x5c1, 0x1937, 0x9c, 0xe68, 0x61e, 0x14e8, 0x352, 0x3d6, 
0x174, 0x133, 0x18a6, 0x1b19, 0x94b, 0x9} @@ -2641,223 +2641,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27cb,0x4931,0x13e0,0xcd75,0x6846,0x3de7,0x5a91,0x9ff9,0xa270,0xa6d6,0x26ec,0xb972,0xb44,0xe4b8,0x52fc,0x3f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27cb,0x4931,0x13e0,0xcd75,0x6846,0x3de7,0x5a91,0x9ff9,0xa270,0xa6d6,0x26ec,0xb972,0xb44,0xe4b8,0x52fc,0x3f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x493127cb,0xcd7513e0,0x3de76846,0x9ff95a91,0xa6d6a270,0xb97226ec,0xe4b80b44,0x3f52fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x493127cb,0xcd7513e0,0x3de76846,0x9ff95a91,0xa6d6a270,0xb97226ec,0xe4b80b44,0x3f52fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcd7513e0493127cb,0x9ff95a913de76846,0xb97226eca6d6a270,0x3f52fce4b80b44}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcd7513e0493127cb,0x9ff95a913de76846,0xb97226eca6d6a270,0x3f52fce4b80b44}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2d7e,0x5a38,0xca74,0x1d16,0x2547,0x1674,0x8c29,0xafc2,0x2349,0x4856,0x2c73,0x7957,0x67e1,0x3c3e,0x4d3,0xad}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2d7e,0x5a38,0xca74,0x1d16,0x2547,0x1674,0x8c29,0xafc2,0x2349,0x4856,0x2c73,0x7957,0x67e1,0x3c3e,0x4d3,0xad}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a382d7e,0x1d16ca74,0x16742547,0xafc28c29,0x48562349,0x79572c73,0x3c3e67e1,0xad04d3}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a382d7e,0x1d16ca74,0x16742547,0xafc28c29,0x48562349,0x79572c73,0x3c3e67e1,0xad04d3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1d16ca745a382d7e,0xafc28c2916742547,0x79572c7348562349,0xad04d33c3e67e1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1d16ca745a382d7e,0xafc28c2916742547,0x79572c7348562349,0xad04d33c3e67e1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8d7,0x27fb,0x321e,0xcf15,0xfbd3,0x6f8e,0x5fbd,0x7ed7,0xf394,0x58d6,0x5937,0xb73c,0xbfb,0xe027,0x64ac,0x22}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8d7,0x27fb,0x321e,0xcf15,0xfbd3,0x6f8e,0x5fbd,0x7ed7,0xf394,0x58d6,0x5937,0xb73c,0xbfb,0xe027,0x64ac,0x22}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x27fbd8d7,0xcf15321e,0x6f8efbd3,0x7ed75fbd,0x58d6f394,0xb73c5937,0xe0270bfb,0x2264ac}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x27fbd8d7,0xcf15321e,0x6f8efbd3,0x7ed75fbd,0x58d6f394,0xb73c5937,0xe0270bfb,0x2264ac}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcf15321e27fbd8d7,0x7ed75fbd6f8efbd3,0xb73c593758d6f394,0x2264ace0270bfb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcf15321e27fbd8d7,0x7ed75fbd6f8efbd3,0xb73c593758d6f394,0x2264ace0270bfb}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd835,0xb6ce,0xec1f,0x328a,0x97b9,0xc218,0xa56e,0x6006,0x5d8f,0x5929,0xd913,0x468d,0xf4bb,0x1b47,0xad03,0xc0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xd835,0xb6ce,0xec1f,0x328a,0x97b9,0xc218,0xa56e,0x6006,0x5d8f,0x5929,0xd913,0x468d,0xf4bb,0x1b47,0xad03,0xc0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb6ced835,0x328aec1f,0xc21897b9,0x6006a56e,0x59295d8f,0x468dd913,0x1b47f4bb,0xc0ad03}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb6ced835,0x328aec1f,0xc21897b9,0x6006a56e,0x59295d8f,0x468dd913,0x1b47f4bb,0xc0ad03}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x328aec1fb6ced835,0x6006a56ec21897b9,0x468dd91359295d8f,0xc0ad031b47f4bb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x328aec1fb6ced835,0x6006a56ec21897b9,0x468dd91359295d8f,0xc0ad031b47f4bb}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5286,0x497a,0x2196,0xaa2d,0xa98a,0x7b17,0xa6e9,0x6da0,0x73f5,0xe349,0x83ae,0x7c6e,0x31,0xee2e,0x6931,0x32}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5286,0x497a,0x2196,0xaa2d,0xa98a,0x7b17,0xa6e9,0x6da0,0x73f5,0xe349,0x83ae,0x7c6e,0x31,0xee2e,0x6931,0x32}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x497a5286,0xaa2d2196,0x7b17a98a,0x6da0a6e9,0xe34973f5,0x7c6e83ae,0xee2e0031,0x326931}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x497a5286,0xaa2d2196,0x7b17a98a,0x6da0a6e9,0xe34973f5,0x7c6e83ae,0xee2e0031,0x326931}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaa2d2196497a5286,0x6da0a6e97b17a98a,0x7c6e83aee34973f5,0x326931ee2e0031}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaa2d2196497a5286,0x6da0a6e97b17a98a,0x7c6e83aee34973f5,0x326931ee2e0031}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc8dc,0x8642,0xe46d,0x6a64,0x4c13,0x90e7,0xd82b,0x9b58,0x64b4,0x7381,0x5218,0x99b4,0x5926,0x5d78,0x95e2,0xb7}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc8dc,0x8642,0xe46d,0x6a64,0x4c13,0x90e7,0xd82b,0x9b58,0x64b4,0x7381,0x5218,0x99b4,0x5926,0x5d78,0x95e2,0xb7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8642c8dc,0x6a64e46d,0x90e74c13,0x9b58d82b,0x738164b4,0x99b45218,0x5d785926,0xb795e2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8642c8dc,0x6a64e46d,0x90e74c13,0x9b58d82b,0x738164b4,0x99b45218,0x5d785926,0xb795e2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x6a64e46d8642c8dc,0x9b58d82b90e74c13,0x99b45218738164b4,0xb795e25d785926}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6a64e46d8642c8dc,0x9b58d82b90e74c13,0x99b45218738164b4,0xb795e25d785926}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x94df,0x6dc7,0xcd7f,0xebb2,0xb290,0x811d,0x2825,0xc88,0xd514,0x959a,0x7d64,0xc8c3,0x16a9,0x106a,0x1eea,0x32}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x94df,0x6dc7,0xcd7f,0xebb2,0xb290,0x811d,0x2825,0xc88,0xd514,0x959a,0x7d64,0xc8c3,0x16a9,0x106a,0x1eea,0x32}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6dc794df,0xebb2cd7f,0x811db290,0xc882825,0x959ad514,0xc8c37d64,0x106a16a9,0x321eea}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6dc794df,0xebb2cd7f,0x811db290,0xc882825,0x959ad514,0xc8c37d64,0x106a16a9,0x321eea}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xebb2cd7f6dc794df,0xc882825811db290,0xc8c37d64959ad514,0x321eea106a16a9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xebb2cd7f6dc794df,0xc882825811db290,0xc8c37d64959ad514,0x321eea106a16a9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe08c,0xe778,0x1464,0x19fe,0xef25,0x1d24,0xa98f,0x4af0,0x70d3,0x8e4d,0x2b82,0x95ea,0x3277,0xc267,0x1695,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe08c,0xe778,0x1464,0x19fe,0xef25,0x1d24,0xa98f,0x4af0,0x70d3,0x8e4d,0x2b82,0x95ea,0x3277,0xc267,0x1695,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe778e08c,0x19fe1464,0x1d24ef25,0x4af0a98f,0x8e4d70d3,0x95ea2b82,0xc2673277,0xf1695}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe778e08c,0x19fe1464,0x1d24ef25,0x4af0a98f,0x8e4d70d3,0x95ea2b82,0xc2673277,0xf1695}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x19fe1464e778e08c,0x4af0a98f1d24ef25,0x95ea2b828e4d70d3,0xf1695c2673277}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x19fe1464e778e08c,0x4af0a98f1d24ef25,0x95ea2b828e4d70d3,0xf1695c2673277}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1df,0xb6e1,0xe2a4,0x4bc9,0xdc85,0x6365,0x3fca,0x9a38,0xee2,0xed03,0xca7f,0x1984,0xe709,0x1efe,0xc173,0x8b}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xf1df,0xb6e1,0xe2a4,0x4bc9,0xdc85,0x6365,0x3fca,0x9a38,0xee2,0xed03,0xca7f,0x1984,0xe709,0x1efe,0xc173,0x8b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb6e1f1df,0x4bc9e2a4,0x6365dc85,0x9a383fca,0xed030ee2,0x1984ca7f,0x1efee709,0x8bc173}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb6e1f1df,0x4bc9e2a4,0x6365dc85,0x9a383fca,0xed030ee2,0x1984ca7f,0x1efee709,0x8bc173}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4bc9e2a4b6e1f1df,0x9a383fca6365dc85,0x1984ca7fed030ee2,0x8bc1731efee709}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4bc9e2a4b6e1f1df,0x9a383fca6365dc85,0x1984ca7fed030ee2,0x8bc1731efee709}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6b21,0x9238,0x3280,0x144d,0x4d6f,0x7ee2,0xd7da,0xf377,0x2aeb,0x6a65,0x829b,0x373c,0xe956,0xef95,0xe115,0xcd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6b21,0x9238,0x3280,0x144d,0x4d6f,0x7ee2,0xd7da,0xf377,0x2aeb,0x6a65,0x829b,0x373c,0xe956,0xef95,0xe115,0xcd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x92386b21,0x144d3280,0x7ee24d6f,0xf377d7da,0x6a652aeb,0x373c829b,0xef95e956,0xcde115}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x92386b21,0x144d3280,0x7ee24d6f,0xf377d7da,0x6a652aeb,0x373c829b,0xef95e956,0xcde115}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x144d328092386b21,0xf377d7da7ee24d6f,0x373c829b6a652aeb,0xcde115ef95e956}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x144d328092386b21,0xf377d7da7ee24d6f,0x373c829b6a652aeb,0xcde115ef95e956}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27cb,0x4931,0x13e0,0xcd75,0x6846,0x3de7,0x5a91,0x9ff9,0xa270,0xa6d6,0x26ec,0xb972,0xb44,0xe4b8,0x52fc,0x3f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27cb,0x4931,0x13e0,0xcd75,0x6846,0x3de7,0x5a91,0x9ff9,0xa270,0xa6d6,0x26ec,0xb972,0xb44,0xe4b8,0x52fc,0x3f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x493127cb,0xcd7513e0,0x3de76846,0x9ff95a91,0xa6d6a270,0xb97226ec,0xe4b80b44,0x3f52fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x493127cb,0xcd7513e0,0x3de76846,0x9ff95a91,0xa6d6a270,0xb97226ec,0xe4b80b44,0x3f52fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcd7513e0493127cb,0x9ff95a913de76846,0xb97226eca6d6a270,0x3f52fce4b80b44}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcd7513e0493127cb,0x9ff95a913de76846,0xb97226eca6d6a270,0x3f52fce4b80b44}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2d7e,0x5a38,0xca74,0x1d16,0x2547,0x1674,0x8c29,0xafc2,0x2349,0x4856,0x2c73,0x7957,0x67e1,0x3c3e,0x4d3,0xad}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2d7e,0x5a38,0xca74,0x1d16,0x2547,0x1674,0x8c29,0xafc2,0x2349,0x4856,0x2c73,0x7957,0x67e1,0x3c3e,0x4d3,0xad}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a382d7e,0x1d16ca74,0x16742547,0xafc28c29,0x48562349,0x79572c73,0x3c3e67e1,0xad04d3}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a382d7e,0x1d16ca74,0x16742547,0xafc28c29,0x48562349,0x79572c73,0x3c3e67e1,0xad04d3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x1d16ca745a382d7e,0xafc28c2916742547,0x79572c7348562349,0xad04d33c3e67e1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1d16ca745a382d7e,0xafc28c2916742547,0x79572c7348562349,0xad04d33c3e67e1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8d7,0x27fb,0x321e,0xcf15,0xfbd3,0x6f8e,0x5fbd,0x7ed7,0xf394,0x58d6,0x5937,0xb73c,0xbfb,0xe027,0x64ac,0x22}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8d7,0x27fb,0x321e,0xcf15,0xfbd3,0x6f8e,0x5fbd,0x7ed7,0xf394,0x58d6,0x5937,0xb73c,0xbfb,0xe027,0x64ac,0x22}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x27fbd8d7,0xcf15321e,0x6f8efbd3,0x7ed75fbd,0x58d6f394,0xb73c5937,0xe0270bfb,0x2264ac}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x27fbd8d7,0xcf15321e,0x6f8efbd3,0x7ed75fbd,0x58d6f394,0xb73c5937,0xe0270bfb,0x2264ac}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcf15321e27fbd8d7,0x7ed75fbd6f8efbd3,0xb73c593758d6f394,0x2264ace0270bfb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcf15321e27fbd8d7,0x7ed75fbd6f8efbd3,0xb73c593758d6f394,0x2264ace0270bfb}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd835,0xb6ce,0xec1f,0x328a,0x97b9,0xc218,0xa56e,0x6006,0x5d8f,0x5929,0xd913,0x468d,0xf4bb,0x1b47,0xad03,0xc0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd835,0xb6ce,0xec1f,0x328a,0x97b9,0xc218,0xa56e,0x6006,0x5d8f,0x5929,0xd913,0x468d,0xf4bb,0x1b47,0xad03,0xc0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb6ced835,0x328aec1f,0xc21897b9,0x6006a56e,0x59295d8f,0x468dd913,0x1b47f4bb,0xc0ad03}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb6ced835,0x328aec1f,0xc21897b9,0x6006a56e,0x59295d8f,0x468dd913,0x1b47f4bb,0xc0ad03}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x328aec1fb6ced835,0x6006a56ec21897b9,0x468dd91359295d8f,0xc0ad031b47f4bb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x328aec1fb6ced835,0x6006a56ec21897b9,0x468dd91359295d8f,0xc0ad031b47f4bb}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2943,0x24bd,0x90cb,0x5516,0xd4c5,0xbd8b,0x5374,0xb6d0,0xb9fa,0x71a4,0x41d7,0xbe37,0x18,0xf717,0x3498,0x99}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x2943,0x24bd,0x90cb,0x5516,0xd4c5,0xbd8b,0x5374,0xb6d0,0xb9fa,0x71a4,0x41d7,0xbe37,0x18,0xf717,0x3498,0x99}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x24bd2943,0x551690cb,0xbd8bd4c5,0xb6d05374,0x71a4b9fa,0xbe3741d7,0xf7170018,0x993498}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x24bd2943,0x551690cb,0xbd8bd4c5,0xb6d05374,0x71a4b9fa,0xbe3741d7,0xf7170018,0x993498}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x551690cb24bd2943,0xb6d05374bd8bd4c5,0xbe3741d771a4b9fa,0x993498f7170018}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x551690cb24bd2943,0xb6d05374bd8bd4c5,0xbe3741d771a4b9fa,0x993498f7170018}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x646e,0xc321,0x7236,0xb532,0xa609,0xc873,0x6c15,0x4dac,0xb25a,0x39c0,0x290c,0x4cda,0x2c93,0x2ebc,0xcaf1,0xdb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x646e,0xc321,0x7236,0xb532,0xa609,0xc873,0x6c15,0x4dac,0xb25a,0x39c0,0x290c,0x4cda,0x2c93,0x2ebc,0xcaf1,0xdb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc321646e,0xb5327236,0xc873a609,0x4dac6c15,0x39c0b25a,0x4cda290c,0x2ebc2c93,0xdbcaf1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc321646e,0xb5327236,0xc873a609,0x4dac6c15,0x39c0b25a,0x4cda290c,0x2ebc2c93,0xdbcaf1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb5327236c321646e,0x4dac6c15c873a609,0x4cda290c39c0b25a,0xdbcaf12ebc2c93}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb5327236c321646e,0x4dac6c15c873a609,0x4cda290c39c0b25a,0xdbcaf12ebc2c93}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd647,0xf187,0x9a31,0x1ee,0x193b,0xeec2,0xbfed,0x9418,0x15b6,0xe9a,0x4c74,0xae85,0x3ebe,0x2677,0x3f12,0x42}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd647,0xf187,0x9a31,0x1ee,0x193b,0xeec2,0xbfed,0x9418,0x15b6,0xe9a,0x4c74,0xae85,0x3ebe,0x2677,0x3f12,0x42}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf187d647,0x1ee9a31,0xeec2193b,0x9418bfed,0xe9a15b6,0xae854c74,0x26773ebe,0x423f12}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf187d647,0x1ee9a31,0xeec2193b,0x9418bfed,0xe9a15b6,0xae854c74,0x26773ebe,0x423f12}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x1ee9a31f187d647,0x9418bfedeec2193b,0xae854c740e9a15b6,0x423f1226773ebe}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1ee9a31f187d647,0x9418bfedeec2193b,0xae854c740e9a15b6,0x423f1226773ebe}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x68ff,0x99be,0x416c,0x7bbf,0xd44f,0x609f,0x7682,0xa8ff,0xa6bb,0xec03,0x8e77,0xc076,0x7873,0x9676,0xa152,0xf5}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x68ff,0x99be,0x416c,0x7bbf,0xd44f,0x609f,0x7682,0xa8ff,0xa6bb,0xec03,0x8e77,0xc076,0x7873,0x9676,0xa152,0xf5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x99be68ff,0x7bbf416c,0x609fd44f,0xa8ff7682,0xec03a6bb,0xc0768e77,0x96767873,0xf5a152}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x99be68ff,0x7bbf416c,0x609fd44f,0xa8ff7682,0xec03a6bb,0xc0768e77,0x96767873,0xf5a152}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7bbf416c99be68ff,0xa8ff7682609fd44f,0xc0768e77ec03a6bb,0xf5a15296767873}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7bbf416c99be68ff,0xa8ff7682609fd44f,0xc0768e77ec03a6bb,0xf5a15296767873}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3739,0xf7da,0xbd23,0xa38e,0x8cf9,0x7690,0x6b0e,0x1a7,0x77f0,0xa2bd,0x5ac7,0x5101,0x3aae,0xa922,0x2d3a,0x95}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3739,0xf7da,0xbd23,0xa38e,0x8cf9,0x7690,0x6b0e,0x1a7,0x77f0,0xa2bd,0x5ac7,0x5101,0x3aae,0xa922,0x2d3a,0x95}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf7da3739,0xa38ebd23,0x76908cf9,0x1a76b0e,0xa2bd77f0,0x51015ac7,0xa9223aae,0x952d3a}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf7da3739,0xa38ebd23,0x76908cf9,0x1a76b0e,0xa2bd77f0,0x51015ac7,0xa9223aae,0x952d3a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa38ebd23f7da3739,0x1a76b0e76908cf9,0x51015ac7a2bd77f0,0x952d3aa9223aae}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa38ebd23f7da3739,0x1a76b0e76908cf9,0x51015ac7a2bd77f0,0x952d3aa9223aae}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x29b9,0xe78,0x65ce,0xfe11,0xe6c4,0x113d,0x4012,0x6be7,0xea49,0xf165,0xb38b,0x517a,0xc141,0xd988,0xc0ed,0xbd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x29b9,0xe78,0x65ce,0xfe11,0xe6c4,0x113d,0x4012,0x6be7,0xea49,0xf165,0xb38b,0x517a,0xc141,0xd988,0xc0ed,0xbd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe7829b9,0xfe1165ce,0x113de6c4,0x6be74012,0xf165ea49,0x517ab38b,0xd988c141,0xbdc0ed}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe7829b9,0xfe1165ce,0x113de6c4,0x6be74012,0xf165ea49,0x517ab38b,0xd988c141,0xbdc0ed}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfe1165ce0e7829b9,0x6be74012113de6c4,0x517ab38bf165ea49,0xbdc0edd988c141}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfe1165ce0e7829b9,0x6be74012113de6c4,0x517ab38bf165ea49,0xbdc0edd988c141}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x1068, 0xf2c, 0x1ada, 0x1f58, 0x1343, 0x5cc, 0x90, 0x1bba, 0x50b, 0x1a35, 0x35f, 0x1388, 0x5ce, 0xc4f, 0x1462, 0x191f, 0x665, 0x843, 0x1cb1, 0x1} @@ -3117,220 +3117,220 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0} 
#endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7177,0xb186,0x73de,0xc572,0x9802,0xc0ee,0x7031,0xfe17,0xbc2e,0x41c5,0xe2a7,0xed41,0x1cbf,0x9ff9,0xf5bc,0x1e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7177,0xb186,0x73de,0xc572,0x9802,0xc0ee,0x7031,0xfe17,0xbc2e,0x41c5,0xe2a7,0xed41,0x1cbf,0x9ff9,0xf5bc,0x1e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb1867177,0xc57273de,0xc0ee9802,0xfe177031,0x41c5bc2e,0xed41e2a7,0x9ff91cbf,0x1ef5bc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb1867177,0xc57273de,0xc0ee9802,0xfe177031,0x41c5bc2e,0xed41e2a7,0x9ff91cbf,0x1ef5bc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc57273deb1867177,0xfe177031c0ee9802,0xed41e2a741c5bc2e,0x1ef5bc9ff91cbf}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc57273deb1867177,0xfe177031c0ee9802,0xed41e2a741c5bc2e,0x1ef5bc9ff91cbf}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x726a,0xbae3,0x32b6,0x75c2,0xe003,0x6e79,0xd172,0x382a,0x8a51,0x7962,0xa563,0x6a39,0x9cdd,0xc910,0xf6f0,0xa0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x726a,0xbae3,0x32b6,0x75c2,0xe003,0x6e79,0xd172,0x382a,0x8a51,0x7962,0xa563,0x6a39,0x9cdd,0xc910,0xf6f0,0xa0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbae3726a,0x75c232b6,0x6e79e003,0x382ad172,0x79628a51,0x6a39a563,0xc9109cdd,0xa0f6f0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbae3726a,0x75c232b6,0x6e79e003,0x382ad172,0x79628a51,0x6a39a563,0xc9109cdd,0xa0f6f0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x75c232b6bae3726a,0x382ad1726e79e003,0x6a39a56379628a51,0xa0f6f0c9109cdd}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x75c232b6bae3726a,0x382ad1726e79e003,0x6a39a56379628a51,0xa0f6f0c9109cdd}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x246b,0x9fa9,0x54e6,0xbd87,0x15b5,0x1c70,0x6470,0x25fa,0x3f5c,0x8940,0xa6e9,0x7eb5,0x6109,0x54a1,0xa8df,0x16}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x246b,0x9fa9,0x54e6,0xbd87,0x15b5,0x1c70,0x6470,0x25fa,0x3f5c,0x8940,0xa6e9,0x7eb5,0x6109,0x54a1,0xa8df,0x16}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9fa9246b,0xbd8754e6,0x1c7015b5,0x25fa6470,0x89403f5c,0x7eb5a6e9,0x54a16109,0x16a8df}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9fa9246b,0xbd8754e6,0x1c7015b5,0x25fa6470,0x89403f5c,0x7eb5a6e9,0x54a16109,0x16a8df}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbd8754e69fa9246b,0x25fa64701c7015b5,0x7eb5a6e989403f5c,0x16a8df54a16109}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbd8754e69fa9246b,0x25fa64701c7015b5,0x7eb5a6e989403f5c,0x16a8df54a16109}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8e89,0x4e79,0x8c21,0x3a8d,0x67fd,0x3f11,0x8fce,0x1e8,0x43d1,0xbe3a,0x1d58,0x12be,0xe340,0x6006,0xa43,0xe1}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8e89,0x4e79,0x8c21,0x3a8d,0x67fd,0x3f11,0x8fce,0x1e8,0x43d1,0xbe3a,0x1d58,0x12be,0xe340,0x6006,0xa43,0xe1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0x4e798e89,0x3a8d8c21,0x3f1167fd,0x1e88fce,0xbe3a43d1,0x12be1d58,0x6006e340,0xe10a43}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4e798e89,0x3a8d8c21,0x3f1167fd,0x1e88fce,0xbe3a43d1,0x12be1d58,0x6006e340,0xe10a43}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3a8d8c214e798e89,0x1e88fce3f1167fd,0x12be1d58be3a43d1,0xe10a436006e340}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3a8d8c214e798e89,0x1e88fce3f1167fd,0x12be1d58be3a43d1,0xe10a436006e340}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x231b,0x1af2,0x1640,0xb19c,0xf713,0xe470,0x683e,0xf39a,0x3289,0x7a54,0xc26e,0x904e,0xd5a6,0x6a0c,0x55fc,0x44}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1af2231b,0xb19c1640,0xe470f713,0xf39a683e,0x7a543289,0x904ec26e,0x6a0cd5a6,0x4455fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb19c16401af2231b,0xf39a683ee470f713,0x904ec26e7a543289,0x4455fc6a0cd5a6}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad7a,0xb685,0xde69,0x55d2,0x5675,0x84e8,0x5916,0x925f,0x8c0a,0x1cb6,0x7c51,0x8391,0xffce,0x11d1,0x96ce,0xcd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb685ad7a,0x55d2de69,0x84e85675,0x925f5916,0x1cb68c0a,0x83917c51,0x11d1ffce,0xcd96ce}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x55d2de69b685ad7a,0x925f591684e85675,0x83917c511cb68c0a,0xcd96ce11d1ffce}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3724,0x79bd,0x1b92,0x959b,0xb3ec,0x6f18,0x27d4,0x64a7,0x9b4b,0x8c7e,0xade7,0x664b,0xa6d9,0xa287,0x6a1d,0x48}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x79bd3724,0x959b1b92,0x6f18b3ec,0x64a727d4,0x8c7e9b4b,0x664bade7,0xa287a6d9,0x486a1d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x959b1b9279bd3724,0x64a727d46f18b3ec,0x664bade78c7e9b4b,0x486a1da287a6d9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdce5,0xe50d,0xe9bf,0x4e63,0x8ec,0x1b8f,0x97c1,0xc65,0xcd76,0x85ab,0x3d91,0x6fb1,0x2a59,0x95f3,0xaa03,0xbb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe50ddce5,0x4e63e9bf,0x1b8f08ec,0xc6597c1,0x85abcd76,0x6fb13d91,0x95f32a59,0xbbaa03}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4e63e9bfe50ddce5,0xc6597c11b8f08ec,0x6fb13d9185abcd76,0xbbaa0395f32a59}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1975,0x2b02,0x86c,0x9cbe,0x7576,0xb1c3,0xd9a7,0x737e,0x4de1,0xa245,0x7652,0xf9bf,0x4bf8,0xdc2c,0xeaa1,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1975,0x2b02,0x86c,0x9cbe,0x7576,0xb1c3,0xd9a7,0x737e,0x4de1,0xa245,0x7652,0xf9bf,0x4bf8,0xdc2c,0xeaa1,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2b021975,0x9cbe086c,0xb1c37576,0x737ed9a7,0xa2454de1,0xf9bf7652,0xdc2c4bf8,0x8eaa1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2b021975,0x9cbe086c,0xb1c37576,0x737ed9a7,0xa2454de1,0xf9bf7652,0xdc2c4bf8,0x8eaa1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9cbe086c2b021975,0x737ed9a7b1c37576,0xf9bf7652a2454de1,0x8eaa1dc2c4bf8}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9cbe086c2b021975,0x737ed9a7b1c37576,0xf9bf7652a2454de1,0x8eaa1dc2c4bf8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee88,0x46bc,0x7177,0x337c,0x92b6,0x40dc,0xb657,0x3366,0x6c8a,0x2b98,0x40eb,0x1146,0xe116,0xb00a,0xa22f,0xe3}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee88,0x46bc,0x7177,0x337c,0x92b6,0x40dc,0xb657,0x3366,0x6c8a,0x2b98,0x40eb,0x1146,0xe116,0xb00a,0xa22f,0xe3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x46bcee88,0x337c7177,0x40dc92b6,0x3366b657,0x2b986c8a,0x114640eb,0xb00ae116,0xe3a22f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x46bcee88,0x337c7177,0x40dc92b6,0x3366b657,0x2b986c8a,0x114640eb,0xb00ae116,0xe3a22f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x337c717746bcee88,0x3366b65740dc92b6,0x114640eb2b986c8a,0xe3a22fb00ae116}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x337c717746bcee88,0x3366b65740dc92b6,0x114640eb2b986c8a,0xe3a22fb00ae116}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf28d,0x64d3,0xe248,0x40b9,0x5141,0x82bb,0x82ea,0xcf35,0xfaf0,0x3,0xd71f,0x6e88,0x7ac9,0xf4c9,0x6b9e,0xcc}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf28d,0x64d3,0xe248,0x40b9,0x5141,0x82bb,0x82ea,0xcf35,0xfaf0,0x3,0xd71f,0x6e88,0x7ac9,0xf4c9,0x6b9e,0xcc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = 
(mp_limb_t[]) {0x64d3f28d,0x40b9e248,0x82bb5141,0xcf3582ea,0x3faf0,0x6e88d71f,0xf4c97ac9,0xcc6b9e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x64d3f28d,0x40b9e248,0x82bb5141,0xcf3582ea,0x3faf0,0x6e88d71f,0xf4c97ac9,0xcc6b9e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x40b9e24864d3f28d,0xcf3582ea82bb5141,0x6e88d71f0003faf0,0xcc6b9ef4c97ac9}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x40b9e24864d3f28d,0xcf3582ea82bb5141,0x6e88d71f0003faf0,0xcc6b9ef4c97ac9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe68b,0xd4fd,0xf793,0x6341,0x8a89,0x4e3c,0x2658,0x8c81,0xb21e,0x5dba,0x89ad,0x640,0xb407,0x23d3,0x155e,0xf7}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe68b,0xd4fd,0xf793,0x6341,0x8a89,0x4e3c,0x2658,0x8c81,0xb21e,0x5dba,0x89ad,0x640,0xb407,0x23d3,0x155e,0xf7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd4fde68b,0x6341f793,0x4e3c8a89,0x8c812658,0x5dbab21e,0x64089ad,0x23d3b407,0xf7155e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd4fde68b,0x6341f793,0x4e3c8a89,0x8c812658,0x5dbab21e,0x64089ad,0x23d3b407,0xf7155e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6341f793d4fde68b,0x8c8126584e3c8a89,0x64089ad5dbab21e,0xf7155e23d3b407}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6341f793d4fde68b,0x8c8126584e3c8a89,0x64089ad5dbab21e,0xf7155e23d3b407}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7177,0xb186,0x73de,0xc572,0x9802,0xc0ee,0x7031,0xfe17,0xbc2e,0x41c5,0xe2a7,0xed41,0x1cbf,0x9ff9,0xf5bc,0x1e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7177,0xb186,0x73de,0xc572,0x9802,0xc0ee,0x7031,0xfe17,0xbc2e,0x41c5,0xe2a7,0xed41,0x1cbf,0x9ff9,0xf5bc,0x1e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb1867177,0xc57273de,0xc0ee9802,0xfe177031,0x41c5bc2e,0xed41e2a7,0x9ff91cbf,0x1ef5bc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb1867177,0xc57273de,0xc0ee9802,0xfe177031,0x41c5bc2e,0xed41e2a7,0x9ff91cbf,0x1ef5bc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc57273deb1867177,0xfe177031c0ee9802,0xed41e2a741c5bc2e,0x1ef5bc9ff91cbf}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc57273deb1867177,0xfe177031c0ee9802,0xed41e2a741c5bc2e,0x1ef5bc9ff91cbf}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x726a,0xbae3,0x32b6,0x75c2,0xe003,0x6e79,0xd172,0x382a,0x8a51,0x7962,0xa563,0x6a39,0x9cdd,0xc910,0xf6f0,0xa0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x726a,0xbae3,0x32b6,0x75c2,0xe003,0x6e79,0xd172,0x382a,0x8a51,0x7962,0xa563,0x6a39,0x9cdd,0xc910,0xf6f0,0xa0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbae3726a,0x75c232b6,0x6e79e003,0x382ad172,0x79628a51,0x6a39a563,0xc9109cdd,0xa0f6f0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbae3726a,0x75c232b6,0x6e79e003,0x382ad172,0x79628a51,0x6a39a563,0xc9109cdd,0xa0f6f0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x75c232b6bae3726a,0x382ad1726e79e003,0x6a39a56379628a51,0xa0f6f0c9109cdd}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x75c232b6bae3726a,0x382ad1726e79e003,0x6a39a56379628a51,0xa0f6f0c9109cdd}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x246b,0x9fa9,0x54e6,0xbd87,0x15b5,0x1c70,0x6470,0x25fa,0x3f5c,0x8940,0xa6e9,0x7eb5,0x6109,0x54a1,0xa8df,0x16}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x246b,0x9fa9,0x54e6,0xbd87,0x15b5,0x1c70,0x6470,0x25fa,0x3f5c,0x8940,0xa6e9,0x7eb5,0x6109,0x54a1,0xa8df,0x16}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9fa9246b,0xbd8754e6,0x1c7015b5,0x25fa6470,0x89403f5c,0x7eb5a6e9,0x54a16109,0x16a8df}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9fa9246b,0xbd8754e6,0x1c7015b5,0x25fa6470,0x89403f5c,0x7eb5a6e9,0x54a16109,0x16a8df}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbd8754e69fa9246b,0x25fa64701c7015b5,0x7eb5a6e989403f5c,0x16a8df54a16109}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbd8754e69fa9246b,0x25fa64701c7015b5,0x7eb5a6e989403f5c,0x16a8df54a16109}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8e89,0x4e79,0x8c21,0x3a8d,0x67fd,0x3f11,0x8fce,0x1e8,0x43d1,0xbe3a,0x1d58,0x12be,0xe340,0x6006,0xa43,0xe1}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8e89,0x4e79,0x8c21,0x3a8d,0x67fd,0x3f11,0x8fce,0x1e8,0x43d1,0xbe3a,0x1d58,0x12be,0xe340,0x6006,0xa43,0xe1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4e798e89,0x3a8d8c21,0x3f1167fd,0x1e88fce,0xbe3a43d1,0x12be1d58,0x6006e340,0xe10a43}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4e798e89,0x3a8d8c21,0x3f1167fd,0x1e88fce,0xbe3a43d1,0x12be1d58,0x6006e340,0xe10a43}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3a8d8c214e798e89,0x1e88fce3f1167fd,0x12be1d58be3a43d1,0xe10a436006e340}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3a8d8c214e798e89,0x1e88fce3f1167fd,0x12be1d58be3a43d1,0xe10a436006e340}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x118e,0xd79,0xb20,0xd8ce,0x7b89,0x7238,0x341f,0xf9cd,0x1944,0x3d2a,0x6137,0x4827,0x6ad3,0x3506,0x2afe,0x22}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd79118e,0xd8ce0b20,0x72387b89,0xf9cd341f,0x3d2a1944,0x48276137,0x35066ad3,0x222afe}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8ce0b200d79118e,0xf9cd341f72387b89,0x482761373d2a1944,0x222afe35066ad3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd6bd,0xdb42,0x6f34,0xaae9,0x2b3a,0x4274,0xac8b,0x492f,0x4605,0x8e5b,0xbe28,0x41c8,0xffe7,0x8e8,0xcb67,0x66}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, 
._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdb42d6bd,0xaae96f34,0x42742b3a,0x492fac8b,0x8e5b4605,0x41c8be28,0x8e8ffe7,0x66cb67}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaae96f34db42d6bd,0x492fac8b42742b3a,0x41c8be288e5b4605,0x66cb6708e8ffe7}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b92,0x3cde,0x8dc9,0x4acd,0x59f6,0x378c,0x93ea,0xb253,0x4da5,0xc63f,0xd6f3,0xb325,0xd36c,0xd143,0x350e,0x24}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3cde9b92,0x4acd8dc9,0x378c59f6,0xb25393ea,0xc63f4da5,0xb325d6f3,0xd143d36c,0x24350e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4acd8dc93cde9b92,0xb25393ea378c59f6,0xb325d6f3c63f4da5,0x24350ed143d36c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xee73,0xf286,0xf4df,0x2731,0x8476,0x8dc7,0xcbe0,0x632,0xe6bb,0xc2d5,0x9ec8,0xb7d8,0x952c,0xcaf9,0xd501,0xdd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf286ee73,0x2731f4df,0x8dc78476,0x632cbe0,0xc2d5e6bb,0xb7d89ec8,0xcaf9952c,0xddd501}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2731f4dff286ee73,0x632cbe08dc78476,0xb7d89ec8c2d5e6bb,0xddd501caf9952c}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x84a0,0x8ad1,0xbcc4,0xc440,0x94e1,0x46ea,0x15c6,0x784e,0x190,0xd26f,0x630,0x2bee,0x74b1,0x93ce,0xe061,0x3c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x84a0,0x8ad1,0xbcc4,0xc440,0x94e1,0x46ea,0x15c6,0x784e,0x190,0xd26f,0x630,0x2bee,0x74b1,0x93ce,0xe061,0x3c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8ad184a0,0xc440bcc4,0x46ea94e1,0x784e15c6,0xd26f0190,0x2bee0630,0x93ce74b1,0x3ce061}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8ad184a0,0xc440bcc4,0x46ea94e1,0x784e15c6,0xd26f0190,0x2bee0630,0x93ce74b1,0x3ce061}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc440bcc48ad184a0,0x784e15c646ea94e1,0x2bee0630d26f0190,0x3ce06193ce74b1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0xc440bcc48ad184a0,0x784e15c646ea94e1,0x2bee0630d26f0190,0x3ce06193ce74b1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1e2b,0xdafe,0xfa45,0xa69b,0xb77e,0xf670,0x927d,0xa0f9,0xccb5,0xc897,0x9607,0x5f22,0x47bf,0x867,0xf781,0xd9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1e2b,0xdafe,0xfa45,0xa69b,0xb77e,0xf670,0x927d,0xa0f9,0xccb5,0xc897,0x9607,0x5f22,0x47bf,0x867,0xf781,0xd9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdafe1e2b,0xa69bfa45,0xf670b77e,0xa0f9927d,0xc897ccb5,0x5f229607,0x86747bf,0xd9f781}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdafe1e2b,0xa69bfa45,0xf670b77e,0xa0f9927d,0xc897ccb5,0x5f229607,0x86747bf,0xd9f781}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa69bfa45dafe1e2b,0xa0f9927df670b77e,0x5f229607c897ccb5,0xd9f781086747bf}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa69bfa45dafe1e2b,0xa0f9927df670b77e,0x5f229607c897ccb5,0xd9f781086747bf}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2aa2,0xbd3f,0x2ad,0x19bd,0xe6f0,0x3b95,0x3fff,0xd17e,0xf3a6,0x7888,0xda46,0x3b21,0xcc57,0x5301,0x3e50,0xc4}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2aa2,0xbd3f,0x2ad,0x19bd,0xe6f0,0x3b95,0x3fff,0xd17e,0xf3a6,0x7888,0xda46,0x3b21,0xcc57,0x5301,0x3e50,0xc4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbd3f2aa2,0x19bd02ad,0x3b95e6f0,0xd17e3fff,0x7888f3a6,0x3b21da46,0x5301cc57,0xc43e50}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbd3f2aa2,0x19bd02ad,0x3b95e6f0,0xd17e3fff,0x7888f3a6,0x3b21da46,0x5301cc57,0xc43e50}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x19bd02adbd3f2aa2,0xd17e3fff3b95e6f0,0x3b21da467888f3a6,0xc43e505301cc57}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x19bd02adbd3f2aa2,0xd17e3fff3b95e6f0,0x3b21da467888f3a6,0xc43e505301cc57}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7b60,0x752e,0x433b,0x3bbf,0x6b1e,0xb915,0xea39,0x87b1,0xfe6f,0x2d90,0xf9cf,0xd411,0x8b4e,0x6c31,0x1f9e,0xc3}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7b60,0x752e,0x433b,0x3bbf,0x6b1e,0xb915,0xea39,0x87b1,0xfe6f,0x2d90,0xf9cf,0xd411,0x8b4e,0x6c31,0x1f9e,0xc3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x752e7b60,0x3bbf433b,0xb9156b1e,0x87b1ea39,0x2d90fe6f,0xd411f9cf,0x6c318b4e,0xc31f9e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x752e7b60,0x3bbf433b,0xb9156b1e,0x87b1ea39,0x2d90fe6f,0xd411f9cf,0x6c318b4e,0xc31f9e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3bbf433b752e7b60,0x87b1ea39b9156b1e,0xd411f9cf2d90fe6f,0xc31f9e6c318b4e}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3bbf433b752e7b60,0x87b1ea39b9156b1e,0xd411f9cf2d90fe6f,0xc31f9e6c318b4e}}}} #endif -}}}}; +}}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/finit.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/finit.c index b3808edf07..c9a3687282 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/finit.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/finit.c @@ -29,29 +29,29 @@ quat_alg_elem_finalize(quat_alg_elem_t *elem) void ibz_vec_2_init(ibz_vec_2_t *vec) { - ibz_init(&((*vec)[0])); - ibz_init(&((*vec)[1])); + 
ibz_init(&(vec->v[0])); + ibz_init(&(vec->v[1])); } void ibz_vec_2_finalize(ibz_vec_2_t *vec) { - ibz_finalize(&((*vec)[0])); - ibz_finalize(&((*vec)[1])); + ibz_finalize(&(vec->v[0])); + ibz_finalize(&(vec->v[1])); } void ibz_vec_4_init(ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_init(&(*vec)[i]); + ibz_init(&vec->v[i]); } } void ibz_vec_4_finalize(ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_finalize(&(*vec)[i]); + ibz_finalize(&vec->v[i]); } } @@ -60,7 +60,7 @@ ibz_mat_2x2_init(ibz_mat_2x2_t *mat) { for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_init(&(*mat)[i][j]); + ibz_init(&(mat->m)[i][j]); } } } @@ -69,7 +69,7 @@ ibz_mat_2x2_finalize(ibz_mat_2x2_t *mat) { for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_finalize(&(*mat)[i][j]); + ibz_finalize(&(mat->m)[i][j]); } } } @@ -79,7 +79,7 @@ ibz_mat_4x4_init(ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_init(&(*mat)[i][j]); + ibz_init(&(mat->m)[i][j]); } } } @@ -88,7 +88,7 @@ ibz_mat_4x4_finalize(ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_finalize(&(*mat)[i][j]); + ibz_finalize(&(mat->m)[i][j]); } } } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf.c index 511a0a5d38..5edff425c8 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hnf.c @@ -14,21 +14,21 @@ ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat) for (int i = 0; i < 4; i++) { // upper triangular for (int j = 0; j < i; j++) { - res = res && ibz_is_zero(&((*mat)[i][j])); + res = res && ibz_is_zero(&(mat->m[i][j])); } // find first non 0 element of line found = 0; for (int j = i; j < 4; j++) { if (found) { // all values are positive, and first non-0 is the largest of that line - res = res && (ibz_cmp(&((*mat)[i][j]), &zero) >= 0); - res = res && (ibz_cmp(&((*mat)[i][ind]), &((*mat)[i][j])) > 0); + res = res && (ibz_cmp(&(mat->m[i][j]), &zero) >= 0); + res = res && (ibz_cmp(&(mat->m[i][ind]), &(mat->m[i][j])) > 0); } else { - if (!ibz_is_zero(&((*mat)[i][j]))) { + if (!ibz_is_zero(&(mat->m[i][j]))) { found = 1; ind = j; // mustbe non-negative - res = res && (ibz_cmp(&((*mat)[i][j]), &zero) > 0); + res = res && (ibz_cmp(&(mat->m[i][j]), &zero) > 0); } } } @@ -37,7 +37,7 @@ ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat) int linestart = -1; int i = 0; for (int j = 0; j < 4; j++) { - while ((i < 4) && (ibz_is_zero(&((*mat)[i][j])))) { + while ((i < 4) && (ibz_is_zero(&(mat->m[i][j])))) { i = i + 1; } if (i != 4) { @@ -66,13 +66,13 @@ ibz_vec_4_linear_combination_mod(ibz_vec_4_t *lc, ibz_init(&m); ibz_copy(&m, mod); for (int i = 0; i < 4; i++) { - ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); - ibz_mul(&prod, coeff_b, &((*vec_b)[i])); - ibz_add(&(sums[i]), &(sums[i]), &prod); - ibz_centered_mod(&(sums[i]), &(sums[i]), &m); + ibz_mul(&(sums.v[i]), coeff_a, &(vec_a->v[i])); + ibz_mul(&prod, coeff_b, &(vec_b->v[i])); + ibz_add(&(sums.v[i]), &(sums.v[i]), &prod); + ibz_centered_mod(&(sums.v[i]), &(sums.v[i]), &m); } for (int i = 0; i < 4; i++) { - ibz_copy(&((*lc)[i]), &(sums[i])); + ibz_copy(&(lc->v[i]), &(sums.v[i])); } ibz_finalize(&prod); ibz_finalize(&m); @@ -86,7 +86,7 @@ ibz_vec_4_copy_mod(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_t *mod) ibz_init(&m); ibz_copy(&m, mod); for (int i = 0; i < 4; i++) { - ibz_centered_mod(&((*res)[i]), &((*vec)[i]), &m); + ibz_centered_mod(&(res->v[i]), &(vec->v[i]), &m); } 
ibz_finalize(&m); } @@ -101,8 +101,8 @@ ibz_vec_4_scalar_mul_mod(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4 ibz_copy(&s, scalar); ibz_copy(&m, mod); for (int i = 0; i < 4; i++) { - ibz_mul(&((*prod)[i]), &((*vec)[i]), &s); - ibz_mod(&((*prod)[i]), &((*prod)[i]), &m); + ibz_mul(&(prod->v[i]), &(vec->v[i]), &s); + ibz_mod(&(prod->v[i]), &(prod->v[i]), &m); } ibz_finalize(&m); ibz_finalize(&s); @@ -138,36 +138,36 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec if (h < 4) ibz_vec_4_init(&(w[h])); ibz_vec_4_init(&(a[h])); - ibz_copy(&(a[h][0]), &(generators[h][0])); - ibz_copy(&(a[h][1]), &(generators[h][1])); - ibz_copy(&(a[h][2]), &(generators[h][2])); - ibz_copy(&(a[h][3]), &(generators[h][3])); + ibz_copy(&(a[h].v[0]), &(generators[h].v[0])); + ibz_copy(&(a[h].v[1]), &(generators[h].v[1])); + ibz_copy(&(a[h].v[2]), &(generators[h].v[2])); + ibz_copy(&(a[h].v[3]), &(generators[h].v[3])); } assert(ibz_cmp(mod, &ibz_const_zero) > 0); ibz_copy(&m, mod); while (i != -1) { while (j != 0) { j = j - 1; - if (!ibz_is_zero(&(a[j][i]))) { + if (!ibz_is_zero(&(a[j].v[i]))) { // assumtion that ibz_xgcd outputs u,v which are small in absolute // value is needed here also, needs u non 0, but v can be 0 if needed - ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &(a[j][i])); + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k].v[i]), &(a[j].v[i])); ibz_vec_4_linear_combination(&c, &u, &(a[k]), &v, &(a[j])); - ibz_div(&coeff_1, &r, &(a[k][i]), &d); - ibz_div(&coeff_2, &r, &(a[j][i]), &d); + ibz_div(&coeff_1, &r, &(a[k].v[i]), &d); + ibz_div(&coeff_2, &r, &(a[j].v[i]), &d); ibz_neg(&coeff_2, &coeff_2); ibz_vec_4_linear_combination_mod( &(a[j]), &coeff_1, &(a[j]), &coeff_2, &(a[k]), &m); // do lin comb mod m ibz_vec_4_copy_mod(&(a[k]), &c, &m); // mod m in copy } } - ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &m); + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k].v[i]), &m); ibz_vec_4_scalar_mul_mod(&(w[i]), &u, &(a[k]), &m); // mod m in scalar mult - if (ibz_is_zero(&(w[i][i]))) { - ibz_copy(&(w[i][i]), &m); + if (ibz_is_zero(&(w[i].v[i]))) { + ibz_copy(&(w[i].v[i]), &m); } for (int h = i + 1; h < 4; h++) { - ibz_div_floor(&q, &r, &(w[h][i]), &(w[i][i])); + ibz_div_floor(&q, &r, &(w[h].v[i]), &(w[i].v[i])); ibz_neg(&q, &q); ibz_vec_4_linear_combination(&(w[h]), &ibz_const_one, &(w[h]), &q, &(w[i])); } @@ -177,8 +177,8 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec k = k - 1; i = i - 1; j = k; - if (ibz_is_zero(&(a[k][i]))) - ibz_copy(&(a[k][i]), &m); + if (ibz_is_zero(&(a[k].v[i]))) + ibz_copy(&(a[k].v[i]), &m); } else { k = k - 1; @@ -188,7 +188,7 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec } for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { - ibz_copy(&((*hnf)[i][j]), &(w[j][i])); + ibz_copy(&((hnf->m)[i][j]), &(w[j].v[i])); } } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ibz_division.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ibz_division.c index 0fd35b5c65..f630f5a9fe 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ibz_division.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ibz_division.c @@ -8,5 +8,5 @@ void ibz_xgcd(ibz_t *gcd, ibz_t *u, ibz_t *v, const ibz_t *a, const ibz_t *b) { - mpz_gcdext(*gcd, *u, *v, *a, *b); + mpz_gcdext(gcd->i, u->i, v->i, a->i, b->i); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/id2iso.c index 0743974345..1be9d87e71 100644 --- 
a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/id2iso.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/id2iso.c @@ -18,8 +18,8 @@ ec_biscalar_mul_ibz_vec(ec_point_t *res, const ec_curve_t *curve) { digit_t scalars[2][NWORDS_ORDER]; - ibz_to_digit_array(scalars[0], &(*scalar_vec)[0]); - ibz_to_digit_array(scalars[1], &(*scalar_vec)[1]); + ibz_to_digit_array(scalars[0], &scalar_vec->v[0]); + ibz_to_digit_array(scalars[1], &scalar_vec->v[1]); ec_biscalar_mul(res, scalars[0], scalars[1], f, PQ, curve); } @@ -48,14 +48,14 @@ id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *vec, const quat_left_ideal_t *lid quat_change_to_O0_basis(&coeffs, &alpha); for (unsigned i = 0; i < 2; ++i) { - ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + ibz_add(&mat.m[i][i], &mat.m[i][i], &coeffs.v[0]); for (unsigned j = 0; j < 2; ++j) { - ibz_mul(&tmp, &ACTION_GEN2[i][j], &coeffs[1]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &ACTION_GEN3[i][j], &coeffs[2]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &ACTION_GEN4[i][j], &coeffs[3]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN2.m[i][j], &coeffs.v[1]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN3.m[i][j], &coeffs.v[2]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN4.m[i][j], &coeffs.v[3]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); } } @@ -67,16 +67,16 @@ id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *vec, const quat_left_ideal_t *lid { const ibz_t *const norm = &lideal->norm; - ibz_mod(&(*vec)[0], &mat[0][0], norm); - ibz_mod(&(*vec)[1], &mat[1][0], norm); - ibz_gcd(&tmp, &(*vec)[0], &(*vec)[1]); + ibz_mod(&vec->v[0], &mat.m[0][0], norm); + ibz_mod(&vec->v[1], &mat.m[1][0], norm); + ibz_gcd(&tmp, &vec->v[0], &vec->v[1]); if (ibz_is_even(&tmp)) { - ibz_mod(&(*vec)[0], &mat[0][1], norm); - ibz_mod(&(*vec)[1], &mat[1][1], norm); + ibz_mod(&vec->v[0], &mat.m[0][1], norm); + ibz_mod(&vec->v[1], &mat.m[1][1], norm); } #ifndef NDEBUG - ibz_gcd(&tmp, &(*vec)[0], norm); - ibz_gcd(&tmp, &(*vec)[1], &tmp); + ibz_gcd(&tmp, &vec->v[0], norm); + ibz_gcd(&tmp, &vec->v[1], &tmp); assert(!ibz_cmp(&tmp, &ibz_const_one)); #endif } @@ -102,28 +102,28 @@ matrix_application_even_basis(ec_basis_t *bas, const ec_curve_t *E, ibz_mat_2x2_ copy_basis(&tmp_bas, bas); // reduction mod 2f - ibz_mod(&(*mat)[0][0], &(*mat)[0][0], &pow_two); - ibz_mod(&(*mat)[0][1], &(*mat)[0][1], &pow_two); - ibz_mod(&(*mat)[1][0], &(*mat)[1][0], &pow_two); - ibz_mod(&(*mat)[1][1], &(*mat)[1][1], &pow_two); + ibz_mod(&mat->m[0][0], &mat->m[0][0], &pow_two); + ibz_mod(&mat->m[0][1], &mat->m[0][1], &pow_two); + ibz_mod(&mat->m[1][0], &mat->m[1][0], &pow_two); + ibz_mod(&mat->m[1][1], &mat->m[1][1], &pow_two); // For a matrix [[a, c], [b, d]] we compute: // // first basis element R = [a]P + [b]Q - ibz_to_digit_array(scalars[0], &(*mat)[0][0]); - ibz_to_digit_array(scalars[1], &(*mat)[1][0]); + ibz_to_digit_array(scalars[0], &mat->m[0][0]); + ibz_to_digit_array(scalars[1], &mat->m[1][0]); ec_biscalar_mul(&bas->P, scalars[0], scalars[1], f, &tmp_bas, E); // second basis element S = [c]P + [d]Q - ibz_to_digit_array(scalars[0], &(*mat)[0][1]); - ibz_to_digit_array(scalars[1], &(*mat)[1][1]); + ibz_to_digit_array(scalars[0], &mat->m[0][1]); + ibz_to_digit_array(scalars[1], &mat->m[1][1]); ec_biscalar_mul(&bas->Q, scalars[0], scalars[1], f, &tmp_bas, E); // Their difference R - S = [a - c]P + [b - d]Q - ibz_sub(&tmp, &(*mat)[0][0], &(*mat)[0][1]); + ibz_sub(&tmp, &mat->m[0][0], &mat->m[0][1]); ibz_mod(&tmp, &tmp, 
&pow_two); ibz_to_digit_array(scalars[0], &tmp); - ibz_sub(&tmp, &(*mat)[1][0], &(*mat)[1][1]); + ibz_sub(&tmp, &mat->m[1][0], &mat->m[1][1]); ibz_mod(&tmp, &tmp, &pow_two); ibz_to_digit_array(scalars[1], &tmp); ret = ec_biscalar_mul(&bas->PmQ, scalars[0], scalars[1], f, &tmp_bas, E); @@ -157,23 +157,23 @@ endomorphism_application_even_basis(ec_basis_t *bas, quat_alg_make_primitive(&coeffs, &content, theta, &EXTREMAL_ORDERS[index_alternate_curve].order); assert(ibz_is_odd(&content)); - ibz_set(&mat[0][0], 0); - ibz_set(&mat[0][1], 0); - ibz_set(&mat[1][0], 0); - ibz_set(&mat[1][1], 0); + ibz_set(&mat.m[0][0], 0); + ibz_set(&mat.m[0][1], 0); + ibz_set(&mat.m[1][0], 0); + ibz_set(&mat.m[1][1], 0); // computing the matrix for (unsigned i = 0; i < 2; ++i) { - ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + ibz_add(&mat.m[i][i], &mat.m[i][i], &coeffs.v[0]); for (unsigned j = 0; j < 2; ++j) { - ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen2[i][j], &coeffs[1]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen3[i][j], &coeffs[2]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen4[i][j], &coeffs[3]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&mat[i][j], &mat[i][j], &content); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen2.m[i][j], &coeffs.v[1]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen3.m[i][j], &coeffs.v[2]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen4.m[i][j], &coeffs.v[3]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&mat.m[i][j], &mat.m[i][j], &content); } } @@ -215,19 +215,19 @@ id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t * ibz_mat_2x2_t mat; ibz_mat_2x2_init(&mat); - ibz_copy(&mat[0][0], &(*vec2)[0]); - ibz_copy(&mat[1][0], &(*vec2)[1]); + ibz_copy(&mat.m[0][0], &vec2->v[0]); + ibz_copy(&mat.m[1][0], &vec2->v[1]); ibz_mat_2x2_eval(&vec, &ACTION_J, vec2); - ibz_copy(&mat[0][1], &vec[0]); - ibz_copy(&mat[1][1], &vec[1]); + ibz_copy(&mat.m[0][1], &vec.v[0]); + ibz_copy(&mat.m[1][1], &vec.v[1]); ibz_mat_2x2_eval(&vec, &ACTION_GEN4, vec2); - ibz_add(&mat[0][1], &mat[0][1], &vec[0]); - ibz_add(&mat[1][1], &mat[1][1], &vec[1]); + ibz_add(&mat.m[0][1], &mat.m[0][1], &vec.v[0]); + ibz_add(&mat.m[1][1], &mat.m[1][1], &vec.v[1]); - ibz_mod(&mat[0][1], &mat[0][1], &two_pow); - ibz_mod(&mat[1][1], &mat[1][1], &two_pow); + ibz_mod(&mat.m[0][1], &mat.m[0][1], &two_pow); + ibz_mod(&mat.m[1][1], &mat.m[1][1], &two_pow); ibz_mat_2x2_t inv; ibz_mat_2x2_init(&inv); @@ -247,11 +247,11 @@ id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t * quat_alg_elem_t gen; quat_alg_elem_init(&gen); ibz_set(&gen.denom, 2); - ibz_add(&gen.coord[0], &vec[0], &vec[0]); - ibz_set(&gen.coord[1], -2); - ibz_add(&gen.coord[2], &vec[1], &vec[1]); - ibz_copy(&gen.coord[3], &vec[1]); - ibz_add(&gen.coord[0], &gen.coord[0], &vec[1]); + ibz_add(&gen.coord.v[0], &vec.v[0], &vec.v[0]); + ibz_set(&gen.coord.v[1], -2); + ibz_add(&gen.coord.v[2], &vec.v[1], &vec.v[1]); + ibz_copy(&gen.coord.v[3], &vec.v[1]); + ibz_add(&gen.coord.v[0], &gen.coord.v[0], &vec.v[1]); ibz_vec_2_finalize(&vec); quat_lideal_create(lideal, &gen, &two_pow, &MAXORD_O0, &QUATALG_PINFTY); @@ -319,10 +319,10 @@ 
_change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, #endif // Copy the results into the matrix - ibz_copy_digit_array(&((*mat)[0][0]), x1); - ibz_copy_digit_array(&((*mat)[1][0]), x2); - ibz_copy_digit_array(&((*mat)[0][1]), x3); - ibz_copy_digit_array(&((*mat)[1][1]), x4); + ibz_copy_digit_array(&(mat->m[0][0]), x1); + ibz_copy_digit_array(&(mat->m[1][0]), x2); + ibz_copy_digit_array(&(mat->m[0][1]), x3); + ibz_copy_digit_array(&(mat->m[1][1]), x4); } void diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ideal.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ideal.c index 9cf863a104..8634143941 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ideal.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ideal.c @@ -33,7 +33,7 @@ quat_lideal_copy(quat_left_ideal_t *copy, const quat_left_ideal_t *copied) ibz_copy(©->lattice.denom, &copied->lattice.denom); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(©->lattice.basis[i][j], &copied->lattice.basis[i][j]); + ibz_copy(©->lattice.basis.m[i][j], &copied->lattice.basis.m[i][j]); } } } @@ -248,13 +248,13 @@ quat_lideal_class_gram(ibz_mat_4x4_t *G, const quat_left_ideal_t *lideal, const for (int i = 0; i < 4; i++) { for (int j = 0; j <= i; j++) { - ibz_div(&(*G)[i][j], &rmd, &(*G)[i][j], &divisor); + ibz_div(&G->m[i][j], &rmd, &G->m[i][j], &divisor); assert(ibz_is_zero(&rmd)); } } for (int i = 0; i < 4; i++) { for (int j = 0; j <= i - 1; j++) { - ibz_copy(&(*G)[j][i], &(*G)[i][j]); + ibz_copy(&G->m[j][i], &G->m[i][j]); } } @@ -289,8 +289,8 @@ quat_order_discriminant(ibz_t *disc, const quat_lattice_t *order, const quat_alg ibz_mat_4x4_transpose(&transposed, &(order->basis)); // multiply gram matrix by 2 because of reduced trace ibz_mat_4x4_identity(&norm); - ibz_copy(&(norm[2][2]), &(alg->p)); - ibz_copy(&(norm[3][3]), &(alg->p)); + ibz_copy(&(norm.m[2][2]), &(alg->p)); + ibz_copy(&(norm.m[3][3]), &(alg->p)); ibz_mat_4x4_scalar_mul(&norm, &ibz_const_two, &norm); ibz_mat_4x4_mul(&prod, &transposed, &norm); ibz_mat_4x4_mul(&prod, &prod, &(order->basis)); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/intbig.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/intbig.c index b0462dc8b5..e219bf3d96 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/intbig.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/intbig.c @@ -114,48 +114,48 @@ DEBUG_STR_FUN_4(const char *op, const ibz_t *arg1, const ibz_t *arg2, const ibz_ * @{ */ -const __mpz_struct ibz_const_zero[1] = { +const ibz_t ibz_const_zero = {{ { ._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]){ 0 }, } -}; +}}; -const __mpz_struct ibz_const_one[1] = { +const ibz_t ibz_const_one = {{ { ._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]){ 1 }, } -}; +}}; -const __mpz_struct ibz_const_two[1] = { +const ibz_t ibz_const_two = {{ { ._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]){ 2 }, } -}; +}}; -const __mpz_struct ibz_const_three[1] = { +const ibz_t ibz_const_three = {{ { ._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]){ 3 }, } -}; +}}; void ibz_init(ibz_t *x) { - mpz_init(*x); + mpz_init(x->i); } void ibz_finalize(ibz_t *x) { - mpz_clear(*x); + mpz_clear(x->i); } void @@ -168,7 +168,7 @@ ibz_add(ibz_t *sum, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_add(*sum, *a, *b); + mpz_add(sum->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_3("ibz_add", sum, &a_cp, &b_cp); ibz_finalize(&a_cp); @@ -186,7 +186,7 @@ ibz_sub(ibz_t *diff, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, 
b); #endif - mpz_sub(*diff, *a, *b); + mpz_sub(diff->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_3("ibz_sub", diff, &a_cp, &b_cp); @@ -205,7 +205,7 @@ ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_mul(*prod, *a, *b); + mpz_mul(prod->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_3("ibz_mul", prod, &a_cp, &b_cp); ibz_finalize(&a_cp); @@ -216,13 +216,13 @@ ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b) void ibz_neg(ibz_t *neg, const ibz_t *a) { - mpz_neg(*neg, *a); + mpz_neg(neg->i, a->i); } void ibz_abs(ibz_t *abs, const ibz_t *a) { - mpz_abs(*abs, *a); + mpz_abs(abs->i, a->i); } void @@ -235,7 +235,7 @@ ibz_div(ibz_t *quotient, ibz_t *remainder, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_tdiv_qr(*quotient, *remainder, *a, *b); + mpz_tdiv_qr(quotient->i, remainder->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_4("ibz_div", quotient, remainder, &a_cp, &b_cp); ibz_finalize(&a_cp); @@ -251,7 +251,7 @@ ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp) ibz_init(&a_cp); ibz_copy(&a_cp, a); #endif - mpz_tdiv_q_2exp(*quotient, *a, exp); + mpz_tdiv_q_2exp(quotient->i, a->i, exp); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_MP2_INT("ibz_div_2exp,%Zx,%Zx,%x\n", quotient, &a_cp, exp); ibz_finalize(&a_cp); @@ -261,50 +261,50 @@ ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp) void ibz_div_floor(ibz_t *q, ibz_t *r, const ibz_t *n, const ibz_t *d) { - mpz_fdiv_qr(*q, *r, *n, *d); + mpz_fdiv_qr(q->i, r->i, n->i, d->i); } void ibz_mod(ibz_t *r, const ibz_t *a, const ibz_t *b) { - mpz_mod(*r, *a, *b); + mpz_mod(r->i, a->i, b->i); } unsigned long int -ibz_mod_ui(const mpz_t *n, unsigned long int d) +ibz_mod_ui(const ibz_t *n, unsigned long int d) { - return mpz_fdiv_ui(*n, d); + return mpz_fdiv_ui(n->i, d); } int ibz_divides(const ibz_t *a, const ibz_t *b) { - return mpz_divisible_p(*a, *b); + return mpz_divisible_p(a->i, b->i); } void ibz_pow(ibz_t *pow, const ibz_t *x, uint32_t e) { - mpz_pow_ui(*pow, *x, e); + mpz_pow_ui(pow->i, x->i, e); } void ibz_pow_mod(ibz_t *pow, const ibz_t *x, const ibz_t *e, const ibz_t *m) { - mpz_powm(*pow, *x, *e, *m); + mpz_powm(pow->i, x->i, e->i, m->i); DEBUG_STR_FUN_4("ibz_pow_mod", pow, x, e, m); } int ibz_two_adic(ibz_t *pow) { - return mpz_scan1(*pow, 0); + return mpz_scan1(pow->i, 0); } int ibz_cmp(const ibz_t *a, const ibz_t *b) { - int ret = mpz_cmp(*a, *b); + int ret = mpz_cmp(a->i, b->i); DEBUG_STR_FUN_INT_MP2("ibz_cmp", ret, a, b); return ret; } @@ -312,7 +312,7 @@ ibz_cmp(const ibz_t *a, const ibz_t *b) int ibz_is_zero(const ibz_t *x) { - int ret = !mpz_cmp_ui(*x, 0); + int ret = !mpz_cmp_ui(x->i, 0); DEBUG_STR_FUN_INT_MP("ibz_is_zero", ret, x); return ret; } @@ -320,7 +320,7 @@ ibz_is_zero(const ibz_t *x) int ibz_is_one(const ibz_t *x) { - int ret = !mpz_cmp_ui(*x, 1); + int ret = !mpz_cmp_ui(x->i, 1); DEBUG_STR_FUN_INT_MP("ibz_is_one", ret, x); return ret; } @@ -328,7 +328,7 @@ ibz_is_one(const ibz_t *x) int ibz_cmp_int32(const ibz_t *x, int32_t y) { - int ret = mpz_cmp_si(*x, (signed long int)y); + int ret = mpz_cmp_si(x->i, (signed long int)y); DEBUG_STR_FUN_INT_MP_INT("ibz_cmp_int32", ret, x, y); return ret; } @@ -336,7 +336,7 @@ ibz_cmp_int32(const ibz_t *x, int32_t y) int ibz_is_even(const ibz_t *x) { - int ret = !mpz_tstbit(*x, 0); + int ret = !mpz_tstbit(x->i, 0); DEBUG_STR_FUN_INT_MP("ibz_is_even", ret, x); return ret; } @@ -344,7 +344,7 @@ ibz_is_even(const ibz_t *x) int ibz_is_odd(const ibz_t *x) { - int ret = 
mpz_tstbit(*x, 0); + int ret = mpz_tstbit(x->i, 0); DEBUG_STR_FUN_INT_MP("ibz_is_odd", ret, x); return ret; } @@ -352,7 +352,7 @@ ibz_is_odd(const ibz_t *x) void ibz_set(ibz_t *i, int32_t x) { - mpz_set_si(*i, x); + mpz_set_si(i->i, x); } int @@ -361,7 +361,7 @@ ibz_convert_to_str(const ibz_t *i, char *str, int base) if (!str || (base != 10 && base != 16)) return 0; - mpz_get_str(str, base, *i); + mpz_get_str(str, base, i->i); return 1; } @@ -380,29 +380,29 @@ ibz_print(const ibz_t *num, int base) int ibz_set_from_str(ibz_t *i, const char *str, int base) { - return (1 + mpz_set_str(*i, str, base)); + return (1 + mpz_set_str(i->i, str, base)); } void ibz_copy(ibz_t *target, const ibz_t *value) { - mpz_set(*target, *value); + mpz_set(target->i, value->i); } void ibz_swap(ibz_t *a, ibz_t *b) { - mpz_swap(*a, *b); + mpz_swap(a->i, b->i); } int32_t ibz_get(const ibz_t *i) { #if LONG_MAX == INT32_MAX - return (int32_t)mpz_get_si(*i); + return (int32_t)mpz_get_si(i->i); #elif LONG_MAX > INT32_MAX // Extracts the sign bit and the 31 least significant bits - signed long int t = mpz_get_si(*i); + signed long int t = mpz_get_si(i->i); return (int32_t)((t >> (sizeof(signed long int) * 8 - 32)) & INT32_C(0x80000000)) | (t & INT32_C(0x7FFFFFFF)); #else #error Unsupported configuration: LONG_MAX must be >= INT32_MAX @@ -417,10 +417,10 @@ ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b) mpz_t tmp; mpz_t bmina; mpz_init(bmina); - mpz_sub(bmina, *b, *a); + mpz_sub(bmina, b->i, a->i); if (mpz_sgn(bmina) == 0) { - mpz_set(*rand, *a); + mpz_set(rand->i, a->i); mpz_clear(bmina); return 1; } @@ -466,7 +466,7 @@ ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b) break; } while (1); - mpz_add(*rand, tmp, *a); + mpz_add(rand->i, tmp, a->i); err: mpz_clear(bmina); return ret; @@ -534,19 +534,19 @@ int ibz_rand_interval_minm_m(ibz_t *rand, int32_t m) { int ret = 1; - mpz_t m_big; + ibz_t m_big; // m_big = 2 * m - mpz_init_set_si(m_big, m); - mpz_add(m_big, m_big, m_big); + mpz_init_set_si(m_big.i, m); + mpz_add(m_big.i, m_big.i, m_big.i); // Sample in [0, 2*m] ret = ibz_rand_interval(rand, &ibz_const_zero, &m_big); // Adjust to range [-m, m] - mpz_sub_ui(*rand, *rand, m); + mpz_sub_ui(rand->i, rand->i, m); - mpz_clear(m_big); + mpz_clear(m_big.i); return ret; } @@ -555,41 +555,41 @@ int ibz_rand_interval_bits(ibz_t *rand, uint32_t m) { int ret = 1; - mpz_t tmp; - mpz_t low; - mpz_init_set_ui(tmp, 1); - mpz_mul_2exp(tmp, tmp, m); - mpz_init(low); - mpz_neg(low, tmp); + ibz_t tmp; + ibz_t low; + mpz_init_set_ui(tmp.i, 1); + mpz_mul_2exp(tmp.i, tmp.i, m); + mpz_init(low.i); + mpz_neg(low.i, tmp.i); ret = ibz_rand_interval(rand, &low, &tmp); - mpz_clear(tmp); - mpz_clear(low); + mpz_clear(tmp.i); + mpz_clear(low.i); if (ret != 1) goto err; - mpz_sub_ui(*rand, *rand, (unsigned long int)m); + mpz_sub_ui(rand->i, rand->i, (unsigned long int)m); return ret; err: - mpz_clear(tmp); - mpz_clear(low); + mpz_clear(tmp.i); + mpz_clear(low.i); return ret; } int ibz_bitsize(const ibz_t *a) { - return (int)mpz_sizeinbase(*a, 2); + return (int)mpz_sizeinbase(a->i, 2); } int ibz_size_in_base(const ibz_t *a, int base) { - return (int)mpz_sizeinbase(*a, base); + return (int)mpz_sizeinbase(a->i, base); } void ibz_copy_digits(ibz_t *target, const digit_t *dig, int dig_len) { - mpz_import(*target, dig_len, -1, sizeof(digit_t), 0, 0, dig); + mpz_import(target->i, dig_len, -1, sizeof(digit_t), 0, 0, dig); } void @@ -600,13 +600,13 @@ ibz_to_digits(digit_t *target, const ibz_t *ibz) // The next line ensures zero is 
written to the first limb of target if ibz is zero; // target is then overwritten by the actual value if it is not. target[0] = 0; - mpz_export(target, NULL, -1, sizeof(digit_t), 0, 0, *ibz); + mpz_export(target, NULL, -1, sizeof(digit_t), 0, 0, ibz->i); } int ibz_probab_prime(const ibz_t *n, int reps) { - int ret = mpz_probab_prime_p(*n, reps); + int ret = mpz_probab_prime_p(n->i, reps); DEBUG_STR_FUN_INT_MP_INT("ibz_probab_prime", ret, n, reps); return ret; } @@ -614,26 +614,26 @@ ibz_probab_prime(const ibz_t *n, int reps) void ibz_gcd(ibz_t *gcd, const ibz_t *a, const ibz_t *b) { - mpz_gcd(*gcd, *a, *b); + mpz_gcd(gcd->i, a->i, b->i); } int ibz_invmod(ibz_t *inv, const ibz_t *a, const ibz_t *mod) { - return (mpz_invert(*inv, *a, *mod) ? 1 : 0); + return (mpz_invert(inv->i, a->i, mod->i) ? 1 : 0); } int ibz_legendre(const ibz_t *a, const ibz_t *p) { - return mpz_legendre(*a, *p); + return mpz_legendre(a->i, p->i); } int ibz_sqrt(ibz_t *sqrt, const ibz_t *a) { - if (mpz_perfect_square_p(*a)) { - mpz_sqrt(*sqrt, *a); + if (mpz_perfect_square_p(a->i)) { + mpz_sqrt(sqrt->i, a->i); return 1; } else { return 0; @@ -643,7 +643,7 @@ ibz_sqrt(ibz_t *sqrt, const ibz_t *a) void ibz_sqrt_floor(ibz_t *sqrt, const ibz_t *a) { - mpz_sqrt(*sqrt, *a); + mpz_sqrt(sqrt->i, a->i); } int @@ -686,85 +686,85 @@ ibz_sqrt_mod_p(ibz_t *sqrt, const ibz_t *a, const ibz_t *p) int ret = 1; - mpz_mod(amod, *a, *p); + mpz_mod(amod, a->i, p->i); if (mpz_cmp_ui(amod, 0) < 0) { - mpz_add(amod, *p, amod); + mpz_add(amod, p->i, amod); } - if (mpz_legendre(amod, *p) != 1) { + if (mpz_legendre(amod, p->i) != 1) { ret = 0; goto end; } - mpz_sub_ui(pm1, *p, 1); + mpz_sub_ui(pm1, p->i, 1); - if (mpz_mod_ui(tmp, *p, 4) == 3) { + if (mpz_mod_ui(tmp, p->i, 4) == 3) { // p % 4 == 3 - mpz_add_ui(tmp, *p, 1); + mpz_add_ui(tmp, p->i, 1); mpz_fdiv_q_2exp(tmp, tmp, 2); - mpz_powm(*sqrt, amod, tmp, *p); - } else if (mpz_mod_ui(tmp, *p, 8) == 5) { + mpz_powm(sqrt->i, amod, tmp, p->i); + } else if (mpz_mod_ui(tmp, p->i, 8) == 5) { // p % 8 == 5 - mpz_sub_ui(tmp, *p, 1); + mpz_sub_ui(tmp, p->i, 1); mpz_fdiv_q_2exp(tmp, tmp, 2); - mpz_powm(tmp, amod, tmp, *p); // a^{(p-1)/4} mod p + mpz_powm(tmp, amod, tmp, p->i); // a^{(p-1)/4} mod p if (!mpz_cmp_ui(tmp, 1)) { - mpz_add_ui(tmp, *p, 3); + mpz_add_ui(tmp, p->i, 3); mpz_fdiv_q_2exp(tmp, tmp, 3); - mpz_powm(*sqrt, amod, tmp, *p); // a^{(p+3)/8} mod p + mpz_powm(sqrt->i, amod, tmp, p->i); // a^{(p+3)/8} mod p } else { - mpz_sub_ui(tmp, *p, 5); + mpz_sub_ui(tmp, p->i, 5); mpz_fdiv_q_2exp(tmp, tmp, 3); // (p - 5) / 8 mpz_mul_2exp(a4, amod, 2); // 4*a - mpz_powm(tmp, a4, tmp, *p); + mpz_powm(tmp, a4, tmp, p->i); mpz_mul_2exp(a2, amod, 1); mpz_mul(tmp, a2, tmp); - mpz_mod(*sqrt, tmp, *p); + mpz_mod(sqrt->i, tmp, p->i); } } else { // p % 8 == 1 -> Shanks-Tonelli int e = 0; - mpz_sub_ui(q, *p, 1); + mpz_sub_ui(q, p->i, 1); while (mpz_tstbit(q, e) == 0) e++; mpz_fdiv_q_2exp(q, q, e); // 1. find generator - non-quadratic residue mpz_set_ui(qnr, 2); - while (mpz_legendre(qnr, *p) != -1) + while (mpz_legendre(qnr, p->i) != -1) mpz_add_ui(qnr, qnr, 1); - mpz_powm(z, qnr, q, *p); + mpz_powm(z, qnr, q, p->i); // 2. 
Initialize mpz_set(y, z); - mpz_powm(y, amod, q, *p); // y = a^q mod p + mpz_powm(y, amod, q, p->i); // y = a^q mod p mpz_add_ui(tmp, q, 1); // tmp = (q + 1) / 2 mpz_fdiv_q_2exp(tmp, tmp, 1); - mpz_powm(x, amod, tmp, *p); // x = a^(q + 1)/2 mod p + mpz_powm(x, amod, tmp, p->i); // x = a^(q + 1)/2 mod p mpz_set_ui(exp, 1); mpz_mul_2exp(exp, exp, e - 2); for (int i = 0; i < e; ++i) { - mpz_powm(b, y, exp, *p); + mpz_powm(b, y, exp, p->i); if (!mpz_cmp(b, pm1)) { mpz_mul(x, x, z); - mpz_mod(x, x, *p); + mpz_mod(x, x, p->i); mpz_mul(y, y, z); mpz_mul(y, y, z); - mpz_mod(y, y, *p); + mpz_mod(y, y, p->i); } - mpz_powm_ui(z, z, 2, *p); + mpz_powm_ui(z, z, 2, p->i); mpz_fdiv_q_2exp(exp, exp, 1); } - mpz_set(*sqrt, x); + mpz_set(sqrt->i, x); } #ifdef DEBUG_VERBOSE diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/intbig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/intbig.h index a0c2c02477..28e478ff7f 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/intbig.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/intbig.h @@ -33,7 +33,9 @@ * * For integers of arbitrary size, used by intbig module, using gmp */ -typedef mpz_t ibz_t; +typedef struct { + mpz_t i; +} ibz_t; /** @} */ @@ -129,7 +131,7 @@ int ibz_two_adic(ibz_t *pow); */ void ibz_mod(ibz_t *r, const ibz_t *a, const ibz_t *b); -unsigned long int ibz_mod_ui(const mpz_t *n, unsigned long int d); +unsigned long int ibz_mod_ui(const ibz_t *n, unsigned long int d); /** @brief Test if a = 0 mod b */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/l2.c index 5491ee44d0..ea32213c75 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/l2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/l2.c @@ -57,25 +57,25 @@ to_etabar(fp_num *x) } static void -from_mpz(const mpz_t x, fp_num *r) +from_mpz(const ibz_t *x, fp_num *r) { long exp = 0; - r->s = mpz_get_d_2exp(&exp, x); + r->s = mpz_get_d_2exp(&exp, x->i); r->e = exp; } static void -to_mpz(const fp_num *x, mpz_t r) +to_mpz(const fp_num *x, ibz_t *r) { if (x->e >= DBL_MANT_DIG) { double s = x->s * 0x1P53; - mpz_set_d(r, s); - mpz_mul_2exp(r, r, x->e - DBL_MANT_DIG); + mpz_set_d(r->i, s); + mpz_mul_2exp(r->i, r->i, x->e - DBL_MANT_DIG); } else if (x->e < 0) { - mpz_set_ui(r, 0); + mpz_set_ui(r->i, 0); } else { double s = ldexp(x->s, x->e); - mpz_set_d(r, round(s)); + mpz_set_d(r->i, round(s)); } } @@ -203,7 +203,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) ibz_init(&tmpI); // Main L² loop - from_mpz((*G)[0][0], &r[0][0]); + from_mpz(&G->m[0][0], &r[0][0]); int kappa = 1; while (kappa < 4) { // size reduce b_κ @@ -213,7 +213,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Loop invariant: // r[κ][j] ≈ u[κ][j] ‖b_j*‖² ≈ 〈b_κ, b_j*〉 for (int j = 0; j <= kappa; j++) { - from_mpz((*G)[kappa][j], &r[kappa][j]); + from_mpz(&G->m[kappa][j], &r[kappa][j]); for (int k = 0; k < j; k++) { fp_mul(&r[kappa][k], &u[j][k], &tmpF); fp_sub(&r[kappa][j], &tmpF, &r[kappa][j]); @@ -229,22 +229,22 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) done = 0; copy(&u[kappa][i], &Xf); fp_round(&Xf); - to_mpz(&Xf, X); + to_mpz(&Xf, &X); // Update basis: b_κ ← b_κ - X·b_i for (int j = 0; j < 4; j++) { - ibz_mul(&tmpI, &X, &(*basis)[j][i]); - ibz_sub(&(*basis)[j][kappa], &(*basis)[j][kappa], &tmpI); + ibz_mul(&tmpI, &X, &basis->m[j][i]); + ibz_sub(&basis->m[j][kappa], &basis->m[j][kappa], &tmpI); } // Update lower half of the Gram matrix // = - 2X + X² = // - X - X( - X·) //// 〈b_κ, b_κ〉 ← 〈b_κ, b_κ〉 - X·〈b_κ, 
b_i〉 - ibz_mul(&tmpI, &X, &(*G)[kappa][i]); - ibz_sub(&(*G)[kappa][kappa], &(*G)[kappa][kappa], &tmpI); + ibz_mul(&tmpI, &X, &G->m[kappa][i]); + ibz_sub(&G->m[kappa][kappa], &G->m[kappa][kappa], &tmpI); for (int j = 0; j < 4; j++) { // works because i < κ // 〈b_κ, b_j〉 ← 〈b_κ, b_j〉 - X·〈b_i, b_j〉 - ibz_mul(&tmpI, &X, SYM((*G), i, j)); - ibz_sub(SYM((*G), kappa, j), SYM((*G), kappa, j), &tmpI); + ibz_mul(&tmpI, &X, SYM(G->m, i, j)); + ibz_sub(SYM(G->m, kappa, j), SYM(G->m, kappa, j), &tmpI); } // After the loop: //// 〈b_κ,b_κ〉 ← 〈b_κ,b_κ〉 - X·〈b_κ,b_i〉 - X·(〈b_κ,b_i〉 - X·〈b_i, @@ -261,7 +261,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Check Lovasz' conditions // lovasz[0] = ‖b_κ‖² - from_mpz((*G)[kappa][kappa], &lovasz[0]); + from_mpz(&G->m[kappa][kappa], &lovasz[0]); // lovasz[i] = lovasz[i-1] - u[κ][i-1]·r[κ][i-1] for (int i = 1; i < kappa; i++) { fp_mul(&u[kappa][i - 1], &r[kappa][i - 1], &tmpF); @@ -279,11 +279,11 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Insert b_κ before b_swap in the basis and in the lower half Gram matrix for (int j = kappa; j > swap; j--) { for (int i = 0; i < 4; i++) { - ibz_swap(&(*basis)[i][j], &(*basis)[i][j - 1]); + ibz_swap(&basis->m[i][j], &basis->m[i][j - 1]); if (i == j - 1) - ibz_swap(&(*G)[i][i], &(*G)[j][j]); + ibz_swap(&G->m[i][i], &G->m[j][j]); else if (i != j) - ibz_swap(SYM((*G), i, j), SYM((*G), i, j - 1)); + ibz_swap(SYM(G->m, i, j), SYM(G->m, i, j - 1)); } } // Copy row u[κ] and r[κ] in swap position, ignore what follows @@ -318,7 +318,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Fill in the upper half of the Gram matrix for (int i = 0; i < 4; i++) { for (int j = i + 1; j < 4; j++) - ibz_copy(&(*G)[i][j], &(*G)[j][i]); + ibz_copy(&G->m[i][j], &G->m[j][i]); } // Clearinghouse diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lat_ball.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lat_ball.c index c7bbb9682f..3f7476988c 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lat_ball.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lat_ball.c @@ -28,10 +28,10 @@ quat_lattice_bound_parallelogram(ibz_vec_4_t *box, ibz_mat_4x4_t *U, const ibz_m // Compute the parallelogram's bounds int trivial = 1; for (int i = 0; i < 4; i++) { - ibz_mul(&(*box)[i], &dualG[i][i], radius); - ibz_div(&(*box)[i], &rem, &(*box)[i], &denom); - ibz_sqrt_floor(&(*box)[i], &(*box)[i]); - trivial &= ibz_is_zero(&(*box)[i]); + ibz_mul(&box->v[i], &dualG.m[i][i], radius); + ibz_div(&box->v[i], &rem, &box->v[i], &denom); + ibz_sqrt_floor(&box->v[i], &box->v[i]); + trivial &= ibz_is_zero(&box->v[i]); } // Compute the transpose transformation matrix @@ -95,12 +95,12 @@ quat_lattice_sample_from_ball(quat_alg_elem_t *res, do { // Sample vector for (int i = 0; i < 4; i++) { - if (ibz_is_zero(&box[i])) { - ibz_copy(&x[i], &ibz_const_zero); + if (ibz_is_zero(&box.v[i])) { + ibz_copy(&x.v[i], &ibz_const_zero); } else { - ibz_add(&tmp, &box[i], &box[i]); - ok &= ibz_rand_interval(&x[i], &ibz_const_zero, &tmp); - ibz_sub(&x[i], &x[i], &box[i]); + ibz_add(&tmp, &box.v[i], &box.v[i]); + ok &= ibz_rand_interval(&x.v[i], &ibz_const_zero, &tmp); + ibz_sub(&x.v[i], &x.v[i], &box.v[i]); if (!ok) goto err; } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lattice.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lattice.c index c98bae9499..ef7b9ccdcc 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lattice.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lattice.c @@ -57,7 +57,7 @@ 
quat_lattice_conjugate_without_hnf(quat_lattice_t *conj, const quat_lattice_t *l for (int row = 1; row < 4; ++row) { for (int col = 0; col < 4; ++col) { - ibz_neg(&(conj->basis[row][col]), &(conj->basis[row][col])); + ibz_neg(&(conj->basis.m[row][col]), &(conj->basis.m[row][col])); } } } @@ -96,14 +96,14 @@ quat_lattice_add(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lat ibz_mat_4x4_scalar_mul(&tmp, &(lat1->denom), &(lat2->basis)); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(generators[j][i]), &(tmp[i][j])); + ibz_copy(&(generators[j].v[i]), &(tmp.m[i][j])); } } ibz_mat_4x4_inv_with_det_as_denom(NULL, &det1, &tmp); ibz_mat_4x4_scalar_mul(&tmp, &(lat2->denom), &(lat1->basis)); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(generators[4 + j][i]), &(tmp[i][j])); + ibz_copy(&(generators[4 + j].v[i]), &(tmp.m[i][j])); } } ibz_mat_4x4_inv_with_det_as_denom(NULL, &det2, &tmp); @@ -151,12 +151,12 @@ quat_lattice_mat_alg_coord_mul_without_hnf(ibz_mat_4x4_t *prod, ibz_vec_4_init(&p); ibz_vec_4_init(&a); for (int i = 0; i < 4; i++) { - ibz_vec_4_copy_ibz(&a, &((*lat)[0][i]), &((*lat)[1][i]), &((*lat)[2][i]), &((*lat)[3][i])); + ibz_vec_4_copy_ibz(&a, &(lat->m[0][i]), &(lat->m[1][i]), &(lat->m[2][i]), &(lat->m[3][i])); quat_alg_coord_mul(&p, &a, coord, alg); - ibz_copy(&((*prod)[0][i]), &(p[0])); - ibz_copy(&((*prod)[1][i]), &(p[1])); - ibz_copy(&((*prod)[2][i]), &(p[2])); - ibz_copy(&((*prod)[3][i]), &(p[3])); + ibz_copy(&(prod->m[0][i]), &(p.v[0])); + ibz_copy(&(prod->m[1][i]), &(p.v[1])); + ibz_copy(&(prod->m[2][i]), &(p.v[2])); + ibz_copy(&(prod->m[3][i]), &(p.v[3])); } ibz_vec_4_finalize(&p); ibz_vec_4_finalize(&a); @@ -191,15 +191,15 @@ quat_lattice_mul(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lat ibz_vec_4_init(&(generators[i])); for (int k = 0; k < 4; k++) { ibz_vec_4_copy_ibz( - &elem1, &(lat1->basis[0][k]), &(lat1->basis[1][k]), &(lat1->basis[2][k]), &(lat1->basis[3][k])); + &elem1, &(lat1->basis.m[0][k]), &(lat1->basis.m[1][k]), &(lat1->basis.m[2][k]), &(lat1->basis.m[3][k])); for (int i = 0; i < 4; i++) { ibz_vec_4_copy_ibz( - &elem2, &(lat2->basis[0][i]), &(lat2->basis[1][i]), &(lat2->basis[2][i]), &(lat2->basis[3][i])); + &elem2, &(lat2->basis.m[0][i]), &(lat2->basis.m[1][i]), &(lat2->basis.m[2][i]), &(lat2->basis.m[3][i])); quat_alg_coord_mul(&elem_res, &elem1, &elem2, alg); for (int j = 0; j < 4; j++) { if (k == 0) - ibz_copy(&(detmat[i][j]), &(elem_res[j])); - ibz_copy(&(generators[4 * k + i][j]), &(elem_res[j])); + ibz_copy(&(detmat.m[i][j]), &(elem_res.v[j])); + ibz_copy(&(generators[4 * k + i].v[j]), &(elem_res.v[j])); } } } @@ -239,7 +239,7 @@ quat_lattice_contains(ibz_vec_4_t *coord, const quat_lattice_t *lat, const quat_ // copy result if (divisible && (coord != NULL)) { for (int i = 0; i < 4; i++) { - ibz_copy(&((*coord)[i]), &(work_coord[i])); + ibz_copy(&(coord->v[i]), &(work_coord.v[i])); } } ibz_finalize(&prod); @@ -292,7 +292,7 @@ quat_lattice_hnf(quat_lattice_t *lat) ibz_vec_4_init(&(generators[i])); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(generators[j][i]), &(lat->basis[i][j])); + ibz_copy(&(generators[j].v[i]), &(lat->basis.m[i][j])); } } ibz_mat_4xn_hnf_mod_core(&(lat->basis), 4, generators, &mod); @@ -309,19 +309,19 @@ quat_lattice_gram(ibz_mat_4x4_t *G, const quat_lattice_t *lattice, const quat_al ibz_init(&tmp); for (int i = 0; i < 4; i++) { for (int j = 0; j <= i; j++) { - ibz_set(&(*G)[i][j], 0); + ibz_set(&G->m[i][j], 0); for (int k = 0; k < 
4; k++) { - ibz_mul(&tmp, &(lattice->basis)[k][i], &(lattice->basis)[k][j]); + ibz_mul(&tmp, &(lattice->basis.m)[k][i], &(lattice->basis.m)[k][j]); if (k >= 2) ibz_mul(&tmp, &tmp, &alg->p); - ibz_add(&(*G)[i][j], &(*G)[i][j], &tmp); + ibz_add(&G->m[i][j], &G->m[i][j], &tmp); } - ibz_mul(&(*G)[i][j], &(*G)[i][j], &ibz_const_two); + ibz_mul(&G->m[i][j], &G->m[i][j], &ibz_const_two); } } for (int i = 0; i < 4; i++) { for (int j = i + 1; j < 4; j++) { - ibz_copy(&(*G)[i][j], &(*G)[j][i]); + ibz_copy(&G->m[i][j], &G->m[j][i]); } } ibz_finalize(&tmp); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lll_applications.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lll_applications.c index 6c763b8c04..f5e9af922b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lll_applications.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lll_applications.c @@ -17,9 +17,9 @@ quat_lideal_reduce_basis(ibz_mat_4x4_t *reduced, quat_lll_core(gram, reduced); ibz_mat_4x4_scalar_mul(gram, &gram_corrector, gram); for (int i = 0; i < 4; i++) { - ibz_div_2exp(&((*gram)[i][i]), &((*gram)[i][i]), 1); + ibz_div_2exp(&(gram->m[i][i]), &(gram->m[i][i]), 1); for (int j = i + 1; j < 4; j++) { - ibz_set(&((*gram)[i][j]), 0); + ibz_set(&(gram->m[i][j]), 0); } } ibz_finalize(&gram_corrector); @@ -79,10 +79,10 @@ quat_lideal_prime_norm_reduced_equivalent(quat_left_ideal_t *lideal, while (!found && ctr < equiv_num_iter) { ctr++; // we select our linear combination at random - ibz_rand_interval_minm_m(&new_alpha.coord[0], equiv_bound_coeff); - ibz_rand_interval_minm_m(&new_alpha.coord[1], equiv_bound_coeff); - ibz_rand_interval_minm_m(&new_alpha.coord[2], equiv_bound_coeff); - ibz_rand_interval_minm_m(&new_alpha.coord[3], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[0], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[1], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[2], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[3], equiv_bound_coeff); // computation of the norm of the vector sampled quat_qf_eval(&tmp, &gram, &new_alpha.coord); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/normeq.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/normeq.c index 8c133dd095..aadbbe06c7 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/normeq.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/normeq.c @@ -13,23 +13,23 @@ quat_lattice_O0_set(quat_lattice_t *O0) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&(O0->basis[i][j]), 0); + ibz_set(&(O0->basis.m[i][j]), 0); } } ibz_set(&(O0->denom), 2); - ibz_set(&(O0->basis[0][0]), 2); - ibz_set(&(O0->basis[1][1]), 2); - ibz_set(&(O0->basis[2][2]), 1); - ibz_set(&(O0->basis[1][2]), 1); - ibz_set(&(O0->basis[3][3]), 1); - ibz_set(&(O0->basis[0][3]), 1); + ibz_set(&(O0->basis.m[0][0]), 2); + ibz_set(&(O0->basis.m[1][1]), 2); + ibz_set(&(O0->basis.m[2][2]), 1); + ibz_set(&(O0->basis.m[1][2]), 1); + ibz_set(&(O0->basis.m[3][3]), 1); + ibz_set(&(O0->basis.m[0][3]), 1); } void quat_lattice_O0_set_extremal(quat_p_extremal_maximal_order_t *O0) { - ibz_set(&O0->z.coord[1], 1); - ibz_set(&O0->t.coord[2], 1); + ibz_set(&O0->z.coord.v[1], 1); + ibz_set(&O0->t.coord.v[2], 1); ibz_set(&O0->z.denom, 1); ibz_set(&O0->t.denom, 1); O0->q = 1; @@ -50,24 +50,24 @@ quat_order_elem_create(quat_alg_elem_t *elem, quat_alg_elem_init(&quat_temp); // elem = x - quat_alg_scalar(elem, &(*coeffs)[0], &ibz_const_one); + quat_alg_scalar(elem, &coeffs->v[0], &ibz_const_one); // quat_temp = i*y - 
quat_alg_scalar(&quat_temp, &((*coeffs)[1]), &ibz_const_one); + quat_alg_scalar(&quat_temp, &(coeffs->v[1]), &ibz_const_one); quat_alg_mul(&quat_temp, &order->z, &quat_temp, Bpoo); // elem = x + i*y quat_alg_add(elem, elem, &quat_temp); // quat_temp = z * j - quat_alg_scalar(&quat_temp, &(*coeffs)[2], &ibz_const_one); + quat_alg_scalar(&quat_temp, &coeffs->v[2], &ibz_const_one); quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); // elem = x + i* + z*j quat_alg_add(elem, elem, &quat_temp); // quat_temp = t * j * i - quat_alg_scalar(&quat_temp, &(*coeffs)[3], &ibz_const_one); + quat_alg_scalar(&quat_temp, &coeffs->v[3], &ibz_const_one); quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); quat_alg_mul(&quat_temp, &quat_temp, &order->z, Bpoo); @@ -143,11 +143,11 @@ quat_represent_integer(quat_alg_elem_t *gamma, ibz_sub(&counter, &counter, &ibz_const_one); // we start by sampling the first coordinate - ibz_rand_interval(&coeffs[2], &ibz_const_one, &bound); + ibz_rand_interval(&coeffs.v[2], &ibz_const_one, &bound); // then, we sample the second coordinate // computing the second bound in temp as sqrt( (adjust_n_gamma - p*coeffs[2]²)/qp ) - ibz_mul(&cornacchia_target, &coeffs[2], &coeffs[2]); + ibz_mul(&cornacchia_target, &coeffs.v[2], &coeffs.v[2]); ibz_mul(&temp, &cornacchia_target, &(params->algebra->p)); ibz_sub(&temp, &adjusted_n_gamma, &temp); ibz_mul(&sq_bound, &q, &(params->algebra->p)); @@ -158,10 +158,10 @@ quat_represent_integer(quat_alg_elem_t *gamma, continue; } // sampling the second value - ibz_rand_interval(&coeffs[3], &ibz_const_one, &temp); + ibz_rand_interval(&coeffs.v[3], &ibz_const_one, &temp); // compute cornacchia_target = n_gamma - p * (z² + q*t²) - ibz_mul(&temp, &coeffs[3], &coeffs[3]); + ibz_mul(&temp, &coeffs.v[3], &coeffs.v[3]); ibz_mul(&temp, &q, &temp); ibz_add(&cornacchia_target, &cornacchia_target, &temp); ibz_mul(&cornacchia_target, &cornacchia_target, &((params->algebra)->p)); @@ -170,7 +170,7 @@ quat_represent_integer(quat_alg_elem_t *gamma, // applying cornacchia if (ibz_probab_prime(&cornacchia_target, params->primality_test_iterations)) - found = ibz_cornacchia_prime(&(coeffs[0]), &(coeffs[1]), &q, &cornacchia_target); + found = ibz_cornacchia_prime(&(coeffs.v[0]), &(coeffs.v[1]), &q, &cornacchia_target); else found = 0; @@ -179,33 +179,33 @@ quat_represent_integer(quat_alg_elem_t *gamma, // the treatmeat depends if the basis contains (1+j)/2 or (1+k)/2 // we must have x = t mod 2 and y = z mod 2 // if q=1 we can simply swap x and y - if (ibz_is_odd(&coeffs[0]) != ibz_is_odd(&coeffs[3])) { - ibz_swap(&coeffs[1], &coeffs[0]); + if (ibz_is_odd(&coeffs.v[0]) != ibz_is_odd(&coeffs.v[3])) { + ibz_swap(&coeffs.v[1], &coeffs.v[0]); } // we further check that (x-t)/2 = 1 mod 2 and (y-z)/2 = 1 mod 2 to ensure that the // resulting endomorphism will behave well for dim 2 computations - found = found && ((ibz_get(&coeffs[0]) - ibz_get(&coeffs[3])) % 4 == 2) && - ((ibz_get(&coeffs[1]) - ibz_get(&coeffs[2])) % 4 == 2); + found = found && ((ibz_get(&coeffs.v[0]) - ibz_get(&coeffs.v[3])) % 4 == 2) && + ((ibz_get(&coeffs.v[1]) - ibz_get(&coeffs.v[2])) % 4 == 2); } if (found) { #ifndef NDEBUG ibz_set(&temp, (params->order->q)); - ibz_mul(&temp, &temp, &(coeffs[1])); - ibz_mul(&temp, &temp, &(coeffs[1])); - ibz_mul(&test, &(coeffs[0]), &(coeffs[0])); + ibz_mul(&temp, &temp, &(coeffs.v[1])); + ibz_mul(&temp, &temp, &(coeffs.v[1])); + ibz_mul(&test, &(coeffs.v[0]), &(coeffs.v[0])); ibz_add(&temp, &temp, &test); assert(0 == ibz_cmp(&temp, &cornacchia_target)); - 
ibz_mul(&cornacchia_target, &(coeffs[3]), &(coeffs[3])); + ibz_mul(&cornacchia_target, &(coeffs.v[3]), &(coeffs.v[3])); ibz_mul(&cornacchia_target, &cornacchia_target, &(params->algebra->p)); - ibz_mul(&temp, &(coeffs[1]), &(coeffs[1])); + ibz_mul(&temp, &(coeffs.v[1]), &(coeffs.v[1])); ibz_add(&cornacchia_target, &cornacchia_target, &temp); ibz_set(&temp, (params->order->q)); ibz_mul(&cornacchia_target, &cornacchia_target, &temp); - ibz_mul(&temp, &(coeffs[0]), &coeffs[0]); + ibz_mul(&temp, &(coeffs.v[0]), &coeffs.v[0]); ibz_add(&cornacchia_target, &cornacchia_target, &temp); - ibz_mul(&temp, &(coeffs[2]), &coeffs[2]); + ibz_mul(&temp, &(coeffs.v[2]), &coeffs.v[2]); ibz_mul(&temp, &temp, &(params->algebra->p)); ibz_add(&cornacchia_target, &cornacchia_target, &temp); assert(0 == ibz_cmp(&cornacchia_target, &adjusted_n_gamma)); @@ -213,8 +213,8 @@ quat_represent_integer(quat_alg_elem_t *gamma, // translate x,y,z,t into the quaternion element gamma quat_order_elem_create(gamma, (params->order), &coeffs, (params->algebra)); #ifndef NDEBUG - quat_alg_norm(&temp, &(coeffs[0]), gamma, (params->algebra)); - assert(ibz_is_one(&(coeffs[0]))); + quat_alg_norm(&temp, &(coeffs.v[0]), gamma, (params->algebra)); + assert(ibz_is_one(&(coeffs.v[0]))); assert(0 == ibz_cmp(&temp, &adjusted_n_gamma)); assert(quat_lattice_contains(NULL, &((params->order)->order), gamma)); #endif @@ -232,10 +232,10 @@ quat_represent_integer(quat_alg_elem_t *gamma, if (found) { // new gamma ibz_mat_4x4_eval(&coeffs, &(((params->order)->order).basis), &coeffs); - ibz_copy(&gamma->coord[0], &coeffs[0]); - ibz_copy(&gamma->coord[1], &coeffs[1]); - ibz_copy(&gamma->coord[2], &coeffs[2]); - ibz_copy(&gamma->coord[3], &coeffs[3]); + ibz_copy(&gamma->coord.v[0], &coeffs.v[0]); + ibz_copy(&gamma->coord.v[1], &coeffs.v[1]); + ibz_copy(&gamma->coord.v[2], &coeffs.v[2]); + ibz_copy(&gamma->coord.v[3], &coeffs.v[3]); ibz_copy(&gamma->denom, &(((params->order)->order).denom)); } // var finalize @@ -279,10 +279,10 @@ quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, // we find a quaternion element of norm divisible by norm while (!found) { // generating a trace-zero element at random - ibz_set(&gen.coord[0], 0); + ibz_set(&gen.coord.v[0], 0); ibz_sub(&n_temp, norm, &ibz_const_one); for (int i = 1; i < 4; i++) - ibz_rand_interval(&gen.coord[i], &ibz_const_zero, &n_temp); + ibz_rand_interval(&gen.coord.v[i], &ibz_const_zero, &n_temp); // first, we compute the norm of the gen quat_alg_norm(&n_temp, &norm_d, &gen, (params->algebra)); @@ -293,7 +293,7 @@ quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, ibz_mod(&disc, &disc, norm); // now we check that -n is a square mod norm // and if the square root exists we compute it - found = ibz_sqrt_mod_p(&gen.coord[0], &disc, norm); + found = ibz_sqrt_mod_p(&gen.coord.v[0], &disc, norm); found = found && !quat_alg_elem_is_zero(&gen); } } else { @@ -319,7 +319,7 @@ quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, found = 0; while (!found) { for (int i = 0; i < 4; i++) { - ibz_rand_interval(&gen_rerand.coord[i], &ibz_const_one, norm); + ibz_rand_interval(&gen_rerand.coord.v[i], &ibz_const_one, norm); } quat_alg_norm(&n_temp, &norm_d, &gen_rerand, (params->algebra)); assert(ibz_is_one(&norm_d)); @@ -348,22 +348,22 @@ quat_change_to_O0_basis(ibz_vec_4_t *vec, const quat_alg_elem_t *el) { ibz_t tmp; ibz_init(&tmp); - ibz_copy(&(*vec)[2], &el->coord[2]); - ibz_add(&(*vec)[2], &(*vec)[2], &(*vec)[2]); // double (not optimal if el->denom is even...) 
- ibz_copy(&(*vec)[3], &el->coord[3]); // double (not optimal if el->denom is even...) - ibz_add(&(*vec)[3], &(*vec)[3], &(*vec)[3]); - ibz_sub(&(*vec)[0], &el->coord[0], &el->coord[3]); - ibz_sub(&(*vec)[1], &el->coord[1], &el->coord[2]); - - assert(ibz_divides(&(*vec)[0], &el->denom)); - assert(ibz_divides(&(*vec)[1], &el->denom)); - assert(ibz_divides(&(*vec)[2], &el->denom)); - assert(ibz_divides(&(*vec)[3], &el->denom)); - - ibz_div(&(*vec)[0], &tmp, &(*vec)[0], &el->denom); - ibz_div(&(*vec)[1], &tmp, &(*vec)[1], &el->denom); - ibz_div(&(*vec)[2], &tmp, &(*vec)[2], &el->denom); - ibz_div(&(*vec)[3], &tmp, &(*vec)[3], &el->denom); + ibz_copy(&vec->v[2], &el->coord.v[2]); + ibz_add(&vec->v[2], &vec->v[2], &vec->v[2]); // double (not optimal if el->denom is even...) + ibz_copy(&vec->v[3], &el->coord.v[3]); // double (not optimal if el->denom is even...) + ibz_add(&vec->v[3], &vec->v[3], &vec->v[3]); + ibz_sub(&vec->v[0], &el->coord.v[0], &el->coord.v[3]); + ibz_sub(&vec->v[1], &el->coord.v[1], &el->coord.v[2]); + + assert(ibz_divides(&vec->v[0], &el->denom)); + assert(ibz_divides(&vec->v[1], &el->denom)); + assert(ibz_divides(&vec->v[2], &el->denom)); + assert(ibz_divides(&vec->v[3], &el->denom)); + + ibz_div(&vec->v[0], &tmp, &vec->v[0], &el->denom); + ibz_div(&vec->v[1], &tmp, &vec->v[1], &el->denom); + ibz_div(&vec->v[2], &tmp, &vec->v[2], &el->denom); + ibz_div(&vec->v[3], &tmp, &vec->v[3], &el->denom); ibz_finalize(&tmp); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/printer.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/printer.c index 6d6a3ca9b7..7702fb7ca4 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/printer.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/printer.c @@ -7,7 +7,7 @@ ibz_mat_2x2_print(const ibz_mat_2x2_t *mat) printf("matrix: "); for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_print(&((*mat)[i][j]), 10); + ibz_print(&(mat->m[i][j]), 10); printf(" "); } printf("\n "); @@ -21,7 +21,7 @@ ibz_mat_4x4_print(const ibz_mat_4x4_t *mat) printf("matrix: "); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_print(&((*mat)[i][j]), 10); + ibz_print(&(mat->m[i][j]), 10); printf(" "); } printf("\n "); @@ -34,7 +34,7 @@ ibz_vec_2_print(const ibz_vec_2_t *vec) { printf("vector: "); for (int i = 0; i < 2; i++) { - ibz_print(&((*vec)[i]), 10); + ibz_print(&(vec->v[i]), 10); printf(" "); } printf("\n\n"); @@ -45,7 +45,7 @@ ibz_vec_4_print(const ibz_vec_4_t *vec) { printf("vector: "); for (int i = 0; i < 4; i++) { - ibz_print(&((*vec)[i]), 10); + ibz_print(&(vec->v[i]), 10); printf(" "); } printf("\n\n"); @@ -61,7 +61,7 @@ quat_lattice_print(const quat_lattice_t *lat) printf("basis: "); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_print(&((lat->basis)[i][j]), 10); + ibz_print(&((lat->basis.m)[i][j]), 10); printf(" "); } printf("\n "); @@ -85,7 +85,7 @@ quat_alg_elem_print(const quat_alg_elem_t *elem) printf("\n"); printf("coordinates: "); for (int i = 0; i < 4; i++) { - ibz_print(&((elem->coord)[i]), 10); + ibz_print(&((elem->coord.v)[i]), 10); printf(" "); } printf("\n\n"); @@ -104,7 +104,7 @@ quat_left_ideal_print(const quat_left_ideal_t *lideal) printf("basis: "); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_print(&((lideal->lattice.basis)[i][j]), 10); + ibz_print(&((lideal->lattice.basis.m)[i][j]), 10); printf(" "); } if (i != 3) { @@ -120,7 +120,7 @@ quat_left_ideal_print(const quat_left_ideal_t *lideal) printf("parent order basis: "); for (int i = 0; i < 4; i++) { for 
(int j = 0; j < 4; j++) { - ibz_print(&((lideal->parent_order->basis)[i][j]), 10); + ibz_print(&((lideal->parent_order->basis.m)[i][j]), 10); printf(" "); } printf("\n "); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion.h index a567657464..2dd70a8c19 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion.h @@ -25,7 +25,9 @@ * * @typedef ibz_vec_2_t */ -typedef ibz_t ibz_vec_2_t[2]; +typedef struct { + ibz_t v[2]; +} ibz_vec_2_t; /** @brief Type for vectors of 4 integers * @@ -33,7 +35,9 @@ typedef ibz_t ibz_vec_2_t[2]; * * Represented as a vector of 4 ibz_t (big integer) elements */ -typedef ibz_t ibz_vec_4_t[4]; +typedef struct { + ibz_t v[4]; +} ibz_vec_4_t; /** @brief Type for 2 by 2 matrices of integers * @@ -41,7 +45,9 @@ typedef ibz_t ibz_vec_4_t[4]; * * Represented as a matrix of 2 vectors of 2 ibz_t (big integer) elements */ -typedef ibz_t ibz_mat_2x2_t[2][2]; +typedef struct { + ibz_t m[2][2]; +} ibz_mat_2x2_t; /** @brief Type for 4 by 4 matrices of integers * @@ -49,7 +55,9 @@ typedef ibz_t ibz_mat_2x2_t[2][2]; * * Represented as a matrix of 4 vectors of 4 ibz_t (big integer) elements */ -typedef ibz_t ibz_mat_4x4_t[4][4]; +typedef struct { + ibz_t m[4][4]; +} ibz_mat_4x4_t; /** * @} */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion_data.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion_data.c index baf3da0059..f9de8b4a4e 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion_data.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/quaternion_data.c @@ -4,3173 +4,3173 @@ const ibz_t QUAT_prime_cofactor = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x41,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x41,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x41,0x0,0x0,0x0,0x0,0x0,0x0,0x8000000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x41,0x0,0x0,0x0,0x0,0x0,0x0,0x8000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x41,0x0,0x0,0x800000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x41,0x0,0x0,0x800000000000000}}}} #endif ; const quat_alg_t QUATALG_PINFTY = { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x4ff}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x4ff}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x4ffffff}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x4ffffff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0x4ffffffffffffff}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0x4ffffffffffffff}}}} #endif }; const 
quat_p_extremal_maximal_order_t EXTREMAL_ORDERS[7] = {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size 
= 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 
32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, 
._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 1}, {{ +}}}, 1}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x10000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x10000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x1000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x1000000000000000}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}}} 
#elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x10000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x10000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x1000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x1000000000000000}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x8000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x8000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x800000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x800000000000000}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = 
(mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x80}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x80}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800000}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x80000000000000}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x80000000000000}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x800}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x8000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x8000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x800000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x800000000000000}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = 
(mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x10000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x10000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x1000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x0,0x1000000000000000}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} 
+{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 5}, {{ +}}}, 5}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x78d4,0x7b85,0x7a64,0xf5f2,0x29b9,0x3696,0x6101,0xb874}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x78d4,0x7b85,0x7a64,0xf5f2,0x29b9,0x3696,0x6101,0xb874}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7b8578d4,0xf5f27a64,0x369629b9,0xb8746101}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7b8578d4,0xf5f27a64,0x369629b9,0xb8746101}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xf5f27a647b8578d4,0xb8746101369629b9}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xf5f27a647b8578d4,0xb8746101369629b9}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = 
(mp_limb_t[]) {0x78d4,0x7b85,0x7a64,0xf5f2,0x29b9,0x3696,0x6101,0xb874}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x78d4,0x7b85,0x7a64,0xf5f2,0x29b9,0x3696,0x6101,0xb874}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7b8578d4,0xf5f27a64,0x369629b9,0xb8746101}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7b8578d4,0xf5f27a64,0x369629b9,0xb8746101}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xf5f27a647b8578d4,0xb8746101369629b9}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xf5f27a647b8578d4,0xb8746101369629b9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbc6a,0x3dc2,0x3d32,0xfaf9,0x14dc,0x9b4b,0x3080,0x5c3a}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbc6a,0x3dc2,0x3d32,0xfaf9,0x14dc,0x9b4b,0x3080,0x5c3a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3dc2bc6a,0xfaf93d32,0x9b4b14dc,0x5c3a3080}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3dc2bc6a,0xfaf93d32,0x9b4b14dc,0x5c3a3080}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xfaf93d323dc2bc6a,0x5c3a30809b4b14dc}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xfaf93d323dc2bc6a,0x5c3a30809b4b14dc}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7d47,0x6fa4,0x2ad5,0x95ad,0x8a4b,0x49be,0x77e7,0xc898,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7d47,0x6fa4,0x2ad5,0x95ad,0x8a4b,0x49be,0x77e7,0xc898,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x6fa47d47,0x95ad2ad5,0x49be8a4b,0xc89877e7,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x6fa47d47,0x95ad2ad5,0x49be8a4b,0xc89877e7,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x95ad2ad56fa47d47,0xc89877e749be8a4b,0x1}}} +{{{._mp_alloc = 0, 
._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x95ad2ad56fa47d47,0xc89877e749be8a4b,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3f47,0x7060,0x5e29,0x3e35,0xd950,0x2a1b,0x10ae,0x78dd,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x280}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3f47,0x7060,0x5e29,0x3e35,0xd950,0x2a1b,0x10ae,0x78dd,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x280}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x70603f47,0x3e355e29,0x2a1bd950,0x78dd10ae,0x0,0x0,0x0,0x2800000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x70603f47,0x3e355e29,0x2a1bd950,0x78dd10ae,0x0,0x0,0x0,0x2800000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3e355e2970603f47,0x78dd10ae2a1bd950,0x0,0x280000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3e355e2970603f47,0x78dd10ae2a1bd950,0x0,0x280000000000000}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbc6a,0x3dc2,0x3d32,0xfaf9,0x14dc,0x9b4b,0x3080,0x5c3a}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbc6a,0x3dc2,0x3d32,0xfaf9,0x14dc,0x9b4b,0x3080,0x5c3a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3dc2bc6a,0xfaf93d32,0x9b4b14dc,0x5c3a3080}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3dc2bc6a,0xfaf93d32,0x9b4b14dc,0x5c3a3080}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xfaf93d323dc2bc6a,0x5c3a30809b4b14dc}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xfaf93d323dc2bc6a,0x5c3a30809b4b14dc}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x3fe7,0x28ee,0x26e8,0xb194,0x6d7a,0xaf58,0xe568,0xd6d}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x3fe7,0x28ee,0x26e8,0xb194,0x6d7a,0xaf58,0xe568,0xd6d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x28ee3fe7,0xb19426e8,0xaf586d7a,0xd6de568}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x28ee3fe7,0xb19426e8,0xaf586d7a,0xd6de568}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xb19426e828ee3fe7,0xd6de568af586d7a}}} +{{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xb19426e828ee3fe7,0xd6de568af586d7a}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x78d4,0x7b85,0x7a64,0xf5f2,0x29b9,0x3696,0x6101,0xb874}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x78d4,0x7b85,0x7a64,0xf5f2,0x29b9,0x3696,0x6101,0xb874}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7b8578d4,0xf5f27a64,0x369629b9,0xb8746101}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7b8578d4,0xf5f27a64,0x369629b9,0xb8746101}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xf5f27a647b8578d4,0xb8746101369629b9}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xf5f27a647b8578d4,0xb8746101369629b9}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 
0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7d47,0x6fa4,0x2ad5,0x95ad,0x8a4b,0x49be,0x77e7,0xc898,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7d47,0x6fa4,0x2ad5,0x95ad,0x8a4b,0x49be,0x77e7,0xc898,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x6fa47d47,0x95ad2ad5,0x49be8a4b,0xc89877e7,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x6fa47d47,0x95ad2ad5,0x49be8a4b,0xc89877e7,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x95ad2ad56fa47d47,0xc89877e749be8a4b,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x95ad2ad56fa47d47,0xc89877e749be8a4b,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} 
+{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 17}, {{ +}}}, 17}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x307a,0x74c8,0x8082,0xb034,0x4e8a,0xc43a,0x399a,0x9ab}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x307a,0x74c8,0x8082,0xb034,0x4e8a,0xc43a,0x399a,0x9ab}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x74c8307a,0xb0348082,0xc43a4e8a,0x9ab399a}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x74c8307a,0xb0348082,0xc43a4e8a,0x9ab399a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xb034808274c8307a,0x9ab399ac43a4e8a}}} +{{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xb034808274c8307a,0x9ab399ac43a4e8a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x954f,0x6bc9,0xca46,0x3d25,0x431b,0x46ed,0x8229,0x4f5,0xe453,0x6eb3,0x4530,0xeb3e,0x5306,0xb3e4,0x306e,0x45}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x954f,0x6bc9,0xca46,0x3d25,0x431b,0x46ed,0x8229,0x4f5,0xe453,0x6eb3,0x4530,0xeb3e,0x5306,0xb3e4,0x306e,0x45}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6bc9954f,0x3d25ca46,0x46ed431b,0x4f58229,0x6eb3e453,0xeb3e4530,0xb3e45306,0x45306e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6bc9954f,0x3d25ca46,0x46ed431b,0x4f58229,0x6eb3e453,0xeb3e4530,0xb3e45306,0x45306e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3d25ca466bc9954f,0x4f5822946ed431b,0xeb3e45306eb3e453,0x45306eb3e45306}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3d25ca466bc9954f,0x4f5822946ed431b,0xeb3e45306eb3e453,0x45306eb3e45306}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc 
= 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe7f,0xca3a,0x2454,0xbd31,0xe562,0xcb4c,0x72f0,0x21}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe7f,0xca3a,0x2454,0xbd31,0xe562,0xcb4c,0x72f0,0x21}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, 
._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xca3a0e7f,0xbd312454,0xcb4ce562,0x2172f0}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xca3a0e7f,0xbd312454,0xcb4ce562,0x2172f0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbd312454ca3a0e7f,0x2172f0cb4ce562}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbd312454ca3a0e7f,0x2172f0cb4ce562}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x307a,0x74c8,0x8082,0xb034,0x4e8a,0xc43a,0x399a,0x9ab}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x307a,0x74c8,0x8082,0xb034,0x4e8a,0xc43a,0x399a,0x9ab}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x74c8307a,0xb0348082,0xc43a4e8a,0x9ab399a}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x74c8307a,0xb0348082,0xc43a4e8a,0x9ab399a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xb034808274c8307a,0x9ab399ac43a4e8a}}} +{{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xb034808274c8307a,0x9ab399ac43a4e8a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = 
(mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 37}, {{ +}}}, 37}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdd36,0xda6b,0xa943,0xd17a,0xe307,0x564c,0x4b0c,0x44d4}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdd36,0xda6b,0xa943,0xd17a,0xe307,0x564c,0x4b0c,0x44d4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xda6bdd36,0xd17aa943,0x564ce307,0x44d44b0c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xda6bdd36,0xd17aa943,0x564ce307,0x44d44b0c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd17aa943da6bdd36,0x44d44b0c564ce307}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd17aa943da6bdd36,0x44d44b0c564ce307}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x3a03,0xc406,0x47c,0xa0a2,0x6dbc,0x1df4,0x796,0x6cee,0xce0c,0xe0c7,0xc7c,0xc7ce,0x7ce0,0xce0c,0xe0c7,0x7c}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x3a03,0xc406,0x47c,0xa0a2,0x6dbc,0x1df4,0x796,0x6cee,0xce0c,0xe0c7,0xc7c,0xc7ce,0x7ce0,0xce0c,0xe0c7,0x7c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xc4063a03,0xa0a2047c,0x1df46dbc,0x6cee0796,0xe0c7ce0c,0xc7ce0c7c,0xce0c7ce0,0x7ce0c7}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xc4063a03,0xa0a2047c,0x1df46dbc,0x6cee0796,0xe0c7ce0c,0xc7ce0c7c,0xce0c7ce0,0x7ce0c7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa0a2047cc4063a03,0x6cee07961df46dbc,0xc7ce0c7ce0c7ce0c,0x7ce0c7ce0c7ce0}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa0a2047cc4063a03,0x6cee07961df46dbc,0xc7ce0c7ce0c7ce0c,0x7ce0c7ce0c7ce0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x188f,0xa1e2,0x2148,0xd9f8,0x2e79,0x1a07,0xe1b2,0xd6}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x188f,0xa1e2,0x2148,0xd9f8,0x2e79,0x1a07,0xe1b2,0xd6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa1e2188f,0xd9f82148,0x1a072e79,0xd6e1b2}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa1e2188f,0xd9f82148,0x1a072e79,0xd6e1b2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xd9f82148a1e2188f,0xd6e1b21a072e79}}} +{{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xd9f82148a1e2188f,0xd6e1b21a072e79}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdd36,0xda6b,0xa943,0xd17a,0xe307,0x564c,0x4b0c,0x44d4}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0xdd36,0xda6b,0xa943,0xd17a,0xe307,0x564c,0x4b0c,0x44d4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xda6bdd36,0xd17aa943,0x564ce307,0x44d44b0c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xda6bdd36,0xd17aa943,0x564ce307,0x44d44b0c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd17aa943da6bdd36,0x44d44b0c564ce307}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd17aa943da6bdd36,0x44d44b0c564ce307}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 41}, {{ +}}}, 41}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x96a4,0x25b,0x14f2,0x3800,0x4e7c,0x7958,0xab7f,0x7bbe,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x96a4,0x25b,0x14f2,0x3800,0x4e7c,0x7958,0xab7f,0x7bbe,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25b96a4,0x380014f2,0x79584e7c,0x7bbeab7f,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25b96a4,0x380014f2,0x79584e7c,0x7bbeab7f,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x380014f2025b96a4,0x7bbeab7f79584e7c,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x380014f2025b96a4,0x7bbeab7f79584e7c,0x1}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x96a4,0x25b,0x14f2,0x3800,0x4e7c,0x7958,0xab7f,0x7bbe,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x96a4,0x25b,0x14f2,0x3800,0x4e7c,0x7958,0xab7f,0x7bbe,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25b96a4,0x380014f2,0x79584e7c,0x7bbeab7f,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25b96a4,0x380014f2,0x79584e7c,0x7bbeab7f,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x380014f2025b96a4,0x7bbeab7f79584e7c,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x380014f2025b96a4,0x7bbeab7f79584e7c,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcb52,0x12d,0xa79,0x1c00,0x273e,0xbcac,0x55bf,0xbddf}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcb52,0x12d,0xa79,0x1c00,0x273e,0xbcac,0x55bf,0xbddf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x12dcb52,0x1c000a79,0xbcac273e,0xbddf55bf}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x12dcb52,0x1c000a79,0xbcac273e,0xbddf55bf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1c000a79012dcb52,0xbddf55bfbcac273e}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1c000a79012dcb52,0xbddf55bfbcac273e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif 
GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x73e3,0x3339,0x19e7,0x4ba1,0x6ebc,0x2702,0xee62,0xdbd0,0x7}}} +{{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x73e3,0x3339,0x19e7,0x4ba1,0x6ebc,0x2702,0xee62,0xdbd0,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x333973e3,0x4ba119e7,0x27026ebc,0xdbd0ee62,0x7}}} +{{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x333973e3,0x4ba119e7,0x27026ebc,0xdbd0ee62,0x7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x4ba119e7333973e3,0xdbd0ee6227026ebc,0x7}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x4ba119e7333973e3,0xdbd0ee6227026ebc,0x7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca33,0x3dd0,0x1d92,0x9f0,0x2f81,0xafe9,0xe395,0x83f7,0xfffc,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x27f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca33,0x3dd0,0x1d92,0x9f0,0x2f81,0xafe9,0xe395,0x83f7,0xfffc,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x27f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3dd0ca33,0x9f01d92,0xafe92f81,0x83f7e395,0xfffffffc,0xffffffff,0xffffffff,0x27fffff}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3dd0ca33,0x9f01d92,0xafe92f81,0x83f7e395,0xfffffffc,0xffffffff,0xffffffff,0x27fffff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9f01d923dd0ca33,0x83f7e395afe92f81,0xfffffffffffffffc,0x27fffffffffffff}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9f01d923dd0ca33,0x83f7e395afe92f81,0xfffffffffffffffc,0x27fffffffffffff}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcb52,0x12d,0xa79,0x1c00,0x273e,0xbcac,0x55bf,0xbddf}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcb52,0x12d,0xa79,0x1c00,0x273e,0xbcac,0x55bf,0xbddf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x12dcb52,0x1c000a79,0xbcac273e,0xbddf55bf}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x12dcb52,0x1c000a79,0xbcac273e,0xbddf55bf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1c000a79012dcb52,0xbddf55bfbcac273e}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1c000a79012dcb52,0xbddf55bfbcac273e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xeb73,0xf93c,0x71c0,0x87f5,0x667a,0xcb3c,0xb9cb,0x12fa}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xeb73,0xf93c,0x71c0,0x87f5,0x667a,0xcb3c,0xb9cb,0x12fa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf93ceb73,0x87f571c0,0xcb3c667a,0x12fab9cb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf93ceb73,0x87f571c0,0xcb3c667a,0x12fab9cb}}}} 
#elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x87f571c0f93ceb73,0x12fab9cbcb3c667a}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x87f571c0f93ceb73,0x12fab9cbcb3c667a}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x96a4,0x25b,0x14f2,0x3800,0x4e7c,0x7958,0xab7f,0x7bbe,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x96a4,0x25b,0x14f2,0x3800,0x4e7c,0x7958,0xab7f,0x7bbe,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25b96a4,0x380014f2,0x79584e7c,0x7bbeab7f,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25b96a4,0x380014f2,0x79584e7c,0x7bbeab7f,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x380014f2025b96a4,0x7bbeab7f79584e7c,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x380014f2025b96a4,0x7bbeab7f79584e7c,0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x73e3,0x3339,0x19e7,0x4ba1,0x6ebc,0x2702,0xee62,0xdbd0,0x7}}} +{{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x73e3,0x3339,0x19e7,0x4ba1,0x6ebc,0x2702,0xee62,0xdbd0,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x333973e3,0x4ba119e7,0x27026ebc,0xdbd0ee62,0x7}}} +{{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x333973e3,0x4ba119e7,0x27026ebc,0xdbd0ee62,0x7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x4ba119e7333973e3,0xdbd0ee6227026ebc,0x7}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x4ba119e7333973e3,0xdbd0ee6227026ebc,0x7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x35}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, 
._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 53}, {{ +}}}, 53}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7ffa,0x55af,0x7b9e,0xe2b9,0xa7af,0x578c,0xf76b,0xc227,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7ffa,0x55af,0x7b9e,0xe2b9,0xa7af,0x578c,0xf76b,0xc227,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x55af7ffa,0xe2b97b9e,0x578ca7af,0xc227f76b,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x55af7ffa,0xe2b97b9e,0x578ca7af,0xc227f76b,0xf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe2b97b9e55af7ffa,0xc227f76b578ca7af,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe2b97b9e55af7ffa,0xc227f76b578ca7af,0xf}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7ffa,0x55af,0x7b9e,0xe2b9,0xa7af,0x578c,0xf76b,0xc227,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7ffa,0x55af,0x7b9e,0xe2b9,0xa7af,0x578c,0xf76b,0xc227,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x55af7ffa,0xe2b97b9e,0x578ca7af,0xc227f76b,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x55af7ffa,0xe2b97b9e,0x578ca7af,0xc227f76b,0xf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, 
._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe2b97b9e55af7ffa,0xc227f76b578ca7af,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe2b97b9e55af7ffa,0xc227f76b578ca7af,0xf}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xbffd,0x2ad7,0xbdcf,0xf15c,0x53d7,0xabc6,0xfbb5,0xe113,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xbffd,0x2ad7,0xbdcf,0xf15c,0x53d7,0xabc6,0xfbb5,0xe113,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2ad7bffd,0xf15cbdcf,0xabc653d7,0xe113fbb5,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2ad7bffd,0xf15cbdcf,0xabc653d7,0xe113fbb5,0x7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xf15cbdcf2ad7bffd,0xe113fbb5abc653d7,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xf15cbdcf2ad7bffd,0xe113fbb5abc653d7,0x7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xd16,0xf02b,0x1ce7,0xa2ef,0x54b,0x2c56,0x5963,0x667,0x6f}}} +{{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xd16,0xf02b,0x1ce7,0xa2ef,0x54b,0x2c56,0x5963,0x667,0x6f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xf02b0d16,0xa2ef1ce7,0x2c56054b,0x6675963,0x6f}}} +{{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xf02b0d16,0xa2ef1ce7,0x2c56054b,0x6675963,0x6f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xa2ef1ce7f02b0d16,0x66759632c56054b,0x6f}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xa2ef1ce7f02b0d16,0x66759632c56054b,0x6f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf0ab,0x9d3b,0x6ea,0x84ac,0x62e5,0xdde9,0x882b,0xd021,0xffe2,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x13ff}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf0ab,0x9d3b,0x6ea,0x84ac,0x62e5,0xdde9,0x882b,0xd021,0xffe2,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x13ff}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d3bf0ab,0x84ac06ea,0xdde962e5,0xd021882b,0xffffffe2,0xffffffff,0xffffffff,0x13ffffff}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d3bf0ab,0x84ac06ea,0xdde962e5,0xd021882b,0xffffffe2,0xffffffff,0xffffffff,0x13ffffff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x84ac06ea9d3bf0ab,0xd021882bdde962e5,0xffffffffffffffe2,0x13ffffffffffffff}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x84ac06ea9d3bf0ab,0xd021882bdde962e5,0xffffffffffffffe2,0x13ffffffffffffff}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xbffd,0x2ad7,0xbdcf,0xf15c,0x53d7,0xabc6,0xfbb5,0xe113,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xbffd,0x2ad7,0xbdcf,0xf15c,0x53d7,0xabc6,0xfbb5,0xe113,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2ad7bffd,0xf15cbdcf,0xabc653d7,0xe113fbb5,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2ad7bffd,0xf15cbdcf,0xabc653d7,0xe113fbb5,0x7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xf15cbdcf2ad7bffd,0xe113fbb5abc653d7,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xf15cbdcf2ad7bffd,0xe113fbb5abc653d7,0x7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1f37,0x5c4a,0x13f1,0x770,0x7183,0x5600,0xda31,0x9281}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1f37,0x5c4a,0x13f1,0x770,0x7183,0x5600,0xda31,0x9281}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5c4a1f37,0x77013f1,0x56007183,0x9281da31}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5c4a1f37,0x77013f1,0x56007183,0x9281da31}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x77013f15c4a1f37,0x9281da3156007183}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x77013f15c4a1f37,0x9281da3156007183}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7ffa,0x55af,0x7b9e,0xe2b9,0xa7af,0x578c,0xf76b,0xc227,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x7ffa,0x55af,0x7b9e,0xe2b9,0xa7af,0x578c,0xf76b,0xc227,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x55af7ffa,0xe2b97b9e,0x578ca7af,0xc227f76b,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x55af7ffa,0xe2b97b9e,0x578ca7af,0xc227f76b,0xf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe2b97b9e55af7ffa,0xc227f76b578ca7af,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe2b97b9e55af7ffa,0xc227f76b578ca7af,0xf}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xd16,0xf02b,0x1ce7,0xa2ef,0x54b,0x2c56,0x5963,0x667,0x6f}}} +{{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) 
{0xd16,0xf02b,0x1ce7,0xa2ef,0x54b,0x2c56,0x5963,0x667,0x6f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xf02b0d16,0xa2ef1ce7,0x2c56054b,0x6675963,0x6f}}} +{{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xf02b0d16,0xa2ef1ce7,0x2c56054b,0x6675963,0x6f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xa2ef1ce7f02b0d16,0x66759632c56054b,0x6f}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xa2ef1ce7f02b0d16,0x66759632c56054b,0x6f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x308}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 
16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 97}}; +}}}, 97}}; const quat_left_ideal_t CONNECTING_IDEALS[7] = {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = 
(mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 
-{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x0,0x0,0x0,0x0,0x6000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x0,0x0,0x0,0x0,0x6000}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x60000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x60000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x2,0x6000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x2,0x6000000000000000}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x10000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x10000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x1000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x1000000000000000}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x0,0x0,0x0,0x0,0x6000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x0,0x0,0x0,0x0,0x6000}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x60000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2,0x0,0x0,0x60000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x2,0x6000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x2,0x6000000000000000}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x5000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x5000}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x50000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x50000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x5000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x5000000000000000}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 
64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x3000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x3000}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x30000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x30000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x3000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x3000000000000000}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5fe,0x8673,0x157b,0x7f90,0xd2c5,0xd00b,0xa646,0x78f4}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5fe,0x8673,0x157b,0x7f90,0xd2c5,0xd00b,0xa646,0x78f4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8673f5fe,0x7f90157b,0xd00bd2c5,0x78f4a646}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8673f5fe,0x7f90157b,0xd00bd2c5,0x78f4a646}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x7f90157b8673f5fe,0x78f4a646d00bd2c5}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x7f90157b8673f5fe,0x78f4a646d00bd2c5}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfee5,0x2b,0xd6d8,0xe65c,0x68a3,0xe72d,0x373d,0x5b1}}} +{{{._mp_alloc = 0, 
._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfee5,0x2b,0xd6d8,0xe65c,0x68a3,0xe72d,0x373d,0x5b1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2bfee5,0xe65cd6d8,0xe72d68a3,0x5b1373d}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2bfee5,0xe65cd6d8,0xe72d68a3,0x5b1373d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xe65cd6d8002bfee5,0x5b1373de72d68a3}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xe65cd6d8002bfee5,0x5b1373de72d68a3}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5fe,0x8673,0x157b,0x7f90,0xd2c5,0xd00b,0xa646,0x78f4}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5fe,0x8673,0x157b,0x7f90,0xd2c5,0xd00b,0xa646,0x78f4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8673f5fe,0x7f90157b,0xd00bd2c5,0x78f4a646}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8673f5fe,0x7f90157b,0xd00bd2c5,0x78f4a646}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x7f90157b8673f5fe,0x78f4a646d00bd2c5}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x7f90157b8673f5fe,0x78f4a646d00bd2c5}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf719,0x8647,0x3ea3,0x9933,0x6a21,0xe8de,0x6f08,0x7343}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf719,0x8647,0x3ea3,0x9933,0x6a21,0xe8de,0x6f08,0x7343}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8647f719,0x99333ea3,0xe8de6a21,0x73436f08}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8647f719,0x99333ea3,0xe8de6a21,0x73436f08}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x99333ea38647f719,0x73436f08e8de6a21}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x99333ea38647f719,0x73436f08e8de6a21}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 
-{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfaff,0xc339,0xabd,0xbfc8,0xe962,0x6805,0x5323,0x3c7a}}} 
+{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfaff,0xc339,0xabd,0xbfc8,0xe962,0x6805,0x5323,0x3c7a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc339faff,0xbfc80abd,0x6805e962,0x3c7a5323}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc339faff,0xbfc80abd,0x6805e962,0x3c7a5323}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbfc80abdc339faff,0x3c7a53236805e962}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbfc80abdc339faff,0x3c7a53236805e962}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8597,0x3af7,0xa5a,0xbb29,0x77c0,0xd2d9,0xf561,0x84f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8597,0x3af7,0xa5a,0xbb29,0x77c0,0xd2d9,0xf561,0x84f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3af78597,0xbb290a5a,0xd2d977c0,0x84ff561}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3af78597,0xbb290a5a,0xd2d977c0,0x84ff561}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbb290a5a3af78597,0x84ff561d2d977c0}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbb290a5a3af78597,0x84ff561d2d977c0}}}} 
#endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe5e2,0x7715,0xa8e6,0x3c6f,0x9078,0x872b,0x9bec,0x1794}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7715e5e2,0x3c6fa8e6,0x872b9078,0x17949bec}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x3c6fa8e67715e5e2,0x17949bec872b9078}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x604b,0x3c1e,0x9e8c,0x8146,0x18b7,0xb452,0xa68a,0xf44}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x604b,0x3c1e,0x9e8c,0x8146,0x18b7,0xb452,0xa68a,0xf44}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3c1e604b,0x81469e8c,0xb45218b7,0xf44a68a}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3c1e604b,0x81469e8c,0xb45218b7,0xf44a68a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x81469e8c3c1e604b,0xf44a68ab45218b7}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x81469e8c3c1e604b,0xf44a68ab45218b7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} 
+{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) 
{0x1e37d4733b8af2f1,0xbca4df64395c83c}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x519b,0xa90b,0xcdca,0xd5f5,0x757a,0x83dd,0xb354,0xe59}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x519b,0xa90b,0xcdca,0xd5f5,0x757a,0x83dd,0xb354,0xe59}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa90b519b,0xd5f5cdca,0x83dd757a,0xe59b354}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa90b519b,0xd5f5cdca,0x83dd757a,0xe59b354}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd5f5cdcaa90b519b,0xe59b35483dd757a}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd5f5cdcaa90b519b,0xe59b35483dd757a}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 
-{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafa2,0x6dee,0xc511,0xde33,0xc8ce,0xc89e,0x4f97,0x2df9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6deeafa2,0xde33c511,0xc89ec8ce,0x2df94f97}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xde33c5116deeafa2,0x2df94f97c89ec8ce}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5e07,0xc4e3,0xf746,0x83d,0x5354,0x44c1,0x9c43,0x1f9f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5e07,0xc4e3,0xf746,0x83d,0x5354,0x44c1,0x9c43,0x1f9f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc4e35e07,0x83df746,0x44c15354,0x1f9f9c43}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc4e35e07,0x83df746,0x44c15354,0x1f9f9c43}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x83df746c4e35e07,0x1f9f9c4344c15354}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x83df746c4e35e07,0x1f9f9c4344c15354}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} 
#endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x19f2,0x5594,0xee77,0x52a2,0xf459,0x45c9,0x2187,0xb348}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x19f2,0x5594,0xee77,0x52a2,0xf459,0x45c9,0x2187,0xb348}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x559419f2,0x52a2ee77,0x45c9f459,0xb3482187}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x559419f2,0x52a2ee77,0x45c9f459,0xb3482187}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x52a2ee77559419f2,0xb348218745c9f459}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x52a2ee77559419f2,0xb348218745c9f459}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdbd3,0x967a,0x8a96,0x1df4,0x7845,0xd70,0x419a,0x222}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdbd3,0x967a,0x8a96,0x1df4,0x7845,0xd70,0x419a,0x222}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x967adbd3,0x1df48a96,0xd707845,0x222419a}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x967adbd3,0x1df48a96,0xd707845,0x222419a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1df48a96967adbd3,0x222419a0d707845}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1df48a96967adbd3,0x222419a0d707845}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x19f2,0x5594,0xee77,0x52a2,0xf459,0x45c9,0x2187,0xb348}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x19f2,0x5594,0xee77,0x52a2,0xf459,0x45c9,0x2187,0xb348}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x559419f2,0x52a2ee77,0x45c9f459,0xb3482187}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x559419f2,0x52a2ee77,0x45c9f459,0xb3482187}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) 
{0x52a2ee77559419f2,0xb348218745c9f459}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x52a2ee77559419f2,0xb348218745c9f459}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e1f,0xbf19,0x63e0,0x34ae,0x7c14,0x3859,0xdfed,0xb125}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e1f,0xbf19,0x63e0,0x34ae,0x7c14,0x3859,0xdfed,0xb125}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbf193e1f,0x34ae63e0,0x38597c14,0xb125dfed}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xbf193e1f,0x34ae63e0,0x38597c14,0xb125dfed}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x34ae63e0bf193e1f,0xb125dfed38597c14}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x34ae63e0bf193e1f,0xb125dfed38597c14}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcf9,0xaaca,0x773b,0xa951,0xfa2c,0xa2e4,0x10c3,0x59a4}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcf9,0xaaca,0x773b,0xa951,0xfa2c,0xa2e4,0x10c3,0x59a4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaaca0cf9,0xa951773b,0xa2e4fa2c,0x59a410c3}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xaaca0cf9,0xa951773b,0xa2e4fa2c,0x59a410c3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xa951773baaca0cf9,0x59a410c3a2e4fa2c}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xa951773baaca0cf9,0x59a410c3a2e4fa2c}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa3a,0x67cf,0x6ad7,0xd031,0x701,0xebca,0xd852,0x2996}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa3a,0x67cf,0x6ad7,0xd031,0x701,0xebca,0xd852,0x2996}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x67cfaa3a,0xd0316ad7,0xebca0701,0x2996d852}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x67cfaa3a,0xd0316ad7,0xebca0701,0x2996d852}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd0316ad767cfaa3a,0x2996d852ebca0701}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd0316ad767cfaa3a,0x2996d852ebca0701}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x275,0xd7ab,0xedeb,0xbc67,0xad41,0xaeb5,0xf2e5,0x148e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x275,0xd7ab,0xedeb,0xbc67,0xad41,0xaeb5,0xf2e5,0x148e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd7ab0275,0xbc67edeb,0xaeb5ad41,0x148ef2e5}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd7ab0275,0xbc67edeb,0xaeb5ad41,0x148ef2e5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbc67edebd7ab0275,0x148ef2e5aeb5ad41}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xbc67edebd7ab0275,0x148ef2e5aeb5ad41}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa3a,0x67cf,0x6ad7,0xd031,0x701,0xebca,0xd852,0x2996}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa3a,0x67cf,0x6ad7,0xd031,0x701,0xebca,0xd852,0x2996}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x67cfaa3a,0xd0316ad7,0xebca0701,0x2996d852}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x67cfaa3a,0xd0316ad7,0xebca0701,0x2996d852}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd0316ad767cfaa3a,0x2996d852ebca0701}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xd0316ad767cfaa3a,0x2996d852ebca0701}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa7c5,0x9024,0x7ceb,0x13c9,0x59c0,0x3d14,0xe56d,0x1507}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa7c5,0x9024,0x7ceb,0x13c9,0x59c0,0x3d14,0xe56d,0x1507}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x9024a7c5,0x13c97ceb,0x3d1459c0,0x1507e56d}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9024a7c5,0x13c97ceb,0x3d1459c0,0x1507e56d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x13c97ceb9024a7c5,0x1507e56d3d1459c0}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x13c97ceb9024a7c5,0x1507e56d3d1459c0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, 
._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd51d,0xb3e7,0xb56b,0xe818,0x380,0x75e5,0x6c29,0x14cb}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd51d,0xb3e7,0xb56b,0xe818,0x380,0x75e5,0x6c29,0x14cb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb3e7d51d,0xe818b56b,0x75e50380,0x14cb6c29}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb3e7d51d,0xe818b56b,0x75e50380,0x14cb6c29}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xe818b56bb3e7d51d,0x14cb6c2975e50380}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xe818b56bb3e7d51d,0x14cb6c2975e50380}}}} #endif , &MAXORD_O0}}; const quat_alg_elem_t CONJUGATING_ELEMENTS[7] = {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x10000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x10000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x1000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1,0x1000000000000000}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x1000}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x10000000}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x1,0x0,0x0,0x10000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x1,0x1000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x1,0x1000000000000000}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} 
#elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf94f,0x85ef,0x90f3,0xcc79,0x98d9,0x1a83,0x8d,0x67e1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf94f,0x85ef,0x90f3,0xcc79,0x98d9,0x1a83,0x8d,0x67e1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x85eff94f,0xcc7990f3,0x1a8398d9,0x67e1008d}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x85eff94f,0xcc7990f3,0x1a8398d9,0x67e1008d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xcc7990f385eff94f,0x67e1008d1a8398d9}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xcc7990f385eff94f,0x67e1008d1a8398d9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xf94f,0x85ef,0x90f3,0xcc79,0x98d9,0x1a83,0x8d,0x67e1}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xf94f,0x85ef,0x90f3,0xcc79,0x98d9,0x1a83,0x8d,0x67e1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x85eff94f,0xcc7990f3,0x1a8398d9,0x67e1008d}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x85eff94f,0xcc7990f3,0x1a8398d9,0x67e1008d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xcc7990f385eff94f,0x67e1008d1a8398d9}}} +{{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xcc7990f385eff94f,0x67e1008d1a8398d9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x3}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x3}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x3}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x3}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x3}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x3}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x3}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = 
(mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xf2f1,0x3b8a,0xd473,0x1e37,0xc83c,0x4395,0x4df6,0xbca}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x3b8af2f1,0x1e37d473,0x4395c83c,0xbca4df6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}} +{{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x1e37d4733b8af2f1,0xbca4df64395c83c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x57d1,0xb6f7,0xe288,0x6f19,0x6467,0xe44f,0xa7cb,0x16fc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xb6f757d1,0x6f19e288,0xe44f6467,0x16fca7cb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}} +{{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x6f19e288b6f757d1,0x16fca7cbe44f6467}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6511,0x45fa,0xa368,0xe869,0x4db2,0x88fc,0x6989,0xbdf3}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6511,0x45fa,0xa368,0xe869,0x4db2,0x88fc,0x6989,0xbdf3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x45fa6511,0xe869a368,0x88fc4db2,0xbdf36989}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x45fa6511,0xe869a368,0x88fc4db2,0xbdf36989}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xe869a36845fa6511,0xbdf3698988fc4db2}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0xe869a36845fa6511,0xbdf3698988fc4db2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x6511,0x45fa,0xa368,0xe869,0x4db2,0x88fc,0x6989,0xbdf3}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x6511,0x45fa,0xa368,0xe869,0x4db2,0x88fc,0x6989,0xbdf3}}}} #elif GMP_LIMB_BITS == 
32 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x45fa6511,0xe869a368,0x88fc4db2,0xbdf36989}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x45fa6511,0xe869a368,0x88fc4db2,0xbdf36989}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xe869a36845fa6511,0xbdf3698988fc4db2}}} +{{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0xe869a36845fa6511,0xbdf3698988fc4db2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x5}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1e6b,0x5c4a,0x13f1,0x770,0x7183,0x5600,0xda31,0x9281}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1e6b,0x5c4a,0x13f1,0x770,0x7183,0x5600,0xda31,0x9281}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5c4a1e6b,0x77013f1,0x56007183,0x9281da31}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5c4a1e6b,0x77013f1,0x56007183,0x9281da31}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x77013f15c4a1e6b,0x9281da3156007183}}} +{{{._mp_alloc = 0, ._mp_size = 2, ._mp_d = (mp_limb_t[]) {0x77013f15c4a1e6b,0x9281da3156007183}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x1e6b,0x5c4a,0x13f1,0x770,0x7183,0x5600,0xda31,0x9281}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x1e6b,0x5c4a,0x13f1,0x770,0x7183,0x5600,0xda31,0x9281}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x5c4a1e6b,0x77013f1,0x56007183,0x9281da31}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x5c4a1e6b,0x77013f1,0x56007183,0x9281da31}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x77013f15c4a1e6b,0x9281da3156007183}}} +{{{._mp_alloc = 0, ._mp_size = -2, ._mp_d = (mp_limb_t[]) {0x77013f15c4a1e6b,0x9281da3156007183}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = 
-1, ._mp_d = (mp_limb_t[]) {0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x4}}}} #endif -}}}; +}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sign.c index 9216bbe4d3..9520a6f7fd 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sign.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sign.c @@ -31,12 +31,12 @@ compute_challenge_ideal_signature(quat_left_ideal_t *lideal_chall_two, const sig // vec is a vector [1, chall_coeff] coefficients encoding the kernel of the challenge // isogeny as B[0] + chall_coeff*B[1] where B is the canonical basis of the // 2^TORSION_EVEN_POWER torsion of EA - ibz_set(&vec[0], 1); - ibz_copy_digit_array(&vec[1], sig->chall_coeff); + ibz_set(&vec.v[0], 1); + ibz_copy_digit_array(&vec.v[1], sig->chall_coeff); // now we compute the ideal associated to the challenge // for that, we need to find vec such that - // the kernel of the challenge isogeny is generated by vec[0]*B0[0] + vec[1]*B0[1] where B0 + // the kernel of the challenge isogeny is generated by vec.v[0]*B0[0] + vec.v[1]*B0[1] where B0 // is the image through the secret key isogeny of the canonical basis E0 ibz_mat_2x2_eval(&vec, &(sk->mat_BAcan_to_BA0_two), &vec); @@ -459,16 +459,16 @@ compute_and_set_basis_change_matrix(signature_t *sig, change_of_basis_matrix_tate(&mat_Bchall_can_to_Bchall, B_chall_2, &B_can_chall, E_chall, f); // Assert all values in the matrix are of the expected size for packing - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][0]) <= SQIsign_response_length + HD_extra_torsion); - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][1]) <= SQIsign_response_length + HD_extra_torsion); - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][0]) <= SQIsign_response_length + HD_extra_torsion); - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][1]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[0][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[0][1]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[1][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[1][1]) <= SQIsign_response_length + HD_extra_torsion); // Set the basis change matrix to signature - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][0], &(mat_Bchall_can_to_Bchall[0][0])); - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][1], &(mat_Bchall_can_to_Bchall[0][1])); - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][0], &(mat_Bchall_can_to_Bchall[1][0])); - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][1], &(mat_Bchall_can_to_Bchall[1][1])); + 
ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][0], &(mat_Bchall_can_to_Bchall.m[0][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][1], &(mat_Bchall_can_to_Bchall.m[0][1])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][0], &(mat_Bchall_can_to_Bchall.m[1][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][1], &(mat_Bchall_can_to_Bchall.m[1][1])); // Finalise the matrices ibz_mat_2x2_finalize(&mat_Bchall_can_to_Bchall); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/torsion_constants.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/torsion_constants.c index d7a42bcbe9..55743c1989 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/torsion_constants.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/torsion_constants.c @@ -4,40 +4,40 @@ const ibz_t TWO_TO_SECURITY_BITS = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x1}}}} #endif ; const ibz_t TORSION_PLUS_2POWER = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x100}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x100}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x100000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x100000000000000}}}} #endif ; const ibz_t SEC_DEGREE = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #endif ; const ibz_t COM_DEGREE = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) 
{0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x4b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #endif ; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/algebra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/algebra.c index f4b4260755..a6298acf77 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/algebra.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/algebra.c @@ -21,54 +21,54 @@ quat_alg_coord_mul(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b, ibz_init(&prod); ibz_vec_4_init(&sum); - ibz_set(&(sum[0]), 0); - ibz_set(&(sum[1]), 0); - ibz_set(&(sum[2]), 0); - ibz_set(&(sum[3]), 0); + ibz_set(&(sum.v[0]), 0); + ibz_set(&(sum.v[1]), 0); + ibz_set(&(sum.v[2]), 0); + ibz_set(&(sum.v[3]), 0); // compute 1 coordinate - ibz_mul(&prod, &((*a)[2]), &((*b)[2])); - ibz_sub(&(sum[0]), &(sum[0]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[3])); - ibz_sub(&(sum[0]), &(sum[0]), &prod); - ibz_mul(&(sum[0]), &(sum[0]), &(alg->p)); - ibz_mul(&prod, &((*a)[0]), &((*b)[0])); - ibz_add(&(sum[0]), &(sum[0]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[1])); - ibz_sub(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[2])); + ibz_sub(&(sum.v[0]), &(sum.v[0]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[3])); + ibz_sub(&(sum.v[0]), &(sum.v[0]), &prod); + ibz_mul(&(sum.v[0]), &(sum.v[0]), &(alg->p)); + ibz_mul(&prod, &(a->v[0]), &(b->v[0])); + ibz_add(&(sum.v[0]), &(sum.v[0]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[1])); + ibz_sub(&(sum.v[0]), &(sum.v[0]), &prod); // compute i coordiante - ibz_mul(&prod, &((*a)[2]), &((*b)[3])); - ibz_add(&(sum[1]), &(sum[1]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[2])); - ibz_sub(&(sum[1]), &(sum[1]), &prod); - ibz_mul(&(sum[1]), &(sum[1]), &(alg->p)); - ibz_mul(&prod, &((*a)[0]), &((*b)[1])); - ibz_add(&(sum[1]), &(sum[1]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[0])); - ibz_add(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[3])); + ibz_add(&(sum.v[1]), &(sum.v[1]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[2])); + ibz_sub(&(sum.v[1]), &(sum.v[1]), &prod); + ibz_mul(&(sum.v[1]), &(sum.v[1]), &(alg->p)); + ibz_mul(&prod, &(a->v[0]), &(b->v[1])); + ibz_add(&(sum.v[1]), &(sum.v[1]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[0])); + ibz_add(&(sum.v[1]), &(sum.v[1]), &prod); // compute j coordiante - ibz_mul(&prod, &((*a)[0]), &((*b)[2])); - ibz_add(&(sum[2]), &(sum[2]), &prod); - ibz_mul(&prod, &((*a)[2]), &((*b)[0])); - ibz_add(&(sum[2]), &(sum[2]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[3])); - ibz_sub(&(sum[2]), &(sum[2]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[1])); - ibz_add(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &(a->v[0]), &(b->v[2])); + ibz_add(&(sum.v[2]), &(sum.v[2]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[0])); + ibz_add(&(sum.v[2]), &(sum.v[2]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[3])); + ibz_sub(&(sum.v[2]), &(sum.v[2]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[1])); + 
ibz_add(&(sum.v[2]), &(sum.v[2]), &prod); // compute ij coordiante - ibz_mul(&prod, &((*a)[0]), &((*b)[3])); - ibz_add(&(sum[3]), &(sum[3]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[0])); - ibz_add(&(sum[3]), &(sum[3]), &prod); - ibz_mul(&prod, &((*a)[2]), &((*b)[1])); - ibz_sub(&(sum[3]), &(sum[3]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[2])); - ibz_add(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &(a->v[0]), &(b->v[3])); + ibz_add(&(sum.v[3]), &(sum.v[3]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[0])); + ibz_add(&(sum.v[3]), &(sum.v[3]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[1])); + ibz_sub(&(sum.v[3]), &(sum.v[3]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[2])); + ibz_add(&(sum.v[3]), &(sum.v[3]), &prod); - ibz_copy(&((*res)[0]), &(sum[0])); - ibz_copy(&((*res)[1]), &(sum[1])); - ibz_copy(&((*res)[2]), &(sum[2])); - ibz_copy(&((*res)[3]), &(sum[3])); + ibz_copy(&(res->v[0]), &(sum.v[0])); + ibz_copy(&(res->v[1]), &(sum.v[1])); + ibz_copy(&(res->v[2]), &(sum.v[2])); + ibz_copy(&(res->v[3]), &(sum.v[3])); ibz_finalize(&prod); ibz_vec_4_finalize(&sum); @@ -86,8 +86,8 @@ quat_alg_equal_denom(quat_alg_elem_t *res_a, quat_alg_elem_t *res_b, const quat_ ibz_div(&(res_b->denom), &r, &(b->denom), &gcd); for (int i = 0; i < 4; i++) { // multiply coordiates by reduced denominators from the other element - ibz_mul(&(res_a->coord[i]), &(a->coord[i]), &(res_b->denom)); - ibz_mul(&(res_b->coord[i]), &(b->coord[i]), &(res_a->denom)); + ibz_mul(&(res_a->coord.v[i]), &(a->coord.v[i]), &(res_b->denom)); + ibz_mul(&(res_b->coord.v[i]), &(b->coord.v[i]), &(res_a->denom)); } // multiply both reduced denominators ibz_mul(&(res_a->denom), &(res_a->denom), &(res_b->denom)); @@ -149,8 +149,8 @@ quat_alg_norm(ibz_t *res_num, ibz_t *res_denom, const quat_alg_elem_t *a, const quat_alg_conj(&norm, a); quat_alg_mul(&norm, a, &norm, alg); - ibz_gcd(&g, &(norm.coord[0]), &(norm.denom)); - ibz_div(res_num, &r, &(norm.coord[0]), &g); + ibz_gcd(&g, &(norm.coord.v[0]), &(norm.denom)); + ibz_div(res_num, &r, &(norm.coord.v[0]), &g); ibz_div(res_denom, &r, &(norm.denom), &g); ibz_abs(res_denom, res_denom); ibz_abs(res_num, res_num); @@ -165,20 +165,20 @@ void quat_alg_scalar(quat_alg_elem_t *elem, const ibz_t *numerator, const ibz_t *denominator) { ibz_copy(&(elem->denom), denominator); - ibz_copy(&(elem->coord[0]), numerator); - ibz_set(&(elem->coord[1]), 0); - ibz_set(&(elem->coord[2]), 0); - ibz_set(&(elem->coord[3]), 0); + ibz_copy(&(elem->coord.v[0]), numerator); + ibz_set(&(elem->coord.v[1]), 0); + ibz_set(&(elem->coord.v[2]), 0); + ibz_set(&(elem->coord.v[3]), 0); } void quat_alg_conj(quat_alg_elem_t *conj, const quat_alg_elem_t *x) { ibz_copy(&(conj->denom), &(x->denom)); - ibz_copy(&(conj->coord[0]), &(x->coord[0])); - ibz_neg(&(conj->coord[1]), &(x->coord[1])); - ibz_neg(&(conj->coord[2]), &(x->coord[2])); - ibz_neg(&(conj->coord[3]), &(x->coord[3])); + ibz_copy(&(conj->coord.v[0]), &(x->coord.v[0])); + ibz_neg(&(conj->coord.v[1]), &(x->coord.v[1])); + ibz_neg(&(conj->coord.v[2]), &(x->coord.v[2])); + ibz_neg(&(conj->coord.v[3]), &(x->coord.v[3])); } void @@ -190,7 +190,8 @@ quat_alg_make_primitive(ibz_vec_4_t *primitive_x, ibz_t *content, const quat_alg ibz_t r; ibz_init(&r); for (int i = 0; i < 4; i++) { - ibz_div(*primitive_x + i, &r, *primitive_x + i, content); + // TODO: check if this is correct + ibz_div(primitive_x->v + i, &r, primitive_x->v + i, content); } ibz_finalize(&r); } @@ -235,10 +236,10 @@ quat_alg_elem_is_zero(const quat_alg_elem_t *x) void quat_alg_elem_set(quat_alg_elem_t 
*elem, int32_t denom, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) { - ibz_set(&(elem->coord[0]), coord0); - ibz_set(&(elem->coord[1]), coord1); - ibz_set(&(elem->coord[2]), coord2); - ibz_set(&(elem->coord[3]), coord3); + ibz_set(&(elem->coord.v[0]), coord0); + ibz_set(&(elem->coord.v[1]), coord1); + ibz_set(&(elem->coord.v[2]), coord2); + ibz_set(&(elem->coord.v[3]), coord3); ibz_set(&(elem->denom), denom); } @@ -247,10 +248,10 @@ void quat_alg_elem_copy(quat_alg_elem_t *copy, const quat_alg_elem_t *copied) { ibz_copy(©->denom, &copied->denom); - ibz_copy(©->coord[0], &copied->coord[0]); - ibz_copy(©->coord[1], &copied->coord[1]); - ibz_copy(©->coord[2], &copied->coord[2]); - ibz_copy(©->coord[3], &copied->coord[3]); + ibz_copy(©->coord.v[0], &copied->coord.v[0]); + ibz_copy(©->coord.v[1], &copied->coord.v[1]); + ibz_copy(©->coord.v[2], &copied->coord.v[2]); + ibz_copy(©->coord.v[3], &copied->coord.v[3]); } // helper functions for lattices @@ -262,10 +263,10 @@ quat_alg_elem_copy_ibz(quat_alg_elem_t *elem, const ibz_t *coord2, const ibz_t *coord3) { - ibz_copy(&(elem->coord[0]), coord0); - ibz_copy(&(elem->coord[1]), coord1); - ibz_copy(&(elem->coord[2]), coord2); - ibz_copy(&(elem->coord[3]), coord3); + ibz_copy(&(elem->coord.v[0]), coord0); + ibz_copy(&(elem->coord.v[1]), coord1); + ibz_copy(&(elem->coord.v[2]), coord2); + ibz_copy(&(elem->coord.v[3]), coord3); ibz_copy(&(elem->denom), denom); } @@ -274,7 +275,7 @@ void quat_alg_elem_mul_by_scalar(quat_alg_elem_t *res, const ibz_t *scalar, const quat_alg_elem_t *elem) { for (int i = 0; i < 4; i++) { - ibz_mul(&(res->coord[i]), &(elem->coord[i]), scalar); + ibz_mul(&(res->coord.v[i]), &(elem->coord.v[i]), scalar); } ibz_copy(&(res->denom), &(elem->denom)); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/common.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/common.c index 1df7755a29..e051ac340a 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/common.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/common.c @@ -14,6 +14,7 @@ public_key_init(public_key_t *pk) void public_key_finalize(public_key_t *pk) { + (void) pk; } // compute the challenge as the hash of the message and the commitment curve and public key diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim2.c index b31ae7771a..5bf214c4e2 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim2.c @@ -5,34 +5,34 @@ void ibz_vec_2_set(ibz_vec_2_t *vec, int a0, int a1) { - ibz_set(&((*vec)[0]), a0); - ibz_set(&((*vec)[1]), a1); + ibz_set(&(vec->v[0]), a0); + ibz_set(&(vec->v[1]), a1); } void ibz_mat_2x2_set(ibz_mat_2x2_t *mat, int a00, int a01, int a10, int a11) { - ibz_set(&((*mat)[0][0]), a00); - ibz_set(&((*mat)[0][1]), a01); - ibz_set(&((*mat)[1][0]), a10); - ibz_set(&((*mat)[1][1]), a11); + ibz_set(&(mat->m[0][0]), a00); + ibz_set(&(mat->m[0][1]), a01); + ibz_set(&(mat->m[1][0]), a10); + ibz_set(&(mat->m[1][1]), a11); } void ibz_mat_2x2_copy(ibz_mat_2x2_t *copy, const ibz_mat_2x2_t *copied) { - ibz_copy(&((*copy)[0][0]), &((*copied)[0][0])); - ibz_copy(&((*copy)[0][1]), &((*copied)[0][1])); - ibz_copy(&((*copy)[1][0]), &((*copied)[1][0])); - ibz_copy(&((*copy)[1][1]), &((*copied)[1][1])); + ibz_copy(&(copy->m[0][0]), &(copied->m[0][0])); + ibz_copy(&(copy->m[0][1]), &(copied->m[0][1])); + ibz_copy(&(copy->m[1][0]), &(copied->m[1][0])); + ibz_copy(&(copy->m[1][1]), 
&(copied->m[1][1])); } void ibz_mat_2x2_add(ibz_mat_2x2_t *sum, const ibz_mat_2x2_t *a, const ibz_mat_2x2_t *b) { - ibz_add(&((*sum)[0][0]), &((*a)[0][0]), &((*b)[0][0])); - ibz_add(&((*sum)[0][1]), &((*a)[0][1]), &((*b)[0][1])); - ibz_add(&((*sum)[1][0]), &((*a)[1][0]), &((*b)[1][0])); - ibz_add(&((*sum)[1][1]), &((*a)[1][1]), &((*b)[1][1])); + ibz_add(&(sum->m[0][0]), &(a->m[0][0]), &(b->m[0][0])); + ibz_add(&(sum->m[0][1]), &(a->m[0][1]), &(b->m[0][1])); + ibz_add(&(sum->m[1][0]), &(a->m[1][0]), &(b->m[1][0])); + ibz_add(&(sum->m[1][1]), &(a->m[1][1]), &(b->m[1][1])); } void @@ -53,16 +53,16 @@ ibz_mat_2x2_eval(ibz_vec_2_t *res, const ibz_mat_2x2_t *mat, const ibz_vec_2_t * ibz_vec_2_t matvec; ibz_init(&prod); ibz_vec_2_init(&matvec); - ibz_mul(&prod, &((*mat)[0][0]), &((*vec)[0])); - ibz_copy(&(matvec[0]), &prod); - ibz_mul(&prod, &((*mat)[0][1]), &((*vec)[1])); - ibz_add(&(matvec[0]), &(matvec[0]), &prod); - ibz_mul(&prod, &((*mat)[1][0]), &((*vec)[0])); - ibz_copy(&(matvec[1]), &prod); - ibz_mul(&prod, &((*mat)[1][1]), &((*vec)[1])); - ibz_add(&(matvec[1]), &(matvec[1]), &prod); - ibz_copy(&((*res)[0]), &(matvec[0])); - ibz_copy(&((*res)[1]), &(matvec[1])); + ibz_mul(&prod, &(mat->m[0][0]), &(vec->v[0])); + ibz_copy(&(matvec.v[0]), &prod); + ibz_mul(&prod, &(mat->m[0][1]), &(vec->v[1])); + ibz_add(&(matvec.v[0]), &(matvec.v[0]), &prod); + ibz_mul(&prod, &(mat->m[1][0]), &(vec->v[0])); + ibz_copy(&(matvec.v[1]), &prod); + ibz_mul(&prod, &(mat->m[1][1]), &(vec->v[1])); + ibz_add(&(matvec.v[1]), &(matvec.v[1]), &prod); + ibz_copy(&(res->v[0]), &(matvec.v[0])); + ibz_copy(&(res->v[1]), &(matvec.v[1])); ibz_finalize(&prod); ibz_vec_2_finalize(&matvec); } @@ -78,21 +78,21 @@ ibz_2x2_mul_mod(ibz_mat_2x2_t *prod, const ibz_mat_2x2_t *mat_a, const ibz_mat_2 ibz_mat_2x2_init(&sums); for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_set(&(sums[i][j]), 0); + ibz_set(&(sums.m[i][j]), 0); } } for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { for (int k = 0; k < 2; k++) { - ibz_mul(&mul, &((*mat_a)[i][k]), &((*mat_b)[k][j])); - ibz_add(&(sums[i][j]), &(sums[i][j]), &mul); - ibz_mod(&(sums[i][j]), &(sums[i][j]), m); + ibz_mul(&mul, &(mat_a->m[i][k]), &(mat_b->m[k][j])); + ibz_add(&(sums.m[i][j]), &(sums.m[i][j]), &mul); + ibz_mod(&(sums.m[i][j]), &(sums.m[i][j]), m); } } } for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_copy(&((*prod)[i][j]), &(sums[i][j])); + ibz_copy(&(prod->m[i][j]), &(sums.m[i][j])); } } ibz_finalize(&mul); @@ -105,9 +105,9 @@ ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m ibz_t det, prod; ibz_init(&det); ibz_init(&prod); - ibz_mul(&det, &((*mat)[0][0]), &((*mat)[1][1])); + ibz_mul(&det, &(mat->m[0][0]), &(mat->m[1][1])); ibz_mod(&det, &det, m); - ibz_mul(&prod, &((*mat)[0][1]), &((*mat)[1][0])); + ibz_mul(&prod, &(mat->m[0][1]), &(mat->m[1][0])); ibz_sub(&det, &det, &prod); ibz_mod(&det, &det, m); int res = ibz_invmod(&det, &det, m); @@ -115,15 +115,15 @@ ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m ibz_set(&prod, res); ibz_mul(&det, &det, &prod); // compute inverse - ibz_copy(&prod, &((*mat)[0][0])); - ibz_copy(&((*inv)[0][0]), &((*mat)[1][1])); - ibz_copy(&((*inv)[1][1]), &prod); - ibz_neg(&((*inv)[1][0]), &((*mat)[1][0])); - ibz_neg(&((*inv)[0][1]), &((*mat)[0][1])); + ibz_copy(&prod, &(mat->m[0][0])); + ibz_copy(&(inv->m[0][0]), &(mat->m[1][1])); + ibz_copy(&(inv->m[1][1]), &prod); + ibz_neg(&(inv->m[1][0]), &(mat->m[1][0])); + ibz_neg(&(inv->m[0][1]), 
&(mat->m[0][1])); for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_mul(&((*inv)[i][j]), &((*inv)[i][j]), &det); - ibz_mod(&((*inv)[i][j]), &((*inv)[i][j]), m); + ibz_mul(&(inv->m[i][j]), &(inv->m[i][j]), &det); + ibz_mod(&(inv->m[i][j]), &(inv->m[i][j]), m); } } ibz_finalize(&det); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c index 171473d481..143060e2c3 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c @@ -137,10 +137,10 @@ _fixed_degree_isogeny_impl(quat_left_ideal_t *lideal, ibz_invmod(&tmp, &tmp, &two_pow); assert(!ibz_is_even(&tmp)); - ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); - ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); - ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); - ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + ibz_mul(&theta.coord.v[0], &theta.coord.v[0], &tmp); + ibz_mul(&theta.coord.v[1], &theta.coord.v[1], &tmp); + ibz_mul(&theta.coord.v[2], &theta.coord.v[2], &tmp); + ibz_mul(&theta.coord.v[3], &theta.coord.v[3], &tmp); // applying theta to the basis ec_basis_t B0_two_theta; @@ -197,53 +197,53 @@ post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, const ibz_ // treatment if (is_special_order) { // reordering the basis if needed - if (ibz_cmp(&(*gram)[0][0], &(*gram)[2][2]) == 0) { + if (ibz_cmp(&gram->m[0][0], &gram->m[2][2]) == 0) { for (int i = 0; i < 4; i++) { - ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + ibz_swap(&reduced->m[i][1], &reduced->m[i][2]); } - ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); - ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); - ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); - ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); - ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); - } else if (ibz_cmp(&(*gram)[0][0], &(*gram)[3][3]) == 0) { + ibz_swap(&gram->m[0][2], &gram->m[0][1]); + ibz_swap(&gram->m[2][0], &gram->m[1][0]); + ibz_swap(&gram->m[3][2], &gram->m[3][1]); + ibz_swap(&gram->m[2][3], &gram->m[1][3]); + ibz_swap(&gram->m[2][2], &gram->m[1][1]); + } else if (ibz_cmp(&gram->m[0][0], &gram->m[3][3]) == 0) { for (int i = 0; i < 4; i++) { - ibz_swap(&(*reduced)[i][1], &(*reduced)[i][3]); + ibz_swap(&reduced->m[i][1], &reduced->m[i][3]); } - ibz_swap(&(*gram)[0][3], &(*gram)[0][1]); - ibz_swap(&(*gram)[3][0], &(*gram)[1][0]); - ibz_swap(&(*gram)[2][3], &(*gram)[2][1]); - ibz_swap(&(*gram)[3][2], &(*gram)[1][2]); - ibz_swap(&(*gram)[3][3], &(*gram)[1][1]); - } else if (ibz_cmp(&(*gram)[1][1], &(*gram)[3][3]) == 0) { + ibz_swap(&gram->m[0][3], &gram->m[0][1]); + ibz_swap(&gram->m[3][0], &gram->m[1][0]); + ibz_swap(&gram->m[2][3], &gram->m[2][1]); + ibz_swap(&gram->m[3][2], &gram->m[1][2]); + ibz_swap(&gram->m[3][3], &gram->m[1][1]); + } else if (ibz_cmp(&gram->m[1][1], &gram->m[3][3]) == 0) { // in this case it seems that we need to swap the second and third // element, and then recompute entirely the second element from the first // first we swap the second and third element for (int i = 0; i < 4; i++) { - ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + ibz_swap(&reduced->m[i][1], &reduced->m[i][2]); } - ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); - ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); - ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); - ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); - ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); + ibz_swap(&gram->m[0][2], &gram->m[0][1]); + ibz_swap(&gram->m[2][0], &gram->m[1][0]); + 
ibz_swap(&gram->m[3][2], &gram->m[3][1]); + ibz_swap(&gram->m[2][3], &gram->m[1][3]); + ibz_swap(&gram->m[2][2], &gram->m[1][1]); } // adjusting the sign if needed - if (ibz_cmp(&(*reduced)[0][0], &(*reduced)[1][1]) != 0) { + if (ibz_cmp(&reduced->m[0][0], &reduced->m[1][1]) != 0) { for (int i = 0; i < 4; i++) { - ibz_neg(&(*reduced)[i][1], &(*reduced)[i][1]); - ibz_neg(&(*gram)[i][1], &(*gram)[i][1]); - ibz_neg(&(*gram)[1][i], &(*gram)[1][i]); + ibz_neg(&reduced->m[i][1], &reduced->m[i][1]); + ibz_neg(&gram->m[i][1], &gram->m[i][1]); + ibz_neg(&gram->m[1][i], &gram->m[1][i]); } } - if (ibz_cmp(&(*reduced)[0][2], &(*reduced)[1][3]) != 0) { + if (ibz_cmp(&reduced->m[0][2], &reduced->m[1][3]) != 0) { for (int i = 0; i < 4; i++) { - ibz_neg(&(*reduced)[i][3], &(*reduced)[i][3]); - ibz_neg(&(*gram)[i][3], &(*gram)[i][3]); - ibz_neg(&(*gram)[3][i], &(*gram)[3][i]); + ibz_neg(&reduced->m[i][3], &reduced->m[i][3]); + ibz_neg(&gram->m[i][3], &gram->m[i][3]); + ibz_neg(&gram->m[3][i], &gram->m[3][i]); } - // assert(ibz_cmp(&(*reduced)[0][2],&(*reduced)[1][3])==0); + // assert(ibz_cmp(&reduced->m[0][2],&reduced->m[1][3])==0); } } } @@ -273,7 +273,7 @@ enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t // if the basis is of the form alpha, i*alpha, beta, i*beta // we can remove some values due to symmetry of the basis that bool need_remove_symmetry = - (ibz_cmp(&(*gram)[0][0], &(*gram)[1][1]) == 0 && ibz_cmp(&(*gram)[3][3], &(*gram)[2][2]) == 0); + (ibz_cmp(&gram->m[0][0], &gram->m[1][1]) == 0 && ibz_cmp(&gram->m[3][3], &gram->m[2][2]) == 0); int check1, check2, check3; @@ -324,10 +324,10 @@ enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t // and we ensure that we don't record the same norm in the list if (!need_remove_symmetry || (check1 <= check2 && check1 <= check3)) { // Set the point as a vector (x, y, z, w) - ibz_set(&point[0], x); - ibz_set(&point[1], y); - ibz_set(&point[2], z); - ibz_set(&point[3], w); + ibz_set(&point.v[0], x); + ibz_set(&point.v[1], y); + ibz_set(&point.v[2], z); + ibz_set(&point.v[3], w); // Evaluate this through the gram matrix and divide out by the // adjusted_norm @@ -336,10 +336,10 @@ enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t assert(ibz_is_zero(&remain)); if (ibz_mod_ui(&norm, 2) == 1) { - ibz_set(&vecs[count][0], x); - ibz_set(&vecs[count][1], y); - ibz_set(&vecs[count][2], z); - ibz_set(&vecs[count][3], w); + ibz_set(&vecs[count].v[0], x); + ibz_set(&vecs[count].v[1], y); + ibz_set(&vecs[count].v[2], z); + ibz_set(&vecs[count].v[3], w); ibz_copy(&norms[count], &norm); count++; } @@ -530,10 +530,10 @@ find_uv(ibz_t *u, quat_alg_elem_t delta; // delta will be the element of smallest norm quat_alg_elem_init(&delta); - ibz_set(&delta.coord[0], 1); - ibz_set(&delta.coord[1], 0); - ibz_set(&delta.coord[2], 0); - ibz_set(&delta.coord[3], 0); + ibz_set(&delta.coord.v[0], 1); + ibz_set(&delta.coord.v[1], 0); + ibz_set(&delta.coord.v[2], 0); + ibz_set(&delta.coord.v[3], 0); ibz_copy(&delta.denom, &reduced_id.lattice.denom); ibz_mat_4x4_eval(&delta.coord, &reduced[0], &delta.coord); assert(quat_lattice_contains(NULL, &reduced_id.lattice, &delta)); @@ -542,7 +542,7 @@ find_uv(ibz_t *u, quat_alg_conj(&delta, &delta); ibz_mul(&delta.denom, &delta.denom, &ideal[0].norm); quat_lattice_alg_elem_mul(&reduced_id.lattice, &reduced_id.lattice, &delta, Bpoo); - ibz_copy(&reduced_id.norm, &gram[0][0][0]); + ibz_copy(&reduced_id.norm, &gram[0].m[0][0]); ibz_div(&reduced_id.norm, &remain, 
&reduced_id.norm, &adjusted_norm[0]); assert(ibz_cmp(&remain, &ibz_const_zero) == 0); @@ -989,10 +989,10 @@ dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, } ibz_invmod(&tmp, &tmp, &two_pow); - ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); - ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); - ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); - ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + ibz_mul(&theta.coord.v[0], &theta.coord.v[0], &tmp); + ibz_mul(&theta.coord.v[1], &theta.coord.v[1], &tmp); + ibz_mul(&theta.coord.v[2], &theta.coord.v[2], &tmp); + ibz_mul(&theta.coord.v[3], &theta.coord.v[3], &tmp); // applying theta endomorphism_application_even_basis(&bas2, 0, &Fv_codomain.E1, &theta, TORSION_EVEN_POWER); @@ -1092,10 +1092,10 @@ dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, ibz_mul(&tmp, &tmp, &CONNECTING_IDEALS[index_order1].norm); } ibz_invmod(&tmp, &tmp, &TORSION_PLUS_2POWER); - ibz_mul(&beta1->coord[0], &beta1->coord[0], &tmp); - ibz_mul(&beta1->coord[1], &beta1->coord[1], &tmp); - ibz_mul(&beta1->coord[2], &beta1->coord[2], &tmp); - ibz_mul(&beta1->coord[3], &beta1->coord[3], &tmp); + ibz_mul(&beta1->coord.v[0], &beta1->coord.v[0], &tmp); + ibz_mul(&beta1->coord.v[1], &beta1->coord.v[1], &tmp); + ibz_mul(&beta1->coord.v[2], &beta1->coord.v[2], &tmp); + ibz_mul(&beta1->coord.v[3], &beta1->coord.v[3], &tmp); endomorphism_application_even_basis(basis, 0, codomain, beta1, TORSION_EVEN_POWER); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim4.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim4.c index 495dc2dcb2..b024a7d46e 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim4.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim4.c @@ -11,16 +11,16 @@ ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t ibz_mat_4x4_init(&mat); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&(mat[i][j]), 0); + ibz_set(&(mat.m[i][j]), 0); for (int k = 0; k < 4; k++) { - ibz_mul(&prod, &((*a)[i][k]), &((*b)[k][j])); - ibz_add(&(mat[i][j]), &(mat[i][j]), &prod); + ibz_mul(&prod, &(a->m[i][k]), &(b->m[k][j])); + ibz_add(&(mat.m[i][j]), &(mat.m[i][j]), &prod); } } } for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&((*res)[i][j]), &(mat[i][j])); + ibz_copy(&(res->m[i][j]), &(mat.m[i][j])); } } ibz_mat_4x4_finalize(&mat); @@ -31,61 +31,61 @@ ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t void ibz_vec_4_set(ibz_vec_4_t *vec, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) { - ibz_set(&((*vec)[0]), coord0); - ibz_set(&((*vec)[1]), coord1); - ibz_set(&((*vec)[2]), coord2); - ibz_set(&((*vec)[3]), coord3); + ibz_set(&(vec->v[0]), coord0); + ibz_set(&(vec->v[1]), coord1); + ibz_set(&(vec->v[2]), coord2); + ibz_set(&(vec->v[3]), coord3); } void ibz_vec_4_copy(ibz_vec_4_t *new, const ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_copy(&((*new)[i]), &((*vec)[i])); + ibz_copy(&(new->v[i]), &(vec->v[i])); } } void ibz_vec_4_copy_ibz(ibz_vec_4_t *res, const ibz_t *coord0, const ibz_t *coord1, const ibz_t *coord2, const ibz_t *coord3) { - ibz_copy(&((*res)[0]), coord0); - ibz_copy(&((*res)[1]), coord1); - ibz_copy(&((*res)[2]), coord2); - ibz_copy(&((*res)[3]), coord3); + ibz_copy(&(res->v[0]), coord0); + ibz_copy(&(res->v[1]), coord1); + ibz_copy(&(res->v[2]), coord2); + ibz_copy(&(res->v[3]), coord3); } void ibz_vec_4_content(ibz_t *content, const ibz_vec_4_t *v) { - ibz_gcd(content, &((*v)[0]), 
&((*v)[1])); - ibz_gcd(content, &((*v)[2]), content); - ibz_gcd(content, &((*v)[3]), content); + ibz_gcd(content, &(v->v[0]), &(v->v[1])); + ibz_gcd(content, &(v->v[2]), content); + ibz_gcd(content, &(v->v[3]), content); } void ibz_vec_4_negate(ibz_vec_4_t *neg, const ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_neg(&((*neg)[i]), &((*vec)[i])); + ibz_neg(&(neg->v[i]), &(vec->v[i])); } } void ibz_vec_4_add(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) { - ibz_add(&((*res)[0]), &((*a)[0]), &((*b)[0])); - ibz_add(&((*res)[1]), &((*a)[1]), &((*b)[1])); - ibz_add(&((*res)[2]), &((*a)[2]), &((*b)[2])); - ibz_add(&((*res)[3]), &((*a)[3]), &((*b)[3])); + ibz_add(&(res->v[0]), &(a->v[0]), &(b->v[0])); + ibz_add(&(res->v[1]), &(a->v[1]), &(b->v[1])); + ibz_add(&(res->v[2]), &(a->v[2]), &(b->v[2])); + ibz_add(&(res->v[3]), &(a->v[3]), &(b->v[3])); } void ibz_vec_4_sub(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) { - ibz_sub(&((*res)[0]), &((*a)[0]), &((*b)[0])); - ibz_sub(&((*res)[1]), &((*a)[1]), &((*b)[1])); - ibz_sub(&((*res)[2]), &((*a)[2]), &((*b)[2])); - ibz_sub(&((*res)[3]), &((*a)[3]), &((*b)[3])); + ibz_sub(&(res->v[0]), &(a->v[0]), &(b->v[0])); + ibz_sub(&(res->v[1]), &(a->v[1]), &(b->v[1])); + ibz_sub(&(res->v[2]), &(a->v[2]), &(b->v[2])); + ibz_sub(&(res->v[3]), &(a->v[3]), &(b->v[3])); } int @@ -93,7 +93,7 @@ ibz_vec_4_is_zero(const ibz_vec_4_t *x) { int res = 1; for (int i = 0; i < 4; i++) { - res &= ibz_is_zero(&((*x)[i])); + res &= ibz_is_zero(&(x->v[i])); } return (res); } @@ -110,12 +110,12 @@ ibz_vec_4_linear_combination(ibz_vec_4_t *lc, ibz_vec_4_init(&sums); ibz_init(&prod); for (int i = 0; i < 4; i++) { - ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); - ibz_mul(&prod, coeff_b, &((*vec_b)[i])); - ibz_add(&(sums[i]), &(sums[i]), &prod); + ibz_mul(&(sums.v[i]), coeff_a, &(vec_a->v[i])); + ibz_mul(&prod, coeff_b, &(vec_b->v[i])); + ibz_add(&(sums.v[i]), &(sums.v[i]), &prod); } for (int i = 0; i < 4; i++) { - ibz_copy(&((*lc)[i]), &(sums[i])); + ibz_copy(&(lc->v[i]), &(sums.v[i])); } ibz_finalize(&prod); ibz_vec_4_finalize(&sums); @@ -125,7 +125,7 @@ void ibz_vec_4_scalar_mul(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_mul(&((*prod)[i]), &((*vec)[i]), scalar); + ibz_mul(&(prod->v[i]), &(vec->v[i]), scalar); } } @@ -136,7 +136,7 @@ ibz_vec_4_scalar_div(ibz_vec_4_t *quot, const ibz_t *scalar, const ibz_vec_4_t * ibz_t r; ibz_init(&r); for (int i = 0; i < 4; i++) { - ibz_div(&((*quot)[i]), &r, &((*vec)[i]), scalar); + ibz_div(&(quot->v[i]), &r, &(vec->v[i]), scalar); res = res && ibz_is_zero(&r); } ibz_finalize(&r); @@ -148,7 +148,7 @@ ibz_mat_4x4_copy(ibz_mat_4x4_t *new, const ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&((*new)[i][j]), &((*mat)[i][j])); + ibz_copy(&(new->m[i][j]), &(mat->m[i][j])); } } } @@ -158,7 +158,7 @@ ibz_mat_4x4_negate(ibz_mat_4x4_t *neg, const ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_neg(&((*neg)[i][j]), &((*mat)[i][j])); + ibz_neg(&(neg->m[i][j]), &(mat->m[i][j])); } } } @@ -170,7 +170,7 @@ ibz_mat_4x4_transpose(ibz_mat_4x4_t *transposed, const ibz_mat_4x4_t *mat) ibz_mat_4x4_init(&work); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(work[i][j]), &((*mat)[j][i])); + ibz_copy(&(work.m[i][j]), &(mat->m[j][i])); } } ibz_mat_4x4_copy(transposed, &work); @@ -182,7 +182,7 @@ ibz_mat_4x4_zero(ibz_mat_4x4_t *zero) { for (int i = 0; i < 4; i++) { 
for (int j = 0; j < 4; j++) { - ibz_set(&((*zero)[i][j]), 0); + ibz_set(&(zero->m[i][j]), 0); } } } @@ -192,9 +192,9 @@ ibz_mat_4x4_identity(ibz_mat_4x4_t *id) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&((*id)[i][j]), 0); + ibz_set(&(id->m[i][j]), 0); } - ibz_set(&((*id)[i][i]), 1); + ibz_set(&(id->m[i][i]), 1); } } @@ -204,7 +204,7 @@ ibz_mat_4x4_is_identity(const ibz_mat_4x4_t *mat) int res = 1; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - res = res && ibz_is_one(&((*mat)[i][j])) == (i == j); + res = res && ibz_is_one(&(mat->m[i][j])) == (i == j); } } return (res); @@ -216,7 +216,7 @@ ibz_mat_4x4_equal(const ibz_mat_4x4_t *mat1, const ibz_mat_4x4_t *mat2) int res = 0; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - res = res | ibz_cmp(&((*mat1)[i][j]), &((*mat2)[i][j])); + res = res | ibz_cmp(&(mat1->m[i][j]), &(mat2->m[i][j])); } } return (!res); @@ -227,7 +227,7 @@ ibz_mat_4x4_scalar_mul(ibz_mat_4x4_t *prod, const ibz_t *scalar, const ibz_mat_4 { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_mul(&((*prod)[i][j]), &((*mat)[i][j]), scalar); + ibz_mul(&(prod->m[i][j]), &(mat->m[i][j]), scalar); } } } @@ -237,10 +237,10 @@ ibz_mat_4x4_gcd(ibz_t *gcd, const ibz_mat_4x4_t *mat) { ibz_t d; ibz_init(&d); - ibz_copy(&d, &((*mat)[0][0])); + ibz_copy(&d, &(mat->m[0][0])); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_gcd(&d, &d, &((*mat)[i][j])); + ibz_gcd(&d, &d, &(mat->m[i][j])); } } ibz_copy(gcd, &d); @@ -255,7 +255,7 @@ ibz_mat_4x4_scalar_div(ibz_mat_4x4_t *quot, const ibz_t *scalar, const ibz_mat_4 ibz_init(&r); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_div(&((*quot)[i][j]), &r, &((*mat)[i][j]), scalar); + ibz_div(&(quot->m[i][j]), &r, &(mat->m[i][j]), scalar); res = res && ibz_is_zero(&r); } } @@ -325,17 +325,17 @@ ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_ // compute some 2x2 minors, store them in s and c for (int i = 0; i < 3; i++) { - ibz_mat_2x2_det_from_ibz(&(s[i]), &((*mat)[0][0]), &((*mat)[0][i + 1]), &((*mat)[1][0]), &((*mat)[1][i + 1])); - ibz_mat_2x2_det_from_ibz(&(c[i]), &((*mat)[2][0]), &((*mat)[2][i + 1]), &((*mat)[3][0]), &((*mat)[3][i + 1])); + ibz_mat_2x2_det_from_ibz(&(s[i]), &(mat->m[0][0]), &(mat->m[0][i + 1]), &(mat->m[1][0]), &(mat->m[1][i + 1])); + ibz_mat_2x2_det_from_ibz(&(c[i]), &(mat->m[2][0]), &(mat->m[2][i + 1]), &(mat->m[3][0]), &(mat->m[3][i + 1])); } for (int i = 0; i < 2; i++) { ibz_mat_2x2_det_from_ibz( - &(s[3 + i]), &((*mat)[0][1]), &((*mat)[0][2 + i]), &((*mat)[1][1]), &((*mat)[1][2 + i])); + &(s[3 + i]), &(mat->m[0][1]), &(mat->m[0][2 + i]), &(mat->m[1][1]), &(mat->m[1][2 + i])); ibz_mat_2x2_det_from_ibz( - &(c[3 + i]), &((*mat)[2][1]), &((*mat)[2][2 + i]), &((*mat)[3][1]), &((*mat)[3][2 + i])); + &(c[3 + i]), &(mat->m[2][1]), &(mat->m[2][2 + i]), &(mat->m[3][1]), &(mat->m[3][2 + i])); } - ibz_mat_2x2_det_from_ibz(&(s[5]), &((*mat)[0][2]), &((*mat)[0][3]), &((*mat)[1][2]), &((*mat)[1][3])); - ibz_mat_2x2_det_from_ibz(&(c[5]), &((*mat)[2][2]), &((*mat)[2][3]), &((*mat)[3][2]), &((*mat)[3][3])); + ibz_mat_2x2_det_from_ibz(&(s[5]), &(mat->m[0][2]), &(mat->m[0][3]), &(mat->m[1][2]), &(mat->m[1][3])); + ibz_mat_2x2_det_from_ibz(&(c[5]), &(mat->m[2][2]), &(mat->m[2][3]), &(mat->m[3][2]), &(mat->m[3][3])); // compute det ibz_set(&work_det, 0); @@ -351,39 +351,39 @@ ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_ for (int j = 0; j < 4; j++) { for (int k = 0; k < 2; k++) 
{ if ((k + j + 1) % 2 == 1) { - ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), - &((*mat)[1 - k][(j == 0)]), + ibz_inv_dim4_make_coeff_pmp(&(work.m[j][k]), + &(mat->m[1 - k][(j == 0)]), &(c[6 - j - (j == 0)]), - &((*mat)[1 - k][2 - (j > 1)]), + &(mat->m[1 - k][2 - (j > 1)]), &(c[4 - j - (j == 1)]), - &((*mat)[1 - k][3 - (j == 3)]), + &(mat->m[1 - k][3 - (j == 3)]), &(c[3 - j - (j == 1) - (j == 2)])); } else { - ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), - &((*mat)[1 - k][(j == 0)]), + ibz_inv_dim4_make_coeff_mpm(&(work.m[j][k]), + &(mat->m[1 - k][(j == 0)]), &(c[6 - j - (j == 0)]), - &((*mat)[1 - k][2 - (j > 1)]), + &(mat->m[1 - k][2 - (j > 1)]), &(c[4 - j - (j == 1)]), - &((*mat)[1 - k][3 - (j == 3)]), + &(mat->m[1 - k][3 - (j == 3)]), &(c[3 - j - (j == 1) - (j == 2)])); } } for (int k = 2; k < 4; k++) { if ((k + j + 1) % 2 == 1) { - ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), - &((*mat)[3 - (k == 3)][(j == 0)]), + ibz_inv_dim4_make_coeff_pmp(&(work.m[j][k]), + &(mat->m[3 - (k == 3)][(j == 0)]), &(s[6 - j - (j == 0)]), - &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(mat->m[3 - (k == 3)][2 - (j > 1)]), &(s[4 - j - (j == 1)]), - &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(mat->m[3 - (k == 3)][3 - (j == 3)]), &(s[3 - j - (j == 1) - (j == 2)])); } else { - ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), - &((*mat)[3 - (k == 3)][(j == 0)]), + ibz_inv_dim4_make_coeff_mpm(&(work.m[j][k]), + &(mat->m[3 - (k == 3)][(j == 0)]), &(s[6 - j - (j == 0)]), - &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(mat->m[3 - (k == 3)][2 - (j > 1)]), &(s[4 - j - (j == 1)]), - &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(mat->m[3 - (k == 3)][3 - (j == 3)]), &(s[3 - j - (j == 1) - (j == 2)])); } } @@ -418,8 +418,8 @@ ibz_mat_4x4_eval(ibz_vec_4_t *res, const ibz_mat_4x4_t *mat, const ibz_vec_4_t * // assume initialization to 0 for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_mul(&prod, &(*mat)[i][j], &(*vec)[j]); - ibz_add(&(sum[i]), &(sum[i]), &prod); + ibz_mul(&prod, &mat->m[i][j], &vec->v[j]); + ibz_add(&(sum.v[i]), &(sum.v[i]), &prod); } } ibz_vec_4_copy(res, &sum); @@ -437,8 +437,8 @@ ibz_mat_4x4_eval_t(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_mat_4x4_t // assume initialization to 0 for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_mul(&prod, &(*mat)[j][i], &(*vec)[j]); - ibz_add(&(sum[i]), &(sum[i]), &prod); + ibz_mul(&prod, &mat->m[j][i], &vec->v[j]); + ibz_add(&(sum.v[i]), &(sum.v[i]), &prod); } } ibz_vec_4_copy(res, &sum); @@ -457,14 +457,14 @@ quat_qf_eval(ibz_t *res, const ibz_mat_4x4_t *qf, const ibz_vec_4_t *coord) ibz_vec_4_init(&sum); ibz_mat_4x4_eval(&sum, qf, coord); for (int i = 0; i < 4; i++) { - ibz_mul(&prod, &(sum[i]), &(*coord)[i]); + ibz_mul(&prod, &(sum.v[i]), &coord->v[i]); if (i > 0) { - ibz_add(&(sum[0]), &(sum[0]), &prod); + ibz_add(&(sum.v[0]), &(sum.v[0]), &prod); } else { - ibz_copy(&sum[0], &prod); + ibz_copy(&sum.v[0], &prod); } } - ibz_copy(res, &sum[0]); + ibz_copy(res, &sum.v[0]); ibz_finalize(&prod); ibz_vec_4_finalize(&sum); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/encode_signature.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/encode_signature.c index 112c695941..3a630cfd58 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/encode_signature.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/encode_signature.c @@ -157,17 +157,17 @@ secret_key_to_bytes(byte_t *enc, const secret_key_t *sk, const public_key_t *pk) ibz_finalize(&gcd); } #endif - enc = ibz_to_bytes(enc, &gen.coord[0], FP_ENCODED_BYTES, true); 
- enc = ibz_to_bytes(enc, &gen.coord[1], FP_ENCODED_BYTES, true); - enc = ibz_to_bytes(enc, &gen.coord[2], FP_ENCODED_BYTES, true); - enc = ibz_to_bytes(enc, &gen.coord[3], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[0], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[1], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[2], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[3], FP_ENCODED_BYTES, true); quat_alg_elem_finalize(&gen); } - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][0], TORSION_2POWER_BYTES, false); - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][1], TORSION_2POWER_BYTES, false); - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][0], TORSION_2POWER_BYTES, false); - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][1], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[0][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[0][1], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[1][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[1][1], TORSION_2POWER_BYTES, false); assert(enc - start == SECRETKEY_BYTES); } @@ -187,19 +187,19 @@ secret_key_from_bytes(secret_key_t *sk, public_key_t *pk, const byte_t *enc) quat_alg_elem_t gen; quat_alg_elem_init(&gen); enc = ibz_from_bytes(&norm, enc, FP_ENCODED_BYTES, false); - enc = ibz_from_bytes(&gen.coord[0], enc, FP_ENCODED_BYTES, true); - enc = ibz_from_bytes(&gen.coord[1], enc, FP_ENCODED_BYTES, true); - enc = ibz_from_bytes(&gen.coord[2], enc, FP_ENCODED_BYTES, true); - enc = ibz_from_bytes(&gen.coord[3], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[0], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[1], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[2], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[3], enc, FP_ENCODED_BYTES, true); quat_lideal_create(&sk->secret_ideal, &gen, &norm, &MAXORD_O0, &QUATALG_PINFTY); ibz_finalize(&norm); quat_alg_elem_finalize(&gen); } - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][0], enc, TORSION_2POWER_BYTES, false); - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][1], enc, TORSION_2POWER_BYTES, false); - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][0], enc, TORSION_2POWER_BYTES, false); - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][1], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[0][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[0][1], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[1][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[1][1], enc, TORSION_2POWER_BYTES, false); assert(enc - start == SECRETKEY_BYTES); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c index 8aafeac12b..a598a89c0e 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c @@ -261,223 +261,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x97d1,0x8f9c,0x8477,0x3a,0x39d2,0x66ae,0xabd6,0x13da,0xf153,0xb9e7,0xf8db,0x5f9f,0x36f7,0xfcb2,0xa4f0,0x62b9,0x5c07,0x3694,0x539d,0xe8c5,0x7631,0xf16c,0xc691,0x9a}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x97d1,0x8f9c,0x8477,0x3a,0x39d2,0x66ae,0xabd6,0x13da,0xf153,0xb9e7,0xf8db,0x5f9f,0x36f7,0xfcb2,0xa4f0,0x62b9,0x5c07,0x3694,0x539d,0xe8c5,0x7631,0xf16c,0xc691,0x9a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8f9c97d1,0x3a8477,0x66ae39d2,0x13daabd6,0xb9e7f153,0x5f9ff8db,0xfcb236f7,0x62b9a4f0,0x36945c07,0xe8c5539d,0xf16c7631,0x9ac691}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8f9c97d1,0x3a8477,0x66ae39d2,0x13daabd6,0xb9e7f153,0x5f9ff8db,0xfcb236f7,0x62b9a4f0,0x36945c07,0xe8c5539d,0xf16c7631,0x9ac691}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3a84778f9c97d1,0x13daabd666ae39d2,0x5f9ff8dbb9e7f153,0x62b9a4f0fcb236f7,0xe8c5539d36945c07,0x9ac691f16c7631}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3a84778f9c97d1,0x13daabd666ae39d2,0x5f9ff8dbb9e7f153,0x62b9a4f0fcb236f7,0xe8c5539d36945c07,0x9ac691f16c7631}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1ac2,0xbac6,0x4a43,0x76df,0xe925,0x4a2d,0x1cf8,0xd32d,0x7867,0x1dc0,0xc02f,0xdf8b,0xf122,0x4f0c,0xe07d,0x4a9e,0xa97,0x7ce2,0x8791,0x3570,0x1749,0x519b,0x34cc,0x66}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1ac2,0xbac6,0x4a43,0x76df,0xe925,0x4a2d,0x1cf8,0xd32d,0x7867,0x1dc0,0xc02f,0xdf8b,0xf122,0x4f0c,0xe07d,0x4a9e,0xa97,0x7ce2,0x8791,0x3570,0x1749,0x519b,0x34cc,0x66}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbac61ac2,0x76df4a43,0x4a2de925,0xd32d1cf8,0x1dc07867,0xdf8bc02f,0x4f0cf122,0x4a9ee07d,0x7ce20a97,0x35708791,0x519b1749,0x6634cc}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbac61ac2,0x76df4a43,0x4a2de925,0xd32d1cf8,0x1dc07867,0xdf8bc02f,0x4f0cf122,0x4a9ee07d,0x7ce20a97,0x35708791,0x519b1749,0x6634cc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x76df4a43bac61ac2,0xd32d1cf84a2de925,0xdf8bc02f1dc07867,0x4a9ee07d4f0cf122,0x357087917ce20a97,0x6634cc519b1749}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x76df4a43bac61ac2,0xd32d1cf84a2de925,0xdf8bc02f1dc07867,0x4a9ee07d4f0cf122,0x357087917ce20a97,0x6634cc519b1749}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xfb0f,0x234,0xa481,0x9c61,0x4bd1,0xcd58,0x3a72,0xe38c,0xbe7b,0xea3,0xf102,0xdc99,0xf180,0xb229,0x5d86,0xef91,0x46c4,0x8831,0xa9d5,0xf66f,0xa451,0x6c02,0x9ebd,0xfc}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xfb0f,0x234,0xa481,0x9c61,0x4bd1,0xcd58,0x3a72,0xe38c,0xbe7b,0xea3,0xf102,0xdc99,0xf180,0xb229,0x5d86,0xef91,0x46c4,0x8831,0xa9d5,0xf66f,0xa451,0x6c02,0x9ebd,0xfc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x234fb0f,0x9c61a481,0xcd584bd1,0xe38c3a72,0xea3be7b,0xdc99f102,0xb229f180,0xef915d86,0x883146c4,0xf66fa9d5,0x6c02a451,0xfc9ebd}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x234fb0f,0x9c61a481,0xcd584bd1,0xe38c3a72,0xea3be7b,0xdc99f102,0xb229f180,0xef915d86,0x883146c4,0xf66fa9d5,0x6c02a451,0xfc9ebd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x9c61a4810234fb0f,0xe38c3a72cd584bd1,0xdc99f1020ea3be7b,0xef915d86b229f180,0xf66fa9d5883146c4,0xfc9ebd6c02a451}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c61a4810234fb0f,0xe38c3a72cd584bd1,0xdc99f1020ea3be7b,0xef915d86b229f180,0xf66fa9d5883146c4,0xfc9ebd6c02a451}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x682f,0x7063,0x7b88,0xffc5,0xc62d,0x9951,0x5429,0xec25,0xeac,0x4618,0x724,0xa060,0xc908,0x34d,0x5b0f,0x9d46,0xa3f8,0xc96b,0xac62,0x173a,0x89ce,0xe93,0x396e,0x65}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x682f,0x7063,0x7b88,0xffc5,0xc62d,0x9951,0x5429,0xec25,0xeac,0x4618,0x724,0xa060,0xc908,0x34d,0x5b0f,0x9d46,0xa3f8,0xc96b,0xac62,0x173a,0x89ce,0xe93,0x396e,0x65}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7063682f,0xffc57b88,0x9951c62d,0xec255429,0x46180eac,0xa0600724,0x34dc908,0x9d465b0f,0xc96ba3f8,0x173aac62,0xe9389ce,0x65396e}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7063682f,0xffc57b88,0x9951c62d,0xec255429,0x46180eac,0xa0600724,0x34dc908,0x9d465b0f,0xc96ba3f8,0x173aac62,0xe9389ce,0x65396e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xffc57b887063682f,0xec2554299951c62d,0xa060072446180eac,0x9d465b0f034dc908,0x173aac62c96ba3f8,0x65396e0e9389ce}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xffc57b887063682f,0xec2554299951c62d,0xa060072446180eac,0x9d465b0f034dc908,0x173aac62c96ba3f8,0x65396e0e9389ce}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x6f75,0xc742,0x1abb,0xc3b2,0x4bff,0xf015,0x66b,0xc51b,0xacd6,0x30c2,0xf641,0x625b,0x2e88,0xbe5,0x5121,0xbe40,0x8ac2,0x755b,0xb8c9,0x4eb6,0xb07,0x46b6,0x84cf,0x47}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x6f75,0xc742,0x1abb,0xc3b2,0x4bff,0xf015,0x66b,0xc51b,0xacd6,0x30c2,0xf641,0x625b,0x2e88,0xbe5,0x5121,0xbe40,0x8ac2,0x755b,0xb8c9,0x4eb6,0xb07,0x46b6,0x84cf,0x47}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc7426f75,0xc3b21abb,0xf0154bff,0xc51b066b,0x30c2acd6,0x625bf641,0xbe52e88,0xbe405121,0x755b8ac2,0x4eb6b8c9,0x46b60b07,0x4784cf}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc7426f75,0xc3b21abb,0xf0154bff,0xc51b066b,0x30c2acd6,0x625bf641,0xbe52e88,0xbe405121,0x755b8ac2,0x4eb6b8c9,0x46b60b07,0x4784cf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc3b21abbc7426f75,0xc51b066bf0154bff,0x625bf64130c2acd6,0xbe4051210be52e88,0x4eb6b8c9755b8ac2,0x4784cf46b60b07}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc3b21abbc7426f75,0xc51b066bf0154bff,0x625bf64130c2acd6,0xbe4051210be52e88,0x4eb6b8c9755b8ac2,0x4784cf46b60b07}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x9db8,0x479b,0xe350,0xae1e,0x4f92,0x6572,0x60a4,0x89ed,0x12f4,0xb88d,0x64b6,0xf9ca,0x26b,0xc086,0x83b8,0xb2c7,0x88a8,0xe99b,0x57b3,0x9017,0xe033,0x9d5d,0x5de6,0x37}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x9db8,0x479b,0xe350,0xae1e,0x4f92,0x6572,0x60a4,0x89ed,0x12f4,0xb88d,0x64b6,0xf9ca,0x26b,0xc086,0x83b8,0xb2c7,0x88a8,0xe99b,0x57b3,0x9017,0xe033,0x9d5d,0x5de6,0x37}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x479b9db8,0xae1ee350,0x65724f92,0x89ed60a4,0xb88d12f4,0xf9ca64b6,0xc086026b,0xb2c783b8,0xe99b88a8,0x901757b3,0x9d5de033,0x375de6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x479b9db8,0xae1ee350,0x65724f92,0x89ed60a4,0xb88d12f4,0xf9ca64b6,0xc086026b,0xb2c783b8,0xe99b88a8,0x901757b3,0x9d5de033,0x375de6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xae1ee350479b9db8,0x89ed60a465724f92,0xf9ca64b6b88d12f4,0xb2c783b8c086026b,0x901757b3e99b88a8,0x375de69d5de033}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xae1ee350479b9db8,0x89ed60a465724f92,0xf9ca64b6b88d12f4,0xb2c783b8c086026b,0x901757b3e99b88a8,0x375de69d5de033}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x23f7,0x1d02,0x3431,0x354e,0xba31,0x23a4,0xe6c4,0x6a9c,0x64c,0xea8,0x419f,0xe54f,0x3cb9,0xc02d,0x3caf,0xe7a3,0x2d32,0x31d4,0xed80,0x47d9,0x2086,0x69f4,0x80d3,0x25}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x23f7,0x1d02,0x3431,0x354e,0xba31,0x23a4,0xe6c4,0x6a9c,0x64c,0xea8,0x419f,0xe54f,0x3cb9,0xc02d,0x3caf,0xe7a3,0x2d32,0x31d4,0xed80,0x47d9,0x2086,0x69f4,0x80d3,0x25}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1d0223f7,0x354e3431,0x23a4ba31,0x6a9ce6c4,0xea8064c,0xe54f419f,0xc02d3cb9,0xe7a33caf,0x31d42d32,0x47d9ed80,0x69f42086,0x2580d3}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1d0223f7,0x354e3431,0x23a4ba31,0x6a9ce6c4,0xea8064c,0xe54f419f,0xc02d3cb9,0xe7a33caf,0x31d42d32,0x47d9ed80,0x69f42086,0x2580d3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x354e34311d0223f7,0x6a9ce6c423a4ba31,0xe54f419f0ea8064c,0xe7a33cafc02d3cb9,0x47d9ed8031d42d32,0x2580d369f42086}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x354e34311d0223f7,0x6a9ce6c423a4ba31,0xe54f419f0ea8064c,0xe7a33cafc02d3cb9,0x47d9ed8031d42d32,0x2580d369f42086}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x908b,0x38bd,0xe544,0x3c4d,0xb400,0xfea,0xf994,0x3ae4,0x5329,0xcf3d,0x9be,0x9da4,0xd177,0xf41a,0xaede,0x41bf,0x753d,0x8aa4,0x4736,0xb149,0xf4f8,0xb949,0x7b30,0xb8}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x908b,0x38bd,0xe544,0x3c4d,0xb400,0xfea,0xf994,0x3ae4,0x5329,0xcf3d,0x9be,0x9da4,0xd177,0xf41a,0xaede,0x41bf,0x753d,0x8aa4,0x4736,0xb149,0xf4f8,0xb949,0x7b30,0xb8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x38bd908b,0x3c4de544,0xfeab400,0x3ae4f994,0xcf3d5329,0x9da409be,0xf41ad177,0x41bfaede,0x8aa4753d,0xb1494736,0xb949f4f8,0xb87b30}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x38bd908b,0x3c4de544,0xfeab400,0x3ae4f994,0xcf3d5329,0x9da409be,0xf41ad177,0x41bfaede,0x8aa4753d,0xb1494736,0xb949f4f8,0xb87b30}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3c4de54438bd908b,0x3ae4f9940feab400,0x9da409becf3d5329,0x41bfaedef41ad177,0xb14947368aa4753d,0xb87b30b949f4f8}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3c4de54438bd908b,0x3ae4f9940feab400,0x9da409becf3d5329,0x41bfaedef41ad177,0xb14947368aa4753d,0xb87b30b949f4f8}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} 
+{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x97d1,0x8f9c,0x8477,0x3a,0x39d2,0x66ae,0xabd6,0x13da,0xf153,0xb9e7,0xf8db,0x5f9f,0x36f7,0xfcb2,0xa4f0,0x62b9,0x5c07,0x3694,0x539d,0xe8c5,0x7631,0xf16c,0xc691,0x9a}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x97d1,0x8f9c,0x8477,0x3a,0x39d2,0x66ae,0xabd6,0x13da,0xf153,0xb9e7,0xf8db,0x5f9f,0x36f7,0xfcb2,0xa4f0,0x62b9,0x5c07,0x3694,0x539d,0xe8c5,0x7631,0xf16c,0xc691,0x9a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8f9c97d1,0x3a8477,0x66ae39d2,0x13daabd6,0xb9e7f153,0x5f9ff8db,0xfcb236f7,0x62b9a4f0,0x36945c07,0xe8c5539d,0xf16c7631,0x9ac691}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8f9c97d1,0x3a8477,0x66ae39d2,0x13daabd6,0xb9e7f153,0x5f9ff8db,0xfcb236f7,0x62b9a4f0,0x36945c07,0xe8c5539d,0xf16c7631,0x9ac691}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3a84778f9c97d1,0x13daabd666ae39d2,0x5f9ff8dbb9e7f153,0x62b9a4f0fcb236f7,0xe8c5539d36945c07,0x9ac691f16c7631}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3a84778f9c97d1,0x13daabd666ae39d2,0x5f9ff8dbb9e7f153,0x62b9a4f0fcb236f7,0xe8c5539d36945c07,0x9ac691f16c7631}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1ac2,0xbac6,0x4a43,0x76df,0xe925,0x4a2d,0x1cf8,0xd32d,0x7867,0x1dc0,0xc02f,0xdf8b,0xf122,0x4f0c,0xe07d,0x4a9e,0xa97,0x7ce2,0x8791,0x3570,0x1749,0x519b,0x34cc,0x66}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1ac2,0xbac6,0x4a43,0x76df,0xe925,0x4a2d,0x1cf8,0xd32d,0x7867,0x1dc0,0xc02f,0xdf8b,0xf122,0x4f0c,0xe07d,0x4a9e,0xa97,0x7ce2,0x8791,0x3570,0x1749,0x519b,0x34cc,0x66}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbac61ac2,0x76df4a43,0x4a2de925,0xd32d1cf8,0x1dc07867,0xdf8bc02f,0x4f0cf122,0x4a9ee07d,0x7ce20a97,0x35708791,0x519b1749,0x6634cc}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xbac61ac2,0x76df4a43,0x4a2de925,0xd32d1cf8,0x1dc07867,0xdf8bc02f,0x4f0cf122,0x4a9ee07d,0x7ce20a97,0x35708791,0x519b1749,0x6634cc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x76df4a43bac61ac2,0xd32d1cf84a2de925,0xdf8bc02f1dc07867,0x4a9ee07d4f0cf122,0x357087917ce20a97,0x6634cc519b1749}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x76df4a43bac61ac2,0xd32d1cf84a2de925,0xdf8bc02f1dc07867,0x4a9ee07d4f0cf122,0x357087917ce20a97,0x6634cc519b1749}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xfb0f,0x234,0xa481,0x9c61,0x4bd1,0xcd58,0x3a72,0xe38c,0xbe7b,0xea3,0xf102,0xdc99,0xf180,0xb229,0x5d86,0xef91,0x46c4,0x8831,0xa9d5,0xf66f,0xa451,0x6c02,0x9ebd,0xfc}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xfb0f,0x234,0xa481,0x9c61,0x4bd1,0xcd58,0x3a72,0xe38c,0xbe7b,0xea3,0xf102,0xdc99,0xf180,0xb229,0x5d86,0xef91,0x46c4,0x8831,0xa9d5,0xf66f,0xa451,0x6c02,0x9ebd,0xfc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x234fb0f,0x9c61a481,0xcd584bd1,0xe38c3a72,0xea3be7b,0xdc99f102,0xb229f180,0xef915d86,0x883146c4,0xf66fa9d5,0x6c02a451,0xfc9ebd}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x234fb0f,0x9c61a481,0xcd584bd1,0xe38c3a72,0xea3be7b,0xdc99f102,0xb229f180,0xef915d86,0x883146c4,0xf66fa9d5,0x6c02a451,0xfc9ebd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c61a4810234fb0f,0xe38c3a72cd584bd1,0xdc99f1020ea3be7b,0xef915d86b229f180,0xf66fa9d5883146c4,0xfc9ebd6c02a451}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c61a4810234fb0f,0xe38c3a72cd584bd1,0xdc99f1020ea3be7b,0xef915d86b229f180,0xf66fa9d5883146c4,0xfc9ebd6c02a451}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x682f,0x7063,0x7b88,0xffc5,0xc62d,0x9951,0x5429,0xec25,0xeac,0x4618,0x724,0xa060,0xc908,0x34d,0x5b0f,0x9d46,0xa3f8,0xc96b,0xac62,0x173a,0x89ce,0xe93,0x396e,0x65}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x682f,0x7063,0x7b88,0xffc5,0xc62d,0x9951,0x5429,0xec25,0xeac,0x4618,0x724,0xa060,0xc908,0x34d,0x5b0f,0x9d46,0xa3f8,0xc96b,0xac62,0x173a,0x89ce,0xe93,0x396e,0x65}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7063682f,0xffc57b88,0x9951c62d,0xec255429,0x46180eac,0xa0600724,0x34dc908,0x9d465b0f,0xc96ba3f8,0x173aac62,0xe9389ce,0x65396e}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7063682f,0xffc57b88,0x9951c62d,0xec255429,0x46180eac,0xa0600724,0x34dc908,0x9d465b0f,0xc96ba3f8,0x173aac62,0xe9389ce,0x65396e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xffc57b887063682f,0xec2554299951c62d,0xa060072446180eac,0x9d465b0f034dc908,0x173aac62c96ba3f8,0x65396e0e9389ce}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xffc57b887063682f,0xec2554299951c62d,0xa060072446180eac,0x9d465b0f034dc908,0x173aac62c96ba3f8,0x65396e0e9389ce}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x83a3,0xab6f,0x4f99,0xe1f6,0xc2e8,0x2b61,0xd921,0xec7a,0x4f14,0x7555,0xf78e,0xe0fd,0xb2bf,0x44b,0xfb09,0x107c,0xf365,0x55f7,0x633,0x9bbe,0x409c,0x9c11,0x25b0,0xf1}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x83a3,0xab6f,0x4f99,0xe1f6,0xc2e8,0x2b61,0xd921,0xec7a,0x4f14,0x7555,0xf78e,0xe0fd,0xb2bf,0x44b,0xfb09,0x107c,0xf365,0x55f7,0x633,0x9bbe,0x409c,0x9c11,0x25b0,0xf1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xab6f83a3,0xe1f64f99,0x2b61c2e8,0xec7ad921,0x75554f14,0xe0fdf78e,0x44bb2bf,0x107cfb09,0x55f7f365,0x9bbe0633,0x9c11409c,0xf125b0}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xab6f83a3,0xe1f64f99,0x2b61c2e8,0xec7ad921,0x75554f14,0xe0fdf78e,0x44bb2bf,0x107cfb09,0x55f7f365,0x9bbe0633,0x9c11409c,0xf125b0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe1f64f99ab6f83a3,0xec7ad9212b61c2e8,0xe0fdf78e75554f14,0x107cfb09044bb2bf,0x9bbe063355f7f365,0xf125b09c11409c}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe1f64f99ab6f83a3,0xec7ad9212b61c2e8,0xe0fdf78e75554f14,0x107cfb09044bb2bf,0x9bbe063355f7f365,0xf125b09c11409c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xdc3d,0x130,0x16ca,0x127f,0x1c5c,0x57d0,0x3ece,0x2e8d,0xc5ae,0xeb26,0x1272,0x6cab,0x79c7,0x7c9,0x321b,0xfeb3,0xc99f,0xb33e,0xefa2,0x62c3,0x7bbe,0x777c,0xc959,0x4e}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xdc3d,0x130,0x16ca,0x127f,0x1c5c,0x57d0,0x3ece,0x2e8d,0xc5ae,0xeb26,0x1272,0x6cab,0x79c7,0x7c9,0x321b,0xfeb3,0xc99f,0xb33e,0xefa2,0x62c3,0x7bbe,0x777c,0xc959,0x4e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x130dc3d,0x127f16ca,0x57d01c5c,0x2e8d3ece,0xeb26c5ae,0x6cab1272,0x7c979c7,0xfeb3321b,0xb33ec99f,0x62c3efa2,0x777c7bbe,0x4ec959}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x130dc3d,0x127f16ca,0x57d01c5c,0x2e8d3ece,0xeb26c5ae,0x6cab1272,0x7c979c7,0xfeb3321b,0xb33ec99f,0x62c3efa2,0x777c7bbe,0x4ec959}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x127f16ca0130dc3d,0x2e8d3ece57d01c5c,0x6cab1272eb26c5ae,0xfeb3321b07c979c7,0x62c3efa2b33ec99f,0x4ec959777c7bbe}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x127f16ca0130dc3d,0x2e8d3ece57d01c5c,0x6cab1272eb26c5ae,0xfeb3321b07c979c7,0x62c3efa2b33ec99f,0x4ec959777c7bbe}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8f83,0xf9b,0xec59,0x68d7,0x8301,0x787e,0x909b,0x2714,0xe264,0x8ea5,0x9950,0x60f4,0x971d,0x392b,0x4d1b,0xeb9a,0xb9fb,0xdd02,0xcbaa,0x1f24,0x626c,0x6afb,0xfc8,0x91}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8f83,0xf9b,0xec59,0x68d7,0x8301,0x787e,0x909b,0x2714,0xe264,0x8ea5,0x9950,0x60f4,0x971d,0x392b,0x4d1b,0xeb9a,0xb9fb,0xdd02,0xcbaa,0x1f24,0x626c,0x6afb,0xfc8,0x91}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf9b8f83,0x68d7ec59,0x787e8301,0x2714909b,0x8ea5e264,0x60f49950,0x392b971d,0xeb9a4d1b,0xdd02b9fb,0x1f24cbaa,0x6afb626c,0x910fc8}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf9b8f83,0x68d7ec59,0x787e8301,0x2714909b,0x8ea5e264,0x60f49950,0x392b971d,0xeb9a4d1b,0xdd02b9fb,0x1f24cbaa,0x6afb626c,0x910fc8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x68d7ec590f9b8f83,0x2714909b787e8301,0x60f499508ea5e264,0xeb9a4d1b392b971d,0x1f24cbaadd02b9fb,0x910fc86afb626c}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x68d7ec590f9b8f83,0x2714909b787e8301,0x60f499508ea5e264,0xeb9a4d1b392b971d,0x1f24cbaadd02b9fb,0x910fc86afb626c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 
-{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7c5d,0x5490,0xb066,0x1e09,0x3d17,0xd49e,0x26de,0x1385,0xb0eb,0x8aaa,0x871,0x1f02,0x4d40,0xfbb4,0x4f6,0xef83,0xc9a,0xaa08,0xf9cc,0x6441,0xbf63,0x63ee,0xda4f,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7c5d,0x5490,0xb066,0x1e09,0x3d17,0xd49e,0x26de,0x1385,0xb0eb,0x8aaa,0x871,0x1f02,0x4d40,0xfbb4,0x4f6,0xef83,0xc9a,0xaa08,0xf9cc,0x6441,0xbf63,0x63ee,0xda4f,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x54907c5d,0x1e09b066,0xd49e3d17,0x138526de,0x8aaab0eb,0x1f020871,0xfbb44d40,0xef8304f6,0xaa080c9a,0x6441f9cc,0x63eebf63,0xeda4f}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x54907c5d,0x1e09b066,0xd49e3d17,0x138526de,0x8aaab0eb,0x1f020871,0xfbb44d40,0xef8304f6,0xaa080c9a,0x6441f9cc,0x63eebf63,0xeda4f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1e09b06654907c5d,0x138526ded49e3d17,0x1f0208718aaab0eb,0xef8304f6fbb44d40,0x6441f9ccaa080c9a,0xeda4f63eebf63}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1e09b06654907c5d,0x138526ded49e3d17,0x1f0208718aaab0eb,0xef8304f6fbb44d40,0x6441f9ccaa080c9a,0xeda4f63eebf63}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x194c, 0x43, 0xc70, 0x1c7a, 0xa7a, 0xdf7, 0x1564, 0x1809, 0x8e8, 0x3a5, 0x1399, 0xf0a, 0x914, 0x1a27, 0xb1c, 0xcd0, 0xfc, 0xaa4, 0xd87, 0x1ed2, 0x2c0, 0x8e4, 0x1b93, 0x1a3f, 0x1d9b, 0x1a00, 0xbce, 0x17d2, 0x1a07, 0xf} @@ -737,223 +737,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8d4f,0xfa2a,0xd4d0,0xce2,0x7436,0x8079,0xbe5f,0x2c18,0x5699,0x3569,0x7ecd,0xe106,0xea0,0x7d91,0xcc95,0x7293,0x5f1d,0xd812,0xb900,0x9858,0xd95a,0xc80e,0x1d4a,0xa}}} 
+{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8d4f,0xfa2a,0xd4d0,0xce2,0x7436,0x8079,0xbe5f,0x2c18,0x5699,0x3569,0x7ecd,0xe106,0xea0,0x7d91,0xcc95,0x7293,0x5f1d,0xd812,0xb900,0x9858,0xd95a,0xc80e,0x1d4a,0xa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfa2a8d4f,0xce2d4d0,0x80797436,0x2c18be5f,0x35695699,0xe1067ecd,0x7d910ea0,0x7293cc95,0xd8125f1d,0x9858b900,0xc80ed95a,0xa1d4a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfa2a8d4f,0xce2d4d0,0x80797436,0x2c18be5f,0x35695699,0xe1067ecd,0x7d910ea0,0x7293cc95,0xd8125f1d,0x9858b900,0xc80ed95a,0xa1d4a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xce2d4d0fa2a8d4f,0x2c18be5f80797436,0xe1067ecd35695699,0x7293cc957d910ea0,0x9858b900d8125f1d,0xa1d4ac80ed95a}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xce2d4d0fa2a8d4f,0x2c18be5f80797436,0xe1067ecd35695699,0x7293cc957d910ea0,0x9858b900d8125f1d,0xa1d4ac80ed95a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1eca,0x7027,0xf189,0x93f2,0x1c1f,0x5a6f,0xabbc,0x9e7d,0xa00a,0xc45,0x62a8,0x8db0,0x70cd,0x413f,0x400a,0xffaa,0x2ae6,0xd8a8,0xcb0e,0x69c4,0x7d9a,0x7a25,0xa679,0xad}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1eca,0x7027,0xf189,0x93f2,0x1c1f,0x5a6f,0xabbc,0x9e7d,0xa00a,0xc45,0x62a8,0x8db0,0x70cd,0x413f,0x400a,0xffaa,0x2ae6,0xd8a8,0xcb0e,0x69c4,0x7d9a,0x7a25,0xa679,0xad}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x70271eca,0x93f2f189,0x5a6f1c1f,0x9e7dabbc,0xc45a00a,0x8db062a8,0x413f70cd,0xffaa400a,0xd8a82ae6,0x69c4cb0e,0x7a257d9a,0xada679}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x70271eca,0x93f2f189,0x5a6f1c1f,0x9e7dabbc,0xc45a00a,0x8db062a8,0x413f70cd,0xffaa400a,0xd8a82ae6,0x69c4cb0e,0x7a257d9a,0xada679}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x93f2f18970271eca,0x9e7dabbc5a6f1c1f,0x8db062a80c45a00a,0xffaa400a413f70cd,0x69c4cb0ed8a82ae6,0xada6797a257d9a}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x93f2f18970271eca,0x9e7dabbc5a6f1c1f,0x8db062a80c45a00a,0xffaa400a413f70cd,0x69c4cb0ed8a82ae6,0xada6797a257d9a}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8949,0xfec5,0x9be5,0xc657,0xab10,0x2df6,0x3ee,0xc0f7,0x1a64,0x328e,0x3b3,0xb50c,0x650a,0xa23d,0x8ab,0xb103,0xf6e6,0xb27d,0x708a,0xc702,0x5301,0x2f00,0x9991,0x2f}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8949,0xfec5,0x9be5,0xc657,0xab10,0x2df6,0x3ee,0xc0f7,0x1a64,0x328e,0x3b3,0xb50c,0x650a,0xa23d,0x8ab,0xb103,0xf6e6,0xb27d,0x708a,0xc702,0x5301,0x2f00,0x9991,0x2f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfec58949,0xc6579be5,0x2df6ab10,0xc0f703ee,0x328e1a64,0xb50c03b3,0xa23d650a,0xb10308ab,0xb27df6e6,0xc702708a,0x2f005301,0x2f9991}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfec58949,0xc6579be5,0x2df6ab10,0xc0f703ee,0x328e1a64,0xb50c03b3,0xa23d650a,0xb10308ab,0xb27df6e6,0xc702708a,0x2f005301,0x2f9991}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc6579be5fec58949,0xc0f703ee2df6ab10,0xb50c03b3328e1a64,0xb10308aba23d650a,0xc702708ab27df6e6,0x2f99912f005301}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0xc6579be5fec58949,0xc0f703ee2df6ab10,0xb50c03b3328e1a64,0xb10308aba23d650a,0xc702708ab27df6e6,0x2f99912f005301}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x72b1,0x5d5,0x2b2f,0xf31d,0x8bc9,0x7f86,0x41a0,0xd3e7,0xa966,0xca96,0x8132,0x1ef9,0xf15f,0x826e,0x336a,0x8d6c,0xa0e2,0x27ed,0x46ff,0x67a7,0x26a5,0x37f1,0xe2b5,0xf5}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x72b1,0x5d5,0x2b2f,0xf31d,0x8bc9,0x7f86,0x41a0,0xd3e7,0xa966,0xca96,0x8132,0x1ef9,0xf15f,0x826e,0x336a,0x8d6c,0xa0e2,0x27ed,0x46ff,0x67a7,0x26a5,0x37f1,0xe2b5,0xf5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5d572b1,0xf31d2b2f,0x7f868bc9,0xd3e741a0,0xca96a966,0x1ef98132,0x826ef15f,0x8d6c336a,0x27eda0e2,0x67a746ff,0x37f126a5,0xf5e2b5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5d572b1,0xf31d2b2f,0x7f868bc9,0xd3e741a0,0xca96a966,0x1ef98132,0x826ef15f,0x8d6c336a,0x27eda0e2,0x67a746ff,0x37f126a5,0xf5e2b5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf31d2b2f05d572b1,0xd3e741a07f868bc9,0x1ef98132ca96a966,0x8d6c336a826ef15f,0x67a746ff27eda0e2,0xf5e2b537f126a5}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf31d2b2f05d572b1,0xd3e741a07f868bc9,0x1ef98132ca96a966,0x8d6c336a826ef15f,0x67a746ff27eda0e2,0xf5e2b537f126a5}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1ebb,0xe120,0x35fc,0x20e3,0xba01,0xff68,0x2ef4,0x62f6,0x5e93,0x94c1,0x3f93,0x804c,0xddc5,0x5b3d,0x1d31,0xf673,0x6e47,0x3d32,0x242c,0x6f7e,0x764b,0x63cb,0xbf4,0xf7}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x1ebb,0xe120,0x35fc,0x20e3,0xba01,0xff68,0x2ef4,0x62f6,0x5e93,0x94c1,0x3f93,0x804c,0xddc5,0x5b3d,0x1d31,0xf673,0x6e47,0x3d32,0x242c,0x6f7e,0x764b,0x63cb,0xbf4,0xf7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe1201ebb,0x20e335fc,0xff68ba01,0x62f62ef4,0x94c15e93,0x804c3f93,0x5b3dddc5,0xf6731d31,0x3d326e47,0x6f7e242c,0x63cb764b,0xf70bf4}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe1201ebb,0x20e335fc,0xff68ba01,0x62f62ef4,0x94c15e93,0x804c3f93,0x5b3dddc5,0xf6731d31,0x3d326e47,0x6f7e242c,0x63cb764b,0xf70bf4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x20e335fce1201ebb,0x62f62ef4ff68ba01,0x804c3f9394c15e93,0xf6731d315b3dddc5,0x6f7e242c3d326e47,0xf70bf463cb764b}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x20e335fce1201ebb,0x62f62ef4ff68ba01,0x804c3f9394c15e93,0xf6731d315b3dddc5,0x6f7e242c3d326e47,0xf70bf463cb764b}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe76c,0x34d0,0x684,0xee5,0x43c6,0x5a38,0x4bd5,0x2867,0xd3c5,0x2ee1,0xf790,0x18bf,0xbb64,0x3924,0x7d25,0xe0bc,0x913a,0x1355,0x50e9,0x7091,0x6724,0x21b2,0xc027,0xaa}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe76c,0x34d0,0x684,0xee5,0x43c6,0x5a38,0x4bd5,0x2867,0xd3c5,0x2ee1,0xf790,0x18bf,0xbb64,0x3924,0x7d25,0xe0bc,0x913a,0x1355,0x50e9,0x7091,0x6724,0x21b2,0xc027,0xaa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x34d0e76c,0xee50684,0x5a3843c6,0x28674bd5,0x2ee1d3c5,0x18bff790,0x3924bb64,0xe0bc7d25,0x1355913a,0x709150e9,0x21b26724,0xaac027}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x34d0e76c,0xee50684,0x5a3843c6,0x28674bd5,0x2ee1d3c5,0x18bff790,0x3924bb64,0xe0bc7d25,0x1355913a,0x709150e9,0x21b26724,0xaac027}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xee5068434d0e76c,0x28674bd55a3843c6,0x18bff7902ee1d3c5,0xe0bc7d253924bb64,0x709150e91355913a,0xaac02721b26724}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xee5068434d0e76c,0x28674bd55a3843c6,0x18bff7902ee1d3c5,0xe0bc7d253924bb64,0x709150e91355913a,0xaac02721b26724}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xbd01,0x45bb,0x58bc,0x8007,0xbf5b,0xfd7,0x440b,0x7f9,0x54ed,0xe5db,0x2ba9,0xcd7b,0xfc98,0x1314,0x1470,0x9e9b,0xca3,0x944c,0x73c6,0x4cc9,0xa757,0x45fe,0x8b40,0x46}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xbd01,0x45bb,0x58bc,0x8007,0xbf5b,0xfd7,0x440b,0x7f9,0x54ed,0xe5db,0x2ba9,0xcd7b,0xfc98,0x1314,0x1470,0x9e9b,0xca3,0x944c,0x73c6,0x4cc9,0xa757,0x45fe,0x8b40,0x46}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x45bbbd01,0x800758bc,0xfd7bf5b,0x7f9440b,0xe5db54ed,0xcd7b2ba9,0x1314fc98,0x9e9b1470,0x944c0ca3,0x4cc973c6,0x45fea757,0x468b40}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x45bbbd01,0x800758bc,0xfd7bf5b,0x7f9440b,0xe5db54ed,0xcd7b2ba9,0x1314fc98,0x9e9b1470,0x944c0ca3,0x4cc973c6,0x45fea757,0x468b40}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x800758bc45bbbd01,0x7f9440b0fd7bf5b,0xcd7b2ba9e5db54ed,0x9e9b14701314fc98,0x4cc973c6944c0ca3,0x468b4045fea757}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x800758bc45bbbd01,0x7f9440b0fd7bf5b,0xcd7b2ba9e5db54ed,0x9e9b14701314fc98,0x4cc973c6944c0ca3,0x468b4045fea757}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 
-{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe145,0x1edf,0xca03,0xdf1c,0x45fe,0x97,0xd10b,0x9d09,0xa16c,0x6b3e,0xc06c,0x7fb3,0x223a,0xa4c2,0xe2ce,0x98c,0x91b8,0xc2cd,0xdbd3,0x9081,0x89b4,0x9c34,0xf40b,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe145,0x1edf,0xca03,0xdf1c,0x45fe,0x97,0xd10b,0x9d09,0xa16c,0x6b3e,0xc06c,0x7fb3,0x223a,0xa4c2,0xe2ce,0x98c,0x91b8,0xc2cd,0xdbd3,0x9081,0x89b4,0x9c34,0xf40b,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1edfe145,0xdf1cca03,0x9745fe,0x9d09d10b,0x6b3ea16c,0x7fb3c06c,0xa4c2223a,0x98ce2ce,0xc2cd91b8,0x9081dbd3,0x9c3489b4,0x8f40b}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1edfe145,0xdf1cca03,0x9745fe,0x9d09d10b,0x6b3ea16c,0x7fb3c06c,0xa4c2223a,0x98ce2ce,0xc2cd91b8,0x9081dbd3,0x9c3489b4,0x8f40b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdf1cca031edfe145,0x9d09d10b009745fe,0x7fb3c06c6b3ea16c,0x98ce2cea4c2223a,0x9081dbd3c2cd91b8,0x8f40b9c3489b4}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdf1cca031edfe145,0x9d09d10b009745fe,0x7fb3c06c6b3ea16c,0x98ce2cea4c2223a,0x9081dbd3c2cd91b8,0x8f40b9c3489b4}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8d4f,0xfa2a,0xd4d0,0xce2,0x7436,0x8079,0xbe5f,0x2c18,0x5699,0x3569,0x7ecd,0xe106,0xea0,0x7d91,0xcc95,0x7293,0x5f1d,0xd812,0xb900,0x9858,0xd95a,0xc80e,0x1d4a,0xa}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8d4f,0xfa2a,0xd4d0,0xce2,0x7436,0x8079,0xbe5f,0x2c18,0x5699,0x3569,0x7ecd,0xe106,0xea0,0x7d91,0xcc95,0x7293,0x5f1d,0xd812,0xb900,0x9858,0xd95a,0xc80e,0x1d4a,0xa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfa2a8d4f,0xce2d4d0,0x80797436,0x2c18be5f,0x35695699,0xe1067ecd,0x7d910ea0,0x7293cc95,0xd8125f1d,0x9858b900,0xc80ed95a,0xa1d4a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfa2a8d4f,0xce2d4d0,0x80797436,0x2c18be5f,0x35695699,0xe1067ecd,0x7d910ea0,0x7293cc95,0xd8125f1d,0x9858b900,0xc80ed95a,0xa1d4a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xce2d4d0fa2a8d4f,0x2c18be5f80797436,0xe1067ecd35695699,0x7293cc957d910ea0,0x9858b900d8125f1d,0xa1d4ac80ed95a}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xce2d4d0fa2a8d4f,0x2c18be5f80797436,0xe1067ecd35695699,0x7293cc957d910ea0,0x9858b900d8125f1d,0xa1d4ac80ed95a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1eca,0x7027,0xf189,0x93f2,0x1c1f,0x5a6f,0xabbc,0x9e7d,0xa00a,0xc45,0x62a8,0x8db0,0x70cd,0x413f,0x400a,0xffaa,0x2ae6,0xd8a8,0xcb0e,0x69c4,0x7d9a,0x7a25,0xa679,0xad}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1eca,0x7027,0xf189,0x93f2,0x1c1f,0x5a6f,0xabbc,0x9e7d,0xa00a,0xc45,0x62a8,0x8db0,0x70cd,0x413f,0x400a,0xffaa,0x2ae6,0xd8a8,0xcb0e,0x69c4,0x7d9a,0x7a25,0xa679,0xad}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x70271eca,0x93f2f189,0x5a6f1c1f,0x9e7dabbc,0xc45a00a,0x8db062a8,0x413f70cd,0xffaa400a,0xd8a82ae6,0x69c4cb0e,0x7a257d9a,0xada679}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x70271eca,0x93f2f189,0x5a6f1c1f,0x9e7dabbc,0xc45a00a,0x8db062a8,0x413f70cd,0xffaa400a,0xd8a82ae6,0x69c4cb0e,0x7a257d9a,0xada679}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x93f2f18970271eca,0x9e7dabbc5a6f1c1f,0x8db062a80c45a00a,0xffaa400a413f70cd,0x69c4cb0ed8a82ae6,0xada6797a257d9a}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x93f2f18970271eca,0x9e7dabbc5a6f1c1f,0x8db062a80c45a00a,0xffaa400a413f70cd,0x69c4cb0ed8a82ae6,0xada6797a257d9a}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8949,0xfec5,0x9be5,0xc657,0xab10,0x2df6,0x3ee,0xc0f7,0x1a64,0x328e,0x3b3,0xb50c,0x650a,0xa23d,0x8ab,0xb103,0xf6e6,0xb27d,0x708a,0xc702,0x5301,0x2f00,0x9991,0x2f}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8949,0xfec5,0x9be5,0xc657,0xab10,0x2df6,0x3ee,0xc0f7,0x1a64,0x328e,0x3b3,0xb50c,0x650a,0xa23d,0x8ab,0xb103,0xf6e6,0xb27d,0x708a,0xc702,0x5301,0x2f00,0x9991,0x2f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfec58949,0xc6579be5,0x2df6ab10,0xc0f703ee,0x328e1a64,0xb50c03b3,0xa23d650a,0xb10308ab,0xb27df6e6,0xc702708a,0x2f005301,0x2f9991}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfec58949,0xc6579be5,0x2df6ab10,0xc0f703ee,0x328e1a64,0xb50c03b3,0xa23d650a,0xb10308ab,0xb27df6e6,0xc702708a,0x2f005301,0x2f9991}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc6579be5fec58949,0xc0f703ee2df6ab10,0xb50c03b3328e1a64,0xb10308aba23d650a,0xc702708ab27df6e6,0x2f99912f005301}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc6579be5fec58949,0xc0f703ee2df6ab10,0xb50c03b3328e1a64,0xb10308aba23d650a,0xc702708ab27df6e6,0x2f99912f005301}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x72b1,0x5d5,0x2b2f,0xf31d,0x8bc9,0x7f86,0x41a0,0xd3e7,0xa966,0xca96,0x8132,0x1ef9,0xf15f,0x826e,0x336a,0x8d6c,0xa0e2,0x27ed,0x46ff,0x67a7,0x26a5,0x37f1,0xe2b5,0xf5}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x72b1,0x5d5,0x2b2f,0xf31d,0x8bc9,0x7f86,0x41a0,0xd3e7,0xa966,0xca96,0x8132,0x1ef9,0xf15f,0x826e,0x336a,0x8d6c,0xa0e2,0x27ed,0x46ff,0x67a7,0x26a5,0x37f1,0xe2b5,0xf5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5d572b1,0xf31d2b2f,0x7f868bc9,0xd3e741a0,0xca96a966,0x1ef98132,0x826ef15f,0x8d6c336a,0x27eda0e2,0x67a746ff,0x37f126a5,0xf5e2b5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5d572b1,0xf31d2b2f,0x7f868bc9,0xd3e741a0,0xca96a966,0x1ef98132,0x826ef15f,0x8d6c336a,0x27eda0e2,0x67a746ff,0x37f126a5,0xf5e2b5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf31d2b2f05d572b1,0xd3e741a07f868bc9,0x1ef98132ca96a966,0x8d6c336a826ef15f,0x67a746ff27eda0e2,0xf5e2b537f126a5}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf31d2b2f05d572b1,0xd3e741a07f868bc9,0x1ef98132ca96a966,0x8d6c336a826ef15f,0x67a746ff27eda0e2,0xf5e2b537f126a5}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +{{{._mp_alloc = 0, 
._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3e42,0x35b4,0xc315,0x4acc,0x7905,0x734e,0xe57,0x941d,0xcc00,0x9010,0x652,0x5679,0x1e7c,0x69d5,0x77f0,0x5936,0x9815,0xdc49,0xdbae,0x8415,0x2381,0x706d,0x1b55,0x35}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3e42,0x35b4,0xc315,0x4acc,0x7905,0x734e,0xe57,0x941d,0xcc00,0x9010,0x652,0x5679,0x1e7c,0x69d5,0x77f0,0x5936,0x9815,0xdc49,0xdbae,0x8415,0x2381,0x706d,0x1b55,0x35}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x35b43e42,0x4accc315,0x734e7905,0x941d0e57,0x9010cc00,0x56790652,0x69d51e7c,0x593677f0,0xdc499815,0x8415dbae,0x706d2381,0x351b55}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x35b43e42,0x4accc315,0x734e7905,0x941d0e57,0x9010cc00,0x56790652,0x69d51e7c,0x593677f0,0xdc499815,0x8415dbae,0x706d2381,0x351b55}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4accc31535b43e42,0x941d0e57734e7905,0x567906529010cc00,0x593677f069d51e7c,0x8415dbaedc499815,0x351b55706d2381}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4accc31535b43e42,0x941d0e57734e7905,0x567906529010cc00,0x593677f069d51e7c,0x8415dbaedc499815,0x351b55706d2381}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x9f23,0x1f88,0x311a,0x8d4e,0x15a2,0x199f,0x997,0x8bcf,0xc7a0,0xc956,0x3de8,0x254b,0x1224,0x1a69,0x604a,0x9cb1,0xa8f7,0xc6ee,0x5903,0x65b8,0xe8a5,0xa271,0x7d6e,0xb3}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x9f23,0x1f88,0x311a,0x8d4e,0x15a2,0x199f,0x997,0x8bcf,0xc7a0,0xc956,0x3de8,0x254b,0x1224,0x1a69,0x604a,0x9cb1,0xa8f7,0xc6ee,0x5903,0x65b8,0xe8a5,0xa271,0x7d6e,0xb3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1f889f23,0x8d4e311a,0x199f15a2,0x8bcf0997,0xc956c7a0,0x254b3de8,0x1a691224,0x9cb1604a,0xc6eea8f7,0x65b85903,0xa271e8a5,0xb37d6e}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1f889f23,0x8d4e311a,0x199f15a2,0x8bcf0997,0xc956c7a0,0x254b3de8,0x1a691224,0x9cb1604a,0xc6eea8f7,0x65b85903,0xa271e8a5,0xb37d6e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8d4e311a1f889f23,0x8bcf0997199f15a2,0x254b3de8c956c7a0,0x9cb1604a1a691224,0x65b85903c6eea8f7,0xb37d6ea271e8a5}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x8d4e311a1f889f23,0x8bcf0997199f15a2,0x254b3de8c956c7a0,0x9cb1604a1a691224,0x65b85903c6eea8f7,0xb37d6ea271e8a5}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xfad4,0x9280,0x39ea,0xba3b,0xb12b,0x1c9c,0x5ffd,0x2c19,0x13bf,0x2145,0xaf34,0x30c1,0x70d8,0x27ea,0x6539,0xb50a,0x3106,0x3638,0x7fad,0xa5d2,0x912a,0xb0e6,0xb4a1,0xfd}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xfad4,0x9280,0x39ea,0xba3b,0xb12b,0x1c9c,0x5ffd,0x2c19,0x13bf,0x2145,0xaf34,0x30c1,0x70d8,0x27ea,0x6539,0xb50a,0x3106,0x3638,0x7fad,0xa5d2,0x912a,0xb0e6,0xb4a1,0xfd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9280fad4,0xba3b39ea,0x1c9cb12b,0x2c195ffd,0x214513bf,0x30c1af34,0x27ea70d8,0xb50a6539,0x36383106,0xa5d27fad,0xb0e6912a,0xfdb4a1}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9280fad4,0xba3b39ea,0x1c9cb12b,0x2c195ffd,0x214513bf,0x30c1af34,0x27ea70d8,0xb50a6539,0x36383106,0xa5d27fad,0xb0e6912a,0xfdb4a1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xba3b39ea9280fad4,0x2c195ffd1c9cb12b,0x30c1af34214513bf,0xb50a653927ea70d8,0xa5d27fad36383106,0xfdb4a1b0e6912a}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xba3b39ea9280fad4,0x2c195ffd1c9cb12b,0x30c1af34214513bf,0xb50a653927ea70d8,0xa5d27fad36383106,0xfdb4a1b0e6912a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc1be,0xca4b,0x3cea,0xb533,0x86fa,0x8cb1,0xf1a8,0x6be2,0x33ff,0x6fef,0xf9ad,0xa986,0xe183,0x962a,0x880f,0xa6c9,0x67ea,0x23b6,0x2451,0x7bea,0xdc7e,0x8f92,0xe4aa,0xca}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc1be,0xca4b,0x3cea,0xb533,0x86fa,0x8cb1,0xf1a8,0x6be2,0x33ff,0x6fef,0xf9ad,0xa986,0xe183,0x962a,0x880f,0xa6c9,0x67ea,0x23b6,0x2451,0x7bea,0xdc7e,0x8f92,0xe4aa,0xca}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xca4bc1be,0xb5333cea,0x8cb186fa,0x6be2f1a8,0x6fef33ff,0xa986f9ad,0x962ae183,0xa6c9880f,0x23b667ea,0x7bea2451,0x8f92dc7e,0xcae4aa}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xca4bc1be,0xb5333cea,0x8cb186fa,0x6be2f1a8,0x6fef33ff,0xa986f9ad,0x962ae183,0xa6c9880f,0x23b667ea,0x7bea2451,0x8f92dc7e,0xcae4aa}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb5333ceaca4bc1be,0x6be2f1a88cb186fa,0xa986f9ad6fef33ff,0xa6c9880f962ae183,0x7bea245123b667ea,0xcae4aa8f92dc7e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb5333ceaca4bc1be,0x6be2f1a88cb186fa,0xa986f9ad6fef33ff,0xa6c9880f962ae183,0x7bea245123b667ea,0xcae4aa8f92dc7e}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x9a9, 0x8c7, 0x119d, 0xada, 0x1d98, 0xc97, 0x1a31, 0xdc6, 0x192a, 0xf3c, 0x509, 0xc5b, 0xea7, 0x1eb9, 0x59d, 0x1e3c, 0x114b, 0x88, 0x5a9, 0x1154, 0x18c0, 0x11db, 0x1ba9, 0x3b8, 0x837, 0x18d0, 0x17eb, 0x211, 0x1aa7, 0x11} @@ -1213,223 +1213,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb089,0xa64c,0xa5a7,0x17cc,0xe580,0xaa34,0x86e8,0x9328,0x2d1e,0x66ce,0x2dbc,0xd6e5,0x68f,0xaf97,0x8c7a,0xc341,0x4b41,0x4e1c,0xe0a3,0x580f,0xe796,0x8f21,0x8ad3,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xb089,0xa64c,0xa5a7,0x17cc,0xe580,0xaa34,0x86e8,0x9328,0x2d1e,0x66ce,0x2dbc,0xd6e5,0x68f,0xaf97,0x8c7a,0xc341,0x4b41,0x4e1c,0xe0a3,0x580f,0xe796,0x8f21,0x8ad3,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa64cb089,0x17cca5a7,0xaa34e580,0x932886e8,0x66ce2d1e,0xd6e52dbc,0xaf97068f,0xc3418c7a,0x4e1c4b41,0x580fe0a3,0x8f21e796,0xf8ad3}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa64cb089,0x17cca5a7,0xaa34e580,0x932886e8,0x66ce2d1e,0xd6e52dbc,0xaf97068f,0xc3418c7a,0x4e1c4b41,0x580fe0a3,0x8f21e796,0xf8ad3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x17cca5a7a64cb089,0x932886e8aa34e580,0xd6e52dbc66ce2d1e,0xc3418c7aaf97068f,0x580fe0a34e1c4b41,0xf8ad38f21e796}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x17cca5a7a64cb089,0x932886e8aa34e580,0xd6e52dbc66ce2d1e,0xc3418c7aaf97068f,0x580fe0a34e1c4b41,0xf8ad38f21e796}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xebe,0xec57,0xcd8c,0xef29,0xf13a,0xd391,0xa791,0x715c,0xe58e,0x194c,0xf950,0x5a37,0xdb9e,0xdab6,0x82c8,0x5cce,0x3197,0x339a,0xa700,0xad4,0xcd5b,0x91c1,0x5cff,0xb0}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xebe,0xec57,0xcd8c,0xef29,0xf13a,0xd391,0xa791,0x715c,0xe58e,0x194c,0xf950,0x5a37,0xdb9e,0xdab6,0x82c8,0x5cce,0x3197,0x339a,0xa700,0xad4,0xcd5b,0x91c1,0x5cff,0xb0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xec570ebe,0xef29cd8c,0xd391f13a,0x715ca791,0x194ce58e,0x5a37f950,0xdab6db9e,0x5cce82c8,0x339a3197,0xad4a700,0x91c1cd5b,0xb05cff}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xec570ebe,0xef29cd8c,0xd391f13a,0x715ca791,0x194ce58e,0x5a37f950,0xdab6db9e,0x5cce82c8,0x339a3197,0xad4a700,0x91c1cd5b,0xb05cff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xef29cd8cec570ebe,0x715ca791d391f13a,0x5a37f950194ce58e,0x5cce82c8dab6db9e,0xad4a700339a3197,0xb05cff91c1cd5b}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xef29cd8cec570ebe,0x715ca791d391f13a,0x5a37f950194ce58e,0x5cce82c8dab6db9e,0xad4a700339a3197,0xb05cff91c1cd5b}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3b4f,0x8fac,0x4b2c,0xc9a0,0x69a2,0xe4fa,0x3480,0xe41b,0x5801,0xd68b,0xa965,0x2f67,0x3134,0x7ac0,0x93a7,0x5352,0x9612,0x200e,0x33e6,0x44f,0xbca8,0x82f1,0xe411,0x4d}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3b4f,0x8fac,0x4b2c,0xc9a0,0x69a2,0xe4fa,0x3480,0xe41b,0x5801,0xd68b,0xa965,0x2f67,0x3134,0x7ac0,0x93a7,0x5352,0x9612,0x200e,0x33e6,0x44f,0xbca8,0x82f1,0xe411,0x4d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8fac3b4f,0xc9a04b2c,0xe4fa69a2,0xe41b3480,0xd68b5801,0x2f67a965,0x7ac03134,0x535293a7,0x200e9612,0x44f33e6,0x82f1bca8,0x4de411}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8fac3b4f,0xc9a04b2c,0xe4fa69a2,0xe41b3480,0xd68b5801,0x2f67a965,0x7ac03134,0x535293a7,0x200e9612,0x44f33e6,0x82f1bca8,0x4de411}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc9a04b2c8fac3b4f,0xe41b3480e4fa69a2,0x2f67a965d68b5801,0x535293a77ac03134,0x44f33e6200e9612,0x4de41182f1bca8}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc9a04b2c8fac3b4f,0xe41b3480e4fa69a2,0x2f67a965d68b5801,0x535293a77ac03134,0x44f33e6200e9612,0x4de41182f1bca8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 
-{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4f77,0x59b3,0x5a58,0xe833,0x1a7f,0x55cb,0x7917,0x6cd7,0xd2e1,0x9931,0xd243,0x291a,0xf970,0x5068,0x7385,0x3cbe,0xb4be,0xb1e3,0x1f5c,0xa7f0,0x1869,0x70de,0x752c,0xf0}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4f77,0x59b3,0x5a58,0xe833,0x1a7f,0x55cb,0x7917,0x6cd7,0xd2e1,0x9931,0xd243,0x291a,0xf970,0x5068,0x7385,0x3cbe,0xb4be,0xb1e3,0x1f5c,0xa7f0,0x1869,0x70de,0x752c,0xf0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x59b34f77,0xe8335a58,0x55cb1a7f,0x6cd77917,0x9931d2e1,0x291ad243,0x5068f970,0x3cbe7385,0xb1e3b4be,0xa7f01f5c,0x70de1869,0xf0752c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x59b34f77,0xe8335a58,0x55cb1a7f,0x6cd77917,0x9931d2e1,0x291ad243,0x5068f970,0x3cbe7385,0xb1e3b4be,0xa7f01f5c,0x70de1869,0xf0752c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe8335a5859b34f77,0x6cd7791755cb1a7f,0x291ad2439931d2e1,0x3cbe73855068f970,0xa7f01f5cb1e3b4be,0xf0752c70de1869}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe8335a5859b34f77,0x6cd7791755cb1a7f,0x291ad2439931d2e1,0x3cbe73855068f970,0xa7f01f5cb1e3b4be,0xf0752c70de1869}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe463,0x3132,0x31,0xb872,0xdbee,0x1045,0x2b88,0x62c5,0xee3c,0xde5c,0xb179,0xa84f,0x18e5,0x355e,0x9a0f,0xbef8,0x783a,0x35b5,0x6d1c,0xaa31,0x3024,0xed81,0xa0f6,0x8a}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe463,0x3132,0x31,0xb872,0xdbee,0x1045,0x2b88,0x62c5,0xee3c,0xde5c,0xb179,0xa84f,0x18e5,0x355e,0x9a0f,0xbef8,0x783a,0x35b5,0x6d1c,0xaa31,0x3024,0xed81,0xa0f6,0x8a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x3132e463,0xb8720031,0x1045dbee,0x62c52b88,0xde5cee3c,0xa84fb179,0x355e18e5,0xbef89a0f,0x35b5783a,0xaa316d1c,0xed813024,0x8aa0f6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3132e463,0xb8720031,0x1045dbee,0x62c52b88,0xde5cee3c,0xa84fb179,0x355e18e5,0xbef89a0f,0x35b5783a,0xaa316d1c,0xed813024,0x8aa0f6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb87200313132e463,0x62c52b881045dbee,0xa84fb179de5cee3c,0xbef89a0f355e18e5,0xaa316d1c35b5783a,0x8aa0f6ed813024}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb87200313132e463,0x62c52b881045dbee,0xa84fb179de5cee3c,0xbef89a0f355e18e5,0xaa316d1c35b5783a,0x8aa0f6ed813024}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcf24,0xdac2,0xe08b,0xd2f9,0x13a,0xf1f,0x9517,0xfa7c,0xa1c5,0x581e,0x4d0b,0x3e59,0x97cc,0x7506,0xee19,0xa48e,0xb1b0,0x50c2,0xb5a7,0x4b1d,0x2fcd,0xee68,0xab65,0x85}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcf24,0xdac2,0xe08b,0xd2f9,0x13a,0xf1f,0x9517,0xfa7c,0xa1c5,0x581e,0x4d0b,0x3e59,0x97cc,0x7506,0xee19,0xa48e,0xb1b0,0x50c2,0xb5a7,0x4b1d,0x2fcd,0xee68,0xab65,0x85}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdac2cf24,0xd2f9e08b,0xf1f013a,0xfa7c9517,0x581ea1c5,0x3e594d0b,0x750697cc,0xa48eee19,0x50c2b1b0,0x4b1db5a7,0xee682fcd,0x85ab65}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdac2cf24,0xd2f9e08b,0xf1f013a,0xfa7c9517,0x581ea1c5,0x3e594d0b,0x750697cc,0xa48eee19,0x50c2b1b0,0x4b1db5a7,0xee682fcd,0x85ab65}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd2f9e08bdac2cf24,0xfa7c95170f1f013a,0x3e594d0b581ea1c5,0xa48eee19750697cc,0x4b1db5a750c2b1b0,0x85ab65ee682fcd}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd2f9e08bdac2cf24,0xfa7c95170f1f013a,0x3e594d0b581ea1c5,0xa48eee19750697cc,0x4b1db5a750c2b1b0,0x85ab65ee682fcd}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8b69,0x7be5,0xdf28,0x9c91,0xf929,0x7c60,0x6c50,0x4f81,0x714a,0x59da,0x2741,0x3c71,0x223a,0x79bf,0x14bd,0xa26f,0xc787,0x606d,0xc74c,0xef81,0xd1c4,0x32a,0x55ff,0x6a}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8b69,0x7be5,0xdf28,0x9c91,0xf929,0x7c60,0x6c50,0x4f81,0x714a,0x59da,0x2741,0x3c71,0x223a,0x79bf,0x14bd,0xa26f,0xc787,0x606d,0xc74c,0xef81,0xd1c4,0x32a,0x55ff,0x6a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7be58b69,0x9c91df28,0x7c60f929,0x4f816c50,0x59da714a,0x3c712741,0x79bf223a,0xa26f14bd,0x606dc787,0xef81c74c,0x32ad1c4,0x6a55ff}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7be58b69,0x9c91df28,0x7c60f929,0x4f816c50,0x59da714a,0x3c712741,0x79bf223a,0xa26f14bd,0x606dc787,0xef81c74c,0x32ad1c4,0x6a55ff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c91df287be58b69,0x4f816c507c60f929,0x3c71274159da714a,0xa26f14bd79bf223a,0xef81c74c606dc787,0x6a55ff032ad1c4}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c91df287be58b69,0x4f816c507c60f929,0x3c71274159da714a,0xa26f14bd79bf223a,0xef81c74c606dc787,0x6a55ff032ad1c4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1b9d,0xcecd,0xffce,0x478d,0x2411,0xefba,0xd477,0x9d3a,0x11c3,0x21a3,0x4e86,0x57b0,0xe71a,0xcaa1,0x65f0,0x4107,0x87c5,0xca4a,0x92e3,0x55ce,0xcfdb,0x127e,0x5f09,0x75}}} +{{{._mp_alloc = 0, 
._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1b9d,0xcecd,0xffce,0x478d,0x2411,0xefba,0xd477,0x9d3a,0x11c3,0x21a3,0x4e86,0x57b0,0xe71a,0xcaa1,0x65f0,0x4107,0x87c5,0xca4a,0x92e3,0x55ce,0xcfdb,0x127e,0x5f09,0x75}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecd1b9d,0x478dffce,0xefba2411,0x9d3ad477,0x21a311c3,0x57b04e86,0xcaa1e71a,0x410765f0,0xca4a87c5,0x55ce92e3,0x127ecfdb,0x755f09}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecd1b9d,0x478dffce,0xefba2411,0x9d3ad477,0x21a311c3,0x57b04e86,0xcaa1e71a,0x410765f0,0xca4a87c5,0x55ce92e3,0x127ecfdb,0x755f09}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x478dffcececd1b9d,0x9d3ad477efba2411,0x57b04e8621a311c3,0x410765f0caa1e71a,0x55ce92e3ca4a87c5,0x755f09127ecfdb}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x478dffcececd1b9d,0x9d3ad477efba2411,0x57b04e8621a311c3,0x410765f0caa1e71a,0x55ce92e3ca4a87c5,0x755f09127ecfdb}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb089,0xa64c,0xa5a7,0x17cc,0xe580,0xaa34,0x86e8,0x9328,0x2d1e,0x66ce,0x2dbc,0xd6e5,0x68f,0xaf97,0x8c7a,0xc341,0x4b41,0x4e1c,0xe0a3,0x580f,0xe796,0x8f21,0x8ad3,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb089,0xa64c,0xa5a7,0x17cc,0xe580,0xaa34,0x86e8,0x9328,0x2d1e,0x66ce,0x2dbc,0xd6e5,0x68f,0xaf97,0x8c7a,0xc341,0x4b41,0x4e1c,0xe0a3,0x580f,0xe796,0x8f21,0x8ad3,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa64cb089,0x17cca5a7,0xaa34e580,0x932886e8,0x66ce2d1e,0xd6e52dbc,0xaf97068f,0xc3418c7a,0x4e1c4b41,0x580fe0a3,0x8f21e796,0xf8ad3}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa64cb089,0x17cca5a7,0xaa34e580,0x932886e8,0x66ce2d1e,0xd6e52dbc,0xaf97068f,0xc3418c7a,0x4e1c4b41,0x580fe0a3,0x8f21e796,0xf8ad3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x17cca5a7a64cb089,0x932886e8aa34e580,0xd6e52dbc66ce2d1e,0xc3418c7aaf97068f,0x580fe0a34e1c4b41,0xf8ad38f21e796}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x17cca5a7a64cb089,0x932886e8aa34e580,0xd6e52dbc66ce2d1e,0xc3418c7aaf97068f,0x580fe0a34e1c4b41,0xf8ad38f21e796}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xebe,0xec57,0xcd8c,0xef29,0xf13a,0xd391,0xa791,0x715c,0xe58e,0x194c,0xf950,0x5a37,0xdb9e,0xdab6,0x82c8,0x5cce,0x3197,0x339a,0xa700,0xad4,0xcd5b,0x91c1,0x5cff,0xb0}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xebe,0xec57,0xcd8c,0xef29,0xf13a,0xd391,0xa791,0x715c,0xe58e,0x194c,0xf950,0x5a37,0xdb9e,0xdab6,0x82c8,0x5cce,0x3197,0x339a,0xa700,0xad4,0xcd5b,0x91c1,0x5cff,0xb0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xec570ebe,0xef29cd8c,0xd391f13a,0x715ca791,0x194ce58e,0x5a37f950,0xdab6db9e,0x5cce82c8,0x339a3197,0xad4a700,0x91c1cd5b,0xb05cff}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xec570ebe,0xef29cd8c,0xd391f13a,0x715ca791,0x194ce58e,0x5a37f950,0xdab6db9e,0x5cce82c8,0x339a3197,0xad4a700,0x91c1cd5b,0xb05cff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xef29cd8cec570ebe,0x715ca791d391f13a,0x5a37f950194ce58e,0x5cce82c8dab6db9e,0xad4a700339a3197,0xb05cff91c1cd5b}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0xef29cd8cec570ebe,0x715ca791d391f13a,0x5a37f950194ce58e,0x5cce82c8dab6db9e,0xad4a700339a3197,0xb05cff91c1cd5b}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3b4f,0x8fac,0x4b2c,0xc9a0,0x69a2,0xe4fa,0x3480,0xe41b,0x5801,0xd68b,0xa965,0x2f67,0x3134,0x7ac0,0x93a7,0x5352,0x9612,0x200e,0x33e6,0x44f,0xbca8,0x82f1,0xe411,0x4d}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3b4f,0x8fac,0x4b2c,0xc9a0,0x69a2,0xe4fa,0x3480,0xe41b,0x5801,0xd68b,0xa965,0x2f67,0x3134,0x7ac0,0x93a7,0x5352,0x9612,0x200e,0x33e6,0x44f,0xbca8,0x82f1,0xe411,0x4d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8fac3b4f,0xc9a04b2c,0xe4fa69a2,0xe41b3480,0xd68b5801,0x2f67a965,0x7ac03134,0x535293a7,0x200e9612,0x44f33e6,0x82f1bca8,0x4de411}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8fac3b4f,0xc9a04b2c,0xe4fa69a2,0xe41b3480,0xd68b5801,0x2f67a965,0x7ac03134,0x535293a7,0x200e9612,0x44f33e6,0x82f1bca8,0x4de411}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc9a04b2c8fac3b4f,0xe41b3480e4fa69a2,0x2f67a965d68b5801,0x535293a77ac03134,0x44f33e6200e9612,0x4de41182f1bca8}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc9a04b2c8fac3b4f,0xe41b3480e4fa69a2,0x2f67a965d68b5801,0x535293a77ac03134,0x44f33e6200e9612,0x4de41182f1bca8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4f77,0x59b3,0x5a58,0xe833,0x1a7f,0x55cb,0x7917,0x6cd7,0xd2e1,0x9931,0xd243,0x291a,0xf970,0x5068,0x7385,0x3cbe,0xb4be,0xb1e3,0x1f5c,0xa7f0,0x1869,0x70de,0x752c,0xf0}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4f77,0x59b3,0x5a58,0xe833,0x1a7f,0x55cb,0x7917,0x6cd7,0xd2e1,0x9931,0xd243,0x291a,0xf970,0x5068,0x7385,0x3cbe,0xb4be,0xb1e3,0x1f5c,0xa7f0,0x1869,0x70de,0x752c,0xf0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x59b34f77,0xe8335a58,0x55cb1a7f,0x6cd77917,0x9931d2e1,0x291ad243,0x5068f970,0x3cbe7385,0xb1e3b4be,0xa7f01f5c,0x70de1869,0xf0752c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x59b34f77,0xe8335a58,0x55cb1a7f,0x6cd77917,0x9931d2e1,0x291ad243,0x5068f970,0x3cbe7385,0xb1e3b4be,0xa7f01f5c,0x70de1869,0xf0752c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe8335a5859b34f77,0x6cd7791755cb1a7f,0x291ad2439931d2e1,0x3cbe73855068f970,0xa7f01f5cb1e3b4be,0xf0752c70de1869}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe8335a5859b34f77,0x6cd7791755cb1a7f,0x291ad2439931d2e1,0x3cbe73855068f970,0xa7f01f5cb1e3b4be,0xf0752c70de1869}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd69f,0xa20a,0x2dbf,0x4897,0x3199,0xde89,0xe5f9,0x293e,0x826b,0xb67a,0x9878,0x508f,0x1cd5,0xbfc7,0xa6dc,0xa78c,0xa5a7,0xf717,0x2bd3,0x9a61,0x7d35,0xb772,0xba39,0x5d}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd69f,0xa20a,0x2dbf,0x4897,0x3199,0xde89,0xe5f9,0x293e,0x826b,0xb67a,0x9878,0x508f,0x1cd5,0xbfc7,0xa6dc,0xa78c,0xa5a7,0xf717,0x2bd3,0x9a61,0x7d35,0xb772,0xba39,0x5d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa20ad69f,0x48972dbf,0xde893199,0x293ee5f9,0xb67a826b,0x508f9878,0xbfc71cd5,0xa78ca6dc,0xf717a5a7,0x9a612bd3,0xb7727d35,0x5dba39}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa20ad69f,0x48972dbf,0xde893199,0x293ee5f9,0xb67a826b,0x508f9878,0xbfc71cd5,0xa78ca6dc,0xf717a5a7,0x9a612bd3,0xb7727d35,0x5dba39}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x48972dbfa20ad69f,0x293ee5f9de893199,0x508f9878b67a826b,0xa78ca6dcbfc71cd5,0x9a612bd3f717a5a7,0x5dba39b7727d35}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x48972dbfa20ad69f,0x293ee5f9de893199,0x508f9878b67a826b,0xa78ca6dcbfc71cd5,0x9a612bd3f717a5a7,0x5dba39b7727d35}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xeec1,0x1e36,0x61bb,0x9e9f,0xe1d8,0x9166,0x8a8e,0xb5cd,0xc787,0x4281,0xb7db,0xc5fe,0x29b,0x7038,0xad1a,0xdfb3,0x5d88,0xa643,0xce34,0xe9d5,0xfe7,0xc15c,0xb80f,0xbc}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xeec1,0x1e36,0x61bb,0x9e9f,0xe1d8,0x9166,0x8a8e,0xb5cd,0xc787,0x4281,0xb7db,0xc5fe,0x29b,0x7038,0xad1a,0xdfb3,0x5d88,0xa643,0xce34,0xe9d5,0xfe7,0xc15c,0xb80f,0xbc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1e36eec1,0x9e9f61bb,0x9166e1d8,0xb5cd8a8e,0x4281c787,0xc5feb7db,0x7038029b,0xdfb3ad1a,0xa6435d88,0xe9d5ce34,0xc15c0fe7,0xbcb80f}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1e36eec1,0x9e9f61bb,0x9166e1d8,0xb5cd8a8e,0x4281c787,0xc5feb7db,0x7038029b,0xdfb3ad1a,0xa6435d88,0xe9d5ce34,0xc15c0fe7,0xbcb80f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9e9f61bb1e36eec1,0xb5cd8a8e9166e1d8,0xc5feb7db4281c787,0xdfb3ad1a7038029b,0xe9d5ce34a6435d88,0xbcb80fc15c0fe7}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9e9f61bb1e36eec1,0xb5cd8a8e9166e1d8,0xc5feb7db4281c787,0xdfb3ad1a7038029b,0xe9d5ce34a6435d88,0xbcb80fc15c0fe7}}}} #endif }, { #if 
0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb7ff,0xc2,0x2b8a,0x5a59,0xd318,0x52ca,0x9b64,0xad19,0x8df,0xc9b8,0x7b28,0x9d09,0xe309,0x9,0xfb09,0xcbb9,0x6a67,0x1137,0x707c,0xaa5,0xcdf5,0x3ffd,0xfb9e,0xb9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb7ff,0xc2,0x2b8a,0x5a59,0xd318,0x52ca,0x9b64,0xad19,0x8df,0xc9b8,0x7b28,0x9d09,0xe309,0x9,0xfb09,0xcbb9,0x6a67,0x1137,0x707c,0xaa5,0xcdf5,0x3ffd,0xfb9e,0xb9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc2b7ff,0x5a592b8a,0x52cad318,0xad199b64,0xc9b808df,0x9d097b28,0x9e309,0xcbb9fb09,0x11376a67,0xaa5707c,0x3ffdcdf5,0xb9fb9e}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc2b7ff,0x5a592b8a,0x52cad318,0xad199b64,0xc9b808df,0x9d097b28,0x9e309,0xcbb9fb09,0x11376a67,0xaa5707c,0x3ffdcdf5,0xb9fb9e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x5a592b8a00c2b7ff,0xad199b6452cad318,0x9d097b28c9b808df,0xcbb9fb090009e309,0xaa5707c11376a67,0xb9fb9e3ffdcdf5}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x5a592b8a00c2b7ff,0xad199b6452cad318,0x9d097b28c9b808df,0xcbb9fb090009e309,0xaa5707c11376a67,0xb9fb9e3ffdcdf5}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2961,0x5df5,0xd240,0xb768,0xce66,0x2176,0x1a06,0xd6c1,0x7d94,0x4985,0x6787,0xaf70,0xe32a,0x4038,0x5923,0x5873,0x5a58,0x8e8,0xd42c,0x659e,0x82ca,0x488d,0x45c6,0xa2}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2961,0x5df5,0xd240,0xb768,0xce66,0x2176,0x1a06,0xd6c1,0x7d94,0x4985,0x6787,0xaf70,0xe32a,0x4038,0x5923,0x5873,0x5a58,0x8e8,0xd42c,0x659e,0x82ca,0x488d,0x45c6,0xa2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5df52961,0xb768d240,0x2176ce66,0xd6c11a06,0x49857d94,0xaf706787,0x4038e32a,0x58735923,0x8e85a58,0x659ed42c,0x488d82ca,0xa245c6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5df52961,0xb768d240,0x2176ce66,0xd6c11a06,0x49857d94,0xaf706787,0x4038e32a,0x58735923,0x8e85a58,0x659ed42c,0x488d82ca,0xa245c6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb768d2405df52961,0xd6c11a062176ce66,0xaf70678749857d94,0x587359234038e32a,0x659ed42c08e85a58,0xa245c6488d82ca}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb768d2405df52961,0xd6c11a062176ce66,0xaf70678749857d94,0x587359234038e32a,0x659ed42c08e85a58,0xa245c6488d82ca}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0xccf, 0xad2, 0x1c72, 0x1f31, 0x155, 0xa3, 0x1062, 0xf6e, 0xe60, 0x87b, 0x1891, 0xb45, 0x1de8, 0x7f8, 0x18a6, 0x1e67, 0x988, 0x1887, 0xca7, 0x216, 0x1daa, 0x1aa5, 0xc3a, 0x11d3, 0x1282, 0x55d, 0x15fc, 0x1132, 0x1c5e, 0x8} @@ -1689,223 +1689,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4fff,0xe916,0xb451,0xaee,0xc580,0xfa2c,0xb766,0x199a,0xa8a0,0x36f2,0xaa1b,0x47dd,0xb34f,0x26f5,0xb1ba,0xc4c1,0xad4d,0xfcf8,0x21fd,0xf7a,0x2a36,0x4cd2,0x2b08,0x94}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4fff,0xe916,0xb451,0xaee,0xc580,0xfa2c,0xb766,0x199a,0xa8a0,0x36f2,0xaa1b,0x47dd,0xb34f,0x26f5,0xb1ba,0xc4c1,0xad4d,0xfcf8,0x21fd,0xf7a,0x2a36,0x4cd2,0x2b08,0x94}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xe9164fff,0xaeeb451,0xfa2cc580,0x199ab766,0x36f2a8a0,0x47ddaa1b,0x26f5b34f,0xc4c1b1ba,0xfcf8ad4d,0xf7a21fd,0x4cd22a36,0x942b08}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe9164fff,0xaeeb451,0xfa2cc580,0x199ab766,0x36f2a8a0,0x47ddaa1b,0x26f5b34f,0xc4c1b1ba,0xfcf8ad4d,0xf7a21fd,0x4cd22a36,0x942b08}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xaeeb451e9164fff,0x199ab766fa2cc580,0x47ddaa1b36f2a8a0,0xc4c1b1ba26f5b34f,0xf7a21fdfcf8ad4d,0x942b084cd22a36}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xaeeb451e9164fff,0x199ab766fa2cc580,0x47ddaa1b36f2a8a0,0xc4c1b1ba26f5b34f,0xf7a21fdfcf8ad4d,0x942b084cd22a36}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf16e,0x5176,0xe09f,0xe2ae,0xa46d,0x6925,0xc9bd,0xbd66,0xdd5c,0x51ce,0xe98d,0x9ad3,0x6815,0x4476,0xbec8,0xa088,0x6799,0x1733,0xf164,0x1d16,0xc914,0x4af3,0x3b5b,0x84}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf16e,0x5176,0xe09f,0xe2ae,0xa46d,0x6925,0xc9bd,0xbd66,0xdd5c,0x51ce,0xe98d,0x9ad3,0x6815,0x4476,0xbec8,0xa088,0x6799,0x1733,0xf164,0x1d16,0xc914,0x4af3,0x3b5b,0x84}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5176f16e,0xe2aee09f,0x6925a46d,0xbd66c9bd,0x51cedd5c,0x9ad3e98d,0x44766815,0xa088bec8,0x17336799,0x1d16f164,0x4af3c914,0x843b5b}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5176f16e,0xe2aee09f,0x6925a46d,0xbd66c9bd,0x51cedd5c,0x9ad3e98d,0x44766815,0xa088bec8,0x17336799,0x1d16f164,0x4af3c914,0x843b5b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe2aee09f5176f16e,0xbd66c9bd6925a46d,0x9ad3e98d51cedd5c,0xa088bec844766815,0x1d16f16417336799,0x843b5b4af3c914}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe2aee09f5176f16e,0xbd66c9bd6925a46d,0x9ad3e98d51cedd5c,0xa088bec844766815,0x1d16f16417336799,0x843b5b4af3c914}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x22c1,0x4088,0xbc14,0x9620,0x7d53,0x77ec,0x89,0xda12,0x54df,0x3806,0xb925,0x6092,0xd1b4,0x59e3,0x3419,0x9617,0x9ae6,0x48a9,0xf008,0xb452,0x9756,0x35cb,0x9c5d,0x49}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x22c1,0x4088,0xbc14,0x9620,0x7d53,0x77ec,0x89,0xda12,0x54df,0x3806,0xb925,0x6092,0xd1b4,0x59e3,0x3419,0x9617,0x9ae6,0x48a9,0xf008,0xb452,0x9756,0x35cb,0x9c5d,0x49}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x408822c1,0x9620bc14,0x77ec7d53,0xda120089,0x380654df,0x6092b925,0x59e3d1b4,0x96173419,0x48a99ae6,0xb452f008,0x35cb9756,0x499c5d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x408822c1,0x9620bc14,0x77ec7d53,0xda120089,0x380654df,0x6092b925,0x59e3d1b4,0x96173419,0x48a99ae6,0xb452f008,0x35cb9756,0x499c5d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9620bc14408822c1,0xda12008977ec7d53,0x6092b925380654df,0x9617341959e3d1b4,0xb452f00848a99ae6,0x499c5d35cb9756}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9620bc14408822c1,0xda12008977ec7d53,0x6092b925380654df,0x9617341959e3d1b4,0xb452f00848a99ae6,0x499c5d35cb9756}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb001,0x16e9,0x4bae,0xf511,0x3a7f,0x5d3,0x4899,0xe665,0x575f,0xc90d,0x55e4,0xb822,0x4cb0,0xd90a,0x4e45,0x3b3e,0x52b2,0x307,0xde02,0xf085,0xd5c9,0xb32d,0xd4f7,0x6b}}} +{{{._mp_alloc = 0, 
._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb001,0x16e9,0x4bae,0xf511,0x3a7f,0x5d3,0x4899,0xe665,0x575f,0xc90d,0x55e4,0xb822,0x4cb0,0xd90a,0x4e45,0x3b3e,0x52b2,0x307,0xde02,0xf085,0xd5c9,0xb32d,0xd4f7,0x6b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x16e9b001,0xf5114bae,0x5d33a7f,0xe6654899,0xc90d575f,0xb82255e4,0xd90a4cb0,0x3b3e4e45,0x30752b2,0xf085de02,0xb32dd5c9,0x6bd4f7}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x16e9b001,0xf5114bae,0x5d33a7f,0xe6654899,0xc90d575f,0xb82255e4,0xd90a4cb0,0x3b3e4e45,0x30752b2,0xf085de02,0xb32dd5c9,0x6bd4f7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf5114bae16e9b001,0xe665489905d33a7f,0xb82255e4c90d575f,0x3b3e4e45d90a4cb0,0xf085de02030752b2,0x6bd4f7b32dd5c9}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf5114bae16e9b001,0xe665489905d33a7f,0xb82255e4c90d575f,0x3b3e4e45d90a4cb0,0xf085de02030752b2,0x6bd4f7b32dd5c9}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xafa5,0x4195,0xbb2d,0xdd24,0xa3ca,0xc678,0xf995,0x2ccb,0x5c3b,0xf9ff,0xd06,0x1f9b,0x926d,0x4e3b,0x2881,0x24f2,0xcf4c,0x8e9a,0xa38d,0x24cb,0xe8f2,0x28a1,0x581c,0xde}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xafa5,0x4195,0xbb2d,0xdd24,0xa3ca,0xc678,0xf995,0x2ccb,0x5c3b,0xf9ff,0xd06,0x1f9b,0x926d,0x4e3b,0x2881,0x24f2,0xcf4c,0x8e9a,0xa38d,0x24cb,0xe8f2,0x28a1,0x581c,0xde}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4195afa5,0xdd24bb2d,0xc678a3ca,0x2ccbf995,0xf9ff5c3b,0x1f9b0d06,0x4e3b926d,0x24f22881,0x8e9acf4c,0x24cba38d,0x28a1e8f2,0xde581c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x4195afa5,0xdd24bb2d,0xc678a3ca,0x2ccbf995,0xf9ff5c3b,0x1f9b0d06,0x4e3b926d,0x24f22881,0x8e9acf4c,0x24cba38d,0x28a1e8f2,0xde581c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd24bb2d4195afa5,0x2ccbf995c678a3ca,0x1f9b0d06f9ff5c3b,0x24f228814e3b926d,0x24cba38d8e9acf4c,0xde581c28a1e8f2}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd24bb2d4195afa5,0x2ccbf995c678a3ca,0x1f9b0d06f9ff5c3b,0x24f228814e3b926d,0x24cba38d8e9acf4c,0xde581c28a1e8f2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcd88,0x9cea,0x593c,0xb5a8,0x79c6,0xc07c,0x496f,0xfb85,0x5ac9,0x381c,0xf4f8,0xfa59,0xb7a3,0x5caa,0x24c2,0x67c8,0x31b3,0x7585,0xbe8a,0xb89f,0xa29f,0x6cd5,0xc156,0x25}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcd88,0x9cea,0x593c,0xb5a8,0x79c6,0xc07c,0x496f,0xfb85,0x5ac9,0x381c,0xf4f8,0xfa59,0xb7a3,0x5caa,0x24c2,0x67c8,0x31b3,0x7585,0xbe8a,0xb89f,0xa29f,0x6cd5,0xc156,0x25}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9ceacd88,0xb5a8593c,0xc07c79c6,0xfb85496f,0x381c5ac9,0xfa59f4f8,0x5caab7a3,0x67c824c2,0x758531b3,0xb89fbe8a,0x6cd5a29f,0x25c156}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9ceacd88,0xb5a8593c,0xc07c79c6,0xfb85496f,0x381c5ac9,0xfa59f4f8,0x5caab7a3,0x67c824c2,0x758531b3,0xb89fbe8a,0x6cd5a29f,0x25c156}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb5a8593c9ceacd88,0xfb85496fc07c79c6,0xfa59f4f8381c5ac9,0x67c824c25caab7a3,0xb89fbe8a758531b3,0x25c1566cd5a29f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb5a8593c9ceacd88,0xfb85496fc07c79c6,0xfa59f4f8381c5ac9,0x67c824c25caab7a3,0xb89fbe8a758531b3,0x25c1566cd5a29f}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x9627,0xd297,0x9200,0x73de,0xaa89,0xf44f,0x99c7,0x2d45,0xb1eb,0xab2b,0x4168,0x976f,0x1e88,0x7777,0x2f39,0x6648,0xc224,0xd5a1,0xb815,0x861b,0xf76f,0xb476,0x4123,0xbe}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x9627,0xd297,0x9200,0x73de,0xaa89,0xf44f,0x99c7,0x2d45,0xb1eb,0xab2b,0x4168,0x976f,0x1e88,0x7777,0x2f39,0x6648,0xc224,0xd5a1,0xb815,0x861b,0xf76f,0xb476,0x4123,0xbe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd2979627,0x73de9200,0xf44faa89,0x2d4599c7,0xab2bb1eb,0x976f4168,0x77771e88,0x66482f39,0xd5a1c224,0x861bb815,0xb476f76f,0xbe4123}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd2979627,0x73de9200,0xf44faa89,0x2d4599c7,0xab2bb1eb,0x976f4168,0x77771e88,0x66482f39,0xd5a1c224,0x861bb815,0xb476f76f,0xbe4123}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x73de9200d2979627,0x2d4599c7f44faa89,0x976f4168ab2bb1eb,0x66482f3977771e88,0x861bb815d5a1c224,0xbe4123b476f76f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x73de9200d2979627,0x2d4599c7f44faa89,0x976f4168ab2bb1eb,0x66482f3977771e88,0x861bb815d5a1c224,0xbe4123b476f76f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x505b,0xbe6a,0x44d2,0x22db,0x5c35,0x3987,0x66a,0xd334,0xa3c4,0x600,0xf2f9,0xe064,0x6d92,0xb1c4,0xd77e,0xdb0d,0x30b3,0x7165,0x5c72,0xdb34,0x170d,0xd75e,0xa7e3,0x21}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x505b,0xbe6a,0x44d2,0x22db,0x5c35,0x3987,0x66a,0xd334,0xa3c4,0x600,0xf2f9,0xe064,0x6d92,0xb1c4,0xd77e,0xdb0d,0x30b3,0x7165,0x5c72,0xdb34,0x170d,0xd75e,0xa7e3,0x21}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbe6a505b,0x22db44d2,0x39875c35,0xd334066a,0x600a3c4,0xe064f2f9,0xb1c46d92,0xdb0dd77e,0x716530b3,0xdb345c72,0xd75e170d,0x21a7e3}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbe6a505b,0x22db44d2,0x39875c35,0xd334066a,0x600a3c4,0xe064f2f9,0xb1c46d92,0xdb0dd77e,0x716530b3,0xdb345c72,0xd75e170d,0x21a7e3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22db44d2be6a505b,0xd334066a39875c35,0xe064f2f90600a3c4,0xdb0dd77eb1c46d92,0xdb345c72716530b3,0x21a7e3d75e170d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22db44d2be6a505b,0xd334066a39875c35,0xe064f2f90600a3c4,0xdb0dd77eb1c46d92,0xdb345c72716530b3,0x21a7e3d75e170d}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4fff,0xe916,0xb451,0xaee,0xc580,0xfa2c,0xb766,0x199a,0xa8a0,0x36f2,0xaa1b,0x47dd,0xb34f,0x26f5,0xb1ba,0xc4c1,0xad4d,0xfcf8,0x21fd,0xf7a,0x2a36,0x4cd2,0x2b08,0x94}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4fff,0xe916,0xb451,0xaee,0xc580,0xfa2c,0xb766,0x199a,0xa8a0,0x36f2,0xaa1b,0x47dd,0xb34f,0x26f5,0xb1ba,0xc4c1,0xad4d,0xfcf8,0x21fd,0xf7a,0x2a36,0x4cd2,0x2b08,0x94}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe9164fff,0xaeeb451,0xfa2cc580,0x199ab766,0x36f2a8a0,0x47ddaa1b,0x26f5b34f,0xc4c1b1ba,0xfcf8ad4d,0xf7a21fd,0x4cd22a36,0x942b08}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe9164fff,0xaeeb451,0xfa2cc580,0x199ab766,0x36f2a8a0,0x47ddaa1b,0x26f5b34f,0xc4c1b1ba,0xfcf8ad4d,0xf7a21fd,0x4cd22a36,0x942b08}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xaeeb451e9164fff,0x199ab766fa2cc580,0x47ddaa1b36f2a8a0,0xc4c1b1ba26f5b34f,0xf7a21fdfcf8ad4d,0x942b084cd22a36}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xaeeb451e9164fff,0x199ab766fa2cc580,0x47ddaa1b36f2a8a0,0xc4c1b1ba26f5b34f,0xf7a21fdfcf8ad4d,0x942b084cd22a36}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf16e,0x5176,0xe09f,0xe2ae,0xa46d,0x6925,0xc9bd,0xbd66,0xdd5c,0x51ce,0xe98d,0x9ad3,0x6815,0x4476,0xbec8,0xa088,0x6799,0x1733,0xf164,0x1d16,0xc914,0x4af3,0x3b5b,0x84}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf16e,0x5176,0xe09f,0xe2ae,0xa46d,0x6925,0xc9bd,0xbd66,0xdd5c,0x51ce,0xe98d,0x9ad3,0x6815,0x4476,0xbec8,0xa088,0x6799,0x1733,0xf164,0x1d16,0xc914,0x4af3,0x3b5b,0x84}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5176f16e,0xe2aee09f,0x6925a46d,0xbd66c9bd,0x51cedd5c,0x9ad3e98d,0x44766815,0xa088bec8,0x17336799,0x1d16f164,0x4af3c914,0x843b5b}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5176f16e,0xe2aee09f,0x6925a46d,0xbd66c9bd,0x51cedd5c,0x9ad3e98d,0x44766815,0xa088bec8,0x17336799,0x1d16f164,0x4af3c914,0x843b5b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe2aee09f5176f16e,0xbd66c9bd6925a46d,0x9ad3e98d51cedd5c,0xa088bec844766815,0x1d16f16417336799,0x843b5b4af3c914}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe2aee09f5176f16e,0xbd66c9bd6925a46d,0x9ad3e98d51cedd5c,0xa088bec844766815,0x1d16f16417336799,0x843b5b4af3c914}}}} #endif }, { #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x22c1,0x4088,0xbc14,0x9620,0x7d53,0x77ec,0x89,0xda12,0x54df,0x3806,0xb925,0x6092,0xd1b4,0x59e3,0x3419,0x9617,0x9ae6,0x48a9,0xf008,0xb452,0x9756,0x35cb,0x9c5d,0x49}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x22c1,0x4088,0xbc14,0x9620,0x7d53,0x77ec,0x89,0xda12,0x54df,0x3806,0xb925,0x6092,0xd1b4,0x59e3,0x3419,0x9617,0x9ae6,0x48a9,0xf008,0xb452,0x9756,0x35cb,0x9c5d,0x49}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x408822c1,0x9620bc14,0x77ec7d53,0xda120089,0x380654df,0x6092b925,0x59e3d1b4,0x96173419,0x48a99ae6,0xb452f008,0x35cb9756,0x499c5d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x408822c1,0x9620bc14,0x77ec7d53,0xda120089,0x380654df,0x6092b925,0x59e3d1b4,0x96173419,0x48a99ae6,0xb452f008,0x35cb9756,0x499c5d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9620bc14408822c1,0xda12008977ec7d53,0x6092b925380654df,0x9617341959e3d1b4,0xb452f00848a99ae6,0x499c5d35cb9756}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9620bc14408822c1,0xda12008977ec7d53,0x6092b925380654df,0x9617341959e3d1b4,0xb452f00848a99ae6,0x499c5d35cb9756}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb001,0x16e9,0x4bae,0xf511,0x3a7f,0x5d3,0x4899,0xe665,0x575f,0xc90d,0x55e4,0xb822,0x4cb0,0xd90a,0x4e45,0x3b3e,0x52b2,0x307,0xde02,0xf085,0xd5c9,0xb32d,0xd4f7,0x6b}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb001,0x16e9,0x4bae,0xf511,0x3a7f,0x5d3,0x4899,0xe665,0x575f,0xc90d,0x55e4,0xb822,0x4cb0,0xd90a,0x4e45,0x3b3e,0x52b2,0x307,0xde02,0xf085,0xd5c9,0xb32d,0xd4f7,0x6b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x16e9b001,0xf5114bae,0x5d33a7f,0xe6654899,0xc90d575f,0xb82255e4,0xd90a4cb0,0x3b3e4e45,0x30752b2,0xf085de02,0xb32dd5c9,0x6bd4f7}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x16e9b001,0xf5114bae,0x5d33a7f,0xe6654899,0xc90d575f,0xb82255e4,0xd90a4cb0,0x3b3e4e45,0x30752b2,0xf085de02,0xb32dd5c9,0x6bd4f7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf5114bae16e9b001,0xe665489905d33a7f,0xb82255e4c90d575f,0x3b3e4e45d90a4cb0,0xf085de02030752b2,0x6bd4f7b32dd5c9}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf5114bae16e9b001,0xe665489905d33a7f,0xb82255e4c90d575f,0x3b3e4e45d90a4cb0,0xf085de02030752b2,0x6bd4f7b32dd5c9}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x43c6,0x55d8,0x682a,0xc215,0x706e,0xac4c,0x5ce,0x1182,0x8b72,0x90e3,0xf04f,0x6a11,0xc345,0x3488,0x45b0,0x5d3f,0x556b,0x9896,0x7b20,0x8d46,0xa9e3,0x7b0c,0xd428,0xba}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x43c6,0x55d8,0x682a,0xc215,0x706e,0xac4c,0x5ce,0x1182,0x8b72,0x90e3,0xf04f,0x6a11,0xc345,0x3488,0x45b0,0x5d3f,0x556b,0x9896,0x7b20,0x8d46,0xa9e3,0x7b0c,0xd428,0xba}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x55d843c6,0xc215682a,0xac4c706e,0x118205ce,0x90e38b72,0x6a11f04f,0x3488c345,0x5d3f45b0,0x9896556b,0x8d467b20,0x7b0ca9e3,0xbad428}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x55d843c6,0xc215682a,0xac4c706e,0x118205ce,0x90e38b72,0x6a11f04f,0x3488c345,0x5d3f45b0,0x9896556b,0x8d467b20,0x7b0ca9e3,0xbad428}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc215682a55d843c6,0x118205ceac4c706e,0x6a11f04f90e38b72,0x5d3f45b03488c345,0x8d467b209896556b,0xbad4287b0ca9e3}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc215682a55d843c6,0x118205ceac4c706e,0x6a11f04f90e38b72,0x5d3f45b03488c345,0x8d467b209896556b,0xbad4287b0ca9e3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x91a5,0xf9ad,0x243c,0xedb9,0xc4f5,0xce5f,0xd6d7,0x3592,0x40df,0xdead,0x1489,0xe297,0x55b1,0xee4d,0xda9d,0x9e1f,0x4a5c,0xd99a,0x6c6b,0xa585,0x62fc,0x4383,0xc1ad,0xc0}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x91a5,0xf9ad,0x243c,0xedb9,0xc4f5,0xce5f,0xd6d7,0x3592,0x40df,0xdead,0x1489,0xe297,0x55b1,0xee4d,0xda9d,0x9e1f,0x4a5c,0xd99a,0x6c6b,0xa585,0x62fc,0x4383,0xc1ad,0xc0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf9ad91a5,0xedb9243c,0xce5fc4f5,0x3592d6d7,0xdead40df,0xe2971489,0xee4d55b1,0x9e1fda9d,0xd99a4a5c,0xa5856c6b,0x438362fc,0xc0c1ad}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf9ad91a5,0xedb9243c,0xce5fc4f5,0x3592d6d7,0xdead40df,0xe2971489,0xee4d55b1,0x9e1fda9d,0xd99a4a5c,0xa5856c6b,0x438362fc,0xc0c1ad}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xedb9243cf9ad91a5,0x3592d6d7ce5fc4f5,0xe2971489dead40df,0x9e1fda9dee4d55b1,0xa5856c6bd99a4a5c,0xc0c1ad438362fc}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xedb9243cf9ad91a5,0x3592d6d7ce5fc4f5,0xe2971489dead40df,0x9e1fda9dee4d55b1,0xa5856c6bd99a4a5c,0xc0c1ad438362fc}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xf454,0x6191,0x2181,0x2fc4,0x66fb,0xc44f,0x7bb6,0x9b1c,0x99f,0xee09,0xb1a3,0xf8f9,0xf234,0x5151,0x595c,0x4e44,0xa80a,0x305c,0x9930,0x25f6,0x8e50,0xb812,0xff4d,0xb8}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf454,0x6191,0x2181,0x2fc4,0x66fb,0xc44f,0x7bb6,0x9b1c,0x99f,0xee09,0xb1a3,0xf8f9,0xf234,0x5151,0x595c,0x4e44,0xa80a,0x305c,0x9930,0x25f6,0x8e50,0xb812,0xff4d,0xb8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6191f454,0x2fc42181,0xc44f66fb,0x9b1c7bb6,0xee09099f,0xf8f9b1a3,0x5151f234,0x4e44595c,0x305ca80a,0x25f69930,0xb8128e50,0xb8ff4d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6191f454,0x2fc42181,0xc44f66fb,0x9b1c7bb6,0xee09099f,0xf8f9b1a3,0x5151f234,0x4e44595c,0x305ca80a,0x25f69930,0xb8128e50,0xb8ff4d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2fc421816191f454,0x9b1c7bb6c44f66fb,0xf8f9b1a3ee09099f,0x4e44595c5151f234,0x25f69930305ca80a,0xb8ff4db8128e50}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2fc421816191f454,0x9b1c7bb6c44f66fb,0xf8f9b1a3ee09099f,0x4e44595c5151f234,0x25f69930305ca80a,0xb8ff4db8128e50}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xbc3a,0xaa27,0x97d5,0x3dea,0x8f91,0x53b3,0xfa31,0xee7d,0x748d,0x6f1c,0xfb0,0x95ee,0x3cba,0xcb77,0xba4f,0xa2c0,0xaa94,0x6769,0x84df,0x72b9,0x561c,0x84f3,0x2bd7,0x45}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xbc3a,0xaa27,0x97d5,0x3dea,0x8f91,0x53b3,0xfa31,0xee7d,0x748d,0x6f1c,0xfb0,0x95ee,0x3cba,0xcb77,0xba4f,0xa2c0,0xaa94,0x6769,0x84df,0x72b9,0x561c,0x84f3,0x2bd7,0x45}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xaa27bc3a,0x3dea97d5,0x53b38f91,0xee7dfa31,0x6f1c748d,0x95ee0fb0,0xcb773cba,0xa2c0ba4f,0x6769aa94,0x72b984df,0x84f3561c,0x452bd7}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xaa27bc3a,0x3dea97d5,0x53b38f91,0xee7dfa31,0x6f1c748d,0x95ee0fb0,0xcb773cba,0xa2c0ba4f,0x6769aa94,0x72b984df,0x84f3561c,0x452bd7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3dea97d5aa27bc3a,0xee7dfa3153b38f91,0x95ee0fb06f1c748d,0xa2c0ba4fcb773cba,0x72b984df6769aa94,0x452bd784f3561c}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3dea97d5aa27bc3a,0xee7dfa3153b38f91,0x95ee0fb06f1c748d,0xa2c0ba4fcb773cba,0x72b984df6769aa94,0x452bd784f3561c}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x10c4, 0x1e1a, 0x1b68, 0xa71, 0xa1a, 0x1f60, 0xdda, 0xb59, 0x1bc0, 0x26c, 0x1325, 0x4cf, 0x1375, 0x10ef, 0x1702, 0x1eff, 0x171f, 0x467, 0xc1c, 0xf3b, 0xf74, 0x6fa, 0x177f, 0x911, 0x8bc, 0x16b7, 0x1022, 0xa5f, 0xa55, 0x9} @@ -2165,223 +2165,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x807f,0x9ad4,0xa2d5,0xb571,0xe39a,0x9682,0xbc36,0x2fde,0x80a1,0x55a9,0x2092,0x49c9,0x4269,0x7a75,0x7411,0xde67,0x75d6,0x9689,0xaf23,0x7b89,0x819b,0x60e4,0x276f,0x1a}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x807f,0x9ad4,0xa2d5,0xb571,0xe39a,0x9682,0xbc36,0x2fde,0x80a1,0x55a9,0x2092,0x49c9,0x4269,0x7a75,0x7411,0xde67,0x75d6,0x9689,0xaf23,0x7b89,0x819b,0x60e4,0x276f,0x1a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x9ad4807f,0xb571a2d5,0x9682e39a,0x2fdebc36,0x55a980a1,0x49c92092,0x7a754269,0xde677411,0x968975d6,0x7b89af23,0x60e4819b,0x1a276f}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9ad4807f,0xb571a2d5,0x9682e39a,0x2fdebc36,0x55a980a1,0x49c92092,0x7a754269,0xde677411,0x968975d6,0x7b89af23,0x60e4819b,0x1a276f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb571a2d59ad4807f,0x2fdebc369682e39a,0x49c9209255a980a1,0xde6774117a754269,0x7b89af23968975d6,0x1a276f60e4819b}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb571a2d59ad4807f,0x2fdebc369682e39a,0x49c9209255a980a1,0xde6774117a754269,0x7b89af23968975d6,0x1a276f60e4819b}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc876,0x218,0xd2f9,0x49ab,0x3ef7,0x1ab3,0x4dc4,0xf981,0x381a,0xdb9c,0x12c5,0xc5e1,0x5b21,0xc148,0x9d31,0x5369,0x5706,0x2277,0xcf63,0x1c7d,0xe151,0xd481,0xcd50,0x4b}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc876,0x218,0xd2f9,0x49ab,0x3ef7,0x1ab3,0x4dc4,0xf981,0x381a,0xdb9c,0x12c5,0xc5e1,0x5b21,0xc148,0x9d31,0x5369,0x5706,0x2277,0xcf63,0x1c7d,0xe151,0xd481,0xcd50,0x4b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x218c876,0x49abd2f9,0x1ab33ef7,0xf9814dc4,0xdb9c381a,0xc5e112c5,0xc1485b21,0x53699d31,0x22775706,0x1c7dcf63,0xd481e151,0x4bcd50}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x218c876,0x49abd2f9,0x1ab33ef7,0xf9814dc4,0xdb9c381a,0xc5e112c5,0xc1485b21,0x53699d31,0x22775706,0x1c7dcf63,0xd481e151,0x4bcd50}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x49abd2f90218c876,0xf9814dc41ab33ef7,0xc5e112c5db9c381a,0x53699d31c1485b21,0x1c7dcf6322775706,0x4bcd50d481e151}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x49abd2f90218c876,0xf9814dc41ab33ef7,0xc5e112c5db9c381a,0x53699d31c1485b21,0x1c7dcf6322775706,0x4bcd50d481e151}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2191,0x98ac,0x5708,0x78cc,0x63fa,0xe9c9,0x168,0x4c13,0x19e6,0xe2be,0xd010,0xd629,0xf3ff,0x3b08,0x7c2b,0xed3e,0x356e,0x5d94,0x253b,0xa0f5,0xc652,0x60d4,0x9dac,0x63}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2191,0x98ac,0x5708,0x78cc,0x63fa,0xe9c9,0x168,0x4c13,0x19e6,0xe2be,0xd010,0xd629,0xf3ff,0x3b08,0x7c2b,0xed3e,0x356e,0x5d94,0x253b,0xa0f5,0xc652,0x60d4,0x9dac,0x63}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x98ac2191,0x78cc5708,0xe9c963fa,0x4c130168,0xe2be19e6,0xd629d010,0x3b08f3ff,0xed3e7c2b,0x5d94356e,0xa0f5253b,0x60d4c652,0x639dac}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x98ac2191,0x78cc5708,0xe9c963fa,0x4c130168,0xe2be19e6,0xd629d010,0x3b08f3ff,0xed3e7c2b,0x5d94356e,0xa0f5253b,0x60d4c652,0x639dac}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x78cc570898ac2191,0x4c130168e9c963fa,0xd629d010e2be19e6,0xed3e7c2b3b08f3ff,0xa0f5253b5d94356e,0x639dac60d4c652}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x78cc570898ac2191,0x4c130168e9c963fa,0xd629d010e2be19e6,0xed3e7c2b3b08f3ff,0xa0f5253b5d94356e,0x639dac60d4c652}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7f81,0x652b,0x5d2a,0x4a8e,0x1c65,0x697d,0x43c9,0xd021,0x7f5e,0xaa56,0xdf6d,0xb636,0xbd96,0x858a,0x8bee,0x2198,0x8a29,0x6976,0x50dc,0x8476,0x7e64,0x9f1b,0xd890,0xe5}}} +{{{._mp_alloc = 
0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7f81,0x652b,0x5d2a,0x4a8e,0x1c65,0x697d,0x43c9,0xd021,0x7f5e,0xaa56,0xdf6d,0xb636,0xbd96,0x858a,0x8bee,0x2198,0x8a29,0x6976,0x50dc,0x8476,0x7e64,0x9f1b,0xd890,0xe5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x652b7f81,0x4a8e5d2a,0x697d1c65,0xd02143c9,0xaa567f5e,0xb636df6d,0x858abd96,0x21988bee,0x69768a29,0x847650dc,0x9f1b7e64,0xe5d890}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x652b7f81,0x4a8e5d2a,0x697d1c65,0xd02143c9,0xaa567f5e,0xb636df6d,0x858abd96,0x21988bee,0x69768a29,0x847650dc,0x9f1b7e64,0xe5d890}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4a8e5d2a652b7f81,0xd02143c9697d1c65,0xb636df6daa567f5e,0x21988bee858abd96,0x847650dc69768a29,0xe5d8909f1b7e64}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4a8e5d2a652b7f81,0xd02143c9697d1c65,0xb636df6daa567f5e,0x21988bee858abd96,0x847650dc69768a29,0xe5d8909f1b7e64}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x313b,0xc18a,0x812a,0x406d,0x472a,0x9fca,0x9f07,0xb030,0x8b7b,0x7924,0x2af6,0x9e99,0x2b81,0x8eb8,0x35ee,0x59c8,0x7655,0x34cc,0x5aaf,0x326,0xe58d,0xf8b7,0x969a,0x6e}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x313b,0xc18a,0x812a,0x406d,0x472a,0x9fca,0x9f07,0xb030,0x8b7b,0x7924,0x2af6,0x9e99,0x2b81,0x8eb8,0x35ee,0x59c8,0x7655,0x34cc,0x5aaf,0x326,0xe58d,0xf8b7,0x969a,0x6e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc18a313b,0x406d812a,0x9fca472a,0xb0309f07,0x79248b7b,0x9e992af6,0x8eb82b81,0x59c835ee,0x34cc7655,0x3265aaf,0xf8b7e58d,0x6e969a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xc18a313b,0x406d812a,0x9fca472a,0xb0309f07,0x79248b7b,0x9e992af6,0x8eb82b81,0x59c835ee,0x34cc7655,0x3265aaf,0xf8b7e58d,0x6e969a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x406d812ac18a313b,0xb0309f079fca472a,0x9e992af679248b7b,0x59c835ee8eb82b81,0x3265aaf34cc7655,0x6e969af8b7e58d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x406d812ac18a313b,0xb0309f079fca472a,0x9e992af679248b7b,0x59c835ee8eb82b81,0x3265aaf34cc7655,0x6e969af8b7e58d}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x6610,0xfd89,0xb147,0xcf39,0x2b02,0x4ccf,0xed64,0x8470,0xaaf6,0x1891,0x8c78,0xf074,0x8a4c,0xfaed,0xd66c,0xf52b,0xf1c5,0xb0a,0x5cd,0x46f8,0x79a3,0x81de,0x451d,0xd9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x6610,0xfd89,0xb147,0xcf39,0x2b02,0x4ccf,0xed64,0x8470,0xaaf6,0x1891,0x8c78,0xf074,0x8a4c,0xfaed,0xd66c,0xf52b,0xf1c5,0xb0a,0x5cd,0x46f8,0x79a3,0x81de,0x451d,0xd9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfd896610,0xcf39b147,0x4ccf2b02,0x8470ed64,0x1891aaf6,0xf0748c78,0xfaed8a4c,0xf52bd66c,0xb0af1c5,0x46f805cd,0x81de79a3,0xd9451d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfd896610,0xcf39b147,0x4ccf2b02,0x8470ed64,0x1891aaf6,0xf0748c78,0xfaed8a4c,0xf52bd66c,0xb0af1c5,0x46f805cd,0x81de79a3,0xd9451d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xcf39b147fd896610,0x8470ed644ccf2b02,0xf0748c781891aaf6,0xf52bd66cfaed8a4c,0x46f805cd0b0af1c5,0xd9451d81de79a3}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xcf39b147fd896610,0x8470ed644ccf2b02,0xf0748c781891aaf6,0xf52bd66cfaed8a4c,0x46f805cd0b0af1c5,0xd9451d81de79a3}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1869,0x2ce0,0x425c,0x7d0f,0x30c8,0x1c3e,0xd562,0xfb41,0x3951,0xeccc,0x9c8a,0xb265,0x829,0xd879,0x3c42,0x2cbf,0xb1d2,0xd9d3,0xee28,0x7fdf,0xccdd,0x3ad,0xa6d9,0x3b}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1869,0x2ce0,0x425c,0x7d0f,0x30c8,0x1c3e,0xd562,0xfb41,0x3951,0xeccc,0x9c8a,0xb265,0x829,0xd879,0x3c42,0x2cbf,0xb1d2,0xd9d3,0xee28,0x7fdf,0xccdd,0x3ad,0xa6d9,0x3b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2ce01869,0x7d0f425c,0x1c3e30c8,0xfb41d562,0xeccc3951,0xb2659c8a,0xd8790829,0x2cbf3c42,0xd9d3b1d2,0x7fdfee28,0x3adccdd,0x3ba6d9}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2ce01869,0x7d0f425c,0x1c3e30c8,0xfb41d562,0xeccc3951,0xb2659c8a,0xd8790829,0x2cbf3c42,0xd9d3b1d2,0x7fdfee28,0x3adccdd,0x3ba6d9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7d0f425c2ce01869,0xfb41d5621c3e30c8,0xb2659c8aeccc3951,0x2cbf3c42d8790829,0x7fdfee28d9d3b1d2,0x3ba6d903adccdd}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7d0f425c2ce01869,0xfb41d5621c3e30c8,0xb2659c8aeccc3951,0x2cbf3c42d8790829,0x7fdfee28d9d3b1d2,0x3ba6d903adccdd}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcec5,0x3e75,0x7ed5,0xbf92,0xb8d5,0x6035,0x60f8,0x4fcf,0x7484,0x86db,0xd509,0x6166,0xd47e,0x7147,0xca11,0xa637,0x89aa,0xcb33,0xa550,0xfcd9,0x1a72,0x748,0x6965,0x91}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xcec5,0x3e75,0x7ed5,0xbf92,0xb8d5,0x6035,0x60f8,0x4fcf,0x7484,0x86db,0xd509,0x6166,0xd47e,0x7147,0xca11,0xa637,0x89aa,0xcb33,0xa550,0xfcd9,0x1a72,0x748,0x6965,0x91}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3e75cec5,0xbf927ed5,0x6035b8d5,0x4fcf60f8,0x86db7484,0x6166d509,0x7147d47e,0xa637ca11,0xcb3389aa,0xfcd9a550,0x7481a72,0x916965}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3e75cec5,0xbf927ed5,0x6035b8d5,0x4fcf60f8,0x86db7484,0x6166d509,0x7147d47e,0xa637ca11,0xcb3389aa,0xfcd9a550,0x7481a72,0x916965}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xbf927ed53e75cec5,0x4fcf60f86035b8d5,0x6166d50986db7484,0xa637ca117147d47e,0xfcd9a550cb3389aa,0x91696507481a72}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xbf927ed53e75cec5,0x4fcf60f86035b8d5,0x6166d50986db7484,0xa637ca117147d47e,0xfcd9a550cb3389aa,0x91696507481a72}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x807f,0x9ad4,0xa2d5,0xb571,0xe39a,0x9682,0xbc36,0x2fde,0x80a1,0x55a9,0x2092,0x49c9,0x4269,0x7a75,0x7411,0xde67,0x75d6,0x9689,0xaf23,0x7b89,0x819b,0x60e4,0x276f,0x1a}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x807f,0x9ad4,0xa2d5,0xb571,0xe39a,0x9682,0xbc36,0x2fde,0x80a1,0x55a9,0x2092,0x49c9,0x4269,0x7a75,0x7411,0xde67,0x75d6,0x9689,0xaf23,0x7b89,0x819b,0x60e4,0x276f,0x1a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9ad4807f,0xb571a2d5,0x9682e39a,0x2fdebc36,0x55a980a1,0x49c92092,0x7a754269,0xde677411,0x968975d6,0x7b89af23,0x60e4819b,0x1a276f}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9ad4807f,0xb571a2d5,0x9682e39a,0x2fdebc36,0x55a980a1,0x49c92092,0x7a754269,0xde677411,0x968975d6,0x7b89af23,0x60e4819b,0x1a276f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb571a2d59ad4807f,0x2fdebc369682e39a,0x49c9209255a980a1,0xde6774117a754269,0x7b89af23968975d6,0x1a276f60e4819b}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb571a2d59ad4807f,0x2fdebc369682e39a,0x49c9209255a980a1,0xde6774117a754269,0x7b89af23968975d6,0x1a276f60e4819b}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc876,0x218,0xd2f9,0x49ab,0x3ef7,0x1ab3,0x4dc4,0xf981,0x381a,0xdb9c,0x12c5,0xc5e1,0x5b21,0xc148,0x9d31,0x5369,0x5706,0x2277,0xcf63,0x1c7d,0xe151,0xd481,0xcd50,0x4b}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc876,0x218,0xd2f9,0x49ab,0x3ef7,0x1ab3,0x4dc4,0xf981,0x381a,0xdb9c,0x12c5,0xc5e1,0x5b21,0xc148,0x9d31,0x5369,0x5706,0x2277,0xcf63,0x1c7d,0xe151,0xd481,0xcd50,0x4b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x218c876,0x49abd2f9,0x1ab33ef7,0xf9814dc4,0xdb9c381a,0xc5e112c5,0xc1485b21,0x53699d31,0x22775706,0x1c7dcf63,0xd481e151,0x4bcd50}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x218c876,0x49abd2f9,0x1ab33ef7,0xf9814dc4,0xdb9c381a,0xc5e112c5,0xc1485b21,0x53699d31,0x22775706,0x1c7dcf63,0xd481e151,0x4bcd50}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x49abd2f90218c876,0xf9814dc41ab33ef7,0xc5e112c5db9c381a,0x53699d31c1485b21,0x1c7dcf6322775706,0x4bcd50d481e151}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x49abd2f90218c876,0xf9814dc41ab33ef7,0xc5e112c5db9c381a,0x53699d31c1485b21,0x1c7dcf6322775706,0x4bcd50d481e151}}}} #endif }, { #if 0 
#elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2191,0x98ac,0x5708,0x78cc,0x63fa,0xe9c9,0x168,0x4c13,0x19e6,0xe2be,0xd010,0xd629,0xf3ff,0x3b08,0x7c2b,0xed3e,0x356e,0x5d94,0x253b,0xa0f5,0xc652,0x60d4,0x9dac,0x63}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2191,0x98ac,0x5708,0x78cc,0x63fa,0xe9c9,0x168,0x4c13,0x19e6,0xe2be,0xd010,0xd629,0xf3ff,0x3b08,0x7c2b,0xed3e,0x356e,0x5d94,0x253b,0xa0f5,0xc652,0x60d4,0x9dac,0x63}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x98ac2191,0x78cc5708,0xe9c963fa,0x4c130168,0xe2be19e6,0xd629d010,0x3b08f3ff,0xed3e7c2b,0x5d94356e,0xa0f5253b,0x60d4c652,0x639dac}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x98ac2191,0x78cc5708,0xe9c963fa,0x4c130168,0xe2be19e6,0xd629d010,0x3b08f3ff,0xed3e7c2b,0x5d94356e,0xa0f5253b,0x60d4c652,0x639dac}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x78cc570898ac2191,0x4c130168e9c963fa,0xd629d010e2be19e6,0xed3e7c2b3b08f3ff,0xa0f5253b5d94356e,0x639dac60d4c652}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x78cc570898ac2191,0x4c130168e9c963fa,0xd629d010e2be19e6,0xed3e7c2b3b08f3ff,0xa0f5253b5d94356e,0x639dac60d4c652}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7f81,0x652b,0x5d2a,0x4a8e,0x1c65,0x697d,0x43c9,0xd021,0x7f5e,0xaa56,0xdf6d,0xb636,0xbd96,0x858a,0x8bee,0x2198,0x8a29,0x6976,0x50dc,0x8476,0x7e64,0x9f1b,0xd890,0xe5}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7f81,0x652b,0x5d2a,0x4a8e,0x1c65,0x697d,0x43c9,0xd021,0x7f5e,0xaa56,0xdf6d,0xb636,0xbd96,0x858a,0x8bee,0x2198,0x8a29,0x6976,0x50dc,0x8476,0x7e64,0x9f1b,0xd890,0xe5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x652b7f81,0x4a8e5d2a,0x697d1c65,0xd02143c9,0xaa567f5e,0xb636df6d,0x858abd96,0x21988bee,0x69768a29,0x847650dc,0x9f1b7e64,0xe5d890}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x652b7f81,0x4a8e5d2a,0x697d1c65,0xd02143c9,0xaa567f5e,0xb636df6d,0x858abd96,0x21988bee,0x69768a29,0x847650dc,0x9f1b7e64,0xe5d890}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4a8e5d2a652b7f81,0xd02143c9697d1c65,0xb636df6daa567f5e,0x21988bee858abd96,0x847650dc69768a29,0xe5d8909f1b7e64}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4a8e5d2a652b7f81,0xd02143c9697d1c65,0xb636df6daa567f5e,0x21988bee858abd96,0x847650dc69768a29,0xe5d8909f1b7e64}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = 
(mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x29a3,0x7abe,0x2ef1,0x26a6,0xa5a5,0x54e6,0xf4c8,0xb56f,0x2bae,0x1aae,0xd9ba,0x94ed,0x2df5,0x882c,0xc686,0x6f64,0x29f7,0x850a,0x9eee,0x617c,0x5678,0x3108,0x8ebe,0x86}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x29a3,0x7abe,0x2ef1,0x26a6,0xa5a5,0x54e6,0xf4c8,0xb56f,0x2bae,0x1aae,0xd9ba,0x94ed,0x2df5,0x882c,0xc686,0x6f64,0x29f7,0x850a,0x9eee,0x617c,0x5678,0x3108,0x8ebe,0x86}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7abe29a3,0x26a62ef1,0x54e6a5a5,0xb56ff4c8,0x1aae2bae,0x94edd9ba,0x882c2df5,0x6f64c686,0x850a29f7,0x617c9eee,0x31085678,0x868ebe}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7abe29a3,0x26a62ef1,0x54e6a5a5,0xb56ff4c8,0x1aae2bae,0x94edd9ba,0x882c2df5,0x6f64c686,0x850a29f7,0x617c9eee,0x31085678,0x868ebe}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x26a62ef17abe29a3,0xb56ff4c854e6a5a5,0x94edd9ba1aae2bae,0x6f64c686882c2df5,0x617c9eee850a29f7,0x868ebe31085678}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x26a62ef17abe29a3,0xb56ff4c854e6a5a5,0x94edd9ba1aae2bae,0x6f64c686882c2df5,0x617c9eee850a29f7,0x868ebe31085678}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc2a5,0x8ce6,0x3729,0xaa2b,0xb9d2,0xbf43,0xe2be,0xaf25,0x4ffb,0xec8e,0xf85a,0x94c6,0xe027,0x3c64,0xf4ad,0xf63,0x86ba,0xa244,0xde0f,0x2390,0x11e1,0xdd7c,0xcd4c,0x33}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc2a5,0x8ce6,0x3729,0xaa2b,0xb9d2,0xbf43,0xe2be,0xaf25,0x4ffb,0xec8e,0xf85a,0x94c6,0xe027,0x3c64,0xf4ad,0xf63,0x86ba,0xa244,0xde0f,0x2390,0x11e1,0xdd7c,0xcd4c,0x33}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8ce6c2a5,0xaa2b3729,0xbf43b9d2,0xaf25e2be,0xec8e4ffb,0x94c6f85a,0x3c64e027,0xf63f4ad,0xa24486ba,0x2390de0f,0xdd7c11e1,0x33cd4c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8ce6c2a5,0xaa2b3729,0xbf43b9d2,0xaf25e2be,0xec8e4ffb,0x94c6f85a,0x3c64e027,0xf63f4ad,0xa24486ba,0x2390de0f,0xdd7c11e1,0x33cd4c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xaa2b37298ce6c2a5,0xaf25e2bebf43b9d2,0x94c6f85aec8e4ffb,0xf63f4ad3c64e027,0x2390de0fa24486ba,0x33cd4cdd7c11e1}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xaa2b37298ce6c2a5,0xaf25e2bebf43b9d2,0x94c6f85aec8e4ffb,0xf63f4ad3c64e027,0x2390de0fa24486ba,0x33cd4cdd7c11e1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1893,0xa4bf,0x1eb8,0x9df0,0x91b1,0x17b0,0xe4ae,0x6ba1,0x35fd,0xd56b,0xc03f,0x82a8,0x99cd,0x30be,0xf3a3,0x181e,0x879b,0x518,0x3e8,0xed0e,0xc0ff,0xe2d6,0xe29c,0x1}}} 
+{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1893,0xa4bf,0x1eb8,0x9df0,0x91b1,0x17b0,0xe4ae,0x6ba1,0x35fd,0xd56b,0xc03f,0x82a8,0x99cd,0x30be,0xf3a3,0x181e,0x879b,0x518,0x3e8,0xed0e,0xc0ff,0xe2d6,0xe29c,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa4bf1893,0x9df01eb8,0x17b091b1,0x6ba1e4ae,0xd56b35fd,0x82a8c03f,0x30be99cd,0x181ef3a3,0x518879b,0xed0e03e8,0xe2d6c0ff,0x1e29c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa4bf1893,0x9df01eb8,0x17b091b1,0x6ba1e4ae,0xd56b35fd,0x82a8c03f,0x30be99cd,0x181ef3a3,0x518879b,0xed0e03e8,0xe2d6c0ff,0x1e29c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9df01eb8a4bf1893,0x6ba1e4ae17b091b1,0x82a8c03fd56b35fd,0x181ef3a330be99cd,0xed0e03e80518879b,0x1e29ce2d6c0ff}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9df01eb8a4bf1893,0x6ba1e4ae17b091b1,0x82a8c03fd56b35fd,0x181ef3a330be99cd,0xed0e03e80518879b,0x1e29ce2d6c0ff}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd65d,0x8541,0xd10e,0xd959,0x5a5a,0xab19,0xb37,0x4a90,0xd451,0xe551,0x2645,0x6b12,0xd20a,0x77d3,0x3979,0x909b,0xd608,0x7af5,0x6111,0x9e83,0xa987,0xcef7,0x7141,0x79}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd65d,0x8541,0xd10e,0xd959,0x5a5a,0xab19,0xb37,0x4a90,0xd451,0xe551,0x2645,0x6b12,0xd20a,0x77d3,0x3979,0x909b,0xd608,0x7af5,0x6111,0x9e83,0xa987,0xcef7,0x7141,0x79}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8541d65d,0xd959d10e,0xab195a5a,0x4a900b37,0xe551d451,0x6b122645,0x77d3d20a,0x909b3979,0x7af5d608,0x9e836111,0xcef7a987,0x797141}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8541d65d,0xd959d10e,0xab195a5a,0x4a900b37,0xe551d451,0x6b122645,0x77d3d20a,0x909b3979,0x7af5d608,0x9e836111,0xcef7a987,0x797141}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd959d10e8541d65d,0x4a900b37ab195a5a,0x6b122645e551d451,0x909b397977d3d20a,0x9e8361117af5d608,0x797141cef7a987}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd959d10e8541d65d,0x4a900b37ab195a5a,0x6b122645e551d451,0x909b397977d3d20a,0x9e8361117af5d608,0x797141cef7a987}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x187f, 0x1c7d, 0x163a, 0x54d, 0x197f, 0x1a5d, 0x1833, 0x823, 0x11ae, 0x225, 0xd6d, 0x1627, 0xd6c, 0x1625, 0xa36, 0xf64, 0xcfa, 0x327, 0x188d, 0xfab, 0x56b, 0x91c, 0x1cc5, 0x4fe, 0x2ae, 0x181a, 0x195, 0x1288, 0x10fc, 0x3} @@ -2641,223 +2641,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x5419,0x8a5a,0x8d5e,0x1367,0xae77,0x3903,0xcf47,0xab31,0xf30c,0x9219,0xed73,0x2055,0xcc84,0x4098,0x7046,0x4d6f,0x5c68,0xe641,0x224,0xd5ed,0x977a,0xdfcd,0x15c8,0xe4}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x5419,0x8a5a,0x8d5e,0x1367,0xae77,0x3903,0xcf47,0xab31,0xf30c,0x9219,0xed73,0x2055,0xcc84,0x4098,0x7046,0x4d6f,0x5c68,0xe641,0x224,0xd5ed,0x977a,0xdfcd,0x15c8,0xe4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8a5a5419,0x13678d5e,0x3903ae77,0xab31cf47,0x9219f30c,0x2055ed73,0x4098cc84,0x4d6f7046,0xe6415c68,0xd5ed0224,0xdfcd977a,0xe415c8}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x8a5a5419,0x13678d5e,0x3903ae77,0xab31cf47,0x9219f30c,0x2055ed73,0x4098cc84,0x4d6f7046,0xe6415c68,0xd5ed0224,0xdfcd977a,0xe415c8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x13678d5e8a5a5419,0xab31cf473903ae77,0x2055ed739219f30c,0x4d6f70464098cc84,0xd5ed0224e6415c68,0xe415c8dfcd977a}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x13678d5e8a5a5419,0xab31cf473903ae77,0x2055ed739219f30c,0x4d6f70464098cc84,0xd5ed0224e6415c68,0xe415c8dfcd977a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x92ea,0x29a5,0x41ba,0x7e17,0x33c,0xaeb2,0x4b64,0x1b0d,0x4be,0xe57e,0x2d2e,0x1554,0xe7f7,0xea1e,0x4267,0x7d2a,0x17cd,0xd234,0xc388,0x147a,0x43e6,0xaa63,0x83bd,0x4f}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x92ea,0x29a5,0x41ba,0x7e17,0x33c,0xaeb2,0x4b64,0x1b0d,0x4be,0xe57e,0x2d2e,0x1554,0xe7f7,0xea1e,0x4267,0x7d2a,0x17cd,0xd234,0xc388,0x147a,0x43e6,0xaa63,0x83bd,0x4f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x29a592ea,0x7e1741ba,0xaeb2033c,0x1b0d4b64,0xe57e04be,0x15542d2e,0xea1ee7f7,0x7d2a4267,0xd23417cd,0x147ac388,0xaa6343e6,0x4f83bd}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x29a592ea,0x7e1741ba,0xaeb2033c,0x1b0d4b64,0xe57e04be,0x15542d2e,0xea1ee7f7,0x7d2a4267,0xd23417cd,0x147ac388,0xaa6343e6,0x4f83bd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7e1741ba29a592ea,0x1b0d4b64aeb2033c,0x15542d2ee57e04be,0x7d2a4267ea1ee7f7,0x147ac388d23417cd,0x4f83bdaa6343e6}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7e1741ba29a592ea,0x1b0d4b64aeb2033c,0x15542d2ee57e04be,0x7d2a4267ea1ee7f7,0x147ac388d23417cd,0x4f83bdaa6343e6}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa8b7,0x5b17,0x819c,0x465e,0xa6fb,0xb2a1,0x91c3,0x953,0x486,0x4ed4,0xc367,0x75ba,0xcc01,0x115f,0xe242,0xe751,0x4748,0xbf53,0x4ec0,0x950a,0xe08e,0x7ea2,0xec82,0x7d}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa8b7,0x5b17,0x819c,0x465e,0xa6fb,0xb2a1,0x91c3,0x953,0x486,0x4ed4,0xc367,0x75ba,0xcc01,0x115f,0xe242,0xe751,0x4748,0xbf53,0x4ec0,0x950a,0xe08e,0x7ea2,0xec82,0x7d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5b17a8b7,0x465e819c,0xb2a1a6fb,0x95391c3,0x4ed40486,0x75bac367,0x115fcc01,0xe751e242,0xbf534748,0x950a4ec0,0x7ea2e08e,0x7dec82}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5b17a8b7,0x465e819c,0xb2a1a6fb,0x95391c3,0x4ed40486,0x75bac367,0x115fcc01,0xe751e242,0xbf534748,0x950a4ec0,0x7ea2e08e,0x7dec82}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x465e819c5b17a8b7,0x95391c3b2a1a6fb,0x75bac3674ed40486,0xe751e242115fcc01,0x950a4ec0bf534748,0x7dec827ea2e08e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x465e819c5b17a8b7,0x95391c3b2a1a6fb,0x75bac3674ed40486,0xe751e242115fcc01,0x950a4ec0bf534748,0x7dec827ea2e08e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xabe7,0x75a5,0x72a1,0xec98,0x5188,0xc6fc,0x30b8,0x54ce,0xcf3,0x6de6,0x128c,0xdfaa,0x337b,0xbf67,0x8fb9,0xb290,0xa397,0x19be,0xfddb,0x2a12,0x6885,0x2032,0xea37,0x1b}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xabe7,0x75a5,0x72a1,0xec98,0x5188,0xc6fc,0x30b8,0x54ce,0xcf3,0x6de6,0x128c,0xdfaa,0x337b,0xbf67,0x8fb9,0xb290,0xa397,0x19be,0xfddb,0x2a12,0x6885,0x2032,0xea37,0x1b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x75a5abe7,0xec9872a1,0xc6fc5188,0x54ce30b8,0x6de60cf3,0xdfaa128c,0xbf67337b,0xb2908fb9,0x19bea397,0x2a12fddb,0x20326885,0x1bea37}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x75a5abe7,0xec9872a1,0xc6fc5188,0x54ce30b8,0x6de60cf3,0xdfaa128c,0xbf67337b,0xb2908fb9,0x19bea397,0x2a12fddb,0x20326885,0x1bea37}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xec9872a175a5abe7,0x54ce30b8c6fc5188,0xdfaa128c6de60cf3,0xb2908fb9bf67337b,0x2a12fddb19bea397,0x1bea3720326885}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xec9872a175a5abe7,0x54ce30b8c6fc5188,0xdfaa128c6de60cf3,0xb2908fb9bf67337b,0x2a12fddb19bea397,0x1bea3720326885}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}}} #endif }, { #if 0 
#elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x703,0xe86d,0xe89e,0xbcf8,0x675b,0xe250,0x9f65,0xe8ec,0x2c83,0x11ca,0x4751,0x192a,0xf9d8,0xf46a,0xeb89,0x4f40,0x2a2c,0xdcf,0xfff9,0x13f9,0x24e7,0x8348,0xb9af,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x703,0xe86d,0xe89e,0xbcf8,0x675b,0xe250,0x9f65,0xe8ec,0x2c83,0x11ca,0x4751,0x192a,0xf9d8,0xf46a,0xeb89,0x4f40,0x2a2c,0xdcf,0xfff9,0x13f9,0x24e7,0x8348,0xb9af,0x6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe86d0703,0xbcf8e89e,0xe250675b,0xe8ec9f65,0x11ca2c83,0x192a4751,0xf46af9d8,0x4f40eb89,0xdcf2a2c,0x13f9fff9,0x834824e7,0x6b9af}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe86d0703,0xbcf8e89e,0xe250675b,0xe8ec9f65,0x11ca2c83,0x192a4751,0xf46af9d8,0x4f40eb89,0xdcf2a2c,0x13f9fff9,0x834824e7,0x6b9af}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0xbcf8e89ee86d0703,0xe8ec9f65e250675b,0x192a475111ca2c83,0x4f40eb89f46af9d8,0x13f9fff90dcf2a2c,0x6b9af834824e7}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xbcf8e89ee86d0703,0xe8ec9f65e250675b,0x192a475111ca2c83,0x4f40eb89f46af9d8,0x13f9fff90dcf2a2c,0x6b9af834824e7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e40,0xb548,0xf9c7,0x6598,0x7e33,0x25c6,0x6cbf,0x2ef2,0xa630,0xdd99,0xaef2,0xf320,0x4a2,0x93a7,0x4541,0x2f7c,0xbf45,0x1a7a,0x24f4,0x52a9,0xd3b4,0xa12a,0x9d37,0xb0}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e40,0xb548,0xf9c7,0x6598,0x7e33,0x25c6,0x6cbf,0x2ef2,0xa630,0xdd99,0xaef2,0xf320,0x4a2,0x93a7,0x4541,0x2f7c,0xbf45,0x1a7a,0x24f4,0x52a9,0xd3b4,0xa12a,0x9d37,0xb0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb5481e40,0x6598f9c7,0x25c67e33,0x2ef26cbf,0xdd99a630,0xf320aef2,0x93a704a2,0x2f7c4541,0x1a7abf45,0x52a924f4,0xa12ad3b4,0xb09d37}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb5481e40,0x6598f9c7,0x25c67e33,0x2ef26cbf,0xdd99a630,0xf320aef2,0x93a704a2,0x2f7c4541,0x1a7abf45,0x52a924f4,0xa12ad3b4,0xb09d37}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6598f9c7b5481e40,0x2ef26cbf25c67e33,0xf320aef2dd99a630,0x2f7c454193a704a2,0x52a924f41a7abf45,0xb09d37a12ad3b4}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6598f9c7b5481e40,0x2ef26cbf25c67e33,0xf320aef2dd99a630,0x2f7c454193a704a2,0x52a924f41a7abf45,0xb09d37a12ad3b4}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e1,0x2283,0x3774,0x83d4,0xf33f,0x1fc,0x2790,0xde59,0xe89d,0xc942,0x2c1b,0x6574,0x55b1,0x3a3c,0x9f11,0xbb0a,0x6813,0xa69,0xff9d,0xc94c,0xdede,0xce6b,0x18c6,0xa9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e1,0x2283,0x3774,0x83d4,0xf33f,0x1fc,0x2790,0xde59,0xe89d,0xc942,0x2c1b,0x6574,0x55b1,0x3a3c,0x9f11,0xbb0a,0x6813,0xa69,0xff9d,0xc94c,0xdede,0xce6b,0x18c6,0xa9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x228301e1,0x83d43774,0x1fcf33f,0xde592790,0xc942e89d,0x65742c1b,0x3a3c55b1,0xbb0a9f11,0xa696813,0xc94cff9d,0xce6bdede,0xa918c6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x228301e1,0x83d43774,0x1fcf33f,0xde592790,0xc942e89d,0x65742c1b,0x3a3c55b1,0xbb0a9f11,0xa696813,0xc94cff9d,0xce6bdede,0xa918c6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x83d43774228301e1,0xde59279001fcf33f,0x65742c1bc942e89d,0xbb0a9f113a3c55b1,0xc94cff9d0a696813,0xa918c6ce6bdede}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x83d43774228301e1,0xde59279001fcf33f,0x65742c1bc942e89d,0xbb0a9f113a3c55b1,0xc94cff9d0a696813,0xa918c6ce6bdede}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf8fd,0x1792,0x1761,0x4307,0x98a4,0x1daf,0x609a,0x1713,0xd37c,0xee35,0xb8ae,0xe6d5,0x627,0xb95,0x1476,0xb0bf,0xd5d3,0xf230,0x6,0xec06,0xdb18,0x7cb7,0x4650,0xf9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf8fd,0x1792,0x1761,0x4307,0x98a4,0x1daf,0x609a,0x1713,0xd37c,0xee35,0xb8ae,0xe6d5,0x627,0xb95,0x1476,0xb0bf,0xd5d3,0xf230,0x6,0xec06,0xdb18,0x7cb7,0x4650,0xf9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x1792f8fd,0x43071761,0x1daf98a4,0x1713609a,0xee35d37c,0xe6d5b8ae,0xb950627,0xb0bf1476,0xf230d5d3,0xec060006,0x7cb7db18,0xf94650}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1792f8fd,0x43071761,0x1daf98a4,0x1713609a,0xee35d37c,0xe6d5b8ae,0xb950627,0xb0bf1476,0xf230d5d3,0xec060006,0x7cb7db18,0xf94650}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x430717611792f8fd,0x1713609a1daf98a4,0xe6d5b8aeee35d37c,0xb0bf14760b950627,0xec060006f230d5d3,0xf946507cb7db18}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x430717611792f8fd,0x1713609a1daf98a4,0xe6d5b8aeee35d37c,0xb0bf14760b950627,0xec060006f230d5d3,0xf946507cb7db18}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x5419,0x8a5a,0x8d5e,0x1367,0xae77,0x3903,0xcf47,0xab31,0xf30c,0x9219,0xed73,0x2055,0xcc84,0x4098,0x7046,0x4d6f,0x5c68,0xe641,0x224,0xd5ed,0x977a,0xdfcd,0x15c8,0xe4}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x5419,0x8a5a,0x8d5e,0x1367,0xae77,0x3903,0xcf47,0xab31,0xf30c,0x9219,0xed73,0x2055,0xcc84,0x4098,0x7046,0x4d6f,0x5c68,0xe641,0x224,0xd5ed,0x977a,0xdfcd,0x15c8,0xe4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8a5a5419,0x13678d5e,0x3903ae77,0xab31cf47,0x9219f30c,0x2055ed73,0x4098cc84,0x4d6f7046,0xe6415c68,0xd5ed0224,0xdfcd977a,0xe415c8}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8a5a5419,0x13678d5e,0x3903ae77,0xab31cf47,0x9219f30c,0x2055ed73,0x4098cc84,0x4d6f7046,0xe6415c68,0xd5ed0224,0xdfcd977a,0xe415c8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x13678d5e8a5a5419,0xab31cf473903ae77,0x2055ed739219f30c,0x4d6f70464098cc84,0xd5ed0224e6415c68,0xe415c8dfcd977a}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x13678d5e8a5a5419,0xab31cf473903ae77,0x2055ed739219f30c,0x4d6f70464098cc84,0xd5ed0224e6415c68,0xe415c8dfcd977a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x92ea,0x29a5,0x41ba,0x7e17,0x33c,0xaeb2,0x4b64,0x1b0d,0x4be,0xe57e,0x2d2e,0x1554,0xe7f7,0xea1e,0x4267,0x7d2a,0x17cd,0xd234,0xc388,0x147a,0x43e6,0xaa63,0x83bd,0x4f}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x92ea,0x29a5,0x41ba,0x7e17,0x33c,0xaeb2,0x4b64,0x1b0d,0x4be,0xe57e,0x2d2e,0x1554,0xe7f7,0xea1e,0x4267,0x7d2a,0x17cd,0xd234,0xc388,0x147a,0x43e6,0xaa63,0x83bd,0x4f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x29a592ea,0x7e1741ba,0xaeb2033c,0x1b0d4b64,0xe57e04be,0x15542d2e,0xea1ee7f7,0x7d2a4267,0xd23417cd,0x147ac388,0xaa6343e6,0x4f83bd}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x29a592ea,0x7e1741ba,0xaeb2033c,0x1b0d4b64,0xe57e04be,0x15542d2e,0xea1ee7f7,0x7d2a4267,0xd23417cd,0x147ac388,0xaa6343e6,0x4f83bd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7e1741ba29a592ea,0x1b0d4b64aeb2033c,0x15542d2ee57e04be,0x7d2a4267ea1ee7f7,0x147ac388d23417cd,0x4f83bdaa6343e6}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7e1741ba29a592ea,0x1b0d4b64aeb2033c,0x15542d2ee57e04be,0x7d2a4267ea1ee7f7,0x147ac388d23417cd,0x4f83bdaa6343e6}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa8b7,0x5b17,0x819c,0x465e,0xa6fb,0xb2a1,0x91c3,0x953,0x486,0x4ed4,0xc367,0x75ba,0xcc01,0x115f,0xe242,0xe751,0x4748,0xbf53,0x4ec0,0x950a,0xe08e,0x7ea2,0xec82,0x7d}}} 
+{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa8b7,0x5b17,0x819c,0x465e,0xa6fb,0xb2a1,0x91c3,0x953,0x486,0x4ed4,0xc367,0x75ba,0xcc01,0x115f,0xe242,0xe751,0x4748,0xbf53,0x4ec0,0x950a,0xe08e,0x7ea2,0xec82,0x7d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5b17a8b7,0x465e819c,0xb2a1a6fb,0x95391c3,0x4ed40486,0x75bac367,0x115fcc01,0xe751e242,0xbf534748,0x950a4ec0,0x7ea2e08e,0x7dec82}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5b17a8b7,0x465e819c,0xb2a1a6fb,0x95391c3,0x4ed40486,0x75bac367,0x115fcc01,0xe751e242,0xbf534748,0x950a4ec0,0x7ea2e08e,0x7dec82}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x465e819c5b17a8b7,0x95391c3b2a1a6fb,0x75bac3674ed40486,0xe751e242115fcc01,0x950a4ec0bf534748,0x7dec827ea2e08e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x465e819c5b17a8b7,0x95391c3b2a1a6fb,0x75bac3674ed40486,0xe751e242115fcc01,0x950a4ec0bf534748,0x7dec827ea2e08e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xabe7,0x75a5,0x72a1,0xec98,0x5188,0xc6fc,0x30b8,0x54ce,0xcf3,0x6de6,0x128c,0xdfaa,0x337b,0xbf67,0x8fb9,0xb290,0xa397,0x19be,0xfddb,0x2a12,0x6885,0x2032,0xea37,0x1b}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xabe7,0x75a5,0x72a1,0xec98,0x5188,0xc6fc,0x30b8,0x54ce,0xcf3,0x6de6,0x128c,0xdfaa,0x337b,0xbf67,0x8fb9,0xb290,0xa397,0x19be,0xfddb,0x2a12,0x6885,0x2032,0xea37,0x1b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x75a5abe7,0xec9872a1,0xc6fc5188,0x54ce30b8,0x6de60cf3,0xdfaa128c,0xbf67337b,0xb2908fb9,0x19bea397,0x2a12fddb,0x20326885,0x1bea37}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x75a5abe7,0xec9872a1,0xc6fc5188,0x54ce30b8,0x6de60cf3,0xdfaa128c,0xbf67337b,0xb2908fb9,0x19bea397,0x2a12fddb,0x20326885,0x1bea37}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xec9872a175a5abe7,0x54ce30b8c6fc5188,0xdfaa128c6de60cf3,0xb2908fb9bf67337b,0x2a12fddb19bea397,0x1bea3720326885}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xec9872a175a5abe7,0x54ce30b8c6fc5188,0xdfaa128c6de60cf3,0xb2908fb9bf67337b,0x2a12fddb19bea397,0x1bea3720326885}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2d5d,0x46e9,0x4215,0x63b0,0x8358,0xdc91,0x80aa,0x6970,0x4e7d,0x266d,0xc13a,0xe4ea,0x504e,0xbc38,0xdbaf,0x119b,0xa3cc,0x45d8,0x98db,0x7b90,0x3a5b,0xde6a,0x3676,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2d5d,0x46e9,0x4215,0x63b0,0x8358,0xdc91,0x80aa,0x6970,0x4e7d,0x266d,0xc13a,0xe4ea,0x504e,0xbc38,0xdbaf,0x119b,0xa3cc,0x45d8,0x98db,0x7b90,0x3a5b,0xde6a,0x3676,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x46e92d5d,0x63b04215,0xdc918358,0x697080aa,0x266d4e7d,0xe4eac13a,0xbc38504e,0x119bdbaf,0x45d8a3cc,0x7b9098db,0xde6a3a5b,0x83676}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x46e92d5d,0x63b04215,0xdc918358,0x697080aa,0x266d4e7d,0xe4eac13a,0xbc38504e,0x119bdbaf,0x45d8a3cc,0x7b9098db,0xde6a3a5b,0x83676}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x63b0421546e92d5d,0x697080aadc918358,0xe4eac13a266d4e7d,0x119bdbafbc38504e,0x7b9098db45d8a3cc,0x83676de6a3a5b}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x63b0421546e92d5d,0x697080aadc918358,0xe4eac13a266d4e7d,0x119bdbafbc38504e,0x7b9098db45d8a3cc,0x83676de6a3a5b}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1db1,0x61ae,0x220b,0xc2e,0xa7ee,0xb16a,0x8697,0xf90c,0x7505,0xced5,0x5cf8,0xb601,0x6235,0x27ad,0x9fdf,0x57d0,0xca2,0xa6d2,0x94db,0xb53a,0x8bd2,0xa3ad,0xfe95,0x92}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1db1,0x61ae,0x220b,0xc2e,0xa7ee,0xb16a,0x8697,0xf90c,0x7505,0xced5,0x5cf8,0xb601,0x6235,0x27ad,0x9fdf,0x57d0,0xca2,0xa6d2,0x94db,0xb53a,0x8bd2,0xa3ad,0xfe95,0x92}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x61ae1db1,0xc2e220b,0xb16aa7ee,0xf90c8697,0xced57505,0xb6015cf8,0x27ad6235,0x57d09fdf,0xa6d20ca2,0xb53a94db,0xa3ad8bd2,0x92fe95}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x61ae1db1,0xc2e220b,0xb16aa7ee,0xf90c8697,0xced57505,0xb6015cf8,0x27ad6235,0x57d09fdf,0xa6d20ca2,0xb53a94db,0xa3ad8bd2,0x92fe95}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc2e220b61ae1db1,0xf90c8697b16aa7ee,0xb6015cf8ced57505,0x57d09fdf27ad6235,0xb53a94dba6d20ca2,0x92fe95a3ad8bd2}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc2e220b61ae1db1,0xf90c8697b16aa7ee,0xb6015cf8ced57505,0x57d09fdf27ad6235,0xb53a94dba6d20ca2,0x92fe95a3ad8bd2}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa809,0xf0cf,0xb393,0xf0ab,0x181a,0xb5bc,0x1833,0xb0ea,0xff0e,0x3088,0xb299,0x4f5c,0x5a20,0x5b86,0xad7b,0x9ffd,0x2216,0x4e4c,0xb8eb,0x989,0x712f,0xa798,0x8e8f,0x45}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xa809,0xf0cf,0xb393,0xf0ab,0x181a,0xb5bc,0x1833,0xb0ea,0xff0e,0x3088,0xb299,0x4f5c,0x5a20,0x5b86,0xad7b,0x9ffd,0x2216,0x4e4c,0xb8eb,0x989,0x712f,0xa798,0x8e8f,0x45}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf0cfa809,0xf0abb393,0xb5bc181a,0xb0ea1833,0x3088ff0e,0x4f5cb299,0x5b865a20,0x9ffdad7b,0x4e4c2216,0x989b8eb,0xa798712f,0x458e8f}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf0cfa809,0xf0abb393,0xb5bc181a,0xb0ea1833,0x3088ff0e,0x4f5cb299,0x5b865a20,0x9ffdad7b,0x4e4c2216,0x989b8eb,0xa798712f,0x458e8f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf0abb393f0cfa809,0xb0ea1833b5bc181a,0x4f5cb2993088ff0e,0x9ffdad7b5b865a20,0x989b8eb4e4c2216,0x458e8fa798712f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf0abb393f0cfa809,0xb0ea1833b5bc181a,0x4f5cb2993088ff0e,0x9ffdad7b5b865a20,0x989b8eb4e4c2216,0x458e8fa798712f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd2a3,0xb916,0xbdea,0x9c4f,0x7ca7,0x236e,0x7f55,0x968f,0xb182,0xd992,0x3ec5,0x1b15,0xafb1,0x43c7,0x2450,0xee64,0x5c33,0xba27,0x6724,0x846f,0xc5a4,0x2195,0xc989,0xf7}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd2a3,0xb916,0xbdea,0x9c4f,0x7ca7,0x236e,0x7f55,0x968f,0xb182,0xd992,0x3ec5,0x1b15,0xafb1,0x43c7,0x2450,0xee64,0x5c33,0xba27,0x6724,0x846f,0xc5a4,0x2195,0xc989,0xf7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb916d2a3,0x9c4fbdea,0x236e7ca7,0x968f7f55,0xd992b182,0x1b153ec5,0x43c7afb1,0xee642450,0xba275c33,0x846f6724,0x2195c5a4,0xf7c989}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb916d2a3,0x9c4fbdea,0x236e7ca7,0x968f7f55,0xd992b182,0x1b153ec5,0x43c7afb1,0xee642450,0xba275c33,0x846f6724,0x2195c5a4,0xf7c989}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c4fbdeab916d2a3,0x968f7f55236e7ca7,0x1b153ec5d992b182,0xee64245043c7afb1,0x846f6724ba275c33,0xf7c9892195c5a4}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c4fbdeab916d2a3,0x968f7f55236e7ca7,0x1b153ec5d992b182,0xee64245043c7afb1,0x846f6724ba275c33,0xf7c9892195c5a4}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x17bf, 0xbb8, 0x485, 0x1296, 0x1b32, 0xe0b, 0x13f9, 0x15f5, 0x1a2b, 0xff6, 0xf53, 0x70a, 0x36c, 0x40f, 0xa89, 0xca6, 0x37e, 0x1488, 0x1796, 0x1be2, 0x1e0f, 0xf10, 0x1628, 0x1a20, 0x1ee7, 0x1e53, 0x48c, 0x94, 0xd53, 0xd} @@ -3117,223 +3117,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc761,0xa939,0x45c3,0xca85,0x6b48,0xe2f1,0x7d5b,0x8e01,0xb824,0x1b34,0x31da,0x3fa1,0xaae4,0x143a,0xc0b8,0xe32f,0x843,0x2c29,0xd142,0x4e56,0xd55c,0xc504,0x24cd,0xc5}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc761,0xa939,0x45c3,0xca85,0x6b48,0xe2f1,0x7d5b,0x8e01,0xb824,0x1b34,0x31da,0x3fa1,0xaae4,0x143a,0xc0b8,0xe32f,0x843,0x2c29,0xd142,0x4e56,0xd55c,0xc504,0x24cd,0xc5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa939c761,0xca8545c3,0xe2f16b48,0x8e017d5b,0x1b34b824,0x3fa131da,0x143aaae4,0xe32fc0b8,0x2c290843,0x4e56d142,0xc504d55c,0xc524cd}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xa939c761,0xca8545c3,0xe2f16b48,0x8e017d5b,0x1b34b824,0x3fa131da,0x143aaae4,0xe32fc0b8,0x2c290843,0x4e56d142,0xc504d55c,0xc524cd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xca8545c3a939c761,0x8e017d5be2f16b48,0x3fa131da1b34b824,0xe32fc0b8143aaae4,0x4e56d1422c290843,0xc524cdc504d55c}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xca8545c3a939c761,0x8e017d5be2f16b48,0x3fa131da1b34b824,0xe32fc0b8143aaae4,0x4e56d1422c290843,0xc524cdc504d55c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe87a,0x6127,0x8903,0x2308,0xfbed,0x12dc,0x3209,0xc898,0x715,0x2ab8,0x446f,0x1078,0x2808,0x2493,0x34a9,0xf92b,0x96f4,0xa3a,0xf860,0xd4ad,0xc8a9,0x410d,0x626b,0xe5}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe87a,0x6127,0x8903,0x2308,0xfbed,0x12dc,0x3209,0xc898,0x715,0x2ab8,0x446f,0x1078,0x2808,0x2493,0x34a9,0xf92b,0x96f4,0xa3a,0xf860,0xd4ad,0xc8a9,0x410d,0x626b,0xe5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6127e87a,0x23088903,0x12dcfbed,0xc8983209,0x2ab80715,0x1078446f,0x24932808,0xf92b34a9,0xa3a96f4,0xd4adf860,0x410dc8a9,0xe5626b}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6127e87a,0x23088903,0x12dcfbed,0xc8983209,0x2ab80715,0x1078446f,0x24932808,0xf92b34a9,0xa3a96f4,0xd4adf860,0x410dc8a9,0xe5626b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x230889036127e87a,0xc898320912dcfbed,0x1078446f2ab80715,0xf92b34a924932808,0xd4adf8600a3a96f4,0xe5626b410dc8a9}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x230889036127e87a,0xc898320912dcfbed,0x1078446f2ab80715,0xf92b34a924932808,0xd4adf8600a3a96f4,0xe5626b410dc8a9}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa06f,0xcd2e,0x736e,0xdfff,0x28f9,0xe694,0x7c39,0xab16,0x21f5,0xbc4,0x4883,0xe8f6,0xcaae,0xf3c3,0xa9f1,0x8d46,0x16e4,0xa8e2,0x5d95,0x3838,0xa6f1,0x34b7,0x94da,0xde}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa06f,0xcd2e,0x736e,0xdfff,0x28f9,0xe694,0x7c39,0xab16,0x21f5,0xbc4,0x4883,0xe8f6,0xcaae,0xf3c3,0xa9f1,0x8d46,0x16e4,0xa8e2,0x5d95,0x3838,0xa6f1,0x34b7,0x94da,0xde}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcd2ea06f,0xdfff736e,0xe69428f9,0xab167c39,0xbc421f5,0xe8f64883,0xf3c3caae,0x8d46a9f1,0xa8e216e4,0x38385d95,0x34b7a6f1,0xde94da}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcd2ea06f,0xdfff736e,0xe69428f9,0xab167c39,0xbc421f5,0xe8f64883,0xf3c3caae,0x8d46a9f1,0xa8e216e4,0x38385d95,0x34b7a6f1,0xde94da}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdfff736ecd2ea06f,0xab167c39e69428f9,0xe8f648830bc421f5,0x8d46a9f1f3c3caae,0x38385d95a8e216e4,0xde94da34b7a6f1}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdfff736ecd2ea06f,0xab167c39e69428f9,0xe8f648830bc421f5,0x8d46a9f1f3c3caae,0x38385d95a8e216e4,0xde94da34b7a6f1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x389f,0x56c6,0xba3c,0x357a,0x94b7,0x1d0e,0x82a4,0x71fe,0x47db,0xe4cb,0xce25,0xc05e,0x551b,0xebc5,0x3f47,0x1cd0,0xf7bc,0xd3d6,0x2ebd,0xb1a9,0x2aa3,0x3afb,0xdb32,0x3a}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x389f,0x56c6,0xba3c,0x357a,0x94b7,0x1d0e,0x82a4,0x71fe,0x47db,0xe4cb,0xce25,0xc05e,0x551b,0xebc5,0x3f47,0x1cd0,0xf7bc,0xd3d6,0x2ebd,0xb1a9,0x2aa3,0x3afb,0xdb32,0x3a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x56c6389f,0x357aba3c,0x1d0e94b7,0x71fe82a4,0xe4cb47db,0xc05ece25,0xebc5551b,0x1cd03f47,0xd3d6f7bc,0xb1a92ebd,0x3afb2aa3,0x3adb32}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x56c6389f,0x357aba3c,0x1d0e94b7,0x71fe82a4,0xe4cb47db,0xc05ece25,0xebc5551b,0x1cd03f47,0xd3d6f7bc,0xb1a92ebd,0x3afb2aa3,0x3adb32}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x357aba3c56c6389f,0x71fe82a41d0e94b7,0xc05ece25e4cb47db,0x1cd03f47ebc5551b,0xb1a92ebdd3d6f7bc,0x3adb323afb2aa3}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x357aba3c56c6389f,0x71fe82a41d0e94b7,0xc05ece25e4cb47db,0x1cd03f47ebc5551b,0xb1a92ebdd3d6f7bc,0x3adb323afb2aa3}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}}} #endif }, { #if 0 
#elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac65,0x6102,0xe1f0,0x7b39,0x64be,0xff4d,0x8256,0xd11b,0x4645,0x7a89,0x814c,0x66e7,0x77a,0xc4d8,0xe691,0x1f42,0xfdb9,0x547b,0x752,0x18d9,0x9279,0xe604,0xbed4,0xec}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac65,0x6102,0xe1f0,0x7b39,0x64be,0xff4d,0x8256,0xd11b,0x4645,0x7a89,0x814c,0x66e7,0x77a,0xc4d8,0xe691,0x1f42,0xfdb9,0x547b,0x752,0x18d9,0x9279,0xe604,0xbed4,0xec}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6102ac65,0x7b39e1f0,0xff4d64be,0xd11b8256,0x7a894645,0x66e7814c,0xc4d8077a,0x1f42e691,0x547bfdb9,0x18d90752,0xe6049279,0xecbed4}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6102ac65,0x7b39e1f0,0xff4d64be,0xd11b8256,0x7a894645,0x66e7814c,0xc4d8077a,0x1f42e691,0x547bfdb9,0x18d90752,0xe6049279,0xecbed4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x7b39e1f06102ac65,0xd11b8256ff4d64be,0x66e7814c7a894645,0x1f42e691c4d8077a,0x18d90752547bfdb9,0xecbed4e6049279}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7b39e1f06102ac65,0xd11b8256ff4d64be,0x66e7814c7a894645,0x1f42e691c4d8077a,0x18d90752547bfdb9,0xecbed4e6049279}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3380,0xe477,0x9e18,0x218d,0xddc6,0x4cc5,0xb33f,0x59e7,0xb291,0xa1a1,0x8f77,0x92a2,0x480e,0x82af,0x40f1,0x5d48,0x83b0,0x4229,0xcb9e,0xff7a,0x2e32,0xa78,0x71fc,0x16}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3380,0xe477,0x9e18,0x218d,0xddc6,0x4cc5,0xb33f,0x59e7,0xb291,0xa1a1,0x8f77,0x92a2,0x480e,0x82af,0x40f1,0x5d48,0x83b0,0x4229,0xcb9e,0xff7a,0x2e32,0xa78,0x71fc,0x16}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe4773380,0x218d9e18,0x4cc5ddc6,0x59e7b33f,0xa1a1b291,0x92a28f77,0x82af480e,0x5d4840f1,0x422983b0,0xff7acb9e,0xa782e32,0x1671fc}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe4773380,0x218d9e18,0x4cc5ddc6,0x59e7b33f,0xa1a1b291,0x92a28f77,0x82af480e,0x5d4840f1,0x422983b0,0xff7acb9e,0xa782e32,0x1671fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x218d9e18e4773380,0x59e7b33f4cc5ddc6,0x92a28f77a1a1b291,0x5d4840f182af480e,0xff7acb9e422983b0,0x1671fc0a782e32}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x218d9e18e4773380,0x59e7b33f4cc5ddc6,0x92a28f77a1a1b291,0x5d4840f182af480e,0xff7acb9e422983b0,0x1671fc0a782e32}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xbb17,0xaa62,0x774e,0x2e59,0xe440,0xebce,0x874e,0xbfdb,0x3afd,0xa7ba,0xded2,0x78aa,0x7568,0xcfed,0x5633,0xa1de,0x4c5e,0x5796,0x5727,0xec25,0xac0a,0xce9c,0x3f13,0x98}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xbb17,0xaa62,0x774e,0x2e59,0xe440,0xebce,0x874e,0xbfdb,0x3afd,0xa7ba,0xded2,0x78aa,0x7568,0xcfed,0x5633,0xa1de,0x4c5e,0x5796,0x5727,0xec25,0xac0a,0xce9c,0x3f13,0x98}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xaa62bb17,0x2e59774e,0xebcee440,0xbfdb874e,0xa7ba3afd,0x78aaded2,0xcfed7568,0xa1de5633,0x57964c5e,0xec255727,0xce9cac0a,0x983f13}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xaa62bb17,0x2e59774e,0xebcee440,0xbfdb874e,0xa7ba3afd,0x78aaded2,0xcfed7568,0xa1de5633,0x57964c5e,0xec255727,0xce9cac0a,0x983f13}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2e59774eaa62bb17,0xbfdb874eebcee440,0x78aaded2a7ba3afd,0xa1de5633cfed7568,0xec25572757964c5e,0x983f13ce9cac0a}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2e59774eaa62bb17,0xbfdb874eebcee440,0x78aaded2a7ba3afd,0xa1de5633cfed7568,0xec25572757964c5e,0x983f13ce9cac0a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x539b,0x9efd,0x1e0f,0x84c6,0x9b41,0xb2,0x7da9,0x2ee4,0xb9ba,0x8576,0x7eb3,0x9918,0xf885,0x3b27,0x196e,0xe0bd,0x246,0xab84,0xf8ad,0xe726,0x6d86,0x19fb,0x412b,0x13}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x539b,0x9efd,0x1e0f,0x84c6,0x9b41,0xb2,0x7da9,0x2ee4,0xb9ba,0x8576,0x7eb3,0x9918,0xf885,0x3b27,0x196e,0xe0bd,0x246,0xab84,0xf8ad,0xe726,0x6d86,0x19fb,0x412b,0x13}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x9efd539b,0x84c61e0f,0xb29b41,0x2ee47da9,0x8576b9ba,0x99187eb3,0x3b27f885,0xe0bd196e,0xab840246,0xe726f8ad,0x19fb6d86,0x13412b}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9efd539b,0x84c61e0f,0xb29b41,0x2ee47da9,0x8576b9ba,0x99187eb3,0x3b27f885,0xe0bd196e,0xab840246,0xe726f8ad,0x19fb6d86,0x13412b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x84c61e0f9efd539b,0x2ee47da900b29b41,0x99187eb38576b9ba,0xe0bd196e3b27f885,0xe726f8adab840246,0x13412b19fb6d86}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x84c61e0f9efd539b,0x2ee47da900b29b41,0x99187eb38576b9ba,0xe0bd196e3b27f885,0xe726f8adab840246,0x13412b19fb6d86}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc761,0xa939,0x45c3,0xca85,0x6b48,0xe2f1,0x7d5b,0x8e01,0xb824,0x1b34,0x31da,0x3fa1,0xaae4,0x143a,0xc0b8,0xe32f,0x843,0x2c29,0xd142,0x4e56,0xd55c,0xc504,0x24cd,0xc5}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc761,0xa939,0x45c3,0xca85,0x6b48,0xe2f1,0x7d5b,0x8e01,0xb824,0x1b34,0x31da,0x3fa1,0xaae4,0x143a,0xc0b8,0xe32f,0x843,0x2c29,0xd142,0x4e56,0xd55c,0xc504,0x24cd,0xc5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa939c761,0xca8545c3,0xe2f16b48,0x8e017d5b,0x1b34b824,0x3fa131da,0x143aaae4,0xe32fc0b8,0x2c290843,0x4e56d142,0xc504d55c,0xc524cd}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa939c761,0xca8545c3,0xe2f16b48,0x8e017d5b,0x1b34b824,0x3fa131da,0x143aaae4,0xe32fc0b8,0x2c290843,0x4e56d142,0xc504d55c,0xc524cd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xca8545c3a939c761,0x8e017d5be2f16b48,0x3fa131da1b34b824,0xe32fc0b8143aaae4,0x4e56d1422c290843,0xc524cdc504d55c}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xca8545c3a939c761,0x8e017d5be2f16b48,0x3fa131da1b34b824,0xe32fc0b8143aaae4,0x4e56d1422c290843,0xc524cdc504d55c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe87a,0x6127,0x8903,0x2308,0xfbed,0x12dc,0x3209,0xc898,0x715,0x2ab8,0x446f,0x1078,0x2808,0x2493,0x34a9,0xf92b,0x96f4,0xa3a,0xf860,0xd4ad,0xc8a9,0x410d,0x626b,0xe5}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe87a,0x6127,0x8903,0x2308,0xfbed,0x12dc,0x3209,0xc898,0x715,0x2ab8,0x446f,0x1078,0x2808,0x2493,0x34a9,0xf92b,0x96f4,0xa3a,0xf860,0xd4ad,0xc8a9,0x410d,0x626b,0xe5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6127e87a,0x23088903,0x12dcfbed,0xc8983209,0x2ab80715,0x1078446f,0x24932808,0xf92b34a9,0xa3a96f4,0xd4adf860,0x410dc8a9,0xe5626b}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6127e87a,0x23088903,0x12dcfbed,0xc8983209,0x2ab80715,0x1078446f,0x24932808,0xf92b34a9,0xa3a96f4,0xd4adf860,0x410dc8a9,0xe5626b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x230889036127e87a,0xc898320912dcfbed,0x1078446f2ab80715,0xf92b34a924932808,0xd4adf8600a3a96f4,0xe5626b410dc8a9}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x230889036127e87a,0xc898320912dcfbed,0x1078446f2ab80715,0xf92b34a924932808,0xd4adf8600a3a96f4,0xe5626b410dc8a9}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa06f,0xcd2e,0x736e,0xdfff,0x28f9,0xe694,0x7c39,0xab16,0x21f5,0xbc4,0x4883,0xe8f6,0xcaae,0xf3c3,0xa9f1,0x8d46,0x16e4,0xa8e2,0x5d95,0x3838,0xa6f1,0x34b7,0x94da,0xde}}} 
+{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa06f,0xcd2e,0x736e,0xdfff,0x28f9,0xe694,0x7c39,0xab16,0x21f5,0xbc4,0x4883,0xe8f6,0xcaae,0xf3c3,0xa9f1,0x8d46,0x16e4,0xa8e2,0x5d95,0x3838,0xa6f1,0x34b7,0x94da,0xde}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcd2ea06f,0xdfff736e,0xe69428f9,0xab167c39,0xbc421f5,0xe8f64883,0xf3c3caae,0x8d46a9f1,0xa8e216e4,0x38385d95,0x34b7a6f1,0xde94da}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcd2ea06f,0xdfff736e,0xe69428f9,0xab167c39,0xbc421f5,0xe8f64883,0xf3c3caae,0x8d46a9f1,0xa8e216e4,0x38385d95,0x34b7a6f1,0xde94da}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdfff736ecd2ea06f,0xab167c39e69428f9,0xe8f648830bc421f5,0x8d46a9f1f3c3caae,0x38385d95a8e216e4,0xde94da34b7a6f1}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdfff736ecd2ea06f,0xab167c39e69428f9,0xe8f648830bc421f5,0x8d46a9f1f3c3caae,0x38385d95a8e216e4,0xde94da34b7a6f1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x389f,0x56c6,0xba3c,0x357a,0x94b7,0x1d0e,0x82a4,0x71fe,0x47db,0xe4cb,0xce25,0xc05e,0x551b,0xebc5,0x3f47,0x1cd0,0xf7bc,0xd3d6,0x2ebd,0xb1a9,0x2aa3,0x3afb,0xdb32,0x3a}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x389f,0x56c6,0xba3c,0x357a,0x94b7,0x1d0e,0x82a4,0x71fe,0x47db,0xe4cb,0xce25,0xc05e,0x551b,0xebc5,0x3f47,0x1cd0,0xf7bc,0xd3d6,0x2ebd,0xb1a9,0x2aa3,0x3afb,0xdb32,0x3a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x56c6389f,0x357aba3c,0x1d0e94b7,0x71fe82a4,0xe4cb47db,0xc05ece25,0xebc5551b,0x1cd03f47,0xd3d6f7bc,0xb1a92ebd,0x3afb2aa3,0x3adb32}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x56c6389f,0x357aba3c,0x1d0e94b7,0x71fe82a4,0xe4cb47db,0xc05ece25,0xebc5551b,0x1cd03f47,0xd3d6f7bc,0xb1a92ebd,0x3afb2aa3,0x3adb32}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x357aba3c56c6389f,0x71fe82a41d0e94b7,0xc05ece25e4cb47db,0x1cd03f47ebc5551b,0xb1a92ebdd3d6f7bc,0x3adb323afb2aa3}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x357aba3c56c6389f,0x71fe82a41d0e94b7,0xc05ece25e4cb47db,0x1cd03f47ebc5551b,0xb1a92ebdd3d6f7bc,0x3adb323afb2aa3}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb919,0xcfad,0xeb7f,0x81f8,0x4d97,0xf272,0x4300,0xdd38,0x1b01,0x826,0x1894,0x3e43,0x7310,0xa84,0x4161,0x7c63,0xec4,0x9625,0xe475,0xadc9,0x5a7,0xfa6a,0xb7e3,0x7e}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb919,0xcfad,0xeb7f,0x81f8,0x4d97,0xf272,0x4300,0xdd38,0x1b01,0x826,0x1894,0x3e43,0x7310,0xa84,0x4161,0x7c63,0xec4,0x9625,0xe475,0xadc9,0x5a7,0xfa6a,0xb7e3,0x7e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcfadb919,0x81f8eb7f,0xf2724d97,0xdd384300,0x8261b01,0x3e431894,0xa847310,0x7c634161,0x96250ec4,0xadc9e475,0xfa6a05a7,0x7eb7e3}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcfadb919,0x81f8eb7f,0xf2724d97,0xdd384300,0x8261b01,0x3e431894,0xa847310,0x7c634161,0x96250ec4,0xadc9e475,0xfa6a05a7,0x7eb7e3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x81f8eb7fcfadb919,0xdd384300f2724d97,0x3e43189408261b01,0x7c6341610a847310,0xadc9e47596250ec4,0x7eb7e3fa6a05a7}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x81f8eb7fcfadb919,0xdd384300f2724d97,0x3e43189408261b01,0x7c6341610a847310,0xadc9e47596250ec4,0x7eb7e3fa6a05a7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e83,0xade4,0x9d21,0x2e51,0x42e5,0xd3,0xac79,0xe0a8,0x32e2,0xfcf2,0xb504,0xc941,0xa0d0,0x8016,0x5485,0x3331,0xabd7,0xc296,0xf76e,0xef5,0xce39,0x8e31,0x165c,0x56}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e83,0xade4,0x9d21,0x2e51,0x42e5,0xd3,0xac79,0xe0a8,0x32e2,0xfcf2,0xb504,0xc941,0xa0d0,0x8016,0x5485,0x3331,0xabd7,0xc296,0xf76e,0xef5,0xce39,0x8e31,0x165c,0x56}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xade41e83,0x2e519d21,0xd342e5,0xe0a8ac79,0xfcf232e2,0xc941b504,0x8016a0d0,0x33315485,0xc296abd7,0xef5f76e,0x8e31ce39,0x56165c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xade41e83,0x2e519d21,0xd342e5,0xe0a8ac79,0xfcf232e2,0xc941b504,0x8016a0d0,0x33315485,0xc296abd7,0xef5f76e,0x8e31ce39,0x56165c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2e519d21ade41e83,0xe0a8ac7900d342e5,0xc941b504fcf232e2,0x333154858016a0d0,0xef5f76ec296abd7,0x56165c8e31ce39}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2e519d21ade41e83,0xe0a8ac7900d342e5,0xc941b504fcf232e2,0x333154858016a0d0,0xef5f76ec296abd7,0x56165c8e31ce39}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xdd11,0x6e27,0xfbdb,0xf5d9,0xd6cb,0x9fef,0xc59a,0x7a4,0xfbd,0x5c3e,0xbc2,0xd091,0x6546,0xc9d0,0x193e,0x93fa,0x776,0x2763,0xdecd,0xbbe3,0xcec1,0x6abf,0x9070,0x66}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xdd11,0x6e27,0xfbdb,0xf5d9,0xd6cb,0x9fef,0xc59a,0x7a4,0xfbd,0x5c3e,0xbc2,0xd091,0x6546,0xc9d0,0x193e,0x93fa,0x776,0x2763,0xdecd,0xbbe3,0xcec1,0x6abf,0x9070,0x66}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6e27dd11,0xf5d9fbdb,0x9fefd6cb,0x7a4c59a,0x5c3e0fbd,0xd0910bc2,0xc9d06546,0x93fa193e,0x27630776,0xbbe3decd,0x6abfcec1,0x669070}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6e27dd11,0xf5d9fbdb,0x9fefd6cb,0x7a4c59a,0x5c3e0fbd,0xd0910bc2,0xc9d06546,0x93fa193e,0x27630776,0xbbe3decd,0x6abfcec1,0x669070}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf5d9fbdb6e27dd11,0x7a4c59a9fefd6cb,0xd0910bc25c3e0fbd,0x93fa193ec9d06546,0xbbe3decd27630776,0x6690706abfcec1}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf5d9fbdb6e27dd11,0x7a4c59a9fefd6cb,0xd0910bc25c3e0fbd,0x93fa193ec9d06546,0xbbe3decd27630776,0x6690706abfcec1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x46e7,0x3052,0x1480,0x7e07,0xb268,0xd8d,0xbcff,0x22c7,0xe4fe,0xf7d9,0xe76b,0xc1bc,0x8cef,0xf57b,0xbe9e,0x839c,0xf13b,0x69da,0x1b8a,0x5236,0xfa58,0x595,0x481c,0x81}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x46e7,0x3052,0x1480,0x7e07,0xb268,0xd8d,0xbcff,0x22c7,0xe4fe,0xf7d9,0xe76b,0xc1bc,0x8cef,0xf57b,0xbe9e,0x839c,0xf13b,0x69da,0x1b8a,0x5236,0xfa58,0x595,0x481c,0x81}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x305246e7,0x7e071480,0xd8db268,0x22c7bcff,0xf7d9e4fe,0xc1bce76b,0xf57b8cef,0x839cbe9e,0x69daf13b,0x52361b8a,0x595fa58,0x81481c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x305246e7,0x7e071480,0xd8db268,0x22c7bcff,0xf7d9e4fe,0xc1bce76b,0xf57b8cef,0x839cbe9e,0x69daf13b,0x52361b8a,0x595fa58,0x81481c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7e071480305246e7,0x22c7bcff0d8db268,0xc1bce76bf7d9e4fe,0x839cbe9ef57b8cef,0x52361b8a69daf13b,0x81481c0595fa58}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7e071480305246e7,0x22c7bcff0d8db268,0xc1bce76bf7d9e4fe,0x839cbe9ef57b8cef,0x52361b8a69daf13b,0x81481c0595fa58}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0xb89, 0xf4d, 0xbd8, 0x18d2, 0x781, 0x1f79, 0xef5, 0xcfd, 0xa7d, 0x121a, 0x59e, 0x18a3, 0x2b, 0x122f, 0x9d5, 0x18aa, 0x105f, 0x1bcd, 0x871, 0x1f9d, 0xae4, 0xc5c, 0x385, 0xbe8, 0x1878, 0xcd8, 0x7a1, 0x7c8, 0x5b4, 0xe} @@ -3593,220 +3593,220 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe7,0x36f4,0xe2a7,0x986d,0x214b,0xf464,0x7818,0x49f7,0x5ea0,0x73e7,0x9e17,0x85a0,0xd1cc,0xc33d,0x7833,0xbb28,0xba60,0x69d5,0xd73c,0x879e,0x6732,0x905,0xeefd,0xc8}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe7,0x36f4,0xe2a7,0x986d,0x214b,0xf464,0x7818,0x49f7,0x5ea0,0x73e7,0x9e17,0x85a0,0xd1cc,0xc33d,0x7833,0xbb28,0xba60,0x69d5,0xd73c,0x879e,0x6732,0x905,0xeefd,0xc8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x36f400e7,0x986de2a7,0xf464214b,0x49f77818,0x73e75ea0,0x85a09e17,0xc33dd1cc,0xbb287833,0x69d5ba60,0x879ed73c,0x9056732,0xc8eefd}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x36f400e7,0x986de2a7,0xf464214b,0x49f77818,0x73e75ea0,0x85a09e17,0xc33dd1cc,0xbb287833,0x69d5ba60,0x879ed73c,0x9056732,0xc8eefd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x986de2a736f400e7,0x49f77818f464214b,0x85a09e1773e75ea0,0xbb287833c33dd1cc,0x879ed73c69d5ba60,0xc8eefd09056732}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x986de2a736f400e7,0x49f77818f464214b,0x85a09e1773e75ea0,0xbb287833c33dd1cc,0x879ed73c69d5ba60,0xc8eefd09056732}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x925e,0xff6f,0xa6dc,0x2eff,0xb2a6,0xe88c,0xab24,0x6829,0x91eb,0x118,0x77ac,0x8506,0x9934,0x5a86,0x5cac,0xfd15,0x5869,0xc232,0xebcf,0xcdef,0x254,0xf9e4,0xcc7b,0x96}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x925e,0xff6f,0xa6dc,0x2eff,0xb2a6,0xe88c,0xab24,0x6829,0x91eb,0x118,0x77ac,0x8506,0x9934,0x5a86,0x5cac,0xfd15,0x5869,0xc232,0xebcf,0xcdef,0x254,0xf9e4,0xcc7b,0x96}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xff6f925e,0x2effa6dc,0xe88cb2a6,0x6829ab24,0x11891eb,0x850677ac,0x5a869934,0xfd155cac,0xc2325869,0xcdefebcf,0xf9e40254,0x96cc7b}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xff6f925e,0x2effa6dc,0xe88cb2a6,0x6829ab24,0x11891eb,0x850677ac,0x5a869934,0xfd155cac,0xc2325869,0xcdefebcf,0xf9e40254,0x96cc7b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2effa6dcff6f925e,0x6829ab24e88cb2a6,0x850677ac011891eb,0xfd155cac5a869934,0xcdefebcfc2325869,0x96cc7bf9e40254}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2effa6dcff6f925e,0x6829ab24e88cb2a6,0x850677ac011891eb,0xfd155cac5a869934,0xcdefebcfc2325869,0x96cc7bf9e40254}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xae19,0xed4d,0x178c,0x7829,0x7b15,0x4808,0xdab3,0xebc6,0x25cb,0x3298,0xe9bd,0x3d5e,0xe9a4,0x960,0x92e6,0xb3ea,0x33e,0xf491,0x2343,0xbdb9,0xa17d,0x63a5,0x767f,0x41}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xae19,0xed4d,0x178c,0x7829,0x7b15,0x4808,0xdab3,0xebc6,0x25cb,0x3298,0xe9bd,0x3d5e,0xe9a4,0x960,0x92e6,0xb3ea,0x33e,0xf491,0x2343,0xbdb9,0xa17d,0x63a5,0x767f,0x41}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xed4dae19,0x7829178c,0x48087b15,0xebc6dab3,0x329825cb,0x3d5ee9bd,0x960e9a4,0xb3ea92e6,0xf491033e,0xbdb92343,0x63a5a17d,0x41767f}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xed4dae19,0x7829178c,0x48087b15,0xebc6dab3,0x329825cb,0x3d5ee9bd,0x960e9a4,0xb3ea92e6,0xf491033e,0xbdb92343,0x63a5a17d,0x41767f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7829178ced4dae19,0xebc6dab348087b15,0x3d5ee9bd329825cb,0xb3ea92e60960e9a4,0xbdb92343f491033e,0x41767f63a5a17d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7829178ced4dae19,0xebc6dab348087b15,0x3d5ee9bd329825cb,0xb3ea92e60960e9a4,0xbdb92343f491033e,0x41767f63a5a17d}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xff19,0xc90b,0x1d58,0x6792,0xdeb4,0xb9b,0x87e7,0xb608,0xa15f,0x8c18,0x61e8,0x7a5f,0x2e33,0x3cc2,0x87cc,0x44d7,0x459f,0x962a,0x28c3,0x7861,0x98cd,0xf6fa,0x1102,0x37}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xff19,0xc90b,0x1d58,0x6792,0xdeb4,0xb9b,0x87e7,0xb608,0xa15f,0x8c18,0x61e8,0x7a5f,0x2e33,0x3cc2,0x87cc,0x44d7,0x459f,0x962a,0x28c3,0x7861,0x98cd,0xf6fa,0x1102,0x37}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc90bff19,0x67921d58,0xb9bdeb4,0xb60887e7,0x8c18a15f,0x7a5f61e8,0x3cc22e33,0x44d787cc,0x962a459f,0x786128c3,0xf6fa98cd,0x371102}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc90bff19,0x67921d58,0xb9bdeb4,0xb60887e7,0x8c18a15f,0x7a5f61e8,0x3cc22e33,0x44d787cc,0x962a459f,0x786128c3,0xf6fa98cd,0x371102}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x67921d58c90bff19,0xb60887e70b9bdeb4,0x7a5f61e88c18a15f,0x44d787cc3cc22e33,0x786128c3962a459f,0x371102f6fa98cd}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x67921d58c90bff19,0xb60887e70b9bdeb4,0x7a5f61e88c18a15f,0x44d787cc3cc22e33,0x786128c3962a459f,0x371102f6fa98cd}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}}} #endif }, { #if 0 
#elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3d63,0xdad1,0xf501,0xd58f,0x8741,0xd265,0xf8bd,0xb3b9,0xac08,0xfc8b,0x45ab,0xbcdf,0x501,0x9f7,0x10ed,0x102f,0xc6e3,0xdc57,0xf892,0x8db4,0x2c76,0x21ab,0x2bc3,0x8e}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3d63,0xdad1,0xf501,0xd58f,0x8741,0xd265,0xf8bd,0xb3b9,0xac08,0xfc8b,0x45ab,0xbcdf,0x501,0x9f7,0x10ed,0x102f,0xc6e3,0xdc57,0xf892,0x8db4,0x2c76,0x21ab,0x2bc3,0x8e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdad13d63,0xd58ff501,0xd2658741,0xb3b9f8bd,0xfc8bac08,0xbcdf45ab,0x9f70501,0x102f10ed,0xdc57c6e3,0x8db4f892,0x21ab2c76,0x8e2bc3}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdad13d63,0xd58ff501,0xd2658741,0xb3b9f8bd,0xfc8bac08,0xbcdf45ab,0x9f70501,0x102f10ed,0xdc57c6e3,0x8db4f892,0x21ab2c76,0x8e2bc3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0xd58ff501dad13d63,0xb3b9f8bdd2658741,0xbcdf45abfc8bac08,0x102f10ed09f70501,0x8db4f892dc57c6e3,0x8e2bc321ab2c76}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd58ff501dad13d63,0xb3b9f8bdd2658741,0xbcdf45abfc8bac08,0x102f10ed09f70501,0x8db4f892dc57c6e3,0x8e2bc321ab2c76}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc998,0x418c,0xa8e4,0x2354,0x622a,0xb76d,0x5487,0xdad9,0x1672,0x522b,0xa00f,0xdfa5,0x296b,0xe17c,0x595e,0x91e1,0xa22d,0xe126,0x904c,0x9288,0x5075,0xc6c5,0x61b0,0xb1}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc998,0x418c,0xa8e4,0x2354,0x622a,0xb76d,0x5487,0xdad9,0x1672,0x522b,0xa00f,0xdfa5,0x296b,0xe17c,0x595e,0x91e1,0xa22d,0xe126,0x904c,0x9288,0x5075,0xc6c5,0x61b0,0xb1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x418cc998,0x2354a8e4,0xb76d622a,0xdad95487,0x522b1672,0xdfa5a00f,0xe17c296b,0x91e1595e,0xe126a22d,0x9288904c,0xc6c55075,0xb161b0}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x418cc998,0x2354a8e4,0xb76d622a,0xdad95487,0x522b1672,0xdfa5a00f,0xe17c296b,0x91e1595e,0xe126a22d,0x9288904c,0xc6c55075,0xb161b0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2354a8e4418cc998,0xdad95487b76d622a,0xdfa5a00f522b1672,0x91e1595ee17c296b,0x9288904ce126a22d,0xb161b0c6c55075}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2354a8e4418cc998,0xdad95487b76d622a,0xdfa5a00f522b1672,0x91e1595ee17c296b,0x9288904ce126a22d,0xb161b0c6c55075}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1271,0x594e,0x16ee,0x35fa,0xaf0e,0x11b2,0x1fca,0x24b7,0xa3e3,0x2bcc,0xc2f0,0x6409,0xf8e1,0x6a8f,0x67e,0xe7ee,0xad00,0x2b9a,0x6813,0x5e0a,0x6dec,0x48f5,0xbd1d,0xb3}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1271,0x594e,0x16ee,0x35fa,0xaf0e,0x11b2,0x1fca,0x24b7,0xa3e3,0x2bcc,0xc2f0,0x6409,0xf8e1,0x6a8f,0x67e,0xe7ee,0xad00,0x2b9a,0x6813,0x5e0a,0x6dec,0x48f5,0xbd1d,0xb3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x594e1271,0x35fa16ee,0x11b2af0e,0x24b71fca,0x2bcca3e3,0x6409c2f0,0x6a8ff8e1,0xe7ee067e,0x2b9aad00,0x5e0a6813,0x48f56dec,0xb3bd1d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x594e1271,0x35fa16ee,0x11b2af0e,0x24b71fca,0x2bcca3e3,0x6409c2f0,0x6a8ff8e1,0xe7ee067e,0x2b9aad00,0x5e0a6813,0x48f56dec,0xb3bd1d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x35fa16ee594e1271,0x24b71fca11b2af0e,0x6409c2f02bcca3e3,0xe7ee067e6a8ff8e1,0x5e0a68132b9aad00,0xb3bd1d48f56dec}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x35fa16ee594e1271,0x24b71fca11b2af0e,0x6409c2f02bcca3e3,0xe7ee067e6a8ff8e1,0x5e0a68132b9aad00,0xb3bd1d48f56dec}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc29d,0x252e,0xafe,0x2a70,0x78be,0x2d9a,0x742,0x4c46,0x53f7,0x374,0xba54,0x4320,0xfafe,0xf608,0xef12,0xefd0,0x391c,0x23a8,0x76d,0x724b,0xd389,0xde54,0xd43c,0x71}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc29d,0x252e,0xafe,0x2a70,0x78be,0x2d9a,0x742,0x4c46,0x53f7,0x374,0xba54,0x4320,0xfafe,0xf608,0xef12,0xefd0,0x391c,0x23a8,0x76d,0x724b,0xd389,0xde54,0xd43c,0x71}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x252ec29d,0x2a700afe,0x2d9a78be,0x4c460742,0x37453f7,0x4320ba54,0xf608fafe,0xefd0ef12,0x23a8391c,0x724b076d,0xde54d389,0x71d43c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x252ec29d,0x2a700afe,0x2d9a78be,0x4c460742,0x37453f7,0x4320ba54,0xf608fafe,0xefd0ef12,0x23a8391c,0x724b076d,0xde54d389,0x71d43c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2a700afe252ec29d,0x4c4607422d9a78be,0x4320ba54037453f7,0xefd0ef12f608fafe,0x724b076d23a8391c,0x71d43cde54d389}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2a700afe252ec29d,0x4c4607422d9a78be,0x4320ba54037453f7,0xefd0ef12f608fafe,0x724b076d23a8391c,0x71d43cde54d389}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe7,0x36f4,0xe2a7,0x986d,0x214b,0xf464,0x7818,0x49f7,0x5ea0,0x73e7,0x9e17,0x85a0,0xd1cc,0xc33d,0x7833,0xbb28,0xba60,0x69d5,0xd73c,0x879e,0x6732,0x905,0xeefd,0xc8}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe7,0x36f4,0xe2a7,0x986d,0x214b,0xf464,0x7818,0x49f7,0x5ea0,0x73e7,0x9e17,0x85a0,0xd1cc,0xc33d,0x7833,0xbb28,0xba60,0x69d5,0xd73c,0x879e,0x6732,0x905,0xeefd,0xc8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x36f400e7,0x986de2a7,0xf464214b,0x49f77818,0x73e75ea0,0x85a09e17,0xc33dd1cc,0xbb287833,0x69d5ba60,0x879ed73c,0x9056732,0xc8eefd}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x36f400e7,0x986de2a7,0xf464214b,0x49f77818,0x73e75ea0,0x85a09e17,0xc33dd1cc,0xbb287833,0x69d5ba60,0x879ed73c,0x9056732,0xc8eefd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x986de2a736f400e7,0x49f77818f464214b,0x85a09e1773e75ea0,0xbb287833c33dd1cc,0x879ed73c69d5ba60,0xc8eefd09056732}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x986de2a736f400e7,0x49f77818f464214b,0x85a09e1773e75ea0,0xbb287833c33dd1cc,0x879ed73c69d5ba60,0xc8eefd09056732}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x925e,0xff6f,0xa6dc,0x2eff,0xb2a6,0xe88c,0xab24,0x6829,0x91eb,0x118,0x77ac,0x8506,0x9934,0x5a86,0x5cac,0xfd15,0x5869,0xc232,0xebcf,0xcdef,0x254,0xf9e4,0xcc7b,0x96}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x925e,0xff6f,0xa6dc,0x2eff,0xb2a6,0xe88c,0xab24,0x6829,0x91eb,0x118,0x77ac,0x8506,0x9934,0x5a86,0x5cac,0xfd15,0x5869,0xc232,0xebcf,0xcdef,0x254,0xf9e4,0xcc7b,0x96}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xff6f925e,0x2effa6dc,0xe88cb2a6,0x6829ab24,0x11891eb,0x850677ac,0x5a869934,0xfd155cac,0xc2325869,0xcdefebcf,0xf9e40254,0x96cc7b}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xff6f925e,0x2effa6dc,0xe88cb2a6,0x6829ab24,0x11891eb,0x850677ac,0x5a869934,0xfd155cac,0xc2325869,0xcdefebcf,0xf9e40254,0x96cc7b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2effa6dcff6f925e,0x6829ab24e88cb2a6,0x850677ac011891eb,0xfd155cac5a869934,0xcdefebcfc2325869,0x96cc7bf9e40254}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2effa6dcff6f925e,0x6829ab24e88cb2a6,0x850677ac011891eb,0xfd155cac5a869934,0xcdefebcfc2325869,0x96cc7bf9e40254}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xae19,0xed4d,0x178c,0x7829,0x7b15,0x4808,0xdab3,0xebc6,0x25cb,0x3298,0xe9bd,0x3d5e,0xe9a4,0x960,0x92e6,0xb3ea,0x33e,0xf491,0x2343,0xbdb9,0xa17d,0x63a5,0x767f,0x41}}} 
+{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xae19,0xed4d,0x178c,0x7829,0x7b15,0x4808,0xdab3,0xebc6,0x25cb,0x3298,0xe9bd,0x3d5e,0xe9a4,0x960,0x92e6,0xb3ea,0x33e,0xf491,0x2343,0xbdb9,0xa17d,0x63a5,0x767f,0x41}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xed4dae19,0x7829178c,0x48087b15,0xebc6dab3,0x329825cb,0x3d5ee9bd,0x960e9a4,0xb3ea92e6,0xf491033e,0xbdb92343,0x63a5a17d,0x41767f}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xed4dae19,0x7829178c,0x48087b15,0xebc6dab3,0x329825cb,0x3d5ee9bd,0x960e9a4,0xb3ea92e6,0xf491033e,0xbdb92343,0x63a5a17d,0x41767f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7829178ced4dae19,0xebc6dab348087b15,0x3d5ee9bd329825cb,0xb3ea92e60960e9a4,0xbdb92343f491033e,0x41767f63a5a17d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7829178ced4dae19,0xebc6dab348087b15,0x3d5ee9bd329825cb,0xb3ea92e60960e9a4,0xbdb92343f491033e,0x41767f63a5a17d}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xff19,0xc90b,0x1d58,0x6792,0xdeb4,0xb9b,0x87e7,0xb608,0xa15f,0x8c18,0x61e8,0x7a5f,0x2e33,0x3cc2,0x87cc,0x44d7,0x459f,0x962a,0x28c3,0x7861,0x98cd,0xf6fa,0x1102,0x37}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xff19,0xc90b,0x1d58,0x6792,0xdeb4,0xb9b,0x87e7,0xb608,0xa15f,0x8c18,0x61e8,0x7a5f,0x2e33,0x3cc2,0x87cc,0x44d7,0x459f,0x962a,0x28c3,0x7861,0x98cd,0xf6fa,0x1102,0x37}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc90bff19,0x67921d58,0xb9bdeb4,0xb60887e7,0x8c18a15f,0x7a5f61e8,0x3cc22e33,0x44d787cc,0x962a459f,0x786128c3,0xf6fa98cd,0x371102}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc90bff19,0x67921d58,0xb9bdeb4,0xb60887e7,0x8c18a15f,0x7a5f61e8,0x3cc22e33,0x44d787cc,0x962a459f,0x786128c3,0xf6fa98cd,0x371102}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x67921d58c90bff19,0xb60887e70b9bdeb4,0x7a5f61e88c18a15f,0x44d787cc3cc22e33,0x786128c3962a459f,0x371102f6fa98cd}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x67921d58c90bff19,0xb60887e70b9bdeb4,0x7a5f61e88c18a15f,0x44d787cc3cc22e33,0x786128c3962a459f,0x371102f6fa98cd}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd70d,0x31e4,0xa551,0x7483,0x6f09,0x34d,0x6a80,0x85f,0x6b11,0xe29b,0x188,0x38d2,0x85b,0xa241,0xc423,0xddc8,0x3260,0x1722,0xf3a4,0x7cf7,0x36e8,0x7955,0xeeb9,0xc6}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd70d,0x31e4,0xa551,0x7483,0x6f09,0x34d,0x6a80,0x85f,0x6b11,0xe29b,0x188,0x38d2,0x85b,0xa241,0xc423,0xddc8,0x3260,0x1722,0xf3a4,0x7cf7,0x36e8,0x7955,0xeeb9,0xc6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x31e4d70d,0x7483a551,0x34d6f09,0x85f6a80,0xe29b6b11,0x38d20188,0xa241085b,0xddc8c423,0x17223260,0x7cf7f3a4,0x795536e8,0xc6eeb9}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x31e4d70d,0x7483a551,0x34d6f09,0x85f6a80,0xe29b6b11,0x38d20188,0xa241085b,0xddc8c423,0x17223260,0x7cf7f3a4,0x795536e8,0xc6eeb9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7483a55131e4d70d,0x85f6a80034d6f09,0x38d20188e29b6b11,0xddc8c423a241085b,0x7cf7f3a417223260,0xc6eeb9795536e8}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7483a55131e4d70d,0x85f6a80034d6f09,0x38d20188e29b6b11,0xddc8c423a241085b,0x7cf7f3a417223260,0xc6eeb9795536e8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x59a9,0x8f53,0xd42f,0xf65b,0x7134,0x4475,0x9543,0x8428,0x4555,0x7d45,0x7bfb,0xe15d,0xe9c2,0x24ec,0xf17f,0x88ea,0x766c,0xbf2d,0x2b42,0x2771,0x5dfc,0xd040,0xfa62,0xc9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x59a9,0x8f53,0xd42f,0xf65b,0x7134,0x4475,0x9543,0x8428,0x4555,0x7d45,0x7bfb,0xe15d,0xe9c2,0x24ec,0xf17f,0x88ea,0x766c,0xbf2d,0x2b42,0x2771,0x5dfc,0xd040,0xfa62,0xc9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8f5359a9,0xf65bd42f,0x44757134,0x84289543,0x7d454555,0xe15d7bfb,0x24ece9c2,0x88eaf17f,0xbf2d766c,0x27712b42,0xd0405dfc,0xc9fa62}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8f5359a9,0xf65bd42f,0x44757134,0x84289543,0x7d454555,0xe15d7bfb,0x24ece9c2,0x88eaf17f,0xbf2d766c,0x27712b42,0xd0405dfc,0xc9fa62}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf65bd42f8f5359a9,0x8428954344757134,0xe15d7bfb7d454555,0x88eaf17f24ece9c2,0x27712b42bf2d766c,0xc9fa62d0405dfc}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf65bd42f8f5359a9,0x8428954344757134,0xe15d7bfb7d454555,0x88eaf17f24ece9c2,0x27712b42bf2d766c,0xc9fa62d0405dfc}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4b49,0x89b0,0x8c52,0x91ca,0xed1b,0xd527,0x453,0x82d,0xb0eb,0xb6bf,0x3790,0x5816,0x49bb,0xa0a7,0xffc6,0x5530,0x23b9,0x12bb,0x52c4,0x6f51,0x25fd,0x62d,0x723d,0xc6}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x4b49,0x89b0,0x8c52,0x91ca,0xed1b,0xd527,0x453,0x82d,0xb0eb,0xb6bf,0x3790,0x5816,0x49bb,0xa0a7,0xffc6,0x5530,0x23b9,0x12bb,0x52c4,0x6f51,0x25fd,0x62d,0x723d,0xc6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x89b04b49,0x91ca8c52,0xd527ed1b,0x82d0453,0xb6bfb0eb,0x58163790,0xa0a749bb,0x5530ffc6,0x12bb23b9,0x6f5152c4,0x62d25fd,0xc6723d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x89b04b49,0x91ca8c52,0xd527ed1b,0x82d0453,0xb6bfb0eb,0x58163790,0xa0a749bb,0x5530ffc6,0x12bb23b9,0x6f5152c4,0x62d25fd,0xc6723d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x91ca8c5289b04b49,0x82d0453d527ed1b,0x58163790b6bfb0eb,0x5530ffc6a0a749bb,0x6f5152c412bb23b9,0xc6723d062d25fd}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x91ca8c5289b04b49,0x82d0453d527ed1b,0x58163790b6bfb0eb,0x5530ffc6a0a749bb,0x6f5152c412bb23b9,0xc6723d062d25fd}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x28f3,0xce1b,0x5aae,0x8b7c,0x90f6,0xfcb2,0x957f,0xf7a0,0x94ee,0x1d64,0xfe77,0xc72d,0xf7a4,0x5dbe,0x3bdc,0x2237,0xcd9f,0xe8dd,0xc5b,0x8308,0xc917,0x86aa,0x1146,0x39}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x28f3,0xce1b,0x5aae,0x8b7c,0x90f6,0xfcb2,0x957f,0xf7a0,0x94ee,0x1d64,0xfe77,0xc72d,0xf7a4,0x5dbe,0x3bdc,0x2237,0xcd9f,0xe8dd,0xc5b,0x8308,0xc917,0x86aa,0x1146,0x39}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xce1b28f3,0x8b7c5aae,0xfcb290f6,0xf7a0957f,0x1d6494ee,0xc72dfe77,0x5dbef7a4,0x22373bdc,0xe8ddcd9f,0x83080c5b,0x86aac917,0x391146}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xce1b28f3,0x8b7c5aae,0xfcb290f6,0xf7a0957f,0x1d6494ee,0xc72dfe77,0x5dbef7a4,0x22373bdc,0xe8ddcd9f,0x83080c5b,0x86aac917,0x391146}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8b7c5aaece1b28f3,0xf7a0957ffcb290f6,0xc72dfe771d6494ee,0x22373bdc5dbef7a4,0x83080c5be8ddcd9f,0x39114686aac917}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8b7c5aaece1b28f3,0xf7a0957ffcb290f6,0xc72dfe771d6494ee,0x22373bdc5dbef7a4,0x83080c5be8ddcd9f,0x39114686aac917}}}} #endif -}}}}; +}}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/finit.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/finit.c index b3808edf07..c9a3687282 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/finit.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/finit.c @@ -29,29 +29,29 @@ quat_alg_elem_finalize(quat_alg_elem_t *elem) void ibz_vec_2_init(ibz_vec_2_t *vec) { - ibz_init(&((*vec)[0])); - ibz_init(&((*vec)[1])); + ibz_init(&(vec->v[0])); + ibz_init(&(vec->v[1])); } void ibz_vec_2_finalize(ibz_vec_2_t *vec) { - ibz_finalize(&((*vec)[0])); - ibz_finalize(&((*vec)[1])); + ibz_finalize(&(vec->v[0])); + ibz_finalize(&(vec->v[1])); } void ibz_vec_4_init(ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_init(&(*vec)[i]); + ibz_init(&vec->v[i]); } } void ibz_vec_4_finalize(ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_finalize(&(*vec)[i]); + ibz_finalize(&vec->v[i]); } } @@ -60,7 +60,7 @@ ibz_mat_2x2_init(ibz_mat_2x2_t *mat) { for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_init(&(*mat)[i][j]); + ibz_init(&(mat->m)[i][j]); } } } @@ -69,7 +69,7 @@ ibz_mat_2x2_finalize(ibz_mat_2x2_t *mat) { for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_finalize(&(*mat)[i][j]); + ibz_finalize(&(mat->m)[i][j]); } } 
} @@ -79,7 +79,7 @@ ibz_mat_4x4_init(ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_init(&(*mat)[i][j]); + ibz_init(&(mat->m)[i][j]); } } } @@ -88,7 +88,7 @@ ibz_mat_4x4_finalize(ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_finalize(&(*mat)[i][j]); + ibz_finalize(&(mat->m)[i][j]); } } } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf.c index 511a0a5d38..5edff425c8 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hnf.c @@ -14,21 +14,21 @@ ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat) for (int i = 0; i < 4; i++) { // upper triangular for (int j = 0; j < i; j++) { - res = res && ibz_is_zero(&((*mat)[i][j])); + res = res && ibz_is_zero(&(mat->m[i][j])); } // find first non 0 element of line found = 0; for (int j = i; j < 4; j++) { if (found) { // all values are positive, and first non-0 is the largest of that line - res = res && (ibz_cmp(&((*mat)[i][j]), &zero) >= 0); - res = res && (ibz_cmp(&((*mat)[i][ind]), &((*mat)[i][j])) > 0); + res = res && (ibz_cmp(&(mat->m[i][j]), &zero) >= 0); + res = res && (ibz_cmp(&(mat->m[i][ind]), &(mat->m[i][j])) > 0); } else { - if (!ibz_is_zero(&((*mat)[i][j]))) { + if (!ibz_is_zero(&(mat->m[i][j]))) { found = 1; ind = j; // mustbe non-negative - res = res && (ibz_cmp(&((*mat)[i][j]), &zero) > 0); + res = res && (ibz_cmp(&(mat->m[i][j]), &zero) > 0); } } } @@ -37,7 +37,7 @@ ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat) int linestart = -1; int i = 0; for (int j = 0; j < 4; j++) { - while ((i < 4) && (ibz_is_zero(&((*mat)[i][j])))) { + while ((i < 4) && (ibz_is_zero(&(mat->m[i][j])))) { i = i + 1; } if (i != 4) { @@ -66,13 +66,13 @@ ibz_vec_4_linear_combination_mod(ibz_vec_4_t *lc, ibz_init(&m); ibz_copy(&m, mod); for (int i = 0; i < 4; i++) { - ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); - ibz_mul(&prod, coeff_b, &((*vec_b)[i])); - ibz_add(&(sums[i]), &(sums[i]), &prod); - ibz_centered_mod(&(sums[i]), &(sums[i]), &m); + ibz_mul(&(sums.v[i]), coeff_a, &(vec_a->v[i])); + ibz_mul(&prod, coeff_b, &(vec_b->v[i])); + ibz_add(&(sums.v[i]), &(sums.v[i]), &prod); + ibz_centered_mod(&(sums.v[i]), &(sums.v[i]), &m); } for (int i = 0; i < 4; i++) { - ibz_copy(&((*lc)[i]), &(sums[i])); + ibz_copy(&(lc->v[i]), &(sums.v[i])); } ibz_finalize(&prod); ibz_finalize(&m); @@ -86,7 +86,7 @@ ibz_vec_4_copy_mod(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_t *mod) ibz_init(&m); ibz_copy(&m, mod); for (int i = 0; i < 4; i++) { - ibz_centered_mod(&((*res)[i]), &((*vec)[i]), &m); + ibz_centered_mod(&(res->v[i]), &(vec->v[i]), &m); } ibz_finalize(&m); } @@ -101,8 +101,8 @@ ibz_vec_4_scalar_mul_mod(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4 ibz_copy(&s, scalar); ibz_copy(&m, mod); for (int i = 0; i < 4; i++) { - ibz_mul(&((*prod)[i]), &((*vec)[i]), &s); - ibz_mod(&((*prod)[i]), &((*prod)[i]), &m); + ibz_mul(&(prod->v[i]), &(vec->v[i]), &s); + ibz_mod(&(prod->v[i]), &(prod->v[i]), &m); } ibz_finalize(&m); ibz_finalize(&s); @@ -138,36 +138,36 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec if (h < 4) ibz_vec_4_init(&(w[h])); ibz_vec_4_init(&(a[h])); - ibz_copy(&(a[h][0]), &(generators[h][0])); - ibz_copy(&(a[h][1]), &(generators[h][1])); - ibz_copy(&(a[h][2]), &(generators[h][2])); - ibz_copy(&(a[h][3]), &(generators[h][3])); + ibz_copy(&(a[h].v[0]), &(generators[h].v[0])); + ibz_copy(&(a[h].v[1]), 
&(generators[h].v[1])); + ibz_copy(&(a[h].v[2]), &(generators[h].v[2])); + ibz_copy(&(a[h].v[3]), &(generators[h].v[3])); } assert(ibz_cmp(mod, &ibz_const_zero) > 0); ibz_copy(&m, mod); while (i != -1) { while (j != 0) { j = j - 1; - if (!ibz_is_zero(&(a[j][i]))) { + if (!ibz_is_zero(&(a[j].v[i]))) { // assumtion that ibz_xgcd outputs u,v which are small in absolute // value is needed here also, needs u non 0, but v can be 0 if needed - ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &(a[j][i])); + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k].v[i]), &(a[j].v[i])); ibz_vec_4_linear_combination(&c, &u, &(a[k]), &v, &(a[j])); - ibz_div(&coeff_1, &r, &(a[k][i]), &d); - ibz_div(&coeff_2, &r, &(a[j][i]), &d); + ibz_div(&coeff_1, &r, &(a[k].v[i]), &d); + ibz_div(&coeff_2, &r, &(a[j].v[i]), &d); ibz_neg(&coeff_2, &coeff_2); ibz_vec_4_linear_combination_mod( &(a[j]), &coeff_1, &(a[j]), &coeff_2, &(a[k]), &m); // do lin comb mod m ibz_vec_4_copy_mod(&(a[k]), &c, &m); // mod m in copy } } - ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &m); + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k].v[i]), &m); ibz_vec_4_scalar_mul_mod(&(w[i]), &u, &(a[k]), &m); // mod m in scalar mult - if (ibz_is_zero(&(w[i][i]))) { - ibz_copy(&(w[i][i]), &m); + if (ibz_is_zero(&(w[i].v[i]))) { + ibz_copy(&(w[i].v[i]), &m); } for (int h = i + 1; h < 4; h++) { - ibz_div_floor(&q, &r, &(w[h][i]), &(w[i][i])); + ibz_div_floor(&q, &r, &(w[h].v[i]), &(w[i].v[i])); ibz_neg(&q, &q); ibz_vec_4_linear_combination(&(w[h]), &ibz_const_one, &(w[h]), &q, &(w[i])); } @@ -177,8 +177,8 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec k = k - 1; i = i - 1; j = k; - if (ibz_is_zero(&(a[k][i]))) - ibz_copy(&(a[k][i]), &m); + if (ibz_is_zero(&(a[k].v[i]))) + ibz_copy(&(a[k].v[i]), &m); } else { k = k - 1; @@ -188,7 +188,7 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec } for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { - ibz_copy(&((*hnf)[i][j]), &(w[j][i])); + ibz_copy(&((hnf->m)[i][j]), &(w[j].v[i])); } } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ibz_division.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ibz_division.c index 0fd35b5c65..f630f5a9fe 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ibz_division.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ibz_division.c @@ -8,5 +8,5 @@ void ibz_xgcd(ibz_t *gcd, ibz_t *u, ibz_t *v, const ibz_t *a, const ibz_t *b) { - mpz_gcdext(*gcd, *u, *v, *a, *b); + mpz_gcdext(gcd->i, u->i, v->i, a->i, b->i); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/id2iso.c index 0743974345..1be9d87e71 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/id2iso.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/id2iso.c @@ -18,8 +18,8 @@ ec_biscalar_mul_ibz_vec(ec_point_t *res, const ec_curve_t *curve) { digit_t scalars[2][NWORDS_ORDER]; - ibz_to_digit_array(scalars[0], &(*scalar_vec)[0]); - ibz_to_digit_array(scalars[1], &(*scalar_vec)[1]); + ibz_to_digit_array(scalars[0], &scalar_vec->v[0]); + ibz_to_digit_array(scalars[1], &scalar_vec->v[1]); ec_biscalar_mul(res, scalars[0], scalars[1], f, PQ, curve); } @@ -48,14 +48,14 @@ id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *vec, const quat_left_ideal_t *lid quat_change_to_O0_basis(&coeffs, &alpha); for (unsigned i = 0; i < 2; ++i) { - ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + ibz_add(&mat.m[i][i], &mat.m[i][i], &coeffs.v[0]); for (unsigned j = 0; j < 
2; ++j) { - ibz_mul(&tmp, &ACTION_GEN2[i][j], &coeffs[1]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &ACTION_GEN3[i][j], &coeffs[2]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &ACTION_GEN4[i][j], &coeffs[3]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN2.m[i][j], &coeffs.v[1]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN3.m[i][j], &coeffs.v[2]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN4.m[i][j], &coeffs.v[3]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); } } @@ -67,16 +67,16 @@ id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *vec, const quat_left_ideal_t *lid { const ibz_t *const norm = &lideal->norm; - ibz_mod(&(*vec)[0], &mat[0][0], norm); - ibz_mod(&(*vec)[1], &mat[1][0], norm); - ibz_gcd(&tmp, &(*vec)[0], &(*vec)[1]); + ibz_mod(&vec->v[0], &mat.m[0][0], norm); + ibz_mod(&vec->v[1], &mat.m[1][0], norm); + ibz_gcd(&tmp, &vec->v[0], &vec->v[1]); if (ibz_is_even(&tmp)) { - ibz_mod(&(*vec)[0], &mat[0][1], norm); - ibz_mod(&(*vec)[1], &mat[1][1], norm); + ibz_mod(&vec->v[0], &mat.m[0][1], norm); + ibz_mod(&vec->v[1], &mat.m[1][1], norm); } #ifndef NDEBUG - ibz_gcd(&tmp, &(*vec)[0], norm); - ibz_gcd(&tmp, &(*vec)[1], &tmp); + ibz_gcd(&tmp, &vec->v[0], norm); + ibz_gcd(&tmp, &vec->v[1], &tmp); assert(!ibz_cmp(&tmp, &ibz_const_one)); #endif } @@ -102,28 +102,28 @@ matrix_application_even_basis(ec_basis_t *bas, const ec_curve_t *E, ibz_mat_2x2_ copy_basis(&tmp_bas, bas); // reduction mod 2f - ibz_mod(&(*mat)[0][0], &(*mat)[0][0], &pow_two); - ibz_mod(&(*mat)[0][1], &(*mat)[0][1], &pow_two); - ibz_mod(&(*mat)[1][0], &(*mat)[1][0], &pow_two); - ibz_mod(&(*mat)[1][1], &(*mat)[1][1], &pow_two); + ibz_mod(&mat->m[0][0], &mat->m[0][0], &pow_two); + ibz_mod(&mat->m[0][1], &mat->m[0][1], &pow_two); + ibz_mod(&mat->m[1][0], &mat->m[1][0], &pow_two); + ibz_mod(&mat->m[1][1], &mat->m[1][1], &pow_two); // For a matrix [[a, c], [b, d]] we compute: // // first basis element R = [a]P + [b]Q - ibz_to_digit_array(scalars[0], &(*mat)[0][0]); - ibz_to_digit_array(scalars[1], &(*mat)[1][0]); + ibz_to_digit_array(scalars[0], &mat->m[0][0]); + ibz_to_digit_array(scalars[1], &mat->m[1][0]); ec_biscalar_mul(&bas->P, scalars[0], scalars[1], f, &tmp_bas, E); // second basis element S = [c]P + [d]Q - ibz_to_digit_array(scalars[0], &(*mat)[0][1]); - ibz_to_digit_array(scalars[1], &(*mat)[1][1]); + ibz_to_digit_array(scalars[0], &mat->m[0][1]); + ibz_to_digit_array(scalars[1], &mat->m[1][1]); ec_biscalar_mul(&bas->Q, scalars[0], scalars[1], f, &tmp_bas, E); // Their difference R - S = [a - c]P + [b - d]Q - ibz_sub(&tmp, &(*mat)[0][0], &(*mat)[0][1]); + ibz_sub(&tmp, &mat->m[0][0], &mat->m[0][1]); ibz_mod(&tmp, &tmp, &pow_two); ibz_to_digit_array(scalars[0], &tmp); - ibz_sub(&tmp, &(*mat)[1][0], &(*mat)[1][1]); + ibz_sub(&tmp, &mat->m[1][0], &mat->m[1][1]); ibz_mod(&tmp, &tmp, &pow_two); ibz_to_digit_array(scalars[1], &tmp); ret = ec_biscalar_mul(&bas->PmQ, scalars[0], scalars[1], f, &tmp_bas, E); @@ -157,23 +157,23 @@ endomorphism_application_even_basis(ec_basis_t *bas, quat_alg_make_primitive(&coeffs, &content, theta, &EXTREMAL_ORDERS[index_alternate_curve].order); assert(ibz_is_odd(&content)); - ibz_set(&mat[0][0], 0); - ibz_set(&mat[0][1], 0); - ibz_set(&mat[1][0], 0); - ibz_set(&mat[1][1], 0); + ibz_set(&mat.m[0][0], 0); + ibz_set(&mat.m[0][1], 0); + ibz_set(&mat.m[1][0], 0); + ibz_set(&mat.m[1][1], 0); // computing the matrix for (unsigned i = 0; i < 2; ++i) { - ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + 
ibz_add(&mat.m[i][i], &mat.m[i][i], &coeffs.v[0]); for (unsigned j = 0; j < 2; ++j) { - ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen2[i][j], &coeffs[1]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen3[i][j], &coeffs[2]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen4[i][j], &coeffs[3]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&mat[i][j], &mat[i][j], &content); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen2.m[i][j], &coeffs.v[1]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen3.m[i][j], &coeffs.v[2]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen4.m[i][j], &coeffs.v[3]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&mat.m[i][j], &mat.m[i][j], &content); } } @@ -215,19 +215,19 @@ id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t * ibz_mat_2x2_t mat; ibz_mat_2x2_init(&mat); - ibz_copy(&mat[0][0], &(*vec2)[0]); - ibz_copy(&mat[1][0], &(*vec2)[1]); + ibz_copy(&mat.m[0][0], &vec2->v[0]); + ibz_copy(&mat.m[1][0], &vec2->v[1]); ibz_mat_2x2_eval(&vec, &ACTION_J, vec2); - ibz_copy(&mat[0][1], &vec[0]); - ibz_copy(&mat[1][1], &vec[1]); + ibz_copy(&mat.m[0][1], &vec.v[0]); + ibz_copy(&mat.m[1][1], &vec.v[1]); ibz_mat_2x2_eval(&vec, &ACTION_GEN4, vec2); - ibz_add(&mat[0][1], &mat[0][1], &vec[0]); - ibz_add(&mat[1][1], &mat[1][1], &vec[1]); + ibz_add(&mat.m[0][1], &mat.m[0][1], &vec.v[0]); + ibz_add(&mat.m[1][1], &mat.m[1][1], &vec.v[1]); - ibz_mod(&mat[0][1], &mat[0][1], &two_pow); - ibz_mod(&mat[1][1], &mat[1][1], &two_pow); + ibz_mod(&mat.m[0][1], &mat.m[0][1], &two_pow); + ibz_mod(&mat.m[1][1], &mat.m[1][1], &two_pow); ibz_mat_2x2_t inv; ibz_mat_2x2_init(&inv); @@ -247,11 +247,11 @@ id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t * quat_alg_elem_t gen; quat_alg_elem_init(&gen); ibz_set(&gen.denom, 2); - ibz_add(&gen.coord[0], &vec[0], &vec[0]); - ibz_set(&gen.coord[1], -2); - ibz_add(&gen.coord[2], &vec[1], &vec[1]); - ibz_copy(&gen.coord[3], &vec[1]); - ibz_add(&gen.coord[0], &gen.coord[0], &vec[1]); + ibz_add(&gen.coord.v[0], &vec.v[0], &vec.v[0]); + ibz_set(&gen.coord.v[1], -2); + ibz_add(&gen.coord.v[2], &vec.v[1], &vec.v[1]); + ibz_copy(&gen.coord.v[3], &vec.v[1]); + ibz_add(&gen.coord.v[0], &gen.coord.v[0], &vec.v[1]); ibz_vec_2_finalize(&vec); quat_lideal_create(lideal, &gen, &two_pow, &MAXORD_O0, &QUATALG_PINFTY); @@ -319,10 +319,10 @@ _change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, #endif // Copy the results into the matrix - ibz_copy_digit_array(&((*mat)[0][0]), x1); - ibz_copy_digit_array(&((*mat)[1][0]), x2); - ibz_copy_digit_array(&((*mat)[0][1]), x3); - ibz_copy_digit_array(&((*mat)[1][1]), x4); + ibz_copy_digit_array(&(mat->m[0][0]), x1); + ibz_copy_digit_array(&(mat->m[1][0]), x2); + ibz_copy_digit_array(&(mat->m[0][1]), x3); + ibz_copy_digit_array(&(mat->m[1][1]), x4); } void diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ideal.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ideal.c index 9cf863a104..8634143941 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ideal.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ideal.c @@ -33,7 +33,7 @@ quat_lideal_copy(quat_left_ideal_t *copy, const quat_left_ideal_t 
*copied) ibz_copy(©->lattice.denom, &copied->lattice.denom); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(©->lattice.basis[i][j], &copied->lattice.basis[i][j]); + ibz_copy(©->lattice.basis.m[i][j], &copied->lattice.basis.m[i][j]); } } } @@ -248,13 +248,13 @@ quat_lideal_class_gram(ibz_mat_4x4_t *G, const quat_left_ideal_t *lideal, const for (int i = 0; i < 4; i++) { for (int j = 0; j <= i; j++) { - ibz_div(&(*G)[i][j], &rmd, &(*G)[i][j], &divisor); + ibz_div(&G->m[i][j], &rmd, &G->m[i][j], &divisor); assert(ibz_is_zero(&rmd)); } } for (int i = 0; i < 4; i++) { for (int j = 0; j <= i - 1; j++) { - ibz_copy(&(*G)[j][i], &(*G)[i][j]); + ibz_copy(&G->m[j][i], &G->m[i][j]); } } @@ -289,8 +289,8 @@ quat_order_discriminant(ibz_t *disc, const quat_lattice_t *order, const quat_alg ibz_mat_4x4_transpose(&transposed, &(order->basis)); // multiply gram matrix by 2 because of reduced trace ibz_mat_4x4_identity(&norm); - ibz_copy(&(norm[2][2]), &(alg->p)); - ibz_copy(&(norm[3][3]), &(alg->p)); + ibz_copy(&(norm.m[2][2]), &(alg->p)); + ibz_copy(&(norm.m[3][3]), &(alg->p)); ibz_mat_4x4_scalar_mul(&norm, &ibz_const_two, &norm); ibz_mat_4x4_mul(&prod, &transposed, &norm); ibz_mat_4x4_mul(&prod, &prod, &(order->basis)); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/intbig.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/intbig.c index b0462dc8b5..e219bf3d96 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/intbig.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/intbig.c @@ -114,48 +114,48 @@ DEBUG_STR_FUN_4(const char *op, const ibz_t *arg1, const ibz_t *arg2, const ibz_ * @{ */ -const __mpz_struct ibz_const_zero[1] = { +const ibz_t ibz_const_zero = {{ { ._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]){ 0 }, } -}; +}}; -const __mpz_struct ibz_const_one[1] = { +const ibz_t ibz_const_one = {{ { ._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]){ 1 }, } -}; +}}; -const __mpz_struct ibz_const_two[1] = { +const ibz_t ibz_const_two = {{ { ._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]){ 2 }, } -}; +}}; -const __mpz_struct ibz_const_three[1] = { +const ibz_t ibz_const_three = {{ { ._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]){ 3 }, } -}; +}}; void ibz_init(ibz_t *x) { - mpz_init(*x); + mpz_init(x->i); } void ibz_finalize(ibz_t *x) { - mpz_clear(*x); + mpz_clear(x->i); } void @@ -168,7 +168,7 @@ ibz_add(ibz_t *sum, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_add(*sum, *a, *b); + mpz_add(sum->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_3("ibz_add", sum, &a_cp, &b_cp); ibz_finalize(&a_cp); @@ -186,7 +186,7 @@ ibz_sub(ibz_t *diff, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_sub(*diff, *a, *b); + mpz_sub(diff->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_3("ibz_sub", diff, &a_cp, &b_cp); @@ -205,7 +205,7 @@ ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_mul(*prod, *a, *b); + mpz_mul(prod->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_3("ibz_mul", prod, &a_cp, &b_cp); ibz_finalize(&a_cp); @@ -216,13 +216,13 @@ ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b) void ibz_neg(ibz_t *neg, const ibz_t *a) { - mpz_neg(*neg, *a); + mpz_neg(neg->i, a->i); } void ibz_abs(ibz_t *abs, const ibz_t *a) { - mpz_abs(*abs, *a); + mpz_abs(abs->i, a->i); } void @@ -235,7 +235,7 @@ ibz_div(ibz_t *quotient, ibz_t *remainder, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); 
ibz_copy(&b_cp, b); #endif - mpz_tdiv_qr(*quotient, *remainder, *a, *b); + mpz_tdiv_qr(quotient->i, remainder->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_4("ibz_div", quotient, remainder, &a_cp, &b_cp); ibz_finalize(&a_cp); @@ -251,7 +251,7 @@ ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp) ibz_init(&a_cp); ibz_copy(&a_cp, a); #endif - mpz_tdiv_q_2exp(*quotient, *a, exp); + mpz_tdiv_q_2exp(quotient->i, a->i, exp); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_MP2_INT("ibz_div_2exp,%Zx,%Zx,%x\n", quotient, &a_cp, exp); ibz_finalize(&a_cp); @@ -261,50 +261,50 @@ ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp) void ibz_div_floor(ibz_t *q, ibz_t *r, const ibz_t *n, const ibz_t *d) { - mpz_fdiv_qr(*q, *r, *n, *d); + mpz_fdiv_qr(q->i, r->i, n->i, d->i); } void ibz_mod(ibz_t *r, const ibz_t *a, const ibz_t *b) { - mpz_mod(*r, *a, *b); + mpz_mod(r->i, a->i, b->i); } unsigned long int -ibz_mod_ui(const mpz_t *n, unsigned long int d) +ibz_mod_ui(const ibz_t *n, unsigned long int d) { - return mpz_fdiv_ui(*n, d); + return mpz_fdiv_ui(n->i, d); } int ibz_divides(const ibz_t *a, const ibz_t *b) { - return mpz_divisible_p(*a, *b); + return mpz_divisible_p(a->i, b->i); } void ibz_pow(ibz_t *pow, const ibz_t *x, uint32_t e) { - mpz_pow_ui(*pow, *x, e); + mpz_pow_ui(pow->i, x->i, e); } void ibz_pow_mod(ibz_t *pow, const ibz_t *x, const ibz_t *e, const ibz_t *m) { - mpz_powm(*pow, *x, *e, *m); + mpz_powm(pow->i, x->i, e->i, m->i); DEBUG_STR_FUN_4("ibz_pow_mod", pow, x, e, m); } int ibz_two_adic(ibz_t *pow) { - return mpz_scan1(*pow, 0); + return mpz_scan1(pow->i, 0); } int ibz_cmp(const ibz_t *a, const ibz_t *b) { - int ret = mpz_cmp(*a, *b); + int ret = mpz_cmp(a->i, b->i); DEBUG_STR_FUN_INT_MP2("ibz_cmp", ret, a, b); return ret; } @@ -312,7 +312,7 @@ ibz_cmp(const ibz_t *a, const ibz_t *b) int ibz_is_zero(const ibz_t *x) { - int ret = !mpz_cmp_ui(*x, 0); + int ret = !mpz_cmp_ui(x->i, 0); DEBUG_STR_FUN_INT_MP("ibz_is_zero", ret, x); return ret; } @@ -320,7 +320,7 @@ ibz_is_zero(const ibz_t *x) int ibz_is_one(const ibz_t *x) { - int ret = !mpz_cmp_ui(*x, 1); + int ret = !mpz_cmp_ui(x->i, 1); DEBUG_STR_FUN_INT_MP("ibz_is_one", ret, x); return ret; } @@ -328,7 +328,7 @@ ibz_is_one(const ibz_t *x) int ibz_cmp_int32(const ibz_t *x, int32_t y) { - int ret = mpz_cmp_si(*x, (signed long int)y); + int ret = mpz_cmp_si(x->i, (signed long int)y); DEBUG_STR_FUN_INT_MP_INT("ibz_cmp_int32", ret, x, y); return ret; } @@ -336,7 +336,7 @@ ibz_cmp_int32(const ibz_t *x, int32_t y) int ibz_is_even(const ibz_t *x) { - int ret = !mpz_tstbit(*x, 0); + int ret = !mpz_tstbit(x->i, 0); DEBUG_STR_FUN_INT_MP("ibz_is_even", ret, x); return ret; } @@ -344,7 +344,7 @@ ibz_is_even(const ibz_t *x) int ibz_is_odd(const ibz_t *x) { - int ret = mpz_tstbit(*x, 0); + int ret = mpz_tstbit(x->i, 0); DEBUG_STR_FUN_INT_MP("ibz_is_odd", ret, x); return ret; } @@ -352,7 +352,7 @@ ibz_is_odd(const ibz_t *x) void ibz_set(ibz_t *i, int32_t x) { - mpz_set_si(*i, x); + mpz_set_si(i->i, x); } int @@ -361,7 +361,7 @@ ibz_convert_to_str(const ibz_t *i, char *str, int base) if (!str || (base != 10 && base != 16)) return 0; - mpz_get_str(str, base, *i); + mpz_get_str(str, base, i->i); return 1; } @@ -380,29 +380,29 @@ ibz_print(const ibz_t *num, int base) int ibz_set_from_str(ibz_t *i, const char *str, int base) { - return (1 + mpz_set_str(*i, str, base)); + return (1 + mpz_set_str(i->i, str, base)); } void ibz_copy(ibz_t *target, const ibz_t *value) { - mpz_set(*target, *value); + mpz_set(target->i, value->i); } void ibz_swap(ibz_t *a, 
ibz_t *b) { - mpz_swap(*a, *b); + mpz_swap(a->i, b->i); } int32_t ibz_get(const ibz_t *i) { #if LONG_MAX == INT32_MAX - return (int32_t)mpz_get_si(*i); + return (int32_t)mpz_get_si(i->i); #elif LONG_MAX > INT32_MAX // Extracts the sign bit and the 31 least significant bits - signed long int t = mpz_get_si(*i); + signed long int t = mpz_get_si(i->i); return (int32_t)((t >> (sizeof(signed long int) * 8 - 32)) & INT32_C(0x80000000)) | (t & INT32_C(0x7FFFFFFF)); #else #error Unsupported configuration: LONG_MAX must be >= INT32_MAX @@ -417,10 +417,10 @@ ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b) mpz_t tmp; mpz_t bmina; mpz_init(bmina); - mpz_sub(bmina, *b, *a); + mpz_sub(bmina, b->i, a->i); if (mpz_sgn(bmina) == 0) { - mpz_set(*rand, *a); + mpz_set(rand->i, a->i); mpz_clear(bmina); return 1; } @@ -466,7 +466,7 @@ ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b) break; } while (1); - mpz_add(*rand, tmp, *a); + mpz_add(rand->i, tmp, a->i); err: mpz_clear(bmina); return ret; @@ -534,19 +534,19 @@ int ibz_rand_interval_minm_m(ibz_t *rand, int32_t m) { int ret = 1; - mpz_t m_big; + ibz_t m_big; // m_big = 2 * m - mpz_init_set_si(m_big, m); - mpz_add(m_big, m_big, m_big); + mpz_init_set_si(m_big.i, m); + mpz_add(m_big.i, m_big.i, m_big.i); // Sample in [0, 2*m] ret = ibz_rand_interval(rand, &ibz_const_zero, &m_big); // Adjust to range [-m, m] - mpz_sub_ui(*rand, *rand, m); + mpz_sub_ui(rand->i, rand->i, m); - mpz_clear(m_big); + mpz_clear(m_big.i); return ret; } @@ -555,41 +555,41 @@ int ibz_rand_interval_bits(ibz_t *rand, uint32_t m) { int ret = 1; - mpz_t tmp; - mpz_t low; - mpz_init_set_ui(tmp, 1); - mpz_mul_2exp(tmp, tmp, m); - mpz_init(low); - mpz_neg(low, tmp); + ibz_t tmp; + ibz_t low; + mpz_init_set_ui(tmp.i, 1); + mpz_mul_2exp(tmp.i, tmp.i, m); + mpz_init(low.i); + mpz_neg(low.i, tmp.i); ret = ibz_rand_interval(rand, &low, &tmp); - mpz_clear(tmp); - mpz_clear(low); + mpz_clear(tmp.i); + mpz_clear(low.i); if (ret != 1) goto err; - mpz_sub_ui(*rand, *rand, (unsigned long int)m); + mpz_sub_ui(rand->i, rand->i, (unsigned long int)m); return ret; err: - mpz_clear(tmp); - mpz_clear(low); + mpz_clear(tmp.i); + mpz_clear(low.i); return ret; } int ibz_bitsize(const ibz_t *a) { - return (int)mpz_sizeinbase(*a, 2); + return (int)mpz_sizeinbase(a->i, 2); } int ibz_size_in_base(const ibz_t *a, int base) { - return (int)mpz_sizeinbase(*a, base); + return (int)mpz_sizeinbase(a->i, base); } void ibz_copy_digits(ibz_t *target, const digit_t *dig, int dig_len) { - mpz_import(*target, dig_len, -1, sizeof(digit_t), 0, 0, dig); + mpz_import(target->i, dig_len, -1, sizeof(digit_t), 0, 0, dig); } void @@ -600,13 +600,13 @@ ibz_to_digits(digit_t *target, const ibz_t *ibz) // The next line ensures zero is written to the first limb of target if ibz is zero; // target is then overwritten by the actual value if it is not. target[0] = 0; - mpz_export(target, NULL, -1, sizeof(digit_t), 0, 0, *ibz); + mpz_export(target, NULL, -1, sizeof(digit_t), 0, 0, ibz->i); } int ibz_probab_prime(const ibz_t *n, int reps) { - int ret = mpz_probab_prime_p(*n, reps); + int ret = mpz_probab_prime_p(n->i, reps); DEBUG_STR_FUN_INT_MP_INT("ibz_probab_prime", ret, n, reps); return ret; } @@ -614,26 +614,26 @@ ibz_probab_prime(const ibz_t *n, int reps) void ibz_gcd(ibz_t *gcd, const ibz_t *a, const ibz_t *b) { - mpz_gcd(*gcd, *a, *b); + mpz_gcd(gcd->i, a->i, b->i); } int ibz_invmod(ibz_t *inv, const ibz_t *a, const ibz_t *mod) { - return (mpz_invert(*inv, *a, *mod) ? 
1 : 0); + return (mpz_invert(inv->i, a->i, mod->i) ? 1 : 0); } int ibz_legendre(const ibz_t *a, const ibz_t *p) { - return mpz_legendre(*a, *p); + return mpz_legendre(a->i, p->i); } int ibz_sqrt(ibz_t *sqrt, const ibz_t *a) { - if (mpz_perfect_square_p(*a)) { - mpz_sqrt(*sqrt, *a); + if (mpz_perfect_square_p(a->i)) { + mpz_sqrt(sqrt->i, a->i); return 1; } else { return 0; @@ -643,7 +643,7 @@ ibz_sqrt(ibz_t *sqrt, const ibz_t *a) void ibz_sqrt_floor(ibz_t *sqrt, const ibz_t *a) { - mpz_sqrt(*sqrt, *a); + mpz_sqrt(sqrt->i, a->i); } int @@ -686,85 +686,85 @@ ibz_sqrt_mod_p(ibz_t *sqrt, const ibz_t *a, const ibz_t *p) int ret = 1; - mpz_mod(amod, *a, *p); + mpz_mod(amod, a->i, p->i); if (mpz_cmp_ui(amod, 0) < 0) { - mpz_add(amod, *p, amod); + mpz_add(amod, p->i, amod); } - if (mpz_legendre(amod, *p) != 1) { + if (mpz_legendre(amod, p->i) != 1) { ret = 0; goto end; } - mpz_sub_ui(pm1, *p, 1); + mpz_sub_ui(pm1, p->i, 1); - if (mpz_mod_ui(tmp, *p, 4) == 3) { + if (mpz_mod_ui(tmp, p->i, 4) == 3) { // p % 4 == 3 - mpz_add_ui(tmp, *p, 1); + mpz_add_ui(tmp, p->i, 1); mpz_fdiv_q_2exp(tmp, tmp, 2); - mpz_powm(*sqrt, amod, tmp, *p); - } else if (mpz_mod_ui(tmp, *p, 8) == 5) { + mpz_powm(sqrt->i, amod, tmp, p->i); + } else if (mpz_mod_ui(tmp, p->i, 8) == 5) { // p % 8 == 5 - mpz_sub_ui(tmp, *p, 1); + mpz_sub_ui(tmp, p->i, 1); mpz_fdiv_q_2exp(tmp, tmp, 2); - mpz_powm(tmp, amod, tmp, *p); // a^{(p-1)/4} mod p + mpz_powm(tmp, amod, tmp, p->i); // a^{(p-1)/4} mod p if (!mpz_cmp_ui(tmp, 1)) { - mpz_add_ui(tmp, *p, 3); + mpz_add_ui(tmp, p->i, 3); mpz_fdiv_q_2exp(tmp, tmp, 3); - mpz_powm(*sqrt, amod, tmp, *p); // a^{(p+3)/8} mod p + mpz_powm(sqrt->i, amod, tmp, p->i); // a^{(p+3)/8} mod p } else { - mpz_sub_ui(tmp, *p, 5); + mpz_sub_ui(tmp, p->i, 5); mpz_fdiv_q_2exp(tmp, tmp, 3); // (p - 5) / 8 mpz_mul_2exp(a4, amod, 2); // 4*a - mpz_powm(tmp, a4, tmp, *p); + mpz_powm(tmp, a4, tmp, p->i); mpz_mul_2exp(a2, amod, 1); mpz_mul(tmp, a2, tmp); - mpz_mod(*sqrt, tmp, *p); + mpz_mod(sqrt->i, tmp, p->i); } } else { // p % 8 == 1 -> Shanks-Tonelli int e = 0; - mpz_sub_ui(q, *p, 1); + mpz_sub_ui(q, p->i, 1); while (mpz_tstbit(q, e) == 0) e++; mpz_fdiv_q_2exp(q, q, e); // 1. find generator - non-quadratic residue mpz_set_ui(qnr, 2); - while (mpz_legendre(qnr, *p) != -1) + while (mpz_legendre(qnr, p->i) != -1) mpz_add_ui(qnr, qnr, 1); - mpz_powm(z, qnr, q, *p); + mpz_powm(z, qnr, q, p->i); // 2. 
Initialize mpz_set(y, z); - mpz_powm(y, amod, q, *p); // y = a^q mod p + mpz_powm(y, amod, q, p->i); // y = a^q mod p mpz_add_ui(tmp, q, 1); // tmp = (q + 1) / 2 mpz_fdiv_q_2exp(tmp, tmp, 1); - mpz_powm(x, amod, tmp, *p); // x = a^(q + 1)/2 mod p + mpz_powm(x, amod, tmp, p->i); // x = a^(q + 1)/2 mod p mpz_set_ui(exp, 1); mpz_mul_2exp(exp, exp, e - 2); for (int i = 0; i < e; ++i) { - mpz_powm(b, y, exp, *p); + mpz_powm(b, y, exp, p->i); if (!mpz_cmp(b, pm1)) { mpz_mul(x, x, z); - mpz_mod(x, x, *p); + mpz_mod(x, x, p->i); mpz_mul(y, y, z); mpz_mul(y, y, z); - mpz_mod(y, y, *p); + mpz_mod(y, y, p->i); } - mpz_powm_ui(z, z, 2, *p); + mpz_powm_ui(z, z, 2, p->i); mpz_fdiv_q_2exp(exp, exp, 1); } - mpz_set(*sqrt, x); + mpz_set(sqrt->i, x); } #ifdef DEBUG_VERBOSE diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/intbig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/intbig.h index a0c2c02477..28e478ff7f 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/intbig.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/intbig.h @@ -33,7 +33,9 @@ * * For integers of arbitrary size, used by intbig module, using gmp */ -typedef mpz_t ibz_t; +typedef struct { + mpz_t i; +} ibz_t; /** @} */ @@ -129,7 +131,7 @@ int ibz_two_adic(ibz_t *pow); */ void ibz_mod(ibz_t *r, const ibz_t *a, const ibz_t *b); -unsigned long int ibz_mod_ui(const mpz_t *n, unsigned long int d); +unsigned long int ibz_mod_ui(const ibz_t *n, unsigned long int d); /** @brief Test if a = 0 mod b */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/l2.c index 5491ee44d0..ea32213c75 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/l2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/l2.c @@ -57,25 +57,25 @@ to_etabar(fp_num *x) } static void -from_mpz(const mpz_t x, fp_num *r) +from_mpz(const ibz_t *x, fp_num *r) { long exp = 0; - r->s = mpz_get_d_2exp(&exp, x); + r->s = mpz_get_d_2exp(&exp, x->i); r->e = exp; } static void -to_mpz(const fp_num *x, mpz_t r) +to_mpz(const fp_num *x, ibz_t *r) { if (x->e >= DBL_MANT_DIG) { double s = x->s * 0x1P53; - mpz_set_d(r, s); - mpz_mul_2exp(r, r, x->e - DBL_MANT_DIG); + mpz_set_d(r->i, s); + mpz_mul_2exp(r->i, r->i, x->e - DBL_MANT_DIG); } else if (x->e < 0) { - mpz_set_ui(r, 0); + mpz_set_ui(r->i, 0); } else { double s = ldexp(x->s, x->e); - mpz_set_d(r, round(s)); + mpz_set_d(r->i, round(s)); } } @@ -203,7 +203,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) ibz_init(&tmpI); // Main L² loop - from_mpz((*G)[0][0], &r[0][0]); + from_mpz(&G->m[0][0], &r[0][0]); int kappa = 1; while (kappa < 4) { // size reduce b_κ @@ -213,7 +213,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Loop invariant: // r[κ][j] ≈ u[κ][j] ‖b_j*‖² ≈ 〈b_κ, b_j*〉 for (int j = 0; j <= kappa; j++) { - from_mpz((*G)[kappa][j], &r[kappa][j]); + from_mpz(&G->m[kappa][j], &r[kappa][j]); for (int k = 0; k < j; k++) { fp_mul(&r[kappa][k], &u[j][k], &tmpF); fp_sub(&r[kappa][j], &tmpF, &r[kappa][j]); @@ -229,22 +229,22 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) done = 0; copy(&u[kappa][i], &Xf); fp_round(&Xf); - to_mpz(&Xf, X); + to_mpz(&Xf, &X); // Update basis: b_κ ← b_κ - X·b_i for (int j = 0; j < 4; j++) { - ibz_mul(&tmpI, &X, &(*basis)[j][i]); - ibz_sub(&(*basis)[j][kappa], &(*basis)[j][kappa], &tmpI); + ibz_mul(&tmpI, &X, &basis->m[j][i]); + ibz_sub(&basis->m[j][kappa], &basis->m[j][kappa], &tmpI); } // Update lower half of the Gram matrix // = - 2X + X² = // - X - X( 
- X·) //// 〈b_κ, b_κ〉 ← 〈b_κ, b_κ〉 - X·〈b_κ, b_i〉 - ibz_mul(&tmpI, &X, &(*G)[kappa][i]); - ibz_sub(&(*G)[kappa][kappa], &(*G)[kappa][kappa], &tmpI); + ibz_mul(&tmpI, &X, &G->m[kappa][i]); + ibz_sub(&G->m[kappa][kappa], &G->m[kappa][kappa], &tmpI); for (int j = 0; j < 4; j++) { // works because i < κ // 〈b_κ, b_j〉 ← 〈b_κ, b_j〉 - X·〈b_i, b_j〉 - ibz_mul(&tmpI, &X, SYM((*G), i, j)); - ibz_sub(SYM((*G), kappa, j), SYM((*G), kappa, j), &tmpI); + ibz_mul(&tmpI, &X, SYM(G->m, i, j)); + ibz_sub(SYM(G->m, kappa, j), SYM(G->m, kappa, j), &tmpI); } // After the loop: //// 〈b_κ,b_κ〉 ← 〈b_κ,b_κ〉 - X·〈b_κ,b_i〉 - X·(〈b_κ,b_i〉 - X·〈b_i, @@ -261,7 +261,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Check Lovasz' conditions // lovasz[0] = ‖b_κ‖² - from_mpz((*G)[kappa][kappa], &lovasz[0]); + from_mpz(&G->m[kappa][kappa], &lovasz[0]); // lovasz[i] = lovasz[i-1] - u[κ][i-1]·r[κ][i-1] for (int i = 1; i < kappa; i++) { fp_mul(&u[kappa][i - 1], &r[kappa][i - 1], &tmpF); @@ -279,11 +279,11 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Insert b_κ before b_swap in the basis and in the lower half Gram matrix for (int j = kappa; j > swap; j--) { for (int i = 0; i < 4; i++) { - ibz_swap(&(*basis)[i][j], &(*basis)[i][j - 1]); + ibz_swap(&basis->m[i][j], &basis->m[i][j - 1]); if (i == j - 1) - ibz_swap(&(*G)[i][i], &(*G)[j][j]); + ibz_swap(&G->m[i][i], &G->m[j][j]); else if (i != j) - ibz_swap(SYM((*G), i, j), SYM((*G), i, j - 1)); + ibz_swap(SYM(G->m, i, j), SYM(G->m, i, j - 1)); } } // Copy row u[κ] and r[κ] in swap position, ignore what follows @@ -318,7 +318,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Fill in the upper half of the Gram matrix for (int i = 0; i < 4; i++) { for (int j = i + 1; j < 4; j++) - ibz_copy(&(*G)[i][j], &(*G)[j][i]); + ibz_copy(&G->m[i][j], &G->m[j][i]); } // Clearinghouse diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lat_ball.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lat_ball.c index c7bbb9682f..3f7476988c 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lat_ball.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lat_ball.c @@ -28,10 +28,10 @@ quat_lattice_bound_parallelogram(ibz_vec_4_t *box, ibz_mat_4x4_t *U, const ibz_m // Compute the parallelogram's bounds int trivial = 1; for (int i = 0; i < 4; i++) { - ibz_mul(&(*box)[i], &dualG[i][i], radius); - ibz_div(&(*box)[i], &rem, &(*box)[i], &denom); - ibz_sqrt_floor(&(*box)[i], &(*box)[i]); - trivial &= ibz_is_zero(&(*box)[i]); + ibz_mul(&box->v[i], &dualG.m[i][i], radius); + ibz_div(&box->v[i], &rem, &box->v[i], &denom); + ibz_sqrt_floor(&box->v[i], &box->v[i]); + trivial &= ibz_is_zero(&box->v[i]); } // Compute the transpose transformation matrix @@ -95,12 +95,12 @@ quat_lattice_sample_from_ball(quat_alg_elem_t *res, do { // Sample vector for (int i = 0; i < 4; i++) { - if (ibz_is_zero(&box[i])) { - ibz_copy(&x[i], &ibz_const_zero); + if (ibz_is_zero(&box.v[i])) { + ibz_copy(&x.v[i], &ibz_const_zero); } else { - ibz_add(&tmp, &box[i], &box[i]); - ok &= ibz_rand_interval(&x[i], &ibz_const_zero, &tmp); - ibz_sub(&x[i], &x[i], &box[i]); + ibz_add(&tmp, &box.v[i], &box.v[i]); + ok &= ibz_rand_interval(&x.v[i], &ibz_const_zero, &tmp); + ibz_sub(&x.v[i], &x.v[i], &box.v[i]); if (!ok) goto err; } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lattice.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lattice.c index c98bae9499..ef7b9ccdcc 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lattice.c +++ 
b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lattice.c @@ -57,7 +57,7 @@ quat_lattice_conjugate_without_hnf(quat_lattice_t *conj, const quat_lattice_t *l for (int row = 1; row < 4; ++row) { for (int col = 0; col < 4; ++col) { - ibz_neg(&(conj->basis[row][col]), &(conj->basis[row][col])); + ibz_neg(&(conj->basis.m[row][col]), &(conj->basis.m[row][col])); } } } @@ -96,14 +96,14 @@ quat_lattice_add(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lat ibz_mat_4x4_scalar_mul(&tmp, &(lat1->denom), &(lat2->basis)); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(generators[j][i]), &(tmp[i][j])); + ibz_copy(&(generators[j].v[i]), &(tmp.m[i][j])); } } ibz_mat_4x4_inv_with_det_as_denom(NULL, &det1, &tmp); ibz_mat_4x4_scalar_mul(&tmp, &(lat2->denom), &(lat1->basis)); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(generators[4 + j][i]), &(tmp[i][j])); + ibz_copy(&(generators[4 + j].v[i]), &(tmp.m[i][j])); } } ibz_mat_4x4_inv_with_det_as_denom(NULL, &det2, &tmp); @@ -151,12 +151,12 @@ quat_lattice_mat_alg_coord_mul_without_hnf(ibz_mat_4x4_t *prod, ibz_vec_4_init(&p); ibz_vec_4_init(&a); for (int i = 0; i < 4; i++) { - ibz_vec_4_copy_ibz(&a, &((*lat)[0][i]), &((*lat)[1][i]), &((*lat)[2][i]), &((*lat)[3][i])); + ibz_vec_4_copy_ibz(&a, &(lat->m[0][i]), &(lat->m[1][i]), &(lat->m[2][i]), &(lat->m[3][i])); quat_alg_coord_mul(&p, &a, coord, alg); - ibz_copy(&((*prod)[0][i]), &(p[0])); - ibz_copy(&((*prod)[1][i]), &(p[1])); - ibz_copy(&((*prod)[2][i]), &(p[2])); - ibz_copy(&((*prod)[3][i]), &(p[3])); + ibz_copy(&(prod->m[0][i]), &(p.v[0])); + ibz_copy(&(prod->m[1][i]), &(p.v[1])); + ibz_copy(&(prod->m[2][i]), &(p.v[2])); + ibz_copy(&(prod->m[3][i]), &(p.v[3])); } ibz_vec_4_finalize(&p); ibz_vec_4_finalize(&a); @@ -191,15 +191,15 @@ quat_lattice_mul(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lat ibz_vec_4_init(&(generators[i])); for (int k = 0; k < 4; k++) { ibz_vec_4_copy_ibz( - &elem1, &(lat1->basis[0][k]), &(lat1->basis[1][k]), &(lat1->basis[2][k]), &(lat1->basis[3][k])); + &elem1, &(lat1->basis.m[0][k]), &(lat1->basis.m[1][k]), &(lat1->basis.m[2][k]), &(lat1->basis.m[3][k])); for (int i = 0; i < 4; i++) { ibz_vec_4_copy_ibz( - &elem2, &(lat2->basis[0][i]), &(lat2->basis[1][i]), &(lat2->basis[2][i]), &(lat2->basis[3][i])); + &elem2, &(lat2->basis.m[0][i]), &(lat2->basis.m[1][i]), &(lat2->basis.m[2][i]), &(lat2->basis.m[3][i])); quat_alg_coord_mul(&elem_res, &elem1, &elem2, alg); for (int j = 0; j < 4; j++) { if (k == 0) - ibz_copy(&(detmat[i][j]), &(elem_res[j])); - ibz_copy(&(generators[4 * k + i][j]), &(elem_res[j])); + ibz_copy(&(detmat.m[i][j]), &(elem_res.v[j])); + ibz_copy(&(generators[4 * k + i].v[j]), &(elem_res.v[j])); } } } @@ -239,7 +239,7 @@ quat_lattice_contains(ibz_vec_4_t *coord, const quat_lattice_t *lat, const quat_ // copy result if (divisible && (coord != NULL)) { for (int i = 0; i < 4; i++) { - ibz_copy(&((*coord)[i]), &(work_coord[i])); + ibz_copy(&(coord->v[i]), &(work_coord.v[i])); } } ibz_finalize(&prod); @@ -292,7 +292,7 @@ quat_lattice_hnf(quat_lattice_t *lat) ibz_vec_4_init(&(generators[i])); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(generators[j][i]), &(lat->basis[i][j])); + ibz_copy(&(generators[j].v[i]), &(lat->basis.m[i][j])); } } ibz_mat_4xn_hnf_mod_core(&(lat->basis), 4, generators, &mod); @@ -309,19 +309,19 @@ quat_lattice_gram(ibz_mat_4x4_t *G, const quat_lattice_t *lattice, const quat_al ibz_init(&tmp); for (int i = 0; i < 4; i++) { for (int j = 0; j <= i; 
j++) { - ibz_set(&(*G)[i][j], 0); + ibz_set(&G->m[i][j], 0); for (int k = 0; k < 4; k++) { - ibz_mul(&tmp, &(lattice->basis)[k][i], &(lattice->basis)[k][j]); + ibz_mul(&tmp, &(lattice->basis.m)[k][i], &(lattice->basis.m)[k][j]); if (k >= 2) ibz_mul(&tmp, &tmp, &alg->p); - ibz_add(&(*G)[i][j], &(*G)[i][j], &tmp); + ibz_add(&G->m[i][j], &G->m[i][j], &tmp); } - ibz_mul(&(*G)[i][j], &(*G)[i][j], &ibz_const_two); + ibz_mul(&G->m[i][j], &G->m[i][j], &ibz_const_two); } } for (int i = 0; i < 4; i++) { for (int j = i + 1; j < 4; j++) { - ibz_copy(&(*G)[i][j], &(*G)[j][i]); + ibz_copy(&G->m[i][j], &G->m[j][i]); } } ibz_finalize(&tmp); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lll_applications.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lll_applications.c index 6c763b8c04..f5e9af922b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lll_applications.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lll_applications.c @@ -17,9 +17,9 @@ quat_lideal_reduce_basis(ibz_mat_4x4_t *reduced, quat_lll_core(gram, reduced); ibz_mat_4x4_scalar_mul(gram, &gram_corrector, gram); for (int i = 0; i < 4; i++) { - ibz_div_2exp(&((*gram)[i][i]), &((*gram)[i][i]), 1); + ibz_div_2exp(&(gram->m[i][i]), &(gram->m[i][i]), 1); for (int j = i + 1; j < 4; j++) { - ibz_set(&((*gram)[i][j]), 0); + ibz_set(&(gram->m[i][j]), 0); } } ibz_finalize(&gram_corrector); @@ -79,10 +79,10 @@ quat_lideal_prime_norm_reduced_equivalent(quat_left_ideal_t *lideal, while (!found && ctr < equiv_num_iter) { ctr++; // we select our linear combination at random - ibz_rand_interval_minm_m(&new_alpha.coord[0], equiv_bound_coeff); - ibz_rand_interval_minm_m(&new_alpha.coord[1], equiv_bound_coeff); - ibz_rand_interval_minm_m(&new_alpha.coord[2], equiv_bound_coeff); - ibz_rand_interval_minm_m(&new_alpha.coord[3], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[0], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[1], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[2], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[3], equiv_bound_coeff); // computation of the norm of the vector sampled quat_qf_eval(&tmp, &gram, &new_alpha.coord); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/normeq.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/normeq.c index 8c133dd095..aadbbe06c7 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/normeq.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/normeq.c @@ -13,23 +13,23 @@ quat_lattice_O0_set(quat_lattice_t *O0) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&(O0->basis[i][j]), 0); + ibz_set(&(O0->basis.m[i][j]), 0); } } ibz_set(&(O0->denom), 2); - ibz_set(&(O0->basis[0][0]), 2); - ibz_set(&(O0->basis[1][1]), 2); - ibz_set(&(O0->basis[2][2]), 1); - ibz_set(&(O0->basis[1][2]), 1); - ibz_set(&(O0->basis[3][3]), 1); - ibz_set(&(O0->basis[0][3]), 1); + ibz_set(&(O0->basis.m[0][0]), 2); + ibz_set(&(O0->basis.m[1][1]), 2); + ibz_set(&(O0->basis.m[2][2]), 1); + ibz_set(&(O0->basis.m[1][2]), 1); + ibz_set(&(O0->basis.m[3][3]), 1); + ibz_set(&(O0->basis.m[0][3]), 1); } void quat_lattice_O0_set_extremal(quat_p_extremal_maximal_order_t *O0) { - ibz_set(&O0->z.coord[1], 1); - ibz_set(&O0->t.coord[2], 1); + ibz_set(&O0->z.coord.v[1], 1); + ibz_set(&O0->t.coord.v[2], 1); ibz_set(&O0->z.denom, 1); ibz_set(&O0->t.denom, 1); O0->q = 1; @@ -50,24 +50,24 @@ quat_order_elem_create(quat_alg_elem_t *elem, quat_alg_elem_init(&quat_temp); // elem = x - 
quat_alg_scalar(elem, &(*coeffs)[0], &ibz_const_one); + quat_alg_scalar(elem, &coeffs->v[0], &ibz_const_one); // quat_temp = i*y - quat_alg_scalar(&quat_temp, &((*coeffs)[1]), &ibz_const_one); + quat_alg_scalar(&quat_temp, &(coeffs->v[1]), &ibz_const_one); quat_alg_mul(&quat_temp, &order->z, &quat_temp, Bpoo); // elem = x + i*y quat_alg_add(elem, elem, &quat_temp); // quat_temp = z * j - quat_alg_scalar(&quat_temp, &(*coeffs)[2], &ibz_const_one); + quat_alg_scalar(&quat_temp, &coeffs->v[2], &ibz_const_one); quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); // elem = x + i* + z*j quat_alg_add(elem, elem, &quat_temp); // quat_temp = t * j * i - quat_alg_scalar(&quat_temp, &(*coeffs)[3], &ibz_const_one); + quat_alg_scalar(&quat_temp, &coeffs->v[3], &ibz_const_one); quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); quat_alg_mul(&quat_temp, &quat_temp, &order->z, Bpoo); @@ -143,11 +143,11 @@ quat_represent_integer(quat_alg_elem_t *gamma, ibz_sub(&counter, &counter, &ibz_const_one); // we start by sampling the first coordinate - ibz_rand_interval(&coeffs[2], &ibz_const_one, &bound); + ibz_rand_interval(&coeffs.v[2], &ibz_const_one, &bound); // then, we sample the second coordinate // computing the second bound in temp as sqrt( (adjust_n_gamma - p*coeffs[2]²)/qp ) - ibz_mul(&cornacchia_target, &coeffs[2], &coeffs[2]); + ibz_mul(&cornacchia_target, &coeffs.v[2], &coeffs.v[2]); ibz_mul(&temp, &cornacchia_target, &(params->algebra->p)); ibz_sub(&temp, &adjusted_n_gamma, &temp); ibz_mul(&sq_bound, &q, &(params->algebra->p)); @@ -158,10 +158,10 @@ quat_represent_integer(quat_alg_elem_t *gamma, continue; } // sampling the second value - ibz_rand_interval(&coeffs[3], &ibz_const_one, &temp); + ibz_rand_interval(&coeffs.v[3], &ibz_const_one, &temp); // compute cornacchia_target = n_gamma - p * (z² + q*t²) - ibz_mul(&temp, &coeffs[3], &coeffs[3]); + ibz_mul(&temp, &coeffs.v[3], &coeffs.v[3]); ibz_mul(&temp, &q, &temp); ibz_add(&cornacchia_target, &cornacchia_target, &temp); ibz_mul(&cornacchia_target, &cornacchia_target, &((params->algebra)->p)); @@ -170,7 +170,7 @@ quat_represent_integer(quat_alg_elem_t *gamma, // applying cornacchia if (ibz_probab_prime(&cornacchia_target, params->primality_test_iterations)) - found = ibz_cornacchia_prime(&(coeffs[0]), &(coeffs[1]), &q, &cornacchia_target); + found = ibz_cornacchia_prime(&(coeffs.v[0]), &(coeffs.v[1]), &q, &cornacchia_target); else found = 0; @@ -179,33 +179,33 @@ quat_represent_integer(quat_alg_elem_t *gamma, // the treatmeat depends if the basis contains (1+j)/2 or (1+k)/2 // we must have x = t mod 2 and y = z mod 2 // if q=1 we can simply swap x and y - if (ibz_is_odd(&coeffs[0]) != ibz_is_odd(&coeffs[3])) { - ibz_swap(&coeffs[1], &coeffs[0]); + if (ibz_is_odd(&coeffs.v[0]) != ibz_is_odd(&coeffs.v[3])) { + ibz_swap(&coeffs.v[1], &coeffs.v[0]); } // we further check that (x-t)/2 = 1 mod 2 and (y-z)/2 = 1 mod 2 to ensure that the // resulting endomorphism will behave well for dim 2 computations - found = found && ((ibz_get(&coeffs[0]) - ibz_get(&coeffs[3])) % 4 == 2) && - ((ibz_get(&coeffs[1]) - ibz_get(&coeffs[2])) % 4 == 2); + found = found && ((ibz_get(&coeffs.v[0]) - ibz_get(&coeffs.v[3])) % 4 == 2) && + ((ibz_get(&coeffs.v[1]) - ibz_get(&coeffs.v[2])) % 4 == 2); } if (found) { #ifndef NDEBUG ibz_set(&temp, (params->order->q)); - ibz_mul(&temp, &temp, &(coeffs[1])); - ibz_mul(&temp, &temp, &(coeffs[1])); - ibz_mul(&test, &(coeffs[0]), &(coeffs[0])); + ibz_mul(&temp, &temp, &(coeffs.v[1])); + ibz_mul(&temp, &temp, &(coeffs.v[1])); + 
ibz_mul(&test, &(coeffs.v[0]), &(coeffs.v[0])); ibz_add(&temp, &temp, &test); assert(0 == ibz_cmp(&temp, &cornacchia_target)); - ibz_mul(&cornacchia_target, &(coeffs[3]), &(coeffs[3])); + ibz_mul(&cornacchia_target, &(coeffs.v[3]), &(coeffs.v[3])); ibz_mul(&cornacchia_target, &cornacchia_target, &(params->algebra->p)); - ibz_mul(&temp, &(coeffs[1]), &(coeffs[1])); + ibz_mul(&temp, &(coeffs.v[1]), &(coeffs.v[1])); ibz_add(&cornacchia_target, &cornacchia_target, &temp); ibz_set(&temp, (params->order->q)); ibz_mul(&cornacchia_target, &cornacchia_target, &temp); - ibz_mul(&temp, &(coeffs[0]), &coeffs[0]); + ibz_mul(&temp, &(coeffs.v[0]), &coeffs.v[0]); ibz_add(&cornacchia_target, &cornacchia_target, &temp); - ibz_mul(&temp, &(coeffs[2]), &coeffs[2]); + ibz_mul(&temp, &(coeffs.v[2]), &coeffs.v[2]); ibz_mul(&temp, &temp, &(params->algebra->p)); ibz_add(&cornacchia_target, &cornacchia_target, &temp); assert(0 == ibz_cmp(&cornacchia_target, &adjusted_n_gamma)); @@ -213,8 +213,8 @@ quat_represent_integer(quat_alg_elem_t *gamma, // translate x,y,z,t into the quaternion element gamma quat_order_elem_create(gamma, (params->order), &coeffs, (params->algebra)); #ifndef NDEBUG - quat_alg_norm(&temp, &(coeffs[0]), gamma, (params->algebra)); - assert(ibz_is_one(&(coeffs[0]))); + quat_alg_norm(&temp, &(coeffs.v[0]), gamma, (params->algebra)); + assert(ibz_is_one(&(coeffs.v[0]))); assert(0 == ibz_cmp(&temp, &adjusted_n_gamma)); assert(quat_lattice_contains(NULL, &((params->order)->order), gamma)); #endif @@ -232,10 +232,10 @@ quat_represent_integer(quat_alg_elem_t *gamma, if (found) { // new gamma ibz_mat_4x4_eval(&coeffs, &(((params->order)->order).basis), &coeffs); - ibz_copy(&gamma->coord[0], &coeffs[0]); - ibz_copy(&gamma->coord[1], &coeffs[1]); - ibz_copy(&gamma->coord[2], &coeffs[2]); - ibz_copy(&gamma->coord[3], &coeffs[3]); + ibz_copy(&gamma->coord.v[0], &coeffs.v[0]); + ibz_copy(&gamma->coord.v[1], &coeffs.v[1]); + ibz_copy(&gamma->coord.v[2], &coeffs.v[2]); + ibz_copy(&gamma->coord.v[3], &coeffs.v[3]); ibz_copy(&gamma->denom, &(((params->order)->order).denom)); } // var finalize @@ -279,10 +279,10 @@ quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, // we find a quaternion element of norm divisible by norm while (!found) { // generating a trace-zero element at random - ibz_set(&gen.coord[0], 0); + ibz_set(&gen.coord.v[0], 0); ibz_sub(&n_temp, norm, &ibz_const_one); for (int i = 1; i < 4; i++) - ibz_rand_interval(&gen.coord[i], &ibz_const_zero, &n_temp); + ibz_rand_interval(&gen.coord.v[i], &ibz_const_zero, &n_temp); // first, we compute the norm of the gen quat_alg_norm(&n_temp, &norm_d, &gen, (params->algebra)); @@ -293,7 +293,7 @@ quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, ibz_mod(&disc, &disc, norm); // now we check that -n is a square mod norm // and if the square root exists we compute it - found = ibz_sqrt_mod_p(&gen.coord[0], &disc, norm); + found = ibz_sqrt_mod_p(&gen.coord.v[0], &disc, norm); found = found && !quat_alg_elem_is_zero(&gen); } } else { @@ -319,7 +319,7 @@ quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, found = 0; while (!found) { for (int i = 0; i < 4; i++) { - ibz_rand_interval(&gen_rerand.coord[i], &ibz_const_one, norm); + ibz_rand_interval(&gen_rerand.coord.v[i], &ibz_const_one, norm); } quat_alg_norm(&n_temp, &norm_d, &gen_rerand, (params->algebra)); assert(ibz_is_one(&norm_d)); @@ -348,22 +348,22 @@ quat_change_to_O0_basis(ibz_vec_4_t *vec, const quat_alg_elem_t *el) { ibz_t tmp; ibz_init(&tmp); - 
ibz_copy(&(*vec)[2], &el->coord[2]); - ibz_add(&(*vec)[2], &(*vec)[2], &(*vec)[2]); // double (not optimal if el->denom is even...) - ibz_copy(&(*vec)[3], &el->coord[3]); // double (not optimal if el->denom is even...) - ibz_add(&(*vec)[3], &(*vec)[3], &(*vec)[3]); - ibz_sub(&(*vec)[0], &el->coord[0], &el->coord[3]); - ibz_sub(&(*vec)[1], &el->coord[1], &el->coord[2]); - - assert(ibz_divides(&(*vec)[0], &el->denom)); - assert(ibz_divides(&(*vec)[1], &el->denom)); - assert(ibz_divides(&(*vec)[2], &el->denom)); - assert(ibz_divides(&(*vec)[3], &el->denom)); - - ibz_div(&(*vec)[0], &tmp, &(*vec)[0], &el->denom); - ibz_div(&(*vec)[1], &tmp, &(*vec)[1], &el->denom); - ibz_div(&(*vec)[2], &tmp, &(*vec)[2], &el->denom); - ibz_div(&(*vec)[3], &tmp, &(*vec)[3], &el->denom); + ibz_copy(&vec->v[2], &el->coord.v[2]); + ibz_add(&vec->v[2], &vec->v[2], &vec->v[2]); // double (not optimal if el->denom is even...) + ibz_copy(&vec->v[3], &el->coord.v[3]); // double (not optimal if el->denom is even...) + ibz_add(&vec->v[3], &vec->v[3], &vec->v[3]); + ibz_sub(&vec->v[0], &el->coord.v[0], &el->coord.v[3]); + ibz_sub(&vec->v[1], &el->coord.v[1], &el->coord.v[2]); + + assert(ibz_divides(&vec->v[0], &el->denom)); + assert(ibz_divides(&vec->v[1], &el->denom)); + assert(ibz_divides(&vec->v[2], &el->denom)); + assert(ibz_divides(&vec->v[3], &el->denom)); + + ibz_div(&vec->v[0], &tmp, &vec->v[0], &el->denom); + ibz_div(&vec->v[1], &tmp, &vec->v[1], &el->denom); + ibz_div(&vec->v[2], &tmp, &vec->v[2], &el->denom); + ibz_div(&vec->v[3], &tmp, &vec->v[3], &el->denom); ibz_finalize(&tmp); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion.h index a567657464..2dd70a8c19 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion.h @@ -25,7 +25,9 @@ * * @typedef ibz_vec_2_t */ -typedef ibz_t ibz_vec_2_t[2]; +typedef struct { + ibz_t v[2]; +} ibz_vec_2_t; /** @brief Type for vectors of 4 integers * @@ -33,7 +35,9 @@ typedef ibz_t ibz_vec_2_t[2]; * * Represented as a vector of 4 ibz_t (big integer) elements */ -typedef ibz_t ibz_vec_4_t[4]; +typedef struct { + ibz_t v[4]; +} ibz_vec_4_t; /** @brief Type for 2 by 2 matrices of integers * @@ -41,7 +45,9 @@ typedef ibz_t ibz_vec_4_t[4]; * * Represented as a matrix of 2 vectors of 2 ibz_t (big integer) elements */ -typedef ibz_t ibz_mat_2x2_t[2][2]; +typedef struct { + ibz_t m[2][2]; +} ibz_mat_2x2_t; /** @brief Type for 4 by 4 matrices of integers * @@ -49,7 +55,9 @@ typedef ibz_t ibz_mat_2x2_t[2][2]; * * Represented as a matrix of 4 vectors of 4 ibz_t (big integer) elements */ -typedef ibz_t ibz_mat_4x4_t[4][4]; +typedef struct { + ibz_t m[4][4]; +} ibz_mat_4x4_t; /** * @} */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion_data.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion_data.c index 24402255d4..6944b06f09 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion_data.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/quaternion_data.c @@ -4,3623 +4,3623 @@ const ibz_t QUAT_prime_cofactor = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x171,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x8000}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x171,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x8000}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x171,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x80000000}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x171,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x80000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x171,0x0,0x0,0x0,0x0,0x8000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x171,0x0,0x0,0x0,0x0,0x8000000000000000}}}} #endif ; const quat_alg_t QUATALG_PINFTY = { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x40ff}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x40ff}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x40ffffff}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x40ffffff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0x40ffffffffffffff}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0x40ffffffffffffff}}}} #endif }; const quat_p_extremal_maximal_order_t EXTREMAL_ORDERS[8] = {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, 
._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 
-{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = 
(mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 1}, {{ +}}}, 1}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xafcc,0xe7ed,0x58f3,0x3e59,0x9763,0x88d6,0xf2c5,0x4a6d,0x3afe,0xf44b,0x7d27,0x3c31}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xafcc,0xe7ed,0x58f3,0x3e59,0x9763,0x88d6,0xf2c5,0x4a6d,0x3afe,0xf44b,0x7d27,0x3c31}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe7edafcc,0x3e5958f3,0x88d69763,0x4a6df2c5,0xf44b3afe,0x3c317d27}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe7edafcc,0x3e5958f3,0x88d69763,0x4a6df2c5,0xf44b3afe,0x3c317d27}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x3e5958f3e7edafcc,0x4a6df2c588d69763,0x3c317d27f44b3afe}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x3e5958f3e7edafcc,0x4a6df2c588d69763,0x3c317d27f44b3afe}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xafcc,0xe7ed,0x58f3,0x3e59,0x9763,0x88d6,0xf2c5,0x4a6d,0x3afe,0xf44b,0x7d27,0x3c31}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xafcc,0xe7ed,0x58f3,0x3e59,0x9763,0x88d6,0xf2c5,0x4a6d,0x3afe,0xf44b,0x7d27,0x3c31}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe7edafcc,0x3e5958f3,0x88d69763,0x4a6df2c5,0xf44b3afe,0x3c317d27}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe7edafcc,0x3e5958f3,0x88d69763,0x4a6df2c5,0xf44b3afe,0x3c317d27}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x3e5958f3e7edafcc,0x4a6df2c588d69763,0x3c317d27f44b3afe}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x3e5958f3e7edafcc,0x4a6df2c588d69763,0x3c317d27f44b3afe}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd7e6,0xf3f6,0xac79,0x9f2c,0x4bb1,0xc46b,0xf962,0x2536,0x9d7f,0xfa25,0xbe93,0x1e18}}} +{{{._mp_alloc 
= 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd7e6,0xf3f6,0xac79,0x9f2c,0x4bb1,0xc46b,0xf962,0x2536,0x9d7f,0xfa25,0xbe93,0x1e18}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf3f6d7e6,0x9f2cac79,0xc46b4bb1,0x2536f962,0xfa259d7f,0x1e18be93}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf3f6d7e6,0x9f2cac79,0xc46b4bb1,0x2536f962,0xfa259d7f,0x1e18be93}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x9f2cac79f3f6d7e6,0x2536f962c46b4bb1,0x1e18be93fa259d7f}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x9f2cac79f3f6d7e6,0x2536f962c46b4bb1,0x1e18be93fa259d7f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fb7,0xea91,0xa3ea,0x2a21,0x9cd1,0x26b3,0xde73,0xa2d3,0xcecc,0x20a1,0xc963,0x266b}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fb7,0xea91,0xa3ea,0x2a21,0x9cd1,0x26b3,0xde73,0xa2d3,0xcecc,0x20a1,0xc963,0x266b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea912fb7,0x2a21a3ea,0x26b39cd1,0xa2d3de73,0x20a1cecc,0x266bc963}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea912fb7,0x2a21a3ea,0x26b39cd1,0xa2d3de73,0x20a1cecc,0x266bc963}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x2a21a3eaea912fb7,0xa2d3de7326b39cd1,0x266bc96320a1cecc}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x2a21a3eaea912fb7,0xa2d3de7326b39cd1,0x266bc96320a1cecc}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e5f,0x4aa8,0x9064,0x8436,0x8fae,0x50ab,0x2fd8,0xdd15,0x617a,0x8343,0x9423,0x3d7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x680}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e5f,0x4aa8,0x9064,0x8436,0x8fae,0x50ab,0x2fd8,0xdd15,0x617a,0x8343,0x9423,0x3d7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x680}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x4aa81e5f,0x84369064,0x50ab8fae,0xdd152fd8,0x8343617a,0x3d79423,0x0,0x0,0x0,0x0,0x0,0x6800000}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4aa81e5f,0x84369064,0x50ab8fae,0xdd152fd8,0x8343617a,0x3d79423,0x0,0x0,0x0,0x0,0x0,0x6800000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x843690644aa81e5f,0xdd152fd850ab8fae,0x3d794238343617a,0x0,0x0,0x680000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x843690644aa81e5f,0xdd152fd850ab8fae,0x3d794238343617a,0x0,0x0,0x680000000000000}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd7e6,0xf3f6,0xac79,0x9f2c,0x4bb1,0xc46b,0xf962,0x2536,0x9d7f,0xfa25,0xbe93,0x1e18}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd7e6,0xf3f6,0xac79,0x9f2c,0x4bb1,0xc46b,0xf962,0x2536,0x9d7f,0xfa25,0xbe93,0x1e18}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf3f6d7e6,0x9f2cac79,0xc46b4bb1,0x2536f962,0xfa259d7f,0x1e18be93}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf3f6d7e6,0x9f2cac79,0xc46b4bb1,0x2536f962,0xfa259d7f,0x1e18be93}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x9f2cac79f3f6d7e6,0x2536f962c46b4bb1,0x1e18be93fa259d7f}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x9f2cac79f3f6d7e6,0x2536f962c46b4bb1,0x1e18be93fa259d7f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 
1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x1e5f,0x4aa8,0x9064,0x8436,0x8fae,0x50ab,0x2fd8,0xdd15,0x617a,0x8343,0x9423,0x3d7}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x1e5f,0x4aa8,0x9064,0x8436,0x8fae,0x50ab,0x2fd8,0xdd15,0x617a,0x8343,0x9423,0x3d7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x4aa81e5f,0x84369064,0x50ab8fae,0xdd152fd8,0x8343617a,0x3d79423}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x4aa81e5f,0x84369064,0x50ab8fae,0xdd152fd8,0x8343617a,0x3d79423}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x843690644aa81e5f,0xdd152fd850ab8fae,0x3d794238343617a}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x843690644aa81e5f,0xdd152fd850ab8fae,0x3d794238343617a}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xafcc,0xe7ed,0x58f3,0x3e59,0x9763,0x88d6,0xf2c5,0x4a6d,0x3afe,0xf44b,0x7d27,0x3c31}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xafcc,0xe7ed,0x58f3,0x3e59,0x9763,0x88d6,0xf2c5,0x4a6d,0x3afe,0xf44b,0x7d27,0x3c31}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe7edafcc,0x3e5958f3,0x88d69763,0x4a6df2c5,0xf44b3afe,0x3c317d27}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe7edafcc,0x3e5958f3,0x88d69763,0x4a6df2c5,0xf44b3afe,0x3c317d27}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x3e5958f3e7edafcc,0x4a6df2c588d69763,0x3c317d27f44b3afe}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x3e5958f3e7edafcc,0x4a6df2c588d69763,0x3c317d27f44b3afe}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fb7,0xea91,0xa3ea,0x2a21,0x9cd1,0x26b3,0xde73,0xa2d3,0xcecc,0x20a1,0xc963,0x266b}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fb7,0xea91,0xa3ea,0x2a21,0x9cd1,0x26b3,0xde73,0xa2d3,0xcecc,0x20a1,0xc963,0x266b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0xea912fb7,0x2a21a3ea,0x26b39cd1,0xa2d3de73,0x20a1cecc,0x266bc963}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea912fb7,0x2a21a3ea,0x26b39cd1,0xa2d3de73,0x20a1cecc,0x266bc963}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x2a21a3eaea912fb7,0xa2d3de7326b39cd1,0x266bc96320a1cecc}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x2a21a3eaea912fb7,0xa2d3de7326b39cd1,0x266bc96320a1cecc}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size 
= 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 5}, {{ +}}}, 5}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc40,0xb21f,0x5bc7,0x8dca,0xc2a2,0xca0a,0xc8b1,0xbddd,0xcb7d,0xc9d5,0xa9d,0x3cc0}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc40,0xb21f,0x5bc7,0x8dca,0xc2a2,0xca0a,0xc8b1,0xbddd,0xcb7d,0xc9d5,0xa9d,0x3cc0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb21f0c40,0x8dca5bc7,0xca0ac2a2,0xbdddc8b1,0xc9d5cb7d,0x3cc00a9d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb21f0c40,0x8dca5bc7,0xca0ac2a2,0xbdddc8b1,0xc9d5cb7d,0x3cc00a9d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x8dca5bc7b21f0c40,0xbdddc8b1ca0ac2a2,0x3cc00a9dc9d5cb7d}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x8dca5bc7b21f0c40,0xbdddc8b1ca0ac2a2,0x3cc00a9dc9d5cb7d}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc40,0xb21f,0x5bc7,0x8dca,0xc2a2,0xca0a,0xc8b1,0xbddd,0xcb7d,0xc9d5,0xa9d,0x3cc0}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc40,0xb21f,0x5bc7,0x8dca,0xc2a2,0xca0a,0xc8b1,0xbddd,0xcb7d,0xc9d5,0xa9d,0x3cc0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb21f0c40,0x8dca5bc7,0xca0ac2a2,0xbdddc8b1,0xc9d5cb7d,0x3cc00a9d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb21f0c40,0x8dca5bc7,0xca0ac2a2,0xbdddc8b1,0xc9d5cb7d,0x3cc00a9d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x8dca5bc7b21f0c40,0xbdddc8b1ca0ac2a2,0x3cc00a9dc9d5cb7d}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x8dca5bc7b21f0c40,0xbdddc8b1ca0ac2a2,0x3cc00a9dc9d5cb7d}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8620,0xd90f,0x2de3,0x46e5,0x6151,0xe505,0xe458,0xdeee,0xe5be,0xe4ea,0x54e,0x1e60}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8620,0xd90f,0x2de3,0x46e5,0x6151,0xe505,0xe458,0xdeee,0xe5be,0xe4ea,0x54e,0x1e60}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd90f8620,0x46e52de3,0xe5056151,0xdeeee458,0xe4eae5be,0x1e60054e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd90f8620,0x46e52de3,0xe5056151,0xdeeee458,0xe4eae5be,0x1e60054e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x46e52de3d90f8620,0xdeeee458e5056151,0x1e60054ee4eae5be}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x46e52de3d90f8620,0xdeeee458e5056151,0x1e60054ee4eae5be}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 
-{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6801,0x9f6f,0x4e1e,0xbd0b,0x5c5e,0xaa12,0xb4c6,0x3849,0xd6c7,0x2d76,0x3227,0xb106}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6801,0x9f6f,0x4e1e,0xbd0b,0x5c5e,0xaa12,0xb4c6,0x3849,0xd6c7,0x2d76,0x3227,0xb106}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9f6f6801,0xbd0b4e1e,0xaa125c5e,0x3849b4c6,0x2d76d6c7,0xb1063227}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9f6f6801,0xbd0b4e1e,0xaa125c5e,0x3849b4c6,0x2d76d6c7,0xb1063227}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbd0b4e1e9f6f6801,0x3849b4c6aa125c5e,0xb10632272d76d6c7}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbd0b4e1e9f6f6801,0x3849b4c6aa125c5e,0xb10632272d76d6c7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8400,0x4135,0x343c,0xa4cf,0x6603,0xa414,0xc207,0x5ac7,0x921b,0xd084,0x1ed,0x6cf,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x280}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8400,0x4135,0x343c,0xa4cf,0x6603,0xa414,0xc207,0x5ac7,0x921b,0xd084,0x1ed,0x6cf,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x280}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x41358400,0xa4cf343c,0xa4146603,0x5ac7c207,0xd084921b,0x6cf01ed,0x0,0x0,0x0,0x0,0x0,0x2800000}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x41358400,0xa4cf343c,0xa4146603,0x5ac7c207,0xd084921b,0x6cf01ed,0x0,0x0,0x0,0x0,0x0,0x2800000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xa4cf343c41358400,0x5ac7c207a4146603,0x6cf01edd084921b,0x0,0x0,0x280000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xa4cf343c41358400,0x5ac7c207a4146603,0x6cf01edd084921b,0x0,0x0,0x280000000000000}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8620,0xd90f,0x2de3,0x46e5,0x6151,0xe505,0xe458,0xdeee,0xe5be,0xe4ea,0x54e,0x1e60}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8620,0xd90f,0x2de3,0x46e5,0x6151,0xe505,0xe458,0xdeee,0xe5be,0xe4ea,0x54e,0x1e60}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd90f8620,0x46e52de3,0xe5056151,0xdeeee458,0xe4eae5be,0x1e60054e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd90f8620,0x46e52de3,0xe5056151,0xdeeee458,0xe4eae5be,0x1e60054e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x46e52de3d90f8620,0xdeeee458e5056151,0x1e60054ee4eae5be}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x46e52de3d90f8620,0xdeeee458e5056151,0x1e60054ee4eae5be}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x8400,0x4135,0x343c,0xa4cf,0x6603,0xa414,0xc207,0x5ac7,0x921b,0xd084,0x1ed,0x6cf}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x8400,0x4135,0x343c,0xa4cf,0x6603,0xa414,0xc207,0x5ac7,0x921b,0xd084,0x1ed,0x6cf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x41358400,0xa4cf343c,0xa4146603,0x5ac7c207,0xd084921b,0x6cf01ed}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x41358400,0xa4cf343c,0xa4146603,0x5ac7c207,0xd084921b,0x6cf01ed}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xa4cf343c41358400,0x5ac7c207a4146603,0x6cf01edd084921b}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xa4cf343c41358400,0x5ac7c207a4146603,0x6cf01edd084921b}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc40,0xb21f,0x5bc7,0x8dca,0xc2a2,0xca0a,0xc8b1,0xbddd,0xcb7d,0xc9d5,0xa9d,0x3cc0}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc40,0xb21f,0x5bc7,0x8dca,0xc2a2,0xca0a,0xc8b1,0xbddd,0xcb7d,0xc9d5,0xa9d,0x3cc0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb21f0c40,0x8dca5bc7,0xca0ac2a2,0xbdddc8b1,0xc9d5cb7d,0x3cc00a9d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb21f0c40,0x8dca5bc7,0xca0ac2a2,0xbdddc8b1,0xc9d5cb7d,0x3cc00a9d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x8dca5bc7b21f0c40,0xbdddc8b1ca0ac2a2,0x3cc00a9dc9d5cb7d}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x8dca5bc7b21f0c40,0xbdddc8b1ca0ac2a2,0x3cc00a9dc9d5cb7d}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6801,0x9f6f,0x4e1e,0xbd0b,0x5c5e,0xaa12,0xb4c6,0x3849,0xd6c7,0x2d76,0x3227,0xb106}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6801,0x9f6f,0x4e1e,0xbd0b,0x5c5e,0xaa12,0xb4c6,0x3849,0xd6c7,0x2d76,0x3227,0xb106}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9f6f6801,0xbd0b4e1e,0xaa125c5e,0x3849b4c6,0x2d76d6c7,0xb1063227}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9f6f6801,0xbd0b4e1e,0xaa125c5e,0x3849b4c6,0x2d76d6c7,0xb1063227}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbd0b4e1e9f6f6801,0x3849b4c6aa125c5e,0xb10632272d76d6c7}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbd0b4e1e9f6f6801,0x3849b4c6aa125c5e,0xb10632272d76d6c7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 13}, {{ +}}}, 13}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x5b40,0x7328,0xdb38,0x5357,0x465b,0x31d8,0x1f3,0x85cf,0x32b9,0xd8dc,0x6f6d,0x3dab,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x5b40,0x7328,0xdb38,0x5357,0x465b,0x31d8,0x1f3,0x85cf,0x32b9,0xd8dc,0x6f6d,0x3dab,0x2}}}} #elif GMP_LIMB_BITS == 32 
-{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x73285b40,0x5357db38,0x31d8465b,0x85cf01f3,0xd8dc32b9,0x3dab6f6d,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x73285b40,0x5357db38,0x31d8465b,0x85cf01f3,0xd8dc32b9,0x3dab6f6d,0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5357db3873285b40,0x85cf01f331d8465b,0x3dab6f6dd8dc32b9,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5357db3873285b40,0x85cf01f331d8465b,0x3dab6f6dd8dc32b9,0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x5b40,0x7328,0xdb38,0x5357,0x465b,0x31d8,0x1f3,0x85cf,0x32b9,0xd8dc,0x6f6d,0x3dab,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x5b40,0x7328,0xdb38,0x5357,0x465b,0x31d8,0x1f3,0x85cf,0x32b9,0xd8dc,0x6f6d,0x3dab,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x73285b40,0x5357db38,0x31d8465b,0x85cf01f3,0xd8dc32b9,0x3dab6f6d,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x73285b40,0x5357db38,0x31d8465b,0x85cf01f3,0xd8dc32b9,0x3dab6f6d,0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5357db3873285b40,0x85cf01f331d8465b,0x3dab6f6dd8dc32b9,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5357db3873285b40,0x85cf01f331d8465b,0x3dab6f6dd8dc32b9,0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x2da0,0x3994,0xed9c,0xa9ab,0x232d,0x98ec,0x80f9,0xc2e7,0x195c,0xec6e,0xb7b6,0x1ed5,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x2da0,0x3994,0xed9c,0xa9ab,0x232d,0x98ec,0x80f9,0xc2e7,0x195c,0xec6e,0xb7b6,0x1ed5,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x39942da0,0xa9abed9c,0x98ec232d,0xc2e780f9,0xec6e195c,0x1ed5b7b6,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x39942da0,0xa9abed9c,0x98ec232d,0xc2e780f9,0xec6e195c,0x1ed5b7b6,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa9abed9c39942da0,0xc2e780f998ec232d,0x1ed5b7b6ec6e195c,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa9abed9c39942da0,0xc2e780f998ec232d,0x1ed5b7b6ec6e195c,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0xb7ef,0x4ddc,0x58cc,0xe284,0xc4a7,0xb9ed,0xdca9,0xc383,0xc3dd,0x5a13,0xd2bc,0x7663,0x3}}} +{{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0xb7ef,0x4ddc,0x58cc,0xe284,0xc4a7,0xb9ed,0xdca9,0xc383,0xc3dd,0x5a13,0xd2bc,0x7663,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0x4ddcb7ef,0xe28458cc,0xb9edc4a7,0xc383dca9,0x5a13c3dd,0x7663d2bc,0x3}}} +{{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0x4ddcb7ef,0xe28458cc,0xb9edc4a7,0xc383dca9,0x5a13c3dd,0x7663d2bc,0x3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe28458cc4ddcb7ef,0xc383dca9b9edc4a7,0x7663d2bc5a13c3dd,0x3}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe28458cc4ddcb7ef,0xc383dca9b9edc4a7,0x7663d2bc5a13c3dd,0x3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xdc07,0x925a,0x605a,0x9489,0x475b,0x7944,0x880f,0x65fa,0xed5a,0x329c,0x13f8,0x78f2,0xfffe,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x207f}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xdc07,0x925a,0x605a,0x9489,0x475b,0x7944,0x880f,0x65fa,0xed5a,0x329c,0x13f8,0x78f2,0xfffe,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x207f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x925adc07,0x9489605a,0x7944475b,0x65fa880f,0x329ced5a,0x78f213f8,0xfffffffe,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x207fffff}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x925adc07,0x9489605a,0x7944475b,0x65fa880f,0x329ced5a,0x78f213f8,0xfffffffe,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x207fffff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9489605a925adc07,0x65fa880f7944475b,0x78f213f8329ced5a,0xfffffffffffffffe,0xffffffffffffffff,0x207fffffffffffff}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9489605a925adc07,0x65fa880f7944475b,0x78f213f8329ced5a,0xfffffffffffffffe,0xffffffffffffffff,0x207fffffffffffff}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x2da0,0x3994,0xed9c,0xa9ab,0x232d,0x98ec,0x80f9,0xc2e7,0x195c,0xec6e,0xb7b6,0x1ed5,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x2da0,0x3994,0xed9c,0xa9ab,0x232d,0x98ec,0x80f9,0xc2e7,0x195c,0xec6e,0xb7b6,0x1ed5,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x39942da0,0xa9abed9c,0x98ec232d,0xc2e780f9,0xec6e195c,0x1ed5b7b6,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x39942da0,0xa9abed9c,0x98ec232d,0xc2e780f9,0xec6e195c,0x1ed5b7b6,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa9abed9c39942da0,0xc2e780f998ec232d,0x1ed5b7b6ec6e195c,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa9abed9c39942da0,0xc2e780f998ec232d,0x1ed5b7b6ec6e195c,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9c07,0x5ca4,0xc660,0xc2e5,0x94d7,0x2b1d,0x3b32,0xa3de,0x67a4,0x2fd3,0xfeab,0x1a11}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x9c07,0x5ca4,0xc660,0xc2e5,0x94d7,0x2b1d,0x3b32,0xa3de,0x67a4,0x2fd3,0xfeab,0x1a11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x5ca49c07,0xc2e5c660,0x2b1d94d7,0xa3de3b32,0x2fd367a4,0x1a11feab}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x5ca49c07,0xc2e5c660,0x2b1d94d7,0xa3de3b32,0x2fd367a4,0x1a11feab}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xc2e5c6605ca49c07,0xa3de3b322b1d94d7,0x1a11feab2fd367a4}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xc2e5c6605ca49c07,0xa3de3b322b1d94d7,0x1a11feab2fd367a4}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x5b40,0x7328,0xdb38,0x5357,0x465b,0x31d8,0x1f3,0x85cf,0x32b9,0xd8dc,0x6f6d,0x3dab,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x5b40,0x7328,0xdb38,0x5357,0x465b,0x31d8,0x1f3,0x85cf,0x32b9,0xd8dc,0x6f6d,0x3dab,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x73285b40,0x5357db38,0x31d8465b,0x85cf01f3,0xd8dc32b9,0x3dab6f6d,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x73285b40,0x5357db38,0x31d8465b,0x85cf01f3,0xd8dc32b9,0x3dab6f6d,0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5357db3873285b40,0x85cf01f331d8465b,0x3dab6f6dd8dc32b9,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5357db3873285b40,0x85cf01f331d8465b,0x3dab6f6dd8dc32b9,0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0xb7ef,0x4ddc,0x58cc,0xe284,0xc4a7,0xb9ed,0xdca9,0xc383,0xc3dd,0x5a13,0xd2bc,0x7663,0x3}}} +{{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0xb7ef,0x4ddc,0x58cc,0xe284,0xc4a7,0xb9ed,0xdca9,0xc383,0xc3dd,0x5a13,0xd2bc,0x7663,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0x4ddcb7ef,0xe28458cc,0xb9edc4a7,0xc383dca9,0x5a13c3dd,0x7663d2bc,0x3}}} +{{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0x4ddcb7ef,0xe28458cc,0xb9edc4a7,0xc383dca9,0x5a13c3dd,0x7663d2bc,0x3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe28458cc4ddcb7ef,0xc383dca9b9edc4a7,0x7663d2bc5a13c3dd,0x3}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe28458cc4ddcb7ef,0xc383dca9b9edc4a7,0x7663d2bc5a13c3dd,0x3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) 
{0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 17}, {{ +}}}, 17}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, 
._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = 
(mp_limb_t[]) {0xe8e2,0xea6f,0x72f1,0x2e52,0x152a,0xc137,0x5fe4,0xfd0e,0x9736,0x7a1,0xfa3d,0xc6b}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xe8e2,0xea6f,0x72f1,0x2e52,0x152a,0xc137,0x5fe4,0xfd0e,0x9736,0x7a1,0xfa3d,0xc6b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xea6fe8e2,0x2e5272f1,0xc137152a,0xfd0e5fe4,0x7a19736,0xc6bfa3d}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xea6fe8e2,0x2e5272f1,0xc137152a,0xfd0e5fe4,0x7a19736,0xc6bfa3d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x2e5272f1ea6fe8e2,0xfd0e5fe4c137152a,0xc6bfa3d07a19736}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x2e5272f1ea6fe8e2,0xfd0e5fe4c137152a,0xc6bfa3d07a19736}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x9a15,0x48a0,0x16ae,0xa42,0x3772,0x534a,0x26a7,0x2f5e,0xce7c,0x39eb,0xa365,0x745c,0x6a25,0xa257,0x2576,0x576a,0x76a2,0x6a25,0xa257,0x2576,0x576a,0x76a2,0x6a25,0x657}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x9a15,0x48a0,0x16ae,0xa42,0x3772,0x534a,0x26a7,0x2f5e,0xce7c,0x39eb,0xa365,0x745c,0x6a25,0xa257,0x2576,0x576a,0x76a2,0x6a25,0xa257,0x2576,0x576a,0x76a2,0x6a25,0x657}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x48a09a15,0xa4216ae,0x534a3772,0x2f5e26a7,0x39ebce7c,0x745ca365,0xa2576a25,0x576a2576,0x6a2576a2,0x2576a257,0x76a2576a,0x6576a25}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x48a09a15,0xa4216ae,0x534a3772,0x2f5e26a7,0x39ebce7c,0x745ca365,0xa2576a25,0x576a2576,0x6a2576a2,0x2576a257,0x76a2576a,0x6576a25}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xa4216ae48a09a15,0x2f5e26a7534a3772,0x745ca36539ebce7c,0x576a2576a2576a25,0x2576a2576a2576a2,0x6576a2576a2576a}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xa4216ae48a09a15,0x2f5e26a7534a3772,0x745ca36539ebce7c,0x576a2576a2576a25,0x2576a2576a2576a2,0x6576a2576a2576a}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , 
#if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x50e5,0x2533,0xb03b,0x2c45,0xfde,0xaaf1,0xafff,0x8c73,0xebfd,0xfb3,0xc7bc,0x26}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x50e5,0x2533,0xb03b,0x2c45,0xfde,0xaaf1,0xafff,0x8c73,0xebfd,0xfb3,0xc7bc,0x26}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x253350e5,0x2c45b03b,0xaaf10fde,0x8c73afff,0xfb3ebfd,0x26c7bc}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x253350e5,0x2c45b03b,0xaaf10fde,0x8c73afff,0xfb3ebfd,0x26c7bc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) 
{0x2c45b03b253350e5,0x8c73afffaaf10fde,0x26c7bc0fb3ebfd}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x2c45b03b253350e5,0x8c73afffaaf10fde,0x26c7bc0fb3ebfd}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xe8e2,0xea6f,0x72f1,0x2e52,0x152a,0xc137,0x5fe4,0xfd0e,0x9736,0x7a1,0xfa3d,0xc6b}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xe8e2,0xea6f,0x72f1,0x2e52,0x152a,0xc137,0x5fe4,0xfd0e,0x9736,0x7a1,0xfa3d,0xc6b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xea6fe8e2,0x2e5272f1,0xc137152a,0xfd0e5fe4,0x7a19736,0xc6bfa3d}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xea6fe8e2,0x2e5272f1,0xc137152a,0xfd0e5fe4,0x7a19736,0xc6bfa3d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x2e5272f1ea6fe8e2,0xfd0e5fe4c137152a,0xc6bfa3d07a19736}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x2e5272f1ea6fe8e2,0xfd0e5fe4c137152a,0xc6bfa3d07a19736}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} 
+{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 41}, {{ +}}}, 41}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb000,0x6067,0x56e7,0x950c,0xb3d,0x28e6,0x1bfb,0xb990,0xeb8,0x7184,0x2273,0x29aa}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb000,0x6067,0x56e7,0x950c,0xb3d,0x28e6,0x1bfb,0xb990,0xeb8,0x7184,0x2273,0x29aa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6067b000,0x950c56e7,0x28e60b3d,0xb9901bfb,0x71840eb8,0x29aa2273}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6067b000,0x950c56e7,0x28e60b3d,0xb9901bfb,0x71840eb8,0x29aa2273}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x950c56e76067b000,0xb9901bfb28e60b3d,0x29aa227371840eb8}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x950c56e76067b000,0xb9901bfb28e60b3d,0x29aa227371840eb8}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb000,0x6067,0x56e7,0x950c,0xb3d,0x28e6,0x1bfb,0xb990,0xeb8,0x7184,0x2273,0x29aa}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xb000,0x6067,0x56e7,0x950c,0xb3d,0x28e6,0x1bfb,0xb990,0xeb8,0x7184,0x2273,0x29aa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6067b000,0x950c56e7,0x28e60b3d,0xb9901bfb,0x71840eb8,0x29aa2273}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6067b000,0x950c56e7,0x28e60b3d,0xb9901bfb,0x71840eb8,0x29aa2273}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x950c56e76067b000,0xb9901bfb28e60b3d,0x29aa227371840eb8}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x950c56e76067b000,0xb9901bfb28e60b3d,0x29aa227371840eb8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd800,0xb033,0x2b73,0xca86,0x59e,0x9473,0xdfd,0x5cc8,0x75c,0xb8c2,0x1139,0x14d5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd800,0xb033,0x2b73,0xca86,0x59e,0x9473,0xdfd,0x5cc8,0x75c,0xb8c2,0x1139,0x14d5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb033d800,0xca862b73,0x9473059e,0x5cc80dfd,0xb8c2075c,0x14d51139}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb033d800,0xca862b73,0x9473059e,0x5cc80dfd,0xb8c2075c,0x14d51139}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xca862b73b033d800,0x5cc80dfd9473059e,0x14d51139b8c2075c}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xca862b73b033d800,0x5cc80dfd9473059e,0x14d51139b8c2075c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xffff,0xef7f,0x6120,0xdec9,0x3d80,0xfcb4,0xe8d7,0x2d72,0x4077,0xeecc,0xcd2a,0x4bc9,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xffff,0xef7f,0x6120,0xdec9,0x3d80,0xfcb4,0xe8d7,0x2d72,0x4077,0xeecc,0xcd2a,0x4bc9,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xef7fffff,0xdec96120,0xfcb43d80,0x2d72e8d7,0xeecc4077,0x4bc9cd2a,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) 
{0xef7fffff,0xdec96120,0xfcb43d80,0x2d72e8d7,0xeecc4077,0x4bc9cd2a,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdec96120ef7fffff,0x2d72e8d7fcb43d80,0x4bc9cd2aeecc4077,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdec96120ef7fffff,0x2d72e8d7fcb43d80,0x4bc9cd2aeecc4077,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -24, ._mp_d = (mp_limb_t[]) {0x73ba,0x1227,0x9519,0xedfb,0x605b,0xe80,0x1a20,0xf0b2,0xb418,0xa90c,0xb325,0xefd6,0x7e3e,0xf8fc,0xe3f1,0x8fc7,0x3f1f,0xfc7e,0xf1f8,0xc7e3,0x1f8f,0x7e3f,0xf8fc,0x71}}} +{{{._mp_alloc = 0, ._mp_size = -24, ._mp_d = (mp_limb_t[]) {0x73ba,0x1227,0x9519,0xedfb,0x605b,0xe80,0x1a20,0xf0b2,0xb418,0xa90c,0xb325,0xefd6,0x7e3e,0xf8fc,0xe3f1,0x8fc7,0x3f1f,0xfc7e,0xf1f8,0xc7e3,0x1f8f,0x7e3f,0xf8fc,0x71}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x122773ba,0xedfb9519,0xe80605b,0xf0b21a20,0xa90cb418,0xefd6b325,0xf8fc7e3e,0x8fc7e3f1,0xfc7e3f1f,0xc7e3f1f8,0x7e3f1f8f,0x71f8fc}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x122773ba,0xedfb9519,0xe80605b,0xf0b21a20,0xa90cb418,0xefd6b325,0xf8fc7e3e,0x8fc7e3f1,0xfc7e3f1f,0xc7e3f1f8,0x7e3f1f8f,0x71f8fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xedfb9519122773ba,0xf0b21a200e80605b,0xefd6b325a90cb418,0x8fc7e3f1f8fc7e3e,0xc7e3f1f8fc7e3f1f,0x71f8fc7e3f1f8f}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xedfb9519122773ba,0xf0b21a200e80605b,0xefd6b325a90cb418,0x8fc7e3f1f8fc7e3e,0xc7e3f1f8fc7e3f1f,0x71f8fc7e3f1f8f}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd800,0xb033,0x2b73,0xca86,0x59e,0x9473,0xdfd,0x5cc8,0x75c,0xb8c2,0x1139,0x14d5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd800,0xb033,0x2b73,0xca86,0x59e,0x9473,0xdfd,0x5cc8,0x75c,0xb8c2,0x1139,0x14d5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0xb033d800,0xca862b73,0x9473059e,0x5cc80dfd,0xb8c2075c,0x14d51139}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb033d800,0xca862b73,0x9473059e,0x5cc80dfd,0xb8c2075c,0x14d51139}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xca862b73b033d800,0x5cc80dfd9473059e,0x14d51139b8c2075c}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xca862b73b033d800,0x5cc80dfd9473059e,0x14d51139b8c2075c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x73ba,0x8a7,0x681e,0x130f,0xeee3,0xd966,0x4ebe,0xf78b,0xba4d,0xfa9,0xc409,0x245}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x73ba,0x8a7,0x681e,0x130f,0xeee3,0xd966,0x4ebe,0xf78b,0xba4d,0xfa9,0xc409,0x245}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x8a773ba,0x130f681e,0xd966eee3,0xf78b4ebe,0xfa9ba4d,0x245c409}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x8a773ba,0x130f681e,0xd966eee3,0xf78b4ebe,0xfa9ba4d,0x245c409}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x130f681e08a773ba,0xf78b4ebed966eee3,0x245c4090fa9ba4d}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x130f681e08a773ba,0xf78b4ebed966eee3,0x245c4090fa9ba4d}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb000,0x6067,0x56e7,0x950c,0xb3d,0x28e6,0x1bfb,0xb990,0xeb8,0x7184,0x2273,0x29aa}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = 
(mp_limb_t[]) {0xb000,0x6067,0x56e7,0x950c,0xb3d,0x28e6,0x1bfb,0xb990,0xeb8,0x7184,0x2273,0x29aa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6067b000,0x950c56e7,0x28e60b3d,0xb9901bfb,0x71840eb8,0x29aa2273}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6067b000,0x950c56e7,0x28e60b3d,0xb9901bfb,0x71840eb8,0x29aa2273}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x950c56e76067b000,0xb9901bfb28e60b3d,0x29aa227371840eb8}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x950c56e76067b000,0xb9901bfb28e60b3d,0x29aa227371840eb8}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xffff,0xef7f,0x6120,0xdec9,0x3d80,0xfcb4,0xe8d7,0x2d72,0x4077,0xeecc,0xcd2a,0x4bc9,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xffff,0xef7f,0x6120,0xdec9,0x3d80,0xfcb4,0xe8d7,0x2d72,0x4077,0xeecc,0xcd2a,0x4bc9,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xef7fffff,0xdec96120,0xfcb43d80,0x2d72e8d7,0xeecc4077,0x4bc9cd2a,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xef7fffff,0xdec96120,0xfcb43d80,0x2d72e8d7,0xeecc4077,0x4bc9cd2a,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdec96120ef7fffff,0x2d72e8d7fcb43d80,0x4bc9cd2aeecc4077,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdec96120ef7fffff,0x2d72e8d7fcb43d80,0x4bc9cd2aeecc4077,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ 
#if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 73}, {{ +}}}, 73}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = 
(mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4fae,0x9faa,0x2b8a,0x6a69,0x436c,0x633a,0x7892,0x301c,0xec62,0xcb54,0xe41,0xac50}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4fae,0x9faa,0x2b8a,0x6a69,0x436c,0x633a,0x7892,0x301c,0xec62,0xcb54,0xe41,0xac50}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9faa4fae,0x6a692b8a,0x633a436c,0x301c7892,0xcb54ec62,0xac500e41}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9faa4fae,0x6a692b8a,0x633a436c,0x301c7892,0xcb54ec62,0xac500e41}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x6a692b8a9faa4fae,0x301c7892633a436c,0xac500e41cb54ec62}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x6a692b8a9faa4fae,0x301c7892633a436c,0xac500e41cb54ec62}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 
0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -24, ._mp_d = (mp_limb_t[]) {0x30b3,0xeb66,0x87b7,0x617e,0x27c,0xfa7,0xdcf4,0x90c8,0x7e8b,0x9e3c,0xaf36,0xb7ba,0x5eeb,0xbaf7,0xbdd7,0x75ee,0x7baf,0xebdd,0xf75e,0xd7ba,0xeebd,0xaf75,0xdd7b,0x2eb}}} +{{{._mp_alloc = 0, ._mp_size = -24, ._mp_d = (mp_limb_t[]) {0x30b3,0xeb66,0x87b7,0x617e,0x27c,0xfa7,0xdcf4,0x90c8,0x7e8b,0x9e3c,0xaf36,0xb7ba,0x5eeb,0xbaf7,0xbdd7,0x75ee,0x7baf,0xebdd,0xf75e,0xd7ba,0xeebd,0xaf75,0xdd7b,0x2eb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xeb6630b3,0x617e87b7,0xfa7027c,0x90c8dcf4,0x9e3c7e8b,0xb7baaf36,0xbaf75eeb,0x75eebdd7,0xebdd7baf,0xd7baf75e,0xaf75eebd,0x2ebdd7b}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xeb6630b3,0x617e87b7,0xfa7027c,0x90c8dcf4,0x9e3c7e8b,0xb7baaf36,0xbaf75eeb,0x75eebdd7,0xebdd7baf,0xd7baf75e,0xaf75eebd,0x2ebdd7b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x617e87b7eb6630b3,0x90c8dcf40fa7027c,0xb7baaf369e3c7e8b,0x75eebdd7baf75eeb,0xd7baf75eebdd7baf,0x2ebdd7baf75eebd}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x617e87b7eb6630b3,0x90c8dcf40fa7027c,0xb7baaf369e3c7e8b,0x75eebdd7baf75eeb,0xd7baf75eebdd7baf,0x2ebdd7baf75eebd}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 
0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xb5ab,0x986,0x1b92,0x5123,0x4b2a,0x653b,0x4896,0xc0fd,0x579e,0xc06c,0xd20e,0xf7}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xb5ab,0x986,0x1b92,0x5123,0x4b2a,0x653b,0x4896,0xc0fd,0x579e,0xc06c,0xd20e,0xf7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x986b5ab,0x51231b92,0x653b4b2a,0xc0fd4896,0xc06c579e,0xf7d20e}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x986b5ab,0x51231b92,0x653b4b2a,0xc0fd4896,0xc06c579e,0xf7d20e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x51231b920986b5ab,0xc0fd4896653b4b2a,0xf7d20ec06c579e}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x51231b920986b5ab,0xc0fd4896653b4b2a,0xf7d20ec06c579e}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}} 
+{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4fae,0x9faa,0x2b8a,0x6a69,0x436c,0x633a,0x7892,0x301c,0xec62,0xcb54,0xe41,0xac50}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4fae,0x9faa,0x2b8a,0x6a69,0x436c,0x633a,0x7892,0x301c,0xec62,0xcb54,0xe41,0xac50}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9faa4fae,0x6a692b8a,0x633a436c,0x301c7892,0xcb54ec62,0xac500e41}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9faa4fae,0x6a692b8a,0x633a436c,0x301c7892,0xcb54ec62,0xac500e41}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x6a692b8a9faa4fae,0x301c7892633a436c,0xac500e41cb54ec62}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x6a692b8a9faa4fae,0x301c7892633a436c,0xac500e41cb54ec62}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 
0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 89}, {{ +}}}, 89}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xa194,0x6df0,0x4b4c,0xf874,0xb43e,0x362a,0x11bb,0x84f2,0xc623,0x61a2,0x7d42,0xe501,0x30}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xa194,0x6df0,0x4b4c,0xf874,0xb43e,0x362a,0x11bb,0x84f2,0xc623,0x61a2,0x7d42,0xe501,0x30}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x6df0a194,0xf8744b4c,0x362ab43e,0x84f211bb,0x61a2c623,0xe5017d42,0x30}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x6df0a194,0xf8744b4c,0x362ab43e,0x84f211bb,0x61a2c623,0xe5017d42,0x30}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8744b4c6df0a194,0x84f211bb362ab43e,0xe5017d4261a2c623,0x30}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8744b4c6df0a194,0x84f211bb362ab43e,0xe5017d4261a2c623,0x30}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xa194,0x6df0,0x4b4c,0xf874,0xb43e,0x362a,0x11bb,0x84f2,0xc623,0x61a2,0x7d42,0xe501,0x30}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xa194,0x6df0,0x4b4c,0xf874,0xb43e,0x362a,0x11bb,0x84f2,0xc623,0x61a2,0x7d42,0xe501,0x30}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x6df0a194,0xf8744b4c,0x362ab43e,0x84f211bb,0x61a2c623,0xe5017d42,0x30}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x6df0a194,0xf8744b4c,0x362ab43e,0x84f211bb,0x61a2c623,0xe5017d42,0x30}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8744b4c6df0a194,0x84f211bb362ab43e,0xe5017d4261a2c623,0x30}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8744b4c6df0a194,0x84f211bb362ab43e,0xe5017d4261a2c623,0x30}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, 
._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x50ca,0x36f8,0x25a6,0x7c3a,0x5a1f,0x9b15,0x8dd,0xc279,0x6311,0x30d1,0xbea1,0x7280,0x18}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x50ca,0x36f8,0x25a6,0x7c3a,0x5a1f,0x9b15,0x8dd,0xc279,0x6311,0x30d1,0xbea1,0x7280,0x18}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x36f850ca,0x7c3a25a6,0x9b155a1f,0xc27908dd,0x30d16311,0x7280bea1,0x18}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x36f850ca,0x7c3a25a6,0x9b155a1f,0xc27908dd,0x30d16311,0x7280bea1,0x18}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7c3a25a636f850ca,0xc27908dd9b155a1f,0x7280bea130d16311,0x18}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7c3a25a636f850ca,0xc27908dd9b155a1f,0x7280bea130d16311,0x18}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x83f7,0x63ae,0x245e,0x2154,0x883c,0x544b,0x8f96,0x1b2d,0xcc0c,0x8d73,0x7bdd,0x118e,0x1df}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x83f7,0x63ae,0x245e,0x2154,0x883c,0x544b,0x8f96,0x1b2d,0xcc0c,0x8d73,0x7bdd,0x118e,0x1df}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x63ae83f7,0x2154245e,0x544b883c,0x1b2d8f96,0x8d73cc0c,0x118e7bdd,0x1df}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x63ae83f7,0x2154245e,0x544b883c,0x1b2d8f96,0x8d73cc0c,0x118e7bdd,0x1df}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2154245e63ae83f7,0x1b2d8f96544b883c,0x118e7bdd8d73cc0c,0x1df}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2154245e63ae83f7,0x1b2d8f96544b883c,0x118e7bdd8d73cc0c,0x1df}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, 
._mp_size = -24, ._mp_d = (mp_limb_t[]) {0xbd79,0x489c,0xbd84,0xce46,0x9344,0xb194,0x642a,0x3c5a,0xdb04,0x96f5,0x6e1f,0x4dcb,0xff6e,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x207f}}} +{{{._mp_alloc = 0, ._mp_size = -24, ._mp_d = (mp_limb_t[]) {0xbd79,0x489c,0xbd84,0xce46,0x9344,0xb194,0x642a,0x3c5a,0xdb04,0x96f5,0x6e1f,0x4dcb,0xff6e,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x207f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x489cbd79,0xce46bd84,0xb1949344,0x3c5a642a,0x96f5db04,0x4dcb6e1f,0xffffff6e,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x207fffff}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x489cbd79,0xce46bd84,0xb1949344,0x3c5a642a,0x96f5db04,0x4dcb6e1f,0xffffff6e,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x207fffff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xce46bd84489cbd79,0x3c5a642ab1949344,0x4dcb6e1f96f5db04,0xffffffffffffff6e,0xffffffffffffffff,0x207fffffffffffff}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xce46bd84489cbd79,0x3c5a642ab1949344,0x4dcb6e1f96f5db04,0xffffffffffffff6e,0xffffffffffffffff,0x207fffffffffffff}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x50ca,0x36f8,0x25a6,0x7c3a,0x5a1f,0x9b15,0x8dd,0xc279,0x6311,0x30d1,0xbea1,0x7280,0x18}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x50ca,0x36f8,0x25a6,0x7c3a,0x5a1f,0x9b15,0x8dd,0xc279,0x6311,0x30d1,0xbea1,0x7280,0x18}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x36f850ca,0x7c3a25a6,0x9b155a1f,0xc27908dd,0x30d16311,0x7280bea1,0x18}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x36f850ca,0x7c3a25a6,0x9b155a1f,0xc27908dd,0x30d16311,0x7280bea1,0x18}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7c3a25a636f850ca,0xc27908dd9b155a1f,0x7280bea130d16311,0x18}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7c3a25a636f850ca,0xc27908dd9b155a1f,0x7280bea130d16311,0x18}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} 
+{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0xa1c9,0x3fda,0x577,0x71a8,0xf4d3,0x4269,0xecf2,0x2a5d,0x41b6,0x6e41,0x47e5,0x782c,0x2}}} +{{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0xa1c9,0x3fda,0x577,0x71a8,0xf4d3,0x4269,0xecf2,0x2a5d,0x41b6,0x6e41,0x47e5,0x782c,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0x3fdaa1c9,0x71a80577,0x4269f4d3,0x2a5decf2,0x6e4141b6,0x782c47e5,0x2}}} +{{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0x3fdaa1c9,0x71a80577,0x4269f4d3,0x2a5decf2,0x6e4141b6,0x782c47e5,0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x71a805773fdaa1c9,0x2a5decf24269f4d3,0x782c47e56e4141b6,0x2}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x71a805773fdaa1c9,0x2a5decf24269f4d3,0x782c47e56e4141b6,0x2}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xa194,0x6df0,0x4b4c,0xf874,0xb43e,0x362a,0x11bb,0x84f2,0xc623,0x61a2,0x7d42,0xe501,0x30}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xa194,0x6df0,0x4b4c,0xf874,0xb43e,0x362a,0x11bb,0x84f2,0xc623,0x61a2,0x7d42,0xe501,0x30}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x6df0a194,0xf8744b4c,0x362ab43e,0x84f211bb,0x61a2c623,0xe5017d42,0x30}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x6df0a194,0xf8744b4c,0x362ab43e,0x84f211bb,0x61a2c623,0xe5017d42,0x30}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8744b4c6df0a194,0x84f211bb362ab43e,0xe5017d4261a2c623,0x30}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8744b4c6df0a194,0x84f211bb362ab43e,0xe5017d4261a2c623,0x30}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x83f7,0x63ae,0x245e,0x2154,0x883c,0x544b,0x8f96,0x1b2d,0xcc0c,0x8d73,0x7bdd,0x118e,0x1df}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x83f7,0x63ae,0x245e,0x2154,0x883c,0x544b,0x8f96,0x1b2d,0xcc0c,0x8d73,0x7bdd,0x118e,0x1df}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x63ae83f7,0x2154245e,0x544b883c,0x1b2d8f96,0x8d73cc0c,0x118e7bdd,0x1df}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x63ae83f7,0x2154245e,0x544b883c,0x1b2d8f96,0x8d73cc0c,0x118e7bdd,0x1df}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2154245e63ae83f7,0x1b2d8f96544b883c,0x118e7bdd8d73cc0c,0x1df}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2154245e63ae83f7,0x1b2d8f96544b883c,0x118e7bdd8d73cc0c,0x1df}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 97}}; +}}}, 97}}; const quat_left_ideal_t CONNECTING_IDEALS[8] = {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) 
{0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, 
._mp_d = (mp_limb_t[]) {0xbf5a,0x9a6f,0xcde1,0x21d4,0x52b1,0xe7a0,0xf3ba,0x78eb,0xc45c,0x787f,0x5c29,0x1c51,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xbf5a,0x9a6f,0xcde1,0x21d4,0x52b1,0xe7a0,0xf3ba,0x78eb,0xc45c,0x787f,0x5c29,0x1c51,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x9a6fbf5a,0x21d4cde1,0xe7a052b1,0x78ebf3ba,0x787fc45c,0x1c515c29,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x9a6fbf5a,0x21d4cde1,0xe7a052b1,0x78ebf3ba,0x787fc45c,0x1c515c29,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x21d4cde19a6fbf5a,0x78ebf3bae7a052b1,0x1c515c29787fc45c,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x21d4cde19a6fbf5a,0x78ebf3bae7a052b1,0x1c515c29787fc45c,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8015,0xfd5c,0xb508,0x1437,0xfa92,0x6222,0x1452,0xa79a,0x6c31,0xd3a9,0xb3c4,0x15c5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8015,0xfd5c,0xb508,0x1437,0xfa92,0x6222,0x1452,0xa79a,0x6c31,0xd3a9,0xb3c4,0x15c5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfd5c8015,0x1437b508,0x6222fa92,0xa79a1452,0xd3a96c31,0x15c5b3c4}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfd5c8015,0x1437b508,0x6222fa92,0xa79a1452,0xd3a96c31,0x15c5b3c4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x1437b508fd5c8015,0xa79a14526222fa92,0x15c5b3c4d3a96c31}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x1437b508fd5c8015,0xa79a14526222fa92,0x15c5b3c4d3a96c31}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xbf5a,0x9a6f,0xcde1,0x21d4,0x52b1,0xe7a0,0xf3ba,0x78eb,0xc45c,0x787f,0x5c29,0x1c51,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xbf5a,0x9a6f,0xcde1,0x21d4,0x52b1,0xe7a0,0xf3ba,0x78eb,0xc45c,0x787f,0x5c29,0x1c51,0x1}}}} #elif GMP_LIMB_BITS == 32 
-{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x9a6fbf5a,0x21d4cde1,0xe7a052b1,0x78ebf3ba,0x787fc45c,0x1c515c29,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x9a6fbf5a,0x21d4cde1,0xe7a052b1,0x78ebf3ba,0x787fc45c,0x1c515c29,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x21d4cde19a6fbf5a,0x78ebf3bae7a052b1,0x1c515c29787fc45c,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x21d4cde19a6fbf5a,0x78ebf3bae7a052b1,0x1c515c29787fc45c,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x3f45,0x9d13,0x18d8,0xd9d,0x581f,0x857d,0xdf68,0xd151,0x582a,0xa4d6,0xa864,0x68b,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x3f45,0x9d13,0x18d8,0xd9d,0x581f,0x857d,0xdf68,0xd151,0x582a,0xa4d6,0xa864,0x68b,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x9d133f45,0xd9d18d8,0x857d581f,0xd151df68,0xa4d6582a,0x68ba864,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x9d133f45,0xd9d18d8,0x857d581f,0xd151df68,0xa4d6582a,0x68ba864,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd9d18d89d133f45,0xd151df68857d581f,0x68ba864a4d6582a,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd9d18d89d133f45,0xd151df68857d581f,0x68ba864a4d6582a,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfad,0xcd37,0x66f0,0x90ea,0x2958,0x73d0,0xf9dd,0x3c75,0xe22e,0xbc3f,0xae14,0x8e28}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfad,0xcd37,0x66f0,0x90ea,0x2958,0x73d0,0xf9dd,0x3c75,0xe22e,0xbc3f,0xae14,0x8e28}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xcd37dfad,0x90ea66f0,0x73d02958,0x3c75f9dd,0xbc3fe22e,0x8e28ae14}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xcd37dfad,0x90ea66f0,0x73d02958,0x3c75f9dd,0xbc3fe22e,0x8e28ae14}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x90ea66f0cd37dfad,0x3c75f9dd73d02958,0x8e28ae14bc3fe22e}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x90ea66f0cd37dfad,0x3c75f9dd73d02958,0x8e28ae14bc3fe22e}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x9d7a,0x920e,0xe71,0xc120,0x8fbf,0x607e,0x29f,0xff55,0x7422,0x4796,0xbca4,0x125b,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x9d7a,0x920e,0xe71,0xc120,0x8fbf,0x607e,0x29f,0xff55,0x7422,0x4796,0xbca4,0x125b,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x920e9d7a,0xc1200e71,0x607e8fbf,0xff55029f,0x47967422,0x125bbca4,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x920e9d7a,0xc1200e71,0x607e8fbf,0xff55029f,0x47967422,0x125bbca4,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc1200e71920e9d7a,0xff55029f607e8fbf,0x125bbca447967422,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc1200e71920e9d7a,0xff55029f607e8fbf,0x125bbca447967422,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbcbf,0x76ed,0xc538,0xec53,0xeb88,0xb40d,0xa54e,0x14f,0x8bb2,0x300a,0xedb2,0x539}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbcbf,0x76ed,0xc538,0xec53,0xeb88,0xb40d,0xa54e,0x14f,0x8bb2,0x300a,0xedb2,0x539}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x76edbcbf,0xec53c538,0xb40deb88,0x14fa54e,0x300a8bb2,0x539edb2}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x76edbcbf,0xec53c538,0xb40deb88,0x14fa54e,0x300a8bb2,0x539edb2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xec53c53876edbcbf,0x14fa54eb40deb88,0x539edb2300a8bb2}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xec53c53876edbcbf,0x14fa54eb40deb88,0x539edb2300a8bb2}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) 
{0x9d7a,0x920e,0xe71,0xc120,0x8fbf,0x607e,0x29f,0xff55,0x7422,0x4796,0xbca4,0x125b,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x9d7a,0x920e,0xe71,0xc120,0x8fbf,0x607e,0x29f,0xff55,0x7422,0x4796,0xbca4,0x125b,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x920e9d7a,0xc1200e71,0x607e8fbf,0xff55029f,0x47967422,0x125bbca4,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x920e9d7a,0xc1200e71,0x607e8fbf,0xff55029f,0x47967422,0x125bbca4,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc1200e71920e9d7a,0xff55029f607e8fbf,0x125bbca447967422,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc1200e71920e9d7a,0xff55029f607e8fbf,0x125bbca447967422,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xe0bb,0x1b20,0x4939,0xd4cc,0xa436,0xac70,0x5d50,0xfe05,0xe870,0x178b,0xcef2,0xd21,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xe0bb,0x1b20,0x4939,0xd4cc,0xa436,0xac70,0x5d50,0xfe05,0xe870,0x178b,0xcef2,0xd21,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x1b20e0bb,0xd4cc4939,0xac70a436,0xfe055d50,0x178be870,0xd21cef2,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x1b20e0bb,0xd4cc4939,0xac70a436,0xfe055d50,0x178be870,0xd21cef2,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd4cc49391b20e0bb,0xfe055d50ac70a436,0xd21cef2178be870,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd4cc49391b20e0bb,0xfe055d50ac70a436,0xd21cef2178be870,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, 
._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4ebd,0xc907,0x738,0xe090,0x47df,0xb03f,0x814f,0x7faa,0x3a11,0x23cb,0xde52,0x892d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4ebd,0xc907,0x738,0xe090,0x47df,0xb03f,0x814f,0x7faa,0x3a11,0x23cb,0xde52,0x892d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc9074ebd,0xe0900738,0xb03f47df,0x7faa814f,0x23cb3a11,0x892dde52}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc9074ebd,0xe0900738,0xb03f47df,0x7faa814f,0x23cb3a11,0x892dde52}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe0900738c9074ebd,0x7faa814fb03f47df,0x892dde5223cb3a11}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe0900738c9074ebd,0x7faa814fb03f47df,0x892dde5223cb3a11}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = 
(mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xd282,0xcb1f,0x6532,0xe33e,0x153d,0xfd8,0x4275,0x2b62,0xf17d,0xdb04,0x3f12,0xf722,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xd282,0xcb1f,0x6532,0xe33e,0x153d,0xfd8,0x4275,0x2b62,0xf17d,0xdb04,0x3f12,0xf722,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xcb1fd282,0xe33e6532,0xfd8153d,0x2b624275,0xdb04f17d,0xf7223f12,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xcb1fd282,0xe33e6532,0xfd8153d,0x2b624275,0xdb04f17d,0xf7223f12,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe33e6532cb1fd282,0x2b6242750fd8153d,0xf7223f12db04f17d,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe33e6532cb1fd282,0x2b6242750fd8153d,0xf7223f12db04f17d,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x50bf,0xeebf,0xe944,0xea4d,0x76d,0xcbc5,0x4919,0x12b0,0x71f3,0x9e30,0x3304,0x1265}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x50bf,0xeebf,0xe944,0xea4d,0x76d,0xcbc5,0x4919,0x12b0,0x71f3,0x9e30,0x3304,0x1265}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xeebf50bf,0xea4de944,0xcbc5076d,0x12b04919,0x9e3071f3,0x12653304}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xeebf50bf,0xea4de944,0xcbc5076d,0x12b04919,0x9e3071f3,0x12653304}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xea4de944eebf50bf,0x12b04919cbc5076d,0x126533049e3071f3}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xea4de944eebf50bf,0x12b04919cbc5076d,0x126533049e3071f3}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xd282,0xcb1f,0x6532,0xe33e,0x153d,0xfd8,0x4275,0x2b62,0xf17d,0xdb04,0x3f12,0xf722,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xd282,0xcb1f,0x6532,0xe33e,0x153d,0xfd8,0x4275,0x2b62,0xf17d,0xdb04,0x3f12,0xf722,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xcb1fd282,0xe33e6532,0xfd8153d,0x2b624275,0xdb04f17d,0xf7223f12,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xcb1fd282,0xe33e6532,0xfd8153d,0x2b624275,0xdb04f17d,0xf7223f12,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe33e6532cb1fd282,0x2b6242750fd8153d,0xf7223f12db04f17d,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe33e6532cb1fd282,0x2b6242750fd8153d,0xf7223f12db04f17d,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x81c3,0xdc60,0x7bed,0xf8f0,0xdcf,0x4413,0xf95b,0x18b1,0x7f8a,0x3cd4,0xc0e,0xe4bd,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x81c3,0xdc60,0x7bed,0xf8f0,0xdcf,0x4413,0xf95b,0x18b1,0x7f8a,0x3cd4,0xc0e,0xe4bd,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xdc6081c3,0xf8f07bed,0x44130dcf,0x18b1f95b,0x3cd47f8a,0xe4bd0c0e,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xdc6081c3,0xf8f07bed,0x44130dcf,0x18b1f95b,0x3cd47f8a,0xe4bd0c0e,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8f07beddc6081c3,0x18b1f95b44130dcf,0xe4bd0c0e3cd47f8a,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8f07beddc6081c3,0x18b1f95b44130dcf,0xe4bd0c0e3cd47f8a,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) 
{0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe941,0x658f,0x3299,0xf19f,0xa9e,0x87ec,0x213a,0x95b1,0x78be,0x6d82,0x1f89,0xfb91}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe941,0x658f,0x3299,0xf19f,0xa9e,0x87ec,0x213a,0x95b1,0x78be,0x6d82,0x1f89,0xfb91}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x658fe941,0xf19f3299,0x87ec0a9e,0x95b1213a,0x6d8278be,0xfb911f89}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x658fe941,0xf19f3299,0x87ec0a9e,0x95b1213a,0x6d8278be,0xfb911f89}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xf19f3299658fe941,0x95b1213a87ec0a9e,0xfb911f896d8278be}}} 
+{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xf19f3299658fe941,0x95b1213a87ec0a9e,0xfb911f896d8278be}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x60fb,0xd399,0x887f,0xd263,0xe0e7,0xb202,0x699b,0xea34,0x5a15,0x4b8a,0x6763,0x8e95}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x60fb,0xd399,0x887f,0xd263,0xe0e7,0xb202,0x699b,0xea34,0x5a15,0x4b8a,0x6763,0x8e95}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd39960fb,0xd263887f,0xb202e0e7,0xea34699b,0x4b8a5a15,0x8e956763}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd39960fb,0xd263887f,0xb202e0e7,0xea34699b,0x4b8a5a15,0x8e956763}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xd263887fd39960fb,0xea34699bb202e0e7,0x8e9567634b8a5a15}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xd263887fd39960fb,0xea34699bb202e0e7,0x8e9567634b8a5a15}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} 
#elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7edf,0xd82a,0x4c38,0xa9b9,0x663f,0xb4af,0xb83e,0x8f97,0x898d,0x9b3,0x342a,0x1298}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7edf,0xd82a,0x4c38,0xa9b9,0x663f,0xb4af,0xb83e,0x8f97,0x898d,0x9b3,0x342a,0x1298}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd82a7edf,0xa9b94c38,0xb4af663f,0x8f97b83e,0x9b3898d,0x1298342a}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd82a7edf,0xa9b94c38,0xb4af663f,0x8f97b83e,0x9b3898d,0x1298342a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xa9b94c38d82a7edf,0x8f97b83eb4af663f,0x1298342a09b3898d}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xa9b94c38d82a7edf,0x8f97b83eb4af663f,0x1298342a09b3898d}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , 
#if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xe00e,0xd869,0x1a76,0xd8de,0xfe4c,0xabc5,0x99e1,0xf264,0x7d83,0x9c3,0x32ab,0xb60b,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xe00e,0xd869,0x1a76,0xd8de,0xfe4c,0xabc5,0x99e1,0xf264,0x7d83,0x9c3,0x32ab,0xb60b,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xd869e00e,0xd8de1a76,0xabc5fe4c,0xf26499e1,0x9c37d83,0xb60b32ab,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xd869e00e,0xd8de1a76,0xabc5fe4c,0xf26499e1,0x9c37d83,0xb60b32ab,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8de1a76d869e00e,0xf26499e1abc5fe4c,0xb60b32ab09c37d83,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8de1a76d869e00e,0xf26499e1abc5fe4c,0xb60b32ab09c37d83,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xb00f,0x8bbf,0x19a9,0xd6b,0xf7b,0xcd5c,0x74e7,0xd7e2,0xa419,0x3593,0x56a8,0x8de8,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xb00f,0x8bbf,0x19a9,0xd6b,0xf7b,0xcd5c,0x74e7,0xd7e2,0xa419,0x3593,0x56a8,0x8de8,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x8bbfb00f,0xd6b19a9,0xcd5c0f7b,0xd7e274e7,0x3593a419,0x8de856a8,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x8bbfb00f,0xd6b19a9,0xcd5c0f7b,0xd7e274e7,0x3593a419,0x8de856a8,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd6b19a98bbfb00f,0xd7e274e7cd5c0f7b,0x8de856a83593a419,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = 
(mp_limb_t[]) {0xd6b19a98bbfb00f,0xd7e274e7cd5c0f7b,0x8de856a83593a419,0x1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xe00e,0xd869,0x1a76,0xd8de,0xfe4c,0xabc5,0x99e1,0xf264,0x7d83,0x9c3,0x32ab,0xb60b,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xe00e,0xd869,0x1a76,0xd8de,0xfe4c,0xabc5,0x99e1,0xf264,0x7d83,0x9c3,0x32ab,0xb60b,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xd869e00e,0xd8de1a76,0xabc5fe4c,0xf26499e1,0x9c37d83,0xb60b32ab,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xd869e00e,0xd8de1a76,0xabc5fe4c,0xf26499e1,0x9c37d83,0xb60b32ab,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8de1a76d869e00e,0xf26499e1abc5fe4c,0xb60b32ab09c37d83,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8de1a76d869e00e,0xf26499e1abc5fe4c,0xb60b32ab09c37d83,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fff,0x4caa,0xcd,0xcb73,0xeed1,0xde69,0x24f9,0x1a82,0xd96a,0xd42f,0xdc02,0x2822}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fff,0x4caa,0xcd,0xcb73,0xeed1,0xde69,0x24f9,0x1a82,0xd96a,0xd42f,0xdc02,0x2822}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4caa2fff,0xcb7300cd,0xde69eed1,0x1a8224f9,0xd42fd96a,0x2822dc02}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4caa2fff,0xcb7300cd,0xde69eed1,0x1a8224f9,0xd42fd96a,0x2822dc02}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcb7300cd4caa2fff,0x1a8224f9de69eed1,0x2822dc02d42fd96a}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcb7300cd4caa2fff,0x1a8224f9de69eed1,0x2822dc02d42fd96a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf007,0x6c34,0xd3b,0x6c6f,0xff26,0xd5e2,0x4cf0,0xf932,0xbec1,0x84e1,0x9955,0xdb05}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf007,0x6c34,0xd3b,0x6c6f,0xff26,0xd5e2,0x4cf0,0xf932,0xbec1,0x84e1,0x9955,0xdb05}}}} 
#elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6c34f007,0x6c6f0d3b,0xd5e2ff26,0xf9324cf0,0x84e1bec1,0xdb059955}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6c34f007,0x6c6f0d3b,0xd5e2ff26,0xf9324cf0,0x84e1bec1,0xdb059955}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x6c6f0d3b6c34f007,0xf9324cf0d5e2ff26,0xdb05995584e1bec1}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x6c6f0d3b6c34f007,0xf9324cf0d5e2ff26,0xdb05995584e1bec1}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3a91,0xcd01,0xac55,0x9a52,0x9887,0x118f,0x4dec,0x4245,0xd869,0x1022,0x1d16,0x7ad}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3a91,0xcd01,0xac55,0x9a52,0x9887,0x118f,0x4dec,0x4245,0xd869,0x1022,0x1d16,0x7ad}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xcd013a91,0x9a52ac55,0x118f9887,0x42454dec,0x1022d869,0x7ad1d16}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xcd013a91,0x9a52ac55,0x118f9887,0x42454dec,0x1022d869,0x7ad1d16}}}} #elif GMP_LIMB_BITS == 
64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x9a52ac55cd013a91,0x42454dec118f9887,0x7ad1d161022d869}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x9a52ac55cd013a91,0x42454dec118f9887,0x7ad1d161022d869}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4095,0x6a9f,0x1c86,0xfd81,0xe6a7,0xc52d,0xbb45,0xdbac,0x50ae,0x3a1b,0x87b,0x673a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4095,0x6a9f,0x1c86,0xfd81,0xe6a7,0xc52d,0xbb45,0xdbac,0x50ae,0x3a1b,0x87b,0x673a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6a9f4095,0xfd811c86,0xc52de6a7,0xdbacbb45,0x3a1b50ae,0x673a087b}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6a9f4095,0xfd811c86,0xc52de6a7,0xdbacbb45,0x3a1b50ae,0x673a087b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xfd811c866a9f4095,0xdbacbb45c52de6a7,0x673a087b3a1b50ae}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xfd811c866a9f4095,0xdbacbb45c52de6a7,0x673a087b3a1b50ae}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}} +{{{._mp_alloc = 
0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe5ca,0x3b34,0xb04b,0x430f,0xe795,0xa04a,0x8c7d,0xec47,0x77df,0x8e5c,0xb71e,0xd31f}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe5ca,0x3b34,0xb04b,0x430f,0xe795,0xa04a,0x8c7d,0xec47,0x77df,0x8e5c,0xb71e,0xd31f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3b34e5ca,0x430fb04b,0xa04ae795,0xec478c7d,0x8e5c77df,0xd31fb71e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3b34e5ca,0x430fb04b,0xa04ae795,0xec478c7d,0x8e5c77df,0xd31fb71e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x430fb04b3b34e5ca,0xec478c7da04ae795,0xd31fb71e8e5c77df}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x430fb04b3b34e5ca,0xec478c7da04ae795,0xd31fb71e8e5c77df}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4d27,0x98d5,0x3839,0x83ff,0x48b7,0x4d5b,0xc95b,0xbe45,0x9d44,0x36f3,0x4d57,0x6c26}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4d27,0x98d5,0x3839,0x83ff,0x48b7,0x4d5b,0xc95b,0xbe45,0x9d44,0x36f3,0x4d57,0x6c26}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x98d54d27,0x83ff3839,0x4d5b48b7,0xbe45c95b,0x36f39d44,0x6c264d57}}} +{{{._mp_alloc = 0, 
._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x98d54d27,0x83ff3839,0x4d5b48b7,0xbe45c95b,0x36f39d44,0x6c264d57}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x83ff383998d54d27,0xbe45c95b4d5b48b7,0x6c264d5736f39d44}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x83ff383998d54d27,0xbe45c95b4d5b48b7,0x6c264d5736f39d44}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe5ca,0x3b34,0xb04b,0x430f,0xe795,0xa04a,0x8c7d,0xec47,0x77df,0x8e5c,0xb71e,0xd31f}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe5ca,0x3b34,0xb04b,0x430f,0xe795,0xa04a,0x8c7d,0xec47,0x77df,0x8e5c,0xb71e,0xd31f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3b34e5ca,0x430fb04b,0xa04ae795,0xec478c7d,0x8e5c77df,0xd31fb71e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3b34e5ca,0x430fb04b,0xa04ae795,0xec478c7d,0x8e5c77df,0xd31fb71e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x430fb04b3b34e5ca,0xec478c7da04ae795,0xd31fb71e8e5c77df}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x430fb04b3b34e5ca,0xec478c7da04ae795,0xd31fb71e8e5c77df}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x98a3,0xa25f,0x7811,0xbf10,0x9edd,0x52ef,0xc322,0x2e01,0xda9b,0x5768,0x69c7,0x66f9}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x98a3,0xa25f,0x7811,0xbf10,0x9edd,0x52ef,0xc322,0x2e01,0xda9b,0x5768,0x69c7,0x66f9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xa25f98a3,0xbf107811,0x52ef9edd,0x2e01c322,0x5768da9b,0x66f969c7}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xa25f98a3,0xbf107811,0x52ef9edd,0x2e01c322,0x5768da9b,0x66f969c7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbf107811a25f98a3,0x2e01c32252ef9edd,0x66f969c75768da9b}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbf107811a25f98a3,0x2e01c32252ef9edd,0x66f969c75768da9b}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} 
+{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 
-{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x72e5,0x9d9a,0xd825,0xa187,0x73ca,0xd025,0xc63e,0xf623,0x3bef,0x472e,0xdb8f,0x698f}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x72e5,0x9d9a,0xd825,0xa187,0x73ca,0xd025,0xc63e,0xf623,0x3bef,0x472e,0xdb8f,0x698f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9d9a72e5,0xa187d825,0xd02573ca,0xf623c63e,0x472e3bef,0x698fdb8f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9d9a72e5,0xa187d825,0xd02573ca,0xf623c63e,0x472e3bef,0x698fdb8f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xa187d8259d9a72e5,0xf623c63ed02573ca,0x698fdb8f472e3bef}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xa187d8259d9a72e5,0xf623c63ed02573ca,0x698fdb8f472e3bef}}}} #endif , &MAXORD_O0}}; const quat_alg_elem_t CONJUGATING_ELEMENTS[8] = {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif 
GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x8015,0xfd5c,0xb508,0x1437,0xfa92,0x6222,0x1452,0xa79a,0x6c31,0xd3a9,0xb3c4,0x15c5}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x8015,0xfd5c,0xb508,0x1437,0xfa92,0x6222,0x1452,0xa79a,0x6c31,0xd3a9,0xb3c4,0x15c5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xfd5c8015,0x1437b508,0x6222fa92,0xa79a1452,0xd3a96c31,0x15c5b3c4}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xfd5c8015,0x1437b508,0x6222fa92,0xa79a1452,0xd3a96c31,0x15c5b3c4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x1437b508fd5c8015,0xa79a14526222fa92,0x15c5b3c4d3a96c31}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x1437b508fd5c8015,0xa79a14526222fa92,0x15c5b3c4d3a96c31}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x8015,0xfd5c,0xb508,0x1437,0xfa92,0x6222,0x1452,0xa79a,0x6c31,0xd3a9,0xb3c4,0x15c5}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x8015,0xfd5c,0xb508,0x1437,0xfa92,0x6222,0x1452,0xa79a,0x6c31,0xd3a9,0xb3c4,0x15c5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xfd5c8015,0x1437b508,0x6222fa92,0xa79a1452,0xd3a96c31,0x15c5b3c4}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xfd5c8015,0x1437b508,0x6222fa92,0xa79a1452,0xd3a96c31,0x15c5b3c4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x1437b508fd5c8015,0xa79a14526222fa92,0x15c5b3c4d3a96c31}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x1437b508fd5c8015,0xa79a14526222fa92,0x15c5b3c4d3a96c31}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = 
(mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xbcbf,0x76ed,0xc538,0xec53,0xeb88,0xb40d,0xa54e,0x14f,0x8bb2,0x300a,0xedb2,0x539}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xbcbf,0x76ed,0xc538,0xec53,0xeb88,0xb40d,0xa54e,0x14f,0x8bb2,0x300a,0xedb2,0x539}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x76edbcbf,0xec53c538,0xb40deb88,0x14fa54e,0x300a8bb2,0x539edb2}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x76edbcbf,0xec53c538,0xb40deb88,0x14fa54e,0x300a8bb2,0x539edb2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xec53c53876edbcbf,0x14fa54eb40deb88,0x539edb2300a8bb2}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xec53c53876edbcbf,0x14fa54eb40deb88,0x539edb2300a8bb2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xbcbf,0x76ed,0xc538,0xec53,0xeb88,0xb40d,0xa54e,0x14f,0x8bb2,0x300a,0xedb2,0x539}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xbcbf,0x76ed,0xc538,0xec53,0xeb88,0xb40d,0xa54e,0x14f,0x8bb2,0x300a,0xedb2,0x539}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x76edbcbf,0xec53c538,0xb40deb88,0x14fa54e,0x300a8bb2,0x539edb2}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x76edbcbf,0xec53c538,0xb40deb88,0x14fa54e,0x300a8bb2,0x539edb2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xec53c53876edbcbf,0x14fa54eb40deb88,0x539edb2300a8bb2}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xec53c53876edbcbf,0x14fa54eb40deb88,0x539edb2300a8bb2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x2341,0xb9df,0x4e77,0xcd8c,0x1cab,0xdb9d,0x8b8e,0x3e12,0x6370,0x7935,0x7217,0x987,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) 
{0x2341,0xb9df,0x4e77,0xcd8c,0x1cab,0xdb9d,0x8b8e,0x3e12,0x6370,0x7935,0x7217,0x987,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xb9df2341,0xcd8c4e77,0xdb9d1cab,0x3e128b8e,0x79356370,0x9877217,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xb9df2341,0xcd8c4e77,0xdb9d1cab,0x3e128b8e,0x79356370,0x9877217,0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcd8c4e77b9df2341,0x3e128b8edb9d1cab,0x987721779356370,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcd8c4e77b9df2341,0x3e128b8edb9d1cab,0x987721779356370,0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0x2341,0xb9df,0x4e77,0xcd8c,0x1cab,0xdb9d,0x8b8e,0x3e12,0x6370,0x7935,0x7217,0x987,0x2}}} +{{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0x2341,0xb9df,0x4e77,0xcd8c,0x1cab,0xdb9d,0x8b8e,0x3e12,0x6370,0x7935,0x7217,0x987,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0xb9df2341,0xcd8c4e77,0xdb9d1cab,0x3e128b8e,0x79356370,0x9877217,0x2}}} +{{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0xb9df2341,0xcd8c4e77,0xdb9d1cab,0x3e128b8e,0x79356370,0x9877217,0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xcd8c4e77b9df2341,0x3e128b8edb9d1cab,0x987721779356370,0x2}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xcd8c4e77b9df2341,0x3e128b8edb9d1cab,0x987721779356370,0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}} +{{{._mp_alloc = 
0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x2fff,0x4caa,0xcd,0xcb73,0xeed1,0xde69,0x24f9,0x1a82,0xd96a,0xd42f,0xdc02,0x2822}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x2fff,0x4caa,0xcd,0xcb73,0xeed1,0xde69,0x24f9,0x1a82,0xd96a,0xd42f,0xdc02,0x2822}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x4caa2fff,0xcb7300cd,0xde69eed1,0x1a8224f9,0xd42fd96a,0x2822dc02}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x4caa2fff,0xcb7300cd,0xde69eed1,0x1a8224f9,0xd42fd96a,0x2822dc02}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xcb7300cd4caa2fff,0x1a8224f9de69eed1,0x2822dc02d42fd96a}}} +{{{._mp_alloc = 0, ._mp_size = -3, 
._mp_d = (mp_limb_t[]) {0xcb7300cd4caa2fff,0x1a8224f9de69eed1,0x2822dc02d42fd96a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fff,0x4caa,0xcd,0xcb73,0xeed1,0xde69,0x24f9,0x1a82,0xd96a,0xd42f,0xdc02,0x2822}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fff,0x4caa,0xcd,0xcb73,0xeed1,0xde69,0x24f9,0x1a82,0xd96a,0xd42f,0xdc02,0x2822}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4caa2fff,0xcb7300cd,0xde69eed1,0x1a8224f9,0xd42fd96a,0x2822dc02}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4caa2fff,0xcb7300cd,0xde69eed1,0x1a8224f9,0xd42fd96a,0x2822dc02}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcb7300cd4caa2fff,0x1a8224f9de69eed1,0x2822dc02d42fd96a}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcb7300cd4caa2fff,0x1a8224f9de69eed1,0x2822dc02d42fd96a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}} +{{{._mp_alloc = 0, ._mp_size = -12, 
._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x4385,0xf091,0xe8e9,0xfaa3,0x7d60,0x8ab7,0x68b2,0x8a57,0x2754,0xa10c,0x6f20,0x71e4,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x4385,0xf091,0xe8e9,0xfaa3,0x7d60,0x8ab7,0x68b2,0x8a57,0x2754,0xa10c,0x6f20,0x71e4,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xf0914385,0xfaa3e8e9,0x8ab77d60,0x8a5768b2,0xa10c2754,0x71e46f20,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xf0914385,0xfaa3e8e9,0x8ab77d60,0x8a5768b2,0xa10c2754,0x71e46f20,0x4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfaa3e8e9f0914385,0x8a5768b28ab77d60,0x71e46f20a10c2754,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfaa3e8e9f0914385,0x8a5768b28ab77d60,0x71e46f20a10c2754,0x4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x4385,0xf091,0xe8e9,0xfaa3,0x7d60,0x8ab7,0x68b2,0x8a57,0x2754,0xa10c,0x6f20,0x71e4,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x4385,0xf091,0xe8e9,0xfaa3,0x7d60,0x8ab7,0x68b2,0x8a57,0x2754,0xa10c,0x6f20,0x71e4,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xf0914385,0xfaa3e8e9,0x8ab77d60,0x8a5768b2,0xa10c2754,0x71e46f20,0x4}}} 
+{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xf0914385,0xfaa3e8e9,0x8ab77d60,0x8a5768b2,0xa10c2754,0x71e46f20,0x4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfaa3e8e9f0914385,0x8a5768b28ab77d60,0x71e46f20a10c2754,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfaa3e8e9f0914385,0x8a5768b28ab77d60,0x71e46f20a10c2754,0x4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}}} #endif -}}}; +}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sign.c index 9216bbe4d3..9520a6f7fd 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sign.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sign.c @@ -31,12 +31,12 @@ compute_challenge_ideal_signature(quat_left_ideal_t *lideal_chall_two, const sig // vec is a vector [1, chall_coeff] coefficients encoding the kernel of the challenge // isogeny as B[0] + chall_coeff*B[1] where B is the canonical basis of the // 2^TORSION_EVEN_POWER torsion of EA - ibz_set(&vec[0], 1); - ibz_copy_digit_array(&vec[1], sig->chall_coeff); + ibz_set(&vec.v[0], 1); + ibz_copy_digit_array(&vec.v[1], sig->chall_coeff); // now we compute the ideal associated to the challenge // for that, we need to find vec such that - // the kernel of the challenge isogeny is generated by vec[0]*B0[0] + vec[1]*B0[1] where B0 + // the kernel of the challenge isogeny is generated by vec.v[0]*B0[0] + vec.v[1]*B0[1] where B0 // is the image through the secret key isogeny of the canonical basis E0 ibz_mat_2x2_eval(&vec, &(sk->mat_BAcan_to_BA0_two), &vec); @@ -459,16 +459,16 @@ compute_and_set_basis_change_matrix(signature_t *sig, change_of_basis_matrix_tate(&mat_Bchall_can_to_Bchall, B_chall_2, &B_can_chall, E_chall, f); // Assert all values in the matrix are of the expected size for packing - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][0]) <= SQIsign_response_length + HD_extra_torsion); - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][1]) <= SQIsign_response_length + HD_extra_torsion); - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][0]) <= SQIsign_response_length + HD_extra_torsion); - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][1]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[0][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[0][1]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[1][0]) <= SQIsign_response_length + HD_extra_torsion); + 
assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[1][1]) <= SQIsign_response_length + HD_extra_torsion); // Set the basis change matrix to signature - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][0], &(mat_Bchall_can_to_Bchall[0][0])); - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][1], &(mat_Bchall_can_to_Bchall[0][1])); - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][0], &(mat_Bchall_can_to_Bchall[1][0])); - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][1], &(mat_Bchall_can_to_Bchall[1][1])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][0], &(mat_Bchall_can_to_Bchall.m[0][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][1], &(mat_Bchall_can_to_Bchall.m[0][1])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][0], &(mat_Bchall_can_to_Bchall.m[1][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][1], &(mat_Bchall_can_to_Bchall.m[1][1])); // Finalise the matrices ibz_mat_2x2_finalize(&mat_Bchall_can_to_Bchall); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/torsion_constants.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/torsion_constants.c index 1a6c203035..6e7296bfeb 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/torsion_constants.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/torsion_constants.c @@ -4,40 +4,40 @@ const ibz_t TWO_TO_SECURITY_BITS = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x1}}}} #endif ; const ibz_t TORSION_PLUS_2POWER = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x100}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x100}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000000}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x100000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x100000000000000}}}} #endif ; const ibz_t SEC_DEGREE = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 49, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 49, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, 
._mp_size = 25, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 25, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #endif ; const ibz_t COM_DEGREE = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 49, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 49, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 25, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 25, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #endif ; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/algebra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/algebra.c index f4b4260755..a6298acf77 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/algebra.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/algebra.c @@ -21,54 +21,54 @@ quat_alg_coord_mul(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b, ibz_init(&prod); ibz_vec_4_init(&sum); - ibz_set(&(sum[0]), 0); - ibz_set(&(sum[1]), 0); - ibz_set(&(sum[2]), 0); - ibz_set(&(sum[3]), 0); + ibz_set(&(sum.v[0]), 0); + ibz_set(&(sum.v[1]), 0); + ibz_set(&(sum.v[2]), 0); + ibz_set(&(sum.v[3]), 0); // compute 1 coordinate - ibz_mul(&prod, &((*a)[2]), &((*b)[2])); - ibz_sub(&(sum[0]), &(sum[0]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[3])); - ibz_sub(&(sum[0]), &(sum[0]), &prod); - ibz_mul(&(sum[0]), &(sum[0]), &(alg->p)); - ibz_mul(&prod, &((*a)[0]), &((*b)[0])); - ibz_add(&(sum[0]), &(sum[0]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[1])); - ibz_sub(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[2])); + ibz_sub(&(sum.v[0]), &(sum.v[0]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[3])); + ibz_sub(&(sum.v[0]), &(sum.v[0]), &prod); + ibz_mul(&(sum.v[0]), &(sum.v[0]), &(alg->p)); + ibz_mul(&prod, &(a->v[0]), &(b->v[0])); + ibz_add(&(sum.v[0]), &(sum.v[0]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[1])); + ibz_sub(&(sum.v[0]), &(sum.v[0]), &prod); // compute i coordiante - ibz_mul(&prod, &((*a)[2]), &((*b)[3])); - ibz_add(&(sum[1]), &(sum[1]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[2])); - ibz_sub(&(sum[1]), &(sum[1]), &prod); - ibz_mul(&(sum[1]), &(sum[1]), &(alg->p)); - ibz_mul(&prod, &((*a)[0]), &((*b)[1])); - ibz_add(&(sum[1]), &(sum[1]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[0])); - 
ibz_add(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[3])); + ibz_add(&(sum.v[1]), &(sum.v[1]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[2])); + ibz_sub(&(sum.v[1]), &(sum.v[1]), &prod); + ibz_mul(&(sum.v[1]), &(sum.v[1]), &(alg->p)); + ibz_mul(&prod, &(a->v[0]), &(b->v[1])); + ibz_add(&(sum.v[1]), &(sum.v[1]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[0])); + ibz_add(&(sum.v[1]), &(sum.v[1]), &prod); // compute j coordiante - ibz_mul(&prod, &((*a)[0]), &((*b)[2])); - ibz_add(&(sum[2]), &(sum[2]), &prod); - ibz_mul(&prod, &((*a)[2]), &((*b)[0])); - ibz_add(&(sum[2]), &(sum[2]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[3])); - ibz_sub(&(sum[2]), &(sum[2]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[1])); - ibz_add(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &(a->v[0]), &(b->v[2])); + ibz_add(&(sum.v[2]), &(sum.v[2]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[0])); + ibz_add(&(sum.v[2]), &(sum.v[2]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[3])); + ibz_sub(&(sum.v[2]), &(sum.v[2]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[1])); + ibz_add(&(sum.v[2]), &(sum.v[2]), &prod); // compute ij coordiante - ibz_mul(&prod, &((*a)[0]), &((*b)[3])); - ibz_add(&(sum[3]), &(sum[3]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[0])); - ibz_add(&(sum[3]), &(sum[3]), &prod); - ibz_mul(&prod, &((*a)[2]), &((*b)[1])); - ibz_sub(&(sum[3]), &(sum[3]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[2])); - ibz_add(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &(a->v[0]), &(b->v[3])); + ibz_add(&(sum.v[3]), &(sum.v[3]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[0])); + ibz_add(&(sum.v[3]), &(sum.v[3]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[1])); + ibz_sub(&(sum.v[3]), &(sum.v[3]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[2])); + ibz_add(&(sum.v[3]), &(sum.v[3]), &prod); - ibz_copy(&((*res)[0]), &(sum[0])); - ibz_copy(&((*res)[1]), &(sum[1])); - ibz_copy(&((*res)[2]), &(sum[2])); - ibz_copy(&((*res)[3]), &(sum[3])); + ibz_copy(&(res->v[0]), &(sum.v[0])); + ibz_copy(&(res->v[1]), &(sum.v[1])); + ibz_copy(&(res->v[2]), &(sum.v[2])); + ibz_copy(&(res->v[3]), &(sum.v[3])); ibz_finalize(&prod); ibz_vec_4_finalize(&sum); @@ -86,8 +86,8 @@ quat_alg_equal_denom(quat_alg_elem_t *res_a, quat_alg_elem_t *res_b, const quat_ ibz_div(&(res_b->denom), &r, &(b->denom), &gcd); for (int i = 0; i < 4; i++) { // multiply coordiates by reduced denominators from the other element - ibz_mul(&(res_a->coord[i]), &(a->coord[i]), &(res_b->denom)); - ibz_mul(&(res_b->coord[i]), &(b->coord[i]), &(res_a->denom)); + ibz_mul(&(res_a->coord.v[i]), &(a->coord.v[i]), &(res_b->denom)); + ibz_mul(&(res_b->coord.v[i]), &(b->coord.v[i]), &(res_a->denom)); } // multiply both reduced denominators ibz_mul(&(res_a->denom), &(res_a->denom), &(res_b->denom)); @@ -149,8 +149,8 @@ quat_alg_norm(ibz_t *res_num, ibz_t *res_denom, const quat_alg_elem_t *a, const quat_alg_conj(&norm, a); quat_alg_mul(&norm, a, &norm, alg); - ibz_gcd(&g, &(norm.coord[0]), &(norm.denom)); - ibz_div(res_num, &r, &(norm.coord[0]), &g); + ibz_gcd(&g, &(norm.coord.v[0]), &(norm.denom)); + ibz_div(res_num, &r, &(norm.coord.v[0]), &g); ibz_div(res_denom, &r, &(norm.denom), &g); ibz_abs(res_denom, res_denom); ibz_abs(res_num, res_num); @@ -165,20 +165,20 @@ void quat_alg_scalar(quat_alg_elem_t *elem, const ibz_t *numerator, const ibz_t *denominator) { ibz_copy(&(elem->denom), denominator); - ibz_copy(&(elem->coord[0]), numerator); - ibz_set(&(elem->coord[1]), 0); - ibz_set(&(elem->coord[2]), 0); - ibz_set(&(elem->coord[3]), 0); + 
ibz_copy(&(elem->coord.v[0]), numerator); + ibz_set(&(elem->coord.v[1]), 0); + ibz_set(&(elem->coord.v[2]), 0); + ibz_set(&(elem->coord.v[3]), 0); } void quat_alg_conj(quat_alg_elem_t *conj, const quat_alg_elem_t *x) { ibz_copy(&(conj->denom), &(x->denom)); - ibz_copy(&(conj->coord[0]), &(x->coord[0])); - ibz_neg(&(conj->coord[1]), &(x->coord[1])); - ibz_neg(&(conj->coord[2]), &(x->coord[2])); - ibz_neg(&(conj->coord[3]), &(x->coord[3])); + ibz_copy(&(conj->coord.v[0]), &(x->coord.v[0])); + ibz_neg(&(conj->coord.v[1]), &(x->coord.v[1])); + ibz_neg(&(conj->coord.v[2]), &(x->coord.v[2])); + ibz_neg(&(conj->coord.v[3]), &(x->coord.v[3])); } void @@ -190,7 +190,8 @@ quat_alg_make_primitive(ibz_vec_4_t *primitive_x, ibz_t *content, const quat_alg ibz_t r; ibz_init(&r); for (int i = 0; i < 4; i++) { - ibz_div(*primitive_x + i, &r, *primitive_x + i, content); + // TODO: check if this is correct + ibz_div(primitive_x->v + i, &r, primitive_x->v + i, content); } ibz_finalize(&r); } @@ -235,10 +236,10 @@ quat_alg_elem_is_zero(const quat_alg_elem_t *x) void quat_alg_elem_set(quat_alg_elem_t *elem, int32_t denom, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) { - ibz_set(&(elem->coord[0]), coord0); - ibz_set(&(elem->coord[1]), coord1); - ibz_set(&(elem->coord[2]), coord2); - ibz_set(&(elem->coord[3]), coord3); + ibz_set(&(elem->coord.v[0]), coord0); + ibz_set(&(elem->coord.v[1]), coord1); + ibz_set(&(elem->coord.v[2]), coord2); + ibz_set(&(elem->coord.v[3]), coord3); ibz_set(&(elem->denom), denom); } @@ -247,10 +248,10 @@ void quat_alg_elem_copy(quat_alg_elem_t *copy, const quat_alg_elem_t *copied) { ibz_copy(©->denom, &copied->denom); - ibz_copy(©->coord[0], &copied->coord[0]); - ibz_copy(©->coord[1], &copied->coord[1]); - ibz_copy(©->coord[2], &copied->coord[2]); - ibz_copy(©->coord[3], &copied->coord[3]); + ibz_copy(©->coord.v[0], &copied->coord.v[0]); + ibz_copy(©->coord.v[1], &copied->coord.v[1]); + ibz_copy(©->coord.v[2], &copied->coord.v[2]); + ibz_copy(©->coord.v[3], &copied->coord.v[3]); } // helper functions for lattices @@ -262,10 +263,10 @@ quat_alg_elem_copy_ibz(quat_alg_elem_t *elem, const ibz_t *coord2, const ibz_t *coord3) { - ibz_copy(&(elem->coord[0]), coord0); - ibz_copy(&(elem->coord[1]), coord1); - ibz_copy(&(elem->coord[2]), coord2); - ibz_copy(&(elem->coord[3]), coord3); + ibz_copy(&(elem->coord.v[0]), coord0); + ibz_copy(&(elem->coord.v[1]), coord1); + ibz_copy(&(elem->coord.v[2]), coord2); + ibz_copy(&(elem->coord.v[3]), coord3); ibz_copy(&(elem->denom), denom); } @@ -274,7 +275,7 @@ void quat_alg_elem_mul_by_scalar(quat_alg_elem_t *res, const ibz_t *scalar, const quat_alg_elem_t *elem) { for (int i = 0; i < 4; i++) { - ibz_mul(&(res->coord[i]), &(elem->coord[i]), scalar); + ibz_mul(&(res->coord.v[i]), &(elem->coord.v[i]), scalar); } ibz_copy(&(res->denom), &(elem->denom)); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/common.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/common.c index 1df7755a29..e051ac340a 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/common.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/common.c @@ -14,6 +14,7 @@ public_key_init(public_key_t *pk) void public_key_finalize(public_key_t *pk) { + (void) pk; } // compute the challenge as the hash of the message and the commitment curve and public key diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim2.c index b31ae7771a..5bf214c4e2 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim2.c 
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim2.c @@ -5,34 +5,34 @@ void ibz_vec_2_set(ibz_vec_2_t *vec, int a0, int a1) { - ibz_set(&((*vec)[0]), a0); - ibz_set(&((*vec)[1]), a1); + ibz_set(&(vec->v[0]), a0); + ibz_set(&(vec->v[1]), a1); } void ibz_mat_2x2_set(ibz_mat_2x2_t *mat, int a00, int a01, int a10, int a11) { - ibz_set(&((*mat)[0][0]), a00); - ibz_set(&((*mat)[0][1]), a01); - ibz_set(&((*mat)[1][0]), a10); - ibz_set(&((*mat)[1][1]), a11); + ibz_set(&(mat->m[0][0]), a00); + ibz_set(&(mat->m[0][1]), a01); + ibz_set(&(mat->m[1][0]), a10); + ibz_set(&(mat->m[1][1]), a11); } void ibz_mat_2x2_copy(ibz_mat_2x2_t *copy, const ibz_mat_2x2_t *copied) { - ibz_copy(&((*copy)[0][0]), &((*copied)[0][0])); - ibz_copy(&((*copy)[0][1]), &((*copied)[0][1])); - ibz_copy(&((*copy)[1][0]), &((*copied)[1][0])); - ibz_copy(&((*copy)[1][1]), &((*copied)[1][1])); + ibz_copy(&(copy->m[0][0]), &(copied->m[0][0])); + ibz_copy(&(copy->m[0][1]), &(copied->m[0][1])); + ibz_copy(&(copy->m[1][0]), &(copied->m[1][0])); + ibz_copy(&(copy->m[1][1]), &(copied->m[1][1])); } void ibz_mat_2x2_add(ibz_mat_2x2_t *sum, const ibz_mat_2x2_t *a, const ibz_mat_2x2_t *b) { - ibz_add(&((*sum)[0][0]), &((*a)[0][0]), &((*b)[0][0])); - ibz_add(&((*sum)[0][1]), &((*a)[0][1]), &((*b)[0][1])); - ibz_add(&((*sum)[1][0]), &((*a)[1][0]), &((*b)[1][0])); - ibz_add(&((*sum)[1][1]), &((*a)[1][1]), &((*b)[1][1])); + ibz_add(&(sum->m[0][0]), &(a->m[0][0]), &(b->m[0][0])); + ibz_add(&(sum->m[0][1]), &(a->m[0][1]), &(b->m[0][1])); + ibz_add(&(sum->m[1][0]), &(a->m[1][0]), &(b->m[1][0])); + ibz_add(&(sum->m[1][1]), &(a->m[1][1]), &(b->m[1][1])); } void @@ -53,16 +53,16 @@ ibz_mat_2x2_eval(ibz_vec_2_t *res, const ibz_mat_2x2_t *mat, const ibz_vec_2_t * ibz_vec_2_t matvec; ibz_init(&prod); ibz_vec_2_init(&matvec); - ibz_mul(&prod, &((*mat)[0][0]), &((*vec)[0])); - ibz_copy(&(matvec[0]), &prod); - ibz_mul(&prod, &((*mat)[0][1]), &((*vec)[1])); - ibz_add(&(matvec[0]), &(matvec[0]), &prod); - ibz_mul(&prod, &((*mat)[1][0]), &((*vec)[0])); - ibz_copy(&(matvec[1]), &prod); - ibz_mul(&prod, &((*mat)[1][1]), &((*vec)[1])); - ibz_add(&(matvec[1]), &(matvec[1]), &prod); - ibz_copy(&((*res)[0]), &(matvec[0])); - ibz_copy(&((*res)[1]), &(matvec[1])); + ibz_mul(&prod, &(mat->m[0][0]), &(vec->v[0])); + ibz_copy(&(matvec.v[0]), &prod); + ibz_mul(&prod, &(mat->m[0][1]), &(vec->v[1])); + ibz_add(&(matvec.v[0]), &(matvec.v[0]), &prod); + ibz_mul(&prod, &(mat->m[1][0]), &(vec->v[0])); + ibz_copy(&(matvec.v[1]), &prod); + ibz_mul(&prod, &(mat->m[1][1]), &(vec->v[1])); + ibz_add(&(matvec.v[1]), &(matvec.v[1]), &prod); + ibz_copy(&(res->v[0]), &(matvec.v[0])); + ibz_copy(&(res->v[1]), &(matvec.v[1])); ibz_finalize(&prod); ibz_vec_2_finalize(&matvec); } @@ -78,21 +78,21 @@ ibz_2x2_mul_mod(ibz_mat_2x2_t *prod, const ibz_mat_2x2_t *mat_a, const ibz_mat_2 ibz_mat_2x2_init(&sums); for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_set(&(sums[i][j]), 0); + ibz_set(&(sums.m[i][j]), 0); } } for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { for (int k = 0; k < 2; k++) { - ibz_mul(&mul, &((*mat_a)[i][k]), &((*mat_b)[k][j])); - ibz_add(&(sums[i][j]), &(sums[i][j]), &mul); - ibz_mod(&(sums[i][j]), &(sums[i][j]), m); + ibz_mul(&mul, &(mat_a->m[i][k]), &(mat_b->m[k][j])); + ibz_add(&(sums.m[i][j]), &(sums.m[i][j]), &mul); + ibz_mod(&(sums.m[i][j]), &(sums.m[i][j]), m); } } } for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_copy(&((*prod)[i][j]), &(sums[i][j])); + ibz_copy(&(prod->m[i][j]), &(sums.m[i][j])); } } ibz_finalize(&mul); 
@@ -105,9 +105,9 @@ ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m ibz_t det, prod; ibz_init(&det); ibz_init(&prod); - ibz_mul(&det, &((*mat)[0][0]), &((*mat)[1][1])); + ibz_mul(&det, &(mat->m[0][0]), &(mat->m[1][1])); ibz_mod(&det, &det, m); - ibz_mul(&prod, &((*mat)[0][1]), &((*mat)[1][0])); + ibz_mul(&prod, &(mat->m[0][1]), &(mat->m[1][0])); ibz_sub(&det, &det, &prod); ibz_mod(&det, &det, m); int res = ibz_invmod(&det, &det, m); @@ -115,15 +115,15 @@ ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m ibz_set(&prod, res); ibz_mul(&det, &det, &prod); // compute inverse - ibz_copy(&prod, &((*mat)[0][0])); - ibz_copy(&((*inv)[0][0]), &((*mat)[1][1])); - ibz_copy(&((*inv)[1][1]), &prod); - ibz_neg(&((*inv)[1][0]), &((*mat)[1][0])); - ibz_neg(&((*inv)[0][1]), &((*mat)[0][1])); + ibz_copy(&prod, &(mat->m[0][0])); + ibz_copy(&(inv->m[0][0]), &(mat->m[1][1])); + ibz_copy(&(inv->m[1][1]), &prod); + ibz_neg(&(inv->m[1][0]), &(mat->m[1][0])); + ibz_neg(&(inv->m[0][1]), &(mat->m[0][1])); for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_mul(&((*inv)[i][j]), &((*inv)[i][j]), &det); - ibz_mod(&((*inv)[i][j]), &((*inv)[i][j]), m); + ibz_mul(&(inv->m[i][j]), &(inv->m[i][j]), &det); + ibz_mod(&(inv->m[i][j]), &(inv->m[i][j]), m); } } ibz_finalize(&det); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim2id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim2id2iso.c index 171473d481..143060e2c3 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim2id2iso.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim2id2iso.c @@ -137,10 +137,10 @@ _fixed_degree_isogeny_impl(quat_left_ideal_t *lideal, ibz_invmod(&tmp, &tmp, &two_pow); assert(!ibz_is_even(&tmp)); - ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); - ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); - ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); - ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + ibz_mul(&theta.coord.v[0], &theta.coord.v[0], &tmp); + ibz_mul(&theta.coord.v[1], &theta.coord.v[1], &tmp); + ibz_mul(&theta.coord.v[2], &theta.coord.v[2], &tmp); + ibz_mul(&theta.coord.v[3], &theta.coord.v[3], &tmp); // applying theta to the basis ec_basis_t B0_two_theta; @@ -197,53 +197,53 @@ post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, const ibz_ // treatment if (is_special_order) { // reordering the basis if needed - if (ibz_cmp(&(*gram)[0][0], &(*gram)[2][2]) == 0) { + if (ibz_cmp(&gram->m[0][0], &gram->m[2][2]) == 0) { for (int i = 0; i < 4; i++) { - ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + ibz_swap(&reduced->m[i][1], &reduced->m[i][2]); } - ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); - ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); - ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); - ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); - ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); - } else if (ibz_cmp(&(*gram)[0][0], &(*gram)[3][3]) == 0) { + ibz_swap(&gram->m[0][2], &gram->m[0][1]); + ibz_swap(&gram->m[2][0], &gram->m[1][0]); + ibz_swap(&gram->m[3][2], &gram->m[3][1]); + ibz_swap(&gram->m[2][3], &gram->m[1][3]); + ibz_swap(&gram->m[2][2], &gram->m[1][1]); + } else if (ibz_cmp(&gram->m[0][0], &gram->m[3][3]) == 0) { for (int i = 0; i < 4; i++) { - ibz_swap(&(*reduced)[i][1], &(*reduced)[i][3]); + ibz_swap(&reduced->m[i][1], &reduced->m[i][3]); } - ibz_swap(&(*gram)[0][3], &(*gram)[0][1]); - ibz_swap(&(*gram)[3][0], &(*gram)[1][0]); - ibz_swap(&(*gram)[2][3], &(*gram)[2][1]); - ibz_swap(&(*gram)[3][2], &(*gram)[1][2]); - 
ibz_swap(&(*gram)[3][3], &(*gram)[1][1]); - } else if (ibz_cmp(&(*gram)[1][1], &(*gram)[3][3]) == 0) { + ibz_swap(&gram->m[0][3], &gram->m[0][1]); + ibz_swap(&gram->m[3][0], &gram->m[1][0]); + ibz_swap(&gram->m[2][3], &gram->m[2][1]); + ibz_swap(&gram->m[3][2], &gram->m[1][2]); + ibz_swap(&gram->m[3][3], &gram->m[1][1]); + } else if (ibz_cmp(&gram->m[1][1], &gram->m[3][3]) == 0) { // in this case it seems that we need to swap the second and third // element, and then recompute entirely the second element from the first // first we swap the second and third element for (int i = 0; i < 4; i++) { - ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + ibz_swap(&reduced->m[i][1], &reduced->m[i][2]); } - ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); - ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); - ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); - ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); - ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); + ibz_swap(&gram->m[0][2], &gram->m[0][1]); + ibz_swap(&gram->m[2][0], &gram->m[1][0]); + ibz_swap(&gram->m[3][2], &gram->m[3][1]); + ibz_swap(&gram->m[2][3], &gram->m[1][3]); + ibz_swap(&gram->m[2][2], &gram->m[1][1]); } // adjusting the sign if needed - if (ibz_cmp(&(*reduced)[0][0], &(*reduced)[1][1]) != 0) { + if (ibz_cmp(&reduced->m[0][0], &reduced->m[1][1]) != 0) { for (int i = 0; i < 4; i++) { - ibz_neg(&(*reduced)[i][1], &(*reduced)[i][1]); - ibz_neg(&(*gram)[i][1], &(*gram)[i][1]); - ibz_neg(&(*gram)[1][i], &(*gram)[1][i]); + ibz_neg(&reduced->m[i][1], &reduced->m[i][1]); + ibz_neg(&gram->m[i][1], &gram->m[i][1]); + ibz_neg(&gram->m[1][i], &gram->m[1][i]); } } - if (ibz_cmp(&(*reduced)[0][2], &(*reduced)[1][3]) != 0) { + if (ibz_cmp(&reduced->m[0][2], &reduced->m[1][3]) != 0) { for (int i = 0; i < 4; i++) { - ibz_neg(&(*reduced)[i][3], &(*reduced)[i][3]); - ibz_neg(&(*gram)[i][3], &(*gram)[i][3]); - ibz_neg(&(*gram)[3][i], &(*gram)[3][i]); + ibz_neg(&reduced->m[i][3], &reduced->m[i][3]); + ibz_neg(&gram->m[i][3], &gram->m[i][3]); + ibz_neg(&gram->m[3][i], &gram->m[3][i]); } - // assert(ibz_cmp(&(*reduced)[0][2],&(*reduced)[1][3])==0); + // assert(ibz_cmp(&reduced->m[0][2],&reduced->m[1][3])==0); } } } @@ -273,7 +273,7 @@ enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t // if the basis is of the form alpha, i*alpha, beta, i*beta // we can remove some values due to symmetry of the basis that bool need_remove_symmetry = - (ibz_cmp(&(*gram)[0][0], &(*gram)[1][1]) == 0 && ibz_cmp(&(*gram)[3][3], &(*gram)[2][2]) == 0); + (ibz_cmp(&gram->m[0][0], &gram->m[1][1]) == 0 && ibz_cmp(&gram->m[3][3], &gram->m[2][2]) == 0); int check1, check2, check3; @@ -324,10 +324,10 @@ enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t // and we ensure that we don't record the same norm in the list if (!need_remove_symmetry || (check1 <= check2 && check1 <= check3)) { // Set the point as a vector (x, y, z, w) - ibz_set(&point[0], x); - ibz_set(&point[1], y); - ibz_set(&point[2], z); - ibz_set(&point[3], w); + ibz_set(&point.v[0], x); + ibz_set(&point.v[1], y); + ibz_set(&point.v[2], z); + ibz_set(&point.v[3], w); // Evaluate this through the gram matrix and divide out by the // adjusted_norm @@ -336,10 +336,10 @@ enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t assert(ibz_is_zero(&remain)); if (ibz_mod_ui(&norm, 2) == 1) { - ibz_set(&vecs[count][0], x); - ibz_set(&vecs[count][1], y); - ibz_set(&vecs[count][2], z); - ibz_set(&vecs[count][3], w); + ibz_set(&vecs[count].v[0], x); + ibz_set(&vecs[count].v[1], y); + 
ibz_set(&vecs[count].v[2], z); + ibz_set(&vecs[count].v[3], w); ibz_copy(&norms[count], &norm); count++; } @@ -530,10 +530,10 @@ find_uv(ibz_t *u, quat_alg_elem_t delta; // delta will be the element of smallest norm quat_alg_elem_init(&delta); - ibz_set(&delta.coord[0], 1); - ibz_set(&delta.coord[1], 0); - ibz_set(&delta.coord[2], 0); - ibz_set(&delta.coord[3], 0); + ibz_set(&delta.coord.v[0], 1); + ibz_set(&delta.coord.v[1], 0); + ibz_set(&delta.coord.v[2], 0); + ibz_set(&delta.coord.v[3], 0); ibz_copy(&delta.denom, &reduced_id.lattice.denom); ibz_mat_4x4_eval(&delta.coord, &reduced[0], &delta.coord); assert(quat_lattice_contains(NULL, &reduced_id.lattice, &delta)); @@ -542,7 +542,7 @@ find_uv(ibz_t *u, quat_alg_conj(&delta, &delta); ibz_mul(&delta.denom, &delta.denom, &ideal[0].norm); quat_lattice_alg_elem_mul(&reduced_id.lattice, &reduced_id.lattice, &delta, Bpoo); - ibz_copy(&reduced_id.norm, &gram[0][0][0]); + ibz_copy(&reduced_id.norm, &gram[0].m[0][0]); ibz_div(&reduced_id.norm, &remain, &reduced_id.norm, &adjusted_norm[0]); assert(ibz_cmp(&remain, &ibz_const_zero) == 0); @@ -989,10 +989,10 @@ dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, } ibz_invmod(&tmp, &tmp, &two_pow); - ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); - ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); - ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); - ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + ibz_mul(&theta.coord.v[0], &theta.coord.v[0], &tmp); + ibz_mul(&theta.coord.v[1], &theta.coord.v[1], &tmp); + ibz_mul(&theta.coord.v[2], &theta.coord.v[2], &tmp); + ibz_mul(&theta.coord.v[3], &theta.coord.v[3], &tmp); // applying theta endomorphism_application_even_basis(&bas2, 0, &Fv_codomain.E1, &theta, TORSION_EVEN_POWER); @@ -1092,10 +1092,10 @@ dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, ibz_mul(&tmp, &tmp, &CONNECTING_IDEALS[index_order1].norm); } ibz_invmod(&tmp, &tmp, &TORSION_PLUS_2POWER); - ibz_mul(&beta1->coord[0], &beta1->coord[0], &tmp); - ibz_mul(&beta1->coord[1], &beta1->coord[1], &tmp); - ibz_mul(&beta1->coord[2], &beta1->coord[2], &tmp); - ibz_mul(&beta1->coord[3], &beta1->coord[3], &tmp); + ibz_mul(&beta1->coord.v[0], &beta1->coord.v[0], &tmp); + ibz_mul(&beta1->coord.v[1], &beta1->coord.v[1], &tmp); + ibz_mul(&beta1->coord.v[2], &beta1->coord.v[2], &tmp); + ibz_mul(&beta1->coord.v[3], &beta1->coord.v[3], &tmp); endomorphism_application_even_basis(basis, 0, codomain, beta1, TORSION_EVEN_POWER); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim4.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim4.c index 495dc2dcb2..b024a7d46e 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim4.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim4.c @@ -11,16 +11,16 @@ ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t ibz_mat_4x4_init(&mat); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&(mat[i][j]), 0); + ibz_set(&(mat.m[i][j]), 0); for (int k = 0; k < 4; k++) { - ibz_mul(&prod, &((*a)[i][k]), &((*b)[k][j])); - ibz_add(&(mat[i][j]), &(mat[i][j]), &prod); + ibz_mul(&prod, &(a->m[i][k]), &(b->m[k][j])); + ibz_add(&(mat.m[i][j]), &(mat.m[i][j]), &prod); } } } for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&((*res)[i][j]), &(mat[i][j])); + ibz_copy(&(res->m[i][j]), &(mat.m[i][j])); } } ibz_mat_4x4_finalize(&mat); @@ -31,61 +31,61 @@ ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t void ibz_vec_4_set(ibz_vec_4_t *vec, int32_t coord0, int32_t 
coord1, int32_t coord2, int32_t coord3) { - ibz_set(&((*vec)[0]), coord0); - ibz_set(&((*vec)[1]), coord1); - ibz_set(&((*vec)[2]), coord2); - ibz_set(&((*vec)[3]), coord3); + ibz_set(&(vec->v[0]), coord0); + ibz_set(&(vec->v[1]), coord1); + ibz_set(&(vec->v[2]), coord2); + ibz_set(&(vec->v[3]), coord3); } void ibz_vec_4_copy(ibz_vec_4_t *new, const ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_copy(&((*new)[i]), &((*vec)[i])); + ibz_copy(&(new->v[i]), &(vec->v[i])); } } void ibz_vec_4_copy_ibz(ibz_vec_4_t *res, const ibz_t *coord0, const ibz_t *coord1, const ibz_t *coord2, const ibz_t *coord3) { - ibz_copy(&((*res)[0]), coord0); - ibz_copy(&((*res)[1]), coord1); - ibz_copy(&((*res)[2]), coord2); - ibz_copy(&((*res)[3]), coord3); + ibz_copy(&(res->v[0]), coord0); + ibz_copy(&(res->v[1]), coord1); + ibz_copy(&(res->v[2]), coord2); + ibz_copy(&(res->v[3]), coord3); } void ibz_vec_4_content(ibz_t *content, const ibz_vec_4_t *v) { - ibz_gcd(content, &((*v)[0]), &((*v)[1])); - ibz_gcd(content, &((*v)[2]), content); - ibz_gcd(content, &((*v)[3]), content); + ibz_gcd(content, &(v->v[0]), &(v->v[1])); + ibz_gcd(content, &(v->v[2]), content); + ibz_gcd(content, &(v->v[3]), content); } void ibz_vec_4_negate(ibz_vec_4_t *neg, const ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_neg(&((*neg)[i]), &((*vec)[i])); + ibz_neg(&(neg->v[i]), &(vec->v[i])); } } void ibz_vec_4_add(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) { - ibz_add(&((*res)[0]), &((*a)[0]), &((*b)[0])); - ibz_add(&((*res)[1]), &((*a)[1]), &((*b)[1])); - ibz_add(&((*res)[2]), &((*a)[2]), &((*b)[2])); - ibz_add(&((*res)[3]), &((*a)[3]), &((*b)[3])); + ibz_add(&(res->v[0]), &(a->v[0]), &(b->v[0])); + ibz_add(&(res->v[1]), &(a->v[1]), &(b->v[1])); + ibz_add(&(res->v[2]), &(a->v[2]), &(b->v[2])); + ibz_add(&(res->v[3]), &(a->v[3]), &(b->v[3])); } void ibz_vec_4_sub(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) { - ibz_sub(&((*res)[0]), &((*a)[0]), &((*b)[0])); - ibz_sub(&((*res)[1]), &((*a)[1]), &((*b)[1])); - ibz_sub(&((*res)[2]), &((*a)[2]), &((*b)[2])); - ibz_sub(&((*res)[3]), &((*a)[3]), &((*b)[3])); + ibz_sub(&(res->v[0]), &(a->v[0]), &(b->v[0])); + ibz_sub(&(res->v[1]), &(a->v[1]), &(b->v[1])); + ibz_sub(&(res->v[2]), &(a->v[2]), &(b->v[2])); + ibz_sub(&(res->v[3]), &(a->v[3]), &(b->v[3])); } int @@ -93,7 +93,7 @@ ibz_vec_4_is_zero(const ibz_vec_4_t *x) { int res = 1; for (int i = 0; i < 4; i++) { - res &= ibz_is_zero(&((*x)[i])); + res &= ibz_is_zero(&(x->v[i])); } return (res); } @@ -110,12 +110,12 @@ ibz_vec_4_linear_combination(ibz_vec_4_t *lc, ibz_vec_4_init(&sums); ibz_init(&prod); for (int i = 0; i < 4; i++) { - ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); - ibz_mul(&prod, coeff_b, &((*vec_b)[i])); - ibz_add(&(sums[i]), &(sums[i]), &prod); + ibz_mul(&(sums.v[i]), coeff_a, &(vec_a->v[i])); + ibz_mul(&prod, coeff_b, &(vec_b->v[i])); + ibz_add(&(sums.v[i]), &(sums.v[i]), &prod); } for (int i = 0; i < 4; i++) { - ibz_copy(&((*lc)[i]), &(sums[i])); + ibz_copy(&(lc->v[i]), &(sums.v[i])); } ibz_finalize(&prod); ibz_vec_4_finalize(&sums); @@ -125,7 +125,7 @@ void ibz_vec_4_scalar_mul(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_mul(&((*prod)[i]), &((*vec)[i]), scalar); + ibz_mul(&(prod->v[i]), &(vec->v[i]), scalar); } } @@ -136,7 +136,7 @@ ibz_vec_4_scalar_div(ibz_vec_4_t *quot, const ibz_t *scalar, const ibz_vec_4_t * ibz_t r; ibz_init(&r); for (int i = 0; i < 4; i++) { - ibz_div(&((*quot)[i]), &r, &((*vec)[i]), scalar); + 
ibz_div(&(quot->v[i]), &r, &(vec->v[i]), scalar); res = res && ibz_is_zero(&r); } ibz_finalize(&r); @@ -148,7 +148,7 @@ ibz_mat_4x4_copy(ibz_mat_4x4_t *new, const ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&((*new)[i][j]), &((*mat)[i][j])); + ibz_copy(&(new->m[i][j]), &(mat->m[i][j])); } } } @@ -158,7 +158,7 @@ ibz_mat_4x4_negate(ibz_mat_4x4_t *neg, const ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_neg(&((*neg)[i][j]), &((*mat)[i][j])); + ibz_neg(&(neg->m[i][j]), &(mat->m[i][j])); } } } @@ -170,7 +170,7 @@ ibz_mat_4x4_transpose(ibz_mat_4x4_t *transposed, const ibz_mat_4x4_t *mat) ibz_mat_4x4_init(&work); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(work[i][j]), &((*mat)[j][i])); + ibz_copy(&(work.m[i][j]), &(mat->m[j][i])); } } ibz_mat_4x4_copy(transposed, &work); @@ -182,7 +182,7 @@ ibz_mat_4x4_zero(ibz_mat_4x4_t *zero) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&((*zero)[i][j]), 0); + ibz_set(&(zero->m[i][j]), 0); } } } @@ -192,9 +192,9 @@ ibz_mat_4x4_identity(ibz_mat_4x4_t *id) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&((*id)[i][j]), 0); + ibz_set(&(id->m[i][j]), 0); } - ibz_set(&((*id)[i][i]), 1); + ibz_set(&(id->m[i][i]), 1); } } @@ -204,7 +204,7 @@ ibz_mat_4x4_is_identity(const ibz_mat_4x4_t *mat) int res = 1; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - res = res && ibz_is_one(&((*mat)[i][j])) == (i == j); + res = res && ibz_is_one(&(mat->m[i][j])) == (i == j); } } return (res); @@ -216,7 +216,7 @@ ibz_mat_4x4_equal(const ibz_mat_4x4_t *mat1, const ibz_mat_4x4_t *mat2) int res = 0; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - res = res | ibz_cmp(&((*mat1)[i][j]), &((*mat2)[i][j])); + res = res | ibz_cmp(&(mat1->m[i][j]), &(mat2->m[i][j])); } } return (!res); @@ -227,7 +227,7 @@ ibz_mat_4x4_scalar_mul(ibz_mat_4x4_t *prod, const ibz_t *scalar, const ibz_mat_4 { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_mul(&((*prod)[i][j]), &((*mat)[i][j]), scalar); + ibz_mul(&(prod->m[i][j]), &(mat->m[i][j]), scalar); } } } @@ -237,10 +237,10 @@ ibz_mat_4x4_gcd(ibz_t *gcd, const ibz_mat_4x4_t *mat) { ibz_t d; ibz_init(&d); - ibz_copy(&d, &((*mat)[0][0])); + ibz_copy(&d, &(mat->m[0][0])); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_gcd(&d, &d, &((*mat)[i][j])); + ibz_gcd(&d, &d, &(mat->m[i][j])); } } ibz_copy(gcd, &d); @@ -255,7 +255,7 @@ ibz_mat_4x4_scalar_div(ibz_mat_4x4_t *quot, const ibz_t *scalar, const ibz_mat_4 ibz_init(&r); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_div(&((*quot)[i][j]), &r, &((*mat)[i][j]), scalar); + ibz_div(&(quot->m[i][j]), &r, &(mat->m[i][j]), scalar); res = res && ibz_is_zero(&r); } } @@ -325,17 +325,17 @@ ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_ // compute some 2x2 minors, store them in s and c for (int i = 0; i < 3; i++) { - ibz_mat_2x2_det_from_ibz(&(s[i]), &((*mat)[0][0]), &((*mat)[0][i + 1]), &((*mat)[1][0]), &((*mat)[1][i + 1])); - ibz_mat_2x2_det_from_ibz(&(c[i]), &((*mat)[2][0]), &((*mat)[2][i + 1]), &((*mat)[3][0]), &((*mat)[3][i + 1])); + ibz_mat_2x2_det_from_ibz(&(s[i]), &(mat->m[0][0]), &(mat->m[0][i + 1]), &(mat->m[1][0]), &(mat->m[1][i + 1])); + ibz_mat_2x2_det_from_ibz(&(c[i]), &(mat->m[2][0]), &(mat->m[2][i + 1]), &(mat->m[3][0]), &(mat->m[3][i + 1])); } for (int i = 0; i < 2; i++) { ibz_mat_2x2_det_from_ibz( - &(s[3 + i]), 
&((*mat)[0][1]), &((*mat)[0][2 + i]), &((*mat)[1][1]), &((*mat)[1][2 + i])); + &(s[3 + i]), &(mat->m[0][1]), &(mat->m[0][2 + i]), &(mat->m[1][1]), &(mat->m[1][2 + i])); ibz_mat_2x2_det_from_ibz( - &(c[3 + i]), &((*mat)[2][1]), &((*mat)[2][2 + i]), &((*mat)[3][1]), &((*mat)[3][2 + i])); + &(c[3 + i]), &(mat->m[2][1]), &(mat->m[2][2 + i]), &(mat->m[3][1]), &(mat->m[3][2 + i])); } - ibz_mat_2x2_det_from_ibz(&(s[5]), &((*mat)[0][2]), &((*mat)[0][3]), &((*mat)[1][2]), &((*mat)[1][3])); - ibz_mat_2x2_det_from_ibz(&(c[5]), &((*mat)[2][2]), &((*mat)[2][3]), &((*mat)[3][2]), &((*mat)[3][3])); + ibz_mat_2x2_det_from_ibz(&(s[5]), &(mat->m[0][2]), &(mat->m[0][3]), &(mat->m[1][2]), &(mat->m[1][3])); + ibz_mat_2x2_det_from_ibz(&(c[5]), &(mat->m[2][2]), &(mat->m[2][3]), &(mat->m[3][2]), &(mat->m[3][3])); // compute det ibz_set(&work_det, 0); @@ -351,39 +351,39 @@ ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_ for (int j = 0; j < 4; j++) { for (int k = 0; k < 2; k++) { if ((k + j + 1) % 2 == 1) { - ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), - &((*mat)[1 - k][(j == 0)]), + ibz_inv_dim4_make_coeff_pmp(&(work.m[j][k]), + &(mat->m[1 - k][(j == 0)]), &(c[6 - j - (j == 0)]), - &((*mat)[1 - k][2 - (j > 1)]), + &(mat->m[1 - k][2 - (j > 1)]), &(c[4 - j - (j == 1)]), - &((*mat)[1 - k][3 - (j == 3)]), + &(mat->m[1 - k][3 - (j == 3)]), &(c[3 - j - (j == 1) - (j == 2)])); } else { - ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), - &((*mat)[1 - k][(j == 0)]), + ibz_inv_dim4_make_coeff_mpm(&(work.m[j][k]), + &(mat->m[1 - k][(j == 0)]), &(c[6 - j - (j == 0)]), - &((*mat)[1 - k][2 - (j > 1)]), + &(mat->m[1 - k][2 - (j > 1)]), &(c[4 - j - (j == 1)]), - &((*mat)[1 - k][3 - (j == 3)]), + &(mat->m[1 - k][3 - (j == 3)]), &(c[3 - j - (j == 1) - (j == 2)])); } } for (int k = 2; k < 4; k++) { if ((k + j + 1) % 2 == 1) { - ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), - &((*mat)[3 - (k == 3)][(j == 0)]), + ibz_inv_dim4_make_coeff_pmp(&(work.m[j][k]), + &(mat->m[3 - (k == 3)][(j == 0)]), &(s[6 - j - (j == 0)]), - &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(mat->m[3 - (k == 3)][2 - (j > 1)]), &(s[4 - j - (j == 1)]), - &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(mat->m[3 - (k == 3)][3 - (j == 3)]), &(s[3 - j - (j == 1) - (j == 2)])); } else { - ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), - &((*mat)[3 - (k == 3)][(j == 0)]), + ibz_inv_dim4_make_coeff_mpm(&(work.m[j][k]), + &(mat->m[3 - (k == 3)][(j == 0)]), &(s[6 - j - (j == 0)]), - &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(mat->m[3 - (k == 3)][2 - (j > 1)]), &(s[4 - j - (j == 1)]), - &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(mat->m[3 - (k == 3)][3 - (j == 3)]), &(s[3 - j - (j == 1) - (j == 2)])); } } @@ -418,8 +418,8 @@ ibz_mat_4x4_eval(ibz_vec_4_t *res, const ibz_mat_4x4_t *mat, const ibz_vec_4_t * // assume initialization to 0 for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_mul(&prod, &(*mat)[i][j], &(*vec)[j]); - ibz_add(&(sum[i]), &(sum[i]), &prod); + ibz_mul(&prod, &mat->m[i][j], &vec->v[j]); + ibz_add(&(sum.v[i]), &(sum.v[i]), &prod); } } ibz_vec_4_copy(res, &sum); @@ -437,8 +437,8 @@ ibz_mat_4x4_eval_t(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_mat_4x4_t // assume initialization to 0 for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_mul(&prod, &(*mat)[j][i], &(*vec)[j]); - ibz_add(&(sum[i]), &(sum[i]), &prod); + ibz_mul(&prod, &mat->m[j][i], &vec->v[j]); + ibz_add(&(sum.v[i]), &(sum.v[i]), &prod); } } ibz_vec_4_copy(res, &sum); @@ -457,14 +457,14 @@ quat_qf_eval(ibz_t *res, const ibz_mat_4x4_t *qf, const 
ibz_vec_4_t *coord) ibz_vec_4_init(&sum); ibz_mat_4x4_eval(&sum, qf, coord); for (int i = 0; i < 4; i++) { - ibz_mul(&prod, &(sum[i]), &(*coord)[i]); + ibz_mul(&prod, &(sum.v[i]), &coord->v[i]); if (i > 0) { - ibz_add(&(sum[0]), &(sum[0]), &prod); + ibz_add(&(sum.v[0]), &(sum.v[0]), &prod); } else { - ibz_copy(&sum[0], &prod); + ibz_copy(&sum.v[0], &prod); } } - ibz_copy(res, &sum[0]); + ibz_copy(res, &sum.v[0]); ibz_finalize(&prod); ibz_vec_4_finalize(&sum); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/encode_signature.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/encode_signature.c index 112c695941..3a630cfd58 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/encode_signature.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/encode_signature.c @@ -157,17 +157,17 @@ secret_key_to_bytes(byte_t *enc, const secret_key_t *sk, const public_key_t *pk) ibz_finalize(&gcd); } #endif - enc = ibz_to_bytes(enc, &gen.coord[0], FP_ENCODED_BYTES, true); - enc = ibz_to_bytes(enc, &gen.coord[1], FP_ENCODED_BYTES, true); - enc = ibz_to_bytes(enc, &gen.coord[2], FP_ENCODED_BYTES, true); - enc = ibz_to_bytes(enc, &gen.coord[3], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[0], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[1], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[2], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[3], FP_ENCODED_BYTES, true); quat_alg_elem_finalize(&gen); } - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][0], TORSION_2POWER_BYTES, false); - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][1], TORSION_2POWER_BYTES, false); - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][0], TORSION_2POWER_BYTES, false); - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][1], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[0][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[0][1], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[1][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[1][1], TORSION_2POWER_BYTES, false); assert(enc - start == SECRETKEY_BYTES); } @@ -187,19 +187,19 @@ secret_key_from_bytes(secret_key_t *sk, public_key_t *pk, const byte_t *enc) quat_alg_elem_t gen; quat_alg_elem_init(&gen); enc = ibz_from_bytes(&norm, enc, FP_ENCODED_BYTES, false); - enc = ibz_from_bytes(&gen.coord[0], enc, FP_ENCODED_BYTES, true); - enc = ibz_from_bytes(&gen.coord[1], enc, FP_ENCODED_BYTES, true); - enc = ibz_from_bytes(&gen.coord[2], enc, FP_ENCODED_BYTES, true); - enc = ibz_from_bytes(&gen.coord[3], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[0], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[1], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[2], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[3], enc, FP_ENCODED_BYTES, true); quat_lideal_create(&sk->secret_ideal, &gen, &norm, &MAXORD_O0, &QUATALG_PINFTY); ibz_finalize(&norm); quat_alg_elem_finalize(&gen); } - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][0], enc, TORSION_2POWER_BYTES, false); - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][1], enc, TORSION_2POWER_BYTES, false); - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][0], enc, TORSION_2POWER_BYTES, false); - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][1], enc, TORSION_2POWER_BYTES, false); + enc = 
ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[0][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[0][1], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[1][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[1][1], enc, TORSION_2POWER_BYTES, false); assert(enc - start == SECRETKEY_BYTES); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/endomorphism_action.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/endomorphism_action.c index 8aafeac12b..a598a89c0e 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/endomorphism_action.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/endomorphism_action.c @@ -261,223 +261,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x97d1,0x8f9c,0x8477,0x3a,0x39d2,0x66ae,0xabd6,0x13da,0xf153,0xb9e7,0xf8db,0x5f9f,0x36f7,0xfcb2,0xa4f0,0x62b9,0x5c07,0x3694,0x539d,0xe8c5,0x7631,0xf16c,0xc691,0x9a}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x97d1,0x8f9c,0x8477,0x3a,0x39d2,0x66ae,0xabd6,0x13da,0xf153,0xb9e7,0xf8db,0x5f9f,0x36f7,0xfcb2,0xa4f0,0x62b9,0x5c07,0x3694,0x539d,0xe8c5,0x7631,0xf16c,0xc691,0x9a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8f9c97d1,0x3a8477,0x66ae39d2,0x13daabd6,0xb9e7f153,0x5f9ff8db,0xfcb236f7,0x62b9a4f0,0x36945c07,0xe8c5539d,0xf16c7631,0x9ac691}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8f9c97d1,0x3a8477,0x66ae39d2,0x13daabd6,0xb9e7f153,0x5f9ff8db,0xfcb236f7,0x62b9a4f0,0x36945c07,0xe8c5539d,0xf16c7631,0x9ac691}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3a84778f9c97d1,0x13daabd666ae39d2,0x5f9ff8dbb9e7f153,0x62b9a4f0fcb236f7,0xe8c5539d36945c07,0x9ac691f16c7631}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3a84778f9c97d1,0x13daabd666ae39d2,0x5f9ff8dbb9e7f153,0x62b9a4f0fcb236f7,0xe8c5539d36945c07,0x9ac691f16c7631}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1ac2,0xbac6,0x4a43,0x76df,0xe925,0x4a2d,0x1cf8,0xd32d,0x7867,0x1dc0,0xc02f,0xdf8b,0xf122,0x4f0c,0xe07d,0x4a9e,0xa97,0x7ce2,0x8791,0x3570,0x1749,0x519b,0x34cc,0x66}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1ac2,0xbac6,0x4a43,0x76df,0xe925,0x4a2d,0x1cf8,0xd32d,0x7867,0x1dc0,0xc02f,0xdf8b,0xf122,0x4f0c,0xe07d,0x4a9e,0xa97,0x7ce2,0x8791,0x3570,0x1749,0x519b,0x34cc,0x66}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbac61ac2,0x76df4a43,0x4a2de925,0xd32d1cf8,0x1dc07867,0xdf8bc02f,0x4f0cf122,0x4a9ee07d,0x7ce20a97,0x35708791,0x519b1749,0x6634cc}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbac61ac2,0x76df4a43,0x4a2de925,0xd32d1cf8,0x1dc07867,0xdf8bc02f,0x4f0cf122,0x4a9ee07d,0x7ce20a97,0x35708791,0x519b1749,0x6634cc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x76df4a43bac61ac2,0xd32d1cf84a2de925,0xdf8bc02f1dc07867,0x4a9ee07d4f0cf122,0x357087917ce20a97,0x6634cc519b1749}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x76df4a43bac61ac2,0xd32d1cf84a2de925,0xdf8bc02f1dc07867,0x4a9ee07d4f0cf122,0x357087917ce20a97,0x6634cc519b1749}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xfb0f,0x234,0xa481,0x9c61,0x4bd1,0xcd58,0x3a72,0xe38c,0xbe7b,0xea3,0xf102,0xdc99,0xf180,0xb229,0x5d86,0xef91,0x46c4,0x8831,0xa9d5,0xf66f,0xa451,0x6c02,0x9ebd,0xfc}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xfb0f,0x234,0xa481,0x9c61,0x4bd1,0xcd58,0x3a72,0xe38c,0xbe7b,0xea3,0xf102,0xdc99,0xf180,0xb229,0x5d86,0xef91,0x46c4,0x8831,0xa9d5,0xf66f,0xa451,0x6c02,0x9ebd,0xfc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x234fb0f,0x9c61a481,0xcd584bd1,0xe38c3a72,0xea3be7b,0xdc99f102,0xb229f180,0xef915d86,0x883146c4,0xf66fa9d5,0x6c02a451,0xfc9ebd}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x234fb0f,0x9c61a481,0xcd584bd1,0xe38c3a72,0xea3be7b,0xdc99f102,0xb229f180,0xef915d86,0x883146c4,0xf66fa9d5,0x6c02a451,0xfc9ebd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c61a4810234fb0f,0xe38c3a72cd584bd1,0xdc99f1020ea3be7b,0xef915d86b229f180,0xf66fa9d5883146c4,0xfc9ebd6c02a451}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c61a4810234fb0f,0xe38c3a72cd584bd1,0xdc99f1020ea3be7b,0xef915d86b229f180,0xf66fa9d5883146c4,0xfc9ebd6c02a451}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x682f,0x7063,0x7b88,0xffc5,0xc62d,0x9951,0x5429,0xec25,0xeac,0x4618,0x724,0xa060,0xc908,0x34d,0x5b0f,0x9d46,0xa3f8,0xc96b,0xac62,0x173a,0x89ce,0xe93,0x396e,0x65}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x682f,0x7063,0x7b88,0xffc5,0xc62d,0x9951,0x5429,0xec25,0xeac,0x4618,0x724,0xa060,0xc908,0x34d,0x5b0f,0x9d46,0xa3f8,0xc96b,0xac62,0x173a,0x89ce,0xe93,0x396e,0x65}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7063682f,0xffc57b88,0x9951c62d,0xec255429,0x46180eac,0xa0600724,0x34dc908,0x9d465b0f,0xc96ba3f8,0x173aac62,0xe9389ce,0x65396e}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7063682f,0xffc57b88,0x9951c62d,0xec255429,0x46180eac,0xa0600724,0x34dc908,0x9d465b0f,0xc96ba3f8,0x173aac62,0xe9389ce,0x65396e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xffc57b887063682f,0xec2554299951c62d,0xa060072446180eac,0x9d465b0f034dc908,0x173aac62c96ba3f8,0x65396e0e9389ce}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xffc57b887063682f,0xec2554299951c62d,0xa060072446180eac,0x9d465b0f034dc908,0x173aac62c96ba3f8,0x65396e0e9389ce}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x6f75,0xc742,0x1abb,0xc3b2,0x4bff,0xf015,0x66b,0xc51b,0xacd6,0x30c2,0xf641,0x625b,0x2e88,0xbe5,0x5121,0xbe40,0x8ac2,0x755b,0xb8c9,0x4eb6,0xb07,0x46b6,0x84cf,0x47}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x6f75,0xc742,0x1abb,0xc3b2,0x4bff,0xf015,0x66b,0xc51b,0xacd6,0x30c2,0xf641,0x625b,0x2e88,0xbe5,0x5121,0xbe40,0x8ac2,0x755b,0xb8c9,0x4eb6,0xb07,0x46b6,0x84cf,0x47}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc7426f75,0xc3b21abb,0xf0154bff,0xc51b066b,0x30c2acd6,0x625bf641,0xbe52e88,0xbe405121,0x755b8ac2,0x4eb6b8c9,0x46b60b07,0x4784cf}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc7426f75,0xc3b21abb,0xf0154bff,0xc51b066b,0x30c2acd6,0x625bf641,0xbe52e88,0xbe405121,0x755b8ac2,0x4eb6b8c9,0x46b60b07,0x4784cf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0xc3b21abbc7426f75,0xc51b066bf0154bff,0x625bf64130c2acd6,0xbe4051210be52e88,0x4eb6b8c9755b8ac2,0x4784cf46b60b07}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc3b21abbc7426f75,0xc51b066bf0154bff,0x625bf64130c2acd6,0xbe4051210be52e88,0x4eb6b8c9755b8ac2,0x4784cf46b60b07}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x9db8,0x479b,0xe350,0xae1e,0x4f92,0x6572,0x60a4,0x89ed,0x12f4,0xb88d,0x64b6,0xf9ca,0x26b,0xc086,0x83b8,0xb2c7,0x88a8,0xe99b,0x57b3,0x9017,0xe033,0x9d5d,0x5de6,0x37}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x9db8,0x479b,0xe350,0xae1e,0x4f92,0x6572,0x60a4,0x89ed,0x12f4,0xb88d,0x64b6,0xf9ca,0x26b,0xc086,0x83b8,0xb2c7,0x88a8,0xe99b,0x57b3,0x9017,0xe033,0x9d5d,0x5de6,0x37}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x479b9db8,0xae1ee350,0x65724f92,0x89ed60a4,0xb88d12f4,0xf9ca64b6,0xc086026b,0xb2c783b8,0xe99b88a8,0x901757b3,0x9d5de033,0x375de6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x479b9db8,0xae1ee350,0x65724f92,0x89ed60a4,0xb88d12f4,0xf9ca64b6,0xc086026b,0xb2c783b8,0xe99b88a8,0x901757b3,0x9d5de033,0x375de6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xae1ee350479b9db8,0x89ed60a465724f92,0xf9ca64b6b88d12f4,0xb2c783b8c086026b,0x901757b3e99b88a8,0x375de69d5de033}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xae1ee350479b9db8,0x89ed60a465724f92,0xf9ca64b6b88d12f4,0xb2c783b8c086026b,0x901757b3e99b88a8,0x375de69d5de033}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x23f7,0x1d02,0x3431,0x354e,0xba31,0x23a4,0xe6c4,0x6a9c,0x64c,0xea8,0x419f,0xe54f,0x3cb9,0xc02d,0x3caf,0xe7a3,0x2d32,0x31d4,0xed80,0x47d9,0x2086,0x69f4,0x80d3,0x25}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x23f7,0x1d02,0x3431,0x354e,0xba31,0x23a4,0xe6c4,0x6a9c,0x64c,0xea8,0x419f,0xe54f,0x3cb9,0xc02d,0x3caf,0xe7a3,0x2d32,0x31d4,0xed80,0x47d9,0x2086,0x69f4,0x80d3,0x25}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1d0223f7,0x354e3431,0x23a4ba31,0x6a9ce6c4,0xea8064c,0xe54f419f,0xc02d3cb9,0xe7a33caf,0x31d42d32,0x47d9ed80,0x69f42086,0x2580d3}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1d0223f7,0x354e3431,0x23a4ba31,0x6a9ce6c4,0xea8064c,0xe54f419f,0xc02d3cb9,0xe7a33caf,0x31d42d32,0x47d9ed80,0x69f42086,0x2580d3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x354e34311d0223f7,0x6a9ce6c423a4ba31,0xe54f419f0ea8064c,0xe7a33cafc02d3cb9,0x47d9ed8031d42d32,0x2580d369f42086}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x354e34311d0223f7,0x6a9ce6c423a4ba31,0xe54f419f0ea8064c,0xe7a33cafc02d3cb9,0x47d9ed8031d42d32,0x2580d369f42086}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x908b,0x38bd,0xe544,0x3c4d,0xb400,0xfea,0xf994,0x3ae4,0x5329,0xcf3d,0x9be,0x9da4,0xd177,0xf41a,0xaede,0x41bf,0x753d,0x8aa4,0x4736,0xb149,0xf4f8,0xb949,0x7b30,0xb8}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x908b,0x38bd,0xe544,0x3c4d,0xb400,0xfea,0xf994,0x3ae4,0x5329,0xcf3d,0x9be,0x9da4,0xd177,0xf41a,0xaede,0x41bf,0x753d,0x8aa4,0x4736,0xb149,0xf4f8,0xb949,0x7b30,0xb8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x38bd908b,0x3c4de544,0xfeab400,0x3ae4f994,0xcf3d5329,0x9da409be,0xf41ad177,0x41bfaede,0x8aa4753d,0xb1494736,0xb949f4f8,0xb87b30}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x38bd908b,0x3c4de544,0xfeab400,0x3ae4f994,0xcf3d5329,0x9da409be,0xf41ad177,0x41bfaede,0x8aa4753d,0xb1494736,0xb949f4f8,0xb87b30}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3c4de54438bd908b,0x3ae4f9940feab400,0x9da409becf3d5329,0x41bfaedef41ad177,0xb14947368aa4753d,0xb87b30b949f4f8}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3c4de54438bd908b,0x3ae4f9940feab400,0x9da409becf3d5329,0x41bfaedef41ad177,0xb14947368aa4753d,0xb87b30b949f4f8}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}} 
+{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x97d1,0x8f9c,0x8477,0x3a,0x39d2,0x66ae,0xabd6,0x13da,0xf153,0xb9e7,0xf8db,0x5f9f,0x36f7,0xfcb2,0xa4f0,0x62b9,0x5c07,0x3694,0x539d,0xe8c5,0x7631,0xf16c,0xc691,0x9a}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x97d1,0x8f9c,0x8477,0x3a,0x39d2,0x66ae,0xabd6,0x13da,0xf153,0xb9e7,0xf8db,0x5f9f,0x36f7,0xfcb2,0xa4f0,0x62b9,0x5c07,0x3694,0x539d,0xe8c5,0x7631,0xf16c,0xc691,0x9a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8f9c97d1,0x3a8477,0x66ae39d2,0x13daabd6,0xb9e7f153,0x5f9ff8db,0xfcb236f7,0x62b9a4f0,0x36945c07,0xe8c5539d,0xf16c7631,0x9ac691}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8f9c97d1,0x3a8477,0x66ae39d2,0x13daabd6,0xb9e7f153,0x5f9ff8db,0xfcb236f7,0x62b9a4f0,0x36945c07,0xe8c5539d,0xf16c7631,0x9ac691}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3a84778f9c97d1,0x13daabd666ae39d2,0x5f9ff8dbb9e7f153,0x62b9a4f0fcb236f7,0xe8c5539d36945c07,0x9ac691f16c7631}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x3a84778f9c97d1,0x13daabd666ae39d2,0x5f9ff8dbb9e7f153,0x62b9a4f0fcb236f7,0xe8c5539d36945c07,0x9ac691f16c7631}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1ac2,0xbac6,0x4a43,0x76df,0xe925,0x4a2d,0x1cf8,0xd32d,0x7867,0x1dc0,0xc02f,0xdf8b,0xf122,0x4f0c,0xe07d,0x4a9e,0xa97,0x7ce2,0x8791,0x3570,0x1749,0x519b,0x34cc,0x66}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1ac2,0xbac6,0x4a43,0x76df,0xe925,0x4a2d,0x1cf8,0xd32d,0x7867,0x1dc0,0xc02f,0xdf8b,0xf122,0x4f0c,0xe07d,0x4a9e,0xa97,0x7ce2,0x8791,0x3570,0x1749,0x519b,0x34cc,0x66}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbac61ac2,0x76df4a43,0x4a2de925,0xd32d1cf8,0x1dc07867,0xdf8bc02f,0x4f0cf122,0x4a9ee07d,0x7ce20a97,0x35708791,0x519b1749,0x6634cc}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbac61ac2,0x76df4a43,0x4a2de925,0xd32d1cf8,0x1dc07867,0xdf8bc02f,0x4f0cf122,0x4a9ee07d,0x7ce20a97,0x35708791,0x519b1749,0x6634cc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x76df4a43bac61ac2,0xd32d1cf84a2de925,0xdf8bc02f1dc07867,0x4a9ee07d4f0cf122,0x357087917ce20a97,0x6634cc519b1749}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x76df4a43bac61ac2,0xd32d1cf84a2de925,0xdf8bc02f1dc07867,0x4a9ee07d4f0cf122,0x357087917ce20a97,0x6634cc519b1749}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xfb0f,0x234,0xa481,0x9c61,0x4bd1,0xcd58,0x3a72,0xe38c,0xbe7b,0xea3,0xf102,0xdc99,0xf180,0xb229,0x5d86,0xef91,0x46c4,0x8831,0xa9d5,0xf66f,0xa451,0x6c02,0x9ebd,0xfc}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xfb0f,0x234,0xa481,0x9c61,0x4bd1,0xcd58,0x3a72,0xe38c,0xbe7b,0xea3,0xf102,0xdc99,0xf180,0xb229,0x5d86,0xef91,0x46c4,0x8831,0xa9d5,0xf66f,0xa451,0x6c02,0x9ebd,0xfc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x234fb0f,0x9c61a481,0xcd584bd1,0xe38c3a72,0xea3be7b,0xdc99f102,0xb229f180,0xef915d86,0x883146c4,0xf66fa9d5,0x6c02a451,0xfc9ebd}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x234fb0f,0x9c61a481,0xcd584bd1,0xe38c3a72,0xea3be7b,0xdc99f102,0xb229f180,0xef915d86,0x883146c4,0xf66fa9d5,0x6c02a451,0xfc9ebd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c61a4810234fb0f,0xe38c3a72cd584bd1,0xdc99f1020ea3be7b,0xef915d86b229f180,0xf66fa9d5883146c4,0xfc9ebd6c02a451}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c61a4810234fb0f,0xe38c3a72cd584bd1,0xdc99f1020ea3be7b,0xef915d86b229f180,0xf66fa9d5883146c4,0xfc9ebd6c02a451}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x682f,0x7063,0x7b88,0xffc5,0xc62d,0x9951,0x5429,0xec25,0xeac,0x4618,0x724,0xa060,0xc908,0x34d,0x5b0f,0x9d46,0xa3f8,0xc96b,0xac62,0x173a,0x89ce,0xe93,0x396e,0x65}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x682f,0x7063,0x7b88,0xffc5,0xc62d,0x9951,0x5429,0xec25,0xeac,0x4618,0x724,0xa060,0xc908,0x34d,0x5b0f,0x9d46,0xa3f8,0xc96b,0xac62,0x173a,0x89ce,0xe93,0x396e,0x65}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7063682f,0xffc57b88,0x9951c62d,0xec255429,0x46180eac,0xa0600724,0x34dc908,0x9d465b0f,0xc96ba3f8,0x173aac62,0xe9389ce,0x65396e}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x7063682f,0xffc57b88,0x9951c62d,0xec255429,0x46180eac,0xa0600724,0x34dc908,0x9d465b0f,0xc96ba3f8,0x173aac62,0xe9389ce,0x65396e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xffc57b887063682f,0xec2554299951c62d,0xa060072446180eac,0x9d465b0f034dc908,0x173aac62c96ba3f8,0x65396e0e9389ce}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xffc57b887063682f,0xec2554299951c62d,0xa060072446180eac,0x9d465b0f034dc908,0x173aac62c96ba3f8,0x65396e0e9389ce}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x83a3,0xab6f,0x4f99,0xe1f6,0xc2e8,0x2b61,0xd921,0xec7a,0x4f14,0x7555,0xf78e,0xe0fd,0xb2bf,0x44b,0xfb09,0x107c,0xf365,0x55f7,0x633,0x9bbe,0x409c,0x9c11,0x25b0,0xf1}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x83a3,0xab6f,0x4f99,0xe1f6,0xc2e8,0x2b61,0xd921,0xec7a,0x4f14,0x7555,0xf78e,0xe0fd,0xb2bf,0x44b,0xfb09,0x107c,0xf365,0x55f7,0x633,0x9bbe,0x409c,0x9c11,0x25b0,0xf1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xab6f83a3,0xe1f64f99,0x2b61c2e8,0xec7ad921,0x75554f14,0xe0fdf78e,0x44bb2bf,0x107cfb09,0x55f7f365,0x9bbe0633,0x9c11409c,0xf125b0}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xab6f83a3,0xe1f64f99,0x2b61c2e8,0xec7ad921,0x75554f14,0xe0fdf78e,0x44bb2bf,0x107cfb09,0x55f7f365,0x9bbe0633,0x9c11409c,0xf125b0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe1f64f99ab6f83a3,0xec7ad9212b61c2e8,0xe0fdf78e75554f14,0x107cfb09044bb2bf,0x9bbe063355f7f365,0xf125b09c11409c}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe1f64f99ab6f83a3,0xec7ad9212b61c2e8,0xe0fdf78e75554f14,0x107cfb09044bb2bf,0x9bbe063355f7f365,0xf125b09c11409c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xdc3d,0x130,0x16ca,0x127f,0x1c5c,0x57d0,0x3ece,0x2e8d,0xc5ae,0xeb26,0x1272,0x6cab,0x79c7,0x7c9,0x321b,0xfeb3,0xc99f,0xb33e,0xefa2,0x62c3,0x7bbe,0x777c,0xc959,0x4e}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xdc3d,0x130,0x16ca,0x127f,0x1c5c,0x57d0,0x3ece,0x2e8d,0xc5ae,0xeb26,0x1272,0x6cab,0x79c7,0x7c9,0x321b,0xfeb3,0xc99f,0xb33e,0xefa2,0x62c3,0x7bbe,0x777c,0xc959,0x4e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x130dc3d,0x127f16ca,0x57d01c5c,0x2e8d3ece,0xeb26c5ae,0x6cab1272,0x7c979c7,0xfeb3321b,0xb33ec99f,0x62c3efa2,0x777c7bbe,0x4ec959}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x130dc3d,0x127f16ca,0x57d01c5c,0x2e8d3ece,0xeb26c5ae,0x6cab1272,0x7c979c7,0xfeb3321b,0xb33ec99f,0x62c3efa2,0x777c7bbe,0x4ec959}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x127f16ca0130dc3d,0x2e8d3ece57d01c5c,0x6cab1272eb26c5ae,0xfeb3321b07c979c7,0x62c3efa2b33ec99f,0x4ec959777c7bbe}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x127f16ca0130dc3d,0x2e8d3ece57d01c5c,0x6cab1272eb26c5ae,0xfeb3321b07c979c7,0x62c3efa2b33ec99f,0x4ec959777c7bbe}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8f83,0xf9b,0xec59,0x68d7,0x8301,0x787e,0x909b,0x2714,0xe264,0x8ea5,0x9950,0x60f4,0x971d,0x392b,0x4d1b,0xeb9a,0xb9fb,0xdd02,0xcbaa,0x1f24,0x626c,0x6afb,0xfc8,0x91}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x8f83,0xf9b,0xec59,0x68d7,0x8301,0x787e,0x909b,0x2714,0xe264,0x8ea5,0x9950,0x60f4,0x971d,0x392b,0x4d1b,0xeb9a,0xb9fb,0xdd02,0xcbaa,0x1f24,0x626c,0x6afb,0xfc8,0x91}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf9b8f83,0x68d7ec59,0x787e8301,0x2714909b,0x8ea5e264,0x60f49950,0x392b971d,0xeb9a4d1b,0xdd02b9fb,0x1f24cbaa,0x6afb626c,0x910fc8}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf9b8f83,0x68d7ec59,0x787e8301,0x2714909b,0x8ea5e264,0x60f49950,0x392b971d,0xeb9a4d1b,0xdd02b9fb,0x1f24cbaa,0x6afb626c,0x910fc8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x68d7ec590f9b8f83,0x2714909b787e8301,0x60f499508ea5e264,0xeb9a4d1b392b971d,0x1f24cbaadd02b9fb,0x910fc86afb626c}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x68d7ec590f9b8f83,0x2714909b787e8301,0x60f499508ea5e264,0xeb9a4d1b392b971d,0x1f24cbaadd02b9fb,0x910fc86afb626c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7c5d,0x5490,0xb066,0x1e09,0x3d17,0xd49e,0x26de,0x1385,0xb0eb,0x8aaa,0x871,0x1f02,0x4d40,0xfbb4,0x4f6,0xef83,0xc9a,0xaa08,0xf9cc,0x6441,0xbf63,0x63ee,0xda4f,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7c5d,0x5490,0xb066,0x1e09,0x3d17,0xd49e,0x26de,0x1385,0xb0eb,0x8aaa,0x871,0x1f02,0x4d40,0xfbb4,0x4f6,0xef83,0xc9a,0xaa08,0xf9cc,0x6441,0xbf63,0x63ee,0xda4f,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x54907c5d,0x1e09b066,0xd49e3d17,0x138526de,0x8aaab0eb,0x1f020871,0xfbb44d40,0xef8304f6,0xaa080c9a,0x6441f9cc,0x63eebf63,0xeda4f}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x54907c5d,0x1e09b066,0xd49e3d17,0x138526de,0x8aaab0eb,0x1f020871,0xfbb44d40,0xef8304f6,0xaa080c9a,0x6441f9cc,0x63eebf63,0xeda4f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1e09b06654907c5d,0x138526ded49e3d17,0x1f0208718aaab0eb,0xef8304f6fbb44d40,0x6441f9ccaa080c9a,0xeda4f63eebf63}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1e09b06654907c5d,0x138526ded49e3d17,0x1f0208718aaab0eb,0xef8304f6fbb44d40,0x6441f9ccaa080c9a,0xeda4f63eebf63}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}}} #endif , #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x194c, 0x43, 0xc70, 0x1c7a, 0xa7a, 0xdf7, 0x1564, 0x1809, 0x8e8, 0x3a5, 0x1399, 0xf0a, 0x914, 0x1a27, 0xb1c, 0xcd0, 0xfc, 0xaa4, 0xd87, 0x1ed2, 0x2c0, 0x8e4, 0x1b93, 0x1a3f, 0x1d9b, 0x1a00, 0xbce, 0x17d2, 0x1a07, 0xf} @@ -737,223 +737,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8d4f,0xfa2a,0xd4d0,0xce2,0x7436,0x8079,0xbe5f,0x2c18,0x5699,0x3569,0x7ecd,0xe106,0xea0,0x7d91,0xcc95,0x7293,0x5f1d,0xd812,0xb900,0x9858,0xd95a,0xc80e,0x1d4a,0xa}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8d4f,0xfa2a,0xd4d0,0xce2,0x7436,0x8079,0xbe5f,0x2c18,0x5699,0x3569,0x7ecd,0xe106,0xea0,0x7d91,0xcc95,0x7293,0x5f1d,0xd812,0xb900,0x9858,0xd95a,0xc80e,0x1d4a,0xa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfa2a8d4f,0xce2d4d0,0x80797436,0x2c18be5f,0x35695699,0xe1067ecd,0x7d910ea0,0x7293cc95,0xd8125f1d,0x9858b900,0xc80ed95a,0xa1d4a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfa2a8d4f,0xce2d4d0,0x80797436,0x2c18be5f,0x35695699,0xe1067ecd,0x7d910ea0,0x7293cc95,0xd8125f1d,0x9858b900,0xc80ed95a,0xa1d4a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xce2d4d0fa2a8d4f,0x2c18be5f80797436,0xe1067ecd35695699,0x7293cc957d910ea0,0x9858b900d8125f1d,0xa1d4ac80ed95a}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xce2d4d0fa2a8d4f,0x2c18be5f80797436,0xe1067ecd35695699,0x7293cc957d910ea0,0x9858b900d8125f1d,0xa1d4ac80ed95a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1eca,0x7027,0xf189,0x93f2,0x1c1f,0x5a6f,0xabbc,0x9e7d,0xa00a,0xc45,0x62a8,0x8db0,0x70cd,0x413f,0x400a,0xffaa,0x2ae6,0xd8a8,0xcb0e,0x69c4,0x7d9a,0x7a25,0xa679,0xad}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1eca,0x7027,0xf189,0x93f2,0x1c1f,0x5a6f,0xabbc,0x9e7d,0xa00a,0xc45,0x62a8,0x8db0,0x70cd,0x413f,0x400a,0xffaa,0x2ae6,0xd8a8,0xcb0e,0x69c4,0x7d9a,0x7a25,0xa679,0xad}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x70271eca,0x93f2f189,0x5a6f1c1f,0x9e7dabbc,0xc45a00a,0x8db062a8,0x413f70cd,0xffaa400a,0xd8a82ae6,0x69c4cb0e,0x7a257d9a,0xada679}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x70271eca,0x93f2f189,0x5a6f1c1f,0x9e7dabbc,0xc45a00a,0x8db062a8,0x413f70cd,0xffaa400a,0xd8a82ae6,0x69c4cb0e,0x7a257d9a,0xada679}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x93f2f18970271eca,0x9e7dabbc5a6f1c1f,0x8db062a80c45a00a,0xffaa400a413f70cd,0x69c4cb0ed8a82ae6,0xada6797a257d9a}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x93f2f18970271eca,0x9e7dabbc5a6f1c1f,0x8db062a80c45a00a,0xffaa400a413f70cd,0x69c4cb0ed8a82ae6,0xada6797a257d9a}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8949,0xfec5,0x9be5,0xc657,0xab10,0x2df6,0x3ee,0xc0f7,0x1a64,0x328e,0x3b3,0xb50c,0x650a,0xa23d,0x8ab,0xb103,0xf6e6,0xb27d,0x708a,0xc702,0x5301,0x2f00,0x9991,0x2f}}} 
+{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8949,0xfec5,0x9be5,0xc657,0xab10,0x2df6,0x3ee,0xc0f7,0x1a64,0x328e,0x3b3,0xb50c,0x650a,0xa23d,0x8ab,0xb103,0xf6e6,0xb27d,0x708a,0xc702,0x5301,0x2f00,0x9991,0x2f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfec58949,0xc6579be5,0x2df6ab10,0xc0f703ee,0x328e1a64,0xb50c03b3,0xa23d650a,0xb10308ab,0xb27df6e6,0xc702708a,0x2f005301,0x2f9991}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfec58949,0xc6579be5,0x2df6ab10,0xc0f703ee,0x328e1a64,0xb50c03b3,0xa23d650a,0xb10308ab,0xb27df6e6,0xc702708a,0x2f005301,0x2f9991}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc6579be5fec58949,0xc0f703ee2df6ab10,0xb50c03b3328e1a64,0xb10308aba23d650a,0xc702708ab27df6e6,0x2f99912f005301}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc6579be5fec58949,0xc0f703ee2df6ab10,0xb50c03b3328e1a64,0xb10308aba23d650a,0xc702708ab27df6e6,0x2f99912f005301}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x72b1,0x5d5,0x2b2f,0xf31d,0x8bc9,0x7f86,0x41a0,0xd3e7,0xa966,0xca96,0x8132,0x1ef9,0xf15f,0x826e,0x336a,0x8d6c,0xa0e2,0x27ed,0x46ff,0x67a7,0x26a5,0x37f1,0xe2b5,0xf5}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x72b1,0x5d5,0x2b2f,0xf31d,0x8bc9,0x7f86,0x41a0,0xd3e7,0xa966,0xca96,0x8132,0x1ef9,0xf15f,0x826e,0x336a,0x8d6c,0xa0e2,0x27ed,0x46ff,0x67a7,0x26a5,0x37f1,0xe2b5,0xf5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5d572b1,0xf31d2b2f,0x7f868bc9,0xd3e741a0,0xca96a966,0x1ef98132,0x826ef15f,0x8d6c336a,0x27eda0e2,0x67a746ff,0x37f126a5,0xf5e2b5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5d572b1,0xf31d2b2f,0x7f868bc9,0xd3e741a0,0xca96a966,0x1ef98132,0x826ef15f,0x8d6c336a,0x27eda0e2,0x67a746ff,0x37f126a5,0xf5e2b5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf31d2b2f05d572b1,0xd3e741a07f868bc9,0x1ef98132ca96a966,0x8d6c336a826ef15f,0x67a746ff27eda0e2,0xf5e2b537f126a5}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf31d2b2f05d572b1,0xd3e741a07f868bc9,0x1ef98132ca96a966,0x8d6c336a826ef15f,0x67a746ff27eda0e2,0xf5e2b537f126a5}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1ebb,0xe120,0x35fc,0x20e3,0xba01,0xff68,0x2ef4,0x62f6,0x5e93,0x94c1,0x3f93,0x804c,0xddc5,0x5b3d,0x1d31,0xf673,0x6e47,0x3d32,0x242c,0x6f7e,0x764b,0x63cb,0xbf4,0xf7}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1ebb,0xe120,0x35fc,0x20e3,0xba01,0xff68,0x2ef4,0x62f6,0x5e93,0x94c1,0x3f93,0x804c,0xddc5,0x5b3d,0x1d31,0xf673,0x6e47,0x3d32,0x242c,0x6f7e,0x764b,0x63cb,0xbf4,0xf7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe1201ebb,0x20e335fc,0xff68ba01,0x62f62ef4,0x94c15e93,0x804c3f93,0x5b3dddc5,0xf6731d31,0x3d326e47,0x6f7e242c,0x63cb764b,0xf70bf4}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe1201ebb,0x20e335fc,0xff68ba01,0x62f62ef4,0x94c15e93,0x804c3f93,0x5b3dddc5,0xf6731d31,0x3d326e47,0x6f7e242c,0x63cb764b,0xf70bf4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x20e335fce1201ebb,0x62f62ef4ff68ba01,0x804c3f9394c15e93,0xf6731d315b3dddc5,0x6f7e242c3d326e47,0xf70bf463cb764b}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x20e335fce1201ebb,0x62f62ef4ff68ba01,0x804c3f9394c15e93,0xf6731d315b3dddc5,0x6f7e242c3d326e47,0xf70bf463cb764b}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe76c,0x34d0,0x684,0xee5,0x43c6,0x5a38,0x4bd5,0x2867,0xd3c5,0x2ee1,0xf790,0x18bf,0xbb64,0x3924,0x7d25,0xe0bc,0x913a,0x1355,0x50e9,0x7091,0x6724,0x21b2,0xc027,0xaa}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe76c,0x34d0,0x684,0xee5,0x43c6,0x5a38,0x4bd5,0x2867,0xd3c5,0x2ee1,0xf790,0x18bf,0xbb64,0x3924,0x7d25,0xe0bc,0x913a,0x1355,0x50e9,0x7091,0x6724,0x21b2,0xc027,0xaa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x34d0e76c,0xee50684,0x5a3843c6,0x28674bd5,0x2ee1d3c5,0x18bff790,0x3924bb64,0xe0bc7d25,0x1355913a,0x709150e9,0x21b26724,0xaac027}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x34d0e76c,0xee50684,0x5a3843c6,0x28674bd5,0x2ee1d3c5,0x18bff790,0x3924bb64,0xe0bc7d25,0x1355913a,0x709150e9,0x21b26724,0xaac027}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xee5068434d0e76c,0x28674bd55a3843c6,0x18bff7902ee1d3c5,0xe0bc7d253924bb64,0x709150e91355913a,0xaac02721b26724}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xee5068434d0e76c,0x28674bd55a3843c6,0x18bff7902ee1d3c5,0xe0bc7d253924bb64,0x709150e91355913a,0xaac02721b26724}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xbd01,0x45bb,0x58bc,0x8007,0xbf5b,0xfd7,0x440b,0x7f9,0x54ed,0xe5db,0x2ba9,0xcd7b,0xfc98,0x1314,0x1470,0x9e9b,0xca3,0x944c,0x73c6,0x4cc9,0xa757,0x45fe,0x8b40,0x46}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xbd01,0x45bb,0x58bc,0x8007,0xbf5b,0xfd7,0x440b,0x7f9,0x54ed,0xe5db,0x2ba9,0xcd7b,0xfc98,0x1314,0x1470,0x9e9b,0xca3,0x944c,0x73c6,0x4cc9,0xa757,0x45fe,0x8b40,0x46}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x45bbbd01,0x800758bc,0xfd7bf5b,0x7f9440b,0xe5db54ed,0xcd7b2ba9,0x1314fc98,0x9e9b1470,0x944c0ca3,0x4cc973c6,0x45fea757,0x468b40}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x45bbbd01,0x800758bc,0xfd7bf5b,0x7f9440b,0xe5db54ed,0xcd7b2ba9,0x1314fc98,0x9e9b1470,0x944c0ca3,0x4cc973c6,0x45fea757,0x468b40}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x800758bc45bbbd01,0x7f9440b0fd7bf5b,0xcd7b2ba9e5db54ed,0x9e9b14701314fc98,0x4cc973c6944c0ca3,0x468b4045fea757}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x800758bc45bbbd01,0x7f9440b0fd7bf5b,0xcd7b2ba9e5db54ed,0x9e9b14701314fc98,0x4cc973c6944c0ca3,0x468b4045fea757}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe145,0x1edf,0xca03,0xdf1c,0x45fe,0x97,0xd10b,0x9d09,0xa16c,0x6b3e,0xc06c,0x7fb3,0x223a,0xa4c2,0xe2ce,0x98c,0x91b8,0xc2cd,0xdbd3,0x9081,0x89b4,0x9c34,0xf40b,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe145,0x1edf,0xca03,0xdf1c,0x45fe,0x97,0xd10b,0x9d09,0xa16c,0x6b3e,0xc06c,0x7fb3,0x223a,0xa4c2,0xe2ce,0x98c,0x91b8,0xc2cd,0xdbd3,0x9081,0x89b4,0x9c34,0xf40b,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1edfe145,0xdf1cca03,0x9745fe,0x9d09d10b,0x6b3ea16c,0x7fb3c06c,0xa4c2223a,0x98ce2ce,0xc2cd91b8,0x9081dbd3,0x9c3489b4,0x8f40b}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1edfe145,0xdf1cca03,0x9745fe,0x9d09d10b,0x6b3ea16c,0x7fb3c06c,0xa4c2223a,0x98ce2ce,0xc2cd91b8,0x9081dbd3,0x9c3489b4,0x8f40b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdf1cca031edfe145,0x9d09d10b009745fe,0x7fb3c06c6b3ea16c,0x98ce2cea4c2223a,0x9081dbd3c2cd91b8,0x8f40b9c3489b4}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdf1cca031edfe145,0x9d09d10b009745fe,0x7fb3c06c6b3ea16c,0x98ce2cea4c2223a,0x9081dbd3c2cd91b8,0x8f40b9c3489b4}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8d4f,0xfa2a,0xd4d0,0xce2,0x7436,0x8079,0xbe5f,0x2c18,0x5699,0x3569,0x7ecd,0xe106,0xea0,0x7d91,0xcc95,0x7293,0x5f1d,0xd812,0xb900,0x9858,0xd95a,0xc80e,0x1d4a,0xa}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8d4f,0xfa2a,0xd4d0,0xce2,0x7436,0x8079,0xbe5f,0x2c18,0x5699,0x3569,0x7ecd,0xe106,0xea0,0x7d91,0xcc95,0x7293,0x5f1d,0xd812,0xb900,0x9858,0xd95a,0xc80e,0x1d4a,0xa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfa2a8d4f,0xce2d4d0,0x80797436,0x2c18be5f,0x35695699,0xe1067ecd,0x7d910ea0,0x7293cc95,0xd8125f1d,0x9858b900,0xc80ed95a,0xa1d4a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfa2a8d4f,0xce2d4d0,0x80797436,0x2c18be5f,0x35695699,0xe1067ecd,0x7d910ea0,0x7293cc95,0xd8125f1d,0x9858b900,0xc80ed95a,0xa1d4a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xce2d4d0fa2a8d4f,0x2c18be5f80797436,0xe1067ecd35695699,0x7293cc957d910ea0,0x9858b900d8125f1d,0xa1d4ac80ed95a}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xce2d4d0fa2a8d4f,0x2c18be5f80797436,0xe1067ecd35695699,0x7293cc957d910ea0,0x9858b900d8125f1d,0xa1d4ac80ed95a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc 
= 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1eca,0x7027,0xf189,0x93f2,0x1c1f,0x5a6f,0xabbc,0x9e7d,0xa00a,0xc45,0x62a8,0x8db0,0x70cd,0x413f,0x400a,0xffaa,0x2ae6,0xd8a8,0xcb0e,0x69c4,0x7d9a,0x7a25,0xa679,0xad}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1eca,0x7027,0xf189,0x93f2,0x1c1f,0x5a6f,0xabbc,0x9e7d,0xa00a,0xc45,0x62a8,0x8db0,0x70cd,0x413f,0x400a,0xffaa,0x2ae6,0xd8a8,0xcb0e,0x69c4,0x7d9a,0x7a25,0xa679,0xad}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x70271eca,0x93f2f189,0x5a6f1c1f,0x9e7dabbc,0xc45a00a,0x8db062a8,0x413f70cd,0xffaa400a,0xd8a82ae6,0x69c4cb0e,0x7a257d9a,0xada679}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x70271eca,0x93f2f189,0x5a6f1c1f,0x9e7dabbc,0xc45a00a,0x8db062a8,0x413f70cd,0xffaa400a,0xd8a82ae6,0x69c4cb0e,0x7a257d9a,0xada679}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x93f2f18970271eca,0x9e7dabbc5a6f1c1f,0x8db062a80c45a00a,0xffaa400a413f70cd,0x69c4cb0ed8a82ae6,0xada6797a257d9a}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x93f2f18970271eca,0x9e7dabbc5a6f1c1f,0x8db062a80c45a00a,0xffaa400a413f70cd,0x69c4cb0ed8a82ae6,0xada6797a257d9a}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8949,0xfec5,0x9be5,0xc657,0xab10,0x2df6,0x3ee,0xc0f7,0x1a64,0x328e,0x3b3,0xb50c,0x650a,0xa23d,0x8ab,0xb103,0xf6e6,0xb27d,0x708a,0xc702,0x5301,0x2f00,0x9991,0x2f}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8949,0xfec5,0x9be5,0xc657,0xab10,0x2df6,0x3ee,0xc0f7,0x1a64,0x328e,0x3b3,0xb50c,0x650a,0xa23d,0x8ab,0xb103,0xf6e6,0xb27d,0x708a,0xc702,0x5301,0x2f00,0x9991,0x2f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfec58949,0xc6579be5,0x2df6ab10,0xc0f703ee,0x328e1a64,0xb50c03b3,0xa23d650a,0xb10308ab,0xb27df6e6,0xc702708a,0x2f005301,0x2f9991}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfec58949,0xc6579be5,0x2df6ab10,0xc0f703ee,0x328e1a64,0xb50c03b3,0xa23d650a,0xb10308ab,0xb27df6e6,0xc702708a,0x2f005301,0x2f9991}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc6579be5fec58949,0xc0f703ee2df6ab10,0xb50c03b3328e1a64,0xb10308aba23d650a,0xc702708ab27df6e6,0x2f99912f005301}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc6579be5fec58949,0xc0f703ee2df6ab10,0xb50c03b3328e1a64,0xb10308aba23d650a,0xc702708ab27df6e6,0x2f99912f005301}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x72b1,0x5d5,0x2b2f,0xf31d,0x8bc9,0x7f86,0x41a0,0xd3e7,0xa966,0xca96,0x8132,0x1ef9,0xf15f,0x826e,0x336a,0x8d6c,0xa0e2,0x27ed,0x46ff,0x67a7,0x26a5,0x37f1,0xe2b5,0xf5}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x72b1,0x5d5,0x2b2f,0xf31d,0x8bc9,0x7f86,0x41a0,0xd3e7,0xa966,0xca96,0x8132,0x1ef9,0xf15f,0x826e,0x336a,0x8d6c,0xa0e2,0x27ed,0x46ff,0x67a7,0x26a5,0x37f1,0xe2b5,0xf5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5d572b1,0xf31d2b2f,0x7f868bc9,0xd3e741a0,0xca96a966,0x1ef98132,0x826ef15f,0x8d6c336a,0x27eda0e2,0x67a746ff,0x37f126a5,0xf5e2b5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5d572b1,0xf31d2b2f,0x7f868bc9,0xd3e741a0,0xca96a966,0x1ef98132,0x826ef15f,0x8d6c336a,0x27eda0e2,0x67a746ff,0x37f126a5,0xf5e2b5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0xf31d2b2f05d572b1,0xd3e741a07f868bc9,0x1ef98132ca96a966,0x8d6c336a826ef15f,0x67a746ff27eda0e2,0xf5e2b537f126a5}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf31d2b2f05d572b1,0xd3e741a07f868bc9,0x1ef98132ca96a966,0x8d6c336a826ef15f,0x67a746ff27eda0e2,0xf5e2b537f126a5}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3e42,0x35b4,0xc315,0x4acc,0x7905,0x734e,0xe57,0x941d,0xcc00,0x9010,0x652,0x5679,0x1e7c,0x69d5,0x77f0,0x5936,0x9815,0xdc49,0xdbae,0x8415,0x2381,0x706d,0x1b55,0x35}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3e42,0x35b4,0xc315,0x4acc,0x7905,0x734e,0xe57,0x941d,0xcc00,0x9010,0x652,0x5679,0x1e7c,0x69d5,0x77f0,0x5936,0x9815,0xdc49,0xdbae,0x8415,0x2381,0x706d,0x1b55,0x35}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x35b43e42,0x4accc315,0x734e7905,0x941d0e57,0x9010cc00,0x56790652,0x69d51e7c,0x593677f0,0xdc499815,0x8415dbae,0x706d2381,0x351b55}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x35b43e42,0x4accc315,0x734e7905,0x941d0e57,0x9010cc00,0x56790652,0x69d51e7c,0x593677f0,0xdc499815,0x8415dbae,0x706d2381,0x351b55}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4accc31535b43e42,0x941d0e57734e7905,0x567906529010cc00,0x593677f069d51e7c,0x8415dbaedc499815,0x351b55706d2381}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4accc31535b43e42,0x941d0e57734e7905,0x567906529010cc00,0x593677f069d51e7c,0x8415dbaedc499815,0x351b55706d2381}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x9f23,0x1f88,0x311a,0x8d4e,0x15a2,0x199f,0x997,0x8bcf,0xc7a0,0xc956,0x3de8,0x254b,0x1224,0x1a69,0x604a,0x9cb1,0xa8f7,0xc6ee,0x5903,0x65b8,0xe8a5,0xa271,0x7d6e,0xb3}}} 
+{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x9f23,0x1f88,0x311a,0x8d4e,0x15a2,0x199f,0x997,0x8bcf,0xc7a0,0xc956,0x3de8,0x254b,0x1224,0x1a69,0x604a,0x9cb1,0xa8f7,0xc6ee,0x5903,0x65b8,0xe8a5,0xa271,0x7d6e,0xb3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1f889f23,0x8d4e311a,0x199f15a2,0x8bcf0997,0xc956c7a0,0x254b3de8,0x1a691224,0x9cb1604a,0xc6eea8f7,0x65b85903,0xa271e8a5,0xb37d6e}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1f889f23,0x8d4e311a,0x199f15a2,0x8bcf0997,0xc956c7a0,0x254b3de8,0x1a691224,0x9cb1604a,0xc6eea8f7,0x65b85903,0xa271e8a5,0xb37d6e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8d4e311a1f889f23,0x8bcf0997199f15a2,0x254b3de8c956c7a0,0x9cb1604a1a691224,0x65b85903c6eea8f7,0xb37d6ea271e8a5}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8d4e311a1f889f23,0x8bcf0997199f15a2,0x254b3de8c956c7a0,0x9cb1604a1a691224,0x65b85903c6eea8f7,0xb37d6ea271e8a5}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xfad4,0x9280,0x39ea,0xba3b,0xb12b,0x1c9c,0x5ffd,0x2c19,0x13bf,0x2145,0xaf34,0x30c1,0x70d8,0x27ea,0x6539,0xb50a,0x3106,0x3638,0x7fad,0xa5d2,0x912a,0xb0e6,0xb4a1,0xfd}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xfad4,0x9280,0x39ea,0xba3b,0xb12b,0x1c9c,0x5ffd,0x2c19,0x13bf,0x2145,0xaf34,0x30c1,0x70d8,0x27ea,0x6539,0xb50a,0x3106,0x3638,0x7fad,0xa5d2,0x912a,0xb0e6,0xb4a1,0xfd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9280fad4,0xba3b39ea,0x1c9cb12b,0x2c195ffd,0x214513bf,0x30c1af34,0x27ea70d8,0xb50a6539,0x36383106,0xa5d27fad,0xb0e6912a,0xfdb4a1}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9280fad4,0xba3b39ea,0x1c9cb12b,0x2c195ffd,0x214513bf,0x30c1af34,0x27ea70d8,0xb50a6539,0x36383106,0xa5d27fad,0xb0e6912a,0xfdb4a1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xba3b39ea9280fad4,0x2c195ffd1c9cb12b,0x30c1af34214513bf,0xb50a653927ea70d8,0xa5d27fad36383106,0xfdb4a1b0e6912a}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xba3b39ea9280fad4,0x2c195ffd1c9cb12b,0x30c1af34214513bf,0xb50a653927ea70d8,0xa5d27fad36383106,0xfdb4a1b0e6912a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc1be,0xca4b,0x3cea,0xb533,0x86fa,0x8cb1,0xf1a8,0x6be2,0x33ff,0x6fef,0xf9ad,0xa986,0xe183,0x962a,0x880f,0xa6c9,0x67ea,0x23b6,0x2451,0x7bea,0xdc7e,0x8f92,0xe4aa,0xca}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc1be,0xca4b,0x3cea,0xb533,0x86fa,0x8cb1,0xf1a8,0x6be2,0x33ff,0x6fef,0xf9ad,0xa986,0xe183,0x962a,0x880f,0xa6c9,0x67ea,0x23b6,0x2451,0x7bea,0xdc7e,0x8f92,0xe4aa,0xca}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xca4bc1be,0xb5333cea,0x8cb186fa,0x6be2f1a8,0x6fef33ff,0xa986f9ad,0x962ae183,0xa6c9880f,0x23b667ea,0x7bea2451,0x8f92dc7e,0xcae4aa}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xca4bc1be,0xb5333cea,0x8cb186fa,0x6be2f1a8,0x6fef33ff,0xa986f9ad,0x962ae183,0xa6c9880f,0x23b667ea,0x7bea2451,0x8f92dc7e,0xcae4aa}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb5333ceaca4bc1be,0x6be2f1a88cb186fa,0xa986f9ad6fef33ff,0xa6c9880f962ae183,0x7bea245123b667ea,0xcae4aa8f92dc7e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0xb5333ceaca4bc1be,0x6be2f1a88cb186fa,0xa986f9ad6fef33ff,0xa6c9880f962ae183,0x7bea245123b667ea,0xcae4aa8f92dc7e}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x9a9, 0x8c7, 0x119d, 0xada, 0x1d98, 0xc97, 0x1a31, 0xdc6, 0x192a, 0xf3c, 0x509, 0xc5b, 0xea7, 0x1eb9, 0x59d, 0x1e3c, 0x114b, 0x88, 0x5a9, 0x1154, 0x18c0, 0x11db, 0x1ba9, 0x3b8, 0x837, 0x18d0, 0x17eb, 0x211, 0x1aa7, 0x11} @@ -1213,223 +1213,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb089,0xa64c,0xa5a7,0x17cc,0xe580,0xaa34,0x86e8,0x9328,0x2d1e,0x66ce,0x2dbc,0xd6e5,0x68f,0xaf97,0x8c7a,0xc341,0x4b41,0x4e1c,0xe0a3,0x580f,0xe796,0x8f21,0x8ad3,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb089,0xa64c,0xa5a7,0x17cc,0xe580,0xaa34,0x86e8,0x9328,0x2d1e,0x66ce,0x2dbc,0xd6e5,0x68f,0xaf97,0x8c7a,0xc341,0x4b41,0x4e1c,0xe0a3,0x580f,0xe796,0x8f21,0x8ad3,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa64cb089,0x17cca5a7,0xaa34e580,0x932886e8,0x66ce2d1e,0xd6e52dbc,0xaf97068f,0xc3418c7a,0x4e1c4b41,0x580fe0a3,0x8f21e796,0xf8ad3}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa64cb089,0x17cca5a7,0xaa34e580,0x932886e8,0x66ce2d1e,0xd6e52dbc,0xaf97068f,0xc3418c7a,0x4e1c4b41,0x580fe0a3,0x8f21e796,0xf8ad3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x17cca5a7a64cb089,0x932886e8aa34e580,0xd6e52dbc66ce2d1e,0xc3418c7aaf97068f,0x580fe0a34e1c4b41,0xf8ad38f21e796}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x17cca5a7a64cb089,0x932886e8aa34e580,0xd6e52dbc66ce2d1e,0xc3418c7aaf97068f,0x580fe0a34e1c4b41,0xf8ad38f21e796}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xebe,0xec57,0xcd8c,0xef29,0xf13a,0xd391,0xa791,0x715c,0xe58e,0x194c,0xf950,0x5a37,0xdb9e,0xdab6,0x82c8,0x5cce,0x3197,0x339a,0xa700,0xad4,0xcd5b,0x91c1,0x5cff,0xb0}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xebe,0xec57,0xcd8c,0xef29,0xf13a,0xd391,0xa791,0x715c,0xe58e,0x194c,0xf950,0x5a37,0xdb9e,0xdab6,0x82c8,0x5cce,0x3197,0x339a,0xa700,0xad4,0xcd5b,0x91c1,0x5cff,0xb0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xec570ebe,0xef29cd8c,0xd391f13a,0x715ca791,0x194ce58e,0x5a37f950,0xdab6db9e,0x5cce82c8,0x339a3197,0xad4a700,0x91c1cd5b,0xb05cff}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xec570ebe,0xef29cd8c,0xd391f13a,0x715ca791,0x194ce58e,0x5a37f950,0xdab6db9e,0x5cce82c8,0x339a3197,0xad4a700,0x91c1cd5b,0xb05cff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xef29cd8cec570ebe,0x715ca791d391f13a,0x5a37f950194ce58e,0x5cce82c8dab6db9e,0xad4a700339a3197,0xb05cff91c1cd5b}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xef29cd8cec570ebe,0x715ca791d391f13a,0x5a37f950194ce58e,0x5cce82c8dab6db9e,0xad4a700339a3197,0xb05cff91c1cd5b}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3b4f,0x8fac,0x4b2c,0xc9a0,0x69a2,0xe4fa,0x3480,0xe41b,0x5801,0xd68b,0xa965,0x2f67,0x3134,0x7ac0,0x93a7,0x5352,0x9612,0x200e,0x33e6,0x44f,0xbca8,0x82f1,0xe411,0x4d}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x3b4f,0x8fac,0x4b2c,0xc9a0,0x69a2,0xe4fa,0x3480,0xe41b,0x5801,0xd68b,0xa965,0x2f67,0x3134,0x7ac0,0x93a7,0x5352,0x9612,0x200e,0x33e6,0x44f,0xbca8,0x82f1,0xe411,0x4d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8fac3b4f,0xc9a04b2c,0xe4fa69a2,0xe41b3480,0xd68b5801,0x2f67a965,0x7ac03134,0x535293a7,0x200e9612,0x44f33e6,0x82f1bca8,0x4de411}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8fac3b4f,0xc9a04b2c,0xe4fa69a2,0xe41b3480,0xd68b5801,0x2f67a965,0x7ac03134,0x535293a7,0x200e9612,0x44f33e6,0x82f1bca8,0x4de411}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc9a04b2c8fac3b4f,0xe41b3480e4fa69a2,0x2f67a965d68b5801,0x535293a77ac03134,0x44f33e6200e9612,0x4de41182f1bca8}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc9a04b2c8fac3b4f,0xe41b3480e4fa69a2,0x2f67a965d68b5801,0x535293a77ac03134,0x44f33e6200e9612,0x4de41182f1bca8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4f77,0x59b3,0x5a58,0xe833,0x1a7f,0x55cb,0x7917,0x6cd7,0xd2e1,0x9931,0xd243,0x291a,0xf970,0x5068,0x7385,0x3cbe,0xb4be,0xb1e3,0x1f5c,0xa7f0,0x1869,0x70de,0x752c,0xf0}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4f77,0x59b3,0x5a58,0xe833,0x1a7f,0x55cb,0x7917,0x6cd7,0xd2e1,0x9931,0xd243,0x291a,0xf970,0x5068,0x7385,0x3cbe,0xb4be,0xb1e3,0x1f5c,0xa7f0,0x1869,0x70de,0x752c,0xf0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x59b34f77,0xe8335a58,0x55cb1a7f,0x6cd77917,0x9931d2e1,0x291ad243,0x5068f970,0x3cbe7385,0xb1e3b4be,0xa7f01f5c,0x70de1869,0xf0752c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x59b34f77,0xe8335a58,0x55cb1a7f,0x6cd77917,0x9931d2e1,0x291ad243,0x5068f970,0x3cbe7385,0xb1e3b4be,0xa7f01f5c,0x70de1869,0xf0752c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe8335a5859b34f77,0x6cd7791755cb1a7f,0x291ad2439931d2e1,0x3cbe73855068f970,0xa7f01f5cb1e3b4be,0xf0752c70de1869}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe8335a5859b34f77,0x6cd7791755cb1a7f,0x291ad2439931d2e1,0x3cbe73855068f970,0xa7f01f5cb1e3b4be,0xf0752c70de1869}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}}} #endif , #if 0 
#elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe463,0x3132,0x31,0xb872,0xdbee,0x1045,0x2b88,0x62c5,0xee3c,0xde5c,0xb179,0xa84f,0x18e5,0x355e,0x9a0f,0xbef8,0x783a,0x35b5,0x6d1c,0xaa31,0x3024,0xed81,0xa0f6,0x8a}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe463,0x3132,0x31,0xb872,0xdbee,0x1045,0x2b88,0x62c5,0xee3c,0xde5c,0xb179,0xa84f,0x18e5,0x355e,0x9a0f,0xbef8,0x783a,0x35b5,0x6d1c,0xaa31,0x3024,0xed81,0xa0f6,0x8a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3132e463,0xb8720031,0x1045dbee,0x62c52b88,0xde5cee3c,0xa84fb179,0x355e18e5,0xbef89a0f,0x35b5783a,0xaa316d1c,0xed813024,0x8aa0f6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3132e463,0xb8720031,0x1045dbee,0x62c52b88,0xde5cee3c,0xa84fb179,0x355e18e5,0xbef89a0f,0x35b5783a,0xaa316d1c,0xed813024,0x8aa0f6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb87200313132e463,0x62c52b881045dbee,0xa84fb179de5cee3c,0xbef89a0f355e18e5,0xaa316d1c35b5783a,0x8aa0f6ed813024}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb87200313132e463,0x62c52b881045dbee,0xa84fb179de5cee3c,0xbef89a0f355e18e5,0xaa316d1c35b5783a,0x8aa0f6ed813024}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcf24,0xdac2,0xe08b,0xd2f9,0x13a,0xf1f,0x9517,0xfa7c,0xa1c5,0x581e,0x4d0b,0x3e59,0x97cc,0x7506,0xee19,0xa48e,0xb1b0,0x50c2,0xb5a7,0x4b1d,0x2fcd,0xee68,0xab65,0x85}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcf24,0xdac2,0xe08b,0xd2f9,0x13a,0xf1f,0x9517,0xfa7c,0xa1c5,0x581e,0x4d0b,0x3e59,0x97cc,0x7506,0xee19,0xa48e,0xb1b0,0x50c2,0xb5a7,0x4b1d,0x2fcd,0xee68,0xab65,0x85}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdac2cf24,0xd2f9e08b,0xf1f013a,0xfa7c9517,0x581ea1c5,0x3e594d0b,0x750697cc,0xa48eee19,0x50c2b1b0,0x4b1db5a7,0xee682fcd,0x85ab65}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdac2cf24,0xd2f9e08b,0xf1f013a,0xfa7c9517,0x581ea1c5,0x3e594d0b,0x750697cc,0xa48eee19,0x50c2b1b0,0x4b1db5a7,0xee682fcd,0x85ab65}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd2f9e08bdac2cf24,0xfa7c95170f1f013a,0x3e594d0b581ea1c5,0xa48eee19750697cc,0x4b1db5a750c2b1b0,0x85ab65ee682fcd}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd2f9e08bdac2cf24,0xfa7c95170f1f013a,0x3e594d0b581ea1c5,0xa48eee19750697cc,0x4b1db5a750c2b1b0,0x85ab65ee682fcd}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8b69,0x7be5,0xdf28,0x9c91,0xf929,0x7c60,0x6c50,0x4f81,0x714a,0x59da,0x2741,0x3c71,0x223a,0x79bf,0x14bd,0xa26f,0xc787,0x606d,0xc74c,0xef81,0xd1c4,0x32a,0x55ff,0x6a}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8b69,0x7be5,0xdf28,0x9c91,0xf929,0x7c60,0x6c50,0x4f81,0x714a,0x59da,0x2741,0x3c71,0x223a,0x79bf,0x14bd,0xa26f,0xc787,0x606d,0xc74c,0xef81,0xd1c4,0x32a,0x55ff,0x6a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x7be58b69,0x9c91df28,0x7c60f929,0x4f816c50,0x59da714a,0x3c712741,0x79bf223a,0xa26f14bd,0x606dc787,0xef81c74c,0x32ad1c4,0x6a55ff}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7be58b69,0x9c91df28,0x7c60f929,0x4f816c50,0x59da714a,0x3c712741,0x79bf223a,0xa26f14bd,0x606dc787,0xef81c74c,0x32ad1c4,0x6a55ff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c91df287be58b69,0x4f816c507c60f929,0x3c71274159da714a,0xa26f14bd79bf223a,0xef81c74c606dc787,0x6a55ff032ad1c4}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c91df287be58b69,0x4f816c507c60f929,0x3c71274159da714a,0xa26f14bd79bf223a,0xef81c74c606dc787,0x6a55ff032ad1c4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1b9d,0xcecd,0xffce,0x478d,0x2411,0xefba,0xd477,0x9d3a,0x11c3,0x21a3,0x4e86,0x57b0,0xe71a,0xcaa1,0x65f0,0x4107,0x87c5,0xca4a,0x92e3,0x55ce,0xcfdb,0x127e,0x5f09,0x75}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1b9d,0xcecd,0xffce,0x478d,0x2411,0xefba,0xd477,0x9d3a,0x11c3,0x21a3,0x4e86,0x57b0,0xe71a,0xcaa1,0x65f0,0x4107,0x87c5,0xca4a,0x92e3,0x55ce,0xcfdb,0x127e,0x5f09,0x75}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecd1b9d,0x478dffce,0xefba2411,0x9d3ad477,0x21a311c3,0x57b04e86,0xcaa1e71a,0x410765f0,0xca4a87c5,0x55ce92e3,0x127ecfdb,0x755f09}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecd1b9d,0x478dffce,0xefba2411,0x9d3ad477,0x21a311c3,0x57b04e86,0xcaa1e71a,0x410765f0,0xca4a87c5,0x55ce92e3,0x127ecfdb,0x755f09}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x478dffcececd1b9d,0x9d3ad477efba2411,0x57b04e8621a311c3,0x410765f0caa1e71a,0x55ce92e3ca4a87c5,0x755f09127ecfdb}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x478dffcececd1b9d,0x9d3ad477efba2411,0x57b04e8621a311c3,0x410765f0caa1e71a,0x55ce92e3ca4a87c5,0x755f09127ecfdb}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb089,0xa64c,0xa5a7,0x17cc,0xe580,0xaa34,0x86e8,0x9328,0x2d1e,0x66ce,0x2dbc,0xd6e5,0x68f,0xaf97,0x8c7a,0xc341,0x4b41,0x4e1c,0xe0a3,0x580f,0xe796,0x8f21,0x8ad3,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb089,0xa64c,0xa5a7,0x17cc,0xe580,0xaa34,0x86e8,0x9328,0x2d1e,0x66ce,0x2dbc,0xd6e5,0x68f,0xaf97,0x8c7a,0xc341,0x4b41,0x4e1c,0xe0a3,0x580f,0xe796,0x8f21,0x8ad3,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa64cb089,0x17cca5a7,0xaa34e580,0x932886e8,0x66ce2d1e,0xd6e52dbc,0xaf97068f,0xc3418c7a,0x4e1c4b41,0x580fe0a3,0x8f21e796,0xf8ad3}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa64cb089,0x17cca5a7,0xaa34e580,0x932886e8,0x66ce2d1e,0xd6e52dbc,0xaf97068f,0xc3418c7a,0x4e1c4b41,0x580fe0a3,0x8f21e796,0xf8ad3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x17cca5a7a64cb089,0x932886e8aa34e580,0xd6e52dbc66ce2d1e,0xc3418c7aaf97068f,0x580fe0a34e1c4b41,0xf8ad38f21e796}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x17cca5a7a64cb089,0x932886e8aa34e580,0xd6e52dbc66ce2d1e,0xc3418c7aaf97068f,0x580fe0a34e1c4b41,0xf8ad38f21e796}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xebe,0xec57,0xcd8c,0xef29,0xf13a,0xd391,0xa791,0x715c,0xe58e,0x194c,0xf950,0x5a37,0xdb9e,0xdab6,0x82c8,0x5cce,0x3197,0x339a,0xa700,0xad4,0xcd5b,0x91c1,0x5cff,0xb0}}} 
+{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xebe,0xec57,0xcd8c,0xef29,0xf13a,0xd391,0xa791,0x715c,0xe58e,0x194c,0xf950,0x5a37,0xdb9e,0xdab6,0x82c8,0x5cce,0x3197,0x339a,0xa700,0xad4,0xcd5b,0x91c1,0x5cff,0xb0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xec570ebe,0xef29cd8c,0xd391f13a,0x715ca791,0x194ce58e,0x5a37f950,0xdab6db9e,0x5cce82c8,0x339a3197,0xad4a700,0x91c1cd5b,0xb05cff}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xec570ebe,0xef29cd8c,0xd391f13a,0x715ca791,0x194ce58e,0x5a37f950,0xdab6db9e,0x5cce82c8,0x339a3197,0xad4a700,0x91c1cd5b,0xb05cff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xef29cd8cec570ebe,0x715ca791d391f13a,0x5a37f950194ce58e,0x5cce82c8dab6db9e,0xad4a700339a3197,0xb05cff91c1cd5b}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xef29cd8cec570ebe,0x715ca791d391f13a,0x5a37f950194ce58e,0x5cce82c8dab6db9e,0xad4a700339a3197,0xb05cff91c1cd5b}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3b4f,0x8fac,0x4b2c,0xc9a0,0x69a2,0xe4fa,0x3480,0xe41b,0x5801,0xd68b,0xa965,0x2f67,0x3134,0x7ac0,0x93a7,0x5352,0x9612,0x200e,0x33e6,0x44f,0xbca8,0x82f1,0xe411,0x4d}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3b4f,0x8fac,0x4b2c,0xc9a0,0x69a2,0xe4fa,0x3480,0xe41b,0x5801,0xd68b,0xa965,0x2f67,0x3134,0x7ac0,0x93a7,0x5352,0x9612,0x200e,0x33e6,0x44f,0xbca8,0x82f1,0xe411,0x4d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8fac3b4f,0xc9a04b2c,0xe4fa69a2,0xe41b3480,0xd68b5801,0x2f67a965,0x7ac03134,0x535293a7,0x200e9612,0x44f33e6,0x82f1bca8,0x4de411}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8fac3b4f,0xc9a04b2c,0xe4fa69a2,0xe41b3480,0xd68b5801,0x2f67a965,0x7ac03134,0x535293a7,0x200e9612,0x44f33e6,0x82f1bca8,0x4de411}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc9a04b2c8fac3b4f,0xe41b3480e4fa69a2,0x2f67a965d68b5801,0x535293a77ac03134,0x44f33e6200e9612,0x4de41182f1bca8}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc9a04b2c8fac3b4f,0xe41b3480e4fa69a2,0x2f67a965d68b5801,0x535293a77ac03134,0x44f33e6200e9612,0x4de41182f1bca8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4f77,0x59b3,0x5a58,0xe833,0x1a7f,0x55cb,0x7917,0x6cd7,0xd2e1,0x9931,0xd243,0x291a,0xf970,0x5068,0x7385,0x3cbe,0xb4be,0xb1e3,0x1f5c,0xa7f0,0x1869,0x70de,0x752c,0xf0}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4f77,0x59b3,0x5a58,0xe833,0x1a7f,0x55cb,0x7917,0x6cd7,0xd2e1,0x9931,0xd243,0x291a,0xf970,0x5068,0x7385,0x3cbe,0xb4be,0xb1e3,0x1f5c,0xa7f0,0x1869,0x70de,0x752c,0xf0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x59b34f77,0xe8335a58,0x55cb1a7f,0x6cd77917,0x9931d2e1,0x291ad243,0x5068f970,0x3cbe7385,0xb1e3b4be,0xa7f01f5c,0x70de1869,0xf0752c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x59b34f77,0xe8335a58,0x55cb1a7f,0x6cd77917,0x9931d2e1,0x291ad243,0x5068f970,0x3cbe7385,0xb1e3b4be,0xa7f01f5c,0x70de1869,0xf0752c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe8335a5859b34f77,0x6cd7791755cb1a7f,0x291ad2439931d2e1,0x3cbe73855068f970,0xa7f01f5cb1e3b4be,0xf0752c70de1869}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0xe8335a5859b34f77,0x6cd7791755cb1a7f,0x291ad2439931d2e1,0x3cbe73855068f970,0xa7f01f5cb1e3b4be,0xf0752c70de1869}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd69f,0xa20a,0x2dbf,0x4897,0x3199,0xde89,0xe5f9,0x293e,0x826b,0xb67a,0x9878,0x508f,0x1cd5,0xbfc7,0xa6dc,0xa78c,0xa5a7,0xf717,0x2bd3,0x9a61,0x7d35,0xb772,0xba39,0x5d}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd69f,0xa20a,0x2dbf,0x4897,0x3199,0xde89,0xe5f9,0x293e,0x826b,0xb67a,0x9878,0x508f,0x1cd5,0xbfc7,0xa6dc,0xa78c,0xa5a7,0xf717,0x2bd3,0x9a61,0x7d35,0xb772,0xba39,0x5d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa20ad69f,0x48972dbf,0xde893199,0x293ee5f9,0xb67a826b,0x508f9878,0xbfc71cd5,0xa78ca6dc,0xf717a5a7,0x9a612bd3,0xb7727d35,0x5dba39}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa20ad69f,0x48972dbf,0xde893199,0x293ee5f9,0xb67a826b,0x508f9878,0xbfc71cd5,0xa78ca6dc,0xf717a5a7,0x9a612bd3,0xb7727d35,0x5dba39}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x48972dbfa20ad69f,0x293ee5f9de893199,0x508f9878b67a826b,0xa78ca6dcbfc71cd5,0x9a612bd3f717a5a7,0x5dba39b7727d35}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x48972dbfa20ad69f,0x293ee5f9de893199,0x508f9878b67a826b,0xa78ca6dcbfc71cd5,0x9a612bd3f717a5a7,0x5dba39b7727d35}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xeec1,0x1e36,0x61bb,0x9e9f,0xe1d8,0x9166,0x8a8e,0xb5cd,0xc787,0x4281,0xb7db,0xc5fe,0x29b,0x7038,0xad1a,0xdfb3,0x5d88,0xa643,0xce34,0xe9d5,0xfe7,0xc15c,0xb80f,0xbc}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xeec1,0x1e36,0x61bb,0x9e9f,0xe1d8,0x9166,0x8a8e,0xb5cd,0xc787,0x4281,0xb7db,0xc5fe,0x29b,0x7038,0xad1a,0xdfb3,0x5d88,0xa643,0xce34,0xe9d5,0xfe7,0xc15c,0xb80f,0xbc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1e36eec1,0x9e9f61bb,0x9166e1d8,0xb5cd8a8e,0x4281c787,0xc5feb7db,0x7038029b,0xdfb3ad1a,0xa6435d88,0xe9d5ce34,0xc15c0fe7,0xbcb80f}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1e36eec1,0x9e9f61bb,0x9166e1d8,0xb5cd8a8e,0x4281c787,0xc5feb7db,0x7038029b,0xdfb3ad1a,0xa6435d88,0xe9d5ce34,0xc15c0fe7,0xbcb80f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9e9f61bb1e36eec1,0xb5cd8a8e9166e1d8,0xc5feb7db4281c787,0xdfb3ad1a7038029b,0xe9d5ce34a6435d88,0xbcb80fc15c0fe7}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9e9f61bb1e36eec1,0xb5cd8a8e9166e1d8,0xc5feb7db4281c787,0xdfb3ad1a7038029b,0xe9d5ce34a6435d88,0xbcb80fc15c0fe7}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb7ff,0xc2,0x2b8a,0x5a59,0xd318,0x52ca,0x9b64,0xad19,0x8df,0xc9b8,0x7b28,0x9d09,0xe309,0x9,0xfb09,0xcbb9,0x6a67,0x1137,0x707c,0xaa5,0xcdf5,0x3ffd,0xfb9e,0xb9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb7ff,0xc2,0x2b8a,0x5a59,0xd318,0x52ca,0x9b64,0xad19,0x8df,0xc9b8,0x7b28,0x9d09,0xe309,0x9,0xfb09,0xcbb9,0x6a67,0x1137,0x707c,0xaa5,0xcdf5,0x3ffd,0xfb9e,0xb9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc2b7ff,0x5a592b8a,0x52cad318,0xad199b64,0xc9b808df,0x9d097b28,0x9e309,0xcbb9fb09,0x11376a67,0xaa5707c,0x3ffdcdf5,0xb9fb9e}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc2b7ff,0x5a592b8a,0x52cad318,0xad199b64,0xc9b808df,0x9d097b28,0x9e309,0xcbb9fb09,0x11376a67,0xaa5707c,0x3ffdcdf5,0xb9fb9e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x5a592b8a00c2b7ff,0xad199b6452cad318,0x9d097b28c9b808df,0xcbb9fb090009e309,0xaa5707c11376a67,0xb9fb9e3ffdcdf5}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x5a592b8a00c2b7ff,0xad199b6452cad318,0x9d097b28c9b808df,0xcbb9fb090009e309,0xaa5707c11376a67,0xb9fb9e3ffdcdf5}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2961,0x5df5,0xd240,0xb768,0xce66,0x2176,0x1a06,0xd6c1,0x7d94,0x4985,0x6787,0xaf70,0xe32a,0x4038,0x5923,0x5873,0x5a58,0x8e8,0xd42c,0x659e,0x82ca,0x488d,0x45c6,0xa2}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2961,0x5df5,0xd240,0xb768,0xce66,0x2176,0x1a06,0xd6c1,0x7d94,0x4985,0x6787,0xaf70,0xe32a,0x4038,0x5923,0x5873,0x5a58,0x8e8,0xd42c,0x659e,0x82ca,0x488d,0x45c6,0xa2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5df52961,0xb768d240,0x2176ce66,0xd6c11a06,0x49857d94,0xaf706787,0x4038e32a,0x58735923,0x8e85a58,0x659ed42c,0x488d82ca,0xa245c6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5df52961,0xb768d240,0x2176ce66,0xd6c11a06,0x49857d94,0xaf706787,0x4038e32a,0x58735923,0x8e85a58,0x659ed42c,0x488d82ca,0xa245c6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb768d2405df52961,0xd6c11a062176ce66,0xaf70678749857d94,0x587359234038e32a,0x659ed42c08e85a58,0xa245c6488d82ca}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb768d2405df52961,0xd6c11a062176ce66,0xaf70678749857d94,0x587359234038e32a,0x659ed42c08e85a58,0xa245c6488d82ca}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 
{0xccf, 0xad2, 0x1c72, 0x1f31, 0x155, 0xa3, 0x1062, 0xf6e, 0xe60, 0x87b, 0x1891, 0xb45, 0x1de8, 0x7f8, 0x18a6, 0x1e67, 0x988, 0x1887, 0xca7, 0x216, 0x1daa, 0x1aa5, 0xc3a, 0x11d3, 0x1282, 0x55d, 0x15fc, 0x1132, 0x1c5e, 0x8} @@ -1689,223 +1689,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4fff,0xe916,0xb451,0xaee,0xc580,0xfa2c,0xb766,0x199a,0xa8a0,0x36f2,0xaa1b,0x47dd,0xb34f,0x26f5,0xb1ba,0xc4c1,0xad4d,0xfcf8,0x21fd,0xf7a,0x2a36,0x4cd2,0x2b08,0x94}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4fff,0xe916,0xb451,0xaee,0xc580,0xfa2c,0xb766,0x199a,0xa8a0,0x36f2,0xaa1b,0x47dd,0xb34f,0x26f5,0xb1ba,0xc4c1,0xad4d,0xfcf8,0x21fd,0xf7a,0x2a36,0x4cd2,0x2b08,0x94}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe9164fff,0xaeeb451,0xfa2cc580,0x199ab766,0x36f2a8a0,0x47ddaa1b,0x26f5b34f,0xc4c1b1ba,0xfcf8ad4d,0xf7a21fd,0x4cd22a36,0x942b08}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe9164fff,0xaeeb451,0xfa2cc580,0x199ab766,0x36f2a8a0,0x47ddaa1b,0x26f5b34f,0xc4c1b1ba,0xfcf8ad4d,0xf7a21fd,0x4cd22a36,0x942b08}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xaeeb451e9164fff,0x199ab766fa2cc580,0x47ddaa1b36f2a8a0,0xc4c1b1ba26f5b34f,0xf7a21fdfcf8ad4d,0x942b084cd22a36}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xaeeb451e9164fff,0x199ab766fa2cc580,0x47ddaa1b36f2a8a0,0xc4c1b1ba26f5b34f,0xf7a21fdfcf8ad4d,0x942b084cd22a36}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf16e,0x5176,0xe09f,0xe2ae,0xa46d,0x6925,0xc9bd,0xbd66,0xdd5c,0x51ce,0xe98d,0x9ad3,0x6815,0x4476,0xbec8,0xa088,0x6799,0x1733,0xf164,0x1d16,0xc914,0x4af3,0x3b5b,0x84}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf16e,0x5176,0xe09f,0xe2ae,0xa46d,0x6925,0xc9bd,0xbd66,0xdd5c,0x51ce,0xe98d,0x9ad3,0x6815,0x4476,0xbec8,0xa088,0x6799,0x1733,0xf164,0x1d16,0xc914,0x4af3,0x3b5b,0x84}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5176f16e,0xe2aee09f,0x6925a46d,0xbd66c9bd,0x51cedd5c,0x9ad3e98d,0x44766815,0xa088bec8,0x17336799,0x1d16f164,0x4af3c914,0x843b5b}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5176f16e,0xe2aee09f,0x6925a46d,0xbd66c9bd,0x51cedd5c,0x9ad3e98d,0x44766815,0xa088bec8,0x17336799,0x1d16f164,0x4af3c914,0x843b5b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe2aee09f5176f16e,0xbd66c9bd6925a46d,0x9ad3e98d51cedd5c,0xa088bec844766815,0x1d16f16417336799,0x843b5b4af3c914}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe2aee09f5176f16e,0xbd66c9bd6925a46d,0x9ad3e98d51cedd5c,0xa088bec844766815,0x1d16f16417336799,0x843b5b4af3c914}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x22c1,0x4088,0xbc14,0x9620,0x7d53,0x77ec,0x89,0xda12,0x54df,0x3806,0xb925,0x6092,0xd1b4,0x59e3,0x3419,0x9617,0x9ae6,0x48a9,0xf008,0xb452,0x9756,0x35cb,0x9c5d,0x49}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x22c1,0x4088,0xbc14,0x9620,0x7d53,0x77ec,0x89,0xda12,0x54df,0x3806,0xb925,0x6092,0xd1b4,0x59e3,0x3419,0x9617,0x9ae6,0x48a9,0xf008,0xb452,0x9756,0x35cb,0x9c5d,0x49}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x408822c1,0x9620bc14,0x77ec7d53,0xda120089,0x380654df,0x6092b925,0x59e3d1b4,0x96173419,0x48a99ae6,0xb452f008,0x35cb9756,0x499c5d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x408822c1,0x9620bc14,0x77ec7d53,0xda120089,0x380654df,0x6092b925,0x59e3d1b4,0x96173419,0x48a99ae6,0xb452f008,0x35cb9756,0x499c5d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9620bc14408822c1,0xda12008977ec7d53,0x6092b925380654df,0x9617341959e3d1b4,0xb452f00848a99ae6,0x499c5d35cb9756}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9620bc14408822c1,0xda12008977ec7d53,0x6092b925380654df,0x9617341959e3d1b4,0xb452f00848a99ae6,0x499c5d35cb9756}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb001,0x16e9,0x4bae,0xf511,0x3a7f,0x5d3,0x4899,0xe665,0x575f,0xc90d,0x55e4,0xb822,0x4cb0,0xd90a,0x4e45,0x3b3e,0x52b2,0x307,0xde02,0xf085,0xd5c9,0xb32d,0xd4f7,0x6b}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb001,0x16e9,0x4bae,0xf511,0x3a7f,0x5d3,0x4899,0xe665,0x575f,0xc90d,0x55e4,0xb822,0x4cb0,0xd90a,0x4e45,0x3b3e,0x52b2,0x307,0xde02,0xf085,0xd5c9,0xb32d,0xd4f7,0x6b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x16e9b001,0xf5114bae,0x5d33a7f,0xe6654899,0xc90d575f,0xb82255e4,0xd90a4cb0,0x3b3e4e45,0x30752b2,0xf085de02,0xb32dd5c9,0x6bd4f7}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x16e9b001,0xf5114bae,0x5d33a7f,0xe6654899,0xc90d575f,0xb82255e4,0xd90a4cb0,0x3b3e4e45,0x30752b2,0xf085de02,0xb32dd5c9,0x6bd4f7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf5114bae16e9b001,0xe665489905d33a7f,0xb82255e4c90d575f,0x3b3e4e45d90a4cb0,0xf085de02030752b2,0x6bd4f7b32dd5c9}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf5114bae16e9b001,0xe665489905d33a7f,0xb82255e4c90d575f,0x3b3e4e45d90a4cb0,0xf085de02030752b2,0x6bd4f7b32dd5c9}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}} 
+{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xafa5,0x4195,0xbb2d,0xdd24,0xa3ca,0xc678,0xf995,0x2ccb,0x5c3b,0xf9ff,0xd06,0x1f9b,0x926d,0x4e3b,0x2881,0x24f2,0xcf4c,0x8e9a,0xa38d,0x24cb,0xe8f2,0x28a1,0x581c,0xde}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xafa5,0x4195,0xbb2d,0xdd24,0xa3ca,0xc678,0xf995,0x2ccb,0x5c3b,0xf9ff,0xd06,0x1f9b,0x926d,0x4e3b,0x2881,0x24f2,0xcf4c,0x8e9a,0xa38d,0x24cb,0xe8f2,0x28a1,0x581c,0xde}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4195afa5,0xdd24bb2d,0xc678a3ca,0x2ccbf995,0xf9ff5c3b,0x1f9b0d06,0x4e3b926d,0x24f22881,0x8e9acf4c,0x24cba38d,0x28a1e8f2,0xde581c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4195afa5,0xdd24bb2d,0xc678a3ca,0x2ccbf995,0xf9ff5c3b,0x1f9b0d06,0x4e3b926d,0x24f22881,0x8e9acf4c,0x24cba38d,0x28a1e8f2,0xde581c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd24bb2d4195afa5,0x2ccbf995c678a3ca,0x1f9b0d06f9ff5c3b,0x24f228814e3b926d,0x24cba38d8e9acf4c,0xde581c28a1e8f2}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd24bb2d4195afa5,0x2ccbf995c678a3ca,0x1f9b0d06f9ff5c3b,0x24f228814e3b926d,0x24cba38d8e9acf4c,0xde581c28a1e8f2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcd88,0x9cea,0x593c,0xb5a8,0x79c6,0xc07c,0x496f,0xfb85,0x5ac9,0x381c,0xf4f8,0xfa59,0xb7a3,0x5caa,0x24c2,0x67c8,0x31b3,0x7585,0xbe8a,0xb89f,0xa29f,0x6cd5,0xc156,0x25}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcd88,0x9cea,0x593c,0xb5a8,0x79c6,0xc07c,0x496f,0xfb85,0x5ac9,0x381c,0xf4f8,0xfa59,0xb7a3,0x5caa,0x24c2,0x67c8,0x31b3,0x7585,0xbe8a,0xb89f,0xa29f,0x6cd5,0xc156,0x25}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9ceacd88,0xb5a8593c,0xc07c79c6,0xfb85496f,0x381c5ac9,0xfa59f4f8,0x5caab7a3,0x67c824c2,0x758531b3,0xb89fbe8a,0x6cd5a29f,0x25c156}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9ceacd88,0xb5a8593c,0xc07c79c6,0xfb85496f,0x381c5ac9,0xfa59f4f8,0x5caab7a3,0x67c824c2,0x758531b3,0xb89fbe8a,0x6cd5a29f,0x25c156}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb5a8593c9ceacd88,0xfb85496fc07c79c6,0xfa59f4f8381c5ac9,0x67c824c25caab7a3,0xb89fbe8a758531b3,0x25c1566cd5a29f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb5a8593c9ceacd88,0xfb85496fc07c79c6,0xfa59f4f8381c5ac9,0x67c824c25caab7a3,0xb89fbe8a758531b3,0x25c1566cd5a29f}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x9627,0xd297,0x9200,0x73de,0xaa89,0xf44f,0x99c7,0x2d45,0xb1eb,0xab2b,0x4168,0x976f,0x1e88,0x7777,0x2f39,0x6648,0xc224,0xd5a1,0xb815,0x861b,0xf76f,0xb476,0x4123,0xbe}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x9627,0xd297,0x9200,0x73de,0xaa89,0xf44f,0x99c7,0x2d45,0xb1eb,0xab2b,0x4168,0x976f,0x1e88,0x7777,0x2f39,0x6648,0xc224,0xd5a1,0xb815,0x861b,0xf76f,0xb476,0x4123,0xbe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd2979627,0x73de9200,0xf44faa89,0x2d4599c7,0xab2bb1eb,0x976f4168,0x77771e88,0x66482f39,0xd5a1c224,0x861bb815,0xb476f76f,0xbe4123}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xd2979627,0x73de9200,0xf44faa89,0x2d4599c7,0xab2bb1eb,0x976f4168,0x77771e88,0x66482f39,0xd5a1c224,0x861bb815,0xb476f76f,0xbe4123}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x73de9200d2979627,0x2d4599c7f44faa89,0x976f4168ab2bb1eb,0x66482f3977771e88,0x861bb815d5a1c224,0xbe4123b476f76f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x73de9200d2979627,0x2d4599c7f44faa89,0x976f4168ab2bb1eb,0x66482f3977771e88,0x861bb815d5a1c224,0xbe4123b476f76f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x505b,0xbe6a,0x44d2,0x22db,0x5c35,0x3987,0x66a,0xd334,0xa3c4,0x600,0xf2f9,0xe064,0x6d92,0xb1c4,0xd77e,0xdb0d,0x30b3,0x7165,0x5c72,0xdb34,0x170d,0xd75e,0xa7e3,0x21}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x505b,0xbe6a,0x44d2,0x22db,0x5c35,0x3987,0x66a,0xd334,0xa3c4,0x600,0xf2f9,0xe064,0x6d92,0xb1c4,0xd77e,0xdb0d,0x30b3,0x7165,0x5c72,0xdb34,0x170d,0xd75e,0xa7e3,0x21}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbe6a505b,0x22db44d2,0x39875c35,0xd334066a,0x600a3c4,0xe064f2f9,0xb1c46d92,0xdb0dd77e,0x716530b3,0xdb345c72,0xd75e170d,0x21a7e3}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbe6a505b,0x22db44d2,0x39875c35,0xd334066a,0x600a3c4,0xe064f2f9,0xb1c46d92,0xdb0dd77e,0x716530b3,0xdb345c72,0xd75e170d,0x21a7e3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22db44d2be6a505b,0xd334066a39875c35,0xe064f2f90600a3c4,0xdb0dd77eb1c46d92,0xdb345c72716530b3,0x21a7e3d75e170d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22db44d2be6a505b,0xd334066a39875c35,0xe064f2f90600a3c4,0xdb0dd77eb1c46d92,0xdb345c72716530b3,0x21a7e3d75e170d}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4fff,0xe916,0xb451,0xaee,0xc580,0xfa2c,0xb766,0x199a,0xa8a0,0x36f2,0xaa1b,0x47dd,0xb34f,0x26f5,0xb1ba,0xc4c1,0xad4d,0xfcf8,0x21fd,0xf7a,0x2a36,0x4cd2,0x2b08,0x94}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4fff,0xe916,0xb451,0xaee,0xc580,0xfa2c,0xb766,0x199a,0xa8a0,0x36f2,0xaa1b,0x47dd,0xb34f,0x26f5,0xb1ba,0xc4c1,0xad4d,0xfcf8,0x21fd,0xf7a,0x2a36,0x4cd2,0x2b08,0x94}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe9164fff,0xaeeb451,0xfa2cc580,0x199ab766,0x36f2a8a0,0x47ddaa1b,0x26f5b34f,0xc4c1b1ba,0xfcf8ad4d,0xf7a21fd,0x4cd22a36,0x942b08}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe9164fff,0xaeeb451,0xfa2cc580,0x199ab766,0x36f2a8a0,0x47ddaa1b,0x26f5b34f,0xc4c1b1ba,0xfcf8ad4d,0xf7a21fd,0x4cd22a36,0x942b08}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xaeeb451e9164fff,0x199ab766fa2cc580,0x47ddaa1b36f2a8a0,0xc4c1b1ba26f5b34f,0xf7a21fdfcf8ad4d,0x942b084cd22a36}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xaeeb451e9164fff,0x199ab766fa2cc580,0x47ddaa1b36f2a8a0,0xc4c1b1ba26f5b34f,0xf7a21fdfcf8ad4d,0x942b084cd22a36}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf16e,0x5176,0xe09f,0xe2ae,0xa46d,0x6925,0xc9bd,0xbd66,0xdd5c,0x51ce,0xe98d,0x9ad3,0x6815,0x4476,0xbec8,0xa088,0x6799,0x1733,0xf164,0x1d16,0xc914,0x4af3,0x3b5b,0x84}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xf16e,0x5176,0xe09f,0xe2ae,0xa46d,0x6925,0xc9bd,0xbd66,0xdd5c,0x51ce,0xe98d,0x9ad3,0x6815,0x4476,0xbec8,0xa088,0x6799,0x1733,0xf164,0x1d16,0xc914,0x4af3,0x3b5b,0x84}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5176f16e,0xe2aee09f,0x6925a46d,0xbd66c9bd,0x51cedd5c,0x9ad3e98d,0x44766815,0xa088bec8,0x17336799,0x1d16f164,0x4af3c914,0x843b5b}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5176f16e,0xe2aee09f,0x6925a46d,0xbd66c9bd,0x51cedd5c,0x9ad3e98d,0x44766815,0xa088bec8,0x17336799,0x1d16f164,0x4af3c914,0x843b5b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe2aee09f5176f16e,0xbd66c9bd6925a46d,0x9ad3e98d51cedd5c,0xa088bec844766815,0x1d16f16417336799,0x843b5b4af3c914}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe2aee09f5176f16e,0xbd66c9bd6925a46d,0x9ad3e98d51cedd5c,0xa088bec844766815,0x1d16f16417336799,0x843b5b4af3c914}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x22c1,0x4088,0xbc14,0x9620,0x7d53,0x77ec,0x89,0xda12,0x54df,0x3806,0xb925,0x6092,0xd1b4,0x59e3,0x3419,0x9617,0x9ae6,0x48a9,0xf008,0xb452,0x9756,0x35cb,0x9c5d,0x49}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x22c1,0x4088,0xbc14,0x9620,0x7d53,0x77ec,0x89,0xda12,0x54df,0x3806,0xb925,0x6092,0xd1b4,0x59e3,0x3419,0x9617,0x9ae6,0x48a9,0xf008,0xb452,0x9756,0x35cb,0x9c5d,0x49}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x408822c1,0x9620bc14,0x77ec7d53,0xda120089,0x380654df,0x6092b925,0x59e3d1b4,0x96173419,0x48a99ae6,0xb452f008,0x35cb9756,0x499c5d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x408822c1,0x9620bc14,0x77ec7d53,0xda120089,0x380654df,0x6092b925,0x59e3d1b4,0x96173419,0x48a99ae6,0xb452f008,0x35cb9756,0x499c5d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9620bc14408822c1,0xda12008977ec7d53,0x6092b925380654df,0x9617341959e3d1b4,0xb452f00848a99ae6,0x499c5d35cb9756}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9620bc14408822c1,0xda12008977ec7d53,0x6092b925380654df,0x9617341959e3d1b4,0xb452f00848a99ae6,0x499c5d35cb9756}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb001,0x16e9,0x4bae,0xf511,0x3a7f,0x5d3,0x4899,0xe665,0x575f,0xc90d,0x55e4,0xb822,0x4cb0,0xd90a,0x4e45,0x3b3e,0x52b2,0x307,0xde02,0xf085,0xd5c9,0xb32d,0xd4f7,0x6b}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb001,0x16e9,0x4bae,0xf511,0x3a7f,0x5d3,0x4899,0xe665,0x575f,0xc90d,0x55e4,0xb822,0x4cb0,0xd90a,0x4e45,0x3b3e,0x52b2,0x307,0xde02,0xf085,0xd5c9,0xb32d,0xd4f7,0x6b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x16e9b001,0xf5114bae,0x5d33a7f,0xe6654899,0xc90d575f,0xb82255e4,0xd90a4cb0,0x3b3e4e45,0x30752b2,0xf085de02,0xb32dd5c9,0x6bd4f7}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x16e9b001,0xf5114bae,0x5d33a7f,0xe6654899,0xc90d575f,0xb82255e4,0xd90a4cb0,0x3b3e4e45,0x30752b2,0xf085de02,0xb32dd5c9,0x6bd4f7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf5114bae16e9b001,0xe665489905d33a7f,0xb82255e4c90d575f,0x3b3e4e45d90a4cb0,0xf085de02030752b2,0x6bd4f7b32dd5c9}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf5114bae16e9b001,0xe665489905d33a7f,0xb82255e4c90d575f,0x3b3e4e45d90a4cb0,0xf085de02030752b2,0x6bd4f7b32dd5c9}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x43c6,0x55d8,0x682a,0xc215,0x706e,0xac4c,0x5ce,0x1182,0x8b72,0x90e3,0xf04f,0x6a11,0xc345,0x3488,0x45b0,0x5d3f,0x556b,0x9896,0x7b20,0x8d46,0xa9e3,0x7b0c,0xd428,0xba}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x43c6,0x55d8,0x682a,0xc215,0x706e,0xac4c,0x5ce,0x1182,0x8b72,0x90e3,0xf04f,0x6a11,0xc345,0x3488,0x45b0,0x5d3f,0x556b,0x9896,0x7b20,0x8d46,0xa9e3,0x7b0c,0xd428,0xba}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x55d843c6,0xc215682a,0xac4c706e,0x118205ce,0x90e38b72,0x6a11f04f,0x3488c345,0x5d3f45b0,0x9896556b,0x8d467b20,0x7b0ca9e3,0xbad428}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x55d843c6,0xc215682a,0xac4c706e,0x118205ce,0x90e38b72,0x6a11f04f,0x3488c345,0x5d3f45b0,0x9896556b,0x8d467b20,0x7b0ca9e3,0xbad428}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc215682a55d843c6,0x118205ceac4c706e,0x6a11f04f90e38b72,0x5d3f45b03488c345,0x8d467b209896556b,0xbad4287b0ca9e3}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc215682a55d843c6,0x118205ceac4c706e,0x6a11f04f90e38b72,0x5d3f45b03488c345,0x8d467b209896556b,0xbad4287b0ca9e3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x91a5,0xf9ad,0x243c,0xedb9,0xc4f5,0xce5f,0xd6d7,0x3592,0x40df,0xdead,0x1489,0xe297,0x55b1,0xee4d,0xda9d,0x9e1f,0x4a5c,0xd99a,0x6c6b,0xa585,0x62fc,0x4383,0xc1ad,0xc0}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x91a5,0xf9ad,0x243c,0xedb9,0xc4f5,0xce5f,0xd6d7,0x3592,0x40df,0xdead,0x1489,0xe297,0x55b1,0xee4d,0xda9d,0x9e1f,0x4a5c,0xd99a,0x6c6b,0xa585,0x62fc,0x4383,0xc1ad,0xc0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xf9ad91a5,0xedb9243c,0xce5fc4f5,0x3592d6d7,0xdead40df,0xe2971489,0xee4d55b1,0x9e1fda9d,0xd99a4a5c,0xa5856c6b,0x438362fc,0xc0c1ad}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf9ad91a5,0xedb9243c,0xce5fc4f5,0x3592d6d7,0xdead40df,0xe2971489,0xee4d55b1,0x9e1fda9d,0xd99a4a5c,0xa5856c6b,0x438362fc,0xc0c1ad}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xedb9243cf9ad91a5,0x3592d6d7ce5fc4f5,0xe2971489dead40df,0x9e1fda9dee4d55b1,0xa5856c6bd99a4a5c,0xc0c1ad438362fc}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xedb9243cf9ad91a5,0x3592d6d7ce5fc4f5,0xe2971489dead40df,0x9e1fda9dee4d55b1,0xa5856c6bd99a4a5c,0xc0c1ad438362fc}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf454,0x6191,0x2181,0x2fc4,0x66fb,0xc44f,0x7bb6,0x9b1c,0x99f,0xee09,0xb1a3,0xf8f9,0xf234,0x5151,0x595c,0x4e44,0xa80a,0x305c,0x9930,0x25f6,0x8e50,0xb812,0xff4d,0xb8}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf454,0x6191,0x2181,0x2fc4,0x66fb,0xc44f,0x7bb6,0x9b1c,0x99f,0xee09,0xb1a3,0xf8f9,0xf234,0x5151,0x595c,0x4e44,0xa80a,0x305c,0x9930,0x25f6,0x8e50,0xb812,0xff4d,0xb8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6191f454,0x2fc42181,0xc44f66fb,0x9b1c7bb6,0xee09099f,0xf8f9b1a3,0x5151f234,0x4e44595c,0x305ca80a,0x25f69930,0xb8128e50,0xb8ff4d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6191f454,0x2fc42181,0xc44f66fb,0x9b1c7bb6,0xee09099f,0xf8f9b1a3,0x5151f234,0x4e44595c,0x305ca80a,0x25f69930,0xb8128e50,0xb8ff4d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2fc421816191f454,0x9b1c7bb6c44f66fb,0xf8f9b1a3ee09099f,0x4e44595c5151f234,0x25f69930305ca80a,0xb8ff4db8128e50}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2fc421816191f454,0x9b1c7bb6c44f66fb,0xf8f9b1a3ee09099f,0x4e44595c5151f234,0x25f69930305ca80a,0xb8ff4db8128e50}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xbc3a,0xaa27,0x97d5,0x3dea,0x8f91,0x53b3,0xfa31,0xee7d,0x748d,0x6f1c,0xfb0,0x95ee,0x3cba,0xcb77,0xba4f,0xa2c0,0xaa94,0x6769,0x84df,0x72b9,0x561c,0x84f3,0x2bd7,0x45}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xbc3a,0xaa27,0x97d5,0x3dea,0x8f91,0x53b3,0xfa31,0xee7d,0x748d,0x6f1c,0xfb0,0x95ee,0x3cba,0xcb77,0xba4f,0xa2c0,0xaa94,0x6769,0x84df,0x72b9,0x561c,0x84f3,0x2bd7,0x45}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xaa27bc3a,0x3dea97d5,0x53b38f91,0xee7dfa31,0x6f1c748d,0x95ee0fb0,0xcb773cba,0xa2c0ba4f,0x6769aa94,0x72b984df,0x84f3561c,0x452bd7}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xaa27bc3a,0x3dea97d5,0x53b38f91,0xee7dfa31,0x6f1c748d,0x95ee0fb0,0xcb773cba,0xa2c0ba4f,0x6769aa94,0x72b984df,0x84f3561c,0x452bd7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3dea97d5aa27bc3a,0xee7dfa3153b38f91,0x95ee0fb06f1c748d,0xa2c0ba4fcb773cba,0x72b984df6769aa94,0x452bd784f3561c}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3dea97d5aa27bc3a,0xee7dfa3153b38f91,0x95ee0fb06f1c748d,0xa2c0ba4fcb773cba,0x72b984df6769aa94,0x452bd784f3561c}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x10c4, 0x1e1a, 0x1b68, 0xa71, 0xa1a, 0x1f60, 0xdda, 0xb59, 0x1bc0, 0x26c, 0x1325, 0x4cf, 0x1375, 0x10ef, 0x1702, 0x1eff, 0x171f, 0x467, 0xc1c, 0xf3b, 0xf74, 0x6fa, 0x177f, 0x911, 0x8bc, 0x16b7, 0x1022, 0xa5f, 0xa55, 0x9} @@ 
-2165,223 +2165,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x807f,0x9ad4,0xa2d5,0xb571,0xe39a,0x9682,0xbc36,0x2fde,0x80a1,0x55a9,0x2092,0x49c9,0x4269,0x7a75,0x7411,0xde67,0x75d6,0x9689,0xaf23,0x7b89,0x819b,0x60e4,0x276f,0x1a}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x807f,0x9ad4,0xa2d5,0xb571,0xe39a,0x9682,0xbc36,0x2fde,0x80a1,0x55a9,0x2092,0x49c9,0x4269,0x7a75,0x7411,0xde67,0x75d6,0x9689,0xaf23,0x7b89,0x819b,0x60e4,0x276f,0x1a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9ad4807f,0xb571a2d5,0x9682e39a,0x2fdebc36,0x55a980a1,0x49c92092,0x7a754269,0xde677411,0x968975d6,0x7b89af23,0x60e4819b,0x1a276f}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9ad4807f,0xb571a2d5,0x9682e39a,0x2fdebc36,0x55a980a1,0x49c92092,0x7a754269,0xde677411,0x968975d6,0x7b89af23,0x60e4819b,0x1a276f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb571a2d59ad4807f,0x2fdebc369682e39a,0x49c9209255a980a1,0xde6774117a754269,0x7b89af23968975d6,0x1a276f60e4819b}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb571a2d59ad4807f,0x2fdebc369682e39a,0x49c9209255a980a1,0xde6774117a754269,0x7b89af23968975d6,0x1a276f60e4819b}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc876,0x218,0xd2f9,0x49ab,0x3ef7,0x1ab3,0x4dc4,0xf981,0x381a,0xdb9c,0x12c5,0xc5e1,0x5b21,0xc148,0x9d31,0x5369,0x5706,0x2277,0xcf63,0x1c7d,0xe151,0xd481,0xcd50,0x4b}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc876,0x218,0xd2f9,0x49ab,0x3ef7,0x1ab3,0x4dc4,0xf981,0x381a,0xdb9c,0x12c5,0xc5e1,0x5b21,0xc148,0x9d31,0x5369,0x5706,0x2277,0xcf63,0x1c7d,0xe151,0xd481,0xcd50,0x4b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x218c876,0x49abd2f9,0x1ab33ef7,0xf9814dc4,0xdb9c381a,0xc5e112c5,0xc1485b21,0x53699d31,0x22775706,0x1c7dcf63,0xd481e151,0x4bcd50}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x218c876,0x49abd2f9,0x1ab33ef7,0xf9814dc4,0xdb9c381a,0xc5e112c5,0xc1485b21,0x53699d31,0x22775706,0x1c7dcf63,0xd481e151,0x4bcd50}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x49abd2f90218c876,0xf9814dc41ab33ef7,0xc5e112c5db9c381a,0x53699d31c1485b21,0x1c7dcf6322775706,0x4bcd50d481e151}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x49abd2f90218c876,0xf9814dc41ab33ef7,0xc5e112c5db9c381a,0x53699d31c1485b21,0x1c7dcf6322775706,0x4bcd50d481e151}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2191,0x98ac,0x5708,0x78cc,0x63fa,0xe9c9,0x168,0x4c13,0x19e6,0xe2be,0xd010,0xd629,0xf3ff,0x3b08,0x7c2b,0xed3e,0x356e,0x5d94,0x253b,0xa0f5,0xc652,0x60d4,0x9dac,0x63}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2191,0x98ac,0x5708,0x78cc,0x63fa,0xe9c9,0x168,0x4c13,0x19e6,0xe2be,0xd010,0xd629,0xf3ff,0x3b08,0x7c2b,0xed3e,0x356e,0x5d94,0x253b,0xa0f5,0xc652,0x60d4,0x9dac,0x63}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x98ac2191,0x78cc5708,0xe9c963fa,0x4c130168,0xe2be19e6,0xd629d010,0x3b08f3ff,0xed3e7c2b,0x5d94356e,0xa0f5253b,0x60d4c652,0x639dac}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x98ac2191,0x78cc5708,0xe9c963fa,0x4c130168,0xe2be19e6,0xd629d010,0x3b08f3ff,0xed3e7c2b,0x5d94356e,0xa0f5253b,0x60d4c652,0x639dac}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x78cc570898ac2191,0x4c130168e9c963fa,0xd629d010e2be19e6,0xed3e7c2b3b08f3ff,0xa0f5253b5d94356e,0x639dac60d4c652}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x78cc570898ac2191,0x4c130168e9c963fa,0xd629d010e2be19e6,0xed3e7c2b3b08f3ff,0xa0f5253b5d94356e,0x639dac60d4c652}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7f81,0x652b,0x5d2a,0x4a8e,0x1c65,0x697d,0x43c9,0xd021,0x7f5e,0xaa56,0xdf6d,0xb636,0xbd96,0x858a,0x8bee,0x2198,0x8a29,0x6976,0x50dc,0x8476,0x7e64,0x9f1b,0xd890,0xe5}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7f81,0x652b,0x5d2a,0x4a8e,0x1c65,0x697d,0x43c9,0xd021,0x7f5e,0xaa56,0xdf6d,0xb636,0xbd96,0x858a,0x8bee,0x2198,0x8a29,0x6976,0x50dc,0x8476,0x7e64,0x9f1b,0xd890,0xe5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x652b7f81,0x4a8e5d2a,0x697d1c65,0xd02143c9,0xaa567f5e,0xb636df6d,0x858abd96,0x21988bee,0x69768a29,0x847650dc,0x9f1b7e64,0xe5d890}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x652b7f81,0x4a8e5d2a,0x697d1c65,0xd02143c9,0xaa567f5e,0xb636df6d,0x858abd96,0x21988bee,0x69768a29,0x847650dc,0x9f1b7e64,0xe5d890}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4a8e5d2a652b7f81,0xd02143c9697d1c65,0xb636df6daa567f5e,0x21988bee858abd96,0x847650dc69768a29,0xe5d8909f1b7e64}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4a8e5d2a652b7f81,0xd02143c9697d1c65,0xb636df6daa567f5e,0x21988bee858abd96,0x847650dc69768a29,0xe5d8909f1b7e64}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x313b,0xc18a,0x812a,0x406d,0x472a,0x9fca,0x9f07,0xb030,0x8b7b,0x7924,0x2af6,0x9e99,0x2b81,0x8eb8,0x35ee,0x59c8,0x7655,0x34cc,0x5aaf,0x326,0xe58d,0xf8b7,0x969a,0x6e}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x313b,0xc18a,0x812a,0x406d,0x472a,0x9fca,0x9f07,0xb030,0x8b7b,0x7924,0x2af6,0x9e99,0x2b81,0x8eb8,0x35ee,0x59c8,0x7655,0x34cc,0x5aaf,0x326,0xe58d,0xf8b7,0x969a,0x6e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc18a313b,0x406d812a,0x9fca472a,0xb0309f07,0x79248b7b,0x9e992af6,0x8eb82b81,0x59c835ee,0x34cc7655,0x3265aaf,0xf8b7e58d,0x6e969a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc18a313b,0x406d812a,0x9fca472a,0xb0309f07,0x79248b7b,0x9e992af6,0x8eb82b81,0x59c835ee,0x34cc7655,0x3265aaf,0xf8b7e58d,0x6e969a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x406d812ac18a313b,0xb0309f079fca472a,0x9e992af679248b7b,0x59c835ee8eb82b81,0x3265aaf34cc7655,0x6e969af8b7e58d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x406d812ac18a313b,0xb0309f079fca472a,0x9e992af679248b7b,0x59c835ee8eb82b81,0x3265aaf34cc7655,0x6e969af8b7e58d}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x6610,0xfd89,0xb147,0xcf39,0x2b02,0x4ccf,0xed64,0x8470,0xaaf6,0x1891,0x8c78,0xf074,0x8a4c,0xfaed,0xd66c,0xf52b,0xf1c5,0xb0a,0x5cd,0x46f8,0x79a3,0x81de,0x451d,0xd9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x6610,0xfd89,0xb147,0xcf39,0x2b02,0x4ccf,0xed64,0x8470,0xaaf6,0x1891,0x8c78,0xf074,0x8a4c,0xfaed,0xd66c,0xf52b,0xf1c5,0xb0a,0x5cd,0x46f8,0x79a3,0x81de,0x451d,0xd9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfd896610,0xcf39b147,0x4ccf2b02,0x8470ed64,0x1891aaf6,0xf0748c78,0xfaed8a4c,0xf52bd66c,0xb0af1c5,0x46f805cd,0x81de79a3,0xd9451d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xfd896610,0xcf39b147,0x4ccf2b02,0x8470ed64,0x1891aaf6,0xf0748c78,0xfaed8a4c,0xf52bd66c,0xb0af1c5,0x46f805cd,0x81de79a3,0xd9451d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xcf39b147fd896610,0x8470ed644ccf2b02,0xf0748c781891aaf6,0xf52bd66cfaed8a4c,0x46f805cd0b0af1c5,0xd9451d81de79a3}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xcf39b147fd896610,0x8470ed644ccf2b02,0xf0748c781891aaf6,0xf52bd66cfaed8a4c,0x46f805cd0b0af1c5,0xd9451d81de79a3}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1869,0x2ce0,0x425c,0x7d0f,0x30c8,0x1c3e,0xd562,0xfb41,0x3951,0xeccc,0x9c8a,0xb265,0x829,0xd879,0x3c42,0x2cbf,0xb1d2,0xd9d3,0xee28,0x7fdf,0xccdd,0x3ad,0xa6d9,0x3b}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1869,0x2ce0,0x425c,0x7d0f,0x30c8,0x1c3e,0xd562,0xfb41,0x3951,0xeccc,0x9c8a,0xb265,0x829,0xd879,0x3c42,0x2cbf,0xb1d2,0xd9d3,0xee28,0x7fdf,0xccdd,0x3ad,0xa6d9,0x3b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2ce01869,0x7d0f425c,0x1c3e30c8,0xfb41d562,0xeccc3951,0xb2659c8a,0xd8790829,0x2cbf3c42,0xd9d3b1d2,0x7fdfee28,0x3adccdd,0x3ba6d9}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2ce01869,0x7d0f425c,0x1c3e30c8,0xfb41d562,0xeccc3951,0xb2659c8a,0xd8790829,0x2cbf3c42,0xd9d3b1d2,0x7fdfee28,0x3adccdd,0x3ba6d9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x7d0f425c2ce01869,0xfb41d5621c3e30c8,0xb2659c8aeccc3951,0x2cbf3c42d8790829,0x7fdfee28d9d3b1d2,0x3ba6d903adccdd}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7d0f425c2ce01869,0xfb41d5621c3e30c8,0xb2659c8aeccc3951,0x2cbf3c42d8790829,0x7fdfee28d9d3b1d2,0x3ba6d903adccdd}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcec5,0x3e75,0x7ed5,0xbf92,0xb8d5,0x6035,0x60f8,0x4fcf,0x7484,0x86db,0xd509,0x6166,0xd47e,0x7147,0xca11,0xa637,0x89aa,0xcb33,0xa550,0xfcd9,0x1a72,0x748,0x6965,0x91}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcec5,0x3e75,0x7ed5,0xbf92,0xb8d5,0x6035,0x60f8,0x4fcf,0x7484,0x86db,0xd509,0x6166,0xd47e,0x7147,0xca11,0xa637,0x89aa,0xcb33,0xa550,0xfcd9,0x1a72,0x748,0x6965,0x91}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3e75cec5,0xbf927ed5,0x6035b8d5,0x4fcf60f8,0x86db7484,0x6166d509,0x7147d47e,0xa637ca11,0xcb3389aa,0xfcd9a550,0x7481a72,0x916965}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3e75cec5,0xbf927ed5,0x6035b8d5,0x4fcf60f8,0x86db7484,0x6166d509,0x7147d47e,0xa637ca11,0xcb3389aa,0xfcd9a550,0x7481a72,0x916965}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xbf927ed53e75cec5,0x4fcf60f86035b8d5,0x6166d50986db7484,0xa637ca117147d47e,0xfcd9a550cb3389aa,0x91696507481a72}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xbf927ed53e75cec5,0x4fcf60f86035b8d5,0x6166d50986db7484,0xa637ca117147d47e,0xfcd9a550cb3389aa,0x91696507481a72}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x807f,0x9ad4,0xa2d5,0xb571,0xe39a,0x9682,0xbc36,0x2fde,0x80a1,0x55a9,0x2092,0x49c9,0x4269,0x7a75,0x7411,0xde67,0x75d6,0x9689,0xaf23,0x7b89,0x819b,0x60e4,0x276f,0x1a}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x807f,0x9ad4,0xa2d5,0xb571,0xe39a,0x9682,0xbc36,0x2fde,0x80a1,0x55a9,0x2092,0x49c9,0x4269,0x7a75,0x7411,0xde67,0x75d6,0x9689,0xaf23,0x7b89,0x819b,0x60e4,0x276f,0x1a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9ad4807f,0xb571a2d5,0x9682e39a,0x2fdebc36,0x55a980a1,0x49c92092,0x7a754269,0xde677411,0x968975d6,0x7b89af23,0x60e4819b,0x1a276f}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9ad4807f,0xb571a2d5,0x9682e39a,0x2fdebc36,0x55a980a1,0x49c92092,0x7a754269,0xde677411,0x968975d6,0x7b89af23,0x60e4819b,0x1a276f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb571a2d59ad4807f,0x2fdebc369682e39a,0x49c9209255a980a1,0xde6774117a754269,0x7b89af23968975d6,0x1a276f60e4819b}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb571a2d59ad4807f,0x2fdebc369682e39a,0x49c9209255a980a1,0xde6774117a754269,0x7b89af23968975d6,0x1a276f60e4819b}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc876,0x218,0xd2f9,0x49ab,0x3ef7,0x1ab3,0x4dc4,0xf981,0x381a,0xdb9c,0x12c5,0xc5e1,0x5b21,0xc148,0x9d31,0x5369,0x5706,0x2277,0xcf63,0x1c7d,0xe151,0xd481,0xcd50,0x4b}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc876,0x218,0xd2f9,0x49ab,0x3ef7,0x1ab3,0x4dc4,0xf981,0x381a,0xdb9c,0x12c5,0xc5e1,0x5b21,0xc148,0x9d31,0x5369,0x5706,0x2277,0xcf63,0x1c7d,0xe151,0xd481,0xcd50,0x4b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x218c876,0x49abd2f9,0x1ab33ef7,0xf9814dc4,0xdb9c381a,0xc5e112c5,0xc1485b21,0x53699d31,0x22775706,0x1c7dcf63,0xd481e151,0x4bcd50}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x218c876,0x49abd2f9,0x1ab33ef7,0xf9814dc4,0xdb9c381a,0xc5e112c5,0xc1485b21,0x53699d31,0x22775706,0x1c7dcf63,0xd481e151,0x4bcd50}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x49abd2f90218c876,0xf9814dc41ab33ef7,0xc5e112c5db9c381a,0x53699d31c1485b21,0x1c7dcf6322775706,0x4bcd50d481e151}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x49abd2f90218c876,0xf9814dc41ab33ef7,0xc5e112c5db9c381a,0x53699d31c1485b21,0x1c7dcf6322775706,0x4bcd50d481e151}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2191,0x98ac,0x5708,0x78cc,0x63fa,0xe9c9,0x168,0x4c13,0x19e6,0xe2be,0xd010,0xd629,0xf3ff,0x3b08,0x7c2b,0xed3e,0x356e,0x5d94,0x253b,0xa0f5,0xc652,0x60d4,0x9dac,0x63}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2191,0x98ac,0x5708,0x78cc,0x63fa,0xe9c9,0x168,0x4c13,0x19e6,0xe2be,0xd010,0xd629,0xf3ff,0x3b08,0x7c2b,0xed3e,0x356e,0x5d94,0x253b,0xa0f5,0xc652,0x60d4,0x9dac,0x63}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x98ac2191,0x78cc5708,0xe9c963fa,0x4c130168,0xe2be19e6,0xd629d010,0x3b08f3ff,0xed3e7c2b,0x5d94356e,0xa0f5253b,0x60d4c652,0x639dac}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x98ac2191,0x78cc5708,0xe9c963fa,0x4c130168,0xe2be19e6,0xd629d010,0x3b08f3ff,0xed3e7c2b,0x5d94356e,0xa0f5253b,0x60d4c652,0x639dac}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x78cc570898ac2191,0x4c130168e9c963fa,0xd629d010e2be19e6,0xed3e7c2b3b08f3ff,0xa0f5253b5d94356e,0x639dac60d4c652}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x78cc570898ac2191,0x4c130168e9c963fa,0xd629d010e2be19e6,0xed3e7c2b3b08f3ff,0xa0f5253b5d94356e,0x639dac60d4c652}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7f81,0x652b,0x5d2a,0x4a8e,0x1c65,0x697d,0x43c9,0xd021,0x7f5e,0xaa56,0xdf6d,0xb636,0xbd96,0x858a,0x8bee,0x2198,0x8a29,0x6976,0x50dc,0x8476,0x7e64,0x9f1b,0xd890,0xe5}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7f81,0x652b,0x5d2a,0x4a8e,0x1c65,0x697d,0x43c9,0xd021,0x7f5e,0xaa56,0xdf6d,0xb636,0xbd96,0x858a,0x8bee,0x2198,0x8a29,0x6976,0x50dc,0x8476,0x7e64,0x9f1b,0xd890,0xe5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x652b7f81,0x4a8e5d2a,0x697d1c65,0xd02143c9,0xaa567f5e,0xb636df6d,0x858abd96,0x21988bee,0x69768a29,0x847650dc,0x9f1b7e64,0xe5d890}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x652b7f81,0x4a8e5d2a,0x697d1c65,0xd02143c9,0xaa567f5e,0xb636df6d,0x858abd96,0x21988bee,0x69768a29,0x847650dc,0x9f1b7e64,0xe5d890}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4a8e5d2a652b7f81,0xd02143c9697d1c65,0xb636df6daa567f5e,0x21988bee858abd96,0x847650dc69768a29,0xe5d8909f1b7e64}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4a8e5d2a652b7f81,0xd02143c9697d1c65,0xb636df6daa567f5e,0x21988bee858abd96,0x847650dc69768a29,0xe5d8909f1b7e64}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x29a3,0x7abe,0x2ef1,0x26a6,0xa5a5,0x54e6,0xf4c8,0xb56f,0x2bae,0x1aae,0xd9ba,0x94ed,0x2df5,0x882c,0xc686,0x6f64,0x29f7,0x850a,0x9eee,0x617c,0x5678,0x3108,0x8ebe,0x86}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x29a3,0x7abe,0x2ef1,0x26a6,0xa5a5,0x54e6,0xf4c8,0xb56f,0x2bae,0x1aae,0xd9ba,0x94ed,0x2df5,0x882c,0xc686,0x6f64,0x29f7,0x850a,0x9eee,0x617c,0x5678,0x3108,0x8ebe,0x86}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7abe29a3,0x26a62ef1,0x54e6a5a5,0xb56ff4c8,0x1aae2bae,0x94edd9ba,0x882c2df5,0x6f64c686,0x850a29f7,0x617c9eee,0x31085678,0x868ebe}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7abe29a3,0x26a62ef1,0x54e6a5a5,0xb56ff4c8,0x1aae2bae,0x94edd9ba,0x882c2df5,0x6f64c686,0x850a29f7,0x617c9eee,0x31085678,0x868ebe}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x26a62ef17abe29a3,0xb56ff4c854e6a5a5,0x94edd9ba1aae2bae,0x6f64c686882c2df5,0x617c9eee850a29f7,0x868ebe31085678}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x26a62ef17abe29a3,0xb56ff4c854e6a5a5,0x94edd9ba1aae2bae,0x6f64c686882c2df5,0x617c9eee850a29f7,0x868ebe31085678}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc2a5,0x8ce6,0x3729,0xaa2b,0xb9d2,0xbf43,0xe2be,0xaf25,0x4ffb,0xec8e,0xf85a,0x94c6,0xe027,0x3c64,0xf4ad,0xf63,0x86ba,0xa244,0xde0f,0x2390,0x11e1,0xdd7c,0xcd4c,0x33}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc2a5,0x8ce6,0x3729,0xaa2b,0xb9d2,0xbf43,0xe2be,0xaf25,0x4ffb,0xec8e,0xf85a,0x94c6,0xe027,0x3c64,0xf4ad,0xf63,0x86ba,0xa244,0xde0f,0x2390,0x11e1,0xdd7c,0xcd4c,0x33}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x8ce6c2a5,0xaa2b3729,0xbf43b9d2,0xaf25e2be,0xec8e4ffb,0x94c6f85a,0x3c64e027,0xf63f4ad,0xa24486ba,0x2390de0f,0xdd7c11e1,0x33cd4c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8ce6c2a5,0xaa2b3729,0xbf43b9d2,0xaf25e2be,0xec8e4ffb,0x94c6f85a,0x3c64e027,0xf63f4ad,0xa24486ba,0x2390de0f,0xdd7c11e1,0x33cd4c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xaa2b37298ce6c2a5,0xaf25e2bebf43b9d2,0x94c6f85aec8e4ffb,0xf63f4ad3c64e027,0x2390de0fa24486ba,0x33cd4cdd7c11e1}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xaa2b37298ce6c2a5,0xaf25e2bebf43b9d2,0x94c6f85aec8e4ffb,0xf63f4ad3c64e027,0x2390de0fa24486ba,0x33cd4cdd7c11e1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1893,0xa4bf,0x1eb8,0x9df0,0x91b1,0x17b0,0xe4ae,0x6ba1,0x35fd,0xd56b,0xc03f,0x82a8,0x99cd,0x30be,0xf3a3,0x181e,0x879b,0x518,0x3e8,0xed0e,0xc0ff,0xe2d6,0xe29c,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1893,0xa4bf,0x1eb8,0x9df0,0x91b1,0x17b0,0xe4ae,0x6ba1,0x35fd,0xd56b,0xc03f,0x82a8,0x99cd,0x30be,0xf3a3,0x181e,0x879b,0x518,0x3e8,0xed0e,0xc0ff,0xe2d6,0xe29c,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa4bf1893,0x9df01eb8,0x17b091b1,0x6ba1e4ae,0xd56b35fd,0x82a8c03f,0x30be99cd,0x181ef3a3,0x518879b,0xed0e03e8,0xe2d6c0ff,0x1e29c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa4bf1893,0x9df01eb8,0x17b091b1,0x6ba1e4ae,0xd56b35fd,0x82a8c03f,0x30be99cd,0x181ef3a3,0x518879b,0xed0e03e8,0xe2d6c0ff,0x1e29c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9df01eb8a4bf1893,0x6ba1e4ae17b091b1,0x82a8c03fd56b35fd,0x181ef3a330be99cd,0xed0e03e80518879b,0x1e29ce2d6c0ff}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9df01eb8a4bf1893,0x6ba1e4ae17b091b1,0x82a8c03fd56b35fd,0x181ef3a330be99cd,0xed0e03e80518879b,0x1e29ce2d6c0ff}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd65d,0x8541,0xd10e,0xd959,0x5a5a,0xab19,0xb37,0x4a90,0xd451,0xe551,0x2645,0x6b12,0xd20a,0x77d3,0x3979,0x909b,0xd608,0x7af5,0x6111,0x9e83,0xa987,0xcef7,0x7141,0x79}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd65d,0x8541,0xd10e,0xd959,0x5a5a,0xab19,0xb37,0x4a90,0xd451,0xe551,0x2645,0x6b12,0xd20a,0x77d3,0x3979,0x909b,0xd608,0x7af5,0x6111,0x9e83,0xa987,0xcef7,0x7141,0x79}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8541d65d,0xd959d10e,0xab195a5a,0x4a900b37,0xe551d451,0x6b122645,0x77d3d20a,0x909b3979,0x7af5d608,0x9e836111,0xcef7a987,0x797141}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8541d65d,0xd959d10e,0xab195a5a,0x4a900b37,0xe551d451,0x6b122645,0x77d3d20a,0x909b3979,0x7af5d608,0x9e836111,0xcef7a987,0x797141}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd959d10e8541d65d,0x4a900b37ab195a5a,0x6b122645e551d451,0x909b397977d3d20a,0x9e8361117af5d608,0x797141cef7a987}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd959d10e8541d65d,0x4a900b37ab195a5a,0x6b122645e551d451,0x909b397977d3d20a,0x9e8361117af5d608,0x797141cef7a987}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x187f, 0x1c7d, 0x163a, 0x54d, 0x197f, 0x1a5d, 0x1833, 0x823, 0x11ae, 0x225, 0xd6d, 0x1627, 0xd6c, 0x1625, 0xa36, 0xf64, 0xcfa, 0x327, 0x188d, 0xfab, 0x56b, 0x91c, 0x1cc5, 0x4fe, 0x2ae, 0x181a, 0x195, 0x1288, 0x10fc, 0x3} @@ -2641,223 +2641,223 
@@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x5419,0x8a5a,0x8d5e,0x1367,0xae77,0x3903,0xcf47,0xab31,0xf30c,0x9219,0xed73,0x2055,0xcc84,0x4098,0x7046,0x4d6f,0x5c68,0xe641,0x224,0xd5ed,0x977a,0xdfcd,0x15c8,0xe4}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x5419,0x8a5a,0x8d5e,0x1367,0xae77,0x3903,0xcf47,0xab31,0xf30c,0x9219,0xed73,0x2055,0xcc84,0x4098,0x7046,0x4d6f,0x5c68,0xe641,0x224,0xd5ed,0x977a,0xdfcd,0x15c8,0xe4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8a5a5419,0x13678d5e,0x3903ae77,0xab31cf47,0x9219f30c,0x2055ed73,0x4098cc84,0x4d6f7046,0xe6415c68,0xd5ed0224,0xdfcd977a,0xe415c8}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8a5a5419,0x13678d5e,0x3903ae77,0xab31cf47,0x9219f30c,0x2055ed73,0x4098cc84,0x4d6f7046,0xe6415c68,0xd5ed0224,0xdfcd977a,0xe415c8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x13678d5e8a5a5419,0xab31cf473903ae77,0x2055ed739219f30c,0x4d6f70464098cc84,0xd5ed0224e6415c68,0xe415c8dfcd977a}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x13678d5e8a5a5419,0xab31cf473903ae77,0x2055ed739219f30c,0x4d6f70464098cc84,0xd5ed0224e6415c68,0xe415c8dfcd977a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x92ea,0x29a5,0x41ba,0x7e17,0x33c,0xaeb2,0x4b64,0x1b0d,0x4be,0xe57e,0x2d2e,0x1554,0xe7f7,0xea1e,0x4267,0x7d2a,0x17cd,0xd234,0xc388,0x147a,0x43e6,0xaa63,0x83bd,0x4f}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x92ea,0x29a5,0x41ba,0x7e17,0x33c,0xaeb2,0x4b64,0x1b0d,0x4be,0xe57e,0x2d2e,0x1554,0xe7f7,0xea1e,0x4267,0x7d2a,0x17cd,0xd234,0xc388,0x147a,0x43e6,0xaa63,0x83bd,0x4f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x29a592ea,0x7e1741ba,0xaeb2033c,0x1b0d4b64,0xe57e04be,0x15542d2e,0xea1ee7f7,0x7d2a4267,0xd23417cd,0x147ac388,0xaa6343e6,0x4f83bd}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x29a592ea,0x7e1741ba,0xaeb2033c,0x1b0d4b64,0xe57e04be,0x15542d2e,0xea1ee7f7,0x7d2a4267,0xd23417cd,0x147ac388,0xaa6343e6,0x4f83bd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7e1741ba29a592ea,0x1b0d4b64aeb2033c,0x15542d2ee57e04be,0x7d2a4267ea1ee7f7,0x147ac388d23417cd,0x4f83bdaa6343e6}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7e1741ba29a592ea,0x1b0d4b64aeb2033c,0x15542d2ee57e04be,0x7d2a4267ea1ee7f7,0x147ac388d23417cd,0x4f83bdaa6343e6}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa8b7,0x5b17,0x819c,0x465e,0xa6fb,0xb2a1,0x91c3,0x953,0x486,0x4ed4,0xc367,0x75ba,0xcc01,0x115f,0xe242,0xe751,0x4748,0xbf53,0x4ec0,0x950a,0xe08e,0x7ea2,0xec82,0x7d}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa8b7,0x5b17,0x819c,0x465e,0xa6fb,0xb2a1,0x91c3,0x953,0x486,0x4ed4,0xc367,0x75ba,0xcc01,0x115f,0xe242,0xe751,0x4748,0xbf53,0x4ec0,0x950a,0xe08e,0x7ea2,0xec82,0x7d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5b17a8b7,0x465e819c,0xb2a1a6fb,0x95391c3,0x4ed40486,0x75bac367,0x115fcc01,0xe751e242,0xbf534748,0x950a4ec0,0x7ea2e08e,0x7dec82}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x5b17a8b7,0x465e819c,0xb2a1a6fb,0x95391c3,0x4ed40486,0x75bac367,0x115fcc01,0xe751e242,0xbf534748,0x950a4ec0,0x7ea2e08e,0x7dec82}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x465e819c5b17a8b7,0x95391c3b2a1a6fb,0x75bac3674ed40486,0xe751e242115fcc01,0x950a4ec0bf534748,0x7dec827ea2e08e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x465e819c5b17a8b7,0x95391c3b2a1a6fb,0x75bac3674ed40486,0xe751e242115fcc01,0x950a4ec0bf534748,0x7dec827ea2e08e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xabe7,0x75a5,0x72a1,0xec98,0x5188,0xc6fc,0x30b8,0x54ce,0xcf3,0x6de6,0x128c,0xdfaa,0x337b,0xbf67,0x8fb9,0xb290,0xa397,0x19be,0xfddb,0x2a12,0x6885,0x2032,0xea37,0x1b}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xabe7,0x75a5,0x72a1,0xec98,0x5188,0xc6fc,0x30b8,0x54ce,0xcf3,0x6de6,0x128c,0xdfaa,0x337b,0xbf67,0x8fb9,0xb290,0xa397,0x19be,0xfddb,0x2a12,0x6885,0x2032,0xea37,0x1b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x75a5abe7,0xec9872a1,0xc6fc5188,0x54ce30b8,0x6de60cf3,0xdfaa128c,0xbf67337b,0xb2908fb9,0x19bea397,0x2a12fddb,0x20326885,0x1bea37}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x75a5abe7,0xec9872a1,0xc6fc5188,0x54ce30b8,0x6de60cf3,0xdfaa128c,0xbf67337b,0xb2908fb9,0x19bea397,0x2a12fddb,0x20326885,0x1bea37}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xec9872a175a5abe7,0x54ce30b8c6fc5188,0xdfaa128c6de60cf3,0xb2908fb9bf67337b,0x2a12fddb19bea397,0x1bea3720326885}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xec9872a175a5abe7,0x54ce30b8c6fc5188,0xdfaa128c6de60cf3,0xb2908fb9bf67337b,0x2a12fddb19bea397,0x1bea3720326885}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xf28e,0x2e0a,0xfab7,0x1f58,0x11ea,0x243,0x39c3,0x28d4,0xe1ba,0x1878,0x3aec,0x4d87,0xf832,0x354a,0xa312,0x7416,0xb5b,0x68ee,0x1627,0x78d2,0xfbbc,0x152f,0x1cb2,0xd1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2e0af28e,0x1f58fab7,0x24311ea,0x28d439c3,0x1878e1ba,0x4d873aec,0x354af832,0x7416a312,0x68ee0b5b,0x78d21627,0x152ffbbc,0xd11cb2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1f58fab72e0af28e,0x28d439c3024311ea,0x4d873aec1878e1ba,0x7416a312354af832,0x78d2162768ee0b5b,0xd11cb2152ffbbc}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3134,0x8876,0x4246,0xdd4b,0x87d0,0x86b1,0x71e8,0x2d33,0xfe04,0xe6d8,0x6377,0xf6d6,0xdd4f,0x8b91,0x29d4,0x521e,0x3822,0xa1d5,0x2eb9,0x8c9c,0x35a1,0xab9f,0x2d4c,0x11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x88763134,0xdd4b4246,0x86b187d0,0x2d3371e8,0xe6d8fe04,0xf6d66377,0x8b91dd4f,0x521e29d4,0xa1d53822,0x8c9c2eb9,0xab9f35a1,0x112d4c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdd4b424688763134,0x2d3371e886b187d0,0xf6d66377e6d8fe04,0x521e29d48b91dd4f,0x8c9c2eb9a1d53822,0x112d4cab9f35a1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}}} #endif -}}, {{ +}}}, {{{ #if 
0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x703,0xe86d,0xe89e,0xbcf8,0x675b,0xe250,0x9f65,0xe8ec,0x2c83,0x11ca,0x4751,0x192a,0xf9d8,0xf46a,0xeb89,0x4f40,0x2a2c,0xdcf,0xfff9,0x13f9,0x24e7,0x8348,0xb9af,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x703,0xe86d,0xe89e,0xbcf8,0x675b,0xe250,0x9f65,0xe8ec,0x2c83,0x11ca,0x4751,0x192a,0xf9d8,0xf46a,0xeb89,0x4f40,0x2a2c,0xdcf,0xfff9,0x13f9,0x24e7,0x8348,0xb9af,0x6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe86d0703,0xbcf8e89e,0xe250675b,0xe8ec9f65,0x11ca2c83,0x192a4751,0xf46af9d8,0x4f40eb89,0xdcf2a2c,0x13f9fff9,0x834824e7,0x6b9af}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe86d0703,0xbcf8e89e,0xe250675b,0xe8ec9f65,0x11ca2c83,0x192a4751,0xf46af9d8,0x4f40eb89,0xdcf2a2c,0x13f9fff9,0x834824e7,0x6b9af}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xbcf8e89ee86d0703,0xe8ec9f65e250675b,0x192a475111ca2c83,0x4f40eb89f46af9d8,0x13f9fff90dcf2a2c,0x6b9af834824e7}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xbcf8e89ee86d0703,0xe8ec9f65e250675b,0x192a475111ca2c83,0x4f40eb89f46af9d8,0x13f9fff90dcf2a2c,0x6b9af834824e7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e40,0xb548,0xf9c7,0x6598,0x7e33,0x25c6,0x6cbf,0x2ef2,0xa630,0xdd99,0xaef2,0xf320,0x4a2,0x93a7,0x4541,0x2f7c,0xbf45,0x1a7a,0x24f4,0x52a9,0xd3b4,0xa12a,0x9d37,0xb0}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e40,0xb548,0xf9c7,0x6598,0x7e33,0x25c6,0x6cbf,0x2ef2,0xa630,0xdd99,0xaef2,0xf320,0x4a2,0x93a7,0x4541,0x2f7c,0xbf45,0x1a7a,0x24f4,0x52a9,0xd3b4,0xa12a,0x9d37,0xb0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb5481e40,0x6598f9c7,0x25c67e33,0x2ef26cbf,0xdd99a630,0xf320aef2,0x93a704a2,0x2f7c4541,0x1a7abf45,0x52a924f4,0xa12ad3b4,0xb09d37}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb5481e40,0x6598f9c7,0x25c67e33,0x2ef26cbf,0xdd99a630,0xf320aef2,0x93a704a2,0x2f7c4541,0x1a7abf45,0x52a924f4,0xa12ad3b4,0xb09d37}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6598f9c7b5481e40,0x2ef26cbf25c67e33,0xf320aef2dd99a630,0x2f7c454193a704a2,0x52a924f41a7abf45,0xb09d37a12ad3b4}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6598f9c7b5481e40,0x2ef26cbf25c67e33,0xf320aef2dd99a630,0x2f7c454193a704a2,0x52a924f41a7abf45,0xb09d37a12ad3b4}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e1,0x2283,0x3774,0x83d4,0xf33f,0x1fc,0x2790,0xde59,0xe89d,0xc942,0x2c1b,0x6574,0x55b1,0x3a3c,0x9f11,0xbb0a,0x6813,0xa69,0xff9d,0xc94c,0xdede,0xce6b,0x18c6,0xa9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e1,0x2283,0x3774,0x83d4,0xf33f,0x1fc,0x2790,0xde59,0xe89d,0xc942,0x2c1b,0x6574,0x55b1,0x3a3c,0x9f11,0xbb0a,0x6813,0xa69,0xff9d,0xc94c,0xdede,0xce6b,0x18c6,0xa9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x228301e1,0x83d43774,0x1fcf33f,0xde592790,0xc942e89d,0x65742c1b,0x3a3c55b1,0xbb0a9f11,0xa696813,0xc94cff9d,0xce6bdede,0xa918c6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x228301e1,0x83d43774,0x1fcf33f,0xde592790,0xc942e89d,0x65742c1b,0x3a3c55b1,0xbb0a9f11,0xa696813,0xc94cff9d,0xce6bdede,0xa918c6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x83d43774228301e1,0xde59279001fcf33f,0x65742c1bc942e89d,0xbb0a9f113a3c55b1,0xc94cff9d0a696813,0xa918c6ce6bdede}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x83d43774228301e1,0xde59279001fcf33f,0x65742c1bc942e89d,0xbb0a9f113a3c55b1,0xc94cff9d0a696813,0xa918c6ce6bdede}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf8fd,0x1792,0x1761,0x4307,0x98a4,0x1daf,0x609a,0x1713,0xd37c,0xee35,0xb8ae,0xe6d5,0x627,0xb95,0x1476,0xb0bf,0xd5d3,0xf230,0x6,0xec06,0xdb18,0x7cb7,0x4650,0xf9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xf8fd,0x1792,0x1761,0x4307,0x98a4,0x1daf,0x609a,0x1713,0xd37c,0xee35,0xb8ae,0xe6d5,0x627,0xb95,0x1476,0xb0bf,0xd5d3,0xf230,0x6,0xec06,0xdb18,0x7cb7,0x4650,0xf9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1792f8fd,0x43071761,0x1daf98a4,0x1713609a,0xee35d37c,0xe6d5b8ae,0xb950627,0xb0bf1476,0xf230d5d3,0xec060006,0x7cb7db18,0xf94650}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x1792f8fd,0x43071761,0x1daf98a4,0x1713609a,0xee35d37c,0xe6d5b8ae,0xb950627,0xb0bf1476,0xf230d5d3,0xec060006,0x7cb7db18,0xf94650}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x430717611792f8fd,0x1713609a1daf98a4,0xe6d5b8aeee35d37c,0xb0bf14760b950627,0xec060006f230d5d3,0xf946507cb7db18}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x430717611792f8fd,0x1713609a1daf98a4,0xe6d5b8aeee35d37c,0xb0bf14760b950627,0xec060006f230d5d3,0xf946507cb7db18}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x5419,0x8a5a,0x8d5e,0x1367,0xae77,0x3903,0xcf47,0xab31,0xf30c,0x9219,0xed73,0x2055,0xcc84,0x4098,0x7046,0x4d6f,0x5c68,0xe641,0x224,0xd5ed,0x977a,0xdfcd,0x15c8,0xe4}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x5419,0x8a5a,0x8d5e,0x1367,0xae77,0x3903,0xcf47,0xab31,0xf30c,0x9219,0xed73,0x2055,0xcc84,0x4098,0x7046,0x4d6f,0x5c68,0xe641,0x224,0xd5ed,0x977a,0xdfcd,0x15c8,0xe4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8a5a5419,0x13678d5e,0x3903ae77,0xab31cf47,0x9219f30c,0x2055ed73,0x4098cc84,0x4d6f7046,0xe6415c68,0xd5ed0224,0xdfcd977a,0xe415c8}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8a5a5419,0x13678d5e,0x3903ae77,0xab31cf47,0x9219f30c,0x2055ed73,0x4098cc84,0x4d6f7046,0xe6415c68,0xd5ed0224,0xdfcd977a,0xe415c8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x13678d5e8a5a5419,0xab31cf473903ae77,0x2055ed739219f30c,0x4d6f70464098cc84,0xd5ed0224e6415c68,0xe415c8dfcd977a}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x13678d5e8a5a5419,0xab31cf473903ae77,0x2055ed739219f30c,0x4d6f70464098cc84,0xd5ed0224e6415c68,0xe415c8dfcd977a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x92ea,0x29a5,0x41ba,0x7e17,0x33c,0xaeb2,0x4b64,0x1b0d,0x4be,0xe57e,0x2d2e,0x1554,0xe7f7,0xea1e,0x4267,0x7d2a,0x17cd,0xd234,0xc388,0x147a,0x43e6,0xaa63,0x83bd,0x4f}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x92ea,0x29a5,0x41ba,0x7e17,0x33c,0xaeb2,0x4b64,0x1b0d,0x4be,0xe57e,0x2d2e,0x1554,0xe7f7,0xea1e,0x4267,0x7d2a,0x17cd,0xd234,0xc388,0x147a,0x43e6,0xaa63,0x83bd,0x4f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x29a592ea,0x7e1741ba,0xaeb2033c,0x1b0d4b64,0xe57e04be,0x15542d2e,0xea1ee7f7,0x7d2a4267,0xd23417cd,0x147ac388,0xaa6343e6,0x4f83bd}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x29a592ea,0x7e1741ba,0xaeb2033c,0x1b0d4b64,0xe57e04be,0x15542d2e,0xea1ee7f7,0x7d2a4267,0xd23417cd,0x147ac388,0xaa6343e6,0x4f83bd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7e1741ba29a592ea,0x1b0d4b64aeb2033c,0x15542d2ee57e04be,0x7d2a4267ea1ee7f7,0x147ac388d23417cd,0x4f83bdaa6343e6}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7e1741ba29a592ea,0x1b0d4b64aeb2033c,0x15542d2ee57e04be,0x7d2a4267ea1ee7f7,0x147ac388d23417cd,0x4f83bdaa6343e6}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa8b7,0x5b17,0x819c,0x465e,0xa6fb,0xb2a1,0x91c3,0x953,0x486,0x4ed4,0xc367,0x75ba,0xcc01,0x115f,0xe242,0xe751,0x4748,0xbf53,0x4ec0,0x950a,0xe08e,0x7ea2,0xec82,0x7d}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa8b7,0x5b17,0x819c,0x465e,0xa6fb,0xb2a1,0x91c3,0x953,0x486,0x4ed4,0xc367,0x75ba,0xcc01,0x115f,0xe242,0xe751,0x4748,0xbf53,0x4ec0,0x950a,0xe08e,0x7ea2,0xec82,0x7d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5b17a8b7,0x465e819c,0xb2a1a6fb,0x95391c3,0x4ed40486,0x75bac367,0x115fcc01,0xe751e242,0xbf534748,0x950a4ec0,0x7ea2e08e,0x7dec82}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x5b17a8b7,0x465e819c,0xb2a1a6fb,0x95391c3,0x4ed40486,0x75bac367,0x115fcc01,0xe751e242,0xbf534748,0x950a4ec0,0x7ea2e08e,0x7dec82}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x465e819c5b17a8b7,0x95391c3b2a1a6fb,0x75bac3674ed40486,0xe751e242115fcc01,0x950a4ec0bf534748,0x7dec827ea2e08e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x465e819c5b17a8b7,0x95391c3b2a1a6fb,0x75bac3674ed40486,0xe751e242115fcc01,0x950a4ec0bf534748,0x7dec827ea2e08e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xabe7,0x75a5,0x72a1,0xec98,0x5188,0xc6fc,0x30b8,0x54ce,0xcf3,0x6de6,0x128c,0xdfaa,0x337b,0xbf67,0x8fb9,0xb290,0xa397,0x19be,0xfddb,0x2a12,0x6885,0x2032,0xea37,0x1b}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xabe7,0x75a5,0x72a1,0xec98,0x5188,0xc6fc,0x30b8,0x54ce,0xcf3,0x6de6,0x128c,0xdfaa,0x337b,0xbf67,0x8fb9,0xb290,0xa397,0x19be,0xfddb,0x2a12,0x6885,0x2032,0xea37,0x1b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x75a5abe7,0xec9872a1,0xc6fc5188,0x54ce30b8,0x6de60cf3,0xdfaa128c,0xbf67337b,0xb2908fb9,0x19bea397,0x2a12fddb,0x20326885,0x1bea37}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x75a5abe7,0xec9872a1,0xc6fc5188,0x54ce30b8,0x6de60cf3,0xdfaa128c,0xbf67337b,0xb2908fb9,0x19bea397,0x2a12fddb,0x20326885,0x1bea37}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xec9872a175a5abe7,0x54ce30b8c6fc5188,0xdfaa128c6de60cf3,0xb2908fb9bf67337b,0x2a12fddb19bea397,0x1bea3720326885}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xec9872a175a5abe7,0x54ce30b8c6fc5188,0xdfaa128c6de60cf3,0xb2908fb9bf67337b,0x2a12fddb19bea397,0x1bea3720326885}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} 
+{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x7947,0x9705,0x7d5b,0xfac,0x88f5,0x8121,0x1ce1,0x146a,0x70dd,0xc3c,0x9d76,0x26c3,0x7c19,0x1aa5,0x5189,0xba0b,0x5ad,0xb477,0xb13,0x3c69,0xfdde,0xa97,0x8e59,0x68}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x97057947,0xfac7d5b,0x812188f5,0x146a1ce1,0xc3c70dd,0x26c39d76,0x1aa57c19,0xba0b5189,0xb47705ad,0x3c690b13,0xa97fdde,0x688e59}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfac7d5b97057947,0x146a1ce1812188f5,0x26c39d760c3c70dd,0xba0b51891aa57c19,0x3c690b13b47705ad,0x688e590a97fdde}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x189a,0x443b,0xa123,0x6ea5,0xc3e8,0x4358,0xb8f4,0x1699,0x7f02,0xf36c,0x31bb,0xfb6b,0xeea7,0x45c8,0x14ea,0x290f,0x9c11,0xd0ea,0x175c,0xc64e,0x9ad0,0x55cf,0x96a6,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x443b189a,0x6ea5a123,0x4358c3e8,0x1699b8f4,0xf36c7f02,0xfb6b31bb,0x45c8eea7,0x290f14ea,0xd0ea9c11,0xc64e175c,0x55cf9ad0,0x896a6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x6ea5a123443b189a,0x1699b8f44358c3e8,0xfb6b31bbf36c7f02,0x290f14ea45c8eea7,0xc64e175cd0ea9c11,0x896a655cf9ad0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2d5d,0x46e9,0x4215,0x63b0,0x8358,0xdc91,0x80aa,0x6970,0x4e7d,0x266d,0xc13a,0xe4ea,0x504e,0xbc38,0xdbaf,0x119b,0xa3cc,0x45d8,0x98db,0x7b90,0x3a5b,0xde6a,0x3676,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x2d5d,0x46e9,0x4215,0x63b0,0x8358,0xdc91,0x80aa,0x6970,0x4e7d,0x266d,0xc13a,0xe4ea,0x504e,0xbc38,0xdbaf,0x119b,0xa3cc,0x45d8,0x98db,0x7b90,0x3a5b,0xde6a,0x3676,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x46e92d5d,0x63b04215,0xdc918358,0x697080aa,0x266d4e7d,0xe4eac13a,0xbc38504e,0x119bdbaf,0x45d8a3cc,0x7b9098db,0xde6a3a5b,0x83676}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x46e92d5d,0x63b04215,0xdc918358,0x697080aa,0x266d4e7d,0xe4eac13a,0xbc38504e,0x119bdbaf,0x45d8a3cc,0x7b9098db,0xde6a3a5b,0x83676}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x63b0421546e92d5d,0x697080aadc918358,0xe4eac13a266d4e7d,0x119bdbafbc38504e,0x7b9098db45d8a3cc,0x83676de6a3a5b}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x63b0421546e92d5d,0x697080aadc918358,0xe4eac13a266d4e7d,0x119bdbafbc38504e,0x7b9098db45d8a3cc,0x83676de6a3a5b}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1db1,0x61ae,0x220b,0xc2e,0xa7ee,0xb16a,0x8697,0xf90c,0x7505,0xced5,0x5cf8,0xb601,0x6235,0x27ad,0x9fdf,0x57d0,0xca2,0xa6d2,0x94db,0xb53a,0x8bd2,0xa3ad,0xfe95,0x92}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1db1,0x61ae,0x220b,0xc2e,0xa7ee,0xb16a,0x8697,0xf90c,0x7505,0xced5,0x5cf8,0xb601,0x6235,0x27ad,0x9fdf,0x57d0,0xca2,0xa6d2,0x94db,0xb53a,0x8bd2,0xa3ad,0xfe95,0x92}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x61ae1db1,0xc2e220b,0xb16aa7ee,0xf90c8697,0xced57505,0xb6015cf8,0x27ad6235,0x57d09fdf,0xa6d20ca2,0xb53a94db,0xa3ad8bd2,0x92fe95}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x61ae1db1,0xc2e220b,0xb16aa7ee,0xf90c8697,0xced57505,0xb6015cf8,0x27ad6235,0x57d09fdf,0xa6d20ca2,0xb53a94db,0xa3ad8bd2,0x92fe95}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc2e220b61ae1db1,0xf90c8697b16aa7ee,0xb6015cf8ced57505,0x57d09fdf27ad6235,0xb53a94dba6d20ca2,0x92fe95a3ad8bd2}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc2e220b61ae1db1,0xf90c8697b16aa7ee,0xb6015cf8ced57505,0x57d09fdf27ad6235,0xb53a94dba6d20ca2,0x92fe95a3ad8bd2}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa809,0xf0cf,0xb393,0xf0ab,0x181a,0xb5bc,0x1833,0xb0ea,0xff0e,0x3088,0xb299,0x4f5c,0x5a20,0x5b86,0xad7b,0x9ffd,0x2216,0x4e4c,0xb8eb,0x989,0x712f,0xa798,0x8e8f,0x45}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa809,0xf0cf,0xb393,0xf0ab,0x181a,0xb5bc,0x1833,0xb0ea,0xff0e,0x3088,0xb299,0x4f5c,0x5a20,0x5b86,0xad7b,0x9ffd,0x2216,0x4e4c,0xb8eb,0x989,0x712f,0xa798,0x8e8f,0x45}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf0cfa809,0xf0abb393,0xb5bc181a,0xb0ea1833,0x3088ff0e,0x4f5cb299,0x5b865a20,0x9ffdad7b,0x4e4c2216,0x989b8eb,0xa798712f,0x458e8f}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf0cfa809,0xf0abb393,0xb5bc181a,0xb0ea1833,0x3088ff0e,0x4f5cb299,0x5b865a20,0x9ffdad7b,0x4e4c2216,0x989b8eb,0xa798712f,0x458e8f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf0abb393f0cfa809,0xb0ea1833b5bc181a,0x4f5cb2993088ff0e,0x9ffdad7b5b865a20,0x989b8eb4e4c2216,0x458e8fa798712f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf0abb393f0cfa809,0xb0ea1833b5bc181a,0x4f5cb2993088ff0e,0x9ffdad7b5b865a20,0x989b8eb4e4c2216,0x458e8fa798712f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd2a3,0xb916,0xbdea,0x9c4f,0x7ca7,0x236e,0x7f55,0x968f,0xb182,0xd992,0x3ec5,0x1b15,0xafb1,0x43c7,0x2450,0xee64,0x5c33,0xba27,0x6724,0x846f,0xc5a4,0x2195,0xc989,0xf7}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd2a3,0xb916,0xbdea,0x9c4f,0x7ca7,0x236e,0x7f55,0x968f,0xb182,0xd992,0x3ec5,0x1b15,0xafb1,0x43c7,0x2450,0xee64,0x5c33,0xba27,0x6724,0x846f,0xc5a4,0x2195,0xc989,0xf7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb916d2a3,0x9c4fbdea,0x236e7ca7,0x968f7f55,0xd992b182,0x1b153ec5,0x43c7afb1,0xee642450,0xba275c33,0x846f6724,0x2195c5a4,0xf7c989}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb916d2a3,0x9c4fbdea,0x236e7ca7,0x968f7f55,0xd992b182,0x1b153ec5,0x43c7afb1,0xee642450,0xba275c33,0x846f6724,0x2195c5a4,0xf7c989}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c4fbdeab916d2a3,0x968f7f55236e7ca7,0x1b153ec5d992b182,0xee64245043c7afb1,0x846f6724ba275c33,0xf7c9892195c5a4}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9c4fbdeab916d2a3,0x968f7f55236e7ca7,0x1b153ec5d992b182,0xee64245043c7afb1,0x846f6724ba275c33,0xf7c9892195c5a4}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x17bf, 0xbb8, 0x485, 0x1296, 0x1b32, 0xe0b, 0x13f9, 0x15f5, 0x1a2b, 0xff6, 0xf53, 0x70a, 0x36c, 0x40f, 0xa89, 0xca6, 0x37e, 0x1488, 0x1796, 0x1be2, 0x1e0f, 0xf10, 0x1628, 0x1a20, 0x1ee7, 0x1e53, 0x48c, 0x94, 0xd53, 0xd} @@ -3117,223 +3117,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 
-{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc761,0xa939,0x45c3,0xca85,0x6b48,0xe2f1,0x7d5b,0x8e01,0xb824,0x1b34,0x31da,0x3fa1,0xaae4,0x143a,0xc0b8,0xe32f,0x843,0x2c29,0xd142,0x4e56,0xd55c,0xc504,0x24cd,0xc5}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc761,0xa939,0x45c3,0xca85,0x6b48,0xe2f1,0x7d5b,0x8e01,0xb824,0x1b34,0x31da,0x3fa1,0xaae4,0x143a,0xc0b8,0xe32f,0x843,0x2c29,0xd142,0x4e56,0xd55c,0xc504,0x24cd,0xc5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa939c761,0xca8545c3,0xe2f16b48,0x8e017d5b,0x1b34b824,0x3fa131da,0x143aaae4,0xe32fc0b8,0x2c290843,0x4e56d142,0xc504d55c,0xc524cd}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa939c761,0xca8545c3,0xe2f16b48,0x8e017d5b,0x1b34b824,0x3fa131da,0x143aaae4,0xe32fc0b8,0x2c290843,0x4e56d142,0xc504d55c,0xc524cd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xca8545c3a939c761,0x8e017d5be2f16b48,0x3fa131da1b34b824,0xe32fc0b8143aaae4,0x4e56d1422c290843,0xc524cdc504d55c}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xca8545c3a939c761,0x8e017d5be2f16b48,0x3fa131da1b34b824,0xe32fc0b8143aaae4,0x4e56d1422c290843,0xc524cdc504d55c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe87a,0x6127,0x8903,0x2308,0xfbed,0x12dc,0x3209,0xc898,0x715,0x2ab8,0x446f,0x1078,0x2808,0x2493,0x34a9,0xf92b,0x96f4,0xa3a,0xf860,0xd4ad,0xc8a9,0x410d,0x626b,0xe5}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe87a,0x6127,0x8903,0x2308,0xfbed,0x12dc,0x3209,0xc898,0x715,0x2ab8,0x446f,0x1078,0x2808,0x2493,0x34a9,0xf92b,0x96f4,0xa3a,0xf860,0xd4ad,0xc8a9,0x410d,0x626b,0xe5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6127e87a,0x23088903,0x12dcfbed,0xc8983209,0x2ab80715,0x1078446f,0x24932808,0xf92b34a9,0xa3a96f4,0xd4adf860,0x410dc8a9,0xe5626b}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6127e87a,0x23088903,0x12dcfbed,0xc8983209,0x2ab80715,0x1078446f,0x24932808,0xf92b34a9,0xa3a96f4,0xd4adf860,0x410dc8a9,0xe5626b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x230889036127e87a,0xc898320912dcfbed,0x1078446f2ab80715,0xf92b34a924932808,0xd4adf8600a3a96f4,0xe5626b410dc8a9}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x230889036127e87a,0xc898320912dcfbed,0x1078446f2ab80715,0xf92b34a924932808,0xd4adf8600a3a96f4,0xe5626b410dc8a9}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa06f,0xcd2e,0x736e,0xdfff,0x28f9,0xe694,0x7c39,0xab16,0x21f5,0xbc4,0x4883,0xe8f6,0xcaae,0xf3c3,0xa9f1,0x8d46,0x16e4,0xa8e2,0x5d95,0x3838,0xa6f1,0x34b7,0x94da,0xde}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa06f,0xcd2e,0x736e,0xdfff,0x28f9,0xe694,0x7c39,0xab16,0x21f5,0xbc4,0x4883,0xe8f6,0xcaae,0xf3c3,0xa9f1,0x8d46,0x16e4,0xa8e2,0x5d95,0x3838,0xa6f1,0x34b7,0x94da,0xde}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcd2ea06f,0xdfff736e,0xe69428f9,0xab167c39,0xbc421f5,0xe8f64883,0xf3c3caae,0x8d46a9f1,0xa8e216e4,0x38385d95,0x34b7a6f1,0xde94da}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcd2ea06f,0xdfff736e,0xe69428f9,0xab167c39,0xbc421f5,0xe8f64883,0xf3c3caae,0x8d46a9f1,0xa8e216e4,0x38385d95,0x34b7a6f1,0xde94da}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0xdfff736ecd2ea06f,0xab167c39e69428f9,0xe8f648830bc421f5,0x8d46a9f1f3c3caae,0x38385d95a8e216e4,0xde94da34b7a6f1}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdfff736ecd2ea06f,0xab167c39e69428f9,0xe8f648830bc421f5,0x8d46a9f1f3c3caae,0x38385d95a8e216e4,0xde94da34b7a6f1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x389f,0x56c6,0xba3c,0x357a,0x94b7,0x1d0e,0x82a4,0x71fe,0x47db,0xe4cb,0xce25,0xc05e,0x551b,0xebc5,0x3f47,0x1cd0,0xf7bc,0xd3d6,0x2ebd,0xb1a9,0x2aa3,0x3afb,0xdb32,0x3a}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x389f,0x56c6,0xba3c,0x357a,0x94b7,0x1d0e,0x82a4,0x71fe,0x47db,0xe4cb,0xce25,0xc05e,0x551b,0xebc5,0x3f47,0x1cd0,0xf7bc,0xd3d6,0x2ebd,0xb1a9,0x2aa3,0x3afb,0xdb32,0x3a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x56c6389f,0x357aba3c,0x1d0e94b7,0x71fe82a4,0xe4cb47db,0xc05ece25,0xebc5551b,0x1cd03f47,0xd3d6f7bc,0xb1a92ebd,0x3afb2aa3,0x3adb32}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x56c6389f,0x357aba3c,0x1d0e94b7,0x71fe82a4,0xe4cb47db,0xc05ece25,0xebc5551b,0x1cd03f47,0xd3d6f7bc,0xb1a92ebd,0x3afb2aa3,0x3adb32}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x357aba3c56c6389f,0x71fe82a41d0e94b7,0xc05ece25e4cb47db,0x1cd03f47ebc5551b,0xb1a92ebdd3d6f7bc,0x3adb323afb2aa3}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x357aba3c56c6389f,0x71fe82a41d0e94b7,0xc05ece25e4cb47db,0x1cd03f47ebc5551b,0xb1a92ebdd3d6f7bc,0x3adb323afb2aa3}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac65,0x6102,0xe1f0,0x7b39,0x64be,0xff4d,0x8256,0xd11b,0x4645,0x7a89,0x814c,0x66e7,0x77a,0xc4d8,0xe691,0x1f42,0xfdb9,0x547b,0x752,0x18d9,0x9279,0xe604,0xbed4,0xec}}} 
+{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac65,0x6102,0xe1f0,0x7b39,0x64be,0xff4d,0x8256,0xd11b,0x4645,0x7a89,0x814c,0x66e7,0x77a,0xc4d8,0xe691,0x1f42,0xfdb9,0x547b,0x752,0x18d9,0x9279,0xe604,0xbed4,0xec}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6102ac65,0x7b39e1f0,0xff4d64be,0xd11b8256,0x7a894645,0x66e7814c,0xc4d8077a,0x1f42e691,0x547bfdb9,0x18d90752,0xe6049279,0xecbed4}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6102ac65,0x7b39e1f0,0xff4d64be,0xd11b8256,0x7a894645,0x66e7814c,0xc4d8077a,0x1f42e691,0x547bfdb9,0x18d90752,0xe6049279,0xecbed4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7b39e1f06102ac65,0xd11b8256ff4d64be,0x66e7814c7a894645,0x1f42e691c4d8077a,0x18d90752547bfdb9,0xecbed4e6049279}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7b39e1f06102ac65,0xd11b8256ff4d64be,0x66e7814c7a894645,0x1f42e691c4d8077a,0x18d90752547bfdb9,0xecbed4e6049279}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3380,0xe477,0x9e18,0x218d,0xddc6,0x4cc5,0xb33f,0x59e7,0xb291,0xa1a1,0x8f77,0x92a2,0x480e,0x82af,0x40f1,0x5d48,0x83b0,0x4229,0xcb9e,0xff7a,0x2e32,0xa78,0x71fc,0x16}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3380,0xe477,0x9e18,0x218d,0xddc6,0x4cc5,0xb33f,0x59e7,0xb291,0xa1a1,0x8f77,0x92a2,0x480e,0x82af,0x40f1,0x5d48,0x83b0,0x4229,0xcb9e,0xff7a,0x2e32,0xa78,0x71fc,0x16}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe4773380,0x218d9e18,0x4cc5ddc6,0x59e7b33f,0xa1a1b291,0x92a28f77,0x82af480e,0x5d4840f1,0x422983b0,0xff7acb9e,0xa782e32,0x1671fc}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe4773380,0x218d9e18,0x4cc5ddc6,0x59e7b33f,0xa1a1b291,0x92a28f77,0x82af480e,0x5d4840f1,0x422983b0,0xff7acb9e,0xa782e32,0x1671fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x218d9e18e4773380,0x59e7b33f4cc5ddc6,0x92a28f77a1a1b291,0x5d4840f182af480e,0xff7acb9e422983b0,0x1671fc0a782e32}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x218d9e18e4773380,0x59e7b33f4cc5ddc6,0x92a28f77a1a1b291,0x5d4840f182af480e,0xff7acb9e422983b0,0x1671fc0a782e32}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xbb17,0xaa62,0x774e,0x2e59,0xe440,0xebce,0x874e,0xbfdb,0x3afd,0xa7ba,0xded2,0x78aa,0x7568,0xcfed,0x5633,0xa1de,0x4c5e,0x5796,0x5727,0xec25,0xac0a,0xce9c,0x3f13,0x98}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xbb17,0xaa62,0x774e,0x2e59,0xe440,0xebce,0x874e,0xbfdb,0x3afd,0xa7ba,0xded2,0x78aa,0x7568,0xcfed,0x5633,0xa1de,0x4c5e,0x5796,0x5727,0xec25,0xac0a,0xce9c,0x3f13,0x98}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xaa62bb17,0x2e59774e,0xebcee440,0xbfdb874e,0xa7ba3afd,0x78aaded2,0xcfed7568,0xa1de5633,0x57964c5e,0xec255727,0xce9cac0a,0x983f13}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xaa62bb17,0x2e59774e,0xebcee440,0xbfdb874e,0xa7ba3afd,0x78aaded2,0xcfed7568,0xa1de5633,0x57964c5e,0xec255727,0xce9cac0a,0x983f13}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2e59774eaa62bb17,0xbfdb874eebcee440,0x78aaded2a7ba3afd,0xa1de5633cfed7568,0xec25572757964c5e,0x983f13ce9cac0a}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x2e59774eaa62bb17,0xbfdb874eebcee440,0x78aaded2a7ba3afd,0xa1de5633cfed7568,0xec25572757964c5e,0x983f13ce9cac0a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x539b,0x9efd,0x1e0f,0x84c6,0x9b41,0xb2,0x7da9,0x2ee4,0xb9ba,0x8576,0x7eb3,0x9918,0xf885,0x3b27,0x196e,0xe0bd,0x246,0xab84,0xf8ad,0xe726,0x6d86,0x19fb,0x412b,0x13}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x539b,0x9efd,0x1e0f,0x84c6,0x9b41,0xb2,0x7da9,0x2ee4,0xb9ba,0x8576,0x7eb3,0x9918,0xf885,0x3b27,0x196e,0xe0bd,0x246,0xab84,0xf8ad,0xe726,0x6d86,0x19fb,0x412b,0x13}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9efd539b,0x84c61e0f,0xb29b41,0x2ee47da9,0x8576b9ba,0x99187eb3,0x3b27f885,0xe0bd196e,0xab840246,0xe726f8ad,0x19fb6d86,0x13412b}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9efd539b,0x84c61e0f,0xb29b41,0x2ee47da9,0x8576b9ba,0x99187eb3,0x3b27f885,0xe0bd196e,0xab840246,0xe726f8ad,0x19fb6d86,0x13412b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x84c61e0f9efd539b,0x2ee47da900b29b41,0x99187eb38576b9ba,0xe0bd196e3b27f885,0xe726f8adab840246,0x13412b19fb6d86}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x84c61e0f9efd539b,0x2ee47da900b29b41,0x99187eb38576b9ba,0xe0bd196e3b27f885,0xe726f8adab840246,0x13412b19fb6d86}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc761,0xa939,0x45c3,0xca85,0x6b48,0xe2f1,0x7d5b,0x8e01,0xb824,0x1b34,0x31da,0x3fa1,0xaae4,0x143a,0xc0b8,0xe32f,0x843,0x2c29,0xd142,0x4e56,0xd55c,0xc504,0x24cd,0xc5}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc761,0xa939,0x45c3,0xca85,0x6b48,0xe2f1,0x7d5b,0x8e01,0xb824,0x1b34,0x31da,0x3fa1,0xaae4,0x143a,0xc0b8,0xe32f,0x843,0x2c29,0xd142,0x4e56,0xd55c,0xc504,0x24cd,0xc5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa939c761,0xca8545c3,0xe2f16b48,0x8e017d5b,0x1b34b824,0x3fa131da,0x143aaae4,0xe32fc0b8,0x2c290843,0x4e56d142,0xc504d55c,0xc524cd}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xa939c761,0xca8545c3,0xe2f16b48,0x8e017d5b,0x1b34b824,0x3fa131da,0x143aaae4,0xe32fc0b8,0x2c290843,0x4e56d142,0xc504d55c,0xc524cd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xca8545c3a939c761,0x8e017d5be2f16b48,0x3fa131da1b34b824,0xe32fc0b8143aaae4,0x4e56d1422c290843,0xc524cdc504d55c}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xca8545c3a939c761,0x8e017d5be2f16b48,0x3fa131da1b34b824,0xe32fc0b8143aaae4,0x4e56d1422c290843,0xc524cdc504d55c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe87a,0x6127,0x8903,0x2308,0xfbed,0x12dc,0x3209,0xc898,0x715,0x2ab8,0x446f,0x1078,0x2808,0x2493,0x34a9,0xf92b,0x96f4,0xa3a,0xf860,0xd4ad,0xc8a9,0x410d,0x626b,0xe5}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe87a,0x6127,0x8903,0x2308,0xfbed,0x12dc,0x3209,0xc898,0x715,0x2ab8,0x446f,0x1078,0x2808,0x2493,0x34a9,0xf92b,0x96f4,0xa3a,0xf860,0xd4ad,0xc8a9,0x410d,0x626b,0xe5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6127e87a,0x23088903,0x12dcfbed,0xc8983209,0x2ab80715,0x1078446f,0x24932808,0xf92b34a9,0xa3a96f4,0xd4adf860,0x410dc8a9,0xe5626b}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x6127e87a,0x23088903,0x12dcfbed,0xc8983209,0x2ab80715,0x1078446f,0x24932808,0xf92b34a9,0xa3a96f4,0xd4adf860,0x410dc8a9,0xe5626b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x230889036127e87a,0xc898320912dcfbed,0x1078446f2ab80715,0xf92b34a924932808,0xd4adf8600a3a96f4,0xe5626b410dc8a9}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x230889036127e87a,0xc898320912dcfbed,0x1078446f2ab80715,0xf92b34a924932808,0xd4adf8600a3a96f4,0xe5626b410dc8a9}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa06f,0xcd2e,0x736e,0xdfff,0x28f9,0xe694,0x7c39,0xab16,0x21f5,0xbc4,0x4883,0xe8f6,0xcaae,0xf3c3,0xa9f1,0x8d46,0x16e4,0xa8e2,0x5d95,0x3838,0xa6f1,0x34b7,0x94da,0xde}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa06f,0xcd2e,0x736e,0xdfff,0x28f9,0xe694,0x7c39,0xab16,0x21f5,0xbc4,0x4883,0xe8f6,0xcaae,0xf3c3,0xa9f1,0x8d46,0x16e4,0xa8e2,0x5d95,0x3838,0xa6f1,0x34b7,0x94da,0xde}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcd2ea06f,0xdfff736e,0xe69428f9,0xab167c39,0xbc421f5,0xe8f64883,0xf3c3caae,0x8d46a9f1,0xa8e216e4,0x38385d95,0x34b7a6f1,0xde94da}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcd2ea06f,0xdfff736e,0xe69428f9,0xab167c39,0xbc421f5,0xe8f64883,0xf3c3caae,0x8d46a9f1,0xa8e216e4,0x38385d95,0x34b7a6f1,0xde94da}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdfff736ecd2ea06f,0xab167c39e69428f9,0xe8f648830bc421f5,0x8d46a9f1f3c3caae,0x38385d95a8e216e4,0xde94da34b7a6f1}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xdfff736ecd2ea06f,0xab167c39e69428f9,0xe8f648830bc421f5,0x8d46a9f1f3c3caae,0x38385d95a8e216e4,0xde94da34b7a6f1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x389f,0x56c6,0xba3c,0x357a,0x94b7,0x1d0e,0x82a4,0x71fe,0x47db,0xe4cb,0xce25,0xc05e,0x551b,0xebc5,0x3f47,0x1cd0,0xf7bc,0xd3d6,0x2ebd,0xb1a9,0x2aa3,0x3afb,0xdb32,0x3a}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x389f,0x56c6,0xba3c,0x357a,0x94b7,0x1d0e,0x82a4,0x71fe,0x47db,0xe4cb,0xce25,0xc05e,0x551b,0xebc5,0x3f47,0x1cd0,0xf7bc,0xd3d6,0x2ebd,0xb1a9,0x2aa3,0x3afb,0xdb32,0x3a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x56c6389f,0x357aba3c,0x1d0e94b7,0x71fe82a4,0xe4cb47db,0xc05ece25,0xebc5551b,0x1cd03f47,0xd3d6f7bc,0xb1a92ebd,0x3afb2aa3,0x3adb32}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x56c6389f,0x357aba3c,0x1d0e94b7,0x71fe82a4,0xe4cb47db,0xc05ece25,0xebc5551b,0x1cd03f47,0xd3d6f7bc,0xb1a92ebd,0x3afb2aa3,0x3adb32}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x357aba3c56c6389f,0x71fe82a41d0e94b7,0xc05ece25e4cb47db,0x1cd03f47ebc5551b,0xb1a92ebdd3d6f7bc,0x3adb323afb2aa3}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x357aba3c56c6389f,0x71fe82a41d0e94b7,0xc05ece25e4cb47db,0x1cd03f47ebc5551b,0xb1a92ebdd3d6f7bc,0x3adb323afb2aa3}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}}} #endif , #if 0 #elif GMP_LIMB_BITS 
== 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb919,0xcfad,0xeb7f,0x81f8,0x4d97,0xf272,0x4300,0xdd38,0x1b01,0x826,0x1894,0x3e43,0x7310,0xa84,0x4161,0x7c63,0xec4,0x9625,0xe475,0xadc9,0x5a7,0xfa6a,0xb7e3,0x7e}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xb919,0xcfad,0xeb7f,0x81f8,0x4d97,0xf272,0x4300,0xdd38,0x1b01,0x826,0x1894,0x3e43,0x7310,0xa84,0x4161,0x7c63,0xec4,0x9625,0xe475,0xadc9,0x5a7,0xfa6a,0xb7e3,0x7e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcfadb919,0x81f8eb7f,0xf2724d97,0xdd384300,0x8261b01,0x3e431894,0xa847310,0x7c634161,0x96250ec4,0xadc9e475,0xfa6a05a7,0x7eb7e3}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcfadb919,0x81f8eb7f,0xf2724d97,0xdd384300,0x8261b01,0x3e431894,0xa847310,0x7c634161,0x96250ec4,0xadc9e475,0xfa6a05a7,0x7eb7e3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x81f8eb7fcfadb919,0xdd384300f2724d97,0x3e43189408261b01,0x7c6341610a847310,0xadc9e47596250ec4,0x7eb7e3fa6a05a7}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x81f8eb7fcfadb919,0xdd384300f2724d97,0x3e43189408261b01,0x7c6341610a847310,0xadc9e47596250ec4,0x7eb7e3fa6a05a7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e83,0xade4,0x9d21,0x2e51,0x42e5,0xd3,0xac79,0xe0a8,0x32e2,0xfcf2,0xb504,0xc941,0xa0d0,0x8016,0x5485,0x3331,0xabd7,0xc296,0xf76e,0xef5,0xce39,0x8e31,0x165c,0x56}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e83,0xade4,0x9d21,0x2e51,0x42e5,0xd3,0xac79,0xe0a8,0x32e2,0xfcf2,0xb504,0xc941,0xa0d0,0x8016,0x5485,0x3331,0xabd7,0xc296,0xf76e,0xef5,0xce39,0x8e31,0x165c,0x56}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xade41e83,0x2e519d21,0xd342e5,0xe0a8ac79,0xfcf232e2,0xc941b504,0x8016a0d0,0x33315485,0xc296abd7,0xef5f76e,0x8e31ce39,0x56165c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xade41e83,0x2e519d21,0xd342e5,0xe0a8ac79,0xfcf232e2,0xc941b504,0x8016a0d0,0x33315485,0xc296abd7,0xef5f76e,0x8e31ce39,0x56165c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x2e519d21ade41e83,0xe0a8ac7900d342e5,0xc941b504fcf232e2,0x333154858016a0d0,0xef5f76ec296abd7,0x56165c8e31ce39}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2e519d21ade41e83,0xe0a8ac7900d342e5,0xc941b504fcf232e2,0x333154858016a0d0,0xef5f76ec296abd7,0x56165c8e31ce39}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xdd11,0x6e27,0xfbdb,0xf5d9,0xd6cb,0x9fef,0xc59a,0x7a4,0xfbd,0x5c3e,0xbc2,0xd091,0x6546,0xc9d0,0x193e,0x93fa,0x776,0x2763,0xdecd,0xbbe3,0xcec1,0x6abf,0x9070,0x66}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xdd11,0x6e27,0xfbdb,0xf5d9,0xd6cb,0x9fef,0xc59a,0x7a4,0xfbd,0x5c3e,0xbc2,0xd091,0x6546,0xc9d0,0x193e,0x93fa,0x776,0x2763,0xdecd,0xbbe3,0xcec1,0x6abf,0x9070,0x66}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6e27dd11,0xf5d9fbdb,0x9fefd6cb,0x7a4c59a,0x5c3e0fbd,0xd0910bc2,0xc9d06546,0x93fa193e,0x27630776,0xbbe3decd,0x6abfcec1,0x669070}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6e27dd11,0xf5d9fbdb,0x9fefd6cb,0x7a4c59a,0x5c3e0fbd,0xd0910bc2,0xc9d06546,0x93fa193e,0x27630776,0xbbe3decd,0x6abfcec1,0x669070}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf5d9fbdb6e27dd11,0x7a4c59a9fefd6cb,0xd0910bc25c3e0fbd,0x93fa193ec9d06546,0xbbe3decd27630776,0x6690706abfcec1}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf5d9fbdb6e27dd11,0x7a4c59a9fefd6cb,0xd0910bc25c3e0fbd,0x93fa193ec9d06546,0xbbe3decd27630776,0x6690706abfcec1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x46e7,0x3052,0x1480,0x7e07,0xb268,0xd8d,0xbcff,0x22c7,0xe4fe,0xf7d9,0xe76b,0xc1bc,0x8cef,0xf57b,0xbe9e,0x839c,0xf13b,0x69da,0x1b8a,0x5236,0xfa58,0x595,0x481c,0x81}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x46e7,0x3052,0x1480,0x7e07,0xb268,0xd8d,0xbcff,0x22c7,0xe4fe,0xf7d9,0xe76b,0xc1bc,0x8cef,0xf57b,0xbe9e,0x839c,0xf13b,0x69da,0x1b8a,0x5236,0xfa58,0x595,0x481c,0x81}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x305246e7,0x7e071480,0xd8db268,0x22c7bcff,0xf7d9e4fe,0xc1bce76b,0xf57b8cef,0x839cbe9e,0x69daf13b,0x52361b8a,0x595fa58,0x81481c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x305246e7,0x7e071480,0xd8db268,0x22c7bcff,0xf7d9e4fe,0xc1bce76b,0xf57b8cef,0x839cbe9e,0x69daf13b,0x52361b8a,0x595fa58,0x81481c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7e071480305246e7,0x22c7bcff0d8db268,0xc1bce76bf7d9e4fe,0x839cbe9ef57b8cef,0x52361b8a69daf13b,0x81481c0595fa58}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7e071480305246e7,0x22c7bcff0d8db268,0xc1bce76bf7d9e4fe,0x839cbe9ef57b8cef,0x52361b8a69daf13b,0x81481c0595fa58}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0xb89, 0xf4d, 0xbd8, 0x18d2, 0x781, 0x1f79, 0xef5, 0xcfd, 0xa7d, 0x121a, 0x59e, 0x18a3, 0x2b, 0x122f, 0x9d5, 0x18aa, 0x105f, 0x1bcd, 0x871, 0x1f9d, 0xae4, 0xc5c, 0x385, 0xbe8, 0x1878, 0xcd8, 0x7a1, 0x7c8, 0x5b4, 0xe} @@ -3593,220 +3593,220 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe7,0x36f4,0xe2a7,0x986d,0x214b,0xf464,0x7818,0x49f7,0x5ea0,0x73e7,0x9e17,0x85a0,0xd1cc,0xc33d,0x7833,0xbb28,0xba60,0x69d5,0xd73c,0x879e,0x6732,0x905,0xeefd,0xc8}}} +{{{._mp_alloc = 
0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe7,0x36f4,0xe2a7,0x986d,0x214b,0xf464,0x7818,0x49f7,0x5ea0,0x73e7,0x9e17,0x85a0,0xd1cc,0xc33d,0x7833,0xbb28,0xba60,0x69d5,0xd73c,0x879e,0x6732,0x905,0xeefd,0xc8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x36f400e7,0x986de2a7,0xf464214b,0x49f77818,0x73e75ea0,0x85a09e17,0xc33dd1cc,0xbb287833,0x69d5ba60,0x879ed73c,0x9056732,0xc8eefd}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x36f400e7,0x986de2a7,0xf464214b,0x49f77818,0x73e75ea0,0x85a09e17,0xc33dd1cc,0xbb287833,0x69d5ba60,0x879ed73c,0x9056732,0xc8eefd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x986de2a736f400e7,0x49f77818f464214b,0x85a09e1773e75ea0,0xbb287833c33dd1cc,0x879ed73c69d5ba60,0xc8eefd09056732}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x986de2a736f400e7,0x49f77818f464214b,0x85a09e1773e75ea0,0xbb287833c33dd1cc,0x879ed73c69d5ba60,0xc8eefd09056732}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x925e,0xff6f,0xa6dc,0x2eff,0xb2a6,0xe88c,0xab24,0x6829,0x91eb,0x118,0x77ac,0x8506,0x9934,0x5a86,0x5cac,0xfd15,0x5869,0xc232,0xebcf,0xcdef,0x254,0xf9e4,0xcc7b,0x96}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x925e,0xff6f,0xa6dc,0x2eff,0xb2a6,0xe88c,0xab24,0x6829,0x91eb,0x118,0x77ac,0x8506,0x9934,0x5a86,0x5cac,0xfd15,0x5869,0xc232,0xebcf,0xcdef,0x254,0xf9e4,0xcc7b,0x96}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xff6f925e,0x2effa6dc,0xe88cb2a6,0x6829ab24,0x11891eb,0x850677ac,0x5a869934,0xfd155cac,0xc2325869,0xcdefebcf,0xf9e40254,0x96cc7b}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xff6f925e,0x2effa6dc,0xe88cb2a6,0x6829ab24,0x11891eb,0x850677ac,0x5a869934,0xfd155cac,0xc2325869,0xcdefebcf,0xf9e40254,0x96cc7b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2effa6dcff6f925e,0x6829ab24e88cb2a6,0x850677ac011891eb,0xfd155cac5a869934,0xcdefebcfc2325869,0x96cc7bf9e40254}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2effa6dcff6f925e,0x6829ab24e88cb2a6,0x850677ac011891eb,0xfd155cac5a869934,0xcdefebcfc2325869,0x96cc7bf9e40254}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xae19,0xed4d,0x178c,0x7829,0x7b15,0x4808,0xdab3,0xebc6,0x25cb,0x3298,0xe9bd,0x3d5e,0xe9a4,0x960,0x92e6,0xb3ea,0x33e,0xf491,0x2343,0xbdb9,0xa17d,0x63a5,0x767f,0x41}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xae19,0xed4d,0x178c,0x7829,0x7b15,0x4808,0xdab3,0xebc6,0x25cb,0x3298,0xe9bd,0x3d5e,0xe9a4,0x960,0x92e6,0xb3ea,0x33e,0xf491,0x2343,0xbdb9,0xa17d,0x63a5,0x767f,0x41}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xed4dae19,0x7829178c,0x48087b15,0xebc6dab3,0x329825cb,0x3d5ee9bd,0x960e9a4,0xb3ea92e6,0xf491033e,0xbdb92343,0x63a5a17d,0x41767f}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xed4dae19,0x7829178c,0x48087b15,0xebc6dab3,0x329825cb,0x3d5ee9bd,0x960e9a4,0xb3ea92e6,0xf491033e,0xbdb92343,0x63a5a17d,0x41767f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7829178ced4dae19,0xebc6dab348087b15,0x3d5ee9bd329825cb,0xb3ea92e60960e9a4,0xbdb92343f491033e,0x41767f63a5a17d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7829178ced4dae19,0xebc6dab348087b15,0x3d5ee9bd329825cb,0xb3ea92e60960e9a4,0xbdb92343f491033e,0x41767f63a5a17d}}}} 
#endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xff19,0xc90b,0x1d58,0x6792,0xdeb4,0xb9b,0x87e7,0xb608,0xa15f,0x8c18,0x61e8,0x7a5f,0x2e33,0x3cc2,0x87cc,0x44d7,0x459f,0x962a,0x28c3,0x7861,0x98cd,0xf6fa,0x1102,0x37}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xff19,0xc90b,0x1d58,0x6792,0xdeb4,0xb9b,0x87e7,0xb608,0xa15f,0x8c18,0x61e8,0x7a5f,0x2e33,0x3cc2,0x87cc,0x44d7,0x459f,0x962a,0x28c3,0x7861,0x98cd,0xf6fa,0x1102,0x37}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc90bff19,0x67921d58,0xb9bdeb4,0xb60887e7,0x8c18a15f,0x7a5f61e8,0x3cc22e33,0x44d787cc,0x962a459f,0x786128c3,0xf6fa98cd,0x371102}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc90bff19,0x67921d58,0xb9bdeb4,0xb60887e7,0x8c18a15f,0x7a5f61e8,0x3cc22e33,0x44d787cc,0x962a459f,0x786128c3,0xf6fa98cd,0x371102}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x67921d58c90bff19,0xb60887e70b9bdeb4,0x7a5f61e88c18a15f,0x44d787cc3cc22e33,0x786128c3962a459f,0x371102f6fa98cd}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x67921d58c90bff19,0xb60887e70b9bdeb4,0x7a5f61e88c18a15f,0x44d787cc3cc22e33,0x786128c3962a459f,0x371102f6fa98cd}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xa74d,0x6268,0x6670,0x157d,0x36bf,0xbff5,0x13f3,0x62fb,0x404b,0x863b,0xb7a3,0xada5,0x355f,0xc945,0x6b4c,0x7d75,0x9efd,0x8901,0x4c90,0xba3e,0x48f0,0x5677,0x49eb,0x93}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6268a74d,0x157d6670,0xbff536bf,0x62fb13f3,0x863b404b,0xada5b7a3,0xc945355f,0x7d756b4c,0x89019efd,0xba3e4c90,0x567748f0,0x9349eb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x157d66706268a74d,0x62fb13f3bff536bf,0xada5b7a3863b404b,0x7d756b4cc945355f,0xba3e4c9089019efd,0x9349eb567748f0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd72,0xd1f5,0x548,0xe0a7,0xee15,0xfdbc,0xc63c,0xd72b,0x1e45,0xe787,0xc513,0xb278,0x7cd,0xcab5,0x5ced,0x8be9,0xf4a4,0x9711,0xe9d8,0x872d,0x443,0xead0,0xe34d,0x2e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd1f50d72,0xe0a70548,0xfdbcee15,0xd72bc63c,0xe7871e45,0xb278c513,0xcab507cd,0x8be95ced,0x9711f4a4,0x872de9d8,0xead00443,0x2ee34d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = 
(mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe0a70548d1f50d72,0xd72bc63cfdbcee15,0xb278c513e7871e45,0x8be95cedcab507cd,0x872de9d89711f4a4,0x2ee34dead00443}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xcecc,0x7789,0xbdb9,0x22b4,0x782f,0x794e,0x8e17,0xd2cc,0x1fb,0x1927,0x9c88,0x929,0x22b0,0x746e,0xd62b,0xade1,0xc7dd,0x5e2a,0xd146,0x7363,0xca5e,0x5460,0xd2b3,0xee}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7789cecc,0x22b4bdb9,0x794e782f,0xd2cc8e17,0x192701fb,0x9299c88,0x746e22b0,0xade1d62b,0x5e2ac7dd,0x7363d146,0x5460ca5e,0xeed2b3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x22b4bdb97789cecc,0xd2cc8e17794e782f,0x9299c88192701fb,0xade1d62b746e22b0,0x7363d1465e2ac7dd,0xeed2b35460ca5e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x58b3,0x9d97,0x998f,0xea82,0xc940,0x400a,0xec0c,0x9d04,0xbfb4,0x79c4,0x485c,0x525a,0xcaa0,0x36ba,0x94b3,0x828a,0x6102,0x76fe,0xb36f,0x45c1,0xb70f,0xa988,0xb614,0x6c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9d9758b3,0xea82998f,0x400ac940,0x9d04ec0c,0x79c4bfb4,0x525a485c,0x36bacaa0,0x828a94b3,0x76fe6102,0x45c1b36f,0xa988b70f,0x6cb614}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea82998f9d9758b3,0x9d04ec0c400ac940,0x525a485c79c4bfb4,0x828a94b336bacaa0,0x45c1b36f76fe6102,0x6cb614a988b70f}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3d63,0xdad1,0xf501,0xd58f,0x8741,0xd265,0xf8bd,0xb3b9,0xac08,0xfc8b,0x45ab,0xbcdf,0x501,0x9f7,0x10ed,0x102f,0xc6e3,0xdc57,0xf892,0x8db4,0x2c76,0x21ab,0x2bc3,0x8e}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x3d63,0xdad1,0xf501,0xd58f,0x8741,0xd265,0xf8bd,0xb3b9,0xac08,0xfc8b,0x45ab,0xbcdf,0x501,0x9f7,0x10ed,0x102f,0xc6e3,0xdc57,0xf892,0x8db4,0x2c76,0x21ab,0x2bc3,0x8e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xdad13d63,0xd58ff501,0xd2658741,0xb3b9f8bd,0xfc8bac08,0xbcdf45ab,0x9f70501,0x102f10ed,0xdc57c6e3,0x8db4f892,0x21ab2c76,0x8e2bc3}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdad13d63,0xd58ff501,0xd2658741,0xb3b9f8bd,0xfc8bac08,0xbcdf45ab,0x9f70501,0x102f10ed,0xdc57c6e3,0x8db4f892,0x21ab2c76,0x8e2bc3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd58ff501dad13d63,0xb3b9f8bdd2658741,0xbcdf45abfc8bac08,0x102f10ed09f70501,0x8db4f892dc57c6e3,0x8e2bc321ab2c76}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd58ff501dad13d63,0xb3b9f8bdd2658741,0xbcdf45abfc8bac08,0x102f10ed09f70501,0x8db4f892dc57c6e3,0x8e2bc321ab2c76}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc998,0x418c,0xa8e4,0x2354,0x622a,0xb76d,0x5487,0xdad9,0x1672,0x522b,0xa00f,0xdfa5,0x296b,0xe17c,0x595e,0x91e1,0xa22d,0xe126,0x904c,0x9288,0x5075,0xc6c5,0x61b0,0xb1}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc998,0x418c,0xa8e4,0x2354,0x622a,0xb76d,0x5487,0xdad9,0x1672,0x522b,0xa00f,0xdfa5,0x296b,0xe17c,0x595e,0x91e1,0xa22d,0xe126,0x904c,0x9288,0x5075,0xc6c5,0x61b0,0xb1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x418cc998,0x2354a8e4,0xb76d622a,0xdad95487,0x522b1672,0xdfa5a00f,0xe17c296b,0x91e1595e,0xe126a22d,0x9288904c,0xc6c55075,0xb161b0}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x418cc998,0x2354a8e4,0xb76d622a,0xdad95487,0x522b1672,0xdfa5a00f,0xe17c296b,0x91e1595e,0xe126a22d,0x9288904c,0xc6c55075,0xb161b0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2354a8e4418cc998,0xdad95487b76d622a,0xdfa5a00f522b1672,0x91e1595ee17c296b,0x9288904ce126a22d,0xb161b0c6c55075}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2354a8e4418cc998,0xdad95487b76d622a,0xdfa5a00f522b1672,0x91e1595ee17c296b,0x9288904ce126a22d,0xb161b0c6c55075}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1271,0x594e,0x16ee,0x35fa,0xaf0e,0x11b2,0x1fca,0x24b7,0xa3e3,0x2bcc,0xc2f0,0x6409,0xf8e1,0x6a8f,0x67e,0xe7ee,0xad00,0x2b9a,0x6813,0x5e0a,0x6dec,0x48f5,0xbd1d,0xb3}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1271,0x594e,0x16ee,0x35fa,0xaf0e,0x11b2,0x1fca,0x24b7,0xa3e3,0x2bcc,0xc2f0,0x6409,0xf8e1,0x6a8f,0x67e,0xe7ee,0xad00,0x2b9a,0x6813,0x5e0a,0x6dec,0x48f5,0xbd1d,0xb3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x594e1271,0x35fa16ee,0x11b2af0e,0x24b71fca,0x2bcca3e3,0x6409c2f0,0x6a8ff8e1,0xe7ee067e,0x2b9aad00,0x5e0a6813,0x48f56dec,0xb3bd1d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x594e1271,0x35fa16ee,0x11b2af0e,0x24b71fca,0x2bcca3e3,0x6409c2f0,0x6a8ff8e1,0xe7ee067e,0x2b9aad00,0x5e0a6813,0x48f56dec,0xb3bd1d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x35fa16ee594e1271,0x24b71fca11b2af0e,0x6409c2f02bcca3e3,0xe7ee067e6a8ff8e1,0x5e0a68132b9aad00,0xb3bd1d48f56dec}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x35fa16ee594e1271,0x24b71fca11b2af0e,0x6409c2f02bcca3e3,0xe7ee067e6a8ff8e1,0x5e0a68132b9aad00,0xb3bd1d48f56dec}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc29d,0x252e,0xafe,0x2a70,0x78be,0x2d9a,0x742,0x4c46,0x53f7,0x374,0xba54,0x4320,0xfafe,0xf608,0xef12,0xefd0,0x391c,0x23a8,0x76d,0x724b,0xd389,0xde54,0xd43c,0x71}}} +{{{._mp_alloc = 
0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xc29d,0x252e,0xafe,0x2a70,0x78be,0x2d9a,0x742,0x4c46,0x53f7,0x374,0xba54,0x4320,0xfafe,0xf608,0xef12,0xefd0,0x391c,0x23a8,0x76d,0x724b,0xd389,0xde54,0xd43c,0x71}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x252ec29d,0x2a700afe,0x2d9a78be,0x4c460742,0x37453f7,0x4320ba54,0xf608fafe,0xefd0ef12,0x23a8391c,0x724b076d,0xde54d389,0x71d43c}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x252ec29d,0x2a700afe,0x2d9a78be,0x4c460742,0x37453f7,0x4320ba54,0xf608fafe,0xefd0ef12,0x23a8391c,0x724b076d,0xde54d389,0x71d43c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2a700afe252ec29d,0x4c4607422d9a78be,0x4320ba54037453f7,0xefd0ef12f608fafe,0x724b076d23a8391c,0x71d43cde54d389}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2a700afe252ec29d,0x4c4607422d9a78be,0x4320ba54037453f7,0xefd0ef12f608fafe,0x724b076d23a8391c,0x71d43cde54d389}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe7,0x36f4,0xe2a7,0x986d,0x214b,0xf464,0x7818,0x49f7,0x5ea0,0x73e7,0x9e17,0x85a0,0xd1cc,0xc33d,0x7833,0xbb28,0xba60,0x69d5,0xd73c,0x879e,0x6732,0x905,0xeefd,0xc8}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe7,0x36f4,0xe2a7,0x986d,0x214b,0xf464,0x7818,0x49f7,0x5ea0,0x73e7,0x9e17,0x85a0,0xd1cc,0xc33d,0x7833,0xbb28,0xba60,0x69d5,0xd73c,0x879e,0x6732,0x905,0xeefd,0xc8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x36f400e7,0x986de2a7,0xf464214b,0x49f77818,0x73e75ea0,0x85a09e17,0xc33dd1cc,0xbb287833,0x69d5ba60,0x879ed73c,0x9056732,0xc8eefd}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x36f400e7,0x986de2a7,0xf464214b,0x49f77818,0x73e75ea0,0x85a09e17,0xc33dd1cc,0xbb287833,0x69d5ba60,0x879ed73c,0x9056732,0xc8eefd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x986de2a736f400e7,0x49f77818f464214b,0x85a09e1773e75ea0,0xbb287833c33dd1cc,0x879ed73c69d5ba60,0xc8eefd09056732}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x986de2a736f400e7,0x49f77818f464214b,0x85a09e1773e75ea0,0xbb287833c33dd1cc,0x879ed73c69d5ba60,0xc8eefd09056732}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x925e,0xff6f,0xa6dc,0x2eff,0xb2a6,0xe88c,0xab24,0x6829,0x91eb,0x118,0x77ac,0x8506,0x9934,0x5a86,0x5cac,0xfd15,0x5869,0xc232,0xebcf,0xcdef,0x254,0xf9e4,0xcc7b,0x96}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x925e,0xff6f,0xa6dc,0x2eff,0xb2a6,0xe88c,0xab24,0x6829,0x91eb,0x118,0x77ac,0x8506,0x9934,0x5a86,0x5cac,0xfd15,0x5869,0xc232,0xebcf,0xcdef,0x254,0xf9e4,0xcc7b,0x96}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xff6f925e,0x2effa6dc,0xe88cb2a6,0x6829ab24,0x11891eb,0x850677ac,0x5a869934,0xfd155cac,0xc2325869,0xcdefebcf,0xf9e40254,0x96cc7b}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xff6f925e,0x2effa6dc,0xe88cb2a6,0x6829ab24,0x11891eb,0x850677ac,0x5a869934,0xfd155cac,0xc2325869,0xcdefebcf,0xf9e40254,0x96cc7b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x2effa6dcff6f925e,0x6829ab24e88cb2a6,0x850677ac011891eb,0xfd155cac5a869934,0xcdefebcfc2325869,0x96cc7bf9e40254}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x2effa6dcff6f925e,0x6829ab24e88cb2a6,0x850677ac011891eb,0xfd155cac5a869934,0xcdefebcfc2325869,0x96cc7bf9e40254}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xae19,0xed4d,0x178c,0x7829,0x7b15,0x4808,0xdab3,0xebc6,0x25cb,0x3298,0xe9bd,0x3d5e,0xe9a4,0x960,0x92e6,0xb3ea,0x33e,0xf491,0x2343,0xbdb9,0xa17d,0x63a5,0x767f,0x41}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xae19,0xed4d,0x178c,0x7829,0x7b15,0x4808,0xdab3,0xebc6,0x25cb,0x3298,0xe9bd,0x3d5e,0xe9a4,0x960,0x92e6,0xb3ea,0x33e,0xf491,0x2343,0xbdb9,0xa17d,0x63a5,0x767f,0x41}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xed4dae19,0x7829178c,0x48087b15,0xebc6dab3,0x329825cb,0x3d5ee9bd,0x960e9a4,0xb3ea92e6,0xf491033e,0xbdb92343,0x63a5a17d,0x41767f}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xed4dae19,0x7829178c,0x48087b15,0xebc6dab3,0x329825cb,0x3d5ee9bd,0x960e9a4,0xb3ea92e6,0xf491033e,0xbdb92343,0x63a5a17d,0x41767f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7829178ced4dae19,0xebc6dab348087b15,0x3d5ee9bd329825cb,0xb3ea92e60960e9a4,0xbdb92343f491033e,0x41767f63a5a17d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7829178ced4dae19,0xebc6dab348087b15,0x3d5ee9bd329825cb,0xb3ea92e60960e9a4,0xbdb92343f491033e,0x41767f63a5a17d}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xff19,0xc90b,0x1d58,0x6792,0xdeb4,0xb9b,0x87e7,0xb608,0xa15f,0x8c18,0x61e8,0x7a5f,0x2e33,0x3cc2,0x87cc,0x44d7,0x459f,0x962a,0x28c3,0x7861,0x98cd,0xf6fa,0x1102,0x37}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xff19,0xc90b,0x1d58,0x6792,0xdeb4,0xb9b,0x87e7,0xb608,0xa15f,0x8c18,0x61e8,0x7a5f,0x2e33,0x3cc2,0x87cc,0x44d7,0x459f,0x962a,0x28c3,0x7861,0x98cd,0xf6fa,0x1102,0x37}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc90bff19,0x67921d58,0xb9bdeb4,0xb60887e7,0x8c18a15f,0x7a5f61e8,0x3cc22e33,0x44d787cc,0x962a459f,0x786128c3,0xf6fa98cd,0x371102}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc90bff19,0x67921d58,0xb9bdeb4,0xb60887e7,0x8c18a15f,0x7a5f61e8,0x3cc22e33,0x44d787cc,0x962a459f,0x786128c3,0xf6fa98cd,0x371102}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x67921d58c90bff19,0xb60887e70b9bdeb4,0x7a5f61e88c18a15f,0x44d787cc3cc22e33,0x786128c3962a459f,0x371102f6fa98cd}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x67921d58c90bff19,0xb60887e70b9bdeb4,0x7a5f61e88c18a15f,0x44d787cc3cc22e33,0x786128c3962a459f,0x371102f6fa98cd}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x53a7,0x3134,0xb338,0x8abe,0x9b5f,0xdffa,0x89f9,0xb17d,0xa025,0xc31d,0xdbd1,0xd6d2,0x9aaf,0x64a2,0xb5a6,0xbeba,0xcf7e,0x4480,0x2648,0x5d1f,0xa478,0xab3b,0xa4f5,0xc9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x313453a7,0x8abeb338,0xdffa9b5f,0xb17d89f9,0xc31da025,0xd6d2dbd1,0x64a29aaf,0xbebab5a6,0x4480cf7e,0x5d1f2648,0xab3ba478,0xc9a4f5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8abeb338313453a7,0xb17d89f9dffa9b5f,0xd6d2dbd1c31da025,0xbebab5a664a29aaf,0x5d1f26484480cf7e,0xc9a4f5ab3ba478}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x86b9,0x68fa,0x82a4,0xf053,0x770a,0x7ede,0xe31e,0xeb95,0x8f22,0xf3c3,0x6289,0xd93c,0x83e6,0xe55a,0xae76,0x45f4,0xfa52,0x4b88,0xf4ec,0xc396,0x221,0xf568,0x71a6,0x97}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x68fa86b9,0xf05382a4,0x7ede770a,0xeb95e31e,0xf3c38f22,0xd93c6289,0xe55a83e6,0x45f4ae76,0x4b88fa52,0xc396f4ec,0xf5680221,0x9771a6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf05382a468fa86b9,0xeb95e31e7ede770a,0xd93c6289f3c38f22,0x45f4ae76e55a83e6,0xc396f4ec4b88fa52,0x9771a6f5680221}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xe766,0xbbc4,0x5edc,0x915a,0x3c17,0xbca7,0x470b,0xe966,0x80fd,0xc93,0xce44,0x494,0x1158,0xba37,0xeb15,0xd6f0,0x63ee,0x2f15,0xe8a3,0x39b1,0x652f,0xaa30,0x6959,0xf7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbbc4e766,0x915a5edc,0xbca73c17,0xe966470b,0xc9380fd,0x494ce44,0xba371158,0xd6f0eb15,0x2f1563ee,0x39b1e8a3,0xaa30652f,0xf76959}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x915a5edcbbc4e766,0xe966470bbca73c17,0x494ce440c9380fd,0xd6f0eb15ba371158,0x39b1e8a32f1563ee,0xf76959aa30652f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) 
{0xac5a,0xcecb,0x4cc7,0x7541,0x64a0,0x2005,0x7606,0x4e82,0x5fda,0x3ce2,0x242e,0x292d,0x6550,0x9b5d,0x4a59,0x4145,0x3081,0xbb7f,0xd9b7,0xa2e0,0x5b87,0x54c4,0x5b0a,0x36}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xcecbac5a,0x75414cc7,0x200564a0,0x4e827606,0x3ce25fda,0x292d242e,0x9b5d6550,0x41454a59,0xbb7f3081,0xa2e0d9b7,0x54c45b87,0x365b0a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x75414cc7cecbac5a,0x4e827606200564a0,0x292d242e3ce25fda,0x41454a599b5d6550,0xa2e0d9b7bb7f3081,0x365b0a54c45b87}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd70d,0x31e4,0xa551,0x7483,0x6f09,0x34d,0x6a80,0x85f,0x6b11,0xe29b,0x188,0x38d2,0x85b,0xa241,0xc423,0xddc8,0x3260,0x1722,0xf3a4,0x7cf7,0x36e8,0x7955,0xeeb9,0xc6}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xd70d,0x31e4,0xa551,0x7483,0x6f09,0x34d,0x6a80,0x85f,0x6b11,0xe29b,0x188,0x38d2,0x85b,0xa241,0xc423,0xddc8,0x3260,0x1722,0xf3a4,0x7cf7,0x36e8,0x7955,0xeeb9,0xc6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x31e4d70d,0x7483a551,0x34d6f09,0x85f6a80,0xe29b6b11,0x38d20188,0xa241085b,0xddc8c423,0x17223260,0x7cf7f3a4,0x795536e8,0xc6eeb9}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x31e4d70d,0x7483a551,0x34d6f09,0x85f6a80,0xe29b6b11,0x38d20188,0xa241085b,0xddc8c423,0x17223260,0x7cf7f3a4,0x795536e8,0xc6eeb9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7483a55131e4d70d,0x85f6a80034d6f09,0x38d20188e29b6b11,0xddc8c423a241085b,0x7cf7f3a417223260,0xc6eeb9795536e8}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x7483a55131e4d70d,0x85f6a80034d6f09,0x38d20188e29b6b11,0xddc8c423a241085b,0x7cf7f3a417223260,0xc6eeb9795536e8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x59a9,0x8f53,0xd42f,0xf65b,0x7134,0x4475,0x9543,0x8428,0x4555,0x7d45,0x7bfb,0xe15d,0xe9c2,0x24ec,0xf17f,0x88ea,0x766c,0xbf2d,0x2b42,0x2771,0x5dfc,0xd040,0xfa62,0xc9}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x59a9,0x8f53,0xd42f,0xf65b,0x7134,0x4475,0x9543,0x8428,0x4555,0x7d45,0x7bfb,0xe15d,0xe9c2,0x24ec,0xf17f,0x88ea,0x766c,0xbf2d,0x2b42,0x2771,0x5dfc,0xd040,0xfa62,0xc9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8f5359a9,0xf65bd42f,0x44757134,0x84289543,0x7d454555,0xe15d7bfb,0x24ece9c2,0x88eaf17f,0xbf2d766c,0x27712b42,0xd0405dfc,0xc9fa62}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8f5359a9,0xf65bd42f,0x44757134,0x84289543,0x7d454555,0xe15d7bfb,0x24ece9c2,0x88eaf17f,0xbf2d766c,0x27712b42,0xd0405dfc,0xc9fa62}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf65bd42f8f5359a9,0x8428954344757134,0xe15d7bfb7d454555,0x88eaf17f24ece9c2,0x27712b42bf2d766c,0xc9fa62d0405dfc}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf65bd42f8f5359a9,0x8428954344757134,0xe15d7bfb7d454555,0x88eaf17f24ece9c2,0x27712b42bf2d766c,0xc9fa62d0405dfc}}}} #endif }, { #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4b49,0x89b0,0x8c52,0x91ca,0xed1b,0xd527,0x453,0x82d,0xb0eb,0xb6bf,0x3790,0x5816,0x49bb,0xa0a7,0xffc6,0x5530,0x23b9,0x12bb,0x52c4,0x6f51,0x25fd,0x62d,0x723d,0xc6}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x4b49,0x89b0,0x8c52,0x91ca,0xed1b,0xd527,0x453,0x82d,0xb0eb,0xb6bf,0x3790,0x5816,0x49bb,0xa0a7,0xffc6,0x5530,0x23b9,0x12bb,0x52c4,0x6f51,0x25fd,0x62d,0x723d,0xc6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x89b04b49,0x91ca8c52,0xd527ed1b,0x82d0453,0xb6bfb0eb,0x58163790,0xa0a749bb,0x5530ffc6,0x12bb23b9,0x6f5152c4,0x62d25fd,0xc6723d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x89b04b49,0x91ca8c52,0xd527ed1b,0x82d0453,0xb6bfb0eb,0x58163790,0xa0a749bb,0x5530ffc6,0x12bb23b9,0x6f5152c4,0x62d25fd,0xc6723d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x91ca8c5289b04b49,0x82d0453d527ed1b,0x58163790b6bfb0eb,0x5530ffc6a0a749bb,0x6f5152c412bb23b9,0xc6723d062d25fd}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x91ca8c5289b04b49,0x82d0453d527ed1b,0x58163790b6bfb0eb,0x5530ffc6a0a749bb,0x6f5152c412bb23b9,0xc6723d062d25fd}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x28f3,0xce1b,0x5aae,0x8b7c,0x90f6,0xfcb2,0x957f,0xf7a0,0x94ee,0x1d64,0xfe77,0xc72d,0xf7a4,0x5dbe,0x3bdc,0x2237,0xcd9f,0xe8dd,0xc5b,0x8308,0xc917,0x86aa,0x1146,0x39}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x28f3,0xce1b,0x5aae,0x8b7c,0x90f6,0xfcb2,0x957f,0xf7a0,0x94ee,0x1d64,0xfe77,0xc72d,0xf7a4,0x5dbe,0x3bdc,0x2237,0xcd9f,0xe8dd,0xc5b,0x8308,0xc917,0x86aa,0x1146,0x39}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xce1b28f3,0x8b7c5aae,0xfcb290f6,0xf7a0957f,0x1d6494ee,0xc72dfe77,0x5dbef7a4,0x22373bdc,0xe8ddcd9f,0x83080c5b,0x86aac917,0x391146}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xce1b28f3,0x8b7c5aae,0xfcb290f6,0xf7a0957f,0x1d6494ee,0xc72dfe77,0x5dbef7a4,0x22373bdc,0xe8ddcd9f,0x83080c5b,0x86aac917,0x391146}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8b7c5aaece1b28f3,0xf7a0957ffcb290f6,0xc72dfe771d6494ee,0x22373bdc5dbef7a4,0x83080c5be8ddcd9f,0x39114686aac917}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x8b7c5aaece1b28f3,0xf7a0957ffcb290f6,0xc72dfe771d6494ee,0x22373bdc5dbef7a4,0x83080c5be8ddcd9f,0x39114686aac917}}}} #endif -}}}}; +}}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/finit.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/finit.c index b3808edf07..c9a3687282 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/finit.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/finit.c @@ -29,29 +29,29 @@ quat_alg_elem_finalize(quat_alg_elem_t *elem) void ibz_vec_2_init(ibz_vec_2_t *vec) { - ibz_init(&((*vec)[0])); - ibz_init(&((*vec)[1])); + ibz_init(&(vec->v[0])); + ibz_init(&(vec->v[1])); } void ibz_vec_2_finalize(ibz_vec_2_t *vec) { - ibz_finalize(&((*vec)[0])); - ibz_finalize(&((*vec)[1])); + ibz_finalize(&(vec->v[0])); + ibz_finalize(&(vec->v[1])); } void ibz_vec_4_init(ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_init(&(*vec)[i]); + ibz_init(&vec->v[i]); } } void ibz_vec_4_finalize(ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_finalize(&(*vec)[i]); + ibz_finalize(&vec->v[i]); } } @@ -60,7 +60,7 @@ ibz_mat_2x2_init(ibz_mat_2x2_t *mat) { for (int i = 0; i < 2; i++) { 
for (int j = 0; j < 2; j++) { - ibz_init(&(*mat)[i][j]); + ibz_init(&(mat->m)[i][j]); } } } @@ -69,7 +69,7 @@ ibz_mat_2x2_finalize(ibz_mat_2x2_t *mat) { for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_finalize(&(*mat)[i][j]); + ibz_finalize(&(mat->m)[i][j]); } } } @@ -79,7 +79,7 @@ ibz_mat_4x4_init(ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_init(&(*mat)[i][j]); + ibz_init(&(mat->m)[i][j]); } } } @@ -88,7 +88,7 @@ ibz_mat_4x4_finalize(ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_finalize(&(*mat)[i][j]); + ibz_finalize(&(mat->m)[i][j]); } } } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf.c index 511a0a5d38..5edff425c8 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hnf.c @@ -14,21 +14,21 @@ ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat) for (int i = 0; i < 4; i++) { // upper triangular for (int j = 0; j < i; j++) { - res = res && ibz_is_zero(&((*mat)[i][j])); + res = res && ibz_is_zero(&(mat->m[i][j])); } // find first non 0 element of line found = 0; for (int j = i; j < 4; j++) { if (found) { // all values are positive, and first non-0 is the largest of that line - res = res && (ibz_cmp(&((*mat)[i][j]), &zero) >= 0); - res = res && (ibz_cmp(&((*mat)[i][ind]), &((*mat)[i][j])) > 0); + res = res && (ibz_cmp(&(mat->m[i][j]), &zero) >= 0); + res = res && (ibz_cmp(&(mat->m[i][ind]), &(mat->m[i][j])) > 0); } else { - if (!ibz_is_zero(&((*mat)[i][j]))) { + if (!ibz_is_zero(&(mat->m[i][j]))) { found = 1; ind = j; // mustbe non-negative - res = res && (ibz_cmp(&((*mat)[i][j]), &zero) > 0); + res = res && (ibz_cmp(&(mat->m[i][j]), &zero) > 0); } } } @@ -37,7 +37,7 @@ ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat) int linestart = -1; int i = 0; for (int j = 0; j < 4; j++) { - while ((i < 4) && (ibz_is_zero(&((*mat)[i][j])))) { + while ((i < 4) && (ibz_is_zero(&(mat->m[i][j])))) { i = i + 1; } if (i != 4) { @@ -66,13 +66,13 @@ ibz_vec_4_linear_combination_mod(ibz_vec_4_t *lc, ibz_init(&m); ibz_copy(&m, mod); for (int i = 0; i < 4; i++) { - ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); - ibz_mul(&prod, coeff_b, &((*vec_b)[i])); - ibz_add(&(sums[i]), &(sums[i]), &prod); - ibz_centered_mod(&(sums[i]), &(sums[i]), &m); + ibz_mul(&(sums.v[i]), coeff_a, &(vec_a->v[i])); + ibz_mul(&prod, coeff_b, &(vec_b->v[i])); + ibz_add(&(sums.v[i]), &(sums.v[i]), &prod); + ibz_centered_mod(&(sums.v[i]), &(sums.v[i]), &m); } for (int i = 0; i < 4; i++) { - ibz_copy(&((*lc)[i]), &(sums[i])); + ibz_copy(&(lc->v[i]), &(sums.v[i])); } ibz_finalize(&prod); ibz_finalize(&m); @@ -86,7 +86,7 @@ ibz_vec_4_copy_mod(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_t *mod) ibz_init(&m); ibz_copy(&m, mod); for (int i = 0; i < 4; i++) { - ibz_centered_mod(&((*res)[i]), &((*vec)[i]), &m); + ibz_centered_mod(&(res->v[i]), &(vec->v[i]), &m); } ibz_finalize(&m); } @@ -101,8 +101,8 @@ ibz_vec_4_scalar_mul_mod(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4 ibz_copy(&s, scalar); ibz_copy(&m, mod); for (int i = 0; i < 4; i++) { - ibz_mul(&((*prod)[i]), &((*vec)[i]), &s); - ibz_mod(&((*prod)[i]), &((*prod)[i]), &m); + ibz_mul(&(prod->v[i]), &(vec->v[i]), &s); + ibz_mod(&(prod->v[i]), &(prod->v[i]), &m); } ibz_finalize(&m); ibz_finalize(&s); @@ -138,36 +138,36 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec if (h < 4) ibz_vec_4_init(&(w[h])); 
ibz_vec_4_init(&(a[h])); - ibz_copy(&(a[h][0]), &(generators[h][0])); - ibz_copy(&(a[h][1]), &(generators[h][1])); - ibz_copy(&(a[h][2]), &(generators[h][2])); - ibz_copy(&(a[h][3]), &(generators[h][3])); + ibz_copy(&(a[h].v[0]), &(generators[h].v[0])); + ibz_copy(&(a[h].v[1]), &(generators[h].v[1])); + ibz_copy(&(a[h].v[2]), &(generators[h].v[2])); + ibz_copy(&(a[h].v[3]), &(generators[h].v[3])); } assert(ibz_cmp(mod, &ibz_const_zero) > 0); ibz_copy(&m, mod); while (i != -1) { while (j != 0) { j = j - 1; - if (!ibz_is_zero(&(a[j][i]))) { + if (!ibz_is_zero(&(a[j].v[i]))) { // assumtion that ibz_xgcd outputs u,v which are small in absolute // value is needed here also, needs u non 0, but v can be 0 if needed - ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &(a[j][i])); + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k].v[i]), &(a[j].v[i])); ibz_vec_4_linear_combination(&c, &u, &(a[k]), &v, &(a[j])); - ibz_div(&coeff_1, &r, &(a[k][i]), &d); - ibz_div(&coeff_2, &r, &(a[j][i]), &d); + ibz_div(&coeff_1, &r, &(a[k].v[i]), &d); + ibz_div(&coeff_2, &r, &(a[j].v[i]), &d); ibz_neg(&coeff_2, &coeff_2); ibz_vec_4_linear_combination_mod( &(a[j]), &coeff_1, &(a[j]), &coeff_2, &(a[k]), &m); // do lin comb mod m ibz_vec_4_copy_mod(&(a[k]), &c, &m); // mod m in copy } } - ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &m); + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k].v[i]), &m); ibz_vec_4_scalar_mul_mod(&(w[i]), &u, &(a[k]), &m); // mod m in scalar mult - if (ibz_is_zero(&(w[i][i]))) { - ibz_copy(&(w[i][i]), &m); + if (ibz_is_zero(&(w[i].v[i]))) { + ibz_copy(&(w[i].v[i]), &m); } for (int h = i + 1; h < 4; h++) { - ibz_div_floor(&q, &r, &(w[h][i]), &(w[i][i])); + ibz_div_floor(&q, &r, &(w[h].v[i]), &(w[i].v[i])); ibz_neg(&q, &q); ibz_vec_4_linear_combination(&(w[h]), &ibz_const_one, &(w[h]), &q, &(w[i])); } @@ -177,8 +177,8 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec k = k - 1; i = i - 1; j = k; - if (ibz_is_zero(&(a[k][i]))) - ibz_copy(&(a[k][i]), &m); + if (ibz_is_zero(&(a[k].v[i]))) + ibz_copy(&(a[k].v[i]), &m); } else { k = k - 1; @@ -188,7 +188,7 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec } for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { - ibz_copy(&((*hnf)[i][j]), &(w[j][i])); + ibz_copy(&((hnf->m)[i][j]), &(w[j].v[i])); } } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ibz_division.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ibz_division.c index 0fd35b5c65..f630f5a9fe 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ibz_division.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ibz_division.c @@ -8,5 +8,5 @@ void ibz_xgcd(ibz_t *gcd, ibz_t *u, ibz_t *v, const ibz_t *a, const ibz_t *b) { - mpz_gcdext(*gcd, *u, *v, *a, *b); + mpz_gcdext(gcd->i, u->i, v->i, a->i, b->i); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/id2iso.c index 0743974345..1be9d87e71 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/id2iso.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/id2iso.c @@ -18,8 +18,8 @@ ec_biscalar_mul_ibz_vec(ec_point_t *res, const ec_curve_t *curve) { digit_t scalars[2][NWORDS_ORDER]; - ibz_to_digit_array(scalars[0], &(*scalar_vec)[0]); - ibz_to_digit_array(scalars[1], &(*scalar_vec)[1]); + ibz_to_digit_array(scalars[0], &scalar_vec->v[0]); + ibz_to_digit_array(scalars[1], &scalar_vec->v[1]); ec_biscalar_mul(res, scalars[0], scalars[1], f, PQ, curve); } @@ -48,14 +48,14 @@ id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *vec, 
const quat_left_ideal_t *lid quat_change_to_O0_basis(&coeffs, &alpha); for (unsigned i = 0; i < 2; ++i) { - ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + ibz_add(&mat.m[i][i], &mat.m[i][i], &coeffs.v[0]); for (unsigned j = 0; j < 2; ++j) { - ibz_mul(&tmp, &ACTION_GEN2[i][j], &coeffs[1]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &ACTION_GEN3[i][j], &coeffs[2]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &ACTION_GEN4[i][j], &coeffs[3]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN2.m[i][j], &coeffs.v[1]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN3.m[i][j], &coeffs.v[2]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN4.m[i][j], &coeffs.v[3]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); } } @@ -67,16 +67,16 @@ id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *vec, const quat_left_ideal_t *lid { const ibz_t *const norm = &lideal->norm; - ibz_mod(&(*vec)[0], &mat[0][0], norm); - ibz_mod(&(*vec)[1], &mat[1][0], norm); - ibz_gcd(&tmp, &(*vec)[0], &(*vec)[1]); + ibz_mod(&vec->v[0], &mat.m[0][0], norm); + ibz_mod(&vec->v[1], &mat.m[1][0], norm); + ibz_gcd(&tmp, &vec->v[0], &vec->v[1]); if (ibz_is_even(&tmp)) { - ibz_mod(&(*vec)[0], &mat[0][1], norm); - ibz_mod(&(*vec)[1], &mat[1][1], norm); + ibz_mod(&vec->v[0], &mat.m[0][1], norm); + ibz_mod(&vec->v[1], &mat.m[1][1], norm); } #ifndef NDEBUG - ibz_gcd(&tmp, &(*vec)[0], norm); - ibz_gcd(&tmp, &(*vec)[1], &tmp); + ibz_gcd(&tmp, &vec->v[0], norm); + ibz_gcd(&tmp, &vec->v[1], &tmp); assert(!ibz_cmp(&tmp, &ibz_const_one)); #endif } @@ -102,28 +102,28 @@ matrix_application_even_basis(ec_basis_t *bas, const ec_curve_t *E, ibz_mat_2x2_ copy_basis(&tmp_bas, bas); // reduction mod 2f - ibz_mod(&(*mat)[0][0], &(*mat)[0][0], &pow_two); - ibz_mod(&(*mat)[0][1], &(*mat)[0][1], &pow_two); - ibz_mod(&(*mat)[1][0], &(*mat)[1][0], &pow_two); - ibz_mod(&(*mat)[1][1], &(*mat)[1][1], &pow_two); + ibz_mod(&mat->m[0][0], &mat->m[0][0], &pow_two); + ibz_mod(&mat->m[0][1], &mat->m[0][1], &pow_two); + ibz_mod(&mat->m[1][0], &mat->m[1][0], &pow_two); + ibz_mod(&mat->m[1][1], &mat->m[1][1], &pow_two); // For a matrix [[a, c], [b, d]] we compute: // // first basis element R = [a]P + [b]Q - ibz_to_digit_array(scalars[0], &(*mat)[0][0]); - ibz_to_digit_array(scalars[1], &(*mat)[1][0]); + ibz_to_digit_array(scalars[0], &mat->m[0][0]); + ibz_to_digit_array(scalars[1], &mat->m[1][0]); ec_biscalar_mul(&bas->P, scalars[0], scalars[1], f, &tmp_bas, E); // second basis element S = [c]P + [d]Q - ibz_to_digit_array(scalars[0], &(*mat)[0][1]); - ibz_to_digit_array(scalars[1], &(*mat)[1][1]); + ibz_to_digit_array(scalars[0], &mat->m[0][1]); + ibz_to_digit_array(scalars[1], &mat->m[1][1]); ec_biscalar_mul(&bas->Q, scalars[0], scalars[1], f, &tmp_bas, E); // Their difference R - S = [a - c]P + [b - d]Q - ibz_sub(&tmp, &(*mat)[0][0], &(*mat)[0][1]); + ibz_sub(&tmp, &mat->m[0][0], &mat->m[0][1]); ibz_mod(&tmp, &tmp, &pow_two); ibz_to_digit_array(scalars[0], &tmp); - ibz_sub(&tmp, &(*mat)[1][0], &(*mat)[1][1]); + ibz_sub(&tmp, &mat->m[1][0], &mat->m[1][1]); ibz_mod(&tmp, &tmp, &pow_two); ibz_to_digit_array(scalars[1], &tmp); ret = ec_biscalar_mul(&bas->PmQ, scalars[0], scalars[1], f, &tmp_bas, E); @@ -157,23 +157,23 @@ endomorphism_application_even_basis(ec_basis_t *bas, quat_alg_make_primitive(&coeffs, &content, theta, &EXTREMAL_ORDERS[index_alternate_curve].order); assert(ibz_is_odd(&content)); - ibz_set(&mat[0][0], 0); - ibz_set(&mat[0][1], 0); - ibz_set(&mat[1][0], 0); - 
ibz_set(&mat[1][1], 0); + ibz_set(&mat.m[0][0], 0); + ibz_set(&mat.m[0][1], 0); + ibz_set(&mat.m[1][0], 0); + ibz_set(&mat.m[1][1], 0); // computing the matrix for (unsigned i = 0; i < 2; ++i) { - ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + ibz_add(&mat.m[i][i], &mat.m[i][i], &coeffs.v[0]); for (unsigned j = 0; j < 2; ++j) { - ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen2[i][j], &coeffs[1]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen3[i][j], &coeffs[2]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen4[i][j], &coeffs[3]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&mat[i][j], &mat[i][j], &content); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen2.m[i][j], &coeffs.v[1]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen3.m[i][j], &coeffs.v[2]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen4.m[i][j], &coeffs.v[3]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&mat.m[i][j], &mat.m[i][j], &content); } } @@ -215,19 +215,19 @@ id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t * ibz_mat_2x2_t mat; ibz_mat_2x2_init(&mat); - ibz_copy(&mat[0][0], &(*vec2)[0]); - ibz_copy(&mat[1][0], &(*vec2)[1]); + ibz_copy(&mat.m[0][0], &vec2->v[0]); + ibz_copy(&mat.m[1][0], &vec2->v[1]); ibz_mat_2x2_eval(&vec, &ACTION_J, vec2); - ibz_copy(&mat[0][1], &vec[0]); - ibz_copy(&mat[1][1], &vec[1]); + ibz_copy(&mat.m[0][1], &vec.v[0]); + ibz_copy(&mat.m[1][1], &vec.v[1]); ibz_mat_2x2_eval(&vec, &ACTION_GEN4, vec2); - ibz_add(&mat[0][1], &mat[0][1], &vec[0]); - ibz_add(&mat[1][1], &mat[1][1], &vec[1]); + ibz_add(&mat.m[0][1], &mat.m[0][1], &vec.v[0]); + ibz_add(&mat.m[1][1], &mat.m[1][1], &vec.v[1]); - ibz_mod(&mat[0][1], &mat[0][1], &two_pow); - ibz_mod(&mat[1][1], &mat[1][1], &two_pow); + ibz_mod(&mat.m[0][1], &mat.m[0][1], &two_pow); + ibz_mod(&mat.m[1][1], &mat.m[1][1], &two_pow); ibz_mat_2x2_t inv; ibz_mat_2x2_init(&inv); @@ -247,11 +247,11 @@ id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t * quat_alg_elem_t gen; quat_alg_elem_init(&gen); ibz_set(&gen.denom, 2); - ibz_add(&gen.coord[0], &vec[0], &vec[0]); - ibz_set(&gen.coord[1], -2); - ibz_add(&gen.coord[2], &vec[1], &vec[1]); - ibz_copy(&gen.coord[3], &vec[1]); - ibz_add(&gen.coord[0], &gen.coord[0], &vec[1]); + ibz_add(&gen.coord.v[0], &vec.v[0], &vec.v[0]); + ibz_set(&gen.coord.v[1], -2); + ibz_add(&gen.coord.v[2], &vec.v[1], &vec.v[1]); + ibz_copy(&gen.coord.v[3], &vec.v[1]); + ibz_add(&gen.coord.v[0], &gen.coord.v[0], &vec.v[1]); ibz_vec_2_finalize(&vec); quat_lideal_create(lideal, &gen, &two_pow, &MAXORD_O0, &QUATALG_PINFTY); @@ -319,10 +319,10 @@ _change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, #endif // Copy the results into the matrix - ibz_copy_digit_array(&((*mat)[0][0]), x1); - ibz_copy_digit_array(&((*mat)[1][0]), x2); - ibz_copy_digit_array(&((*mat)[0][1]), x3); - ibz_copy_digit_array(&((*mat)[1][1]), x4); + ibz_copy_digit_array(&(mat->m[0][0]), x1); + ibz_copy_digit_array(&(mat->m[1][0]), x2); + ibz_copy_digit_array(&(mat->m[0][1]), x3); + ibz_copy_digit_array(&(mat->m[1][1]), x4); } void diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ideal.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ideal.c index 
9cf863a104..8634143941 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ideal.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ideal.c @@ -33,7 +33,7 @@ quat_lideal_copy(quat_left_ideal_t *copy, const quat_left_ideal_t *copied) ibz_copy(©->lattice.denom, &copied->lattice.denom); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(©->lattice.basis[i][j], &copied->lattice.basis[i][j]); + ibz_copy(©->lattice.basis.m[i][j], &copied->lattice.basis.m[i][j]); } } } @@ -248,13 +248,13 @@ quat_lideal_class_gram(ibz_mat_4x4_t *G, const quat_left_ideal_t *lideal, const for (int i = 0; i < 4; i++) { for (int j = 0; j <= i; j++) { - ibz_div(&(*G)[i][j], &rmd, &(*G)[i][j], &divisor); + ibz_div(&G->m[i][j], &rmd, &G->m[i][j], &divisor); assert(ibz_is_zero(&rmd)); } } for (int i = 0; i < 4; i++) { for (int j = 0; j <= i - 1; j++) { - ibz_copy(&(*G)[j][i], &(*G)[i][j]); + ibz_copy(&G->m[j][i], &G->m[i][j]); } } @@ -289,8 +289,8 @@ quat_order_discriminant(ibz_t *disc, const quat_lattice_t *order, const quat_alg ibz_mat_4x4_transpose(&transposed, &(order->basis)); // multiply gram matrix by 2 because of reduced trace ibz_mat_4x4_identity(&norm); - ibz_copy(&(norm[2][2]), &(alg->p)); - ibz_copy(&(norm[3][3]), &(alg->p)); + ibz_copy(&(norm.m[2][2]), &(alg->p)); + ibz_copy(&(norm.m[3][3]), &(alg->p)); ibz_mat_4x4_scalar_mul(&norm, &ibz_const_two, &norm); ibz_mat_4x4_mul(&prod, &transposed, &norm); ibz_mat_4x4_mul(&prod, &prod, &(order->basis)); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/intbig.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/intbig.c index b0462dc8b5..e219bf3d96 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/intbig.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/intbig.c @@ -114,48 +114,48 @@ DEBUG_STR_FUN_4(const char *op, const ibz_t *arg1, const ibz_t *arg2, const ibz_ * @{ */ -const __mpz_struct ibz_const_zero[1] = { +const ibz_t ibz_const_zero = {{ { ._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]){ 0 }, } -}; +}}; -const __mpz_struct ibz_const_one[1] = { +const ibz_t ibz_const_one = {{ { ._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]){ 1 }, } -}; +}}; -const __mpz_struct ibz_const_two[1] = { +const ibz_t ibz_const_two = {{ { ._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]){ 2 }, } -}; +}}; -const __mpz_struct ibz_const_three[1] = { +const ibz_t ibz_const_three = {{ { ._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]){ 3 }, } -}; +}}; void ibz_init(ibz_t *x) { - mpz_init(*x); + mpz_init(x->i); } void ibz_finalize(ibz_t *x) { - mpz_clear(*x); + mpz_clear(x->i); } void @@ -168,7 +168,7 @@ ibz_add(ibz_t *sum, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_add(*sum, *a, *b); + mpz_add(sum->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_3("ibz_add", sum, &a_cp, &b_cp); ibz_finalize(&a_cp); @@ -186,7 +186,7 @@ ibz_sub(ibz_t *diff, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_sub(*diff, *a, *b); + mpz_sub(diff->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_3("ibz_sub", diff, &a_cp, &b_cp); @@ -205,7 +205,7 @@ ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_mul(*prod, *a, *b); + mpz_mul(prod->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_3("ibz_mul", prod, &a_cp, &b_cp); ibz_finalize(&a_cp); @@ -216,13 +216,13 @@ ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b) void ibz_neg(ibz_t *neg, const ibz_t *a) { - mpz_neg(*neg, *a); + mpz_neg(neg->i, a->i); } void 
ibz_abs(ibz_t *abs, const ibz_t *a) { - mpz_abs(*abs, *a); + mpz_abs(abs->i, a->i); } void @@ -235,7 +235,7 @@ ibz_div(ibz_t *quotient, ibz_t *remainder, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_tdiv_qr(*quotient, *remainder, *a, *b); + mpz_tdiv_qr(quotient->i, remainder->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_4("ibz_div", quotient, remainder, &a_cp, &b_cp); ibz_finalize(&a_cp); @@ -251,7 +251,7 @@ ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp) ibz_init(&a_cp); ibz_copy(&a_cp, a); #endif - mpz_tdiv_q_2exp(*quotient, *a, exp); + mpz_tdiv_q_2exp(quotient->i, a->i, exp); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_MP2_INT("ibz_div_2exp,%Zx,%Zx,%x\n", quotient, &a_cp, exp); ibz_finalize(&a_cp); @@ -261,50 +261,50 @@ ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp) void ibz_div_floor(ibz_t *q, ibz_t *r, const ibz_t *n, const ibz_t *d) { - mpz_fdiv_qr(*q, *r, *n, *d); + mpz_fdiv_qr(q->i, r->i, n->i, d->i); } void ibz_mod(ibz_t *r, const ibz_t *a, const ibz_t *b) { - mpz_mod(*r, *a, *b); + mpz_mod(r->i, a->i, b->i); } unsigned long int -ibz_mod_ui(const mpz_t *n, unsigned long int d) +ibz_mod_ui(const ibz_t *n, unsigned long int d) { - return mpz_fdiv_ui(*n, d); + return mpz_fdiv_ui(n->i, d); } int ibz_divides(const ibz_t *a, const ibz_t *b) { - return mpz_divisible_p(*a, *b); + return mpz_divisible_p(a->i, b->i); } void ibz_pow(ibz_t *pow, const ibz_t *x, uint32_t e) { - mpz_pow_ui(*pow, *x, e); + mpz_pow_ui(pow->i, x->i, e); } void ibz_pow_mod(ibz_t *pow, const ibz_t *x, const ibz_t *e, const ibz_t *m) { - mpz_powm(*pow, *x, *e, *m); + mpz_powm(pow->i, x->i, e->i, m->i); DEBUG_STR_FUN_4("ibz_pow_mod", pow, x, e, m); } int ibz_two_adic(ibz_t *pow) { - return mpz_scan1(*pow, 0); + return mpz_scan1(pow->i, 0); } int ibz_cmp(const ibz_t *a, const ibz_t *b) { - int ret = mpz_cmp(*a, *b); + int ret = mpz_cmp(a->i, b->i); DEBUG_STR_FUN_INT_MP2("ibz_cmp", ret, a, b); return ret; } @@ -312,7 +312,7 @@ ibz_cmp(const ibz_t *a, const ibz_t *b) int ibz_is_zero(const ibz_t *x) { - int ret = !mpz_cmp_ui(*x, 0); + int ret = !mpz_cmp_ui(x->i, 0); DEBUG_STR_FUN_INT_MP("ibz_is_zero", ret, x); return ret; } @@ -320,7 +320,7 @@ ibz_is_zero(const ibz_t *x) int ibz_is_one(const ibz_t *x) { - int ret = !mpz_cmp_ui(*x, 1); + int ret = !mpz_cmp_ui(x->i, 1); DEBUG_STR_FUN_INT_MP("ibz_is_one", ret, x); return ret; } @@ -328,7 +328,7 @@ ibz_is_one(const ibz_t *x) int ibz_cmp_int32(const ibz_t *x, int32_t y) { - int ret = mpz_cmp_si(*x, (signed long int)y); + int ret = mpz_cmp_si(x->i, (signed long int)y); DEBUG_STR_FUN_INT_MP_INT("ibz_cmp_int32", ret, x, y); return ret; } @@ -336,7 +336,7 @@ ibz_cmp_int32(const ibz_t *x, int32_t y) int ibz_is_even(const ibz_t *x) { - int ret = !mpz_tstbit(*x, 0); + int ret = !mpz_tstbit(x->i, 0); DEBUG_STR_FUN_INT_MP("ibz_is_even", ret, x); return ret; } @@ -344,7 +344,7 @@ ibz_is_even(const ibz_t *x) int ibz_is_odd(const ibz_t *x) { - int ret = mpz_tstbit(*x, 0); + int ret = mpz_tstbit(x->i, 0); DEBUG_STR_FUN_INT_MP("ibz_is_odd", ret, x); return ret; } @@ -352,7 +352,7 @@ ibz_is_odd(const ibz_t *x) void ibz_set(ibz_t *i, int32_t x) { - mpz_set_si(*i, x); + mpz_set_si(i->i, x); } int @@ -361,7 +361,7 @@ ibz_convert_to_str(const ibz_t *i, char *str, int base) if (!str || (base != 10 && base != 16)) return 0; - mpz_get_str(str, base, *i); + mpz_get_str(str, base, i->i); return 1; } @@ -380,29 +380,29 @@ ibz_print(const ibz_t *num, int base) int ibz_set_from_str(ibz_t *i, const char *str, int base) { - return (1 + 
mpz_set_str(*i, str, base)); + return (1 + mpz_set_str(i->i, str, base)); } void ibz_copy(ibz_t *target, const ibz_t *value) { - mpz_set(*target, *value); + mpz_set(target->i, value->i); } void ibz_swap(ibz_t *a, ibz_t *b) { - mpz_swap(*a, *b); + mpz_swap(a->i, b->i); } int32_t ibz_get(const ibz_t *i) { #if LONG_MAX == INT32_MAX - return (int32_t)mpz_get_si(*i); + return (int32_t)mpz_get_si(i->i); #elif LONG_MAX > INT32_MAX // Extracts the sign bit and the 31 least significant bits - signed long int t = mpz_get_si(*i); + signed long int t = mpz_get_si(i->i); return (int32_t)((t >> (sizeof(signed long int) * 8 - 32)) & INT32_C(0x80000000)) | (t & INT32_C(0x7FFFFFFF)); #else #error Unsupported configuration: LONG_MAX must be >= INT32_MAX @@ -417,10 +417,10 @@ ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b) mpz_t tmp; mpz_t bmina; mpz_init(bmina); - mpz_sub(bmina, *b, *a); + mpz_sub(bmina, b->i, a->i); if (mpz_sgn(bmina) == 0) { - mpz_set(*rand, *a); + mpz_set(rand->i, a->i); mpz_clear(bmina); return 1; } @@ -466,7 +466,7 @@ ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b) break; } while (1); - mpz_add(*rand, tmp, *a); + mpz_add(rand->i, tmp, a->i); err: mpz_clear(bmina); return ret; @@ -534,19 +534,19 @@ int ibz_rand_interval_minm_m(ibz_t *rand, int32_t m) { int ret = 1; - mpz_t m_big; + ibz_t m_big; // m_big = 2 * m - mpz_init_set_si(m_big, m); - mpz_add(m_big, m_big, m_big); + mpz_init_set_si(m_big.i, m); + mpz_add(m_big.i, m_big.i, m_big.i); // Sample in [0, 2*m] ret = ibz_rand_interval(rand, &ibz_const_zero, &m_big); // Adjust to range [-m, m] - mpz_sub_ui(*rand, *rand, m); + mpz_sub_ui(rand->i, rand->i, m); - mpz_clear(m_big); + mpz_clear(m_big.i); return ret; } @@ -555,41 +555,41 @@ int ibz_rand_interval_bits(ibz_t *rand, uint32_t m) { int ret = 1; - mpz_t tmp; - mpz_t low; - mpz_init_set_ui(tmp, 1); - mpz_mul_2exp(tmp, tmp, m); - mpz_init(low); - mpz_neg(low, tmp); + ibz_t tmp; + ibz_t low; + mpz_init_set_ui(tmp.i, 1); + mpz_mul_2exp(tmp.i, tmp.i, m); + mpz_init(low.i); + mpz_neg(low.i, tmp.i); ret = ibz_rand_interval(rand, &low, &tmp); - mpz_clear(tmp); - mpz_clear(low); + mpz_clear(tmp.i); + mpz_clear(low.i); if (ret != 1) goto err; - mpz_sub_ui(*rand, *rand, (unsigned long int)m); + mpz_sub_ui(rand->i, rand->i, (unsigned long int)m); return ret; err: - mpz_clear(tmp); - mpz_clear(low); + mpz_clear(tmp.i); + mpz_clear(low.i); return ret; } int ibz_bitsize(const ibz_t *a) { - return (int)mpz_sizeinbase(*a, 2); + return (int)mpz_sizeinbase(a->i, 2); } int ibz_size_in_base(const ibz_t *a, int base) { - return (int)mpz_sizeinbase(*a, base); + return (int)mpz_sizeinbase(a->i, base); } void ibz_copy_digits(ibz_t *target, const digit_t *dig, int dig_len) { - mpz_import(*target, dig_len, -1, sizeof(digit_t), 0, 0, dig); + mpz_import(target->i, dig_len, -1, sizeof(digit_t), 0, 0, dig); } void @@ -600,13 +600,13 @@ ibz_to_digits(digit_t *target, const ibz_t *ibz) // The next line ensures zero is written to the first limb of target if ibz is zero; // target is then overwritten by the actual value if it is not. 
target[0] = 0; - mpz_export(target, NULL, -1, sizeof(digit_t), 0, 0, *ibz); + mpz_export(target, NULL, -1, sizeof(digit_t), 0, 0, ibz->i); } int ibz_probab_prime(const ibz_t *n, int reps) { - int ret = mpz_probab_prime_p(*n, reps); + int ret = mpz_probab_prime_p(n->i, reps); DEBUG_STR_FUN_INT_MP_INT("ibz_probab_prime", ret, n, reps); return ret; } @@ -614,26 +614,26 @@ ibz_probab_prime(const ibz_t *n, int reps) void ibz_gcd(ibz_t *gcd, const ibz_t *a, const ibz_t *b) { - mpz_gcd(*gcd, *a, *b); + mpz_gcd(gcd->i, a->i, b->i); } int ibz_invmod(ibz_t *inv, const ibz_t *a, const ibz_t *mod) { - return (mpz_invert(*inv, *a, *mod) ? 1 : 0); + return (mpz_invert(inv->i, a->i, mod->i) ? 1 : 0); } int ibz_legendre(const ibz_t *a, const ibz_t *p) { - return mpz_legendre(*a, *p); + return mpz_legendre(a->i, p->i); } int ibz_sqrt(ibz_t *sqrt, const ibz_t *a) { - if (mpz_perfect_square_p(*a)) { - mpz_sqrt(*sqrt, *a); + if (mpz_perfect_square_p(a->i)) { + mpz_sqrt(sqrt->i, a->i); return 1; } else { return 0; @@ -643,7 +643,7 @@ ibz_sqrt(ibz_t *sqrt, const ibz_t *a) void ibz_sqrt_floor(ibz_t *sqrt, const ibz_t *a) { - mpz_sqrt(*sqrt, *a); + mpz_sqrt(sqrt->i, a->i); } int @@ -686,85 +686,85 @@ ibz_sqrt_mod_p(ibz_t *sqrt, const ibz_t *a, const ibz_t *p) int ret = 1; - mpz_mod(amod, *a, *p); + mpz_mod(amod, a->i, p->i); if (mpz_cmp_ui(amod, 0) < 0) { - mpz_add(amod, *p, amod); + mpz_add(amod, p->i, amod); } - if (mpz_legendre(amod, *p) != 1) { + if (mpz_legendre(amod, p->i) != 1) { ret = 0; goto end; } - mpz_sub_ui(pm1, *p, 1); + mpz_sub_ui(pm1, p->i, 1); - if (mpz_mod_ui(tmp, *p, 4) == 3) { + if (mpz_mod_ui(tmp, p->i, 4) == 3) { // p % 4 == 3 - mpz_add_ui(tmp, *p, 1); + mpz_add_ui(tmp, p->i, 1); mpz_fdiv_q_2exp(tmp, tmp, 2); - mpz_powm(*sqrt, amod, tmp, *p); - } else if (mpz_mod_ui(tmp, *p, 8) == 5) { + mpz_powm(sqrt->i, amod, tmp, p->i); + } else if (mpz_mod_ui(tmp, p->i, 8) == 5) { // p % 8 == 5 - mpz_sub_ui(tmp, *p, 1); + mpz_sub_ui(tmp, p->i, 1); mpz_fdiv_q_2exp(tmp, tmp, 2); - mpz_powm(tmp, amod, tmp, *p); // a^{(p-1)/4} mod p + mpz_powm(tmp, amod, tmp, p->i); // a^{(p-1)/4} mod p if (!mpz_cmp_ui(tmp, 1)) { - mpz_add_ui(tmp, *p, 3); + mpz_add_ui(tmp, p->i, 3); mpz_fdiv_q_2exp(tmp, tmp, 3); - mpz_powm(*sqrt, amod, tmp, *p); // a^{(p+3)/8} mod p + mpz_powm(sqrt->i, amod, tmp, p->i); // a^{(p+3)/8} mod p } else { - mpz_sub_ui(tmp, *p, 5); + mpz_sub_ui(tmp, p->i, 5); mpz_fdiv_q_2exp(tmp, tmp, 3); // (p - 5) / 8 mpz_mul_2exp(a4, amod, 2); // 4*a - mpz_powm(tmp, a4, tmp, *p); + mpz_powm(tmp, a4, tmp, p->i); mpz_mul_2exp(a2, amod, 1); mpz_mul(tmp, a2, tmp); - mpz_mod(*sqrt, tmp, *p); + mpz_mod(sqrt->i, tmp, p->i); } } else { // p % 8 == 1 -> Shanks-Tonelli int e = 0; - mpz_sub_ui(q, *p, 1); + mpz_sub_ui(q, p->i, 1); while (mpz_tstbit(q, e) == 0) e++; mpz_fdiv_q_2exp(q, q, e); // 1. find generator - non-quadratic residue mpz_set_ui(qnr, 2); - while (mpz_legendre(qnr, *p) != -1) + while (mpz_legendre(qnr, p->i) != -1) mpz_add_ui(qnr, qnr, 1); - mpz_powm(z, qnr, q, *p); + mpz_powm(z, qnr, q, p->i); // 2. 
Initialize mpz_set(y, z); - mpz_powm(y, amod, q, *p); // y = a^q mod p + mpz_powm(y, amod, q, p->i); // y = a^q mod p mpz_add_ui(tmp, q, 1); // tmp = (q + 1) / 2 mpz_fdiv_q_2exp(tmp, tmp, 1); - mpz_powm(x, amod, tmp, *p); // x = a^(q + 1)/2 mod p + mpz_powm(x, amod, tmp, p->i); // x = a^(q + 1)/2 mod p mpz_set_ui(exp, 1); mpz_mul_2exp(exp, exp, e - 2); for (int i = 0; i < e; ++i) { - mpz_powm(b, y, exp, *p); + mpz_powm(b, y, exp, p->i); if (!mpz_cmp(b, pm1)) { mpz_mul(x, x, z); - mpz_mod(x, x, *p); + mpz_mod(x, x, p->i); mpz_mul(y, y, z); mpz_mul(y, y, z); - mpz_mod(y, y, *p); + mpz_mod(y, y, p->i); } - mpz_powm_ui(z, z, 2, *p); + mpz_powm_ui(z, z, 2, p->i); mpz_fdiv_q_2exp(exp, exp, 1); } - mpz_set(*sqrt, x); + mpz_set(sqrt->i, x); } #ifdef DEBUG_VERBOSE diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/intbig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/intbig.h index a0c2c02477..28e478ff7f 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/intbig.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/intbig.h @@ -33,7 +33,9 @@ * * For integers of arbitrary size, used by intbig module, using gmp */ -typedef mpz_t ibz_t; +typedef struct { + mpz_t i; +} ibz_t; /** @} */ @@ -129,7 +131,7 @@ int ibz_two_adic(ibz_t *pow); */ void ibz_mod(ibz_t *r, const ibz_t *a, const ibz_t *b); -unsigned long int ibz_mod_ui(const mpz_t *n, unsigned long int d); +unsigned long int ibz_mod_ui(const ibz_t *n, unsigned long int d); /** @brief Test if a = 0 mod b */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/l2.c index 5491ee44d0..ea32213c75 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/l2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/l2.c @@ -57,25 +57,25 @@ to_etabar(fp_num *x) } static void -from_mpz(const mpz_t x, fp_num *r) +from_mpz(const ibz_t *x, fp_num *r) { long exp = 0; - r->s = mpz_get_d_2exp(&exp, x); + r->s = mpz_get_d_2exp(&exp, x->i); r->e = exp; } static void -to_mpz(const fp_num *x, mpz_t r) +to_mpz(const fp_num *x, ibz_t *r) { if (x->e >= DBL_MANT_DIG) { double s = x->s * 0x1P53; - mpz_set_d(r, s); - mpz_mul_2exp(r, r, x->e - DBL_MANT_DIG); + mpz_set_d(r->i, s); + mpz_mul_2exp(r->i, r->i, x->e - DBL_MANT_DIG); } else if (x->e < 0) { - mpz_set_ui(r, 0); + mpz_set_ui(r->i, 0); } else { double s = ldexp(x->s, x->e); - mpz_set_d(r, round(s)); + mpz_set_d(r->i, round(s)); } } @@ -203,7 +203,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) ibz_init(&tmpI); // Main L² loop - from_mpz((*G)[0][0], &r[0][0]); + from_mpz(&G->m[0][0], &r[0][0]); int kappa = 1; while (kappa < 4) { // size reduce b_κ @@ -213,7 +213,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Loop invariant: // r[κ][j] ≈ u[κ][j] ‖b_j*‖² ≈ 〈b_κ, b_j*〉 for (int j = 0; j <= kappa; j++) { - from_mpz((*G)[kappa][j], &r[kappa][j]); + from_mpz(&G->m[kappa][j], &r[kappa][j]); for (int k = 0; k < j; k++) { fp_mul(&r[kappa][k], &u[j][k], &tmpF); fp_sub(&r[kappa][j], &tmpF, &r[kappa][j]); @@ -229,22 +229,22 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) done = 0; copy(&u[kappa][i], &Xf); fp_round(&Xf); - to_mpz(&Xf, X); + to_mpz(&Xf, &X); // Update basis: b_κ ← b_κ - X·b_i for (int j = 0; j < 4; j++) { - ibz_mul(&tmpI, &X, &(*basis)[j][i]); - ibz_sub(&(*basis)[j][kappa], &(*basis)[j][kappa], &tmpI); + ibz_mul(&tmpI, &X, &basis->m[j][i]); + ibz_sub(&basis->m[j][kappa], &basis->m[j][kappa], &tmpI); } // Update lower half of the Gram matrix // = - 2X + X² = // - X - X( - X·) //// 〈b_κ, b_κ〉 ← 〈b_κ, b_κ〉 - X·〈b_κ, 
b_i〉 - ibz_mul(&tmpI, &X, &(*G)[kappa][i]); - ibz_sub(&(*G)[kappa][kappa], &(*G)[kappa][kappa], &tmpI); + ibz_mul(&tmpI, &X, &G->m[kappa][i]); + ibz_sub(&G->m[kappa][kappa], &G->m[kappa][kappa], &tmpI); for (int j = 0; j < 4; j++) { // works because i < κ // 〈b_κ, b_j〉 ← 〈b_κ, b_j〉 - X·〈b_i, b_j〉 - ibz_mul(&tmpI, &X, SYM((*G), i, j)); - ibz_sub(SYM((*G), kappa, j), SYM((*G), kappa, j), &tmpI); + ibz_mul(&tmpI, &X, SYM(G->m, i, j)); + ibz_sub(SYM(G->m, kappa, j), SYM(G->m, kappa, j), &tmpI); } // After the loop: //// 〈b_κ,b_κ〉 ← 〈b_κ,b_κ〉 - X·〈b_κ,b_i〉 - X·(〈b_κ,b_i〉 - X·〈b_i, @@ -261,7 +261,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Check Lovasz' conditions // lovasz[0] = ‖b_κ‖² - from_mpz((*G)[kappa][kappa], &lovasz[0]); + from_mpz(&G->m[kappa][kappa], &lovasz[0]); // lovasz[i] = lovasz[i-1] - u[κ][i-1]·r[κ][i-1] for (int i = 1; i < kappa; i++) { fp_mul(&u[kappa][i - 1], &r[kappa][i - 1], &tmpF); @@ -279,11 +279,11 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Insert b_κ before b_swap in the basis and in the lower half Gram matrix for (int j = kappa; j > swap; j--) { for (int i = 0; i < 4; i++) { - ibz_swap(&(*basis)[i][j], &(*basis)[i][j - 1]); + ibz_swap(&basis->m[i][j], &basis->m[i][j - 1]); if (i == j - 1) - ibz_swap(&(*G)[i][i], &(*G)[j][j]); + ibz_swap(&G->m[i][i], &G->m[j][j]); else if (i != j) - ibz_swap(SYM((*G), i, j), SYM((*G), i, j - 1)); + ibz_swap(SYM(G->m, i, j), SYM(G->m, i, j - 1)); } } // Copy row u[κ] and r[κ] in swap position, ignore what follows @@ -318,7 +318,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Fill in the upper half of the Gram matrix for (int i = 0; i < 4; i++) { for (int j = i + 1; j < 4; j++) - ibz_copy(&(*G)[i][j], &(*G)[j][i]); + ibz_copy(&G->m[i][j], &G->m[j][i]); } // Clearinghouse diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lat_ball.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lat_ball.c index c7bbb9682f..3f7476988c 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lat_ball.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lat_ball.c @@ -28,10 +28,10 @@ quat_lattice_bound_parallelogram(ibz_vec_4_t *box, ibz_mat_4x4_t *U, const ibz_m // Compute the parallelogram's bounds int trivial = 1; for (int i = 0; i < 4; i++) { - ibz_mul(&(*box)[i], &dualG[i][i], radius); - ibz_div(&(*box)[i], &rem, &(*box)[i], &denom); - ibz_sqrt_floor(&(*box)[i], &(*box)[i]); - trivial &= ibz_is_zero(&(*box)[i]); + ibz_mul(&box->v[i], &dualG.m[i][i], radius); + ibz_div(&box->v[i], &rem, &box->v[i], &denom); + ibz_sqrt_floor(&box->v[i], &box->v[i]); + trivial &= ibz_is_zero(&box->v[i]); } // Compute the transpose transformation matrix @@ -95,12 +95,12 @@ quat_lattice_sample_from_ball(quat_alg_elem_t *res, do { // Sample vector for (int i = 0; i < 4; i++) { - if (ibz_is_zero(&box[i])) { - ibz_copy(&x[i], &ibz_const_zero); + if (ibz_is_zero(&box.v[i])) { + ibz_copy(&x.v[i], &ibz_const_zero); } else { - ibz_add(&tmp, &box[i], &box[i]); - ok &= ibz_rand_interval(&x[i], &ibz_const_zero, &tmp); - ibz_sub(&x[i], &x[i], &box[i]); + ibz_add(&tmp, &box.v[i], &box.v[i]); + ok &= ibz_rand_interval(&x.v[i], &ibz_const_zero, &tmp); + ibz_sub(&x.v[i], &x.v[i], &box.v[i]); if (!ok) goto err; } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lattice.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lattice.c index c98bae9499..ef7b9ccdcc 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lattice.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lattice.c @@ -57,7 +57,7 @@ 
quat_lattice_conjugate_without_hnf(quat_lattice_t *conj, const quat_lattice_t *l for (int row = 1; row < 4; ++row) { for (int col = 0; col < 4; ++col) { - ibz_neg(&(conj->basis[row][col]), &(conj->basis[row][col])); + ibz_neg(&(conj->basis.m[row][col]), &(conj->basis.m[row][col])); } } } @@ -96,14 +96,14 @@ quat_lattice_add(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lat ibz_mat_4x4_scalar_mul(&tmp, &(lat1->denom), &(lat2->basis)); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(generators[j][i]), &(tmp[i][j])); + ibz_copy(&(generators[j].v[i]), &(tmp.m[i][j])); } } ibz_mat_4x4_inv_with_det_as_denom(NULL, &det1, &tmp); ibz_mat_4x4_scalar_mul(&tmp, &(lat2->denom), &(lat1->basis)); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(generators[4 + j][i]), &(tmp[i][j])); + ibz_copy(&(generators[4 + j].v[i]), &(tmp.m[i][j])); } } ibz_mat_4x4_inv_with_det_as_denom(NULL, &det2, &tmp); @@ -151,12 +151,12 @@ quat_lattice_mat_alg_coord_mul_without_hnf(ibz_mat_4x4_t *prod, ibz_vec_4_init(&p); ibz_vec_4_init(&a); for (int i = 0; i < 4; i++) { - ibz_vec_4_copy_ibz(&a, &((*lat)[0][i]), &((*lat)[1][i]), &((*lat)[2][i]), &((*lat)[3][i])); + ibz_vec_4_copy_ibz(&a, &(lat->m[0][i]), &(lat->m[1][i]), &(lat->m[2][i]), &(lat->m[3][i])); quat_alg_coord_mul(&p, &a, coord, alg); - ibz_copy(&((*prod)[0][i]), &(p[0])); - ibz_copy(&((*prod)[1][i]), &(p[1])); - ibz_copy(&((*prod)[2][i]), &(p[2])); - ibz_copy(&((*prod)[3][i]), &(p[3])); + ibz_copy(&(prod->m[0][i]), &(p.v[0])); + ibz_copy(&(prod->m[1][i]), &(p.v[1])); + ibz_copy(&(prod->m[2][i]), &(p.v[2])); + ibz_copy(&(prod->m[3][i]), &(p.v[3])); } ibz_vec_4_finalize(&p); ibz_vec_4_finalize(&a); @@ -191,15 +191,15 @@ quat_lattice_mul(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lat ibz_vec_4_init(&(generators[i])); for (int k = 0; k < 4; k++) { ibz_vec_4_copy_ibz( - &elem1, &(lat1->basis[0][k]), &(lat1->basis[1][k]), &(lat1->basis[2][k]), &(lat1->basis[3][k])); + &elem1, &(lat1->basis.m[0][k]), &(lat1->basis.m[1][k]), &(lat1->basis.m[2][k]), &(lat1->basis.m[3][k])); for (int i = 0; i < 4; i++) { ibz_vec_4_copy_ibz( - &elem2, &(lat2->basis[0][i]), &(lat2->basis[1][i]), &(lat2->basis[2][i]), &(lat2->basis[3][i])); + &elem2, &(lat2->basis.m[0][i]), &(lat2->basis.m[1][i]), &(lat2->basis.m[2][i]), &(lat2->basis.m[3][i])); quat_alg_coord_mul(&elem_res, &elem1, &elem2, alg); for (int j = 0; j < 4; j++) { if (k == 0) - ibz_copy(&(detmat[i][j]), &(elem_res[j])); - ibz_copy(&(generators[4 * k + i][j]), &(elem_res[j])); + ibz_copy(&(detmat.m[i][j]), &(elem_res.v[j])); + ibz_copy(&(generators[4 * k + i].v[j]), &(elem_res.v[j])); } } } @@ -239,7 +239,7 @@ quat_lattice_contains(ibz_vec_4_t *coord, const quat_lattice_t *lat, const quat_ // copy result if (divisible && (coord != NULL)) { for (int i = 0; i < 4; i++) { - ibz_copy(&((*coord)[i]), &(work_coord[i])); + ibz_copy(&(coord->v[i]), &(work_coord.v[i])); } } ibz_finalize(&prod); @@ -292,7 +292,7 @@ quat_lattice_hnf(quat_lattice_t *lat) ibz_vec_4_init(&(generators[i])); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(generators[j][i]), &(lat->basis[i][j])); + ibz_copy(&(generators[j].v[i]), &(lat->basis.m[i][j])); } } ibz_mat_4xn_hnf_mod_core(&(lat->basis), 4, generators, &mod); @@ -309,19 +309,19 @@ quat_lattice_gram(ibz_mat_4x4_t *G, const quat_lattice_t *lattice, const quat_al ibz_init(&tmp); for (int i = 0; i < 4; i++) { for (int j = 0; j <= i; j++) { - ibz_set(&(*G)[i][j], 0); + ibz_set(&G->m[i][j], 0); for (int k = 0; k < 
4; k++) { - ibz_mul(&tmp, &(lattice->basis)[k][i], &(lattice->basis)[k][j]); + ibz_mul(&tmp, &(lattice->basis.m)[k][i], &(lattice->basis.m)[k][j]); if (k >= 2) ibz_mul(&tmp, &tmp, &alg->p); - ibz_add(&(*G)[i][j], &(*G)[i][j], &tmp); + ibz_add(&G->m[i][j], &G->m[i][j], &tmp); } - ibz_mul(&(*G)[i][j], &(*G)[i][j], &ibz_const_two); + ibz_mul(&G->m[i][j], &G->m[i][j], &ibz_const_two); } } for (int i = 0; i < 4; i++) { for (int j = i + 1; j < 4; j++) { - ibz_copy(&(*G)[i][j], &(*G)[j][i]); + ibz_copy(&G->m[i][j], &G->m[j][i]); } } ibz_finalize(&tmp); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lll_applications.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lll_applications.c index 6c763b8c04..f5e9af922b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lll_applications.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lll_applications.c @@ -17,9 +17,9 @@ quat_lideal_reduce_basis(ibz_mat_4x4_t *reduced, quat_lll_core(gram, reduced); ibz_mat_4x4_scalar_mul(gram, &gram_corrector, gram); for (int i = 0; i < 4; i++) { - ibz_div_2exp(&((*gram)[i][i]), &((*gram)[i][i]), 1); + ibz_div_2exp(&(gram->m[i][i]), &(gram->m[i][i]), 1); for (int j = i + 1; j < 4; j++) { - ibz_set(&((*gram)[i][j]), 0); + ibz_set(&(gram->m[i][j]), 0); } } ibz_finalize(&gram_corrector); @@ -79,10 +79,10 @@ quat_lideal_prime_norm_reduced_equivalent(quat_left_ideal_t *lideal, while (!found && ctr < equiv_num_iter) { ctr++; // we select our linear combination at random - ibz_rand_interval_minm_m(&new_alpha.coord[0], equiv_bound_coeff); - ibz_rand_interval_minm_m(&new_alpha.coord[1], equiv_bound_coeff); - ibz_rand_interval_minm_m(&new_alpha.coord[2], equiv_bound_coeff); - ibz_rand_interval_minm_m(&new_alpha.coord[3], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[0], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[1], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[2], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[3], equiv_bound_coeff); // computation of the norm of the vector sampled quat_qf_eval(&tmp, &gram, &new_alpha.coord); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/normeq.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/normeq.c index 8c133dd095..aadbbe06c7 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/normeq.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/normeq.c @@ -13,23 +13,23 @@ quat_lattice_O0_set(quat_lattice_t *O0) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&(O0->basis[i][j]), 0); + ibz_set(&(O0->basis.m[i][j]), 0); } } ibz_set(&(O0->denom), 2); - ibz_set(&(O0->basis[0][0]), 2); - ibz_set(&(O0->basis[1][1]), 2); - ibz_set(&(O0->basis[2][2]), 1); - ibz_set(&(O0->basis[1][2]), 1); - ibz_set(&(O0->basis[3][3]), 1); - ibz_set(&(O0->basis[0][3]), 1); + ibz_set(&(O0->basis.m[0][0]), 2); + ibz_set(&(O0->basis.m[1][1]), 2); + ibz_set(&(O0->basis.m[2][2]), 1); + ibz_set(&(O0->basis.m[1][2]), 1); + ibz_set(&(O0->basis.m[3][3]), 1); + ibz_set(&(O0->basis.m[0][3]), 1); } void quat_lattice_O0_set_extremal(quat_p_extremal_maximal_order_t *O0) { - ibz_set(&O0->z.coord[1], 1); - ibz_set(&O0->t.coord[2], 1); + ibz_set(&O0->z.coord.v[1], 1); + ibz_set(&O0->t.coord.v[2], 1); ibz_set(&O0->z.denom, 1); ibz_set(&O0->t.denom, 1); O0->q = 1; @@ -50,24 +50,24 @@ quat_order_elem_create(quat_alg_elem_t *elem, quat_alg_elem_init(&quat_temp); // elem = x - quat_alg_scalar(elem, &(*coeffs)[0], &ibz_const_one); + quat_alg_scalar(elem, &coeffs->v[0], &ibz_const_one); // quat_temp = i*y - 
quat_alg_scalar(&quat_temp, &((*coeffs)[1]), &ibz_const_one); + quat_alg_scalar(&quat_temp, &(coeffs->v[1]), &ibz_const_one); quat_alg_mul(&quat_temp, &order->z, &quat_temp, Bpoo); // elem = x + i*y quat_alg_add(elem, elem, &quat_temp); // quat_temp = z * j - quat_alg_scalar(&quat_temp, &(*coeffs)[2], &ibz_const_one); + quat_alg_scalar(&quat_temp, &coeffs->v[2], &ibz_const_one); quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); // elem = x + i* + z*j quat_alg_add(elem, elem, &quat_temp); // quat_temp = t * j * i - quat_alg_scalar(&quat_temp, &(*coeffs)[3], &ibz_const_one); + quat_alg_scalar(&quat_temp, &coeffs->v[3], &ibz_const_one); quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); quat_alg_mul(&quat_temp, &quat_temp, &order->z, Bpoo); @@ -143,11 +143,11 @@ quat_represent_integer(quat_alg_elem_t *gamma, ibz_sub(&counter, &counter, &ibz_const_one); // we start by sampling the first coordinate - ibz_rand_interval(&coeffs[2], &ibz_const_one, &bound); + ibz_rand_interval(&coeffs.v[2], &ibz_const_one, &bound); // then, we sample the second coordinate // computing the second bound in temp as sqrt( (adjust_n_gamma - p*coeffs[2]²)/qp ) - ibz_mul(&cornacchia_target, &coeffs[2], &coeffs[2]); + ibz_mul(&cornacchia_target, &coeffs.v[2], &coeffs.v[2]); ibz_mul(&temp, &cornacchia_target, &(params->algebra->p)); ibz_sub(&temp, &adjusted_n_gamma, &temp); ibz_mul(&sq_bound, &q, &(params->algebra->p)); @@ -158,10 +158,10 @@ quat_represent_integer(quat_alg_elem_t *gamma, continue; } // sampling the second value - ibz_rand_interval(&coeffs[3], &ibz_const_one, &temp); + ibz_rand_interval(&coeffs.v[3], &ibz_const_one, &temp); // compute cornacchia_target = n_gamma - p * (z² + q*t²) - ibz_mul(&temp, &coeffs[3], &coeffs[3]); + ibz_mul(&temp, &coeffs.v[3], &coeffs.v[3]); ibz_mul(&temp, &q, &temp); ibz_add(&cornacchia_target, &cornacchia_target, &temp); ibz_mul(&cornacchia_target, &cornacchia_target, &((params->algebra)->p)); @@ -170,7 +170,7 @@ quat_represent_integer(quat_alg_elem_t *gamma, // applying cornacchia if (ibz_probab_prime(&cornacchia_target, params->primality_test_iterations)) - found = ibz_cornacchia_prime(&(coeffs[0]), &(coeffs[1]), &q, &cornacchia_target); + found = ibz_cornacchia_prime(&(coeffs.v[0]), &(coeffs.v[1]), &q, &cornacchia_target); else found = 0; @@ -179,33 +179,33 @@ quat_represent_integer(quat_alg_elem_t *gamma, // the treatmeat depends if the basis contains (1+j)/2 or (1+k)/2 // we must have x = t mod 2 and y = z mod 2 // if q=1 we can simply swap x and y - if (ibz_is_odd(&coeffs[0]) != ibz_is_odd(&coeffs[3])) { - ibz_swap(&coeffs[1], &coeffs[0]); + if (ibz_is_odd(&coeffs.v[0]) != ibz_is_odd(&coeffs.v[3])) { + ibz_swap(&coeffs.v[1], &coeffs.v[0]); } // we further check that (x-t)/2 = 1 mod 2 and (y-z)/2 = 1 mod 2 to ensure that the // resulting endomorphism will behave well for dim 2 computations - found = found && ((ibz_get(&coeffs[0]) - ibz_get(&coeffs[3])) % 4 == 2) && - ((ibz_get(&coeffs[1]) - ibz_get(&coeffs[2])) % 4 == 2); + found = found && ((ibz_get(&coeffs.v[0]) - ibz_get(&coeffs.v[3])) % 4 == 2) && + ((ibz_get(&coeffs.v[1]) - ibz_get(&coeffs.v[2])) % 4 == 2); } if (found) { #ifndef NDEBUG ibz_set(&temp, (params->order->q)); - ibz_mul(&temp, &temp, &(coeffs[1])); - ibz_mul(&temp, &temp, &(coeffs[1])); - ibz_mul(&test, &(coeffs[0]), &(coeffs[0])); + ibz_mul(&temp, &temp, &(coeffs.v[1])); + ibz_mul(&temp, &temp, &(coeffs.v[1])); + ibz_mul(&test, &(coeffs.v[0]), &(coeffs.v[0])); ibz_add(&temp, &temp, &test); assert(0 == ibz_cmp(&temp, &cornacchia_target)); - 
ibz_mul(&cornacchia_target, &(coeffs[3]), &(coeffs[3])); + ibz_mul(&cornacchia_target, &(coeffs.v[3]), &(coeffs.v[3])); ibz_mul(&cornacchia_target, &cornacchia_target, &(params->algebra->p)); - ibz_mul(&temp, &(coeffs[1]), &(coeffs[1])); + ibz_mul(&temp, &(coeffs.v[1]), &(coeffs.v[1])); ibz_add(&cornacchia_target, &cornacchia_target, &temp); ibz_set(&temp, (params->order->q)); ibz_mul(&cornacchia_target, &cornacchia_target, &temp); - ibz_mul(&temp, &(coeffs[0]), &coeffs[0]); + ibz_mul(&temp, &(coeffs.v[0]), &coeffs.v[0]); ibz_add(&cornacchia_target, &cornacchia_target, &temp); - ibz_mul(&temp, &(coeffs[2]), &coeffs[2]); + ibz_mul(&temp, &(coeffs.v[2]), &coeffs.v[2]); ibz_mul(&temp, &temp, &(params->algebra->p)); ibz_add(&cornacchia_target, &cornacchia_target, &temp); assert(0 == ibz_cmp(&cornacchia_target, &adjusted_n_gamma)); @@ -213,8 +213,8 @@ quat_represent_integer(quat_alg_elem_t *gamma, // translate x,y,z,t into the quaternion element gamma quat_order_elem_create(gamma, (params->order), &coeffs, (params->algebra)); #ifndef NDEBUG - quat_alg_norm(&temp, &(coeffs[0]), gamma, (params->algebra)); - assert(ibz_is_one(&(coeffs[0]))); + quat_alg_norm(&temp, &(coeffs.v[0]), gamma, (params->algebra)); + assert(ibz_is_one(&(coeffs.v[0]))); assert(0 == ibz_cmp(&temp, &adjusted_n_gamma)); assert(quat_lattice_contains(NULL, &((params->order)->order), gamma)); #endif @@ -232,10 +232,10 @@ quat_represent_integer(quat_alg_elem_t *gamma, if (found) { // new gamma ibz_mat_4x4_eval(&coeffs, &(((params->order)->order).basis), &coeffs); - ibz_copy(&gamma->coord[0], &coeffs[0]); - ibz_copy(&gamma->coord[1], &coeffs[1]); - ibz_copy(&gamma->coord[2], &coeffs[2]); - ibz_copy(&gamma->coord[3], &coeffs[3]); + ibz_copy(&gamma->coord.v[0], &coeffs.v[0]); + ibz_copy(&gamma->coord.v[1], &coeffs.v[1]); + ibz_copy(&gamma->coord.v[2], &coeffs.v[2]); + ibz_copy(&gamma->coord.v[3], &coeffs.v[3]); ibz_copy(&gamma->denom, &(((params->order)->order).denom)); } // var finalize @@ -279,10 +279,10 @@ quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, // we find a quaternion element of norm divisible by norm while (!found) { // generating a trace-zero element at random - ibz_set(&gen.coord[0], 0); + ibz_set(&gen.coord.v[0], 0); ibz_sub(&n_temp, norm, &ibz_const_one); for (int i = 1; i < 4; i++) - ibz_rand_interval(&gen.coord[i], &ibz_const_zero, &n_temp); + ibz_rand_interval(&gen.coord.v[i], &ibz_const_zero, &n_temp); // first, we compute the norm of the gen quat_alg_norm(&n_temp, &norm_d, &gen, (params->algebra)); @@ -293,7 +293,7 @@ quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, ibz_mod(&disc, &disc, norm); // now we check that -n is a square mod norm // and if the square root exists we compute it - found = ibz_sqrt_mod_p(&gen.coord[0], &disc, norm); + found = ibz_sqrt_mod_p(&gen.coord.v[0], &disc, norm); found = found && !quat_alg_elem_is_zero(&gen); } } else { @@ -319,7 +319,7 @@ quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, found = 0; while (!found) { for (int i = 0; i < 4; i++) { - ibz_rand_interval(&gen_rerand.coord[i], &ibz_const_one, norm); + ibz_rand_interval(&gen_rerand.coord.v[i], &ibz_const_one, norm); } quat_alg_norm(&n_temp, &norm_d, &gen_rerand, (params->algebra)); assert(ibz_is_one(&norm_d)); @@ -348,22 +348,22 @@ quat_change_to_O0_basis(ibz_vec_4_t *vec, const quat_alg_elem_t *el) { ibz_t tmp; ibz_init(&tmp); - ibz_copy(&(*vec)[2], &el->coord[2]); - ibz_add(&(*vec)[2], &(*vec)[2], &(*vec)[2]); // double (not optimal if el->denom is even...) 
- ibz_copy(&(*vec)[3], &el->coord[3]); // double (not optimal if el->denom is even...) - ibz_add(&(*vec)[3], &(*vec)[3], &(*vec)[3]); - ibz_sub(&(*vec)[0], &el->coord[0], &el->coord[3]); - ibz_sub(&(*vec)[1], &el->coord[1], &el->coord[2]); - - assert(ibz_divides(&(*vec)[0], &el->denom)); - assert(ibz_divides(&(*vec)[1], &el->denom)); - assert(ibz_divides(&(*vec)[2], &el->denom)); - assert(ibz_divides(&(*vec)[3], &el->denom)); - - ibz_div(&(*vec)[0], &tmp, &(*vec)[0], &el->denom); - ibz_div(&(*vec)[1], &tmp, &(*vec)[1], &el->denom); - ibz_div(&(*vec)[2], &tmp, &(*vec)[2], &el->denom); - ibz_div(&(*vec)[3], &tmp, &(*vec)[3], &el->denom); + ibz_copy(&vec->v[2], &el->coord.v[2]); + ibz_add(&vec->v[2], &vec->v[2], &vec->v[2]); // double (not optimal if el->denom is even...) + ibz_copy(&vec->v[3], &el->coord.v[3]); // double (not optimal if el->denom is even...) + ibz_add(&vec->v[3], &vec->v[3], &vec->v[3]); + ibz_sub(&vec->v[0], &el->coord.v[0], &el->coord.v[3]); + ibz_sub(&vec->v[1], &el->coord.v[1], &el->coord.v[2]); + + assert(ibz_divides(&vec->v[0], &el->denom)); + assert(ibz_divides(&vec->v[1], &el->denom)); + assert(ibz_divides(&vec->v[2], &el->denom)); + assert(ibz_divides(&vec->v[3], &el->denom)); + + ibz_div(&vec->v[0], &tmp, &vec->v[0], &el->denom); + ibz_div(&vec->v[1], &tmp, &vec->v[1], &el->denom); + ibz_div(&vec->v[2], &tmp, &vec->v[2], &el->denom); + ibz_div(&vec->v[3], &tmp, &vec->v[3], &el->denom); ibz_finalize(&tmp); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/printer.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/printer.c index 6d6a3ca9b7..7702fb7ca4 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/printer.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/printer.c @@ -7,7 +7,7 @@ ibz_mat_2x2_print(const ibz_mat_2x2_t *mat) printf("matrix: "); for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_print(&((*mat)[i][j]), 10); + ibz_print(&(mat->m[i][j]), 10); printf(" "); } printf("\n "); @@ -21,7 +21,7 @@ ibz_mat_4x4_print(const ibz_mat_4x4_t *mat) printf("matrix: "); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_print(&((*mat)[i][j]), 10); + ibz_print(&(mat->m[i][j]), 10); printf(" "); } printf("\n "); @@ -34,7 +34,7 @@ ibz_vec_2_print(const ibz_vec_2_t *vec) { printf("vector: "); for (int i = 0; i < 2; i++) { - ibz_print(&((*vec)[i]), 10); + ibz_print(&(vec->v[i]), 10); printf(" "); } printf("\n\n"); @@ -45,7 +45,7 @@ ibz_vec_4_print(const ibz_vec_4_t *vec) { printf("vector: "); for (int i = 0; i < 4; i++) { - ibz_print(&((*vec)[i]), 10); + ibz_print(&(vec->v[i]), 10); printf(" "); } printf("\n\n"); @@ -61,7 +61,7 @@ quat_lattice_print(const quat_lattice_t *lat) printf("basis: "); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_print(&((lat->basis)[i][j]), 10); + ibz_print(&((lat->basis.m)[i][j]), 10); printf(" "); } printf("\n "); @@ -85,7 +85,7 @@ quat_alg_elem_print(const quat_alg_elem_t *elem) printf("\n"); printf("coordinates: "); for (int i = 0; i < 4; i++) { - ibz_print(&((elem->coord)[i]), 10); + ibz_print(&((elem->coord.v)[i]), 10); printf(" "); } printf("\n\n"); @@ -104,7 +104,7 @@ quat_left_ideal_print(const quat_left_ideal_t *lideal) printf("basis: "); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_print(&((lideal->lattice.basis)[i][j]), 10); + ibz_print(&((lideal->lattice.basis.m)[i][j]), 10); printf(" "); } if (i != 3) { @@ -120,7 +120,7 @@ quat_left_ideal_print(const quat_left_ideal_t *lideal) printf("parent order basis: "); for (int i = 0; i < 4; i++) { for 
(int j = 0; j < 4; j++) { - ibz_print(&((lideal->parent_order->basis)[i][j]), 10); + ibz_print(&((lideal->parent_order->basis.m)[i][j]), 10); printf(" "); } printf("\n "); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion.h index a567657464..2dd70a8c19 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion.h @@ -25,7 +25,9 @@ * * @typedef ibz_vec_2_t */ -typedef ibz_t ibz_vec_2_t[2]; +typedef struct { + ibz_t v[2]; +} ibz_vec_2_t; /** @brief Type for vectors of 4 integers * @@ -33,7 +35,9 @@ typedef ibz_t ibz_vec_2_t[2]; * * Represented as a vector of 4 ibz_t (big integer) elements */ -typedef ibz_t ibz_vec_4_t[4]; +typedef struct { + ibz_t v[4]; +} ibz_vec_4_t; /** @brief Type for 2 by 2 matrices of integers * @@ -41,7 +45,9 @@ typedef ibz_t ibz_vec_4_t[4]; * * Represented as a matrix of 2 vectors of 2 ibz_t (big integer) elements */ -typedef ibz_t ibz_mat_2x2_t[2][2]; +typedef struct { + ibz_t m[2][2]; +} ibz_mat_2x2_t; /** @brief Type for 4 by 4 matrices of integers * @@ -49,7 +55,9 @@ typedef ibz_t ibz_mat_2x2_t[2][2]; * * Represented as a matrix of 4 vectors of 4 ibz_t (big integer) elements */ -typedef ibz_t ibz_mat_4x4_t[4][4]; +typedef struct { + ibz_t m[4][4]; +} ibz_mat_4x4_t; /** * @} */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion_data.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion_data.c index 24402255d4..6944b06f09 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion_data.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/quaternion_data.c @@ -4,3623 +4,3623 @@ const ibz_t QUAT_prime_cofactor = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x171,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x8000}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x171,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x8000}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x171,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x80000000}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x171,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x80000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x171,0x0,0x0,0x0,0x0,0x8000000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x171,0x0,0x0,0x0,0x0,0x8000000000000000}}}} #endif ; const quat_alg_t QUATALG_PINFTY = { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x40ff}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x40ff}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x40ffffff}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x40ffffff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0x40ffffffffffffff}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0x40ffffffffffffff}}}} #endif }; const quat_p_extremal_maximal_order_t EXTREMAL_ORDERS[8] = {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, 
._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 1}, {{ +}}}, 1}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xafcc,0xe7ed,0x58f3,0x3e59,0x9763,0x88d6,0xf2c5,0x4a6d,0x3afe,0xf44b,0x7d27,0x3c31}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xafcc,0xe7ed,0x58f3,0x3e59,0x9763,0x88d6,0xf2c5,0x4a6d,0x3afe,0xf44b,0x7d27,0x3c31}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe7edafcc,0x3e5958f3,0x88d69763,0x4a6df2c5,0xf44b3afe,0x3c317d27}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe7edafcc,0x3e5958f3,0x88d69763,0x4a6df2c5,0xf44b3afe,0x3c317d27}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x3e5958f3e7edafcc,0x4a6df2c588d69763,0x3c317d27f44b3afe}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x3e5958f3e7edafcc,0x4a6df2c588d69763,0x3c317d27f44b3afe}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xafcc,0xe7ed,0x58f3,0x3e59,0x9763,0x88d6,0xf2c5,0x4a6d,0x3afe,0xf44b,0x7d27,0x3c31}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xafcc,0xe7ed,0x58f3,0x3e59,0x9763,0x88d6,0xf2c5,0x4a6d,0x3afe,0xf44b,0x7d27,0x3c31}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe7edafcc,0x3e5958f3,0x88d69763,0x4a6df2c5,0xf44b3afe,0x3c317d27}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe7edafcc,0x3e5958f3,0x88d69763,0x4a6df2c5,0xf44b3afe,0x3c317d27}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x3e5958f3e7edafcc,0x4a6df2c588d69763,0x3c317d27f44b3afe}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x3e5958f3e7edafcc,0x4a6df2c588d69763,0x3c317d27f44b3afe}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd7e6,0xf3f6,0xac79,0x9f2c,0x4bb1,0xc46b,0xf962,0x2536,0x9d7f,0xfa25,0xbe93,0x1e18}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd7e6,0xf3f6,0xac79,0x9f2c,0x4bb1,0xc46b,0xf962,0x2536,0x9d7f,0xfa25,0xbe93,0x1e18}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf3f6d7e6,0x9f2cac79,0xc46b4bb1,0x2536f962,0xfa259d7f,0x1e18be93}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf3f6d7e6,0x9f2cac79,0xc46b4bb1,0x2536f962,0xfa259d7f,0x1e18be93}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x9f2cac79f3f6d7e6,0x2536f962c46b4bb1,0x1e18be93fa259d7f}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x9f2cac79f3f6d7e6,0x2536f962c46b4bb1,0x1e18be93fa259d7f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fb7,0xea91,0xa3ea,0x2a21,0x9cd1,0x26b3,0xde73,0xa2d3,0xcecc,0x20a1,0xc963,0x266b}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fb7,0xea91,0xa3ea,0x2a21,0x9cd1,0x26b3,0xde73,0xa2d3,0xcecc,0x20a1,0xc963,0x266b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea912fb7,0x2a21a3ea,0x26b39cd1,0xa2d3de73,0x20a1cecc,0x266bc963}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea912fb7,0x2a21a3ea,0x26b39cd1,0xa2d3de73,0x20a1cecc,0x266bc963}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x2a21a3eaea912fb7,0xa2d3de7326b39cd1,0x266bc96320a1cecc}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x2a21a3eaea912fb7,0xa2d3de7326b39cd1,0x266bc96320a1cecc}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e5f,0x4aa8,0x9064,0x8436,0x8fae,0x50ab,0x2fd8,0xdd15,0x617a,0x8343,0x9423,0x3d7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x680}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x1e5f,0x4aa8,0x9064,0x8436,0x8fae,0x50ab,0x2fd8,0xdd15,0x617a,0x8343,0x9423,0x3d7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x680}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4aa81e5f,0x84369064,0x50ab8fae,0xdd152fd8,0x8343617a,0x3d79423,0x0,0x0,0x0,0x0,0x0,0x6800000}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4aa81e5f,0x84369064,0x50ab8fae,0xdd152fd8,0x8343617a,0x3d79423,0x0,0x0,0x0,0x0,0x0,0x6800000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x843690644aa81e5f,0xdd152fd850ab8fae,0x3d794238343617a,0x0,0x0,0x680000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x843690644aa81e5f,0xdd152fd850ab8fae,0x3d794238343617a,0x0,0x0,0x680000000000000}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc 
= 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd7e6,0xf3f6,0xac79,0x9f2c,0x4bb1,0xc46b,0xf962,0x2536,0x9d7f,0xfa25,0xbe93,0x1e18}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd7e6,0xf3f6,0xac79,0x9f2c,0x4bb1,0xc46b,0xf962,0x2536,0x9d7f,0xfa25,0xbe93,0x1e18}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf3f6d7e6,0x9f2cac79,0xc46b4bb1,0x2536f962,0xfa259d7f,0x1e18be93}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xf3f6d7e6,0x9f2cac79,0xc46b4bb1,0x2536f962,0xfa259d7f,0x1e18be93}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x9f2cac79f3f6d7e6,0x2536f962c46b4bb1,0x1e18be93fa259d7f}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x9f2cac79f3f6d7e6,0x2536f962c46b4bb1,0x1e18be93fa259d7f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x1e5f,0x4aa8,0x9064,0x8436,0x8fae,0x50ab,0x2fd8,0xdd15,0x617a,0x8343,0x9423,0x3d7}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x1e5f,0x4aa8,0x9064,0x8436,0x8fae,0x50ab,0x2fd8,0xdd15,0x617a,0x8343,0x9423,0x3d7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x4aa81e5f,0x84369064,0x50ab8fae,0xdd152fd8,0x8343617a,0x3d79423}}} +{{{._mp_alloc = 
0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x4aa81e5f,0x84369064,0x50ab8fae,0xdd152fd8,0x8343617a,0x3d79423}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x843690644aa81e5f,0xdd152fd850ab8fae,0x3d794238343617a}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x843690644aa81e5f,0xdd152fd850ab8fae,0x3d794238343617a}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xafcc,0xe7ed,0x58f3,0x3e59,0x9763,0x88d6,0xf2c5,0x4a6d,0x3afe,0xf44b,0x7d27,0x3c31}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xafcc,0xe7ed,0x58f3,0x3e59,0x9763,0x88d6,0xf2c5,0x4a6d,0x3afe,0xf44b,0x7d27,0x3c31}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe7edafcc,0x3e5958f3,0x88d69763,0x4a6df2c5,0xf44b3afe,0x3c317d27}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xe7edafcc,0x3e5958f3,0x88d69763,0x4a6df2c5,0xf44b3afe,0x3c317d27}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x3e5958f3e7edafcc,0x4a6df2c588d69763,0x3c317d27f44b3afe}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x3e5958f3e7edafcc,0x4a6df2c588d69763,0x3c317d27f44b3afe}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fb7,0xea91,0xa3ea,0x2a21,0x9cd1,0x26b3,0xde73,0xa2d3,0xcecc,0x20a1,0xc963,0x266b}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fb7,0xea91,0xa3ea,0x2a21,0x9cd1,0x26b3,0xde73,0xa2d3,0xcecc,0x20a1,0xc963,0x266b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea912fb7,0x2a21a3ea,0x26b39cd1,0xa2d3de73,0x20a1cecc,0x266bc963}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xea912fb7,0x2a21a3ea,0x26b39cd1,0xa2d3de73,0x20a1cecc,0x266bc963}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x2a21a3eaea912fb7,0xa2d3de7326b39cd1,0x266bc96320a1cecc}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x2a21a3eaea912fb7,0xa2d3de7326b39cd1,0x266bc96320a1cecc}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) 
{0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 5}, {{ +}}}, 5}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc40,0xb21f,0x5bc7,0x8dca,0xc2a2,0xca0a,0xc8b1,0xbddd,0xcb7d,0xc9d5,0xa9d,0x3cc0}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc40,0xb21f,0x5bc7,0x8dca,0xc2a2,0xca0a,0xc8b1,0xbddd,0xcb7d,0xc9d5,0xa9d,0x3cc0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb21f0c40,0x8dca5bc7,0xca0ac2a2,0xbdddc8b1,0xc9d5cb7d,0x3cc00a9d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb21f0c40,0x8dca5bc7,0xca0ac2a2,0xbdddc8b1,0xc9d5cb7d,0x3cc00a9d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x8dca5bc7b21f0c40,0xbdddc8b1ca0ac2a2,0x3cc00a9dc9d5cb7d}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x8dca5bc7b21f0c40,0xbdddc8b1ca0ac2a2,0x3cc00a9dc9d5cb7d}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xc40,0xb21f,0x5bc7,0x8dca,0xc2a2,0xca0a,0xc8b1,0xbddd,0xcb7d,0xc9d5,0xa9d,0x3cc0}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc40,0xb21f,0x5bc7,0x8dca,0xc2a2,0xca0a,0xc8b1,0xbddd,0xcb7d,0xc9d5,0xa9d,0x3cc0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb21f0c40,0x8dca5bc7,0xca0ac2a2,0xbdddc8b1,0xc9d5cb7d,0x3cc00a9d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb21f0c40,0x8dca5bc7,0xca0ac2a2,0xbdddc8b1,0xc9d5cb7d,0x3cc00a9d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x8dca5bc7b21f0c40,0xbdddc8b1ca0ac2a2,0x3cc00a9dc9d5cb7d}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x8dca5bc7b21f0c40,0xbdddc8b1ca0ac2a2,0x3cc00a9dc9d5cb7d}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8620,0xd90f,0x2de3,0x46e5,0x6151,0xe505,0xe458,0xdeee,0xe5be,0xe4ea,0x54e,0x1e60}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8620,0xd90f,0x2de3,0x46e5,0x6151,0xe505,0xe458,0xdeee,0xe5be,0xe4ea,0x54e,0x1e60}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd90f8620,0x46e52de3,0xe5056151,0xdeeee458,0xe4eae5be,0x1e60054e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd90f8620,0x46e52de3,0xe5056151,0xdeeee458,0xe4eae5be,0x1e60054e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x46e52de3d90f8620,0xdeeee458e5056151,0x1e60054ee4eae5be}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x46e52de3d90f8620,0xdeeee458e5056151,0x1e60054ee4eae5be}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6801,0x9f6f,0x4e1e,0xbd0b,0x5c5e,0xaa12,0xb4c6,0x3849,0xd6c7,0x2d76,0x3227,0xb106}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6801,0x9f6f,0x4e1e,0xbd0b,0x5c5e,0xaa12,0xb4c6,0x3849,0xd6c7,0x2d76,0x3227,0xb106}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x9f6f6801,0xbd0b4e1e,0xaa125c5e,0x3849b4c6,0x2d76d6c7,0xb1063227}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9f6f6801,0xbd0b4e1e,0xaa125c5e,0x3849b4c6,0x2d76d6c7,0xb1063227}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbd0b4e1e9f6f6801,0x3849b4c6aa125c5e,0xb10632272d76d6c7}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbd0b4e1e9f6f6801,0x3849b4c6aa125c5e,0xb10632272d76d6c7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8400,0x4135,0x343c,0xa4cf,0x6603,0xa414,0xc207,0x5ac7,0x921b,0xd084,0x1ed,0x6cf,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x280}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x8400,0x4135,0x343c,0xa4cf,0x6603,0xa414,0xc207,0x5ac7,0x921b,0xd084,0x1ed,0x6cf,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x280}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x41358400,0xa4cf343c,0xa4146603,0x5ac7c207,0xd084921b,0x6cf01ed,0x0,0x0,0x0,0x0,0x0,0x2800000}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x41358400,0xa4cf343c,0xa4146603,0x5ac7c207,0xd084921b,0x6cf01ed,0x0,0x0,0x0,0x0,0x0,0x2800000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xa4cf343c41358400,0x5ac7c207a4146603,0x6cf01edd084921b,0x0,0x0,0x280000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xa4cf343c41358400,0x5ac7c207a4146603,0x6cf01edd084921b,0x0,0x0,0x280000000000000}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8620,0xd90f,0x2de3,0x46e5,0x6151,0xe505,0xe458,0xdeee,0xe5be,0xe4ea,0x54e,0x1e60}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8620,0xd90f,0x2de3,0x46e5,0x6151,0xe505,0xe458,0xdeee,0xe5be,0xe4ea,0x54e,0x1e60}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd90f8620,0x46e52de3,0xe5056151,0xdeeee458,0xe4eae5be,0x1e60054e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = 
(mp_limb_t[]) {0xd90f8620,0x46e52de3,0xe5056151,0xdeeee458,0xe4eae5be,0x1e60054e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x46e52de3d90f8620,0xdeeee458e5056151,0x1e60054ee4eae5be}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x46e52de3d90f8620,0xdeeee458e5056151,0x1e60054ee4eae5be}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x8400,0x4135,0x343c,0xa4cf,0x6603,0xa414,0xc207,0x5ac7,0x921b,0xd084,0x1ed,0x6cf}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x8400,0x4135,0x343c,0xa4cf,0x6603,0xa414,0xc207,0x5ac7,0x921b,0xd084,0x1ed,0x6cf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x41358400,0xa4cf343c,0xa4146603,0x5ac7c207,0xd084921b,0x6cf01ed}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x41358400,0xa4cf343c,0xa4146603,0x5ac7c207,0xd084921b,0x6cf01ed}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xa4cf343c41358400,0x5ac7c207a4146603,0x6cf01edd084921b}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xa4cf343c41358400,0x5ac7c207a4146603,0x6cf01edd084921b}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc40,0xb21f,0x5bc7,0x8dca,0xc2a2,0xca0a,0xc8b1,0xbddd,0xcb7d,0xc9d5,0xa9d,0x3cc0}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xc40,0xb21f,0x5bc7,0x8dca,0xc2a2,0xca0a,0xc8b1,0xbddd,0xcb7d,0xc9d5,0xa9d,0x3cc0}}}} #elif GMP_LIMB_BITS 
== 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb21f0c40,0x8dca5bc7,0xca0ac2a2,0xbdddc8b1,0xc9d5cb7d,0x3cc00a9d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb21f0c40,0x8dca5bc7,0xca0ac2a2,0xbdddc8b1,0xc9d5cb7d,0x3cc00a9d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x8dca5bc7b21f0c40,0xbdddc8b1ca0ac2a2,0x3cc00a9dc9d5cb7d}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x8dca5bc7b21f0c40,0xbdddc8b1ca0ac2a2,0x3cc00a9dc9d5cb7d}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6801,0x9f6f,0x4e1e,0xbd0b,0x5c5e,0xaa12,0xb4c6,0x3849,0xd6c7,0x2d76,0x3227,0xb106}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x6801,0x9f6f,0x4e1e,0xbd0b,0x5c5e,0xaa12,0xb4c6,0x3849,0xd6c7,0x2d76,0x3227,0xb106}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9f6f6801,0xbd0b4e1e,0xaa125c5e,0x3849b4c6,0x2d76d6c7,0xb1063227}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9f6f6801,0xbd0b4e1e,0xaa125c5e,0x3849b4c6,0x2d76d6c7,0xb1063227}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbd0b4e1e9f6f6801,0x3849b4c6aa125c5e,0xb10632272d76d6c7}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbd0b4e1e9f6f6801,0x3849b4c6aa125c5e,0xb10632272d76d6c7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 13}, {{ +}}}, 13}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x5b40,0x7328,0xdb38,0x5357,0x465b,0x31d8,0x1f3,0x85cf,0x32b9,0xd8dc,0x6f6d,0x3dab,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x5b40,0x7328,0xdb38,0x5357,0x465b,0x31d8,0x1f3,0x85cf,0x32b9,0xd8dc,0x6f6d,0x3dab,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x73285b40,0x5357db38,0x31d8465b,0x85cf01f3,0xd8dc32b9,0x3dab6f6d,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x73285b40,0x5357db38,0x31d8465b,0x85cf01f3,0xd8dc32b9,0x3dab6f6d,0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5357db3873285b40,0x85cf01f331d8465b,0x3dab6f6dd8dc32b9,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5357db3873285b40,0x85cf01f331d8465b,0x3dab6f6dd8dc32b9,0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x5b40,0x7328,0xdb38,0x5357,0x465b,0x31d8,0x1f3,0x85cf,0x32b9,0xd8dc,0x6f6d,0x3dab,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x5b40,0x7328,0xdb38,0x5357,0x465b,0x31d8,0x1f3,0x85cf,0x32b9,0xd8dc,0x6f6d,0x3dab,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x73285b40,0x5357db38,0x31d8465b,0x85cf01f3,0xd8dc32b9,0x3dab6f6d,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x73285b40,0x5357db38,0x31d8465b,0x85cf01f3,0xd8dc32b9,0x3dab6f6d,0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5357db3873285b40,0x85cf01f331d8465b,0x3dab6f6dd8dc32b9,0x2}}} +{{{._mp_alloc = 0, ._mp_size 
= 4, ._mp_d = (mp_limb_t[]) {0x5357db3873285b40,0x85cf01f331d8465b,0x3dab6f6dd8dc32b9,0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x2da0,0x3994,0xed9c,0xa9ab,0x232d,0x98ec,0x80f9,0xc2e7,0x195c,0xec6e,0xb7b6,0x1ed5,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x2da0,0x3994,0xed9c,0xa9ab,0x232d,0x98ec,0x80f9,0xc2e7,0x195c,0xec6e,0xb7b6,0x1ed5,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x39942da0,0xa9abed9c,0x98ec232d,0xc2e780f9,0xec6e195c,0x1ed5b7b6,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x39942da0,0xa9abed9c,0x98ec232d,0xc2e780f9,0xec6e195c,0x1ed5b7b6,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa9abed9c39942da0,0xc2e780f998ec232d,0x1ed5b7b6ec6e195c,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa9abed9c39942da0,0xc2e780f998ec232d,0x1ed5b7b6ec6e195c,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0xb7ef,0x4ddc,0x58cc,0xe284,0xc4a7,0xb9ed,0xdca9,0xc383,0xc3dd,0x5a13,0xd2bc,0x7663,0x3}}} +{{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0xb7ef,0x4ddc,0x58cc,0xe284,0xc4a7,0xb9ed,0xdca9,0xc383,0xc3dd,0x5a13,0xd2bc,0x7663,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0x4ddcb7ef,0xe28458cc,0xb9edc4a7,0xc383dca9,0x5a13c3dd,0x7663d2bc,0x3}}} +{{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0x4ddcb7ef,0xe28458cc,0xb9edc4a7,0xc383dca9,0x5a13c3dd,0x7663d2bc,0x3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe28458cc4ddcb7ef,0xc383dca9b9edc4a7,0x7663d2bc5a13c3dd,0x3}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe28458cc4ddcb7ef,0xc383dca9b9edc4a7,0x7663d2bc5a13c3dd,0x3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} 
#elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xdc07,0x925a,0x605a,0x9489,0x475b,0x7944,0x880f,0x65fa,0xed5a,0x329c,0x13f8,0x78f2,0xfffe,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x207f}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0xdc07,0x925a,0x605a,0x9489,0x475b,0x7944,0x880f,0x65fa,0xed5a,0x329c,0x13f8,0x78f2,0xfffe,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x207f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x925adc07,0x9489605a,0x7944475b,0x65fa880f,0x329ced5a,0x78f213f8,0xfffffffe,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x207fffff}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x925adc07,0x9489605a,0x7944475b,0x65fa880f,0x329ced5a,0x78f213f8,0xfffffffe,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x207fffff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9489605a925adc07,0x65fa880f7944475b,0x78f213f8329ced5a,0xfffffffffffffffe,0xffffffffffffffff,0x207fffffffffffff}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9489605a925adc07,0x65fa880f7944475b,0x78f213f8329ced5a,0xfffffffffffffffe,0xffffffffffffffff,0x207fffffffffffff}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x2da0,0x3994,0xed9c,0xa9ab,0x232d,0x98ec,0x80f9,0xc2e7,0x195c,0xec6e,0xb7b6,0x1ed5,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x2da0,0x3994,0xed9c,0xa9ab,0x232d,0x98ec,0x80f9,0xc2e7,0x195c,0xec6e,0xb7b6,0x1ed5,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x39942da0,0xa9abed9c,0x98ec232d,0xc2e780f9,0xec6e195c,0x1ed5b7b6,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x39942da0,0xa9abed9c,0x98ec232d,0xc2e780f9,0xec6e195c,0x1ed5b7b6,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa9abed9c39942da0,0xc2e780f998ec232d,0x1ed5b7b6ec6e195c,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa9abed9c39942da0,0xc2e780f998ec232d,0x1ed5b7b6ec6e195c,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc 
= 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9c07,0x5ca4,0xc660,0xc2e5,0x94d7,0x2b1d,0x3b32,0xa3de,0x67a4,0x2fd3,0xfeab,0x1a11}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x9c07,0x5ca4,0xc660,0xc2e5,0x94d7,0x2b1d,0x3b32,0xa3de,0x67a4,0x2fd3,0xfeab,0x1a11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x5ca49c07,0xc2e5c660,0x2b1d94d7,0xa3de3b32,0x2fd367a4,0x1a11feab}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x5ca49c07,0xc2e5c660,0x2b1d94d7,0xa3de3b32,0x2fd367a4,0x1a11feab}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xc2e5c6605ca49c07,0xa3de3b322b1d94d7,0x1a11feab2fd367a4}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xc2e5c6605ca49c07,0xa3de3b322b1d94d7,0x1a11feab2fd367a4}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x5b40,0x7328,0xdb38,0x5357,0x465b,0x31d8,0x1f3,0x85cf,0x32b9,0xd8dc,0x6f6d,0x3dab,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x5b40,0x7328,0xdb38,0x5357,0x465b,0x31d8,0x1f3,0x85cf,0x32b9,0xd8dc,0x6f6d,0x3dab,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x73285b40,0x5357db38,0x31d8465b,0x85cf01f3,0xd8dc32b9,0x3dab6f6d,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x73285b40,0x5357db38,0x31d8465b,0x85cf01f3,0xd8dc32b9,0x3dab6f6d,0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x5357db3873285b40,0x85cf01f331d8465b,0x3dab6f6dd8dc32b9,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5357db3873285b40,0x85cf01f331d8465b,0x3dab6f6dd8dc32b9,0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0xb7ef,0x4ddc,0x58cc,0xe284,0xc4a7,0xb9ed,0xdca9,0xc383,0xc3dd,0x5a13,0xd2bc,0x7663,0x3}}} +{{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0xb7ef,0x4ddc,0x58cc,0xe284,0xc4a7,0xb9ed,0xdca9,0xc383,0xc3dd,0x5a13,0xd2bc,0x7663,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0x4ddcb7ef,0xe28458cc,0xb9edc4a7,0xc383dca9,0x5a13c3dd,0x7663d2bc,0x3}}} +{{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0x4ddcb7ef,0xe28458cc,0xb9edc4a7,0xc383dca9,0x5a13c3dd,0x7663d2bc,0x3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe28458cc4ddcb7ef,0xc383dca9b9edc4a7,0x7663d2bc5a13c3dd,0x3}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe28458cc4ddcb7ef,0xc383dca9b9edc4a7,0x7663d2bc5a13c3dd,0x3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x11}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, 
._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 17}, {{ +}}}, 17}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xe8e2,0xea6f,0x72f1,0x2e52,0x152a,0xc137,0x5fe4,0xfd0e,0x9736,0x7a1,0xfa3d,0xc6b}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xe8e2,0xea6f,0x72f1,0x2e52,0x152a,0xc137,0x5fe4,0xfd0e,0x9736,0x7a1,0xfa3d,0xc6b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xea6fe8e2,0x2e5272f1,0xc137152a,0xfd0e5fe4,0x7a19736,0xc6bfa3d}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xea6fe8e2,0x2e5272f1,0xc137152a,0xfd0e5fe4,0x7a19736,0xc6bfa3d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x2e5272f1ea6fe8e2,0xfd0e5fe4c137152a,0xc6bfa3d07a19736}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x2e5272f1ea6fe8e2,0xfd0e5fe4c137152a,0xc6bfa3d07a19736}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = 
(mp_limb_t[]) {0x9a15,0x48a0,0x16ae,0xa42,0x3772,0x534a,0x26a7,0x2f5e,0xce7c,0x39eb,0xa365,0x745c,0x6a25,0xa257,0x2576,0x576a,0x76a2,0x6a25,0xa257,0x2576,0x576a,0x76a2,0x6a25,0x657}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x9a15,0x48a0,0x16ae,0xa42,0x3772,0x534a,0x26a7,0x2f5e,0xce7c,0x39eb,0xa365,0x745c,0x6a25,0xa257,0x2576,0x576a,0x76a2,0x6a25,0xa257,0x2576,0x576a,0x76a2,0x6a25,0x657}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x48a09a15,0xa4216ae,0x534a3772,0x2f5e26a7,0x39ebce7c,0x745ca365,0xa2576a25,0x576a2576,0x6a2576a2,0x2576a257,0x76a2576a,0x6576a25}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x48a09a15,0xa4216ae,0x534a3772,0x2f5e26a7,0x39ebce7c,0x745ca365,0xa2576a25,0x576a2576,0x6a2576a2,0x2576a257,0x76a2576a,0x6576a25}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xa4216ae48a09a15,0x2f5e26a7534a3772,0x745ca36539ebce7c,0x576a2576a2576a25,0x2576a2576a2576a2,0x6576a2576a2576a}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xa4216ae48a09a15,0x2f5e26a7534a3772,0x745ca36539ebce7c,0x576a2576a2576a25,0x2576a2576a2576a2,0x6576a2576a2576a}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} 
#endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x50e5,0x2533,0xb03b,0x2c45,0xfde,0xaaf1,0xafff,0x8c73,0xebfd,0xfb3,0xc7bc,0x26}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x50e5,0x2533,0xb03b,0x2c45,0xfde,0xaaf1,0xafff,0x8c73,0xebfd,0xfb3,0xc7bc,0x26}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x253350e5,0x2c45b03b,0xaaf10fde,0x8c73afff,0xfb3ebfd,0x26c7bc}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x253350e5,0x2c45b03b,0xaaf10fde,0x8c73afff,0xfb3ebfd,0x26c7bc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x2c45b03b253350e5,0x8c73afffaaf10fde,0x26c7bc0fb3ebfd}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x2c45b03b253350e5,0x8c73afffaaf10fde,0x26c7bc0fb3ebfd}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xe8e2,0xea6f,0x72f1,0x2e52,0x152a,0xc137,0x5fe4,0xfd0e,0x9736,0x7a1,0xfa3d,0xc6b}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xe8e2,0xea6f,0x72f1,0x2e52,0x152a,0xc137,0x5fe4,0xfd0e,0x9736,0x7a1,0xfa3d,0xc6b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xea6fe8e2,0x2e5272f1,0xc137152a,0xfd0e5fe4,0x7a19736,0xc6bfa3d}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xea6fe8e2,0x2e5272f1,0xc137152a,0xfd0e5fe4,0x7a19736,0xc6bfa3d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x2e5272f1ea6fe8e2,0xfd0e5fe4c137152a,0xc6bfa3d07a19736}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x2e5272f1ea6fe8e2,0xfd0e5fe4c137152a,0xc6bfa3d07a19736}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x8}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 41}, {{ +}}}, 41}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb000,0x6067,0x56e7,0x950c,0xb3d,0x28e6,0x1bfb,0xb990,0xeb8,0x7184,0x2273,0x29aa}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb000,0x6067,0x56e7,0x950c,0xb3d,0x28e6,0x1bfb,0xb990,0xeb8,0x7184,0x2273,0x29aa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6067b000,0x950c56e7,0x28e60b3d,0xb9901bfb,0x71840eb8,0x29aa2273}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6067b000,0x950c56e7,0x28e60b3d,0xb9901bfb,0x71840eb8,0x29aa2273}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x950c56e76067b000,0xb9901bfb28e60b3d,0x29aa227371840eb8}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x950c56e76067b000,0xb9901bfb28e60b3d,0x29aa227371840eb8}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb000,0x6067,0x56e7,0x950c,0xb3d,0x28e6,0x1bfb,0xb990,0xeb8,0x7184,0x2273,0x29aa}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb000,0x6067,0x56e7,0x950c,0xb3d,0x28e6,0x1bfb,0xb990,0xeb8,0x7184,0x2273,0x29aa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6067b000,0x950c56e7,0x28e60b3d,0xb9901bfb,0x71840eb8,0x29aa2273}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6067b000,0x950c56e7,0x28e60b3d,0xb9901bfb,0x71840eb8,0x29aa2273}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x950c56e76067b000,0xb9901bfb28e60b3d,0x29aa227371840eb8}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x950c56e76067b000,0xb9901bfb28e60b3d,0x29aa227371840eb8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd800,0xb033,0x2b73,0xca86,0x59e,0x9473,0xdfd,0x5cc8,0x75c,0xb8c2,0x1139,0x14d5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0xd800,0xb033,0x2b73,0xca86,0x59e,0x9473,0xdfd,0x5cc8,0x75c,0xb8c2,0x1139,0x14d5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb033d800,0xca862b73,0x9473059e,0x5cc80dfd,0xb8c2075c,0x14d51139}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb033d800,0xca862b73,0x9473059e,0x5cc80dfd,0xb8c2075c,0x14d51139}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xca862b73b033d800,0x5cc80dfd9473059e,0x14d51139b8c2075c}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xca862b73b033d800,0x5cc80dfd9473059e,0x14d51139b8c2075c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xffff,0xef7f,0x6120,0xdec9,0x3d80,0xfcb4,0xe8d7,0x2d72,0x4077,0xeecc,0xcd2a,0x4bc9,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xffff,0xef7f,0x6120,0xdec9,0x3d80,0xfcb4,0xe8d7,0x2d72,0x4077,0xeecc,0xcd2a,0x4bc9,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xef7fffff,0xdec96120,0xfcb43d80,0x2d72e8d7,0xeecc4077,0x4bc9cd2a,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xef7fffff,0xdec96120,0xfcb43d80,0x2d72e8d7,0xeecc4077,0x4bc9cd2a,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdec96120ef7fffff,0x2d72e8d7fcb43d80,0x4bc9cd2aeecc4077,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdec96120ef7fffff,0x2d72e8d7fcb43d80,0x4bc9cd2aeecc4077,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -24, ._mp_d = (mp_limb_t[]) {0x73ba,0x1227,0x9519,0xedfb,0x605b,0xe80,0x1a20,0xf0b2,0xb418,0xa90c,0xb325,0xefd6,0x7e3e,0xf8fc,0xe3f1,0x8fc7,0x3f1f,0xfc7e,0xf1f8,0xc7e3,0x1f8f,0x7e3f,0xf8fc,0x71}}} +{{{._mp_alloc = 0, ._mp_size = -24, ._mp_d = (mp_limb_t[]) {0x73ba,0x1227,0x9519,0xedfb,0x605b,0xe80,0x1a20,0xf0b2,0xb418,0xa90c,0xb325,0xefd6,0x7e3e,0xf8fc,0xe3f1,0x8fc7,0x3f1f,0xfc7e,0xf1f8,0xc7e3,0x1f8f,0x7e3f,0xf8fc,0x71}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, 
._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x122773ba,0xedfb9519,0xe80605b,0xf0b21a20,0xa90cb418,0xefd6b325,0xf8fc7e3e,0x8fc7e3f1,0xfc7e3f1f,0xc7e3f1f8,0x7e3f1f8f,0x71f8fc}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x122773ba,0xedfb9519,0xe80605b,0xf0b21a20,0xa90cb418,0xefd6b325,0xf8fc7e3e,0x8fc7e3f1,0xfc7e3f1f,0xc7e3f1f8,0x7e3f1f8f,0x71f8fc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xedfb9519122773ba,0xf0b21a200e80605b,0xefd6b325a90cb418,0x8fc7e3f1f8fc7e3e,0xc7e3f1f8fc7e3f1f,0x71f8fc7e3f1f8f}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xedfb9519122773ba,0xf0b21a200e80605b,0xefd6b325a90cb418,0x8fc7e3f1f8fc7e3e,0xc7e3f1f8fc7e3f1f,0x71f8fc7e3f1f8f}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd800,0xb033,0x2b73,0xca86,0x59e,0x9473,0xdfd,0x5cc8,0x75c,0xb8c2,0x1139,0x14d5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xd800,0xb033,0x2b73,0xca86,0x59e,0x9473,0xdfd,0x5cc8,0x75c,0xb8c2,0x1139,0x14d5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb033d800,0xca862b73,0x9473059e,0x5cc80dfd,0xb8c2075c,0x14d51139}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xb033d800,0xca862b73,0x9473059e,0x5cc80dfd,0xb8c2075c,0x14d51139}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xca862b73b033d800,0x5cc80dfd9473059e,0x14d51139b8c2075c}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xca862b73b033d800,0x5cc80dfd9473059e,0x14d51139b8c2075c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d 
= (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x73ba,0x8a7,0x681e,0x130f,0xeee3,0xd966,0x4ebe,0xf78b,0xba4d,0xfa9,0xc409,0x245}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x73ba,0x8a7,0x681e,0x130f,0xeee3,0xd966,0x4ebe,0xf78b,0xba4d,0xfa9,0xc409,0x245}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x8a773ba,0x130f681e,0xd966eee3,0xf78b4ebe,0xfa9ba4d,0x245c409}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x8a773ba,0x130f681e,0xd966eee3,0xf78b4ebe,0xfa9ba4d,0x245c409}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x130f681e08a773ba,0xf78b4ebed966eee3,0x245c4090fa9ba4d}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x130f681e08a773ba,0xf78b4ebed966eee3,0x245c4090fa9ba4d}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb000,0x6067,0x56e7,0x950c,0xb3d,0x28e6,0x1bfb,0xb990,0xeb8,0x7184,0x2273,0x29aa}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xb000,0x6067,0x56e7,0x950c,0xb3d,0x28e6,0x1bfb,0xb990,0xeb8,0x7184,0x2273,0x29aa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6067b000,0x950c56e7,0x28e60b3d,0xb9901bfb,0x71840eb8,0x29aa2273}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6067b000,0x950c56e7,0x28e60b3d,0xb9901bfb,0x71840eb8,0x29aa2273}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x950c56e76067b000,0xb9901bfb28e60b3d,0x29aa227371840eb8}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x950c56e76067b000,0xb9901bfb28e60b3d,0x29aa227371840eb8}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xffff,0xef7f,0x6120,0xdec9,0x3d80,0xfcb4,0xe8d7,0x2d72,0x4077,0xeecc,0xcd2a,0x4bc9,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) 
{0xffff,0xef7f,0x6120,0xdec9,0x3d80,0xfcb4,0xe8d7,0x2d72,0x4077,0xeecc,0xcd2a,0x4bc9,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xef7fffff,0xdec96120,0xfcb43d80,0x2d72e8d7,0xeecc4077,0x4bc9cd2a,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xef7fffff,0xdec96120,0xfcb43d80,0x2d72e8d7,0xeecc4077,0x4bc9cd2a,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdec96120ef7fffff,0x2d72e8d7fcb43d80,0x4bc9cd2aeecc4077,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdec96120ef7fffff,0x2d72e8d7fcb43d80,0x4bc9cd2aeecc4077,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} 
+{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 73}, {{ +}}}, 73}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) 
{0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4fae,0x9faa,0x2b8a,0x6a69,0x436c,0x633a,0x7892,0x301c,0xec62,0xcb54,0xe41,0xac50}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4fae,0x9faa,0x2b8a,0x6a69,0x436c,0x633a,0x7892,0x301c,0xec62,0xcb54,0xe41,0xac50}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9faa4fae,0x6a692b8a,0x633a436c,0x301c7892,0xcb54ec62,0xac500e41}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9faa4fae,0x6a692b8a,0x633a436c,0x301c7892,0xcb54ec62,0xac500e41}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x6a692b8a9faa4fae,0x301c7892633a436c,0xac500e41cb54ec62}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x6a692b8a9faa4fae,0x301c7892633a436c,0xac500e41cb54ec62}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -24, ._mp_d = (mp_limb_t[]) {0x30b3,0xeb66,0x87b7,0x617e,0x27c,0xfa7,0xdcf4,0x90c8,0x7e8b,0x9e3c,0xaf36,0xb7ba,0x5eeb,0xbaf7,0xbdd7,0x75ee,0x7baf,0xebdd,0xf75e,0xd7ba,0xeebd,0xaf75,0xdd7b,0x2eb}}} +{{{._mp_alloc = 0, ._mp_size = -24, ._mp_d = (mp_limb_t[]) {0x30b3,0xeb66,0x87b7,0x617e,0x27c,0xfa7,0xdcf4,0x90c8,0x7e8b,0x9e3c,0xaf36,0xb7ba,0x5eeb,0xbaf7,0xbdd7,0x75ee,0x7baf,0xebdd,0xf75e,0xd7ba,0xeebd,0xaf75,0xdd7b,0x2eb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xeb6630b3,0x617e87b7,0xfa7027c,0x90c8dcf4,0x9e3c7e8b,0xb7baaf36,0xbaf75eeb,0x75eebdd7,0xebdd7baf,0xd7baf75e,0xaf75eebd,0x2ebdd7b}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xeb6630b3,0x617e87b7,0xfa7027c,0x90c8dcf4,0x9e3c7e8b,0xb7baaf36,0xbaf75eeb,0x75eebdd7,0xebdd7baf,0xd7baf75e,0xaf75eebd,0x2ebdd7b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) 
{0x617e87b7eb6630b3,0x90c8dcf40fa7027c,0xb7baaf369e3c7e8b,0x75eebdd7baf75eeb,0xd7baf75eebdd7baf,0x2ebdd7baf75eebd}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x617e87b7eb6630b3,0x90c8dcf40fa7027c,0xb7baaf369e3c7e8b,0x75eebdd7baf75eeb,0xd7baf75eebdd7baf,0x2ebdd7baf75eebd}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) 
{0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xb5ab,0x986,0x1b92,0x5123,0x4b2a,0x653b,0x4896,0xc0fd,0x579e,0xc06c,0xd20e,0xf7}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xb5ab,0x986,0x1b92,0x5123,0x4b2a,0x653b,0x4896,0xc0fd,0x579e,0xc06c,0xd20e,0xf7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x986b5ab,0x51231b92,0x653b4b2a,0xc0fd4896,0xc06c579e,0xf7d20e}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x986b5ab,0x51231b92,0x653b4b2a,0xc0fd4896,0xc06c579e,0xf7d20e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x51231b920986b5ab,0xc0fd4896653b4b2a,0xf7d20ec06c579e}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x51231b920986b5ab,0xc0fd4896653b4b2a,0xf7d20ec06c579e}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4fae,0x9faa,0x2b8a,0x6a69,0x436c,0x633a,0x7892,0x301c,0xec62,0xcb54,0xe41,0xac50}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4fae,0x9faa,0x2b8a,0x6a69,0x436c,0x633a,0x7892,0x301c,0xec62,0xcb54,0xe41,0xac50}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9faa4fae,0x6a692b8a,0x633a436c,0x301c7892,0xcb54ec62,0xac500e41}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9faa4fae,0x6a692b8a,0x633a436c,0x301c7892,0xcb54ec62,0xac500e41}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) 
{0x6a692b8a9faa4fae,0x301c7892633a436c,0xac500e41cb54ec62}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x6a692b8a9faa4fae,0x301c7892633a436c,0xac500e41cb54ec62}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x8}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 89}, {{ +}}}, 89}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xa194,0x6df0,0x4b4c,0xf874,0xb43e,0x362a,0x11bb,0x84f2,0xc623,0x61a2,0x7d42,0xe501,0x30}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xa194,0x6df0,0x4b4c,0xf874,0xb43e,0x362a,0x11bb,0x84f2,0xc623,0x61a2,0x7d42,0xe501,0x30}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x6df0a194,0xf8744b4c,0x362ab43e,0x84f211bb,0x61a2c623,0xe5017d42,0x30}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x6df0a194,0xf8744b4c,0x362ab43e,0x84f211bb,0x61a2c623,0xe5017d42,0x30}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8744b4c6df0a194,0x84f211bb362ab43e,0xe5017d4261a2c623,0x30}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8744b4c6df0a194,0x84f211bb362ab43e,0xe5017d4261a2c623,0x30}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xa194,0x6df0,0x4b4c,0xf874,0xb43e,0x362a,0x11bb,0x84f2,0xc623,0x61a2,0x7d42,0xe501,0x30}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xa194,0x6df0,0x4b4c,0xf874,0xb43e,0x362a,0x11bb,0x84f2,0xc623,0x61a2,0x7d42,0xe501,0x30}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x6df0a194,0xf8744b4c,0x362ab43e,0x84f211bb,0x61a2c623,0xe5017d42,0x30}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x6df0a194,0xf8744b4c,0x362ab43e,0x84f211bb,0x61a2c623,0xe5017d42,0x30}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8744b4c6df0a194,0x84f211bb362ab43e,0xe5017d4261a2c623,0x30}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8744b4c6df0a194,0x84f211bb362ab43e,0xe5017d4261a2c623,0x30}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x50ca,0x36f8,0x25a6,0x7c3a,0x5a1f,0x9b15,0x8dd,0xc279,0x6311,0x30d1,0xbea1,0x7280,0x18}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x50ca,0x36f8,0x25a6,0x7c3a,0x5a1f,0x9b15,0x8dd,0xc279,0x6311,0x30d1,0xbea1,0x7280,0x18}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x36f850ca,0x7c3a25a6,0x9b155a1f,0xc27908dd,0x30d16311,0x7280bea1,0x18}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x36f850ca,0x7c3a25a6,0x9b155a1f,0xc27908dd,0x30d16311,0x7280bea1,0x18}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7c3a25a636f850ca,0xc27908dd9b155a1f,0x7280bea130d16311,0x18}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7c3a25a636f850ca,0xc27908dd9b155a1f,0x7280bea130d16311,0x18}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 
-{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x83f7,0x63ae,0x245e,0x2154,0x883c,0x544b,0x8f96,0x1b2d,0xcc0c,0x8d73,0x7bdd,0x118e,0x1df}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x83f7,0x63ae,0x245e,0x2154,0x883c,0x544b,0x8f96,0x1b2d,0xcc0c,0x8d73,0x7bdd,0x118e,0x1df}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x63ae83f7,0x2154245e,0x544b883c,0x1b2d8f96,0x8d73cc0c,0x118e7bdd,0x1df}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x63ae83f7,0x2154245e,0x544b883c,0x1b2d8f96,0x8d73cc0c,0x118e7bdd,0x1df}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2154245e63ae83f7,0x1b2d8f96544b883c,0x118e7bdd8d73cc0c,0x1df}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2154245e63ae83f7,0x1b2d8f96544b883c,0x118e7bdd8d73cc0c,0x1df}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -24, ._mp_d = (mp_limb_t[]) {0xbd79,0x489c,0xbd84,0xce46,0x9344,0xb194,0x642a,0x3c5a,0xdb04,0x96f5,0x6e1f,0x4dcb,0xff6e,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x207f}}} +{{{._mp_alloc = 0, ._mp_size = -24, ._mp_d = (mp_limb_t[]) {0xbd79,0x489c,0xbd84,0xce46,0x9344,0xb194,0x642a,0x3c5a,0xdb04,0x96f5,0x6e1f,0x4dcb,0xff6e,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x207f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x489cbd79,0xce46bd84,0xb1949344,0x3c5a642a,0x96f5db04,0x4dcb6e1f,0xffffff6e,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x207fffff}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x489cbd79,0xce46bd84,0xb1949344,0x3c5a642a,0x96f5db04,0x4dcb6e1f,0xffffff6e,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x207fffff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xce46bd84489cbd79,0x3c5a642ab1949344,0x4dcb6e1f96f5db04,0xffffffffffffff6e,0xffffffffffffffff,0x207fffffffffffff}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xce46bd84489cbd79,0x3c5a642ab1949344,0x4dcb6e1f96f5db04,0xffffffffffffff6e,0xffffffffffffffff,0x207fffffffffffff}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x50ca,0x36f8,0x25a6,0x7c3a,0x5a1f,0x9b15,0x8dd,0xc279,0x6311,0x30d1,0xbea1,0x7280,0x18}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x50ca,0x36f8,0x25a6,0x7c3a,0x5a1f,0x9b15,0x8dd,0xc279,0x6311,0x30d1,0xbea1,0x7280,0x18}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x36f850ca,0x7c3a25a6,0x9b155a1f,0xc27908dd,0x30d16311,0x7280bea1,0x18}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x36f850ca,0x7c3a25a6,0x9b155a1f,0xc27908dd,0x30d16311,0x7280bea1,0x18}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7c3a25a636f850ca,0xc27908dd9b155a1f,0x7280bea130d16311,0x18}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7c3a25a636f850ca,0xc27908dd9b155a1f,0x7280bea130d16311,0x18}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0xa1c9,0x3fda,0x577,0x71a8,0xf4d3,0x4269,0xecf2,0x2a5d,0x41b6,0x6e41,0x47e5,0x782c,0x2}}} +{{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0xa1c9,0x3fda,0x577,0x71a8,0xf4d3,0x4269,0xecf2,0x2a5d,0x41b6,0x6e41,0x47e5,0x782c,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0x3fdaa1c9,0x71a80577,0x4269f4d3,0x2a5decf2,0x6e4141b6,0x782c47e5,0x2}}} +{{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0x3fdaa1c9,0x71a80577,0x4269f4d3,0x2a5decf2,0x6e4141b6,0x782c47e5,0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x71a805773fdaa1c9,0x2a5decf24269f4d3,0x782c47e56e4141b6,0x2}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x71a805773fdaa1c9,0x2a5decf24269f4d3,0x782c47e56e4141b6,0x2}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xa194,0x6df0,0x4b4c,0xf874,0xb43e,0x362a,0x11bb,0x84f2,0xc623,0x61a2,0x7d42,0xe501,0x30}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xa194,0x6df0,0x4b4c,0xf874,0xb43e,0x362a,0x11bb,0x84f2,0xc623,0x61a2,0x7d42,0xe501,0x30}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x6df0a194,0xf8744b4c,0x362ab43e,0x84f211bb,0x61a2c623,0xe5017d42,0x30}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x6df0a194,0xf8744b4c,0x362ab43e,0x84f211bb,0x61a2c623,0xe5017d42,0x30}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8744b4c6df0a194,0x84f211bb362ab43e,0xe5017d4261a2c623,0x30}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8744b4c6df0a194,0x84f211bb362ab43e,0xe5017d4261a2c623,0x30}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x83f7,0x63ae,0x245e,0x2154,0x883c,0x544b,0x8f96,0x1b2d,0xcc0c,0x8d73,0x7bdd,0x118e,0x1df}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x83f7,0x63ae,0x245e,0x2154,0x883c,0x544b,0x8f96,0x1b2d,0xcc0c,0x8d73,0x7bdd,0x118e,0x1df}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x63ae83f7,0x2154245e,0x544b883c,0x1b2d8f96,0x8d73cc0c,0x118e7bdd,0x1df}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x63ae83f7,0x2154245e,0x544b883c,0x1b2d8f96,0x8d73cc0c,0x118e7bdd,0x1df}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2154245e63ae83f7,0x1b2d8f96544b883c,0x118e7bdd8d73cc0c,0x1df}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x2154245e63ae83f7,0x1b2d8f96544b883c,0x118e7bdd8d73cc0c,0x1df}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 
0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 97}}; +}}}, 97}}; const quat_left_ideal_t CONNECTING_IDEALS[8] = {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d 
= (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 
-{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xbf5a,0x9a6f,0xcde1,0x21d4,0x52b1,0xe7a0,0xf3ba,0x78eb,0xc45c,0x787f,0x5c29,0x1c51,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xbf5a,0x9a6f,0xcde1,0x21d4,0x52b1,0xe7a0,0xf3ba,0x78eb,0xc45c,0x787f,0x5c29,0x1c51,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x9a6fbf5a,0x21d4cde1,0xe7a052b1,0x78ebf3ba,0x787fc45c,0x1c515c29,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x9a6fbf5a,0x21d4cde1,0xe7a052b1,0x78ebf3ba,0x787fc45c,0x1c515c29,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x21d4cde19a6fbf5a,0x78ebf3bae7a052b1,0x1c515c29787fc45c,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x21d4cde19a6fbf5a,0x78ebf3bae7a052b1,0x1c515c29787fc45c,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} 
#endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8015,0xfd5c,0xb508,0x1437,0xfa92,0x6222,0x1452,0xa79a,0x6c31,0xd3a9,0xb3c4,0x15c5}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x8015,0xfd5c,0xb508,0x1437,0xfa92,0x6222,0x1452,0xa79a,0x6c31,0xd3a9,0xb3c4,0x15c5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfd5c8015,0x1437b508,0x6222fa92,0xa79a1452,0xd3a96c31,0x15c5b3c4}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xfd5c8015,0x1437b508,0x6222fa92,0xa79a1452,0xd3a96c31,0x15c5b3c4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x1437b508fd5c8015,0xa79a14526222fa92,0x15c5b3c4d3a96c31}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x1437b508fd5c8015,0xa79a14526222fa92,0x15c5b3c4d3a96c31}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xbf5a,0x9a6f,0xcde1,0x21d4,0x52b1,0xe7a0,0xf3ba,0x78eb,0xc45c,0x787f,0x5c29,0x1c51,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xbf5a,0x9a6f,0xcde1,0x21d4,0x52b1,0xe7a0,0xf3ba,0x78eb,0xc45c,0x787f,0x5c29,0x1c51,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x9a6fbf5a,0x21d4cde1,0xe7a052b1,0x78ebf3ba,0x787fc45c,0x1c515c29,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x9a6fbf5a,0x21d4cde1,0xe7a052b1,0x78ebf3ba,0x787fc45c,0x1c515c29,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x21d4cde19a6fbf5a,0x78ebf3bae7a052b1,0x1c515c29787fc45c,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x21d4cde19a6fbf5a,0x78ebf3bae7a052b1,0x1c515c29787fc45c,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x3f45,0x9d13,0x18d8,0xd9d,0x581f,0x857d,0xdf68,0xd151,0x582a,0xa4d6,0xa864,0x68b,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x3f45,0x9d13,0x18d8,0xd9d,0x581f,0x857d,0xdf68,0xd151,0x582a,0xa4d6,0xa864,0x68b,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x9d133f45,0xd9d18d8,0x857d581f,0xd151df68,0xa4d6582a,0x68ba864,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x9d133f45,0xd9d18d8,0x857d581f,0xd151df68,0xa4d6582a,0x68ba864,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd9d18d89d133f45,0xd151df68857d581f,0x68ba864a4d6582a,0x1}}} +{{{._mp_alloc = 
0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd9d18d89d133f45,0xd151df68857d581f,0x68ba864a4d6582a,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfad,0xcd37,0x66f0,0x90ea,0x2958,0x73d0,0xf9dd,0x3c75,0xe22e,0xbc3f,0xae14,0x8e28}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfad,0xcd37,0x66f0,0x90ea,0x2958,0x73d0,0xf9dd,0x3c75,0xe22e,0xbc3f,0xae14,0x8e28}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xcd37dfad,0x90ea66f0,0x73d02958,0x3c75f9dd,0xbc3fe22e,0x8e28ae14}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xcd37dfad,0x90ea66f0,0x73d02958,0x3c75f9dd,0xbc3fe22e,0x8e28ae14}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x90ea66f0cd37dfad,0x3c75f9dd73d02958,0x8e28ae14bc3fe22e}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x90ea66f0cd37dfad,0x3c75f9dd73d02958,0x8e28ae14bc3fe22e}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x9d7a,0x920e,0xe71,0xc120,0x8fbf,0x607e,0x29f,0xff55,0x7422,0x4796,0xbca4,0x125b,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x9d7a,0x920e,0xe71,0xc120,0x8fbf,0x607e,0x29f,0xff55,0x7422,0x4796,0xbca4,0x125b,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x920e9d7a,0xc1200e71,0x607e8fbf,0xff55029f,0x47967422,0x125bbca4,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x920e9d7a,0xc1200e71,0x607e8fbf,0xff55029f,0x47967422,0x125bbca4,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc1200e71920e9d7a,0xff55029f607e8fbf,0x125bbca447967422,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc1200e71920e9d7a,0xff55029f607e8fbf,0x125bbca447967422,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbcbf,0x76ed,0xc538,0xec53,0xeb88,0xb40d,0xa54e,0x14f,0x8bb2,0x300a,0xedb2,0x539}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xbcbf,0x76ed,0xc538,0xec53,0xeb88,0xb40d,0xa54e,0x14f,0x8bb2,0x300a,0xedb2,0x539}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x76edbcbf,0xec53c538,0xb40deb88,0x14fa54e,0x300a8bb2,0x539edb2}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x76edbcbf,0xec53c538,0xb40deb88,0x14fa54e,0x300a8bb2,0x539edb2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xec53c53876edbcbf,0x14fa54eb40deb88,0x539edb2300a8bb2}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xec53c53876edbcbf,0x14fa54eb40deb88,0x539edb2300a8bb2}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x9d7a,0x920e,0xe71,0xc120,0x8fbf,0x607e,0x29f,0xff55,0x7422,0x4796,0xbca4,0x125b,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x9d7a,0x920e,0xe71,0xc120,0x8fbf,0x607e,0x29f,0xff55,0x7422,0x4796,0xbca4,0x125b,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x920e9d7a,0xc1200e71,0x607e8fbf,0xff55029f,0x47967422,0x125bbca4,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x920e9d7a,0xc1200e71,0x607e8fbf,0xff55029f,0x47967422,0x125bbca4,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc1200e71920e9d7a,0xff55029f607e8fbf,0x125bbca447967422,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc1200e71920e9d7a,0xff55029f607e8fbf,0x125bbca447967422,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xe0bb,0x1b20,0x4939,0xd4cc,0xa436,0xac70,0x5d50,0xfe05,0xe870,0x178b,0xcef2,0xd21,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xe0bb,0x1b20,0x4939,0xd4cc,0xa436,0xac70,0x5d50,0xfe05,0xe870,0x178b,0xcef2,0xd21,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x1b20e0bb,0xd4cc4939,0xac70a436,0xfe055d50,0x178be870,0xd21cef2,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) 
{0x1b20e0bb,0xd4cc4939,0xac70a436,0xfe055d50,0x178be870,0xd21cef2,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd4cc49391b20e0bb,0xfe055d50ac70a436,0xd21cef2178be870,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd4cc49391b20e0bb,0xfe055d50ac70a436,0xd21cef2178be870,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} 
+{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4ebd,0xc907,0x738,0xe090,0x47df,0xb03f,0x814f,0x7faa,0x3a11,0x23cb,0xde52,0x892d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4ebd,0xc907,0x738,0xe090,0x47df,0xb03f,0x814f,0x7faa,0x3a11,0x23cb,0xde52,0x892d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc9074ebd,0xe0900738,0xb03f47df,0x7faa814f,0x23cb3a11,0x892dde52}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xc9074ebd,0xe0900738,0xb03f47df,0x7faa814f,0x23cb3a11,0x892dde52}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe0900738c9074ebd,0x7faa814fb03f47df,0x892dde5223cb3a11}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xe0900738c9074ebd,0x7faa814fb03f47df,0x892dde5223cb3a11}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xd282,0xcb1f,0x6532,0xe33e,0x153d,0xfd8,0x4275,0x2b62,0xf17d,0xdb04,0x3f12,0xf722,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xd282,0xcb1f,0x6532,0xe33e,0x153d,0xfd8,0x4275,0x2b62,0xf17d,0xdb04,0x3f12,0xf722,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xcb1fd282,0xe33e6532,0xfd8153d,0x2b624275,0xdb04f17d,0xf7223f12,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xcb1fd282,0xe33e6532,0xfd8153d,0x2b624275,0xdb04f17d,0xf7223f12,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe33e6532cb1fd282,0x2b6242750fd8153d,0xf7223f12db04f17d,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe33e6532cb1fd282,0x2b6242750fd8153d,0xf7223f12db04f17d,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 
-{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x50bf,0xeebf,0xe944,0xea4d,0x76d,0xcbc5,0x4919,0x12b0,0x71f3,0x9e30,0x3304,0x1265}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x50bf,0xeebf,0xe944,0xea4d,0x76d,0xcbc5,0x4919,0x12b0,0x71f3,0x9e30,0x3304,0x1265}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xeebf50bf,0xea4de944,0xcbc5076d,0x12b04919,0x9e3071f3,0x12653304}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xeebf50bf,0xea4de944,0xcbc5076d,0x12b04919,0x9e3071f3,0x12653304}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xea4de944eebf50bf,0x12b04919cbc5076d,0x126533049e3071f3}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xea4de944eebf50bf,0x12b04919cbc5076d,0x126533049e3071f3}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xd282,0xcb1f,0x6532,0xe33e,0x153d,0xfd8,0x4275,0x2b62,0xf17d,0xdb04,0x3f12,0xf722,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xd282,0xcb1f,0x6532,0xe33e,0x153d,0xfd8,0x4275,0x2b62,0xf17d,0xdb04,0x3f12,0xf722,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xcb1fd282,0xe33e6532,0xfd8153d,0x2b624275,0xdb04f17d,0xf7223f12,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xcb1fd282,0xe33e6532,0xfd8153d,0x2b624275,0xdb04f17d,0xf7223f12,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe33e6532cb1fd282,0x2b6242750fd8153d,0xf7223f12db04f17d,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe33e6532cb1fd282,0x2b6242750fd8153d,0xf7223f12db04f17d,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x81c3,0xdc60,0x7bed,0xf8f0,0xdcf,0x4413,0xf95b,0x18b1,0x7f8a,0x3cd4,0xc0e,0xe4bd,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x81c3,0xdc60,0x7bed,0xf8f0,0xdcf,0x4413,0xf95b,0x18b1,0x7f8a,0x3cd4,0xc0e,0xe4bd,0x1}}}} #elif 
GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xdc6081c3,0xf8f07bed,0x44130dcf,0x18b1f95b,0x3cd47f8a,0xe4bd0c0e,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xdc6081c3,0xf8f07bed,0x44130dcf,0x18b1f95b,0x3cd47f8a,0xe4bd0c0e,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8f07beddc6081c3,0x18b1f95b44130dcf,0xe4bd0c0e3cd47f8a,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf8f07beddc6081c3,0x18b1f95b44130dcf,0xe4bd0c0e3cd47f8a,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 
0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe941,0x658f,0x3299,0xf19f,0xa9e,0x87ec,0x213a,0x95b1,0x78be,0x6d82,0x1f89,0xfb91}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe941,0x658f,0x3299,0xf19f,0xa9e,0x87ec,0x213a,0x95b1,0x78be,0x6d82,0x1f89,0xfb91}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x658fe941,0xf19f3299,0x87ec0a9e,0x95b1213a,0x6d8278be,0xfb911f89}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x658fe941,0xf19f3299,0x87ec0a9e,0x95b1213a,0x6d8278be,0xfb911f89}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xf19f3299658fe941,0x95b1213a87ec0a9e,0xfb911f896d8278be}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xf19f3299658fe941,0x95b1213a87ec0a9e,0xfb911f896d8278be}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) 
{0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x60fb,0xd399,0x887f,0xd263,0xe0e7,0xb202,0x699b,0xea34,0x5a15,0x4b8a,0x6763,0x8e95}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x60fb,0xd399,0x887f,0xd263,0xe0e7,0xb202,0x699b,0xea34,0x5a15,0x4b8a,0x6763,0x8e95}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd39960fb,0xd263887f,0xb202e0e7,0xea34699b,0x4b8a5a15,0x8e956763}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd39960fb,0xd263887f,0xb202e0e7,0xea34699b,0x4b8a5a15,0x8e956763}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xd263887fd39960fb,0xea34699bb202e0e7,0x8e9567634b8a5a15}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xd263887fd39960fb,0xea34699bb202e0e7,0x8e9567634b8a5a15}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xdfda,0xabc3,0xd4b8,0x7c1c,0x4727,0x66b2,0x21da,0x79cc,0xe3a3,0x553d,0x9b8d,0xa12d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xabc3dfda,0x7c1cd4b8,0x66b24727,0x79cc21da,0x553de3a3,0xa12d9b8d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x7c1cd4b8abc3dfda,0x79cc21da66b24727,0xa12d9b8d553de3a3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x7edf,0xd82a,0x4c38,0xa9b9,0x663f,0xb4af,0xb83e,0x8f97,0x898d,0x9b3,0x342a,0x1298}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7edf,0xd82a,0x4c38,0xa9b9,0x663f,0xb4af,0xb83e,0x8f97,0x898d,0x9b3,0x342a,0x1298}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd82a7edf,0xa9b94c38,0xb4af663f,0x8f97b83e,0x9b3898d,0x1298342a}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xd82a7edf,0xa9b94c38,0xb4af663f,0x8f97b83e,0x9b3898d,0x1298342a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xa9b94c38d82a7edf,0x8f97b83eb4af663f,0x1298342a09b3898d}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xa9b94c38d82a7edf,0x8f97b83eb4af663f,0x1298342a09b3898d}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xe00e,0xd869,0x1a76,0xd8de,0xfe4c,0xabc5,0x99e1,0xf264,0x7d83,0x9c3,0x32ab,0xb60b,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xe00e,0xd869,0x1a76,0xd8de,0xfe4c,0xabc5,0x99e1,0xf264,0x7d83,0x9c3,0x32ab,0xb60b,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xd869e00e,0xd8de1a76,0xabc5fe4c,0xf26499e1,0x9c37d83,0xb60b32ab,0x1}}} +{{{._mp_alloc 
= 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xd869e00e,0xd8de1a76,0xabc5fe4c,0xf26499e1,0x9c37d83,0xb60b32ab,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8de1a76d869e00e,0xf26499e1abc5fe4c,0xb60b32ab09c37d83,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8de1a76d869e00e,0xf26499e1abc5fe4c,0xb60b32ab09c37d83,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xb00f,0x8bbf,0x19a9,0xd6b,0xf7b,0xcd5c,0x74e7,0xd7e2,0xa419,0x3593,0x56a8,0x8de8,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xb00f,0x8bbf,0x19a9,0xd6b,0xf7b,0xcd5c,0x74e7,0xd7e2,0xa419,0x3593,0x56a8,0x8de8,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x8bbfb00f,0xd6b19a9,0xcd5c0f7b,0xd7e274e7,0x3593a419,0x8de856a8,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x8bbfb00f,0xd6b19a9,0xcd5c0f7b,0xd7e274e7,0x3593a419,0x8de856a8,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd6b19a98bbfb00f,0xd7e274e7cd5c0f7b,0x8de856a83593a419,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd6b19a98bbfb00f,0xd7e274e7cd5c0f7b,0x8de856a83593a419,0x1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xe00e,0xd869,0x1a76,0xd8de,0xfe4c,0xabc5,0x99e1,0xf264,0x7d83,0x9c3,0x32ab,0xb60b,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xe00e,0xd869,0x1a76,0xd8de,0xfe4c,0xabc5,0x99e1,0xf264,0x7d83,0x9c3,0x32ab,0xb60b,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xd869e00e,0xd8de1a76,0xabc5fe4c,0xf26499e1,0x9c37d83,0xb60b32ab,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xd869e00e,0xd8de1a76,0xabc5fe4c,0xf26499e1,0x9c37d83,0xb60b32ab,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8de1a76d869e00e,0xf26499e1abc5fe4c,0xb60b32ab09c37d83,0x1}}} +{{{._mp_alloc = 0, 
._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xd8de1a76d869e00e,0xf26499e1abc5fe4c,0xb60b32ab09c37d83,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fff,0x4caa,0xcd,0xcb73,0xeed1,0xde69,0x24f9,0x1a82,0xd96a,0xd42f,0xdc02,0x2822}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fff,0x4caa,0xcd,0xcb73,0xeed1,0xde69,0x24f9,0x1a82,0xd96a,0xd42f,0xdc02,0x2822}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4caa2fff,0xcb7300cd,0xde69eed1,0x1a8224f9,0xd42fd96a,0x2822dc02}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4caa2fff,0xcb7300cd,0xde69eed1,0x1a8224f9,0xd42fd96a,0x2822dc02}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcb7300cd4caa2fff,0x1a8224f9de69eed1,0x2822dc02d42fd96a}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcb7300cd4caa2fff,0x1a8224f9de69eed1,0x2822dc02d42fd96a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 
0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf007,0x6c34,0xd3b,0x6c6f,0xff26,0xd5e2,0x4cf0,0xf932,0xbec1,0x84e1,0x9955,0xdb05}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xf007,0x6c34,0xd3b,0x6c6f,0xff26,0xd5e2,0x4cf0,0xf932,0xbec1,0x84e1,0x9955,0xdb05}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6c34f007,0x6c6f0d3b,0xd5e2ff26,0xf9324cf0,0x84e1bec1,0xdb059955}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6c34f007,0x6c6f0d3b,0xd5e2ff26,0xf9324cf0,0x84e1bec1,0xdb059955}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x6c6f0d3b6c34f007,0xf9324cf0d5e2ff26,0xdb05995584e1bec1}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x6c6f0d3b6c34f007,0xf9324cf0d5e2ff26,0xdb05995584e1bec1}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) 
{0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3a91,0xcd01,0xac55,0x9a52,0x9887,0x118f,0x4dec,0x4245,0xd869,0x1022,0x1d16,0x7ad}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3a91,0xcd01,0xac55,0x9a52,0x9887,0x118f,0x4dec,0x4245,0xd869,0x1022,0x1d16,0x7ad}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xcd013a91,0x9a52ac55,0x118f9887,0x42454dec,0x1022d869,0x7ad1d16}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xcd013a91,0x9a52ac55,0x118f9887,0x42454dec,0x1022d869,0x7ad1d16}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x9a52ac55cd013a91,0x42454dec118f9887,0x7ad1d161022d869}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x9a52ac55cd013a91,0x42454dec118f9887,0x7ad1d161022d869}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x7b26,0x37a0,0xc8dc,0x97d3,0x7f2f,0xd6bd,0x931,0x1df2,0x2918,0x4a3e,0x2591,0x6ee7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) 
{0x37a07b26,0x97d3c8dc,0xd6bd7f2f,0x1df20931,0x4a3e2918,0x6ee72591}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x97d3c8dc37a07b26,0x1df20931d6bd7f2f,0x6ee725914a3e2918}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4095,0x6a9f,0x1c86,0xfd81,0xe6a7,0xc52d,0xbb45,0xdbac,0x50ae,0x3a1b,0x87b,0x673a}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4095,0x6a9f,0x1c86,0xfd81,0xe6a7,0xc52d,0xbb45,0xdbac,0x50ae,0x3a1b,0x87b,0x673a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6a9f4095,0xfd811c86,0xc52de6a7,0xdbacbb45,0x3a1b50ae,0x673a087b}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x6a9f4095,0xfd811c86,0xc52de6a7,0xdbacbb45,0x3a1b50ae,0x673a087b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xfd811c866a9f4095,0xdbacbb45c52de6a7,0x673a087b3a1b50ae}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xfd811c866a9f4095,0xdbacbb45c52de6a7,0x673a087b3a1b50ae}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d 
= (mp_limb_t[]) {0xe5ca,0x3b34,0xb04b,0x430f,0xe795,0xa04a,0x8c7d,0xec47,0x77df,0x8e5c,0xb71e,0xd31f}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe5ca,0x3b34,0xb04b,0x430f,0xe795,0xa04a,0x8c7d,0xec47,0x77df,0x8e5c,0xb71e,0xd31f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3b34e5ca,0x430fb04b,0xa04ae795,0xec478c7d,0x8e5c77df,0xd31fb71e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3b34e5ca,0x430fb04b,0xa04ae795,0xec478c7d,0x8e5c77df,0xd31fb71e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x430fb04b3b34e5ca,0xec478c7da04ae795,0xd31fb71e8e5c77df}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x430fb04b3b34e5ca,0xec478c7da04ae795,0xd31fb71e8e5c77df}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4d27,0x98d5,0x3839,0x83ff,0x48b7,0x4d5b,0xc95b,0xbe45,0x9d44,0x36f3,0x4d57,0x6c26}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x4d27,0x98d5,0x3839,0x83ff,0x48b7,0x4d5b,0xc95b,0xbe45,0x9d44,0x36f3,0x4d57,0x6c26}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x98d54d27,0x83ff3839,0x4d5b48b7,0xbe45c95b,0x36f39d44,0x6c264d57}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x98d54d27,0x83ff3839,0x4d5b48b7,0xbe45c95b,0x36f39d44,0x6c264d57}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x83ff383998d54d27,0xbe45c95b4d5b48b7,0x6c264d5736f39d44}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x83ff383998d54d27,0xbe45c95b4d5b48b7,0x6c264d5736f39d44}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe5ca,0x3b34,0xb04b,0x430f,0xe795,0xa04a,0x8c7d,0xec47,0x77df,0x8e5c,0xb71e,0xd31f}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xe5ca,0x3b34,0xb04b,0x430f,0xe795,0xa04a,0x8c7d,0xec47,0x77df,0x8e5c,0xb71e,0xd31f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = 
(mp_limb_t[]) {0x3b34e5ca,0x430fb04b,0xa04ae795,0xec478c7d,0x8e5c77df,0xd31fb71e}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x3b34e5ca,0x430fb04b,0xa04ae795,0xec478c7d,0x8e5c77df,0xd31fb71e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x430fb04b3b34e5ca,0xec478c7da04ae795,0xd31fb71e8e5c77df}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0x430fb04b3b34e5ca,0xec478c7da04ae795,0xd31fb71e8e5c77df}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x98a3,0xa25f,0x7811,0xbf10,0x9edd,0x52ef,0xc322,0x2e01,0xda9b,0x5768,0x69c7,0x66f9}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x98a3,0xa25f,0x7811,0xbf10,0x9edd,0x52ef,0xc322,0x2e01,0xda9b,0x5768,0x69c7,0x66f9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xa25f98a3,0xbf107811,0x52ef9edd,0x2e01c322,0x5768da9b,0x66f969c7}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0xa25f98a3,0xbf107811,0x52ef9edd,0x2e01c322,0x5768da9b,0x66f969c7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbf107811a25f98a3,0x2e01c32252ef9edd,0x66f969c75768da9b}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbf107811a25f98a3,0x2e01c32252ef9edd,0x66f969c75768da9b}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, 
._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x72e5,0x9d9a,0xd825,0xa187,0x73ca,0xd025,0xc63e,0xf623,0x3bef,0x472e,0xdb8f,0x698f}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x72e5,0x9d9a,0xd825,0xa187,0x73ca,0xd025,0xc63e,0xf623,0x3bef,0x472e,0xdb8f,0x698f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9d9a72e5,0xa187d825,0xd02573ca,0xf623c63e,0x472e3bef,0x698fdb8f}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x9d9a72e5,0xa187d825,0xd02573ca,0xf623c63e,0x472e3bef,0x698fdb8f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xa187d8259d9a72e5,0xf623c63ed02573ca,0x698fdb8f472e3bef}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xa187d8259d9a72e5,0xf623c63ed02573ca,0x698fdb8f472e3bef}}}} #endif , &MAXORD_O0}}; const quat_alg_elem_t CONJUGATING_ELEMENTS[8] = {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, 
._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x8015,0xfd5c,0xb508,0x1437,0xfa92,0x6222,0x1452,0xa79a,0x6c31,0xd3a9,0xb3c4,0x15c5}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x8015,0xfd5c,0xb508,0x1437,0xfa92,0x6222,0x1452,0xa79a,0x6c31,0xd3a9,0xb3c4,0x15c5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xfd5c8015,0x1437b508,0x6222fa92,0xa79a1452,0xd3a96c31,0x15c5b3c4}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xfd5c8015,0x1437b508,0x6222fa92,0xa79a1452,0xd3a96c31,0x15c5b3c4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x1437b508fd5c8015,0xa79a14526222fa92,0x15c5b3c4d3a96c31}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x1437b508fd5c8015,0xa79a14526222fa92,0x15c5b3c4d3a96c31}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, 
._mp_d = (mp_limb_t[]) {0x8015,0xfd5c,0xb508,0x1437,0xfa92,0x6222,0x1452,0xa79a,0x6c31,0xd3a9,0xb3c4,0x15c5}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x8015,0xfd5c,0xb508,0x1437,0xfa92,0x6222,0x1452,0xa79a,0x6c31,0xd3a9,0xb3c4,0x15c5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xfd5c8015,0x1437b508,0x6222fa92,0xa79a1452,0xd3a96c31,0x15c5b3c4}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0xfd5c8015,0x1437b508,0x6222fa92,0xa79a1452,0xd3a96c31,0x15c5b3c4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x1437b508fd5c8015,0xa79a14526222fa92,0x15c5b3c4d3a96c31}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0x1437b508fd5c8015,0xa79a14526222fa92,0x15c5b3c4d3a96c31}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xbcbf,0x76ed,0xc538,0xec53,0xeb88,0xb40d,0xa54e,0x14f,0x8bb2,0x300a,0xedb2,0x539}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xbcbf,0x76ed,0xc538,0xec53,0xeb88,0xb40d,0xa54e,0x14f,0x8bb2,0x300a,0xedb2,0x539}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x76edbcbf,0xec53c538,0xb40deb88,0x14fa54e,0x300a8bb2,0x539edb2}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x76edbcbf,0xec53c538,0xb40deb88,0x14fa54e,0x300a8bb2,0x539edb2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xec53c53876edbcbf,0x14fa54eb40deb88,0x539edb2300a8bb2}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xec53c53876edbcbf,0x14fa54eb40deb88,0x539edb2300a8bb2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xbcbf,0x76ed,0xc538,0xec53,0xeb88,0xb40d,0xa54e,0x14f,0x8bb2,0x300a,0xedb2,0x539}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xbcbf,0x76ed,0xc538,0xec53,0xeb88,0xb40d,0xa54e,0x14f,0x8bb2,0x300a,0xedb2,0x539}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, 
._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x76edbcbf,0xec53c538,0xb40deb88,0x14fa54e,0x300a8bb2,0x539edb2}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x76edbcbf,0xec53c538,0xb40deb88,0x14fa54e,0x300a8bb2,0x539edb2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xec53c53876edbcbf,0x14fa54eb40deb88,0x539edb2300a8bb2}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xec53c53876edbcbf,0x14fa54eb40deb88,0x539edb2300a8bb2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x2341,0xb9df,0x4e77,0xcd8c,0x1cab,0xdb9d,0x8b8e,0x3e12,0x6370,0x7935,0x7217,0x987,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x2341,0xb9df,0x4e77,0xcd8c,0x1cab,0xdb9d,0x8b8e,0x3e12,0x6370,0x7935,0x7217,0x987,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xb9df2341,0xcd8c4e77,0xdb9d1cab,0x3e128b8e,0x79356370,0x9877217,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xb9df2341,0xcd8c4e77,0xdb9d1cab,0x3e128b8e,0x79356370,0x9877217,0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcd8c4e77b9df2341,0x3e128b8edb9d1cab,0x987721779356370,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcd8c4e77b9df2341,0x3e128b8edb9d1cab,0x987721779356370,0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0x2341,0xb9df,0x4e77,0xcd8c,0x1cab,0xdb9d,0x8b8e,0x3e12,0x6370,0x7935,0x7217,0x987,0x2}}} +{{{._mp_alloc = 0, ._mp_size = -13, ._mp_d = (mp_limb_t[]) {0x2341,0xb9df,0x4e77,0xcd8c,0x1cab,0xdb9d,0x8b8e,0x3e12,0x6370,0x7935,0x7217,0x987,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0xb9df2341,0xcd8c4e77,0xdb9d1cab,0x3e128b8e,0x79356370,0x9877217,0x2}}} +{{{._mp_alloc = 0, ._mp_size = -7, ._mp_d = (mp_limb_t[]) {0xb9df2341,0xcd8c4e77,0xdb9d1cab,0x3e128b8e,0x79356370,0x9877217,0x2}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xcd8c4e77b9df2341,0x3e128b8edb9d1cab,0x987721779356370,0x2}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xcd8c4e77b9df2341,0x3e128b8edb9d1cab,0x987721779356370,0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0xefed,0x55e1,0x6a5c,0xbe0e,0x2393,0x3359,0x10ed,0xbce6,0xf1d1,0xaa9e,0xcdc6,0x5096}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x55e1efed,0xbe0e6a5c,0x33592393,0xbce610ed,0xaa9ef1d1,0x5096cdc6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xbe0e6a5c55e1efed,0xbce610ed33592393,0x5096cdc6aa9ef1d1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 
-{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x2fff,0x4caa,0xcd,0xcb73,0xeed1,0xde69,0x24f9,0x1a82,0xd96a,0xd42f,0xdc02,0x2822}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x2fff,0x4caa,0xcd,0xcb73,0xeed1,0xde69,0x24f9,0x1a82,0xd96a,0xd42f,0xdc02,0x2822}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x4caa2fff,0xcb7300cd,0xde69eed1,0x1a8224f9,0xd42fd96a,0x2822dc02}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x4caa2fff,0xcb7300cd,0xde69eed1,0x1a8224f9,0xd42fd96a,0x2822dc02}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xcb7300cd4caa2fff,0x1a8224f9de69eed1,0x2822dc02d42fd96a}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xcb7300cd4caa2fff,0x1a8224f9de69eed1,0x2822dc02d42fd96a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fff,0x4caa,0xcd,0xcb73,0xeed1,0xde69,0x24f9,0x1a82,0xd96a,0xd42f,0xdc02,0x2822}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x2fff,0x4caa,0xcd,0xcb73,0xeed1,0xde69,0x24f9,0x1a82,0xd96a,0xd42f,0xdc02,0x2822}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4caa2fff,0xcb7300cd,0xde69eed1,0x1a8224f9,0xd42fd96a,0x2822dc02}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x4caa2fff,0xcb7300cd,0xde69eed1,0x1a8224f9,0xd42fd96a,0x2822dc02}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcb7300cd4caa2fff,0x1a8224f9de69eed1,0x2822dc02d42fd96a}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcb7300cd4caa2fff,0x1a8224f9de69eed1,0x2822dc02d42fd96a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif 
GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}} +{{{._mp_alloc = 0, ._mp_size = 3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}} +{{{._mp_alloc = 0, ._mp_size = -12, ._mp_d = (mp_limb_t[]) {0x3d93,0x1bd0,0xe46e,0xcbe9,0xbf97,0xeb5e,0x498,0xef9,0x148c,0xa51f,0x92c8,0x3773}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}} +{{{._mp_alloc = 0, ._mp_size = -6, ._mp_d = (mp_limb_t[]) {0x1bd03d93,0xcbe9e46e,0xeb5ebf97,0xef90498,0xa51f148c,0x377392c8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}} +{{{._mp_alloc = 0, ._mp_size = -3, ._mp_d = (mp_limb_t[]) {0xcbe9e46e1bd03d93,0xef90498eb5ebf97,0x377392c8a51f148c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x4385,0xf091,0xe8e9,0xfaa3,0x7d60,0x8ab7,0x68b2,0x8a57,0x2754,0xa10c,0x6f20,0x71e4,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x4385,0xf091,0xe8e9,0xfaa3,0x7d60,0x8ab7,0x68b2,0x8a57,0x2754,0xa10c,0x6f20,0x71e4,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xf0914385,0xfaa3e8e9,0x8ab77d60,0x8a5768b2,0xa10c2754,0x71e46f20,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xf0914385,0xfaa3e8e9,0x8ab77d60,0x8a5768b2,0xa10c2754,0x71e46f20,0x4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfaa3e8e9f0914385,0x8a5768b28ab77d60,0x71e46f20a10c2754,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfaa3e8e9f0914385,0x8a5768b28ab77d60,0x71e46f20a10c2754,0x4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x4385,0xf091,0xe8e9,0xfaa3,0x7d60,0x8ab7,0x68b2,0x8a57,0x2754,0xa10c,0x6f20,0x71e4,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x4385,0xf091,0xe8e9,0xfaa3,0x7d60,0x8ab7,0x68b2,0x8a57,0x2754,0xa10c,0x6f20,0x71e4,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xf0914385,0xfaa3e8e9,0x8ab77d60,0x8a5768b2,0xa10c2754,0x71e46f20,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0xf0914385,0xfaa3e8e9,0x8ab77d60,0x8a5768b2,0xa10c2754,0x71e46f20,0x4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfaa3e8e9f0914385,0x8a5768b28ab77d60,0x71e46f20a10c2754,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfaa3e8e9f0914385,0x8a5768b28ab77d60,0x71e46f20a10c2754,0x4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = 
(mp_limb_t[]) {0x9}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x9}}}} #endif -}}}; +}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sign.c index 9216bbe4d3..9520a6f7fd 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sign.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sign.c @@ -31,12 +31,12 @@ compute_challenge_ideal_signature(quat_left_ideal_t *lideal_chall_two, const sig // vec is a vector [1, chall_coeff] coefficients encoding the kernel of the challenge // isogeny as B[0] + chall_coeff*B[1] where B is the canonical basis of the // 2^TORSION_EVEN_POWER torsion of EA - ibz_set(&vec[0], 1); - ibz_copy_digit_array(&vec[1], sig->chall_coeff); + ibz_set(&vec.v[0], 1); + ibz_copy_digit_array(&vec.v[1], sig->chall_coeff); // now we compute the ideal associated to the challenge // for that, we need to find vec such that - // the kernel of the challenge isogeny is generated by vec[0]*B0[0] + vec[1]*B0[1] where B0 + // the kernel of the challenge isogeny is generated by vec.v[0]*B0[0] + vec.v[1]*B0[1] where B0 // is the image through the secret key isogeny of the canonical basis E0 ibz_mat_2x2_eval(&vec, &(sk->mat_BAcan_to_BA0_two), &vec); @@ -459,16 +459,16 @@ compute_and_set_basis_change_matrix(signature_t *sig, change_of_basis_matrix_tate(&mat_Bchall_can_to_Bchall, B_chall_2, &B_can_chall, E_chall, f); // Assert all values in the matrix are of the expected size for packing - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][0]) <= SQIsign_response_length + HD_extra_torsion); - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][1]) <= SQIsign_response_length + HD_extra_torsion); - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][0]) <= SQIsign_response_length + HD_extra_torsion); - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][1]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[0][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[0][1]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[1][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[1][1]) <= SQIsign_response_length + HD_extra_torsion); // Set the basis change matrix to signature - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][0], &(mat_Bchall_can_to_Bchall[0][0])); - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][1], &(mat_Bchall_can_to_Bchall[0][1])); - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][0], &(mat_Bchall_can_to_Bchall[1][0])); - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][1], &(mat_Bchall_can_to_Bchall[1][1])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][0], &(mat_Bchall_can_to_Bchall.m[0][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][1], &(mat_Bchall_can_to_Bchall.m[0][1])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][0], &(mat_Bchall_can_to_Bchall.m[1][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][1], &(mat_Bchall_can_to_Bchall.m[1][1])); // Finalise the matrices ibz_mat_2x2_finalize(&mat_Bchall_can_to_Bchall); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/torsion_constants.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/torsion_constants.c index 1a6c203035..6e7296bfeb 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/torsion_constants.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/torsion_constants.c @@ -4,40 +4,40 @@ const 
ibz_t TWO_TO_SECURITY_BITS = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 7, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x1}}}} #endif ; const ibz_t TORSION_PLUS_2POWER = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x100}}} +{{{._mp_alloc = 0, ._mp_size = 24, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x100}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000000}}} +{{{._mp_alloc = 0, ._mp_size = 12, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x100000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 6, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x100000000000000}}}} #endif ; const ibz_t SEC_DEGREE = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 49, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 49, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 25, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 25, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #endif ; const ibz_t COM_DEGREE = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 49, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 49, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 25, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, 
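The constant hunks above (and the matching ones later in this patch) only change brace depth: every `{{._mp_alloc = ..., ._mp_size = ..., ._mp_d = ...}}` becomes `{{{...}}}`. That is consistent with the underlying big-integer type gaining a struct wrapper around GMP's one-element-array `mpz_t`, which costs one extra aggregate level in each static initializer. A minimal standalone sketch of the effect, using hypothetical `limb_array_t` / `wrapped_t` types rather than the real SQIsign or mini-GMP headers:

    #include <stdio.h>

    /* Stand-in for GMP's __mpz_struct; not the real layout, just the shape. */
    typedef struct {
        int alloc;
        int size;
        const unsigned long *d;
    } limb_struct;

    typedef limb_struct limb_array_t[1];            /* mimics mpz_t: array-of-1 typedef */
    typedef struct { limb_array_t x; } wrapped_t;   /* hypothetical struct wrapper      */

    static const unsigned long ONE_LIMB[] = { 0x1 };

    /* bare array typedef: two brace levels, like the removed lines */
    static const limb_array_t BARE_ONE =
        { { .alloc = 0, .size = 1, .d = ONE_LIMB } };

    /* struct-wrapped typedef: three brace levels, like the added lines */
    static const wrapped_t WRAPPED_ONE =
        { { { .alloc = 0, .size = 1, .d = ONE_LIMB } } };

    int main(void)
    {
        printf("%d %d\n", BARE_ONE[0].size, WRAPPED_ONE.x[0].size);
        return 0;
    }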
._mp_size = 25, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 13, ._mp_d = (mp_limb_t[]) {0xb7,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #endif ; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/algebra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/algebra.c index f4b4260755..a6298acf77 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/algebra.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/algebra.c @@ -21,54 +21,54 @@ quat_alg_coord_mul(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b, ibz_init(&prod); ibz_vec_4_init(&sum); - ibz_set(&(sum[0]), 0); - ibz_set(&(sum[1]), 0); - ibz_set(&(sum[2]), 0); - ibz_set(&(sum[3]), 0); + ibz_set(&(sum.v[0]), 0); + ibz_set(&(sum.v[1]), 0); + ibz_set(&(sum.v[2]), 0); + ibz_set(&(sum.v[3]), 0); // compute 1 coordinate - ibz_mul(&prod, &((*a)[2]), &((*b)[2])); - ibz_sub(&(sum[0]), &(sum[0]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[3])); - ibz_sub(&(sum[0]), &(sum[0]), &prod); - ibz_mul(&(sum[0]), &(sum[0]), &(alg->p)); - ibz_mul(&prod, &((*a)[0]), &((*b)[0])); - ibz_add(&(sum[0]), &(sum[0]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[1])); - ibz_sub(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[2])); + ibz_sub(&(sum.v[0]), &(sum.v[0]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[3])); + ibz_sub(&(sum.v[0]), &(sum.v[0]), &prod); + ibz_mul(&(sum.v[0]), &(sum.v[0]), &(alg->p)); + ibz_mul(&prod, &(a->v[0]), &(b->v[0])); + ibz_add(&(sum.v[0]), &(sum.v[0]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[1])); + ibz_sub(&(sum.v[0]), &(sum.v[0]), &prod); // compute i coordiante - ibz_mul(&prod, &((*a)[2]), &((*b)[3])); - ibz_add(&(sum[1]), &(sum[1]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[2])); - ibz_sub(&(sum[1]), &(sum[1]), &prod); - ibz_mul(&(sum[1]), &(sum[1]), &(alg->p)); - ibz_mul(&prod, &((*a)[0]), &((*b)[1])); - ibz_add(&(sum[1]), &(sum[1]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[0])); - ibz_add(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[3])); + ibz_add(&(sum.v[1]), &(sum.v[1]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[2])); + ibz_sub(&(sum.v[1]), &(sum.v[1]), &prod); + ibz_mul(&(sum.v[1]), &(sum.v[1]), &(alg->p)); + ibz_mul(&prod, &(a->v[0]), &(b->v[1])); + ibz_add(&(sum.v[1]), &(sum.v[1]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[0])); + ibz_add(&(sum.v[1]), &(sum.v[1]), &prod); // compute j coordiante - ibz_mul(&prod, &((*a)[0]), &((*b)[2])); - ibz_add(&(sum[2]), &(sum[2]), &prod); - ibz_mul(&prod, &((*a)[2]), &((*b)[0])); - ibz_add(&(sum[2]), &(sum[2]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[3])); - ibz_sub(&(sum[2]), &(sum[2]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[1])); - ibz_add(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &(a->v[0]), &(b->v[2])); + ibz_add(&(sum.v[2]), &(sum.v[2]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[0])); + ibz_add(&(sum.v[2]), &(sum.v[2]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[3])); + ibz_sub(&(sum.v[2]), &(sum.v[2]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[1])); + ibz_add(&(sum.v[2]), &(sum.v[2]), &prod); // compute ij coordiante - ibz_mul(&prod, &((*a)[0]), &((*b)[3])); - ibz_add(&(sum[3]), &(sum[3]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[0])); - ibz_add(&(sum[3]), &(sum[3]), &prod); - ibz_mul(&prod, 
&((*a)[2]), &((*b)[1])); - ibz_sub(&(sum[3]), &(sum[3]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[2])); - ibz_add(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &(a->v[0]), &(b->v[3])); + ibz_add(&(sum.v[3]), &(sum.v[3]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[0])); + ibz_add(&(sum.v[3]), &(sum.v[3]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[1])); + ibz_sub(&(sum.v[3]), &(sum.v[3]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[2])); + ibz_add(&(sum.v[3]), &(sum.v[3]), &prod); - ibz_copy(&((*res)[0]), &(sum[0])); - ibz_copy(&((*res)[1]), &(sum[1])); - ibz_copy(&((*res)[2]), &(sum[2])); - ibz_copy(&((*res)[3]), &(sum[3])); + ibz_copy(&(res->v[0]), &(sum.v[0])); + ibz_copy(&(res->v[1]), &(sum.v[1])); + ibz_copy(&(res->v[2]), &(sum.v[2])); + ibz_copy(&(res->v[3]), &(sum.v[3])); ibz_finalize(&prod); ibz_vec_4_finalize(&sum); @@ -86,8 +86,8 @@ quat_alg_equal_denom(quat_alg_elem_t *res_a, quat_alg_elem_t *res_b, const quat_ ibz_div(&(res_b->denom), &r, &(b->denom), &gcd); for (int i = 0; i < 4; i++) { // multiply coordiates by reduced denominators from the other element - ibz_mul(&(res_a->coord[i]), &(a->coord[i]), &(res_b->denom)); - ibz_mul(&(res_b->coord[i]), &(b->coord[i]), &(res_a->denom)); + ibz_mul(&(res_a->coord.v[i]), &(a->coord.v[i]), &(res_b->denom)); + ibz_mul(&(res_b->coord.v[i]), &(b->coord.v[i]), &(res_a->denom)); } // multiply both reduced denominators ibz_mul(&(res_a->denom), &(res_a->denom), &(res_b->denom)); @@ -149,8 +149,8 @@ quat_alg_norm(ibz_t *res_num, ibz_t *res_denom, const quat_alg_elem_t *a, const quat_alg_conj(&norm, a); quat_alg_mul(&norm, a, &norm, alg); - ibz_gcd(&g, &(norm.coord[0]), &(norm.denom)); - ibz_div(res_num, &r, &(norm.coord[0]), &g); + ibz_gcd(&g, &(norm.coord.v[0]), &(norm.denom)); + ibz_div(res_num, &r, &(norm.coord.v[0]), &g); ibz_div(res_denom, &r, &(norm.denom), &g); ibz_abs(res_denom, res_denom); ibz_abs(res_num, res_num); @@ -165,20 +165,20 @@ void quat_alg_scalar(quat_alg_elem_t *elem, const ibz_t *numerator, const ibz_t *denominator) { ibz_copy(&(elem->denom), denominator); - ibz_copy(&(elem->coord[0]), numerator); - ibz_set(&(elem->coord[1]), 0); - ibz_set(&(elem->coord[2]), 0); - ibz_set(&(elem->coord[3]), 0); + ibz_copy(&(elem->coord.v[0]), numerator); + ibz_set(&(elem->coord.v[1]), 0); + ibz_set(&(elem->coord.v[2]), 0); + ibz_set(&(elem->coord.v[3]), 0); } void quat_alg_conj(quat_alg_elem_t *conj, const quat_alg_elem_t *x) { ibz_copy(&(conj->denom), &(x->denom)); - ibz_copy(&(conj->coord[0]), &(x->coord[0])); - ibz_neg(&(conj->coord[1]), &(x->coord[1])); - ibz_neg(&(conj->coord[2]), &(x->coord[2])); - ibz_neg(&(conj->coord[3]), &(x->coord[3])); + ibz_copy(&(conj->coord.v[0]), &(x->coord.v[0])); + ibz_neg(&(conj->coord.v[1]), &(x->coord.v[1])); + ibz_neg(&(conj->coord.v[2]), &(x->coord.v[2])); + ibz_neg(&(conj->coord.v[3]), &(x->coord.v[3])); } void @@ -190,7 +190,8 @@ quat_alg_make_primitive(ibz_vec_4_t *primitive_x, ibz_t *content, const quat_alg ibz_t r; ibz_init(&r); for (int i = 0; i < 4; i++) { - ibz_div(*primitive_x + i, &r, *primitive_x + i, content); + // TODO: check if this is correct + ibz_div(primitive_x->v + i, &r, primitive_x->v + i, content); } ibz_finalize(&r); } @@ -235,10 +236,10 @@ quat_alg_elem_is_zero(const quat_alg_elem_t *x) void quat_alg_elem_set(quat_alg_elem_t *elem, int32_t denom, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) { - ibz_set(&(elem->coord[0]), coord0); - ibz_set(&(elem->coord[1]), coord1); - ibz_set(&(elem->coord[2]), coord2); - ibz_set(&(elem->coord[3]), coord3); + 
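For reference, the coefficient formulas in `quat_alg_coord_mul` above are the standard product in a quaternion algebra with i^2 = -1, j^2 = -p and k = ij; the patched lines only change how coordinates are addressed (`a->v[i]` instead of `(*a)[i]`), not the arithmetic. A small standalone sketch over `int64_t`, with a reduced-norm check as a sanity test (toy values, not SQIsign parameters):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same coefficient formulas as the hunk above, over machine integers. */
    static void quat_mul(int64_t r[4], const int64_t a[4], const int64_t b[4], int64_t p)
    {
        r[0] = a[0] * b[0] - a[1] * b[1] - p * (a[2] * b[2] + a[3] * b[3]);
        r[1] = a[0] * b[1] + a[1] * b[0] + p * (a[2] * b[3] - a[3] * b[2]);
        r[2] = a[0] * b[2] + a[2] * b[0] - a[1] * b[3] + a[3] * b[1];
        r[3] = a[0] * b[3] + a[3] * b[0] + a[1] * b[2] - a[2] * b[1];
    }

    /* Reduced norm N(a) = a0^2 + a1^2 + p*(a2^2 + a3^2). */
    static int64_t quat_norm(const int64_t a[4], int64_t p)
    {
        return a[0] * a[0] + a[1] * a[1] + p * (a[2] * a[2] + a[3] * a[3]);
    }

    int main(void)
    {
        const int64_t p = 19;
        const int64_t a[4] = { 2, -1, 3, 5 }, b[4] = { 1, 4, -2, 7 };
        int64_t ab[4];

        quat_mul(ab, a, b, p);
        /* multiplicativity of the norm: N(ab) == N(a) * N(b) */
        assert(quat_norm(ab, p) == quat_norm(a, p) * quat_norm(b, p));
        printf("ab = %lld + %lldi + %lldj + %lldk\n",
               (long long)ab[0], (long long)ab[1], (long long)ab[2], (long long)ab[3]);
        return 0;
    }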
ibz_set(&(elem->coord.v[0]), coord0); + ibz_set(&(elem->coord.v[1]), coord1); + ibz_set(&(elem->coord.v[2]), coord2); + ibz_set(&(elem->coord.v[3]), coord3); ibz_set(&(elem->denom), denom); } @@ -247,10 +248,10 @@ void quat_alg_elem_copy(quat_alg_elem_t *copy, const quat_alg_elem_t *copied) { ibz_copy(©->denom, &copied->denom); - ibz_copy(©->coord[0], &copied->coord[0]); - ibz_copy(©->coord[1], &copied->coord[1]); - ibz_copy(©->coord[2], &copied->coord[2]); - ibz_copy(©->coord[3], &copied->coord[3]); + ibz_copy(©->coord.v[0], &copied->coord.v[0]); + ibz_copy(©->coord.v[1], &copied->coord.v[1]); + ibz_copy(©->coord.v[2], &copied->coord.v[2]); + ibz_copy(©->coord.v[3], &copied->coord.v[3]); } // helper functions for lattices @@ -262,10 +263,10 @@ quat_alg_elem_copy_ibz(quat_alg_elem_t *elem, const ibz_t *coord2, const ibz_t *coord3) { - ibz_copy(&(elem->coord[0]), coord0); - ibz_copy(&(elem->coord[1]), coord1); - ibz_copy(&(elem->coord[2]), coord2); - ibz_copy(&(elem->coord[3]), coord3); + ibz_copy(&(elem->coord.v[0]), coord0); + ibz_copy(&(elem->coord.v[1]), coord1); + ibz_copy(&(elem->coord.v[2]), coord2); + ibz_copy(&(elem->coord.v[3]), coord3); ibz_copy(&(elem->denom), denom); } @@ -274,7 +275,7 @@ void quat_alg_elem_mul_by_scalar(quat_alg_elem_t *res, const ibz_t *scalar, const quat_alg_elem_t *elem) { for (int i = 0; i < 4; i++) { - ibz_mul(&(res->coord[i]), &(elem->coord[i]), scalar); + ibz_mul(&(res->coord.v[i]), &(elem->coord.v[i]), scalar); } ibz_copy(&(res->denom), &(elem->denom)); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/common.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/common.c index 1df7755a29..e051ac340a 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/common.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/common.c @@ -14,6 +14,7 @@ public_key_init(public_key_t *pk) void public_key_finalize(public_key_t *pk) { + (void) pk; } // compute the challenge as the hash of the message and the commitment curve and public key diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim2.c index b31ae7771a..5bf214c4e2 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim2.c @@ -5,34 +5,34 @@ void ibz_vec_2_set(ibz_vec_2_t *vec, int a0, int a1) { - ibz_set(&((*vec)[0]), a0); - ibz_set(&((*vec)[1]), a1); + ibz_set(&(vec->v[0]), a0); + ibz_set(&(vec->v[1]), a1); } void ibz_mat_2x2_set(ibz_mat_2x2_t *mat, int a00, int a01, int a10, int a11) { - ibz_set(&((*mat)[0][0]), a00); - ibz_set(&((*mat)[0][1]), a01); - ibz_set(&((*mat)[1][0]), a10); - ibz_set(&((*mat)[1][1]), a11); + ibz_set(&(mat->m[0][0]), a00); + ibz_set(&(mat->m[0][1]), a01); + ibz_set(&(mat->m[1][0]), a10); + ibz_set(&(mat->m[1][1]), a11); } void ibz_mat_2x2_copy(ibz_mat_2x2_t *copy, const ibz_mat_2x2_t *copied) { - ibz_copy(&((*copy)[0][0]), &((*copied)[0][0])); - ibz_copy(&((*copy)[0][1]), &((*copied)[0][1])); - ibz_copy(&((*copy)[1][0]), &((*copied)[1][0])); - ibz_copy(&((*copy)[1][1]), &((*copied)[1][1])); + ibz_copy(&(copy->m[0][0]), &(copied->m[0][0])); + ibz_copy(&(copy->m[0][1]), &(copied->m[0][1])); + ibz_copy(&(copy->m[1][0]), &(copied->m[1][0])); + ibz_copy(&(copy->m[1][1]), &(copied->m[1][1])); } void ibz_mat_2x2_add(ibz_mat_2x2_t *sum, const ibz_mat_2x2_t *a, const ibz_mat_2x2_t *b) { - ibz_add(&((*sum)[0][0]), &((*a)[0][0]), &((*b)[0][0])); - ibz_add(&((*sum)[0][1]), &((*a)[0][1]), &((*b)[0][1])); - 
ibz_add(&((*sum)[1][0]), &((*a)[1][0]), &((*b)[1][0])); - ibz_add(&((*sum)[1][1]), &((*a)[1][1]), &((*b)[1][1])); + ibz_add(&(sum->m[0][0]), &(a->m[0][0]), &(b->m[0][0])); + ibz_add(&(sum->m[0][1]), &(a->m[0][1]), &(b->m[0][1])); + ibz_add(&(sum->m[1][0]), &(a->m[1][0]), &(b->m[1][0])); + ibz_add(&(sum->m[1][1]), &(a->m[1][1]), &(b->m[1][1])); } void @@ -53,16 +53,16 @@ ibz_mat_2x2_eval(ibz_vec_2_t *res, const ibz_mat_2x2_t *mat, const ibz_vec_2_t * ibz_vec_2_t matvec; ibz_init(&prod); ibz_vec_2_init(&matvec); - ibz_mul(&prod, &((*mat)[0][0]), &((*vec)[0])); - ibz_copy(&(matvec[0]), &prod); - ibz_mul(&prod, &((*mat)[0][1]), &((*vec)[1])); - ibz_add(&(matvec[0]), &(matvec[0]), &prod); - ibz_mul(&prod, &((*mat)[1][0]), &((*vec)[0])); - ibz_copy(&(matvec[1]), &prod); - ibz_mul(&prod, &((*mat)[1][1]), &((*vec)[1])); - ibz_add(&(matvec[1]), &(matvec[1]), &prod); - ibz_copy(&((*res)[0]), &(matvec[0])); - ibz_copy(&((*res)[1]), &(matvec[1])); + ibz_mul(&prod, &(mat->m[0][0]), &(vec->v[0])); + ibz_copy(&(matvec.v[0]), &prod); + ibz_mul(&prod, &(mat->m[0][1]), &(vec->v[1])); + ibz_add(&(matvec.v[0]), &(matvec.v[0]), &prod); + ibz_mul(&prod, &(mat->m[1][0]), &(vec->v[0])); + ibz_copy(&(matvec.v[1]), &prod); + ibz_mul(&prod, &(mat->m[1][1]), &(vec->v[1])); + ibz_add(&(matvec.v[1]), &(matvec.v[1]), &prod); + ibz_copy(&(res->v[0]), &(matvec.v[0])); + ibz_copy(&(res->v[1]), &(matvec.v[1])); ibz_finalize(&prod); ibz_vec_2_finalize(&matvec); } @@ -78,21 +78,21 @@ ibz_2x2_mul_mod(ibz_mat_2x2_t *prod, const ibz_mat_2x2_t *mat_a, const ibz_mat_2 ibz_mat_2x2_init(&sums); for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_set(&(sums[i][j]), 0); + ibz_set(&(sums.m[i][j]), 0); } } for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { for (int k = 0; k < 2; k++) { - ibz_mul(&mul, &((*mat_a)[i][k]), &((*mat_b)[k][j])); - ibz_add(&(sums[i][j]), &(sums[i][j]), &mul); - ibz_mod(&(sums[i][j]), &(sums[i][j]), m); + ibz_mul(&mul, &(mat_a->m[i][k]), &(mat_b->m[k][j])); + ibz_add(&(sums.m[i][j]), &(sums.m[i][j]), &mul); + ibz_mod(&(sums.m[i][j]), &(sums.m[i][j]), m); } } } for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_copy(&((*prod)[i][j]), &(sums[i][j])); + ibz_copy(&(prod->m[i][j]), &(sums.m[i][j])); } } ibz_finalize(&mul); @@ -105,9 +105,9 @@ ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m ibz_t det, prod; ibz_init(&det); ibz_init(&prod); - ibz_mul(&det, &((*mat)[0][0]), &((*mat)[1][1])); + ibz_mul(&det, &(mat->m[0][0]), &(mat->m[1][1])); ibz_mod(&det, &det, m); - ibz_mul(&prod, &((*mat)[0][1]), &((*mat)[1][0])); + ibz_mul(&prod, &(mat->m[0][1]), &(mat->m[1][0])); ibz_sub(&det, &det, &prod); ibz_mod(&det, &det, m); int res = ibz_invmod(&det, &det, m); @@ -115,15 +115,15 @@ ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m ibz_set(&prod, res); ibz_mul(&det, &det, &prod); // compute inverse - ibz_copy(&prod, &((*mat)[0][0])); - ibz_copy(&((*inv)[0][0]), &((*mat)[1][1])); - ibz_copy(&((*inv)[1][1]), &prod); - ibz_neg(&((*inv)[1][0]), &((*mat)[1][0])); - ibz_neg(&((*inv)[0][1]), &((*mat)[0][1])); + ibz_copy(&prod, &(mat->m[0][0])); + ibz_copy(&(inv->m[0][0]), &(mat->m[1][1])); + ibz_copy(&(inv->m[1][1]), &prod); + ibz_neg(&(inv->m[1][0]), &(mat->m[1][0])); + ibz_neg(&(inv->m[0][1]), &(mat->m[0][1])); for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_mul(&((*inv)[i][j]), &((*inv)[i][j]), &det); - ibz_mod(&((*inv)[i][j]), &((*inv)[i][j]), m); + ibz_mul(&(inv->m[i][j]), &(inv->m[i][j]), &det); + 
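`ibz_mat_2x2_inv_mod` above follows the usual adjugate route: compute det = m00*m11 - m01*m10 mod m, invert the determinant, then scale the adjugate entries. A compact `int64_t` sketch of the same steps, assuming the determinant is invertible; `egcd_inv` is a stand-in for `ibz_invmod` and is not a SQIsign function:

    #include <assert.h>
    #include <stdint.h>

    static int64_t mod(int64_t a, int64_t m) { int64_t r = a % m; return r < 0 ? r + m : r; }

    /* Extended Euclid; assumes gcd(a, m) == 1. */
    static int64_t egcd_inv(int64_t a, int64_t m)
    {
        int64_t t = 0, newt = 1, r = m, newr = mod(a, m);
        while (newr != 0) {
            int64_t q = r / newr, tmp;
            tmp = t - q * newt; t = newt; newt = tmp;
            tmp = r - q * newr; r = newr; newr = tmp;
        }
        return mod(t, m);
    }

    static void inv2x2_mod(int64_t inv[2][2], const int64_t a[2][2], int64_t m)
    {
        int64_t det = mod(a[0][0] * a[1][1] - a[0][1] * a[1][0], m);
        int64_t det_inv = egcd_inv(det, m);
        /* adjugate scaled by det^-1, mirroring the patched assignments */
        inv[0][0] = mod(a[1][1] * det_inv, m);
        inv[1][1] = mod(a[0][0] * det_inv, m);
        inv[0][1] = mod(-a[0][1] * det_inv, m);
        inv[1][0] = mod(-a[1][0] * det_inv, m);
    }

    int main(void)
    {
        const int64_t m = 101, a[2][2] = { { 3, 7 }, { 2, 5 } };
        int64_t inv[2][2];
        inv2x2_mod(inv, a, m);
        /* first row of a * a^-1 should be (1, 0) mod m */
        assert(mod(a[0][0] * inv[0][0] + a[0][1] * inv[1][0], m) == 1);
        assert(mod(a[0][0] * inv[0][1] + a[0][1] * inv[1][1], m) == 0);
        return 0;
    }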
ibz_mod(&(inv->m[i][j]), &(inv->m[i][j]), m); } } ibz_finalize(&det); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c index 171473d481..143060e2c3 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c @@ -137,10 +137,10 @@ _fixed_degree_isogeny_impl(quat_left_ideal_t *lideal, ibz_invmod(&tmp, &tmp, &two_pow); assert(!ibz_is_even(&tmp)); - ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); - ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); - ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); - ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + ibz_mul(&theta.coord.v[0], &theta.coord.v[0], &tmp); + ibz_mul(&theta.coord.v[1], &theta.coord.v[1], &tmp); + ibz_mul(&theta.coord.v[2], &theta.coord.v[2], &tmp); + ibz_mul(&theta.coord.v[3], &theta.coord.v[3], &tmp); // applying theta to the basis ec_basis_t B0_two_theta; @@ -197,53 +197,53 @@ post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, const ibz_ // treatment if (is_special_order) { // reordering the basis if needed - if (ibz_cmp(&(*gram)[0][0], &(*gram)[2][2]) == 0) { + if (ibz_cmp(&gram->m[0][0], &gram->m[2][2]) == 0) { for (int i = 0; i < 4; i++) { - ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + ibz_swap(&reduced->m[i][1], &reduced->m[i][2]); } - ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); - ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); - ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); - ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); - ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); - } else if (ibz_cmp(&(*gram)[0][0], &(*gram)[3][3]) == 0) { + ibz_swap(&gram->m[0][2], &gram->m[0][1]); + ibz_swap(&gram->m[2][0], &gram->m[1][0]); + ibz_swap(&gram->m[3][2], &gram->m[3][1]); + ibz_swap(&gram->m[2][3], &gram->m[1][3]); + ibz_swap(&gram->m[2][2], &gram->m[1][1]); + } else if (ibz_cmp(&gram->m[0][0], &gram->m[3][3]) == 0) { for (int i = 0; i < 4; i++) { - ibz_swap(&(*reduced)[i][1], &(*reduced)[i][3]); + ibz_swap(&reduced->m[i][1], &reduced->m[i][3]); } - ibz_swap(&(*gram)[0][3], &(*gram)[0][1]); - ibz_swap(&(*gram)[3][0], &(*gram)[1][0]); - ibz_swap(&(*gram)[2][3], &(*gram)[2][1]); - ibz_swap(&(*gram)[3][2], &(*gram)[1][2]); - ibz_swap(&(*gram)[3][3], &(*gram)[1][1]); - } else if (ibz_cmp(&(*gram)[1][1], &(*gram)[3][3]) == 0) { + ibz_swap(&gram->m[0][3], &gram->m[0][1]); + ibz_swap(&gram->m[3][0], &gram->m[1][0]); + ibz_swap(&gram->m[2][3], &gram->m[2][1]); + ibz_swap(&gram->m[3][2], &gram->m[1][2]); + ibz_swap(&gram->m[3][3], &gram->m[1][1]); + } else if (ibz_cmp(&gram->m[1][1], &gram->m[3][3]) == 0) { // in this case it seems that we need to swap the second and third // element, and then recompute entirely the second element from the first // first we swap the second and third element for (int i = 0; i < 4; i++) { - ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + ibz_swap(&reduced->m[i][1], &reduced->m[i][2]); } - ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); - ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); - ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); - ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); - ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); + ibz_swap(&gram->m[0][2], &gram->m[0][1]); + ibz_swap(&gram->m[2][0], &gram->m[1][0]); + ibz_swap(&gram->m[3][2], &gram->m[3][1]); + ibz_swap(&gram->m[2][3], &gram->m[1][3]); + ibz_swap(&gram->m[2][2], &gram->m[1][1]); } // adjusting the sign if needed - if (ibz_cmp(&(*reduced)[0][0], &(*reduced)[1][1]) != 0) { + if 
(ibz_cmp(&reduced->m[0][0], &reduced->m[1][1]) != 0) { for (int i = 0; i < 4; i++) { - ibz_neg(&(*reduced)[i][1], &(*reduced)[i][1]); - ibz_neg(&(*gram)[i][1], &(*gram)[i][1]); - ibz_neg(&(*gram)[1][i], &(*gram)[1][i]); + ibz_neg(&reduced->m[i][1], &reduced->m[i][1]); + ibz_neg(&gram->m[i][1], &gram->m[i][1]); + ibz_neg(&gram->m[1][i], &gram->m[1][i]); } } - if (ibz_cmp(&(*reduced)[0][2], &(*reduced)[1][3]) != 0) { + if (ibz_cmp(&reduced->m[0][2], &reduced->m[1][3]) != 0) { for (int i = 0; i < 4; i++) { - ibz_neg(&(*reduced)[i][3], &(*reduced)[i][3]); - ibz_neg(&(*gram)[i][3], &(*gram)[i][3]); - ibz_neg(&(*gram)[3][i], &(*gram)[3][i]); + ibz_neg(&reduced->m[i][3], &reduced->m[i][3]); + ibz_neg(&gram->m[i][3], &gram->m[i][3]); + ibz_neg(&gram->m[3][i], &gram->m[3][i]); } - // assert(ibz_cmp(&(*reduced)[0][2],&(*reduced)[1][3])==0); + // assert(ibz_cmp(&reduced->m[0][2],&reduced->m[1][3])==0); } } } @@ -273,7 +273,7 @@ enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t // if the basis is of the form alpha, i*alpha, beta, i*beta // we can remove some values due to symmetry of the basis that bool need_remove_symmetry = - (ibz_cmp(&(*gram)[0][0], &(*gram)[1][1]) == 0 && ibz_cmp(&(*gram)[3][3], &(*gram)[2][2]) == 0); + (ibz_cmp(&gram->m[0][0], &gram->m[1][1]) == 0 && ibz_cmp(&gram->m[3][3], &gram->m[2][2]) == 0); int check1, check2, check3; @@ -324,10 +324,10 @@ enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t // and we ensure that we don't record the same norm in the list if (!need_remove_symmetry || (check1 <= check2 && check1 <= check3)) { // Set the point as a vector (x, y, z, w) - ibz_set(&point[0], x); - ibz_set(&point[1], y); - ibz_set(&point[2], z); - ibz_set(&point[3], w); + ibz_set(&point.v[0], x); + ibz_set(&point.v[1], y); + ibz_set(&point.v[2], z); + ibz_set(&point.v[3], w); // Evaluate this through the gram matrix and divide out by the // adjusted_norm @@ -336,10 +336,10 @@ enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t assert(ibz_is_zero(&remain)); if (ibz_mod_ui(&norm, 2) == 1) { - ibz_set(&vecs[count][0], x); - ibz_set(&vecs[count][1], y); - ibz_set(&vecs[count][2], z); - ibz_set(&vecs[count][3], w); + ibz_set(&vecs[count].v[0], x); + ibz_set(&vecs[count].v[1], y); + ibz_set(&vecs[count].v[2], z); + ibz_set(&vecs[count].v[3], w); ibz_copy(&norms[count], &norm); count++; } @@ -530,10 +530,10 @@ find_uv(ibz_t *u, quat_alg_elem_t delta; // delta will be the element of smallest norm quat_alg_elem_init(&delta); - ibz_set(&delta.coord[0], 1); - ibz_set(&delta.coord[1], 0); - ibz_set(&delta.coord[2], 0); - ibz_set(&delta.coord[3], 0); + ibz_set(&delta.coord.v[0], 1); + ibz_set(&delta.coord.v[1], 0); + ibz_set(&delta.coord.v[2], 0); + ibz_set(&delta.coord.v[3], 0); ibz_copy(&delta.denom, &reduced_id.lattice.denom); ibz_mat_4x4_eval(&delta.coord, &reduced[0], &delta.coord); assert(quat_lattice_contains(NULL, &reduced_id.lattice, &delta)); @@ -542,7 +542,7 @@ find_uv(ibz_t *u, quat_alg_conj(&delta, &delta); ibz_mul(&delta.denom, &delta.denom, &ideal[0].norm); quat_lattice_alg_elem_mul(&reduced_id.lattice, &reduced_id.lattice, &delta, Bpoo); - ibz_copy(&reduced_id.norm, &gram[0][0][0]); + ibz_copy(&reduced_id.norm, &gram[0].m[0][0]); ibz_div(&reduced_id.norm, &remain, &reduced_id.norm, &adjusted_norm[0]); assert(ibz_cmp(&remain, &ibz_const_zero) == 0); @@ -989,10 +989,10 @@ dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, } ibz_invmod(&tmp, &tmp, &two_pow); - ibz_mul(&theta.coord[0], 
&theta.coord[0], &tmp); - ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); - ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); - ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + ibz_mul(&theta.coord.v[0], &theta.coord.v[0], &tmp); + ibz_mul(&theta.coord.v[1], &theta.coord.v[1], &tmp); + ibz_mul(&theta.coord.v[2], &theta.coord.v[2], &tmp); + ibz_mul(&theta.coord.v[3], &theta.coord.v[3], &tmp); // applying theta endomorphism_application_even_basis(&bas2, 0, &Fv_codomain.E1, &theta, TORSION_EVEN_POWER); @@ -1092,10 +1092,10 @@ dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, ibz_mul(&tmp, &tmp, &CONNECTING_IDEALS[index_order1].norm); } ibz_invmod(&tmp, &tmp, &TORSION_PLUS_2POWER); - ibz_mul(&beta1->coord[0], &beta1->coord[0], &tmp); - ibz_mul(&beta1->coord[1], &beta1->coord[1], &tmp); - ibz_mul(&beta1->coord[2], &beta1->coord[2], &tmp); - ibz_mul(&beta1->coord[3], &beta1->coord[3], &tmp); + ibz_mul(&beta1->coord.v[0], &beta1->coord.v[0], &tmp); + ibz_mul(&beta1->coord.v[1], &beta1->coord.v[1], &tmp); + ibz_mul(&beta1->coord.v[2], &beta1->coord.v[2], &tmp); + ibz_mul(&beta1->coord.v[3], &beta1->coord.v[3], &tmp); endomorphism_application_even_basis(basis, 0, codomain, beta1, TORSION_EVEN_POWER); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim4.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim4.c index 495dc2dcb2..b024a7d46e 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim4.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim4.c @@ -11,16 +11,16 @@ ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t ibz_mat_4x4_init(&mat); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&(mat[i][j]), 0); + ibz_set(&(mat.m[i][j]), 0); for (int k = 0; k < 4; k++) { - ibz_mul(&prod, &((*a)[i][k]), &((*b)[k][j])); - ibz_add(&(mat[i][j]), &(mat[i][j]), &prod); + ibz_mul(&prod, &(a->m[i][k]), &(b->m[k][j])); + ibz_add(&(mat.m[i][j]), &(mat.m[i][j]), &prod); } } } for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&((*res)[i][j]), &(mat[i][j])); + ibz_copy(&(res->m[i][j]), &(mat.m[i][j])); } } ibz_mat_4x4_finalize(&mat); @@ -31,61 +31,61 @@ ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t void ibz_vec_4_set(ibz_vec_4_t *vec, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) { - ibz_set(&((*vec)[0]), coord0); - ibz_set(&((*vec)[1]), coord1); - ibz_set(&((*vec)[2]), coord2); - ibz_set(&((*vec)[3]), coord3); + ibz_set(&(vec->v[0]), coord0); + ibz_set(&(vec->v[1]), coord1); + ibz_set(&(vec->v[2]), coord2); + ibz_set(&(vec->v[3]), coord3); } void ibz_vec_4_copy(ibz_vec_4_t *new, const ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_copy(&((*new)[i]), &((*vec)[i])); + ibz_copy(&(new->v[i]), &(vec->v[i])); } } void ibz_vec_4_copy_ibz(ibz_vec_4_t *res, const ibz_t *coord0, const ibz_t *coord1, const ibz_t *coord2, const ibz_t *coord3) { - ibz_copy(&((*res)[0]), coord0); - ibz_copy(&((*res)[1]), coord1); - ibz_copy(&((*res)[2]), coord2); - ibz_copy(&((*res)[3]), coord3); + ibz_copy(&(res->v[0]), coord0); + ibz_copy(&(res->v[1]), coord1); + ibz_copy(&(res->v[2]), coord2); + ibz_copy(&(res->v[3]), coord3); } void ibz_vec_4_content(ibz_t *content, const ibz_vec_4_t *v) { - ibz_gcd(content, &((*v)[0]), &((*v)[1])); - ibz_gcd(content, &((*v)[2]), content); - ibz_gcd(content, &((*v)[3]), content); + ibz_gcd(content, &(v->v[0]), &(v->v[1])); + ibz_gcd(content, &(v->v[2]), content); + ibz_gcd(content, &(v->v[3]), content); } void 
ibz_vec_4_negate(ibz_vec_4_t *neg, const ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_neg(&((*neg)[i]), &((*vec)[i])); + ibz_neg(&(neg->v[i]), &(vec->v[i])); } } void ibz_vec_4_add(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) { - ibz_add(&((*res)[0]), &((*a)[0]), &((*b)[0])); - ibz_add(&((*res)[1]), &((*a)[1]), &((*b)[1])); - ibz_add(&((*res)[2]), &((*a)[2]), &((*b)[2])); - ibz_add(&((*res)[3]), &((*a)[3]), &((*b)[3])); + ibz_add(&(res->v[0]), &(a->v[0]), &(b->v[0])); + ibz_add(&(res->v[1]), &(a->v[1]), &(b->v[1])); + ibz_add(&(res->v[2]), &(a->v[2]), &(b->v[2])); + ibz_add(&(res->v[3]), &(a->v[3]), &(b->v[3])); } void ibz_vec_4_sub(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) { - ibz_sub(&((*res)[0]), &((*a)[0]), &((*b)[0])); - ibz_sub(&((*res)[1]), &((*a)[1]), &((*b)[1])); - ibz_sub(&((*res)[2]), &((*a)[2]), &((*b)[2])); - ibz_sub(&((*res)[3]), &((*a)[3]), &((*b)[3])); + ibz_sub(&(res->v[0]), &(a->v[0]), &(b->v[0])); + ibz_sub(&(res->v[1]), &(a->v[1]), &(b->v[1])); + ibz_sub(&(res->v[2]), &(a->v[2]), &(b->v[2])); + ibz_sub(&(res->v[3]), &(a->v[3]), &(b->v[3])); } int @@ -93,7 +93,7 @@ ibz_vec_4_is_zero(const ibz_vec_4_t *x) { int res = 1; for (int i = 0; i < 4; i++) { - res &= ibz_is_zero(&((*x)[i])); + res &= ibz_is_zero(&(x->v[i])); } return (res); } @@ -110,12 +110,12 @@ ibz_vec_4_linear_combination(ibz_vec_4_t *lc, ibz_vec_4_init(&sums); ibz_init(&prod); for (int i = 0; i < 4; i++) { - ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); - ibz_mul(&prod, coeff_b, &((*vec_b)[i])); - ibz_add(&(sums[i]), &(sums[i]), &prod); + ibz_mul(&(sums.v[i]), coeff_a, &(vec_a->v[i])); + ibz_mul(&prod, coeff_b, &(vec_b->v[i])); + ibz_add(&(sums.v[i]), &(sums.v[i]), &prod); } for (int i = 0; i < 4; i++) { - ibz_copy(&((*lc)[i]), &(sums[i])); + ibz_copy(&(lc->v[i]), &(sums.v[i])); } ibz_finalize(&prod); ibz_vec_4_finalize(&sums); @@ -125,7 +125,7 @@ void ibz_vec_4_scalar_mul(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_mul(&((*prod)[i]), &((*vec)[i]), scalar); + ibz_mul(&(prod->v[i]), &(vec->v[i]), scalar); } } @@ -136,7 +136,7 @@ ibz_vec_4_scalar_div(ibz_vec_4_t *quot, const ibz_t *scalar, const ibz_vec_4_t * ibz_t r; ibz_init(&r); for (int i = 0; i < 4; i++) { - ibz_div(&((*quot)[i]), &r, &((*vec)[i]), scalar); + ibz_div(&(quot->v[i]), &r, &(vec->v[i]), scalar); res = res && ibz_is_zero(&r); } ibz_finalize(&r); @@ -148,7 +148,7 @@ ibz_mat_4x4_copy(ibz_mat_4x4_t *new, const ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&((*new)[i][j]), &((*mat)[i][j])); + ibz_copy(&(new->m[i][j]), &(mat->m[i][j])); } } } @@ -158,7 +158,7 @@ ibz_mat_4x4_negate(ibz_mat_4x4_t *neg, const ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_neg(&((*neg)[i][j]), &((*mat)[i][j])); + ibz_neg(&(neg->m[i][j]), &(mat->m[i][j])); } } } @@ -170,7 +170,7 @@ ibz_mat_4x4_transpose(ibz_mat_4x4_t *transposed, const ibz_mat_4x4_t *mat) ibz_mat_4x4_init(&work); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(work[i][j]), &((*mat)[j][i])); + ibz_copy(&(work.m[i][j]), &(mat->m[j][i])); } } ibz_mat_4x4_copy(transposed, &work); @@ -182,7 +182,7 @@ ibz_mat_4x4_zero(ibz_mat_4x4_t *zero) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&((*zero)[i][j]), 0); + ibz_set(&(zero->m[i][j]), 0); } } } @@ -192,9 +192,9 @@ ibz_mat_4x4_identity(ibz_mat_4x4_t *id) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - 
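The accessor rewrites in dim2.c and dim4.c all follow one pattern: where the upstream vector and matrix types behaved like bare array typedefs (hence `(*vec)[i]`, `(*mat)[i][j]`), the patched code goes through explicit struct members (`vec->v[i]`, `mat->m[i][j]`). A minimal sketch of the two styles side by side, using hypothetical `vec4_arr_t` / `vec4_struct_t` types that are not the real SQIsign definitions:

    #include <assert.h>
    #include <stdint.h>

    typedef int64_t vec4_arr_t[4];                   /* old style: bare array typedef */
    typedef struct { int64_t v[4]; } vec4_struct_t;  /* new style: struct wrapper     */

    static void add_arr(vec4_arr_t *res, vec4_arr_t *a, vec4_arr_t *b)
    {
        for (int i = 0; i < 4; i++)
            (*res)[i] = (*a)[i] + (*b)[i];           /* needs the (*x)[i] dance */
    }

    static void add_struct(vec4_struct_t *res, const vec4_struct_t *a, const vec4_struct_t *b)
    {
        for (int i = 0; i < 4; i++)
            res->v[i] = a->v[i] + b->v[i];           /* reads like the patched code */
    }

    int main(void)
    {
        vec4_arr_t x = { 1, 2, 3, 4 }, y = { 4, 3, 2, 1 }, z;
        vec4_struct_t xs = { { 1, 2, 3, 4 } }, ys = { { 4, 3, 2, 1 } }, zs;

        add_arr(&z, &x, &y);
        add_struct(&zs, &xs, &ys);
        for (int i = 0; i < 4; i++)
            assert(z[i] == zs.v[i] && z[i] == 5);
        return 0;
    }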
ibz_set(&((*id)[i][j]), 0); + ibz_set(&(id->m[i][j]), 0); } - ibz_set(&((*id)[i][i]), 1); + ibz_set(&(id->m[i][i]), 1); } } @@ -204,7 +204,7 @@ ibz_mat_4x4_is_identity(const ibz_mat_4x4_t *mat) int res = 1; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - res = res && ibz_is_one(&((*mat)[i][j])) == (i == j); + res = res && ibz_is_one(&(mat->m[i][j])) == (i == j); } } return (res); @@ -216,7 +216,7 @@ ibz_mat_4x4_equal(const ibz_mat_4x4_t *mat1, const ibz_mat_4x4_t *mat2) int res = 0; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - res = res | ibz_cmp(&((*mat1)[i][j]), &((*mat2)[i][j])); + res = res | ibz_cmp(&(mat1->m[i][j]), &(mat2->m[i][j])); } } return (!res); @@ -227,7 +227,7 @@ ibz_mat_4x4_scalar_mul(ibz_mat_4x4_t *prod, const ibz_t *scalar, const ibz_mat_4 { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_mul(&((*prod)[i][j]), &((*mat)[i][j]), scalar); + ibz_mul(&(prod->m[i][j]), &(mat->m[i][j]), scalar); } } } @@ -237,10 +237,10 @@ ibz_mat_4x4_gcd(ibz_t *gcd, const ibz_mat_4x4_t *mat) { ibz_t d; ibz_init(&d); - ibz_copy(&d, &((*mat)[0][0])); + ibz_copy(&d, &(mat->m[0][0])); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_gcd(&d, &d, &((*mat)[i][j])); + ibz_gcd(&d, &d, &(mat->m[i][j])); } } ibz_copy(gcd, &d); @@ -255,7 +255,7 @@ ibz_mat_4x4_scalar_div(ibz_mat_4x4_t *quot, const ibz_t *scalar, const ibz_mat_4 ibz_init(&r); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_div(&((*quot)[i][j]), &r, &((*mat)[i][j]), scalar); + ibz_div(&(quot->m[i][j]), &r, &(mat->m[i][j]), scalar); res = res && ibz_is_zero(&r); } } @@ -325,17 +325,17 @@ ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_ // compute some 2x2 minors, store them in s and c for (int i = 0; i < 3; i++) { - ibz_mat_2x2_det_from_ibz(&(s[i]), &((*mat)[0][0]), &((*mat)[0][i + 1]), &((*mat)[1][0]), &((*mat)[1][i + 1])); - ibz_mat_2x2_det_from_ibz(&(c[i]), &((*mat)[2][0]), &((*mat)[2][i + 1]), &((*mat)[3][0]), &((*mat)[3][i + 1])); + ibz_mat_2x2_det_from_ibz(&(s[i]), &(mat->m[0][0]), &(mat->m[0][i + 1]), &(mat->m[1][0]), &(mat->m[1][i + 1])); + ibz_mat_2x2_det_from_ibz(&(c[i]), &(mat->m[2][0]), &(mat->m[2][i + 1]), &(mat->m[3][0]), &(mat->m[3][i + 1])); } for (int i = 0; i < 2; i++) { ibz_mat_2x2_det_from_ibz( - &(s[3 + i]), &((*mat)[0][1]), &((*mat)[0][2 + i]), &((*mat)[1][1]), &((*mat)[1][2 + i])); + &(s[3 + i]), &(mat->m[0][1]), &(mat->m[0][2 + i]), &(mat->m[1][1]), &(mat->m[1][2 + i])); ibz_mat_2x2_det_from_ibz( - &(c[3 + i]), &((*mat)[2][1]), &((*mat)[2][2 + i]), &((*mat)[3][1]), &((*mat)[3][2 + i])); + &(c[3 + i]), &(mat->m[2][1]), &(mat->m[2][2 + i]), &(mat->m[3][1]), &(mat->m[3][2 + i])); } - ibz_mat_2x2_det_from_ibz(&(s[5]), &((*mat)[0][2]), &((*mat)[0][3]), &((*mat)[1][2]), &((*mat)[1][3])); - ibz_mat_2x2_det_from_ibz(&(c[5]), &((*mat)[2][2]), &((*mat)[2][3]), &((*mat)[3][2]), &((*mat)[3][3])); + ibz_mat_2x2_det_from_ibz(&(s[5]), &(mat->m[0][2]), &(mat->m[0][3]), &(mat->m[1][2]), &(mat->m[1][3])); + ibz_mat_2x2_det_from_ibz(&(c[5]), &(mat->m[2][2]), &(mat->m[2][3]), &(mat->m[3][2]), &(mat->m[3][3])); // compute det ibz_set(&work_det, 0); @@ -351,39 +351,39 @@ ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_ for (int j = 0; j < 4; j++) { for (int k = 0; k < 2; k++) { if ((k + j + 1) % 2 == 1) { - ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), - &((*mat)[1 - k][(j == 0)]), + ibz_inv_dim4_make_coeff_pmp(&(work.m[j][k]), + &(mat->m[1 - k][(j == 0)]), &(c[6 - j - (j == 0)]), - &((*mat)[1 - 
k][2 - (j > 1)]), + &(mat->m[1 - k][2 - (j > 1)]), &(c[4 - j - (j == 1)]), - &((*mat)[1 - k][3 - (j == 3)]), + &(mat->m[1 - k][3 - (j == 3)]), &(c[3 - j - (j == 1) - (j == 2)])); } else { - ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), - &((*mat)[1 - k][(j == 0)]), + ibz_inv_dim4_make_coeff_mpm(&(work.m[j][k]), + &(mat->m[1 - k][(j == 0)]), &(c[6 - j - (j == 0)]), - &((*mat)[1 - k][2 - (j > 1)]), + &(mat->m[1 - k][2 - (j > 1)]), &(c[4 - j - (j == 1)]), - &((*mat)[1 - k][3 - (j == 3)]), + &(mat->m[1 - k][3 - (j == 3)]), &(c[3 - j - (j == 1) - (j == 2)])); } } for (int k = 2; k < 4; k++) { if ((k + j + 1) % 2 == 1) { - ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), - &((*mat)[3 - (k == 3)][(j == 0)]), + ibz_inv_dim4_make_coeff_pmp(&(work.m[j][k]), + &(mat->m[3 - (k == 3)][(j == 0)]), &(s[6 - j - (j == 0)]), - &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(mat->m[3 - (k == 3)][2 - (j > 1)]), &(s[4 - j - (j == 1)]), - &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(mat->m[3 - (k == 3)][3 - (j == 3)]), &(s[3 - j - (j == 1) - (j == 2)])); } else { - ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), - &((*mat)[3 - (k == 3)][(j == 0)]), + ibz_inv_dim4_make_coeff_mpm(&(work.m[j][k]), + &(mat->m[3 - (k == 3)][(j == 0)]), &(s[6 - j - (j == 0)]), - &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(mat->m[3 - (k == 3)][2 - (j > 1)]), &(s[4 - j - (j == 1)]), - &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(mat->m[3 - (k == 3)][3 - (j == 3)]), &(s[3 - j - (j == 1) - (j == 2)])); } } @@ -418,8 +418,8 @@ ibz_mat_4x4_eval(ibz_vec_4_t *res, const ibz_mat_4x4_t *mat, const ibz_vec_4_t * // assume initialization to 0 for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_mul(&prod, &(*mat)[i][j], &(*vec)[j]); - ibz_add(&(sum[i]), &(sum[i]), &prod); + ibz_mul(&prod, &mat->m[i][j], &vec->v[j]); + ibz_add(&(sum.v[i]), &(sum.v[i]), &prod); } } ibz_vec_4_copy(res, &sum); @@ -437,8 +437,8 @@ ibz_mat_4x4_eval_t(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_mat_4x4_t // assume initialization to 0 for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_mul(&prod, &(*mat)[j][i], &(*vec)[j]); - ibz_add(&(sum[i]), &(sum[i]), &prod); + ibz_mul(&prod, &mat->m[j][i], &vec->v[j]); + ibz_add(&(sum.v[i]), &(sum.v[i]), &prod); } } ibz_vec_4_copy(res, &sum); @@ -457,14 +457,14 @@ quat_qf_eval(ibz_t *res, const ibz_mat_4x4_t *qf, const ibz_vec_4_t *coord) ibz_vec_4_init(&sum); ibz_mat_4x4_eval(&sum, qf, coord); for (int i = 0; i < 4; i++) { - ibz_mul(&prod, &(sum[i]), &(*coord)[i]); + ibz_mul(&prod, &(sum.v[i]), &coord->v[i]); if (i > 0) { - ibz_add(&(sum[0]), &(sum[0]), &prod); + ibz_add(&(sum.v[0]), &(sum.v[0]), &prod); } else { - ibz_copy(&sum[0], &prod); + ibz_copy(&sum.v[0], &prod); } } - ibz_copy(res, &sum[0]); + ibz_copy(res, &sum.v[0]); ibz_finalize(&prod); ibz_vec_4_finalize(&sum); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/encode_signature.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/encode_signature.c index 112c695941..3a630cfd58 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/encode_signature.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/encode_signature.c @@ -157,17 +157,17 @@ secret_key_to_bytes(byte_t *enc, const secret_key_t *sk, const public_key_t *pk) ibz_finalize(&gcd); } #endif - enc = ibz_to_bytes(enc, &gen.coord[0], FP_ENCODED_BYTES, true); - enc = ibz_to_bytes(enc, &gen.coord[1], FP_ENCODED_BYTES, true); - enc = ibz_to_bytes(enc, &gen.coord[2], FP_ENCODED_BYTES, true); - enc = ibz_to_bytes(enc, &gen.coord[3], FP_ENCODED_BYTES, true); + enc = 
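`quat_qf_eval` above evaluates the quadratic form v^T * G * v by first forming G*v (via `ibz_mat_4x4_eval`) and then taking the dot product with v. A tiny `int64_t` sketch of the same two-step evaluation, with arbitrary test data rather than a Gram matrix from the scheme:

    #include <assert.h>
    #include <stdint.h>

    static int64_t qf_eval(const int64_t g[4][4], const int64_t v[4])
    {
        int64_t gv[4] = { 0, 0, 0, 0 }, res = 0;
        for (int i = 0; i < 4; i++)
            for (int j = 0; j < 4; j++)
                gv[i] += g[i][j] * v[j];   /* G * v */
        for (int i = 0; i < 4; i++)
            res += gv[i] * v[i];           /* <G*v, v> */
        return res;
    }

    int main(void)
    {
        const int64_t g[4][4] = { { 2, 1, 0, 0 },
                                  { 1, 2, 0, 0 },
                                  { 0, 0, 3, 1 },
                                  { 0, 0, 1, 3 } };
        const int64_t v[4] = { 1, -1, 2, 0 };
        assert(qf_eval(g, v) == 14);       /* v^T G v = 14 for this data */
        return 0;
    }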
ibz_to_bytes(enc, &gen.coord.v[0], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[1], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[2], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[3], FP_ENCODED_BYTES, true); quat_alg_elem_finalize(&gen); } - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][0], TORSION_2POWER_BYTES, false); - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][1], TORSION_2POWER_BYTES, false); - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][0], TORSION_2POWER_BYTES, false); - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][1], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[0][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[0][1], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[1][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[1][1], TORSION_2POWER_BYTES, false); assert(enc - start == SECRETKEY_BYTES); } @@ -187,19 +187,19 @@ secret_key_from_bytes(secret_key_t *sk, public_key_t *pk, const byte_t *enc) quat_alg_elem_t gen; quat_alg_elem_init(&gen); enc = ibz_from_bytes(&norm, enc, FP_ENCODED_BYTES, false); - enc = ibz_from_bytes(&gen.coord[0], enc, FP_ENCODED_BYTES, true); - enc = ibz_from_bytes(&gen.coord[1], enc, FP_ENCODED_BYTES, true); - enc = ibz_from_bytes(&gen.coord[2], enc, FP_ENCODED_BYTES, true); - enc = ibz_from_bytes(&gen.coord[3], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[0], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[1], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[2], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[3], enc, FP_ENCODED_BYTES, true); quat_lideal_create(&sk->secret_ideal, &gen, &norm, &MAXORD_O0, &QUATALG_PINFTY); ibz_finalize(&norm); quat_alg_elem_finalize(&gen); } - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][0], enc, TORSION_2POWER_BYTES, false); - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][1], enc, TORSION_2POWER_BYTES, false); - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][0], enc, TORSION_2POWER_BYTES, false); - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][1], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[0][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[0][1], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[1][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[1][1], enc, TORSION_2POWER_BYTES, false); assert(enc - start == SECRETKEY_BYTES); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c index dd089e6f4f..d62ffc51c7 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c @@ -261,223 +261,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x3337,0x86b9,0x96a8,0x892f,0x9f38,0x4c8e,0xc497,0xdf75,0x4fd7,0xd5b3,0x5dec,0xd543,0xf3dc,0xd5c3,0x8de3,0xa71e,0xd939,0x1324,0x7073,0x5af3,0xbb6b,0x4122,0x9d0,0x81d7,0x74de,0x3877,0x5ea0,0x6d85,0x7c31,0x8f2d,0x9df2,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3337,0x86b9,0x96a8,0x892f,0x9f38,0x4c8e,0xc497,0xdf75,0x4fd7,0xd5b3,0x5dec,0xd543,0xf3dc,0xd5c3,0x8de3,0xa71e,0xd939,0x1324,0x7073,0x5af3,0xbb6b,0x4122,0x9d0,0x81d7,0x74de,0x3877,0x5ea0,0x6d85,0x7c31,0x8f2d,0x9df2,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b93337,0x892f96a8,0x4c8e9f38,0xdf75c497,0xd5b34fd7,0xd5435dec,0xd5c3f3dc,0xa71e8de3,0x1324d939,0x5af37073,0x4122bb6b,0x81d709d0,0x387774de,0x6d855ea0,0x8f2d7c31,0x19df2}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b93337,0x892f96a8,0x4c8e9f38,0xdf75c497,0xd5b34fd7,0xd5435dec,0xd5c3f3dc,0xa71e8de3,0x1324d939,0x5af37073,0x4122bb6b,0x81d709d0,0x387774de,0x6d855ea0,0x8f2d7c31,0x19df2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x892f96a886b93337,0xdf75c4974c8e9f38,0xd5435decd5b34fd7,0xa71e8de3d5c3f3dc,0x5af370731324d939,0x81d709d04122bb6b,0x6d855ea0387774de,0x19df28f2d7c31}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x892f96a886b93337,0xdf75c4974c8e9f38,0xd5435decd5b34fd7,0xa71e8de3d5c3f3dc,0x5af370731324d939,0x81d709d04122bb6b,0x6d855ea0387774de,0x19df28f2d7c31}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x28e,0xcfd,0xe1c2,0x2061,0xe412,0x3b18,0x40df,0x716c,0xd025,0xd980,0xc041,0xebb9,0xbb45,0x4982,0x17de,0xa8fe,0xb079,0xffe5,0x34d9,0x2aa6,0x2b0b,0x6787,0x9bab,0x6bc3,0x7365,0x3c03,0xf512,0xb57b,0x897,0xf0b5,0x9c9c,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x28e,0xcfd,0xe1c2,0x2061,0xe412,0x3b18,0x40df,0x716c,0xd025,0xd980,0xc041,0xebb9,0xbb45,0x4982,0x17de,0xa8fe,0xb079,0xffe5,0x34d9,0x2aa6,0x2b0b,0x6787,0x9bab,0x6bc3,0x7365,0x3c03,0xf512,0xb57b,0x897,0xf0b5,0x9c9c,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcfd028e,0x2061e1c2,0x3b18e412,0x716c40df,0xd980d025,0xebb9c041,0x4982bb45,0xa8fe17de,0xffe5b079,0x2aa634d9,0x67872b0b,0x6bc39bab,0x3c037365,0xb57bf512,0xf0b50897,0x89c9c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcfd028e,0x2061e1c2,0x3b18e412,0x716c40df,0xd980d025,0xebb9c041,0x4982bb45,0xa8fe17de,0xffe5b079,0x2aa634d9,0x67872b0b,0x6bc39bab,0x3c037365,0xb57bf512,0xf0b50897,0x89c9c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2061e1c20cfd028e,0x716c40df3b18e412,0xebb9c041d980d025,0xa8fe17de4982bb45,0x2aa634d9ffe5b079,0x6bc39bab67872b0b,0xb57bf5123c037365,0x89c9cf0b50897}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2061e1c20cfd028e,0x716c40df3b18e412,0xebb9c041d980d025,0xa8fe17de4982bb45,0x2aa634d9ffe5b079,0x6bc39bab67872b0b,0xb57bf5123c037365,0x89c9cf0b50897}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x45b1,0x429e,0x37b2,0xd856,0x2f81,0x86cd,0x39cb,0x81ba,0x771e,0xee7e,0x58,0xfbe4,0xa4b,0x28fb,0x7a7d,0x5bb8,0xa413,0x1657,0x2d54,0x3a9d,0xbbad,0x75a3,0x689,0x3069,0xb0ad,0x2fdd,0x2e81,0xad39,0xd3cd,0x2bff,0xb5cb,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x45b1,0x429e,0x37b2,0xd856,0x2f81,0x86cd,0x39cb,0x81ba,0x771e,0xee7e,0x58,0xfbe4,0xa4b,0x28fb,0x7a7d,0x5bb8,0xa413,0x1657,0x2d54,0x3a9d,0xbbad,0x75a3,0x689,0x3069,0xb0ad,0x2fdd,0x2e81,0xad39,0xd3cd,0x2bff,0xb5cb,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x429e45b1,0xd85637b2,0x86cd2f81,0x81ba39cb,0xee7e771e,0xfbe40058,0x28fb0a4b,0x5bb87a7d,0x1657a413,0x3a9d2d54,0x75a3bbad,0x30690689,0x2fddb0ad,0xad392e81,0x2bffd3cd,0x7b5cb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x429e45b1,0xd85637b2,0x86cd2f81,0x81ba39cb,0xee7e771e,0xfbe40058,0x28fb0a4b,0x5bb87a7d,0x1657a413,0x3a9d2d54,0x75a3bbad,0x30690689,0x2fddb0ad,0xad392e81,0x2bffd3cd,0x7b5cb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd85637b2429e45b1,0x81ba39cb86cd2f81,0xfbe40058ee7e771e,0x5bb87a7d28fb0a4b,0x3a9d2d541657a413,0x3069068975a3bbad,0xad392e812fddb0ad,0x7b5cb2bffd3cd}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd85637b2429e45b1,0x81ba39cb86cd2f81,0xfbe40058ee7e771e,0x5bb87a7d28fb0a4b,0x3a9d2d541657a413,0x3069068975a3bbad,0xad392e812fddb0ad,0x7b5cb2bffd3cd}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xccc9,0x7946,0x6957,0x76d0,0x60c7,0xb371,0x3b68,0x208a,0xb028,0x2a4c,0xa213,0x2abc,0xc23,0x2a3c,0x721c,0x58e1,0x26c6,0xecdb,0x8f8c,0xa50c,0x4494,0xbedd,0xf62f,0x7e28,0x8b21,0xc788,0xa15f,0x927a,0x83ce,0x70d2,0x620d,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xccc9,0x7946,0x6957,0x76d0,0x60c7,0xb371,0x3b68,0x208a,0xb028,0x2a4c,0xa213,0x2abc,0xc23,0x2a3c,0x721c,0x58e1,0x26c6,0xecdb,0x8f8c,0xa50c,0x4494,0xbedd,0xf62f,0x7e28,0x8b21,0xc788,0xa15f,0x927a,0x83ce,0x70d2,0x620d,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7946ccc9,0x76d06957,0xb37160c7,0x208a3b68,0x2a4cb028,0x2abca213,0x2a3c0c23,0x58e1721c,0xecdb26c6,0xa50c8f8c,0xbedd4494,0x7e28f62f,0xc7888b21,0x927aa15f,0x70d283ce,0xe620d}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7946ccc9,0x76d06957,0xb37160c7,0x208a3b68,0x2a4cb028,0x2abca213,0x2a3c0c23,0x58e1721c,0xecdb26c6,0xa50c8f8c,0xbedd4494,0x7e28f62f,0xc7888b21,0x927aa15f,0x70d283ce,0xe620d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x76d069577946ccc9,0x208a3b68b37160c7,0x2abca2132a4cb028,0x58e1721c2a3c0c23,0xa50c8f8cecdb26c6,0x7e28f62fbedd4494,0x927aa15fc7888b21,0xe620d70d283ce}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x76d069577946ccc9,0x208a3b68b37160c7,0x2abca2132a4cb028,0x58e1721c2a3c0c23,0xa50c8f8cecdb26c6,0x7e28f62fbedd4494,0x927aa15fc7888b21,0xe620d70d283ce}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x20f3,0x77e0,0xc9a6,0xeb4f,0xb334,0xff68,0xecb4,0xa6e3,0x5015,0x43c1,0x9e87,0xf4eb,0x22e7,0x5f37,0x9392,0x80a0,0x9ea0,0x670f,0x1be3,0x7559,0x2cb5,0x900d,0xfa83,0x1519,0x67b8,0x4d7c,0xaf3a,0x6dc4,0x12e1,0x1e51,0x8d84,0x5}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x20f3,0x77e0,0xc9a6,0xeb4f,0xb334,0xff68,0xecb4,0xa6e3,0x5015,0x43c1,0x9e87,0xf4eb,0x22e7,0x5f37,0x9392,0x80a0,0x9ea0,0x670f,0x1be3,0x7559,0x2cb5,0x900d,0xfa83,0x1519,0x67b8,0x4d7c,0xaf3a,0x6dc4,0x12e1,0x1e51,0x8d84,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x77e020f3,0xeb4fc9a6,0xff68b334,0xa6e3ecb4,0x43c15015,0xf4eb9e87,0x5f3722e7,0x80a09392,0x670f9ea0,0x75591be3,0x900d2cb5,0x1519fa83,0x4d7c67b8,0x6dc4af3a,0x1e5112e1,0x58d84}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77e020f3,0xeb4fc9a6,0xff68b334,0xa6e3ecb4,0x43c15015,0xf4eb9e87,0x5f3722e7,0x80a09392,0x670f9ea0,0x75591be3,0x900d2cb5,0x1519fa83,0x4d7c67b8,0x6dc4af3a,0x1e5112e1,0x58d84}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xeb4fc9a677e020f3,0xa6e3ecb4ff68b334,0xf4eb9e8743c15015,0x80a093925f3722e7,0x75591be3670f9ea0,0x1519fa83900d2cb5,0x6dc4af3a4d7c67b8,0x58d841e5112e1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xeb4fc9a677e020f3,0xa6e3ecb4ff68b334,0xf4eb9e8743c15015,0x80a093925f3722e7,0x75591be3670f9ea0,0x1519fa83900d2cb5,0x6dc4af3a4d7c67b8,0x58d841e5112e1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x8e98,0xe430,0x6d21,0x2fa6,0x524f,0xf0cf,0xe5eb,0x30ec,0x3658,0x7711,0x7d2f,0x47bf,0xbbc5,0x720c,0xe7a6,0x1ef4,0x335f,0x2c25,0x59e5,0x471c,0x5e06,0x5d38,0x62d6,0xa2a7,0x65f3,0xdefc,0x5e15,0x7a7a,0xdac4,0xc542,0x7bb8,0xd}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x8e98,0xe430,0x6d21,0x2fa6,0x524f,0xf0cf,0xe5eb,0x30ec,0x3658,0x7711,0x7d2f,0x47bf,0xbbc5,0x720c,0xe7a6,0x1ef4,0x335f,0x2c25,0x59e5,0x471c,0x5e06,0x5d38,0x62d6,0xa2a7,0x65f3,0xdefc,0x5e15,0x7a7a,0xdac4,0xc542,0x7bb8,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe4308e98,0x2fa66d21,0xf0cf524f,0x30ece5eb,0x77113658,0x47bf7d2f,0x720cbbc5,0x1ef4e7a6,0x2c25335f,0x471c59e5,0x5d385e06,0xa2a762d6,0xdefc65f3,0x7a7a5e15,0xc542dac4,0xd7bb8}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe4308e98,0x2fa66d21,0xf0cf524f,0x30ece5eb,0x77113658,0x47bf7d2f,0x720cbbc5,0x1ef4e7a6,0x2c25335f,0x471c59e5,0x5d385e06,0xa2a762d6,0xdefc65f3,0x7a7a5e15,0xc542dac4,0xd7bb8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2fa66d21e4308e98,0x30ece5ebf0cf524f,0x47bf7d2f77113658,0x1ef4e7a6720cbbc5,0x471c59e52c25335f,0xa2a762d65d385e06,0x7a7a5e15defc65f3,0xd7bb8c542dac4}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2fa66d21e4308e98,0x30ece5ebf0cf524f,0x47bf7d2f77113658,0x1ef4e7a6720cbbc5,0x471c59e52c25335f,0xa2a762d65d385e06,0x7a7a5e15defc65f3,0xd7bb8c542dac4}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3249,0xe4fe,0xec61,0x49e0,0x5b5f,0xc495,0x6ef6,0x811,0x4fdf,0x59fc,0xbd69,0x608e,0xafe2,0xe9a9,0x5706,0x98ac,0xb327,0x481a,0x9c4e,0xecac,0x19fa,0x6401,0xfaad,0x14a4,0xeda,0x3fb5,0x7eb5,0x9768,0x6597,0x4c10,0xdc28,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3249,0xe4fe,0xec61,0x49e0,0x5b5f,0xc495,0x6ef6,0x811,0x4fdf,0x59fc,0xbd69,0x608e,0xafe2,0xe9a9,0x5706,0x98ac,0xb327,0x481a,0x9c4e,0xecac,0x19fa,0x6401,0xfaad,0x14a4,0xeda,0x3fb5,0x7eb5,0x9768,0x6597,0x4c10,0xdc28,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe4fe3249,0x49e0ec61,0xc4955b5f,0x8116ef6,0x59fc4fdf,0x608ebd69,0xe9a9afe2,0x98ac5706,0x481ab327,0xecac9c4e,0x640119fa,0x14a4faad,0x3fb50eda,0x97687eb5,0x4c106597,0xbdc28}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe4fe3249,0x49e0ec61,0xc4955b5f,0x8116ef6,0x59fc4fdf,0x608ebd69,0xe9a9afe2,0x98ac5706,0x481ab327,0xecac9c4e,0x640119fa,0x14a4faad,0x3fb50eda,0x97687eb5,0x4c106597,0xbdc28}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x49e0ec61e4fe3249,0x8116ef6c4955b5f,0x608ebd6959fc4fdf,0x98ac5706e9a9afe2,0xecac9c4e481ab327,0x14a4faad640119fa,0x97687eb53fb50eda,0xbdc284c106597}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x49e0ec61e4fe3249,0x8116ef6c4955b5f,0x608ebd6959fc4fdf,0x98ac5706e9a9afe2,0xecac9c4e481ab327,0x14a4faad640119fa,0x97687eb53fb50eda,0xbdc284c106597}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdf0d,0x881f,0x3659,0x14b0,0x4ccb,0x97,0x134b,0x591c,0xafea,0xbc3e,0x6178,0xb14,0xdd18,0xa0c8,0x6c6d,0x7f5f,0x615f,0x98f0,0xe41c,0x8aa6,0xd34a,0x6ff2,0x57c,0xeae6,0x9847,0xb283,0x50c5,0x923b,0xed1e,0xe1ae,0x727b,0xa}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdf0d,0x881f,0x3659,0x14b0,0x4ccb,0x97,0x134b,0x591c,0xafea,0xbc3e,0x6178,0xb14,0xdd18,0xa0c8,0x6c6d,0x7f5f,0x615f,0x98f0,0xe41c,0x8aa6,0xd34a,0x6ff2,0x57c,0xeae6,0x9847,0xb283,0x50c5,0x923b,0xed1e,0xe1ae,0x727b,0xa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x881fdf0d,0x14b03659,0x974ccb,0x591c134b,0xbc3eafea,0xb146178,0xa0c8dd18,0x7f5f6c6d,0x98f0615f,0x8aa6e41c,0x6ff2d34a,0xeae6057c,0xb2839847,0x923b50c5,0xe1aeed1e,0xa727b}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x881fdf0d,0x14b03659,0x974ccb,0x591c134b,0xbc3eafea,0xb146178,0xa0c8dd18,0x7f5f6c6d,0x98f0615f,0x8aa6e41c,0x6ff2d34a,0xeae6057c,0xb2839847,0x923b50c5,0xe1aeed1e,0xa727b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x14b03659881fdf0d,0x591c134b00974ccb,0xb146178bc3eafea,0x7f5f6c6da0c8dd18,0x8aa6e41c98f0615f,0xeae6057c6ff2d34a,0x923b50c5b2839847,0xa727be1aeed1e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x14b03659881fdf0d,0x591c134b00974ccb,0xb146178bc3eafea,0x7f5f6c6da0c8dd18,0x8aa6e41c98f0615f,0xeae6057c6ff2d34a,0x923b50c5b2839847,0xa727be1aeed1e}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}}} #endif , #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3337,0x86b9,0x96a8,0x892f,0x9f38,0x4c8e,0xc497,0xdf75,0x4fd7,0xd5b3,0x5dec,0xd543,0xf3dc,0xd5c3,0x8de3,0xa71e,0xd939,0x1324,0x7073,0x5af3,0xbb6b,0x4122,0x9d0,0x81d7,0x74de,0x3877,0x5ea0,0x6d85,0x7c31,0x8f2d,0x9df2,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3337,0x86b9,0x96a8,0x892f,0x9f38,0x4c8e,0xc497,0xdf75,0x4fd7,0xd5b3,0x5dec,0xd543,0xf3dc,0xd5c3,0x8de3,0xa71e,0xd939,0x1324,0x7073,0x5af3,0xbb6b,0x4122,0x9d0,0x81d7,0x74de,0x3877,0x5ea0,0x6d85,0x7c31,0x8f2d,0x9df2,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b93337,0x892f96a8,0x4c8e9f38,0xdf75c497,0xd5b34fd7,0xd5435dec,0xd5c3f3dc,0xa71e8de3,0x1324d939,0x5af37073,0x4122bb6b,0x81d709d0,0x387774de,0x6d855ea0,0x8f2d7c31,0x19df2}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b93337,0x892f96a8,0x4c8e9f38,0xdf75c497,0xd5b34fd7,0xd5435dec,0xd5c3f3dc,0xa71e8de3,0x1324d939,0x5af37073,0x4122bb6b,0x81d709d0,0x387774de,0x6d855ea0,0x8f2d7c31,0x19df2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x892f96a886b93337,0xdf75c4974c8e9f38,0xd5435decd5b34fd7,0xa71e8de3d5c3f3dc,0x5af370731324d939,0x81d709d04122bb6b,0x6d855ea0387774de,0x19df28f2d7c31}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x892f96a886b93337,0xdf75c4974c8e9f38,0xd5435decd5b34fd7,0xa71e8de3d5c3f3dc,0x5af370731324d939,0x81d709d04122bb6b,0x6d855ea0387774de,0x19df28f2d7c31}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x28e,0xcfd,0xe1c2,0x2061,0xe412,0x3b18,0x40df,0x716c,0xd025,0xd980,0xc041,0xebb9,0xbb45,0x4982,0x17de,0xa8fe,0xb079,0xffe5,0x34d9,0x2aa6,0x2b0b,0x6787,0x9bab,0x6bc3,0x7365,0x3c03,0xf512,0xb57b,0x897,0xf0b5,0x9c9c,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x28e,0xcfd,0xe1c2,0x2061,0xe412,0x3b18,0x40df,0x716c,0xd025,0xd980,0xc041,0xebb9,0xbb45,0x4982,0x17de,0xa8fe,0xb079,0xffe5,0x34d9,0x2aa6,0x2b0b,0x6787,0x9bab,0x6bc3,0x7365,0x3c03,0xf512,0xb57b,0x897,0xf0b5,0x9c9c,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xcfd028e,0x2061e1c2,0x3b18e412,0x716c40df,0xd980d025,0xebb9c041,0x4982bb45,0xa8fe17de,0xffe5b079,0x2aa634d9,0x67872b0b,0x6bc39bab,0x3c037365,0xb57bf512,0xf0b50897,0x89c9c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcfd028e,0x2061e1c2,0x3b18e412,0x716c40df,0xd980d025,0xebb9c041,0x4982bb45,0xa8fe17de,0xffe5b079,0x2aa634d9,0x67872b0b,0x6bc39bab,0x3c037365,0xb57bf512,0xf0b50897,0x89c9c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2061e1c20cfd028e,0x716c40df3b18e412,0xebb9c041d980d025,0xa8fe17de4982bb45,0x2aa634d9ffe5b079,0x6bc39bab67872b0b,0xb57bf5123c037365,0x89c9cf0b50897}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2061e1c20cfd028e,0x716c40df3b18e412,0xebb9c041d980d025,0xa8fe17de4982bb45,0x2aa634d9ffe5b079,0x6bc39bab67872b0b,0xb57bf5123c037365,0x89c9cf0b50897}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x45b1,0x429e,0x37b2,0xd856,0x2f81,0x86cd,0x39cb,0x81ba,0x771e,0xee7e,0x58,0xfbe4,0xa4b,0x28fb,0x7a7d,0x5bb8,0xa413,0x1657,0x2d54,0x3a9d,0xbbad,0x75a3,0x689,0x3069,0xb0ad,0x2fdd,0x2e81,0xad39,0xd3cd,0x2bff,0xb5cb,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x45b1,0x429e,0x37b2,0xd856,0x2f81,0x86cd,0x39cb,0x81ba,0x771e,0xee7e,0x58,0xfbe4,0xa4b,0x28fb,0x7a7d,0x5bb8,0xa413,0x1657,0x2d54,0x3a9d,0xbbad,0x75a3,0x689,0x3069,0xb0ad,0x2fdd,0x2e81,0xad39,0xd3cd,0x2bff,0xb5cb,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x429e45b1,0xd85637b2,0x86cd2f81,0x81ba39cb,0xee7e771e,0xfbe40058,0x28fb0a4b,0x5bb87a7d,0x1657a413,0x3a9d2d54,0x75a3bbad,0x30690689,0x2fddb0ad,0xad392e81,0x2bffd3cd,0x7b5cb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x429e45b1,0xd85637b2,0x86cd2f81,0x81ba39cb,0xee7e771e,0xfbe40058,0x28fb0a4b,0x5bb87a7d,0x1657a413,0x3a9d2d54,0x75a3bbad,0x30690689,0x2fddb0ad,0xad392e81,0x2bffd3cd,0x7b5cb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd85637b2429e45b1,0x81ba39cb86cd2f81,0xfbe40058ee7e771e,0x5bb87a7d28fb0a4b,0x3a9d2d541657a413,0x3069068975a3bbad,0xad392e812fddb0ad,0x7b5cb2bffd3cd}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd85637b2429e45b1,0x81ba39cb86cd2f81,0xfbe40058ee7e771e,0x5bb87a7d28fb0a4b,0x3a9d2d541657a413,0x3069068975a3bbad,0xad392e812fddb0ad,0x7b5cb2bffd3cd}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xccc9,0x7946,0x6957,0x76d0,0x60c7,0xb371,0x3b68,0x208a,0xb028,0x2a4c,0xa213,0x2abc,0xc23,0x2a3c,0x721c,0x58e1,0x26c6,0xecdb,0x8f8c,0xa50c,0x4494,0xbedd,0xf62f,0x7e28,0x8b21,0xc788,0xa15f,0x927a,0x83ce,0x70d2,0x620d,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xccc9,0x7946,0x6957,0x76d0,0x60c7,0xb371,0x3b68,0x208a,0xb028,0x2a4c,0xa213,0x2abc,0xc23,0x2a3c,0x721c,0x58e1,0x26c6,0xecdb,0x8f8c,0xa50c,0x4494,0xbedd,0xf62f,0x7e28,0x8b21,0xc788,0xa15f,0x927a,0x83ce,0x70d2,0x620d,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7946ccc9,0x76d06957,0xb37160c7,0x208a3b68,0x2a4cb028,0x2abca213,0x2a3c0c23,0x58e1721c,0xecdb26c6,0xa50c8f8c,0xbedd4494,0x7e28f62f,0xc7888b21,0x927aa15f,0x70d283ce,0xe620d}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7946ccc9,0x76d06957,0xb37160c7,0x208a3b68,0x2a4cb028,0x2abca213,0x2a3c0c23,0x58e1721c,0xecdb26c6,0xa50c8f8c,0xbedd4494,0x7e28f62f,0xc7888b21,0x927aa15f,0x70d283ce,0xe620d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc 
= 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x76d069577946ccc9,0x208a3b68b37160c7,0x2abca2132a4cb028,0x58e1721c2a3c0c23,0xa50c8f8cecdb26c6,0x7e28f62fbedd4494,0x927aa15fc7888b21,0xe620d70d283ce}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x76d069577946ccc9,0x208a3b68b37160c7,0x2abca2132a4cb028,0x58e1721c2a3c0c23,0xa50c8f8cecdb26c6,0x7e28f62fbedd4494,0x927aa15fc7888b21,0xe620d70d283ce}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xaa15,0x7f4c,0xb027,0xba3f,0xa936,0x25fb,0xd8a6,0xc32c,0x4ff6,0xcba,0x7e3a,0x6517,0x8b62,0x1a7d,0x90bb,0x13df,0x3bed,0x3d1a,0x462b,0x6826,0xf410,0xe897,0x8229,0x4b78,0xee4b,0x42f9,0x6ed,0x6da5,0x4789,0x56bf,0x95bb,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xaa15,0x7f4c,0xb027,0xba3f,0xa936,0x25fb,0xd8a6,0xc32c,0x4ff6,0xcba,0x7e3a,0x6517,0x8b62,0x1a7d,0x90bb,0x13df,0x3bed,0x3d1a,0x462b,0x6826,0xf410,0xe897,0x8229,0x4b78,0xee4b,0x42f9,0x6ed,0x6da5,0x4789,0x56bf,0x95bb,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7f4caa15,0xba3fb027,0x25fba936,0xc32cd8a6,0xcba4ff6,0x65177e3a,0x1a7d8b62,0x13df90bb,0x3d1a3bed,0x6826462b,0xe897f410,0x4b788229,0x42f9ee4b,0x6da506ed,0x56bf4789,0xb95bb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7f4caa15,0xba3fb027,0x25fba936,0xc32cd8a6,0xcba4ff6,0x65177e3a,0x1a7d8b62,0x13df90bb,0x3d1a3bed,0x6826462b,0xe897f410,0x4b788229,0x42f9ee4b,0x6da506ed,0x56bf4789,0xb95bb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xba3fb0277f4caa15,0xc32cd8a625fba936,0x65177e3a0cba4ff6,0x13df90bb1a7d8b62,0x6826462b3d1a3bed,0x4b788229e897f410,0x6da506ed42f9ee4b,0xb95bb56bf4789}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xba3fb0277f4caa15,0xc32cd8a625fba936,0x65177e3a0cba4ff6,0x13df90bb1a7d8b62,0x6826462b3d1a3bed,0x4b788229e897f410,0x6da506ed42f9ee4b,0xb95bb56bf4789}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc893,0xf896,0x2771,0xa804,0x1b30,0x95f4,0x9365,0xd12c,0x33e,0xa849,0x9eb8,0x99bc,0xbb85,0x5dc7,0x7fc2,0x63f9,0x71ec,0x9605,0x475f,0xb8e1,0xc488,0xe25f,0x7f40,0x8735,0xecac,0xd7f,0x2994,0x17fb,0xf1ae,0xdafb,0xc2a,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc893,0xf896,0x2771,0xa804,0x1b30,0x95f4,0x9365,0xd12c,0x33e,0xa849,0x9eb8,0x99bc,0xbb85,0x5dc7,0x7fc2,0x63f9,0x71ec,0x9605,0x475f,0xb8e1,0xc488,0xe25f,0x7f40,0x8735,0xecac,0xd7f,0x2994,0x17fb,0xf1ae,0xdafb,0xc2a,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf896c893,0xa8042771,0x95f41b30,0xd12c9365,0xa849033e,0x99bc9eb8,0x5dc7bb85,0x63f97fc2,0x960571ec,0xb8e1475f,0xe25fc488,0x87357f40,0xd7fecac,0x17fb2994,0xdafbf1ae,0x30c2a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf896c893,0xa8042771,0x95f41b30,0xd12c9365,0xa849033e,0x99bc9eb8,0x5dc7bb85,0x63f97fc2,0x960571ec,0xb8e1475f,0xe25fc488,0x87357f40,0xd7fecac,0x17fb2994,0xdafbf1ae,0x30c2a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa8042771f896c893,0xd12c936595f41b30,0x99bc9eb8a849033e,0x63f97fc25dc7bb85,0xb8e1475f960571ec,0x87357f40e25fc488,0x17fb29940d7fecac,0x30c2adafbf1ae}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa8042771f896c893,0xd12c936595f41b30,0x99bc9eb8a849033e,0x63f97fc25dc7bb85,0xb8e1475f960571ec,0x87357f40e25fc488,0x17fb29940d7fecac,0x30c2adafbf1ae}}}} #endif }, { #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3bfd,0x13ce,0x920a,0x911b,0x4570,0x25b1,0xd461,0xc4e5,0x637e,0x243d,0x5ee1,0x2e39,0x5d17,0x952,0x68c2,0x7a32,0x2b9d,0x2f39,0xe4d1,0x13a4,0x6ad4,0x6cd2,0x9b,0xa287,0x5fc3,0x37c9,0xd69b,0xa250,0x1cb2,0xbc08,0xc8f9,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3bfd,0x13ce,0x920a,0x911b,0x4570,0x25b1,0xd461,0xc4e5,0x637e,0x243d,0x5ee1,0x2e39,0x5d17,0x952,0x68c2,0x7a32,0x2b9d,0x2f39,0xe4d1,0x13a4,0x6ad4,0x6cd2,0x9b,0xa287,0x5fc3,0x37c9,0xd69b,0xa250,0x1cb2,0xbc08,0xc8f9,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x13ce3bfd,0x911b920a,0x25b14570,0xc4e5d461,0x243d637e,0x2e395ee1,0x9525d17,0x7a3268c2,0x2f392b9d,0x13a4e4d1,0x6cd26ad4,0xa287009b,0x37c95fc3,0xa250d69b,0xbc081cb2,0x1c8f9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x13ce3bfd,0x911b920a,0x25b14570,0xc4e5d461,0x243d637e,0x2e395ee1,0x9525d17,0x7a3268c2,0x2f392b9d,0x13a4e4d1,0x6cd26ad4,0xa287009b,0x37c95fc3,0xa250d69b,0xbc081cb2,0x1c8f9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x911b920a13ce3bfd,0xc4e5d46125b14570,0x2e395ee1243d637e,0x7a3268c209525d17,0x13a4e4d12f392b9d,0xa287009b6cd26ad4,0xa250d69b37c95fc3,0x1c8f9bc081cb2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x911b920a13ce3bfd,0xc4e5d46125b14570,0x2e395ee1243d637e,0x7a3268c209525d17,0x13a4e4d12f392b9d,0xa287009b6cd26ad4,0xa250d69b37c95fc3,0x1c8f9bc081cb2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x55eb,0x80b3,0x4fd8,0x45c0,0x56c9,0xda04,0x2759,0x3cd3,0xb009,0xf345,0x81c5,0x9ae8,0x749d,0xe582,0x6f44,0xec20,0xc412,0xc2e5,0xb9d4,0x97d9,0xbef,0x1768,0x7dd6,0xb487,0x11b4,0xbd06,0xf912,0x925a,0xb876,0xa940,0x6a44,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x55eb,0x80b3,0x4fd8,0x45c0,0x56c9,0xda04,0x2759,0x3cd3,0xb009,0xf345,0x81c5,0x9ae8,0x749d,0xe582,0x6f44,0xec20,0xc412,0xc2e5,0xb9d4,0x97d9,0xbef,0x1768,0x7dd6,0xb487,0x11b4,0xbd06,0xf912,0x925a,0xb876,0xa940,0x6a44,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x80b355eb,0x45c04fd8,0xda0456c9,0x3cd32759,0xf345b009,0x9ae881c5,0xe582749d,0xec206f44,0xc2e5c412,0x97d9b9d4,0x17680bef,0xb4877dd6,0xbd0611b4,0x925af912,0xa940b876,0x46a44}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x80b355eb,0x45c04fd8,0xda0456c9,0x3cd32759,0xf345b009,0x9ae881c5,0xe582749d,0xec206f44,0xc2e5c412,0x97d9b9d4,0x17680bef,0xb4877dd6,0xbd0611b4,0x925af912,0xa940b876,0x46a44}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x45c04fd880b355eb,0x3cd32759da0456c9,0x9ae881c5f345b009,0xec206f44e582749d,0x97d9b9d4c2e5c412,0xb4877dd617680bef,0x925af912bd0611b4,0x46a44a940b876}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x45c04fd880b355eb,0x3cd32759da0456c9,0x9ae881c5f345b009,0xec206f44e582749d,0x97d9b9d4c2e5c412,0xb4877dd617680bef,0x925af912bd0611b4,0x46a44a940b876}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x4d6, 0x18fd, 0x18c0, 0x13c1, 0x1718, 0x122c, 0x830, 0xa02, 0xee0, 0x1ad1, 0x1485, 0x27f, 0x13e5, 0x118f, 0xdce, 0x31c, 0x1b49, 0x998, 0x117, 0x343, 0xdae, 0x1fe7, 0x16a0, 0x1c43, 0x172, 0xa6e, 0x702, 0xeb8, 0x835, 0x1cf, 0x48d, 0x4e4, 0x13eb, 0x57d, 0xccf, 0x97e, 0x3b6, 0x910, 0x2dd} @@ -737,223 +737,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x625d,0xe815,0xba6a,0x6297,0x1a75,0xd2b2,0xc698,0xd28b,0xb1,0x97f3,0x82e4,0x4126,0xf8e7,0x3639,0x1816,0x9c43,0xd2cf,0x37da,0xad52,0x2b8a,0x91cb,0x4297,0x6be7,0x1a98,0x19e4,0xa8a,0x22de,0x835c,0x4d5f,0x4596,0xa957,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x625d,0xe815,0xba6a,0x6297,0x1a75,0xd2b2,0xc698,0xd28b,0xb1,0x97f3,0x82e4,0x4126,0xf8e7,0x3639,0x1816,0x9c43,0xd2cf,0x37da,0xad52,0x2b8a,0x91cb,0x4297,0x6be7,0x1a98,0x19e4,0xa8a,0x22de,0x835c,0x4d5f,0x4596,0xa957,0x7}}}} #elif GMP_LIMB_BITS == 32 
-{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe815625d,0x6297ba6a,0xd2b21a75,0xd28bc698,0x97f300b1,0x412682e4,0x3639f8e7,0x9c431816,0x37dad2cf,0x2b8aad52,0x429791cb,0x1a986be7,0xa8a19e4,0x835c22de,0x45964d5f,0x7a957}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe815625d,0x6297ba6a,0xd2b21a75,0xd28bc698,0x97f300b1,0x412682e4,0x3639f8e7,0x9c431816,0x37dad2cf,0x2b8aad52,0x429791cb,0x1a986be7,0xa8a19e4,0x835c22de,0x45964d5f,0x7a957}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6297ba6ae815625d,0xd28bc698d2b21a75,0x412682e497f300b1,0x9c4318163639f8e7,0x2b8aad5237dad2cf,0x1a986be7429791cb,0x835c22de0a8a19e4,0x7a95745964d5f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6297ba6ae815625d,0xd28bc698d2b21a75,0x412682e497f300b1,0x9c4318163639f8e7,0x2b8aad5237dad2cf,0x1a986be7429791cb,0x835c22de0a8a19e4,0x7a95745964d5f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf6c6,0x71f7,0xa10,0xf04d,0x23dd,0x2419,0x60da,0xbcad,0xe86c,0xb26d,0x2f1d,0xdb61,0xae7,0xd947,0xb159,0x9d94,0x42db,0x8401,0xe3fe,0xcd5c,0x7586,0xfd38,0xf28a,0xc4c5,0x40d7,0x5c22,0x185c,0xd31f,0x843f,0xe782,0xb462,0x5}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf6c6,0x71f7,0xa10,0xf04d,0x23dd,0x2419,0x60da,0xbcad,0xe86c,0xb26d,0x2f1d,0xdb61,0xae7,0xd947,0xb159,0x9d94,0x42db,0x8401,0xe3fe,0xcd5c,0x7586,0xfd38,0xf28a,0xc4c5,0x40d7,0x5c22,0x185c,0xd31f,0x843f,0xe782,0xb462,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x71f7f6c6,0xf04d0a10,0x241923dd,0xbcad60da,0xb26de86c,0xdb612f1d,0xd9470ae7,0x9d94b159,0x840142db,0xcd5ce3fe,0xfd387586,0xc4c5f28a,0x5c2240d7,0xd31f185c,0xe782843f,0x5b462}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x71f7f6c6,0xf04d0a10,0x241923dd,0xbcad60da,0xb26de86c,0xdb612f1d,0xd9470ae7,0x9d94b159,0x840142db,0xcd5ce3fe,0xfd387586,0xc4c5f28a,0x5c2240d7,0xd31f185c,0xe782843f,0x5b462}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf04d0a1071f7f6c6,0xbcad60da241923dd,0xdb612f1db26de86c,0x9d94b159d9470ae7,0xcd5ce3fe840142db,0xc4c5f28afd387586,0xd31f185c5c2240d7,0x5b462e782843f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf04d0a1071f7f6c6,0xbcad60da241923dd,0xdb612f1db26de86c,0x9d94b159d9470ae7,0xcd5ce3fe840142db,0xc4c5f28afd387586,0xd31f185c5c2240d7,0x5b462e782843f}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1c53,0x7318,0xfc96,0x4b34,0xd504,0xae17,0x4d56,0xa914,0x6a8c,0x69,0x2448,0x28b,0x5716,0xce1b,0xa7cc,0xd29c,0x48cf,0x1028,0xc81e,0x40dd,0xbcdf,0x6d4,0x36f7,0xcb9f,0x8d7,0x3a34,0x99c0,0x38d3,0x6e18,0xb4bc,0xe1a8,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1c53,0x7318,0xfc96,0x4b34,0xd504,0xae17,0x4d56,0xa914,0x6a8c,0x69,0x2448,0x28b,0x5716,0xce1b,0xa7cc,0xd29c,0x48cf,0x1028,0xc81e,0x40dd,0xbcdf,0x6d4,0x36f7,0xcb9f,0x8d7,0x3a34,0x99c0,0x38d3,0x6e18,0xb4bc,0xe1a8,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x73181c53,0x4b34fc96,0xae17d504,0xa9144d56,0x696a8c,0x28b2448,0xce1b5716,0xd29ca7cc,0x102848cf,0x40ddc81e,0x6d4bcdf,0xcb9f36f7,0x3a3408d7,0x38d399c0,0xb4bc6e18,0xbe1a8}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x73181c53,0x4b34fc96,0xae17d504,0xa9144d56,0x696a8c,0x28b2448,0xce1b5716,0xd29ca7cc,0x102848cf,0x40ddc81e,0x6d4bcdf,0xcb9f36f7,0x3a3408d7,0x38d399c0,0xb4bc6e18,0xbe1a8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4b34fc9673181c53,0xa9144d56ae17d504,0x28b244800696a8c,0xd29ca7ccce1b5716,0x40ddc81e102848cf,0xcb9f36f706d4bcdf,0x38d399c03a3408d7,0xbe1a8b4bc6e18}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4b34fc9673181c53,0xa9144d56ae17d504,0x28b244800696a8c,0xd29ca7ccce1b5716,0x40ddc81e102848cf,0xcb9f36f706d4bcdf,0x38d399c03a3408d7,0xbe1a8b4bc6e18}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9da3,0x17ea,0x4595,0x9d68,0xe58a,0x2d4d,0x3967,0x2d74,0xff4e,0x680c,0x7d1b,0xbed9,0x718,0xc9c6,0xe7e9,0x63bc,0x2d30,0xc825,0x52ad,0xd475,0x6e34,0xbd68,0x9418,0xe567,0xe61b,0xf575,0xdd21,0x7ca3,0xb2a0,0xba69,0x56a8,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9da3,0x17ea,0x4595,0x9d68,0xe58a,0x2d4d,0x3967,0x2d74,0xff4e,0x680c,0x7d1b,0xbed9,0x718,0xc9c6,0xe7e9,0x63bc,0x2d30,0xc825,0x52ad,0xd475,0x6e34,0xbd68,0x9418,0xe567,0xe61b,0xf575,0xdd21,0x7ca3,0xb2a0,0xba69,0x56a8,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x17ea9da3,0x9d684595,0x2d4de58a,0x2d743967,0x680cff4e,0xbed97d1b,0xc9c60718,0x63bce7e9,0xc8252d30,0xd47552ad,0xbd686e34,0xe5679418,0xf575e61b,0x7ca3dd21,0xba69b2a0,0x856a8}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x17ea9da3,0x9d684595,0x2d4de58a,0x2d743967,0x680cff4e,0xbed97d1b,0xc9c60718,0x63bce7e9,0xc8252d30,0xd47552ad,0xbd686e34,0xe5679418,0xf575e61b,0x7ca3dd21,0xba69b2a0,0x856a8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d68459517ea9da3,0x2d7439672d4de58a,0xbed97d1b680cff4e,0x63bce7e9c9c60718,0xd47552adc8252d30,0xe5679418bd686e34,0x7ca3dd21f575e61b,0x856a8ba69b2a0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d68459517ea9da3,0x2d7439672d4de58a,0xbed97d1b680cff4e,0x63bce7e9c9c60718,0xd47552adc8252d30,0xe5679418bd686e34,0x7ca3dd21f575e61b,0x856a8ba69b2a0}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +{{{._mp_alloc = 0, 
._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1aff,0x9f84,0xf1c6,0xd816,0xbdd0,0xd450,0x1990,0x119,0xbcf7,0x1a97,0x4780,0x8209,0x695b,0x1d73,0x20ba,0x7b53,0x5e3c,0x4ce5,0xac53,0x351f,0xaaa3,0x5a3e,0xd54c,0x121f,0xbf17,0xdb55,0xc9c,0x8370,0x2061,0x415c,0x1f35,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1aff,0x9f84,0xf1c6,0xd816,0xbdd0,0xd450,0x1990,0x119,0xbcf7,0x1a97,0x4780,0x8209,0x695b,0x1d73,0x20ba,0x7b53,0x5e3c,0x4ce5,0xac53,0x351f,0xaaa3,0x5a3e,0xd54c,0x121f,0xbf17,0xdb55,0xc9c,0x8370,0x2061,0x415c,0x1f35,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9f841aff,0xd816f1c6,0xd450bdd0,0x1191990,0x1a97bcf7,0x82094780,0x1d73695b,0x7b5320ba,0x4ce55e3c,0x351fac53,0x5a3eaaa3,0x121fd54c,0xdb55bf17,0x83700c9c,0x415c2061,0xc1f35}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9f841aff,0xd816f1c6,0xd450bdd0,0x1191990,0x1a97bcf7,0x82094780,0x1d73695b,0x7b5320ba,0x4ce55e3c,0x351fac53,0x5a3eaaa3,0x121fd54c,0xdb55bf17,0x83700c9c,0x415c2061,0xc1f35}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd816f1c69f841aff,0x1191990d450bdd0,0x820947801a97bcf7,0x7b5320ba1d73695b,0x351fac534ce55e3c,0x121fd54c5a3eaaa3,0x83700c9cdb55bf17,0xc1f35415c2061}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd816f1c69f841aff,0x1191990d450bdd0,0x820947801a97bcf7,0x7b5320ba1d73695b,0x351fac534ce55e3c,0x121fd54c5a3eaaa3,0x83700c9cdb55bf17,0xc1f35415c2061}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x7734,0xde6f,0xbab1,0xd4f3,0xc928,0x6c68,0x69b0,0x7cc0,0x994f,0x296c,0xb1dc,0x2eb2,0xe4ce,0x8494,0xa8ff,0x95d3,0x5f30,0xe7f,0x918,0x6cd6,0xae27,0x747c,0x1f93,0xed96,0x5590,0xc91a,0x713d,0xc33e,0xc075,0x40fd,0x9ce5,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x7734,0xde6f,0xbab1,0xd4f3,0xc928,0x6c68,0x69b0,0x7cc0,0x994f,0x296c,0xb1dc,0x2eb2,0xe4ce,0x8494,0xa8ff,0x95d3,0x5f30,0xe7f,0x918,0x6cd6,0xae27,0x747c,0x1f93,0xed96,0x5590,0xc91a,0x713d,0xc33e,0xc075,0x40fd,0x9ce5,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xde6f7734,0xd4f3bab1,0x6c68c928,0x7cc069b0,0x296c994f,0x2eb2b1dc,0x8494e4ce,0x95d3a8ff,0xe7f5f30,0x6cd60918,0x747cae27,0xed961f93,0xc91a5590,0xc33e713d,0x40fdc075,0x39ce5}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xde6f7734,0xd4f3bab1,0x6c68c928,0x7cc069b0,0x296c994f,0x2eb2b1dc,0x8494e4ce,0x95d3a8ff,0xe7f5f30,0x6cd60918,0x747cae27,0xed961f93,0xc91a5590,0xc33e713d,0x40fdc075,0x39ce5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd4f3bab1de6f7734,0x7cc069b06c68c928,0x2eb2b1dc296c994f,0x95d3a8ff8494e4ce,0x6cd609180e7f5f30,0xed961f93747cae27,0xc33e713dc91a5590,0x39ce540fdc075}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd4f3bab1de6f7734,0x7cc069b06c68c928,0x2eb2b1dc296c994f,0x95d3a8ff8494e4ce,0x6cd609180e7f5f30,0xed961f93747cae27,0xc33e713dc91a5590,0x39ce540fdc075}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xda85,0x89f5,0x1aaf,0x9ec7,0xcfff,0xec63,0x3ae9,0x20bc,0xc2f3,0x9942,0x7d84,0xfa25,0x5e69,0xeb7b,0xc357,0x9342,0x5c58,0xd26c,0x857b,0x7a7f,0x757,0xfb5c,0xbb97,0x33,0x6c28,0xfceb,0xd644,0xcc0a,0x22ad,0xe1c0,0x12d6,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xda85,0x89f5,0x1aaf,0x9ec7,0xcfff,0xec63,0x3ae9,0x20bc,0xc2f3,0x9942,0x7d84,0xfa25,0x5e69,0xeb7b,0xc357,0x9342,0x5c58,0xd26c,0x857b,0x7a7f,0x757,0xfb5c,0xbb97,0x33,0x6c28,0xfceb,0xd644,0xcc0a,0x22ad,0xe1c0,0x12d6,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x89f5da85,0x9ec71aaf,0xec63cfff,0x20bc3ae9,0x9942c2f3,0xfa257d84,0xeb7b5e69,0x9342c357,0xd26c5c58,0x7a7f857b,0xfb5c0757,0x33bb97,0xfceb6c28,0xcc0ad644,0xe1c022ad,0x412d6}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x89f5da85,0x9ec71aaf,0xec63cfff,0x20bc3ae9,0x9942c2f3,0xfa257d84,0xeb7b5e69,0x9342c357,0xd26c5c58,0x7a7f857b,0xfb5c0757,0x33bb97,0xfceb6c28,0xcc0ad644,0xe1c022ad,0x412d6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9ec71aaf89f5da85,0x20bc3ae9ec63cfff,0xfa257d849942c2f3,0x9342c357eb7b5e69,0x7a7f857bd26c5c58,0x33bb97fb5c0757,0xcc0ad644fceb6c28,0x412d6e1c022ad}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9ec71aaf89f5da85,0x20bc3ae9ec63cfff,0xfa257d849942c2f3,0x9342c357eb7b5e69,0x7a7f857bd26c5c58,0x33bb97fb5c0757,0xcc0ad644fceb6c28,0x412d6e1c022ad}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe501,0x607b,0xe39,0x27e9,0x422f,0x2baf,0xe66f,0xfee6,0x4308,0xe568,0xb87f,0x7df6,0x96a4,0xe28c,0xdf45,0x84ac,0xa1c3,0xb31a,0x53ac,0xcae0,0x555c,0xa5c1,0x2ab3,0xede0,0x40e8,0x24aa,0xf363,0x7c8f,0xdf9e,0xbea3,0xe0ca,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe501,0x607b,0xe39,0x27e9,0x422f,0x2baf,0xe66f,0xfee6,0x4308,0xe568,0xb87f,0x7df6,0x96a4,0xe28c,0xdf45,0x84ac,0xa1c3,0xb31a,0x53ac,0xcae0,0x555c,0xa5c1,0x2ab3,0xede0,0x40e8,0x24aa,0xf363,0x7c8f,0xdf9e,0xbea3,0xe0ca,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x607be501,0x27e90e39,0x2baf422f,0xfee6e66f,0xe5684308,0x7df6b87f,0xe28c96a4,0x84acdf45,0xb31aa1c3,0xcae053ac,0xa5c1555c,0xede02ab3,0x24aa40e8,0x7c8ff363,0xbea3df9e,0x3e0ca}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x607be501,0x27e90e39,0x2baf422f,0xfee6e66f,0xe5684308,0x7df6b87f,0xe28c96a4,0x84acdf45,0xb31aa1c3,0xcae053ac,0xa5c1555c,0xede02ab3,0x24aa40e8,0x7c8ff363,0xbea3df9e,0x3e0ca}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x27e90e39607be501,0xfee6e66f2baf422f,0x7df6b87fe5684308,0x84acdf45e28c96a4,0xcae053acb31aa1c3,0xede02ab3a5c1555c,0x7c8ff36324aa40e8,0x3e0cabea3df9e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x27e90e39607be501,0xfee6e66f2baf422f,0x7df6b87fe5684308,0x84acdf45e28c96a4,0xcae053acb31aa1c3,0xede02ab3a5c1555c,0x7c8ff36324aa40e8,0x3e0cabea3df9e}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x625d,0xe815,0xba6a,0x6297,0x1a75,0xd2b2,0xc698,0xd28b,0xb1,0x97f3,0x82e4,0x4126,0xf8e7,0x3639,0x1816,0x9c43,0xd2cf,0x37da,0xad52,0x2b8a,0x91cb,0x4297,0x6be7,0x1a98,0x19e4,0xa8a,0x22de,0x835c,0x4d5f,0x4596,0xa957,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x625d,0xe815,0xba6a,0x6297,0x1a75,0xd2b2,0xc698,0xd28b,0xb1,0x97f3,0x82e4,0x4126,0xf8e7,0x3639,0x1816,0x9c43,0xd2cf,0x37da,0xad52,0x2b8a,0x91cb,0x4297,0x6be7,0x1a98,0x19e4,0xa8a,0x22de,0x835c,0x4d5f,0x4596,0xa957,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe815625d,0x6297ba6a,0xd2b21a75,0xd28bc698,0x97f300b1,0x412682e4,0x3639f8e7,0x9c431816,0x37dad2cf,0x2b8aad52,0x429791cb,0x1a986be7,0xa8a19e4,0x835c22de,0x45964d5f,0x7a957}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe815625d,0x6297ba6a,0xd2b21a75,0xd28bc698,0x97f300b1,0x412682e4,0x3639f8e7,0x9c431816,0x37dad2cf,0x2b8aad52,0x429791cb,0x1a986be7,0xa8a19e4,0x835c22de,0x45964d5f,0x7a957}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6297ba6ae815625d,0xd28bc698d2b21a75,0x412682e497f300b1,0x9c4318163639f8e7,0x2b8aad5237dad2cf,0x1a986be7429791cb,0x835c22de0a8a19e4,0x7a95745964d5f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6297ba6ae815625d,0xd28bc698d2b21a75,0x412682e497f300b1,0x9c4318163639f8e7,0x2b8aad5237dad2cf,0x1a986be7429791cb,0x835c22de0a8a19e4,0x7a95745964d5f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf6c6,0x71f7,0xa10,0xf04d,0x23dd,0x2419,0x60da,0xbcad,0xe86c,0xb26d,0x2f1d,0xdb61,0xae7,0xd947,0xb159,0x9d94,0x42db,0x8401,0xe3fe,0xcd5c,0x7586,0xfd38,0xf28a,0xc4c5,0x40d7,0x5c22,0x185c,0xd31f,0x843f,0xe782,0xb462,0x5}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf6c6,0x71f7,0xa10,0xf04d,0x23dd,0x2419,0x60da,0xbcad,0xe86c,0xb26d,0x2f1d,0xdb61,0xae7,0xd947,0xb159,0x9d94,0x42db,0x8401,0xe3fe,0xcd5c,0x7586,0xfd38,0xf28a,0xc4c5,0x40d7,0x5c22,0x185c,0xd31f,0x843f,0xe782,0xb462,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x71f7f6c6,0xf04d0a10,0x241923dd,0xbcad60da,0xb26de86c,0xdb612f1d,0xd9470ae7,0x9d94b159,0x840142db,0xcd5ce3fe,0xfd387586,0xc4c5f28a,0x5c2240d7,0xd31f185c,0xe782843f,0x5b462}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x71f7f6c6,0xf04d0a10,0x241923dd,0xbcad60da,0xb26de86c,0xdb612f1d,0xd9470ae7,0x9d94b159,0x840142db,0xcd5ce3fe,0xfd387586,0xc4c5f28a,0x5c2240d7,0xd31f185c,0xe782843f,0x5b462}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf04d0a1071f7f6c6,0xbcad60da241923dd,0xdb612f1db26de86c,0x9d94b159d9470ae7,0xcd5ce3fe840142db,0xc4c5f28afd387586,0xd31f185c5c2240d7,0x5b462e782843f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf04d0a1071f7f6c6,0xbcad60da241923dd,0xdb612f1db26de86c,0x9d94b159d9470ae7,0xcd5ce3fe840142db,0xc4c5f28afd387586,0xd31f185c5c2240d7,0x5b462e782843f}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1c53,0x7318,0xfc96,0x4b34,0xd504,0xae17,0x4d56,0xa914,0x6a8c,0x69,0x2448,0x28b,0x5716,0xce1b,0xa7cc,0xd29c,0x48cf,0x1028,0xc81e,0x40dd,0xbcdf,0x6d4,0x36f7,0xcb9f,0x8d7,0x3a34,0x99c0,0x38d3,0x6e18,0xb4bc,0xe1a8,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1c53,0x7318,0xfc96,0x4b34,0xd504,0xae17,0x4d56,0xa914,0x6a8c,0x69,0x2448,0x28b,0x5716,0xce1b,0xa7cc,0xd29c,0x48cf,0x1028,0xc81e,0x40dd,0xbcdf,0x6d4,0x36f7,0xcb9f,0x8d7,0x3a34,0x99c0,0x38d3,0x6e18,0xb4bc,0xe1a8,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x73181c53,0x4b34fc96,0xae17d504,0xa9144d56,0x696a8c,0x28b2448,0xce1b5716,0xd29ca7cc,0x102848cf,0x40ddc81e,0x6d4bcdf,0xcb9f36f7,0x3a3408d7,0x38d399c0,0xb4bc6e18,0xbe1a8}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x73181c53,0x4b34fc96,0xae17d504,0xa9144d56,0x696a8c,0x28b2448,0xce1b5716,0xd29ca7cc,0x102848cf,0x40ddc81e,0x6d4bcdf,0xcb9f36f7,0x3a3408d7,0x38d399c0,0xb4bc6e18,0xbe1a8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4b34fc9673181c53,0xa9144d56ae17d504,0x28b244800696a8c,0xd29ca7ccce1b5716,0x40ddc81e102848cf,0xcb9f36f706d4bcdf,0x38d399c03a3408d7,0xbe1a8b4bc6e18}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4b34fc9673181c53,0xa9144d56ae17d504,0x28b244800696a8c,0xd29ca7ccce1b5716,0x40ddc81e102848cf,0xcb9f36f706d4bcdf,0x38d399c03a3408d7,0xbe1a8b4bc6e18}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9da3,0x17ea,0x4595,0x9d68,0xe58a,0x2d4d,0x3967,0x2d74,0xff4e,0x680c,0x7d1b,0xbed9,0x718,0xc9c6,0xe7e9,0x63bc,0x2d30,0xc825,0x52ad,0xd475,0x6e34,0xbd68,0x9418,0xe567,0xe61b,0xf575,0xdd21,0x7ca3,0xb2a0,0xba69,0x56a8,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9da3,0x17ea,0x4595,0x9d68,0xe58a,0x2d4d,0x3967,0x2d74,0xff4e,0x680c,0x7d1b,0xbed9,0x718,0xc9c6,0xe7e9,0x63bc,0x2d30,0xc825,0x52ad,0xd475,0x6e34,0xbd68,0x9418,0xe567,0xe61b,0xf575,0xdd21,0x7ca3,0xb2a0,0xba69,0x56a8,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x17ea9da3,0x9d684595,0x2d4de58a,0x2d743967,0x680cff4e,0xbed97d1b,0xc9c60718,0x63bce7e9,0xc8252d30,0xd47552ad,0xbd686e34,0xe5679418,0xf575e61b,0x7ca3dd21,0xba69b2a0,0x856a8}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x17ea9da3,0x9d684595,0x2d4de58a,0x2d743967,0x680cff4e,0xbed97d1b,0xc9c60718,0x63bce7e9,0xc8252d30,0xd47552ad,0xbd686e34,0xe5679418,0xf575e61b,0x7ca3dd21,0xba69b2a0,0x856a8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d68459517ea9da3,0x2d7439672d4de58a,0xbed97d1b680cff4e,0x63bce7e9c9c60718,0xd47552adc8252d30,0xe5679418bd686e34,0x7ca3dd21f575e61b,0x856a8ba69b2a0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d68459517ea9da3,0x2d7439672d4de58a,0xbed97d1b680cff4e,0x63bce7e9c9c60718,0xd47552adc8252d30,0xe5679418bd686e34,0x7ca3dd21f575e61b,0x856a8ba69b2a0}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x679c,0x35ac,0x6c8c,0xee5e,0x2827,0x29fa,0x9f6c,0xbda,0x2083,0x5e20,0xd351,0x39bd,0xd9bc,0x4085,0x3727,0x8f2,0xe905,0x55dd,0x6f90,0x6e26,0x6779,0xf15a,0xf170,0xec90,0xdb0e,0x53a0,0x6f99,0xe710,0xad92,0xa7f0,0xe2e1,0xd}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x679c,0x35ac,0x6c8c,0xee5e,0x2827,0x29fa,0x9f6c,0xbda,0x2083,0x5e20,0xd351,0x39bd,0xd9bc,0x4085,0x3727,0x8f2,0xe905,0x55dd,0x6f90,0x6e26,0x6779,0xf15a,0xf170,0xec90,0xdb0e,0x53a0,0x6f99,0xe710,0xad92,0xa7f0,0xe2e1,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x35ac679c,0xee5e6c8c,0x29fa2827,0xbda9f6c,0x5e202083,0x39bdd351,0x4085d9bc,0x8f23727,0x55dde905,0x6e266f90,0xf15a6779,0xec90f170,0x53a0db0e,0xe7106f99,0xa7f0ad92,0xde2e1}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x35ac679c,0xee5e6c8c,0x29fa2827,0xbda9f6c,0x5e202083,0x39bdd351,0x4085d9bc,0x8f23727,0x55dde905,0x6e266f90,0xf15a6779,0xec90f170,0x53a0db0e,0xe7106f99,0xa7f0ad92,0xde2e1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xee5e6c8c35ac679c,0xbda9f6c29fa2827,0x39bdd3515e202083,0x8f237274085d9bc,0x6e266f9055dde905,0xec90f170f15a6779,0xe7106f9953a0db0e,0xde2e1a7f0ad92}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xee5e6c8c35ac679c,0xbda9f6c29fa2827,0x39bdd3515e202083,0x8f237274085d9bc,0x6e266f9055dde905,0xec90f170f15a6779,0xe7106f9953a0db0e,0xde2e1a7f0ad92}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa483,0xbf25,0x238c,0x4c65,0xdd0b,0xccc9,0xc5af,0xac20,0xe998,0xb162,0xe2bf,0xbd24,0x5fd,0x6720,0xd781,0xd37d,0xa89,0x595a,0x76b0,0x7f86,0xdea4,0x59ea,0x2c01,0xd679,0x714b,0x5454,0xe262,0x2bcf,0xfad4,0x8bc0,0x8cd3,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa483,0xbf25,0x238c,0x4c65,0xdd0b,0xccc9,0xc5af,0xac20,0xe998,0xb162,0xe2bf,0xbd24,0x5fd,0x6720,0xd781,0xd37d,0xa89,0x595a,0x76b0,0x7f86,0xdea4,0x59ea,0x2c01,0xd679,0x714b,0x5454,0xe262,0x2bcf,0xfad4,0x8bc0,0x8cd3,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbf25a483,0x4c65238c,0xccc9dd0b,0xac20c5af,0xb162e998,0xbd24e2bf,0x672005fd,0xd37dd781,0x595a0a89,0x7f8676b0,0x59eadea4,0xd6792c01,0x5454714b,0x2bcfe262,0x8bc0fad4,0xc8cd3}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbf25a483,0x4c65238c,0xccc9dd0b,0xac20c5af,0xb162e998,0xbd24e2bf,0x672005fd,0xd37dd781,0x595a0a89,0x7f8676b0,0x59eadea4,0xd6792c01,0x5454714b,0x2bcfe262,0x8bc0fad4,0xc8cd3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4c65238cbf25a483,0xac20c5afccc9dd0b,0xbd24e2bfb162e998,0xd37dd781672005fd,0x7f8676b0595a0a89,0xd6792c0159eadea4,0x2bcfe2625454714b,0xc8cd38bc0fad4}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4c65238cbf25a483,0xac20c5afccc9dd0b,0xbd24e2bfb162e998,0xd37dd781672005fd,0x7f8676b0595a0a89,0xd6792c0159eadea4,0x2bcfe2625454714b,0xc8cd38bc0fad4}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3f72,0x6188,0x95e8,0xed15,0x2b1a,0x2fd,0xaae9,0x15d9,0x5945,0x23ff,0xfe55,0xce25,0xaa48,0xa648,0x8534,0x16db,0x3fcf,0xa301,0xfb7c,0x3a68,0x4ba,0x1c1d,0x30ee,0xf044,0x116f,0xc4f8,0x98b2,0x4971,0xea5c,0xb93e,0x2836,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3f72,0x6188,0x95e8,0xed15,0x2b1a,0x2fd,0xaae9,0x15d9,0x5945,0x23ff,0xfe55,0xce25,0xaa48,0xa648,0x8534,0x16db,0x3fcf,0xa301,0xfb7c,0x3a68,0x4ba,0x1c1d,0x30ee,0xf044,0x116f,0xc4f8,0x98b2,0x4971,0xea5c,0xb93e,0x2836,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x61883f72,0xed1595e8,0x2fd2b1a,0x15d9aae9,0x23ff5945,0xce25fe55,0xa648aa48,0x16db8534,0xa3013fcf,0x3a68fb7c,0x1c1d04ba,0xf04430ee,0xc4f8116f,0x497198b2,0xb93eea5c,0x32836}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x61883f72,0xed1595e8,0x2fd2b1a,0x15d9aae9,0x23ff5945,0xce25fe55,0xa648aa48,0x16db8534,0xa3013fcf,0x3a68fb7c,0x1c1d04ba,0xf04430ee,0xc4f8116f,0x497198b2,0xb93eea5c,0x32836}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, 
._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xed1595e861883f72,0x15d9aae902fd2b1a,0xce25fe5523ff5945,0x16db8534a648aa48,0x3a68fb7ca3013fcf,0xf04430ee1c1d04ba,0x497198b2c4f8116f,0x32836b93eea5c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xed1595e861883f72,0x15d9aae902fd2b1a,0xce25fe5523ff5945,0x16db8534a648aa48,0x3a68fb7ca3013fcf,0xf04430ee1c1d04ba,0x497198b2c4f8116f,0x32836b93eea5c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9864,0xca53,0x9373,0x11a1,0xd7d8,0xd605,0x6093,0xf425,0xdf7c,0xa1df,0x2cae,0xc642,0x2643,0xbf7a,0xc8d8,0xf70d,0x16fa,0xaa22,0x906f,0x91d9,0x9886,0xea5,0xe8f,0x136f,0x24f1,0xac5f,0x9066,0x18ef,0x526d,0x580f,0x1d1e,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9864,0xca53,0x9373,0x11a1,0xd7d8,0xd605,0x6093,0xf425,0xdf7c,0xa1df,0x2cae,0xc642,0x2643,0xbf7a,0xc8d8,0xf70d,0x16fa,0xaa22,0x906f,0x91d9,0x9886,0xea5,0xe8f,0x136f,0x24f1,0xac5f,0x9066,0x18ef,0x526d,0x580f,0x1d1e,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca539864,0x11a19373,0xd605d7d8,0xf4256093,0xa1dfdf7c,0xc6422cae,0xbf7a2643,0xf70dc8d8,0xaa2216fa,0x91d9906f,0xea59886,0x136f0e8f,0xac5f24f1,0x18ef9066,0x580f526d,0x21d1e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca539864,0x11a19373,0xd605d7d8,0xf4256093,0xa1dfdf7c,0xc6422cae,0xbf7a2643,0xf70dc8d8,0xaa2216fa,0x91d9906f,0xea59886,0x136f0e8f,0xac5f24f1,0x18ef9066,0x580f526d,0x21d1e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x11a19373ca539864,0xf4256093d605d7d8,0xc6422caea1dfdf7c,0xf70dc8d8bf7a2643,0x91d9906faa2216fa,0x136f0e8f0ea59886,0x18ef9066ac5f24f1,0x21d1e580f526d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x11a19373ca539864,0xf4256093d605d7d8,0xc6422caea1dfdf7c,0xf70dc8d8bf7a2643,0x91d9906faa2216fa,0x136f0e8f0ea59886,0x18ef9066ac5f24f1,0x21d1e580f526d}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x185f, 0xecc, 0x21c, 0xa62, 0x77, 0x8b1, 0x1188, 0xec2, 0xda1, 0xbf0, 0xb11, 0x82d, 0x1fe9, 0x10d4, 0x1e36, 0x194e, 0x77e, 0x1112, 0x14c0, 0x1dcd, 0x1be7, 0x1a4c, 0x140e, 0x10c5, 0x1aaf, 0x236, 0xdf3, 0x1a21, 0x1dff, 0x15bb, 0xda8, 0x30e, 0x17b0, 0xc7b, 0xd1, 0xcb, 0x868, 0x2e5, 0x5a} @@ -1213,223 +1213,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5b0b,0xfec,0x3254,0x9d11,0x8130,0x32bb,0x65ca,0x86b8,0x8184,0xfce7,0x3cc3,0x667c,0xa54b,0x4e65,0x271e,0x9834,0xa9c,0x1d81,0x5e0d,0x45fd,0x26eb,0x4b0b,0x5f8f,0x7e57,0xd36f,0xcb4c,0x56b4,0xe984,0xad75,0xcfdf,0x59a1,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5b0b,0xfec,0x3254,0x9d11,0x8130,0x32bb,0x65ca,0x86b8,0x8184,0xfce7,0x3cc3,0x667c,0xa54b,0x4e65,0x271e,0x9834,0xa9c,0x1d81,0x5e0d,0x45fd,0x26eb,0x4b0b,0x5f8f,0x7e57,0xd36f,0xcb4c,0x56b4,0xe984,0xad75,0xcfdf,0x59a1,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xfec5b0b,0x9d113254,0x32bb8130,0x86b865ca,0xfce78184,0x667c3cc3,0x4e65a54b,0x9834271e,0x1d810a9c,0x45fd5e0d,0x4b0b26eb,0x7e575f8f,0xcb4cd36f,0xe98456b4,0xcfdfad75,0xb59a1}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xfec5b0b,0x9d113254,0x32bb8130,0x86b865ca,0xfce78184,0x667c3cc3,0x4e65a54b,0x9834271e,0x1d810a9c,0x45fd5e0d,0x4b0b26eb,0x7e575f8f,0xcb4cd36f,0xe98456b4,0xcfdfad75,0xb59a1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d1132540fec5b0b,0x86b865ca32bb8130,0x667c3cc3fce78184,0x9834271e4e65a54b,0x45fd5e0d1d810a9c,0x7e575f8f4b0b26eb,0xe98456b4cb4cd36f,0xb59a1cfdfad75}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d1132540fec5b0b,0x86b865ca32bb8130,0x667c3cc3fce78184,0x9834271e4e65a54b,0x45fd5e0d1d810a9c,0x7e575f8f4b0b26eb,0xe98456b4cb4cd36f,0xb59a1cfdfad75}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2bba,0x15e8,0x1c2b,0x55ed,0xb72c,0xb3cc,0x3a5c,0xb409,0x2f3d,0x71b,0xd365,0x90a1,0xb2f5,0x1e26,0xbe0,0xa633,0xc5a6,0xe2bd,0xe0e3,0x49b3,0xd4c8,0x9f1c,0x9ba3,0x3674,0x2e5a,0x5411,0xc603,0xcdd3,0xfb23,0x2e7e,0x88a,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2bba,0x15e8,0x1c2b,0x55ed,0xb72c,0xb3cc,0x3a5c,0xb409,0x2f3d,0x71b,0xd365,0x90a1,0xb2f5,0x1e26,0xbe0,0xa633,0xc5a6,0xe2bd,0xe0e3,0x49b3,0xd4c8,0x9f1c,0x9ba3,0x3674,0x2e5a,0x5411,0xc603,0xcdd3,0xfb23,0x2e7e,0x88a,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x15e82bba,0x55ed1c2b,0xb3ccb72c,0xb4093a5c,0x71b2f3d,0x90a1d365,0x1e26b2f5,0xa6330be0,0xe2bdc5a6,0x49b3e0e3,0x9f1cd4c8,0x36749ba3,0x54112e5a,0xcdd3c603,0x2e7efb23,0x4088a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x15e82bba,0x55ed1c2b,0xb3ccb72c,0xb4093a5c,0x71b2f3d,0x90a1d365,0x1e26b2f5,0xa6330be0,0xe2bdc5a6,0x49b3e0e3,0x9f1cd4c8,0x36749ba3,0x54112e5a,0xcdd3c603,0x2e7efb23,0x4088a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55ed1c2b15e82bba,0xb4093a5cb3ccb72c,0x90a1d365071b2f3d,0xa6330be01e26b2f5,0x49b3e0e3e2bdc5a6,0x36749ba39f1cd4c8,0xcdd3c60354112e5a,0x4088a2e7efb23}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55ed1c2b15e82bba,0xb4093a5cb3ccb72c,0x90a1d365071b2f3d,0xa6330be01e26b2f5,0x49b3e0e3e2bdc5a6,0x36749ba39f1cd4c8,0xcdd3c60354112e5a,0x4088a2e7efb23}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa8e5,0xb5b7,0x12fc,0x2365,0x6f00,0x2267,0x5260,0x5ece,0xbf38,0x93ee,0xfcde,0xe5b3,0x9f9,0x712a,0x1770,0xdd48,0xdd27,0x5350,0xb7f5,0xbdf,0x46a4,0x7cb4,0xe6a9,0xca36,0x8565,0x3c9,0xe43b,0x3713,0x4fe2,0xeeb1,0x806a,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa8e5,0xb5b7,0x12fc,0x2365,0x6f00,0x2267,0x5260,0x5ece,0xbf38,0x93ee,0xfcde,0xe5b3,0x9f9,0x712a,0x1770,0xdd48,0xdd27,0x5350,0xb7f5,0xbdf,0x46a4,0x7cb4,0xe6a9,0xca36,0x8565,0x3c9,0xe43b,0x3713,0x4fe2,0xeeb1,0x806a,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb5b7a8e5,0x236512fc,0x22676f00,0x5ece5260,0x93eebf38,0xe5b3fcde,0x712a09f9,0xdd481770,0x5350dd27,0xbdfb7f5,0x7cb446a4,0xca36e6a9,0x3c98565,0x3713e43b,0xeeb14fe2,0x4806a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb5b7a8e5,0x236512fc,0x22676f00,0x5ece5260,0x93eebf38,0xe5b3fcde,0x712a09f9,0xdd481770,0x5350dd27,0xbdfb7f5,0x7cb446a4,0xca36e6a9,0x3c98565,0x3713e43b,0xeeb14fe2,0x4806a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x236512fcb5b7a8e5,0x5ece526022676f00,0xe5b3fcde93eebf38,0xdd481770712a09f9,0xbdfb7f55350dd27,0xca36e6a97cb446a4,0x3713e43b03c98565,0x4806aeeb14fe2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = 
(mp_limb_t[]) {0x236512fcb5b7a8e5,0x5ece526022676f00,0xe5b3fcde93eebf38,0xdd481770712a09f9,0xbdfb7f55350dd27,0xca36e6a97cb446a4,0x3713e43b03c98565,0x4806aeeb14fe2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa4f5,0xf013,0xcdab,0x62ee,0x7ecf,0xcd44,0x9a35,0x7947,0x7e7b,0x318,0xc33c,0x9983,0x5ab4,0xb19a,0xd8e1,0x67cb,0xf563,0xe27e,0xa1f2,0xba02,0xd914,0xb4f4,0xa070,0x81a8,0x2c90,0x34b3,0xa94b,0x167b,0x528a,0x3020,0xa65e,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa4f5,0xf013,0xcdab,0x62ee,0x7ecf,0xcd44,0x9a35,0x7947,0x7e7b,0x318,0xc33c,0x9983,0x5ab4,0xb19a,0xd8e1,0x67cb,0xf563,0xe27e,0xa1f2,0xba02,0xd914,0xb4f4,0xa070,0x81a8,0x2c90,0x34b3,0xa94b,0x167b,0x528a,0x3020,0xa65e,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf013a4f5,0x62eecdab,0xcd447ecf,0x79479a35,0x3187e7b,0x9983c33c,0xb19a5ab4,0x67cbd8e1,0xe27ef563,0xba02a1f2,0xb4f4d914,0x81a8a070,0x34b32c90,0x167ba94b,0x3020528a,0x4a65e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf013a4f5,0x62eecdab,0xcd447ecf,0x79479a35,0x3187e7b,0x9983c33c,0xb19a5ab4,0x67cbd8e1,0xe27ef563,0xba02a1f2,0xb4f4d914,0x81a8a070,0x34b32c90,0x167ba94b,0x3020528a,0x4a65e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x62eecdabf013a4f5,0x79479a35cd447ecf,0x9983c33c03187e7b,0x67cbd8e1b19a5ab4,0xba02a1f2e27ef563,0x81a8a070b4f4d914,0x167ba94b34b32c90,0x4a65e3020528a}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x62eecdabf013a4f5,0x79479a35cd447ecf,0x9983c33c03187e7b,0x67cbd8e1b19a5ab4,0xba02a1f2e27ef563,0x81a8a070b4f4d914,0x167ba94b34b32c90,0x4a65e3020528a}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5eb9,0x2393,0xd8e8,0xc566,0xd78,0xa77f,0x1bf1,0x4577,0x3141,0xecd3,0x132c,0x281,0x13b5,0x1d34,0xb4bb,0xf25,0xdc3,0xbf86,0x5e9f,0xde50,0xf536,0xe95e,0xd5b0,0x687d,0x3ab,0x992c,0xdb8d,0xc8cc,0xfaf0,0xd954,0x6e1a,0x5}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5eb9,0x2393,0xd8e8,0xc566,0xd78,0xa77f,0x1bf1,0x4577,0x3141,0xecd3,0x132c,0x281,0x13b5,0x1d34,0xb4bb,0xf25,0xdc3,0xbf86,0x5e9f,0xde50,0xf536,0xe95e,0xd5b0,0x687d,0x3ab,0x992c,0xdb8d,0xc8cc,0xfaf0,0xd954,0x6e1a,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x23935eb9,0xc566d8e8,0xa77f0d78,0x45771bf1,0xecd33141,0x281132c,0x1d3413b5,0xf25b4bb,0xbf860dc3,0xde505e9f,0xe95ef536,0x687dd5b0,0x992c03ab,0xc8ccdb8d,0xd954faf0,0x56e1a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x23935eb9,0xc566d8e8,0xa77f0d78,0x45771bf1,0xecd33141,0x281132c,0x1d3413b5,0xf25b4bb,0xbf860dc3,0xde505e9f,0xe95ef536,0x687dd5b0,0x992c03ab,0xc8ccdb8d,0xd954faf0,0x56e1a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc566d8e823935eb9,0x45771bf1a77f0d78,0x281132cecd33141,0xf25b4bb1d3413b5,0xde505e9fbf860dc3,0x687dd5b0e95ef536,0xc8ccdb8d992c03ab,0x56e1ad954faf0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc566d8e823935eb9,0x45771bf1a77f0d78,0x281132cecd33141,0xf25b4bb1d3413b5,0xde505e9fbf860dc3,0x687dd5b0e95ef536,0xc8ccdb8d992c03ab,0x56e1ad954faf0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf17c,0xf7a8,0xd9f7,0x1544,0xb2c8,0xf5aa,0x3812,0x3fba,0xf63e,0xb545,0x678c,0xad77,0xed9f,0x12f8,0xa5dc,0x74c9,0xec1d,0xc1e0,0x806f,0x14a0,0xfb25,0x34f3,0x606c,0x57d5,0x9733,0x9c8c,0x83e3,0xa787,0x7cae,0x503b,0x2499,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf17c,0xf7a8,0xd9f7,0x1544,0xb2c8,0xf5aa,0x3812,0x3fba,0xf63e,0xb545,0x678c,0xad77,0xed9f,0x12f8,0xa5dc,0x74c9,0xec1d,0xc1e0,0x806f,0x14a0,0xfb25,0x34f3,0x606c,0x57d5,0x9733,0x9c8c,0x83e3,0xa787,0x7cae,0x503b,0x2499,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xf7a8f17c,0x1544d9f7,0xf5aab2c8,0x3fba3812,0xb545f63e,0xad77678c,0x12f8ed9f,0x74c9a5dc,0xc1e0ec1d,0x14a0806f,0x34f3fb25,0x57d5606c,0x9c8c9733,0xa78783e3,0x503b7cae,0x12499}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf7a8f17c,0x1544d9f7,0xf5aab2c8,0x3fba3812,0xb545f63e,0xad77678c,0x12f8ed9f,0x74c9a5dc,0xc1e0ec1d,0x14a0806f,0x34f3fb25,0x57d5606c,0x9c8c9733,0xa78783e3,0x503b7cae,0x12499}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1544d9f7f7a8f17c,0x3fba3812f5aab2c8,0xad77678cb545f63e,0x74c9a5dc12f8ed9f,0x14a0806fc1e0ec1d,0x57d5606c34f3fb25,0xa78783e39c8c9733,0x12499503b7cae}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1544d9f7f7a8f17c,0x3fba3812f5aab2c8,0xad77678cb545f63e,0x74c9a5dc12f8ed9f,0x14a0806fc1e0ec1d,0x57d5606c34f3fb25,0xa78783e39c8c9733,0x12499503b7cae}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5d83,0x57ac,0xb73f,0xb74d,0x1869,0x3588,0x43,0x915,0x7f31,0x82eb,0x4487,0xb830,0x6627,0x70a7,0x9911,0x5646,0x4779,0xe113,0x168c,0x925d,0xc1e8,0xd347,0xa95e,0xd5a6,0x7deb,0xbeb,0x72,0xf755,0x306,0x9ee2,0x7ef9,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5d83,0x57ac,0xb73f,0xb74d,0x1869,0x3588,0x43,0x915,0x7f31,0x82eb,0x4487,0xb830,0x6627,0x70a7,0x9911,0x5646,0x4779,0xe113,0x168c,0x925d,0xc1e8,0xd347,0xa95e,0xd5a6,0x7deb,0xbeb,0x72,0xf755,0x306,0x9ee2,0x7ef9,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x57ac5d83,0xb74db73f,0x35881869,0x9150043,0x82eb7f31,0xb8304487,0x70a76627,0x56469911,0xe1134779,0x925d168c,0xd347c1e8,0xd5a6a95e,0xbeb7deb,0xf7550072,0x9ee20306,0x27ef9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x57ac5d83,0xb74db73f,0x35881869,0x9150043,0x82eb7f31,0xb8304487,0x70a76627,0x56469911,0xe1134779,0x925d168c,0xd347c1e8,0xd5a6a95e,0xbeb7deb,0xf7550072,0x9ee20306,0x27ef9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb74db73f57ac5d83,0x915004335881869,0xb830448782eb7f31,0x5646991170a76627,0x925d168ce1134779,0xd5a6a95ed347c1e8,0xf75500720beb7deb,0x27ef99ee20306}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb74db73f57ac5d83,0x915004335881869,0xb830448782eb7f31,0x5646991170a76627,0x925d168ce1134779,0xd5a6a95ed347c1e8,0xf75500720beb7deb,0x27ef99ee20306}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa147,0xdc6c,0x2717,0x3a99,0xf287,0x5880,0xe40e,0xba88,0xcebe,0x132c,0xecd3,0xfd7e,0xec4a,0xe2cb,0x4b44,0xf0da,0xf23c,0x4079,0xa160,0x21af,0xac9,0x16a1,0x2a4f,0x9782,0xfc54,0x66d3,0x2472,0x3733,0x50f,0x26ab,0x91e5,0xa}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa147,0xdc6c,0x2717,0x3a99,0xf287,0x5880,0xe40e,0xba88,0xcebe,0x132c,0xecd3,0xfd7e,0xec4a,0xe2cb,0x4b44,0xf0da,0xf23c,0x4079,0xa160,0x21af,0xac9,0x16a1,0x2a4f,0x9782,0xfc54,0x66d3,0x2472,0x3733,0x50f,0x26ab,0x91e5,0xa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdc6ca147,0x3a992717,0x5880f287,0xba88e40e,0x132ccebe,0xfd7eecd3,0xe2cbec4a,0xf0da4b44,0x4079f23c,0x21afa160,0x16a10ac9,0x97822a4f,0x66d3fc54,0x37332472,0x26ab050f,0xa91e5}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdc6ca147,0x3a992717,0x5880f287,0xba88e40e,0x132ccebe,0xfd7eecd3,0xe2cbec4a,0xf0da4b44,0x4079f23c,0x21afa160,0x16a10ac9,0x97822a4f,0x66d3fc54,0x37332472,0x26ab050f,0xa91e5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, 
._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3a992717dc6ca147,0xba88e40e5880f287,0xfd7eecd3132ccebe,0xf0da4b44e2cbec4a,0x21afa1604079f23c,0x97822a4f16a10ac9,0x3733247266d3fc54,0xa91e526ab050f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3a992717dc6ca147,0xba88e40e5880f287,0xfd7eecd3132ccebe,0xf0da4b44e2cbec4a,0x21afa1604079f23c,0x97822a4f16a10ac9,0x3733247266d3fc54,0xa91e526ab050f}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5b0b,0xfec,0x3254,0x9d11,0x8130,0x32bb,0x65ca,0x86b8,0x8184,0xfce7,0x3cc3,0x667c,0xa54b,0x4e65,0x271e,0x9834,0xa9c,0x1d81,0x5e0d,0x45fd,0x26eb,0x4b0b,0x5f8f,0x7e57,0xd36f,0xcb4c,0x56b4,0xe984,0xad75,0xcfdf,0x59a1,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5b0b,0xfec,0x3254,0x9d11,0x8130,0x32bb,0x65ca,0x86b8,0x8184,0xfce7,0x3cc3,0x667c,0xa54b,0x4e65,0x271e,0x9834,0xa9c,0x1d81,0x5e0d,0x45fd,0x26eb,0x4b0b,0x5f8f,0x7e57,0xd36f,0xcb4c,0x56b4,0xe984,0xad75,0xcfdf,0x59a1,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xfec5b0b,0x9d113254,0x32bb8130,0x86b865ca,0xfce78184,0x667c3cc3,0x4e65a54b,0x9834271e,0x1d810a9c,0x45fd5e0d,0x4b0b26eb,0x7e575f8f,0xcb4cd36f,0xe98456b4,0xcfdfad75,0xb59a1}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xfec5b0b,0x9d113254,0x32bb8130,0x86b865ca,0xfce78184,0x667c3cc3,0x4e65a54b,0x9834271e,0x1d810a9c,0x45fd5e0d,0x4b0b26eb,0x7e575f8f,0xcb4cd36f,0xe98456b4,0xcfdfad75,0xb59a1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d1132540fec5b0b,0x86b865ca32bb8130,0x667c3cc3fce78184,0x9834271e4e65a54b,0x45fd5e0d1d810a9c,0x7e575f8f4b0b26eb,0xe98456b4cb4cd36f,0xb59a1cfdfad75}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d1132540fec5b0b,0x86b865ca32bb8130,0x667c3cc3fce78184,0x9834271e4e65a54b,0x45fd5e0d1d810a9c,0x7e575f8f4b0b26eb,0xe98456b4cb4cd36f,0xb59a1cfdfad75}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2bba,0x15e8,0x1c2b,0x55ed,0xb72c,0xb3cc,0x3a5c,0xb409,0x2f3d,0x71b,0xd365,0x90a1,0xb2f5,0x1e26,0xbe0,0xa633,0xc5a6,0xe2bd,0xe0e3,0x49b3,0xd4c8,0x9f1c,0x9ba3,0x3674,0x2e5a,0x5411,0xc603,0xcdd3,0xfb23,0x2e7e,0x88a,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2bba,0x15e8,0x1c2b,0x55ed,0xb72c,0xb3cc,0x3a5c,0xb409,0x2f3d,0x71b,0xd365,0x90a1,0xb2f5,0x1e26,0xbe0,0xa633,0xc5a6,0xe2bd,0xe0e3,0x49b3,0xd4c8,0x9f1c,0x9ba3,0x3674,0x2e5a,0x5411,0xc603,0xcdd3,0xfb23,0x2e7e,0x88a,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x15e82bba,0x55ed1c2b,0xb3ccb72c,0xb4093a5c,0x71b2f3d,0x90a1d365,0x1e26b2f5,0xa6330be0,0xe2bdc5a6,0x49b3e0e3,0x9f1cd4c8,0x36749ba3,0x54112e5a,0xcdd3c603,0x2e7efb23,0x4088a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x15e82bba,0x55ed1c2b,0xb3ccb72c,0xb4093a5c,0x71b2f3d,0x90a1d365,0x1e26b2f5,0xa6330be0,0xe2bdc5a6,0x49b3e0e3,0x9f1cd4c8,0x36749ba3,0x54112e5a,0xcdd3c603,0x2e7efb23,0x4088a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55ed1c2b15e82bba,0xb4093a5cb3ccb72c,0x90a1d365071b2f3d,0xa6330be01e26b2f5,0x49b3e0e3e2bdc5a6,0x36749ba39f1cd4c8,0xcdd3c60354112e5a,0x4088a2e7efb23}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55ed1c2b15e82bba,0xb4093a5cb3ccb72c,0x90a1d365071b2f3d,0xa6330be01e26b2f5,0x49b3e0e3e2bdc5a6,0x36749ba39f1cd4c8,0xcdd3c60354112e5a,0x4088a2e7efb23}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS 
== 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa8e5,0xb5b7,0x12fc,0x2365,0x6f00,0x2267,0x5260,0x5ece,0xbf38,0x93ee,0xfcde,0xe5b3,0x9f9,0x712a,0x1770,0xdd48,0xdd27,0x5350,0xb7f5,0xbdf,0x46a4,0x7cb4,0xe6a9,0xca36,0x8565,0x3c9,0xe43b,0x3713,0x4fe2,0xeeb1,0x806a,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa8e5,0xb5b7,0x12fc,0x2365,0x6f00,0x2267,0x5260,0x5ece,0xbf38,0x93ee,0xfcde,0xe5b3,0x9f9,0x712a,0x1770,0xdd48,0xdd27,0x5350,0xb7f5,0xbdf,0x46a4,0x7cb4,0xe6a9,0xca36,0x8565,0x3c9,0xe43b,0x3713,0x4fe2,0xeeb1,0x806a,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb5b7a8e5,0x236512fc,0x22676f00,0x5ece5260,0x93eebf38,0xe5b3fcde,0x712a09f9,0xdd481770,0x5350dd27,0xbdfb7f5,0x7cb446a4,0xca36e6a9,0x3c98565,0x3713e43b,0xeeb14fe2,0x4806a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb5b7a8e5,0x236512fc,0x22676f00,0x5ece5260,0x93eebf38,0xe5b3fcde,0x712a09f9,0xdd481770,0x5350dd27,0xbdfb7f5,0x7cb446a4,0xca36e6a9,0x3c98565,0x3713e43b,0xeeb14fe2,0x4806a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x236512fcb5b7a8e5,0x5ece526022676f00,0xe5b3fcde93eebf38,0xdd481770712a09f9,0xbdfb7f55350dd27,0xca36e6a97cb446a4,0x3713e43b03c98565,0x4806aeeb14fe2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x236512fcb5b7a8e5,0x5ece526022676f00,0xe5b3fcde93eebf38,0xdd481770712a09f9,0xbdfb7f55350dd27,0xca36e6a97cb446a4,0x3713e43b03c98565,0x4806aeeb14fe2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa4f5,0xf013,0xcdab,0x62ee,0x7ecf,0xcd44,0x9a35,0x7947,0x7e7b,0x318,0xc33c,0x9983,0x5ab4,0xb19a,0xd8e1,0x67cb,0xf563,0xe27e,0xa1f2,0xba02,0xd914,0xb4f4,0xa070,0x81a8,0x2c90,0x34b3,0xa94b,0x167b,0x528a,0x3020,0xa65e,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa4f5,0xf013,0xcdab,0x62ee,0x7ecf,0xcd44,0x9a35,0x7947,0x7e7b,0x318,0xc33c,0x9983,0x5ab4,0xb19a,0xd8e1,0x67cb,0xf563,0xe27e,0xa1f2,0xba02,0xd914,0xb4f4,0xa070,0x81a8,0x2c90,0x34b3,0xa94b,0x167b,0x528a,0x3020,0xa65e,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf013a4f5,0x62eecdab,0xcd447ecf,0x79479a35,0x3187e7b,0x9983c33c,0xb19a5ab4,0x67cbd8e1,0xe27ef563,0xba02a1f2,0xb4f4d914,0x81a8a070,0x34b32c90,0x167ba94b,0x3020528a,0x4a65e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf013a4f5,0x62eecdab,0xcd447ecf,0x79479a35,0x3187e7b,0x9983c33c,0xb19a5ab4,0x67cbd8e1,0xe27ef563,0xba02a1f2,0xb4f4d914,0x81a8a070,0x34b32c90,0x167ba94b,0x3020528a,0x4a65e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x62eecdabf013a4f5,0x79479a35cd447ecf,0x9983c33c03187e7b,0x67cbd8e1b19a5ab4,0xba02a1f2e27ef563,0x81a8a070b4f4d914,0x167ba94b34b32c90,0x4a65e3020528a}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x62eecdabf013a4f5,0x79479a35cd447ecf,0x9983c33c03187e7b,0x67cbd8e1b19a5ab4,0xba02a1f2e27ef563,0x81a8a070b4f4d914,0x167ba94b34b32c90,0x4a65e3020528a}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x6f0b,0x3478,0x5aeb,0x64,0x9a1a,0xecff,0xccf0,0x2fab,0xf3a8,0x718a,0x97e7,0xc31a,0xa0cd,0xb872,0x514e,0x5ee1,0x4b79,0x4af9,0xd0c3,0x97c6,0x9591,0x2370,0xa987,0xa5e6,0xe201,0x8730,0x3150,0x1980,0x8452,0x3b83,0x25c9,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x6f0b,0x3478,0x5aeb,0x64,0x9a1a,0xecff,0xccf0,0x2fab,0xf3a8,0x718a,0x97e7,0xc31a,0xa0cd,0xb872,0x514e,0x5ee1,0x4b79,0x4af9,0xd0c3,0x97c6,0x9591,0x2370,0xa987,0xa5e6,0xe201,0x8730,0x3150,0x1980,0x8452,0x3b83,0x25c9,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x34786f0b,0x645aeb,0xecff9a1a,0x2fabccf0,0x718af3a8,0xc31a97e7,0xb872a0cd,0x5ee1514e,0x4af94b79,0x97c6d0c3,0x23709591,0xa5e6a987,0x8730e201,0x19803150,0x3b838452,0xb25c9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x34786f0b,0x645aeb,0xecff9a1a,0x2fabccf0,0x718af3a8,0xc31a97e7,0xb872a0cd,0x5ee1514e,0x4af94b79,0x97c6d0c3,0x23709591,0xa5e6a987,0x8730e201,0x19803150,0x3b838452,0xb25c9}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x645aeb34786f0b,0x2fabccf0ecff9a1a,0xc31a97e7718af3a8,0x5ee1514eb872a0cd,0x97c6d0c34af94b79,0xa5e6a98723709591,0x198031508730e201,0xb25c93b838452}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x645aeb34786f0b,0x2fabccf0ecff9a1a,0xc31a97e7718af3a8,0x5ee1514eb872a0cd,0x97c6d0c34af94b79,0xa5e6a98723709591,0x198031508730e201,0xb25c93b838452}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1de7,0x7f69,0xdefe,0xfc6b,0x6fd5,0xc100,0x5188,0x1318,0x416e,0x10dd,0x33ac,0x4260,0x8985,0x1d0e,0x5b13,0xd02e,0x6fb5,0x6e28,0x9b7d,0x4f72,0x9665,0xd5f3,0xf00d,0xda5f,0x98f2,0xd778,0x4b2a,0x958d,0xfcef,0xd837,0x4a93,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1de7,0x7f69,0xdefe,0xfc6b,0x6fd5,0xc100,0x5188,0x1318,0x416e,0x10dd,0x33ac,0x4260,0x8985,0x1d0e,0x5b13,0xd02e,0x6fb5,0x6e28,0x9b7d,0x4f72,0x9665,0xd5f3,0xf00d,0xda5f,0x98f2,0xd778,0x4b2a,0x958d,0xfcef,0xd837,0x4a93,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7f691de7,0xfc6bdefe,0xc1006fd5,0x13185188,0x10dd416e,0x426033ac,0x1d0e8985,0xd02e5b13,0x6e286fb5,0x4f729b7d,0xd5f39665,0xda5ff00d,0xd77898f2,0x958d4b2a,0xd837fcef,0x34a93}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7f691de7,0xfc6bdefe,0xc1006fd5,0x13185188,0x10dd416e,0x426033ac,0x1d0e8985,0xd02e5b13,0x6e286fb5,0x4f729b7d,0xd5f39665,0xda5ff00d,0xd77898f2,0x958d4b2a,0xd837fcef,0x34a93}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfc6bdefe7f691de7,0x13185188c1006fd5,0x426033ac10dd416e,0xd02e5b131d0e8985,0x4f729b7d6e286fb5,0xda5ff00dd5f39665,0x958d4b2ad77898f2,0x34a93d837fcef}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfc6bdefe7f691de7,0x13185188c1006fd5,0x426033ac10dd416e,0xd02e5b131d0e8985,0x4f729b7d6e286fb5,0xda5ff00dd5f39665,0x958d4b2ad77898f2,0x34a93d837fcef}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x8527,0x81f3,0xcb8f,0x5e0d,0x7c93,0x7448,0x613,0xedcf,0x7d31,0x77c7,0x19dc,0x8ace,0xbfb8,0xa582,0x9ccc,0x28df,0xb6e0,0x4f69,0x33e6,0x546b,0xcfb2,0x1627,0x53ed,0xdc8d,0xd80b,0xb843,0xc438,0xb942,0x8fb5,0xb3c0,0xc1dc,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x8527,0x81f3,0xcb8f,0x5e0d,0x7c93,0x7448,0x613,0xedcf,0x7d31,0x77c7,0x19dc,0x8ace,0xbfb8,0xa582,0x9ccc,0x28df,0xb6e0,0x4f69,0x33e6,0x546b,0xcfb2,0x1627,0x53ed,0xdc8d,0xd80b,0xb843,0xc438,0xb942,0x8fb5,0xb3c0,0xc1dc,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81f38527,0x5e0dcb8f,0x74487c93,0xedcf0613,0x77c77d31,0x8ace19dc,0xa582bfb8,0x28df9ccc,0x4f69b6e0,0x546b33e6,0x1627cfb2,0xdc8d53ed,0xb843d80b,0xb942c438,0xb3c08fb5,0x2c1dc}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81f38527,0x5e0dcb8f,0x74487c93,0xedcf0613,0x77c77d31,0x8ace19dc,0xa582bfb8,0x28df9ccc,0x4f69b6e0,0x546b33e6,0x1627cfb2,0xdc8d53ed,0xb843d80b,0xb942c438,0xb3c08fb5,0x2c1dc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5e0dcb8f81f38527,0xedcf061374487c93,0x8ace19dc77c77d31,0x28df9ccca582bfb8,0x546b33e64f69b6e0,0xdc8d53ed1627cfb2,0xb942c438b843d80b,0x2c1dcb3c08fb5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5e0dcb8f81f38527,0xedcf061374487c93,0x8ace19dc77c77d31,0x28df9ccca582bfb8,0x546b33e64f69b6e0,0xdc8d53ed1627cfb2,0xb942c438b843d80b,0x2c1dcb3c08fb5}}}} #endif , #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x90f5,0xcb87,0xa514,0xff9b,0x65e5,0x1300,0x330f,0xd054,0xc57,0x8e75,0x6818,0x3ce5,0x5f32,0x478d,0xaeb1,0xa11e,0xb486,0xb506,0x2f3c,0x6839,0x6a6e,0xdc8f,0x5678,0x5a19,0x1dfe,0x78cf,0xceaf,0xe67f,0x7bad,0xc47c,0xda36,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x90f5,0xcb87,0xa514,0xff9b,0x65e5,0x1300,0x330f,0xd054,0xc57,0x8e75,0x6818,0x3ce5,0x5f32,0x478d,0xaeb1,0xa11e,0xb486,0xb506,0x2f3c,0x6839,0x6a6e,0xdc8f,0x5678,0x5a19,0x1dfe,0x78cf,0xceaf,0xe67f,0x7bad,0xc47c,0xda36,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcb8790f5,0xff9ba514,0x130065e5,0xd054330f,0x8e750c57,0x3ce56818,0x478d5f32,0xa11eaeb1,0xb506b486,0x68392f3c,0xdc8f6a6e,0x5a195678,0x78cf1dfe,0xe67fceaf,0xc47c7bad,0x4da36}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcb8790f5,0xff9ba514,0x130065e5,0xd054330f,0x8e750c57,0x3ce56818,0x478d5f32,0xa11eaeb1,0xb506b486,0x68392f3c,0xdc8f6a6e,0x5a195678,0x78cf1dfe,0xe67fceaf,0xc47c7bad,0x4da36}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xff9ba514cb8790f5,0xd054330f130065e5,0x3ce568188e750c57,0xa11eaeb1478d5f32,0x68392f3cb506b486,0x5a195678dc8f6a6e,0xe67fceaf78cf1dfe,0x4da36c47c7bad}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xff9ba514cb8790f5,0xd054330f130065e5,0x3ce568188e750c57,0xa11eaeb1478d5f32,0x68392f3cb506b486,0x5a195678dc8f6a6e,0xe67fceaf78cf1dfe,0x4da36c47c7bad}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x14d5, 0x1163, 0xf47, 0x337, 0x71e, 0x4ad, 0x1071, 0x19d4, 0x11d9, 0xdce, 0x186a, 0x1785, 0x13c8, 0x695, 0xe1b, 0x54b, 0x174f, 0xd0c, 0x17cb, 0x17db, 0xb41, 0xb78, 0x1a46, 0x66f, 0x23, 0x836, 0x19ce, 0xcdf, 0x90b, 0x6fb, 0x1508, 0x1bb5, 0x6aa, 0x1219, 0x1bba, 0x1d67, 0x1e46, 0x8d8, 0x62c} @@ -1689,223 +1689,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3919,0x9bc5,0xcbb7,0xe8a6,0x3f55,0x22a,0x8690,0xc64e,0x8e93,0x6c4d,0x80e8,0x422f,0xffd,0x7e1,0xf545,0xb342,0x87be,0x80a5,0xa62b,0xfb8d,0x4048,0x466e,0x4697,0xd4d,0x9d79,0xc9ba,0x52c1,0xa008,0x21f1,0xf309,0xb87c,0xa}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3919,0x9bc5,0xcbb7,0xe8a6,0x3f55,0x22a,0x8690,0xc64e,0x8e93,0x6c4d,0x80e8,0x422f,0xffd,0x7e1,0xf545,0xb342,0x87be,0x80a5,0xa62b,0xfb8d,0x4048,0x466e,0x4697,0xd4d,0x9d79,0xc9ba,0x52c1,0xa008,0x21f1,0xf309,0xb87c,0xa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9bc53919,0xe8a6cbb7,0x22a3f55,0xc64e8690,0x6c4d8e93,0x422f80e8,0x7e10ffd,0xb342f545,0x80a587be,0xfb8da62b,0x466e4048,0xd4d4697,0xc9ba9d79,0xa00852c1,0xf30921f1,0xab87c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9bc53919,0xe8a6cbb7,0x22a3f55,0xc64e8690,0x6c4d8e93,0x422f80e8,0x7e10ffd,0xb342f545,0x80a587be,0xfb8da62b,0x466e4048,0xd4d4697,0xc9ba9d79,0xa00852c1,0xf30921f1,0xab87c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe8a6cbb79bc53919,0xc64e8690022a3f55,0x422f80e86c4d8e93,0xb342f54507e10ffd,0xfb8da62b80a587be,0xd4d4697466e4048,0xa00852c1c9ba9d79,0xab87cf30921f1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0xe8a6cbb79bc53919,0xc64e8690022a3f55,0x422f80e86c4d8e93,0xb342f54507e10ffd,0xfb8da62b80a587be,0xd4d4697466e4048,0xa00852c1c9ba9d79,0xab87cf30921f1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc03e,0x86b5,0xcdfe,0x5bc4,0xc7a1,0xbc7b,0x90d9,0x9464,0xc604,0x9345,0x853d,0xc710,0x1ad2,0x4b98,0x4050,0x8ef4,0xd73c,0x6664,0xbd2,0xb610,0x483e,0x5e87,0xabe9,0xcf72,0x679e,0xbfbf,0x52b1,0x3483,0xf7c,0x4a36,0xbd53,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc03e,0x86b5,0xcdfe,0x5bc4,0xc7a1,0xbc7b,0x90d9,0x9464,0xc604,0x9345,0x853d,0xc710,0x1ad2,0x4b98,0x4050,0x8ef4,0xd73c,0x6664,0xbd2,0xb610,0x483e,0x5e87,0xabe9,0xcf72,0x679e,0xbfbf,0x52b1,0x3483,0xf7c,0x4a36,0xbd53,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b5c03e,0x5bc4cdfe,0xbc7bc7a1,0x946490d9,0x9345c604,0xc710853d,0x4b981ad2,0x8ef44050,0x6664d73c,0xb6100bd2,0x5e87483e,0xcf72abe9,0xbfbf679e,0x348352b1,0x4a360f7c,0xebd53}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b5c03e,0x5bc4cdfe,0xbc7bc7a1,0x946490d9,0x9345c604,0xc710853d,0x4b981ad2,0x8ef44050,0x6664d73c,0xb6100bd2,0x5e87483e,0xcf72abe9,0xbfbf679e,0x348352b1,0x4a360f7c,0xebd53}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5bc4cdfe86b5c03e,0x946490d9bc7bc7a1,0xc710853d9345c604,0x8ef440504b981ad2,0xb6100bd26664d73c,0xcf72abe95e87483e,0x348352b1bfbf679e,0xebd534a360f7c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5bc4cdfe86b5c03e,0x946490d9bc7bc7a1,0xc710853d9345c604,0x8ef440504b981ad2,0xb6100bd26664d73c,0xcf72abe95e87483e,0x348352b1bfbf679e,0xebd534a360f7c}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5937,0x9b8c,0xba31,0x676c,0x8d31,0x9c3d,0xf619,0x42e2,0x4d1a,0xd072,0x76e5,0x390a,0x5086,0x7643,0x2d16,0xfd71,0x341,0xff7b,0x56ec,0xed63,0x1436,0x7324,0x6a5d,0x9488,0x9f0,0x145,0xd6b5,0x4043,0x10d8,0xeb6e,0x7784,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5937,0x9b8c,0xba31,0x676c,0x8d31,0x9c3d,0xf619,0x42e2,0x4d1a,0xd072,0x76e5,0x390a,0x5086,0x7643,0x2d16,0xfd71,0x341,0xff7b,0x56ec,0xed63,0x1436,0x7324,0x6a5d,0x9488,0x9f0,0x145,0xd6b5,0x4043,0x10d8,0xeb6e,0x7784,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b8c5937,0x676cba31,0x9c3d8d31,0x42e2f619,0xd0724d1a,0x390a76e5,0x76435086,0xfd712d16,0xff7b0341,0xed6356ec,0x73241436,0x94886a5d,0x14509f0,0x4043d6b5,0xeb6e10d8,0xf7784}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b8c5937,0x676cba31,0x9c3d8d31,0x42e2f619,0xd0724d1a,0x390a76e5,0x76435086,0xfd712d16,0xff7b0341,0xed6356ec,0x73241436,0x94886a5d,0x14509f0,0x4043d6b5,0xeb6e10d8,0xf7784}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x676cba319b8c5937,0x42e2f6199c3d8d31,0x390a76e5d0724d1a,0xfd712d1676435086,0xed6356ecff7b0341,0x94886a5d73241436,0x4043d6b5014509f0,0xf7784eb6e10d8}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x676cba319b8c5937,0x42e2f6199c3d8d31,0x390a76e5d0724d1a,0xfd712d1676435086,0xed6356ecff7b0341,0x94886a5d73241436,0x4043d6b5014509f0,0xf7784eb6e10d8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xc6e7,0x643a,0x3448,0x1759,0xc0aa,0xfdd5,0x796f,0x39b1,0x716c,0x93b2,0x7f17,0xbdd0,0xf002,0xf81e,0xaba,0x4cbd,0x7841,0x7f5a,0x59d4,0x472,0xbfb7,0xb991,0xb968,0xf2b2,0x6286,0x3645,0xad3e,0x5ff7,0xde0e,0xcf6,0x4783,0x5}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc6e7,0x643a,0x3448,0x1759,0xc0aa,0xfdd5,0x796f,0x39b1,0x716c,0x93b2,0x7f17,0xbdd0,0xf002,0xf81e,0xaba,0x4cbd,0x7841,0x7f5a,0x59d4,0x472,0xbfb7,0xb991,0xb968,0xf2b2,0x6286,0x3645,0xad3e,0x5ff7,0xde0e,0xcf6,0x4783,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x643ac6e7,0x17593448,0xfdd5c0aa,0x39b1796f,0x93b2716c,0xbdd07f17,0xf81ef002,0x4cbd0aba,0x7f5a7841,0x47259d4,0xb991bfb7,0xf2b2b968,0x36456286,0x5ff7ad3e,0xcf6de0e,0x54783}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x643ac6e7,0x17593448,0xfdd5c0aa,0x39b1796f,0x93b2716c,0xbdd07f17,0xf81ef002,0x4cbd0aba,0x7f5a7841,0x47259d4,0xb991bfb7,0xf2b2b968,0x36456286,0x5ff7ad3e,0xcf6de0e,0x54783}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17593448643ac6e7,0x39b1796ffdd5c0aa,0xbdd07f1793b2716c,0x4cbd0abaf81ef002,0x47259d47f5a7841,0xf2b2b968b991bfb7,0x5ff7ad3e36456286,0x547830cf6de0e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17593448643ac6e7,0x39b1796ffdd5c0aa,0xbdd07f1793b2716c,0x4cbd0abaf81ef002,0x47259d47f5a7841,0xf2b2b968b991bfb7,0x5ff7ad3e36456286,0x547830cf6de0e}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5fd3,0xc1bb,0x3527,0x289e,0x97fd,0xf5ce,0xa8e1,0xfbf2,0x8f04,0xb5e7,0xdf66,0xcb44,0x5b5,0x8314,0x31c,0x6e5c,0xa6b9,0x3134,0x3d19,0x5ea9,0x860d,0x37fe,0x8003,0xafb9,0xbfdd,0xf377,0xa36d,0xde5a,0xa9df,0x8da,0xc872,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5fd3,0xc1bb,0x3527,0x289e,0x97fd,0xf5ce,0xa8e1,0xfbf2,0x8f04,0xb5e7,0xdf66,0xcb44,0x5b5,0x8314,0x31c,0x6e5c,0xa6b9,0x3134,0x3d19,0x5ea9,0x860d,0x37fe,0x8003,0xafb9,0xbfdd,0xf377,0xa36d,0xde5a,0xa9df,0x8da,0xc872,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc1bb5fd3,0x289e3527,0xf5ce97fd,0xfbf2a8e1,0xb5e78f04,0xcb44df66,0x831405b5,0x6e5c031c,0x3134a6b9,0x5ea93d19,0x37fe860d,0xafb98003,0xf377bfdd,0xde5aa36d,0x8daa9df,0xbc872}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc1bb5fd3,0x289e3527,0xf5ce97fd,0xfbf2a8e1,0xb5e78f04,0xcb44df66,0x831405b5,0x6e5c031c,0x3134a6b9,0x5ea93d19,0x37fe860d,0xafb98003,0xf377bfdd,0xde5aa36d,0x8daa9df,0xbc872}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x289e3527c1bb5fd3,0xfbf2a8e1f5ce97fd,0xcb44df66b5e78f04,0x6e5c031c831405b5,0x5ea93d193134a6b9,0xafb9800337fe860d,0xde5aa36df377bfdd,0xbc87208daa9df}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x289e3527c1bb5fd3,0xfbf2a8e1f5ce97fd,0xcb44df66b5e78f04,0x6e5c031c831405b5,0x5ea93d193134a6b9,0xafb9800337fe860d,0xde5aa36df377bfdd,0xbc87208daa9df}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb354,0x6a4f,0xd461,0xf7db,0x4aec,0x6786,0xff6,0xb274,0xfcf4,0x66d,0x97e9,0x277e,0x5e43,0x68a3,0xb1fa,0x6062,0xa56a,0x8c2b,0x67ed,0xd926,0x444a,0x4883,0x5bc5,0x8084,0x1f0a,0x209e,0x3b85,0x4eb6,0x14fe,0xb973,0xb05c,0xa}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb354,0x6a4f,0xd461,0xf7db,0x4aec,0x6786,0xff6,0xb274,0xfcf4,0x66d,0x97e9,0x277e,0x5e43,0x68a3,0xb1fa,0x6062,0xa56a,0x8c2b,0x67ed,0xd926,0x444a,0x4883,0x5bc5,0x8084,0x1f0a,0x209e,0x3b85,0x4eb6,0x14fe,0xb973,0xb05c,0xa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6a4fb354,0xf7dbd461,0x67864aec,0xb2740ff6,0x66dfcf4,0x277e97e9,0x68a35e43,0x6062b1fa,0x8c2ba56a,0xd92667ed,0x4883444a,0x80845bc5,0x209e1f0a,0x4eb63b85,0xb97314fe,0xab05c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6a4fb354,0xf7dbd461,0x67864aec,0xb2740ff6,0x66dfcf4,0x277e97e9,0x68a35e43,0x6062b1fa,0x8c2ba56a,0xd92667ed,0x4883444a,0x80845bc5,0x209e1f0a,0x4eb63b85,0xb97314fe,0xab05c}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf7dbd4616a4fb354,0xb2740ff667864aec,0x277e97e9066dfcf4,0x6062b1fa68a35e43,0xd92667ed8c2ba56a,0x80845bc54883444a,0x4eb63b85209e1f0a,0xab05cb97314fe}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf7dbd4616a4fb354,0xb2740ff667864aec,0x277e97e9066dfcf4,0x6062b1fa68a35e43,0xd92667ed8c2ba56a,0x80845bc54883444a,0x4eb63b85209e1f0a,0xab05cb97314fe}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9c41,0x213b,0x2271,0x4d2a,0xca4c,0x987c,0xf3fd,0x8462,0x84ba,0x5504,0xf930,0x5ca1,0xb075,0x84d2,0xb16,0x1bc1,0xe1ac,0xfeb5,0xe84e,0x4bb0,0xf6b6,0x57b6,0x3d98,0x97f4,0xda24,0x9866,0x1aae,0xb84,0x36ec,0xfcb7,0x4a2d,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9c41,0x213b,0x2271,0x4d2a,0xca4c,0x987c,0xf3fd,0x8462,0x84ba,0x5504,0xf930,0x5ca1,0xb075,0x84d2,0xb16,0x1bc1,0xe1ac,0xfeb5,0xe84e,0x4bb0,0xf6b6,0x57b6,0x3d98,0x97f4,0xda24,0x9866,0x1aae,0xb84,0x36ec,0xfcb7,0x4a2d,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x213b9c41,0x4d2a2271,0x987cca4c,0x8462f3fd,0x550484ba,0x5ca1f930,0x84d2b075,0x1bc10b16,0xfeb5e1ac,0x4bb0e84e,0x57b6f6b6,0x97f43d98,0x9866da24,0xb841aae,0xfcb736ec,0xf4a2d}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x213b9c41,0x4d2a2271,0x987cca4c,0x8462f3fd,0x550484ba,0x5ca1f930,0x84d2b075,0x1bc10b16,0xfeb5e1ac,0x4bb0e84e,0x57b6f6b6,0x97f43d98,0x9866da24,0xb841aae,0xfcb736ec,0xf4a2d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4d2a2271213b9c41,0x8462f3fd987cca4c,0x5ca1f930550484ba,0x1bc10b1684d2b075,0x4bb0e84efeb5e1ac,0x97f43d9857b6f6b6,0xb841aae9866da24,0xf4a2dfcb736ec}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4d2a2271213b9c41,0x8462f3fd987cca4c,0x5ca1f930550484ba,0x1bc10b1684d2b075,0x4bb0e84efeb5e1ac,0x97f43d9857b6f6b6,0xb841aae9866da24,0xf4a2dfcb736ec}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa02d,0x3e44,0xcad8,0xd761,0x6802,0xa31,0x571e,0x40d,0x70fb,0x4a18,0x2099,0x34bb,0xfa4a,0x7ceb,0xfce3,0x91a3,0x5946,0xcecb,0xc2e6,0xa156,0x79f2,0xc801,0x7ffc,0x5046,0x4022,0xc88,0x5c92,0x21a5,0x5620,0xf725,0x378d,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa02d,0x3e44,0xcad8,0xd761,0x6802,0xa31,0x571e,0x40d,0x70fb,0x4a18,0x2099,0x34bb,0xfa4a,0x7ceb,0xfce3,0x91a3,0x5946,0xcecb,0xc2e6,0xa156,0x79f2,0xc801,0x7ffc,0x5046,0x4022,0xc88,0x5c92,0x21a5,0x5620,0xf725,0x378d,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3e44a02d,0xd761cad8,0xa316802,0x40d571e,0x4a1870fb,0x34bb2099,0x7cebfa4a,0x91a3fce3,0xcecb5946,0xa156c2e6,0xc80179f2,0x50467ffc,0xc884022,0x21a55c92,0xf7255620,0x4378d}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3e44a02d,0xd761cad8,0xa316802,0x40d571e,0x4a1870fb,0x34bb2099,0x7cebfa4a,0x91a3fce3,0xcecb5946,0xa156c2e6,0xc80179f2,0x50467ffc,0xc884022,0x21a55c92,0xf7255620,0x4378d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd761cad83e44a02d,0x40d571e0a316802,0x34bb20994a1870fb,0x91a3fce37cebfa4a,0xa156c2e6cecb5946,0x50467ffcc80179f2,0x21a55c920c884022,0x4378df7255620}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd761cad83e44a02d,0x40d571e0a316802,0x34bb20994a1870fb,0x91a3fce37cebfa4a,0xa156c2e6cecb5946,0x50467ffcc80179f2,0x21a55c920c884022,0x4378df7255620}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3919,0x9bc5,0xcbb7,0xe8a6,0x3f55,0x22a,0x8690,0xc64e,0x8e93,0x6c4d,0x80e8,0x422f,0xffd,0x7e1,0xf545,0xb342,0x87be,0x80a5,0xa62b,0xfb8d,0x4048,0x466e,0x4697,0xd4d,0x9d79,0xc9ba,0x52c1,0xa008,0x21f1,0xf309,0xb87c,0xa}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3919,0x9bc5,0xcbb7,0xe8a6,0x3f55,0x22a,0x8690,0xc64e,0x8e93,0x6c4d,0x80e8,0x422f,0xffd,0x7e1,0xf545,0xb342,0x87be,0x80a5,0xa62b,0xfb8d,0x4048,0x466e,0x4697,0xd4d,0x9d79,0xc9ba,0x52c1,0xa008,0x21f1,0xf309,0xb87c,0xa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9bc53919,0xe8a6cbb7,0x22a3f55,0xc64e8690,0x6c4d8e93,0x422f80e8,0x7e10ffd,0xb342f545,0x80a587be,0xfb8da62b,0x466e4048,0xd4d4697,0xc9ba9d79,0xa00852c1,0xf30921f1,0xab87c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9bc53919,0xe8a6cbb7,0x22a3f55,0xc64e8690,0x6c4d8e93,0x422f80e8,0x7e10ffd,0xb342f545,0x80a587be,0xfb8da62b,0x466e4048,0xd4d4697,0xc9ba9d79,0xa00852c1,0xf30921f1,0xab87c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe8a6cbb79bc53919,0xc64e8690022a3f55,0x422f80e86c4d8e93,0xb342f54507e10ffd,0xfb8da62b80a587be,0xd4d4697466e4048,0xa00852c1c9ba9d79,0xab87cf30921f1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe8a6cbb79bc53919,0xc64e8690022a3f55,0x422f80e86c4d8e93,0xb342f54507e10ffd,0xfb8da62b80a587be,0xd4d4697466e4048,0xa00852c1c9ba9d79,0xab87cf30921f1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc03e,0x86b5,0xcdfe,0x5bc4,0xc7a1,0xbc7b,0x90d9,0x9464,0xc604,0x9345,0x853d,0xc710,0x1ad2,0x4b98,0x4050,0x8ef4,0xd73c,0x6664,0xbd2,0xb610,0x483e,0x5e87,0xabe9,0xcf72,0x679e,0xbfbf,0x52b1,0x3483,0xf7c,0x4a36,0xbd53,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc03e,0x86b5,0xcdfe,0x5bc4,0xc7a1,0xbc7b,0x90d9,0x9464,0xc604,0x9345,0x853d,0xc710,0x1ad2,0x4b98,0x4050,0x8ef4,0xd73c,0x6664,0xbd2,0xb610,0x483e,0x5e87,0xabe9,0xcf72,0x679e,0xbfbf,0x52b1,0x3483,0xf7c,0x4a36,0xbd53,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b5c03e,0x5bc4cdfe,0xbc7bc7a1,0x946490d9,0x9345c604,0xc710853d,0x4b981ad2,0x8ef44050,0x6664d73c,0xb6100bd2,0x5e87483e,0xcf72abe9,0xbfbf679e,0x348352b1,0x4a360f7c,0xebd53}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b5c03e,0x5bc4cdfe,0xbc7bc7a1,0x946490d9,0x9345c604,0xc710853d,0x4b981ad2,0x8ef44050,0x6664d73c,0xb6100bd2,0x5e87483e,0xcf72abe9,0xbfbf679e,0x348352b1,0x4a360f7c,0xebd53}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5bc4cdfe86b5c03e,0x946490d9bc7bc7a1,0xc710853d9345c604,0x8ef440504b981ad2,0xb6100bd26664d73c,0xcf72abe95e87483e,0x348352b1bfbf679e,0xebd534a360f7c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5bc4cdfe86b5c03e,0x946490d9bc7bc7a1,0xc710853d9345c604,0x8ef440504b981ad2,0xb6100bd26664d73c,0xcf72abe95e87483e,0x348352b1bfbf679e,0xebd534a360f7c}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5937,0x9b8c,0xba31,0x676c,0x8d31,0x9c3d,0xf619,0x42e2,0x4d1a,0xd072,0x76e5,0x390a,0x5086,0x7643,0x2d16,0xfd71,0x341,0xff7b,0x56ec,0xed63,0x1436,0x7324,0x6a5d,0x9488,0x9f0,0x145,0xd6b5,0x4043,0x10d8,0xeb6e,0x7784,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x5937,0x9b8c,0xba31,0x676c,0x8d31,0x9c3d,0xf619,0x42e2,0x4d1a,0xd072,0x76e5,0x390a,0x5086,0x7643,0x2d16,0xfd71,0x341,0xff7b,0x56ec,0xed63,0x1436,0x7324,0x6a5d,0x9488,0x9f0,0x145,0xd6b5,0x4043,0x10d8,0xeb6e,0x7784,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b8c5937,0x676cba31,0x9c3d8d31,0x42e2f619,0xd0724d1a,0x390a76e5,0x76435086,0xfd712d16,0xff7b0341,0xed6356ec,0x73241436,0x94886a5d,0x14509f0,0x4043d6b5,0xeb6e10d8,0xf7784}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b8c5937,0x676cba31,0x9c3d8d31,0x42e2f619,0xd0724d1a,0x390a76e5,0x76435086,0xfd712d16,0xff7b0341,0xed6356ec,0x73241436,0x94886a5d,0x14509f0,0x4043d6b5,0xeb6e10d8,0xf7784}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x676cba319b8c5937,0x42e2f6199c3d8d31,0x390a76e5d0724d1a,0xfd712d1676435086,0xed6356ecff7b0341,0x94886a5d73241436,0x4043d6b5014509f0,0xf7784eb6e10d8}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x676cba319b8c5937,0x42e2f6199c3d8d31,0x390a76e5d0724d1a,0xfd712d1676435086,0xed6356ecff7b0341,0x94886a5d73241436,0x4043d6b5014509f0,0xf7784eb6e10d8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc6e7,0x643a,0x3448,0x1759,0xc0aa,0xfdd5,0x796f,0x39b1,0x716c,0x93b2,0x7f17,0xbdd0,0xf002,0xf81e,0xaba,0x4cbd,0x7841,0x7f5a,0x59d4,0x472,0xbfb7,0xb991,0xb968,0xf2b2,0x6286,0x3645,0xad3e,0x5ff7,0xde0e,0xcf6,0x4783,0x5}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc6e7,0x643a,0x3448,0x1759,0xc0aa,0xfdd5,0x796f,0x39b1,0x716c,0x93b2,0x7f17,0xbdd0,0xf002,0xf81e,0xaba,0x4cbd,0x7841,0x7f5a,0x59d4,0x472,0xbfb7,0xb991,0xb968,0xf2b2,0x6286,0x3645,0xad3e,0x5ff7,0xde0e,0xcf6,0x4783,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x643ac6e7,0x17593448,0xfdd5c0aa,0x39b1796f,0x93b2716c,0xbdd07f17,0xf81ef002,0x4cbd0aba,0x7f5a7841,0x47259d4,0xb991bfb7,0xf2b2b968,0x36456286,0x5ff7ad3e,0xcf6de0e,0x54783}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x643ac6e7,0x17593448,0xfdd5c0aa,0x39b1796f,0x93b2716c,0xbdd07f17,0xf81ef002,0x4cbd0aba,0x7f5a7841,0x47259d4,0xb991bfb7,0xf2b2b968,0x36456286,0x5ff7ad3e,0xcf6de0e,0x54783}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17593448643ac6e7,0x39b1796ffdd5c0aa,0xbdd07f1793b2716c,0x4cbd0abaf81ef002,0x47259d47f5a7841,0xf2b2b968b991bfb7,0x5ff7ad3e36456286,0x547830cf6de0e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17593448643ac6e7,0x39b1796ffdd5c0aa,0xbdd07f1793b2716c,0x4cbd0abaf81ef002,0x47259d47f5a7841,0xf2b2b968b991bfb7,0x5ff7ad3e36456286,0x547830cf6de0e}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x718a,0xe24a,0xae5,0xa4d6,0xd401,0xf453,0x9f91,0x69ce,0x7d19,0xfa11,0x9273,0x4e63,0xf33a,0xde49,0xe08f,0x746a,0x243d,0x52bb,0x43b6,0xe4c,0x1bdd,0x380d,0xdf64,0x74fe,0x4dfa,0x584f,0xa4d6,0xd71b,0xf067,0xf070,0x717e,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x718a,0xe24a,0xae5,0xa4d6,0xd401,0xf453,0x9f91,0x69ce,0x7d19,0xfa11,0x9273,0x4e63,0xf33a,0xde49,0xe08f,0x746a,0x243d,0x52bb,0x43b6,0xe4c,0x1bdd,0x380d,0xdf64,0x74fe,0x4dfa,0x584f,0xa4d6,0xd71b,0xf067,0xf070,0x717e,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe24a718a,0xa4d60ae5,0xf453d401,0x69ce9f91,0xfa117d19,0x4e639273,0xde49f33a,0x746ae08f,0x52bb243d,0xe4c43b6,0x380d1bdd,0x74fedf64,0x584f4dfa,0xd71ba4d6,0xf070f067,0xf717e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe24a718a,0xa4d60ae5,0xf453d401,0x69ce9f91,0xfa117d19,0x4e639273,0xde49f33a,0x746ae08f,0x52bb243d,0xe4c43b6,0x380d1bdd,0x74fedf64,0x584f4dfa,0xd71ba4d6,0xf070f067,0xf717e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa4d60ae5e24a718a,0x69ce9f91f453d401,0x4e639273fa117d19,0x746ae08fde49f33a,0xe4c43b652bb243d,0x74fedf64380d1bdd,0xd71ba4d6584f4dfa,0xf717ef070f067}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa4d60ae5e24a718a,0x69ce9f91f453d401,0x4e639273fa117d19,0x746ae08fde49f33a,0xe4c43b652bb243d,0x74fedf64380d1bdd,0xd71ba4d6584f4dfa,0xf717ef070f067}}}} #endif , #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5d93,0x7845,0xd1d0,0xe045,0xfa74,0x6b6,0x9400,0xad36,0x4e68,0xd3f6,0x9b00,0x7ca0,0xab22,0xfac,0x1fb6,0xb42f,0x57db,0xb2e3,0xbc5b,0x2b2d,0x94fa,0xc77e,0x34e2,0x2918,0x6ce9,0xf9dd,0x68cf,0xd4a2,0xbc59,0x6050,0xda60,0x5}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5d93,0x7845,0xd1d0,0xe045,0xfa74,0x6b6,0x9400,0xad36,0x4e68,0xd3f6,0x9b00,0x7ca0,0xab22,0xfac,0x1fb6,0xb42f,0x57db,0xb2e3,0xbc5b,0x2b2d,0x94fa,0xc77e,0x34e2,0x2918,0x6ce9,0xf9dd,0x68cf,0xd4a2,0xbc59,0x6050,0xda60,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x78455d93,0xe045d1d0,0x6b6fa74,0xad369400,0xd3f64e68,0x7ca09b00,0xfacab22,0xb42f1fb6,0xb2e357db,0x2b2dbc5b,0xc77e94fa,0x291834e2,0xf9dd6ce9,0xd4a268cf,0x6050bc59,0x5da60}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x78455d93,0xe045d1d0,0x6b6fa74,0xad369400,0xd3f64e68,0x7ca09b00,0xfacab22,0xb42f1fb6,0xb2e357db,0x2b2dbc5b,0xc77e94fa,0x291834e2,0xf9dd6ce9,0xd4a268cf,0x6050bc59,0x5da60}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe045d1d078455d93,0xad36940006b6fa74,0x7ca09b00d3f64e68,0xb42f1fb60facab22,0x2b2dbc5bb2e357db,0x291834e2c77e94fa,0xd4a268cff9dd6ce9,0x5da606050bc59}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe045d1d078455d93,0xad36940006b6fa74,0x7ca09b00d3f64e68,0xb42f1fb60facab22,0x2b2dbc5bb2e357db,0x291834e2c77e94fa,0xd4a268cff9dd6ce9,0x5da606050bc59}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc6dc,0x5d39,0xac2b,0x2d81,0xc9b8,0xf398,0xdab5,0x8e30,0xb3b2,0x1b25,0x7102,0x8cd2,0x952e,0x7c35,0xb4f3,0x52b8,0x5789,0xb877,0x6906,0x8d31,0x98a6,0x8a10,0x2b3,0x1667,0x856,0xa935,0xfc76,0xc8ec,0x6044,0x9148,0x4f02,0xd}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc6dc,0x5d39,0xac2b,0x2d81,0xc9b8,0xf398,0xdab5,0x8e30,0xb3b2,0x1b25,0x7102,0x8cd2,0x952e,0x7c35,0xb4f3,0x52b8,0x5789,0xb877,0x6906,0x8d31,0x98a6,0x8a10,0x2b3,0x1667,0x856,0xa935,0xfc76,0xc8ec,0x6044,0x9148,0x4f02,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5d39c6dc,0x2d81ac2b,0xf398c9b8,0x8e30dab5,0x1b25b3b2,0x8cd27102,0x7c35952e,0x52b8b4f3,0xb8775789,0x8d316906,0x8a1098a6,0x166702b3,0xa9350856,0xc8ecfc76,0x91486044,0xd4f02}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5d39c6dc,0x2d81ac2b,0xf398c9b8,0x8e30dab5,0x1b25b3b2,0x8cd27102,0x7c35952e,0x52b8b4f3,0xb8775789,0x8d316906,0x8a1098a6,0x166702b3,0xa9350856,0xc8ecfc76,0x91486044,0xd4f02}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2d81ac2b5d39c6dc,0x8e30dab5f398c9b8,0x8cd271021b25b3b2,0x52b8b4f37c35952e,0x8d316906b8775789,0x166702b38a1098a6,0xc8ecfc76a9350856,0xd4f0291486044}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2d81ac2b5d39c6dc,0x8e30dab5f398c9b8,0x8cd271021b25b3b2,0x52b8b4f37c35952e,0x8d316906b8775789,0x166702b38a1098a6,0xc8ecfc76a9350856,0xd4f0291486044}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 31, ._mp_d = (mp_limb_t[]) {0x8e76,0x1db5,0xf51a,0x5b29,0x2bfe,0xbac,0x606e,0x9631,0x82e6,0x5ee,0x6d8c,0xb19c,0xcc5,0x21b6,0x1f70,0x8b95,0xdbc2,0xad44,0xbc49,0xf1b3,0xe422,0xc7f2,0x209b,0x8b01,0xb205,0xa7b0,0x5b29,0x28e4,0xf98,0xf8f,0x8e81}}} +{{{._mp_alloc = 0, ._mp_size = 31, ._mp_d = (mp_limb_t[]) 
{0x8e76,0x1db5,0xf51a,0x5b29,0x2bfe,0xbac,0x606e,0x9631,0x82e6,0x5ee,0x6d8c,0xb19c,0xcc5,0x21b6,0x1f70,0x8b95,0xdbc2,0xad44,0xbc49,0xf1b3,0xe422,0xc7f2,0x209b,0x8b01,0xb205,0xa7b0,0x5b29,0x28e4,0xf98,0xf8f,0x8e81}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1db58e76,0x5b29f51a,0xbac2bfe,0x9631606e,0x5ee82e6,0xb19c6d8c,0x21b60cc5,0x8b951f70,0xad44dbc2,0xf1b3bc49,0xc7f2e422,0x8b01209b,0xa7b0b205,0x28e45b29,0xf8f0f98,0x8e81}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1db58e76,0x5b29f51a,0xbac2bfe,0x9631606e,0x5ee82e6,0xb19c6d8c,0x21b60cc5,0x8b951f70,0xad44dbc2,0xf1b3bc49,0xc7f2e422,0x8b01209b,0xa7b0b205,0x28e45b29,0xf8f0f98,0x8e81}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5b29f51a1db58e76,0x9631606e0bac2bfe,0xb19c6d8c05ee82e6,0x8b951f7021b60cc5,0xf1b3bc49ad44dbc2,0x8b01209bc7f2e422,0x28e45b29a7b0b205,0x8e810f8f0f98}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5b29f51a1db58e76,0x9631606e0bac2bfe,0xb19c6d8c05ee82e6,0x8b951f7021b60cc5,0xf1b3bc49ad44dbc2,0x8b01209bc7f2e422,0x28e45b29a7b0b205,0x8e810f8f0f98}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0xca6, 0xc89, 0x411, 0x17e9, 0x188d, 0xad5, 0x8de, 0x403, 0x183a, 0x1e0d, 0x28c, 0xfdc, 0xbcc, 0xd0, 0xa3a, 0xb2e, 0x140c, 0x11b4, 0x4bb, 0x10da, 0xb22, 0x1a4c, 0xbfa, 0xbd5, 0xae6, 0xeff, 0x465, 0x8df, 0xcf9, 0x1c4a, 0x1807, 0x1462, 0xead, 0x1c43, 0x12a3, 0x1ec2, 0x1e12, 0xad0, 0x1cd} @@ -2165,223 +2165,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x4f71,0xd850,0x620a,0x55cc,0x31b0,0x4b74,0xab42,0x125c,0xb589,0xa634,0x73b3,0xca64,0x6ed0,0x6c41,0x6022,0x9b4a,0xb03e,0xe0d,0x1e19,0x2925,0x275e,0x42a,0x10dd,0xfc6a,0x7f51,0xb592,0x977e,0xe556,0x10bc,0x873d,0xa68f,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x4f71,0xd850,0x620a,0x55cc,0x31b0,0x4b74,0xab42,0x125c,0xb589,0xa634,0x73b3,0xca64,0x6ed0,0x6c41,0x6022,0x9b4a,0xb03e,0xe0d,0x1e19,0x2925,0x275e,0x42a,0x10dd,0xfc6a,0x7f51,0xb592,0x977e,0xe556,0x10bc,0x873d,0xa68f,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8504f71,0x55cc620a,0x4b7431b0,0x125cab42,0xa634b589,0xca6473b3,0x6c416ed0,0x9b4a6022,0xe0db03e,0x29251e19,0x42a275e,0xfc6a10dd,0xb5927f51,0xe556977e,0x873d10bc,0x3a68f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8504f71,0x55cc620a,0x4b7431b0,0x125cab42,0xa634b589,0xca6473b3,0x6c416ed0,0x9b4a6022,0xe0db03e,0x29251e19,0x42a275e,0xfc6a10dd,0xb5927f51,0xe556977e,0x873d10bc,0x3a68f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55cc620ad8504f71,0x125cab424b7431b0,0xca6473b3a634b589,0x9b4a60226c416ed0,0x29251e190e0db03e,0xfc6a10dd042a275e,0xe556977eb5927f51,0x3a68f873d10bc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55cc620ad8504f71,0x125cab424b7431b0,0xca6473b3a634b589,0x9b4a60226c416ed0,0x29251e190e0db03e,0xfc6a10dd042a275e,0xe556977eb5927f51,0x3a68f873d10bc}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xed2,0x7e79,0xb784,0x90c5,0xe054,0xfc99,0x606e,0x59f4,0xb88f,0xb6ae,0x29bf,0xe3fc,0xa3cf,0x8c88,0xba94,0x4bf9,0xcb60,0xf771,0x31c0,0x7236,0x2401,0xa9e7,0xb9b8,0xfdbf,0x326e,0xb4d5,0x9bd6,0xc810,0xb7f0,0x661,0x1f62,0xf}}} +{{{._mp_alloc = 0, 
._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xed2,0x7e79,0xb784,0x90c5,0xe054,0xfc99,0x606e,0x59f4,0xb88f,0xb6ae,0x29bf,0xe3fc,0xa3cf,0x8c88,0xba94,0x4bf9,0xcb60,0xf771,0x31c0,0x7236,0x2401,0xa9e7,0xb9b8,0xfdbf,0x326e,0xb4d5,0x9bd6,0xc810,0xb7f0,0x661,0x1f62,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7e790ed2,0x90c5b784,0xfc99e054,0x59f4606e,0xb6aeb88f,0xe3fc29bf,0x8c88a3cf,0x4bf9ba94,0xf771cb60,0x723631c0,0xa9e72401,0xfdbfb9b8,0xb4d5326e,0xc8109bd6,0x661b7f0,0xf1f62}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7e790ed2,0x90c5b784,0xfc99e054,0x59f4606e,0xb6aeb88f,0xe3fc29bf,0x8c88a3cf,0x4bf9ba94,0xf771cb60,0x723631c0,0xa9e72401,0xfdbfb9b8,0xb4d5326e,0xc8109bd6,0x661b7f0,0xf1f62}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x90c5b7847e790ed2,0x59f4606efc99e054,0xe3fc29bfb6aeb88f,0x4bf9ba948c88a3cf,0x723631c0f771cb60,0xfdbfb9b8a9e72401,0xc8109bd6b4d5326e,0xf1f620661b7f0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x90c5b7847e790ed2,0x59f4606efc99e054,0xe3fc29bfb6aeb88f,0x4bf9ba948c88a3cf,0x723631c0f771cb60,0xfdbfb9b8a9e72401,0xc8109bd6b4d5326e,0xf1f620661b7f0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb407,0xc1a7,0x6715,0xe1ad,0x31eb,0x15e8,0x1594,0xbd8e,0x3976,0x2bae,0x8f7a,0x1636,0x8d19,0x88d2,0x12ae,0x1627,0xf2b0,0x4868,0xdd78,0xfa51,0xfd6b,0xa1f3,0x7eb3,0x35d9,0xd826,0x2c09,0x12a8,0x54dd,0x87d6,0xc29c,0x5bcd,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb407,0xc1a7,0x6715,0xe1ad,0x31eb,0x15e8,0x1594,0xbd8e,0x3976,0x2bae,0x8f7a,0x1636,0x8d19,0x88d2,0x12ae,0x1627,0xf2b0,0x4868,0xdd78,0xfa51,0xfd6b,0xa1f3,0x7eb3,0x35d9,0xd826,0x2c09,0x12a8,0x54dd,0x87d6,0xc29c,0x5bcd,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc1a7b407,0xe1ad6715,0x15e831eb,0xbd8e1594,0x2bae3976,0x16368f7a,0x88d28d19,0x162712ae,0x4868f2b0,0xfa51dd78,0xa1f3fd6b,0x35d97eb3,0x2c09d826,0x54dd12a8,0xc29c87d6,0x15bcd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc1a7b407,0xe1ad6715,0x15e831eb,0xbd8e1594,0x2bae3976,0x16368f7a,0x88d28d19,0x162712ae,0x4868f2b0,0xfa51dd78,0xa1f3fd6b,0x35d97eb3,0x2c09d826,0x54dd12a8,0xc29c87d6,0x15bcd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe1ad6715c1a7b407,0xbd8e159415e831eb,0x16368f7a2bae3976,0x162712ae88d28d19,0xfa51dd784868f2b0,0x35d97eb3a1f3fd6b,0x54dd12a82c09d826,0x15bcdc29c87d6}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe1ad6715c1a7b407,0xbd8e159415e831eb,0x16368f7a2bae3976,0x162712ae88d28d19,0xfa51dd784868f2b0,0x35d97eb3a1f3fd6b,0x54dd12a82c09d826,0x15bcdc29c87d6}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb08f,0x27af,0x9df5,0xaa33,0xce4f,0xb48b,0x54bd,0xeda3,0x4a76,0x59cb,0x8c4c,0x359b,0x912f,0x93be,0x9fdd,0x64b5,0x4fc1,0xf1f2,0xe1e6,0xd6da,0xd8a1,0xfbd5,0xef22,0x395,0x80ae,0x4a6d,0x6881,0x1aa9,0xef43,0x78c2,0x5970,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb08f,0x27af,0x9df5,0xaa33,0xce4f,0xb48b,0x54bd,0xeda3,0x4a76,0x59cb,0x8c4c,0x359b,0x912f,0x93be,0x9fdd,0x64b5,0x4fc1,0xf1f2,0xe1e6,0xd6da,0xd8a1,0xfbd5,0xef22,0x395,0x80ae,0x4a6d,0x6881,0x1aa9,0xef43,0x78c2,0x5970,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x27afb08f,0xaa339df5,0xb48bce4f,0xeda354bd,0x59cb4a76,0x359b8c4c,0x93be912f,0x64b59fdd,0xf1f24fc1,0xd6dae1e6,0xfbd5d8a1,0x395ef22,0x4a6d80ae,0x1aa96881,0x78c2ef43,0xc5970}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27afb08f,0xaa339df5,0xb48bce4f,0xeda354bd,0x59cb4a76,0x359b8c4c,0x93be912f,0x64b59fdd,0xf1f24fc1,0xd6dae1e6,0xfbd5d8a1,0x395ef22,0x4a6d80ae,0x1aa96881,0x78c2ef43,0xc5970}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa339df527afb08f,0xeda354bdb48bce4f,0x359b8c4c59cb4a76,0x64b59fdd93be912f,0xd6dae1e6f1f24fc1,0x395ef22fbd5d8a1,0x1aa968814a6d80ae,0xc597078c2ef43}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa339df527afb08f,0xeda354bdb48bce4f,0x359b8c4c59cb4a76,0x64b59fdd93be912f,0xd6dae1e6f1f24fc1,0x395ef22fbd5d8a1,0x1aa968814a6d80ae,0xc597078c2ef43}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}}} #endif -}}, {{ +}}}, {{{ 
#if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe7eb,0x27c8,0x739b,0x6eaa,0x7a17,0xf593,0xac1c,0x4a84,0x1a27,0x7771,0xe67e,0xea3d,0x4596,0xa34b,0x8edd,0xc51c,0x7c15,0xd1a1,0x2551,0x481b,0x402e,0xfed0,0x8b82,0x1eab,0xc98b,0x20fa,0x7143,0x6abf,0x463a,0x475f,0x510f,0x9}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe7eb,0x27c8,0x739b,0x6eaa,0x7a17,0xf593,0xac1c,0x4a84,0x1a27,0x7771,0xe67e,0xea3d,0x4596,0xa34b,0x8edd,0xc51c,0x7c15,0xd1a1,0x2551,0x481b,0x402e,0xfed0,0x8b82,0x1eab,0xc98b,0x20fa,0x7143,0x6abf,0x463a,0x475f,0x510f,0x9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27c8e7eb,0x6eaa739b,0xf5937a17,0x4a84ac1c,0x77711a27,0xea3de67e,0xa34b4596,0xc51c8edd,0xd1a17c15,0x481b2551,0xfed0402e,0x1eab8b82,0x20fac98b,0x6abf7143,0x475f463a,0x9510f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27c8e7eb,0x6eaa739b,0xf5937a17,0x4a84ac1c,0x77711a27,0xea3de67e,0xa34b4596,0xc51c8edd,0xd1a17c15,0x481b2551,0xfed0402e,0x1eab8b82,0x20fac98b,0x6abf7143,0x475f463a,0x9510f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6eaa739b27c8e7eb,0x4a84ac1cf5937a17,0xea3de67e77711a27,0xc51c8edda34b4596,0x481b2551d1a17c15,0x1eab8b82fed0402e,0x6abf714320fac98b,0x9510f475f463a}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6eaa739b27c8e7eb,0x4a84ac1cf5937a17,0xea3de67e77711a27,0xc51c8edda34b4596,0x481b2551d1a17c15,0x1eab8b82fed0402e,0x6abf714320fac98b,0x9510f475f463a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9e28,0x9e31,0xdab6,0x138c,0xc3c0,0x5193,0x444d,0xb2b7,0xf371,0x5630,0xb08b,0xc700,0x2404,0x3f08,0xc3f,0xbd7c,0x963b,0xd892,0x7bb2,0x429d,0x19d8,0xf277,0x853d,0x9aac,0x9bfa,0x42cd,0xf5e8,0x9e40,0x8a41,0x15a8,0x9c23,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9e28,0x9e31,0xdab6,0x138c,0xc3c0,0x5193,0x444d,0xb2b7,0xf371,0x5630,0xb08b,0xc700,0x2404,0x3f08,0xc3f,0xbd7c,0x963b,0xd892,0x7bb2,0x429d,0x19d8,0xf277,0x853d,0x9aac,0x9bfa,0x42cd,0xf5e8,0x9e40,0x8a41,0x15a8,0x9c23,0x6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9e319e28,0x138cdab6,0x5193c3c0,0xb2b7444d,0x5630f371,0xc700b08b,0x3f082404,0xbd7c0c3f,0xd892963b,0x429d7bb2,0xf27719d8,0x9aac853d,0x42cd9bfa,0x9e40f5e8,0x15a88a41,0x69c23}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9e319e28,0x138cdab6,0x5193c3c0,0xb2b7444d,0x5630f371,0xc700b08b,0x3f082404,0xbd7c0c3f,0xd892963b,0x429d7bb2,0xf27719d8,0x9aac853d,0x42cd9bfa,0x9e40f5e8,0x15a88a41,0x69c23}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x138cdab69e319e28,0xb2b7444d5193c3c0,0xc700b08b5630f371,0xbd7c0c3f3f082404,0x429d7bb2d892963b,0x9aac853df27719d8,0x9e40f5e842cd9bfa,0x69c2315a88a41}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x138cdab69e319e28,0xb2b7444d5193c3c0,0xc700b08b5630f371,0xbd7c0c3f3f082404,0x429d7bb2d892963b,0x9aac853df27719d8,0x9e40f5e842cd9bfa,0x69c2315a88a41}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x66d1,0x8ee,0x9219,0x9d61,0x13a4,0xfc63,0xc3ee,0xdf2a,0x1353,0x2ef,0xc391,0x8ad8,0x953b,0xb014,0x1029,0xa4b2,0x61a3,0xfc07,0xf3a8,0x199c,0xe6c8,0x6a41,0x6eb7,0xb459,0xa187,0x2f4e,0x9ec3,0x8b4e,0x5321,0x38b,0x5b21,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x66d1,0x8ee,0x9219,0x9d61,0x13a4,0xfc63,0xc3ee,0xdf2a,0x1353,0x2ef,0xc391,0x8ad8,0x953b,0xb014,0x1029,0xa4b2,0x61a3,0xfc07,0xf3a8,0x199c,0xe6c8,0x6a41,0x6eb7,0xb459,0xa187,0x2f4e,0x9ec3,0x8b4e,0x5321,0x38b,0x5b21,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8ee66d1,0x9d619219,0xfc6313a4,0xdf2ac3ee,0x2ef1353,0x8ad8c391,0xb014953b,0xa4b21029,0xfc0761a3,0x199cf3a8,0x6a41e6c8,0xb4596eb7,0x2f4ea187,0x8b4e9ec3,0x38b5321,0x35b21}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8ee66d1,0x9d619219,0xfc6313a4,0xdf2ac3ee,0x2ef1353,0x8ad8c391,0xb014953b,0xa4b21029,0xfc0761a3,0x199cf3a8,0x6a41e6c8,0xb4596eb7,0x2f4ea187,0x8b4e9ec3,0x38b5321,0x35b21}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d61921908ee66d1,0xdf2ac3eefc6313a4,0x8ad8c39102ef1353,0xa4b21029b014953b,0x199cf3a8fc0761a3,0xb4596eb76a41e6c8,0x8b4e9ec32f4ea187,0x35b21038b5321}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d61921908ee66d1,0xdf2ac3eefc6313a4,0x8ad8c39102ef1353,0xa4b21029b014953b,0x199cf3a8fc0761a3,0xb4596eb76a41e6c8,0x8b4e9ec32f4ea187,0x35b21038b5321}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1815,0xd837,0x8c64,0x9155,0x85e8,0xa6c,0x53e3,0xb57b,0xe5d8,0x888e,0x1981,0x15c2,0xba69,0x5cb4,0x7122,0x3ae3,0x83ea,0x2e5e,0xdaae,0xb7e4,0xbfd1,0x12f,0x747d,0xe154,0x3674,0xdf05,0x8ebc,0x9540,0xb9c5,0xb8a0,0xaef0,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1815,0xd837,0x8c64,0x9155,0x85e8,0xa6c,0x53e3,0xb57b,0xe5d8,0x888e,0x1981,0x15c2,0xba69,0x5cb4,0x7122,0x3ae3,0x83ea,0x2e5e,0xdaae,0xb7e4,0xbfd1,0x12f,0x747d,0xe154,0x3674,0xdf05,0x8ebc,0x9540,0xb9c5,0xb8a0,0xaef0,0x6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8371815,0x91558c64,0xa6c85e8,0xb57b53e3,0x888ee5d8,0x15c21981,0x5cb4ba69,0x3ae37122,0x2e5e83ea,0xb7e4daae,0x12fbfd1,0xe154747d,0xdf053674,0x95408ebc,0xb8a0b9c5,0x6aef0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8371815,0x91558c64,0xa6c85e8,0xb57b53e3,0x888ee5d8,0x15c21981,0x5cb4ba69,0x3ae37122,0x2e5e83ea,0xb7e4daae,0x12fbfd1,0xe154747d,0xdf053674,0x95408ebc,0xb8a0b9c5,0x6aef0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x91558c64d8371815,0xb57b53e30a6c85e8,0x15c21981888ee5d8,0x3ae371225cb4ba69,0xb7e4daae2e5e83ea,0xe154747d012fbfd1,0x95408ebcdf053674,0x6aef0b8a0b9c5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x91558c64d8371815,0xb57b53e30a6c85e8,0x15c21981888ee5d8,0x3ae371225cb4ba69,0xb7e4daae2e5e83ea,0xe154747d012fbfd1,0x95408ebcdf053674,0x6aef0b8a0b9c5}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x4f71,0xd850,0x620a,0x55cc,0x31b0,0x4b74,0xab42,0x125c,0xb589,0xa634,0x73b3,0xca64,0x6ed0,0x6c41,0x6022,0x9b4a,0xb03e,0xe0d,0x1e19,0x2925,0x275e,0x42a,0x10dd,0xfc6a,0x7f51,0xb592,0x977e,0xe556,0x10bc,0x873d,0xa68f,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x4f71,0xd850,0x620a,0x55cc,0x31b0,0x4b74,0xab42,0x125c,0xb589,0xa634,0x73b3,0xca64,0x6ed0,0x6c41,0x6022,0x9b4a,0xb03e,0xe0d,0x1e19,0x2925,0x275e,0x42a,0x10dd,0xfc6a,0x7f51,0xb592,0x977e,0xe556,0x10bc,0x873d,0xa68f,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xd8504f71,0x55cc620a,0x4b7431b0,0x125cab42,0xa634b589,0xca6473b3,0x6c416ed0,0x9b4a6022,0xe0db03e,0x29251e19,0x42a275e,0xfc6a10dd,0xb5927f51,0xe556977e,0x873d10bc,0x3a68f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8504f71,0x55cc620a,0x4b7431b0,0x125cab42,0xa634b589,0xca6473b3,0x6c416ed0,0x9b4a6022,0xe0db03e,0x29251e19,0x42a275e,0xfc6a10dd,0xb5927f51,0xe556977e,0x873d10bc,0x3a68f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55cc620ad8504f71,0x125cab424b7431b0,0xca6473b3a634b589,0x9b4a60226c416ed0,0x29251e190e0db03e,0xfc6a10dd042a275e,0xe556977eb5927f51,0x3a68f873d10bc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55cc620ad8504f71,0x125cab424b7431b0,0xca6473b3a634b589,0x9b4a60226c416ed0,0x29251e190e0db03e,0xfc6a10dd042a275e,0xe556977eb5927f51,0x3a68f873d10bc}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xed2,0x7e79,0xb784,0x90c5,0xe054,0xfc99,0x606e,0x59f4,0xb88f,0xb6ae,0x29bf,0xe3fc,0xa3cf,0x8c88,0xba94,0x4bf9,0xcb60,0xf771,0x31c0,0x7236,0x2401,0xa9e7,0xb9b8,0xfdbf,0x326e,0xb4d5,0x9bd6,0xc810,0xb7f0,0x661,0x1f62,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xed2,0x7e79,0xb784,0x90c5,0xe054,0xfc99,0x606e,0x59f4,0xb88f,0xb6ae,0x29bf,0xe3fc,0xa3cf,0x8c88,0xba94,0x4bf9,0xcb60,0xf771,0x31c0,0x7236,0x2401,0xa9e7,0xb9b8,0xfdbf,0x326e,0xb4d5,0x9bd6,0xc810,0xb7f0,0x661,0x1f62,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7e790ed2,0x90c5b784,0xfc99e054,0x59f4606e,0xb6aeb88f,0xe3fc29bf,0x8c88a3cf,0x4bf9ba94,0xf771cb60,0x723631c0,0xa9e72401,0xfdbfb9b8,0xb4d5326e,0xc8109bd6,0x661b7f0,0xf1f62}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7e790ed2,0x90c5b784,0xfc99e054,0x59f4606e,0xb6aeb88f,0xe3fc29bf,0x8c88a3cf,0x4bf9ba94,0xf771cb60,0x723631c0,0xa9e72401,0xfdbfb9b8,0xb4d5326e,0xc8109bd6,0x661b7f0,0xf1f62}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x90c5b7847e790ed2,0x59f4606efc99e054,0xe3fc29bfb6aeb88f,0x4bf9ba948c88a3cf,0x723631c0f771cb60,0xfdbfb9b8a9e72401,0xc8109bd6b4d5326e,0xf1f620661b7f0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x90c5b7847e790ed2,0x59f4606efc99e054,0xe3fc29bfb6aeb88f,0x4bf9ba948c88a3cf,0x723631c0f771cb60,0xfdbfb9b8a9e72401,0xc8109bd6b4d5326e,0xf1f620661b7f0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb407,0xc1a7,0x6715,0xe1ad,0x31eb,0x15e8,0x1594,0xbd8e,0x3976,0x2bae,0x8f7a,0x1636,0x8d19,0x88d2,0x12ae,0x1627,0xf2b0,0x4868,0xdd78,0xfa51,0xfd6b,0xa1f3,0x7eb3,0x35d9,0xd826,0x2c09,0x12a8,0x54dd,0x87d6,0xc29c,0x5bcd,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb407,0xc1a7,0x6715,0xe1ad,0x31eb,0x15e8,0x1594,0xbd8e,0x3976,0x2bae,0x8f7a,0x1636,0x8d19,0x88d2,0x12ae,0x1627,0xf2b0,0x4868,0xdd78,0xfa51,0xfd6b,0xa1f3,0x7eb3,0x35d9,0xd826,0x2c09,0x12a8,0x54dd,0x87d6,0xc29c,0x5bcd,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc1a7b407,0xe1ad6715,0x15e831eb,0xbd8e1594,0x2bae3976,0x16368f7a,0x88d28d19,0x162712ae,0x4868f2b0,0xfa51dd78,0xa1f3fd6b,0x35d97eb3,0x2c09d826,0x54dd12a8,0xc29c87d6,0x15bcd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc1a7b407,0xe1ad6715,0x15e831eb,0xbd8e1594,0x2bae3976,0x16368f7a,0x88d28d19,0x162712ae,0x4868f2b0,0xfa51dd78,0xa1f3fd6b,0x35d97eb3,0x2c09d826,0x54dd12a8,0xc29c87d6,0x15bcd}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe1ad6715c1a7b407,0xbd8e159415e831eb,0x16368f7a2bae3976,0x162712ae88d28d19,0xfa51dd784868f2b0,0x35d97eb3a1f3fd6b,0x54dd12a82c09d826,0x15bcdc29c87d6}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe1ad6715c1a7b407,0xbd8e159415e831eb,0x16368f7a2bae3976,0x162712ae88d28d19,0xfa51dd784868f2b0,0x35d97eb3a1f3fd6b,0x54dd12a82c09d826,0x15bcdc29c87d6}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb08f,0x27af,0x9df5,0xaa33,0xce4f,0xb48b,0x54bd,0xeda3,0x4a76,0x59cb,0x8c4c,0x359b,0x912f,0x93be,0x9fdd,0x64b5,0x4fc1,0xf1f2,0xe1e6,0xd6da,0xd8a1,0xfbd5,0xef22,0x395,0x80ae,0x4a6d,0x6881,0x1aa9,0xef43,0x78c2,0x5970,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb08f,0x27af,0x9df5,0xaa33,0xce4f,0xb48b,0x54bd,0xeda3,0x4a76,0x59cb,0x8c4c,0x359b,0x912f,0x93be,0x9fdd,0x64b5,0x4fc1,0xf1f2,0xe1e6,0xd6da,0xd8a1,0xfbd5,0xef22,0x395,0x80ae,0x4a6d,0x6881,0x1aa9,0xef43,0x78c2,0x5970,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27afb08f,0xaa339df5,0xb48bce4f,0xeda354bd,0x59cb4a76,0x359b8c4c,0x93be912f,0x64b59fdd,0xf1f24fc1,0xd6dae1e6,0xfbd5d8a1,0x395ef22,0x4a6d80ae,0x1aa96881,0x78c2ef43,0xc5970}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27afb08f,0xaa339df5,0xb48bce4f,0xeda354bd,0x59cb4a76,0x359b8c4c,0x93be912f,0x64b59fdd,0xf1f24fc1,0xd6dae1e6,0xfbd5d8a1,0x395ef22,0x4a6d80ae,0x1aa96881,0x78c2ef43,0xc5970}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa339df527afb08f,0xeda354bdb48bce4f,0x359b8c4c59cb4a76,0x64b59fdd93be912f,0xd6dae1e6f1f24fc1,0x395ef22fbd5d8a1,0x1aa968814a6d80ae,0xc597078c2ef43}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa339df527afb08f,0xeda354bdb48bce4f,0x359b8c4c59cb4a76,0x64b59fdd93be912f,0xd6dae1e6f1f24fc1,0x395ef22fbd5d8a1,0x1aa968814a6d80ae,0xc597078c2ef43}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}}} #endif , #if 0 
#elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb83a,0x5e7a,0x2c9b,0xd483,0xeff9,0x71e9,0x4a21,0x2eae,0x921,0xbb26,0x6bf2,0xb038,0xeac9,0xc05a,0xd498,0x34fb,0x7ca,0xaae9,0x2674,0x81de,0x471f,0x7dbe,0x88c9,0xa354,0x9f03,0x5301,0x9acc,0x7c82,0xc479,0x732,0xdc7b,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb83a,0x5e7a,0x2c9b,0xd483,0xeff9,0x71e9,0x4a21,0x2eae,0x921,0xbb26,0x6bf2,0xb038,0xeac9,0xc05a,0xd498,0x34fb,0x7ca,0xaae9,0x2674,0x81de,0x471f,0x7dbe,0x88c9,0xa354,0x9f03,0x5301,0x9acc,0x7c82,0xc479,0x732,0xdc7b,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5e7ab83a,0xd4832c9b,0x71e9eff9,0x2eae4a21,0xbb260921,0xb0386bf2,0xc05aeac9,0x34fbd498,0xaae907ca,0x81de2674,0x7dbe471f,0xa35488c9,0x53019f03,0x7c829acc,0x732c479,0x8dc7b}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5e7ab83a,0xd4832c9b,0x71e9eff9,0x2eae4a21,0xbb260921,0xb0386bf2,0xc05aeac9,0x34fbd498,0xaae907ca,0x81de2674,0x7dbe471f,0xa35488c9,0x53019f03,0x7c829acc,0x732c479,0x8dc7b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd4832c9b5e7ab83a,0x2eae4a2171e9eff9,0xb0386bf2bb260921,0x34fbd498c05aeac9,0x81de2674aae907ca,0xa35488c97dbe471f,0x7c829acc53019f03,0x8dc7b0732c479}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd4832c9b5e7ab83a,0x2eae4a2171e9eff9,0xb0386bf2bb260921,0x34fbd498c05aeac9,0x81de2674aae907ca,0xa35488c97dbe471f,0x7c829acc53019f03,0x8dc7b0732c479}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 31, ._mp_d = (mp_limb_t[]) {0x4733,0xaeba,0xf3d4,0x84bf,0x453a,0xa71a,0xe0fa,0x4604,0xf02b,0x9bc2,0xb114,0x5fc5,0x5f8d,0x1a8d,0x2302,0x175d,0x3655,0x8351,0x51b,0x698c,0xc745,0x8c83,0xdd6a,0xdd4b,0x682f,0x80b7,0xd1fc,0xe320,0xca30,0xc1d3,0xc365}}} +{{{._mp_alloc = 0, ._mp_size = 31, ._mp_d = (mp_limb_t[]) {0x4733,0xaeba,0xf3d4,0x84bf,0x453a,0xa71a,0xe0fa,0x4604,0xf02b,0x9bc2,0xb114,0x5fc5,0x5f8d,0x1a8d,0x2302,0x175d,0x3655,0x8351,0x51b,0x698c,0xc745,0x8c83,0xdd6a,0xdd4b,0x682f,0x80b7,0xd1fc,0xe320,0xca30,0xc1d3,0xc365}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xaeba4733,0x84bff3d4,0xa71a453a,0x4604e0fa,0x9bc2f02b,0x5fc5b114,0x1a8d5f8d,0x175d2302,0x83513655,0x698c051b,0x8c83c745,0xdd4bdd6a,0x80b7682f,0xe320d1fc,0xc1d3ca30,0xc365}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xaeba4733,0x84bff3d4,0xa71a453a,0x4604e0fa,0x9bc2f02b,0x5fc5b114,0x1a8d5f8d,0x175d2302,0x83513655,0x698c051b,0x8c83c745,0xdd4bdd6a,0x80b7682f,0xe320d1fc,0xc1d3ca30,0xc365}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x84bff3d4aeba4733,0x4604e0faa71a453a,0x5fc5b1149bc2f02b,0x175d23021a8d5f8d,0x698c051b83513655,0xdd4bdd6a8c83c745,0xe320d1fc80b7682f,0xc365c1d3ca30}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x84bff3d4aeba4733,0x4604e0faa71a453a,0x5fc5b1149bc2f02b,0x175d23021a8d5f8d,0x698c051b83513655,0xdd4bdd6a8c83c745,0xe320d1fc80b7682f,0xc365c1d3ca30}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe32c,0x5173,0xdcb0,0xe05d,0x3a7e,0x6e8c,0xfd38,0xbed7,0x5fe0,0xa986,0x26f1,0xedf0,0x8fc7,0x1dbc,0xa48e,0x2e70,0x6648,0xe767,0xe8c3,0xf05b,0x26aa,0x63b6,0xf8f6,0x5304,0x7042,0x7c93,0x54a2,0xe675,0xd3ea,0x2b1,0xb36e,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe32c,0x5173,0xdcb0,0xe05d,0x3a7e,0x6e8c,0xfd38,0xbed7,0x5fe0,0xa986,0x26f1,0xedf0,0x8fc7,0x1dbc,0xa48e,0x2e70,0x6648,0xe767,0xe8c3,0xf05b,0x26aa,0x63b6,0xf8f6,0x5304,0x7042,0x7c93,0x54a2,0xe675,0xd3ea,0x2b1,0xb36e,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5173e32c,0xe05ddcb0,0x6e8c3a7e,0xbed7fd38,0xa9865fe0,0xedf026f1,0x1dbc8fc7,0x2e70a48e,0xe7676648,0xf05be8c3,0x63b626aa,0x5304f8f6,0x7c937042,0xe67554a2,0x2b1d3ea,0x8b36e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5173e32c,0xe05ddcb0,0x6e8c3a7e,0xbed7fd38,0xa9865fe0,0xedf026f1,0x1dbc8fc7,0x2e70a48e,0xe7676648,0xf05be8c3,0x63b626aa,0x5304f8f6,0x7c937042,0xe67554a2,0x2b1d3ea,0x8b36e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe05ddcb05173e32c,0xbed7fd386e8c3a7e,0xedf026f1a9865fe0,0x2e70a48e1dbc8fc7,0xf05be8c3e7676648,0x5304f8f663b626aa,0xe67554a27c937042,0x8b36e02b1d3ea}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe05ddcb05173e32c,0xbed7fd386e8c3a7e,0xedf026f1a9865fe0,0x2e70a48e1dbc8fc7,0xf05be8c3e7676648,0x5304f8f663b626aa,0xe67554a27c937042,0x8b36e02b1d3ea}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x47c6,0xa185,0xd364,0x2b7c,0x1006,0x8e16,0xb5de,0xd151,0xf6de,0x44d9,0x940d,0x4fc7,0x1536,0x3fa5,0x2b67,0xcb04,0xf835,0x5516,0xd98b,0x7e21,0xb8e0,0x8241,0x7736,0x5cab,0x60fc,0xacfe,0x6533,0x837d,0x3b86,0xf8cd,0x2384,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x47c6,0xa185,0xd364,0x2b7c,0x1006,0x8e16,0xb5de,0xd151,0xf6de,0x44d9,0x940d,0x4fc7,0x1536,0x3fa5,0x2b67,0xcb04,0xf835,0x5516,0xd98b,0x7e21,0xb8e0,0x8241,0x7736,0x5cab,0x60fc,0xacfe,0x6533,0x837d,0x3b86,0xf8cd,0x2384,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa18547c6,0x2b7cd364,0x8e161006,0xd151b5de,0x44d9f6de,0x4fc7940d,0x3fa51536,0xcb042b67,0x5516f835,0x7e21d98b,0x8241b8e0,0x5cab7736,0xacfe60fc,0x837d6533,0xf8cd3b86,0x72384}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa18547c6,0x2b7cd364,0x8e161006,0xd151b5de,0x44d9f6de,0x4fc7940d,0x3fa51536,0xcb042b67,0x5516f835,0x7e21d98b,0x8241b8e0,0x5cab7736,0xacfe60fc,0x837d6533,0xf8cd3b86,0x72384}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2b7cd364a18547c6,0xd151b5de8e161006,0x4fc7940d44d9f6de,0xcb042b673fa51536,0x7e21d98b5516f835,0x5cab77368241b8e0,0x837d6533acfe60fc,0x72384f8cd3b86}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2b7cd364a18547c6,0xd151b5de8e161006,0x4fc7940d44d9f6de,0xcb042b673fa51536,0x7e21d98b5516f835,0x5cab77368241b8e0,0x837d6533acfe60fc,0x72384f8cd3b86}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x116d, 0x165f, 0x7d3, 0x488, 0xe08, 0xb12, 0x2e0, 0x18b4, 0xdbb, 0x181e, 0xe50, 0x19b8, 0xc17, 0x1c82, 0x1cf6, 0x7b9, 0xebb, 0x1c0a, 0x183a, 0x1f42, 0x1a3e, 0x176b, 0x19fa, 0x7e4, 0x1646, 0x1dc5, 0x1e9d, 0x4b3, 0x18df, 0xf2d, 0xe2e, 0x2e3, 0x903, 0x19ef, 0x1f94, 0x237, 0x18ef, 0x26c, 0x12f} @@ -2488,27 +2488,27 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x51d, 0x1394, 0xcca, 0x1568, 0x1790, 0x11d6, 0x18aa, 0xe65, 0x1e8e, 0x4fe, 0xab9, 0x1496, 0x167d, 0x1b42, 0x1f85, 0x1d7a, 0x8c4, 0x17ea, 0x1269, 0x16, 0x1fbf, 0x8b5, 0x6f4, 0x1202, 0x17c4, 0x427, 0x1273, 0x14f, 0x49c, 0xfba, 0x1b3b, 0x13cd, 0x10ee, 0x634, 0x10ae, 0x2c4, 0x10b4, 0x1377, 0xfe} +{0x1e97, 0x1f23, 0x161, 0x7b2, 0x1221, 0x1d36, 0x14f1, 0xaa0, 0xce3, 0x1f6c, 0xeaf, 0x549, 0xa24, 0xe15, 0x1862, 0x1dba, 0xc75, 0xf1d, 0x15f9, 0x50d, 0xa99, 0x97b, 0xc21, 0x1549, 0x1c88, 0xfbe, 0xe33, 0xb27, 0x1dae, 0xb00, 0x82f, 0x44a, 0x371, 0x5c0, 0x1174, 0x1b28, 0xa0b, 0x9bd, 0x206} #elif RADIX == 32 -{0x28e92dc, 0x10cca9ca, 0x15af2156, 0x132e2aa3, 0x4fef473, 0x1692cab9, 0x2ed0acf, 0xc4ebd7e, 0x4d37ea4, 0xfefc02d, 0x237a22d, 0x4f7c490, 0x53e4e64, 0x17dd1270, 0x1d3cdd9d, 0xb8c690e, 0x5a0b121, 0x1bc} +{0x1f4ba664, 0x4161f91, 0xda4427b, 0x15053c7a, 0x1f6c671a, 0x10a92eaf, 0x11385544, 0x75edd61, 0xbf2f1d6, 0x1aa64a1b, 0x9610a5e, 0x17dc88aa, 0xc9dc66f, 0x158076b9, 0x244a417, 0x1d0b8037, 0x105eca22, 0x7ea} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x215686654e50a3a4, 0xfbd1ce65c55475af, 0x2ed0acfb49655c93, 0x6934dfa9189d7afc, 0x920237a22d7f7e01, 0x893814f939909ef8, 0x18d21dd3cdd9dbee, 0x134dde1682c4857} +{0x427b20b0fc8fd2e9, 0xb19c6aa0a78f4da4, 0x13855448549757fd, 0xdafcbc758ebdbac3, 0x1549610a5ed53250, 0x3b5cb27719befb91, 0x17006e244a417ac0, 0x1026f5417b288ba} #else -{0xad0cca9ca14749, 0x1ce65c55475af21, 0x7da4b2ae49fde8, 0x46275ebf0bb42b, 0x1afefc02d269bf5, 0x109ef8920237a22, 0xdf7449c0a7c9cc, 0x15c6348774f3767, 0xb9bbc2d05890} +{0xf64161f91fa5d3, 0x6aa0a78f4da442, 0x242a4babfed8ce, 0x163af6eb0c4e155, 0x1daa64a1b5f978e, 0x1efb911549610a5, 0x1d601dae593b8cd, 0xe85c01b8912905, 0x54dea82f6511} #endif #endif , #if 0 #elif RADIX == 16 -{0x17ab, 0x1e1a, 0x1bfe, 0x1f73, 0x1eb9, 0xf30, 0x1cca, 0x1aaf, 0xbea, 0xa1b, 0xb73, 0x86d, 0x1c13, 0x1c31, 0x1e6e, 0x1fbf, 0x968, 0x10f0, 0xb53, 0x1418, 0x11c6, 0x65f, 0x188, 0x2c7, 0x79b, 0xa9, 0xa92, 0x12b0, 0x1b53, 0x1564, 0xfa7, 0x1fd7, 0xa5b, 0xb32, 0x1bc8, 0xc90, 0x11ee, 0x1f6, 0x3f2} +{0xcc, 0x1cb1, 0x706, 0x1f0b, 0xa79, 0xd89, 0xd1f, 0x1067, 0x1c50, 0x1e70, 0x41c, 0x1ce8, 0xd29, 0x7c7, 0x733, 0x460, 0x1e22, 0xe0b, 0x7f6, 0x1387, 0xe84, 0x273, 0x13e1, 0x1f1d, 0x1643, 0x1f1a, 0x3e, 0x7b7, 0xecf, 0x1578, 0x357, 0xaf4, 0x1f6c, 0x4c8, 0x11b9, 0x866, 0x80a, 0x13e2, 0x499} #elif RADIX == 32 -{0xbd5cad1, 0x7bfef0d, 0xc3d73f7, 0x157f329e, 0xa1b5f56, 0xd0dab73, 0x1770c782, 0x168fdff9, 0x16a70f04, 0x1c71a830, 0x70c4197, 0x15279b16, 0xac15240, 0x1ab26d4e, 0x17fd77d3, 0x121664a5, 0xf732437, 0xa34} +{0x1066573b, 0x16706e58, 0x254f3f0, 0x33b47db, 0x1e70e284, 0x79d041c, 
0x199f1da5, 0x222301c, 0xfece0bf, 0x1ba1270e, 0x1d9f089c, 0x35643f8, 0x1edc07df, 0x1abc3b3c, 0x18af41ab, 0xe4991f6, 0x5219a3, 0x292} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x73f73dff786af572, 0x6d7d5aafe653cc3d, 0x770c782686d5b9a8, 0x85a9c3c12d1fbff3, 0x62c70c4197e38d41, 0x36a72b054902a4f3, 0x2cc94b7fd77d3d59, 0x1307da3dcc90de4} +{0xf3f0b38372c41995, 0xc38a106768fb6254, 0x99f1da53ce820e79, 0x73fb382fc4446039, 0x7f1d9f089cdd0938, 0x1d9e7b701f7c6ac8, 0x9323ed8af41abd5e, 0x15cf890148668dc} #else -{0x1ee7bfef0d5eae5, 0x15aafe653cc3d73, 0x13436adcd436be, 0x4b47effcddc31e, 0xfc71a830b53878, 0x2a4f362c70c419, 0x1eac9b539582a48, 0x190b3252dff5df4, 0xb0fb47b9921b} +{0x1e16706e588332b, 0x106768fb6254f3, 0x129e741073ce1c5, 0x1f111180e667c76, 0x19ba1270e7f6705, 0x1c6ac87f1d9f089, 0x1eaf0ecf3db80fb, 0x1724c8fb62bd06a, 0x109f120290cd1} #endif #endif }, { @@ -2540,27 +2540,27 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}, {{ #if 0 #elif RADIX == 16 -{0x166f, 0x4b7, 0x1268, 0x18f5, 0x10a9, 0x17ea, 0x105e, 0x1090, 0x1c31, 0x624, 0xec6, 0xea1, 0x17d2, 0xf55, 0x10d3, 0x8fb, 0x9ab, 0x1ae2, 0x952, 0xcab, 0x100d, 0x702, 0xc4d, 0x1387, 0x344, 0xdaf, 0x1566, 0xf8c, 0x1e1c, 0x6f1, 0x1af9, 0xf1, 0xd6d, 0xa06, 0xb5c, 0x62c, 0x2e9, 0x1131, 0x683} +{0xd3d, 0x1bb8, 0x7b6, 0x2b7, 0x1f97, 0xc1a, 0x13ef, 0x6ac, 0xf50, 0x12de, 0xd45, 0x16d4, 0x69c, 0x16a8, 0xde4, 0xbd6, 0x14ea, 0x1d58, 0x193c, 0x160b, 0x1fc5, 0x20b, 0x1376, 0xbbb, 0x732, 0x8f8, 0x10f6, 0x1fef, 0xe7b, 0xb28, 0x10ba, 0x953, 0x1cfe, 0x1437, 0x1422, 0x178b, 0x1524, 0x590, 0x334} #elif RADIX == 32 -{0x1b37fb85, 0xb26825b, 0x1aa1538f, 0x48417af, 0x624e18c, 0x9d42ec6, 0x9bd56fa, 0x1ab47dc3, 0x12a5ae24, 0x14035956, 0x76269c0, 0x15e3449c, 0x1e32accd, 0x1378f871, 0x1a0f1d7c, 0x17140cd6, 0x17498b16, 0x608} +{0x69ebcc0, 0xe7b6ddc, 0x6bf2e2b, 0x1564fbd8, 0x12de7a81, 0x12da8d45, 0x125aa0d3, 0xea5eb37, 0x1279d58a, 0x1ff16c17, 0x1b9bb082, 0x1f07325d, 0x1fbe1ec8, 0x59439ef, 0x1c95385d, 0x8a86fcf, 0x925e2e8, 0xc85} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x538f593412decdfe, 0x9386309082f5faa1, 0x9bd56fa4ea176318, 0xb4a96b893568fb86, 0x93876269c0a01aca, 0x7c38f8cab336bc68, 0x2819ada0f1d7c9bc, 0x17c4c45d262c5ae} +{0x2e2b73db6ee1a7af, 0x79ea06ac9f7b06bf, 0x25aa0d396d46a2cb, 0xbc9e75629d4bd66f, 0x4bbb9bb082ff8b60, 0x1cf7fef87b23e0e6, 0x50df9fc95385d2ca, 0x51642a4978ba11} #else -{0x11eb26825bd9bfd, 0x309082f5faa153, 0x1d2750bb18c49c3, 0x4d5a3ee1a6f55b, 0x14035956952d71, 0x16bc6893876269c, 0x4de3e1c7c65599, 0xb8a066b683c75f, 0x148988ba4c58b} +{0x56e7b6ddc34f5e, 0x6ac9f7b06bf2e, 0x9cb6a35165bcf5, 0xa752f59bc96a83, 0x5ff16c1793ceac, 0x3e0e64bbb9bb08, 0x9650e7bff7c3d9, 0x45437e7f254e17, 0xa2c85492f174} #endif #endif , #if 0 #elif RADIX == 16 -{0x826, 0x1efe, 0xa95, 0x174d, 0x11b5, 0x1184, 0x1d4, 0x1024, 0x1d44, 0x349, 0x83c, 0x665, 0x4a2, 0x1288, 0x473, 0xa16, 0xe54, 0xafc, 0x6e2, 0x13f1, 0x217, 0x11e4, 0x1988, 0xe26, 0xd9a, 0x168f, 0x3d, 0x1436, 0x311, 0x148d, 0x168f, 0x1ad8, 0x1156, 0xb8, 0x193f, 0x1655, 0x279, 0x5cd, 0x65e} +{0xb59, 0x1fb4, 0x1dac, 0x52d, 0x794, 0x1254, 0x1f9f, 0xdba, 0x151d, 0x1f01, 0x7f7, 0xb2b, 0x7e4, 0x1b36, 0x912, 0x1366, 0x1a04, 0x8ed, 0x1e58, 0x18f0, 0xffd, 0x455, 0xba9, 0x16d, 0x155f, 0x1198, 0x1264, 0x158b, 0x766, 0x66e, 0x1403, 0x15fd, 0xe0e, 0x1368, 0x9e6, 0x4af, 0x1fba, 0x1047, 0x464} #elif RADIX == 32 -{0x41378c1, 0x1aa95f7f, 0x1236b74, 0x1207523, 0x349ea24, 0x8cca83c, 0x19ca2094, 0x5450b11, 0xdc4afc7, 0x85e7e2, 0x6cc4479, 0x11ed9a71, 0x10d807b6, 0x1a468c46, 0xdad8b47, 0xfc17115, 
0x13cd9572, 0xe8} +{0x5acd34c, 0x1bdacfda, 0x150f2852, 0xdd7e7e4, 0x1f01a8eb, 0x116567f7, 0x96cd8fc, 0x49b324, 0x1cb08edd, 0xbff71e1, 0xd5d4915, 0x13155f0b, 0x162e4c91, 0x13371d9a, 0x1d5fda01, 0x19a6d0e0, 0x1dd12bd3, 0x3f} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6b74d54afbf904de, 0x27a890240ea46123, 0x9ca2094466541e0d, 0x13712bf1ca8a1623, 0x4e26cc4479042f3f, 0x462343601eda3db3, 0x82e22adad8b47d23, 0x517344f3655c9f} +{0x2852ded67ed16b34, 0x6a3adbafcfc950f, 0x96cd8fc8b2b3fbfc, 0xf2c23b740936648, 0xe16d5d49155ffb8f, 0x8ecd58b9324662ab, 0x4da1c1d5fda0199b, 0x16411ff744af4f3} #else -{0xe9aa95f7f209bc, 0x90240ea461236b, 0xa2332a0f0693d4, 0x72a28588e72882, 0x12085e7e26e257e, 0x1a3db34e26cc447, 0x1e91a311a1b00f6, 0x7e0b88ab6b62d1, 0xa2e689e6cab9} +{0xa5bdacfda2d669, 0x1adbafcfc950f28, 0x1e45959fdfe0351, 0x1d024d99225b363, 0xabff71e1e58476, 0x662abe16d5d491, 0xccdc766ac5c992, 0x1cd36870757f680, 0x11823fee895e9} #endif #endif }, { @@ -2592,27 +2592,27 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}, {{ #if 0 #elif RADIX == 16 -{0x1aab, 0xe01, 0x1bf3, 0x122d, 0xd71, 0x34e, 0x153b, 0x1444, 0x1d19, 0x1165, 0x1496, 0x568, 0x12d4, 0x105c, 0x1129, 0x2c7, 0x1706, 0x359, 0x1a4f, 0x114, 0x758, 0x1780, 0x1617, 0x1485, 0x1147, 0xa4f, 0x1f77, 0xf13, 0x1547, 0x103c, 0x352, 0x125d, 0xb1e, 0x1526, 0x1708, 0xfb5, 0x17bf, 0x1d55, 0x6bc} +{0x1156, 0x273, 0x1153, 0x89b, 0xc67, 0x9dc, 0x14b5, 0x1d27, 0x1c5e, 0x18e6, 0x1dfa, 0x1beb, 0x12e7, 0xe02, 0x1614, 0x12b0, 0x1646, 0x1bdb, 0x1e1f, 0x1eb6, 0x361, 0x1fb, 0x2ee, 0xee2, 0x178c, 0xedd, 0x1ba6, 0xf1c, 0x1e7f, 0x1dac, 0x137d, 0x18db, 0x8e8, 0xa0, 0x1faf, 0x5cb, 0x1078, 0x1562, 0x36e} #elif RADIX == 32 -{0x1d55ffc5, 0x1bbf3700, 0x139ae322, 0x2254ec6, 0x1165e8cd, 0x10ad1496, 0x14c1725a, 0x106163c4, 0x149e359b, 0x1d60229, 0x5b0bde0, 0x9f147a4, 0x1c4feeea, 0x81e551d, 0x1d25d1a9, 0x22a4cb1, 0x1dfbed6e, 0x72d} +{0x18ab4116, 0x17153139, 0x1718ce89, 0x93d2d53, 0x18e6e2f7, 0x1f7d7dfa, 0xa380a5c, 0x4695858, 0x1c3fbdbb, 0x18d87d6d, 0x217707e, 0x1bb78c77, 0x1c7374ce, 0x1ed679fd, 0x118db9be, 0xbc1408e, 0x3c172ff, 0x214} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xe322ddf9b807557f, 0x97a33444a9d8d39a, 0x4c1725a8568a4b45, 0x4d278d66e0c2c789, 0xf485b0bde00eb011, 0x2a8ef13fbba93e28, 0x549963d25d1a940f, 0x197556f7efb5b84} +{0xce89b8a989ce2ad0, 0x9b8bdd27a5aa7718, 0xa380a5cfbebefd63, 0x6f0fef6ec8d2b0b0, 0x8ee217707ec6c3eb, 0x3cfef1cdd33b76f1, 0x82811d18db9bef6b, 0x7558a0f05cbfd7} #else -{0x45bbf3700eaaff, 0x13444a9d8d39ae3, 0xd42b4525a2cbd1, 0x1b830b1e25305c9, 0x1d60229a4f1ac, 0x93e28f485b0bde, 0xa079547789fddd, 0x1152658f49746a, 0x17eaadefdf6b7} +{0x1137153139c55a0, 0x1dd27a5aa7718ce, 0xe7df5f7eb1cdc5, 0x1b234ac2c28e029, 0x1d8d87d6de1fded, 0x1b76f18ee217707, 0x17b59e7f78e6e99, 0x15e0a0474636e6f, 0xeab141e0b97f} #endif #endif , #if 0 #elif RADIX == 16 -{0x204, 0x9f6, 0x1dba, 0x110e, 0x6ea, 0x112a, 0xa11, 0xd06, 0x15aa, 0x1f0b, 0xeec, 0xef1, 0x1edc, 0x1604, 0x65b, 0x129, 0x39d, 0x8f8, 0x5d5, 0x672, 0x150a, 0x233, 0xc20, 0x12ba, 0x1855, 0x15a6, 0xd50, 0x1c71, 0x15b7, 0xf04, 0x579, 0x16d2, 0xbac, 0x4c9, 0xaf5, 0x514, 0xf27, 0xef, 0x36a} +{0xb32, 0x149, 0x1615, 0x77e, 0xf55, 0x189, 0xe2a, 0x13bc, 0xf83, 0x124d, 0xcaa, 0x22, 0xcea, 0x8f9, 0xc5e, 0x8bc, 0x4ff, 0x14da, 0x394, 0x4a2, 0x1767, 0x1d20, 0x1531, 0x1dff, 0x929, 0x15cf, 0x1f69, 0x1630, 0x669, 0x11ec, 0x162c, 0xcf3, 0xde5, 0x185f, 0x1da0, 0x1db9, 0x1d93, 0xb9b, 0x38f} #elif RADIX == 32 -{0x10240be, 0x1ddba4fb, 0xa8dd510, 0x8328462, 0x1f0bad53, 0x11de2eec, 0xdd813db, 
0x19d09499, 0xbaa8f81, 0x1d428ce4, 0x1a61008c, 0x14d85595, 0x11c5aa15, 0x178256df, 0x196d22bc, 0x1d4992ba, 0x19394515, 0x27b} +{0x15994382, 0x1d6150a4, 0x25eaa77, 0x1de38a83, 0x124d7c1c, 0x8044caa, 0xf23e59d, 0xff45e31, 0x7294da2, 0x5d9c944, 0x1fa98f48, 0x19e929ef, 0x18c3ed35, 0x8f619a6, 0xacf3b16, 0x830bede, 0xc9f6e7b, 0x1df} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xd510eedd27d84090, 0x2eb54d06508c4a8d, 0xdd813db8ef17767c, 0x22eaa3e073a12932, 0xb2ba61008cea1467, 0x2b6fc716a8569b0a, 0x93257596d22bcbc1, 0x503bde4e51457a} +{0xaa77eb0a85256650, 0x35f073bc7150625e, 0xf23e59d402265549, 0x21ca53689fe8bc62, 0x3dffa98f482ece4a, 0xcd3630fb4d73d25, 0x617dbcacf3b1647b, 0x17ae6fb27db9ed0} #else -{0x21ddba4fb08120, 0x14d06508c4a8dd5, 0xdc778bbb3e175a, 0x1ce84a4cb7604f, 0x19d428ce45d547c, 0x169b0ab2ba61008, 0x5e095b7e38b542, 0x1ea4c95d65b48af, 0xa077bc9ca28a} +{0xefd6150a4acca1, 0x73bc7150625eaa, 0xea01132aa49af8, 0x27fa2f18bc8f96, 0x105d9c944394a6d, 0x173d253dffa98f4, 0x123d8669b187da6, 0x14185f6f2b3cec5, 0x145cdf64fb73d} #endif #endif }, { @@ -2641,223 +2641,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2ce5,0xf14f,0xd32e,0x18c2,0x5217,0x2833,0xa7fc,0x4090,0xd9d5,0x4d89,0xe770,0x3801,0xbca0,0x6ac9,0x5432,0x2ee5,0x4356,0x827a,0xdf1a,0x8911,0x4102,0xf05a,0x3e5,0x18b9,0xf66f,0x77ad,0xd99b,0xea2f,0xa030,0x36bb,0xe071,0xd}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1dad,0x7f9a,0xf7d5,0xe103,0xbcd7,0xd758,0xdffc,0x8775,0x424c,0xf512,0x8d24,0x9441,0xa2ff,0x1a96,0xfec2,0xdbf,0x1653,0x6a57,0x1c7f,0x2253,0x3ed1,0xfe65,0xc239,0x9d9d,0x3f9d,0xe53,0xa7cd,0x2102,0xbf75,0x72de,0xfa6c,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf14f2ce5,0x18c2d32e,0x28335217,0x4090a7fc,0x4d89d9d5,0x3801e770,0x6ac9bca0,0x2ee55432,0x827a4356,0x8911df1a,0xf05a4102,0x18b903e5,0x77adf66f,0xea2fd99b,0x36bba030,0xde071}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7f9a1dad,0xe103f7d5,0xd758bcd7,0x8775dffc,0xf512424c,0x94418d24,0x1a96a2ff,0xdbffec2,0x6a571653,0x22531c7f,0xfe653ed1,0x9d9dc239,0xe533f9d,0x2102a7cd,0x72debf75,0x2fa6c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x18c2d32ef14f2ce5,0x4090a7fc28335217,0x3801e7704d89d9d5,0x2ee554326ac9bca0,0x8911df1a827a4356,0x18b903e5f05a4102,0xea2fd99b77adf66f,0xde07136bba030}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe103f7d57f9a1dad,0x8775dffcd758bcd7,0x94418d24f512424c,0xdbffec21a96a2ff,0x22531c7f6a571653,0x9d9dc239fe653ed1,0x2102a7cd0e533f9d,0x2fa6c72debf75}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x752,0x8c67,0x260,0x14f,0x138a,0x865,0x5e4d,0x4db9,0xca26,0x7a2c,0x4a7e,0x9422,0xb2f6,0xbdc8,0x841d,0x7e33,0x5677,0x38a6,0x9633,0x15fc,0xacce,0xbe04,0xdcef,0xb16c,0xd57b,0x7a5e,0x8395,0x1f8f,0xe4b9,0xe383,0x4be3,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3592,0xb6e5,0xb083,0xa93b,0xa140,0xb740,0x5865,0x1057,0xc57f,0xa78d,0x71f,0xf817,0x308d,0x708d,0xddf2,0x475,0x8850,0x2ada,0x50ee,0xe4bf,0x7038,0x5945,0xd2e9,0xe429,0x1765,0x5e1e,0x183a,0x249c,0x6da1,0x3147,0xcd38,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x8c670752,0x14f0260,0x865138a,0x4db95e4d,0x7a2cca26,0x94224a7e,0xbdc8b2f6,0x7e33841d,0x38a65677,0x15fc9633,0xbe04acce,0xb16cdcef,0x7a5ed57b,0x1f8f8395,0xe383e4b9,0x64be3}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb6e53592,0xa93bb083,0xb740a140,0x10575865,0xa78dc57f,0xf817071f,0x708d308d,0x475ddf2,0x2ada8850,0xe4bf50ee,0x59457038,0xe429d2e9,0x5e1e1765,0x249c183a,0x31476da1,0xdcd38}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x14f02608c670752,0x4db95e4d0865138a,0x94224a7e7a2cca26,0x7e33841dbdc8b2f6,0x15fc963338a65677,0xb16cdcefbe04acce,0x1f8f83957a5ed57b,0x64be3e383e4b9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa93bb083b6e53592,0x10575865b740a140,0xf817071fa78dc57f,0x475ddf2708d308d,0xe4bf50ee2ada8850,0xe429d2e959457038,0x249c183a5e1e1765,0xdcd3831476da1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9b63,0x40f2,0x5177,0x535f,0x5098,0x2f26,0x18a2,0x5e1c,0x5d70,0x33e9,0x1db9,0x2397,0xaccc,0xb98b,0xcae6,0x87bf,0xd43f,0xb329,0xf2d0,0x2f67,0x779c,0xd2ab,0x4369,0xc67d,0xd065,0x9550,0xf06b,0x1a2b,0xc6e,0x9b2b,0xbb64,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3b,0x696c,0x9cf7,0x924e,0x29b1,0xfa82,0x12f3,0xcd4b,0xf4c2,0xfdd9,0x406d,0x434e,0x4ab0,0xab5f,0xda69,0x708e,0xf25b,0x642d,0x6d6d,0xd93f,0x618f,0xe4dc,0x4514,0x2c42,0x38da,0x4841,0x27cd,0x2c70,0xde,0x9b00,0x5a54,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x40f29b63,0x535f5177,0x2f265098,0x5e1c18a2,0x33e95d70,0x23971db9,0xb98baccc,0x87bfcae6,0xb329d43f,0x2f67f2d0,0xd2ab779c,0xc67d4369,0x9550d065,0x1a2bf06b,0x9b2b0c6e,0x3bb64}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x696c003b,0x924e9cf7,0xfa8229b1,0xcd4b12f3,0xfdd9f4c2,0x434e406d,0xab5f4ab0,0x708eda69,0x642df25b,0xd93f6d6d,0xe4dc618f,0x2c424514,0x484138da,0x2c7027cd,0x9b0000de,0xd5a54}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x535f517740f29b63,0x5e1c18a22f265098,0x23971db933e95d70,0x87bfcae6b98baccc,0x2f67f2d0b329d43f,0xc67d4369d2ab779c,0x1a2bf06b9550d065,0x3bb649b2b0c6e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x924e9cf7696c003b,0xcd4b12f3fa8229b1,0x434e406dfdd9f4c2,0x708eda69ab5f4ab0,0xd93f6d6d642df25b,0x2c424514e4dc618f,0x2c7027cd484138da,0xd5a549b0000de}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xd31b,0xeb0,0x2cd1,0xe73d,0xade8,0xd7cc,0x5803,0xbf6f,0x262a,0xb276,0x188f,0xc7fe,0x435f,0x9536,0xabcd,0xd11a,0xbca9,0x7d85,0x20e5,0x76ee,0xbefd,0xfa5,0xfc1a,0xe746,0x990,0x8852,0x2664,0x15d0,0x5fcf,0xc944,0x1f8e,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe253,0x8065,0x82a,0x1efc,0x4328,0x28a7,0x2003,0x788a,0xbdb3,0xaed,0x72db,0x6bbe,0x5d00,0xe569,0x13d,0xf240,0xe9ac,0x95a8,0xe380,0xddac,0xc12e,0x19a,0x3dc6,0x6262,0xc062,0xf1ac,0x5832,0xdefd,0x408a,0x8d21,0x593,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xeb0d31b,0xe73d2cd1,0xd7ccade8,0xbf6f5803,0xb276262a,0xc7fe188f,0x9536435f,0xd11aabcd,0x7d85bca9,0x76ee20e5,0xfa5befd,0xe746fc1a,0x88520990,0x15d02664,0xc9445fcf,0x21f8e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8065e253,0x1efc082a,0x28a74328,0x788a2003,0xaedbdb3,0x6bbe72db,0xe5695d00,0xf240013d,0x95a8e9ac,0xddace380,0x19ac12e,0x62623dc6,0xf1acc062,0xdefd5832,0x8d21408a,0xd0593}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, 
._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe73d2cd10eb0d31b,0xbf6f5803d7ccade8,0xc7fe188fb276262a,0xd11aabcd9536435f,0x76ee20e57d85bca9,0xe746fc1a0fa5befd,0x15d0266488520990,0x21f8ec9445fcf}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1efc082a8065e253,0x788a200328a74328,0x6bbe72db0aedbdb3,0xf240013de5695d00,0xddace38095a8e9ac,0x62623dc6019ac12e,0xdefd5832f1acc062,0xd05938d21408a}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x4222,0xe40c,0x843f,0x3518,0x72d1,0xa757,0xb4e5,0x4347,0x3326,0xc267,0x30d,0xb77e,0x9907,0xcb8c,0xd175,0x8cf2,0x5440,0xb876,0x2316,0xa715,0xf0ab,0x9e96,0xa72f,0xcd7f,0x1e06,0xa42f,0x985f,0xdc2d,0xd9ee,0xe71e,0x2ae0,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe40c4222,0x3518843f,0xa75772d1,0x4347b4e5,0xc2673326,0xb77e030d,0xcb8c9907,0x8cf2d175,0xb8765440,0xa7152316,0x9e96f0ab,0xcd7fa72f,0xa42f1e06,0xdc2d985f,0xe71ed9ee,0x82ae0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3518843fe40c4222,0x4347b4e5a75772d1,0xb77e030dc2673326,0x8cf2d175cb8c9907,0xa7152316b8765440,0xcd7fa72f9e96f0ab,0xdc2d985fa42f1e06,0x82ae0e71ed9ee}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}}} #endif }, { #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x11ac,0x1c90,0x6c62,0x15fd,0x1924,0x5851,0x60c6,0x744c,0x80fd,0xa6b,0x5654,0x51a1,0x6589,0x803f,0xf265,0x4132,0x96d2,0x7497,0xcf0b,0x65,0x2e51,0x2bc,0x4203,0x3aad,0x1f2,0x5b40,0xcc1a,0x67e4,0xdfd3,0xba17,0x7a8c,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1c9011ac,0x15fd6c62,0x58511924,0x744c60c6,0xa6b80fd,0x51a15654,0x803f6589,0x4132f265,0x749796d2,0x65cf0b,0x2bc2e51,0x3aad4203,0x5b4001f2,0x67e4cc1a,0xba17dfd3,0x37a8c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x15fd6c621c9011ac,0x744c60c658511924,0x51a156540a6b80fd,0x4132f265803f6589,0x65cf0b749796d2,0x3aad420302bc2e51,0x67e4cc1a5b4001f2,0x37a8cba17dfd3}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x99f9,0x50f4,0xd750,0xb0a2,0xfdaa,0x6986,0x6b4b,0x34be,0x7bd5,0x3974,0xe05,0x8c18,0x6bb8,0xbb5a,0xcc33,0x63b5,0x943b,0xec49,0xb4ef,0xbdc4,0x5a2a,0x2fc8,0x85ad,0x1291,0xa29f,0x9618,0x721b,0x93f6,0xb40f,0x2e85,0xdfbb,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x46df,0x993e,0x6cc0,0xa409,0xa063,0x3e90,0x1bb7,0x1ed7,0xe56b,0xada4,0xdce,0xb050,0xc6aa,0x2b91,0x61ef,0x10ec,0x6ecc,0x3168,0x72f7,0xe69d,0xf599,0x59ed,0x7fe9,0x1cf6,0xab4b,0x9fb7,0x21f0,0xa281,0xc4d8,0xabda,0xbaac,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x50f499f9,0xb0a2d750,0x6986fdaa,0x34be6b4b,0x39747bd5,0x8c180e05,0xbb5a6bb8,0x63b5cc33,0xec49943b,0xbdc4b4ef,0x2fc85a2a,0x129185ad,0x9618a29f,0x93f6721b,0x2e85b40f,0xbdfbb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x993e46df,0xa4096cc0,0x3e90a063,0x1ed71bb7,0xada4e56b,0xb0500dce,0x2b91c6aa,0x10ec61ef,0x31686ecc,0xe69d72f7,0x59edf599,0x1cf67fe9,0x9fb7ab4b,0xa28121f0,0xabdac4d8,0x5baac}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb0a2d75050f499f9,0x34be6b4b6986fdaa,0x8c180e0539747bd5,0x63b5cc33bb5a6bb8,0xbdc4b4efec49943b,0x129185ad2fc85a2a,0x93f6721b9618a29f,0xbdfbb2e85b40f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa4096cc0993e46df,0x1ed71bb73e90a063,0xb0500dceada4e56b,0x10ec61ef2b91c6aa,0xe69d72f731686ecc,0x1cf67fe959edf599,0xa28121f09fb7ab4b,0x5baacabdac4d8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf4c0,0x4ff5,0x2aee,0x3e90,0x49,0xb2af,0xf257,0x111c,0xead0,0xc1d5,0xc7d9,0x8a7c,0x9579,0xf62,0xe1f6,0xb43c,0x8f3f,0x14ca,0x1b7b,0xc209,0xac8,0xf5cd,0xdfc0,0x5d39,0x9d8d,0x9c9a,0x2e6e,0xba54,0x79d5,0x4f02,0x1cfc,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x69f0,0x1c1,0x40a6,0x59c0,0xdf7c,0x7343,0xeb2e,0xf036,0x6d07,0xc3ee,0x6377,0xe0f0,0x9377,0xbcdf,0xa53c,0x8e52,0x233,0x3530,0x72e2,0xa026,0x3748,0x9995,0xad2,0xc440,0x86a,0x4191,0x1081,0x4662,0x2148,0xf8cd,0x1a9a,0x9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4ff5f4c0,0x3e902aee,0xb2af0049,0x111cf257,0xc1d5ead0,0x8a7cc7d9,0xf629579,0xb43ce1f6,0x14ca8f3f,0xc2091b7b,0xf5cd0ac8,0x5d39dfc0,0x9c9a9d8d,0xba542e6e,0x4f0279d5,0x21cfc}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1c169f0,0x59c040a6,0x7343df7c,0xf036eb2e,0xc3ee6d07,0xe0f06377,0xbcdf9377,0x8e52a53c,0x35300233,0xa02672e2,0x99953748,0xc4400ad2,0x4191086a,0x46621081,0xf8cd2148,0x91a9a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e902aee4ff5f4c0,0x111cf257b2af0049,0x8a7cc7d9c1d5ead0,0xb43ce1f60f629579,0xc2091b7b14ca8f3f,0x5d39dfc0f5cd0ac8,0xba542e6e9c9a9d8d,0x21cfc4f0279d5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x59c040a601c169f0,0xf036eb2e7343df7c,0xe0f06377c3ee6d07,0x8e52a53cbcdf9377,0xa02672e235300233,0xc4400ad299953748,0x466210814191086a,0x91a9af8cd2148}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1eb,0x1730,0x3343,0xcef3,0x2add,0x7615,0x353e,0xd52b,0x9951,0xc1,0x2292,0x69d0,0x4a9f,0xc1bd,0xfec7,0xd332,0x72b7,0x67f8,0xaa27,0x61a4,0x33dd,0x8ec0,0xfe1d,0x9a69,0x38ac,0x60f,0x209b,0xbb33,0x55b1,0x13f5,0x5c80,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x977d,0x5a09,0xd718,0x1ac3,0x2d52,0xdf82,0x9571,0x2023,0xdc9b,0xe759,0x26c0,0x2a59,0x5273,0x9024,0x6bf1,0x9b45,0xe3ef,0xfd9c,0x6189,0x621f,0xb7e6,0x6a8c,0xa219,0xf7d1,0xd502,0xb115,0xcb9e,0x7bf7,0x773,0x7222,0x7aa1,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x173001eb,0xcef33343,0x76152add,0xd52b353e,0xc19951,0x69d02292,0xc1bd4a9f,0xd332fec7,0x67f872b7,0x61a4aa27,0x8ec033dd,0x9a69fe1d,0x60f38ac,0xbb33209b,0x13f555b1,0xc5c80}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5a09977d,0x1ac3d718,0xdf822d52,0x20239571,0xe759dc9b,0x2a5926c0,0x90245273,0x9b456bf1,0xfd9ce3ef,0x621f6189,0x6a8cb7e6,0xf7d1a219,0xb115d502,0x7bf7cb9e,0x72220773,0x47aa1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcef33343173001eb,0xd52b353e76152add,0x69d0229200c19951,0xd332fec7c1bd4a9f,0x61a4aa2767f872b7,0x9a69fe1d8ec033dd,0xbb33209b060f38ac,0xc5c8013f555b1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1ac3d7185a09977d,0x20239571df822d52,0x2a5926c0e759dc9b,0x9b456bf190245273,0x621f6189fd9ce3ef,0xf7d1a2196a8cb7e6,0x7bf7cb9eb115d502,0x47aa172220773}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x6607,0xaf0b,0x28af,0x4f5d,0x255,0x9679,0x94b4,0xcb41,0x842a,0xc68b,0xf1fa,0x73e7,0x9447,0x44a5,0x33cc,0x9c4a,0x6bc4,0x13b6,0x4b10,0x423b,0xa5d5,0xd037,0x7a52,0xed6e,0x5d60,0x69e7,0x8de4,0x6c09,0x4bf0,0xd17a,0x2044,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb921,0x66c1,0x933f,0x5bf6,0x5f9c,0xc16f,0xe448,0xe128,0x1a94,0x525b,0xf231,0x4faf,0x3955,0xd46e,0x9e10,0xef13,0x9133,0xce97,0x8d08,0x1962,0xa66,0xa612,0x8016,0xe309,0x54b4,0x6048,0xde0f,0x5d7e,0x3b27,0x5425,0x4553,0xa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xaf0b6607,0x4f5d28af,0x96790255,0xcb4194b4,0xc68b842a,0x73e7f1fa,0x44a59447,0x9c4a33cc,0x13b66bc4,0x423b4b10,0xd037a5d5,0xed6e7a52,0x69e75d60,0x6c098de4,0xd17a4bf0,0x42044}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x66c1b921,0x5bf6933f,0xc16f5f9c,0xe128e448,0x525b1a94,0x4faff231,0xd46e3955,0xef139e10,0xce979133,0x19628d08,0xa6120a66,0xe3098016,0x604854b4,0x5d7ede0f,0x54253b27,0xa4553}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4f5d28afaf0b6607,0xcb4194b496790255,0x73e7f1fac68b842a,0x9c4a33cc44a59447,0x423b4b1013b66bc4,0xed6e7a52d037a5d5,0x6c098de469e75d60,0x42044d17a4bf0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5bf6933f66c1b921,0xe128e448c16f5f9c,0x4faff231525b1a94,0xef139e10d46e3955,0x19628d08ce979133,0xe3098016a6120a66,0x5d7ede0f604854b4,0xa455354253b27}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2ce5,0xf14f,0xd32e,0x18c2,0x5217,0x2833,0xa7fc,0x4090,0xd9d5,0x4d89,0xe770,0x3801,0xbca0,0x6ac9,0x5432,0x2ee5,0x4356,0x827a,0xdf1a,0x8911,0x4102,0xf05a,0x3e5,0x18b9,0xf66f,0x77ad,0xd99b,0xea2f,0xa030,0x36bb,0xe071,0xd}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1dad,0x7f9a,0xf7d5,0xe103,0xbcd7,0xd758,0xdffc,0x8775,0x424c,0xf512,0x8d24,0x9441,0xa2ff,0x1a96,0xfec2,0xdbf,0x1653,0x6a57,0x1c7f,0x2253,0x3ed1,0xfe65,0xc239,0x9d9d,0x3f9d,0xe53,0xa7cd,0x2102,0xbf75,0x72de,0xfa6c,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf14f2ce5,0x18c2d32e,0x28335217,0x4090a7fc,0x4d89d9d5,0x3801e770,0x6ac9bca0,0x2ee55432,0x827a4356,0x8911df1a,0xf05a4102,0x18b903e5,0x77adf66f,0xea2fd99b,0x36bba030,0xde071}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7f9a1dad,0xe103f7d5,0xd758bcd7,0x8775dffc,0xf512424c,0x94418d24,0x1a96a2ff,0xdbffec2,0x6a571653,0x22531c7f,0xfe653ed1,0x9d9dc239,0xe533f9d,0x2102a7cd,0x72debf75,0x2fa6c}}}} #elif GMP_LIMB_BITS == 
64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x18c2d32ef14f2ce5,0x4090a7fc28335217,0x3801e7704d89d9d5,0x2ee554326ac9bca0,0x8911df1a827a4356,0x18b903e5f05a4102,0xea2fd99b77adf66f,0xde07136bba030}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe103f7d57f9a1dad,0x8775dffcd758bcd7,0x94418d24f512424c,0xdbffec21a96a2ff,0x22531c7f6a571653,0x9d9dc239fe653ed1,0x2102a7cd0e533f9d,0x2fa6c72debf75}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x752,0x8c67,0x260,0x14f,0x138a,0x865,0x5e4d,0x4db9,0xca26,0x7a2c,0x4a7e,0x9422,0xb2f6,0xbdc8,0x841d,0x7e33,0x5677,0x38a6,0x9633,0x15fc,0xacce,0xbe04,0xdcef,0xb16c,0xd57b,0x7a5e,0x8395,0x1f8f,0xe4b9,0xe383,0x4be3,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3592,0xb6e5,0xb083,0xa93b,0xa140,0xb740,0x5865,0x1057,0xc57f,0xa78d,0x71f,0xf817,0x308d,0x708d,0xddf2,0x475,0x8850,0x2ada,0x50ee,0xe4bf,0x7038,0x5945,0xd2e9,0xe429,0x1765,0x5e1e,0x183a,0x249c,0x6da1,0x3147,0xcd38,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8c670752,0x14f0260,0x865138a,0x4db95e4d,0x7a2cca26,0x94224a7e,0xbdc8b2f6,0x7e33841d,0x38a65677,0x15fc9633,0xbe04acce,0xb16cdcef,0x7a5ed57b,0x1f8f8395,0xe383e4b9,0x64be3}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb6e53592,0xa93bb083,0xb740a140,0x10575865,0xa78dc57f,0xf817071f,0x708d308d,0x475ddf2,0x2ada8850,0xe4bf50ee,0x59457038,0xe429d2e9,0x5e1e1765,0x249c183a,0x31476da1,0xdcd38}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x14f02608c670752,0x4db95e4d0865138a,0x94224a7e7a2cca26,0x7e33841dbdc8b2f6,0x15fc963338a65677,0xb16cdcefbe04acce,0x1f8f83957a5ed57b,0x64be3e383e4b9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa93bb083b6e53592,0x10575865b740a140,0xf817071fa78dc57f,0x475ddf2708d308d,0xe4bf50ee2ada8850,0xe429d2e959457038,0x249c183a5e1e1765,0xdcd3831476da1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9b63,0x40f2,0x5177,0x535f,0x5098,0x2f26,0x18a2,0x5e1c,0x5d70,0x33e9,0x1db9,0x2397,0xaccc,0xb98b,0xcae6,0x87bf,0xd43f,0xb329,0xf2d0,0x2f67,0x779c,0xd2ab,0x4369,0xc67d,0xd065,0x9550,0xf06b,0x1a2b,0xc6e,0x9b2b,0xbb64,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3b,0x696c,0x9cf7,0x924e,0x29b1,0xfa82,0x12f3,0xcd4b,0xf4c2,0xfdd9,0x406d,0x434e,0x4ab0,0xab5f,0xda69,0x708e,0xf25b,0x642d,0x6d6d,0xd93f,0x618f,0xe4dc,0x4514,0x2c42,0x38da,0x4841,0x27cd,0x2c70,0xde,0x9b00,0x5a54,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x40f29b63,0x535f5177,0x2f265098,0x5e1c18a2,0x33e95d70,0x23971db9,0xb98baccc,0x87bfcae6,0xb329d43f,0x2f67f2d0,0xd2ab779c,0xc67d4369,0x9550d065,0x1a2bf06b,0x9b2b0c6e,0x3bb64}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x696c003b,0x924e9cf7,0xfa8229b1,0xcd4b12f3,0xfdd9f4c2,0x434e406d,0xab5f4ab0,0x708eda69,0x642df25b,0xd93f6d6d,0xe4dc618f,0x2c424514,0x484138da,0x2c7027cd,0x9b0000de,0xd5a54}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x535f517740f29b63,0x5e1c18a22f265098,0x23971db933e95d70,0x87bfcae6b98baccc,0x2f67f2d0b329d43f,0xc67d4369d2ab779c,0x1a2bf06b9550d065,0x3bb649b2b0c6e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x924e9cf7696c003b,0xcd4b12f3fa8229b1,0x434e406dfdd9f4c2,0x708eda69ab5f4ab0,0xd93f6d6d642df25b,0x2c424514e4dc618f,0x2c7027cd484138da,0xd5a549b0000de}}}} #endif , #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xd31b,0xeb0,0x2cd1,0xe73d,0xade8,0xd7cc,0x5803,0xbf6f,0x262a,0xb276,0x188f,0xc7fe,0x435f,0x9536,0xabcd,0xd11a,0xbca9,0x7d85,0x20e5,0x76ee,0xbefd,0xfa5,0xfc1a,0xe746,0x990,0x8852,0x2664,0x15d0,0x5fcf,0xc944,0x1f8e,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe253,0x8065,0x82a,0x1efc,0x4328,0x28a7,0x2003,0x788a,0xbdb3,0xaed,0x72db,0x6bbe,0x5d00,0xe569,0x13d,0xf240,0xe9ac,0x95a8,0xe380,0xddac,0xc12e,0x19a,0x3dc6,0x6262,0xc062,0xf1ac,0x5832,0xdefd,0x408a,0x8d21,0x593,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xeb0d31b,0xe73d2cd1,0xd7ccade8,0xbf6f5803,0xb276262a,0xc7fe188f,0x9536435f,0xd11aabcd,0x7d85bca9,0x76ee20e5,0xfa5befd,0xe746fc1a,0x88520990,0x15d02664,0xc9445fcf,0x21f8e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8065e253,0x1efc082a,0x28a74328,0x788a2003,0xaedbdb3,0x6bbe72db,0xe5695d00,0xf240013d,0x95a8e9ac,0xddace380,0x19ac12e,0x62623dc6,0xf1acc062,0xdefd5832,0x8d21408a,0xd0593}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe73d2cd10eb0d31b,0xbf6f5803d7ccade8,0xc7fe188fb276262a,0xd11aabcd9536435f,0x76ee20e57d85bca9,0xe746fc1a0fa5befd,0x15d0266488520990,0x21f8ec9445fcf}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1efc082a8065e253,0x788a200328a74328,0x6bbe72db0aedbdb3,0xf240013de5695d00,0xddace38095a8e9ac,0x62623dc6019ac12e,0xdefd5832f1acc062,0xd05938d21408a}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2111,0xf206,0x421f,0x9a8c,0xb968,0xd3ab,0xda72,0x21a3,0x9993,0xe133,0x186,0xdbbf,0x4c83,0xe5c6,0x68ba,0x4679,0x2a20,0x5c3b,0x918b,0xd38a,0x7855,0xcf4b,0xd397,0x66bf,0x8f03,0xd217,0xcc2f,0x6e16,0x6cf7,0x738f,0x1570,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf2062111,0x9a8c421f,0xd3abb968,0x21a3da72,0xe1339993,0xdbbf0186,0xe5c64c83,0x467968ba,0x5c3b2a20,0xd38a918b,0xcf4b7855,0x66bfd397,0xd2178f03,0x6e16cc2f,0x738f6cf7,0x41570}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9a8c421ff2062111,0x21a3da72d3abb968,0xdbbf0186e1339993,0x467968bae5c64c83,0xd38a918b5c3b2a20,0x66bfd397cf4b7855,0x6e16cc2fd2178f03,0x41570738f6cf7}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x8d6,0xe48,0xb631,0xafe,0x8c92,0x2c28,0x3063,0xba26,0xc07e,0x535,0xab2a,0xa8d0,0xb2c4,0xc01f,0x7932,0x2099,0xcb69,0xba4b,0xe785,0x8032,0x1728,0x815e,0xa101,0x1d56,0xf9,0x2da0,0x660d,0xb3f2,0xefe9,0x5d0b,0xbd46,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe4808d6,0xafeb631,0x2c288c92,0xba263063,0x535c07e,0xa8d0ab2a,0xc01fb2c4,0x20997932,0xba4bcb69,0x8032e785,0x815e1728,0x1d56a101,0x2da000f9,0xb3f2660d,0x5d0befe9,0x1bd46}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafeb6310e4808d6,0xba2630632c288c92,0xa8d0ab2a0535c07e,0x20997932c01fb2c4,0x8032e785ba4bcb69,0x1d56a101815e1728,0xb3f2660d2da000f9,0x1bd465d0befe9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x129d,0xdd4c,0xe2b2,0xca3b,0x6c0b,0x9c8b,0x68f9,0x412,0x51a8,0x7583,0xae25,0xb80d,0x35d5,0x387b,0x4ba1,0x66e1,0x754,0xf6b6,0x3d8c,0x650,0xa955,0x214f,0xc05f,0x16d2,0x9ce4,0x246f,0x123e,0x3ed3,0xa07f,0x2e24,0x8964,0x5}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xba36,0x53ab,0xedb2,0xfd40,0x24a1,0x7164,0x85ae,0xbaf5,0xb1e8,0xb6c1,0x8781,0xa06e,0xc4f,0xd656,0x7782,0xe745,0x8f5f,0x856,0x8eda,0xa10e,0x34e1,0x75e,0xa553,0xcbf2,0x6965,0xb963,0xb52c,0xd1ee,0xa39a,0x658c,0xb61c,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdd4c129d,0xca3be2b2,0x9c8b6c0b,0x41268f9,0x758351a8,0xb80dae25,0x387b35d5,0x66e14ba1,0xf6b60754,0x6503d8c,0x214fa955,0x16d2c05f,0x246f9ce4,0x3ed3123e,0x2e24a07f,0x58964}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x53abba36,0xfd40edb2,0x716424a1,0xbaf585ae,0xb6c1b1e8,0xa06e8781,0xd6560c4f,0xe7457782,0x8568f5f,0xa10e8eda,0x75e34e1,0xcbf2a553,0xb9636965,0xd1eeb52c,0x658ca39a,0xfb61c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xca3be2b2dd4c129d,0x41268f99c8b6c0b,0xb80dae25758351a8,0x66e14ba1387b35d5,0x6503d8cf6b60754,0x16d2c05f214fa955,0x3ed3123e246f9ce4,0x589642e24a07f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfd40edb253abba36,0xbaf585ae716424a1,0xa06e8781b6c1b1e8,0xe7457782d6560c4f,0xa10e8eda08568f5f,0xcbf2a553075e34e1,0xd1eeb52cb9636965,0xfb61c658ca39a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9e3f,0xbc60,0xa44c,0x253c,0xa75e,0xa9f9,0x326f,0x9f9f,0x14aa,0xa47f,0x3889,0x5ee3,0x87d,0x933f,0x6cba,0x6222,0xcd43,0xa8c9,0xa815,0x992a,0x643a,0xc1d3,0x4cff,0xf675,0xf30b,0x7e2a,0x5248,0xb9e4,0xa454,0x2c53,0x525b,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3007,0xc6b6,0xc140,0x7909,0x6fb4,0x64d4,0x16ae,0xa3b7,0xa379,0xfe0f,0x7bc0,0xa6fa,0x978b,0x3c75,0x95db,0x19e0,0x7f23,0xb291,0x31a3,0xd4df,0x1d4c,0xb5b7,0x90e2,0xb611,0xd86e,0x4552,0x1240,0x836a,0xc94f,0xd456,0x3d8b,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbc609e3f,0x253ca44c,0xa9f9a75e,0x9f9f326f,0xa47f14aa,0x5ee33889,0x933f087d,0x62226cba,0xa8c9cd43,0x992aa815,0xc1d3643a,0xf6754cff,0x7e2af30b,0xb9e45248,0x2c53a454,0x3525b}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc6b63007,0x7909c140,0x64d46fb4,0xa3b716ae,0xfe0fa379,0xa6fa7bc0,0x3c75978b,0x19e095db,0xb2917f23,0xd4df31a3,0xb5b71d4c,0xb61190e2,0x4552d86e,0x836a1240,0xd456c94f,0xf3d8b}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x253ca44cbc609e3f,0x9f9f326fa9f9a75e,0x5ee33889a47f14aa,0x62226cba933f087d,0x992aa815a8c9cd43,0xf6754cffc1d3643a,0xb9e452487e2af30b,0x3525b2c53a454}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7909c140c6b63007,0xa3b716ae64d46fb4,0xa6fa7bc0fe0fa379,0x19e095db3c75978b,0xd4df31a3b2917f23,0xb61190e2b5b71d4c,0x836a12404552d86e,0xf3d8bd456c94f}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x584d,0xa517,0xb681,0x45de,0xc2ea,0x7c58,0x123,0xe0fd,0xfd80,0x6c5b,0xf669,0xddc5,0xb21a,0xcaa9,0xc7a0,0x37ec,0xf8c6,0x12e7,0xe984,0xe812,0xef9f,0x128a,0x9fca,0x41f5,0x118f,0x5c32,0xf1cf,0x78c5,0x9424,0x2ae3,0x60d2,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1968,0x28ea,0x38ef,0x9c5d,0x974c,0x8a5,0xdf49,0xa49b,0x27ca,0x724c,0x963e,0x8465,0xc467,0x7fcf,0xf96e,0x72fa,0x3881,0x6839,0x4c67,0xb7a8,0xb9d2,0x5e2e,0xf9da,0x1c53,0x36af,0x307b,0xb14,0x9619,0xa56f,0x2286,0xdebd,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa517584d,0x45deb681,0x7c58c2ea,0xe0fd0123,0x6c5bfd80,0xddc5f669,0xcaa9b21a,0x37ecc7a0,0x12e7f8c6,0xe812e984,0x128aef9f,0x41f59fca,0x5c32118f,0x78c5f1cf,0x2ae39424,0x260d2}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x28ea1968,0x9c5d38ef,0x8a5974c,0xa49bdf49,0x724c27ca,0x8465963e,0x7fcfc467,0x72faf96e,0x68393881,0xb7a84c67,0x5e2eb9d2,0x1c53f9da,0x307b36af,0x96190b14,0x2286a56f,0x5debd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x45deb681a517584d,0xe0fd01237c58c2ea,0xddc5f6696c5bfd80,0x37ecc7a0caa9b21a,0xe812e98412e7f8c6,0x41f59fca128aef9f,0x78c5f1cf5c32118f,0x260d22ae39424}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9c5d38ef28ea1968,0xa49bdf4908a5974c,0x8465963e724c27ca,0x72faf96e7fcfc467,0xb7a84c6768393881,0x1c53f9da5e2eb9d2,0x96190b14307b36af,0x5debd2286a56f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xed63,0x22b3,0x1d4d,0x35c4,0x93f4,0x6374,0x9706,0xfbed,0xae57,0x8a7c,0x51da,0x47f2,0xca2a,0xc784,0xb45e,0x991e,0xf8ab,0x949,0xc273,0xf9af,0x56aa,0xdeb0,0x3fa0,0xe92d,0x631b,0xdb90,0xedc1,0xc12c,0x5f80,0xd1db,0x769b,0xa}}} +{{{._mp_alloc = 0, ._mp_size = 31, ._mp_d = (mp_limb_t[]) {0x45ca,0xac54,0x124d,0x2bf,0xdb5e,0x8e9b,0x7a51,0x450a,0x4e17,0x493e,0x787e,0x5f91,0xf3b0,0x29a9,0x887d,0x18ba,0x70a0,0xf7a9,0x7125,0x5ef1,0xcb1e,0xf8a1,0x5aac,0x340d,0x969a,0x469c,0x4ad3,0x2e11,0x5c65,0x9a73,0x49e3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x22b3ed63,0x35c41d4d,0x637493f4,0xfbed9706,0x8a7cae57,0x47f251da,0xc784ca2a,0x991eb45e,0x949f8ab,0xf9afc273,0xdeb056aa,0xe92d3fa0,0xdb90631b,0xc12cedc1,0xd1db5f80,0xa769b}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xac5445ca,0x2bf124d,0x8e9bdb5e,0x450a7a51,0x493e4e17,0x5f91787e,0x29a9f3b0,0x18ba887d,0xf7a970a0,0x5ef17125,0xf8a1cb1e,0x340d5aac,0x469c969a,0x2e114ad3,0x9a735c65,0x49e3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x35c41d4d22b3ed63,0xfbed9706637493f4,0x47f251da8a7cae57,0x991eb45ec784ca2a,0xf9afc2730949f8ab,0xe92d3fa0deb056aa,0xc12cedc1db90631b,0xa769bd1db5f80}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2bf124dac5445ca,0x450a7a518e9bdb5e,0x5f91787e493e4e17,0x18ba887d29a9f3b0,0x5ef17125f7a970a0,0x340d5aacf8a1cb1e,0x2e114ad3469c969a,0x49e39a735c65}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 
#elif RADIX == 16 {0x1414, 0x1912, 0xddb, 0xb9, 0x121b, 0x195d, 0x6c5, 0xe75, 0x1d16, 0xcaf, 0x1a05, 0x15cc, 0x1537, 0x1c57, 0x141a, 0x1b9d, 0x5b0, 0x9e7, 0xf10, 0x300, 0x1d5c, 0xc13, 0x245, 0x1b91, 0x352, 0x730, 0xd55, 0x1ca3, 0x1dac, 0x7b0, 0x15b9, 0x1ba6, 0x7d6, 0xba4, 0xc12, 0x649, 0xf1, 0xd51, 0x107} @@ -3117,220 +3117,220 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xfe45,0x561e,0xa798,0xc163,0xedaa,0x1fff,0xb3c0,0xda12,0x2588,0x5123,0x2390,0xface,0x6e59,0x651b,0xa21d,0xb190,0xe1b0,0x21b0,0x6030,0xd81,0x6542,0x64b0,0x8cef,0xa91c,0xe0bd,0xd947,0xe27b,0x961a,0x2a3e,0xf20a,0xed0,0x5}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xfe45,0x561e,0xa798,0xc163,0xedaa,0x1fff,0xb3c0,0xda12,0x2588,0x5123,0x2390,0xface,0x6e59,0x651b,0xa21d,0xb190,0xe1b0,0x21b0,0x6030,0xd81,0x6542,0x64b0,0x8cef,0xa91c,0xe0bd,0xd947,0xe27b,0x961a,0x2a3e,0xf20a,0xed0,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x561efe45,0xc163a798,0x1fffedaa,0xda12b3c0,0x51232588,0xface2390,0x651b6e59,0xb190a21d,0x21b0e1b0,0xd816030,0x64b06542,0xa91c8cef,0xd947e0bd,0x961ae27b,0xf20a2a3e,0x50ed0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x561efe45,0xc163a798,0x1fffedaa,0xda12b3c0,0x51232588,0xface2390,0x651b6e59,0xb190a21d,0x21b0e1b0,0xd816030,0x64b06542,0xa91c8cef,0xd947e0bd,0x961ae27b,0xf20a2a3e,0x50ed0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc163a798561efe45,0xda12b3c01fffedaa,0xface239051232588,0xb190a21d651b6e59,0xd81603021b0e1b0,0xa91c8cef64b06542,0x961ae27bd947e0bd,0x50ed0f20a2a3e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc163a798561efe45,0xda12b3c01fffedaa,0xface239051232588,0xb190a21d651b6e59,0xd81603021b0e1b0,0xa91c8cef64b06542,0x961ae27bd947e0bd,0x50ed0f20a2a3e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xff6,0x4b77,0xed4e,0x3538,0xc757,0x3304,0xd547,0xc49f,0x801b,0xb2ff,0x4796,0xcad,0x58be,0x969c,0x3e77,0x83e1,0xa371,0x7d43,0x6664,0x3720,0x6c98,0xe4e5,0x74dc,0xebbc,0xaab5,0xc7e0,0x3147,0x5bc9,0x537e,0x7ff5,0xb400,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xff6,0x4b77,0xed4e,0x3538,0xc757,0x3304,0xd547,0xc49f,0x801b,0xb2ff,0x4796,0xcad,0x58be,0x969c,0x3e77,0x83e1,0xa371,0x7d43,0x6664,0x3720,0x6c98,0xe4e5,0x74dc,0xebbc,0xaab5,0xc7e0,0x3147,0x5bc9,0x537e,0x7ff5,0xb400,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4b770ff6,0x3538ed4e,0x3304c757,0xc49fd547,0xb2ff801b,0xcad4796,0x969c58be,0x83e13e77,0x7d43a371,0x37206664,0xe4e56c98,0xebbc74dc,0xc7e0aab5,0x5bc93147,0x7ff5537e,0xcb400}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4b770ff6,0x3538ed4e,0x3304c757,0xc49fd547,0xb2ff801b,0xcad4796,0x969c58be,0x83e13e77,0x7d43a371,0x37206664,0xe4e56c98,0xebbc74dc,0xc7e0aab5,0x5bc93147,0x7ff5537e,0xcb400}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3538ed4e4b770ff6,0xc49fd5473304c757,0xcad4796b2ff801b,0x83e13e77969c58be,0x372066647d43a371,0xebbc74dce4e56c98,0x5bc93147c7e0aab5,0xcb4007ff5537e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0x3538ed4e4b770ff6,0xc49fd5473304c757,0xcad4796b2ff801b,0x83e13e77969c58be,0x372066647d43a371,0xebbc74dce4e56c98,0x5bc93147c7e0aab5,0xcb4007ff5537e}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x77eb,0xce58,0xfaa0,0x429c,0xc7dd,0xbe89,0x68a,0xc196,0xd249,0x63ae,0xa04a,0xbc4e,0xbbb5,0x64ad,0xfc2b,0x4738,0x4d4d,0x918a,0x6b55,0x459d,0xf5ab,0x748c,0x208a,0xfe42,0x21d2,0x3785,0x23d7,0x28cc,0xfd9f,0x106a,0x3406,0xd}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x77eb,0xce58,0xfaa0,0x429c,0xc7dd,0xbe89,0x68a,0xc196,0xd249,0x63ae,0xa04a,0xbc4e,0xbbb5,0x64ad,0xfc2b,0x4738,0x4d4d,0x918a,0x6b55,0x459d,0xf5ab,0x748c,0x208a,0xfe42,0x21d2,0x3785,0x23d7,0x28cc,0xfd9f,0x106a,0x3406,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xce5877eb,0x429cfaa0,0xbe89c7dd,0xc196068a,0x63aed249,0xbc4ea04a,0x64adbbb5,0x4738fc2b,0x918a4d4d,0x459d6b55,0x748cf5ab,0xfe42208a,0x378521d2,0x28cc23d7,0x106afd9f,0xd3406}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xce5877eb,0x429cfaa0,0xbe89c7dd,0xc196068a,0x63aed249,0xbc4ea04a,0x64adbbb5,0x4738fc2b,0x918a4d4d,0x459d6b55,0x748cf5ab,0xfe42208a,0x378521d2,0x28cc23d7,0x106afd9f,0xd3406}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x429cfaa0ce5877eb,0xc196068abe89c7dd,0xbc4ea04a63aed249,0x4738fc2b64adbbb5,0x459d6b55918a4d4d,0xfe42208a748cf5ab,0x28cc23d7378521d2,0xd3406106afd9f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x429cfaa0ce5877eb,0xc196068abe89c7dd,0xbc4ea04a63aed249,0x4738fc2b64adbbb5,0x459d6b55918a4d4d,0xfe42208a748cf5ab,0x28cc23d7378521d2,0xd3406106afd9f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1bb,0xa9e1,0x5867,0x3e9c,0x1255,0xe000,0x4c3f,0x25ed,0xda77,0xaedc,0xdc6f,0x531,0x91a6,0x9ae4,0x5de2,0x4e6f,0x1e4f,0xde4f,0x9fcf,0xf27e,0x9abd,0x9b4f,0x7310,0x56e3,0x1f42,0x26b8,0x1d84,0x69e5,0xd5c1,0xdf5,0xf12f,0xa}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1bb,0xa9e1,0x5867,0x3e9c,0x1255,0xe000,0x4c3f,0x25ed,0xda77,0xaedc,0xdc6f,0x531,0x91a6,0x9ae4,0x5de2,0x4e6f,0x1e4f,0xde4f,0x9fcf,0xf27e,0x9abd,0x9b4f,0x7310,0x56e3,0x1f42,0x26b8,0x1d84,0x69e5,0xd5c1,0xdf5,0xf12f,0xa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa9e101bb,0x3e9c5867,0xe0001255,0x25ed4c3f,0xaedcda77,0x531dc6f,0x9ae491a6,0x4e6f5de2,0xde4f1e4f,0xf27e9fcf,0x9b4f9abd,0x56e37310,0x26b81f42,0x69e51d84,0xdf5d5c1,0xaf12f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa9e101bb,0x3e9c5867,0xe0001255,0x25ed4c3f,0xaedcda77,0x531dc6f,0x9ae491a6,0x4e6f5de2,0xde4f1e4f,0xf27e9fcf,0x9b4f9abd,0x56e37310,0x26b81f42,0x69e51d84,0xdf5d5c1,0xaf12f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e9c5867a9e101bb,0x25ed4c3fe0001255,0x531dc6faedcda77,0x4e6f5de29ae491a6,0xf27e9fcfde4f1e4f,0x56e373109b4f9abd,0x69e51d8426b81f42,0xaf12f0df5d5c1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e9c5867a9e101bb,0x25ed4c3fe0001255,0x531dc6faedcda77,0x4e6f5de29ae491a6,0xf27e9fcfde4f1e4f,0x56e373109b4f9abd,0x69e51d8426b81f42,0xaf12f0df5d5c1}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x39f7,0x51a0,0x71ea,0x7557,0x794c,0x6b5e,0x6a81,0x9aa7,0xd8dd,0xab85,0xe387,0x2121,0x1086,0x7989,0xe273,0xf813,0xebd5,0xb13f,0x9ef5,0xc6d5,0x2da2,0x14f8,0xecf3,0x24c4,0xf485,0xc8de,0xb9ef,0xb213,0xbc4d,0xe587,0xd591,0xd}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x39f7,0x51a0,0x71ea,0x7557,0x794c,0x6b5e,0x6a81,0x9aa7,0xd8dd,0xab85,0xe387,0x2121,0x1086,0x7989,0xe273,0xf813,0xebd5,0xb13f,0x9ef5,0xc6d5,0x2da2,0x14f8,0xecf3,0x24c4,0xf485,0xc8de,0xb9ef,0xb213,0xbc4d,0xe587,0xd591,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x51a039f7,0x755771ea,0x6b5e794c,0x9aa76a81,0xab85d8dd,0x2121e387,0x79891086,0xf813e273,0xb13febd5,0xc6d59ef5,0x14f82da2,0x24c4ecf3,0xc8def485,0xb213b9ef,0xe587bc4d,0xdd591}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x51a039f7,0x755771ea,0x6b5e794c,0x9aa76a81,0xab85d8dd,0x2121e387,0x79891086,0xf813e273,0xb13febd5,0xc6d59ef5,0x14f82da2,0x24c4ecf3,0xc8def485,0xb213b9ef,0xe587bc4d,0xdd591}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x755771ea51a039f7,0x9aa76a816b5e794c,0x2121e387ab85d8dd,0xf813e27379891086,0xc6d59ef5b13febd5,0x24c4ecf314f82da2,0xb213b9efc8def485,0xdd591e587bc4d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x755771ea51a039f7,0x9aa76a816b5e794c,0x2121e387ab85d8dd,0xf813e27379891086,0xc6d59ef5b13febd5,0x24c4ecf314f82da2,0xb213b9efc8def485,0xdd591e587bc4d}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc5d4,0x133f,0xc116,0x2a9e,0xacf5,0xaedd,0x6173,0xdacf,0x6448,0xa33e,0x6d36,0x5013,0x2093,0x59f6,0xe571,0x906d,0x37c9,0xe4ab,0xb92a,0xbe30,0x1d49,0xde58,0xffc8,0x47ff,0xe0cb,0x6230,0x6128,0x8679,0x731c,0xc5e,0x66c7,0xd}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc5d4,0x133f,0xc116,0x2a9e,0xacf5,0xaedd,0x6173,0xdacf,0x6448,0xa33e,0x6d36,0x5013,0x2093,0x59f6,0xe571,0x906d,0x37c9,0xe4ab,0xb92a,0xbe30,0x1d49,0xde58,0xffc8,0x47ff,0xe0cb,0x6230,0x6128,0x8679,0x731c,0xc5e,0x66c7,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x133fc5d4,0x2a9ec116,0xaeddacf5,0xdacf6173,0xa33e6448,0x50136d36,0x59f62093,0x906de571,0xe4ab37c9,0xbe30b92a,0xde581d49,0x47ffffc8,0x6230e0cb,0x86796128,0xc5e731c,0xd66c7}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x133fc5d4,0x2a9ec116,0xaeddacf5,0xdacf6173,0xa33e6448,0x50136d36,0x59f62093,0x906de571,0xe4ab37c9,0xbe30b92a,0xde581d49,0x47ffffc8,0x6230e0cb,0x86796128,0xc5e731c,0xd66c7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2a9ec116133fc5d4,0xdacf6173aeddacf5,0x50136d36a33e6448,0x906de57159f62093,0xbe30b92ae4ab37c9,0x47ffffc8de581d49,0x867961286230e0cb,0xd66c70c5e731c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2a9ec116133fc5d4,0xdacf6173aeddacf5,0x50136d36a33e6448,0x906de57159f62093,0xbe30b92ae4ab37c9,0x47ffffc8de581d49,0x867961286230e0cb,0xd66c70c5e731c}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x55ad,0x2e3e,0xd0dc,0x8dad,0x4e0a,0xe1d0,0x3e27,0x81af,0x1bb4,0xa5fa,0x52f2,0x5bd4,0x2b9b,0xddfe,0x36,0xbdd4,0xf99a,0x3027,0x21d2,0x7b29,0x10ee,0x2146,0x6864,0xec5c,0x6bbd,0x540f,0xbc15,0xe4a1,0xee,0x3d9c,0xdf51,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x55ad,0x2e3e,0xd0dc,0x8dad,0x4e0a,0xe1d0,0x3e27,0x81af,0x1bb4,0xa5fa,0x52f2,0x5bd4,0x2b9b,0xddfe,0x36,0xbdd4,0xf99a,0x3027,0x21d2,0x7b29,0x10ee,0x2146,0x6864,0xec5c,0x6bbd,0x540f,0xbc15,0xe4a1,0xee,0x3d9c,0xdf51,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2e3e55ad,0x8dadd0dc,0xe1d04e0a,0x81af3e27,0xa5fa1bb4,0x5bd452f2,0xddfe2b9b,0xbdd40036,0x3027f99a,0x7b2921d2,0x214610ee,0xec5c6864,0x540f6bbd,0xe4a1bc15,0x3d9c00ee,0x4df51}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2e3e55ad,0x8dadd0dc,0xe1d04e0a,0x81af3e27,0xa5fa1bb4,0x5bd452f2,0xddfe2b9b,0xbdd40036,0x3027f99a,0x7b2921d2,0x214610ee,0xec5c6864,0x540f6bbd,0xe4a1bc15,0x3d9c00ee,0x4df51}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc 
= 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8dadd0dc2e3e55ad,0x81af3e27e1d04e0a,0x5bd452f2a5fa1bb4,0xbdd40036ddfe2b9b,0x7b2921d23027f99a,0xec5c6864214610ee,0xe4a1bc15540f6bbd,0x4df513d9c00ee}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8dadd0dc2e3e55ad,0x81af3e27e1d04e0a,0x5bd452f2a5fa1bb4,0xbdd40036ddfe2b9b,0x7b2921d23027f99a,0xec5c6864214610ee,0xe4a1bc15540f6bbd,0x4df513d9c00ee}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc609,0xae5f,0x8e15,0x8aa8,0x86b3,0x94a1,0x957e,0x6558,0x2722,0x547a,0x1c78,0xdede,0xef79,0x8676,0x1d8c,0x7ec,0x142a,0x4ec0,0x610a,0x392a,0xd25d,0xeb07,0x130c,0xdb3b,0xb7a,0x3721,0x4610,0x4dec,0x43b2,0x1a78,0x2a6e,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc609,0xae5f,0x8e15,0x8aa8,0x86b3,0x94a1,0x957e,0x6558,0x2722,0x547a,0x1c78,0xdede,0xef79,0x8676,0x1d8c,0x7ec,0x142a,0x4ec0,0x610a,0x392a,0xd25d,0xeb07,0x130c,0xdb3b,0xb7a,0x3721,0x4610,0x4dec,0x43b2,0x1a78,0x2a6e,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xae5fc609,0x8aa88e15,0x94a186b3,0x6558957e,0x547a2722,0xdede1c78,0x8676ef79,0x7ec1d8c,0x4ec0142a,0x392a610a,0xeb07d25d,0xdb3b130c,0x37210b7a,0x4dec4610,0x1a7843b2,0x22a6e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xae5fc609,0x8aa88e15,0x94a186b3,0x6558957e,0x547a2722,0xdede1c78,0x8676ef79,0x7ec1d8c,0x4ec0142a,0x392a610a,0xeb07d25d,0xdb3b130c,0x37210b7a,0x4dec4610,0x1a7843b2,0x22a6e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8aa88e15ae5fc609,0x6558957e94a186b3,0xdede1c78547a2722,0x7ec1d8c8676ef79,0x392a610a4ec0142a,0xdb3b130ceb07d25d,0x4dec461037210b7a,0x22a6e1a7843b2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8aa88e15ae5fc609,0x6558957e94a186b3,0xdede1c78547a2722,0x7ec1d8c8676ef79,0x392a610a4ec0142a,0xdb3b130ceb07d25d,0x4dec461037210b7a,0x22a6e1a7843b2}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xfe45,0x561e,0xa798,0xc163,0xedaa,0x1fff,0xb3c0,0xda12,0x2588,0x5123,0x2390,0xface,0x6e59,0x651b,0xa21d,0xb190,0xe1b0,0x21b0,0x6030,0xd81,0x6542,0x64b0,0x8cef,0xa91c,0xe0bd,0xd947,0xe27b,0x961a,0x2a3e,0xf20a,0xed0,0x5}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xfe45,0x561e,0xa798,0xc163,0xedaa,0x1fff,0xb3c0,0xda12,0x2588,0x5123,0x2390,0xface,0x6e59,0x651b,0xa21d,0xb190,0xe1b0,0x21b0,0x6030,0xd81,0x6542,0x64b0,0x8cef,0xa91c,0xe0bd,0xd947,0xe27b,0x961a,0x2a3e,0xf20a,0xed0,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x561efe45,0xc163a798,0x1fffedaa,0xda12b3c0,0x51232588,0xface2390,0x651b6e59,0xb190a21d,0x21b0e1b0,0xd816030,0x64b06542,0xa91c8cef,0xd947e0bd,0x961ae27b,0xf20a2a3e,0x50ed0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x561efe45,0xc163a798,0x1fffedaa,0xda12b3c0,0x51232588,0xface2390,0x651b6e59,0xb190a21d,0x21b0e1b0,0xd816030,0x64b06542,0xa91c8cef,0xd947e0bd,0x961ae27b,0xf20a2a3e,0x50ed0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc163a798561efe45,0xda12b3c01fffedaa,0xface239051232588,0xb190a21d651b6e59,0xd81603021b0e1b0,0xa91c8cef64b06542,0x961ae27bd947e0bd,0x50ed0f20a2a3e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc163a798561efe45,0xda12b3c01fffedaa,0xface239051232588,0xb190a21d651b6e59,0xd81603021b0e1b0,0xa91c8cef64b06542,0x961ae27bd947e0bd,0x50ed0f20a2a3e}}}} #endif , #if 0 #elif GMP_LIMB_BITS 
== 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xff6,0x4b77,0xed4e,0x3538,0xc757,0x3304,0xd547,0xc49f,0x801b,0xb2ff,0x4796,0xcad,0x58be,0x969c,0x3e77,0x83e1,0xa371,0x7d43,0x6664,0x3720,0x6c98,0xe4e5,0x74dc,0xebbc,0xaab5,0xc7e0,0x3147,0x5bc9,0x537e,0x7ff5,0xb400,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xff6,0x4b77,0xed4e,0x3538,0xc757,0x3304,0xd547,0xc49f,0x801b,0xb2ff,0x4796,0xcad,0x58be,0x969c,0x3e77,0x83e1,0xa371,0x7d43,0x6664,0x3720,0x6c98,0xe4e5,0x74dc,0xebbc,0xaab5,0xc7e0,0x3147,0x5bc9,0x537e,0x7ff5,0xb400,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4b770ff6,0x3538ed4e,0x3304c757,0xc49fd547,0xb2ff801b,0xcad4796,0x969c58be,0x83e13e77,0x7d43a371,0x37206664,0xe4e56c98,0xebbc74dc,0xc7e0aab5,0x5bc93147,0x7ff5537e,0xcb400}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4b770ff6,0x3538ed4e,0x3304c757,0xc49fd547,0xb2ff801b,0xcad4796,0x969c58be,0x83e13e77,0x7d43a371,0x37206664,0xe4e56c98,0xebbc74dc,0xc7e0aab5,0x5bc93147,0x7ff5537e,0xcb400}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3538ed4e4b770ff6,0xc49fd5473304c757,0xcad4796b2ff801b,0x83e13e77969c58be,0x372066647d43a371,0xebbc74dce4e56c98,0x5bc93147c7e0aab5,0xcb4007ff5537e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3538ed4e4b770ff6,0xc49fd5473304c757,0xcad4796b2ff801b,0x83e13e77969c58be,0x372066647d43a371,0xebbc74dce4e56c98,0x5bc93147c7e0aab5,0xcb4007ff5537e}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x77eb,0xce58,0xfaa0,0x429c,0xc7dd,0xbe89,0x68a,0xc196,0xd249,0x63ae,0xa04a,0xbc4e,0xbbb5,0x64ad,0xfc2b,0x4738,0x4d4d,0x918a,0x6b55,0x459d,0xf5ab,0x748c,0x208a,0xfe42,0x21d2,0x3785,0x23d7,0x28cc,0xfd9f,0x106a,0x3406,0xd}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x77eb,0xce58,0xfaa0,0x429c,0xc7dd,0xbe89,0x68a,0xc196,0xd249,0x63ae,0xa04a,0xbc4e,0xbbb5,0x64ad,0xfc2b,0x4738,0x4d4d,0x918a,0x6b55,0x459d,0xf5ab,0x748c,0x208a,0xfe42,0x21d2,0x3785,0x23d7,0x28cc,0xfd9f,0x106a,0x3406,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xce5877eb,0x429cfaa0,0xbe89c7dd,0xc196068a,0x63aed249,0xbc4ea04a,0x64adbbb5,0x4738fc2b,0x918a4d4d,0x459d6b55,0x748cf5ab,0xfe42208a,0x378521d2,0x28cc23d7,0x106afd9f,0xd3406}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xce5877eb,0x429cfaa0,0xbe89c7dd,0xc196068a,0x63aed249,0xbc4ea04a,0x64adbbb5,0x4738fc2b,0x918a4d4d,0x459d6b55,0x748cf5ab,0xfe42208a,0x378521d2,0x28cc23d7,0x106afd9f,0xd3406}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x429cfaa0ce5877eb,0xc196068abe89c7dd,0xbc4ea04a63aed249,0x4738fc2b64adbbb5,0x459d6b55918a4d4d,0xfe42208a748cf5ab,0x28cc23d7378521d2,0xd3406106afd9f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x429cfaa0ce5877eb,0xc196068abe89c7dd,0xbc4ea04a63aed249,0x4738fc2b64adbbb5,0x459d6b55918a4d4d,0xfe42208a748cf5ab,0x28cc23d7378521d2,0xd3406106afd9f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1bb,0xa9e1,0x5867,0x3e9c,0x1255,0xe000,0x4c3f,0x25ed,0xda77,0xaedc,0xdc6f,0x531,0x91a6,0x9ae4,0x5de2,0x4e6f,0x1e4f,0xde4f,0x9fcf,0xf27e,0x9abd,0x9b4f,0x7310,0x56e3,0x1f42,0x26b8,0x1d84,0x69e5,0xd5c1,0xdf5,0xf12f,0xa}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x1bb,0xa9e1,0x5867,0x3e9c,0x1255,0xe000,0x4c3f,0x25ed,0xda77,0xaedc,0xdc6f,0x531,0x91a6,0x9ae4,0x5de2,0x4e6f,0x1e4f,0xde4f,0x9fcf,0xf27e,0x9abd,0x9b4f,0x7310,0x56e3,0x1f42,0x26b8,0x1d84,0x69e5,0xd5c1,0xdf5,0xf12f,0xa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa9e101bb,0x3e9c5867,0xe0001255,0x25ed4c3f,0xaedcda77,0x531dc6f,0x9ae491a6,0x4e6f5de2,0xde4f1e4f,0xf27e9fcf,0x9b4f9abd,0x56e37310,0x26b81f42,0x69e51d84,0xdf5d5c1,0xaf12f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa9e101bb,0x3e9c5867,0xe0001255,0x25ed4c3f,0xaedcda77,0x531dc6f,0x9ae491a6,0x4e6f5de2,0xde4f1e4f,0xf27e9fcf,0x9b4f9abd,0x56e37310,0x26b81f42,0x69e51d84,0xdf5d5c1,0xaf12f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e9c5867a9e101bb,0x25ed4c3fe0001255,0x531dc6faedcda77,0x4e6f5de29ae491a6,0xf27e9fcfde4f1e4f,0x56e373109b4f9abd,0x69e51d8426b81f42,0xaf12f0df5d5c1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e9c5867a9e101bb,0x25ed4c3fe0001255,0x531dc6faedcda77,0x4e6f5de29ae491a6,0xf27e9fcfde4f1e4f,0x56e373109b4f9abd,0x69e51d8426b81f42,0xaf12f0df5d5c1}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9e37,0x619b,0xa159,0x8865,0xab15,0x85c2,0xb3b,0x57ce,0x8108,0xa8d6,0xfeb0,0x8cf0,0xef13,0xc7e1,0x6936,0xc3a9,0xd8f2,0x9c5d,0x7c68,0x7ba2,0xf4da,0x4c63,0x845b,0x22eb,0xbedd,0x37a0,0x24f3,0x7019,0x2855,0x6905,0xb81c,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9e37,0x619b,0xa159,0x8865,0xab15,0x85c2,0xb3b,0x57ce,0x8108,0xa8d6,0xfeb0,0x8cf0,0xef13,0xc7e1,0x6936,0xc3a9,0xd8f2,0x9c5d,0x7c68,0x7ba2,0xf4da,0x4c63,0x845b,0x22eb,0xbedd,0x37a0,0x24f3,0x7019,0x2855,0x6905,0xb81c,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x619b9e37,0x8865a159,0x85c2ab15,0x57ce0b3b,0xa8d68108,0x8cf0feb0,0xc7e1ef13,0xc3a96936,0x9c5dd8f2,0x7ba27c68,0x4c63f4da,0x22eb845b,0x37a0bedd,0x701924f3,0x69052855,0x3b81c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x619b9e37,0x8865a159,0x85c2ab15,0x57ce0b3b,0xa8d68108,0x8cf0feb0,0xc7e1ef13,0xc3a96936,0x9c5dd8f2,0x7ba27c68,0x4c63f4da,0x22eb845b,0x37a0bedd,0x701924f3,0x69052855,0x3b81c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8865a159619b9e37,0x57ce0b3b85c2ab15,0x8cf0feb0a8d68108,0xc3a96936c7e1ef13,0x7ba27c689c5dd8f2,0x22eb845b4c63f4da,0x701924f337a0bedd,0x3b81c69052855}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8865a159619b9e37,0x57ce0b3b85c2ab15,0x8cf0feb0a8d68108,0xc3a96936c7e1ef13,0x7ba27c689c5dd8f2,0x22eb845b4c63f4da,0x701924f337a0bedd,0x3b81c69052855}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x92b5,0x1309,0xc1ee,0xadd1,0x165,0x4911,0xaf0c,0x4a4f,0x5374,0xd4b2,0x926f,0xacc0,0xfd2f,0xeb63,0x7c68,0xc188,0x41ce,0x152e,0x6cfe,0x9a22,0xadb,0x933,0x438c,0x5fef,0xe17a,0x82aa,0x7732,0x8c5b,0xfa7b,0x4cd4,0xdcee,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x92b5,0x1309,0xc1ee,0xadd1,0x165,0x4911,0xaf0c,0x4a4f,0x5374,0xd4b2,0x926f,0xacc0,0xfd2f,0xeb63,0x7c68,0xc188,0x41ce,0x152e,0x6cfe,0x9a22,0xadb,0x933,0x438c,0x5fef,0xe17a,0x82aa,0x7732,0x8c5b,0xfa7b,0x4cd4,0xdcee,0x6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x130992b5,0xadd1c1ee,0x49110165,0x4a4faf0c,0xd4b25374,0xacc0926f,0xeb63fd2f,0xc1887c68,0x152e41ce,0x9a226cfe,0x9330adb,0x5fef438c,0x82aae17a,0x8c5b7732,0x4cd4fa7b,0x6dcee}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x130992b5,0xadd1c1ee,0x49110165,0x4a4faf0c,0xd4b25374,0xacc0926f,0xeb63fd2f,0xc1887c68,0x152e41ce,0x9a226cfe,0x9330adb,0x5fef438c,0x82aae17a,0x8c5b7732,0x4cd4fa7b,0x6dcee}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xadd1c1ee130992b5,0x4a4faf0c49110165,0xacc0926fd4b25374,0xc1887c68eb63fd2f,0x9a226cfe152e41ce,0x5fef438c09330adb,0x8c5b773282aae17a,0x6dcee4cd4fa7b}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xadd1c1ee130992b5,0x4a4faf0c49110165,0xacc0926fd4b25374,0xc1887c68eb63fd2f,0x9a226cfe152e41ce,0x5fef438c09330adb,0x8c5b773282aae17a,0x6dcee4cd4fa7b}}}} #endif }, { #if 
0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x77b7,0xc00c,0x743e,0x91b3,0xc92c,0x3be,0xc9e8,0x4b6b,0x519c,0xed1b,0x857f,0x2be7,0x2270,0x64a0,0x3a21,0xd5ec,0xd5d1,0x2392,0x175a,0xa58f,0x5c36,0x3908,0x5f46,0x1875,0xee40,0xcd4a,0x7e0b,0x8eda,0x87e0,0xc28c,0x6e24,0xd}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x77b7,0xc00c,0x743e,0x91b3,0xc92c,0x3be,0xc9e8,0x4b6b,0x519c,0xed1b,0x857f,0x2be7,0x2270,0x64a0,0x3a21,0xd5ec,0xd5d1,0x2392,0x175a,0xa58f,0x5c36,0x3908,0x5f46,0x1875,0xee40,0xcd4a,0x7e0b,0x8eda,0x87e0,0xc28c,0x6e24,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc00c77b7,0x91b3743e,0x3bec92c,0x4b6bc9e8,0xed1b519c,0x2be7857f,0x64a02270,0xd5ec3a21,0x2392d5d1,0xa58f175a,0x39085c36,0x18755f46,0xcd4aee40,0x8eda7e0b,0xc28c87e0,0xd6e24}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc00c77b7,0x91b3743e,0x3bec92c,0x4b6bc9e8,0xed1b519c,0x2be7857f,0x64a02270,0xd5ec3a21,0x2392d5d1,0xa58f175a,0x39085c36,0x18755f46,0xcd4aee40,0x8eda7e0b,0xc28c87e0,0xd6e24}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x91b3743ec00c77b7,0x4b6bc9e803bec92c,0x2be7857fed1b519c,0xd5ec3a2164a02270,0xa58f175a2392d5d1,0x18755f4639085c36,0x8eda7e0bcd4aee40,0xd6e24c28c87e0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x91b3743ec00c77b7,0x4b6bc9e803bec92c,0x2be7857fed1b519c,0xd5ec3a2164a02270,0xa58f175a2392d5d1,0x18755f4639085c36,0x8eda7e0bcd4aee40,0xd6e24c28c87e0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x61c9,0x9e64,0x5ea6,0x779a,0x54ea,0x7a3d,0xf4c4,0xa831,0x7ef7,0x5729,0x14f,0x730f,0x10ec,0x381e,0x96c9,0x3c56,0x270d,0x63a2,0x8397,0x845d,0xb25,0xb39c,0x7ba4,0xdd14,0x4122,0xc85f,0xdb0c,0x8fe6,0xd7aa,0x96fa,0x47e3,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x61c9,0x9e64,0x5ea6,0x779a,0x54ea,0x7a3d,0xf4c4,0xa831,0x7ef7,0x5729,0x14f,0x730f,0x10ec,0x381e,0x96c9,0x3c56,0x270d,0x63a2,0x8397,0x845d,0xb25,0xb39c,0x7ba4,0xdd14,0x4122,0xc85f,0xdb0c,0x8fe6,0xd7aa,0x96fa,0x47e3,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9e6461c9,0x779a5ea6,0x7a3d54ea,0xa831f4c4,0x57297ef7,0x730f014f,0x381e10ec,0x3c5696c9,0x63a2270d,0x845d8397,0xb39c0b25,0xdd147ba4,0xc85f4122,0x8fe6db0c,0x96fad7aa,0xc47e3}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9e6461c9,0x779a5ea6,0x7a3d54ea,0xa831f4c4,0x57297ef7,0x730f014f,0x381e10ec,0x3c5696c9,0x63a2270d,0x845d8397,0xb39c0b25,0xdd147ba4,0xc85f4122,0x8fe6db0c,0x96fad7aa,0xc47e3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x779a5ea69e6461c9,0xa831f4c47a3d54ea,0x730f014f57297ef7,0x3c5696c9381e10ec,0x845d839763a2270d,0xdd147ba4b39c0b25,0x8fe6db0cc85f4122,0xc47e396fad7aa}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x779a5ea69e6461c9,0xa831f4c47a3d54ea,0x730f014f57297ef7,0x3c5696c9381e10ec,0x845d839763a2270d,0xdd147ba4b39c0b25,0x8fe6db0cc85f4122,0xc47e396fad7aa}}}} #endif -}}}}; +}}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/finit.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/finit.c index b3808edf07..c9a3687282 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/finit.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/finit.c @@ -29,29 +29,29 @@ quat_alg_elem_finalize(quat_alg_elem_t *elem) void ibz_vec_2_init(ibz_vec_2_t *vec) { - ibz_init(&((*vec)[0])); - 
ibz_init(&((*vec)[1])); + ibz_init(&(vec->v[0])); + ibz_init(&(vec->v[1])); } void ibz_vec_2_finalize(ibz_vec_2_t *vec) { - ibz_finalize(&((*vec)[0])); - ibz_finalize(&((*vec)[1])); + ibz_finalize(&(vec->v[0])); + ibz_finalize(&(vec->v[1])); } void ibz_vec_4_init(ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_init(&(*vec)[i]); + ibz_init(&vec->v[i]); } } void ibz_vec_4_finalize(ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_finalize(&(*vec)[i]); + ibz_finalize(&vec->v[i]); } } @@ -60,7 +60,7 @@ ibz_mat_2x2_init(ibz_mat_2x2_t *mat) { for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_init(&(*mat)[i][j]); + ibz_init(&(mat->m)[i][j]); } } } @@ -69,7 +69,7 @@ ibz_mat_2x2_finalize(ibz_mat_2x2_t *mat) { for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_finalize(&(*mat)[i][j]); + ibz_finalize(&(mat->m)[i][j]); } } } @@ -79,7 +79,7 @@ ibz_mat_4x4_init(ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_init(&(*mat)[i][j]); + ibz_init(&(mat->m)[i][j]); } } } @@ -88,7 +88,7 @@ ibz_mat_4x4_finalize(ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_finalize(&(*mat)[i][j]); + ibz_finalize(&(mat->m)[i][j]); } } } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf.c index 511a0a5d38..5edff425c8 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hnf.c @@ -14,21 +14,21 @@ ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat) for (int i = 0; i < 4; i++) { // upper triangular for (int j = 0; j < i; j++) { - res = res && ibz_is_zero(&((*mat)[i][j])); + res = res && ibz_is_zero(&(mat->m[i][j])); } // find first non 0 element of line found = 0; for (int j = i; j < 4; j++) { if (found) { // all values are positive, and first non-0 is the largest of that line - res = res && (ibz_cmp(&((*mat)[i][j]), &zero) >= 0); - res = res && (ibz_cmp(&((*mat)[i][ind]), &((*mat)[i][j])) > 0); + res = res && (ibz_cmp(&(mat->m[i][j]), &zero) >= 0); + res = res && (ibz_cmp(&(mat->m[i][ind]), &(mat->m[i][j])) > 0); } else { - if (!ibz_is_zero(&((*mat)[i][j]))) { + if (!ibz_is_zero(&(mat->m[i][j]))) { found = 1; ind = j; // mustbe non-negative - res = res && (ibz_cmp(&((*mat)[i][j]), &zero) > 0); + res = res && (ibz_cmp(&(mat->m[i][j]), &zero) > 0); } } } @@ -37,7 +37,7 @@ ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat) int linestart = -1; int i = 0; for (int j = 0; j < 4; j++) { - while ((i < 4) && (ibz_is_zero(&((*mat)[i][j])))) { + while ((i < 4) && (ibz_is_zero(&(mat->m[i][j])))) { i = i + 1; } if (i != 4) { @@ -66,13 +66,13 @@ ibz_vec_4_linear_combination_mod(ibz_vec_4_t *lc, ibz_init(&m); ibz_copy(&m, mod); for (int i = 0; i < 4; i++) { - ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); - ibz_mul(&prod, coeff_b, &((*vec_b)[i])); - ibz_add(&(sums[i]), &(sums[i]), &prod); - ibz_centered_mod(&(sums[i]), &(sums[i]), &m); + ibz_mul(&(sums.v[i]), coeff_a, &(vec_a->v[i])); + ibz_mul(&prod, coeff_b, &(vec_b->v[i])); + ibz_add(&(sums.v[i]), &(sums.v[i]), &prod); + ibz_centered_mod(&(sums.v[i]), &(sums.v[i]), &m); } for (int i = 0; i < 4; i++) { - ibz_copy(&((*lc)[i]), &(sums[i])); + ibz_copy(&(lc->v[i]), &(sums.v[i])); } ibz_finalize(&prod); ibz_finalize(&m); @@ -86,7 +86,7 @@ ibz_vec_4_copy_mod(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_t *mod) ibz_init(&m); ibz_copy(&m, mod); for (int i = 0; i < 4; i++) { - ibz_centered_mod(&((*res)[i]), &((*vec)[i]), &m); + 
ibz_centered_mod(&(res->v[i]), &(vec->v[i]), &m); } ibz_finalize(&m); } @@ -101,8 +101,8 @@ ibz_vec_4_scalar_mul_mod(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4 ibz_copy(&s, scalar); ibz_copy(&m, mod); for (int i = 0; i < 4; i++) { - ibz_mul(&((*prod)[i]), &((*vec)[i]), &s); - ibz_mod(&((*prod)[i]), &((*prod)[i]), &m); + ibz_mul(&(prod->v[i]), &(vec->v[i]), &s); + ibz_mod(&(prod->v[i]), &(prod->v[i]), &m); } ibz_finalize(&m); ibz_finalize(&s); @@ -138,36 +138,36 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec if (h < 4) ibz_vec_4_init(&(w[h])); ibz_vec_4_init(&(a[h])); - ibz_copy(&(a[h][0]), &(generators[h][0])); - ibz_copy(&(a[h][1]), &(generators[h][1])); - ibz_copy(&(a[h][2]), &(generators[h][2])); - ibz_copy(&(a[h][3]), &(generators[h][3])); + ibz_copy(&(a[h].v[0]), &(generators[h].v[0])); + ibz_copy(&(a[h].v[1]), &(generators[h].v[1])); + ibz_copy(&(a[h].v[2]), &(generators[h].v[2])); + ibz_copy(&(a[h].v[3]), &(generators[h].v[3])); } assert(ibz_cmp(mod, &ibz_const_zero) > 0); ibz_copy(&m, mod); while (i != -1) { while (j != 0) { j = j - 1; - if (!ibz_is_zero(&(a[j][i]))) { + if (!ibz_is_zero(&(a[j].v[i]))) { // assumtion that ibz_xgcd outputs u,v which are small in absolute // value is needed here also, needs u non 0, but v can be 0 if needed - ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &(a[j][i])); + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k].v[i]), &(a[j].v[i])); ibz_vec_4_linear_combination(&c, &u, &(a[k]), &v, &(a[j])); - ibz_div(&coeff_1, &r, &(a[k][i]), &d); - ibz_div(&coeff_2, &r, &(a[j][i]), &d); + ibz_div(&coeff_1, &r, &(a[k].v[i]), &d); + ibz_div(&coeff_2, &r, &(a[j].v[i]), &d); ibz_neg(&coeff_2, &coeff_2); ibz_vec_4_linear_combination_mod( &(a[j]), &coeff_1, &(a[j]), &coeff_2, &(a[k]), &m); // do lin comb mod m ibz_vec_4_copy_mod(&(a[k]), &c, &m); // mod m in copy } } - ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &m); + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k].v[i]), &m); ibz_vec_4_scalar_mul_mod(&(w[i]), &u, &(a[k]), &m); // mod m in scalar mult - if (ibz_is_zero(&(w[i][i]))) { - ibz_copy(&(w[i][i]), &m); + if (ibz_is_zero(&(w[i].v[i]))) { + ibz_copy(&(w[i].v[i]), &m); } for (int h = i + 1; h < 4; h++) { - ibz_div_floor(&q, &r, &(w[h][i]), &(w[i][i])); + ibz_div_floor(&q, &r, &(w[h].v[i]), &(w[i].v[i])); ibz_neg(&q, &q); ibz_vec_4_linear_combination(&(w[h]), &ibz_const_one, &(w[h]), &q, &(w[i])); } @@ -177,8 +177,8 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec k = k - 1; i = i - 1; j = k; - if (ibz_is_zero(&(a[k][i]))) - ibz_copy(&(a[k][i]), &m); + if (ibz_is_zero(&(a[k].v[i]))) + ibz_copy(&(a[k].v[i]), &m); } else { k = k - 1; @@ -188,7 +188,7 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec } for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { - ibz_copy(&((*hnf)[i][j]), &(w[j][i])); + ibz_copy(&((hnf->m)[i][j]), &(w[j].v[i])); } } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ibz_division.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ibz_division.c index 0fd35b5c65..f630f5a9fe 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ibz_division.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ibz_division.c @@ -8,5 +8,5 @@ void ibz_xgcd(ibz_t *gcd, ibz_t *u, ibz_t *v, const ibz_t *a, const ibz_t *b) { - mpz_gcdext(*gcd, *u, *v, *a, *b); + mpz_gcdext(gcd->i, u->i, v->i, a->i, b->i); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/id2iso.c 
b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/id2iso.c index 0743974345..1be9d87e71 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/id2iso.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/id2iso.c @@ -18,8 +18,8 @@ ec_biscalar_mul_ibz_vec(ec_point_t *res, const ec_curve_t *curve) { digit_t scalars[2][NWORDS_ORDER]; - ibz_to_digit_array(scalars[0], &(*scalar_vec)[0]); - ibz_to_digit_array(scalars[1], &(*scalar_vec)[1]); + ibz_to_digit_array(scalars[0], &scalar_vec->v[0]); + ibz_to_digit_array(scalars[1], &scalar_vec->v[1]); ec_biscalar_mul(res, scalars[0], scalars[1], f, PQ, curve); } @@ -48,14 +48,14 @@ id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *vec, const quat_left_ideal_t *lid quat_change_to_O0_basis(&coeffs, &alpha); for (unsigned i = 0; i < 2; ++i) { - ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + ibz_add(&mat.m[i][i], &mat.m[i][i], &coeffs.v[0]); for (unsigned j = 0; j < 2; ++j) { - ibz_mul(&tmp, &ACTION_GEN2[i][j], &coeffs[1]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &ACTION_GEN3[i][j], &coeffs[2]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &ACTION_GEN4[i][j], &coeffs[3]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN2.m[i][j], &coeffs.v[1]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN3.m[i][j], &coeffs.v[2]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN4.m[i][j], &coeffs.v[3]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); } } @@ -67,16 +67,16 @@ id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *vec, const quat_left_ideal_t *lid { const ibz_t *const norm = &lideal->norm; - ibz_mod(&(*vec)[0], &mat[0][0], norm); - ibz_mod(&(*vec)[1], &mat[1][0], norm); - ibz_gcd(&tmp, &(*vec)[0], &(*vec)[1]); + ibz_mod(&vec->v[0], &mat.m[0][0], norm); + ibz_mod(&vec->v[1], &mat.m[1][0], norm); + ibz_gcd(&tmp, &vec->v[0], &vec->v[1]); if (ibz_is_even(&tmp)) { - ibz_mod(&(*vec)[0], &mat[0][1], norm); - ibz_mod(&(*vec)[1], &mat[1][1], norm); + ibz_mod(&vec->v[0], &mat.m[0][1], norm); + ibz_mod(&vec->v[1], &mat.m[1][1], norm); } #ifndef NDEBUG - ibz_gcd(&tmp, &(*vec)[0], norm); - ibz_gcd(&tmp, &(*vec)[1], &tmp); + ibz_gcd(&tmp, &vec->v[0], norm); + ibz_gcd(&tmp, &vec->v[1], &tmp); assert(!ibz_cmp(&tmp, &ibz_const_one)); #endif } @@ -102,28 +102,28 @@ matrix_application_even_basis(ec_basis_t *bas, const ec_curve_t *E, ibz_mat_2x2_ copy_basis(&tmp_bas, bas); // reduction mod 2f - ibz_mod(&(*mat)[0][0], &(*mat)[0][0], &pow_two); - ibz_mod(&(*mat)[0][1], &(*mat)[0][1], &pow_two); - ibz_mod(&(*mat)[1][0], &(*mat)[1][0], &pow_two); - ibz_mod(&(*mat)[1][1], &(*mat)[1][1], &pow_two); + ibz_mod(&mat->m[0][0], &mat->m[0][0], &pow_two); + ibz_mod(&mat->m[0][1], &mat->m[0][1], &pow_two); + ibz_mod(&mat->m[1][0], &mat->m[1][0], &pow_two); + ibz_mod(&mat->m[1][1], &mat->m[1][1], &pow_two); // For a matrix [[a, c], [b, d]] we compute: // // first basis element R = [a]P + [b]Q - ibz_to_digit_array(scalars[0], &(*mat)[0][0]); - ibz_to_digit_array(scalars[1], &(*mat)[1][0]); + ibz_to_digit_array(scalars[0], &mat->m[0][0]); + ibz_to_digit_array(scalars[1], &mat->m[1][0]); ec_biscalar_mul(&bas->P, scalars[0], scalars[1], f, &tmp_bas, E); // second basis element S = [c]P + [d]Q - ibz_to_digit_array(scalars[0], &(*mat)[0][1]); - ibz_to_digit_array(scalars[1], &(*mat)[1][1]); + ibz_to_digit_array(scalars[0], &mat->m[0][1]); + ibz_to_digit_array(scalars[1], &mat->m[1][1]); ec_biscalar_mul(&bas->Q, scalars[0], scalars[1], f, &tmp_bas, E); // Their difference R - S = [a - c]P + [b - d]Q 
- ibz_sub(&tmp, &(*mat)[0][0], &(*mat)[0][1]); + ibz_sub(&tmp, &mat->m[0][0], &mat->m[0][1]); ibz_mod(&tmp, &tmp, &pow_two); ibz_to_digit_array(scalars[0], &tmp); - ibz_sub(&tmp, &(*mat)[1][0], &(*mat)[1][1]); + ibz_sub(&tmp, &mat->m[1][0], &mat->m[1][1]); ibz_mod(&tmp, &tmp, &pow_two); ibz_to_digit_array(scalars[1], &tmp); ret = ec_biscalar_mul(&bas->PmQ, scalars[0], scalars[1], f, &tmp_bas, E); @@ -157,23 +157,23 @@ endomorphism_application_even_basis(ec_basis_t *bas, quat_alg_make_primitive(&coeffs, &content, theta, &EXTREMAL_ORDERS[index_alternate_curve].order); assert(ibz_is_odd(&content)); - ibz_set(&mat[0][0], 0); - ibz_set(&mat[0][1], 0); - ibz_set(&mat[1][0], 0); - ibz_set(&mat[1][1], 0); + ibz_set(&mat.m[0][0], 0); + ibz_set(&mat.m[0][1], 0); + ibz_set(&mat.m[1][0], 0); + ibz_set(&mat.m[1][1], 0); // computing the matrix for (unsigned i = 0; i < 2; ++i) { - ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + ibz_add(&mat.m[i][i], &mat.m[i][i], &coeffs.v[0]); for (unsigned j = 0; j < 2; ++j) { - ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen2[i][j], &coeffs[1]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen3[i][j], &coeffs[2]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen4[i][j], &coeffs[3]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&mat[i][j], &mat[i][j], &content); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen2.m[i][j], &coeffs.v[1]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen3.m[i][j], &coeffs.v[2]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen4.m[i][j], &coeffs.v[3]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&mat.m[i][j], &mat.m[i][j], &content); } } @@ -215,19 +215,19 @@ id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t * ibz_mat_2x2_t mat; ibz_mat_2x2_init(&mat); - ibz_copy(&mat[0][0], &(*vec2)[0]); - ibz_copy(&mat[1][0], &(*vec2)[1]); + ibz_copy(&mat.m[0][0], &vec2->v[0]); + ibz_copy(&mat.m[1][0], &vec2->v[1]); ibz_mat_2x2_eval(&vec, &ACTION_J, vec2); - ibz_copy(&mat[0][1], &vec[0]); - ibz_copy(&mat[1][1], &vec[1]); + ibz_copy(&mat.m[0][1], &vec.v[0]); + ibz_copy(&mat.m[1][1], &vec.v[1]); ibz_mat_2x2_eval(&vec, &ACTION_GEN4, vec2); - ibz_add(&mat[0][1], &mat[0][1], &vec[0]); - ibz_add(&mat[1][1], &mat[1][1], &vec[1]); + ibz_add(&mat.m[0][1], &mat.m[0][1], &vec.v[0]); + ibz_add(&mat.m[1][1], &mat.m[1][1], &vec.v[1]); - ibz_mod(&mat[0][1], &mat[0][1], &two_pow); - ibz_mod(&mat[1][1], &mat[1][1], &two_pow); + ibz_mod(&mat.m[0][1], &mat.m[0][1], &two_pow); + ibz_mod(&mat.m[1][1], &mat.m[1][1], &two_pow); ibz_mat_2x2_t inv; ibz_mat_2x2_init(&inv); @@ -247,11 +247,11 @@ id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t * quat_alg_elem_t gen; quat_alg_elem_init(&gen); ibz_set(&gen.denom, 2); - ibz_add(&gen.coord[0], &vec[0], &vec[0]); - ibz_set(&gen.coord[1], -2); - ibz_add(&gen.coord[2], &vec[1], &vec[1]); - ibz_copy(&gen.coord[3], &vec[1]); - ibz_add(&gen.coord[0], &gen.coord[0], &vec[1]); + ibz_add(&gen.coord.v[0], &vec.v[0], &vec.v[0]); + ibz_set(&gen.coord.v[1], -2); + ibz_add(&gen.coord.v[2], &vec.v[1], &vec.v[1]); + ibz_copy(&gen.coord.v[3], &vec.v[1]); + ibz_add(&gen.coord.v[0], &gen.coord.v[0], &vec.v[1]); ibz_vec_2_finalize(&vec); 
quat_lideal_create(lideal, &gen, &two_pow, &MAXORD_O0, &QUATALG_PINFTY); @@ -319,10 +319,10 @@ _change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, #endif // Copy the results into the matrix - ibz_copy_digit_array(&((*mat)[0][0]), x1); - ibz_copy_digit_array(&((*mat)[1][0]), x2); - ibz_copy_digit_array(&((*mat)[0][1]), x3); - ibz_copy_digit_array(&((*mat)[1][1]), x4); + ibz_copy_digit_array(&(mat->m[0][0]), x1); + ibz_copy_digit_array(&(mat->m[1][0]), x2); + ibz_copy_digit_array(&(mat->m[0][1]), x3); + ibz_copy_digit_array(&(mat->m[1][1]), x4); } void diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ideal.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ideal.c index 9cf863a104..8634143941 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ideal.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ideal.c @@ -33,7 +33,7 @@ quat_lideal_copy(quat_left_ideal_t *copy, const quat_left_ideal_t *copied) ibz_copy(©->lattice.denom, &copied->lattice.denom); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(©->lattice.basis[i][j], &copied->lattice.basis[i][j]); + ibz_copy(©->lattice.basis.m[i][j], &copied->lattice.basis.m[i][j]); } } } @@ -248,13 +248,13 @@ quat_lideal_class_gram(ibz_mat_4x4_t *G, const quat_left_ideal_t *lideal, const for (int i = 0; i < 4; i++) { for (int j = 0; j <= i; j++) { - ibz_div(&(*G)[i][j], &rmd, &(*G)[i][j], &divisor); + ibz_div(&G->m[i][j], &rmd, &G->m[i][j], &divisor); assert(ibz_is_zero(&rmd)); } } for (int i = 0; i < 4; i++) { for (int j = 0; j <= i - 1; j++) { - ibz_copy(&(*G)[j][i], &(*G)[i][j]); + ibz_copy(&G->m[j][i], &G->m[i][j]); } } @@ -289,8 +289,8 @@ quat_order_discriminant(ibz_t *disc, const quat_lattice_t *order, const quat_alg ibz_mat_4x4_transpose(&transposed, &(order->basis)); // multiply gram matrix by 2 because of reduced trace ibz_mat_4x4_identity(&norm); - ibz_copy(&(norm[2][2]), &(alg->p)); - ibz_copy(&(norm[3][3]), &(alg->p)); + ibz_copy(&(norm.m[2][2]), &(alg->p)); + ibz_copy(&(norm.m[3][3]), &(alg->p)); ibz_mat_4x4_scalar_mul(&norm, &ibz_const_two, &norm); ibz_mat_4x4_mul(&prod, &transposed, &norm); ibz_mat_4x4_mul(&prod, &prod, &(order->basis)); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/intbig.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/intbig.c index b0462dc8b5..e219bf3d96 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/intbig.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/intbig.c @@ -114,48 +114,48 @@ DEBUG_STR_FUN_4(const char *op, const ibz_t *arg1, const ibz_t *arg2, const ibz_ * @{ */ -const __mpz_struct ibz_const_zero[1] = { +const ibz_t ibz_const_zero = {{ { ._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]){ 0 }, } -}; +}}; -const __mpz_struct ibz_const_one[1] = { +const ibz_t ibz_const_one = {{ { ._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]){ 1 }, } -}; +}}; -const __mpz_struct ibz_const_two[1] = { +const ibz_t ibz_const_two = {{ { ._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]){ 2 }, } -}; +}}; -const __mpz_struct ibz_const_three[1] = { +const ibz_t ibz_const_three = {{ { ._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]){ 3 }, } -}; +}}; void ibz_init(ibz_t *x) { - mpz_init(*x); + mpz_init(x->i); } void ibz_finalize(ibz_t *x) { - mpz_clear(*x); + mpz_clear(x->i); } void @@ -168,7 +168,7 @@ ibz_add(ibz_t *sum, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_add(*sum, *a, *b); + mpz_add(sum->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_3("ibz_add", sum, 
&a_cp, &b_cp); ibz_finalize(&a_cp); @@ -186,7 +186,7 @@ ibz_sub(ibz_t *diff, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_sub(*diff, *a, *b); + mpz_sub(diff->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_3("ibz_sub", diff, &a_cp, &b_cp); @@ -205,7 +205,7 @@ ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_mul(*prod, *a, *b); + mpz_mul(prod->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_3("ibz_mul", prod, &a_cp, &b_cp); ibz_finalize(&a_cp); @@ -216,13 +216,13 @@ ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b) void ibz_neg(ibz_t *neg, const ibz_t *a) { - mpz_neg(*neg, *a); + mpz_neg(neg->i, a->i); } void ibz_abs(ibz_t *abs, const ibz_t *a) { - mpz_abs(*abs, *a); + mpz_abs(abs->i, a->i); } void @@ -235,7 +235,7 @@ ibz_div(ibz_t *quotient, ibz_t *remainder, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_tdiv_qr(*quotient, *remainder, *a, *b); + mpz_tdiv_qr(quotient->i, remainder->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_4("ibz_div", quotient, remainder, &a_cp, &b_cp); ibz_finalize(&a_cp); @@ -251,7 +251,7 @@ ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp) ibz_init(&a_cp); ibz_copy(&a_cp, a); #endif - mpz_tdiv_q_2exp(*quotient, *a, exp); + mpz_tdiv_q_2exp(quotient->i, a->i, exp); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_MP2_INT("ibz_div_2exp,%Zx,%Zx,%x\n", quotient, &a_cp, exp); ibz_finalize(&a_cp); @@ -261,50 +261,50 @@ ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp) void ibz_div_floor(ibz_t *q, ibz_t *r, const ibz_t *n, const ibz_t *d) { - mpz_fdiv_qr(*q, *r, *n, *d); + mpz_fdiv_qr(q->i, r->i, n->i, d->i); } void ibz_mod(ibz_t *r, const ibz_t *a, const ibz_t *b) { - mpz_mod(*r, *a, *b); + mpz_mod(r->i, a->i, b->i); } unsigned long int -ibz_mod_ui(const mpz_t *n, unsigned long int d) +ibz_mod_ui(const ibz_t *n, unsigned long int d) { - return mpz_fdiv_ui(*n, d); + return mpz_fdiv_ui(n->i, d); } int ibz_divides(const ibz_t *a, const ibz_t *b) { - return mpz_divisible_p(*a, *b); + return mpz_divisible_p(a->i, b->i); } void ibz_pow(ibz_t *pow, const ibz_t *x, uint32_t e) { - mpz_pow_ui(*pow, *x, e); + mpz_pow_ui(pow->i, x->i, e); } void ibz_pow_mod(ibz_t *pow, const ibz_t *x, const ibz_t *e, const ibz_t *m) { - mpz_powm(*pow, *x, *e, *m); + mpz_powm(pow->i, x->i, e->i, m->i); DEBUG_STR_FUN_4("ibz_pow_mod", pow, x, e, m); } int ibz_two_adic(ibz_t *pow) { - return mpz_scan1(*pow, 0); + return mpz_scan1(pow->i, 0); } int ibz_cmp(const ibz_t *a, const ibz_t *b) { - int ret = mpz_cmp(*a, *b); + int ret = mpz_cmp(a->i, b->i); DEBUG_STR_FUN_INT_MP2("ibz_cmp", ret, a, b); return ret; } @@ -312,7 +312,7 @@ ibz_cmp(const ibz_t *a, const ibz_t *b) int ibz_is_zero(const ibz_t *x) { - int ret = !mpz_cmp_ui(*x, 0); + int ret = !mpz_cmp_ui(x->i, 0); DEBUG_STR_FUN_INT_MP("ibz_is_zero", ret, x); return ret; } @@ -320,7 +320,7 @@ ibz_is_zero(const ibz_t *x) int ibz_is_one(const ibz_t *x) { - int ret = !mpz_cmp_ui(*x, 1); + int ret = !mpz_cmp_ui(x->i, 1); DEBUG_STR_FUN_INT_MP("ibz_is_one", ret, x); return ret; } @@ -328,7 +328,7 @@ ibz_is_one(const ibz_t *x) int ibz_cmp_int32(const ibz_t *x, int32_t y) { - int ret = mpz_cmp_si(*x, (signed long int)y); + int ret = mpz_cmp_si(x->i, (signed long int)y); DEBUG_STR_FUN_INT_MP_INT("ibz_cmp_int32", ret, x, y); return ret; } @@ -336,7 +336,7 @@ ibz_cmp_int32(const ibz_t *x, int32_t y) int ibz_is_even(const ibz_t *x) { - int ret = !mpz_tstbit(*x, 0); + int ret = !mpz_tstbit(x->i, 0); 
DEBUG_STR_FUN_INT_MP("ibz_is_even", ret, x); return ret; } @@ -344,7 +344,7 @@ ibz_is_even(const ibz_t *x) int ibz_is_odd(const ibz_t *x) { - int ret = mpz_tstbit(*x, 0); + int ret = mpz_tstbit(x->i, 0); DEBUG_STR_FUN_INT_MP("ibz_is_odd", ret, x); return ret; } @@ -352,7 +352,7 @@ ibz_is_odd(const ibz_t *x) void ibz_set(ibz_t *i, int32_t x) { - mpz_set_si(*i, x); + mpz_set_si(i->i, x); } int @@ -361,7 +361,7 @@ ibz_convert_to_str(const ibz_t *i, char *str, int base) if (!str || (base != 10 && base != 16)) return 0; - mpz_get_str(str, base, *i); + mpz_get_str(str, base, i->i); return 1; } @@ -380,29 +380,29 @@ ibz_print(const ibz_t *num, int base) int ibz_set_from_str(ibz_t *i, const char *str, int base) { - return (1 + mpz_set_str(*i, str, base)); + return (1 + mpz_set_str(i->i, str, base)); } void ibz_copy(ibz_t *target, const ibz_t *value) { - mpz_set(*target, *value); + mpz_set(target->i, value->i); } void ibz_swap(ibz_t *a, ibz_t *b) { - mpz_swap(*a, *b); + mpz_swap(a->i, b->i); } int32_t ibz_get(const ibz_t *i) { #if LONG_MAX == INT32_MAX - return (int32_t)mpz_get_si(*i); + return (int32_t)mpz_get_si(i->i); #elif LONG_MAX > INT32_MAX // Extracts the sign bit and the 31 least significant bits - signed long int t = mpz_get_si(*i); + signed long int t = mpz_get_si(i->i); return (int32_t)((t >> (sizeof(signed long int) * 8 - 32)) & INT32_C(0x80000000)) | (t & INT32_C(0x7FFFFFFF)); #else #error Unsupported configuration: LONG_MAX must be >= INT32_MAX @@ -417,10 +417,10 @@ ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b) mpz_t tmp; mpz_t bmina; mpz_init(bmina); - mpz_sub(bmina, *b, *a); + mpz_sub(bmina, b->i, a->i); if (mpz_sgn(bmina) == 0) { - mpz_set(*rand, *a); + mpz_set(rand->i, a->i); mpz_clear(bmina); return 1; } @@ -466,7 +466,7 @@ ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b) break; } while (1); - mpz_add(*rand, tmp, *a); + mpz_add(rand->i, tmp, a->i); err: mpz_clear(bmina); return ret; @@ -534,19 +534,19 @@ int ibz_rand_interval_minm_m(ibz_t *rand, int32_t m) { int ret = 1; - mpz_t m_big; + ibz_t m_big; // m_big = 2 * m - mpz_init_set_si(m_big, m); - mpz_add(m_big, m_big, m_big); + mpz_init_set_si(m_big.i, m); + mpz_add(m_big.i, m_big.i, m_big.i); // Sample in [0, 2*m] ret = ibz_rand_interval(rand, &ibz_const_zero, &m_big); // Adjust to range [-m, m] - mpz_sub_ui(*rand, *rand, m); + mpz_sub_ui(rand->i, rand->i, m); - mpz_clear(m_big); + mpz_clear(m_big.i); return ret; } @@ -555,41 +555,41 @@ int ibz_rand_interval_bits(ibz_t *rand, uint32_t m) { int ret = 1; - mpz_t tmp; - mpz_t low; - mpz_init_set_ui(tmp, 1); - mpz_mul_2exp(tmp, tmp, m); - mpz_init(low); - mpz_neg(low, tmp); + ibz_t tmp; + ibz_t low; + mpz_init_set_ui(tmp.i, 1); + mpz_mul_2exp(tmp.i, tmp.i, m); + mpz_init(low.i); + mpz_neg(low.i, tmp.i); ret = ibz_rand_interval(rand, &low, &tmp); - mpz_clear(tmp); - mpz_clear(low); + mpz_clear(tmp.i); + mpz_clear(low.i); if (ret != 1) goto err; - mpz_sub_ui(*rand, *rand, (unsigned long int)m); + mpz_sub_ui(rand->i, rand->i, (unsigned long int)m); return ret; err: - mpz_clear(tmp); - mpz_clear(low); + mpz_clear(tmp.i); + mpz_clear(low.i); return ret; } int ibz_bitsize(const ibz_t *a) { - return (int)mpz_sizeinbase(*a, 2); + return (int)mpz_sizeinbase(a->i, 2); } int ibz_size_in_base(const ibz_t *a, int base) { - return (int)mpz_sizeinbase(*a, base); + return (int)mpz_sizeinbase(a->i, base); } void ibz_copy_digits(ibz_t *target, const digit_t *dig, int dig_len) { - mpz_import(*target, dig_len, -1, sizeof(digit_t), 0, 0, dig); + mpz_import(target->i, 
dig_len, -1, sizeof(digit_t), 0, 0, dig); } void @@ -600,13 +600,13 @@ ibz_to_digits(digit_t *target, const ibz_t *ibz) // The next line ensures zero is written to the first limb of target if ibz is zero; // target is then overwritten by the actual value if it is not. target[0] = 0; - mpz_export(target, NULL, -1, sizeof(digit_t), 0, 0, *ibz); + mpz_export(target, NULL, -1, sizeof(digit_t), 0, 0, ibz->i); } int ibz_probab_prime(const ibz_t *n, int reps) { - int ret = mpz_probab_prime_p(*n, reps); + int ret = mpz_probab_prime_p(n->i, reps); DEBUG_STR_FUN_INT_MP_INT("ibz_probab_prime", ret, n, reps); return ret; } @@ -614,26 +614,26 @@ ibz_probab_prime(const ibz_t *n, int reps) void ibz_gcd(ibz_t *gcd, const ibz_t *a, const ibz_t *b) { - mpz_gcd(*gcd, *a, *b); + mpz_gcd(gcd->i, a->i, b->i); } int ibz_invmod(ibz_t *inv, const ibz_t *a, const ibz_t *mod) { - return (mpz_invert(*inv, *a, *mod) ? 1 : 0); + return (mpz_invert(inv->i, a->i, mod->i) ? 1 : 0); } int ibz_legendre(const ibz_t *a, const ibz_t *p) { - return mpz_legendre(*a, *p); + return mpz_legendre(a->i, p->i); } int ibz_sqrt(ibz_t *sqrt, const ibz_t *a) { - if (mpz_perfect_square_p(*a)) { - mpz_sqrt(*sqrt, *a); + if (mpz_perfect_square_p(a->i)) { + mpz_sqrt(sqrt->i, a->i); return 1; } else { return 0; @@ -643,7 +643,7 @@ ibz_sqrt(ibz_t *sqrt, const ibz_t *a) void ibz_sqrt_floor(ibz_t *sqrt, const ibz_t *a) { - mpz_sqrt(*sqrt, *a); + mpz_sqrt(sqrt->i, a->i); } int @@ -686,85 +686,85 @@ ibz_sqrt_mod_p(ibz_t *sqrt, const ibz_t *a, const ibz_t *p) int ret = 1; - mpz_mod(amod, *a, *p); + mpz_mod(amod, a->i, p->i); if (mpz_cmp_ui(amod, 0) < 0) { - mpz_add(amod, *p, amod); + mpz_add(amod, p->i, amod); } - if (mpz_legendre(amod, *p) != 1) { + if (mpz_legendre(amod, p->i) != 1) { ret = 0; goto end; } - mpz_sub_ui(pm1, *p, 1); + mpz_sub_ui(pm1, p->i, 1); - if (mpz_mod_ui(tmp, *p, 4) == 3) { + if (mpz_mod_ui(tmp, p->i, 4) == 3) { // p % 4 == 3 - mpz_add_ui(tmp, *p, 1); + mpz_add_ui(tmp, p->i, 1); mpz_fdiv_q_2exp(tmp, tmp, 2); - mpz_powm(*sqrt, amod, tmp, *p); - } else if (mpz_mod_ui(tmp, *p, 8) == 5) { + mpz_powm(sqrt->i, amod, tmp, p->i); + } else if (mpz_mod_ui(tmp, p->i, 8) == 5) { // p % 8 == 5 - mpz_sub_ui(tmp, *p, 1); + mpz_sub_ui(tmp, p->i, 1); mpz_fdiv_q_2exp(tmp, tmp, 2); - mpz_powm(tmp, amod, tmp, *p); // a^{(p-1)/4} mod p + mpz_powm(tmp, amod, tmp, p->i); // a^{(p-1)/4} mod p if (!mpz_cmp_ui(tmp, 1)) { - mpz_add_ui(tmp, *p, 3); + mpz_add_ui(tmp, p->i, 3); mpz_fdiv_q_2exp(tmp, tmp, 3); - mpz_powm(*sqrt, amod, tmp, *p); // a^{(p+3)/8} mod p + mpz_powm(sqrt->i, amod, tmp, p->i); // a^{(p+3)/8} mod p } else { - mpz_sub_ui(tmp, *p, 5); + mpz_sub_ui(tmp, p->i, 5); mpz_fdiv_q_2exp(tmp, tmp, 3); // (p - 5) / 8 mpz_mul_2exp(a4, amod, 2); // 4*a - mpz_powm(tmp, a4, tmp, *p); + mpz_powm(tmp, a4, tmp, p->i); mpz_mul_2exp(a2, amod, 1); mpz_mul(tmp, a2, tmp); - mpz_mod(*sqrt, tmp, *p); + mpz_mod(sqrt->i, tmp, p->i); } } else { // p % 8 == 1 -> Shanks-Tonelli int e = 0; - mpz_sub_ui(q, *p, 1); + mpz_sub_ui(q, p->i, 1); while (mpz_tstbit(q, e) == 0) e++; mpz_fdiv_q_2exp(q, q, e); // 1. find generator - non-quadratic residue mpz_set_ui(qnr, 2); - while (mpz_legendre(qnr, *p) != -1) + while (mpz_legendre(qnr, p->i) != -1) mpz_add_ui(qnr, qnr, 1); - mpz_powm(z, qnr, q, *p); + mpz_powm(z, qnr, q, p->i); // 2. 
Initialize mpz_set(y, z); - mpz_powm(y, amod, q, *p); // y = a^q mod p + mpz_powm(y, amod, q, p->i); // y = a^q mod p mpz_add_ui(tmp, q, 1); // tmp = (q + 1) / 2 mpz_fdiv_q_2exp(tmp, tmp, 1); - mpz_powm(x, amod, tmp, *p); // x = a^(q + 1)/2 mod p + mpz_powm(x, amod, tmp, p->i); // x = a^(q + 1)/2 mod p mpz_set_ui(exp, 1); mpz_mul_2exp(exp, exp, e - 2); for (int i = 0; i < e; ++i) { - mpz_powm(b, y, exp, *p); + mpz_powm(b, y, exp, p->i); if (!mpz_cmp(b, pm1)) { mpz_mul(x, x, z); - mpz_mod(x, x, *p); + mpz_mod(x, x, p->i); mpz_mul(y, y, z); mpz_mul(y, y, z); - mpz_mod(y, y, *p); + mpz_mod(y, y, p->i); } - mpz_powm_ui(z, z, 2, *p); + mpz_powm_ui(z, z, 2, p->i); mpz_fdiv_q_2exp(exp, exp, 1); } - mpz_set(*sqrt, x); + mpz_set(sqrt->i, x); } #ifdef DEBUG_VERBOSE diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/intbig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/intbig.h index a0c2c02477..28e478ff7f 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/intbig.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/intbig.h @@ -33,7 +33,9 @@ * * For integers of arbitrary size, used by intbig module, using gmp */ -typedef mpz_t ibz_t; +typedef struct { + mpz_t i; +} ibz_t; /** @} */ @@ -129,7 +131,7 @@ int ibz_two_adic(ibz_t *pow); */ void ibz_mod(ibz_t *r, const ibz_t *a, const ibz_t *b); -unsigned long int ibz_mod_ui(const mpz_t *n, unsigned long int d); +unsigned long int ibz_mod_ui(const ibz_t *n, unsigned long int d); /** @brief Test if a = 0 mod b */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/l2.c index 5491ee44d0..ea32213c75 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/l2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/l2.c @@ -57,25 +57,25 @@ to_etabar(fp_num *x) } static void -from_mpz(const mpz_t x, fp_num *r) +from_mpz(const ibz_t *x, fp_num *r) { long exp = 0; - r->s = mpz_get_d_2exp(&exp, x); + r->s = mpz_get_d_2exp(&exp, x->i); r->e = exp; } static void -to_mpz(const fp_num *x, mpz_t r) +to_mpz(const fp_num *x, ibz_t *r) { if (x->e >= DBL_MANT_DIG) { double s = x->s * 0x1P53; - mpz_set_d(r, s); - mpz_mul_2exp(r, r, x->e - DBL_MANT_DIG); + mpz_set_d(r->i, s); + mpz_mul_2exp(r->i, r->i, x->e - DBL_MANT_DIG); } else if (x->e < 0) { - mpz_set_ui(r, 0); + mpz_set_ui(r->i, 0); } else { double s = ldexp(x->s, x->e); - mpz_set_d(r, round(s)); + mpz_set_d(r->i, round(s)); } } @@ -203,7 +203,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) ibz_init(&tmpI); // Main L² loop - from_mpz((*G)[0][0], &r[0][0]); + from_mpz(&G->m[0][0], &r[0][0]); int kappa = 1; while (kappa < 4) { // size reduce b_κ @@ -213,7 +213,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Loop invariant: // r[κ][j] ≈ u[κ][j] ‖b_j*‖² ≈ 〈b_κ, b_j*〉 for (int j = 0; j <= kappa; j++) { - from_mpz((*G)[kappa][j], &r[kappa][j]); + from_mpz(&G->m[kappa][j], &r[kappa][j]); for (int k = 0; k < j; k++) { fp_mul(&r[kappa][k], &u[j][k], &tmpF); fp_sub(&r[kappa][j], &tmpF, &r[kappa][j]); @@ -229,22 +229,22 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) done = 0; copy(&u[kappa][i], &Xf); fp_round(&Xf); - to_mpz(&Xf, X); + to_mpz(&Xf, &X); // Update basis: b_κ ← b_κ - X·b_i for (int j = 0; j < 4; j++) { - ibz_mul(&tmpI, &X, &(*basis)[j][i]); - ibz_sub(&(*basis)[j][kappa], &(*basis)[j][kappa], &tmpI); + ibz_mul(&tmpI, &X, &basis->m[j][i]); + ibz_sub(&basis->m[j][kappa], &basis->m[j][kappa], &tmpI); } // Update lower half of the Gram matrix // = - 2X + X² = // - X - X( 
- X·) //// 〈b_κ, b_κ〉 ← 〈b_κ, b_κ〉 - X·〈b_κ, b_i〉 - ibz_mul(&tmpI, &X, &(*G)[kappa][i]); - ibz_sub(&(*G)[kappa][kappa], &(*G)[kappa][kappa], &tmpI); + ibz_mul(&tmpI, &X, &G->m[kappa][i]); + ibz_sub(&G->m[kappa][kappa], &G->m[kappa][kappa], &tmpI); for (int j = 0; j < 4; j++) { // works because i < κ // 〈b_κ, b_j〉 ← 〈b_κ, b_j〉 - X·〈b_i, b_j〉 - ibz_mul(&tmpI, &X, SYM((*G), i, j)); - ibz_sub(SYM((*G), kappa, j), SYM((*G), kappa, j), &tmpI); + ibz_mul(&tmpI, &X, SYM(G->m, i, j)); + ibz_sub(SYM(G->m, kappa, j), SYM(G->m, kappa, j), &tmpI); } // After the loop: //// 〈b_κ,b_κ〉 ← 〈b_κ,b_κ〉 - X·〈b_κ,b_i〉 - X·(〈b_κ,b_i〉 - X·〈b_i, @@ -261,7 +261,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Check Lovasz' conditions // lovasz[0] = ‖b_κ‖² - from_mpz((*G)[kappa][kappa], &lovasz[0]); + from_mpz(&G->m[kappa][kappa], &lovasz[0]); // lovasz[i] = lovasz[i-1] - u[κ][i-1]·r[κ][i-1] for (int i = 1; i < kappa; i++) { fp_mul(&u[kappa][i - 1], &r[kappa][i - 1], &tmpF); @@ -279,11 +279,11 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Insert b_κ before b_swap in the basis and in the lower half Gram matrix for (int j = kappa; j > swap; j--) { for (int i = 0; i < 4; i++) { - ibz_swap(&(*basis)[i][j], &(*basis)[i][j - 1]); + ibz_swap(&basis->m[i][j], &basis->m[i][j - 1]); if (i == j - 1) - ibz_swap(&(*G)[i][i], &(*G)[j][j]); + ibz_swap(&G->m[i][i], &G->m[j][j]); else if (i != j) - ibz_swap(SYM((*G), i, j), SYM((*G), i, j - 1)); + ibz_swap(SYM(G->m, i, j), SYM(G->m, i, j - 1)); } } // Copy row u[κ] and r[κ] in swap position, ignore what follows @@ -318,7 +318,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Fill in the upper half of the Gram matrix for (int i = 0; i < 4; i++) { for (int j = i + 1; j < 4; j++) - ibz_copy(&(*G)[i][j], &(*G)[j][i]); + ibz_copy(&G->m[i][j], &G->m[j][i]); } // Clearinghouse diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lat_ball.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lat_ball.c index c7bbb9682f..3f7476988c 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lat_ball.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lat_ball.c @@ -28,10 +28,10 @@ quat_lattice_bound_parallelogram(ibz_vec_4_t *box, ibz_mat_4x4_t *U, const ibz_m // Compute the parallelogram's bounds int trivial = 1; for (int i = 0; i < 4; i++) { - ibz_mul(&(*box)[i], &dualG[i][i], radius); - ibz_div(&(*box)[i], &rem, &(*box)[i], &denom); - ibz_sqrt_floor(&(*box)[i], &(*box)[i]); - trivial &= ibz_is_zero(&(*box)[i]); + ibz_mul(&box->v[i], &dualG.m[i][i], radius); + ibz_div(&box->v[i], &rem, &box->v[i], &denom); + ibz_sqrt_floor(&box->v[i], &box->v[i]); + trivial &= ibz_is_zero(&box->v[i]); } // Compute the transpose transformation matrix @@ -95,12 +95,12 @@ quat_lattice_sample_from_ball(quat_alg_elem_t *res, do { // Sample vector for (int i = 0; i < 4; i++) { - if (ibz_is_zero(&box[i])) { - ibz_copy(&x[i], &ibz_const_zero); + if (ibz_is_zero(&box.v[i])) { + ibz_copy(&x.v[i], &ibz_const_zero); } else { - ibz_add(&tmp, &box[i], &box[i]); - ok &= ibz_rand_interval(&x[i], &ibz_const_zero, &tmp); - ibz_sub(&x[i], &x[i], &box[i]); + ibz_add(&tmp, &box.v[i], &box.v[i]); + ok &= ibz_rand_interval(&x.v[i], &ibz_const_zero, &tmp); + ibz_sub(&x.v[i], &x.v[i], &box.v[i]); if (!ok) goto err; } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lattice.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lattice.c index c98bae9499..ef7b9ccdcc 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lattice.c +++ 
b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lattice.c @@ -57,7 +57,7 @@ quat_lattice_conjugate_without_hnf(quat_lattice_t *conj, const quat_lattice_t *l for (int row = 1; row < 4; ++row) { for (int col = 0; col < 4; ++col) { - ibz_neg(&(conj->basis[row][col]), &(conj->basis[row][col])); + ibz_neg(&(conj->basis.m[row][col]), &(conj->basis.m[row][col])); } } } @@ -96,14 +96,14 @@ quat_lattice_add(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lat ibz_mat_4x4_scalar_mul(&tmp, &(lat1->denom), &(lat2->basis)); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(generators[j][i]), &(tmp[i][j])); + ibz_copy(&(generators[j].v[i]), &(tmp.m[i][j])); } } ibz_mat_4x4_inv_with_det_as_denom(NULL, &det1, &tmp); ibz_mat_4x4_scalar_mul(&tmp, &(lat2->denom), &(lat1->basis)); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(generators[4 + j][i]), &(tmp[i][j])); + ibz_copy(&(generators[4 + j].v[i]), &(tmp.m[i][j])); } } ibz_mat_4x4_inv_with_det_as_denom(NULL, &det2, &tmp); @@ -151,12 +151,12 @@ quat_lattice_mat_alg_coord_mul_without_hnf(ibz_mat_4x4_t *prod, ibz_vec_4_init(&p); ibz_vec_4_init(&a); for (int i = 0; i < 4; i++) { - ibz_vec_4_copy_ibz(&a, &((*lat)[0][i]), &((*lat)[1][i]), &((*lat)[2][i]), &((*lat)[3][i])); + ibz_vec_4_copy_ibz(&a, &(lat->m[0][i]), &(lat->m[1][i]), &(lat->m[2][i]), &(lat->m[3][i])); quat_alg_coord_mul(&p, &a, coord, alg); - ibz_copy(&((*prod)[0][i]), &(p[0])); - ibz_copy(&((*prod)[1][i]), &(p[1])); - ibz_copy(&((*prod)[2][i]), &(p[2])); - ibz_copy(&((*prod)[3][i]), &(p[3])); + ibz_copy(&(prod->m[0][i]), &(p.v[0])); + ibz_copy(&(prod->m[1][i]), &(p.v[1])); + ibz_copy(&(prod->m[2][i]), &(p.v[2])); + ibz_copy(&(prod->m[3][i]), &(p.v[3])); } ibz_vec_4_finalize(&p); ibz_vec_4_finalize(&a); @@ -191,15 +191,15 @@ quat_lattice_mul(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lat ibz_vec_4_init(&(generators[i])); for (int k = 0; k < 4; k++) { ibz_vec_4_copy_ibz( - &elem1, &(lat1->basis[0][k]), &(lat1->basis[1][k]), &(lat1->basis[2][k]), &(lat1->basis[3][k])); + &elem1, &(lat1->basis.m[0][k]), &(lat1->basis.m[1][k]), &(lat1->basis.m[2][k]), &(lat1->basis.m[3][k])); for (int i = 0; i < 4; i++) { ibz_vec_4_copy_ibz( - &elem2, &(lat2->basis[0][i]), &(lat2->basis[1][i]), &(lat2->basis[2][i]), &(lat2->basis[3][i])); + &elem2, &(lat2->basis.m[0][i]), &(lat2->basis.m[1][i]), &(lat2->basis.m[2][i]), &(lat2->basis.m[3][i])); quat_alg_coord_mul(&elem_res, &elem1, &elem2, alg); for (int j = 0; j < 4; j++) { if (k == 0) - ibz_copy(&(detmat[i][j]), &(elem_res[j])); - ibz_copy(&(generators[4 * k + i][j]), &(elem_res[j])); + ibz_copy(&(detmat.m[i][j]), &(elem_res.v[j])); + ibz_copy(&(generators[4 * k + i].v[j]), &(elem_res.v[j])); } } } @@ -239,7 +239,7 @@ quat_lattice_contains(ibz_vec_4_t *coord, const quat_lattice_t *lat, const quat_ // copy result if (divisible && (coord != NULL)) { for (int i = 0; i < 4; i++) { - ibz_copy(&((*coord)[i]), &(work_coord[i])); + ibz_copy(&(coord->v[i]), &(work_coord.v[i])); } } ibz_finalize(&prod); @@ -292,7 +292,7 @@ quat_lattice_hnf(quat_lattice_t *lat) ibz_vec_4_init(&(generators[i])); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(generators[j][i]), &(lat->basis[i][j])); + ibz_copy(&(generators[j].v[i]), &(lat->basis.m[i][j])); } } ibz_mat_4xn_hnf_mod_core(&(lat->basis), 4, generators, &mod); @@ -309,19 +309,19 @@ quat_lattice_gram(ibz_mat_4x4_t *G, const quat_lattice_t *lattice, const quat_al ibz_init(&tmp); for (int i = 0; i < 4; i++) { for (int j = 0; j <= i; 
j++) { - ibz_set(&(*G)[i][j], 0); + ibz_set(&G->m[i][j], 0); for (int k = 0; k < 4; k++) { - ibz_mul(&tmp, &(lattice->basis)[k][i], &(lattice->basis)[k][j]); + ibz_mul(&tmp, &(lattice->basis.m)[k][i], &(lattice->basis.m)[k][j]); if (k >= 2) ibz_mul(&tmp, &tmp, &alg->p); - ibz_add(&(*G)[i][j], &(*G)[i][j], &tmp); + ibz_add(&G->m[i][j], &G->m[i][j], &tmp); } - ibz_mul(&(*G)[i][j], &(*G)[i][j], &ibz_const_two); + ibz_mul(&G->m[i][j], &G->m[i][j], &ibz_const_two); } } for (int i = 0; i < 4; i++) { for (int j = i + 1; j < 4; j++) { - ibz_copy(&(*G)[i][j], &(*G)[j][i]); + ibz_copy(&G->m[i][j], &G->m[j][i]); } } ibz_finalize(&tmp); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lll_applications.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lll_applications.c index 6c763b8c04..f5e9af922b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lll_applications.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lll_applications.c @@ -17,9 +17,9 @@ quat_lideal_reduce_basis(ibz_mat_4x4_t *reduced, quat_lll_core(gram, reduced); ibz_mat_4x4_scalar_mul(gram, &gram_corrector, gram); for (int i = 0; i < 4; i++) { - ibz_div_2exp(&((*gram)[i][i]), &((*gram)[i][i]), 1); + ibz_div_2exp(&(gram->m[i][i]), &(gram->m[i][i]), 1); for (int j = i + 1; j < 4; j++) { - ibz_set(&((*gram)[i][j]), 0); + ibz_set(&(gram->m[i][j]), 0); } } ibz_finalize(&gram_corrector); @@ -79,10 +79,10 @@ quat_lideal_prime_norm_reduced_equivalent(quat_left_ideal_t *lideal, while (!found && ctr < equiv_num_iter) { ctr++; // we select our linear combination at random - ibz_rand_interval_minm_m(&new_alpha.coord[0], equiv_bound_coeff); - ibz_rand_interval_minm_m(&new_alpha.coord[1], equiv_bound_coeff); - ibz_rand_interval_minm_m(&new_alpha.coord[2], equiv_bound_coeff); - ibz_rand_interval_minm_m(&new_alpha.coord[3], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[0], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[1], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[2], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[3], equiv_bound_coeff); // computation of the norm of the vector sampled quat_qf_eval(&tmp, &gram, &new_alpha.coord); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/normeq.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/normeq.c index 8c133dd095..aadbbe06c7 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/normeq.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/normeq.c @@ -13,23 +13,23 @@ quat_lattice_O0_set(quat_lattice_t *O0) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&(O0->basis[i][j]), 0); + ibz_set(&(O0->basis.m[i][j]), 0); } } ibz_set(&(O0->denom), 2); - ibz_set(&(O0->basis[0][0]), 2); - ibz_set(&(O0->basis[1][1]), 2); - ibz_set(&(O0->basis[2][2]), 1); - ibz_set(&(O0->basis[1][2]), 1); - ibz_set(&(O0->basis[3][3]), 1); - ibz_set(&(O0->basis[0][3]), 1); + ibz_set(&(O0->basis.m[0][0]), 2); + ibz_set(&(O0->basis.m[1][1]), 2); + ibz_set(&(O0->basis.m[2][2]), 1); + ibz_set(&(O0->basis.m[1][2]), 1); + ibz_set(&(O0->basis.m[3][3]), 1); + ibz_set(&(O0->basis.m[0][3]), 1); } void quat_lattice_O0_set_extremal(quat_p_extremal_maximal_order_t *O0) { - ibz_set(&O0->z.coord[1], 1); - ibz_set(&O0->t.coord[2], 1); + ibz_set(&O0->z.coord.v[1], 1); + ibz_set(&O0->t.coord.v[2], 1); ibz_set(&O0->z.denom, 1); ibz_set(&O0->t.denom, 1); O0->q = 1; @@ -50,24 +50,24 @@ quat_order_elem_create(quat_alg_elem_t *elem, quat_alg_elem_init(&quat_temp); // elem = x - 
quat_alg_scalar(elem, &(*coeffs)[0], &ibz_const_one); + quat_alg_scalar(elem, &coeffs->v[0], &ibz_const_one); // quat_temp = i*y - quat_alg_scalar(&quat_temp, &((*coeffs)[1]), &ibz_const_one); + quat_alg_scalar(&quat_temp, &(coeffs->v[1]), &ibz_const_one); quat_alg_mul(&quat_temp, &order->z, &quat_temp, Bpoo); // elem = x + i*y quat_alg_add(elem, elem, &quat_temp); // quat_temp = z * j - quat_alg_scalar(&quat_temp, &(*coeffs)[2], &ibz_const_one); + quat_alg_scalar(&quat_temp, &coeffs->v[2], &ibz_const_one); quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); // elem = x + i* + z*j quat_alg_add(elem, elem, &quat_temp); // quat_temp = t * j * i - quat_alg_scalar(&quat_temp, &(*coeffs)[3], &ibz_const_one); + quat_alg_scalar(&quat_temp, &coeffs->v[3], &ibz_const_one); quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); quat_alg_mul(&quat_temp, &quat_temp, &order->z, Bpoo); @@ -143,11 +143,11 @@ quat_represent_integer(quat_alg_elem_t *gamma, ibz_sub(&counter, &counter, &ibz_const_one); // we start by sampling the first coordinate - ibz_rand_interval(&coeffs[2], &ibz_const_one, &bound); + ibz_rand_interval(&coeffs.v[2], &ibz_const_one, &bound); // then, we sample the second coordinate // computing the second bound in temp as sqrt( (adjust_n_gamma - p*coeffs[2]²)/qp ) - ibz_mul(&cornacchia_target, &coeffs[2], &coeffs[2]); + ibz_mul(&cornacchia_target, &coeffs.v[2], &coeffs.v[2]); ibz_mul(&temp, &cornacchia_target, &(params->algebra->p)); ibz_sub(&temp, &adjusted_n_gamma, &temp); ibz_mul(&sq_bound, &q, &(params->algebra->p)); @@ -158,10 +158,10 @@ quat_represent_integer(quat_alg_elem_t *gamma, continue; } // sampling the second value - ibz_rand_interval(&coeffs[3], &ibz_const_one, &temp); + ibz_rand_interval(&coeffs.v[3], &ibz_const_one, &temp); // compute cornacchia_target = n_gamma - p * (z² + q*t²) - ibz_mul(&temp, &coeffs[3], &coeffs[3]); + ibz_mul(&temp, &coeffs.v[3], &coeffs.v[3]); ibz_mul(&temp, &q, &temp); ibz_add(&cornacchia_target, &cornacchia_target, &temp); ibz_mul(&cornacchia_target, &cornacchia_target, &((params->algebra)->p)); @@ -170,7 +170,7 @@ quat_represent_integer(quat_alg_elem_t *gamma, // applying cornacchia if (ibz_probab_prime(&cornacchia_target, params->primality_test_iterations)) - found = ibz_cornacchia_prime(&(coeffs[0]), &(coeffs[1]), &q, &cornacchia_target); + found = ibz_cornacchia_prime(&(coeffs.v[0]), &(coeffs.v[1]), &q, &cornacchia_target); else found = 0; @@ -179,33 +179,33 @@ quat_represent_integer(quat_alg_elem_t *gamma, // the treatmeat depends if the basis contains (1+j)/2 or (1+k)/2 // we must have x = t mod 2 and y = z mod 2 // if q=1 we can simply swap x and y - if (ibz_is_odd(&coeffs[0]) != ibz_is_odd(&coeffs[3])) { - ibz_swap(&coeffs[1], &coeffs[0]); + if (ibz_is_odd(&coeffs.v[0]) != ibz_is_odd(&coeffs.v[3])) { + ibz_swap(&coeffs.v[1], &coeffs.v[0]); } // we further check that (x-t)/2 = 1 mod 2 and (y-z)/2 = 1 mod 2 to ensure that the // resulting endomorphism will behave well for dim 2 computations - found = found && ((ibz_get(&coeffs[0]) - ibz_get(&coeffs[3])) % 4 == 2) && - ((ibz_get(&coeffs[1]) - ibz_get(&coeffs[2])) % 4 == 2); + found = found && ((ibz_get(&coeffs.v[0]) - ibz_get(&coeffs.v[3])) % 4 == 2) && + ((ibz_get(&coeffs.v[1]) - ibz_get(&coeffs.v[2])) % 4 == 2); } if (found) { #ifndef NDEBUG ibz_set(&temp, (params->order->q)); - ibz_mul(&temp, &temp, &(coeffs[1])); - ibz_mul(&temp, &temp, &(coeffs[1])); - ibz_mul(&test, &(coeffs[0]), &(coeffs[0])); + ibz_mul(&temp, &temp, &(coeffs.v[1])); + ibz_mul(&temp, &temp, &(coeffs.v[1])); + 
ibz_mul(&test, &(coeffs.v[0]), &(coeffs.v[0])); ibz_add(&temp, &temp, &test); assert(0 == ibz_cmp(&temp, &cornacchia_target)); - ibz_mul(&cornacchia_target, &(coeffs[3]), &(coeffs[3])); + ibz_mul(&cornacchia_target, &(coeffs.v[3]), &(coeffs.v[3])); ibz_mul(&cornacchia_target, &cornacchia_target, &(params->algebra->p)); - ibz_mul(&temp, &(coeffs[1]), &(coeffs[1])); + ibz_mul(&temp, &(coeffs.v[1]), &(coeffs.v[1])); ibz_add(&cornacchia_target, &cornacchia_target, &temp); ibz_set(&temp, (params->order->q)); ibz_mul(&cornacchia_target, &cornacchia_target, &temp); - ibz_mul(&temp, &(coeffs[0]), &coeffs[0]); + ibz_mul(&temp, &(coeffs.v[0]), &coeffs.v[0]); ibz_add(&cornacchia_target, &cornacchia_target, &temp); - ibz_mul(&temp, &(coeffs[2]), &coeffs[2]); + ibz_mul(&temp, &(coeffs.v[2]), &coeffs.v[2]); ibz_mul(&temp, &temp, &(params->algebra->p)); ibz_add(&cornacchia_target, &cornacchia_target, &temp); assert(0 == ibz_cmp(&cornacchia_target, &adjusted_n_gamma)); @@ -213,8 +213,8 @@ quat_represent_integer(quat_alg_elem_t *gamma, // translate x,y,z,t into the quaternion element gamma quat_order_elem_create(gamma, (params->order), &coeffs, (params->algebra)); #ifndef NDEBUG - quat_alg_norm(&temp, &(coeffs[0]), gamma, (params->algebra)); - assert(ibz_is_one(&(coeffs[0]))); + quat_alg_norm(&temp, &(coeffs.v[0]), gamma, (params->algebra)); + assert(ibz_is_one(&(coeffs.v[0]))); assert(0 == ibz_cmp(&temp, &adjusted_n_gamma)); assert(quat_lattice_contains(NULL, &((params->order)->order), gamma)); #endif @@ -232,10 +232,10 @@ quat_represent_integer(quat_alg_elem_t *gamma, if (found) { // new gamma ibz_mat_4x4_eval(&coeffs, &(((params->order)->order).basis), &coeffs); - ibz_copy(&gamma->coord[0], &coeffs[0]); - ibz_copy(&gamma->coord[1], &coeffs[1]); - ibz_copy(&gamma->coord[2], &coeffs[2]); - ibz_copy(&gamma->coord[3], &coeffs[3]); + ibz_copy(&gamma->coord.v[0], &coeffs.v[0]); + ibz_copy(&gamma->coord.v[1], &coeffs.v[1]); + ibz_copy(&gamma->coord.v[2], &coeffs.v[2]); + ibz_copy(&gamma->coord.v[3], &coeffs.v[3]); ibz_copy(&gamma->denom, &(((params->order)->order).denom)); } // var finalize @@ -279,10 +279,10 @@ quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, // we find a quaternion element of norm divisible by norm while (!found) { // generating a trace-zero element at random - ibz_set(&gen.coord[0], 0); + ibz_set(&gen.coord.v[0], 0); ibz_sub(&n_temp, norm, &ibz_const_one); for (int i = 1; i < 4; i++) - ibz_rand_interval(&gen.coord[i], &ibz_const_zero, &n_temp); + ibz_rand_interval(&gen.coord.v[i], &ibz_const_zero, &n_temp); // first, we compute the norm of the gen quat_alg_norm(&n_temp, &norm_d, &gen, (params->algebra)); @@ -293,7 +293,7 @@ quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, ibz_mod(&disc, &disc, norm); // now we check that -n is a square mod norm // and if the square root exists we compute it - found = ibz_sqrt_mod_p(&gen.coord[0], &disc, norm); + found = ibz_sqrt_mod_p(&gen.coord.v[0], &disc, norm); found = found && !quat_alg_elem_is_zero(&gen); } } else { @@ -319,7 +319,7 @@ quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, found = 0; while (!found) { for (int i = 0; i < 4; i++) { - ibz_rand_interval(&gen_rerand.coord[i], &ibz_const_one, norm); + ibz_rand_interval(&gen_rerand.coord.v[i], &ibz_const_one, norm); } quat_alg_norm(&n_temp, &norm_d, &gen_rerand, (params->algebra)); assert(ibz_is_one(&norm_d)); @@ -348,22 +348,22 @@ quat_change_to_O0_basis(ibz_vec_4_t *vec, const quat_alg_elem_t *el) { ibz_t tmp; ibz_init(&tmp); - 
ibz_copy(&(*vec)[2], &el->coord[2]); - ibz_add(&(*vec)[2], &(*vec)[2], &(*vec)[2]); // double (not optimal if el->denom is even...) - ibz_copy(&(*vec)[3], &el->coord[3]); // double (not optimal if el->denom is even...) - ibz_add(&(*vec)[3], &(*vec)[3], &(*vec)[3]); - ibz_sub(&(*vec)[0], &el->coord[0], &el->coord[3]); - ibz_sub(&(*vec)[1], &el->coord[1], &el->coord[2]); - - assert(ibz_divides(&(*vec)[0], &el->denom)); - assert(ibz_divides(&(*vec)[1], &el->denom)); - assert(ibz_divides(&(*vec)[2], &el->denom)); - assert(ibz_divides(&(*vec)[3], &el->denom)); - - ibz_div(&(*vec)[0], &tmp, &(*vec)[0], &el->denom); - ibz_div(&(*vec)[1], &tmp, &(*vec)[1], &el->denom); - ibz_div(&(*vec)[2], &tmp, &(*vec)[2], &el->denom); - ibz_div(&(*vec)[3], &tmp, &(*vec)[3], &el->denom); + ibz_copy(&vec->v[2], &el->coord.v[2]); + ibz_add(&vec->v[2], &vec->v[2], &vec->v[2]); // double (not optimal if el->denom is even...) + ibz_copy(&vec->v[3], &el->coord.v[3]); // double (not optimal if el->denom is even...) + ibz_add(&vec->v[3], &vec->v[3], &vec->v[3]); + ibz_sub(&vec->v[0], &el->coord.v[0], &el->coord.v[3]); + ibz_sub(&vec->v[1], &el->coord.v[1], &el->coord.v[2]); + + assert(ibz_divides(&vec->v[0], &el->denom)); + assert(ibz_divides(&vec->v[1], &el->denom)); + assert(ibz_divides(&vec->v[2], &el->denom)); + assert(ibz_divides(&vec->v[3], &el->denom)); + + ibz_div(&vec->v[0], &tmp, &vec->v[0], &el->denom); + ibz_div(&vec->v[1], &tmp, &vec->v[1], &el->denom); + ibz_div(&vec->v[2], &tmp, &vec->v[2], &el->denom); + ibz_div(&vec->v[3], &tmp, &vec->v[3], &el->denom); ibz_finalize(&tmp); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion.h index a567657464..2dd70a8c19 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion.h @@ -25,7 +25,9 @@ * * @typedef ibz_vec_2_t */ -typedef ibz_t ibz_vec_2_t[2]; +typedef struct { + ibz_t v[2]; +} ibz_vec_2_t; /** @brief Type for vectors of 4 integers * @@ -33,7 +35,9 @@ typedef ibz_t ibz_vec_2_t[2]; * * Represented as a vector of 4 ibz_t (big integer) elements */ -typedef ibz_t ibz_vec_4_t[4]; +typedef struct { + ibz_t v[4]; +} ibz_vec_4_t; /** @brief Type for 2 by 2 matrices of integers * @@ -41,7 +45,9 @@ typedef ibz_t ibz_vec_4_t[4]; * * Represented as a matrix of 2 vectors of 2 ibz_t (big integer) elements */ -typedef ibz_t ibz_mat_2x2_t[2][2]; +typedef struct { + ibz_t m[2][2]; +} ibz_mat_2x2_t; /** @brief Type for 4 by 4 matrices of integers * @@ -49,7 +55,9 @@ typedef ibz_t ibz_mat_2x2_t[2][2]; * * Represented as a matrix of 4 vectors of 4 ibz_t (big integer) elements */ -typedef ibz_t ibz_mat_4x4_t[4][4]; +typedef struct { + ibz_t m[4][4]; +} ibz_mat_4x4_t; /** * @} */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion_data.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion_data.c index 98b792431a..11ad7e707c 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion_data.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/quaternion_data.c @@ -4,3173 +4,3173 @@ const ibz_t QUAT_prime_cofactor = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x33,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x200}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x33,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x200}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x33,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x2000000}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x33,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x2000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x33,0x0,0x0,0x0,0x0,0x0,0x0,0x200000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x33,0x0,0x0,0x0,0x0,0x0,0x0,0x200000000000000}}}} #endif ; const quat_alg_t QUATALG_PINFTY = { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x1af}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x1af}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x1afffff}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x1afffff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0x1afffffffffffff}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0x1afffffffffffff}}}} #endif }; const quat_p_extremal_maximal_order_t EXTREMAL_ORDERS[7] = {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif 
GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = 
(mp_limb_t[]) {0x1}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 1}, {{ +}}}, 1}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc584,0x10dc,0x9924,0xb9c2,0x5a67,0xfbc8,0xbdfb,0xe941,0xc61,0xdee2,0xcd5c,0xc570,0xa2d4,0xb3ff,0x2e51,0x4927}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc584,0x10dc,0x9924,0xb9c2,0x5a67,0xfbc8,0xbdfb,0xe941,0xc61,0xdee2,0xcd5c,0xc570,0xa2d4,0xb3ff,0x2e51,0x4927}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x10dcc584,0xb9c29924,0xfbc85a67,0xe941bdfb,0xdee20c61,0xc570cd5c,0xb3ffa2d4,0x49272e51}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x10dcc584,0xb9c29924,0xfbc85a67,0xe941bdfb,0xdee20c61,0xc570cd5c,0xb3ffa2d4,0x49272e51}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb9c2992410dcc584,0xe941bdfbfbc85a67,0xc570cd5cdee20c61,0x49272e51b3ffa2d4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb9c2992410dcc584,0xe941bdfbfbc85a67,0xc570cd5cdee20c61,0x49272e51b3ffa2d4}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc584,0x10dc,0x9924,0xb9c2,0x5a67,0xfbc8,0xbdfb,0xe941,0xc61,0xdee2,0xcd5c,0xc570,0xa2d4,0xb3ff,0x2e51,0x4927}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc584,0x10dc,0x9924,0xb9c2,0x5a67,0xfbc8,0xbdfb,0xe941,0xc61,0xdee2,0xcd5c,0xc570,0xa2d4,0xb3ff,0x2e51,0x4927}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x10dcc584,0xb9c29924,0xfbc85a67,0xe941bdfb,0xdee20c61,0xc570cd5c,0xb3ffa2d4,0x49272e51}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x10dcc584,0xb9c29924,0xfbc85a67,0xe941bdfb,0xdee20c61,0xc570cd5c,0xb3ffa2d4,0x49272e51}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb9c2992410dcc584,0xe941bdfbfbc85a67,0xc570cd5cdee20c61,0x49272e51b3ffa2d4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb9c2992410dcc584,0xe941bdfbfbc85a67,0xc570cd5cdee20c61,0x49272e51b3ffa2d4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, 
._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x62c2,0x86e,0x4c92,0xdce1,0x2d33,0xfde4,0xdefd,0xf4a0,0x630,0x6f71,0x66ae,0x62b8,0xd16a,0xd9ff,0x9728,0x2493}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x62c2,0x86e,0x4c92,0xdce1,0x2d33,0xfde4,0xdefd,0xf4a0,0x630,0x6f71,0x66ae,0x62b8,0xd16a,0xd9ff,0x9728,0x2493}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x86e62c2,0xdce14c92,0xfde42d33,0xf4a0defd,0x6f710630,0x62b866ae,0xd9ffd16a,0x24939728}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x86e62c2,0xdce14c92,0xfde42d33,0xf4a0defd,0x6f710630,0x62b866ae,0xd9ffd16a,0x24939728}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdce14c92086e62c2,0xf4a0defdfde42d33,0x62b866ae6f710630,0x24939728d9ffd16a}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdce14c92086e62c2,0xf4a0defdfde42d33,0x62b866ae6f710630,0x24939728d9ffd16a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xc93,0xd634,0x4632,0x353f,0x76ba,0x220d,0x5084,0x3187,0xb121,0xfc8,0x6860,0xa3e4,0x1368,0x7388,0x5e0,0x7e52}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xc93,0xd634,0x4632,0x353f,0x76ba,0x220d,0x5084,0x3187,0xb121,0xfc8,0x6860,0xa3e4,0x1368,0x7388,0x5e0,0x7e52}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xd6340c93,0x353f4632,0x220d76ba,0x31875084,0xfc8b121,0xa3e46860,0x73881368,0x7e5205e0}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xd6340c93,0x353f4632,0x220d76ba,0x31875084,0xfc8b121,0xa3e46860,0x73881368,0x7e5205e0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x353f4632d6340c93,0x31875084220d76ba,0xa3e468600fc8b121,0x7e5205e073881368}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x353f4632d6340c93,0x31875084220d76ba,0xa3e468600fc8b121,0x7e5205e073881368}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2f6d,0xbfbd,0x6af0,0xbcd3,0x5c61,0x8f62,0x9b0b,0xd78a,0x3142,0x61aa,0x4716,0x208,0x93c7,0x43bd,0x97d6,0xda1a,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xd7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2f6d,0xbfbd,0x6af0,0xbcd3,0x5c61,0x8f62,0x9b0b,0xd78a,0x3142,0x61aa,0x4716,0x208,0x93c7,0x43bd,0x97d6,0xda1a,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xd7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbfbd2f6d,0xbcd36af0,0x8f625c61,0xd78a9b0b,0x61aa3142,0x2084716,0x43bd93c7,0xda1a97d6,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xd7ffff}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbfbd2f6d,0xbcd36af0,0x8f625c61,0xd78a9b0b,0x61aa3142,0x2084716,0x43bd93c7,0xda1a97d6,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xd7ffff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbcd36af0bfbd2f6d,0xd78a9b0b8f625c61,0x208471661aa3142,0xda1a97d643bd93c7,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xd7ffffffffffff}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbcd36af0bfbd2f6d,0xd78a9b0b8f625c61,0x208471661aa3142,0xda1a97d643bd93c7,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xd7ffffffffffff}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x62c2,0x86e,0x4c92,0xdce1,0x2d33,0xfde4,0xdefd,0xf4a0,0x630,0x6f71,0x66ae,0x62b8,0xd16a,0xd9ff,0x9728,0x2493}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x62c2,0x86e,0x4c92,0xdce1,0x2d33,0xfde4,0xdefd,0xf4a0,0x630,0x6f71,0x66ae,0x62b8,0xd16a,0xd9ff,0x9728,0x2493}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x86e62c2,0xdce14c92,0xfde42d33,0xf4a0defd,0x6f710630,0x62b866ae,0xd9ffd16a,0x24939728}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0x86e62c2,0xdce14c92,0xfde42d33,0xf4a0defd,0x6f710630,0x62b866ae,0xd9ffd16a,0x24939728}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdce14c92086e62c2,0xf4a0defdfde42d33,0x62b866ae6f710630,0x24939728d9ffd16a}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdce14c92086e62c2,0xf4a0defdfde42d33,0x62b866ae6f710630,0x24939728d9ffd16a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9add,0x156b,0x8705,0x6bb9,0x8bdf,0xd034,0x21a6,0xb827,0x44e9,0x34c7,0x3da3,0xa9fd,0xcebd,0x3ec0,0xcd63,0xca1}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9add,0x156b,0x8705,0x6bb9,0x8bdf,0xd034,0x21a6,0xb827,0x44e9,0x34c7,0x3da3,0xa9fd,0xcebd,0x3ec0,0xcd63,0xca1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x156b9add,0x6bb98705,0xd0348bdf,0xb82721a6,0x34c744e9,0xa9fd3da3,0x3ec0cebd,0xca1cd63}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x156b9add,0x6bb98705,0xd0348bdf,0xb82721a6,0x34c744e9,0xa9fd3da3,0x3ec0cebd,0xca1cd63}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6bb98705156b9add,0xb82721a6d0348bdf,0xa9fd3da334c744e9,0xca1cd633ec0cebd}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6bb98705156b9add,0xb82721a6d0348bdf,0xa9fd3da334c744e9,0xca1cd633ec0cebd}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xc584,0x10dc,0x9924,0xb9c2,0x5a67,0xfbc8,0xbdfb,0xe941,0xc61,0xdee2,0xcd5c,0xc570,0xa2d4,0xb3ff,0x2e51,0x4927}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc584,0x10dc,0x9924,0xb9c2,0x5a67,0xfbc8,0xbdfb,0xe941,0xc61,0xdee2,0xcd5c,0xc570,0xa2d4,0xb3ff,0x2e51,0x4927}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x10dcc584,0xb9c29924,0xfbc85a67,0xe941bdfb,0xdee20c61,0xc570cd5c,0xb3ffa2d4,0x49272e51}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x10dcc584,0xb9c29924,0xfbc85a67,0xe941bdfb,0xdee20c61,0xc570cd5c,0xb3ffa2d4,0x49272e51}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb9c2992410dcc584,0xe941bdfbfbc85a67,0xc570cd5cdee20c61,0x49272e51b3ffa2d4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb9c2992410dcc584,0xe941bdfbfbc85a67,0xc570cd5cdee20c61,0x49272e51b3ffa2d4}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xc93,0xd634,0x4632,0x353f,0x76ba,0x220d,0x5084,0x3187,0xb121,0xfc8,0x6860,0xa3e4,0x1368,0x7388,0x5e0,0x7e52}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xc93,0xd634,0x4632,0x353f,0x76ba,0x220d,0x5084,0x3187,0xb121,0xfc8,0x6860,0xa3e4,0x1368,0x7388,0x5e0,0x7e52}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xd6340c93,0x353f4632,0x220d76ba,0x31875084,0xfc8b121,0xa3e46860,0x73881368,0x7e5205e0}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xd6340c93,0x353f4632,0x220d76ba,0x31875084,0xfc8b121,0xa3e46860,0x73881368,0x7e5205e0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x353f4632d6340c93,0x31875084220d76ba,0xa3e468600fc8b121,0x7e5205e073881368}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x353f4632d6340c93,0x31875084220d76ba,0xa3e468600fc8b121,0x7e5205e073881368}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, 
._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 5}, {{ +}}}, 5}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x169a,0xd34,0xbfd7,0x4089,0x1be8,0xc17d,0xfefb,0x6efd,0x76e0,0x58ea,0xfedb,0xd204,0x1e3,0x3b9a,0x47d6,0xdf28,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x169a,0xd34,0xbfd7,0x4089,0x1be8,0xc17d,0xfefb,0x6efd,0x76e0,0x58ea,0xfedb,0xd204,0x1e3,0x3b9a,0x47d6,0xdf28,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xd34169a,0x4089bfd7,0xc17d1be8,0x6efdfefb,0x58ea76e0,0xd204fedb,0x3b9a01e3,0xdf2847d6,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xd34169a,0x4089bfd7,0xc17d1be8,0x6efdfefb,0x58ea76e0,0xd204fedb,0x3b9a01e3,0xdf2847d6,0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x4089bfd70d34169a,0x6efdfefbc17d1be8,0xd204fedb58ea76e0,0xdf2847d63b9a01e3,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x4089bfd70d34169a,0x6efdfefbc17d1be8,0xd204fedb58ea76e0,0xdf2847d63b9a01e3,0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) 
{0x169a,0xd34,0xbfd7,0x4089,0x1be8,0xc17d,0xfefb,0x6efd,0x76e0,0x58ea,0xfedb,0xd204,0x1e3,0x3b9a,0x47d6,0xdf28,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x169a,0xd34,0xbfd7,0x4089,0x1be8,0xc17d,0xfefb,0x6efd,0x76e0,0x58ea,0xfedb,0xd204,0x1e3,0x3b9a,0x47d6,0xdf28,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xd34169a,0x4089bfd7,0xc17d1be8,0x6efdfefb,0x58ea76e0,0xd204fedb,0x3b9a01e3,0xdf2847d6,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xd34169a,0x4089bfd7,0xc17d1be8,0x6efdfefb,0x58ea76e0,0xd204fedb,0x3b9a01e3,0xdf2847d6,0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x4089bfd70d34169a,0x6efdfefbc17d1be8,0xd204fedb58ea76e0,0xdf2847d63b9a01e3,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x4089bfd70d34169a,0x6efdfefbc17d1be8,0xd204fedb58ea76e0,0xdf2847d63b9a01e3,0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xb4d,0x869a,0xdfeb,0x2044,0x8df4,0xe0be,0xff7d,0x377e,0x3b70,0xac75,0x7f6d,0xe902,0xf1,0x1dcd,0x23eb,0x6f94,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xb4d,0x869a,0xdfeb,0x2044,0x8df4,0xe0be,0xff7d,0x377e,0x3b70,0xac75,0x7f6d,0xe902,0xf1,0x1dcd,0x23eb,0x6f94,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x869a0b4d,0x2044dfeb,0xe0be8df4,0x377eff7d,0xac753b70,0xe9027f6d,0x1dcd00f1,0x6f9423eb,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x869a0b4d,0x2044dfeb,0xe0be8df4,0x377eff7d,0xac753b70,0xe9027f6d,0x1dcd00f1,0x6f9423eb,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2044dfeb869a0b4d,0x377eff7de0be8df4,0xe9027f6dac753b70,0x6f9423eb1dcd00f1,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2044dfeb869a0b4d,0x377eff7de0be8df4,0xe9027f6dac753b70,0x6f9423eb1dcd00f1,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) 
{0x2412,0x6ec4,0x4dda,0x2cde,0x281d,0xaaa7,0x3a33,0xc1d6,0x5c26,0x22e3,0x816d,0x13fb,0xac81,0x58e8,0xd1a7,0xadaa,0xc}}} +{{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x2412,0x6ec4,0x4dda,0x2cde,0x281d,0xaaa7,0x3a33,0xc1d6,0x5c26,0x22e3,0x816d,0x13fb,0xac81,0x58e8,0xd1a7,0xadaa,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x6ec42412,0x2cde4dda,0xaaa7281d,0xc1d63a33,0x22e35c26,0x13fb816d,0x58e8ac81,0xadaad1a7,0xc}}} +{{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x6ec42412,0x2cde4dda,0xaaa7281d,0xc1d63a33,0x22e35c26,0x13fb816d,0x58e8ac81,0xadaad1a7,0xc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x2cde4dda6ec42412,0xc1d63a33aaa7281d,0x13fb816d22e35c26,0xadaad1a758e8ac81,0xc}}} +{{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x2cde4dda6ec42412,0xc1d63a33aaa7281d,0x13fb816d22e35c26,0xadaad1a758e8ac81,0xc}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1f45,0x5630,0xd526,0x9cc7,0x1aab,0x114d,0x87b3,0xbb27,0xc6b6,0xe50,0x8bb4,0x813f,0xff7a,0xf810,0xa8d3,0x66ee,0xfffc,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x35f}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1f45,0x5630,0xd526,0x9cc7,0x1aab,0x114d,0x87b3,0xbb27,0xc6b6,0xe50,0x8bb4,0x813f,0xff7a,0xf810,0xa8d3,0x66ee,0xfffc,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x35f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x56301f45,0x9cc7d526,0x114d1aab,0xbb2787b3,0xe50c6b6,0x813f8bb4,0xf810ff7a,0x66eea8d3,0xfffffffc,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x35fffff}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x56301f45,0x9cc7d526,0x114d1aab,0xbb2787b3,0xe50c6b6,0x813f8bb4,0xf810ff7a,0x66eea8d3,0xfffffffc,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x35fffff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9cc7d52656301f45,0xbb2787b3114d1aab,0x813f8bb40e50c6b6,0x66eea8d3f810ff7a,0xfffffffffffffffc,0xffffffffffffffff,0xffffffffffffffff,0x35fffffffffffff}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9cc7d52656301f45,0xbb2787b3114d1aab,0x813f8bb40e50c6b6,0x66eea8d3f810ff7a,0xfffffffffffffffc,0xffffffffffffffff,0xffffffffffffffff,0x35fffffffffffff}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xb4d,0x869a,0xdfeb,0x2044,0x8df4,0xe0be,0xff7d,0x377e,0x3b70,0xac75,0x7f6d,0xe902,0xf1,0x1dcd,0x23eb,0x6f94,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xb4d,0x869a,0xdfeb,0x2044,0x8df4,0xe0be,0xff7d,0x377e,0x3b70,0xac75,0x7f6d,0xe902,0xf1,0x1dcd,0x23eb,0x6f94,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x869a0b4d,0x2044dfeb,0xe0be8df4,0x377eff7d,0xac753b70,0xe9027f6d,0x1dcd00f1,0x6f9423eb,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x869a0b4d,0x2044dfeb,0xe0be8df4,0x377eff7d,0xac753b70,0xe9027f6d,0x1dcd00f1,0x6f9423eb,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2044dfeb869a0b4d,0x377eff7de0be8df4,0xe9027f6dac753b70,0x6f9423eb1dcd00f1,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2044dfeb869a0b4d,0x377eff7de0be8df4,0xe9027f6dac753b70,0x6f9423eb1dcd00f1,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x233f,0x38d9,0x6fc1,0xa333,0xaeb,0xce6a,0x2a4c,0xa1c1,0x274c,0xa9fc,0xd4c6,0xb0b3,0x555b,0x7a48,0x411a,0x2bdc}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x233f,0x38d9,0x6fc1,0xa333,0xaeb,0xce6a,0x2a4c,0xa1c1,0x274c,0xa9fc,0xd4c6,0xb0b3,0x555b,0x7a48,0x411a,0x2bdc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x38d9233f,0xa3336fc1,0xce6a0aeb,0xa1c12a4c,0xa9fc274c,0xb0b3d4c6,0x7a48555b,0x2bdc411a}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x38d9233f,0xa3336fc1,0xce6a0aeb,0xa1c12a4c,0xa9fc274c,0xb0b3d4c6,0x7a48555b,0x2bdc411a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa3336fc138d9233f,0xa1c12a4cce6a0aeb,0xb0b3d4c6a9fc274c,0x2bdc411a7a48555b}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa3336fc138d9233f,0xa1c12a4cce6a0aeb,0xb0b3d4c6a9fc274c,0x2bdc411a7a48555b}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x169a,0xd34,0xbfd7,0x4089,0x1be8,0xc17d,0xfefb,0x6efd,0x76e0,0x58ea,0xfedb,0xd204,0x1e3,0x3b9a,0x47d6,0xdf28,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x169a,0xd34,0xbfd7,0x4089,0x1be8,0xc17d,0xfefb,0x6efd,0x76e0,0x58ea,0xfedb,0xd204,0x1e3,0x3b9a,0x47d6,0xdf28,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xd34169a,0x4089bfd7,0xc17d1be8,0x6efdfefb,0x58ea76e0,0xd204fedb,0x3b9a01e3,0xdf2847d6,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xd34169a,0x4089bfd7,0xc17d1be8,0x6efdfefb,0x58ea76e0,0xd204fedb,0x3b9a01e3,0xdf2847d6,0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x4089bfd70d34169a,0x6efdfefbc17d1be8,0xd204fedb58ea76e0,0xdf2847d63b9a01e3,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x4089bfd70d34169a,0x6efdfefbc17d1be8,0xd204fedb58ea76e0,0xdf2847d63b9a01e3,0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x2412,0x6ec4,0x4dda,0x2cde,0x281d,0xaaa7,0x3a33,0xc1d6,0x5c26,0x22e3,0x816d,0x13fb,0xac81,0x58e8,0xd1a7,0xadaa,0xc}}} +{{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x2412,0x6ec4,0x4dda,0x2cde,0x281d,0xaaa7,0x3a33,0xc1d6,0x5c26,0x22e3,0x816d,0x13fb,0xac81,0x58e8,0xd1a7,0xadaa,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x6ec42412,0x2cde4dda,0xaaa7281d,0xc1d63a33,0x22e35c26,0x13fb816d,0x58e8ac81,0xadaad1a7,0xc}}} +{{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x6ec42412,0x2cde4dda,0xaaa7281d,0xc1d63a33,0x22e35c26,0x13fb816d,0x58e8ac81,0xadaad1a7,0xc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x2cde4dda6ec42412,0xc1d63a33aaa7281d,0x13fb816d22e35c26,0xadaad1a758e8ac81,0xc}}} +{{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x2cde4dda6ec42412,0xc1d63a33aaa7281d,0x13fb816d22e35c26,0xadaad1a758e8ac81,0xc}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 
16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 37}, {{ +}}}, 37}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) 
{0xc31e,0x416e,0xe2a5,0x25c2,0xa926,0x45fa,0x578b,0xf395,0x6ac9,0x7901,0xd14d,0xd089,0x9e5c,0xe062,0xbc4,0x583b,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xc31e,0x416e,0xe2a5,0x25c2,0xa926,0x45fa,0x578b,0xf395,0x6ac9,0x7901,0xd14d,0xd089,0x9e5c,0xe062,0xbc4,0x583b,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x416ec31e,0x25c2e2a5,0x45faa926,0xf395578b,0x79016ac9,0xd089d14d,0xe0629e5c,0x583b0bc4,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x416ec31e,0x25c2e2a5,0x45faa926,0xf395578b,0x79016ac9,0xd089d14d,0xe0629e5c,0x583b0bc4,0x3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25c2e2a5416ec31e,0xf395578b45faa926,0xd089d14d79016ac9,0x583b0bc4e0629e5c,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25c2e2a5416ec31e,0xf395578b45faa926,0xd089d14d79016ac9,0x583b0bc4e0629e5c,0x3}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xc31e,0x416e,0xe2a5,0x25c2,0xa926,0x45fa,0x578b,0xf395,0x6ac9,0x7901,0xd14d,0xd089,0x9e5c,0xe062,0xbc4,0x583b,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xc31e,0x416e,0xe2a5,0x25c2,0xa926,0x45fa,0x578b,0xf395,0x6ac9,0x7901,0xd14d,0xd089,0x9e5c,0xe062,0xbc4,0x583b,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x416ec31e,0x25c2e2a5,0x45faa926,0xf395578b,0x79016ac9,0xd089d14d,0xe0629e5c,0x583b0bc4,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x416ec31e,0x25c2e2a5,0x45faa926,0xf395578b,0x79016ac9,0xd089d14d,0xe0629e5c,0x583b0bc4,0x3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25c2e2a5416ec31e,0xf395578b45faa926,0xd089d14d79016ac9,0x583b0bc4e0629e5c,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25c2e2a5416ec31e,0xf395578b45faa926,0xd089d14d79016ac9,0x583b0bc4e0629e5c,0x3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x618f,0xa0b7,0x7152,0x12e1,0x5493,0xa2fd,0xabc5,0xf9ca,0xb564,0xbc80,0xe8a6,0x6844,0x4f2e,0x7031,0x85e2,0xac1d,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x618f,0xa0b7,0x7152,0x12e1,0x5493,0xa2fd,0xabc5,0xf9ca,0xb564,0xbc80,0xe8a6,0x6844,0x4f2e,0x7031,0x85e2,0xac1d,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xa0b7618f,0x12e17152,0xa2fd5493,0xf9caabc5,0xbc80b564,0x6844e8a6,0x70314f2e,0xac1d85e2,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xa0b7618f,0x12e17152,0xa2fd5493,0xf9caabc5,0xbc80b564,0x6844e8a6,0x70314f2e,0xac1d85e2,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x12e17152a0b7618f,0xf9caabc5a2fd5493,0x6844e8a6bc80b564,0xac1d85e270314f2e,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x12e17152a0b7618f,0xf9caabc5a2fd5493,0x6844e8a6bc80b564,0xac1d85e270314f2e,0x1}}}} #endif , #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4c2,0x1467,0x5829,0xf8ca,0x2d31,0xb661,0xa4a1,0x434a,0x25cb,0xbffa,0xe232,0x8565,0x2305,0xe42,0x3f64,0x710,0x11}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4c2,0x1467,0x5829,0xf8ca,0x2d31,0xb661,0xa4a1,0x434a,0x25cb,0xbffa,0xe232,0x8565,0x2305,0xe42,0x3f64,0x710,0x11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x146704c2,0xf8ca5829,0xb6612d31,0x434aa4a1,0xbffa25cb,0x8565e232,0xe422305,0x7103f64,0x11}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x146704c2,0xf8ca5829,0xb6612d31,0x434aa4a1,0xbffa25cb,0x8565e232,0xe422305,0x7103f64,0x11}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xf8ca5829146704c2,0x434aa4a1b6612d31,0x8565e232bffa25cb,0x7103f640e422305,0x11}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xf8ca5829146704c2,0x434aa4a1b6612d31,0x8565e232bffa25cb,0x7103f640e422305,0x11}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -32, ._mp_d = (mp_limb_t[]) {0x3b03,0xe541,0x6454,0x6f9,0x3808,0xb93,0x7509,0x2b52,0xed1,0xf4fe,0x8961,0x4869,0x4671,0xdd21,0x4c4c,0x70b0,0xfff9,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x35f}}} +{{{._mp_alloc = 0, ._mp_size = -32, ._mp_d = (mp_limb_t[]) {0x3b03,0xe541,0x6454,0x6f9,0x3808,0xb93,0x7509,0x2b52,0xed1,0xf4fe,0x8961,0x4869,0x4671,0xdd21,0x4c4c,0x70b0,0xfff9,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x35f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xe5413b03,0x6f96454,0xb933808,0x2b527509,0xf4fe0ed1,0x48698961,0xdd214671,0x70b04c4c,0xfffffff9,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x35fffff}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) 
{0xe5413b03,0x6f96454,0xb933808,0x2b527509,0xf4fe0ed1,0x48698961,0xdd214671,0x70b04c4c,0xfffffff9,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x35fffff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x6f96454e5413b03,0x2b5275090b933808,0x48698961f4fe0ed1,0x70b04c4cdd214671,0xfffffffffffffff9,0xffffffffffffffff,0xffffffffffffffff,0x35fffffffffffff}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x6f96454e5413b03,0x2b5275090b933808,0x48698961f4fe0ed1,0x70b04c4cdd214671,0xfffffffffffffff9,0xffffffffffffffff,0xffffffffffffffff,0x35fffffffffffff}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x618f,0xa0b7,0x7152,0x12e1,0x5493,0xa2fd,0xabc5,0xf9ca,0xb564,0xbc80,0xe8a6,0x6844,0x4f2e,0x7031,0x85e2,0xac1d,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x618f,0xa0b7,0x7152,0x12e1,0x5493,0xa2fd,0xabc5,0xf9ca,0xb564,0xbc80,0xe8a6,0x6844,0x4f2e,0x7031,0x85e2,0xac1d,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xa0b7618f,0x12e17152,0xa2fd5493,0xf9caabc5,0xbc80b564,0x6844e8a6,0x70314f2e,0xac1d85e2,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xa0b7618f,0x12e17152,0xa2fd5493,0xf9caabc5,0xbc80b564,0x6844e8a6,0x70314f2e,0xac1d85e2,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x12e17152a0b7618f,0xf9caabc5a2fd5493,0x6844e8a6bc80b564,0xac1d85e270314f2e,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x12e17152a0b7618f,0xf9caabc5a2fd5493,0x6844e8a6bc80b564,0xac1d85e270314f2e,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xe953,0xf5ac,0x2ee2,0xc962,0x459d,0xd9a0,0xd761,0x7c5a,0x8268,0xcf36,0x9b08,0xb7a6,0xbd23,0x7e04,0xe324,0x23ba}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xe953,0xf5ac,0x2ee2,0xc962,0x459d,0xd9a0,0xd761,0x7c5a,0x8268,0xcf36,0x9b08,0xb7a6,0xbd23,0x7e04,0xe324,0x23ba}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xf5ace953,0xc9622ee2,0xd9a0459d,0x7c5ad761,0xcf368268,0xb7a69b08,0x7e04bd23,0x23bae324}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xf5ace953,0xc9622ee2,0xd9a0459d,0x7c5ad761,0xcf368268,0xb7a69b08,0x7e04bd23,0x23bae324}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xc9622ee2f5ace953,0x7c5ad761d9a0459d,0xb7a69b08cf368268,0x23bae3247e04bd23}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xc9622ee2f5ace953,0x7c5ad761d9a0459d,0xb7a69b08cf368268,0x23bae3247e04bd23}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xc31e,0x416e,0xe2a5,0x25c2,0xa926,0x45fa,0x578b,0xf395,0x6ac9,0x7901,0xd14d,0xd089,0x9e5c,0xe062,0xbc4,0x583b,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xc31e,0x416e,0xe2a5,0x25c2,0xa926,0x45fa,0x578b,0xf395,0x6ac9,0x7901,0xd14d,0xd089,0x9e5c,0xe062,0xbc4,0x583b,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x416ec31e,0x25c2e2a5,0x45faa926,0xf395578b,0x79016ac9,0xd089d14d,0xe0629e5c,0x583b0bc4,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x416ec31e,0x25c2e2a5,0x45faa926,0xf395578b,0x79016ac9,0xd089d14d,0xe0629e5c,0x583b0bc4,0x3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25c2e2a5416ec31e,0xf395578b45faa926,0xd089d14d79016ac9,0x583b0bc4e0629e5c,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25c2e2a5416ec31e,0xf395578b45faa926,0xd089d14d79016ac9,0x583b0bc4e0629e5c,0x3}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size 
= 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4c2,0x1467,0x5829,0xf8ca,0x2d31,0xb661,0xa4a1,0x434a,0x25cb,0xbffa,0xe232,0x8565,0x2305,0xe42,0x3f64,0x710,0x11}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4c2,0x1467,0x5829,0xf8ca,0x2d31,0xb661,0xa4a1,0x434a,0x25cb,0xbffa,0xe232,0x8565,0x2305,0xe42,0x3f64,0x710,0x11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x146704c2,0xf8ca5829,0xb6612d31,0x434aa4a1,0xbffa25cb,0x8565e232,0xe422305,0x7103f64,0x11}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x146704c2,0xf8ca5829,0xb6612d31,0x434aa4a1,0xbffa25cb,0x8565e232,0xe422305,0x7103f64,0x11}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xf8ca5829146704c2,0x434aa4a1b6612d31,0x8565e232bffa25cb,0x7103f640e422305,0x11}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xf8ca5829146704c2,0x434aa4a1b6612d31,0x8565e232bffa25cb,0x7103f640e422305,0x11}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} 
+{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 61}, {{ +}}}, 61}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xf94,0x28f0,0x8955,0xb7aa,0x538f,0x65b6,0x5a15,0xd3b4,0x9667,0xcba4,0x3554,0x11f8,0x25a9,0x6265,0x196a,0x83d,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xf94,0x28f0,0x8955,0xb7aa,0x538f,0x65b6,0x5a15,0xd3b4,0x9667,0xcba4,0x3554,0x11f8,0x25a9,0x6265,0x196a,0x83d,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x28f00f94,0xb7aa8955,0x65b6538f,0xd3b45a15,0xcba49667,0x11f83554,0x626525a9,0x83d196a,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x28f00f94,0xb7aa8955,0x65b6538f,0xd3b45a15,0xcba49667,0x11f83554,0x626525a9,0x83d196a,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xb7aa895528f00f94,0xd3b45a1565b6538f,0x11f83554cba49667,0x83d196a626525a9,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xb7aa895528f00f94,0xd3b45a1565b6538f,0x11f83554cba49667,0x83d196a626525a9,0x1}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xf94,0x28f0,0x8955,0xb7aa,0x538f,0x65b6,0x5a15,0xd3b4,0x9667,0xcba4,0x3554,0x11f8,0x25a9,0x6265,0x196a,0x83d,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xf94,0x28f0,0x8955,0xb7aa,0x538f,0x65b6,0x5a15,0xd3b4,0x9667,0xcba4,0x3554,0x11f8,0x25a9,0x6265,0x196a,0x83d,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x28f00f94,0xb7aa8955,0x65b6538f,0xd3b45a15,0xcba49667,0x11f83554,0x626525a9,0x83d196a,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x28f00f94,0xb7aa8955,0x65b6538f,0xd3b45a15,0xcba49667,0x11f83554,0x626525a9,0x83d196a,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xb7aa895528f00f94,0xd3b45a1565b6538f,0x11f83554cba49667,0x83d196a626525a9,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xb7aa895528f00f94,0xd3b45a1565b6538f,0x11f83554cba49667,0x83d196a626525a9,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7ca,0x9478,0x44aa,0xdbd5,0x29c7,0xb2db,0x2d0a,0xe9da,0x4b33,0x65d2,0x1aaa,0x88fc,0x92d4,0x3132,0x8cb5,0x841e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7ca,0x9478,0x44aa,0xdbd5,0x29c7,0xb2db,0x2d0a,0xe9da,0x4b33,0x65d2,0x1aaa,0x88fc,0x92d4,0x3132,0x8cb5,0x841e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x947807ca,0xdbd544aa,0xb2db29c7,0xe9da2d0a,0x65d24b33,0x88fc1aaa,0x313292d4,0x841e8cb5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x947807ca,0xdbd544aa,0xb2db29c7,0xe9da2d0a,0x65d24b33,0x88fc1aaa,0x313292d4,0x841e8cb5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdbd544aa947807ca,0xe9da2d0ab2db29c7,0x88fc1aaa65d24b33,0x841e8cb5313292d4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdbd544aa947807ca,0xe9da2d0ab2db29c7,0x88fc1aaa65d24b33,0x841e8cb5313292d4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x5409,0xed37,0x7339,0xcbe7,0x95ab,0x2582,0x18fa,0xcc9d,0x13ae,0x2543,0xaefd,0xb168,0xfdbc,0x68b,0xc9af,0x6d9d,0x6}}} +{{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x5409,0xed37,0x7339,0xcbe7,0x95ab,0x2582,0x18fa,0xcc9d,0x13ae,0x2543,0xaefd,0xb168,0xfdbc,0x68b,0xc9af,0x6d9d,0x6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xed375409,0xcbe77339,0x258295ab,0xcc9d18fa,0x254313ae,0xb168aefd,0x68bfdbc,0x6d9dc9af,0x6}}} +{{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xed375409,0xcbe77339,0x258295ab,0xcc9d18fa,0x254313ae,0xb168aefd,0x68bfdbc,0x6d9dc9af,0x6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xcbe77339ed375409,0xcc9d18fa258295ab,0xb168aefd254313ae,0x6d9dc9af068bfdbc,0x6}}} +{{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xcbe77339ed375409,0xcc9d18fa258295ab,0xb168aefd254313ae,0x6d9dc9af068bfdbc,0x6}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} 
+{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -32, ._mp_d = (mp_limb_t[]) {0x7013,0x423f,0x42b7,0x3f3d,0x82a,0x9883,0x52bf,0xfede,0x8018,0xa449,0xf571,0xb8a,0x3139,0xbe7,0x439d,0x9e1f,0x2,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xd8}}} +{{{._mp_alloc = 0, ._mp_size = -32, ._mp_d = (mp_limb_t[]) {0x7013,0x423f,0x42b7,0x3f3d,0x82a,0x9883,0x52bf,0xfede,0x8018,0xa449,0xf571,0xb8a,0x3139,0xbe7,0x439d,0x9e1f,0x2,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xd8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x423f7013,0x3f3d42b7,0x9883082a,0xfede52bf,0xa4498018,0xb8af571,0xbe73139,0x9e1f439d,0x2,0x0,0x0,0x0,0x0,0x0,0x0,0xd80000}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x423f7013,0x3f3d42b7,0x9883082a,0xfede52bf,0xa4498018,0xb8af571,0xbe73139,0x9e1f439d,0x2,0x0,0x0,0x0,0x0,0x0,0x0,0xd80000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x3f3d42b7423f7013,0xfede52bf9883082a,0xb8af571a4498018,0x9e1f439d0be73139,0x2,0x0,0x0,0xd8000000000000}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x3f3d42b7423f7013,0xfede52bf9883082a,0xb8af571a4498018,0x9e1f439d0be73139,0x2,0x0,0x0,0xd8000000000000}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7ca,0x9478,0x44aa,0xdbd5,0x29c7,0xb2db,0x2d0a,0xe9da,0x4b33,0x65d2,0x1aaa,0x88fc,0x92d4,0x3132,0x8cb5,0x841e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7ca,0x9478,0x44aa,0xdbd5,0x29c7,0xb2db,0x2d0a,0xe9da,0x4b33,0x65d2,0x1aaa,0x88fc,0x92d4,0x3132,0x8cb5,0x841e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x947807ca,0xdbd544aa,0xb2db29c7,0xe9da2d0a,0x65d24b33,0x88fc1aaa,0x313292d4,0x841e8cb5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x947807ca,0xdbd544aa,0xb2db29c7,0xe9da2d0a,0x65d24b33,0x88fc1aaa,0x313292d4,0x841e8cb5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdbd544aa947807ca,0xe9da2d0ab2db29c7,0x88fc1aaa65d24b33,0x841e8cb5313292d4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdbd544aa947807ca,0xe9da2d0ab2db29c7,0x88fc1aaa65d24b33,0x841e8cb5313292d4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc 
= 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca2d,0x34af,0xea29,0x177b,0x91ed,0x86ca,0x588a,0xe94d,0x55df,0x4621,0xa1e4,0x67d7,0xb617,0x6a1,0x88f5,0x87b}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca2d,0x34af,0xea29,0x177b,0x91ed,0x86ca,0x588a,0xe94d,0x55df,0x4621,0xa1e4,0x67d7,0xb617,0x6a1,0x88f5,0x87b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x34afca2d,0x177bea29,0x86ca91ed,0xe94d588a,0x462155df,0x67d7a1e4,0x6a1b617,0x87b88f5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x34afca2d,0x177bea29,0x86ca91ed,0xe94d588a,0x462155df,0x67d7a1e4,0x6a1b617,0x87b88f5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x177bea2934afca2d,0xe94d588a86ca91ed,0x67d7a1e4462155df,0x87b88f506a1b617}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x177bea2934afca2d,0xe94d588a86ca91ed,0x67d7a1e4462155df,0x87b88f506a1b617}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xf94,0x28f0,0x8955,0xb7aa,0x538f,0x65b6,0x5a15,0xd3b4,0x9667,0xcba4,0x3554,0x11f8,0x25a9,0x6265,0x196a,0x83d,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xf94,0x28f0,0x8955,0xb7aa,0x538f,0x65b6,0x5a15,0xd3b4,0x9667,0xcba4,0x3554,0x11f8,0x25a9,0x6265,0x196a,0x83d,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x28f00f94,0xb7aa8955,0x65b6538f,0xd3b45a15,0xcba49667,0x11f83554,0x626525a9,0x83d196a,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x28f00f94,0xb7aa8955,0x65b6538f,0xd3b45a15,0xcba49667,0x11f83554,0x626525a9,0x83d196a,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, 
._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xb7aa895528f00f94,0xd3b45a1565b6538f,0x11f83554cba49667,0x83d196a626525a9,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xb7aa895528f00f94,0xd3b45a1565b6538f,0x11f83554cba49667,0x83d196a626525a9,0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x5409,0xed37,0x7339,0xcbe7,0x95ab,0x2582,0x18fa,0xcc9d,0x13ae,0x2543,0xaefd,0xb168,0xfdbc,0x68b,0xc9af,0x6d9d,0x6}}} +{{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x5409,0xed37,0x7339,0xcbe7,0x95ab,0x2582,0x18fa,0xcc9d,0x13ae,0x2543,0xaefd,0xb168,0xfdbc,0x68b,0xc9af,0x6d9d,0x6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xed375409,0xcbe77339,0x258295ab,0xcc9d18fa,0x254313ae,0xb168aefd,0x68bfdbc,0x6d9dc9af,0x6}}} +{{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xed375409,0xcbe77339,0x258295ab,0xcc9d18fa,0x254313ae,0xb168aefd,0x68bfdbc,0x6d9dc9af,0x6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xcbe77339ed375409,0xcc9d18fa258295ab,0xb168aefd254313ae,0x6d9dc9af068bfdbc,0x6}}} +{{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xcbe77339ed375409,0xcc9d18fa258295ab,0xb168aefd254313ae,0x6d9dc9af068bfdbc,0x6}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 97}, {{ +}}}, 97}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1e34,0xdf45,0xcfa6,0xa203,0xe18a,0xd8c5,0x1d42,0x7a30,0x86ce,0x7a12,0xf933,0x741d,0x6b48,0xe48e,0xbf65,0x1854}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1920,0xb8b5,0x9c5,0xfd99,0xd1a8,0xb311,0xd4d8,0x9a8e,0x7f4c,0x1ad6,0xeba7,0xb78,0xe77,0xa59b,0xe5bc,0x11f7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdf451e34,0xa203cfa6,0xd8c5e18a,0x7a301d42,0x7a1286ce,0x741df933,0xe48e6b48,0x1854bf65}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb8b51920,0xfd9909c5,0xb311d1a8,0x9a8ed4d8,0x1ad67f4c,0xb78eba7,0xa59b0e77,0x11f7e5bc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa203cfa6df451e34,0x7a301d42d8c5e18a,0x741df9337a1286ce,0x1854bf65e48e6b48}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfd9909c5b8b51920,0x9a8ed4d8b311d1a8,0xb78eba71ad67f4c,0x11f7e5bca59b0e77}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1e34,0xdf45,0xcfa6,0xa203,0xe18a,0xd8c5,0x1d42,0x7a30,0x86ce,0x7a12,0xf933,0x741d,0x6b48,0xe48e,0xbf65,0x1854}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1920,0xb8b5,0x9c5,0xfd99,0xd1a8,0xb311,0xd4d8,0x9a8e,0x7f4c,0x1ad6,0xeba7,0xb78,0xe77,0xa59b,0xe5bc,0x11f7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdf451e34,0xa203cfa6,0xd8c5e18a,0x7a301d42,0x7a1286ce,0x741df933,0xe48e6b48,0x1854bf65}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb8b51920,0xfd9909c5,0xb311d1a8,0x9a8ed4d8,0x1ad67f4c,0xb78eba7,0xa59b0e77,0x11f7e5bc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = 
(mp_limb_t[]) {0xa203cfa6df451e34,0x7a301d42d8c5e18a,0x741df9337a1286ce,0x1854bf65e48e6b48}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfd9909c5b8b51920,0x9a8ed4d8b311d1a8,0xb78eba71ad67f4c,0x11f7e5bca59b0e77}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8f1a,0x6fa2,0xe7d3,0x5101,0xf0c5,0x6c62,0xea1,0x3d18,0x4367,0xbd09,0xfc99,0x3a0e,0x35a4,0xf247,0x5fb2,0xc2a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8c90,0xdc5a,0x84e2,0x7ecc,0xe8d4,0x5988,0x6a6c,0x4d47,0x3fa6,0x8d6b,0x75d3,0x85bc,0x873b,0x52cd,0xf2de,0x8fb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6fa28f1a,0x5101e7d3,0x6c62f0c5,0x3d180ea1,0xbd094367,0x3a0efc99,0xf24735a4,0xc2a5fb2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdc5a8c90,0x7ecc84e2,0x5988e8d4,0x4d476a6c,0x8d6b3fa6,0x85bc75d3,0x52cd873b,0x8fbf2de}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5101e7d36fa28f1a,0x3d180ea16c62f0c5,0x3a0efc99bd094367,0xc2a5fb2f24735a4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7ecc84e2dc5a8c90,0x4d476a6c5988e8d4,0x85bc75d38d6b3fa6,0x8fbf2de52cd873b}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcf03,0xa322,0x8523,0x93d,0x3bc,0x4b50,0x33ca,0x56b5,0x863f,0x87c8,0x38bf,0x98c6,0x8ce8,0xfab7,0xfc02,0x78ed}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x11f9,0xfb19,0xfec2,0xe1f1,0xe7a6,0xf9f,0x607,0xc29,0x62c,0x571,0x5f1e,0x9ef8,0x6833,0x4daa,0x5706,0x7bc1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa322cf03,0x93d8523,0x4b5003bc,0x56b533ca,0x87c8863f,0x98c638bf,0xfab78ce8,0x78edfc02}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xfb1911f9,0xe1f1fec2,0xf9fe7a6,0xc290607,0x571062c,0x9ef85f1e,0x4daa6833,0x7bc15706}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x93d8523a322cf03,0x56b533ca4b5003bc,0x98c638bf87c8863f,0x78edfc02fab78ce8}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe1f1fec2fb1911f9,0xc2906070f9fe7a6,0x9ef85f1e0571062c,0x7bc157064daa6833}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x24ed,0x1400,0x74a1,0x1310,0xce8a,0x1c0d,0x512a,0x3500,0x2451,0x6992,0x892c,0x3cdb,0x45d8,0x520,0x420,0xf11f,0x6cb,0xbe4d,0xd06c,0xcbe4,0x4d06,0x6cbe,0xe4d0,0x6cb,0xbe4d,0xd06c,0xcbe4,0x4d06,0x6cbe,0xe4d0,0x6cb,0x15}}} +{{{._mp_alloc = 0, ._mp_size = -32, ._mp_d = (mp_limb_t[]) {0x8f6c,0x2df1,0x638e,0xe2e3,0x6ecb,0xaa5e,0x1866,0xef1d,0x3821,0xa3ab,0x2721,0x3107,0xaffc,0x377a,0x4bb0,0x86ee,0x616a,0xa7a5,0x5616,0x6a7a,0xa561,0x16a7,0x7a56,0x616a,0xa7a5,0x5616,0x6a7a,0xa561,0x16a7,0x7a56,0x616a,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x140024ed,0x131074a1,0x1c0dce8a,0x3500512a,0x69922451,0x3cdb892c,0x52045d8,0xf11f0420,0xbe4d06cb,0xcbe4d06c,0x6cbe4d06,0x6cbe4d0,0xd06cbe4d,0x4d06cbe4,0xe4d06cbe,0x1506cb}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x2df18f6c,0xe2e3638e,0xaa5e6ecb,0xef1d1866,0xa3ab3821,0x31072721,0x377aaffc,0x86ee4bb0,0xa7a5616a,0x6a7a5616,0x16a7a561,0x616a7a56,0x5616a7a5,0xa5616a7a,0x7a5616a7,0xd616a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x131074a1140024ed,0x3500512a1c0dce8a,0x3cdb892c69922451,0xf11f0420052045d8,0xcbe4d06cbe4d06cb,0x6cbe4d06cbe4d06,0x4d06cbe4d06cbe4d,0x1506cbe4d06cbe}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xe2e3638e2df18f6c,0xef1d1866aa5e6ecb,0x31072721a3ab3821,0x86ee4bb0377aaffc,0x6a7a5616a7a5616a,0x616a7a5616a7a561,0xa5616a7a5616a7a5,0xd616a7a5616a7}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8f1a,0x6fa2,0xe7d3,0x5101,0xf0c5,0x6c62,0xea1,0x3d18,0x4367,0xbd09,0xfc99,0x3a0e,0x35a4,0xf247,0x5fb2,0xc2a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x8c90,0xdc5a,0x84e2,0x7ecc,0xe8d4,0x5988,0x6a6c,0x4d47,0x3fa6,0x8d6b,0x75d3,0x85bc,0x873b,0x52cd,0xf2de,0x8fb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6fa28f1a,0x5101e7d3,0x6c62f0c5,0x3d180ea1,0xbd094367,0x3a0efc99,0xf24735a4,0xc2a5fb2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdc5a8c90,0x7ecc84e2,0x5988e8d4,0x4d476a6c,0x8d6b3fa6,0x85bc75d3,0x52cd873b,0x8fbf2de}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5101e7d36fa28f1a,0x3d180ea16c62f0c5,0x3a0efc99bd094367,0xc2a5fb2f24735a4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7ecc84e2dc5a8c90,0x4d476a6c5988e8d4,0x85bc75d38d6b3fa6,0x8fbf2de52cd873b}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x98b3,0xd2e,0x314c,0x5199,0x7a5a,0xb592,0xbd65,0x1ef7,0x7d32,0x94fd,0x6cfe,0x68e3,0xcda6,0x8d91,0xfb73,0x88}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x696c,0x823e,0x85a8,0x5b9e,0x43db,0x5bd2,0x5e0b,0x9a1b,0x98f2,0x2445,0x26ef,0xbb9b,0x93b7,0xd9d4,0x2ed3,0x8c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xd2e98b3,0x5199314c,0xb5927a5a,0x1ef7bd65,0x94fd7d32,0x68e36cfe,0x8d91cda6,0x88fb73}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x823e696c,0x5b9e85a8,0x5bd243db,0x9a1b5e0b,0x244598f2,0xbb9b26ef,0xd9d493b7,0x8c2ed3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) 
{0x5199314c0d2e98b3,0x1ef7bd65b5927a5a,0x68e36cfe94fd7d32,0x88fb738d91cda6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5b9e85a8823e696c,0x9a1b5e0b5bd243db,0xbb9b26ef244598f2,0x8c2ed3d9d493b7}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1e34,0xdf45,0xcfa6,0xa203,0xe18a,0xd8c5,0x1d42,0x7a30,0x86ce,0x7a12,0xf933,0x741d,0x6b48,0xe48e,0xbf65,0x1854}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1920,0xb8b5,0x9c5,0xfd99,0xd1a8,0xb311,0xd4d8,0x9a8e,0x7f4c,0x1ad6,0xeba7,0xb78,0xe77,0xa59b,0xe5bc,0x11f7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdf451e34,0xa203cfa6,0xd8c5e18a,0x7a301d42,0x7a1286ce,0x741df933,0xe48e6b48,0x1854bf65}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb8b51920,0xfd9909c5,0xb311d1a8,0x9a8ed4d8,0x1ad67f4c,0xb78eba7,0xa59b0e77,0x11f7e5bc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa203cfa6df451e34,0x7a301d42d8c5e18a,0x741df9337a1286ce,0x1854bf65e48e6b48}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfd9909c5b8b51920,0x9a8ed4d8b311d1a8,0xb78eba71ad67f4c,0x11f7e5bca59b0e77}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcf03,0xa322,0x8523,0x93d,0x3bc,0x4b50,0x33ca,0x56b5,0x863f,0x87c8,0x38bf,0x98c6,0x8ce8,0xfab7,0xfc02,0x78ed}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x11f9,0xfb19,0xfec2,0xe1f1,0xe7a6,0xf9f,0x607,0xc29,0x62c,0x571,0x5f1e,0x9ef8,0x6833,0x4daa,0x5706,0x7bc1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa322cf03,0x93d8523,0x4b5003bc,0x56b533ca,0x87c8863f,0x98c638bf,0xfab78ce8,0x78edfc02}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xfb1911f9,0xe1f1fec2,0xf9fe7a6,0xc290607,0x571062c,0x9ef85f1e,0x4daa6833,0x7bc15706}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x93d8523a322cf03,0x56b533ca4b5003bc,0x98c638bf87c8863f,0x78edfc02fab78ce8}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe1f1fec2fb1911f9,0xc2906070f9fe7a6,0x9ef85f1e0571062c,0x7bc157064daa6833}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = 
(mp_limb_t[]) {0x7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 113}, {{ +}}}, 113}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x779c,0xdc0d,0x3c7f,0x2e51,0x85b9,0xfb7d,0xb29e,0xd38c,0x64bb,0x9ea5,0xa4fa,0x79a3,0xdf70,0xf359,0xca21,0xa977,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x779c,0xdc0d,0x3c7f,0x2e51,0x85b9,0xfb7d,0xb29e,0xd38c,0x64bb,0x9ea5,0xa4fa,0x79a3,0xdf70,0xf359,0xca21,0xa977,0x6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xdc0d779c,0x2e513c7f,0xfb7d85b9,0xd38cb29e,0x9ea564bb,0x79a3a4fa,0xf359df70,0xa977ca21,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xdc0d779c,0x2e513c7f,0xfb7d85b9,0xd38cb29e,0x9ea564bb,0x79a3a4fa,0xf359df70,0xa977ca21,0x6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) 
{0x2e513c7fdc0d779c,0xd38cb29efb7d85b9,0x79a3a4fa9ea564bb,0xa977ca21f359df70,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2e513c7fdc0d779c,0xd38cb29efb7d85b9,0x79a3a4fa9ea564bb,0xa977ca21f359df70,0x6}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x779c,0xdc0d,0x3c7f,0x2e51,0x85b9,0xfb7d,0xb29e,0xd38c,0x64bb,0x9ea5,0xa4fa,0x79a3,0xdf70,0xf359,0xca21,0xa977,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x779c,0xdc0d,0x3c7f,0x2e51,0x85b9,0xfb7d,0xb29e,0xd38c,0x64bb,0x9ea5,0xa4fa,0x79a3,0xdf70,0xf359,0xca21,0xa977,0x6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xdc0d779c,0x2e513c7f,0xfb7d85b9,0xd38cb29e,0x9ea564bb,0x79a3a4fa,0xf359df70,0xa977ca21,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xdc0d779c,0x2e513c7f,0xfb7d85b9,0xd38cb29e,0x9ea564bb,0x79a3a4fa,0xf359df70,0xa977ca21,0x6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2e513c7fdc0d779c,0xd38cb29efb7d85b9,0x79a3a4fa9ea564bb,0xa977ca21f359df70,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2e513c7fdc0d779c,0xd38cb29efb7d85b9,0x79a3a4fa9ea564bb,0xa977ca21f359df70,0x6}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xbbce,0xee06,0x9e3f,0x9728,0xc2dc,0x7dbe,0x594f,0xe9c6,0xb25d,0x4f52,0xd27d,0x3cd1,0xefb8,0xf9ac,0xe510,0x54bb,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xbbce,0xee06,0x9e3f,0x9728,0xc2dc,0x7dbe,0x594f,0xe9c6,0xb25d,0x4f52,0xd27d,0x3cd1,0xefb8,0xf9ac,0xe510,0x54bb,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xee06bbce,0x97289e3f,0x7dbec2dc,0xe9c6594f,0x4f52b25d,0x3cd1d27d,0xf9acefb8,0x54bbe510,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xee06bbce,0x97289e3f,0x7dbec2dc,0xe9c6594f,0x4f52b25d,0x3cd1d27d,0xf9acefb8,0x54bbe510,0x3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x97289e3fee06bbce,0xe9c6594f7dbec2dc,0x3cd1d27d4f52b25d,0x54bbe510f9acefb8,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x97289e3fee06bbce,0xe9c6594f7dbec2dc,0x3cd1d27d4f52b25d,0x54bbe510f9acefb8,0x3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x8db1,0xaa9d,0x1944,0x727a,0xc6c3,0xffc0,0x39b4,0x5643,0x2de0,0xb534,0xc0a9,0x5371,0x8e58,0x80df,0xa6c4,0x5a83,0x36}}} +{{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x8db1,0xaa9d,0x1944,0x727a,0xc6c3,0xffc0,0x39b4,0x5643,0x2de0,0xb534,0xc0a9,0x5371,0x8e58,0x80df,0xa6c4,0x5a83,0x36}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xaa9d8db1,0x727a1944,0xffc0c6c3,0x564339b4,0xb5342de0,0x5371c0a9,0x80df8e58,0x5a83a6c4,0x36}}} +{{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xaa9d8db1,0x727a1944,0xffc0c6c3,0x564339b4,0xb5342de0,0x5371c0a9,0x80df8e58,0x5a83a6c4,0x36}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x727a1944aa9d8db1,0x564339b4ffc0c6c3,0x5371c0a9b5342de0,0x5a83a6c480df8e58,0x36}}} +{{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x727a1944aa9d8db1,0x564339b4ffc0c6c3,0x5371c0a9b5342de0,0x5a83a6c480df8e58,0x36}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -32, ._mp_d = (mp_limb_t[]) {0x9c90,0x5de8,0xf815,0x67c5,0x989,0xc9,0x7c9e,0x180b,0x526d,0xdf5a,0x3386,0xea88,0x580a,0x24c5,0x5507,0x3bad,0x10,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x438}}} +{{{._mp_alloc = 0, ._mp_size = -32, ._mp_d = (mp_limb_t[]) {0x9c90,0x5de8,0xf815,0x67c5,0x989,0xc9,0x7c9e,0x180b,0x526d,0xdf5a,0x3386,0xea88,0x580a,0x24c5,0x5507,0x3bad,0x10,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x438}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x5de89c90,0x67c5f815,0xc90989,0x180b7c9e,0xdf5a526d,0xea883386,0x24c5580a,0x3bad5507,0x10,0x0,0x0,0x0,0x0,0x0,0x0,0x4380000}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x5de89c90,0x67c5f815,0xc90989,0x180b7c9e,0xdf5a526d,0xea883386,0x24c5580a,0x3bad5507,0x10,0x0,0x0,0x0,0x0,0x0,0x0,0x4380000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x67c5f8155de89c90,0x180b7c9e00c90989,0xea883386df5a526d,0x3bad550724c5580a,0x10,0x0,0x0,0x438000000000000}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x67c5f8155de89c90,0x180b7c9e00c90989,0xea883386df5a526d,0x3bad550724c5580a,0x10,0x0,0x0,0x438000000000000}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xbbce,0xee06,0x9e3f,0x9728,0xc2dc,0x7dbe,0x594f,0xe9c6,0xb25d,0x4f52,0xd27d,0x3cd1,0xefb8,0xf9ac,0xe510,0x54bb,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xbbce,0xee06,0x9e3f,0x9728,0xc2dc,0x7dbe,0x594f,0xe9c6,0xb25d,0x4f52,0xd27d,0x3cd1,0xefb8,0xf9ac,0xe510,0x54bb,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xee06bbce,0x97289e3f,0x7dbec2dc,0xe9c6594f,0x4f52b25d,0x3cd1d27d,0xf9acefb8,0x54bbe510,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xee06bbce,0x97289e3f,0x7dbec2dc,0xe9c6594f,0x4f52b25d,0x3cd1d27d,0xf9acefb8,0x54bbe510,0x3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x97289e3fee06bbce,0xe9c6594f7dbec2dc,0x3cd1d27d4f52b25d,0x54bbe510f9acefb8,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x97289e3fee06bbce,0xe9c6594f7dbec2dc,0x3cd1d27d4f52b25d,0x54bbe510f9acefb8,0x3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa1f8,0x1530,0xa6be,0x126c,0xfd3b,0xbdd9,0xb3bc,0x8495,0x5457,0x1985,0xcfae,0xf440,0x4ea6,0x84ba,0x6881,0x2eb1}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa1f8,0x1530,0xa6be,0x126c,0xfd3b,0xbdd9,0xb3bc,0x8495,0x5457,0x1985,0xcfae,0xf440,0x4ea6,0x84ba,0x6881,0x2eb1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1530a1f8,0x126ca6be,0xbdd9fd3b,0x8495b3bc,0x19855457,0xf440cfae,0x84ba4ea6,0x2eb16881}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1530a1f8,0x126ca6be,0xbdd9fd3b,0x8495b3bc,0x19855457,0xf440cfae,0x84ba4ea6,0x2eb16881}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x126ca6be1530a1f8,0x8495b3bcbdd9fd3b,0xf440cfae19855457,0x2eb1688184ba4ea6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x126ca6be1530a1f8,0x8495b3bcbdd9fd3b,0xf440cfae19855457,0x2eb1688184ba4ea6}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x779c,0xdc0d,0x3c7f,0x2e51,0x85b9,0xfb7d,0xb29e,0xd38c,0x64bb,0x9ea5,0xa4fa,0x79a3,0xdf70,0xf359,0xca21,0xa977,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x779c,0xdc0d,0x3c7f,0x2e51,0x85b9,0xfb7d,0xb29e,0xd38c,0x64bb,0x9ea5,0xa4fa,0x79a3,0xdf70,0xf359,0xca21,0xa977,0x6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xdc0d779c,0x2e513c7f,0xfb7d85b9,0xd38cb29e,0x9ea564bb,0x79a3a4fa,0xf359df70,0xa977ca21,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xdc0d779c,0x2e513c7f,0xfb7d85b9,0xd38cb29e,0x9ea564bb,0x79a3a4fa,0xf359df70,0xa977ca21,0x6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2e513c7fdc0d779c,0xd38cb29efb7d85b9,0x79a3a4fa9ea564bb,0xa977ca21f359df70,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2e513c7fdc0d779c,0xd38cb29efb7d85b9,0x79a3a4fa9ea564bb,0xa977ca21f359df70,0x6}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x8db1,0xaa9d,0x1944,0x727a,0xc6c3,0xffc0,0x39b4,0x5643,0x2de0,0xb534,0xc0a9,0x5371,0x8e58,0x80df,0xa6c4,0x5a83,0x36}}} +{{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x8db1,0xaa9d,0x1944,0x727a,0xc6c3,0xffc0,0x39b4,0x5643,0x2de0,0xb534,0xc0a9,0x5371,0x8e58,0x80df,0xa6c4,0x5a83,0x36}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xaa9d8db1,0x727a1944,0xffc0c6c3,0x564339b4,0xb5342de0,0x5371c0a9,0x80df8e58,0x5a83a6c4,0x36}}} +{{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xaa9d8db1,0x727a1944,0xffc0c6c3,0x564339b4,0xb5342de0,0x5371c0a9,0x80df8e58,0x5a83a6c4,0x36}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x727a1944aa9d8db1,0x564339b4ffc0c6c3,0x5371c0a9b5342de0,0x5a83a6c480df8e58,0x36}}} +{{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) 
{0x727a1944aa9d8db1,0x564339b4ffc0c6c3,0x5371c0a9b5342de0,0x5a83a6c480df8e58,0x36}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} 
#endif -}}, 149}}; +}}}, 149}}; const quat_left_ideal_t CONNECTING_IDEALS[7] = {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 
-{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb4ca,0xbe8d,0xcee3,0x9669,0x9cb,0x86eb,0xf6f9,0x374b,0x2e68,0xd1f2,0x3315,0xab5f,0x2208,0xa9c9,0x686e,0x2541}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb4ca,0xbe8d,0xcee3,0x9669,0x9cb,0x86eb,0xf6f9,0x374b,0x2e68,0xd1f2,0x3315,0xab5f,0x2208,0xa9c9,0x686e,0x2541}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbe8db4ca,0x9669cee3,0x86eb09cb,0x374bf6f9,0xd1f22e68,0xab5f3315,0xa9c92208,0x2541686e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbe8db4ca,0x9669cee3,0x86eb09cb,0x374bf6f9,0xd1f22e68,0xab5f3315,0xa9c92208,0x2541686e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9669cee3be8db4ca,0x374bf6f986eb09cb,0xab5f3315d1f22e68,0x2541686ea9c92208}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9669cee3be8db4ca,0x374bf6f986eb09cb,0xab5f3315d1f22e68,0x2541686ea9c92208}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdb03,0x2777,0xbc36,0x4be5,0x38dd,0xd474,0x83b4,0x41a7,0x5426,0xa361,0x1f00,0xc617,0xe350,0x8cb4,0x2b1c,0xaa2}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdb03,0x2777,0xbc36,0x4be5,0x38dd,0xd474,0x83b4,0x41a7,0x5426,0xa361,0x1f00,0xc617,0xe350,0x8cb4,0x2b1c,0xaa2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2777db03,0x4be5bc36,0xd47438dd,0x41a783b4,0xa3615426,0xc6171f00,0x8cb4e350,0xaa22b1c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2777db03,0x4be5bc36,0xd47438dd,0x41a783b4,0xa3615426,0xc6171f00,0x8cb4e350,0xaa22b1c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4be5bc362777db03,0x41a783b4d47438dd,0xc6171f00a3615426,0xaa22b1c8cb4e350}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4be5bc362777db03,0x41a783b4d47438dd,0xc6171f00a3615426,0xaa22b1c8cb4e350}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb4ca,0xbe8d,0xcee3,0x9669,0x9cb,0x86eb,0xf6f9,0x374b,0x2e68,0xd1f2,0x3315,0xab5f,0x2208,0xa9c9,0x686e,0x2541}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb4ca,0xbe8d,0xcee3,0x9669,0x9cb,0x86eb,0xf6f9,0x374b,0x2e68,0xd1f2,0x3315,0xab5f,0x2208,0xa9c9,0x686e,0x2541}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbe8db4ca,0x9669cee3,0x86eb09cb,0x374bf6f9,0xd1f22e68,0xab5f3315,0xa9c92208,0x2541686e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbe8db4ca,0x9669cee3,0x86eb09cb,0x374bf6f9,0xd1f22e68,0xab5f3315,0xa9c92208,0x2541686e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9669cee3be8db4ca,0x374bf6f986eb09cb,0xab5f3315d1f22e68,0x2541686ea9c92208}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9669cee3be8db4ca,0x374bf6f986eb09cb,0xab5f3315d1f22e68,0x2541686ea9c92208}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd9c7,0x9715,0x12ad,0x4a84,0xd0ee,0xb276,0x7344,0xf5a4,0xda41,0x2e90,0x1415,0xe548,0x3eb7,0x1d14,0x3d52,0x1a9f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = 
(mp_limb_t[]) {0xd9c7,0x9715,0x12ad,0x4a84,0xd0ee,0xb276,0x7344,0xf5a4,0xda41,0x2e90,0x1415,0xe548,0x3eb7,0x1d14,0x3d52,0x1a9f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9715d9c7,0x4a8412ad,0xb276d0ee,0xf5a47344,0x2e90da41,0xe5481415,0x1d143eb7,0x1a9f3d52}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9715d9c7,0x4a8412ad,0xb276d0ee,0xf5a47344,0x2e90da41,0xe5481415,0x1d143eb7,0x1a9f3d52}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4a8412ad9715d9c7,0xf5a47344b276d0ee,0xe54814152e90da41,0x1a9f3d521d143eb7}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4a8412ad9715d9c7,0xf5a47344b276d0ee,0xe54814152e90da41,0x1a9f3d521d143eb7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif 
GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xda65,0xdf46,0xe771,0xcb34,0x84e5,0xc375,0xfb7c,0x1ba5,0x1734,0xe8f9,0x998a,0x55af,0x9104,0x54e4,0xb437,0x12a0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xda65,0xdf46,0xe771,0xcb34,0x84e5,0xc375,0xfb7c,0x1ba5,0x1734,0xe8f9,0x998a,0x55af,0x9104,0x54e4,0xb437,0x12a0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdf46da65,0xcb34e771,0xc37584e5,0x1ba5fb7c,0xe8f91734,0x55af998a,0x54e49104,0x12a0b437}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdf46da65,0xcb34e771,0xc37584e5,0x1ba5fb7c,0xe8f91734,0x55af998a,0x54e49104,0x12a0b437}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcb34e771df46da65,0x1ba5fb7cc37584e5,0x55af998ae8f91734,0x12a0b43754e49104}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcb34e771df46da65,0x1ba5fb7cc37584e5,0x55af998ae8f91734,0x12a0b43754e49104}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8412,0x5a4d,0xe982,0x7e48,0x619e,0x6d03,0x297c,0x2598,0x6aff,0x24ff,0xc89e,0x51c8,0x6f8,0x6965,0x7e7b,0x13de}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8412,0x5a4d,0xe982,0x7e48,0x619e,0x6d03,0x297c,0x2598,0x6aff,0x24ff,0xc89e,0x51c8,0x6f8,0x6965,0x7e7b,0x13de}}}} 
#elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a4d8412,0x7e48e982,0x6d03619e,0x2598297c,0x24ff6aff,0x51c8c89e,0x696506f8,0x13de7e7b}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a4d8412,0x7e48e982,0x6d03619e,0x2598297c,0x24ff6aff,0x51c8c89e,0x696506f8,0x13de7e7b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7e48e9825a4d8412,0x2598297c6d03619e,0x51c8c89e24ff6aff,0x13de7e7b696506f8}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7e48e9825a4d8412,0x2598297c6d03619e,0x51c8c89e24ff6aff,0x13de7e7b696506f8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6e7d,0xd8b2,0x8be,0xf2e3,0x7c3e,0x1572,0x7609,0xf4ae,0x8366,0xb93e,0x53ec,0x9b03,0x6573,0xae18,0x41b0,0x707}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6e7d,0xd8b2,0x8be,0xf2e3,0x7c3e,0x1572,0x7609,0xf4ae,0x8366,0xb93e,0x53ec,0x9b03,0x6573,0xae18,0x41b0,0x707}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd8b26e7d,0xf2e308be,0x15727c3e,0xf4ae7609,0xb93e8366,0x9b0353ec,0xae186573,0x70741b0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd8b26e7d,0xf2e308be,0x15727c3e,0xf4ae7609,0xb93e8366,0x9b0353ec,0xae186573,0x70741b0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf2e308bed8b26e7d,0xf4ae760915727c3e,0x9b0353ecb93e8366,0x70741b0ae186573}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf2e308bed8b26e7d,0xf4ae760915727c3e,0x9b0353ecb93e8366,0x70741b0ae186573}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8412,0x5a4d,0xe982,0x7e48,0x619e,0x6d03,0x297c,0x2598,0x6aff,0x24ff,0xc89e,0x51c8,0x6f8,0x6965,0x7e7b,0x13de}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8412,0x5a4d,0xe982,0x7e48,0x619e,0x6d03,0x297c,0x2598,0x6aff,0x24ff,0xc89e,0x51c8,0x6f8,0x6965,0x7e7b,0x13de}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 
8, ._mp_d = (mp_limb_t[]) {0x5a4d8412,0x7e48e982,0x6d03619e,0x2598297c,0x24ff6aff,0x51c8c89e,0x696506f8,0x13de7e7b}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a4d8412,0x7e48e982,0x6d03619e,0x2598297c,0x24ff6aff,0x51c8c89e,0x696506f8,0x13de7e7b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7e48e9825a4d8412,0x2598297c6d03619e,0x51c8c89e24ff6aff,0x13de7e7b696506f8}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7e48e9825a4d8412,0x2598297c6d03619e,0x51c8c89e24ff6aff,0x13de7e7b696506f8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1595,0x819b,0xe0c3,0x8b65,0xe55f,0x5790,0xb373,0x30e9,0xe798,0x6bc0,0x74b1,0xb6c5,0xa184,0xbb4c,0x3cca,0xcd7}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1595,0x819b,0xe0c3,0x8b65,0xe55f,0x5790,0xb373,0x30e9,0xe798,0x6bc0,0x74b1,0xb6c5,0xa184,0xbb4c,0x3cca,0xcd7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x819b1595,0x8b65e0c3,0x5790e55f,0x30e9b373,0x6bc0e798,0xb6c574b1,0xbb4ca184,0xcd73cca}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x819b1595,0x8b65e0c3,0x5790e55f,0x30e9b373,0x6bc0e798,0xb6c574b1,0xbb4ca184,0xcd73cca}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8b65e0c3819b1595,0x30e9b3735790e55f,0xb6c574b16bc0e798,0xcd73ccabb4ca184}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8b65e0c3819b1595,0x30e9b3735790e55f,0xb6c574b16bc0e798,0xcd73ccabb4ca184}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , 
#if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc209,0x2d26,0x74c1,0x3f24,0xb0cf,0x3681,0x14be,0x92cc,0xb57f,0x127f,0x644f,0x28e4,0x837c,0xb4b2,0x3f3d,0x9ef}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc209,0x2d26,0x74c1,0x3f24,0xb0cf,0x3681,0x14be,0x92cc,0xb57f,0x127f,0x644f,0x28e4,0x837c,0xb4b2,0x3f3d,0x9ef}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2d26c209,0x3f2474c1,0x3681b0cf,0x92cc14be,0x127fb57f,0x28e4644f,0xb4b2837c,0x9ef3f3d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2d26c209,0x3f2474c1,0x3681b0cf,0x92cc14be,0x127fb57f,0x28e4644f,0xb4b2837c,0x9ef3f3d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3f2474c12d26c209,0x92cc14be3681b0cf,0x28e4644f127fb57f,0x9ef3f3db4b2837c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3f2474c12d26c209,0x92cc14be3681b0cf,0x28e4644f127fb57f,0x9ef3f3db4b2837c}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, 
._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb376,0x7694,0x643d,0xf407,0xf5c,0x6e43,0xd345,0x5c1f,0xecc4,0x777d,0x1005,0x24fe,0x88e4,0x536a,0x5c85,0xe09}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb376,0x7694,0x643d,0xf407,0xf5c,0x6e43,0xd345,0x5c1f,0xecc4,0x777d,0x1005,0x24fe,0x88e4,0x536a,0x5c85,0xe09}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7694b376,0xf407643d,0x6e430f5c,0x5c1fd345,0x777decc4,0x24fe1005,0x536a88e4,0xe095c85}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7694b376,0xf407643d,0x6e430f5c,0x5c1fd345,0x777decc4,0x24fe1005,0x536a88e4,0xe095c85}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf407643d7694b376,0x5c1fd3456e430f5c,0x24fe1005777decc4,0xe095c85536a88e4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf407643d7694b376,0x5c1fd3456e430f5c,0x24fe1005777decc4,0xe095c85536a88e4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9427,0xa69c,0xda24,0xb3a7,0x4f9a,0x22fc,0xa39a,0xcb05,0xd93e,0x923d,0xb97d,0xad95,0x3374,0x96bd,0xbdeb,0x51}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9427,0xa69c,0xda24,0xb3a7,0x4f9a,0x22fc,0xa39a,0xcb05,0xd93e,0x923d,0xb97d,0xad95,0x3374,0x96bd,0xbdeb,0x51}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa69c9427,0xb3a7da24,0x22fc4f9a,0xcb05a39a,0x923dd93e,0xad95b97d,0x96bd3374,0x51bdeb}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa69c9427,0xb3a7da24,0x22fc4f9a,0xcb05a39a,0x923dd93e,0xad95b97d,0x96bd3374,0x51bdeb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb3a7da24a69c9427,0xcb05a39a22fc4f9a,0xad95b97d923dd93e,0x51bdeb96bd3374}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb3a7da24a69c9427,0xcb05a39a22fc4f9a,0xad95b97d923dd93e,0x51bdeb96bd3374}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb376,0x7694,0x643d,0xf407,0xf5c,0x6e43,0xd345,0x5c1f,0xecc4,0x777d,0x1005,0x24fe,0x88e4,0x536a,0x5c85,0xe09}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb376,0x7694,0x643d,0xf407,0xf5c,0x6e43,0xd345,0x5c1f,0xecc4,0x777d,0x1005,0x24fe,0x88e4,0x536a,0x5c85,0xe09}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7694b376,0xf407643d,0x6e430f5c,0x5c1fd345,0x777decc4,0x24fe1005,0x536a88e4,0xe095c85}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7694b376,0xf407643d,0x6e430f5c,0x5c1fd345,0x777decc4,0x24fe1005,0x536a88e4,0xe095c85}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf407643d7694b376,0x5c1fd3456e430f5c,0x24fe1005777decc4,0xe095c85536a88e4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf407643d7694b376,0x5c1fd3456e430f5c,0x24fe1005777decc4,0xe095c85536a88e4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1f4f,0xcff8,0x8a18,0x405f,0xbfc2,0x4b46,0x2fab,0x911a,0x1385,0xe540,0x5687,0x7768,0x556f,0xbcad,0x9e99,0xdb7}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1f4f,0xcff8,0x8a18,0x405f,0xbfc2,0x4b46,0x2fab,0x911a,0x1385,0xe540,0x5687,0x7768,0x556f,0xbcad,0x9e99,0xdb7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcff81f4f,0x405f8a18,0x4b46bfc2,0x911a2fab,0xe5401385,0x77685687,0xbcad556f,0xdb79e99}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcff81f4f,0x405f8a18,0x4b46bfc2,0x911a2fab,0xe5401385,0x77685687,0xbcad556f,0xdb79e99}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x405f8a18cff81f4f,0x911a2fab4b46bfc2,0x77685687e5401385,0xdb79e99bcad556f}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x405f8a18cff81f4f,0x911a2fab4b46bfc2,0x77685687e5401385,0xdb79e99bcad556f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif 
GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x59bb,0xbb4a,0xb21e,0x7a03,0x87ae,0xb721,0xe9a2,0x2e0f,0xf662,0xbbbe,0x802,0x127f,0x4472,0xa9b5,0xae42,0x704}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x59bb,0xbb4a,0xb21e,0x7a03,0x87ae,0xb721,0xe9a2,0x2e0f,0xf662,0xbbbe,0x802,0x127f,0x4472,0xa9b5,0xae42,0x704}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbb4a59bb,0x7a03b21e,0xb72187ae,0x2e0fe9a2,0xbbbef662,0x127f0802,0xa9b54472,0x704ae42}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbb4a59bb,0x7a03b21e,0xb72187ae,0x2e0fe9a2,0xbbbef662,0x127f0802,0xa9b54472,0x704ae42}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7a03b21ebb4a59bb,0x2e0fe9a2b72187ae,0x127f0802bbbef662,0x704ae42a9b54472}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7a03b21ebb4a59bb,0x2e0fe9a2b72187ae,0x127f0802bbbef662,0x704ae42a9b54472}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7a2a,0xcc34,0x1fb9,0x5b4e,0x6acf,0x4f0f,0xbb68,0x211d,0xa57b,0xae74,0x782,0xa512,0xd75c,0xb576,0x5af5,0xa035}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7a2a,0xcc34,0x1fb9,0x5b4e,0x6acf,0x4f0f,0xbb68,0x211d,0xa57b,0xae74,0x782,0xa512,0xd75c,0xb576,0x5af5,0xa035}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcc347a2a,0x5b4e1fb9,0x4f0f6acf,0x211dbb68,0xae74a57b,0xa5120782,0xb576d75c,0xa0355af5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcc347a2a,0x5b4e1fb9,0x4f0f6acf,0x211dbb68,0xae74a57b,0xa5120782,0xb576d75c,0xa0355af5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5b4e1fb9cc347a2a,0x211dbb684f0f6acf,0xa5120782ae74a57b,0xa0355af5b576d75c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5b4e1fb9cc347a2a,0x211dbb684f0f6acf,0xa5120782ae74a57b,0xa0355af5b576d75c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa3e3,0x12fb,0x32f3,0xb40f,0x4bbe,0x537d,0xbefc,0xdda9,0x8954,0xaca9,0xaaf3,0xc020,0x17da,0xf48f,0x88fd,0x21a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xa3e3,0x12fb,0x32f3,0xb40f,0x4bbe,0x537d,0xbefc,0xdda9,0x8954,0xaca9,0xaaf3,0xc020,0x17da,0xf48f,0x88fd,0x21a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x12fba3e3,0xb40f32f3,0x537d4bbe,0xdda9befc,0xaca98954,0xc020aaf3,0xf48f17da,0x21a88fd}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x12fba3e3,0xb40f32f3,0x537d4bbe,0xdda9befc,0xaca98954,0xc020aaf3,0xf48f17da,0x21a88fd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb40f32f312fba3e3,0xdda9befc537d4bbe,0xc020aaf3aca98954,0x21a88fdf48f17da}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb40f32f312fba3e3,0xdda9befc537d4bbe,0xc020aaf3aca98954,0x21a88fdf48f17da}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7a2a,0xcc34,0x1fb9,0x5b4e,0x6acf,0x4f0f,0xbb68,0x211d,0xa57b,0xae74,0x782,0xa512,0xd75c,0xb576,0x5af5,0xa035}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7a2a,0xcc34,0x1fb9,0x5b4e,0x6acf,0x4f0f,0xbb68,0x211d,0xa57b,0xae74,0x782,0xa512,0xd75c,0xb576,0x5af5,0xa035}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcc347a2a,0x5b4e1fb9,0x4f0f6acf,0x211dbb68,0xae74a57b,0xa5120782,0xb576d75c,0xa0355af5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcc347a2a,0x5b4e1fb9,0x4f0f6acf,0x211dbb68,0xae74a57b,0xa5120782,0xb576d75c,0xa0355af5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5b4e1fb9cc347a2a,0x211dbb684f0f6acf,0xa5120782ae74a57b,0xa0355af5b576d75c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5b4e1fb9cc347a2a,0x211dbb684f0f6acf,0xa5120782ae74a57b,0xa0355af5b576d75c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd647,0xb938,0xecc6,0xa73e,0x1f10,0xfb92,0xfc6b,0x4373,0x1c26,0x1cb,0x5c8f,0xe4f1,0xbf81,0xc0e7,0xd1f7,0x9e1a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd647,0xb938,0xecc6,0xa73e,0x1f10,0xfb92,0xfc6b,0x4373,0x1c26,0x1cb,0x5c8f,0xe4f1,0xbf81,0xc0e7,0xd1f7,0x9e1a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb938d647,0xa73eecc6,0xfb921f10,0x4373fc6b,0x1cb1c26,0xe4f15c8f,0xc0e7bf81,0x9e1ad1f7}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb938d647,0xa73eecc6,0xfb921f10,0x4373fc6b,0x1cb1c26,0xe4f15c8f,0xc0e7bf81,0x9e1ad1f7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa73eecc6b938d647,0x4373fc6bfb921f10,0xe4f15c8f01cb1c26,0x9e1ad1f7c0e7bf81}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa73eecc6b938d647,0x4373fc6bfb921f10,0xe4f15c8f01cb1c26,0x9e1ad1f7c0e7bf81}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3d15,0xe61a,0xfdc,0xada7,0xb567,0x2787,0xddb4,0x908e,0x52bd,0x573a,0x3c1,0x5289,0x6bae,0xdabb,0xad7a,0x501a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3d15,0xe61a,0xfdc,0xada7,0xb567,0x2787,0xddb4,0x908e,0x52bd,0x573a,0x3c1,0x5289,0x6bae,0xdabb,0xad7a,0x501a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe61a3d15,0xada70fdc,0x2787b567,0x908eddb4,0x573a52bd,0x528903c1,0xdabb6bae,0x501aad7a}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe61a3d15,0xada70fdc,0x2787b567,0x908eddb4,0x573a52bd,0x528903c1,0xdabb6bae,0x501aad7a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xada70fdce61a3d15,0x908eddb42787b567,0x528903c1573a52bd,0x501aad7adabb6bae}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xada70fdce61a3d15,0x908eddb42787b567,0x528903c1573a52bd,0x501aad7adabb6bae}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81d6,0x1f29,0xf1a,0x365e,0x8f4a,0x95c8,0x38b1,0x87f1,0xb9ff,0x9cca,0x8239,0x1cb1,0x70f,0x8fde,0x5f3f,0x25be}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81d6,0x1f29,0xf1a,0x365e,0x8f4a,0x95c8,0x38b1,0x87f1,0xb9ff,0x9cca,0x8239,0x1cb1,0x70f,0x8fde,0x5f3f,0x25be}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1f2981d6,0x365e0f1a,0x95c88f4a,0x87f138b1,0x9ccab9ff,0x1cb18239,0x8fde070f,0x25be5f3f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1f2981d6,0x365e0f1a,0x95c88f4a,0x87f138b1,0x9ccab9ff,0x1cb18239,0x8fde070f,0x25be5f3f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x365e0f1a1f2981d6,0x87f138b195c88f4a,0x1cb182399ccab9ff,0x25be5f3f8fde070f}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x365e0f1a1f2981d6,0x87f138b195c88f4a,0x1cb182399ccab9ff,0x25be5f3f8fde070f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbe61,0x7c4c,0xa49c,0x48f9,0x3fb4,0xb80a,0xf823,0xd427,0x593e,0x9ec7,0x3a56,0xda06,0x551e,0x2875,0xc2cb,0xfb0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc375,0xa2dc,0x6a7d,0xed64,0x4f95,0xddbe,0x408d,0xb3c9,0x60c0,0xfe03,0x47e2,0x42ab,0xb1f0,0x6768,0x9c74,0x160d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7c4cbe61,0x48f9a49c,0xb80a3fb4,0xd427f823,0x9ec7593e,0xda063a56,0x2875551e,0xfb0c2cb}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa2dcc375,0xed646a7d,0xddbe4f95,0xb3c9408d,0xfe0360c0,0x42ab47e2,0x6768b1f0,0x160d9c74}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x48f9a49c7c4cbe61,0xd427f823b80a3fb4,0xda063a569ec7593e,0xfb0c2cb2875551e}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xed646a7da2dcc375,0xb3c9408dddbe4f95,0x42ab47e2fe0360c0,0x160d9c746768b1f0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81d6,0x1f29,0xf1a,0x365e,0x8f4a,0x95c8,0x38b1,0x87f1,0xb9ff,0x9cca,0x8239,0x1cb1,0x70f,0x8fde,0x5f3f,0x25be}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81d6,0x1f29,0xf1a,0x365e,0x8f4a,0x95c8,0x38b1,0x87f1,0xb9ff,0x9cca,0x8239,0x1cb1,0x70f,0x8fde,0x5f3f,0x25be}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1f2981d6,0x365e0f1a,0x95c88f4a,0x87f138b1,0x9ccab9ff,0x1cb18239,0x8fde070f,0x25be5f3f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1f2981d6,0x365e0f1a,0x95c88f4a,0x87f138b1,0x9ccab9ff,0x1cb18239,0x8fde070f,0x25be5f3f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x365e0f1a1f2981d6,0x87f138b195c88f4a,0x1cb182399ccab9ff,0x25be5f3f8fde070f}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x365e0f1a1f2981d6,0x87f138b195c88f4a,0x1cb182399ccab9ff,0x25be5f3f8fde070f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc375,0xa2dc,0x6a7d,0xed64,0x4f95,0xddbe,0x408d,0xb3c9,0x60c0,0xfe03,0x47e2,0x42ab,0xb1f0,0x6768,0x9c74,0x160d}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbe61,0x7c4c,0xa49c,0x48f9,0x3fb4,0xb80a,0xf823,0xd427,0x593e,0x9ec7,0x3a56,0xda06,0x551e,0x2875,0xc2cb,0xfb0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = 
(mp_limb_t[]) {0xa2dcc375,0xed646a7d,0xddbe4f95,0xb3c9408d,0xfe0360c0,0x42ab47e2,0x6768b1f0,0x160d9c74}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7c4cbe61,0x48f9a49c,0xb80a3fb4,0xd427f823,0x9ec7593e,0xda063a56,0x2875551e,0xfb0c2cb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xed646a7da2dcc375,0xb3c9408dddbe4f95,0x42ab47e2fe0360c0,0x160d9c746768b1f0}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x48f9a49c7c4cbe61,0xd427f823b80a3fb4,0xda063a569ec7593e,0xfb0c2cb2875551e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 
0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc0eb,0xf94,0x78d,0x1b2f,0x47a5,0xcae4,0x9c58,0xc3f8,0x5cff,0xce65,0xc11c,0x8e58,0x387,0xc7ef,0x2f9f,0x12df}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc0eb,0xf94,0x78d,0x1b2f,0x47a5,0xcae4,0x9c58,0xc3f8,0x5cff,0xce65,0xc11c,0x8e58,0x387,0xc7ef,0x2f9f,0x12df}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf94c0eb,0x1b2f078d,0xcae447a5,0xc3f89c58,0xce655cff,0x8e58c11c,0xc7ef0387,0x12df2f9f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf94c0eb,0x1b2f078d,0xcae447a5,0xc3f89c58,0xce655cff,0x8e58c11c,0xc7ef0387,0x12df2f9f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1b2f078d0f94c0eb,0xc3f89c58cae447a5,0x8e58c11cce655cff,0x12df2f9fc7ef0387}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1b2f078d0f94c0eb,0xc3f89c58cae447a5,0x8e58c11cce655cff,0x12df2f9fc7ef0387}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5a86,0x1729,0x8ced,0x8280,0xd48f,0x1e0f,0x5e39,0x24b3,0x74ba,0xa294,0xd9f3,0x4e2e,0x8cc1,0xee6b,0xdd2,0x3079}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5a86,0x1729,0x8ced,0x8280,0xd48f,0x1e0f,0x5e39,0x24b3,0x74ba,0xa294,0xd9f3,0x4e2e,0x8cc1,0xee6b,0xdd2,0x3079}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17295a86,0x82808ced,0x1e0fd48f,0x24b35e39,0xa29474ba,0x4e2ed9f3,0xee6b8cc1,0x30790dd2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d 
= (mp_limb_t[]) {0x17295a86,0x82808ced,0x1e0fd48f,0x24b35e39,0xa29474ba,0x4e2ed9f3,0xee6b8cc1,0x30790dd2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x82808ced17295a86,0x24b35e391e0fd48f,0x4e2ed9f3a29474ba,0x30790dd2ee6b8cc1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x82808ced17295a86,0x24b35e391e0fd48f,0x4e2ed9f3a29474ba,0x30790dd2ee6b8cc1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9203,0x57ee,0x3867,0xdf50,0xd8ad,0xbe9c,0x9e30,0x7a77,0xcd0f,0x77d9,0xbb7f,0x65f1,0x1b16,0xbbf5,0xe5c0,0x2563}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9203,0x57ee,0x3867,0xdf50,0xd8ad,0xbe9c,0x9e30,0x7a77,0xcd0f,0x77d9,0xbb7f,0x65f1,0x1b16,0xbbf5,0xe5c0,0x2563}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57ee9203,0xdf503867,0xbe9cd8ad,0x7a779e30,0x77d9cd0f,0x65f1bb7f,0xbbf51b16,0x2563e5c0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57ee9203,0xdf503867,0xbe9cd8ad,0x7a779e30,0x77d9cd0f,0x65f1bb7f,0xbbf51b16,0x2563e5c0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdf50386757ee9203,0x7a779e30be9cd8ad,0x65f1bb7f77d9cd0f,0x2563e5c0bbf51b16}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdf50386757ee9203,0x7a779e30be9cd8ad,0x65f1bb7f77d9cd0f,0x2563e5c0bbf51b16}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5a86,0x1729,0x8ced,0x8280,0xd48f,0x1e0f,0x5e39,0x24b3,0x74ba,0xa294,0xd9f3,0x4e2e,0x8cc1,0xee6b,0xdd2,0x3079}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5a86,0x1729,0x8ced,0x8280,0xd48f,0x1e0f,0x5e39,0x24b3,0x74ba,0xa294,0xd9f3,0x4e2e,0x8cc1,0xee6b,0xdd2,0x3079}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17295a86,0x82808ced,0x1e0fd48f,0x24b35e39,0xa29474ba,0x4e2ed9f3,0xee6b8cc1,0x30790dd2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0x17295a86,0x82808ced,0x1e0fd48f,0x24b35e39,0xa29474ba,0x4e2ed9f3,0xee6b8cc1,0x30790dd2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x82808ced17295a86,0x24b35e391e0fd48f,0x4e2ed9f3a29474ba,0x30790dd2ee6b8cc1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x82808ced17295a86,0x24b35e391e0fd48f,0x4e2ed9f3a29474ba,0x30790dd2ee6b8cc1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc883,0xbf3a,0x5485,0xa330,0xfbe1,0x5f72,0xc008,0xaa3b,0xa7aa,0x2aba,0x1e74,0xe83d,0x71aa,0x3276,0x2812,0xb15}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc883,0xbf3a,0x5485,0xa330,0xfbe1,0x5f72,0xc008,0xaa3b,0xa7aa,0x2aba,0x1e74,0xe83d,0x71aa,0x3276,0x2812,0xb15}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbf3ac883,0xa3305485,0x5f72fbe1,0xaa3bc008,0x2abaa7aa,0xe83d1e74,0x327671aa,0xb152812}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbf3ac883,0xa3305485,0x5f72fbe1,0xaa3bc008,0x2abaa7aa,0xe83d1e74,0x327671aa,0xb152812}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa3305485bf3ac883,0xaa3bc0085f72fbe1,0xe83d1e742abaa7aa,0xb152812327671aa}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa3305485bf3ac883,0xaa3bc0085f72fbe1,0xe83d1e742abaa7aa,0xb152812327671aa}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif 
GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad43,0x8b94,0x4676,0xc140,0xea47,0x8f07,0xaf1c,0x1259,0x3a5d,0xd14a,0x6cf9,0xa717,0xc660,0x7735,0x86e9,0x183c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad43,0x8b94,0x4676,0xc140,0xea47,0x8f07,0xaf1c,0x1259,0x3a5d,0xd14a,0x6cf9,0xa717,0xc660,0x7735,0x86e9,0x183c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8b94ad43,0xc1404676,0x8f07ea47,0x1259af1c,0xd14a3a5d,0xa7176cf9,0x7735c660,0x183c86e9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8b94ad43,0xc1404676,0x8f07ea47,0x1259af1c,0xd14a3a5d,0xa7176cf9,0x7735c660,0x183c86e9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc14046768b94ad43,0x1259af1c8f07ea47,0xa7176cf9d14a3a5d,0x183c86e97735c660}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc14046768b94ad43,0x1259af1c8f07ea47,0xa7176cf9d14a3a5d,0x183c86e97735c660}}}} #endif , &MAXORD_O0}}; const quat_alg_elem_t CONJUGATING_ELEMENTS[7] = {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif 
GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8fcd,0xe605,0x8b19,0xe24f,0x42a8,0x5b5f,0x7aae,0x78f3,0x828e,0x7553,0x5216,0x7176,0x559,0x367e,0x938b,0x2fe3}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8fcd,0xe605,0x8b19,0xe24f,0x42a8,0x5b5f,0x7aae,0x78f3,0x828e,0x7553,0x5216,0x7176,0x559,0x367e,0x938b,0x2fe3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe6058fcd,0xe24f8b19,0x5b5f42a8,0x78f37aae,0x7553828e,0x71765216,0x367e0559,0x2fe3938b}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe6058fcd,0xe24f8b19,0x5b5f42a8,0x78f37aae,0x7553828e,0x71765216,0x367e0559,0x2fe3938b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, 
._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe24f8b19e6058fcd,0x78f37aae5b5f42a8,0x717652167553828e,0x2fe3938b367e0559}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe24f8b19e6058fcd,0x78f37aae5b5f42a8,0x717652167553828e,0x2fe3938b367e0559}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x8fcd,0xe605,0x8b19,0xe24f,0x42a8,0x5b5f,0x7aae,0x78f3,0x828e,0x7553,0x5216,0x7176,0x559,0x367e,0x938b,0x2fe3}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x8fcd,0xe605,0x8b19,0xe24f,0x42a8,0x5b5f,0x7aae,0x78f3,0x828e,0x7553,0x5216,0x7176,0x559,0x367e,0x938b,0x2fe3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xe6058fcd,0xe24f8b19,0x5b5f42a8,0x78f37aae,0x7553828e,0x71765216,0x367e0559,0x2fe3938b}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xe6058fcd,0xe24f8b19,0x5b5f42a8,0x78f37aae,0x7553828e,0x71765216,0x367e0559,0x2fe3938b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe24f8b19e6058fcd,0x78f37aae5b5f42a8,0x717652167553828e,0x2fe3938b367e0559}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe24f8b19e6058fcd,0x78f37aae5b5f42a8,0x717652167553828e,0x2fe3938b367e0559}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x2315,0x38d9,0x6fc1,0xa333,0xaeb,0xce6a,0x2a4c,0xa1c1,0x274c,0xa9fc,0xd4c6,0xb0b3,0x555b,0x7a48,0x411a,0x2bdc}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x2315,0x38d9,0x6fc1,0xa333,0xaeb,0xce6a,0x2a4c,0xa1c1,0x274c,0xa9fc,0xd4c6,0xb0b3,0x555b,0x7a48,0x411a,0x2bdc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x38d92315,0xa3336fc1,0xce6a0aeb,0xa1c12a4c,0xa9fc274c,0xb0b3d4c6,0x7a48555b,0x2bdc411a}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x38d92315,0xa3336fc1,0xce6a0aeb,0xa1c12a4c,0xa9fc274c,0xb0b3d4c6,0x7a48555b,0x2bdc411a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, 
._mp_d = (mp_limb_t[]) {0xa3336fc138d92315,0xa1c12a4cce6a0aeb,0xb0b3d4c6a9fc274c,0x2bdc411a7a48555b}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa3336fc138d92315,0xa1c12a4cce6a0aeb,0xb0b3d4c6a9fc274c,0x2bdc411a7a48555b}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x2315,0x38d9,0x6fc1,0xa333,0xaeb,0xce6a,0x2a4c,0xa1c1,0x274c,0xa9fc,0xd4c6,0xb0b3,0x555b,0x7a48,0x411a,0x2bdc}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x2315,0x38d9,0x6fc1,0xa333,0xaeb,0xce6a,0x2a4c,0xa1c1,0x274c,0xa9fc,0xd4c6,0xb0b3,0x555b,0x7a48,0x411a,0x2bdc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x38d92315,0xa3336fc1,0xce6a0aeb,0xa1c12a4c,0xa9fc274c,0xb0b3d4c6,0x7a48555b,0x2bdc411a}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x38d92315,0xa3336fc1,0xce6a0aeb,0xa1c12a4c,0xa9fc274c,0xb0b3d4c6,0x7a48555b,0x2bdc411a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa3336fc138d92315,0xa1c12a4cce6a0aeb,0xb0b3d4c6a9fc274c,0x2bdc411a7a48555b}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa3336fc138d92315,0xa1c12a4cce6a0aeb,0xb0b3d4c6a9fc274c,0x2bdc411a7a48555b}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe8f5,0xf5ac,0x2ee2,0xc962,0x459d,0xd9a0,0xd761,0x7c5a,0x8268,0xcf36,0x9b08,0xb7a6,0xbd23,0x7e04,0xe324,0x23ba}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe8f5,0xf5ac,0x2ee2,0xc962,0x459d,0xd9a0,0xd761,0x7c5a,0x8268,0xcf36,0x9b08,0xb7a6,0xbd23,0x7e04,0xe324,0x23ba}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5ace8f5,0xc9622ee2,0xd9a0459d,0x7c5ad761,0xcf368268,0xb7a69b08,0x7e04bd23,0x23bae324}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5ace8f5,0xc9622ee2,0xd9a0459d,0x7c5ad761,0xcf368268,0xb7a69b08,0x7e04bd23,0x23bae324}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0xc9622ee2f5ace8f5,0x7c5ad761d9a0459d,0xb7a69b08cf368268,0x23bae3247e04bd23}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc9622ee2f5ace8f5,0x7c5ad761d9a0459d,0xb7a69b08cf368268,0x23bae3247e04bd23}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe8f5,0xf5ac,0x2ee2,0xc962,0x459d,0xd9a0,0xd761,0x7c5a,0x8268,0xcf36,0x9b08,0xb7a6,0xbd23,0x7e04,0xe324,0x23ba}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe8f5,0xf5ac,0x2ee2,0xc962,0x459d,0xd9a0,0xd761,0x7c5a,0x8268,0xcf36,0x9b08,0xb7a6,0xbd23,0x7e04,0xe324,0x23ba}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5ace8f5,0xc9622ee2,0xd9a0459d,0x7c5ad761,0xcf368268,0xb7a69b08,0x7e04bd23,0x23bae324}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5ace8f5,0xc9622ee2,0xd9a0459d,0x7c5ad761,0xcf368268,0xb7a69b08,0x7e04bd23,0x23bae324}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc9622ee2f5ace8f5,0x7c5ad761d9a0459d,0xb7a69b08cf368268,0x23bae3247e04bd23}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc9622ee2f5ace8f5,0x7c5ad761d9a0459d,0xb7a69b08cf368268,0x23bae3247e04bd23}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xfef5,0x4752,0xbb14,0x6ee3,0x5898,0x6a2,0x8282,0x1179,0xe429,0xf5d1,0x5ad8,0x642d,0x3061,0x58d,0x9c04,0x917b}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xfef5,0x4752,0xbb14,0x6ee3,0x5898,0x6a2,0x8282,0x1179,0xe429,0xf5d1,0x5ad8,0x642d,0x3061,0x58d,0x9c04,0x917b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4752fef5,0x6ee3bb14,0x6a25898,0x11798282,0xf5d1e429,0x642d5ad8,0x58d3061,0x917b9c04}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4752fef5,0x6ee3bb14,0x6a25898,0x11798282,0xf5d1e429,0x642d5ad8,0x58d3061,0x917b9c04}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x6ee3bb144752fef5,0x1179828206a25898,0x642d5ad8f5d1e429,0x917b9c04058d3061}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6ee3bb144752fef5,0x1179828206a25898,0x642d5ad8f5d1e429,0x917b9c04058d3061}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xfef5,0x4752,0xbb14,0x6ee3,0x5898,0x6a2,0x8282,0x1179,0xe429,0xf5d1,0x5ad8,0x642d,0x3061,0x58d,0x9c04,0x917b}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xfef5,0x4752,0xbb14,0x6ee3,0x5898,0x6a2,0x8282,0x1179,0xe429,0xf5d1,0x5ad8,0x642d,0x3061,0x58d,0x9c04,0x917b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x4752fef5,0x6ee3bb14,0x6a25898,0x11798282,0xf5d1e429,0x642d5ad8,0x58d3061,0x917b9c04}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x4752fef5,0x6ee3bb14,0x6a25898,0x11798282,0xf5d1e429,0x642d5ad8,0x58d3061,0x917b9c04}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x6ee3bb144752fef5,0x1179828206a25898,0x642d5ad8f5d1e429,0x917b9c04058d3061}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x6ee3bb144752fef5,0x1179828206a25898,0x642d5ad8f5d1e429,0x917b9c04058d3061}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x7}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x7}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x7}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x7}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xc375,0xa2dc,0x6a7d,0xed64,0x4f95,0xddbe,0x408d,0xb3c9,0x60c0,0xfe03,0x47e2,0x42ab,0xb1f0,0x6768,0x9c74,0x160d}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xbe61,0x7c4c,0xa49c,0x48f9,0x3fb4,0xb80a,0xf823,0xd427,0x593e,0x9ec7,0x3a56,0xda06,0x551e,0x2875,0xc2cb,0xfb0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xa2dcc375,0xed646a7d,0xddbe4f95,0xb3c9408d,0xfe0360c0,0x42ab47e2,0x6768b1f0,0x160d9c74}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x7c4cbe61,0x48f9a49c,0xb80a3fb4,0xd427f823,0x9ec7593e,0xda063a56,0x2875551e,0xfb0c2cb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) 
{0xed646a7da2dcc375,0xb3c9408dddbe4f95,0x42ab47e2fe0360c0,0x160d9c746768b1f0}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x48f9a49c7c4cbe61,0xd427f823b80a3fb4,0xda063a569ec7593e,0xfb0c2cb2875551e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xc375,0xa2dc,0x6a7d,0xed64,0x4f95,0xddbe,0x408d,0xb3c9,0x60c0,0xfe03,0x47e2,0x42ab,0xb1f0,0x6768,0x9c74,0x160d}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbe61,0x7c4c,0xa49c,0x48f9,0x3fb4,0xb80a,0xf823,0xd427,0x593e,0x9ec7,0x3a56,0xda06,0x551e,0x2875,0xc2cb,0xfb0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xa2dcc375,0xed646a7d,0xddbe4f95,0xb3c9408d,0xfe0360c0,0x42ab47e2,0x6768b1f0,0x160d9c74}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7c4cbe61,0x48f9a49c,0xb80a3fb4,0xd427f823,0x9ec7593e,0xda063a56,0x2875551e,0xfb0c2cb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xed646a7da2dcc375,0xb3c9408dddbe4f95,0x42ab47e2fe0360c0,0x160d9c746768b1f0}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x48f9a49c7c4cbe61,0xd427f823b80a3fb4,0xda063a569ec7593e,0xfb0c2cb2875551e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x95f7,0xbdd2,0x75d6,0x430e,0x5c58,0xbd78,0x16b0,0x1278,0xc3f8,0x16b,0xb5dc,0xbbcf,0xfa18,0x1815,0x3c32,0x624a}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x95f7,0xbdd2,0x75d6,0x430e,0x5c58,0xbd78,0x16b0,0x1278,0xc3f8,0x16b,0xb5dc,0xbbcf,0xfa18,0x1815,0x3c32,0x624a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xbdd295f7,0x430e75d6,0xbd785c58,0x127816b0,0x16bc3f8,0xbbcfb5dc,0x1815fa18,0x624a3c32}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xbdd295f7,0x430e75d6,0xbd785c58,0x127816b0,0x16bc3f8,0xbbcfb5dc,0x1815fa18,0x624a3c32}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) 
{0x430e75d6bdd295f7,0x127816b0bd785c58,0xbbcfb5dc016bc3f8,0x624a3c321815fa18}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x430e75d6bdd295f7,0x127816b0bd785c58,0xbbcfb5dc016bc3f8,0x624a3c321815fa18}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x95f7,0xbdd2,0x75d6,0x430e,0x5c58,0xbd78,0x16b0,0x1278,0xc3f8,0x16b,0xb5dc,0xbbcf,0xfa18,0x1815,0x3c32,0x624a}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x95f7,0xbdd2,0x75d6,0x430e,0x5c58,0xbd78,0x16b0,0x1278,0xc3f8,0x16b,0xb5dc,0xbbcf,0xfa18,0x1815,0x3c32,0x624a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xbdd295f7,0x430e75d6,0xbd785c58,0x127816b0,0x16bc3f8,0xbbcfb5dc,0x1815fa18,0x624a3c32}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xbdd295f7,0x430e75d6,0xbd785c58,0x127816b0,0x16bc3f8,0xbbcfb5dc,0x1815fa18,0x624a3c32}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x430e75d6bdd295f7,0x127816b0bd785c58,0xbbcfb5dc016bc3f8,0x624a3c321815fa18}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x430e75d6bdd295f7,0x127816b0bd785c58,0xbbcfb5dc016bc3f8,0x624a3c321815fa18}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}}} #endif -}}}; +}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sign.c index 9216bbe4d3..9520a6f7fd 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sign.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sign.c @@ -31,12 +31,12 @@ compute_challenge_ideal_signature(quat_left_ideal_t *lideal_chall_two, const sig // vec is a vector [1, chall_coeff] coefficients encoding the kernel of the challenge // isogeny as B[0] + chall_coeff*B[1] where B is the canonical basis of the // 2^TORSION_EVEN_POWER torsion of EA - ibz_set(&vec[0], 1); - ibz_copy_digit_array(&vec[1], sig->chall_coeff); + ibz_set(&vec.v[0], 1); + ibz_copy_digit_array(&vec.v[1], sig->chall_coeff); // now we compute the ideal associated to the challenge // for that, we need to find vec such that - // the kernel of the challenge isogeny is generated by vec[0]*B0[0] + vec[1]*B0[1] where B0 + // the kernel of the challenge isogeny is generated by vec.v[0]*B0[0] + vec.v[1]*B0[1] where B0 // is the image through the secret key isogeny of the canonical basis E0 ibz_mat_2x2_eval(&vec, &(sk->mat_BAcan_to_BA0_two), &vec); @@ -459,16 +459,16 @@ compute_and_set_basis_change_matrix(signature_t *sig, change_of_basis_matrix_tate(&mat_Bchall_can_to_Bchall, B_chall_2, 
&B_can_chall, E_chall, f); // Assert all values in the matrix are of the expected size for packing - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][0]) <= SQIsign_response_length + HD_extra_torsion); - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][1]) <= SQIsign_response_length + HD_extra_torsion); - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][0]) <= SQIsign_response_length + HD_extra_torsion); - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][1]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[0][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[0][1]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[1][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[1][1]) <= SQIsign_response_length + HD_extra_torsion); // Set the basis change matrix to signature - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][0], &(mat_Bchall_can_to_Bchall[0][0])); - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][1], &(mat_Bchall_can_to_Bchall[0][1])); - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][0], &(mat_Bchall_can_to_Bchall[1][0])); - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][1], &(mat_Bchall_can_to_Bchall[1][1])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][0], &(mat_Bchall_can_to_Bchall.m[0][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][1], &(mat_Bchall_can_to_Bchall.m[0][1])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][0], &(mat_Bchall_can_to_Bchall.m[1][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][1], &(mat_Bchall_can_to_Bchall.m[1][1])); // Finalise the matrices ibz_mat_2x2_finalize(&mat_Bchall_can_to_Bchall); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/torsion_constants.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/torsion_constants.c index 6fb2f97637..9f466eb17e 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/torsion_constants.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/torsion_constants.c @@ -4,40 +4,40 @@ const ibz_t TWO_TO_SECURITY_BITS = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x1}}}} #endif ; const ibz_t TORSION_PLUS_2POWER = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x10}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x10}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x100000}}} +{{{._mp_alloc = 0, 
._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x100000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x10000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x10000000000000}}}} #endif ; const ibz_t SEC_DEGREE = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 65, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 65, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #endif ; const ibz_t COM_DEGREE = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 65, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 65, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #endif ; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/algebra.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/algebra.c index f4b4260755..a6298acf77 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/algebra.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/algebra.c @@ -21,54 +21,54 @@ quat_alg_coord_mul(ibz_vec_4_t 
*res, const ibz_vec_4_t *a, const ibz_vec_4_t *b, ibz_init(&prod); ibz_vec_4_init(&sum); - ibz_set(&(sum[0]), 0); - ibz_set(&(sum[1]), 0); - ibz_set(&(sum[2]), 0); - ibz_set(&(sum[3]), 0); + ibz_set(&(sum.v[0]), 0); + ibz_set(&(sum.v[1]), 0); + ibz_set(&(sum.v[2]), 0); + ibz_set(&(sum.v[3]), 0); // compute 1 coordinate - ibz_mul(&prod, &((*a)[2]), &((*b)[2])); - ibz_sub(&(sum[0]), &(sum[0]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[3])); - ibz_sub(&(sum[0]), &(sum[0]), &prod); - ibz_mul(&(sum[0]), &(sum[0]), &(alg->p)); - ibz_mul(&prod, &((*a)[0]), &((*b)[0])); - ibz_add(&(sum[0]), &(sum[0]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[1])); - ibz_sub(&(sum[0]), &(sum[0]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[2])); + ibz_sub(&(sum.v[0]), &(sum.v[0]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[3])); + ibz_sub(&(sum.v[0]), &(sum.v[0]), &prod); + ibz_mul(&(sum.v[0]), &(sum.v[0]), &(alg->p)); + ibz_mul(&prod, &(a->v[0]), &(b->v[0])); + ibz_add(&(sum.v[0]), &(sum.v[0]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[1])); + ibz_sub(&(sum.v[0]), &(sum.v[0]), &prod); // compute i coordiante - ibz_mul(&prod, &((*a)[2]), &((*b)[3])); - ibz_add(&(sum[1]), &(sum[1]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[2])); - ibz_sub(&(sum[1]), &(sum[1]), &prod); - ibz_mul(&(sum[1]), &(sum[1]), &(alg->p)); - ibz_mul(&prod, &((*a)[0]), &((*b)[1])); - ibz_add(&(sum[1]), &(sum[1]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[0])); - ibz_add(&(sum[1]), &(sum[1]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[3])); + ibz_add(&(sum.v[1]), &(sum.v[1]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[2])); + ibz_sub(&(sum.v[1]), &(sum.v[1]), &prod); + ibz_mul(&(sum.v[1]), &(sum.v[1]), &(alg->p)); + ibz_mul(&prod, &(a->v[0]), &(b->v[1])); + ibz_add(&(sum.v[1]), &(sum.v[1]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[0])); + ibz_add(&(sum.v[1]), &(sum.v[1]), &prod); // compute j coordiante - ibz_mul(&prod, &((*a)[0]), &((*b)[2])); - ibz_add(&(sum[2]), &(sum[2]), &prod); - ibz_mul(&prod, &((*a)[2]), &((*b)[0])); - ibz_add(&(sum[2]), &(sum[2]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[3])); - ibz_sub(&(sum[2]), &(sum[2]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[1])); - ibz_add(&(sum[2]), &(sum[2]), &prod); + ibz_mul(&prod, &(a->v[0]), &(b->v[2])); + ibz_add(&(sum.v[2]), &(sum.v[2]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[0])); + ibz_add(&(sum.v[2]), &(sum.v[2]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[3])); + ibz_sub(&(sum.v[2]), &(sum.v[2]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[1])); + ibz_add(&(sum.v[2]), &(sum.v[2]), &prod); // compute ij coordiante - ibz_mul(&prod, &((*a)[0]), &((*b)[3])); - ibz_add(&(sum[3]), &(sum[3]), &prod); - ibz_mul(&prod, &((*a)[3]), &((*b)[0])); - ibz_add(&(sum[3]), &(sum[3]), &prod); - ibz_mul(&prod, &((*a)[2]), &((*b)[1])); - ibz_sub(&(sum[3]), &(sum[3]), &prod); - ibz_mul(&prod, &((*a)[1]), &((*b)[2])); - ibz_add(&(sum[3]), &(sum[3]), &prod); + ibz_mul(&prod, &(a->v[0]), &(b->v[3])); + ibz_add(&(sum.v[3]), &(sum.v[3]), &prod); + ibz_mul(&prod, &(a->v[3]), &(b->v[0])); + ibz_add(&(sum.v[3]), &(sum.v[3]), &prod); + ibz_mul(&prod, &(a->v[2]), &(b->v[1])); + ibz_sub(&(sum.v[3]), &(sum.v[3]), &prod); + ibz_mul(&prod, &(a->v[1]), &(b->v[2])); + ibz_add(&(sum.v[3]), &(sum.v[3]), &prod); - ibz_copy(&((*res)[0]), &(sum[0])); - ibz_copy(&((*res)[1]), &(sum[1])); - ibz_copy(&((*res)[2]), &(sum[2])); - ibz_copy(&((*res)[3]), &(sum[3])); + ibz_copy(&(res->v[0]), &(sum.v[0])); + ibz_copy(&(res->v[1]), &(sum.v[1])); + ibz_copy(&(res->v[2]), &(sum.v[2])); + 
ibz_copy(&(res->v[3]), &(sum.v[3])); ibz_finalize(&prod); ibz_vec_4_finalize(&sum); @@ -86,8 +86,8 @@ quat_alg_equal_denom(quat_alg_elem_t *res_a, quat_alg_elem_t *res_b, const quat_ ibz_div(&(res_b->denom), &r, &(b->denom), &gcd); for (int i = 0; i < 4; i++) { // multiply coordiates by reduced denominators from the other element - ibz_mul(&(res_a->coord[i]), &(a->coord[i]), &(res_b->denom)); - ibz_mul(&(res_b->coord[i]), &(b->coord[i]), &(res_a->denom)); + ibz_mul(&(res_a->coord.v[i]), &(a->coord.v[i]), &(res_b->denom)); + ibz_mul(&(res_b->coord.v[i]), &(b->coord.v[i]), &(res_a->denom)); } // multiply both reduced denominators ibz_mul(&(res_a->denom), &(res_a->denom), &(res_b->denom)); @@ -149,8 +149,8 @@ quat_alg_norm(ibz_t *res_num, ibz_t *res_denom, const quat_alg_elem_t *a, const quat_alg_conj(&norm, a); quat_alg_mul(&norm, a, &norm, alg); - ibz_gcd(&g, &(norm.coord[0]), &(norm.denom)); - ibz_div(res_num, &r, &(norm.coord[0]), &g); + ibz_gcd(&g, &(norm.coord.v[0]), &(norm.denom)); + ibz_div(res_num, &r, &(norm.coord.v[0]), &g); ibz_div(res_denom, &r, &(norm.denom), &g); ibz_abs(res_denom, res_denom); ibz_abs(res_num, res_num); @@ -165,20 +165,20 @@ void quat_alg_scalar(quat_alg_elem_t *elem, const ibz_t *numerator, const ibz_t *denominator) { ibz_copy(&(elem->denom), denominator); - ibz_copy(&(elem->coord[0]), numerator); - ibz_set(&(elem->coord[1]), 0); - ibz_set(&(elem->coord[2]), 0); - ibz_set(&(elem->coord[3]), 0); + ibz_copy(&(elem->coord.v[0]), numerator); + ibz_set(&(elem->coord.v[1]), 0); + ibz_set(&(elem->coord.v[2]), 0); + ibz_set(&(elem->coord.v[3]), 0); } void quat_alg_conj(quat_alg_elem_t *conj, const quat_alg_elem_t *x) { ibz_copy(&(conj->denom), &(x->denom)); - ibz_copy(&(conj->coord[0]), &(x->coord[0])); - ibz_neg(&(conj->coord[1]), &(x->coord[1])); - ibz_neg(&(conj->coord[2]), &(x->coord[2])); - ibz_neg(&(conj->coord[3]), &(x->coord[3])); + ibz_copy(&(conj->coord.v[0]), &(x->coord.v[0])); + ibz_neg(&(conj->coord.v[1]), &(x->coord.v[1])); + ibz_neg(&(conj->coord.v[2]), &(x->coord.v[2])); + ibz_neg(&(conj->coord.v[3]), &(x->coord.v[3])); } void @@ -190,7 +190,8 @@ quat_alg_make_primitive(ibz_vec_4_t *primitive_x, ibz_t *content, const quat_alg ibz_t r; ibz_init(&r); for (int i = 0; i < 4; i++) { - ibz_div(*primitive_x + i, &r, *primitive_x + i, content); + // TODO: check if this is correct + ibz_div(primitive_x->v + i, &r, primitive_x->v + i, content); } ibz_finalize(&r); } @@ -235,10 +236,10 @@ quat_alg_elem_is_zero(const quat_alg_elem_t *x) void quat_alg_elem_set(quat_alg_elem_t *elem, int32_t denom, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) { - ibz_set(&(elem->coord[0]), coord0); - ibz_set(&(elem->coord[1]), coord1); - ibz_set(&(elem->coord[2]), coord2); - ibz_set(&(elem->coord[3]), coord3); + ibz_set(&(elem->coord.v[0]), coord0); + ibz_set(&(elem->coord.v[1]), coord1); + ibz_set(&(elem->coord.v[2]), coord2); + ibz_set(&(elem->coord.v[3]), coord3); ibz_set(&(elem->denom), denom); } @@ -247,10 +248,10 @@ void quat_alg_elem_copy(quat_alg_elem_t *copy, const quat_alg_elem_t *copied) { ibz_copy(©->denom, &copied->denom); - ibz_copy(©->coord[0], &copied->coord[0]); - ibz_copy(©->coord[1], &copied->coord[1]); - ibz_copy(©->coord[2], &copied->coord[2]); - ibz_copy(©->coord[3], &copied->coord[3]); + ibz_copy(©->coord.v[0], &copied->coord.v[0]); + ibz_copy(©->coord.v[1], &copied->coord.v[1]); + ibz_copy(©->coord.v[2], &copied->coord.v[2]); + ibz_copy(©->coord.v[3], &copied->coord.v[3]); } // helper functions for lattices @@ -262,10 +263,10 @@ 
quat_alg_elem_copy_ibz(quat_alg_elem_t *elem, const ibz_t *coord2, const ibz_t *coord3) { - ibz_copy(&(elem->coord[0]), coord0); - ibz_copy(&(elem->coord[1]), coord1); - ibz_copy(&(elem->coord[2]), coord2); - ibz_copy(&(elem->coord[3]), coord3); + ibz_copy(&(elem->coord.v[0]), coord0); + ibz_copy(&(elem->coord.v[1]), coord1); + ibz_copy(&(elem->coord.v[2]), coord2); + ibz_copy(&(elem->coord.v[3]), coord3); ibz_copy(&(elem->denom), denom); } @@ -274,7 +275,7 @@ void quat_alg_elem_mul_by_scalar(quat_alg_elem_t *res, const ibz_t *scalar, const quat_alg_elem_t *elem) { for (int i = 0; i < 4; i++) { - ibz_mul(&(res->coord[i]), &(elem->coord[i]), scalar); + ibz_mul(&(res->coord.v[i]), &(elem->coord.v[i]), scalar); } ibz_copy(&(res->denom), &(elem->denom)); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/common.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/common.c index 1df7755a29..e051ac340a 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/common.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/common.c @@ -14,6 +14,7 @@ public_key_init(public_key_t *pk) void public_key_finalize(public_key_t *pk) { + (void) pk; } // compute the challenge as the hash of the message and the commitment curve and public key diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim2.c index b31ae7771a..5bf214c4e2 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim2.c @@ -5,34 +5,34 @@ void ibz_vec_2_set(ibz_vec_2_t *vec, int a0, int a1) { - ibz_set(&((*vec)[0]), a0); - ibz_set(&((*vec)[1]), a1); + ibz_set(&(vec->v[0]), a0); + ibz_set(&(vec->v[1]), a1); } void ibz_mat_2x2_set(ibz_mat_2x2_t *mat, int a00, int a01, int a10, int a11) { - ibz_set(&((*mat)[0][0]), a00); - ibz_set(&((*mat)[0][1]), a01); - ibz_set(&((*mat)[1][0]), a10); - ibz_set(&((*mat)[1][1]), a11); + ibz_set(&(mat->m[0][0]), a00); + ibz_set(&(mat->m[0][1]), a01); + ibz_set(&(mat->m[1][0]), a10); + ibz_set(&(mat->m[1][1]), a11); } void ibz_mat_2x2_copy(ibz_mat_2x2_t *copy, const ibz_mat_2x2_t *copied) { - ibz_copy(&((*copy)[0][0]), &((*copied)[0][0])); - ibz_copy(&((*copy)[0][1]), &((*copied)[0][1])); - ibz_copy(&((*copy)[1][0]), &((*copied)[1][0])); - ibz_copy(&((*copy)[1][1]), &((*copied)[1][1])); + ibz_copy(&(copy->m[0][0]), &(copied->m[0][0])); + ibz_copy(&(copy->m[0][1]), &(copied->m[0][1])); + ibz_copy(&(copy->m[1][0]), &(copied->m[1][0])); + ibz_copy(&(copy->m[1][1]), &(copied->m[1][1])); } void ibz_mat_2x2_add(ibz_mat_2x2_t *sum, const ibz_mat_2x2_t *a, const ibz_mat_2x2_t *b) { - ibz_add(&((*sum)[0][0]), &((*a)[0][0]), &((*b)[0][0])); - ibz_add(&((*sum)[0][1]), &((*a)[0][1]), &((*b)[0][1])); - ibz_add(&((*sum)[1][0]), &((*a)[1][0]), &((*b)[1][0])); - ibz_add(&((*sum)[1][1]), &((*a)[1][1]), &((*b)[1][1])); + ibz_add(&(sum->m[0][0]), &(a->m[0][0]), &(b->m[0][0])); + ibz_add(&(sum->m[0][1]), &(a->m[0][1]), &(b->m[0][1])); + ibz_add(&(sum->m[1][0]), &(a->m[1][0]), &(b->m[1][0])); + ibz_add(&(sum->m[1][1]), &(a->m[1][1]), &(b->m[1][1])); } void @@ -53,16 +53,16 @@ ibz_mat_2x2_eval(ibz_vec_2_t *res, const ibz_mat_2x2_t *mat, const ibz_vec_2_t * ibz_vec_2_t matvec; ibz_init(&prod); ibz_vec_2_init(&matvec); - ibz_mul(&prod, &((*mat)[0][0]), &((*vec)[0])); - ibz_copy(&(matvec[0]), &prod); - ibz_mul(&prod, &((*mat)[0][1]), &((*vec)[1])); - ibz_add(&(matvec[0]), &(matvec[0]), &prod); - ibz_mul(&prod, &((*mat)[1][0]), &((*vec)[0])); - ibz_copy(&(matvec[1]), &prod); - ibz_mul(&prod, &((*mat)[1][1]), 
&((*vec)[1])); - ibz_add(&(matvec[1]), &(matvec[1]), &prod); - ibz_copy(&((*res)[0]), &(matvec[0])); - ibz_copy(&((*res)[1]), &(matvec[1])); + ibz_mul(&prod, &(mat->m[0][0]), &(vec->v[0])); + ibz_copy(&(matvec.v[0]), &prod); + ibz_mul(&prod, &(mat->m[0][1]), &(vec->v[1])); + ibz_add(&(matvec.v[0]), &(matvec.v[0]), &prod); + ibz_mul(&prod, &(mat->m[1][0]), &(vec->v[0])); + ibz_copy(&(matvec.v[1]), &prod); + ibz_mul(&prod, &(mat->m[1][1]), &(vec->v[1])); + ibz_add(&(matvec.v[1]), &(matvec.v[1]), &prod); + ibz_copy(&(res->v[0]), &(matvec.v[0])); + ibz_copy(&(res->v[1]), &(matvec.v[1])); ibz_finalize(&prod); ibz_vec_2_finalize(&matvec); } @@ -78,21 +78,21 @@ ibz_2x2_mul_mod(ibz_mat_2x2_t *prod, const ibz_mat_2x2_t *mat_a, const ibz_mat_2 ibz_mat_2x2_init(&sums); for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_set(&(sums[i][j]), 0); + ibz_set(&(sums.m[i][j]), 0); } } for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { for (int k = 0; k < 2; k++) { - ibz_mul(&mul, &((*mat_a)[i][k]), &((*mat_b)[k][j])); - ibz_add(&(sums[i][j]), &(sums[i][j]), &mul); - ibz_mod(&(sums[i][j]), &(sums[i][j]), m); + ibz_mul(&mul, &(mat_a->m[i][k]), &(mat_b->m[k][j])); + ibz_add(&(sums.m[i][j]), &(sums.m[i][j]), &mul); + ibz_mod(&(sums.m[i][j]), &(sums.m[i][j]), m); } } } for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_copy(&((*prod)[i][j]), &(sums[i][j])); + ibz_copy(&(prod->m[i][j]), &(sums.m[i][j])); } } ibz_finalize(&mul); @@ -105,9 +105,9 @@ ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m ibz_t det, prod; ibz_init(&det); ibz_init(&prod); - ibz_mul(&det, &((*mat)[0][0]), &((*mat)[1][1])); + ibz_mul(&det, &(mat->m[0][0]), &(mat->m[1][1])); ibz_mod(&det, &det, m); - ibz_mul(&prod, &((*mat)[0][1]), &((*mat)[1][0])); + ibz_mul(&prod, &(mat->m[0][1]), &(mat->m[1][0])); ibz_sub(&det, &det, &prod); ibz_mod(&det, &det, m); int res = ibz_invmod(&det, &det, m); @@ -115,15 +115,15 @@ ibz_mat_2x2_inv_mod(ibz_mat_2x2_t *inv, const ibz_mat_2x2_t *mat, const ibz_t *m ibz_set(&prod, res); ibz_mul(&det, &det, &prod); // compute inverse - ibz_copy(&prod, &((*mat)[0][0])); - ibz_copy(&((*inv)[0][0]), &((*mat)[1][1])); - ibz_copy(&((*inv)[1][1]), &prod); - ibz_neg(&((*inv)[1][0]), &((*mat)[1][0])); - ibz_neg(&((*inv)[0][1]), &((*mat)[0][1])); + ibz_copy(&prod, &(mat->m[0][0])); + ibz_copy(&(inv->m[0][0]), &(mat->m[1][1])); + ibz_copy(&(inv->m[1][1]), &prod); + ibz_neg(&(inv->m[1][0]), &(mat->m[1][0])); + ibz_neg(&(inv->m[0][1]), &(mat->m[0][1])); for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_mul(&((*inv)[i][j]), &((*inv)[i][j]), &det); - ibz_mod(&((*inv)[i][j]), &((*inv)[i][j]), m); + ibz_mul(&(inv->m[i][j]), &(inv->m[i][j]), &det); + ibz_mod(&(inv->m[i][j]), &(inv->m[i][j]), m); } } ibz_finalize(&det); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim2id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim2id2iso.c index 171473d481..143060e2c3 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim2id2iso.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim2id2iso.c @@ -137,10 +137,10 @@ _fixed_degree_isogeny_impl(quat_left_ideal_t *lideal, ibz_invmod(&tmp, &tmp, &two_pow); assert(!ibz_is_even(&tmp)); - ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); - ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); - ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); - ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + ibz_mul(&theta.coord.v[0], &theta.coord.v[0], &tmp); + ibz_mul(&theta.coord.v[1], &theta.coord.v[1], &tmp); + 
ibz_mul(&theta.coord.v[2], &theta.coord.v[2], &tmp); + ibz_mul(&theta.coord.v[3], &theta.coord.v[3], &tmp); // applying theta to the basis ec_basis_t B0_two_theta; @@ -197,53 +197,53 @@ post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, const ibz_ // treatment if (is_special_order) { // reordering the basis if needed - if (ibz_cmp(&(*gram)[0][0], &(*gram)[2][2]) == 0) { + if (ibz_cmp(&gram->m[0][0], &gram->m[2][2]) == 0) { for (int i = 0; i < 4; i++) { - ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + ibz_swap(&reduced->m[i][1], &reduced->m[i][2]); } - ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); - ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); - ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); - ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); - ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); - } else if (ibz_cmp(&(*gram)[0][0], &(*gram)[3][3]) == 0) { + ibz_swap(&gram->m[0][2], &gram->m[0][1]); + ibz_swap(&gram->m[2][0], &gram->m[1][0]); + ibz_swap(&gram->m[3][2], &gram->m[3][1]); + ibz_swap(&gram->m[2][3], &gram->m[1][3]); + ibz_swap(&gram->m[2][2], &gram->m[1][1]); + } else if (ibz_cmp(&gram->m[0][0], &gram->m[3][3]) == 0) { for (int i = 0; i < 4; i++) { - ibz_swap(&(*reduced)[i][1], &(*reduced)[i][3]); + ibz_swap(&reduced->m[i][1], &reduced->m[i][3]); } - ibz_swap(&(*gram)[0][3], &(*gram)[0][1]); - ibz_swap(&(*gram)[3][0], &(*gram)[1][0]); - ibz_swap(&(*gram)[2][3], &(*gram)[2][1]); - ibz_swap(&(*gram)[3][2], &(*gram)[1][2]); - ibz_swap(&(*gram)[3][3], &(*gram)[1][1]); - } else if (ibz_cmp(&(*gram)[1][1], &(*gram)[3][3]) == 0) { + ibz_swap(&gram->m[0][3], &gram->m[0][1]); + ibz_swap(&gram->m[3][0], &gram->m[1][0]); + ibz_swap(&gram->m[2][3], &gram->m[2][1]); + ibz_swap(&gram->m[3][2], &gram->m[1][2]); + ibz_swap(&gram->m[3][3], &gram->m[1][1]); + } else if (ibz_cmp(&gram->m[1][1], &gram->m[3][3]) == 0) { // in this case it seems that we need to swap the second and third // element, and then recompute entirely the second element from the first // first we swap the second and third element for (int i = 0; i < 4; i++) { - ibz_swap(&(*reduced)[i][1], &(*reduced)[i][2]); + ibz_swap(&reduced->m[i][1], &reduced->m[i][2]); } - ibz_swap(&(*gram)[0][2], &(*gram)[0][1]); - ibz_swap(&(*gram)[2][0], &(*gram)[1][0]); - ibz_swap(&(*gram)[3][2], &(*gram)[3][1]); - ibz_swap(&(*gram)[2][3], &(*gram)[1][3]); - ibz_swap(&(*gram)[2][2], &(*gram)[1][1]); + ibz_swap(&gram->m[0][2], &gram->m[0][1]); + ibz_swap(&gram->m[2][0], &gram->m[1][0]); + ibz_swap(&gram->m[3][2], &gram->m[3][1]); + ibz_swap(&gram->m[2][3], &gram->m[1][3]); + ibz_swap(&gram->m[2][2], &gram->m[1][1]); } // adjusting the sign if needed - if (ibz_cmp(&(*reduced)[0][0], &(*reduced)[1][1]) != 0) { + if (ibz_cmp(&reduced->m[0][0], &reduced->m[1][1]) != 0) { for (int i = 0; i < 4; i++) { - ibz_neg(&(*reduced)[i][1], &(*reduced)[i][1]); - ibz_neg(&(*gram)[i][1], &(*gram)[i][1]); - ibz_neg(&(*gram)[1][i], &(*gram)[1][i]); + ibz_neg(&reduced->m[i][1], &reduced->m[i][1]); + ibz_neg(&gram->m[i][1], &gram->m[i][1]); + ibz_neg(&gram->m[1][i], &gram->m[1][i]); } } - if (ibz_cmp(&(*reduced)[0][2], &(*reduced)[1][3]) != 0) { + if (ibz_cmp(&reduced->m[0][2], &reduced->m[1][3]) != 0) { for (int i = 0; i < 4; i++) { - ibz_neg(&(*reduced)[i][3], &(*reduced)[i][3]); - ibz_neg(&(*gram)[i][3], &(*gram)[i][3]); - ibz_neg(&(*gram)[3][i], &(*gram)[3][i]); + ibz_neg(&reduced->m[i][3], &reduced->m[i][3]); + ibz_neg(&gram->m[i][3], &gram->m[i][3]); + ibz_neg(&gram->m[3][i], &gram->m[3][i]); } - // assert(ibz_cmp(&(*reduced)[0][2],&(*reduced)[1][3])==0); + // 
assert(ibz_cmp(&reduced->m[0][2],&reduced->m[1][3])==0); } } } @@ -273,7 +273,7 @@ enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t // if the basis is of the form alpha, i*alpha, beta, i*beta // we can remove some values due to symmetry of the basis that bool need_remove_symmetry = - (ibz_cmp(&(*gram)[0][0], &(*gram)[1][1]) == 0 && ibz_cmp(&(*gram)[3][3], &(*gram)[2][2]) == 0); + (ibz_cmp(&gram->m[0][0], &gram->m[1][1]) == 0 && ibz_cmp(&gram->m[3][3], &gram->m[2][2]) == 0); int check1, check2, check3; @@ -324,10 +324,10 @@ enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t // and we ensure that we don't record the same norm in the list if (!need_remove_symmetry || (check1 <= check2 && check1 <= check3)) { // Set the point as a vector (x, y, z, w) - ibz_set(&point[0], x); - ibz_set(&point[1], y); - ibz_set(&point[2], z); - ibz_set(&point[3], w); + ibz_set(&point.v[0], x); + ibz_set(&point.v[1], y); + ibz_set(&point.v[2], z); + ibz_set(&point.v[3], w); // Evaluate this through the gram matrix and divide out by the // adjusted_norm @@ -336,10 +336,10 @@ enumerate_hypercube(ibz_vec_4_t *vecs, ibz_t *norms, int m, const ibz_mat_4x4_t assert(ibz_is_zero(&remain)); if (ibz_mod_ui(&norm, 2) == 1) { - ibz_set(&vecs[count][0], x); - ibz_set(&vecs[count][1], y); - ibz_set(&vecs[count][2], z); - ibz_set(&vecs[count][3], w); + ibz_set(&vecs[count].v[0], x); + ibz_set(&vecs[count].v[1], y); + ibz_set(&vecs[count].v[2], z); + ibz_set(&vecs[count].v[3], w); ibz_copy(&norms[count], &norm); count++; } @@ -530,10 +530,10 @@ find_uv(ibz_t *u, quat_alg_elem_t delta; // delta will be the element of smallest norm quat_alg_elem_init(&delta); - ibz_set(&delta.coord[0], 1); - ibz_set(&delta.coord[1], 0); - ibz_set(&delta.coord[2], 0); - ibz_set(&delta.coord[3], 0); + ibz_set(&delta.coord.v[0], 1); + ibz_set(&delta.coord.v[1], 0); + ibz_set(&delta.coord.v[2], 0); + ibz_set(&delta.coord.v[3], 0); ibz_copy(&delta.denom, &reduced_id.lattice.denom); ibz_mat_4x4_eval(&delta.coord, &reduced[0], &delta.coord); assert(quat_lattice_contains(NULL, &reduced_id.lattice, &delta)); @@ -542,7 +542,7 @@ find_uv(ibz_t *u, quat_alg_conj(&delta, &delta); ibz_mul(&delta.denom, &delta.denom, &ideal[0].norm); quat_lattice_alg_elem_mul(&reduced_id.lattice, &reduced_id.lattice, &delta, Bpoo); - ibz_copy(&reduced_id.norm, &gram[0][0][0]); + ibz_copy(&reduced_id.norm, &gram[0].m[0][0]); ibz_div(&reduced_id.norm, &remain, &reduced_id.norm, &adjusted_norm[0]); assert(ibz_cmp(&remain, &ibz_const_zero) == 0); @@ -989,10 +989,10 @@ dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, } ibz_invmod(&tmp, &tmp, &two_pow); - ibz_mul(&theta.coord[0], &theta.coord[0], &tmp); - ibz_mul(&theta.coord[1], &theta.coord[1], &tmp); - ibz_mul(&theta.coord[2], &theta.coord[2], &tmp); - ibz_mul(&theta.coord[3], &theta.coord[3], &tmp); + ibz_mul(&theta.coord.v[0], &theta.coord.v[0], &tmp); + ibz_mul(&theta.coord.v[1], &theta.coord.v[1], &tmp); + ibz_mul(&theta.coord.v[2], &theta.coord.v[2], &tmp); + ibz_mul(&theta.coord.v[3], &theta.coord.v[3], &tmp); // applying theta endomorphism_application_even_basis(&bas2, 0, &Fv_codomain.E1, &theta, TORSION_EVEN_POWER); @@ -1092,10 +1092,10 @@ dim2id2iso_ideal_to_isogeny_clapotis(quat_alg_elem_t *beta1, ibz_mul(&tmp, &tmp, &CONNECTING_IDEALS[index_order1].norm); } ibz_invmod(&tmp, &tmp, &TORSION_PLUS_2POWER); - ibz_mul(&beta1->coord[0], &beta1->coord[0], &tmp); - ibz_mul(&beta1->coord[1], &beta1->coord[1], &tmp); - ibz_mul(&beta1->coord[2], 
&beta1->coord[2], &tmp); - ibz_mul(&beta1->coord[3], &beta1->coord[3], &tmp); + ibz_mul(&beta1->coord.v[0], &beta1->coord.v[0], &tmp); + ibz_mul(&beta1->coord.v[1], &beta1->coord.v[1], &tmp); + ibz_mul(&beta1->coord.v[2], &beta1->coord.v[2], &tmp); + ibz_mul(&beta1->coord.v[3], &beta1->coord.v[3], &tmp); endomorphism_application_even_basis(basis, 0, codomain, beta1, TORSION_EVEN_POWER); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim4.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim4.c index 495dc2dcb2..b024a7d46e 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim4.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim4.c @@ -11,16 +11,16 @@ ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t ibz_mat_4x4_init(&mat); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&(mat[i][j]), 0); + ibz_set(&(mat.m[i][j]), 0); for (int k = 0; k < 4; k++) { - ibz_mul(&prod, &((*a)[i][k]), &((*b)[k][j])); - ibz_add(&(mat[i][j]), &(mat[i][j]), &prod); + ibz_mul(&prod, &(a->m[i][k]), &(b->m[k][j])); + ibz_add(&(mat.m[i][j]), &(mat.m[i][j]), &prod); } } } for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&((*res)[i][j]), &(mat[i][j])); + ibz_copy(&(res->m[i][j]), &(mat.m[i][j])); } } ibz_mat_4x4_finalize(&mat); @@ -31,61 +31,61 @@ ibz_mat_4x4_mul(ibz_mat_4x4_t *res, const ibz_mat_4x4_t *a, const ibz_mat_4x4_t void ibz_vec_4_set(ibz_vec_4_t *vec, int32_t coord0, int32_t coord1, int32_t coord2, int32_t coord3) { - ibz_set(&((*vec)[0]), coord0); - ibz_set(&((*vec)[1]), coord1); - ibz_set(&((*vec)[2]), coord2); - ibz_set(&((*vec)[3]), coord3); + ibz_set(&(vec->v[0]), coord0); + ibz_set(&(vec->v[1]), coord1); + ibz_set(&(vec->v[2]), coord2); + ibz_set(&(vec->v[3]), coord3); } void ibz_vec_4_copy(ibz_vec_4_t *new, const ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_copy(&((*new)[i]), &((*vec)[i])); + ibz_copy(&(new->v[i]), &(vec->v[i])); } } void ibz_vec_4_copy_ibz(ibz_vec_4_t *res, const ibz_t *coord0, const ibz_t *coord1, const ibz_t *coord2, const ibz_t *coord3) { - ibz_copy(&((*res)[0]), coord0); - ibz_copy(&((*res)[1]), coord1); - ibz_copy(&((*res)[2]), coord2); - ibz_copy(&((*res)[3]), coord3); + ibz_copy(&(res->v[0]), coord0); + ibz_copy(&(res->v[1]), coord1); + ibz_copy(&(res->v[2]), coord2); + ibz_copy(&(res->v[3]), coord3); } void ibz_vec_4_content(ibz_t *content, const ibz_vec_4_t *v) { - ibz_gcd(content, &((*v)[0]), &((*v)[1])); - ibz_gcd(content, &((*v)[2]), content); - ibz_gcd(content, &((*v)[3]), content); + ibz_gcd(content, &(v->v[0]), &(v->v[1])); + ibz_gcd(content, &(v->v[2]), content); + ibz_gcd(content, &(v->v[3]), content); } void ibz_vec_4_negate(ibz_vec_4_t *neg, const ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_neg(&((*neg)[i]), &((*vec)[i])); + ibz_neg(&(neg->v[i]), &(vec->v[i])); } } void ibz_vec_4_add(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) { - ibz_add(&((*res)[0]), &((*a)[0]), &((*b)[0])); - ibz_add(&((*res)[1]), &((*a)[1]), &((*b)[1])); - ibz_add(&((*res)[2]), &((*a)[2]), &((*b)[2])); - ibz_add(&((*res)[3]), &((*a)[3]), &((*b)[3])); + ibz_add(&(res->v[0]), &(a->v[0]), &(b->v[0])); + ibz_add(&(res->v[1]), &(a->v[1]), &(b->v[1])); + ibz_add(&(res->v[2]), &(a->v[2]), &(b->v[2])); + ibz_add(&(res->v[3]), &(a->v[3]), &(b->v[3])); } void ibz_vec_4_sub(ibz_vec_4_t *res, const ibz_vec_4_t *a, const ibz_vec_4_t *b) { - ibz_sub(&((*res)[0]), &((*a)[0]), &((*b)[0])); - ibz_sub(&((*res)[1]), &((*a)[1]), &((*b)[1])); - ibz_sub(&((*res)[2]), &((*a)[2]), 
&((*b)[2])); - ibz_sub(&((*res)[3]), &((*a)[3]), &((*b)[3])); + ibz_sub(&(res->v[0]), &(a->v[0]), &(b->v[0])); + ibz_sub(&(res->v[1]), &(a->v[1]), &(b->v[1])); + ibz_sub(&(res->v[2]), &(a->v[2]), &(b->v[2])); + ibz_sub(&(res->v[3]), &(a->v[3]), &(b->v[3])); } int @@ -93,7 +93,7 @@ ibz_vec_4_is_zero(const ibz_vec_4_t *x) { int res = 1; for (int i = 0; i < 4; i++) { - res &= ibz_is_zero(&((*x)[i])); + res &= ibz_is_zero(&(x->v[i])); } return (res); } @@ -110,12 +110,12 @@ ibz_vec_4_linear_combination(ibz_vec_4_t *lc, ibz_vec_4_init(&sums); ibz_init(&prod); for (int i = 0; i < 4; i++) { - ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); - ibz_mul(&prod, coeff_b, &((*vec_b)[i])); - ibz_add(&(sums[i]), &(sums[i]), &prod); + ibz_mul(&(sums.v[i]), coeff_a, &(vec_a->v[i])); + ibz_mul(&prod, coeff_b, &(vec_b->v[i])); + ibz_add(&(sums.v[i]), &(sums.v[i]), &prod); } for (int i = 0; i < 4; i++) { - ibz_copy(&((*lc)[i]), &(sums[i])); + ibz_copy(&(lc->v[i]), &(sums.v[i])); } ibz_finalize(&prod); ibz_vec_4_finalize(&sums); @@ -125,7 +125,7 @@ void ibz_vec_4_scalar_mul(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_mul(&((*prod)[i]), &((*vec)[i]), scalar); + ibz_mul(&(prod->v[i]), &(vec->v[i]), scalar); } } @@ -136,7 +136,7 @@ ibz_vec_4_scalar_div(ibz_vec_4_t *quot, const ibz_t *scalar, const ibz_vec_4_t * ibz_t r; ibz_init(&r); for (int i = 0; i < 4; i++) { - ibz_div(&((*quot)[i]), &r, &((*vec)[i]), scalar); + ibz_div(&(quot->v[i]), &r, &(vec->v[i]), scalar); res = res && ibz_is_zero(&r); } ibz_finalize(&r); @@ -148,7 +148,7 @@ ibz_mat_4x4_copy(ibz_mat_4x4_t *new, const ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&((*new)[i][j]), &((*mat)[i][j])); + ibz_copy(&(new->m[i][j]), &(mat->m[i][j])); } } } @@ -158,7 +158,7 @@ ibz_mat_4x4_negate(ibz_mat_4x4_t *neg, const ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_neg(&((*neg)[i][j]), &((*mat)[i][j])); + ibz_neg(&(neg->m[i][j]), &(mat->m[i][j])); } } } @@ -170,7 +170,7 @@ ibz_mat_4x4_transpose(ibz_mat_4x4_t *transposed, const ibz_mat_4x4_t *mat) ibz_mat_4x4_init(&work); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(work[i][j]), &((*mat)[j][i])); + ibz_copy(&(work.m[i][j]), &(mat->m[j][i])); } } ibz_mat_4x4_copy(transposed, &work); @@ -182,7 +182,7 @@ ibz_mat_4x4_zero(ibz_mat_4x4_t *zero) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&((*zero)[i][j]), 0); + ibz_set(&(zero->m[i][j]), 0); } } } @@ -192,9 +192,9 @@ ibz_mat_4x4_identity(ibz_mat_4x4_t *id) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&((*id)[i][j]), 0); + ibz_set(&(id->m[i][j]), 0); } - ibz_set(&((*id)[i][i]), 1); + ibz_set(&(id->m[i][i]), 1); } } @@ -204,7 +204,7 @@ ibz_mat_4x4_is_identity(const ibz_mat_4x4_t *mat) int res = 1; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - res = res && ibz_is_one(&((*mat)[i][j])) == (i == j); + res = res && ibz_is_one(&(mat->m[i][j])) == (i == j); } } return (res); @@ -216,7 +216,7 @@ ibz_mat_4x4_equal(const ibz_mat_4x4_t *mat1, const ibz_mat_4x4_t *mat2) int res = 0; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - res = res | ibz_cmp(&((*mat1)[i][j]), &((*mat2)[i][j])); + res = res | ibz_cmp(&(mat1->m[i][j]), &(mat2->m[i][j])); } } return (!res); @@ -227,7 +227,7 @@ ibz_mat_4x4_scalar_mul(ibz_mat_4x4_t *prod, const ibz_t *scalar, const ibz_mat_4 { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - 
ibz_mul(&((*prod)[i][j]), &((*mat)[i][j]), scalar); + ibz_mul(&(prod->m[i][j]), &(mat->m[i][j]), scalar); } } } @@ -237,10 +237,10 @@ ibz_mat_4x4_gcd(ibz_t *gcd, const ibz_mat_4x4_t *mat) { ibz_t d; ibz_init(&d); - ibz_copy(&d, &((*mat)[0][0])); + ibz_copy(&d, &(mat->m[0][0])); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_gcd(&d, &d, &((*mat)[i][j])); + ibz_gcd(&d, &d, &(mat->m[i][j])); } } ibz_copy(gcd, &d); @@ -255,7 +255,7 @@ ibz_mat_4x4_scalar_div(ibz_mat_4x4_t *quot, const ibz_t *scalar, const ibz_mat_4 ibz_init(&r); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_div(&((*quot)[i][j]), &r, &((*mat)[i][j]), scalar); + ibz_div(&(quot->m[i][j]), &r, &(mat->m[i][j]), scalar); res = res && ibz_is_zero(&r); } } @@ -325,17 +325,17 @@ ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_ // compute some 2x2 minors, store them in s and c for (int i = 0; i < 3; i++) { - ibz_mat_2x2_det_from_ibz(&(s[i]), &((*mat)[0][0]), &((*mat)[0][i + 1]), &((*mat)[1][0]), &((*mat)[1][i + 1])); - ibz_mat_2x2_det_from_ibz(&(c[i]), &((*mat)[2][0]), &((*mat)[2][i + 1]), &((*mat)[3][0]), &((*mat)[3][i + 1])); + ibz_mat_2x2_det_from_ibz(&(s[i]), &(mat->m[0][0]), &(mat->m[0][i + 1]), &(mat->m[1][0]), &(mat->m[1][i + 1])); + ibz_mat_2x2_det_from_ibz(&(c[i]), &(mat->m[2][0]), &(mat->m[2][i + 1]), &(mat->m[3][0]), &(mat->m[3][i + 1])); } for (int i = 0; i < 2; i++) { ibz_mat_2x2_det_from_ibz( - &(s[3 + i]), &((*mat)[0][1]), &((*mat)[0][2 + i]), &((*mat)[1][1]), &((*mat)[1][2 + i])); + &(s[3 + i]), &(mat->m[0][1]), &(mat->m[0][2 + i]), &(mat->m[1][1]), &(mat->m[1][2 + i])); ibz_mat_2x2_det_from_ibz( - &(c[3 + i]), &((*mat)[2][1]), &((*mat)[2][2 + i]), &((*mat)[3][1]), &((*mat)[3][2 + i])); + &(c[3 + i]), &(mat->m[2][1]), &(mat->m[2][2 + i]), &(mat->m[3][1]), &(mat->m[3][2 + i])); } - ibz_mat_2x2_det_from_ibz(&(s[5]), &((*mat)[0][2]), &((*mat)[0][3]), &((*mat)[1][2]), &((*mat)[1][3])); - ibz_mat_2x2_det_from_ibz(&(c[5]), &((*mat)[2][2]), &((*mat)[2][3]), &((*mat)[3][2]), &((*mat)[3][3])); + ibz_mat_2x2_det_from_ibz(&(s[5]), &(mat->m[0][2]), &(mat->m[0][3]), &(mat->m[1][2]), &(mat->m[1][3])); + ibz_mat_2x2_det_from_ibz(&(c[5]), &(mat->m[2][2]), &(mat->m[2][3]), &(mat->m[3][2]), &(mat->m[3][3])); // compute det ibz_set(&work_det, 0); @@ -351,39 +351,39 @@ ibz_mat_4x4_inv_with_det_as_denom(ibz_mat_4x4_t *inv, ibz_t *det, const ibz_mat_ for (int j = 0; j < 4; j++) { for (int k = 0; k < 2; k++) { if ((k + j + 1) % 2 == 1) { - ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), - &((*mat)[1 - k][(j == 0)]), + ibz_inv_dim4_make_coeff_pmp(&(work.m[j][k]), + &(mat->m[1 - k][(j == 0)]), &(c[6 - j - (j == 0)]), - &((*mat)[1 - k][2 - (j > 1)]), + &(mat->m[1 - k][2 - (j > 1)]), &(c[4 - j - (j == 1)]), - &((*mat)[1 - k][3 - (j == 3)]), + &(mat->m[1 - k][3 - (j == 3)]), &(c[3 - j - (j == 1) - (j == 2)])); } else { - ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), - &((*mat)[1 - k][(j == 0)]), + ibz_inv_dim4_make_coeff_mpm(&(work.m[j][k]), + &(mat->m[1 - k][(j == 0)]), &(c[6 - j - (j == 0)]), - &((*mat)[1 - k][2 - (j > 1)]), + &(mat->m[1 - k][2 - (j > 1)]), &(c[4 - j - (j == 1)]), - &((*mat)[1 - k][3 - (j == 3)]), + &(mat->m[1 - k][3 - (j == 3)]), &(c[3 - j - (j == 1) - (j == 2)])); } } for (int k = 2; k < 4; k++) { if ((k + j + 1) % 2 == 1) { - ibz_inv_dim4_make_coeff_pmp(&(work[j][k]), - &((*mat)[3 - (k == 3)][(j == 0)]), + ibz_inv_dim4_make_coeff_pmp(&(work.m[j][k]), + &(mat->m[3 - (k == 3)][(j == 0)]), &(s[6 - j - (j == 0)]), - &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(mat->m[3 - 
(k == 3)][2 - (j > 1)]), &(s[4 - j - (j == 1)]), - &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(mat->m[3 - (k == 3)][3 - (j == 3)]), &(s[3 - j - (j == 1) - (j == 2)])); } else { - ibz_inv_dim4_make_coeff_mpm(&(work[j][k]), - &((*mat)[3 - (k == 3)][(j == 0)]), + ibz_inv_dim4_make_coeff_mpm(&(work.m[j][k]), + &(mat->m[3 - (k == 3)][(j == 0)]), &(s[6 - j - (j == 0)]), - &((*mat)[3 - (k == 3)][2 - (j > 1)]), + &(mat->m[3 - (k == 3)][2 - (j > 1)]), &(s[4 - j - (j == 1)]), - &((*mat)[3 - (k == 3)][3 - (j == 3)]), + &(mat->m[3 - (k == 3)][3 - (j == 3)]), &(s[3 - j - (j == 1) - (j == 2)])); } } @@ -418,8 +418,8 @@ ibz_mat_4x4_eval(ibz_vec_4_t *res, const ibz_mat_4x4_t *mat, const ibz_vec_4_t * // assume initialization to 0 for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_mul(&prod, &(*mat)[i][j], &(*vec)[j]); - ibz_add(&(sum[i]), &(sum[i]), &prod); + ibz_mul(&prod, &mat->m[i][j], &vec->v[j]); + ibz_add(&(sum.v[i]), &(sum.v[i]), &prod); } } ibz_vec_4_copy(res, &sum); @@ -437,8 +437,8 @@ ibz_mat_4x4_eval_t(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_mat_4x4_t // assume initialization to 0 for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_mul(&prod, &(*mat)[j][i], &(*vec)[j]); - ibz_add(&(sum[i]), &(sum[i]), &prod); + ibz_mul(&prod, &mat->m[j][i], &vec->v[j]); + ibz_add(&(sum.v[i]), &(sum.v[i]), &prod); } } ibz_vec_4_copy(res, &sum); @@ -457,14 +457,14 @@ quat_qf_eval(ibz_t *res, const ibz_mat_4x4_t *qf, const ibz_vec_4_t *coord) ibz_vec_4_init(&sum); ibz_mat_4x4_eval(&sum, qf, coord); for (int i = 0; i < 4; i++) { - ibz_mul(&prod, &(sum[i]), &(*coord)[i]); + ibz_mul(&prod, &(sum.v[i]), &coord->v[i]); if (i > 0) { - ibz_add(&(sum[0]), &(sum[0]), &prod); + ibz_add(&(sum.v[0]), &(sum.v[0]), &prod); } else { - ibz_copy(&sum[0], &prod); + ibz_copy(&sum.v[0], &prod); } } - ibz_copy(res, &sum[0]); + ibz_copy(res, &sum.v[0]); ibz_finalize(&prod); ibz_vec_4_finalize(&sum); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/encode_signature.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/encode_signature.c index 112c695941..3a630cfd58 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/encode_signature.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/encode_signature.c @@ -157,17 +157,17 @@ secret_key_to_bytes(byte_t *enc, const secret_key_t *sk, const public_key_t *pk) ibz_finalize(&gcd); } #endif - enc = ibz_to_bytes(enc, &gen.coord[0], FP_ENCODED_BYTES, true); - enc = ibz_to_bytes(enc, &gen.coord[1], FP_ENCODED_BYTES, true); - enc = ibz_to_bytes(enc, &gen.coord[2], FP_ENCODED_BYTES, true); - enc = ibz_to_bytes(enc, &gen.coord[3], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[0], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[1], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[2], FP_ENCODED_BYTES, true); + enc = ibz_to_bytes(enc, &gen.coord.v[3], FP_ENCODED_BYTES, true); quat_alg_elem_finalize(&gen); } - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][0], TORSION_2POWER_BYTES, false); - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[0][1], TORSION_2POWER_BYTES, false); - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][0], TORSION_2POWER_BYTES, false); - enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two[1][1], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[0][0], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[0][1], TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[1][0], 
TORSION_2POWER_BYTES, false); + enc = ibz_to_bytes(enc, &sk->mat_BAcan_to_BA0_two.m[1][1], TORSION_2POWER_BYTES, false); assert(enc - start == SECRETKEY_BYTES); } @@ -187,19 +187,19 @@ secret_key_from_bytes(secret_key_t *sk, public_key_t *pk, const byte_t *enc) quat_alg_elem_t gen; quat_alg_elem_init(&gen); enc = ibz_from_bytes(&norm, enc, FP_ENCODED_BYTES, false); - enc = ibz_from_bytes(&gen.coord[0], enc, FP_ENCODED_BYTES, true); - enc = ibz_from_bytes(&gen.coord[1], enc, FP_ENCODED_BYTES, true); - enc = ibz_from_bytes(&gen.coord[2], enc, FP_ENCODED_BYTES, true); - enc = ibz_from_bytes(&gen.coord[3], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[0], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[1], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[2], enc, FP_ENCODED_BYTES, true); + enc = ibz_from_bytes(&gen.coord.v[3], enc, FP_ENCODED_BYTES, true); quat_lideal_create(&sk->secret_ideal, &gen, &norm, &MAXORD_O0, &QUATALG_PINFTY); ibz_finalize(&norm); quat_alg_elem_finalize(&gen); } - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][0], enc, TORSION_2POWER_BYTES, false); - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[0][1], enc, TORSION_2POWER_BYTES, false); - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][0], enc, TORSION_2POWER_BYTES, false); - enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two[1][1], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[0][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[0][1], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[1][0], enc, TORSION_2POWER_BYTES, false); + enc = ibz_from_bytes(&sk->mat_BAcan_to_BA0_two.m[1][1], enc, TORSION_2POWER_BYTES, false); assert(enc - start == SECRETKEY_BYTES); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/endomorphism_action.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/endomorphism_action.c index dd089e6f4f..d62ffc51c7 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/endomorphism_action.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/endomorphism_action.c @@ -261,223 +261,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3337,0x86b9,0x96a8,0x892f,0x9f38,0x4c8e,0xc497,0xdf75,0x4fd7,0xd5b3,0x5dec,0xd543,0xf3dc,0xd5c3,0x8de3,0xa71e,0xd939,0x1324,0x7073,0x5af3,0xbb6b,0x4122,0x9d0,0x81d7,0x74de,0x3877,0x5ea0,0x6d85,0x7c31,0x8f2d,0x9df2,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3337,0x86b9,0x96a8,0x892f,0x9f38,0x4c8e,0xc497,0xdf75,0x4fd7,0xd5b3,0x5dec,0xd543,0xf3dc,0xd5c3,0x8de3,0xa71e,0xd939,0x1324,0x7073,0x5af3,0xbb6b,0x4122,0x9d0,0x81d7,0x74de,0x3877,0x5ea0,0x6d85,0x7c31,0x8f2d,0x9df2,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b93337,0x892f96a8,0x4c8e9f38,0xdf75c497,0xd5b34fd7,0xd5435dec,0xd5c3f3dc,0xa71e8de3,0x1324d939,0x5af37073,0x4122bb6b,0x81d709d0,0x387774de,0x6d855ea0,0x8f2d7c31,0x19df2}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b93337,0x892f96a8,0x4c8e9f38,0xdf75c497,0xd5b34fd7,0xd5435dec,0xd5c3f3dc,0xa71e8de3,0x1324d939,0x5af37073,0x4122bb6b,0x81d709d0,0x387774de,0x6d855ea0,0x8f2d7c31,0x19df2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0x892f96a886b93337,0xdf75c4974c8e9f38,0xd5435decd5b34fd7,0xa71e8de3d5c3f3dc,0x5af370731324d939,0x81d709d04122bb6b,0x6d855ea0387774de,0x19df28f2d7c31}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x892f96a886b93337,0xdf75c4974c8e9f38,0xd5435decd5b34fd7,0xa71e8de3d5c3f3dc,0x5af370731324d939,0x81d709d04122bb6b,0x6d855ea0387774de,0x19df28f2d7c31}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x28e,0xcfd,0xe1c2,0x2061,0xe412,0x3b18,0x40df,0x716c,0xd025,0xd980,0xc041,0xebb9,0xbb45,0x4982,0x17de,0xa8fe,0xb079,0xffe5,0x34d9,0x2aa6,0x2b0b,0x6787,0x9bab,0x6bc3,0x7365,0x3c03,0xf512,0xb57b,0x897,0xf0b5,0x9c9c,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x28e,0xcfd,0xe1c2,0x2061,0xe412,0x3b18,0x40df,0x716c,0xd025,0xd980,0xc041,0xebb9,0xbb45,0x4982,0x17de,0xa8fe,0xb079,0xffe5,0x34d9,0x2aa6,0x2b0b,0x6787,0x9bab,0x6bc3,0x7365,0x3c03,0xf512,0xb57b,0x897,0xf0b5,0x9c9c,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcfd028e,0x2061e1c2,0x3b18e412,0x716c40df,0xd980d025,0xebb9c041,0x4982bb45,0xa8fe17de,0xffe5b079,0x2aa634d9,0x67872b0b,0x6bc39bab,0x3c037365,0xb57bf512,0xf0b50897,0x89c9c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcfd028e,0x2061e1c2,0x3b18e412,0x716c40df,0xd980d025,0xebb9c041,0x4982bb45,0xa8fe17de,0xffe5b079,0x2aa634d9,0x67872b0b,0x6bc39bab,0x3c037365,0xb57bf512,0xf0b50897,0x89c9c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2061e1c20cfd028e,0x716c40df3b18e412,0xebb9c041d980d025,0xa8fe17de4982bb45,0x2aa634d9ffe5b079,0x6bc39bab67872b0b,0xb57bf5123c037365,0x89c9cf0b50897}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2061e1c20cfd028e,0x716c40df3b18e412,0xebb9c041d980d025,0xa8fe17de4982bb45,0x2aa634d9ffe5b079,0x6bc39bab67872b0b,0xb57bf5123c037365,0x89c9cf0b50897}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x45b1,0x429e,0x37b2,0xd856,0x2f81,0x86cd,0x39cb,0x81ba,0x771e,0xee7e,0x58,0xfbe4,0xa4b,0x28fb,0x7a7d,0x5bb8,0xa413,0x1657,0x2d54,0x3a9d,0xbbad,0x75a3,0x689,0x3069,0xb0ad,0x2fdd,0x2e81,0xad39,0xd3cd,0x2bff,0xb5cb,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x45b1,0x429e,0x37b2,0xd856,0x2f81,0x86cd,0x39cb,0x81ba,0x771e,0xee7e,0x58,0xfbe4,0xa4b,0x28fb,0x7a7d,0x5bb8,0xa413,0x1657,0x2d54,0x3a9d,0xbbad,0x75a3,0x689,0x3069,0xb0ad,0x2fdd,0x2e81,0xad39,0xd3cd,0x2bff,0xb5cb,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x429e45b1,0xd85637b2,0x86cd2f81,0x81ba39cb,0xee7e771e,0xfbe40058,0x28fb0a4b,0x5bb87a7d,0x1657a413,0x3a9d2d54,0x75a3bbad,0x30690689,0x2fddb0ad,0xad392e81,0x2bffd3cd,0x7b5cb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x429e45b1,0xd85637b2,0x86cd2f81,0x81ba39cb,0xee7e771e,0xfbe40058,0x28fb0a4b,0x5bb87a7d,0x1657a413,0x3a9d2d54,0x75a3bbad,0x30690689,0x2fddb0ad,0xad392e81,0x2bffd3cd,0x7b5cb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd85637b2429e45b1,0x81ba39cb86cd2f81,0xfbe40058ee7e771e,0x5bb87a7d28fb0a4b,0x3a9d2d541657a413,0x3069068975a3bbad,0xad392e812fddb0ad,0x7b5cb2bffd3cd}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd85637b2429e45b1,0x81ba39cb86cd2f81,0xfbe40058ee7e771e,0x5bb87a7d28fb0a4b,0x3a9d2d541657a413,0x3069068975a3bbad,0xad392e812fddb0ad,0x7b5cb2bffd3cd}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = 
(mp_limb_t[]) {0xccc9,0x7946,0x6957,0x76d0,0x60c7,0xb371,0x3b68,0x208a,0xb028,0x2a4c,0xa213,0x2abc,0xc23,0x2a3c,0x721c,0x58e1,0x26c6,0xecdb,0x8f8c,0xa50c,0x4494,0xbedd,0xf62f,0x7e28,0x8b21,0xc788,0xa15f,0x927a,0x83ce,0x70d2,0x620d,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xccc9,0x7946,0x6957,0x76d0,0x60c7,0xb371,0x3b68,0x208a,0xb028,0x2a4c,0xa213,0x2abc,0xc23,0x2a3c,0x721c,0x58e1,0x26c6,0xecdb,0x8f8c,0xa50c,0x4494,0xbedd,0xf62f,0x7e28,0x8b21,0xc788,0xa15f,0x927a,0x83ce,0x70d2,0x620d,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7946ccc9,0x76d06957,0xb37160c7,0x208a3b68,0x2a4cb028,0x2abca213,0x2a3c0c23,0x58e1721c,0xecdb26c6,0xa50c8f8c,0xbedd4494,0x7e28f62f,0xc7888b21,0x927aa15f,0x70d283ce,0xe620d}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7946ccc9,0x76d06957,0xb37160c7,0x208a3b68,0x2a4cb028,0x2abca213,0x2a3c0c23,0x58e1721c,0xecdb26c6,0xa50c8f8c,0xbedd4494,0x7e28f62f,0xc7888b21,0x927aa15f,0x70d283ce,0xe620d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x76d069577946ccc9,0x208a3b68b37160c7,0x2abca2132a4cb028,0x58e1721c2a3c0c23,0xa50c8f8cecdb26c6,0x7e28f62fbedd4494,0x927aa15fc7888b21,0xe620d70d283ce}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x76d069577946ccc9,0x208a3b68b37160c7,0x2abca2132a4cb028,0x58e1721c2a3c0c23,0xa50c8f8cecdb26c6,0x7e28f62fbedd4494,0x927aa15fc7888b21,0xe620d70d283ce}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x20f3,0x77e0,0xc9a6,0xeb4f,0xb334,0xff68,0xecb4,0xa6e3,0x5015,0x43c1,0x9e87,0xf4eb,0x22e7,0x5f37,0x9392,0x80a0,0x9ea0,0x670f,0x1be3,0x7559,0x2cb5,0x900d,0xfa83,0x1519,0x67b8,0x4d7c,0xaf3a,0x6dc4,0x12e1,0x1e51,0x8d84,0x5}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x20f3,0x77e0,0xc9a6,0xeb4f,0xb334,0xff68,0xecb4,0xa6e3,0x5015,0x43c1,0x9e87,0xf4eb,0x22e7,0x5f37,0x9392,0x80a0,0x9ea0,0x670f,0x1be3,0x7559,0x2cb5,0x900d,0xfa83,0x1519,0x67b8,0x4d7c,0xaf3a,0x6dc4,0x12e1,0x1e51,0x8d84,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77e020f3,0xeb4fc9a6,0xff68b334,0xa6e3ecb4,0x43c15015,0xf4eb9e87,0x5f3722e7,0x80a09392,0x670f9ea0,0x75591be3,0x900d2cb5,0x1519fa83,0x4d7c67b8,0x6dc4af3a,0x1e5112e1,0x58d84}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77e020f3,0xeb4fc9a6,0xff68b334,0xa6e3ecb4,0x43c15015,0xf4eb9e87,0x5f3722e7,0x80a09392,0x670f9ea0,0x75591be3,0x900d2cb5,0x1519fa83,0x4d7c67b8,0x6dc4af3a,0x1e5112e1,0x58d84}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xeb4fc9a677e020f3,0xa6e3ecb4ff68b334,0xf4eb9e8743c15015,0x80a093925f3722e7,0x75591be3670f9ea0,0x1519fa83900d2cb5,0x6dc4af3a4d7c67b8,0x58d841e5112e1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xeb4fc9a677e020f3,0xa6e3ecb4ff68b334,0xf4eb9e8743c15015,0x80a093925f3722e7,0x75591be3670f9ea0,0x1519fa83900d2cb5,0x6dc4af3a4d7c67b8,0x58d841e5112e1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x8e98,0xe430,0x6d21,0x2fa6,0x524f,0xf0cf,0xe5eb,0x30ec,0x3658,0x7711,0x7d2f,0x47bf,0xbbc5,0x720c,0xe7a6,0x1ef4,0x335f,0x2c25,0x59e5,0x471c,0x5e06,0x5d38,0x62d6,0xa2a7,0x65f3,0xdefc,0x5e15,0x7a7a,0xdac4,0xc542,0x7bb8,0xd}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x8e98,0xe430,0x6d21,0x2fa6,0x524f,0xf0cf,0xe5eb,0x30ec,0x3658,0x7711,0x7d2f,0x47bf,0xbbc5,0x720c,0xe7a6,0x1ef4,0x335f,0x2c25,0x59e5,0x471c,0x5e06,0x5d38,0x62d6,0xa2a7,0x65f3,0xdefc,0x5e15,0x7a7a,0xdac4,0xc542,0x7bb8,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe4308e98,0x2fa66d21,0xf0cf524f,0x30ece5eb,0x77113658,0x47bf7d2f,0x720cbbc5,0x1ef4e7a6,0x2c25335f,0x471c59e5,0x5d385e06,0xa2a762d6,0xdefc65f3,0x7a7a5e15,0xc542dac4,0xd7bb8}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe4308e98,0x2fa66d21,0xf0cf524f,0x30ece5eb,0x77113658,0x47bf7d2f,0x720cbbc5,0x1ef4e7a6,0x2c25335f,0x471c59e5,0x5d385e06,0xa2a762d6,0xdefc65f3,0x7a7a5e15,0xc542dac4,0xd7bb8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2fa66d21e4308e98,0x30ece5ebf0cf524f,0x47bf7d2f77113658,0x1ef4e7a6720cbbc5,0x471c59e52c25335f,0xa2a762d65d385e06,0x7a7a5e15defc65f3,0xd7bb8c542dac4}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2fa66d21e4308e98,0x30ece5ebf0cf524f,0x47bf7d2f77113658,0x1ef4e7a6720cbbc5,0x471c59e52c25335f,0xa2a762d65d385e06,0x7a7a5e15defc65f3,0xd7bb8c542dac4}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3249,0xe4fe,0xec61,0x49e0,0x5b5f,0xc495,0x6ef6,0x811,0x4fdf,0x59fc,0xbd69,0x608e,0xafe2,0xe9a9,0x5706,0x98ac,0xb327,0x481a,0x9c4e,0xecac,0x19fa,0x6401,0xfaad,0x14a4,0xeda,0x3fb5,0x7eb5,0x9768,0x6597,0x4c10,0xdc28,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3249,0xe4fe,0xec61,0x49e0,0x5b5f,0xc495,0x6ef6,0x811,0x4fdf,0x59fc,0xbd69,0x608e,0xafe2,0xe9a9,0x5706,0x98ac,0xb327,0x481a,0x9c4e,0xecac,0x19fa,0x6401,0xfaad,0x14a4,0xeda,0x3fb5,0x7eb5,0x9768,0x6597,0x4c10,0xdc28,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe4fe3249,0x49e0ec61,0xc4955b5f,0x8116ef6,0x59fc4fdf,0x608ebd69,0xe9a9afe2,0x98ac5706,0x481ab327,0xecac9c4e,0x640119fa,0x14a4faad,0x3fb50eda,0x97687eb5,0x4c106597,0xbdc28}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe4fe3249,0x49e0ec61,0xc4955b5f,0x8116ef6,0x59fc4fdf,0x608ebd69,0xe9a9afe2,0x98ac5706,0x481ab327,0xecac9c4e,0x640119fa,0x14a4faad,0x3fb50eda,0x97687eb5,0x4c106597,0xbdc28}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x49e0ec61e4fe3249,0x8116ef6c4955b5f,0x608ebd6959fc4fdf,0x98ac5706e9a9afe2,0xecac9c4e481ab327,0x14a4faad640119fa,0x97687eb53fb50eda,0xbdc284c106597}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x49e0ec61e4fe3249,0x8116ef6c4955b5f,0x608ebd6959fc4fdf,0x98ac5706e9a9afe2,0xecac9c4e481ab327,0x14a4faad640119fa,0x97687eb53fb50eda,0xbdc284c106597}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdf0d,0x881f,0x3659,0x14b0,0x4ccb,0x97,0x134b,0x591c,0xafea,0xbc3e,0x6178,0xb14,0xdd18,0xa0c8,0x6c6d,0x7f5f,0x615f,0x98f0,0xe41c,0x8aa6,0xd34a,0x6ff2,0x57c,0xeae6,0x9847,0xb283,0x50c5,0x923b,0xed1e,0xe1ae,0x727b,0xa}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdf0d,0x881f,0x3659,0x14b0,0x4ccb,0x97,0x134b,0x591c,0xafea,0xbc3e,0x6178,0xb14,0xdd18,0xa0c8,0x6c6d,0x7f5f,0x615f,0x98f0,0xe41c,0x8aa6,0xd34a,0x6ff2,0x57c,0xeae6,0x9847,0xb283,0x50c5,0x923b,0xed1e,0xe1ae,0x727b,0xa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x881fdf0d,0x14b03659,0x974ccb,0x591c134b,0xbc3eafea,0xb146178,0xa0c8dd18,0x7f5f6c6d,0x98f0615f,0x8aa6e41c,0x6ff2d34a,0xeae6057c,0xb2839847,0x923b50c5,0xe1aeed1e,0xa727b}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x881fdf0d,0x14b03659,0x974ccb,0x591c134b,0xbc3eafea,0xb146178,0xa0c8dd18,0x7f5f6c6d,0x98f0615f,0x8aa6e41c,0x6ff2d34a,0xeae6057c,0xb2839847,0x923b50c5,0xe1aeed1e,0xa727b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x14b03659881fdf0d,0x591c134b00974ccb,0xb146178bc3eafea,0x7f5f6c6da0c8dd18,0x8aa6e41c98f0615f,0xeae6057c6ff2d34a,0x923b50c5b2839847,0xa727be1aeed1e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x14b03659881fdf0d,0x591c134b00974ccb,0xb146178bc3eafea,0x7f5f6c6da0c8dd18,0x8aa6e41c98f0615f,0xeae6057c6ff2d34a,0x923b50c5b2839847,0xa727be1aeed1e}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}}} #endif -}}, {{ +}}}, {{{ 
#if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3337,0x86b9,0x96a8,0x892f,0x9f38,0x4c8e,0xc497,0xdf75,0x4fd7,0xd5b3,0x5dec,0xd543,0xf3dc,0xd5c3,0x8de3,0xa71e,0xd939,0x1324,0x7073,0x5af3,0xbb6b,0x4122,0x9d0,0x81d7,0x74de,0x3877,0x5ea0,0x6d85,0x7c31,0x8f2d,0x9df2,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3337,0x86b9,0x96a8,0x892f,0x9f38,0x4c8e,0xc497,0xdf75,0x4fd7,0xd5b3,0x5dec,0xd543,0xf3dc,0xd5c3,0x8de3,0xa71e,0xd939,0x1324,0x7073,0x5af3,0xbb6b,0x4122,0x9d0,0x81d7,0x74de,0x3877,0x5ea0,0x6d85,0x7c31,0x8f2d,0x9df2,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b93337,0x892f96a8,0x4c8e9f38,0xdf75c497,0xd5b34fd7,0xd5435dec,0xd5c3f3dc,0xa71e8de3,0x1324d939,0x5af37073,0x4122bb6b,0x81d709d0,0x387774de,0x6d855ea0,0x8f2d7c31,0x19df2}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b93337,0x892f96a8,0x4c8e9f38,0xdf75c497,0xd5b34fd7,0xd5435dec,0xd5c3f3dc,0xa71e8de3,0x1324d939,0x5af37073,0x4122bb6b,0x81d709d0,0x387774de,0x6d855ea0,0x8f2d7c31,0x19df2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x892f96a886b93337,0xdf75c4974c8e9f38,0xd5435decd5b34fd7,0xa71e8de3d5c3f3dc,0x5af370731324d939,0x81d709d04122bb6b,0x6d855ea0387774de,0x19df28f2d7c31}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x892f96a886b93337,0xdf75c4974c8e9f38,0xd5435decd5b34fd7,0xa71e8de3d5c3f3dc,0x5af370731324d939,0x81d709d04122bb6b,0x6d855ea0387774de,0x19df28f2d7c31}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x28e,0xcfd,0xe1c2,0x2061,0xe412,0x3b18,0x40df,0x716c,0xd025,0xd980,0xc041,0xebb9,0xbb45,0x4982,0x17de,0xa8fe,0xb079,0xffe5,0x34d9,0x2aa6,0x2b0b,0x6787,0x9bab,0x6bc3,0x7365,0x3c03,0xf512,0xb57b,0x897,0xf0b5,0x9c9c,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x28e,0xcfd,0xe1c2,0x2061,0xe412,0x3b18,0x40df,0x716c,0xd025,0xd980,0xc041,0xebb9,0xbb45,0x4982,0x17de,0xa8fe,0xb079,0xffe5,0x34d9,0x2aa6,0x2b0b,0x6787,0x9bab,0x6bc3,0x7365,0x3c03,0xf512,0xb57b,0x897,0xf0b5,0x9c9c,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcfd028e,0x2061e1c2,0x3b18e412,0x716c40df,0xd980d025,0xebb9c041,0x4982bb45,0xa8fe17de,0xffe5b079,0x2aa634d9,0x67872b0b,0x6bc39bab,0x3c037365,0xb57bf512,0xf0b50897,0x89c9c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcfd028e,0x2061e1c2,0x3b18e412,0x716c40df,0xd980d025,0xebb9c041,0x4982bb45,0xa8fe17de,0xffe5b079,0x2aa634d9,0x67872b0b,0x6bc39bab,0x3c037365,0xb57bf512,0xf0b50897,0x89c9c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2061e1c20cfd028e,0x716c40df3b18e412,0xebb9c041d980d025,0xa8fe17de4982bb45,0x2aa634d9ffe5b079,0x6bc39bab67872b0b,0xb57bf5123c037365,0x89c9cf0b50897}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2061e1c20cfd028e,0x716c40df3b18e412,0xebb9c041d980d025,0xa8fe17de4982bb45,0x2aa634d9ffe5b079,0x6bc39bab67872b0b,0xb57bf5123c037365,0x89c9cf0b50897}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x45b1,0x429e,0x37b2,0xd856,0x2f81,0x86cd,0x39cb,0x81ba,0x771e,0xee7e,0x58,0xfbe4,0xa4b,0x28fb,0x7a7d,0x5bb8,0xa413,0x1657,0x2d54,0x3a9d,0xbbad,0x75a3,0x689,0x3069,0xb0ad,0x2fdd,0x2e81,0xad39,0xd3cd,0x2bff,0xb5cb,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x45b1,0x429e,0x37b2,0xd856,0x2f81,0x86cd,0x39cb,0x81ba,0x771e,0xee7e,0x58,0xfbe4,0xa4b,0x28fb,0x7a7d,0x5bb8,0xa413,0x1657,0x2d54,0x3a9d,0xbbad,0x75a3,0x689,0x3069,0xb0ad,0x2fdd,0x2e81,0xad39,0xd3cd,0x2bff,0xb5cb,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x429e45b1,0xd85637b2,0x86cd2f81,0x81ba39cb,0xee7e771e,0xfbe40058,0x28fb0a4b,0x5bb87a7d,0x1657a413,0x3a9d2d54,0x75a3bbad,0x30690689,0x2fddb0ad,0xad392e81,0x2bffd3cd,0x7b5cb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x429e45b1,0xd85637b2,0x86cd2f81,0x81ba39cb,0xee7e771e,0xfbe40058,0x28fb0a4b,0x5bb87a7d,0x1657a413,0x3a9d2d54,0x75a3bbad,0x30690689,0x2fddb0ad,0xad392e81,0x2bffd3cd,0x7b5cb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd85637b2429e45b1,0x81ba39cb86cd2f81,0xfbe40058ee7e771e,0x5bb87a7d28fb0a4b,0x3a9d2d541657a413,0x3069068975a3bbad,0xad392e812fddb0ad,0x7b5cb2bffd3cd}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd85637b2429e45b1,0x81ba39cb86cd2f81,0xfbe40058ee7e771e,0x5bb87a7d28fb0a4b,0x3a9d2d541657a413,0x3069068975a3bbad,0xad392e812fddb0ad,0x7b5cb2bffd3cd}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xccc9,0x7946,0x6957,0x76d0,0x60c7,0xb371,0x3b68,0x208a,0xb028,0x2a4c,0xa213,0x2abc,0xc23,0x2a3c,0x721c,0x58e1,0x26c6,0xecdb,0x8f8c,0xa50c,0x4494,0xbedd,0xf62f,0x7e28,0x8b21,0xc788,0xa15f,0x927a,0x83ce,0x70d2,0x620d,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xccc9,0x7946,0x6957,0x76d0,0x60c7,0xb371,0x3b68,0x208a,0xb028,0x2a4c,0xa213,0x2abc,0xc23,0x2a3c,0x721c,0x58e1,0x26c6,0xecdb,0x8f8c,0xa50c,0x4494,0xbedd,0xf62f,0x7e28,0x8b21,0xc788,0xa15f,0x927a,0x83ce,0x70d2,0x620d,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7946ccc9,0x76d06957,0xb37160c7,0x208a3b68,0x2a4cb028,0x2abca213,0x2a3c0c23,0x58e1721c,0xecdb26c6,0xa50c8f8c,0xbedd4494,0x7e28f62f,0xc7888b21,0x927aa15f,0x70d283ce,0xe620d}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7946ccc9,0x76d06957,0xb37160c7,0x208a3b68,0x2a4cb028,0x2abca213,0x2a3c0c23,0x58e1721c,0xecdb26c6,0xa50c8f8c,0xbedd4494,0x7e28f62f,0xc7888b21,0x927aa15f,0x70d283ce,0xe620d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x76d069577946ccc9,0x208a3b68b37160c7,0x2abca2132a4cb028,0x58e1721c2a3c0c23,0xa50c8f8cecdb26c6,0x7e28f62fbedd4494,0x927aa15fc7888b21,0xe620d70d283ce}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x76d069577946ccc9,0x208a3b68b37160c7,0x2abca2132a4cb028,0x58e1721c2a3c0c23,0xa50c8f8cecdb26c6,0x7e28f62fbedd4494,0x927aa15fc7888b21,0xe620d70d283ce}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xaa15,0x7f4c,0xb027,0xba3f,0xa936,0x25fb,0xd8a6,0xc32c,0x4ff6,0xcba,0x7e3a,0x6517,0x8b62,0x1a7d,0x90bb,0x13df,0x3bed,0x3d1a,0x462b,0x6826,0xf410,0xe897,0x8229,0x4b78,0xee4b,0x42f9,0x6ed,0x6da5,0x4789,0x56bf,0x95bb,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xaa15,0x7f4c,0xb027,0xba3f,0xa936,0x25fb,0xd8a6,0xc32c,0x4ff6,0xcba,0x7e3a,0x6517,0x8b62,0x1a7d,0x90bb,0x13df,0x3bed,0x3d1a,0x462b,0x6826,0xf410,0xe897,0x8229,0x4b78,0xee4b,0x42f9,0x6ed,0x6da5,0x4789,0x56bf,0x95bb,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x7f4caa15,0xba3fb027,0x25fba936,0xc32cd8a6,0xcba4ff6,0x65177e3a,0x1a7d8b62,0x13df90bb,0x3d1a3bed,0x6826462b,0xe897f410,0x4b788229,0x42f9ee4b,0x6da506ed,0x56bf4789,0xb95bb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7f4caa15,0xba3fb027,0x25fba936,0xc32cd8a6,0xcba4ff6,0x65177e3a,0x1a7d8b62,0x13df90bb,0x3d1a3bed,0x6826462b,0xe897f410,0x4b788229,0x42f9ee4b,0x6da506ed,0x56bf4789,0xb95bb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xba3fb0277f4caa15,0xc32cd8a625fba936,0x65177e3a0cba4ff6,0x13df90bb1a7d8b62,0x6826462b3d1a3bed,0x4b788229e897f410,0x6da506ed42f9ee4b,0xb95bb56bf4789}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xba3fb0277f4caa15,0xc32cd8a625fba936,0x65177e3a0cba4ff6,0x13df90bb1a7d8b62,0x6826462b3d1a3bed,0x4b788229e897f410,0x6da506ed42f9ee4b,0xb95bb56bf4789}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc893,0xf896,0x2771,0xa804,0x1b30,0x95f4,0x9365,0xd12c,0x33e,0xa849,0x9eb8,0x99bc,0xbb85,0x5dc7,0x7fc2,0x63f9,0x71ec,0x9605,0x475f,0xb8e1,0xc488,0xe25f,0x7f40,0x8735,0xecac,0xd7f,0x2994,0x17fb,0xf1ae,0xdafb,0xc2a,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc893,0xf896,0x2771,0xa804,0x1b30,0x95f4,0x9365,0xd12c,0x33e,0xa849,0x9eb8,0x99bc,0xbb85,0x5dc7,0x7fc2,0x63f9,0x71ec,0x9605,0x475f,0xb8e1,0xc488,0xe25f,0x7f40,0x8735,0xecac,0xd7f,0x2994,0x17fb,0xf1ae,0xdafb,0xc2a,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf896c893,0xa8042771,0x95f41b30,0xd12c9365,0xa849033e,0x99bc9eb8,0x5dc7bb85,0x63f97fc2,0x960571ec,0xb8e1475f,0xe25fc488,0x87357f40,0xd7fecac,0x17fb2994,0xdafbf1ae,0x30c2a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf896c893,0xa8042771,0x95f41b30,0xd12c9365,0xa849033e,0x99bc9eb8,0x5dc7bb85,0x63f97fc2,0x960571ec,0xb8e1475f,0xe25fc488,0x87357f40,0xd7fecac,0x17fb2994,0xdafbf1ae,0x30c2a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa8042771f896c893,0xd12c936595f41b30,0x99bc9eb8a849033e,0x63f97fc25dc7bb85,0xb8e1475f960571ec,0x87357f40e25fc488,0x17fb29940d7fecac,0x30c2adafbf1ae}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa8042771f896c893,0xd12c936595f41b30,0x99bc9eb8a849033e,0x63f97fc25dc7bb85,0xb8e1475f960571ec,0x87357f40e25fc488,0x17fb29940d7fecac,0x30c2adafbf1ae}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3bfd,0x13ce,0x920a,0x911b,0x4570,0x25b1,0xd461,0xc4e5,0x637e,0x243d,0x5ee1,0x2e39,0x5d17,0x952,0x68c2,0x7a32,0x2b9d,0x2f39,0xe4d1,0x13a4,0x6ad4,0x6cd2,0x9b,0xa287,0x5fc3,0x37c9,0xd69b,0xa250,0x1cb2,0xbc08,0xc8f9,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3bfd,0x13ce,0x920a,0x911b,0x4570,0x25b1,0xd461,0xc4e5,0x637e,0x243d,0x5ee1,0x2e39,0x5d17,0x952,0x68c2,0x7a32,0x2b9d,0x2f39,0xe4d1,0x13a4,0x6ad4,0x6cd2,0x9b,0xa287,0x5fc3,0x37c9,0xd69b,0xa250,0x1cb2,0xbc08,0xc8f9,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x13ce3bfd,0x911b920a,0x25b14570,0xc4e5d461,0x243d637e,0x2e395ee1,0x9525d17,0x7a3268c2,0x2f392b9d,0x13a4e4d1,0x6cd26ad4,0xa287009b,0x37c95fc3,0xa250d69b,0xbc081cb2,0x1c8f9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x13ce3bfd,0x911b920a,0x25b14570,0xc4e5d461,0x243d637e,0x2e395ee1,0x9525d17,0x7a3268c2,0x2f392b9d,0x13a4e4d1,0x6cd26ad4,0xa287009b,0x37c95fc3,0xa250d69b,0xbc081cb2,0x1c8f9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, 
._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x911b920a13ce3bfd,0xc4e5d46125b14570,0x2e395ee1243d637e,0x7a3268c209525d17,0x13a4e4d12f392b9d,0xa287009b6cd26ad4,0xa250d69b37c95fc3,0x1c8f9bc081cb2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x911b920a13ce3bfd,0xc4e5d46125b14570,0x2e395ee1243d637e,0x7a3268c209525d17,0x13a4e4d12f392b9d,0xa287009b6cd26ad4,0xa250d69b37c95fc3,0x1c8f9bc081cb2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x55eb,0x80b3,0x4fd8,0x45c0,0x56c9,0xda04,0x2759,0x3cd3,0xb009,0xf345,0x81c5,0x9ae8,0x749d,0xe582,0x6f44,0xec20,0xc412,0xc2e5,0xb9d4,0x97d9,0xbef,0x1768,0x7dd6,0xb487,0x11b4,0xbd06,0xf912,0x925a,0xb876,0xa940,0x6a44,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x55eb,0x80b3,0x4fd8,0x45c0,0x56c9,0xda04,0x2759,0x3cd3,0xb009,0xf345,0x81c5,0x9ae8,0x749d,0xe582,0x6f44,0xec20,0xc412,0xc2e5,0xb9d4,0x97d9,0xbef,0x1768,0x7dd6,0xb487,0x11b4,0xbd06,0xf912,0x925a,0xb876,0xa940,0x6a44,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x80b355eb,0x45c04fd8,0xda0456c9,0x3cd32759,0xf345b009,0x9ae881c5,0xe582749d,0xec206f44,0xc2e5c412,0x97d9b9d4,0x17680bef,0xb4877dd6,0xbd0611b4,0x925af912,0xa940b876,0x46a44}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x80b355eb,0x45c04fd8,0xda0456c9,0x3cd32759,0xf345b009,0x9ae881c5,0xe582749d,0xec206f44,0xc2e5c412,0x97d9b9d4,0x17680bef,0xb4877dd6,0xbd0611b4,0x925af912,0xa940b876,0x46a44}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x45c04fd880b355eb,0x3cd32759da0456c9,0x9ae881c5f345b009,0xec206f44e582749d,0x97d9b9d4c2e5c412,0xb4877dd617680bef,0x925af912bd0611b4,0x46a44a940b876}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x45c04fd880b355eb,0x3cd32759da0456c9,0x9ae881c5f345b009,0xec206f44e582749d,0x97d9b9d4c2e5c412,0xb4877dd617680bef,0x925af912bd0611b4,0x46a44a940b876}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}}} #endif , #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x4d6, 0x18fd, 0x18c0, 0x13c1, 0x1718, 0x122c, 0x830, 0xa02, 0xee0, 0x1ad1, 0x1485, 0x27f, 0x13e5, 0x118f, 0xdce, 0x31c, 0x1b49, 0x998, 0x117, 0x343, 0xdae, 0x1fe7, 0x16a0, 0x1c43, 0x172, 0xa6e, 0x702, 0xeb8, 0x835, 0x1cf, 0x48d, 0x4e4, 0x13eb, 0x57d, 0xccf, 0x97e, 0x3b6, 0x910, 0x2dd} @@ -737,223 +737,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x625d,0xe815,0xba6a,0x6297,0x1a75,0xd2b2,0xc698,0xd28b,0xb1,0x97f3,0x82e4,0x4126,0xf8e7,0x3639,0x1816,0x9c43,0xd2cf,0x37da,0xad52,0x2b8a,0x91cb,0x4297,0x6be7,0x1a98,0x19e4,0xa8a,0x22de,0x835c,0x4d5f,0x4596,0xa957,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x625d,0xe815,0xba6a,0x6297,0x1a75,0xd2b2,0xc698,0xd28b,0xb1,0x97f3,0x82e4,0x4126,0xf8e7,0x3639,0x1816,0x9c43,0xd2cf,0x37da,0xad52,0x2b8a,0x91cb,0x4297,0x6be7,0x1a98,0x19e4,0xa8a,0x22de,0x835c,0x4d5f,0x4596,0xa957,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe815625d,0x6297ba6a,0xd2b21a75,0xd28bc698,0x97f300b1,0x412682e4,0x3639f8e7,0x9c431816,0x37dad2cf,0x2b8aad52,0x429791cb,0x1a986be7,0xa8a19e4,0x835c22de,0x45964d5f,0x7a957}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe815625d,0x6297ba6a,0xd2b21a75,0xd28bc698,0x97f300b1,0x412682e4,0x3639f8e7,0x9c431816,0x37dad2cf,0x2b8aad52,0x429791cb,0x1a986be7,0xa8a19e4,0x835c22de,0x45964d5f,0x7a957}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6297ba6ae815625d,0xd28bc698d2b21a75,0x412682e497f300b1,0x9c4318163639f8e7,0x2b8aad5237dad2cf,0x1a986be7429791cb,0x835c22de0a8a19e4,0x7a95745964d5f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6297ba6ae815625d,0xd28bc698d2b21a75,0x412682e497f300b1,0x9c4318163639f8e7,0x2b8aad5237dad2cf,0x1a986be7429791cb,0x835c22de0a8a19e4,0x7a95745964d5f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf6c6,0x71f7,0xa10,0xf04d,0x23dd,0x2419,0x60da,0xbcad,0xe86c,0xb26d,0x2f1d,0xdb61,0xae7,0xd947,0xb159,0x9d94,0x42db,0x8401,0xe3fe,0xcd5c,0x7586,0xfd38,0xf28a,0xc4c5,0x40d7,0x5c22,0x185c,0xd31f,0x843f,0xe782,0xb462,0x5}}} 
+{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf6c6,0x71f7,0xa10,0xf04d,0x23dd,0x2419,0x60da,0xbcad,0xe86c,0xb26d,0x2f1d,0xdb61,0xae7,0xd947,0xb159,0x9d94,0x42db,0x8401,0xe3fe,0xcd5c,0x7586,0xfd38,0xf28a,0xc4c5,0x40d7,0x5c22,0x185c,0xd31f,0x843f,0xe782,0xb462,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x71f7f6c6,0xf04d0a10,0x241923dd,0xbcad60da,0xb26de86c,0xdb612f1d,0xd9470ae7,0x9d94b159,0x840142db,0xcd5ce3fe,0xfd387586,0xc4c5f28a,0x5c2240d7,0xd31f185c,0xe782843f,0x5b462}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x71f7f6c6,0xf04d0a10,0x241923dd,0xbcad60da,0xb26de86c,0xdb612f1d,0xd9470ae7,0x9d94b159,0x840142db,0xcd5ce3fe,0xfd387586,0xc4c5f28a,0x5c2240d7,0xd31f185c,0xe782843f,0x5b462}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf04d0a1071f7f6c6,0xbcad60da241923dd,0xdb612f1db26de86c,0x9d94b159d9470ae7,0xcd5ce3fe840142db,0xc4c5f28afd387586,0xd31f185c5c2240d7,0x5b462e782843f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf04d0a1071f7f6c6,0xbcad60da241923dd,0xdb612f1db26de86c,0x9d94b159d9470ae7,0xcd5ce3fe840142db,0xc4c5f28afd387586,0xd31f185c5c2240d7,0x5b462e782843f}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1c53,0x7318,0xfc96,0x4b34,0xd504,0xae17,0x4d56,0xa914,0x6a8c,0x69,0x2448,0x28b,0x5716,0xce1b,0xa7cc,0xd29c,0x48cf,0x1028,0xc81e,0x40dd,0xbcdf,0x6d4,0x36f7,0xcb9f,0x8d7,0x3a34,0x99c0,0x38d3,0x6e18,0xb4bc,0xe1a8,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1c53,0x7318,0xfc96,0x4b34,0xd504,0xae17,0x4d56,0xa914,0x6a8c,0x69,0x2448,0x28b,0x5716,0xce1b,0xa7cc,0xd29c,0x48cf,0x1028,0xc81e,0x40dd,0xbcdf,0x6d4,0x36f7,0xcb9f,0x8d7,0x3a34,0x99c0,0x38d3,0x6e18,0xb4bc,0xe1a8,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x73181c53,0x4b34fc96,0xae17d504,0xa9144d56,0x696a8c,0x28b2448,0xce1b5716,0xd29ca7cc,0x102848cf,0x40ddc81e,0x6d4bcdf,0xcb9f36f7,0x3a3408d7,0x38d399c0,0xb4bc6e18,0xbe1a8}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x73181c53,0x4b34fc96,0xae17d504,0xa9144d56,0x696a8c,0x28b2448,0xce1b5716,0xd29ca7cc,0x102848cf,0x40ddc81e,0x6d4bcdf,0xcb9f36f7,0x3a3408d7,0x38d399c0,0xb4bc6e18,0xbe1a8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4b34fc9673181c53,0xa9144d56ae17d504,0x28b244800696a8c,0xd29ca7ccce1b5716,0x40ddc81e102848cf,0xcb9f36f706d4bcdf,0x38d399c03a3408d7,0xbe1a8b4bc6e18}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4b34fc9673181c53,0xa9144d56ae17d504,0x28b244800696a8c,0xd29ca7ccce1b5716,0x40ddc81e102848cf,0xcb9f36f706d4bcdf,0x38d399c03a3408d7,0xbe1a8b4bc6e18}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9da3,0x17ea,0x4595,0x9d68,0xe58a,0x2d4d,0x3967,0x2d74,0xff4e,0x680c,0x7d1b,0xbed9,0x718,0xc9c6,0xe7e9,0x63bc,0x2d30,0xc825,0x52ad,0xd475,0x6e34,0xbd68,0x9418,0xe567,0xe61b,0xf575,0xdd21,0x7ca3,0xb2a0,0xba69,0x56a8,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9da3,0x17ea,0x4595,0x9d68,0xe58a,0x2d4d,0x3967,0x2d74,0xff4e,0x680c,0x7d1b,0xbed9,0x718,0xc9c6,0xe7e9,0x63bc,0x2d30,0xc825,0x52ad,0xd475,0x6e34,0xbd68,0x9418,0xe567,0xe61b,0xf575,0xdd21,0x7ca3,0xb2a0,0xba69,0x56a8,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x17ea9da3,0x9d684595,0x2d4de58a,0x2d743967,0x680cff4e,0xbed97d1b,0xc9c60718,0x63bce7e9,0xc8252d30,0xd47552ad,0xbd686e34,0xe5679418,0xf575e61b,0x7ca3dd21,0xba69b2a0,0x856a8}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x17ea9da3,0x9d684595,0x2d4de58a,0x2d743967,0x680cff4e,0xbed97d1b,0xc9c60718,0x63bce7e9,0xc8252d30,0xd47552ad,0xbd686e34,0xe5679418,0xf575e61b,0x7ca3dd21,0xba69b2a0,0x856a8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d68459517ea9da3,0x2d7439672d4de58a,0xbed97d1b680cff4e,0x63bce7e9c9c60718,0xd47552adc8252d30,0xe5679418bd686e34,0x7ca3dd21f575e61b,0x856a8ba69b2a0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d68459517ea9da3,0x2d7439672d4de58a,0xbed97d1b680cff4e,0x63bce7e9c9c60718,0xd47552adc8252d30,0xe5679418bd686e34,0x7ca3dd21f575e61b,0x856a8ba69b2a0}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}}} #elif GMP_LIMB_BITS == 
64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}}} #endif -}}, {{ +}}}, {{{ 
#if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1aff,0x9f84,0xf1c6,0xd816,0xbdd0,0xd450,0x1990,0x119,0xbcf7,0x1a97,0x4780,0x8209,0x695b,0x1d73,0x20ba,0x7b53,0x5e3c,0x4ce5,0xac53,0x351f,0xaaa3,0x5a3e,0xd54c,0x121f,0xbf17,0xdb55,0xc9c,0x8370,0x2061,0x415c,0x1f35,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1aff,0x9f84,0xf1c6,0xd816,0xbdd0,0xd450,0x1990,0x119,0xbcf7,0x1a97,0x4780,0x8209,0x695b,0x1d73,0x20ba,0x7b53,0x5e3c,0x4ce5,0xac53,0x351f,0xaaa3,0x5a3e,0xd54c,0x121f,0xbf17,0xdb55,0xc9c,0x8370,0x2061,0x415c,0x1f35,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9f841aff,0xd816f1c6,0xd450bdd0,0x1191990,0x1a97bcf7,0x82094780,0x1d73695b,0x7b5320ba,0x4ce55e3c,0x351fac53,0x5a3eaaa3,0x121fd54c,0xdb55bf17,0x83700c9c,0x415c2061,0xc1f35}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9f841aff,0xd816f1c6,0xd450bdd0,0x1191990,0x1a97bcf7,0x82094780,0x1d73695b,0x7b5320ba,0x4ce55e3c,0x351fac53,0x5a3eaaa3,0x121fd54c,0xdb55bf17,0x83700c9c,0x415c2061,0xc1f35}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd816f1c69f841aff,0x1191990d450bdd0,0x820947801a97bcf7,0x7b5320ba1d73695b,0x351fac534ce55e3c,0x121fd54c5a3eaaa3,0x83700c9cdb55bf17,0xc1f35415c2061}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd816f1c69f841aff,0x1191990d450bdd0,0x820947801a97bcf7,0x7b5320ba1d73695b,0x351fac534ce55e3c,0x121fd54c5a3eaaa3,0x83700c9cdb55bf17,0xc1f35415c2061}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x7734,0xde6f,0xbab1,0xd4f3,0xc928,0x6c68,0x69b0,0x7cc0,0x994f,0x296c,0xb1dc,0x2eb2,0xe4ce,0x8494,0xa8ff,0x95d3,0x5f30,0xe7f,0x918,0x6cd6,0xae27,0x747c,0x1f93,0xed96,0x5590,0xc91a,0x713d,0xc33e,0xc075,0x40fd,0x9ce5,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x7734,0xde6f,0xbab1,0xd4f3,0xc928,0x6c68,0x69b0,0x7cc0,0x994f,0x296c,0xb1dc,0x2eb2,0xe4ce,0x8494,0xa8ff,0x95d3,0x5f30,0xe7f,0x918,0x6cd6,0xae27,0x747c,0x1f93,0xed96,0x5590,0xc91a,0x713d,0xc33e,0xc075,0x40fd,0x9ce5,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xde6f7734,0xd4f3bab1,0x6c68c928,0x7cc069b0,0x296c994f,0x2eb2b1dc,0x8494e4ce,0x95d3a8ff,0xe7f5f30,0x6cd60918,0x747cae27,0xed961f93,0xc91a5590,0xc33e713d,0x40fdc075,0x39ce5}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xde6f7734,0xd4f3bab1,0x6c68c928,0x7cc069b0,0x296c994f,0x2eb2b1dc,0x8494e4ce,0x95d3a8ff,0xe7f5f30,0x6cd60918,0x747cae27,0xed961f93,0xc91a5590,0xc33e713d,0x40fdc075,0x39ce5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd4f3bab1de6f7734,0x7cc069b06c68c928,0x2eb2b1dc296c994f,0x95d3a8ff8494e4ce,0x6cd609180e7f5f30,0xed961f93747cae27,0xc33e713dc91a5590,0x39ce540fdc075}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd4f3bab1de6f7734,0x7cc069b06c68c928,0x2eb2b1dc296c994f,0x95d3a8ff8494e4ce,0x6cd609180e7f5f30,0xed961f93747cae27,0xc33e713dc91a5590,0x39ce540fdc075}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xda85,0x89f5,0x1aaf,0x9ec7,0xcfff,0xec63,0x3ae9,0x20bc,0xc2f3,0x9942,0x7d84,0xfa25,0x5e69,0xeb7b,0xc357,0x9342,0x5c58,0xd26c,0x857b,0x7a7f,0x757,0xfb5c,0xbb97,0x33,0x6c28,0xfceb,0xd644,0xcc0a,0x22ad,0xe1c0,0x12d6,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xda85,0x89f5,0x1aaf,0x9ec7,0xcfff,0xec63,0x3ae9,0x20bc,0xc2f3,0x9942,0x7d84,0xfa25,0x5e69,0xeb7b,0xc357,0x9342,0x5c58,0xd26c,0x857b,0x7a7f,0x757,0xfb5c,0xbb97,0x33,0x6c28,0xfceb,0xd644,0xcc0a,0x22ad,0xe1c0,0x12d6,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x89f5da85,0x9ec71aaf,0xec63cfff,0x20bc3ae9,0x9942c2f3,0xfa257d84,0xeb7b5e69,0x9342c357,0xd26c5c58,0x7a7f857b,0xfb5c0757,0x33bb97,0xfceb6c28,0xcc0ad644,0xe1c022ad,0x412d6}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x89f5da85,0x9ec71aaf,0xec63cfff,0x20bc3ae9,0x9942c2f3,0xfa257d84,0xeb7b5e69,0x9342c357,0xd26c5c58,0x7a7f857b,0xfb5c0757,0x33bb97,0xfceb6c28,0xcc0ad644,0xe1c022ad,0x412d6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9ec71aaf89f5da85,0x20bc3ae9ec63cfff,0xfa257d849942c2f3,0x9342c357eb7b5e69,0x7a7f857bd26c5c58,0x33bb97fb5c0757,0xcc0ad644fceb6c28,0x412d6e1c022ad}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9ec71aaf89f5da85,0x20bc3ae9ec63cfff,0xfa257d849942c2f3,0x9342c357eb7b5e69,0x7a7f857bd26c5c58,0x33bb97fb5c0757,0xcc0ad644fceb6c28,0x412d6e1c022ad}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe501,0x607b,0xe39,0x27e9,0x422f,0x2baf,0xe66f,0xfee6,0x4308,0xe568,0xb87f,0x7df6,0x96a4,0xe28c,0xdf45,0x84ac,0xa1c3,0xb31a,0x53ac,0xcae0,0x555c,0xa5c1,0x2ab3,0xede0,0x40e8,0x24aa,0xf363,0x7c8f,0xdf9e,0xbea3,0xe0ca,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe501,0x607b,0xe39,0x27e9,0x422f,0x2baf,0xe66f,0xfee6,0x4308,0xe568,0xb87f,0x7df6,0x96a4,0xe28c,0xdf45,0x84ac,0xa1c3,0xb31a,0x53ac,0xcae0,0x555c,0xa5c1,0x2ab3,0xede0,0x40e8,0x24aa,0xf363,0x7c8f,0xdf9e,0xbea3,0xe0ca,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x607be501,0x27e90e39,0x2baf422f,0xfee6e66f,0xe5684308,0x7df6b87f,0xe28c96a4,0x84acdf45,0xb31aa1c3,0xcae053ac,0xa5c1555c,0xede02ab3,0x24aa40e8,0x7c8ff363,0xbea3df9e,0x3e0ca}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x607be501,0x27e90e39,0x2baf422f,0xfee6e66f,0xe5684308,0x7df6b87f,0xe28c96a4,0x84acdf45,0xb31aa1c3,0xcae053ac,0xa5c1555c,0xede02ab3,0x24aa40e8,0x7c8ff363,0xbea3df9e,0x3e0ca}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x27e90e39607be501,0xfee6e66f2baf422f,0x7df6b87fe5684308,0x84acdf45e28c96a4,0xcae053acb31aa1c3,0xede02ab3a5c1555c,0x7c8ff36324aa40e8,0x3e0cabea3df9e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x27e90e39607be501,0xfee6e66f2baf422f,0x7df6b87fe5684308,0x84acdf45e28c96a4,0xcae053acb31aa1c3,0xede02ab3a5c1555c,0x7c8ff36324aa40e8,0x3e0cabea3df9e}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x625d,0xe815,0xba6a,0x6297,0x1a75,0xd2b2,0xc698,0xd28b,0xb1,0x97f3,0x82e4,0x4126,0xf8e7,0x3639,0x1816,0x9c43,0xd2cf,0x37da,0xad52,0x2b8a,0x91cb,0x4297,0x6be7,0x1a98,0x19e4,0xa8a,0x22de,0x835c,0x4d5f,0x4596,0xa957,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x625d,0xe815,0xba6a,0x6297,0x1a75,0xd2b2,0xc698,0xd28b,0xb1,0x97f3,0x82e4,0x4126,0xf8e7,0x3639,0x1816,0x9c43,0xd2cf,0x37da,0xad52,0x2b8a,0x91cb,0x4297,0x6be7,0x1a98,0x19e4,0xa8a,0x22de,0x835c,0x4d5f,0x4596,0xa957,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xe815625d,0x6297ba6a,0xd2b21a75,0xd28bc698,0x97f300b1,0x412682e4,0x3639f8e7,0x9c431816,0x37dad2cf,0x2b8aad52,0x429791cb,0x1a986be7,0xa8a19e4,0x835c22de,0x45964d5f,0x7a957}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe815625d,0x6297ba6a,0xd2b21a75,0xd28bc698,0x97f300b1,0x412682e4,0x3639f8e7,0x9c431816,0x37dad2cf,0x2b8aad52,0x429791cb,0x1a986be7,0xa8a19e4,0x835c22de,0x45964d5f,0x7a957}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6297ba6ae815625d,0xd28bc698d2b21a75,0x412682e497f300b1,0x9c4318163639f8e7,0x2b8aad5237dad2cf,0x1a986be7429791cb,0x835c22de0a8a19e4,0x7a95745964d5f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6297ba6ae815625d,0xd28bc698d2b21a75,0x412682e497f300b1,0x9c4318163639f8e7,0x2b8aad5237dad2cf,0x1a986be7429791cb,0x835c22de0a8a19e4,0x7a95745964d5f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf6c6,0x71f7,0xa10,0xf04d,0x23dd,0x2419,0x60da,0xbcad,0xe86c,0xb26d,0x2f1d,0xdb61,0xae7,0xd947,0xb159,0x9d94,0x42db,0x8401,0xe3fe,0xcd5c,0x7586,0xfd38,0xf28a,0xc4c5,0x40d7,0x5c22,0x185c,0xd31f,0x843f,0xe782,0xb462,0x5}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf6c6,0x71f7,0xa10,0xf04d,0x23dd,0x2419,0x60da,0xbcad,0xe86c,0xb26d,0x2f1d,0xdb61,0xae7,0xd947,0xb159,0x9d94,0x42db,0x8401,0xe3fe,0xcd5c,0x7586,0xfd38,0xf28a,0xc4c5,0x40d7,0x5c22,0x185c,0xd31f,0x843f,0xe782,0xb462,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x71f7f6c6,0xf04d0a10,0x241923dd,0xbcad60da,0xb26de86c,0xdb612f1d,0xd9470ae7,0x9d94b159,0x840142db,0xcd5ce3fe,0xfd387586,0xc4c5f28a,0x5c2240d7,0xd31f185c,0xe782843f,0x5b462}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x71f7f6c6,0xf04d0a10,0x241923dd,0xbcad60da,0xb26de86c,0xdb612f1d,0xd9470ae7,0x9d94b159,0x840142db,0xcd5ce3fe,0xfd387586,0xc4c5f28a,0x5c2240d7,0xd31f185c,0xe782843f,0x5b462}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf04d0a1071f7f6c6,0xbcad60da241923dd,0xdb612f1db26de86c,0x9d94b159d9470ae7,0xcd5ce3fe840142db,0xc4c5f28afd387586,0xd31f185c5c2240d7,0x5b462e782843f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf04d0a1071f7f6c6,0xbcad60da241923dd,0xdb612f1db26de86c,0x9d94b159d9470ae7,0xcd5ce3fe840142db,0xc4c5f28afd387586,0xd31f185c5c2240d7,0x5b462e782843f}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1c53,0x7318,0xfc96,0x4b34,0xd504,0xae17,0x4d56,0xa914,0x6a8c,0x69,0x2448,0x28b,0x5716,0xce1b,0xa7cc,0xd29c,0x48cf,0x1028,0xc81e,0x40dd,0xbcdf,0x6d4,0x36f7,0xcb9f,0x8d7,0x3a34,0x99c0,0x38d3,0x6e18,0xb4bc,0xe1a8,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1c53,0x7318,0xfc96,0x4b34,0xd504,0xae17,0x4d56,0xa914,0x6a8c,0x69,0x2448,0x28b,0x5716,0xce1b,0xa7cc,0xd29c,0x48cf,0x1028,0xc81e,0x40dd,0xbcdf,0x6d4,0x36f7,0xcb9f,0x8d7,0x3a34,0x99c0,0x38d3,0x6e18,0xb4bc,0xe1a8,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x73181c53,0x4b34fc96,0xae17d504,0xa9144d56,0x696a8c,0x28b2448,0xce1b5716,0xd29ca7cc,0x102848cf,0x40ddc81e,0x6d4bcdf,0xcb9f36f7,0x3a3408d7,0x38d399c0,0xb4bc6e18,0xbe1a8}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x73181c53,0x4b34fc96,0xae17d504,0xa9144d56,0x696a8c,0x28b2448,0xce1b5716,0xd29ca7cc,0x102848cf,0x40ddc81e,0x6d4bcdf,0xcb9f36f7,0x3a3408d7,0x38d399c0,0xb4bc6e18,0xbe1a8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, 
._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4b34fc9673181c53,0xa9144d56ae17d504,0x28b244800696a8c,0xd29ca7ccce1b5716,0x40ddc81e102848cf,0xcb9f36f706d4bcdf,0x38d399c03a3408d7,0xbe1a8b4bc6e18}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4b34fc9673181c53,0xa9144d56ae17d504,0x28b244800696a8c,0xd29ca7ccce1b5716,0x40ddc81e102848cf,0xcb9f36f706d4bcdf,0x38d399c03a3408d7,0xbe1a8b4bc6e18}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9da3,0x17ea,0x4595,0x9d68,0xe58a,0x2d4d,0x3967,0x2d74,0xff4e,0x680c,0x7d1b,0xbed9,0x718,0xc9c6,0xe7e9,0x63bc,0x2d30,0xc825,0x52ad,0xd475,0x6e34,0xbd68,0x9418,0xe567,0xe61b,0xf575,0xdd21,0x7ca3,0xb2a0,0xba69,0x56a8,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9da3,0x17ea,0x4595,0x9d68,0xe58a,0x2d4d,0x3967,0x2d74,0xff4e,0x680c,0x7d1b,0xbed9,0x718,0xc9c6,0xe7e9,0x63bc,0x2d30,0xc825,0x52ad,0xd475,0x6e34,0xbd68,0x9418,0xe567,0xe61b,0xf575,0xdd21,0x7ca3,0xb2a0,0xba69,0x56a8,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x17ea9da3,0x9d684595,0x2d4de58a,0x2d743967,0x680cff4e,0xbed97d1b,0xc9c60718,0x63bce7e9,0xc8252d30,0xd47552ad,0xbd686e34,0xe5679418,0xf575e61b,0x7ca3dd21,0xba69b2a0,0x856a8}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x17ea9da3,0x9d684595,0x2d4de58a,0x2d743967,0x680cff4e,0xbed97d1b,0xc9c60718,0x63bce7e9,0xc8252d30,0xd47552ad,0xbd686e34,0xe5679418,0xf575e61b,0x7ca3dd21,0xba69b2a0,0x856a8}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d68459517ea9da3,0x2d7439672d4de58a,0xbed97d1b680cff4e,0x63bce7e9c9c60718,0xd47552adc8252d30,0xe5679418bd686e34,0x7ca3dd21f575e61b,0x856a8ba69b2a0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d68459517ea9da3,0x2d7439672d4de58a,0xbed97d1b680cff4e,0x63bce7e9c9c60718,0xd47552adc8252d30,0xe5679418bd686e34,0x7ca3dd21f575e61b,0x856a8ba69b2a0}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}}} #endif , #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x679c,0x35ac,0x6c8c,0xee5e,0x2827,0x29fa,0x9f6c,0xbda,0x2083,0x5e20,0xd351,0x39bd,0xd9bc,0x4085,0x3727,0x8f2,0xe905,0x55dd,0x6f90,0x6e26,0x6779,0xf15a,0xf170,0xec90,0xdb0e,0x53a0,0x6f99,0xe710,0xad92,0xa7f0,0xe2e1,0xd}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x679c,0x35ac,0x6c8c,0xee5e,0x2827,0x29fa,0x9f6c,0xbda,0x2083,0x5e20,0xd351,0x39bd,0xd9bc,0x4085,0x3727,0x8f2,0xe905,0x55dd,0x6f90,0x6e26,0x6779,0xf15a,0xf170,0xec90,0xdb0e,0x53a0,0x6f99,0xe710,0xad92,0xa7f0,0xe2e1,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x35ac679c,0xee5e6c8c,0x29fa2827,0xbda9f6c,0x5e202083,0x39bdd351,0x4085d9bc,0x8f23727,0x55dde905,0x6e266f90,0xf15a6779,0xec90f170,0x53a0db0e,0xe7106f99,0xa7f0ad92,0xde2e1}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x35ac679c,0xee5e6c8c,0x29fa2827,0xbda9f6c,0x5e202083,0x39bdd351,0x4085d9bc,0x8f23727,0x55dde905,0x6e266f90,0xf15a6779,0xec90f170,0x53a0db0e,0xe7106f99,0xa7f0ad92,0xde2e1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xee5e6c8c35ac679c,0xbda9f6c29fa2827,0x39bdd3515e202083,0x8f237274085d9bc,0x6e266f9055dde905,0xec90f170f15a6779,0xe7106f9953a0db0e,0xde2e1a7f0ad92}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xee5e6c8c35ac679c,0xbda9f6c29fa2827,0x39bdd3515e202083,0x8f237274085d9bc,0x6e266f9055dde905,0xec90f170f15a6779,0xe7106f9953a0db0e,0xde2e1a7f0ad92}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa483,0xbf25,0x238c,0x4c65,0xdd0b,0xccc9,0xc5af,0xac20,0xe998,0xb162,0xe2bf,0xbd24,0x5fd,0x6720,0xd781,0xd37d,0xa89,0x595a,0x76b0,0x7f86,0xdea4,0x59ea,0x2c01,0xd679,0x714b,0x5454,0xe262,0x2bcf,0xfad4,0x8bc0,0x8cd3,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa483,0xbf25,0x238c,0x4c65,0xdd0b,0xccc9,0xc5af,0xac20,0xe998,0xb162,0xe2bf,0xbd24,0x5fd,0x6720,0xd781,0xd37d,0xa89,0x595a,0x76b0,0x7f86,0xdea4,0x59ea,0x2c01,0xd679,0x714b,0x5454,0xe262,0x2bcf,0xfad4,0x8bc0,0x8cd3,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xbf25a483,0x4c65238c,0xccc9dd0b,0xac20c5af,0xb162e998,0xbd24e2bf,0x672005fd,0xd37dd781,0x595a0a89,0x7f8676b0,0x59eadea4,0xd6792c01,0x5454714b,0x2bcfe262,0x8bc0fad4,0xc8cd3}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbf25a483,0x4c65238c,0xccc9dd0b,0xac20c5af,0xb162e998,0xbd24e2bf,0x672005fd,0xd37dd781,0x595a0a89,0x7f8676b0,0x59eadea4,0xd6792c01,0x5454714b,0x2bcfe262,0x8bc0fad4,0xc8cd3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4c65238cbf25a483,0xac20c5afccc9dd0b,0xbd24e2bfb162e998,0xd37dd781672005fd,0x7f8676b0595a0a89,0xd6792c0159eadea4,0x2bcfe2625454714b,0xc8cd38bc0fad4}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4c65238cbf25a483,0xac20c5afccc9dd0b,0xbd24e2bfb162e998,0xd37dd781672005fd,0x7f8676b0595a0a89,0xd6792c0159eadea4,0x2bcfe2625454714b,0xc8cd38bc0fad4}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3f72,0x6188,0x95e8,0xed15,0x2b1a,0x2fd,0xaae9,0x15d9,0x5945,0x23ff,0xfe55,0xce25,0xaa48,0xa648,0x8534,0x16db,0x3fcf,0xa301,0xfb7c,0x3a68,0x4ba,0x1c1d,0x30ee,0xf044,0x116f,0xc4f8,0x98b2,0x4971,0xea5c,0xb93e,0x2836,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3f72,0x6188,0x95e8,0xed15,0x2b1a,0x2fd,0xaae9,0x15d9,0x5945,0x23ff,0xfe55,0xce25,0xaa48,0xa648,0x8534,0x16db,0x3fcf,0xa301,0xfb7c,0x3a68,0x4ba,0x1c1d,0x30ee,0xf044,0x116f,0xc4f8,0x98b2,0x4971,0xea5c,0xb93e,0x2836,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x61883f72,0xed1595e8,0x2fd2b1a,0x15d9aae9,0x23ff5945,0xce25fe55,0xa648aa48,0x16db8534,0xa3013fcf,0x3a68fb7c,0x1c1d04ba,0xf04430ee,0xc4f8116f,0x497198b2,0xb93eea5c,0x32836}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x61883f72,0xed1595e8,0x2fd2b1a,0x15d9aae9,0x23ff5945,0xce25fe55,0xa648aa48,0x16db8534,0xa3013fcf,0x3a68fb7c,0x1c1d04ba,0xf04430ee,0xc4f8116f,0x497198b2,0xb93eea5c,0x32836}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xed1595e861883f72,0x15d9aae902fd2b1a,0xce25fe5523ff5945,0x16db8534a648aa48,0x3a68fb7ca3013fcf,0xf04430ee1c1d04ba,0x497198b2c4f8116f,0x32836b93eea5c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xed1595e861883f72,0x15d9aae902fd2b1a,0xce25fe5523ff5945,0x16db8534a648aa48,0x3a68fb7ca3013fcf,0xf04430ee1c1d04ba,0x497198b2c4f8116f,0x32836b93eea5c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9864,0xca53,0x9373,0x11a1,0xd7d8,0xd605,0x6093,0xf425,0xdf7c,0xa1df,0x2cae,0xc642,0x2643,0xbf7a,0xc8d8,0xf70d,0x16fa,0xaa22,0x906f,0x91d9,0x9886,0xea5,0xe8f,0x136f,0x24f1,0xac5f,0x9066,0x18ef,0x526d,0x580f,0x1d1e,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9864,0xca53,0x9373,0x11a1,0xd7d8,0xd605,0x6093,0xf425,0xdf7c,0xa1df,0x2cae,0xc642,0x2643,0xbf7a,0xc8d8,0xf70d,0x16fa,0xaa22,0x906f,0x91d9,0x9886,0xea5,0xe8f,0x136f,0x24f1,0xac5f,0x9066,0x18ef,0x526d,0x580f,0x1d1e,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca539864,0x11a19373,0xd605d7d8,0xf4256093,0xa1dfdf7c,0xc6422cae,0xbf7a2643,0xf70dc8d8,0xaa2216fa,0x91d9906f,0xea59886,0x136f0e8f,0xac5f24f1,0x18ef9066,0x580f526d,0x21d1e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca539864,0x11a19373,0xd605d7d8,0xf4256093,0xa1dfdf7c,0xc6422cae,0xbf7a2643,0xf70dc8d8,0xaa2216fa,0x91d9906f,0xea59886,0x136f0e8f,0xac5f24f1,0x18ef9066,0x580f526d,0x21d1e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc 
= 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x11a19373ca539864,0xf4256093d605d7d8,0xc6422caea1dfdf7c,0xf70dc8d8bf7a2643,0x91d9906faa2216fa,0x136f0e8f0ea59886,0x18ef9066ac5f24f1,0x21d1e580f526d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x11a19373ca539864,0xf4256093d605d7d8,0xc6422caea1dfdf7c,0xf70dc8d8bf7a2643,0x91d9906faa2216fa,0x136f0e8f0ea59886,0x18ef9066ac5f24f1,0x21d1e580f526d}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x185f, 0xecc, 0x21c, 0xa62, 0x77, 0x8b1, 0x1188, 0xec2, 0xda1, 0xbf0, 0xb11, 0x82d, 0x1fe9, 0x10d4, 0x1e36, 0x194e, 0x77e, 0x1112, 0x14c0, 0x1dcd, 0x1be7, 0x1a4c, 0x140e, 0x10c5, 0x1aaf, 0x236, 0xdf3, 0x1a21, 0x1dff, 0x15bb, 0xda8, 0x30e, 0x17b0, 0xc7b, 0xd1, 0xcb, 0x868, 0x2e5, 0x5a} @@ -1213,223 +1213,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5b0b,0xfec,0x3254,0x9d11,0x8130,0x32bb,0x65ca,0x86b8,0x8184,0xfce7,0x3cc3,0x667c,0xa54b,0x4e65,0x271e,0x9834,0xa9c,0x1d81,0x5e0d,0x45fd,0x26eb,0x4b0b,0x5f8f,0x7e57,0xd36f,0xcb4c,0x56b4,0xe984,0xad75,0xcfdf,0x59a1,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5b0b,0xfec,0x3254,0x9d11,0x8130,0x32bb,0x65ca,0x86b8,0x8184,0xfce7,0x3cc3,0x667c,0xa54b,0x4e65,0x271e,0x9834,0xa9c,0x1d81,0x5e0d,0x45fd,0x26eb,0x4b0b,0x5f8f,0x7e57,0xd36f,0xcb4c,0x56b4,0xe984,0xad75,0xcfdf,0x59a1,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xfec5b0b,0x9d113254,0x32bb8130,0x86b865ca,0xfce78184,0x667c3cc3,0x4e65a54b,0x9834271e,0x1d810a9c,0x45fd5e0d,0x4b0b26eb,0x7e575f8f,0xcb4cd36f,0xe98456b4,0xcfdfad75,0xb59a1}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xfec5b0b,0x9d113254,0x32bb8130,0x86b865ca,0xfce78184,0x667c3cc3,0x4e65a54b,0x9834271e,0x1d810a9c,0x45fd5e0d,0x4b0b26eb,0x7e575f8f,0xcb4cd36f,0xe98456b4,0xcfdfad75,0xb59a1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d1132540fec5b0b,0x86b865ca32bb8130,0x667c3cc3fce78184,0x9834271e4e65a54b,0x45fd5e0d1d810a9c,0x7e575f8f4b0b26eb,0xe98456b4cb4cd36f,0xb59a1cfdfad75}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d1132540fec5b0b,0x86b865ca32bb8130,0x667c3cc3fce78184,0x9834271e4e65a54b,0x45fd5e0d1d810a9c,0x7e575f8f4b0b26eb,0xe98456b4cb4cd36f,0xb59a1cfdfad75}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2bba,0x15e8,0x1c2b,0x55ed,0xb72c,0xb3cc,0x3a5c,0xb409,0x2f3d,0x71b,0xd365,0x90a1,0xb2f5,0x1e26,0xbe0,0xa633,0xc5a6,0xe2bd,0xe0e3,0x49b3,0xd4c8,0x9f1c,0x9ba3,0x3674,0x2e5a,0x5411,0xc603,0xcdd3,0xfb23,0x2e7e,0x88a,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2bba,0x15e8,0x1c2b,0x55ed,0xb72c,0xb3cc,0x3a5c,0xb409,0x2f3d,0x71b,0xd365,0x90a1,0xb2f5,0x1e26,0xbe0,0xa633,0xc5a6,0xe2bd,0xe0e3,0x49b3,0xd4c8,0x9f1c,0x9ba3,0x3674,0x2e5a,0x5411,0xc603,0xcdd3,0xfb23,0x2e7e,0x88a,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x15e82bba,0x55ed1c2b,0xb3ccb72c,0xb4093a5c,0x71b2f3d,0x90a1d365,0x1e26b2f5,0xa6330be0,0xe2bdc5a6,0x49b3e0e3,0x9f1cd4c8,0x36749ba3,0x54112e5a,0xcdd3c603,0x2e7efb23,0x4088a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x15e82bba,0x55ed1c2b,0xb3ccb72c,0xb4093a5c,0x71b2f3d,0x90a1d365,0x1e26b2f5,0xa6330be0,0xe2bdc5a6,0x49b3e0e3,0x9f1cd4c8,0x36749ba3,0x54112e5a,0xcdd3c603,0x2e7efb23,0x4088a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55ed1c2b15e82bba,0xb4093a5cb3ccb72c,0x90a1d365071b2f3d,0xa6330be01e26b2f5,0x49b3e0e3e2bdc5a6,0x36749ba39f1cd4c8,0xcdd3c60354112e5a,0x4088a2e7efb23}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55ed1c2b15e82bba,0xb4093a5cb3ccb72c,0x90a1d365071b2f3d,0xa6330be01e26b2f5,0x49b3e0e3e2bdc5a6,0x36749ba39f1cd4c8,0xcdd3c60354112e5a,0x4088a2e7efb23}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa8e5,0xb5b7,0x12fc,0x2365,0x6f00,0x2267,0x5260,0x5ece,0xbf38,0x93ee,0xfcde,0xe5b3,0x9f9,0x712a,0x1770,0xdd48,0xdd27,0x5350,0xb7f5,0xbdf,0x46a4,0x7cb4,0xe6a9,0xca36,0x8565,0x3c9,0xe43b,0x3713,0x4fe2,0xeeb1,0x806a,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa8e5,0xb5b7,0x12fc,0x2365,0x6f00,0x2267,0x5260,0x5ece,0xbf38,0x93ee,0xfcde,0xe5b3,0x9f9,0x712a,0x1770,0xdd48,0xdd27,0x5350,0xb7f5,0xbdf,0x46a4,0x7cb4,0xe6a9,0xca36,0x8565,0x3c9,0xe43b,0x3713,0x4fe2,0xeeb1,0x806a,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb5b7a8e5,0x236512fc,0x22676f00,0x5ece5260,0x93eebf38,0xe5b3fcde,0x712a09f9,0xdd481770,0x5350dd27,0xbdfb7f5,0x7cb446a4,0xca36e6a9,0x3c98565,0x3713e43b,0xeeb14fe2,0x4806a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb5b7a8e5,0x236512fc,0x22676f00,0x5ece5260,0x93eebf38,0xe5b3fcde,0x712a09f9,0xdd481770,0x5350dd27,0xbdfb7f5,0x7cb446a4,0xca36e6a9,0x3c98565,0x3713e43b,0xeeb14fe2,0x4806a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x236512fcb5b7a8e5,0x5ece526022676f00,0xe5b3fcde93eebf38,0xdd481770712a09f9,0xbdfb7f55350dd27,0xca36e6a97cb446a4,0x3713e43b03c98565,0x4806aeeb14fe2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x236512fcb5b7a8e5,0x5ece526022676f00,0xe5b3fcde93eebf38,0xdd481770712a09f9,0xbdfb7f55350dd27,0xca36e6a97cb446a4,0x3713e43b03c98565,0x4806aeeb14fe2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa4f5,0xf013,0xcdab,0x62ee,0x7ecf,0xcd44,0x9a35,0x7947,0x7e7b,0x318,0xc33c,0x9983,0x5ab4,0xb19a,0xd8e1,0x67cb,0xf563,0xe27e,0xa1f2,0xba02,0xd914,0xb4f4,0xa070,0x81a8,0x2c90,0x34b3,0xa94b,0x167b,0x528a,0x3020,0xa65e,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa4f5,0xf013,0xcdab,0x62ee,0x7ecf,0xcd44,0x9a35,0x7947,0x7e7b,0x318,0xc33c,0x9983,0x5ab4,0xb19a,0xd8e1,0x67cb,0xf563,0xe27e,0xa1f2,0xba02,0xd914,0xb4f4,0xa070,0x81a8,0x2c90,0x34b3,0xa94b,0x167b,0x528a,0x3020,0xa65e,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf013a4f5,0x62eecdab,0xcd447ecf,0x79479a35,0x3187e7b,0x9983c33c,0xb19a5ab4,0x67cbd8e1,0xe27ef563,0xba02a1f2,0xb4f4d914,0x81a8a070,0x34b32c90,0x167ba94b,0x3020528a,0x4a65e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf013a4f5,0x62eecdab,0xcd447ecf,0x79479a35,0x3187e7b,0x9983c33c,0xb19a5ab4,0x67cbd8e1,0xe27ef563,0xba02a1f2,0xb4f4d914,0x81a8a070,0x34b32c90,0x167ba94b,0x3020528a,0x4a65e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x62eecdabf013a4f5,0x79479a35cd447ecf,0x9983c33c03187e7b,0x67cbd8e1b19a5ab4,0xba02a1f2e27ef563,0x81a8a070b4f4d914,0x167ba94b34b32c90,0x4a65e3020528a}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = 
(mp_limb_t[]) {0x62eecdabf013a4f5,0x79479a35cd447ecf,0x9983c33c03187e7b,0x67cbd8e1b19a5ab4,0xba02a1f2e27ef563,0x81a8a070b4f4d914,0x167ba94b34b32c90,0x4a65e3020528a}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5eb9,0x2393,0xd8e8,0xc566,0xd78,0xa77f,0x1bf1,0x4577,0x3141,0xecd3,0x132c,0x281,0x13b5,0x1d34,0xb4bb,0xf25,0xdc3,0xbf86,0x5e9f,0xde50,0xf536,0xe95e,0xd5b0,0x687d,0x3ab,0x992c,0xdb8d,0xc8cc,0xfaf0,0xd954,0x6e1a,0x5}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x5eb9,0x2393,0xd8e8,0xc566,0xd78,0xa77f,0x1bf1,0x4577,0x3141,0xecd3,0x132c,0x281,0x13b5,0x1d34,0xb4bb,0xf25,0xdc3,0xbf86,0x5e9f,0xde50,0xf536,0xe95e,0xd5b0,0x687d,0x3ab,0x992c,0xdb8d,0xc8cc,0xfaf0,0xd954,0x6e1a,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x23935eb9,0xc566d8e8,0xa77f0d78,0x45771bf1,0xecd33141,0x281132c,0x1d3413b5,0xf25b4bb,0xbf860dc3,0xde505e9f,0xe95ef536,0x687dd5b0,0x992c03ab,0xc8ccdb8d,0xd954faf0,0x56e1a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x23935eb9,0xc566d8e8,0xa77f0d78,0x45771bf1,0xecd33141,0x281132c,0x1d3413b5,0xf25b4bb,0xbf860dc3,0xde505e9f,0xe95ef536,0x687dd5b0,0x992c03ab,0xc8ccdb8d,0xd954faf0,0x56e1a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc566d8e823935eb9,0x45771bf1a77f0d78,0x281132cecd33141,0xf25b4bb1d3413b5,0xde505e9fbf860dc3,0x687dd5b0e95ef536,0xc8ccdb8d992c03ab,0x56e1ad954faf0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc566d8e823935eb9,0x45771bf1a77f0d78,0x281132cecd33141,0xf25b4bb1d3413b5,0xde505e9fbf860dc3,0x687dd5b0e95ef536,0xc8ccdb8d992c03ab,0x56e1ad954faf0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf17c,0xf7a8,0xd9f7,0x1544,0xb2c8,0xf5aa,0x3812,0x3fba,0xf63e,0xb545,0x678c,0xad77,0xed9f,0x12f8,0xa5dc,0x74c9,0xec1d,0xc1e0,0x806f,0x14a0,0xfb25,0x34f3,0x606c,0x57d5,0x9733,0x9c8c,0x83e3,0xa787,0x7cae,0x503b,0x2499,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf17c,0xf7a8,0xd9f7,0x1544,0xb2c8,0xf5aa,0x3812,0x3fba,0xf63e,0xb545,0x678c,0xad77,0xed9f,0x12f8,0xa5dc,0x74c9,0xec1d,0xc1e0,0x806f,0x14a0,0xfb25,0x34f3,0x606c,0x57d5,0x9733,0x9c8c,0x83e3,0xa787,0x7cae,0x503b,0x2499,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf7a8f17c,0x1544d9f7,0xf5aab2c8,0x3fba3812,0xb545f63e,0xad77678c,0x12f8ed9f,0x74c9a5dc,0xc1e0ec1d,0x14a0806f,0x34f3fb25,0x57d5606c,0x9c8c9733,0xa78783e3,0x503b7cae,0x12499}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf7a8f17c,0x1544d9f7,0xf5aab2c8,0x3fba3812,0xb545f63e,0xad77678c,0x12f8ed9f,0x74c9a5dc,0xc1e0ec1d,0x14a0806f,0x34f3fb25,0x57d5606c,0x9c8c9733,0xa78783e3,0x503b7cae,0x12499}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1544d9f7f7a8f17c,0x3fba3812f5aab2c8,0xad77678cb545f63e,0x74c9a5dc12f8ed9f,0x14a0806fc1e0ec1d,0x57d5606c34f3fb25,0xa78783e39c8c9733,0x12499503b7cae}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1544d9f7f7a8f17c,0x3fba3812f5aab2c8,0xad77678cb545f63e,0x74c9a5dc12f8ed9f,0x14a0806fc1e0ec1d,0x57d5606c34f3fb25,0xa78783e39c8c9733,0x12499503b7cae}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5d83,0x57ac,0xb73f,0xb74d,0x1869,0x3588,0x43,0x915,0x7f31,0x82eb,0x4487,0xb830,0x6627,0x70a7,0x9911,0x5646,0x4779,0xe113,0x168c,0x925d,0xc1e8,0xd347,0xa95e,0xd5a6,0x7deb,0xbeb,0x72,0xf755,0x306,0x9ee2,0x7ef9,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5d83,0x57ac,0xb73f,0xb74d,0x1869,0x3588,0x43,0x915,0x7f31,0x82eb,0x4487,0xb830,0x6627,0x70a7,0x9911,0x5646,0x4779,0xe113,0x168c,0x925d,0xc1e8,0xd347,0xa95e,0xd5a6,0x7deb,0xbeb,0x72,0xf755,0x306,0x9ee2,0x7ef9,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x57ac5d83,0xb74db73f,0x35881869,0x9150043,0x82eb7f31,0xb8304487,0x70a76627,0x56469911,0xe1134779,0x925d168c,0xd347c1e8,0xd5a6a95e,0xbeb7deb,0xf7550072,0x9ee20306,0x27ef9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x57ac5d83,0xb74db73f,0x35881869,0x9150043,0x82eb7f31,0xb8304487,0x70a76627,0x56469911,0xe1134779,0x925d168c,0xd347c1e8,0xd5a6a95e,0xbeb7deb,0xf7550072,0x9ee20306,0x27ef9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb74db73f57ac5d83,0x915004335881869,0xb830448782eb7f31,0x5646991170a76627,0x925d168ce1134779,0xd5a6a95ed347c1e8,0xf75500720beb7deb,0x27ef99ee20306}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb74db73f57ac5d83,0x915004335881869,0xb830448782eb7f31,0x5646991170a76627,0x925d168ce1134779,0xd5a6a95ed347c1e8,0xf75500720beb7deb,0x27ef99ee20306}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa147,0xdc6c,0x2717,0x3a99,0xf287,0x5880,0xe40e,0xba88,0xcebe,0x132c,0xecd3,0xfd7e,0xec4a,0xe2cb,0x4b44,0xf0da,0xf23c,0x4079,0xa160,0x21af,0xac9,0x16a1,0x2a4f,0x9782,0xfc54,0x66d3,0x2472,0x3733,0x50f,0x26ab,0x91e5,0xa}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa147,0xdc6c,0x2717,0x3a99,0xf287,0x5880,0xe40e,0xba88,0xcebe,0x132c,0xecd3,0xfd7e,0xec4a,0xe2cb,0x4b44,0xf0da,0xf23c,0x4079,0xa160,0x21af,0xac9,0x16a1,0x2a4f,0x9782,0xfc54,0x66d3,0x2472,0x3733,0x50f,0x26ab,0x91e5,0xa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdc6ca147,0x3a992717,0x5880f287,0xba88e40e,0x132ccebe,0xfd7eecd3,0xe2cbec4a,0xf0da4b44,0x4079f23c,0x21afa160,0x16a10ac9,0x97822a4f,0x66d3fc54,0x37332472,0x26ab050f,0xa91e5}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdc6ca147,0x3a992717,0x5880f287,0xba88e40e,0x132ccebe,0xfd7eecd3,0xe2cbec4a,0xf0da4b44,0x4079f23c,0x21afa160,0x16a10ac9,0x97822a4f,0x66d3fc54,0x37332472,0x26ab050f,0xa91e5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3a992717dc6ca147,0xba88e40e5880f287,0xfd7eecd3132ccebe,0xf0da4b44e2cbec4a,0x21afa1604079f23c,0x97822a4f16a10ac9,0x3733247266d3fc54,0xa91e526ab050f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3a992717dc6ca147,0xba88e40e5880f287,0xfd7eecd3132ccebe,0xf0da4b44e2cbec4a,0x21afa1604079f23c,0x97822a4f16a10ac9,0x3733247266d3fc54,0xa91e526ab050f}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5b0b,0xfec,0x3254,0x9d11,0x8130,0x32bb,0x65ca,0x86b8,0x8184,0xfce7,0x3cc3,0x667c,0xa54b,0x4e65,0x271e,0x9834,0xa9c,0x1d81,0x5e0d,0x45fd,0x26eb,0x4b0b,0x5f8f,0x7e57,0xd36f,0xcb4c,0x56b4,0xe984,0xad75,0xcfdf,0x59a1,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5b0b,0xfec,0x3254,0x9d11,0x8130,0x32bb,0x65ca,0x86b8,0x8184,0xfce7,0x3cc3,0x667c,0xa54b,0x4e65,0x271e,0x9834,0xa9c,0x1d81,0x5e0d,0x45fd,0x26eb,0x4b0b,0x5f8f,0x7e57,0xd36f,0xcb4c,0x56b4,0xe984,0xad75,0xcfdf,0x59a1,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xfec5b0b,0x9d113254,0x32bb8130,0x86b865ca,0xfce78184,0x667c3cc3,0x4e65a54b,0x9834271e,0x1d810a9c,0x45fd5e0d,0x4b0b26eb,0x7e575f8f,0xcb4cd36f,0xe98456b4,0xcfdfad75,0xb59a1}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xfec5b0b,0x9d113254,0x32bb8130,0x86b865ca,0xfce78184,0x667c3cc3,0x4e65a54b,0x9834271e,0x1d810a9c,0x45fd5e0d,0x4b0b26eb,0x7e575f8f,0xcb4cd36f,0xe98456b4,0xcfdfad75,0xb59a1}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d1132540fec5b0b,0x86b865ca32bb8130,0x667c3cc3fce78184,0x9834271e4e65a54b,0x45fd5e0d1d810a9c,0x7e575f8f4b0b26eb,0xe98456b4cb4cd36f,0xb59a1cfdfad75}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d1132540fec5b0b,0x86b865ca32bb8130,0x667c3cc3fce78184,0x9834271e4e65a54b,0x45fd5e0d1d810a9c,0x7e575f8f4b0b26eb,0xe98456b4cb4cd36f,0xb59a1cfdfad75}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2bba,0x15e8,0x1c2b,0x55ed,0xb72c,0xb3cc,0x3a5c,0xb409,0x2f3d,0x71b,0xd365,0x90a1,0xb2f5,0x1e26,0xbe0,0xa633,0xc5a6,0xe2bd,0xe0e3,0x49b3,0xd4c8,0x9f1c,0x9ba3,0x3674,0x2e5a,0x5411,0xc603,0xcdd3,0xfb23,0x2e7e,0x88a,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2bba,0x15e8,0x1c2b,0x55ed,0xb72c,0xb3cc,0x3a5c,0xb409,0x2f3d,0x71b,0xd365,0x90a1,0xb2f5,0x1e26,0xbe0,0xa633,0xc5a6,0xe2bd,0xe0e3,0x49b3,0xd4c8,0x9f1c,0x9ba3,0x3674,0x2e5a,0x5411,0xc603,0xcdd3,0xfb23,0x2e7e,0x88a,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x15e82bba,0x55ed1c2b,0xb3ccb72c,0xb4093a5c,0x71b2f3d,0x90a1d365,0x1e26b2f5,0xa6330be0,0xe2bdc5a6,0x49b3e0e3,0x9f1cd4c8,0x36749ba3,0x54112e5a,0xcdd3c603,0x2e7efb23,0x4088a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x15e82bba,0x55ed1c2b,0xb3ccb72c,0xb4093a5c,0x71b2f3d,0x90a1d365,0x1e26b2f5,0xa6330be0,0xe2bdc5a6,0x49b3e0e3,0x9f1cd4c8,0x36749ba3,0x54112e5a,0xcdd3c603,0x2e7efb23,0x4088a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55ed1c2b15e82bba,0xb4093a5cb3ccb72c,0x90a1d365071b2f3d,0xa6330be01e26b2f5,0x49b3e0e3e2bdc5a6,0x36749ba39f1cd4c8,0xcdd3c60354112e5a,0x4088a2e7efb23}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55ed1c2b15e82bba,0xb4093a5cb3ccb72c,0x90a1d365071b2f3d,0xa6330be01e26b2f5,0x49b3e0e3e2bdc5a6,0x36749ba39f1cd4c8,0xcdd3c60354112e5a,0x4088a2e7efb23}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa8e5,0xb5b7,0x12fc,0x2365,0x6f00,0x2267,0x5260,0x5ece,0xbf38,0x93ee,0xfcde,0xe5b3,0x9f9,0x712a,0x1770,0xdd48,0xdd27,0x5350,0xb7f5,0xbdf,0x46a4,0x7cb4,0xe6a9,0xca36,0x8565,0x3c9,0xe43b,0x3713,0x4fe2,0xeeb1,0x806a,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa8e5,0xb5b7,0x12fc,0x2365,0x6f00,0x2267,0x5260,0x5ece,0xbf38,0x93ee,0xfcde,0xe5b3,0x9f9,0x712a,0x1770,0xdd48,0xdd27,0x5350,0xb7f5,0xbdf,0x46a4,0x7cb4,0xe6a9,0xca36,0x8565,0x3c9,0xe43b,0x3713,0x4fe2,0xeeb1,0x806a,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb5b7a8e5,0x236512fc,0x22676f00,0x5ece5260,0x93eebf38,0xe5b3fcde,0x712a09f9,0xdd481770,0x5350dd27,0xbdfb7f5,0x7cb446a4,0xca36e6a9,0x3c98565,0x3713e43b,0xeeb14fe2,0x4806a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb5b7a8e5,0x236512fc,0x22676f00,0x5ece5260,0x93eebf38,0xe5b3fcde,0x712a09f9,0xdd481770,0x5350dd27,0xbdfb7f5,0x7cb446a4,0xca36e6a9,0x3c98565,0x3713e43b,0xeeb14fe2,0x4806a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x236512fcb5b7a8e5,0x5ece526022676f00,0xe5b3fcde93eebf38,0xdd481770712a09f9,0xbdfb7f55350dd27,0xca36e6a97cb446a4,0x3713e43b03c98565,0x4806aeeb14fe2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x236512fcb5b7a8e5,0x5ece526022676f00,0xe5b3fcde93eebf38,0xdd481770712a09f9,0xbdfb7f55350dd27,0xca36e6a97cb446a4,0x3713e43b03c98565,0x4806aeeb14fe2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 
16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa4f5,0xf013,0xcdab,0x62ee,0x7ecf,0xcd44,0x9a35,0x7947,0x7e7b,0x318,0xc33c,0x9983,0x5ab4,0xb19a,0xd8e1,0x67cb,0xf563,0xe27e,0xa1f2,0xba02,0xd914,0xb4f4,0xa070,0x81a8,0x2c90,0x34b3,0xa94b,0x167b,0x528a,0x3020,0xa65e,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa4f5,0xf013,0xcdab,0x62ee,0x7ecf,0xcd44,0x9a35,0x7947,0x7e7b,0x318,0xc33c,0x9983,0x5ab4,0xb19a,0xd8e1,0x67cb,0xf563,0xe27e,0xa1f2,0xba02,0xd914,0xb4f4,0xa070,0x81a8,0x2c90,0x34b3,0xa94b,0x167b,0x528a,0x3020,0xa65e,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf013a4f5,0x62eecdab,0xcd447ecf,0x79479a35,0x3187e7b,0x9983c33c,0xb19a5ab4,0x67cbd8e1,0xe27ef563,0xba02a1f2,0xb4f4d914,0x81a8a070,0x34b32c90,0x167ba94b,0x3020528a,0x4a65e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf013a4f5,0x62eecdab,0xcd447ecf,0x79479a35,0x3187e7b,0x9983c33c,0xb19a5ab4,0x67cbd8e1,0xe27ef563,0xba02a1f2,0xb4f4d914,0x81a8a070,0x34b32c90,0x167ba94b,0x3020528a,0x4a65e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x62eecdabf013a4f5,0x79479a35cd447ecf,0x9983c33c03187e7b,0x67cbd8e1b19a5ab4,0xba02a1f2e27ef563,0x81a8a070b4f4d914,0x167ba94b34b32c90,0x4a65e3020528a}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x62eecdabf013a4f5,0x79479a35cd447ecf,0x9983c33c03187e7b,0x67cbd8e1b19a5ab4,0xba02a1f2e27ef563,0x81a8a070b4f4d914,0x167ba94b34b32c90,0x4a65e3020528a}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x6f0b,0x3478,0x5aeb,0x64,0x9a1a,0xecff,0xccf0,0x2fab,0xf3a8,0x718a,0x97e7,0xc31a,0xa0cd,0xb872,0x514e,0x5ee1,0x4b79,0x4af9,0xd0c3,0x97c6,0x9591,0x2370,0xa987,0xa5e6,0xe201,0x8730,0x3150,0x1980,0x8452,0x3b83,0x25c9,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x6f0b,0x3478,0x5aeb,0x64,0x9a1a,0xecff,0xccf0,0x2fab,0xf3a8,0x718a,0x97e7,0xc31a,0xa0cd,0xb872,0x514e,0x5ee1,0x4b79,0x4af9,0xd0c3,0x97c6,0x9591,0x2370,0xa987,0xa5e6,0xe201,0x8730,0x3150,0x1980,0x8452,0x3b83,0x25c9,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x34786f0b,0x645aeb,0xecff9a1a,0x2fabccf0,0x718af3a8,0xc31a97e7,0xb872a0cd,0x5ee1514e,0x4af94b79,0x97c6d0c3,0x23709591,0xa5e6a987,0x8730e201,0x19803150,0x3b838452,0xb25c9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x34786f0b,0x645aeb,0xecff9a1a,0x2fabccf0,0x718af3a8,0xc31a97e7,0xb872a0cd,0x5ee1514e,0x4af94b79,0x97c6d0c3,0x23709591,0xa5e6a987,0x8730e201,0x19803150,0x3b838452,0xb25c9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x645aeb34786f0b,0x2fabccf0ecff9a1a,0xc31a97e7718af3a8,0x5ee1514eb872a0cd,0x97c6d0c34af94b79,0xa5e6a98723709591,0x198031508730e201,0xb25c93b838452}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x645aeb34786f0b,0x2fabccf0ecff9a1a,0xc31a97e7718af3a8,0x5ee1514eb872a0cd,0x97c6d0c34af94b79,0xa5e6a98723709591,0x198031508730e201,0xb25c93b838452}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1de7,0x7f69,0xdefe,0xfc6b,0x6fd5,0xc100,0x5188,0x1318,0x416e,0x10dd,0x33ac,0x4260,0x8985,0x1d0e,0x5b13,0xd02e,0x6fb5,0x6e28,0x9b7d,0x4f72,0x9665,0xd5f3,0xf00d,0xda5f,0x98f2,0xd778,0x4b2a,0x958d,0xfcef,0xd837,0x4a93,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1de7,0x7f69,0xdefe,0xfc6b,0x6fd5,0xc100,0x5188,0x1318,0x416e,0x10dd,0x33ac,0x4260,0x8985,0x1d0e,0x5b13,0xd02e,0x6fb5,0x6e28,0x9b7d,0x4f72,0x9665,0xd5f3,0xf00d,0xda5f,0x98f2,0xd778,0x4b2a,0x958d,0xfcef,0xd837,0x4a93,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7f691de7,0xfc6bdefe,0xc1006fd5,0x13185188,0x10dd416e,0x426033ac,0x1d0e8985,0xd02e5b13,0x6e286fb5,0x4f729b7d,0xd5f39665,0xda5ff00d,0xd77898f2,0x958d4b2a,0xd837fcef,0x34a93}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7f691de7,0xfc6bdefe,0xc1006fd5,0x13185188,0x10dd416e,0x426033ac,0x1d0e8985,0xd02e5b13,0x6e286fb5,0x4f729b7d,0xd5f39665,0xda5ff00d,0xd77898f2,0x958d4b2a,0xd837fcef,0x34a93}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfc6bdefe7f691de7,0x13185188c1006fd5,0x426033ac10dd416e,0xd02e5b131d0e8985,0x4f729b7d6e286fb5,0xda5ff00dd5f39665,0x958d4b2ad77898f2,0x34a93d837fcef}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfc6bdefe7f691de7,0x13185188c1006fd5,0x426033ac10dd416e,0xd02e5b131d0e8985,0x4f729b7d6e286fb5,0xda5ff00dd5f39665,0x958d4b2ad77898f2,0x34a93d837fcef}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x8527,0x81f3,0xcb8f,0x5e0d,0x7c93,0x7448,0x613,0xedcf,0x7d31,0x77c7,0x19dc,0x8ace,0xbfb8,0xa582,0x9ccc,0x28df,0xb6e0,0x4f69,0x33e6,0x546b,0xcfb2,0x1627,0x53ed,0xdc8d,0xd80b,0xb843,0xc438,0xb942,0x8fb5,0xb3c0,0xc1dc,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x8527,0x81f3,0xcb8f,0x5e0d,0x7c93,0x7448,0x613,0xedcf,0x7d31,0x77c7,0x19dc,0x8ace,0xbfb8,0xa582,0x9ccc,0x28df,0xb6e0,0x4f69,0x33e6,0x546b,0xcfb2,0x1627,0x53ed,0xdc8d,0xd80b,0xb843,0xc438,0xb942,0x8fb5,0xb3c0,0xc1dc,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81f38527,0x5e0dcb8f,0x74487c93,0xedcf0613,0x77c77d31,0x8ace19dc,0xa582bfb8,0x28df9ccc,0x4f69b6e0,0x546b33e6,0x1627cfb2,0xdc8d53ed,0xb843d80b,0xb942c438,0xb3c08fb5,0x2c1dc}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81f38527,0x5e0dcb8f,0x74487c93,0xedcf0613,0x77c77d31,0x8ace19dc,0xa582bfb8,0x28df9ccc,0x4f69b6e0,0x546b33e6,0x1627cfb2,0xdc8d53ed,0xb843d80b,0xb942c438,0xb3c08fb5,0x2c1dc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5e0dcb8f81f38527,0xedcf061374487c93,0x8ace19dc77c77d31,0x28df9ccca582bfb8,0x546b33e64f69b6e0,0xdc8d53ed1627cfb2,0xb942c438b843d80b,0x2c1dcb3c08fb5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5e0dcb8f81f38527,0xedcf061374487c93,0x8ace19dc77c77d31,0x28df9ccca582bfb8,0x546b33e64f69b6e0,0xdc8d53ed1627cfb2,0xb942c438b843d80b,0x2c1dcb3c08fb5}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x90f5,0xcb87,0xa514,0xff9b,0x65e5,0x1300,0x330f,0xd054,0xc57,0x8e75,0x6818,0x3ce5,0x5f32,0x478d,0xaeb1,0xa11e,0xb486,0xb506,0x2f3c,0x6839,0x6a6e,0xdc8f,0x5678,0x5a19,0x1dfe,0x78cf,0xceaf,0xe67f,0x7bad,0xc47c,0xda36,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x90f5,0xcb87,0xa514,0xff9b,0x65e5,0x1300,0x330f,0xd054,0xc57,0x8e75,0x6818,0x3ce5,0x5f32,0x478d,0xaeb1,0xa11e,0xb486,0xb506,0x2f3c,0x6839,0x6a6e,0xdc8f,0x5678,0x5a19,0x1dfe,0x78cf,0xceaf,0xe67f,0x7bad,0xc47c,0xda36,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcb8790f5,0xff9ba514,0x130065e5,0xd054330f,0x8e750c57,0x3ce56818,0x478d5f32,0xa11eaeb1,0xb506b486,0x68392f3c,0xdc8f6a6e,0x5a195678,0x78cf1dfe,0xe67fceaf,0xc47c7bad,0x4da36}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcb8790f5,0xff9ba514,0x130065e5,0xd054330f,0x8e750c57,0x3ce56818,0x478d5f32,0xa11eaeb1,0xb506b486,0x68392f3c,0xdc8f6a6e,0x5a195678,0x78cf1dfe,0xe67fceaf,0xc47c7bad,0x4da36}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xff9ba514cb8790f5,0xd054330f130065e5,0x3ce568188e750c57,0xa11eaeb1478d5f32,0x68392f3cb506b486,0x5a195678dc8f6a6e,0xe67fceaf78cf1dfe,0x4da36c47c7bad}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xff9ba514cb8790f5,0xd054330f130065e5,0x3ce568188e750c57,0xa11eaeb1478d5f32,0x68392f3cb506b486,0x5a195678dc8f6a6e,0xe67fceaf78cf1dfe,0x4da36c47c7bad}}}} #endif -}}}, {{{ +}}}}, 
{{{ #if 0 #elif RADIX == 16 {0x14d5, 0x1163, 0xf47, 0x337, 0x71e, 0x4ad, 0x1071, 0x19d4, 0x11d9, 0xdce, 0x186a, 0x1785, 0x13c8, 0x695, 0xe1b, 0x54b, 0x174f, 0xd0c, 0x17cb, 0x17db, 0xb41, 0xb78, 0x1a46, 0x66f, 0x23, 0x836, 0x19ce, 0xcdf, 0x90b, 0x6fb, 0x1508, 0x1bb5, 0x6aa, 0x1219, 0x1bba, 0x1d67, 0x1e46, 0x8d8, 0x62c} @@ -1689,223 +1689,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3919,0x9bc5,0xcbb7,0xe8a6,0x3f55,0x22a,0x8690,0xc64e,0x8e93,0x6c4d,0x80e8,0x422f,0xffd,0x7e1,0xf545,0xb342,0x87be,0x80a5,0xa62b,0xfb8d,0x4048,0x466e,0x4697,0xd4d,0x9d79,0xc9ba,0x52c1,0xa008,0x21f1,0xf309,0xb87c,0xa}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3919,0x9bc5,0xcbb7,0xe8a6,0x3f55,0x22a,0x8690,0xc64e,0x8e93,0x6c4d,0x80e8,0x422f,0xffd,0x7e1,0xf545,0xb342,0x87be,0x80a5,0xa62b,0xfb8d,0x4048,0x466e,0x4697,0xd4d,0x9d79,0xc9ba,0x52c1,0xa008,0x21f1,0xf309,0xb87c,0xa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9bc53919,0xe8a6cbb7,0x22a3f55,0xc64e8690,0x6c4d8e93,0x422f80e8,0x7e10ffd,0xb342f545,0x80a587be,0xfb8da62b,0x466e4048,0xd4d4697,0xc9ba9d79,0xa00852c1,0xf30921f1,0xab87c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9bc53919,0xe8a6cbb7,0x22a3f55,0xc64e8690,0x6c4d8e93,0x422f80e8,0x7e10ffd,0xb342f545,0x80a587be,0xfb8da62b,0x466e4048,0xd4d4697,0xc9ba9d79,0xa00852c1,0xf30921f1,0xab87c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe8a6cbb79bc53919,0xc64e8690022a3f55,0x422f80e86c4d8e93,0xb342f54507e10ffd,0xfb8da62b80a587be,0xd4d4697466e4048,0xa00852c1c9ba9d79,0xab87cf30921f1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe8a6cbb79bc53919,0xc64e8690022a3f55,0x422f80e86c4d8e93,0xb342f54507e10ffd,0xfb8da62b80a587be,0xd4d4697466e4048,0xa00852c1c9ba9d79,0xab87cf30921f1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc03e,0x86b5,0xcdfe,0x5bc4,0xc7a1,0xbc7b,0x90d9,0x9464,0xc604,0x9345,0x853d,0xc710,0x1ad2,0x4b98,0x4050,0x8ef4,0xd73c,0x6664,0xbd2,0xb610,0x483e,0x5e87,0xabe9,0xcf72,0x679e,0xbfbf,0x52b1,0x3483,0xf7c,0x4a36,0xbd53,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc03e,0x86b5,0xcdfe,0x5bc4,0xc7a1,0xbc7b,0x90d9,0x9464,0xc604,0x9345,0x853d,0xc710,0x1ad2,0x4b98,0x4050,0x8ef4,0xd73c,0x6664,0xbd2,0xb610,0x483e,0x5e87,0xabe9,0xcf72,0x679e,0xbfbf,0x52b1,0x3483,0xf7c,0x4a36,0xbd53,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b5c03e,0x5bc4cdfe,0xbc7bc7a1,0x946490d9,0x9345c604,0xc710853d,0x4b981ad2,0x8ef44050,0x6664d73c,0xb6100bd2,0x5e87483e,0xcf72abe9,0xbfbf679e,0x348352b1,0x4a360f7c,0xebd53}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b5c03e,0x5bc4cdfe,0xbc7bc7a1,0x946490d9,0x9345c604,0xc710853d,0x4b981ad2,0x8ef44050,0x6664d73c,0xb6100bd2,0x5e87483e,0xcf72abe9,0xbfbf679e,0x348352b1,0x4a360f7c,0xebd53}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5bc4cdfe86b5c03e,0x946490d9bc7bc7a1,0xc710853d9345c604,0x8ef440504b981ad2,0xb6100bd26664d73c,0xcf72abe95e87483e,0x348352b1bfbf679e,0xebd534a360f7c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) 
{0x5bc4cdfe86b5c03e,0x946490d9bc7bc7a1,0xc710853d9345c604,0x8ef440504b981ad2,0xb6100bd26664d73c,0xcf72abe95e87483e,0x348352b1bfbf679e,0xebd534a360f7c}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5937,0x9b8c,0xba31,0x676c,0x8d31,0x9c3d,0xf619,0x42e2,0x4d1a,0xd072,0x76e5,0x390a,0x5086,0x7643,0x2d16,0xfd71,0x341,0xff7b,0x56ec,0xed63,0x1436,0x7324,0x6a5d,0x9488,0x9f0,0x145,0xd6b5,0x4043,0x10d8,0xeb6e,0x7784,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5937,0x9b8c,0xba31,0x676c,0x8d31,0x9c3d,0xf619,0x42e2,0x4d1a,0xd072,0x76e5,0x390a,0x5086,0x7643,0x2d16,0xfd71,0x341,0xff7b,0x56ec,0xed63,0x1436,0x7324,0x6a5d,0x9488,0x9f0,0x145,0xd6b5,0x4043,0x10d8,0xeb6e,0x7784,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b8c5937,0x676cba31,0x9c3d8d31,0x42e2f619,0xd0724d1a,0x390a76e5,0x76435086,0xfd712d16,0xff7b0341,0xed6356ec,0x73241436,0x94886a5d,0x14509f0,0x4043d6b5,0xeb6e10d8,0xf7784}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b8c5937,0x676cba31,0x9c3d8d31,0x42e2f619,0xd0724d1a,0x390a76e5,0x76435086,0xfd712d16,0xff7b0341,0xed6356ec,0x73241436,0x94886a5d,0x14509f0,0x4043d6b5,0xeb6e10d8,0xf7784}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x676cba319b8c5937,0x42e2f6199c3d8d31,0x390a76e5d0724d1a,0xfd712d1676435086,0xed6356ecff7b0341,0x94886a5d73241436,0x4043d6b5014509f0,0xf7784eb6e10d8}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x676cba319b8c5937,0x42e2f6199c3d8d31,0x390a76e5d0724d1a,0xfd712d1676435086,0xed6356ecff7b0341,0x94886a5d73241436,0x4043d6b5014509f0,0xf7784eb6e10d8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc6e7,0x643a,0x3448,0x1759,0xc0aa,0xfdd5,0x796f,0x39b1,0x716c,0x93b2,0x7f17,0xbdd0,0xf002,0xf81e,0xaba,0x4cbd,0x7841,0x7f5a,0x59d4,0x472,0xbfb7,0xb991,0xb968,0xf2b2,0x6286,0x3645,0xad3e,0x5ff7,0xde0e,0xcf6,0x4783,0x5}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc6e7,0x643a,0x3448,0x1759,0xc0aa,0xfdd5,0x796f,0x39b1,0x716c,0x93b2,0x7f17,0xbdd0,0xf002,0xf81e,0xaba,0x4cbd,0x7841,0x7f5a,0x59d4,0x472,0xbfb7,0xb991,0xb968,0xf2b2,0x6286,0x3645,0xad3e,0x5ff7,0xde0e,0xcf6,0x4783,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x643ac6e7,0x17593448,0xfdd5c0aa,0x39b1796f,0x93b2716c,0xbdd07f17,0xf81ef002,0x4cbd0aba,0x7f5a7841,0x47259d4,0xb991bfb7,0xf2b2b968,0x36456286,0x5ff7ad3e,0xcf6de0e,0x54783}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x643ac6e7,0x17593448,0xfdd5c0aa,0x39b1796f,0x93b2716c,0xbdd07f17,0xf81ef002,0x4cbd0aba,0x7f5a7841,0x47259d4,0xb991bfb7,0xf2b2b968,0x36456286,0x5ff7ad3e,0xcf6de0e,0x54783}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17593448643ac6e7,0x39b1796ffdd5c0aa,0xbdd07f1793b2716c,0x4cbd0abaf81ef002,0x47259d47f5a7841,0xf2b2b968b991bfb7,0x5ff7ad3e36456286,0x547830cf6de0e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17593448643ac6e7,0x39b1796ffdd5c0aa,0xbdd07f1793b2716c,0x4cbd0abaf81ef002,0x47259d47f5a7841,0xf2b2b968b991bfb7,0x5ff7ad3e36456286,0x547830cf6de0e}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5fd3,0xc1bb,0x3527,0x289e,0x97fd,0xf5ce,0xa8e1,0xfbf2,0x8f04,0xb5e7,0xdf66,0xcb44,0x5b5,0x8314,0x31c,0x6e5c,0xa6b9,0x3134,0x3d19,0x5ea9,0x860d,0x37fe,0x8003,0xafb9,0xbfdd,0xf377,0xa36d,0xde5a,0xa9df,0x8da,0xc872,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5fd3,0xc1bb,0x3527,0x289e,0x97fd,0xf5ce,0xa8e1,0xfbf2,0x8f04,0xb5e7,0xdf66,0xcb44,0x5b5,0x8314,0x31c,0x6e5c,0xa6b9,0x3134,0x3d19,0x5ea9,0x860d,0x37fe,0x8003,0xafb9,0xbfdd,0xf377,0xa36d,0xde5a,0xa9df,0x8da,0xc872,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xc1bb5fd3,0x289e3527,0xf5ce97fd,0xfbf2a8e1,0xb5e78f04,0xcb44df66,0x831405b5,0x6e5c031c,0x3134a6b9,0x5ea93d19,0x37fe860d,0xafb98003,0xf377bfdd,0xde5aa36d,0x8daa9df,0xbc872}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc1bb5fd3,0x289e3527,0xf5ce97fd,0xfbf2a8e1,0xb5e78f04,0xcb44df66,0x831405b5,0x6e5c031c,0x3134a6b9,0x5ea93d19,0x37fe860d,0xafb98003,0xf377bfdd,0xde5aa36d,0x8daa9df,0xbc872}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x289e3527c1bb5fd3,0xfbf2a8e1f5ce97fd,0xcb44df66b5e78f04,0x6e5c031c831405b5,0x5ea93d193134a6b9,0xafb9800337fe860d,0xde5aa36df377bfdd,0xbc87208daa9df}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x289e3527c1bb5fd3,0xfbf2a8e1f5ce97fd,0xcb44df66b5e78f04,0x6e5c031c831405b5,0x5ea93d193134a6b9,0xafb9800337fe860d,0xde5aa36df377bfdd,0xbc87208daa9df}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb354,0x6a4f,0xd461,0xf7db,0x4aec,0x6786,0xff6,0xb274,0xfcf4,0x66d,0x97e9,0x277e,0x5e43,0x68a3,0xb1fa,0x6062,0xa56a,0x8c2b,0x67ed,0xd926,0x444a,0x4883,0x5bc5,0x8084,0x1f0a,0x209e,0x3b85,0x4eb6,0x14fe,0xb973,0xb05c,0xa}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb354,0x6a4f,0xd461,0xf7db,0x4aec,0x6786,0xff6,0xb274,0xfcf4,0x66d,0x97e9,0x277e,0x5e43,0x68a3,0xb1fa,0x6062,0xa56a,0x8c2b,0x67ed,0xd926,0x444a,0x4883,0x5bc5,0x8084,0x1f0a,0x209e,0x3b85,0x4eb6,0x14fe,0xb973,0xb05c,0xa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6a4fb354,0xf7dbd461,0x67864aec,0xb2740ff6,0x66dfcf4,0x277e97e9,0x68a35e43,0x6062b1fa,0x8c2ba56a,0xd92667ed,0x4883444a,0x80845bc5,0x209e1f0a,0x4eb63b85,0xb97314fe,0xab05c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6a4fb354,0xf7dbd461,0x67864aec,0xb2740ff6,0x66dfcf4,0x277e97e9,0x68a35e43,0x6062b1fa,0x8c2ba56a,0xd92667ed,0x4883444a,0x80845bc5,0x209e1f0a,0x4eb63b85,0xb97314fe,0xab05c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf7dbd4616a4fb354,0xb2740ff667864aec,0x277e97e9066dfcf4,0x6062b1fa68a35e43,0xd92667ed8c2ba56a,0x80845bc54883444a,0x4eb63b85209e1f0a,0xab05cb97314fe}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf7dbd4616a4fb354,0xb2740ff667864aec,0x277e97e9066dfcf4,0x6062b1fa68a35e43,0xd92667ed8c2ba56a,0x80845bc54883444a,0x4eb63b85209e1f0a,0xab05cb97314fe}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9c41,0x213b,0x2271,0x4d2a,0xca4c,0x987c,0xf3fd,0x8462,0x84ba,0x5504,0xf930,0x5ca1,0xb075,0x84d2,0xb16,0x1bc1,0xe1ac,0xfeb5,0xe84e,0x4bb0,0xf6b6,0x57b6,0x3d98,0x97f4,0xda24,0x9866,0x1aae,0xb84,0x36ec,0xfcb7,0x4a2d,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9c41,0x213b,0x2271,0x4d2a,0xca4c,0x987c,0xf3fd,0x8462,0x84ba,0x5504,0xf930,0x5ca1,0xb075,0x84d2,0xb16,0x1bc1,0xe1ac,0xfeb5,0xe84e,0x4bb0,0xf6b6,0x57b6,0x3d98,0x97f4,0xda24,0x9866,0x1aae,0xb84,0x36ec,0xfcb7,0x4a2d,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x213b9c41,0x4d2a2271,0x987cca4c,0x8462f3fd,0x550484ba,0x5ca1f930,0x84d2b075,0x1bc10b16,0xfeb5e1ac,0x4bb0e84e,0x57b6f6b6,0x97f43d98,0x9866da24,0xb841aae,0xfcb736ec,0xf4a2d}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x213b9c41,0x4d2a2271,0x987cca4c,0x8462f3fd,0x550484ba,0x5ca1f930,0x84d2b075,0x1bc10b16,0xfeb5e1ac,0x4bb0e84e,0x57b6f6b6,0x97f43d98,0x9866da24,0xb841aae,0xfcb736ec,0xf4a2d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 
0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4d2a2271213b9c41,0x8462f3fd987cca4c,0x5ca1f930550484ba,0x1bc10b1684d2b075,0x4bb0e84efeb5e1ac,0x97f43d9857b6f6b6,0xb841aae9866da24,0xf4a2dfcb736ec}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4d2a2271213b9c41,0x8462f3fd987cca4c,0x5ca1f930550484ba,0x1bc10b1684d2b075,0x4bb0e84efeb5e1ac,0x97f43d9857b6f6b6,0xb841aae9866da24,0xf4a2dfcb736ec}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa02d,0x3e44,0xcad8,0xd761,0x6802,0xa31,0x571e,0x40d,0x70fb,0x4a18,0x2099,0x34bb,0xfa4a,0x7ceb,0xfce3,0x91a3,0x5946,0xcecb,0xc2e6,0xa156,0x79f2,0xc801,0x7ffc,0x5046,0x4022,0xc88,0x5c92,0x21a5,0x5620,0xf725,0x378d,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xa02d,0x3e44,0xcad8,0xd761,0x6802,0xa31,0x571e,0x40d,0x70fb,0x4a18,0x2099,0x34bb,0xfa4a,0x7ceb,0xfce3,0x91a3,0x5946,0xcecb,0xc2e6,0xa156,0x79f2,0xc801,0x7ffc,0x5046,0x4022,0xc88,0x5c92,0x21a5,0x5620,0xf725,0x378d,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3e44a02d,0xd761cad8,0xa316802,0x40d571e,0x4a1870fb,0x34bb2099,0x7cebfa4a,0x91a3fce3,0xcecb5946,0xa156c2e6,0xc80179f2,0x50467ffc,0xc884022,0x21a55c92,0xf7255620,0x4378d}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3e44a02d,0xd761cad8,0xa316802,0x40d571e,0x4a1870fb,0x34bb2099,0x7cebfa4a,0x91a3fce3,0xcecb5946,0xa156c2e6,0xc80179f2,0x50467ffc,0xc884022,0x21a55c92,0xf7255620,0x4378d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd761cad83e44a02d,0x40d571e0a316802,0x34bb20994a1870fb,0x91a3fce37cebfa4a,0xa156c2e6cecb5946,0x50467ffcc80179f2,0x21a55c920c884022,0x4378df7255620}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd761cad83e44a02d,0x40d571e0a316802,0x34bb20994a1870fb,0x91a3fce37cebfa4a,0xa156c2e6cecb5946,0x50467ffcc80179f2,0x21a55c920c884022,0x4378df7255620}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3919,0x9bc5,0xcbb7,0xe8a6,0x3f55,0x22a,0x8690,0xc64e,0x8e93,0x6c4d,0x80e8,0x422f,0xffd,0x7e1,0xf545,0xb342,0x87be,0x80a5,0xa62b,0xfb8d,0x4048,0x466e,0x4697,0xd4d,0x9d79,0xc9ba,0x52c1,0xa008,0x21f1,0xf309,0xb87c,0xa}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3919,0x9bc5,0xcbb7,0xe8a6,0x3f55,0x22a,0x8690,0xc64e,0x8e93,0x6c4d,0x80e8,0x422f,0xffd,0x7e1,0xf545,0xb342,0x87be,0x80a5,0xa62b,0xfb8d,0x4048,0x466e,0x4697,0xd4d,0x9d79,0xc9ba,0x52c1,0xa008,0x21f1,0xf309,0xb87c,0xa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9bc53919,0xe8a6cbb7,0x22a3f55,0xc64e8690,0x6c4d8e93,0x422f80e8,0x7e10ffd,0xb342f545,0x80a587be,0xfb8da62b,0x466e4048,0xd4d4697,0xc9ba9d79,0xa00852c1,0xf30921f1,0xab87c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9bc53919,0xe8a6cbb7,0x22a3f55,0xc64e8690,0x6c4d8e93,0x422f80e8,0x7e10ffd,0xb342f545,0x80a587be,0xfb8da62b,0x466e4048,0xd4d4697,0xc9ba9d79,0xa00852c1,0xf30921f1,0xab87c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe8a6cbb79bc53919,0xc64e8690022a3f55,0x422f80e86c4d8e93,0xb342f54507e10ffd,0xfb8da62b80a587be,0xd4d4697466e4048,0xa00852c1c9ba9d79,0xab87cf30921f1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe8a6cbb79bc53919,0xc64e8690022a3f55,0x422f80e86c4d8e93,0xb342f54507e10ffd,0xfb8da62b80a587be,0xd4d4697466e4048,0xa00852c1c9ba9d79,0xab87cf30921f1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc 
= 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc03e,0x86b5,0xcdfe,0x5bc4,0xc7a1,0xbc7b,0x90d9,0x9464,0xc604,0x9345,0x853d,0xc710,0x1ad2,0x4b98,0x4050,0x8ef4,0xd73c,0x6664,0xbd2,0xb610,0x483e,0x5e87,0xabe9,0xcf72,0x679e,0xbfbf,0x52b1,0x3483,0xf7c,0x4a36,0xbd53,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc03e,0x86b5,0xcdfe,0x5bc4,0xc7a1,0xbc7b,0x90d9,0x9464,0xc604,0x9345,0x853d,0xc710,0x1ad2,0x4b98,0x4050,0x8ef4,0xd73c,0x6664,0xbd2,0xb610,0x483e,0x5e87,0xabe9,0xcf72,0x679e,0xbfbf,0x52b1,0x3483,0xf7c,0x4a36,0xbd53,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b5c03e,0x5bc4cdfe,0xbc7bc7a1,0x946490d9,0x9345c604,0xc710853d,0x4b981ad2,0x8ef44050,0x6664d73c,0xb6100bd2,0x5e87483e,0xcf72abe9,0xbfbf679e,0x348352b1,0x4a360f7c,0xebd53}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x86b5c03e,0x5bc4cdfe,0xbc7bc7a1,0x946490d9,0x9345c604,0xc710853d,0x4b981ad2,0x8ef44050,0x6664d73c,0xb6100bd2,0x5e87483e,0xcf72abe9,0xbfbf679e,0x348352b1,0x4a360f7c,0xebd53}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5bc4cdfe86b5c03e,0x946490d9bc7bc7a1,0xc710853d9345c604,0x8ef440504b981ad2,0xb6100bd26664d73c,0xcf72abe95e87483e,0x348352b1bfbf679e,0xebd534a360f7c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5bc4cdfe86b5c03e,0x946490d9bc7bc7a1,0xc710853d9345c604,0x8ef440504b981ad2,0xb6100bd26664d73c,0xcf72abe95e87483e,0x348352b1bfbf679e,0xebd534a360f7c}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5937,0x9b8c,0xba31,0x676c,0x8d31,0x9c3d,0xf619,0x42e2,0x4d1a,0xd072,0x76e5,0x390a,0x5086,0x7643,0x2d16,0xfd71,0x341,0xff7b,0x56ec,0xed63,0x1436,0x7324,0x6a5d,0x9488,0x9f0,0x145,0xd6b5,0x4043,0x10d8,0xeb6e,0x7784,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5937,0x9b8c,0xba31,0x676c,0x8d31,0x9c3d,0xf619,0x42e2,0x4d1a,0xd072,0x76e5,0x390a,0x5086,0x7643,0x2d16,0xfd71,0x341,0xff7b,0x56ec,0xed63,0x1436,0x7324,0x6a5d,0x9488,0x9f0,0x145,0xd6b5,0x4043,0x10d8,0xeb6e,0x7784,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b8c5937,0x676cba31,0x9c3d8d31,0x42e2f619,0xd0724d1a,0x390a76e5,0x76435086,0xfd712d16,0xff7b0341,0xed6356ec,0x73241436,0x94886a5d,0x14509f0,0x4043d6b5,0xeb6e10d8,0xf7784}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9b8c5937,0x676cba31,0x9c3d8d31,0x42e2f619,0xd0724d1a,0x390a76e5,0x76435086,0xfd712d16,0xff7b0341,0xed6356ec,0x73241436,0x94886a5d,0x14509f0,0x4043d6b5,0xeb6e10d8,0xf7784}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x676cba319b8c5937,0x42e2f6199c3d8d31,0x390a76e5d0724d1a,0xfd712d1676435086,0xed6356ecff7b0341,0x94886a5d73241436,0x4043d6b5014509f0,0xf7784eb6e10d8}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x676cba319b8c5937,0x42e2f6199c3d8d31,0x390a76e5d0724d1a,0xfd712d1676435086,0xed6356ecff7b0341,0x94886a5d73241436,0x4043d6b5014509f0,0xf7784eb6e10d8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc6e7,0x643a,0x3448,0x1759,0xc0aa,0xfdd5,0x796f,0x39b1,0x716c,0x93b2,0x7f17,0xbdd0,0xf002,0xf81e,0xaba,0x4cbd,0x7841,0x7f5a,0x59d4,0x472,0xbfb7,0xb991,0xb968,0xf2b2,0x6286,0x3645,0xad3e,0x5ff7,0xde0e,0xcf6,0x4783,0x5}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xc6e7,0x643a,0x3448,0x1759,0xc0aa,0xfdd5,0x796f,0x39b1,0x716c,0x93b2,0x7f17,0xbdd0,0xf002,0xf81e,0xaba,0x4cbd,0x7841,0x7f5a,0x59d4,0x472,0xbfb7,0xb991,0xb968,0xf2b2,0x6286,0x3645,0xad3e,0x5ff7,0xde0e,0xcf6,0x4783,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x643ac6e7,0x17593448,0xfdd5c0aa,0x39b1796f,0x93b2716c,0xbdd07f17,0xf81ef002,0x4cbd0aba,0x7f5a7841,0x47259d4,0xb991bfb7,0xf2b2b968,0x36456286,0x5ff7ad3e,0xcf6de0e,0x54783}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x643ac6e7,0x17593448,0xfdd5c0aa,0x39b1796f,0x93b2716c,0xbdd07f17,0xf81ef002,0x4cbd0aba,0x7f5a7841,0x47259d4,0xb991bfb7,0xf2b2b968,0x36456286,0x5ff7ad3e,0xcf6de0e,0x54783}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17593448643ac6e7,0x39b1796ffdd5c0aa,0xbdd07f1793b2716c,0x4cbd0abaf81ef002,0x47259d47f5a7841,0xf2b2b968b991bfb7,0x5ff7ad3e36456286,0x547830cf6de0e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17593448643ac6e7,0x39b1796ffdd5c0aa,0xbdd07f1793b2716c,0x4cbd0abaf81ef002,0x47259d47f5a7841,0xf2b2b968b991bfb7,0x5ff7ad3e36456286,0x547830cf6de0e}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x718a,0xe24a,0xae5,0xa4d6,0xd401,0xf453,0x9f91,0x69ce,0x7d19,0xfa11,0x9273,0x4e63,0xf33a,0xde49,0xe08f,0x746a,0x243d,0x52bb,0x43b6,0xe4c,0x1bdd,0x380d,0xdf64,0x74fe,0x4dfa,0x584f,0xa4d6,0xd71b,0xf067,0xf070,0x717e,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x718a,0xe24a,0xae5,0xa4d6,0xd401,0xf453,0x9f91,0x69ce,0x7d19,0xfa11,0x9273,0x4e63,0xf33a,0xde49,0xe08f,0x746a,0x243d,0x52bb,0x43b6,0xe4c,0x1bdd,0x380d,0xdf64,0x74fe,0x4dfa,0x584f,0xa4d6,0xd71b,0xf067,0xf070,0x717e,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe24a718a,0xa4d60ae5,0xf453d401,0x69ce9f91,0xfa117d19,0x4e639273,0xde49f33a,0x746ae08f,0x52bb243d,0xe4c43b6,0x380d1bdd,0x74fedf64,0x584f4dfa,0xd71ba4d6,0xf070f067,0xf717e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe24a718a,0xa4d60ae5,0xf453d401,0x69ce9f91,0xfa117d19,0x4e639273,0xde49f33a,0x746ae08f,0x52bb243d,0xe4c43b6,0x380d1bdd,0x74fedf64,0x584f4dfa,0xd71ba4d6,0xf070f067,0xf717e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa4d60ae5e24a718a,0x69ce9f91f453d401,0x4e639273fa117d19,0x746ae08fde49f33a,0xe4c43b652bb243d,0x74fedf64380d1bdd,0xd71ba4d6584f4dfa,0xf717ef070f067}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa4d60ae5e24a718a,0x69ce9f91f453d401,0x4e639273fa117d19,0x746ae08fde49f33a,0xe4c43b652bb243d,0x74fedf64380d1bdd,0xd71ba4d6584f4dfa,0xf717ef070f067}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5d93,0x7845,0xd1d0,0xe045,0xfa74,0x6b6,0x9400,0xad36,0x4e68,0xd3f6,0x9b00,0x7ca0,0xab22,0xfac,0x1fb6,0xb42f,0x57db,0xb2e3,0xbc5b,0x2b2d,0x94fa,0xc77e,0x34e2,0x2918,0x6ce9,0xf9dd,0x68cf,0xd4a2,0xbc59,0x6050,0xda60,0x5}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x5d93,0x7845,0xd1d0,0xe045,0xfa74,0x6b6,0x9400,0xad36,0x4e68,0xd3f6,0x9b00,0x7ca0,0xab22,0xfac,0x1fb6,0xb42f,0x57db,0xb2e3,0xbc5b,0x2b2d,0x94fa,0xc77e,0x34e2,0x2918,0x6ce9,0xf9dd,0x68cf,0xd4a2,0xbc59,0x6050,0xda60,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x78455d93,0xe045d1d0,0x6b6fa74,0xad369400,0xd3f64e68,0x7ca09b00,0xfacab22,0xb42f1fb6,0xb2e357db,0x2b2dbc5b,0xc77e94fa,0x291834e2,0xf9dd6ce9,0xd4a268cf,0x6050bc59,0x5da60}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x78455d93,0xe045d1d0,0x6b6fa74,0xad369400,0xd3f64e68,0x7ca09b00,0xfacab22,0xb42f1fb6,0xb2e357db,0x2b2dbc5b,0xc77e94fa,0x291834e2,0xf9dd6ce9,0xd4a268cf,0x6050bc59,0x5da60}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe045d1d078455d93,0xad36940006b6fa74,0x7ca09b00d3f64e68,0xb42f1fb60facab22,0x2b2dbc5bb2e357db,0x291834e2c77e94fa,0xd4a268cff9dd6ce9,0x5da606050bc59}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe045d1d078455d93,0xad36940006b6fa74,0x7ca09b00d3f64e68,0xb42f1fb60facab22,0x2b2dbc5bb2e357db,0x291834e2c77e94fa,0xd4a268cff9dd6ce9,0x5da606050bc59}}}} #endif }, { #if 0 
#elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc6dc,0x5d39,0xac2b,0x2d81,0xc9b8,0xf398,0xdab5,0x8e30,0xb3b2,0x1b25,0x7102,0x8cd2,0x952e,0x7c35,0xb4f3,0x52b8,0x5789,0xb877,0x6906,0x8d31,0x98a6,0x8a10,0x2b3,0x1667,0x856,0xa935,0xfc76,0xc8ec,0x6044,0x9148,0x4f02,0xd}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc6dc,0x5d39,0xac2b,0x2d81,0xc9b8,0xf398,0xdab5,0x8e30,0xb3b2,0x1b25,0x7102,0x8cd2,0x952e,0x7c35,0xb4f3,0x52b8,0x5789,0xb877,0x6906,0x8d31,0x98a6,0x8a10,0x2b3,0x1667,0x856,0xa935,0xfc76,0xc8ec,0x6044,0x9148,0x4f02,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5d39c6dc,0x2d81ac2b,0xf398c9b8,0x8e30dab5,0x1b25b3b2,0x8cd27102,0x7c35952e,0x52b8b4f3,0xb8775789,0x8d316906,0x8a1098a6,0x166702b3,0xa9350856,0xc8ecfc76,0x91486044,0xd4f02}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5d39c6dc,0x2d81ac2b,0xf398c9b8,0x8e30dab5,0x1b25b3b2,0x8cd27102,0x7c35952e,0x52b8b4f3,0xb8775789,0x8d316906,0x8a1098a6,0x166702b3,0xa9350856,0xc8ecfc76,0x91486044,0xd4f02}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2d81ac2b5d39c6dc,0x8e30dab5f398c9b8,0x8cd271021b25b3b2,0x52b8b4f37c35952e,0x8d316906b8775789,0x166702b38a1098a6,0xc8ecfc76a9350856,0xd4f0291486044}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2d81ac2b5d39c6dc,0x8e30dab5f398c9b8,0x8cd271021b25b3b2,0x52b8b4f37c35952e,0x8d316906b8775789,0x166702b38a1098a6,0xc8ecfc76a9350856,0xd4f0291486044}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 31, ._mp_d = (mp_limb_t[]) {0x8e76,0x1db5,0xf51a,0x5b29,0x2bfe,0xbac,0x606e,0x9631,0x82e6,0x5ee,0x6d8c,0xb19c,0xcc5,0x21b6,0x1f70,0x8b95,0xdbc2,0xad44,0xbc49,0xf1b3,0xe422,0xc7f2,0x209b,0x8b01,0xb205,0xa7b0,0x5b29,0x28e4,0xf98,0xf8f,0x8e81}}} +{{{._mp_alloc = 0, ._mp_size = 31, ._mp_d = (mp_limb_t[]) {0x8e76,0x1db5,0xf51a,0x5b29,0x2bfe,0xbac,0x606e,0x9631,0x82e6,0x5ee,0x6d8c,0xb19c,0xcc5,0x21b6,0x1f70,0x8b95,0xdbc2,0xad44,0xbc49,0xf1b3,0xe422,0xc7f2,0x209b,0x8b01,0xb205,0xa7b0,0x5b29,0x28e4,0xf98,0xf8f,0x8e81}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1db58e76,0x5b29f51a,0xbac2bfe,0x9631606e,0x5ee82e6,0xb19c6d8c,0x21b60cc5,0x8b951f70,0xad44dbc2,0xf1b3bc49,0xc7f2e422,0x8b01209b,0xa7b0b205,0x28e45b29,0xf8f0f98,0x8e81}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1db58e76,0x5b29f51a,0xbac2bfe,0x9631606e,0x5ee82e6,0xb19c6d8c,0x21b60cc5,0x8b951f70,0xad44dbc2,0xf1b3bc49,0xc7f2e422,0x8b01209b,0xa7b0b205,0x28e45b29,0xf8f0f98,0x8e81}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5b29f51a1db58e76,0x9631606e0bac2bfe,0xb19c6d8c05ee82e6,0x8b951f7021b60cc5,0xf1b3bc49ad44dbc2,0x8b01209bc7f2e422,0x28e45b29a7b0b205,0x8e810f8f0f98}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5b29f51a1db58e76,0x9631606e0bac2bfe,0xb19c6d8c05ee82e6,0x8b951f7021b60cc5,0xf1b3bc49ad44dbc2,0x8b01209bc7f2e422,0x28e45b29a7b0b205,0x8e810f8f0f98}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0xca6, 0xc89, 0x411, 0x17e9, 0x188d, 0xad5, 0x8de, 0x403, 0x183a, 0x1e0d, 0x28c, 0xfdc, 0xbcc, 0xd0, 0xa3a, 0xb2e, 0x140c, 0x11b4, 0x4bb, 0x10da, 0xb22, 0x1a4c, 0xbfa, 0xbd5, 0xae6, 0xeff, 0x465, 0x8df, 0xcf9, 0x1c4a, 0x1807, 0x1462, 0xead, 0x1c43, 0x12a3, 0x1ec2, 0x1e12, 0xad0, 0x1cd} @@ -2165,223 +2165,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif 
#endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x4f71,0xd850,0x620a,0x55cc,0x31b0,0x4b74,0xab42,0x125c,0xb589,0xa634,0x73b3,0xca64,0x6ed0,0x6c41,0x6022,0x9b4a,0xb03e,0xe0d,0x1e19,0x2925,0x275e,0x42a,0x10dd,0xfc6a,0x7f51,0xb592,0x977e,0xe556,0x10bc,0x873d,0xa68f,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x4f71,0xd850,0x620a,0x55cc,0x31b0,0x4b74,0xab42,0x125c,0xb589,0xa634,0x73b3,0xca64,0x6ed0,0x6c41,0x6022,0x9b4a,0xb03e,0xe0d,0x1e19,0x2925,0x275e,0x42a,0x10dd,0xfc6a,0x7f51,0xb592,0x977e,0xe556,0x10bc,0x873d,0xa68f,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8504f71,0x55cc620a,0x4b7431b0,0x125cab42,0xa634b589,0xca6473b3,0x6c416ed0,0x9b4a6022,0xe0db03e,0x29251e19,0x42a275e,0xfc6a10dd,0xb5927f51,0xe556977e,0x873d10bc,0x3a68f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8504f71,0x55cc620a,0x4b7431b0,0x125cab42,0xa634b589,0xca6473b3,0x6c416ed0,0x9b4a6022,0xe0db03e,0x29251e19,0x42a275e,0xfc6a10dd,0xb5927f51,0xe556977e,0x873d10bc,0x3a68f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55cc620ad8504f71,0x125cab424b7431b0,0xca6473b3a634b589,0x9b4a60226c416ed0,0x29251e190e0db03e,0xfc6a10dd042a275e,0xe556977eb5927f51,0x3a68f873d10bc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55cc620ad8504f71,0x125cab424b7431b0,0xca6473b3a634b589,0x9b4a60226c416ed0,0x29251e190e0db03e,0xfc6a10dd042a275e,0xe556977eb5927f51,0x3a68f873d10bc}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xed2,0x7e79,0xb784,0x90c5,0xe054,0xfc99,0x606e,0x59f4,0xb88f,0xb6ae,0x29bf,0xe3fc,0xa3cf,0x8c88,0xba94,0x4bf9,0xcb60,0xf771,0x31c0,0x7236,0x2401,0xa9e7,0xb9b8,0xfdbf,0x326e,0xb4d5,0x9bd6,0xc810,0xb7f0,0x661,0x1f62,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xed2,0x7e79,0xb784,0x90c5,0xe054,0xfc99,0x606e,0x59f4,0xb88f,0xb6ae,0x29bf,0xe3fc,0xa3cf,0x8c88,0xba94,0x4bf9,0xcb60,0xf771,0x31c0,0x7236,0x2401,0xa9e7,0xb9b8,0xfdbf,0x326e,0xb4d5,0x9bd6,0xc810,0xb7f0,0x661,0x1f62,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7e790ed2,0x90c5b784,0xfc99e054,0x59f4606e,0xb6aeb88f,0xe3fc29bf,0x8c88a3cf,0x4bf9ba94,0xf771cb60,0x723631c0,0xa9e72401,0xfdbfb9b8,0xb4d5326e,0xc8109bd6,0x661b7f0,0xf1f62}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7e790ed2,0x90c5b784,0xfc99e054,0x59f4606e,0xb6aeb88f,0xe3fc29bf,0x8c88a3cf,0x4bf9ba94,0xf771cb60,0x723631c0,0xa9e72401,0xfdbfb9b8,0xb4d5326e,0xc8109bd6,0x661b7f0,0xf1f62}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x90c5b7847e790ed2,0x59f4606efc99e054,0xe3fc29bfb6aeb88f,0x4bf9ba948c88a3cf,0x723631c0f771cb60,0xfdbfb9b8a9e72401,0xc8109bd6b4d5326e,0xf1f620661b7f0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x90c5b7847e790ed2,0x59f4606efc99e054,0xe3fc29bfb6aeb88f,0x4bf9ba948c88a3cf,0x723631c0f771cb60,0xfdbfb9b8a9e72401,0xc8109bd6b4d5326e,0xf1f620661b7f0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb407,0xc1a7,0x6715,0xe1ad,0x31eb,0x15e8,0x1594,0xbd8e,0x3976,0x2bae,0x8f7a,0x1636,0x8d19,0x88d2,0x12ae,0x1627,0xf2b0,0x4868,0xdd78,0xfa51,0xfd6b,0xa1f3,0x7eb3,0x35d9,0xd826,0x2c09,0x12a8,0x54dd,0x87d6,0xc29c,0x5bcd,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xb407,0xc1a7,0x6715,0xe1ad,0x31eb,0x15e8,0x1594,0xbd8e,0x3976,0x2bae,0x8f7a,0x1636,0x8d19,0x88d2,0x12ae,0x1627,0xf2b0,0x4868,0xdd78,0xfa51,0xfd6b,0xa1f3,0x7eb3,0x35d9,0xd826,0x2c09,0x12a8,0x54dd,0x87d6,0xc29c,0x5bcd,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc1a7b407,0xe1ad6715,0x15e831eb,0xbd8e1594,0x2bae3976,0x16368f7a,0x88d28d19,0x162712ae,0x4868f2b0,0xfa51dd78,0xa1f3fd6b,0x35d97eb3,0x2c09d826,0x54dd12a8,0xc29c87d6,0x15bcd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc1a7b407,0xe1ad6715,0x15e831eb,0xbd8e1594,0x2bae3976,0x16368f7a,0x88d28d19,0x162712ae,0x4868f2b0,0xfa51dd78,0xa1f3fd6b,0x35d97eb3,0x2c09d826,0x54dd12a8,0xc29c87d6,0x15bcd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe1ad6715c1a7b407,0xbd8e159415e831eb,0x16368f7a2bae3976,0x162712ae88d28d19,0xfa51dd784868f2b0,0x35d97eb3a1f3fd6b,0x54dd12a82c09d826,0x15bcdc29c87d6}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe1ad6715c1a7b407,0xbd8e159415e831eb,0x16368f7a2bae3976,0x162712ae88d28d19,0xfa51dd784868f2b0,0x35d97eb3a1f3fd6b,0x54dd12a82c09d826,0x15bcdc29c87d6}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb08f,0x27af,0x9df5,0xaa33,0xce4f,0xb48b,0x54bd,0xeda3,0x4a76,0x59cb,0x8c4c,0x359b,0x912f,0x93be,0x9fdd,0x64b5,0x4fc1,0xf1f2,0xe1e6,0xd6da,0xd8a1,0xfbd5,0xef22,0x395,0x80ae,0x4a6d,0x6881,0x1aa9,0xef43,0x78c2,0x5970,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb08f,0x27af,0x9df5,0xaa33,0xce4f,0xb48b,0x54bd,0xeda3,0x4a76,0x59cb,0x8c4c,0x359b,0x912f,0x93be,0x9fdd,0x64b5,0x4fc1,0xf1f2,0xe1e6,0xd6da,0xd8a1,0xfbd5,0xef22,0x395,0x80ae,0x4a6d,0x6881,0x1aa9,0xef43,0x78c2,0x5970,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27afb08f,0xaa339df5,0xb48bce4f,0xeda354bd,0x59cb4a76,0x359b8c4c,0x93be912f,0x64b59fdd,0xf1f24fc1,0xd6dae1e6,0xfbd5d8a1,0x395ef22,0x4a6d80ae,0x1aa96881,0x78c2ef43,0xc5970}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27afb08f,0xaa339df5,0xb48bce4f,0xeda354bd,0x59cb4a76,0x359b8c4c,0x93be912f,0x64b59fdd,0xf1f24fc1,0xd6dae1e6,0xfbd5d8a1,0x395ef22,0x4a6d80ae,0x1aa96881,0x78c2ef43,0xc5970}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa339df527afb08f,0xeda354bdb48bce4f,0x359b8c4c59cb4a76,0x64b59fdd93be912f,0xd6dae1e6f1f24fc1,0x395ef22fbd5d8a1,0x1aa968814a6d80ae,0xc597078c2ef43}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa339df527afb08f,0xeda354bdb48bce4f,0x359b8c4c59cb4a76,0x64b59fdd93be912f,0xd6dae1e6f1f24fc1,0x395ef22fbd5d8a1,0x1aa968814a6d80ae,0xc597078c2ef43}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe7eb,0x27c8,0x739b,0x6eaa,0x7a17,0xf593,0xac1c,0x4a84,0x1a27,0x7771,0xe67e,0xea3d,0x4596,0xa34b,0x8edd,0xc51c,0x7c15,0xd1a1,0x2551,0x481b,0x402e,0xfed0,0x8b82,0x1eab,0xc98b,0x20fa,0x7143,0x6abf,0x463a,0x475f,0x510f,0x9}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe7eb,0x27c8,0x739b,0x6eaa,0x7a17,0xf593,0xac1c,0x4a84,0x1a27,0x7771,0xe67e,0xea3d,0x4596,0xa34b,0x8edd,0xc51c,0x7c15,0xd1a1,0x2551,0x481b,0x402e,0xfed0,0x8b82,0x1eab,0xc98b,0x20fa,0x7143,0x6abf,0x463a,0x475f,0x510f,0x9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27c8e7eb,0x6eaa739b,0xf5937a17,0x4a84ac1c,0x77711a27,0xea3de67e,0xa34b4596,0xc51c8edd,0xd1a17c15,0x481b2551,0xfed0402e,0x1eab8b82,0x20fac98b,0x6abf7143,0x475f463a,0x9510f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27c8e7eb,0x6eaa739b,0xf5937a17,0x4a84ac1c,0x77711a27,0xea3de67e,0xa34b4596,0xc51c8edd,0xd1a17c15,0x481b2551,0xfed0402e,0x1eab8b82,0x20fac98b,0x6abf7143,0x475f463a,0x9510f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6eaa739b27c8e7eb,0x4a84ac1cf5937a17,0xea3de67e77711a27,0xc51c8edda34b4596,0x481b2551d1a17c15,0x1eab8b82fed0402e,0x6abf714320fac98b,0x9510f475f463a}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6eaa739b27c8e7eb,0x4a84ac1cf5937a17,0xea3de67e77711a27,0xc51c8edda34b4596,0x481b2551d1a17c15,0x1eab8b82fed0402e,0x6abf714320fac98b,0x9510f475f463a}}}} #endif , #if 
0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9e28,0x9e31,0xdab6,0x138c,0xc3c0,0x5193,0x444d,0xb2b7,0xf371,0x5630,0xb08b,0xc700,0x2404,0x3f08,0xc3f,0xbd7c,0x963b,0xd892,0x7bb2,0x429d,0x19d8,0xf277,0x853d,0x9aac,0x9bfa,0x42cd,0xf5e8,0x9e40,0x8a41,0x15a8,0x9c23,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9e28,0x9e31,0xdab6,0x138c,0xc3c0,0x5193,0x444d,0xb2b7,0xf371,0x5630,0xb08b,0xc700,0x2404,0x3f08,0xc3f,0xbd7c,0x963b,0xd892,0x7bb2,0x429d,0x19d8,0xf277,0x853d,0x9aac,0x9bfa,0x42cd,0xf5e8,0x9e40,0x8a41,0x15a8,0x9c23,0x6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9e319e28,0x138cdab6,0x5193c3c0,0xb2b7444d,0x5630f371,0xc700b08b,0x3f082404,0xbd7c0c3f,0xd892963b,0x429d7bb2,0xf27719d8,0x9aac853d,0x42cd9bfa,0x9e40f5e8,0x15a88a41,0x69c23}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9e319e28,0x138cdab6,0x5193c3c0,0xb2b7444d,0x5630f371,0xc700b08b,0x3f082404,0xbd7c0c3f,0xd892963b,0x429d7bb2,0xf27719d8,0x9aac853d,0x42cd9bfa,0x9e40f5e8,0x15a88a41,0x69c23}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x138cdab69e319e28,0xb2b7444d5193c3c0,0xc700b08b5630f371,0xbd7c0c3f3f082404,0x429d7bb2d892963b,0x9aac853df27719d8,0x9e40f5e842cd9bfa,0x69c2315a88a41}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x138cdab69e319e28,0xb2b7444d5193c3c0,0xc700b08b5630f371,0xbd7c0c3f3f082404,0x429d7bb2d892963b,0x9aac853df27719d8,0x9e40f5e842cd9bfa,0x69c2315a88a41}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x66d1,0x8ee,0x9219,0x9d61,0x13a4,0xfc63,0xc3ee,0xdf2a,0x1353,0x2ef,0xc391,0x8ad8,0x953b,0xb014,0x1029,0xa4b2,0x61a3,0xfc07,0xf3a8,0x199c,0xe6c8,0x6a41,0x6eb7,0xb459,0xa187,0x2f4e,0x9ec3,0x8b4e,0x5321,0x38b,0x5b21,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x66d1,0x8ee,0x9219,0x9d61,0x13a4,0xfc63,0xc3ee,0xdf2a,0x1353,0x2ef,0xc391,0x8ad8,0x953b,0xb014,0x1029,0xa4b2,0x61a3,0xfc07,0xf3a8,0x199c,0xe6c8,0x6a41,0x6eb7,0xb459,0xa187,0x2f4e,0x9ec3,0x8b4e,0x5321,0x38b,0x5b21,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8ee66d1,0x9d619219,0xfc6313a4,0xdf2ac3ee,0x2ef1353,0x8ad8c391,0xb014953b,0xa4b21029,0xfc0761a3,0x199cf3a8,0x6a41e6c8,0xb4596eb7,0x2f4ea187,0x8b4e9ec3,0x38b5321,0x35b21}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8ee66d1,0x9d619219,0xfc6313a4,0xdf2ac3ee,0x2ef1353,0x8ad8c391,0xb014953b,0xa4b21029,0xfc0761a3,0x199cf3a8,0x6a41e6c8,0xb4596eb7,0x2f4ea187,0x8b4e9ec3,0x38b5321,0x35b21}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d61921908ee66d1,0xdf2ac3eefc6313a4,0x8ad8c39102ef1353,0xa4b21029b014953b,0x199cf3a8fc0761a3,0xb4596eb76a41e6c8,0x8b4e9ec32f4ea187,0x35b21038b5321}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9d61921908ee66d1,0xdf2ac3eefc6313a4,0x8ad8c39102ef1353,0xa4b21029b014953b,0x199cf3a8fc0761a3,0xb4596eb76a41e6c8,0x8b4e9ec32f4ea187,0x35b21038b5321}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1815,0xd837,0x8c64,0x9155,0x85e8,0xa6c,0x53e3,0xb57b,0xe5d8,0x888e,0x1981,0x15c2,0xba69,0x5cb4,0x7122,0x3ae3,0x83ea,0x2e5e,0xdaae,0xb7e4,0xbfd1,0x12f,0x747d,0xe154,0x3674,0xdf05,0x8ebc,0x9540,0xb9c5,0xb8a0,0xaef0,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x1815,0xd837,0x8c64,0x9155,0x85e8,0xa6c,0x53e3,0xb57b,0xe5d8,0x888e,0x1981,0x15c2,0xba69,0x5cb4,0x7122,0x3ae3,0x83ea,0x2e5e,0xdaae,0xb7e4,0xbfd1,0x12f,0x747d,0xe154,0x3674,0xdf05,0x8ebc,0x9540,0xb9c5,0xb8a0,0xaef0,0x6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8371815,0x91558c64,0xa6c85e8,0xb57b53e3,0x888ee5d8,0x15c21981,0x5cb4ba69,0x3ae37122,0x2e5e83ea,0xb7e4daae,0x12fbfd1,0xe154747d,0xdf053674,0x95408ebc,0xb8a0b9c5,0x6aef0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8371815,0x91558c64,0xa6c85e8,0xb57b53e3,0x888ee5d8,0x15c21981,0x5cb4ba69,0x3ae37122,0x2e5e83ea,0xb7e4daae,0x12fbfd1,0xe154747d,0xdf053674,0x95408ebc,0xb8a0b9c5,0x6aef0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x91558c64d8371815,0xb57b53e30a6c85e8,0x15c21981888ee5d8,0x3ae371225cb4ba69,0xb7e4daae2e5e83ea,0xe154747d012fbfd1,0x95408ebcdf053674,0x6aef0b8a0b9c5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x91558c64d8371815,0xb57b53e30a6c85e8,0x15c21981888ee5d8,0x3ae371225cb4ba69,0xb7e4daae2e5e83ea,0xe154747d012fbfd1,0x95408ebcdf053674,0x6aef0b8a0b9c5}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x4f71,0xd850,0x620a,0x55cc,0x31b0,0x4b74,0xab42,0x125c,0xb589,0xa634,0x73b3,0xca64,0x6ed0,0x6c41,0x6022,0x9b4a,0xb03e,0xe0d,0x1e19,0x2925,0x275e,0x42a,0x10dd,0xfc6a,0x7f51,0xb592,0x977e,0xe556,0x10bc,0x873d,0xa68f,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x4f71,0xd850,0x620a,0x55cc,0x31b0,0x4b74,0xab42,0x125c,0xb589,0xa634,0x73b3,0xca64,0x6ed0,0x6c41,0x6022,0x9b4a,0xb03e,0xe0d,0x1e19,0x2925,0x275e,0x42a,0x10dd,0xfc6a,0x7f51,0xb592,0x977e,0xe556,0x10bc,0x873d,0xa68f,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8504f71,0x55cc620a,0x4b7431b0,0x125cab42,0xa634b589,0xca6473b3,0x6c416ed0,0x9b4a6022,0xe0db03e,0x29251e19,0x42a275e,0xfc6a10dd,0xb5927f51,0xe556977e,0x873d10bc,0x3a68f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd8504f71,0x55cc620a,0x4b7431b0,0x125cab42,0xa634b589,0xca6473b3,0x6c416ed0,0x9b4a6022,0xe0db03e,0x29251e19,0x42a275e,0xfc6a10dd,0xb5927f51,0xe556977e,0x873d10bc,0x3a68f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55cc620ad8504f71,0x125cab424b7431b0,0xca6473b3a634b589,0x9b4a60226c416ed0,0x29251e190e0db03e,0xfc6a10dd042a275e,0xe556977eb5927f51,0x3a68f873d10bc}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x55cc620ad8504f71,0x125cab424b7431b0,0xca6473b3a634b589,0x9b4a60226c416ed0,0x29251e190e0db03e,0xfc6a10dd042a275e,0xe556977eb5927f51,0x3a68f873d10bc}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xed2,0x7e79,0xb784,0x90c5,0xe054,0xfc99,0x606e,0x59f4,0xb88f,0xb6ae,0x29bf,0xe3fc,0xa3cf,0x8c88,0xba94,0x4bf9,0xcb60,0xf771,0x31c0,0x7236,0x2401,0xa9e7,0xb9b8,0xfdbf,0x326e,0xb4d5,0x9bd6,0xc810,0xb7f0,0x661,0x1f62,0xf}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xed2,0x7e79,0xb784,0x90c5,0xe054,0xfc99,0x606e,0x59f4,0xb88f,0xb6ae,0x29bf,0xe3fc,0xa3cf,0x8c88,0xba94,0x4bf9,0xcb60,0xf771,0x31c0,0x7236,0x2401,0xa9e7,0xb9b8,0xfdbf,0x326e,0xb4d5,0x9bd6,0xc810,0xb7f0,0x661,0x1f62,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x7e790ed2,0x90c5b784,0xfc99e054,0x59f4606e,0xb6aeb88f,0xe3fc29bf,0x8c88a3cf,0x4bf9ba94,0xf771cb60,0x723631c0,0xa9e72401,0xfdbfb9b8,0xb4d5326e,0xc8109bd6,0x661b7f0,0xf1f62}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7e790ed2,0x90c5b784,0xfc99e054,0x59f4606e,0xb6aeb88f,0xe3fc29bf,0x8c88a3cf,0x4bf9ba94,0xf771cb60,0x723631c0,0xa9e72401,0xfdbfb9b8,0xb4d5326e,0xc8109bd6,0x661b7f0,0xf1f62}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x90c5b7847e790ed2,0x59f4606efc99e054,0xe3fc29bfb6aeb88f,0x4bf9ba948c88a3cf,0x723631c0f771cb60,0xfdbfb9b8a9e72401,0xc8109bd6b4d5326e,0xf1f620661b7f0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x90c5b7847e790ed2,0x59f4606efc99e054,0xe3fc29bfb6aeb88f,0x4bf9ba948c88a3cf,0x723631c0f771cb60,0xfdbfb9b8a9e72401,0xc8109bd6b4d5326e,0xf1f620661b7f0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb407,0xc1a7,0x6715,0xe1ad,0x31eb,0x15e8,0x1594,0xbd8e,0x3976,0x2bae,0x8f7a,0x1636,0x8d19,0x88d2,0x12ae,0x1627,0xf2b0,0x4868,0xdd78,0xfa51,0xfd6b,0xa1f3,0x7eb3,0x35d9,0xd826,0x2c09,0x12a8,0x54dd,0x87d6,0xc29c,0x5bcd,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb407,0xc1a7,0x6715,0xe1ad,0x31eb,0x15e8,0x1594,0xbd8e,0x3976,0x2bae,0x8f7a,0x1636,0x8d19,0x88d2,0x12ae,0x1627,0xf2b0,0x4868,0xdd78,0xfa51,0xfd6b,0xa1f3,0x7eb3,0x35d9,0xd826,0x2c09,0x12a8,0x54dd,0x87d6,0xc29c,0x5bcd,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc1a7b407,0xe1ad6715,0x15e831eb,0xbd8e1594,0x2bae3976,0x16368f7a,0x88d28d19,0x162712ae,0x4868f2b0,0xfa51dd78,0xa1f3fd6b,0x35d97eb3,0x2c09d826,0x54dd12a8,0xc29c87d6,0x15bcd}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc1a7b407,0xe1ad6715,0x15e831eb,0xbd8e1594,0x2bae3976,0x16368f7a,0x88d28d19,0x162712ae,0x4868f2b0,0xfa51dd78,0xa1f3fd6b,0x35d97eb3,0x2c09d826,0x54dd12a8,0xc29c87d6,0x15bcd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe1ad6715c1a7b407,0xbd8e159415e831eb,0x16368f7a2bae3976,0x162712ae88d28d19,0xfa51dd784868f2b0,0x35d97eb3a1f3fd6b,0x54dd12a82c09d826,0x15bcdc29c87d6}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe1ad6715c1a7b407,0xbd8e159415e831eb,0x16368f7a2bae3976,0x162712ae88d28d19,0xfa51dd784868f2b0,0x35d97eb3a1f3fd6b,0x54dd12a82c09d826,0x15bcdc29c87d6}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb08f,0x27af,0x9df5,0xaa33,0xce4f,0xb48b,0x54bd,0xeda3,0x4a76,0x59cb,0x8c4c,0x359b,0x912f,0x93be,0x9fdd,0x64b5,0x4fc1,0xf1f2,0xe1e6,0xd6da,0xd8a1,0xfbd5,0xef22,0x395,0x80ae,0x4a6d,0x6881,0x1aa9,0xef43,0x78c2,0x5970,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb08f,0x27af,0x9df5,0xaa33,0xce4f,0xb48b,0x54bd,0xeda3,0x4a76,0x59cb,0x8c4c,0x359b,0x912f,0x93be,0x9fdd,0x64b5,0x4fc1,0xf1f2,0xe1e6,0xd6da,0xd8a1,0xfbd5,0xef22,0x395,0x80ae,0x4a6d,0x6881,0x1aa9,0xef43,0x78c2,0x5970,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27afb08f,0xaa339df5,0xb48bce4f,0xeda354bd,0x59cb4a76,0x359b8c4c,0x93be912f,0x64b59fdd,0xf1f24fc1,0xd6dae1e6,0xfbd5d8a1,0x395ef22,0x4a6d80ae,0x1aa96881,0x78c2ef43,0xc5970}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x27afb08f,0xaa339df5,0xb48bce4f,0xeda354bd,0x59cb4a76,0x359b8c4c,0x93be912f,0x64b59fdd,0xf1f24fc1,0xd6dae1e6,0xfbd5d8a1,0x395ef22,0x4a6d80ae,0x1aa96881,0x78c2ef43,0xc5970}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa339df527afb08f,0xeda354bdb48bce4f,0x359b8c4c59cb4a76,0x64b59fdd93be912f,0xd6dae1e6f1f24fc1,0x395ef22fbd5d8a1,0x1aa968814a6d80ae,0xc597078c2ef43}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xaa339df527afb08f,0xeda354bdb48bce4f,0x359b8c4c59cb4a76,0x64b59fdd93be912f,0xd6dae1e6f1f24fc1,0x395ef22fbd5d8a1,0x1aa968814a6d80ae,0xc597078c2ef43}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}}} #endif }, { 
#if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb83a,0x5e7a,0x2c9b,0xd483,0xeff9,0x71e9,0x4a21,0x2eae,0x921,0xbb26,0x6bf2,0xb038,0xeac9,0xc05a,0xd498,0x34fb,0x7ca,0xaae9,0x2674,0x81de,0x471f,0x7dbe,0x88c9,0xa354,0x9f03,0x5301,0x9acc,0x7c82,0xc479,0x732,0xdc7b,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xb83a,0x5e7a,0x2c9b,0xd483,0xeff9,0x71e9,0x4a21,0x2eae,0x921,0xbb26,0x6bf2,0xb038,0xeac9,0xc05a,0xd498,0x34fb,0x7ca,0xaae9,0x2674,0x81de,0x471f,0x7dbe,0x88c9,0xa354,0x9f03,0x5301,0x9acc,0x7c82,0xc479,0x732,0xdc7b,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5e7ab83a,0xd4832c9b,0x71e9eff9,0x2eae4a21,0xbb260921,0xb0386bf2,0xc05aeac9,0x34fbd498,0xaae907ca,0x81de2674,0x7dbe471f,0xa35488c9,0x53019f03,0x7c829acc,0x732c479,0x8dc7b}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5e7ab83a,0xd4832c9b,0x71e9eff9,0x2eae4a21,0xbb260921,0xb0386bf2,0xc05aeac9,0x34fbd498,0xaae907ca,0x81de2674,0x7dbe471f,0xa35488c9,0x53019f03,0x7c829acc,0x732c479,0x8dc7b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd4832c9b5e7ab83a,0x2eae4a2171e9eff9,0xb0386bf2bb260921,0x34fbd498c05aeac9,0x81de2674aae907ca,0xa35488c97dbe471f,0x7c829acc53019f03,0x8dc7b0732c479}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd4832c9b5e7ab83a,0x2eae4a2171e9eff9,0xb0386bf2bb260921,0x34fbd498c05aeac9,0x81de2674aae907ca,0xa35488c97dbe471f,0x7c829acc53019f03,0x8dc7b0732c479}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 31, ._mp_d = (mp_limb_t[]) {0x4733,0xaeba,0xf3d4,0x84bf,0x453a,0xa71a,0xe0fa,0x4604,0xf02b,0x9bc2,0xb114,0x5fc5,0x5f8d,0x1a8d,0x2302,0x175d,0x3655,0x8351,0x51b,0x698c,0xc745,0x8c83,0xdd6a,0xdd4b,0x682f,0x80b7,0xd1fc,0xe320,0xca30,0xc1d3,0xc365}}} +{{{._mp_alloc = 0, ._mp_size = 31, ._mp_d = (mp_limb_t[]) {0x4733,0xaeba,0xf3d4,0x84bf,0x453a,0xa71a,0xe0fa,0x4604,0xf02b,0x9bc2,0xb114,0x5fc5,0x5f8d,0x1a8d,0x2302,0x175d,0x3655,0x8351,0x51b,0x698c,0xc745,0x8c83,0xdd6a,0xdd4b,0x682f,0x80b7,0xd1fc,0xe320,0xca30,0xc1d3,0xc365}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xaeba4733,0x84bff3d4,0xa71a453a,0x4604e0fa,0x9bc2f02b,0x5fc5b114,0x1a8d5f8d,0x175d2302,0x83513655,0x698c051b,0x8c83c745,0xdd4bdd6a,0x80b7682f,0xe320d1fc,0xc1d3ca30,0xc365}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xaeba4733,0x84bff3d4,0xa71a453a,0x4604e0fa,0x9bc2f02b,0x5fc5b114,0x1a8d5f8d,0x175d2302,0x83513655,0x698c051b,0x8c83c745,0xdd4bdd6a,0x80b7682f,0xe320d1fc,0xc1d3ca30,0xc365}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x84bff3d4aeba4733,0x4604e0faa71a453a,0x5fc5b1149bc2f02b,0x175d23021a8d5f8d,0x698c051b83513655,0xdd4bdd6a8c83c745,0xe320d1fc80b7682f,0xc365c1d3ca30}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x84bff3d4aeba4733,0x4604e0faa71a453a,0x5fc5b1149bc2f02b,0x175d23021a8d5f8d,0x698c051b83513655,0xdd4bdd6a8c83c745,0xe320d1fc80b7682f,0xc365c1d3ca30}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe32c,0x5173,0xdcb0,0xe05d,0x3a7e,0x6e8c,0xfd38,0xbed7,0x5fe0,0xa986,0x26f1,0xedf0,0x8fc7,0x1dbc,0xa48e,0x2e70,0x6648,0xe767,0xe8c3,0xf05b,0x26aa,0x63b6,0xf8f6,0x5304,0x7042,0x7c93,0x54a2,0xe675,0xd3ea,0x2b1,0xb36e,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe32c,0x5173,0xdcb0,0xe05d,0x3a7e,0x6e8c,0xfd38,0xbed7,0x5fe0,0xa986,0x26f1,0xedf0,0x8fc7,0x1dbc,0xa48e,0x2e70,0x6648,0xe767,0xe8c3,0xf05b,0x26aa,0x63b6,0xf8f6,0x5304,0x7042,0x7c93,0x54a2,0xe675,0xd3ea,0x2b1,0xb36e,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x5173e32c,0xe05ddcb0,0x6e8c3a7e,0xbed7fd38,0xa9865fe0,0xedf026f1,0x1dbc8fc7,0x2e70a48e,0xe7676648,0xf05be8c3,0x63b626aa,0x5304f8f6,0x7c937042,0xe67554a2,0x2b1d3ea,0x8b36e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5173e32c,0xe05ddcb0,0x6e8c3a7e,0xbed7fd38,0xa9865fe0,0xedf026f1,0x1dbc8fc7,0x2e70a48e,0xe7676648,0xf05be8c3,0x63b626aa,0x5304f8f6,0x7c937042,0xe67554a2,0x2b1d3ea,0x8b36e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe05ddcb05173e32c,0xbed7fd386e8c3a7e,0xedf026f1a9865fe0,0x2e70a48e1dbc8fc7,0xf05be8c3e7676648,0x5304f8f663b626aa,0xe67554a27c937042,0x8b36e02b1d3ea}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe05ddcb05173e32c,0xbed7fd386e8c3a7e,0xedf026f1a9865fe0,0x2e70a48e1dbc8fc7,0xf05be8c3e7676648,0x5304f8f663b626aa,0xe67554a27c937042,0x8b36e02b1d3ea}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x47c6,0xa185,0xd364,0x2b7c,0x1006,0x8e16,0xb5de,0xd151,0xf6de,0x44d9,0x940d,0x4fc7,0x1536,0x3fa5,0x2b67,0xcb04,0xf835,0x5516,0xd98b,0x7e21,0xb8e0,0x8241,0x7736,0x5cab,0x60fc,0xacfe,0x6533,0x837d,0x3b86,0xf8cd,0x2384,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x47c6,0xa185,0xd364,0x2b7c,0x1006,0x8e16,0xb5de,0xd151,0xf6de,0x44d9,0x940d,0x4fc7,0x1536,0x3fa5,0x2b67,0xcb04,0xf835,0x5516,0xd98b,0x7e21,0xb8e0,0x8241,0x7736,0x5cab,0x60fc,0xacfe,0x6533,0x837d,0x3b86,0xf8cd,0x2384,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa18547c6,0x2b7cd364,0x8e161006,0xd151b5de,0x44d9f6de,0x4fc7940d,0x3fa51536,0xcb042b67,0x5516f835,0x7e21d98b,0x8241b8e0,0x5cab7736,0xacfe60fc,0x837d6533,0xf8cd3b86,0x72384}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa18547c6,0x2b7cd364,0x8e161006,0xd151b5de,0x44d9f6de,0x4fc7940d,0x3fa51536,0xcb042b67,0x5516f835,0x7e21d98b,0x8241b8e0,0x5cab7736,0xacfe60fc,0x837d6533,0xf8cd3b86,0x72384}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2b7cd364a18547c6,0xd151b5de8e161006,0x4fc7940d44d9f6de,0xcb042b673fa51536,0x7e21d98b5516f835,0x5cab77368241b8e0,0x837d6533acfe60fc,0x72384f8cd3b86}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2b7cd364a18547c6,0xd151b5de8e161006,0x4fc7940d44d9f6de,0xcb042b673fa51536,0x7e21d98b5516f835,0x5cab77368241b8e0,0x837d6533acfe60fc,0x72384f8cd3b86}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x116d, 0x165f, 0x7d3, 0x488, 0xe08, 0xb12, 0x2e0, 0x18b4, 0xdbb, 0x181e, 0xe50, 0x19b8, 0xc17, 0x1c82, 0x1cf6, 0x7b9, 0xebb, 0x1c0a, 0x183a, 0x1f42, 0x1a3e, 0x176b, 0x19fa, 0x7e4, 0x1646, 0x1dc5, 0x1e9d, 0x4b3, 0x18df, 0xf2d, 0xe2e, 0x2e3, 0x903, 0x19ef, 0x1f94, 0x237, 0x18ef, 0x26c, 0x12f} @@ -2488,27 +2488,27 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x51d, 0x1394, 0xcca, 0x1568, 0x1790, 0x11d6, 0x18aa, 0xe65, 0x1e8e, 0x4fe, 0xab9, 0x1496, 0x167d, 0x1b42, 0x1f85, 0x1d7a, 0x8c4, 0x17ea, 0x1269, 0x16, 0x1fbf, 0x8b5, 0x6f4, 0x1202, 0x17c4, 0x427, 0x1273, 0x14f, 0x49c, 0xfba, 0x1b3b, 0x13cd, 0x10ee, 0x634, 0x10ae, 0x2c4, 0x10b4, 0x1377, 0xfe} +{0x1e97, 0x1f23, 0x161, 0x7b2, 0x1221, 0x1d36, 0x14f1, 0xaa0, 0xce3, 0x1f6c, 0xeaf, 0x549, 0xa24, 0xe15, 0x1862, 0x1dba, 0xc75, 0xf1d, 0x15f9, 0x50d, 0xa99, 0x97b, 0xc21, 0x1549, 0x1c88, 0xfbe, 0xe33, 0xb27, 0x1dae, 0xb00, 0x82f, 0x44a, 0x371, 0x5c0, 0x1174, 0x1b28, 0xa0b, 0x9bd, 0x206} #elif RADIX == 32 -{0x28e92dc, 0x10cca9ca, 0x15af2156, 0x132e2aa3, 
0x4fef473, 0x1692cab9, 0x2ed0acf, 0xc4ebd7e, 0x4d37ea4, 0xfefc02d, 0x237a22d, 0x4f7c490, 0x53e4e64, 0x17dd1270, 0x1d3cdd9d, 0xb8c690e, 0x5a0b121, 0x1bc} +{0x1f4ba664, 0x4161f91, 0xda4427b, 0x15053c7a, 0x1f6c671a, 0x10a92eaf, 0x11385544, 0x75edd61, 0xbf2f1d6, 0x1aa64a1b, 0x9610a5e, 0x17dc88aa, 0xc9dc66f, 0x158076b9, 0x244a417, 0x1d0b8037, 0x105eca22, 0x7ea} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x215686654e50a3a4, 0xfbd1ce65c55475af, 0x2ed0acfb49655c93, 0x6934dfa9189d7afc, 0x920237a22d7f7e01, 0x893814f939909ef8, 0x18d21dd3cdd9dbee, 0x134dde1682c4857} +{0x427b20b0fc8fd2e9, 0xb19c6aa0a78f4da4, 0x13855448549757fd, 0xdafcbc758ebdbac3, 0x1549610a5ed53250, 0x3b5cb27719befb91, 0x17006e244a417ac0, 0x1026f5417b288ba} #else -{0xad0cca9ca14749, 0x1ce65c55475af21, 0x7da4b2ae49fde8, 0x46275ebf0bb42b, 0x1afefc02d269bf5, 0x109ef8920237a22, 0xdf7449c0a7c9cc, 0x15c6348774f3767, 0xb9bbc2d05890} +{0xf64161f91fa5d3, 0x6aa0a78f4da442, 0x242a4babfed8ce, 0x163af6eb0c4e155, 0x1daa64a1b5f978e, 0x1efb911549610a5, 0x1d601dae593b8cd, 0xe85c01b8912905, 0x54dea82f6511} #endif #endif , #if 0 #elif RADIX == 16 -{0x17ab, 0x1e1a, 0x1bfe, 0x1f73, 0x1eb9, 0xf30, 0x1cca, 0x1aaf, 0xbea, 0xa1b, 0xb73, 0x86d, 0x1c13, 0x1c31, 0x1e6e, 0x1fbf, 0x968, 0x10f0, 0xb53, 0x1418, 0x11c6, 0x65f, 0x188, 0x2c7, 0x79b, 0xa9, 0xa92, 0x12b0, 0x1b53, 0x1564, 0xfa7, 0x1fd7, 0xa5b, 0xb32, 0x1bc8, 0xc90, 0x11ee, 0x1f6, 0x3f2} +{0xcc, 0x1cb1, 0x706, 0x1f0b, 0xa79, 0xd89, 0xd1f, 0x1067, 0x1c50, 0x1e70, 0x41c, 0x1ce8, 0xd29, 0x7c7, 0x733, 0x460, 0x1e22, 0xe0b, 0x7f6, 0x1387, 0xe84, 0x273, 0x13e1, 0x1f1d, 0x1643, 0x1f1a, 0x3e, 0x7b7, 0xecf, 0x1578, 0x357, 0xaf4, 0x1f6c, 0x4c8, 0x11b9, 0x866, 0x80a, 0x13e2, 0x499} #elif RADIX == 32 -{0xbd5cad1, 0x7bfef0d, 0xc3d73f7, 0x157f329e, 0xa1b5f56, 0xd0dab73, 0x1770c782, 0x168fdff9, 0x16a70f04, 0x1c71a830, 0x70c4197, 0x15279b16, 0xac15240, 0x1ab26d4e, 0x17fd77d3, 0x121664a5, 0xf732437, 0xa34} +{0x1066573b, 0x16706e58, 0x254f3f0, 0x33b47db, 0x1e70e284, 0x79d041c, 0x199f1da5, 0x222301c, 0xfece0bf, 0x1ba1270e, 0x1d9f089c, 0x35643f8, 0x1edc07df, 0x1abc3b3c, 0x18af41ab, 0xe4991f6, 0x5219a3, 0x292} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x73f73dff786af572, 0x6d7d5aafe653cc3d, 0x770c782686d5b9a8, 0x85a9c3c12d1fbff3, 0x62c70c4197e38d41, 0x36a72b054902a4f3, 0x2cc94b7fd77d3d59, 0x1307da3dcc90de4} +{0xf3f0b38372c41995, 0xc38a106768fb6254, 0x99f1da53ce820e79, 0x73fb382fc4446039, 0x7f1d9f089cdd0938, 0x1d9e7b701f7c6ac8, 0x9323ed8af41abd5e, 0x15cf890148668dc} #else -{0x1ee7bfef0d5eae5, 0x15aafe653cc3d73, 0x13436adcd436be, 0x4b47effcddc31e, 0xfc71a830b53878, 0x2a4f362c70c419, 0x1eac9b539582a48, 0x190b3252dff5df4, 0xb0fb47b9921b} +{0x1e16706e588332b, 0x106768fb6254f3, 0x129e741073ce1c5, 0x1f111180e667c76, 0x19ba1270e7f6705, 0x1c6ac87f1d9f089, 0x1eaf0ecf3db80fb, 0x1724c8fb62bd06a, 0x109f120290cd1} #endif #endif }, { @@ -2540,27 +2540,27 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}, {{ #if 0 #elif RADIX == 16 -{0x166f, 0x4b7, 0x1268, 0x18f5, 0x10a9, 0x17ea, 0x105e, 0x1090, 0x1c31, 0x624, 0xec6, 0xea1, 0x17d2, 0xf55, 0x10d3, 0x8fb, 0x9ab, 0x1ae2, 0x952, 0xcab, 0x100d, 0x702, 0xc4d, 0x1387, 0x344, 0xdaf, 0x1566, 0xf8c, 0x1e1c, 0x6f1, 0x1af9, 0xf1, 0xd6d, 0xa06, 0xb5c, 0x62c, 0x2e9, 0x1131, 0x683} +{0xd3d, 0x1bb8, 0x7b6, 0x2b7, 0x1f97, 0xc1a, 0x13ef, 0x6ac, 0xf50, 0x12de, 0xd45, 0x16d4, 0x69c, 0x16a8, 0xde4, 0xbd6, 0x14ea, 0x1d58, 0x193c, 0x160b, 0x1fc5, 0x20b, 0x1376, 0xbbb, 0x732, 0x8f8, 0x10f6, 0x1fef, 0xe7b, 0xb28, 0x10ba, 0x953, 0x1cfe, 0x1437, 0x1422, 
0x178b, 0x1524, 0x590, 0x334} #elif RADIX == 32 -{0x1b37fb85, 0xb26825b, 0x1aa1538f, 0x48417af, 0x624e18c, 0x9d42ec6, 0x9bd56fa, 0x1ab47dc3, 0x12a5ae24, 0x14035956, 0x76269c0, 0x15e3449c, 0x1e32accd, 0x1378f871, 0x1a0f1d7c, 0x17140cd6, 0x17498b16, 0x608} +{0x69ebcc0, 0xe7b6ddc, 0x6bf2e2b, 0x1564fbd8, 0x12de7a81, 0x12da8d45, 0x125aa0d3, 0xea5eb37, 0x1279d58a, 0x1ff16c17, 0x1b9bb082, 0x1f07325d, 0x1fbe1ec8, 0x59439ef, 0x1c95385d, 0x8a86fcf, 0x925e2e8, 0xc85} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x538f593412decdfe, 0x9386309082f5faa1, 0x9bd56fa4ea176318, 0xb4a96b893568fb86, 0x93876269c0a01aca, 0x7c38f8cab336bc68, 0x2819ada0f1d7c9bc, 0x17c4c45d262c5ae} +{0x2e2b73db6ee1a7af, 0x79ea06ac9f7b06bf, 0x25aa0d396d46a2cb, 0xbc9e75629d4bd66f, 0x4bbb9bb082ff8b60, 0x1cf7fef87b23e0e6, 0x50df9fc95385d2ca, 0x51642a4978ba11} #else -{0x11eb26825bd9bfd, 0x309082f5faa153, 0x1d2750bb18c49c3, 0x4d5a3ee1a6f55b, 0x14035956952d71, 0x16bc6893876269c, 0x4de3e1c7c65599, 0xb8a066b683c75f, 0x148988ba4c58b} +{0x56e7b6ddc34f5e, 0x6ac9f7b06bf2e, 0x9cb6a35165bcf5, 0xa752f59bc96a83, 0x5ff16c1793ceac, 0x3e0e64bbb9bb08, 0x9650e7bff7c3d9, 0x45437e7f254e17, 0xa2c85492f174} #endif #endif , #if 0 #elif RADIX == 16 -{0x826, 0x1efe, 0xa95, 0x174d, 0x11b5, 0x1184, 0x1d4, 0x1024, 0x1d44, 0x349, 0x83c, 0x665, 0x4a2, 0x1288, 0x473, 0xa16, 0xe54, 0xafc, 0x6e2, 0x13f1, 0x217, 0x11e4, 0x1988, 0xe26, 0xd9a, 0x168f, 0x3d, 0x1436, 0x311, 0x148d, 0x168f, 0x1ad8, 0x1156, 0xb8, 0x193f, 0x1655, 0x279, 0x5cd, 0x65e} +{0xb59, 0x1fb4, 0x1dac, 0x52d, 0x794, 0x1254, 0x1f9f, 0xdba, 0x151d, 0x1f01, 0x7f7, 0xb2b, 0x7e4, 0x1b36, 0x912, 0x1366, 0x1a04, 0x8ed, 0x1e58, 0x18f0, 0xffd, 0x455, 0xba9, 0x16d, 0x155f, 0x1198, 0x1264, 0x158b, 0x766, 0x66e, 0x1403, 0x15fd, 0xe0e, 0x1368, 0x9e6, 0x4af, 0x1fba, 0x1047, 0x464} #elif RADIX == 32 -{0x41378c1, 0x1aa95f7f, 0x1236b74, 0x1207523, 0x349ea24, 0x8cca83c, 0x19ca2094, 0x5450b11, 0xdc4afc7, 0x85e7e2, 0x6cc4479, 0x11ed9a71, 0x10d807b6, 0x1a468c46, 0xdad8b47, 0xfc17115, 0x13cd9572, 0xe8} +{0x5acd34c, 0x1bdacfda, 0x150f2852, 0xdd7e7e4, 0x1f01a8eb, 0x116567f7, 0x96cd8fc, 0x49b324, 0x1cb08edd, 0xbff71e1, 0xd5d4915, 0x13155f0b, 0x162e4c91, 0x13371d9a, 0x1d5fda01, 0x19a6d0e0, 0x1dd12bd3, 0x3f} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6b74d54afbf904de, 0x27a890240ea46123, 0x9ca2094466541e0d, 0x13712bf1ca8a1623, 0x4e26cc4479042f3f, 0x462343601eda3db3, 0x82e22adad8b47d23, 0x517344f3655c9f} +{0x2852ded67ed16b34, 0x6a3adbafcfc950f, 0x96cd8fc8b2b3fbfc, 0xf2c23b740936648, 0xe16d5d49155ffb8f, 0x8ecd58b9324662ab, 0x4da1c1d5fda0199b, 0x16411ff744af4f3} #else -{0xe9aa95f7f209bc, 0x90240ea461236b, 0xa2332a0f0693d4, 0x72a28588e72882, 0x12085e7e26e257e, 0x1a3db34e26cc447, 0x1e91a311a1b00f6, 0x7e0b88ab6b62d1, 0xa2e689e6cab9} +{0xa5bdacfda2d669, 0x1adbafcfc950f28, 0x1e45959fdfe0351, 0x1d024d99225b363, 0xabff71e1e58476, 0x662abe16d5d491, 0xccdc766ac5c992, 0x1cd36870757f680, 0x11823fee895e9} #endif #endif }, { @@ -2592,27 +2592,27 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}, {{ #if 0 #elif RADIX == 16 -{0x1aab, 0xe01, 0x1bf3, 0x122d, 0xd71, 0x34e, 0x153b, 0x1444, 0x1d19, 0x1165, 0x1496, 0x568, 0x12d4, 0x105c, 0x1129, 0x2c7, 0x1706, 0x359, 0x1a4f, 0x114, 0x758, 0x1780, 0x1617, 0x1485, 0x1147, 0xa4f, 0x1f77, 0xf13, 0x1547, 0x103c, 0x352, 0x125d, 0xb1e, 0x1526, 0x1708, 0xfb5, 0x17bf, 0x1d55, 0x6bc} +{0x1156, 0x273, 0x1153, 0x89b, 0xc67, 0x9dc, 0x14b5, 0x1d27, 0x1c5e, 0x18e6, 0x1dfa, 0x1beb, 0x12e7, 0xe02, 0x1614, 0x12b0, 0x1646, 0x1bdb, 0x1e1f, 0x1eb6, 0x361, 0x1fb, 
0x2ee, 0xee2, 0x178c, 0xedd, 0x1ba6, 0xf1c, 0x1e7f, 0x1dac, 0x137d, 0x18db, 0x8e8, 0xa0, 0x1faf, 0x5cb, 0x1078, 0x1562, 0x36e} #elif RADIX == 32 -{0x1d55ffc5, 0x1bbf3700, 0x139ae322, 0x2254ec6, 0x1165e8cd, 0x10ad1496, 0x14c1725a, 0x106163c4, 0x149e359b, 0x1d60229, 0x5b0bde0, 0x9f147a4, 0x1c4feeea, 0x81e551d, 0x1d25d1a9, 0x22a4cb1, 0x1dfbed6e, 0x72d} +{0x18ab4116, 0x17153139, 0x1718ce89, 0x93d2d53, 0x18e6e2f7, 0x1f7d7dfa, 0xa380a5c, 0x4695858, 0x1c3fbdbb, 0x18d87d6d, 0x217707e, 0x1bb78c77, 0x1c7374ce, 0x1ed679fd, 0x118db9be, 0xbc1408e, 0x3c172ff, 0x214} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xe322ddf9b807557f, 0x97a33444a9d8d39a, 0x4c1725a8568a4b45, 0x4d278d66e0c2c789, 0xf485b0bde00eb011, 0x2a8ef13fbba93e28, 0x549963d25d1a940f, 0x197556f7efb5b84} +{0xce89b8a989ce2ad0, 0x9b8bdd27a5aa7718, 0xa380a5cfbebefd63, 0x6f0fef6ec8d2b0b0, 0x8ee217707ec6c3eb, 0x3cfef1cdd33b76f1, 0x82811d18db9bef6b, 0x7558a0f05cbfd7} #else -{0x45bbf3700eaaff, 0x13444a9d8d39ae3, 0xd42b4525a2cbd1, 0x1b830b1e25305c9, 0x1d60229a4f1ac, 0x93e28f485b0bde, 0xa079547789fddd, 0x1152658f49746a, 0x17eaadefdf6b7} +{0x1137153139c55a0, 0x1dd27a5aa7718ce, 0xe7df5f7eb1cdc5, 0x1b234ac2c28e029, 0x1d8d87d6de1fded, 0x1b76f18ee217707, 0x17b59e7f78e6e99, 0x15e0a0474636e6f, 0xeab141e0b97f} #endif #endif , #if 0 #elif RADIX == 16 -{0x204, 0x9f6, 0x1dba, 0x110e, 0x6ea, 0x112a, 0xa11, 0xd06, 0x15aa, 0x1f0b, 0xeec, 0xef1, 0x1edc, 0x1604, 0x65b, 0x129, 0x39d, 0x8f8, 0x5d5, 0x672, 0x150a, 0x233, 0xc20, 0x12ba, 0x1855, 0x15a6, 0xd50, 0x1c71, 0x15b7, 0xf04, 0x579, 0x16d2, 0xbac, 0x4c9, 0xaf5, 0x514, 0xf27, 0xef, 0x36a} +{0xb32, 0x149, 0x1615, 0x77e, 0xf55, 0x189, 0xe2a, 0x13bc, 0xf83, 0x124d, 0xcaa, 0x22, 0xcea, 0x8f9, 0xc5e, 0x8bc, 0x4ff, 0x14da, 0x394, 0x4a2, 0x1767, 0x1d20, 0x1531, 0x1dff, 0x929, 0x15cf, 0x1f69, 0x1630, 0x669, 0x11ec, 0x162c, 0xcf3, 0xde5, 0x185f, 0x1da0, 0x1db9, 0x1d93, 0xb9b, 0x38f} #elif RADIX == 32 -{0x10240be, 0x1ddba4fb, 0xa8dd510, 0x8328462, 0x1f0bad53, 0x11de2eec, 0xdd813db, 0x19d09499, 0xbaa8f81, 0x1d428ce4, 0x1a61008c, 0x14d85595, 0x11c5aa15, 0x178256df, 0x196d22bc, 0x1d4992ba, 0x19394515, 0x27b} +{0x15994382, 0x1d6150a4, 0x25eaa77, 0x1de38a83, 0x124d7c1c, 0x8044caa, 0xf23e59d, 0xff45e31, 0x7294da2, 0x5d9c944, 0x1fa98f48, 0x19e929ef, 0x18c3ed35, 0x8f619a6, 0xacf3b16, 0x830bede, 0xc9f6e7b, 0x1df} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xd510eedd27d84090, 0x2eb54d06508c4a8d, 0xdd813db8ef17767c, 0x22eaa3e073a12932, 0xb2ba61008cea1467, 0x2b6fc716a8569b0a, 0x93257596d22bcbc1, 0x503bde4e51457a} +{0xaa77eb0a85256650, 0x35f073bc7150625e, 0xf23e59d402265549, 0x21ca53689fe8bc62, 0x3dffa98f482ece4a, 0xcd3630fb4d73d25, 0x617dbcacf3b1647b, 0x17ae6fb27db9ed0} #else -{0x21ddba4fb08120, 0x14d06508c4a8dd5, 0xdc778bbb3e175a, 0x1ce84a4cb7604f, 0x19d428ce45d547c, 0x169b0ab2ba61008, 0x5e095b7e38b542, 0x1ea4c95d65b48af, 0xa077bc9ca28a} +{0xefd6150a4acca1, 0x73bc7150625eaa, 0xea01132aa49af8, 0x27fa2f18bc8f96, 0x105d9c944394a6d, 0x173d253dffa98f4, 0x123d8669b187da6, 0x14185f6f2b3cec5, 0x145cdf64fb73d} #endif #endif }, { @@ -2641,223 +2641,223 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2ce5,0xf14f,0xd32e,0x18c2,0x5217,0x2833,0xa7fc,0x4090,0xd9d5,0x4d89,0xe770,0x3801,0xbca0,0x6ac9,0x5432,0x2ee5,0x4356,0x827a,0xdf1a,0x8911,0x4102,0xf05a,0x3e5,0x18b9,0xf66f,0x77ad,0xd99b,0xea2f,0xa030,0x36bb,0xe071,0xd}}} 
+{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1dad,0x7f9a,0xf7d5,0xe103,0xbcd7,0xd758,0xdffc,0x8775,0x424c,0xf512,0x8d24,0x9441,0xa2ff,0x1a96,0xfec2,0xdbf,0x1653,0x6a57,0x1c7f,0x2253,0x3ed1,0xfe65,0xc239,0x9d9d,0x3f9d,0xe53,0xa7cd,0x2102,0xbf75,0x72de,0xfa6c,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf14f2ce5,0x18c2d32e,0x28335217,0x4090a7fc,0x4d89d9d5,0x3801e770,0x6ac9bca0,0x2ee55432,0x827a4356,0x8911df1a,0xf05a4102,0x18b903e5,0x77adf66f,0xea2fd99b,0x36bba030,0xde071}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7f9a1dad,0xe103f7d5,0xd758bcd7,0x8775dffc,0xf512424c,0x94418d24,0x1a96a2ff,0xdbffec2,0x6a571653,0x22531c7f,0xfe653ed1,0x9d9dc239,0xe533f9d,0x2102a7cd,0x72debf75,0x2fa6c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x18c2d32ef14f2ce5,0x4090a7fc28335217,0x3801e7704d89d9d5,0x2ee554326ac9bca0,0x8911df1a827a4356,0x18b903e5f05a4102,0xea2fd99b77adf66f,0xde07136bba030}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe103f7d57f9a1dad,0x8775dffcd758bcd7,0x94418d24f512424c,0xdbffec21a96a2ff,0x22531c7f6a571653,0x9d9dc239fe653ed1,0x2102a7cd0e533f9d,0x2fa6c72debf75}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x752,0x8c67,0x260,0x14f,0x138a,0x865,0x5e4d,0x4db9,0xca26,0x7a2c,0x4a7e,0x9422,0xb2f6,0xbdc8,0x841d,0x7e33,0x5677,0x38a6,0x9633,0x15fc,0xacce,0xbe04,0xdcef,0xb16c,0xd57b,0x7a5e,0x8395,0x1f8f,0xe4b9,0xe383,0x4be3,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3592,0xb6e5,0xb083,0xa93b,0xa140,0xb740,0x5865,0x1057,0xc57f,0xa78d,0x71f,0xf817,0x308d,0x708d,0xddf2,0x475,0x8850,0x2ada,0x50ee,0xe4bf,0x7038,0x5945,0xd2e9,0xe429,0x1765,0x5e1e,0x183a,0x249c,0x6da1,0x3147,0xcd38,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8c670752,0x14f0260,0x865138a,0x4db95e4d,0x7a2cca26,0x94224a7e,0xbdc8b2f6,0x7e33841d,0x38a65677,0x15fc9633,0xbe04acce,0xb16cdcef,0x7a5ed57b,0x1f8f8395,0xe383e4b9,0x64be3}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb6e53592,0xa93bb083,0xb740a140,0x10575865,0xa78dc57f,0xf817071f,0x708d308d,0x475ddf2,0x2ada8850,0xe4bf50ee,0x59457038,0xe429d2e9,0x5e1e1765,0x249c183a,0x31476da1,0xdcd38}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x14f02608c670752,0x4db95e4d0865138a,0x94224a7e7a2cca26,0x7e33841dbdc8b2f6,0x15fc963338a65677,0xb16cdcefbe04acce,0x1f8f83957a5ed57b,0x64be3e383e4b9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa93bb083b6e53592,0x10575865b740a140,0xf817071fa78dc57f,0x475ddf2708d308d,0xe4bf50ee2ada8850,0xe429d2e959457038,0x249c183a5e1e1765,0xdcd3831476da1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9b63,0x40f2,0x5177,0x535f,0x5098,0x2f26,0x18a2,0x5e1c,0x5d70,0x33e9,0x1db9,0x2397,0xaccc,0xb98b,0xcae6,0x87bf,0xd43f,0xb329,0xf2d0,0x2f67,0x779c,0xd2ab,0x4369,0xc67d,0xd065,0x9550,0xf06b,0x1a2b,0xc6e,0x9b2b,0xbb64,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3b,0x696c,0x9cf7,0x924e,0x29b1,0xfa82,0x12f3,0xcd4b,0xf4c2,0xfdd9,0x406d,0x434e,0x4ab0,0xab5f,0xda69,0x708e,0xf25b,0x642d,0x6d6d,0xd93f,0x618f,0xe4dc,0x4514,0x2c42,0x38da,0x4841,0x27cd,0x2c70,0xde,0x9b00,0x5a54,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x40f29b63,0x535f5177,0x2f265098,0x5e1c18a2,0x33e95d70,0x23971db9,0xb98baccc,0x87bfcae6,0xb329d43f,0x2f67f2d0,0xd2ab779c,0xc67d4369,0x9550d065,0x1a2bf06b,0x9b2b0c6e,0x3bb64}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x696c003b,0x924e9cf7,0xfa8229b1,0xcd4b12f3,0xfdd9f4c2,0x434e406d,0xab5f4ab0,0x708eda69,0x642df25b,0xd93f6d6d,0xe4dc618f,0x2c424514,0x484138da,0x2c7027cd,0x9b0000de,0xd5a54}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x535f517740f29b63,0x5e1c18a22f265098,0x23971db933e95d70,0x87bfcae6b98baccc,0x2f67f2d0b329d43f,0xc67d4369d2ab779c,0x1a2bf06b9550d065,0x3bb649b2b0c6e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x924e9cf7696c003b,0xcd4b12f3fa8229b1,0x434e406dfdd9f4c2,0x708eda69ab5f4ab0,0xd93f6d6d642df25b,0x2c424514e4dc618f,0x2c7027cd484138da,0xd5a549b0000de}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xd31b,0xeb0,0x2cd1,0xe73d,0xade8,0xd7cc,0x5803,0xbf6f,0x262a,0xb276,0x188f,0xc7fe,0x435f,0x9536,0xabcd,0xd11a,0xbca9,0x7d85,0x20e5,0x76ee,0xbefd,0xfa5,0xfc1a,0xe746,0x990,0x8852,0x2664,0x15d0,0x5fcf,0xc944,0x1f8e,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe253,0x8065,0x82a,0x1efc,0x4328,0x28a7,0x2003,0x788a,0xbdb3,0xaed,0x72db,0x6bbe,0x5d00,0xe569,0x13d,0xf240,0xe9ac,0x95a8,0xe380,0xddac,0xc12e,0x19a,0x3dc6,0x6262,0xc062,0xf1ac,0x5832,0xdefd,0x408a,0x8d21,0x593,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xeb0d31b,0xe73d2cd1,0xd7ccade8,0xbf6f5803,0xb276262a,0xc7fe188f,0x9536435f,0xd11aabcd,0x7d85bca9,0x76ee20e5,0xfa5befd,0xe746fc1a,0x88520990,0x15d02664,0xc9445fcf,0x21f8e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8065e253,0x1efc082a,0x28a74328,0x788a2003,0xaedbdb3,0x6bbe72db,0xe5695d00,0xf240013d,0x95a8e9ac,0xddace380,0x19ac12e,0x62623dc6,0xf1acc062,0xdefd5832,0x8d21408a,0xd0593}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe73d2cd10eb0d31b,0xbf6f5803d7ccade8,0xc7fe188fb276262a,0xd11aabcd9536435f,0x76ee20e57d85bca9,0xe746fc1a0fa5befd,0x15d0266488520990,0x21f8ec9445fcf}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1efc082a8065e253,0x788a200328a74328,0x6bbe72db0aedbdb3,0xf240013de5695d00,0xddace38095a8e9ac,0x62623dc6019ac12e,0xdefd5832f1acc062,0xd05938d21408a}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x4222,0xe40c,0x843f,0x3518,0x72d1,0xa757,0xb4e5,0x4347,0x3326,0xc267,0x30d,0xb77e,0x9907,0xcb8c,0xd175,0x8cf2,0x5440,0xb876,0x2316,0xa715,0xf0ab,0x9e96,0xa72f,0xcd7f,0x1e06,0xa42f,0x985f,0xdc2d,0xd9ee,0xe71e,0x2ae0,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe40c4222,0x3518843f,0xa75772d1,0x4347b4e5,0xc2673326,0xb77e030d,0xcb8c9907,0x8cf2d175,0xb8765440,0xa7152316,0x9e96f0ab,0xcd7fa72f,0xa42f1e06,0xdc2d985f,0xe71ed9ee,0x82ae0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3518843fe40c4222,0x4347b4e5a75772d1,0xb77e030dc2673326,0x8cf2d175cb8c9907,0xa7152316b8765440,0xcd7fa72f9e96f0ab,0xdc2d985fa42f1e06,0x82ae0e71ed9ee}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x11ac,0x1c90,0x6c62,0x15fd,0x1924,0x5851,0x60c6,0x744c,0x80fd,0xa6b,0x5654,0x51a1,0x6589,0x803f,0xf265,0x4132,0x96d2,0x7497,0xcf0b,0x65,0x2e51,0x2bc,0x4203,0x3aad,0x1f2,0x5b40,0xcc1a,0x67e4,0xdfd3,0xba17,0x7a8c,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1c9011ac,0x15fd6c62,0x58511924,0x744c60c6,0xa6b80fd,0x51a15654,0x803f6589,0x4132f265,0x749796d2,0x65cf0b,0x2bc2e51,0x3aad4203,0x5b4001f2,0x67e4cc1a,0xba17dfd3,0x37a8c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x15fd6c621c9011ac,0x744c60c658511924,0x51a156540a6b80fd,0x4132f265803f6589,0x65cf0b749796d2,0x3aad420302bc2e51,0x67e4cc1a5b4001f2,0x37a8cba17dfd3}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}}} #endif , #if 0 #elif GMP_LIMB_BITS 
== 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x99f9,0x50f4,0xd750,0xb0a2,0xfdaa,0x6986,0x6b4b,0x34be,0x7bd5,0x3974,0xe05,0x8c18,0x6bb8,0xbb5a,0xcc33,0x63b5,0x943b,0xec49,0xb4ef,0xbdc4,0x5a2a,0x2fc8,0x85ad,0x1291,0xa29f,0x9618,0x721b,0x93f6,0xb40f,0x2e85,0xdfbb,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x46df,0x993e,0x6cc0,0xa409,0xa063,0x3e90,0x1bb7,0x1ed7,0xe56b,0xada4,0xdce,0xb050,0xc6aa,0x2b91,0x61ef,0x10ec,0x6ecc,0x3168,0x72f7,0xe69d,0xf599,0x59ed,0x7fe9,0x1cf6,0xab4b,0x9fb7,0x21f0,0xa281,0xc4d8,0xabda,0xbaac,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x50f499f9,0xb0a2d750,0x6986fdaa,0x34be6b4b,0x39747bd5,0x8c180e05,0xbb5a6bb8,0x63b5cc33,0xec49943b,0xbdc4b4ef,0x2fc85a2a,0x129185ad,0x9618a29f,0x93f6721b,0x2e85b40f,0xbdfbb}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x993e46df,0xa4096cc0,0x3e90a063,0x1ed71bb7,0xada4e56b,0xb0500dce,0x2b91c6aa,0x10ec61ef,0x31686ecc,0xe69d72f7,0x59edf599,0x1cf67fe9,0x9fb7ab4b,0xa28121f0,0xabdac4d8,0x5baac}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb0a2d75050f499f9,0x34be6b4b6986fdaa,0x8c180e0539747bd5,0x63b5cc33bb5a6bb8,0xbdc4b4efec49943b,0x129185ad2fc85a2a,0x93f6721b9618a29f,0xbdfbb2e85b40f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa4096cc0993e46df,0x1ed71bb73e90a063,0xb0500dceada4e56b,0x10ec61ef2b91c6aa,0xe69d72f731686ecc,0x1cf67fe959edf599,0xa28121f09fb7ab4b,0x5baacabdac4d8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf4c0,0x4ff5,0x2aee,0x3e90,0x49,0xb2af,0xf257,0x111c,0xead0,0xc1d5,0xc7d9,0x8a7c,0x9579,0xf62,0xe1f6,0xb43c,0x8f3f,0x14ca,0x1b7b,0xc209,0xac8,0xf5cd,0xdfc0,0x5d39,0x9d8d,0x9c9a,0x2e6e,0xba54,0x79d5,0x4f02,0x1cfc,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0x69f0,0x1c1,0x40a6,0x59c0,0xdf7c,0x7343,0xeb2e,0xf036,0x6d07,0xc3ee,0x6377,0xe0f0,0x9377,0xbcdf,0xa53c,0x8e52,0x233,0x3530,0x72e2,0xa026,0x3748,0x9995,0xad2,0xc440,0x86a,0x4191,0x1081,0x4662,0x2148,0xf8cd,0x1a9a,0x9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4ff5f4c0,0x3e902aee,0xb2af0049,0x111cf257,0xc1d5ead0,0x8a7cc7d9,0xf629579,0xb43ce1f6,0x14ca8f3f,0xc2091b7b,0xf5cd0ac8,0x5d39dfc0,0x9c9a9d8d,0xba542e6e,0x4f0279d5,0x21cfc}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1c169f0,0x59c040a6,0x7343df7c,0xf036eb2e,0xc3ee6d07,0xe0f06377,0xbcdf9377,0x8e52a53c,0x35300233,0xa02672e2,0x99953748,0xc4400ad2,0x4191086a,0x46621081,0xf8cd2148,0x91a9a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e902aee4ff5f4c0,0x111cf257b2af0049,0x8a7cc7d9c1d5ead0,0xb43ce1f60f629579,0xc2091b7b14ca8f3f,0x5d39dfc0f5cd0ac8,0xba542e6e9c9a9d8d,0x21cfc4f0279d5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x59c040a601c169f0,0xf036eb2e7343df7c,0xe0f06377c3ee6d07,0x8e52a53cbcdf9377,0xa02672e235300233,0xc4400ad299953748,0x466210814191086a,0x91a9af8cd2148}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1eb,0x1730,0x3343,0xcef3,0x2add,0x7615,0x353e,0xd52b,0x9951,0xc1,0x2292,0x69d0,0x4a9f,0xc1bd,0xfec7,0xd332,0x72b7,0x67f8,0xaa27,0x61a4,0x33dd,0x8ec0,0xfe1d,0x9a69,0x38ac,0x60f,0x209b,0xbb33,0x55b1,0x13f5,0x5c80,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x977d,0x5a09,0xd718,0x1ac3,0x2d52,0xdf82,0x9571,0x2023,0xdc9b,0xe759,0x26c0,0x2a59,0x5273,0x9024,0x6bf1,0x9b45,0xe3ef,0xfd9c,0x6189,0x621f,0xb7e6,0x6a8c,0xa219,0xf7d1,0xd502,0xb115,0xcb9e,0x7bf7,0x773,0x7222,0x7aa1,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x173001eb,0xcef33343,0x76152add,0xd52b353e,0xc19951,0x69d02292,0xc1bd4a9f,0xd332fec7,0x67f872b7,0x61a4aa27,0x8ec033dd,0x9a69fe1d,0x60f38ac,0xbb33209b,0x13f555b1,0xc5c80}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5a09977d,0x1ac3d718,0xdf822d52,0x20239571,0xe759dc9b,0x2a5926c0,0x90245273,0x9b456bf1,0xfd9ce3ef,0x621f6189,0x6a8cb7e6,0xf7d1a219,0xb115d502,0x7bf7cb9e,0x72220773,0x47aa1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcef33343173001eb,0xd52b353e76152add,0x69d0229200c19951,0xd332fec7c1bd4a9f,0x61a4aa2767f872b7,0x9a69fe1d8ec033dd,0xbb33209b060f38ac,0xc5c8013f555b1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1ac3d7185a09977d,0x20239571df822d52,0x2a5926c0e759dc9b,0x9b456bf190245273,0x621f6189fd9ce3ef,0xf7d1a2196a8cb7e6,0x7bf7cb9eb115d502,0x47aa172220773}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x6607,0xaf0b,0x28af,0x4f5d,0x255,0x9679,0x94b4,0xcb41,0x842a,0xc68b,0xf1fa,0x73e7,0x9447,0x44a5,0x33cc,0x9c4a,0x6bc4,0x13b6,0x4b10,0x423b,0xa5d5,0xd037,0x7a52,0xed6e,0x5d60,0x69e7,0x8de4,0x6c09,0x4bf0,0xd17a,0x2044,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xb921,0x66c1,0x933f,0x5bf6,0x5f9c,0xc16f,0xe448,0xe128,0x1a94,0x525b,0xf231,0x4faf,0x3955,0xd46e,0x9e10,0xef13,0x9133,0xce97,0x8d08,0x1962,0xa66,0xa612,0x8016,0xe309,0x54b4,0x6048,0xde0f,0x5d7e,0x3b27,0x5425,0x4553,0xa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xaf0b6607,0x4f5d28af,0x96790255,0xcb4194b4,0xc68b842a,0x73e7f1fa,0x44a59447,0x9c4a33cc,0x13b66bc4,0x423b4b10,0xd037a5d5,0xed6e7a52,0x69e75d60,0x6c098de4,0xd17a4bf0,0x42044}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x66c1b921,0x5bf6933f,0xc16f5f9c,0xe128e448,0x525b1a94,0x4faff231,0xd46e3955,0xef139e10,0xce979133,0x19628d08,0xa6120a66,0xe3098016,0x604854b4,0x5d7ede0f,0x54253b27,0xa4553}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4f5d28afaf0b6607,0xcb4194b496790255,0x73e7f1fac68b842a,0x9c4a33cc44a59447,0x423b4b1013b66bc4,0xed6e7a52d037a5d5,0x6c098de469e75d60,0x42044d17a4bf0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5bf6933f66c1b921,0xe128e448c16f5f9c,0x4faff231525b1a94,0xef139e10d46e3955,0x19628d08ce979133,0xe3098016a6120a66,0x5d7ede0f604854b4,0xa455354253b27}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2ce5,0xf14f,0xd32e,0x18c2,0x5217,0x2833,0xa7fc,0x4090,0xd9d5,0x4d89,0xe770,0x3801,0xbca0,0x6ac9,0x5432,0x2ee5,0x4356,0x827a,0xdf1a,0x8911,0x4102,0xf05a,0x3e5,0x18b9,0xf66f,0x77ad,0xd99b,0xea2f,0xa030,0x36bb,0xe071,0xd}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1dad,0x7f9a,0xf7d5,0xe103,0xbcd7,0xd758,0xdffc,0x8775,0x424c,0xf512,0x8d24,0x9441,0xa2ff,0x1a96,0xfec2,0xdbf,0x1653,0x6a57,0x1c7f,0x2253,0x3ed1,0xfe65,0xc239,0x9d9d,0x3f9d,0xe53,0xa7cd,0x2102,0xbf75,0x72de,0xfa6c,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf14f2ce5,0x18c2d32e,0x28335217,0x4090a7fc,0x4d89d9d5,0x3801e770,0x6ac9bca0,0x2ee55432,0x827a4356,0x8911df1a,0xf05a4102,0x18b903e5,0x77adf66f,0xea2fd99b,0x36bba030,0xde071}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7f9a1dad,0xe103f7d5,0xd758bcd7,0x8775dffc,0xf512424c,0x94418d24,0x1a96a2ff,0xdbffec2,0x6a571653,0x22531c7f,0xfe653ed1,0x9d9dc239,0xe533f9d,0x2102a7cd,0x72debf75,0x2fa6c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x18c2d32ef14f2ce5,0x4090a7fc28335217,0x3801e7704d89d9d5,0x2ee554326ac9bca0,0x8911df1a827a4356,0x18b903e5f05a4102,0xea2fd99b77adf66f,0xde07136bba030}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe103f7d57f9a1dad,0x8775dffcd758bcd7,0x94418d24f512424c,0xdbffec21a96a2ff,0x22531c7f6a571653,0x9d9dc239fe653ed1,0x2102a7cd0e533f9d,0x2fa6c72debf75}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x752,0x8c67,0x260,0x14f,0x138a,0x865,0x5e4d,0x4db9,0xca26,0x7a2c,0x4a7e,0x9422,0xb2f6,0xbdc8,0x841d,0x7e33,0x5677,0x38a6,0x9633,0x15fc,0xacce,0xbe04,0xdcef,0xb16c,0xd57b,0x7a5e,0x8395,0x1f8f,0xe4b9,0xe383,0x4be3,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3592,0xb6e5,0xb083,0xa93b,0xa140,0xb740,0x5865,0x1057,0xc57f,0xa78d,0x71f,0xf817,0x308d,0x708d,0xddf2,0x475,0x8850,0x2ada,0x50ee,0xe4bf,0x7038,0x5945,0xd2e9,0xe429,0x1765,0x5e1e,0x183a,0x249c,0x6da1,0x3147,0xcd38,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8c670752,0x14f0260,0x865138a,0x4db95e4d,0x7a2cca26,0x94224a7e,0xbdc8b2f6,0x7e33841d,0x38a65677,0x15fc9633,0xbe04acce,0xb16cdcef,0x7a5ed57b,0x1f8f8395,0xe383e4b9,0x64be3}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb6e53592,0xa93bb083,0xb740a140,0x10575865,0xa78dc57f,0xf817071f,0x708d308d,0x475ddf2,0x2ada8850,0xe4bf50ee,0x59457038,0xe429d2e9,0x5e1e1765,0x249c183a,0x31476da1,0xdcd38}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x14f02608c670752,0x4db95e4d0865138a,0x94224a7e7a2cca26,0x7e33841dbdc8b2f6,0x15fc963338a65677,0xb16cdcefbe04acce,0x1f8f83957a5ed57b,0x64be3e383e4b9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa93bb083b6e53592,0x10575865b740a140,0xf817071fa78dc57f,0x475ddf2708d308d,0xe4bf50ee2ada8850,0xe429d2e959457038,0x249c183a5e1e1765,0xdcd3831476da1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9b63,0x40f2,0x5177,0x535f,0x5098,0x2f26,0x18a2,0x5e1c,0x5d70,0x33e9,0x1db9,0x2397,0xaccc,0xb98b,0xcae6,0x87bf,0xd43f,0xb329,0xf2d0,0x2f67,0x779c,0xd2ab,0x4369,0xc67d,0xd065,0x9550,0xf06b,0x1a2b,0xc6e,0x9b2b,0xbb64,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3b,0x696c,0x9cf7,0x924e,0x29b1,0xfa82,0x12f3,0xcd4b,0xf4c2,0xfdd9,0x406d,0x434e,0x4ab0,0xab5f,0xda69,0x708e,0xf25b,0x642d,0x6d6d,0xd93f,0x618f,0xe4dc,0x4514,0x2c42,0x38da,0x4841,0x27cd,0x2c70,0xde,0x9b00,0x5a54,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x40f29b63,0x535f5177,0x2f265098,0x5e1c18a2,0x33e95d70,0x23971db9,0xb98baccc,0x87bfcae6,0xb329d43f,0x2f67f2d0,0xd2ab779c,0xc67d4369,0x9550d065,0x1a2bf06b,0x9b2b0c6e,0x3bb64}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x696c003b,0x924e9cf7,0xfa8229b1,0xcd4b12f3,0xfdd9f4c2,0x434e406d,0xab5f4ab0,0x708eda69,0x642df25b,0xd93f6d6d,0xe4dc618f,0x2c424514,0x484138da,0x2c7027cd,0x9b0000de,0xd5a54}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x535f517740f29b63,0x5e1c18a22f265098,0x23971db933e95d70,0x87bfcae6b98baccc,0x2f67f2d0b329d43f,0xc67d4369d2ab779c,0x1a2bf06b9550d065,0x3bb649b2b0c6e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x924e9cf7696c003b,0xcd4b12f3fa8229b1,0x434e406dfdd9f4c2,0x708eda69ab5f4ab0,0xd93f6d6d642df25b,0x2c424514e4dc618f,0x2c7027cd484138da,0xd5a549b0000de}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xd31b,0xeb0,0x2cd1,0xe73d,0xade8,0xd7cc,0x5803,0xbf6f,0x262a,0xb276,0x188f,0xc7fe,0x435f,0x9536,0xabcd,0xd11a,0xbca9,0x7d85,0x20e5,0x76ee,0xbefd,0xfa5,0xfc1a,0xe746,0x990,0x8852,0x2664,0x15d0,0x5fcf,0xc944,0x1f8e,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xe253,0x8065,0x82a,0x1efc,0x4328,0x28a7,0x2003,0x788a,0xbdb3,0xaed,0x72db,0x6bbe,0x5d00,0xe569,0x13d,0xf240,0xe9ac,0x95a8,0xe380,0xddac,0xc12e,0x19a,0x3dc6,0x6262,0xc062,0xf1ac,0x5832,0xdefd,0x408a,0x8d21,0x593,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xeb0d31b,0xe73d2cd1,0xd7ccade8,0xbf6f5803,0xb276262a,0xc7fe188f,0x9536435f,0xd11aabcd,0x7d85bca9,0x76ee20e5,0xfa5befd,0xe746fc1a,0x88520990,0x15d02664,0xc9445fcf,0x21f8e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8065e253,0x1efc082a,0x28a74328,0x788a2003,0xaedbdb3,0x6bbe72db,0xe5695d00,0xf240013d,0x95a8e9ac,0xddace380,0x19ac12e,0x62623dc6,0xf1acc062,0xdefd5832,0x8d21408a,0xd0593}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe73d2cd10eb0d31b,0xbf6f5803d7ccade8,0xc7fe188fb276262a,0xd11aabcd9536435f,0x76ee20e57d85bca9,0xe746fc1a0fa5befd,0x15d0266488520990,0x21f8ec9445fcf}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1efc082a8065e253,0x788a200328a74328,0x6bbe72db0aedbdb3,0xf240013de5695d00,0xddace38095a8e9ac,0x62623dc6019ac12e,0xdefd5832f1acc062,0xd05938d21408a}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2111,0xf206,0x421f,0x9a8c,0xb968,0xd3ab,0xda72,0x21a3,0x9993,0xe133,0x186,0xdbbf,0x4c83,0xe5c6,0x68ba,0x4679,0x2a20,0x5c3b,0x918b,0xd38a,0x7855,0xcf4b,0xd397,0x66bf,0x8f03,0xd217,0xcc2f,0x6e16,0x6cf7,0x738f,0x1570,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf2062111,0x9a8c421f,0xd3abb968,0x21a3da72,0xe1339993,0xdbbf0186,0xe5c64c83,0x467968ba,0x5c3b2a20,0xd38a918b,0xcf4b7855,0x66bfd397,0xd2178f03,0x6e16cc2f,0x738f6cf7,0x41570}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9a8c421ff2062111,0x21a3da72d3abb968,0xdbbf0186e1339993,0x467968bae5c64c83,0xd38a918b5c3b2a20,0x66bfd397cf4b7855,0x6e16cc2fd2178f03,0x41570738f6cf7}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x8d6,0xe48,0xb631,0xafe,0x8c92,0x2c28,0x3063,0xba26,0xc07e,0x535,0xab2a,0xa8d0,0xb2c4,0xc01f,0x7932,0x2099,0xcb69,0xba4b,0xe785,0x8032,0x1728,0x815e,0xa101,0x1d56,0xf9,0x2da0,0x660d,0xb3f2,0xefe9,0x5d0b,0xbd46,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe4808d6,0xafeb631,0x2c288c92,0xba263063,0x535c07e,0xa8d0ab2a,0xc01fb2c4,0x20997932,0xba4bcb69,0x8032e785,0x815e1728,0x1d56a101,0x2da000f9,0xb3f2660d,0x5d0befe9,0x1bd46}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xafeb6310e4808d6,0xba2630632c288c92,0xa8d0ab2a0535c07e,0x20997932c01fb2c4,0x8032e785ba4bcb69,0x1d56a101815e1728,0xb3f2660d2da000f9,0x1bd465d0befe9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x129d,0xdd4c,0xe2b2,0xca3b,0x6c0b,0x9c8b,0x68f9,0x412,0x51a8,0x7583,0xae25,0xb80d,0x35d5,0x387b,0x4ba1,0x66e1,0x754,0xf6b6,0x3d8c,0x650,0xa955,0x214f,0xc05f,0x16d2,0x9ce4,0x246f,0x123e,0x3ed3,0xa07f,0x2e24,0x8964,0x5}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xba36,0x53ab,0xedb2,0xfd40,0x24a1,0x7164,0x85ae,0xbaf5,0xb1e8,0xb6c1,0x8781,0xa06e,0xc4f,0xd656,0x7782,0xe745,0x8f5f,0x856,0x8eda,0xa10e,0x34e1,0x75e,0xa553,0xcbf2,0x6965,0xb963,0xb52c,0xd1ee,0xa39a,0x658c,0xb61c,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xdd4c129d,0xca3be2b2,0x9c8b6c0b,0x41268f9,0x758351a8,0xb80dae25,0x387b35d5,0x66e14ba1,0xf6b60754,0x6503d8c,0x214fa955,0x16d2c05f,0x246f9ce4,0x3ed3123e,0x2e24a07f,0x58964}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x53abba36,0xfd40edb2,0x716424a1,0xbaf585ae,0xb6c1b1e8,0xa06e8781,0xd6560c4f,0xe7457782,0x8568f5f,0xa10e8eda,0x75e34e1,0xcbf2a553,0xb9636965,0xd1eeb52c,0x658ca39a,0xfb61c}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xca3be2b2dd4c129d,0x41268f99c8b6c0b,0xb80dae25758351a8,0x66e14ba1387b35d5,0x6503d8cf6b60754,0x16d2c05f214fa955,0x3ed3123e246f9ce4,0x589642e24a07f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xfd40edb253abba36,0xbaf585ae716424a1,0xa06e8781b6c1b1e8,0xe7457782d6560c4f,0xa10e8eda08568f5f,0xcbf2a553075e34e1,0xd1eeb52cb9636965,0xfb61c658ca39a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9e3f,0xbc60,0xa44c,0x253c,0xa75e,0xa9f9,0x326f,0x9f9f,0x14aa,0xa47f,0x3889,0x5ee3,0x87d,0x933f,0x6cba,0x6222,0xcd43,0xa8c9,0xa815,0x992a,0x643a,0xc1d3,0x4cff,0xf675,0xf30b,0x7e2a,0x5248,0xb9e4,0xa454,0x2c53,0x525b,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x3007,0xc6b6,0xc140,0x7909,0x6fb4,0x64d4,0x16ae,0xa3b7,0xa379,0xfe0f,0x7bc0,0xa6fa,0x978b,0x3c75,0x95db,0x19e0,0x7f23,0xb291,0x31a3,0xd4df,0x1d4c,0xb5b7,0x90e2,0xb611,0xd86e,0x4552,0x1240,0x836a,0xc94f,0xd456,0x3d8b,0xf}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbc609e3f,0x253ca44c,0xa9f9a75e,0x9f9f326f,0xa47f14aa,0x5ee33889,0x933f087d,0x62226cba,0xa8c9cd43,0x992aa815,0xc1d3643a,0xf6754cff,0x7e2af30b,0xb9e45248,0x2c53a454,0x3525b}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc6b63007,0x7909c140,0x64d46fb4,0xa3b716ae,0xfe0fa379,0xa6fa7bc0,0x3c75978b,0x19e095db,0xb2917f23,0xd4df31a3,0xb5b71d4c,0xb61190e2,0x4552d86e,0x836a1240,0xd456c94f,0xf3d8b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x253ca44cbc609e3f,0x9f9f326fa9f9a75e,0x5ee33889a47f14aa,0x62226cba933f087d,0x992aa815a8c9cd43,0xf6754cffc1d3643a,0xb9e452487e2af30b,0x3525b2c53a454}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7909c140c6b63007,0xa3b716ae64d46fb4,0xa6fa7bc0fe0fa379,0x19e095db3c75978b,0xd4df31a3b2917f23,0xb61190e2b5b71d4c,0x836a12404552d86e,0xf3d8bd456c94f}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x584d,0xa517,0xb681,0x45de,0xc2ea,0x7c58,0x123,0xe0fd,0xfd80,0x6c5b,0xf669,0xddc5,0xb21a,0xcaa9,0xc7a0,0x37ec,0xf8c6,0x12e7,0xe984,0xe812,0xef9f,0x128a,0x9fca,0x41f5,0x118f,0x5c32,0xf1cf,0x78c5,0x9424,0x2ae3,0x60d2,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1968,0x28ea,0x38ef,0x9c5d,0x974c,0x8a5,0xdf49,0xa49b,0x27ca,0x724c,0x963e,0x8465,0xc467,0x7fcf,0xf96e,0x72fa,0x3881,0x6839,0x4c67,0xb7a8,0xb9d2,0x5e2e,0xf9da,0x1c53,0x36af,0x307b,0xb14,0x9619,0xa56f,0x2286,0xdebd,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa517584d,0x45deb681,0x7c58c2ea,0xe0fd0123,0x6c5bfd80,0xddc5f669,0xcaa9b21a,0x37ecc7a0,0x12e7f8c6,0xe812e984,0x128aef9f,0x41f59fca,0x5c32118f,0x78c5f1cf,0x2ae39424,0x260d2}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x28ea1968,0x9c5d38ef,0x8a5974c,0xa49bdf49,0x724c27ca,0x8465963e,0x7fcfc467,0x72faf96e,0x68393881,0xb7a84c67,0x5e2eb9d2,0x1c53f9da,0x307b36af,0x96190b14,0x2286a56f,0x5debd}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x45deb681a517584d,0xe0fd01237c58c2ea,0xddc5f6696c5bfd80,0x37ecc7a0caa9b21a,0xe812e98412e7f8c6,0x41f59fca128aef9f,0x78c5f1cf5c32118f,0x260d22ae39424}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9c5d38ef28ea1968,0xa49bdf4908a5974c,0x8465963e724c27ca,0x72faf96e7fcfc467,0xb7a84c6768393881,0x1c53f9da5e2eb9d2,0x96190b14307b36af,0x5debd2286a56f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xed63,0x22b3,0x1d4d,0x35c4,0x93f4,0x6374,0x9706,0xfbed,0xae57,0x8a7c,0x51da,0x47f2,0xca2a,0xc784,0xb45e,0x991e,0xf8ab,0x949,0xc273,0xf9af,0x56aa,0xdeb0,0x3fa0,0xe92d,0x631b,0xdb90,0xedc1,0xc12c,0x5f80,0xd1db,0x769b,0xa}}} +{{{._mp_alloc = 0, ._mp_size = 31, ._mp_d = (mp_limb_t[]) {0x45ca,0xac54,0x124d,0x2bf,0xdb5e,0x8e9b,0x7a51,0x450a,0x4e17,0x493e,0x787e,0x5f91,0xf3b0,0x29a9,0x887d,0x18ba,0x70a0,0xf7a9,0x7125,0x5ef1,0xcb1e,0xf8a1,0x5aac,0x340d,0x969a,0x469c,0x4ad3,0x2e11,0x5c65,0x9a73,0x49e3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x22b3ed63,0x35c41d4d,0x637493f4,0xfbed9706,0x8a7cae57,0x47f251da,0xc784ca2a,0x991eb45e,0x949f8ab,0xf9afc273,0xdeb056aa,0xe92d3fa0,0xdb90631b,0xc12cedc1,0xd1db5f80,0xa769b}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xac5445ca,0x2bf124d,0x8e9bdb5e,0x450a7a51,0x493e4e17,0x5f91787e,0x29a9f3b0,0x18ba887d,0xf7a970a0,0x5ef17125,0xf8a1cb1e,0x340d5aac,0x469c969a,0x2e114ad3,0x9a735c65,0x49e3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x35c41d4d22b3ed63,0xfbed9706637493f4,0x47f251da8a7cae57,0x991eb45ec784ca2a,0xf9afc2730949f8ab,0xe92d3fa0deb056aa,0xc12cedc1db90631b,0xa769bd1db5f80}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2bf124dac5445ca,0x450a7a518e9bdb5e,0x5f91787e493e4e17,0x18ba887d29a9f3b0,0x5ef17125f7a970a0,0x340d5aacf8a1cb1e,0x2e114ad3469c969a,0x49e39a735c65}}}} #endif -}}}, {{{ +}}}}, {{{ #if 0 #elif RADIX == 16 {0x1414, 0x1912, 0xddb, 0xb9, 0x121b, 0x195d, 0x6c5, 0xe75, 0x1d16, 0xcaf, 0x1a05, 0x15cc, 0x1537, 0x1c57, 0x141a, 0x1b9d, 0x5b0, 0x9e7, 0xf10, 0x300, 0x1d5c, 0xc13, 0x245, 0x1b91, 0x352, 0x730, 0xd55, 0x1ca3, 0x1dac, 0x7b0, 0x15b9, 0x1ba6, 0x7d6, 0xba4, 0xc12, 0x649, 0xf1, 0xd51, 0x107} @@ -3117,220 +3117,220 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} #endif #endif -}}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xfe45,0x561e,0xa798,0xc163,0xedaa,0x1fff,0xb3c0,0xda12,0x2588,0x5123,0x2390,0xface,0x6e59,0x651b,0xa21d,0xb190,0xe1b0,0x21b0,0x6030,0xd81,0x6542,0x64b0,0x8cef,0xa91c,0xe0bd,0xd947,0xe27b,0x961a,0x2a3e,0xf20a,0xed0,0x5}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xfe45,0x561e,0xa798,0xc163,0xedaa,0x1fff,0xb3c0,0xda12,0x2588,0x5123,0x2390,0xface,0x6e59,0x651b,0xa21d,0xb190,0xe1b0,0x21b0,0x6030,0xd81,0x6542,0x64b0,0x8cef,0xa91c,0xe0bd,0xd947,0xe27b,0x961a,0x2a3e,0xf20a,0xed0,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x561efe45,0xc163a798,0x1fffedaa,0xda12b3c0,0x51232588,0xface2390,0x651b6e59,0xb190a21d,0x21b0e1b0,0xd816030,0x64b06542,0xa91c8cef,0xd947e0bd,0x961ae27b,0xf20a2a3e,0x50ed0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x561efe45,0xc163a798,0x1fffedaa,0xda12b3c0,0x51232588,0xface2390,0x651b6e59,0xb190a21d,0x21b0e1b0,0xd816030,0x64b06542,0xa91c8cef,0xd947e0bd,0x961ae27b,0xf20a2a3e,0x50ed0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc163a798561efe45,0xda12b3c01fffedaa,0xface239051232588,0xb190a21d651b6e59,0xd81603021b0e1b0,0xa91c8cef64b06542,0x961ae27bd947e0bd,0x50ed0f20a2a3e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc163a798561efe45,0xda12b3c01fffedaa,0xface239051232588,0xb190a21d651b6e59,0xd81603021b0e1b0,0xa91c8cef64b06542,0x961ae27bd947e0bd,0x50ed0f20a2a3e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xff6,0x4b77,0xed4e,0x3538,0xc757,0x3304,0xd547,0xc49f,0x801b,0xb2ff,0x4796,0xcad,0x58be,0x969c,0x3e77,0x83e1,0xa371,0x7d43,0x6664,0x3720,0x6c98,0xe4e5,0x74dc,0xebbc,0xaab5,0xc7e0,0x3147,0x5bc9,0x537e,0x7ff5,0xb400,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xff6,0x4b77,0xed4e,0x3538,0xc757,0x3304,0xd547,0xc49f,0x801b,0xb2ff,0x4796,0xcad,0x58be,0x969c,0x3e77,0x83e1,0xa371,0x7d43,0x6664,0x3720,0x6c98,0xe4e5,0x74dc,0xebbc,0xaab5,0xc7e0,0x3147,0x5bc9,0x537e,0x7ff5,0xb400,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4b770ff6,0x3538ed4e,0x3304c757,0xc49fd547,0xb2ff801b,0xcad4796,0x969c58be,0x83e13e77,0x7d43a371,0x37206664,0xe4e56c98,0xebbc74dc,0xc7e0aab5,0x5bc93147,0x7ff5537e,0xcb400}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4b770ff6,0x3538ed4e,0x3304c757,0xc49fd547,0xb2ff801b,0xcad4796,0x969c58be,0x83e13e77,0x7d43a371,0x37206664,0xe4e56c98,0xebbc74dc,0xc7e0aab5,0x5bc93147,0x7ff5537e,0xcb400}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3538ed4e4b770ff6,0xc49fd5473304c757,0xcad4796b2ff801b,0x83e13e77969c58be,0x372066647d43a371,0xebbc74dce4e56c98,0x5bc93147c7e0aab5,0xcb4007ff5537e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3538ed4e4b770ff6,0xc49fd5473304c757,0xcad4796b2ff801b,0x83e13e77969c58be,0x372066647d43a371,0xebbc74dce4e56c98,0x5bc93147c7e0aab5,0xcb4007ff5537e}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x77eb,0xce58,0xfaa0,0x429c,0xc7dd,0xbe89,0x68a,0xc196,0xd249,0x63ae,0xa04a,0xbc4e,0xbbb5,0x64ad,0xfc2b,0x4738,0x4d4d,0x918a,0x6b55,0x459d,0xf5ab,0x748c,0x208a,0xfe42,0x21d2,0x3785,0x23d7,0x28cc,0xfd9f,0x106a,0x3406,0xd}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x77eb,0xce58,0xfaa0,0x429c,0xc7dd,0xbe89,0x68a,0xc196,0xd249,0x63ae,0xa04a,0xbc4e,0xbbb5,0x64ad,0xfc2b,0x4738,0x4d4d,0x918a,0x6b55,0x459d,0xf5ab,0x748c,0x208a,0xfe42,0x21d2,0x3785,0x23d7,0x28cc,0xfd9f,0x106a,0x3406,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xce5877eb,0x429cfaa0,0xbe89c7dd,0xc196068a,0x63aed249,0xbc4ea04a,0x64adbbb5,0x4738fc2b,0x918a4d4d,0x459d6b55,0x748cf5ab,0xfe42208a,0x378521d2,0x28cc23d7,0x106afd9f,0xd3406}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xce5877eb,0x429cfaa0,0xbe89c7dd,0xc196068a,0x63aed249,0xbc4ea04a,0x64adbbb5,0x4738fc2b,0x918a4d4d,0x459d6b55,0x748cf5ab,0xfe42208a,0x378521d2,0x28cc23d7,0x106afd9f,0xd3406}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x429cfaa0ce5877eb,0xc196068abe89c7dd,0xbc4ea04a63aed249,0x4738fc2b64adbbb5,0x459d6b55918a4d4d,0xfe42208a748cf5ab,0x28cc23d7378521d2,0xd3406106afd9f}}} +{{{._mp_alloc = 0, ._mp_size = 8, 
._mp_d = (mp_limb_t[]) {0x429cfaa0ce5877eb,0xc196068abe89c7dd,0xbc4ea04a63aed249,0x4738fc2b64adbbb5,0x459d6b55918a4d4d,0xfe42208a748cf5ab,0x28cc23d7378521d2,0xd3406106afd9f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1bb,0xa9e1,0x5867,0x3e9c,0x1255,0xe000,0x4c3f,0x25ed,0xda77,0xaedc,0xdc6f,0x531,0x91a6,0x9ae4,0x5de2,0x4e6f,0x1e4f,0xde4f,0x9fcf,0xf27e,0x9abd,0x9b4f,0x7310,0x56e3,0x1f42,0x26b8,0x1d84,0x69e5,0xd5c1,0xdf5,0xf12f,0xa}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1bb,0xa9e1,0x5867,0x3e9c,0x1255,0xe000,0x4c3f,0x25ed,0xda77,0xaedc,0xdc6f,0x531,0x91a6,0x9ae4,0x5de2,0x4e6f,0x1e4f,0xde4f,0x9fcf,0xf27e,0x9abd,0x9b4f,0x7310,0x56e3,0x1f42,0x26b8,0x1d84,0x69e5,0xd5c1,0xdf5,0xf12f,0xa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa9e101bb,0x3e9c5867,0xe0001255,0x25ed4c3f,0xaedcda77,0x531dc6f,0x9ae491a6,0x4e6f5de2,0xde4f1e4f,0xf27e9fcf,0x9b4f9abd,0x56e37310,0x26b81f42,0x69e51d84,0xdf5d5c1,0xaf12f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa9e101bb,0x3e9c5867,0xe0001255,0x25ed4c3f,0xaedcda77,0x531dc6f,0x9ae491a6,0x4e6f5de2,0xde4f1e4f,0xf27e9fcf,0x9b4f9abd,0x56e37310,0x26b81f42,0x69e51d84,0xdf5d5c1,0xaf12f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e9c5867a9e101bb,0x25ed4c3fe0001255,0x531dc6faedcda77,0x4e6f5de29ae491a6,0xf27e9fcfde4f1e4f,0x56e373109b4f9abd,0x69e51d8426b81f42,0xaf12f0df5d5c1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e9c5867a9e101bb,0x25ed4c3fe0001255,0x531dc6faedcda77,0x4e6f5de29ae491a6,0xf27e9fcfde4f1e4f,0x56e373109b4f9abd,0x69e51d8426b81f42,0xaf12f0df5d5c1}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf3b3,0x88db,0xd050,0x75f1,0x10dd,0x8cbe,0x97c4,0xe7a2,0xe6ae,0xf1e2,0xd51f,0x8d12,0x55a8,0x6395,0xc98a,0xc097,0x60c3,0x66aa,0x54f3,0x78ce,0x5dce,0x7fb3,0x99a7,0x3e1,0x8753,0x3c9a,0x3339,0x30f3,0x406e,0xc05e,0xc99f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x88dbf3b3,0x75f1d050,0x8cbe10dd,0xe7a297c4,0xf1e2e6ae,0x8d12d51f,0x639555a8,0xc097c98a,0x66aa60c3,0x78ce54f3,0x7fb35dce,0x3e199a7,0x3c9a8753,0x30f33339,0xc05e406e,0x7c99f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x75f1d05088dbf3b3,0xe7a297c48cbe10dd,0x8d12d51ff1e2e6ae,0xc097c98a639555a8,0x78ce54f366aa60c3,0x3e199a77fb35dce,0x30f333393c9a8753,0x7c99fc05e406e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xbdde,0x1bf3,0x7bc0,0xcae7,0x8d2e,0x58a8,0x4b1a,0xbcb8,0xccd9,0x3d98,0xfcf2,0x4881,0x66f8,0x3473,0x2e8a,0x730d,0xabbf,0x4789,0xdce9,0x58ea,0xf54,0x6169,0x58d0,0x3280,0xe1f9,0x5bd0,0x67a0,0x23d2,0x2611,0x18e1,0xd51f,0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1bf3bdde,0xcae77bc0,0x58a88d2e,0xbcb84b1a,0x3d98ccd9,0x4881fcf2,0x347366f8,0x730d2e8a,0x4789abbf,0x58eadce9,0x61690f54,0x328058d0,0x5bd0e1f9,0x23d267a0,0x18e12611,0x7d51f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcae77bc01bf3bdde,0xbcb84b1a58a88d2e,0x4881fcf23d98ccd9,0x730d2e8a347366f8,0x58eadce94789abbf,0x328058d061690f54,0x23d267a05bd0e1f9,0x7d51f18e12611}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xee54,0xe36f,0x939d,0xea02,0xe6db,0xa7ae,0x9f39,0x8bb3,0x7f02,0xf594,0xa9ab,0xae5e,0x9a76,0x7fc0,0xd9a,0xbecd,0x692d,0x8b68,0x30f4,0xff9a,0xd1ae,0xfd43,0xbdfc,0xc552,0xfe0d,0xa4bf,0x33e5,0x981b,0x202c,0x45e8,0x8573,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe36fee54,0xea02939d,0xa7aee6db,0x8bb39f39,0xf5947f02,0xae5ea9ab,0x7fc09a76,0xbecd0d9a,0x8b68692d,0xff9a30f4,0xfd43d1ae,0xc552bdfc,0xa4bffe0d,0x981b33e5,0x45e8202c,0xc8573}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xea02939de36fee54,0x8bb39f39a7aee6db,0xae5ea9abf5947f02,0xbecd0d9a7fc09a76,0xff9a30f48b68692d,0xc552bdfcfd43d1ae,0x981b33e5a4bffe0d,0xc857345e8202c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xc4d,0x7724,0x2faf,0x8a0e,0xef22,0x7341,0x683b,0x185d,0x1951,0xe1d,0x2ae0,0x72ed,0xaa57,0x9c6a,0x3675,0x3f68,0x9f3c,0x9955,0xab0c,0x8731,0xa231,0x804c,0x6658,0xfc1e,0x78ac,0xc365,0xccc6,0xcf0c,0xbf91,0x3fa1,0x3660,0x8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x77240c4d,0x8a0e2faf,0x7341ef22,0x185d683b,0xe1d1951,0x72ed2ae0,0x9c6aaa57,0x3f683675,0x99559f3c,0x8731ab0c,0x804ca231,0xfc1e6658,0xc36578ac,0xcf0cccc6,0x3fa1bf91,0x83660}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8a0e2faf77240c4d,0x185d683b7341ef22,0x72ed2ae00e1d1951,0x3f6836759c6aaa57,0x8731ab0c99559f3c,0xfc1e6658804ca231,0xcf0cccc6c36578ac,0x836603fa1bf91}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x39f7,0x51a0,0x71ea,0x7557,0x794c,0x6b5e,0x6a81,0x9aa7,0xd8dd,0xab85,0xe387,0x2121,0x1086,0x7989,0xe273,0xf813,0xebd5,0xb13f,0x9ef5,0xc6d5,0x2da2,0x14f8,0xecf3,0x24c4,0xf485,0xc8de,0xb9ef,0xb213,0xbc4d,0xe587,0xd591,0xd}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x39f7,0x51a0,0x71ea,0x7557,0x794c,0x6b5e,0x6a81,0x9aa7,0xd8dd,0xab85,0xe387,0x2121,0x1086,0x7989,0xe273,0xf813,0xebd5,0xb13f,0x9ef5,0xc6d5,0x2da2,0x14f8,0xecf3,0x24c4,0xf485,0xc8de,0xb9ef,0xb213,0xbc4d,0xe587,0xd591,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x51a039f7,0x755771ea,0x6b5e794c,0x9aa76a81,0xab85d8dd,0x2121e387,0x79891086,0xf813e273,0xb13febd5,0xc6d59ef5,0x14f82da2,0x24c4ecf3,0xc8def485,0xb213b9ef,0xe587bc4d,0xdd591}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x51a039f7,0x755771ea,0x6b5e794c,0x9aa76a81,0xab85d8dd,0x2121e387,0x79891086,0xf813e273,0xb13febd5,0xc6d59ef5,0x14f82da2,0x24c4ecf3,0xc8def485,0xb213b9ef,0xe587bc4d,0xdd591}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x755771ea51a039f7,0x9aa76a816b5e794c,0x2121e387ab85d8dd,0xf813e27379891086,0xc6d59ef5b13febd5,0x24c4ecf314f82da2,0xb213b9efc8def485,0xdd591e587bc4d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x755771ea51a039f7,0x9aa76a816b5e794c,0x2121e387ab85d8dd,0xf813e27379891086,0xc6d59ef5b13febd5,0x24c4ecf314f82da2,0xb213b9efc8def485,0xdd591e587bc4d}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc5d4,0x133f,0xc116,0x2a9e,0xacf5,0xaedd,0x6173,0xdacf,0x6448,0xa33e,0x6d36,0x5013,0x2093,0x59f6,0xe571,0x906d,0x37c9,0xe4ab,0xb92a,0xbe30,0x1d49,0xde58,0xffc8,0x47ff,0xe0cb,0x6230,0x6128,0x8679,0x731c,0xc5e,0x66c7,0xd}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc5d4,0x133f,0xc116,0x2a9e,0xacf5,0xaedd,0x6173,0xdacf,0x6448,0xa33e,0x6d36,0x5013,0x2093,0x59f6,0xe571,0x906d,0x37c9,0xe4ab,0xb92a,0xbe30,0x1d49,0xde58,0xffc8,0x47ff,0xe0cb,0x6230,0x6128,0x8679,0x731c,0xc5e,0x66c7,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x133fc5d4,0x2a9ec116,0xaeddacf5,0xdacf6173,0xa33e6448,0x50136d36,0x59f62093,0x906de571,0xe4ab37c9,0xbe30b92a,0xde581d49,0x47ffffc8,0x6230e0cb,0x86796128,0xc5e731c,0xd66c7}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x133fc5d4,0x2a9ec116,0xaeddacf5,0xdacf6173,0xa33e6448,0x50136d36,0x59f62093,0x906de571,0xe4ab37c9,0xbe30b92a,0xde581d49,0x47ffffc8,0x6230e0cb,0x86796128,0xc5e731c,0xd66c7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2a9ec116133fc5d4,0xdacf6173aeddacf5,0x50136d36a33e6448,0x906de57159f62093,0xbe30b92ae4ab37c9,0x47ffffc8de581d49,0x867961286230e0cb,0xd66c70c5e731c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2a9ec116133fc5d4,0xdacf6173aeddacf5,0x50136d36a33e6448,0x906de57159f62093,0xbe30b92ae4ab37c9,0x47ffffc8de581d49,0x867961286230e0cb,0xd66c70c5e731c}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x55ad,0x2e3e,0xd0dc,0x8dad,0x4e0a,0xe1d0,0x3e27,0x81af,0x1bb4,0xa5fa,0x52f2,0x5bd4,0x2b9b,0xddfe,0x36,0xbdd4,0xf99a,0x3027,0x21d2,0x7b29,0x10ee,0x2146,0x6864,0xec5c,0x6bbd,0x540f,0xbc15,0xe4a1,0xee,0x3d9c,0xdf51,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x55ad,0x2e3e,0xd0dc,0x8dad,0x4e0a,0xe1d0,0x3e27,0x81af,0x1bb4,0xa5fa,0x52f2,0x5bd4,0x2b9b,0xddfe,0x36,0xbdd4,0xf99a,0x3027,0x21d2,0x7b29,0x10ee,0x2146,0x6864,0xec5c,0x6bbd,0x540f,0xbc15,0xe4a1,0xee,0x3d9c,0xdf51,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2e3e55ad,0x8dadd0dc,0xe1d04e0a,0x81af3e27,0xa5fa1bb4,0x5bd452f2,0xddfe2b9b,0xbdd40036,0x3027f99a,0x7b2921d2,0x214610ee,0xec5c6864,0x540f6bbd,0xe4a1bc15,0x3d9c00ee,0x4df51}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x2e3e55ad,0x8dadd0dc,0xe1d04e0a,0x81af3e27,0xa5fa1bb4,0x5bd452f2,0xddfe2b9b,0xbdd40036,0x3027f99a,0x7b2921d2,0x214610ee,0xec5c6864,0x540f6bbd,0xe4a1bc15,0x3d9c00ee,0x4df51}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8dadd0dc2e3e55ad,0x81af3e27e1d04e0a,0x5bd452f2a5fa1bb4,0xbdd40036ddfe2b9b,0x7b2921d23027f99a,0xec5c6864214610ee,0xe4a1bc15540f6bbd,0x4df513d9c00ee}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8dadd0dc2e3e55ad,0x81af3e27e1d04e0a,0x5bd452f2a5fa1bb4,0xbdd40036ddfe2b9b,0x7b2921d23027f99a,0xec5c6864214610ee,0xe4a1bc15540f6bbd,0x4df513d9c00ee}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc609,0xae5f,0x8e15,0x8aa8,0x86b3,0x94a1,0x957e,0x6558,0x2722,0x547a,0x1c78,0xdede,0xef79,0x8676,0x1d8c,0x7ec,0x142a,0x4ec0,0x610a,0x392a,0xd25d,0xeb07,0x130c,0xdb3b,0xb7a,0x3721,0x4610,0x4dec,0x43b2,0x1a78,0x2a6e,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xc609,0xae5f,0x8e15,0x8aa8,0x86b3,0x94a1,0x957e,0x6558,0x2722,0x547a,0x1c78,0xdede,0xef79,0x8676,0x1d8c,0x7ec,0x142a,0x4ec0,0x610a,0x392a,0xd25d,0xeb07,0x130c,0xdb3b,0xb7a,0x3721,0x4610,0x4dec,0x43b2,0x1a78,0x2a6e,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xae5fc609,0x8aa88e15,0x94a186b3,0x6558957e,0x547a2722,0xdede1c78,0x8676ef79,0x7ec1d8c,0x4ec0142a,0x392a610a,0xeb07d25d,0xdb3b130c,0x37210b7a,0x4dec4610,0x1a7843b2,0x22a6e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xae5fc609,0x8aa88e15,0x94a186b3,0x6558957e,0x547a2722,0xdede1c78,0x8676ef79,0x7ec1d8c,0x4ec0142a,0x392a610a,0xeb07d25d,0xdb3b130c,0x37210b7a,0x4dec4610,0x1a7843b2,0x22a6e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 
0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8aa88e15ae5fc609,0x6558957e94a186b3,0xdede1c78547a2722,0x7ec1d8c8676ef79,0x392a610a4ec0142a,0xdb3b130ceb07d25d,0x4dec461037210b7a,0x22a6e1a7843b2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8aa88e15ae5fc609,0x6558957e94a186b3,0xdede1c78547a2722,0x7ec1d8c8676ef79,0x392a610a4ec0142a,0xdb3b130ceb07d25d,0x4dec461037210b7a,0x22a6e1a7843b2}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xfe45,0x561e,0xa798,0xc163,0xedaa,0x1fff,0xb3c0,0xda12,0x2588,0x5123,0x2390,0xface,0x6e59,0x651b,0xa21d,0xb190,0xe1b0,0x21b0,0x6030,0xd81,0x6542,0x64b0,0x8cef,0xa91c,0xe0bd,0xd947,0xe27b,0x961a,0x2a3e,0xf20a,0xed0,0x5}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xfe45,0x561e,0xa798,0xc163,0xedaa,0x1fff,0xb3c0,0xda12,0x2588,0x5123,0x2390,0xface,0x6e59,0x651b,0xa21d,0xb190,0xe1b0,0x21b0,0x6030,0xd81,0x6542,0x64b0,0x8cef,0xa91c,0xe0bd,0xd947,0xe27b,0x961a,0x2a3e,0xf20a,0xed0,0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x561efe45,0xc163a798,0x1fffedaa,0xda12b3c0,0x51232588,0xface2390,0x651b6e59,0xb190a21d,0x21b0e1b0,0xd816030,0x64b06542,0xa91c8cef,0xd947e0bd,0x961ae27b,0xf20a2a3e,0x50ed0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x561efe45,0xc163a798,0x1fffedaa,0xda12b3c0,0x51232588,0xface2390,0x651b6e59,0xb190a21d,0x21b0e1b0,0xd816030,0x64b06542,0xa91c8cef,0xd947e0bd,0x961ae27b,0xf20a2a3e,0x50ed0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc163a798561efe45,0xda12b3c01fffedaa,0xface239051232588,0xb190a21d651b6e59,0xd81603021b0e1b0,0xa91c8cef64b06542,0x961ae27bd947e0bd,0x50ed0f20a2a3e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xc163a798561efe45,0xda12b3c01fffedaa,0xface239051232588,0xb190a21d651b6e59,0xd81603021b0e1b0,0xa91c8cef64b06542,0x961ae27bd947e0bd,0x50ed0f20a2a3e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xff6,0x4b77,0xed4e,0x3538,0xc757,0x3304,0xd547,0xc49f,0x801b,0xb2ff,0x4796,0xcad,0x58be,0x969c,0x3e77,0x83e1,0xa371,0x7d43,0x6664,0x3720,0x6c98,0xe4e5,0x74dc,0xebbc,0xaab5,0xc7e0,0x3147,0x5bc9,0x537e,0x7ff5,0xb400,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xff6,0x4b77,0xed4e,0x3538,0xc757,0x3304,0xd547,0xc49f,0x801b,0xb2ff,0x4796,0xcad,0x58be,0x969c,0x3e77,0x83e1,0xa371,0x7d43,0x6664,0x3720,0x6c98,0xe4e5,0x74dc,0xebbc,0xaab5,0xc7e0,0x3147,0x5bc9,0x537e,0x7ff5,0xb400,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4b770ff6,0x3538ed4e,0x3304c757,0xc49fd547,0xb2ff801b,0xcad4796,0x969c58be,0x83e13e77,0x7d43a371,0x37206664,0xe4e56c98,0xebbc74dc,0xc7e0aab5,0x5bc93147,0x7ff5537e,0xcb400}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x4b770ff6,0x3538ed4e,0x3304c757,0xc49fd547,0xb2ff801b,0xcad4796,0x969c58be,0x83e13e77,0x7d43a371,0x37206664,0xe4e56c98,0xebbc74dc,0xc7e0aab5,0x5bc93147,0x7ff5537e,0xcb400}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3538ed4e4b770ff6,0xc49fd5473304c757,0xcad4796b2ff801b,0x83e13e77969c58be,0x372066647d43a371,0xebbc74dce4e56c98,0x5bc93147c7e0aab5,0xcb4007ff5537e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3538ed4e4b770ff6,0xc49fd5473304c757,0xcad4796b2ff801b,0x83e13e77969c58be,0x372066647d43a371,0xebbc74dce4e56c98,0x5bc93147c7e0aab5,0xcb4007ff5537e}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 
16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x77eb,0xce58,0xfaa0,0x429c,0xc7dd,0xbe89,0x68a,0xc196,0xd249,0x63ae,0xa04a,0xbc4e,0xbbb5,0x64ad,0xfc2b,0x4738,0x4d4d,0x918a,0x6b55,0x459d,0xf5ab,0x748c,0x208a,0xfe42,0x21d2,0x3785,0x23d7,0x28cc,0xfd9f,0x106a,0x3406,0xd}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x77eb,0xce58,0xfaa0,0x429c,0xc7dd,0xbe89,0x68a,0xc196,0xd249,0x63ae,0xa04a,0xbc4e,0xbbb5,0x64ad,0xfc2b,0x4738,0x4d4d,0x918a,0x6b55,0x459d,0xf5ab,0x748c,0x208a,0xfe42,0x21d2,0x3785,0x23d7,0x28cc,0xfd9f,0x106a,0x3406,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xce5877eb,0x429cfaa0,0xbe89c7dd,0xc196068a,0x63aed249,0xbc4ea04a,0x64adbbb5,0x4738fc2b,0x918a4d4d,0x459d6b55,0x748cf5ab,0xfe42208a,0x378521d2,0x28cc23d7,0x106afd9f,0xd3406}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xce5877eb,0x429cfaa0,0xbe89c7dd,0xc196068a,0x63aed249,0xbc4ea04a,0x64adbbb5,0x4738fc2b,0x918a4d4d,0x459d6b55,0x748cf5ab,0xfe42208a,0x378521d2,0x28cc23d7,0x106afd9f,0xd3406}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x429cfaa0ce5877eb,0xc196068abe89c7dd,0xbc4ea04a63aed249,0x4738fc2b64adbbb5,0x459d6b55918a4d4d,0xfe42208a748cf5ab,0x28cc23d7378521d2,0xd3406106afd9f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x429cfaa0ce5877eb,0xc196068abe89c7dd,0xbc4ea04a63aed249,0x4738fc2b64adbbb5,0x459d6b55918a4d4d,0xfe42208a748cf5ab,0x28cc23d7378521d2,0xd3406106afd9f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1bb,0xa9e1,0x5867,0x3e9c,0x1255,0xe000,0x4c3f,0x25ed,0xda77,0xaedc,0xdc6f,0x531,0x91a6,0x9ae4,0x5de2,0x4e6f,0x1e4f,0xde4f,0x9fcf,0xf27e,0x9abd,0x9b4f,0x7310,0x56e3,0x1f42,0x26b8,0x1d84,0x69e5,0xd5c1,0xdf5,0xf12f,0xa}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1bb,0xa9e1,0x5867,0x3e9c,0x1255,0xe000,0x4c3f,0x25ed,0xda77,0xaedc,0xdc6f,0x531,0x91a6,0x9ae4,0x5de2,0x4e6f,0x1e4f,0xde4f,0x9fcf,0xf27e,0x9abd,0x9b4f,0x7310,0x56e3,0x1f42,0x26b8,0x1d84,0x69e5,0xd5c1,0xdf5,0xf12f,0xa}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa9e101bb,0x3e9c5867,0xe0001255,0x25ed4c3f,0xaedcda77,0x531dc6f,0x9ae491a6,0x4e6f5de2,0xde4f1e4f,0xf27e9fcf,0x9b4f9abd,0x56e37310,0x26b81f42,0x69e51d84,0xdf5d5c1,0xaf12f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa9e101bb,0x3e9c5867,0xe0001255,0x25ed4c3f,0xaedcda77,0x531dc6f,0x9ae491a6,0x4e6f5de2,0xde4f1e4f,0xf27e9fcf,0x9b4f9abd,0x56e37310,0x26b81f42,0x69e51d84,0xdf5d5c1,0xaf12f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e9c5867a9e101bb,0x25ed4c3fe0001255,0x531dc6faedcda77,0x4e6f5de29ae491a6,0xf27e9fcfde4f1e4f,0x56e373109b4f9abd,0x69e51d8426b81f42,0xaf12f0df5d5c1}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x3e9c5867a9e101bb,0x25ed4c3fe0001255,0x531dc6faedcda77,0x4e6f5de29ae491a6,0xf27e9fcfde4f1e4f,0x56e373109b4f9abd,0x69e51d8426b81f42,0xaf12f0df5d5c1}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) 
{0xf9da,0x446d,0xe828,0xbaf8,0x86e,0x465f,0x4be2,0x73d1,0x7357,0xf8f1,0x6a8f,0x4689,0xaad4,0x31ca,0xe4c5,0xe04b,0x3061,0xb355,0x2a79,0x3c67,0xaee7,0xbfd9,0xccd3,0x81f0,0x43a9,0x9e4d,0x999c,0x1879,0x2037,0xe02f,0xe4cf,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x446df9da,0xbaf8e828,0x465f086e,0x73d14be2,0xf8f17357,0x46896a8f,0x31caaad4,0xe04be4c5,0xb3553061,0x3c672a79,0xbfd9aee7,0x81f0ccd3,0x9e4d43a9,0x1879999c,0xe02f2037,0xbe4cf}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbaf8e828446df9da,0x73d14be2465f086e,0x46896a8ff8f17357,0xe04be4c531caaad4,0x3c672a79b3553061,0x81f0ccd3bfd9aee7,0x1879999c9e4d43a9,0xbe4cfe02f2037}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xdeef,0xdf9,0xbde0,0x6573,0x4697,0x2c54,0x258d,0xde5c,0x666c,0x1ecc,0xfe79,0x2440,0xb37c,0x1a39,0x9745,0xb986,0xd5df,0xa3c4,0x6e74,0x2c75,0x87aa,0x30b4,0x2c68,0x9940,0x70fc,0x2de8,0x33d0,0x91e9,0x9308,0x8c70,0xea8f,0xb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdf9deef,0x6573bde0,0x2c544697,0xde5c258d,0x1ecc666c,0x2440fe79,0x1a39b37c,0xb9869745,0xa3c4d5df,0x2c756e74,0x30b487aa,0x99402c68,0x2de870fc,0x91e933d0,0x8c709308,0xbea8f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6573bde00df9deef,0xde5c258d2c544697,0x2440fe791ecc666c,0xb98697451a39b37c,0x2c756e74a3c4d5df,0x99402c6830b487aa,0x91e933d02de870fc,0xbea8f8c709308}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xf72a,0xf1b7,0x49ce,0xf501,0x736d,0xd3d7,0xcf9c,0x45d9,0x3f81,0xfaca,0x54d5,0x572f,0x4d3b,0x3fe0,0x86cd,0xdf66,0x3496,0x45b4,0x187a,0x7fcd,0xe8d7,0x7ea1,0x5efe,0xe2a9,0xff06,0xd25f,0x99f2,0x4c0d,0x1016,0xa2f4,0x42b9,0xe}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xf1b7f72a,0xf50149ce,0xd3d7736d,0x45d9cf9c,0xfaca3f81,0x572f54d5,0x3fe04d3b,0xdf6686cd,0x45b43496,0x7fcd187a,0x7ea1e8d7,0xe2a95efe,0xd25fff06,0x4c0d99f2,0xa2f41016,0xe42b9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf50149cef1b7f72a,0x45d9cf9cd3d7736d,0x572f54d5faca3f81,0xdf6686cd3fe04d3b,0x7fcd187a45b43496,0xe2a95efe7ea1e8d7,0x4c0d99f2d25fff06,0xe42b9a2f41016}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x627,0xbb92,0x17d7,0x4507,0xf791,0xb9a0,0xb41d,0x8c2e,0x8ca8,0x70e,0x9570,0xb976,0x552b,0xce35,0x1b3a,0x1fb4,0xcf9e,0x4caa,0xd586,0xc398,0x5118,0x4026,0x332c,0x7e0f,0xbc56,0x61b2,0x6663,0xe786,0xdfc8,0x1fd0,0x1b30,0x4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbb920627,0x450717d7,0xb9a0f791,0x8c2eb41d,0x70e8ca8,0xb9769570,0xce35552b,0x1fb41b3a,0x4caacf9e,0xc398d586,0x40265118,0x7e0f332c,0x61b2bc56,0xe7866663,0x1fd0dfc8,0x41b30}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x450717d7bb920627,0x8c2eb41db9a0f791,0xb9769570070e8ca8,0x1fb41b3ace35552b,0xc398d5864caacf9e,0x7e0f332c40265118,0xe786666361b2bc56,0x41b301fd0dfc8}}}} #endif -}}, {{ +}}}, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9e37,0x619b,0xa159,0x8865,0xab15,0x85c2,0xb3b,0x57ce,0x8108,0xa8d6,0xfeb0,0x8cf0,0xef13,0xc7e1,0x6936,0xc3a9,0xd8f2,0x9c5d,0x7c68,0x7ba2,0xf4da,0x4c63,0x845b,0x22eb,0xbedd,0x37a0,0x24f3,0x7019,0x2855,0x6905,0xb81c,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x9e37,0x619b,0xa159,0x8865,0xab15,0x85c2,0xb3b,0x57ce,0x8108,0xa8d6,0xfeb0,0x8cf0,0xef13,0xc7e1,0x6936,0xc3a9,0xd8f2,0x9c5d,0x7c68,0x7ba2,0xf4da,0x4c63,0x845b,0x22eb,0xbedd,0x37a0,0x24f3,0x7019,0x2855,0x6905,0xb81c,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x619b9e37,0x8865a159,0x85c2ab15,0x57ce0b3b,0xa8d68108,0x8cf0feb0,0xc7e1ef13,0xc3a96936,0x9c5dd8f2,0x7ba27c68,0x4c63f4da,0x22eb845b,0x37a0bedd,0x701924f3,0x69052855,0x3b81c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x619b9e37,0x8865a159,0x85c2ab15,0x57ce0b3b,0xa8d68108,0x8cf0feb0,0xc7e1ef13,0xc3a96936,0x9c5dd8f2,0x7ba27c68,0x4c63f4da,0x22eb845b,0x37a0bedd,0x701924f3,0x69052855,0x3b81c}}}} #elif GMP_LIMB_BITS == 
64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8865a159619b9e37,0x57ce0b3b85c2ab15,0x8cf0feb0a8d68108,0xc3a96936c7e1ef13,0x7ba27c689c5dd8f2,0x22eb845b4c63f4da,0x701924f337a0bedd,0x3b81c69052855}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8865a159619b9e37,0x57ce0b3b85c2ab15,0x8cf0feb0a8d68108,0xc3a96936c7e1ef13,0x7ba27c689c5dd8f2,0x22eb845b4c63f4da,0x701924f337a0bedd,0x3b81c69052855}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x92b5,0x1309,0xc1ee,0xadd1,0x165,0x4911,0xaf0c,0x4a4f,0x5374,0xd4b2,0x926f,0xacc0,0xfd2f,0xeb63,0x7c68,0xc188,0x41ce,0x152e,0x6cfe,0x9a22,0xadb,0x933,0x438c,0x5fef,0xe17a,0x82aa,0x7732,0x8c5b,0xfa7b,0x4cd4,0xdcee,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x92b5,0x1309,0xc1ee,0xadd1,0x165,0x4911,0xaf0c,0x4a4f,0x5374,0xd4b2,0x926f,0xacc0,0xfd2f,0xeb63,0x7c68,0xc188,0x41ce,0x152e,0x6cfe,0x9a22,0xadb,0x933,0x438c,0x5fef,0xe17a,0x82aa,0x7732,0x8c5b,0xfa7b,0x4cd4,0xdcee,0x6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x130992b5,0xadd1c1ee,0x49110165,0x4a4faf0c,0xd4b25374,0xacc0926f,0xeb63fd2f,0xc1887c68,0x152e41ce,0x9a226cfe,0x9330adb,0x5fef438c,0x82aae17a,0x8c5b7732,0x4cd4fa7b,0x6dcee}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x130992b5,0xadd1c1ee,0x49110165,0x4a4faf0c,0xd4b25374,0xacc0926f,0xeb63fd2f,0xc1887c68,0x152e41ce,0x9a226cfe,0x9330adb,0x5fef438c,0x82aae17a,0x8c5b7732,0x4cd4fa7b,0x6dcee}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xadd1c1ee130992b5,0x4a4faf0c49110165,0xacc0926fd4b25374,0xc1887c68eb63fd2f,0x9a226cfe152e41ce,0x5fef438c09330adb,0x8c5b773282aae17a,0x6dcee4cd4fa7b}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xadd1c1ee130992b5,0x4a4faf0c49110165,0xacc0926fd4b25374,0xc1887c68eb63fd2f,0x9a226cfe152e41ce,0x5fef438c09330adb,0x8c5b773282aae17a,0x6dcee4cd4fa7b}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x77b7,0xc00c,0x743e,0x91b3,0xc92c,0x3be,0xc9e8,0x4b6b,0x519c,0xed1b,0x857f,0x2be7,0x2270,0x64a0,0x3a21,0xd5ec,0xd5d1,0x2392,0x175a,0xa58f,0x5c36,0x3908,0x5f46,0x1875,0xee40,0xcd4a,0x7e0b,0x8eda,0x87e0,0xc28c,0x6e24,0xd}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x77b7,0xc00c,0x743e,0x91b3,0xc92c,0x3be,0xc9e8,0x4b6b,0x519c,0xed1b,0x857f,0x2be7,0x2270,0x64a0,0x3a21,0xd5ec,0xd5d1,0x2392,0x175a,0xa58f,0x5c36,0x3908,0x5f46,0x1875,0xee40,0xcd4a,0x7e0b,0x8eda,0x87e0,0xc28c,0x6e24,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc00c77b7,0x91b3743e,0x3bec92c,0x4b6bc9e8,0xed1b519c,0x2be7857f,0x64a02270,0xd5ec3a21,0x2392d5d1,0xa58f175a,0x39085c36,0x18755f46,0xcd4aee40,0x8eda7e0b,0xc28c87e0,0xd6e24}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc00c77b7,0x91b3743e,0x3bec92c,0x4b6bc9e8,0xed1b519c,0x2be7857f,0x64a02270,0xd5ec3a21,0x2392d5d1,0xa58f175a,0x39085c36,0x18755f46,0xcd4aee40,0x8eda7e0b,0xc28c87e0,0xd6e24}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x91b3743ec00c77b7,0x4b6bc9e803bec92c,0x2be7857fed1b519c,0xd5ec3a2164a02270,0xa58f175a2392d5d1,0x18755f4639085c36,0x8eda7e0bcd4aee40,0xd6e24c28c87e0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x91b3743ec00c77b7,0x4b6bc9e803bec92c,0x2be7857fed1b519c,0xd5ec3a2164a02270,0xa58f175a2392d5d1,0x18755f4639085c36,0x8eda7e0bcd4aee40,0xd6e24c28c87e0}}}} #endif , #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x61c9,0x9e64,0x5ea6,0x779a,0x54ea,0x7a3d,0xf4c4,0xa831,0x7ef7,0x5729,0x14f,0x730f,0x10ec,0x381e,0x96c9,0x3c56,0x270d,0x63a2,0x8397,0x845d,0xb25,0xb39c,0x7ba4,0xdd14,0x4122,0xc85f,0xdb0c,0x8fe6,0xd7aa,0x96fa,0x47e3,0xc}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x61c9,0x9e64,0x5ea6,0x779a,0x54ea,0x7a3d,0xf4c4,0xa831,0x7ef7,0x5729,0x14f,0x730f,0x10ec,0x381e,0x96c9,0x3c56,0x270d,0x63a2,0x8397,0x845d,0xb25,0xb39c,0x7ba4,0xdd14,0x4122,0xc85f,0xdb0c,0x8fe6,0xd7aa,0x96fa,0x47e3,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9e6461c9,0x779a5ea6,0x7a3d54ea,0xa831f4c4,0x57297ef7,0x730f014f,0x381e10ec,0x3c5696c9,0x63a2270d,0x845d8397,0xb39c0b25,0xdd147ba4,0xc85f4122,0x8fe6db0c,0x96fad7aa,0xc47e3}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9e6461c9,0x779a5ea6,0x7a3d54ea,0xa831f4c4,0x57297ef7,0x730f014f,0x381e10ec,0x3c5696c9,0x63a2270d,0x845d8397,0xb39c0b25,0xdd147ba4,0xc85f4122,0x8fe6db0c,0x96fad7aa,0xc47e3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x779a5ea69e6461c9,0xa831f4c47a3d54ea,0x730f014f57297ef7,0x3c5696c9381e10ec,0x845d839763a2270d,0xdd147ba4b39c0b25,0x8fe6db0cc85f4122,0xc47e396fad7aa}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x779a5ea69e6461c9,0xa831f4c47a3d54ea,0x730f014f57297ef7,0x3c5696c9381e10ec,0x845d839763a2270d,0xdd147ba4b39c0b25,0x8fe6db0cc85f4122,0xc47e396fad7aa}}}} #endif -}}}}; +}}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/finit.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/finit.c index b3808edf07..c9a3687282 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/finit.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/finit.c @@ -29,29 +29,29 @@ quat_alg_elem_finalize(quat_alg_elem_t *elem) void ibz_vec_2_init(ibz_vec_2_t *vec) { - ibz_init(&((*vec)[0])); - ibz_init(&((*vec)[1])); + ibz_init(&(vec->v[0])); + ibz_init(&(vec->v[1])); } void ibz_vec_2_finalize(ibz_vec_2_t *vec) { - ibz_finalize(&((*vec)[0])); - ibz_finalize(&((*vec)[1])); + ibz_finalize(&(vec->v[0])); + ibz_finalize(&(vec->v[1])); } void ibz_vec_4_init(ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_init(&(*vec)[i]); + ibz_init(&vec->v[i]); } } void ibz_vec_4_finalize(ibz_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibz_finalize(&(*vec)[i]); + ibz_finalize(&vec->v[i]); } } @@ -60,7 +60,7 @@ ibz_mat_2x2_init(ibz_mat_2x2_t *mat) { for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_init(&(*mat)[i][j]); + ibz_init(&(mat->m)[i][j]); } } } @@ -69,7 +69,7 @@ ibz_mat_2x2_finalize(ibz_mat_2x2_t *mat) { for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_finalize(&(*mat)[i][j]); + ibz_finalize(&(mat->m)[i][j]); } } } @@ -79,7 +79,7 @@ ibz_mat_4x4_init(ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_init(&(*mat)[i][j]); + ibz_init(&(mat->m)[i][j]); } } } @@ -88,7 +88,7 @@ ibz_mat_4x4_finalize(ibz_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_finalize(&(*mat)[i][j]); + ibz_finalize(&(mat->m)[i][j]); } } } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf.c index 511a0a5d38..5edff425c8 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hnf.c @@ -14,21 +14,21 @@ ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat) for (int i = 0; i < 
4; i++) { // upper triangular for (int j = 0; j < i; j++) { - res = res && ibz_is_zero(&((*mat)[i][j])); + res = res && ibz_is_zero(&(mat->m[i][j])); } // find first non 0 element of line found = 0; for (int j = i; j < 4; j++) { if (found) { // all values are positive, and first non-0 is the largest of that line - res = res && (ibz_cmp(&((*mat)[i][j]), &zero) >= 0); - res = res && (ibz_cmp(&((*mat)[i][ind]), &((*mat)[i][j])) > 0); + res = res && (ibz_cmp(&(mat->m[i][j]), &zero) >= 0); + res = res && (ibz_cmp(&(mat->m[i][ind]), &(mat->m[i][j])) > 0); } else { - if (!ibz_is_zero(&((*mat)[i][j]))) { + if (!ibz_is_zero(&(mat->m[i][j]))) { found = 1; ind = j; // mustbe non-negative - res = res && (ibz_cmp(&((*mat)[i][j]), &zero) > 0); + res = res && (ibz_cmp(&(mat->m[i][j]), &zero) > 0); } } } @@ -37,7 +37,7 @@ ibz_mat_4x4_is_hnf(const ibz_mat_4x4_t *mat) int linestart = -1; int i = 0; for (int j = 0; j < 4; j++) { - while ((i < 4) && (ibz_is_zero(&((*mat)[i][j])))) { + while ((i < 4) && (ibz_is_zero(&(mat->m[i][j])))) { i = i + 1; } if (i != 4) { @@ -66,13 +66,13 @@ ibz_vec_4_linear_combination_mod(ibz_vec_4_t *lc, ibz_init(&m); ibz_copy(&m, mod); for (int i = 0; i < 4; i++) { - ibz_mul(&(sums[i]), coeff_a, &((*vec_a)[i])); - ibz_mul(&prod, coeff_b, &((*vec_b)[i])); - ibz_add(&(sums[i]), &(sums[i]), &prod); - ibz_centered_mod(&(sums[i]), &(sums[i]), &m); + ibz_mul(&(sums.v[i]), coeff_a, &(vec_a->v[i])); + ibz_mul(&prod, coeff_b, &(vec_b->v[i])); + ibz_add(&(sums.v[i]), &(sums.v[i]), &prod); + ibz_centered_mod(&(sums.v[i]), &(sums.v[i]), &m); } for (int i = 0; i < 4; i++) { - ibz_copy(&((*lc)[i]), &(sums[i])); + ibz_copy(&(lc->v[i]), &(sums.v[i])); } ibz_finalize(&prod); ibz_finalize(&m); @@ -86,7 +86,7 @@ ibz_vec_4_copy_mod(ibz_vec_4_t *res, const ibz_vec_4_t *vec, const ibz_t *mod) ibz_init(&m); ibz_copy(&m, mod); for (int i = 0; i < 4; i++) { - ibz_centered_mod(&((*res)[i]), &((*vec)[i]), &m); + ibz_centered_mod(&(res->v[i]), &(vec->v[i]), &m); } ibz_finalize(&m); } @@ -101,8 +101,8 @@ ibz_vec_4_scalar_mul_mod(ibz_vec_4_t *prod, const ibz_t *scalar, const ibz_vec_4 ibz_copy(&s, scalar); ibz_copy(&m, mod); for (int i = 0; i < 4; i++) { - ibz_mul(&((*prod)[i]), &((*vec)[i]), &s); - ibz_mod(&((*prod)[i]), &((*prod)[i]), &m); + ibz_mul(&(prod->v[i]), &(vec->v[i]), &s); + ibz_mod(&(prod->v[i]), &(prod->v[i]), &m); } ibz_finalize(&m); ibz_finalize(&s); @@ -138,36 +138,36 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec if (h < 4) ibz_vec_4_init(&(w[h])); ibz_vec_4_init(&(a[h])); - ibz_copy(&(a[h][0]), &(generators[h][0])); - ibz_copy(&(a[h][1]), &(generators[h][1])); - ibz_copy(&(a[h][2]), &(generators[h][2])); - ibz_copy(&(a[h][3]), &(generators[h][3])); + ibz_copy(&(a[h].v[0]), &(generators[h].v[0])); + ibz_copy(&(a[h].v[1]), &(generators[h].v[1])); + ibz_copy(&(a[h].v[2]), &(generators[h].v[2])); + ibz_copy(&(a[h].v[3]), &(generators[h].v[3])); } assert(ibz_cmp(mod, &ibz_const_zero) > 0); ibz_copy(&m, mod); while (i != -1) { while (j != 0) { j = j - 1; - if (!ibz_is_zero(&(a[j][i]))) { + if (!ibz_is_zero(&(a[j].v[i]))) { // assumtion that ibz_xgcd outputs u,v which are small in absolute // value is needed here also, needs u non 0, but v can be 0 if needed - ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &(a[j][i])); + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k].v[i]), &(a[j].v[i])); ibz_vec_4_linear_combination(&c, &u, &(a[k]), &v, &(a[j])); - ibz_div(&coeff_1, &r, &(a[k][i]), &d); - ibz_div(&coeff_2, &r, &(a[j][i]), &d); + ibz_div(&coeff_1, &r, &(a[k].v[i]), &d); 
+ ibz_div(&coeff_2, &r, &(a[j].v[i]), &d); ibz_neg(&coeff_2, &coeff_2); ibz_vec_4_linear_combination_mod( &(a[j]), &coeff_1, &(a[j]), &coeff_2, &(a[k]), &m); // do lin comb mod m ibz_vec_4_copy_mod(&(a[k]), &c, &m); // mod m in copy } } - ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k][i]), &m); + ibz_xgcd_with_u_not_0(&d, &u, &v, &(a[k].v[i]), &m); ibz_vec_4_scalar_mul_mod(&(w[i]), &u, &(a[k]), &m); // mod m in scalar mult - if (ibz_is_zero(&(w[i][i]))) { - ibz_copy(&(w[i][i]), &m); + if (ibz_is_zero(&(w[i].v[i]))) { + ibz_copy(&(w[i].v[i]), &m); } for (int h = i + 1; h < 4; h++) { - ibz_div_floor(&q, &r, &(w[h][i]), &(w[i][i])); + ibz_div_floor(&q, &r, &(w[h].v[i]), &(w[i].v[i])); ibz_neg(&q, &q); ibz_vec_4_linear_combination(&(w[h]), &ibz_const_one, &(w[h]), &q, &(w[i])); } @@ -177,8 +177,8 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec k = k - 1; i = i - 1; j = k; - if (ibz_is_zero(&(a[k][i]))) - ibz_copy(&(a[k][i]), &m); + if (ibz_is_zero(&(a[k].v[i]))) + ibz_copy(&(a[k].v[i]), &m); } else { k = k - 1; @@ -188,7 +188,7 @@ ibz_mat_4xn_hnf_mod_core(ibz_mat_4x4_t *hnf, int generator_number, const ibz_vec } for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { - ibz_copy(&((*hnf)[i][j]), &(w[j][i])); + ibz_copy(&((hnf->m)[i][j]), &(w[j].v[i])); } } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ibz_division.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ibz_division.c index 0fd35b5c65..f630f5a9fe 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ibz_division.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ibz_division.c @@ -8,5 +8,5 @@ void ibz_xgcd(ibz_t *gcd, ibz_t *u, ibz_t *v, const ibz_t *a, const ibz_t *b) { - mpz_gcdext(*gcd, *u, *v, *a, *b); + mpz_gcdext(gcd->i, u->i, v->i, a->i, b->i); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/id2iso.c index 0743974345..1be9d87e71 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/id2iso.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/id2iso.c @@ -18,8 +18,8 @@ ec_biscalar_mul_ibz_vec(ec_point_t *res, const ec_curve_t *curve) { digit_t scalars[2][NWORDS_ORDER]; - ibz_to_digit_array(scalars[0], &(*scalar_vec)[0]); - ibz_to_digit_array(scalars[1], &(*scalar_vec)[1]); + ibz_to_digit_array(scalars[0], &scalar_vec->v[0]); + ibz_to_digit_array(scalars[1], &scalar_vec->v[1]); ec_biscalar_mul(res, scalars[0], scalars[1], f, PQ, curve); } @@ -48,14 +48,14 @@ id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *vec, const quat_left_ideal_t *lid quat_change_to_O0_basis(&coeffs, &alpha); for (unsigned i = 0; i < 2; ++i) { - ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + ibz_add(&mat.m[i][i], &mat.m[i][i], &coeffs.v[0]); for (unsigned j = 0; j < 2; ++j) { - ibz_mul(&tmp, &ACTION_GEN2[i][j], &coeffs[1]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &ACTION_GEN3[i][j], &coeffs[2]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &ACTION_GEN4[i][j], &coeffs[3]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN2.m[i][j], &coeffs.v[1]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN3.m[i][j], &coeffs.v[2]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &ACTION_GEN4.m[i][j], &coeffs.v[3]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); } } @@ -67,16 +67,16 @@ id2iso_ideal_to_kernel_dlogs_even(ibz_vec_2_t *vec, const quat_left_ideal_t *lid { const ibz_t *const norm = &lideal->norm; - ibz_mod(&(*vec)[0], &mat[0][0], norm); - ibz_mod(&(*vec)[1], &mat[1][0], 
norm); - ibz_gcd(&tmp, &(*vec)[0], &(*vec)[1]); + ibz_mod(&vec->v[0], &mat.m[0][0], norm); + ibz_mod(&vec->v[1], &mat.m[1][0], norm); + ibz_gcd(&tmp, &vec->v[0], &vec->v[1]); if (ibz_is_even(&tmp)) { - ibz_mod(&(*vec)[0], &mat[0][1], norm); - ibz_mod(&(*vec)[1], &mat[1][1], norm); + ibz_mod(&vec->v[0], &mat.m[0][1], norm); + ibz_mod(&vec->v[1], &mat.m[1][1], norm); } #ifndef NDEBUG - ibz_gcd(&tmp, &(*vec)[0], norm); - ibz_gcd(&tmp, &(*vec)[1], &tmp); + ibz_gcd(&tmp, &vec->v[0], norm); + ibz_gcd(&tmp, &vec->v[1], &tmp); assert(!ibz_cmp(&tmp, &ibz_const_one)); #endif } @@ -102,28 +102,28 @@ matrix_application_even_basis(ec_basis_t *bas, const ec_curve_t *E, ibz_mat_2x2_ copy_basis(&tmp_bas, bas); // reduction mod 2f - ibz_mod(&(*mat)[0][0], &(*mat)[0][0], &pow_two); - ibz_mod(&(*mat)[0][1], &(*mat)[0][1], &pow_two); - ibz_mod(&(*mat)[1][0], &(*mat)[1][0], &pow_two); - ibz_mod(&(*mat)[1][1], &(*mat)[1][1], &pow_two); + ibz_mod(&mat->m[0][0], &mat->m[0][0], &pow_two); + ibz_mod(&mat->m[0][1], &mat->m[0][1], &pow_two); + ibz_mod(&mat->m[1][0], &mat->m[1][0], &pow_two); + ibz_mod(&mat->m[1][1], &mat->m[1][1], &pow_two); // For a matrix [[a, c], [b, d]] we compute: // // first basis element R = [a]P + [b]Q - ibz_to_digit_array(scalars[0], &(*mat)[0][0]); - ibz_to_digit_array(scalars[1], &(*mat)[1][0]); + ibz_to_digit_array(scalars[0], &mat->m[0][0]); + ibz_to_digit_array(scalars[1], &mat->m[1][0]); ec_biscalar_mul(&bas->P, scalars[0], scalars[1], f, &tmp_bas, E); // second basis element S = [c]P + [d]Q - ibz_to_digit_array(scalars[0], &(*mat)[0][1]); - ibz_to_digit_array(scalars[1], &(*mat)[1][1]); + ibz_to_digit_array(scalars[0], &mat->m[0][1]); + ibz_to_digit_array(scalars[1], &mat->m[1][1]); ec_biscalar_mul(&bas->Q, scalars[0], scalars[1], f, &tmp_bas, E); // Their difference R - S = [a - c]P + [b - d]Q - ibz_sub(&tmp, &(*mat)[0][0], &(*mat)[0][1]); + ibz_sub(&tmp, &mat->m[0][0], &mat->m[0][1]); ibz_mod(&tmp, &tmp, &pow_two); ibz_to_digit_array(scalars[0], &tmp); - ibz_sub(&tmp, &(*mat)[1][0], &(*mat)[1][1]); + ibz_sub(&tmp, &mat->m[1][0], &mat->m[1][1]); ibz_mod(&tmp, &tmp, &pow_two); ibz_to_digit_array(scalars[1], &tmp); ret = ec_biscalar_mul(&bas->PmQ, scalars[0], scalars[1], f, &tmp_bas, E); @@ -157,23 +157,23 @@ endomorphism_application_even_basis(ec_basis_t *bas, quat_alg_make_primitive(&coeffs, &content, theta, &EXTREMAL_ORDERS[index_alternate_curve].order); assert(ibz_is_odd(&content)); - ibz_set(&mat[0][0], 0); - ibz_set(&mat[0][1], 0); - ibz_set(&mat[1][0], 0); - ibz_set(&mat[1][1], 0); + ibz_set(&mat.m[0][0], 0); + ibz_set(&mat.m[0][1], 0); + ibz_set(&mat.m[1][0], 0); + ibz_set(&mat.m[1][1], 0); // computing the matrix for (unsigned i = 0; i < 2; ++i) { - ibz_add(&mat[i][i], &mat[i][i], &coeffs[0]); + ibz_add(&mat.m[i][i], &mat.m[i][i], &coeffs.v[0]); for (unsigned j = 0; j < 2; ++j) { - ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen2[i][j], &coeffs[1]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen3[i][j], &coeffs[2]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen4[i][j], &coeffs[3]); - ibz_add(&mat[i][j], &mat[i][j], &tmp); - ibz_mul(&mat[i][j], &mat[i][j], &content); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen2.m[i][j], &coeffs.v[1]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen3.m[i][j], &coeffs.v[2]); 
+ ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&tmp, &CURVES_WITH_ENDOMORPHISMS[index_alternate_curve].action_gen4.m[i][j], &coeffs.v[3]); + ibz_add(&mat.m[i][j], &mat.m[i][j], &tmp); + ibz_mul(&mat.m[i][j], &mat.m[i][j], &content); } } @@ -215,19 +215,19 @@ id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t * ibz_mat_2x2_t mat; ibz_mat_2x2_init(&mat); - ibz_copy(&mat[0][0], &(*vec2)[0]); - ibz_copy(&mat[1][0], &(*vec2)[1]); + ibz_copy(&mat.m[0][0], &vec2->v[0]); + ibz_copy(&mat.m[1][0], &vec2->v[1]); ibz_mat_2x2_eval(&vec, &ACTION_J, vec2); - ibz_copy(&mat[0][1], &vec[0]); - ibz_copy(&mat[1][1], &vec[1]); + ibz_copy(&mat.m[0][1], &vec.v[0]); + ibz_copy(&mat.m[1][1], &vec.v[1]); ibz_mat_2x2_eval(&vec, &ACTION_GEN4, vec2); - ibz_add(&mat[0][1], &mat[0][1], &vec[0]); - ibz_add(&mat[1][1], &mat[1][1], &vec[1]); + ibz_add(&mat.m[0][1], &mat.m[0][1], &vec.v[0]); + ibz_add(&mat.m[1][1], &mat.m[1][1], &vec.v[1]); - ibz_mod(&mat[0][1], &mat[0][1], &two_pow); - ibz_mod(&mat[1][1], &mat[1][1], &two_pow); + ibz_mod(&mat.m[0][1], &mat.m[0][1], &two_pow); + ibz_mod(&mat.m[1][1], &mat.m[1][1], &two_pow); ibz_mat_2x2_t inv; ibz_mat_2x2_init(&inv); @@ -247,11 +247,11 @@ id2iso_kernel_dlogs_to_ideal_even(quat_left_ideal_t *lideal, const ibz_vec_2_t * quat_alg_elem_t gen; quat_alg_elem_init(&gen); ibz_set(&gen.denom, 2); - ibz_add(&gen.coord[0], &vec[0], &vec[0]); - ibz_set(&gen.coord[1], -2); - ibz_add(&gen.coord[2], &vec[1], &vec[1]); - ibz_copy(&gen.coord[3], &vec[1]); - ibz_add(&gen.coord[0], &gen.coord[0], &vec[1]); + ibz_add(&gen.coord.v[0], &vec.v[0], &vec.v[0]); + ibz_set(&gen.coord.v[1], -2); + ibz_add(&gen.coord.v[2], &vec.v[1], &vec.v[1]); + ibz_copy(&gen.coord.v[3], &vec.v[1]); + ibz_add(&gen.coord.v[0], &gen.coord.v[0], &vec.v[1]); ibz_vec_2_finalize(&vec); quat_lideal_create(lideal, &gen, &two_pow, &MAXORD_O0, &QUATALG_PINFTY); @@ -319,10 +319,10 @@ _change_of_basis_matrix_tate(ibz_mat_2x2_t *mat, #endif // Copy the results into the matrix - ibz_copy_digit_array(&((*mat)[0][0]), x1); - ibz_copy_digit_array(&((*mat)[1][0]), x2); - ibz_copy_digit_array(&((*mat)[0][1]), x3); - ibz_copy_digit_array(&((*mat)[1][1]), x4); + ibz_copy_digit_array(&(mat->m[0][0]), x1); + ibz_copy_digit_array(&(mat->m[1][0]), x2); + ibz_copy_digit_array(&(mat->m[0][1]), x3); + ibz_copy_digit_array(&(mat->m[1][1]), x4); } void diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ideal.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ideal.c index 9cf863a104..8634143941 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ideal.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ideal.c @@ -33,7 +33,7 @@ quat_lideal_copy(quat_left_ideal_t *copy, const quat_left_ideal_t *copied) ibz_copy(©->lattice.denom, &copied->lattice.denom); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(©->lattice.basis[i][j], &copied->lattice.basis[i][j]); + ibz_copy(©->lattice.basis.m[i][j], &copied->lattice.basis.m[i][j]); } } } @@ -248,13 +248,13 @@ quat_lideal_class_gram(ibz_mat_4x4_t *G, const quat_left_ideal_t *lideal, const for (int i = 0; i < 4; i++) { for (int j = 0; j <= i; j++) { - ibz_div(&(*G)[i][j], &rmd, &(*G)[i][j], &divisor); + ibz_div(&G->m[i][j], &rmd, &G->m[i][j], &divisor); assert(ibz_is_zero(&rmd)); } } for (int i = 0; i < 4; i++) { for (int j = 0; j <= i - 1; j++) { - ibz_copy(&(*G)[j][i], &(*G)[i][j]); + ibz_copy(&G->m[j][i], &G->m[i][j]); } } @@ -289,8 +289,8 @@ quat_order_discriminant(ibz_t *disc, const quat_lattice_t *order, const quat_alg 
ibz_mat_4x4_transpose(&transposed, &(order->basis)); // multiply gram matrix by 2 because of reduced trace ibz_mat_4x4_identity(&norm); - ibz_copy(&(norm[2][2]), &(alg->p)); - ibz_copy(&(norm[3][3]), &(alg->p)); + ibz_copy(&(norm.m[2][2]), &(alg->p)); + ibz_copy(&(norm.m[3][3]), &(alg->p)); ibz_mat_4x4_scalar_mul(&norm, &ibz_const_two, &norm); ibz_mat_4x4_mul(&prod, &transposed, &norm); ibz_mat_4x4_mul(&prod, &prod, &(order->basis)); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/intbig.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/intbig.c index b0462dc8b5..e219bf3d96 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/intbig.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/intbig.c @@ -114,48 +114,48 @@ DEBUG_STR_FUN_4(const char *op, const ibz_t *arg1, const ibz_t *arg2, const ibz_ * @{ */ -const __mpz_struct ibz_const_zero[1] = { +const ibz_t ibz_const_zero = {{ { ._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]){ 0 }, } -}; +}}; -const __mpz_struct ibz_const_one[1] = { +const ibz_t ibz_const_one = {{ { ._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]){ 1 }, } -}; +}}; -const __mpz_struct ibz_const_two[1] = { +const ibz_t ibz_const_two = {{ { ._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]){ 2 }, } -}; +}}; -const __mpz_struct ibz_const_three[1] = { +const ibz_t ibz_const_three = {{ { ._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]){ 3 }, } -}; +}}; void ibz_init(ibz_t *x) { - mpz_init(*x); + mpz_init(x->i); } void ibz_finalize(ibz_t *x) { - mpz_clear(*x); + mpz_clear(x->i); } void @@ -168,7 +168,7 @@ ibz_add(ibz_t *sum, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_add(*sum, *a, *b); + mpz_add(sum->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_3("ibz_add", sum, &a_cp, &b_cp); ibz_finalize(&a_cp); @@ -186,7 +186,7 @@ ibz_sub(ibz_t *diff, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_sub(*diff, *a, *b); + mpz_sub(diff->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_3("ibz_sub", diff, &a_cp, &b_cp); @@ -205,7 +205,7 @@ ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_mul(*prod, *a, *b); + mpz_mul(prod->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_3("ibz_mul", prod, &a_cp, &b_cp); ibz_finalize(&a_cp); @@ -216,13 +216,13 @@ ibz_mul(ibz_t *prod, const ibz_t *a, const ibz_t *b) void ibz_neg(ibz_t *neg, const ibz_t *a) { - mpz_neg(*neg, *a); + mpz_neg(neg->i, a->i); } void ibz_abs(ibz_t *abs, const ibz_t *a) { - mpz_abs(*abs, *a); + mpz_abs(abs->i, a->i); } void @@ -235,7 +235,7 @@ ibz_div(ibz_t *quotient, ibz_t *remainder, const ibz_t *a, const ibz_t *b) ibz_copy(&a_cp, a); ibz_copy(&b_cp, b); #endif - mpz_tdiv_qr(*quotient, *remainder, *a, *b); + mpz_tdiv_qr(quotient->i, remainder->i, a->i, b->i); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_4("ibz_div", quotient, remainder, &a_cp, &b_cp); ibz_finalize(&a_cp); @@ -251,7 +251,7 @@ ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp) ibz_init(&a_cp); ibz_copy(&a_cp, a); #endif - mpz_tdiv_q_2exp(*quotient, *a, exp); + mpz_tdiv_q_2exp(quotient->i, a->i, exp); #ifdef DEBUG_VERBOSE DEBUG_STR_FUN_MP2_INT("ibz_div_2exp,%Zx,%Zx,%x\n", quotient, &a_cp, exp); ibz_finalize(&a_cp); @@ -261,50 +261,50 @@ ibz_div_2exp(ibz_t *quotient, const ibz_t *a, uint32_t exp) void ibz_div_floor(ibz_t *q, ibz_t *r, const ibz_t *n, const ibz_t *d) { - mpz_fdiv_qr(*q, *r, *n, *d); + mpz_fdiv_qr(q->i, r->i, n->i, d->i); } void ibz_mod(ibz_t *r, const ibz_t *a, const ibz_t *b) { - 
mpz_mod(*r, *a, *b); + mpz_mod(r->i, a->i, b->i); } unsigned long int -ibz_mod_ui(const mpz_t *n, unsigned long int d) +ibz_mod_ui(const ibz_t *n, unsigned long int d) { - return mpz_fdiv_ui(*n, d); + return mpz_fdiv_ui(n->i, d); } int ibz_divides(const ibz_t *a, const ibz_t *b) { - return mpz_divisible_p(*a, *b); + return mpz_divisible_p(a->i, b->i); } void ibz_pow(ibz_t *pow, const ibz_t *x, uint32_t e) { - mpz_pow_ui(*pow, *x, e); + mpz_pow_ui(pow->i, x->i, e); } void ibz_pow_mod(ibz_t *pow, const ibz_t *x, const ibz_t *e, const ibz_t *m) { - mpz_powm(*pow, *x, *e, *m); + mpz_powm(pow->i, x->i, e->i, m->i); DEBUG_STR_FUN_4("ibz_pow_mod", pow, x, e, m); } int ibz_two_adic(ibz_t *pow) { - return mpz_scan1(*pow, 0); + return mpz_scan1(pow->i, 0); } int ibz_cmp(const ibz_t *a, const ibz_t *b) { - int ret = mpz_cmp(*a, *b); + int ret = mpz_cmp(a->i, b->i); DEBUG_STR_FUN_INT_MP2("ibz_cmp", ret, a, b); return ret; } @@ -312,7 +312,7 @@ ibz_cmp(const ibz_t *a, const ibz_t *b) int ibz_is_zero(const ibz_t *x) { - int ret = !mpz_cmp_ui(*x, 0); + int ret = !mpz_cmp_ui(x->i, 0); DEBUG_STR_FUN_INT_MP("ibz_is_zero", ret, x); return ret; } @@ -320,7 +320,7 @@ ibz_is_zero(const ibz_t *x) int ibz_is_one(const ibz_t *x) { - int ret = !mpz_cmp_ui(*x, 1); + int ret = !mpz_cmp_ui(x->i, 1); DEBUG_STR_FUN_INT_MP("ibz_is_one", ret, x); return ret; } @@ -328,7 +328,7 @@ ibz_is_one(const ibz_t *x) int ibz_cmp_int32(const ibz_t *x, int32_t y) { - int ret = mpz_cmp_si(*x, (signed long int)y); + int ret = mpz_cmp_si(x->i, (signed long int)y); DEBUG_STR_FUN_INT_MP_INT("ibz_cmp_int32", ret, x, y); return ret; } @@ -336,7 +336,7 @@ ibz_cmp_int32(const ibz_t *x, int32_t y) int ibz_is_even(const ibz_t *x) { - int ret = !mpz_tstbit(*x, 0); + int ret = !mpz_tstbit(x->i, 0); DEBUG_STR_FUN_INT_MP("ibz_is_even", ret, x); return ret; } @@ -344,7 +344,7 @@ ibz_is_even(const ibz_t *x) int ibz_is_odd(const ibz_t *x) { - int ret = mpz_tstbit(*x, 0); + int ret = mpz_tstbit(x->i, 0); DEBUG_STR_FUN_INT_MP("ibz_is_odd", ret, x); return ret; } @@ -352,7 +352,7 @@ ibz_is_odd(const ibz_t *x) void ibz_set(ibz_t *i, int32_t x) { - mpz_set_si(*i, x); + mpz_set_si(i->i, x); } int @@ -361,7 +361,7 @@ ibz_convert_to_str(const ibz_t *i, char *str, int base) if (!str || (base != 10 && base != 16)) return 0; - mpz_get_str(str, base, *i); + mpz_get_str(str, base, i->i); return 1; } @@ -380,29 +380,29 @@ ibz_print(const ibz_t *num, int base) int ibz_set_from_str(ibz_t *i, const char *str, int base) { - return (1 + mpz_set_str(*i, str, base)); + return (1 + mpz_set_str(i->i, str, base)); } void ibz_copy(ibz_t *target, const ibz_t *value) { - mpz_set(*target, *value); + mpz_set(target->i, value->i); } void ibz_swap(ibz_t *a, ibz_t *b) { - mpz_swap(*a, *b); + mpz_swap(a->i, b->i); } int32_t ibz_get(const ibz_t *i) { #if LONG_MAX == INT32_MAX - return (int32_t)mpz_get_si(*i); + return (int32_t)mpz_get_si(i->i); #elif LONG_MAX > INT32_MAX // Extracts the sign bit and the 31 least significant bits - signed long int t = mpz_get_si(*i); + signed long int t = mpz_get_si(i->i); return (int32_t)((t >> (sizeof(signed long int) * 8 - 32)) & INT32_C(0x80000000)) | (t & INT32_C(0x7FFFFFFF)); #else #error Unsupported configuration: LONG_MAX must be >= INT32_MAX @@ -417,10 +417,10 @@ ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b) mpz_t tmp; mpz_t bmina; mpz_init(bmina); - mpz_sub(bmina, *b, *a); + mpz_sub(bmina, b->i, a->i); if (mpz_sgn(bmina) == 0) { - mpz_set(*rand, *a); + mpz_set(rand->i, a->i); mpz_clear(bmina); return 1; } @@ -466,7 +466,7 @@ 
ibz_rand_interval(ibz_t *rand, const ibz_t *a, const ibz_t *b) break; } while (1); - mpz_add(*rand, tmp, *a); + mpz_add(rand->i, tmp, a->i); err: mpz_clear(bmina); return ret; @@ -534,19 +534,19 @@ int ibz_rand_interval_minm_m(ibz_t *rand, int32_t m) { int ret = 1; - mpz_t m_big; + ibz_t m_big; // m_big = 2 * m - mpz_init_set_si(m_big, m); - mpz_add(m_big, m_big, m_big); + mpz_init_set_si(m_big.i, m); + mpz_add(m_big.i, m_big.i, m_big.i); // Sample in [0, 2*m] ret = ibz_rand_interval(rand, &ibz_const_zero, &m_big); // Adjust to range [-m, m] - mpz_sub_ui(*rand, *rand, m); + mpz_sub_ui(rand->i, rand->i, m); - mpz_clear(m_big); + mpz_clear(m_big.i); return ret; } @@ -555,41 +555,41 @@ int ibz_rand_interval_bits(ibz_t *rand, uint32_t m) { int ret = 1; - mpz_t tmp; - mpz_t low; - mpz_init_set_ui(tmp, 1); - mpz_mul_2exp(tmp, tmp, m); - mpz_init(low); - mpz_neg(low, tmp); + ibz_t tmp; + ibz_t low; + mpz_init_set_ui(tmp.i, 1); + mpz_mul_2exp(tmp.i, tmp.i, m); + mpz_init(low.i); + mpz_neg(low.i, tmp.i); ret = ibz_rand_interval(rand, &low, &tmp); - mpz_clear(tmp); - mpz_clear(low); + mpz_clear(tmp.i); + mpz_clear(low.i); if (ret != 1) goto err; - mpz_sub_ui(*rand, *rand, (unsigned long int)m); + mpz_sub_ui(rand->i, rand->i, (unsigned long int)m); return ret; err: - mpz_clear(tmp); - mpz_clear(low); + mpz_clear(tmp.i); + mpz_clear(low.i); return ret; } int ibz_bitsize(const ibz_t *a) { - return (int)mpz_sizeinbase(*a, 2); + return (int)mpz_sizeinbase(a->i, 2); } int ibz_size_in_base(const ibz_t *a, int base) { - return (int)mpz_sizeinbase(*a, base); + return (int)mpz_sizeinbase(a->i, base); } void ibz_copy_digits(ibz_t *target, const digit_t *dig, int dig_len) { - mpz_import(*target, dig_len, -1, sizeof(digit_t), 0, 0, dig); + mpz_import(target->i, dig_len, -1, sizeof(digit_t), 0, 0, dig); } void @@ -600,13 +600,13 @@ ibz_to_digits(digit_t *target, const ibz_t *ibz) // The next line ensures zero is written to the first limb of target if ibz is zero; // target is then overwritten by the actual value if it is not. target[0] = 0; - mpz_export(target, NULL, -1, sizeof(digit_t), 0, 0, *ibz); + mpz_export(target, NULL, -1, sizeof(digit_t), 0, 0, ibz->i); } int ibz_probab_prime(const ibz_t *n, int reps) { - int ret = mpz_probab_prime_p(*n, reps); + int ret = mpz_probab_prime_p(n->i, reps); DEBUG_STR_FUN_INT_MP_INT("ibz_probab_prime", ret, n, reps); return ret; } @@ -614,26 +614,26 @@ ibz_probab_prime(const ibz_t *n, int reps) void ibz_gcd(ibz_t *gcd, const ibz_t *a, const ibz_t *b) { - mpz_gcd(*gcd, *a, *b); + mpz_gcd(gcd->i, a->i, b->i); } int ibz_invmod(ibz_t *inv, const ibz_t *a, const ibz_t *mod) { - return (mpz_invert(*inv, *a, *mod) ? 1 : 0); + return (mpz_invert(inv->i, a->i, mod->i) ? 
1 : 0); } int ibz_legendre(const ibz_t *a, const ibz_t *p) { - return mpz_legendre(*a, *p); + return mpz_legendre(a->i, p->i); } int ibz_sqrt(ibz_t *sqrt, const ibz_t *a) { - if (mpz_perfect_square_p(*a)) { - mpz_sqrt(*sqrt, *a); + if (mpz_perfect_square_p(a->i)) { + mpz_sqrt(sqrt->i, a->i); return 1; } else { return 0; @@ -643,7 +643,7 @@ ibz_sqrt(ibz_t *sqrt, const ibz_t *a) void ibz_sqrt_floor(ibz_t *sqrt, const ibz_t *a) { - mpz_sqrt(*sqrt, *a); + mpz_sqrt(sqrt->i, a->i); } int @@ -686,85 +686,85 @@ ibz_sqrt_mod_p(ibz_t *sqrt, const ibz_t *a, const ibz_t *p) int ret = 1; - mpz_mod(amod, *a, *p); + mpz_mod(amod, a->i, p->i); if (mpz_cmp_ui(amod, 0) < 0) { - mpz_add(amod, *p, amod); + mpz_add(amod, p->i, amod); } - if (mpz_legendre(amod, *p) != 1) { + if (mpz_legendre(amod, p->i) != 1) { ret = 0; goto end; } - mpz_sub_ui(pm1, *p, 1); + mpz_sub_ui(pm1, p->i, 1); - if (mpz_mod_ui(tmp, *p, 4) == 3) { + if (mpz_mod_ui(tmp, p->i, 4) == 3) { // p % 4 == 3 - mpz_add_ui(tmp, *p, 1); + mpz_add_ui(tmp, p->i, 1); mpz_fdiv_q_2exp(tmp, tmp, 2); - mpz_powm(*sqrt, amod, tmp, *p); - } else if (mpz_mod_ui(tmp, *p, 8) == 5) { + mpz_powm(sqrt->i, amod, tmp, p->i); + } else if (mpz_mod_ui(tmp, p->i, 8) == 5) { // p % 8 == 5 - mpz_sub_ui(tmp, *p, 1); + mpz_sub_ui(tmp, p->i, 1); mpz_fdiv_q_2exp(tmp, tmp, 2); - mpz_powm(tmp, amod, tmp, *p); // a^{(p-1)/4} mod p + mpz_powm(tmp, amod, tmp, p->i); // a^{(p-1)/4} mod p if (!mpz_cmp_ui(tmp, 1)) { - mpz_add_ui(tmp, *p, 3); + mpz_add_ui(tmp, p->i, 3); mpz_fdiv_q_2exp(tmp, tmp, 3); - mpz_powm(*sqrt, amod, tmp, *p); // a^{(p+3)/8} mod p + mpz_powm(sqrt->i, amod, tmp, p->i); // a^{(p+3)/8} mod p } else { - mpz_sub_ui(tmp, *p, 5); + mpz_sub_ui(tmp, p->i, 5); mpz_fdiv_q_2exp(tmp, tmp, 3); // (p - 5) / 8 mpz_mul_2exp(a4, amod, 2); // 4*a - mpz_powm(tmp, a4, tmp, *p); + mpz_powm(tmp, a4, tmp, p->i); mpz_mul_2exp(a2, amod, 1); mpz_mul(tmp, a2, tmp); - mpz_mod(*sqrt, tmp, *p); + mpz_mod(sqrt->i, tmp, p->i); } } else { // p % 8 == 1 -> Shanks-Tonelli int e = 0; - mpz_sub_ui(q, *p, 1); + mpz_sub_ui(q, p->i, 1); while (mpz_tstbit(q, e) == 0) e++; mpz_fdiv_q_2exp(q, q, e); // 1. find generator - non-quadratic residue mpz_set_ui(qnr, 2); - while (mpz_legendre(qnr, *p) != -1) + while (mpz_legendre(qnr, p->i) != -1) mpz_add_ui(qnr, qnr, 1); - mpz_powm(z, qnr, q, *p); + mpz_powm(z, qnr, q, p->i); // 2. 
Initialize mpz_set(y, z); - mpz_powm(y, amod, q, *p); // y = a^q mod p + mpz_powm(y, amod, q, p->i); // y = a^q mod p mpz_add_ui(tmp, q, 1); // tmp = (q + 1) / 2 mpz_fdiv_q_2exp(tmp, tmp, 1); - mpz_powm(x, amod, tmp, *p); // x = a^(q + 1)/2 mod p + mpz_powm(x, amod, tmp, p->i); // x = a^(q + 1)/2 mod p mpz_set_ui(exp, 1); mpz_mul_2exp(exp, exp, e - 2); for (int i = 0; i < e; ++i) { - mpz_powm(b, y, exp, *p); + mpz_powm(b, y, exp, p->i); if (!mpz_cmp(b, pm1)) { mpz_mul(x, x, z); - mpz_mod(x, x, *p); + mpz_mod(x, x, p->i); mpz_mul(y, y, z); mpz_mul(y, y, z); - mpz_mod(y, y, *p); + mpz_mod(y, y, p->i); } - mpz_powm_ui(z, z, 2, *p); + mpz_powm_ui(z, z, 2, p->i); mpz_fdiv_q_2exp(exp, exp, 1); } - mpz_set(*sqrt, x); + mpz_set(sqrt->i, x); } #ifdef DEBUG_VERBOSE diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/intbig.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/intbig.h index a0c2c02477..28e478ff7f 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/intbig.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/intbig.h @@ -33,7 +33,9 @@ * * For integers of arbitrary size, used by intbig module, using gmp */ -typedef mpz_t ibz_t; +typedef struct { + mpz_t i; +} ibz_t; /** @} */ @@ -129,7 +131,7 @@ int ibz_two_adic(ibz_t *pow); */ void ibz_mod(ibz_t *r, const ibz_t *a, const ibz_t *b); -unsigned long int ibz_mod_ui(const mpz_t *n, unsigned long int d); +unsigned long int ibz_mod_ui(const ibz_t *n, unsigned long int d); /** @brief Test if a = 0 mod b */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/l2.c index 5491ee44d0..ea32213c75 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/l2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/l2.c @@ -57,25 +57,25 @@ to_etabar(fp_num *x) } static void -from_mpz(const mpz_t x, fp_num *r) +from_mpz(const ibz_t *x, fp_num *r) { long exp = 0; - r->s = mpz_get_d_2exp(&exp, x); + r->s = mpz_get_d_2exp(&exp, x->i); r->e = exp; } static void -to_mpz(const fp_num *x, mpz_t r) +to_mpz(const fp_num *x, ibz_t *r) { if (x->e >= DBL_MANT_DIG) { double s = x->s * 0x1P53; - mpz_set_d(r, s); - mpz_mul_2exp(r, r, x->e - DBL_MANT_DIG); + mpz_set_d(r->i, s); + mpz_mul_2exp(r->i, r->i, x->e - DBL_MANT_DIG); } else if (x->e < 0) { - mpz_set_ui(r, 0); + mpz_set_ui(r->i, 0); } else { double s = ldexp(x->s, x->e); - mpz_set_d(r, round(s)); + mpz_set_d(r->i, round(s)); } } @@ -203,7 +203,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) ibz_init(&tmpI); // Main L² loop - from_mpz((*G)[0][0], &r[0][0]); + from_mpz(&G->m[0][0], &r[0][0]); int kappa = 1; while (kappa < 4) { // size reduce b_κ @@ -213,7 +213,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Loop invariant: // r[κ][j] ≈ u[κ][j] ‖b_j*‖² ≈ 〈b_κ, b_j*〉 for (int j = 0; j <= kappa; j++) { - from_mpz((*G)[kappa][j], &r[kappa][j]); + from_mpz(&G->m[kappa][j], &r[kappa][j]); for (int k = 0; k < j; k++) { fp_mul(&r[kappa][k], &u[j][k], &tmpF); fp_sub(&r[kappa][j], &tmpF, &r[kappa][j]); @@ -229,22 +229,22 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) done = 0; copy(&u[kappa][i], &Xf); fp_round(&Xf); - to_mpz(&Xf, X); + to_mpz(&Xf, &X); // Update basis: b_κ ← b_κ - X·b_i for (int j = 0; j < 4; j++) { - ibz_mul(&tmpI, &X, &(*basis)[j][i]); - ibz_sub(&(*basis)[j][kappa], &(*basis)[j][kappa], &tmpI); + ibz_mul(&tmpI, &X, &basis->m[j][i]); + ibz_sub(&basis->m[j][kappa], &basis->m[j][kappa], &tmpI); } // Update lower half of the Gram matrix // = - 2X + X² = // - X - X( - X·) //// 〈b_κ, b_κ〉 ← 〈b_κ, b_κ〉 - X·〈b_κ, 
b_i〉 - ibz_mul(&tmpI, &X, &(*G)[kappa][i]); - ibz_sub(&(*G)[kappa][kappa], &(*G)[kappa][kappa], &tmpI); + ibz_mul(&tmpI, &X, &G->m[kappa][i]); + ibz_sub(&G->m[kappa][kappa], &G->m[kappa][kappa], &tmpI); for (int j = 0; j < 4; j++) { // works because i < κ // 〈b_κ, b_j〉 ← 〈b_κ, b_j〉 - X·〈b_i, b_j〉 - ibz_mul(&tmpI, &X, SYM((*G), i, j)); - ibz_sub(SYM((*G), kappa, j), SYM((*G), kappa, j), &tmpI); + ibz_mul(&tmpI, &X, SYM(G->m, i, j)); + ibz_sub(SYM(G->m, kappa, j), SYM(G->m, kappa, j), &tmpI); } // After the loop: //// 〈b_κ,b_κ〉 ← 〈b_κ,b_κ〉 - X·〈b_κ,b_i〉 - X·(〈b_κ,b_i〉 - X·〈b_i, @@ -261,7 +261,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Check Lovasz' conditions // lovasz[0] = ‖b_κ‖² - from_mpz((*G)[kappa][kappa], &lovasz[0]); + from_mpz(&G->m[kappa][kappa], &lovasz[0]); // lovasz[i] = lovasz[i-1] - u[κ][i-1]·r[κ][i-1] for (int i = 1; i < kappa; i++) { fp_mul(&u[kappa][i - 1], &r[kappa][i - 1], &tmpF); @@ -279,11 +279,11 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Insert b_κ before b_swap in the basis and in the lower half Gram matrix for (int j = kappa; j > swap; j--) { for (int i = 0; i < 4; i++) { - ibz_swap(&(*basis)[i][j], &(*basis)[i][j - 1]); + ibz_swap(&basis->m[i][j], &basis->m[i][j - 1]); if (i == j - 1) - ibz_swap(&(*G)[i][i], &(*G)[j][j]); + ibz_swap(&G->m[i][i], &G->m[j][j]); else if (i != j) - ibz_swap(SYM((*G), i, j), SYM((*G), i, j - 1)); + ibz_swap(SYM(G->m, i, j), SYM(G->m, i, j - 1)); } } // Copy row u[κ] and r[κ] in swap position, ignore what follows @@ -318,7 +318,7 @@ quat_lll_core(ibz_mat_4x4_t *G, ibz_mat_4x4_t *basis) // Fill in the upper half of the Gram matrix for (int i = 0; i < 4; i++) { for (int j = i + 1; j < 4; j++) - ibz_copy(&(*G)[i][j], &(*G)[j][i]); + ibz_copy(&G->m[i][j], &G->m[j][i]); } // Clearinghouse diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lat_ball.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lat_ball.c index c7bbb9682f..3f7476988c 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lat_ball.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lat_ball.c @@ -28,10 +28,10 @@ quat_lattice_bound_parallelogram(ibz_vec_4_t *box, ibz_mat_4x4_t *U, const ibz_m // Compute the parallelogram's bounds int trivial = 1; for (int i = 0; i < 4; i++) { - ibz_mul(&(*box)[i], &dualG[i][i], radius); - ibz_div(&(*box)[i], &rem, &(*box)[i], &denom); - ibz_sqrt_floor(&(*box)[i], &(*box)[i]); - trivial &= ibz_is_zero(&(*box)[i]); + ibz_mul(&box->v[i], &dualG.m[i][i], radius); + ibz_div(&box->v[i], &rem, &box->v[i], &denom); + ibz_sqrt_floor(&box->v[i], &box->v[i]); + trivial &= ibz_is_zero(&box->v[i]); } // Compute the transpose transformation matrix @@ -95,12 +95,12 @@ quat_lattice_sample_from_ball(quat_alg_elem_t *res, do { // Sample vector for (int i = 0; i < 4; i++) { - if (ibz_is_zero(&box[i])) { - ibz_copy(&x[i], &ibz_const_zero); + if (ibz_is_zero(&box.v[i])) { + ibz_copy(&x.v[i], &ibz_const_zero); } else { - ibz_add(&tmp, &box[i], &box[i]); - ok &= ibz_rand_interval(&x[i], &ibz_const_zero, &tmp); - ibz_sub(&x[i], &x[i], &box[i]); + ibz_add(&tmp, &box.v[i], &box.v[i]); + ok &= ibz_rand_interval(&x.v[i], &ibz_const_zero, &tmp); + ibz_sub(&x.v[i], &x.v[i], &box.v[i]); if (!ok) goto err; } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lattice.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lattice.c index c98bae9499..ef7b9ccdcc 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lattice.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lattice.c @@ -57,7 +57,7 @@ 
quat_lattice_conjugate_without_hnf(quat_lattice_t *conj, const quat_lattice_t *l for (int row = 1; row < 4; ++row) { for (int col = 0; col < 4; ++col) { - ibz_neg(&(conj->basis[row][col]), &(conj->basis[row][col])); + ibz_neg(&(conj->basis.m[row][col]), &(conj->basis.m[row][col])); } } } @@ -96,14 +96,14 @@ quat_lattice_add(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lat ibz_mat_4x4_scalar_mul(&tmp, &(lat1->denom), &(lat2->basis)); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(generators[j][i]), &(tmp[i][j])); + ibz_copy(&(generators[j].v[i]), &(tmp.m[i][j])); } } ibz_mat_4x4_inv_with_det_as_denom(NULL, &det1, &tmp); ibz_mat_4x4_scalar_mul(&tmp, &(lat2->denom), &(lat1->basis)); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(generators[4 + j][i]), &(tmp[i][j])); + ibz_copy(&(generators[4 + j].v[i]), &(tmp.m[i][j])); } } ibz_mat_4x4_inv_with_det_as_denom(NULL, &det2, &tmp); @@ -151,12 +151,12 @@ quat_lattice_mat_alg_coord_mul_without_hnf(ibz_mat_4x4_t *prod, ibz_vec_4_init(&p); ibz_vec_4_init(&a); for (int i = 0; i < 4; i++) { - ibz_vec_4_copy_ibz(&a, &((*lat)[0][i]), &((*lat)[1][i]), &((*lat)[2][i]), &((*lat)[3][i])); + ibz_vec_4_copy_ibz(&a, &(lat->m[0][i]), &(lat->m[1][i]), &(lat->m[2][i]), &(lat->m[3][i])); quat_alg_coord_mul(&p, &a, coord, alg); - ibz_copy(&((*prod)[0][i]), &(p[0])); - ibz_copy(&((*prod)[1][i]), &(p[1])); - ibz_copy(&((*prod)[2][i]), &(p[2])); - ibz_copy(&((*prod)[3][i]), &(p[3])); + ibz_copy(&(prod->m[0][i]), &(p.v[0])); + ibz_copy(&(prod->m[1][i]), &(p.v[1])); + ibz_copy(&(prod->m[2][i]), &(p.v[2])); + ibz_copy(&(prod->m[3][i]), &(p.v[3])); } ibz_vec_4_finalize(&p); ibz_vec_4_finalize(&a); @@ -191,15 +191,15 @@ quat_lattice_mul(quat_lattice_t *res, const quat_lattice_t *lat1, const quat_lat ibz_vec_4_init(&(generators[i])); for (int k = 0; k < 4; k++) { ibz_vec_4_copy_ibz( - &elem1, &(lat1->basis[0][k]), &(lat1->basis[1][k]), &(lat1->basis[2][k]), &(lat1->basis[3][k])); + &elem1, &(lat1->basis.m[0][k]), &(lat1->basis.m[1][k]), &(lat1->basis.m[2][k]), &(lat1->basis.m[3][k])); for (int i = 0; i < 4; i++) { ibz_vec_4_copy_ibz( - &elem2, &(lat2->basis[0][i]), &(lat2->basis[1][i]), &(lat2->basis[2][i]), &(lat2->basis[3][i])); + &elem2, &(lat2->basis.m[0][i]), &(lat2->basis.m[1][i]), &(lat2->basis.m[2][i]), &(lat2->basis.m[3][i])); quat_alg_coord_mul(&elem_res, &elem1, &elem2, alg); for (int j = 0; j < 4; j++) { if (k == 0) - ibz_copy(&(detmat[i][j]), &(elem_res[j])); - ibz_copy(&(generators[4 * k + i][j]), &(elem_res[j])); + ibz_copy(&(detmat.m[i][j]), &(elem_res.v[j])); + ibz_copy(&(generators[4 * k + i].v[j]), &(elem_res.v[j])); } } } @@ -239,7 +239,7 @@ quat_lattice_contains(ibz_vec_4_t *coord, const quat_lattice_t *lat, const quat_ // copy result if (divisible && (coord != NULL)) { for (int i = 0; i < 4; i++) { - ibz_copy(&((*coord)[i]), &(work_coord[i])); + ibz_copy(&(coord->v[i]), &(work_coord.v[i])); } } ibz_finalize(&prod); @@ -292,7 +292,7 @@ quat_lattice_hnf(quat_lattice_t *lat) ibz_vec_4_init(&(generators[i])); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_copy(&(generators[j][i]), &(lat->basis[i][j])); + ibz_copy(&(generators[j].v[i]), &(lat->basis.m[i][j])); } } ibz_mat_4xn_hnf_mod_core(&(lat->basis), 4, generators, &mod); @@ -309,19 +309,19 @@ quat_lattice_gram(ibz_mat_4x4_t *G, const quat_lattice_t *lattice, const quat_al ibz_init(&tmp); for (int i = 0; i < 4; i++) { for (int j = 0; j <= i; j++) { - ibz_set(&(*G)[i][j], 0); + ibz_set(&G->m[i][j], 0); for (int k = 0; k < 
4; k++) { - ibz_mul(&tmp, &(lattice->basis)[k][i], &(lattice->basis)[k][j]); + ibz_mul(&tmp, &(lattice->basis.m)[k][i], &(lattice->basis.m)[k][j]); if (k >= 2) ibz_mul(&tmp, &tmp, &alg->p); - ibz_add(&(*G)[i][j], &(*G)[i][j], &tmp); + ibz_add(&G->m[i][j], &G->m[i][j], &tmp); } - ibz_mul(&(*G)[i][j], &(*G)[i][j], &ibz_const_two); + ibz_mul(&G->m[i][j], &G->m[i][j], &ibz_const_two); } } for (int i = 0; i < 4; i++) { for (int j = i + 1; j < 4; j++) { - ibz_copy(&(*G)[i][j], &(*G)[j][i]); + ibz_copy(&G->m[i][j], &G->m[j][i]); } } ibz_finalize(&tmp); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lll_applications.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lll_applications.c index 6c763b8c04..f5e9af922b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lll_applications.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lll_applications.c @@ -17,9 +17,9 @@ quat_lideal_reduce_basis(ibz_mat_4x4_t *reduced, quat_lll_core(gram, reduced); ibz_mat_4x4_scalar_mul(gram, &gram_corrector, gram); for (int i = 0; i < 4; i++) { - ibz_div_2exp(&((*gram)[i][i]), &((*gram)[i][i]), 1); + ibz_div_2exp(&(gram->m[i][i]), &(gram->m[i][i]), 1); for (int j = i + 1; j < 4; j++) { - ibz_set(&((*gram)[i][j]), 0); + ibz_set(&(gram->m[i][j]), 0); } } ibz_finalize(&gram_corrector); @@ -79,10 +79,10 @@ quat_lideal_prime_norm_reduced_equivalent(quat_left_ideal_t *lideal, while (!found && ctr < equiv_num_iter) { ctr++; // we select our linear combination at random - ibz_rand_interval_minm_m(&new_alpha.coord[0], equiv_bound_coeff); - ibz_rand_interval_minm_m(&new_alpha.coord[1], equiv_bound_coeff); - ibz_rand_interval_minm_m(&new_alpha.coord[2], equiv_bound_coeff); - ibz_rand_interval_minm_m(&new_alpha.coord[3], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[0], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[1], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[2], equiv_bound_coeff); + ibz_rand_interval_minm_m(&new_alpha.coord.v[3], equiv_bound_coeff); // computation of the norm of the vector sampled quat_qf_eval(&tmp, &gram, &new_alpha.coord); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/normeq.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/normeq.c index 8c133dd095..aadbbe06c7 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/normeq.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/normeq.c @@ -13,23 +13,23 @@ quat_lattice_O0_set(quat_lattice_t *O0) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_set(&(O0->basis[i][j]), 0); + ibz_set(&(O0->basis.m[i][j]), 0); } } ibz_set(&(O0->denom), 2); - ibz_set(&(O0->basis[0][0]), 2); - ibz_set(&(O0->basis[1][1]), 2); - ibz_set(&(O0->basis[2][2]), 1); - ibz_set(&(O0->basis[1][2]), 1); - ibz_set(&(O0->basis[3][3]), 1); - ibz_set(&(O0->basis[0][3]), 1); + ibz_set(&(O0->basis.m[0][0]), 2); + ibz_set(&(O0->basis.m[1][1]), 2); + ibz_set(&(O0->basis.m[2][2]), 1); + ibz_set(&(O0->basis.m[1][2]), 1); + ibz_set(&(O0->basis.m[3][3]), 1); + ibz_set(&(O0->basis.m[0][3]), 1); } void quat_lattice_O0_set_extremal(quat_p_extremal_maximal_order_t *O0) { - ibz_set(&O0->z.coord[1], 1); - ibz_set(&O0->t.coord[2], 1); + ibz_set(&O0->z.coord.v[1], 1); + ibz_set(&O0->t.coord.v[2], 1); ibz_set(&O0->z.denom, 1); ibz_set(&O0->t.denom, 1); O0->q = 1; @@ -50,24 +50,24 @@ quat_order_elem_create(quat_alg_elem_t *elem, quat_alg_elem_init(&quat_temp); // elem = x - quat_alg_scalar(elem, &(*coeffs)[0], &ibz_const_one); + quat_alg_scalar(elem, &coeffs->v[0], &ibz_const_one); // quat_temp = i*y - 
quat_alg_scalar(&quat_temp, &((*coeffs)[1]), &ibz_const_one); + quat_alg_scalar(&quat_temp, &(coeffs->v[1]), &ibz_const_one); quat_alg_mul(&quat_temp, &order->z, &quat_temp, Bpoo); // elem = x + i*y quat_alg_add(elem, elem, &quat_temp); // quat_temp = z * j - quat_alg_scalar(&quat_temp, &(*coeffs)[2], &ibz_const_one); + quat_alg_scalar(&quat_temp, &coeffs->v[2], &ibz_const_one); quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); // elem = x + i* + z*j quat_alg_add(elem, elem, &quat_temp); // quat_temp = t * j * i - quat_alg_scalar(&quat_temp, &(*coeffs)[3], &ibz_const_one); + quat_alg_scalar(&quat_temp, &coeffs->v[3], &ibz_const_one); quat_alg_mul(&quat_temp, &order->t, &quat_temp, Bpoo); quat_alg_mul(&quat_temp, &quat_temp, &order->z, Bpoo); @@ -143,11 +143,11 @@ quat_represent_integer(quat_alg_elem_t *gamma, ibz_sub(&counter, &counter, &ibz_const_one); // we start by sampling the first coordinate - ibz_rand_interval(&coeffs[2], &ibz_const_one, &bound); + ibz_rand_interval(&coeffs.v[2], &ibz_const_one, &bound); // then, we sample the second coordinate // computing the second bound in temp as sqrt( (adjust_n_gamma - p*coeffs[2]²)/qp ) - ibz_mul(&cornacchia_target, &coeffs[2], &coeffs[2]); + ibz_mul(&cornacchia_target, &coeffs.v[2], &coeffs.v[2]); ibz_mul(&temp, &cornacchia_target, &(params->algebra->p)); ibz_sub(&temp, &adjusted_n_gamma, &temp); ibz_mul(&sq_bound, &q, &(params->algebra->p)); @@ -158,10 +158,10 @@ quat_represent_integer(quat_alg_elem_t *gamma, continue; } // sampling the second value - ibz_rand_interval(&coeffs[3], &ibz_const_one, &temp); + ibz_rand_interval(&coeffs.v[3], &ibz_const_one, &temp); // compute cornacchia_target = n_gamma - p * (z² + q*t²) - ibz_mul(&temp, &coeffs[3], &coeffs[3]); + ibz_mul(&temp, &coeffs.v[3], &coeffs.v[3]); ibz_mul(&temp, &q, &temp); ibz_add(&cornacchia_target, &cornacchia_target, &temp); ibz_mul(&cornacchia_target, &cornacchia_target, &((params->algebra)->p)); @@ -170,7 +170,7 @@ quat_represent_integer(quat_alg_elem_t *gamma, // applying cornacchia if (ibz_probab_prime(&cornacchia_target, params->primality_test_iterations)) - found = ibz_cornacchia_prime(&(coeffs[0]), &(coeffs[1]), &q, &cornacchia_target); + found = ibz_cornacchia_prime(&(coeffs.v[0]), &(coeffs.v[1]), &q, &cornacchia_target); else found = 0; @@ -179,33 +179,33 @@ quat_represent_integer(quat_alg_elem_t *gamma, // the treatmeat depends if the basis contains (1+j)/2 or (1+k)/2 // we must have x = t mod 2 and y = z mod 2 // if q=1 we can simply swap x and y - if (ibz_is_odd(&coeffs[0]) != ibz_is_odd(&coeffs[3])) { - ibz_swap(&coeffs[1], &coeffs[0]); + if (ibz_is_odd(&coeffs.v[0]) != ibz_is_odd(&coeffs.v[3])) { + ibz_swap(&coeffs.v[1], &coeffs.v[0]); } // we further check that (x-t)/2 = 1 mod 2 and (y-z)/2 = 1 mod 2 to ensure that the // resulting endomorphism will behave well for dim 2 computations - found = found && ((ibz_get(&coeffs[0]) - ibz_get(&coeffs[3])) % 4 == 2) && - ((ibz_get(&coeffs[1]) - ibz_get(&coeffs[2])) % 4 == 2); + found = found && ((ibz_get(&coeffs.v[0]) - ibz_get(&coeffs.v[3])) % 4 == 2) && + ((ibz_get(&coeffs.v[1]) - ibz_get(&coeffs.v[2])) % 4 == 2); } if (found) { #ifndef NDEBUG ibz_set(&temp, (params->order->q)); - ibz_mul(&temp, &temp, &(coeffs[1])); - ibz_mul(&temp, &temp, &(coeffs[1])); - ibz_mul(&test, &(coeffs[0]), &(coeffs[0])); + ibz_mul(&temp, &temp, &(coeffs.v[1])); + ibz_mul(&temp, &temp, &(coeffs.v[1])); + ibz_mul(&test, &(coeffs.v[0]), &(coeffs.v[0])); ibz_add(&temp, &temp, &test); assert(0 == ibz_cmp(&temp, &cornacchia_target)); - 
ibz_mul(&cornacchia_target, &(coeffs[3]), &(coeffs[3])); + ibz_mul(&cornacchia_target, &(coeffs.v[3]), &(coeffs.v[3])); ibz_mul(&cornacchia_target, &cornacchia_target, &(params->algebra->p)); - ibz_mul(&temp, &(coeffs[1]), &(coeffs[1])); + ibz_mul(&temp, &(coeffs.v[1]), &(coeffs.v[1])); ibz_add(&cornacchia_target, &cornacchia_target, &temp); ibz_set(&temp, (params->order->q)); ibz_mul(&cornacchia_target, &cornacchia_target, &temp); - ibz_mul(&temp, &(coeffs[0]), &coeffs[0]); + ibz_mul(&temp, &(coeffs.v[0]), &coeffs.v[0]); ibz_add(&cornacchia_target, &cornacchia_target, &temp); - ibz_mul(&temp, &(coeffs[2]), &coeffs[2]); + ibz_mul(&temp, &(coeffs.v[2]), &coeffs.v[2]); ibz_mul(&temp, &temp, &(params->algebra->p)); ibz_add(&cornacchia_target, &cornacchia_target, &temp); assert(0 == ibz_cmp(&cornacchia_target, &adjusted_n_gamma)); @@ -213,8 +213,8 @@ quat_represent_integer(quat_alg_elem_t *gamma, // translate x,y,z,t into the quaternion element gamma quat_order_elem_create(gamma, (params->order), &coeffs, (params->algebra)); #ifndef NDEBUG - quat_alg_norm(&temp, &(coeffs[0]), gamma, (params->algebra)); - assert(ibz_is_one(&(coeffs[0]))); + quat_alg_norm(&temp, &(coeffs.v[0]), gamma, (params->algebra)); + assert(ibz_is_one(&(coeffs.v[0]))); assert(0 == ibz_cmp(&temp, &adjusted_n_gamma)); assert(quat_lattice_contains(NULL, &((params->order)->order), gamma)); #endif @@ -232,10 +232,10 @@ quat_represent_integer(quat_alg_elem_t *gamma, if (found) { // new gamma ibz_mat_4x4_eval(&coeffs, &(((params->order)->order).basis), &coeffs); - ibz_copy(&gamma->coord[0], &coeffs[0]); - ibz_copy(&gamma->coord[1], &coeffs[1]); - ibz_copy(&gamma->coord[2], &coeffs[2]); - ibz_copy(&gamma->coord[3], &coeffs[3]); + ibz_copy(&gamma->coord.v[0], &coeffs.v[0]); + ibz_copy(&gamma->coord.v[1], &coeffs.v[1]); + ibz_copy(&gamma->coord.v[2], &coeffs.v[2]); + ibz_copy(&gamma->coord.v[3], &coeffs.v[3]); ibz_copy(&gamma->denom, &(((params->order)->order).denom)); } // var finalize @@ -279,10 +279,10 @@ quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, // we find a quaternion element of norm divisible by norm while (!found) { // generating a trace-zero element at random - ibz_set(&gen.coord[0], 0); + ibz_set(&gen.coord.v[0], 0); ibz_sub(&n_temp, norm, &ibz_const_one); for (int i = 1; i < 4; i++) - ibz_rand_interval(&gen.coord[i], &ibz_const_zero, &n_temp); + ibz_rand_interval(&gen.coord.v[i], &ibz_const_zero, &n_temp); // first, we compute the norm of the gen quat_alg_norm(&n_temp, &norm_d, &gen, (params->algebra)); @@ -293,7 +293,7 @@ quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, ibz_mod(&disc, &disc, norm); // now we check that -n is a square mod norm // and if the square root exists we compute it - found = ibz_sqrt_mod_p(&gen.coord[0], &disc, norm); + found = ibz_sqrt_mod_p(&gen.coord.v[0], &disc, norm); found = found && !quat_alg_elem_is_zero(&gen); } } else { @@ -319,7 +319,7 @@ quat_sampling_random_ideal_O0_given_norm(quat_left_ideal_t *lideal, found = 0; while (!found) { for (int i = 0; i < 4; i++) { - ibz_rand_interval(&gen_rerand.coord[i], &ibz_const_one, norm); + ibz_rand_interval(&gen_rerand.coord.v[i], &ibz_const_one, norm); } quat_alg_norm(&n_temp, &norm_d, &gen_rerand, (params->algebra)); assert(ibz_is_one(&norm_d)); @@ -348,22 +348,22 @@ quat_change_to_O0_basis(ibz_vec_4_t *vec, const quat_alg_elem_t *el) { ibz_t tmp; ibz_init(&tmp); - ibz_copy(&(*vec)[2], &el->coord[2]); - ibz_add(&(*vec)[2], &(*vec)[2], &(*vec)[2]); // double (not optimal if el->denom is even...) 
- ibz_copy(&(*vec)[3], &el->coord[3]); // double (not optimal if el->denom is even...) - ibz_add(&(*vec)[3], &(*vec)[3], &(*vec)[3]); - ibz_sub(&(*vec)[0], &el->coord[0], &el->coord[3]); - ibz_sub(&(*vec)[1], &el->coord[1], &el->coord[2]); - - assert(ibz_divides(&(*vec)[0], &el->denom)); - assert(ibz_divides(&(*vec)[1], &el->denom)); - assert(ibz_divides(&(*vec)[2], &el->denom)); - assert(ibz_divides(&(*vec)[3], &el->denom)); - - ibz_div(&(*vec)[0], &tmp, &(*vec)[0], &el->denom); - ibz_div(&(*vec)[1], &tmp, &(*vec)[1], &el->denom); - ibz_div(&(*vec)[2], &tmp, &(*vec)[2], &el->denom); - ibz_div(&(*vec)[3], &tmp, &(*vec)[3], &el->denom); + ibz_copy(&vec->v[2], &el->coord.v[2]); + ibz_add(&vec->v[2], &vec->v[2], &vec->v[2]); // double (not optimal if el->denom is even...) + ibz_copy(&vec->v[3], &el->coord.v[3]); // double (not optimal if el->denom is even...) + ibz_add(&vec->v[3], &vec->v[3], &vec->v[3]); + ibz_sub(&vec->v[0], &el->coord.v[0], &el->coord.v[3]); + ibz_sub(&vec->v[1], &el->coord.v[1], &el->coord.v[2]); + + assert(ibz_divides(&vec->v[0], &el->denom)); + assert(ibz_divides(&vec->v[1], &el->denom)); + assert(ibz_divides(&vec->v[2], &el->denom)); + assert(ibz_divides(&vec->v[3], &el->denom)); + + ibz_div(&vec->v[0], &tmp, &vec->v[0], &el->denom); + ibz_div(&vec->v[1], &tmp, &vec->v[1], &el->denom); + ibz_div(&vec->v[2], &tmp, &vec->v[2], &el->denom); + ibz_div(&vec->v[3], &tmp, &vec->v[3], &el->denom); ibz_finalize(&tmp); } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/printer.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/printer.c index 6d6a3ca9b7..7702fb7ca4 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/printer.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/printer.c @@ -7,7 +7,7 @@ ibz_mat_2x2_print(const ibz_mat_2x2_t *mat) printf("matrix: "); for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { - ibz_print(&((*mat)[i][j]), 10); + ibz_print(&(mat->m[i][j]), 10); printf(" "); } printf("\n "); @@ -21,7 +21,7 @@ ibz_mat_4x4_print(const ibz_mat_4x4_t *mat) printf("matrix: "); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_print(&((*mat)[i][j]), 10); + ibz_print(&(mat->m[i][j]), 10); printf(" "); } printf("\n "); @@ -34,7 +34,7 @@ ibz_vec_2_print(const ibz_vec_2_t *vec) { printf("vector: "); for (int i = 0; i < 2; i++) { - ibz_print(&((*vec)[i]), 10); + ibz_print(&(vec->v[i]), 10); printf(" "); } printf("\n\n"); @@ -45,7 +45,7 @@ ibz_vec_4_print(const ibz_vec_4_t *vec) { printf("vector: "); for (int i = 0; i < 4; i++) { - ibz_print(&((*vec)[i]), 10); + ibz_print(&(vec->v[i]), 10); printf(" "); } printf("\n\n"); @@ -61,7 +61,7 @@ quat_lattice_print(const quat_lattice_t *lat) printf("basis: "); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_print(&((lat->basis)[i][j]), 10); + ibz_print(&((lat->basis.m)[i][j]), 10); printf(" "); } printf("\n "); @@ -85,7 +85,7 @@ quat_alg_elem_print(const quat_alg_elem_t *elem) printf("\n"); printf("coordinates: "); for (int i = 0; i < 4; i++) { - ibz_print(&((elem->coord)[i]), 10); + ibz_print(&((elem->coord.v)[i]), 10); printf(" "); } printf("\n\n"); @@ -104,7 +104,7 @@ quat_left_ideal_print(const quat_left_ideal_t *lideal) printf("basis: "); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_print(&((lideal->lattice.basis)[i][j]), 10); + ibz_print(&((lideal->lattice.basis.m)[i][j]), 10); printf(" "); } if (i != 3) { @@ -120,7 +120,7 @@ quat_left_ideal_print(const quat_left_ideal_t *lideal) printf("parent order basis: "); for (int i = 0; i < 4; i++) { for 
(int j = 0; j < 4; j++) { - ibz_print(&((lideal->parent_order->basis)[i][j]), 10); + ibz_print(&((lideal->parent_order->basis.m)[i][j]), 10); printf(" "); } printf("\n "); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion.h index a567657464..2dd70a8c19 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion.h @@ -25,7 +25,9 @@ * * @typedef ibz_vec_2_t */ -typedef ibz_t ibz_vec_2_t[2]; +typedef struct { + ibz_t v[2]; +} ibz_vec_2_t; /** @brief Type for vectors of 4 integers * @@ -33,7 +35,9 @@ typedef ibz_t ibz_vec_2_t[2]; * * Represented as a vector of 4 ibz_t (big integer) elements */ -typedef ibz_t ibz_vec_4_t[4]; +typedef struct { + ibz_t v[4]; +} ibz_vec_4_t; /** @brief Type for 2 by 2 matrices of integers * @@ -41,7 +45,9 @@ typedef ibz_t ibz_vec_4_t[4]; * * Represented as a matrix of 2 vectors of 2 ibz_t (big integer) elements */ -typedef ibz_t ibz_mat_2x2_t[2][2]; +typedef struct { + ibz_t m[2][2]; +} ibz_mat_2x2_t; /** @brief Type for 4 by 4 matrices of integers * @@ -49,7 +55,9 @@ typedef ibz_t ibz_mat_2x2_t[2][2]; * * Represented as a matrix of 4 vectors of 4 ibz_t (big integer) elements */ -typedef ibz_t ibz_mat_4x4_t[4][4]; +typedef struct { + ibz_t m[4][4]; +} ibz_mat_4x4_t; /** * @} */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion_data.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion_data.c index 98b792431a..11ad7e707c 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion_data.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/quaternion_data.c @@ -4,3173 +4,3173 @@ const ibz_t QUAT_prime_cofactor = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x33,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x200}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x33,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x200}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x33,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x2000000}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x33,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x2000000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x33,0x0,0x0,0x0,0x0,0x0,0x0,0x200000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x33,0x0,0x0,0x0,0x0,0x0,0x0,0x200000000000000}}}} #endif ; const quat_alg_t QUATALG_PINFTY = { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x1af}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x1af}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x1afffff}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x1afffff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0x1afffffffffffff}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0x1afffffffffffff}}}} #endif }; const quat_p_extremal_maximal_order_t EXTREMAL_ORDERS[7] = {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc 
= 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, 
._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}}} #endif -}}, 1}, {{ +}}}, 1}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc584,0x10dc,0x9924,0xb9c2,0x5a67,0xfbc8,0xbdfb,0xe941,0xc61,0xdee2,0xcd5c,0xc570,0xa2d4,0xb3ff,0x2e51,0x4927}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc584,0x10dc,0x9924,0xb9c2,0x5a67,0xfbc8,0xbdfb,0xe941,0xc61,0xdee2,0xcd5c,0xc570,0xa2d4,0xb3ff,0x2e51,0x4927}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x10dcc584,0xb9c29924,0xfbc85a67,0xe941bdfb,0xdee20c61,0xc570cd5c,0xb3ffa2d4,0x49272e51}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x10dcc584,0xb9c29924,0xfbc85a67,0xe941bdfb,0xdee20c61,0xc570cd5c,0xb3ffa2d4,0x49272e51}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb9c2992410dcc584,0xe941bdfbfbc85a67,0xc570cd5cdee20c61,0x49272e51b3ffa2d4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb9c2992410dcc584,0xe941bdfbfbc85a67,0xc570cd5cdee20c61,0x49272e51b3ffa2d4}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc584,0x10dc,0x9924,0xb9c2,0x5a67,0xfbc8,0xbdfb,0xe941,0xc61,0xdee2,0xcd5c,0xc570,0xa2d4,0xb3ff,0x2e51,0x4927}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc584,0x10dc,0x9924,0xb9c2,0x5a67,0xfbc8,0xbdfb,0xe941,0xc61,0xdee2,0xcd5c,0xc570,0xa2d4,0xb3ff,0x2e51,0x4927}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x10dcc584,0xb9c29924,0xfbc85a67,0xe941bdfb,0xdee20c61,0xc570cd5c,0xb3ffa2d4,0x49272e51}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x10dcc584,0xb9c29924,0xfbc85a67,0xe941bdfb,0xdee20c61,0xc570cd5c,0xb3ffa2d4,0x49272e51}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb9c2992410dcc584,0xe941bdfbfbc85a67,0xc570cd5cdee20c61,0x49272e51b3ffa2d4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb9c2992410dcc584,0xe941bdfbfbc85a67,0xc570cd5cdee20c61,0x49272e51b3ffa2d4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x62c2,0x86e,0x4c92,0xdce1,0x2d33,0xfde4,0xdefd,0xf4a0,0x630,0x6f71,0x66ae,0x62b8,0xd16a,0xd9ff,0x9728,0x2493}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x62c2,0x86e,0x4c92,0xdce1,0x2d33,0xfde4,0xdefd,0xf4a0,0x630,0x6f71,0x66ae,0x62b8,0xd16a,0xd9ff,0x9728,0x2493}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x86e62c2,0xdce14c92,0xfde42d33,0xf4a0defd,0x6f710630,0x62b866ae,0xd9ffd16a,0x24939728}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x86e62c2,0xdce14c92,0xfde42d33,0xf4a0defd,0x6f710630,0x62b866ae,0xd9ffd16a,0x24939728}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdce14c92086e62c2,0xf4a0defdfde42d33,0x62b866ae6f710630,0x24939728d9ffd16a}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0xdce14c92086e62c2,0xf4a0defdfde42d33,0x62b866ae6f710630,0x24939728d9ffd16a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xc93,0xd634,0x4632,0x353f,0x76ba,0x220d,0x5084,0x3187,0xb121,0xfc8,0x6860,0xa3e4,0x1368,0x7388,0x5e0,0x7e52}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xc93,0xd634,0x4632,0x353f,0x76ba,0x220d,0x5084,0x3187,0xb121,0xfc8,0x6860,0xa3e4,0x1368,0x7388,0x5e0,0x7e52}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xd6340c93,0x353f4632,0x220d76ba,0x31875084,0xfc8b121,0xa3e46860,0x73881368,0x7e5205e0}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xd6340c93,0x353f4632,0x220d76ba,0x31875084,0xfc8b121,0xa3e46860,0x73881368,0x7e5205e0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x353f4632d6340c93,0x31875084220d76ba,0xa3e468600fc8b121,0x7e5205e073881368}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x353f4632d6340c93,0x31875084220d76ba,0xa3e468600fc8b121,0x7e5205e073881368}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2f6d,0xbfbd,0x6af0,0xbcd3,0x5c61,0x8f62,0x9b0b,0xd78a,0x3142,0x61aa,0x4716,0x208,0x93c7,0x43bd,0x97d6,0xda1a,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xd7}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x2f6d,0xbfbd,0x6af0,0xbcd3,0x5c61,0x8f62,0x9b0b,0xd78a,0x3142,0x61aa,0x4716,0x208,0x93c7,0x43bd,0x97d6,0xda1a,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xd7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbfbd2f6d,0xbcd36af0,0x8f625c61,0xd78a9b0b,0x61aa3142,0x2084716,0x43bd93c7,0xda1a97d6,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xd7ffff}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0xbfbd2f6d,0xbcd36af0,0x8f625c61,0xd78a9b0b,0x61aa3142,0x2084716,0x43bd93c7,0xda1a97d6,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xd7ffff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbcd36af0bfbd2f6d,0xd78a9b0b8f625c61,0x208471661aa3142,0xda1a97d643bd93c7,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xd7ffffffffffff}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbcd36af0bfbd2f6d,0xd78a9b0b8f625c61,0x208471661aa3142,0xda1a97d643bd93c7,0xffffffffffffffff,0xffffffffffffffff,0xffffffffffffffff,0xd7ffffffffffff}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x62c2,0x86e,0x4c92,0xdce1,0x2d33,0xfde4,0xdefd,0xf4a0,0x630,0x6f71,0x66ae,0x62b8,0xd16a,0xd9ff,0x9728,0x2493}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x62c2,0x86e,0x4c92,0xdce1,0x2d33,0xfde4,0xdefd,0xf4a0,0x630,0x6f71,0x66ae,0x62b8,0xd16a,0xd9ff,0x9728,0x2493}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x86e62c2,0xdce14c92,0xfde42d33,0xf4a0defd,0x6f710630,0x62b866ae,0xd9ffd16a,0x24939728}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x86e62c2,0xdce14c92,0xfde42d33,0xf4a0defd,0x6f710630,0x62b866ae,0xd9ffd16a,0x24939728}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdce14c92086e62c2,0xf4a0defdfde42d33,0x62b866ae6f710630,0x24939728d9ffd16a}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdce14c92086e62c2,0xf4a0defdfde42d33,0x62b866ae6f710630,0x24939728d9ffd16a}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9add,0x156b,0x8705,0x6bb9,0x8bdf,0xd034,0x21a6,0xb827,0x44e9,0x34c7,0x3da3,0xa9fd,0xcebd,0x3ec0,0xcd63,0xca1}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9add,0x156b,0x8705,0x6bb9,0x8bdf,0xd034,0x21a6,0xb827,0x44e9,0x34c7,0x3da3,0xa9fd,0xcebd,0x3ec0,0xcd63,0xca1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x156b9add,0x6bb98705,0xd0348bdf,0xb82721a6,0x34c744e9,0xa9fd3da3,0x3ec0cebd,0xca1cd63}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x156b9add,0x6bb98705,0xd0348bdf,0xb82721a6,0x34c744e9,0xa9fd3da3,0x3ec0cebd,0xca1cd63}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6bb98705156b9add,0xb82721a6d0348bdf,0xa9fd3da334c744e9,0xca1cd633ec0cebd}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6bb98705156b9add,0xb82721a6d0348bdf,0xa9fd3da334c744e9,0xca1cd633ec0cebd}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc584,0x10dc,0x9924,0xb9c2,0x5a67,0xfbc8,0xbdfb,0xe941,0xc61,0xdee2,0xcd5c,0xc570,0xa2d4,0xb3ff,0x2e51,0x4927}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc584,0x10dc,0x9924,0xb9c2,0x5a67,0xfbc8,0xbdfb,0xe941,0xc61,0xdee2,0xcd5c,0xc570,0xa2d4,0xb3ff,0x2e51,0x4927}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x10dcc584,0xb9c29924,0xfbc85a67,0xe941bdfb,0xdee20c61,0xc570cd5c,0xb3ffa2d4,0x49272e51}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x10dcc584,0xb9c29924,0xfbc85a67,0xe941bdfb,0xdee20c61,0xc570cd5c,0xb3ffa2d4,0x49272e51}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb9c2992410dcc584,0xe941bdfbfbc85a67,0xc570cd5cdee20c61,0x49272e51b3ffa2d4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb9c2992410dcc584,0xe941bdfbfbc85a67,0xc570cd5cdee20c61,0x49272e51b3ffa2d4}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xc93,0xd634,0x4632,0x353f,0x76ba,0x220d,0x5084,0x3187,0xb121,0xfc8,0x6860,0xa3e4,0x1368,0x7388,0x5e0,0x7e52}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xc93,0xd634,0x4632,0x353f,0x76ba,0x220d,0x5084,0x3187,0xb121,0xfc8,0x6860,0xa3e4,0x1368,0x7388,0x5e0,0x7e52}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xd6340c93,0x353f4632,0x220d76ba,0x31875084,0xfc8b121,0xa3e46860,0x73881368,0x7e5205e0}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xd6340c93,0x353f4632,0x220d76ba,0x31875084,0xfc8b121,0xa3e46860,0x73881368,0x7e5205e0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x353f4632d6340c93,0x31875084220d76ba,0xa3e468600fc8b121,0x7e5205e073881368}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x353f4632d6340c93,0x31875084220d76ba,0xa3e468600fc8b121,0x7e5205e073881368}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x5}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 
-{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 5}, {{ +}}}, 5}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x169a,0xd34,0xbfd7,0x4089,0x1be8,0xc17d,0xfefb,0x6efd,0x76e0,0x58ea,0xfedb,0xd204,0x1e3,0x3b9a,0x47d6,0xdf28,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x169a,0xd34,0xbfd7,0x4089,0x1be8,0xc17d,0xfefb,0x6efd,0x76e0,0x58ea,0xfedb,0xd204,0x1e3,0x3b9a,0x47d6,0xdf28,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xd34169a,0x4089bfd7,0xc17d1be8,0x6efdfefb,0x58ea76e0,0xd204fedb,0x3b9a01e3,0xdf2847d6,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xd34169a,0x4089bfd7,0xc17d1be8,0x6efdfefb,0x58ea76e0,0xd204fedb,0x3b9a01e3,0xdf2847d6,0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x4089bfd70d34169a,0x6efdfefbc17d1be8,0xd204fedb58ea76e0,0xdf2847d63b9a01e3,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x4089bfd70d34169a,0x6efdfefbc17d1be8,0xd204fedb58ea76e0,0xdf2847d63b9a01e3,0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x169a,0xd34,0xbfd7,0x4089,0x1be8,0xc17d,0xfefb,0x6efd,0x76e0,0x58ea,0xfedb,0xd204,0x1e3,0x3b9a,0x47d6,0xdf28,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x169a,0xd34,0xbfd7,0x4089,0x1be8,0xc17d,0xfefb,0x6efd,0x76e0,0x58ea,0xfedb,0xd204,0x1e3,0x3b9a,0x47d6,0xdf28,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xd34169a,0x4089bfd7,0xc17d1be8,0x6efdfefb,0x58ea76e0,0xd204fedb,0x3b9a01e3,0xdf2847d6,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xd34169a,0x4089bfd7,0xc17d1be8,0x6efdfefb,0x58ea76e0,0xd204fedb,0x3b9a01e3,0xdf2847d6,0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x4089bfd70d34169a,0x6efdfefbc17d1be8,0xd204fedb58ea76e0,0xdf2847d63b9a01e3,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x4089bfd70d34169a,0x6efdfefbc17d1be8,0xd204fedb58ea76e0,0xdf2847d63b9a01e3,0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xb4d,0x869a,0xdfeb,0x2044,0x8df4,0xe0be,0xff7d,0x377e,0x3b70,0xac75,0x7f6d,0xe902,0xf1,0x1dcd,0x23eb,0x6f94,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xb4d,0x869a,0xdfeb,0x2044,0x8df4,0xe0be,0xff7d,0x377e,0x3b70,0xac75,0x7f6d,0xe902,0xf1,0x1dcd,0x23eb,0x6f94,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x869a0b4d,0x2044dfeb,0xe0be8df4,0x377eff7d,0xac753b70,0xe9027f6d,0x1dcd00f1,0x6f9423eb,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x869a0b4d,0x2044dfeb,0xe0be8df4,0x377eff7d,0xac753b70,0xe9027f6d,0x1dcd00f1,0x6f9423eb,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2044dfeb869a0b4d,0x377eff7de0be8df4,0xe9027f6dac753b70,0x6f9423eb1dcd00f1,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2044dfeb869a0b4d,0x377eff7de0be8df4,0xe9027f6dac753b70,0x6f9423eb1dcd00f1,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x2412,0x6ec4,0x4dda,0x2cde,0x281d,0xaaa7,0x3a33,0xc1d6,0x5c26,0x22e3,0x816d,0x13fb,0xac81,0x58e8,0xd1a7,0xadaa,0xc}}} +{{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x2412,0x6ec4,0x4dda,0x2cde,0x281d,0xaaa7,0x3a33,0xc1d6,0x5c26,0x22e3,0x816d,0x13fb,0xac81,0x58e8,0xd1a7,0xadaa,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x6ec42412,0x2cde4dda,0xaaa7281d,0xc1d63a33,0x22e35c26,0x13fb816d,0x58e8ac81,0xadaad1a7,0xc}}} +{{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x6ec42412,0x2cde4dda,0xaaa7281d,0xc1d63a33,0x22e35c26,0x13fb816d,0x58e8ac81,0xadaad1a7,0xc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x2cde4dda6ec42412,0xc1d63a33aaa7281d,0x13fb816d22e35c26,0xadaad1a758e8ac81,0xc}}} +{{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x2cde4dda6ec42412,0xc1d63a33aaa7281d,0x13fb816d22e35c26,0xadaad1a758e8ac81,0xc}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 
#elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1f45,0x5630,0xd526,0x9cc7,0x1aab,0x114d,0x87b3,0xbb27,0xc6b6,0xe50,0x8bb4,0x813f,0xff7a,0xf810,0xa8d3,0x66ee,0xfffc,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x35f}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x1f45,0x5630,0xd526,0x9cc7,0x1aab,0x114d,0x87b3,0xbb27,0xc6b6,0xe50,0x8bb4,0x813f,0xff7a,0xf810,0xa8d3,0x66ee,0xfffc,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x35f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x56301f45,0x9cc7d526,0x114d1aab,0xbb2787b3,0xe50c6b6,0x813f8bb4,0xf810ff7a,0x66eea8d3,0xfffffffc,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x35fffff}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x56301f45,0x9cc7d526,0x114d1aab,0xbb2787b3,0xe50c6b6,0x813f8bb4,0xf810ff7a,0x66eea8d3,0xfffffffc,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x35fffff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9cc7d52656301f45,0xbb2787b3114d1aab,0x813f8bb40e50c6b6,0x66eea8d3f810ff7a,0xfffffffffffffffc,0xffffffffffffffff,0xffffffffffffffff,0x35fffffffffffff}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9cc7d52656301f45,0xbb2787b3114d1aab,0x813f8bb40e50c6b6,0x66eea8d3f810ff7a,0xfffffffffffffffc,0xffffffffffffffff,0xffffffffffffffff,0x35fffffffffffff}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xb4d,0x869a,0xdfeb,0x2044,0x8df4,0xe0be,0xff7d,0x377e,0x3b70,0xac75,0x7f6d,0xe902,0xf1,0x1dcd,0x23eb,0x6f94,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xb4d,0x869a,0xdfeb,0x2044,0x8df4,0xe0be,0xff7d,0x377e,0x3b70,0xac75,0x7f6d,0xe902,0xf1,0x1dcd,0x23eb,0x6f94,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x869a0b4d,0x2044dfeb,0xe0be8df4,0x377eff7d,0xac753b70,0xe9027f6d,0x1dcd00f1,0x6f9423eb,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x869a0b4d,0x2044dfeb,0xe0be8df4,0x377eff7d,0xac753b70,0xe9027f6d,0x1dcd00f1,0x6f9423eb,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2044dfeb869a0b4d,0x377eff7de0be8df4,0xe9027f6dac753b70,0x6f9423eb1dcd00f1,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) 
{0x2044dfeb869a0b4d,0x377eff7de0be8df4,0xe9027f6dac753b70,0x6f9423eb1dcd00f1,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x233f,0x38d9,0x6fc1,0xa333,0xaeb,0xce6a,0x2a4c,0xa1c1,0x274c,0xa9fc,0xd4c6,0xb0b3,0x555b,0x7a48,0x411a,0x2bdc}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x233f,0x38d9,0x6fc1,0xa333,0xaeb,0xce6a,0x2a4c,0xa1c1,0x274c,0xa9fc,0xd4c6,0xb0b3,0x555b,0x7a48,0x411a,0x2bdc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x38d9233f,0xa3336fc1,0xce6a0aeb,0xa1c12a4c,0xa9fc274c,0xb0b3d4c6,0x7a48555b,0x2bdc411a}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x38d9233f,0xa3336fc1,0xce6a0aeb,0xa1c12a4c,0xa9fc274c,0xb0b3d4c6,0x7a48555b,0x2bdc411a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa3336fc138d9233f,0xa1c12a4cce6a0aeb,0xb0b3d4c6a9fc274c,0x2bdc411a7a48555b}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa3336fc138d9233f,0xa1c12a4cce6a0aeb,0xb0b3d4c6a9fc274c,0x2bdc411a7a48555b}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x169a,0xd34,0xbfd7,0x4089,0x1be8,0xc17d,0xfefb,0x6efd,0x76e0,0x58ea,0xfedb,0xd204,0x1e3,0x3b9a,0x47d6,0xdf28,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x169a,0xd34,0xbfd7,0x4089,0x1be8,0xc17d,0xfefb,0x6efd,0x76e0,0x58ea,0xfedb,0xd204,0x1e3,0x3b9a,0x47d6,0xdf28,0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = 
(mp_limb_t[]) {0xd34169a,0x4089bfd7,0xc17d1be8,0x6efdfefb,0x58ea76e0,0xd204fedb,0x3b9a01e3,0xdf2847d6,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xd34169a,0x4089bfd7,0xc17d1be8,0x6efdfefb,0x58ea76e0,0xd204fedb,0x3b9a01e3,0xdf2847d6,0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x4089bfd70d34169a,0x6efdfefbc17d1be8,0xd204fedb58ea76e0,0xdf2847d63b9a01e3,0x2}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x4089bfd70d34169a,0x6efdfefbc17d1be8,0xd204fedb58ea76e0,0xdf2847d63b9a01e3,0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x2412,0x6ec4,0x4dda,0x2cde,0x281d,0xaaa7,0x3a33,0xc1d6,0x5c26,0x22e3,0x816d,0x13fb,0xac81,0x58e8,0xd1a7,0xadaa,0xc}}} +{{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x2412,0x6ec4,0x4dda,0x2cde,0x281d,0xaaa7,0x3a33,0xc1d6,0x5c26,0x22e3,0x816d,0x13fb,0xac81,0x58e8,0xd1a7,0xadaa,0xc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x6ec42412,0x2cde4dda,0xaaa7281d,0xc1d63a33,0x22e35c26,0x13fb816d,0x58e8ac81,0xadaad1a7,0xc}}} +{{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0x6ec42412,0x2cde4dda,0xaaa7281d,0xc1d63a33,0x22e35c26,0x13fb816d,0x58e8ac81,0xadaad1a7,0xc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x2cde4dda6ec42412,0xc1d63a33aaa7281d,0x13fb816d22e35c26,0xadaad1a758e8ac81,0xc}}} +{{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x2cde4dda6ec42412,0xc1d63a33aaa7281d,0x13fb816d22e35c26,0xadaad1a758e8ac81,0xc}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x94}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc 
= 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 37}, {{ +}}}, 37}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xc31e,0x416e,0xe2a5,0x25c2,0xa926,0x45fa,0x578b,0xf395,0x6ac9,0x7901,0xd14d,0xd089,0x9e5c,0xe062,0xbc4,0x583b,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xc31e,0x416e,0xe2a5,0x25c2,0xa926,0x45fa,0x578b,0xf395,0x6ac9,0x7901,0xd14d,0xd089,0x9e5c,0xe062,0xbc4,0x583b,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x416ec31e,0x25c2e2a5,0x45faa926,0xf395578b,0x79016ac9,0xd089d14d,0xe0629e5c,0x583b0bc4,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x416ec31e,0x25c2e2a5,0x45faa926,0xf395578b,0x79016ac9,0xd089d14d,0xe0629e5c,0x583b0bc4,0x3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25c2e2a5416ec31e,0xf395578b45faa926,0xd089d14d79016ac9,0x583b0bc4e0629e5c,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25c2e2a5416ec31e,0xf395578b45faa926,0xd089d14d79016ac9,0x583b0bc4e0629e5c,0x3}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xc31e,0x416e,0xe2a5,0x25c2,0xa926,0x45fa,0x578b,0xf395,0x6ac9,0x7901,0xd14d,0xd089,0x9e5c,0xe062,0xbc4,0x583b,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xc31e,0x416e,0xe2a5,0x25c2,0xa926,0x45fa,0x578b,0xf395,0x6ac9,0x7901,0xd14d,0xd089,0x9e5c,0xe062,0xbc4,0x583b,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, 
._mp_d = (mp_limb_t[]) {0x416ec31e,0x25c2e2a5,0x45faa926,0xf395578b,0x79016ac9,0xd089d14d,0xe0629e5c,0x583b0bc4,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x416ec31e,0x25c2e2a5,0x45faa926,0xf395578b,0x79016ac9,0xd089d14d,0xe0629e5c,0x583b0bc4,0x3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25c2e2a5416ec31e,0xf395578b45faa926,0xd089d14d79016ac9,0x583b0bc4e0629e5c,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25c2e2a5416ec31e,0xf395578b45faa926,0xd089d14d79016ac9,0x583b0bc4e0629e5c,0x3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x618f,0xa0b7,0x7152,0x12e1,0x5493,0xa2fd,0xabc5,0xf9ca,0xb564,0xbc80,0xe8a6,0x6844,0x4f2e,0x7031,0x85e2,0xac1d,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x618f,0xa0b7,0x7152,0x12e1,0x5493,0xa2fd,0xabc5,0xf9ca,0xb564,0xbc80,0xe8a6,0x6844,0x4f2e,0x7031,0x85e2,0xac1d,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xa0b7618f,0x12e17152,0xa2fd5493,0xf9caabc5,0xbc80b564,0x6844e8a6,0x70314f2e,0xac1d85e2,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xa0b7618f,0x12e17152,0xa2fd5493,0xf9caabc5,0xbc80b564,0x6844e8a6,0x70314f2e,0xac1d85e2,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x12e17152a0b7618f,0xf9caabc5a2fd5493,0x6844e8a6bc80b564,0xac1d85e270314f2e,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x12e17152a0b7618f,0xf9caabc5a2fd5493,0x6844e8a6bc80b564,0xac1d85e270314f2e,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4c2,0x1467,0x5829,0xf8ca,0x2d31,0xb661,0xa4a1,0x434a,0x25cb,0xbffa,0xe232,0x8565,0x2305,0xe42,0x3f64,0x710,0x11}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4c2,0x1467,0x5829,0xf8ca,0x2d31,0xb661,0xa4a1,0x434a,0x25cb,0xbffa,0xe232,0x8565,0x2305,0xe42,0x3f64,0x710,0x11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, 
._mp_d = (mp_limb_t[]) {0x146704c2,0xf8ca5829,0xb6612d31,0x434aa4a1,0xbffa25cb,0x8565e232,0xe422305,0x7103f64,0x11}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x146704c2,0xf8ca5829,0xb6612d31,0x434aa4a1,0xbffa25cb,0x8565e232,0xe422305,0x7103f64,0x11}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xf8ca5829146704c2,0x434aa4a1b6612d31,0x8565e232bffa25cb,0x7103f640e422305,0x11}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xf8ca5829146704c2,0x434aa4a1b6612d31,0x8565e232bffa25cb,0x7103f640e422305,0x11}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -32, ._mp_d = (mp_limb_t[]) {0x3b03,0xe541,0x6454,0x6f9,0x3808,0xb93,0x7509,0x2b52,0xed1,0xf4fe,0x8961,0x4869,0x4671,0xdd21,0x4c4c,0x70b0,0xfff9,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x35f}}} +{{{._mp_alloc = 0, ._mp_size = -32, ._mp_d = (mp_limb_t[]) {0x3b03,0xe541,0x6454,0x6f9,0x3808,0xb93,0x7509,0x2b52,0xed1,0xf4fe,0x8961,0x4869,0x4671,0xdd21,0x4c4c,0x70b0,0xfff9,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0x35f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xe5413b03,0x6f96454,0xb933808,0x2b527509,0xf4fe0ed1,0x48698961,0xdd214671,0x70b04c4c,0xfffffff9,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x35fffff}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xe5413b03,0x6f96454,0xb933808,0x2b527509,0xf4fe0ed1,0x48698961,0xdd214671,0x70b04c4c,0xfffffff9,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x35fffff}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x6f96454e5413b03,0x2b5275090b933808,0x48698961f4fe0ed1,0x70b04c4cdd214671,0xfffffffffffffff9,0xffffffffffffffff,0xffffffffffffffff,0x35fffffffffffff}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x6f96454e5413b03,0x2b5275090b933808,0x48698961f4fe0ed1,0x70b04c4cdd214671,0xfffffffffffffff9,0xffffffffffffffff,0xffffffffffffffff,0x35fffffffffffff}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} 
+{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x618f,0xa0b7,0x7152,0x12e1,0x5493,0xa2fd,0xabc5,0xf9ca,0xb564,0xbc80,0xe8a6,0x6844,0x4f2e,0x7031,0x85e2,0xac1d,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x618f,0xa0b7,0x7152,0x12e1,0x5493,0xa2fd,0xabc5,0xf9ca,0xb564,0xbc80,0xe8a6,0x6844,0x4f2e,0x7031,0x85e2,0xac1d,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xa0b7618f,0x12e17152,0xa2fd5493,0xf9caabc5,0xbc80b564,0x6844e8a6,0x70314f2e,0xac1d85e2,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xa0b7618f,0x12e17152,0xa2fd5493,0xf9caabc5,0xbc80b564,0x6844e8a6,0x70314f2e,0xac1d85e2,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x12e17152a0b7618f,0xf9caabc5a2fd5493,0x6844e8a6bc80b564,0xac1d85e270314f2e,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x12e17152a0b7618f,0xf9caabc5a2fd5493,0x6844e8a6bc80b564,0xac1d85e270314f2e,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xe953,0xf5ac,0x2ee2,0xc962,0x459d,0xd9a0,0xd761,0x7c5a,0x8268,0xcf36,0x9b08,0xb7a6,0xbd23,0x7e04,0xe324,0x23ba}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xe953,0xf5ac,0x2ee2,0xc962,0x459d,0xd9a0,0xd761,0x7c5a,0x8268,0xcf36,0x9b08,0xb7a6,0xbd23,0x7e04,0xe324,0x23ba}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = 
(mp_limb_t[]) {0xf5ace953,0xc9622ee2,0xd9a0459d,0x7c5ad761,0xcf368268,0xb7a69b08,0x7e04bd23,0x23bae324}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xf5ace953,0xc9622ee2,0xd9a0459d,0x7c5ad761,0xcf368268,0xb7a69b08,0x7e04bd23,0x23bae324}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xc9622ee2f5ace953,0x7c5ad761d9a0459d,0xb7a69b08cf368268,0x23bae3247e04bd23}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xc9622ee2f5ace953,0x7c5ad761d9a0459d,0xb7a69b08cf368268,0x23bae3247e04bd23}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xc31e,0x416e,0xe2a5,0x25c2,0xa926,0x45fa,0x578b,0xf395,0x6ac9,0x7901,0xd14d,0xd089,0x9e5c,0xe062,0xbc4,0x583b,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xc31e,0x416e,0xe2a5,0x25c2,0xa926,0x45fa,0x578b,0xf395,0x6ac9,0x7901,0xd14d,0xd089,0x9e5c,0xe062,0xbc4,0x583b,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x416ec31e,0x25c2e2a5,0x45faa926,0xf395578b,0x79016ac9,0xd089d14d,0xe0629e5c,0x583b0bc4,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x416ec31e,0x25c2e2a5,0x45faa926,0xf395578b,0x79016ac9,0xd089d14d,0xe0629e5c,0x583b0bc4,0x3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25c2e2a5416ec31e,0xf395578b45faa926,0xd089d14d79016ac9,0x583b0bc4e0629e5c,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x25c2e2a5416ec31e,0xf395578b45faa926,0xd089d14d79016ac9,0x583b0bc4e0629e5c,0x3}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4c2,0x1467,0x5829,0xf8ca,0x2d31,0xb661,0xa4a1,0x434a,0x25cb,0xbffa,0xe232,0x8565,0x2305,0xe42,0x3f64,0x710,0x11}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x4c2,0x1467,0x5829,0xf8ca,0x2d31,0xb661,0xa4a1,0x434a,0x25cb,0xbffa,0xe232,0x8565,0x2305,0xe42,0x3f64,0x710,0x11}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x146704c2,0xf8ca5829,0xb6612d31,0x434aa4a1,0xbffa25cb,0x8565e232,0xe422305,0x7103f64,0x11}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x146704c2,0xf8ca5829,0xb6612d31,0x434aa4a1,0xbffa25cb,0x8565e232,0xe422305,0x7103f64,0x11}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xf8ca5829146704c2,0x434aa4a1b6612d31,0x8565e232bffa25cb,0x7103f640e422305,0x11}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xf8ca5829146704c2,0x434aa4a1b6612d31,0x8565e232bffa25cb,0x7103f640e422305,0x11}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0xf4}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 61}, {{ +}}}, 61}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xf94,0x28f0,0x8955,0xb7aa,0x538f,0x65b6,0x5a15,0xd3b4,0x9667,0xcba4,0x3554,0x11f8,0x25a9,0x6265,0x196a,0x83d,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xf94,0x28f0,0x8955,0xb7aa,0x538f,0x65b6,0x5a15,0xd3b4,0x9667,0xcba4,0x3554,0x11f8,0x25a9,0x6265,0x196a,0x83d,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = 
(mp_limb_t[]) {0x28f00f94,0xb7aa8955,0x65b6538f,0xd3b45a15,0xcba49667,0x11f83554,0x626525a9,0x83d196a,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x28f00f94,0xb7aa8955,0x65b6538f,0xd3b45a15,0xcba49667,0x11f83554,0x626525a9,0x83d196a,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xb7aa895528f00f94,0xd3b45a1565b6538f,0x11f83554cba49667,0x83d196a626525a9,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xb7aa895528f00f94,0xd3b45a1565b6538f,0x11f83554cba49667,0x83d196a626525a9,0x1}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xf94,0x28f0,0x8955,0xb7aa,0x538f,0x65b6,0x5a15,0xd3b4,0x9667,0xcba4,0x3554,0x11f8,0x25a9,0x6265,0x196a,0x83d,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xf94,0x28f0,0x8955,0xb7aa,0x538f,0x65b6,0x5a15,0xd3b4,0x9667,0xcba4,0x3554,0x11f8,0x25a9,0x6265,0x196a,0x83d,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x28f00f94,0xb7aa8955,0x65b6538f,0xd3b45a15,0xcba49667,0x11f83554,0x626525a9,0x83d196a,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x28f00f94,0xb7aa8955,0x65b6538f,0xd3b45a15,0xcba49667,0x11f83554,0x626525a9,0x83d196a,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xb7aa895528f00f94,0xd3b45a1565b6538f,0x11f83554cba49667,0x83d196a626525a9,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xb7aa895528f00f94,0xd3b45a1565b6538f,0x11f83554cba49667,0x83d196a626525a9,0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7ca,0x9478,0x44aa,0xdbd5,0x29c7,0xb2db,0x2d0a,0xe9da,0x4b33,0x65d2,0x1aaa,0x88fc,0x92d4,0x3132,0x8cb5,0x841e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7ca,0x9478,0x44aa,0xdbd5,0x29c7,0xb2db,0x2d0a,0xe9da,0x4b33,0x65d2,0x1aaa,0x88fc,0x92d4,0x3132,0x8cb5,0x841e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x947807ca,0xdbd544aa,0xb2db29c7,0xe9da2d0a,0x65d24b33,0x88fc1aaa,0x313292d4,0x841e8cb5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x947807ca,0xdbd544aa,0xb2db29c7,0xe9da2d0a,0x65d24b33,0x88fc1aaa,0x313292d4,0x841e8cb5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdbd544aa947807ca,0xe9da2d0ab2db29c7,0x88fc1aaa65d24b33,0x841e8cb5313292d4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdbd544aa947807ca,0xe9da2d0ab2db29c7,0x88fc1aaa65d24b33,0x841e8cb5313292d4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} 
+{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x5409,0xed37,0x7339,0xcbe7,0x95ab,0x2582,0x18fa,0xcc9d,0x13ae,0x2543,0xaefd,0xb168,0xfdbc,0x68b,0xc9af,0x6d9d,0x6}}} +{{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x5409,0xed37,0x7339,0xcbe7,0x95ab,0x2582,0x18fa,0xcc9d,0x13ae,0x2543,0xaefd,0xb168,0xfdbc,0x68b,0xc9af,0x6d9d,0x6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xed375409,0xcbe77339,0x258295ab,0xcc9d18fa,0x254313ae,0xb168aefd,0x68bfdbc,0x6d9dc9af,0x6}}} +{{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xed375409,0xcbe77339,0x258295ab,0xcc9d18fa,0x254313ae,0xb168aefd,0x68bfdbc,0x6d9dc9af,0x6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xcbe77339ed375409,0xcc9d18fa258295ab,0xb168aefd254313ae,0x6d9dc9af068bfdbc,0x6}}} +{{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xcbe77339ed375409,0xcc9d18fa258295ab,0xb168aefd254313ae,0x6d9dc9af068bfdbc,0x6}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -32, ._mp_d = (mp_limb_t[]) {0x7013,0x423f,0x42b7,0x3f3d,0x82a,0x9883,0x52bf,0xfede,0x8018,0xa449,0xf571,0xb8a,0x3139,0xbe7,0x439d,0x9e1f,0x2,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xd8}}} +{{{._mp_alloc = 0, ._mp_size = -32, ._mp_d = (mp_limb_t[]) {0x7013,0x423f,0x42b7,0x3f3d,0x82a,0x9883,0x52bf,0xfede,0x8018,0xa449,0xf571,0xb8a,0x3139,0xbe7,0x439d,0x9e1f,0x2,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xd8}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x423f7013,0x3f3d42b7,0x9883082a,0xfede52bf,0xa4498018,0xb8af571,0xbe73139,0x9e1f439d,0x2,0x0,0x0,0x0,0x0,0x0,0x0,0xd80000}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x423f7013,0x3f3d42b7,0x9883082a,0xfede52bf,0xa4498018,0xb8af571,0xbe73139,0x9e1f439d,0x2,0x0,0x0,0x0,0x0,0x0,0x0,0xd80000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x3f3d42b7423f7013,0xfede52bf9883082a,0xb8af571a4498018,0x9e1f439d0be73139,0x2,0x0,0x0,0xd8000000000000}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x3f3d42b7423f7013,0xfede52bf9883082a,0xb8af571a4498018,0x9e1f439d0be73139,0x2,0x0,0x0,0xd8000000000000}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif 
GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7ca,0x9478,0x44aa,0xdbd5,0x29c7,0xb2db,0x2d0a,0xe9da,0x4b33,0x65d2,0x1aaa,0x88fc,0x92d4,0x3132,0x8cb5,0x841e}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7ca,0x9478,0x44aa,0xdbd5,0x29c7,0xb2db,0x2d0a,0xe9da,0x4b33,0x65d2,0x1aaa,0x88fc,0x92d4,0x3132,0x8cb5,0x841e}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x947807ca,0xdbd544aa,0xb2db29c7,0xe9da2d0a,0x65d24b33,0x88fc1aaa,0x313292d4,0x841e8cb5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x947807ca,0xdbd544aa,0xb2db29c7,0xe9da2d0a,0x65d24b33,0x88fc1aaa,0x313292d4,0x841e8cb5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdbd544aa947807ca,0xe9da2d0ab2db29c7,0x88fc1aaa65d24b33,0x841e8cb5313292d4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdbd544aa947807ca,0xe9da2d0ab2db29c7,0x88fc1aaa65d24b33,0x841e8cb5313292d4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) 
{0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca2d,0x34af,0xea29,0x177b,0x91ed,0x86ca,0x588a,0xe94d,0x55df,0x4621,0xa1e4,0x67d7,0xb617,0x6a1,0x88f5,0x87b}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xca2d,0x34af,0xea29,0x177b,0x91ed,0x86ca,0x588a,0xe94d,0x55df,0x4621,0xa1e4,0x67d7,0xb617,0x6a1,0x88f5,0x87b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x34afca2d,0x177bea29,0x86ca91ed,0xe94d588a,0x462155df,0x67d7a1e4,0x6a1b617,0x87b88f5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x34afca2d,0x177bea29,0x86ca91ed,0xe94d588a,0x462155df,0x67d7a1e4,0x6a1b617,0x87b88f5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x177bea2934afca2d,0xe94d588a86ca91ed,0x67d7a1e4462155df,0x87b88f506a1b617}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x177bea2934afca2d,0xe94d588a86ca91ed,0x67d7a1e4462155df,0x87b88f506a1b617}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xf94,0x28f0,0x8955,0xb7aa,0x538f,0x65b6,0x5a15,0xd3b4,0x9667,0xcba4,0x3554,0x11f8,0x25a9,0x6265,0x196a,0x83d,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xf94,0x28f0,0x8955,0xb7aa,0x538f,0x65b6,0x5a15,0xd3b4,0x9667,0xcba4,0x3554,0x11f8,0x25a9,0x6265,0x196a,0x83d,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x28f00f94,0xb7aa8955,0x65b6538f,0xd3b45a15,0xcba49667,0x11f83554,0x626525a9,0x83d196a,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x28f00f94,0xb7aa8955,0x65b6538f,0xd3b45a15,0xcba49667,0x11f83554,0x626525a9,0x83d196a,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xb7aa895528f00f94,0xd3b45a1565b6538f,0x11f83554cba49667,0x83d196a626525a9,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0xb7aa895528f00f94,0xd3b45a1565b6538f,0x11f83554cba49667,0x83d196a626525a9,0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x5409,0xed37,0x7339,0xcbe7,0x95ab,0x2582,0x18fa,0xcc9d,0x13ae,0x2543,0xaefd,0xb168,0xfdbc,0x68b,0xc9af,0x6d9d,0x6}}} +{{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x5409,0xed37,0x7339,0xcbe7,0x95ab,0x2582,0x18fa,0xcc9d,0x13ae,0x2543,0xaefd,0xb168,0xfdbc,0x68b,0xc9af,0x6d9d,0x6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xed375409,0xcbe77339,0x258295ab,0xcc9d18fa,0x254313ae,0xb168aefd,0x68bfdbc,0x6d9dc9af,0x6}}} +{{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xed375409,0xcbe77339,0x258295ab,0xcc9d18fa,0x254313ae,0xb168aefd,0x68bfdbc,0x6d9dc9af,0x6}}}} #elif GMP_LIMB_BITS == 64 
-{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xcbe77339ed375409,0xcc9d18fa258295ab,0xb168aefd254313ae,0x6d9dc9af068bfdbc,0x6}}} +{{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0xcbe77339ed375409,0xcc9d18fa258295ab,0xb168aefd254313ae,0x6d9dc9af068bfdbc,0x6}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x61}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, 
._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 97}, {{ +}}}, 97}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1e34,0xdf45,0xcfa6,0xa203,0xe18a,0xd8c5,0x1d42,0x7a30,0x86ce,0x7a12,0xf933,0x741d,0x6b48,0xe48e,0xbf65,0x1854}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1920,0xb8b5,0x9c5,0xfd99,0xd1a8,0xb311,0xd4d8,0x9a8e,0x7f4c,0x1ad6,0xeba7,0xb78,0xe77,0xa59b,0xe5bc,0x11f7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdf451e34,0xa203cfa6,0xd8c5e18a,0x7a301d42,0x7a1286ce,0x741df933,0xe48e6b48,0x1854bf65}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb8b51920,0xfd9909c5,0xb311d1a8,0x9a8ed4d8,0x1ad67f4c,0xb78eba7,0xa59b0e77,0x11f7e5bc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa203cfa6df451e34,0x7a301d42d8c5e18a,0x741df9337a1286ce,0x1854bf65e48e6b48}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfd9909c5b8b51920,0x9a8ed4d8b311d1a8,0xb78eba71ad67f4c,0x11f7e5bca59b0e77}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1e34,0xdf45,0xcfa6,0xa203,0xe18a,0xd8c5,0x1d42,0x7a30,0x86ce,0x7a12,0xf933,0x741d,0x6b48,0xe48e,0xbf65,0x1854}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1920,0xb8b5,0x9c5,0xfd99,0xd1a8,0xb311,0xd4d8,0x9a8e,0x7f4c,0x1ad6,0xeba7,0xb78,0xe77,0xa59b,0xe5bc,0x11f7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdf451e34,0xa203cfa6,0xd8c5e18a,0x7a301d42,0x7a1286ce,0x741df933,0xe48e6b48,0x1854bf65}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb8b51920,0xfd9909c5,0xb311d1a8,0x9a8ed4d8,0x1ad67f4c,0xb78eba7,0xa59b0e77,0x11f7e5bc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa203cfa6df451e34,0x7a301d42d8c5e18a,0x741df9337a1286ce,0x1854bf65e48e6b48}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfd9909c5b8b51920,0x9a8ed4d8b311d1a8,0xb78eba71ad67f4c,0x11f7e5bca59b0e77}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8f1a,0x6fa2,0xe7d3,0x5101,0xf0c5,0x6c62,0xea1,0x3d18,0x4367,0xbd09,0xfc99,0x3a0e,0x35a4,0xf247,0x5fb2,0xc2a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8c90,0xdc5a,0x84e2,0x7ecc,0xe8d4,0x5988,0x6a6c,0x4d47,0x3fa6,0x8d6b,0x75d3,0x85bc,0x873b,0x52cd,0xf2de,0x8fb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6fa28f1a,0x5101e7d3,0x6c62f0c5,0x3d180ea1,0xbd094367,0x3a0efc99,0xf24735a4,0xc2a5fb2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdc5a8c90,0x7ecc84e2,0x5988e8d4,0x4d476a6c,0x8d6b3fa6,0x85bc75d3,0x52cd873b,0x8fbf2de}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0x5101e7d36fa28f1a,0x3d180ea16c62f0c5,0x3a0efc99bd094367,0xc2a5fb2f24735a4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7ecc84e2dc5a8c90,0x4d476a6c5988e8d4,0x85bc75d38d6b3fa6,0x8fbf2de52cd873b}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcf03,0xa322,0x8523,0x93d,0x3bc,0x4b50,0x33ca,0x56b5,0x863f,0x87c8,0x38bf,0x98c6,0x8ce8,0xfab7,0xfc02,0x78ed}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x11f9,0xfb19,0xfec2,0xe1f1,0xe7a6,0xf9f,0x607,0xc29,0x62c,0x571,0x5f1e,0x9ef8,0x6833,0x4daa,0x5706,0x7bc1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa322cf03,0x93d8523,0x4b5003bc,0x56b533ca,0x87c8863f,0x98c638bf,0xfab78ce8,0x78edfc02}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xfb1911f9,0xe1f1fec2,0xf9fe7a6,0xc290607,0x571062c,0x9ef85f1e,0x4daa6833,0x7bc15706}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x93d8523a322cf03,0x56b533ca4b5003bc,0x98c638bf87c8863f,0x78edfc02fab78ce8}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe1f1fec2fb1911f9,0xc2906070f9fe7a6,0x9ef85f1e0571062c,0x7bc157064daa6833}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x24ed,0x1400,0x74a1,0x1310,0xce8a,0x1c0d,0x512a,0x3500,0x2451,0x6992,0x892c,0x3cdb,0x45d8,0x520,0x420,0xf11f,0x6cb,0xbe4d,0xd06c,0xcbe4,0x4d06,0x6cbe,0xe4d0,0x6cb,0xbe4d,0xd06c,0xcbe4,0x4d06,0x6cbe,0xe4d0,0x6cb,0x15}}} +{{{._mp_alloc = 0, ._mp_size = -32, ._mp_d = (mp_limb_t[]) {0x8f6c,0x2df1,0x638e,0xe2e3,0x6ecb,0xaa5e,0x1866,0xef1d,0x3821,0xa3ab,0x2721,0x3107,0xaffc,0x377a,0x4bb0,0x86ee,0x616a,0xa7a5,0x5616,0x6a7a,0xa561,0x16a7,0x7a56,0x616a,0xa7a5,0x5616,0x6a7a,0xa561,0x16a7,0x7a56,0x616a,0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x140024ed,0x131074a1,0x1c0dce8a,0x3500512a,0x69922451,0x3cdb892c,0x52045d8,0xf11f0420,0xbe4d06cb,0xcbe4d06c,0x6cbe4d06,0x6cbe4d0,0xd06cbe4d,0x4d06cbe4,0xe4d06cbe,0x1506cb}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x2df18f6c,0xe2e3638e,0xaa5e6ecb,0xef1d1866,0xa3ab3821,0x31072721,0x377aaffc,0x86ee4bb0,0xa7a5616a,0x6a7a5616,0x16a7a561,0x616a7a56,0x5616a7a5,0xa5616a7a,0x7a5616a7,0xd616a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x131074a1140024ed,0x3500512a1c0dce8a,0x3cdb892c69922451,0xf11f0420052045d8,0xcbe4d06cbe4d06cb,0x6cbe4d06cbe4d06,0x4d06cbe4d06cbe4d,0x1506cbe4d06cbe}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xe2e3638e2df18f6c,0xef1d1866aa5e6ecb,0x31072721a3ab3821,0x86ee4bb0377aaffc,0x6a7a5616a7a5616a,0x616a7a5616a7a561,0xa5616a7a5616a7a5,0xd616a7a5616a7}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8f1a,0x6fa2,0xe7d3,0x5101,0xf0c5,0x6c62,0xea1,0x3d18,0x4367,0xbd09,0xfc99,0x3a0e,0x35a4,0xf247,0x5fb2,0xc2a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8c90,0xdc5a,0x84e2,0x7ecc,0xe8d4,0x5988,0x6a6c,0x4d47,0x3fa6,0x8d6b,0x75d3,0x85bc,0x873b,0x52cd,0xf2de,0x8fb}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x6fa28f1a,0x5101e7d3,0x6c62f0c5,0x3d180ea1,0xbd094367,0x3a0efc99,0xf24735a4,0xc2a5fb2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdc5a8c90,0x7ecc84e2,0x5988e8d4,0x4d476a6c,0x8d6b3fa6,0x85bc75d3,0x52cd873b,0x8fbf2de}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5101e7d36fa28f1a,0x3d180ea16c62f0c5,0x3a0efc99bd094367,0xc2a5fb2f24735a4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7ecc84e2dc5a8c90,0x4d476a6c5988e8d4,0x85bc75d38d6b3fa6,0x8fbf2de52cd873b}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 
-{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x98b3,0xd2e,0x314c,0x5199,0x7a5a,0xb592,0xbd65,0x1ef7,0x7d32,0x94fd,0x6cfe,0x68e3,0xcda6,0x8d91,0xfb73,0x88}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x696c,0x823e,0x85a8,0x5b9e,0x43db,0x5bd2,0x5e0b,0x9a1b,0x98f2,0x2445,0x26ef,0xbb9b,0x93b7,0xd9d4,0x2ed3,0x8c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xd2e98b3,0x5199314c,0xb5927a5a,0x1ef7bd65,0x94fd7d32,0x68e36cfe,0x8d91cda6,0x88fb73}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x823e696c,0x5b9e85a8,0x5bd243db,0x9a1b5e0b,0x244598f2,0xbb9b26ef,0xd9d493b7,0x8c2ed3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x5199314c0d2e98b3,0x1ef7bd65b5927a5a,0x68e36cfe94fd7d32,0x88fb738d91cda6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5b9e85a8823e696c,0x9a1b5e0b5bd243db,0xbb9b26ef244598f2,0x8c2ed3d9d493b7}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1e34,0xdf45,0xcfa6,0xa203,0xe18a,0xd8c5,0x1d42,0x7a30,0x86ce,0x7a12,0xf933,0x741d,0x6b48,0xe48e,0xbf65,0x1854}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1920,0xb8b5,0x9c5,0xfd99,0xd1a8,0xb311,0xd4d8,0x9a8e,0x7f4c,0x1ad6,0xeba7,0xb78,0xe77,0xa59b,0xe5bc,0x11f7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdf451e34,0xa203cfa6,0xd8c5e18a,0x7a301d42,0x7a1286ce,0x741df933,0xe48e6b48,0x1854bf65}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb8b51920,0xfd9909c5,0xb311d1a8,0x9a8ed4d8,0x1ad67f4c,0xb78eba7,0xa59b0e77,0x11f7e5bc}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa203cfa6df451e34,0x7a301d42d8c5e18a,0x741df9337a1286ce,0x1854bf65e48e6b48}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xfd9909c5b8b51920,0x9a8ed4d8b311d1a8,0xb78eba71ad67f4c,0x11f7e5bca59b0e77}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, 
._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xcf03,0xa322,0x8523,0x93d,0x3bc,0x4b50,0x33ca,0x56b5,0x863f,0x87c8,0x38bf,0x98c6,0x8ce8,0xfab7,0xfc02,0x78ed}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x11f9,0xfb19,0xfec2,0xe1f1,0xe7a6,0xf9f,0x607,0xc29,0x62c,0x571,0x5f1e,0x9ef8,0x6833,0x4daa,0x5706,0x7bc1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa322cf03,0x93d8523,0x4b5003bc,0x56b533ca,0x87c8863f,0x98c638bf,0xfab78ce8,0x78edfc02}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xfb1911f9,0xe1f1fec2,0xf9fe7a6,0xc290607,0x571062c,0x9ef85f1e,0x4daa6833,0x7bc15706}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x93d8523a322cf03,0x56b533ca4b5003bc,0x98c638bf87c8863f,0x78edfc02fab78ce8}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe1f1fec2fb1911f9,0xc2906070f9fe7a6,0x9ef85f1e0571062c,0x7bc157064daa6833}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xb}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 113}, {{ +}}}, 113}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x779c,0xdc0d,0x3c7f,0x2e51,0x85b9,0xfb7d,0xb29e,0xd38c,0x64bb,0x9ea5,0xa4fa,0x79a3,0xdf70,0xf359,0xca21,0xa977,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x779c,0xdc0d,0x3c7f,0x2e51,0x85b9,0xfb7d,0xb29e,0xd38c,0x64bb,0x9ea5,0xa4fa,0x79a3,0xdf70,0xf359,0xca21,0xa977,0x6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xdc0d779c,0x2e513c7f,0xfb7d85b9,0xd38cb29e,0x9ea564bb,0x79a3a4fa,0xf359df70,0xa977ca21,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xdc0d779c,0x2e513c7f,0xfb7d85b9,0xd38cb29e,0x9ea564bb,0x79a3a4fa,0xf359df70,0xa977ca21,0x6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2e513c7fdc0d779c,0xd38cb29efb7d85b9,0x79a3a4fa9ea564bb,0xa977ca21f359df70,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2e513c7fdc0d779c,0xd38cb29efb7d85b9,0x79a3a4fa9ea564bb,0xa977ca21f359df70,0x6}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x779c,0xdc0d,0x3c7f,0x2e51,0x85b9,0xfb7d,0xb29e,0xd38c,0x64bb,0x9ea5,0xa4fa,0x79a3,0xdf70,0xf359,0xca21,0xa977,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x779c,0xdc0d,0x3c7f,0x2e51,0x85b9,0xfb7d,0xb29e,0xd38c,0x64bb,0x9ea5,0xa4fa,0x79a3,0xdf70,0xf359,0xca21,0xa977,0x6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xdc0d779c,0x2e513c7f,0xfb7d85b9,0xd38cb29e,0x9ea564bb,0x79a3a4fa,0xf359df70,0xa977ca21,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xdc0d779c,0x2e513c7f,0xfb7d85b9,0xd38cb29e,0x9ea564bb,0x79a3a4fa,0xf359df70,0xa977ca21,0x6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2e513c7fdc0d779c,0xd38cb29efb7d85b9,0x79a3a4fa9ea564bb,0xa977ca21f359df70,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2e513c7fdc0d779c,0xd38cb29efb7d85b9,0x79a3a4fa9ea564bb,0xa977ca21f359df70,0x6}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} 
+{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xbbce,0xee06,0x9e3f,0x9728,0xc2dc,0x7dbe,0x594f,0xe9c6,0xb25d,0x4f52,0xd27d,0x3cd1,0xefb8,0xf9ac,0xe510,0x54bb,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xbbce,0xee06,0x9e3f,0x9728,0xc2dc,0x7dbe,0x594f,0xe9c6,0xb25d,0x4f52,0xd27d,0x3cd1,0xefb8,0xf9ac,0xe510,0x54bb,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xee06bbce,0x97289e3f,0x7dbec2dc,0xe9c6594f,0x4f52b25d,0x3cd1d27d,0xf9acefb8,0x54bbe510,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xee06bbce,0x97289e3f,0x7dbec2dc,0xe9c6594f,0x4f52b25d,0x3cd1d27d,0xf9acefb8,0x54bbe510,0x3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x97289e3fee06bbce,0xe9c6594f7dbec2dc,0x3cd1d27d4f52b25d,0x54bbe510f9acefb8,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x97289e3fee06bbce,0xe9c6594f7dbec2dc,0x3cd1d27d4f52b25d,0x54bbe510f9acefb8,0x3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x8db1,0xaa9d,0x1944,0x727a,0xc6c3,0xffc0,0x39b4,0x5643,0x2de0,0xb534,0xc0a9,0x5371,0x8e58,0x80df,0xa6c4,0x5a83,0x36}}} +{{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x8db1,0xaa9d,0x1944,0x727a,0xc6c3,0xffc0,0x39b4,0x5643,0x2de0,0xb534,0xc0a9,0x5371,0x8e58,0x80df,0xa6c4,0x5a83,0x36}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xaa9d8db1,0x727a1944,0xffc0c6c3,0x564339b4,0xb5342de0,0x5371c0a9,0x80df8e58,0x5a83a6c4,0x36}}} +{{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xaa9d8db1,0x727a1944,0xffc0c6c3,0x564339b4,0xb5342de0,0x5371c0a9,0x80df8e58,0x5a83a6c4,0x36}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x727a1944aa9d8db1,0x564339b4ffc0c6c3,0x5371c0a9b5342de0,0x5a83a6c480df8e58,0x36}}} +{{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x727a1944aa9d8db1,0x564339b4ffc0c6c3,0x5371c0a9b5342de0,0x5a83a6c480df8e58,0x36}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -32, ._mp_d = (mp_limb_t[]) {0x9c90,0x5de8,0xf815,0x67c5,0x989,0xc9,0x7c9e,0x180b,0x526d,0xdf5a,0x3386,0xea88,0x580a,0x24c5,0x5507,0x3bad,0x10,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x438}}} +{{{._mp_alloc = 0, ._mp_size = -32, ._mp_d = (mp_limb_t[]) {0x9c90,0x5de8,0xf815,0x67c5,0x989,0xc9,0x7c9e,0x180b,0x526d,0xdf5a,0x3386,0xea88,0x580a,0x24c5,0x5507,0x3bad,0x10,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x438}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x5de89c90,0x67c5f815,0xc90989,0x180b7c9e,0xdf5a526d,0xea883386,0x24c5580a,0x3bad5507,0x10,0x0,0x0,0x0,0x0,0x0,0x0,0x4380000}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x5de89c90,0x67c5f815,0xc90989,0x180b7c9e,0xdf5a526d,0xea883386,0x24c5580a,0x3bad5507,0x10,0x0,0x0,0x0,0x0,0x0,0x0,0x4380000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x67c5f8155de89c90,0x180b7c9e00c90989,0xea883386df5a526d,0x3bad550724c5580a,0x10,0x0,0x0,0x438000000000000}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x67c5f8155de89c90,0x180b7c9e00c90989,0xea883386df5a526d,0x3bad550724c5580a,0x10,0x0,0x0,0x438000000000000}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xbbce,0xee06,0x9e3f,0x9728,0xc2dc,0x7dbe,0x594f,0xe9c6,0xb25d,0x4f52,0xd27d,0x3cd1,0xefb8,0xf9ac,0xe510,0x54bb,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0xbbce,0xee06,0x9e3f,0x9728,0xc2dc,0x7dbe,0x594f,0xe9c6,0xb25d,0x4f52,0xd27d,0x3cd1,0xefb8,0xf9ac,0xe510,0x54bb,0x3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xee06bbce,0x97289e3f,0x7dbec2dc,0xe9c6594f,0x4f52b25d,0x3cd1d27d,0xf9acefb8,0x54bbe510,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xee06bbce,0x97289e3f,0x7dbec2dc,0xe9c6594f,0x4f52b25d,0x3cd1d27d,0xf9acefb8,0x54bbe510,0x3}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x97289e3fee06bbce,0xe9c6594f7dbec2dc,0x3cd1d27d4f52b25d,0x54bbe510f9acefb8,0x3}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) 
{0x97289e3fee06bbce,0xe9c6594f7dbec2dc,0x3cd1d27d4f52b25d,0x54bbe510f9acefb8,0x3}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa1f8,0x1530,0xa6be,0x126c,0xfd3b,0xbdd9,0xb3bc,0x8495,0x5457,0x1985,0xcfae,0xf440,0x4ea6,0x84ba,0x6881,0x2eb1}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa1f8,0x1530,0xa6be,0x126c,0xfd3b,0xbdd9,0xb3bc,0x8495,0x5457,0x1985,0xcfae,0xf440,0x4ea6,0x84ba,0x6881,0x2eb1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1530a1f8,0x126ca6be,0xbdd9fd3b,0x8495b3bc,0x19855457,0xf440cfae,0x84ba4ea6,0x2eb16881}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1530a1f8,0x126ca6be,0xbdd9fd3b,0x8495b3bc,0x19855457,0xf440cfae,0x84ba4ea6,0x2eb16881}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x126ca6be1530a1f8,0x8495b3bcbdd9fd3b,0xf440cfae19855457,0x2eb1688184ba4ea6}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x126ca6be1530a1f8,0x8495b3bcbdd9fd3b,0xf440cfae19855457,0x2eb1688184ba4ea6}}}} #endif -}}}, { +}}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x779c,0xdc0d,0x3c7f,0x2e51,0x85b9,0xfb7d,0xb29e,0xd38c,0x64bb,0x9ea5,0xa4fa,0x79a3,0xdf70,0xf359,0xca21,0xa977,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x779c,0xdc0d,0x3c7f,0x2e51,0x85b9,0xfb7d,0xb29e,0xd38c,0x64bb,0x9ea5,0xa4fa,0x79a3,0xdf70,0xf359,0xca21,0xa977,0x6}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, 
._mp_d = (mp_limb_t[]) {0xdc0d779c,0x2e513c7f,0xfb7d85b9,0xd38cb29e,0x9ea564bb,0x79a3a4fa,0xf359df70,0xa977ca21,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0xdc0d779c,0x2e513c7f,0xfb7d85b9,0xd38cb29e,0x9ea564bb,0x79a3a4fa,0xf359df70,0xa977ca21,0x6}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2e513c7fdc0d779c,0xd38cb29efb7d85b9,0x79a3a4fa9ea564bb,0xa977ca21f359df70,0x6}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x2e513c7fdc0d779c,0xd38cb29efb7d85b9,0x79a3a4fa9ea564bb,0xa977ca21f359df70,0x6}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x8db1,0xaa9d,0x1944,0x727a,0xc6c3,0xffc0,0x39b4,0x5643,0x2de0,0xb534,0xc0a9,0x5371,0x8e58,0x80df,0xa6c4,0x5a83,0x36}}} +{{{._mp_alloc = 0, ._mp_size = -17, ._mp_d = (mp_limb_t[]) {0x8db1,0xaa9d,0x1944,0x727a,0xc6c3,0xffc0,0x39b4,0x5643,0x2de0,0xb534,0xc0a9,0x5371,0x8e58,0x80df,0xa6c4,0x5a83,0x36}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xaa9d8db1,0x727a1944,0xffc0c6c3,0x564339b4,0xb5342de0,0x5371c0a9,0x80df8e58,0x5a83a6c4,0x36}}} +{{{._mp_alloc = 0, ._mp_size = -9, ._mp_d = (mp_limb_t[]) {0xaa9d8db1,0x727a1944,0xffc0c6c3,0x564339b4,0xb5342de0,0x5371c0a9,0x80df8e58,0x5a83a6c4,0x36}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x727a1944aa9d8db1,0x564339b4ffc0c6c3,0x5371c0a9b5342de0,0x5a83a6c480df8e58,0x36}}} +{{{._mp_alloc = 0, ._mp_size = -5, ._mp_d = (mp_limb_t[]) {0x727a1944aa9d8db1,0x564339b4ffc0c6c3,0x5371c0a9b5342de0,0x5a83a6c480df8e58,0x36}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2e9}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = 
(mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, 149}}; +}}}, 149}}; const quat_left_ideal_t CONNECTING_IDEALS[7] = {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc 
= 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 
0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif 
GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb4ca,0xbe8d,0xcee3,0x9669,0x9cb,0x86eb,0xf6f9,0x374b,0x2e68,0xd1f2,0x3315,0xab5f,0x2208,0xa9c9,0x686e,0x2541}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb4ca,0xbe8d,0xcee3,0x9669,0x9cb,0x86eb,0xf6f9,0x374b,0x2e68,0xd1f2,0x3315,0xab5f,0x2208,0xa9c9,0x686e,0x2541}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbe8db4ca,0x9669cee3,0x86eb09cb,0x374bf6f9,0xd1f22e68,0xab5f3315,0xa9c92208,0x2541686e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbe8db4ca,0x9669cee3,0x86eb09cb,0x374bf6f9,0xd1f22e68,0xab5f3315,0xa9c92208,0x2541686e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9669cee3be8db4ca,0x374bf6f986eb09cb,0xab5f3315d1f22e68,0x2541686ea9c92208}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9669cee3be8db4ca,0x374bf6f986eb09cb,0xab5f3315d1f22e68,0x2541686ea9c92208}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdb03,0x2777,0xbc36,0x4be5,0x38dd,0xd474,0x83b4,0x41a7,0x5426,0xa361,0x1f00,0xc617,0xe350,0x8cb4,0x2b1c,0xaa2}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xdb03,0x2777,0xbc36,0x4be5,0x38dd,0xd474,0x83b4,0x41a7,0x5426,0xa361,0x1f00,0xc617,0xe350,0x8cb4,0x2b1c,0xaa2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2777db03,0x4be5bc36,0xd47438dd,0x41a783b4,0xa3615426,0xc6171f00,0x8cb4e350,0xaa22b1c}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2777db03,0x4be5bc36,0xd47438dd,0x41a783b4,0xa3615426,0xc6171f00,0x8cb4e350,0xaa22b1c}}}} #elif GMP_LIMB_BITS 
== 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4be5bc362777db03,0x41a783b4d47438dd,0xc6171f00a3615426,0xaa22b1c8cb4e350}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4be5bc362777db03,0x41a783b4d47438dd,0xc6171f00a3615426,0xaa22b1c8cb4e350}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb4ca,0xbe8d,0xcee3,0x9669,0x9cb,0x86eb,0xf6f9,0x374b,0x2e68,0xd1f2,0x3315,0xab5f,0x2208,0xa9c9,0x686e,0x2541}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb4ca,0xbe8d,0xcee3,0x9669,0x9cb,0x86eb,0xf6f9,0x374b,0x2e68,0xd1f2,0x3315,0xab5f,0x2208,0xa9c9,0x686e,0x2541}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbe8db4ca,0x9669cee3,0x86eb09cb,0x374bf6f9,0xd1f22e68,0xab5f3315,0xa9c92208,0x2541686e}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbe8db4ca,0x9669cee3,0x86eb09cb,0x374bf6f9,0xd1f22e68,0xab5f3315,0xa9c92208,0x2541686e}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9669cee3be8db4ca,0x374bf6f986eb09cb,0xab5f3315d1f22e68,0x2541686ea9c92208}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x9669cee3be8db4ca,0x374bf6f986eb09cb,0xab5f3315d1f22e68,0x2541686ea9c92208}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd9c7,0x9715,0x12ad,0x4a84,0xd0ee,0xb276,0x7344,0xf5a4,0xda41,0x2e90,0x1415,0xe548,0x3eb7,0x1d14,0x3d52,0x1a9f}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd9c7,0x9715,0x12ad,0x4a84,0xd0ee,0xb276,0x7344,0xf5a4,0xda41,0x2e90,0x1415,0xe548,0x3eb7,0x1d14,0x3d52,0x1a9f}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9715d9c7,0x4a8412ad,0xb276d0ee,0xf5a47344,0x2e90da41,0xe5481415,0x1d143eb7,0x1a9f3d52}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x9715d9c7,0x4a8412ad,0xb276d0ee,0xf5a47344,0x2e90da41,0xe5481415,0x1d143eb7,0x1a9f3d52}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4a8412ad9715d9c7,0xf5a47344b276d0ee,0xe54814152e90da41,0x1a9f3d521d143eb7}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x4a8412ad9715d9c7,0xf5a47344b276d0ee,0xe54814152e90da41,0x1a9f3d521d143eb7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = 
(mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc 
= 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xda65,0xdf46,0xe771,0xcb34,0x84e5,0xc375,0xfb7c,0x1ba5,0x1734,0xe8f9,0x998a,0x55af,0x9104,0x54e4,0xb437,0x12a0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xda65,0xdf46,0xe771,0xcb34,0x84e5,0xc375,0xfb7c,0x1ba5,0x1734,0xe8f9,0x998a,0x55af,0x9104,0x54e4,0xb437,0x12a0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdf46da65,0xcb34e771,0xc37584e5,0x1ba5fb7c,0xe8f91734,0x55af998a,0x54e49104,0x12a0b437}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xdf46da65,0xcb34e771,0xc37584e5,0x1ba5fb7c,0xe8f91734,0x55af998a,0x54e49104,0x12a0b437}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcb34e771df46da65,0x1ba5fb7cc37584e5,0x55af998ae8f91734,0x12a0b43754e49104}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xcb34e771df46da65,0x1ba5fb7cc37584e5,0x55af998ae8f91734,0x12a0b43754e49104}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8412,0x5a4d,0xe982,0x7e48,0x619e,0x6d03,0x297c,0x2598,0x6aff,0x24ff,0xc89e,0x51c8,0x6f8,0x6965,0x7e7b,0x13de}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8412,0x5a4d,0xe982,0x7e48,0x619e,0x6d03,0x297c,0x2598,0x6aff,0x24ff,0xc89e,0x51c8,0x6f8,0x6965,0x7e7b,0x13de}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a4d8412,0x7e48e982,0x6d03619e,0x2598297c,0x24ff6aff,0x51c8c89e,0x696506f8,0x13de7e7b}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a4d8412,0x7e48e982,0x6d03619e,0x2598297c,0x24ff6aff,0x51c8c89e,0x696506f8,0x13de7e7b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7e48e9825a4d8412,0x2598297c6d03619e,0x51c8c89e24ff6aff,0x13de7e7b696506f8}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7e48e9825a4d8412,0x2598297c6d03619e,0x51c8c89e24ff6aff,0x13de7e7b696506f8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, 
._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6e7d,0xd8b2,0x8be,0xf2e3,0x7c3e,0x1572,0x7609,0xf4ae,0x8366,0xb93e,0x53ec,0x9b03,0x6573,0xae18,0x41b0,0x707}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x6e7d,0xd8b2,0x8be,0xf2e3,0x7c3e,0x1572,0x7609,0xf4ae,0x8366,0xb93e,0x53ec,0x9b03,0x6573,0xae18,0x41b0,0x707}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd8b26e7d,0xf2e308be,0x15727c3e,0xf4ae7609,0xb93e8366,0x9b0353ec,0xae186573,0x70741b0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xd8b26e7d,0xf2e308be,0x15727c3e,0xf4ae7609,0xb93e8366,0x9b0353ec,0xae186573,0x70741b0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf2e308bed8b26e7d,0xf4ae760915727c3e,0x9b0353ecb93e8366,0x70741b0ae186573}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf2e308bed8b26e7d,0xf4ae760915727c3e,0x9b0353ecb93e8366,0x70741b0ae186573}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8412,0x5a4d,0xe982,0x7e48,0x619e,0x6d03,0x297c,0x2598,0x6aff,0x24ff,0xc89e,0x51c8,0x6f8,0x6965,0x7e7b,0x13de}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8412,0x5a4d,0xe982,0x7e48,0x619e,0x6d03,0x297c,0x2598,0x6aff,0x24ff,0xc89e,0x51c8,0x6f8,0x6965,0x7e7b,0x13de}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a4d8412,0x7e48e982,0x6d03619e,0x2598297c,0x24ff6aff,0x51c8c89e,0x696506f8,0x13de7e7b}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x5a4d8412,0x7e48e982,0x6d03619e,0x2598297c,0x24ff6aff,0x51c8c89e,0x696506f8,0x13de7e7b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7e48e9825a4d8412,0x2598297c6d03619e,0x51c8c89e24ff6aff,0x13de7e7b696506f8}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7e48e9825a4d8412,0x2598297c6d03619e,0x51c8c89e24ff6aff,0x13de7e7b696506f8}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1595,0x819b,0xe0c3,0x8b65,0xe55f,0x5790,0xb373,0x30e9,0xe798,0x6bc0,0x74b1,0xb6c5,0xa184,0xbb4c,0x3cca,0xcd7}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1595,0x819b,0xe0c3,0x8b65,0xe55f,0x5790,0xb373,0x30e9,0xe798,0x6bc0,0x74b1,0xb6c5,0xa184,0xbb4c,0x3cca,0xcd7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x819b1595,0x8b65e0c3,0x5790e55f,0x30e9b373,0x6bc0e798,0xb6c574b1,0xbb4ca184,0xcd73cca}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x819b1595,0x8b65e0c3,0x5790e55f,0x30e9b373,0x6bc0e798,0xb6c574b1,0xbb4ca184,0xcd73cca}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x8b65e0c3819b1595,0x30e9b3735790e55f,0xb6c574b16bc0e798,0xcd73ccabb4ca184}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = 
(mp_limb_t[]) {0x8b65e0c3819b1595,0x30e9b3735790e55f,0xb6c574b16bc0e798,0xcd73ccabb4ca184}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif 
GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc209,0x2d26,0x74c1,0x3f24,0xb0cf,0x3681,0x14be,0x92cc,0xb57f,0x127f,0x644f,0x28e4,0x837c,0xb4b2,0x3f3d,0x9ef}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc209,0x2d26,0x74c1,0x3f24,0xb0cf,0x3681,0x14be,0x92cc,0xb57f,0x127f,0x644f,0x28e4,0x837c,0xb4b2,0x3f3d,0x9ef}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2d26c209,0x3f2474c1,0x3681b0cf,0x92cc14be,0x127fb57f,0x28e4644f,0xb4b2837c,0x9ef3f3d}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x2d26c209,0x3f2474c1,0x3681b0cf,0x92cc14be,0x127fb57f,0x28e4644f,0xb4b2837c,0x9ef3f3d}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3f2474c12d26c209,0x92cc14be3681b0cf,0x28e4644f127fb57f,0x9ef3f3db4b2837c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x3f2474c12d26c209,0x92cc14be3681b0cf,0x28e4644f127fb57f,0x9ef3f3db4b2837c}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb376,0x7694,0x643d,0xf407,0xf5c,0x6e43,0xd345,0x5c1f,0xecc4,0x777d,0x1005,0x24fe,0x88e4,0x536a,0x5c85,0xe09}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb376,0x7694,0x643d,0xf407,0xf5c,0x6e43,0xd345,0x5c1f,0xecc4,0x777d,0x1005,0x24fe,0x88e4,0x536a,0x5c85,0xe09}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7694b376,0xf407643d,0x6e430f5c,0x5c1fd345,0x777decc4,0x24fe1005,0x536a88e4,0xe095c85}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7694b376,0xf407643d,0x6e430f5c,0x5c1fd345,0x777decc4,0x24fe1005,0x536a88e4,0xe095c85}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf407643d7694b376,0x5c1fd3456e430f5c,0x24fe1005777decc4,0xe095c85536a88e4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf407643d7694b376,0x5c1fd3456e430f5c,0x24fe1005777decc4,0xe095c85536a88e4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 
-{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9427,0xa69c,0xda24,0xb3a7,0x4f9a,0x22fc,0xa39a,0xcb05,0xd93e,0x923d,0xb97d,0xad95,0x3374,0x96bd,0xbdeb,0x51}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9427,0xa69c,0xda24,0xb3a7,0x4f9a,0x22fc,0xa39a,0xcb05,0xd93e,0x923d,0xb97d,0xad95,0x3374,0x96bd,0xbdeb,0x51}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa69c9427,0xb3a7da24,0x22fc4f9a,0xcb05a39a,0x923dd93e,0xad95b97d,0x96bd3374,0x51bdeb}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa69c9427,0xb3a7da24,0x22fc4f9a,0xcb05a39a,0x923dd93e,0xad95b97d,0x96bd3374,0x51bdeb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb3a7da24a69c9427,0xcb05a39a22fc4f9a,0xad95b97d923dd93e,0x51bdeb96bd3374}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb3a7da24a69c9427,0xcb05a39a22fc4f9a,0xad95b97d923dd93e,0x51bdeb96bd3374}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb376,0x7694,0x643d,0xf407,0xf5c,0x6e43,0xd345,0x5c1f,0xecc4,0x777d,0x1005,0x24fe,0x88e4,0x536a,0x5c85,0xe09}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xb376,0x7694,0x643d,0xf407,0xf5c,0x6e43,0xd345,0x5c1f,0xecc4,0x777d,0x1005,0x24fe,0x88e4,0x536a,0x5c85,0xe09}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7694b376,0xf407643d,0x6e430f5c,0x5c1fd345,0x777decc4,0x24fe1005,0x536a88e4,0xe095c85}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7694b376,0xf407643d,0x6e430f5c,0x5c1fd345,0x777decc4,0x24fe1005,0x536a88e4,0xe095c85}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf407643d7694b376,0x5c1fd3456e430f5c,0x24fe1005777decc4,0xe095c85536a88e4}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xf407643d7694b376,0x5c1fd3456e430f5c,0x24fe1005777decc4,0xe095c85536a88e4}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x1f4f,0xcff8,0x8a18,0x405f,0xbfc2,0x4b46,0x2fab,0x911a,0x1385,0xe540,0x5687,0x7768,0x556f,0xbcad,0x9e99,0xdb7}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x1f4f,0xcff8,0x8a18,0x405f,0xbfc2,0x4b46,0x2fab,0x911a,0x1385,0xe540,0x5687,0x7768,0x556f,0xbcad,0x9e99,0xdb7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcff81f4f,0x405f8a18,0x4b46bfc2,0x911a2fab,0xe5401385,0x77685687,0xbcad556f,0xdb79e99}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcff81f4f,0x405f8a18,0x4b46bfc2,0x911a2fab,0xe5401385,0x77685687,0xbcad556f,0xdb79e99}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x405f8a18cff81f4f,0x911a2fab4b46bfc2,0x77685687e5401385,0xdb79e99bcad556f}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x405f8a18cff81f4f,0x911a2fab4b46bfc2,0x77685687e5401385,0xdb79e99bcad556f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif 
GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x59bb,0xbb4a,0xb21e,0x7a03,0x87ae,0xb721,0xe9a2,0x2e0f,0xf662,0xbbbe,0x802,0x127f,0x4472,0xa9b5,0xae42,0x704}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x59bb,0xbb4a,0xb21e,0x7a03,0x87ae,0xb721,0xe9a2,0x2e0f,0xf662,0xbbbe,0x802,0x127f,0x4472,0xa9b5,0xae42,0x704}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbb4a59bb,0x7a03b21e,0xb72187ae,0x2e0fe9a2,0xbbbef662,0x127f0802,0xa9b54472,0x704ae42}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbb4a59bb,0x7a03b21e,0xb72187ae,0x2e0fe9a2,0xbbbef662,0x127f0802,0xa9b54472,0x704ae42}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7a03b21ebb4a59bb,0x2e0fe9a2b72187ae,0x127f0802bbbef662,0x704ae42a9b54472}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x7a03b21ebb4a59bb,0x2e0fe9a2b72187ae,0x127f0802bbbef662,0x704ae42a9b54472}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7a2a,0xcc34,0x1fb9,0x5b4e,0x6acf,0x4f0f,0xbb68,0x211d,0xa57b,0xae74,0x782,0xa512,0xd75c,0xb576,0x5af5,0xa035}}} +{{{._mp_alloc = 0, 
._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7a2a,0xcc34,0x1fb9,0x5b4e,0x6acf,0x4f0f,0xbb68,0x211d,0xa57b,0xae74,0x782,0xa512,0xd75c,0xb576,0x5af5,0xa035}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcc347a2a,0x5b4e1fb9,0x4f0f6acf,0x211dbb68,0xae74a57b,0xa5120782,0xb576d75c,0xa0355af5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcc347a2a,0x5b4e1fb9,0x4f0f6acf,0x211dbb68,0xae74a57b,0xa5120782,0xb576d75c,0xa0355af5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5b4e1fb9cc347a2a,0x211dbb684f0f6acf,0xa5120782ae74a57b,0xa0355af5b576d75c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5b4e1fb9cc347a2a,0x211dbb684f0f6acf,0xa5120782ae74a57b,0xa0355af5b576d75c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa3e3,0x12fb,0x32f3,0xb40f,0x4bbe,0x537d,0xbefc,0xdda9,0x8954,0xaca9,0xaaf3,0xc020,0x17da,0xf48f,0x88fd,0x21a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xa3e3,0x12fb,0x32f3,0xb40f,0x4bbe,0x537d,0xbefc,0xdda9,0x8954,0xaca9,0xaaf3,0xc020,0x17da,0xf48f,0x88fd,0x21a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x12fba3e3,0xb40f32f3,0x537d4bbe,0xdda9befc,0xaca98954,0xc020aaf3,0xf48f17da,0x21a88fd}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x12fba3e3,0xb40f32f3,0x537d4bbe,0xdda9befc,0xaca98954,0xc020aaf3,0xf48f17da,0x21a88fd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb40f32f312fba3e3,0xdda9befc537d4bbe,0xc020aaf3aca98954,0x21a88fdf48f17da}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xb40f32f312fba3e3,0xdda9befc537d4bbe,0xc020aaf3aca98954,0x21a88fdf48f17da}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x7a2a,0xcc34,0x1fb9,0x5b4e,0x6acf,0x4f0f,0xbb68,0x211d,0xa57b,0xae74,0x782,0xa512,0xd75c,0xb576,0x5af5,0xa035}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x7a2a,0xcc34,0x1fb9,0x5b4e,0x6acf,0x4f0f,0xbb68,0x211d,0xa57b,0xae74,0x782,0xa512,0xd75c,0xb576,0x5af5,0xa035}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcc347a2a,0x5b4e1fb9,0x4f0f6acf,0x211dbb68,0xae74a57b,0xa5120782,0xb576d75c,0xa0355af5}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xcc347a2a,0x5b4e1fb9,0x4f0f6acf,0x211dbb68,0xae74a57b,0xa5120782,0xb576d75c,0xa0355af5}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5b4e1fb9cc347a2a,0x211dbb684f0f6acf,0xa5120782ae74a57b,0xa0355af5b576d75c}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x5b4e1fb9cc347a2a,0x211dbb684f0f6acf,0xa5120782ae74a57b,0xa0355af5b576d75c}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd647,0xb938,0xecc6,0xa73e,0x1f10,0xfb92,0xfc6b,0x4373,0x1c26,0x1cb,0x5c8f,0xe4f1,0xbf81,0xc0e7,0xd1f7,0x9e1a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xd647,0xb938,0xecc6,0xa73e,0x1f10,0xfb92,0xfc6b,0x4373,0x1c26,0x1cb,0x5c8f,0xe4f1,0xbf81,0xc0e7,0xd1f7,0x9e1a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb938d647,0xa73eecc6,0xfb921f10,0x4373fc6b,0x1cb1c26,0xe4f15c8f,0xc0e7bf81,0x9e1ad1f7}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xb938d647,0xa73eecc6,0xfb921f10,0x4373fc6b,0x1cb1c26,0xe4f15c8f,0xc0e7bf81,0x9e1ad1f7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa73eecc6b938d647,0x4373fc6bfb921f10,0xe4f15c8f01cb1c26,0x9e1ad1f7c0e7bf81}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa73eecc6b938d647,0x4373fc6bfb921f10,0xe4f15c8f01cb1c26,0x9e1ad1f7c0e7bf81}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) 
{0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3d15,0xe61a,0xfdc,0xada7,0xb567,0x2787,0xddb4,0x908e,0x52bd,0x573a,0x3c1,0x5289,0x6bae,0xdabb,0xad7a,0x501a}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x3d15,0xe61a,0xfdc,0xada7,0xb567,0x2787,0xddb4,0x908e,0x52bd,0x573a,0x3c1,0x5289,0x6bae,0xdabb,0xad7a,0x501a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe61a3d15,0xada70fdc,0x2787b567,0x908eddb4,0x573a52bd,0x528903c1,0xdabb6bae,0x501aad7a}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe61a3d15,0xada70fdc,0x2787b567,0x908eddb4,0x573a52bd,0x528903c1,0xdabb6bae,0x501aad7a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xada70fdce61a3d15,0x908eddb42787b567,0x528903c1573a52bd,0x501aad7adabb6bae}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = 
(mp_limb_t[]) {0xada70fdce61a3d15,0x908eddb42787b567,0x528903c1573a52bd,0x501aad7adabb6bae}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81d6,0x1f29,0xf1a,0x365e,0x8f4a,0x95c8,0x38b1,0x87f1,0xb9ff,0x9cca,0x8239,0x1cb1,0x70f,0x8fde,0x5f3f,0x25be}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81d6,0x1f29,0xf1a,0x365e,0x8f4a,0x95c8,0x38b1,0x87f1,0xb9ff,0x9cca,0x8239,0x1cb1,0x70f,0x8fde,0x5f3f,0x25be}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1f2981d6,0x365e0f1a,0x95c88f4a,0x87f138b1,0x9ccab9ff,0x1cb18239,0x8fde070f,0x25be5f3f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1f2981d6,0x365e0f1a,0x95c88f4a,0x87f138b1,0x9ccab9ff,0x1cb18239,0x8fde070f,0x25be5f3f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x365e0f1a1f2981d6,0x87f138b195c88f4a,0x1cb182399ccab9ff,0x25be5f3f8fde070f}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x365e0f1a1f2981d6,0x87f138b195c88f4a,0x1cb182399ccab9ff,0x25be5f3f8fde070f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbe61,0x7c4c,0xa49c,0x48f9,0x3fb4,0xb80a,0xf823,0xd427,0x593e,0x9ec7,0x3a56,0xda06,0x551e,0x2875,0xc2cb,0xfb0}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc375,0xa2dc,0x6a7d,0xed64,0x4f95,0xddbe,0x408d,0xb3c9,0x60c0,0xfe03,0x47e2,0x42ab,0xb1f0,0x6768,0x9c74,0x160d}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7c4cbe61,0x48f9a49c,0xb80a3fb4,0xd427f823,0x9ec7593e,0xda063a56,0x2875551e,0xfb0c2cb}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa2dcc375,0xed646a7d,0xddbe4f95,0xb3c9408d,0xfe0360c0,0x42ab47e2,0x6768b1f0,0x160d9c74}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x48f9a49c7c4cbe61,0xd427f823b80a3fb4,0xda063a569ec7593e,0xfb0c2cb2875551e}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) 
{0xed646a7da2dcc375,0xb3c9408dddbe4f95,0x42ab47e2fe0360c0,0x160d9c746768b1f0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81d6,0x1f29,0xf1a,0x365e,0x8f4a,0x95c8,0x38b1,0x87f1,0xb9ff,0x9cca,0x8239,0x1cb1,0x70f,0x8fde,0x5f3f,0x25be}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x81d6,0x1f29,0xf1a,0x365e,0x8f4a,0x95c8,0x38b1,0x87f1,0xb9ff,0x9cca,0x8239,0x1cb1,0x70f,0x8fde,0x5f3f,0x25be}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1f2981d6,0x365e0f1a,0x95c88f4a,0x87f138b1,0x9ccab9ff,0x1cb18239,0x8fde070f,0x25be5f3f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x1f2981d6,0x365e0f1a,0x95c88f4a,0x87f138b1,0x9ccab9ff,0x1cb18239,0x8fde070f,0x25be5f3f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x365e0f1a1f2981d6,0x87f138b195c88f4a,0x1cb182399ccab9ff,0x25be5f3f8fde070f}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x365e0f1a1f2981d6,0x87f138b195c88f4a,0x1cb182399ccab9ff,0x25be5f3f8fde070f}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc375,0xa2dc,0x6a7d,0xed64,0x4f95,0xddbe,0x408d,0xb3c9,0x60c0,0xfe03,0x47e2,0x42ab,0xb1f0,0x6768,0x9c74,0x160d}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbe61,0x7c4c,0xa49c,0x48f9,0x3fb4,0xb80a,0xf823,0xd427,0x593e,0x9ec7,0x3a56,0xda06,0x551e,0x2875,0xc2cb,0xfb0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xa2dcc375,0xed646a7d,0xddbe4f95,0xb3c9408d,0xfe0360c0,0x42ab47e2,0x6768b1f0,0x160d9c74}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7c4cbe61,0x48f9a49c,0xb80a3fb4,0xd427f823,0x9ec7593e,0xda063a56,0x2875551e,0xfb0c2cb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xed646a7da2dcc375,0xb3c9408dddbe4f95,0x42ab47e2fe0360c0,0x160d9c746768b1f0}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x48f9a49c7c4cbe61,0xd427f823b80a3fb4,0xda063a569ec7593e,0xfb0c2cb2875551e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, 
._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = 
(mp_limb_t[]) {0xc0eb,0xf94,0x78d,0x1b2f,0x47a5,0xcae4,0x9c58,0xc3f8,0x5cff,0xce65,0xc11c,0x8e58,0x387,0xc7ef,0x2f9f,0x12df}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc0eb,0xf94,0x78d,0x1b2f,0x47a5,0xcae4,0x9c58,0xc3f8,0x5cff,0xce65,0xc11c,0x8e58,0x387,0xc7ef,0x2f9f,0x12df}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf94c0eb,0x1b2f078d,0xcae447a5,0xc3f89c58,0xce655cff,0x8e58c11c,0xc7ef0387,0x12df2f9f}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf94c0eb,0x1b2f078d,0xcae447a5,0xc3f89c58,0xce655cff,0x8e58c11c,0xc7ef0387,0x12df2f9f}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1b2f078d0f94c0eb,0xc3f89c58cae447a5,0x8e58c11cce655cff,0x12df2f9fc7ef0387}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x1b2f078d0f94c0eb,0xc3f89c58cae447a5,0x8e58c11cce655cff,0x12df2f9fc7ef0387}}}} #endif , &MAXORD_O0}, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, {{ +, {{{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5a86,0x1729,0x8ced,0x8280,0xd48f,0x1e0f,0x5e39,0x24b3,0x74ba,0xa294,0xd9f3,0x4e2e,0x8cc1,0xee6b,0xdd2,0x3079}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5a86,0x1729,0x8ced,0x8280,0xd48f,0x1e0f,0x5e39,0x24b3,0x74ba,0xa294,0xd9f3,0x4e2e,0x8cc1,0xee6b,0xdd2,0x3079}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17295a86,0x82808ced,0x1e0fd48f,0x24b35e39,0xa29474ba,0x4e2ed9f3,0xee6b8cc1,0x30790dd2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17295a86,0x82808ced,0x1e0fd48f,0x24b35e39,0xa29474ba,0x4e2ed9f3,0xee6b8cc1,0x30790dd2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x82808ced17295a86,0x24b35e391e0fd48f,0x4e2ed9f3a29474ba,0x30790dd2ee6b8cc1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x82808ced17295a86,0x24b35e391e0fd48f,0x4e2ed9f3a29474ba,0x30790dd2ee6b8cc1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) 
{0x9203,0x57ee,0x3867,0xdf50,0xd8ad,0xbe9c,0x9e30,0x7a77,0xcd0f,0x77d9,0xbb7f,0x65f1,0x1b16,0xbbf5,0xe5c0,0x2563}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x9203,0x57ee,0x3867,0xdf50,0xd8ad,0xbe9c,0x9e30,0x7a77,0xcd0f,0x77d9,0xbb7f,0x65f1,0x1b16,0xbbf5,0xe5c0,0x2563}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57ee9203,0xdf503867,0xbe9cd8ad,0x7a779e30,0x77d9cd0f,0x65f1bb7f,0xbbf51b16,0x2563e5c0}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x57ee9203,0xdf503867,0xbe9cd8ad,0x7a779e30,0x77d9cd0f,0x65f1bb7f,0xbbf51b16,0x2563e5c0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdf50386757ee9203,0x7a779e30be9cd8ad,0x65f1bb7f77d9cd0f,0x2563e5c0bbf51b16}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xdf50386757ee9203,0x7a779e30be9cd8ad,0x65f1bb7f77d9cd0f,0x2563e5c0bbf51b16}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5a86,0x1729,0x8ced,0x8280,0xd48f,0x1e0f,0x5e39,0x24b3,0x74ba,0xa294,0xd9f3,0x4e2e,0x8cc1,0xee6b,0xdd2,0x3079}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x5a86,0x1729,0x8ced,0x8280,0xd48f,0x1e0f,0x5e39,0x24b3,0x74ba,0xa294,0xd9f3,0x4e2e,0x8cc1,0xee6b,0xdd2,0x3079}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17295a86,0x82808ced,0x1e0fd48f,0x24b35e39,0xa29474ba,0x4e2ed9f3,0xee6b8cc1,0x30790dd2}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x17295a86,0x82808ced,0x1e0fd48f,0x24b35e39,0xa29474ba,0x4e2ed9f3,0xee6b8cc1,0x30790dd2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x82808ced17295a86,0x24b35e391e0fd48f,0x4e2ed9f3a29474ba,0x30790dd2ee6b8cc1}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x82808ced17295a86,0x24b35e391e0fd48f,0x4e2ed9f3a29474ba,0x30790dd2ee6b8cc1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc883,0xbf3a,0x5485,0xa330,0xfbe1,0x5f72,0xc008,0xaa3b,0xa7aa,0x2aba,0x1e74,0xe83d,0x71aa,0x3276,0x2812,0xb15}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xc883,0xbf3a,0x5485,0xa330,0xfbe1,0x5f72,0xc008,0xaa3b,0xa7aa,0x2aba,0x1e74,0xe83d,0x71aa,0x3276,0x2812,0xb15}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbf3ac883,0xa3305485,0x5f72fbe1,0xaa3bc008,0x2abaa7aa,0xe83d1e74,0x327671aa,0xb152812}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xbf3ac883,0xa3305485,0x5f72fbe1,0xaa3bc008,0x2abaa7aa,0xe83d1e74,0x327671aa,0xb152812}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa3305485bf3ac883,0xaa3bc0085f72fbe1,0xe83d1e742abaa7aa,0xb152812327671aa}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xa3305485bf3ac883,0xaa3bc0085f72fbe1,0xe83d1e742abaa7aa,0xb152812327671aa}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} 
+{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif }, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 
0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}}, +}}}}, #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad43,0x8b94,0x4676,0xc140,0xea47,0x8f07,0xaf1c,0x1259,0x3a5d,0xd14a,0x6cf9,0xa717,0xc660,0x7735,0x86e9,0x183c}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xad43,0x8b94,0x4676,0xc140,0xea47,0x8f07,0xaf1c,0x1259,0x3a5d,0xd14a,0x6cf9,0xa717,0xc660,0x7735,0x86e9,0x183c}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8b94ad43,0xc1404676,0x8f07ea47,0x1259af1c,0xd14a3a5d,0xa7176cf9,0x7735c660,0x183c86e9}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x8b94ad43,0xc1404676,0x8f07ea47,0x1259af1c,0xd14a3a5d,0xa7176cf9,0x7735c660,0x183c86e9}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc14046768b94ad43,0x1259af1c8f07ea47,0xa7176cf9d14a3a5d,0x183c86e97735c660}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc14046768b94ad43,0x1259af1c8f07ea47,0xa7176cf9d14a3a5d,0x183c86e97735c660}}}} #endif , &MAXORD_O0}}; const quat_alg_elem_t CONJUGATING_ELEMENTS[7] = {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, 
._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}} +{{{._mp_alloc = 0, ._mp_size = 0, ._mp_d = (mp_limb_t[]) {0x0}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8fcd,0xe605,0x8b19,0xe24f,0x42a8,0x5b5f,0x7aae,0x78f3,0x828e,0x7553,0x5216,0x7176,0x559,0x367e,0x938b,0x2fe3}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x8fcd,0xe605,0x8b19,0xe24f,0x42a8,0x5b5f,0x7aae,0x78f3,0x828e,0x7553,0x5216,0x7176,0x559,0x367e,0x938b,0x2fe3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe6058fcd,0xe24f8b19,0x5b5f42a8,0x78f37aae,0x7553828e,0x71765216,0x367e0559,0x2fe3938b}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xe6058fcd,0xe24f8b19,0x5b5f42a8,0x78f37aae,0x7553828e,0x71765216,0x367e0559,0x2fe3938b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe24f8b19e6058fcd,0x78f37aae5b5f42a8,0x717652167553828e,0x2fe3938b367e0559}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xe24f8b19e6058fcd,0x78f37aae5b5f42a8,0x717652167553828e,0x2fe3938b367e0559}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x8fcd,0xe605,0x8b19,0xe24f,0x42a8,0x5b5f,0x7aae,0x78f3,0x828e,0x7553,0x5216,0x7176,0x559,0x367e,0x938b,0x2fe3}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x8fcd,0xe605,0x8b19,0xe24f,0x42a8,0x5b5f,0x7aae,0x78f3,0x828e,0x7553,0x5216,0x7176,0x559,0x367e,0x938b,0x2fe3}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xe6058fcd,0xe24f8b19,0x5b5f42a8,0x78f37aae,0x7553828e,0x71765216,0x367e0559,0x2fe3938b}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xe6058fcd,0xe24f8b19,0x5b5f42a8,0x78f37aae,0x7553828e,0x71765216,0x367e0559,0x2fe3938b}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe24f8b19e6058fcd,0x78f37aae5b5f42a8,0x717652167553828e,0x2fe3938b367e0559}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xe24f8b19e6058fcd,0x78f37aae5b5f42a8,0x717652167553828e,0x2fe3938b367e0559}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) 
{0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x2315,0x38d9,0x6fc1,0xa333,0xaeb,0xce6a,0x2a4c,0xa1c1,0x274c,0xa9fc,0xd4c6,0xb0b3,0x555b,0x7a48,0x411a,0x2bdc}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x2315,0x38d9,0x6fc1,0xa333,0xaeb,0xce6a,0x2a4c,0xa1c1,0x274c,0xa9fc,0xd4c6,0xb0b3,0x555b,0x7a48,0x411a,0x2bdc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x38d92315,0xa3336fc1,0xce6a0aeb,0xa1c12a4c,0xa9fc274c,0xb0b3d4c6,0x7a48555b,0x2bdc411a}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x38d92315,0xa3336fc1,0xce6a0aeb,0xa1c12a4c,0xa9fc274c,0xb0b3d4c6,0x7a48555b,0x2bdc411a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa3336fc138d92315,0xa1c12a4cce6a0aeb,0xb0b3d4c6a9fc274c,0x2bdc411a7a48555b}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa3336fc138d92315,0xa1c12a4cce6a0aeb,0xb0b3d4c6a9fc274c,0x2bdc411a7a48555b}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x2315,0x38d9,0x6fc1,0xa333,0xaeb,0xce6a,0x2a4c,0xa1c1,0x274c,0xa9fc,0xd4c6,0xb0b3,0x555b,0x7a48,0x411a,0x2bdc}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x2315,0x38d9,0x6fc1,0xa333,0xaeb,0xce6a,0x2a4c,0xa1c1,0x274c,0xa9fc,0xd4c6,0xb0b3,0x555b,0x7a48,0x411a,0x2bdc}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x38d92315,0xa3336fc1,0xce6a0aeb,0xa1c12a4c,0xa9fc274c,0xb0b3d4c6,0x7a48555b,0x2bdc411a}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x38d92315,0xa3336fc1,0xce6a0aeb,0xa1c12a4c,0xa9fc274c,0xb0b3d4c6,0x7a48555b,0x2bdc411a}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa3336fc138d92315,0xa1c12a4cce6a0aeb,0xb0b3d4c6a9fc274c,0x2bdc411a7a48555b}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xa3336fc138d92315,0xa1c12a4cce6a0aeb,0xb0b3d4c6a9fc274c,0x2bdc411a7a48555b}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} 
+{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe8f5,0xf5ac,0x2ee2,0xc962,0x459d,0xd9a0,0xd761,0x7c5a,0x8268,0xcf36,0x9b08,0xb7a6,0xbd23,0x7e04,0xe324,0x23ba}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe8f5,0xf5ac,0x2ee2,0xc962,0x459d,0xd9a0,0xd761,0x7c5a,0x8268,0xcf36,0x9b08,0xb7a6,0xbd23,0x7e04,0xe324,0x23ba}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5ace8f5,0xc9622ee2,0xd9a0459d,0x7c5ad761,0xcf368268,0xb7a69b08,0x7e04bd23,0x23bae324}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5ace8f5,0xc9622ee2,0xd9a0459d,0x7c5ad761,0xcf368268,0xb7a69b08,0x7e04bd23,0x23bae324}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc9622ee2f5ace8f5,0x7c5ad761d9a0459d,0xb7a69b08cf368268,0x23bae3247e04bd23}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc9622ee2f5ace8f5,0x7c5ad761d9a0459d,0xb7a69b08cf368268,0x23bae3247e04bd23}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe8f5,0xf5ac,0x2ee2,0xc962,0x459d,0xd9a0,0xd761,0x7c5a,0x8268,0xcf36,0x9b08,0xb7a6,0xbd23,0x7e04,0xe324,0x23ba}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xe8f5,0xf5ac,0x2ee2,0xc962,0x459d,0xd9a0,0xd761,0x7c5a,0x8268,0xcf36,0x9b08,0xb7a6,0xbd23,0x7e04,0xe324,0x23ba}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5ace8f5,0xc9622ee2,0xd9a0459d,0x7c5ad761,0xcf368268,0xb7a69b08,0x7e04bd23,0x23bae324}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0xf5ace8f5,0xc9622ee2,0xd9a0459d,0x7c5ad761,0xcf368268,0xb7a69b08,0x7e04bd23,0x23bae324}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc9622ee2f5ace8f5,0x7c5ad761d9a0459d,0xb7a69b08cf368268,0x23bae3247e04bd23}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0xc9622ee2f5ace8f5,0x7c5ad761d9a0459d,0xb7a69b08cf368268,0x23bae3247e04bd23}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, 
._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xfef5,0x4752,0xbb14,0x6ee3,0x5898,0x6a2,0x8282,0x1179,0xe429,0xf5d1,0x5ad8,0x642d,0x3061,0x58d,0x9c04,0x917b}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xfef5,0x4752,0xbb14,0x6ee3,0x5898,0x6a2,0x8282,0x1179,0xe429,0xf5d1,0x5ad8,0x642d,0x3061,0x58d,0x9c04,0x917b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4752fef5,0x6ee3bb14,0x6a25898,0x11798282,0xf5d1e429,0x642d5ad8,0x58d3061,0x917b9c04}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x4752fef5,0x6ee3bb14,0x6a25898,0x11798282,0xf5d1e429,0x642d5ad8,0x58d3061,0x917b9c04}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6ee3bb144752fef5,0x1179828206a25898,0x642d5ad8f5d1e429,0x917b9c04058d3061}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x6ee3bb144752fef5,0x1179828206a25898,0x642d5ad8f5d1e429,0x917b9c04058d3061}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xfef5,0x4752,0xbb14,0x6ee3,0x5898,0x6a2,0x8282,0x1179,0xe429,0xf5d1,0x5ad8,0x642d,0x3061,0x58d,0x9c04,0x917b}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xfef5,0x4752,0xbb14,0x6ee3,0x5898,0x6a2,0x8282,0x1179,0xe429,0xf5d1,0x5ad8,0x642d,0x3061,0x58d,0x9c04,0x917b}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x4752fef5,0x6ee3bb14,0x6a25898,0x11798282,0xf5d1e429,0x642d5ad8,0x58d3061,0x917b9c04}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x4752fef5,0x6ee3bb14,0x6a25898,0x11798282,0xf5d1e429,0x642d5ad8,0x58d3061,0x917b9c04}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x6ee3bb144752fef5,0x1179828206a25898,0x642d5ad8f5d1e429,0x917b9c04058d3061}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x6ee3bb144752fef5,0x1179828206a25898,0x642d5ad8f5d1e429,0x917b9c04058d3061}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = 
(mp_limb_t[]) {0x7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x7}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x7}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x7}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x7}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x7}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x7}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x7}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xc375,0xa2dc,0x6a7d,0xed64,0x4f95,0xddbe,0x408d,0xb3c9,0x60c0,0xfe03,0x47e2,0x42ab,0xb1f0,0x6768,0x9c74,0x160d}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xbe61,0x7c4c,0xa49c,0x48f9,0x3fb4,0xb80a,0xf823,0xd427,0x593e,0x9ec7,0x3a56,0xda06,0x551e,0x2875,0xc2cb,0xfb0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xa2dcc375,0xed646a7d,0xddbe4f95,0xb3c9408d,0xfe0360c0,0x42ab47e2,0x6768b1f0,0x160d9c74}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0x7c4cbe61,0x48f9a49c,0xb80a3fb4,0xd427f823,0x9ec7593e,0xda063a56,0x2875551e,0xfb0c2cb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xed646a7da2dcc375,0xb3c9408dddbe4f95,0x42ab47e2fe0360c0,0x160d9c746768b1f0}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x48f9a49c7c4cbe61,0xd427f823b80a3fb4,0xda063a569ec7593e,0xfb0c2cb2875551e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0xc375,0xa2dc,0x6a7d,0xed64,0x4f95,0xddbe,0x408d,0xb3c9,0x60c0,0xfe03,0x47e2,0x42ab,0xb1f0,0x6768,0x9c74,0x160d}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0xbe61,0x7c4c,0xa49c,0x48f9,0x3fb4,0xb80a,0xf823,0xd427,0x593e,0x9ec7,0x3a56,0xda06,0x551e,0x2875,0xc2cb,0xfb0}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xa2dcc375,0xed646a7d,0xddbe4f95,0xb3c9408d,0xfe0360c0,0x42ab47e2,0x6768b1f0,0x160d9c74}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x7c4cbe61,0x48f9a49c,0xb80a3fb4,0xd427f823,0x9ec7593e,0xda063a56,0x2875551e,0xfb0c2cb}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0xed646a7da2dcc375,0xb3c9408dddbe4f95,0x42ab47e2fe0360c0,0x160d9c746768b1f0}}} +{{{._mp_alloc = 0, ._mp_size = 4, ._mp_d = (mp_limb_t[]) {0x48f9a49c7c4cbe61,0xd427f823b80a3fb4,0xda063a569ec7593e,0xfb0c2cb2875551e}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) 
{0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}} +{{{._mp_alloc = 0, ._mp_size = -1, ._mp_d = (mp_limb_t[]) {0x1}}}} #endif -}}, { +}}}, { #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0x2}}}} #endif -, { +, {{ #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x95f7,0xbdd2,0x75d6,0x430e,0x5c58,0xbd78,0x16b0,0x1278,0xc3f8,0x16b,0xb5dc,0xbbcf,0xfa18,0x1815,0x3c32,0x624a}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x95f7,0xbdd2,0x75d6,0x430e,0x5c58,0xbd78,0x16b0,0x1278,0xc3f8,0x16b,0xb5dc,0xbbcf,0xfa18,0x1815,0x3c32,0x624a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xbdd295f7,0x430e75d6,0xbd785c58,0x127816b0,0x16bc3f8,0xbbcfb5dc,0x1815fa18,0x624a3c32}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xbdd295f7,0x430e75d6,0xbd785c58,0x127816b0,0x16bc3f8,0xbbcfb5dc,0x1815fa18,0x624a3c32}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x430e75d6bdd295f7,0x127816b0bd785c58,0xbbcfb5dc016bc3f8,0x624a3c321815fa18}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x430e75d6bdd295f7,0x127816b0bd785c58,0xbbcfb5dc016bc3f8,0x624a3c321815fa18}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x95f7,0xbdd2,0x75d6,0x430e,0x5c58,0xbd78,0x16b0,0x1278,0xc3f8,0x16b,0xb5dc,0xbbcf,0xfa18,0x1815,0x3c32,0x624a}}} +{{{._mp_alloc = 0, ._mp_size = -16, ._mp_d = (mp_limb_t[]) {0x95f7,0xbdd2,0x75d6,0x430e,0x5c58,0xbd78,0x16b0,0x1278,0xc3f8,0x16b,0xb5dc,0xbbcf,0xfa18,0x1815,0x3c32,0x624a}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xbdd295f7,0x430e75d6,0xbd785c58,0x127816b0,0x16bc3f8,0xbbcfb5dc,0x1815fa18,0x624a3c32}}} +{{{._mp_alloc = 0, ._mp_size = -8, ._mp_d = (mp_limb_t[]) {0xbdd295f7,0x430e75d6,0xbd785c58,0x127816b0,0x16bc3f8,0xbbcfb5dc,0x1815fa18,0x624a3c32}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x430e75d6bdd295f7,0x127816b0bd785c58,0xbbcfb5dc016bc3f8,0x624a3c321815fa18}}} +{{{._mp_alloc = 0, ._mp_size = -4, ._mp_d = (mp_limb_t[]) {0x430e75d6bdd295f7,0x127816b0bd785c58,0xbbcfb5dc016bc3f8,0x624a3c321815fa18}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}}} #elif 
GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}}} #endif , #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}} +{{{._mp_alloc = 0, ._mp_size = 1, ._mp_d = (mp_limb_t[]) {0xd}}}} #endif -}}}; +}}}}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sign.c index 9216bbe4d3..9520a6f7fd 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sign.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sign.c @@ -31,12 +31,12 @@ compute_challenge_ideal_signature(quat_left_ideal_t *lideal_chall_two, const sig // vec is a vector [1, chall_coeff] coefficients encoding the kernel of the challenge // isogeny as B[0] + chall_coeff*B[1] where B is the canonical basis of the // 2^TORSION_EVEN_POWER torsion of EA - ibz_set(&vec[0], 1); - ibz_copy_digit_array(&vec[1], sig->chall_coeff); + ibz_set(&vec.v[0], 1); + ibz_copy_digit_array(&vec.v[1], sig->chall_coeff); // now we compute the ideal associated to the challenge // for that, we need to find vec such that - // the kernel of the challenge isogeny is generated by vec[0]*B0[0] + vec[1]*B0[1] where B0 + // the kernel of the challenge isogeny is generated by vec.v[0]*B0[0] + vec.v[1]*B0[1] where B0 // is the image through the secret key isogeny of the canonical basis E0 ibz_mat_2x2_eval(&vec, &(sk->mat_BAcan_to_BA0_two), &vec); @@ -459,16 +459,16 @@ compute_and_set_basis_change_matrix(signature_t *sig, change_of_basis_matrix_tate(&mat_Bchall_can_to_Bchall, B_chall_2, &B_can_chall, E_chall, f); // Assert all values in the matrix are of the expected size for packing - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][0]) <= SQIsign_response_length + HD_extra_torsion); - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[0][1]) <= SQIsign_response_length + HD_extra_torsion); - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][0]) <= SQIsign_response_length + HD_extra_torsion); - assert(ibz_bitsize(&mat_Bchall_can_to_Bchall[1][1]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[0][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[0][1]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[1][0]) <= SQIsign_response_length + HD_extra_torsion); + assert(ibz_bitsize(&mat_Bchall_can_to_Bchall.m[1][1]) <= SQIsign_response_length + HD_extra_torsion); // Set the basis change matrix to signature - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][0], &(mat_Bchall_can_to_Bchall[0][0])); - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][1], &(mat_Bchall_can_to_Bchall[0][1])); - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][0], &(mat_Bchall_can_to_Bchall[1][0])); - ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][1], &(mat_Bchall_can_to_Bchall[1][1])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][0], &(mat_Bchall_can_to_Bchall.m[0][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[0][1], &(mat_Bchall_can_to_Bchall.m[0][1])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][0], 
&(mat_Bchall_can_to_Bchall.m[1][0])); + ibz_to_digit_array(sig->mat_Bchall_can_to_B_chall[1][1], &(mat_Bchall_can_to_Bchall.m[1][1])); // Finalise the matrices ibz_mat_2x2_finalize(&mat_Bchall_can_to_Bchall); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/torsion_constants.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/torsion_constants.c index 6fb2f97637..9f466eb17e 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/torsion_constants.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/torsion_constants.c @@ -4,40 +4,40 @@ const ibz_t TWO_TO_SECURITY_BITS = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 9, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 5, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x1}}}} #endif ; const ibz_t TORSION_PLUS_2POWER = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x10}}} +{{{._mp_alloc = 0, ._mp_size = 32, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x10}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x100000}}} +{{{._mp_alloc = 0, ._mp_size = 16, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x100000}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x10000000000000}}} +{{{._mp_alloc = 0, ._mp_size = 8, ._mp_d = (mp_limb_t[]) {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x10000000000000}}}} #endif ; const ibz_t SEC_DEGREE = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 65, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 65, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) 
{0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #endif ; const ibz_t COM_DEGREE = #if 0 #elif GMP_LIMB_BITS == 16 -{{._mp_alloc = 0, ._mp_size = 65, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 65, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 32 -{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 33, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #elif GMP_LIMB_BITS == 64 -{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}} +{{{._mp_alloc = 0, ._mp_size = 17, ._mp_d = (mp_limb_t[]) {0x283,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1}}}} #endif ; diff --git a/tests/test_leaks.py b/tests/test_leaks.py index f75fece11a..5346f7d32e 100644 --- a/tests/test_leaks.py +++ b/tests/test_leaks.py @@ -21,7 +21,7 @@ def test_sig_leak(sig_name): if not(helpers.is_sig_enabled_by_name(sig_name)): pytest.skip('Not enabled') if sys.platform != "linux" or os.system("grep ubuntu /etc/os-release") != 0 or os.system("uname -a | grep x86_64") != 0: pytest.skip('Leak testing not supported on this platform') helpers.run_subprocess( - ["valgrind", "-s", "--error-exitcode=1", "--leak-check=full", "--show-leak-kinds=all", helpers.path_to_executable('test_sig'), sig_name], + ["valgrind", "-s", "--max-stackframe=4116160", "--error-exitcode=1", "--leak-check=full", "--show-leak-kinds=all", helpers.path_to_executable('test_sig'), sig_name], ) @helpers.filtered_test From 6aa8065d904013265568ab3ad3670b2264450c13 Mon Sep 17 00:00:00 2001 From: Basil Hess Date: Mon, 18 Aug 2025 15:21:51 +0200 Subject: [PATCH 14/19] new sqisign pull [full tests] Signed-off-by: Basil Hess --- docs/algorithms/sig/sqisign.md | 2 +- docs/algorithms/sig/sqisign.yml | 2 +- .../copy_from_upstream/copy_from_upstream.yml | 2 +- .../sqisign/the-sqisign_sqisign_lvl1_ref/fp.c | 2 +- .../sqisign/the-sqisign_sqisign_lvl1_ref/fp.h | 10 ++- .../fp_p5248_32.c | 70 +++++++++--------- .../fp_p5248_64.c | 70 +++++++++--------- .../sqisign/the-sqisign_sqisign_lvl3_ref/fp.c | 2 +- .../sqisign/the-sqisign_sqisign_lvl3_ref/fp.h | 10 ++- .../fp_p65376_32.c | 70 +++++++++--------- .../fp_p65376_64.c | 70 +++++++++--------- .../sqisign/the-sqisign_sqisign_lvl5_ref/fp.c | 2 +- .../sqisign/the-sqisign_sqisign_lvl5_ref/fp.h | 10 ++- .../fp_p27500_32.c | 72 +++++++++---------- .../fp_p27500_64.c | 70 +++++++++--------- 15 files changed, 238 insertions(+), 226 deletions(-) diff --git a/docs/algorithms/sig/sqisign.md b/docs/algorithms/sig/sqisign.md index 1fd0517a55..7eafbe9733 100644 --- 
a/docs/algorithms/sig/sqisign.md +++ b/docs/algorithms/sig/sqisign.md @@ -6,7 +6,7 @@ - **Authors' website**: https://sqisign.org/ - **Specification version**: Round 2. - **Primary Source**: - - **Source**: https://github.com/bhess/the-sqisign/commit/f86bf0851967e6a1daf3ced46af22c9e92f08913 + - **Source**: https://github.com/bhess/the-sqisign/commit/70faebf85d1536f16db4386af9a65d43f1015767 - **Implementation license (SPDX-Identifier)**: Apache-2.0 diff --git a/docs/algorithms/sig/sqisign.yml b/docs/algorithms/sig/sqisign.yml index 827179b2fe..3fe126dee2 100644 --- a/docs/algorithms/sig/sqisign.yml +++ b/docs/algorithms/sig/sqisign.yml @@ -36,7 +36,7 @@ website: https://sqisign.org/ nist-round: 2 spec-version: Round 2 primary-upstream: - source: https://github.com/bhess/the-sqisign/commit/f86bf0851967e6a1daf3ced46af22c9e92f08913 + source: https://github.com/bhess/the-sqisign/commit/70faebf85d1536f16db4386af9a65d43f1015767 spdx-license-identifier: Apache-2.0 parameter-sets: - name: SQIsign-lvl1 diff --git a/scripts/copy_from_upstream/copy_from_upstream.yml b/scripts/copy_from_upstream/copy_from_upstream.yml index f3b86e99c1..d208c0d253 100644 --- a/scripts/copy_from_upstream/copy_from_upstream.yml +++ b/scripts/copy_from_upstream/copy_from_upstream.yml @@ -96,7 +96,7 @@ upstreams: name: the-sqisign git_url: https://github.com/bhess/the-sqisign.git git_branch: oqs - git_commit: f86bf0851967e6a1daf3ced46af22c9e92f08913 + git_commit: 70faebf85d1536f16db4386af9a65d43f1015767 sig_scheme_path: '.' sig_meta_path: 'integration/liboqs/{pqclean_scheme}.yml' diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp.c index 48e2937f17..447ca94a97 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp.c @@ -10,6 +10,6 @@ fp_select(fp_t *d, const fp_t *a0, const fp_t *a1, uint32_t ctl) { digit_t cw = (int32_t)ctl; for (unsigned int i = 0; i < NWORDS_FIELD; i++) { - (*d)[i] = (*a0)[i] ^ (cw & ((*a0)[i] ^ (*a1)[i])); + d->fp[i] = a0->fp[i] ^ (cw & (a0->fp[i] ^ a1->fp[i])); } } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp.h index 1241d5801e..e3072a6138 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp.h @@ -11,10 +11,14 @@ #include #include -typedef digit_t fp_t[NWORDS_FIELD]; // Datatype for representing field elements +typedef struct { + digit_t fp[NWORDS_FIELD]; // vector of field elements +} fp_t; -extern const digit_t ONE[NWORDS_FIELD]; -extern const digit_t ZERO[NWORDS_FIELD]; +//typedef digit_t fp_t[NWORDS_FIELD]; // Datatype for representing field elements + +extern const fp_t ONE; +extern const fp_t ZERO; // extern const digit_t PM1O3[NWORDS_FIELD]; void fp_set_small(fp_t *x, const digit_t val); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c index 054ded92f0..aad3ce9edd 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c @@ -678,132 +678,132 @@ static int modcmp(const spint *a, const spint *b) { #include -const digit_t ZERO[NWORDS_FIELD] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }; -const digit_t ONE[NWORDS_FIELD] = { 0x00000666, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00020000 }; +const fp_t ZERO = {{ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0 }}; +const fp_t ONE = {{ 0x00000666, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00020000 }}; // Montgomery representation of 2^-1 -static const digit_t TWO_INV[NWORDS_FIELD] = { 0x00000333, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00010000 }; +static const fp_t TWO_INV = {{ 0x00000333, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00010000 }}; // Montgomery representation of 3^-1 -static const digit_t THREE_INV[NWORDS_FIELD] = { +static const fp_t THREE_INV = {{ 0x15555777, 0x0aaaaaaa, 0x15555555, 0x0aaaaaaa, 0x15555555, 0x0aaaaaaa, 0x15555555, 0x0aaaaaaa, 0x00025555, -}; +}}; // Montgomery representation of 2^256 -static const digit_t R2[NWORDS_FIELD] = { 0x0667ae14, 0x13333333, 0x19999999, 0x0ccccccc, 0x06666666, - 0x13333333, 0x19999999, 0x0ccccccc, 0x00026666 }; +static const fp_t R2 = {{ 0x0667ae14, 0x13333333, 0x19999999, 0x0ccccccc, 0x06666666, + 0x13333333, 0x19999999, 0x0ccccccc, 0x00026666 }}; void fp_set_small(fp_t *x, const digit_t val) { - modint((int)val, *x); + modint((int)val, x->fp); } void fp_mul_small(fp_t *x, const fp_t *a, const uint32_t val) { - modmli(*a, (int)val, *x); + modmli(a->fp, (int)val, x->fp); } void fp_set_zero(fp_t *x) { - modzer(*x); + modzer(x->fp); } void fp_set_one(fp_t *x) { - modone(*x); + modone(x->fp); } uint32_t fp_is_equal(const fp_t *a, const fp_t *b) { - return -(uint32_t)modcmp(*a, *b); + return -(uint32_t)modcmp(a->fp, b->fp); } uint32_t fp_is_zero(const fp_t *a) { - return -(uint32_t)modis0(*a); + return -(uint32_t)modis0(a->fp); } void fp_copy(fp_t *out, const fp_t *a) { - modcpy(*a, *out); + modcpy(a->fp, out->fp); } void fp_cswap(fp_t *a, fp_t *b, uint32_t ctl) { - modcsw((int)(ctl & 0x1), *a, *b); + modcsw((int)(ctl & 0x1), a->fp, b->fp); } void fp_add(fp_t *out, const fp_t *a, const fp_t *b) { - modadd(*a, *b, *out); + modadd(a->fp, b->fp, out->fp); } void fp_sub(fp_t *out, const fp_t *a, const fp_t *b) { - modsub(*a, *b, *out); + modsub(a->fp, b->fp, out->fp); } void fp_neg(fp_t *out, const fp_t *a) { - modneg(*a, *out); + modneg(a->fp, out->fp); } void fp_sqr(fp_t *out, const fp_t *a) { - modsqr(*a, *out); + modsqr(a->fp, out->fp); } void fp_mul(fp_t *out, const fp_t *a, const fp_t *b) { - modmul(*a, *b, *out); + modmul(a->fp, b->fp, out->fp); } void fp_inv(fp_t *x) { - modinv(*x, NULL, *x); + modinv(x->fp, NULL, x->fp); } uint32_t fp_is_square(const fp_t *a) { - return -(uint32_t)modqr(NULL, *a); + return -(uint32_t)modqr(NULL, a->fp); } void fp_sqrt(fp_t *a) { - modsqrt(*a, NULL, *a); + modsqrt(a->fp, NULL, a->fp); } void fp_half(fp_t *out, const fp_t *a) { - modmul(TWO_INV, *a, *out); + modmul(TWO_INV.fp, a->fp, out->fp); } void fp_exp3div4(fp_t *out, const fp_t *a) { - modpro(*a, *out); + modpro(a->fp, out->fp); } void fp_div3(fp_t *out, const fp_t *a) { - modmul(THREE_INV, *a, *out); + modmul(THREE_INV.fp, a->fp, out->fp); } void @@ -812,7 +812,7 @@ fp_encode(void *dst, const fp_t *a) // Modified version of modexp() int i; spint c[9]; - redc(*a, c); + redc(a->fp, c); for (i = 0; i < 32; i++) { ((char *)dst)[i] = c[0] & (spint)0xff; (void)modshr(8, c); @@ -827,17 +827,17 @@ fp_decode(fp_t *d, const void *src) spint res; const unsigned char *b = src; for (i = 0; i < 9; i++) { - (*d)[i] = 0; + d->fp[i] = 0; } for (i = 31; i >= 0; i--) { - modshl(8, *d); - (*d)[0] += (spint)b[i]; + modshl(8, d->fp); + d->fp[0] += (spint)b[i]; } - res = (spint)-modfsb(*d); - nres(*d, *d); + res = 
(spint)-modfsb(d->fp); + nres(d->fp, d->fp); // If the value was canonical then res = -1; otherwise, res = 0 for (i = 0; i < 9; i++) { - (*d)[i] &= res; + d->fp[i] &= res; } return (uint32_t)res; } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c index aa2c7d4ede..00cc61ec13 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c @@ -523,143 +523,143 @@ static int modcmp(const spint *a, const spint *b) { #include -const digit_t ZERO[NWORDS_FIELD] = { 0x0, 0x0, 0x0, 0x0, 0x0 }; -const digit_t ONE[NWORDS_FIELD] = { 0x0000000000000019, +const fp_t ZERO = {{ 0x0, 0x0, 0x0, 0x0, 0x0 }}; +const fp_t ONE = {{ 0x0000000000000019, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, - 0x0000300000000000 }; + 0x0000300000000000 }}; // Montgomery representation of 2^-1 -static const digit_t TWO_INV[NWORDS_FIELD] = { 0x000000000000000c, +static const fp_t TWO_INV = {{ 0x000000000000000c, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, - 0x0000400000000000 }; + 0x0000400000000000 }}; // Montgomery representation of 3^-1 -static const digit_t THREE_INV[NWORDS_FIELD] = { 0x000555555555555d, +static const fp_t THREE_INV = {{ 0x000555555555555d, 0x0002aaaaaaaaaaaa, 0x0005555555555555, 0x0002aaaaaaaaaaaa, - 0x0000455555555555 }; + 0x0000455555555555 }}; // Montgomery representation of 2^256 -static const digit_t R2[NWORDS_FIELD] = { 0x0001999999999eb8, +static const fp_t R2 = {{ 0x0001999999999eb8, 0x0003333333333333, 0x0006666666666666, 0x0004cccccccccccc, - 0x0000199999999999 }; + 0x0000199999999999 }}; void fp_set_small(fp_t *x, const digit_t val) { - modint((int)val, *x); + modint((int)val, x->fp); } void fp_mul_small(fp_t *x, const fp_t *a, const uint32_t val) { - modmli(*a, (int)val, *x); + modmli(a->fp, (int)val, x->fp); } void fp_set_zero(fp_t *x) { - modzer(*x); + modzer(x->fp); } void fp_set_one(fp_t *x) { - modone(*x); + modone(x->fp); } uint32_t fp_is_equal(const fp_t *a, const fp_t *b) { - return -(uint32_t)modcmp(*a, *b); + return -(uint32_t)modcmp(a->fp, b->fp); } uint32_t fp_is_zero(const fp_t *a) { - return -(uint32_t)modis0(*a); + return -(uint32_t)modis0(a->fp); } void fp_copy(fp_t *out, const fp_t *a) { - modcpy(*a, *out); + modcpy(a->fp, out->fp); } void fp_cswap(fp_t *a, fp_t *b, uint32_t ctl) { - modcsw((int)(ctl & 0x1), *a, *b); + modcsw((int)(ctl & 0x1), a->fp, b->fp); } void fp_add(fp_t *out, const fp_t *a, const fp_t *b) { - modadd(*a, *b, *out); + modadd(a->fp, b->fp, out->fp); } void fp_sub(fp_t *out, const fp_t *a, const fp_t *b) { - modsub(*a, *b, *out); + modsub(a->fp, b->fp, out->fp); } void fp_neg(fp_t *out, const fp_t *a) { - modneg(*a, *out); + modneg(a->fp, out->fp); } void fp_sqr(fp_t *out, const fp_t *a) { - modsqr(*a, *out); + modsqr(a->fp, out->fp); } void fp_mul(fp_t *out, const fp_t *a, const fp_t *b) { - modmul(*a, *b, *out); + modmul(a->fp, b->fp, out->fp); } void fp_inv(fp_t *x) { - modinv(*x, NULL, *x); + modinv(x->fp, NULL, x->fp); } uint32_t fp_is_square(const fp_t *a) { - return -(uint32_t)modqr(NULL, *a); + return -(uint32_t)modqr(NULL, a->fp); } void fp_sqrt(fp_t *a) { - modsqrt(*a, NULL, *a); + modsqrt(a->fp, NULL, a->fp); } void fp_half(fp_t *out, const fp_t *a) { - modmul(TWO_INV, *a, *out); + modmul(TWO_INV.fp, a->fp, out->fp); } void fp_exp3div4(fp_t *out, const fp_t *a) { - modpro(*a, *out); + modpro(a->fp, out->fp); } void fp_div3(fp_t *out, const fp_t *a) { - 
modmul(THREE_INV, *a, *out); + modmul(THREE_INV.fp, a->fp, out->fp); } void @@ -668,7 +668,7 @@ fp_encode(void *dst, const fp_t *a) // Modified version of modexp() int i; spint c[5]; - redc(*a, c); + redc(a->fp, c); for (i = 0; i < 32; i++) { ((char *)dst)[i] = c[0] & (spint)0xff; (void)modshr(8, c); @@ -683,17 +683,17 @@ fp_decode(fp_t *d, const void *src) spint res; const unsigned char *b = src; for (i = 0; i < 5; i++) { - (*d)[i] = 0; + d->fp[i] = 0; } for (i = 31; i >= 0; i--) { - modshl(8, *d); - (*d)[0] += (spint)b[i]; + modshl(8, d->fp); + d->fp[0] += (spint)b[i]; } - res = (spint)-modfsb(*d); - nres(*d, *d); + res = (spint)-modfsb(d->fp); + nres(d->fp, d->fp); // If the value was canonical then res = -1; otherwise, res = 0 for (i = 0; i < 5; i++) { - (*d)[i] &= res; + d->fp[i] &= res; } return (uint32_t)res; } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp.c index 48e2937f17..447ca94a97 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp.c @@ -10,6 +10,6 @@ fp_select(fp_t *d, const fp_t *a0, const fp_t *a1, uint32_t ctl) { digit_t cw = (int32_t)ctl; for (unsigned int i = 0; i < NWORDS_FIELD; i++) { - (*d)[i] = (*a0)[i] ^ (cw & ((*a0)[i] ^ (*a1)[i])); + d->fp[i] = a0->fp[i] ^ (cw & (a0->fp[i] ^ a1->fp[i])); } } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp.h index 1241d5801e..e3072a6138 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp.h @@ -11,10 +11,14 @@ #include #include -typedef digit_t fp_t[NWORDS_FIELD]; // Datatype for representing field elements +typedef struct { + digit_t fp[NWORDS_FIELD]; // vector of field elements +} fp_t; -extern const digit_t ONE[NWORDS_FIELD]; -extern const digit_t ZERO[NWORDS_FIELD]; +//typedef digit_t fp_t[NWORDS_FIELD]; // Datatype for representing field elements + +extern const fp_t ONE; +extern const fp_t ZERO; // extern const digit_t PM1O3[NWORDS_FIELD]; void fp_set_small(fp_t *x, const digit_t val); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c index eacf6e28eb..b5916de330 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c @@ -951,136 +951,136 @@ static int modcmp(const spint *a, const spint *b) { #include -const digit_t ZERO[NWORDS_FIELD] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }; -const digit_t ONE[NWORDS_FIELD] = { +const fp_t ZERO = {{ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }}; +const fp_t ONE = {{ 0x000003f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00010000 -}; +}}; // Montgomery representation of 2^-1 -static const digit_t TWO_INV[NWORDS_FIELD] = { 0x000001f8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +static const fp_t TWO_INV = {{ 0x000001f8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00008000 }; + 0x00000000, 0x00000000, 0x00000000, 0x00008000 }}; // Montgomery representation of 3^-1 -static const digit_t THREE_INV[NWORDS_FIELD] = { 0x0aaaabfa, 0x0aaaaaaa, 0x0aaaaaaa, 0x0aaaaaaa, 0x0aaaaaaa, +static const fp_t THREE_INV = {{ 
0x0aaaabfa, 0x0aaaaaaa, 0x0aaaaaaa, 0x0aaaaaaa, 0x0aaaaaaa, 0x0aaaaaaa, 0x0aaaaaaa, 0x0aaaaaaa, 0x0aaaaaaa, 0x0aaaaaaa, - 0x0aaaaaaa, 0x0aaaaaaa, 0x0aaaaaaa, 0x00030aaa }; + 0x0aaaaaaa, 0x0aaaaaaa, 0x0aaaaaaa, 0x00030aaa }}; // Montgomery representation of 2^384 -static const digit_t R2[NWORDS_FIELD] = { 0x003f1373, 0x0f03f03f, 0x03f03f03, 0x003f03f0, 0x0f03f03f, +static const fp_t R2 = {{ 0x003f1373, 0x0f03f03f, 0x03f03f03, 0x003f03f0, 0x0f03f03f, 0x03f03f03, 0x003f03f0, 0x0f03f03f, 0x03f03f03, 0x003f03f0, - 0x0f03f03f, 0x03f03f03, 0x003f03f0, 0x0000c03f }; + 0x0f03f03f, 0x03f03f03, 0x003f03f0, 0x0000c03f }}; void fp_set_small(fp_t *x, const digit_t val) { - modint((int)val, *x); + modint((int)val, x->fp); } void fp_mul_small(fp_t *x, const fp_t *a, const uint32_t val) { - modmli(*a, (int)val, *x); + modmli(a->fp, (int)val, x->fp); } void fp_set_zero(fp_t *x) { - modzer(*x); + modzer(x->fp); } void fp_set_one(fp_t *x) { - modone(*x); + modone(x->fp); } uint32_t fp_is_equal(const fp_t *a, const fp_t *b) { - return -(uint32_t)modcmp(*a, *b); + return -(uint32_t)modcmp(a->fp, b->fp); } uint32_t fp_is_zero(const fp_t *a) { - return -(uint32_t)modis0(*a); + return -(uint32_t)modis0(a->fp); } void fp_copy(fp_t *out, const fp_t *a) { - modcpy(*a, *out); + modcpy(a->fp, out->fp); } void fp_cswap(fp_t *a, fp_t *b, uint32_t ctl) { - modcsw((int)(ctl & 0x1), *a, *b); + modcsw((int)(ctl & 0x1), a->fp, b->fp); } void fp_add(fp_t *out, const fp_t *a, const fp_t *b) { - modadd(*a, *b, *out); + modadd(a->fp, b->fp, out->fp); } void fp_sub(fp_t *out, const fp_t *a, const fp_t *b) { - modsub(*a, *b, *out); + modsub(a->fp, b->fp, out->fp); } void fp_neg(fp_t *out, const fp_t *a) { - modneg(*a, *out); + modneg(a->fp, out->fp); } void fp_sqr(fp_t *out, const fp_t *a) { - modsqr(*a, *out); + modsqr(a->fp, out->fp); } void fp_mul(fp_t *out, const fp_t *a, const fp_t *b) { - modmul(*a, *b, *out); + modmul(a->fp, b->fp, out->fp); } void fp_inv(fp_t *x) { - modinv(*x, NULL, *x); + modinv(x->fp, NULL, x->fp); } uint32_t fp_is_square(const fp_t *a) { - return -(uint32_t)modqr(NULL, *a); + return -(uint32_t)modqr(NULL, a->fp); } void fp_sqrt(fp_t *a) { - modsqrt(*a, NULL, *a); + modsqrt(a->fp, NULL, a->fp); } void fp_half(fp_t *out, const fp_t *a) { - modmul(TWO_INV, *a, *out); + modmul(TWO_INV.fp, a->fp, out->fp); } void fp_exp3div4(fp_t *out, const fp_t *a) { - modpro(*a, *out); + modpro(a->fp, out->fp); } void fp_div3(fp_t *out, const fp_t *a) { - modmul(THREE_INV, *a, *out); + modmul(THREE_INV.fp, a->fp, out->fp); } void @@ -1089,7 +1089,7 @@ fp_encode(void *dst, const fp_t *a) // Modified version of modexp() int i; spint c[14]; - redc(*a, c); + redc(a->fp, c); for (i = 0; i < 48; i++) { ((char *)dst)[i] = c[0] & (spint)0xff; (void)modshr(8, c); @@ -1104,17 +1104,17 @@ fp_decode(fp_t *d, const void *src) spint res; const unsigned char *b = src; for (i = 0; i < 14; i++) { - (*d)[i] = 0; + d->fp[i] = 0; } for (i = 47; i >= 0; i--) { - modshl(8, *d); - (*d)[0] += (spint)b[i]; + modshl(8, d->fp); + d->fp[0] += (spint)b[i]; } - res = (spint)-modfsb(*d); - nres(*d, *d); + res = (spint)-modfsb(d->fp); + nres(d->fp, d->fp); // If the value was canonical then res = -1; otherwise, res = 0 for (i = 0; i < 14; i++) { - (*d)[i] &= res; + d->fp[i] &= res; } return (uint32_t)res; } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c index fa363c65fd..00f689bf79 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c +++ 
b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c @@ -607,134 +607,134 @@ static int modcmp(const spint *a, const spint *b) { #include -const digit_t ZERO[NWORDS_FIELD] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }; -const digit_t ONE[NWORDS_FIELD] = { 0x0000000000000007, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, - 0x0000000000000000, 0x0000000000000000, 0x000e400000000000 }; +const fp_t ZERO = {{ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }}; +const fp_t ONE = {{ 0x0000000000000007, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x000e400000000000 }}; // Montgomery representation of 2^-1 -static const digit_t TWO_INV[NWORDS_FIELD] = { 0x0000000000000003, 0x0000000000000000, 0x0000000000000000, +static const fp_t TWO_INV = {{ 0x0000000000000003, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, - 0x000f400000000000 }; + 0x000f400000000000 }}; // Montgomery representation of 3^-1 -static const digit_t THREE_INV[NWORDS_FIELD] = { 0x0055555555555557, 0x002aaaaaaaaaaaaa, 0x0055555555555555, +static const fp_t THREE_INV = {{ 0x0055555555555557, 0x002aaaaaaaaaaaaa, 0x0055555555555555, 0x002aaaaaaaaaaaaa, 0x0055555555555555, 0x002aaaaaaaaaaaaa, - 0x000f955555555555 }; + 0x000f955555555555 }}; // Montgomery representation of 2^384 -static const digit_t R2[NWORDS_FIELD] = { 0x0007e07e07e07e26, 0x007c0fc0fc0fc0fc, 0x0001f81f81f81f81, +static const fp_t R2 = {{ 0x0007e07e07e07e26, 0x007c0fc0fc0fc0fc, 0x0001f81f81f81f81, 0x003f03f03f03f03f, 0x00607e07e07e07e0, 0x000fc0fc0fc0fc0f, - 0x000e9f81f81f81f8 }; + 0x000e9f81f81f81f8 }}; void fp_set_small(fp_t *x, const digit_t val) { - modint((int)val, *x); + modint((int)val, x->fp); } void fp_mul_small(fp_t *x, const fp_t *a, const uint32_t val) { - modmli(*a, (int)val, *x); + modmli(a->fp, (int)val, x->fp); } void fp_set_zero(fp_t *x) { - modzer(*x); + modzer(x->fp); } void fp_set_one(fp_t *x) { - modone(*x); + modone(x->fp); } uint32_t fp_is_equal(const fp_t *a, const fp_t *b) { - return -(uint32_t)modcmp(*a, *b); + return -(uint32_t)modcmp(a->fp, b->fp); } uint32_t fp_is_zero(const fp_t *a) { - return -(uint32_t)modis0(*a); + return -(uint32_t)modis0(a->fp); } void fp_copy(fp_t *out, const fp_t *a) { - modcpy(*a, *out); + modcpy(a->fp, out->fp); } void fp_cswap(fp_t *a, fp_t *b, uint32_t ctl) { - modcsw((int)(ctl & 0x1), *a, *b); + modcsw((int)(ctl & 0x1), a->fp, b->fp); } void fp_add(fp_t *out, const fp_t *a, const fp_t *b) { - modadd(*a, *b, *out); + modadd(a->fp, b->fp, out->fp); } void fp_sub(fp_t *out, const fp_t *a, const fp_t *b) { - modsub(*a, *b, *out); + modsub(a->fp, b->fp, out->fp); } void fp_neg(fp_t *out, const fp_t *a) { - modneg(*a, *out); + modneg(a->fp, out->fp); } void fp_sqr(fp_t *out, const fp_t *a) { - modsqr(*a, *out); + modsqr(a->fp, out->fp); } void fp_mul(fp_t *out, const fp_t *a, const fp_t *b) { - modmul(*a, *b, *out); + modmul(a->fp, b->fp, out->fp); } void fp_inv(fp_t *x) { - modinv(*x, NULL, *x); + modinv(x->fp, NULL, x->fp); } uint32_t fp_is_square(const fp_t *a) { - return -(uint32_t)modqr(NULL, *a); + return -(uint32_t)modqr(NULL, a->fp); } void fp_sqrt(fp_t *a) { - modsqrt(*a, NULL, *a); + modsqrt(a->fp, NULL, a->fp); } void fp_half(fp_t *out, const fp_t *a) { - modmul(TWO_INV, *a, *out); + modmul(TWO_INV.fp, a->fp, out->fp); } void fp_exp3div4(fp_t *out, const fp_t *a) { - modpro(*a, *out); + modpro(a->fp, out->fp); } void fp_div3(fp_t *out, const fp_t *a) { - modmul(THREE_INV, *a, *out); + 
modmul(THREE_INV.fp, a->fp, out->fp); } void @@ -743,7 +743,7 @@ fp_encode(void *dst, const fp_t *a) // Modified version of modexp() int i; spint c[7]; - redc(*a, c); + redc(a->fp, c); for (i = 0; i < 48; i++) { ((char *)dst)[i] = c[0] & (spint)0xff; (void)modshr(8, c); @@ -758,17 +758,17 @@ fp_decode(fp_t *d, const void *src) spint res; const unsigned char *b = src; for (i = 0; i < 7; i++) { - (*d)[i] = 0; + d->fp[i] = 0; } for (i = 47; i >= 0; i--) { - modshl(8, *d); - (*d)[0] += (spint)b[i]; + modshl(8, d->fp); + d->fp[0] += (spint)b[i]; } - res = (spint)-modfsb(*d); - nres(*d, *d); + res = (spint)-modfsb(d->fp); + nres(d->fp, d->fp); // If the value was canonical then res = -1; otherwise, res = 0 for (i = 0; i < 7; i++) { - (*d)[i] &= res; + d->fp[i] &= res; } return (uint32_t)res; } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp.c index 48e2937f17..447ca94a97 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp.c @@ -10,6 +10,6 @@ fp_select(fp_t *d, const fp_t *a0, const fp_t *a1, uint32_t ctl) { digit_t cw = (int32_t)ctl; for (unsigned int i = 0; i < NWORDS_FIELD; i++) { - (*d)[i] = (*a0)[i] ^ (cw & ((*a0)[i] ^ (*a1)[i])); + d->fp[i] = a0->fp[i] ^ (cw & (a0->fp[i] ^ a1->fp[i])); } } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp.h index 1241d5801e..e3072a6138 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp.h @@ -11,10 +11,14 @@ #include #include -typedef digit_t fp_t[NWORDS_FIELD]; // Datatype for representing field elements +typedef struct { + digit_t fp[NWORDS_FIELD]; // vector of field elements +} fp_t; -extern const digit_t ONE[NWORDS_FIELD]; -extern const digit_t ZERO[NWORDS_FIELD]; +//typedef digit_t fp_t[NWORDS_FIELD]; // Datatype for representing field elements + +extern const fp_t ONE; +extern const fp_t ZERO; // extern const digit_t PM1O3[NWORDS_FIELD]; void fp_set_small(fp_t *x, const digit_t val); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c index 491b052b7b..7033623b6b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c @@ -1221,137 +1221,137 @@ static int modcmp(const spint *a, const spint *b) { #include -const digit_t ZERO[NWORDS_FIELD] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }; -const digit_t ONE[NWORDS_FIELD] = { 0x00025ed0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +const fp_t ZERO = {{ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }}; +const fp_t ONE = {{ 0x00025ed0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000800 }; + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000800 }}; // Montgomery representation of 2^-1 -static const digit_t TWO_INV[NWORDS_FIELD] = { 0x00012f68, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +static const fp_t TWO_INV = {{ 0x00012f68, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000400 }; + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000400 }}; // Montgomery representation of 3^-1 -static const digit_t THREE_INV[NWORDS_FIELD] = { +static const fp_t THREE_INV = {{ 0x15561f9a, 0x0aaaaaaa, 0x15555555, 0x0aaaaaaa, 0x15555555, 0x0aaaaaaa, 0x15555555, 0x0aaaaaaa, 0x15555555, 0x0aaaaaaa, 0x15555555, 0x0aaaaaaa, 0x15555555, 0x0aaaaaaa, 0x15555555, 0x0aaaaaaa, 0x15555555, 0x00000baa -}; +}}; // Montgomery representation of 2^512 -static const digit_t R2[NWORDS_FIELD] = { 0x03c668a5, 0x0f684bda, 0x1425ed09, 0x12f684bd, 0x1b425ed0, 0x012f684b, +static const fp_t R2 = {{ 0x03c668a5, 0x0f684bda, 0x1425ed09, 0x12f684bd, 0x1b425ed0, 0x012f684b, 0x17b425ed, 0x1a12f684, 0x097b425e, 0x1da12f68, 0x1097b425, 0x0bda12f6, - 0x0d097b42, 0x04bda12f, 0x1ed097b4, 0x084bda12, 0x05ed097b, 0x00000a21 }; + 0x0d097b42, 0x04bda12f, 0x1ed097b4, 0x084bda12, 0x05ed097b, 0x00000a21 }}; void fp_set_small(fp_t *x, const digit_t val) { - modint((int)val, *x); + modint((int)val, x->fp); } void fp_mul_small(fp_t *x, const fp_t *a, const uint32_t val) { - modmli(*a, (int)val, *x); + modmli(a->fp, (int)val, x->fp); } void fp_set_zero(fp_t *x) { - modzer(*x); + modzer(x->fp); } void fp_set_one(fp_t *x) { - modone(*x); + modone(x->fp); } uint32_t fp_is_equal(const fp_t *a, const fp_t *b) { - return -(uint32_t)modcmp(*a, *b); + return -(uint32_t)modcmp(a->fp, b->fp); } uint32_t fp_is_zero(const fp_t *a) { - return -(uint32_t)modis0(*a); + return -(uint32_t)modis0(a->fp); } void fp_copy(fp_t *out, const fp_t *a) { - modcpy(*a, *out); + modcpy(a->fp, out->fp); } void fp_cswap(fp_t *a, fp_t *b, uint32_t ctl) { - modcsw((int)(ctl & 0x1), *a, *b); + modcsw((int)(ctl & 0x1), a->fp, b->fp); } void fp_add(fp_t *out, const fp_t *a, const fp_t *b) { - modadd(*a, *b, *out); + modadd(a->fp, b->fp, out->fp); } void fp_sub(fp_t *out, const fp_t *a, const fp_t *b) { - modsub(*a, *b, *out); + modsub(a->fp, b->fp, out->fp); } void fp_neg(fp_t *out, const fp_t *a) { - modneg(*a, *out); + modneg(a->fp, out->fp); } void fp_sqr(fp_t *out, const fp_t *a) { - modsqr(*a, *out); + modsqr(a->fp, out->fp); } void fp_mul(fp_t *out, const fp_t *a, const fp_t *b) { - modmul(*a, *b, *out); + modmul(a->fp, b->fp, out->fp); } void fp_inv(fp_t *x) { - modinv(*x, NULL, *x); + modinv(x->fp, NULL, x->fp); } uint32_t fp_is_square(const fp_t *a) { - return -(uint32_t)modqr(NULL, *a); + return -(uint32_t)modqr(NULL, a->fp); } void fp_sqrt(fp_t *a) { - modsqrt(*a, NULL, *a); + modsqrt(a->fp, NULL, a->fp); } void fp_half(fp_t *out, const fp_t *a) { - modmul(TWO_INV, *a, *out); + modmul(TWO_INV.fp, a->fp, out->fp); } void fp_exp3div4(fp_t *out, const fp_t *a) { - modpro(*a, *out); + modpro(a->fp, out->fp); } void fp_div3(fp_t *out, const fp_t *a) { - modmul(THREE_INV, *a, *out); + modmul(THREE_INV.fp, a->fp, out->fp); } void @@ -1360,7 +1360,7 @@ fp_encode(void *dst, const fp_t *a) // Modified version of modexp() int i; spint c[18]; - redc(*a, c); + redc(a->fp, c); for (i = 0; i < 64; i++) { ((char *)dst)[i] = c[0] & (spint)0xff; (void)modshr(8, c); @@ -1375,17 +1375,17 @@ fp_decode(fp_t *d, const void *src) spint res; const unsigned char *b = src; for (i = 0; i < 18; i++) { - (*d)[i] = 0; + d->fp[i] = 0; } for (i = 63; i >= 0; i--) { - modshl(8, *d); - (*d)[0] += (spint)b[i]; + modshl(8, d->fp); + d->fp[0] += (spint)b[i]; } - res = (spint)-modfsb(*d); - nres(*d, *d); + res = (spint)-modfsb(d->fp); + nres(d->fp, d->fp); // If the value was canonical then res = -1; otherwise, res = 0 for (i = 0; i < 18; 
i++) { - (*d)[i] &= res; + d->fp[i] &= res; } return (uint32_t)res; } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c index 67b3b9ba54..887e86f3f9 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c @@ -698,135 +698,135 @@ static int modcmp(const spint *a, const spint *b) { #include -const digit_t ZERO[NWORDS_FIELD] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }; -const digit_t ONE[NWORDS_FIELD] = { 0x000000000000012f, 0x0000000000000000, 0x0000000000000000, +const fp_t ZERO = {{ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }}; +const fp_t ONE = {{ 0x000000000000012f, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, - 0x0000000000000000, 0x0000000000000000, 0x0000b00000000000 }; + 0x0000000000000000, 0x0000000000000000, 0x0000b00000000000 }}; // Montgomery representation of 2^-1 -static const digit_t TWO_INV[NWORDS_FIELD] = { 0x0000000000000097, 0x0000000000000000, 0x0000000000000000, +static const fp_t TWO_INV = {{ 0x0000000000000097, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, - 0x0000000000000000, 0x0000000000000000, 0x0001300000000000 }; + 0x0000000000000000, 0x0000000000000000, 0x0001300000000000 }}; // Montgomery representation of 3^-1 -static const digit_t THREE_INV[NWORDS_FIELD] = { 0x00aaaaaaaaaaab0f, 0x0155555555555555, 0x00aaaaaaaaaaaaaa, +static const fp_t THREE_INV = {{ 0x00aaaaaaaaaaab0f, 0x0155555555555555, 0x00aaaaaaaaaaaaaa, 0x0155555555555555, 0x00aaaaaaaaaaaaaa, 0x0155555555555555, - 0x00aaaaaaaaaaaaaa, 0x0155555555555555, 0x00015aaaaaaaaaaa }; + 0x00aaaaaaaaaaaaaa, 0x0155555555555555, 0x00015aaaaaaaaaaa }}; // Montgomery representation of 2^512 -static const digit_t R2[NWORDS_FIELD] = { 0x0012f684bda1e334, 0x01425ed097b425ed, 0x01684bda12f684bd, +static const fp_t R2 = {{ 0x0012f684bda1e334, 0x01425ed097b425ed, 0x01684bda12f684bd, 0x01ed097b425ed097, 0x00bda12f684bda12, 0x0097b425ed097b42, - 0x0012f684bda12f68, 0x01425ed097b425ed, 0x00008bda12f684bd }; + 0x0012f684bda12f68, 0x01425ed097b425ed, 0x00008bda12f684bd }}; void fp_set_small(fp_t *x, const digit_t val) { - modint((int)val, *x); + modint((int)val, x->fp); } void fp_mul_small(fp_t *x, const fp_t *a, const uint32_t val) { - modmli(*a, (int)val, *x); + modmli(a->fp, (int)val, x->fp); } void fp_set_zero(fp_t *x) { - modzer(*x); + modzer(x->fp); } void fp_set_one(fp_t *x) { - modone(*x); + modone(x->fp); } uint32_t fp_is_equal(const fp_t *a, const fp_t *b) { - return -(uint32_t)modcmp(*a, *b); + return -(uint32_t)modcmp(a->fp, b->fp); } uint32_t fp_is_zero(const fp_t *a) { - return -(uint32_t)modis0(*a); + return -(uint32_t)modis0(a->fp); } void fp_copy(fp_t *out, const fp_t *a) { - modcpy(*a, *out); + modcpy(a->fp, out->fp); } void fp_cswap(fp_t *a, fp_t *b, uint32_t ctl) { - modcsw((int)(ctl & 0x1), *a, *b); + modcsw((int)(ctl & 0x1), a->fp, b->fp); } void fp_add(fp_t *out, const fp_t *a, const fp_t *b) { - modadd(*a, *b, *out); + modadd(a->fp, b->fp, out->fp); } void fp_sub(fp_t *out, const fp_t *a, const fp_t *b) { - modsub(*a, *b, *out); + modsub(a->fp, b->fp, out->fp); } void fp_neg(fp_t *out, const fp_t *a) { - modneg(*a, *out); + modneg(a->fp, out->fp); } void fp_sqr(fp_t *out, const fp_t *a) { - modsqr(*a, *out); + modsqr(a->fp, out->fp); } void fp_mul(fp_t *out, const fp_t *a, const fp_t *b) { - modmul(*a, *b, *out); + 
modmul(a->fp, b->fp, out->fp); } void fp_inv(fp_t *x) { - modinv(*x, NULL, *x); + modinv(x->fp, NULL, x->fp); } uint32_t fp_is_square(const fp_t *a) { - return -(uint32_t)modqr(NULL, *a); + return -(uint32_t)modqr(NULL, a->fp); } void fp_sqrt(fp_t *a) { - modsqrt(*a, NULL, *a); + modsqrt(a->fp, NULL, a->fp); } void fp_half(fp_t *out, const fp_t *a) { - modmul(TWO_INV, *a, *out); + modmul(TWO_INV.fp, a->fp, out->fp); } void fp_exp3div4(fp_t *out, const fp_t *a) { - modpro(*a, *out); + modpro(a->fp, out->fp); } void fp_div3(fp_t *out, const fp_t *a) { - modmul(THREE_INV, *a, *out); + modmul(THREE_INV.fp, a->fp, out->fp); } void @@ -835,7 +835,7 @@ fp_encode(void *dst, const fp_t *a) // Modified version of modexp() int i; spint c[9]; - redc(*a, c); + redc(a->fp, c); for (i = 0; i < 64; i++) { ((char *)dst)[i] = c[0] & (spint)0xff; (void)modshr(8, c); @@ -850,17 +850,17 @@ fp_decode(fp_t *d, const void *src) spint res; const unsigned char *b = src; for (i = 0; i < 9; i++) { - (*d)[i] = 0; + d->fp[i] = 0; } for (i = 63; i >= 0; i--) { - modshl(8, *d); - (*d)[0] += (spint)b[i]; + modshl(8, d->fp); + d->fp[0] += (spint)b[i]; } - res = (spint)-modfsb(*d); - nres(*d, *d); + res = (spint)-modfsb(d->fp); + nres(d->fp, d->fp); // If the value was canonical then res = -1; otherwise, res = 0 for (i = 0; i < 9; i++) { - (*d)[i] &= res; + d->fp[i] &= res; } return (uint32_t)res; } From 0c78bbb865bdb035c52d2400d9f39108e953d122 Mon Sep 17 00:00:00 2001 From: Basil Hess Date: Mon, 18 Aug 2025 15:40:54 +0200 Subject: [PATCH 15/19] commit after merge Signed-off-by: Basil Hess --- src/sig/sig.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/sig/sig.c b/src/sig/sig.c index 6ab094d6ff..74a051d53d 100644 --- a/src/sig/sig.c +++ b/src/sig/sig.c @@ -83,11 +83,9 @@ OQS_API const char *OQS_SIG_alg_identifier(size_t i) { OQS_SIG_alg_snova_SNOVA_24_5_5, OQS_SIG_alg_snova_SNOVA_60_10_4, OQS_SIG_alg_snova_SNOVA_29_6_5, - OQS_SIG_alg_snova_SNOVA_29_6_5, OQS_SIG_alg_sqisign_lvl1, OQS_SIG_alg_sqisign_lvl3, OQS_SIG_alg_sqisign_lvl5,///// OQS_COPY_FROM_UPSTREAM_FRAGMENT_ALG_IDENTIFIER_END - OQS_SIG_alg_snova_SNOVA_29_6_5,///// OQS_COPY_FROM_UPSTREAM_FRAGMENT_ALG_IDENTIFIER_END ///// OQS_COPY_FROM_SLH_DSA_FRAGMENT_ALGID_START OQS_SIG_alg_slh_dsa_pure_sha2_128s, OQS_SIG_alg_slh_dsa_pure_sha2_128f, From e058cb133907bbd736c782ef26a22bbe959d64fa Mon Sep 17 00:00:00 2001 From: Basil Hess Date: Mon, 18 Aug 2025 15:57:14 +0200 Subject: [PATCH 16/19] new pull sqisign [full tests] Signed-off-by: Basil Hess --- .CMake/compiler_opts.cmake | 2 + CMakeLists.txt | 10 - docs/algorithms/sig/sqisign.md | 2 +- docs/algorithms/sig/sqisign.yml | 2 +- .../copy_from_upstream/copy_from_upstream.yml | 2 +- .../asm_preamble.h | 6 +- .../dim2id2iso.c | 6 +- .../e0_basis.c | 32 +- .../the-sqisign_sqisign_lvl1_broadwell/ec.h | 4 +- .../encode_verification.c | 30 - .../endomorphism_action.c | 1120 +++++++-------- .../the-sqisign_sqisign_lvl1_broadwell/fp.h | 2 +- .../the-sqisign_sqisign_lvl1_broadwell/fp2.h | 2 +- .../gf5248.c | 14 +- .../gf5248.h | 45 +- .../the-sqisign_sqisign_lvl1_broadwell/hd.h | 2 +- .../hd_splitting_transforms.c | 80 +- .../the-sqisign_sqisign_lvl1_broadwell/l2.c | 11 +- .../lll_internals.h | 14 +- .../the-sqisign_sqisign_lvl1_broadwell/mp.c | 4 +- .../rationals.c | 84 +- .../the-sqisign_sqisign_lvl1_broadwell/rng.h | 2 +- .../sqisign.c | 2 +- .../the-sqisign_sqisign_lvl1_ref/dim2id2iso.c | 6 +- .../the-sqisign_sqisign_lvl1_ref/e0_basis.c | 32 +- .../sqisign/the-sqisign_sqisign_lvl1_ref/ec.h | 4 +- .../encode_verification.c | 
30 - .../endomorphism_action.c | 1120 +++++++-------- .../fp_p5248_32.c | 68 +- .../fp_p5248_64.c | 66 +- .../sqisign/the-sqisign_sqisign_lvl1_ref/hd.h | 2 +- .../hd_splitting_transforms.c | 80 +- .../sqisign/the-sqisign_sqisign_lvl1_ref/l2.c | 11 +- .../lll_internals.h | 14 +- .../sqisign/the-sqisign_sqisign_lvl1_ref/mp.c | 4 +- .../the-sqisign_sqisign_lvl1_ref/rationals.c | 84 +- .../the-sqisign_sqisign_lvl1_ref/rng.h | 2 +- .../the-sqisign_sqisign_lvl1_ref/sqisign.c | 2 +- .../asm_preamble.h | 6 +- .../dim2id2iso.c | 6 +- .../e0_basis.c | 32 +- .../the-sqisign_sqisign_lvl3_broadwell/ec.h | 4 +- .../encode_verification.c | 30 - .../endomorphism_action.c | 1280 ++++++++--------- .../the-sqisign_sqisign_lvl3_broadwell/fp.h | 2 +- .../the-sqisign_sqisign_lvl3_broadwell/fp2.h | 2 +- .../gf65376.c | 26 +- .../gf65376.h | 43 +- .../the-sqisign_sqisign_lvl3_broadwell/hd.h | 2 +- .../hd_splitting_transforms.c | 80 +- .../the-sqisign_sqisign_lvl3_broadwell/l2.c | 11 +- .../lll_internals.h | 14 +- .../the-sqisign_sqisign_lvl3_broadwell/mp.c | 4 +- .../rationals.c | 84 +- .../the-sqisign_sqisign_lvl3_broadwell/rng.h | 2 +- .../sqisign.c | 2 +- .../the-sqisign_sqisign_lvl3_ref/dim2id2iso.c | 6 +- .../the-sqisign_sqisign_lvl3_ref/e0_basis.c | 32 +- .../sqisign/the-sqisign_sqisign_lvl3_ref/ec.h | 4 +- .../encode_verification.c | 30 - .../endomorphism_action.c | 1280 ++++++++--------- .../fp_p65376_32.c | 68 +- .../fp_p65376_64.c | 66 +- .../sqisign/the-sqisign_sqisign_lvl3_ref/hd.h | 2 +- .../hd_splitting_transforms.c | 80 +- .../sqisign/the-sqisign_sqisign_lvl3_ref/l2.c | 11 +- .../lll_internals.h | 14 +- .../sqisign/the-sqisign_sqisign_lvl3_ref/mp.c | 4 +- .../the-sqisign_sqisign_lvl3_ref/rationals.c | 84 +- .../the-sqisign_sqisign_lvl3_ref/rng.h | 2 +- .../the-sqisign_sqisign_lvl3_ref/sqisign.c | 2 +- .../asm_preamble.h | 6 +- .../dim2id2iso.c | 6 +- .../e0_basis.c | 32 +- .../the-sqisign_sqisign_lvl5_broadwell/ec.h | 4 +- .../encode_verification.c | 30 - .../endomorphism_action.c | 1120 +++++++-------- .../the-sqisign_sqisign_lvl5_broadwell/fp.h | 2 +- .../the-sqisign_sqisign_lvl5_broadwell/fp2.h | 2 +- .../gf27500.c | 26 +- .../gf27500.h | 45 +- .../the-sqisign_sqisign_lvl5_broadwell/hd.h | 2 +- .../hd_splitting_transforms.c | 80 +- .../the-sqisign_sqisign_lvl5_broadwell/l2.c | 11 +- .../lll_internals.h | 14 +- .../the-sqisign_sqisign_lvl5_broadwell/mp.c | 4 +- .../rationals.c | 84 +- .../the-sqisign_sqisign_lvl5_broadwell/rng.h | 2 +- .../sqisign.c | 2 +- .../the-sqisign_sqisign_lvl5_ref/dim2id2iso.c | 6 +- .../the-sqisign_sqisign_lvl5_ref/e0_basis.c | 32 +- .../sqisign/the-sqisign_sqisign_lvl5_ref/ec.h | 4 +- .../encode_verification.c | 30 - .../endomorphism_action.c | 1120 +++++++-------- .../fp_p27500_32.c | 68 +- .../fp_p27500_64.c | 66 +- .../sqisign/the-sqisign_sqisign_lvl5_ref/hd.h | 2 +- .../hd_splitting_transforms.c | 80 +- .../sqisign/the-sqisign_sqisign_lvl5_ref/l2.c | 11 +- .../lll_internals.h | 14 +- .../sqisign/the-sqisign_sqisign_lvl5_ref/mp.c | 4 +- .../the-sqisign_sqisign_lvl5_ref/rationals.c | 84 +- .../the-sqisign_sqisign_lvl5_ref/rng.h | 2 +- .../the-sqisign_sqisign_lvl5_ref/sqisign.c | 2 +- tests/test_leaks.py | 2 +- 105 files changed, 4431 insertions(+), 4886 deletions(-) diff --git a/.CMake/compiler_opts.cmake b/.CMake/compiler_opts.cmake index 8e65be544d..e6e9b03dfa 100644 --- a/.CMake/compiler_opts.cmake +++ b/.CMake/compiler_opts.cmake @@ -96,6 +96,7 @@ if(CMAKE_C_COMPILER_ID MATCHES "Clang") add_compile_options(-Wextra) add_compile_options(-Wpedantic) 
         add_compile_options(-Wno-unused-command-line-argument)
+        add_compile_definitions(C_PEDANTIC_MODE)
     endif()
     if(CC_SUPPORTS_WA_NOEXECSTACK)
         add_compile_options("-Wa,--noexecstack")
@@ -163,6 +164,7 @@ elseif(CMAKE_C_COMPILER_ID STREQUAL "GNU")
         add_compile_options(-Wformat=2)
         add_compile_options(-Wfloat-equal)
         add_compile_options(-Wwrite-strings)
+        add_compile_definitions(C_PEDANTIC_MODE)
     endif()
     if (NOT CMAKE_SYSTEM_NAME STREQUAL "Darwin")
         if(CC_SUPPORTS_WA_NOEXECSTACK)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 57f79ba1b4..820177ba29 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -171,16 +171,6 @@ if(${OQS_USE_GMP})
 else()
     add_compile_definitions(RADIX_64)
     include(CheckCSourceCompiles)
-    check_c_source_compiles("
-        int main() {
-            __uint128_t x = 0;
-            (void)x;
-            return 0;
-        }
-    " HAVE_UINT128_T)
-    if (HAVE_UINT128_T)
-        add_compile_definitions(HAVE_UINT128)
-    endif()
     add_compile_definitions(GMP_LIMB_BITS=64)
 endif()
 else()
diff --git a/docs/algorithms/sig/sqisign.md b/docs/algorithms/sig/sqisign.md
index 7eafbe9733..b7f2002798 100644
--- a/docs/algorithms/sig/sqisign.md
+++ b/docs/algorithms/sig/sqisign.md
@@ -6,7 +6,7 @@
 - **Authors' website**: https://sqisign.org/
 - **Specification version**: Round 2.
 - **Primary Source**:
-  - **Source**: https://github.com/bhess/the-sqisign/commit/70faebf85d1536f16db4386af9a65d43f1015767
+  - **Source**: https://github.com/bhess/the-sqisign/commit/a8884349ee78b0c4da296c9f8ce6f208910d5ee6
 - **Implementation license (SPDX-Identifier)**: Apache-2.0
 
diff --git a/docs/algorithms/sig/sqisign.yml b/docs/algorithms/sig/sqisign.yml
index 3fe126dee2..d22bb1fb70 100644
--- a/docs/algorithms/sig/sqisign.yml
+++ b/docs/algorithms/sig/sqisign.yml
@@ -36,7 +36,7 @@ website: https://sqisign.org/
 nist-round: 2
 spec-version: Round 2
 primary-upstream:
-  source: https://github.com/bhess/the-sqisign/commit/70faebf85d1536f16db4386af9a65d43f1015767
+  source: https://github.com/bhess/the-sqisign/commit/a8884349ee78b0c4da296c9f8ce6f208910d5ee6
   spdx-license-identifier: Apache-2.0
 parameter-sets:
 - name: SQIsign-lvl1
diff --git a/scripts/copy_from_upstream/copy_from_upstream.yml b/scripts/copy_from_upstream/copy_from_upstream.yml
index 8b05d22e44..d10ab094b0 100644
--- a/scripts/copy_from_upstream/copy_from_upstream.yml
+++ b/scripts/copy_from_upstream/copy_from_upstream.yml
@@ -104,7 +104,7 @@ upstreams:
     name: the-sqisign
     git_url: https://github.com/bhess/the-sqisign.git
     git_branch: oqs
-    git_commit: 70faebf85d1536f16db4386af9a65d43f1015767
+    git_commit: a8884349ee78b0c4da296c9f8ce6f208910d5ee6
     sig_scheme_path: '.'
sig_meta_path: 'integration/liboqs/{pqclean_scheme}.yml' diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/asm_preamble.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/asm_preamble.h index 3ef7927e9c..ca2a054ce2 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/asm_preamble.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/asm_preamble.h @@ -9,8 +9,10 @@ #undef fp2_mul_c1 #undef fp2_sq_c0 #undef fp2_sq_c1 -#define p2 CAT(_, p2) -#define p CAT(_, p) +#undef p2 +#undef p +#define p2 CAT(_, SQISIGN_NAMESPACE(p2)) +#define p CAT(_, SQISIGN_NAMESPACE(p)) #define fp_add CAT(_, SQISIGN_NAMESPACE(fp_add)) #define fp_sub CAT(_, SQISIGN_NAMESPACE(fp_sub)) #define fp_mul CAT(_, SQISIGN_NAMESPACE(fp_mul)) diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c index 143060e2c3..74184fc97b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/dim2id2iso.c @@ -191,7 +191,7 @@ fixed_degree_isogeny_and_eval(quat_left_ideal_t *lideal, // reordering vectors and switching some signs if needed to make it in a nicer // shape static void -post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, const ibz_t *norm, bool is_special_order) +post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, bool is_special_order) { // if the left order is the special one, then we apply some additional post // treatment @@ -520,7 +520,7 @@ find_uv(ibz_t *u, ibz_set(&adjusted_norm[0], 1); ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); - post_LLL_basis_treatment(&gram[0], &reduced[0], &ideal[0].norm, true); + post_LLL_basis_treatment(&gram[0], &reduced[0], true); // for efficient lattice reduction, we replace ideal[0] by the equivalent // ideal of smallest norm @@ -562,7 +562,7 @@ find_uv(ibz_t *u, ibz_set(&adjusted_norm[i], 1); ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom); ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom); - post_LLL_basis_treatment(&gram[i], &reduced[i], &ideal[i].norm, false); + post_LLL_basis_treatment(&gram[i], &reduced[i], false); } // enumerating small vectors diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/e0_basis.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/e0_basis.c index 5be2b8e57e..c24fe29409 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/e0_basis.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/e0_basis.c @@ -2,54 +2,54 @@ const fp2_t BASIS_E0_PX = { #if 0 #elif RADIX == 16 -{0x107, 0xc, 0x1890, 0xf2a, 0x52b, 0xb68, 0x152d, 0xa4c, 0x1054, 0x642, 0x36a, 0x6f8, 0x7ad, 0x146c, 0x1d66, 0x1b67, 0x236, 0x10d, 0x1933, 0x3} +{{0x107, 0xc, 0x1890, 0xf2a, 0x52b, 0xb68, 0x152d, 0xa4c, 0x1054, 0x642, 0x36a, 0x6f8, 0x7ad, 0x146c, 0x1d66, 0x1b67, 0x236, 0x10d, 0x1933, 0x3}} #elif RADIX == 32 -{0x3020e, 0xb795624, 0x5ab6829, 0x1514995, 0x1b5190a, 0x187ad37c, 0x19facd46, 0x8688db6, 0x3c998} +{{0x3020e, 0xb795624, 0x5ab6829, 0x1514995, 0x1b5190a, 0x187ad37c, 0x19facd46, 0x8688db6, 0x3c998}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x52b795624001810, 0x8c8505452654b56d, 0xf59a8d87ad37c0da, 0x24e4cc21a236db3} +{{0x52b795624001810, 0x8c8505452654b56d, 0xf59a8d87ad37c0da, 0x24e4cc21a236db3}} #else -{0x5bcab12000c08, 0x452654b56d052, 0x26f81b5190a0a, 0x36cfd66a361eb, 
0x12726610d11b} +{{0x5bcab12000c08, 0x452654b56d052, 0x26f81b5190a0a, 0x36cfd66a361eb, 0x12726610d11b}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1f87, 0x83e, 0x32e, 0xe58, 0xd9d, 0x1416, 0x752, 0x13b4, 0x1efa, 0xe62, 0x12f5, 0x1907, 0x1814, 0x1ddd, 0x1aa6, 0x1420, 0x2cd, 0x1431, 0x1be2, 0x7} +{{0x1f87, 0x83e, 0x32e, 0xe58, 0xd9d, 0x1416, 0x752, 0x13b4, 0x1efa, 0xe62, 0x12f5, 0x1907, 0x1814, 0x1ddd, 0x1aa6, 0x1420, 0x2cd, 0x1431, 0x1be2, 0x7}} #elif RADIX == 32 -{0x120fbf0f, 0x1d72c0cb, 0xa54166c, 0x1bea7687, 0x197ab98b, 0x1b814c83, 0x8354ddd, 0x188b368, 0x2df15} +{{0x120fbf0f, 0x1d72c0cb, 0xa54166c, 0x1bea7687, 0x197ab98b, 0x1b814c83, 0x8354ddd, 0x188b368, 0x2df15}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xcd9d72c0cb907df8, 0x5cc5efa9da1d4a82, 0x6a9bbbb814c83cbd, 0x26ef8a8622cda10} +{{0xcd9d72c0cb907df8, 0x5cc5efa9da1d4a82, 0x6a9bbbb814c83cbd, 0x26ef8a8622cda10}} #else -{0x6b96065c83efc, 0x29da1d4a82cd9, 0x190797ab98bdf, 0x6841aa6eeee05, 0x1377c5431166} +{{0x6b96065c83efc, 0x29da1d4a82cd9, 0x190797ab98bdf, 0x6841aa6eeee05, 0x1377c5431166}} #endif #endif }; const fp2_t BASIS_E0_QX = { #if 0 #elif RADIX == 16 -{0x5ff, 0x1783, 0xadc, 0x775, 0xad4, 0x593, 0xb4c, 0x21e, 0x1cb2, 0x13d8, 0x179f, 0x680, 0x1a9c, 0x1824, 0x118e, 0x13d9, 0x24, 0x1956, 0x1dd2, 0x9} +{{0x5ff, 0x1783, 0xadc, 0x775, 0xad4, 0x593, 0xb4c, 0x21e, 0x1cb2, 0x13d8, 0x179f, 0x680, 0x1a9c, 0x1824, 0x118e, 0x13d9, 0x24, 0x1956, 0x1dd2, 0x9}} #elif RADIX == 32 -{0x5e0cbff, 0x143baab7, 0x9859356, 0x12c843cb, 0xbcfcf63, 0x9a9c340, 0x16631d82, 0xab00927, 0x4ee96} +{{0x5e0cbff, 0x143baab7, 0x9859356, 0x12c843cb, 0xbcfcf63, 0x9a9c340, 0x16631d82, 0xab00927, 0x4ee96}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6ad43baab72f065f, 0xe7b1cb210f2d30b2, 0xc63b049a9c3405e7, 0x4ff74b2ac0249ec} +{{0x6ad43baab72f065f, 0xe7b1cb210f2d30b2, 0xc63b049a9c3405e7, 0x4ff74b2ac0249ec}} #else -{0x21dd55b97832f, 0x210f2d30b26ad, 0x680bcfcf6396, 0x27b318ec126a7, 0x4ffba5956012} +{{0x21dd55b97832f, 0x210f2d30b26ad, 0x680bcfcf6396, 0x27b318ec126a7, 0x4ffba5956012}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1c7f, 0x1117, 0xa4, 0x1164, 0x6e, 0x1e63, 0x1b7b, 0x1305, 0x424, 0x131a, 0x1b61, 0xae3, 0x17b1, 0xe5e, 0x1848, 0x1e81, 0x14a5, 0x1cb5, 0x1d87, 0x8} +{{0x1c7f, 0x1117, 0xa4, 0x1164, 0x6e, 0x1e63, 0x1b7b, 0x1305, 0x424, 0x131a, 0x1b61, 0xae3, 0x17b1, 0xe5e, 0x1848, 0x1e81, 0x14a5, 0x1cb5, 0x1d87, 0x8}} #elif RADIX == 32 -{0x445f8ff, 0xe8b2029, 0xf7e6303, 0x109260bb, 0x1db0cc68, 0x1d7b1571, 0x7090e5, 0x5ad297d, 0x3ec3f} +{{0x445f8ff, 0xe8b2029, 0xf7e6303, 0x109260bb, 0x1db0cc68, 0x1d7b1571, 0x7090e5, 0x5ad297d, 0x3ec3f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x606e8b2029222fc7, 0x6634424982edefcc, 0xe121cbd7b1571ed8, 0x4f761f96b4a5f40} +{{0x606e8b2029222fc7, 0x6634424982edefcc, 0xe121cbd7b1571ed8, 0x4f761f96b4a5f40}} #else -{0x74590149117e3, 0x4982edefcc606, 0x2ae3db0cc6884, 0x7d0384872f5ec, 0x4fbb0fcb5a52} +{{0x74590149117e3, 0x4982edefcc606, 0x2ae3db0cc6884, 0x7d0384872f5ec, 0x4fbb0fcb5a52}} #endif #endif }; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec.h index e609c93a08..7cef95ca49 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/ec.h @@ -566,7 +566,7 @@ uint32_t ec_is_basis_four_torsion(const ec_basis_t *B, const ec_curve_t *E); * * @return 0xFFFFFFFF if the order is correct, 0 otherwise */ -static int +static inline int 
test_point_order_twof(const ec_point_t *P, const ec_curve_t *E, int t) { ec_point_t test; @@ -595,7 +595,7 @@ test_point_order_twof(const ec_point_t *P, const ec_curve_t *E, int t) * * @return 0xFFFFFFFF if the order is correct, 0 otherwise */ -static int +static inline int test_basis_order_twof(const ec_basis_t *B, const ec_curve_t *E, int t) { int check_P = test_point_order_twof(&B->P, E, t); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/encode_verification.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/encode_verification.c index fecdb9c259..8aa451d366 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/encode_verification.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/encode_verification.c @@ -99,36 +99,6 @@ ec_curve_from_bytes(ec_curve_t *curve, const byte_t *enc) return proj_from_bytes(&curve->A, &curve->C, enc); } -static byte_t * -ec_point_to_bytes(byte_t *enc, const ec_point_t *point) -{ - return proj_to_bytes(enc, &point->x, &point->z); -} - -static const byte_t * -ec_point_from_bytes(ec_point_t *point, const byte_t *enc) -{ - return proj_from_bytes(&point->x, &point->z, enc); -} - -static byte_t * -ec_basis_to_bytes(byte_t *enc, const ec_basis_t *basis) -{ - enc = ec_point_to_bytes(enc, &basis->P); - enc = ec_point_to_bytes(enc, &basis->Q); - enc = ec_point_to_bytes(enc, &basis->PmQ); - return enc; -} - -static const byte_t * -ec_basis_from_bytes(ec_basis_t *basis, const byte_t *enc) -{ - enc = ec_point_from_bytes(&basis->P, enc); - enc = ec_point_from_bytes(&basis->Q, enc); - enc = ec_point_from_bytes(&basis->PmQ, enc); - return enc; -} - // public API byte_t * diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c index 1a93e36455..7993e79f8c 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/endomorphism_action.c @@ -4,261 +4,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if 
defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x199, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6} +{{0x199, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6}} #elif RADIX == 32 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x19, 0x0, 0x0, 0x300000000000000} +{{0x19, 0x0, 0x0, 0x300000000000000}} #else -{0xc, 0x0, 0x0, 0x0, 0x400000000000} +{{0xc, 0x0, 0x0, 0x0, 0x400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x107, 0xc, 0x1890, 0xf2a, 0x52b, 0xb68, 0x152d, 0xa4c, 0x1054, 0x642, 0x36a, 0x6f8, 0x7ad, 0x146c, 0x1d66, 0x1b67, 0x236, 0x10d, 0x1933, 0x3} +{{0x107, 0xc, 0x1890, 0xf2a, 0x52b, 0xb68, 0x152d, 0xa4c, 0x1054, 0x642, 0x36a, 0x6f8, 0x7ad, 0x146c, 0x1d66, 0x1b67, 0x236, 0x10d, 0x1933, 0x3}} #elif RADIX == 32 -{0x3020e, 0xb795624, 0x5ab6829, 0x1514995, 0x1b5190a, 0x187ad37c, 0x19facd46, 0x8688db6, 0x3c998} +{{0x3020e, 0xb795624, 0x5ab6829, 0x1514995, 0x1b5190a, 0x187ad37c, 0x19facd46, 0x8688db6, 0x3c998}} #elif RADIX == 64 #if 
defined(SQISIGN_GF_IMPL_BROADWELL) -{0x52b795624001810, 0x8c8505452654b56d, 0xf59a8d87ad37c0da, 0x24e4cc21a236db3} +{{0x52b795624001810, 0x8c8505452654b56d, 0xf59a8d87ad37c0da, 0x24e4cc21a236db3}} #else -{0x5bcab12000c08, 0x452654b56d052, 0x26f81b5190a0a, 0x36cfd66a361eb, 0x12726610d11b} +{{0x5bcab12000c08, 0x452654b56d052, 0x26f81b5190a0a, 0x36cfd66a361eb, 0x12726610d11b}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1f87, 0x83e, 0x32e, 0xe58, 0xd9d, 0x1416, 0x752, 0x13b4, 0x1efa, 0xe62, 0x12f5, 0x1907, 0x1814, 0x1ddd, 0x1aa6, 0x1420, 0x2cd, 0x1431, 0x1be2, 0x7} +{{0x1f87, 0x83e, 0x32e, 0xe58, 0xd9d, 0x1416, 0x752, 0x13b4, 0x1efa, 0xe62, 0x12f5, 0x1907, 0x1814, 0x1ddd, 0x1aa6, 0x1420, 0x2cd, 0x1431, 0x1be2, 0x7}} #elif RADIX == 32 -{0x120fbf0f, 0x1d72c0cb, 0xa54166c, 0x1bea7687, 0x197ab98b, 0x1b814c83, 0x8354ddd, 0x188b368, 0x2df15} +{{0x120fbf0f, 0x1d72c0cb, 0xa54166c, 0x1bea7687, 0x197ab98b, 0x1b814c83, 0x8354ddd, 0x188b368, 0x2df15}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xcd9d72c0cb907df8, 0x5cc5efa9da1d4a82, 0x6a9bbbb814c83cbd, 0x26ef8a8622cda10} +{{0xcd9d72c0cb907df8, 0x5cc5efa9da1d4a82, 0x6a9bbbb814c83cbd, 0x26ef8a8622cda10}} #else -{0x6b96065c83efc, 0x29da1d4a82cd9, 0x190797ab98bdf, 0x6841aa6eeee05, 0x1377c5431166} +{{0x6b96065c83efc, 0x29da1d4a82cd9, 0x190797ab98bdf, 0x6841aa6eeee05, 0x1377c5431166}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x5ff, 0x1783, 0xadc, 0x775, 0xad4, 0x593, 0xb4c, 0x21e, 0x1cb2, 0x13d8, 0x179f, 0x680, 0x1a9c, 0x1824, 0x118e, 0x13d9, 0x24, 0x1956, 0x1dd2, 0x9} +{{0x5ff, 0x1783, 0xadc, 0x775, 0xad4, 0x593, 0xb4c, 0x21e, 0x1cb2, 0x13d8, 0x179f, 0x680, 0x1a9c, 0x1824, 0x118e, 0x13d9, 0x24, 0x1956, 0x1dd2, 0x9}} #elif RADIX == 32 -{0x5e0cbff, 0x143baab7, 0x9859356, 0x12c843cb, 0xbcfcf63, 0x9a9c340, 0x16631d82, 0xab00927, 0x4ee96} +{{0x5e0cbff, 0x143baab7, 0x9859356, 0x12c843cb, 0xbcfcf63, 0x9a9c340, 0x16631d82, 0xab00927, 0x4ee96}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6ad43baab72f065f, 0xe7b1cb210f2d30b2, 0xc63b049a9c3405e7, 0x4ff74b2ac0249ec} +{{0x6ad43baab72f065f, 0xe7b1cb210f2d30b2, 0xc63b049a9c3405e7, 0x4ff74b2ac0249ec}} #else -{0x21dd55b97832f, 0x210f2d30b26ad, 0x680bcfcf6396, 0x27b318ec126a7, 0x4ffba5956012} +{{0x21dd55b97832f, 0x210f2d30b26ad, 0x680bcfcf6396, 0x27b318ec126a7, 0x4ffba5956012}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1c7f, 0x1117, 0xa4, 0x1164, 0x6e, 0x1e63, 0x1b7b, 0x1305, 0x424, 
0x131a, 0x1b61, 0xae3, 0x17b1, 0xe5e, 0x1848, 0x1e81, 0x14a5, 0x1cb5, 0x1d87, 0x8} +{{0x1c7f, 0x1117, 0xa4, 0x1164, 0x6e, 0x1e63, 0x1b7b, 0x1305, 0x424, 0x131a, 0x1b61, 0xae3, 0x17b1, 0xe5e, 0x1848, 0x1e81, 0x14a5, 0x1cb5, 0x1d87, 0x8}} #elif RADIX == 32 -{0x445f8ff, 0xe8b2029, 0xf7e6303, 0x109260bb, 0x1db0cc68, 0x1d7b1571, 0x7090e5, 0x5ad297d, 0x3ec3f} +{{0x445f8ff, 0xe8b2029, 0xf7e6303, 0x109260bb, 0x1db0cc68, 0x1d7b1571, 0x7090e5, 0x5ad297d, 0x3ec3f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x606e8b2029222fc7, 0x6634424982edefcc, 0xe121cbd7b1571ed8, 0x4f761f96b4a5f40} +{{0x606e8b2029222fc7, 0x6634424982edefcc, 0xe121cbd7b1571ed8, 0x4f761f96b4a5f40}} #else -{0x74590149117e3, 0x4982edefcc606, 0x2ae3db0cc6884, 0x7d0384872f5ec, 0x4fbb0fcb5a52} +{{0x74590149117e3, 0x4982edefcc606, 0x2ae3db0cc6884, 0x7d0384872f5ec, 0x4fbb0fcb5a52}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x342, 0xfb7, 0xed, 0x1d80, 0x17f1, 0x4a2, 0x1c26, 0xb96, 0x1367, 0x3dc, 0x1624, 0x1f2a, 0x5e, 0x1cab, 0x27, 0x1e89, 0x1293, 0x1e24, 0x417, 0x5} +{{0x342, 0xfb7, 0xed, 0x1d80, 0x17f1, 0x4a2, 0x1c26, 0xb96, 0x1367, 0x3dc, 0x1624, 0x1f2a, 0x5e, 0x1cab, 0x27, 0x1e89, 0x1293, 0x1e24, 0x417, 0x5}} #elif RADIX == 32 -{0xbedc685, 0x11ec003b, 0x4c4a2bf, 0xd9d72dc, 0xb120f72, 0x1605ef95, 0x2404fca, 0x1124a4fd, 0x20bf} +{{0xbedc685, 0x11ec003b, 0x4c4a2bf, 0xd9d72dc, 0xb120f72, 0x1605ef95, 0x2404fca, 0x1124a4fd, 0x20bf}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x57f1ec003b5f6e34, 0x7b93675cb709894, 0x809f95605ef95589, 0xc905fc49293f44} +{{0x57f1ec003b5f6e34, 0x7b93675cb709894, 0x809f95605ef95589, 0xc905fc49293f44}} #else -{0xf6001dafb71a, 0x75cb70989457f, 0x5f2ab120f726c, 0x7d12027e55817, 0x6482fe24949} +{{0xf6001dafb71a, 0x75cb70989457f, 0x5f2ab120f726c, 0x7d12027e55817, 0x6482fe24949}} #endif #endif , #if 0 #elif RADIX == 16 -{0xf3c, 0x1d21, 0xd78, 0xe8e, 0x1f3c, 0x11b, 0x12c, 0x1851, 0x19b1, 0xd9, 0xf3f, 0x759, 0xf47, 0x1e88, 0x56e, 0x8ef, 0x116e, 0x1fa1, 0x1199, 0x0} +{{0xf3c, 0x1d21, 0xd78, 0xe8e, 0x1f3c, 0x11b, 0x12c, 0x1851, 0x19b1, 0xd9, 0xf3f, 0x759, 0xf47, 0x1e88, 0x56e, 0x8ef, 0x116e, 0x1fa1, 0x1199, 0x0}} #elif RADIX == 32 -{0x7485e78, 0x1c74735e, 0x5811bf9, 0x6c70a21, 0x179f8367, 0x10f473ac, 0x1bcadde8, 0x1d0c5b91, 0x8ccf} +{{0x7485e78, 0x1c74735e, 0x5811bf9, 0x6c70a21, 0x179f8367, 0x10f473ac, 0x1bcadde8, 0x1d0c5b91, 0x8ccf}} #elif RADIX == 64 #if 
defined(SQISIGN_GF_IMPL_BROADWELL) -{0x7f3c74735e3a42f3, 0xc1b39b1c2884b023, 0x95bbd10f473acbcf, 0x3c4667f4316e477} +{{0x7f3c74735e3a42f3, 0xc1b39b1c2884b023, 0x95bbd10f473acbcf, 0x3c4667f4316e477}} #else -{0x63a39af1d2179, 0x1c2884b0237f3, 0x675979f836736, 0x11de56ef443d1, 0x462333fa18b7} +{{0x63a39af1d2179, 0x1c2884b0237f3, 0x675979f836736, 0x11de56ef443d1, 0x462333fa18b7}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -480,261 +480,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x19e1, 0x1d98, 0x1de9, 0x1dfc, 0x922, 0x1fb8, 0x476, 0xd05, 0xc85, 0x1788, 0x1967, 0x155d, 0x1f93, 0x629, 0x188f, 0x119, 0x1f6f, 0x241, 0x1378, 0x0} +{{0x19e1, 0x1d98, 0x1de9, 0x1dfc, 0x922, 0x1fb8, 0x476, 0xd05, 0xc85, 0x1788, 0x1967, 0x155d, 0x1f93, 0x629, 0x188f, 0x119, 0x1f6f, 0x241, 0x1378, 0x0}} #elif RADIX == 32 -{0xf6633c2, 0x2efe77a, 0xedfb849, 0x1215a0a4, 0x1cb3de21, 0x13f93aae, 0x6711e62, 0x120fdbc2, 0x9bc0} +{{0xf6633c2, 0x2efe77a, 0xedfb849, 0x1215a0a4, 0x1cb3de21, 0x13f93aae, 0x6711e62, 0x120fdbc2, 0x9bc0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x922efe77a7b319e, 0xef10c8568291dbf7, 0xe23cc53f93aaee59, 0x54de0483f6f08c} +{{0x922efe77a7b319e, 0xef10c8568291dbf7, 0xe23cc53f93aaee59, 0x54de0483f6f08c}} #else -{0x177f3bd3d98cf, 0x568291dbf7092, 0x755dcb3de2190, 0x423388f314fe4, 0x2a6f0241fb7} +{{0x177f3bd3d98cf, 0x568291dbf7092, 0x755dcb3de2190, 0x423388f314fe4, 0x2a6f0241fb7}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 
0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x811, 0xf66, 0x77a, 0x177f, 0x248, 0x17ee, 0x91d, 0xb41, 0x321, 0x1de2, 0xe59, 0x1d57, 0xfe4, 0x198a, 0xe23, 0x1846, 0xfdb, 0x90, 0x14de, 0x8} +{{0x811, 0xf66, 0x77a, 0x177f, 0x248, 0x17ee, 0x91d, 0xb41, 0x321, 0x1de2, 0xe59, 0x1d57, 0xfe4, 0x198a, 0xe23, 0x1846, 0xfdb, 0x90, 0x14de, 0x8}} #elif RADIX == 32 -{0x13d99023, 0x8bbf9de, 0x3b7ee12, 0xc856829, 0x172cf788, 0x14fe4eab, 0x119c4798, 0x483f6f0, 0x3a6f0} +{{0x13d99023, 0x8bbf9de, 0x3b7ee12, 0xc856829, 0x172cf788, 0x14fe4eab, 0x119c4798, 0x483f6f0, 0x3a6f0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xc248bbf9de9ecc81, 0x7bc43215a0a476fd, 0x388f314fe4eabb96, 0x95378120fdbc23} +{{0xc248bbf9de9ecc81, 0x7bc43215a0a476fd, 0x388f314fe4eabb96, 0x95378120fdbc23}} #else -{0x45dfcef4f6640, 0x15a0a476fdc24, 0x1d5772cf78864, 0x708ce23cc53f9, 0x2ca9bc0907ed} +{{0x45dfcef4f6640, 0x15a0a476fdc24, 0x1d5772cf78864, 0x708ce23cc53f9, 0x2ca9bc0907ed}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x869, 0x197b, 0xcdb, 0x1d89, 0xf9b, 0x1d79, 0x18ec, 0xafe, 0x1d41, 0x77, 0x9d4, 0x1a3f, 0x2b, 0x46d, 0x173e, 0xedd, 0x172, 0x1c77, 0x8a6, 0x8} +{{0x869, 0x197b, 0xcdb, 0x1d89, 0xf9b, 0x1d79, 0x18ec, 0xafe, 0x1d41, 
0x77, 0x9d4, 0x1a3f, 0x2b, 0x46d, 0x173e, 0xedd, 0x172, 0x1c77, 0x8a6, 0x8}} #elif RADIX == 32 -{0x1e5ed0d3, 0x1bec4b36, 0x1d9d797c, 0x15055fd8, 0x14ea01df, 0x1a02bd1f, 0x176e7c46, 0x3b85c9d, 0x34537} +{{0x1e5ed0d3, 0x1bec4b36, 0x1d9d797c, 0x15055fd8, 0x14ea01df, 0x1a02bd1f, 0x176e7c46, 0x3b85c9d, 0x34537}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x2f9bec4b36f2f686, 0xefd4157f63b3af, 0xdcf88da02bd1fa75, 0x31229b8ee17276e} +{{0x2f9bec4b36f2f686, 0xefd4157f63b3af, 0xdcf88da02bd1fa75, 0x31229b8ee17276e}} #else -{0x5f6259b797b43, 0x157f63b3af2f9, 0x7a3f4ea01dfa8, 0x1dbb73e23680a, 0x18914dc770b9} +{{0x5f6259b797b43, 0x157f63b3af2f9, 0x7a3f4ea01dfa8, 0x1dbb73e23680a, 0x18914dc770b9}} #endif #endif , #if 0 #elif RADIX == 16 -{0x124b, 0xed4, 0x1706, 0x32d, 0x1541, 0x11b8, 0x2b0, 0xbe4, 0x1ee8, 0x1a3c, 0x16e3, 0x1d25, 0x19bb, 0xb63, 0x1fc1, 0x5fa, 0xf03, 0xfa, 0x1ec, 0x9} +{{0x124b, 0xed4, 0x1706, 0x32d, 0x1541, 0x11b8, 0x2b0, 0xbe4, 0x1ee8, 0x1a3c, 0x16e3, 0x1d25, 0x19bb, 0xb63, 0x1fc1, 0x5fa, 0xf03, 0xfa, 0x1ec, 0x9}} #elif RADIX == 32 -{0x13b52497, 0x1196dc1, 0x1611b8aa, 0x1ba17c82, 0x1b71e8f3, 0x79bbe92, 0x1ebf82b6, 0x7d3c0cb, 0x40f60} +{{0x13b52497, 0x1196dc1, 0x1611b8aa, 0x1ba17c82, 0x1b71e8f3, 0x79bbe92, 0x1ebf82b6, 0x7d3c0cb, 0x40f60}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x1541196dc19da924, 0xf479ee85f20ac237, 0x7f056c79bbe92db8, 0x3b87b01f4f032fd} +{{0x1541196dc19da924, 0xf479ee85f20ac237, 0x7f056c79bbe92db8, 0x3b87b01f4f032fd}} #else -{0x8cb6e0ced492, 0x5f20ac237154, 0x7d25b71e8f3dd, 0x4bf5fc15b1e6e, 0x1dc3d80fa781} +{{0x8cb6e0ced492, 0x5f20ac237154, 0x7d25b71e8f3dd, 0x4bf5fc15b1e6e, 0x1dc3d80fa781}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1e71, 0xd67, 0x13da, 0x19eb, 0x137a, 0x1d27, 0x1ba7, 0x1996, 0x755, 0xe3d, 0x1139, 0x1764, 0x18ac, 0x1020, 0x3c4, 0x150e, 0x1ffd, 0x14fe, 0xa16, 0x6} +{{0x1e71, 0xd67, 0x13da, 0x19eb, 0x137a, 0x1d27, 0x1ba7, 0x1996, 0x755, 0xe3d, 0x1139, 0x1764, 0x18ac, 0x1020, 0x3c4, 0x150e, 0x1ffd, 0x14fe, 0xa16, 0x6}} #elif RADIX == 32 -{0x1359fce3, 0x1acf5cf6, 0x14fd279b, 0x1d5732db, 0x89cb8f4, 0x18acbb2, 0x3878902, 0x7f7ff6a, 0x150b5} +{{0x1359fce3, 0x1acf5cf6, 0x14fd279b, 0x1d5732db, 0x89cb8f4, 0x18acbb2, 0x3878902, 0x7f7ff6a, 0x150b5}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf37acf5cf69acfe7, 0x5c7a755ccb6e9fa4, 0xf120418acbb244e, 0x8285a9fdffda87} +{{0xf37acf5cf69acfe7, 0x5c7a755ccb6e9fa4, 
0xf120418acbb244e, 0x8285a9fdffda87}} #else -{0x567ae7b4d67f3, 0x5ccb6e9fa4f37, 0x176489cb8f4ea, 0x6a1c3c481062b, 0x2c142d4feffe} +{{0x567ae7b4d67f3, 0x5ccb6e9fa4f37, 0x176489cb8f4ea, 0x6a1c3c481062b, 0x2c142d4feffe}} #endif #endif , #if 0 #elif RADIX == 16 -{0x13ec, 0x10a3, 0x1e69, 0x106f, 0x619, 0x1cb5, 0x9aa, 0x362, 0x53a, 0x1af5, 0x1bae, 0x60a, 0x2a4, 0x448, 0x3d0, 0x535, 0xeb1, 0x1a6e, 0x978, 0x5} +{{0x13ec, 0x10a3, 0x1e69, 0x106f, 0x619, 0x1cb5, 0x9aa, 0x362, 0x53a, 0x1af5, 0x1bae, 0x60a, 0x2a4, 0x448, 0x3d0, 0x535, 0xeb1, 0x1a6e, 0x978, 0x5}} #elif RADIX == 32 -{0xc28e7d9, 0x19837f9a, 0x155cb530, 0x14e86c49, 0xdd76bd4, 0x102a4305, 0xd47a044, 0x1373ac4a, 0x4bc6} +{{0xc28e7d9, 0x19837f9a, 0x155cb530, 0x14e86c49, 0xdd76bd4, 0x102a4305, 0xd47a044, 0x1373ac4a, 0x4bc6}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xa619837f9a61473e, 0xb5ea53a1b126ab96, 0x8f408902a43056eb, 0x3ea5e34dceb129a} +{{0xa619837f9a61473e, 0xb5ea53a1b126ab96, 0x8f408902a43056eb, 0x3ea5e34dceb129a}} #else -{0x4c1bfcd30a39f, 0x21b126ab96a61, 0x60add76bd4a7, 0x4a6a3d02240a9, 0x1f52f1a6e758} +{{0x4c1bfcd30a39f, 0x21b126ab96a61, 0x60add76bd4a7, 0x4a6a3d02240a9, 0x1f52f1a6e758}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x77a, 0x201, 0x168d, 0x8fe, 0x780, 0x1ccb, 0x52b, 0x1c83, 0x18dd, 0xcef, 0x11f5, 0x1446, 0x301, 0xb63, 0xe3f, 0x1b72, 0x1, 0x1da9, 0x1281, 0x8} +{{0x77a, 0x201, 0x168d, 0x8fe, 0x780, 0x1ccb, 0x52b, 0x1c83, 0x18dd, 0xcef, 0x11f5, 0x1446, 0x301, 0xb63, 0xe3f, 0x1b72, 0x1, 0x1da9, 0x1281, 0x8}} #elif RADIX == 32 -{0x8804ef5, 0x47f5a3, 0x57ccb3c, 0x3779065, 0x8fab3bf, 0x6301a23, 0x1c9c7eb6, 0xd480076, 0x3940f} +{{0x8804ef5, 0x47f5a3, 0x57ccb3c, 0x3779065, 0x8fab3bf, 0x6301a23, 0x1c9c7eb6, 0xd480076, 0x3940f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x678047f5a3440277, 0x59df8dde4194af99, 0x38fd6c6301a2347d, 0x364a07b52001db9} +{{0x678047f5a3440277, 0x59df8dde4194af99, 0x38fd6c6301a2347d, 0x364a07b52001db9}} #else -{0x23fad1a2013b, 0x5e4194af99678, 0x34468fab3bf1b, 0x76e4e3f5b18c0, 0x432503da9000} +{{0x23fad1a2013b, 0x5e4194af99678, 0x34468fab3bf1b, 0x76e4e3f5b18c0, 0x432503da9000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1, 0xb39, 0x969, 0x1324, 0xbe6, 0x86e, 0x1021, 0x29a, 0x1ff0, 0xd23, 0x7d5, 0x72a, 0x1e33, 0x1fd9, 0x10af, 0x15bc, 0x1d56, 0x928, 0x1d49, 0x0} +{{0x1, 0xb39, 0x969, 0x1324, 0xbe6, 0x86e, 0x1021, 0x29a, 0x1ff0, 0xd23, 0x7d5, 0x72a, 0x1e33, 0x1fd9, 
0x10af, 0x15bc, 0x1d56, 0x928, 0x1d49, 0x0}} #elif RADIX == 32 -{0xace4002, 0x699225a, 0x4286e5f, 0x1fc05350, 0x3eab48f, 0x13e33395, 0xf215ffd, 0x94755ab, 0xea4a} +{{0xace4002, 0x699225a, 0x4286e5f, 0x1fc05350, 0x3eab48f, 0x13e33395, 0xf215ffd, 0x94755ab, 0xea4a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xcbe699225a567200, 0x5a47ff014d40850d, 0x42bffb3e333951f5, 0x57525251d56ade} +{{0xcbe699225a567200, 0x5a47ff014d40850d, 0x42bffb3e333951f5, 0x57525251d56ade}} #else -{0x34c912d2b3900, 0x14d40850dcbe, 0x672a3eab48ffe, 0x2b790affecf8c, 0x2ba92928eab} +{{0x34c912d2b3900, 0x14d40850dcbe, 0x672a3eab48ffe, 0x2b790affecf8c, 0x2ba92928eab}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -956,261 +956,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x13f5, 0x8b7, 0x1873, 0x144a, 0x1a89, 0x13f9, 0xd49, 0x6f2, 0x3bb, 0x102, 0x1ecb, 0x180b, 0x4d5, 0x608, 0x119, 0x5e6, 0x751, 0x961, 0xd37, 0x5} +{{0x13f5, 0x8b7, 0x1873, 0x144a, 0x1a89, 0x13f9, 0xd49, 0x6f2, 0x3bb, 0x102, 0x1ecb, 0x180b, 0x4d5, 0x608, 0x119, 0x5e6, 0x751, 0x961, 0xd37, 0x5}} #elif RADIX == 32 -{0x1a2de7eb, 0x9a2561c, 0x933f9d4, 0xeecde4d, 0x1f658408, 0x104d5c05, 0x19823260, 0xb09d44b, 0x69ba} +{{0x1a2de7eb, 0x9a2561c, 0x933f9d4, 0xeecde4d, 0x1f658408, 0x104d5c05, 0x19823260, 0xb09d44b, 0x69ba}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3a89a2561cd16f3f, 0xc2043bb37935267f, 0x464c104d5c05fb2, 0x1bb4dd2c27512f3} +{{0x3a89a2561cd16f3f, 0xc2043bb37935267f, 0x464c104d5c05fb2, 0x1bb4dd2c27512f3}} #else -{0x4d12b0e68b79f, 0x337935267f3a8, 0x380bf65840877, 0x4bcc119304135, 0x35da6e9613a8} +{{0x4d12b0e68b79f, 0x337935267f3a8, 0x380bf65840877, 0x4bcc119304135, 0x35da6e9613a8}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x1e96, 0x1a2d, 0x161c, 0xd12, 0xea2, 0xcfe, 0x1352, 0x19bc, 0x10ee, 0x1840, 0x1fb2, 0xe02, 0x135, 0x982, 0x1046, 0x979, 0x9d4, 0x1a58, 0x1b4d, 0x9} +{{0x1e96, 0x1a2d, 0x161c, 0xd12, 0xea2, 0xcfe, 0x1352, 0x19bc, 0x10ee, 0x1840, 0x1fb2, 0xe02, 0x135, 0x982, 0x1046, 0x979, 0x9d4, 0x1a58, 0x1b4d, 0x9}} #elif RADIX == 32 -{0x68b7d2d, 0x2689587, 0xa4cfe75, 0x3bb3793, 0xfd96102, 0x4135701, 0x1e608c98, 0x12c27512, 0x4da6e} +{{0x68b7d2d, 0x2689587, 0xa4cfe75, 0x3bb3793, 0xfd96102, 0x4135701, 0x1e608c98, 0x12c27512, 0x4da6e}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xcea2689587345be9, 0xb0810eecde4d499f, 0xc1193041357017ec, 0x22ed374b09d44bc} +{{0xcea2689587345be9, 0xb0810eecde4d499f, 0xc1193041357017ec, 0x22ed374b09d44bc}} #else -{0x1344ac39a2df4, 0x6cde4d499fcea, 0x2e02fd961021d, 0x12f30464c104d, 0x39769ba584ea} +{{0x1344ac39a2df4, 0x6cde4d499fcea, 0x2e02fd961021d, 0x12f30464c104d, 0x39769ba584ea}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 
0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0xa82, 0x1d2d, 0x15b8, 0x404, 0x1a32, 0xaf9, 0xa86, 0xddf, 0x14bf, 0x100c, 0xc42, 0xa89, 0x1df, 0x82f, 0x1f07, 0x782, 0x664, 0x1ba5, 0x5d7, 0x2} +{{0xa82, 0x1d2d, 0x15b8, 0x404, 0x1a32, 0xaf9, 0xa86, 0xddf, 0x14bf, 0x100c, 0xc42, 0xa89, 0x1df, 0x82f, 0x1f07, 0x782, 0x664, 0x1ba5, 0x5d7, 0x2}} #elif RADIX == 32 -{0x74b5504, 0x1220256e, 0x10caf9d1, 0x12fdbbea, 0x16214032, 0x1e1df544, 0xbe0e82, 0x1d29990f, 0x22ebe} +{{0x74b5504, 0x1220256e, 0x10caf9d1, 0x12fdbbea, 0x16214032, 0x1e1df544, 0xbe0e82, 0x1d29990f, 0x22ebe}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3a3220256e3a5aa8, 0xa0194bf6efaa195f, 0x7c1d05e1df544b10, 0xb175f74a6643c1} +{{0x3a3220256e3a5aa8, 0xa0194bf6efaa195f, 0x7c1d05e1df544b10, 0xb175f74a6643c1}} #else -{0x11012b71d2d54, 0x76efaa195f3a3, 0x6a89621403297, 0xf05f07417877, 0x58bafba5332} +{{0x11012b71d2d54, 0x76efaa195f3a3, 0x6a89621403297, 0xf05f07417877, 0x58bafba5332}} #endif #endif , #if 0 #elif RADIX == 16 -{0x5a1, 0x46a, 0x17ab, 0x1cfa, 0x547, 0x1b9c, 0xda5, 0x141e, 0x216, 0x1f49, 0xaca, 0x15a1, 0xfe0, 0x1afb, 0x1a47, 0x133d, 0x1887, 0x590, 0xbc2, 0x1} +{{0x5a1, 0x46a, 0x17ab, 0x1cfa, 0x547, 0x1b9c, 0xda5, 0x141e, 0x216, 0x1f49, 0xaca, 0x15a1, 0xfe0, 0x1afb, 0x1a47, 0x133d, 0x1887, 0x590, 0xbc2, 0x1}} #elif RADIX == 32 -{0x191a8b42, 0x7e7d5ea, 0x14bb9c2a, 0x85a83cd, 0x15657d24, 0x16fe0ad0, 0xf748faf, 0xc8621e6, 0x15e11} +{{0x191a8b42, 0x7e7d5ea, 0x14bb9c2a, 0x85a83cd, 0x15657d24, 0x16fe0ad0, 0xf748faf, 0xc8621e6, 0x15e11}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x8547e7d5eac8d45a, 0xbe92216a0f369773, 0xe91f5f6fe0ad0ab2, 0x5af08b2188799e} +{{0x8547e7d5eac8d45a, 0xbe92216a0f369773, 0xe91f5f6fe0ad0ab2, 0x5af08b2188799e}} #else -{0x3f3eaf5646a2d, 0x6a0f369773854, 0x15a15657d2442, 0x667ba47d7dbf8, 0x2d784590c43} +{{0x3f3eaf5646a2d, 0x6a0f369773854, 0x15a15657d2442, 0x667ba47d7dbf8, 0x2d784590c43}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1311, 0x910, 0x413, 0x1d16, 0x14f7, 0x19c9, 0x14d3, 0x1504, 0x776, 0x1c2c, 0x15b0, 0xc6e, 0x36b, 0x1777, 0x1ed2, 0xb34, 0x1281, 0x1281, 0xd0f, 0x4} +{{0x1311, 0x910, 0x413, 0x1d16, 0x14f7, 0x19c9, 0x14d3, 0x1504, 0x776, 0x1c2c, 0x15b0, 0xc6e, 0x36b, 0x1777, 0x1ed2, 0xb34, 0x1281, 0x1281, 0xd0f, 0x4}} #elif RADIX == 32 -{0x1a442622, 0x17e8b104, 0x1a79c9a7, 0x1ddaa094, 0xad870b0, 0xe36b637, 0xd3da577, 0x140ca056, 0x4687c} 
+{{0x1a442622, 0x17e8b104, 0x1a79c9a7, 0x1ddaa094, 0xad870b0, 0xe36b637, 0xd3da577, 0x140ca056, 0x4687c}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x34f7e8b104d22131, 0x3858776a82534f39, 0x7b4aeee36b63756c, 0x7343e50328159a} +{{0x34f7e8b104d22131, 0x3858776a82534f39, 0x7b4aeee36b63756c, 0x7343e50328159a}} #else -{0x3f45882691098, 0x6a82534f3934f, 0x6c6ead870b0ee, 0x5669ed2bbb8da, 0x2b9a1f281940} +{{0x3f45882691098, 0x6a82534f3934f, 0x6c6ead870b0ee, 0x5669ed2bbb8da, 0x2b9a1f281940}} #endif #endif , #if 0 #elif RADIX == 16 -{0x12d2, 0x6d8, 0x1e2c, 0x6f9, 0x5e8, 0x4e5, 0x32c, 0x58d, 0x1bda, 0x16f9, 0x8b5, 0x3c0, 0x10c, 0xb18, 0x450, 0x834, 0x3b7, 0x8d7, 0x15bf, 0x0} +{{0x12d2, 0x6d8, 0x1e2c, 0x6f9, 0x5e8, 0x4e5, 0x32c, 0x58d, 0x1bda, 0x16f9, 0x8b5, 0x3c0, 0x10c, 0xb18, 0x450, 0x834, 0x3b7, 0x8d7, 0x15bf, 0x0}} #elif RADIX == 32 -{0x1b625a4, 0x837cf8b, 0x584e52f, 0xf68b1a3, 0x45adbe7, 0x1010c1e0, 0xd08a0b1, 0x6b8edd0, 0xadfa} +{{0x1b625a4, 0x837cf8b, 0x584e52f, 0xf68b1a3, 0x45adbe7, 0x1010c1e0, 0xd08a0b1, 0x6b8edd0, 0xadfa}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xa5e837cf8b0db12d, 0x6df3bda2c68cb09c, 0x114163010c1e022d, 0xa56fd1ae3b741a} +{{0xa5e837cf8b0db12d, 0x6df3bda2c68cb09c, 0x114163010c1e022d, 0xa56fd1ae3b741a}} #else -{0x41be7c586d896, 0x22c68cb09ca5e, 0x3c045adbe77b, 0x506845058c043, 0x2d2b7e8d71db} +{{0x41be7c586d896, 0x22c68cb09ca5e, 0x3c045adbe77b, 0x506845058c043, 0x2d2b7e8d71db}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x5f, 0x444, 0x49e, 0xae7, 0x248, 0x1a37, 0x9b6, 0xc28, 0x464, 0x19b7, 0x1560, 0xd7a, 0x2e3, 0x81a, 0x6f5, 0x5f9, 0x1818, 0x164c, 0x1713, 0x7} +{{0x5f, 0x444, 0x49e, 0xae7, 0x248, 0x1a37, 0x9b6, 0xc28, 0x464, 0x19b7, 0x1560, 0xd7a, 0x2e3, 0x81a, 0x6f5, 0x5f9, 0x1818, 0x164c, 0x1713, 0x7}} #elif RADIX == 32 -{0x111100bf, 0x8573927, 0x16da3712, 0x11918509, 0xab066dc, 0x142e36bd, 0x1e4dea81, 0x1266060b, 0x2b89d} +{{0x111100bf, 0x8573927, 0x16da3712, 0x11918509, 0xab066dc, 0x142e36bd, 0x1e4dea81, 0x1266060b, 0x2b89d}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xe248573927888805, 0x336e46461426db46, 0x9bd50342e36bd558, 0x4edc4ec998182fc} +{{0xe248573927888805, 0x336e46461426db46, 0x9bd50342e36bd558, 0x4edc4ec998182fc}} #else -{0x42b9c93c44402, 0x461426db46e24, 0x6d7aab066dc8c, 0xbf26f540d0b8, 0x4f6e2764cc0c} +{{0x42b9c93c44402, 0x461426db46e24, 0x6d7aab066dc8c, 0xbf26f540d0b8, 0x4f6e2764cc0c}} #endif #endif 
, #if 0 #elif RADIX == 16 -{0x19b1, 0x1912, 0x1eb, 0x1cbc, 0x210, 0x17cf, 0x1b9e, 0x754, 0x38c, 0x816, 0x1431, 0x79a, 0xa57, 0x15ff, 0x756, 0xa60, 0x1064, 0x162f, 0x1e5e, 0x0} +{{0x19b1, 0x1912, 0x1eb, 0x1cbc, 0x210, 0x17cf, 0x1b9e, 0x754, 0x38c, 0x816, 0x1431, 0x79a, 0xa57, 0x15ff, 0x756, 0xa60, 0x1064, 0x162f, 0x1e5e, 0x0}} #elif RADIX == 32 -{0x1e44b362, 0x10e5e07a, 0x13d7cf10, 0xe30ea9b, 0xa18a058, 0x1ea573cd, 0x180ead5f, 0x117c1914, 0xf2f5} +{{0x1e44b362, 0x10e5e07a, 0x13d7cf10, 0xe30ea9b, 0xa18a058, 0x1ea573cd, 0x180ead5f, 0x117c1914, 0xf2f5}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xe210e5e07af2259b, 0x502c38c3aa6e7af9, 0x1d5abfea573cd50c, 0x5797ac5f064530} +{{0xe210e5e07af2259b, 0x502c38c3aa6e7af9, 0x1d5abfea573cd50c, 0x5797ac5f064530}} #else -{0x72f03d7912cd, 0x43aa6e7af9e21, 0x679aa18a05871, 0x14c0756affa95, 0x2abcbd62f832} +{{0x72f03d7912cd, 0x43aa6e7af9e21, 0x679aa18a05871, 0x14c0756affa95, 0x2abcbd62f832}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -1432,261 +1432,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0xa72, 0x186f, 0x81c, 0x105c, 0x151, 0x1451, 0x1b96, 0x4d1, 0x12be, 0x3bf, 0x996, 0x1130, 0x1460, 0x1ed8, 0xc2a, 0x1c23, 0x1ee, 0x3f4, 0xbe2, 0x9} +{{0xa72, 0x186f, 0x81c, 0x105c, 0x151, 0x1451, 0x1b96, 0x4d1, 0x12be, 0x3bf, 0x996, 0x1130, 0x1460, 0x1ed8, 0xc2a, 0x1c23, 0x1ee, 0x3f4, 0xbe2, 0x9}} #elif RADIX == 32 -{0x61bd4e5, 0x1182e207, 0x12d4510a, 0xaf89a3b, 0x4cb0efe, 0x11460898, 0x8d855ed, 0x1fa07bb8, 0x45f10} +{{0x61bd4e5, 0x1182e207, 0x12d4510a, 0xaf89a3b, 0x4cb0efe, 0x11460898, 0x8d855ed, 0x1fa07bb8, 0x45f10}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x215182e20730dea7, 0x877f2be268ee5a8a, 0xb0abdb1460898265, 0xeaf887e81eee11} +{{0x215182e20730dea7, 0x877f2be268ee5a8a, 0xb0abdb1460898265, 0xeaf887e81eee11}} #else -{0xc17103986f53, 0x6268ee5a8a215, 0x11304cb0efe57, 0x3846c2af6c518, 0x2f57c43f40f7} +{{0xc17103986f53, 0x6268ee5a8a215, 0x11304cb0efe57, 0x3846c2af6c518, 0x2f57c43f40f7}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if 
defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x1c36, 0x61b, 0x207, 0xc17, 0x854, 0x1514, 0xee5, 0x1134, 0x1caf, 0x10ef, 0x265, 0x44c, 0x518, 0x17b6, 0x1b0a, 0x1708, 0x7b, 0x10fd, 0xaf8, 0x3} +{{0x1c36, 0x61b, 0x207, 0xc17, 0x854, 0x1514, 0xee5, 0x1134, 0x1caf, 0x10ef, 0x265, 0x44c, 0x518, 0x17b6, 0x1b0a, 0x1708, 0x7b, 0x10fd, 0xaf8, 0x3}} #elif RADIX == 32 -{0x1986f86c, 0x1460b881, 0x1cb51442, 0x12be268e, 0x132c3bf, 0xc518226, 0x236157b, 0x7e81eee, 0x357c4} +{{0x1986f86c, 0x1460b881, 0x1cb51442, 0x12be268e, 0x132c3bf, 0xc518226, 0x236157b, 0x7e81eee, 0x357c4}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x885460b881cc37c3, 0x61dfcaf89a3b96a2, 0x6c2af6c518226099, 0x1fabe21fa07bb84} +{{0x885460b881cc37c3, 0x61dfcaf89a3b96a2, 0x6c2af6c518226099, 0x1fabe21fa07bb84}} #else -{0x2305c40e61be1, 0x789a3b96a2885, 0x44c132c3bf95, 0x6e11b0abdb146, 0x37d5f10fd03d} +{{0x2305c40e61be1, 0x789a3b96a2885, 0x44c132c3bf95, 0x6e11b0abdb146, 0x37d5f10fd03d}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x1c07, 0x15d6, 0x526, 0xde7, 0x149b, 0x719, 0x1786, 0x1272, 0x18b, 0x1bac, 0xf74, 0x1588, 0xe6f, 0x24c, 0x1204, 0x1e9d, 0x13bb, 0x1ccb, 0x78d, 0x9} +{{0x1c07, 0x15d6, 0x526, 0xde7, 0x149b, 0x719, 0x1786, 0x1272, 0x18b, 0x1bac, 0xf74, 0x1588, 0xe6f, 0x24c, 0x1204, 0x1e9d, 0x13bb, 0x1ccb, 0x78d, 0x9}} #elif RADIX == 32 -{0x1575b80f, 0x1b6f3949, 0x10c719a4, 0x62e4e57, 0x7ba6eb0, 0x18e6fac4, 0x7640824, 0x65ceefd, 0x43c6f} +{{0x1575b80f, 0x1b6f3949, 0x10c719a4, 0x62e4e57, 0x7ba6eb0, 0x18e6fac4, 0x7640824, 0x65ceefd, 0x43c6f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x349b6f3949abadc0, 0x375818b9395e18e3, 0xc810498e6fac43dd, 0x279e379973bbf4e} +{{0x349b6f3949abadc0, 0x375818b9395e18e3, 0xc810498e6fac43dd, 0x279e379973bbf4e}} #else -{0x5b79ca4d5d6e0, 0x39395e18e3349, 0x75887ba6eb031, 0x7d3b20412639b, 0x13cf1bccb9dd} +{{0x5b79ca4d5d6e0, 0x39395e18e3349, 0x75887ba6eb031, 0x7d3b20412639b, 0x13cf1bccb9dd}} #endif #endif , #if 0 #elif RADIX == 16 -{0xddf, 0x238, 0xe4b, 0x1958, 0xe6e, 0x1059, 0x133, 0x1e11, 0x5ae, 0x2ab, 0x1044, 0xdd, 0xe9d, 0x1aa8, 0x15e2, 0xc9b, 0xaa6, 0x3c8, 0x10ac, 0x0} +{{0xddf, 0x238, 0xe4b, 0x1958, 0xe6e, 0x1059, 0x133, 0x1e11, 0x5ae, 0x2ab, 0x1044, 0xdd, 0xe9d, 0x1aa8, 0x15e2, 0xc9b, 0xaa6, 0x3c8, 0x10ac, 0x0}} #elif RADIX == 32 -{0x188e1bbe, 0xecac392, 0x6705973, 0x16bbc221, 0x18220aac, 0x10e9d06e, 0x6ebc5aa, 0x1e42a999, 0x8560} +{{0x188e1bbe, 0xecac392, 0x6705973, 0x16bbc221, 0x18220aac, 0x10e9d06e, 0x6ebc5aa, 0x1e42a999, 0x8560}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x2e6ecac392c470dd, 0x5565aef0884ce0b, 0xd78b550e9d06ec11, 0x4b42b0790aa664d} +{{0x2e6ecac392c470dd, 0x5565aef0884ce0b, 0xd78b550e9d06ec11, 0x4b42b0790aa664d}} #else -{0x76561c962386e, 0x6f0884ce0b2e6, 0x20dd8220aacb5, 0x19375e2d543a7, 0x4da1583c8553} +{{0x76561c962386e, 0x6f0884ce0b2e6, 0x20dd8220aacb5, 0x19375e2d543a7, 0x4da1583c8553}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x192, 0x1c6d, 0x18a4, 0x152, 0x1aa9, 0xec4, 0x1be8, 0x1209, 0x7f, 0x797, 0x1295, 0x1433, 0x1a75, 0x15a, 
0x1d64, 0x146c, 0x12df, 0x10af, 0x188f, 0x1} +{{0x192, 0x1c6d, 0x18a4, 0x152, 0x1aa9, 0xec4, 0x1be8, 0x1209, 0x7f, 0x797, 0x1295, 0x1433, 0x1a75, 0x15a, 0x1d64, 0x146c, 0x12df, 0x10af, 0x188f, 0x1}} #elif RADIX == 32 -{0x71b4324, 0x90a9629, 0x1d0ec4d5, 0x1fe413b, 0x194a9e5c, 0x15a75a19, 0x1b3ac815, 0x57cb7e8, 0x1c47c} +{{0x71b4324, 0x90a9629, 0x1d0ec4d5, 0x1fe413b, 0x194a9e5c, 0x15a75a19, 0x1b3ac815, 0x57cb7e8, 0x1c47c}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x9aa90a962938da19, 0x4f2e07f904efa1d8, 0x75902b5a75a19ca5, 0xae23e15f2dfa36} +{{0x9aa90a962938da19, 0x4f2e07f904efa1d8, 0x75902b5a75a19ca5, 0xae23e15f2dfa36}} #else -{0x4854b149c6d0c, 0x7904efa1d89aa, 0x343394a9e5c0f, 0x68d9d640ad69d, 0x2d711f0af96f} +{{0x4854b149c6d0c, 0x7904efa1d89aa, 0x343394a9e5c0f, 0x68d9d640ad69d, 0x2d711f0af96f}} #endif #endif , #if 0 #elif RADIX == 16 -{0x129c, 0xe1d, 0x1bd3, 0xf2a, 0x937, 0xf81, 0xa47, 0x186b, 0x1bbe, 0x1c6d, 0x1edd, 0x1b51, 0xa10, 0x167a, 0x1f0b, 0x374, 0x720, 0x1547, 0x726, 0x1} +{{0x129c, 0xe1d, 0x1bd3, 0xf2a, 0x937, 0xf81, 0xa47, 0x186b, 0x1bbe, 0x1c6d, 0x1edd, 0x1b51, 0xa10, 0x167a, 0x1f0b, 0x374, 0x720, 0x1547, 0x726, 0x1}} #elif RADIX == 32 -{0x1b876538, 0x177956f4, 0x8ef8149, 0xefb0d6a, 0x1f6ef1b7, 0x14a10da8, 0x1d3e1767, 0xa39c806, 0x13935} +{{0x1b876538, 0x177956f4, 0x8ef8149, 0xefb0d6a, 0x1f6ef1b7, 0x14a10da8, 0x1d3e1767, 0xa39c806, 0x13935}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x29377956f4dc3b29, 0x78dbbbec35a91df0, 0x7c2ecf4a10da8fb7, 0x3c9c9aa8e7201ba} +{{0x29377956f4dc3b29, 0x78dbbbec35a91df0, 0x7c2ecf4a10da8fb7, 0x3c9c9aa8e7201ba}} #else -{0x3bcab7a6e1d94, 0x6c35a91df0293, 0x1b51f6ef1b777, 0x6e9f0bb3d284, 0x464e4d547390} +{{0x3bcab7a6e1d94, 0x6c35a91df0293, 0x1b51f6ef1b777, 0x6e9f0bb3d284, 0x464e4d547390}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x12cc, 0x495, 0x1a14, 0x1db0, 0xb66, 0x76a, 0x1a77, 0xaf6, 0x1656, 0x1ad7, 0xb35, 0x4b1, 0xffa, 0x37b, 0xabf, 0xa5c, 0xdc9, 0x1a74, 0x11c9, 0x8} +{{0x12cc, 0x495, 0x1a14, 0x1db0, 0xb66, 0x76a, 0x1a77, 0xaf6, 0x1656, 0x1ad7, 0xb35, 0x4b1, 0xffa, 0x37b, 0xabf, 0xa5c, 0xdc9, 0x1a74, 0x11c9, 0x8}} #elif RADIX == 32 -{0x1256599, 0x6ed8685, 0xee76a5b, 0x19595eda, 0x159aeb5e, 0x16ffa258, 0x17157e37, 0x13a37254, 0x38e4e} +{{0x1256599, 0x6ed8685, 0xee76a5b, 0x19595eda, 0x159aeb5e, 0x16ffa258, 0x17157e37, 0x13a37254, 0x38e4e}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) 
-{0x4b66ed8685092b2c, 0x75af65657b69dced, 0x2afc6f6ffa258acd, 0x4047274e8dc952e} +{{0x4b66ed8685092b2c, 0x75af65657b69dced, 0x2afc6f6ffa258acd, 0x4047274e8dc952e}} #else -{0x376c342849596, 0x657b69dced4b6, 0x44b159aeb5eca, 0x54b8abf1bdbfe, 0x202393a746e4} +{{0x376c342849596, 0x657b69dced4b6, 0x44b159aeb5eca, 0x54b8abf1bdbfe, 0x202393a746e4}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1379, 0x125e, 0x1c56, 0x1811, 0x144, 0x2a8, 0xbb3, 0x2ca, 0x6d2, 0x565, 0x91e, 0x1280, 0x1b4f, 0x51a, 0x1eb7, 0x35a, 0x14fe, 0x1b59, 0x182e, 0x2} +{{0x1379, 0x125e, 0x1c56, 0x1811, 0x144, 0x2a8, 0xbb3, 0x2ca, 0x6d2, 0x565, 0x91e, 0x1280, 0x1b4f, 0x51a, 0x1eb7, 0x35a, 0x14fe, 0x1b59, 0x182e, 0x2}} #elif RADIX == 32 -{0x1497a6f2, 0x4c08f15, 0x1662a80a, 0x1b48594b, 0x48f1594, 0x15b4f940, 0x16bd6e51, 0x1acd3f86, 0x2c176} +{{0x1497a6f2, 0x4c08f15, 0x1662a80a, 0x1b48594b, 0x48f1594, 0x15b4f940, 0x16bd6e51, 0x1acd3f86, 0x2c176}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x144c08f15a4bd37, 0x8aca6d21652ecc55, 0x7adca35b4f940247, 0x2e60bb6b34fe1ad} +{{0x144c08f15a4bd37, 0x8aca6d21652ecc55, 0x7adca35b4f940247, 0x2e60bb6b34fe1ad}} #else -{0x260478ad25e9b, 0x21652ecc55014, 0x728048f1594da, 0x6b5eb728d6d3, 0x3f305db59a7f} +{{0x260478ad25e9b, 0x21652ecc55014, 0x728048f1594da, 0x6b5eb728d6d3, 0x3f305db59a7f}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -1908,261 +1908,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x102e, 0x4c4, 0x1586, 0x1184, 0xa12, 0x9ce, 0x1866, 0x433, 0x5ef, 0x82a, 0x13a5, 0xbc6, 0x591, 0x175b, 0x10bc, 0xfa5, 0x109d, 0x8d4, 0x1325, 0x9} +{{0x102e, 0x4c4, 0x1586, 0x1184, 0xa12, 0x9ce, 0x1866, 0x433, 0x5ef, 0x82a, 0x13a5, 0xbc6, 0x591, 0x175b, 0x10bc, 0xfa5, 0x109d, 0x8d4, 0x1325, 0x9}} #elif RADIX == 32 -{0x1131205d, 0x128c2561, 0xcc9ce50, 0x17bc8678, 0x9d2a0a8, 0x165915e3, 0x9617975, 0x6a4275f, 0x4992a} +{{0x1131205d, 0x128c2561, 0xcc9ce50, 0x17bc8678, 0x9d2a0a8, 0x165915e3, 0x9617975, 0x6a4275f, 0x4992a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xca128c2561898902, 0x50545ef219e19939, 0xc2f2eb65915e34e9, 0x4acc951a909d7d2} +{{0xca128c2561898902, 0x50545ef219e19939, 0xc2f2eb65915e34e9, 0x4acc951a909d7d2}} #else -{0x14612b0c4c481, 0x7219e19939ca1, 0x2bc69d2a0a8bd, 0x5f4b0bcbad964, 0x25664a8d484e} +{{0x14612b0c4c481, 0x7219e19939ca1, 0x2bc69d2a0a8bd, 0x5f4b0bcbad964, 0x25664a8d484e}} #endif #endif , #if 0 #elif RADIX 
== 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x5a5, 0x1131, 0x561, 0x1461, 0x1284, 0x1273, 0x1e19, 0x190c, 0x117b, 0xa0a, 0x14e9, 0xaf1, 0x1964, 0x5d6, 0xc2f, 0xbe9, 0x427, 0xa35, 0xcc9, 0x3} +{{0x5a5, 0x1131, 0x561, 0x1461, 0x1284, 0x1273, 0x1e19, 0x190c, 0x117b, 0xa0a, 0x14e9, 0xaf1, 0x1964, 0x5d6, 0xc2f, 0xbe9, 0x427, 0xa35, 0xcc9, 0x3}} #elif RADIX == 32 -{0xc4c4b4a, 0x4a30958, 0x3327394, 0x5ef219e, 0x1a74a82a, 0xd964578, 0x1a585e5d, 0x11a909d7, 0x3664a} +{{0xc4c4b4a, 0x4a30958, 0x3327394, 0x5ef219e, 0x1a74a82a, 0xd964578, 0x1a585e5d, 0x11a909d7, 0x3664a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x7284a3095862625a, 0x541517bc8678664e, 0xb0bcbad964578d3a, 0x1ab32546a4275f4} +{{0x7284a3095862625a, 0x541517bc8678664e, 0xb0bcbad964578d3a, 0x1ab32546a4275f4}} #else -{0x25184ac31312d, 0x3c8678664e728, 0xaf1a74a82a2f, 0x57d2c2f2eb659, 0xd5992a35213} +{{0x25184ac31312d, 0x3c8678664e728, 0xaf1a74a82a2f, 0x57d2c2f2eb659, 0xd5992a35213}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 
0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x1b4a, 0xf6a, 0xadd, 0x302, 0x196b, 0x366, 0x1399, 0xe83, 0x1540, 0xcd, 0x169d, 0x1007, 0xfe6, 0x1fd2, 0xebb, 0x808, 0x1725, 0x1c1e, 0x1009, 0x8} +{{0x1b4a, 0xf6a, 0xadd, 0x302, 0x196b, 0x366, 0x1399, 0xe83, 0x1540, 0xcd, 0x169d, 0x1007, 0xfe6, 0x1fd2, 0xebb, 0x808, 0x1725, 0x1c1e, 0x1009, 0x8}} #elif RADIX == 32 -{0xbdab695, 0xb1812b7, 0x132366cb, 0x1501d073, 0x1b4e8336, 0x4fe6803, 0x21d77fd, 0xf5c950, 0x3804f} +{{0xbdab695, 0xb1812b7, 0x132366cb, 0x1501d073, 0x1b4e8336, 0x4fe6803, 0x21d77fd, 0xf5c950, 0x3804f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xd96b1812b75ed5b4, 0x419b540741ce646c, 0x3aeffa4fe6803da7, 0x36402783d725404} +{{0xd96b1812b75ed5b4, 0x419b540741ce646c, 0x3aeffa4fe6803da7, 0x36402783d725404}} #else -{0x58c095baf6ada, 0x741ce646cd96, 0x5007b4e8336a8, 0x5010ebbfe93f9, 0x1b2013c1eb92} +{{0x58c095baf6ada, 0x741ce646cd96, 0x5007b4e8336a8, 0x5010ebbfe93f9, 0x1b2013c1eb92}} #endif #endif , #if 0 #elif RADIX == 16 -{0x122a, 0x94e, 0x1927, 0x1701, 0x58e, 0x79, 0x134e, 0xecc, 0xa0f, 0x7be, 0xc39, 0xfb2, 0x1df0, 0x79a, 0x154a, 0x1a4a, 0x23f, 0x3de, 0x1be1, 0x9} +{{0x122a, 0x94e, 0x1927, 0x1701, 0x58e, 0x79, 0x134e, 0xecc, 0xa0f, 0x7be, 0xc39, 0xfb2, 0x1df0, 0x79a, 0x154a, 0x1a4a, 0x23f, 0x3de, 0x1be1, 0x9}} #elif RADIX == 32 -{0x1a53a455, 0xeb80e49, 0x9c0792c, 0x83dd993, 0x61c9ef9, 0x15df07d9, 0x12aa9479, 0x1ef08ff4, 0x4df08} +{{0x1a53a455, 0xeb80e49, 0x9c0792c, 0x83dd993, 0x61c9ef9, 0x15df07d9, 0x12aa9479, 0x1ef08ff4, 0x4df08}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x258eb80e49d29d22, 0x4f7ca0f7664d380f, 0x5528f35df07d930e, 0x36ef847bc23fd25} +{{0x258eb80e49d29d22, 0x4f7ca0f7664d380f, 0x5528f35df07d930e, 0x36ef847bc23fd25}} #else -{0x75c0724e94e91, 0x77664d380f258, 0xfb261c9ef941, 0x749554a3cd77c, 0x1b77c23de11f} +{{0x75c0724e94e91, 0x77664d380f258, 0xfb261c9ef941, 0x749554a3cd77c, 0x1b77c23de11f}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1943, 0x2e1, 0x677, 0x614, 0x19e, 0x11e6, 0xde2, 0x104d, 0x551, 0x1455, 0x1d7e, 0xdd, 0x15e0, 0x14c5, 0xeeb, 0x14b5, 0x168f, 0x1a03, 0xa9d, 0x4} +{{0x1943, 0x2e1, 0x677, 0x614, 0x19e, 0x11e6, 0xde2, 0x104d, 0x551, 0x1455, 0x1d7e, 0xdd, 0x15e0, 0x14c5, 0xeeb, 0x14b5, 0x168f, 0x1a03, 0xa9d, 0x4}} #elif RADIX == 32 -{0x18b87286, 0x1e30a19d, 0x1c51e60c, 0x154609ad, 0x1ebf5154, 0xb5e006e, 0xd5dd74c, 0x101da3e9, 0x454ee} +{{0x18b87286, 0x1e30a19d, 0x1c51e60c, 0x154609ad, 0x1ebf5154, 0xb5e006e, 0xd5dd74c, 0x101da3e9, 0x454ee}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xc19e30a19dc5c394, 0xa8aa551826b78a3c, 0xbbae98b5e006ef5f, 0x112a7740768fa5a} +{{0xc19e30a19dc5c394, 0xa8aa551826b78a3c, 0xbbae98b5e006ef5f, 0x112a7740768fa5a}} #else -{0x71850cee2e1ca, 0x1826b78a3cc19, 0xddebf5154aa, 0x696aeeba62d78, 0x8953ba03b47} +{{0x71850cee2e1ca, 0x1826b78a3cc19, 0xddebf5154aa, 0x696aeeba62d78, 0x8953ba03b47}} #endif #endif , #if 0 #elif RADIX == 16 -{0x512, 0xda9, 0x31a, 0x1711, 0x1b65, 0x9f0, 0xe54, 0x1d4a, 0xe1c, 0xc90, 0x1837, 0x1728, 0x15fa, 0xa40, 0xf21, 0x1b43, 0x1716, 0x1277, 0x11a8, 0x9} +{{0x512, 0xda9, 0x31a, 0x1711, 0x1b65, 0x9f0, 0xe54, 0x1d4a, 0xe1c, 0xc90, 0x1837, 0x1728, 0x15fa, 0xa40, 0xf21, 0x1b43, 0x1716, 0x1277, 0x11a8, 0x9}} #elif RADIX == 32 -{0x136a4a25, 0x5b888c6, 0xa89f0db, 0x1873a94e, 0xc1bb241, 0x15fab94, 0x10de42a4, 0x13bdc5b6, 0x48d44} +{{0x136a4a25, 0x5b888c6, 0xa89f0db, 0x1873a94e, 0xc1bb241, 0x15fab94, 0x10de42a4, 0x13bdc5b6, 0x48d44}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x1b65b888c69b5251, 0xd920e1cea539513e, 0xbc854815fab9460d, 0xec6a24ef716da1} +{{0x1b65b888c69b5251, 0xd920e1cea539513e, 0xbc854815fab9460d, 0xec6a24ef716da1}} #else -{0x2dc44634da928, 0x4ea539513e1b6, 0x5728c1bb241c3, 0x3686f2152057e, 0x2f6351277b8b} +{{0x2dc44634da928, 0x4ea539513e1b6, 0x5728c1bb241c3, 0x3686f2152057e, 0x2f6351277b8b}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x822, 0x1a13, 0x11d, 0x10e0, 0x2b9, 0x1d20, 0x19f9, 0x1dc2, 0x1770, 0x135e, 0x1c13, 0x1cba, 0x14df, 0x5c8, 0x1f31, 0x215, 0x16ed, 0x1f7a, 0xc6c, 0x5} +{{0x822, 0x1a13, 0x11d, 0x10e0, 0x2b9, 0x1d20, 0x19f9, 0x1dc2, 0x1770, 0x135e, 0x1c13, 0x1cba, 0x14df, 0x5c8, 
0x1f31, 0x215, 0x16ed, 0x1f7a, 0xc6c, 0x5}} #elif RADIX == 32 -{0xe84d045, 0x19870047, 0x1f3d2015, 0x1dc3b859, 0xe09cd7a, 0x114dfe5d, 0x57e625c, 0x1bd5bb44, 0x6367} +{{0xe84d045, 0x19870047, 0x1f3d2015, 0x1dc3b859, 0xe09cd7a, 0x114dfe5d, 0x57e625c, 0x1bd5bb44, 0x6367}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x2b9870047742682, 0xe6bd770ee167e7a4, 0xfcc4b914dfe5d704, 0xcb1b3ef56ed10a} +{{0x2b9870047742682, 0xe6bd770ee167e7a4, 0xfcc4b914dfe5d704, 0xcb1b3ef56ed10a}} #else -{0x4c38023ba1341, 0xee167e7a402b, 0x7cbae09cd7aee, 0x442bf312e4537, 0x658d9f7ab76} +{{0x4c38023ba1341, 0xee167e7a402b, 0x7cbae09cd7aee, 0x442bf312e4537, 0x658d9f7ab76}} #endif #endif , #if 0 #elif RADIX == 16 -{0x2cc, 0xd50, 0xeda, 0x1c3c, 0x8a6, 0x1659, 0xffb, 0x1cee, 0x1f14, 0x17fe, 0x1860, 0x427, 0x132c, 0x5c0, 0xb9f, 0x143d, 0x639, 0x19f0, 0x1551, 0x7} +{{0x2cc, 0xd50, 0xeda, 0x1c3c, 0x8a6, 0x1659, 0xffb, 0x1cee, 0x1f14, 0x17fe, 0x1860, 0x427, 0x132c, 0x5c0, 0xb9f, 0x143d, 0x639, 0x19f0, 0x1551, 0x7}} #elif RADIX == 32 -{0x13540599, 0x6e1e3b6, 0x1f765945, 0x1c539dcf, 0x1c305ffb, 0x132c213, 0xf573e5c, 0xf818e68, 0x2aa8e} +{{0x13540599, 0x6e1e3b6, 0x1f765945, 0x1c539dcf, 0x1c305ffb, 0x132c213, 0xf573e5c, 0xf818e68, 0x2aa8e}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x28a6e1e3b69aa02c, 0x2ffdf14e773feecb, 0xae7cb8132c213e18, 0x3fd5473e0639a1e} +{{0x28a6e1e3b69aa02c, 0x2ffdf14e773feecb, 0xae7cb8132c213e18, 0x3fd5473e0639a1e}} #else -{0x370f1db4d5016, 0x4e773feecb28a, 0x427c305ffbe2, 0x687ab9f2e04cb, 0x1feaa39f031c} +{{0x370f1db4d5016, 0x4e773feecb28a, 0x427c305ffbe2, 0x687ab9f2e04cb, 0x1feaa39f031c}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -2384,261 +2384,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x6b9, 0xd4c, 0x1d8d, 0x1f99, 0x1be4, 0x1f53, 0x5c1, 0x1937, 0x9c, 0xe68, 0x61e, 0x14e8, 0x352, 0x3d6, 0x174, 0x133, 0x18a6, 0x1b19, 0x94b, 0x9} +{{0x6b9, 0xd4c, 0x1d8d, 0x1f99, 0x1be4, 0x1f53, 0x5c1, 0x1937, 0x9c, 0xe68, 0x61e, 0x14e8, 0x352, 0x3d6, 0x174, 0x133, 0x18a6, 0x1b19, 0x94b, 0x9}} #elif RADIX == 32 -{0xb530d73, 0x4fccf63, 0x183f53df, 0x27326e5, 0x30f39a0, 0xc352a74, 0xcc2e83d, 0x18ce2982, 0x44a5e} +{{0xb530d73, 0x4fccf63, 0x183f53df, 0x27326e5, 0x30f39a0, 0xc352a74, 0xcc2e83d, 0x18ce2982, 0x44a5e}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x7be4fccf635a986b, 0x9cd009cc9b9707ea, 0x85d07ac352a74187, 
0x31a52f6338a6099} +{{0x7be4fccf635a986b, 0x9cd009cc9b9707ea, 0x85d07ac352a74187, 0x31a52f6338a6099}} #else -{0x27e67b1ad4c35, 0x4c9b9707ea7be, 0x54e830f39a013, 0x2661741eb0d4, 0x40d297b19c53} +{{0x27e67b1ad4c35, 0x4c9b9707ea7be, 0x54e830f39a013, 0x2661741eb0d4, 0x40d297b19c53}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x348, 0xb53, 0xf63, 0x7e6, 0x1ef9, 0xfd4, 0x1970, 0x64d, 0x27, 0x139a, 0x187, 0x153a, 0x10d4, 0xf5, 0x185d, 0x104c, 0xe29, 0x1ec6, 0x1a52, 0x0} +{{0x348, 0xb53, 0xf63, 0x7e6, 0x1ef9, 0xfd4, 0x1970, 0x64d, 0x27, 0x139a, 0x187, 0x153a, 0x10d4, 0xf5, 0x185d, 0x104c, 0xe29, 0x1ec6, 0x1a52, 0x0}} #elif RADIX == 32 -{0x1ad4c690, 0x193f33d8, 0xe0fd4f7, 0x9cc9b9, 0xc3ce68, 0xb0d4a9d, 0x1330ba0f, 0x16338a60, 0xd297} +{{0x1ad4c690, 0x193f33d8, 0xe0fd4f7, 0x9cc9b9, 0xc3ce68, 0xb0d4a9d, 0x1330ba0f, 0x16338a60, 0xd297}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x9ef93f33d8d6a634, 0xe734027326e5c1fa, 0x61741eb0d4a9d061, 0x28694bd8ce29826} +{{0x9ef93f33d8d6a634, 0xe734027326e5c1fa, 0x61741eb0d4a9d061, 0x28694bd8ce29826}} #else -{0x49f99ec6b531a, 0x7326e5c1fa9ef, 0x153a0c3ce6804, 0x609985d07ac35, 0x1434a5ec6714} +{{0x49f99ec6b531a, 0x7326e5c1fa9ef, 0x153a0c3ce6804, 0x609985d07ac35, 0x1434a5ec6714}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x18af, 0xb6e, 0x124d, 0xa49, 0xa8c, 0x11f5, 0xea9, 0x298, 0xa55, 0x1738, 0xb61, 0x2b9, 0x8a, 0x167a, 0x17e6, 0x2b0, 0x1290, 0x16ad, 0x1505, 0x2} +{{0x18af, 0xb6e, 0x124d, 0xa49, 0xa8c, 0x11f5, 0xea9, 0x298, 0xa55, 0x1738, 0xb61, 0x2b9, 0x8a, 0x167a, 0x17e6, 0x2b0, 0x1290, 0x16ad, 0x1505, 0x2}} #elif RADIX == 32 -{0xadbb15e, 0xc524c93, 0x1531f554, 0x954530e, 0x15b0dce1, 0x1408a15c, 0xc2fcd67, 0x156ca405, 0x2a82d} +{{0xadbb15e, 0xc524c93, 0x1531f554, 0x954530e, 0x15b0dce1, 0x1408a15c, 0xc2fcd67, 0x156ca405, 0x2a82d}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xaa8c524c9356dd8a, 0x6e70a5514c3aa63e, 0x5f9acf408a15cad8, 0x4c5416d5b290158} +{{0xaa8c524c9356dd8a, 0x6e70a5514c3aa63e, 0x5f9acf408a15cad8, 0x4c5416d5b290158}} #else -{0x6292649ab6ec5, 0x514c3aa63eaa8, 0x42b95b0dce14a, 0x5617e6b3d022, 0x262a0b6ad948} +{{0x6292649ab6ec5, 0x514c3aa63eaa8, 0x42b95b0dce14a, 0x5617e6b3d022, 0x262a0b6ad948}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1390, 0x1895, 0x9b7, 0xa5a, 0x1030, 0x16c1, 0xd21, 0x1053, 0x327, 0x1a4c, 0x1a22, 0x11e4, 0x16ba, 0x13a1, 0x1dbc, 0x1aac, 0x148c, 0x5c8, 0x15d2, 0x0} +{{0x1390, 0x1895, 0x9b7, 0xa5a, 0x1030, 0x16c1, 0xd21, 0x1053, 0x327, 0x1a4c, 0x1a22, 0x11e4, 0x16ba, 0x13a1, 0x1dbc, 0x1aac, 0x148c, 0x5c8, 0x15d2, 0x0}} #elif RADIX == 32 -{0x1e256720, 0x1052d26d, 0x436c181, 0xc9e0a6d, 0xd116930, 0x36ba8f2, 0xb3b793a, 0xe452335, 0xae91} +{{0x1e256720, 0x1052d26d, 0x436c181, 0xc9e0a6d, 0xd116930, 0x36ba8f2, 0xb3b793a, 0xe452335, 0xae91}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x303052d26df12b39, 0xb498327829b486d8, 0x76f27436ba8f2688, 0x5748b9148cd56} +{{0x303052d26df12b39, 0xb498327829b486d8, 0x76f27436ba8f2688, 0x5748b9148cd56}} #else -{0x296936f8959c, 0x7829b486d8303, 0x51e4d11693064, 0x3559dbc9d0dae, 0x282ba45c8a46} +{{0x296936f8959c, 0x7829b486d8303, 0x51e4d11693064, 0x3559dbc9d0dae, 0x282ba45c8a46}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif 
RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1be6, 0x11b3, 0x14ba, 0xf43, 0x1bd1, 0x215, 0x1e9a, 0x137a, 0x7b2, 0x15, 0x126, 0x148, 0x1c2b, 0x1b70, 0xf1c, 0x1e48, 0x1259, 0x188a, 0x1e44, 0x7} +{{0x1be6, 0x11b3, 0x14ba, 0xf43, 0x1bd1, 0x215, 0x1e9a, 0x137a, 0x7b2, 0x15, 0x126, 0x148, 0x1c2b, 0x1b70, 0xf1c, 0x1e48, 0x1259, 0x188a, 0x1e44, 0x7}} #elif RADIX == 32 -{0x146cf7cd, 0x117a1d2e, 0x134215de, 0x1eca6f5e, 0x930054, 0x1c2b0a4, 0x121e39b7, 0x454967c, 0x2f226} +{{0x146cf7cd, 0x117a1d2e, 0x134215de, 0x1eca6f5e, 0x930054, 0x1c2b0a4, 0x121e39b7, 0x454967c, 0x2f226}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xbbd17a1d2ea367be, 0x802a7b29bd7a6842, 0x3c736e1c2b0a4049, 0x21f913115259f24} +{{0xbbd17a1d2ea367be, 0x802a7b29bd7a6842, 0x3c736e1c2b0a4049, 0x21f913115259f24}} #else -{0xbd0e9751b3df, 0x29bd7a6842bbd, 0x61480930054f6, 0x7c90f1cdb870a, 0x10fc8988a92c} +{{0xbd0e9751b3df, 0x29bd7a6842bbd, 0x61480930054f6, 0x7c90f1cdb870a, 0x10fc8988a92c}} #endif #endif , #if 0 #elif RADIX == 16 -{0x4c5, 0x37e, 0xafa, 0x1b90, 0x13d, 0x8d3, 0xaa7, 0x489, 0x1d4a, 0x17bc, 0x168, 0x37f, 0x1ed6, 0x666, 0x1889, 0x1a4e, 0xa57, 0xeb7, 0xd37, 0x7} +{{0x4c5, 0x37e, 0xafa, 0x1b90, 0x13d, 0x8d3, 0xaa7, 0x489, 0x1d4a, 0x17bc, 0x168, 0x37f, 0x1ed6, 0x666, 0x1889, 0x1a4e, 0xa57, 0xeb7, 0xd37, 0x7}} #elif RADIX == 32 -{0x10df898b, 0x1ddc82be, 0x14e8d309, 0x1528912a, 0x10b45ef3, 0xded61bf, 0x13b11266, 0x15ba95f4, 0x269bb} +{{0x10df898b, 0x1ddc82be, 0x14e8d309, 0x1528912a, 0x10b45ef3, 0xded61bf, 0x13b11266, 0x15ba95f4, 0x269bb}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x613ddc82be86fc4c, 0x2f79d4a244aa9d1a, 0x6224ccded61bf85a, 0x1cb4ddd6ea57d27} +{{0x613ddc82be86fc4c, 0x2f79d4a244aa9d1a, 0x6224ccded61bf85a, 0x1cb4ddd6ea57d27}} #else -{0x6ee415f437e26, 0x2244aa9d1a613, 0x437f0b45ef3a9, 0x749d8893337b5, 0xe5a6eeb752b} +{{0x6ee415f437e26, 0x2244aa9d1a613, 0x437f0b45ef3a9, 0x749d8893337b5, 0xe5a6eeb752b}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 
0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x447, 0x1b87, 0x1cf0, 0x155, 0xb1, 0x804, 0x97a, 0x64a, 0x886, 0x3a3, 0x126f, 0x1553, 0x74d, 0xde9, 0x941, 0x39c, 0x8f, 0x1bbb, 0xf3, 0x1} +{{0x447, 0x1b87, 0x1cf0, 0x155, 0xb1, 0x804, 0x97a, 0x64a, 0x886, 0x3a3, 0x126f, 0x1553, 0x74d, 0xde9, 0x941, 0x39c, 0x8f, 0x1bbb, 0xf3, 0x1}} #elif RADIX == 32 -{0x6e1c88e, 0x110aaf3c, 0xf480405, 0x218c949, 0x19378e8d, 0x1274daa9, 0x71282de, 0x1dd823c7, 0x1079e} +{{0x6e1c88e, 0x110aaf3c, 0xf480405, 0x218c949, 0x19378e8d, 0x1274daa9, 0x71282de, 0x1dd823c7, 0x1079e}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x80b10aaf3c370e44, 0xc74688632525e900, 0x2505bd274daa9c9b, 0x2383cf77608f1ce} +{{0x80b10aaf3c370e44, 0xc74688632525e900, 0x2505bd274daa9c9b, 0x2383cf77608f1ce}} #else -{0x85579e1b8722, 0x632525e90080b, 0x35539378e8d10, 0x47389416f49d3, 0x11c1e7bbb047} +{{0x85579e1b8722, 0x632525e90080b, 0x35539378e8d10, 0x47389416f49d3, 0x11c1e7bbb047}} #endif #endif , #if 0 #elif RADIX == 16 -{0xf9d, 0x552, 0x797, 0x19fc, 0x166, 0x7a8, 0x1ee5, 0xc77, 0x1ee7, 0x15ef, 0x340, 0x10df, 0x1d5f, 0x170, 0xf2, 0x123, 0x1bb1, 0xd23, 0x3fc, 0x6} +{{0xf9d, 0x552, 0x797, 0x19fc, 0x166, 0x7a8, 0x1ee5, 0xc77, 0x1ee7, 0x15ef, 0x340, 0x10df, 0x1d5f, 0x170, 0xf2, 0x123, 0x1bb1, 0xd23, 0x3fc, 0x6}} #elif RADIX == 32 -{0x19549f3b, 0x6cfe1e5, 0x1ca7a80b, 0x1b9d8efe, 0x11a057bf, 0x1d5f86f, 0x8c1e417, 0x91eec42, 0x11fe3} +{{0x19549f3b, 0x6cfe1e5, 0x1ca7a80b, 0x1b9d8efe, 0x11a057bf, 0x1d5f86f, 0x8c1e417, 0x91eec42, 0x11fe3}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x166cfe1e5caa4f9, 0x2bdfee763bfb94f5, 0x83c82e1d5f86f8d0, 0x440ff1a47bb1091} +{{0x166cfe1e5caa4f9, 0x2bdfee763bfb94f5, 0x83c82e1d5f86f8d0, 0x440ff1a47bb1091}} #else -{0x367f0f2e5527c, 0x763bfb94f5016, 0x70df1a057bfdc, 0x42460f20b8757, 0x4a07f8d23dd8} +{{0x367f0f2e5527c, 0x763bfb94f5016, 0x70df1a057bfdc, 0x42460f20b8757, 0x4a07f8d23dd8}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -2860,261 +2860,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x1068, 0xf2c, 0x1ada, 0x1f58, 0x1343, 0x5cc, 0x90, 0x1bba, 0x50b, 0x1a35, 0x35f, 0x1388, 0x5ce, 0xc4f, 0x1462, 0x191f, 0x665, 0x843, 0x1cb1, 0x1} +{{0x1068, 0xf2c, 0x1ada, 0x1f58, 0x1343, 0x5cc, 0x90, 0x1bba, 0x50b, 0x1a35, 0x35f, 0x1388, 0x5ce, 0xc4f, 0x1462, 0x191f, 0x665, 0x843, 0x1cb1, 0x1}} #elif RADIX == 32 -{0x13cb20d0, 0x3fac6b6, 0x1205cc9a, 
0x142f7740, 0x1afe8d4, 0x1e5ce9c4, 0x7e8c4c4, 0x2199972, 0x1e58a} +{{0x13cb20d0, 0x3fac6b6, 0x1205cc9a, 0x142f7740, 0x1afe8d4, 0x1e5ce9c4, 0x7e8c4c4, 0x2199972, 0x1e58a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x9343fac6b69e5906, 0xf46a50bddd0240b9, 0xd18989e5ce9c40d7, 0x28f2c5086665c8f} +{{0x9343fac6b69e5906, 0xf46a50bddd0240b9, 0xd18989e5ce9c40d7, 0x28f2c5086665c8f}} #else -{0x1fd635b4f2c83, 0x3ddd0240b9934, 0x53881afe8d4a1, 0x723f462627973, 0x147962843332} +{{0x1fd635b4f2c83, 0x3ddd0240b9934, 0x53881afe8d4a1, 0x723f462627973, 0x147962843332}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x5b3, 0x13cb, 0x6b6, 0x1fd6, 0x4d0, 0x173, 0x1024, 0x1eee, 0x942, 0x1e8d, 0xd7, 0x14e2, 0x1973, 0x1313, 0x1d18, 0xe47, 0x1999, 0xa10, 0xf2c, 0x6} +{{0x5b3, 0x13cb, 0x6b6, 0x1fd6, 0x4d0, 0x173, 0x1024, 0x1eee, 0x942, 0x1e8d, 0xd7, 0x14e2, 0x1973, 0x1313, 0x1d18, 0xe47, 0x1999, 0xa10, 0xf2c, 0x6}} #elif RADIX == 32 -{0x14f2cb67, 0x10feb1ad, 0x4817326, 0x50bddd0, 0x6bfa35, 0x7973a71, 0x11fa3131, 0x1086665c, 0x17962} +{{0x14f2cb67, 0x10feb1ad, 0x4817326, 0x50bddd0, 0x6bfa35, 0x7973a71, 0x11fa3131, 0x1086665c, 0x17962}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x64d0feb1ada7965b, 0xfd1a942f7740902e, 0xf462627973a71035, 0x123cb1421999723} +{{0x64d0feb1ada7965b, 0xfd1a942f7740902e, 0xf462627973a71035, 0x123cb1421999723}} #else -{0x7f58d6d3cb2d, 0x2f7740902e64d, 0x74e206bfa3528, 0x5c8fd18989e5c, 0x311e58a10ccc} +{{0x7f58d6d3cb2d, 0x2f7740902e64d, 0x74e206bfa3528, 0x5c8fd18989e5c, 0x311e58a10ccc}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif 
RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x14ba, 0xa50, 0x219, 0x1ca8, 0x1858, 0xe67, 0x1b19, 0xb09, 0x17fa, 0x89f, 0x10d7, 0x1a55, 0x14de, 0x1f37, 0x12f0, 0x1247, 0x1aa6, 0x109f, 0x493, 0x6} +{{0x14ba, 0xa50, 0x219, 0x1ca8, 0x1858, 0xe67, 0x1b19, 0xb09, 0x17fa, 0x89f, 0x10d7, 0x1a55, 0x14de, 0x1f37, 0x12f0, 0x1247, 0x1aa6, 0x109f, 0x493, 0x6}} #elif RADIX == 32 -{0xa942975, 0x18e54086, 0x32e67c2, 0x1fe9613b, 0x186ba27e, 0xf4ded2a, 0x11e5e1f3, 0x4fea9a4, 0x1249c} +{{0xa942975, 0x18e54086, 0x32e67c2, 0x1fe9613b, 0x186ba27e, 0xf4ded2a, 0x11e5e1f3, 0x4fea9a4, 0x1249c}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf858e5408654a14b, 0xd13f7fa584ec65cc, 0xcbc3e6f4ded2ac35, 0x35124e13faa6923} +{{0xf858e5408654a14b, 0xd13f7fa584ec65cc, 0xcbc3e6f4ded2ac35, 0x35124e13faa6923}} #else -{0x472a0432a50a5, 0x2584ec65ccf85, 0x5a5586ba27eff, 0x248f2f0f9bd37, 0x42892709fd53} +{{0x472a0432a50a5, 0x2584ec65ccf85, 0x5a5586ba27eff, 0x248f2f0f9bd37, 0x42892709fd53}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1ba, 0xab8, 0x1ded, 0xdc9, 0xf40, 0xaa3, 0x169, 0x53c, 0x2, 0x848, 0x9a6, 0xbad, 0xb7e, 0x15dc, 0x87, 0x1cf3, 0x1791, 0x1af2, 0x1cdf, 0x7} +{{0x1ba, 0xab8, 0x1ded, 0xdc9, 0xf40, 0xaa3, 0x169, 0x53c, 0x2, 0x848, 0x9a6, 0xbad, 0xb7e, 0x15dc, 0x87, 0x1cf3, 0x1791, 0x1af2, 0x1cdf, 0x7}} #elif RADIX == 32 -{0xaae0375, 0x6e4f7b, 0xd2aa37a, 0x8a781, 0x14d32120, 0x18b7e5d6, 0x1cc10f5d, 0x1795e479, 0x2e6fe} +{{0xaae0375, 0x6e4f7b, 0xd2aa37a, 0x8a781, 0x14d32120, 0x18b7e5d6, 0x1cc10f5d, 0x1795e479, 0x2e6fe}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6f406e4f7b55701b, 0x909000229e05a554, 0x821ebb8b7e5d6a69, 0x35f37f5e5791e79} +{{0x6f406e4f7b55701b, 0x909000229e05a554, 0x821ebb8b7e5d6a69, 0x35f37f5e5791e79}} #else -{0x3727bdaab80d, 0x229e05a5546f4, 0x4bad4d3212000, 0x79e6087aee2df, 0x42f9bfaf2bc8} +{{0x3727bdaab80d, 0x229e05a5546f4, 0x4bad4d3212000, 0x79e6087aee2df, 0x42f9bfaf2bc8}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x5b, 0xad0, 0x69, 0x1038, 0x18d2, 0x180d, 0x1871, 0x46b, 0x26b, 0x1ef2, 0xe46, 0x72d, 0xc0d, 0x15a4, 0x6d7, 0x221, 0x1611, 0x1a89, 0xd3f, 0x8} +{{0x5b, 0xad0, 0x69, 0x1038, 0x18d2, 0x180d, 0x1871, 0x46b, 0x26b, 0x1ef2, 0xe46, 0x72d, 0xc0d, 0x15a4, 0x6d7, 0x221, 0x1611, 0x1a89, 0xd3f, 0x8}} #elif RADIX == 32 -{0xab400b7, 0x1281c01a, 0xe380dc6, 0x9ac8d78, 0x17237bc8, 0x8c0d396, 0x84daf5a, 0x144d8444, 0x369fe} +{{0xab400b7, 0x1281c01a, 0xe380dc6, 0x9ac8d78, 0x17237bc8, 0x8c0d396, 0x84daf5a, 0x144d8444, 0x369fe}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xb8d281c01a55a005, 0xbde426b235e1c701, 0x9b5eb48c0d396b91, 0x3b34ff513611110} +{{0xb8d281c01a55a005, 0xbde426b235e1c701, 0x9b5eb48c0d396b91, 0x3b34ff513611110}} #else -{0x140e00d2ad002, 0x3235e1c701b8d, 0x272d7237bc84d, 0x44426d7ad2303, 0x459a7fa89b08} +{{0x140e00d2ad002, 0x3235e1c701b8d, 0x272d7237bc84d, 0x44426d7ad2303, 0x459a7fa89b08}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1131, 0xac7, 0xa16, 0x918, 0x5d8, 0x1e64, 0x3e5, 0x142c, 0x1f89, 0x1cb7, 0xf96, 0x370, 0x4da, 0xf45, 0x1aa5, 0x1872, 0x1fc, 0xd83, 0x1145, 0x6} +{{0x1131, 0xac7, 0xa16, 0x918, 0x5d8, 0x1e64, 0x3e5, 0x142c, 0x1f89, 0x1cb7, 0xf96, 0x370, 0x4da, 0xf45, 0x1aa5, 0x1872, 0x1fc, 0xd83, 0x1145, 0x6}} #elif RADIX == 32 -{0x12b1e263, 0x1848c285, 0x1cbe642e, 0x1e268583, 0x7cb72df, 0xa4da1b8, 0x1cb54af4, 0xc187f30, 0x18a2b} +{{0x12b1e263, 0x1848c285, 0x1cbe642e, 0x1e268583, 0x7cb72df, 0xa4da1b8, 0x1cb54af4, 0xc187f30, 0x18a2b}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x85d848c285958f13, 0xb96ff89a160f97cc, 0x6a95e8a4da1b83e5, 0x84515b061fcc39} +{{0x85d848c285958f13, 0xb96ff89a160f97cc, 0x6a95e8a4da1b83e5, 0x84515b061fcc39}} #else -{0x4246142cac789, 0x1a160f97cc85d, 0x43707cb72dff1, 0x30e5aa57a2936, 0x2c228ad830fe} +{{0x4246142cac789, 0x1a160f97cc85d, 0x43707cb72dff1, 0x30e5aa57a2936, 0x2c228ad830fe}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x7a4, 0x388, 0xd00, 0x66c, 0x1a9a, 0xabc, 0x97b, 0xadc, 0xaab, 0x1601, 0x287, 0xb2a, 0x1ab7, 0x1803, 0x1d06, 0x81c, 0x890, 0x11e0, 0x1e19, 0x0} +{{0x7a4, 0x388, 0xd00, 0x66c, 0x1a9a, 0xabc, 0x97b, 0xadc, 0xaab, 0x1601, 0x287, 0xb2a, 0x1ab7, 0x1803, 0x1d06, 0x81c, 0x890, 0x11e0, 0x1e19, 0x0}} #elif RADIX == 32 -{0xe20f48, 0x1a336340, 0xf6abcd4, 0xaad5b89, 0x143d805, 0x7ab7595, 0x73a0d80, 0xf022410, 0xf0cc} +{{0xe20f48, 0x1a336340, 0xf6abcd4, 0xaad5b89, 0x143d805, 0x7ab7595, 0x73a0d80, 0xf022410, 0xf0cc}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x9a9a33634007107a, 0xec02aab56e25ed57, 0x741b007ab75950a1, 0x1478663c089040e} +{{0x9a9a33634007107a, 0xec02aab56e25ed57, 0x741b007ab75950a1, 0x1478663c089040e}} #else -{0x519b1a003883d, 0x356e25ed579a9, 0x6b2a143d80555, 0x1039d06c01ead, 0xa3c331e0448} +{{0x519b1a003883d, 0x356e25ed579a9, 0x6b2a143d80555, 0x1039d06c01ead, 0xa3c331e0448}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1e68, 0xcde, 0x29, 0x1777, 0x1ef8, 0x1a1c, 0x204, 0x148, 0x14ba, 0x1c39, 0x175, 0x1263, 0x4de, 0x1032, 0x1649, 0x5a4, 0xad, 0xcfb, 0x870, 0x3} +{{0x1e68, 0xcde, 0x29, 0x1777, 0x1ef8, 0x1a1c, 0x204, 0x148, 0x14ba, 0x1c39, 0x175, 0x1263, 0x4de, 0x1032, 0x1649, 0x5a4, 0xad, 0xcfb, 0x870, 0x3}} #elif RADIX == 32 -{0xb37bcd0, 0x18bbb80a, 0x9a1cf7, 0x12e82902, 0x10baf0e6, 0x44de931, 0x92c9303, 0x7d82b4b, 0x34383} +{{0xb37bcd0, 0x18bbb80a, 0x9a1cf7, 0x12e82902, 0x10baf0e6, 0x44de931, 0x92c9303, 0x7d82b4b, 0x34383}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x9ef8bbb80a59bde6, 0x78734ba0a4081343, 0x59260644de93185d, 0x29a1c19f60ad2d2} +{{0x9ef8bbb80a59bde6, 0x78734ba0a4081343, 0x59260644de93185d, 0x29a1c19f60ad2d2}} #else -{0x45ddc052cdef3, 0x20a40813439ef, 0x52630baf0e697, 0x4b49649819137, 0x14d0e0cfb056} +{{0x45ddc052cdef3, 0x20a40813439ef, 0x52630baf0e697, 0x4b49649819137, 0x14d0e0cfb056}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp.h index 3210a041c8..e63ee8c5a9 100644 --- 
a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp.h
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp.h
@@ -111,7 +111,7 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len)
 static inline void
 fp_copy(fp_t *out, const fp_t *a)
 {
-    memcpy(out, a, sizeof(fp_t));
+    memmove(out, a, sizeof(fp_t));
 }
 
 static inline void
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp2.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp2.h
index 5f84fdf646..3bbb68a016 100644
--- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp2.h
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/fp2.h
@@ -38,4 +38,4 @@ fp2_sqr(fp2_t *x, const fp2_t *y)
     x->re.arr[3] = t.re.arr[3];
 }
 
-#endif
\ No newline at end of file
+#endif
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/gf5248.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/gf5248.c
index 1d4a41dae0..901d5ea5e2 100644
--- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/gf5248.c
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/gf5248.c
@@ -5,24 +5,24 @@
 #include "gf5248.h"
 
 // see gf5248.h
-const gf5248 ZERO = { 0, 0, 0, 0 };
+const gf5248 ZERO = {{ 0, 0, 0, 0 }};
 
 // see gf5248.h
-const gf5248 ONE = { 0x0000000000000033, 0x0000000000000000, 0x0000000000000000, 0x0100000000000000 };
+const gf5248 ONE = {{ 0x0000000000000033, 0x0000000000000000, 0x0000000000000000, 0x0100000000000000 }};
 
 // see gf5248.h
-const gf5248 gf5248_MINUS_ONE = { 0xFFFFFFFFFFFFFFCC, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x03FFFFFFFFFFFFFF };
+const gf5248 gf5248_MINUS_ONE = {{ 0xFFFFFFFFFFFFFFCC, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x03FFFFFFFFFFFFFF }};
 
 // Montgomery representation of 2^256.
-static const gf5248 R2 = { 0x3333333333333d70, 0x3333333333333333, 0x3333333333333333, 0x0333333333333333 };
+static const gf5248 R2 = {{ 0x3333333333333d70, 0x3333333333333333, 0x3333333333333333, 0x0333333333333333 }};
 
 // The modulus itself (this is also a valid representation of zero).
-static const gf5248 MODULUS = { 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x04FFFFFFFFFFFFFF };
+static const gf5248 MODULUS = {{ 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x04FFFFFFFFFFFFFF }};
 
 // 1/2^244 (in Montgomery representation).
-static const gf5248 INVT244 = { 0x0000000000001000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 };
+static const gf5248 INVT244 = {{ 0x0000000000001000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 }};
 
-static const gf5248 PM1O3 = { 0xaaaaaaaaaaaaaaaa, 0xaaaaaaaaaaaaaaaa, 0xaaaaaaaaaaaaaaaa, 0x01aaaaaaaaaaaaaa };
+static const gf5248 PM1O3 = {{ 0xaaaaaaaaaaaaaaaa, 0xaaaaaaaaaaaaaaaa, 0xaaaaaaaaaaaaaaaa, 0x01aaaaaaaaaaaaaa }};
 
 // Normalize value *a into *d.
 static inline void
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/gf5248.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/gf5248.h
index f1d21b45c6..a2e561757c 100644
--- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/gf5248.h
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/gf5248.h
@@ -98,7 +98,7 @@ extern "C"
  * support the API inline functions; they MUST NOT be used directly.
  */
-#if (defined _MSC_VER && defined _M_X64) || (defined __x86_64__ && (defined __GNUC__ || defined __clang__))
+#if (defined _MSC_VER && defined _M_X64) || (defined __x86_64__ && (defined __GNUC__ || defined __clang__)) || defined(C_PEDANTIC_MODE)
 #include
 #define inner_gf5248_adc(cc, a, b, d) _addcarry_u64(cc, a, b, (unsigned long long *)(void *)d)
 #define inner_gf5248_sbb(cc, a, b, d) _subborrow_u64(cc, a, b, (unsigned long long *)(void *)d)
@@ -119,17 +119,48 @@ inner_gf5248_sbb(unsigned char cc, uint64_t a, uint64_t b, uint64_t *d)
 }
 #endif
 
-#if defined _MSC_VER
+#if defined _MSC_VER || defined(C_PEDANTIC_MODE)
+#if defined _MSC_VER
 #define inner_gf5248_umul(lo, hi, x, y) \
     do { \
         uint64_t umul_hi; \
         (lo) = _umul128((x), (y), &umul_hi); \
         (hi) = umul_hi; \
     } while (0)
+#else
+#define inner_gf5248_umul(lo, hi, a, b) \
+    do { \
+        register uint64_t al, ah, bl, bh, temp; \
+        uint64_t albl, albh, ahbl, ahbh, res1, res2, res3, carry; \
+        uint64_t mask_low = (uint64_t)(-1) >> (sizeof(uint64_t) * 4), mask_high = (uint64_t)(-1) << (sizeof(uint64_t) * 4); \
+        al = a & mask_low; \
+        ah = a >> (sizeof(uint64_t) * 4); \
+        bl = b & mask_low; \
+        bh = b >> (sizeof(uint64_t) * 4); \
+        albl = al * bl; \
+        albh = al * bh; \
+        ahbl = ah * bl; \
+        ahbh = ah * bh; \
+        (lo) = albl & mask_low; \
+        res1 = albl >> (sizeof(uint64_t) * 4); \
+        res2 = ahbl & mask_low; \
+        res3 = albh & mask_low; \
+        temp = res1 + res2 + res3 ; \
+        carry = temp >> (sizeof(uint64_t) * 4); \
+        (lo) ^= temp << (sizeof(uint64_t) * 4); \
+        res1 = ahbl >> (sizeof(uint64_t) * 4); \
+        res2 = albh >> (sizeof(uint64_t) * 4); \
+        res3 = ahbh & mask_low; \
+        temp = res1 + res2 + res3 + carry; \
+        (hi) = temp & mask_low; \
+        carry = temp & mask_high; \
+        (hi) ^= (ahbh & mask_high) + carry; \
+    } while (0)
+#endif
 #define inner_gf5248_umul_add(lo, hi, x, y, z) \
     do { \
         uint64_t umul_lo, umul_hi; \
-        umul_lo = _umul128((x), (y), &umul_hi); \
+        inner_gf5248_umul(umul_lo, umul_hi, (x), (y)); \
         unsigned char umul_cc; \
         umul_cc = inner_gf5248_adc(0, umul_lo, (z), &umul_lo); \
         (void)inner_gf5248_adc(umul_cc, umul_hi, 0, &umul_hi); \
@@ -139,9 +170,9 @@ inner_gf5248_sbb(unsigned char cc, uint64_t a, uint64_t b, uint64_t *d)
 #define inner_gf5248_umul_x2(lo, hi, x1, y1, x2, y2) \
     do { \
         uint64_t umul_lo, umul_hi; \
-        umul_lo = _umul128((x1), (y1), &umul_hi); \
+        inner_gf5248_umul(umul_lo, umul_hi, (x1), (y1)); \
         uint64_t umul_lo2, umul_hi2; \
-        umul_lo2 = _umul128((x2), (y2), &umul_hi2); \
+        inner_gf5248_umul(umul_lo2, umul_hi2, (x2), (y2)); \
         unsigned char umul_cc; \
         umul_cc = inner_gf5248_adc(0, umul_lo, umul_lo2, &umul_lo); \
         (void)inner_gf5248_adc(umul_cc, umul_hi, umul_hi2, &umul_hi); \
@@ -151,9 +182,9 @@ inner_gf5248_sbb(unsigned char cc, uint64_t a, uint64_t b, uint64_t *d)
 #define inner_gf5248_umul_x2_add(lo, hi, x1, y1, x2, y2, z) \
     do { \
         uint64_t umul_lo, umul_hi; \
-        umul_lo = _umul128((x1), (y1), &umul_hi); \
+        inner_gf5248_umul(umul_lo, umul_hi, (x1), (y1)); \
        uint64_t umul_lo2, umul_hi2; \
-        umul_lo2 = _umul128((x2), (y2), &umul_hi2); \
+        inner_gf5248_umul(umul_lo2, umul_hi2, (x2), (y2)); \
         unsigned char umul_cc; \
         umul_cc = inner_gf5248_adc(0, umul_lo, umul_lo2, &umul_lo); \
         (void)inner_gf5248_adc(umul_cc, umul_hi, umul_hi2, &umul_hi); \
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd.h
index 2b16e23834..616504c7b1 100644
--- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd.h
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd.h
@@ -415,7 +415,7
@@ void copy_bases_to_kernel(theta_kernel_couple_points_t *ker, const ec_basis_t *B * @param t: an integer * @returns 0xFFFFFFFF on success, 0 on failure */ -static int +static inline int test_couple_point_order_twof(const theta_couple_point_t *T, const theta_couple_curve_t *E, int t) { int check_P1 = test_point_order_twof(&T->P1, &E->E1, t); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.c index 6332d21f8e..14482e01cd 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/hd_splitting_transforms.c @@ -11,131 +11,131 @@ const int CHI_EVAL[4][4] = {{1, 1, 1, 1}, {1, -1, 1, -1}, {1, 1, -1, -1}, {1, -1 const fp2_t FP2_CONSTANTS[5] = {{ #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , 
#if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x1ccc, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x7} +{{0x1ccc, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x7}} #elif RADIX == 32 -{0x1ffff999, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x2ffff} +{{0x1ffff999, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x2ffff}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xffffffffffffffcc, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff} +{{0xffffffffffffffcc, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff}} #else -{0x7ffffffffffe6, 0x7ffffffffffff, 0x7ffffffffffff, 0x7ffffffffffff, 0x1fffffffffff} +{{0x7ffffffffffe6, 0x7ffffffffffff, 0x7ffffffffffff, 0x7ffffffffffff, 0x1fffffffffff}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1ccc, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x7} +{{0x1ccc, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x7}} #elif RADIX == 32 -{0x1ffff999, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x2ffff} +{{0x1ffff999, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x2ffff}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xffffffffffffffcc, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff} +{{0xffffffffffffffcc, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff}} #else -{0x7ffffffffffe6, 0x7ffffffffffff, 0x7ffffffffffff, 0x7ffffffffffff, 0x1fffffffffff} 
+{{0x7ffffffffffe6, 0x7ffffffffffff, 0x7ffffffffffff, 0x7ffffffffffff, 0x1fffffffffff}}
 #endif
 #endif
 }};
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/l2.c
index ea32213c75..0fed774a04 100644
--- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/l2.c
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/l2.c
@@ -24,8 +24,8 @@ copy(fp_num *x, fp_num *r)
 static void
 normalize(fp_num *x)
 {
-    if (x->s == 0.0 || isfinite(x->s) == 0) {
-        if (x->s == 0.0) {
+    if (fpclassify(x->s) == FP_ZERO || isfinite(x->s) == 0) {
+        if (fpclassify(x->s) == FP_ZERO) {
             x->e = INT_MIN;
         }
     } else {
@@ -49,13 +49,6 @@ to_deltabar(fp_num *x)
     x->e = 0;
 }
 
-static void
-to_etabar(fp_num *x)
-{
-    x->s = ETABAR;
-    x->e = 0;
-}
-
 static void
 from_mpz(const ibz_t *x, fp_num *r)
 {
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lll_internals.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lll_internals.h
index e8d90141ac..2b76857205 100644
--- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lll_internals.h
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/lll_internals.h
@@ -43,13 +43,19 @@
 /** @brief Type for fractions of integers
  *
- * @typedef
+* @typedef ibq_t
  *
 * For fractions of integers of arbitrary size, used by intbig module, using gmp
 */
-typedef ibz_t ibq_t[2];
-typedef ibq_t ibq_vec_4_t[4];
-typedef ibq_t ibq_mat_4x4_t[4][4];
+typedef struct {
+    ibz_t q[2];
+} ibq_t;
+typedef struct {
+    ibq_t v[4];
+} ibq_vec_4_t;
+typedef struct {
+    ibq_vec_4_t m[4];
+} ibq_mat_4x4_t;
 
 /**@} */
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mp.c
index 27f4a963db..13714eee4a 100644
--- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mp.c
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/mp.c
@@ -2,6 +2,7 @@
 #include
 #include
 #include
+#include
 
 // double-wide multiplication
 void
@@ -17,7 +18,7 @@ MUL(digit_t *out, const digit_t a, const digit_t b)
     out[0] = _umul128(a, b, &umul_hi);
     out[1] = umul_hi;
 
-#elif defined(RADIX_64) && defined(HAVE_UINT128)
+#elif defined(RADIX_64) && (defined(HAVE_UINT128) || defined(__SIZEOF_INT128__) || defined(__int128)) && !defined(C_PEDANTIC_MODE)
     unsigned __int128 umul_tmp;
     umul_tmp = (unsigned __int128)(a) * (unsigned __int128)(b);
     out[0] = (uint64_t)umul_tmp;
@@ -277,6 +278,7 @@ mp_inv_2e(digit_t *b, const digit_t *a, int e, unsigned int nwords)
     assert((a[0] & 1) == 1);
 
     digit_t x[nwords], y[nwords], aa[nwords], mp_one[nwords], tmp[nwords];
+    memset(x, 0, sizeof(x));
 
     mp_copy(aa, a, nwords);
     mp_one[0] = 1;
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/rationals.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/rationals.c
index 0c5387e5e8..25f8519b3f 100644
--- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/rationals.c
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/rationals.c
@@ -1,20 +1,20 @@
-#include
+ #include
 #include "internal.h"
 #include "lll_internals.h"
 
 void
 ibq_init(ibq_t *x)
 {
-    ibz_init(&((*x)[0]));
-    ibz_init(&((*x)[1]));
-    ibz_set(&((*x)[1]), 1);
+    ibz_init(&(x->q[0]));
+    ibz_init(&(x->q[1]));
+    ibz_set(&(x->q[1]), 1);
 }
 
 void
 ibq_finalize(ibq_t *x)
 {
-    ibz_finalize(&((*x)[0]));
-    ibz_finalize(&((*x)[1]));
+    ibz_finalize(&(x->q[0]));
+    ibz_finalize(&(x->q[1]));
 }
 
 void
@@ -22,7 +22,7 @@ ibq_mat_4x4_init(ibq_mat_4x4_t *mat)
 {
     for (int i = 0; i < 4; i++) {
         for (int j = 0; j < 4; j++) {
-            ibq_init(&(*mat)[i][j]);
+            ibq_init(&mat->m[i].v[j]);
         }
     }
} @@ -31,7 +31,7 @@ ibq_mat_4x4_finalize(ibq_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibq_finalize(&(*mat)[i][j]); + ibq_finalize(&mat->m[i].v[j]); } } } @@ -40,14 +40,14 @@ void ibq_vec_4_init(ibq_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibq_init(&(*vec)[i]); + ibq_init(&vec->v[i]); } } void ibq_vec_4_finalize(ibq_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibq_finalize(&(*vec)[i]); + ibq_finalize(&vec->v[i]); } } @@ -57,9 +57,9 @@ ibq_mat_4x4_print(const ibq_mat_4x4_t *mat) printf("matrix: "); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_print(&((*mat)[i][j][0]), 10); + ibz_print(&(mat->m[i].v[j].q[0]), 10); printf("/"); - ibz_print(&((*mat)[i][j][1]), 10); + ibz_print(&(mat->m[i].v[j].q[1]), 10); printf(" "); } printf("\n "); @@ -72,9 +72,9 @@ ibq_vec_4_print(const ibq_vec_4_t *vec) { printf("vector: "); for (int i = 0; i < 4; i++) { - ibz_print(&((*vec)[i][0]), 10); + ibz_print(&(vec->v[i].q[0]), 10); printf("/"); - ibz_print(&((*vec)[i][1]), 10); + ibz_print(&(vec->v[i].q[1]), 10); printf(" "); } printf("\n\n"); @@ -86,10 +86,10 @@ ibq_reduce(ibq_t *x) ibz_t gcd, r; ibz_init(&gcd); ibz_init(&r); - ibz_gcd(&gcd, &((*x)[0]), &((*x)[1])); - ibz_div(&((*x)[0]), &r, &((*x)[0]), &gcd); + ibz_gcd(&gcd, &(x->q[0]), &(x->q[1])); + ibz_div(&(x->q[0]), &r, &(x->q[0]), &gcd); assert(ibz_is_zero(&r)); - ibz_div(&((*x)[1]), &r, &((*x)[1]), &gcd); + ibz_div(&(x->q[1]), &r, &(x->q[1]), &gcd); assert(ibz_is_zero(&r)); ibz_finalize(&gcd); ibz_finalize(&r); @@ -102,10 +102,10 @@ ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b) ibz_init(&add); ibz_init(&prod); - ibz_mul(&add, &((*a)[0]), &((*b)[1])); - ibz_mul(&prod, &((*b)[0]), &((*a)[1])); - ibz_add(&((*sum)[0]), &add, &prod); - ibz_mul(&((*sum)[1]), &((*a)[1]), &((*b)[1])); + ibz_mul(&add, &(a->q[0]), &(b->q[1])); + ibz_mul(&prod, &(b->q[0]), &(a->q[1])); + ibz_add(&(sum->q[0]), &add, &prod); + ibz_mul(&(sum->q[1]), &(a->q[1]), &(b->q[1])); ibz_finalize(&add); ibz_finalize(&prod); } @@ -113,8 +113,8 @@ ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b) void ibq_neg(ibq_t *neg, const ibq_t *x) { - ibz_copy(&((*neg)[1]), &((*x)[1])); - ibz_neg(&((*neg)[0]), &((*x)[0])); + ibz_copy(&(neg->q[1]), &(x->q[1])); + ibz_neg(&(neg->q[0]), &(x->q[0])); } void @@ -143,8 +143,8 @@ ibq_abs(ibq_t *abs, const ibq_t *x) // once void ibq_mul(ibq_t *prod, const ibq_t *a, const ibq_t *b) { - ibz_mul(&((*prod)[0]), &((*a)[0]), &((*b)[0])); - ibz_mul(&((*prod)[1]), &((*a)[1]), &((*b)[1])); + ibz_mul(&(prod->q[0]), &(a->q[0]), &(b->q[0])); + ibz_mul(&(prod->q[1]), &(a->q[1]), &(b->q[1])); } int @@ -152,9 +152,9 @@ ibq_inv(ibq_t *inv, const ibq_t *x) { int res = !ibq_is_zero(x); if (res) { - ibz_copy(&((*inv)[0]), &((*x)[0])); - ibz_copy(&((*inv)[1]), &((*x)[1])); - ibz_swap(&((*inv)[1]), &((*inv)[0])); + ibz_copy(&(inv->q[0]), &(x->q[0])); + ibz_copy(&(inv->q[1]), &(x->q[1])); + ibz_swap(&(inv->q[1]), &(inv->q[0])); } return (res); } @@ -165,15 +165,15 @@ ibq_cmp(const ibq_t *a, const ibq_t *b) ibz_t x, y; ibz_init(&x); ibz_init(&y); - ibz_copy(&x, &((*a)[0])); - ibz_copy(&y, &((*b)[0])); - ibz_mul(&y, &y, &((*a)[1])); - ibz_mul(&x, &x, &((*b)[1])); - if (ibz_cmp(&((*a)[1]), &ibz_const_zero) > 0) { + ibz_copy(&x, &(a->q[0])); + ibz_copy(&y, &(b->q[0])); + ibz_mul(&y, &y, &(a->q[1])); + ibz_mul(&x, &x, &(b->q[1])); + if (ibz_cmp(&(a->q[1]), &ibz_const_zero) > 0) { ibz_neg(&y, &y); ibz_neg(&x, &x); } - if (ibz_cmp(&((*b)[1]), &ibz_const_zero) > 0) { + if (ibz_cmp(&(b->q[1]), &ibz_const_zero) > 0) { 
ibz_neg(&y, &y); ibz_neg(&x, &x); } @@ -186,28 +186,28 @@ ibq_cmp(const ibq_t *a, const ibq_t *b) int ibq_is_zero(const ibq_t *x) { - return ibz_is_zero(&((*x)[0])); + return ibz_is_zero(&(x->q[0])); } int ibq_is_one(const ibq_t *x) { - return (0 == ibz_cmp(&((*x)[0]), &((*x)[1]))); + return (0 == ibz_cmp(&(x->q[0]), &(x->q[1]))); } int ibq_set(ibq_t *q, const ibz_t *a, const ibz_t *b) { - ibz_copy(&((*q)[0]), a); - ibz_copy(&((*q)[1]), b); + ibz_copy(&(q->q[0]), a); + ibz_copy(&(q->q[1]), b); return !ibz_is_zero(b); } void ibq_copy(ibq_t *target, const ibq_t *value) // once { - ibz_copy(&((*target)[0]), &((*value)[0])); - ibz_copy(&((*target)[1]), &((*value)[1])); + ibz_copy(&(target->q[0]), &(value->q[0])); + ibz_copy(&(target->q[1]), &(value->q[1])); } int @@ -215,7 +215,7 @@ ibq_is_ibz(const ibq_t *q) { ibz_t r; ibz_init(&r); - ibz_mod(&r, &((*q)[0]), &((*q)[1])); + ibz_mod(&r, &(q->q[0]), &(q->q[1])); int res = ibz_is_zero(&r); ibz_finalize(&r); return (res); @@ -226,7 +226,7 @@ ibq_to_ibz(ibz_t *z, const ibq_t *q) { ibz_t r; ibz_init(&r); - ibz_div(z, &r, &((*q)[0]), &((*q)[1])); + ibz_div(z, &r, &(q->q[0]), &(q->q[1])); int res = ibz_is_zero(&r); ibz_finalize(&r); return (res); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/rng.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/rng.h index d0861ac036..0362ca0c42 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/rng.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/rng.h @@ -5,7 +5,7 @@ #include -static int randombytes(unsigned char *x, unsigned long long xlen){ +static inline int randombytes(unsigned char *x, unsigned long long xlen){ OQS_randombytes(x, xlen); return 0; } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign.c index 7335c38d9a..cf2134085b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_broadwell/sqisign.c @@ -121,7 +121,7 @@ sqisign_verify(const unsigned char *m, unsigned long long siglen, const unsigned char *pk) { - + (void) siglen; int ret = 0; public_key_t pkt = { 0 }; signature_t sigt; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim2id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim2id2iso.c index 143060e2c3..74184fc97b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim2id2iso.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/dim2id2iso.c @@ -191,7 +191,7 @@ fixed_degree_isogeny_and_eval(quat_left_ideal_t *lideal, // reordering vectors and switching some signs if needed to make it in a nicer // shape static void -post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, const ibz_t *norm, bool is_special_order) +post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, bool is_special_order) { // if the left order is the special one, then we apply some additional post // treatment @@ -520,7 +520,7 @@ find_uv(ibz_t *u, ibz_set(&adjusted_norm[0], 1); ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); - post_LLL_basis_treatment(&gram[0], &reduced[0], &ideal[0].norm, true); + post_LLL_basis_treatment(&gram[0], &reduced[0], true); // for efficient lattice reduction, we replace ideal[0] by the equivalent // ideal of smallest norm @@ -562,7 +562,7 @@ find_uv(ibz_t *u, ibz_set(&adjusted_norm[i], 1); ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom); 
ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom); - post_LLL_basis_treatment(&gram[i], &reduced[i], &ideal[i].norm, false); + post_LLL_basis_treatment(&gram[i], &reduced[i], false); } // enumerating small vectors diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/e0_basis.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/e0_basis.c index 5be2b8e57e..c24fe29409 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/e0_basis.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/e0_basis.c @@ -2,54 +2,54 @@ const fp2_t BASIS_E0_PX = { #if 0 #elif RADIX == 16 -{0x107, 0xc, 0x1890, 0xf2a, 0x52b, 0xb68, 0x152d, 0xa4c, 0x1054, 0x642, 0x36a, 0x6f8, 0x7ad, 0x146c, 0x1d66, 0x1b67, 0x236, 0x10d, 0x1933, 0x3} +{{0x107, 0xc, 0x1890, 0xf2a, 0x52b, 0xb68, 0x152d, 0xa4c, 0x1054, 0x642, 0x36a, 0x6f8, 0x7ad, 0x146c, 0x1d66, 0x1b67, 0x236, 0x10d, 0x1933, 0x3}} #elif RADIX == 32 -{0x3020e, 0xb795624, 0x5ab6829, 0x1514995, 0x1b5190a, 0x187ad37c, 0x19facd46, 0x8688db6, 0x3c998} +{{0x3020e, 0xb795624, 0x5ab6829, 0x1514995, 0x1b5190a, 0x187ad37c, 0x19facd46, 0x8688db6, 0x3c998}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x52b795624001810, 0x8c8505452654b56d, 0xf59a8d87ad37c0da, 0x24e4cc21a236db3} +{{0x52b795624001810, 0x8c8505452654b56d, 0xf59a8d87ad37c0da, 0x24e4cc21a236db3}} #else -{0x5bcab12000c08, 0x452654b56d052, 0x26f81b5190a0a, 0x36cfd66a361eb, 0x12726610d11b} +{{0x5bcab12000c08, 0x452654b56d052, 0x26f81b5190a0a, 0x36cfd66a361eb, 0x12726610d11b}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1f87, 0x83e, 0x32e, 0xe58, 0xd9d, 0x1416, 0x752, 0x13b4, 0x1efa, 0xe62, 0x12f5, 0x1907, 0x1814, 0x1ddd, 0x1aa6, 0x1420, 0x2cd, 0x1431, 0x1be2, 0x7} +{{0x1f87, 0x83e, 0x32e, 0xe58, 0xd9d, 0x1416, 0x752, 0x13b4, 0x1efa, 0xe62, 0x12f5, 0x1907, 0x1814, 0x1ddd, 0x1aa6, 0x1420, 0x2cd, 0x1431, 0x1be2, 0x7}} #elif RADIX == 32 -{0x120fbf0f, 0x1d72c0cb, 0xa54166c, 0x1bea7687, 0x197ab98b, 0x1b814c83, 0x8354ddd, 0x188b368, 0x2df15} +{{0x120fbf0f, 0x1d72c0cb, 0xa54166c, 0x1bea7687, 0x197ab98b, 0x1b814c83, 0x8354ddd, 0x188b368, 0x2df15}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xcd9d72c0cb907df8, 0x5cc5efa9da1d4a82, 0x6a9bbbb814c83cbd, 0x26ef8a8622cda10} +{{0xcd9d72c0cb907df8, 0x5cc5efa9da1d4a82, 0x6a9bbbb814c83cbd, 0x26ef8a8622cda10}} #else -{0x6b96065c83efc, 0x29da1d4a82cd9, 0x190797ab98bdf, 0x6841aa6eeee05, 0x1377c5431166} +{{0x6b96065c83efc, 0x29da1d4a82cd9, 0x190797ab98bdf, 0x6841aa6eeee05, 0x1377c5431166}} #endif #endif }; const fp2_t BASIS_E0_QX = { #if 0 #elif RADIX == 16 -{0x5ff, 0x1783, 0xadc, 0x775, 0xad4, 0x593, 0xb4c, 0x21e, 0x1cb2, 0x13d8, 0x179f, 0x680, 0x1a9c, 0x1824, 0x118e, 0x13d9, 0x24, 0x1956, 0x1dd2, 0x9} +{{0x5ff, 0x1783, 0xadc, 0x775, 0xad4, 0x593, 0xb4c, 0x21e, 0x1cb2, 0x13d8, 0x179f, 0x680, 0x1a9c, 0x1824, 0x118e, 0x13d9, 0x24, 0x1956, 0x1dd2, 0x9}} #elif RADIX == 32 -{0x5e0cbff, 0x143baab7, 0x9859356, 0x12c843cb, 0xbcfcf63, 0x9a9c340, 0x16631d82, 0xab00927, 0x4ee96} +{{0x5e0cbff, 0x143baab7, 0x9859356, 0x12c843cb, 0xbcfcf63, 0x9a9c340, 0x16631d82, 0xab00927, 0x4ee96}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6ad43baab72f065f, 0xe7b1cb210f2d30b2, 0xc63b049a9c3405e7, 0x4ff74b2ac0249ec} +{{0x6ad43baab72f065f, 0xe7b1cb210f2d30b2, 0xc63b049a9c3405e7, 0x4ff74b2ac0249ec}} #else -{0x21dd55b97832f, 0x210f2d30b26ad, 0x680bcfcf6396, 0x27b318ec126a7, 0x4ffba5956012} +{{0x21dd55b97832f, 0x210f2d30b26ad, 0x680bcfcf6396, 0x27b318ec126a7, 0x4ffba5956012}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1c7f, 0x1117, 0xa4, 0x1164, 0x6e, 0x1e63, 
0x1b7b, 0x1305, 0x424, 0x131a, 0x1b61, 0xae3, 0x17b1, 0xe5e, 0x1848, 0x1e81, 0x14a5, 0x1cb5, 0x1d87, 0x8} +{{0x1c7f, 0x1117, 0xa4, 0x1164, 0x6e, 0x1e63, 0x1b7b, 0x1305, 0x424, 0x131a, 0x1b61, 0xae3, 0x17b1, 0xe5e, 0x1848, 0x1e81, 0x14a5, 0x1cb5, 0x1d87, 0x8}} #elif RADIX == 32 -{0x445f8ff, 0xe8b2029, 0xf7e6303, 0x109260bb, 0x1db0cc68, 0x1d7b1571, 0x7090e5, 0x5ad297d, 0x3ec3f} +{{0x445f8ff, 0xe8b2029, 0xf7e6303, 0x109260bb, 0x1db0cc68, 0x1d7b1571, 0x7090e5, 0x5ad297d, 0x3ec3f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x606e8b2029222fc7, 0x6634424982edefcc, 0xe121cbd7b1571ed8, 0x4f761f96b4a5f40} +{{0x606e8b2029222fc7, 0x6634424982edefcc, 0xe121cbd7b1571ed8, 0x4f761f96b4a5f40}} #else -{0x74590149117e3, 0x4982edefcc606, 0x2ae3db0cc6884, 0x7d0384872f5ec, 0x4fbb0fcb5a52} +{{0x74590149117e3, 0x4982edefcc606, 0x2ae3db0cc6884, 0x7d0384872f5ec, 0x4fbb0fcb5a52}} #endif #endif }; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec.h index e609c93a08..7cef95ca49 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/ec.h @@ -566,7 +566,7 @@ uint32_t ec_is_basis_four_torsion(const ec_basis_t *B, const ec_curve_t *E); * * @return 0xFFFFFFFF if the order is correct, 0 otherwise */ -static int +static inline int test_point_order_twof(const ec_point_t *P, const ec_curve_t *E, int t) { ec_point_t test; @@ -595,7 +595,7 @@ test_point_order_twof(const ec_point_t *P, const ec_curve_t *E, int t) * * @return 0xFFFFFFFF if the order is correct, 0 otherwise */ -static int +static inline int test_basis_order_twof(const ec_basis_t *B, const ec_curve_t *E, int t) { int check_P = test_point_order_twof(&B->P, E, t); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/encode_verification.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/encode_verification.c index fecdb9c259..8aa451d366 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/encode_verification.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/encode_verification.c @@ -99,36 +99,6 @@ ec_curve_from_bytes(ec_curve_t *curve, const byte_t *enc) return proj_from_bytes(&curve->A, &curve->C, enc); } -static byte_t * -ec_point_to_bytes(byte_t *enc, const ec_point_t *point) -{ - return proj_to_bytes(enc, &point->x, &point->z); -} - -static const byte_t * -ec_point_from_bytes(ec_point_t *point, const byte_t *enc) -{ - return proj_from_bytes(&point->x, &point->z, enc); -} - -static byte_t * -ec_basis_to_bytes(byte_t *enc, const ec_basis_t *basis) -{ - enc = ec_point_to_bytes(enc, &basis->P); - enc = ec_point_to_bytes(enc, &basis->Q); - enc = ec_point_to_bytes(enc, &basis->PmQ); - return enc; -} - -static const byte_t * -ec_basis_from_bytes(ec_basis_t *basis, const byte_t *enc) -{ - enc = ec_point_from_bytes(&basis->P, enc); - enc = ec_point_from_bytes(&basis->Q, enc); - enc = ec_point_from_bytes(&basis->PmQ, enc); - return enc; -} - // public API byte_t * diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/endomorphism_action.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/endomorphism_action.c index 1a93e36455..7993e79f8c 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/endomorphism_action.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/endomorphism_action.c @@ -4,261 +4,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x199, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6} +{{0x199, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6}} #elif RADIX == 32 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x19, 0x0, 0x0, 0x300000000000000} +{{0x19, 0x0, 0x0, 0x300000000000000}} #else -{0xc, 0x0, 0x0, 0x0, 0x400000000000} +{{0xc, 0x0, 0x0, 0x0, 0x400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if 
defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x107, 0xc, 0x1890, 0xf2a, 0x52b, 0xb68, 0x152d, 0xa4c, 0x1054, 0x642, 0x36a, 0x6f8, 0x7ad, 0x146c, 0x1d66, 0x1b67, 0x236, 0x10d, 0x1933, 0x3} +{{0x107, 0xc, 0x1890, 0xf2a, 0x52b, 0xb68, 0x152d, 0xa4c, 0x1054, 0x642, 0x36a, 0x6f8, 0x7ad, 0x146c, 0x1d66, 0x1b67, 0x236, 0x10d, 0x1933, 0x3}} #elif RADIX == 32 -{0x3020e, 0xb795624, 0x5ab6829, 0x1514995, 0x1b5190a, 0x187ad37c, 0x19facd46, 0x8688db6, 0x3c998} +{{0x3020e, 0xb795624, 0x5ab6829, 0x1514995, 0x1b5190a, 0x187ad37c, 0x19facd46, 0x8688db6, 0x3c998}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x52b795624001810, 0x8c8505452654b56d, 0xf59a8d87ad37c0da, 0x24e4cc21a236db3} +{{0x52b795624001810, 0x8c8505452654b56d, 0xf59a8d87ad37c0da, 0x24e4cc21a236db3}} #else -{0x5bcab12000c08, 0x452654b56d052, 0x26f81b5190a0a, 0x36cfd66a361eb, 0x12726610d11b} +{{0x5bcab12000c08, 0x452654b56d052, 0x26f81b5190a0a, 0x36cfd66a361eb, 0x12726610d11b}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1f87, 0x83e, 0x32e, 0xe58, 0xd9d, 0x1416, 0x752, 0x13b4, 0x1efa, 0xe62, 0x12f5, 0x1907, 0x1814, 0x1ddd, 0x1aa6, 0x1420, 0x2cd, 0x1431, 0x1be2, 0x7} +{{0x1f87, 0x83e, 0x32e, 0xe58, 0xd9d, 0x1416, 0x752, 0x13b4, 0x1efa, 0xe62, 0x12f5, 0x1907, 0x1814, 0x1ddd, 0x1aa6, 0x1420, 0x2cd, 0x1431, 0x1be2, 0x7}} #elif RADIX == 32 -{0x120fbf0f, 0x1d72c0cb, 0xa54166c, 0x1bea7687, 0x197ab98b, 0x1b814c83, 0x8354ddd, 0x188b368, 0x2df15} +{{0x120fbf0f, 0x1d72c0cb, 0xa54166c, 0x1bea7687, 0x197ab98b, 0x1b814c83, 0x8354ddd, 0x188b368, 0x2df15}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xcd9d72c0cb907df8, 0x5cc5efa9da1d4a82, 0x6a9bbbb814c83cbd, 0x26ef8a8622cda10} +{{0xcd9d72c0cb907df8, 0x5cc5efa9da1d4a82, 0x6a9bbbb814c83cbd, 0x26ef8a8622cda10}} #else -{0x6b96065c83efc, 0x29da1d4a82cd9, 0x190797ab98bdf, 0x6841aa6eeee05, 0x1377c5431166} +{{0x6b96065c83efc, 0x29da1d4a82cd9, 0x190797ab98bdf, 0x6841aa6eeee05, 0x1377c5431166}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x5ff, 0x1783, 0xadc, 0x775, 0xad4, 0x593, 0xb4c, 0x21e, 0x1cb2, 0x13d8, 0x179f, 0x680, 0x1a9c, 0x1824, 0x118e, 0x13d9, 0x24, 0x1956, 0x1dd2, 0x9} +{{0x5ff, 0x1783, 0xadc, 0x775, 0xad4, 0x593, 0xb4c, 0x21e, 0x1cb2, 0x13d8, 0x179f, 0x680, 0x1a9c, 0x1824, 0x118e, 0x13d9, 0x24, 0x1956, 0x1dd2, 0x9}} #elif RADIX == 32 -{0x5e0cbff, 0x143baab7, 0x9859356, 0x12c843cb, 0xbcfcf63, 0x9a9c340, 0x16631d82, 0xab00927, 0x4ee96} +{{0x5e0cbff, 0x143baab7, 0x9859356, 0x12c843cb, 0xbcfcf63, 0x9a9c340, 0x16631d82, 0xab00927, 0x4ee96}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6ad43baab72f065f, 0xe7b1cb210f2d30b2, 0xc63b049a9c3405e7, 0x4ff74b2ac0249ec} +{{0x6ad43baab72f065f, 0xe7b1cb210f2d30b2, 0xc63b049a9c3405e7, 0x4ff74b2ac0249ec}} #else -{0x21dd55b97832f, 0x210f2d30b26ad, 0x680bcfcf6396, 0x27b318ec126a7, 0x4ffba5956012} +{{0x21dd55b97832f, 0x210f2d30b26ad, 0x680bcfcf6396, 0x27b318ec126a7, 0x4ffba5956012}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1c7f, 0x1117, 0xa4, 0x1164, 0x6e, 0x1e63, 0x1b7b, 0x1305, 0x424, 0x131a, 0x1b61, 0xae3, 0x17b1, 0xe5e, 0x1848, 0x1e81, 0x14a5, 0x1cb5, 0x1d87, 0x8} +{{0x1c7f, 0x1117, 0xa4, 0x1164, 0x6e, 0x1e63, 0x1b7b, 0x1305, 0x424, 0x131a, 0x1b61, 0xae3, 0x17b1, 0xe5e, 0x1848, 0x1e81, 0x14a5, 0x1cb5, 0x1d87, 0x8}} #elif RADIX == 32 -{0x445f8ff, 0xe8b2029, 0xf7e6303, 0x109260bb, 0x1db0cc68, 0x1d7b1571, 0x7090e5, 0x5ad297d, 0x3ec3f} +{{0x445f8ff, 0xe8b2029, 0xf7e6303, 0x109260bb, 0x1db0cc68, 0x1d7b1571, 0x7090e5, 0x5ad297d, 0x3ec3f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x606e8b2029222fc7, 0x6634424982edefcc, 0xe121cbd7b1571ed8, 0x4f761f96b4a5f40} +{{0x606e8b2029222fc7, 0x6634424982edefcc, 0xe121cbd7b1571ed8, 0x4f761f96b4a5f40}} #else -{0x74590149117e3, 0x4982edefcc606, 0x2ae3db0cc6884, 0x7d0384872f5ec, 0x4fbb0fcb5a52} +{{0x74590149117e3, 0x4982edefcc606, 0x2ae3db0cc6884, 0x7d0384872f5ec, 0x4fbb0fcb5a52}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x342, 0xfb7, 0xed, 0x1d80, 0x17f1, 0x4a2, 0x1c26, 0xb96, 0x1367, 0x3dc, 0x1624, 0x1f2a, 0x5e, 0x1cab, 0x27, 0x1e89, 0x1293, 0x1e24, 0x417, 0x5} +{{0x342, 0xfb7, 0xed, 0x1d80, 0x17f1, 0x4a2, 0x1c26, 0xb96, 0x1367, 
0x3dc, 0x1624, 0x1f2a, 0x5e, 0x1cab, 0x27, 0x1e89, 0x1293, 0x1e24, 0x417, 0x5}} #elif RADIX == 32 -{0xbedc685, 0x11ec003b, 0x4c4a2bf, 0xd9d72dc, 0xb120f72, 0x1605ef95, 0x2404fca, 0x1124a4fd, 0x20bf} +{{0xbedc685, 0x11ec003b, 0x4c4a2bf, 0xd9d72dc, 0xb120f72, 0x1605ef95, 0x2404fca, 0x1124a4fd, 0x20bf}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x57f1ec003b5f6e34, 0x7b93675cb709894, 0x809f95605ef95589, 0xc905fc49293f44} +{{0x57f1ec003b5f6e34, 0x7b93675cb709894, 0x809f95605ef95589, 0xc905fc49293f44}} #else -{0xf6001dafb71a, 0x75cb70989457f, 0x5f2ab120f726c, 0x7d12027e55817, 0x6482fe24949} +{{0xf6001dafb71a, 0x75cb70989457f, 0x5f2ab120f726c, 0x7d12027e55817, 0x6482fe24949}} #endif #endif , #if 0 #elif RADIX == 16 -{0xf3c, 0x1d21, 0xd78, 0xe8e, 0x1f3c, 0x11b, 0x12c, 0x1851, 0x19b1, 0xd9, 0xf3f, 0x759, 0xf47, 0x1e88, 0x56e, 0x8ef, 0x116e, 0x1fa1, 0x1199, 0x0} +{{0xf3c, 0x1d21, 0xd78, 0xe8e, 0x1f3c, 0x11b, 0x12c, 0x1851, 0x19b1, 0xd9, 0xf3f, 0x759, 0xf47, 0x1e88, 0x56e, 0x8ef, 0x116e, 0x1fa1, 0x1199, 0x0}} #elif RADIX == 32 -{0x7485e78, 0x1c74735e, 0x5811bf9, 0x6c70a21, 0x179f8367, 0x10f473ac, 0x1bcadde8, 0x1d0c5b91, 0x8ccf} +{{0x7485e78, 0x1c74735e, 0x5811bf9, 0x6c70a21, 0x179f8367, 0x10f473ac, 0x1bcadde8, 0x1d0c5b91, 0x8ccf}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x7f3c74735e3a42f3, 0xc1b39b1c2884b023, 0x95bbd10f473acbcf, 0x3c4667f4316e477} +{{0x7f3c74735e3a42f3, 0xc1b39b1c2884b023, 0x95bbd10f473acbcf, 0x3c4667f4316e477}} #else -{0x63a39af1d2179, 0x1c2884b0237f3, 0x675979f836736, 0x11de56ef443d1, 0x462333fa18b7} +{{0x63a39af1d2179, 0x1c2884b0237f3, 0x675979f836736, 0x11de56ef443d1, 0x462333fa18b7}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -480,261 +480,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x19e1, 0x1d98, 0x1de9, 0x1dfc, 0x922, 0x1fb8, 0x476, 0xd05, 0xc85, 0x1788, 0x1967, 0x155d, 0x1f93, 0x629, 0x188f, 0x119, 0x1f6f, 0x241, 0x1378, 0x0} +{{0x19e1, 0x1d98, 0x1de9, 0x1dfc, 0x922, 0x1fb8, 0x476, 0xd05, 0xc85, 0x1788, 0x1967, 0x155d, 0x1f93, 0x629, 0x188f, 0x119, 0x1f6f, 0x241, 0x1378, 0x0}} #elif RADIX == 32 -{0xf6633c2, 0x2efe77a, 0xedfb849, 0x1215a0a4, 0x1cb3de21, 0x13f93aae, 0x6711e62, 0x120fdbc2, 0x9bc0} +{{0xf6633c2, 0x2efe77a, 0xedfb849, 0x1215a0a4, 0x1cb3de21, 0x13f93aae, 0x6711e62, 0x120fdbc2, 0x9bc0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x922efe77a7b319e, 
0xef10c8568291dbf7, 0xe23cc53f93aaee59, 0x54de0483f6f08c} +{{0x922efe77a7b319e, 0xef10c8568291dbf7, 0xe23cc53f93aaee59, 0x54de0483f6f08c}} #else -{0x177f3bd3d98cf, 0x568291dbf7092, 0x755dcb3de2190, 0x423388f314fe4, 0x2a6f0241fb7} +{{0x177f3bd3d98cf, 0x568291dbf7092, 0x755dcb3de2190, 0x423388f314fe4, 0x2a6f0241fb7}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x811, 0xf66, 0x77a, 0x177f, 0x248, 0x17ee, 0x91d, 0xb41, 0x321, 0x1de2, 0xe59, 0x1d57, 0xfe4, 0x198a, 0xe23, 0x1846, 0xfdb, 0x90, 0x14de, 0x8} +{{0x811, 0xf66, 0x77a, 0x177f, 0x248, 0x17ee, 0x91d, 0xb41, 0x321, 0x1de2, 0xe59, 0x1d57, 0xfe4, 0x198a, 0xe23, 0x1846, 0xfdb, 0x90, 0x14de, 0x8}} #elif RADIX == 32 -{0x13d99023, 0x8bbf9de, 0x3b7ee12, 0xc856829, 0x172cf788, 0x14fe4eab, 0x119c4798, 0x483f6f0, 0x3a6f0} +{{0x13d99023, 0x8bbf9de, 0x3b7ee12, 0xc856829, 0x172cf788, 0x14fe4eab, 0x119c4798, 0x483f6f0, 0x3a6f0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xc248bbf9de9ecc81, 0x7bc43215a0a476fd, 0x388f314fe4eabb96, 0x95378120fdbc23} +{{0xc248bbf9de9ecc81, 0x7bc43215a0a476fd, 0x388f314fe4eabb96, 0x95378120fdbc23}} #else -{0x45dfcef4f6640, 0x15a0a476fdc24, 0x1d5772cf78864, 0x708ce23cc53f9, 0x2ca9bc0907ed} +{{0x45dfcef4f6640, 0x15a0a476fdc24, 0x1d5772cf78864, 0x708ce23cc53f9, 0x2ca9bc0907ed}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x869, 0x197b, 0xcdb, 0x1d89, 0xf9b, 0x1d79, 0x18ec, 0xafe, 0x1d41, 0x77, 0x9d4, 0x1a3f, 0x2b, 0x46d, 0x173e, 0xedd, 0x172, 0x1c77, 0x8a6, 0x8} +{{0x869, 0x197b, 0xcdb, 0x1d89, 0xf9b, 0x1d79, 0x18ec, 0xafe, 0x1d41, 0x77, 0x9d4, 0x1a3f, 0x2b, 0x46d, 0x173e, 0xedd, 0x172, 0x1c77, 0x8a6, 0x8}} #elif RADIX == 32 -{0x1e5ed0d3, 0x1bec4b36, 0x1d9d797c, 0x15055fd8, 0x14ea01df, 0x1a02bd1f, 0x176e7c46, 0x3b85c9d, 0x34537} +{{0x1e5ed0d3, 0x1bec4b36, 0x1d9d797c, 0x15055fd8, 0x14ea01df, 0x1a02bd1f, 0x176e7c46, 0x3b85c9d, 0x34537}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x2f9bec4b36f2f686, 0xefd4157f63b3af, 0xdcf88da02bd1fa75, 0x31229b8ee17276e} +{{0x2f9bec4b36f2f686, 0xefd4157f63b3af, 0xdcf88da02bd1fa75, 0x31229b8ee17276e}} #else -{0x5f6259b797b43, 0x157f63b3af2f9, 0x7a3f4ea01dfa8, 0x1dbb73e23680a, 0x18914dc770b9} +{{0x5f6259b797b43, 0x157f63b3af2f9, 0x7a3f4ea01dfa8, 0x1dbb73e23680a, 0x18914dc770b9}} #endif #endif , #if 0 #elif RADIX == 16 -{0x124b, 0xed4, 0x1706, 0x32d, 0x1541, 0x11b8, 0x2b0, 0xbe4, 0x1ee8, 0x1a3c, 0x16e3, 0x1d25, 0x19bb, 0xb63, 0x1fc1, 0x5fa, 0xf03, 0xfa, 0x1ec, 0x9} +{{0x124b, 0xed4, 0x1706, 0x32d, 0x1541, 0x11b8, 0x2b0, 0xbe4, 0x1ee8, 0x1a3c, 0x16e3, 0x1d25, 0x19bb, 0xb63, 0x1fc1, 0x5fa, 0xf03, 0xfa, 0x1ec, 0x9}} #elif RADIX == 32 -{0x13b52497, 0x1196dc1, 0x1611b8aa, 0x1ba17c82, 0x1b71e8f3, 0x79bbe92, 0x1ebf82b6, 0x7d3c0cb, 0x40f60} +{{0x13b52497, 0x1196dc1, 0x1611b8aa, 0x1ba17c82, 0x1b71e8f3, 0x79bbe92, 0x1ebf82b6, 0x7d3c0cb, 0x40f60}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x1541196dc19da924, 0xf479ee85f20ac237, 0x7f056c79bbe92db8, 0x3b87b01f4f032fd} +{{0x1541196dc19da924, 0xf479ee85f20ac237, 0x7f056c79bbe92db8, 0x3b87b01f4f032fd}} #else -{0x8cb6e0ced492, 0x5f20ac237154, 0x7d25b71e8f3dd, 0x4bf5fc15b1e6e, 0x1dc3d80fa781} +{{0x8cb6e0ced492, 0x5f20ac237154, 0x7d25b71e8f3dd, 0x4bf5fc15b1e6e, 0x1dc3d80fa781}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 
0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1e71, 0xd67, 0x13da, 0x19eb, 0x137a, 0x1d27, 0x1ba7, 0x1996, 0x755, 0xe3d, 0x1139, 0x1764, 0x18ac, 0x1020, 0x3c4, 0x150e, 0x1ffd, 0x14fe, 0xa16, 0x6} +{{0x1e71, 0xd67, 0x13da, 0x19eb, 0x137a, 0x1d27, 0x1ba7, 0x1996, 0x755, 0xe3d, 0x1139, 0x1764, 0x18ac, 0x1020, 0x3c4, 0x150e, 0x1ffd, 0x14fe, 0xa16, 0x6}} #elif RADIX == 32 -{0x1359fce3, 0x1acf5cf6, 0x14fd279b, 0x1d5732db, 0x89cb8f4, 0x18acbb2, 0x3878902, 0x7f7ff6a, 0x150b5} +{{0x1359fce3, 0x1acf5cf6, 0x14fd279b, 0x1d5732db, 0x89cb8f4, 0x18acbb2, 0x3878902, 0x7f7ff6a, 0x150b5}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf37acf5cf69acfe7, 0x5c7a755ccb6e9fa4, 0xf120418acbb244e, 0x8285a9fdffda87} +{{0xf37acf5cf69acfe7, 0x5c7a755ccb6e9fa4, 0xf120418acbb244e, 0x8285a9fdffda87}} #else -{0x567ae7b4d67f3, 0x5ccb6e9fa4f37, 0x176489cb8f4ea, 0x6a1c3c481062b, 0x2c142d4feffe} +{{0x567ae7b4d67f3, 0x5ccb6e9fa4f37, 0x176489cb8f4ea, 0x6a1c3c481062b, 0x2c142d4feffe}} #endif #endif , #if 0 #elif RADIX == 16 -{0x13ec, 0x10a3, 0x1e69, 0x106f, 0x619, 0x1cb5, 0x9aa, 0x362, 0x53a, 0x1af5, 0x1bae, 0x60a, 0x2a4, 0x448, 0x3d0, 0x535, 0xeb1, 0x1a6e, 0x978, 0x5} +{{0x13ec, 0x10a3, 0x1e69, 0x106f, 0x619, 0x1cb5, 0x9aa, 0x362, 0x53a, 0x1af5, 0x1bae, 0x60a, 0x2a4, 0x448, 0x3d0, 0x535, 0xeb1, 0x1a6e, 0x978, 0x5}} #elif RADIX == 32 -{0xc28e7d9, 0x19837f9a, 0x155cb530, 0x14e86c49, 0xdd76bd4, 0x102a4305, 0xd47a044, 0x1373ac4a, 0x4bc6} +{{0xc28e7d9, 0x19837f9a, 0x155cb530, 0x14e86c49, 0xdd76bd4, 0x102a4305, 0xd47a044, 0x1373ac4a, 0x4bc6}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xa619837f9a61473e, 0xb5ea53a1b126ab96, 0x8f408902a43056eb, 0x3ea5e34dceb129a} +{{0xa619837f9a61473e, 0xb5ea53a1b126ab96, 0x8f408902a43056eb, 0x3ea5e34dceb129a}} #else -{0x4c1bfcd30a39f, 0x21b126ab96a61, 0x60add76bd4a7, 0x4a6a3d02240a9, 0x1f52f1a6e758} +{{0x4c1bfcd30a39f, 0x21b126ab96a61, 0x60add76bd4a7, 0x4a6a3d02240a9, 0x1f52f1a6e758}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else 
-{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x77a, 0x201, 0x168d, 0x8fe, 0x780, 0x1ccb, 0x52b, 0x1c83, 0x18dd, 0xcef, 0x11f5, 0x1446, 0x301, 0xb63, 0xe3f, 0x1b72, 0x1, 0x1da9, 0x1281, 0x8} +{{0x77a, 0x201, 0x168d, 0x8fe, 0x780, 0x1ccb, 0x52b, 0x1c83, 0x18dd, 0xcef, 0x11f5, 0x1446, 0x301, 0xb63, 0xe3f, 0x1b72, 0x1, 0x1da9, 0x1281, 0x8}} #elif RADIX == 32 -{0x8804ef5, 0x47f5a3, 0x57ccb3c, 0x3779065, 0x8fab3bf, 0x6301a23, 0x1c9c7eb6, 0xd480076, 0x3940f} +{{0x8804ef5, 0x47f5a3, 0x57ccb3c, 0x3779065, 0x8fab3bf, 0x6301a23, 0x1c9c7eb6, 0xd480076, 0x3940f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x678047f5a3440277, 0x59df8dde4194af99, 0x38fd6c6301a2347d, 0x364a07b52001db9} +{{0x678047f5a3440277, 0x59df8dde4194af99, 0x38fd6c6301a2347d, 0x364a07b52001db9}} #else -{0x23fad1a2013b, 0x5e4194af99678, 0x34468fab3bf1b, 0x76e4e3f5b18c0, 0x432503da9000} +{{0x23fad1a2013b, 0x5e4194af99678, 0x34468fab3bf1b, 0x76e4e3f5b18c0, 0x432503da9000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1, 0xb39, 0x969, 0x1324, 0xbe6, 0x86e, 0x1021, 0x29a, 0x1ff0, 0xd23, 0x7d5, 0x72a, 0x1e33, 0x1fd9, 0x10af, 0x15bc, 0x1d56, 0x928, 0x1d49, 0x0} +{{0x1, 0xb39, 0x969, 0x1324, 0xbe6, 0x86e, 0x1021, 0x29a, 0x1ff0, 0xd23, 0x7d5, 0x72a, 0x1e33, 0x1fd9, 0x10af, 0x15bc, 0x1d56, 0x928, 0x1d49, 0x0}} #elif RADIX == 32 -{0xace4002, 0x699225a, 0x4286e5f, 0x1fc05350, 0x3eab48f, 0x13e33395, 0xf215ffd, 0x94755ab, 0xea4a} +{{0xace4002, 0x699225a, 0x4286e5f, 0x1fc05350, 0x3eab48f, 0x13e33395, 0xf215ffd, 0x94755ab, 0xea4a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xcbe699225a567200, 0x5a47ff014d40850d, 0x42bffb3e333951f5, 0x57525251d56ade} +{{0xcbe699225a567200, 0x5a47ff014d40850d, 0x42bffb3e333951f5, 0x57525251d56ade}} #else -{0x34c912d2b3900, 0x14d40850dcbe, 0x672a3eab48ffe, 0x2b790affecf8c, 0x2ba92928eab} +{{0x34c912d2b3900, 0x14d40850dcbe, 0x672a3eab48ffe, 0x2b790affecf8c, 0x2ba92928eab}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -956,261 +956,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x13f5, 0x8b7, 0x1873, 0x144a, 0x1a89, 0x13f9, 0xd49, 0x6f2, 0x3bb, 0x102, 0x1ecb, 0x180b, 0x4d5, 0x608, 0x119, 0x5e6, 0x751, 0x961, 0xd37, 0x5} +{{0x13f5, 0x8b7, 0x1873, 0x144a, 0x1a89, 0x13f9, 0xd49, 0x6f2, 0x3bb, 0x102, 0x1ecb, 0x180b, 0x4d5, 0x608, 0x119, 0x5e6, 0x751, 0x961, 0xd37, 0x5}} #elif RADIX == 
32 -{0x1a2de7eb, 0x9a2561c, 0x933f9d4, 0xeecde4d, 0x1f658408, 0x104d5c05, 0x19823260, 0xb09d44b, 0x69ba} +{{0x1a2de7eb, 0x9a2561c, 0x933f9d4, 0xeecde4d, 0x1f658408, 0x104d5c05, 0x19823260, 0xb09d44b, 0x69ba}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3a89a2561cd16f3f, 0xc2043bb37935267f, 0x464c104d5c05fb2, 0x1bb4dd2c27512f3} +{{0x3a89a2561cd16f3f, 0xc2043bb37935267f, 0x464c104d5c05fb2, 0x1bb4dd2c27512f3}} #else -{0x4d12b0e68b79f, 0x337935267f3a8, 0x380bf65840877, 0x4bcc119304135, 0x35da6e9613a8} +{{0x4d12b0e68b79f, 0x337935267f3a8, 0x380bf65840877, 0x4bcc119304135, 0x35da6e9613a8}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x1e96, 0x1a2d, 0x161c, 0xd12, 0xea2, 0xcfe, 0x1352, 0x19bc, 0x10ee, 0x1840, 0x1fb2, 0xe02, 0x135, 0x982, 0x1046, 0x979, 0x9d4, 0x1a58, 0x1b4d, 0x9} +{{0x1e96, 0x1a2d, 0x161c, 0xd12, 0xea2, 0xcfe, 0x1352, 0x19bc, 0x10ee, 0x1840, 0x1fb2, 0xe02, 0x135, 0x982, 0x1046, 0x979, 0x9d4, 0x1a58, 0x1b4d, 0x9}} #elif RADIX == 32 -{0x68b7d2d, 0x2689587, 0xa4cfe75, 0x3bb3793, 0xfd96102, 0x4135701, 0x1e608c98, 0x12c27512, 0x4da6e} +{{0x68b7d2d, 0x2689587, 0xa4cfe75, 0x3bb3793, 0xfd96102, 0x4135701, 0x1e608c98, 0x12c27512, 0x4da6e}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xcea2689587345be9, 0xb0810eecde4d499f, 0xc1193041357017ec, 0x22ed374b09d44bc} +{{0xcea2689587345be9, 0xb0810eecde4d499f, 0xc1193041357017ec, 0x22ed374b09d44bc}} #else -{0x1344ac39a2df4, 0x6cde4d499fcea, 0x2e02fd961021d, 0x12f30464c104d, 0x39769ba584ea} +{{0x1344ac39a2df4, 0x6cde4d499fcea, 0x2e02fd961021d, 0x12f30464c104d, 0x39769ba584ea}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0xa82, 0x1d2d, 0x15b8, 0x404, 0x1a32, 0xaf9, 0xa86, 0xddf, 0x14bf, 0x100c, 0xc42, 0xa89, 0x1df, 0x82f, 0x1f07, 0x782, 0x664, 0x1ba5, 0x5d7, 0x2} +{{0xa82, 0x1d2d, 0x15b8, 0x404, 0x1a32, 0xaf9, 0xa86, 0xddf, 0x14bf, 0x100c, 0xc42, 0xa89, 0x1df, 0x82f, 0x1f07, 0x782, 0x664, 0x1ba5, 0x5d7, 0x2}} #elif RADIX == 32 -{0x74b5504, 0x1220256e, 0x10caf9d1, 0x12fdbbea, 0x16214032, 0x1e1df544, 0xbe0e82, 0x1d29990f, 0x22ebe} +{{0x74b5504, 0x1220256e, 0x10caf9d1, 0x12fdbbea, 0x16214032, 0x1e1df544, 0xbe0e82, 0x1d29990f, 0x22ebe}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3a3220256e3a5aa8, 0xa0194bf6efaa195f, 0x7c1d05e1df544b10, 0xb175f74a6643c1} +{{0x3a3220256e3a5aa8, 0xa0194bf6efaa195f, 0x7c1d05e1df544b10, 0xb175f74a6643c1}} #else -{0x11012b71d2d54, 0x76efaa195f3a3, 0x6a89621403297, 0xf05f07417877, 0x58bafba5332} +{{0x11012b71d2d54, 0x76efaa195f3a3, 0x6a89621403297, 0xf05f07417877, 0x58bafba5332}} #endif #endif , #if 0 #elif RADIX == 16 -{0x5a1, 0x46a, 0x17ab, 0x1cfa, 0x547, 0x1b9c, 0xda5, 0x141e, 0x216, 0x1f49, 0xaca, 0x15a1, 0xfe0, 0x1afb, 0x1a47, 0x133d, 0x1887, 0x590, 0xbc2, 0x1} +{{0x5a1, 0x46a, 0x17ab, 0x1cfa, 0x547, 0x1b9c, 0xda5, 0x141e, 0x216, 0x1f49, 0xaca, 0x15a1, 0xfe0, 0x1afb, 0x1a47, 0x133d, 0x1887, 0x590, 0xbc2, 0x1}} #elif RADIX == 32 -{0x191a8b42, 0x7e7d5ea, 0x14bb9c2a, 0x85a83cd, 0x15657d24, 0x16fe0ad0, 0xf748faf, 0xc8621e6, 0x15e11} +{{0x191a8b42, 0x7e7d5ea, 0x14bb9c2a, 0x85a83cd, 0x15657d24, 0x16fe0ad0, 0xf748faf, 0xc8621e6, 0x15e11}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x8547e7d5eac8d45a, 0xbe92216a0f369773, 0xe91f5f6fe0ad0ab2, 0x5af08b2188799e} +{{0x8547e7d5eac8d45a, 0xbe92216a0f369773, 0xe91f5f6fe0ad0ab2, 0x5af08b2188799e}} #else -{0x3f3eaf5646a2d, 0x6a0f369773854, 0x15a15657d2442, 0x667ba47d7dbf8, 0x2d784590c43} +{{0x3f3eaf5646a2d, 0x6a0f369773854, 0x15a15657d2442, 0x667ba47d7dbf8, 0x2d784590c43}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1311, 0x910, 0x413, 0x1d16, 0x14f7, 0x19c9, 0x14d3, 0x1504, 0x776, 0x1c2c, 0x15b0, 0xc6e, 0x36b, 0x1777, 0x1ed2, 0xb34, 0x1281, 0x1281, 0xd0f, 0x4} +{{0x1311, 0x910, 0x413, 0x1d16, 0x14f7, 0x19c9, 0x14d3, 0x1504, 0x776, 0x1c2c, 0x15b0, 0xc6e, 0x36b, 0x1777, 0x1ed2, 0xb34, 0x1281, 0x1281, 0xd0f, 0x4}} #elif RADIX == 32 -{0x1a442622, 0x17e8b104, 0x1a79c9a7, 0x1ddaa094, 0xad870b0, 0xe36b637, 0xd3da577, 0x140ca056, 0x4687c} +{{0x1a442622, 0x17e8b104, 0x1a79c9a7, 0x1ddaa094, 0xad870b0, 0xe36b637, 0xd3da577, 0x140ca056, 0x4687c}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x34f7e8b104d22131, 0x3858776a82534f39, 0x7b4aeee36b63756c, 0x7343e50328159a} +{{0x34f7e8b104d22131, 0x3858776a82534f39, 0x7b4aeee36b63756c, 0x7343e50328159a}} #else -{0x3f45882691098, 0x6a82534f3934f, 0x6c6ead870b0ee, 0x5669ed2bbb8da, 0x2b9a1f281940} +{{0x3f45882691098, 0x6a82534f3934f, 0x6c6ead870b0ee, 0x5669ed2bbb8da, 0x2b9a1f281940}} #endif #endif , #if 0 #elif RADIX == 16 -{0x12d2, 0x6d8, 0x1e2c, 0x6f9, 0x5e8, 0x4e5, 0x32c, 0x58d, 0x1bda, 0x16f9, 0x8b5, 0x3c0, 0x10c, 0xb18, 0x450, 0x834, 0x3b7, 0x8d7, 0x15bf, 0x0} +{{0x12d2, 0x6d8, 0x1e2c, 0x6f9, 0x5e8, 0x4e5, 0x32c, 0x58d, 0x1bda, 0x16f9, 0x8b5, 0x3c0, 0x10c, 0xb18, 0x450, 0x834, 0x3b7, 0x8d7, 0x15bf, 0x0}} #elif RADIX == 32 -{0x1b625a4, 0x837cf8b, 0x584e52f, 0xf68b1a3, 0x45adbe7, 0x1010c1e0, 0xd08a0b1, 0x6b8edd0, 0xadfa} +{{0x1b625a4, 0x837cf8b, 0x584e52f, 0xf68b1a3, 0x45adbe7, 0x1010c1e0, 0xd08a0b1, 0x6b8edd0, 0xadfa}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xa5e837cf8b0db12d, 0x6df3bda2c68cb09c, 0x114163010c1e022d, 0xa56fd1ae3b741a} +{{0xa5e837cf8b0db12d, 0x6df3bda2c68cb09c, 0x114163010c1e022d, 0xa56fd1ae3b741a}} #else -{0x41be7c586d896, 0x22c68cb09ca5e, 0x3c045adbe77b, 0x506845058c043, 0x2d2b7e8d71db} +{{0x41be7c586d896, 0x22c68cb09ca5e, 0x3c045adbe77b, 0x506845058c043, 0x2d2b7e8d71db}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x5f, 0x444, 0x49e, 0xae7, 0x248, 0x1a37, 0x9b6, 0xc28, 0x464, 0x19b7, 0x1560, 0xd7a, 0x2e3, 0x81a, 0x6f5, 0x5f9, 0x1818, 0x164c, 0x1713, 0x7} +{{0x5f, 0x444, 0x49e, 0xae7, 0x248, 0x1a37, 0x9b6, 0xc28, 0x464, 0x19b7, 0x1560, 0xd7a, 0x2e3, 0x81a, 0x6f5, 0x5f9, 0x1818, 0x164c, 0x1713, 0x7}} #elif RADIX == 32 -{0x111100bf, 0x8573927, 0x16da3712, 0x11918509, 0xab066dc, 0x142e36bd, 0x1e4dea81, 0x1266060b, 0x2b89d} +{{0x111100bf, 0x8573927, 0x16da3712, 0x11918509, 0xab066dc, 0x142e36bd, 0x1e4dea81, 0x1266060b, 0x2b89d}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xe248573927888805, 0x336e46461426db46, 0x9bd50342e36bd558, 0x4edc4ec998182fc} +{{0xe248573927888805, 0x336e46461426db46, 0x9bd50342e36bd558, 0x4edc4ec998182fc}} #else -{0x42b9c93c44402, 0x461426db46e24, 0x6d7aab066dc8c, 0xbf26f540d0b8, 0x4f6e2764cc0c} +{{0x42b9c93c44402, 0x461426db46e24, 0x6d7aab066dc8c, 0xbf26f540d0b8, 0x4f6e2764cc0c}} #endif #endif , #if 0 #elif RADIX == 16 -{0x19b1, 0x1912, 0x1eb, 0x1cbc, 0x210, 0x17cf, 0x1b9e, 0x754, 0x38c, 0x816, 0x1431, 0x79a, 0xa57, 0x15ff, 0x756, 0xa60, 0x1064, 0x162f, 0x1e5e, 0x0} +{{0x19b1, 0x1912, 0x1eb, 0x1cbc, 0x210, 0x17cf, 0x1b9e, 0x754, 0x38c, 0x816, 0x1431, 0x79a, 0xa57, 0x15ff, 0x756, 0xa60, 0x1064, 0x162f, 0x1e5e, 0x0}} #elif RADIX == 32 -{0x1e44b362, 0x10e5e07a, 0x13d7cf10, 0xe30ea9b, 0xa18a058, 0x1ea573cd, 0x180ead5f, 0x117c1914, 0xf2f5} +{{0x1e44b362, 0x10e5e07a, 0x13d7cf10, 0xe30ea9b, 0xa18a058, 0x1ea573cd, 0x180ead5f, 0x117c1914, 0xf2f5}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xe210e5e07af2259b, 0x502c38c3aa6e7af9, 0x1d5abfea573cd50c, 0x5797ac5f064530} +{{0xe210e5e07af2259b, 0x502c38c3aa6e7af9, 0x1d5abfea573cd50c, 0x5797ac5f064530}} #else -{0x72f03d7912cd, 0x43aa6e7af9e21, 0x679aa18a05871, 0x14c0756affa95, 0x2abcbd62f832} +{{0x72f03d7912cd, 0x43aa6e7af9e21, 0x679aa18a05871, 0x14c0756affa95, 0x2abcbd62f832}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -1432,261 +1432,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0xa72, 
0x186f, 0x81c, 0x105c, 0x151, 0x1451, 0x1b96, 0x4d1, 0x12be, 0x3bf, 0x996, 0x1130, 0x1460, 0x1ed8, 0xc2a, 0x1c23, 0x1ee, 0x3f4, 0xbe2, 0x9} +{{0xa72, 0x186f, 0x81c, 0x105c, 0x151, 0x1451, 0x1b96, 0x4d1, 0x12be, 0x3bf, 0x996, 0x1130, 0x1460, 0x1ed8, 0xc2a, 0x1c23, 0x1ee, 0x3f4, 0xbe2, 0x9}} #elif RADIX == 32 -{0x61bd4e5, 0x1182e207, 0x12d4510a, 0xaf89a3b, 0x4cb0efe, 0x11460898, 0x8d855ed, 0x1fa07bb8, 0x45f10} +{{0x61bd4e5, 0x1182e207, 0x12d4510a, 0xaf89a3b, 0x4cb0efe, 0x11460898, 0x8d855ed, 0x1fa07bb8, 0x45f10}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x215182e20730dea7, 0x877f2be268ee5a8a, 0xb0abdb1460898265, 0xeaf887e81eee11} +{{0x215182e20730dea7, 0x877f2be268ee5a8a, 0xb0abdb1460898265, 0xeaf887e81eee11}} #else -{0xc17103986f53, 0x6268ee5a8a215, 0x11304cb0efe57, 0x3846c2af6c518, 0x2f57c43f40f7} +{{0xc17103986f53, 0x6268ee5a8a215, 0x11304cb0efe57, 0x3846c2af6c518, 0x2f57c43f40f7}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x1c36, 0x61b, 0x207, 0xc17, 0x854, 0x1514, 0xee5, 0x1134, 0x1caf, 0x10ef, 0x265, 0x44c, 0x518, 0x17b6, 0x1b0a, 0x1708, 0x7b, 0x10fd, 0xaf8, 0x3} +{{0x1c36, 0x61b, 0x207, 0xc17, 0x854, 0x1514, 0xee5, 0x1134, 0x1caf, 0x10ef, 0x265, 0x44c, 0x518, 0x17b6, 0x1b0a, 0x1708, 0x7b, 0x10fd, 0xaf8, 0x3}} #elif RADIX == 32 -{0x1986f86c, 0x1460b881, 0x1cb51442, 0x12be268e, 0x132c3bf, 0xc518226, 0x236157b, 0x7e81eee, 0x357c4} +{{0x1986f86c, 0x1460b881, 0x1cb51442, 0x12be268e, 0x132c3bf, 0xc518226, 0x236157b, 0x7e81eee, 0x357c4}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x885460b881cc37c3, 0x61dfcaf89a3b96a2, 0x6c2af6c518226099, 0x1fabe21fa07bb84} +{{0x885460b881cc37c3, 0x61dfcaf89a3b96a2, 0x6c2af6c518226099, 0x1fabe21fa07bb84}} #else -{0x2305c40e61be1, 0x789a3b96a2885, 0x44c132c3bf95, 0x6e11b0abdb146, 0x37d5f10fd03d} +{{0x2305c40e61be1, 0x789a3b96a2885, 0x44c132c3bf95, 0x6e11b0abdb146, 0x37d5f10fd03d}} #endif #endif , #if 0 
#elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x1c07, 0x15d6, 0x526, 0xde7, 0x149b, 0x719, 0x1786, 0x1272, 0x18b, 0x1bac, 0xf74, 0x1588, 0xe6f, 0x24c, 0x1204, 0x1e9d, 0x13bb, 0x1ccb, 0x78d, 0x9} +{{0x1c07, 0x15d6, 0x526, 0xde7, 0x149b, 0x719, 0x1786, 0x1272, 0x18b, 0x1bac, 0xf74, 0x1588, 0xe6f, 0x24c, 0x1204, 0x1e9d, 0x13bb, 0x1ccb, 0x78d, 0x9}} #elif RADIX == 32 -{0x1575b80f, 0x1b6f3949, 0x10c719a4, 0x62e4e57, 0x7ba6eb0, 0x18e6fac4, 0x7640824, 0x65ceefd, 0x43c6f} +{{0x1575b80f, 0x1b6f3949, 0x10c719a4, 0x62e4e57, 0x7ba6eb0, 0x18e6fac4, 0x7640824, 0x65ceefd, 0x43c6f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x349b6f3949abadc0, 0x375818b9395e18e3, 0xc810498e6fac43dd, 0x279e379973bbf4e} +{{0x349b6f3949abadc0, 0x375818b9395e18e3, 0xc810498e6fac43dd, 0x279e379973bbf4e}} #else -{0x5b79ca4d5d6e0, 0x39395e18e3349, 0x75887ba6eb031, 0x7d3b20412639b, 0x13cf1bccb9dd} +{{0x5b79ca4d5d6e0, 0x39395e18e3349, 0x75887ba6eb031, 0x7d3b20412639b, 0x13cf1bccb9dd}} #endif #endif , #if 0 #elif RADIX == 16 -{0xddf, 0x238, 0xe4b, 0x1958, 0xe6e, 0x1059, 0x133, 0x1e11, 0x5ae, 0x2ab, 0x1044, 0xdd, 0xe9d, 0x1aa8, 0x15e2, 0xc9b, 0xaa6, 0x3c8, 0x10ac, 0x0} +{{0xddf, 0x238, 0xe4b, 0x1958, 0xe6e, 0x1059, 0x133, 0x1e11, 0x5ae, 0x2ab, 0x1044, 0xdd, 0xe9d, 0x1aa8, 0x15e2, 0xc9b, 0xaa6, 0x3c8, 0x10ac, 0x0}} #elif RADIX == 32 -{0x188e1bbe, 0xecac392, 0x6705973, 0x16bbc221, 0x18220aac, 0x10e9d06e, 0x6ebc5aa, 0x1e42a999, 0x8560} +{{0x188e1bbe, 0xecac392, 0x6705973, 0x16bbc221, 0x18220aac, 0x10e9d06e, 0x6ebc5aa, 0x1e42a999, 0x8560}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x2e6ecac392c470dd, 0x5565aef0884ce0b, 0xd78b550e9d06ec11, 0x4b42b0790aa664d} +{{0x2e6ecac392c470dd, 0x5565aef0884ce0b, 0xd78b550e9d06ec11, 0x4b42b0790aa664d}} #else -{0x76561c962386e, 0x6f0884ce0b2e6, 0x20dd8220aacb5, 0x19375e2d543a7, 0x4da1583c8553} +{{0x76561c962386e, 0x6f0884ce0b2e6, 0x20dd8220aacb5, 0x19375e2d543a7, 
0x4da1583c8553}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x192, 0x1c6d, 0x18a4, 0x152, 0x1aa9, 0xec4, 0x1be8, 0x1209, 0x7f, 0x797, 0x1295, 0x1433, 0x1a75, 0x15a, 0x1d64, 0x146c, 0x12df, 0x10af, 0x188f, 0x1} +{{0x192, 0x1c6d, 0x18a4, 0x152, 0x1aa9, 0xec4, 0x1be8, 0x1209, 0x7f, 0x797, 0x1295, 0x1433, 0x1a75, 0x15a, 0x1d64, 0x146c, 0x12df, 0x10af, 0x188f, 0x1}} #elif RADIX == 32 -{0x71b4324, 0x90a9629, 0x1d0ec4d5, 0x1fe413b, 0x194a9e5c, 0x15a75a19, 0x1b3ac815, 0x57cb7e8, 0x1c47c} +{{0x71b4324, 0x90a9629, 0x1d0ec4d5, 0x1fe413b, 0x194a9e5c, 0x15a75a19, 0x1b3ac815, 0x57cb7e8, 0x1c47c}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x9aa90a962938da19, 0x4f2e07f904efa1d8, 0x75902b5a75a19ca5, 0xae23e15f2dfa36} +{{0x9aa90a962938da19, 0x4f2e07f904efa1d8, 0x75902b5a75a19ca5, 0xae23e15f2dfa36}} #else -{0x4854b149c6d0c, 0x7904efa1d89aa, 0x343394a9e5c0f, 0x68d9d640ad69d, 0x2d711f0af96f} +{{0x4854b149c6d0c, 0x7904efa1d89aa, 0x343394a9e5c0f, 0x68d9d640ad69d, 0x2d711f0af96f}} #endif #endif , #if 0 #elif RADIX == 16 -{0x129c, 0xe1d, 0x1bd3, 0xf2a, 0x937, 0xf81, 0xa47, 0x186b, 0x1bbe, 0x1c6d, 0x1edd, 0x1b51, 0xa10, 0x167a, 0x1f0b, 0x374, 0x720, 0x1547, 0x726, 0x1} +{{0x129c, 0xe1d, 0x1bd3, 0xf2a, 0x937, 0xf81, 0xa47, 0x186b, 0x1bbe, 0x1c6d, 0x1edd, 0x1b51, 0xa10, 0x167a, 0x1f0b, 0x374, 0x720, 0x1547, 0x726, 0x1}} #elif RADIX == 32 -{0x1b876538, 0x177956f4, 0x8ef8149, 0xefb0d6a, 0x1f6ef1b7, 0x14a10da8, 0x1d3e1767, 0xa39c806, 0x13935} +{{0x1b876538, 0x177956f4, 0x8ef8149, 0xefb0d6a, 0x1f6ef1b7, 0x14a10da8, 0x1d3e1767, 0xa39c806, 0x13935}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x29377956f4dc3b29, 0x78dbbbec35a91df0, 0x7c2ecf4a10da8fb7, 0x3c9c9aa8e7201ba} +{{0x29377956f4dc3b29, 0x78dbbbec35a91df0, 0x7c2ecf4a10da8fb7, 0x3c9c9aa8e7201ba}} #else -{0x3bcab7a6e1d94, 0x6c35a91df0293, 0x1b51f6ef1b777, 0x6e9f0bb3d284, 0x464e4d547390} +{{0x3bcab7a6e1d94, 0x6c35a91df0293, 0x1b51f6ef1b777, 0x6e9f0bb3d284, 0x464e4d547390}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 
0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x12cc, 0x495, 0x1a14, 0x1db0, 0xb66, 0x76a, 0x1a77, 0xaf6, 0x1656, 0x1ad7, 0xb35, 0x4b1, 0xffa, 0x37b, 0xabf, 0xa5c, 0xdc9, 0x1a74, 0x11c9, 0x8} +{{0x12cc, 0x495, 0x1a14, 0x1db0, 0xb66, 0x76a, 0x1a77, 0xaf6, 0x1656, 0x1ad7, 0xb35, 0x4b1, 0xffa, 0x37b, 0xabf, 0xa5c, 0xdc9, 0x1a74, 0x11c9, 0x8}} #elif RADIX == 32 -{0x1256599, 0x6ed8685, 0xee76a5b, 0x19595eda, 0x159aeb5e, 0x16ffa258, 0x17157e37, 0x13a37254, 0x38e4e} +{{0x1256599, 0x6ed8685, 0xee76a5b, 0x19595eda, 0x159aeb5e, 0x16ffa258, 0x17157e37, 0x13a37254, 0x38e4e}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x4b66ed8685092b2c, 0x75af65657b69dced, 0x2afc6f6ffa258acd, 0x4047274e8dc952e} +{{0x4b66ed8685092b2c, 0x75af65657b69dced, 0x2afc6f6ffa258acd, 0x4047274e8dc952e}} #else -{0x376c342849596, 0x657b69dced4b6, 0x44b159aeb5eca, 0x54b8abf1bdbfe, 0x202393a746e4} +{{0x376c342849596, 0x657b69dced4b6, 0x44b159aeb5eca, 0x54b8abf1bdbfe, 0x202393a746e4}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1379, 0x125e, 0x1c56, 0x1811, 0x144, 0x2a8, 0xbb3, 0x2ca, 0x6d2, 0x565, 0x91e, 0x1280, 0x1b4f, 0x51a, 0x1eb7, 0x35a, 0x14fe, 0x1b59, 0x182e, 0x2} +{{0x1379, 0x125e, 0x1c56, 0x1811, 0x144, 0x2a8, 0xbb3, 0x2ca, 0x6d2, 0x565, 0x91e, 0x1280, 0x1b4f, 0x51a, 0x1eb7, 0x35a, 0x14fe, 0x1b59, 0x182e, 0x2}} #elif RADIX == 32 -{0x1497a6f2, 0x4c08f15, 0x1662a80a, 0x1b48594b, 0x48f1594, 0x15b4f940, 0x16bd6e51, 0x1acd3f86, 0x2c176} +{{0x1497a6f2, 0x4c08f15, 0x1662a80a, 0x1b48594b, 0x48f1594, 0x15b4f940, 0x16bd6e51, 0x1acd3f86, 0x2c176}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x144c08f15a4bd37, 0x8aca6d21652ecc55, 0x7adca35b4f940247, 0x2e60bb6b34fe1ad} +{{0x144c08f15a4bd37, 0x8aca6d21652ecc55, 0x7adca35b4f940247, 0x2e60bb6b34fe1ad}} #else -{0x260478ad25e9b, 0x21652ecc55014, 0x728048f1594da, 0x6b5eb728d6d3, 0x3f305db59a7f} +{{0x260478ad25e9b, 0x21652ecc55014, 0x728048f1594da, 0x6b5eb728d6d3, 0x3f305db59a7f}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -1908,261 +1908,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x102e, 0x4c4, 0x1586, 0x1184, 0xa12, 0x9ce, 0x1866, 0x433, 0x5ef, 0x82a, 0x13a5, 0xbc6, 0x591, 0x175b, 0x10bc, 0xfa5, 0x109d, 0x8d4, 0x1325, 0x9} +{{0x102e, 0x4c4, 0x1586, 0x1184, 0xa12, 0x9ce, 0x1866, 0x433, 0x5ef, 0x82a, 0x13a5, 0xbc6, 0x591, 0x175b, 0x10bc, 0xfa5, 0x109d, 0x8d4, 0x1325, 0x9}} #elif RADIX == 32 -{0x1131205d, 0x128c2561, 0xcc9ce50, 0x17bc8678, 0x9d2a0a8, 0x165915e3, 0x9617975, 0x6a4275f, 0x4992a} +{{0x1131205d, 0x128c2561, 0xcc9ce50, 0x17bc8678, 0x9d2a0a8, 0x165915e3, 0x9617975, 0x6a4275f, 0x4992a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xca128c2561898902, 0x50545ef219e19939, 0xc2f2eb65915e34e9, 0x4acc951a909d7d2} +{{0xca128c2561898902, 0x50545ef219e19939, 0xc2f2eb65915e34e9, 0x4acc951a909d7d2}} #else -{0x14612b0c4c481, 0x7219e19939ca1, 0x2bc69d2a0a8bd, 0x5f4b0bcbad964, 0x25664a8d484e} +{{0x14612b0c4c481, 0x7219e19939ca1, 0x2bc69d2a0a8bd, 0x5f4b0bcbad964, 0x25664a8d484e}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x5a5, 0x1131, 0x561, 0x1461, 0x1284, 0x1273, 0x1e19, 0x190c, 0x117b, 0xa0a, 0x14e9, 0xaf1, 0x1964, 0x5d6, 0xc2f, 0xbe9, 0x427, 0xa35, 0xcc9, 0x3} +{{0x5a5, 0x1131, 0x561, 0x1461, 0x1284, 0x1273, 0x1e19, 0x190c, 0x117b, 0xa0a, 0x14e9, 0xaf1, 0x1964, 0x5d6, 0xc2f, 0xbe9, 0x427, 0xa35, 0xcc9, 0x3}} #elif RADIX == 32 -{0xc4c4b4a, 0x4a30958, 0x3327394, 0x5ef219e, 0x1a74a82a, 0xd964578, 0x1a585e5d, 0x11a909d7, 0x3664a} +{{0xc4c4b4a, 0x4a30958, 0x3327394, 0x5ef219e, 0x1a74a82a, 0xd964578, 0x1a585e5d, 0x11a909d7, 0x3664a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) 
-{0x7284a3095862625a, 0x541517bc8678664e, 0xb0bcbad964578d3a, 0x1ab32546a4275f4} +{{0x7284a3095862625a, 0x541517bc8678664e, 0xb0bcbad964578d3a, 0x1ab32546a4275f4}} #else -{0x25184ac31312d, 0x3c8678664e728, 0xaf1a74a82a2f, 0x57d2c2f2eb659, 0xd5992a35213} +{{0x25184ac31312d, 0x3c8678664e728, 0xaf1a74a82a2f, 0x57d2c2f2eb659, 0xd5992a35213}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x1b4a, 0xf6a, 0xadd, 0x302, 0x196b, 0x366, 0x1399, 0xe83, 0x1540, 0xcd, 0x169d, 0x1007, 0xfe6, 0x1fd2, 0xebb, 0x808, 0x1725, 0x1c1e, 0x1009, 0x8} +{{0x1b4a, 0xf6a, 0xadd, 0x302, 0x196b, 0x366, 0x1399, 0xe83, 0x1540, 0xcd, 0x169d, 0x1007, 0xfe6, 0x1fd2, 0xebb, 0x808, 0x1725, 0x1c1e, 0x1009, 0x8}} #elif RADIX == 32 -{0xbdab695, 0xb1812b7, 0x132366cb, 0x1501d073, 0x1b4e8336, 0x4fe6803, 0x21d77fd, 0xf5c950, 0x3804f} +{{0xbdab695, 0xb1812b7, 0x132366cb, 0x1501d073, 0x1b4e8336, 0x4fe6803, 0x21d77fd, 0xf5c950, 0x3804f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xd96b1812b75ed5b4, 0x419b540741ce646c, 0x3aeffa4fe6803da7, 0x36402783d725404} +{{0xd96b1812b75ed5b4, 0x419b540741ce646c, 0x3aeffa4fe6803da7, 0x36402783d725404}} #else -{0x58c095baf6ada, 0x741ce646cd96, 0x5007b4e8336a8, 0x5010ebbfe93f9, 0x1b2013c1eb92} +{{0x58c095baf6ada, 0x741ce646cd96, 0x5007b4e8336a8, 0x5010ebbfe93f9, 0x1b2013c1eb92}} #endif #endif , #if 0 #elif RADIX == 16 -{0x122a, 0x94e, 0x1927, 0x1701, 0x58e, 0x79, 0x134e, 0xecc, 0xa0f, 0x7be, 0xc39, 0xfb2, 0x1df0, 0x79a, 0x154a, 0x1a4a, 0x23f, 0x3de, 0x1be1, 0x9} +{{0x122a, 0x94e, 0x1927, 0x1701, 0x58e, 0x79, 0x134e, 0xecc, 0xa0f, 0x7be, 0xc39, 0xfb2, 0x1df0, 0x79a, 0x154a, 0x1a4a, 0x23f, 0x3de, 0x1be1, 0x9}} #elif RADIX == 32 -{0x1a53a455, 0xeb80e49, 0x9c0792c, 0x83dd993, 0x61c9ef9, 0x15df07d9, 0x12aa9479, 0x1ef08ff4, 0x4df08} +{{0x1a53a455, 0xeb80e49, 0x9c0792c, 0x83dd993, 0x61c9ef9, 0x15df07d9, 0x12aa9479, 0x1ef08ff4, 0x4df08}} #elif RADIX == 64 #if 
defined(SQISIGN_GF_IMPL_BROADWELL) -{0x258eb80e49d29d22, 0x4f7ca0f7664d380f, 0x5528f35df07d930e, 0x36ef847bc23fd25} +{{0x258eb80e49d29d22, 0x4f7ca0f7664d380f, 0x5528f35df07d930e, 0x36ef847bc23fd25}} #else -{0x75c0724e94e91, 0x77664d380f258, 0xfb261c9ef941, 0x749554a3cd77c, 0x1b77c23de11f} +{{0x75c0724e94e91, 0x77664d380f258, 0xfb261c9ef941, 0x749554a3cd77c, 0x1b77c23de11f}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1943, 0x2e1, 0x677, 0x614, 0x19e, 0x11e6, 0xde2, 0x104d, 0x551, 0x1455, 0x1d7e, 0xdd, 0x15e0, 0x14c5, 0xeeb, 0x14b5, 0x168f, 0x1a03, 0xa9d, 0x4} +{{0x1943, 0x2e1, 0x677, 0x614, 0x19e, 0x11e6, 0xde2, 0x104d, 0x551, 0x1455, 0x1d7e, 0xdd, 0x15e0, 0x14c5, 0xeeb, 0x14b5, 0x168f, 0x1a03, 0xa9d, 0x4}} #elif RADIX == 32 -{0x18b87286, 0x1e30a19d, 0x1c51e60c, 0x154609ad, 0x1ebf5154, 0xb5e006e, 0xd5dd74c, 0x101da3e9, 0x454ee} +{{0x18b87286, 0x1e30a19d, 0x1c51e60c, 0x154609ad, 0x1ebf5154, 0xb5e006e, 0xd5dd74c, 0x101da3e9, 0x454ee}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xc19e30a19dc5c394, 0xa8aa551826b78a3c, 0xbbae98b5e006ef5f, 0x112a7740768fa5a} +{{0xc19e30a19dc5c394, 0xa8aa551826b78a3c, 0xbbae98b5e006ef5f, 0x112a7740768fa5a}} #else -{0x71850cee2e1ca, 0x1826b78a3cc19, 0xddebf5154aa, 0x696aeeba62d78, 0x8953ba03b47} +{{0x71850cee2e1ca, 0x1826b78a3cc19, 0xddebf5154aa, 0x696aeeba62d78, 0x8953ba03b47}} #endif #endif , #if 0 #elif RADIX == 16 -{0x512, 0xda9, 0x31a, 0x1711, 0x1b65, 0x9f0, 0xe54, 0x1d4a, 0xe1c, 0xc90, 0x1837, 0x1728, 0x15fa, 0xa40, 0xf21, 0x1b43, 0x1716, 0x1277, 0x11a8, 0x9} +{{0x512, 0xda9, 0x31a, 0x1711, 0x1b65, 0x9f0, 0xe54, 0x1d4a, 0xe1c, 0xc90, 0x1837, 0x1728, 0x15fa, 0xa40, 0xf21, 0x1b43, 0x1716, 0x1277, 0x11a8, 0x9}} #elif RADIX == 32 -{0x136a4a25, 0x5b888c6, 0xa89f0db, 0x1873a94e, 0xc1bb241, 0x15fab94, 0x10de42a4, 0x13bdc5b6, 0x48d44} +{{0x136a4a25, 0x5b888c6, 0xa89f0db, 0x1873a94e, 0xc1bb241, 0x15fab94, 0x10de42a4, 0x13bdc5b6, 0x48d44}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x1b65b888c69b5251, 0xd920e1cea539513e, 0xbc854815fab9460d, 0xec6a24ef716da1} +{{0x1b65b888c69b5251, 0xd920e1cea539513e, 0xbc854815fab9460d, 0xec6a24ef716da1}} #else -{0x2dc44634da928, 0x4ea539513e1b6, 0x5728c1bb241c3, 0x3686f2152057e, 0x2f6351277b8b} +{{0x2dc44634da928, 0x4ea539513e1b6, 0x5728c1bb241c3, 0x3686f2152057e, 0x2f6351277b8b}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x822, 0x1a13, 0x11d, 0x10e0, 0x2b9, 0x1d20, 0x19f9, 0x1dc2, 0x1770, 0x135e, 0x1c13, 0x1cba, 0x14df, 0x5c8, 0x1f31, 0x215, 0x16ed, 0x1f7a, 0xc6c, 0x5} +{{0x822, 0x1a13, 0x11d, 0x10e0, 0x2b9, 0x1d20, 0x19f9, 0x1dc2, 0x1770, 0x135e, 0x1c13, 0x1cba, 0x14df, 0x5c8, 0x1f31, 0x215, 0x16ed, 0x1f7a, 0xc6c, 0x5}} #elif RADIX == 32 -{0xe84d045, 0x19870047, 0x1f3d2015, 0x1dc3b859, 0xe09cd7a, 0x114dfe5d, 0x57e625c, 0x1bd5bb44, 0x6367} +{{0xe84d045, 0x19870047, 0x1f3d2015, 0x1dc3b859, 0xe09cd7a, 0x114dfe5d, 0x57e625c, 0x1bd5bb44, 0x6367}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x2b9870047742682, 0xe6bd770ee167e7a4, 0xfcc4b914dfe5d704, 0xcb1b3ef56ed10a} +{{0x2b9870047742682, 0xe6bd770ee167e7a4, 0xfcc4b914dfe5d704, 0xcb1b3ef56ed10a}} #else -{0x4c38023ba1341, 0xee167e7a402b, 0x7cbae09cd7aee, 0x442bf312e4537, 0x658d9f7ab76} +{{0x4c38023ba1341, 0xee167e7a402b, 0x7cbae09cd7aee, 0x442bf312e4537, 0x658d9f7ab76}} #endif #endif , #if 0 #elif RADIX == 16 -{0x2cc, 0xd50, 0xeda, 0x1c3c, 0x8a6, 0x1659, 0xffb, 0x1cee, 0x1f14, 0x17fe, 0x1860, 0x427, 0x132c, 0x5c0, 0xb9f, 0x143d, 0x639, 0x19f0, 0x1551, 0x7} +{{0x2cc, 0xd50, 0xeda, 0x1c3c, 0x8a6, 0x1659, 0xffb, 0x1cee, 0x1f14, 0x17fe, 0x1860, 0x427, 0x132c, 0x5c0, 0xb9f, 0x143d, 0x639, 0x19f0, 0x1551, 0x7}} #elif RADIX == 32 -{0x13540599, 0x6e1e3b6, 0x1f765945, 0x1c539dcf, 0x1c305ffb, 0x132c213, 0xf573e5c, 0xf818e68, 0x2aa8e} +{{0x13540599, 0x6e1e3b6, 0x1f765945, 0x1c539dcf, 0x1c305ffb, 0x132c213, 0xf573e5c, 0xf818e68, 0x2aa8e}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x28a6e1e3b69aa02c, 0x2ffdf14e773feecb, 0xae7cb8132c213e18, 0x3fd5473e0639a1e} +{{0x28a6e1e3b69aa02c, 0x2ffdf14e773feecb, 0xae7cb8132c213e18, 0x3fd5473e0639a1e}} #else -{0x370f1db4d5016, 0x4e773feecb28a, 0x427c305ffbe2, 0x687ab9f2e04cb, 0x1feaa39f031c} +{{0x370f1db4d5016, 0x4e773feecb28a, 0x427c305ffbe2, 0x687ab9f2e04cb, 0x1feaa39f031c}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif 
#endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -2384,261 +2384,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x6b9, 0xd4c, 0x1d8d, 0x1f99, 0x1be4, 0x1f53, 0x5c1, 0x1937, 0x9c, 0xe68, 0x61e, 0x14e8, 0x352, 0x3d6, 0x174, 0x133, 0x18a6, 0x1b19, 0x94b, 0x9} +{{0x6b9, 0xd4c, 0x1d8d, 0x1f99, 0x1be4, 0x1f53, 0x5c1, 0x1937, 0x9c, 0xe68, 0x61e, 0x14e8, 0x352, 0x3d6, 0x174, 0x133, 0x18a6, 0x1b19, 0x94b, 0x9}} #elif RADIX == 32 -{0xb530d73, 0x4fccf63, 0x183f53df, 0x27326e5, 0x30f39a0, 0xc352a74, 0xcc2e83d, 0x18ce2982, 0x44a5e} +{{0xb530d73, 0x4fccf63, 0x183f53df, 0x27326e5, 0x30f39a0, 0xc352a74, 0xcc2e83d, 0x18ce2982, 0x44a5e}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x7be4fccf635a986b, 0x9cd009cc9b9707ea, 0x85d07ac352a74187, 0x31a52f6338a6099} +{{0x7be4fccf635a986b, 0x9cd009cc9b9707ea, 0x85d07ac352a74187, 0x31a52f6338a6099}} #else -{0x27e67b1ad4c35, 0x4c9b9707ea7be, 0x54e830f39a013, 0x2661741eb0d4, 0x40d297b19c53} +{{0x27e67b1ad4c35, 0x4c9b9707ea7be, 0x54e830f39a013, 0x2661741eb0d4, 0x40d297b19c53}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x348, 0xb53, 0xf63, 0x7e6, 0x1ef9, 0xfd4, 0x1970, 0x64d, 0x27, 0x139a, 0x187, 0x153a, 0x10d4, 0xf5, 0x185d, 0x104c, 0xe29, 0x1ec6, 0x1a52, 0x0} +{{0x348, 0xb53, 0xf63, 0x7e6, 0x1ef9, 0xfd4, 0x1970, 0x64d, 0x27, 0x139a, 0x187, 0x153a, 0x10d4, 0xf5, 0x185d, 0x104c, 
0xe29, 0x1ec6, 0x1a52, 0x0}} #elif RADIX == 32 -{0x1ad4c690, 0x193f33d8, 0xe0fd4f7, 0x9cc9b9, 0xc3ce68, 0xb0d4a9d, 0x1330ba0f, 0x16338a60, 0xd297} +{{0x1ad4c690, 0x193f33d8, 0xe0fd4f7, 0x9cc9b9, 0xc3ce68, 0xb0d4a9d, 0x1330ba0f, 0x16338a60, 0xd297}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x9ef93f33d8d6a634, 0xe734027326e5c1fa, 0x61741eb0d4a9d061, 0x28694bd8ce29826} +{{0x9ef93f33d8d6a634, 0xe734027326e5c1fa, 0x61741eb0d4a9d061, 0x28694bd8ce29826}} #else -{0x49f99ec6b531a, 0x7326e5c1fa9ef, 0x153a0c3ce6804, 0x609985d07ac35, 0x1434a5ec6714} +{{0x49f99ec6b531a, 0x7326e5c1fa9ef, 0x153a0c3ce6804, 0x609985d07ac35, 0x1434a5ec6714}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x18af, 0xb6e, 0x124d, 0xa49, 0xa8c, 0x11f5, 0xea9, 0x298, 0xa55, 0x1738, 0xb61, 0x2b9, 0x8a, 0x167a, 0x17e6, 0x2b0, 0x1290, 0x16ad, 0x1505, 0x2} +{{0x18af, 0xb6e, 0x124d, 0xa49, 0xa8c, 0x11f5, 0xea9, 0x298, 0xa55, 0x1738, 0xb61, 0x2b9, 0x8a, 0x167a, 0x17e6, 0x2b0, 0x1290, 0x16ad, 0x1505, 0x2}} #elif RADIX == 32 -{0xadbb15e, 0xc524c93, 0x1531f554, 0x954530e, 0x15b0dce1, 0x1408a15c, 0xc2fcd67, 0x156ca405, 0x2a82d} +{{0xadbb15e, 0xc524c93, 0x1531f554, 0x954530e, 0x15b0dce1, 0x1408a15c, 0xc2fcd67, 0x156ca405, 0x2a82d}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xaa8c524c9356dd8a, 0x6e70a5514c3aa63e, 0x5f9acf408a15cad8, 0x4c5416d5b290158} +{{0xaa8c524c9356dd8a, 0x6e70a5514c3aa63e, 0x5f9acf408a15cad8, 0x4c5416d5b290158}} #else -{0x6292649ab6ec5, 0x514c3aa63eaa8, 0x42b95b0dce14a, 0x5617e6b3d022, 0x262a0b6ad948} +{{0x6292649ab6ec5, 0x514c3aa63eaa8, 0x42b95b0dce14a, 0x5617e6b3d022, 0x262a0b6ad948}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1390, 0x1895, 0x9b7, 0xa5a, 0x1030, 0x16c1, 0xd21, 0x1053, 0x327, 0x1a4c, 0x1a22, 0x11e4, 0x16ba, 0x13a1, 0x1dbc, 0x1aac, 0x148c, 0x5c8, 0x15d2, 0x0} +{{0x1390, 0x1895, 0x9b7, 0xa5a, 0x1030, 0x16c1, 0xd21, 0x1053, 0x327, 0x1a4c, 0x1a22, 
0x11e4, 0x16ba, 0x13a1, 0x1dbc, 0x1aac, 0x148c, 0x5c8, 0x15d2, 0x0}} #elif RADIX == 32 -{0x1e256720, 0x1052d26d, 0x436c181, 0xc9e0a6d, 0xd116930, 0x36ba8f2, 0xb3b793a, 0xe452335, 0xae91} +{{0x1e256720, 0x1052d26d, 0x436c181, 0xc9e0a6d, 0xd116930, 0x36ba8f2, 0xb3b793a, 0xe452335, 0xae91}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x303052d26df12b39, 0xb498327829b486d8, 0x76f27436ba8f2688, 0x5748b9148cd56} +{{0x303052d26df12b39, 0xb498327829b486d8, 0x76f27436ba8f2688, 0x5748b9148cd56}} #else -{0x296936f8959c, 0x7829b486d8303, 0x51e4d11693064, 0x3559dbc9d0dae, 0x282ba45c8a46} +{{0x296936f8959c, 0x7829b486d8303, 0x51e4d11693064, 0x3559dbc9d0dae, 0x282ba45c8a46}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1be6, 0x11b3, 0x14ba, 0xf43, 0x1bd1, 0x215, 0x1e9a, 0x137a, 0x7b2, 0x15, 0x126, 0x148, 0x1c2b, 0x1b70, 0xf1c, 0x1e48, 0x1259, 0x188a, 0x1e44, 0x7} +{{0x1be6, 0x11b3, 0x14ba, 0xf43, 0x1bd1, 0x215, 0x1e9a, 0x137a, 0x7b2, 0x15, 0x126, 0x148, 0x1c2b, 0x1b70, 0xf1c, 0x1e48, 0x1259, 0x188a, 0x1e44, 0x7}} #elif RADIX == 32 -{0x146cf7cd, 0x117a1d2e, 0x134215de, 0x1eca6f5e, 0x930054, 0x1c2b0a4, 0x121e39b7, 0x454967c, 0x2f226} +{{0x146cf7cd, 0x117a1d2e, 0x134215de, 0x1eca6f5e, 0x930054, 0x1c2b0a4, 0x121e39b7, 0x454967c, 0x2f226}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xbbd17a1d2ea367be, 0x802a7b29bd7a6842, 0x3c736e1c2b0a4049, 0x21f913115259f24} +{{0xbbd17a1d2ea367be, 0x802a7b29bd7a6842, 0x3c736e1c2b0a4049, 0x21f913115259f24}} #else -{0xbd0e9751b3df, 0x29bd7a6842bbd, 0x61480930054f6, 0x7c90f1cdb870a, 0x10fc8988a92c} +{{0xbd0e9751b3df, 0x29bd7a6842bbd, 0x61480930054f6, 0x7c90f1cdb870a, 0x10fc8988a92c}} #endif #endif , #if 0 #elif RADIX == 16 -{0x4c5, 0x37e, 0xafa, 0x1b90, 0x13d, 0x8d3, 0xaa7, 0x489, 0x1d4a, 0x17bc, 0x168, 0x37f, 0x1ed6, 0x666, 0x1889, 0x1a4e, 0xa57, 0xeb7, 0xd37, 0x7} +{{0x4c5, 0x37e, 0xafa, 0x1b90, 0x13d, 0x8d3, 0xaa7, 0x489, 0x1d4a, 0x17bc, 0x168, 0x37f, 0x1ed6, 0x666, 0x1889, 0x1a4e, 0xa57, 0xeb7, 0xd37, 0x7}} #elif RADIX == 32 -{0x10df898b, 0x1ddc82be, 0x14e8d309, 0x1528912a, 0x10b45ef3, 0xded61bf, 0x13b11266, 0x15ba95f4, 0x269bb} +{{0x10df898b, 0x1ddc82be, 0x14e8d309, 0x1528912a, 0x10b45ef3, 0xded61bf, 0x13b11266, 0x15ba95f4, 0x269bb}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x613ddc82be86fc4c, 0x2f79d4a244aa9d1a, 0x6224ccded61bf85a, 0x1cb4ddd6ea57d27} +{{0x613ddc82be86fc4c, 0x2f79d4a244aa9d1a, 0x6224ccded61bf85a, 
0x1cb4ddd6ea57d27}} #else -{0x6ee415f437e26, 0x2244aa9d1a613, 0x437f0b45ef3a9, 0x749d8893337b5, 0xe5a6eeb752b} +{{0x6ee415f437e26, 0x2244aa9d1a613, 0x437f0b45ef3a9, 0x749d8893337b5, 0xe5a6eeb752b}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x447, 0x1b87, 0x1cf0, 0x155, 0xb1, 0x804, 0x97a, 0x64a, 0x886, 0x3a3, 0x126f, 0x1553, 0x74d, 0xde9, 0x941, 0x39c, 0x8f, 0x1bbb, 0xf3, 0x1} +{{0x447, 0x1b87, 0x1cf0, 0x155, 0xb1, 0x804, 0x97a, 0x64a, 0x886, 0x3a3, 0x126f, 0x1553, 0x74d, 0xde9, 0x941, 0x39c, 0x8f, 0x1bbb, 0xf3, 0x1}} #elif RADIX == 32 -{0x6e1c88e, 0x110aaf3c, 0xf480405, 0x218c949, 0x19378e8d, 0x1274daa9, 0x71282de, 0x1dd823c7, 0x1079e} +{{0x6e1c88e, 0x110aaf3c, 0xf480405, 0x218c949, 0x19378e8d, 0x1274daa9, 0x71282de, 0x1dd823c7, 0x1079e}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x80b10aaf3c370e44, 0xc74688632525e900, 0x2505bd274daa9c9b, 0x2383cf77608f1ce} +{{0x80b10aaf3c370e44, 0xc74688632525e900, 0x2505bd274daa9c9b, 0x2383cf77608f1ce}} #else -{0x85579e1b8722, 0x632525e90080b, 0x35539378e8d10, 0x47389416f49d3, 0x11c1e7bbb047} +{{0x85579e1b8722, 0x632525e90080b, 0x35539378e8d10, 0x47389416f49d3, 0x11c1e7bbb047}} #endif #endif , #if 0 #elif RADIX == 16 -{0xf9d, 0x552, 0x797, 0x19fc, 0x166, 0x7a8, 0x1ee5, 0xc77, 0x1ee7, 0x15ef, 0x340, 0x10df, 0x1d5f, 0x170, 0xf2, 0x123, 0x1bb1, 0xd23, 0x3fc, 0x6} +{{0xf9d, 0x552, 0x797, 0x19fc, 0x166, 0x7a8, 0x1ee5, 0xc77, 0x1ee7, 0x15ef, 0x340, 0x10df, 0x1d5f, 0x170, 0xf2, 0x123, 0x1bb1, 0xd23, 0x3fc, 0x6}} #elif RADIX == 32 -{0x19549f3b, 0x6cfe1e5, 0x1ca7a80b, 0x1b9d8efe, 0x11a057bf, 0x1d5f86f, 0x8c1e417, 0x91eec42, 0x11fe3} +{{0x19549f3b, 0x6cfe1e5, 0x1ca7a80b, 0x1b9d8efe, 0x11a057bf, 0x1d5f86f, 0x8c1e417, 0x91eec42, 0x11fe3}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x166cfe1e5caa4f9, 0x2bdfee763bfb94f5, 0x83c82e1d5f86f8d0, 0x440ff1a47bb1091} +{{0x166cfe1e5caa4f9, 0x2bdfee763bfb94f5, 0x83c82e1d5f86f8d0, 0x440ff1a47bb1091}} #else -{0x367f0f2e5527c, 0x763bfb94f5016, 0x70df1a057bfdc, 0x42460f20b8757, 0x4a07f8d23dd8} +{{0x367f0f2e5527c, 0x763bfb94f5016, 0x70df1a057bfdc, 0x42460f20b8757, 0x4a07f8d23dd8}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -2860,261 +2860,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x1068, 0xf2c, 0x1ada, 0x1f58, 0x1343, 0x5cc, 0x90, 0x1bba, 0x50b, 0x1a35, 0x35f, 0x1388, 0x5ce, 0xc4f, 0x1462, 0x191f, 0x665, 0x843, 0x1cb1, 0x1} +{{0x1068, 0xf2c, 0x1ada, 0x1f58, 0x1343, 0x5cc, 0x90, 0x1bba, 0x50b, 0x1a35, 0x35f, 0x1388, 0x5ce, 0xc4f, 0x1462, 0x191f, 0x665, 0x843, 0x1cb1, 0x1}} #elif RADIX == 32 -{0x13cb20d0, 0x3fac6b6, 0x1205cc9a, 0x142f7740, 0x1afe8d4, 0x1e5ce9c4, 0x7e8c4c4, 0x2199972, 0x1e58a} +{{0x13cb20d0, 0x3fac6b6, 0x1205cc9a, 0x142f7740, 0x1afe8d4, 0x1e5ce9c4, 0x7e8c4c4, 0x2199972, 0x1e58a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x9343fac6b69e5906, 0xf46a50bddd0240b9, 0xd18989e5ce9c40d7, 0x28f2c5086665c8f} +{{0x9343fac6b69e5906, 0xf46a50bddd0240b9, 0xd18989e5ce9c40d7, 0x28f2c5086665c8f}} #else -{0x1fd635b4f2c83, 0x3ddd0240b9934, 0x53881afe8d4a1, 0x723f462627973, 0x147962843332} +{{0x1fd635b4f2c83, 0x3ddd0240b9934, 0x53881afe8d4a1, 0x723f462627973, 0x147962843332}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif 
}, {{ #if 0 #elif RADIX == 16 -{0x5b3, 0x13cb, 0x6b6, 0x1fd6, 0x4d0, 0x173, 0x1024, 0x1eee, 0x942, 0x1e8d, 0xd7, 0x14e2, 0x1973, 0x1313, 0x1d18, 0xe47, 0x1999, 0xa10, 0xf2c, 0x6} +{{0x5b3, 0x13cb, 0x6b6, 0x1fd6, 0x4d0, 0x173, 0x1024, 0x1eee, 0x942, 0x1e8d, 0xd7, 0x14e2, 0x1973, 0x1313, 0x1d18, 0xe47, 0x1999, 0xa10, 0xf2c, 0x6}} #elif RADIX == 32 -{0x14f2cb67, 0x10feb1ad, 0x4817326, 0x50bddd0, 0x6bfa35, 0x7973a71, 0x11fa3131, 0x1086665c, 0x17962} +{{0x14f2cb67, 0x10feb1ad, 0x4817326, 0x50bddd0, 0x6bfa35, 0x7973a71, 0x11fa3131, 0x1086665c, 0x17962}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x64d0feb1ada7965b, 0xfd1a942f7740902e, 0xf462627973a71035, 0x123cb1421999723} +{{0x64d0feb1ada7965b, 0xfd1a942f7740902e, 0xf462627973a71035, 0x123cb1421999723}} #else -{0x7f58d6d3cb2d, 0x2f7740902e64d, 0x74e206bfa3528, 0x5c8fd18989e5c, 0x311e58a10ccc} +{{0x7f58d6d3cb2d, 0x2f7740902e64d, 0x74e206bfa3528, 0x5c8fd18989e5c, 0x311e58a10ccc}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x14ba, 0xa50, 0x219, 0x1ca8, 0x1858, 0xe67, 0x1b19, 0xb09, 0x17fa, 0x89f, 0x10d7, 0x1a55, 0x14de, 0x1f37, 0x12f0, 0x1247, 0x1aa6, 0x109f, 0x493, 0x6} +{{0x14ba, 0xa50, 0x219, 0x1ca8, 0x1858, 0xe67, 0x1b19, 0xb09, 0x17fa, 0x89f, 0x10d7, 0x1a55, 0x14de, 0x1f37, 0x12f0, 0x1247, 0x1aa6, 0x109f, 0x493, 0x6}} #elif RADIX == 32 -{0xa942975, 0x18e54086, 0x32e67c2, 0x1fe9613b, 0x186ba27e, 0xf4ded2a, 0x11e5e1f3, 0x4fea9a4, 0x1249c} +{{0xa942975, 0x18e54086, 0x32e67c2, 0x1fe9613b, 0x186ba27e, 0xf4ded2a, 0x11e5e1f3, 0x4fea9a4, 0x1249c}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf858e5408654a14b, 0xd13f7fa584ec65cc, 0xcbc3e6f4ded2ac35, 0x35124e13faa6923} +{{0xf858e5408654a14b, 0xd13f7fa584ec65cc, 0xcbc3e6f4ded2ac35, 0x35124e13faa6923}} #else -{0x472a0432a50a5, 0x2584ec65ccf85, 0x5a5586ba27eff, 0x248f2f0f9bd37, 0x42892709fd53} +{{0x472a0432a50a5, 0x2584ec65ccf85, 0x5a5586ba27eff, 
0x248f2f0f9bd37, 0x42892709fd53}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1ba, 0xab8, 0x1ded, 0xdc9, 0xf40, 0xaa3, 0x169, 0x53c, 0x2, 0x848, 0x9a6, 0xbad, 0xb7e, 0x15dc, 0x87, 0x1cf3, 0x1791, 0x1af2, 0x1cdf, 0x7} +{{0x1ba, 0xab8, 0x1ded, 0xdc9, 0xf40, 0xaa3, 0x169, 0x53c, 0x2, 0x848, 0x9a6, 0xbad, 0xb7e, 0x15dc, 0x87, 0x1cf3, 0x1791, 0x1af2, 0x1cdf, 0x7}} #elif RADIX == 32 -{0xaae0375, 0x6e4f7b, 0xd2aa37a, 0x8a781, 0x14d32120, 0x18b7e5d6, 0x1cc10f5d, 0x1795e479, 0x2e6fe} +{{0xaae0375, 0x6e4f7b, 0xd2aa37a, 0x8a781, 0x14d32120, 0x18b7e5d6, 0x1cc10f5d, 0x1795e479, 0x2e6fe}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6f406e4f7b55701b, 0x909000229e05a554, 0x821ebb8b7e5d6a69, 0x35f37f5e5791e79} +{{0x6f406e4f7b55701b, 0x909000229e05a554, 0x821ebb8b7e5d6a69, 0x35f37f5e5791e79}} #else -{0x3727bdaab80d, 0x229e05a5546f4, 0x4bad4d3212000, 0x79e6087aee2df, 0x42f9bfaf2bc8} +{{0x3727bdaab80d, 0x229e05a5546f4, 0x4bad4d3212000, 0x79e6087aee2df, 0x42f9bfaf2bc8}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x5b, 0xad0, 0x69, 0x1038, 0x18d2, 0x180d, 0x1871, 0x46b, 0x26b, 0x1ef2, 0xe46, 0x72d, 0xc0d, 0x15a4, 0x6d7, 0x221, 0x1611, 0x1a89, 0xd3f, 0x8} +{{0x5b, 0xad0, 0x69, 0x1038, 0x18d2, 0x180d, 0x1871, 0x46b, 0x26b, 0x1ef2, 0xe46, 0x72d, 0xc0d, 0x15a4, 0x6d7, 0x221, 0x1611, 0x1a89, 0xd3f, 0x8}} #elif RADIX == 32 -{0xab400b7, 0x1281c01a, 0xe380dc6, 0x9ac8d78, 0x17237bc8, 0x8c0d396, 0x84daf5a, 0x144d8444, 0x369fe} +{{0xab400b7, 0x1281c01a, 0xe380dc6, 0x9ac8d78, 0x17237bc8, 0x8c0d396, 0x84daf5a, 0x144d8444, 0x369fe}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xb8d281c01a55a005, 0xbde426b235e1c701, 0x9b5eb48c0d396b91, 0x3b34ff513611110} +{{0xb8d281c01a55a005, 0xbde426b235e1c701, 0x9b5eb48c0d396b91, 0x3b34ff513611110}} #else -{0x140e00d2ad002, 0x3235e1c701b8d, 0x272d7237bc84d, 0x44426d7ad2303, 0x459a7fa89b08} +{{0x140e00d2ad002, 0x3235e1c701b8d, 0x272d7237bc84d, 0x44426d7ad2303, 0x459a7fa89b08}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1131, 0xac7, 0xa16, 0x918, 0x5d8, 0x1e64, 0x3e5, 0x142c, 0x1f89, 0x1cb7, 0xf96, 0x370, 0x4da, 0xf45, 0x1aa5, 0x1872, 0x1fc, 0xd83, 0x1145, 0x6} +{{0x1131, 0xac7, 0xa16, 0x918, 0x5d8, 0x1e64, 0x3e5, 0x142c, 0x1f89, 0x1cb7, 0xf96, 0x370, 0x4da, 0xf45, 0x1aa5, 0x1872, 0x1fc, 0xd83, 0x1145, 0x6}} #elif RADIX == 32 -{0x12b1e263, 0x1848c285, 0x1cbe642e, 0x1e268583, 0x7cb72df, 0xa4da1b8, 0x1cb54af4, 0xc187f30, 0x18a2b} +{{0x12b1e263, 0x1848c285, 
0x1cbe642e, 0x1e268583, 0x7cb72df, 0xa4da1b8, 0x1cb54af4, 0xc187f30, 0x18a2b}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x85d848c285958f13, 0xb96ff89a160f97cc, 0x6a95e8a4da1b83e5, 0x84515b061fcc39} +{{0x85d848c285958f13, 0xb96ff89a160f97cc, 0x6a95e8a4da1b83e5, 0x84515b061fcc39}} #else -{0x4246142cac789, 0x1a160f97cc85d, 0x43707cb72dff1, 0x30e5aa57a2936, 0x2c228ad830fe} +{{0x4246142cac789, 0x1a160f97cc85d, 0x43707cb72dff1, 0x30e5aa57a2936, 0x2c228ad830fe}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x7a4, 0x388, 0xd00, 0x66c, 0x1a9a, 0xabc, 0x97b, 0xadc, 0xaab, 0x1601, 0x287, 0xb2a, 0x1ab7, 0x1803, 0x1d06, 0x81c, 0x890, 0x11e0, 0x1e19, 0x0} +{{0x7a4, 0x388, 0xd00, 0x66c, 0x1a9a, 0xabc, 0x97b, 0xadc, 0xaab, 0x1601, 0x287, 0xb2a, 0x1ab7, 0x1803, 0x1d06, 0x81c, 0x890, 0x11e0, 0x1e19, 0x0}} #elif RADIX == 32 -{0xe20f48, 0x1a336340, 0xf6abcd4, 0xaad5b89, 0x143d805, 0x7ab7595, 0x73a0d80, 0xf022410, 0xf0cc} +{{0xe20f48, 0x1a336340, 0xf6abcd4, 0xaad5b89, 0x143d805, 0x7ab7595, 0x73a0d80, 0xf022410, 0xf0cc}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x9a9a33634007107a, 0xec02aab56e25ed57, 0x741b007ab75950a1, 0x1478663c089040e} +{{0x9a9a33634007107a, 0xec02aab56e25ed57, 0x741b007ab75950a1, 0x1478663c089040e}} #else -{0x519b1a003883d, 0x356e25ed579a9, 0x6b2a143d80555, 0x1039d06c01ead, 0xa3c331e0448} +{{0x519b1a003883d, 0x356e25ed579a9, 0x6b2a143d80555, 0x1039d06c01ead, 0xa3c331e0448}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1e68, 0xcde, 0x29, 0x1777, 0x1ef8, 0x1a1c, 0x204, 0x148, 0x14ba, 0x1c39, 0x175, 0x1263, 0x4de, 0x1032, 0x1649, 0x5a4, 0xad, 0xcfb, 0x870, 0x3} +{{0x1e68, 0xcde, 0x29, 0x1777, 0x1ef8, 0x1a1c, 0x204, 0x148, 0x14ba, 0x1c39, 0x175, 0x1263, 0x4de, 0x1032, 0x1649, 0x5a4, 0xad, 0xcfb, 0x870, 0x3}} #elif RADIX == 32 -{0xb37bcd0, 0x18bbb80a, 0x9a1cf7, 0x12e82902, 0x10baf0e6, 0x44de931, 0x92c9303, 0x7d82b4b, 0x34383} +{{0xb37bcd0, 0x18bbb80a, 0x9a1cf7, 0x12e82902, 0x10baf0e6, 0x44de931, 0x92c9303, 0x7d82b4b, 0x34383}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x9ef8bbb80a59bde6, 0x78734ba0a4081343, 0x59260644de93185d, 0x29a1c19f60ad2d2} +{{0x9ef8bbb80a59bde6, 0x78734ba0a4081343, 0x59260644de93185d, 0x29a1c19f60ad2d2}} #else -{0x45ddc052cdef3, 0x20a40813439ef, 0x52630baf0e697, 0x4b49649819137, 0x14d0e0cfb056} +{{0x45ddc052cdef3, 0x20a40813439ef, 0x52630baf0e697, 0x4b49649819137, 0x14d0e0cfb056}} #endif #endif }, { #if 0 #elif RADIX 
== 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c index aad3ce9edd..33363fda99 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_32.c @@ -1,11 +1,11 @@ // clang-format off // Command line : python monty.py 32 // 0x4ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff -#ifdef RADIX_32 - #include #include +#ifdef RADIX_32 + #define sspint int32_t #define spint uint32_t #define udpint uint64_t @@ -544,22 +544,6 @@ static int modqr(const spint *h, const spint *x) { return modis1(r) | modis0(x); } -// conditional move g to f if d=1 -// strongly recommend inlining be disabled using compiler specific syntax -static void modcmv(int b, const spint *g, volatile spint *f) { - int i; - spint c0, c1, s, t; - spint r = 0x5aa5a55au; - c0 = (1 - b) + r; - c1 = b + r; - for (i = 0; i < 9; i++) { - s = g[i]; - t = f[i]; - f[i] = c0 * t + c1 * s; - f[i] -= r * (t + s); - } -} - // conditional swap g and f if d=1 // strongly recommend inlining be disabled using compiler specific syntax static void modcsw(int b, volatile spint *g, volatile spint *f) { @@ -613,52 +597,6 @@ static int modshr(unsigned int n, spint *a) { return r; } -// set a= 2^r -static void mod2r(unsigned int r, spint *a) { - unsigned int n = r / 29u; - unsigned int m = r % 29u; - modzer(a); - if (r >= 32 * 8) - return; - a[n] = 1; - a[n] <<= m; - nres(a, a); -} - -// export to byte array -static void modexp(const spint *a, char *b) { - int i; - spint c[9]; - redc(a, c); - for (i = 31; i >= 0; i--) { - b[i] = c[0] & (spint)0xff; - (void)modshr(8, c); - } -} - -// import from byte array -// returns 1 if in range, else 0 -static int modimp(const char *b, spint *a) { - int i, res; - for (i = 0; i < 9; i++) { - a[i] = 0; - } - for (i = 0; i < 32; i++) { - modshl(8, a); - a[0] += (spint)(unsigned char)b[i]; - } - res = modfsb(a); - nres(a, a); - return res; -} - -// determine sign -static int modsign(const spint *a) { - spint c[9]; - redc(a, c); - return c[0] % 2; -} - // return true if equal static int modcmp(const spint *a, const spint *b) { spint c[9], d[9]; @@ -942,4 +880,4 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) } } -#endif /* RADIX_32 */ \ No newline at end of file +#endif /* RADIX_32 */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c 
b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c index 00cc61ec13..69abee0d48 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/fp_p5248_64.c @@ -1,11 +1,11 @@ // clang-format off // Command line : python monty.py 64 // 0x4ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff -#ifdef RADIX_64 - #include #include +#ifdef RADIX_64 + #define sspint int64_t #define spint uint64_t #define udpint __uint128_t @@ -389,22 +389,6 @@ static int modqr(const spint *h, const spint *x) { return modis1(r) | modis0(x); } -// conditional move g to f if d=1 -// strongly recommend inlining be disabled using compiler specific syntax -static void modcmv(int b, const spint *g, volatile spint *f) { - int i; - spint c0, c1, s, t; - spint r = 0x3cc3c33c5aa5a55au; - c0 = (1 - b) + r; - c1 = b + r; - for (i = 0; i < 5; i++) { - s = g[i]; - t = f[i]; - f[i] = c0 * t + c1 * s; - f[i] -= r * (t + s); - } -} - // conditional swap g and f if d=1 // strongly recommend inlining be disabled using compiler specific syntax static void modcsw(int b, volatile spint *g, volatile spint *f) { @@ -458,52 +442,6 @@ static int modshr(unsigned int n, spint *a) { return r; } -// set a= 2^r -static void mod2r(unsigned int r, spint *a) { - unsigned int n = r / 51u; - unsigned int m = r % 51u; - modzer(a); - if (r >= 32 * 8) - return; - a[n] = 1; - a[n] <<= m; - nres(a, a); -} - -// export to byte array -static void modexp(const spint *a, char *b) { - int i; - spint c[5]; - redc(a, c); - for (i = 31; i >= 0; i--) { - b[i] = c[0] & (spint)0xff; - (void)modshr(8, c); - } -} - -// import from byte array -// returns 1 if in range, else 0 -static int modimp(const char *b, spint *a) { - int i, res; - for (i = 0; i < 5; i++) { - a[i] = 0; - } - for (i = 0; i < 32; i++) { - modshl(8, a); - a[0] += (spint)(unsigned char)b[i]; - } - res = modfsb(a); - nres(a, a); - return res; -} - -// determine sign -static int modsign(const spint *a) { - spint c[5]; - redc(a, c); - return c[0] % 2; -} - // return true if equal static int modcmp(const spint *a, const spint *b) { spint c[5], d[5]; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd.h index 2b16e23834..616504c7b1 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd.h @@ -415,7 +415,7 @@ void copy_bases_to_kernel(theta_kernel_couple_points_t *ker, const ec_basis_t *B * @param t: an integer * @returns 0xFFFFFFFF on success, 0 on failure */ -static int +static inline int test_couple_point_order_twof(const theta_couple_point_t *T, const theta_couple_curve_t *E, int t) { int check_P1 = test_point_order_twof(&T->P1, &E->E1, t); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.c index 6332d21f8e..14482e01cd 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/hd_splitting_transforms.c @@ -11,131 +11,131 @@ const int CHI_EVAL[4][4] = {{1, 1, 1, 1}, {1, -1, 1, -1}, {1, 1, -1, -1}, {1, -1 const fp2_t FP2_CONSTANTS[5] = {{ #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} 
+{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0x333, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000} +{{0x666, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x33, 0x0, 0x0, 0x100000000000000} +{{0x33, 0x0, 0x0, 0x100000000000000}} #else -{0x19, 0x0, 0x0, 0x0, 0x300000000000} +{{0x19, 0x0, 0x0, 0x0, 0x300000000000}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x1ccc, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x7} +{{0x1ccc, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x7}} #elif RADIX == 32 -{0x1ffff999, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x2ffff} +{{0x1ffff999, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 
0x1fffffff, 0x1fffffff, 0x2ffff}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xffffffffffffffcc, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff} +{{0xffffffffffffffcc, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff}} #else -{0x7ffffffffffe6, 0x7ffffffffffff, 0x7ffffffffffff, 0x7ffffffffffff, 0x1fffffffffff} +{{0x7ffffffffffe6, 0x7ffffffffffff, 0x7ffffffffffff, 0x7ffffffffffff, 0x1fffffffffff}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1ccc, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x7} +{{0x1ccc, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x7}} #elif RADIX == 32 -{0x1ffff999, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x2ffff} +{{0x1ffff999, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x2ffff}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xffffffffffffffcc, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff} +{{0xffffffffffffffcc, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff}} #else -{0x7ffffffffffe6, 0x7ffffffffffff, 0x7ffffffffffff, 0x7ffffffffffff, 0x1fffffffffff} +{{0x7ffffffffffe6, 0x7ffffffffffff, 0x7ffffffffffff, 0x7ffffffffffff, 0x1fffffffffff}} #endif #endif }}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/l2.c index ea32213c75..0fed774a04 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/l2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/l2.c @@ -24,8 +24,8 @@ copy(fp_num *x, fp_num *r) static void normalize(fp_num *x) { - if (x->s == 0.0 || isfinite(x->s) == 0) { - if (x->s == 0.0) { + if (fpclassify(x->s) == FP_ZERO || isfinite(x->s) == 0) { + if (fpclassify(x->s) == FP_ZERO) { x->e = INT_MIN; } } else { @@ -49,13 +49,6 @@ to_deltabar(fp_num *x) x->e = 0; } -static void -to_etabar(fp_num *x) -{ - x->s = ETABAR; - x->e = 0; -} - static void from_mpz(const ibz_t *x, fp_num *r) { diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lll_internals.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lll_internals.h index e8d90141ac..2b76857205 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lll_internals.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/lll_internals.h @@ -43,13 +43,19 @@ /** @brief Type 
for fractions of integers * - * @typedef ibq_t +* @typedef ibq_t * * For fractions of integers of arbitrary size, used by intbig module, using gmp */ -typedef ibz_t ibq_t[2]; -typedef ibq_t ibq_vec_4_t[4]; -typedef ibq_t ibq_mat_4x4_t[4][4]; +typedef struct { + ibz_t q[2]; +} ibq_t; +typedef struct { + ibq_t v[4]; +} ibq_vec_4_t; +typedef struct { + ibq_vec_4_t m[4]; +} ibq_mat_4x4_t; /**@} */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mp.c index 27f4a963db..13714eee4a 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mp.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/mp.c @@ -2,6 +2,7 @@ #include #include #include +#include // double-wide multiplication void @@ -17,7 +18,7 @@ MUL(digit_t *out, const digit_t a, const digit_t b) out[0] = _umul128(a, b, &umul_hi); out[1] = umul_hi; -#elif defined(RADIX_64) && defined(HAVE_UINT128) +#elif defined(RADIX_64) && (defined(HAVE_UINT128) || defined(__SIZEOF_INT128__) || defined(__int128)) && !defined(C_PEDANTIC_MODE) unsigned __int128 umul_tmp; umul_tmp = (unsigned __int128)(a) * (unsigned __int128)(b); out[0] = (uint64_t)umul_tmp; @@ -277,6 +278,7 @@ mp_inv_2e(digit_t *b, const digit_t *a, int e, unsigned int nwords) assert((a[0] & 1) == 1); digit_t x[nwords], y[nwords], aa[nwords], mp_one[nwords], tmp[nwords]; + memset(x, 0, sizeof(x)); mp_copy(aa, a, nwords); mp_one[0] = 1; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/rationals.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/rationals.c index 0c5387e5e8..25f8519b3f 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/rationals.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/rationals.c @@ -1,20 +1,20 @@ -#include + #include #include "internal.h" #include "lll_internals.h" void ibq_init(ibq_t *x) { - ibz_init(&((*x)[0])); - ibz_init(&((*x)[1])); - ibz_set(&((*x)[1]), 1); + ibz_init(&(x->q[0])); + ibz_init(&(x->q[1])); + ibz_set(&(x->q[1]), 1); } void ibq_finalize(ibq_t *x) { - ibz_finalize(&((*x)[0])); - ibz_finalize(&((*x)[1])); + ibz_finalize(&(x->q[0])); + ibz_finalize(&(x->q[1])); } void @@ -22,7 +22,7 @@ ibq_mat_4x4_init(ibq_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibq_init(&(*mat)[i][j]); + ibq_init(&mat->m[i].v[j]); } } } @@ -31,7 +31,7 @@ ibq_mat_4x4_finalize(ibq_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibq_finalize(&(*mat)[i][j]); + ibq_finalize(&mat->m[i].v[j]); } } } @@ -40,14 +40,14 @@ void ibq_vec_4_init(ibq_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibq_init(&(*vec)[i]); + ibq_init(&vec->v[i]); } } void ibq_vec_4_finalize(ibq_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibq_finalize(&(*vec)[i]); + ibq_finalize(&vec->v[i]); } } @@ -57,9 +57,9 @@ ibq_mat_4x4_print(const ibq_mat_4x4_t *mat) printf("matrix: "); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_print(&((*mat)[i][j][0]), 10); + ibz_print(&(mat->m[i].v[j].q[0]), 10); printf("/"); - ibz_print(&((*mat)[i][j][1]), 10); + ibz_print(&(mat->m[i].v[j].q[1]), 10); printf(" "); } printf("\n "); @@ -72,9 +72,9 @@ ibq_vec_4_print(const ibq_vec_4_t *vec) { printf("vector: "); for (int i = 0; i < 4; i++) { - ibz_print(&((*vec)[i][0]), 10); + ibz_print(&(vec->v[i].q[0]), 10); printf("/"); - ibz_print(&((*vec)[i][1]), 10); + ibz_print(&(vec->v[i].q[1]), 10); printf(" "); } printf("\n\n"); @@ -86,10 +86,10 @@ ibq_reduce(ibq_t *x) ibz_t gcd, r; ibz_init(&gcd); ibz_init(&r); - ibz_gcd(&gcd, &((*x)[0]), &((*x)[1])); - ibz_div(&((*x)[0]), 
&r, &((*x)[0]), &gcd); + ibz_gcd(&gcd, &(x->q[0]), &(x->q[1])); + ibz_div(&(x->q[0]), &r, &(x->q[0]), &gcd); assert(ibz_is_zero(&r)); - ibz_div(&((*x)[1]), &r, &((*x)[1]), &gcd); + ibz_div(&(x->q[1]), &r, &(x->q[1]), &gcd); assert(ibz_is_zero(&r)); ibz_finalize(&gcd); ibz_finalize(&r); @@ -102,10 +102,10 @@ ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b) ibz_init(&add); ibz_init(&prod); - ibz_mul(&add, &((*a)[0]), &((*b)[1])); - ibz_mul(&prod, &((*b)[0]), &((*a)[1])); - ibz_add(&((*sum)[0]), &add, &prod); - ibz_mul(&((*sum)[1]), &((*a)[1]), &((*b)[1])); + ibz_mul(&add, &(a->q[0]), &(b->q[1])); + ibz_mul(&prod, &(b->q[0]), &(a->q[1])); + ibz_add(&(sum->q[0]), &add, &prod); + ibz_mul(&(sum->q[1]), &(a->q[1]), &(b->q[1])); ibz_finalize(&add); ibz_finalize(&prod); } @@ -113,8 +113,8 @@ ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b) void ibq_neg(ibq_t *neg, const ibq_t *x) { - ibz_copy(&((*neg)[1]), &((*x)[1])); - ibz_neg(&((*neg)[0]), &((*x)[0])); + ibz_copy(&(neg->q[1]), &(x->q[1])); + ibz_neg(&(neg->q[0]), &(x->q[0])); } void @@ -143,8 +143,8 @@ ibq_abs(ibq_t *abs, const ibq_t *x) // once void ibq_mul(ibq_t *prod, const ibq_t *a, const ibq_t *b) { - ibz_mul(&((*prod)[0]), &((*a)[0]), &((*b)[0])); - ibz_mul(&((*prod)[1]), &((*a)[1]), &((*b)[1])); + ibz_mul(&(prod->q[0]), &(a->q[0]), &(b->q[0])); + ibz_mul(&(prod->q[1]), &(a->q[1]), &(b->q[1])); } int @@ -152,9 +152,9 @@ ibq_inv(ibq_t *inv, const ibq_t *x) { int res = !ibq_is_zero(x); if (res) { - ibz_copy(&((*inv)[0]), &((*x)[0])); - ibz_copy(&((*inv)[1]), &((*x)[1])); - ibz_swap(&((*inv)[1]), &((*inv)[0])); + ibz_copy(&(inv->q[0]), &(x->q[0])); + ibz_copy(&(inv->q[1]), &(x->q[1])); + ibz_swap(&(inv->q[1]), &(inv->q[0])); } return (res); } @@ -165,15 +165,15 @@ ibq_cmp(const ibq_t *a, const ibq_t *b) ibz_t x, y; ibz_init(&x); ibz_init(&y); - ibz_copy(&x, &((*a)[0])); - ibz_copy(&y, &((*b)[0])); - ibz_mul(&y, &y, &((*a)[1])); - ibz_mul(&x, &x, &((*b)[1])); - if (ibz_cmp(&((*a)[1]), &ibz_const_zero) > 0) { + ibz_copy(&x, &(a->q[0])); + ibz_copy(&y, &(b->q[0])); + ibz_mul(&y, &y, &(a->q[1])); + ibz_mul(&x, &x, &(b->q[1])); + if (ibz_cmp(&(a->q[1]), &ibz_const_zero) > 0) { ibz_neg(&y, &y); ibz_neg(&x, &x); } - if (ibz_cmp(&((*b)[1]), &ibz_const_zero) > 0) { + if (ibz_cmp(&(b->q[1]), &ibz_const_zero) > 0) { ibz_neg(&y, &y); ibz_neg(&x, &x); } @@ -186,28 +186,28 @@ ibq_cmp(const ibq_t *a, const ibq_t *b) int ibq_is_zero(const ibq_t *x) { - return ibz_is_zero(&((*x)[0])); + return ibz_is_zero(&(x->q[0])); } int ibq_is_one(const ibq_t *x) { - return (0 == ibz_cmp(&((*x)[0]), &((*x)[1]))); + return (0 == ibz_cmp(&(x->q[0]), &(x->q[1]))); } int ibq_set(ibq_t *q, const ibz_t *a, const ibz_t *b) { - ibz_copy(&((*q)[0]), a); - ibz_copy(&((*q)[1]), b); + ibz_copy(&(q->q[0]), a); + ibz_copy(&(q->q[1]), b); return !ibz_is_zero(b); } void ibq_copy(ibq_t *target, const ibq_t *value) // once { - ibz_copy(&((*target)[0]), &((*value)[0])); - ibz_copy(&((*target)[1]), &((*value)[1])); + ibz_copy(&(target->q[0]), &(value->q[0])); + ibz_copy(&(target->q[1]), &(value->q[1])); } int @@ -215,7 +215,7 @@ ibq_is_ibz(const ibq_t *q) { ibz_t r; ibz_init(&r); - ibz_mod(&r, &((*q)[0]), &((*q)[1])); + ibz_mod(&r, &(q->q[0]), &(q->q[1])); int res = ibz_is_zero(&r); ibz_finalize(&r); return (res); @@ -226,7 +226,7 @@ ibq_to_ibz(ibz_t *z, const ibq_t *q) { ibz_t r; ibz_init(&r); - ibz_div(z, &r, &((*q)[0]), &((*q)[1])); + ibz_div(z, &r, &(q->q[0]), &(q->q[1])); int res = ibz_is_zero(&r); ibz_finalize(&r); return (res); diff --git 
a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/rng.h b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/rng.h
index d0861ac036..0362ca0c42 100644
--- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/rng.h
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/rng.h
@@ -5,7 +5,7 @@
 #include
-static int randombytes(unsigned char *x, unsigned long long xlen){
+static inline int randombytes(unsigned char *x, unsigned long long xlen){
 OQS_randombytes(x, xlen);
 return 0;
 }
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign.c
index 7335c38d9a..cf2134085b 100644
--- a/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign.c
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl1_ref/sqisign.c
@@ -121,7 +121,7 @@ sqisign_verify(const unsigned char *m,
 unsigned long long siglen,
 const unsigned char *pk)
 {
-
+ (void) siglen;
 int ret = 0;
 public_key_t pkt = { 0 };
 signature_t sigt;
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/asm_preamble.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/asm_preamble.h
index 3ef7927e9c..ca2a054ce2 100644
--- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/asm_preamble.h
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/asm_preamble.h
@@ -9,8 +9,10 @@
 #undef fp2_mul_c1
 #undef fp2_sq_c0
 #undef fp2_sq_c1
-#define p2 CAT(_, p2)
-#define p CAT(_, p)
+#undef p2
+#undef p
+#define p2 CAT(_, SQISIGN_NAMESPACE(p2))
+#define p CAT(_, SQISIGN_NAMESPACE(p))
 #define fp_add CAT(_, SQISIGN_NAMESPACE(fp_add))
 #define fp_sub CAT(_, SQISIGN_NAMESPACE(fp_sub))
 #define fp_mul CAT(_, SQISIGN_NAMESPACE(fp_mul))
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c
index 143060e2c3..74184fc97b 100644
--- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/dim2id2iso.c
@@ -191,7 +191,7 @@ fixed_degree_isogeny_and_eval(quat_left_ideal_t *lideal,
 // reordering vectors and switching some signs if needed to make it in a nicer
 // shape
 static void
-post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, const ibz_t *norm, bool is_special_order)
+post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, bool is_special_order)
 {
 // if the left order is the special one, then we apply some additional post
 // treatment
@@ -520,7 +520,7 @@ find_uv(ibz_t *u,
 ibz_set(&adjusted_norm[0], 1);
 ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom);
 ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom);
- post_LLL_basis_treatment(&gram[0], &reduced[0], &ideal[0].norm, true);
+ post_LLL_basis_treatment(&gram[0], &reduced[0], true);
 // for efficient lattice reduction, we replace ideal[0] by the equivalent
 // ideal of smallest norm
@@ -562,7 +562,7 @@ find_uv(ibz_t *u,
 ibz_set(&adjusted_norm[i], 1);
 ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom);
 ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom);
- post_LLL_basis_treatment(&gram[i], &reduced[i], &ideal[i].norm, false);
+ post_LLL_basis_treatment(&gram[i], &reduced[i], false);
 }
 // enumerating small vectors
diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/e0_basis.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/e0_basis.c
index 1b12a8380f..6f7311e3c9 100644
--- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/e0_basis.c
+++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/e0_basis.c
@@
-2,54 +2,54 @@ const fp2_t BASIS_E0_PX = { #if 0 #elif RADIX == 16 -{0x1196, 0x134b, 0xdbd, 0x118d, 0x712, 0x1646, 0x5d7, 0x8eb, 0x431, 0xf5b, 0x161e, 0x13b6, 0x1c07, 0x42, 0x8ba, 0xeec, 0x1a43, 0x545, 0x1cdb, 0x1659, 0x1614, 0xde, 0x72d, 0x1b80, 0x1706, 0x15a3, 0x894, 0xd4a, 0x1b2f, 0x12} +{{0x1196, 0x134b, 0xdbd, 0x118d, 0x712, 0x1646, 0x5d7, 0x8eb, 0x431, 0xf5b, 0x161e, 0x13b6, 0x1c07, 0x42, 0x8ba, 0xeec, 0x1a43, 0x545, 0x1cdb, 0x1659, 0x1614, 0xde, 0x72d, 0x1b80, 0x1706, 0x15a3, 0x894, 0xd4a, 0x1b2f, 0x12}} #elif RADIX == 32 -{0x9a5c65a, 0xa31adbd, 0x7b231c4, 0xc51d65d, 0x1e7ad90, 0x1e76d6, 0x8ba0217, 0xe90ddd8, 0x3cdb2a2, 0xf5852cb, 0x72d06, 0xd1dc1b7, 0xa94894a, 0x14cbd} +{{0x9a5c65a, 0xa31adbd, 0x7b231c4, 0xc51d65d, 0x1e7ad90, 0x1e76d6, 0x8ba0217, 0xe90ddd8, 0x3cdb2a2, 0xf5852cb, 0x72d06, 0xd1dc1b7, 0xa94894a, 0x14cbd}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x31c4a31adbd9a5c6, 0xe7ad90c51d65d7b2, 0x88ba021701e76d61, 0x2cb3cdb2a2e90ddd, 0xdc1b70072d06f585, 0x16eecbda94894ad1} +{{0x31c4a31adbd9a5c6, 0xe7ad90c51d65d7b2, 0x88ba021701e76d61, 0x2cb3cdb2a2e90ddd, 0xdc1b70072d06f585, 0x16eecbda94894ad1}} #else -{0x94635b7b34b8c, 0x431475975ec8c7, 0x380f3b6b0f3d6c, 0x2e90ddd88ba021, 0x5eb0a59679b654, 0x347706dc01cb41, 0xb7765ed4a44a5} +{{0x94635b7b34b8c, 0x431475975ec8c7, 0x380f3b6b0f3d6c, 0x2e90ddd88ba021, 0x5eb0a59679b654, 0x347706dc01cb41, 0xb7765ed4a44a5}} #endif #endif , #if 0 #elif RADIX == 16 -{0xa85, 0x10cc, 0x1ef, 0xb0b, 0x1082, 0x5be, 0xd14, 0x1100, 0x1a33, 0x174b, 0x181c, 0x83e, 0x1034, 0x18ba, 0x205, 0x1f39, 0x1e9, 0x1998, 0x130e, 0x801, 0xfeb, 0x698, 0xdf9, 0x6a5, 0x5b6, 0x2c8, 0x1283, 0xad9, 0x960, 0x1e} +{{0xa85, 0x10cc, 0x1ef, 0xb0b, 0x1082, 0x5be, 0xd14, 0x1100, 0x1a33, 0x174b, 0x181c, 0x83e, 0x1034, 0x18ba, 0x205, 0x1f39, 0x1e9, 0x1998, 0x130e, 0x801, 0xfeb, 0x698, 0xdf9, 0x6a5, 0x5b6, 0x2c8, 0x1283, 0xad9, 0x960, 0x1e}} #elif RADIX == 32 -{0x8662a17, 0x96161ef, 0x42df420, 0xce200d1, 0x1cba5e8, 0xd107d8, 0x205c5d4, 0x7a7e72, 0x330eccc, 0xc3fad00, 0x4adf934, 0x6416d8d, 0x5b32831, 0x2f581} +{{0x8662a17, 0x96161ef, 0x42df420, 0xce200d1, 0x1cba5e8, 0xd107d8, 0x205c5d4, 0x7a7e72, 0x330eccc, 0xc3fad00, 0x4adf934, 0x6416d8d, 0x5b32831, 0x2f581}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf42096161ef8662a, 0xcba5e8ce200d142d, 0x2205c5d40d107d81, 0xd00330eccc07a7e7, 0x16d8d4adf934c3fa, 0x6065815b3283164} +{{0xf42096161ef8662a, 0xcba5e8ce200d142d, 0x2205c5d40d107d81, 0xd00330eccc07a7e7, 0x16d8d4adf934c3fa, 0x6065815b3283164}} #else -{0x412c2c3df0cc54, 0x2338803450b7d0, 0x206883ec0e5d2f, 0x407a7e72205c5d, 0x187f5a00661d99, 0x5905b6352b7e4d, 0x3032c0ad99418} +{{0x412c2c3df0cc54, 0x2338803450b7d0, 0x206883ec0e5d2f, 0x407a7e72205c5d, 0x187f5a00661d99, 0x5905b6352b7e4d, 0x3032c0ad99418}} #endif #endif }; const fp2_t BASIS_E0_QX = { #if 0 #elif RADIX == 16 -{0x16ed, 0x818, 0x127a, 0xcfb, 0x1be6, 0x1b40, 0x1bf1, 0xe75, 0x129c, 0x151, 0x425, 0x142e, 0x1edb, 0x254, 0x5cc, 0x1a5b, 0x1e1d, 0x1e27, 0x1a12, 0x8a8, 0x59e, 0x933, 0x1647, 0x686, 0x19e, 0x1e51, 0x151f, 0x1b6e, 0x1efe, 0xd} +{{0x16ed, 0x818, 0x127a, 0xcfb, 0x1be6, 0x1b40, 0x1bf1, 0xe75, 0x129c, 0x151, 0x425, 0x142e, 0x1edb, 0x254, 0x5cc, 0x1a5b, 0x1e1d, 0x1e27, 0x1a12, 0x8a8, 0x59e, 0x933, 0x1647, 0x686, 0x19e, 0x1e51, 0x151f, 0x1b6e, 0x1efe, 0xd}} #elif RADIX == 32 -{0x40c5bb5, 0x99f727a, 0x1da06f9, 0x71cebbf, 0x250a8ca, 0xb6e85c4, 0x5cc12a7, 0xf8774b6, 0x1a12f13, 0x9967915, 0xd64749, 0x288678d, 0x6dd51ff, 0x2ebfb} +{{0x40c5bb5, 0x99f727a, 0x1da06f9, 0x71cebbf, 0x250a8ca, 0xb6e85c4, 0x5cc12a7, 
0xf8774b6, 0x1a12f13, 0x9967915, 0xd64749, 0x288678d, 0x6dd51ff, 0x2ebfb}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6f999f727a40c5b, 0x50a8ca71cebbf1da, 0x65cc12a7b6e85c42, 0x9151a12f13f8774b, 0x8678d0d647499967, 0x2e23bfb6dd51ff28} +{{0x6f999f727a40c5b, 0x50a8ca71cebbf1da, 0x65cc12a7b6e85c42, 0x9151a12f13f8774b, 0x8678d0d647499967, 0x2e23bfb6dd51ff28}} #else -{0x7333ee4f4818b7, 0x29c73aefc7681b, 0x3db742e2128546, 0x3f8774b65cc12a, 0x332cf22a3425e2, 0x4a219e343591d2, 0x6d1dfdb6ea8ff} +{{0x7333ee4f4818b7, 0x29c73aefc7681b, 0x3db742e2128546, 0x3f8774b65cc12a, 0x332cf22a3425e2, 0x4a219e343591d2, 0x6d1dfdb6ea8ff}} #endif #endif , #if 0 #elif RADIX == 16 -{0x18a9, 0x1838, 0x1588, 0x1720, 0xf3f, 0x1fcd, 0x44d, 0x1e6b, 0x681, 0x1249, 0x1f8a, 0x5af, 0x1f58, 0x1c12, 0xf21, 0x1887, 0x278, 0x156a, 0xbfe, 0x765, 0x12f7, 0x4da, 0x16ce, 0x7c1, 0x1c04, 0x1773, 0x853, 0xab7, 0xe1d, 0x1a} +{{0x18a9, 0x1838, 0x1588, 0x1720, 0xf3f, 0x1fcd, 0x44d, 0x1e6b, 0x681, 0x1249, 0x1f8a, 0x5af, 0x1f58, 0x1c12, 0xf21, 0x1887, 0x278, 0x156a, 0xbfe, 0x765, 0x12f7, 0x4da, 0x16ce, 0x7c1, 0x1c04, 0x1773, 0x853, 0xab7, 0xe1d, 0x1a}} #elif RADIX == 32 -{0xc1c62a7, 0xee41588, 0xdfe6bcf, 0x7cd644, 0x8a9249a, 0xd60b5ff, 0xf21e097, 0x9e310e, 0xabfeab5, 0xd4bdcec, 0x836ce26, 0xb9f010f, 0x56e853b, 0x10875} +{{0xc1c62a7, 0xee41588, 0xdfe6bcf, 0x7cd644, 0x8a9249a, 0xd60b5ff, 0xf21e097, 0x9e310e, 0xabfeab5, 0xd4bdcec, 0x836ce26, 0xb9f010f, 0x56e853b, 0x10875}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6bcfee41588c1c62, 0xa9249a07cd644dfe, 0xef21e097d60b5ff8, 0xcecabfeab509e310, 0xf010f836ce26d4bd, 0x2a7787556e853bb9} +{{0x6bcfee41588c1c62, 0xa9249a07cd644dfe, 0xef21e097d60b5ff8, 0xcecabfeab509e310, 0xf010f836ce26d4bd, 0x2a7787556e853bb9}} #else -{0x1fdc82b11838c5, 0x681f359137f9af, 0x3eb05affc54924, 0x509e310ef21e09, 0x5a97b9d957fd56, 0x6e7c043e0db389, 0x4fbc3aab7429d} +{{0x1fdc82b11838c5, 0x681f359137f9af, 0x3eb05affc54924, 0x509e310ef21e09, 0x5a97b9d957fd56, 0x6e7c043e0db389, 0x4fbc3aab7429d}} #endif #endif }; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec.h index e609c93a08..7cef95ca49 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/ec.h @@ -566,7 +566,7 @@ uint32_t ec_is_basis_four_torsion(const ec_basis_t *B, const ec_curve_t *E); * * @return 0xFFFFFFFF if the order is correct, 0 otherwise */ -static int +static inline int test_point_order_twof(const ec_point_t *P, const ec_curve_t *E, int t) { ec_point_t test; @@ -595,7 +595,7 @@ test_point_order_twof(const ec_point_t *P, const ec_curve_t *E, int t) * * @return 0xFFFFFFFF if the order is correct, 0 otherwise */ -static int +static inline int test_basis_order_twof(const ec_basis_t *B, const ec_curve_t *E, int t) { int check_P = test_point_order_twof(&B->P, E, t); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/encode_verification.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/encode_verification.c index fecdb9c259..8aa451d366 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/encode_verification.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/encode_verification.c @@ -99,36 +99,6 @@ ec_curve_from_bytes(ec_curve_t *curve, const byte_t *enc) return proj_from_bytes(&curve->A, &curve->C, enc); } -static byte_t * -ec_point_to_bytes(byte_t *enc, const ec_point_t *point) -{ - return proj_to_bytes(enc, &point->x, &point->z); -} - -static const byte_t 
* -ec_point_from_bytes(ec_point_t *point, const byte_t *enc) -{ - return proj_from_bytes(&point->x, &point->z, enc); -} - -static byte_t * -ec_basis_to_bytes(byte_t *enc, const ec_basis_t *basis) -{ - enc = ec_point_to_bytes(enc, &basis->P); - enc = ec_point_to_bytes(enc, &basis->Q); - enc = ec_point_to_bytes(enc, &basis->PmQ); - return enc; -} - -static const byte_t * -ec_basis_from_bytes(ec_basis_t *basis, const byte_t *enc) -{ - enc = ec_point_from_bytes(&basis->P, enc); - enc = ec_point_from_bytes(&basis->Q, enc); - enc = ec_point_from_bytes(&basis->PmQ, enc); - return enc; -} - // public API byte_t * diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c index a598a89c0e..936b00d135 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/endomorphism_action.c @@ -4,261 +4,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x7e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1} +{{0x7e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}} #elif RADIX == 32 -{0x1f8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8000} +{{0x1f8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x1, 0x0, 0x0, 0x0, 0x0, 0x3f00000000000000} +{{0x1, 0x0, 0x0, 0x0, 0x0, 0x3f00000000000000}} #else -{0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf400000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x1196, 0x134b, 0xdbd, 0x118d, 0x712, 0x1646, 0x5d7, 0x8eb, 0x431, 0xf5b, 
0x161e, 0x13b6, 0x1c07, 0x42, 0x8ba, 0xeec, 0x1a43, 0x545, 0x1cdb, 0x1659, 0x1614, 0xde, 0x72d, 0x1b80, 0x1706, 0x15a3, 0x894, 0xd4a, 0x1b2f, 0x12} +{{0x1196, 0x134b, 0xdbd, 0x118d, 0x712, 0x1646, 0x5d7, 0x8eb, 0x431, 0xf5b, 0x161e, 0x13b6, 0x1c07, 0x42, 0x8ba, 0xeec, 0x1a43, 0x545, 0x1cdb, 0x1659, 0x1614, 0xde, 0x72d, 0x1b80, 0x1706, 0x15a3, 0x894, 0xd4a, 0x1b2f, 0x12}} #elif RADIX == 32 -{0x9a5c65a, 0xa31adbd, 0x7b231c4, 0xc51d65d, 0x1e7ad90, 0x1e76d6, 0x8ba0217, 0xe90ddd8, 0x3cdb2a2, 0xf5852cb, 0x72d06, 0xd1dc1b7, 0xa94894a, 0x14cbd} +{{0x9a5c65a, 0xa31adbd, 0x7b231c4, 0xc51d65d, 0x1e7ad90, 0x1e76d6, 0x8ba0217, 0xe90ddd8, 0x3cdb2a2, 0xf5852cb, 0x72d06, 0xd1dc1b7, 0xa94894a, 0x14cbd}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x31c4a31adbd9a5c6, 0xe7ad90c51d65d7b2, 0x88ba021701e76d61, 0x2cb3cdb2a2e90ddd, 0xdc1b70072d06f585, 0x16eecbda94894ad1} +{{0x31c4a31adbd9a5c6, 0xe7ad90c51d65d7b2, 0x88ba021701e76d61, 0x2cb3cdb2a2e90ddd, 0xdc1b70072d06f585, 0x16eecbda94894ad1}} #else -{0x94635b7b34b8c, 0x431475975ec8c7, 0x380f3b6b0f3d6c, 0x2e90ddd88ba021, 0x5eb0a59679b654, 0x347706dc01cb41, 0xb7765ed4a44a5} +{{0x94635b7b34b8c, 0x431475975ec8c7, 0x380f3b6b0f3d6c, 0x2e90ddd88ba021, 0x5eb0a59679b654, 0x347706dc01cb41, 0xb7765ed4a44a5}} #endif #endif , #if 0 #elif RADIX == 16 -{0xa85, 0x10cc, 0x1ef, 0xb0b, 0x1082, 0x5be, 0xd14, 0x1100, 0x1a33, 0x174b, 0x181c, 0x83e, 0x1034, 0x18ba, 0x205, 0x1f39, 0x1e9, 0x1998, 0x130e, 0x801, 0xfeb, 0x698, 0xdf9, 0x6a5, 0x5b6, 0x2c8, 0x1283, 0xad9, 0x960, 0x1e} +{{0xa85, 0x10cc, 0x1ef, 0xb0b, 0x1082, 0x5be, 0xd14, 0x1100, 0x1a33, 0x174b, 0x181c, 0x83e, 0x1034, 0x18ba, 0x205, 0x1f39, 0x1e9, 0x1998, 0x130e, 0x801, 0xfeb, 0x698, 0xdf9, 0x6a5, 0x5b6, 0x2c8, 0x1283, 0xad9, 0x960, 0x1e}} #elif RADIX == 32 -{0x8662a17, 0x96161ef, 0x42df420, 0xce200d1, 0x1cba5e8, 0xd107d8, 0x205c5d4, 0x7a7e72, 0x330eccc, 0xc3fad00, 0x4adf934, 0x6416d8d, 0x5b32831, 0x2f581} +{{0x8662a17, 0x96161ef, 0x42df420, 0xce200d1, 0x1cba5e8, 0xd107d8, 0x205c5d4, 0x7a7e72, 0x330eccc, 0xc3fad00, 0x4adf934, 0x6416d8d, 0x5b32831, 0x2f581}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf42096161ef8662a, 0xcba5e8ce200d142d, 0x2205c5d40d107d81, 0xd00330eccc07a7e7, 0x16d8d4adf934c3fa, 0x6065815b3283164} +{{0xf42096161ef8662a, 0xcba5e8ce200d142d, 0x2205c5d40d107d81, 0xd00330eccc07a7e7, 0x16d8d4adf934c3fa, 0x6065815b3283164}} #else -{0x412c2c3df0cc54, 0x2338803450b7d0, 0x206883ec0e5d2f, 0x407a7e72205c5d, 0x187f5a00661d99, 0x5905b6352b7e4d, 0x3032c0ad99418} +{{0x412c2c3df0cc54, 0x2338803450b7d0, 0x206883ec0e5d2f, 0x407a7e72205c5d, 0x187f5a00661d99, 0x5905b6352b7e4d, 0x3032c0ad99418}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x16ed, 0x818, 0x127a, 0xcfb, 0x1be6, 0x1b40, 0x1bf1, 0xe75, 0x129c, 0x151, 0x425, 0x142e, 0x1edb, 0x254, 0x5cc, 0x1a5b, 0x1e1d, 0x1e27, 0x1a12, 0x8a8, 0x59e, 0x933, 0x1647, 0x686, 0x19e, 0x1e51, 0x151f, 0x1b6e, 0x1efe, 0xd} +{{0x16ed, 0x818, 0x127a, 0xcfb, 0x1be6, 0x1b40, 0x1bf1, 0xe75, 0x129c, 0x151, 0x425, 0x142e, 0x1edb, 0x254, 0x5cc, 0x1a5b, 0x1e1d, 0x1e27, 0x1a12, 0x8a8, 0x59e, 0x933, 0x1647, 0x686, 0x19e, 0x1e51, 0x151f, 0x1b6e, 0x1efe, 0xd}} #elif RADIX == 32 -{0x40c5bb5, 0x99f727a, 0x1da06f9, 0x71cebbf, 0x250a8ca, 0xb6e85c4, 0x5cc12a7, 0xf8774b6, 0x1a12f13, 0x9967915, 0xd64749, 0x288678d, 0x6dd51ff, 0x2ebfb} +{{0x40c5bb5, 0x99f727a, 0x1da06f9, 0x71cebbf, 0x250a8ca, 0xb6e85c4, 0x5cc12a7, 0xf8774b6, 0x1a12f13, 0x9967915, 0xd64749, 0x288678d, 0x6dd51ff, 0x2ebfb}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6f999f727a40c5b, 0x50a8ca71cebbf1da, 0x65cc12a7b6e85c42, 0x9151a12f13f8774b, 0x8678d0d647499967, 0x2e23bfb6dd51ff28} +{{0x6f999f727a40c5b, 0x50a8ca71cebbf1da, 0x65cc12a7b6e85c42, 0x9151a12f13f8774b, 0x8678d0d647499967, 0x2e23bfb6dd51ff28}} #else -{0x7333ee4f4818b7, 0x29c73aefc7681b, 0x3db742e2128546, 0x3f8774b65cc12a, 0x332cf22a3425e2, 0x4a219e343591d2, 0x6d1dfdb6ea8ff} +{{0x7333ee4f4818b7, 0x29c73aefc7681b, 0x3db742e2128546, 0x3f8774b65cc12a, 0x332cf22a3425e2, 0x4a219e343591d2, 0x6d1dfdb6ea8ff}} #endif #endif , #if 0 #elif RADIX == 16 -{0x18a9, 0x1838, 0x1588, 0x1720, 0xf3f, 0x1fcd, 0x44d, 0x1e6b, 0x681, 0x1249, 0x1f8a, 0x5af, 0x1f58, 0x1c12, 0xf21, 0x1887, 0x278, 0x156a, 0xbfe, 0x765, 0x12f7, 0x4da, 0x16ce, 0x7c1, 0x1c04, 0x1773, 0x853, 0xab7, 0xe1d, 0x1a} +{{0x18a9, 0x1838, 0x1588, 0x1720, 0xf3f, 0x1fcd, 0x44d, 0x1e6b, 0x681, 0x1249, 0x1f8a, 0x5af, 0x1f58, 0x1c12, 0xf21, 0x1887, 0x278, 0x156a, 0xbfe, 0x765, 0x12f7, 0x4da, 0x16ce, 0x7c1, 0x1c04, 0x1773, 0x853, 0xab7, 0xe1d, 0x1a}} #elif RADIX == 32 -{0xc1c62a7, 0xee41588, 0xdfe6bcf, 0x7cd644, 0x8a9249a, 0xd60b5ff, 0xf21e097, 0x9e310e, 0xabfeab5, 0xd4bdcec, 0x836ce26, 0xb9f010f, 0x56e853b, 0x10875} +{{0xc1c62a7, 0xee41588, 0xdfe6bcf, 0x7cd644, 0x8a9249a, 0xd60b5ff, 0xf21e097, 0x9e310e, 0xabfeab5, 0xd4bdcec, 0x836ce26, 0xb9f010f, 0x56e853b, 0x10875}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6bcfee41588c1c62, 0xa9249a07cd644dfe, 0xef21e097d60b5ff8, 0xcecabfeab509e310, 0xf010f836ce26d4bd, 0x2a7787556e853bb9} +{{0x6bcfee41588c1c62, 0xa9249a07cd644dfe, 0xef21e097d60b5ff8, 0xcecabfeab509e310, 0xf010f836ce26d4bd, 0x2a7787556e853bb9}} #else -{0x1fdc82b11838c5, 0x681f359137f9af, 0x3eb05affc54924, 0x509e310ef21e09, 0x5a97b9d957fd56, 0x6e7c043e0db389, 0x4fbc3aab7429d} +{{0x1fdc82b11838c5, 0x681f359137f9af, 0x3eb05affc54924, 0x509e310ef21e09, 0x5a97b9d957fd56, 0x6e7c043e0db389, 0x4fbc3aab7429d}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} 
+{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1e36, 0x1718, 0xced, 0x186e, 0x83d, 0x1a23, 0xf5b, 0x5ca, 0x194d, 0x1bd8, 0xb67, 0x9f7, 0x1806, 0x17ae, 0x508, 0x117f, 0x5cc, 0x1809, 0x14b1, 0x85f, 0xcf0, 0x1b0c, 0x1753, 0x1484, 0xb5f, 0x1d62, 0x808, 0x1cc3, 0x844, 0x9} +{{0x1e36, 0x1718, 0xced, 0x186e, 0x83d, 0x1a23, 0xf5b, 0x5ca, 0x194d, 0x1bd8, 0xb67, 0x9f7, 0x1806, 0x17ae, 0x508, 0x117f, 0x5cc, 0x1809, 0x14b1, 0x85f, 0xcf0, 0x1b0c, 0x1753, 0x1484, 0xb5f, 0x1d62, 0x808, 0x1cc3, 0x844, 0x9}} #elif RADIX == 32 -{0xb8c78d9, 0x70dcced, 0xbd11a0f, 0x34b94f5, 0x67dec65, 0x193eeb, 0x508bd76, 0x97322fe, 0xf4b1c04, 0x633c10b, 0x9753d8, 0xb12d7e9, 0x986808e, 0x9113} +{{0xb8c78d9, 0x70dcced, 0xbd11a0f, 0x34b94f5, 0x67dec65, 0x193eeb, 0x508bd76, 0x97322fe, 0xf4b1c04, 0x633c10b, 0x9753d8, 0xb12d7e9, 0x986808e, 0x9113}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x1a0f70dccedb8c78, 0x7dec6534b94f5bd1, 0xe508bd760193eeb6, 0x10bf4b1c0497322f, 0x2d7e909753d8633c, 0x3722113986808eb1} +{{0x1a0f70dccedb8c78, 0x7dec6534b94f5bd1, 0xe508bd760193eeb6, 0x10bf4b1c0497322f, 0x2d7e909753d8633c, 0x3722113986808eb1}} #else -{0x1ee1b99db718f1, 0x14d2e53d6f4468, 0x300c9f75b3ef63, 0x497322fe508bd7, 0xc678217e96380, 0x2c4b5fa425d4f6, 0xb51089cc34047} +{{0x1ee1b99db718f1, 0x14d2e53d6f4468, 0x300c9f75b3ef63, 0x497322fe508bd7, 0xc678217e96380, 0x2c4b5fa425d4f6, 0xb51089cc34047}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1785, 0x1652, 0x4b4, 0x1b37, 0x918, 0x12d, 0x1340, 0x16d3, 0xee, 0xb43, 0x52a, 0x1ff, 0x1e6b, 0x1424, 0x609, 0x1e2c, 0x19bd, 0x18f, 0x174a, 0x134d, 0x6f4, 0xa33, 0x1d5c, 0xa53, 0x73c, 0x361, 0x372, 0x1242, 0x87c, 0x17} +{{0x1785, 0x1652, 0x4b4, 0x1b37, 0x918, 0x12d, 0x1340, 0x16d3, 0xee, 0xb43, 0x52a, 0x1ff, 0x1e6b, 0x1424, 0x609, 0x1e2c, 0x19bd, 0x18f, 0x174a, 0x134d, 0x6f4, 0xa33, 0x1d5c, 0xa53, 0x73c, 0x361, 0x372, 0x1242, 0x87c, 0x17}} #elif RADIX == 32 -{0xb295e16, 0x366e4b4, 0x96a46, 0xbada734, 0x2a5a183, 0x9ac3fe5, 0x609a127, 0xe6f7c58, 0xb74a0c7, 0x99bd269, 0xa7d5c51, 0xb09cf14, 0x4843721, 0x381f2} +{{0xb295e16, 0x366e4b4, 0x96a46, 0xbada734, 0x2a5a183, 0x9ac3fe5, 0x609a127, 0xe6f7c58, 0xb74a0c7, 0x99bd269, 0xa7d5c51, 0xb09cf14, 0x4843721, 0x381f2}} #elif RADIX == 64 #if 
defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6a46366e4b4b295e, 0xa5a183bada734009, 0x8609a1279ac3fe52, 0x269b74a0c7e6f7c5, 0x9cf14a7d5c5199bd, 0x5ce1f24843721b0} +{{0x6a46366e4b4b295e, 0xa5a183bada734009, 0x8609a1279ac3fe52, 0x269b74a0c7e6f7c5, 0x9cf14a7d5c5199bd, 0x5ce1f24843721b0}} #else -{0xc6cdc969652bc, 0xeeb69cd0025a9, 0x3cd61ff2952d0c, 0x7e6f7c58609a12, 0x3337a4d36e9418, 0x6c273c529f5714, 0x2e70f92421b90} +{{0xc6cdc969652bc, 0xeeb69cd0025a9, 0x3cd61ff2952d0c, 0x7e6f7c58609a12, 0x3337a4d36e9418, 0x6c273c529f5714, 0x2e70f92421b90}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -480,261 +480,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x194c, 0x43, 0xc70, 0x1c7a, 0xa7a, 0xdf7, 0x1564, 0x1809, 0x8e8, 0x3a5, 0x1399, 0xf0a, 0x914, 0x1a27, 0xb1c, 0xcd0, 0xfc, 0xaa4, 0xd87, 0x1ed2, 0x2c0, 0x8e4, 0x1b93, 0x1a3f, 0x1d9b, 0x1a00, 0xbce, 0x17d2, 0x1a07, 0xf} +{{0x194c, 0x43, 0xc70, 0x1c7a, 0xa7a, 0xdf7, 0x1564, 0x1809, 0x8e8, 0x3a5, 0x1399, 0xf0a, 0x914, 0x1a27, 0xb1c, 0xcd0, 0xfc, 0xaa4, 0xd87, 0x1ed2, 0x2c0, 0x8e4, 0x1b93, 0x1a3f, 0x1d9b, 0x1a00, 0xbce, 0x17d2, 0x1a07, 0xf}} #elif RADIX == 32 -{0x21e531, 0xb8f4c70, 0x46fba9e, 0xa301356, 0x991d2a3, 0x451e153, 0xb1cd13a, 0x3f19a0, 0x4d87552, 0x20b03da, 0x7fb9347, 0x766f4, 0xfa4bced, 0x3d81e} +{{0x21e531, 0xb8f4c70, 0x46fba9e, 0xa301356, 0x991d2a3, 0x451e153, 0xb1cd13a, 0x3f19a0, 0x4d87552, 0x20b03da, 0x7fb9347, 0x766f4, 0xfa4bced, 0x3d81e}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xba9eb8f4c70021e5, 0x91d2a3a30135646f, 0xb1cd13a451e1539, 0x3da4d8755203f19a, 0x766f47fb934720b0, 0xcae81efa4bced00} +{{0xba9eb8f4c70021e5, 0x91d2a3a30135646f, 0xb1cd13a451e1539, 0x3da4d8755203f19a, 0x766f47fb934720b0, 0xcae81efa4bced00}} #else -{0x3d71e98e0043ca, 0xe8c04d591beea, 0x5228f0a9cc8e95, 0x203f19a0b1cd13, 0x641607b49b0eaa, 0x401d9bd1fee4d1, 0x65740f7d25e76} +{{0x3d71e98e0043ca, 0xe8c04d591beea, 0x5228f0a9cc8e95, 0x203f19a0b1cd13, 0x641607b49b0eaa, 0x401d9bd1fee4d1, 0x65740f7d25e76}} #endif #endif , #if 0 #elif RADIX == 16 
-{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x1ed1, 0x10, 0x131c, 0x171e, 0x1a9e, 0x37d, 0xd59, 0x602, 0xa3a, 0x8e9, 0x14e6, 0x3c2, 0x1a45, 0x689, 0x2c7, 0x334, 0x3f, 0x1aa9, 0x1361, 0x7b4, 0xb0, 0x1a39, 0x1ee4, 0x1e8f, 0x766, 0x1680, 0x12f3, 0x1df4, 0x1e81, 0x4} +{{0x1ed1, 0x10, 0x131c, 0x171e, 0x1a9e, 0x37d, 0xd59, 0x602, 0xa3a, 0x8e9, 0x14e6, 0x3c2, 0x1a45, 0x689, 0x2c7, 0x334, 0x3f, 0x1aa9, 0x1361, 0x7b4, 0xb0, 0x1a39, 0x1ee4, 0x1e8f, 0x766, 0x1680, 0x12f3, 0x1df4, 0x1e81, 0x4}} #elif RADIX == 32 -{0x87b44, 0xae3d31c, 0x91beea7, 0xe8c04d5, 0xe6474a8, 0x9147854, 0x2c7344e, 0x80fc668, 0x9361d54, 0xc82c0f6, 0x1fee4d1, 0x401d9bd, 0xbe92f3b, 0x27a07} +{{0x87b44, 0xae3d31c, 0x91beea7, 0xe8c04d5, 0xe6474a8, 0x9147854, 0x2c7344e, 0x80fc668, 0x9361d54, 0xc82c0f6, 0x1fee4d1, 0x401d9bd, 0xbe92f3b, 0x27a07}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xeea7ae3d31c0087b, 0x6474a8e8c04d591b, 0x82c7344e9147854e, 0xf69361d5480fc66, 0x1d9bd1fee4d1c82c, 0x116ba07be92f3b40} +{{0xeea7ae3d31c0087b, 0x6474a8e8c04d591b, 0x82c7344e9147854e, 0xf69361d5480fc66, 0x1d9bd1fee4d1c82c, 0x116ba07be92f3b40}} #else -{0x4f5c7a638010f6, 0x23a30135646fba, 0x748a3c2a7323a5, 0x480fc6682c7344, 0x390581ed26c3aa, 0x500766f47fb934, 0x8b5d03df4979d} +{{0x4f5c7a638010f6, 0x23a30135646fba, 0x748a3c2a7323a5, 0x480fc6682c7344, 0x390581ed26c3aa, 0x500766f47fb934, 0x8b5d03df4979d}} 
#endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x187c, 0x10c9, 0xfda, 0x189b, 0x3b, 0xbcd, 0x16ab, 0xabe, 0x102, 0x19b7, 0x288, 0x1c7e, 0x1ee8, 0x452, 0x853, 0x1b5a, 0x1ca8, 0x1129, 0xd16, 0x168a, 0x1414, 0x6ed, 0xc0, 0xda2, 0x19ae, 0x12fe, 0x1813, 0xdd8, 0x102e, 0x1f} +{{0x187c, 0x10c9, 0xfda, 0x189b, 0x3b, 0xbcd, 0x16ab, 0xabe, 0x102, 0x19b7, 0x288, 0x1c7e, 0x1ee8, 0x452, 0x853, 0x1b5a, 0x1ca8, 0x1129, 0xd16, 0x168a, 0x1414, 0x6ed, 0xc0, 0xda2, 0x19ae, 0x12fe, 0x1813, 0xdd8, 0x102e, 0x1f}} #elif RADIX == 32 -{0x864e1f3, 0xf136fda, 0xb5e680e, 0x957d6a, 0x88cdb84, 0xba38fc2, 0x8532297, 0xf2a36b4, 0x4d16894, 0x6d052d1, 0x440c037, 0x7f66b9b, 0xbb18139, 0x390b9} +{{0x864e1f3, 0xf136fda, 0xb5e680e, 0x957d6a, 0x88cdb84, 0xba38fc2, 0x8532297, 0xf2a36b4, 0x4d16894, 0x6d052d1, 0x440c037, 0x7f66b9b, 0xbb18139, 0x390b9}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x680ef136fda864e1, 0x8cdb840957d6ab5e, 0x48532297ba38fc28, 0x2d14d16894f2a36b, 0x66b9b440c0376d05, 0x3dec0b9bb181397f} +{{0x680ef136fda864e1, 0x8cdb840957d6ab5e, 0x48532297ba38fc28, 0x2d14d16894f2a36b, 0x66b9b440c0376d05, 0x3dec0b9bb181397f}} #else -{0x1de26dfb50c9c3, 0x10255f5aad79a0, 0x3dd1c7e14466dc, 0x4f2a36b4853229, 0x6da0a5a29a2d12, 0x5fd9ae6d10300d, 0xeb605cdd8c09c} +{{0x1de26dfb50c9c3, 0x10255f5aad79a0, 0x3dd1c7e14466dc, 
0x4f2a36b4853229, 0x6da0a5a29a2d12, 0x5fd9ae6d10300d, 0xeb605cdd8c09c}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1ca3, 0x16ad, 0x12b3, 0x9d7, 0xb37, 0x118b, 0xb22, 0x1662, 0xa8f, 0xd68, 0x6d5, 0x1a1f, 0x1f29, 0x632, 0x1b7e, 0xb6, 0xba7, 0xeca, 0x11ed, 0x13b, 0x18cc, 0x19a2, 0x77, 0x1582, 0x11ff, 0xc5f, 0x7de, 0x4b1, 0x1a7f, 0x18} +{{0x1ca3, 0x16ad, 0x12b3, 0x9d7, 0xb37, 0x118b, 0xb22, 0x1662, 0xa8f, 0xd68, 0x6d5, 0x1a1f, 0x1f29, 0x632, 0x1b7e, 0xb6, 0xba7, 0xeca, 0x11ed, 0x13b, 0x18cc, 0x19a2, 0x77, 0x1582, 0x11ff, 0xc5f, 0x7de, 0x4b1, 0x1a7f, 0x18}} #elif RADIX == 32 -{0xb56f28f, 0xd3af2b3, 0x28c5acd, 0x3ecc4b2, 0xd56b42a, 0xca743e6, 0xb7e3197, 0x2e9c16d, 0x71ed765, 0x1633027, 0x4077cd, 0x2fc7feb, 0x9627de6, 0x39fc} +{{0xb56f28f, 0xd3af2b3, 0x28c5acd, 0x3ecc4b2, 0xd56b42a, 0xca743e6, 0xb7e3197, 0x2e9c16d, 0x71ed765, 0x1633027, 0x4077cd, 0x2fc7feb, 0x9627de6, 0x39fc}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x5acdd3af2b3b56f2, 0x56b42a3ecc4b228c, 0xdb7e3197ca743e6d, 0x2771ed7652e9c16, 0xc7feb04077cd1633, 0x24529fc9627de62f} +{{0x5acdd3af2b3b56f2, 0x56b42a3ecc4b228c, 0xdb7e3197ca743e6d, 0x2771ed7652e9c16, 0xc7feb04077cd1633, 0x24529fc9627de62f}} #else -{0x1ba75e5676ade5, 0x28fb312c8a316b, 0x3e53a1f36ab5a1, 0x52e9c16db7e319, 0x22c6604ee3daec, 0xbf1ffac101df3, 0x1e94fe4b13ef3} +{{0x1ba75e5676ade5, 0x28fb312c8a316b, 0x3e53a1f36ab5a1, 0x52e9c16db7e319, 0x22c6604ee3daec, 0xbf1ffac101df3, 0x1e94fe4b13ef3}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1f7a, 0x1a13, 0x11f4, 0xaeb, 0x997, 0x12d, 0x315, 0x1d7, 0x2fc, 0x736, 0x927, 0x350, 0x695, 0x14ac, 0x703, 0x1ec7, 0x1567, 0x1527, 0x7ee, 0x1a23, 0x11aa, 0x919, 0x130b, 0x199e, 0x137d, 0x795, 0x4e4, 0x1dc6, 0xa87, 0xd} +{{0x1f7a, 0x1a13, 0x11f4, 0xaeb, 0x997, 0x12d, 0x315, 0x1d7, 0x2fc, 0x736, 0x927, 0x350, 0x695, 0x14ac, 0x703, 0x1ec7, 0x1567, 0x1527, 0x7ee, 0x1a23, 0x11aa, 0x919, 0x130b, 0x199e, 0x137d, 0x795, 0x4e4, 0x1dc6, 0xa87, 0xd}} #elif RADIX == 32 -{0xd09fde9, 0xd5d71f4, 0x5096a65, 0xf03ae31, 0x2739b0b, 0xa546a09, 0x703a561, 0xd59fd8e, 
0x67eea93, 0xcc6ab44, 0x3d30b48, 0xcacdf73, 0xb8c4e43, 0x29a1f} +{{0xd09fde9, 0xd5d71f4, 0x5096a65, 0xf03ae31, 0x2739b0b, 0xa546a09, 0x703a561, 0xd59fd8e, 0x67eea93, 0xcc6ab44, 0x3d30b48, 0xcacdf73, 0xb8c4e43, 0x29a1f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6a65d5d71f4d09fd, 0x739b0bf03ae31509, 0xe703a561a546a092, 0xb4467eea93d59fd8, 0xcdf733d30b48cc6a, 0x3b52a1fb8c4e43ca} +{{0x6a65d5d71f4d09fd, 0x739b0bf03ae31509, 0xe703a561a546a092, 0xb4467eea93d59fd8, 0xcdf733d30b48cc6a, 0x3b52a1fb8c4e43ca}} #else -{0x4babae3e9a13fb, 0x2fc0eb8c5425a9, 0xd2a3504939cd8, 0x3d59fd8e703a56, 0x198d5688cfdd52, 0x72b37dccf4c2d2, 0xd6950fdc62721} +{{0x4babae3e9a13fb, 0x2fc0eb8c5425a9, 0xd2a3504939cd8, 0x3d59fd8e703a56, 0x198d5688cfdd52, 0x72b37dccf4c2d2, 0xd6950fdc62721}} #endif #endif , #if 0 #elif RADIX == 16 -{0xa54, 0x1685, 0x1b20, 0x1632, 0x1047, 0x159e, 0x14a0, 0x94c, 0x3c8, 0x793, 0x3a2, 0x1938, 0x1899, 0x15b7, 0xefa, 0xcc8, 0x12c3, 0x1335, 0x4ef, 0x1e93, 0x1861, 0x1602, 0x1d6c, 0x1ae7, 0x187, 0x18b1, 0x857, 0x8da, 0x12f7, 0xa} +{{0xa54, 0x1685, 0x1b20, 0x1632, 0x1047, 0x159e, 0x14a0, 0x94c, 0x3c8, 0x793, 0x3a2, 0x1938, 0x1899, 0x15b7, 0xefa, 0xcc8, 0x12c3, 0x1335, 0x4ef, 0x1e93, 0x1861, 0x1602, 0x1d6c, 0x1ae7, 0x187, 0x18b1, 0x857, 0x8da, 0x12f7, 0xa}} #elif RADIX == 32 -{0xb42a951, 0xec65b20, 0xacf411, 0x212994a, 0xa23c98f, 0x2672703, 0xefaadbe, 0xcb0d990, 0x64ef99a, 0x16187d2, 0xcfd6cb0, 0x58861f5, 0x1b4857c, 0x13bdd} +{{0xb42a951, 0xec65b20, 0xacf411, 0x212994a, 0xa23c98f, 0x2672703, 0xefaadbe, 0xcb0d990, 0x64ef99a, 0x16187d2, 0xcfd6cb0, 0x58861f5, 0x1b4857c, 0x13bdd}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf411ec65b20b42a9, 0x23c98f212994a0ac, 0xefaadbe2672703a, 0x7d264ef99acb0d99, 0x861f5cfd6cb01618, 0x14a4bdd1b4857c58} +{{0xf411ec65b20b42a9, 0x23c98f212994a0ac, 0xefaadbe2672703a, 0x7d264ef99acb0d99, 0x861f5cfd6cb01618, 0x14a4bdd1b4857c58}} #else -{0x23d8cb64168552, 0x3c84a65282b3d0, 0x71339381d11e4c, 0x2cb0d990efaadb, 0x2c30fa4c9df33, 0x162187d73f5b2c, 0xa525ee8da42be} +{{0x23d8cb64168552, 0x3c84a65282b3d0, 0x71339381d11e4c, 0x2cb0d990efaadb, 0x2c30fa4c9df33, 0x162187d73f5b2c, 0xa525ee8da42be}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} 
+{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1e6b, 0x111, 0x74d, 0xb04, 0x738, 0x178f, 0xdc5, 0x835, 0x724, 0xaf9, 0xf3c, 0x1855, 0x266, 0x1b16, 0x1cf0, 0x1aa3, 0x32f, 0xce, 0x1f26, 0x16ba, 0x1cb6, 0x9b8, 0x12de, 0x1cef, 0x1a72, 0x1d68, 0xa02, 0x1c67, 0xa67, 0x13} +{{0x1e6b, 0x111, 0x74d, 0xb04, 0x738, 0x178f, 0xdc5, 0x835, 0x724, 0xaf9, 0xf3c, 0x1855, 0x266, 0x1b16, 0x1cf0, 0x1aa3, 0x32f, 0xce, 0x1f26, 0x16ba, 0x1cb6, 0x9b8, 0x12de, 0x1cef, 0x1a72, 0x1d68, 0xa02, 0x1c67, 0xa67, 0x13}} #elif RADIX == 32 -{0x88f9ae, 0x160874d, 0x5bc79ce, 0x9106adc, 0x3c57c9c, 0x99b0aaf, 0xcf0d8b0, 0xcbf547, 0x5f26067, 0xc72dad7, 0xdf2de4d, 0xb469cb9, 0x8cea02e, 0x1899f} +{{0x88f9ae, 0x160874d, 0x5bc79ce, 0x9106adc, 0x3c57c9c, 0x99b0aaf, 0xcf0d8b0, 0xcbf547, 0x5f26067, 0xc72dad7, 0xdf2de4d, 0xb469cb9, 0x8cea02e, 0x1899f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x79ce160874d088f9, 0xc57c9c9106adc5bc, 0x7cf0d8b099b0aaf3, 0xad75f260670cbf54, 0x69cb9df2de4dc72d, 0x2c4699f8cea02eb4} +{{0x79ce160874d088f9, 0xc57c9c9106adc5bc, 0x7cf0d8b099b0aaf3, 0xad75f260670cbf54, 0x69cb9df2de4dc72d, 0x2c4699f8cea02eb4}} #else -{0x1c2c10e9a111f3, 0x72441ab716f1e7, 0x4cd85579e2be4, 0x70cbf547cf0d8b, 0x38e5b5aebe4c0c, 0x2d1a72e77cb793, 0x5e34cfc675017} +{{0x1c2c10e9a111f3, 0x72441ab716f1e7, 0x4cd85579e2be4, 0x70cbf547cf0d8b, 0x38e5b5aebe4c0c, 0x2d1a72e77cb793, 0x5e34cfc675017}} #endif #endif , #if 0 #elif RADIX == 16 -{0x12d6, 0x1c7a, 0x9bb, 0x1ce1, 0x1ca, 0xf3f, 0x1036, 0x19a6, 0x1c79, 0x5bf, 0x3, 0x1a92, 0x1d08, 0xeaa, 0x11e8, 0xab1, 0x1ed2, 0x80c, 0x10c9, 0x1517, 0xc18, 0x1513, 0x1dff, 0xc00, 0x16a0, 0x14ce, 0x72d, 0x1a86, 0xd45, 0x19} +{{0x12d6, 0x1c7a, 0x9bb, 0x1ce1, 0x1ca, 0xf3f, 0x1036, 0x19a6, 0x1c79, 0x5bf, 0x3, 0x1a92, 0x1d08, 0xeaa, 0x11e8, 0xab1, 0x1ed2, 0x80c, 0x10c9, 0x1517, 0xc18, 0x1513, 0x1dff, 0xc00, 0x16a0, 0x14ce, 0x72d, 0x1a86, 0xd45, 0x19}} #elif RADIX == 32 -{0xe3d4b5b, 0xb9c29bb, 0x679f872, 0xe734d03, 0x32dff1, 0x4235240, 0x1e87557, 0x7b49563, 0xf0c9406, 0x9b062a2, 0x1dffa8, 0x675a818, 0x50c72da, 0x8517} +{{0xe3d4b5b, 0xb9c29bb, 0x679f872, 0xe734d03, 0x32dff1, 0x4235240, 0x1e87557, 0x7b49563, 0xf0c9406, 0x9b062a2, 0x1dffa8, 0x675a818, 0x50c72da, 0x8517}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf872b9c29bbe3d4b, 0x32dff1e734d03679, 0x31e8755742352400, 0x2a2f0c94067b4956, 0x5a81801dffa89b06, 0x172351750c72da67} +{{0xf872b9c29bbe3d4b, 0x32dff1e734d03679, 0x31e8755742352400, 0x2a2f0c94067b4956, 0x5a81801dffa89b06, 0x172351750c72da67}} #else -{0x657385377c7a96, 0x479cd340d9e7e1, 0x3a11a9200196ff, 0x67b495631e8755, 0x1360c545e19280, 0x19d6a060077fea, 0xb91a8ba86396d} +{{0x657385377c7a96, 0x479cd340d9e7e1, 0x3a11a9200196ff, 0x67b495631e8755, 0x1360c545e19280, 0x19d6a060077fea, 0xb91a8ba86396d}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 
0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -956,261 +956,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x9a9, 0x8c7, 0x119d, 0xada, 0x1d98, 0xc97, 0x1a31, 0xdc6, 0x192a, 0xf3c, 0x509, 0xc5b, 0xea7, 0x1eb9, 0x59d, 0x1e3c, 0x114b, 0x88, 0x5a9, 0x1154, 0x18c0, 0x11db, 0x1ba9, 0x3b8, 0x837, 0x18d0, 0x17eb, 0x211, 0x1aa7, 0x11} +{{0x9a9, 0x8c7, 0x119d, 0xada, 0x1d98, 0xc97, 0x1a31, 0xdc6, 0x192a, 0xf3c, 0x509, 0xc5b, 0xea7, 0x1eb9, 0x59d, 0x1e3c, 0x114b, 0x88, 0x5a9, 0x1154, 0x18c0, 0x11db, 0x1ba9, 0x3b8, 0x837, 0x18d0, 0x17eb, 0x211, 0x1aa7, 0x11}} #elif RADIX == 32 -{0x463a6a6, 0x15b519d, 0x164bf66, 0xa9b8da3, 0x979e64, 0xa9d8b65, 0x59df5cb, 0x452fc78, 0x85a9044, 0xde3022a, 0x71ba98e, 0x6820dc7, 0x4237ebc, 0xca9c} +{{0x463a6a6, 0x15b519d, 0x164bf66, 0xa9b8da3, 0x979e64, 0xa9d8b65, 0x59df5cb, 0x452fc78, 0x85a9044, 0xde3022a, 0x71ba98e, 0x6820dc7, 0x4237ebc, 0xca9c}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xbf6615b519d463a6, 0x979e64a9b8da3164, 0x859df5cba9d8b650, 0x22a85a9044452fc7, 0x20dc771ba98ede30, 0x2a32a9c4237ebc68} +{{0xbf6615b519d463a6, 0x979e64a9b8da3164, 0x859df5cba9d8b650, 0x22a85a9044452fc7, 0x20dc771ba98ede30, 0x2a32a9c4237ebc68}} #else -{0x4c2b6a33a8c74d, 0x12a6e368c592fd, 0x5d4ec5b284bcf3, 0x4452fc7859df5c, 0x5bc604550b5208, 0x1a08371dc6ea63, 0x4d954e211bf5e} +{{0x4c2b6a33a8c74d, 0x12a6e368c592fd, 0x5d4ec5b284bcf3, 0x4452fc7859df5c, 0x5bc604550b5208, 0x1a08371dc6ea63, 0x4d954e211bf5e}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x1ae8, 0xa31, 0x1467, 0x2b6, 0x1f66, 0xb25, 0x168c, 0x1371, 0x64a, 0xbcf, 0x1942, 0x1b16, 0xba9, 0xfae, 0x167, 0x1f8f, 0x452, 0x822, 0x16a, 0x455, 0x1e30, 0xc76, 0x6ea, 0x18ee, 0x20d, 0x1e34, 0xdfa, 0x1884, 0x12a9, 0xd} +{{0x1ae8, 0xa31, 0x1467, 0x2b6, 0x1f66, 0xb25, 0x168c, 0x1371, 0x64a, 0xbcf, 0x1942, 0x1b16, 0xba9, 0xfae, 0x167, 0x1f8f, 0x452, 0x822, 0x16a, 0x455, 0x1e30, 0xc76, 0x6ea, 0x18ee, 0x20d, 0x1e34, 0xdfa, 0x1884, 0x12a9, 0xd}} #elif RADIX == 32 -{0x518eba1, 0x856d467, 0xc592fd9, 0x2a6e368, 0x425e799, 0xea762d9, 0x1677d72, 0x114bf1e, 0xa16a411, 0xb78c08a, 0xdc6ea63, 0x1a08371, 0x108dfaf, 0x2baa7} +{{0x518eba1, 0x856d467, 0xc592fd9, 0x2a6e368, 0x425e799, 0xea762d9, 0x1677d72, 0x114bf1e, 0xa16a411, 0xb78c08a, 0xdc6ea63, 0x1a08371, 0x108dfaf, 0x2baa7}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x2fd9856d467518eb, 0x25e7992a6e368c59, 0xe1677d72ea762d94, 0x8aa16a411114bf1, 0x8371dc6ea63b78c, 0x290caa7108dfaf1a} +{{0x2fd9856d467518eb, 0x25e7992a6e368c59, 0xe1677d72ea762d94, 0x8aa16a411114bf1, 0x8371dc6ea63b78c, 0x290caa7108dfaf1a}} #else -{0x330ada8cea31d7, 0x64a9b8da3164bf, 0x1753b16ca12f3c, 0x1114bf1e1677d7, 0x76f1811542d482, 0x46820dc771ba98, 0x4465538846fd7} +{{0x330ada8cea31d7, 0x64a9b8da3164bf, 0x1753b16ca12f3c, 0x1114bf1e1677d7, 0x76f1811542d482, 0x46820dc771ba98, 0x4465538846fd7}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x954, 0x49a, 0xee7, 0x1037, 0x171c, 0x81, 0x448, 0x76f, 0x1615, 0xefe, 0xe70, 0xc54, 0x3d4, 0xc30, 0x1aaf, 0x72c, 0x464, 0x7a7, 0x5b7, 0x1f2a, 0xa98, 0x8db, 0x1689, 0x1cc1, 0x11ae, 0x4bf, 0x1ddc, 0x1f93, 0x1b3e, 0xb} +{{0x954, 0x49a, 0xee7, 0x1037, 0x171c, 0x81, 0x448, 0x76f, 0x1615, 0xefe, 0xe70, 0xc54, 0x3d4, 0xc30, 0x1aaf, 0x72c, 0x464, 0x7a7, 0x5b7, 0x1f2a, 0xa98, 0x8db, 0x1689, 0x1cc1, 0x11ae, 0x4bf, 0x1ddc, 0x1f93, 0x1b3e, 0xb}} #elif RADIX == 32 -{0x24d2551, 0x206eee7, 0x8040dc7, 0x54ede44, 0x7077f58, 0xf518a8e, 0xaaf6180, 0x9190e59, 0x45b73d3, 0xdaa63e5, 0x8368946, 0x5fc6bb9, 0xf27ddc2, 0x1dcfb} +{{0x24d2551, 0x206eee7, 0x8040dc7, 0x54ede44, 0x7077f58, 0xf518a8e, 0xaaf6180, 0x9190e59, 0x45b73d3, 0xdaa63e5, 0x8368946, 0x5fc6bb9, 0xf27ddc2, 0x1dcfb}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xdc7206eee724d25, 0x77f5854ede44804, 0x9aaf6180f518a8e7, 0x3e545b73d39190e5, 0xc6bb98368946daa6, 0x14aecfbf27ddc25f} +{{0xdc7206eee724d25, 0x77f5854ede44804, 0x9aaf6180f518a8e7, 0x3e545b73d39190e5, 0xc6bb98368946daa6, 0x14aecfbf27ddc25f}} #else -{0xe40dddce49a4a, 0x6153b791201037, 0x7a8c547383bfa, 0x39190e59aaf618, 0x5b54c7ca8b6e7a, 0x17f1aee60da251, 0xa5767df93eee1} +{{0xe40dddce49a4a, 0x6153b791201037, 0x7a8c547383bfa, 0x39190e59aaf618, 0x5b54c7ca8b6e7a, 0x17f1aee60da251, 0xa5767df93eee1}} #endif #endif , #if 0 #elif RADIX == 16 -{0xf14, 0xa31, 0x805, 0x19bd, 0x1b37, 0x5d5, 0x1211, 0x9c0, 0x557, 0x6b5, 0x1b2a, 0x775, 0x1a4f, 0x1d9, 0x520, 0x16be, 0x3d, 0x1cae, 0x4ca, 0x1a17, 0x1e64, 0x170b, 0x136, 0x1cd4, 0x150b, 0x1111, 0xf0b, 0x1af9, 0x3ce, 0x1c} +{{0xf14, 0xa31, 0x805, 0x19bd, 0x1b37, 0x5d5, 0x1211, 0x9c0, 0x557, 0x6b5, 0x1b2a, 0x775, 0x1a4f, 0x1d9, 0x520, 0x16be, 0x3d, 0x1cae, 0x4ca, 0x1a17, 0x1e64, 0x170b, 0x136, 0x1cd4, 0x150b, 0x1111, 0xf0b, 0x1af9, 0x3ce, 0x1c}} #elif RADIX == 32 -{0x518bc53, 0xf37a805, 0x12eaecd, 0x5d38121, 0x2a35a95, 0x93ceebb, 0x5200ece, 0xf6d7c, 0xe4cae57, 0x5f99342, 0xa8136b8, 0x88d42f9, 0x5f2f0b8, 0x1df3b} +{{0x518bc53, 0xf37a805, 0x12eaecd, 0x5d38121, 0x2a35a95, 0x93ceebb, 0x5200ece, 0xf6d7c, 0xe4cae57, 0x5f99342, 0xa8136b8, 0x88d42f9, 0x5f2f0b8, 0x1df3b}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xaecdf37a805518bc, 0xa35a955d3812112e, 0xc5200ece93ceebb2, 0x342e4cae5700f6d7, 0xd42f9a8136b85f99, 0x1530f3b5f2f0b888} +{{0xaecdf37a805518bc, 0xa35a955d3812112e, 0xc5200ece93ceebb2, 0x342e4cae5700f6d7, 0xd42f9a8136b85f99, 0x1530f3b5f2f0b888}} #else 
-{0x1be6f500aa3178, 0x5574e04844babb, 0x749e775d951ad4, 0x700f6d7c5200ec, 0xbf32685c995ca, 0x22350be6a04dae, 0xa9879daf9785c} +{{0x1be6f500aa3178, 0x5574e04844babb, 0x749e775d951ad4, 0x700f6d7c5200ec, 0xbf32685c995ca, 0x22350be6a04dae, 0xa9879daf9785c}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1b6e, 0x5aa, 0x1bd9, 0x1e85, 0x1615, 0x1629, 0xb8b, 0x1066, 0x1532, 0x19ad, 0xe24, 0xcb8, 0x17fc, 0x2ab, 0x1726, 0x1ad5, 0x1c83, 0x1b32, 0x75e, 0x1794, 0x161d, 0x9c4, 0x11b6, 0x1c02, 0x14bb, 0x15d2, 0x10d5, 0x26b, 0x1765, 0x14} +{{0x1b6e, 0x5aa, 0x1bd9, 0x1e85, 0x1615, 0x1629, 0xb8b, 0x1066, 0x1532, 0x19ad, 0xe24, 0xcb8, 0x17fc, 0x2ab, 0x1726, 0x1ad5, 0x1c83, 0x1b32, 0x75e, 0x1794, 0x161d, 0x9c4, 0x11b6, 0x1c02, 0x14bb, 0x15d2, 0x10d5, 0x26b, 0x1765, 0x14}} #elif RADIX == 32 -{0x2d56dba, 0x7d0bbd9, 0xbb14d85, 0xca0ccb8, 0x24cd6d4, 0xff1970e, 0x726155d, 0x720f5ab, 0x875ed99, 0x25876f2, 0x51b64e, 0xe952ef8, 0x4d70d5a, 0x23d94} +{{0x2d56dba, 0x7d0bbd9, 0xbb14d85, 0xca0ccb8, 0x24cd6d4, 0xff1970e, 0x726155d, 0x720f5ab, 0x875ed99, 0x25876f2, 0x51b64e, 0xe952ef8, 0x4d70d5a, 0x23d94}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x4d857d0bbd92d56d, 0x4cd6d4ca0ccb8bb1, 0xb726155dff1970e2, 0x6f2875ed99720f5a, 0x52ef8051b64e2587, 0x2f5dd944d70d5ae9} +{{0x4d857d0bbd92d56d, 0x4cd6d4ca0ccb8bb1, 0xb726155dff1970e2, 0x6f2875ed99720f5a, 0x52ef8051b64e2587, 0x2f5dd944d70d5ae9}} #else -{0xafa177b25aadb, 0x5328332e2ec536, 0x6ff8cb871266b6, 0x1720f5ab726155, 0x44b0ede50ebdb3, 0x3a54bbe0146d93, 0x76eeca26b86ad} +{{0xafa177b25aadb, 0x5328332e2ec536, 0x6ff8cb871266b6, 0x1720f5ab726155, 0x44b0ede50ebdb3, 0x3a54bbe0146d93, 0x76eeca26b86ad}} #endif #endif , #if 0 #elif RADIX == 16 -{0x18aa, 0x459, 0x747, 0x401, 0x14be, 0x13ba, 0xafb, 0x1cb4, 0x636, 0xd10, 0x16ec, 0x1e6e, 0x1ee5, 0x1475, 0xf82, 0x1695, 0x1a54, 0xe4e, 0x1856, 0x459, 0x752, 0x1d56, 0x15a7, 0xde2, 0x158c, 0x623, 0x17, 0x10d9, 0x1156, 0x19} +{{0x18aa, 0x459, 0x747, 0x401, 0x14be, 0x13ba, 0xafb, 0x1cb4, 0x636, 0xd10, 0x16ec, 0x1e6e, 0x1ee5, 0x1475, 0xf82, 0x1695, 0x1a54, 
0xe4e, 0x1856, 0x459, 0x752, 0x1d56, 0x15a7, 0xde2, 0x158c, 0x623, 0x17, 0x10d9, 0x1156, 0x19}} #elif RADIX == 32 -{0x22ce2ab, 0x8802747, 0xb9dd52f, 0xdb968af, 0xec68818, 0xb97cdd6, 0xf82a3af, 0x6952d2a, 0x3856727, 0xb1d488b, 0xc55a7ea, 0x11d631b, 0x1b20173, 0x955a} +{{0x22ce2ab, 0x8802747, 0xb9dd52f, 0xdb968af, 0xec68818, 0xb97cdd6, 0xf82a3af, 0x6952d2a, 0x3856727, 0xb1d488b, 0xc55a7ea, 0x11d631b, 0x1b20173, 0x955a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xd52f880274722ce2, 0xc68818db968afb9d, 0xaf82a3afb97cdd6e, 0x88b38567276952d2, 0xd631bc55a7eab1d4, 0x2b7455a1b2017311} +{{0xd52f880274722ce2, 0xc68818db968afb9d, 0xaf82a3afb97cdd6e, 0x88b38567276952d2, 0xd631bc55a7eab1d4, 0x2b7455a1b2017311}} #else -{0x5f1004e8e459c5, 0x636e5a2bee7754, 0x7dcbe6eb763440, 0x76952d2af82a3a, 0x563a911670ace4, 0x44758c6f1569fa, 0x57a2ad0d900b9} +{{0x5f1004e8e459c5, 0x636e5a2bee7754, 0x7dcbe6eb763440, 0x76952d2af82a3a, 0x563a911670ace4, 0x44758c6f1569fa, 0x57a2ad0d900b9}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1557, 0x1987, 0x65f, 0x1c20, 0x14ef, 0xb3b, 0xbbe, 0x19db, 0xc77, 0x566, 0x9ea, 0xcab, 0xafc, 0x1fda, 0xb44, 0x1fe6, 0x1af3, 0x1829, 0x2ef, 0xc23, 0x83d, 0x82c, 0x1fa8, 0x14b, 0xd6e, 0xde8, 0x260, 0x1019, 0x97a, 0x3} +{{0x1557, 0x1987, 0x65f, 0x1c20, 0x14ef, 0xb3b, 0xbbe, 0x19db, 0xc77, 0x566, 0x9ea, 0xcab, 0xafc, 0x1fda, 0xb44, 0x1fe6, 0x1af3, 0x1829, 0x2ef, 0xc23, 0x83d, 0x82c, 0x1fa8, 0x14b, 0xd6e, 0xde8, 0x260, 0x1019, 0x97a, 0x3}} #elif RADIX == 32 -{0xcc3d55c, 0xf84065f, 0xe59dd3b, 0xdf3b6bb, 0xea2b331, 0xbf19569, 0xb44fed2, 0xebcffcc, 0x62efc14, 0x620f584, 0x97fa841, 0xf435b82, 0x322606, 0x1a5ea} +{{0xcc3d55c, 0xf84065f, 0xe59dd3b, 0xdf3b6bb, 0xea2b331, 0xbf19569, 0xb44fed2, 0xebcffcc, 0x62efc14, 0x620f584, 0x97fa841, 0xf435b82, 0x322606, 0x1a5ea}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xdd3bf84065fcc3d5, 0xa2b331df3b6bbe59, 0xcb44fed2bf19569e, 0x58462efc14ebcffc, 0x35b8297fa841620f, 0x17765ea0322606f4} +{{0xdd3bf84065fcc3d5, 0xa2b331df3b6bbe59, 0xcb44fed2bf19569e, 
0x58462efc14ebcffc, 0x35b8297fa841620f, 0x17765ea0322606f4}} #else -{0x77f080cbf987aa, 0x477cedaef96774, 0x15f8cab4f51599, 0x4ebcffccb44fed, 0x2c41eb08c5df82, 0x3d0d6e0a5fea10, 0xbbb2f50191303} +{{0x77f080cbf987aa, 0x477cedaef96774, 0x15f8cab4f51599, 0x4ebcffccb44fed, 0x2c41eb08c5df82, 0x3d0d6e0a5fea10, 0xbbb2f50191303}} #endif #endif , #if 0 #elif RADIX == 16 -{0xb02, 0xc60, 0x791, 0x1cf7, 0xc15, 0x125a, 0x1697, 0xca1, 0x327, 0x89f, 0xf64, 0xddf, 0xcb7, 0x1977, 0x29f, 0x100a, 0xdac, 0xc8, 0x1e16, 0x1c4e, 0xedf, 0x1ec0, 0x1ac0, 0x1bbd, 0x16ee, 0x106a, 0x35c, 0x11cc, 0xdde, 0x20} +{{0xb02, 0xc60, 0x791, 0x1cf7, 0xc15, 0x125a, 0x1697, 0xca1, 0x327, 0x89f, 0xf64, 0xddf, 0xcb7, 0x1977, 0x29f, 0x100a, 0xdac, 0xc8, 0x1e16, 0x1c4e, 0xedf, 0x1ec0, 0x1ac0, 0x1bbd, 0x16ee, 0x106a, 0x35c, 0x11cc, 0xdde, 0x20}} #elif RADIX == 32 -{0x6302c0b, 0x79ee791, 0x792d305, 0x9d94369, 0x6444f8c, 0x2ddbbef, 0x29fcbbb, 0x36b2014, 0xde16064, 0x3b7f89, 0x7bac0f6, 0x355bbb7, 0x39835c8, 0x4077a} +{{0x6302c0b, 0x79ee791, 0x792d305, 0x9d94369, 0x6444f8c, 0x2ddbbef, 0x29fcbbb, 0x36b2014, 0xde16064, 0x3b7f89, 0x7bac0f6, 0x355bbb7, 0x39835c8, 0x4077a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xd30579ee7916302c, 0x444f8c9d94369792, 0x429fcbbb2ddbbef6, 0xf89de1606436b201, 0x5bbb77bac0f603b7, 0x30b77a39835c835} +{{0xd30579ee7916302c, 0x444f8c9d94369792, 0x429fcbbb2ddbbef6, 0xf89de1606436b201, 0x5bbb77bac0f603b7, 0x30b77a39835c835}} #else -{0xaf3dcf22c6058, 0x327650da5e4b4c, 0x596eddf7b2227c, 0x436b201429fcbb, 0x4076ff13bc2c0c, 0xd56eeddeeb03d, 0x185bbd1cc1ae4} +{{0xaf3dcf22c6058, 0x327650da5e4b4c, 0x596eddf7b2227c, 0x436b201429fcbb, 0x4076ff13bc2c0c, 0xd56eeddeeb03d, 0x185bbd1cc1ae4}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -1432,261 +1432,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0xccf, 0xad2, 0x1c72, 0x1f31, 0x155, 0xa3, 0x1062, 0xf6e, 0xe60, 0x87b, 0x1891, 0xb45, 0x1de8, 0x7f8, 0x18a6, 0x1e67, 0x988, 0x1887, 0xca7, 0x216, 0x1daa, 0x1aa5, 0xc3a, 0x11d3, 0x1282, 0x55d, 
0x15fc, 0x1132, 0x1c5e, 0x8} +{{0xccf, 0xad2, 0x1c72, 0x1f31, 0x155, 0xa3, 0x1062, 0xf6e, 0xe60, 0x87b, 0x1891, 0xb45, 0x1de8, 0x7f8, 0x18a6, 0x1e67, 0x988, 0x1887, 0xca7, 0x216, 0x1daa, 0x1aa5, 0xc3a, 0x11d3, 0x1282, 0x55d, 0x15fc, 0x1132, 0x1c5e, 0x8}} #elif RADIX == 32 -{0x569333d, 0x7e63c72, 0x2051855, 0x81edd06, 0x9143db9, 0x7a168b8, 0x8a63fc7, 0xa623ccf, 0xcca7c43, 0x2f6a842, 0xa6c3ad5, 0xaeca0a3, 0x2655fc2, 0x617a} +{{0x569333d, 0x7e63c72, 0x2051855, 0x81edd06, 0x9143db9, 0x7a168b8, 0x8a63fc7, 0xa623ccf, 0xcca7c43, 0x2f6a842, 0xa6c3ad5, 0xaeca0a3, 0x2655fc2, 0x617a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x18557e63c7256933, 0x143db981edd06205, 0xf8a63fc77a168b89, 0x842cca7c43a623cc, 0xca0a3a6c3ad52f6a, 0xf8317a2655fc2ae} +{{0x18557e63c7256933, 0x143db981edd06205, 0xf8a63fc77a168b89, 0x842cca7c43a623cc, 0xca0a3a6c3ad52f6a, 0xf8317a2655fc2ae}} #else -{0x2afcc78e4ad266, 0x6607b741881461, 0x3bd0b45c48a1ed, 0x3a623ccf8a63fc, 0x25ed5085994f88, 0x2bb2828e9b0eb5, 0x7c18bd132afe1} +{{0x2afcc78e4ad266, 0x6607b741881461, 0x3bd0b45c48a1ed, 0x3a623ccf8a63fc, 0x25ed5085994f88, 0x2bb2828e9b0eb5, 0x7c18bd132afe1}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x13b1, 0x12b4, 0xf1c, 0xfcc, 0x1855, 0x1028, 0x1418, 0x3db, 0x1b98, 0xa1e, 0xe24, 0x2d1, 0x77a, 0x11fe, 0x1e29, 0x799, 0x1a62, 0x1e21, 0x1329, 
0x1085, 0xf6a, 0x16a9, 0x1b0e, 0x1474, 0xca0, 0x157, 0x157f, 0x144c, 0x1317, 0x1b} +{{0x13b1, 0x12b4, 0xf1c, 0xfcc, 0x1855, 0x1028, 0x1418, 0x3db, 0x1b98, 0xa1e, 0xe24, 0x2d1, 0x77a, 0x11fe, 0x1e29, 0x799, 0x1a62, 0x1e21, 0x1329, 0x1085, 0xf6a, 0x16a9, 0x1b0e, 0x1474, 0xca0, 0x157, 0x157f, 0x144c, 0x1317, 0x1b}} #elif RADIX == 32 -{0x95a4ec7, 0x5f98f1c, 0x8814615, 0x607b741, 0x2450f6e, 0xde85a2e, 0xe298ff1, 0xe988f33, 0xb329f10, 0x4bdaa10, 0xe9b0eb5, 0xabb2828, 0x89957f0, 0x19c5e} +{{0x95a4ec7, 0x5f98f1c, 0x8814615, 0x607b741, 0x2450f6e, 0xde85a2e, 0xe298ff1, 0xe988f33, 0xb329f10, 0x4bdaa10, 0xe9b0eb5, 0xabb2828, 0x89957f0, 0x19c5e}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x46155f98f1c95a4e, 0x450f6e607b741881, 0x3e298ff1de85a2e2, 0xa10b329f10e988f3, 0xb2828e9b0eb54bda, 0x32a0c5e89957f0ab} +{{0x46155f98f1c95a4e, 0x450f6e607b741881, 0x3e298ff1de85a2e2, 0xa10b329f10e988f3, 0xb2828e9b0eb54bda, 0x32a0c5e89957f0ab}} #else -{0x2abf31e392b49d, 0x3981edd0620518, 0xef42d1712287b, 0xe988f33e298ff, 0x297b54216653e2, 0x2aeca0a3a6c3ad, 0x91062f44cabf8} +{{0x2abf31e392b49d, 0x3981edd0620518, 0xef42d1712287b, 0xe988f33e298ff, 0x297b54216653e2, 0x2aeca0a3a6c3ad, 0x91062f44cabf8}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0xdd8, 0x13bc, 0x17ae, 0x83e, 0x10c6, 0x1a72, 0x270, 0x84, 0xb92, 0x431, 0x1fdf, 
0x9cf, 0x2a9, 0x121d, 0x5d5, 0x1d9f, 0xa48, 0xec9, 0xcfc, 0x6ee, 0x1812, 0x66b, 0xed8, 0xf7, 0x117b, 0x1fb7, 0xc5, 0x1f00, 0x134f, 0x1f} +{{0xdd8, 0x13bc, 0x17ae, 0x83e, 0x10c6, 0x1a72, 0x270, 0x84, 0xb92, 0x431, 0x1fdf, 0x9cf, 0x2a9, 0x121d, 0x5d5, 0x1d9f, 0xa48, 0xec9, 0xcfc, 0x6ee, 0x1812, 0x66b, 0xed8, 0xf7, 0x117b, 0x1fb7, 0xc5, 0x1f00, 0x134f, 0x1f}} #elif RADIX == 32 -{0x9de3763, 0x907d7ae, 0xd39431, 0x4810827, 0xdf218ae, 0xaa539ff, 0x5d590e8, 0xa923b3e, 0xccfc764, 0x5e048dd, 0xeeed833, 0xdbc5ec1, 0xe000c5f, 0x39d3f} +{{0x9de3763, 0x907d7ae, 0xd39431, 0x4810827, 0xdf218ae, 0xaa539ff, 0x5d590e8, 0xa923b3e, 0xccfc764, 0x5e048dd, 0xeeed833, 0xdbc5ec1, 0xe000c5f, 0x39d3f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x9431907d7ae9de37, 0xf218ae48108270d3, 0xe5d590e8aa539ffd, 0x8ddccfc764a923b3, 0xc5ec1eeed8335e04, 0x195cd3fe000c5fdb} +{{0x9431907d7ae9de37, 0xf218ae48108270d3, 0xe5d590e8aa539ffd, 0x8ddccfc764a923b3, 0xc5ec1eeed8335e04, 0x195cd3fe000c5fdb}} #else -{0x6320faf5d3bc6e, 0x39204209c34e50, 0x45529cffef90c5, 0x4a923b3e5d590e, 0x6bc091bb99f8ec, 0x76f17b07bbb60c, 0xcae69ff00062f} +{{0x6320faf5d3bc6e, 0x39204209c34e50, 0x45529cffef90c5, 0x4a923b3e5d590e, 0x6bc091bb99f8ec, 0x76f17b07bbb60c, 0xcae69ff00062f}} #endif #endif , #if 0 #elif RADIX == 16 -{0xf36, 0x2c8, 0x1ab4, 0x17c1, 0x10be, 0x1a20, 0x1baf, 0x3ce, 0x1088, 0xd75, 0x1e25, 0x10f8, 0x3d2, 0x1b8, 0x9c7, 0x168, 0x44c, 0x372, 0xc50, 0x1d9a, 0x1b99, 0xab9, 0x8af, 0x657, 0xe84, 0xe1d, 0x1675, 0x47, 0x157e, 0xc} +{{0xf36, 0x2c8, 0x1ab4, 0x17c1, 0x10be, 0x1a20, 0x1baf, 0x3ce, 0x1088, 0xd75, 0x1e25, 0x10f8, 0x3d2, 0x1b8, 0x9c7, 0x168, 0x44c, 0x372, 0xc50, 0x1d9a, 0x1b99, 0xab9, 0x8af, 0x657, 0xe84, 0xe1d, 0x1675, 0x47, 0x157e, 0xc}} #elif RADIX == 32 -{0x1643cd9, 0xaf83ab4, 0xfd1042f, 0x2079dba, 0x256bac2, 0xf4a1f1e, 0x9c70dc0, 0x11302d0, 0x4c501b9, 0xcee67b3, 0xae8af55, 0xeba10c, 0x8f6757, 0x245f8} +{{0x1643cd9, 0xaf83ab4, 0xfd1042f, 0x2079dba, 0x256bac2, 0xf4a1f1e, 0x9c70dc0, 0x11302d0, 0x4c501b9, 0xcee67b3, 0xae8af55, 0xeba10c, 0x8f6757, 0x245f8}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x42faf83ab41643c, 0x56bac22079dbafd1, 0x9c70dc0f4a1f1e2, 0x7b34c501b911302d, 0xba10cae8af55cee6, 0x373d5f808f67570e} +{{0x42faf83ab41643c, 0x56bac22079dbafd1, 0x9c70dc0f4a1f1e2, 0x7b34c501b911302d, 0xba10cae8af55cee6, 0x373d5f808f67570e}} #else -{0x5f5f075682c879, 0x881e76ebf4410, 0x7a50f8f12b5d6, 0x111302d09c70dc, 0x39dccf6698a037, 0x43ae8432ba2bd5, 0xb5eafc047b3ab} +{{0x5f5f075682c879, 0x881e76ebf4410, 0x7a50f8f12b5d6, 0x111302d09c70dc, 0x39dccf6698a037, 0x43ae8432ba2bd5, 0xb5eafc047b3ab}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x4b0, 0x31c, 0x92f, 0xf0d, 0xbc1, 0x1e89, 0x4ce, 0x1480, 0xdee, 0x504, 0x970, 0x16c3, 0xcb6, 0xae7, 0x1147, 0x8c, 0xc2a, 0x1ff9, 0x7d8, 0xfe9, 0x1fb1, 0x748, 0x998, 0xb85, 0x1a8e, 0x19c7, 0x5f7, 0x103c, 0x12a4, 0xe} +{{0x4b0, 0x31c, 0x92f, 0xf0d, 0xbc1, 0x1e89, 0x4ce, 0x1480, 0xdee, 0x504, 0x970, 0x16c3, 0xcb6, 0xae7, 0x1147, 0x8c, 0xc2a, 0x1ff9, 0x7d8, 0xfe9, 0x1fb1, 0x748, 0x998, 0xb85, 0x1a8e, 0x19c7, 0x5f7, 0x103c, 0x12a4, 0xe}} #elif RADIX == 32 -{0x18e12c1, 0x5e1a92f, 0xef44af0, 0xba9004c, 0x7028237, 0x2dad869, 0x147573b, 0xb0a8119, 0x27d8ffc, 0x47ec5fd, 0xa9983a, 0xe3ea397, 0x785f7c, 0x33a92} +{{0x18e12c1, 0x5e1a92f, 0xef44af0, 0xba9004c, 0x7028237, 0x2dad869, 0x147573b, 0xb0a8119, 0x27d8ffc, 0x47ec5fd, 0xa9983a, 0xe3ea397, 0x785f7c, 0x33a92}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x4af05e1a92f18e12, 0x28237ba9004cef4, 0x9147573b2dad8697, 0x5fd27d8ffcb0a811, 0xea3970a9983a47ec, 0x3134a920785f7ce3} +{{0x4af05e1a92f18e12, 0x28237ba9004cef4, 0x9147573b2dad8697, 0x5fd27d8ffcb0a811, 0xea3970a9983a47ec, 0x3134a920785f7ce3}} #else -{0x60bc3525e31c25, 0x5eea40133bd12b, 0x596d6c34b81411, 0x4b0a8119147573, 0x48fd8bfa4fb1ff, 0x38fa8e5c2a660e, 0x85a54903c2fbe} +{{0x60bc3525e31c25, 0x5eea40133bd12b, 0x596d6c34b81411, 0x4b0a8119147573, 0x48fd8bfa4fb1ff, 0x38fa8e5c2a660e, 0x85a54903c2fbe}} #endif #endif , #if 0 #elif RADIX == 16 -{0x15a9, 0x1ae1, 0x1dd2, 0xa61, 0x1259, 0xfad, 0xe49, 0x1f6d, 0xd9a, 0x1371, 0xee7, 0x1179, 0x1bcf, 0x876, 0x3ca, 0xf7c, 0x1192, 0x315, 0x916, 0x1aa5, 0x1ca9, 0x10cb, 0xe32, 0x18b9, 0xf58, 0x1932, 0x1cce, 0x1ba7, 0x1377, 0x6} +{{0x15a9, 0x1ae1, 0x1dd2, 0xa61, 0x1259, 0xfad, 0xe49, 0x1f6d, 0xd9a, 0x1371, 0xee7, 0x1179, 0x1bcf, 0x876, 0x3ca, 0xf7c, 0x1192, 0x315, 0x916, 0x1aa5, 0x1ca9, 0x10cb, 0xe32, 0x18b9, 0xf58, 0x1932, 0x1cce, 0x1ba7, 0x1377, 0x6}} #elif RADIX == 32 -{0xd70d6a4, 0x54c3dd2, 0x97d6c96, 0x6bedae4, 0xe79b8b6, 0xf3e2f2e, 0x3ca43b6, 0xc649ef8, 0xa91618a, 0x5f2a754, 0x72e3286, 0x993d631, 0x74fccec, 0x34ddf} +{{0xd70d6a4, 0x54c3dd2, 0x97d6c96, 0x6bedae4, 0xe79b8b6, 0xf3e2f2e, 0x3ca43b6, 0xc649ef8, 0xa91618a, 0x5f2a754, 0x72e3286, 0x993d631, 0x74fccec, 0x34ddf}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6c9654c3dd2d70d6, 0x79b8b66bedae497d, 0x83ca43b6f3e2f2ee, 0x754a91618ac649ef, 0x3d63172e32865f2a, 0x29d8ddf74fccec99} +{{0x6c9654c3dd2d70d6, 0x79b8b66bedae497d, 0x83ca43b6f3e2f2ee, 0x754a91618ac649ef, 0x3d63172e32865f2a, 0x29d8ddf74fccec99}} #else -{0x2ca987ba5ae1ad, 0x59afb6b925f5b2, 0x379f179773cdc5, 0x2c649ef83ca43b, 0x4be54ea9522c31, 0x264f58c5cb8ca1, 0x4ac6efba7e676} +{{0x2ca987ba5ae1ad, 0x59afb6b925f5b2, 0x379f179773cdc5, 0x2c649ef83ca43b, 0x4be54ea9522c31, 0x264f58c5cb8ca1, 0x4ac6efba7e676}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1f79, 0xcad, 0x18f2, 0x1ba7, 0x1d14, 0x1fc6, 0x197d, 0x522, 0xab, 0x7bd, 0x57b, 0x1fbf, 0x12, 0xb50, 0x425, 0x1aa3, 0x1c8e, 0x11cf, 0x1c1b, 0x1774, 0x3fc, 0x36a, 0x148f, 0x1fd3, 0x608, 0x1711, 0x1142, 0xcfa, 0xd43, 0xd} +{{0x1f79, 0xcad, 0x18f2, 0x1ba7, 0x1d14, 0x1fc6, 0x197d, 0x522, 0xab, 0x7bd, 0x57b, 0x1fbf, 0x12, 0xb50, 0x425, 0x1aa3, 0x1c8e, 0x11cf, 0x1c1b, 0x1774, 0x3fc, 0x36a, 0x148f, 0x1fd3, 0x608, 0x1711, 0x1142, 0xcfa, 0xd43, 0xd}} #elif RADIX == 32 -{0x656fde5, 0x374f8f2, 0xdfe3745, 0xaca4597, 0x7b3de82, 0x4bf7e5, 0x4255a80, 0xf23b546, 0x9c1b8e7, 0x50ff2ee, 0xa748f1b, 0x889823f, 0x9f5142b, 0x2a50d} +{{0x656fde5, 0x374f8f2, 0xdfe3745, 0xaca4597, 0x7b3de82, 0x4bf7e5, 0x4255a80, 0xf23b546, 0x9c1b8e7, 0x50ff2ee, 0xa748f1b, 0x889823f, 0x9f5142b, 0x2a50d}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3745374f8f2656fd, 0xb3de82aca4597dfe, 0x64255a8004bf7e57, 0x2ee9c1b8e7f23b54, 0x9823fa748f1b50ff, 0x3a4f50d9f5142b88} +{{0x3745374f8f2656fd, 0xb3de82aca4597dfe, 0x64255a8004bf7e57, 0x2ee9c1b8e7f23b54, 0x9823fa748f1b50ff, 0x3a4f50d9f5142b88}} #else -{0xa6e9f1e4cadfb, 0xab29165f7f8dd, 0x25fbf2bd9ef4, 0x7f23b5464255a8, 0x6a1fe5dd38371c, 0x622608fe9d23c6, 0xce7a86cfa8a15} +{{0xa6e9f1e4cadfb, 0xab29165f7f8dd, 0x25fbf2bd9ef4, 0x7f23b5464255a8, 0x6a1fe5dd38371c, 0x622608fe9d23c6, 0xce7a86cfa8a15}} #endif #endif , #if 0 #elif RADIX == 16 -{0x14a, 0x1236, 0x839, 0xe2, 0xe2d, 0xe17, 0x1b8f, 0x18dd, 0xb20, 0xeb8, 0x1da9, 0xc53, 0x12e8, 0x146, 0x1b9b, 0x154, 0x1121, 0x1049, 0x105d, 0x631, 0xc9, 0xbe0, 0x8fa, 0xbc0, 0x34b, 0x178a, 0x77b, 0x2a7, 0x105b, 0x15} +{{0x14a, 0x1236, 0x839, 0xe2, 0xe2d, 0xe17, 0x1b8f, 0x18dd, 0xb20, 0xeb8, 0x1da9, 0xc53, 0x12e8, 0x146, 0x1b9b, 0x154, 0x1121, 0x1049, 0x105d, 0x631, 0xc9, 0xbe0, 0x8fa, 0xbc0, 0x34b, 0x178a, 0x77b, 0x2a7, 0x105b, 0x15}} #elif RADIX == 32 -{0x91b052a, 0x41c4839, 0xf70bb8b, 0x831bbb8, 0xa975c2c, 0xba18a7d, 0xb9b0a34, 0xc4842a9, 0x305d824, 0x324c6, 0x808fa5f, 0xc50d2d7, 0x54e77bb, 0x2a16c} +{{0x91b052a, 0x41c4839, 0xf70bb8b, 0x831bbb8, 0xa975c2c, 0xba18a7d, 0xb9b0a34, 0xc4842a9, 0x305d824, 0x324c6, 0x808fa5f, 0xc50d2d7, 0x54e77bb, 0x2a16c}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xbb8b41c483991b05, 0x975c2c831bbb8f70, 
0x9b9b0a34ba18a7da, 0x4c6305d824c4842a, 0xd2d7808fa5f0032, 0xad416c54e77bbc5} +{{0xbb8b41c483991b05, 0x975c2c831bbb8f70, 0x9b9b0a34ba18a7da, 0x4c6305d824c4842a, 0xd2d7808fa5f0032, 0xad416c54e77bbc5}} #else -{0x1683890732360a, 0x320c6eee3dc2ee, 0x25d0c53ed4bae1, 0x4c4842a9b9b0a3, 0x6006498c60bb04, 0x71434b5e023e97, 0x56a0b62a73bdd} +{{0x1683890732360a, 0x320c6eee3dc2ee, 0x25d0c53ed4bae1, 0x4c4842a9b9b0a3, 0x6006498c60bb04, 0x71434b5e023e97, 0x56a0b62a73bdd}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -1908,261 +1908,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x10c4, 0x1e1a, 0x1b68, 0xa71, 0xa1a, 0x1f60, 0xdda, 0xb59, 0x1bc0, 0x26c, 0x1325, 0x4cf, 0x1375, 0x10ef, 0x1702, 0x1eff, 0x171f, 0x467, 0xc1c, 0xf3b, 0xf74, 0x6fa, 0x177f, 0x911, 0x8bc, 0x16b7, 0x1022, 0xa5f, 0xa55, 0x9} +{{0x10c4, 0x1e1a, 0x1b68, 0xa71, 0xa1a, 0x1f60, 0xdda, 0xb59, 0x1bc0, 0x26c, 0x1325, 0x4cf, 0x1375, 0x10ef, 0x1702, 0x1eff, 0x171f, 0x467, 0xc1c, 0xf3b, 0xf74, 0x6fa, 0x177f, 0x911, 0x8bc, 0x16b7, 0x1022, 0xa5f, 0xa55, 0x9}} #elif RADIX == 32 -{0xf0d4311, 0x94e3b68, 0xafb0286, 0x16b2dd, 0x251366f, 0xdd499f3, 0x702877c, 0xdc7fdff, 0x6c1c233, 0xd3dd1e7, 0x2377f37, 0x5ba2f12, 0x4bf022b, 0x9955} +{{0xf0d4311, 0x94e3b68, 0xafb0286, 0x16b2dd, 0x251366f, 0xdd499f3, 0x702877c, 0xdc7fdff, 0x6c1c233, 0xd3dd1e7, 0x2377f37, 0x5ba2f12, 0x4bf022b, 0x9955}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x28694e3b68f0d43, 0x51366f016b2ddafb, 0xf702877cdd499f32, 0x1e76c1c233dc7fdf, 0xa2f122377f37d3dd, 0x45a9554bf022b5b} +{{0x28694e3b68f0d43, 0x51366f016b2ddafb, 0xf702877cdd499f32, 0x1e76c1c233dc7fdf, 0xa2f122377f37d3dd, 0x45a9554bf022b5b}} #else -{0xd29c76d1e1a86, 0x3c05acb76bec0a, 0x66ea4cf99289b3, 0x3dc7fdff702877, 0x7a7ba3ced83846, 0x56e8bc488ddfcd, 0x22d4aaa5f8115} +{{0xd29c76d1e1a86, 0x3c05acb76bec0a, 0x66ea4cf99289b3, 0x3dc7fdff702877, 0x7a7ba3ced83846, 0x56e8bc488ddfcd, 0x22d4aaa5f8115}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x14af, 0x786, 0xeda, 0x129c, 0x286, 0x17d8, 0xb76, 0x2d6, 0x6f0, 0x89b, 0x1cc9, 0x933, 0x1cdd, 0x143b, 0x1dc0, 0x1fbf, 0x1dc7, 0x119, 0x1b07, 0x3ce, 0x13dd, 0x19be, 0xddf, 0x244, 0x1a2f, 0x15ad, 0x1c08, 0xa97, 0xa95, 0x3} +{{0x14af, 0x786, 0xeda, 0x129c, 0x286, 0x17d8, 0xb76, 0x2d6, 0x6f0, 0x89b, 0x1cc9, 0x933, 0x1cdd, 0x143b, 0x1dc0, 0x1fbf, 0x1dc7, 0x119, 0x1b07, 0x3ce, 0x13dd, 0x19be, 0xddf, 0x244, 0x1a2f, 0x15ad, 0x1c08, 0xa97, 0xa95, 0x3}} #elif RADIX == 32 -{0x3c352bc, 0xa538eda, 0x6bec0a1, 0xc05acb7, 0xc944d9b, 0x375267c, 0xdc0a1df, 0xf71ff7f, 0xdb0708c, 0xf4f7479, 0x88ddfcd, 0xd6e8bc4, 0x52fc08a, 0x1aa55} +{{0x3c352bc, 0xa538eda, 0x6bec0a1, 0xc05acb7, 0xc944d9b, 0x375267c, 0xdc0a1df, 0xf71ff7f, 0xdb0708c, 0xf4f7479, 0x88ddfcd, 0xd6e8bc4, 0x52fc08a, 0x1aa55}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xc0a1a538eda3c352, 0x944d9bc05acb76be, 0xfdc0a1df375267cc, 0x479db0708cf71ff7, 0xe8bc488ddfcdf4f7, 0x2fd6a5552fc08ad6} +{{0xc0a1a538eda3c352, 0x944d9bc05acb76be, 0xfdc0a1df375267cc, 0x479db0708cf71ff7, 0xe8bc488ddfcdf4f7, 0x2fd6a5552fc08ad6}} #else -{0x434a71db4786a5, 0x6f016b2ddafb02, 0x79ba933e64a26c, 0x4f71ff7fdc0a1d, 0x3e9ee8f3b60e11, 0x35ba2f122377f3, 0x7ab52aa97e045} +{{0x434a71db4786a5, 0x6f016b2ddafb02, 0x79ba933e64a26c, 0x4f71ff7fdc0a1d, 0x3e9ee8f3b60e11, 0x35ba2f122377f3, 0x7ab52aa97e045}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0xd3b, 0x1cbd, 0x1177, 0x1087, 0x5d2, 0x1535, 0x1cb5, 0x1372, 0x158a, 0x931, 0x12da, 0x1b9d, 0x44e, 0xa00, 0xb71, 0xe8a, 0x1c57, 0x1a1, 0x5bb, 0x1180, 0x15f0, 0x1ca3, 0x119b, 0x16cc, 0xd3a, 0xaa7, 0xbc3, 0x9fc, 0xb07, 0x1a} +{{0xd3b, 0x1cbd, 0x1177, 0x1087, 0x5d2, 0x1535, 0x1cb5, 0x1372, 0x158a, 0x931, 0x12da, 0x1b9d, 0x44e, 0xa00, 0xb71, 0xe8a, 0x1c57, 0x1a1, 0x5bb, 0x1180, 0x15f0, 0x1ca3, 0x119b, 0x16cc, 0xd3a, 0xaa7, 0xbc3, 0x9fc, 0xb07, 0x1a}} #elif RADIX == 32 -{0xe5eb4ef, 0xa10f177, 0x5a9a974, 0x2a6e5cb, 0xda498d6, 0x13b73b2, 0xb715001, 0xf15dd14, 0x5bb0d0, 0x1d7c230, 0x9919be5, 0x53b4ead, 0x3f8bc35, 0xfc1d} +{{0xe5eb4ef, 0xa10f177, 0x5a9a974, 0x2a6e5cb, 0xda498d6, 0x13b73b2, 0xb715001, 0xf15dd14, 0x5bb0d0, 0x1d7c230, 0x9919be5, 0x53b4ead, 0x3f8bc35, 0xfc1d}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xa974a10f177e5eb4, 0xa498d62a6e5cb5a9, 0x4b71500113b73b2d, 0x23005bb0d0f15dd1, 0xb4ead9919be51d7c, 0x3cbec1d3f8bc3553} +{{0xa974a10f177e5eb4, 0xa498d62a6e5cb5a9, 0x4b71500113b73b2d, 0x23005bb0d0f15dd1, 0xb4ead9919be51d7c, 0x3cbec1d3f8bc3553}} #else -{0x69421e2efcbd69, 0x58a9b972d6a6a5, 0x89db9d96d24c6, 0xf15dd14b71500, 0x23af84600b761a, 0x54ed3ab66466f9, 0xe1f60e9fc5e1a} +{{0x69421e2efcbd69, 0x58a9b972d6a6a5, 0x89db9d96d24c6, 0xf15dd14b71500, 0x23af84600b761a, 0x54ed3ab66466f9, 
0xe1f60e9fc5e1a}} #endif #endif , #if 0 #elif RADIX == 16 -{0x186, 0x245, 0xa48, 0x11da, 0x1354, 0x9fc, 0x168f, 0xff7, 0x1f2c, 0x6a2, 0x6fb, 0x980, 0x164f, 0xbb8, 0x49c, 0x1ad1, 0x145f, 0x80a, 0xf93, 0x2d8, 0x1846, 0x43, 0x5a9, 0x3a, 0x72e, 0x1e10, 0x741, 0x783, 0x967, 0x1a} +{{0x186, 0x245, 0xa48, 0x11da, 0x1354, 0x9fc, 0x168f, 0xff7, 0x1f2c, 0x6a2, 0x6fb, 0x980, 0x164f, 0xbb8, 0x49c, 0x1ad1, 0x145f, 0x80a, 0xf93, 0x2d8, 0x1846, 0x43, 0x5a9, 0x3a, 0x72e, 0x1e10, 0x741, 0x783, 0x967, 0x1a}} #elif RADIX == 32 -{0x122861b, 0x23b4a48, 0xf4fe4d5, 0xb1fef68, 0xfb3517c, 0x93d3006, 0x49c5dc5, 0x517f5a2, 0xf93405, 0x1e1185b, 0x745a902, 0x81cb80, 0xf06741f, 0xf59c} +{{0x122861b, 0x23b4a48, 0xf4fe4d5, 0xb1fef68, 0xfb3517c, 0x93d3006, 0x49c5dc5, 0x517f5a2, 0xf93405, 0x1e1185b, 0x745a902, 0x81cb80, 0xf06741f, 0xf59c}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xe4d523b4a4812286, 0xb3517cb1fef68f4f, 0x249c5dc593d3006f, 0x85b0f93405517f5a, 0x1cb80745a9021e11, 0x6ea59cf06741f08} +{{0xe4d523b4a4812286, 0xb3517cb1fef68f4f, 0x249c5dc593d3006f, 0x85b0f93405517f5a, 0x1cb80745a9021e11, 0x6ea59cf06741f08}} #else -{0x2a47694902450c, 0x72c7fbda3d3f93, 0x2c9e98037d9a8b, 0x5517f5a249c5dc, 0x43c230b61f2680, 0x42072e01d16a40, 0x3752ce7833a0f} +{{0x2a47694902450c, 0x72c7fbda3d3f93, 0x2c9e98037d9a8b, 0x5517f5a249c5dc, 0x43c230b61f2680, 0x42072e01d16a40, 0x3752ce7833a0f}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1064, 0x8a7, 0x7c, 0x1876, 0xf16, 0x3a0, 0x124, 0x637, 0x11bf, 0x223, 0x6d, 0x58e, 0xcde, 0xaf, 0x99c, 0x1c62, 0xdcb, 0xe10, 0x7ba, 0x127f, 0x1a23, 0x69a, 0x7bd, 0x238, 0x455, 0x16ac, 0x1147, 0x12a, 0x14c1, 0x5} +{{0x1064, 0x8a7, 0x7c, 0x1876, 0xf16, 0x3a0, 0x124, 0x637, 0x11bf, 0x223, 0x6d, 0x58e, 0xcde, 0xaf, 0x99c, 0x1c62, 0xdcb, 0xe10, 0x7ba, 0x127f, 0x1a23, 0x69a, 0x7bd, 0x238, 0x455, 0x16ac, 0x1147, 0x12a, 0x14c1, 0x5}} #elif RADIX == 32 -{0x453c190, 0xb0ec07c, 0x41d03c5, 0xfcc6e12, 0x6d111c6, 0x378b1c0, 0x99c057b, 0x372f8c4, 0xe7ba708, 0xd688e4f, 0x707bd34, 0x5611544, 0x255147b, 0x2d304} +{{0x453c190, 
0xb0ec07c, 0x41d03c5, 0xfcc6e12, 0x6d111c6, 0x378b1c0, 0x99c057b, 0x372f8c4, 0xe7ba708, 0xd688e4f, 0x707bd34, 0x5611544, 0x255147b, 0x2d304}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3c5b0ec07c453c1, 0xd111c6fcc6e1241d, 0x499c057b378b1c06, 0xe4fe7ba708372f8c, 0x11544707bd34d688, 0x24bd304255147b56} +{{0x3c5b0ec07c453c1, 0xd111c6fcc6e1241d, 0x499c057b378b1c06, 0xe4fe7ba708372f8c, 0x11544707bd34d688, 0x24bd304255147b56}} #else -{0xb61d80f88a783, 0x1bf31b8490740f, 0x59bc58e036888e, 0x372f8c499c057, 0x1ad11c9fcf74e1, 0x55845511c1ef4d, 0x21e98212a8a3d} +{{0xb61d80f88a783, 0x1bf31b8490740f, 0x59bc58e036888e, 0x372f8c499c057, 0x1ad11c9fcf74e1, 0x55845511c1ef4d, 0x21e98212a8a3d}} #endif #endif , #if 0 #elif RADIX == 16 -{0xaab, 0x60b, 0x8a0, 0x15d7, 0xbd8, 0x3ab, 0x1641, 0x1771, 0x134a, 0x17a, 0x785, 0x624, 0x1d, 0x1c3d, 0xcb1, 0xb5e, 0x23f, 0xf53, 0x879, 0x5e2, 0x903, 0xaff, 0xf72, 0xa2d, 0x7f4, 0xeb8, 0xd96, 0x1715, 0xffa, 0xa} +{{0xaab, 0x60b, 0x8a0, 0x15d7, 0xbd8, 0x3ab, 0x1641, 0x1771, 0x134a, 0x17a, 0x785, 0x624, 0x1d, 0x1c3d, 0xcb1, 0xb5e, 0x23f, 0xf53, 0x879, 0x5e2, 0x903, 0xaff, 0xf72, 0xa2d, 0x7f4, 0xeb8, 0xd96, 0x1715, 0xffa, 0xa}} #elif RADIX == 32 -{0x305aaad, 0x2bae8a0, 0x11d5af6, 0x2aee364, 0x850bd4d, 0x74c487, 0xcb1e1e8, 0x88fd6bc, 0x48797a9, 0xfa40cbc, 0x5af7257, 0x5c1fd14, 0xe2ad967, 0x12fea} +{{0x305aaad, 0x2bae8a0, 0x11d5af6, 0x2aee364, 0x850bd4d, 0x74c487, 0xcb1e1e8, 0x88fd6bc, 0x48797a9, 0xfa40cbc, 0x5af7257, 0x5c1fd14, 0xe2ad967, 0x12fea}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x5af62bae8a0305aa, 0x50bd4d2aee36411d, 0xccb1e1e8074c4878, 0xcbc48797a988fd6b, 0x1fd145af7257fa40, 0x2bfffeae2ad9675c} +{{0x5af62bae8a0305aa, 0x50bd4d2aee36411d, 0xccb1e1e8074c4878, 0xcbc48797a988fd6b, 0x1fd145af7257fa40, 0x2bfffeae2ad9675c}} #else -{0x6c575d14060b55, 0x34abb8d904756b, 0x403a6243c285ea, 0x188fd6bccb1e1e, 0x7f48197890f2f5, 0x5707f4516bdc95, 0x5bfff57156cb3} +{{0x6c575d14060b55, 0x34abb8d904756b, 0x403a6243c285ea, 0x188fd6bccb1e1e, 0x7f48197890f2f5, 0x5707f4516bdc95, 0x5bfff57156cb3}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x195c, 0x1d55, 0x99f, 0x11f, 0x106b, 0xab1, 0x3e7, 0x1e40, 0xa1e, 0xdf0, 0x1dd4, 0x5cd, 0xfc3, 0x1c99, 0xbfa, 0x1ead, 0x1f6, 0x12fa, 0x1465, 0xad7, 0x1a84, 0x18d8, 0x1b7f, 0x9fe, 0x14b1, 0x13b7, 0x189f, 0x12bc, 0xabc, 0x1f} +{{0x195c, 0x1d55, 0x99f, 0x11f, 0x106b, 0xab1, 0x3e7, 0x1e40, 0xa1e, 0xdf0, 0x1dd4, 0x5cd, 0xfc3, 0x1c99, 0xbfa, 0x1ead, 0x1f6, 0x12fa, 0x1465, 0xad7, 0x1a84, 0x18d8, 0x1b7f, 0x9fe, 0x14b1, 0x13b7, 0x189f, 0x12bc, 0xabc, 0x1f}} #elif RADIX == 32 -{0xeaae573, 0xc23e99f, 0x7558c1a, 0x7bc803e, 0xd46f828, 0xf0cb9bd, 0xbfae4cb, 0x7dbd5a, 0xf46597d, 0xc6a115a, 0xfdb7fc6, 0xdbd2c53, 0x57989f9, 0x37af2} +{{0xeaae573, 0xc23e99f, 0x7558c1a, 0x7bc803e, 0xd46f828, 0xf0cb9bd, 0xbfae4cb, 0x7dbd5a, 0xf46597d, 0xc6a115a, 0xfdb7fc6, 0xdbd2c53, 0x57989f9, 0x37af2}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x8c1ac23e99feaae5, 0x46f8287bc803e755, 0xabfae4cbf0cb9bdd, 0x15af46597d07dbd5, 0xd2c53fdb7fc6c6a1, 0x1d6aaf257989f9db} +{{0x8c1ac23e99feaae5, 0x46f8287bc803e755, 0xabfae4cbf0cb9bdd, 0x15af46597d07dbd5, 0xd2c53fdb7fc6c6a1, 0x1d6aaf257989f9db}} #else -{0x35847d33fd55ca, 0x21ef200f9d5630, 0x5f865cdeea37c1, 0x507dbd5abfae4c, 0x58d422b5e8cb2f, 0x76f4b14ff6dff1, 0xeb55792bcc4fc} +{{0x35847d33fd55ca, 0x21ef200f9d5630, 0x5f865cdeea37c1, 0x507dbd5abfae4c, 0x58d422b5e8cb2f, 0x76f4b14ff6dff1, 0xeb55792bcc4fc}} #endif #endif , #if 0 #elif RADIX == 16 -{0x970, 0x18b4, 0xc62, 0xf59, 0xf33, 0x6c0, 0x5ae, 0x86b, 0x1690, 0x17e1, 0x829, 0xab5, 0x169, 0x1115, 0x1b7e, 0x17fa, 0xcae, 0x1b7, 0xc7b, 0xb70, 0x11fc, 0x1417, 0x8b4, 0x1b78, 0x35a, 0x18e, 0x1e46, 0x15f0, 0xf64, 0x15} +{{0x970, 0x18b4, 0xc62, 0xf59, 0xf33, 0x6c0, 0x5ae, 0x86b, 0x1690, 0x17e1, 0x829, 0xab5, 0x169, 0x1115, 0x1b7e, 0x17fa, 0xcae, 0x1b7, 0xc7b, 0xb70, 0x11fc, 0x1417, 0x8b4, 0x1b78, 0x35a, 0x18e, 0x1e46, 0x15f0, 0xf64, 0x15}} #elif RADIX == 32 -{0xc5a25c2, 0xdeb2c62, 0xe3603cc, 0x410d65a, 0x29bf0da, 0x5a556a8, 0xb7e88a8, 0xb2baff5, 0xc7b0db, 0xbc7f16e, 0xf08b4a0, 0xc70d6b6, 0xbe1e460, 0x29d92} +{{0xc5a25c2, 0xdeb2c62, 0xe3603cc, 0x410d65a, 0x29bf0da, 0x5a556a8, 0xb7e88a8, 0xb2baff5, 0xc7b0db, 0xbc7f16e, 0xf08b4a0, 0xc70d6b6, 0xbe1e460, 0x29d92}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3ccdeb2c62c5a25, 0x9bf0da410d65ae36, 0x5b7e88a85a556a82, 0x16e0c7b0dbb2baff, 0xd6b6f08b4a0bc7f, 0x316bd92be1e460c7} +{{0x3ccdeb2c62c5a25, 0x9bf0da410d65ae36, 0x5b7e88a85a556a82, 0x16e0c7b0dbb2baff, 0xd6b6f08b4a0bc7f, 0x316bd92be1e460c7}} #else -{0x19bd658c58b44b, 0x69043596b8d80f, 0x42d2ab5414df86, 0x3b2baff5b7e88a, 0x178fe2dc18f61b, 0x31c35adbc22d28, 0x875ec95f0f230} +{{0x19bd658c58b44b, 0x69043596b8d80f, 0x42d2ab5414df86, 0x3b2baff5b7e88a, 0x178fe2dc18f61b, 0x31c35adbc22d28, 0x875ec95f0f230}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 
0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -2384,261 +2384,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x187f, 0x1c7d, 0x163a, 0x54d, 0x197f, 0x1a5d, 0x1833, 0x823, 0x11ae, 0x225, 0xd6d, 0x1627, 0xd6c, 0x1625, 0xa36, 0xf64, 0xcfa, 0x327, 0x188d, 0xfab, 0x56b, 0x91c, 0x1cc5, 0x4fe, 0x2ae, 0x181a, 0x195, 0x1288, 0x10fc, 0x3} +{{0x187f, 0x1c7d, 0x163a, 0x54d, 0x197f, 0x1a5d, 0x1833, 0x823, 0x11ae, 0x225, 0xd6d, 0x1627, 0xd6c, 0x1625, 0xa36, 0xf64, 0xcfa, 0x327, 0x188d, 0xfab, 0x56b, 0x91c, 0x1cc5, 0x4fe, 0x2ae, 0x181a, 0x195, 0x1288, 0x10fc, 0x3}} #elif RADIX == 32 -{0xe3ee1fc, 0xca9b63a, 0x3d2ee5f, 0xb904783, 0x6d112c6, 0x5b2c4ed, 0xa36b12b, 0xb3e9ec8, 0x788d193, 0xe15adf5, 0xfdcc548, 0xd0ab89, 0x510195c, 0x1c3f2} +{{0xe3ee1fc, 0xca9b63a, 0x3d2ee5f, 0xb904783, 0x6d112c6, 0x5b2c4ed, 0xa36b12b, 0xb3e9ec8, 0x788d193, 0xe15adf5, 0xfdcc548, 0xd0ab89, 0x510195c, 0x1c3f2}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xee5fca9b63ae3ee1, 0xd112c6b9047833d2, 0x8a36b12b5b2c4ed6, 0xdf5788d193b3e9ec, 0xab89fdcc548e15a, 0x40183f2510195c0d} +{{0xee5fca9b63ae3ee1, 0xd112c6b9047833d2, 0x8a36b12b5b2c4ed6, 0xdf5788d193b3e9ec, 0xab89fdcc548e15a, 0x40183f2510195c0d}} #else -{0x3f9536c75c7dc3, 0x1ae411e0cf4bb9, 0x5ad96276b68896, 0x3b3e9ec8a36b12, 0x1c2b5beaf11a32, 0x342ae27f73152, 0xfcc1f92880cae} +{{0x3f9536c75c7dc3, 0x1ae411e0cf4bb9, 0x5ad96276b68896, 0x3b3e9ec8a36b12, 0x1c2b5beaf11a32, 0x342ae27f73152, 0xfcc1f92880cae}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 
0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0xe9d, 0x171f, 0xd8e, 0x1953, 0xe5f, 0x1e97, 0x1e0c, 0x1208, 0xc6b, 0x889, 0x1b5b, 0x589, 0xb5b, 0x1589, 0x28d, 0x13d9, 0x1b3e, 0x8c9, 0x1e23, 0x1bea, 0x15a, 0xa47, 0x1731, 0x113f, 0x10ab, 0xe06, 0x65, 0x4a2, 0x83f, 0x1a} +{{0xe9d, 0x171f, 0xd8e, 0x1953, 0xe5f, 0x1e97, 0x1e0c, 0x1208, 0xc6b, 0x889, 0x1b5b, 0x589, 0xb5b, 0x1589, 0x28d, 0x13d9, 0x1b3e, 0x8c9, 0x1e23, 0x1bea, 0x15a, 0xa47, 0x1731, 0x113f, 0x10ab, 0xe06, 0x65, 0x4a2, 0x83f, 0x1a}} #elif RADIX == 32 -{0xb8fba77, 0xf2a6d8e, 0xcf4bb97, 0xae411e0, 0x5b444b1, 0xd6cb13b, 0x28dac4a, 0xecfa7b2, 0x5e23464, 0x3856b7d, 0x7f73152, 0x342ae2, 0x9440657, 0xf0fc} +{{0xb8fba77, 0xf2a6d8e, 0xcf4bb97, 0xae411e0, 0x5b444b1, 0xd6cb13b, 0x28dac4a, 0xecfa7b2, 0x5e23464, 0x3856b7d, 0x7f73152, 0x342ae2, 0x9440657, 0xf0fc}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xbb97f2a6d8eb8fba, 0xb444b1ae411e0cf4, 0x228dac4ad6cb13b5, 0xb7d5e23464ecfa7b, 0x42ae27f731523856, 0x1e460fc944065703} +{{0xbb97f2a6d8eb8fba, 0xb444b1ae411e0cf4, 0x228dac4ad6cb13b5, 0xb7d5e23464ecfa7b, 0x42ae27f731523856, 0x1e460fc944065703}} #else -{0x2fe54db1d71f74, 0x46b9047833d2ee, 0x56b6589dada225, 0x4ecfa7b228dac4, 0x470ad6fabc468c, 0x40d0ab89fdcc54, 0xf2307e4a2032b} +{{0x2fe54db1d71f74, 0x46b9047833d2ee, 0x56b6589dada225, 0x4ecfa7b228dac4, 0x470ad6fabc468c, 0x40d0ab89fdcc54, 0xf2307e4a2032b}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if 
defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x237, 0xee8, 0xd8c, 0xafb, 0x18cd, 0x1ce1, 0x162a, 0x11c9, 0x1bbc, 0x1415, 0x1c35, 0x1d0c, 0x1104, 0x1558, 0x9d, 0xb17, 0x1097, 0x16d2, 0xc02, 0x1573, 0x1c5f, 0x1bec, 0x1a73, 0x1dfe, 0x1923, 0x18d6, 0x221, 0x11ee, 0x1581, 0xb} +{{0x237, 0xee8, 0xd8c, 0xafb, 0x18cd, 0x1ce1, 0x162a, 0x11c9, 0x1bbc, 0x1415, 0x1c35, 0x1d0c, 0x1104, 0x1558, 0x9d, 0xb17, 0x1097, 0x16d2, 0xc02, 0x1573, 0x1c5f, 0x1bec, 0x1a73, 0x1dfe, 0x1923, 0x18d6, 0x221, 0x11ee, 0x1581, 0xb}} #elif RADIX == 32 -{0x77408dd, 0x55f6d8c, 0xae70e33, 0xf239362, 0x35a0aee, 0x413a19c, 0x9daac4, 0x425d62e, 0x6c02b69, 0x6717eae, 0xfda73df, 0x6b648fb, 0x3dc221c, 0x1c606} +{{0x77408dd, 0x55f6d8c, 0xae70e33, 0xf239362, 0x35a0aee, 0x413a19c, 0x9daac4, 0x425d62e, 0x6c02b69, 0x6717eae, 0xfda73df, 0x6b648fb, 0x3dc221c, 0x1c606}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xe3355f6d8c77408, 0x5a0aeef239362ae7, 0xe09daac4413a19c3, 0xeae6c02b69425d62, 0x648fbfda73df6717, 0x38396063dc221c6b} +{{0xe3355f6d8c77408, 0x5a0aeef239362ae7, 0xe09daac4413a19c3, 0xeae6c02b69425d62, 0x648fbfda73df6717, 0x38396063dc221c6b}} #else -{0x66abedb18ee811, 0x3bc8e4d8ab9c38, 0x2209d0ce1ad057, 0x1425d62e09daac, 0x6ce2fd5cd8056d, 0x1ad923eff69cf7, 0xbdcb031ee110e} +{{0x66abedb18ee811, 0x3bc8e4d8ab9c38, 0x2209d0ce1ad057, 0x1425d62e09daac, 0x6ce2fd5cd8056d, 0x1ad923eff69cf7, 0xbdcb031ee110e}} #endif #endif , #if 0 #elif RADIX == 16 -{0x16a4, 0x11f0, 0x446, 0x1b2b, 0x129e, 0x1b52, 0x25, 0x18e4, 0x15d7, 0x545, 0x1502, 0x3af, 0x1b45, 0xff3, 0x1423, 0x1574, 0x1c5a, 0xff0, 0x1663, 0x114b, 0xc99, 0x1c89, 0x11f0, 0x15fd, 0x17a1, 0x14dd, 0x17f7, 0x1451, 0x5af, 0x17} +{{0x16a4, 0x11f0, 0x446, 0x1b2b, 0x129e, 0x1b52, 0x25, 0x18e4, 0x15d7, 0x545, 0x1502, 0x3af, 0x1b45, 0xff3, 0x1423, 0x1574, 0x1c5a, 0xff0, 0x1663, 0x114b, 0xc99, 0x1c89, 0x11f0, 0x15fd, 0x17a1, 0x14dd, 0x17f7, 0x1451, 0x5af, 0x17}} #elif RADIX == 32 -{0x8f85a92, 0xb656446, 0x5da94a7, 0x5f1c802, 0x22a2d7, 0xd1475f5, 0x4237f9e, 0x716aae9, 0x76637f8, 0x4b26629, 0xfb1f0e4, 0x6ede86b, 0x8a37f7a, 0x376be} +{{0x8f85a92, 0xb656446, 0x5da94a7, 0x5f1c802, 0x22a2d7, 0xd1475f5, 0x4237f9e, 0x716aae9, 0x76637f8, 0x4b26629, 0xfb1f0e4, 0x6ede86b, 0x8a37f7a, 0x376be}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x94a7b6564468f85a, 0x22a2d75f1c8025da, 0x94237f9ed1475f50, 0x62976637f8716aae, 0xde86bfb1f0e44b26, 0x25496be8a37f7a6e} +{{0x94a7b6564468f85a, 0x22a2d75f1c8025da, 0x94237f9ed1475f50, 0x62976637f8716aae, 0xde86bfb1f0e44b26, 0x25496be8a37f7a6e}} #else -{0x4f6cac88d1f0b5, 0x5d7c7200976a52, 0x768a3afa811516, 
0x716aae94237f9, 0x964cc52ecc6ff, 0x1bb7a1afec7c39, 0x264b5f451bfbd} +{{0x4f6cac88d1f0b5, 0x5d7c7200976a52, 0x768a3afa811516, 0x716aae94237f9, 0x964cc52ecc6ff, 0x1bb7a1afec7c39, 0x264b5f451bfbd}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0xc89, 0x16f8, 0x1bcf, 0x14c7, 0x1c81, 0x1c37, 0x3b1, 0xb00, 0x5e, 0xdb5, 0x920, 0x14db, 0x41, 0x1bd7, 0x159d, 0x1889, 0x1318, 0x95d, 0x13d5, 0x46b, 0x18bd, 0x1bf1, 0x1bf6, 0x1ba2, 0x2d6, 0x1b06, 0x17c1, 0x1a40, 0x1f02, 0x11} +{{0xc89, 0x16f8, 0x1bcf, 0x14c7, 0x1c81, 0x1c37, 0x3b1, 0xb00, 0x5e, 0xdb5, 0x920, 0x14db, 0x41, 0x1bd7, 0x159d, 0x1889, 0x1318, 0x95d, 0x13d5, 0x46b, 0x18bd, 0x1bf1, 0x1bf6, 0x1ba2, 0x2d6, 0x1b06, 0x17c1, 0x1a40, 0x1f02, 0x11}} #elif RADIX == 32 -{0xb7c3226, 0x698fbcf, 0x1e1bf20, 0x796003b, 0x206da81, 0x1069b69, 0x59ddeb8, 0xcc63113, 0x73d54ae, 0x8e2f48d, 0x45bf6df, 0x830b5b7, 0x4817c1d, 0xdc0b} +{{0xb7c3226, 0x698fbcf, 0x1e1bf20, 0x796003b, 0x206da81, 0x1069b69, 0x59ddeb8, 0xcc63113, 0x73d54ae, 0x8e2f48d, 0x45bf6df, 0x830b5b7, 0x4817c1d, 0xdc0b}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xbf20698fbcfb7c32, 0x6da81796003b1e1, 0x359ddeb81069b692, 0x48d73d54aecc6311, 0xb5b745bf6df8e2f, 0x9b3c0b4817c1d83} +{{0xbf20698fbcfb7c32, 0x6da81796003b1e1, 0x359ddeb81069b692, 0x48d73d54aecc6311, 0xb5b745bf6df8e2f, 0x9b3c0b4817c1d83}} #else -{0x40d31f79f6f864, 0x5e5800ec786fc, 0x40834db49036d4, 0x6cc6311359ddeb, 0x71c5e91ae7aa95, 0x60c2d6dd16fdb7, 0x4d9e05a40be0e} +{{0x40d31f79f6f864, 0x5e5800ec786fc, 0x40834db49036d4, 0x6cc6311359ddeb, 0x71c5e91ae7aa95, 0x60c2d6dd16fdb7, 0x4d9e05a40be0e}} #endif #endif , #if 0 #elif RADIX == 16 -{0x8c0, 0x125b, 0x1d1c, 0x8a8, 0x1c41, 0xbb7, 0x15bf, 0x15ec, 0x959, 0x1fc5, 0xc2, 0x2ff, 0x1dd2, 0x1c02, 0x9db, 0x139d, 0x9a, 0x1654, 0xce7, 0xf6d, 0x13e5, 0x19be, 0x1f28, 0x161c, 0xe9f, 0x940, 0x77d, 0x162c, 0x385, 0x4} +{{0x8c0, 0x125b, 0x1d1c, 0x8a8, 0x1c41, 0xbb7, 0x15bf, 0x15ec, 0x959, 0x1fc5, 0xc2, 0x2ff, 0x1dd2, 0x1c02, 0x9db, 0x139d, 0x9a, 0x1654, 0xce7, 0xf6d, 0x13e5, 0x19be, 0x1f28, 0x161c, 0xe9f, 0x940, 0x77d, 
0x162c, 0x385, 0x4}} #elif RADIX == 32 -{0x92da300, 0x5151d1c, 0xf5dbf10, 0x66bd95b, 0xc2fe2a5, 0x7485fe0, 0x9dbe017, 0x26a73a, 0xace7b2a, 0xf4f95ed, 0x39f28cd, 0xa03a7ec, 0xc5877d4, 0x20e16} +{{0x92da300, 0x5151d1c, 0xf5dbf10, 0x66bd95b, 0xc2fe2a5, 0x7485fe0, 0x9dbe017, 0x26a73a, 0xace7b2a, 0xf4f95ed, 0x39f28cd, 0xa03a7ec, 0xc5877d4, 0x20e16}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xbf105151d1c92da3, 0x2fe2a566bd95bf5d, 0xa9dbe0177485fe0c, 0x5edace7b2a026a73, 0x3a7ec39f28cdf4f9, 0x20e16c5877d4a0} +{{0xbf105151d1c92da3, 0x2fe2a566bd95bf5d, 0xa9dbe0177485fe0c, 0x5edace7b2a026a73, 0x3a7ec39f28cdf4f9, 0x20e16c5877d4a0}} #else -{0x20a2a3a3925b46, 0x159af656fd76fc, 0x3ba42ff0617f15, 0x2026a73a9dbe01, 0x3e9f2bdb59cf65, 0x280e9fb0e7ca33, 0x1070b62c3bea} +{{0x20a2a3a3925b46, 0x159af656fd76fc, 0x3ba42ff0617f15, 0x2026a73a9dbe01, 0x3e9f2bdb59cf65, 0x280e9fb0e7ca33, 0x1070b62c3bea}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0xd30, 0x670, 0x165f, 0x18f8, 0x3fe, 0x11e5, 0x663, 0x270, 0x18cb, 0x42b, 0x11c3, 0xe0a, 0x4fc, 0x18ad, 0xfd0, 0x3fa, 0x1957, 0x1544, 0x941, 0x181e, 0x661, 0x18b9, 0x74a, 0xa70, 0x866, 0x11f8, 0xd20, 0xae3, 0x19b8, 0xb} +{{0xd30, 0x670, 0x165f, 0x18f8, 0x3fe, 0x11e5, 0x663, 0x270, 0x18cb, 0x42b, 0x11c3, 0xe0a, 0x4fc, 0x18ad, 0xfd0, 0x3fa, 0x1957, 0x1544, 0x941, 0x181e, 0x661, 0x18b9, 0x74a, 0xa70, 0x866, 0x11f8, 0xd20, 0xae3, 0x19b8, 0xb}} #elif RADIX == 32 -{0x33834c1, 0xb1f165f, 0x38f28ff, 0x2c4e066, 0xc3215e3, 0x3f1c151, 0xfd0c569, 0x655c7f4, 0xc941aa2, 0xc998703, 0xe074ac5, 0xfc21994, 0x5c6d208, 0x1d6e1} +{{0x33834c1, 0xb1f165f, 0x38f28ff, 0x2c4e066, 0xc3215e3, 0x3f1c151, 0xfd0c569, 0x655c7f4, 0xc941aa2, 0xc998703, 0xe074ac5, 0xfc21994, 0x5c6d208, 0x1d6e1}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x28ffb1f165f33834, 0x3215e32c4e06638f, 0x4fd0c5693f1c151c, 0x703c941aa2655c7f, 0x21994e074ac5c998, 0x311e6e15c6d208fc} +{{0x28ffb1f165f33834, 0x3215e32c4e06638f, 0x4fd0c5693f1c151c, 0x703c941aa2655c7f, 0x21994e074ac5c998, 0x311e6e15c6d208fc}} #else -{0x7f63e2cbe67069, 
0xcb138198e3ca3, 0x49f8e0a8e190af, 0x2655c7f4fd0c56, 0x39330e07928354, 0x3f08665381d2b1, 0x84f370ae36904} +{{0x7f63e2cbe67069, 0xcb138198e3ca3, 0x49f8e0a8e190af, 0x2655c7f4fd0c56, 0x39330e07928354, 0x3f08665381d2b1, 0x84f370ae36904}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1f47, 0x9e3, 0x5d, 0xdc6, 0x18a3, 0x1c99, 0x1253, 0x179f, 0x16b, 0x1b87, 0x27a, 0x9f8, 0x1064, 0x9ed, 0xe66, 0x47d, 0x4e9, 0x1805, 0x1349, 0x40, 0x1bbd, 0x7f6, 0x1c57, 0x1f9f, 0x11e9, 0x14cf, 0xe61, 0x1892, 0x833, 0x10} +{{0x1f47, 0x9e3, 0x5d, 0xdc6, 0x18a3, 0x1c99, 0x1253, 0x179f, 0x16b, 0x1b87, 0x27a, 0x9f8, 0x1064, 0x9ed, 0xe66, 0x47d, 0x4e9, 0x1805, 0x1349, 0x40, 0x1bbd, 0x7f6, 0x1c57, 0x1f9f, 0x11e9, 0x14cf, 0xe61, 0x1892, 0x833, 0x10}} #elif RADIX == 32 -{0x4f1fd1e, 0xdb8c05d, 0x3e4ce28, 0xaef3f25, 0x7adc385, 0x1913f02, 0xe664f6c, 0x93a48fa, 0x1349c02, 0xb6ef408, 0x3fc573f, 0x67c7a7f, 0x124e61a, 0xcf} +{{0x4f1fd1e, 0xdb8c05d, 0x3e4ce28, 0xaef3f25, 0x7adc385, 0x1913f02, 0xe664f6c, 0x93a48fa, 0x1349c02, 0xb6ef408, 0x3fc573f, 0x67c7a7f, 0x124e61a, 0xcf}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xce28db8c05d4f1fd, 0xadc385aef3f253e4, 0xae664f6c1913f027, 0x4081349c0293a48f, 0xc7a7f3fc573fb6ef, 0x79e0cf124e61a67} +{{0xce28db8c05d4f1fd, 0xadc385aef3f253e4, 0xae664f6c1913f027, 0x4081349c0293a48f, 0xc7a7f3fc573fb6ef, 0x79e0cf124e61a67}} #else -{0x51b7180ba9e3fa, 0x16bbcfc94f9338, 0x60c89f813d6e1c, 0x293a48fae664f6, 0x76dde810269380, 0x19f1e9fcff15cf, 0x3cf067892730d} +{{0x51b7180ba9e3fa, 0x16bbcfc94f9338, 0x60c89f813d6e1c, 0x293a48fae664f6, 0x76dde810269380, 0x19f1e9fcff15cf, 0x3cf067892730d}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -2860,261 +2860,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x17bf, 0xbb8, 0x485, 0x1296, 0x1b32, 0xe0b, 0x13f9, 0x15f5, 0x1a2b, 0xff6, 0xf53, 0x70a, 0x36c, 0x40f, 0xa89, 0xca6, 0x37e, 0x1488, 0x1796, 0x1be2, 0x1e0f, 0xf10, 0x1628, 0x1a20, 0x1ee7, 0x1e53, 0x48c, 0x94, 0xd53, 0xd} +{{0x17bf, 0xbb8, 0x485, 0x1296, 0x1b32, 0xe0b, 0x13f9, 0x15f5, 
0x1a2b, 0xff6, 0xf53, 0x70a, 0x36c, 0x40f, 0xa89, 0xca6, 0x37e, 0x1488, 0x1796, 0x1be2, 0x1e0f, 0xf10, 0x1628, 0x1a20, 0x1ee7, 0x1e53, 0x48c, 0x94, 0xd53, 0xd}} #elif RADIX == 32 -{0x5dc5efd, 0xa52c485, 0x9705ecc, 0xaebeb3f, 0x537fb68, 0xdb0e14f, 0xa892078, 0xdf994c, 0x5796a44, 0x8783f7c, 0x4162878, 0x29fb9f4, 0x12848cf, 0x2a54c} +{{0x5dc5efd, 0xa52c485, 0x9705ecc, 0xaebeb3f, 0x537fb68, 0xdb0e14f, 0xa892078, 0xdf994c, 0x5796a44, 0x8783f7c, 0x4162878, 0x29fb9f4, 0x12848cf, 0x2a54c}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x5ecca52c4855dc5e, 0x37fb68aebeb3f970, 0xca892078db0e14f5, 0xf7c5796a440df994, 0xfb9f441628788783, 0x406754c12848cf29} +{{0x5ecca52c4855dc5e, 0x37fb68aebeb3f970, 0xca892078db0e14f5, 0xf7c5796a440df994, 0xfb9f441628788783, 0x406754c12848cf29}} #else -{0x194a5890abb8bd, 0x22bafacfe5c17b, 0x46d870a7a9bfdb, 0x40df994ca89207, 0x10f07ef8af2d48, 0x4a7ee7d1058a1e, 0xff3aa60942467} +{{0x194a5890abb8bd, 0x22bafacfe5c17b, 0x46d870a7a9bfdb, 0x40df994ca89207, 0x10f07ef8af2d48, 0x4a7ee7d1058a1e, 0xff3aa60942467}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x66d, 0xaee, 0x1121, 0x14a5, 0x1ecc, 0xb82, 0xcfe, 0x1d7d, 0x168a, 0x1bfd, 0x13d4, 0x1c2, 0x18db, 0x903, 0x12a2, 0x1329, 0xdf, 0x1522, 0x15e5, 0x1ef8, 0x783, 0x3c4, 0x58a, 0x1e88, 0x1fb9, 0x794, 0x123, 0x1825, 0x1754, 0x1c} +{{0x66d, 
0xaee, 0x1121, 0x14a5, 0x1ecc, 0xb82, 0xcfe, 0x1d7d, 0x168a, 0x1bfd, 0x13d4, 0x1c2, 0x18db, 0x903, 0x12a2, 0x1329, 0xdf, 0x1522, 0x15e5, 0x1ef8, 0x783, 0x3c4, 0x58a, 0x1e88, 0x1fb9, 0x794, 0x123, 0x1825, 0x1754, 0x1c}} #elif RADIX == 32 -{0x57719b7, 0x294b121, 0xe5c17b3, 0x2bafacf, 0xd4dfeda, 0x36c3853, 0x2a2481e, 0x37e653, 0x15e5a91, 0x21e0fdf, 0x1058a1e, 0xca7ee7d, 0x4a1233, 0x22d53} +{{0x57719b7, 0x294b121, 0xe5c17b3, 0x2bafacf, 0xd4dfeda, 0x36c3853, 0x2a2481e, 0x37e653, 0x15e5a91, 0x21e0fdf, 0x1058a1e, 0xca7ee7d, 0x4a1233, 0x22d53}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x17b3294b12157719, 0x4dfeda2bafacfe5c, 0x32a2481e36c3853d, 0xfdf15e5a91037e65, 0x7ee7d1058a1e21e0, 0x2e99d5304a1233ca} +{{0x17b3294b12157719, 0x4dfeda2bafacfe5c, 0x32a2481e36c3853d, 0xfdf15e5a91037e65, 0x7ee7d1058a1e21e0, 0x2e99d5304a1233ca}} #else -{0x665296242aee33, 0x68aebeb3f9705e, 0x71b61c29ea6ff6, 0x1037e6532a2481, 0x443c1fbe2bcb52, 0x729fb9f4416287, 0x70cea98250919} +{{0x665296242aee33, 0x68aebeb3f9705e, 0x71b61c29ea6ff6, 0x1037e6532a2481, 0x443c1fbe2bcb52, 0x729fb9f4416287, 0x70cea98250919}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x3e9, 0x9f6, 0x1c50, 0x27e, 0xa85, 0x39c, 0xa7b, 0x177c, 0xdfc, 0x77e, 0x1490, 0x11b8, 0xd2b, 0x17dc, 0xd7c, 0x16a0, 0xe21, 0xb86, 0x15bb, 0x844, 0x146c, 0xe51, 0xc6d, 
0x143d, 0x1d2b, 0x1715, 0x18bb, 0xdc8, 0x55d, 0x16} +{{0x3e9, 0x9f6, 0x1c50, 0x27e, 0xa85, 0x39c, 0xa7b, 0x177c, 0xdfc, 0x77e, 0x1490, 0x11b8, 0xd2b, 0x17dc, 0xd7c, 0x16a0, 0xe21, 0xb86, 0x15bb, 0x844, 0x146c, 0xe51, 0xc6d, 0x143d, 0x1d2b, 0x1715, 0x18bb, 0xdc8, 0x55d, 0x16}} #elif RADIX == 32 -{0x4fb0fa6, 0x44fdc50, 0xb1ce2a1, 0xf2ef8a7, 0x903bf37, 0x4ae3714, 0xd7cbee3, 0x3886d40, 0x95bb5c3, 0x8d1b108, 0x7ac6d72, 0x8af4ae8, 0xb918bbb, 0x2f575} +{{0x4fb0fa6, 0x44fdc50, 0xb1ce2a1, 0xf2ef8a7, 0x903bf37, 0x4ae3714, 0xd7cbee3, 0x3886d40, 0x95bb5c3, 0x8d1b108, 0x7ac6d72, 0x8af4ae8, 0xb918bbb, 0x2f575}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xe2a144fdc504fb0f, 0x3bf37f2ef8a7b1c, 0xd7cbee34ae37149, 0x10895bb5c33886d4, 0xf4ae87ac6d728d1b, 0x2a55575b918bbb8a} +{{0xe2a144fdc504fb0f, 0x3bf37f2ef8a7b1c, 0xd7cbee34ae37149, 0x10895bb5c33886d4, 0xf4ae87ac6d728d1b, 0x2a55575b918bbb8a}} #else -{0x4289fb8a09f61f, 0x5fcbbe29ec738a, 0x1a571b8a481df9, 0x33886d40d7cbee, 0x51a362112b76b8, 0x62bd2ba1eb1b5c, 0x4eaabadc8c5dd} +{{0x4289fb8a09f61f, 0x5fcbbe29ec738a, 0x1a571b8a481df9, 0x33886d40d7cbee, 0x51a362112b76b8, 0x62bd2ba1eb1b5c, 0x4eaabadc8c5dd}} #endif #endif , #if 0 #elif RADIX == 16 -{0x793, 0x1095, 0x8d0, 0x676, 0x2be, 0x1a9d, 0x6d6, 0x1d0, 0x112a, 0x18e1, 0x1741, 0xc68, 0x156d, 0x113f, 0x181e, 0x201, 0xcd7, 0xbb7, 0xdb, 0x64c, 0x181e, 0x63, 0x965, 0xf2, 0xc95, 0x50d, 0x1ec2, 0x1c03, 0x5b4, 0x1b} +{{0x793, 0x1095, 0x8d0, 0x676, 0x2be, 0x1a9d, 0x6d6, 0x1d0, 0x112a, 0x18e1, 0x1741, 0xc68, 0x156d, 0x113f, 0x181e, 0x201, 0xcd7, 0xbb7, 0xdb, 0x64c, 0x181e, 0x63, 0x965, 0xf2, 0xc95, 0x50d, 0x1ec2, 0x1c03, 0x5b4, 0x1b}} #elif RADIX == 32 -{0x84a9e4f, 0x8cec8d0, 0x6d4e8af, 0xa83a06d, 0x41c70c4, 0x5b58d17, 0x81e89fd, 0xb35c403, 0x80db5db, 0x1e078c9, 0xe496503, 0x86b2541, 0x807ec22, 0x166d3} +{{0x84a9e4f, 0x8cec8d0, 0x6d4e8af, 0xa83a06d, 0x41c70c4, 0x5b58d17, 0x81e89fd, 0xb35c403, 0x80db5db, 0x1e078c9, 0xe496503, 0x86b2541, 0x807ec22, 0x166d3}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xe8af8cec8d084a9e, 0x1c70c4a83a06d6d4, 0x381e89fd5b58d174, 0x8c980db5dbb35c40, 0xb2541e4965031e07, 0x14256d3807ec2286} +{{0xe8af8cec8d084a9e, 0x1c70c4a83a06d6d4, 0x381e89fd5b58d174, 0x8c980db5dbb35c40, 0xb2541e4965031e07, 0x14256d3807ec2286}} #else -{0x5f19d91a10953c, 0x12a0e81b5b53a2, 0x6adac68ba0e386, 0x3b35c40381e89f, 0x63c0f19301b6bb, 0x21ac9507925940, 0xa12b69c03f611} +{{0x5f19d91a10953c, 0x12a0e81b5b53a2, 0x6adac68ba0e386, 0x3b35c40381e89f, 0x63c0f19301b6bb, 0x21ac9507925940, 0xa12b69c03f611}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x71d, 0xf0e, 0x506, 0x1aec, 0x3f6, 0x2c1, 0x17dd, 0x43f, 0x1552, 0x1488, 0x10c3, 0x5ea, 0xfd4, 0x634, 0x1eb1, 0x1711, 0x1424, 0xeb1, 0xfe1, 0xa0a, 0x165f, 0x5c8, 0x1544, 0x1493, 0x329, 0x19ec, 0x1db4, 0x983, 0x790, 0x1d} +{{0x71d, 0xf0e, 0x506, 0x1aec, 0x3f6, 0x2c1, 0x17dd, 0x43f, 0x1552, 0x1488, 0x10c3, 0x5ea, 0xfd4, 0x634, 0x1eb1, 0x1711, 0x1424, 0xeb1, 0xfe1, 0xa0a, 0x165f, 0x5c8, 0x1544, 0x1493, 0x329, 0x19ec, 0x1db4, 0x983, 0x790, 0x1d}} #elif RADIX == 32 -{0x7871c77, 0xb5d8506, 0xd1608fd, 0x4887f7d, 0xc3a4455, 0xf50bd50, 0xeb131a3, 0xd092e23, 0x4fe1758, 0x4597d41, 0x275442e, 0xf60ca69, 0x307db4c, 0x26e41} +{{0x7871c77, 0xb5d8506, 0xd1608fd, 0x4887f7d, 0xc3a4455, 0xf50bd50, 0xeb131a3, 0xd092e23, 0x4fe1758, 0x4597d41, 0x275442e, 0xf60ca69, 0x307db4c, 0x26e41}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x8fdb5d85067871c, 0x3a44554887f7dd16, 0x3eb131a3f50bd50c, 0xd414fe1758d092e2, 0xca69275442e4597, 0x1e5de41307db4cf6} +{{0x8fdb5d85067871c, 0x3a44554887f7dd16, 0x3eb131a3f50bd50c, 0xd414fe1758d092e2, 0xca69275442e4597, 0x1e5de41307db4cf6}} #else -{0x7b6bb0a0cf0e38, 0x55221fdf745823, 0x1fa85ea861d222, 0xd092e23eb131a, 0x48b2fa829fc2eb, 0x3d8329a49d510b, 0xf2ef20983eda6} +{{0x7b6bb0a0cf0e38, 0x55221fdf745823, 0x1fa85ea861d222, 0xd092e23eb131a, 0x48b2fa829fc2eb, 0x3d8329a49d510b, 0xf2ef20983eda6}} #endif #endif , #if 0 #elif RADIX == 16 -{0x704, 0x1718, 0x1f41, 0x1569, 0x1353, 0x403, 0x8ba, 0xd3b, 0x1e9a, 0xca6, 0x1433, 0xc05, 0x2dd, 0xf7d, 0x12c8, 0x1109, 0x1797, 0x4e2, 0xf77, 0x569, 0xfcf, 0x1dd4, 0x11a4, 0x1354, 0x1563, 0x14b7, 0x6ad, 0xf7e, 0x251, 0xe} +{{0x704, 0x1718, 0x1f41, 0x1569, 0x1353, 0x403, 0x8ba, 0xd3b, 0x1e9a, 0xca6, 0x1433, 0xc05, 0x2dd, 0xf7d, 0x12c8, 0x1109, 0x1797, 0x4e2, 0xf77, 0x569, 0xfcf, 0x1dd4, 0x11a4, 0x1354, 0x1563, 0x14b7, 0x6ad, 0xf7e, 0x251, 0xe}} #elif RADIX == 32 -{0xb8c1c11, 0xead3f41, 0xa201cd4, 0x69a768b, 0x336537a, 0xb7580b4, 0x2c87be8, 0x5e5e213, 0x2f77271, 0xa3f3cad, 0xa91a4ee, 0x5bd58e6, 0xefc6ada, 0x2f945} +{{0xb8c1c11, 0xead3f41, 0xa201cd4, 0x69a768b, 0x336537a, 0xb7580b4, 0x2c87be8, 0x5e5e213, 0x2f77271, 0xa3f3cad, 0xa91a4ee, 0x5bd58e6, 0xefc6ada, 0x2f945}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x1cd4ead3f41b8c1c, 0x36537a69a768ba20, 0x32c87be8b7580b43, 0xcad2f772715e5e21, 0xd58e6a91a4eea3f3, 0x480945efc6ada5b} +{{0x1cd4ead3f41b8c1c, 0x36537a69a768ba20, 0x32c87be8b7580b43, 0xcad2f772715e5e21, 0xd58e6a91a4eea3f3, 0x480945efc6ada5b}} #else -{0x29d5a7e8371838, 0x69a69da2e88073, 0x45bac05a19b29b, 0x15e5e2132c87be, 0x547e795a5eee4e, 0x16f5639aa4693b, 0x2404a2f7e356d} +{{0x29d5a7e8371838, 0x69a69da2e88073, 0x45bac05a19b29b, 0x15e5e2132c87be, 0x547e795a5eee4e, 0x16f5639aa4693b, 0x2404a2f7e356d}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0xf6, 0x15a2, 0x1cbc, 0x185c, 0x9a1, 0xc2f, 0x1123, 0x11, 0xda7, 0x1628, 0x41, 0x1163, 0x12f7, 0x9aa, 0x1235, 0x1444, 0x1c4a, 0x3b6, 0xfee, 0x96, 0x1ed, 0x1f4d, 0x5ec, 0x1bf2, 0x1bca, 0x151d, 0x58f, 0x293, 0x960, 0x20} +{{0xf6, 0x15a2, 0x1cbc, 0x185c, 0x9a1, 0xc2f, 0x1123, 0x11, 0xda7, 0x1628, 0x41, 0x1163, 0x12f7, 0x9aa, 0x1235, 0x1444, 0x1c4a, 0x3b6, 0xfee, 0x96, 0x1ed, 0x1f4d, 0x5ec, 0x1bf2, 0x1bca, 0x151d, 0x58f, 0x293, 0x960, 0x20}} #elif RADIX == 32 -{0xad103db, 0x70b9cbc, 0x3617a68, 0x9c02312, 0x41b1436, 0xbde2c60, 0x2354d54, 0x712a889, 0xcfee1db, 0x687b412, 0xe45ecfa, 0x8eef2b7, 0x52658fa, 0x3f580} +{{0xad103db, 0x70b9cbc, 0x3617a68, 0x9c02312, 0x41b1436, 0xbde2c60, 0x2354d54, 0x712a889, 0xcfee1db, 0x687b412, 0xe45ecfa, 0x8eef2b7, 0x52658fa, 0x3f580}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x7a6870b9cbcad103, 0x1b14369c02312361, 0x92354d54bde2c604, 0x412cfee1db712a88, 0xef2b7e45ecfa687b, 0x37da58052658fa8e} +{{0x7a6870b9cbcad103, 0x1b14369c02312361, 0x92354d54bde2c604, 0x412cfee1db712a88, 0xef2b7e45ecfa687b, 0x37da58052658fa8e}} #else -{0x50e1739795a207, 0x5a7008c48d85e9, 0x25ef163020d8a1, 0x3712a8892354d5, 0x4d0f68259fdc3b, 0x23bbcadf917b3e, 0xbad2c02932c7d} +{{0x50e1739795a207, 0x5a7008c48d85e9, 0x25ef163020d8a1, 0x3712a8892354d5, 0x4d0f68259fdc3b, 0x23bbcadf917b3e, 0xbad2c02932c7d}} #endif #endif , #if 0 #elif RADIX == 16 -{0xbc5, 0xa1d, 0xe8a, 0xe9c, 0x1af1, 0x13b5, 0xa68, 0x4a4, 0x135e, 0x171, 0x716, 0x2c2, 0x1c2b, 0x332, 0x349, 0x138c, 0x168b, 0x21c, 0x1629, 0xb97, 0x186, 0x629, 0x6e8, 0x497, 0x128c, 0x19d2, 0xcc1, 0x121, 0x250, 0x1a} +{{0xbc5, 0xa1d, 0xe8a, 0xe9c, 0x1af1, 0x13b5, 0xa68, 0x4a4, 0x135e, 0x171, 0x716, 0x2c2, 0x1c2b, 0x332, 0x349, 0x138c, 0x168b, 0x21c, 0x1629, 0xb97, 0x186, 0x629, 0x6e8, 0x497, 0x128c, 0x19d2, 0xcc1, 0x121, 0x250, 0x1a}} #elif RADIX == 32 -{0x50eaf17, 0x5d38e8a, 0x89daebc, 0x78948a6, 0x160b8cd, 0xac5847, 0x3491997, 0x5a2e718, 0xf62910e, 0x4861972, 0x2e6e831, 0xe94a309, 0x242cc1c, 0xd940} +{{0x50eaf17, 0x5d38e8a, 0x89daebc, 0x78948a6, 0x160b8cd, 0xac5847, 0x3491997, 0x5a2e718, 0xf62910e, 0x4861972, 0x2e6e831, 0xe94a309, 0x242cc1c, 0xd940}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xaebc5d38e8a50eaf, 0x60b8cd78948a689d, 0x834919970ac58471, 0x972f62910e5a2e71, 0x4a3092e6e8314861, 
0x5e4940242cc1ce9} +{{0xaebc5d38e8a50eaf, 0x60b8cd78948a689d, 0x834919970ac58471, 0x972f62910e5a2e71, 0x4a3092e6e8314861, 0x5e4940242cc1ce9}} #else -{0x78ba71d14a1d5e, 0x35e25229a276ba, 0x38562c238b05c6, 0x65a2e718349199, 0x290c32e5ec5221, 0x3a528c24b9ba0c, 0x2f24a0121660e} +{{0x78ba71d14a1d5e, 0x35e25229a276ba, 0x38562c238b05c6, 0x65a2e718349199, 0x290c32e5ec5221, 0x3a528c24b9ba0c, 0x2f24a0121660e}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -3336,261 +3336,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0xb89, 0xf4d, 0xbd8, 0x18d2, 0x781, 0x1f79, 0xef5, 0xcfd, 0xa7d, 0x121a, 0x59e, 0x18a3, 0x2b, 0x122f, 0x9d5, 0x18aa, 0x105f, 0x1bcd, 0x871, 0x1f9d, 0xae4, 0xc5c, 0x385, 0xbe8, 0x1878, 0xcd8, 0x7a1, 0x7c8, 0x5b4, 0xe} +{{0xb89, 0xf4d, 0xbd8, 0x18d2, 0x781, 0x1f79, 0xef5, 0xcfd, 0xa7d, 0x121a, 0x59e, 0x18a3, 0x2b, 0x122f, 0x9d5, 0x18aa, 0x105f, 0x1bcd, 0x871, 0x1f9d, 0xae4, 0xc5c, 0x385, 0xbe8, 0x1878, 0xcd8, 0x7a1, 0x7c8, 0x5b4, 0xe}} #elif RADIX == 32 -{0x7a6ae25, 0x71a4bd8, 0x5fbc9e0, 0xf59faef, 0x9e90d29, 0xaf1465, 0x9d59178, 0xc17f154, 0xa871de6, 0xe2b93f3, 0xd038562, 0x6c61e17, 0xf907a16, 0x306d0} +{{0x7a6ae25, 0x71a4bd8, 0x5fbc9e0, 0xf59faef, 0x9e90d29, 0xaf1465, 0x9d59178, 0xc17f154, 0xa871de6, 0xe2b93f3, 0xd038562, 0x6c61e17, 0xf907a16, 0x306d0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xc9e071a4bd87a6ae, 0xe90d29f59faef5fb, 0x49d591780af14659, 0x3f3a871de6c17f15, 0x61e17d038562e2b9, 0x9956d0f907a166c} +{{0xc9e071a4bd87a6ae, 0xe90d29f59faef5fb, 0x49d591780af14659, 0x3f3a871de6c17f15, 0x61e17d038562e2b9, 0x9956d0f907a166c}} #else -{0x40e3497b0f4d5c, 0x27d67ebbd7ef27, 0x40578a32cf4869, 0x6c17f1549d5917, 0x5c5727e750e3bc, 0x1b18785f40e158, 0x4cab687c83d0b} +{{0x40e3497b0f4d5c, 0x27d67ebbd7ef27, 0x40578a32cf4869, 0x6c17f1549d5917, 0x5c5727e750e3bc, 0x1b18785f40e158, 0x4cab687c83d0b}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0xb60, 0x3d3, 0x12f6, 0xe34, 0x9e0, 0xfde, 0xbbd, 0xb3f, 0x129f, 0x1486, 0x1967, 0x1e28, 0x180a, 0xc8b, 0x1275, 0x1e2a, 0xc17, 0xef3, 0xa1c, 0x7e7, 0x2b9, 0xb17, 0xe1, 0x2fa, 0x61e, 0xb36, 0x1e8, 0x1f2, 0x156d, 0xc} +{{0xb60, 0x3d3, 0x12f6, 0xe34, 0x9e0, 0xfde, 0xbbd, 0xb3f, 0x129f, 0x1486, 0x1967, 0x1e28, 0x180a, 0xc8b, 0x1275, 0x1e2a, 0xc17, 0xef3, 0xa1c, 0x7e7, 0x2b9, 0xb17, 0xe1, 0x2fa, 0x61e, 0xb36, 0x1e8, 0x1f2, 0x156d, 0xc}} #elif RADIX == 32 -{0x1e9ad81, 0x1c692f6, 0xd7ef278, 0x7d67ebb, 0x67a434a, 0x2bc519, 0x275645e, 0xb05fc55, 0xea1c779, 0xb8ae4fc, 0xf40e158, 0x9b18785, 0x3e41e85, 0x245b4} +{{0x1e9ad81, 0x1c692f6, 0xd7ef278, 0x7d67ebb, 0x67a434a, 0x2bc519, 0x275645e, 0xb05fc55, 0xea1c779, 0xb8ae4fc, 0xf40e158, 0x9b18785, 0x3e41e85, 0x245b4}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf2781c692f61e9ad, 0x7a434a7d67ebbd7e, 0x5275645e02bc5196, 0x4fcea1c779b05fc5, 0x18785f40e158b8ae, 0x20e55b43e41e859b} +{{0xf2781c692f61e9ad, 0x7a434a7d67ebbd7e, 0x5275645e02bc5196, 0x4fcea1c779b05fc5, 0x18785f40e158b8ae, 0x20e55b43e41e859b}} #else -{0x7038d25ec3d35b, 0x29f59faef5fbc9, 0x7015e28cb3d21a, 0x1b05fc55275645, 0x1715c9f9d438ef, 0x66c61e17d03856, 0x32ada1f20f42} +{{0x7038d25ec3d35b, 0x29f59faef5fbc9, 0x7015e28cb3d21a, 0x1b05fc55275645, 0x1715c9f9d438ef, 0x66c61e17d03856, 0x32ada1f20f42}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x441, 0x1774, 0x1527, 0x106a, 0x577, 0x3fc, 0xf92, 0x12c4, 0x96a, 0x10ea, 0x10f5, 0x11c9, 0x1f8, 0x1407, 0x1bcc, 0x16c4, 0x15c1, 0x790, 0x5bc, 0x1c28, 0xbc6, 0x123c, 0xf19, 0x1d6f, 0x361, 0x1fcd, 0x1dc9, 0x20c, 0x17c6, 0x6} +{{0x441, 0x1774, 0x1527, 0x106a, 0x577, 0x3fc, 0xf92, 0x12c4, 0x96a, 0x10ea, 0x10f5, 0x11c9, 0x1f8, 0x1407, 0x1bcc, 0x16c4, 0x15c1, 0x790, 0x5bc, 0x1c28, 0xbc6, 0x123c, 0xf19, 0x1d6f, 0x361, 0x1fcd, 0x1dc9, 0x20c, 0x17c6, 0x6}} #elif RADIX == 32 -{0xbba1104, 0xe0d5527, 0x21fe15d, 0xaa588f9, 0xf587525, 0x7e23930, 0xbcca038, 0x5706d89, 0x5bc3c8, 0xe2f1b85, 0xdef1991, 0xe68d87a, 0x419dc9f, 0x35f18} +{{0xbba1104, 0xe0d5527, 0x21fe15d, 0xaa588f9, 0xf587525, 0x7e23930, 0xbcca038, 0x5706d89, 0x5bc3c8, 0xe2f1b85, 0xdef1991, 0xe68d87a, 0x419dc9f, 0x35f18}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xe15de0d5527bba11, 0x587525aa588f921f, 0x9bcca0387e23930f, 0xb8505bc3c85706d8, 0x8d87adef1991e2f1, 0x139f18419dc9fe6} +{{0xe15de0d5527bba11, 0x587525aa588f921f, 0x9bcca0387e23930f, 0xb8505bc3c85706d8, 0x8d87adef1991e2f1, 0x139f18419dc9fe6}} #else -{0x3bc1aaa4f77422, 0x16a9623e487f85, 0x43f11c987ac3a9, 0x5706d89bcca03, 0x3c5e370a0b7879, 0x79a361eb7bc664, 0x9cf8c20cee4f} +{{0x3bc1aaa4f77422, 0x16a9623e487f85, 0x43f11c987ac3a9, 0x5706d89bcca03, 0x3c5e370a0b7879, 0x79a361eb7bc664, 0x9cf8c20cee4f}} #endif #endif , #if 0 #elif RADIX == 16 -{0x98a, 0x1bbb, 0x7d8, 0xd84, 
0x3fe, 0x90b, 0xfe8, 0x12c3, 0x1e84, 0xde3, 0xbe1, 0x1217, 0x1925, 0x84a, 0xa0e, 0x7cd, 0x1854, 0x768, 0x6e6, 0x1d87, 0xfac, 0x6df, 0x109b, 0x64d, 0x9f2, 0x596, 0x435, 0x1918, 0x1095, 0x0} +{{0x98a, 0x1bbb, 0x7d8, 0xd84, 0x3fe, 0x90b, 0xfe8, 0x12c3, 0x1e84, 0xde3, 0xbe1, 0x1217, 0x1925, 0x84a, 0xa0e, 0x7cd, 0x1854, 0x768, 0x6e6, 0x1d87, 0xfac, 0x6df, 0x109b, 0x64d, 0x9f2, 0x596, 0x435, 0x1918, 0x1095, 0x0}} #elif RADIX == 32 -{0xddda628, 0x9b087d8, 0x84858ff, 0x12586fe, 0xe16f1fa, 0x49642eb, 0xa0e4256, 0x6150f9a, 0xe6e63b4, 0xfbeb3b0, 0x9b09b36, 0xcb27c8c, 0x2304352, 0x4257} +{{0xddda628, 0x9b087d8, 0x84858ff, 0x12586fe, 0xe16f1fa, 0x49642eb, 0xa0e4256, 0x6150f9a, 0xe6e63b4, 0xfbeb3b0, 0x9b09b36, 0xcb27c8c, 0x2304352, 0x4257}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x58ff9b087d8ddda6, 0x16f1fa12586fe848, 0xaa0e425649642ebe, 0x3b0e6e63b46150f9, 0x27c8c9b09b36fbeb, 0xa2c2572304352cb} +{{0x58ff9b087d8ddda6, 0x16f1fa12586fe848, 0xaa0e425649642ebe, 0x3b0e6e63b46150f9, 0x27c8c9b09b36fbeb, 0xa2c2572304352cb}} #else -{0x7f3610fb1bbb4c, 0x684961bfa12163, 0x324b2175f0b78f, 0x46150f9aa0e425, 0x5f7d6761cdcc76, 0x32c9f2326c26cd, 0x51612b91821a9} +{{0x7f3610fb1bbb4c, 0x684961bfa12163, 0x324b2175f0b78f, 0x46150f9aa0e425, 0x5f7d6761cdcc76, 0x32c9f2326c26cd, 0x51612b91821a9}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x17ec, 0x6b9, 0x1dc0, 0x1783, 0x18ee, 0xdd4, 0x1c7f, 0x1fb2, 0x16b0, 0x196e, 0x1e5a, 0x1fda, 0x11f9, 0x117, 0x1c30, 0x1a47, 0x2a2, 0x19e6, 0x1347, 0x2bb, 0x1463, 0x1f37, 0xa64, 0x3c6, 0x1910, 0x2bc, 0xbc0, 0x17e8, 0x1cfd, 0xa} +{{0x17ec, 0x6b9, 0x1dc0, 0x1783, 0x18ee, 0xdd4, 0x1c7f, 0x1fb2, 0x16b0, 0x196e, 0x1e5a, 0x1fda, 0x11f9, 0x117, 0x1c30, 0x1a47, 0x2a2, 0x19e6, 0x1347, 0x2bb, 0x1463, 0x1f37, 0xa64, 0x3c6, 0x1910, 0x2bc, 0xbc0, 0x17e8, 0x1cfd, 0xa}} #elif RADIX == 32 -{0x35cdfb1, 0xaf07dc0, 0xf6ea63b, 0xc3f65c7, 0x5acb75a, 0x7e7fb5e, 0xc3008bc, 0xa8b48f, 0x7347cf3, 0xbd18c57, 0x8ca64f9, 0x5e64407, 0xfd0bc01, 0x163f6} +{{0x35cdfb1, 0xaf07dc0, 0xf6ea63b, 0xc3f65c7, 0x5acb75a, 0x7e7fb5e, 
0xc3008bc, 0xa8b48f, 0x7347cf3, 0xbd18c57, 0x8ca64f9, 0x5e64407, 0xfd0bc01, 0x163f6}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xa63baf07dc035cdf, 0xacb75ac3f65c7f6e, 0xfc3008bc7e7fb5e5, 0xc577347cf30a8b48, 0x644078ca64f9bd18, 0x2d073f6fd0bc015e} +{{0xa63baf07dc035cdf, 0xacb75ac3f65c7f6e, 0xfc3008bc7e7fb5e5, 0xc577347cf30a8b48, 0x644078ca64f9bd18, 0x2d073f6fd0bc015e}} #else -{0x775e0fb806b9bf, 0x6b0fd971fdba98, 0x63f3fdaf2d65ba, 0x30a8b48fc3008b, 0x37a318aee68f9e, 0x5799101e32993e, 0x6439fb7e85e00} +{{0x775e0fb806b9bf, 0x6b0fd971fdba98, 0x63f3fdaf2d65ba, 0x30a8b48fc3008b, 0x37a318aee68f9e, 0x5799101e32993e, 0x6439fb7e85e00}} #endif #endif , #if 0 #elif RADIX == 16 -{0x440, 0x172e, 0x4f, 0x1e07, 0x15ce, 0x1b55, 0x68e, 0x2c, 0x13bb, 0x1f43, 0x1dda, 0x1fb4, 0xe54, 0x1502, 0x723, 0x7e7, 0x1147, 0x1ba0, 0x3d0, 0xf7c, 0x1754, 0x5fc, 0x1098, 0x16aa, 0x182, 0x1c1d, 0x18e9, 0x13ce, 0xbae, 0x18} +{{0x440, 0x172e, 0x4f, 0x1e07, 0x15ce, 0x1b55, 0x68e, 0x2c, 0x13bb, 0x1f43, 0x1dda, 0x1fb4, 0xe54, 0x1502, 0x723, 0x7e7, 0x1147, 0x1ba0, 0x3d0, 0xf7c, 0x1754, 0x5fc, 0x1098, 0x16aa, 0x182, 0x1c1d, 0x18e9, 0x13ce, 0xbae, 0x18}} #elif RADIX == 32 -{0xb971102, 0xbc0e04f, 0xedaad73, 0xec05868, 0xdafa1ce, 0x953f69d, 0x723a813, 0x451cfce, 0x83d0dd0, 0xe5d51ef, 0x550982f, 0xe860ad, 0x79d8e9e, 0x40eba} +{{0xb971102, 0xbc0e04f, 0xedaad73, 0xec05868, 0xdafa1ce, 0x953f69d, 0x723a813, 0x451cfce, 0x83d0dd0, 0xe5d51ef, 0x550982f, 0xe860ad, 0x79d8e9e, 0x40eba}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xad73bc0e04fb9711, 0xafa1ceec05868eda, 0xe723a813953f69dd, 0x1ef83d0dd0451cfc, 0x860ad550982fe5d5, 0xc2eba79d8e9e0e} +{{0xad73bc0e04fb9711, 0xafa1ceec05868eda, 0xe723a813953f69dd, 0x1ef83d0dd0451cfc, 0x860ad550982fe5d5, 0xc2eba79d8e9e0e}} #else -{0x67781c09f72e22, 0x3bb0161a3b6ab5, 0x1ca9fb4eed7d0e, 0x451cfce723a81, 0x7cbaa3df07a1ba, 0x3a182b554260b, 0x6175d3cec74f} +{{0x67781c09f72e22, 0x3bb0161a3b6ab5, 0x1ca9fb4eed7d0e, 0x451cfce723a81, 0x7cbaa3df07a1ba, 0x3a182b554260b, 0x6175d3cec74f}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 
#elif RADIX == 16 -{0x18c5, 0x1326, 0x1d4d, 0x19eb, 0xea, 0x947, 0x1adf, 0xbf5, 0xafe, 0x1225, 0x18a0, 0xb3a, 0x8e0, 0xaea, 0x17aa, 0x19a5, 0x912, 0x634, 0x15c7, 0x1df7, 0x13cb, 0x1894, 0xeaa, 0xa69, 0x6ca, 0x1b49, 0x26f, 0x1f50, 0xd92, 0x6} +{{0x18c5, 0x1326, 0x1d4d, 0x19eb, 0xea, 0x947, 0x1adf, 0xbf5, 0xafe, 0x1225, 0x18a0, 0xb3a, 0x8e0, 0xaea, 0x17aa, 0x19a5, 0x912, 0x634, 0x15c7, 0x1df7, 0x13cb, 0x1894, 0xeaa, 0xa69, 0x6ca, 0x1b49, 0x26f, 0x1f50, 0xd92, 0x6}} #elif RADIX == 32 -{0x9936314, 0xb3d7d4d, 0xf4a383a, 0xf97ebad, 0xa0912ab, 0x3816758, 0x7aa5752, 0x244b34b, 0xf5c731a, 0xa4f2fbe, 0xd2eaac4, 0xa49b294, 0xea026fd, 0x3364b} +{{0x9936314, 0xb3d7d4d, 0xf4a383a, 0xf97ebad, 0xa0912ab, 0x3816758, 0x7aa5752, 0x244b34b, 0xf5c731a, 0xa4f2fbe, 0xd2eaac4, 0xa49b294, 0xea026fd, 0x3364b}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x383ab3d7d4d99363, 0x912abf97ebadf4a, 0xb7aa57523816758a, 0xfbef5c731a244b34, 0x9b294d2eaac4a4f2, 0x54764bea026fda4} +{{0x383ab3d7d4d99363, 0x912abf97ebadf4a, 0xb7aa57523816758a, 0xfbef5c731a244b34, 0x9b294d2eaac4a4f2, 0x54764bea026fda4}} #else -{0x7567afa9b326c6, 0x2fe5faeb7d28e0, 0x11c0b3ac504895, 0x2244b34b7aa575, 0x149e5f7deb8e63, 0x6926ca534baab1, 0x2a3b25f50137e} +{{0x7567afa9b326c6, 0x2fe5faeb7d28e0, 0x11c0b3ac504895, 0x2244b34b7aa575, 0x149e5f7deb8e63, 0x6926ca534baab1, 0x2a3b25f50137e}} #endif #endif , #if 0 #elif RADIX == 16 -{0x132f, 0x6d5, 0x95b, 0xa68, 0x1814, 0x12d3, 0x1f1e, 0x857, 0x14fa, 0xcf, 0x1f19, 0xe1b, 0x1cf7, 0xa53, 0x1455, 0x5ef, 0x3e2, 0x199c, 0x1162, 0x38d, 0x174b, 0x794, 0xef6, 0xf74, 0x9c, 0x1f55, 0x1c4d, 0x56f, 0x1638, 0x19} +{{0x132f, 0x6d5, 0x95b, 0xa68, 0x1814, 0x12d3, 0x1f1e, 0x857, 0x14fa, 0xcf, 0x1f19, 0xe1b, 0x1cf7, 0xa53, 0x1455, 0x5ef, 0x3e2, 0x199c, 0x1162, 0x38d, 0x174b, 0x794, 0xef6, 0xf74, 0x9c, 0x1f55, 0x1c4d, 0x56f, 0x1638, 0x19}} #elif RADIX == 32 -{0x36accbf, 0x14d095b, 0xe969e05, 0xe90aff1, 0x19067d3, 0x3ddc37f, 0x455529f, 0xf88bdf, 0xb162cce, 0xa5d2c71, 0xe8ef63c, 0xaa8271e, 0xadfc4df, 0xa8e0} +{{0x36accbf, 0x14d095b, 0xe969e05, 0xe90aff1, 0x19067d3, 0x3ddc37f, 0x455529f, 0xf88bdf, 0xb162cce, 0xa5d2c71, 0xe8ef63c, 0xaa8271e, 0xadfc4df, 0xa8e0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x9e0514d095b36acc, 0x9067d3e90aff1e96, 0xf455529f3ddc37f1, 0xc71b162cce0f88bd, 0x8271ee8ef63ca5d2, 0x30898e0adfc4dfaa} +{{0x9e0514d095b36acc, 0x9067d3e90aff1e96, 0xf455529f3ddc37f1, 0xc71b162cce0f88bd, 0x8271ee8ef63ca5d2, 0x30898e0adfc4dfaa}} #else -{0xa29a12b66d599, 0x4fa42bfc7a5a78, 0x79eee1bf8c833e, 0x60f88bdf455529, 0x14ba58e362c599, 0x6aa09c7ba3bd8f, 0x804c7056fe26f} +{{0xa29a12b66d599, 0x4fa42bfc7a5a78, 0x79eee1bf8c833e, 0x60f88bdf455529, 0x14ba58e362c599, 0x6aa09c7ba3bd8f, 0x804c7056fe26f}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 
16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp.h index 04e360fe19..804968cef3 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp.h @@ -111,7 +111,7 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) static inline void fp_copy(fp_t *out, const fp_t *a) { - memcpy(out, a, sizeof(fp_t)); + memmove(out, a, sizeof(fp_t)); } static inline void diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp2.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp2.h index 81801fa9a9..71242a4681 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp2.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/fp2.h @@ -42,4 +42,4 @@ fp2_sqr(fp2_t *x, const fp2_t *y) x->re.arr[5] = t.re.arr[5]; } -#endif \ No newline at end of file +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/gf65376.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/gf65376.c index 00875b1aa5..992ceacc8c 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/gf65376.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/gf65376.c @@ -1,30 +1,30 @@ #include "gf65376.h" // see gf65376.h -const gf65376 ZERO = { 0, 0, 0, 0, 0, 0 }; +const gf65376 ZERO = {{ 0, 0, 0, 0, 0, 0 }}; // see gf65376.h -const gf65376 ONE = { 0x0000000000000003, 0x0000000000000000, 0x0000000000000000, - 0x0000000000000000, 0x0000000000000000, 0x3D00000000000000 }; +const gf65376 ONE = {{ 0x0000000000000003, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x3D00000000000000 }}; // see gf65376.h -const gf65376 gf65376_MINUS_ONE = { 0xFFFFFFFFFFFFFFFC, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, - 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x03FFFFFFFFFFFFFF }; +const gf65376 gf65376_MINUS_ONE = {{ 0xFFFFFFFFFFFFFFFC, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x03FFFFFFFFFFFFFF }}; // Montgomery representation of 2^256. -static const gf65376 R2 = { 0x3F03F03F03F03F13, 0x03F03F03F03F03F0, 0xF03F03F03F03F03F, - 0x3F03F03F03F03F03, 0x03F03F03F03F03F0, 0x1D3F03F03F03F03F }; +static const gf65376 R2 = {{ 0x3F03F03F03F03F13, 0x03F03F03F03F03F0, 0xF03F03F03F03F03F, + 0x3F03F03F03F03F03, 0x03F03F03F03F03F0, 0x1D3F03F03F03F03F }}; // The modulus itself (this is also a valid representation of zero). -static const gf65376 MODULUS = { 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, - 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x40FFFFFFFFFFFFFF }; +static const gf65376 MODULUS = {{ 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x40FFFFFFFFFFFFFF }}; // 1/2^380 (in Montgomery representation). 
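The hunks above wrap the field-element constants in an extra pair of braces and switch fp_copy from memcpy to memmove. As a minimal standalone sketch of both patterns (not part of the patch; felem_t, FE_ONE and fe_copy are made-up names, and the limb layout is only assumed to be a struct-wrapped array):

#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for a struct-wrapped field element: because the
 * limbs live inside a struct member, an initializer needs one brace level
 * for the struct and one for the array, i.e. {{ ... }}. */
typedef struct {
    uint64_t w[6];
} felem_t;

static const felem_t FE_ONE = {{ 0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000 }};

/* memmove is well defined even when source and destination overlap
 * (including the aliased call fe_copy(x, x)); memcpy would not be. */
static inline void
fe_copy(felem_t *out, const felem_t *a)
{
    memmove(out, a, sizeof(felem_t));
}

int
main(void)
{
    felem_t x;
    fe_copy(&x, &FE_ONE);
    return (int)(x.w[0] - 3); /* 0 on success */
}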
-static const gf65376 INVT380 = { 0x0000000000000010, 0x0000000000000000, 0x0000000000000000, - 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 }; +static const gf65376 INVT380 = {{ 0x0000000000000010, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 }}; -static const gf65376 PM1O3 = { 0xaaaaaaaaaaaaaaaa, 0xaaaaaaaaaaaaaaaa, 0xaaaaaaaaaaaaaaaa, - 0xaaaaaaaaaaaaaaaa, 0xaaaaaaaaaaaaaaaa, 0x15aaaaaaaaaaaaaa }; +static const gf65376 PM1O3 = {{ 0xaaaaaaaaaaaaaaaa, 0xaaaaaaaaaaaaaaaa, 0xaaaaaaaaaaaaaaaa, + 0xaaaaaaaaaaaaaaaa, 0xaaaaaaaaaaaaaaaa, 0x15aaaaaaaaaaaaaa }}; // Expand the most significant bit of x into a full-width 64-bit word // (0x0000000000000000 or 0xFFFFFFFFFFFFFFFF). diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/gf65376.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/gf65376.h index 2d04245fc1..a331d3ceb5 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/gf65376.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/gf65376.h @@ -96,7 +96,7 @@ extern "C" * support the API inline functions; they MUST NOT be used directly. */ -#if (defined _MSC_VER && defined _M_X64) || (defined __x86_64__ && (defined __GNUC__ || defined __clang__)) +#if (defined _MSC_VER && defined _M_X64) || (defined __x86_64__ && (defined __GNUC__ || defined __clang__)) || defined(C_PEDANTIC_MODE) #include #define inner_gf65376_adc(cc, a, b, d) _addcarry_u64(cc, a, b, (unsigned long long *)(void *)d) #define inner_gf65376_sbb(cc, a, b, d) _subborrow_u64(cc, a, b, (unsigned long long *)(void *)d) @@ -117,6 +117,7 @@ inner_gf65376_sbb(unsigned char cc, uint64_t a, uint64_t b, uint64_t *d) } #endif +#if defined _MSC_VER || defined(C_PEDANTIC_MODE) #if defined _MSC_VER #define inner_gf65376_umul(lo, hi, x, y) \ do { \ @@ -124,10 +125,40 @@ inner_gf65376_sbb(unsigned char cc, uint64_t a, uint64_t b, uint64_t *d) (lo) = _umul128((x), (y), &umul_hi); \ (hi) = umul_hi; \ } while (0) +#else +#define inner_gf65376_umul(lo, hi, a, b) \ + do { \ + register uint64_t al, ah, bl, bh, temp; \ + uint64_t albl, albh, ahbl, ahbh, res1, res2, res3, carry; \ + uint64_t mask_low = (uint64_t)(-1) >> (sizeof(uint64_t) * 4), mask_high = (uint64_t)(-1) << (sizeof(uint64_t) * 4); \ + al = a & mask_low; \ + ah = a >> (sizeof(uint64_t) * 4); \ + bl = b & mask_low; \ + bh = b >> (sizeof(uint64_t) * 4); \ + albl = al * bl; \ + albh = al * bh; \ + ahbl = ah * bl; \ + ahbh = ah * bh; \ + (lo) = albl & mask_low; \ + res1 = albl >> (sizeof(uint64_t) * 4); \ + res2 = ahbl & mask_low; \ + res3 = albh & mask_low; \ + temp = res1 + res2 + res3 ; \ + carry = temp >> (sizeof(uint64_t) * 4); \ + (lo) ^= temp << (sizeof(uint64_t) * 4); \ + res1 = ahbl >> (sizeof(uint64_t) * 4); \ + res2 = albh >> (sizeof(uint64_t) * 4); \ + res3 = ahbh & mask_low; \ + temp = res1 + res2 + res3 + carry; \ + (hi) = temp & mask_low; \ + carry = temp & mask_high; \ + (hi) ^= (ahbh & mask_high) + carry; \ + } while (0) +#endif #define inner_gf65376_umul_add(lo, hi, x, y, z) \ do { \ uint64_t umul_lo, umul_hi; \ - umul_lo = _umul128((x), (y), &umul_hi); \ + inner_gf65376_umul(umul_lo, umul_hi, (x), (y)); \ unsigned char umul_cc; \ umul_cc = inner_gf65376_adc(0, umul_lo, (z), &umul_lo); \ (void)inner_gf65376_adc(umul_cc, umul_hi, 0, &umul_hi); \ @@ -137,9 +168,9 @@ inner_gf65376_sbb(unsigned char cc, uint64_t a, uint64_t b, uint64_t *d) #define inner_gf65376_umul_x2(lo, hi, x1, y1, x2, y2) \ do { \ uint64_t umul_lo, umul_hi; \ - umul_lo = _umul128((x1), (y1), &umul_hi); \ + 
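The gf65376.h hunk adds a portable fallback for the 64x64 -> 128-bit multiply when neither _umul128 nor the GCC/Clang x86 path is available. A self-contained sketch of the same half-word (schoolbook) technique, written as a function rather than a macro; mul64x64_128 is an illustrative name, not from the patch:

#include <stdint.h>
#include <stdio.h>

/* Split a = ah*2^32 + al and b = bh*2^32 + bl, form the four partial
 * products, then recombine, exactly as the C_PEDANTIC_MODE fallback does. */
static void
mul64x64_128(uint64_t a, uint64_t b, uint64_t *lo, uint64_t *hi)
{
    uint64_t al = a & 0xFFFFFFFFu, ah = a >> 32;
    uint64_t bl = b & 0xFFFFFFFFu, bh = b >> 32;

    uint64_t albl = al * bl;
    uint64_t albh = al * bh;
    uint64_t ahbl = ah * bl;
    uint64_t ahbh = ah * bh;

    /* Middle column: high half of albl plus the low halves of the two cross
     * products; each term is < 2^32, so the sum cannot overflow 64 bits. */
    uint64_t mid = (albl >> 32) + (albh & 0xFFFFFFFFu) + (ahbl & 0xFFFFFFFFu);

    *lo = (albl & 0xFFFFFFFFu) | (mid << 32);
    *hi = ahbh + (albh >> 32) + (ahbl >> 32) + (mid >> 32);
}

int
main(void)
{
    uint64_t lo, hi;
    mul64x64_128(UINT64_MAX, UINT64_MAX, &lo, &hi);
    /* (2^64 - 1)^2 = 0xFFFFFFFFFFFFFFFE_0000000000000001 */
    printf("%016llx%016llx\n", (unsigned long long)hi, (unsigned long long)lo);
    return !(hi == 0xFFFFFFFFFFFFFFFEull && lo == 1);
}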
inner_gf65376_umul(umul_lo, umul_hi, (x1), (y1)); \ uint64_t umul_lo2, umul_hi2; \ - umul_lo2 = _umul128((x2), (y2), &umul_hi2); \ + inner_gf65376_umul(umul_lo2, umul_hi2, (x2), (y2)); \ unsigned char umul_cc; \ umul_cc = inner_gf65376_adc(0, umul_lo, umul_lo2, &umul_lo); \ (void)inner_gf65376_adc(umul_cc, umul_hi, umul_hi2, &umul_hi); \ @@ -149,9 +180,9 @@ inner_gf65376_sbb(unsigned char cc, uint64_t a, uint64_t b, uint64_t *d) #define inner_gf65376_umul_x2_add(lo, hi, x1, y1, x2, y2, z) \ do { \ uint64_t umul_lo, umul_hi; \ - umul_lo = _umul128((x1), (y1), &umul_hi); \ + inner_gf65376_umul(umul_lo, umul_hi, (x1), (y1)); \ uint64_t umul_lo2, umul_hi2; \ - umul_lo2 = _umul128((x2), (y2), &umul_hi2); \ + inner_gf65376_umul(umul_lo2, umul_hi2, (x2), (y2)); \ unsigned char umul_cc; \ umul_cc = inner_gf65376_adc(0, umul_lo, umul_lo2, &umul_lo); \ (void)inner_gf65376_adc(umul_cc, umul_hi, umul_hi2, &umul_hi); \ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd.h index 2b16e23834..616504c7b1 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd.h @@ -415,7 +415,7 @@ void copy_bases_to_kernel(theta_kernel_couple_points_t *ker, const ec_basis_t *B * @param t: an integer * @returns 0xFFFFFFFF on success, 0 on failure */ -static int +static inline int test_couple_point_order_twof(const theta_couple_point_t *T, const theta_couple_curve_t *E, int t) { int check_P1 = test_point_order_twof(&T->P1, &E->E1, t); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.c index d980d12183..d6777fa92a 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/hd_splitting_transforms.c @@ -11,131 +11,131 @@ const int CHI_EVAL[4][4] = {{1, 1, 1, 1}, {1, -1, 1, -1}, {1, 1, -1, -1}, {1, -1 const fp2_t FP2_CONSTANTS[5] = {{ #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 
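The hd.h hunk above turns a header-defined helper from plain static into static inline. A tiny sketch of the general pattern, under the assumption that the motivation is the usual one: a plain static function defined in a header triggers -Wunused-function in every translation unit that includes the header without calling it, while static inline keeps internal linkage and avoids the warning. clamp_nonneg is a made-up example, not a function from the patch:

/* Header-style helper: "static inline" rather than plain "static", so
 * including files that never call it do not emit unused-function warnings. */
static inline int
clamp_nonneg(int x)
{
    return x < 0 ? 0 : x;
}

int
main(void)
{
    return clamp_nonneg(-5); /* returns 0 */
}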
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x1f03, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0xfff, 0x1e} +{{0x1f03, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0xfff, 0x1e}} #elif RADIX == 32 -{0xffffc0f, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 
0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0x30fff} +{{0xffffc0f, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0x30fff}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xfffffffffffffffc, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff} +{{0xfffffffffffffffc, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff}} #else -{0x7ffffffffffff8, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x1ffffffffffff} +{{0x7ffffffffffff8, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x1ffffffffffff}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1f03, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0xfff, 0x1e} +{{0x1f03, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0xfff, 0x1e}} #elif RADIX == 32 -{0xffffc0f, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0x30fff} +{{0xffffc0f, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0x30fff}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xfffffffffffffffc, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff} +{{0xfffffffffffffffc, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff}} #else -{0x7ffffffffffff8, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x1ffffffffffff} +{{0x7ffffffffffff8, 
0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x1ffffffffffff}} #endif #endif }}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/l2.c index ea32213c75..0fed774a04 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/l2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/l2.c @@ -24,8 +24,8 @@ copy(fp_num *x, fp_num *r) static void normalize(fp_num *x) { - if (x->s == 0.0 || isfinite(x->s) == 0) { - if (x->s == 0.0) { + if (fpclassify(x->s) == FP_ZERO || isfinite(x->s) == 0) { + if (fpclassify(x->s) == FP_ZERO) { x->e = INT_MIN; } } else { @@ -49,13 +49,6 @@ to_deltabar(fp_num *x) x->e = 0; } -static void -to_etabar(fp_num *x) -{ - x->s = ETABAR; - x->e = 0; -} - static void from_mpz(const ibz_t *x, fp_num *r) { diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lll_internals.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lll_internals.h index e8d90141ac..2b76857205 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lll_internals.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/lll_internals.h @@ -43,13 +43,19 @@ /** @brief Type for fractions of integers * - * @typedef ibq_t +* @typedef ibq_t * * For fractions of integers of arbitrary size, used by intbig module, using gmp */ -typedef ibz_t ibq_t[2]; -typedef ibq_t ibq_vec_4_t[4]; -typedef ibq_t ibq_mat_4x4_t[4][4]; +typedef struct { + ibz_t q[2]; +} ibq_t; +typedef struct { + ibq_t v[4]; +} ibq_vec_4_t; +typedef struct { + ibq_vec_4_t m[4]; +} ibq_mat_4x4_t; /**@} */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mp.c index 27f4a963db..13714eee4a 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mp.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/mp.c @@ -2,6 +2,7 @@ #include #include #include +#include // double-wide multiplication void @@ -17,7 +18,7 @@ MUL(digit_t *out, const digit_t a, const digit_t b) out[0] = _umul128(a, b, &umul_hi); out[1] = umul_hi; -#elif defined(RADIX_64) && defined(HAVE_UINT128) +#elif defined(RADIX_64) && (defined(HAVE_UINT128) || defined(__SIZEOF_INT128__) || defined(__int128)) && !defined(C_PEDANTIC_MODE) unsigned __int128 umul_tmp; umul_tmp = (unsigned __int128)(a) * (unsigned __int128)(b); out[0] = (uint64_t)umul_tmp; @@ -277,6 +278,7 @@ mp_inv_2e(digit_t *b, const digit_t *a, int e, unsigned int nwords) assert((a[0] & 1) == 1); digit_t x[nwords], y[nwords], aa[nwords], mp_one[nwords], tmp[nwords]; + memset(x, 0, sizeof(x)); mp_copy(aa, a, nwords); mp_one[0] = 1; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/rationals.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/rationals.c index 0c5387e5e8..25f8519b3f 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/rationals.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/rationals.c @@ -1,20 +1,20 @@ -#include + #include #include "internal.h" #include "lll_internals.h" void ibq_init(ibq_t *x) { - ibz_init(&((*x)[0])); - ibz_init(&((*x)[1])); - ibz_set(&((*x)[1]), 1); + ibz_init(&(x->q[0])); + ibz_init(&(x->q[1])); + ibz_set(&(x->q[1]), 1); } void ibq_finalize(ibq_t *x) { - ibz_finalize(&((*x)[0])); - ibz_finalize(&((*x)[1])); + ibz_finalize(&(x->q[0])); + ibz_finalize(&(x->q[1])); } void @@ -22,7 +22,7 @@ ibq_mat_4x4_init(ibq_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibq_init(&(*mat)[i][j]); + 
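The l2.c hunk above replaces the direct comparison x->s == 0.0 with fpclassify(x->s) == FP_ZERO. A small sketch of that test in isolation; is_zero_or_nonfinite is a made-up helper, and the assumption is that the point of the change is to avoid a raw floating-point equality comparison (e.g. under -Wfloat-equal) while still matching both +0.0 and -0.0:

#include <math.h>
#include <stdio.h>

/* Same condition as the patched normalize(): zero (of either sign) or a
 * non-finite value, without writing "x == 0.0" directly. */
static int
is_zero_or_nonfinite(double s)
{
    return fpclassify(s) == FP_ZERO || !isfinite(s);
}

int
main(void)
{
    printf("%d %d %d\n",
           is_zero_or_nonfinite(0.0),   /* 1: zero            */
           is_zero_or_nonfinite(-0.0),  /* 1: negative zero   */
           is_zero_or_nonfinite(1.5));  /* 0: ordinary finite */
    return 0;
}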
ibq_init(&mat->m[i].v[j]); } } } @@ -31,7 +31,7 @@ ibq_mat_4x4_finalize(ibq_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibq_finalize(&(*mat)[i][j]); + ibq_finalize(&mat->m[i].v[j]); } } } @@ -40,14 +40,14 @@ void ibq_vec_4_init(ibq_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibq_init(&(*vec)[i]); + ibq_init(&vec->v[i]); } } void ibq_vec_4_finalize(ibq_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibq_finalize(&(*vec)[i]); + ibq_finalize(&vec->v[i]); } } @@ -57,9 +57,9 @@ ibq_mat_4x4_print(const ibq_mat_4x4_t *mat) printf("matrix: "); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_print(&((*mat)[i][j][0]), 10); + ibz_print(&(mat->m[i].v[j].q[0]), 10); printf("/"); - ibz_print(&((*mat)[i][j][1]), 10); + ibz_print(&(mat->m[i].v[j].q[1]), 10); printf(" "); } printf("\n "); @@ -72,9 +72,9 @@ ibq_vec_4_print(const ibq_vec_4_t *vec) { printf("vector: "); for (int i = 0; i < 4; i++) { - ibz_print(&((*vec)[i][0]), 10); + ibz_print(&(vec->v[i].q[0]), 10); printf("/"); - ibz_print(&((*vec)[i][1]), 10); + ibz_print(&(vec->v[i].q[1]), 10); printf(" "); } printf("\n\n"); @@ -86,10 +86,10 @@ ibq_reduce(ibq_t *x) ibz_t gcd, r; ibz_init(&gcd); ibz_init(&r); - ibz_gcd(&gcd, &((*x)[0]), &((*x)[1])); - ibz_div(&((*x)[0]), &r, &((*x)[0]), &gcd); + ibz_gcd(&gcd, &(x->q[0]), &(x->q[1])); + ibz_div(&(x->q[0]), &r, &(x->q[0]), &gcd); assert(ibz_is_zero(&r)); - ibz_div(&((*x)[1]), &r, &((*x)[1]), &gcd); + ibz_div(&(x->q[1]), &r, &(x->q[1]), &gcd); assert(ibz_is_zero(&r)); ibz_finalize(&gcd); ibz_finalize(&r); @@ -102,10 +102,10 @@ ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b) ibz_init(&add); ibz_init(&prod); - ibz_mul(&add, &((*a)[0]), &((*b)[1])); - ibz_mul(&prod, &((*b)[0]), &((*a)[1])); - ibz_add(&((*sum)[0]), &add, &prod); - ibz_mul(&((*sum)[1]), &((*a)[1]), &((*b)[1])); + ibz_mul(&add, &(a->q[0]), &(b->q[1])); + ibz_mul(&prod, &(b->q[0]), &(a->q[1])); + ibz_add(&(sum->q[0]), &add, &prod); + ibz_mul(&(sum->q[1]), &(a->q[1]), &(b->q[1])); ibz_finalize(&add); ibz_finalize(&prod); } @@ -113,8 +113,8 @@ ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b) void ibq_neg(ibq_t *neg, const ibq_t *x) { - ibz_copy(&((*neg)[1]), &((*x)[1])); - ibz_neg(&((*neg)[0]), &((*x)[0])); + ibz_copy(&(neg->q[1]), &(x->q[1])); + ibz_neg(&(neg->q[0]), &(x->q[0])); } void @@ -143,8 +143,8 @@ ibq_abs(ibq_t *abs, const ibq_t *x) // once void ibq_mul(ibq_t *prod, const ibq_t *a, const ibq_t *b) { - ibz_mul(&((*prod)[0]), &((*a)[0]), &((*b)[0])); - ibz_mul(&((*prod)[1]), &((*a)[1]), &((*b)[1])); + ibz_mul(&(prod->q[0]), &(a->q[0]), &(b->q[0])); + ibz_mul(&(prod->q[1]), &(a->q[1]), &(b->q[1])); } int @@ -152,9 +152,9 @@ ibq_inv(ibq_t *inv, const ibq_t *x) { int res = !ibq_is_zero(x); if (res) { - ibz_copy(&((*inv)[0]), &((*x)[0])); - ibz_copy(&((*inv)[1]), &((*x)[1])); - ibz_swap(&((*inv)[1]), &((*inv)[0])); + ibz_copy(&(inv->q[0]), &(x->q[0])); + ibz_copy(&(inv->q[1]), &(x->q[1])); + ibz_swap(&(inv->q[1]), &(inv->q[0])); } return (res); } @@ -165,15 +165,15 @@ ibq_cmp(const ibq_t *a, const ibq_t *b) ibz_t x, y; ibz_init(&x); ibz_init(&y); - ibz_copy(&x, &((*a)[0])); - ibz_copy(&y, &((*b)[0])); - ibz_mul(&y, &y, &((*a)[1])); - ibz_mul(&x, &x, &((*b)[1])); - if (ibz_cmp(&((*a)[1]), &ibz_const_zero) > 0) { + ibz_copy(&x, &(a->q[0])); + ibz_copy(&y, &(b->q[0])); + ibz_mul(&y, &y, &(a->q[1])); + ibz_mul(&x, &x, &(b->q[1])); + if (ibz_cmp(&(a->q[1]), &ibz_const_zero) > 0) { ibz_neg(&y, &y); ibz_neg(&x, &x); } - if (ibz_cmp(&((*b)[1]), &ibz_const_zero) > 0) { + if 
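The lll_internals.h and rationals.c hunks above replace the array typedefs (ibz_t[2] and friends) with struct wrappers, so accesses become x->q[0] instead of (*x)[0]. A minimal sketch of why the struct form is the more robust C idiom, using a made-up pair_t of plain longs rather than the library's ibz_t:

#include <stdio.h>

/* A struct can be assigned, returned by value and passed around without
 * array-to-pointer decay; "typedef long pair_t[2];" could not be returned
 * from a function at all.  pair_t / make_pair are illustrative names. */
typedef struct {
    long q[2]; /* q[0] = numerator, q[1] = denominator */
} pair_t;

static pair_t
make_pair(long num, long den)
{
    pair_t r;      /* returning by value is fine for a struct, */
    r.q[0] = num;  /* but would not compile for an array typedef */
    r.q[1] = den;
    return r;
}

int
main(void)
{
    pair_t half = make_pair(1, 2);
    printf("%ld/%ld\n", half.q[0], half.q[1]);
    return 0;
}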
(ibz_cmp(&(b->q[1]), &ibz_const_zero) > 0) { ibz_neg(&y, &y); ibz_neg(&x, &x); } @@ -186,28 +186,28 @@ ibq_cmp(const ibq_t *a, const ibq_t *b) int ibq_is_zero(const ibq_t *x) { - return ibz_is_zero(&((*x)[0])); + return ibz_is_zero(&(x->q[0])); } int ibq_is_one(const ibq_t *x) { - return (0 == ibz_cmp(&((*x)[0]), &((*x)[1]))); + return (0 == ibz_cmp(&(x->q[0]), &(x->q[1]))); } int ibq_set(ibq_t *q, const ibz_t *a, const ibz_t *b) { - ibz_copy(&((*q)[0]), a); - ibz_copy(&((*q)[1]), b); + ibz_copy(&(q->q[0]), a); + ibz_copy(&(q->q[1]), b); return !ibz_is_zero(b); } void ibq_copy(ibq_t *target, const ibq_t *value) // once { - ibz_copy(&((*target)[0]), &((*value)[0])); - ibz_copy(&((*target)[1]), &((*value)[1])); + ibz_copy(&(target->q[0]), &(value->q[0])); + ibz_copy(&(target->q[1]), &(value->q[1])); } int @@ -215,7 +215,7 @@ ibq_is_ibz(const ibq_t *q) { ibz_t r; ibz_init(&r); - ibz_mod(&r, &((*q)[0]), &((*q)[1])); + ibz_mod(&r, &(q->q[0]), &(q->q[1])); int res = ibz_is_zero(&r); ibz_finalize(&r); return (res); @@ -226,7 +226,7 @@ ibq_to_ibz(ibz_t *z, const ibq_t *q) { ibz_t r; ibz_init(&r); - ibz_div(z, &r, &((*q)[0]), &((*q)[1])); + ibz_div(z, &r, &(q->q[0]), &(q->q[1])); int res = ibz_is_zero(&r); ibz_finalize(&r); return (res); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/rng.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/rng.h index d0861ac036..0362ca0c42 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/rng.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/rng.h @@ -5,7 +5,7 @@ #include -static int randombytes(unsigned char *x, unsigned long long xlen){ +static inline int randombytes(unsigned char *x, unsigned long long xlen){ OQS_randombytes(x, xlen); return 0; } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign.c index 7335c38d9a..cf2134085b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_broadwell/sqisign.c @@ -121,7 +121,7 @@ sqisign_verify(const unsigned char *m, unsigned long long siglen, const unsigned char *pk) { - + (void) siglen; int ret = 0; public_key_t pkt = { 0 }; signature_t sigt; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim2id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim2id2iso.c index 143060e2c3..74184fc97b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim2id2iso.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/dim2id2iso.c @@ -191,7 +191,7 @@ fixed_degree_isogeny_and_eval(quat_left_ideal_t *lideal, // reordering vectors and switching some signs if needed to make it in a nicer // shape static void -post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, const ibz_t *norm, bool is_special_order) +post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, bool is_special_order) { // if the left order is the special one, then we apply some additional post // treatment @@ -520,7 +520,7 @@ find_uv(ibz_t *u, ibz_set(&adjusted_norm[0], 1); ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); - post_LLL_basis_treatment(&gram[0], &reduced[0], &ideal[0].norm, true); + post_LLL_basis_treatment(&gram[0], &reduced[0], true); // for efficient lattice reduction, we replace ideal[0] by the equivalent // ideal of smallest norm @@ -562,7 +562,7 @@ find_uv(ibz_t *u, ibz_set(&adjusted_norm[i], 1); ibz_mul(&adjusted_norm[i], 
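The sqisign_verify hunk above adds "(void) siglen;" because the wrapper keeps the parameter for API compatibility but never reads it. A short sketch of that idiom with a made-up function (checked_copy and its "reserved" parameter are not from the patch):

#include <stddef.h>
#include <string.h>

/* The cast-to-void marks the parameter as intentionally unused and silences
 * -Wunused-parameter without changing behaviour or the function signature. */
static int
checked_copy(unsigned char *dst, const unsigned char *src, size_t len, size_t reserved)
{
    (void)reserved; /* kept in the signature, intentionally unused */
    memmove(dst, src, len);
    return 0;
}

int
main(void)
{
    unsigned char a[4] = { 1, 2, 3, 4 }, b[4] = { 0 };
    return checked_copy(b, a, sizeof(a), 0) || (b[3] != 4);
}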
&adjusted_norm[i], &ideal[i].lattice.denom); ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom); - post_LLL_basis_treatment(&gram[i], &reduced[i], &ideal[i].norm, false); + post_LLL_basis_treatment(&gram[i], &reduced[i], false); } // enumerating small vectors diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/e0_basis.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/e0_basis.c index 1b12a8380f..6f7311e3c9 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/e0_basis.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/e0_basis.c @@ -2,54 +2,54 @@ const fp2_t BASIS_E0_PX = { #if 0 #elif RADIX == 16 -{0x1196, 0x134b, 0xdbd, 0x118d, 0x712, 0x1646, 0x5d7, 0x8eb, 0x431, 0xf5b, 0x161e, 0x13b6, 0x1c07, 0x42, 0x8ba, 0xeec, 0x1a43, 0x545, 0x1cdb, 0x1659, 0x1614, 0xde, 0x72d, 0x1b80, 0x1706, 0x15a3, 0x894, 0xd4a, 0x1b2f, 0x12} +{{0x1196, 0x134b, 0xdbd, 0x118d, 0x712, 0x1646, 0x5d7, 0x8eb, 0x431, 0xf5b, 0x161e, 0x13b6, 0x1c07, 0x42, 0x8ba, 0xeec, 0x1a43, 0x545, 0x1cdb, 0x1659, 0x1614, 0xde, 0x72d, 0x1b80, 0x1706, 0x15a3, 0x894, 0xd4a, 0x1b2f, 0x12}} #elif RADIX == 32 -{0x9a5c65a, 0xa31adbd, 0x7b231c4, 0xc51d65d, 0x1e7ad90, 0x1e76d6, 0x8ba0217, 0xe90ddd8, 0x3cdb2a2, 0xf5852cb, 0x72d06, 0xd1dc1b7, 0xa94894a, 0x14cbd} +{{0x9a5c65a, 0xa31adbd, 0x7b231c4, 0xc51d65d, 0x1e7ad90, 0x1e76d6, 0x8ba0217, 0xe90ddd8, 0x3cdb2a2, 0xf5852cb, 0x72d06, 0xd1dc1b7, 0xa94894a, 0x14cbd}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x31c4a31adbd9a5c6, 0xe7ad90c51d65d7b2, 0x88ba021701e76d61, 0x2cb3cdb2a2e90ddd, 0xdc1b70072d06f585, 0x16eecbda94894ad1} +{{0x31c4a31adbd9a5c6, 0xe7ad90c51d65d7b2, 0x88ba021701e76d61, 0x2cb3cdb2a2e90ddd, 0xdc1b70072d06f585, 0x16eecbda94894ad1}} #else -{0x94635b7b34b8c, 0x431475975ec8c7, 0x380f3b6b0f3d6c, 0x2e90ddd88ba021, 0x5eb0a59679b654, 0x347706dc01cb41, 0xb7765ed4a44a5} +{{0x94635b7b34b8c, 0x431475975ec8c7, 0x380f3b6b0f3d6c, 0x2e90ddd88ba021, 0x5eb0a59679b654, 0x347706dc01cb41, 0xb7765ed4a44a5}} #endif #endif , #if 0 #elif RADIX == 16 -{0xa85, 0x10cc, 0x1ef, 0xb0b, 0x1082, 0x5be, 0xd14, 0x1100, 0x1a33, 0x174b, 0x181c, 0x83e, 0x1034, 0x18ba, 0x205, 0x1f39, 0x1e9, 0x1998, 0x130e, 0x801, 0xfeb, 0x698, 0xdf9, 0x6a5, 0x5b6, 0x2c8, 0x1283, 0xad9, 0x960, 0x1e} +{{0xa85, 0x10cc, 0x1ef, 0xb0b, 0x1082, 0x5be, 0xd14, 0x1100, 0x1a33, 0x174b, 0x181c, 0x83e, 0x1034, 0x18ba, 0x205, 0x1f39, 0x1e9, 0x1998, 0x130e, 0x801, 0xfeb, 0x698, 0xdf9, 0x6a5, 0x5b6, 0x2c8, 0x1283, 0xad9, 0x960, 0x1e}} #elif RADIX == 32 -{0x8662a17, 0x96161ef, 0x42df420, 0xce200d1, 0x1cba5e8, 0xd107d8, 0x205c5d4, 0x7a7e72, 0x330eccc, 0xc3fad00, 0x4adf934, 0x6416d8d, 0x5b32831, 0x2f581} +{{0x8662a17, 0x96161ef, 0x42df420, 0xce200d1, 0x1cba5e8, 0xd107d8, 0x205c5d4, 0x7a7e72, 0x330eccc, 0xc3fad00, 0x4adf934, 0x6416d8d, 0x5b32831, 0x2f581}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf42096161ef8662a, 0xcba5e8ce200d142d, 0x2205c5d40d107d81, 0xd00330eccc07a7e7, 0x16d8d4adf934c3fa, 0x6065815b3283164} +{{0xf42096161ef8662a, 0xcba5e8ce200d142d, 0x2205c5d40d107d81, 0xd00330eccc07a7e7, 0x16d8d4adf934c3fa, 0x6065815b3283164}} #else -{0x412c2c3df0cc54, 0x2338803450b7d0, 0x206883ec0e5d2f, 0x407a7e72205c5d, 0x187f5a00661d99, 0x5905b6352b7e4d, 0x3032c0ad99418} +{{0x412c2c3df0cc54, 0x2338803450b7d0, 0x206883ec0e5d2f, 0x407a7e72205c5d, 0x187f5a00661d99, 0x5905b6352b7e4d, 0x3032c0ad99418}} #endif #endif }; const fp2_t BASIS_E0_QX = { #if 0 #elif RADIX == 16 -{0x16ed, 0x818, 0x127a, 0xcfb, 0x1be6, 0x1b40, 0x1bf1, 0xe75, 0x129c, 0x151, 0x425, 0x142e, 0x1edb, 0x254, 0x5cc, 0x1a5b, 0x1e1d, 0x1e27, 
0x1a12, 0x8a8, 0x59e, 0x933, 0x1647, 0x686, 0x19e, 0x1e51, 0x151f, 0x1b6e, 0x1efe, 0xd} +{{0x16ed, 0x818, 0x127a, 0xcfb, 0x1be6, 0x1b40, 0x1bf1, 0xe75, 0x129c, 0x151, 0x425, 0x142e, 0x1edb, 0x254, 0x5cc, 0x1a5b, 0x1e1d, 0x1e27, 0x1a12, 0x8a8, 0x59e, 0x933, 0x1647, 0x686, 0x19e, 0x1e51, 0x151f, 0x1b6e, 0x1efe, 0xd}} #elif RADIX == 32 -{0x40c5bb5, 0x99f727a, 0x1da06f9, 0x71cebbf, 0x250a8ca, 0xb6e85c4, 0x5cc12a7, 0xf8774b6, 0x1a12f13, 0x9967915, 0xd64749, 0x288678d, 0x6dd51ff, 0x2ebfb} +{{0x40c5bb5, 0x99f727a, 0x1da06f9, 0x71cebbf, 0x250a8ca, 0xb6e85c4, 0x5cc12a7, 0xf8774b6, 0x1a12f13, 0x9967915, 0xd64749, 0x288678d, 0x6dd51ff, 0x2ebfb}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6f999f727a40c5b, 0x50a8ca71cebbf1da, 0x65cc12a7b6e85c42, 0x9151a12f13f8774b, 0x8678d0d647499967, 0x2e23bfb6dd51ff28} +{{0x6f999f727a40c5b, 0x50a8ca71cebbf1da, 0x65cc12a7b6e85c42, 0x9151a12f13f8774b, 0x8678d0d647499967, 0x2e23bfb6dd51ff28}} #else -{0x7333ee4f4818b7, 0x29c73aefc7681b, 0x3db742e2128546, 0x3f8774b65cc12a, 0x332cf22a3425e2, 0x4a219e343591d2, 0x6d1dfdb6ea8ff} +{{0x7333ee4f4818b7, 0x29c73aefc7681b, 0x3db742e2128546, 0x3f8774b65cc12a, 0x332cf22a3425e2, 0x4a219e343591d2, 0x6d1dfdb6ea8ff}} #endif #endif , #if 0 #elif RADIX == 16 -{0x18a9, 0x1838, 0x1588, 0x1720, 0xf3f, 0x1fcd, 0x44d, 0x1e6b, 0x681, 0x1249, 0x1f8a, 0x5af, 0x1f58, 0x1c12, 0xf21, 0x1887, 0x278, 0x156a, 0xbfe, 0x765, 0x12f7, 0x4da, 0x16ce, 0x7c1, 0x1c04, 0x1773, 0x853, 0xab7, 0xe1d, 0x1a} +{{0x18a9, 0x1838, 0x1588, 0x1720, 0xf3f, 0x1fcd, 0x44d, 0x1e6b, 0x681, 0x1249, 0x1f8a, 0x5af, 0x1f58, 0x1c12, 0xf21, 0x1887, 0x278, 0x156a, 0xbfe, 0x765, 0x12f7, 0x4da, 0x16ce, 0x7c1, 0x1c04, 0x1773, 0x853, 0xab7, 0xe1d, 0x1a}} #elif RADIX == 32 -{0xc1c62a7, 0xee41588, 0xdfe6bcf, 0x7cd644, 0x8a9249a, 0xd60b5ff, 0xf21e097, 0x9e310e, 0xabfeab5, 0xd4bdcec, 0x836ce26, 0xb9f010f, 0x56e853b, 0x10875} +{{0xc1c62a7, 0xee41588, 0xdfe6bcf, 0x7cd644, 0x8a9249a, 0xd60b5ff, 0xf21e097, 0x9e310e, 0xabfeab5, 0xd4bdcec, 0x836ce26, 0xb9f010f, 0x56e853b, 0x10875}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6bcfee41588c1c62, 0xa9249a07cd644dfe, 0xef21e097d60b5ff8, 0xcecabfeab509e310, 0xf010f836ce26d4bd, 0x2a7787556e853bb9} +{{0x6bcfee41588c1c62, 0xa9249a07cd644dfe, 0xef21e097d60b5ff8, 0xcecabfeab509e310, 0xf010f836ce26d4bd, 0x2a7787556e853bb9}} #else -{0x1fdc82b11838c5, 0x681f359137f9af, 0x3eb05affc54924, 0x509e310ef21e09, 0x5a97b9d957fd56, 0x6e7c043e0db389, 0x4fbc3aab7429d} +{{0x1fdc82b11838c5, 0x681f359137f9af, 0x3eb05affc54924, 0x509e310ef21e09, 0x5a97b9d957fd56, 0x6e7c043e0db389, 0x4fbc3aab7429d}} #endif #endif }; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec.h index e609c93a08..7cef95ca49 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/ec.h @@ -566,7 +566,7 @@ uint32_t ec_is_basis_four_torsion(const ec_basis_t *B, const ec_curve_t *E); * * @return 0xFFFFFFFF if the order is correct, 0 otherwise */ -static int +static inline int test_point_order_twof(const ec_point_t *P, const ec_curve_t *E, int t) { ec_point_t test; @@ -595,7 +595,7 @@ test_point_order_twof(const ec_point_t *P, const ec_curve_t *E, int t) * * @return 0xFFFFFFFF if the order is correct, 0 otherwise */ -static int +static inline int test_basis_order_twof(const ec_basis_t *B, const ec_curve_t *E, int t) { int check_P = test_point_order_twof(&B->P, E, t); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/encode_verification.c 
b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/encode_verification.c index fecdb9c259..8aa451d366 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/encode_verification.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/encode_verification.c @@ -99,36 +99,6 @@ ec_curve_from_bytes(ec_curve_t *curve, const byte_t *enc) return proj_from_bytes(&curve->A, &curve->C, enc); } -static byte_t * -ec_point_to_bytes(byte_t *enc, const ec_point_t *point) -{ - return proj_to_bytes(enc, &point->x, &point->z); -} - -static const byte_t * -ec_point_from_bytes(ec_point_t *point, const byte_t *enc) -{ - return proj_from_bytes(&point->x, &point->z, enc); -} - -static byte_t * -ec_basis_to_bytes(byte_t *enc, const ec_basis_t *basis) -{ - enc = ec_point_to_bytes(enc, &basis->P); - enc = ec_point_to_bytes(enc, &basis->Q); - enc = ec_point_to_bytes(enc, &basis->PmQ); - return enc; -} - -static const byte_t * -ec_basis_from_bytes(ec_basis_t *basis, const byte_t *enc) -{ - enc = ec_point_from_bytes(&basis->P, enc); - enc = ec_point_from_bytes(&basis->Q, enc); - enc = ec_point_from_bytes(&basis->PmQ, enc); - return enc; -} - // public API byte_t * diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/endomorphism_action.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/endomorphism_action.c index a598a89c0e..936b00d135 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/endomorphism_action.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/endomorphism_action.c @@ -4,261 +4,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if 
defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x7e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1} +{{0x7e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}} #elif RADIX == 32 -{0x1f8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8000} +{{0x1f8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x1, 0x0, 0x0, 0x0, 0x0, 0x3f00000000000000} +{{0x1, 0x0, 0x0, 0x0, 0x0, 0x3f00000000000000}} #else -{0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf400000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x1196, 0x134b, 0xdbd, 0x118d, 0x712, 0x1646, 0x5d7, 0x8eb, 0x431, 0xf5b, 0x161e, 0x13b6, 0x1c07, 0x42, 0x8ba, 0xeec, 0x1a43, 0x545, 0x1cdb, 0x1659, 0x1614, 0xde, 0x72d, 0x1b80, 0x1706, 0x15a3, 0x894, 0xd4a, 0x1b2f, 0x12} +{{0x1196, 0x134b, 0xdbd, 0x118d, 0x712, 0x1646, 0x5d7, 0x8eb, 0x431, 0xf5b, 0x161e, 0x13b6, 0x1c07, 0x42, 0x8ba, 0xeec, 0x1a43, 0x545, 0x1cdb, 0x1659, 0x1614, 0xde, 0x72d, 0x1b80, 0x1706, 0x15a3, 0x894, 0xd4a, 0x1b2f, 0x12}} #elif RADIX == 32 -{0x9a5c65a, 0xa31adbd, 0x7b231c4, 0xc51d65d, 0x1e7ad90, 0x1e76d6, 0x8ba0217, 0xe90ddd8, 0x3cdb2a2, 0xf5852cb, 0x72d06, 0xd1dc1b7, 0xa94894a, 0x14cbd} +{{0x9a5c65a, 0xa31adbd, 0x7b231c4, 0xc51d65d, 0x1e7ad90, 0x1e76d6, 0x8ba0217, 0xe90ddd8, 0x3cdb2a2, 0xf5852cb, 0x72d06, 0xd1dc1b7, 0xa94894a, 0x14cbd}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x31c4a31adbd9a5c6, 0xe7ad90c51d65d7b2, 0x88ba021701e76d61, 0x2cb3cdb2a2e90ddd, 0xdc1b70072d06f585, 0x16eecbda94894ad1} +{{0x31c4a31adbd9a5c6, 0xe7ad90c51d65d7b2, 0x88ba021701e76d61, 0x2cb3cdb2a2e90ddd, 0xdc1b70072d06f585, 0x16eecbda94894ad1}} #else -{0x94635b7b34b8c, 0x431475975ec8c7, 0x380f3b6b0f3d6c, 0x2e90ddd88ba021, 0x5eb0a59679b654, 0x347706dc01cb41, 0xb7765ed4a44a5} +{{0x94635b7b34b8c, 0x431475975ec8c7, 0x380f3b6b0f3d6c, 0x2e90ddd88ba021, 0x5eb0a59679b654, 0x347706dc01cb41, 0xb7765ed4a44a5}} #endif #endif , #if 0 #elif RADIX == 16 -{0xa85, 0x10cc, 0x1ef, 0xb0b, 0x1082, 0x5be, 0xd14, 0x1100, 0x1a33, 0x174b, 0x181c, 0x83e, 0x1034, 0x18ba, 0x205, 0x1f39, 0x1e9, 0x1998, 0x130e, 0x801, 0xfeb, 0x698, 0xdf9, 0x6a5, 0x5b6, 0x2c8, 0x1283, 0xad9, 0x960, 0x1e} +{{0xa85, 0x10cc, 0x1ef, 0xb0b, 0x1082, 0x5be, 0xd14, 0x1100, 0x1a33, 0x174b, 0x181c, 0x83e, 0x1034, 0x18ba, 0x205, 0x1f39, 0x1e9, 0x1998, 0x130e, 0x801, 0xfeb, 0x698, 0xdf9, 0x6a5, 0x5b6, 0x2c8, 0x1283, 0xad9, 0x960, 0x1e}} #elif RADIX == 32 -{0x8662a17, 0x96161ef, 0x42df420, 0xce200d1, 0x1cba5e8, 0xd107d8, 0x205c5d4, 0x7a7e72, 0x330eccc, 0xc3fad00, 0x4adf934, 0x6416d8d, 0x5b32831, 0x2f581} +{{0x8662a17, 0x96161ef, 0x42df420, 0xce200d1, 0x1cba5e8, 0xd107d8, 0x205c5d4, 0x7a7e72, 0x330eccc, 0xc3fad00, 0x4adf934, 0x6416d8d, 0x5b32831, 0x2f581}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf42096161ef8662a, 0xcba5e8ce200d142d, 0x2205c5d40d107d81, 0xd00330eccc07a7e7, 0x16d8d4adf934c3fa, 0x6065815b3283164} +{{0xf42096161ef8662a, 0xcba5e8ce200d142d, 0x2205c5d40d107d81, 0xd00330eccc07a7e7, 0x16d8d4adf934c3fa, 0x6065815b3283164}} #else -{0x412c2c3df0cc54, 0x2338803450b7d0, 0x206883ec0e5d2f, 0x407a7e72205c5d, 0x187f5a00661d99, 0x5905b6352b7e4d, 0x3032c0ad99418} +{{0x412c2c3df0cc54, 0x2338803450b7d0, 0x206883ec0e5d2f, 0x407a7e72205c5d, 0x187f5a00661d99, 0x5905b6352b7e4d, 0x3032c0ad99418}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x16ed, 0x818, 0x127a, 0xcfb, 0x1be6, 0x1b40, 0x1bf1, 0xe75, 0x129c, 0x151, 0x425, 0x142e, 0x1edb, 0x254, 0x5cc, 0x1a5b, 0x1e1d, 0x1e27, 0x1a12, 0x8a8, 0x59e, 0x933, 0x1647, 0x686, 0x19e, 0x1e51, 0x151f, 0x1b6e, 0x1efe, 0xd} +{{0x16ed, 0x818, 0x127a, 0xcfb, 0x1be6, 0x1b40, 0x1bf1, 0xe75, 0x129c, 0x151, 0x425, 0x142e, 0x1edb, 0x254, 0x5cc, 0x1a5b, 0x1e1d, 0x1e27, 0x1a12, 0x8a8, 0x59e, 0x933, 0x1647, 0x686, 0x19e, 0x1e51, 0x151f, 0x1b6e, 0x1efe, 0xd}} #elif RADIX == 32 -{0x40c5bb5, 0x99f727a, 0x1da06f9, 0x71cebbf, 0x250a8ca, 0xb6e85c4, 0x5cc12a7, 0xf8774b6, 0x1a12f13, 0x9967915, 0xd64749, 0x288678d, 0x6dd51ff, 0x2ebfb} +{{0x40c5bb5, 0x99f727a, 0x1da06f9, 0x71cebbf, 0x250a8ca, 0xb6e85c4, 0x5cc12a7, 0xf8774b6, 0x1a12f13, 0x9967915, 0xd64749, 0x288678d, 0x6dd51ff, 0x2ebfb}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6f999f727a40c5b, 0x50a8ca71cebbf1da, 0x65cc12a7b6e85c42, 0x9151a12f13f8774b, 0x8678d0d647499967, 0x2e23bfb6dd51ff28} +{{0x6f999f727a40c5b, 0x50a8ca71cebbf1da, 0x65cc12a7b6e85c42, 0x9151a12f13f8774b, 0x8678d0d647499967, 0x2e23bfb6dd51ff28}} #else -{0x7333ee4f4818b7, 0x29c73aefc7681b, 0x3db742e2128546, 0x3f8774b65cc12a, 0x332cf22a3425e2, 0x4a219e343591d2, 0x6d1dfdb6ea8ff} +{{0x7333ee4f4818b7, 0x29c73aefc7681b, 0x3db742e2128546, 0x3f8774b65cc12a, 0x332cf22a3425e2, 0x4a219e343591d2, 0x6d1dfdb6ea8ff}} #endif #endif , #if 0 #elif RADIX == 16 -{0x18a9, 0x1838, 0x1588, 0x1720, 0xf3f, 0x1fcd, 0x44d, 0x1e6b, 0x681, 0x1249, 0x1f8a, 0x5af, 0x1f58, 0x1c12, 0xf21, 0x1887, 0x278, 0x156a, 0xbfe, 0x765, 0x12f7, 0x4da, 0x16ce, 0x7c1, 0x1c04, 0x1773, 0x853, 0xab7, 0xe1d, 0x1a} +{{0x18a9, 0x1838, 0x1588, 0x1720, 0xf3f, 0x1fcd, 0x44d, 0x1e6b, 0x681, 0x1249, 0x1f8a, 0x5af, 0x1f58, 0x1c12, 0xf21, 0x1887, 0x278, 0x156a, 0xbfe, 0x765, 0x12f7, 0x4da, 0x16ce, 0x7c1, 0x1c04, 0x1773, 0x853, 0xab7, 0xe1d, 0x1a}} #elif RADIX == 32 -{0xc1c62a7, 0xee41588, 0xdfe6bcf, 0x7cd644, 0x8a9249a, 0xd60b5ff, 0xf21e097, 0x9e310e, 0xabfeab5, 0xd4bdcec, 0x836ce26, 0xb9f010f, 0x56e853b, 0x10875} +{{0xc1c62a7, 0xee41588, 0xdfe6bcf, 0x7cd644, 0x8a9249a, 0xd60b5ff, 0xf21e097, 0x9e310e, 0xabfeab5, 0xd4bdcec, 0x836ce26, 0xb9f010f, 0x56e853b, 0x10875}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6bcfee41588c1c62, 0xa9249a07cd644dfe, 0xef21e097d60b5ff8, 0xcecabfeab509e310, 0xf010f836ce26d4bd, 0x2a7787556e853bb9} +{{0x6bcfee41588c1c62, 0xa9249a07cd644dfe, 
0xef21e097d60b5ff8, 0xcecabfeab509e310, 0xf010f836ce26d4bd, 0x2a7787556e853bb9}} #else -{0x1fdc82b11838c5, 0x681f359137f9af, 0x3eb05affc54924, 0x509e310ef21e09, 0x5a97b9d957fd56, 0x6e7c043e0db389, 0x4fbc3aab7429d} +{{0x1fdc82b11838c5, 0x681f359137f9af, 0x3eb05affc54924, 0x509e310ef21e09, 0x5a97b9d957fd56, 0x6e7c043e0db389, 0x4fbc3aab7429d}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1e36, 0x1718, 0xced, 0x186e, 0x83d, 0x1a23, 0xf5b, 0x5ca, 0x194d, 0x1bd8, 0xb67, 0x9f7, 0x1806, 0x17ae, 0x508, 0x117f, 0x5cc, 0x1809, 0x14b1, 0x85f, 0xcf0, 0x1b0c, 0x1753, 0x1484, 0xb5f, 0x1d62, 0x808, 0x1cc3, 0x844, 0x9} +{{0x1e36, 0x1718, 0xced, 0x186e, 0x83d, 0x1a23, 0xf5b, 0x5ca, 0x194d, 0x1bd8, 0xb67, 0x9f7, 0x1806, 0x17ae, 0x508, 0x117f, 0x5cc, 0x1809, 0x14b1, 0x85f, 0xcf0, 0x1b0c, 0x1753, 0x1484, 0xb5f, 0x1d62, 0x808, 0x1cc3, 0x844, 0x9}} #elif RADIX == 32 -{0xb8c78d9, 0x70dcced, 0xbd11a0f, 0x34b94f5, 0x67dec65, 0x193eeb, 0x508bd76, 0x97322fe, 0xf4b1c04, 0x633c10b, 0x9753d8, 0xb12d7e9, 0x986808e, 0x9113} +{{0xb8c78d9, 0x70dcced, 0xbd11a0f, 0x34b94f5, 0x67dec65, 0x193eeb, 0x508bd76, 0x97322fe, 0xf4b1c04, 0x633c10b, 0x9753d8, 0xb12d7e9, 0x986808e, 0x9113}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x1a0f70dccedb8c78, 0x7dec6534b94f5bd1, 0xe508bd760193eeb6, 0x10bf4b1c0497322f, 0x2d7e909753d8633c, 0x3722113986808eb1} +{{0x1a0f70dccedb8c78, 0x7dec6534b94f5bd1, 0xe508bd760193eeb6, 0x10bf4b1c0497322f, 0x2d7e909753d8633c, 0x3722113986808eb1}} #else -{0x1ee1b99db718f1, 0x14d2e53d6f4468, 0x300c9f75b3ef63, 0x497322fe508bd7, 0xc678217e96380, 0x2c4b5fa425d4f6, 0xb51089cc34047} +{{0x1ee1b99db718f1, 0x14d2e53d6f4468, 0x300c9f75b3ef63, 0x497322fe508bd7, 0xc678217e96380, 0x2c4b5fa425d4f6, 0xb51089cc34047}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1785, 0x1652, 0x4b4, 0x1b37, 0x918, 0x12d, 0x1340, 0x16d3, 0xee, 0xb43, 0x52a, 0x1ff, 0x1e6b, 0x1424, 0x609, 0x1e2c, 0x19bd, 0x18f, 0x174a, 0x134d, 0x6f4, 0xa33, 0x1d5c, 0xa53, 0x73c, 0x361, 0x372, 0x1242, 0x87c, 0x17} +{{0x1785, 0x1652, 0x4b4, 0x1b37, 0x918, 0x12d, 0x1340, 0x16d3, 
0xee, 0xb43, 0x52a, 0x1ff, 0x1e6b, 0x1424, 0x609, 0x1e2c, 0x19bd, 0x18f, 0x174a, 0x134d, 0x6f4, 0xa33, 0x1d5c, 0xa53, 0x73c, 0x361, 0x372, 0x1242, 0x87c, 0x17}} #elif RADIX == 32 -{0xb295e16, 0x366e4b4, 0x96a46, 0xbada734, 0x2a5a183, 0x9ac3fe5, 0x609a127, 0xe6f7c58, 0xb74a0c7, 0x99bd269, 0xa7d5c51, 0xb09cf14, 0x4843721, 0x381f2} +{{0xb295e16, 0x366e4b4, 0x96a46, 0xbada734, 0x2a5a183, 0x9ac3fe5, 0x609a127, 0xe6f7c58, 0xb74a0c7, 0x99bd269, 0xa7d5c51, 0xb09cf14, 0x4843721, 0x381f2}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6a46366e4b4b295e, 0xa5a183bada734009, 0x8609a1279ac3fe52, 0x269b74a0c7e6f7c5, 0x9cf14a7d5c5199bd, 0x5ce1f24843721b0} +{{0x6a46366e4b4b295e, 0xa5a183bada734009, 0x8609a1279ac3fe52, 0x269b74a0c7e6f7c5, 0x9cf14a7d5c5199bd, 0x5ce1f24843721b0}} #else -{0xc6cdc969652bc, 0xeeb69cd0025a9, 0x3cd61ff2952d0c, 0x7e6f7c58609a12, 0x3337a4d36e9418, 0x6c273c529f5714, 0x2e70f92421b90} +{{0xc6cdc969652bc, 0xeeb69cd0025a9, 0x3cd61ff2952d0c, 0x7e6f7c58609a12, 0x3337a4d36e9418, 0x6c273c529f5714, 0x2e70f92421b90}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -480,261 +480,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x194c, 0x43, 0xc70, 0x1c7a, 0xa7a, 0xdf7, 0x1564, 0x1809, 0x8e8, 0x3a5, 0x1399, 0xf0a, 0x914, 0x1a27, 0xb1c, 0xcd0, 0xfc, 0xaa4, 0xd87, 0x1ed2, 0x2c0, 0x8e4, 0x1b93, 0x1a3f, 0x1d9b, 0x1a00, 0xbce, 0x17d2, 0x1a07, 0xf} +{{0x194c, 0x43, 0xc70, 0x1c7a, 0xa7a, 0xdf7, 0x1564, 0x1809, 0x8e8, 0x3a5, 0x1399, 0xf0a, 0x914, 0x1a27, 0xb1c, 0xcd0, 0xfc, 0xaa4, 0xd87, 0x1ed2, 0x2c0, 0x8e4, 0x1b93, 0x1a3f, 0x1d9b, 0x1a00, 0xbce, 0x17d2, 0x1a07, 0xf}} #elif RADIX == 32 -{0x21e531, 0xb8f4c70, 0x46fba9e, 0xa301356, 0x991d2a3, 0x451e153, 0xb1cd13a, 0x3f19a0, 0x4d87552, 0x20b03da, 0x7fb9347, 0x766f4, 0xfa4bced, 0x3d81e} +{{0x21e531, 0xb8f4c70, 0x46fba9e, 0xa301356, 0x991d2a3, 0x451e153, 0xb1cd13a, 0x3f19a0, 0x4d87552, 0x20b03da, 0x7fb9347, 0x766f4, 0xfa4bced, 0x3d81e}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xba9eb8f4c70021e5, 
0x91d2a3a30135646f, 0xb1cd13a451e1539, 0x3da4d8755203f19a, 0x766f47fb934720b0, 0xcae81efa4bced00} +{{0xba9eb8f4c70021e5, 0x91d2a3a30135646f, 0xb1cd13a451e1539, 0x3da4d8755203f19a, 0x766f47fb934720b0, 0xcae81efa4bced00}} #else -{0x3d71e98e0043ca, 0xe8c04d591beea, 0x5228f0a9cc8e95, 0x203f19a0b1cd13, 0x641607b49b0eaa, 0x401d9bd1fee4d1, 0x65740f7d25e76} +{{0x3d71e98e0043ca, 0xe8c04d591beea, 0x5228f0a9cc8e95, 0x203f19a0b1cd13, 0x641607b49b0eaa, 0x401d9bd1fee4d1, 0x65740f7d25e76}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x1ed1, 0x10, 0x131c, 0x171e, 0x1a9e, 0x37d, 0xd59, 0x602, 0xa3a, 0x8e9, 0x14e6, 0x3c2, 0x1a45, 0x689, 0x2c7, 0x334, 0x3f, 0x1aa9, 0x1361, 0x7b4, 0xb0, 0x1a39, 0x1ee4, 0x1e8f, 0x766, 0x1680, 0x12f3, 0x1df4, 0x1e81, 0x4} +{{0x1ed1, 0x10, 0x131c, 0x171e, 0x1a9e, 0x37d, 0xd59, 0x602, 0xa3a, 0x8e9, 0x14e6, 0x3c2, 0x1a45, 0x689, 0x2c7, 0x334, 0x3f, 0x1aa9, 0x1361, 0x7b4, 0xb0, 0x1a39, 0x1ee4, 0x1e8f, 0x766, 0x1680, 0x12f3, 0x1df4, 0x1e81, 0x4}} #elif RADIX == 32 -{0x87b44, 0xae3d31c, 0x91beea7, 0xe8c04d5, 0xe6474a8, 0x9147854, 0x2c7344e, 0x80fc668, 0x9361d54, 0xc82c0f6, 0x1fee4d1, 0x401d9bd, 0xbe92f3b, 0x27a07} +{{0x87b44, 0xae3d31c, 0x91beea7, 0xe8c04d5, 0xe6474a8, 0x9147854, 0x2c7344e, 0x80fc668, 0x9361d54, 0xc82c0f6, 0x1fee4d1, 0x401d9bd, 0xbe92f3b, 0x27a07}} #elif RADIX == 64 #if 
defined(SQISIGN_GF_IMPL_BROADWELL) -{0xeea7ae3d31c0087b, 0x6474a8e8c04d591b, 0x82c7344e9147854e, 0xf69361d5480fc66, 0x1d9bd1fee4d1c82c, 0x116ba07be92f3b40} +{{0xeea7ae3d31c0087b, 0x6474a8e8c04d591b, 0x82c7344e9147854e, 0xf69361d5480fc66, 0x1d9bd1fee4d1c82c, 0x116ba07be92f3b40}} #else -{0x4f5c7a638010f6, 0x23a30135646fba, 0x748a3c2a7323a5, 0x480fc6682c7344, 0x390581ed26c3aa, 0x500766f47fb934, 0x8b5d03df4979d} +{{0x4f5c7a638010f6, 0x23a30135646fba, 0x748a3c2a7323a5, 0x480fc6682c7344, 0x390581ed26c3aa, 0x500766f47fb934, 0x8b5d03df4979d}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x187c, 0x10c9, 0xfda, 0x189b, 0x3b, 0xbcd, 0x16ab, 0xabe, 0x102, 0x19b7, 0x288, 0x1c7e, 0x1ee8, 0x452, 0x853, 0x1b5a, 0x1ca8, 0x1129, 0xd16, 0x168a, 0x1414, 0x6ed, 0xc0, 0xda2, 0x19ae, 0x12fe, 0x1813, 0xdd8, 0x102e, 0x1f} +{{0x187c, 0x10c9, 0xfda, 0x189b, 0x3b, 0xbcd, 0x16ab, 0xabe, 0x102, 0x19b7, 0x288, 0x1c7e, 0x1ee8, 0x452, 0x853, 0x1b5a, 0x1ca8, 0x1129, 0xd16, 0x168a, 0x1414, 0x6ed, 0xc0, 0xda2, 0x19ae, 0x12fe, 0x1813, 0xdd8, 0x102e, 0x1f}} #elif RADIX == 32 -{0x864e1f3, 0xf136fda, 0xb5e680e, 0x957d6a, 0x88cdb84, 0xba38fc2, 0x8532297, 0xf2a36b4, 0x4d16894, 0x6d052d1, 0x440c037, 0x7f66b9b, 0xbb18139, 0x390b9} +{{0x864e1f3, 0xf136fda, 0xb5e680e, 0x957d6a, 0x88cdb84, 0xba38fc2, 0x8532297, 0xf2a36b4, 0x4d16894, 0x6d052d1, 
0x440c037, 0x7f66b9b, 0xbb18139, 0x390b9}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x680ef136fda864e1, 0x8cdb840957d6ab5e, 0x48532297ba38fc28, 0x2d14d16894f2a36b, 0x66b9b440c0376d05, 0x3dec0b9bb181397f} +{{0x680ef136fda864e1, 0x8cdb840957d6ab5e, 0x48532297ba38fc28, 0x2d14d16894f2a36b, 0x66b9b440c0376d05, 0x3dec0b9bb181397f}} #else -{0x1de26dfb50c9c3, 0x10255f5aad79a0, 0x3dd1c7e14466dc, 0x4f2a36b4853229, 0x6da0a5a29a2d12, 0x5fd9ae6d10300d, 0xeb605cdd8c09c} +{{0x1de26dfb50c9c3, 0x10255f5aad79a0, 0x3dd1c7e14466dc, 0x4f2a36b4853229, 0x6da0a5a29a2d12, 0x5fd9ae6d10300d, 0xeb605cdd8c09c}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1ca3, 0x16ad, 0x12b3, 0x9d7, 0xb37, 0x118b, 0xb22, 0x1662, 0xa8f, 0xd68, 0x6d5, 0x1a1f, 0x1f29, 0x632, 0x1b7e, 0xb6, 0xba7, 0xeca, 0x11ed, 0x13b, 0x18cc, 0x19a2, 0x77, 0x1582, 0x11ff, 0xc5f, 0x7de, 0x4b1, 0x1a7f, 0x18} +{{0x1ca3, 0x16ad, 0x12b3, 0x9d7, 0xb37, 0x118b, 0xb22, 0x1662, 0xa8f, 0xd68, 0x6d5, 0x1a1f, 0x1f29, 0x632, 0x1b7e, 0xb6, 0xba7, 0xeca, 0x11ed, 0x13b, 0x18cc, 0x19a2, 0x77, 0x1582, 0x11ff, 0xc5f, 0x7de, 0x4b1, 0x1a7f, 0x18}} #elif RADIX == 32 -{0xb56f28f, 0xd3af2b3, 0x28c5acd, 0x3ecc4b2, 0xd56b42a, 0xca743e6, 0xb7e3197, 0x2e9c16d, 0x71ed765, 0x1633027, 0x4077cd, 0x2fc7feb, 0x9627de6, 0x39fc} +{{0xb56f28f, 0xd3af2b3, 0x28c5acd, 0x3ecc4b2, 0xd56b42a, 0xca743e6, 0xb7e3197, 0x2e9c16d, 0x71ed765, 0x1633027, 0x4077cd, 0x2fc7feb, 0x9627de6, 0x39fc}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x5acdd3af2b3b56f2, 0x56b42a3ecc4b228c, 0xdb7e3197ca743e6d, 0x2771ed7652e9c16, 0xc7feb04077cd1633, 0x24529fc9627de62f} +{{0x5acdd3af2b3b56f2, 0x56b42a3ecc4b228c, 0xdb7e3197ca743e6d, 0x2771ed7652e9c16, 0xc7feb04077cd1633, 0x24529fc9627de62f}} #else -{0x1ba75e5676ade5, 0x28fb312c8a316b, 0x3e53a1f36ab5a1, 0x52e9c16db7e319, 0x22c6604ee3daec, 0xbf1ffac101df3, 0x1e94fe4b13ef3} +{{0x1ba75e5676ade5, 0x28fb312c8a316b, 0x3e53a1f36ab5a1, 0x52e9c16db7e319, 0x22c6604ee3daec, 0xbf1ffac101df3, 0x1e94fe4b13ef3}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1f7a, 0x1a13, 0x11f4, 
0xaeb, 0x997, 0x12d, 0x315, 0x1d7, 0x2fc, 0x736, 0x927, 0x350, 0x695, 0x14ac, 0x703, 0x1ec7, 0x1567, 0x1527, 0x7ee, 0x1a23, 0x11aa, 0x919, 0x130b, 0x199e, 0x137d, 0x795, 0x4e4, 0x1dc6, 0xa87, 0xd} +{{0x1f7a, 0x1a13, 0x11f4, 0xaeb, 0x997, 0x12d, 0x315, 0x1d7, 0x2fc, 0x736, 0x927, 0x350, 0x695, 0x14ac, 0x703, 0x1ec7, 0x1567, 0x1527, 0x7ee, 0x1a23, 0x11aa, 0x919, 0x130b, 0x199e, 0x137d, 0x795, 0x4e4, 0x1dc6, 0xa87, 0xd}} #elif RADIX == 32 -{0xd09fde9, 0xd5d71f4, 0x5096a65, 0xf03ae31, 0x2739b0b, 0xa546a09, 0x703a561, 0xd59fd8e, 0x67eea93, 0xcc6ab44, 0x3d30b48, 0xcacdf73, 0xb8c4e43, 0x29a1f} +{{0xd09fde9, 0xd5d71f4, 0x5096a65, 0xf03ae31, 0x2739b0b, 0xa546a09, 0x703a561, 0xd59fd8e, 0x67eea93, 0xcc6ab44, 0x3d30b48, 0xcacdf73, 0xb8c4e43, 0x29a1f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6a65d5d71f4d09fd, 0x739b0bf03ae31509, 0xe703a561a546a092, 0xb4467eea93d59fd8, 0xcdf733d30b48cc6a, 0x3b52a1fb8c4e43ca} +{{0x6a65d5d71f4d09fd, 0x739b0bf03ae31509, 0xe703a561a546a092, 0xb4467eea93d59fd8, 0xcdf733d30b48cc6a, 0x3b52a1fb8c4e43ca}} #else -{0x4babae3e9a13fb, 0x2fc0eb8c5425a9, 0xd2a3504939cd8, 0x3d59fd8e703a56, 0x198d5688cfdd52, 0x72b37dccf4c2d2, 0xd6950fdc62721} +{{0x4babae3e9a13fb, 0x2fc0eb8c5425a9, 0xd2a3504939cd8, 0x3d59fd8e703a56, 0x198d5688cfdd52, 0x72b37dccf4c2d2, 0xd6950fdc62721}} #endif #endif , #if 0 #elif RADIX == 16 -{0xa54, 0x1685, 0x1b20, 0x1632, 0x1047, 0x159e, 0x14a0, 0x94c, 0x3c8, 0x793, 0x3a2, 0x1938, 0x1899, 0x15b7, 0xefa, 0xcc8, 0x12c3, 0x1335, 0x4ef, 0x1e93, 0x1861, 0x1602, 0x1d6c, 0x1ae7, 0x187, 0x18b1, 0x857, 0x8da, 0x12f7, 0xa} +{{0xa54, 0x1685, 0x1b20, 0x1632, 0x1047, 0x159e, 0x14a0, 0x94c, 0x3c8, 0x793, 0x3a2, 0x1938, 0x1899, 0x15b7, 0xefa, 0xcc8, 0x12c3, 0x1335, 0x4ef, 0x1e93, 0x1861, 0x1602, 0x1d6c, 0x1ae7, 0x187, 0x18b1, 0x857, 0x8da, 0x12f7, 0xa}} #elif RADIX == 32 -{0xb42a951, 0xec65b20, 0xacf411, 0x212994a, 0xa23c98f, 0x2672703, 0xefaadbe, 0xcb0d990, 0x64ef99a, 0x16187d2, 0xcfd6cb0, 0x58861f5, 0x1b4857c, 0x13bdd} +{{0xb42a951, 0xec65b20, 0xacf411, 0x212994a, 0xa23c98f, 0x2672703, 0xefaadbe, 0xcb0d990, 0x64ef99a, 0x16187d2, 0xcfd6cb0, 0x58861f5, 0x1b4857c, 0x13bdd}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf411ec65b20b42a9, 0x23c98f212994a0ac, 0xefaadbe2672703a, 0x7d264ef99acb0d99, 0x861f5cfd6cb01618, 0x14a4bdd1b4857c58} +{{0xf411ec65b20b42a9, 0x23c98f212994a0ac, 0xefaadbe2672703a, 0x7d264ef99acb0d99, 0x861f5cfd6cb01618, 0x14a4bdd1b4857c58}} #else -{0x23d8cb64168552, 0x3c84a65282b3d0, 0x71339381d11e4c, 0x2cb0d990efaadb, 0x2c30fa4c9df33, 0x162187d73f5b2c, 0xa525ee8da42be} +{{0x23d8cb64168552, 0x3c84a65282b3d0, 0x71339381d11e4c, 0x2cb0d990efaadb, 0x2c30fa4c9df33, 0x162187d73f5b2c, 0xa525ee8da42be}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1e6b, 0x111, 0x74d, 0xb04, 0x738, 0x178f, 0xdc5, 0x835, 0x724, 0xaf9, 0xf3c, 0x1855, 0x266, 0x1b16, 0x1cf0, 0x1aa3, 0x32f, 0xce, 0x1f26, 0x16ba, 0x1cb6, 0x9b8, 0x12de, 0x1cef, 0x1a72, 0x1d68, 0xa02, 0x1c67, 0xa67, 0x13} +{{0x1e6b, 0x111, 0x74d, 0xb04, 0x738, 0x178f, 0xdc5, 0x835, 0x724, 0xaf9, 0xf3c, 0x1855, 0x266, 0x1b16, 0x1cf0, 0x1aa3, 0x32f, 0xce, 0x1f26, 0x16ba, 0x1cb6, 0x9b8, 0x12de, 0x1cef, 0x1a72, 0x1d68, 0xa02, 0x1c67, 0xa67, 0x13}} #elif RADIX == 32 -{0x88f9ae, 0x160874d, 0x5bc79ce, 0x9106adc, 0x3c57c9c, 0x99b0aaf, 0xcf0d8b0, 0xcbf547, 0x5f26067, 0xc72dad7, 0xdf2de4d, 0xb469cb9, 0x8cea02e, 0x1899f} +{{0x88f9ae, 0x160874d, 0x5bc79ce, 0x9106adc, 0x3c57c9c, 0x99b0aaf, 0xcf0d8b0, 0xcbf547, 0x5f26067, 0xc72dad7, 0xdf2de4d, 0xb469cb9, 0x8cea02e, 0x1899f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x79ce160874d088f9, 0xc57c9c9106adc5bc, 0x7cf0d8b099b0aaf3, 0xad75f260670cbf54, 0x69cb9df2de4dc72d, 0x2c4699f8cea02eb4} +{{0x79ce160874d088f9, 0xc57c9c9106adc5bc, 0x7cf0d8b099b0aaf3, 0xad75f260670cbf54, 0x69cb9df2de4dc72d, 0x2c4699f8cea02eb4}} #else -{0x1c2c10e9a111f3, 0x72441ab716f1e7, 0x4cd85579e2be4, 0x70cbf547cf0d8b, 0x38e5b5aebe4c0c, 0x2d1a72e77cb793, 0x5e34cfc675017} +{{0x1c2c10e9a111f3, 0x72441ab716f1e7, 0x4cd85579e2be4, 0x70cbf547cf0d8b, 0x38e5b5aebe4c0c, 0x2d1a72e77cb793, 0x5e34cfc675017}} #endif #endif , #if 0 #elif RADIX == 16 -{0x12d6, 0x1c7a, 0x9bb, 0x1ce1, 0x1ca, 0xf3f, 0x1036, 0x19a6, 0x1c79, 0x5bf, 0x3, 0x1a92, 0x1d08, 0xeaa, 0x11e8, 0xab1, 0x1ed2, 0x80c, 0x10c9, 0x1517, 0xc18, 0x1513, 0x1dff, 0xc00, 0x16a0, 0x14ce, 0x72d, 0x1a86, 0xd45, 0x19} +{{0x12d6, 0x1c7a, 0x9bb, 0x1ce1, 0x1ca, 0xf3f, 0x1036, 0x19a6, 0x1c79, 0x5bf, 0x3, 0x1a92, 0x1d08, 0xeaa, 0x11e8, 0xab1, 0x1ed2, 0x80c, 0x10c9, 0x1517, 0xc18, 0x1513, 0x1dff, 0xc00, 0x16a0, 0x14ce, 0x72d, 0x1a86, 0xd45, 0x19}} #elif RADIX == 32 -{0xe3d4b5b, 0xb9c29bb, 0x679f872, 0xe734d03, 0x32dff1, 0x4235240, 0x1e87557, 0x7b49563, 0xf0c9406, 0x9b062a2, 0x1dffa8, 0x675a818, 0x50c72da, 0x8517} +{{0xe3d4b5b, 0xb9c29bb, 0x679f872, 0xe734d03, 0x32dff1, 0x4235240, 0x1e87557, 0x7b49563, 0xf0c9406, 0x9b062a2, 0x1dffa8, 0x675a818, 0x50c72da, 0x8517}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf872b9c29bbe3d4b, 0x32dff1e734d03679, 0x31e8755742352400, 0x2a2f0c94067b4956, 0x5a81801dffa89b06, 0x172351750c72da67} +{{0xf872b9c29bbe3d4b, 0x32dff1e734d03679, 0x31e8755742352400, 0x2a2f0c94067b4956, 0x5a81801dffa89b06, 0x172351750c72da67}} #else -{0x657385377c7a96, 0x479cd340d9e7e1, 0x3a11a9200196ff, 0x67b495631e8755, 0x1360c545e19280, 0x19d6a060077fea, 0xb91a8ba86396d} +{{0x657385377c7a96, 0x479cd340d9e7e1, 0x3a11a9200196ff, 0x67b495631e8755, 0x1360c545e19280, 0x19d6a060077fea, 0xb91a8ba86396d}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -956,261 +956,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x9a9, 0x8c7, 0x119d, 0xada, 0x1d98, 0xc97, 0x1a31, 0xdc6, 0x192a, 0xf3c, 0x509, 0xc5b, 0xea7, 0x1eb9, 0x59d, 0x1e3c, 0x114b, 0x88, 0x5a9, 0x1154, 0x18c0, 0x11db, 0x1ba9, 0x3b8, 0x837, 0x18d0, 0x17eb, 0x211, 0x1aa7, 0x11} +{{0x9a9, 0x8c7, 0x119d, 0xada, 0x1d98, 0xc97, 0x1a31, 0xdc6, 0x192a, 0xf3c, 0x509, 0xc5b, 0xea7, 0x1eb9, 0x59d, 0x1e3c, 0x114b, 0x88, 0x5a9, 0x1154, 0x18c0, 0x11db, 0x1ba9, 0x3b8, 0x837, 0x18d0, 0x17eb, 0x211, 0x1aa7, 0x11}} #elif RADIX == 32 -{0x463a6a6, 0x15b519d, 0x164bf66, 0xa9b8da3, 0x979e64, 0xa9d8b65, 0x59df5cb, 0x452fc78, 0x85a9044, 0xde3022a, 0x71ba98e, 0x6820dc7, 0x4237ebc, 0xca9c} +{{0x463a6a6, 0x15b519d, 0x164bf66, 0xa9b8da3, 0x979e64, 0xa9d8b65, 0x59df5cb, 0x452fc78, 0x85a9044, 0xde3022a, 0x71ba98e, 0x6820dc7, 0x4237ebc, 0xca9c}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xbf6615b519d463a6, 0x979e64a9b8da3164, 0x859df5cba9d8b650, 0x22a85a9044452fc7, 0x20dc771ba98ede30, 0x2a32a9c4237ebc68} +{{0xbf6615b519d463a6, 0x979e64a9b8da3164, 0x859df5cba9d8b650, 0x22a85a9044452fc7, 0x20dc771ba98ede30, 0x2a32a9c4237ebc68}} #else -{0x4c2b6a33a8c74d, 0x12a6e368c592fd, 0x5d4ec5b284bcf3, 0x4452fc7859df5c, 0x5bc604550b5208, 0x1a08371dc6ea63, 0x4d954e211bf5e} +{{0x4c2b6a33a8c74d, 0x12a6e368c592fd, 0x5d4ec5b284bcf3, 0x4452fc7859df5c, 0x5bc604550b5208, 0x1a08371dc6ea63, 0x4d954e211bf5e}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x1ae8, 0xa31, 0x1467, 0x2b6, 0x1f66, 0xb25, 0x168c, 0x1371, 0x64a, 0xbcf, 0x1942, 0x1b16, 0xba9, 0xfae, 0x167, 0x1f8f, 0x452, 0x822, 0x16a, 0x455, 0x1e30, 0xc76, 0x6ea, 0x18ee, 0x20d, 0x1e34, 0xdfa, 0x1884, 0x12a9, 0xd} +{{0x1ae8, 0xa31, 0x1467, 0x2b6, 0x1f66, 0xb25, 0x168c, 0x1371, 0x64a, 0xbcf, 0x1942, 0x1b16, 0xba9, 0xfae, 0x167, 0x1f8f, 0x452, 0x822, 0x16a, 0x455, 0x1e30, 0xc76, 0x6ea, 0x18ee, 0x20d, 0x1e34, 0xdfa, 0x1884, 0x12a9, 0xd}} #elif RADIX == 32 -{0x518eba1, 0x856d467, 0xc592fd9, 0x2a6e368, 0x425e799, 0xea762d9, 0x1677d72, 0x114bf1e, 0xa16a411, 0xb78c08a, 0xdc6ea63, 0x1a08371, 0x108dfaf, 0x2baa7} +{{0x518eba1, 0x856d467, 0xc592fd9, 0x2a6e368, 0x425e799, 0xea762d9, 0x1677d72, 0x114bf1e, 0xa16a411, 0xb78c08a, 0xdc6ea63, 0x1a08371, 0x108dfaf, 0x2baa7}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x2fd9856d467518eb, 0x25e7992a6e368c59, 0xe1677d72ea762d94, 0x8aa16a411114bf1, 0x8371dc6ea63b78c, 0x290caa7108dfaf1a} +{{0x2fd9856d467518eb, 0x25e7992a6e368c59, 0xe1677d72ea762d94, 0x8aa16a411114bf1, 0x8371dc6ea63b78c, 0x290caa7108dfaf1a}} #else -{0x330ada8cea31d7, 0x64a9b8da3164bf, 0x1753b16ca12f3c, 0x1114bf1e1677d7, 0x76f1811542d482, 0x46820dc771ba98, 0x4465538846fd7} +{{0x330ada8cea31d7, 0x64a9b8da3164bf, 0x1753b16ca12f3c, 0x1114bf1e1677d7, 0x76f1811542d482, 0x46820dc771ba98, 0x4465538846fd7}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x954, 0x49a, 0xee7, 0x1037, 0x171c, 0x81, 0x448, 0x76f, 0x1615, 0xefe, 0xe70, 0xc54, 0x3d4, 0xc30, 0x1aaf, 0x72c, 0x464, 0x7a7, 0x5b7, 0x1f2a, 0xa98, 0x8db, 0x1689, 0x1cc1, 0x11ae, 0x4bf, 0x1ddc, 0x1f93, 0x1b3e, 0xb} +{{0x954, 0x49a, 0xee7, 0x1037, 0x171c, 0x81, 0x448, 0x76f, 0x1615, 0xefe, 0xe70, 0xc54, 0x3d4, 0xc30, 0x1aaf, 0x72c, 0x464, 0x7a7, 0x5b7, 0x1f2a, 0xa98, 0x8db, 0x1689, 0x1cc1, 0x11ae, 0x4bf, 0x1ddc, 0x1f93, 0x1b3e, 0xb}} #elif RADIX == 32 -{0x24d2551, 0x206eee7, 0x8040dc7, 0x54ede44, 0x7077f58, 0xf518a8e, 0xaaf6180, 0x9190e59, 0x45b73d3, 0xdaa63e5, 0x8368946, 0x5fc6bb9, 0xf27ddc2, 0x1dcfb} +{{0x24d2551, 0x206eee7, 0x8040dc7, 0x54ede44, 0x7077f58, 0xf518a8e, 0xaaf6180, 0x9190e59, 0x45b73d3, 0xdaa63e5, 0x8368946, 0x5fc6bb9, 0xf27ddc2, 0x1dcfb}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xdc7206eee724d25, 0x77f5854ede44804, 0x9aaf6180f518a8e7, 0x3e545b73d39190e5, 0xc6bb98368946daa6, 0x14aecfbf27ddc25f} +{{0xdc7206eee724d25, 0x77f5854ede44804, 0x9aaf6180f518a8e7, 0x3e545b73d39190e5, 0xc6bb98368946daa6, 0x14aecfbf27ddc25f}} #else -{0xe40dddce49a4a, 0x6153b791201037, 0x7a8c547383bfa, 0x39190e59aaf618, 0x5b54c7ca8b6e7a, 0x17f1aee60da251, 0xa5767df93eee1} +{{0xe40dddce49a4a, 0x6153b791201037, 0x7a8c547383bfa, 0x39190e59aaf618, 0x5b54c7ca8b6e7a, 0x17f1aee60da251, 0xa5767df93eee1}} #endif #endif , #if 0 #elif RADIX == 16 -{0xf14, 0xa31, 0x805, 0x19bd, 0x1b37, 0x5d5, 0x1211, 0x9c0, 0x557, 0x6b5, 0x1b2a, 0x775, 0x1a4f, 0x1d9, 0x520, 0x16be, 0x3d, 0x1cae, 0x4ca, 0x1a17, 0x1e64, 0x170b, 0x136, 0x1cd4, 0x150b, 0x1111, 0xf0b, 0x1af9, 0x3ce, 0x1c} +{{0xf14, 0xa31, 0x805, 0x19bd, 0x1b37, 0x5d5, 0x1211, 0x9c0, 0x557, 0x6b5, 0x1b2a, 0x775, 0x1a4f, 0x1d9, 0x520, 0x16be, 0x3d, 0x1cae, 0x4ca, 0x1a17, 0x1e64, 0x170b, 0x136, 0x1cd4, 0x150b, 0x1111, 0xf0b, 0x1af9, 0x3ce, 0x1c}} #elif RADIX == 32 -{0x518bc53, 0xf37a805, 0x12eaecd, 0x5d38121, 0x2a35a95, 0x93ceebb, 0x5200ece, 0xf6d7c, 0xe4cae57, 
0x5f99342, 0xa8136b8, 0x88d42f9, 0x5f2f0b8, 0x1df3b} +{{0x518bc53, 0xf37a805, 0x12eaecd, 0x5d38121, 0x2a35a95, 0x93ceebb, 0x5200ece, 0xf6d7c, 0xe4cae57, 0x5f99342, 0xa8136b8, 0x88d42f9, 0x5f2f0b8, 0x1df3b}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xaecdf37a805518bc, 0xa35a955d3812112e, 0xc5200ece93ceebb2, 0x342e4cae5700f6d7, 0xd42f9a8136b85f99, 0x1530f3b5f2f0b888} +{{0xaecdf37a805518bc, 0xa35a955d3812112e, 0xc5200ece93ceebb2, 0x342e4cae5700f6d7, 0xd42f9a8136b85f99, 0x1530f3b5f2f0b888}} #else -{0x1be6f500aa3178, 0x5574e04844babb, 0x749e775d951ad4, 0x700f6d7c5200ec, 0xbf32685c995ca, 0x22350be6a04dae, 0xa9879daf9785c} +{{0x1be6f500aa3178, 0x5574e04844babb, 0x749e775d951ad4, 0x700f6d7c5200ec, 0xbf32685c995ca, 0x22350be6a04dae, 0xa9879daf9785c}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1b6e, 0x5aa, 0x1bd9, 0x1e85, 0x1615, 0x1629, 0xb8b, 0x1066, 0x1532, 0x19ad, 0xe24, 0xcb8, 0x17fc, 0x2ab, 0x1726, 0x1ad5, 0x1c83, 0x1b32, 0x75e, 0x1794, 0x161d, 0x9c4, 0x11b6, 0x1c02, 0x14bb, 0x15d2, 0x10d5, 0x26b, 0x1765, 0x14} +{{0x1b6e, 0x5aa, 0x1bd9, 0x1e85, 0x1615, 0x1629, 0xb8b, 0x1066, 0x1532, 0x19ad, 0xe24, 0xcb8, 0x17fc, 0x2ab, 0x1726, 0x1ad5, 0x1c83, 0x1b32, 0x75e, 0x1794, 0x161d, 0x9c4, 0x11b6, 0x1c02, 0x14bb, 0x15d2, 0x10d5, 0x26b, 0x1765, 0x14}} #elif RADIX == 32 -{0x2d56dba, 0x7d0bbd9, 0xbb14d85, 0xca0ccb8, 0x24cd6d4, 0xff1970e, 0x726155d, 0x720f5ab, 0x875ed99, 0x25876f2, 0x51b64e, 0xe952ef8, 0x4d70d5a, 0x23d94} +{{0x2d56dba, 0x7d0bbd9, 0xbb14d85, 0xca0ccb8, 0x24cd6d4, 0xff1970e, 0x726155d, 0x720f5ab, 0x875ed99, 0x25876f2, 0x51b64e, 0xe952ef8, 0x4d70d5a, 0x23d94}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x4d857d0bbd92d56d, 0x4cd6d4ca0ccb8bb1, 0xb726155dff1970e2, 0x6f2875ed99720f5a, 0x52ef8051b64e2587, 0x2f5dd944d70d5ae9} +{{0x4d857d0bbd92d56d, 0x4cd6d4ca0ccb8bb1, 0xb726155dff1970e2, 0x6f2875ed99720f5a, 0x52ef8051b64e2587, 0x2f5dd944d70d5ae9}} #else -{0xafa177b25aadb, 0x5328332e2ec536, 0x6ff8cb871266b6, 0x1720f5ab726155, 0x44b0ede50ebdb3, 0x3a54bbe0146d93, 0x76eeca26b86ad} 
+{{0xafa177b25aadb, 0x5328332e2ec536, 0x6ff8cb871266b6, 0x1720f5ab726155, 0x44b0ede50ebdb3, 0x3a54bbe0146d93, 0x76eeca26b86ad}} #endif #endif , #if 0 #elif RADIX == 16 -{0x18aa, 0x459, 0x747, 0x401, 0x14be, 0x13ba, 0xafb, 0x1cb4, 0x636, 0xd10, 0x16ec, 0x1e6e, 0x1ee5, 0x1475, 0xf82, 0x1695, 0x1a54, 0xe4e, 0x1856, 0x459, 0x752, 0x1d56, 0x15a7, 0xde2, 0x158c, 0x623, 0x17, 0x10d9, 0x1156, 0x19} +{{0x18aa, 0x459, 0x747, 0x401, 0x14be, 0x13ba, 0xafb, 0x1cb4, 0x636, 0xd10, 0x16ec, 0x1e6e, 0x1ee5, 0x1475, 0xf82, 0x1695, 0x1a54, 0xe4e, 0x1856, 0x459, 0x752, 0x1d56, 0x15a7, 0xde2, 0x158c, 0x623, 0x17, 0x10d9, 0x1156, 0x19}} #elif RADIX == 32 -{0x22ce2ab, 0x8802747, 0xb9dd52f, 0xdb968af, 0xec68818, 0xb97cdd6, 0xf82a3af, 0x6952d2a, 0x3856727, 0xb1d488b, 0xc55a7ea, 0x11d631b, 0x1b20173, 0x955a} +{{0x22ce2ab, 0x8802747, 0xb9dd52f, 0xdb968af, 0xec68818, 0xb97cdd6, 0xf82a3af, 0x6952d2a, 0x3856727, 0xb1d488b, 0xc55a7ea, 0x11d631b, 0x1b20173, 0x955a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xd52f880274722ce2, 0xc68818db968afb9d, 0xaf82a3afb97cdd6e, 0x88b38567276952d2, 0xd631bc55a7eab1d4, 0x2b7455a1b2017311} +{{0xd52f880274722ce2, 0xc68818db968afb9d, 0xaf82a3afb97cdd6e, 0x88b38567276952d2, 0xd631bc55a7eab1d4, 0x2b7455a1b2017311}} #else -{0x5f1004e8e459c5, 0x636e5a2bee7754, 0x7dcbe6eb763440, 0x76952d2af82a3a, 0x563a911670ace4, 0x44758c6f1569fa, 0x57a2ad0d900b9} +{{0x5f1004e8e459c5, 0x636e5a2bee7754, 0x7dcbe6eb763440, 0x76952d2af82a3a, 0x563a911670ace4, 0x44758c6f1569fa, 0x57a2ad0d900b9}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1557, 0x1987, 0x65f, 0x1c20, 0x14ef, 0xb3b, 0xbbe, 0x19db, 0xc77, 0x566, 0x9ea, 0xcab, 0xafc, 0x1fda, 0xb44, 0x1fe6, 0x1af3, 0x1829, 0x2ef, 0xc23, 0x83d, 0x82c, 0x1fa8, 0x14b, 0xd6e, 0xde8, 0x260, 0x1019, 0x97a, 0x3} +{{0x1557, 0x1987, 0x65f, 0x1c20, 0x14ef, 0xb3b, 0xbbe, 0x19db, 0xc77, 0x566, 0x9ea, 0xcab, 0xafc, 0x1fda, 0xb44, 0x1fe6, 0x1af3, 0x1829, 0x2ef, 0xc23, 0x83d, 0x82c, 0x1fa8, 0x14b, 0xd6e, 0xde8, 0x260, 0x1019, 0x97a, 0x3}} #elif RADIX == 32 -{0xcc3d55c, 0xf84065f, 
0xe59dd3b, 0xdf3b6bb, 0xea2b331, 0xbf19569, 0xb44fed2, 0xebcffcc, 0x62efc14, 0x620f584, 0x97fa841, 0xf435b82, 0x322606, 0x1a5ea} +{{0xcc3d55c, 0xf84065f, 0xe59dd3b, 0xdf3b6bb, 0xea2b331, 0xbf19569, 0xb44fed2, 0xebcffcc, 0x62efc14, 0x620f584, 0x97fa841, 0xf435b82, 0x322606, 0x1a5ea}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xdd3bf84065fcc3d5, 0xa2b331df3b6bbe59, 0xcb44fed2bf19569e, 0x58462efc14ebcffc, 0x35b8297fa841620f, 0x17765ea0322606f4} +{{0xdd3bf84065fcc3d5, 0xa2b331df3b6bbe59, 0xcb44fed2bf19569e, 0x58462efc14ebcffc, 0x35b8297fa841620f, 0x17765ea0322606f4}} #else -{0x77f080cbf987aa, 0x477cedaef96774, 0x15f8cab4f51599, 0x4ebcffccb44fed, 0x2c41eb08c5df82, 0x3d0d6e0a5fea10, 0xbbb2f50191303} +{{0x77f080cbf987aa, 0x477cedaef96774, 0x15f8cab4f51599, 0x4ebcffccb44fed, 0x2c41eb08c5df82, 0x3d0d6e0a5fea10, 0xbbb2f50191303}} #endif #endif , #if 0 #elif RADIX == 16 -{0xb02, 0xc60, 0x791, 0x1cf7, 0xc15, 0x125a, 0x1697, 0xca1, 0x327, 0x89f, 0xf64, 0xddf, 0xcb7, 0x1977, 0x29f, 0x100a, 0xdac, 0xc8, 0x1e16, 0x1c4e, 0xedf, 0x1ec0, 0x1ac0, 0x1bbd, 0x16ee, 0x106a, 0x35c, 0x11cc, 0xdde, 0x20} +{{0xb02, 0xc60, 0x791, 0x1cf7, 0xc15, 0x125a, 0x1697, 0xca1, 0x327, 0x89f, 0xf64, 0xddf, 0xcb7, 0x1977, 0x29f, 0x100a, 0xdac, 0xc8, 0x1e16, 0x1c4e, 0xedf, 0x1ec0, 0x1ac0, 0x1bbd, 0x16ee, 0x106a, 0x35c, 0x11cc, 0xdde, 0x20}} #elif RADIX == 32 -{0x6302c0b, 0x79ee791, 0x792d305, 0x9d94369, 0x6444f8c, 0x2ddbbef, 0x29fcbbb, 0x36b2014, 0xde16064, 0x3b7f89, 0x7bac0f6, 0x355bbb7, 0x39835c8, 0x4077a} +{{0x6302c0b, 0x79ee791, 0x792d305, 0x9d94369, 0x6444f8c, 0x2ddbbef, 0x29fcbbb, 0x36b2014, 0xde16064, 0x3b7f89, 0x7bac0f6, 0x355bbb7, 0x39835c8, 0x4077a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xd30579ee7916302c, 0x444f8c9d94369792, 0x429fcbbb2ddbbef6, 0xf89de1606436b201, 0x5bbb77bac0f603b7, 0x30b77a39835c835} +{{0xd30579ee7916302c, 0x444f8c9d94369792, 0x429fcbbb2ddbbef6, 0xf89de1606436b201, 0x5bbb77bac0f603b7, 0x30b77a39835c835}} #else -{0xaf3dcf22c6058, 0x327650da5e4b4c, 0x596eddf7b2227c, 0x436b201429fcbb, 0x4076ff13bc2c0c, 0xd56eeddeeb03d, 0x185bbd1cc1ae4} +{{0xaf3dcf22c6058, 0x327650da5e4b4c, 0x596eddf7b2227c, 0x436b201429fcbb, 0x4076ff13bc2c0c, 0xd56eeddeeb03d, 0x185bbd1cc1ae4}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if 
defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -1432,261 +1432,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0xccf, 0xad2, 0x1c72, 0x1f31, 0x155, 0xa3, 0x1062, 0xf6e, 0xe60, 0x87b, 0x1891, 0xb45, 0x1de8, 0x7f8, 0x18a6, 0x1e67, 0x988, 0x1887, 0xca7, 0x216, 0x1daa, 0x1aa5, 0xc3a, 0x11d3, 0x1282, 0x55d, 0x15fc, 0x1132, 0x1c5e, 0x8} +{{0xccf, 0xad2, 0x1c72, 0x1f31, 0x155, 0xa3, 0x1062, 0xf6e, 0xe60, 0x87b, 0x1891, 0xb45, 0x1de8, 0x7f8, 0x18a6, 0x1e67, 0x988, 0x1887, 0xca7, 0x216, 0x1daa, 0x1aa5, 0xc3a, 0x11d3, 0x1282, 0x55d, 0x15fc, 0x1132, 0x1c5e, 0x8}} #elif RADIX == 32 -{0x569333d, 0x7e63c72, 0x2051855, 0x81edd06, 0x9143db9, 0x7a168b8, 0x8a63fc7, 0xa623ccf, 0xcca7c43, 0x2f6a842, 0xa6c3ad5, 0xaeca0a3, 0x2655fc2, 0x617a} +{{0x569333d, 0x7e63c72, 0x2051855, 0x81edd06, 0x9143db9, 0x7a168b8, 0x8a63fc7, 0xa623ccf, 0xcca7c43, 0x2f6a842, 0xa6c3ad5, 0xaeca0a3, 0x2655fc2, 0x617a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x18557e63c7256933, 0x143db981edd06205, 0xf8a63fc77a168b89, 0x842cca7c43a623cc, 0xca0a3a6c3ad52f6a, 0xf8317a2655fc2ae} +{{0x18557e63c7256933, 0x143db981edd06205, 0xf8a63fc77a168b89, 0x842cca7c43a623cc, 0xca0a3a6c3ad52f6a, 0xf8317a2655fc2ae}} #else -{0x2afcc78e4ad266, 0x6607b741881461, 0x3bd0b45c48a1ed, 0x3a623ccf8a63fc, 0x25ed5085994f88, 0x2bb2828e9b0eb5, 0x7c18bd132afe1} +{{0x2afcc78e4ad266, 0x6607b741881461, 0x3bd0b45c48a1ed, 0x3a623ccf8a63fc, 0x25ed5085994f88, 0x2bb2828e9b0eb5, 0x7c18bd132afe1}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x13b1, 0x12b4, 0xf1c, 0xfcc, 0x1855, 0x1028, 0x1418, 0x3db, 0x1b98, 0xa1e, 0xe24, 0x2d1, 0x77a, 0x11fe, 0x1e29, 0x799, 0x1a62, 0x1e21, 0x1329, 0x1085, 0xf6a, 0x16a9, 0x1b0e, 0x1474, 0xca0, 0x157, 0x157f, 0x144c, 0x1317, 0x1b} +{{0x13b1, 0x12b4, 0xf1c, 0xfcc, 0x1855, 0x1028, 0x1418, 0x3db, 0x1b98, 0xa1e, 0xe24, 0x2d1, 0x77a, 0x11fe, 0x1e29, 0x799, 0x1a62, 0x1e21, 0x1329, 0x1085, 0xf6a, 0x16a9, 0x1b0e, 0x1474, 0xca0, 0x157, 0x157f, 0x144c, 0x1317, 0x1b}} #elif RADIX == 32 -{0x95a4ec7, 0x5f98f1c, 0x8814615, 0x607b741, 0x2450f6e, 0xde85a2e, 0xe298ff1, 0xe988f33, 0xb329f10, 0x4bdaa10, 0xe9b0eb5, 0xabb2828, 0x89957f0, 0x19c5e} +{{0x95a4ec7, 0x5f98f1c, 0x8814615, 0x607b741, 0x2450f6e, 0xde85a2e, 0xe298ff1, 0xe988f33, 0xb329f10, 0x4bdaa10, 0xe9b0eb5, 0xabb2828, 0x89957f0, 0x19c5e}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x46155f98f1c95a4e, 0x450f6e607b741881, 0x3e298ff1de85a2e2, 0xa10b329f10e988f3, 0xb2828e9b0eb54bda, 0x32a0c5e89957f0ab} +{{0x46155f98f1c95a4e, 0x450f6e607b741881, 0x3e298ff1de85a2e2, 0xa10b329f10e988f3, 0xb2828e9b0eb54bda, 0x32a0c5e89957f0ab}} #else -{0x2abf31e392b49d, 0x3981edd0620518, 0xef42d1712287b, 0xe988f33e298ff, 0x297b54216653e2, 0x2aeca0a3a6c3ad, 0x91062f44cabf8} +{{0x2abf31e392b49d, 0x3981edd0620518, 0xef42d1712287b, 0xe988f33e298ff, 0x297b54216653e2, 0x2aeca0a3a6c3ad, 0x91062f44cabf8}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0xdd8, 0x13bc, 0x17ae, 0x83e, 0x10c6, 0x1a72, 0x270, 0x84, 0xb92, 0x431, 0x1fdf, 0x9cf, 0x2a9, 0x121d, 0x5d5, 0x1d9f, 0xa48, 0xec9, 0xcfc, 0x6ee, 0x1812, 0x66b, 0xed8, 0xf7, 0x117b, 0x1fb7, 0xc5, 0x1f00, 0x134f, 0x1f} +{{0xdd8, 0x13bc, 0x17ae, 0x83e, 0x10c6, 0x1a72, 0x270, 0x84, 0xb92, 0x431, 0x1fdf, 0x9cf, 0x2a9, 0x121d, 0x5d5, 0x1d9f, 0xa48, 0xec9, 0xcfc, 0x6ee, 0x1812, 0x66b, 0xed8, 0xf7, 0x117b, 0x1fb7, 0xc5, 0x1f00, 0x134f, 0x1f}} #elif RADIX == 32 -{0x9de3763, 0x907d7ae, 0xd39431, 0x4810827, 0xdf218ae, 0xaa539ff, 0x5d590e8, 0xa923b3e, 0xccfc764, 0x5e048dd, 0xeeed833, 0xdbc5ec1, 0xe000c5f, 0x39d3f} +{{0x9de3763, 0x907d7ae, 0xd39431, 0x4810827, 0xdf218ae, 0xaa539ff, 0x5d590e8, 0xa923b3e, 0xccfc764, 0x5e048dd, 0xeeed833, 0xdbc5ec1, 0xe000c5f, 0x39d3f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x9431907d7ae9de37, 0xf218ae48108270d3, 0xe5d590e8aa539ffd, 0x8ddccfc764a923b3, 0xc5ec1eeed8335e04, 0x195cd3fe000c5fdb} +{{0x9431907d7ae9de37, 0xf218ae48108270d3, 0xe5d590e8aa539ffd, 0x8ddccfc764a923b3, 0xc5ec1eeed8335e04, 0x195cd3fe000c5fdb}} #else -{0x6320faf5d3bc6e, 0x39204209c34e50, 0x45529cffef90c5, 0x4a923b3e5d590e, 0x6bc091bb99f8ec, 0x76f17b07bbb60c, 0xcae69ff00062f} +{{0x6320faf5d3bc6e, 0x39204209c34e50, 0x45529cffef90c5, 0x4a923b3e5d590e, 0x6bc091bb99f8ec, 0x76f17b07bbb60c, 0xcae69ff00062f}} #endif #endif , #if 0 #elif RADIX == 16 -{0xf36, 0x2c8, 0x1ab4, 0x17c1, 0x10be, 0x1a20, 0x1baf, 0x3ce, 0x1088, 0xd75, 0x1e25, 0x10f8, 0x3d2, 0x1b8, 0x9c7, 0x168, 0x44c, 0x372, 0xc50, 0x1d9a, 0x1b99, 0xab9, 0x8af, 0x657, 0xe84, 0xe1d, 0x1675, 0x47, 0x157e, 0xc} +{{0xf36, 0x2c8, 0x1ab4, 0x17c1, 0x10be, 0x1a20, 0x1baf, 0x3ce, 0x1088, 0xd75, 0x1e25, 0x10f8, 0x3d2, 0x1b8, 0x9c7, 0x168, 0x44c, 0x372, 0xc50, 0x1d9a, 0x1b99, 0xab9, 0x8af, 0x657, 0xe84, 0xe1d, 0x1675, 0x47, 0x157e, 0xc}} #elif RADIX == 32 -{0x1643cd9, 0xaf83ab4, 0xfd1042f, 0x2079dba, 0x256bac2, 0xf4a1f1e, 0x9c70dc0, 0x11302d0, 0x4c501b9, 0xcee67b3, 0xae8af55, 0xeba10c, 0x8f6757, 0x245f8} +{{0x1643cd9, 0xaf83ab4, 0xfd1042f, 0x2079dba, 0x256bac2, 0xf4a1f1e, 0x9c70dc0, 0x11302d0, 0x4c501b9, 0xcee67b3, 0xae8af55, 0xeba10c, 0x8f6757, 0x245f8}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x42faf83ab41643c, 0x56bac22079dbafd1, 0x9c70dc0f4a1f1e2, 0x7b34c501b911302d, 0xba10cae8af55cee6, 0x373d5f808f67570e} +{{0x42faf83ab41643c, 0x56bac22079dbafd1, 0x9c70dc0f4a1f1e2, 0x7b34c501b911302d, 0xba10cae8af55cee6, 0x373d5f808f67570e}} #else -{0x5f5f075682c879, 0x881e76ebf4410, 0x7a50f8f12b5d6, 0x111302d09c70dc, 0x39dccf6698a037, 0x43ae8432ba2bd5, 0xb5eafc047b3ab} +{{0x5f5f075682c879, 0x881e76ebf4410, 0x7a50f8f12b5d6, 0x111302d09c70dc, 0x39dccf6698a037, 0x43ae8432ba2bd5, 0xb5eafc047b3ab}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x4b0, 0x31c, 0x92f, 0xf0d, 0xbc1, 0x1e89, 0x4ce, 0x1480, 0xdee, 0x504, 0x970, 0x16c3, 0xcb6, 0xae7, 0x1147, 0x8c, 0xc2a, 0x1ff9, 0x7d8, 0xfe9, 0x1fb1, 0x748, 0x998, 0xb85, 0x1a8e, 0x19c7, 0x5f7, 0x103c, 0x12a4, 0xe} +{{0x4b0, 0x31c, 0x92f, 0xf0d, 0xbc1, 0x1e89, 0x4ce, 0x1480, 0xdee, 0x504, 0x970, 0x16c3, 0xcb6, 0xae7, 0x1147, 0x8c, 0xc2a, 0x1ff9, 0x7d8, 0xfe9, 0x1fb1, 0x748, 0x998, 0xb85, 0x1a8e, 0x19c7, 0x5f7, 0x103c, 0x12a4, 0xe}} #elif RADIX == 32 -{0x18e12c1, 0x5e1a92f, 0xef44af0, 0xba9004c, 0x7028237, 0x2dad869, 0x147573b, 0xb0a8119, 0x27d8ffc, 0x47ec5fd, 0xa9983a, 0xe3ea397, 0x785f7c, 0x33a92} +{{0x18e12c1, 0x5e1a92f, 0xef44af0, 0xba9004c, 0x7028237, 0x2dad869, 0x147573b, 0xb0a8119, 0x27d8ffc, 0x47ec5fd, 0xa9983a, 0xe3ea397, 0x785f7c, 0x33a92}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x4af05e1a92f18e12, 0x28237ba9004cef4, 0x9147573b2dad8697, 0x5fd27d8ffcb0a811, 0xea3970a9983a47ec, 0x3134a920785f7ce3} +{{0x4af05e1a92f18e12, 0x28237ba9004cef4, 0x9147573b2dad8697, 0x5fd27d8ffcb0a811, 0xea3970a9983a47ec, 0x3134a920785f7ce3}} #else -{0x60bc3525e31c25, 0x5eea40133bd12b, 0x596d6c34b81411, 0x4b0a8119147573, 0x48fd8bfa4fb1ff, 0x38fa8e5c2a660e, 0x85a54903c2fbe} +{{0x60bc3525e31c25, 0x5eea40133bd12b, 0x596d6c34b81411, 0x4b0a8119147573, 0x48fd8bfa4fb1ff, 0x38fa8e5c2a660e, 0x85a54903c2fbe}} #endif #endif , #if 0 #elif RADIX == 16 -{0x15a9, 0x1ae1, 0x1dd2, 0xa61, 0x1259, 0xfad, 0xe49, 0x1f6d, 0xd9a, 0x1371, 0xee7, 0x1179, 0x1bcf, 0x876, 0x3ca, 0xf7c, 0x1192, 0x315, 0x916, 0x1aa5, 0x1ca9, 0x10cb, 0xe32, 0x18b9, 0xf58, 0x1932, 0x1cce, 0x1ba7, 0x1377, 0x6} +{{0x15a9, 0x1ae1, 0x1dd2, 0xa61, 0x1259, 0xfad, 0xe49, 0x1f6d, 0xd9a, 0x1371, 0xee7, 0x1179, 0x1bcf, 0x876, 0x3ca, 0xf7c, 0x1192, 0x315, 0x916, 0x1aa5, 0x1ca9, 0x10cb, 0xe32, 0x18b9, 0xf58, 0x1932, 0x1cce, 0x1ba7, 0x1377, 0x6}} #elif RADIX == 32 -{0xd70d6a4, 0x54c3dd2, 0x97d6c96, 0x6bedae4, 0xe79b8b6, 0xf3e2f2e, 0x3ca43b6, 0xc649ef8, 0xa91618a, 0x5f2a754, 0x72e3286, 0x993d631, 0x74fccec, 0x34ddf} +{{0xd70d6a4, 0x54c3dd2, 0x97d6c96, 0x6bedae4, 0xe79b8b6, 0xf3e2f2e, 0x3ca43b6, 0xc649ef8, 0xa91618a, 0x5f2a754, 0x72e3286, 0x993d631, 0x74fccec, 0x34ddf}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6c9654c3dd2d70d6, 0x79b8b66bedae497d, 0x83ca43b6f3e2f2ee, 0x754a91618ac649ef, 0x3d63172e32865f2a, 0x29d8ddf74fccec99} +{{0x6c9654c3dd2d70d6, 0x79b8b66bedae497d, 0x83ca43b6f3e2f2ee, 0x754a91618ac649ef, 
0x3d63172e32865f2a, 0x29d8ddf74fccec99}} #else -{0x2ca987ba5ae1ad, 0x59afb6b925f5b2, 0x379f179773cdc5, 0x2c649ef83ca43b, 0x4be54ea9522c31, 0x264f58c5cb8ca1, 0x4ac6efba7e676} +{{0x2ca987ba5ae1ad, 0x59afb6b925f5b2, 0x379f179773cdc5, 0x2c649ef83ca43b, 0x4be54ea9522c31, 0x264f58c5cb8ca1, 0x4ac6efba7e676}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1f79, 0xcad, 0x18f2, 0x1ba7, 0x1d14, 0x1fc6, 0x197d, 0x522, 0xab, 0x7bd, 0x57b, 0x1fbf, 0x12, 0xb50, 0x425, 0x1aa3, 0x1c8e, 0x11cf, 0x1c1b, 0x1774, 0x3fc, 0x36a, 0x148f, 0x1fd3, 0x608, 0x1711, 0x1142, 0xcfa, 0xd43, 0xd} +{{0x1f79, 0xcad, 0x18f2, 0x1ba7, 0x1d14, 0x1fc6, 0x197d, 0x522, 0xab, 0x7bd, 0x57b, 0x1fbf, 0x12, 0xb50, 0x425, 0x1aa3, 0x1c8e, 0x11cf, 0x1c1b, 0x1774, 0x3fc, 0x36a, 0x148f, 0x1fd3, 0x608, 0x1711, 0x1142, 0xcfa, 0xd43, 0xd}} #elif RADIX == 32 -{0x656fde5, 0x374f8f2, 0xdfe3745, 0xaca4597, 0x7b3de82, 0x4bf7e5, 0x4255a80, 0xf23b546, 0x9c1b8e7, 0x50ff2ee, 0xa748f1b, 0x889823f, 0x9f5142b, 0x2a50d} +{{0x656fde5, 0x374f8f2, 0xdfe3745, 0xaca4597, 0x7b3de82, 0x4bf7e5, 0x4255a80, 0xf23b546, 0x9c1b8e7, 0x50ff2ee, 0xa748f1b, 0x889823f, 0x9f5142b, 0x2a50d}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3745374f8f2656fd, 0xb3de82aca4597dfe, 0x64255a8004bf7e57, 0x2ee9c1b8e7f23b54, 0x9823fa748f1b50ff, 0x3a4f50d9f5142b88} +{{0x3745374f8f2656fd, 0xb3de82aca4597dfe, 0x64255a8004bf7e57, 0x2ee9c1b8e7f23b54, 0x9823fa748f1b50ff, 0x3a4f50d9f5142b88}} #else -{0xa6e9f1e4cadfb, 0xab29165f7f8dd, 0x25fbf2bd9ef4, 0x7f23b5464255a8, 0x6a1fe5dd38371c, 0x622608fe9d23c6, 0xce7a86cfa8a15} +{{0xa6e9f1e4cadfb, 0xab29165f7f8dd, 0x25fbf2bd9ef4, 0x7f23b5464255a8, 0x6a1fe5dd38371c, 0x622608fe9d23c6, 0xce7a86cfa8a15}} #endif #endif , #if 0 #elif RADIX == 16 -{0x14a, 0x1236, 0x839, 0xe2, 0xe2d, 0xe17, 0x1b8f, 0x18dd, 0xb20, 0xeb8, 0x1da9, 0xc53, 0x12e8, 0x146, 0x1b9b, 0x154, 0x1121, 0x1049, 0x105d, 0x631, 0xc9, 0xbe0, 0x8fa, 0xbc0, 0x34b, 0x178a, 0x77b, 0x2a7, 0x105b, 0x15} +{{0x14a, 0x1236, 0x839, 0xe2, 0xe2d, 0xe17, 0x1b8f, 0x18dd, 0xb20, 0xeb8, 0x1da9, 0xc53, 0x12e8, 0x146, 0x1b9b, 
0x154, 0x1121, 0x1049, 0x105d, 0x631, 0xc9, 0xbe0, 0x8fa, 0xbc0, 0x34b, 0x178a, 0x77b, 0x2a7, 0x105b, 0x15}} #elif RADIX == 32 -{0x91b052a, 0x41c4839, 0xf70bb8b, 0x831bbb8, 0xa975c2c, 0xba18a7d, 0xb9b0a34, 0xc4842a9, 0x305d824, 0x324c6, 0x808fa5f, 0xc50d2d7, 0x54e77bb, 0x2a16c} +{{0x91b052a, 0x41c4839, 0xf70bb8b, 0x831bbb8, 0xa975c2c, 0xba18a7d, 0xb9b0a34, 0xc4842a9, 0x305d824, 0x324c6, 0x808fa5f, 0xc50d2d7, 0x54e77bb, 0x2a16c}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xbb8b41c483991b05, 0x975c2c831bbb8f70, 0x9b9b0a34ba18a7da, 0x4c6305d824c4842a, 0xd2d7808fa5f0032, 0xad416c54e77bbc5} +{{0xbb8b41c483991b05, 0x975c2c831bbb8f70, 0x9b9b0a34ba18a7da, 0x4c6305d824c4842a, 0xd2d7808fa5f0032, 0xad416c54e77bbc5}} #else -{0x1683890732360a, 0x320c6eee3dc2ee, 0x25d0c53ed4bae1, 0x4c4842a9b9b0a3, 0x6006498c60bb04, 0x71434b5e023e97, 0x56a0b62a73bdd} +{{0x1683890732360a, 0x320c6eee3dc2ee, 0x25d0c53ed4bae1, 0x4c4842a9b9b0a3, 0x6006498c60bb04, 0x71434b5e023e97, 0x56a0b62a73bdd}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -1908,261 +1908,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x10c4, 0x1e1a, 0x1b68, 0xa71, 0xa1a, 0x1f60, 0xdda, 0xb59, 0x1bc0, 0x26c, 0x1325, 0x4cf, 0x1375, 0x10ef, 0x1702, 0x1eff, 0x171f, 0x467, 0xc1c, 0xf3b, 0xf74, 0x6fa, 0x177f, 0x911, 0x8bc, 0x16b7, 0x1022, 0xa5f, 0xa55, 0x9} +{{0x10c4, 0x1e1a, 0x1b68, 0xa71, 0xa1a, 0x1f60, 0xdda, 0xb59, 0x1bc0, 0x26c, 0x1325, 0x4cf, 0x1375, 0x10ef, 0x1702, 0x1eff, 0x171f, 0x467, 0xc1c, 0xf3b, 0xf74, 0x6fa, 0x177f, 0x911, 0x8bc, 0x16b7, 0x1022, 0xa5f, 0xa55, 0x9}} #elif RADIX == 32 -{0xf0d4311, 0x94e3b68, 0xafb0286, 0x16b2dd, 0x251366f, 0xdd499f3, 0x702877c, 0xdc7fdff, 0x6c1c233, 0xd3dd1e7, 0x2377f37, 0x5ba2f12, 0x4bf022b, 0x9955} +{{0xf0d4311, 0x94e3b68, 0xafb0286, 0x16b2dd, 0x251366f, 0xdd499f3, 0x702877c, 0xdc7fdff, 0x6c1c233, 0xd3dd1e7, 0x2377f37, 0x5ba2f12, 0x4bf022b, 0x9955}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x28694e3b68f0d43, 0x51366f016b2ddafb, 0xf702877cdd499f32, 
0x1e76c1c233dc7fdf, 0xa2f122377f37d3dd, 0x45a9554bf022b5b} +{{0x28694e3b68f0d43, 0x51366f016b2ddafb, 0xf702877cdd499f32, 0x1e76c1c233dc7fdf, 0xa2f122377f37d3dd, 0x45a9554bf022b5b}} #else -{0xd29c76d1e1a86, 0x3c05acb76bec0a, 0x66ea4cf99289b3, 0x3dc7fdff702877, 0x7a7ba3ced83846, 0x56e8bc488ddfcd, 0x22d4aaa5f8115} +{{0xd29c76d1e1a86, 0x3c05acb76bec0a, 0x66ea4cf99289b3, 0x3dc7fdff702877, 0x7a7ba3ced83846, 0x56e8bc488ddfcd, 0x22d4aaa5f8115}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x14af, 0x786, 0xeda, 0x129c, 0x286, 0x17d8, 0xb76, 0x2d6, 0x6f0, 0x89b, 0x1cc9, 0x933, 0x1cdd, 0x143b, 0x1dc0, 0x1fbf, 0x1dc7, 0x119, 0x1b07, 0x3ce, 0x13dd, 0x19be, 0xddf, 0x244, 0x1a2f, 0x15ad, 0x1c08, 0xa97, 0xa95, 0x3} +{{0x14af, 0x786, 0xeda, 0x129c, 0x286, 0x17d8, 0xb76, 0x2d6, 0x6f0, 0x89b, 0x1cc9, 0x933, 0x1cdd, 0x143b, 0x1dc0, 0x1fbf, 0x1dc7, 0x119, 0x1b07, 0x3ce, 0x13dd, 0x19be, 0xddf, 0x244, 0x1a2f, 0x15ad, 0x1c08, 0xa97, 0xa95, 0x3}} #elif RADIX == 32 -{0x3c352bc, 0xa538eda, 0x6bec0a1, 0xc05acb7, 0xc944d9b, 0x375267c, 0xdc0a1df, 0xf71ff7f, 0xdb0708c, 0xf4f7479, 0x88ddfcd, 0xd6e8bc4, 0x52fc08a, 0x1aa55} +{{0x3c352bc, 0xa538eda, 0x6bec0a1, 0xc05acb7, 0xc944d9b, 0x375267c, 0xdc0a1df, 0xf71ff7f, 0xdb0708c, 0xf4f7479, 0x88ddfcd, 0xd6e8bc4, 0x52fc08a, 0x1aa55}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) 
-{0xc0a1a538eda3c352, 0x944d9bc05acb76be, 0xfdc0a1df375267cc, 0x479db0708cf71ff7, 0xe8bc488ddfcdf4f7, 0x2fd6a5552fc08ad6} +{{0xc0a1a538eda3c352, 0x944d9bc05acb76be, 0xfdc0a1df375267cc, 0x479db0708cf71ff7, 0xe8bc488ddfcdf4f7, 0x2fd6a5552fc08ad6}} #else -{0x434a71db4786a5, 0x6f016b2ddafb02, 0x79ba933e64a26c, 0x4f71ff7fdc0a1d, 0x3e9ee8f3b60e11, 0x35ba2f122377f3, 0x7ab52aa97e045} +{{0x434a71db4786a5, 0x6f016b2ddafb02, 0x79ba933e64a26c, 0x4f71ff7fdc0a1d, 0x3e9ee8f3b60e11, 0x35ba2f122377f3, 0x7ab52aa97e045}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0xd3b, 0x1cbd, 0x1177, 0x1087, 0x5d2, 0x1535, 0x1cb5, 0x1372, 0x158a, 0x931, 0x12da, 0x1b9d, 0x44e, 0xa00, 0xb71, 0xe8a, 0x1c57, 0x1a1, 0x5bb, 0x1180, 0x15f0, 0x1ca3, 0x119b, 0x16cc, 0xd3a, 0xaa7, 0xbc3, 0x9fc, 0xb07, 0x1a} +{{0xd3b, 0x1cbd, 0x1177, 0x1087, 0x5d2, 0x1535, 0x1cb5, 0x1372, 0x158a, 0x931, 0x12da, 0x1b9d, 0x44e, 0xa00, 0xb71, 0xe8a, 0x1c57, 0x1a1, 0x5bb, 0x1180, 0x15f0, 0x1ca3, 0x119b, 0x16cc, 0xd3a, 0xaa7, 0xbc3, 0x9fc, 0xb07, 0x1a}} #elif RADIX == 32 -{0xe5eb4ef, 0xa10f177, 0x5a9a974, 0x2a6e5cb, 0xda498d6, 0x13b73b2, 0xb715001, 0xf15dd14, 0x5bb0d0, 0x1d7c230, 0x9919be5, 0x53b4ead, 0x3f8bc35, 0xfc1d} +{{0xe5eb4ef, 0xa10f177, 0x5a9a974, 0x2a6e5cb, 0xda498d6, 0x13b73b2, 0xb715001, 0xf15dd14, 0x5bb0d0, 0x1d7c230, 0x9919be5, 0x53b4ead, 0x3f8bc35, 0xfc1d}} 
#elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xa974a10f177e5eb4, 0xa498d62a6e5cb5a9, 0x4b71500113b73b2d, 0x23005bb0d0f15dd1, 0xb4ead9919be51d7c, 0x3cbec1d3f8bc3553} +{{0xa974a10f177e5eb4, 0xa498d62a6e5cb5a9, 0x4b71500113b73b2d, 0x23005bb0d0f15dd1, 0xb4ead9919be51d7c, 0x3cbec1d3f8bc3553}} #else -{0x69421e2efcbd69, 0x58a9b972d6a6a5, 0x89db9d96d24c6, 0xf15dd14b71500, 0x23af84600b761a, 0x54ed3ab66466f9, 0xe1f60e9fc5e1a} +{{0x69421e2efcbd69, 0x58a9b972d6a6a5, 0x89db9d96d24c6, 0xf15dd14b71500, 0x23af84600b761a, 0x54ed3ab66466f9, 0xe1f60e9fc5e1a}} #endif #endif , #if 0 #elif RADIX == 16 -{0x186, 0x245, 0xa48, 0x11da, 0x1354, 0x9fc, 0x168f, 0xff7, 0x1f2c, 0x6a2, 0x6fb, 0x980, 0x164f, 0xbb8, 0x49c, 0x1ad1, 0x145f, 0x80a, 0xf93, 0x2d8, 0x1846, 0x43, 0x5a9, 0x3a, 0x72e, 0x1e10, 0x741, 0x783, 0x967, 0x1a} +{{0x186, 0x245, 0xa48, 0x11da, 0x1354, 0x9fc, 0x168f, 0xff7, 0x1f2c, 0x6a2, 0x6fb, 0x980, 0x164f, 0xbb8, 0x49c, 0x1ad1, 0x145f, 0x80a, 0xf93, 0x2d8, 0x1846, 0x43, 0x5a9, 0x3a, 0x72e, 0x1e10, 0x741, 0x783, 0x967, 0x1a}} #elif RADIX == 32 -{0x122861b, 0x23b4a48, 0xf4fe4d5, 0xb1fef68, 0xfb3517c, 0x93d3006, 0x49c5dc5, 0x517f5a2, 0xf93405, 0x1e1185b, 0x745a902, 0x81cb80, 0xf06741f, 0xf59c} +{{0x122861b, 0x23b4a48, 0xf4fe4d5, 0xb1fef68, 0xfb3517c, 0x93d3006, 0x49c5dc5, 0x517f5a2, 0xf93405, 0x1e1185b, 0x745a902, 0x81cb80, 0xf06741f, 0xf59c}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xe4d523b4a4812286, 0xb3517cb1fef68f4f, 0x249c5dc593d3006f, 0x85b0f93405517f5a, 0x1cb80745a9021e11, 0x6ea59cf06741f08} +{{0xe4d523b4a4812286, 0xb3517cb1fef68f4f, 0x249c5dc593d3006f, 0x85b0f93405517f5a, 0x1cb80745a9021e11, 0x6ea59cf06741f08}} #else -{0x2a47694902450c, 0x72c7fbda3d3f93, 0x2c9e98037d9a8b, 0x5517f5a249c5dc, 0x43c230b61f2680, 0x42072e01d16a40, 0x3752ce7833a0f} +{{0x2a47694902450c, 0x72c7fbda3d3f93, 0x2c9e98037d9a8b, 0x5517f5a249c5dc, 0x43c230b61f2680, 0x42072e01d16a40, 0x3752ce7833a0f}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1064, 0x8a7, 0x7c, 0x1876, 0xf16, 0x3a0, 0x124, 0x637, 0x11bf, 0x223, 0x6d, 
0x58e, 0xcde, 0xaf, 0x99c, 0x1c62, 0xdcb, 0xe10, 0x7ba, 0x127f, 0x1a23, 0x69a, 0x7bd, 0x238, 0x455, 0x16ac, 0x1147, 0x12a, 0x14c1, 0x5} +{{0x1064, 0x8a7, 0x7c, 0x1876, 0xf16, 0x3a0, 0x124, 0x637, 0x11bf, 0x223, 0x6d, 0x58e, 0xcde, 0xaf, 0x99c, 0x1c62, 0xdcb, 0xe10, 0x7ba, 0x127f, 0x1a23, 0x69a, 0x7bd, 0x238, 0x455, 0x16ac, 0x1147, 0x12a, 0x14c1, 0x5}} #elif RADIX == 32 -{0x453c190, 0xb0ec07c, 0x41d03c5, 0xfcc6e12, 0x6d111c6, 0x378b1c0, 0x99c057b, 0x372f8c4, 0xe7ba708, 0xd688e4f, 0x707bd34, 0x5611544, 0x255147b, 0x2d304} +{{0x453c190, 0xb0ec07c, 0x41d03c5, 0xfcc6e12, 0x6d111c6, 0x378b1c0, 0x99c057b, 0x372f8c4, 0xe7ba708, 0xd688e4f, 0x707bd34, 0x5611544, 0x255147b, 0x2d304}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3c5b0ec07c453c1, 0xd111c6fcc6e1241d, 0x499c057b378b1c06, 0xe4fe7ba708372f8c, 0x11544707bd34d688, 0x24bd304255147b56} +{{0x3c5b0ec07c453c1, 0xd111c6fcc6e1241d, 0x499c057b378b1c06, 0xe4fe7ba708372f8c, 0x11544707bd34d688, 0x24bd304255147b56}} #else -{0xb61d80f88a783, 0x1bf31b8490740f, 0x59bc58e036888e, 0x372f8c499c057, 0x1ad11c9fcf74e1, 0x55845511c1ef4d, 0x21e98212a8a3d} +{{0xb61d80f88a783, 0x1bf31b8490740f, 0x59bc58e036888e, 0x372f8c499c057, 0x1ad11c9fcf74e1, 0x55845511c1ef4d, 0x21e98212a8a3d}} #endif #endif , #if 0 #elif RADIX == 16 -{0xaab, 0x60b, 0x8a0, 0x15d7, 0xbd8, 0x3ab, 0x1641, 0x1771, 0x134a, 0x17a, 0x785, 0x624, 0x1d, 0x1c3d, 0xcb1, 0xb5e, 0x23f, 0xf53, 0x879, 0x5e2, 0x903, 0xaff, 0xf72, 0xa2d, 0x7f4, 0xeb8, 0xd96, 0x1715, 0xffa, 0xa} +{{0xaab, 0x60b, 0x8a0, 0x15d7, 0xbd8, 0x3ab, 0x1641, 0x1771, 0x134a, 0x17a, 0x785, 0x624, 0x1d, 0x1c3d, 0xcb1, 0xb5e, 0x23f, 0xf53, 0x879, 0x5e2, 0x903, 0xaff, 0xf72, 0xa2d, 0x7f4, 0xeb8, 0xd96, 0x1715, 0xffa, 0xa}} #elif RADIX == 32 -{0x305aaad, 0x2bae8a0, 0x11d5af6, 0x2aee364, 0x850bd4d, 0x74c487, 0xcb1e1e8, 0x88fd6bc, 0x48797a9, 0xfa40cbc, 0x5af7257, 0x5c1fd14, 0xe2ad967, 0x12fea} +{{0x305aaad, 0x2bae8a0, 0x11d5af6, 0x2aee364, 0x850bd4d, 0x74c487, 0xcb1e1e8, 0x88fd6bc, 0x48797a9, 0xfa40cbc, 0x5af7257, 0x5c1fd14, 0xe2ad967, 0x12fea}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x5af62bae8a0305aa, 0x50bd4d2aee36411d, 0xccb1e1e8074c4878, 0xcbc48797a988fd6b, 0x1fd145af7257fa40, 0x2bfffeae2ad9675c} +{{0x5af62bae8a0305aa, 0x50bd4d2aee36411d, 0xccb1e1e8074c4878, 0xcbc48797a988fd6b, 0x1fd145af7257fa40, 0x2bfffeae2ad9675c}} #else -{0x6c575d14060b55, 0x34abb8d904756b, 0x403a6243c285ea, 0x188fd6bccb1e1e, 0x7f48197890f2f5, 0x5707f4516bdc95, 0x5bfff57156cb3} +{{0x6c575d14060b55, 0x34abb8d904756b, 0x403a6243c285ea, 0x188fd6bccb1e1e, 0x7f48197890f2f5, 0x5707f4516bdc95, 0x5bfff57156cb3}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x195c, 0x1d55, 0x99f, 0x11f, 0x106b, 0xab1, 0x3e7, 0x1e40, 0xa1e, 0xdf0, 0x1dd4, 0x5cd, 0xfc3, 0x1c99, 0xbfa, 0x1ead, 0x1f6, 0x12fa, 0x1465, 0xad7, 0x1a84, 0x18d8, 0x1b7f, 0x9fe, 0x14b1, 0x13b7, 0x189f, 0x12bc, 0xabc, 0x1f} +{{0x195c, 0x1d55, 0x99f, 0x11f, 0x106b, 0xab1, 0x3e7, 0x1e40, 0xa1e, 0xdf0, 0x1dd4, 0x5cd, 0xfc3, 0x1c99, 0xbfa, 0x1ead, 0x1f6, 0x12fa, 0x1465, 0xad7, 0x1a84, 0x18d8, 0x1b7f, 0x9fe, 0x14b1, 0x13b7, 0x189f, 0x12bc, 0xabc, 0x1f}} #elif RADIX == 32 -{0xeaae573, 0xc23e99f, 0x7558c1a, 0x7bc803e, 0xd46f828, 0xf0cb9bd, 0xbfae4cb, 0x7dbd5a, 0xf46597d, 0xc6a115a, 0xfdb7fc6, 0xdbd2c53, 0x57989f9, 0x37af2} +{{0xeaae573, 0xc23e99f, 0x7558c1a, 0x7bc803e, 0xd46f828, 0xf0cb9bd, 0xbfae4cb, 0x7dbd5a, 0xf46597d, 0xc6a115a, 0xfdb7fc6, 0xdbd2c53, 0x57989f9, 0x37af2}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x8c1ac23e99feaae5, 0x46f8287bc803e755, 0xabfae4cbf0cb9bdd, 0x15af46597d07dbd5, 0xd2c53fdb7fc6c6a1, 0x1d6aaf257989f9db} +{{0x8c1ac23e99feaae5, 0x46f8287bc803e755, 0xabfae4cbf0cb9bdd, 0x15af46597d07dbd5, 0xd2c53fdb7fc6c6a1, 0x1d6aaf257989f9db}} #else -{0x35847d33fd55ca, 0x21ef200f9d5630, 0x5f865cdeea37c1, 0x507dbd5abfae4c, 0x58d422b5e8cb2f, 0x76f4b14ff6dff1, 0xeb55792bcc4fc} +{{0x35847d33fd55ca, 0x21ef200f9d5630, 0x5f865cdeea37c1, 0x507dbd5abfae4c, 0x58d422b5e8cb2f, 0x76f4b14ff6dff1, 0xeb55792bcc4fc}} #endif #endif , #if 0 #elif RADIX == 16 -{0x970, 0x18b4, 0xc62, 0xf59, 0xf33, 0x6c0, 0x5ae, 0x86b, 0x1690, 0x17e1, 0x829, 0xab5, 0x169, 0x1115, 0x1b7e, 0x17fa, 0xcae, 0x1b7, 0xc7b, 0xb70, 0x11fc, 0x1417, 0x8b4, 0x1b78, 0x35a, 0x18e, 0x1e46, 0x15f0, 0xf64, 0x15} +{{0x970, 0x18b4, 0xc62, 0xf59, 0xf33, 0x6c0, 0x5ae, 0x86b, 0x1690, 0x17e1, 0x829, 0xab5, 0x169, 0x1115, 0x1b7e, 0x17fa, 0xcae, 0x1b7, 0xc7b, 0xb70, 0x11fc, 0x1417, 0x8b4, 0x1b78, 0x35a, 0x18e, 0x1e46, 0x15f0, 0xf64, 0x15}} #elif RADIX == 32 -{0xc5a25c2, 0xdeb2c62, 0xe3603cc, 0x410d65a, 0x29bf0da, 0x5a556a8, 0xb7e88a8, 0xb2baff5, 0xc7b0db, 0xbc7f16e, 0xf08b4a0, 0xc70d6b6, 0xbe1e460, 0x29d92} +{{0xc5a25c2, 0xdeb2c62, 0xe3603cc, 0x410d65a, 0x29bf0da, 0x5a556a8, 0xb7e88a8, 0xb2baff5, 0xc7b0db, 0xbc7f16e, 0xf08b4a0, 0xc70d6b6, 0xbe1e460, 0x29d92}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3ccdeb2c62c5a25, 0x9bf0da410d65ae36, 0x5b7e88a85a556a82, 0x16e0c7b0dbb2baff, 0xd6b6f08b4a0bc7f, 0x316bd92be1e460c7} +{{0x3ccdeb2c62c5a25, 0x9bf0da410d65ae36, 0x5b7e88a85a556a82, 0x16e0c7b0dbb2baff, 0xd6b6f08b4a0bc7f, 0x316bd92be1e460c7}} #else -{0x19bd658c58b44b, 0x69043596b8d80f, 0x42d2ab5414df86, 0x3b2baff5b7e88a, 0x178fe2dc18f61b, 0x31c35adbc22d28, 0x875ec95f0f230} +{{0x19bd658c58b44b, 0x69043596b8d80f, 0x42d2ab5414df86, 0x3b2baff5b7e88a, 0x178fe2dc18f61b, 0x31c35adbc22d28, 0x875ec95f0f230}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -2384,261 +2384,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x187f, 0x1c7d, 0x163a, 0x54d, 0x197f, 0x1a5d, 0x1833, 0x823, 0x11ae, 0x225, 0xd6d, 0x1627, 0xd6c, 0x1625, 0xa36, 0xf64, 0xcfa, 0x327, 0x188d, 0xfab, 0x56b, 0x91c, 0x1cc5, 0x4fe, 0x2ae, 0x181a, 0x195, 0x1288, 0x10fc, 0x3} +{{0x187f, 0x1c7d, 0x163a, 0x54d, 0x197f, 0x1a5d, 0x1833, 0x823, 0x11ae, 0x225, 0xd6d, 0x1627, 0xd6c, 0x1625, 0xa36, 0xf64, 0xcfa, 0x327, 0x188d, 0xfab, 0x56b, 0x91c, 0x1cc5, 0x4fe, 0x2ae, 0x181a, 0x195, 0x1288, 0x10fc, 0x3}} #elif RADIX == 32 -{0xe3ee1fc, 0xca9b63a, 0x3d2ee5f, 0xb904783, 0x6d112c6, 0x5b2c4ed, 0xa36b12b, 0xb3e9ec8, 0x788d193, 0xe15adf5, 0xfdcc548, 0xd0ab89, 0x510195c, 0x1c3f2} +{{0xe3ee1fc, 0xca9b63a, 0x3d2ee5f, 0xb904783, 0x6d112c6, 0x5b2c4ed, 0xa36b12b, 0xb3e9ec8, 0x788d193, 0xe15adf5, 0xfdcc548, 0xd0ab89, 0x510195c, 0x1c3f2}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xee5fca9b63ae3ee1, 0xd112c6b9047833d2, 0x8a36b12b5b2c4ed6, 0xdf5788d193b3e9ec, 0xab89fdcc548e15a, 0x40183f2510195c0d} +{{0xee5fca9b63ae3ee1, 0xd112c6b9047833d2, 0x8a36b12b5b2c4ed6, 0xdf5788d193b3e9ec, 0xab89fdcc548e15a, 0x40183f2510195c0d}} #else -{0x3f9536c75c7dc3, 0x1ae411e0cf4bb9, 0x5ad96276b68896, 0x3b3e9ec8a36b12, 0x1c2b5beaf11a32, 0x342ae27f73152, 0xfcc1f92880cae} +{{0x3f9536c75c7dc3, 0x1ae411e0cf4bb9, 0x5ad96276b68896, 0x3b3e9ec8a36b12, 0x1c2b5beaf11a32, 0x342ae27f73152, 0xfcc1f92880cae}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0xe9d, 0x171f, 0xd8e, 0x1953, 0xe5f, 0x1e97, 0x1e0c, 0x1208, 0xc6b, 0x889, 0x1b5b, 0x589, 0xb5b, 0x1589, 0x28d, 0x13d9, 0x1b3e, 0x8c9, 0x1e23, 0x1bea, 0x15a, 0xa47, 0x1731, 0x113f, 0x10ab, 0xe06, 0x65, 0x4a2, 0x83f, 0x1a} +{{0xe9d, 0x171f, 0xd8e, 0x1953, 0xe5f, 0x1e97, 0x1e0c, 0x1208, 0xc6b, 0x889, 0x1b5b, 0x589, 0xb5b, 0x1589, 0x28d, 0x13d9, 0x1b3e, 0x8c9, 0x1e23, 0x1bea, 0x15a, 0xa47, 0x1731, 0x113f, 0x10ab, 0xe06, 0x65, 0x4a2, 0x83f, 0x1a}} #elif RADIX == 32 -{0xb8fba77, 0xf2a6d8e, 0xcf4bb97, 0xae411e0, 0x5b444b1, 0xd6cb13b, 0x28dac4a, 0xecfa7b2, 0x5e23464, 0x3856b7d, 0x7f73152, 0x342ae2, 0x9440657, 0xf0fc} +{{0xb8fba77, 0xf2a6d8e, 0xcf4bb97, 0xae411e0, 0x5b444b1, 0xd6cb13b, 0x28dac4a, 0xecfa7b2, 0x5e23464, 0x3856b7d, 0x7f73152, 0x342ae2, 0x9440657, 0xf0fc}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xbb97f2a6d8eb8fba, 0xb444b1ae411e0cf4, 0x228dac4ad6cb13b5, 0xb7d5e23464ecfa7b, 0x42ae27f731523856, 0x1e460fc944065703} +{{0xbb97f2a6d8eb8fba, 0xb444b1ae411e0cf4, 0x228dac4ad6cb13b5, 0xb7d5e23464ecfa7b, 0x42ae27f731523856, 0x1e460fc944065703}} #else -{0x2fe54db1d71f74, 0x46b9047833d2ee, 0x56b6589dada225, 0x4ecfa7b228dac4, 0x470ad6fabc468c, 0x40d0ab89fdcc54, 0xf2307e4a2032b} +{{0x2fe54db1d71f74, 0x46b9047833d2ee, 0x56b6589dada225, 0x4ecfa7b228dac4, 0x470ad6fabc468c, 0x40d0ab89fdcc54, 0xf2307e4a2032b}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX 
== 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x237, 0xee8, 0xd8c, 0xafb, 0x18cd, 0x1ce1, 0x162a, 0x11c9, 0x1bbc, 0x1415, 0x1c35, 0x1d0c, 0x1104, 0x1558, 0x9d, 0xb17, 0x1097, 0x16d2, 0xc02, 0x1573, 0x1c5f, 0x1bec, 0x1a73, 0x1dfe, 0x1923, 0x18d6, 0x221, 0x11ee, 0x1581, 0xb} +{{0x237, 0xee8, 0xd8c, 0xafb, 0x18cd, 0x1ce1, 0x162a, 0x11c9, 0x1bbc, 0x1415, 0x1c35, 0x1d0c, 0x1104, 0x1558, 0x9d, 0xb17, 0x1097, 0x16d2, 0xc02, 0x1573, 0x1c5f, 0x1bec, 0x1a73, 0x1dfe, 0x1923, 0x18d6, 0x221, 0x11ee, 0x1581, 0xb}} #elif RADIX == 32 -{0x77408dd, 0x55f6d8c, 0xae70e33, 0xf239362, 0x35a0aee, 0x413a19c, 0x9daac4, 0x425d62e, 0x6c02b69, 0x6717eae, 0xfda73df, 0x6b648fb, 0x3dc221c, 0x1c606} +{{0x77408dd, 0x55f6d8c, 0xae70e33, 0xf239362, 0x35a0aee, 0x413a19c, 0x9daac4, 0x425d62e, 0x6c02b69, 0x6717eae, 0xfda73df, 0x6b648fb, 0x3dc221c, 0x1c606}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xe3355f6d8c77408, 0x5a0aeef239362ae7, 0xe09daac4413a19c3, 0xeae6c02b69425d62, 0x648fbfda73df6717, 0x38396063dc221c6b} +{{0xe3355f6d8c77408, 0x5a0aeef239362ae7, 0xe09daac4413a19c3, 0xeae6c02b69425d62, 0x648fbfda73df6717, 0x38396063dc221c6b}} #else -{0x66abedb18ee811, 0x3bc8e4d8ab9c38, 0x2209d0ce1ad057, 0x1425d62e09daac, 0x6ce2fd5cd8056d, 0x1ad923eff69cf7, 0xbdcb031ee110e} +{{0x66abedb18ee811, 0x3bc8e4d8ab9c38, 0x2209d0ce1ad057, 0x1425d62e09daac, 0x6ce2fd5cd8056d, 0x1ad923eff69cf7, 0xbdcb031ee110e}} #endif #endif , #if 0 #elif RADIX == 16 -{0x16a4, 0x11f0, 0x446, 0x1b2b, 0x129e, 0x1b52, 0x25, 0x18e4, 0x15d7, 0x545, 0x1502, 0x3af, 0x1b45, 0xff3, 0x1423, 0x1574, 0x1c5a, 0xff0, 0x1663, 0x114b, 0xc99, 0x1c89, 0x11f0, 0x15fd, 0x17a1, 0x14dd, 0x17f7, 0x1451, 0x5af, 0x17} +{{0x16a4, 0x11f0, 0x446, 0x1b2b, 0x129e, 0x1b52, 0x25, 0x18e4, 0x15d7, 0x545, 0x1502, 0x3af, 0x1b45, 0xff3, 0x1423, 0x1574, 0x1c5a, 0xff0, 0x1663, 0x114b, 0xc99, 0x1c89, 0x11f0, 0x15fd, 0x17a1, 0x14dd, 0x17f7, 0x1451, 0x5af, 0x17}} #elif RADIX == 32 -{0x8f85a92, 0xb656446, 0x5da94a7, 0x5f1c802, 0x22a2d7, 0xd1475f5, 0x4237f9e, 0x716aae9, 0x76637f8, 0x4b26629, 0xfb1f0e4, 0x6ede86b, 0x8a37f7a, 0x376be} 
+{{0x8f85a92, 0xb656446, 0x5da94a7, 0x5f1c802, 0x22a2d7, 0xd1475f5, 0x4237f9e, 0x716aae9, 0x76637f8, 0x4b26629, 0xfb1f0e4, 0x6ede86b, 0x8a37f7a, 0x376be}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x94a7b6564468f85a, 0x22a2d75f1c8025da, 0x94237f9ed1475f50, 0x62976637f8716aae, 0xde86bfb1f0e44b26, 0x25496be8a37f7a6e} +{{0x94a7b6564468f85a, 0x22a2d75f1c8025da, 0x94237f9ed1475f50, 0x62976637f8716aae, 0xde86bfb1f0e44b26, 0x25496be8a37f7a6e}} #else -{0x4f6cac88d1f0b5, 0x5d7c7200976a52, 0x768a3afa811516, 0x716aae94237f9, 0x964cc52ecc6ff, 0x1bb7a1afec7c39, 0x264b5f451bfbd} +{{0x4f6cac88d1f0b5, 0x5d7c7200976a52, 0x768a3afa811516, 0x716aae94237f9, 0x964cc52ecc6ff, 0x1bb7a1afec7c39, 0x264b5f451bfbd}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0xc89, 0x16f8, 0x1bcf, 0x14c7, 0x1c81, 0x1c37, 0x3b1, 0xb00, 0x5e, 0xdb5, 0x920, 0x14db, 0x41, 0x1bd7, 0x159d, 0x1889, 0x1318, 0x95d, 0x13d5, 0x46b, 0x18bd, 0x1bf1, 0x1bf6, 0x1ba2, 0x2d6, 0x1b06, 0x17c1, 0x1a40, 0x1f02, 0x11} +{{0xc89, 0x16f8, 0x1bcf, 0x14c7, 0x1c81, 0x1c37, 0x3b1, 0xb00, 0x5e, 0xdb5, 0x920, 0x14db, 0x41, 0x1bd7, 0x159d, 0x1889, 0x1318, 0x95d, 0x13d5, 0x46b, 0x18bd, 0x1bf1, 0x1bf6, 0x1ba2, 0x2d6, 0x1b06, 0x17c1, 0x1a40, 0x1f02, 0x11}} #elif RADIX == 32 -{0xb7c3226, 0x698fbcf, 0x1e1bf20, 0x796003b, 0x206da81, 0x1069b69, 0x59ddeb8, 0xcc63113, 0x73d54ae, 0x8e2f48d, 0x45bf6df, 0x830b5b7, 0x4817c1d, 0xdc0b} +{{0xb7c3226, 0x698fbcf, 0x1e1bf20, 0x796003b, 0x206da81, 0x1069b69, 0x59ddeb8, 0xcc63113, 0x73d54ae, 0x8e2f48d, 0x45bf6df, 0x830b5b7, 0x4817c1d, 0xdc0b}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xbf20698fbcfb7c32, 0x6da81796003b1e1, 0x359ddeb81069b692, 0x48d73d54aecc6311, 0xb5b745bf6df8e2f, 0x9b3c0b4817c1d83} +{{0xbf20698fbcfb7c32, 0x6da81796003b1e1, 0x359ddeb81069b692, 0x48d73d54aecc6311, 0xb5b745bf6df8e2f, 0x9b3c0b4817c1d83}} #else -{0x40d31f79f6f864, 0x5e5800ec786fc, 0x40834db49036d4, 0x6cc6311359ddeb, 0x71c5e91ae7aa95, 0x60c2d6dd16fdb7, 0x4d9e05a40be0e} +{{0x40d31f79f6f864, 0x5e5800ec786fc, 0x40834db49036d4, 0x6cc6311359ddeb, 
0x71c5e91ae7aa95, 0x60c2d6dd16fdb7, 0x4d9e05a40be0e}} #endif #endif , #if 0 #elif RADIX == 16 -{0x8c0, 0x125b, 0x1d1c, 0x8a8, 0x1c41, 0xbb7, 0x15bf, 0x15ec, 0x959, 0x1fc5, 0xc2, 0x2ff, 0x1dd2, 0x1c02, 0x9db, 0x139d, 0x9a, 0x1654, 0xce7, 0xf6d, 0x13e5, 0x19be, 0x1f28, 0x161c, 0xe9f, 0x940, 0x77d, 0x162c, 0x385, 0x4} +{{0x8c0, 0x125b, 0x1d1c, 0x8a8, 0x1c41, 0xbb7, 0x15bf, 0x15ec, 0x959, 0x1fc5, 0xc2, 0x2ff, 0x1dd2, 0x1c02, 0x9db, 0x139d, 0x9a, 0x1654, 0xce7, 0xf6d, 0x13e5, 0x19be, 0x1f28, 0x161c, 0xe9f, 0x940, 0x77d, 0x162c, 0x385, 0x4}} #elif RADIX == 32 -{0x92da300, 0x5151d1c, 0xf5dbf10, 0x66bd95b, 0xc2fe2a5, 0x7485fe0, 0x9dbe017, 0x26a73a, 0xace7b2a, 0xf4f95ed, 0x39f28cd, 0xa03a7ec, 0xc5877d4, 0x20e16} +{{0x92da300, 0x5151d1c, 0xf5dbf10, 0x66bd95b, 0xc2fe2a5, 0x7485fe0, 0x9dbe017, 0x26a73a, 0xace7b2a, 0xf4f95ed, 0x39f28cd, 0xa03a7ec, 0xc5877d4, 0x20e16}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xbf105151d1c92da3, 0x2fe2a566bd95bf5d, 0xa9dbe0177485fe0c, 0x5edace7b2a026a73, 0x3a7ec39f28cdf4f9, 0x20e16c5877d4a0} +{{0xbf105151d1c92da3, 0x2fe2a566bd95bf5d, 0xa9dbe0177485fe0c, 0x5edace7b2a026a73, 0x3a7ec39f28cdf4f9, 0x20e16c5877d4a0}} #else -{0x20a2a3a3925b46, 0x159af656fd76fc, 0x3ba42ff0617f15, 0x2026a73a9dbe01, 0x3e9f2bdb59cf65, 0x280e9fb0e7ca33, 0x1070b62c3bea} +{{0x20a2a3a3925b46, 0x159af656fd76fc, 0x3ba42ff0617f15, 0x2026a73a9dbe01, 0x3e9f2bdb59cf65, 0x280e9fb0e7ca33, 0x1070b62c3bea}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0xd30, 0x670, 0x165f, 0x18f8, 0x3fe, 0x11e5, 0x663, 0x270, 0x18cb, 0x42b, 0x11c3, 0xe0a, 0x4fc, 0x18ad, 0xfd0, 0x3fa, 0x1957, 0x1544, 0x941, 0x181e, 0x661, 0x18b9, 0x74a, 0xa70, 0x866, 0x11f8, 0xd20, 0xae3, 0x19b8, 0xb} +{{0xd30, 0x670, 0x165f, 0x18f8, 0x3fe, 0x11e5, 0x663, 0x270, 0x18cb, 0x42b, 0x11c3, 0xe0a, 0x4fc, 0x18ad, 0xfd0, 0x3fa, 0x1957, 0x1544, 0x941, 0x181e, 0x661, 0x18b9, 0x74a, 0xa70, 0x866, 0x11f8, 0xd20, 0xae3, 0x19b8, 0xb}} #elif RADIX == 32 -{0x33834c1, 0xb1f165f, 0x38f28ff, 0x2c4e066, 0xc3215e3, 0x3f1c151, 0xfd0c569, 0x655c7f4, 0xc941aa2, 0xc998703, 
0xe074ac5, 0xfc21994, 0x5c6d208, 0x1d6e1} +{{0x33834c1, 0xb1f165f, 0x38f28ff, 0x2c4e066, 0xc3215e3, 0x3f1c151, 0xfd0c569, 0x655c7f4, 0xc941aa2, 0xc998703, 0xe074ac5, 0xfc21994, 0x5c6d208, 0x1d6e1}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x28ffb1f165f33834, 0x3215e32c4e06638f, 0x4fd0c5693f1c151c, 0x703c941aa2655c7f, 0x21994e074ac5c998, 0x311e6e15c6d208fc} +{{0x28ffb1f165f33834, 0x3215e32c4e06638f, 0x4fd0c5693f1c151c, 0x703c941aa2655c7f, 0x21994e074ac5c998, 0x311e6e15c6d208fc}} #else -{0x7f63e2cbe67069, 0xcb138198e3ca3, 0x49f8e0a8e190af, 0x2655c7f4fd0c56, 0x39330e07928354, 0x3f08665381d2b1, 0x84f370ae36904} +{{0x7f63e2cbe67069, 0xcb138198e3ca3, 0x49f8e0a8e190af, 0x2655c7f4fd0c56, 0x39330e07928354, 0x3f08665381d2b1, 0x84f370ae36904}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1f47, 0x9e3, 0x5d, 0xdc6, 0x18a3, 0x1c99, 0x1253, 0x179f, 0x16b, 0x1b87, 0x27a, 0x9f8, 0x1064, 0x9ed, 0xe66, 0x47d, 0x4e9, 0x1805, 0x1349, 0x40, 0x1bbd, 0x7f6, 0x1c57, 0x1f9f, 0x11e9, 0x14cf, 0xe61, 0x1892, 0x833, 0x10} +{{0x1f47, 0x9e3, 0x5d, 0xdc6, 0x18a3, 0x1c99, 0x1253, 0x179f, 0x16b, 0x1b87, 0x27a, 0x9f8, 0x1064, 0x9ed, 0xe66, 0x47d, 0x4e9, 0x1805, 0x1349, 0x40, 0x1bbd, 0x7f6, 0x1c57, 0x1f9f, 0x11e9, 0x14cf, 0xe61, 0x1892, 0x833, 0x10}} #elif RADIX == 32 -{0x4f1fd1e, 0xdb8c05d, 0x3e4ce28, 0xaef3f25, 0x7adc385, 0x1913f02, 0xe664f6c, 0x93a48fa, 0x1349c02, 0xb6ef408, 0x3fc573f, 0x67c7a7f, 0x124e61a, 0xcf} +{{0x4f1fd1e, 0xdb8c05d, 0x3e4ce28, 0xaef3f25, 0x7adc385, 0x1913f02, 0xe664f6c, 0x93a48fa, 0x1349c02, 0xb6ef408, 0x3fc573f, 0x67c7a7f, 0x124e61a, 0xcf}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xce28db8c05d4f1fd, 0xadc385aef3f253e4, 0xae664f6c1913f027, 0x4081349c0293a48f, 0xc7a7f3fc573fb6ef, 0x79e0cf124e61a67} +{{0xce28db8c05d4f1fd, 0xadc385aef3f253e4, 0xae664f6c1913f027, 0x4081349c0293a48f, 0xc7a7f3fc573fb6ef, 0x79e0cf124e61a67}} #else -{0x51b7180ba9e3fa, 0x16bbcfc94f9338, 0x60c89f813d6e1c, 0x293a48fae664f6, 0x76dde810269380, 0x19f1e9fcff15cf, 0x3cf067892730d} +{{0x51b7180ba9e3fa, 0x16bbcfc94f9338, 0x60c89f813d6e1c, 0x293a48fae664f6, 0x76dde810269380, 0x19f1e9fcff15cf, 0x3cf067892730d}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} 
#else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -2860,261 +2860,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x17bf, 0xbb8, 0x485, 0x1296, 0x1b32, 0xe0b, 0x13f9, 0x15f5, 0x1a2b, 0xff6, 0xf53, 0x70a, 0x36c, 0x40f, 0xa89, 0xca6, 0x37e, 0x1488, 0x1796, 0x1be2, 0x1e0f, 0xf10, 0x1628, 0x1a20, 0x1ee7, 0x1e53, 0x48c, 0x94, 0xd53, 0xd} +{{0x17bf, 0xbb8, 0x485, 0x1296, 0x1b32, 0xe0b, 0x13f9, 0x15f5, 0x1a2b, 0xff6, 0xf53, 0x70a, 0x36c, 0x40f, 0xa89, 0xca6, 0x37e, 0x1488, 0x1796, 0x1be2, 0x1e0f, 0xf10, 0x1628, 0x1a20, 0x1ee7, 0x1e53, 0x48c, 0x94, 0xd53, 0xd}} #elif RADIX == 32 -{0x5dc5efd, 0xa52c485, 0x9705ecc, 0xaebeb3f, 0x537fb68, 0xdb0e14f, 0xa892078, 0xdf994c, 0x5796a44, 0x8783f7c, 0x4162878, 0x29fb9f4, 0x12848cf, 0x2a54c} +{{0x5dc5efd, 0xa52c485, 0x9705ecc, 0xaebeb3f, 0x537fb68, 0xdb0e14f, 0xa892078, 0xdf994c, 0x5796a44, 0x8783f7c, 0x4162878, 0x29fb9f4, 0x12848cf, 0x2a54c}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x5ecca52c4855dc5e, 0x37fb68aebeb3f970, 0xca892078db0e14f5, 0xf7c5796a440df994, 0xfb9f441628788783, 0x406754c12848cf29} +{{0x5ecca52c4855dc5e, 0x37fb68aebeb3f970, 0xca892078db0e14f5, 0xf7c5796a440df994, 0xfb9f441628788783, 0x406754c12848cf29}} #else -{0x194a5890abb8bd, 0x22bafacfe5c17b, 0x46d870a7a9bfdb, 0x40df994ca89207, 0x10f07ef8af2d48, 0x4a7ee7d1058a1e, 0xff3aa60942467} +{{0x194a5890abb8bd, 0x22bafacfe5c17b, 0x46d870a7a9bfdb, 0x40df994ca89207, 0x10f07ef8af2d48, 0x4a7ee7d1058a1e, 0xff3aa60942467}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x66d, 0xaee, 0x1121, 0x14a5, 0x1ecc, 0xb82, 0xcfe, 0x1d7d, 0x168a, 0x1bfd, 0x13d4, 0x1c2, 0x18db, 0x903, 0x12a2, 0x1329, 0xdf, 0x1522, 0x15e5, 0x1ef8, 0x783, 0x3c4, 0x58a, 0x1e88, 0x1fb9, 0x794, 0x123, 0x1825, 0x1754, 0x1c} +{{0x66d, 0xaee, 0x1121, 0x14a5, 0x1ecc, 0xb82, 0xcfe, 0x1d7d, 0x168a, 0x1bfd, 0x13d4, 0x1c2, 0x18db, 0x903, 0x12a2, 0x1329, 0xdf, 0x1522, 0x15e5, 0x1ef8, 0x783, 0x3c4, 0x58a, 0x1e88, 0x1fb9, 0x794, 0x123, 0x1825, 0x1754, 0x1c}} #elif RADIX == 32 -{0x57719b7, 0x294b121, 0xe5c17b3, 0x2bafacf, 0xd4dfeda, 0x36c3853, 0x2a2481e, 0x37e653, 0x15e5a91, 0x21e0fdf, 0x1058a1e, 0xca7ee7d, 0x4a1233, 0x22d53} +{{0x57719b7, 0x294b121, 0xe5c17b3, 0x2bafacf, 0xd4dfeda, 0x36c3853, 0x2a2481e, 0x37e653, 0x15e5a91, 0x21e0fdf, 0x1058a1e, 0xca7ee7d, 0x4a1233, 0x22d53}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x17b3294b12157719, 0x4dfeda2bafacfe5c, 0x32a2481e36c3853d, 0xfdf15e5a91037e65, 0x7ee7d1058a1e21e0, 0x2e99d5304a1233ca} +{{0x17b3294b12157719, 0x4dfeda2bafacfe5c, 0x32a2481e36c3853d, 0xfdf15e5a91037e65, 0x7ee7d1058a1e21e0, 0x2e99d5304a1233ca}} #else -{0x665296242aee33, 0x68aebeb3f9705e, 0x71b61c29ea6ff6, 0x1037e6532a2481, 0x443c1fbe2bcb52, 0x729fb9f4416287, 0x70cea98250919} +{{0x665296242aee33, 0x68aebeb3f9705e, 0x71b61c29ea6ff6, 0x1037e6532a2481, 0x443c1fbe2bcb52, 0x729fb9f4416287, 0x70cea98250919}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x3e9, 0x9f6, 0x1c50, 0x27e, 0xa85, 0x39c, 0xa7b, 0x177c, 0xdfc, 0x77e, 0x1490, 0x11b8, 0xd2b, 0x17dc, 0xd7c, 0x16a0, 0xe21, 0xb86, 0x15bb, 0x844, 0x146c, 0xe51, 0xc6d, 0x143d, 0x1d2b, 0x1715, 0x18bb, 0xdc8, 0x55d, 0x16} +{{0x3e9, 0x9f6, 0x1c50, 0x27e, 0xa85, 0x39c, 0xa7b, 0x177c, 0xdfc, 0x77e, 0x1490, 0x11b8, 0xd2b, 0x17dc, 0xd7c, 0x16a0, 0xe21, 0xb86, 0x15bb, 0x844, 0x146c, 0xe51, 0xc6d, 0x143d, 0x1d2b, 0x1715, 0x18bb, 0xdc8, 0x55d, 0x16}} #elif RADIX == 32 -{0x4fb0fa6, 0x44fdc50, 0xb1ce2a1, 0xf2ef8a7, 0x903bf37, 0x4ae3714, 0xd7cbee3, 0x3886d40, 0x95bb5c3, 0x8d1b108, 0x7ac6d72, 0x8af4ae8, 0xb918bbb, 0x2f575} +{{0x4fb0fa6, 0x44fdc50, 0xb1ce2a1, 0xf2ef8a7, 0x903bf37, 0x4ae3714, 0xd7cbee3, 0x3886d40, 0x95bb5c3, 0x8d1b108, 0x7ac6d72, 0x8af4ae8, 0xb918bbb, 0x2f575}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xe2a144fdc504fb0f, 0x3bf37f2ef8a7b1c, 0xd7cbee34ae37149, 0x10895bb5c33886d4, 0xf4ae87ac6d728d1b, 0x2a55575b918bbb8a} +{{0xe2a144fdc504fb0f, 0x3bf37f2ef8a7b1c, 0xd7cbee34ae37149, 0x10895bb5c33886d4, 0xf4ae87ac6d728d1b, 0x2a55575b918bbb8a}} #else -{0x4289fb8a09f61f, 0x5fcbbe29ec738a, 0x1a571b8a481df9, 0x33886d40d7cbee, 0x51a362112b76b8, 0x62bd2ba1eb1b5c, 0x4eaabadc8c5dd} +{{0x4289fb8a09f61f, 0x5fcbbe29ec738a, 0x1a571b8a481df9, 0x33886d40d7cbee, 0x51a362112b76b8, 0x62bd2ba1eb1b5c, 0x4eaabadc8c5dd}} #endif #endif , #if 0 #elif RADIX == 16 -{0x793, 0x1095, 0x8d0, 0x676, 0x2be, 0x1a9d, 0x6d6, 0x1d0, 0x112a, 0x18e1, 0x1741, 0xc68, 0x156d, 0x113f, 0x181e, 0x201, 0xcd7, 0xbb7, 0xdb, 0x64c, 0x181e, 0x63, 0x965, 0xf2, 0xc95, 0x50d, 0x1ec2, 0x1c03, 0x5b4, 0x1b} +{{0x793, 0x1095, 0x8d0, 0x676, 0x2be, 0x1a9d, 0x6d6, 0x1d0, 0x112a, 0x18e1, 0x1741, 0xc68, 0x156d, 0x113f, 0x181e, 0x201, 0xcd7, 0xbb7, 0xdb, 0x64c, 0x181e, 0x63, 0x965, 0xf2, 0xc95, 0x50d, 0x1ec2, 0x1c03, 0x5b4, 0x1b}} #elif RADIX == 32 -{0x84a9e4f, 0x8cec8d0, 0x6d4e8af, 0xa83a06d, 0x41c70c4, 0x5b58d17, 0x81e89fd, 0xb35c403, 0x80db5db, 0x1e078c9, 0xe496503, 0x86b2541, 0x807ec22, 0x166d3} +{{0x84a9e4f, 0x8cec8d0, 0x6d4e8af, 0xa83a06d, 0x41c70c4, 0x5b58d17, 0x81e89fd, 0xb35c403, 0x80db5db, 0x1e078c9, 0xe496503, 0x86b2541, 0x807ec22, 0x166d3}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xe8af8cec8d084a9e, 0x1c70c4a83a06d6d4, 0x381e89fd5b58d174, 0x8c980db5dbb35c40, 0xb2541e4965031e07, 0x14256d3807ec2286} +{{0xe8af8cec8d084a9e, 0x1c70c4a83a06d6d4, 0x381e89fd5b58d174, 0x8c980db5dbb35c40, 0xb2541e4965031e07, 0x14256d3807ec2286}} #else -{0x5f19d91a10953c, 0x12a0e81b5b53a2, 0x6adac68ba0e386, 0x3b35c40381e89f, 0x63c0f19301b6bb, 0x21ac9507925940, 0xa12b69c03f611} +{{0x5f19d91a10953c, 0x12a0e81b5b53a2, 0x6adac68ba0e386, 0x3b35c40381e89f, 0x63c0f19301b6bb, 0x21ac9507925940, 0xa12b69c03f611}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x71d, 0xf0e, 0x506, 0x1aec, 0x3f6, 0x2c1, 0x17dd, 0x43f, 0x1552, 0x1488, 0x10c3, 0x5ea, 0xfd4, 0x634, 0x1eb1, 0x1711, 0x1424, 0xeb1, 0xfe1, 0xa0a, 0x165f, 0x5c8, 0x1544, 0x1493, 0x329, 0x19ec, 0x1db4, 0x983, 0x790, 0x1d} +{{0x71d, 0xf0e, 0x506, 0x1aec, 0x3f6, 0x2c1, 0x17dd, 0x43f, 0x1552, 0x1488, 0x10c3, 0x5ea, 0xfd4, 0x634, 0x1eb1, 0x1711, 0x1424, 0xeb1, 0xfe1, 0xa0a, 0x165f, 0x5c8, 0x1544, 0x1493, 0x329, 0x19ec, 0x1db4, 0x983, 0x790, 0x1d}} #elif RADIX == 32 -{0x7871c77, 0xb5d8506, 0xd1608fd, 0x4887f7d, 0xc3a4455, 0xf50bd50, 0xeb131a3, 0xd092e23, 0x4fe1758, 0x4597d41, 0x275442e, 0xf60ca69, 0x307db4c, 0x26e41} +{{0x7871c77, 0xb5d8506, 0xd1608fd, 0x4887f7d, 0xc3a4455, 0xf50bd50, 0xeb131a3, 0xd092e23, 0x4fe1758, 0x4597d41, 0x275442e, 0xf60ca69, 0x307db4c, 0x26e41}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x8fdb5d85067871c, 0x3a44554887f7dd16, 0x3eb131a3f50bd50c, 0xd414fe1758d092e2, 0xca69275442e4597, 0x1e5de41307db4cf6} +{{0x8fdb5d85067871c, 0x3a44554887f7dd16, 0x3eb131a3f50bd50c, 0xd414fe1758d092e2, 0xca69275442e4597, 0x1e5de41307db4cf6}} #else -{0x7b6bb0a0cf0e38, 0x55221fdf745823, 0x1fa85ea861d222, 0xd092e23eb131a, 0x48b2fa829fc2eb, 0x3d8329a49d510b, 0xf2ef20983eda6} +{{0x7b6bb0a0cf0e38, 0x55221fdf745823, 0x1fa85ea861d222, 0xd092e23eb131a, 0x48b2fa829fc2eb, 0x3d8329a49d510b, 0xf2ef20983eda6}} #endif #endif , #if 0 #elif RADIX == 16 -{0x704, 0x1718, 0x1f41, 0x1569, 0x1353, 0x403, 0x8ba, 0xd3b, 0x1e9a, 0xca6, 0x1433, 0xc05, 0x2dd, 0xf7d, 0x12c8, 0x1109, 0x1797, 0x4e2, 0xf77, 0x569, 0xfcf, 0x1dd4, 0x11a4, 0x1354, 0x1563, 0x14b7, 0x6ad, 0xf7e, 0x251, 0xe} +{{0x704, 0x1718, 0x1f41, 0x1569, 0x1353, 0x403, 0x8ba, 0xd3b, 0x1e9a, 0xca6, 0x1433, 0xc05, 0x2dd, 0xf7d, 0x12c8, 0x1109, 0x1797, 0x4e2, 0xf77, 0x569, 0xfcf, 0x1dd4, 0x11a4, 0x1354, 0x1563, 0x14b7, 0x6ad, 0xf7e, 0x251, 0xe}} #elif RADIX == 32 -{0xb8c1c11, 0xead3f41, 0xa201cd4, 0x69a768b, 0x336537a, 0xb7580b4, 0x2c87be8, 0x5e5e213, 0x2f77271, 0xa3f3cad, 0xa91a4ee, 0x5bd58e6, 0xefc6ada, 0x2f945} +{{0xb8c1c11, 0xead3f41, 0xa201cd4, 0x69a768b, 0x336537a, 0xb7580b4, 0x2c87be8, 0x5e5e213, 0x2f77271, 0xa3f3cad, 0xa91a4ee, 0x5bd58e6, 0xefc6ada, 0x2f945}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x1cd4ead3f41b8c1c, 0x36537a69a768ba20, 0x32c87be8b7580b43, 0xcad2f772715e5e21, 0xd58e6a91a4eea3f3, 0x480945efc6ada5b} +{{0x1cd4ead3f41b8c1c, 0x36537a69a768ba20, 0x32c87be8b7580b43, 0xcad2f772715e5e21, 0xd58e6a91a4eea3f3, 0x480945efc6ada5b}} #else -{0x29d5a7e8371838, 0x69a69da2e88073, 
0x45bac05a19b29b, 0x15e5e2132c87be, 0x547e795a5eee4e, 0x16f5639aa4693b, 0x2404a2f7e356d} +{{0x29d5a7e8371838, 0x69a69da2e88073, 0x45bac05a19b29b, 0x15e5e2132c87be, 0x547e795a5eee4e, 0x16f5639aa4693b, 0x2404a2f7e356d}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0xf6, 0x15a2, 0x1cbc, 0x185c, 0x9a1, 0xc2f, 0x1123, 0x11, 0xda7, 0x1628, 0x41, 0x1163, 0x12f7, 0x9aa, 0x1235, 0x1444, 0x1c4a, 0x3b6, 0xfee, 0x96, 0x1ed, 0x1f4d, 0x5ec, 0x1bf2, 0x1bca, 0x151d, 0x58f, 0x293, 0x960, 0x20} +{{0xf6, 0x15a2, 0x1cbc, 0x185c, 0x9a1, 0xc2f, 0x1123, 0x11, 0xda7, 0x1628, 0x41, 0x1163, 0x12f7, 0x9aa, 0x1235, 0x1444, 0x1c4a, 0x3b6, 0xfee, 0x96, 0x1ed, 0x1f4d, 0x5ec, 0x1bf2, 0x1bca, 0x151d, 0x58f, 0x293, 0x960, 0x20}} #elif RADIX == 32 -{0xad103db, 0x70b9cbc, 0x3617a68, 0x9c02312, 0x41b1436, 0xbde2c60, 0x2354d54, 0x712a889, 0xcfee1db, 0x687b412, 0xe45ecfa, 0x8eef2b7, 0x52658fa, 0x3f580} +{{0xad103db, 0x70b9cbc, 0x3617a68, 0x9c02312, 0x41b1436, 0xbde2c60, 0x2354d54, 0x712a889, 0xcfee1db, 0x687b412, 0xe45ecfa, 0x8eef2b7, 0x52658fa, 0x3f580}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x7a6870b9cbcad103, 0x1b14369c02312361, 0x92354d54bde2c604, 0x412cfee1db712a88, 0xef2b7e45ecfa687b, 0x37da58052658fa8e} +{{0x7a6870b9cbcad103, 0x1b14369c02312361, 0x92354d54bde2c604, 0x412cfee1db712a88, 0xef2b7e45ecfa687b, 0x37da58052658fa8e}} #else -{0x50e1739795a207, 0x5a7008c48d85e9, 0x25ef163020d8a1, 0x3712a8892354d5, 0x4d0f68259fdc3b, 0x23bbcadf917b3e, 0xbad2c02932c7d} +{{0x50e1739795a207, 0x5a7008c48d85e9, 0x25ef163020d8a1, 0x3712a8892354d5, 0x4d0f68259fdc3b, 0x23bbcadf917b3e, 0xbad2c02932c7d}} #endif #endif , #if 0 #elif RADIX == 16 -{0xbc5, 0xa1d, 0xe8a, 0xe9c, 0x1af1, 0x13b5, 0xa68, 0x4a4, 0x135e, 0x171, 0x716, 0x2c2, 0x1c2b, 0x332, 0x349, 0x138c, 0x168b, 0x21c, 0x1629, 0xb97, 0x186, 0x629, 0x6e8, 0x497, 0x128c, 0x19d2, 0xcc1, 0x121, 0x250, 0x1a} +{{0xbc5, 0xa1d, 0xe8a, 0xe9c, 0x1af1, 0x13b5, 0xa68, 0x4a4, 0x135e, 0x171, 0x716, 0x2c2, 0x1c2b, 0x332, 0x349, 0x138c, 0x168b, 0x21c, 0x1629, 0xb97, 0x186, 0x629, 0x6e8, 0x497, 0x128c, 0x19d2, 
0xcc1, 0x121, 0x250, 0x1a}} #elif RADIX == 32 -{0x50eaf17, 0x5d38e8a, 0x89daebc, 0x78948a6, 0x160b8cd, 0xac5847, 0x3491997, 0x5a2e718, 0xf62910e, 0x4861972, 0x2e6e831, 0xe94a309, 0x242cc1c, 0xd940} +{{0x50eaf17, 0x5d38e8a, 0x89daebc, 0x78948a6, 0x160b8cd, 0xac5847, 0x3491997, 0x5a2e718, 0xf62910e, 0x4861972, 0x2e6e831, 0xe94a309, 0x242cc1c, 0xd940}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xaebc5d38e8a50eaf, 0x60b8cd78948a689d, 0x834919970ac58471, 0x972f62910e5a2e71, 0x4a3092e6e8314861, 0x5e4940242cc1ce9} +{{0xaebc5d38e8a50eaf, 0x60b8cd78948a689d, 0x834919970ac58471, 0x972f62910e5a2e71, 0x4a3092e6e8314861, 0x5e4940242cc1ce9}} #else -{0x78ba71d14a1d5e, 0x35e25229a276ba, 0x38562c238b05c6, 0x65a2e718349199, 0x290c32e5ec5221, 0x3a528c24b9ba0c, 0x2f24a0121660e} +{{0x78ba71d14a1d5e, 0x35e25229a276ba, 0x38562c238b05c6, 0x65a2e718349199, 0x290c32e5ec5221, 0x3a528c24b9ba0c, 0x2f24a0121660e}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -3336,261 +3336,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[8] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0xb89, 0xf4d, 0xbd8, 0x18d2, 0x781, 0x1f79, 0xef5, 0xcfd, 0xa7d, 0x121a, 0x59e, 0x18a3, 0x2b, 0x122f, 0x9d5, 0x18aa, 0x105f, 0x1bcd, 0x871, 0x1f9d, 0xae4, 0xc5c, 0x385, 0xbe8, 0x1878, 0xcd8, 0x7a1, 0x7c8, 0x5b4, 0xe} +{{0xb89, 0xf4d, 0xbd8, 0x18d2, 0x781, 0x1f79, 0xef5, 0xcfd, 0xa7d, 0x121a, 0x59e, 0x18a3, 0x2b, 0x122f, 0x9d5, 0x18aa, 0x105f, 0x1bcd, 0x871, 0x1f9d, 0xae4, 0xc5c, 0x385, 0xbe8, 0x1878, 0xcd8, 0x7a1, 0x7c8, 0x5b4, 0xe}} #elif RADIX == 32 -{0x7a6ae25, 0x71a4bd8, 0x5fbc9e0, 0xf59faef, 0x9e90d29, 0xaf1465, 0x9d59178, 0xc17f154, 0xa871de6, 0xe2b93f3, 0xd038562, 0x6c61e17, 0xf907a16, 0x306d0} +{{0x7a6ae25, 0x71a4bd8, 0x5fbc9e0, 0xf59faef, 0x9e90d29, 0xaf1465, 0x9d59178, 0xc17f154, 0xa871de6, 0xe2b93f3, 0xd038562, 0x6c61e17, 0xf907a16, 0x306d0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xc9e071a4bd87a6ae, 0xe90d29f59faef5fb, 0x49d591780af14659, 0x3f3a871de6c17f15, 0x61e17d038562e2b9, 0x9956d0f907a166c} +{{0xc9e071a4bd87a6ae, 
0xe90d29f59faef5fb, 0x49d591780af14659, 0x3f3a871de6c17f15, 0x61e17d038562e2b9, 0x9956d0f907a166c}} #else -{0x40e3497b0f4d5c, 0x27d67ebbd7ef27, 0x40578a32cf4869, 0x6c17f1549d5917, 0x5c5727e750e3bc, 0x1b18785f40e158, 0x4cab687c83d0b} +{{0x40e3497b0f4d5c, 0x27d67ebbd7ef27, 0x40578a32cf4869, 0x6c17f1549d5917, 0x5c5727e750e3bc, 0x1b18785f40e158, 0x4cab687c83d0b}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0xb60, 0x3d3, 0x12f6, 0xe34, 0x9e0, 0xfde, 0xbbd, 0xb3f, 0x129f, 0x1486, 0x1967, 0x1e28, 0x180a, 0xc8b, 0x1275, 0x1e2a, 0xc17, 0xef3, 0xa1c, 0x7e7, 0x2b9, 0xb17, 0xe1, 0x2fa, 0x61e, 0xb36, 0x1e8, 0x1f2, 0x156d, 0xc} +{{0xb60, 0x3d3, 0x12f6, 0xe34, 0x9e0, 0xfde, 0xbbd, 0xb3f, 0x129f, 0x1486, 0x1967, 0x1e28, 0x180a, 0xc8b, 0x1275, 0x1e2a, 0xc17, 0xef3, 0xa1c, 0x7e7, 0x2b9, 0xb17, 0xe1, 0x2fa, 0x61e, 0xb36, 0x1e8, 0x1f2, 0x156d, 0xc}} #elif RADIX == 32 -{0x1e9ad81, 0x1c692f6, 0xd7ef278, 0x7d67ebb, 0x67a434a, 0x2bc519, 0x275645e, 0xb05fc55, 0xea1c779, 0xb8ae4fc, 0xf40e158, 0x9b18785, 0x3e41e85, 0x245b4} +{{0x1e9ad81, 0x1c692f6, 0xd7ef278, 0x7d67ebb, 0x67a434a, 0x2bc519, 0x275645e, 0xb05fc55, 0xea1c779, 0xb8ae4fc, 0xf40e158, 0x9b18785, 0x3e41e85, 0x245b4}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf2781c692f61e9ad, 0x7a434a7d67ebbd7e, 0x5275645e02bc5196, 0x4fcea1c779b05fc5, 0x18785f40e158b8ae, 
0x20e55b43e41e859b} +{{0xf2781c692f61e9ad, 0x7a434a7d67ebbd7e, 0x5275645e02bc5196, 0x4fcea1c779b05fc5, 0x18785f40e158b8ae, 0x20e55b43e41e859b}} #else -{0x7038d25ec3d35b, 0x29f59faef5fbc9, 0x7015e28cb3d21a, 0x1b05fc55275645, 0x1715c9f9d438ef, 0x66c61e17d03856, 0x32ada1f20f42} +{{0x7038d25ec3d35b, 0x29f59faef5fbc9, 0x7015e28cb3d21a, 0x1b05fc55275645, 0x1715c9f9d438ef, 0x66c61e17d03856, 0x32ada1f20f42}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x441, 0x1774, 0x1527, 0x106a, 0x577, 0x3fc, 0xf92, 0x12c4, 0x96a, 0x10ea, 0x10f5, 0x11c9, 0x1f8, 0x1407, 0x1bcc, 0x16c4, 0x15c1, 0x790, 0x5bc, 0x1c28, 0xbc6, 0x123c, 0xf19, 0x1d6f, 0x361, 0x1fcd, 0x1dc9, 0x20c, 0x17c6, 0x6} +{{0x441, 0x1774, 0x1527, 0x106a, 0x577, 0x3fc, 0xf92, 0x12c4, 0x96a, 0x10ea, 0x10f5, 0x11c9, 0x1f8, 0x1407, 0x1bcc, 0x16c4, 0x15c1, 0x790, 0x5bc, 0x1c28, 0xbc6, 0x123c, 0xf19, 0x1d6f, 0x361, 0x1fcd, 0x1dc9, 0x20c, 0x17c6, 0x6}} #elif RADIX == 32 -{0xbba1104, 0xe0d5527, 0x21fe15d, 0xaa588f9, 0xf587525, 0x7e23930, 0xbcca038, 0x5706d89, 0x5bc3c8, 0xe2f1b85, 0xdef1991, 0xe68d87a, 0x419dc9f, 0x35f18} +{{0xbba1104, 0xe0d5527, 0x21fe15d, 0xaa588f9, 0xf587525, 0x7e23930, 0xbcca038, 0x5706d89, 0x5bc3c8, 0xe2f1b85, 0xdef1991, 0xe68d87a, 0x419dc9f, 0x35f18}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xe15de0d5527bba11, 0x587525aa588f921f, 
0x9bcca0387e23930f, 0xb8505bc3c85706d8, 0x8d87adef1991e2f1, 0x139f18419dc9fe6} +{{0xe15de0d5527bba11, 0x587525aa588f921f, 0x9bcca0387e23930f, 0xb8505bc3c85706d8, 0x8d87adef1991e2f1, 0x139f18419dc9fe6}} #else -{0x3bc1aaa4f77422, 0x16a9623e487f85, 0x43f11c987ac3a9, 0x5706d89bcca03, 0x3c5e370a0b7879, 0x79a361eb7bc664, 0x9cf8c20cee4f} +{{0x3bc1aaa4f77422, 0x16a9623e487f85, 0x43f11c987ac3a9, 0x5706d89bcca03, 0x3c5e370a0b7879, 0x79a361eb7bc664, 0x9cf8c20cee4f}} #endif #endif , #if 0 #elif RADIX == 16 -{0x98a, 0x1bbb, 0x7d8, 0xd84, 0x3fe, 0x90b, 0xfe8, 0x12c3, 0x1e84, 0xde3, 0xbe1, 0x1217, 0x1925, 0x84a, 0xa0e, 0x7cd, 0x1854, 0x768, 0x6e6, 0x1d87, 0xfac, 0x6df, 0x109b, 0x64d, 0x9f2, 0x596, 0x435, 0x1918, 0x1095, 0x0} +{{0x98a, 0x1bbb, 0x7d8, 0xd84, 0x3fe, 0x90b, 0xfe8, 0x12c3, 0x1e84, 0xde3, 0xbe1, 0x1217, 0x1925, 0x84a, 0xa0e, 0x7cd, 0x1854, 0x768, 0x6e6, 0x1d87, 0xfac, 0x6df, 0x109b, 0x64d, 0x9f2, 0x596, 0x435, 0x1918, 0x1095, 0x0}} #elif RADIX == 32 -{0xddda628, 0x9b087d8, 0x84858ff, 0x12586fe, 0xe16f1fa, 0x49642eb, 0xa0e4256, 0x6150f9a, 0xe6e63b4, 0xfbeb3b0, 0x9b09b36, 0xcb27c8c, 0x2304352, 0x4257} +{{0xddda628, 0x9b087d8, 0x84858ff, 0x12586fe, 0xe16f1fa, 0x49642eb, 0xa0e4256, 0x6150f9a, 0xe6e63b4, 0xfbeb3b0, 0x9b09b36, 0xcb27c8c, 0x2304352, 0x4257}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x58ff9b087d8ddda6, 0x16f1fa12586fe848, 0xaa0e425649642ebe, 0x3b0e6e63b46150f9, 0x27c8c9b09b36fbeb, 0xa2c2572304352cb} +{{0x58ff9b087d8ddda6, 0x16f1fa12586fe848, 0xaa0e425649642ebe, 0x3b0e6e63b46150f9, 0x27c8c9b09b36fbeb, 0xa2c2572304352cb}} #else -{0x7f3610fb1bbb4c, 0x684961bfa12163, 0x324b2175f0b78f, 0x46150f9aa0e425, 0x5f7d6761cdcc76, 0x32c9f2326c26cd, 0x51612b91821a9} +{{0x7f3610fb1bbb4c, 0x684961bfa12163, 0x324b2175f0b78f, 0x46150f9aa0e425, 0x5f7d6761cdcc76, 0x32c9f2326c26cd, 0x51612b91821a9}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x17ec, 0x6b9, 0x1dc0, 0x1783, 0x18ee, 0xdd4, 0x1c7f, 0x1fb2, 0x16b0, 0x196e, 0x1e5a, 0x1fda, 0x11f9, 0x117, 0x1c30, 0x1a47, 0x2a2, 0x19e6, 0x1347, 0x2bb, 0x1463, 0x1f37, 
0xa64, 0x3c6, 0x1910, 0x2bc, 0xbc0, 0x17e8, 0x1cfd, 0xa} +{{0x17ec, 0x6b9, 0x1dc0, 0x1783, 0x18ee, 0xdd4, 0x1c7f, 0x1fb2, 0x16b0, 0x196e, 0x1e5a, 0x1fda, 0x11f9, 0x117, 0x1c30, 0x1a47, 0x2a2, 0x19e6, 0x1347, 0x2bb, 0x1463, 0x1f37, 0xa64, 0x3c6, 0x1910, 0x2bc, 0xbc0, 0x17e8, 0x1cfd, 0xa}} #elif RADIX == 32 -{0x35cdfb1, 0xaf07dc0, 0xf6ea63b, 0xc3f65c7, 0x5acb75a, 0x7e7fb5e, 0xc3008bc, 0xa8b48f, 0x7347cf3, 0xbd18c57, 0x8ca64f9, 0x5e64407, 0xfd0bc01, 0x163f6} +{{0x35cdfb1, 0xaf07dc0, 0xf6ea63b, 0xc3f65c7, 0x5acb75a, 0x7e7fb5e, 0xc3008bc, 0xa8b48f, 0x7347cf3, 0xbd18c57, 0x8ca64f9, 0x5e64407, 0xfd0bc01, 0x163f6}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xa63baf07dc035cdf, 0xacb75ac3f65c7f6e, 0xfc3008bc7e7fb5e5, 0xc577347cf30a8b48, 0x644078ca64f9bd18, 0x2d073f6fd0bc015e} +{{0xa63baf07dc035cdf, 0xacb75ac3f65c7f6e, 0xfc3008bc7e7fb5e5, 0xc577347cf30a8b48, 0x644078ca64f9bd18, 0x2d073f6fd0bc015e}} #else -{0x775e0fb806b9bf, 0x6b0fd971fdba98, 0x63f3fdaf2d65ba, 0x30a8b48fc3008b, 0x37a318aee68f9e, 0x5799101e32993e, 0x6439fb7e85e00} +{{0x775e0fb806b9bf, 0x6b0fd971fdba98, 0x63f3fdaf2d65ba, 0x30a8b48fc3008b, 0x37a318aee68f9e, 0x5799101e32993e, 0x6439fb7e85e00}} #endif #endif , #if 0 #elif RADIX == 16 -{0x440, 0x172e, 0x4f, 0x1e07, 0x15ce, 0x1b55, 0x68e, 0x2c, 0x13bb, 0x1f43, 0x1dda, 0x1fb4, 0xe54, 0x1502, 0x723, 0x7e7, 0x1147, 0x1ba0, 0x3d0, 0xf7c, 0x1754, 0x5fc, 0x1098, 0x16aa, 0x182, 0x1c1d, 0x18e9, 0x13ce, 0xbae, 0x18} +{{0x440, 0x172e, 0x4f, 0x1e07, 0x15ce, 0x1b55, 0x68e, 0x2c, 0x13bb, 0x1f43, 0x1dda, 0x1fb4, 0xe54, 0x1502, 0x723, 0x7e7, 0x1147, 0x1ba0, 0x3d0, 0xf7c, 0x1754, 0x5fc, 0x1098, 0x16aa, 0x182, 0x1c1d, 0x18e9, 0x13ce, 0xbae, 0x18}} #elif RADIX == 32 -{0xb971102, 0xbc0e04f, 0xedaad73, 0xec05868, 0xdafa1ce, 0x953f69d, 0x723a813, 0x451cfce, 0x83d0dd0, 0xe5d51ef, 0x550982f, 0xe860ad, 0x79d8e9e, 0x40eba} +{{0xb971102, 0xbc0e04f, 0xedaad73, 0xec05868, 0xdafa1ce, 0x953f69d, 0x723a813, 0x451cfce, 0x83d0dd0, 0xe5d51ef, 0x550982f, 0xe860ad, 0x79d8e9e, 0x40eba}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xad73bc0e04fb9711, 0xafa1ceec05868eda, 0xe723a813953f69dd, 0x1ef83d0dd0451cfc, 0x860ad550982fe5d5, 0xc2eba79d8e9e0e} +{{0xad73bc0e04fb9711, 0xafa1ceec05868eda, 0xe723a813953f69dd, 0x1ef83d0dd0451cfc, 0x860ad550982fe5d5, 0xc2eba79d8e9e0e}} #else -{0x67781c09f72e22, 0x3bb0161a3b6ab5, 0x1ca9fb4eed7d0e, 0x451cfce723a81, 0x7cbaa3df07a1ba, 0x3a182b554260b, 0x6175d3cec74f} +{{0x67781c09f72e22, 0x3bb0161a3b6ab5, 0x1ca9fb4eed7d0e, 0x451cfce723a81, 0x7cbaa3df07a1ba, 0x3a182b554260b, 0x6175d3cec74f}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x18c5, 0x1326, 0x1d4d, 0x19eb, 0xea, 0x947, 0x1adf, 0xbf5, 0xafe, 0x1225, 0x18a0, 0xb3a, 0x8e0, 0xaea, 0x17aa, 0x19a5, 0x912, 0x634, 0x15c7, 0x1df7, 0x13cb, 0x1894, 0xeaa, 0xa69, 0x6ca, 0x1b49, 0x26f, 0x1f50, 0xd92, 0x6} +{{0x18c5, 0x1326, 0x1d4d, 0x19eb, 0xea, 0x947, 0x1adf, 0xbf5, 0xafe, 0x1225, 0x18a0, 0xb3a, 0x8e0, 0xaea, 0x17aa, 0x19a5, 0x912, 0x634, 0x15c7, 0x1df7, 0x13cb, 0x1894, 0xeaa, 0xa69, 0x6ca, 0x1b49, 0x26f, 0x1f50, 0xd92, 0x6}} #elif RADIX == 32 -{0x9936314, 0xb3d7d4d, 0xf4a383a, 0xf97ebad, 0xa0912ab, 0x3816758, 0x7aa5752, 0x244b34b, 0xf5c731a, 0xa4f2fbe, 0xd2eaac4, 0xa49b294, 0xea026fd, 0x3364b} +{{0x9936314, 0xb3d7d4d, 0xf4a383a, 0xf97ebad, 0xa0912ab, 0x3816758, 0x7aa5752, 0x244b34b, 0xf5c731a, 0xa4f2fbe, 0xd2eaac4, 0xa49b294, 0xea026fd, 0x3364b}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x383ab3d7d4d99363, 0x912abf97ebadf4a, 0xb7aa57523816758a, 0xfbef5c731a244b34, 0x9b294d2eaac4a4f2, 0x54764bea026fda4} +{{0x383ab3d7d4d99363, 0x912abf97ebadf4a, 0xb7aa57523816758a, 0xfbef5c731a244b34, 0x9b294d2eaac4a4f2, 0x54764bea026fda4}} #else -{0x7567afa9b326c6, 0x2fe5faeb7d28e0, 0x11c0b3ac504895, 0x2244b34b7aa575, 0x149e5f7deb8e63, 0x6926ca534baab1, 0x2a3b25f50137e} +{{0x7567afa9b326c6, 0x2fe5faeb7d28e0, 0x11c0b3ac504895, 0x2244b34b7aa575, 0x149e5f7deb8e63, 0x6926ca534baab1, 0x2a3b25f50137e}} #endif #endif , #if 0 #elif RADIX == 16 -{0x132f, 0x6d5, 0x95b, 0xa68, 0x1814, 0x12d3, 0x1f1e, 0x857, 0x14fa, 0xcf, 0x1f19, 0xe1b, 0x1cf7, 0xa53, 0x1455, 0x5ef, 0x3e2, 0x199c, 0x1162, 0x38d, 0x174b, 0x794, 0xef6, 0xf74, 0x9c, 0x1f55, 0x1c4d, 0x56f, 0x1638, 0x19} +{{0x132f, 0x6d5, 0x95b, 0xa68, 0x1814, 0x12d3, 0x1f1e, 0x857, 0x14fa, 0xcf, 0x1f19, 0xe1b, 0x1cf7, 0xa53, 0x1455, 0x5ef, 0x3e2, 0x199c, 0x1162, 0x38d, 0x174b, 0x794, 0xef6, 0xf74, 0x9c, 0x1f55, 0x1c4d, 0x56f, 0x1638, 0x19}} #elif RADIX == 32 -{0x36accbf, 0x14d095b, 0xe969e05, 0xe90aff1, 0x19067d3, 0x3ddc37f, 0x455529f, 0xf88bdf, 0xb162cce, 0xa5d2c71, 0xe8ef63c, 0xaa8271e, 0xadfc4df, 0xa8e0} +{{0x36accbf, 0x14d095b, 0xe969e05, 0xe90aff1, 0x19067d3, 0x3ddc37f, 0x455529f, 0xf88bdf, 0xb162cce, 0xa5d2c71, 0xe8ef63c, 0xaa8271e, 0xadfc4df, 0xa8e0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x9e0514d095b36acc, 0x9067d3e90aff1e96, 0xf455529f3ddc37f1, 0xc71b162cce0f88bd, 0x8271ee8ef63ca5d2, 0x30898e0adfc4dfaa} +{{0x9e0514d095b36acc, 0x9067d3e90aff1e96, 0xf455529f3ddc37f1, 0xc71b162cce0f88bd, 0x8271ee8ef63ca5d2, 0x30898e0adfc4dfaa}} #else -{0xa29a12b66d599, 0x4fa42bfc7a5a78, 0x79eee1bf8c833e, 0x60f88bdf455529, 0x14ba58e362c599, 0x6aa09c7ba3bd8f, 0x804c7056fe26f} +{{0xa29a12b66d599, 0x4fa42bfc7a5a78, 0x79eee1bf8c833e, 0x60f88bdf455529, 0x14ba58e362c599, 0x6aa09c7ba3bd8f, 0x804c7056fe26f}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c index b5916de330..797b08a494 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_32.c @@ -1,11 +1,11 @@ // clang-format off // Command line : python monty.py 32 // 0x40ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff -#ifdef RADIX_32 - #include #include +#ifdef RADIX_32 + #define sspint int32_t #define spint uint32_t #define udpint uint64_t @@ -817,22 +817,6 @@ static int modqr(const spint *h, const spint *x) { return modis1(r) | modis0(x); } -// conditional move g to f if d=1 -// strongly recommend inlining be disabled using compiler specific syntax -static void modcmv(int b, const spint *g, volatile spint *f) { - int i; - spint c0, c1, s, t; - spint r = 0x5aa5a55au; - c0 = (1 - b) + r; - c1 = b + r; - for (i = 0; i < 14; i++) { - s = g[i]; - t = f[i]; - f[i] = c0 * t + c1 * s; - f[i] -= r * (t + s); - } -} - // conditional swap g and f if d=1 // strongly recommend inlining be disabled using compiler specific syntax static void modcsw(int b, volatile spint *g, volatile spint *f) { @@ -886,52 +870,6 @@ static int modshr(unsigned int n, spint *a) { return r; } -// set a= 2^r -static void mod2r(unsigned int r, spint *a) { - unsigned int n = r / 28u; - unsigned int m = r % 28u; - modzer(a); - if (r >= 48 * 8) - return; - a[n] = 1; - a[n] <<= m; - nres(a, a); -} - -// export to byte array -static void modexp(const spint *a, char *b) { - int i; - spint c[14]; - redc(a, c); - for (i = 47; i >= 0; i--) { - b[i] = c[0] & (spint)0xff; - (void)modshr(8, c); - } -} - -// import from byte array -// returns 1 if in range, else 0 -static int modimp(const char *b, spint *a) { - int i, res; - for (i = 0; i < 14; i++) { - a[i] = 0; - } - for (i = 0; i < 48; i++) { - modshl(8, a); - a[0] += (spint)(unsigned char)b[i]; - } - res = modfsb(a); - nres(a, a); - return res; -} - -// determine sign -static int modsign(const spint *a) { - spint c[14]; - redc(a, c); - return c[0] % 2; -} - // return true if equal static int modcmp(const spint *a, const spint *b) { spint c[14], d[14]; @@ -1231,4 +1169,4 @@ fp_decode_reduce(fp_t *d, const void *src, 
size_t len) } } -#endif /* RADIX_32 */ \ No newline at end of file +#endif /* RADIX_32 */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c index 00f689bf79..6fdc22be70 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/fp_p65376_64.c @@ -1,11 +1,11 @@ // clang-format off // Command line : python monty.py 64 // 0x40ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff -#ifdef RADIX_64 - #include #include +#ifdef RADIX_64 + #define sspint int64_t #define spint uint64_t #define udpint __uint128_t @@ -473,22 +473,6 @@ static int modqr(const spint *h, const spint *x) { return modis1(r) | modis0(x); } -// conditional move g to f if d=1 -// strongly recommend inlining be disabled using compiler specific syntax -static void modcmv(int b, const spint *g, volatile spint *f) { - int i; - spint c0, c1, s, t; - spint r = 0x3cc3c33c5aa5a55au; - c0 = (1 - b) + r; - c1 = b + r; - for (i = 0; i < 7; i++) { - s = g[i]; - t = f[i]; - f[i] = c0 * t + c1 * s; - f[i] -= r * (t + s); - } -} - // conditional swap g and f if d=1 // strongly recommend inlining be disabled using compiler specific syntax static void modcsw(int b, volatile spint *g, volatile spint *f) { @@ -542,52 +526,6 @@ static int modshr(unsigned int n, spint *a) { return r; } -// set a= 2^r -static void mod2r(unsigned int r, spint *a) { - unsigned int n = r / 55u; - unsigned int m = r % 55u; - modzer(a); - if (r >= 48 * 8) - return; - a[n] = 1; - a[n] <<= m; - nres(a, a); -} - -// export to byte array -static void modexp(const spint *a, char *b) { - int i; - spint c[7]; - redc(a, c); - for (i = 47; i >= 0; i--) { - b[i] = c[0] & (spint)0xff; - (void)modshr(8, c); - } -} - -// import from byte array -// returns 1 if in range, else 0 -static int modimp(const char *b, spint *a) { - int i, res; - for (i = 0; i < 7; i++) { - a[i] = 0; - } - for (i = 0; i < 48; i++) { - modshl(8, a); - a[0] += (spint)(unsigned char)b[i]; - } - res = modfsb(a); - nres(a, a); - return res; -} - -// determine sign -static int modsign(const spint *a) { - spint c[7]; - redc(a, c); - return c[0] % 2; -} - // return true if equal static int modcmp(const spint *a, const spint *b) { spint c[7], d[7]; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd.h index 2b16e23834..616504c7b1 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd.h @@ -415,7 +415,7 @@ void copy_bases_to_kernel(theta_kernel_couple_points_t *ker, const ec_basis_t *B * @param t: an integer * @returns 0xFFFFFFFF on success, 0 on failure */ -static int +static inline int test_couple_point_order_twof(const theta_couple_point_t *T, const theta_couple_curve_t *E, int t) { int check_P1 = test_point_order_twof(&T->P1, &E->E1, t); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.c index d980d12183..d6777fa92a 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/hd_splitting_transforms.c @@ -11,131 +11,131 @@ const int CHI_EVAL[4][4] = {{1, 1, 1, 1}, {1, -1, 1, -1}, {1, 1, -1, -1}, {1, -1 const fp2_t FP2_CONSTANTS[5] = {{ #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , 
#if 0 #elif RADIX == 16 -{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2} +{{0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}} #elif RADIX == 32 -{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000} +{{0x3f0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10000}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000} +{{0x3, 0x0, 0x0, 0x0, 0x0, 0x3d00000000000000}} #else -{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000} +{{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe400000000000}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x1f03, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0xfff, 0x1e} +{{0x1f03, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0xfff, 0x1e}} #elif RADIX == 32 -{0xffffc0f, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0x30fff} +{{0xffffc0f, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0x30fff}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xfffffffffffffffc, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff} +{{0xfffffffffffffffc, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff}} #else -{0x7ffffffffffff8, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x1ffffffffffff} +{{0x7ffffffffffff8, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x1ffffffffffff}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1f03, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0xfff, 0x1e} +{{0x1f03, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0xfff, 0x1e}} #elif RADIX == 32 -{0xffffc0f, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0x30fff} +{{0xffffc0f, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0xfffffff, 0x30fff}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xfffffffffffffffc, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff} +{{0xfffffffffffffffc, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x3ffffffffffffff}} #else -{0x7ffffffffffff8, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x1ffffffffffff} +{{0x7ffffffffffff8, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x7fffffffffffff, 0x1ffffffffffff}} #endif #endif }}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/l2.c index ea32213c75..0fed774a04 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/l2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/l2.c @@ -24,8 +24,8 @@ copy(fp_num *x, fp_num *r) static void normalize(fp_num *x) { - if (x->s == 0.0 || isfinite(x->s) == 0) { - if (x->s == 0.0) { + if (fpclassify(x->s) == FP_ZERO || isfinite(x->s) == 0) { + if (fpclassify(x->s) == FP_ZERO) { x->e = INT_MIN; } } else { @@ -49,13 +49,6 @@ to_deltabar(fp_num *x) x->e = 0; } -static void -to_etabar(fp_num *x) -{ - x->s = ETABAR; - x->e = 0; -} - static void from_mpz(const ibz_t *x, fp_num *r) { diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lll_internals.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lll_internals.h index e8d90141ac..2b76857205 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lll_internals.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/lll_internals.h @@ -43,13 +43,19 @@ /** @brief Type for fractions of integers * - * @typedef ibq_t +* @typedef ibq_t * * For fractions of integers of arbitrary size, used by intbig module, using gmp */ -typedef ibz_t ibq_t[2]; -typedef ibq_t ibq_vec_4_t[4]; -typedef ibq_t ibq_mat_4x4_t[4][4]; +typedef struct { + ibz_t q[2]; +} ibq_t; +typedef struct { + ibq_t v[4]; +} ibq_vec_4_t; +typedef struct { + ibq_vec_4_t m[4]; +} ibq_mat_4x4_t; /**@} */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mp.c index 27f4a963db..13714eee4a 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mp.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/mp.c @@ -2,6 +2,7 @@ #include #include #include +#include // double-wide multiplication void @@ -17,7 +18,7 @@ MUL(digit_t *out, const digit_t a, const digit_t b) out[0] = _umul128(a, b, &umul_hi); out[1] = umul_hi; -#elif defined(RADIX_64) && defined(HAVE_UINT128) +#elif defined(RADIX_64) && (defined(HAVE_UINT128) || defined(__SIZEOF_INT128__) || 
defined(__int128)) && !defined(C_PEDANTIC_MODE) unsigned __int128 umul_tmp; umul_tmp = (unsigned __int128)(a) * (unsigned __int128)(b); out[0] = (uint64_t)umul_tmp; @@ -277,6 +278,7 @@ mp_inv_2e(digit_t *b, const digit_t *a, int e, unsigned int nwords) assert((a[0] & 1) == 1); digit_t x[nwords], y[nwords], aa[nwords], mp_one[nwords], tmp[nwords]; + memset(x, 0, sizeof(x)); mp_copy(aa, a, nwords); mp_one[0] = 1; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/rationals.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/rationals.c index 0c5387e5e8..25f8519b3f 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/rationals.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/rationals.c @@ -1,20 +1,20 @@ -#include + #include #include "internal.h" #include "lll_internals.h" void ibq_init(ibq_t *x) { - ibz_init(&((*x)[0])); - ibz_init(&((*x)[1])); - ibz_set(&((*x)[1]), 1); + ibz_init(&(x->q[0])); + ibz_init(&(x->q[1])); + ibz_set(&(x->q[1]), 1); } void ibq_finalize(ibq_t *x) { - ibz_finalize(&((*x)[0])); - ibz_finalize(&((*x)[1])); + ibz_finalize(&(x->q[0])); + ibz_finalize(&(x->q[1])); } void @@ -22,7 +22,7 @@ ibq_mat_4x4_init(ibq_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibq_init(&(*mat)[i][j]); + ibq_init(&mat->m[i].v[j]); } } } @@ -31,7 +31,7 @@ ibq_mat_4x4_finalize(ibq_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibq_finalize(&(*mat)[i][j]); + ibq_finalize(&mat->m[i].v[j]); } } } @@ -40,14 +40,14 @@ void ibq_vec_4_init(ibq_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibq_init(&(*vec)[i]); + ibq_init(&vec->v[i]); } } void ibq_vec_4_finalize(ibq_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibq_finalize(&(*vec)[i]); + ibq_finalize(&vec->v[i]); } } @@ -57,9 +57,9 @@ ibq_mat_4x4_print(const ibq_mat_4x4_t *mat) printf("matrix: "); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_print(&((*mat)[i][j][0]), 10); + ibz_print(&(mat->m[i].v[j].q[0]), 10); printf("/"); - ibz_print(&((*mat)[i][j][1]), 10); + ibz_print(&(mat->m[i].v[j].q[1]), 10); printf(" "); } printf("\n "); @@ -72,9 +72,9 @@ ibq_vec_4_print(const ibq_vec_4_t *vec) { printf("vector: "); for (int i = 0; i < 4; i++) { - ibz_print(&((*vec)[i][0]), 10); + ibz_print(&(vec->v[i].q[0]), 10); printf("/"); - ibz_print(&((*vec)[i][1]), 10); + ibz_print(&(vec->v[i].q[1]), 10); printf(" "); } printf("\n\n"); @@ -86,10 +86,10 @@ ibq_reduce(ibq_t *x) ibz_t gcd, r; ibz_init(&gcd); ibz_init(&r); - ibz_gcd(&gcd, &((*x)[0]), &((*x)[1])); - ibz_div(&((*x)[0]), &r, &((*x)[0]), &gcd); + ibz_gcd(&gcd, &(x->q[0]), &(x->q[1])); + ibz_div(&(x->q[0]), &r, &(x->q[0]), &gcd); assert(ibz_is_zero(&r)); - ibz_div(&((*x)[1]), &r, &((*x)[1]), &gcd); + ibz_div(&(x->q[1]), &r, &(x->q[1]), &gcd); assert(ibz_is_zero(&r)); ibz_finalize(&gcd); ibz_finalize(&r); @@ -102,10 +102,10 @@ ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b) ibz_init(&add); ibz_init(&prod); - ibz_mul(&add, &((*a)[0]), &((*b)[1])); - ibz_mul(&prod, &((*b)[0]), &((*a)[1])); - ibz_add(&((*sum)[0]), &add, &prod); - ibz_mul(&((*sum)[1]), &((*a)[1]), &((*b)[1])); + ibz_mul(&add, &(a->q[0]), &(b->q[1])); + ibz_mul(&prod, &(b->q[0]), &(a->q[1])); + ibz_add(&(sum->q[0]), &add, &prod); + ibz_mul(&(sum->q[1]), &(a->q[1]), &(b->q[1])); ibz_finalize(&add); ibz_finalize(&prod); } @@ -113,8 +113,8 @@ ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b) void ibq_neg(ibq_t *neg, const ibq_t *x) { - ibz_copy(&((*neg)[1]), &((*x)[1])); - ibz_neg(&((*neg)[0]), &((*x)[0])); + ibz_copy(&(neg->q[1]), &(x->q[1])); + 
ibz_neg(&(neg->q[0]), &(x->q[0])); } void @@ -143,8 +143,8 @@ ibq_abs(ibq_t *abs, const ibq_t *x) // once void ibq_mul(ibq_t *prod, const ibq_t *a, const ibq_t *b) { - ibz_mul(&((*prod)[0]), &((*a)[0]), &((*b)[0])); - ibz_mul(&((*prod)[1]), &((*a)[1]), &((*b)[1])); + ibz_mul(&(prod->q[0]), &(a->q[0]), &(b->q[0])); + ibz_mul(&(prod->q[1]), &(a->q[1]), &(b->q[1])); } int @@ -152,9 +152,9 @@ ibq_inv(ibq_t *inv, const ibq_t *x) { int res = !ibq_is_zero(x); if (res) { - ibz_copy(&((*inv)[0]), &((*x)[0])); - ibz_copy(&((*inv)[1]), &((*x)[1])); - ibz_swap(&((*inv)[1]), &((*inv)[0])); + ibz_copy(&(inv->q[0]), &(x->q[0])); + ibz_copy(&(inv->q[1]), &(x->q[1])); + ibz_swap(&(inv->q[1]), &(inv->q[0])); } return (res); } @@ -165,15 +165,15 @@ ibq_cmp(const ibq_t *a, const ibq_t *b) ibz_t x, y; ibz_init(&x); ibz_init(&y); - ibz_copy(&x, &((*a)[0])); - ibz_copy(&y, &((*b)[0])); - ibz_mul(&y, &y, &((*a)[1])); - ibz_mul(&x, &x, &((*b)[1])); - if (ibz_cmp(&((*a)[1]), &ibz_const_zero) > 0) { + ibz_copy(&x, &(a->q[0])); + ibz_copy(&y, &(b->q[0])); + ibz_mul(&y, &y, &(a->q[1])); + ibz_mul(&x, &x, &(b->q[1])); + if (ibz_cmp(&(a->q[1]), &ibz_const_zero) > 0) { ibz_neg(&y, &y); ibz_neg(&x, &x); } - if (ibz_cmp(&((*b)[1]), &ibz_const_zero) > 0) { + if (ibz_cmp(&(b->q[1]), &ibz_const_zero) > 0) { ibz_neg(&y, &y); ibz_neg(&x, &x); } @@ -186,28 +186,28 @@ ibq_cmp(const ibq_t *a, const ibq_t *b) int ibq_is_zero(const ibq_t *x) { - return ibz_is_zero(&((*x)[0])); + return ibz_is_zero(&(x->q[0])); } int ibq_is_one(const ibq_t *x) { - return (0 == ibz_cmp(&((*x)[0]), &((*x)[1]))); + return (0 == ibz_cmp(&(x->q[0]), &(x->q[1]))); } int ibq_set(ibq_t *q, const ibz_t *a, const ibz_t *b) { - ibz_copy(&((*q)[0]), a); - ibz_copy(&((*q)[1]), b); + ibz_copy(&(q->q[0]), a); + ibz_copy(&(q->q[1]), b); return !ibz_is_zero(b); } void ibq_copy(ibq_t *target, const ibq_t *value) // once { - ibz_copy(&((*target)[0]), &((*value)[0])); - ibz_copy(&((*target)[1]), &((*value)[1])); + ibz_copy(&(target->q[0]), &(value->q[0])); + ibz_copy(&(target->q[1]), &(value->q[1])); } int @@ -215,7 +215,7 @@ ibq_is_ibz(const ibq_t *q) { ibz_t r; ibz_init(&r); - ibz_mod(&r, &((*q)[0]), &((*q)[1])); + ibz_mod(&r, &(q->q[0]), &(q->q[1])); int res = ibz_is_zero(&r); ibz_finalize(&r); return (res); @@ -226,7 +226,7 @@ ibq_to_ibz(ibz_t *z, const ibq_t *q) { ibz_t r; ibz_init(&r); - ibz_div(z, &r, &((*q)[0]), &((*q)[1])); + ibz_div(z, &r, &(q->q[0]), &(q->q[1])); int res = ibz_is_zero(&r); ibz_finalize(&r); return (res); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/rng.h b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/rng.h index d0861ac036..0362ca0c42 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/rng.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/rng.h @@ -5,7 +5,7 @@ #include -static int randombytes(unsigned char *x, unsigned long long xlen){ +static inline int randombytes(unsigned char *x, unsigned long long xlen){ OQS_randombytes(x, xlen); return 0; } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign.c index 7335c38d9a..cf2134085b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl3_ref/sqisign.c @@ -121,7 +121,7 @@ sqisign_verify(const unsigned char *m, unsigned long long siglen, const unsigned char *pk) { - + (void) siglen; int ret = 0; public_key_t pkt = { 0 }; signature_t sigt; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/asm_preamble.h 
b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/asm_preamble.h index 3ef7927e9c..ca2a054ce2 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/asm_preamble.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/asm_preamble.h @@ -9,8 +9,10 @@ #undef fp2_mul_c1 #undef fp2_sq_c0 #undef fp2_sq_c1 -#define p2 CAT(_, p2) -#define p CAT(_, p) +#undef p2 +#undef p +#define p2 CAT(_, SQISIGN_NAMESPACE(p2)) +#define p CAT(_, SQISIGN_NAMESPACE(p)) #define fp_add CAT(_, SQISIGN_NAMESPACE(fp_add)) #define fp_sub CAT(_, SQISIGN_NAMESPACE(fp_sub)) #define fp_mul CAT(_, SQISIGN_NAMESPACE(fp_mul)) diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c index 143060e2c3..74184fc97b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/dim2id2iso.c @@ -191,7 +191,7 @@ fixed_degree_isogeny_and_eval(quat_left_ideal_t *lideal, // reordering vectors and switching some signs if needed to make it in a nicer // shape static void -post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, const ibz_t *norm, bool is_special_order) +post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, bool is_special_order) { // if the left order is the special one, then we apply some additional post // treatment @@ -520,7 +520,7 @@ find_uv(ibz_t *u, ibz_set(&adjusted_norm[0], 1); ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); - post_LLL_basis_treatment(&gram[0], &reduced[0], &ideal[0].norm, true); + post_LLL_basis_treatment(&gram[0], &reduced[0], true); // for efficient lattice reduction, we replace ideal[0] by the equivalent // ideal of smallest norm @@ -562,7 +562,7 @@ find_uv(ibz_t *u, ibz_set(&adjusted_norm[i], 1); ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom); ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom); - post_LLL_basis_treatment(&gram[i], &reduced[i], &ideal[i].norm, false); + post_LLL_basis_treatment(&gram[i], &reduced[i], false); } // enumerating small vectors diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/e0_basis.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/e0_basis.c index a7148e485b..316b12f119 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/e0_basis.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/e0_basis.c @@ -2,54 +2,54 @@ const fp2_t BASIS_E0_PX = { #if 0 #elif RADIX == 16 -{0x1099, 0xa9f, 0x14f8, 0x1537, 0x1a13, 0x97e, 0x1095, 0xc8b, 0xdd2, 0x1c5f, 0xbdf, 0x1344, 0x1330, 0x1733, 0x185d, 0x1b08, 0x464, 0x76f, 0xe44, 0x3fc, 0x1dc0, 0x1c62, 0x88, 0x972, 0x13f4, 0x18c8, 0x6bd, 0x804, 0x1269, 0x19e0, 0x14bd, 0x10a1, 0xe5e, 0x1af2, 0x156c, 0x3f7, 0x16a1, 0x47d, 0x314} +{{0x1099, 0xa9f, 0x14f8, 0x1537, 0x1a13, 0x97e, 0x1095, 0xc8b, 0xdd2, 0x1c5f, 0xbdf, 0x1344, 0x1330, 0x1733, 0x185d, 0x1b08, 0x464, 0x76f, 0xe44, 0x3fc, 0x1dc0, 0x1c62, 0x88, 0x972, 0x13f4, 0x18c8, 0x6bd, 0x804, 0x1269, 0x19e0, 0x14bd, 0x10a1, 0xe5e, 0x1af2, 0x156c, 0x3f7, 0x16a1, 0x47d, 0x314}} #elif RADIX == 32 -{0x184cba61, 0xf4f854f, 0x1fb42753, 0x45c2552, 0x1c5f6e93, 0x2688bdf, 0xedcce66, 0x64d8461, 0x1c8876f2, 0x177007f8, 0x12044718, 0x1913f44b, 0x10d7b8, 0x1cf049a5, 0x1d0a1a5e, 0x1b35e4e5, 0x1508fdea, 0x66d} +{{0x184cba61, 0xf4f854f, 0x1fb42753, 0x45c2552, 0x1c5f6e93, 0x2688bdf, 0xedcce66, 0x64d8461, 0x1c8876f2, 0x177007f8, 0x12044718, 
0x1913f44b, 0x10d7b8, 0x1cf049a5, 0x1d0a1a5e, 0x1b35e4e5, 0x1508fdea, 0x66d}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x27537a7c2a7e132e, 0x7dba4c8b84aa5fb4, 0xedcce6613445eff1, 0xc7221dbc8c9b08c2, 0x8972044718bb803f, 0x24d280435ee3227e, 0x6bc9cbd0a1a5ee78, 0x1011f6d423f7ab6} +{{0x27537a7c2a7e132e, 0x7dba4c8b84aa5fb4, 0xedcce6613445eff1, 0xc7221dbc8c9b08c2, 0x8972044718bb803f, 0x24d280435ee3227e, 0x6bc9cbd0a1a5ee78, 0x1011f6d423f7ab6}} #else -{0xa6f4f854fc265d, 0x4c8b84aa5fb427, 0x1309a22f7f8bedd, 0x12326c230bb7339, 0x1177007f8e443b7, 0x3227e897204471, 0x173c12694021af7, 0xd9af272f428697, 0x523eda847ef5} +{{0xa6f4f854fc265d, 0x4c8b84aa5fb427, 0x1309a22f7f8bedd, 0x12326c230bb7339, 0x1177007f8e443b7, 0x3227e897204471, 0x173c12694021af7, 0xd9af272f428697, 0x523eda847ef5}} #endif #endif , #if 0 #elif RADIX == 16 -{0x4b1, 0x178f, 0x107b, 0x6f6, 0x75e, 0x1b27, 0x4db, 0x1e1b, 0xd78, 0x15b6, 0x1130, 0x8cc, 0x1ac0, 0x9b7, 0x692, 0x1e07, 0x1f4, 0xfd7, 0x2ab, 0x7b5, 0x1040, 0xa43, 0xb6d, 0x13a1, 0x1422, 0x10c9, 0x10b0, 0x1540, 0x827, 0xa69, 0x1761, 0x1f25, 0x1d16, 0x16f2, 0x1fcb, 0x92, 0xcba, 0x1c03, 0x3c7} +{{0x4b1, 0x178f, 0x107b, 0x6f6, 0x75e, 0x1b27, 0x4db, 0x1e1b, 0xd78, 0x15b6, 0x1130, 0x8cc, 0x1ac0, 0x9b7, 0x692, 0x1e07, 0x1f4, 0xfd7, 0x2ab, 0x7b5, 0x1040, 0xa43, 0xb6d, 0x13a1, 0x1422, 0x10c9, 0x10b0, 0x1540, 0x827, 0xa69, 0x1761, 0x1f25, 0x1d16, 0x16f2, 0x1fcb, 0x92, 0xcba, 0x1c03, 0x3c7}} #elif RADIX == 32 -{0x1258c7b1, 0xd07bbc7, 0x9cebc6f, 0x10d936f6, 0x15b66bc7, 0x1199130, 0x926df58, 0x1f4f039a, 0x556fd70, 0x1c100f6a, 0x15b6a90, 0x1934229d, 0x15021610, 0x1534a09e, 0xdf25bb0, 0x12ede5d1, 0x5d024bf, 0xa9b} +{{0x1258c7b1, 0xd07bbc7, 0x9cebc6f, 0x10d936f6, 0x15b66bc7, 0x1199130, 0x926df58, 0x1f4f039a, 0x556fd70, 0x1c100f6a, 0x15b6a90, 0x1934229d, 0x15021610, 0x1534a09e, 0xdf25bb0, 0x12ede5d1, 0x5d024bf, 0xa9b}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xbc6f683dde3c9631, 0xd9af1e1b26dec9ce, 0x926df5808cc89856, 0x5155bf5c3e9e0734, 0x53a15b6a90e0807b, 0x504f540858432684, 0xdbcba2df25bb0a9a, 0x18f00d974092fe5} +{{0xbc6f683dde3c9631, 0xd9af1e1b26dec9ce, 0x926df5808cc89856, 0x5155bf5c3e9e0734, 0x53a15b6a90e0807b, 0x504f540858432684, 0xdbcba2df25bb0a9a, 0x18f00d974092fe5}} #else -{0xded07bbc792c63, 0x11e1b26dec9cebc, 0xc046644c2b6cd7, 0x10fa781cd249b7d, 0x1c100f6a2ab7eb, 0x3268453a15b6a9, 0x54d2827aa042c2, 0x1976f2e8b7c96ec, 0x16e01b2e8125f} +{{0xded07bbc792c63, 0x11e1b26dec9cebc, 0xc046644c2b6cd7, 0x10fa781cd249b7d, 0x1c100f6a2ab7eb, 0x3268453a15b6a9, 0x54d2827aa042c2, 0x1976f2e8b7c96ec, 0x16e01b2e8125f}} #endif #endif }; const fp2_t BASIS_E0_QX = { #if 0 #elif RADIX == 16 -{0x15c, 0x865, 0x1af6, 0x17b9, 0x6a2, 0x1c22, 0x17c5, 0x1149, 0xa7, 0x151e, 0xe57, 0x4c2, 0x18cd, 0xbd2, 0x7a4, 0x7c6, 0x74a, 0xd2, 0x902, 0x68c, 0x21e, 0x1e44, 0x1f5a, 0x1d4c, 0x115b, 0x1777, 0x16d4, 0x503, 0x3af, 0x7e4, 0x1aa7, 0x3dd, 0x827, 0x186b, 0x765, 0x1fc5, 0xc78, 0x9bd, 0xfe} +{{0x15c, 0x865, 0x1af6, 0x17b9, 0x6a2, 0x1c22, 0x17c5, 0x1149, 0xa7, 0x151e, 0xe57, 0x4c2, 0x18cd, 0xbd2, 0x7a4, 0x7c6, 0x74a, 0xd2, 0x902, 0x68c, 0x21e, 0x1e44, 0x1f5a, 0x1d4c, 0x115b, 0x1777, 0x16d4, 0x503, 0x3af, 0x7e4, 0x1aa7, 0x3dd, 0x827, 0x186b, 0x765, 0x1fc5, 0xc78, 0x9bd, 0xfe}} #elif RADIX == 32 -{0x10ae12d6, 0x13af6432, 0x88d457b, 0xa4df178, 0x151e053c, 0x14984e57, 0x122f4b19, 0x14a3e31e, 0x12040d23, 0x878d18, 0xcfad791, 0xef15bea, 0x140eda97, 0x13f20ebc, 0xe3ddd53, 0x1970d682, 0x3c7f14e, 0x4eb} +{{0x10ae12d6, 0x13af6432, 0x88d457b, 0xa4df178, 0x151e053c, 0x14984e57, 0x122f4b19, 0x14a3e31e, 
0x12040d23, 0x878d18, 0xcfad791, 0xef15bea, 0x140eda97, 0x13f20ebc, 0xe3ddd53, 0x1970d682, 0x3c7f14e, 0x4eb}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x457b9d7b21942b84, 0x7814f149be2f088d, 0x22f4b19a4c272bd4, 0xc4810348e947c63d, 0x7d4cfad791043c68, 0x75e503b6a5dde2b, 0xe1ad04e3ddd539f9, 0x1326f58f1fc53b2} +{{0x457b9d7b21942b84, 0x7814f149be2f088d, 0x22f4b19a4c272bd4, 0xc4810348e947c63d, 0x7d4cfad791043c68, 0x75e503b6a5dde2b, 0xe1ad04e3ddd539f9, 0x1326f58f1fc53b2}} #else -{0xf73af643285709, 0xf149be2f088d45, 0xcd261395ea3c0a, 0x3a51f18f48bd2c, 0x20878d18902069, 0x1dde2b7d4cfad79, 0x1cfc83af281db52, 0xcb86b4138f7754, 0xb4deb1e3f8a7} +{{0xf73af643285709, 0xf149be2f088d45, 0xcd261395ea3c0a, 0x3a51f18f48bd2c, 0x20878d18902069, 0x1dde2b7d4cfad79, 0x1cfc83af281db52, 0xcb86b4138f7754, 0xb4deb1e3f8a7}} #endif #endif , #if 0 #elif RADIX == 16 -{0x6ac, 0x25e, 0xc7a, 0x1492, 0xd01, 0xbc0, 0x118, 0x376, 0x3e0, 0x7ae, 0x573, 0x171f, 0x35a, 0x1725, 0x48f, 0xc94, 0x133c, 0x16a4, 0x10a8, 0x178d, 0xdd7, 0x798, 0x1d05, 0x39f, 0xc2a, 0x179c, 0x407, 0xd3, 0x118a, 0x1c9f, 0xeac, 0x145b, 0xc35, 0x11a2, 0x58b, 0xe4, 0x5e3, 0xae7, 0x330} +{{0x6ac, 0x25e, 0xc7a, 0x1492, 0xd01, 0xbc0, 0x118, 0x376, 0x3e0, 0x7ae, 0x573, 0x171f, 0x35a, 0x1725, 0x48f, 0xc94, 0x133c, 0x16a4, 0x10a8, 0x178d, 0xdd7, 0x798, 0x1d05, 0x39f, 0xc2a, 0x179c, 0x407, 0xd3, 0x118a, 0x1c9f, 0xeac, 0x145b, 0xc35, 0x11a2, 0x58b, 0xe4, 0x5e3, 0xae7, 0x330}} #elif RADIX == 32 -{0x3563c78, 0x4c7a12f, 0x101a0349, 0x1bb04617, 0x7ae1f00, 0xae3e573, 0x7dc946b, 0x13c64a12, 0x1516a49, 0x375ef1b, 0x1fe829e6, 0x138c2a1c, 0x34c80f7, 0xe4fc628, 0xb45b756, 0x2e344c3, 0xf18390b, 0x339} +{{0x3563c78, 0x4c7a12f, 0x101a0349, 0x1bb04617, 0x7ae1f00, 0xae3e573, 0x7dc946b, 0x13c64a12, 0x1516a49, 0x375ef1b, 0x1fe829e6, 0x138c2a1c, 0x34c80f7, 0xe4fc628, 0xb45b756, 0x2e344c3, 0xf18390b, 0x339}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x349263d0978d58f, 0xb87c037608c2f01a, 0x7dc946b571f2b99e, 0xd8545a92678c9424, 0x439fe829e61baf78, 0xe3140d3203de7185, 0xc68986b45b756727, 0x32b9cbc60e42c5} +{{0x349263d0978d58f, 0xb87c037608c2f01a, 0x7dc946b571f2b99e, 0xd8545a92678c9424, 0x439fe829e61baf78, 0xe3140d3203de7185, 0xc68986b45b756727, 0x32b9cbc60e42c5}} #else -{0x924c7a12f1ab1e, 0x37608c2f01a03, 0x15ab8f95ccf5c3e, 0x99e325091f7251, 0xc375ef1b0a8b52, 0x1e7185439fe829e, 0x1393f18a069901e, 0x1171a261ad16dd5, 0x6573978c1c85} +{{0x924c7a12f1ab1e, 0x37608c2f01a03, 0x15ab8f95ccf5c3e, 0x99e325091f7251, 0xc375ef1b0a8b52, 0x1e7185439fe829e, 0x1393f18a069901e, 0x1171a261ad16dd5, 0x6573978c1c85}} #endif #endif }; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec.h index e609c93a08..7cef95ca49 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/ec.h @@ -566,7 +566,7 @@ uint32_t ec_is_basis_four_torsion(const ec_basis_t *B, const ec_curve_t *E); * * @return 0xFFFFFFFF if the order is correct, 0 otherwise */ -static int +static inline int test_point_order_twof(const ec_point_t *P, const ec_curve_t *E, int t) { ec_point_t test; @@ -595,7 +595,7 @@ test_point_order_twof(const ec_point_t *P, const ec_curve_t *E, int t) * * @return 0xFFFFFFFF if the order is correct, 0 otherwise */ -static int +static inline int test_basis_order_twof(const ec_basis_t *B, const ec_curve_t *E, int t) { int check_P = test_point_order_twof(&B->P, E, t); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/encode_verification.c 
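/*
 * Illustrative aside -- a minimal sketch, not taken from the SQIsign sources,
 * of the two remaining patterns in the hunks above and below: the fp2_t
 * constants gain an extra level of braces (the added brace level indicates
 * that the field-element type wraps its limb array in a struct, so a fully
 * braced initializer avoids -Wmissing-braces under the stricter warning
 * flags liboqs builds with), and asm_preamble.h routes the shared assembler
 * symbols p/p2 through the namespacing macro so the lvl1/lvl3/lvl5 objects
 * can coexist in one library.  NWORDS, the typedefs, and the lvl5 prefix
 * below are assumptions for illustration only; the real layouts live in
 * fp.h / gf5248.h and sqisign_namespace.h.  (The dropped `norm` argument of
 * post_LLL_basis_treatment() is the same cleanup: an unused parameter.)
 */
#include <stdint.h>
#include <stdio.h>

#define NWORDS 4
typedef struct { uint64_t v[NWORDS]; } fp_t;   /* field element: struct wrapping a limb array */
typedef struct { fp_t re, im; } fp2_t;         /* GF(p^2) element: real and imaginary parts */

/* Fully braced: one pair for each fp_t, one for its inner array.  Writing
 * only {0x1, 0x0, 0x0, 0x0} per component still compiles via brace elision
 * but warns, which is what the {{...}} rewrite in the hunks addresses. */
static const fp2_t EXAMPLE = {
    { { 0x1, 0x0, 0x0, 0x0 } },   /* real part */
    { { 0x0, 0x0, 0x0, 0x0 } },   /* imaginary part */
};

/* Symbol namespacing as in asm_preamble.h: paste a per-parameter-set prefix
 * onto the assembler name.  SQISIGN_NAMESPACE here is a stand-in for the
 * macro from sqisign_namespace.h. */
#define SQISIGN_NAMESPACE(s) sqisign_lvl5_##s
#define CAT_(a, b) a##b
#define CAT(a, b) CAT_(a, b)
#define p2 CAT(_, SQISIGN_NAMESPACE(p2))   /* a use of p2 would expand to _sqisign_lvl5_p2 */

int main(void)
{
    printf("re[0]=%llu\n", (unsigned long long)EXAMPLE.re.v[0]);
    return 0;
}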
b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/encode_verification.c index fecdb9c259..8aa451d366 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/encode_verification.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/encode_verification.c @@ -99,36 +99,6 @@ ec_curve_from_bytes(ec_curve_t *curve, const byte_t *enc) return proj_from_bytes(&curve->A, &curve->C, enc); } -static byte_t * -ec_point_to_bytes(byte_t *enc, const ec_point_t *point) -{ - return proj_to_bytes(enc, &point->x, &point->z); -} - -static const byte_t * -ec_point_from_bytes(ec_point_t *point, const byte_t *enc) -{ - return proj_from_bytes(&point->x, &point->z, enc); -} - -static byte_t * -ec_basis_to_bytes(byte_t *enc, const ec_basis_t *basis) -{ - enc = ec_point_to_bytes(enc, &basis->P); - enc = ec_point_to_bytes(enc, &basis->Q); - enc = ec_point_to_bytes(enc, &basis->PmQ); - return enc; -} - -static const byte_t * -ec_basis_from_bytes(ec_basis_t *basis, const byte_t *enc) -{ - enc = ec_point_from_bytes(&basis->P, enc); - enc = ec_point_from_bytes(&basis->Q, enc); - enc = ec_point_from_bytes(&basis->PmQ, enc); - return enc; -} - // public API byte_t * diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c index d62ffc51c7..4b8e3e34c1 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/endomorphism_action.c @@ -4,261 +4,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x280} +{{0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x280}} #elif RADIX == 32 -{0x12f68, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x400} +{{0x12f68, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x400}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x4b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x170000000000000} +{{0x4b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x170000000000000}} #else -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1300000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 
-{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x1099, 0xa9f, 0x14f8, 0x1537, 0x1a13, 0x97e, 0x1095, 0xc8b, 0xdd2, 0x1c5f, 0xbdf, 0x1344, 0x1330, 0x1733, 0x185d, 0x1b08, 0x464, 0x76f, 0xe44, 0x3fc, 0x1dc0, 0x1c62, 0x88, 0x972, 0x13f4, 0x18c8, 0x6bd, 0x804, 0x1269, 0x19e0, 0x14bd, 0x10a1, 0xe5e, 0x1af2, 0x156c, 0x3f7, 0x16a1, 0x47d, 0x314} +{{0x1099, 0xa9f, 0x14f8, 0x1537, 0x1a13, 0x97e, 0x1095, 0xc8b, 0xdd2, 0x1c5f, 0xbdf, 0x1344, 0x1330, 0x1733, 0x185d, 0x1b08, 0x464, 0x76f, 0xe44, 0x3fc, 0x1dc0, 0x1c62, 0x88, 0x972, 0x13f4, 0x18c8, 0x6bd, 0x804, 0x1269, 0x19e0, 0x14bd, 0x10a1, 0xe5e, 0x1af2, 0x156c, 0x3f7, 0x16a1, 0x47d, 0x314}} #elif RADIX == 32 -{0x184cba61, 0xf4f854f, 0x1fb42753, 0x45c2552, 0x1c5f6e93, 0x2688bdf, 0xedcce66, 0x64d8461, 0x1c8876f2, 0x177007f8, 0x12044718, 0x1913f44b, 0x10d7b8, 0x1cf049a5, 0x1d0a1a5e, 0x1b35e4e5, 0x1508fdea, 0x66d} +{{0x184cba61, 0xf4f854f, 0x1fb42753, 0x45c2552, 0x1c5f6e93, 0x2688bdf, 0xedcce66, 0x64d8461, 0x1c8876f2, 0x177007f8, 0x12044718, 0x1913f44b, 0x10d7b8, 0x1cf049a5, 0x1d0a1a5e, 0x1b35e4e5, 0x1508fdea, 0x66d}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x27537a7c2a7e132e, 0x7dba4c8b84aa5fb4, 0xedcce6613445eff1, 0xc7221dbc8c9b08c2, 0x8972044718bb803f, 0x24d280435ee3227e, 0x6bc9cbd0a1a5ee78, 0x1011f6d423f7ab6} +{{0x27537a7c2a7e132e, 0x7dba4c8b84aa5fb4, 0xedcce6613445eff1, 0xc7221dbc8c9b08c2, 0x8972044718bb803f, 0x24d280435ee3227e, 0x6bc9cbd0a1a5ee78, 0x1011f6d423f7ab6}} #else -{0xa6f4f854fc265d, 0x4c8b84aa5fb427, 0x1309a22f7f8bedd, 0x12326c230bb7339, 0x1177007f8e443b7, 0x3227e897204471, 0x173c12694021af7, 0xd9af272f428697, 0x523eda847ef5} +{{0xa6f4f854fc265d, 0x4c8b84aa5fb427, 0x1309a22f7f8bedd, 0x12326c230bb7339, 0x1177007f8e443b7, 0x3227e897204471, 
0x173c12694021af7, 0xd9af272f428697, 0x523eda847ef5}} #endif #endif , #if 0 #elif RADIX == 16 -{0x4b1, 0x178f, 0x107b, 0x6f6, 0x75e, 0x1b27, 0x4db, 0x1e1b, 0xd78, 0x15b6, 0x1130, 0x8cc, 0x1ac0, 0x9b7, 0x692, 0x1e07, 0x1f4, 0xfd7, 0x2ab, 0x7b5, 0x1040, 0xa43, 0xb6d, 0x13a1, 0x1422, 0x10c9, 0x10b0, 0x1540, 0x827, 0xa69, 0x1761, 0x1f25, 0x1d16, 0x16f2, 0x1fcb, 0x92, 0xcba, 0x1c03, 0x3c7} +{{0x4b1, 0x178f, 0x107b, 0x6f6, 0x75e, 0x1b27, 0x4db, 0x1e1b, 0xd78, 0x15b6, 0x1130, 0x8cc, 0x1ac0, 0x9b7, 0x692, 0x1e07, 0x1f4, 0xfd7, 0x2ab, 0x7b5, 0x1040, 0xa43, 0xb6d, 0x13a1, 0x1422, 0x10c9, 0x10b0, 0x1540, 0x827, 0xa69, 0x1761, 0x1f25, 0x1d16, 0x16f2, 0x1fcb, 0x92, 0xcba, 0x1c03, 0x3c7}} #elif RADIX == 32 -{0x1258c7b1, 0xd07bbc7, 0x9cebc6f, 0x10d936f6, 0x15b66bc7, 0x1199130, 0x926df58, 0x1f4f039a, 0x556fd70, 0x1c100f6a, 0x15b6a90, 0x1934229d, 0x15021610, 0x1534a09e, 0xdf25bb0, 0x12ede5d1, 0x5d024bf, 0xa9b} +{{0x1258c7b1, 0xd07bbc7, 0x9cebc6f, 0x10d936f6, 0x15b66bc7, 0x1199130, 0x926df58, 0x1f4f039a, 0x556fd70, 0x1c100f6a, 0x15b6a90, 0x1934229d, 0x15021610, 0x1534a09e, 0xdf25bb0, 0x12ede5d1, 0x5d024bf, 0xa9b}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xbc6f683dde3c9631, 0xd9af1e1b26dec9ce, 0x926df5808cc89856, 0x5155bf5c3e9e0734, 0x53a15b6a90e0807b, 0x504f540858432684, 0xdbcba2df25bb0a9a, 0x18f00d974092fe5} +{{0xbc6f683dde3c9631, 0xd9af1e1b26dec9ce, 0x926df5808cc89856, 0x5155bf5c3e9e0734, 0x53a15b6a90e0807b, 0x504f540858432684, 0xdbcba2df25bb0a9a, 0x18f00d974092fe5}} #else -{0xded07bbc792c63, 0x11e1b26dec9cebc, 0xc046644c2b6cd7, 0x10fa781cd249b7d, 0x1c100f6a2ab7eb, 0x3268453a15b6a9, 0x54d2827aa042c2, 0x1976f2e8b7c96ec, 0x16e01b2e8125f} +{{0xded07bbc792c63, 0x11e1b26dec9cebc, 0xc046644c2b6cd7, 0x10fa781cd249b7d, 0x1c100f6a2ab7eb, 0x3268453a15b6a9, 0x54d2827aa042c2, 0x1976f2e8b7c96ec, 0x16e01b2e8125f}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x15c, 0x865, 0x1af6, 0x17b9, 0x6a2, 0x1c22, 0x17c5, 0x1149, 0xa7, 0x151e, 0xe57, 0x4c2, 0x18cd, 0xbd2, 0x7a4, 0x7c6, 0x74a, 0xd2, 0x902, 0x68c, 0x21e, 0x1e44, 0x1f5a, 0x1d4c, 0x115b, 0x1777, 0x16d4, 0x503, 0x3af, 0x7e4, 0x1aa7, 0x3dd, 0x827, 0x186b, 0x765, 0x1fc5, 0xc78, 0x9bd, 0xfe} +{{0x15c, 0x865, 0x1af6, 0x17b9, 0x6a2, 0x1c22, 0x17c5, 0x1149, 0xa7, 0x151e, 0xe57, 0x4c2, 0x18cd, 0xbd2, 0x7a4, 0x7c6, 0x74a, 0xd2, 0x902, 0x68c, 0x21e, 0x1e44, 0x1f5a, 0x1d4c, 0x115b, 0x1777, 0x16d4, 0x503, 0x3af, 0x7e4, 0x1aa7, 0x3dd, 0x827, 0x186b, 0x765, 0x1fc5, 0xc78, 0x9bd, 0xfe}} #elif RADIX == 32 -{0x10ae12d6, 0x13af6432, 0x88d457b, 0xa4df178, 0x151e053c, 0x14984e57, 0x122f4b19, 0x14a3e31e, 0x12040d23, 0x878d18, 0xcfad791, 0xef15bea, 0x140eda97, 0x13f20ebc, 0xe3ddd53, 0x1970d682, 0x3c7f14e, 0x4eb} +{{0x10ae12d6, 0x13af6432, 0x88d457b, 0xa4df178, 0x151e053c, 0x14984e57, 0x122f4b19, 0x14a3e31e, 0x12040d23, 0x878d18, 0xcfad791, 0xef15bea, 0x140eda97, 0x13f20ebc, 0xe3ddd53, 0x1970d682, 0x3c7f14e, 0x4eb}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x457b9d7b21942b84, 0x7814f149be2f088d, 0x22f4b19a4c272bd4, 0xc4810348e947c63d, 0x7d4cfad791043c68, 0x75e503b6a5dde2b, 0xe1ad04e3ddd539f9, 0x1326f58f1fc53b2} +{{0x457b9d7b21942b84, 0x7814f149be2f088d, 0x22f4b19a4c272bd4, 0xc4810348e947c63d, 0x7d4cfad791043c68, 0x75e503b6a5dde2b, 0xe1ad04e3ddd539f9, 0x1326f58f1fc53b2}} #else -{0xf73af643285709, 0xf149be2f088d45, 0xcd261395ea3c0a, 0x3a51f18f48bd2c, 0x20878d18902069, 0x1dde2b7d4cfad79, 0x1cfc83af281db52, 0xcb86b4138f7754, 0xb4deb1e3f8a7} +{{0xf73af643285709, 0xf149be2f088d45, 0xcd261395ea3c0a, 0x3a51f18f48bd2c, 0x20878d18902069, 0x1dde2b7d4cfad79, 0x1cfc83af281db52, 0xcb86b4138f7754, 0xb4deb1e3f8a7}} #endif #endif , #if 0 #elif RADIX == 16 -{0x6ac, 0x25e, 0xc7a, 0x1492, 0xd01, 0xbc0, 0x118, 0x376, 0x3e0, 0x7ae, 0x573, 0x171f, 0x35a, 0x1725, 0x48f, 0xc94, 0x133c, 0x16a4, 0x10a8, 0x178d, 0xdd7, 0x798, 0x1d05, 0x39f, 0xc2a, 0x179c, 0x407, 0xd3, 0x118a, 0x1c9f, 0xeac, 0x145b, 0xc35, 0x11a2, 0x58b, 0xe4, 0x5e3, 0xae7, 0x330} +{{0x6ac, 0x25e, 0xc7a, 0x1492, 0xd01, 0xbc0, 0x118, 0x376, 0x3e0, 0x7ae, 0x573, 0x171f, 0x35a, 0x1725, 0x48f, 0xc94, 0x133c, 0x16a4, 0x10a8, 0x178d, 0xdd7, 0x798, 0x1d05, 0x39f, 0xc2a, 0x179c, 0x407, 0xd3, 0x118a, 0x1c9f, 0xeac, 0x145b, 0xc35, 0x11a2, 0x58b, 0xe4, 0x5e3, 0xae7, 0x330}} #elif RADIX == 32 -{0x3563c78, 0x4c7a12f, 0x101a0349, 0x1bb04617, 0x7ae1f00, 0xae3e573, 0x7dc946b, 0x13c64a12, 0x1516a49, 0x375ef1b, 0x1fe829e6, 0x138c2a1c, 0x34c80f7, 0xe4fc628, 0xb45b756, 0x2e344c3, 0xf18390b, 0x339} +{{0x3563c78, 0x4c7a12f, 0x101a0349, 0x1bb04617, 0x7ae1f00, 0xae3e573, 0x7dc946b, 0x13c64a12, 0x1516a49, 0x375ef1b, 0x1fe829e6, 0x138c2a1c, 0x34c80f7, 0xe4fc628, 0xb45b756, 0x2e344c3, 0xf18390b, 0x339}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x349263d0978d58f, 0xb87c037608c2f01a, 0x7dc946b571f2b99e, 0xd8545a92678c9424, 0x439fe829e61baf78, 0xe3140d3203de7185, 0xc68986b45b756727, 0x32b9cbc60e42c5} +{{0x349263d0978d58f, 0xb87c037608c2f01a, 0x7dc946b571f2b99e, 0xd8545a92678c9424, 0x439fe829e61baf78, 0xe3140d3203de7185, 0xc68986b45b756727, 0x32b9cbc60e42c5}} #else -{0x924c7a12f1ab1e, 0x37608c2f01a03, 0x15ab8f95ccf5c3e, 0x99e325091f7251, 0xc375ef1b0a8b52, 0x1e7185439fe829e, 0x1393f18a069901e, 0x1171a261ad16dd5, 0x6573978c1c85} +{{0x924c7a12f1ab1e, 0x37608c2f01a03, 0x15ab8f95ccf5c3e, 0x99e325091f7251, 
0xc375ef1b0a8b52, 0x1e7185439fe829e, 0x1393f18a069901e, 0x1171a261ad16dd5, 0x6573978c1c85}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x19da, 0x19cd, 0x19e2, 0x5ea, 0x1079, 0x11ba, 0x1f5e, 0x228, 0x1a45, 0x16ee, 0x18a1, 0x11eb, 0x127a, 0x1d6f, 0x106f, 0x118f, 0x1d0c, 0x1571, 0x1b2d, 0xb60, 0xb27, 0xe1f, 0xe58, 0xe01, 0x4f4, 0x183, 0x13a9, 0x1584, 0x5cb, 0xcce, 0x1ce7, 0x4da, 0x1e62, 0x1213, 0x7fe, 0x1e6, 0x17d, 0x350, 0x3a0} +{{0x19da, 0x19cd, 0x19e2, 0x5ea, 0x1079, 0x11ba, 0x1f5e, 0x228, 0x1a45, 0x16ee, 0x18a1, 0x11eb, 0x127a, 0x1d6f, 0x106f, 0x118f, 0x1d0c, 0x1571, 0x1b2d, 0xb60, 0xb27, 0xe1f, 0xe58, 0xe01, 0x4f4, 0x183, 0x13a9, 0x1584, 0x5cb, 0xcce, 0x1ce7, 0x4da, 0x1e62, 0x1213, 0x7fe, 0x1e6, 0x17d, 0x350, 0x3a0}} #elif RADIX == 32 -{0x1ced44bf, 0x159e2ce6, 0xea0f25e, 0x1147d7a3, 0x16eed228, 0xa3d78a1, 0x17f5be4f, 0x10c8c7c1, 0x165b571e, 0x1ac9d6c1, 0x172c387, 0x1064f470, 0x16127521, 0x1667172e, 0x44dae73, 0x1fa427e6, 0xbe8798f, 0x800} +{{0x1ced44bf, 0x159e2ce6, 0xea0f25e, 0x1147d7a3, 0x16eed228, 0xa3d78a1, 0x17f5be4f, 0x10c8c7c1, 0x165b571e, 0x1ac9d6c1, 0x172c387, 0x1064f470, 0x16127521, 0x1667172e, 0x44dae73, 0x1fa427e6, 0xbe8798f, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf25eacf167373b51, 0xbb48a228faf46ea0, 0x7f5be4f51ebc50db, 0xd96d5c7a1918f83, 0x8e0172c387d64eb6, 0x8b975849d4860c9e, 0x484fcc44dae73b33, 0x50d402fa1e63ff} +{{0xf25eacf167373b51, 0xbb48a228faf46ea0, 0x7f5be4f51ebc50db, 0xd96d5c7a1918f83, 0x8e0172c387d64eb6, 0x8b975849d4860c9e, 0x484fcc44dae73b33, 0x50d402fa1e63ff}} #else -{0xbd59e2ce6e76a2, 0xa228faf46ea0f2, 0x7a8f5e286ddda4, 0x1e86463e0dfd6f9, 0xfac9d6c1b2dab8, 0x60c9e8e0172c38, 0x1d99c5cbac24ea4, 0x1fd213f31136b9c, 
0xa1a805f43cc7} +{{0xbd59e2ce6e76a2, 0xa228faf46ea0f2, 0x7a8f5e286ddda4, 0x1e86463e0dfd6f9, 0xfac9d6c1b2dab8, 0x60c9e8e0172c38, 0x1d99c5cbac24ea4, 0x1fd213f31136b9c, 0xa1a805f43cc7}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1dea, 0x1bbc, 0x9b0, 0x1066, 0x10fb, 0x1fe8, 0x1bca, 0x34d, 0x275, 0x42a, 0xc7b, 0x6e8, 0x1f5c, 0x12e5, 0x155d, 0x4f2, 0x1422, 0xfce, 0x603, 0x17a8, 0xd9f, 0x182d, 0x9fe, 0x3b1, 0x342, 0x1c21, 0x1aff, 0x1e38, 0x1ac8, 0x1c98, 0x51f, 0x897, 0xe23, 0x17e7, 0xced, 0x1e6, 0x125a, 0x18f3, 0x1b8} +{{0x1dea, 0x1bbc, 0x9b0, 0x1066, 0x10fb, 0x1fe8, 0x1bca, 0x34d, 0x275, 0x42a, 0xc7b, 0x6e8, 0x1f5c, 0x12e5, 0x155d, 0x4f2, 0x1422, 0xfce, 0x603, 0x17a8, 0xd9f, 0x182d, 0x9fe, 0x3b1, 0x342, 0x1c21, 0x1aff, 0x1e38, 0x1ac8, 0x1c98, 0x51f, 0x897, 0xe23, 0x17e7, 0xced, 0x1e6, 0x125a, 0x18f3, 0x1b8}} #elif RADIX == 32 -{0xef520a6, 0xc9b0dde, 0x1a21f706, 0x1a6ef2bf, 0x42a13a8, 0x10dd0c7b, 0xecb97eb, 0x2227955, 0xc06fcea, 0xb67ef50, 0x114ff60b, 0x423421d, 0x18e35ffc, 0x1e4c6b23, 0x689728f, 0x1b6fcee2, 0x12d07999, 0x69c} +{{0xef520a6, 0xc9b0dde, 0x1a21f706, 0x1a6ef2bf, 0x42a13a8, 0x10dd0c7b, 0xecb97eb, 0x2227955, 0xc06fcea, 0xb67ef50, 0x114ff60b, 0x423421d, 0x18e35ffc, 0x1e4c6b23, 0x689728f, 0x1b6fcee2, 0x12d07999, 0x69c}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf70664d86ef3bd48, 0xa84ea34dde57fa21, 0xecb97eb86e863d90, 0x8301bf3a8444f2aa, 0x43b14ff60b5b3f7a, 0x3591e38d7ff08468, 0xdf9dc4689728ff26, 0x463ce4b41e6676} +{{0xf70664d86ef3bd48, 0xa84ea34dde57fa21, 0xecb97eb86e863d90, 0x8301bf3a8444f2aa, 0x43b14ff60b5b3f7a, 0x3591e38d7ff08468, 0xdf9dc4689728ff26, 0x463ce4b41e6676}} #else -{0xcc9b0dde77a90, 0xa34dde57fa21f7, 0x15c37431ec85427, 0xa1113caabb2e5f, 0x16b67ef506037e7, 0x10846843b14ff60, 0x1f931ac8f1c6bff, 0x1db7e7711a25ca3, 0x8c79c9683ccc} +{{0xcc9b0dde77a90, 0xa34dde57fa21f7, 0x15c37431ec85427, 0xa1113caabb2e5f, 0x16b67ef506037e7, 0x10846843b14ff60, 0x1f931ac8f1c6bff, 0x1db7e7711a25ca3, 0x8c79c9683ccc}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -480,261 +480,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x4d6, 0x18fd, 0x18c0, 0x13c1, 0x1718, 0x122c, 0x830, 0xa02, 0xee0, 0x1ad1, 0x1485, 0x27f, 0x13e5, 0x118f, 0xdce, 0x31c, 0x1b49, 0x998, 0x117, 0x343, 0xdae, 0x1fe7, 0x16a0, 0x1c43, 0x172, 0xa6e, 0x702, 0xeb8, 0x835, 0x1cf, 0x48d, 0x4e4, 0x13eb, 0x57d, 0xccf, 0x97e, 0x3b6, 0x910, 0x2dd} +{{0x4d6, 0x18fd, 0x18c0, 0x13c1, 0x1718, 0x122c, 0x830, 0xa02, 0xee0, 0x1ad1, 0x1485, 0x27f, 0x13e5, 0x118f, 0xdce, 0x31c, 0x1b49, 0x998, 0x117, 0x343, 0xdae, 0x1fe7, 0x16a0, 0x1c43, 0x172, 0xa6e, 0x702, 0xeb8, 0x835, 0x1cf, 0x48d, 0x4e4, 0x13eb, 0x57d, 0xccf, 0x97e, 0x3b6, 0x910, 0x2dd}} #elif RADIX == 32 -{0x126b3651, 0x38c0c7e, 0xb2e313c, 0x10120c24, 0x1ad17702, 0x144ff485, 0x7463e7c, 0x14918e37, 0x22e998d, 0x1b6b8686, 0x3b507f9, 0xdc172e2, 0x1ae0e04a, 0x10e7a0d5, 0x164e4246, 0x13cafb3e, 0x1db25f99, 0x300} +{{0x126b3651, 0x38c0c7e, 0xb2e313c, 0x10120c24, 0x1ad17702, 0x144ff485, 0x7463e7c, 0x14918e37, 0x22e998d, 0x1b6b8686, 0x3b507f9, 0xdc172e2, 0x1ae0e04a, 0x10e7a0d5, 0x164e4246, 0x13cafb3e, 0x1db25f99, 0x300}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x313c1c6063f49acd, 0x45dc0a0241848b2e, 0x7463e7ca27fa42eb, 0x308ba66369231c6e, 0x5c43b507f9db5c34, 0xd06aeb838129b82e, 0x95f67d64e4246873, 0xfa44076c97e667} +{{0x313c1c6063f49acd, 0x45dc0a0241848b2e, 0x7463e7ca27fa42eb, 0x308ba66369231c6e, 0x5c43b507f9db5c34, 0xd06aeb838129b82e, 0x95f67d64e4246873, 0xfa44076c97e667}} #else -{0x7838c0c7e9359b, 0xa0241848b2e31, 0x1e513fd2175a2ee, 0xda48c71b9d18f9, 0x13b6b86861174cc, 0x9b82e5c43b507f, 0x1439e83575c1c09, 0x19e57d9f5939091, 0x44880ed92fcc} +{{0x7838c0c7e9359b, 0xa0241848b2e31, 0x1e513fd2175a2ee, 0xda48c71b9d18f9, 0x13b6b86861174cc, 0x9b82e5c43b507f, 0x1439e83575c1c09, 0x19e57d9f5939091, 0x44880ed92fcc}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x937, 0x63f, 0xe30, 0x4f0, 0x5c6, 0x48b, 0x120c, 0x280, 0xbb8, 0xeb4, 0x1d21, 0x89f, 0x1cf9, 0x1463, 0x373, 0x8c7, 0x6d2, 0x1a66, 0x1845, 0x10d0, 0x1b6b, 0x7f9, 0x1da8, 0x1710, 0x105c, 0x129b, 0x1c0, 0xbae, 0x1a0d, 0x873, 0x123, 0x1939, 0xcfa, 0x195f, 0x1333, 0x125f, 0xed, 0xa44, 0x697} +{{0x937, 0x63f, 0xe30, 0x4f0, 0x5c6, 0x48b, 0x120c, 0x280, 0xbb8, 0xeb4, 0x1d21, 0x89f, 0x1cf9, 0x1463, 0x373, 0x8c7, 0x6d2, 0x1a66, 0x1845, 0x10d0, 0x1b6b, 0x7f9, 0x1da8, 0x1710, 0x105c, 0x129b, 0x1c0, 0xbae, 0x1a0d, 0x873, 0x123, 0x1939, 0xcfa, 0x195f, 0x1333, 0x125f, 0xed, 0xa44, 0x697}} #elif RADIX == 32 -{0x149bfcfc, 0xe3031f, 0x2cb8c4f, 0x14048309, 0xeb45dc0, 0x513fd21, 0x19d18f9f, 0xd24638d, 0x108ba663, 0xedae1a1, 0x10ed41fe, 0x13705cb8, 0xeb83812, 0x1439e835, 0x15939091, 0xcf2becf, 0x76c97e6, 0x820} +{{0x149bfcfc, 0xe3031f, 0x2cb8c4f, 0x14048309, 0xeb45dc0, 0x513fd21, 0x19d18f9f, 0xd24638d, 0x108ba663, 0xedae1a1, 0x10ed41fe, 0x13705cb8, 0xeb83812, 0x1439e835, 0x15939091, 0xcf2becf, 0x76c97e6, 0x820}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x8c4f071818fd26ff, 0xd1770280906122cb, 0x9d18f9f289fe90ba, 0xc22e998da48c71b, 0x9710ed41fe76d70d, 0xf41abae0e04a6e0b, 0xe57d9f5939091a1c, 0x6a9101db25f999} +{{0x8c4f071818fd26ff, 0xd1770280906122cb, 0x9d18f9f289fe90ba, 0xc22e998da48c71b, 0x9710ed41fe76d70d, 0xf41abae0e04a6e0b, 0xe57d9f5939091a1c, 0x6a9101db25f999}} #else -{0x9e0e3031fa4dfe, 0x10280906122cb8c, 0xf944ff485d68bb, 0x369231c6e7463e, 0x1cedae1a1845d33, 0xa6e0b9710ed41f, 0xd0e7a0d5d70702, 0x6795f67d64e424, 0xd52203b64bf3} +{{0x9e0e3031fa4dfe, 0x10280906122cb8c, 0xf944ff485d68bb, 0x369231c6e7463e, 0x1cedae1a1845d33, 0xa6e0b9710ed41f, 0xd0e7a0d5d70702, 0x6795f67d64e424, 0xd52203b64bf3}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x1863, 0x635, 0x19a9, 0x17fc, 0xdfe, 0x1784, 0x150b, 0x16c3, 0x15c0, 0x1f5f, 0x11d9, 0x1064, 0x1893, 0x1829, 0x211, 0x1a9e, 0x2e1, 0x3cc, 0x1e64, 0x12ed, 0x1c2c, 0x18b9, 0x121d, 0x234, 0xec9, 0x14dc, 0x4b6, 0xaad, 0x19f6, 0x805, 0x1984, 0x1843, 0xfca, 0x1a7a, 0xe04, 0x4af, 0x881, 0x65b, 0x421} +{{0x1863, 0x635, 0x19a9, 0x17fc, 0xdfe, 0x1784, 0x150b, 0x16c3, 0x15c0, 0x1f5f, 0x11d9, 0x1064, 0x1893, 0x1829, 0x211, 0x1a9e, 0x2e1, 0x3cc, 0x1e64, 0x12ed, 0x1c2c, 0x18b9, 0x121d, 0x234, 0xec9, 0x14dc, 0x4b6, 0xaad, 0x19f6, 0x805, 0x1984, 0x1843, 0xfca, 0x1a7a, 0xe04, 0x4af, 0x881, 0x65b, 0x421}} #elif RADIX == 32 -{0x1c31ce4f, 0x199a931a, 0x11bfd7f, 0x161d42ef, 0x1f5fae05, 0xe0c91d9, 0x8e0a712, 0xe1d4f08, 0x1cc83cc1, 0xf0b25db, 0x1490ee2e, 0x1b8ec911, 0xab496d4, 0x402e7d9, 0x15843cc2, 0x134f4fc, 0x4092bdc, 0x85a} +{{0x1c31ce4f, 0x199a931a, 0x11bfd7f, 0x161d42ef, 0x1f5fae05, 0xe0c91d9, 0x8e0a712, 0xe1d4f08, 0x1cc83cc1, 0xf0b25db, 0x1490ee2e, 0x1b8ec911, 0xab496d4, 0x402e7d9, 0x15843cc2, 0x134f4fc, 0x4092bdc, 0x85a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xfd7fccd498d70c73, 0x7eb816c3a85de11b, 0x8e0a71270648ecfd, 0xdf320f305c3a9e10, 0x223490ee2e78592e, 0x73ecaad25b5371d9, 0x69e9f95843cc2201, 
0xf996d1024af702} +{{0xfd7fccd498d70c73, 0x7eb816c3a85de11b, 0x8e0a71270648ecfd, 0xdf320f305c3a9e10, 0x223490ee2e78592e, 0x73ecaad25b5371d9, 0x69e9f95843cc2201, 0xf996d1024af702}} #else -{0xff99a931ae18e7, 0x16c3a85de11bfd, 0x938324767ebf5c, 0x170ea78423829c, 0x1cf0b25dbe641e6, 0x1371d9223490ee2, 0x1100b9f655692da, 0x9a7a7e5610f30, 0x432da20495ee} +{{0xff99a931ae18e7, 0x16c3a85de11bfd, 0x938324767ebf5c, 0x170ea78423829c, 0x1cf0b25dbe641e6, 0x1371d9223490ee2, 0x1100b9f655692da, 0x9a7a7e5610f30, 0x432da20495ee}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1a7, 0x175b, 0x9bd, 0xb94, 0x1a66, 0x1d52, 0x1eb3, 0x1431, 0x9e7, 0x1b9d, 0x75f, 0xcba, 0x17e9, 0xe1d, 0xdb, 0xc7b, 0x76, 0xa04, 0xd73, 0x3f7, 0x17dd, 0x1555, 0x5d6, 0x16ee, 0x1df6, 0x1429, 0x15cb, 0x140b, 0x1aeb, 0x14fb, 0x1984, 0x179b, 0x1ba1, 0x125e, 0xb62, 0x249, 0x95a, 0x137a, 0x7c} +{{0x1a7, 0x175b, 0x9bd, 0xb94, 0x1a66, 0x1d52, 0x1eb3, 0x1431, 0x9e7, 0x1b9d, 0x75f, 0xcba, 0x17e9, 0xe1d, 0xdb, 0xc7b, 0x76, 0xa04, 0xd73, 0x3f7, 0x17dd, 0x1555, 0x5d6, 0x16ee, 0x1df6, 0x1429, 0x15cb, 0x140b, 0x1aeb, 0x14fb, 0x1984, 0x179b, 0x1ba1, 0x125e, 0xb62, 0x249, 0x95a, 0x137a, 0x7c}} #elif RADIX == 32 -{0x10d3893a, 0x89bdbad, 0x14b4ccb9, 0x18facfa, 0x1b9d4f3d, 0x597475f, 0xdb876fd, 0x7663d83, 0x1ae6a040, 0xdf747ee, 0xe2eb555, 0x53df6b7, 0x102eb974, 0xa7debae, 0x379bcc2, 0x18a4bdba, 0xad09256, 0xcd2} +{{0x10d3893a, 0x89bdbad, 0x14b4ccb9, 0x18facfa, 0x1b9d4f3d, 0x597475f, 0xdb876fd, 0x7663d83, 0x1ae6a040, 0xdf747ee, 0xe2eb555, 0x53df6b7, 0x102eb974, 0xa7debae, 0x379bcc2, 0x18a4bdba, 0xad09256, 0xcd2}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xccb944dedd6c34e2, 0x753cf431f59f54b4, 0xdb876fd2cba3afee, 0x76b9a8100ecc7b06, 0xd6ee2eb5556fba3f, 0xf5d740bae5d0a7be, 0x497b74379bcc253e, 0x84de92b42495b1} +{{0xccb944dedd6c34e2, 0x753cf431f59f54b4, 0xdb876fd2cba3afee, 0x76b9a8100ecc7b06, 0xd6ee2eb5556fba3f, 0xf5d740bae5d0a7be, 0x497b74379bcc253e, 0x84de92b42495b1}} #else -{0x17289bdbad869c4, 0xf431f59f54b4cc, 0x1e965d1d7f73a9e, 0x3b31ec1b6e1db, 0xadf747eed73502, 0x10a7bed6ee2eb55, 0x129f7aeba05d72e, 0xc525edd0de6f30, 0x109bd2568492b} +{{0x17289bdbad869c4, 0xf431f59f54b4cc, 0x1e965d1d7f73a9e, 0x3b31ec1b6e1db, 0xadf747eed73502, 0x10a7bed6ee2eb55, 0x129f7aeba05d72e, 0xc525edd0de6f30, 0x109bd2568492b}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1d6a, 0x5b, 0x24a, 0x1bfc, 0x1cef, 0xc7e, 0x1cac, 0x1e4, 0x68, 0x16da, 0x30d, 0x13a5, 0x505, 0x329, 0x9f4, 0x1dae, 0x371, 0x111b, 0x200, 0x1b69, 0x1e51, 0x3b7, 0x316, 0x509, 0x1af2, 0x1220, 0x8c2, 0x195a, 0x1050, 0x1b7a, 0xd8b, 0x1a21, 0x336, 0x14fa, 0x1a4b, 0x11d, 0x167d, 0x1501, 0x302} +{{0x1d6a, 0x5b, 0x24a, 0x1bfc, 0x1cef, 0xc7e, 0x1cac, 0x1e4, 0x68, 0x16da, 0x30d, 0x13a5, 0x505, 0x329, 0x9f4, 0x1dae, 0x371, 0x111b, 0x200, 0x1b69, 0x1e51, 0x3b7, 0x316, 0x509, 0x1af2, 0x1220, 0x8c2, 0x195a, 0x1050, 0x1b7a, 0xd8b, 0x1a21, 0x336, 0x14fa, 0x1a4b, 0x11d, 0x167d, 0x1501, 0x302}} #elif RADIX == 32 -{0x1eb53915, 0x1824a02d, 0x1fb9dfbf, 0xf272b18, 0x16da0340, 0x1674a30d, 0x1a0ca4a0, 0x171ed727, 0x40111b1, 0x1f9476d2, 0x918b0ed, 0x41af228, 0x5691852, 0x1dbd4143, 0xda216c5, 0x12e9f433, 0x13e84774, 0xc8d} +{{0x1eb53915, 0x1824a02d, 0x1fb9dfbf, 0xf272b18, 0x16da0340, 0x1674a30d, 0x1a0ca4a0, 0x171ed727, 0x40111b1, 0x1f9476d2, 0x918b0ed, 0x41af228, 0x5691852, 0x1dbd4143, 0xda216c5, 0x12e9f433, 0x13e84774, 0xc8d}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xdfbfc125016fad4e, 0x680d01e4e5631fb9, 0xa0ca4a0b3a5186db, 0x9100446c6e3dae4f, 0x450918b0edfca3b6, 0xa0a195a46148835e, 0xd3e866da216c5ede, 0x75406cfa11dd25} +{{0xdfbfc125016fad4e, 0x680d01e4e5631fb9, 0xa0ca4a0b3a5186db, 0x9100446c6e3dae4f, 0x450918b0edfca3b6, 0xa0a195a46148835e, 0xd3e866da216c5ede, 0x75406cfa11dd25}} #else -{0x17f824a02df5a9c, 0x101e4e5631fb9df, 0x1059d28c36db406, 0x11b8f6b93e83292, 0x1bf9476d220088d, 0x8835e450918b0e, 0xf6f5050cad230a, 0x974fa19b6885b1, 0xea80d9f423ba} +{{0x17f824a02df5a9c, 0x101e4e5631fb9df, 0x1059d28c36db406, 0x11b8f6b93e83292, 0x1bf9476d220088d, 0x8835e450918b0e, 0xf6f5050cad230a, 0x974fa19b6885b1, 0xea80d9f423ba}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1e9d, 0xbb9, 0x14f9, 0xc51, 0x1731, 0x122e, 0x1901, 0x59a, 0xcc1, 0xb65, 0xc68, 0x1eaf, 0x1f48, 0x1e46, 0xe46, 0x9c1, 0x1013, 0x12f8, 0x18a, 0x177f, 0x1e19, 0x1cca, 0x257, 0x18b9, 0xa38, 0x184b, 0x15a4, 0x86d, 0xa8c, 0x1df5, 0xf2, 0x37, 0x5d9, 0x292, 0x11ae, 0x9e, 0x1fce, 0x7f4, 0x407} +{{0x1e9d, 0xbb9, 0x14f9, 0xc51, 0x1731, 0x122e, 0x1901, 0x59a, 0xcc1, 0xb65, 0xc68, 0x1eaf, 0x1f48, 0x1e46, 0xe46, 0x9c1, 0x1013, 0x12f8, 0x18a, 0x177f, 0x1e19, 0x1cca, 0x257, 0x18b9, 0xa38, 0x184b, 0x15a4, 0x86d, 0xa8c, 0x1df5, 0xf2, 0x37, 0x5d9, 0x292, 0x11ae, 0x9e, 0x1fce, 0x7f4, 0x407}} #elif RADIX == 32 -{0x1f4ecc63, 0x34f95dc, 0xbae62c5, 0xcd64064, 0xb656609, 0x3d5ec68, 0x3791be9, 0x134e0b9, 0x3152f88, 0x17866efe, 0x1912bf32, 0x96a38c5, 0x1b6b498, 0xefaaa31, 0x12037079, 0xb85245d, 0x1e7027a3, 0x727} +{{0x1f4ecc63, 0x34f95dc, 0xbae62c5, 0xcd64064, 0xb656609, 0x3d5ec68, 0x3791be9, 0x134e0b9, 0x3152f88, 0x17866efe, 0x1912bf32, 0x96a38c5, 0x1b6b498, 0xefaaa31, 0x12037079, 0xb85245d, 0x1e7027a3, 0x727}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x62c51a7caee7d3b3, 0x9598259ac80c8bae, 0x3791be91eaf6342d, 0xf0c54be20269c172, 
0x18b912bf32bc3377, 0x551886dad2612d47, 0xa48bb203707977d, 0x29fd3f9c09e8d7} +{{0x62c51a7caee7d3b3, 0x9598259ac80c8bae, 0x3791be91eaf6342d, 0xf0c54be20269c172, 0x18b912bf32bc3377, 0x551886dad2612d47, 0xa48bb203707977d, 0x29fd3f9c09e8d7}} #else -{0x18a34f95dcfa766, 0x259ac80c8bae62, 0x148f57b1a16cacc, 0x809a705c8de46f, 0x57866efe18a97c, 0x12d4718b912bf3, 0xbbeaa8c436d693, 0x15c2922ec80dc1e, 0x53fa7f3813d1} +{{0x18a34f95dcfa766, 0x259ac80c8bae62, 0x148f57b1a16cacc, 0x809a705c8de46f, 0x57866efe18a97c, 0x12d4718b912bf3, 0xbbeaa8c436d693, 0x15c2922ec80dc1e, 0x53fa7f3813d1}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x177, 0xf70, 0x25, 0x503, 0x1f96, 0x1abd, 0x6f5, 0x115b, 0xa68, 0x1192, 0x338, 0x1bae, 0x15af, 0x1570, 0xb79, 0x1c9a, 0xe78, 0x19de, 0x860, 0x1076, 0x1a63, 0x1d52, 0x1511, 0x10c5, 0x1fdf, 0xab1, 0x1454, 0x2c4, 0x292, 0x1135, 0x273, 0x1d, 0xefa, 0x47, 0x344, 0x226, 0x9c1, 0x1af, 0x639} +{{0x177, 0xf70, 0x25, 0x503, 0x1f96, 0x1abd, 0x6f5, 0x115b, 0xa68, 0x1192, 0x338, 0x1bae, 0x15af, 0x1570, 0xb79, 0x1c9a, 0xe78, 0x19de, 0x860, 0x1076, 0x1a63, 0x1d52, 0x1511, 0x10c5, 0x1fdf, 0xab1, 0x1454, 0x2c4, 0x292, 0x1135, 0x273, 0x1d, 0xefa, 0x47, 0x344, 0x226, 0x9c1, 0x1af, 0x639}} #elif RADIX == 32 -{0xbbf600, 0x60257b8, 0xf7f2c50, 0xad9bd75, 0x11925344, 0x1f75c338, 0x1cd5c2b5, 0x78e4d2d, 0x10c19de7, 0x1698e0ec, 0x5a88f54, 0x163fdf86, 0xb128a8a, 0x189a8a48, 0x1401d139, 0x11008eef, 0xe088986, 0xd7a} +{{0xbbf600, 0x60257b8, 0xf7f2c50, 0xad9bd75, 0x11925344, 0x1f75c338, 0x1cd5c2b5, 0x78e4d2d, 0x10c19de7, 0x1698e0ec, 0x5a88f54, 0x163fdf86, 0xb128a8a, 0x189a8a48, 0x1401d139, 0x11008eef, 0xe088986, 0xd7a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x2c503012bdc02efd, 
0x494d115b37aeaf7f, 0xcd5c2b5fbae19c46, 0x64306779cf1c9a5b, 0xf0c5a88f54b4c707, 0x45242c4a2a2ac7fb, 0x11ddf401d139c4d, 0xd86bd3822261a2} +{{0x2c503012bdc02efd, 0x494d115b37aeaf7f, 0xcd5c2b5fbae19c46, 0x64306779cf1c9a5b, 0xf0c5a88f54b4c707, 0x45242c4a2a2ac7fb, 0x11ddf401d139c4d, 0xd86bd3822261a2}} #else -{0xa060257b805dfb, 0x1115b37aeaf7f2c, 0x1afdd70ce2324a6, 0x73c72696f3570a, 0x9698e0ec860cef, 0xac7fbf0c5a88f5, 0xe26a2921625151, 0x8804777d00744e, 0xd7a70444c3} +{{0xa060257b805dfb, 0x1115b37aeaf7f2c, 0x1afdd70ce2324a6, 0x73c72696f3570a, 0x9698e0ec860cef, 0xac7fbf0c5a88f5, 0xe26a2921625151, 0x8804777d00744e, 0xd7a70444c3}} #endif #endif , #if 0 #elif RADIX == 16 -{0x153b, 0x598, 0x100c, 0x1537, 0x1eda, 0x190b, 0x1406, 0x186e, 0x457, 0x469, 0x14a0, 0x1ce0, 0x1f6d, 0xf2f, 0x1837, 0x616, 0x16d0, 0xf35, 0x192b, 0x106, 0x17d6, 0x6b3, 0x169e, 0x27a, 0xe54, 0xa42, 0x1694, 0x16c3, 0x7b, 0x298, 0x118, 0xb0, 0x893, 0xbca, 0x1678, 0x19de, 0xb59, 0x3a, 0x43} +{{0x153b, 0x598, 0x100c, 0x1537, 0x1eda, 0x190b, 0x1406, 0x186e, 0x457, 0x469, 0x14a0, 0x1ce0, 0x1f6d, 0xf2f, 0x1837, 0x616, 0x16d0, 0xf35, 0x192b, 0x106, 0x17d6, 0x6b3, 0x169e, 0x27a, 0xe54, 0xa42, 0x1694, 0x16c3, 0x7b, 0x298, 0x118, 0xb0, 0x893, 0xbca, 0x1678, 0x19de, 0xb59, 0x3a, 0x43}} #elif RADIX == 32 -{0xa9d84f6, 0xf00c2cc, 0x2fdb553, 0x37501b2, 0x46922be, 0x179c14a0, 0x1bbcbfed, 0xd030b60, 0x1256f35b, 0x1df5820d, 0x1ab4f1ac, 0x84e5413, 0x1b0ed28a, 0x14c01ee, 0x60b008c, 0x1e179489, 0x1ace77ac, 0x8d2} +{{0xa9d84f6, 0xf00c2cc, 0x2fdb553, 0x37501b2, 0x46922be, 0x179c14a0, 0x1bbcbfed, 0xd030b60, 0x1256f35b, 0x1df5820d, 0x1ab4f1ac, 0x84e5413, 0x1b0ed28a, 0x14c01ee, 0x60b008c, 0x1e179489, 0x1ace77ac, 0x8d2}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xb55378061662a761, 0xa48af86ea03642fd, 0xbbcbfedbce0a5011, 0x6c95bcd6da0616c1, 0x827ab4f1acefac10, 0xf76c3b4a2909ca, 0x2f291260b008c0a6, 0x680e96b39deb3c} +{{0xb55378061662a761, 0xa48af86ea03642fd, 0xbbcbfedbce0a5011, 0x6c95bcd6da0616c1, 0x827ab4f1acefac10, 0xf76c3b4a2909ca, 0x2f291260b008c0a6, 0x680e96b39deb3c}} #else -{0xa6f00c2cc54ec2, 0xf86ea03642fdb5, 0x16de7052808d245, 0x1b68185b06ef2ff, 0x19df5820d92b79a, 0x909ca827ab4f1a, 0x53007bb61da51, 0xf0bca44982c023, 0xd01d2d673bd6} +{{0xa6f00c2cc54ec2, 0xf86ea03642fdb5, 0x16de7052808d245, 0x1b68185b06ef2ff, 0x19df5820d92b79a, 0x909ca827ab4f1a, 0x53007bb61da51, 0xf0bca44982c023, 0xd01d2d673bd6}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -956,261 +956,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x185f, 0xecc, 0x21c, 0xa62, 0x77, 0x8b1, 0x1188, 0xec2, 0xda1, 0xbf0, 0xb11, 0x82d, 0x1fe9, 0x10d4, 0x1e36, 0x194e, 0x77e, 0x1112, 0x14c0, 0x1dcd, 0x1be7, 0x1a4c, 0x140e, 0x10c5, 0x1aaf, 0x236, 0xdf3, 0x1a21, 0x1dff, 0x15bb, 0xda8, 0x30e, 0x17b0, 0xc7b, 0xd1, 0xcb, 0x868, 0x2e5, 0x5a} +{{0x185f, 0xecc, 0x21c, 0xa62, 0x77, 0x8b1, 0x1188, 0xec2, 0xda1, 0xbf0, 0xb11, 0x82d, 0x1fe9, 0x10d4, 0x1e36, 0x194e, 0x77e, 0x1112, 0x14c0, 0x1dcd, 0x1be7, 0x1a4c, 0x140e, 0x10c5, 0x1aaf, 0x236, 0xdf3, 0x1a21, 0x1dff, 0x15bb, 0xda8, 0x30e, 0x17b0, 0xc7b, 0xd1, 0xcb, 0x868, 0x2e5, 0x5a}} #elif RADIX == 32 -{0xc2f86ac, 0x421c766, 0xc40eea6, 0x16146211, 0xbf06d0b, 0x505ab11, 0x1b4353fd, 0x17eca778, 0x9811123, 0x6f9fb9b, 0x5a07693, 0x6daaf86, 0x885be62, 0xaddf7ff, 0x30e6d4, 0x1458f77b, 0x34032c1, 0x52a} +{{0xc2f86ac, 0x421c766, 0xc40eea6, 0x16146211, 0xbf06d0b, 0x505ab11, 0x1b4353fd, 0x17eca778, 0x9811123, 0x6f9fb9b, 0x5a07693, 0x6daaf86, 0x885be62, 0xaddf7ff, 0x30e6d4, 0x1458f77b, 0x34032c1, 0x52a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xeea6210e3b330be1, 0xc1b42ec28c422c40, 0xb4353fd282d588af, 0xda604448efd94ef1, 0xf0c5a0769337cfdc, 0xfbffa216f988db55, 0xb1eef6030e6d456e, 0x120b950d00cb068} +{{0xeea6210e3b330be1, 0xc1b42ec28c422c40, 0xb4353fd282d588af, 0xda604448efd94ef1, 0xf0c5a0769337cfdc, 0xfbffa216f988db55, 0xb1eef6030e6d456e, 0x120b950d00cb068}} #else -{0x14c421c766617c3, 0x2ec28c422c40ee, 0x1e9416ac457e0da, 0x3bf653bc6d0d4f, 0x66f9fb9b4c0889, 0x8db55f0c5a0769, 0x2b77dffd10b7cc, 0x1a2c7bbd80c39b5, 0x9172a1a01960} +{{0x14c421c766617c3, 0x2ec28c422c40ee, 0x1e9416ac457e0da, 0x3bf653bc6d0d4f, 0x66f9fb9b4c0889, 0x8db55f0c5a0769, 0x2b77dffd10b7cc, 0x1a2c7bbd80c39b5, 0x9172a1a01960}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x61a, 0x3b3, 0x1087, 0x1a98, 0x81d, 0x22c, 0x1462, 0xbb0, 0x368, 0xafc, 0xac4, 0xa0b, 0x7fa, 0x1435, 0x178d, 0x1653, 0x11df, 0x444, 0xd30, 0x1f73, 0x6f9, 0x1693, 0xd03, 0x1c31, 0x16ab, 0x188d, 0xb7c, 0x1e88, 0x1f7f, 0x56e, 0x136a, 0xc3, 0x1dec, 0xb1e, 0x1834, 0x32, 0xa1a, 0x10b9, 0xe6} +{{0x61a, 0x3b3, 0x1087, 0x1a98, 0x81d, 0x22c, 0x1462, 0xbb0, 0x368, 0xafc, 0xac4, 0xa0b, 0x7fa, 0x1435, 0x178d, 0x1653, 0x11df, 0x444, 0xd30, 0x1f73, 0x6f9, 0x1693, 0xd03, 0x1c31, 0x16ab, 0x188d, 0xb7c, 0x1e88, 0x1f7f, 0x56e, 0x136a, 0xc3, 0x1dec, 0xb1e, 0x1834, 0x32, 0xa1a, 0x10b9, 0xe6}} #elif RADIX == 32 -{0x130d1113, 0x110871d9, 0xb103ba9, 0x1d851884, 0xafc1b42, 0x9416ac4, 0x6d0d4ff, 0x1dfb29de, 0x1a604448, 0x19be7ee6, 0x11681da4, 0x11b6abe1, 0x1a216f98, 0x2b77dff, 0x180c39b5, 0xd163dde, 0x10d00cb0, 0x54a} +{{0x130d1113, 0x110871d9, 0xb103ba9, 0x1d851884, 0xafc1b42, 0x9416ac4, 0x6d0d4ff, 0x1dfb29de, 0x1a604448, 0x19be7ee6, 0x11681da4, 0x11b6abe1, 0x1a216f98, 0x2b77dff, 0x180c39b5, 0xd163dde, 0x10d00cb0, 0x54a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3ba988438eccc344, 0xf06d0bb0a3108b10, 0x6d0d4ff4a0b5622b, 0x369811123bf653bc, 0x7c31681da4cdf3f7, 0xbeffe885be6236d5, 0x2c7bbd80c39b515b, 0x742e5434032c1a} +{{0x3ba988438eccc344, 0xf06d0bb0a3108b10, 0x6d0d4ff4a0b5622b, 0x369811123bf653bc, 0x7c31681da4cdf3f7, 0xbeffe885be6236d5, 0x2c7bbd80c39b515b, 0x742e5434032c1a}} #else -{0x15310871d998688, 0x10bb0a3108b103b, 0x1fa505ab115f836, 0x8efd94ef1b4353, 0x99be7ee6d30222, 0x236d57c31681da, 0x8addf7ff442df3, 0x68b1eef6030e6d, 0xe85ca8680658} +{{0x15310871d998688, 0x10bb0a3108b103b, 0x1fa505ab115f836, 0x8efd94ef1b4353, 0x99be7ee6d30222, 0x236d57c31681da, 0x8addf7ff442df3, 0x68b1eef6030e6d, 0xe85ca8680658}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0xa5a, 0x2ab, 0x659, 0x149f, 0xf1b, 0xa1a, 0xb05, 0x1915, 0x1aa8, 0x1aa0, 0x1c4d, 0xe2f, 0xe1c, 0x19ab, 0x1d34, 0xa8f, 0xf59, 0x1f1, 0xc6d, 0x520, 0xb6e, 0x127f, 0x5dd, 0x175a, 0x1957, 0x1ca4, 0x1563, 0x122f, 0x705, 0xcd6, 0x1c02, 0xdc1, 0x93b, 0x387, 0x1870, 0x54, 0x853, 0x1adc, 0x6bc} +{{0xa5a, 0x2ab, 0x659, 0x149f, 0xf1b, 0xa1a, 0xb05, 0x1915, 0x1aa8, 0x1aa0, 0x1c4d, 0xe2f, 0xe1c, 0x19ab, 0x1d34, 0xa8f, 0xf59, 0x1f1, 0xc6d, 0x520, 0xb6e, 0x127f, 0x5dd, 0x175a, 0x1957, 0x1ca4, 0x1563, 0x122f, 0x705, 0xcd6, 0x1c02, 0xdc1, 0x93b, 0x387, 0x1870, 0x54, 0x853, 0x1adc, 0x6bc}} #elif RADIX == 32 -{0x152d7fc4, 0x1e659155, 0x69e3749, 0x8aac154, 0x1aa0d546, 0x11c5fc4d, 0x1a66adc3, 0x159547f4, 0x18da1f17, 0x1adb8a40, 0x1a2eec9f, 0x149957ba, 0x8beac7c, 0x66b1c16, 0x16dc1e01, 0x1c070e93, 0x2981530, 0xe2} 
+{{0x152d7fc4, 0x1e659155, 0x69e3749, 0x8aac154, 0x1aa0d546, 0x11c5fc4d, 0x1a66adc3, 0x159547f4, 0x18da1f17, 0x1adb8a40, 0x1a2eec9f, 0x149957ba, 0x8beac7c, 0x66b1c16, 0x16dc1e01, 0x1c070e93, 0x2981530, 0xe2}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3749f32c8aad4b5f, 0x83551915582a869e, 0xa66adc38e2fe26ea, 0x63687c5eb2a8fe9, 0xf75a2eec9fd6dc52, 0x8e0b22fab1f2932a, 0xe1d276dc1e01335, 0x196b710a6054c38} +{{0x3749f32c8aad4b5f, 0x83551915582a869e, 0xa66adc38e2fe26ea, 0x63687c5eb2a8fe9, 0xf75a2eec9fd6dc52, 0x8e0b22fab1f2932a, 0xe1d276dc1e01335, 0x196b710a6054c38}} #else -{0x93e659155a96bf, 0x11915582a869e37, 0x1c717f137541aa, 0x17acaa3fa699ab7, 0x1fadb8a40c6d0f8, 0x12932af75a2eec9, 0x99ac705917d58f, 0xe038749db70780, 0x17d6e214c0a98} +{{0x93e659155a96bf, 0x11915582a869e37, 0x1c717f137541aa, 0x17acaa3fa699ab7, 0x1fadb8a40c6d0f8, 0x12932af75a2eec9, 0x99ac705917d58f, 0xe038749db70780, 0x17d6e214c0a98}} #endif #endif , #if 0 #elif RADIX == 16 -{0x66e, 0xe79, 0xadd, 0x23, 0xf11, 0x7d6, 0x1091, 0x42a, 0x1885, 0x128, 0x6f9, 0xcdd, 0x1d55, 0x19bd, 0x116f, 0x1dbd, 0x107b, 0xaef, 0x8bc, 0xa74, 0x7b5, 0xdff, 0x743, 0x17e0, 0x453, 0x414, 0x672, 0xf28, 0x198a, 0x19c4, 0x1e85, 0xcb9, 0x17c2, 0x14c6, 0x1871, 0x1034, 0x6cb, 0x55b, 0xbf} +{{0x66e, 0xe79, 0xadd, 0x23, 0xf11, 0x7d6, 0x1091, 0x42a, 0x1885, 0x128, 0x6f9, 0xcdd, 0x1d55, 0x19bd, 0x116f, 0x1dbd, 0x107b, 0xaef, 0x8bc, 0xa74, 0x7b5, 0xdff, 0x743, 0x17e0, 0x453, 0x414, 0x672, 0xf28, 0x198a, 0x19c4, 0x1e85, 0xcb9, 0x17c2, 0x14c6, 0x1871, 0x1034, 0x6cb, 0x55b, 0xbf}} #elif RADIX == 32 -{0x13370e29, 0x6add73c, 0x159e2202, 0x154244f, 0x128c429, 0x159ba6f9, 0x17e6f7aa, 0x7bedec5, 0x1178aef8, 0x19ed54e8, 0x3a1b7f, 0x28453bf, 0x1ca0ce44, 0x1ce26629, 0x4cb9f42, 0x1c698d7c, 0x165c0d30, 0x159} +{{0x13370e29, 0x6add73c, 0x159e2202, 0x154244f, 0x128c429, 0x159ba6f9, 0x17e6f7aa, 0x7bedec5, 0x1178aef8, 0x19ed54e8, 0x3a1b7f, 0x28453bf, 0x1ca0ce44, 0x1ce26629, 0x4cb9f42, 0x1c698d7c, 0x165c0d30, 0x159}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x2202356eb9e4cdc3, 0xa310a42a8489f59e, 0x7e6f7aaacdd37c84, 0x445e2bbe0f7dbd8b, 0x77e03a1b7fcf6aa7, 0x3314f2833910508a, 0xd31af84cb9f42e71, 0xe956cd97034c38} +{{0x2202356eb9e4cdc3, 0xa310a42a8489f59e, 0x7e6f7aaacdd37c84, 0x445e2bbe0f7dbd8b, 0x77e03a1b7fcf6aa7, 0x3314f2833910508a, 0xd31af84cb9f42e71, 0xe956cd97034c38}} #else -{0x46add73c99b87, 0xa42a8489f59e22, 0x15566e9be425188, 0x183df6f62df9bde, 0x1f9ed54e88bc577, 0x10508a77e03a1b7, 0x1738998a79419c8, 0xe34c6be132e7d0, 0x22ad9b2e0698} +{{0x46add73c99b87, 0xa42a8489f59e22, 0x15566e9be425188, 0x183df6f62df9bde, 0x1f9ed54e88bc577, 0x10508a77e03a1b7, 0x1738998a79419c8, 0xe34c6be132e7d0, 0x22ad9b2e0698}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x165f, 0x1e7c, 0xe41, 0x12eb, 0xa1, 0x1655, 0x6db, 0x1dfc, 0x4a, 0xac7, 0x1dcb, 0x3d9, 0x16a0, 0x562, 0x1d70, 0x528, 0xaa7, 0x172e, 0x36c, 0x728, 0x1e76, 0x23f, 0x6e6, 0x53e, 0x1640, 0x1a82, 0x1b78, 0x1066, 0x895, 0x17eb, 0x1713, 0x174d, 0x679, 0x1415, 0x19a8, 0xe7c, 0x674, 0x1f81, 0x15} +{{0x165f, 0x1e7c, 0xe41, 0x12eb, 0xa1, 0x1655, 0x6db, 0x1dfc, 0x4a, 0xac7, 0x1dcb, 0x3d9, 0x16a0, 0x562, 0x1d70, 0x528, 0xaa7, 0x172e, 0x36c, 0x728, 0x1e76, 0x23f, 0x6e6, 0x53e, 0x1640, 0x1a82, 0x1b78, 0x1066, 0x895, 0x17eb, 0x1713, 0x174d, 0x679, 0x1415, 0x19a8, 0xe7c, 0x674, 0x1f81, 0x15}} #elif RADIX == 32 -{0xb2f81a0, 0x16e41f3e, 0x1541432e, 0xfe1b6ec, 0xac70257, 0x7b3dcb, 0x18158ad4, 0xa729475, 0x6d972e5, 0x1f9d8e50, 0x1e37308f, 0x10564029, 0x19b6f1a, 0x1bf5a256, 0x1374db89, 0xa282a67, 0x13a39f33, 0xc09} +{{0xb2f81a0, 0x16e41f3e, 0x1541432e, 0xfe1b6ec, 0xac70257, 0x7b3dcb, 0x18158ad4, 0xa729475, 0x6d972e5, 0x1f9d8e50, 0x1e37308f, 0x10564029, 0x19b6f1a, 0x1bf5a256, 0x1374db89, 0xa282a67, 0x13a39f33, 0xc09}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x432eb720f9f2cbe0, 0x1c095dfc36dd9541, 0x8158ad403d9ee5ab, 0x81b65cb954e528eb, 0x53e37308ffcec72, 0xd12b066dbc6a0ac8, 0x5054cf374db89dfa, 0xafe04ce8e7ccd4} +{{0x432eb720f9f2cbe0, 0x1c095dfc36dd9541, 0x8158ad403d9ee5ab, 0x81b65cb954e528eb, 0x53e37308ffcec72, 0xd12b066dbc6a0ac8, 0x5054cf374db89dfa, 0xafe04ce8e7ccd4}} #else -{0x5d6e41f3e597c0, 0x15dfc36dd954143, 0xa01ecf72d58e04, 0x55394a3ae0562b, 0x1ff9d8e5036cb97, 0xa0ac8053e37308, 0xefd68958336de3, 0x15141533cdd36e2, 0x15fc099d1cf99} +{{0x5d6e41f3e597c0, 0x15dfc36dd954143, 0xa01ecf72d58e04, 0x55394a3ae0562b, 0x1ff9d8e5036cb97, 0xa0ac8053e37308, 0xefd68958336de3, 0x15141533cdd36e2, 0x15fc099d1cf99}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1e32, 0x1f7c, 0x1c05, 0x372, 0x34a, 0x1d26, 0x11b9, 0x294, 0xa87, 0x1835, 0x158f, 0x1d19, 0x13e8, 0x4dc, 0x1e1a, 0x195f, 0x116e, 0x62c, 0x1839, 0x107a, 0xa4f, 0x119f, 0x18f3, 0xc48, 0x1c7a, 0x100d, 0x2e9, 0x12df, 0xbec, 0x6f1, 0x8bf, 0xe24, 0xa57, 0x50c, 0x28b, 0x31e, 0x430, 0x1b08, 0x378} +{{0x1e32, 0x1f7c, 0x1c05, 0x372, 0x34a, 0x1d26, 0x11b9, 0x294, 0xa87, 0x1835, 0x158f, 0x1d19, 0x13e8, 0x4dc, 0x1e1a, 0x195f, 0x116e, 0x62c, 0x1839, 0x107a, 0xa4f, 0x119f, 0x18f3, 0xc48, 0x1c7a, 0x100d, 0x2e9, 0x12df, 0xbec, 0x6f1, 0x8bf, 0xe24, 0xa57, 0x50c, 0x28b, 0x31e, 0x430, 0x1b08, 0x378}} #elif RADIX == 32 -{0xf1941d7, 0x5c05fbe, 0x9869437, 0x14a46e7a, 0x18355438, 0x3a3358f, 0xd13727d, 0x16ecaff8, 0x107262c8, 0x1a93e0f5, 0x8c79c67, 0x1bc7a62, 
0xb7c5d30, 0x1378afb2, 0xee2445f, 0x2ca18a5, 0x180c785, 0x1c1} +{{0xf1941d7, 0x5c05fbe, 0x9869437, 0x14a46e7a, 0x18355438, 0x3a3358f, 0xd13727d, 0x16ecaff8, 0x107262c8, 0x1a93e0f5, 0x8c79c67, 0x1bc7a62, 0xb7c5d30, 0x1378afb2, 0xee2445f, 0x2ca18a5, 0x180c785, 0x1c1}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x94372e02fdf3c650, 0xd550e2948dcf4986, 0xd13727d1d19ac7e0, 0xac1c98b22dd95ff0, 0x4c48c79c67d49f07, 0x57d92df174c0378f, 0x94314aee2445f9bc, 0xc6c2086031e145} +{{0x94372e02fdf3c650, 0xd550e2948dcf4986, 0xd13727d1d19ac7e0, 0xac1c98b22dd95ff0, 0x4c48c79c67d49f07, 0x57d92df174c0378f, 0x94314aee2445f9bc, 0xc6c2086031e145}} #else -{0x6e5c05fbe78ca0, 0xe2948dcf498694, 0x1e8e8cd63f06aa8, 0x8b7657fc344dc9, 0xfa93e0f5839316, 0x378f4c48c79c6, 0x1cde2bec96f8ba6, 0x11650c52bb89117, 0x18d8410c063c2} +{{0x6e5c05fbe78ca0, 0xe2948dcf498694, 0x1e8e8cd63f06aa8, 0x8b7657fc344dc9, 0xfa93e0f5839316, 0x378f4c48c79c6, 0x1cde2bec96f8ba6, 0x11650c52bb89117, 0x18d8410c063c2}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1044, 0x2d0, 0x1004, 0x1082, 0x535, 0x141a, 0x10a6, 0x1f9d, 0xc2d, 0x1347, 0xdf4, 0x1db1, 0x90e, 0x116d, 0x59c, 0xc2b, 0x7c2, 0x15d7, 0x119, 0x32c, 0x1e89, 0x1b01, 0xe5f, 0x105f, 0xd7d, 0xb4f, 0x1c33, 0x1b3b, 0xf2d, 0xc22, 0x11d8, 0x1848, 0x11a9, 0x1ee7, 0x6ea, 0x165d, 0x17d4, 0x77, 0x64b} +{{0x1044, 0x2d0, 0x1004, 0x1082, 0x535, 0x141a, 0x10a6, 0x1f9d, 0xc2d, 0x1347, 0xdf4, 0x1db1, 0x90e, 0x116d, 0x59c, 0xc2b, 0x7c2, 0x15d7, 0x119, 0x32c, 0x1e89, 0x1b01, 0xe5f, 0x105f, 0xd7d, 0xb4f, 0x1c33, 0x1b3b, 0xf2d, 0xc22, 0x11d8, 0x1848, 0x11a9, 0x1ee7, 0x6ea, 0x165d, 0x17d4, 0x77, 0x64b}} #elif RADIX == 32 -{0x8227755, 0x5004168, 0x68a6b08, 0x1cec29a8, 0x1347616f, 0x1bb62df4, 
0xe45b521, 0x1c261596, 0x2335d73, 0xfa24658, 0x1f72fec0, 0x9ed7d82, 0xcef866b, 0x6113cb7, 0x138488ec, 0x1abdcf1a, 0x1ea5974d, 0x83d} +{{0x8227755, 0x5004168, 0x68a6b08, 0x1cec29a8, 0x1347616f, 0x1bb62df4, 0xe45b521, 0x1c261596, 0x2335d73, 0xfa24658, 0x1f72fec0, 0x9ed7d82, 0xcef866b, 0x6113cb7, 0x138488ec, 0x1abdcf1a, 0x1ea5974d, 0x83d}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6b0828020b42089d, 0x1d85bf9d8535068a, 0xe45b521ddb16fa4d, 0xc08cd75cf84c2b2c, 0xb05f72fec07d1232, 0x9e5bb3be19ad3daf, 0x7b9e3538488ec308, 0x1681defa965d375} +{{0x6b0828020b42089d, 0x1d85bf9d8535068a, 0xe45b521ddb16fa4d, 0xc08cd75cf84c2b2c, 0xb05f72fec07d1232, 0x9e5bb3be19ad3daf, 0x7b9e3538488ec308, 0x1681defa965d375}} #else -{0x1050041684113b, 0x1bf9d8535068a6b, 0x10eed8b7d268ec2, 0x13e130acb3916d4, 0xfa24658119aeb, 0xd3dafb05f72fec, 0x1844f2dd9df0cd, 0x1d5ee78d4e1223b, 0x1203bdf52cba6} +{{0x1050041684113b, 0x1bf9d8535068a6b, 0x10eed8b7d268ec2, 0x13e130acb3916d4, 0xfa24658119aeb, 0xd3dafb05f72fec, 0x1844f2dd9df0cd, 0x1d5ee78d4e1223b, 0x1203bdf52cba6}} #endif #endif , #if 0 #elif RADIX == 16 -{0x7bc, 0x14d4, 0x1225, 0x1afb, 0x179e, 0x2c0, 0x1c0, 0x1267, 0x450, 0x1f26, 0x1e3f, 0x2bb, 0x19a5, 0x12f9, 0xa57, 0x2d, 0x1ed, 0xa16, 0x754, 0x1893, 0x759, 0x6bb, 0x618, 0x1379, 0xff3, 0x1989, 0x1abb, 0x1c40, 0x1bf5, 0x71e, 0xd6d, 0xc04, 0x15ef, 0x6aa, 0x4da, 0x1fb6, 0xb5b, 0x9f2, 0x211} +{{0x7bc, 0x14d4, 0x1225, 0x1afb, 0x179e, 0x2c0, 0x1c0, 0x1267, 0x450, 0x1f26, 0x1e3f, 0x2bb, 0x19a5, 0x12f9, 0xa57, 0x2d, 0x1ed, 0xa16, 0x754, 0x1893, 0x759, 0x6bb, 0x618, 0x1379, 0xff3, 0x1989, 0x1abb, 0x1c40, 0x1bf5, 0x71e, 0xd6d, 0xc04, 0x15ef, 0x6aa, 0x4da, 0x1fb6, 0xb5b, 0x9f2, 0x211}} #elif RADIX == 32 -{0x3de2735, 0x17225a6a, 0x102f3daf, 0x13387005, 0x1f262284, 0x14577e3f, 0xbcbe734, 0x1ed016a9, 0xea8a160, 0x19d67126, 0x1930c1ae, 0x112ff39b, 0x11035779, 0x138f6fd7, 0x1ec046b6, 0x168d555e, 0x1adfed89, 0x412} +{{0x3de2735, 0x17225a6a, 0x102f3daf, 0x13387005, 0x1f262284, 0x14577e3f, 0xbcbe734, 0x1ed016a9, 0xea8a160, 0x19d67126, 0x1930c1ae, 0x112ff39b, 0x11035779, 0x138f6fd7, 0x1ec046b6, 0x168d555e, 0x1adfed89, 0x412}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3dafb912d350f789, 0x988a12670e00b02f, 0xbcbe734a2bbf1ffc, 0x33aa28583da02d52, 0x737930c1aeceb389, 0xb7ebc40d5de625fe, 0x1aaabdec046b69c7, 0x15a7c96b7fb626d} +{{0x3dafb912d350f789, 0x988a12670e00b02f, 0xbcbe734a2bbf1ffc, 0x33aa28583da02d52, 0x737930c1aeceb389, 0xb7ebc40d5de625fe, 0x1aaabdec046b69c7, 0x15a7c96b7fb626d}} #else -{0x15f7225a6a1ef13, 0x12670e00b02f3d, 0x1a515df8ffe4c45, 0xf680b54af2f9c, 0x1d9d6712675450b, 0x625fe737930c1a, 0x14e3dbf5e206aef, 0x1b46aaaf7b011ad, 0x104f92d6ff6c4} +{{0x15f7225a6a1ef13, 0x12670e00b02f3d, 0x1a515df8ffe4c45, 0xf680b54af2f9c, 0x1d9d6712675450b, 0x625fe737930c1a, 0x14e3dbf5e206aef, 0x1b46aaaf7b011ad, 0x104f92d6ff6c4}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if 
defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -1432,261 +1432,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x14d5, 0x1163, 0xf47, 0x337, 0x71e, 0x4ad, 0x1071, 0x19d4, 0x11d9, 0xdce, 0x186a, 0x1785, 0x13c8, 0x695, 0xe1b, 0x54b, 0x174f, 0xd0c, 0x17cb, 0x17db, 0xb41, 0xb78, 0x1a46, 0x66f, 0x23, 0x836, 0x19ce, 0xcdf, 0x90b, 0x6fb, 0x1508, 0x1bb5, 0x6aa, 0x1219, 0x1bba, 0x1d67, 0x1e46, 0x8d8, 0x62c} +{{0x14d5, 0x1163, 0xf47, 0x337, 0x71e, 0x4ad, 0x1071, 0x19d4, 0x11d9, 0xdce, 0x186a, 0x1785, 0x13c8, 0x695, 0xe1b, 0x54b, 0x174f, 0xd0c, 0x17cb, 0x17db, 0xb41, 0xb78, 0x1a46, 0x66f, 0x23, 0x836, 0x19ce, 0xcdf, 0x90b, 0x6fb, 0x1508, 0x1bb5, 0x6aa, 0x1219, 0x1bba, 0x1d67, 0x1e46, 0x8d8, 0x62c}} #elif RADIX == 32 -{0x1a6af50e, 0xef478b1, 0xb4e3c33, 0xea41c49, 0xdce8ece, 0x2f0b86a, 0xd9a5679, 0x14f2a5b8, 0xf96d0cb, 0x2d06fb7, 0xfd232de, 0x6c02333, 0x137f39c8, 0x37da42d, 0x15bb5a84, 0xea4326a, 0x123759f7, 0x9c7} +{{0x1a6af50e, 0xef478b1, 0xb4e3c33, 0xea41c49, 0xdce8ece, 0x2f0b86a, 0xd9a5679, 0x14f2a5b8, 0xf96d0cb, 0x2d06fb7, 0xfd232de, 0x6c02333, 0x137f39c8, 0x37da42d, 0x15bb5a84, 0xea4326a, 0x123759f7, 0x9c7}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3c3377a3c58e9abd, 0x3a3b39d483892b4e, 0xd9a56791785c3537, 0xbbe5b432e9e54b70, 0x666fd232de16837d, 0xd216cdfce720d804, 0x4864d55bb5a841be, 0x72363c8dd67ddd} +{{0x3c3377a3c58e9abd, 0x3a3b39d483892b4e, 0xd9a56791785c3537, 0xbbe5b432e9e54b70, 0x666fd232de16837d, 0xd216cdfce720d804, 0x4864d55bb5a841be, 0x72363c8dd67ddd}} #else -{0x66ef478b1d357a, 0x139d483892b4e3c, 0x1c8bc2e1a9b9d1d, 0xba7952dc366959, 0x1c2d06fb77cb686, 0xd804666fd232d, 0xdf690b66fe739, 0x1752193556ed6a1, 0xe46c791bacfb} +{{0x66ef478b1d357a, 0x139d483892b4e3c, 0x1c8bc2e1a9b9d1d, 0xba7952dc366959, 0x1c2d06fb77cb686, 0xd804666fd232d, 0xdf690b66fe739, 0x1752193556ed6a1, 0xe46c791bacfb}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x1d37, 0x1c58, 0x1bd1, 0x10cd, 0x9c7, 0x92b, 0x41c, 0xe75, 0x1476, 0x1373, 0xe1a, 0x5e1, 0xcf2, 0x19a5, 0x1b86, 0x1952, 0x5d3, 0x1b43, 0x1df2, 0xdf6, 0x2d0, 0x12de, 0x1e91, 0x199b, 0x1008, 0x120d, 0x1e73, 0x1b37, 0x1a42, 0x1be, 0xd42, 0x16ed, 0x9aa, 0x1486, 0x1eee, 0x1759, 0x791, 0x236, 0x5bb} +{{0x1d37, 0x1c58, 0x1bd1, 0x10cd, 0x9c7, 0x92b, 0x41c, 0xe75, 0x1476, 0x1373, 0xe1a, 0x5e1, 0xcf2, 0x19a5, 0x1b86, 0x1952, 0x5d3, 0x1b43, 0x1df2, 0xdf6, 0x2d0, 0x12de, 0x1e91, 0x199b, 0x1008, 0x120d, 0x1e73, 0x1b37, 0x1a42, 0x1be, 0xd42, 0x16ed, 0x9aa, 0x1486, 0x1eee, 0x1759, 0x791, 0x236, 0x5bb}} #elif RADIX == 32 -{0xe9becab, 0x1bbd1e2c, 0xad38f0c, 0x13a90712, 0x1373a3b3, 0x8bc2e1a, 0x366959e, 0x1d3ca96e, 0x1be5b432, 0x10b41bed, 0x1bf48cb7, 0x1b008cc, 0xcdfce72, 0xdf690b, 0x156ed6a1, 0x1ba90c9a, 0x1c8dd67d, 0xd31} +{{0xe9becab, 0x1bbd1e2c, 0xad38f0c, 0x13a90712, 0x1373a3b3, 0x8bc2e1a, 0x366959e, 0x1d3ca96e, 0x1be5b432, 0x10b41bed, 0x1bf48cb7, 0x1b008cc, 0xcdfce72, 0xdf690b, 0x156ed6a1, 0x1ba90c9a, 0x1c8dd67d, 0xd31}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x8f0cdde8f163a6fb, 0xce8ece7520e24ad3, 0x366959e45e170d4d, 0x6ef96d0cba7952dc, 0x199bf48cb785a0df, 0xb485b37f39c83601, 0x52193556ed6a106f, 0x488d8f23759f77} +{{0x8f0cdde8f163a6fb, 
0xce8ece7520e24ad3, 0x366959e45e170d4d, 0x6ef96d0cba7952dc, 0x199bf48cb785a0df, 0xb485b37f39c83601, 0x52193556ed6a106f, 0x488d8f23759f77}} #else -{0x19bbd1e2c74df6, 0xce7520e24ad38f, 0xf22f0b86a6e747, 0x12e9e54b70d9a56, 0xf0b41beddf2da1, 0x83601199bf48cb, 0x837da42d9bf9ce, 0x1dd4864d55bb5a8, 0x911b1e46eb3e} +{{0x19bbd1e2c74df6, 0xce7520e24ad38f, 0xf22f0b86a6e747, 0x12e9e54b70d9a56, 0xf0b41beddf2da1, 0x83601199bf48cb, 0x837da42d9bf9ce, 0x1dd4864d55bb5a8, 0x911b1e46eb3e}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x8f6, 0xe30, 0x75, 0xaf7, 0xb3c, 0x1672, 0x1e05, 0x157a, 0x16b1, 0x1fd, 0x3c2, 0x114d, 0x1000, 0x1b4f, 0x1f37, 0xc0e, 0xdd, 0x4de, 0xdff, 0x55e, 0x1a2f, 0x353, 0xc4a, 0x1225, 0x9ed, 0x9ff, 0x1493, 0x18e6, 0x96c, 0x163c, 0xa76, 0x1c78, 0x11b4, 0x1087, 0x1519, 0xc82, 0x3e0, 0x7d4, 0xf5} 
+{{0x8f6, 0xe30, 0x75, 0xaf7, 0xb3c, 0x1672, 0x1e05, 0x157a, 0x16b1, 0x1fd, 0x3c2, 0x114d, 0x1000, 0x1b4f, 0x1f37, 0xc0e, 0xdd, 0x4de, 0xdff, 0x55e, 0x1a2f, 0x353, 0xc4a, 0x1225, 0x9ed, 0x9ff, 0x1493, 0x18e6, 0x96c, 0x163c, 0xa76, 0x1c78, 0x11b4, 0x1087, 0x1519, 0xc82, 0x3e0, 0x7d4, 0xf5}} #elif RADIX == 32 -{0x47b122a, 0xe075718, 0x1c9678af, 0xbd7816c, 0x1fdb58d, 0x229a3c2, 0x1bed3e00, 0xdd6077c, 0x1bfe4de0, 0x1e8bcabc, 0x56250d4, 0x1fe9ed91, 0x39a9269, 0xb1e25b3, 0x9c7853b, 0x6610f1b, 0x1f0320aa, 0x7a0} +{{0x47b122a, 0xe075718, 0x1c9678af, 0xbd7816c, 0x1fdb58d, 0x229a3c2, 0x1bed3e00, 0xdd6077c, 0x1bfe4de0, 0x1e8bcabc, 0x56250d4, 0x1fe9ed91, 0x39a9269, 0xb1e25b3, 0x9c7853b, 0x6610f1b, 0x1f0320aa, 0x7a0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x78af703ab8c11ec4, 0xf6d6357af02d9c96, 0xbed3e00114d1e107, 0xe6ff93781bac0ef9, 0xb2256250d4f45e55, 0x12d98e6a49a7fd3d, 0xc21e369c7853b58f, 0xe9f507c0c82a8c} +{{0x78af703ab8c11ec4, 0xf6d6357af02d9c96, 0xbed3e00114d1e107, 0xe6ff93781bac0ef9, 0xb2256250d4f45e55, 0x12d98e6a49a7fd3d, 0xc21e369c7853b58f, 0xe9f507c0c82a8c}} #else -{0x15ee07571823d89, 0x357af02d9c9678, 0x8a68f083fb6b, 0x6eb03be6fb4f8, 0x9e8bcabcdff26f, 0x7fd3db2256250d, 0x1ac7896cc73524d, 0x330878da71e14e, 0x23ea0f819055} +{{0x15ee07571823d89, 0x357af02d9c9678, 0x8a68f083fb6b, 0x6eb03be6fb4f8, 0x9e8bcabcdff26f, 0x7fd3db2256250d, 0x1ac7896cc73524d, 0x330878da71e14e, 0x23ea0f819055}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1227, 0x1240, 0x423, 0xd84, 0x1dc1, 0x982, 0x1cb3, 0x14e1, 0x16eb, 0x1409, 0xf49, 0xec8, 0x888, 0xe0b, 0x1c45, 0x176, 0x49e, 0x1d40, 0x1e6b, 0x7a3, 0xfba, 0x175f, 0x1908, 0xb88, 0x168c, 0x1324, 0x159f, 0x1077, 0xac3, 0x10b4, 0x478, 0x240, 0x1682, 0x14f, 0x1599, 0x152f, 0x1197, 0xad5, 0x133} +{{0x1227, 0x1240, 0x423, 0xd84, 0x1dc1, 0x982, 0x1cb3, 0x14e1, 0x16eb, 0x1409, 0xf49, 0xec8, 0x888, 0xe0b, 0x1c45, 0x176, 0x49e, 0x1d40, 0x1e6b, 0x7a3, 0xfba, 0x175f, 0x1908, 0xb88, 0x168c, 0x1324, 0x159f, 0x1077, 0xac3, 0x10b4, 0x478, 0x240, 0x1682, 0x14f, 0x1599, 0x152f, 0x1197, 0xad5, 0x133}} #elif RADIX == 32 -{0x91396c4, 0x8423920, 0xbb82d8, 0x70f2cd3, 0x1409b75d, 0x1d90f49, 0x2b82d11, 0x9e0bb71, 0x1cd7d402, 0x1bee8f47, 0x8c845d7, 0x4968c5c, 0x1deb3f3, 0x85a2b0e, 0x424023c, 0x6429f68, 0xcbd4beb, 0xac} +{{0x91396c4, 0x8423920, 0xbb82d8, 0x70f2cd3, 0x1409b75d, 0x1d90f49, 0x2b82d11, 0x9e0bb71, 0x1cd7d402, 0x1bee8f47, 0x8c845d7, 0x4968c5c, 0x1deb3f3, 0x85a2b0e, 0x424023c, 0x6429f68, 0xcbd4beb, 0xac}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x82d84211c90244e5, 0x26dd74e1e59a60bb, 0x2b82d110ec87a4d0, 0x3f35f50093c176e2, 0x8b88c845d7df747a, 0x1587077acfcc92d1, 0x853ed0424023c42d, 0x12ab5632f52facc} +{{0x82d84211c90244e5, 0x26dd74e1e59a60bb, 0x2b82d110ec87a4d0, 0x3f35f50093c176e2, 0x8b88c845d7df747a, 0x1587077acfcc92d1, 0x853ed0424023c42d, 0x12ab5632f52facc}} #else -{0x1b08423920489cb, 0x174e1e59a60bb82, 0x887643d268136e, 0x24f05db88ae0b4, 0xfbee8f47e6bea0, 0xc92d18b88c845d, 0x2168ac383bd67e, 0x13214fb4109008f, 0xa56ac65ea5f5} +{{0x1b08423920489cb, 0x174e1e59a60bb82, 0x887643d268136e, 0x24f05db88ae0b4, 0xfbee8f47e6bea0, 0xc92d18b88c845d, 0x2168ac383bd67e, 0x13214fb4109008f, 0xa56ac65ea5f5}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1544, 0x1dea, 0x162d, 0x73d, 0x6d1, 0x1511, 0x5f2, 0x275, 0x1aff, 0x1c7, 0x1d84, 0x1875, 0x10df, 0x2e0, 0x70b, 0x9eb, 0x897, 0xf0f, 0xa5d, 0xf38, 0x108c, 0x1c12, 0x1649, 0x1849, 0x9b8, 0x2bc, 0x1b0, 0xd0e, 0xfdb, 0x8ee, 0x1b0b, 0x1fdc, 0xc1, 0x1771, 0x1776, 0xa12, 0x1392, 0xd10, 0x618} +{{0x1544, 0x1dea, 0x162d, 0x73d, 0x6d1, 0x1511, 0x5f2, 0x275, 0x1aff, 0x1c7, 0x1d84, 0x1875, 0x10df, 0x2e0, 0x70b, 0x9eb, 0x897, 0xf0f, 0xa5d, 0xf38, 0x108c, 0x1c12, 0x1649, 0x1849, 0x9b8, 0x2bc, 0x1b0, 0xd0e, 0xfdb, 0x8ee, 0x1b0b, 0x1fdc, 0xc1, 0x1771, 0x1776, 0xa12, 0x1392, 0xd10, 0x618}} #elif RADIX == 32 -{0xaa27395, 0x1b62def5, 0x44da273, 0x13a97caa, 0x1c7d7f8, 0x1f0ebd84, 0x58b821b, 0x974f59c, 0x14baf0f4, 0x14231e70, 0x9b24f04, 0x1789b8c2, 0x14383602, 0x14773f6d, 0x3fdcd85, 0x1daee20c, 0x1c9284ae, 0xd04} +{{0xaa27395, 0x1b62def5, 0x44da273, 0x13a97caa, 0x1c7d7f8, 0x1f0ebd84, 0x58b821b, 0x974f59c, 0x14baf0f4, 0x14231e70, 0x9b24f04, 0x1789b8c2, 0x14383602, 0x14773f6d, 0x3fdcd85, 0x1daee20c, 0x1c9284ae, 0xd04}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xa273db16f7aaa89c, 0x1f5fe2752f95444d, 0x58b821bf875ec207, 0x852ebc3d12e9eb38, 0x1849b24f04a118f3, 0x9fb6d0e0d80af137, 0x5dc4183fdcd85a3b, 0x183442724a12bbb} +{{0xa273db16f7aaa89c, 0x1f5fe2752f95444d, 0x58b821bf875ec207, 0x852ebc3d12e9eb38, 0x1849b24f04a118f3, 0x9fb6d0e0d80af137, 0x5dc4183fdcd85a3b, 0x183442724a12bbb}} #else -{0xe7b62def555139, 0x1e2752f95444da2, 0xdfc3af61038faf, 0x144ba7ace162e08, 0x94231e70a5d787, 0xaf1371849b24f0, 0xd1dcfdb68706c0, 0xed771060ff7361, 0x156884e494257} +{{0xe7b62def555139, 0x1e2752f95444da2, 0xdfc3af61038faf, 0x144ba7ace162e08, 0x94231e70a5d787, 0xaf1371849b24f0, 0xd1dcfdb68706c0, 0xed771060ff7361, 0x156884e494257}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1756, 0x1187, 0x608, 0x637, 0x5c5, 0x459, 0x12f2, 0x9a1, 0x314, 0xe7f, 0x1c73, 0x27f, 0xa8d, 0x17f8, 0x1e33, 0x1878, 0x1c21, 0x123b, 0xb76, 0x7ea, 0x157, 0x16b4, 0xad7, 0x413, 0x56e, 0x4f3, 0x881, 0x1319, 0x1cc3, 0x1813, 0x1575, 0x1f0, 0x13f9, 
0x1ef4, 0x8ae, 0x17c8, 0xd48, 0x157d, 0x5ea} +{{0x1756, 0x1187, 0x608, 0x637, 0x5c5, 0x459, 0x12f2, 0x9a1, 0x314, 0xe7f, 0x1c73, 0x27f, 0xa8d, 0x17f8, 0x1e33, 0x1878, 0x1c21, 0x123b, 0xb76, 0x7ea, 0x157, 0x16b4, 0xad7, 0x413, 0x56e, 0x4f3, 0x881, 0x1319, 0x1cc3, 0x1813, 0x1575, 0x1f0, 0x13f9, 0x1ef4, 0x8ae, 0x17c8, 0xd48, 0x157d, 0x5ea}} #elif RADIX == 32 -{0x1bab7032, 0xe6088c3, 0x164b8a63, 0xd0cbc88, 0xe7f18a2, 0x144ffc73, 0x19dfe151, 0x21c3c78, 0x16ed23be, 0x55cfd4, 0x1356bdad, 0x1e656e20, 0xc651024, 0x1c09f30e, 0x121f0aba, 0xbbde93f, 0xa45f211, 0x8eb} +{{0x1bab7032, 0xe6088c3, 0x164b8a63, 0xd0cbc88, 0xe7f18a2, 0x144ffc73, 0x19dfe151, 0x21c3c78, 0x16ed23be, 0x55cfd4, 0x1356bdad, 0x1e656e20, 0xc651024, 0x1c09f30e, 0x121f0aba, 0xbbde93f, 0xa45f211, 0x8eb}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x8a637304461eeadc, 0xfc6289a19791164b, 0x9dfe151a27fe39b9, 0xa5bb48ef843878f1, 0xc41356bdad02ae7e, 0xf98731944093ccad, 0x7bd27f21f0abae04, 0x155f5a917c8457} +{{0x8a637304461eeadc, 0xfc6289a19791164b, 0x9dfe151a27fe39b9, 0xa5bb48ef843878f1, 0xc41356bdad02ae7e, 0xf98731944093ccad, 0x7bd27f21f0abae04, 0x155f5a917c8457}} #else -{0xc6e6088c3dd5b8, 0x89a19791164b8a, 0x8d13ff1cdcfe31, 0x1e10e1e3c677f85, 0x1a055cfd4b7691d, 0x13ccadc41356bda, 0x17027cc398ca204, 0x15def49fc87c2ae, 0x2abeb522f908} +{{0xc6e6088c3dd5b8, 0x89a19791164b8a, 0x8d13ff1cdcfe31, 0x1e10e1e3c677f85, 0x1a055cfd4b7691d, 0x13ccadc41356bda, 0x17027cc398ca204, 0x15def49fc87c2ae, 0x2abeb522f908}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0xbba, 0x1eb6, 0x49a, 0x12a5, 0x12d2, 0x30a, 0x172f, 0x174d, 0x1231, 0x1036, 0x122e, 0x158, 0x743, 0xf10, 0x1e52, 0x18c7, 0x152e, 0x13b1, 0x7ae, 0x128d, 0x9c4, 0x848, 0x4, 
0x1e64, 0x1e6f, 0x10ca, 0x3d4, 0x164, 0x1c8, 0x3e2, 0x4e8, 0x27b, 0x1d32, 0x1cc2, 0x1c60, 0x7a8, 0x13df, 0x1f6b, 0x6ad} +{{0xbba, 0x1eb6, 0x49a, 0x12a5, 0x12d2, 0x30a, 0x172f, 0x174d, 0x1231, 0x1036, 0x122e, 0x158, 0x743, 0xf10, 0x1e52, 0x18c7, 0x152e, 0x13b1, 0x7ae, 0x128d, 0x9c4, 0x848, 0x4, 0x1e64, 0x1e6f, 0x10ca, 0x3d4, 0x164, 0x1c8, 0x3e2, 0x4e8, 0x27b, 0x1d32, 0x1cc2, 0x1c60, 0x7a8, 0x13df, 0x1f6b, 0x6ad}} #elif RADIX == 32 -{0x5dd7eaa, 0xa49af5b, 0x2a5a52a, 0x1a6dcbc6, 0x1036918d, 0xc2b122e, 0x93c40e8, 0x12ec63f9, 0xf5d3b1a, 0x271251a, 0x4002212, 0x195e6ff3, 0x5907a90, 0x1f10720, 0x427b274, 0x183985d3, 0x1ef9ea38, 0x45c} +{{0x5dd7eaa, 0xa49af5b, 0x2a5a52a, 0x1a6dcbc6, 0x1036918d, 0xc2b122e, 0x93c40e8, 0x12ec63f9, 0xf5d3b1a, 0x271251a, 0x4002212, 0x195e6ff3, 0x5907a90, 0x1f10720, 0x427b274, 0x183985d3, 0x1ef9ea38, 0x45c}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xa52a524d7ad9775f, 0xda46374db978c2a5, 0x93c40e8615891740, 0xd3d74ec6a5d8c7f2, 0xfe64002212138928, 0x83901641ea432bcd, 0x730ba6427b2740f8, 0x11fdae7be7a8e30} +{{0xa52a524d7ad9775f, 0xda46374db978c2a5, 0x93c40e8615891740, 0xd3d74ec6a5d8c7f2, 0xfe64002212138928, 0x83901641ea432bcd, 0x730ba6427b2740f8, 0x11fdae7be7a8e30}} #else -{0x54a49af5b2eebf, 0x374db978c2a5a5, 0x1430ac48ba06d23, 0x1a97631fca4f103, 0x4271251a7ae9d8, 0x32bcdfe6400221, 0x7c41c80b20f52, 0xc1cc2e9909ec9d, 0x8fb5cf7cf51c} +{{0x54a49af5b2eebf, 0x374db978c2a5a5, 0x1430ac48ba06d23, 0x1a97631fca4f103, 0x4271251a7ae9d8, 0x32bcdfe6400221, 0x7c41c80b20f52, 0xc1cc2e9909ec9d, 0x8fb5cf7cf51c}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1d5e, 0x18e6, 0xc97, 0x1db2, 0x9df, 0x19d3, 0x1564, 0x1a3a, 0x90, 0xea5, 0xd74, 0x19fc, 0xf84, 0xadd, 0x2e5, 0x10bb, 0x183f, 0x1334, 0xa50, 0x54b, 0xd22, 0x1295, 0xf11, 0xfa1, 0x1810, 0xa3, 0xa81, 0x1026, 0x2b2, 0x19ee, 0x1a4a, 0xf8a, 0xfb3, 0x1463, 0x19c5, 0x42c, 0x830, 0x562, 0x3db} +{{0x1d5e, 0x18e6, 0xc97, 0x1db2, 0x9df, 0x19d3, 0x1564, 0x1a3a, 0x90, 0xea5, 0xd74, 0x19fc, 0xf84, 0xadd, 0x2e5, 0x10bb, 0x183f, 0x1334, 0xa50, 0x54b, 0xd22, 0x1295, 0xf11, 0xfa1, 0x1810, 0xa3, 0xa81, 0x1026, 0x2b2, 0x19ee, 0x1a4a, 0xf8a, 0xfb3, 0x1463, 0x19c5, 0x42c, 0x830, 0x562, 0x3db}} #elif RADIX == 32 -{0xeaf491f, 0x4c97c73, 0x14d3bfdb, 0x11d55933, 0xea50486, 0x133f8d74, 0x12ab75f0, 0x3f85d8b, 0x14a1334c, 0xb488a96, 0x1788ca5, 0x1478107d, 0x995020, 0xcf70aca, 0x6f8ad25, 0x1168c6fb, 0x1810b33, 0x892} +{{0xeaf491f, 0x4c97c73, 0x14d3bfdb, 0x11d55933, 0xea50486, 0x133f8d74, 0x12ab75f0, 0x3f85d8b, 0x14a1334c, 0xb488a96, 0x1788ca5, 0x1478107d, 0x995020, 0xcf70aca, 0x6f8ad25, 0x1168c6fb, 0x1810b33, 0x892}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xbfdb264be39babd2, 0x94121a3aab2674d3, 0x2ab75f099fc6ba3a, 0xb5284cd307f0bb17, 0xfa1788ca55a4454, 0x8565026540828f02, 0xd18df66f8ad2567b, 0x7958906042cce2} +{{0xbfdb264be39babd2, 0x94121a3aab2674d3, 0x2ab75f099fc6ba3a, 0xb5284cd307f0bb17, 0xfa1788ca55a4454, 0x8565026540828f02, 0xd18df66f8ad2567b, 0x7958906042cce2}} #else -{0x1b64c97c73757a4, 0x1a3aab2674d3bf, 0x184cfe35d1d4a09, 0xc1fc2ec5caadd7, 0xab488a96a5099a, 0x28f020fa1788ca, 0xb3dc2b28132a04, 0x18b4637d9be2b49, 0xf2b120c08599} +{{0x1b64c97c73757a4, 0x1a3aab2674d3bf, 0x184cfe35d1d4a09, 0xc1fc2ec5caadd7, 0xab488a96a5099a, 0x28f020fa1788ca, 0xb3dc2b28132a04, 0x18b4637d9be2b49, 0xf2b120c08599}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -1908,261 +1908,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0xca6, 0xc89, 0x411, 0x17e9, 0x188d, 0xad5, 0x8de, 0x403, 0x183a, 0x1e0d, 0x28c, 0xfdc, 0xbcc, 0xd0, 0xa3a, 0xb2e, 0x140c, 0x11b4, 0x4bb, 0x10da, 0xb22, 0x1a4c, 0xbfa, 0xbd5, 0xae6, 0xeff, 0x465, 0x8df, 0xcf9, 0x1c4a, 0x1807, 0x1462, 0xead, 0x1c43, 0x12a3, 0x1ec2, 0x1e12, 0xad0, 0x1cd} +{{0xca6, 0xc89, 0x411, 0x17e9, 0x188d, 0xad5, 0x8de, 0x403, 0x183a, 0x1e0d, 0x28c, 0xfdc, 0xbcc, 0xd0, 0xa3a, 0xb2e, 0x140c, 0x11b4, 0x4bb, 0x10da, 0xb22, 0x1a4c, 0xbfa, 0xbd5, 0xae6, 0xeff, 0x465, 0x8df, 0xcf9, 0x1c4a, 0x1807, 0x1462, 0xead, 0x1c43, 0x12a3, 0x1ec2, 0x1e12, 0xad0, 0x1cd}} #elif RADIX == 32 -{0x1653222c, 0x12411644, 0x15711b7e, 0x1a3795, 0x1e0dc1d1, 0x11fb828c, 0x1d034179, 0xc59728, 0x9771b4a, 0x2c8a1b4, 0x155fd693, 0x1feae65e, 0x37c8cae, 0x1e2533e5, 0x1b462c03, 0x8f886ea, 0x1097b0a5, 0x487} +{{0x1653222c, 0x12411644, 0x15711b7e, 0x1a3795, 0x1e0dc1d1, 0x11fb828c, 0x1d034179, 0xc59728, 0x9771b4a, 0x2c8a1b4, 0x155fd693, 0x1feae65e, 0x37c8cae, 0x1e2533e5, 0x1b462c03, 0x8f886ea, 0x1097b0a5, 0x487}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x1b7e9208b22594c8, 0x3707440346f2b571, 0xd0341798fdc14678, 0xa25dc6d2818b2e51, 0xcbd55fd69316450d, 0x99f28df232bbfd5c, 0xf10dd5b462c03f12, 0xeab43c25ec2951} +{{0x1b7e9208b22594c8, 0x3707440346f2b571, 0xd0341798fdc14678, 0xa25dc6d2818b2e51, 0xcbd55fd69316450d, 0x99f28df232bbfd5c, 0xf10dd5b462c03f12, 0xeab43c25ec2951}} #else -{0xfd2411644b2991, 0x1440346f2b5711b, 0x1cc7ee0a33c1b83, 0xa062cb94740d05, 0x62c8a1b44bb8da, 0x1bfd5ccbd55fd69, 0x1f894cf946f9195, 0x147c43756d18b00, 0x2568784bd852} +{{0xfd2411644b2991, 0x1440346f2b5711b, 0x1cc7ee0a33c1b83, 0xa062cb94740d05, 0x62c8a1b44bb8da, 0x1bfd5ccbd55fd69, 0x1f894cf946f9195, 0x147c43756d18b00, 0x2568784bd852}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0xb2b, 0xb22, 0x904, 0xdfa, 0xe23, 0x12b5, 0x1a37, 0x1100, 0xe0e, 0x783, 0xa3, 0x3f7, 0x2f3, 0x1034, 0x128e, 0x2cb, 0x503, 0x1c6d, 0x112e, 0x1436, 0x2c8, 0x1693, 0xafe, 0x12f5, 0x1ab9, 0xbbf, 0x1919, 0xa37, 0x133e, 0x1f12, 0x1601, 0xd18, 0x1bab, 0x1f10, 0x14a8, 0x17b0, 0x784, 0xab4, 0x653} +{{0xb2b, 0xb22, 0x904, 0xdfa, 0xe23, 0x12b5, 0x1a37, 0x1100, 0xe0e, 0x783, 0xa3, 0x3f7, 0x2f3, 0x1034, 0x128e, 0x2cb, 0x503, 0x1c6d, 0x112e, 0x1436, 0x2c8, 0x1693, 0xafe, 0x12f5, 0x1ab9, 0xbbf, 0x1919, 0xa37, 0x133e, 0x1f12, 0x1601, 0xd18, 0x1bab, 0x1f10, 0x14a8, 0x17b0, 0x784, 0xab4, 0x653}} #elif RADIX == 32 -{0x595f7f3, 0x14904591, 0xd5c46df, 0x8068de5, 0x7837074, 0xc7ee0a3, 0x740d05e, 0x103165ca, 0x25dc6d2, 0x18b2286d, 0x1557f5a4, 0x17fab997, 0x8df232b, 0x1f894cf9, 0x16d18b00, 0xa3e21ba, 0x1c25ec29, 0x521} +{{0x595f7f3, 0x14904591, 
0xd5c46df, 0x8068de5, 0x7837074, 0xc7ee0a3, 0x740d05e, 0x103165ca, 0x25dc6d2, 0x18b2286d, 0x1557f5a4, 0x17fab997, 0x8df232b, 0x1f894cf9, 0x16d18b00, 0xa3e21ba, 0x1c25ec29, 0x521}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x46dfa4822c89657d, 0xdc1d100d1bcad5c, 0x740d05e63f70519e, 0x689771b4a062cb94, 0x32f557f5a4c59143, 0xa67ca37c8caeff57, 0x7c43756d18b00fc4, 0x1aaad0f097b0a54} +{{0x46dfa4822c89657d, 0xdc1d100d1bcad5c, 0x740d05e63f70519e, 0x689771b4a062cb94, 0x32f557f5a4c59143, 0xa67ca37c8caeff57, 0x7c43756d18b00fc4, 0x1aaad0f097b0a54}} #else -{0x1bf49045912cafb, 0x1d100d1bcad5c46, 0xf31fb828cf06e0, 0x12818b2e51d0341, 0x98b2286d12ee36, 0xeff5732f557f5a, 0x7e2533e51be465, 0x151f10dd5b462c0, 0x1a55a1e12f614} +{{0x1bf49045912cafb, 0x1d100d1bcad5c46, 0xf31fb828cf06e0, 0x12818b2e51d0341, 0x98b2286d12ee36, 0xeff5732f557f5a, 0x7e2533e51be465, 0x151f10dd5b462c0, 0x1a55a1e12f614}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x517, 0x18a8, 0x1a92, 0x94f, 0x1bb0, 0xf2c, 0x43, 0x5a8, 0x1463, 0x1b4b, 0x1a1c, 0x1c0e, 0x148a, 0x7f5, 0x6a3, 0x820, 0x1fc7, 0x141c, 0x1c2b, 0xd98, 0x48c, 0x587, 0x1b23, 0x1fb5, 0x4c0, 0x179c, 0x169e, 0x1927, 0x16b8, 0x1beb, 0x6bb, 0x1923, 0x2b7, 0x146d, 0x32b, 0xd85, 0x1a89, 0x1fb0, 0x2be} +{{0x517, 0x18a8, 0x1a92, 0x94f, 0x1bb0, 0xf2c, 0x43, 0x5a8, 0x1463, 0x1b4b, 0x1a1c, 0x1c0e, 0x148a, 0x7f5, 0x6a3, 0x820, 0x1fc7, 0x141c, 0x1c2b, 0xd98, 0x48c, 0x587, 0x1b23, 0x1fb5, 0x4c0, 0x179c, 0x169e, 0x1927, 0x16b8, 0x1beb, 0x6bb, 0x1923, 0x2b7, 0x146d, 0x32b, 0xd85, 0x1a89, 0x1fb0, 0x2be}} #elif RADIX == 32 -{0x28bb412, 0x1fa92c54, 0xb376094, 0xd4010de, 0x1b4ba319, 0xb81da1c, 0x119fd691, 0x1c74101a, 0x185741cf, 0x19231b31, 0x15d91961, 0x1384c0fd, 0x49ed3d7, 0x1df5dae3, 0xf92335d, 0xae8da2b, 0x144b6146, 0xa86} +{{0x28bb412, 0x1fa92c54, 0xb376094, 0xd4010de, 0x1b4ba319, 0xb81da1c, 0x119fd691, 0x1c74101a, 0x185741cf, 0x19231b31, 0x15d91961, 0x1384c0fd, 0x49ed3d7, 0x1df5dae3, 0xf92335d, 0xae8da2b, 0x144b6146, 0xa86}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6094fd4962a0a2ed, 0x2e8c65a8021bcb37, 0x19fd6915c0ed0e6d, 0x8e15d073f8e82035, 0x1fb5d91961c918d9, 0xed71927b4f5e7098, 0xd1b456f92335defa, 0x7ec3512d85195} +{{0x6094fd4962a0a2ed, 0x2e8c65a8021bcb37, 0x19fd6915c0ed0e6d, 0x8e15d073f8e82035, 0x1fb5d91961c918d9, 0xed71927b4f5e7098, 0xd1b456f92335defa, 0x7ec3512d85195}} #else -{0x129fa92c54145da, 0x65a8021bcb3760, 0x8ae07687369746, 0xfe3a080d467f5a, 0x39231b31c2ba0e, 0x1e70981fb5d9196, 0xf7d76b8c93da7a, 0x5746d15be48cd7, 0xfd86a25b0a3} +{{0x129fa92c54145da, 0x65a8021bcb3760, 0x8ae07687369746, 0xfe3a080d467f5a, 0x39231b31c2ba0e, 0x1e70981fb5d9196, 0xf7d76b8c93da7a, 0x5746d15be48cd7, 0xfd86a25b0a3}} #endif #endif , #if 0 #elif RADIX == 16 -{0x147b, 0x14f1, 0xfdd, 0xb2a, 0xff7, 0x1426, 0xce1, 0x19a8, 0x1bf3, 0xbdd, 0x16dd, 0x1339, 0x10dd, 0x8f4, 0x1d29, 0x1b05, 0x1ee, 0x187b, 0x118a, 0x1e55, 0xcde, 0x1a18, 0x1b1f, 0x1648, 0x1c75, 0x1db8, 0xa2a, 0x1ab6, 0x1fa, 0xb0a, 0x1bdf, 0x1d18, 0x1a98, 0x12d9, 0x13df, 0x6e0, 0xa3c, 0x537, 0x345} +{{0x147b, 0x14f1, 0xfdd, 0xb2a, 0xff7, 0x1426, 0xce1, 0x19a8, 0x1bf3, 0xbdd, 0x16dd, 0x1339, 0x10dd, 0x8f4, 0x1d29, 0x1b05, 0x1ee, 0x187b, 0x118a, 0x1e55, 0xcde, 0x1a18, 0x1b1f, 0x1648, 0x1c75, 0x1db8, 0xa2a, 0x1ab6, 0x1fa, 0xb0a, 0x1bdf, 0x1d18, 0x1a98, 0x12d9, 0x13df, 0x6e0, 0xa3c, 0x537, 0x345}} #elif RADIX == 32 -{0x1a3dbe03, 0x14fdda78, 0x99feeb2, 0xd433868, 0xbdddf9e, 0x166736dd, 0x14a3d21b, 0x1eed82f4, 0x31587b0, 0x337bcab, 0x8d8fe86, 0x171c75b2, 0xad9455d, 0x158507eb, 0x11d18def, 0x17e5b3a9, 0x11e1b827, 0x13a} +{{0x1a3dbe03, 0x14fdda78, 0x99feeb2, 0xd433868, 0xbdddf9e, 0x166736dd, 0x14a3d21b, 0x1eed82f4, 0x31587b0, 0x337bcab, 0x8d8fe86, 0x171c75b2, 0xad9455d, 0x158507eb, 0x11d18def, 0x17e5b3a9, 0x11e1b827, 0x13a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xeeb2a7eed3c68f6f, 0x777e79a8670d099f, 0x4a3d21bb339b6eaf, 0x58c561ec3ddb05e9, 0xb648d8fe8619bde5, 0x83f5ab651576e38e, 0xcb67531d18defac2, 0xd94dd4786e09ef} +{{0xeeb2a7eed3c68f6f, 0x777e79a8670d099f, 0x4a3d21bb339b6eaf, 0x58c561ec3ddb05e9, 0xb648d8fe8619bde5, 0x83f5ab651576e38e, 0xcb67531d18defac2, 0xd94dd4786e09ef}} #else -{0x1654fdda78d1edf, 0x79a8670d099fee, 0xdd99cdb757bbbf, 0x10f76c17a528f48, 0xc337bcab18ac3d, 0x16e38eb648d8fe8, 0x1d6141fad5b28ab, 0x1bf2d9d4c74637b, 0x29ba8f0dc13} +{{0x1654fdda78d1edf, 0x79a8670d099fee, 0xdd99cdb757bbbf, 
0x10f76c17a528f48, 0xc337bcab18ac3d, 0x16e38eb648d8fe8, 0x1d6141fad5b28ab, 0x1bf2d9d4c74637b, 0x29ba8f0dc13}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0xc4b, 0x1f6e, 0xcba, 0x1a23, 0x8a1, 0x7c3, 0x1a45, 0x1ca3, 0x6a9, 0x643, 0x3b, 0xc83, 0x208, 0x21a, 0xd43, 0x1805, 0x1078, 0x9af, 0x80a, 0x1555, 0x50d, 0x1eb8, 0xa49, 0x161c, 0x1eee, 0xe1b, 0xf4b, 0x9de, 0x117e, 0x14f8, 0xea7, 0xd18, 0x112a, 0x1a38, 0x1cc7, 0x1c36, 0xe5, 0x10fa, 0x411} +{{0xc4b, 0x1f6e, 0xcba, 0x1a23, 0x8a1, 0x7c3, 0x1a45, 0x1ca3, 0x6a9, 0x643, 0x3b, 0xc83, 0x208, 0x21a, 0xd43, 0x1805, 0x1078, 0x9af, 0x80a, 0x1555, 0x50d, 0x1eb8, 0xa49, 0x161c, 0x1eee, 0xe1b, 0xf4b, 0x9de, 0x117e, 0x14f8, 0xea7, 0xd18, 0x112a, 0x1a38, 0x1cc7, 0x1c36, 0xe5, 0x10fa, 0x411}} #elif RADIX == 32 -{0x625cd26, 0x6cbafb7, 0x10d143a2, 0x51e914f, 0x643354f, 0x190603b, 0x1886841, 0x78c02b5, 0x10149af8, 0x1436aaa, 0x1c524fae, 0x37eeeb0, 0x779e96e, 0x1a7c45f9, 0x14d18753, 0x11f47112, 0x72f0db9, 0x6d0} +{{0x625cd26, 0x6cbafb7, 0x10d143a2, 0x51e914f, 0x643354f, 0x190603b, 0x1886841, 0x78c02b5, 0x10149af8, 0x1436aaa, 0x1c524fae, 0x37eeeb0, 0x779e96e, 0x1a7c45f9, 0x14d18753, 0x11f47112, 0x72f0db9, 0x6d0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x43a2365d7db98973, 0xcd53ca3d229f0d1, 0x18868410c8301d99, 0x540526be0f18056a, 0xd61c524fae0a1b55, 0x22fc9de7a5b86fdd, 0xe8e2254d18753d3e, 0x7c3e81cbc36e63} +{{0x43a2365d7db98973, 0xcd53ca3d229f0d1, 0x18868410c8301d99, 0x540526be0f18056a, 0xd61c524fae0a1b55, 0x22fc9de7a5b86fdd, 0xe8e2254d18753d3e, 0x7c3e81cbc36e63}} #else -{0x1446cbafb7312e6, 0x13ca3d229f0d143, 0x864180ecc866a, 0x183c6015a8621a1, 0x1c1436aaa80a4d7, 0x186fddd61c524fa, 0x1e9f117e4ef3d2d, 0x18fa388953461d4, 
0xf87d039786dc} +{{0x1446cbafb7312e6, 0x13ca3d229f0d143, 0x864180ecc866a, 0x183c6015a8621a1, 0x1c1436aaa80a4d7, 0x186fddd61c524fa, 0x1e9f117e4ef3d2d, 0x18fa388953461d4, 0xf87d039786dc}} #endif #endif , #if 0 #elif RADIX == 16 -{0x9d5, 0x0, 0x181d, 0xced, 0x1fe0, 0x267, 0xc65, 0x1a4d, 0x9e3, 0x1f0c, 0x5d, 0xbae, 0x276, 0x1551, 0x1684, 0x1eab, 0x17f0, 0x1b20, 0xae6, 0xbc3, 0x95, 0x17c3, 0xfd8, 0x1359, 0x3f5, 0x12b6, 0x1410, 0x113, 0x1a19, 0x1c1d, 0xd91, 0x1446, 0x1233, 0x170, 0x1c50, 0x13ac, 0x6eb, 0x926, 0x3bf} +{{0x9d5, 0x0, 0x181d, 0xced, 0x1fe0, 0x267, 0xc65, 0x1a4d, 0x9e3, 0x1f0c, 0x5d, 0xbae, 0x276, 0x1551, 0x1684, 0x1eab, 0x17f0, 0x1b20, 0xae6, 0xbc3, 0x95, 0x17c3, 0xfd8, 0x1359, 0x3f5, 0x12b6, 0x1410, 0x113, 0x1a19, 0x1c1d, 0xd91, 0x1446, 0x1233, 0x170, 0x1c50, 0x13ac, 0x6eb, 0x926, 0x3bf}} #elif RADIX == 32 -{0x4eac70e, 0x1b81d000, 0x19ffc0ce, 0x126b1944, 0x1f0c4f1e, 0x1975c05d, 0x255444e, 0x1f0f55da, 0x15cdb20b, 0x18255786, 0x197ec5f0, 0x16c3f59a, 0x44e8212, 0x1e0ee864, 0x74466c8, 0x1402e123, 0x175ceb38, 0xc31} +{{0x4eac70e, 0x1b81d000, 0x19ffc0ce, 0x126b1944, 0x1f0c4f1e, 0x1975c05d, 0x255444e, 0x1f0f55da, 0x15cdb20b, 0x18255786, 0x197ec5f0, 0x16c3f59a, 0x44e8212, 0x1e0ee864, 0x74466c8, 0x1402e123, 0x175ceb38, 0xc31}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xc0cedc0e80013ab1, 0x313c7a4d632899ff, 0x255444ecbae02efc, 0x35736c82fe1eabb4, 0xb3597ec5f0c12abc, 0x7432113a084ad87e, 0x5c24674466c8f07, 0x14a498dd73ace28} +{{0xc0cedc0e80013ab1, 0x313c7a4d632899ff, 0x255444ecbae02efc, 0x35736c82fe1eabb4, 0xb3597ec5f0c12abc, 0x7432113a084ad87e, 0x5c24674466c8f07, 0x14a498dd73ace28}} #else -{0x19db81d00027563, 0x7a4d632899ffc0, 0x765d70177e189e, 0xbf87aaed095511, 0x18255786ae6d90, 0xad87eb3597ec5f, 0x783ba19089d042, 0xa0170919d119b2, 0xe4931bae759c} +{{0x19db81d00027563, 0x7a4d632899ffc0, 0x765d70177e189e, 0xbf87aaed095511, 0x18255786ae6d90, 0xad87eb3597ec5f, 0x783ba19089d042, 0xa0170919d119b2, 0xe4931bae759c}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1997, 0xa5a, 0x4c4, 0x155d, 0x70b, 0x12f, 0xe9d, 0xfe0, 0x147c, 0x9b6, 0x18ea, 0xf41, 0x1636, 0x1707, 0x1a7e, 0x1326, 0x76d, 0xbef, 0x9fe, 0x1bb4, 0xe22, 0x200, 0x1a11, 0x7e6, 0x1709, 0x1be9, 0x1507, 0x1c63, 0xb6f, 0xceb, 0x1b88, 0x1ef6, 0x16b7, 0x20f, 0x1497, 0x1e1c, 0x26e, 0x139d, 0x330} +{{0x1997, 0xa5a, 0x4c4, 0x155d, 0x70b, 0x12f, 0xe9d, 0xfe0, 0x147c, 0x9b6, 0x18ea, 0xf41, 0x1636, 0x1707, 0x1a7e, 0x1326, 0x76d, 0xbef, 0x9fe, 0x1bb4, 0xe22, 0x200, 0x1a11, 0x7e6, 0x1709, 0x1be9, 0x1507, 0x1c63, 0xb6f, 0xceb, 0x1b88, 0x1ef6, 0x16b7, 0x20f, 0x1497, 0x1e1c, 0x26e, 0x139d, 0x330}} #elif RADIX == 32 -{0xccbbc7d, 0x1a4c452d, 0xbce1755, 0x1f03a742, 0x9b6a3e3, 0x19e838ea, 0x1f5c1ec6, 0x16d99369, 0x13fcbef3, 0x388b768, 0x6d08880, 0x1d37093f, 0x118ea0fb, 0x675adbf, 0xfef6dc4, 0x5c41f6b, 0x13778729, 0x568} +{{0xccbbc7d, 0x1a4c452d, 0xbce1755, 0x1f03a742, 0x9b6a3e3, 0x19e838ea, 0x1f5c1ec6, 0x16d99369, 0x13fcbef3, 0x388b768, 0x6d08880, 0x1d37093f, 0x118ea0fb, 0x675adbf, 0xfef6dc4, 0x5c41f6b, 0x13778729, 0x568}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x1755d262296b32ef, 0xda8f8fe074e84bce, 0xf5c1ec6cf41c7526, 0x44ff2fbcedb326d3, 0x27e6d088801c45bb, 0xd6dfc63a83efa6e1, 0x883ed6fef6dc433a, 0x34e744dde1ca4b} +{{0x1755d262296b32ef, 0xda8f8fe074e84bce, 0xf5c1ec6cf41c7526, 0x44ff2fbcedb326d3, 0x27e6d088801c45bb, 0xd6dfc63a83efa6e1, 0x883ed6fef6dc433a, 0x34e744dde1ca4b}} #else -{0xaba4c452d665de, 0x18fe074e84bce17, 0x367a0e3a936d47, 0x13b6cc9b4fd707b, 0x388b7689fe5f7, 0xfa6e127e6d0888, 0x19d6b6fe31d41f, 0x12e20fb5bfbdb71, 0x69ce89bbc394} +{{0xaba4c452d665de, 0x18fe074e84bce17, 0x367a0e3a936d47, 0x13b6cc9b4fd707b, 0x388b7689fe5f7, 0xfa6e127e6d0888, 0x19d6b6fe31d41f, 0x12e20fb5bfbdb71, 0x69ce89bbc394}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1bf, 0x197b, 0x1b4, 0x1a8a, 0xd22, 0x1cb5, 0x298, 0x76b, 0x16b6, 0x5aa, 0x54b, 0x1b63, 0x1d59, 0x2dc, 0xfe1, 0x1b24, 0x1725, 0x9a8, 0x2dd, 0x150f, 0x12de, 0x9d9, 0x2fd, 0x95f, 0xcc1, 0x1ffd, 0x101b, 0x707, 0x1d9d, 0x464, 0x39e, 0x97b, 0x8cf, 0x4a5, 0xed1, 0x9c3, 0x1b66, 0x1521, 0x112} +{{0x1bf, 0x197b, 0x1b4, 0x1a8a, 0xd22, 0x1cb5, 0x298, 0x76b, 0x16b6, 0x5aa, 0x54b, 0x1b63, 0x1d59, 0x2dc, 0xfe1, 0x1b24, 0x1725, 0x9a8, 0x2dd, 0x150f, 0x12de, 0x9d9, 0x2fd, 0x95f, 0xcc1, 0x1ffd, 0x101b, 0x707, 0x1d9d, 0x464, 0x39e, 0x97b, 0x8cf, 0x4a5, 0xed1, 0x9c3, 0x1b66, 0x1521, 0x112}} #elif RADIX == 32 -{0x10df9458, 0x141b4cbd, 0xd5a45a8, 0x1b58a639, 0x5aab5b1, 0x76c654b, 0x108b73ab, 0x125d923f, 0x5ba9a8b, 0xcb7aa1e, 0x1f17ea76, 0x1facc14a, 0x1c1e037f, 0x2327674, 0x1e97b1cf, 0x14494a8c, 0x1b3270dd, 0x50e} +{{0x10df9458, 0x141b4cbd, 0xd5a45a8, 0x1b58a639, 0x5aab5b1, 0x76c654b, 0x108b73ab, 0x125d923f, 0x5ba9a8b, 0xcb7aa1e, 0x1f17ea76, 0x1facc14a, 0x1c1e037f, 0x2327674, 0x1e97b1cf, 0x14494a8c, 0x1b3270dd, 0x50e}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x45a8a0da65ec37e5, 0xaad6c76b14c72d5a, 0x8b73ab3b632a596, 0xf16ea6a2e4bb247f, 0x295f17ea7665bd50, 0x3b3a70780dfff598, 0x929519e97b1cf119, 0x254876cc9c3768} +{{0x45a8a0da65ec37e5, 0xaad6c76b14c72d5a, 0x8b73ab3b632a596, 0xf16ea6a2e4bb247f, 0x295f17ea7665bd50, 0x3b3a70780dfff598, 0x929519e97b1cf119, 0x254876cc9c3768}} #else -{0x15141b4cbd86fca, 0xc76b14c72d5a45, 0x159db1952cb556b, 0xb92ec91fc22dce, 
0xccb7aa1e2dd4d4, 0x1ff598295f17ea7, 0x188c9d9d383c06f, 0x1a24a5467a5ec73, 0x4a90ed99386e} +{{0x15141b4cbd86fca, 0xc76b14c72d5a45, 0x159db1952cb556b, 0xb92ec91fc22dce, 0xccb7aa1e2dd4d4, 0x1ff598295f17ea7, 0x188c9d9d383c06f, 0x1a24a5467a5ec73, 0x4a90ed99386e}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -2384,261 +2384,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x116d, 0x165f, 0x7d3, 0x488, 0xe08, 0xb12, 0x2e0, 0x18b4, 0xdbb, 0x181e, 0xe50, 0x19b8, 0xc17, 0x1c82, 0x1cf6, 0x7b9, 0xebb, 0x1c0a, 0x183a, 0x1f42, 0x1a3e, 0x176b, 0x19fa, 0x7e4, 0x1646, 0x1dc5, 0x1e9d, 0x4b3, 0x18df, 0xf2d, 0xe2e, 0x2e3, 0x903, 0x19ef, 0x1f94, 0x237, 0x18ef, 0x26c, 0x12f} +{{0x116d, 0x165f, 0x7d3, 0x488, 0xe08, 0xb12, 0x2e0, 0x18b4, 0xdbb, 0x181e, 0xe50, 0x19b8, 0xc17, 0x1c82, 0x1cf6, 0x7b9, 0xebb, 0x1c0a, 0x183a, 0x1f42, 0x1a3e, 0x176b, 0x19fa, 0x7e4, 0x1646, 0x1dc5, 0x1e9d, 0x4b3, 0x18df, 0xf2d, 0xe2e, 0x2e3, 0x903, 0x19ef, 0x1f94, 0x237, 0x18ef, 0x26c, 0x12f}} #elif RADIX == 32 -{0x18b69673, 0x107d3b2f, 0x49c1048, 0x5a0b816, 0x181e6dde, 0x1f370e50, 0x1b720982, 0xbb3dcf3, 0x1075c0a7, 0x1e8fbe85, 0x4cfd5da, 0x18b6463f, 0x12cfd3bd, 0x796e37c, 0x62e3717, 0x533de90, 0x7788dff, 0x2e6} +{{0x18b69673, 0x107d3b2f, 0x49c1048, 0x5a0b816, 0x181e6dde, 0x1f370e50, 0x1b720982, 0xbb3dcf3, 0x1075c0a7, 0x1e8fbe85, 0x4cfd5da, 0x18b6463f, 0x12cfd3bd, 0x796e37c, 0x62e3717, 0x533de90, 0x7788dff, 0x2e6}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x104883e9d97e2da5, 0x79b778b41702c49c, 0xb720982f9b872860, 0x2c1d7029d767b9e7, 0xc7e4cfd5daf47df4, 0x71be4b3f4ef716c8, 0x67bd2062e37173cb, 0x1089b31de237fca} +{{0x104883e9d97e2da5, 0x79b778b41702c49c, 
0xb720982f9b872860, 0x2c1d7029d767b9e7, 0xc7e4cfd5daf47df4, 0x71be4b3f4ef716c8, 0x67bd2062e37173cb, 0x1089b31de237fca}} #else -{0x9107d3b2fc5b4b, 0x178b41702c49c10, 0x17cdc394303cdb, 0x75d9ee79edc826, 0x15e8fbe8583ae05, 0x1716c8c7e4cfd5d, 0x19e5b8df259fa77, 0x1299ef4818b8dc5, 0x613663bc46ff} +{{0x9107d3b2fc5b4b, 0x178b41702c49c10, 0x17cdc394303cdb, 0x75d9ee79edc826, 0x15e8fbe8583ae05, 0x1716c8c7e4cfd5d, 0x19e5b8df259fa77, 0x1299ef4818b8dc5, 0x613663bc46ff}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x1c5d, 0x1d97, 0x1f4, 0x122, 0x1382, 0x2c4, 0xb8, 0x1e2d, 0x136e, 0x607, 0x394, 0x1e6e, 0x1305, 0x1720, 0xf3d, 0x19ee, 0x13ae, 0x1702, 0x160e, 0x17d0, 0x1e8f, 0x15da, 0x67e, 0x11f9, 0xd91, 0xf71, 0x1fa7, 0x192c, 0xe37, 0x13cb, 0x1b8b, 0x18b8, 0x1a40, 0x67b, 0x1fe5, 0x188d, 0x63b, 0x189b, 0x47b} +{{0x1c5d, 0x1d97, 
0x1f4, 0x122, 0x1382, 0x2c4, 0xb8, 0x1e2d, 0x136e, 0x607, 0x394, 0x1e6e, 0x1305, 0x1720, 0xf3d, 0x19ee, 0x13ae, 0x1702, 0x160e, 0x17d0, 0x1e8f, 0x15da, 0x67e, 0x11f9, 0xd91, 0xf71, 0x1fa7, 0x192c, 0xe37, 0x13cb, 0x1b8b, 0x18b8, 0x1a40, 0x67b, 0x1fe5, 0x188d, 0x63b, 0x189b, 0x47b}} #elif RADIX == 32 -{0x1e2ed505, 0x41f4ecb, 0x11270412, 0x11682e05, 0x6079b77, 0x17cdc394, 0x1edc8260, 0x1aecf73c, 0xc1d7029, 0x17a3efa1, 0x1933f576, 0xe2d918f, 0x4b3f4ef, 0x19e5b8df, 0x18b8dc5, 0x194cf7a4, 0x11de237f, 0x159} +{{0x1e2ed505, 0x41f4ecb, 0x11270412, 0x11682e05, 0x6079b77, 0x17cdc394, 0x1edc8260, 0x1aecf73c, 0xc1d7029, 0x17a3efa1, 0x1933f576, 0xe2d918f, 0x4b3f4ef, 0x19e5b8df, 0x18b8dc5, 0x194cf7a4, 0x11de237f, 0x159}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x41220fa765f8bb5, 0x1e6dde2d05c0b127, 0xedc8260be6e1ca18, 0xb075c0a75d9ee79, 0x31f933f576bd1f7d, 0xdc6f92cfd3bdc5b2, 0x99ef4818b8dc5cf2, 0x6e26cc7788dff2} +{{0x41220fa765f8bb5, 0x1e6dde2d05c0b127, 0xedc8260be6e1ca18, 0xb075c0a75d9ee79, 0x31f933f576bd1f7d, 0xdc6f92cfd3bdc5b2, 0x99ef4818b8dc5cf2, 0x6e26cc7788dff2}} #else -{0x2441f4ecbf176a, 0x1de2d05c0b12704, 0x105f370e50c0f36, 0x9d767b9e7b7209, 0xd7a3efa160eb81, 0x1dc5b231f933f57, 0xe796e37c967e9d, 0x1ca67bd2062e371, 0xdc4d98ef11bf} +{{0x2441f4ecbf176a, 0x1de2d05c0b12704, 0x105f370e50c0f36, 0x9d767b9e7b7209, 0xd7a3efa160eb81, 0x1dc5b231f933f57, 0xe796e37c967e9d, 0x1ca67bd2062e371, 0xdc4d98ef11bf}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x1e97, 0x1f23, 0x161, 0x7b2, 0x1221, 0x1d36, 0x14f1, 0xaa0, 0xce3, 0x1f6c, 0xeaf, 0x549, 0xa24, 0xe15, 0x1862, 0x1dba, 0xc75, 0xf1d, 0x15f9, 0x50d, 0xa99, 0x97b, 0xc21, 0x1549, 0x1c88, 0xfbe, 0xe33, 0xb27, 0x1dae, 0xb00, 0x82f, 0x44a, 0x371, 0x5c0, 0x1174, 0x1b28, 0xa0b, 0x9bd, 0x206} +{{0x1e97, 0x1f23, 0x161, 0x7b2, 0x1221, 0x1d36, 0x14f1, 0xaa0, 0xce3, 0x1f6c, 0xeaf, 0x549, 0xa24, 0xe15, 0x1862, 0x1dba, 0xc75, 0xf1d, 0x15f9, 0x50d, 0xa99, 0x97b, 0xc21, 0x1549, 0x1c88, 0xfbe, 0xe33, 0xb27, 0x1dae, 0xb00, 0x82f, 0x44a, 0x371, 0x5c0, 0x1174, 0x1b28, 0xa0b, 0x9bd, 0x206}} #elif RADIX == 32 -{0x1f4ba664, 0x4161f91, 0xda4427b, 0x15053c7a, 0x1f6c671a, 0x10a92eaf, 0x11385544, 0x75edd61, 0xbf2f1d6, 0x1aa64a1b, 0x9610a5e, 0x17dc88aa, 0xc9dc66f, 0x158076b9, 0x244a417, 0x1d0b8037, 0x105eca22, 0x7ea} +{{0x1f4ba664, 0x4161f91, 0xda4427b, 0x15053c7a, 0x1f6c671a, 0x10a92eaf, 0x11385544, 0x75edd61, 0xbf2f1d6, 0x1aa64a1b, 0x9610a5e, 0x17dc88aa, 0xc9dc66f, 0x158076b9, 0x244a417, 0x1d0b8037, 0x105eca22, 0x7ea}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x427b20b0fc8fd2e9, 0xb19c6aa0a78f4da4, 0x13855448549757fd, 0xdafcbc758ebdbac3, 0x1549610a5ed53250, 0x3b5cb27719befb91, 0x17006e244a417ac0, 0x1026f5417b288ba} +{{0x427b20b0fc8fd2e9, 0xb19c6aa0a78f4da4, 0x13855448549757fd, 0xdafcbc758ebdbac3, 0x1549610a5ed53250, 0x3b5cb27719befb91, 0x17006e244a417ac0, 0x1026f5417b288ba}} #else -{0xf64161f91fa5d3, 0x6aa0a78f4da442, 0x242a4babfed8ce, 0x163af6eb0c4e155, 0x1daa64a1b5f978e, 0x1efb911549610a5, 0x1d601dae593b8cd, 0xe85c01b8912905, 0x54dea82f6511} +{{0xf64161f91fa5d3, 0x6aa0a78f4da442, 0x242a4babfed8ce, 0x163af6eb0c4e155, 0x1daa64a1b5f978e, 0x1efb911549610a5, 0x1d601dae593b8cd, 0xe85c01b8912905, 0x54dea82f6511}} #endif #endif , #if 0 #elif RADIX == 16 -{0xcc, 0x1cb1, 0x706, 0x1f0b, 0xa79, 0xd89, 0xd1f, 0x1067, 0x1c50, 0x1e70, 0x41c, 0x1ce8, 0xd29, 0x7c7, 0x733, 0x460, 0x1e22, 0xe0b, 0x7f6, 0x1387, 0xe84, 0x273, 0x13e1, 0x1f1d, 0x1643, 0x1f1a, 0x3e, 0x7b7, 0xecf, 0x1578, 0x357, 0xaf4, 0x1f6c, 0x4c8, 0x11b9, 0x866, 0x80a, 0x13e2, 0x499} +{{0xcc, 0x1cb1, 0x706, 0x1f0b, 0xa79, 0xd89, 0xd1f, 0x1067, 0x1c50, 0x1e70, 0x41c, 0x1ce8, 0xd29, 0x7c7, 0x733, 0x460, 0x1e22, 0xe0b, 0x7f6, 0x1387, 0xe84, 0x273, 0x13e1, 0x1f1d, 0x1643, 0x1f1a, 0x3e, 0x7b7, 0xecf, 0x1578, 0x357, 0xaf4, 0x1f6c, 0x4c8, 0x11b9, 0x866, 0x80a, 0x13e2, 0x499}} #elif RADIX == 32 -{0x1066573b, 0x16706e58, 0x254f3f0, 0x33b47db, 0x1e70e284, 0x79d041c, 0x199f1da5, 0x222301c, 0xfece0bf, 0x1ba1270e, 0x1d9f089c, 0x35643f8, 0x1edc07df, 0x1abc3b3c, 0x18af41ab, 0xe4991f6, 0x5219a3, 0x292} +{{0x1066573b, 0x16706e58, 0x254f3f0, 0x33b47db, 0x1e70e284, 0x79d041c, 0x199f1da5, 0x222301c, 0xfece0bf, 0x1ba1270e, 0x1d9f089c, 0x35643f8, 0x1edc07df, 0x1abc3b3c, 0x18af41ab, 0xe4991f6, 0x5219a3, 0x292}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf3f0b38372c41995, 0xc38a106768fb6254, 
0x99f1da53ce820e79, 0x73fb382fc4446039, 0x7f1d9f089cdd0938, 0x1d9e7b701f7c6ac8, 0x9323ed8af41abd5e, 0x15cf890148668dc} +{{0xf3f0b38372c41995, 0xc38a106768fb6254, 0x99f1da53ce820e79, 0x73fb382fc4446039, 0x7f1d9f089cdd0938, 0x1d9e7b701f7c6ac8, 0x9323ed8af41abd5e, 0x15cf890148668dc}} #else -{0x1e16706e588332b, 0x106768fb6254f3, 0x129e741073ce1c5, 0x1f111180e667c76, 0x19ba1270e7f6705, 0x1c6ac87f1d9f089, 0x1eaf0ecf3db80fb, 0x1724c8fb62bd06a, 0x109f120290cd1} +{{0x1e16706e588332b, 0x106768fb6254f3, 0x129e741073ce1c5, 0x1f111180e667c76, 0x19ba1270e7f6705, 0x1c6ac87f1d9f089, 0x1eaf0ecf3db80fb, 0x1724c8fb62bd06a, 0x109f120290cd1}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0xd3d, 0x1bb8, 0x7b6, 0x2b7, 0x1f97, 0xc1a, 0x13ef, 0x6ac, 0xf50, 0x12de, 0xd45, 0x16d4, 0x69c, 0x16a8, 0xde4, 0xbd6, 0x14ea, 0x1d58, 0x193c, 0x160b, 0x1fc5, 0x20b, 0x1376, 0xbbb, 0x732, 0x8f8, 0x10f6, 0x1fef, 0xe7b, 0xb28, 0x10ba, 0x953, 0x1cfe, 0x1437, 0x1422, 0x178b, 0x1524, 0x590, 0x334} +{{0xd3d, 0x1bb8, 0x7b6, 0x2b7, 0x1f97, 0xc1a, 0x13ef, 0x6ac, 0xf50, 0x12de, 0xd45, 0x16d4, 0x69c, 0x16a8, 0xde4, 0xbd6, 0x14ea, 0x1d58, 0x193c, 0x160b, 0x1fc5, 0x20b, 0x1376, 0xbbb, 0x732, 0x8f8, 0x10f6, 0x1fef, 0xe7b, 0xb28, 0x10ba, 0x953, 0x1cfe, 0x1437, 0x1422, 0x178b, 0x1524, 0x590, 0x334}} #elif RADIX == 32 -{0x69ebcc0, 0xe7b6ddc, 0x6bf2e2b, 0x1564fbd8, 0x12de7a81, 0x12da8d45, 0x125aa0d3, 0xea5eb37, 0x1279d58a, 0x1ff16c17, 0x1b9bb082, 0x1f07325d, 0x1fbe1ec8, 0x59439ef, 0x1c95385d, 0x8a86fcf, 0x925e2e8, 0xc85} +{{0x69ebcc0, 0xe7b6ddc, 0x6bf2e2b, 0x1564fbd8, 0x12de7a81, 0x12da8d45, 0x125aa0d3, 0xea5eb37, 0x1279d58a, 0x1ff16c17, 0x1b9bb082, 0x1f07325d, 0x1fbe1ec8, 0x59439ef, 0x1c95385d, 0x8a86fcf, 0x925e2e8, 0xc85}} #elif 
RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x2e2b73db6ee1a7af, 0x79ea06ac9f7b06bf, 0x25aa0d396d46a2cb, 0xbc9e75629d4bd66f, 0x4bbb9bb082ff8b60, 0x1cf7fef87b23e0e6, 0x50df9fc95385d2ca, 0x51642a4978ba11} +{{0x2e2b73db6ee1a7af, 0x79ea06ac9f7b06bf, 0x25aa0d396d46a2cb, 0xbc9e75629d4bd66f, 0x4bbb9bb082ff8b60, 0x1cf7fef87b23e0e6, 0x50df9fc95385d2ca, 0x51642a4978ba11}} #else -{0x56e7b6ddc34f5e, 0x6ac9f7b06bf2e, 0x9cb6a35165bcf5, 0xa752f59bc96a83, 0x5ff16c1793ceac, 0x3e0e64bbb9bb08, 0x9650e7bff7c3d9, 0x45437e7f254e17, 0xa2c85492f174} +{{0x56e7b6ddc34f5e, 0x6ac9f7b06bf2e, 0x9cb6a35165bcf5, 0xa752f59bc96a83, 0x5ff16c1793ceac, 0x3e0e64bbb9bb08, 0x9650e7bff7c3d9, 0x45437e7f254e17, 0xa2c85492f174}} #endif #endif , #if 0 #elif RADIX == 16 -{0xb59, 0x1fb4, 0x1dac, 0x52d, 0x794, 0x1254, 0x1f9f, 0xdba, 0x151d, 0x1f01, 0x7f7, 0xb2b, 0x7e4, 0x1b36, 0x912, 0x1366, 0x1a04, 0x8ed, 0x1e58, 0x18f0, 0xffd, 0x455, 0xba9, 0x16d, 0x155f, 0x1198, 0x1264, 0x158b, 0x766, 0x66e, 0x1403, 0x15fd, 0xe0e, 0x1368, 0x9e6, 0x4af, 0x1fba, 0x1047, 0x464} +{{0xb59, 0x1fb4, 0x1dac, 0x52d, 0x794, 0x1254, 0x1f9f, 0xdba, 0x151d, 0x1f01, 0x7f7, 0xb2b, 0x7e4, 0x1b36, 0x912, 0x1366, 0x1a04, 0x8ed, 0x1e58, 0x18f0, 0xffd, 0x455, 0xba9, 0x16d, 0x155f, 0x1198, 0x1264, 0x158b, 0x766, 0x66e, 0x1403, 0x15fd, 0xe0e, 0x1368, 0x9e6, 0x4af, 0x1fba, 0x1047, 0x464}} #elif RADIX == 32 -{0x5acd34c, 0x1bdacfda, 0x150f2852, 0xdd7e7e4, 0x1f01a8eb, 0x116567f7, 0x96cd8fc, 0x49b324, 0x1cb08edd, 0xbff71e1, 0xd5d4915, 0x13155f0b, 0x162e4c91, 0x13371d9a, 0x1d5fda01, 0x19a6d0e0, 0x1dd12bd3, 0x3f} +{{0x5acd34c, 0x1bdacfda, 0x150f2852, 0xdd7e7e4, 0x1f01a8eb, 0x116567f7, 0x96cd8fc, 0x49b324, 0x1cb08edd, 0xbff71e1, 0xd5d4915, 0x13155f0b, 0x162e4c91, 0x13371d9a, 0x1d5fda01, 0x19a6d0e0, 0x1dd12bd3, 0x3f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x2852ded67ed16b34, 0x6a3adbafcfc950f, 0x96cd8fc8b2b3fbfc, 0xf2c23b740936648, 0xe16d5d49155ffb8f, 0x8ecd58b9324662ab, 0x4da1c1d5fda0199b, 0x16411ff744af4f3} +{{0x2852ded67ed16b34, 0x6a3adbafcfc950f, 0x96cd8fc8b2b3fbfc, 0xf2c23b740936648, 0xe16d5d49155ffb8f, 0x8ecd58b9324662ab, 0x4da1c1d5fda0199b, 0x16411ff744af4f3}} #else -{0xa5bdacfda2d669, 0x1adbafcfc950f28, 0x1e45959fdfe0351, 0x1d024d99225b363, 0xabff71e1e58476, 0x662abe16d5d491, 0xccdc766ac5c992, 0x1cd36870757f680, 0x11823fee895e9} +{{0xa5bdacfda2d669, 0x1adbafcfc950f28, 0x1e45959fdfe0351, 0x1d024d99225b363, 0xabff71e1e58476, 0x662abe16d5d491, 0xccdc766ac5c992, 0x1cd36870757f680, 0x11823fee895e9}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1156, 0x273, 0x1153, 0x89b, 0xc67, 0x9dc, 0x14b5, 0x1d27, 0x1c5e, 0x18e6, 0x1dfa, 0x1beb, 0x12e7, 0xe02, 0x1614, 0x12b0, 0x1646, 0x1bdb, 0x1e1f, 0x1eb6, 0x361, 0x1fb, 0x2ee, 0xee2, 0x178c, 0xedd, 0x1ba6, 0xf1c, 0x1e7f, 0x1dac, 0x137d, 0x18db, 0x8e8, 0xa0, 0x1faf, 0x5cb, 0x1078, 0x1562, 0x36e} +{{0x1156, 0x273, 0x1153, 0x89b, 0xc67, 0x9dc, 0x14b5, 0x1d27, 0x1c5e, 0x18e6, 0x1dfa, 0x1beb, 0x12e7, 0xe02, 0x1614, 0x12b0, 0x1646, 0x1bdb, 0x1e1f, 0x1eb6, 0x361, 0x1fb, 0x2ee, 0xee2, 0x178c, 0xedd, 0x1ba6, 0xf1c, 0x1e7f, 0x1dac, 0x137d, 0x18db, 0x8e8, 0xa0, 0x1faf, 0x5cb, 0x1078, 0x1562, 0x36e}} #elif RADIX == 32 -{0x18ab4116, 0x17153139, 0x1718ce89, 0x93d2d53, 0x18e6e2f7, 0x1f7d7dfa, 0xa380a5c, 0x4695858, 0x1c3fbdbb, 0x18d87d6d, 0x217707e, 0x1bb78c77, 0x1c7374ce, 0x1ed679fd, 0x118db9be, 0xbc1408e, 0x3c172ff, 0x214} +{{0x18ab4116, 0x17153139, 0x1718ce89, 0x93d2d53, 0x18e6e2f7, 0x1f7d7dfa, 0xa380a5c, 0x4695858, 0x1c3fbdbb, 0x18d87d6d, 0x217707e, 0x1bb78c77, 0x1c7374ce, 0x1ed679fd, 0x118db9be, 0xbc1408e, 0x3c172ff, 0x214}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xce89b8a989ce2ad0, 0x9b8bdd27a5aa7718, 0xa380a5cfbebefd63, 0x6f0fef6ec8d2b0b0, 0x8ee217707ec6c3eb, 0x3cfef1cdd33b76f1, 0x82811d18db9bef6b, 0x7558a0f05cbfd7} +{{0xce89b8a989ce2ad0, 0x9b8bdd27a5aa7718, 0xa380a5cfbebefd63, 0x6f0fef6ec8d2b0b0, 0x8ee217707ec6c3eb, 0x3cfef1cdd33b76f1, 0x82811d18db9bef6b, 0x7558a0f05cbfd7}} #else -{0x1137153139c55a0, 0x1dd27a5aa7718ce, 0xe7df5f7eb1cdc5, 0x1b234ac2c28e029, 0x1d8d87d6de1fded, 0x1b76f18ee217707, 0x17b59e7f78e6e99, 0x15e0a0474636e6f, 0xeab141e0b97f} +{{0x1137153139c55a0, 0x1dd27a5aa7718ce, 0xe7df5f7eb1cdc5, 0x1b234ac2c28e029, 0x1d8d87d6de1fded, 0x1b76f18ee217707, 0x17b59e7f78e6e99, 0x15e0a0474636e6f, 0xeab141e0b97f}} #endif #endif , #if 0 #elif RADIX == 16 -{0xb32, 0x149, 0x1615, 0x77e, 0xf55, 0x189, 0xe2a, 0x13bc, 0xf83, 0x124d, 0xcaa, 0x22, 0xcea, 0x8f9, 0xc5e, 0x8bc, 0x4ff, 0x14da, 0x394, 0x4a2, 0x1767, 0x1d20, 0x1531, 0x1dff, 0x929, 0x15cf, 0x1f69, 0x1630, 0x669, 0x11ec, 0x162c, 0xcf3, 0xde5, 0x185f, 0x1da0, 0x1db9, 0x1d93, 0xb9b, 0x38f} +{{0xb32, 0x149, 0x1615, 0x77e, 0xf55, 0x189, 0xe2a, 0x13bc, 0xf83, 0x124d, 0xcaa, 0x22, 0xcea, 0x8f9, 0xc5e, 0x8bc, 0x4ff, 0x14da, 0x394, 0x4a2, 0x1767, 0x1d20, 0x1531, 0x1dff, 0x929, 0x15cf, 0x1f69, 0x1630, 0x669, 0x11ec, 0x162c, 0xcf3, 0xde5, 0x185f, 0x1da0, 0x1db9, 0x1d93, 0xb9b, 0x38f}} #elif RADIX == 32 -{0x15994382, 0x1d6150a4, 0x25eaa77, 0x1de38a83, 0x124d7c1c, 0x8044caa, 0xf23e59d, 0xff45e31, 0x7294da2, 0x5d9c944, 0x1fa98f48, 0x19e929ef, 0x18c3ed35, 0x8f619a6, 0xacf3b16, 0x830bede, 0xc9f6e7b, 0x1df} +{{0x15994382, 0x1d6150a4, 0x25eaa77, 0x1de38a83, 0x124d7c1c, 0x8044caa, 0xf23e59d, 0xff45e31, 0x7294da2, 0x5d9c944, 0x1fa98f48, 
0x19e929ef, 0x18c3ed35, 0x8f619a6, 0xacf3b16, 0x830bede, 0xc9f6e7b, 0x1df}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xaa77eb0a85256650, 0x35f073bc7150625e, 0xf23e59d402265549, 0x21ca53689fe8bc62, 0x3dffa98f482ece4a, 0xcd3630fb4d73d25, 0x617dbcacf3b1647b, 0x17ae6fb27db9ed0} +{{0xaa77eb0a85256650, 0x35f073bc7150625e, 0xf23e59d402265549, 0x21ca53689fe8bc62, 0x3dffa98f482ece4a, 0xcd3630fb4d73d25, 0x617dbcacf3b1647b, 0x17ae6fb27db9ed0}} #else -{0xefd6150a4acca1, 0x73bc7150625eaa, 0xea01132aa49af8, 0x27fa2f18bc8f96, 0x105d9c944394a6d, 0x173d253dffa98f4, 0x123d8669b187da6, 0x14185f6f2b3cec5, 0x145cdf64fb73d} +{{0xefd6150a4acca1, 0x73bc7150625eaa, 0xea01132aa49af8, 0x27fa2f18bc8f96, 0x105d9c944394a6d, 0x173d253dffa98f4, 0x123d8669b187da6, 0x14185f6f2b3cec5, 0x145cdf64fb73d}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -2860,261 +2860,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x1414, 0x1912, 0xddb, 0xb9, 0x121b, 0x195d, 0x6c5, 0xe75, 0x1d16, 0xcaf, 0x1a05, 0x15cc, 0x1537, 0x1c57, 0x141a, 0x1b9d, 0x5b0, 0x9e7, 0xf10, 0x300, 0x1d5c, 0xc13, 0x245, 0x1b91, 0x352, 0x730, 0xd55, 0x1ca3, 0x1dac, 0x7b0, 0x15b9, 0x1ba6, 0x7d6, 0xba4, 0xc12, 0x649, 0xf1, 0xd51, 0x107} +{{0x1414, 0x1912, 0xddb, 0xb9, 0x121b, 0x195d, 0x6c5, 0xe75, 0x1d16, 0xcaf, 0x1a05, 0x15cc, 0x1537, 0x1c57, 0x141a, 0x1b9d, 0x5b0, 0x9e7, 0xf10, 0x300, 0x1d5c, 0xc13, 0x245, 0x1b91, 0x352, 0x730, 0xd55, 0x1ca3, 0x1dac, 0x7b0, 0x15b9, 0x1ba6, 0x7d6, 0xba4, 0xc12, 0x649, 0xf1, 0xd51, 0x107}} #elif RADIX == 32 -{0xa0a1383, 0x12ddbc89, 0x1764360b, 0x13a9b172, 0xcafe8b3, 0x1eb99a05, 0xd715ea6, 0x1b0dced0, 0x1e209e72, 0x1f570600, 0x11122b04, 0x60352dc, 0x128daaa7, 
0x13d876b3, 0xdba6adc, 0x497487d, 0x7899258, 0x208} +{{0xa0a1383, 0x12ddbc89, 0x1764360b, 0x13a9b172, 0xcafe8b3, 0x1eb99a05, 0xd715ea6, 0x1b0dced0, 0x1e209e72, 0x1f570600, 0x11122b04, 0x60352dc, 0x128daaa7, 0x13d876b3, 0xdba6adc, 0x497487d, 0x7899258, 0x208}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x360b96ede44a8284, 0xbfa2ce75362e5764, 0xd715ea6f5ccd02b2, 0x788279cb61b9da0, 0x5b91122b04fab830, 0x3b59ca36aa9cc06a, 0x2e90fadba6adc9ec, 0x17b5441e2649609} +{{0x360b96ede44a8284, 0xbfa2ce75362e5764, 0xd715ea6f5ccd02b2, 0x788279cb61b9da0, 0x5b91122b04fab830, 0x3b59ca36aa9cc06a, 0x2e90fadba6adc9ec, 0x17b5441e2649609}} #else -{0x172ddbc8950509, 0xce75362e576436, 0x137ae6681595fd1, 0x12d86e76835c57a, 0x9f570600f104f3, 0x1cc06a5b91122b0, 0x4f61dace51b554, 0x24ba43eb6e9ab7, 0x146a883c4c92c} +{{0x172ddbc8950509, 0xce75362e576436, 0x137ae6681595fd1, 0x12d86e76835c57a, 0x9f570600f104f3, 0x1cc06a5b91122b0, 0x4f61dace51b554, 0x24ba43eb6e9ab7, 0x146a883c4c92c}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x1507, 0x1e44, 0xb76, 0x182e, 0xc86, 0xe57, 0x9b1, 0x139d, 0x1f45, 0xb2b, 0x681, 0x1d73, 0x1d4d, 0x1715, 0xd06, 0x6e7, 0x196c, 0x279, 0x3c4, 0xc0, 0x1f57, 0xb04, 0x891, 0x16e4, 0xd4, 0x9cc, 0x1b55, 0x728, 0x76b, 0x9ec, 0x156e, 0x16e9, 0x1f5, 0x12e9, 0xb04, 0x992, 0x83c, 0x1b54, 0x2c1} +{{0x1507, 0x1e44, 0xb76, 0x182e, 0xc86, 0xe57, 0x9b1, 0x139d, 0x1f45, 0xb2b, 0x681, 0x1d73, 0x1d4d, 0x1715, 0xd06, 0x6e7, 0x196c, 0x279, 0x3c4, 0xc0, 0x1f57, 0xb04, 0x891, 0x16e4, 0xd4, 0x9cc, 0x1b55, 0x728, 0x76b, 0x9ec, 0x156e, 0x16e9, 0x1f5, 0x12e9, 0xb04, 0x992, 0x83c, 0x1b54, 0x2c1}} #elif RADIX == 32 -{0xa83b449, 0x1cb76f22, 0x15d90d82, 0x1cea6c5c, 0xb2bfa2c, 0x17ae6681, 0x35c57a9, 0x16c373b4, 0x788279c, 0x7d5c180, 0x4448ac1, 0x1980d4b7, 0x1ca36aa9, 0x4f61dac, 0xb6e9ab7, 0x125d21f, 0x1e26496, 0x122} +{{0xa83b449, 0x1cb76f22, 0x15d90d82, 0x1cea6c5c, 0xb2bfa2c, 0x17ae6681, 0x35c57a9, 0x16c373b4, 0x788279c, 0x7d5c180, 0x4448ac1, 0x1980d4b7, 0x1ca36aa9, 0x4f61dac, 0xb6e9ab7, 0x125d21f, 0x1e26496, 0x122}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xd82e5bb7912a0ed, 0xafe8b39d4d8b95d9, 0x35c57a9bd73340ac, 0x1e209e72d86e768, 0x96e4448ac13eae0c, 0xed6728daaa7301a, 0x4ba43eb6e9ab727b, 0x1ed51078992582} +{{0xd82e5bb7912a0ed, 0xafe8b39d4d8b95d9, 0x35c57a9bd73340ac, 0x1e209e72d86e768, 0x96e4448ac13eae0c, 0xed6728daaa7301a, 0x4ba43eb6e9ab727b, 0x1ed51078992582}} #else -{0x105cb76f22541da, 0xb39d4d8b95d90d, 0x14deb99a05657f4, 0x1cb61b9da0d715e, 0x27d5c1803c413c, 0x7301a96e4448ac, 0x193d876b3946d55, 0x92e90fadba6ad, 0x3daa20f1324b} +{{0x105cb76f22541da, 0xb39d4d8b95d90d, 0x14deb99a05657f4, 0x1cb61b9da0d715e, 0x27d5c1803c413c, 0x7301a96e4448ac, 0x193d876b3946d55, 0x92e90fadba6ad, 0x3daa20f1324b}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0xa07, 0x1f97, 0x13c4, 0xb69, 0x15ec, 0x161d, 0x194, 0x135c, 0xe18, 0x119a, 0x684, 0x199, 0x1a93, 0x906, 0x62e, 0x1ad4, 0xc99, 0x40b, 0x10df, 0xf12, 0x9ee, 0x93, 0x1837, 0x42d, 0x1ea3, 0x1967, 0x1d41, 0x422, 0x2d5, 0x17d0, 0x1550, 0x1c2d, 0x139a, 0x152b, 0xa57, 0x1072, 0x13bf, 0x1fe7, 0x57a} +{{0xa07, 0x1f97, 0x13c4, 0xb69, 0x15ec, 0x161d, 0x194, 0x135c, 0xe18, 0x119a, 0x684, 0x199, 0x1a93, 0x906, 0x62e, 0x1ad4, 0xc99, 0x40b, 0x10df, 0xf12, 0x9ee, 0x93, 0x1837, 0x42d, 0x1ea3, 0x1967, 0x1d41, 0x422, 0x2d5, 0x17d0, 0x1550, 0x1c2d, 0x139a, 0x152b, 0xa57, 0x1072, 0x13bf, 0x1fe7, 0x57a}} #elif RADIX == 32 -{0x1503e7ec, 0x133c4fcb, 0x76bd8b6, 0x1ae0652c, 0x119a70c4, 0xc332684, 0x17241b52, 0x99d6a18, 0x1be40b6, 0x1a7b9e25, 0xdc1b824, 0xcfea321, 0x108ba839, 0xbe80b54, 0x15c2daa8, 0x15ea5739, 0x1dfc1c94, 0xd3c} +{{0x1503e7ec, 0x133c4fcb, 0x76bd8b6, 0x1ae0652c, 0x119a70c4, 0xc332684, 0x17241b52, 0x99d6a18, 0x1be40b6, 0x1a7b9e25, 0xdc1b824, 0xcfea321, 0x108ba839, 0xbe80b54, 0x15c2daa8, 0x15ea5739, 0x1dfc1c94, 0xd3c}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xd8b699e27e5d40f9, 0x69c3135c0ca5876b, 0x7241b52619934246, 0x286f902d933ad431, 0x642dc1b824d3dcf1, 0x5aa422ea0e59fd4, 0xd4ae735c2daa85f4, 0x1a7f9e77f07252b} +{{0xd8b699e27e5d40f9, 0x69c3135c0ca5876b, 0x7241b52619934246, 0x286f902d933ad431, 0x642dc1b824d3dcf1, 0x5aa422ea0e59fd4, 0xd4ae735c2daa85f4, 0x1a7f9e77f07252b}} #else -{0x16d33c4fcba81f3, 0x1135c0ca5876bd8, 0x930cc9a12334e1, 0x164ceb50c5c906d, 0x9a7b9e250df205, 0x59fd4642dc1b82, 0x2fa02d52117507, 0xaf52b9cd70b6aa, 0x19ff3cefe0e4a} +{{0x16d33c4fcba81f3, 0x1135c0ca5876bd8, 0x930cc9a12334e1, 0x164ceb50c5c906d, 0x9a7b9e250df205, 0x59fd4642dc1b82, 0x2fa02d52117507, 0xaf52b9cd70b6aa, 0x19ff3cefe0e4a}} #endif #endif , #if 0 #elif RADIX == 16 -{0x2, 0x2d8, 0x113e, 0xa74, 0x660, 0x141f, 0x64f, 0x885, 0x46, 0x17b9, 0x94f, 0x1b44, 0x361, 0xbf6, 0x1f17, 0x583, 0x18b3, 0x118e, 0x9ba, 0x49f, 0x1fc3, 0x13eb, 0x11c8, 0xcc8, 0x1b2d, 0x8c, 0x9c6, 0x1d9, 0xf33, 0x53d, 0x129a, 0x1b4a, 0x65, 0x169a, 0xe74, 0x544, 0x17e3, 0x1f0f, 0x2a6} +{{0x2, 0x2d8, 0x113e, 0xa74, 0x660, 0x141f, 0x64f, 0x885, 0x46, 0x17b9, 0x94f, 0x1b44, 0x361, 0xbf6, 0x1f17, 0x583, 0x18b3, 0x118e, 0x9ba, 0x49f, 0x1fc3, 0x13eb, 0x11c8, 0xcc8, 0x1b2d, 0x8c, 0x9c6, 0x1d9, 0xf33, 0x53d, 0x129a, 0x1b4a, 0x65, 0x169a, 0xe74, 0x544, 0x17e3, 0x1f0f, 0x2a6}} #elif RADIX == 32 -{0x1324b, 0x913e16c, 
0x7ccc0a7, 0x42993e8, 0x17b90232, 0x768894f, 0xbafd86c, 0xb32c1fc, 0x137518ec, 0x1ff0c93e, 0x88e44fa, 0x119b2d66, 0x76538c0, 0x29ebccc, 0xbb4a94d, 0x1d2d3406, 0x1f19511c, 0x3fd} +{{0x1324b, 0x913e16c, 0x7ccc0a7, 0x42993e8, 0x17b90232, 0x768894f, 0xbafd86c, 0xb32c1fc, 0x137518ec, 0x1ff0c93e, 0x88e44fa, 0x119b2d66, 0x76538c0, 0x29ebccc, 0xbb4a94d, 0x1d2d3406, 0x1f19511c, 0x3fd}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xc0a7489f0b60004c, 0xe408c885327d07cc, 0xbafd86c3b444a7de, 0xf4dd463b166583f8, 0xacc88e44faff8649, 0x5e661d94e3023365, 0x5a680cbb4a94d14f, 0xf7c3efc654473a} +{{0xc0a7489f0b60004c, 0xe408c885327d07cc, 0xbafd86c3b444a7de, 0xf4dd463b166583f8, 0xacc88e44faff8649, 0x5e661d94e3023365, 0x5a680cbb4a94d14f, 0xf7c3efc654473a}} #else -{0x14e913e16c00099, 0xc885327d07ccc0, 0x161da2253ef7204, 0xc59960fe2ebf61, 0x15ff0c93e9ba8c7, 0x23365acc88e44f, 0x8a7af330eca718, 0xe969a032ed2a53, 0x3f87df8ca88e} +{{0x14e913e16c00099, 0xc885327d07ccc0, 0x161da2253ef7204, 0xc59960fe2ebf61, 0x15ff0c93e9ba8c7, 0x23365acc88e44f, 0x8a7af330eca718, 0xe969a032ed2a53, 0x3f87df8ca88e}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0xe6e, 0xc55, 0xb5a, 0x1be4, 0x10f8, 0x1175, 0x1ada, 0x13de, 0xa0d, 0x1cb, 0x6f3, 0x91f, 0x70c, 0x12ef, 0x1403, 0x115a, 0x1205, 0x1705, 0xb8a, 0x490, 0x681, 0x1a6f, 0xd49, 0x2ca, 0x7e2, 0x1ad8, 0x1aa6, 0x9e8, 0x1f0f, 0x1df, 0xc32, 0xd30, 0x1a34, 0xfc4, 0x1519, 0x1cde, 0x7c9, 0x12da, 0x157} +{{0xe6e, 0xc55, 0xb5a, 0x1be4, 0x10f8, 0x1175, 0x1ada, 0x13de, 0xa0d, 0x1cb, 0x6f3, 0x91f, 0x70c, 0x12ef, 0x1403, 0x115a, 0x1205, 0x1705, 0xb8a, 0x490, 0x681, 0x1a6f, 0xd49, 0x2ca, 0x7e2, 0x1ad8, 0x1aa6, 0x9e8, 0x1f0f, 0x1df, 0xc32, 0xd30, 0x1a34, 0xfc4, 0x1519, 0x1cde, 
0x7c9, 0x12da, 0x157}} #elif RADIX == 32 -{0x17371973, 0x8b5a62a, 0x1d61f1be, 0x1ef6b6a2, 0x1cb506c, 0x1123e6f3, 0x1cbbce1, 0x58ad50, 0x17157059, 0x19a04920, 0xa6a4e9b, 0x1b07e216, 0x7a354da, 0xeffc3d, 0x8d30619, 0x65f89a3, 0x1e4f37aa, 0x651} +{{0x17371973, 0x8b5a62a, 0x1d61f1be, 0x1ef6b6a2, 0x1cb506c, 0x1123e6f3, 0x1cbbce1, 0x58ad50, 0x17157059, 0x19a04920, 0xa6a4e9b, 0x1b07e216, 0x7a354da, 0xeffc3d, 0x8d30619, 0x65f89a3, 0x1e4f37aa, 0x651}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf1be45ad3155cdc6, 0x2d41b3ded6d45d61, 0x1cbbce1891f37987, 0x5c55c1640b15aa0, 0x42ca6a4e9bcd0249, 0xfe1e9e8d536b60fc, 0xbf13468d30619077, 0x9cb68f93cdea8c} +{{0xf1be45ad3155cdc6, 0x2d41b3ded6d45d61, 0x1cbbce1891f37987, 0x5c55c1640b15aa0, 0x42ca6a4e9bcd0249, 0xfe1e9e8d536b60fc, 0xbf13468d30619077, 0x9cb68f93cdea8c}} #else -{0x17c8b5a62ab9b8c, 0x1b3ded6d45d61f1, 0x10c48f9bcc396a0, 0x1902c56a8072ef3, 0x179a04920b8ab82, 0xb60fc42ca6a4e9, 0x83bff0f4f46a9b, 0x32fc4d1a34c186, 0x1396d1f279bd5} +{{0x17c8b5a62ab9b8c, 0x1b3ded6d45d61f1, 0x10c48f9bcc396a0, 0x1902c56a8072ef3, 0x179a04920b8ab82, 0xb60fc42ca6a4e9, 0x83bff0f4f46a9b, 0x32fc4d1a34c186, 0x1396d1f279bd5}} #endif #endif , #if 0 #elif RADIX == 16 -{0xc71, 0x167c, 0x1de2, 0x708, 0xb78, 0x1797, 0x16d0, 0xc73, 0x1f29, 0x1014, 0x1753, 0x1dd9, 0x1326, 0xab2, 0x1e6e, 0x51a, 0x32d, 0x7c1, 0x127b, 0x1b08, 0xcd4, 0x5fd, 0x159a, 0xb2c, 0x137d, 0x28f, 0xc4f, 0x121a, 0x16dd, 0x1771, 0xa7b, 0x11b9, 0xe86, 0x199c, 0x1cb5, 0x2db, 0x14b3, 0x1e97, 0x7b} +{{0xc71, 0x167c, 0x1de2, 0x708, 0xb78, 0x1797, 0x16d0, 0xc73, 0x1f29, 0x1014, 0x1753, 0x1dd9, 0x1326, 0xab2, 0x1e6e, 0x51a, 0x32d, 0x7c1, 0x127b, 0x1b08, 0xcd4, 0x5fd, 0x159a, 0xb2c, 0x137d, 0x28f, 0xc4f, 0x121a, 0x16dd, 0x1771, 0xa7b, 0x11b9, 0xe86, 0x199c, 0x1cb5, 0x2db, 0x14b3, 0x1e97, 0x7b}} #elif RADIX == 32 -{0x638892e, 0x11de2b3e, 0x5d6f070, 0x39db42f, 0x1014f94b, 0x1bbb3753, 0x172aca64, 0x12d28d79, 0x4f67c11, 0xb353611, 0xcacd17f, 0x11f37d59, 0x86989e2, 0x1bb8db76, 0xd1b953d, 0xd7338e8, 0x598b6f9, 0x7bd} +{{0x638892e, 0x11de2b3e, 0x5d6f070, 0x39db42f, 0x1014f94b, 0x1bbb3753, 0x172aca64, 0x12d28d79, 0x4f67c11, 0xb353611, 0xcacd17f, 0x11f37d59, 0x86989e2, 0x1bb8db76, 0xd1b953d, 0xd7338e8, 0x598b6f9, 0x7bd}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf0708ef159f18e22, 0x53e52c73b685e5d6, 0x72aca64ddd9ba9c0, 0x893d9f0465a51af3, 0xab2cacd17f59a9b0, 0x6dbb21a6278a3e6f, 0xe671d0d1b953dddc, 0x7fa5e9662dbe5a} +{{0xf0708ef159f18e22, 0x53e52c73b685e5d6, 0x72aca64ddd9ba9c0, 0x893d9f0465a51af3, 0xab2cacd17f59a9b0, 0x6dbb21a6278a3e6f, 0xe671d0d1b953dddc, 0x7fa5e9662dbe5a}} #else -{0xe11de2b3e31c44, 0x12c73b685e5d6f0, 0x126eecdd4e029f2, 0x1196946bcdcab29, 0x1eb35361127b3e0, 0xa3e6fab2cacd17, 0xeee36dd90d313c, 0x16b99c74346e54f, 0xff4bd2cc5b7c} +{{0xe11de2b3e31c44, 0x12c73b685e5d6f0, 0x126eecdd4e029f2, 0x1196946bcdcab29, 0x1eb35361127b3e0, 0xa3e6fab2cacd17, 0xeee36dd90d313c, 0x16b99c74346e54f, 0xff4bd2cc5b7c}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x111d, 0x19ac, 0x1a8f, 0xc58, 0xaa, 0xdc, 0x13de, 0x1dc, 0x17a6, 0x1e3d, 0x198a, 0x40a, 0x120b, 0x17ba, 0x91c, 0x1858, 0xee4, 0x33b, 0x18aa, 0x1124, 0x5f8, 0x37d, 0xf3e, 0xa4b, 0x1e1, 0x2bd, 0x1ff2, 0x1a56, 0x1168, 0x739, 0x1fee, 0x190c, 0x13e9, 0xd07, 0x17fd, 0x1b9e, 0x198b, 0x1faa, 0xd2} +{{0x111d, 0x19ac, 0x1a8f, 0xc58, 0xaa, 0xdc, 0x13de, 0x1dc, 0x17a6, 0x1e3d, 0x198a, 0x40a, 0x120b, 0x17ba, 0x91c, 0x1858, 0xee4, 0x33b, 0x18aa, 0x1124, 0x5f8, 0x37d, 0xf3e, 0xa4b, 0x1e1, 0x2bd, 0x1ff2, 0x1a56, 0x1168, 0x739, 0x1fee, 0x190c, 0x13e9, 0xd07, 0x17fd, 0x1b9e, 0x198b, 0x1faa, 0xd2}} #elif RADIX == 32 -{0x88e8fa0, 0x11a8fcd6, 0x170154c5, 0xee4f781, 0x1e3dbd30, 0xc81598a, 0xe5eea41, 0xe4c2c24, 0x115433b7, 0x97e2249, 0xb79f0df, 0x17a1e152, 0x95bfe42, 0x39cc5a3, 0x1390cff7, 0x1f5a0f3e, 0xc5ee7af, 0xd56} +{{0x88e8fa0, 0x11a8fcd6, 0x170154c5, 0xee4f781, 0x1e3dbd30, 0xc81598a, 0xe5eea41, 0xe4c2c24, 0x115433b7, 0x97e2249, 0xb79f0df, 0x17a1e152, 0x95bfe42, 0x39cc5a3, 0x1390cff7, 0x1f5a0f3e, 0xc5ee7af, 0xd56}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x54c58d47e6b223a3, 0xf6f4c1dc9ef03701, 0xe5eea41640acc578, 0x4c550ceddc985848, 0x2a4b79f0df4bf112, 0x62d1a56ff90af43c, 0xb41e7d390cff71ce, 0x187eab317b9ebfe} +{{0x54c58d47e6b223a3, 0xf6f4c1dc9ef03701, 0xe5eea41640acc578, 0x4c550ceddc985848, 0x2a4b79f0df4bf112, 0x62d1a56ff90af43c, 0xb41e7d390cff71ce, 0x187eab317b9ebfe}} #else -{0x18b1a8fcd644747, 0xc1dc9ef0370154, 0xb205662bc7b7a, 0x177261612397ba9, 0x1e97e22498aa19d, 0xaf43c2a4b79f0d, 0x18e73168d2b7fc8, 0x1fad079f4e433fd, 0x15fd5662f73d7} +{{0x18b1a8fcd644747, 0xc1dc9ef0370154, 0xb205662bc7b7a, 0x177261612397ba9, 0x1e97e22498aa19d, 0xaf43c2a4b79f0d, 0x18e73168d2b7fc8, 0x1fad079f4e433fd, 0x15fd5662f73d7}} #endif #endif , #if 0 #elif RADIX == 16 -{0x63c, 0x609, 0x89c, 0x1f09, 0x9c9, 0x1e89, 0x1826, 0x1460, 0x15d6, 0xa52, 0xbb2, 0x1b93, 0x1f90, 0xa2f, 0x3b3, 0x1a76, 0x1c29, 0x17fc, 0x864, 0x55a, 0x1a9b, 0x7fa, 0x7ee, 0x75f, 0x1b4b, 0x15e6, 0xd75, 0x1238, 0x847, 0x1711, 0x9e7, 0xa37, 0x4b6, 0x1264, 0x3e1, 0xf87, 0x1c47, 0x706, 0x20b} +{{0x63c, 0x609, 0x89c, 0x1f09, 0x9c9, 0x1e89, 0x1826, 0x1460, 0x15d6, 0xa52, 0xbb2, 0x1b93, 0x1f90, 0xa2f, 0x3b3, 0x1a76, 0x1c29, 0x17fc, 0x864, 0x55a, 0x1a9b, 0x7fa, 0x7ee, 0x75f, 0x1b4b, 0x15e6, 
0xd75, 0x1238, 0x847, 0x1711, 0x9e7, 0xa37, 0x4b6, 0x1264, 0x3e1, 0xf87, 0x1c47, 0x706, 0x20b}} #elif RADIX == 32 -{0x131e26c1, 0x1289c304, 0x25393f0, 0x30609bd, 0xa52aeb5, 0x3726bb2, 0x19a8bff2, 0x29d3b0e, 0x10c97fce, 0x16a6cab4, 0x1f3f71fe, 0x1cdb4b3a, 0x8e1aeb5, 0x1b88a11e, 0xca374f3, 0x1864c84b, 0x23be1c7, 0xab7} +{{0x131e26c1, 0x1289c304, 0x25393f0, 0x30609bd, 0xa52aeb5, 0x3726bb2, 0x19a8bff2, 0x29d3b0e, 0x10c97fce, 0x16a6cab4, 0x1f3f71fe, 0x1cdb4b3a, 0x8e1aeb5, 0x1b88a11e, 0xca374f3, 0x1864c84b, 0x23be1c7, 0xab7}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x93f0944e1824c789, 0x4abad460c137a253, 0x9a8bff21b935d929, 0xa4325ff3853a761d, 0x675f3f71feb53655, 0x508f2386bad79b69, 0xc99096ca374f3dc4, 0x129c1b88ef871f0} +{{0x93f0944e1824c789, 0x4abad460c137a253, 0x9a8bff21b935d929, 0xa4325ff3853a761d, 0x675f3f71feb53655, 0x508f2386bad79b69, 0xc99096ca374f3dc4, 0x129c1b88ef871f0}} #else -{0x1e1289c30498f13, 0xd460c137a25393, 0x190dc9aec94a55d, 0xe14e9d8766a2ff, 0x1d6a6cab4864bfe, 0x179b69675f3f71f, 0x1ee2284791c35d6, 0x1c326425b28dd3c, 0xa383711df0e3} +{{0x1e1289c30498f13, 0xd460c137a25393, 0x190dc9aec94a55d, 0xe14e9d8766a2ff, 0x1d6a6cab4864bfe, 0x179b69675f3f71f, 0x1ee2284791c35d6, 0x1c326425b28dd3c, 0xa383711df0e3}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp.h index 1d899ededa..845fc9f461 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp.h @@ -112,7 +112,7 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) static inline void fp_copy(fp_t *out, const fp_t *a) { - memcpy(out, a, 
sizeof(fp_t)); + memmove(out, a, sizeof(fp_t)); } static inline void diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp2.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp2.h index 736e83e22a..e5baadf558 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp2.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/fp2.h @@ -46,4 +46,4 @@ fp2_sqr(fp2_t *x, const fp2_t *y) x->re.arr[7] = t.re.arr[7]; } -#endif \ No newline at end of file +#endif diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/gf27500.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/gf27500.c index 11cbd6cf08..775192dfc8 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/gf27500.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/gf27500.c @@ -1,30 +1,30 @@ #include "gf27500.h" // see gf27500.h -const gf27500 ZERO = { 0, 0, 0, 0, 0, 0, 0, 0 }; +const gf27500 ZERO = {{ 0, 0, 0, 0, 0, 0, 0, 0 }}; // see gf27500.h -const gf27500 ONE = { 0x0000000000000097, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, - 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0130000000000000 }; +const gf27500 ONE = {{ 0x0000000000000097, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0130000000000000 }}; // see gf27500.h -const gf27500 gf27500_MINUS_ONE = { 0xFFFFFFFFFFFFFF68, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, - 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x007FFFFFFFFFFFFF }; +const gf27500 gf27500_MINUS_ONE = {{ 0xFFFFFFFFFFFFFF68, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x007FFFFFFFFFFFFF }}; // Montgomery representation of 2^256. -static const gf27500 R2 = { 0xED097B425ED0F19A, 0x097B425ED097B425, 0x7B425ED097B425ED, 0x425ED097B425ED09, - 0x5ED097B425ED097B, 0xD097B425ED097B42, 0x97B425ED097B425E, 0x0045ED097B425ED0 }; +static const gf27500 R2 = {{ 0xED097B425ED0F19A, 0x097B425ED097B425, 0x7B425ED097B425ED, 0x425ED097B425ED09, + 0x5ED097B425ED097B, 0xD097B425ED097B42, 0x97B425ED097B425E, 0x0045ED097B425ED0 }}; // The modulus itself (this is also a valid representation of zero). -static const gf27500 MODULUS = { 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, - 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x01AFFFFFFFFFFFFF }; +static const gf27500 MODULUS = {{ 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x01AFFFFFFFFFFFFF }}; // 1/2^496 (in Montgomery representation). 
-static const gf27500 INVT496 = { 0x0000000000010000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, - 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 }; +static const gf27500 INVT496 = {{ 0x0000000000010000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 }}; -static const gf27500 PM1O3 = { 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, - 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x011fffffffffffff }; +static const gf27500 PM1O3 = {{ 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x011fffffffffffff }}; // Expand the most significant bit of x into a full-width 64-bit word // (0x0000000000000000 or 0xFFFFFFFFFFFFFFFF). diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/gf27500.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/gf27500.h index 3ca640cc29..2bafd22e53 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/gf27500.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/gf27500.h @@ -99,7 +99,7 @@ extern "C" * support the API inline functions; they MUST NOT be used directly. */ -#if (defined _MSC_VER && defined _M_X64) || (defined __x86_64__ && (defined __GNUC__ || defined __clang__)) +#if (defined _MSC_VER && defined _M_X64) || (defined __x86_64__ && (defined __GNUC__ || defined __clang__)) || defined(C_PEDANTIC_MODE) #include #define inner_gf27500_adc(cc, a, b, d) _addcarry_u64(cc, a, b, (unsigned long long *)(void *)d) #define inner_gf27500_sbb(cc, a, b, d) _subborrow_u64(cc, a, b, (unsigned long long *)(void *)d) @@ -120,17 +120,48 @@ inner_gf27500_sbb(unsigned char cc, uint64_t a, uint64_t b, uint64_t *d) } #endif -#if defined _MSC_VER +#if defined _MSC_VER || defined(C_PEDANTIC_MODE) +#if defined _MSC_VER #define inner_gf27500_umul(lo, hi, x, y) \ do { \ uint64_t umul_hi; \ (lo) = _umul128((x), (y), &umul_hi); \ (hi) = umul_hi; \ } while (0) +#else +#define inner_gf27500_umul(lo, hi, a, b) \ + do { \ + register uint64_t al, ah, bl, bh, temp; \ + uint64_t albl, albh, ahbl, ahbh, res1, res2, res3, carry; \ + uint64_t mask_low = (uint64_t)(-1) >> (sizeof(uint64_t) * 4), mask_high = (uint64_t)(-1) << (sizeof(uint64_t) * 4); \ + al = a & mask_low; \ + ah = a >> (sizeof(uint64_t) * 4); \ + bl = b & mask_low; \ + bh = b >> (sizeof(uint64_t) * 4); \ + albl = al * bl; \ + albh = al * bh; \ + ahbl = ah * bl; \ + ahbh = ah * bh; \ + (lo) = albl & mask_low; \ + res1 = albl >> (sizeof(uint64_t) * 4); \ + res2 = ahbl & mask_low; \ + res3 = albh & mask_low; \ + temp = res1 + res2 + res3 ; \ + carry = temp >> (sizeof(uint64_t) * 4); \ + (lo) ^= temp << (sizeof(uint64_t) * 4); \ + res1 = ahbl >> (sizeof(uint64_t) * 4); \ + res2 = albh >> (sizeof(uint64_t) * 4); \ + res3 = ahbh & mask_low; \ + temp = res1 + res2 + res3 + carry; \ + (hi) = temp & mask_low; \ + carry = temp & mask_high; \ + (hi) ^= (ahbh & mask_high) + carry; \ + } while (0) +#endif #define inner_gf27500_umul_add(lo, hi, x, y, z) \ do { \ uint64_t umul_lo, umul_hi; \ - umul_lo = _umul128((x), (y), &umul_hi); \ + inner_gf27500_umul(umul_lo, umul_hi, (x), (y)); \ unsigned char umul_cc; \ umul_cc = inner_gf27500_adc(0, umul_lo, (z), &umul_lo); \ (void)inner_gf27500_adc(umul_cc, umul_hi, 0, &umul_hi); \ @@ -140,9 +171,9 @@ inner_gf27500_sbb(unsigned char cc, uint64_t a, uint64_t b, uint64_t *d) #define 
inner_gf27500_umul_x2(lo, hi, x1, y1, x2, y2) \ do { \ uint64_t umul_lo, umul_hi; \ - umul_lo = _umul128((x1), (y1), &umul_hi); \ + inner_gf27500_umul(umul_lo, umul_hi, (x1), (y1)); \ uint64_t umul_lo2, umul_hi2; \ - umul_lo2 = _umul128((x2), (y2), &umul_hi2); \ + inner_gf27500_umul(umul_lo2, umul_hi2, (x2), (y2)); \ unsigned char umul_cc; \ umul_cc = inner_gf27500_adc(0, umul_lo, umul_lo2, &umul_lo); \ (void)inner_gf27500_adc(umul_cc, umul_hi, umul_hi2, &umul_hi); \ @@ -152,9 +183,9 @@ inner_gf27500_sbb(unsigned char cc, uint64_t a, uint64_t b, uint64_t *d) #define inner_gf27500_umul_x2_add(lo, hi, x1, y1, x2, y2, z) \ do { \ uint64_t umul_lo, umul_hi; \ - umul_lo = _umul128((x1), (y1), &umul_hi); \ + inner_gf27500_umul(umul_lo, umul_hi, (x1), (y1)); \ uint64_t umul_lo2, umul_hi2; \ - umul_lo2 = _umul128((x2), (y2), &umul_hi2); \ + inner_gf27500_umul(umul_lo2, umul_hi2, (x2), (y2)); \ unsigned char umul_cc; \ umul_cc = inner_gf27500_adc(0, umul_lo, umul_lo2, &umul_lo); \ (void)inner_gf27500_adc(umul_cc, umul_hi, umul_hi2, &umul_hi); \ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd.h index 2b16e23834..616504c7b1 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd.h @@ -415,7 +415,7 @@ void copy_bases_to_kernel(theta_kernel_couple_points_t *ker, const ec_basis_t *B * @param t: an integer * @returns 0xFFFFFFFF on success, 0 on failure */ -static int +static inline int test_couple_point_order_twof(const theta_couple_point_t *T, const theta_couple_curve_t *E, int t) { int check_P1 = test_point_order_twof(&T->P1, &E->E1, t); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.c index a697ac7eb1..fe34f0e3ab 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/hd_splitting_transforms.c @@ -11,131 +11,131 @@ const int CHI_EVAL[4][4] = {{1, 1, 1, 1}, {1, -1, 1, -1}, {1, 1, -1, -1}, {1, -1 const fp2_t FP2_CONSTANTS[5] = {{ #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x1ffb, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1bf} +{{0x1ffb, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1bf}} #elif RADIX == 32 -{0x1ffda12f, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x57f} +{{0x1ffda12f, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x57f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xffffffffffffff68, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x7fffffffffffff} +{{0xffffffffffffff68, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x7fffffffffffff}} #else -{0x1fffffffffffed0, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0xffffffffffff} +{{0x1fffffffffffed0, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0xffffffffffff}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1ffb, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1bf} +{{0x1ffb, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1bf}} #elif RADIX == 32 -{0x1ffda12f, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x57f} +{{0x1ffda12f, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x57f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xffffffffffffff68, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x7fffffffffffff} +{{0xffffffffffffff68, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x7fffffffffffff}} #else -{0x1fffffffffffed0, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0xffffffffffff} +{{0x1fffffffffffed0, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0xffffffffffff}} #endif #endif }}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/l2.c index ea32213c75..0fed774a04 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/l2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/l2.c @@ -24,8 +24,8 @@ copy(fp_num *x, fp_num *r) static void normalize(fp_num *x) { - if (x->s == 0.0 || isfinite(x->s) == 0) { - if (x->s == 0.0) { + if (fpclassify(x->s) == FP_ZERO || isfinite(x->s) == 0) { + if (fpclassify(x->s) == FP_ZERO) { x->e = INT_MIN; } } else { @@ -49,13 +49,6 @@ to_deltabar(fp_num *x) x->e = 0; } -static void -to_etabar(fp_num *x) -{ - x->s = ETABAR; - x->e = 0; -} - static void from_mpz(const ibz_t *x, fp_num *r) { diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lll_internals.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lll_internals.h index e8d90141ac..2b76857205 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lll_internals.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/lll_internals.h @@ -43,13 +43,19 @@ 
/** @brief Type for fractions of integers * - * @typedef ibq_t +* @typedef ibq_t * * For fractions of integers of arbitrary size, used by intbig module, using gmp */ -typedef ibz_t ibq_t[2]; -typedef ibq_t ibq_vec_4_t[4]; -typedef ibq_t ibq_mat_4x4_t[4][4]; +typedef struct { + ibz_t q[2]; +} ibq_t; +typedef struct { + ibq_t v[4]; +} ibq_vec_4_t; +typedef struct { + ibq_vec_4_t m[4]; +} ibq_mat_4x4_t; /**@} */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mp.c index 27f4a963db..13714eee4a 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mp.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/mp.c @@ -2,6 +2,7 @@ #include #include #include +#include // double-wide multiplication void @@ -17,7 +18,7 @@ MUL(digit_t *out, const digit_t a, const digit_t b) out[0] = _umul128(a, b, &umul_hi); out[1] = umul_hi; -#elif defined(RADIX_64) && defined(HAVE_UINT128) +#elif defined(RADIX_64) && (defined(HAVE_UINT128) || defined(__SIZEOF_INT128__) || defined(__int128)) && !defined(C_PEDANTIC_MODE) unsigned __int128 umul_tmp; umul_tmp = (unsigned __int128)(a) * (unsigned __int128)(b); out[0] = (uint64_t)umul_tmp; @@ -277,6 +278,7 @@ mp_inv_2e(digit_t *b, const digit_t *a, int e, unsigned int nwords) assert((a[0] & 1) == 1); digit_t x[nwords], y[nwords], aa[nwords], mp_one[nwords], tmp[nwords]; + memset(x, 0, sizeof(x)); mp_copy(aa, a, nwords); mp_one[0] = 1; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/rationals.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/rationals.c index 0c5387e5e8..25f8519b3f 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/rationals.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/rationals.c @@ -1,20 +1,20 @@ -#include + #include #include "internal.h" #include "lll_internals.h" void ibq_init(ibq_t *x) { - ibz_init(&((*x)[0])); - ibz_init(&((*x)[1])); - ibz_set(&((*x)[1]), 1); + ibz_init(&(x->q[0])); + ibz_init(&(x->q[1])); + ibz_set(&(x->q[1]), 1); } void ibq_finalize(ibq_t *x) { - ibz_finalize(&((*x)[0])); - ibz_finalize(&((*x)[1])); + ibz_finalize(&(x->q[0])); + ibz_finalize(&(x->q[1])); } void @@ -22,7 +22,7 @@ ibq_mat_4x4_init(ibq_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibq_init(&(*mat)[i][j]); + ibq_init(&mat->m[i].v[j]); } } } @@ -31,7 +31,7 @@ ibq_mat_4x4_finalize(ibq_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibq_finalize(&(*mat)[i][j]); + ibq_finalize(&mat->m[i].v[j]); } } } @@ -40,14 +40,14 @@ void ibq_vec_4_init(ibq_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibq_init(&(*vec)[i]); + ibq_init(&vec->v[i]); } } void ibq_vec_4_finalize(ibq_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibq_finalize(&(*vec)[i]); + ibq_finalize(&vec->v[i]); } } @@ -57,9 +57,9 @@ ibq_mat_4x4_print(const ibq_mat_4x4_t *mat) printf("matrix: "); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_print(&((*mat)[i][j][0]), 10); + ibz_print(&(mat->m[i].v[j].q[0]), 10); printf("/"); - ibz_print(&((*mat)[i][j][1]), 10); + ibz_print(&(mat->m[i].v[j].q[1]), 10); printf(" "); } printf("\n "); @@ -72,9 +72,9 @@ ibq_vec_4_print(const ibq_vec_4_t *vec) { printf("vector: "); for (int i = 0; i < 4; i++) { - ibz_print(&((*vec)[i][0]), 10); + ibz_print(&(vec->v[i].q[0]), 10); printf("/"); - ibz_print(&((*vec)[i][1]), 10); + ibz_print(&(vec->v[i].q[1]), 10); printf(" "); } printf("\n\n"); @@ -86,10 +86,10 @@ ibq_reduce(ibq_t *x) ibz_t gcd, r; ibz_init(&gcd); ibz_init(&r); 
- ibz_gcd(&gcd, &((*x)[0]), &((*x)[1])); - ibz_div(&((*x)[0]), &r, &((*x)[0]), &gcd); + ibz_gcd(&gcd, &(x->q[0]), &(x->q[1])); + ibz_div(&(x->q[0]), &r, &(x->q[0]), &gcd); assert(ibz_is_zero(&r)); - ibz_div(&((*x)[1]), &r, &((*x)[1]), &gcd); + ibz_div(&(x->q[1]), &r, &(x->q[1]), &gcd); assert(ibz_is_zero(&r)); ibz_finalize(&gcd); ibz_finalize(&r); @@ -102,10 +102,10 @@ ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b) ibz_init(&add); ibz_init(&prod); - ibz_mul(&add, &((*a)[0]), &((*b)[1])); - ibz_mul(&prod, &((*b)[0]), &((*a)[1])); - ibz_add(&((*sum)[0]), &add, &prod); - ibz_mul(&((*sum)[1]), &((*a)[1]), &((*b)[1])); + ibz_mul(&add, &(a->q[0]), &(b->q[1])); + ibz_mul(&prod, &(b->q[0]), &(a->q[1])); + ibz_add(&(sum->q[0]), &add, &prod); + ibz_mul(&(sum->q[1]), &(a->q[1]), &(b->q[1])); ibz_finalize(&add); ibz_finalize(&prod); } @@ -113,8 +113,8 @@ ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b) void ibq_neg(ibq_t *neg, const ibq_t *x) { - ibz_copy(&((*neg)[1]), &((*x)[1])); - ibz_neg(&((*neg)[0]), &((*x)[0])); + ibz_copy(&(neg->q[1]), &(x->q[1])); + ibz_neg(&(neg->q[0]), &(x->q[0])); } void @@ -143,8 +143,8 @@ ibq_abs(ibq_t *abs, const ibq_t *x) // once void ibq_mul(ibq_t *prod, const ibq_t *a, const ibq_t *b) { - ibz_mul(&((*prod)[0]), &((*a)[0]), &((*b)[0])); - ibz_mul(&((*prod)[1]), &((*a)[1]), &((*b)[1])); + ibz_mul(&(prod->q[0]), &(a->q[0]), &(b->q[0])); + ibz_mul(&(prod->q[1]), &(a->q[1]), &(b->q[1])); } int @@ -152,9 +152,9 @@ ibq_inv(ibq_t *inv, const ibq_t *x) { int res = !ibq_is_zero(x); if (res) { - ibz_copy(&((*inv)[0]), &((*x)[0])); - ibz_copy(&((*inv)[1]), &((*x)[1])); - ibz_swap(&((*inv)[1]), &((*inv)[0])); + ibz_copy(&(inv->q[0]), &(x->q[0])); + ibz_copy(&(inv->q[1]), &(x->q[1])); + ibz_swap(&(inv->q[1]), &(inv->q[0])); } return (res); } @@ -165,15 +165,15 @@ ibq_cmp(const ibq_t *a, const ibq_t *b) ibz_t x, y; ibz_init(&x); ibz_init(&y); - ibz_copy(&x, &((*a)[0])); - ibz_copy(&y, &((*b)[0])); - ibz_mul(&y, &y, &((*a)[1])); - ibz_mul(&x, &x, &((*b)[1])); - if (ibz_cmp(&((*a)[1]), &ibz_const_zero) > 0) { + ibz_copy(&x, &(a->q[0])); + ibz_copy(&y, &(b->q[0])); + ibz_mul(&y, &y, &(a->q[1])); + ibz_mul(&x, &x, &(b->q[1])); + if (ibz_cmp(&(a->q[1]), &ibz_const_zero) > 0) { ibz_neg(&y, &y); ibz_neg(&x, &x); } - if (ibz_cmp(&((*b)[1]), &ibz_const_zero) > 0) { + if (ibz_cmp(&(b->q[1]), &ibz_const_zero) > 0) { ibz_neg(&y, &y); ibz_neg(&x, &x); } @@ -186,28 +186,28 @@ ibq_cmp(const ibq_t *a, const ibq_t *b) int ibq_is_zero(const ibq_t *x) { - return ibz_is_zero(&((*x)[0])); + return ibz_is_zero(&(x->q[0])); } int ibq_is_one(const ibq_t *x) { - return (0 == ibz_cmp(&((*x)[0]), &((*x)[1]))); + return (0 == ibz_cmp(&(x->q[0]), &(x->q[1]))); } int ibq_set(ibq_t *q, const ibz_t *a, const ibz_t *b) { - ibz_copy(&((*q)[0]), a); - ibz_copy(&((*q)[1]), b); + ibz_copy(&(q->q[0]), a); + ibz_copy(&(q->q[1]), b); return !ibz_is_zero(b); } void ibq_copy(ibq_t *target, const ibq_t *value) // once { - ibz_copy(&((*target)[0]), &((*value)[0])); - ibz_copy(&((*target)[1]), &((*value)[1])); + ibz_copy(&(target->q[0]), &(value->q[0])); + ibz_copy(&(target->q[1]), &(value->q[1])); } int @@ -215,7 +215,7 @@ ibq_is_ibz(const ibq_t *q) { ibz_t r; ibz_init(&r); - ibz_mod(&r, &((*q)[0]), &((*q)[1])); + ibz_mod(&r, &(q->q[0]), &(q->q[1])); int res = ibz_is_zero(&r); ibz_finalize(&r); return (res); @@ -226,7 +226,7 @@ ibq_to_ibz(ibz_t *z, const ibq_t *q) { ibz_t r; ibz_init(&r); - ibz_div(z, &r, &((*q)[0]), &((*q)[1])); + ibz_div(z, &r, &(q->q[0]), &(q->q[1])); int res = ibz_is_zero(&r); ibz_finalize(&r); 
return (res); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/rng.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/rng.h index d0861ac036..0362ca0c42 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/rng.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/rng.h @@ -5,7 +5,7 @@ #include -static int randombytes(unsigned char *x, unsigned long long xlen){ +static inline int randombytes(unsigned char *x, unsigned long long xlen){ OQS_randombytes(x, xlen); return 0; } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign.c index 7335c38d9a..cf2134085b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_broadwell/sqisign.c @@ -121,7 +121,7 @@ sqisign_verify(const unsigned char *m, unsigned long long siglen, const unsigned char *pk) { - + (void) siglen; int ret = 0; public_key_t pkt = { 0 }; signature_t sigt; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim2id2iso.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim2id2iso.c index 143060e2c3..74184fc97b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim2id2iso.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/dim2id2iso.c @@ -191,7 +191,7 @@ fixed_degree_isogeny_and_eval(quat_left_ideal_t *lideal, // reordering vectors and switching some signs if needed to make it in a nicer // shape static void -post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, const ibz_t *norm, bool is_special_order) +post_LLL_basis_treatment(ibz_mat_4x4_t *gram, ibz_mat_4x4_t *reduced, bool is_special_order) { // if the left order is the special one, then we apply some additional post // treatment @@ -520,7 +520,7 @@ find_uv(ibz_t *u, ibz_set(&adjusted_norm[0], 1); ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); ibz_mul(&adjusted_norm[0], &adjusted_norm[0], &ideal[0].lattice.denom); - post_LLL_basis_treatment(&gram[0], &reduced[0], &ideal[0].norm, true); + post_LLL_basis_treatment(&gram[0], &reduced[0], true); // for efficient lattice reduction, we replace ideal[0] by the equivalent // ideal of smallest norm @@ -562,7 +562,7 @@ find_uv(ibz_t *u, ibz_set(&adjusted_norm[i], 1); ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom); ibz_mul(&adjusted_norm[i], &adjusted_norm[i], &ideal[i].lattice.denom); - post_LLL_basis_treatment(&gram[i], &reduced[i], &ideal[i].norm, false); + post_LLL_basis_treatment(&gram[i], &reduced[i], false); } // enumerating small vectors diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/e0_basis.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/e0_basis.c index a7148e485b..316b12f119 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/e0_basis.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/e0_basis.c @@ -2,54 +2,54 @@ const fp2_t BASIS_E0_PX = { #if 0 #elif RADIX == 16 -{0x1099, 0xa9f, 0x14f8, 0x1537, 0x1a13, 0x97e, 0x1095, 0xc8b, 0xdd2, 0x1c5f, 0xbdf, 0x1344, 0x1330, 0x1733, 0x185d, 0x1b08, 0x464, 0x76f, 0xe44, 0x3fc, 0x1dc0, 0x1c62, 0x88, 0x972, 0x13f4, 0x18c8, 0x6bd, 0x804, 0x1269, 0x19e0, 0x14bd, 0x10a1, 0xe5e, 0x1af2, 0x156c, 0x3f7, 0x16a1, 0x47d, 0x314} +{{0x1099, 0xa9f, 0x14f8, 0x1537, 0x1a13, 0x97e, 0x1095, 0xc8b, 0xdd2, 0x1c5f, 0xbdf, 0x1344, 0x1330, 0x1733, 0x185d, 0x1b08, 0x464, 0x76f, 0xe44, 0x3fc, 0x1dc0, 0x1c62, 0x88, 0x972, 0x13f4, 0x18c8, 0x6bd, 0x804, 0x1269, 0x19e0, 0x14bd, 0x10a1, 0xe5e, 0x1af2, 0x156c, 0x3f7, 0x16a1, 0x47d, 0x314}} #elif 
RADIX == 32 -{0x184cba61, 0xf4f854f, 0x1fb42753, 0x45c2552, 0x1c5f6e93, 0x2688bdf, 0xedcce66, 0x64d8461, 0x1c8876f2, 0x177007f8, 0x12044718, 0x1913f44b, 0x10d7b8, 0x1cf049a5, 0x1d0a1a5e, 0x1b35e4e5, 0x1508fdea, 0x66d} +{{0x184cba61, 0xf4f854f, 0x1fb42753, 0x45c2552, 0x1c5f6e93, 0x2688bdf, 0xedcce66, 0x64d8461, 0x1c8876f2, 0x177007f8, 0x12044718, 0x1913f44b, 0x10d7b8, 0x1cf049a5, 0x1d0a1a5e, 0x1b35e4e5, 0x1508fdea, 0x66d}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x27537a7c2a7e132e, 0x7dba4c8b84aa5fb4, 0xedcce6613445eff1, 0xc7221dbc8c9b08c2, 0x8972044718bb803f, 0x24d280435ee3227e, 0x6bc9cbd0a1a5ee78, 0x1011f6d423f7ab6} +{{0x27537a7c2a7e132e, 0x7dba4c8b84aa5fb4, 0xedcce6613445eff1, 0xc7221dbc8c9b08c2, 0x8972044718bb803f, 0x24d280435ee3227e, 0x6bc9cbd0a1a5ee78, 0x1011f6d423f7ab6}} #else -{0xa6f4f854fc265d, 0x4c8b84aa5fb427, 0x1309a22f7f8bedd, 0x12326c230bb7339, 0x1177007f8e443b7, 0x3227e897204471, 0x173c12694021af7, 0xd9af272f428697, 0x523eda847ef5} +{{0xa6f4f854fc265d, 0x4c8b84aa5fb427, 0x1309a22f7f8bedd, 0x12326c230bb7339, 0x1177007f8e443b7, 0x3227e897204471, 0x173c12694021af7, 0xd9af272f428697, 0x523eda847ef5}} #endif #endif , #if 0 #elif RADIX == 16 -{0x4b1, 0x178f, 0x107b, 0x6f6, 0x75e, 0x1b27, 0x4db, 0x1e1b, 0xd78, 0x15b6, 0x1130, 0x8cc, 0x1ac0, 0x9b7, 0x692, 0x1e07, 0x1f4, 0xfd7, 0x2ab, 0x7b5, 0x1040, 0xa43, 0xb6d, 0x13a1, 0x1422, 0x10c9, 0x10b0, 0x1540, 0x827, 0xa69, 0x1761, 0x1f25, 0x1d16, 0x16f2, 0x1fcb, 0x92, 0xcba, 0x1c03, 0x3c7} +{{0x4b1, 0x178f, 0x107b, 0x6f6, 0x75e, 0x1b27, 0x4db, 0x1e1b, 0xd78, 0x15b6, 0x1130, 0x8cc, 0x1ac0, 0x9b7, 0x692, 0x1e07, 0x1f4, 0xfd7, 0x2ab, 0x7b5, 0x1040, 0xa43, 0xb6d, 0x13a1, 0x1422, 0x10c9, 0x10b0, 0x1540, 0x827, 0xa69, 0x1761, 0x1f25, 0x1d16, 0x16f2, 0x1fcb, 0x92, 0xcba, 0x1c03, 0x3c7}} #elif RADIX == 32 -{0x1258c7b1, 0xd07bbc7, 0x9cebc6f, 0x10d936f6, 0x15b66bc7, 0x1199130, 0x926df58, 0x1f4f039a, 0x556fd70, 0x1c100f6a, 0x15b6a90, 0x1934229d, 0x15021610, 0x1534a09e, 0xdf25bb0, 0x12ede5d1, 0x5d024bf, 0xa9b} +{{0x1258c7b1, 0xd07bbc7, 0x9cebc6f, 0x10d936f6, 0x15b66bc7, 0x1199130, 0x926df58, 0x1f4f039a, 0x556fd70, 0x1c100f6a, 0x15b6a90, 0x1934229d, 0x15021610, 0x1534a09e, 0xdf25bb0, 0x12ede5d1, 0x5d024bf, 0xa9b}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xbc6f683dde3c9631, 0xd9af1e1b26dec9ce, 0x926df5808cc89856, 0x5155bf5c3e9e0734, 0x53a15b6a90e0807b, 0x504f540858432684, 0xdbcba2df25bb0a9a, 0x18f00d974092fe5} +{{0xbc6f683dde3c9631, 0xd9af1e1b26dec9ce, 0x926df5808cc89856, 0x5155bf5c3e9e0734, 0x53a15b6a90e0807b, 0x504f540858432684, 0xdbcba2df25bb0a9a, 0x18f00d974092fe5}} #else -{0xded07bbc792c63, 0x11e1b26dec9cebc, 0xc046644c2b6cd7, 0x10fa781cd249b7d, 0x1c100f6a2ab7eb, 0x3268453a15b6a9, 0x54d2827aa042c2, 0x1976f2e8b7c96ec, 0x16e01b2e8125f} +{{0xded07bbc792c63, 0x11e1b26dec9cebc, 0xc046644c2b6cd7, 0x10fa781cd249b7d, 0x1c100f6a2ab7eb, 0x3268453a15b6a9, 0x54d2827aa042c2, 0x1976f2e8b7c96ec, 0x16e01b2e8125f}} #endif #endif }; const fp2_t BASIS_E0_QX = { #if 0 #elif RADIX == 16 -{0x15c, 0x865, 0x1af6, 0x17b9, 0x6a2, 0x1c22, 0x17c5, 0x1149, 0xa7, 0x151e, 0xe57, 0x4c2, 0x18cd, 0xbd2, 0x7a4, 0x7c6, 0x74a, 0xd2, 0x902, 0x68c, 0x21e, 0x1e44, 0x1f5a, 0x1d4c, 0x115b, 0x1777, 0x16d4, 0x503, 0x3af, 0x7e4, 0x1aa7, 0x3dd, 0x827, 0x186b, 0x765, 0x1fc5, 0xc78, 0x9bd, 0xfe} +{{0x15c, 0x865, 0x1af6, 0x17b9, 0x6a2, 0x1c22, 0x17c5, 0x1149, 0xa7, 0x151e, 0xe57, 0x4c2, 0x18cd, 0xbd2, 0x7a4, 0x7c6, 0x74a, 0xd2, 0x902, 0x68c, 0x21e, 0x1e44, 0x1f5a, 0x1d4c, 0x115b, 0x1777, 0x16d4, 0x503, 0x3af, 0x7e4, 0x1aa7, 0x3dd, 0x827, 0x186b, 0x765, 0x1fc5, 
0xc78, 0x9bd, 0xfe}} #elif RADIX == 32 -{0x10ae12d6, 0x13af6432, 0x88d457b, 0xa4df178, 0x151e053c, 0x14984e57, 0x122f4b19, 0x14a3e31e, 0x12040d23, 0x878d18, 0xcfad791, 0xef15bea, 0x140eda97, 0x13f20ebc, 0xe3ddd53, 0x1970d682, 0x3c7f14e, 0x4eb} +{{0x10ae12d6, 0x13af6432, 0x88d457b, 0xa4df178, 0x151e053c, 0x14984e57, 0x122f4b19, 0x14a3e31e, 0x12040d23, 0x878d18, 0xcfad791, 0xef15bea, 0x140eda97, 0x13f20ebc, 0xe3ddd53, 0x1970d682, 0x3c7f14e, 0x4eb}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x457b9d7b21942b84, 0x7814f149be2f088d, 0x22f4b19a4c272bd4, 0xc4810348e947c63d, 0x7d4cfad791043c68, 0x75e503b6a5dde2b, 0xe1ad04e3ddd539f9, 0x1326f58f1fc53b2} +{{0x457b9d7b21942b84, 0x7814f149be2f088d, 0x22f4b19a4c272bd4, 0xc4810348e947c63d, 0x7d4cfad791043c68, 0x75e503b6a5dde2b, 0xe1ad04e3ddd539f9, 0x1326f58f1fc53b2}} #else -{0xf73af643285709, 0xf149be2f088d45, 0xcd261395ea3c0a, 0x3a51f18f48bd2c, 0x20878d18902069, 0x1dde2b7d4cfad79, 0x1cfc83af281db52, 0xcb86b4138f7754, 0xb4deb1e3f8a7} +{{0xf73af643285709, 0xf149be2f088d45, 0xcd261395ea3c0a, 0x3a51f18f48bd2c, 0x20878d18902069, 0x1dde2b7d4cfad79, 0x1cfc83af281db52, 0xcb86b4138f7754, 0xb4deb1e3f8a7}} #endif #endif , #if 0 #elif RADIX == 16 -{0x6ac, 0x25e, 0xc7a, 0x1492, 0xd01, 0xbc0, 0x118, 0x376, 0x3e0, 0x7ae, 0x573, 0x171f, 0x35a, 0x1725, 0x48f, 0xc94, 0x133c, 0x16a4, 0x10a8, 0x178d, 0xdd7, 0x798, 0x1d05, 0x39f, 0xc2a, 0x179c, 0x407, 0xd3, 0x118a, 0x1c9f, 0xeac, 0x145b, 0xc35, 0x11a2, 0x58b, 0xe4, 0x5e3, 0xae7, 0x330} +{{0x6ac, 0x25e, 0xc7a, 0x1492, 0xd01, 0xbc0, 0x118, 0x376, 0x3e0, 0x7ae, 0x573, 0x171f, 0x35a, 0x1725, 0x48f, 0xc94, 0x133c, 0x16a4, 0x10a8, 0x178d, 0xdd7, 0x798, 0x1d05, 0x39f, 0xc2a, 0x179c, 0x407, 0xd3, 0x118a, 0x1c9f, 0xeac, 0x145b, 0xc35, 0x11a2, 0x58b, 0xe4, 0x5e3, 0xae7, 0x330}} #elif RADIX == 32 -{0x3563c78, 0x4c7a12f, 0x101a0349, 0x1bb04617, 0x7ae1f00, 0xae3e573, 0x7dc946b, 0x13c64a12, 0x1516a49, 0x375ef1b, 0x1fe829e6, 0x138c2a1c, 0x34c80f7, 0xe4fc628, 0xb45b756, 0x2e344c3, 0xf18390b, 0x339} +{{0x3563c78, 0x4c7a12f, 0x101a0349, 0x1bb04617, 0x7ae1f00, 0xae3e573, 0x7dc946b, 0x13c64a12, 0x1516a49, 0x375ef1b, 0x1fe829e6, 0x138c2a1c, 0x34c80f7, 0xe4fc628, 0xb45b756, 0x2e344c3, 0xf18390b, 0x339}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x349263d0978d58f, 0xb87c037608c2f01a, 0x7dc946b571f2b99e, 0xd8545a92678c9424, 0x439fe829e61baf78, 0xe3140d3203de7185, 0xc68986b45b756727, 0x32b9cbc60e42c5} +{{0x349263d0978d58f, 0xb87c037608c2f01a, 0x7dc946b571f2b99e, 0xd8545a92678c9424, 0x439fe829e61baf78, 0xe3140d3203de7185, 0xc68986b45b756727, 0x32b9cbc60e42c5}} #else -{0x924c7a12f1ab1e, 0x37608c2f01a03, 0x15ab8f95ccf5c3e, 0x99e325091f7251, 0xc375ef1b0a8b52, 0x1e7185439fe829e, 0x1393f18a069901e, 0x1171a261ad16dd5, 0x6573978c1c85} +{{0x924c7a12f1ab1e, 0x37608c2f01a03, 0x15ab8f95ccf5c3e, 0x99e325091f7251, 0xc375ef1b0a8b52, 0x1e7185439fe829e, 0x1393f18a069901e, 0x1171a261ad16dd5, 0x6573978c1c85}} #endif #endif }; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec.h index e609c93a08..7cef95ca49 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/ec.h @@ -566,7 +566,7 @@ uint32_t ec_is_basis_four_torsion(const ec_basis_t *B, const ec_curve_t *E); * * @return 0xFFFFFFFF if the order is correct, 0 otherwise */ -static int +static inline int test_point_order_twof(const ec_point_t *P, const ec_curve_t *E, int t) { ec_point_t test; @@ -595,7 +595,7 @@ test_point_order_twof(const ec_point_t *P, const 
ec_curve_t *E, int t) * * @return 0xFFFFFFFF if the order is correct, 0 otherwise */ -static int +static inline int test_basis_order_twof(const ec_basis_t *B, const ec_curve_t *E, int t) { int check_P = test_point_order_twof(&B->P, E, t); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/encode_verification.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/encode_verification.c index fecdb9c259..8aa451d366 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/encode_verification.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/encode_verification.c @@ -99,36 +99,6 @@ ec_curve_from_bytes(ec_curve_t *curve, const byte_t *enc) return proj_from_bytes(&curve->A, &curve->C, enc); } -static byte_t * -ec_point_to_bytes(byte_t *enc, const ec_point_t *point) -{ - return proj_to_bytes(enc, &point->x, &point->z); -} - -static const byte_t * -ec_point_from_bytes(ec_point_t *point, const byte_t *enc) -{ - return proj_from_bytes(&point->x, &point->z, enc); -} - -static byte_t * -ec_basis_to_bytes(byte_t *enc, const ec_basis_t *basis) -{ - enc = ec_point_to_bytes(enc, &basis->P); - enc = ec_point_to_bytes(enc, &basis->Q); - enc = ec_point_to_bytes(enc, &basis->PmQ); - return enc; -} - -static const byte_t * -ec_basis_from_bytes(ec_basis_t *basis, const byte_t *enc) -{ - enc = ec_point_from_bytes(&basis->P, enc); - enc = ec_point_from_bytes(&basis->Q, enc); - enc = ec_point_from_bytes(&basis->PmQ, enc); - return enc; -} - // public API byte_t * diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/endomorphism_action.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/endomorphism_action.c index d62ffc51c7..4b8e3e34c1 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/endomorphism_action.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/endomorphism_action.c @@ -4,261 +4,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else 
-{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x280} +{{0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x280}} #elif RADIX == 32 -{0x12f68, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x400} +{{0x12f68, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x400}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x4b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x170000000000000} +{{0x4b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x170000000000000}} #else -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1300000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1300000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX 
== 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x1099, 0xa9f, 0x14f8, 0x1537, 0x1a13, 0x97e, 0x1095, 0xc8b, 0xdd2, 0x1c5f, 0xbdf, 0x1344, 0x1330, 0x1733, 0x185d, 0x1b08, 0x464, 0x76f, 0xe44, 0x3fc, 0x1dc0, 0x1c62, 0x88, 0x972, 0x13f4, 0x18c8, 0x6bd, 0x804, 0x1269, 0x19e0, 0x14bd, 0x10a1, 0xe5e, 0x1af2, 0x156c, 0x3f7, 0x16a1, 0x47d, 0x314} +{{0x1099, 0xa9f, 0x14f8, 0x1537, 0x1a13, 0x97e, 0x1095, 0xc8b, 0xdd2, 0x1c5f, 0xbdf, 0x1344, 0x1330, 0x1733, 0x185d, 0x1b08, 0x464, 0x76f, 0xe44, 0x3fc, 0x1dc0, 0x1c62, 0x88, 0x972, 0x13f4, 0x18c8, 0x6bd, 0x804, 0x1269, 0x19e0, 0x14bd, 0x10a1, 0xe5e, 0x1af2, 0x156c, 0x3f7, 0x16a1, 0x47d, 0x314}} #elif RADIX == 32 -{0x184cba61, 0xf4f854f, 0x1fb42753, 0x45c2552, 0x1c5f6e93, 0x2688bdf, 0xedcce66, 0x64d8461, 0x1c8876f2, 0x177007f8, 0x12044718, 0x1913f44b, 0x10d7b8, 0x1cf049a5, 0x1d0a1a5e, 0x1b35e4e5, 0x1508fdea, 0x66d} +{{0x184cba61, 0xf4f854f, 0x1fb42753, 0x45c2552, 0x1c5f6e93, 0x2688bdf, 0xedcce66, 0x64d8461, 0x1c8876f2, 0x177007f8, 0x12044718, 0x1913f44b, 0x10d7b8, 0x1cf049a5, 0x1d0a1a5e, 0x1b35e4e5, 0x1508fdea, 0x66d}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x27537a7c2a7e132e, 0x7dba4c8b84aa5fb4, 0xedcce6613445eff1, 0xc7221dbc8c9b08c2, 0x8972044718bb803f, 0x24d280435ee3227e, 0x6bc9cbd0a1a5ee78, 0x1011f6d423f7ab6} +{{0x27537a7c2a7e132e, 0x7dba4c8b84aa5fb4, 0xedcce6613445eff1, 0xc7221dbc8c9b08c2, 0x8972044718bb803f, 0x24d280435ee3227e, 0x6bc9cbd0a1a5ee78, 0x1011f6d423f7ab6}} #else 
-{0xa6f4f854fc265d, 0x4c8b84aa5fb427, 0x1309a22f7f8bedd, 0x12326c230bb7339, 0x1177007f8e443b7, 0x3227e897204471, 0x173c12694021af7, 0xd9af272f428697, 0x523eda847ef5} +{{0xa6f4f854fc265d, 0x4c8b84aa5fb427, 0x1309a22f7f8bedd, 0x12326c230bb7339, 0x1177007f8e443b7, 0x3227e897204471, 0x173c12694021af7, 0xd9af272f428697, 0x523eda847ef5}} #endif #endif , #if 0 #elif RADIX == 16 -{0x4b1, 0x178f, 0x107b, 0x6f6, 0x75e, 0x1b27, 0x4db, 0x1e1b, 0xd78, 0x15b6, 0x1130, 0x8cc, 0x1ac0, 0x9b7, 0x692, 0x1e07, 0x1f4, 0xfd7, 0x2ab, 0x7b5, 0x1040, 0xa43, 0xb6d, 0x13a1, 0x1422, 0x10c9, 0x10b0, 0x1540, 0x827, 0xa69, 0x1761, 0x1f25, 0x1d16, 0x16f2, 0x1fcb, 0x92, 0xcba, 0x1c03, 0x3c7} +{{0x4b1, 0x178f, 0x107b, 0x6f6, 0x75e, 0x1b27, 0x4db, 0x1e1b, 0xd78, 0x15b6, 0x1130, 0x8cc, 0x1ac0, 0x9b7, 0x692, 0x1e07, 0x1f4, 0xfd7, 0x2ab, 0x7b5, 0x1040, 0xa43, 0xb6d, 0x13a1, 0x1422, 0x10c9, 0x10b0, 0x1540, 0x827, 0xa69, 0x1761, 0x1f25, 0x1d16, 0x16f2, 0x1fcb, 0x92, 0xcba, 0x1c03, 0x3c7}} #elif RADIX == 32 -{0x1258c7b1, 0xd07bbc7, 0x9cebc6f, 0x10d936f6, 0x15b66bc7, 0x1199130, 0x926df58, 0x1f4f039a, 0x556fd70, 0x1c100f6a, 0x15b6a90, 0x1934229d, 0x15021610, 0x1534a09e, 0xdf25bb0, 0x12ede5d1, 0x5d024bf, 0xa9b} +{{0x1258c7b1, 0xd07bbc7, 0x9cebc6f, 0x10d936f6, 0x15b66bc7, 0x1199130, 0x926df58, 0x1f4f039a, 0x556fd70, 0x1c100f6a, 0x15b6a90, 0x1934229d, 0x15021610, 0x1534a09e, 0xdf25bb0, 0x12ede5d1, 0x5d024bf, 0xa9b}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xbc6f683dde3c9631, 0xd9af1e1b26dec9ce, 0x926df5808cc89856, 0x5155bf5c3e9e0734, 0x53a15b6a90e0807b, 0x504f540858432684, 0xdbcba2df25bb0a9a, 0x18f00d974092fe5} +{{0xbc6f683dde3c9631, 0xd9af1e1b26dec9ce, 0x926df5808cc89856, 0x5155bf5c3e9e0734, 0x53a15b6a90e0807b, 0x504f540858432684, 0xdbcba2df25bb0a9a, 0x18f00d974092fe5}} #else -{0xded07bbc792c63, 0x11e1b26dec9cebc, 0xc046644c2b6cd7, 0x10fa781cd249b7d, 0x1c100f6a2ab7eb, 0x3268453a15b6a9, 0x54d2827aa042c2, 0x1976f2e8b7c96ec, 0x16e01b2e8125f} +{{0xded07bbc792c63, 0x11e1b26dec9cebc, 0xc046644c2b6cd7, 0x10fa781cd249b7d, 0x1c100f6a2ab7eb, 0x3268453a15b6a9, 0x54d2827aa042c2, 0x1976f2e8b7c96ec, 0x16e01b2e8125f}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x15c, 0x865, 0x1af6, 0x17b9, 0x6a2, 0x1c22, 0x17c5, 0x1149, 0xa7, 0x151e, 0xe57, 0x4c2, 0x18cd, 0xbd2, 0x7a4, 0x7c6, 0x74a, 0xd2, 0x902, 0x68c, 0x21e, 0x1e44, 0x1f5a, 0x1d4c, 0x115b, 0x1777, 0x16d4, 0x503, 0x3af, 0x7e4, 0x1aa7, 0x3dd, 0x827, 0x186b, 0x765, 0x1fc5, 0xc78, 0x9bd, 0xfe} +{{0x15c, 0x865, 0x1af6, 0x17b9, 0x6a2, 0x1c22, 0x17c5, 0x1149, 0xa7, 0x151e, 0xe57, 0x4c2, 0x18cd, 0xbd2, 0x7a4, 0x7c6, 0x74a, 0xd2, 0x902, 0x68c, 0x21e, 0x1e44, 0x1f5a, 0x1d4c, 0x115b, 0x1777, 0x16d4, 0x503, 0x3af, 0x7e4, 0x1aa7, 0x3dd, 0x827, 0x186b, 0x765, 0x1fc5, 0xc78, 0x9bd, 0xfe}} #elif RADIX == 32 -{0x10ae12d6, 0x13af6432, 0x88d457b, 0xa4df178, 0x151e053c, 0x14984e57, 0x122f4b19, 0x14a3e31e, 0x12040d23, 0x878d18, 0xcfad791, 0xef15bea, 0x140eda97, 0x13f20ebc, 0xe3ddd53, 0x1970d682, 0x3c7f14e, 0x4eb} +{{0x10ae12d6, 0x13af6432, 0x88d457b, 0xa4df178, 0x151e053c, 0x14984e57, 0x122f4b19, 0x14a3e31e, 0x12040d23, 0x878d18, 0xcfad791, 0xef15bea, 0x140eda97, 0x13f20ebc, 0xe3ddd53, 0x1970d682, 0x3c7f14e, 0x4eb}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x457b9d7b21942b84, 0x7814f149be2f088d, 0x22f4b19a4c272bd4, 0xc4810348e947c63d, 0x7d4cfad791043c68, 0x75e503b6a5dde2b, 0xe1ad04e3ddd539f9, 0x1326f58f1fc53b2} +{{0x457b9d7b21942b84, 0x7814f149be2f088d, 0x22f4b19a4c272bd4, 0xc4810348e947c63d, 0x7d4cfad791043c68, 0x75e503b6a5dde2b, 0xe1ad04e3ddd539f9, 0x1326f58f1fc53b2}} #else -{0xf73af643285709, 0xf149be2f088d45, 0xcd261395ea3c0a, 0x3a51f18f48bd2c, 0x20878d18902069, 0x1dde2b7d4cfad79, 0x1cfc83af281db52, 0xcb86b4138f7754, 0xb4deb1e3f8a7} +{{0xf73af643285709, 0xf149be2f088d45, 0xcd261395ea3c0a, 0x3a51f18f48bd2c, 0x20878d18902069, 0x1dde2b7d4cfad79, 0x1cfc83af281db52, 0xcb86b4138f7754, 0xb4deb1e3f8a7}} #endif #endif , #if 0 #elif RADIX == 16 -{0x6ac, 0x25e, 0xc7a, 0x1492, 0xd01, 0xbc0, 0x118, 0x376, 0x3e0, 0x7ae, 0x573, 0x171f, 0x35a, 0x1725, 0x48f, 0xc94, 0x133c, 0x16a4, 0x10a8, 0x178d, 0xdd7, 0x798, 0x1d05, 0x39f, 0xc2a, 0x179c, 0x407, 0xd3, 0x118a, 0x1c9f, 0xeac, 0x145b, 0xc35, 0x11a2, 0x58b, 0xe4, 0x5e3, 0xae7, 0x330} +{{0x6ac, 0x25e, 0xc7a, 0x1492, 0xd01, 0xbc0, 0x118, 0x376, 0x3e0, 0x7ae, 0x573, 0x171f, 0x35a, 0x1725, 0x48f, 0xc94, 0x133c, 0x16a4, 0x10a8, 0x178d, 0xdd7, 0x798, 0x1d05, 0x39f, 0xc2a, 0x179c, 0x407, 0xd3, 0x118a, 0x1c9f, 0xeac, 0x145b, 0xc35, 0x11a2, 0x58b, 0xe4, 0x5e3, 0xae7, 0x330}} #elif RADIX == 32 -{0x3563c78, 0x4c7a12f, 0x101a0349, 0x1bb04617, 0x7ae1f00, 0xae3e573, 0x7dc946b, 0x13c64a12, 0x1516a49, 0x375ef1b, 0x1fe829e6, 0x138c2a1c, 0x34c80f7, 0xe4fc628, 0xb45b756, 0x2e344c3, 0xf18390b, 0x339} +{{0x3563c78, 0x4c7a12f, 0x101a0349, 0x1bb04617, 0x7ae1f00, 0xae3e573, 0x7dc946b, 0x13c64a12, 0x1516a49, 0x375ef1b, 0x1fe829e6, 0x138c2a1c, 0x34c80f7, 0xe4fc628, 0xb45b756, 0x2e344c3, 0xf18390b, 0x339}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x349263d0978d58f, 0xb87c037608c2f01a, 0x7dc946b571f2b99e, 0xd8545a92678c9424, 0x439fe829e61baf78, 0xe3140d3203de7185, 0xc68986b45b756727, 0x32b9cbc60e42c5} +{{0x349263d0978d58f, 0xb87c037608c2f01a, 0x7dc946b571f2b99e, 0xd8545a92678c9424, 0x439fe829e61baf78, 0xe3140d3203de7185, 
0xc68986b45b756727, 0x32b9cbc60e42c5}} #else -{0x924c7a12f1ab1e, 0x37608c2f01a03, 0x15ab8f95ccf5c3e, 0x99e325091f7251, 0xc375ef1b0a8b52, 0x1e7185439fe829e, 0x1393f18a069901e, 0x1171a261ad16dd5, 0x6573978c1c85} +{{0x924c7a12f1ab1e, 0x37608c2f01a03, 0x15ab8f95ccf5c3e, 0x99e325091f7251, 0xc375ef1b0a8b52, 0x1e7185439fe829e, 0x1393f18a069901e, 0x1171a261ad16dd5, 0x6573978c1c85}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x19da, 0x19cd, 0x19e2, 0x5ea, 0x1079, 0x11ba, 0x1f5e, 0x228, 0x1a45, 0x16ee, 0x18a1, 0x11eb, 0x127a, 0x1d6f, 0x106f, 0x118f, 0x1d0c, 0x1571, 0x1b2d, 0xb60, 0xb27, 0xe1f, 0xe58, 0xe01, 0x4f4, 0x183, 0x13a9, 0x1584, 0x5cb, 0xcce, 0x1ce7, 0x4da, 0x1e62, 0x1213, 0x7fe, 0x1e6, 0x17d, 0x350, 0x3a0} +{{0x19da, 0x19cd, 0x19e2, 0x5ea, 0x1079, 0x11ba, 0x1f5e, 0x228, 0x1a45, 0x16ee, 0x18a1, 0x11eb, 0x127a, 0x1d6f, 0x106f, 0x118f, 0x1d0c, 0x1571, 0x1b2d, 0xb60, 0xb27, 0xe1f, 0xe58, 0xe01, 0x4f4, 0x183, 0x13a9, 0x1584, 0x5cb, 0xcce, 0x1ce7, 0x4da, 0x1e62, 0x1213, 0x7fe, 0x1e6, 0x17d, 0x350, 0x3a0}} #elif RADIX == 32 -{0x1ced44bf, 0x159e2ce6, 0xea0f25e, 0x1147d7a3, 0x16eed228, 0xa3d78a1, 0x17f5be4f, 0x10c8c7c1, 0x165b571e, 0x1ac9d6c1, 0x172c387, 0x1064f470, 0x16127521, 0x1667172e, 0x44dae73, 0x1fa427e6, 0xbe8798f, 0x800} +{{0x1ced44bf, 0x159e2ce6, 0xea0f25e, 0x1147d7a3, 0x16eed228, 0xa3d78a1, 0x17f5be4f, 0x10c8c7c1, 0x165b571e, 0x1ac9d6c1, 0x172c387, 0x1064f470, 0x16127521, 0x1667172e, 0x44dae73, 0x1fa427e6, 0xbe8798f, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf25eacf167373b51, 0xbb48a228faf46ea0, 0x7f5be4f51ebc50db, 0xd96d5c7a1918f83, 0x8e0172c387d64eb6, 0x8b975849d4860c9e, 0x484fcc44dae73b33, 0x50d402fa1e63ff} +{{0xf25eacf167373b51, 
0xbb48a228faf46ea0, 0x7f5be4f51ebc50db, 0xd96d5c7a1918f83, 0x8e0172c387d64eb6, 0x8b975849d4860c9e, 0x484fcc44dae73b33, 0x50d402fa1e63ff}} #else -{0xbd59e2ce6e76a2, 0xa228faf46ea0f2, 0x7a8f5e286ddda4, 0x1e86463e0dfd6f9, 0xfac9d6c1b2dab8, 0x60c9e8e0172c38, 0x1d99c5cbac24ea4, 0x1fd213f31136b9c, 0xa1a805f43cc7} +{{0xbd59e2ce6e76a2, 0xa228faf46ea0f2, 0x7a8f5e286ddda4, 0x1e86463e0dfd6f9, 0xfac9d6c1b2dab8, 0x60c9e8e0172c38, 0x1d99c5cbac24ea4, 0x1fd213f31136b9c, 0xa1a805f43cc7}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1dea, 0x1bbc, 0x9b0, 0x1066, 0x10fb, 0x1fe8, 0x1bca, 0x34d, 0x275, 0x42a, 0xc7b, 0x6e8, 0x1f5c, 0x12e5, 0x155d, 0x4f2, 0x1422, 0xfce, 0x603, 0x17a8, 0xd9f, 0x182d, 0x9fe, 0x3b1, 0x342, 0x1c21, 0x1aff, 0x1e38, 0x1ac8, 0x1c98, 0x51f, 0x897, 0xe23, 0x17e7, 0xced, 0x1e6, 0x125a, 0x18f3, 0x1b8} +{{0x1dea, 0x1bbc, 0x9b0, 0x1066, 0x10fb, 0x1fe8, 0x1bca, 0x34d, 0x275, 0x42a, 0xc7b, 0x6e8, 0x1f5c, 0x12e5, 0x155d, 0x4f2, 0x1422, 0xfce, 0x603, 0x17a8, 0xd9f, 0x182d, 0x9fe, 0x3b1, 0x342, 0x1c21, 0x1aff, 0x1e38, 0x1ac8, 0x1c98, 0x51f, 0x897, 0xe23, 0x17e7, 0xced, 0x1e6, 0x125a, 0x18f3, 0x1b8}} #elif RADIX == 32 -{0xef520a6, 0xc9b0dde, 0x1a21f706, 0x1a6ef2bf, 0x42a13a8, 0x10dd0c7b, 0xecb97eb, 0x2227955, 0xc06fcea, 0xb67ef50, 0x114ff60b, 0x423421d, 0x18e35ffc, 0x1e4c6b23, 0x689728f, 0x1b6fcee2, 0x12d07999, 0x69c} +{{0xef520a6, 0xc9b0dde, 0x1a21f706, 0x1a6ef2bf, 0x42a13a8, 0x10dd0c7b, 0xecb97eb, 0x2227955, 0xc06fcea, 0xb67ef50, 0x114ff60b, 0x423421d, 0x18e35ffc, 0x1e4c6b23, 0x689728f, 0x1b6fcee2, 0x12d07999, 0x69c}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf70664d86ef3bd48, 0xa84ea34dde57fa21, 0xecb97eb86e863d90, 0x8301bf3a8444f2aa, 0x43b14ff60b5b3f7a, 0x3591e38d7ff08468, 0xdf9dc4689728ff26, 0x463ce4b41e6676} +{{0xf70664d86ef3bd48, 0xa84ea34dde57fa21, 0xecb97eb86e863d90, 0x8301bf3a8444f2aa, 0x43b14ff60b5b3f7a, 0x3591e38d7ff08468, 0xdf9dc4689728ff26, 0x463ce4b41e6676}} #else -{0xcc9b0dde77a90, 0xa34dde57fa21f7, 0x15c37431ec85427, 0xa1113caabb2e5f, 0x16b67ef506037e7, 0x10846843b14ff60, 0x1f931ac8f1c6bff, 0x1db7e7711a25ca3, 0x8c79c9683ccc} +{{0xcc9b0dde77a90, 0xa34dde57fa21f7, 0x15c37431ec85427, 0xa1113caabb2e5f, 0x16b67ef506037e7, 0x10846843b14ff60, 0x1f931ac8f1c6bff, 0x1db7e7711a25ca3, 0x8c79c9683ccc}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -480,261 +480,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x4d6, 0x18fd, 0x18c0, 0x13c1, 0x1718, 0x122c, 0x830, 0xa02, 0xee0, 0x1ad1, 0x1485, 0x27f, 0x13e5, 0x118f, 0xdce, 0x31c, 0x1b49, 0x998, 0x117, 0x343, 0xdae, 0x1fe7, 0x16a0, 0x1c43, 0x172, 0xa6e, 0x702, 0xeb8, 0x835, 0x1cf, 0x48d, 0x4e4, 0x13eb, 0x57d, 0xccf, 0x97e, 0x3b6, 0x910, 0x2dd} +{{0x4d6, 0x18fd, 0x18c0, 0x13c1, 0x1718, 0x122c, 0x830, 0xa02, 0xee0, 0x1ad1, 0x1485, 0x27f, 0x13e5, 0x118f, 0xdce, 0x31c, 0x1b49, 0x998, 0x117, 0x343, 0xdae, 0x1fe7, 0x16a0, 0x1c43, 0x172, 0xa6e, 0x702, 0xeb8, 0x835, 0x1cf, 0x48d, 0x4e4, 0x13eb, 0x57d, 0xccf, 0x97e, 0x3b6, 0x910, 0x2dd}} #elif RADIX == 32 -{0x126b3651, 0x38c0c7e, 0xb2e313c, 0x10120c24, 0x1ad17702, 0x144ff485, 0x7463e7c, 0x14918e37, 0x22e998d, 0x1b6b8686, 0x3b507f9, 0xdc172e2, 0x1ae0e04a, 0x10e7a0d5, 0x164e4246, 0x13cafb3e, 0x1db25f99, 0x300} +{{0x126b3651, 0x38c0c7e, 0xb2e313c, 0x10120c24, 0x1ad17702, 0x144ff485, 0x7463e7c, 0x14918e37, 0x22e998d, 0x1b6b8686, 0x3b507f9, 0xdc172e2, 0x1ae0e04a, 0x10e7a0d5, 0x164e4246, 0x13cafb3e, 0x1db25f99, 0x300}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x313c1c6063f49acd, 0x45dc0a0241848b2e, 0x7463e7ca27fa42eb, 0x308ba66369231c6e, 0x5c43b507f9db5c34, 0xd06aeb838129b82e, 0x95f67d64e4246873, 0xfa44076c97e667} +{{0x313c1c6063f49acd, 0x45dc0a0241848b2e, 0x7463e7ca27fa42eb, 0x308ba66369231c6e, 0x5c43b507f9db5c34, 0xd06aeb838129b82e, 0x95f67d64e4246873, 0xfa44076c97e667}} #else -{0x7838c0c7e9359b, 0xa0241848b2e31, 0x1e513fd2175a2ee, 0xda48c71b9d18f9, 0x13b6b86861174cc, 0x9b82e5c43b507f, 0x1439e83575c1c09, 0x19e57d9f5939091, 0x44880ed92fcc} +{{0x7838c0c7e9359b, 0xa0241848b2e31, 0x1e513fd2175a2ee, 0xda48c71b9d18f9, 0x13b6b86861174cc, 0x9b82e5c43b507f, 0x1439e83575c1c09, 0x19e57d9f5939091, 0x44880ed92fcc}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x937, 0x63f, 0xe30, 0x4f0, 0x5c6, 0x48b, 0x120c, 0x280, 0xbb8, 0xeb4, 0x1d21, 0x89f, 0x1cf9, 0x1463, 0x373, 0x8c7, 0x6d2, 0x1a66, 0x1845, 0x10d0, 0x1b6b, 0x7f9, 0x1da8, 0x1710, 0x105c, 0x129b, 0x1c0, 0xbae, 0x1a0d, 0x873, 0x123, 0x1939, 0xcfa, 0x195f, 0x1333, 0x125f, 0xed, 0xa44, 0x697} +{{0x937, 0x63f, 0xe30, 0x4f0, 0x5c6, 0x48b, 0x120c, 0x280, 0xbb8, 0xeb4, 0x1d21, 0x89f, 0x1cf9, 0x1463, 0x373, 0x8c7, 0x6d2, 0x1a66, 0x1845, 0x10d0, 0x1b6b, 0x7f9, 0x1da8, 0x1710, 0x105c, 0x129b, 0x1c0, 0xbae, 0x1a0d, 0x873, 0x123, 0x1939, 0xcfa, 0x195f, 0x1333, 0x125f, 0xed, 0xa44, 0x697}} #elif RADIX == 32 -{0x149bfcfc, 0xe3031f, 0x2cb8c4f, 0x14048309, 0xeb45dc0, 0x513fd21, 0x19d18f9f, 0xd24638d, 0x108ba663, 0xedae1a1, 0x10ed41fe, 0x13705cb8, 0xeb83812, 0x1439e835, 0x15939091, 0xcf2becf, 0x76c97e6, 0x820} +{{0x149bfcfc, 0xe3031f, 0x2cb8c4f, 0x14048309, 0xeb45dc0, 0x513fd21, 0x19d18f9f, 0xd24638d, 0x108ba663, 0xedae1a1, 0x10ed41fe, 0x13705cb8, 0xeb83812, 0x1439e835, 0x15939091, 0xcf2becf, 0x76c97e6, 0x820}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x8c4f071818fd26ff, 0xd1770280906122cb, 0x9d18f9f289fe90ba, 0xc22e998da48c71b, 0x9710ed41fe76d70d, 0xf41abae0e04a6e0b, 0xe57d9f5939091a1c, 0x6a9101db25f999} +{{0x8c4f071818fd26ff, 0xd1770280906122cb, 0x9d18f9f289fe90ba, 0xc22e998da48c71b, 0x9710ed41fe76d70d, 0xf41abae0e04a6e0b, 0xe57d9f5939091a1c, 0x6a9101db25f999}} #else -{0x9e0e3031fa4dfe, 0x10280906122cb8c, 0xf944ff485d68bb, 0x369231c6e7463e, 0x1cedae1a1845d33, 0xa6e0b9710ed41f, 0xd0e7a0d5d70702, 0x6795f67d64e424, 0xd52203b64bf3} +{{0x9e0e3031fa4dfe, 0x10280906122cb8c, 0xf944ff485d68bb, 0x369231c6e7463e, 0x1cedae1a1845d33, 0xa6e0b9710ed41f, 0xd0e7a0d5d70702, 0x6795f67d64e424, 0xd52203b64bf3}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x1863, 0x635, 0x19a9, 0x17fc, 0xdfe, 0x1784, 0x150b, 0x16c3, 0x15c0, 0x1f5f, 0x11d9, 0x1064, 0x1893, 0x1829, 0x211, 0x1a9e, 0x2e1, 0x3cc, 0x1e64, 0x12ed, 0x1c2c, 0x18b9, 0x121d, 0x234, 0xec9, 0x14dc, 0x4b6, 0xaad, 0x19f6, 0x805, 0x1984, 0x1843, 0xfca, 0x1a7a, 0xe04, 0x4af, 0x881, 0x65b, 0x421} +{{0x1863, 0x635, 0x19a9, 0x17fc, 0xdfe, 0x1784, 0x150b, 0x16c3, 0x15c0, 0x1f5f, 0x11d9, 0x1064, 0x1893, 0x1829, 0x211, 0x1a9e, 0x2e1, 0x3cc, 0x1e64, 0x12ed, 0x1c2c, 0x18b9, 0x121d, 0x234, 0xec9, 0x14dc, 0x4b6, 0xaad, 0x19f6, 0x805, 0x1984, 0x1843, 0xfca, 0x1a7a, 0xe04, 0x4af, 0x881, 0x65b, 0x421}} #elif RADIX == 32 -{0x1c31ce4f, 0x199a931a, 0x11bfd7f, 0x161d42ef, 0x1f5fae05, 0xe0c91d9, 0x8e0a712, 0xe1d4f08, 0x1cc83cc1, 0xf0b25db, 0x1490ee2e, 0x1b8ec911, 0xab496d4, 0x402e7d9, 0x15843cc2, 0x134f4fc, 0x4092bdc, 0x85a} +{{0x1c31ce4f, 0x199a931a, 0x11bfd7f, 0x161d42ef, 0x1f5fae05, 0xe0c91d9, 0x8e0a712, 0xe1d4f08, 0x1cc83cc1, 0xf0b25db, 
0x1490ee2e, 0x1b8ec911, 0xab496d4, 0x402e7d9, 0x15843cc2, 0x134f4fc, 0x4092bdc, 0x85a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xfd7fccd498d70c73, 0x7eb816c3a85de11b, 0x8e0a71270648ecfd, 0xdf320f305c3a9e10, 0x223490ee2e78592e, 0x73ecaad25b5371d9, 0x69e9f95843cc2201, 0xf996d1024af702} +{{0xfd7fccd498d70c73, 0x7eb816c3a85de11b, 0x8e0a71270648ecfd, 0xdf320f305c3a9e10, 0x223490ee2e78592e, 0x73ecaad25b5371d9, 0x69e9f95843cc2201, 0xf996d1024af702}} #else -{0xff99a931ae18e7, 0x16c3a85de11bfd, 0x938324767ebf5c, 0x170ea78423829c, 0x1cf0b25dbe641e6, 0x1371d9223490ee2, 0x1100b9f655692da, 0x9a7a7e5610f30, 0x432da20495ee} +{{0xff99a931ae18e7, 0x16c3a85de11bfd, 0x938324767ebf5c, 0x170ea78423829c, 0x1cf0b25dbe641e6, 0x1371d9223490ee2, 0x1100b9f655692da, 0x9a7a7e5610f30, 0x432da20495ee}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1a7, 0x175b, 0x9bd, 0xb94, 0x1a66, 0x1d52, 0x1eb3, 0x1431, 0x9e7, 0x1b9d, 0x75f, 0xcba, 0x17e9, 0xe1d, 0xdb, 0xc7b, 0x76, 0xa04, 0xd73, 0x3f7, 0x17dd, 0x1555, 0x5d6, 0x16ee, 0x1df6, 0x1429, 0x15cb, 0x140b, 0x1aeb, 0x14fb, 0x1984, 0x179b, 0x1ba1, 0x125e, 0xb62, 0x249, 0x95a, 0x137a, 0x7c} +{{0x1a7, 0x175b, 0x9bd, 0xb94, 0x1a66, 0x1d52, 0x1eb3, 0x1431, 0x9e7, 0x1b9d, 0x75f, 0xcba, 0x17e9, 0xe1d, 0xdb, 0xc7b, 0x76, 0xa04, 0xd73, 0x3f7, 0x17dd, 0x1555, 0x5d6, 0x16ee, 0x1df6, 0x1429, 0x15cb, 0x140b, 0x1aeb, 0x14fb, 0x1984, 0x179b, 0x1ba1, 0x125e, 0xb62, 0x249, 0x95a, 0x137a, 0x7c}} #elif RADIX == 32 -{0x10d3893a, 0x89bdbad, 0x14b4ccb9, 0x18facfa, 0x1b9d4f3d, 0x597475f, 0xdb876fd, 0x7663d83, 0x1ae6a040, 0xdf747ee, 0xe2eb555, 0x53df6b7, 0x102eb974, 0xa7debae, 0x379bcc2, 0x18a4bdba, 0xad09256, 0xcd2} +{{0x10d3893a, 0x89bdbad, 0x14b4ccb9, 0x18facfa, 0x1b9d4f3d, 0x597475f, 0xdb876fd, 0x7663d83, 0x1ae6a040, 0xdf747ee, 0xe2eb555, 0x53df6b7, 0x102eb974, 0xa7debae, 0x379bcc2, 0x18a4bdba, 0xad09256, 0xcd2}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xccb944dedd6c34e2, 0x753cf431f59f54b4, 0xdb876fd2cba3afee, 0x76b9a8100ecc7b06, 0xd6ee2eb5556fba3f, 0xf5d740bae5d0a7be, 0x497b74379bcc253e, 0x84de92b42495b1} +{{0xccb944dedd6c34e2, 0x753cf431f59f54b4, 0xdb876fd2cba3afee, 0x76b9a8100ecc7b06, 0xd6ee2eb5556fba3f, 0xf5d740bae5d0a7be, 0x497b74379bcc253e, 0x84de92b42495b1}} #else -{0x17289bdbad869c4, 0xf431f59f54b4cc, 0x1e965d1d7f73a9e, 0x3b31ec1b6e1db, 0xadf747eed73502, 0x10a7bed6ee2eb55, 0x129f7aeba05d72e, 0xc525edd0de6f30, 0x109bd2568492b} +{{0x17289bdbad869c4, 0xf431f59f54b4cc, 0x1e965d1d7f73a9e, 0x3b31ec1b6e1db, 0xadf747eed73502, 0x10a7bed6ee2eb55, 0x129f7aeba05d72e, 0xc525edd0de6f30, 0x109bd2568492b}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 
0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1d6a, 0x5b, 0x24a, 0x1bfc, 0x1cef, 0xc7e, 0x1cac, 0x1e4, 0x68, 0x16da, 0x30d, 0x13a5, 0x505, 0x329, 0x9f4, 0x1dae, 0x371, 0x111b, 0x200, 0x1b69, 0x1e51, 0x3b7, 0x316, 0x509, 0x1af2, 0x1220, 0x8c2, 0x195a, 0x1050, 0x1b7a, 0xd8b, 0x1a21, 0x336, 0x14fa, 0x1a4b, 0x11d, 0x167d, 0x1501, 0x302} +{{0x1d6a, 0x5b, 0x24a, 0x1bfc, 0x1cef, 0xc7e, 0x1cac, 0x1e4, 0x68, 0x16da, 0x30d, 0x13a5, 0x505, 0x329, 0x9f4, 0x1dae, 0x371, 0x111b, 0x200, 0x1b69, 0x1e51, 0x3b7, 0x316, 0x509, 0x1af2, 0x1220, 0x8c2, 0x195a, 0x1050, 0x1b7a, 0xd8b, 0x1a21, 0x336, 0x14fa, 0x1a4b, 0x11d, 0x167d, 0x1501, 0x302}} #elif RADIX == 32 -{0x1eb53915, 0x1824a02d, 0x1fb9dfbf, 0xf272b18, 0x16da0340, 0x1674a30d, 0x1a0ca4a0, 0x171ed727, 0x40111b1, 0x1f9476d2, 0x918b0ed, 0x41af228, 0x5691852, 0x1dbd4143, 0xda216c5, 0x12e9f433, 0x13e84774, 0xc8d} +{{0x1eb53915, 0x1824a02d, 0x1fb9dfbf, 0xf272b18, 0x16da0340, 0x1674a30d, 0x1a0ca4a0, 0x171ed727, 0x40111b1, 0x1f9476d2, 0x918b0ed, 0x41af228, 0x5691852, 0x1dbd4143, 0xda216c5, 0x12e9f433, 0x13e84774, 0xc8d}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xdfbfc125016fad4e, 0x680d01e4e5631fb9, 0xa0ca4a0b3a5186db, 0x9100446c6e3dae4f, 0x450918b0edfca3b6, 0xa0a195a46148835e, 0xd3e866da216c5ede, 0x75406cfa11dd25} +{{0xdfbfc125016fad4e, 0x680d01e4e5631fb9, 0xa0ca4a0b3a5186db, 0x9100446c6e3dae4f, 0x450918b0edfca3b6, 0xa0a195a46148835e, 0xd3e866da216c5ede, 0x75406cfa11dd25}} #else -{0x17f824a02df5a9c, 0x101e4e5631fb9df, 0x1059d28c36db406, 0x11b8f6b93e83292, 0x1bf9476d220088d, 0x8835e450918b0e, 0xf6f5050cad230a, 0x974fa19b6885b1, 0xea80d9f423ba} +{{0x17f824a02df5a9c, 0x101e4e5631fb9df, 0x1059d28c36db406, 0x11b8f6b93e83292, 0x1bf9476d220088d, 0x8835e450918b0e, 0xf6f5050cad230a, 0x974fa19b6885b1, 0xea80d9f423ba}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1e9d, 0xbb9, 0x14f9, 0xc51, 0x1731, 0x122e, 0x1901, 0x59a, 0xcc1, 0xb65, 0xc68, 0x1eaf, 0x1f48, 0x1e46, 0xe46, 0x9c1, 0x1013, 0x12f8, 0x18a, 0x177f, 0x1e19, 0x1cca, 0x257, 0x18b9, 0xa38, 0x184b, 0x15a4, 0x86d, 0xa8c, 0x1df5, 0xf2, 0x37, 0x5d9, 0x292, 0x11ae, 0x9e, 0x1fce, 0x7f4, 0x407} +{{0x1e9d, 0xbb9, 0x14f9, 0xc51, 0x1731, 0x122e, 0x1901, 0x59a, 0xcc1, 0xb65, 0xc68, 0x1eaf, 0x1f48, 0x1e46, 0xe46, 0x9c1, 0x1013, 0x12f8, 0x18a, 0x177f, 0x1e19, 0x1cca, 0x257, 0x18b9, 0xa38, 0x184b, 0x15a4, 0x86d, 0xa8c, 0x1df5, 0xf2, 0x37, 0x5d9, 0x292, 0x11ae, 0x9e, 0x1fce, 0x7f4, 0x407}} #elif RADIX == 32 -{0x1f4ecc63, 0x34f95dc, 0xbae62c5, 0xcd64064, 0xb656609, 0x3d5ec68, 0x3791be9, 0x134e0b9, 0x3152f88, 0x17866efe, 0x1912bf32, 0x96a38c5, 0x1b6b498, 0xefaaa31, 0x12037079, 0xb85245d, 0x1e7027a3, 0x727} +{{0x1f4ecc63, 0x34f95dc, 0xbae62c5, 0xcd64064, 0xb656609, 
0x3d5ec68, 0x3791be9, 0x134e0b9, 0x3152f88, 0x17866efe, 0x1912bf32, 0x96a38c5, 0x1b6b498, 0xefaaa31, 0x12037079, 0xb85245d, 0x1e7027a3, 0x727}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x62c51a7caee7d3b3, 0x9598259ac80c8bae, 0x3791be91eaf6342d, 0xf0c54be20269c172, 0x18b912bf32bc3377, 0x551886dad2612d47, 0xa48bb203707977d, 0x29fd3f9c09e8d7} +{{0x62c51a7caee7d3b3, 0x9598259ac80c8bae, 0x3791be91eaf6342d, 0xf0c54be20269c172, 0x18b912bf32bc3377, 0x551886dad2612d47, 0xa48bb203707977d, 0x29fd3f9c09e8d7}} #else -{0x18a34f95dcfa766, 0x259ac80c8bae62, 0x148f57b1a16cacc, 0x809a705c8de46f, 0x57866efe18a97c, 0x12d4718b912bf3, 0xbbeaa8c436d693, 0x15c2922ec80dc1e, 0x53fa7f3813d1} +{{0x18a34f95dcfa766, 0x259ac80c8bae62, 0x148f57b1a16cacc, 0x809a705c8de46f, 0x57866efe18a97c, 0x12d4718b912bf3, 0xbbeaa8c436d693, 0x15c2922ec80dc1e, 0x53fa7f3813d1}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x177, 0xf70, 0x25, 0x503, 0x1f96, 0x1abd, 0x6f5, 0x115b, 0xa68, 0x1192, 0x338, 0x1bae, 0x15af, 0x1570, 0xb79, 0x1c9a, 0xe78, 0x19de, 0x860, 0x1076, 0x1a63, 0x1d52, 0x1511, 0x10c5, 0x1fdf, 0xab1, 0x1454, 0x2c4, 0x292, 0x1135, 0x273, 0x1d, 0xefa, 0x47, 0x344, 0x226, 0x9c1, 0x1af, 0x639} +{{0x177, 0xf70, 0x25, 0x503, 0x1f96, 0x1abd, 0x6f5, 0x115b, 0xa68, 0x1192, 0x338, 0x1bae, 0x15af, 0x1570, 0xb79, 0x1c9a, 0xe78, 0x19de, 0x860, 0x1076, 0x1a63, 0x1d52, 0x1511, 0x10c5, 0x1fdf, 0xab1, 0x1454, 0x2c4, 0x292, 0x1135, 0x273, 0x1d, 0xefa, 0x47, 0x344, 0x226, 0x9c1, 0x1af, 0x639}} #elif RADIX == 32 -{0xbbf600, 0x60257b8, 0xf7f2c50, 0xad9bd75, 0x11925344, 0x1f75c338, 0x1cd5c2b5, 0x78e4d2d, 0x10c19de7, 0x1698e0ec, 0x5a88f54, 0x163fdf86, 0xb128a8a, 0x189a8a48, 0x1401d139, 0x11008eef, 0xe088986, 0xd7a} 
+{{0xbbf600, 0x60257b8, 0xf7f2c50, 0xad9bd75, 0x11925344, 0x1f75c338, 0x1cd5c2b5, 0x78e4d2d, 0x10c19de7, 0x1698e0ec, 0x5a88f54, 0x163fdf86, 0xb128a8a, 0x189a8a48, 0x1401d139, 0x11008eef, 0xe088986, 0xd7a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x2c503012bdc02efd, 0x494d115b37aeaf7f, 0xcd5c2b5fbae19c46, 0x64306779cf1c9a5b, 0xf0c5a88f54b4c707, 0x45242c4a2a2ac7fb, 0x11ddf401d139c4d, 0xd86bd3822261a2} +{{0x2c503012bdc02efd, 0x494d115b37aeaf7f, 0xcd5c2b5fbae19c46, 0x64306779cf1c9a5b, 0xf0c5a88f54b4c707, 0x45242c4a2a2ac7fb, 0x11ddf401d139c4d, 0xd86bd3822261a2}} #else -{0xa060257b805dfb, 0x1115b37aeaf7f2c, 0x1afdd70ce2324a6, 0x73c72696f3570a, 0x9698e0ec860cef, 0xac7fbf0c5a88f5, 0xe26a2921625151, 0x8804777d00744e, 0xd7a70444c3} +{{0xa060257b805dfb, 0x1115b37aeaf7f2c, 0x1afdd70ce2324a6, 0x73c72696f3570a, 0x9698e0ec860cef, 0xac7fbf0c5a88f5, 0xe26a2921625151, 0x8804777d00744e, 0xd7a70444c3}} #endif #endif , #if 0 #elif RADIX == 16 -{0x153b, 0x598, 0x100c, 0x1537, 0x1eda, 0x190b, 0x1406, 0x186e, 0x457, 0x469, 0x14a0, 0x1ce0, 0x1f6d, 0xf2f, 0x1837, 0x616, 0x16d0, 0xf35, 0x192b, 0x106, 0x17d6, 0x6b3, 0x169e, 0x27a, 0xe54, 0xa42, 0x1694, 0x16c3, 0x7b, 0x298, 0x118, 0xb0, 0x893, 0xbca, 0x1678, 0x19de, 0xb59, 0x3a, 0x43} +{{0x153b, 0x598, 0x100c, 0x1537, 0x1eda, 0x190b, 0x1406, 0x186e, 0x457, 0x469, 0x14a0, 0x1ce0, 0x1f6d, 0xf2f, 0x1837, 0x616, 0x16d0, 0xf35, 0x192b, 0x106, 0x17d6, 0x6b3, 0x169e, 0x27a, 0xe54, 0xa42, 0x1694, 0x16c3, 0x7b, 0x298, 0x118, 0xb0, 0x893, 0xbca, 0x1678, 0x19de, 0xb59, 0x3a, 0x43}} #elif RADIX == 32 -{0xa9d84f6, 0xf00c2cc, 0x2fdb553, 0x37501b2, 0x46922be, 0x179c14a0, 0x1bbcbfed, 0xd030b60, 0x1256f35b, 0x1df5820d, 0x1ab4f1ac, 0x84e5413, 0x1b0ed28a, 0x14c01ee, 0x60b008c, 0x1e179489, 0x1ace77ac, 0x8d2} +{{0xa9d84f6, 0xf00c2cc, 0x2fdb553, 0x37501b2, 0x46922be, 0x179c14a0, 0x1bbcbfed, 0xd030b60, 0x1256f35b, 0x1df5820d, 0x1ab4f1ac, 0x84e5413, 0x1b0ed28a, 0x14c01ee, 0x60b008c, 0x1e179489, 0x1ace77ac, 0x8d2}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xb55378061662a761, 0xa48af86ea03642fd, 0xbbcbfedbce0a5011, 0x6c95bcd6da0616c1, 0x827ab4f1acefac10, 0xf76c3b4a2909ca, 0x2f291260b008c0a6, 0x680e96b39deb3c} +{{0xb55378061662a761, 0xa48af86ea03642fd, 0xbbcbfedbce0a5011, 0x6c95bcd6da0616c1, 0x827ab4f1acefac10, 0xf76c3b4a2909ca, 0x2f291260b008c0a6, 0x680e96b39deb3c}} #else -{0xa6f00c2cc54ec2, 0xf86ea03642fdb5, 0x16de7052808d245, 0x1b68185b06ef2ff, 0x19df5820d92b79a, 0x909ca827ab4f1a, 0x53007bb61da51, 0xf0bca44982c023, 0xd01d2d673bd6} +{{0xa6f00c2cc54ec2, 0xf86ea03642fdb5, 0x16de7052808d245, 0x1b68185b06ef2ff, 0x19df5820d92b79a, 0x909ca827ab4f1a, 0x53007bb61da51, 0xf0bca44982c023, 0xd01d2d673bd6}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -956,261 +956,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x185f, 0xecc, 0x21c, 0xa62, 0x77, 0x8b1, 0x1188, 0xec2, 0xda1, 0xbf0, 0xb11, 0x82d, 0x1fe9, 0x10d4, 0x1e36, 0x194e, 0x77e, 0x1112, 0x14c0, 0x1dcd, 0x1be7, 0x1a4c, 0x140e, 0x10c5, 0x1aaf, 0x236, 0xdf3, 0x1a21, 0x1dff, 0x15bb, 0xda8, 0x30e, 0x17b0, 0xc7b, 0xd1, 0xcb, 0x868, 0x2e5, 0x5a} +{{0x185f, 0xecc, 0x21c, 0xa62, 0x77, 0x8b1, 0x1188, 0xec2, 0xda1, 0xbf0, 0xb11, 0x82d, 0x1fe9, 0x10d4, 0x1e36, 0x194e, 0x77e, 0x1112, 0x14c0, 0x1dcd, 0x1be7, 0x1a4c, 0x140e, 0x10c5, 0x1aaf, 0x236, 0xdf3, 0x1a21, 0x1dff, 0x15bb, 0xda8, 0x30e, 0x17b0, 0xc7b, 0xd1, 0xcb, 0x868, 0x2e5, 0x5a}} #elif RADIX == 32 -{0xc2f86ac, 0x421c766, 0xc40eea6, 0x16146211, 0xbf06d0b, 0x505ab11, 0x1b4353fd, 0x17eca778, 0x9811123, 0x6f9fb9b, 0x5a07693, 0x6daaf86, 0x885be62, 0xaddf7ff, 0x30e6d4, 0x1458f77b, 0x34032c1, 0x52a} +{{0xc2f86ac, 0x421c766, 0xc40eea6, 0x16146211, 0xbf06d0b, 0x505ab11, 0x1b4353fd, 0x17eca778, 0x9811123, 0x6f9fb9b, 0x5a07693, 0x6daaf86, 0x885be62, 0xaddf7ff, 0x30e6d4, 0x1458f77b, 0x34032c1, 0x52a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xeea6210e3b330be1, 0xc1b42ec28c422c40, 0xb4353fd282d588af, 0xda604448efd94ef1, 0xf0c5a0769337cfdc, 0xfbffa216f988db55, 0xb1eef6030e6d456e, 0x120b950d00cb068} +{{0xeea6210e3b330be1, 0xc1b42ec28c422c40, 0xb4353fd282d588af, 0xda604448efd94ef1, 0xf0c5a0769337cfdc, 0xfbffa216f988db55, 0xb1eef6030e6d456e, 0x120b950d00cb068}} #else -{0x14c421c766617c3, 0x2ec28c422c40ee, 0x1e9416ac457e0da, 0x3bf653bc6d0d4f, 0x66f9fb9b4c0889, 0x8db55f0c5a0769, 0x2b77dffd10b7cc, 0x1a2c7bbd80c39b5, 0x9172a1a01960} +{{0x14c421c766617c3, 0x2ec28c422c40ee, 0x1e9416ac457e0da, 0x3bf653bc6d0d4f, 0x66f9fb9b4c0889, 0x8db55f0c5a0769, 0x2b77dffd10b7cc, 0x1a2c7bbd80c39b5, 0x9172a1a01960}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x61a, 0x3b3, 0x1087, 0x1a98, 0x81d, 0x22c, 0x1462, 0xbb0, 0x368, 0xafc, 0xac4, 0xa0b, 0x7fa, 0x1435, 0x178d, 0x1653, 0x11df, 0x444, 0xd30, 0x1f73, 0x6f9, 0x1693, 0xd03, 0x1c31, 0x16ab, 0x188d, 0xb7c, 0x1e88, 0x1f7f, 0x56e, 0x136a, 0xc3, 0x1dec, 0xb1e, 0x1834, 0x32, 0xa1a, 0x10b9, 0xe6} +{{0x61a, 0x3b3, 0x1087, 0x1a98, 0x81d, 0x22c, 0x1462, 0xbb0, 0x368, 0xafc, 0xac4, 0xa0b, 0x7fa, 0x1435, 0x178d, 0x1653, 0x11df, 0x444, 0xd30, 0x1f73, 0x6f9, 0x1693, 0xd03, 0x1c31, 0x16ab, 0x188d, 0xb7c, 0x1e88, 0x1f7f, 0x56e, 0x136a, 0xc3, 0x1dec, 0xb1e, 0x1834, 0x32, 0xa1a, 0x10b9, 0xe6}} #elif RADIX == 32 -{0x130d1113, 0x110871d9, 0xb103ba9, 0x1d851884, 0xafc1b42, 0x9416ac4, 0x6d0d4ff, 0x1dfb29de, 0x1a604448, 0x19be7ee6, 0x11681da4, 0x11b6abe1, 0x1a216f98, 0x2b77dff, 0x180c39b5, 0xd163dde, 0x10d00cb0, 0x54a} +{{0x130d1113, 0x110871d9, 0xb103ba9, 0x1d851884, 0xafc1b42, 0x9416ac4, 0x6d0d4ff, 0x1dfb29de, 0x1a604448, 0x19be7ee6, 0x11681da4, 0x11b6abe1, 0x1a216f98, 0x2b77dff, 0x180c39b5, 0xd163dde, 0x10d00cb0, 0x54a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3ba988438eccc344, 0xf06d0bb0a3108b10, 0x6d0d4ff4a0b5622b, 0x369811123bf653bc, 0x7c31681da4cdf3f7, 0xbeffe885be6236d5, 0x2c7bbd80c39b515b, 0x742e5434032c1a} +{{0x3ba988438eccc344, 0xf06d0bb0a3108b10, 0x6d0d4ff4a0b5622b, 0x369811123bf653bc, 0x7c31681da4cdf3f7, 0xbeffe885be6236d5, 0x2c7bbd80c39b515b, 0x742e5434032c1a}} #else -{0x15310871d998688, 0x10bb0a3108b103b, 0x1fa505ab115f836, 0x8efd94ef1b4353, 
0x99be7ee6d30222, 0x236d57c31681da, 0x8addf7ff442df3, 0x68b1eef6030e6d, 0xe85ca8680658} +{{0x15310871d998688, 0x10bb0a3108b103b, 0x1fa505ab115f836, 0x8efd94ef1b4353, 0x99be7ee6d30222, 0x236d57c31681da, 0x8addf7ff442df3, 0x68b1eef6030e6d, 0xe85ca8680658}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0xa5a, 0x2ab, 0x659, 0x149f, 0xf1b, 0xa1a, 0xb05, 0x1915, 0x1aa8, 0x1aa0, 0x1c4d, 0xe2f, 0xe1c, 0x19ab, 0x1d34, 0xa8f, 0xf59, 0x1f1, 0xc6d, 0x520, 0xb6e, 0x127f, 0x5dd, 0x175a, 0x1957, 0x1ca4, 0x1563, 0x122f, 0x705, 0xcd6, 0x1c02, 0xdc1, 0x93b, 0x387, 0x1870, 0x54, 0x853, 0x1adc, 0x6bc} +{{0xa5a, 0x2ab, 0x659, 0x149f, 0xf1b, 0xa1a, 0xb05, 0x1915, 0x1aa8, 0x1aa0, 0x1c4d, 0xe2f, 0xe1c, 0x19ab, 0x1d34, 0xa8f, 0xf59, 0x1f1, 0xc6d, 0x520, 0xb6e, 0x127f, 0x5dd, 0x175a, 0x1957, 0x1ca4, 0x1563, 0x122f, 0x705, 0xcd6, 
0x1c02, 0xdc1, 0x93b, 0x387, 0x1870, 0x54, 0x853, 0x1adc, 0x6bc}} #elif RADIX == 32 -{0x152d7fc4, 0x1e659155, 0x69e3749, 0x8aac154, 0x1aa0d546, 0x11c5fc4d, 0x1a66adc3, 0x159547f4, 0x18da1f17, 0x1adb8a40, 0x1a2eec9f, 0x149957ba, 0x8beac7c, 0x66b1c16, 0x16dc1e01, 0x1c070e93, 0x2981530, 0xe2} +{{0x152d7fc4, 0x1e659155, 0x69e3749, 0x8aac154, 0x1aa0d546, 0x11c5fc4d, 0x1a66adc3, 0x159547f4, 0x18da1f17, 0x1adb8a40, 0x1a2eec9f, 0x149957ba, 0x8beac7c, 0x66b1c16, 0x16dc1e01, 0x1c070e93, 0x2981530, 0xe2}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3749f32c8aad4b5f, 0x83551915582a869e, 0xa66adc38e2fe26ea, 0x63687c5eb2a8fe9, 0xf75a2eec9fd6dc52, 0x8e0b22fab1f2932a, 0xe1d276dc1e01335, 0x196b710a6054c38} +{{0x3749f32c8aad4b5f, 0x83551915582a869e, 0xa66adc38e2fe26ea, 0x63687c5eb2a8fe9, 0xf75a2eec9fd6dc52, 0x8e0b22fab1f2932a, 0xe1d276dc1e01335, 0x196b710a6054c38}} #else -{0x93e659155a96bf, 0x11915582a869e37, 0x1c717f137541aa, 0x17acaa3fa699ab7, 0x1fadb8a40c6d0f8, 0x12932af75a2eec9, 0x99ac705917d58f, 0xe038749db70780, 0x17d6e214c0a98} +{{0x93e659155a96bf, 0x11915582a869e37, 0x1c717f137541aa, 0x17acaa3fa699ab7, 0x1fadb8a40c6d0f8, 0x12932af75a2eec9, 0x99ac705917d58f, 0xe038749db70780, 0x17d6e214c0a98}} #endif #endif , #if 0 #elif RADIX == 16 -{0x66e, 0xe79, 0xadd, 0x23, 0xf11, 0x7d6, 0x1091, 0x42a, 0x1885, 0x128, 0x6f9, 0xcdd, 0x1d55, 0x19bd, 0x116f, 0x1dbd, 0x107b, 0xaef, 0x8bc, 0xa74, 0x7b5, 0xdff, 0x743, 0x17e0, 0x453, 0x414, 0x672, 0xf28, 0x198a, 0x19c4, 0x1e85, 0xcb9, 0x17c2, 0x14c6, 0x1871, 0x1034, 0x6cb, 0x55b, 0xbf} +{{0x66e, 0xe79, 0xadd, 0x23, 0xf11, 0x7d6, 0x1091, 0x42a, 0x1885, 0x128, 0x6f9, 0xcdd, 0x1d55, 0x19bd, 0x116f, 0x1dbd, 0x107b, 0xaef, 0x8bc, 0xa74, 0x7b5, 0xdff, 0x743, 0x17e0, 0x453, 0x414, 0x672, 0xf28, 0x198a, 0x19c4, 0x1e85, 0xcb9, 0x17c2, 0x14c6, 0x1871, 0x1034, 0x6cb, 0x55b, 0xbf}} #elif RADIX == 32 -{0x13370e29, 0x6add73c, 0x159e2202, 0x154244f, 0x128c429, 0x159ba6f9, 0x17e6f7aa, 0x7bedec5, 0x1178aef8, 0x19ed54e8, 0x3a1b7f, 0x28453bf, 0x1ca0ce44, 0x1ce26629, 0x4cb9f42, 0x1c698d7c, 0x165c0d30, 0x159} +{{0x13370e29, 0x6add73c, 0x159e2202, 0x154244f, 0x128c429, 0x159ba6f9, 0x17e6f7aa, 0x7bedec5, 0x1178aef8, 0x19ed54e8, 0x3a1b7f, 0x28453bf, 0x1ca0ce44, 0x1ce26629, 0x4cb9f42, 0x1c698d7c, 0x165c0d30, 0x159}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x2202356eb9e4cdc3, 0xa310a42a8489f59e, 0x7e6f7aaacdd37c84, 0x445e2bbe0f7dbd8b, 0x77e03a1b7fcf6aa7, 0x3314f2833910508a, 0xd31af84cb9f42e71, 0xe956cd97034c38} +{{0x2202356eb9e4cdc3, 0xa310a42a8489f59e, 0x7e6f7aaacdd37c84, 0x445e2bbe0f7dbd8b, 0x77e03a1b7fcf6aa7, 0x3314f2833910508a, 0xd31af84cb9f42e71, 0xe956cd97034c38}} #else -{0x46add73c99b87, 0xa42a8489f59e22, 0x15566e9be425188, 0x183df6f62df9bde, 0x1f9ed54e88bc577, 0x10508a77e03a1b7, 0x1738998a79419c8, 0xe34c6be132e7d0, 0x22ad9b2e0698} +{{0x46add73c99b87, 0xa42a8489f59e22, 0x15566e9be425188, 0x183df6f62df9bde, 0x1f9ed54e88bc577, 0x10508a77e03a1b7, 0x1738998a79419c8, 0xe34c6be132e7d0, 0x22ad9b2e0698}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} 
+{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x165f, 0x1e7c, 0xe41, 0x12eb, 0xa1, 0x1655, 0x6db, 0x1dfc, 0x4a, 0xac7, 0x1dcb, 0x3d9, 0x16a0, 0x562, 0x1d70, 0x528, 0xaa7, 0x172e, 0x36c, 0x728, 0x1e76, 0x23f, 0x6e6, 0x53e, 0x1640, 0x1a82, 0x1b78, 0x1066, 0x895, 0x17eb, 0x1713, 0x174d, 0x679, 0x1415, 0x19a8, 0xe7c, 0x674, 0x1f81, 0x15} +{{0x165f, 0x1e7c, 0xe41, 0x12eb, 0xa1, 0x1655, 0x6db, 0x1dfc, 0x4a, 0xac7, 0x1dcb, 0x3d9, 0x16a0, 0x562, 0x1d70, 0x528, 0xaa7, 0x172e, 0x36c, 0x728, 0x1e76, 0x23f, 0x6e6, 0x53e, 0x1640, 0x1a82, 0x1b78, 0x1066, 0x895, 0x17eb, 0x1713, 0x174d, 0x679, 0x1415, 0x19a8, 0xe7c, 0x674, 0x1f81, 0x15}} #elif RADIX == 32 -{0xb2f81a0, 0x16e41f3e, 0x1541432e, 0xfe1b6ec, 0xac70257, 0x7b3dcb, 0x18158ad4, 0xa729475, 0x6d972e5, 0x1f9d8e50, 0x1e37308f, 0x10564029, 0x19b6f1a, 0x1bf5a256, 0x1374db89, 0xa282a67, 0x13a39f33, 0xc09} +{{0xb2f81a0, 0x16e41f3e, 0x1541432e, 0xfe1b6ec, 0xac70257, 0x7b3dcb, 0x18158ad4, 0xa729475, 0x6d972e5, 0x1f9d8e50, 0x1e37308f, 0x10564029, 0x19b6f1a, 0x1bf5a256, 0x1374db89, 0xa282a67, 0x13a39f33, 0xc09}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x432eb720f9f2cbe0, 0x1c095dfc36dd9541, 0x8158ad403d9ee5ab, 0x81b65cb954e528eb, 0x53e37308ffcec72, 0xd12b066dbc6a0ac8, 0x5054cf374db89dfa, 0xafe04ce8e7ccd4} +{{0x432eb720f9f2cbe0, 0x1c095dfc36dd9541, 0x8158ad403d9ee5ab, 0x81b65cb954e528eb, 0x53e37308ffcec72, 0xd12b066dbc6a0ac8, 0x5054cf374db89dfa, 0xafe04ce8e7ccd4}} #else -{0x5d6e41f3e597c0, 0x15dfc36dd954143, 0xa01ecf72d58e04, 0x55394a3ae0562b, 0x1ff9d8e5036cb97, 0xa0ac8053e37308, 0xefd68958336de3, 0x15141533cdd36e2, 0x15fc099d1cf99} +{{0x5d6e41f3e597c0, 0x15dfc36dd954143, 0xa01ecf72d58e04, 0x55394a3ae0562b, 0x1ff9d8e5036cb97, 0xa0ac8053e37308, 0xefd68958336de3, 0x15141533cdd36e2, 0x15fc099d1cf99}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1e32, 0x1f7c, 0x1c05, 0x372, 0x34a, 0x1d26, 0x11b9, 0x294, 0xa87, 0x1835, 0x158f, 0x1d19, 0x13e8, 0x4dc, 0x1e1a, 0x195f, 0x116e, 0x62c, 0x1839, 0x107a, 0xa4f, 0x119f, 0x18f3, 0xc48, 0x1c7a, 0x100d, 0x2e9, 0x12df, 0xbec, 0x6f1, 0x8bf, 0xe24, 0xa57, 0x50c, 0x28b, 0x31e, 0x430, 0x1b08, 0x378} +{{0x1e32, 0x1f7c, 0x1c05, 0x372, 0x34a, 0x1d26, 0x11b9, 0x294, 0xa87, 0x1835, 0x158f, 0x1d19, 0x13e8, 0x4dc, 0x1e1a, 0x195f, 0x116e, 0x62c, 0x1839, 0x107a, 
0xa4f, 0x119f, 0x18f3, 0xc48, 0x1c7a, 0x100d, 0x2e9, 0x12df, 0xbec, 0x6f1, 0x8bf, 0xe24, 0xa57, 0x50c, 0x28b, 0x31e, 0x430, 0x1b08, 0x378}} #elif RADIX == 32 -{0xf1941d7, 0x5c05fbe, 0x9869437, 0x14a46e7a, 0x18355438, 0x3a3358f, 0xd13727d, 0x16ecaff8, 0x107262c8, 0x1a93e0f5, 0x8c79c67, 0x1bc7a62, 0xb7c5d30, 0x1378afb2, 0xee2445f, 0x2ca18a5, 0x180c785, 0x1c1} +{{0xf1941d7, 0x5c05fbe, 0x9869437, 0x14a46e7a, 0x18355438, 0x3a3358f, 0xd13727d, 0x16ecaff8, 0x107262c8, 0x1a93e0f5, 0x8c79c67, 0x1bc7a62, 0xb7c5d30, 0x1378afb2, 0xee2445f, 0x2ca18a5, 0x180c785, 0x1c1}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x94372e02fdf3c650, 0xd550e2948dcf4986, 0xd13727d1d19ac7e0, 0xac1c98b22dd95ff0, 0x4c48c79c67d49f07, 0x57d92df174c0378f, 0x94314aee2445f9bc, 0xc6c2086031e145} +{{0x94372e02fdf3c650, 0xd550e2948dcf4986, 0xd13727d1d19ac7e0, 0xac1c98b22dd95ff0, 0x4c48c79c67d49f07, 0x57d92df174c0378f, 0x94314aee2445f9bc, 0xc6c2086031e145}} #else -{0x6e5c05fbe78ca0, 0xe2948dcf498694, 0x1e8e8cd63f06aa8, 0x8b7657fc344dc9, 0xfa93e0f5839316, 0x378f4c48c79c6, 0x1cde2bec96f8ba6, 0x11650c52bb89117, 0x18d8410c063c2} +{{0x6e5c05fbe78ca0, 0xe2948dcf498694, 0x1e8e8cd63f06aa8, 0x8b7657fc344dc9, 0xfa93e0f5839316, 0x378f4c48c79c6, 0x1cde2bec96f8ba6, 0x11650c52bb89117, 0x18d8410c063c2}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1044, 0x2d0, 0x1004, 0x1082, 0x535, 0x141a, 0x10a6, 0x1f9d, 0xc2d, 0x1347, 0xdf4, 0x1db1, 0x90e, 0x116d, 0x59c, 0xc2b, 0x7c2, 0x15d7, 0x119, 0x32c, 0x1e89, 0x1b01, 0xe5f, 0x105f, 0xd7d, 0xb4f, 0x1c33, 0x1b3b, 0xf2d, 0xc22, 0x11d8, 0x1848, 0x11a9, 0x1ee7, 0x6ea, 0x165d, 0x17d4, 0x77, 0x64b} +{{0x1044, 0x2d0, 0x1004, 0x1082, 0x535, 0x141a, 0x10a6, 0x1f9d, 0xc2d, 0x1347, 0xdf4, 
0x1db1, 0x90e, 0x116d, 0x59c, 0xc2b, 0x7c2, 0x15d7, 0x119, 0x32c, 0x1e89, 0x1b01, 0xe5f, 0x105f, 0xd7d, 0xb4f, 0x1c33, 0x1b3b, 0xf2d, 0xc22, 0x11d8, 0x1848, 0x11a9, 0x1ee7, 0x6ea, 0x165d, 0x17d4, 0x77, 0x64b}} #elif RADIX == 32 -{0x8227755, 0x5004168, 0x68a6b08, 0x1cec29a8, 0x1347616f, 0x1bb62df4, 0xe45b521, 0x1c261596, 0x2335d73, 0xfa24658, 0x1f72fec0, 0x9ed7d82, 0xcef866b, 0x6113cb7, 0x138488ec, 0x1abdcf1a, 0x1ea5974d, 0x83d} +{{0x8227755, 0x5004168, 0x68a6b08, 0x1cec29a8, 0x1347616f, 0x1bb62df4, 0xe45b521, 0x1c261596, 0x2335d73, 0xfa24658, 0x1f72fec0, 0x9ed7d82, 0xcef866b, 0x6113cb7, 0x138488ec, 0x1abdcf1a, 0x1ea5974d, 0x83d}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6b0828020b42089d, 0x1d85bf9d8535068a, 0xe45b521ddb16fa4d, 0xc08cd75cf84c2b2c, 0xb05f72fec07d1232, 0x9e5bb3be19ad3daf, 0x7b9e3538488ec308, 0x1681defa965d375} +{{0x6b0828020b42089d, 0x1d85bf9d8535068a, 0xe45b521ddb16fa4d, 0xc08cd75cf84c2b2c, 0xb05f72fec07d1232, 0x9e5bb3be19ad3daf, 0x7b9e3538488ec308, 0x1681defa965d375}} #else -{0x1050041684113b, 0x1bf9d8535068a6b, 0x10eed8b7d268ec2, 0x13e130acb3916d4, 0xfa24658119aeb, 0xd3dafb05f72fec, 0x1844f2dd9df0cd, 0x1d5ee78d4e1223b, 0x1203bdf52cba6} +{{0x1050041684113b, 0x1bf9d8535068a6b, 0x10eed8b7d268ec2, 0x13e130acb3916d4, 0xfa24658119aeb, 0xd3dafb05f72fec, 0x1844f2dd9df0cd, 0x1d5ee78d4e1223b, 0x1203bdf52cba6}} #endif #endif , #if 0 #elif RADIX == 16 -{0x7bc, 0x14d4, 0x1225, 0x1afb, 0x179e, 0x2c0, 0x1c0, 0x1267, 0x450, 0x1f26, 0x1e3f, 0x2bb, 0x19a5, 0x12f9, 0xa57, 0x2d, 0x1ed, 0xa16, 0x754, 0x1893, 0x759, 0x6bb, 0x618, 0x1379, 0xff3, 0x1989, 0x1abb, 0x1c40, 0x1bf5, 0x71e, 0xd6d, 0xc04, 0x15ef, 0x6aa, 0x4da, 0x1fb6, 0xb5b, 0x9f2, 0x211} +{{0x7bc, 0x14d4, 0x1225, 0x1afb, 0x179e, 0x2c0, 0x1c0, 0x1267, 0x450, 0x1f26, 0x1e3f, 0x2bb, 0x19a5, 0x12f9, 0xa57, 0x2d, 0x1ed, 0xa16, 0x754, 0x1893, 0x759, 0x6bb, 0x618, 0x1379, 0xff3, 0x1989, 0x1abb, 0x1c40, 0x1bf5, 0x71e, 0xd6d, 0xc04, 0x15ef, 0x6aa, 0x4da, 0x1fb6, 0xb5b, 0x9f2, 0x211}} #elif RADIX == 32 -{0x3de2735, 0x17225a6a, 0x102f3daf, 0x13387005, 0x1f262284, 0x14577e3f, 0xbcbe734, 0x1ed016a9, 0xea8a160, 0x19d67126, 0x1930c1ae, 0x112ff39b, 0x11035779, 0x138f6fd7, 0x1ec046b6, 0x168d555e, 0x1adfed89, 0x412} +{{0x3de2735, 0x17225a6a, 0x102f3daf, 0x13387005, 0x1f262284, 0x14577e3f, 0xbcbe734, 0x1ed016a9, 0xea8a160, 0x19d67126, 0x1930c1ae, 0x112ff39b, 0x11035779, 0x138f6fd7, 0x1ec046b6, 0x168d555e, 0x1adfed89, 0x412}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3dafb912d350f789, 0x988a12670e00b02f, 0xbcbe734a2bbf1ffc, 0x33aa28583da02d52, 0x737930c1aeceb389, 0xb7ebc40d5de625fe, 0x1aaabdec046b69c7, 0x15a7c96b7fb626d} +{{0x3dafb912d350f789, 0x988a12670e00b02f, 0xbcbe734a2bbf1ffc, 0x33aa28583da02d52, 0x737930c1aeceb389, 0xb7ebc40d5de625fe, 0x1aaabdec046b69c7, 0x15a7c96b7fb626d}} #else -{0x15f7225a6a1ef13, 0x12670e00b02f3d, 0x1a515df8ffe4c45, 0xf680b54af2f9c, 0x1d9d6712675450b, 0x625fe737930c1a, 0x14e3dbf5e206aef, 0x1b46aaaf7b011ad, 0x104f92d6ff6c4} +{{0x15f7225a6a1ef13, 0x12670e00b02f3d, 0x1a515df8ffe4c45, 0xf680b54af2f9c, 0x1d9d6712675450b, 0x625fe737930c1a, 0x14e3dbf5e206aef, 0x1b46aaaf7b011ad, 0x104f92d6ff6c4}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -1432,261 +1432,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x14d5, 0x1163, 0xf47, 0x337, 0x71e, 0x4ad, 0x1071, 0x19d4, 0x11d9, 0xdce, 0x186a, 0x1785, 0x13c8, 0x695, 0xe1b, 0x54b, 0x174f, 0xd0c, 0x17cb, 0x17db, 0xb41, 0xb78, 0x1a46, 0x66f, 0x23, 0x836, 0x19ce, 0xcdf, 0x90b, 0x6fb, 0x1508, 0x1bb5, 0x6aa, 0x1219, 0x1bba, 0x1d67, 0x1e46, 0x8d8, 0x62c} +{{0x14d5, 0x1163, 0xf47, 0x337, 0x71e, 0x4ad, 0x1071, 0x19d4, 0x11d9, 0xdce, 0x186a, 0x1785, 0x13c8, 0x695, 0xe1b, 0x54b, 0x174f, 0xd0c, 0x17cb, 0x17db, 0xb41, 0xb78, 0x1a46, 0x66f, 0x23, 0x836, 0x19ce, 0xcdf, 0x90b, 0x6fb, 0x1508, 0x1bb5, 0x6aa, 0x1219, 0x1bba, 0x1d67, 0x1e46, 0x8d8, 0x62c}} #elif RADIX == 32 -{0x1a6af50e, 0xef478b1, 0xb4e3c33, 0xea41c49, 0xdce8ece, 0x2f0b86a, 0xd9a5679, 0x14f2a5b8, 0xf96d0cb, 0x2d06fb7, 0xfd232de, 0x6c02333, 0x137f39c8, 0x37da42d, 0x15bb5a84, 0xea4326a, 0x123759f7, 0x9c7} +{{0x1a6af50e, 0xef478b1, 0xb4e3c33, 0xea41c49, 0xdce8ece, 0x2f0b86a, 0xd9a5679, 0x14f2a5b8, 0xf96d0cb, 0x2d06fb7, 0xfd232de, 0x6c02333, 0x137f39c8, 0x37da42d, 0x15bb5a84, 0xea4326a, 0x123759f7, 0x9c7}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x3c3377a3c58e9abd, 0x3a3b39d483892b4e, 0xd9a56791785c3537, 0xbbe5b432e9e54b70, 0x666fd232de16837d, 0xd216cdfce720d804, 0x4864d55bb5a841be, 0x72363c8dd67ddd} +{{0x3c3377a3c58e9abd, 0x3a3b39d483892b4e, 0xd9a56791785c3537, 0xbbe5b432e9e54b70, 0x666fd232de16837d, 0xd216cdfce720d804, 0x4864d55bb5a841be, 0x72363c8dd67ddd}} #else -{0x66ef478b1d357a, 0x139d483892b4e3c, 0x1c8bc2e1a9b9d1d, 0xba7952dc366959, 0x1c2d06fb77cb686, 0xd804666fd232d, 0xdf690b66fe739, 0x1752193556ed6a1, 0xe46c791bacfb} +{{0x66ef478b1d357a, 0x139d483892b4e3c, 0x1c8bc2e1a9b9d1d, 0xba7952dc366959, 0x1c2d06fb77cb686, 0xd804666fd232d, 0xdf690b66fe739, 0x1752193556ed6a1, 0xe46c791bacfb}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x1d37, 0x1c58, 0x1bd1, 0x10cd, 0x9c7, 0x92b, 0x41c, 0xe75, 0x1476, 0x1373, 0xe1a, 0x5e1, 0xcf2, 0x19a5, 0x1b86, 0x1952, 0x5d3, 0x1b43, 0x1df2, 0xdf6, 0x2d0, 0x12de, 0x1e91, 0x199b, 0x1008, 0x120d, 0x1e73, 0x1b37, 0x1a42, 0x1be, 0xd42, 0x16ed, 0x9aa, 0x1486, 0x1eee, 0x1759, 0x791, 0x236, 0x5bb} +{{0x1d37, 0x1c58, 0x1bd1, 0x10cd, 0x9c7, 0x92b, 0x41c, 0xe75, 0x1476, 0x1373, 0xe1a, 0x5e1, 0xcf2, 0x19a5, 0x1b86, 0x1952, 0x5d3, 0x1b43, 0x1df2, 0xdf6, 0x2d0, 0x12de, 0x1e91, 0x199b, 0x1008, 0x120d, 0x1e73, 0x1b37, 0x1a42, 0x1be, 0xd42, 0x16ed, 0x9aa, 0x1486, 0x1eee, 0x1759, 0x791, 0x236, 0x5bb}} #elif RADIX == 32 -{0xe9becab, 0x1bbd1e2c, 0xad38f0c, 0x13a90712, 0x1373a3b3, 0x8bc2e1a, 0x366959e, 0x1d3ca96e, 0x1be5b432, 0x10b41bed, 0x1bf48cb7, 0x1b008cc, 0xcdfce72, 0xdf690b, 0x156ed6a1, 0x1ba90c9a, 0x1c8dd67d, 0xd31} +{{0xe9becab, 0x1bbd1e2c, 0xad38f0c, 0x13a90712, 0x1373a3b3, 0x8bc2e1a, 0x366959e, 0x1d3ca96e, 0x1be5b432, 0x10b41bed, 0x1bf48cb7, 0x1b008cc, 0xcdfce72, 0xdf690b, 0x156ed6a1, 0x1ba90c9a, 
0x1c8dd67d, 0xd31}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x8f0cdde8f163a6fb, 0xce8ece7520e24ad3, 0x366959e45e170d4d, 0x6ef96d0cba7952dc, 0x199bf48cb785a0df, 0xb485b37f39c83601, 0x52193556ed6a106f, 0x488d8f23759f77} +{{0x8f0cdde8f163a6fb, 0xce8ece7520e24ad3, 0x366959e45e170d4d, 0x6ef96d0cba7952dc, 0x199bf48cb785a0df, 0xb485b37f39c83601, 0x52193556ed6a106f, 0x488d8f23759f77}} #else -{0x19bbd1e2c74df6, 0xce7520e24ad38f, 0xf22f0b86a6e747, 0x12e9e54b70d9a56, 0xf0b41beddf2da1, 0x83601199bf48cb, 0x837da42d9bf9ce, 0x1dd4864d55bb5a8, 0x911b1e46eb3e} +{{0x19bbd1e2c74df6, 0xce7520e24ad38f, 0xf22f0b86a6e747, 0x12e9e54b70d9a56, 0xf0b41beddf2da1, 0x83601199bf48cb, 0x837da42d9bf9ce, 0x1dd4864d55bb5a8, 0x911b1e46eb3e}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x8f6, 0xe30, 0x75, 0xaf7, 0xb3c, 
0x1672, 0x1e05, 0x157a, 0x16b1, 0x1fd, 0x3c2, 0x114d, 0x1000, 0x1b4f, 0x1f37, 0xc0e, 0xdd, 0x4de, 0xdff, 0x55e, 0x1a2f, 0x353, 0xc4a, 0x1225, 0x9ed, 0x9ff, 0x1493, 0x18e6, 0x96c, 0x163c, 0xa76, 0x1c78, 0x11b4, 0x1087, 0x1519, 0xc82, 0x3e0, 0x7d4, 0xf5} +{{0x8f6, 0xe30, 0x75, 0xaf7, 0xb3c, 0x1672, 0x1e05, 0x157a, 0x16b1, 0x1fd, 0x3c2, 0x114d, 0x1000, 0x1b4f, 0x1f37, 0xc0e, 0xdd, 0x4de, 0xdff, 0x55e, 0x1a2f, 0x353, 0xc4a, 0x1225, 0x9ed, 0x9ff, 0x1493, 0x18e6, 0x96c, 0x163c, 0xa76, 0x1c78, 0x11b4, 0x1087, 0x1519, 0xc82, 0x3e0, 0x7d4, 0xf5}} #elif RADIX == 32 -{0x47b122a, 0xe075718, 0x1c9678af, 0xbd7816c, 0x1fdb58d, 0x229a3c2, 0x1bed3e00, 0xdd6077c, 0x1bfe4de0, 0x1e8bcabc, 0x56250d4, 0x1fe9ed91, 0x39a9269, 0xb1e25b3, 0x9c7853b, 0x6610f1b, 0x1f0320aa, 0x7a0} +{{0x47b122a, 0xe075718, 0x1c9678af, 0xbd7816c, 0x1fdb58d, 0x229a3c2, 0x1bed3e00, 0xdd6077c, 0x1bfe4de0, 0x1e8bcabc, 0x56250d4, 0x1fe9ed91, 0x39a9269, 0xb1e25b3, 0x9c7853b, 0x6610f1b, 0x1f0320aa, 0x7a0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x78af703ab8c11ec4, 0xf6d6357af02d9c96, 0xbed3e00114d1e107, 0xe6ff93781bac0ef9, 0xb2256250d4f45e55, 0x12d98e6a49a7fd3d, 0xc21e369c7853b58f, 0xe9f507c0c82a8c} +{{0x78af703ab8c11ec4, 0xf6d6357af02d9c96, 0xbed3e00114d1e107, 0xe6ff93781bac0ef9, 0xb2256250d4f45e55, 0x12d98e6a49a7fd3d, 0xc21e369c7853b58f, 0xe9f507c0c82a8c}} #else -{0x15ee07571823d89, 0x357af02d9c9678, 0x8a68f083fb6b, 0x6eb03be6fb4f8, 0x9e8bcabcdff26f, 0x7fd3db2256250d, 0x1ac7896cc73524d, 0x330878da71e14e, 0x23ea0f819055} +{{0x15ee07571823d89, 0x357af02d9c9678, 0x8a68f083fb6b, 0x6eb03be6fb4f8, 0x9e8bcabcdff26f, 0x7fd3db2256250d, 0x1ac7896cc73524d, 0x330878da71e14e, 0x23ea0f819055}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1227, 0x1240, 0x423, 0xd84, 0x1dc1, 0x982, 0x1cb3, 0x14e1, 0x16eb, 0x1409, 0xf49, 0xec8, 0x888, 0xe0b, 0x1c45, 0x176, 0x49e, 0x1d40, 0x1e6b, 0x7a3, 0xfba, 0x175f, 0x1908, 0xb88, 0x168c, 0x1324, 0x159f, 0x1077, 0xac3, 0x10b4, 0x478, 0x240, 0x1682, 0x14f, 0x1599, 0x152f, 0x1197, 0xad5, 0x133} +{{0x1227, 0x1240, 0x423, 0xd84, 0x1dc1, 0x982, 0x1cb3, 0x14e1, 0x16eb, 0x1409, 0xf49, 0xec8, 0x888, 0xe0b, 0x1c45, 0x176, 0x49e, 0x1d40, 0x1e6b, 0x7a3, 0xfba, 0x175f, 0x1908, 0xb88, 0x168c, 0x1324, 0x159f, 0x1077, 0xac3, 0x10b4, 0x478, 0x240, 0x1682, 0x14f, 0x1599, 0x152f, 0x1197, 0xad5, 0x133}} #elif RADIX == 32 -{0x91396c4, 0x8423920, 0xbb82d8, 0x70f2cd3, 0x1409b75d, 0x1d90f49, 0x2b82d11, 0x9e0bb71, 0x1cd7d402, 0x1bee8f47, 0x8c845d7, 0x4968c5c, 0x1deb3f3, 0x85a2b0e, 0x424023c, 0x6429f68, 0xcbd4beb, 0xac} +{{0x91396c4, 0x8423920, 0xbb82d8, 0x70f2cd3, 0x1409b75d, 0x1d90f49, 0x2b82d11, 0x9e0bb71, 0x1cd7d402, 0x1bee8f47, 0x8c845d7, 0x4968c5c, 0x1deb3f3, 0x85a2b0e, 0x424023c, 0x6429f68, 0xcbd4beb, 0xac}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x82d84211c90244e5, 0x26dd74e1e59a60bb, 0x2b82d110ec87a4d0, 0x3f35f50093c176e2, 0x8b88c845d7df747a, 0x1587077acfcc92d1, 0x853ed0424023c42d, 0x12ab5632f52facc} +{{0x82d84211c90244e5, 0x26dd74e1e59a60bb, 0x2b82d110ec87a4d0, 0x3f35f50093c176e2, 0x8b88c845d7df747a, 0x1587077acfcc92d1, 0x853ed0424023c42d, 0x12ab5632f52facc}} #else -{0x1b08423920489cb, 0x174e1e59a60bb82, 0x887643d268136e, 0x24f05db88ae0b4, 0xfbee8f47e6bea0, 0xc92d18b88c845d, 0x2168ac383bd67e, 0x13214fb4109008f, 0xa56ac65ea5f5} +{{0x1b08423920489cb, 0x174e1e59a60bb82, 0x887643d268136e, 0x24f05db88ae0b4, 0xfbee8f47e6bea0, 0xc92d18b88c845d, 0x2168ac383bd67e, 0x13214fb4109008f, 0xa56ac65ea5f5}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1544, 0x1dea, 0x162d, 0x73d, 0x6d1, 0x1511, 0x5f2, 0x275, 0x1aff, 0x1c7, 0x1d84, 0x1875, 0x10df, 0x2e0, 0x70b, 0x9eb, 0x897, 0xf0f, 0xa5d, 0xf38, 0x108c, 0x1c12, 0x1649, 0x1849, 0x9b8, 0x2bc, 0x1b0, 0xd0e, 0xfdb, 0x8ee, 0x1b0b, 0x1fdc, 0xc1, 0x1771, 0x1776, 0xa12, 0x1392, 0xd10, 0x618} +{{0x1544, 0x1dea, 0x162d, 0x73d, 0x6d1, 0x1511, 0x5f2, 0x275, 0x1aff, 0x1c7, 0x1d84, 0x1875, 0x10df, 0x2e0, 0x70b, 0x9eb, 0x897, 0xf0f, 0xa5d, 0xf38, 0x108c, 0x1c12, 0x1649, 0x1849, 0x9b8, 0x2bc, 0x1b0, 0xd0e, 0xfdb, 0x8ee, 0x1b0b, 0x1fdc, 0xc1, 0x1771, 0x1776, 0xa12, 0x1392, 0xd10, 0x618}} #elif RADIX == 32 -{0xaa27395, 0x1b62def5, 0x44da273, 0x13a97caa, 0x1c7d7f8, 0x1f0ebd84, 0x58b821b, 0x974f59c, 0x14baf0f4, 0x14231e70, 0x9b24f04, 0x1789b8c2, 0x14383602, 0x14773f6d, 0x3fdcd85, 0x1daee20c, 0x1c9284ae, 0xd04} +{{0xaa27395, 0x1b62def5, 0x44da273, 0x13a97caa, 0x1c7d7f8, 0x1f0ebd84, 0x58b821b, 0x974f59c, 0x14baf0f4, 0x14231e70, 0x9b24f04, 0x1789b8c2, 0x14383602, 0x14773f6d, 0x3fdcd85, 0x1daee20c, 0x1c9284ae, 0xd04}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xa273db16f7aaa89c, 0x1f5fe2752f95444d, 0x58b821bf875ec207, 0x852ebc3d12e9eb38, 0x1849b24f04a118f3, 0x9fb6d0e0d80af137, 0x5dc4183fdcd85a3b, 0x183442724a12bbb} +{{0xa273db16f7aaa89c, 0x1f5fe2752f95444d, 0x58b821bf875ec207, 0x852ebc3d12e9eb38, 0x1849b24f04a118f3, 0x9fb6d0e0d80af137, 0x5dc4183fdcd85a3b, 0x183442724a12bbb}} #else -{0xe7b62def555139, 0x1e2752f95444da2, 0xdfc3af61038faf, 0x144ba7ace162e08, 0x94231e70a5d787, 0xaf1371849b24f0, 0xd1dcfdb68706c0, 0xed771060ff7361, 0x156884e494257} +{{0xe7b62def555139, 0x1e2752f95444da2, 0xdfc3af61038faf, 0x144ba7ace162e08, 0x94231e70a5d787, 0xaf1371849b24f0, 0xd1dcfdb68706c0, 0xed771060ff7361, 0x156884e494257}} #endif #endif , #if 0 #elif RADIX == 16 
-{0x1756, 0x1187, 0x608, 0x637, 0x5c5, 0x459, 0x12f2, 0x9a1, 0x314, 0xe7f, 0x1c73, 0x27f, 0xa8d, 0x17f8, 0x1e33, 0x1878, 0x1c21, 0x123b, 0xb76, 0x7ea, 0x157, 0x16b4, 0xad7, 0x413, 0x56e, 0x4f3, 0x881, 0x1319, 0x1cc3, 0x1813, 0x1575, 0x1f0, 0x13f9, 0x1ef4, 0x8ae, 0x17c8, 0xd48, 0x157d, 0x5ea} +{{0x1756, 0x1187, 0x608, 0x637, 0x5c5, 0x459, 0x12f2, 0x9a1, 0x314, 0xe7f, 0x1c73, 0x27f, 0xa8d, 0x17f8, 0x1e33, 0x1878, 0x1c21, 0x123b, 0xb76, 0x7ea, 0x157, 0x16b4, 0xad7, 0x413, 0x56e, 0x4f3, 0x881, 0x1319, 0x1cc3, 0x1813, 0x1575, 0x1f0, 0x13f9, 0x1ef4, 0x8ae, 0x17c8, 0xd48, 0x157d, 0x5ea}} #elif RADIX == 32 -{0x1bab7032, 0xe6088c3, 0x164b8a63, 0xd0cbc88, 0xe7f18a2, 0x144ffc73, 0x19dfe151, 0x21c3c78, 0x16ed23be, 0x55cfd4, 0x1356bdad, 0x1e656e20, 0xc651024, 0x1c09f30e, 0x121f0aba, 0xbbde93f, 0xa45f211, 0x8eb} +{{0x1bab7032, 0xe6088c3, 0x164b8a63, 0xd0cbc88, 0xe7f18a2, 0x144ffc73, 0x19dfe151, 0x21c3c78, 0x16ed23be, 0x55cfd4, 0x1356bdad, 0x1e656e20, 0xc651024, 0x1c09f30e, 0x121f0aba, 0xbbde93f, 0xa45f211, 0x8eb}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x8a637304461eeadc, 0xfc6289a19791164b, 0x9dfe151a27fe39b9, 0xa5bb48ef843878f1, 0xc41356bdad02ae7e, 0xf98731944093ccad, 0x7bd27f21f0abae04, 0x155f5a917c8457} +{{0x8a637304461eeadc, 0xfc6289a19791164b, 0x9dfe151a27fe39b9, 0xa5bb48ef843878f1, 0xc41356bdad02ae7e, 0xf98731944093ccad, 0x7bd27f21f0abae04, 0x155f5a917c8457}} #else -{0xc6e6088c3dd5b8, 0x89a19791164b8a, 0x8d13ff1cdcfe31, 0x1e10e1e3c677f85, 0x1a055cfd4b7691d, 0x13ccadc41356bda, 0x17027cc398ca204, 0x15def49fc87c2ae, 0x2abeb522f908} +{{0xc6e6088c3dd5b8, 0x89a19791164b8a, 0x8d13ff1cdcfe31, 0x1e10e1e3c677f85, 0x1a055cfd4b7691d, 0x13ccadc41356bda, 0x17027cc398ca204, 0x15def49fc87c2ae, 0x2abeb522f908}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0xbba, 0x1eb6, 0x49a, 0x12a5, 0x12d2, 0x30a, 0x172f, 0x174d, 0x1231, 0x1036, 0x122e, 0x158, 0x743, 0xf10, 0x1e52, 0x18c7, 0x152e, 0x13b1, 0x7ae, 0x128d, 0x9c4, 0x848, 0x4, 0x1e64, 0x1e6f, 0x10ca, 0x3d4, 0x164, 0x1c8, 0x3e2, 0x4e8, 0x27b, 0x1d32, 0x1cc2, 0x1c60, 0x7a8, 0x13df, 0x1f6b, 0x6ad} +{{0xbba, 0x1eb6, 0x49a, 0x12a5, 0x12d2, 0x30a, 0x172f, 0x174d, 0x1231, 0x1036, 0x122e, 0x158, 0x743, 0xf10, 0x1e52, 0x18c7, 0x152e, 0x13b1, 0x7ae, 0x128d, 0x9c4, 0x848, 0x4, 0x1e64, 0x1e6f, 0x10ca, 0x3d4, 0x164, 0x1c8, 0x3e2, 0x4e8, 0x27b, 0x1d32, 0x1cc2, 0x1c60, 0x7a8, 0x13df, 0x1f6b, 0x6ad}} #elif RADIX == 32 -{0x5dd7eaa, 0xa49af5b, 0x2a5a52a, 0x1a6dcbc6, 0x1036918d, 0xc2b122e, 0x93c40e8, 0x12ec63f9, 0xf5d3b1a, 0x271251a, 0x4002212, 0x195e6ff3, 0x5907a90, 0x1f10720, 0x427b274, 0x183985d3, 0x1ef9ea38, 0x45c} +{{0x5dd7eaa, 0xa49af5b, 0x2a5a52a, 0x1a6dcbc6, 0x1036918d, 0xc2b122e, 0x93c40e8, 0x12ec63f9, 0xf5d3b1a, 0x271251a, 0x4002212, 0x195e6ff3, 0x5907a90, 0x1f10720, 0x427b274, 0x183985d3, 0x1ef9ea38, 0x45c}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xa52a524d7ad9775f, 0xda46374db978c2a5, 0x93c40e8615891740, 0xd3d74ec6a5d8c7f2, 0xfe64002212138928, 0x83901641ea432bcd, 0x730ba6427b2740f8, 0x11fdae7be7a8e30} +{{0xa52a524d7ad9775f, 0xda46374db978c2a5, 0x93c40e8615891740, 0xd3d74ec6a5d8c7f2, 0xfe64002212138928, 0x83901641ea432bcd, 0x730ba6427b2740f8, 0x11fdae7be7a8e30}} #else -{0x54a49af5b2eebf, 0x374db978c2a5a5, 0x1430ac48ba06d23, 0x1a97631fca4f103, 0x4271251a7ae9d8, 0x32bcdfe6400221, 0x7c41c80b20f52, 0xc1cc2e9909ec9d, 0x8fb5cf7cf51c} +{{0x54a49af5b2eebf, 0x374db978c2a5a5, 0x1430ac48ba06d23, 0x1a97631fca4f103, 0x4271251a7ae9d8, 0x32bcdfe6400221, 0x7c41c80b20f52, 0xc1cc2e9909ec9d, 0x8fb5cf7cf51c}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1d5e, 0x18e6, 0xc97, 0x1db2, 0x9df, 0x19d3, 0x1564, 0x1a3a, 0x90, 0xea5, 0xd74, 0x19fc, 0xf84, 0xadd, 0x2e5, 0x10bb, 0x183f, 0x1334, 0xa50, 0x54b, 0xd22, 0x1295, 0xf11, 0xfa1, 0x1810, 0xa3, 0xa81, 0x1026, 0x2b2, 0x19ee, 0x1a4a, 0xf8a, 0xfb3, 0x1463, 0x19c5, 0x42c, 0x830, 0x562, 0x3db} +{{0x1d5e, 0x18e6, 0xc97, 0x1db2, 0x9df, 0x19d3, 0x1564, 0x1a3a, 0x90, 0xea5, 0xd74, 0x19fc, 0xf84, 0xadd, 0x2e5, 0x10bb, 0x183f, 0x1334, 0xa50, 0x54b, 0xd22, 0x1295, 0xf11, 0xfa1, 0x1810, 0xa3, 0xa81, 0x1026, 0x2b2, 0x19ee, 0x1a4a, 0xf8a, 0xfb3, 0x1463, 0x19c5, 0x42c, 0x830, 0x562, 0x3db}} #elif RADIX == 32 -{0xeaf491f, 0x4c97c73, 0x14d3bfdb, 0x11d55933, 0xea50486, 0x133f8d74, 0x12ab75f0, 0x3f85d8b, 0x14a1334c, 0xb488a96, 0x1788ca5, 0x1478107d, 0x995020, 0xcf70aca, 0x6f8ad25, 0x1168c6fb, 0x1810b33, 0x892} +{{0xeaf491f, 0x4c97c73, 0x14d3bfdb, 0x11d55933, 0xea50486, 0x133f8d74, 0x12ab75f0, 0x3f85d8b, 0x14a1334c, 0xb488a96, 0x1788ca5, 0x1478107d, 0x995020, 0xcf70aca, 0x6f8ad25, 0x1168c6fb, 0x1810b33, 0x892}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xbfdb264be39babd2, 0x94121a3aab2674d3, 0x2ab75f099fc6ba3a, 0xb5284cd307f0bb17, 0xfa1788ca55a4454, 0x8565026540828f02, 0xd18df66f8ad2567b, 0x7958906042cce2} +{{0xbfdb264be39babd2, 0x94121a3aab2674d3, 0x2ab75f099fc6ba3a, 0xb5284cd307f0bb17, 0xfa1788ca55a4454, 0x8565026540828f02, 0xd18df66f8ad2567b, 0x7958906042cce2}} #else -{0x1b64c97c73757a4, 0x1a3aab2674d3bf, 0x184cfe35d1d4a09, 0xc1fc2ec5caadd7, 0xab488a96a5099a, 0x28f020fa1788ca, 0xb3dc2b28132a04, 0x18b4637d9be2b49, 0xf2b120c08599} +{{0x1b64c97c73757a4, 0x1a3aab2674d3bf, 0x184cfe35d1d4a09, 0xc1fc2ec5caadd7, 0xab488a96a5099a, 0x28f020fa1788ca, 0xb3dc2b28132a04, 0x18b4637d9be2b49, 
0xf2b120c08599}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -1908,261 +1908,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0xca6, 0xc89, 0x411, 0x17e9, 0x188d, 0xad5, 0x8de, 0x403, 0x183a, 0x1e0d, 0x28c, 0xfdc, 0xbcc, 0xd0, 0xa3a, 0xb2e, 0x140c, 0x11b4, 0x4bb, 0x10da, 0xb22, 0x1a4c, 0xbfa, 0xbd5, 0xae6, 0xeff, 0x465, 0x8df, 0xcf9, 0x1c4a, 0x1807, 0x1462, 0xead, 0x1c43, 0x12a3, 0x1ec2, 0x1e12, 0xad0, 0x1cd} +{{0xca6, 0xc89, 0x411, 0x17e9, 0x188d, 0xad5, 0x8de, 0x403, 0x183a, 0x1e0d, 0x28c, 0xfdc, 0xbcc, 0xd0, 0xa3a, 0xb2e, 0x140c, 0x11b4, 0x4bb, 0x10da, 0xb22, 0x1a4c, 0xbfa, 0xbd5, 0xae6, 0xeff, 0x465, 0x8df, 0xcf9, 0x1c4a, 0x1807, 0x1462, 0xead, 0x1c43, 0x12a3, 0x1ec2, 0x1e12, 0xad0, 0x1cd}} #elif RADIX == 32 -{0x1653222c, 0x12411644, 0x15711b7e, 0x1a3795, 0x1e0dc1d1, 0x11fb828c, 0x1d034179, 0xc59728, 0x9771b4a, 0x2c8a1b4, 0x155fd693, 0x1feae65e, 0x37c8cae, 0x1e2533e5, 0x1b462c03, 0x8f886ea, 0x1097b0a5, 0x487} +{{0x1653222c, 0x12411644, 0x15711b7e, 0x1a3795, 0x1e0dc1d1, 0x11fb828c, 0x1d034179, 0xc59728, 0x9771b4a, 0x2c8a1b4, 0x155fd693, 0x1feae65e, 0x37c8cae, 0x1e2533e5, 0x1b462c03, 0x8f886ea, 0x1097b0a5, 0x487}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x1b7e9208b22594c8, 0x3707440346f2b571, 0xd0341798fdc14678, 0xa25dc6d2818b2e51, 0xcbd55fd69316450d, 0x99f28df232bbfd5c, 0xf10dd5b462c03f12, 0xeab43c25ec2951} +{{0x1b7e9208b22594c8, 0x3707440346f2b571, 0xd0341798fdc14678, 0xa25dc6d2818b2e51, 0xcbd55fd69316450d, 0x99f28df232bbfd5c, 0xf10dd5b462c03f12, 0xeab43c25ec2951}} #else -{0xfd2411644b2991, 0x1440346f2b5711b, 0x1cc7ee0a33c1b83, 0xa062cb94740d05, 0x62c8a1b44bb8da, 0x1bfd5ccbd55fd69, 0x1f894cf946f9195, 
0x147c43756d18b00, 0x2568784bd852} +{{0xfd2411644b2991, 0x1440346f2b5711b, 0x1cc7ee0a33c1b83, 0xa062cb94740d05, 0x62c8a1b44bb8da, 0x1bfd5ccbd55fd69, 0x1f894cf946f9195, 0x147c43756d18b00, 0x2568784bd852}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0xb2b, 0xb22, 0x904, 0xdfa, 0xe23, 0x12b5, 0x1a37, 0x1100, 0xe0e, 0x783, 0xa3, 0x3f7, 0x2f3, 0x1034, 0x128e, 0x2cb, 0x503, 0x1c6d, 0x112e, 0x1436, 0x2c8, 0x1693, 0xafe, 0x12f5, 0x1ab9, 0xbbf, 0x1919, 0xa37, 0x133e, 0x1f12, 0x1601, 0xd18, 0x1bab, 0x1f10, 0x14a8, 0x17b0, 0x784, 0xab4, 0x653} +{{0xb2b, 0xb22, 0x904, 0xdfa, 0xe23, 0x12b5, 0x1a37, 0x1100, 0xe0e, 0x783, 0xa3, 0x3f7, 0x2f3, 0x1034, 0x128e, 0x2cb, 0x503, 0x1c6d, 0x112e, 0x1436, 0x2c8, 0x1693, 0xafe, 0x12f5, 0x1ab9, 0xbbf, 0x1919, 0xa37, 0x133e, 0x1f12, 0x1601, 0xd18, 0x1bab, 0x1f10, 0x14a8, 0x17b0, 0x784, 
0xab4, 0x653}} #elif RADIX == 32 -{0x595f7f3, 0x14904591, 0xd5c46df, 0x8068de5, 0x7837074, 0xc7ee0a3, 0x740d05e, 0x103165ca, 0x25dc6d2, 0x18b2286d, 0x1557f5a4, 0x17fab997, 0x8df232b, 0x1f894cf9, 0x16d18b00, 0xa3e21ba, 0x1c25ec29, 0x521} +{{0x595f7f3, 0x14904591, 0xd5c46df, 0x8068de5, 0x7837074, 0xc7ee0a3, 0x740d05e, 0x103165ca, 0x25dc6d2, 0x18b2286d, 0x1557f5a4, 0x17fab997, 0x8df232b, 0x1f894cf9, 0x16d18b00, 0xa3e21ba, 0x1c25ec29, 0x521}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x46dfa4822c89657d, 0xdc1d100d1bcad5c, 0x740d05e63f70519e, 0x689771b4a062cb94, 0x32f557f5a4c59143, 0xa67ca37c8caeff57, 0x7c43756d18b00fc4, 0x1aaad0f097b0a54} +{{0x46dfa4822c89657d, 0xdc1d100d1bcad5c, 0x740d05e63f70519e, 0x689771b4a062cb94, 0x32f557f5a4c59143, 0xa67ca37c8caeff57, 0x7c43756d18b00fc4, 0x1aaad0f097b0a54}} #else -{0x1bf49045912cafb, 0x1d100d1bcad5c46, 0xf31fb828cf06e0, 0x12818b2e51d0341, 0x98b2286d12ee36, 0xeff5732f557f5a, 0x7e2533e51be465, 0x151f10dd5b462c0, 0x1a55a1e12f614} +{{0x1bf49045912cafb, 0x1d100d1bcad5c46, 0xf31fb828cf06e0, 0x12818b2e51d0341, 0x98b2286d12ee36, 0xeff5732f557f5a, 0x7e2533e51be465, 0x151f10dd5b462c0, 0x1a55a1e12f614}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} 
+{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x517, 0x18a8, 0x1a92, 0x94f, 0x1bb0, 0xf2c, 0x43, 0x5a8, 0x1463, 0x1b4b, 0x1a1c, 0x1c0e, 0x148a, 0x7f5, 0x6a3, 0x820, 0x1fc7, 0x141c, 0x1c2b, 0xd98, 0x48c, 0x587, 0x1b23, 0x1fb5, 0x4c0, 0x179c, 0x169e, 0x1927, 0x16b8, 0x1beb, 0x6bb, 0x1923, 0x2b7, 0x146d, 0x32b, 0xd85, 0x1a89, 0x1fb0, 0x2be} +{{0x517, 0x18a8, 0x1a92, 0x94f, 0x1bb0, 0xf2c, 0x43, 0x5a8, 0x1463, 0x1b4b, 0x1a1c, 0x1c0e, 0x148a, 0x7f5, 0x6a3, 0x820, 0x1fc7, 0x141c, 0x1c2b, 0xd98, 0x48c, 0x587, 0x1b23, 0x1fb5, 0x4c0, 0x179c, 0x169e, 0x1927, 0x16b8, 0x1beb, 0x6bb, 0x1923, 0x2b7, 0x146d, 0x32b, 0xd85, 0x1a89, 0x1fb0, 0x2be}} #elif RADIX == 32 -{0x28bb412, 0x1fa92c54, 0xb376094, 0xd4010de, 0x1b4ba319, 0xb81da1c, 0x119fd691, 0x1c74101a, 0x185741cf, 0x19231b31, 0x15d91961, 0x1384c0fd, 0x49ed3d7, 0x1df5dae3, 0xf92335d, 0xae8da2b, 0x144b6146, 0xa86} +{{0x28bb412, 0x1fa92c54, 0xb376094, 0xd4010de, 0x1b4ba319, 0xb81da1c, 0x119fd691, 0x1c74101a, 0x185741cf, 0x19231b31, 0x15d91961, 0x1384c0fd, 0x49ed3d7, 0x1df5dae3, 0xf92335d, 0xae8da2b, 0x144b6146, 0xa86}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x6094fd4962a0a2ed, 0x2e8c65a8021bcb37, 0x19fd6915c0ed0e6d, 0x8e15d073f8e82035, 0x1fb5d91961c918d9, 0xed71927b4f5e7098, 0xd1b456f92335defa, 0x7ec3512d85195} +{{0x6094fd4962a0a2ed, 0x2e8c65a8021bcb37, 0x19fd6915c0ed0e6d, 0x8e15d073f8e82035, 0x1fb5d91961c918d9, 0xed71927b4f5e7098, 0xd1b456f92335defa, 0x7ec3512d85195}} #else -{0x129fa92c54145da, 0x65a8021bcb3760, 0x8ae07687369746, 0xfe3a080d467f5a, 0x39231b31c2ba0e, 0x1e70981fb5d9196, 0xf7d76b8c93da7a, 0x5746d15be48cd7, 0xfd86a25b0a3} +{{0x129fa92c54145da, 0x65a8021bcb3760, 0x8ae07687369746, 0xfe3a080d467f5a, 0x39231b31c2ba0e, 0x1e70981fb5d9196, 0xf7d76b8c93da7a, 0x5746d15be48cd7, 0xfd86a25b0a3}} #endif #endif , #if 0 #elif RADIX == 16 -{0x147b, 0x14f1, 0xfdd, 0xb2a, 0xff7, 0x1426, 0xce1, 0x19a8, 0x1bf3, 0xbdd, 0x16dd, 0x1339, 0x10dd, 0x8f4, 0x1d29, 0x1b05, 0x1ee, 0x187b, 0x118a, 0x1e55, 0xcde, 0x1a18, 0x1b1f, 0x1648, 0x1c75, 0x1db8, 0xa2a, 0x1ab6, 0x1fa, 0xb0a, 0x1bdf, 0x1d18, 0x1a98, 0x12d9, 0x13df, 0x6e0, 0xa3c, 0x537, 0x345} +{{0x147b, 0x14f1, 0xfdd, 0xb2a, 0xff7, 0x1426, 0xce1, 0x19a8, 0x1bf3, 0xbdd, 0x16dd, 0x1339, 0x10dd, 0x8f4, 0x1d29, 0x1b05, 0x1ee, 0x187b, 0x118a, 0x1e55, 0xcde, 0x1a18, 0x1b1f, 0x1648, 0x1c75, 0x1db8, 0xa2a, 0x1ab6, 0x1fa, 0xb0a, 0x1bdf, 0x1d18, 0x1a98, 0x12d9, 0x13df, 0x6e0, 0xa3c, 0x537, 0x345}} #elif RADIX == 32 -{0x1a3dbe03, 0x14fdda78, 0x99feeb2, 0xd433868, 0xbdddf9e, 0x166736dd, 0x14a3d21b, 0x1eed82f4, 0x31587b0, 0x337bcab, 0x8d8fe86, 0x171c75b2, 0xad9455d, 0x158507eb, 0x11d18def, 0x17e5b3a9, 0x11e1b827, 0x13a} +{{0x1a3dbe03, 0x14fdda78, 0x99feeb2, 0xd433868, 0xbdddf9e, 0x166736dd, 0x14a3d21b, 0x1eed82f4, 0x31587b0, 0x337bcab, 0x8d8fe86, 0x171c75b2, 0xad9455d, 0x158507eb, 0x11d18def, 0x17e5b3a9, 0x11e1b827, 0x13a}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xeeb2a7eed3c68f6f, 0x777e79a8670d099f, 0x4a3d21bb339b6eaf, 0x58c561ec3ddb05e9, 0xb648d8fe8619bde5, 0x83f5ab651576e38e, 0xcb67531d18defac2, 0xd94dd4786e09ef} +{{0xeeb2a7eed3c68f6f, 0x777e79a8670d099f, 0x4a3d21bb339b6eaf, 0x58c561ec3ddb05e9, 0xb648d8fe8619bde5, 0x83f5ab651576e38e, 
0xcb67531d18defac2, 0xd94dd4786e09ef}} #else -{0x1654fdda78d1edf, 0x79a8670d099fee, 0xdd99cdb757bbbf, 0x10f76c17a528f48, 0xc337bcab18ac3d, 0x16e38eb648d8fe8, 0x1d6141fad5b28ab, 0x1bf2d9d4c74637b, 0x29ba8f0dc13} +{{0x1654fdda78d1edf, 0x79a8670d099fee, 0xdd99cdb757bbbf, 0x10f76c17a528f48, 0xc337bcab18ac3d, 0x16e38eb648d8fe8, 0x1d6141fad5b28ab, 0x1bf2d9d4c74637b, 0x29ba8f0dc13}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0xc4b, 0x1f6e, 0xcba, 0x1a23, 0x8a1, 0x7c3, 0x1a45, 0x1ca3, 0x6a9, 0x643, 0x3b, 0xc83, 0x208, 0x21a, 0xd43, 0x1805, 0x1078, 0x9af, 0x80a, 0x1555, 0x50d, 0x1eb8, 0xa49, 0x161c, 0x1eee, 0xe1b, 0xf4b, 0x9de, 0x117e, 0x14f8, 0xea7, 0xd18, 0x112a, 0x1a38, 0x1cc7, 0x1c36, 0xe5, 0x10fa, 0x411} +{{0xc4b, 0x1f6e, 0xcba, 0x1a23, 0x8a1, 0x7c3, 0x1a45, 0x1ca3, 0x6a9, 0x643, 0x3b, 0xc83, 0x208, 0x21a, 0xd43, 0x1805, 0x1078, 0x9af, 0x80a, 0x1555, 0x50d, 0x1eb8, 0xa49, 0x161c, 0x1eee, 0xe1b, 0xf4b, 0x9de, 0x117e, 0x14f8, 0xea7, 0xd18, 0x112a, 0x1a38, 0x1cc7, 0x1c36, 0xe5, 0x10fa, 0x411}} #elif RADIX == 32 -{0x625cd26, 0x6cbafb7, 0x10d143a2, 0x51e914f, 0x643354f, 0x190603b, 0x1886841, 0x78c02b5, 0x10149af8, 0x1436aaa, 0x1c524fae, 0x37eeeb0, 0x779e96e, 0x1a7c45f9, 0x14d18753, 0x11f47112, 0x72f0db9, 0x6d0} +{{0x625cd26, 0x6cbafb7, 0x10d143a2, 0x51e914f, 0x643354f, 0x190603b, 0x1886841, 0x78c02b5, 0x10149af8, 0x1436aaa, 0x1c524fae, 0x37eeeb0, 0x779e96e, 0x1a7c45f9, 0x14d18753, 0x11f47112, 0x72f0db9, 0x6d0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x43a2365d7db98973, 0xcd53ca3d229f0d1, 0x18868410c8301d99, 0x540526be0f18056a, 0xd61c524fae0a1b55, 0x22fc9de7a5b86fdd, 0xe8e2254d18753d3e, 0x7c3e81cbc36e63} +{{0x43a2365d7db98973, 0xcd53ca3d229f0d1, 0x18868410c8301d99, 
0x540526be0f18056a, 0xd61c524fae0a1b55, 0x22fc9de7a5b86fdd, 0xe8e2254d18753d3e, 0x7c3e81cbc36e63}} #else -{0x1446cbafb7312e6, 0x13ca3d229f0d143, 0x864180ecc866a, 0x183c6015a8621a1, 0x1c1436aaa80a4d7, 0x186fddd61c524fa, 0x1e9f117e4ef3d2d, 0x18fa388953461d4, 0xf87d039786dc} +{{0x1446cbafb7312e6, 0x13ca3d229f0d143, 0x864180ecc866a, 0x183c6015a8621a1, 0x1c1436aaa80a4d7, 0x186fddd61c524fa, 0x1e9f117e4ef3d2d, 0x18fa388953461d4, 0xf87d039786dc}} #endif #endif , #if 0 #elif RADIX == 16 -{0x9d5, 0x0, 0x181d, 0xced, 0x1fe0, 0x267, 0xc65, 0x1a4d, 0x9e3, 0x1f0c, 0x5d, 0xbae, 0x276, 0x1551, 0x1684, 0x1eab, 0x17f0, 0x1b20, 0xae6, 0xbc3, 0x95, 0x17c3, 0xfd8, 0x1359, 0x3f5, 0x12b6, 0x1410, 0x113, 0x1a19, 0x1c1d, 0xd91, 0x1446, 0x1233, 0x170, 0x1c50, 0x13ac, 0x6eb, 0x926, 0x3bf} +{{0x9d5, 0x0, 0x181d, 0xced, 0x1fe0, 0x267, 0xc65, 0x1a4d, 0x9e3, 0x1f0c, 0x5d, 0xbae, 0x276, 0x1551, 0x1684, 0x1eab, 0x17f0, 0x1b20, 0xae6, 0xbc3, 0x95, 0x17c3, 0xfd8, 0x1359, 0x3f5, 0x12b6, 0x1410, 0x113, 0x1a19, 0x1c1d, 0xd91, 0x1446, 0x1233, 0x170, 0x1c50, 0x13ac, 0x6eb, 0x926, 0x3bf}} #elif RADIX == 32 -{0x4eac70e, 0x1b81d000, 0x19ffc0ce, 0x126b1944, 0x1f0c4f1e, 0x1975c05d, 0x255444e, 0x1f0f55da, 0x15cdb20b, 0x18255786, 0x197ec5f0, 0x16c3f59a, 0x44e8212, 0x1e0ee864, 0x74466c8, 0x1402e123, 0x175ceb38, 0xc31} +{{0x4eac70e, 0x1b81d000, 0x19ffc0ce, 0x126b1944, 0x1f0c4f1e, 0x1975c05d, 0x255444e, 0x1f0f55da, 0x15cdb20b, 0x18255786, 0x197ec5f0, 0x16c3f59a, 0x44e8212, 0x1e0ee864, 0x74466c8, 0x1402e123, 0x175ceb38, 0xc31}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xc0cedc0e80013ab1, 0x313c7a4d632899ff, 0x255444ecbae02efc, 0x35736c82fe1eabb4, 0xb3597ec5f0c12abc, 0x7432113a084ad87e, 0x5c24674466c8f07, 0x14a498dd73ace28} +{{0xc0cedc0e80013ab1, 0x313c7a4d632899ff, 0x255444ecbae02efc, 0x35736c82fe1eabb4, 0xb3597ec5f0c12abc, 0x7432113a084ad87e, 0x5c24674466c8f07, 0x14a498dd73ace28}} #else -{0x19db81d00027563, 0x7a4d632899ffc0, 0x765d70177e189e, 0xbf87aaed095511, 0x18255786ae6d90, 0xad87eb3597ec5f, 0x783ba19089d042, 0xa0170919d119b2, 0xe4931bae759c} +{{0x19db81d00027563, 0x7a4d632899ffc0, 0x765d70177e189e, 0xbf87aaed095511, 0x18255786ae6d90, 0xad87eb3597ec5f, 0x783ba19089d042, 0xa0170919d119b2, 0xe4931bae759c}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1997, 0xa5a, 0x4c4, 0x155d, 0x70b, 0x12f, 0xe9d, 0xfe0, 0x147c, 0x9b6, 0x18ea, 0xf41, 0x1636, 0x1707, 0x1a7e, 0x1326, 0x76d, 0xbef, 0x9fe, 0x1bb4, 0xe22, 0x200, 0x1a11, 0x7e6, 0x1709, 0x1be9, 0x1507, 0x1c63, 0xb6f, 0xceb, 0x1b88, 0x1ef6, 0x16b7, 0x20f, 0x1497, 0x1e1c, 0x26e, 0x139d, 0x330} +{{0x1997, 0xa5a, 0x4c4, 0x155d, 0x70b, 0x12f, 0xe9d, 0xfe0, 0x147c, 0x9b6, 0x18ea, 0xf41, 0x1636, 0x1707, 0x1a7e, 0x1326, 0x76d, 0xbef, 0x9fe, 0x1bb4, 0xe22, 0x200, 0x1a11, 0x7e6, 0x1709, 0x1be9, 0x1507, 0x1c63, 0xb6f, 0xceb, 0x1b88, 0x1ef6, 0x16b7, 0x20f, 0x1497, 0x1e1c, 0x26e, 0x139d, 0x330}} #elif RADIX == 32 -{0xccbbc7d, 0x1a4c452d, 0xbce1755, 0x1f03a742, 0x9b6a3e3, 0x19e838ea, 0x1f5c1ec6, 0x16d99369, 0x13fcbef3, 0x388b768, 0x6d08880, 0x1d37093f, 0x118ea0fb, 0x675adbf, 0xfef6dc4, 0x5c41f6b, 0x13778729, 0x568} +{{0xccbbc7d, 0x1a4c452d, 0xbce1755, 0x1f03a742, 0x9b6a3e3, 0x19e838ea, 0x1f5c1ec6, 0x16d99369, 0x13fcbef3, 0x388b768, 0x6d08880, 0x1d37093f, 0x118ea0fb, 0x675adbf, 0xfef6dc4, 0x5c41f6b, 0x13778729, 0x568}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x1755d262296b32ef, 0xda8f8fe074e84bce, 0xf5c1ec6cf41c7526, 0x44ff2fbcedb326d3, 0x27e6d088801c45bb, 0xd6dfc63a83efa6e1, 0x883ed6fef6dc433a, 0x34e744dde1ca4b} +{{0x1755d262296b32ef, 0xda8f8fe074e84bce, 0xf5c1ec6cf41c7526, 0x44ff2fbcedb326d3, 0x27e6d088801c45bb, 0xd6dfc63a83efa6e1, 0x883ed6fef6dc433a, 0x34e744dde1ca4b}} #else -{0xaba4c452d665de, 0x18fe074e84bce17, 0x367a0e3a936d47, 0x13b6cc9b4fd707b, 0x388b7689fe5f7, 0xfa6e127e6d0888, 0x19d6b6fe31d41f, 0x12e20fb5bfbdb71, 0x69ce89bbc394} +{{0xaba4c452d665de, 0x18fe074e84bce17, 0x367a0e3a936d47, 0x13b6cc9b4fd707b, 0x388b7689fe5f7, 0xfa6e127e6d0888, 0x19d6b6fe31d41f, 0x12e20fb5bfbdb71, 0x69ce89bbc394}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1bf, 0x197b, 0x1b4, 0x1a8a, 0xd22, 0x1cb5, 0x298, 0x76b, 0x16b6, 0x5aa, 0x54b, 0x1b63, 0x1d59, 0x2dc, 0xfe1, 0x1b24, 0x1725, 0x9a8, 0x2dd, 0x150f, 0x12de, 0x9d9, 0x2fd, 0x95f, 0xcc1, 0x1ffd, 0x101b, 0x707, 0x1d9d, 0x464, 0x39e, 0x97b, 0x8cf, 0x4a5, 0xed1, 0x9c3, 0x1b66, 0x1521, 0x112} +{{0x1bf, 0x197b, 0x1b4, 0x1a8a, 0xd22, 0x1cb5, 0x298, 0x76b, 0x16b6, 0x5aa, 0x54b, 0x1b63, 0x1d59, 0x2dc, 0xfe1, 0x1b24, 0x1725, 0x9a8, 0x2dd, 0x150f, 0x12de, 0x9d9, 0x2fd, 0x95f, 0xcc1, 0x1ffd, 0x101b, 0x707, 0x1d9d, 0x464, 0x39e, 0x97b, 0x8cf, 0x4a5, 0xed1, 0x9c3, 0x1b66, 0x1521, 0x112}} #elif RADIX == 32 -{0x10df9458, 0x141b4cbd, 0xd5a45a8, 0x1b58a639, 0x5aab5b1, 0x76c654b, 0x108b73ab, 0x125d923f, 0x5ba9a8b, 0xcb7aa1e, 0x1f17ea76, 0x1facc14a, 0x1c1e037f, 0x2327674, 0x1e97b1cf, 0x14494a8c, 0x1b3270dd, 0x50e} +{{0x10df9458, 0x141b4cbd, 0xd5a45a8, 0x1b58a639, 0x5aab5b1, 0x76c654b, 0x108b73ab, 0x125d923f, 0x5ba9a8b, 0xcb7aa1e, 0x1f17ea76, 0x1facc14a, 0x1c1e037f, 0x2327674, 0x1e97b1cf, 0x14494a8c, 0x1b3270dd, 0x50e}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x45a8a0da65ec37e5, 0xaad6c76b14c72d5a, 0x8b73ab3b632a596, 0xf16ea6a2e4bb247f, 0x295f17ea7665bd50, 0x3b3a70780dfff598, 0x929519e97b1cf119, 
0x254876cc9c3768} +{{0x45a8a0da65ec37e5, 0xaad6c76b14c72d5a, 0x8b73ab3b632a596, 0xf16ea6a2e4bb247f, 0x295f17ea7665bd50, 0x3b3a70780dfff598, 0x929519e97b1cf119, 0x254876cc9c3768}} #else -{0x15141b4cbd86fca, 0xc76b14c72d5a45, 0x159db1952cb556b, 0xb92ec91fc22dce, 0xccb7aa1e2dd4d4, 0x1ff598295f17ea7, 0x188c9d9d383c06f, 0x1a24a5467a5ec73, 0x4a90ed99386e} +{{0x15141b4cbd86fca, 0xc76b14c72d5a45, 0x159db1952cb556b, 0xb92ec91fc22dce, 0xccb7aa1e2dd4d4, 0x1ff598295f17ea7, 0x188c9d9d383c06f, 0x1a24a5467a5ec73, 0x4a90ed99386e}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -2384,261 +2384,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x116d, 0x165f, 0x7d3, 0x488, 0xe08, 0xb12, 0x2e0, 0x18b4, 0xdbb, 0x181e, 0xe50, 0x19b8, 0xc17, 0x1c82, 0x1cf6, 0x7b9, 0xebb, 0x1c0a, 0x183a, 0x1f42, 0x1a3e, 0x176b, 0x19fa, 0x7e4, 0x1646, 0x1dc5, 0x1e9d, 0x4b3, 0x18df, 0xf2d, 0xe2e, 0x2e3, 0x903, 0x19ef, 0x1f94, 0x237, 0x18ef, 0x26c, 0x12f} +{{0x116d, 0x165f, 0x7d3, 0x488, 0xe08, 0xb12, 0x2e0, 0x18b4, 0xdbb, 0x181e, 0xe50, 0x19b8, 0xc17, 0x1c82, 0x1cf6, 0x7b9, 0xebb, 0x1c0a, 0x183a, 0x1f42, 0x1a3e, 0x176b, 0x19fa, 0x7e4, 0x1646, 0x1dc5, 0x1e9d, 0x4b3, 0x18df, 0xf2d, 0xe2e, 0x2e3, 0x903, 0x19ef, 0x1f94, 0x237, 0x18ef, 0x26c, 0x12f}} #elif RADIX == 32 -{0x18b69673, 0x107d3b2f, 0x49c1048, 0x5a0b816, 0x181e6dde, 0x1f370e50, 0x1b720982, 0xbb3dcf3, 0x1075c0a7, 0x1e8fbe85, 0x4cfd5da, 0x18b6463f, 0x12cfd3bd, 0x796e37c, 0x62e3717, 0x533de90, 0x7788dff, 0x2e6} +{{0x18b69673, 0x107d3b2f, 0x49c1048, 0x5a0b816, 0x181e6dde, 0x1f370e50, 0x1b720982, 0xbb3dcf3, 0x1075c0a7, 0x1e8fbe85, 0x4cfd5da, 0x18b6463f, 0x12cfd3bd, 0x796e37c, 0x62e3717, 0x533de90, 0x7788dff, 0x2e6}} #elif 
RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x104883e9d97e2da5, 0x79b778b41702c49c, 0xb720982f9b872860, 0x2c1d7029d767b9e7, 0xc7e4cfd5daf47df4, 0x71be4b3f4ef716c8, 0x67bd2062e37173cb, 0x1089b31de237fca} +{{0x104883e9d97e2da5, 0x79b778b41702c49c, 0xb720982f9b872860, 0x2c1d7029d767b9e7, 0xc7e4cfd5daf47df4, 0x71be4b3f4ef716c8, 0x67bd2062e37173cb, 0x1089b31de237fca}} #else -{0x9107d3b2fc5b4b, 0x178b41702c49c10, 0x17cdc394303cdb, 0x75d9ee79edc826, 0x15e8fbe8583ae05, 0x1716c8c7e4cfd5d, 0x19e5b8df259fa77, 0x1299ef4818b8dc5, 0x613663bc46ff} +{{0x9107d3b2fc5b4b, 0x178b41702c49c10, 0x17cdc394303cdb, 0x75d9ee79edc826, 0x15e8fbe8583ae05, 0x1716c8c7e4cfd5d, 0x19e5b8df259fa77, 0x1299ef4818b8dc5, 0x613663bc46ff}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x1c5d, 0x1d97, 0x1f4, 0x122, 0x1382, 0x2c4, 0xb8, 0x1e2d, 
0x136e, 0x607, 0x394, 0x1e6e, 0x1305, 0x1720, 0xf3d, 0x19ee, 0x13ae, 0x1702, 0x160e, 0x17d0, 0x1e8f, 0x15da, 0x67e, 0x11f9, 0xd91, 0xf71, 0x1fa7, 0x192c, 0xe37, 0x13cb, 0x1b8b, 0x18b8, 0x1a40, 0x67b, 0x1fe5, 0x188d, 0x63b, 0x189b, 0x47b} +{{0x1c5d, 0x1d97, 0x1f4, 0x122, 0x1382, 0x2c4, 0xb8, 0x1e2d, 0x136e, 0x607, 0x394, 0x1e6e, 0x1305, 0x1720, 0xf3d, 0x19ee, 0x13ae, 0x1702, 0x160e, 0x17d0, 0x1e8f, 0x15da, 0x67e, 0x11f9, 0xd91, 0xf71, 0x1fa7, 0x192c, 0xe37, 0x13cb, 0x1b8b, 0x18b8, 0x1a40, 0x67b, 0x1fe5, 0x188d, 0x63b, 0x189b, 0x47b}} #elif RADIX == 32 -{0x1e2ed505, 0x41f4ecb, 0x11270412, 0x11682e05, 0x6079b77, 0x17cdc394, 0x1edc8260, 0x1aecf73c, 0xc1d7029, 0x17a3efa1, 0x1933f576, 0xe2d918f, 0x4b3f4ef, 0x19e5b8df, 0x18b8dc5, 0x194cf7a4, 0x11de237f, 0x159} +{{0x1e2ed505, 0x41f4ecb, 0x11270412, 0x11682e05, 0x6079b77, 0x17cdc394, 0x1edc8260, 0x1aecf73c, 0xc1d7029, 0x17a3efa1, 0x1933f576, 0xe2d918f, 0x4b3f4ef, 0x19e5b8df, 0x18b8dc5, 0x194cf7a4, 0x11de237f, 0x159}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x41220fa765f8bb5, 0x1e6dde2d05c0b127, 0xedc8260be6e1ca18, 0xb075c0a75d9ee79, 0x31f933f576bd1f7d, 0xdc6f92cfd3bdc5b2, 0x99ef4818b8dc5cf2, 0x6e26cc7788dff2} +{{0x41220fa765f8bb5, 0x1e6dde2d05c0b127, 0xedc8260be6e1ca18, 0xb075c0a75d9ee79, 0x31f933f576bd1f7d, 0xdc6f92cfd3bdc5b2, 0x99ef4818b8dc5cf2, 0x6e26cc7788dff2}} #else -{0x2441f4ecbf176a, 0x1de2d05c0b12704, 0x105f370e50c0f36, 0x9d767b9e7b7209, 0xd7a3efa160eb81, 0x1dc5b231f933f57, 0xe796e37c967e9d, 0x1ca67bd2062e371, 0xdc4d98ef11bf} +{{0x2441f4ecbf176a, 0x1de2d05c0b12704, 0x105f370e50c0f36, 0x9d767b9e7b7209, 0xd7a3efa160eb81, 0x1dc5b231f933f57, 0xe796e37c967e9d, 0x1ca67bd2062e371, 0xdc4d98ef11bf}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 
#elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0x1e97, 0x1f23, 0x161, 0x7b2, 0x1221, 0x1d36, 0x14f1, 0xaa0, 0xce3, 0x1f6c, 0xeaf, 0x549, 0xa24, 0xe15, 0x1862, 0x1dba, 0xc75, 0xf1d, 0x15f9, 0x50d, 0xa99, 0x97b, 0xc21, 0x1549, 0x1c88, 0xfbe, 0xe33, 0xb27, 0x1dae, 0xb00, 0x82f, 0x44a, 0x371, 0x5c0, 0x1174, 0x1b28, 0xa0b, 0x9bd, 0x206} +{{0x1e97, 0x1f23, 0x161, 0x7b2, 0x1221, 0x1d36, 0x14f1, 0xaa0, 0xce3, 0x1f6c, 0xeaf, 0x549, 0xa24, 0xe15, 0x1862, 0x1dba, 0xc75, 0xf1d, 0x15f9, 0x50d, 0xa99, 0x97b, 0xc21, 0x1549, 0x1c88, 0xfbe, 0xe33, 0xb27, 0x1dae, 0xb00, 0x82f, 0x44a, 0x371, 0x5c0, 0x1174, 0x1b28, 0xa0b, 0x9bd, 0x206}} #elif RADIX == 32 -{0x1f4ba664, 0x4161f91, 0xda4427b, 0x15053c7a, 0x1f6c671a, 0x10a92eaf, 0x11385544, 0x75edd61, 0xbf2f1d6, 0x1aa64a1b, 0x9610a5e, 0x17dc88aa, 0xc9dc66f, 0x158076b9, 0x244a417, 0x1d0b8037, 0x105eca22, 0x7ea} +{{0x1f4ba664, 0x4161f91, 0xda4427b, 0x15053c7a, 0x1f6c671a, 0x10a92eaf, 0x11385544, 0x75edd61, 0xbf2f1d6, 0x1aa64a1b, 0x9610a5e, 0x17dc88aa, 0xc9dc66f, 0x158076b9, 0x244a417, 0x1d0b8037, 0x105eca22, 0x7ea}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x427b20b0fc8fd2e9, 0xb19c6aa0a78f4da4, 0x13855448549757fd, 0xdafcbc758ebdbac3, 0x1549610a5ed53250, 0x3b5cb27719befb91, 0x17006e244a417ac0, 0x1026f5417b288ba} +{{0x427b20b0fc8fd2e9, 0xb19c6aa0a78f4da4, 0x13855448549757fd, 0xdafcbc758ebdbac3, 0x1549610a5ed53250, 0x3b5cb27719befb91, 0x17006e244a417ac0, 0x1026f5417b288ba}} #else -{0xf64161f91fa5d3, 0x6aa0a78f4da442, 0x242a4babfed8ce, 0x163af6eb0c4e155, 0x1daa64a1b5f978e, 0x1efb911549610a5, 0x1d601dae593b8cd, 0xe85c01b8912905, 0x54dea82f6511} +{{0xf64161f91fa5d3, 0x6aa0a78f4da442, 0x242a4babfed8ce, 0x163af6eb0c4e155, 0x1daa64a1b5f978e, 0x1efb911549610a5, 0x1d601dae593b8cd, 0xe85c01b8912905, 0x54dea82f6511}} #endif #endif , #if 0 #elif RADIX == 16 -{0xcc, 0x1cb1, 0x706, 0x1f0b, 0xa79, 0xd89, 0xd1f, 0x1067, 0x1c50, 0x1e70, 0x41c, 0x1ce8, 0xd29, 0x7c7, 0x733, 0x460, 0x1e22, 0xe0b, 0x7f6, 0x1387, 0xe84, 0x273, 0x13e1, 0x1f1d, 0x1643, 0x1f1a, 0x3e, 0x7b7, 0xecf, 0x1578, 0x357, 0xaf4, 0x1f6c, 0x4c8, 0x11b9, 0x866, 0x80a, 0x13e2, 0x499} +{{0xcc, 0x1cb1, 0x706, 0x1f0b, 0xa79, 0xd89, 0xd1f, 0x1067, 0x1c50, 0x1e70, 0x41c, 0x1ce8, 0xd29, 0x7c7, 0x733, 0x460, 0x1e22, 0xe0b, 0x7f6, 0x1387, 0xe84, 0x273, 0x13e1, 0x1f1d, 0x1643, 0x1f1a, 0x3e, 0x7b7, 0xecf, 0x1578, 0x357, 0xaf4, 0x1f6c, 0x4c8, 0x11b9, 0x866, 0x80a, 0x13e2, 0x499}} #elif RADIX == 32 -{0x1066573b, 0x16706e58, 0x254f3f0, 0x33b47db, 0x1e70e284, 0x79d041c, 0x199f1da5, 0x222301c, 0xfece0bf, 0x1ba1270e, 0x1d9f089c, 0x35643f8, 0x1edc07df, 0x1abc3b3c, 0x18af41ab, 0xe4991f6, 0x5219a3, 0x292} +{{0x1066573b, 0x16706e58, 0x254f3f0, 0x33b47db, 0x1e70e284, 
0x79d041c, 0x199f1da5, 0x222301c, 0xfece0bf, 0x1ba1270e, 0x1d9f089c, 0x35643f8, 0x1edc07df, 0x1abc3b3c, 0x18af41ab, 0xe4991f6, 0x5219a3, 0x292}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf3f0b38372c41995, 0xc38a106768fb6254, 0x99f1da53ce820e79, 0x73fb382fc4446039, 0x7f1d9f089cdd0938, 0x1d9e7b701f7c6ac8, 0x9323ed8af41abd5e, 0x15cf890148668dc} +{{0xf3f0b38372c41995, 0xc38a106768fb6254, 0x99f1da53ce820e79, 0x73fb382fc4446039, 0x7f1d9f089cdd0938, 0x1d9e7b701f7c6ac8, 0x9323ed8af41abd5e, 0x15cf890148668dc}} #else -{0x1e16706e588332b, 0x106768fb6254f3, 0x129e741073ce1c5, 0x1f111180e667c76, 0x19ba1270e7f6705, 0x1c6ac87f1d9f089, 0x1eaf0ecf3db80fb, 0x1724c8fb62bd06a, 0x109f120290cd1} +{{0x1e16706e588332b, 0x106768fb6254f3, 0x129e741073ce1c5, 0x1f111180e667c76, 0x19ba1270e7f6705, 0x1c6ac87f1d9f089, 0x1eaf0ecf3db80fb, 0x1724c8fb62bd06a, 0x109f120290cd1}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0xd3d, 0x1bb8, 0x7b6, 0x2b7, 0x1f97, 0xc1a, 0x13ef, 0x6ac, 0xf50, 0x12de, 0xd45, 0x16d4, 0x69c, 0x16a8, 0xde4, 0xbd6, 0x14ea, 0x1d58, 0x193c, 0x160b, 0x1fc5, 0x20b, 0x1376, 0xbbb, 0x732, 0x8f8, 0x10f6, 0x1fef, 0xe7b, 0xb28, 0x10ba, 0x953, 0x1cfe, 0x1437, 0x1422, 0x178b, 0x1524, 0x590, 0x334} +{{0xd3d, 0x1bb8, 0x7b6, 0x2b7, 0x1f97, 0xc1a, 0x13ef, 0x6ac, 0xf50, 0x12de, 0xd45, 0x16d4, 0x69c, 0x16a8, 0xde4, 0xbd6, 0x14ea, 0x1d58, 0x193c, 0x160b, 0x1fc5, 0x20b, 0x1376, 0xbbb, 0x732, 0x8f8, 0x10f6, 0x1fef, 0xe7b, 0xb28, 0x10ba, 0x953, 0x1cfe, 0x1437, 0x1422, 0x178b, 0x1524, 0x590, 0x334}} #elif RADIX == 32 -{0x69ebcc0, 0xe7b6ddc, 0x6bf2e2b, 0x1564fbd8, 0x12de7a81, 0x12da8d45, 0x125aa0d3, 0xea5eb37, 0x1279d58a, 0x1ff16c17, 0x1b9bb082, 0x1f07325d, 0x1fbe1ec8, 0x59439ef, 0x1c95385d, 
0x8a86fcf, 0x925e2e8, 0xc85} +{{0x69ebcc0, 0xe7b6ddc, 0x6bf2e2b, 0x1564fbd8, 0x12de7a81, 0x12da8d45, 0x125aa0d3, 0xea5eb37, 0x1279d58a, 0x1ff16c17, 0x1b9bb082, 0x1f07325d, 0x1fbe1ec8, 0x59439ef, 0x1c95385d, 0x8a86fcf, 0x925e2e8, 0xc85}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x2e2b73db6ee1a7af, 0x79ea06ac9f7b06bf, 0x25aa0d396d46a2cb, 0xbc9e75629d4bd66f, 0x4bbb9bb082ff8b60, 0x1cf7fef87b23e0e6, 0x50df9fc95385d2ca, 0x51642a4978ba11} +{{0x2e2b73db6ee1a7af, 0x79ea06ac9f7b06bf, 0x25aa0d396d46a2cb, 0xbc9e75629d4bd66f, 0x4bbb9bb082ff8b60, 0x1cf7fef87b23e0e6, 0x50df9fc95385d2ca, 0x51642a4978ba11}} #else -{0x56e7b6ddc34f5e, 0x6ac9f7b06bf2e, 0x9cb6a35165bcf5, 0xa752f59bc96a83, 0x5ff16c1793ceac, 0x3e0e64bbb9bb08, 0x9650e7bff7c3d9, 0x45437e7f254e17, 0xa2c85492f174} +{{0x56e7b6ddc34f5e, 0x6ac9f7b06bf2e, 0x9cb6a35165bcf5, 0xa752f59bc96a83, 0x5ff16c1793ceac, 0x3e0e64bbb9bb08, 0x9650e7bff7c3d9, 0x45437e7f254e17, 0xa2c85492f174}} #endif #endif , #if 0 #elif RADIX == 16 -{0xb59, 0x1fb4, 0x1dac, 0x52d, 0x794, 0x1254, 0x1f9f, 0xdba, 0x151d, 0x1f01, 0x7f7, 0xb2b, 0x7e4, 0x1b36, 0x912, 0x1366, 0x1a04, 0x8ed, 0x1e58, 0x18f0, 0xffd, 0x455, 0xba9, 0x16d, 0x155f, 0x1198, 0x1264, 0x158b, 0x766, 0x66e, 0x1403, 0x15fd, 0xe0e, 0x1368, 0x9e6, 0x4af, 0x1fba, 0x1047, 0x464} +{{0xb59, 0x1fb4, 0x1dac, 0x52d, 0x794, 0x1254, 0x1f9f, 0xdba, 0x151d, 0x1f01, 0x7f7, 0xb2b, 0x7e4, 0x1b36, 0x912, 0x1366, 0x1a04, 0x8ed, 0x1e58, 0x18f0, 0xffd, 0x455, 0xba9, 0x16d, 0x155f, 0x1198, 0x1264, 0x158b, 0x766, 0x66e, 0x1403, 0x15fd, 0xe0e, 0x1368, 0x9e6, 0x4af, 0x1fba, 0x1047, 0x464}} #elif RADIX == 32 -{0x5acd34c, 0x1bdacfda, 0x150f2852, 0xdd7e7e4, 0x1f01a8eb, 0x116567f7, 0x96cd8fc, 0x49b324, 0x1cb08edd, 0xbff71e1, 0xd5d4915, 0x13155f0b, 0x162e4c91, 0x13371d9a, 0x1d5fda01, 0x19a6d0e0, 0x1dd12bd3, 0x3f} +{{0x5acd34c, 0x1bdacfda, 0x150f2852, 0xdd7e7e4, 0x1f01a8eb, 0x116567f7, 0x96cd8fc, 0x49b324, 0x1cb08edd, 0xbff71e1, 0xd5d4915, 0x13155f0b, 0x162e4c91, 0x13371d9a, 0x1d5fda01, 0x19a6d0e0, 0x1dd12bd3, 0x3f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x2852ded67ed16b34, 0x6a3adbafcfc950f, 0x96cd8fc8b2b3fbfc, 0xf2c23b740936648, 0xe16d5d49155ffb8f, 0x8ecd58b9324662ab, 0x4da1c1d5fda0199b, 0x16411ff744af4f3} +{{0x2852ded67ed16b34, 0x6a3adbafcfc950f, 0x96cd8fc8b2b3fbfc, 0xf2c23b740936648, 0xe16d5d49155ffb8f, 0x8ecd58b9324662ab, 0x4da1c1d5fda0199b, 0x16411ff744af4f3}} #else -{0xa5bdacfda2d669, 0x1adbafcfc950f28, 0x1e45959fdfe0351, 0x1d024d99225b363, 0xabff71e1e58476, 0x662abe16d5d491, 0xccdc766ac5c992, 0x1cd36870757f680, 0x11823fee895e9} +{{0xa5bdacfda2d669, 0x1adbafcfc950f28, 0x1e45959fdfe0351, 0x1d024d99225b363, 0xabff71e1e58476, 0x662abe16d5d491, 0xccdc766ac5c992, 0x1cd36870757f680, 0x11823fee895e9}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x1156, 0x273, 0x1153, 0x89b, 0xc67, 0x9dc, 0x14b5, 0x1d27, 0x1c5e, 0x18e6, 0x1dfa, 0x1beb, 0x12e7, 0xe02, 0x1614, 0x12b0, 0x1646, 0x1bdb, 0x1e1f, 0x1eb6, 0x361, 0x1fb, 0x2ee, 0xee2, 0x178c, 0xedd, 0x1ba6, 0xf1c, 0x1e7f, 0x1dac, 0x137d, 0x18db, 0x8e8, 0xa0, 0x1faf, 0x5cb, 0x1078, 0x1562, 0x36e} +{{0x1156, 0x273, 0x1153, 0x89b, 0xc67, 0x9dc, 0x14b5, 0x1d27, 0x1c5e, 0x18e6, 0x1dfa, 0x1beb, 0x12e7, 0xe02, 0x1614, 0x12b0, 0x1646, 0x1bdb, 0x1e1f, 0x1eb6, 0x361, 0x1fb, 0x2ee, 0xee2, 0x178c, 0xedd, 0x1ba6, 0xf1c, 0x1e7f, 0x1dac, 0x137d, 0x18db, 0x8e8, 0xa0, 0x1faf, 0x5cb, 0x1078, 0x1562, 0x36e}} #elif RADIX == 32 -{0x18ab4116, 0x17153139, 0x1718ce89, 0x93d2d53, 0x18e6e2f7, 0x1f7d7dfa, 0xa380a5c, 0x4695858, 0x1c3fbdbb, 0x18d87d6d, 0x217707e, 0x1bb78c77, 0x1c7374ce, 0x1ed679fd, 0x118db9be, 0xbc1408e, 0x3c172ff, 0x214} +{{0x18ab4116, 0x17153139, 0x1718ce89, 0x93d2d53, 0x18e6e2f7, 0x1f7d7dfa, 0xa380a5c, 0x4695858, 0x1c3fbdbb, 0x18d87d6d, 0x217707e, 0x1bb78c77, 0x1c7374ce, 0x1ed679fd, 0x118db9be, 0xbc1408e, 0x3c172ff, 0x214}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xce89b8a989ce2ad0, 0x9b8bdd27a5aa7718, 0xa380a5cfbebefd63, 0x6f0fef6ec8d2b0b0, 0x8ee217707ec6c3eb, 0x3cfef1cdd33b76f1, 0x82811d18db9bef6b, 0x7558a0f05cbfd7} +{{0xce89b8a989ce2ad0, 0x9b8bdd27a5aa7718, 0xa380a5cfbebefd63, 0x6f0fef6ec8d2b0b0, 0x8ee217707ec6c3eb, 0x3cfef1cdd33b76f1, 0x82811d18db9bef6b, 0x7558a0f05cbfd7}} #else -{0x1137153139c55a0, 0x1dd27a5aa7718ce, 0xe7df5f7eb1cdc5, 0x1b234ac2c28e029, 0x1d8d87d6de1fded, 0x1b76f18ee217707, 0x17b59e7f78e6e99, 0x15e0a0474636e6f, 0xeab141e0b97f} +{{0x1137153139c55a0, 0x1dd27a5aa7718ce, 0xe7df5f7eb1cdc5, 0x1b234ac2c28e029, 0x1d8d87d6de1fded, 0x1b76f18ee217707, 0x17b59e7f78e6e99, 0x15e0a0474636e6f, 0xeab141e0b97f}} #endif #endif , #if 0 #elif RADIX == 16 -{0xb32, 0x149, 0x1615, 0x77e, 0xf55, 0x189, 0xe2a, 0x13bc, 0xf83, 0x124d, 0xcaa, 0x22, 0xcea, 0x8f9, 0xc5e, 0x8bc, 0x4ff, 0x14da, 0x394, 0x4a2, 0x1767, 0x1d20, 0x1531, 0x1dff, 0x929, 0x15cf, 0x1f69, 0x1630, 0x669, 0x11ec, 0x162c, 0xcf3, 0xde5, 0x185f, 0x1da0, 0x1db9, 0x1d93, 0xb9b, 0x38f} +{{0xb32, 0x149, 0x1615, 0x77e, 0xf55, 0x189, 0xe2a, 0x13bc, 0xf83, 0x124d, 0xcaa, 0x22, 0xcea, 0x8f9, 0xc5e, 0x8bc, 0x4ff, 0x14da, 0x394, 0x4a2, 0x1767, 0x1d20, 0x1531, 0x1dff, 0x929, 0x15cf, 0x1f69, 0x1630, 0x669, 0x11ec, 0x162c, 0xcf3, 0xde5, 0x185f, 0x1da0, 0x1db9, 0x1d93, 0xb9b, 0x38f}} #elif RADIX == 32 -{0x15994382, 0x1d6150a4, 0x25eaa77, 0x1de38a83, 0x124d7c1c, 0x8044caa, 
0xf23e59d, 0xff45e31, 0x7294da2, 0x5d9c944, 0x1fa98f48, 0x19e929ef, 0x18c3ed35, 0x8f619a6, 0xacf3b16, 0x830bede, 0xc9f6e7b, 0x1df} +{{0x15994382, 0x1d6150a4, 0x25eaa77, 0x1de38a83, 0x124d7c1c, 0x8044caa, 0xf23e59d, 0xff45e31, 0x7294da2, 0x5d9c944, 0x1fa98f48, 0x19e929ef, 0x18c3ed35, 0x8f619a6, 0xacf3b16, 0x830bede, 0xc9f6e7b, 0x1df}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xaa77eb0a85256650, 0x35f073bc7150625e, 0xf23e59d402265549, 0x21ca53689fe8bc62, 0x3dffa98f482ece4a, 0xcd3630fb4d73d25, 0x617dbcacf3b1647b, 0x17ae6fb27db9ed0} +{{0xaa77eb0a85256650, 0x35f073bc7150625e, 0xf23e59d402265549, 0x21ca53689fe8bc62, 0x3dffa98f482ece4a, 0xcd3630fb4d73d25, 0x617dbcacf3b1647b, 0x17ae6fb27db9ed0}} #else -{0xefd6150a4acca1, 0x73bc7150625eaa, 0xea01132aa49af8, 0x27fa2f18bc8f96, 0x105d9c944394a6d, 0x173d253dffa98f4, 0x123d8669b187da6, 0x14185f6f2b3cec5, 0x145cdf64fb73d} +{{0xefd6150a4acca1, 0x73bc7150625eaa, 0xea01132aa49af8, 0x27fa2f18bc8f96, 0x105d9c944394a6d, 0x173d253dffa98f4, 0x123d8669b187da6, 0x14185f6f2b3cec5, 0x145cdf64fb73d}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ @@ -2860,261 +2860,261 @@ const curve_with_endomorphism_ring_t CURVES_WITH_ENDOMORPHISMS[7] = {{{{ }}}}, {{{ #if 0 #elif RADIX == 16 -{0x1414, 0x1912, 0xddb, 0xb9, 0x121b, 0x195d, 0x6c5, 0xe75, 0x1d16, 0xcaf, 0x1a05, 0x15cc, 0x1537, 0x1c57, 0x141a, 0x1b9d, 0x5b0, 0x9e7, 0xf10, 0x300, 0x1d5c, 0xc13, 0x245, 0x1b91, 0x352, 0x730, 0xd55, 0x1ca3, 0x1dac, 0x7b0, 0x15b9, 0x1ba6, 0x7d6, 0xba4, 0xc12, 0x649, 0xf1, 0xd51, 0x107} +{{0x1414, 0x1912, 0xddb, 0xb9, 0x121b, 0x195d, 0x6c5, 0xe75, 0x1d16, 0xcaf, 0x1a05, 0x15cc, 0x1537, 0x1c57, 0x141a, 0x1b9d, 0x5b0, 0x9e7, 0xf10, 0x300, 0x1d5c, 0xc13, 0x245, 0x1b91, 0x352, 0x730, 0xd55, 
0x1ca3, 0x1dac, 0x7b0, 0x15b9, 0x1ba6, 0x7d6, 0xba4, 0xc12, 0x649, 0xf1, 0xd51, 0x107}} #elif RADIX == 32 -{0xa0a1383, 0x12ddbc89, 0x1764360b, 0x13a9b172, 0xcafe8b3, 0x1eb99a05, 0xd715ea6, 0x1b0dced0, 0x1e209e72, 0x1f570600, 0x11122b04, 0x60352dc, 0x128daaa7, 0x13d876b3, 0xdba6adc, 0x497487d, 0x7899258, 0x208} +{{0xa0a1383, 0x12ddbc89, 0x1764360b, 0x13a9b172, 0xcafe8b3, 0x1eb99a05, 0xd715ea6, 0x1b0dced0, 0x1e209e72, 0x1f570600, 0x11122b04, 0x60352dc, 0x128daaa7, 0x13d876b3, 0xdba6adc, 0x497487d, 0x7899258, 0x208}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x360b96ede44a8284, 0xbfa2ce75362e5764, 0xd715ea6f5ccd02b2, 0x788279cb61b9da0, 0x5b91122b04fab830, 0x3b59ca36aa9cc06a, 0x2e90fadba6adc9ec, 0x17b5441e2649609} +{{0x360b96ede44a8284, 0xbfa2ce75362e5764, 0xd715ea6f5ccd02b2, 0x788279cb61b9da0, 0x5b91122b04fab830, 0x3b59ca36aa9cc06a, 0x2e90fadba6adc9ec, 0x17b5441e2649609}} #else -{0x172ddbc8950509, 0xce75362e576436, 0x137ae6681595fd1, 0x12d86e76835c57a, 0x9f570600f104f3, 0x1cc06a5b91122b0, 0x4f61dace51b554, 0x24ba43eb6e9ab7, 0x146a883c4c92c} +{{0x172ddbc8950509, 0xce75362e576436, 0x137ae6681595fd1, 0x12d86e76835c57a, 0x9f570600f104f3, 0x1cc06a5b91122b0, 0x4f61dace51b554, 0x24ba43eb6e9ab7, 0x146a883c4c92c}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, {{ #if 0 #elif RADIX == 16 -{0x1507, 0x1e44, 0xb76, 0x182e, 0xc86, 0xe57, 0x9b1, 0x139d, 0x1f45, 0xb2b, 0x681, 0x1d73, 0x1d4d, 0x1715, 0xd06, 0x6e7, 0x196c, 0x279, 0x3c4, 0xc0, 0x1f57, 0xb04, 0x891, 0x16e4, 0xd4, 0x9cc, 0x1b55, 0x728, 0x76b, 0x9ec, 0x156e, 0x16e9, 0x1f5, 0x12e9, 0xb04, 0x992, 0x83c, 0x1b54, 0x2c1} +{{0x1507, 0x1e44, 0xb76, 0x182e, 0xc86, 0xe57, 0x9b1, 0x139d, 0x1f45, 0xb2b, 0x681, 0x1d73, 0x1d4d, 0x1715, 0xd06, 0x6e7, 0x196c, 0x279, 0x3c4, 0xc0, 0x1f57, 0xb04, 0x891, 0x16e4, 0xd4, 0x9cc, 0x1b55, 0x728, 0x76b, 0x9ec, 0x156e, 0x16e9, 0x1f5, 0x12e9, 0xb04, 0x992, 0x83c, 0x1b54, 0x2c1}} #elif RADIX == 32 -{0xa83b449, 0x1cb76f22, 0x15d90d82, 0x1cea6c5c, 0xb2bfa2c, 0x17ae6681, 0x35c57a9, 0x16c373b4, 0x788279c, 0x7d5c180, 0x4448ac1, 0x1980d4b7, 0x1ca36aa9, 0x4f61dac, 0xb6e9ab7, 0x125d21f, 0x1e26496, 0x122} +{{0xa83b449, 0x1cb76f22, 0x15d90d82, 0x1cea6c5c, 0xb2bfa2c, 0x17ae6681, 0x35c57a9, 0x16c373b4, 0x788279c, 0x7d5c180, 0x4448ac1, 0x1980d4b7, 0x1ca36aa9, 0x4f61dac, 0xb6e9ab7, 0x125d21f, 0x1e26496, 0x122}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xd82e5bb7912a0ed, 0xafe8b39d4d8b95d9, 0x35c57a9bd73340ac, 0x1e209e72d86e768, 0x96e4448ac13eae0c, 0xed6728daaa7301a, 0x4ba43eb6e9ab727b, 0x1ed51078992582} +{{0xd82e5bb7912a0ed, 0xafe8b39d4d8b95d9, 0x35c57a9bd73340ac, 0x1e209e72d86e768, 0x96e4448ac13eae0c, 0xed6728daaa7301a, 0x4ba43eb6e9ab727b, 0x1ed51078992582}} #else -{0x105cb76f22541da, 0xb39d4d8b95d90d, 0x14deb99a05657f4, 0x1cb61b9da0d715e, 0x27d5c1803c413c, 0x7301a96e4448ac, 0x193d876b3946d55, 0x92e90fadba6ad, 0x3daa20f1324b} +{{0x105cb76f22541da, 0xb39d4d8b95d90d, 0x14deb99a05657f4, 0x1cb61b9da0d715e, 0x27d5c1803c413c, 0x7301a96e4448ac, 0x193d876b3946d55, 0x92e90fadba6ad, 0x3daa20f1324b}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, true}, {{{ #if 0 #elif RADIX == 16 -{0xa07, 0x1f97, 0x13c4, 0xb69, 0x15ec, 0x161d, 0x194, 0x135c, 0xe18, 0x119a, 0x684, 0x199, 0x1a93, 0x906, 0x62e, 0x1ad4, 0xc99, 0x40b, 0x10df, 0xf12, 0x9ee, 0x93, 0x1837, 0x42d, 0x1ea3, 0x1967, 0x1d41, 0x422, 0x2d5, 0x17d0, 0x1550, 0x1c2d, 0x139a, 0x152b, 0xa57, 0x1072, 0x13bf, 0x1fe7, 0x57a} +{{0xa07, 0x1f97, 0x13c4, 0xb69, 0x15ec, 0x161d, 0x194, 0x135c, 0xe18, 0x119a, 0x684, 0x199, 0x1a93, 0x906, 0x62e, 0x1ad4, 0xc99, 0x40b, 0x10df, 0xf12, 0x9ee, 0x93, 0x1837, 0x42d, 0x1ea3, 0x1967, 0x1d41, 0x422, 0x2d5, 0x17d0, 0x1550, 0x1c2d, 0x139a, 0x152b, 0xa57, 0x1072, 0x13bf, 0x1fe7, 0x57a}} #elif RADIX == 32 -{0x1503e7ec, 0x133c4fcb, 0x76bd8b6, 0x1ae0652c, 0x119a70c4, 0xc332684, 0x17241b52, 0x99d6a18, 0x1be40b6, 0x1a7b9e25, 0xdc1b824, 0xcfea321, 0x108ba839, 0xbe80b54, 0x15c2daa8, 0x15ea5739, 0x1dfc1c94, 0xd3c} +{{0x1503e7ec, 0x133c4fcb, 0x76bd8b6, 0x1ae0652c, 0x119a70c4, 0xc332684, 0x17241b52, 0x99d6a18, 0x1be40b6, 0x1a7b9e25, 0xdc1b824, 0xcfea321, 0x108ba839, 0xbe80b54, 0x15c2daa8, 0x15ea5739, 0x1dfc1c94, 0xd3c}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xd8b699e27e5d40f9, 0x69c3135c0ca5876b, 0x7241b52619934246, 0x286f902d933ad431, 0x642dc1b824d3dcf1, 0x5aa422ea0e59fd4, 0xd4ae735c2daa85f4, 0x1a7f9e77f07252b} +{{0xd8b699e27e5d40f9, 0x69c3135c0ca5876b, 0x7241b52619934246, 0x286f902d933ad431, 0x642dc1b824d3dcf1, 0x5aa422ea0e59fd4, 0xd4ae735c2daa85f4, 0x1a7f9e77f07252b}} #else -{0x16d33c4fcba81f3, 0x1135c0ca5876bd8, 0x930cc9a12334e1, 0x164ceb50c5c906d, 0x9a7b9e250df205, 0x59fd4642dc1b82, 0x2fa02d52117507, 0xaf52b9cd70b6aa, 0x19ff3cefe0e4a} +{{0x16d33c4fcba81f3, 0x1135c0ca5876bd8, 0x930cc9a12334e1, 0x164ceb50c5c906d, 0x9a7b9e250df205, 0x59fd4642dc1b82, 0x2fa02d52117507, 0xaf52b9cd70b6aa, 0x19ff3cefe0e4a}} #endif #endif , #if 0 #elif RADIX == 16 -{0x2, 0x2d8, 0x113e, 0xa74, 0x660, 0x141f, 0x64f, 0x885, 0x46, 0x17b9, 0x94f, 0x1b44, 0x361, 0xbf6, 0x1f17, 0x583, 0x18b3, 0x118e, 0x9ba, 0x49f, 0x1fc3, 0x13eb, 0x11c8, 0xcc8, 0x1b2d, 0x8c, 0x9c6, 0x1d9, 0xf33, 0x53d, 0x129a, 0x1b4a, 0x65, 0x169a, 0xe74, 0x544, 0x17e3, 0x1f0f, 0x2a6} +{{0x2, 0x2d8, 0x113e, 0xa74, 0x660, 0x141f, 0x64f, 0x885, 0x46, 0x17b9, 0x94f, 
0x1b44, 0x361, 0xbf6, 0x1f17, 0x583, 0x18b3, 0x118e, 0x9ba, 0x49f, 0x1fc3, 0x13eb, 0x11c8, 0xcc8, 0x1b2d, 0x8c, 0x9c6, 0x1d9, 0xf33, 0x53d, 0x129a, 0x1b4a, 0x65, 0x169a, 0xe74, 0x544, 0x17e3, 0x1f0f, 0x2a6}} #elif RADIX == 32 -{0x1324b, 0x913e16c, 0x7ccc0a7, 0x42993e8, 0x17b90232, 0x768894f, 0xbafd86c, 0xb32c1fc, 0x137518ec, 0x1ff0c93e, 0x88e44fa, 0x119b2d66, 0x76538c0, 0x29ebccc, 0xbb4a94d, 0x1d2d3406, 0x1f19511c, 0x3fd} +{{0x1324b, 0x913e16c, 0x7ccc0a7, 0x42993e8, 0x17b90232, 0x768894f, 0xbafd86c, 0xb32c1fc, 0x137518ec, 0x1ff0c93e, 0x88e44fa, 0x119b2d66, 0x76538c0, 0x29ebccc, 0xbb4a94d, 0x1d2d3406, 0x1f19511c, 0x3fd}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xc0a7489f0b60004c, 0xe408c885327d07cc, 0xbafd86c3b444a7de, 0xf4dd463b166583f8, 0xacc88e44faff8649, 0x5e661d94e3023365, 0x5a680cbb4a94d14f, 0xf7c3efc654473a} +{{0xc0a7489f0b60004c, 0xe408c885327d07cc, 0xbafd86c3b444a7de, 0xf4dd463b166583f8, 0xacc88e44faff8649, 0x5e661d94e3023365, 0x5a680cbb4a94d14f, 0xf7c3efc654473a}} #else -{0x14e913e16c00099, 0xc885327d07ccc0, 0x161da2253ef7204, 0xc59960fe2ebf61, 0x15ff0c93e9ba8c7, 0x23365acc88e44f, 0x8a7af330eca718, 0xe969a032ed2a53, 0x3f87df8ca88e} +{{0x14e913e16c00099, 0xc885327d07ccc0, 0x161da2253ef7204, 0xc59960fe2ebf61, 0x15ff0c93e9ba8c7, 0x23365acc88e44f, 0x8a7af330eca718, 0xe969a032ed2a53, 0x3f87df8ca88e}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0xe6e, 0xc55, 0xb5a, 0x1be4, 0x10f8, 0x1175, 0x1ada, 0x13de, 0xa0d, 0x1cb, 0x6f3, 0x91f, 0x70c, 0x12ef, 0x1403, 0x115a, 0x1205, 0x1705, 0xb8a, 0x490, 0x681, 0x1a6f, 0xd49, 0x2ca, 0x7e2, 0x1ad8, 0x1aa6, 0x9e8, 0x1f0f, 0x1df, 0xc32, 0xd30, 0x1a34, 0xfc4, 0x1519, 0x1cde, 0x7c9, 0x12da, 0x157} +{{0xe6e, 0xc55, 0xb5a, 
0x1be4, 0x10f8, 0x1175, 0x1ada, 0x13de, 0xa0d, 0x1cb, 0x6f3, 0x91f, 0x70c, 0x12ef, 0x1403, 0x115a, 0x1205, 0x1705, 0xb8a, 0x490, 0x681, 0x1a6f, 0xd49, 0x2ca, 0x7e2, 0x1ad8, 0x1aa6, 0x9e8, 0x1f0f, 0x1df, 0xc32, 0xd30, 0x1a34, 0xfc4, 0x1519, 0x1cde, 0x7c9, 0x12da, 0x157}} #elif RADIX == 32 -{0x17371973, 0x8b5a62a, 0x1d61f1be, 0x1ef6b6a2, 0x1cb506c, 0x1123e6f3, 0x1cbbce1, 0x58ad50, 0x17157059, 0x19a04920, 0xa6a4e9b, 0x1b07e216, 0x7a354da, 0xeffc3d, 0x8d30619, 0x65f89a3, 0x1e4f37aa, 0x651} +{{0x17371973, 0x8b5a62a, 0x1d61f1be, 0x1ef6b6a2, 0x1cb506c, 0x1123e6f3, 0x1cbbce1, 0x58ad50, 0x17157059, 0x19a04920, 0xa6a4e9b, 0x1b07e216, 0x7a354da, 0xeffc3d, 0x8d30619, 0x65f89a3, 0x1e4f37aa, 0x651}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf1be45ad3155cdc6, 0x2d41b3ded6d45d61, 0x1cbbce1891f37987, 0x5c55c1640b15aa0, 0x42ca6a4e9bcd0249, 0xfe1e9e8d536b60fc, 0xbf13468d30619077, 0x9cb68f93cdea8c} +{{0xf1be45ad3155cdc6, 0x2d41b3ded6d45d61, 0x1cbbce1891f37987, 0x5c55c1640b15aa0, 0x42ca6a4e9bcd0249, 0xfe1e9e8d536b60fc, 0xbf13468d30619077, 0x9cb68f93cdea8c}} #else -{0x17c8b5a62ab9b8c, 0x1b3ded6d45d61f1, 0x10c48f9bcc396a0, 0x1902c56a8072ef3, 0x179a04920b8ab82, 0xb60fc42ca6a4e9, 0x83bff0f4f46a9b, 0x32fc4d1a34c186, 0x1396d1f279bd5} +{{0x17c8b5a62ab9b8c, 0x1b3ded6d45d61f1, 0x10c48f9bcc396a0, 0x1902c56a8072ef3, 0x179a04920b8ab82, 0xb60fc42ca6a4e9, 0x83bff0f4f46a9b, 0x32fc4d1a34c186, 0x1396d1f279bd5}} #endif #endif , #if 0 #elif RADIX == 16 -{0xc71, 0x167c, 0x1de2, 0x708, 0xb78, 0x1797, 0x16d0, 0xc73, 0x1f29, 0x1014, 0x1753, 0x1dd9, 0x1326, 0xab2, 0x1e6e, 0x51a, 0x32d, 0x7c1, 0x127b, 0x1b08, 0xcd4, 0x5fd, 0x159a, 0xb2c, 0x137d, 0x28f, 0xc4f, 0x121a, 0x16dd, 0x1771, 0xa7b, 0x11b9, 0xe86, 0x199c, 0x1cb5, 0x2db, 0x14b3, 0x1e97, 0x7b} +{{0xc71, 0x167c, 0x1de2, 0x708, 0xb78, 0x1797, 0x16d0, 0xc73, 0x1f29, 0x1014, 0x1753, 0x1dd9, 0x1326, 0xab2, 0x1e6e, 0x51a, 0x32d, 0x7c1, 0x127b, 0x1b08, 0xcd4, 0x5fd, 0x159a, 0xb2c, 0x137d, 0x28f, 0xc4f, 0x121a, 0x16dd, 0x1771, 0xa7b, 0x11b9, 0xe86, 0x199c, 0x1cb5, 0x2db, 0x14b3, 0x1e97, 0x7b}} #elif RADIX == 32 -{0x638892e, 0x11de2b3e, 0x5d6f070, 0x39db42f, 0x1014f94b, 0x1bbb3753, 0x172aca64, 0x12d28d79, 0x4f67c11, 0xb353611, 0xcacd17f, 0x11f37d59, 0x86989e2, 0x1bb8db76, 0xd1b953d, 0xd7338e8, 0x598b6f9, 0x7bd} +{{0x638892e, 0x11de2b3e, 0x5d6f070, 0x39db42f, 0x1014f94b, 0x1bbb3753, 0x172aca64, 0x12d28d79, 0x4f67c11, 0xb353611, 0xcacd17f, 0x11f37d59, 0x86989e2, 0x1bb8db76, 0xd1b953d, 0xd7338e8, 0x598b6f9, 0x7bd}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xf0708ef159f18e22, 0x53e52c73b685e5d6, 0x72aca64ddd9ba9c0, 0x893d9f0465a51af3, 0xab2cacd17f59a9b0, 0x6dbb21a6278a3e6f, 0xe671d0d1b953dddc, 0x7fa5e9662dbe5a} +{{0xf0708ef159f18e22, 0x53e52c73b685e5d6, 0x72aca64ddd9ba9c0, 0x893d9f0465a51af3, 0xab2cacd17f59a9b0, 0x6dbb21a6278a3e6f, 0xe671d0d1b953dddc, 0x7fa5e9662dbe5a}} #else -{0xe11de2b3e31c44, 0x12c73b685e5d6f0, 0x126eecdd4e029f2, 0x1196946bcdcab29, 0x1eb35361127b3e0, 0xa3e6fab2cacd17, 0xeee36dd90d313c, 0x16b99c74346e54f, 0xff4bd2cc5b7c} +{{0xe11de2b3e31c44, 0x12c73b685e5d6f0, 0x126eecdd4e029f2, 0x1196946bcdcab29, 0x1eb35361127b3e0, 0xa3e6fab2cacd17, 0xeee36dd90d313c, 0x16b99c74346e54f, 0xff4bd2cc5b7c}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}, {{ #if 0 #elif RADIX == 16 -{0x111d, 0x19ac, 0x1a8f, 0xc58, 0xaa, 0xdc, 0x13de, 0x1dc, 0x17a6, 0x1e3d, 0x198a, 0x40a, 0x120b, 0x17ba, 0x91c, 0x1858, 0xee4, 0x33b, 0x18aa, 0x1124, 0x5f8, 0x37d, 0xf3e, 0xa4b, 0x1e1, 0x2bd, 0x1ff2, 0x1a56, 0x1168, 0x739, 0x1fee, 0x190c, 0x13e9, 0xd07, 0x17fd, 0x1b9e, 0x198b, 0x1faa, 0xd2} +{{0x111d, 0x19ac, 0x1a8f, 0xc58, 0xaa, 0xdc, 0x13de, 0x1dc, 0x17a6, 0x1e3d, 0x198a, 0x40a, 0x120b, 0x17ba, 0x91c, 0x1858, 0xee4, 0x33b, 0x18aa, 0x1124, 0x5f8, 0x37d, 0xf3e, 0xa4b, 0x1e1, 0x2bd, 0x1ff2, 0x1a56, 0x1168, 0x739, 0x1fee, 0x190c, 0x13e9, 0xd07, 0x17fd, 0x1b9e, 0x198b, 0x1faa, 0xd2}} #elif RADIX == 32 -{0x88e8fa0, 0x11a8fcd6, 0x170154c5, 0xee4f781, 0x1e3dbd30, 0xc81598a, 0xe5eea41, 0xe4c2c24, 0x115433b7, 0x97e2249, 0xb79f0df, 0x17a1e152, 0x95bfe42, 0x39cc5a3, 0x1390cff7, 0x1f5a0f3e, 0xc5ee7af, 0xd56} +{{0x88e8fa0, 0x11a8fcd6, 0x170154c5, 0xee4f781, 0x1e3dbd30, 0xc81598a, 0xe5eea41, 0xe4c2c24, 0x115433b7, 0x97e2249, 0xb79f0df, 0x17a1e152, 0x95bfe42, 0x39cc5a3, 0x1390cff7, 0x1f5a0f3e, 0xc5ee7af, 0xd56}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x54c58d47e6b223a3, 0xf6f4c1dc9ef03701, 0xe5eea41640acc578, 0x4c550ceddc985848, 0x2a4b79f0df4bf112, 0x62d1a56ff90af43c, 0xb41e7d390cff71ce, 0x187eab317b9ebfe} +{{0x54c58d47e6b223a3, 0xf6f4c1dc9ef03701, 0xe5eea41640acc578, 0x4c550ceddc985848, 0x2a4b79f0df4bf112, 0x62d1a56ff90af43c, 0xb41e7d390cff71ce, 0x187eab317b9ebfe}} #else -{0x18b1a8fcd644747, 0xc1dc9ef0370154, 0xb205662bc7b7a, 0x177261612397ba9, 0x1e97e22498aa19d, 0xaf43c2a4b79f0d, 0x18e73168d2b7fc8, 0x1fad079f4e433fd, 0x15fd5662f73d7} +{{0x18b1a8fcd644747, 0xc1dc9ef0370154, 0xb205662bc7b7a, 0x177261612397ba9, 0x1e97e22498aa19d, 0xaf43c2a4b79f0d, 0x18e73168d2b7fc8, 0x1fad079f4e433fd, 0x15fd5662f73d7}} #endif #endif , #if 0 #elif RADIX == 16 -{0x63c, 0x609, 0x89c, 0x1f09, 0x9c9, 0x1e89, 0x1826, 0x1460, 0x15d6, 0xa52, 0xbb2, 0x1b93, 0x1f90, 0xa2f, 0x3b3, 0x1a76, 0x1c29, 0x17fc, 0x864, 0x55a, 0x1a9b, 0x7fa, 0x7ee, 0x75f, 0x1b4b, 0x15e6, 0xd75, 0x1238, 0x847, 0x1711, 0x9e7, 0xa37, 
0x4b6, 0x1264, 0x3e1, 0xf87, 0x1c47, 0x706, 0x20b} +{{0x63c, 0x609, 0x89c, 0x1f09, 0x9c9, 0x1e89, 0x1826, 0x1460, 0x15d6, 0xa52, 0xbb2, 0x1b93, 0x1f90, 0xa2f, 0x3b3, 0x1a76, 0x1c29, 0x17fc, 0x864, 0x55a, 0x1a9b, 0x7fa, 0x7ee, 0x75f, 0x1b4b, 0x15e6, 0xd75, 0x1238, 0x847, 0x1711, 0x9e7, 0xa37, 0x4b6, 0x1264, 0x3e1, 0xf87, 0x1c47, 0x706, 0x20b}} #elif RADIX == 32 -{0x131e26c1, 0x1289c304, 0x25393f0, 0x30609bd, 0xa52aeb5, 0x3726bb2, 0x19a8bff2, 0x29d3b0e, 0x10c97fce, 0x16a6cab4, 0x1f3f71fe, 0x1cdb4b3a, 0x8e1aeb5, 0x1b88a11e, 0xca374f3, 0x1864c84b, 0x23be1c7, 0xab7} +{{0x131e26c1, 0x1289c304, 0x25393f0, 0x30609bd, 0xa52aeb5, 0x3726bb2, 0x19a8bff2, 0x29d3b0e, 0x10c97fce, 0x16a6cab4, 0x1f3f71fe, 0x1cdb4b3a, 0x8e1aeb5, 0x1b88a11e, 0xca374f3, 0x1864c84b, 0x23be1c7, 0xab7}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x93f0944e1824c789, 0x4abad460c137a253, 0x9a8bff21b935d929, 0xa4325ff3853a761d, 0x675f3f71feb53655, 0x508f2386bad79b69, 0xc99096ca374f3dc4, 0x129c1b88ef871f0} +{{0x93f0944e1824c789, 0x4abad460c137a253, 0x9a8bff21b935d929, 0xa4325ff3853a761d, 0x675f3f71feb53655, 0x508f2386bad79b69, 0xc99096ca374f3dc4, 0x129c1b88ef871f0}} #else -{0x1e1289c30498f13, 0xd460c137a25393, 0x190dc9aec94a55d, 0xe14e9d8766a2ff, 0x1d6a6cab4864bfe, 0x179b69675f3f71f, 0x1ee2284791c35d6, 0x1c326425b28dd3c, 0xa383711df0e3} +{{0x1e1289c30498f13, 0xd460c137a25393, 0x190dc9aec94a55d, 0xe14e9d8766a2ff, 0x1d6a6cab4864bfe, 0x179b69675f3f71f, 0x1ee2284791c35d6, 0x1c326425b28dd3c, 0xa383711df0e3}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }}}, {{{ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c index 7033623b6b..dcfb55fe5b 100644 --- 
a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_32.c @@ -1,11 +1,11 @@ // clang-format off // Command line : python monty.py 32 // 0x1afffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff -#ifdef RADIX_32 - #include #include +#ifdef RADIX_32 + #define sspint int32_t #define spint uint32_t #define udpint uint64_t @@ -1087,22 +1087,6 @@ static int modqr(const spint *h, const spint *x) { return modis1(r) | modis0(x); } -// conditional move g to f if d=1 -// strongly recommend inlining be disabled using compiler specific syntax -static void modcmv(int b, const spint *g, volatile spint *f) { - int i; - spint c0, c1, s, t; - spint r = 0x5aa5a55au; - c0 = (1 - b) + r; - c1 = b + r; - for (i = 0; i < 18; i++) { - s = g[i]; - t = f[i]; - f[i] = c0 * t + c1 * s; - f[i] -= r * (t + s); - } -} - // conditional swap g and f if d=1 // strongly recommend inlining be disabled using compiler specific syntax static void modcsw(int b, volatile spint *g, volatile spint *f) { @@ -1156,52 +1140,6 @@ static int modshr(unsigned int n, spint *a) { return r; } -// set a= 2^r -static void mod2r(unsigned int r, spint *a) { - unsigned int n = r / 29u; - unsigned int m = r % 29u; - modzer(a); - if (r >= 64 * 8) - return; - a[n] = 1; - a[n] <<= m; - nres(a, a); -} - -// export to byte array -static void modexp(const spint *a, char *b) { - int i; - spint c[18]; - redc(a, c); - for (i = 63; i >= 0; i--) { - b[i] = c[0] & (spint)0xff; - (void)modshr(8, c); - } -} - -// import from byte array -// returns 1 if in range, else 0 -static int modimp(const char *b, spint *a) { - int i, res; - for (i = 0; i < 18; i++) { - a[i] = 0; - } - for (i = 0; i < 64; i++) { - modshl(8, a); - a[0] += (spint)(unsigned char)b[i]; - } - res = modfsb(a); - nres(a, a); - return res; -} - -// determine sign -static int modsign(const spint *a) { - spint c[18]; - redc(a, c); - return c[0] % 2; -} - // return true if equal static int modcmp(const spint *a, const spint *b) { spint c[18], d[18]; @@ -1514,4 +1452,4 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) } } -#endif /* RADIX_32 */ \ No newline at end of file +#endif /* RADIX_32 */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c index 887e86f3f9..5ea283bba6 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/fp_p27500_64.c @@ -1,11 +1,11 @@ // clang-format off // Command line : python monty.py 64 // 0x1afffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff -#ifdef RADIX_64 - #include #include +#ifdef RADIX_64 + #define sspint int64_t #define spint uint64_t #define udpint __uint128_t @@ -564,22 +564,6 @@ static int modqr(const spint *h, const spint *x) { return modis1(r) | modis0(x); } -// conditional move g to f if d=1 -// strongly recommend inlining be disabled using compiler specific syntax -static void modcmv(int b, const spint *g, volatile spint *f) { - int i; - spint c0, c1, s, t; - spint r = 0x3cc3c33c5aa5a55au; - c0 = (1 - b) + r; - c1 = b + r; - for (i = 0; i < 9; i++) { - s = g[i]; - t = f[i]; - f[i] = c0 * t + c1 * s; - f[i] -= r * (t + s); - } -} - // conditional swap g and f if d=1 // strongly recommend inlining be disabled using compiler specific syntax static void modcsw(int b, volatile spint *g, 
volatile spint *f) { @@ -633,52 +617,6 @@ static int modshr(unsigned int n, spint *a) { return r; } -// set a= 2^r -static void mod2r(unsigned int r, spint *a) { - unsigned int n = r / 57u; - unsigned int m = r % 57u; - modzer(a); - if (r >= 64 * 8) - return; - a[n] = 1; - a[n] <<= m; - nres(a, a); -} - -// export to byte array -static void modexp(const spint *a, char *b) { - int i; - spint c[9]; - redc(a, c); - for (i = 63; i >= 0; i--) { - b[i] = c[0] & (spint)0xff; - (void)modshr(8, c); - } -} - -// import from byte array -// returns 1 if in range, else 0 -static int modimp(const char *b, spint *a) { - int i, res; - for (i = 0; i < 9; i++) { - a[i] = 0; - } - for (i = 0; i < 64; i++) { - modshl(8, a); - a[0] += (spint)(unsigned char)b[i]; - } - res = modfsb(a); - nres(a, a); - return res; -} - -// determine sign -static int modsign(const spint *a) { - spint c[9]; - redc(a, c); - return c[0] % 2; -} - // return true if equal static int modcmp(const spint *a, const spint *b) { spint c[9], d[9]; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd.h index 2b16e23834..616504c7b1 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd.h @@ -415,7 +415,7 @@ void copy_bases_to_kernel(theta_kernel_couple_points_t *ker, const ec_basis_t *B * @param t: an integer * @returns 0xFFFFFFFF on success, 0 on failure */ -static int +static inline int test_couple_point_order_twof(const theta_couple_point_t *T, const theta_couple_curve_t *E, int t) { int check_P1 = test_point_order_twof(&T->P1, &E->E1, t); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.c index a697ac7eb1..fe34f0e3ab 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/hd_splitting_transforms.c @@ -11,131 +11,131 @@ const int CHI_EVAL[4][4] = {{1, 1, 1, 1}, {1, -1, 1, -1}, {1, 1, -1, -1}, {1, -1 const fp2_t FP2_CONSTANTS[5] = {{ #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500} +{{0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x500}} #elif RADIX == 32 -{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800} +{{0x25ed0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000} +{{0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x130000000000000}} #else -{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000} +{{0x12f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb00000000000}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x1ffb, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1bf} +{{0x1ffb, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1bf}} #elif RADIX == 32 -{0x1ffda12f, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x57f} +{{0x1ffda12f, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x57f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xffffffffffffff68, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x7fffffffffffff} +{{0xffffffffffffff68, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x7fffffffffffff}} #else -{0x1fffffffffffed0, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0xffffffffffff} +{{0x1fffffffffffed0, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0xffffffffffff}} #endif #endif , #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif }, { #if 0 #elif RADIX == 16 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 32 -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #else -{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} +{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}} #endif #endif , #if 0 #elif RADIX == 16 -{0x1ffb, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1bf} +{{0x1ffb, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1fff, 0x1bf}} #elif RADIX == 32 -{0x1ffda12f, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x57f} +{{0x1ffda12f, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x57f}} #elif RADIX == 64 #if defined(SQISIGN_GF_IMPL_BROADWELL) -{0xffffffffffffff68, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x7fffffffffffff} +{{0xffffffffffffff68, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x7fffffffffffff}} #else -{0x1fffffffffffed0, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0xffffffffffff} +{{0x1fffffffffffed0, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0x1ffffffffffffff, 0xffffffffffff}} #endif #endif }}; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/l2.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/l2.c index ea32213c75..0fed774a04 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/l2.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/l2.c @@ -24,8 +24,8 @@ copy(fp_num *x, fp_num *r) static void normalize(fp_num *x) { - if (x->s == 0.0 || isfinite(x->s) == 0) { - if (x->s == 0.0) { + if (fpclassify(x->s) == FP_ZERO || isfinite(x->s) == 0) { + if (fpclassify(x->s) == FP_ZERO) { x->e = INT_MIN; } } else { @@ -49,13 +49,6 @@ to_deltabar(fp_num *x) x->e = 0; } -static void -to_etabar(fp_num *x) -{ - x->s = ETABAR; - x->e = 0; -} - static void from_mpz(const ibz_t *x, fp_num *r) { diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lll_internals.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lll_internals.h index e8d90141ac..2b76857205 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lll_internals.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/lll_internals.h @@ -43,13 +43,19 @@ /** @brief Type for fractions of integers * - * @typedef ibq_t +* @typedef ibq_t * * For fractions of integers of arbitrary size, used by 
intbig module, using gmp */ -typedef ibz_t ibq_t[2]; -typedef ibq_t ibq_vec_4_t[4]; -typedef ibq_t ibq_mat_4x4_t[4][4]; +typedef struct { + ibz_t q[2]; +} ibq_t; +typedef struct { + ibq_t v[4]; +} ibq_vec_4_t; +typedef struct { + ibq_vec_4_t m[4]; +} ibq_mat_4x4_t; /**@} */ diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mp.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mp.c index 27f4a963db..13714eee4a 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mp.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/mp.c @@ -2,6 +2,7 @@ #include #include #include +#include // double-wide multiplication void @@ -17,7 +18,7 @@ MUL(digit_t *out, const digit_t a, const digit_t b) out[0] = _umul128(a, b, &umul_hi); out[1] = umul_hi; -#elif defined(RADIX_64) && defined(HAVE_UINT128) +#elif defined(RADIX_64) && (defined(HAVE_UINT128) || defined(__SIZEOF_INT128__) || defined(__int128)) && !defined(C_PEDANTIC_MODE) unsigned __int128 umul_tmp; umul_tmp = (unsigned __int128)(a) * (unsigned __int128)(b); out[0] = (uint64_t)umul_tmp; @@ -277,6 +278,7 @@ mp_inv_2e(digit_t *b, const digit_t *a, int e, unsigned int nwords) assert((a[0] & 1) == 1); digit_t x[nwords], y[nwords], aa[nwords], mp_one[nwords], tmp[nwords]; + memset(x, 0, sizeof(x)); mp_copy(aa, a, nwords); mp_one[0] = 1; diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/rationals.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/rationals.c index 0c5387e5e8..25f8519b3f 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/rationals.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/rationals.c @@ -1,20 +1,20 @@ -#include + #include #include "internal.h" #include "lll_internals.h" void ibq_init(ibq_t *x) { - ibz_init(&((*x)[0])); - ibz_init(&((*x)[1])); - ibz_set(&((*x)[1]), 1); + ibz_init(&(x->q[0])); + ibz_init(&(x->q[1])); + ibz_set(&(x->q[1]), 1); } void ibq_finalize(ibq_t *x) { - ibz_finalize(&((*x)[0])); - ibz_finalize(&((*x)[1])); + ibz_finalize(&(x->q[0])); + ibz_finalize(&(x->q[1])); } void @@ -22,7 +22,7 @@ ibq_mat_4x4_init(ibq_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibq_init(&(*mat)[i][j]); + ibq_init(&mat->m[i].v[j]); } } } @@ -31,7 +31,7 @@ ibq_mat_4x4_finalize(ibq_mat_4x4_t *mat) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibq_finalize(&(*mat)[i][j]); + ibq_finalize(&mat->m[i].v[j]); } } } @@ -40,14 +40,14 @@ void ibq_vec_4_init(ibq_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibq_init(&(*vec)[i]); + ibq_init(&vec->v[i]); } } void ibq_vec_4_finalize(ibq_vec_4_t *vec) { for (int i = 0; i < 4; i++) { - ibq_finalize(&(*vec)[i]); + ibq_finalize(&vec->v[i]); } } @@ -57,9 +57,9 @@ ibq_mat_4x4_print(const ibq_mat_4x4_t *mat) printf("matrix: "); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { - ibz_print(&((*mat)[i][j][0]), 10); + ibz_print(&(mat->m[i].v[j].q[0]), 10); printf("/"); - ibz_print(&((*mat)[i][j][1]), 10); + ibz_print(&(mat->m[i].v[j].q[1]), 10); printf(" "); } printf("\n "); @@ -72,9 +72,9 @@ ibq_vec_4_print(const ibq_vec_4_t *vec) { printf("vector: "); for (int i = 0; i < 4; i++) { - ibz_print(&((*vec)[i][0]), 10); + ibz_print(&(vec->v[i].q[0]), 10); printf("/"); - ibz_print(&((*vec)[i][1]), 10); + ibz_print(&(vec->v[i].q[1]), 10); printf(" "); } printf("\n\n"); @@ -86,10 +86,10 @@ ibq_reduce(ibq_t *x) ibz_t gcd, r; ibz_init(&gcd); ibz_init(&r); - ibz_gcd(&gcd, &((*x)[0]), &((*x)[1])); - ibz_div(&((*x)[0]), &r, &((*x)[0]), &gcd); + ibz_gcd(&gcd, &(x->q[0]), &(x->q[1])); + ibz_div(&(x->q[0]), &r, &(x->q[0]), &gcd); 
assert(ibz_is_zero(&r)); - ibz_div(&((*x)[1]), &r, &((*x)[1]), &gcd); + ibz_div(&(x->q[1]), &r, &(x->q[1]), &gcd); assert(ibz_is_zero(&r)); ibz_finalize(&gcd); ibz_finalize(&r); @@ -102,10 +102,10 @@ ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b) ibz_init(&add); ibz_init(&prod); - ibz_mul(&add, &((*a)[0]), &((*b)[1])); - ibz_mul(&prod, &((*b)[0]), &((*a)[1])); - ibz_add(&((*sum)[0]), &add, &prod); - ibz_mul(&((*sum)[1]), &((*a)[1]), &((*b)[1])); + ibz_mul(&add, &(a->q[0]), &(b->q[1])); + ibz_mul(&prod, &(b->q[0]), &(a->q[1])); + ibz_add(&(sum->q[0]), &add, &prod); + ibz_mul(&(sum->q[1]), &(a->q[1]), &(b->q[1])); ibz_finalize(&add); ibz_finalize(&prod); } @@ -113,8 +113,8 @@ ibq_add(ibq_t *sum, const ibq_t *a, const ibq_t *b) void ibq_neg(ibq_t *neg, const ibq_t *x) { - ibz_copy(&((*neg)[1]), &((*x)[1])); - ibz_neg(&((*neg)[0]), &((*x)[0])); + ibz_copy(&(neg->q[1]), &(x->q[1])); + ibz_neg(&(neg->q[0]), &(x->q[0])); } void @@ -143,8 +143,8 @@ ibq_abs(ibq_t *abs, const ibq_t *x) // once void ibq_mul(ibq_t *prod, const ibq_t *a, const ibq_t *b) { - ibz_mul(&((*prod)[0]), &((*a)[0]), &((*b)[0])); - ibz_mul(&((*prod)[1]), &((*a)[1]), &((*b)[1])); + ibz_mul(&(prod->q[0]), &(a->q[0]), &(b->q[0])); + ibz_mul(&(prod->q[1]), &(a->q[1]), &(b->q[1])); } int @@ -152,9 +152,9 @@ ibq_inv(ibq_t *inv, const ibq_t *x) { int res = !ibq_is_zero(x); if (res) { - ibz_copy(&((*inv)[0]), &((*x)[0])); - ibz_copy(&((*inv)[1]), &((*x)[1])); - ibz_swap(&((*inv)[1]), &((*inv)[0])); + ibz_copy(&(inv->q[0]), &(x->q[0])); + ibz_copy(&(inv->q[1]), &(x->q[1])); + ibz_swap(&(inv->q[1]), &(inv->q[0])); } return (res); } @@ -165,15 +165,15 @@ ibq_cmp(const ibq_t *a, const ibq_t *b) ibz_t x, y; ibz_init(&x); ibz_init(&y); - ibz_copy(&x, &((*a)[0])); - ibz_copy(&y, &((*b)[0])); - ibz_mul(&y, &y, &((*a)[1])); - ibz_mul(&x, &x, &((*b)[1])); - if (ibz_cmp(&((*a)[1]), &ibz_const_zero) > 0) { + ibz_copy(&x, &(a->q[0])); + ibz_copy(&y, &(b->q[0])); + ibz_mul(&y, &y, &(a->q[1])); + ibz_mul(&x, &x, &(b->q[1])); + if (ibz_cmp(&(a->q[1]), &ibz_const_zero) > 0) { ibz_neg(&y, &y); ibz_neg(&x, &x); } - if (ibz_cmp(&((*b)[1]), &ibz_const_zero) > 0) { + if (ibz_cmp(&(b->q[1]), &ibz_const_zero) > 0) { ibz_neg(&y, &y); ibz_neg(&x, &x); } @@ -186,28 +186,28 @@ ibq_cmp(const ibq_t *a, const ibq_t *b) int ibq_is_zero(const ibq_t *x) { - return ibz_is_zero(&((*x)[0])); + return ibz_is_zero(&(x->q[0])); } int ibq_is_one(const ibq_t *x) { - return (0 == ibz_cmp(&((*x)[0]), &((*x)[1]))); + return (0 == ibz_cmp(&(x->q[0]), &(x->q[1]))); } int ibq_set(ibq_t *q, const ibz_t *a, const ibz_t *b) { - ibz_copy(&((*q)[0]), a); - ibz_copy(&((*q)[1]), b); + ibz_copy(&(q->q[0]), a); + ibz_copy(&(q->q[1]), b); return !ibz_is_zero(b); } void ibq_copy(ibq_t *target, const ibq_t *value) // once { - ibz_copy(&((*target)[0]), &((*value)[0])); - ibz_copy(&((*target)[1]), &((*value)[1])); + ibz_copy(&(target->q[0]), &(value->q[0])); + ibz_copy(&(target->q[1]), &(value->q[1])); } int @@ -215,7 +215,7 @@ ibq_is_ibz(const ibq_t *q) { ibz_t r; ibz_init(&r); - ibz_mod(&r, &((*q)[0]), &((*q)[1])); + ibz_mod(&r, &(q->q[0]), &(q->q[1])); int res = ibz_is_zero(&r); ibz_finalize(&r); return (res); @@ -226,7 +226,7 @@ ibq_to_ibz(ibz_t *z, const ibq_t *q) { ibz_t r; ibz_init(&r); - ibz_div(z, &r, &((*q)[0]), &((*q)[1])); + ibz_div(z, &r, &(q->q[0]), &(q->q[1])); int res = ibz_is_zero(&r); ibz_finalize(&r); return (res); diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/rng.h b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/rng.h index d0861ac036..0362ca0c42 100644 --- 
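Note on the lll_internals.h and rationals.c hunks above: wrapping the two-element ibz_t array (and the 4-vector and 4x4 matrix built from it) in single-member structs is what turns every accessor from (*x)[0] into x->q[0] and from (*mat)[i][j] into mat->m[i].v[j]. A minimal stand-alone sketch of the same pattern, with long standing in for ibz_t (the stand-in type and helper functions are illustrative only):

#include <stdio.h>

/* Stand-in for ibz_t; the real type is an arbitrary-precision integer. */
typedef long ibz_stub_t;

/* Before: a typedef'd array.  An ibq_old_t* points at the whole array,
 * so elements are reached with (*x)[0] and (*x)[1]. */
typedef ibz_stub_t ibq_old_t[2];

/* After: a single-member struct.  Elements are reached with x->q[0]. */
typedef struct {
    ibz_stub_t q[2]; /* q[0] = numerator, q[1] = denominator */
} ibq_new_t;

static void
set_old(ibq_old_t *x, ibz_stub_t num, ibz_stub_t den)
{
    (*x)[0] = num;
    (*x)[1] = den;
}

static void
set_new(ibq_new_t *x, ibz_stub_t num, ibz_stub_t den)
{
    x->q[0] = num;
    x->q[1] = den;
}

int
main(void)
{
    ibq_old_t a;
    ibq_new_t b;
    set_old(&a, 3, 4);
    set_new(&b, 3, 4);
    printf("old: %ld/%ld  new: %ld/%ld\n", a[0], a[1], b.q[0], b.q[1]);
    return 0;
}

A struct-typed object, unlike a typedef'd array, can be assigned, returned, and embedded in other structs without array-to-pointer decay, which is presumably part of the motivation; the patch itself does not state one.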
a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/rng.h +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/rng.h @@ -5,7 +5,7 @@ #include -static int randombytes(unsigned char *x, unsigned long long xlen){ +static inline int randombytes(unsigned char *x, unsigned long long xlen){ OQS_randombytes(x, xlen); return 0; } diff --git a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign.c b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign.c index 7335c38d9a..cf2134085b 100644 --- a/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign.c +++ b/src/sig/sqisign/the-sqisign_sqisign_lvl5_ref/sqisign.c @@ -121,7 +121,7 @@ sqisign_verify(const unsigned char *m, unsigned long long siglen, const unsigned char *pk) { - + (void) siglen; int ret = 0; public_key_t pkt = { 0 }; signature_t sigt; diff --git a/tests/test_leaks.py b/tests/test_leaks.py index 4b29150b01..9ba4f21d41 100644 --- a/tests/test_leaks.py +++ b/tests/test_leaks.py @@ -22,7 +22,7 @@ def test_sig_leak(sig_name): if not(helpers.is_sig_enabled_by_name(sig_name)): pytest.skip('Not enabled') if sys.platform != "linux" or os.system("grep ubuntu /etc/os-release") != 0 or os.system("uname -a | grep x86_64") != 0: pytest.skip('Leak testing not supported on this platform') helpers.run_subprocess( - ["valgrind", "-s", "--error-exitcode=1", "--leak-check=full", "--show-leak-kinds=all", helpers.path_to_executable('test_sig'), sig_name], + ["valgrind", "-s", "--max-stackframe=4116160", "--error-exitcode=1", "--leak-check=full", "--show-leak-kinds=all", helpers.path_to_executable('test_sig'), sig_name], ) @pytest.mark.skipif("SLH_DSA_LEAK_TEST" not in os.environ, reason="SLH DSA leak testing only performed in extended tests") From aa6655d1069a17b20bd5c2c1a5e16e367635e1a4 Mon Sep 17 00:00:00 2001 From: Basil Hess Date: Thu, 18 Sep 2025 15:51:50 +0200 Subject: [PATCH 17/19] Point upstream to current PR [full tests] Signed-off-by: Basil Hess --- docs/algorithms/sig/sqisign.md | 2 +- docs/algorithms/sig/sqisign.yml | 2 +- scripts/copy_from_upstream/copy_from_upstream.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/algorithms/sig/sqisign.md b/docs/algorithms/sig/sqisign.md index b7f2002798..84d334ffc3 100644 --- a/docs/algorithms/sig/sqisign.md +++ b/docs/algorithms/sig/sqisign.md @@ -6,7 +6,7 @@ - **Authors' website**: https://sqisign.org/ - **Specification version**: Round 2. 
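Note on the rng.h and sqisign.c hunks above: making the header-defined randombytes() wrapper static inline and adding (void) siglen; in sqisign_verify() both look like warning hygiene rather than behavioural changes. A plain static function defined in a header draws -Wunused-function in every translation unit that includes it without calling it, and an unused parameter draws -Wunused-parameter. (The test_leaks.py change raises valgrind's --max-stackframe limit, presumably because the reference code keeps large temporaries on the stack.) A self-contained illustration with invented names, not the library's code:

/* example_rng.h -- illustrative header, not part of the patch */
#ifndef EXAMPLE_RNG_H
#define EXAMPLE_RNG_H

#include <stddef.h>
#include <string.h>

/* 'static inline' gives every .c file that includes this header its own copy
 * without multiple-definition errors, and without -Wunused-function warnings
 * in files that never call it (unlike a plain 'static' definition). */
static inline int
example_randombytes(unsigned char *buf, size_t len)
{
    memset(buf, 0x42, len); /* placeholder; a real wrapper would call the RNG */
    return 0;
}

#endif /* EXAMPLE_RNG_H */

/* example.c */
#include <stdio.h>

static int
example_verify(const unsigned char *sig, size_t siglen)
{
    (void)siglen; /* parameter kept for API compatibility but unused here */
    return sig != NULL ? 0 : -1;
}

int
main(void)
{
    unsigned char sig[4];
    if (example_randombytes(sig, sizeof sig) == 0)
        printf("verify: %d\n", example_verify(sig, sizeof sig));
    return 0;
}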
- **Primary Source**: - - **Source**: https://github.com/bhess/the-sqisign/commit/a8884349ee78b0c4da296c9f8ce6f208910d5ee6 + - **Source**: https://github.com/shane-digi/the-sqisign/commit/a8884349ee78b0c4da296c9f8ce6f208910d5ee6 - **Implementation license (SPDX-Identifier)**: Apache-2.0 diff --git a/docs/algorithms/sig/sqisign.yml b/docs/algorithms/sig/sqisign.yml index d22bb1fb70..26e8ade298 100644 --- a/docs/algorithms/sig/sqisign.yml +++ b/docs/algorithms/sig/sqisign.yml @@ -36,7 +36,7 @@ website: https://sqisign.org/ nist-round: 2 spec-version: Round 2 primary-upstream: - source: https://github.com/bhess/the-sqisign/commit/a8884349ee78b0c4da296c9f8ce6f208910d5ee6 + source: https://github.com/shane-digi/the-sqisign/commit/a8884349ee78b0c4da296c9f8ce6f208910d5ee6 spdx-license-identifier: Apache-2.0 parameter-sets: - name: SQIsign-lvl1 diff --git a/scripts/copy_from_upstream/copy_from_upstream.yml b/scripts/copy_from_upstream/copy_from_upstream.yml index ed1a8264e8..18c0f11a85 100644 --- a/scripts/copy_from_upstream/copy_from_upstream.yml +++ b/scripts/copy_from_upstream/copy_from_upstream.yml @@ -103,7 +103,7 @@ upstreams: - name: the-sqisign - git_url: https://github.com/bhess/the-sqisign.git + git_url: https://github.com/shane-digi/the-sqisign.git git_branch: oqs git_commit: a8884349ee78b0c4da296c9f8ce6f208910d5ee6 sig_scheme_path: '.' From 8d7e213945a7d436eed0a2a56c95d90ebce4ed27 Mon Sep 17 00:00:00 2001 From: Basil Hess Date: Thu, 18 Sep 2025 16:15:22 +0200 Subject: [PATCH 18/19] Disable GMP on apple-mobile [full tests] Signed-off-by: Basil Hess --- CMakeLists.txt | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 16dca055d7..be97be44df 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -179,6 +179,13 @@ if(${OQS_USE_GMP}) endif() endif() +if (DEFINED PLATFORM) + # GMP not available for these Apple platforms + if (PLATFORM STREQUAL "OS64" OR PLATFORM STREQUAL "TVOS") + set(OQS_USE_GMP OFF) + endif() +endif() + if(OQS_USE_ICICLE) enable_language(CXX) set(CMAKE_CXX_STANDARD 17) From 9686ba3704757f8fdcc191c754d34c79ad95f5cf Mon Sep 17 00:00:00 2001 From: Basil Hess Date: Thu, 18 Sep 2025 17:21:54 +0200 Subject: [PATCH 19/19] Remove unused patch files Signed-off-by: Basil Hess --- .../patches/sqisign_fp.patch | 108 --- .../patches/sqisign_namespace.patch | 622 ------------------ 2 files changed, 730 deletions(-) delete mode 100644 scripts/copy_from_upstream/patches/sqisign_fp.patch delete mode 100644 scripts/copy_from_upstream/patches/sqisign_namespace.patch diff --git a/scripts/copy_from_upstream/patches/sqisign_fp.patch b/scripts/copy_from_upstream/patches/sqisign_fp.patch deleted file mode 100644 index d704fedc60..0000000000 --- a/scripts/copy_from_upstream/patches/sqisign_fp.patch +++ /dev/null @@ -1,108 +0,0 @@ -diff --git a/src/gf/ref/lvl1/fp_p5248_32.c b/src/gf/ref/lvl1/fp_p5248_32.c -index a52add3..62e5491 100644 ---- a/src/gf/ref/lvl1/fp_p5248_32.c -+++ b/src/gf/ref/lvl1/fp_p5248_32.c -@@ -1,6 +1,7 @@ - // clang-format off - // Command line : python monty.py 32 - // 0x4ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff -+#ifdef RADIX_32 - - #include - #include -@@ -940,3 +941,5 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) - fp_add(d, d, &a); - } - } -+ -+#endif /* RADIX_32 */ -diff --git a/src/gf/ref/lvl1/fp_p5248_64.c b/src/gf/ref/lvl1/fp_p5248_64.c -index cde28dd..57c2131 100644 ---- a/src/gf/ref/lvl1/fp_p5248_64.c -+++ b/src/gf/ref/lvl1/fp_p5248_64.c -@@ -1,6 +1,7 @@ - // clang-format off - // Command line 
: python monty.py 64 - // 0x4ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff -+#ifdef RADIX_64 - - #include - #include -@@ -789,3 +790,5 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) - fp_add(d, d, &a); - } - } -+ -+#endif /* RADIX_64 */ -diff --git a/src/gf/ref/lvl3/fp_p65376_32.c b/src/gf/ref/lvl3/fp_p65376_32.c -index 1483461..2aaad84 100644 ---- a/src/gf/ref/lvl3/fp_p65376_32.c -+++ b/src/gf/ref/lvl3/fp_p65376_32.c -@@ -1,6 +1,7 @@ - // clang-format off - // Command line : python monty.py 32 - // 0x40ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff -+#ifdef RADIX_32 - - #include - #include -@@ -1229,3 +1230,5 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) - fp_add(d, d, &a); - } - } -+ -+#endif -diff --git a/src/gf/ref/lvl3/fp_p65376_64.c b/src/gf/ref/lvl3/fp_p65376_64.c -index 539cde5..9ac5fc5 100644 ---- a/src/gf/ref/lvl3/fp_p65376_64.c -+++ b/src/gf/ref/lvl3/fp_p65376_64.c -@@ -1,6 +1,7 @@ - // clang-format off - // Command line : python monty.py 64 - // 0x40ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff -+#ifdef RADIX_64 - - #include - #include -@@ -870,3 +871,5 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) - fp_add(d, d, &a); - } - } -+ -+#endif -diff --git a/src/gf/ref/lvl5/fp_p27500_32.c b/src/gf/ref/lvl5/fp_p27500_32.c -index ecf5ea7..f002495 100644 ---- a/src/gf/ref/lvl5/fp_p27500_32.c -+++ b/src/gf/ref/lvl5/fp_p27500_32.c -@@ -1,6 +1,7 @@ - // clang-format off - // Command line : python monty.py 32 - // 0x1afffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff -+#ifdef RADIX_32 - - #include - #include -@@ -1512,3 +1513,5 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) - fp_add(d, d, &a); - } - } -+ -+#endif -diff --git a/src/gf/ref/lvl5/fp_p27500_64.c b/src/gf/ref/lvl5/fp_p27500_64.c -index 33bb75e..c187e87 100644 ---- a/src/gf/ref/lvl5/fp_p27500_64.c -+++ b/src/gf/ref/lvl5/fp_p27500_64.c -@@ -1,6 +1,7 @@ - // clang-format off - // Command line : python monty.py 64 - // 0x1afffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff -+#ifdef RADIX_64 - - #include - #include -@@ -968,3 +969,5 @@ fp_decode_reduce(fp_t *d, const void *src, size_t len) - fp_add(d, d, &a); - } - } -+ -+#endif diff --git a/scripts/copy_from_upstream/patches/sqisign_namespace.patch b/scripts/copy_from_upstream/patches/sqisign_namespace.patch deleted file mode 100644 index 6bdba58527..0000000000 --- a/scripts/copy_from_upstream/patches/sqisign_namespace.patch +++ /dev/null @@ -1,622 +0,0 @@ -diff --git a/include/sqisign_namespace.h b/include/sqisign_namespace.h -index 007d257..bbfe72c 100644 ---- a/include/sqisign_namespace.h -+++ b/include/sqisign_namespace.h -@@ -18,12 +18,6 @@ - #define PARAM_JOIN2(a, b) PARAM_JOIN2_(a, b) - #define PARAM_NAME2(end, s) PARAM_JOIN2(end, s) - --#ifndef DISABLE_NAMESPACING --#define SQISIGN_NAMESPACE_GENERIC(s) PARAM_NAME2(gen, s) --#else --#define SQISIGN_NAMESPACE_GENERIC(s) s --#endif -- - #if defined(SQISIGN_VARIANT) && !defined(DISABLE_NAMESPACING) - #if defined(SQISIGN_BUILD_TYPE_REF) - #define SQISIGN_NAMESPACE(s) PARAM_NAME3(ref, s) -@@ -60,23 +54,23 @@ - #undef quat_alg_scalar - #undef quat_alg_sub - --#define quat_alg_add SQISIGN_NAMESPACE_GENERIC(quat_alg_add) --#define quat_alg_conj SQISIGN_NAMESPACE_GENERIC(quat_alg_conj) --#define quat_alg_coord_mul 
SQISIGN_NAMESPACE_GENERIC(quat_alg_coord_mul) --#define quat_alg_elem_copy SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy) --#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_copy_ibz) --#define quat_alg_elem_equal SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_equal) --#define quat_alg_elem_is_zero SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_is_zero) --#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_mul_by_scalar) --#define quat_alg_elem_set SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_set) --#define quat_alg_equal_denom SQISIGN_NAMESPACE_GENERIC(quat_alg_equal_denom) --#define quat_alg_init_set_ui SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set_ui) --#define quat_alg_make_primitive SQISIGN_NAMESPACE_GENERIC(quat_alg_make_primitive) --#define quat_alg_mul SQISIGN_NAMESPACE_GENERIC(quat_alg_mul) --#define quat_alg_norm SQISIGN_NAMESPACE_GENERIC(quat_alg_norm) --#define quat_alg_normalize SQISIGN_NAMESPACE_GENERIC(quat_alg_normalize) --#define quat_alg_scalar SQISIGN_NAMESPACE_GENERIC(quat_alg_scalar) --#define quat_alg_sub SQISIGN_NAMESPACE_GENERIC(quat_alg_sub) -+#define quat_alg_add SQISIGN_NAMESPACE(quat_alg_add) -+#define quat_alg_conj SQISIGN_NAMESPACE(quat_alg_conj) -+#define quat_alg_coord_mul SQISIGN_NAMESPACE(quat_alg_coord_mul) -+#define quat_alg_elem_copy SQISIGN_NAMESPACE(quat_alg_elem_copy) -+#define quat_alg_elem_copy_ibz SQISIGN_NAMESPACE(quat_alg_elem_copy_ibz) -+#define quat_alg_elem_equal SQISIGN_NAMESPACE(quat_alg_elem_equal) -+#define quat_alg_elem_is_zero SQISIGN_NAMESPACE(quat_alg_elem_is_zero) -+#define quat_alg_elem_mul_by_scalar SQISIGN_NAMESPACE(quat_alg_elem_mul_by_scalar) -+#define quat_alg_elem_set SQISIGN_NAMESPACE(quat_alg_elem_set) -+#define quat_alg_equal_denom SQISIGN_NAMESPACE(quat_alg_equal_denom) -+#define quat_alg_init_set_ui SQISIGN_NAMESPACE(quat_alg_init_set_ui) -+#define quat_alg_make_primitive SQISIGN_NAMESPACE(quat_alg_make_primitive) -+#define quat_alg_mul SQISIGN_NAMESPACE(quat_alg_mul) -+#define quat_alg_norm SQISIGN_NAMESPACE(quat_alg_norm) -+#define quat_alg_normalize SQISIGN_NAMESPACE(quat_alg_normalize) -+#define quat_alg_scalar SQISIGN_NAMESPACE(quat_alg_scalar) -+#define quat_alg_sub SQISIGN_NAMESPACE(quat_alg_sub) - - // Namespacing symbols exported from api.c: - #undef crypto_sign -@@ -134,14 +128,14 @@ - #undef ibz_mat_2x2_set - #undef ibz_vec_2_set - --#define ibz_2x2_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_2x2_mul_mod) --#define ibz_mat_2x2_add SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_add) --#define ibz_mat_2x2_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_copy) --#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_det_from_ibz) --#define ibz_mat_2x2_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_eval) --#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_inv_mod) --#define ibz_mat_2x2_set SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_set) --#define ibz_vec_2_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_set) -+#define ibz_2x2_mul_mod SQISIGN_NAMESPACE(ibz_2x2_mul_mod) -+#define ibz_mat_2x2_add SQISIGN_NAMESPACE(ibz_mat_2x2_add) -+#define ibz_mat_2x2_copy SQISIGN_NAMESPACE(ibz_mat_2x2_copy) -+#define ibz_mat_2x2_det_from_ibz SQISIGN_NAMESPACE(ibz_mat_2x2_det_from_ibz) -+#define ibz_mat_2x2_eval SQISIGN_NAMESPACE(ibz_mat_2x2_eval) -+#define ibz_mat_2x2_inv_mod SQISIGN_NAMESPACE(ibz_mat_2x2_inv_mod) -+#define ibz_mat_2x2_set SQISIGN_NAMESPACE(ibz_mat_2x2_set) -+#define ibz_vec_2_set SQISIGN_NAMESPACE(ibz_vec_2_set) - - // Namespacing symbols exported from dim2id2iso.c: - #undef 
dim2id2iso_arbitrary_isogeny_evaluation -@@ -184,34 +178,34 @@ - #undef ibz_vec_4_sub - #undef quat_qf_eval - --#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_mpm) --#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE_GENERIC(ibz_inv_dim4_make_coeff_pmp) --#define ibz_mat_4x4_copy SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_copy) --#define ibz_mat_4x4_equal SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_equal) --#define ibz_mat_4x4_eval SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval) --#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_eval_t) --#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_gcd) --#define ibz_mat_4x4_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_identity) --#define ibz_mat_4x4_inv_with_det_as_denom SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_inv_with_det_as_denom) --#define ibz_mat_4x4_is_identity SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_identity) --#define ibz_mat_4x4_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_mul) --#define ibz_mat_4x4_negate SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_negate) --#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_div) --#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_scalar_mul) --#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_transpose) --#define ibz_mat_4x4_zero SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_zero) --#define ibz_vec_4_add SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_add) --#define ibz_vec_4_content SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_content) --#define ibz_vec_4_copy SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy) --#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_ibz) --#define ibz_vec_4_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_is_zero) --#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination) --#define ibz_vec_4_negate SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_negate) --#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_div) --#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul) --#define ibz_vec_4_set SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_set) --#define ibz_vec_4_sub SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_sub) --#define quat_qf_eval SQISIGN_NAMESPACE_GENERIC(quat_qf_eval) -+#define ibz_inv_dim4_make_coeff_mpm SQISIGN_NAMESPACE(ibz_inv_dim4_make_coeff_mpm) -+#define ibz_inv_dim4_make_coeff_pmp SQISIGN_NAMESPACE(ibz_inv_dim4_make_coeff_pmp) -+#define ibz_mat_4x4_copy SQISIGN_NAMESPACE(ibz_mat_4x4_copy) -+#define ibz_mat_4x4_equal SQISIGN_NAMESPACE(ibz_mat_4x4_equal) -+#define ibz_mat_4x4_eval SQISIGN_NAMESPACE(ibz_mat_4x4_eval) -+#define ibz_mat_4x4_eval_t SQISIGN_NAMESPACE(ibz_mat_4x4_eval_t) -+#define ibz_mat_4x4_gcd SQISIGN_NAMESPACE(ibz_mat_4x4_gcd) -+#define ibz_mat_4x4_identity SQISIGN_NAMESPACE(ibz_mat_4x4_identity) -+#define ibz_mat_4x4_inv_with_det_as_denom SQISIGN_NAMESPACE(ibz_mat_4x4_inv_with_det_as_denom) -+#define ibz_mat_4x4_is_identity SQISIGN_NAMESPACE(ibz_mat_4x4_is_identity) -+#define ibz_mat_4x4_mul SQISIGN_NAMESPACE(ibz_mat_4x4_mul) -+#define ibz_mat_4x4_negate SQISIGN_NAMESPACE(ibz_mat_4x4_negate) -+#define ibz_mat_4x4_scalar_div SQISIGN_NAMESPACE(ibz_mat_4x4_scalar_div) -+#define ibz_mat_4x4_scalar_mul SQISIGN_NAMESPACE(ibz_mat_4x4_scalar_mul) -+#define ibz_mat_4x4_transpose SQISIGN_NAMESPACE(ibz_mat_4x4_transpose) -+#define ibz_mat_4x4_zero SQISIGN_NAMESPACE(ibz_mat_4x4_zero) -+#define ibz_vec_4_add SQISIGN_NAMESPACE(ibz_vec_4_add) -+#define ibz_vec_4_content SQISIGN_NAMESPACE(ibz_vec_4_content) -+#define ibz_vec_4_copy 
SQISIGN_NAMESPACE(ibz_vec_4_copy) -+#define ibz_vec_4_copy_ibz SQISIGN_NAMESPACE(ibz_vec_4_copy_ibz) -+#define ibz_vec_4_is_zero SQISIGN_NAMESPACE(ibz_vec_4_is_zero) -+#define ibz_vec_4_linear_combination SQISIGN_NAMESPACE(ibz_vec_4_linear_combination) -+#define ibz_vec_4_negate SQISIGN_NAMESPACE(ibz_vec_4_negate) -+#define ibz_vec_4_scalar_div SQISIGN_NAMESPACE(ibz_vec_4_scalar_div) -+#define ibz_vec_4_scalar_mul SQISIGN_NAMESPACE(ibz_vec_4_scalar_mul) -+#define ibz_vec_4_set SQISIGN_NAMESPACE(ibz_vec_4_set) -+#define ibz_vec_4_sub SQISIGN_NAMESPACE(ibz_vec_4_sub) -+#define quat_qf_eval SQISIGN_NAMESPACE(quat_qf_eval) - - // Namespacing symbols exported from ec.c: - #undef cswap_points -@@ -339,22 +333,22 @@ - #undef quat_left_ideal_finalize - #undef quat_left_ideal_init - --#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_finalize) --#define ibz_mat_2x2_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_init) --#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_finalize) --#define ibz_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_init) --#define ibz_vec_2_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_finalize) --#define ibz_vec_2_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_init) --#define ibz_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_finalize) --#define ibz_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_init) --#define quat_alg_elem_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_finalize) --#define quat_alg_elem_init SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_init) --#define quat_alg_finalize SQISIGN_NAMESPACE_GENERIC(quat_alg_finalize) --#define quat_alg_init_set SQISIGN_NAMESPACE_GENERIC(quat_alg_init_set) --#define quat_lattice_finalize SQISIGN_NAMESPACE_GENERIC(quat_lattice_finalize) --#define quat_lattice_init SQISIGN_NAMESPACE_GENERIC(quat_lattice_init) --#define quat_left_ideal_finalize SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_finalize) --#define quat_left_ideal_init SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_init) -+#define ibz_mat_2x2_finalize SQISIGN_NAMESPACE(ibz_mat_2x2_finalize) -+#define ibz_mat_2x2_init SQISIGN_NAMESPACE(ibz_mat_2x2_init) -+#define ibz_mat_4x4_finalize SQISIGN_NAMESPACE(ibz_mat_4x4_finalize) -+#define ibz_mat_4x4_init SQISIGN_NAMESPACE(ibz_mat_4x4_init) -+#define ibz_vec_2_finalize SQISIGN_NAMESPACE(ibz_vec_2_finalize) -+#define ibz_vec_2_init SQISIGN_NAMESPACE(ibz_vec_2_init) -+#define ibz_vec_4_finalize SQISIGN_NAMESPACE(ibz_vec_4_finalize) -+#define ibz_vec_4_init SQISIGN_NAMESPACE(ibz_vec_4_init) -+#define quat_alg_elem_finalize SQISIGN_NAMESPACE(quat_alg_elem_finalize) -+#define quat_alg_elem_init SQISIGN_NAMESPACE(quat_alg_elem_init) -+#define quat_alg_finalize SQISIGN_NAMESPACE(quat_alg_finalize) -+#define quat_alg_init_set SQISIGN_NAMESPACE(quat_alg_init_set) -+#define quat_lattice_finalize SQISIGN_NAMESPACE(quat_lattice_finalize) -+#define quat_lattice_init SQISIGN_NAMESPACE(quat_lattice_init) -+#define quat_left_ideal_finalize SQISIGN_NAMESPACE(quat_left_ideal_finalize) -+#define quat_left_ideal_init SQISIGN_NAMESPACE(quat_left_ideal_init) - - // Namespacing symbols exported from fp.c: - #undef fp_select -@@ -567,11 +561,11 @@ - #undef ibz_vec_4_linear_combination_mod - #undef ibz_vec_4_scalar_mul_mod - --#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_is_hnf) --#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE_GENERIC(ibz_mat_4xn_hnf_mod_core) --#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_copy_mod) --#define ibz_vec_4_linear_combination_mod 
SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_linear_combination_mod) --#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_scalar_mul_mod) -+#define ibz_mat_4x4_is_hnf SQISIGN_NAMESPACE(ibz_mat_4x4_is_hnf) -+#define ibz_mat_4xn_hnf_mod_core SQISIGN_NAMESPACE(ibz_mat_4xn_hnf_mod_core) -+#define ibz_vec_4_copy_mod SQISIGN_NAMESPACE(ibz_vec_4_copy_mod) -+#define ibz_vec_4_linear_combination_mod SQISIGN_NAMESPACE(ibz_vec_4_linear_combination_mod) -+#define ibz_vec_4_scalar_mul_mod SQISIGN_NAMESPACE(ibz_vec_4_scalar_mul_mod) - - // Namespacing symbols exported from hnf_internal.c: - #undef ibz_centered_mod -@@ -579,15 +573,15 @@ - #undef ibz_mod_not_zero - #undef ibz_xgcd_with_u_not_0 - --#define ibz_centered_mod SQISIGN_NAMESPACE_GENERIC(ibz_centered_mod) --#define ibz_conditional_assign SQISIGN_NAMESPACE_GENERIC(ibz_conditional_assign) --#define ibz_mod_not_zero SQISIGN_NAMESPACE_GENERIC(ibz_mod_not_zero) --#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE_GENERIC(ibz_xgcd_with_u_not_0) -+#define ibz_centered_mod SQISIGN_NAMESPACE(ibz_centered_mod) -+#define ibz_conditional_assign SQISIGN_NAMESPACE(ibz_conditional_assign) -+#define ibz_mod_not_zero SQISIGN_NAMESPACE(ibz_mod_not_zero) -+#define ibz_xgcd_with_u_not_0 SQISIGN_NAMESPACE(ibz_xgcd_with_u_not_0) - - // Namespacing symbols exported from ibz_division.c: - #undef ibz_xgcd - --#define ibz_xgcd SQISIGN_NAMESPACE_GENERIC(ibz_xgcd) -+#define ibz_xgcd SQISIGN_NAMESPACE(ibz_xgcd) - - // Namespacing symbols exported from id2iso.c: - #undef change_of_basis_matrix_tate -@@ -624,22 +618,22 @@ - #undef quat_order_discriminant - #undef quat_order_is_maximal - --#define quat_lideal_add SQISIGN_NAMESPACE_GENERIC(quat_lideal_add) --#define quat_lideal_class_gram SQISIGN_NAMESPACE_GENERIC(quat_lideal_class_gram) --#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_conjugate_without_hnf) --#define quat_lideal_copy SQISIGN_NAMESPACE_GENERIC(quat_lideal_copy) --#define quat_lideal_create SQISIGN_NAMESPACE_GENERIC(quat_lideal_create) --#define quat_lideal_create_principal SQISIGN_NAMESPACE_GENERIC(quat_lideal_create_principal) --#define quat_lideal_equals SQISIGN_NAMESPACE_GENERIC(quat_lideal_equals) --#define quat_lideal_generator SQISIGN_NAMESPACE_GENERIC(quat_lideal_generator) --#define quat_lideal_inter SQISIGN_NAMESPACE_GENERIC(quat_lideal_inter) --#define quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lideal_inverse_lattice_without_hnf) --#define quat_lideal_mul SQISIGN_NAMESPACE_GENERIC(quat_lideal_mul) --#define quat_lideal_norm SQISIGN_NAMESPACE_GENERIC(quat_lideal_norm) --#define quat_lideal_right_order SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_order) --#define quat_lideal_right_transporter SQISIGN_NAMESPACE_GENERIC(quat_lideal_right_transporter) --#define quat_order_discriminant SQISIGN_NAMESPACE_GENERIC(quat_order_discriminant) --#define quat_order_is_maximal SQISIGN_NAMESPACE_GENERIC(quat_order_is_maximal) -+#define quat_lideal_add SQISIGN_NAMESPACE(quat_lideal_add) -+#define quat_lideal_class_gram SQISIGN_NAMESPACE(quat_lideal_class_gram) -+#define quat_lideal_conjugate_without_hnf SQISIGN_NAMESPACE(quat_lideal_conjugate_without_hnf) -+#define quat_lideal_copy SQISIGN_NAMESPACE(quat_lideal_copy) -+#define quat_lideal_create SQISIGN_NAMESPACE(quat_lideal_create) -+#define quat_lideal_create_principal SQISIGN_NAMESPACE(quat_lideal_create_principal) -+#define quat_lideal_equals SQISIGN_NAMESPACE(quat_lideal_equals) -+#define quat_lideal_generator 
SQISIGN_NAMESPACE(quat_lideal_generator) -+#define quat_lideal_inter SQISIGN_NAMESPACE(quat_lideal_inter) -+#define quat_lideal_inverse_lattice_without_hnf SQISIGN_NAMESPACE(quat_lideal_inverse_lattice_without_hnf) -+#define quat_lideal_mul SQISIGN_NAMESPACE(quat_lideal_mul) -+#define quat_lideal_norm SQISIGN_NAMESPACE(quat_lideal_norm) -+#define quat_lideal_right_order SQISIGN_NAMESPACE(quat_lideal_right_order) -+#define quat_lideal_right_transporter SQISIGN_NAMESPACE(quat_lideal_right_transporter) -+#define quat_order_discriminant SQISIGN_NAMESPACE(quat_order_discriminant) -+#define quat_order_is_maximal SQISIGN_NAMESPACE(quat_order_is_maximal) - - // Namespacing symbols exported from intbig.c: - #undef ibz_abs -@@ -647,6 +641,10 @@ - #undef ibz_bitsize - #undef ibz_cmp - #undef ibz_cmp_int32 -+#undef ibz_const_one -+#undef ibz_const_three -+#undef ibz_const_two -+#undef ibz_const_zero - #undef ibz_convert_to_str - #undef ibz_copy - #undef ibz_copy_digits -@@ -687,57 +685,61 @@ - #undef ibz_to_digits - #undef ibz_two_adic - --#define ibz_abs SQISIGN_NAMESPACE_GENERIC(ibz_abs) --#define ibz_add SQISIGN_NAMESPACE_GENERIC(ibz_add) --#define ibz_bitsize SQISIGN_NAMESPACE_GENERIC(ibz_bitsize) --#define ibz_cmp SQISIGN_NAMESPACE_GENERIC(ibz_cmp) --#define ibz_cmp_int32 SQISIGN_NAMESPACE_GENERIC(ibz_cmp_int32) --#define ibz_convert_to_str SQISIGN_NAMESPACE_GENERIC(ibz_convert_to_str) --#define ibz_copy SQISIGN_NAMESPACE_GENERIC(ibz_copy) --#define ibz_copy_digits SQISIGN_NAMESPACE_GENERIC(ibz_copy_digits) --#define ibz_div SQISIGN_NAMESPACE_GENERIC(ibz_div) --#define ibz_div_2exp SQISIGN_NAMESPACE_GENERIC(ibz_div_2exp) --#define ibz_div_floor SQISIGN_NAMESPACE_GENERIC(ibz_div_floor) --#define ibz_divides SQISIGN_NAMESPACE_GENERIC(ibz_divides) --#define ibz_finalize SQISIGN_NAMESPACE_GENERIC(ibz_finalize) --#define ibz_gcd SQISIGN_NAMESPACE_GENERIC(ibz_gcd) --#define ibz_get SQISIGN_NAMESPACE_GENERIC(ibz_get) --#define ibz_init SQISIGN_NAMESPACE_GENERIC(ibz_init) --#define ibz_invmod SQISIGN_NAMESPACE_GENERIC(ibz_invmod) --#define ibz_is_even SQISIGN_NAMESPACE_GENERIC(ibz_is_even) --#define ibz_is_odd SQISIGN_NAMESPACE_GENERIC(ibz_is_odd) --#define ibz_is_one SQISIGN_NAMESPACE_GENERIC(ibz_is_one) --#define ibz_is_zero SQISIGN_NAMESPACE_GENERIC(ibz_is_zero) --#define ibz_legendre SQISIGN_NAMESPACE_GENERIC(ibz_legendre) --#define ibz_mod SQISIGN_NAMESPACE_GENERIC(ibz_mod) --#define ibz_mod_ui SQISIGN_NAMESPACE_GENERIC(ibz_mod_ui) --#define ibz_mul SQISIGN_NAMESPACE_GENERIC(ibz_mul) --#define ibz_neg SQISIGN_NAMESPACE_GENERIC(ibz_neg) --#define ibz_pow SQISIGN_NAMESPACE_GENERIC(ibz_pow) --#define ibz_pow_mod SQISIGN_NAMESPACE_GENERIC(ibz_pow_mod) --#define ibz_print SQISIGN_NAMESPACE_GENERIC(ibz_print) --#define ibz_probab_prime SQISIGN_NAMESPACE_GENERIC(ibz_probab_prime) --#define ibz_rand_interval SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval) --#define ibz_rand_interval_bits SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_bits) --#define ibz_rand_interval_i SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_i) --#define ibz_rand_interval_minm_m SQISIGN_NAMESPACE_GENERIC(ibz_rand_interval_minm_m) --#define ibz_set SQISIGN_NAMESPACE_GENERIC(ibz_set) --#define ibz_set_from_str SQISIGN_NAMESPACE_GENERIC(ibz_set_from_str) --#define ibz_size_in_base SQISIGN_NAMESPACE_GENERIC(ibz_size_in_base) --#define ibz_sqrt SQISIGN_NAMESPACE_GENERIC(ibz_sqrt) --#define ibz_sqrt_floor SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_floor) --#define ibz_sqrt_mod_p SQISIGN_NAMESPACE_GENERIC(ibz_sqrt_mod_p) --#define ibz_sub 
SQISIGN_NAMESPACE_GENERIC(ibz_sub) --#define ibz_swap SQISIGN_NAMESPACE_GENERIC(ibz_swap) --#define ibz_to_digits SQISIGN_NAMESPACE_GENERIC(ibz_to_digits) --#define ibz_two_adic SQISIGN_NAMESPACE_GENERIC(ibz_two_adic) -+#define ibz_abs SQISIGN_NAMESPACE(ibz_abs) -+#define ibz_add SQISIGN_NAMESPACE(ibz_add) -+#define ibz_bitsize SQISIGN_NAMESPACE(ibz_bitsize) -+#define ibz_cmp SQISIGN_NAMESPACE(ibz_cmp) -+#define ibz_cmp_int32 SQISIGN_NAMESPACE(ibz_cmp_int32) -+#define ibz_const_one SQISIGN_NAMESPACE(ibz_const_one) -+#define ibz_const_three SQISIGN_NAMESPACE(ibz_const_three) -+#define ibz_const_two SQISIGN_NAMESPACE(ibz_const_two) -+#define ibz_const_zero SQISIGN_NAMESPACE(ibz_const_zero) -+#define ibz_convert_to_str SQISIGN_NAMESPACE(ibz_convert_to_str) -+#define ibz_copy SQISIGN_NAMESPACE(ibz_copy) -+#define ibz_copy_digits SQISIGN_NAMESPACE(ibz_copy_digits) -+#define ibz_div SQISIGN_NAMESPACE(ibz_div) -+#define ibz_div_2exp SQISIGN_NAMESPACE(ibz_div_2exp) -+#define ibz_div_floor SQISIGN_NAMESPACE(ibz_div_floor) -+#define ibz_divides SQISIGN_NAMESPACE(ibz_divides) -+#define ibz_finalize SQISIGN_NAMESPACE(ibz_finalize) -+#define ibz_gcd SQISIGN_NAMESPACE(ibz_gcd) -+#define ibz_get SQISIGN_NAMESPACE(ibz_get) -+#define ibz_init SQISIGN_NAMESPACE(ibz_init) -+#define ibz_invmod SQISIGN_NAMESPACE(ibz_invmod) -+#define ibz_is_even SQISIGN_NAMESPACE(ibz_is_even) -+#define ibz_is_odd SQISIGN_NAMESPACE(ibz_is_odd) -+#define ibz_is_one SQISIGN_NAMESPACE(ibz_is_one) -+#define ibz_is_zero SQISIGN_NAMESPACE(ibz_is_zero) -+#define ibz_legendre SQISIGN_NAMESPACE(ibz_legendre) -+#define ibz_mod SQISIGN_NAMESPACE(ibz_mod) -+#define ibz_mod_ui SQISIGN_NAMESPACE(ibz_mod_ui) -+#define ibz_mul SQISIGN_NAMESPACE(ibz_mul) -+#define ibz_neg SQISIGN_NAMESPACE(ibz_neg) -+#define ibz_pow SQISIGN_NAMESPACE(ibz_pow) -+#define ibz_pow_mod SQISIGN_NAMESPACE(ibz_pow_mod) -+#define ibz_print SQISIGN_NAMESPACE(ibz_print) -+#define ibz_probab_prime SQISIGN_NAMESPACE(ibz_probab_prime) -+#define ibz_rand_interval SQISIGN_NAMESPACE(ibz_rand_interval) -+#define ibz_rand_interval_bits SQISIGN_NAMESPACE(ibz_rand_interval_bits) -+#define ibz_rand_interval_i SQISIGN_NAMESPACE(ibz_rand_interval_i) -+#define ibz_rand_interval_minm_m SQISIGN_NAMESPACE(ibz_rand_interval_minm_m) -+#define ibz_set SQISIGN_NAMESPACE(ibz_set) -+#define ibz_set_from_str SQISIGN_NAMESPACE(ibz_set_from_str) -+#define ibz_size_in_base SQISIGN_NAMESPACE(ibz_size_in_base) -+#define ibz_sqrt SQISIGN_NAMESPACE(ibz_sqrt) -+#define ibz_sqrt_floor SQISIGN_NAMESPACE(ibz_sqrt_floor) -+#define ibz_sqrt_mod_p SQISIGN_NAMESPACE(ibz_sqrt_mod_p) -+#define ibz_sub SQISIGN_NAMESPACE(ibz_sub) -+#define ibz_swap SQISIGN_NAMESPACE(ibz_swap) -+#define ibz_to_digits SQISIGN_NAMESPACE(ibz_to_digits) -+#define ibz_two_adic SQISIGN_NAMESPACE(ibz_two_adic) - - // Namespacing symbols exported from integers.c: - #undef ibz_cornacchia_prime - #undef ibz_generate_random_prime - --#define ibz_cornacchia_prime SQISIGN_NAMESPACE_GENERIC(ibz_cornacchia_prime) --#define ibz_generate_random_prime SQISIGN_NAMESPACE_GENERIC(ibz_generate_random_prime) -+#define ibz_cornacchia_prime SQISIGN_NAMESPACE(ibz_cornacchia_prime) -+#define ibz_generate_random_prime SQISIGN_NAMESPACE(ibz_generate_random_prime) - - // Namespacing symbols exported from isog_chains.c: - #undef ec_eval_even -@@ -763,15 +765,15 @@ - #undef quat_lattice_lll - #undef quat_lll_core - --#define quat_lattice_lll SQISIGN_NAMESPACE_GENERIC(quat_lattice_lll) --#define quat_lll_core SQISIGN_NAMESPACE_GENERIC(quat_lll_core) -+#define 
quat_lattice_lll SQISIGN_NAMESPACE(quat_lattice_lll) -+#define quat_lll_core SQISIGN_NAMESPACE(quat_lll_core) - - // Namespacing symbols exported from lat_ball.c: - #undef quat_lattice_bound_parallelogram - #undef quat_lattice_sample_from_ball - --#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE_GENERIC(quat_lattice_bound_parallelogram) --#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE_GENERIC(quat_lattice_sample_from_ball) -+#define quat_lattice_bound_parallelogram SQISIGN_NAMESPACE(quat_lattice_bound_parallelogram) -+#define quat_lattice_sample_from_ball SQISIGN_NAMESPACE(quat_lattice_sample_from_ball) - - // Namespacing symbols exported from lattice.c: - #undef quat_lattice_add -@@ -789,29 +791,29 @@ - #undef quat_lattice_mul - #undef quat_lattice_reduce_denom - --#define quat_lattice_add SQISIGN_NAMESPACE_GENERIC(quat_lattice_add) --#define quat_lattice_alg_elem_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_alg_elem_mul) --#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_conjugate_without_hnf) --#define quat_lattice_contains SQISIGN_NAMESPACE_GENERIC(quat_lattice_contains) --#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_dual_without_hnf) --#define quat_lattice_equal SQISIGN_NAMESPACE_GENERIC(quat_lattice_equal) --#define quat_lattice_gram SQISIGN_NAMESPACE_GENERIC(quat_lattice_gram) --#define quat_lattice_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_hnf) --#define quat_lattice_inclusion SQISIGN_NAMESPACE_GENERIC(quat_lattice_inclusion) --#define quat_lattice_index SQISIGN_NAMESPACE_GENERIC(quat_lattice_index) --#define quat_lattice_intersect SQISIGN_NAMESPACE_GENERIC(quat_lattice_intersect) --#define quat_lattice_mat_alg_coord_mul_without_hnf SQISIGN_NAMESPACE_GENERIC(quat_lattice_mat_alg_coord_mul_without_hnf) --#define quat_lattice_mul SQISIGN_NAMESPACE_GENERIC(quat_lattice_mul) --#define quat_lattice_reduce_denom SQISIGN_NAMESPACE_GENERIC(quat_lattice_reduce_denom) -+#define quat_lattice_add SQISIGN_NAMESPACE(quat_lattice_add) -+#define quat_lattice_alg_elem_mul SQISIGN_NAMESPACE(quat_lattice_alg_elem_mul) -+#define quat_lattice_conjugate_without_hnf SQISIGN_NAMESPACE(quat_lattice_conjugate_without_hnf) -+#define quat_lattice_contains SQISIGN_NAMESPACE(quat_lattice_contains) -+#define quat_lattice_dual_without_hnf SQISIGN_NAMESPACE(quat_lattice_dual_without_hnf) -+#define quat_lattice_equal SQISIGN_NAMESPACE(quat_lattice_equal) -+#define quat_lattice_gram SQISIGN_NAMESPACE(quat_lattice_gram) -+#define quat_lattice_hnf SQISIGN_NAMESPACE(quat_lattice_hnf) -+#define quat_lattice_inclusion SQISIGN_NAMESPACE(quat_lattice_inclusion) -+#define quat_lattice_index SQISIGN_NAMESPACE(quat_lattice_index) -+#define quat_lattice_intersect SQISIGN_NAMESPACE(quat_lattice_intersect) -+#define quat_lattice_mat_alg_coord_mul_without_hnf SQISIGN_NAMESPACE(quat_lattice_mat_alg_coord_mul_without_hnf) -+#define quat_lattice_mul SQISIGN_NAMESPACE(quat_lattice_mul) -+#define quat_lattice_reduce_denom SQISIGN_NAMESPACE(quat_lattice_reduce_denom) - - // Namespacing symbols exported from lll_applications.c: - #undef quat_lideal_lideal_mul_reduced - #undef quat_lideal_prime_norm_reduced_equivalent - #undef quat_lideal_reduce_basis - --#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE_GENERIC(quat_lideal_lideal_mul_reduced) --#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE_GENERIC(quat_lideal_prime_norm_reduced_equivalent) --#define quat_lideal_reduce_basis SQISIGN_NAMESPACE_GENERIC(quat_lideal_reduce_basis) 
-+#define quat_lideal_lideal_mul_reduced SQISIGN_NAMESPACE(quat_lideal_lideal_mul_reduced) -+#define quat_lideal_prime_norm_reduced_equivalent SQISIGN_NAMESPACE(quat_lideal_prime_norm_reduced_equivalent) -+#define quat_lideal_reduce_basis SQISIGN_NAMESPACE(quat_lideal_reduce_basis) - - // Namespacing symbols exported from lll_verification.c: - #undef ibq_vec_4_copy_ibz -@@ -820,18 +822,18 @@ - #undef quat_lll_set_ibq_parameters - #undef quat_lll_verify - --#define ibq_vec_4_copy_ibz SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_copy_ibz) --#define quat_lll_bilinear SQISIGN_NAMESPACE_GENERIC(quat_lll_bilinear) --#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE_GENERIC(quat_lll_gram_schmidt_transposed_with_ibq) --#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE_GENERIC(quat_lll_set_ibq_parameters) --#define quat_lll_verify SQISIGN_NAMESPACE_GENERIC(quat_lll_verify) -+#define ibq_vec_4_copy_ibz SQISIGN_NAMESPACE(ibq_vec_4_copy_ibz) -+#define quat_lll_bilinear SQISIGN_NAMESPACE(quat_lll_bilinear) -+#define quat_lll_gram_schmidt_transposed_with_ibq SQISIGN_NAMESPACE(quat_lll_gram_schmidt_transposed_with_ibq) -+#define quat_lll_set_ibq_parameters SQISIGN_NAMESPACE(quat_lll_set_ibq_parameters) -+#define quat_lll_verify SQISIGN_NAMESPACE(quat_lll_verify) - - // Namespacing symbols exported from mem.c: - #undef sqisign_secure_clear - #undef sqisign_secure_free - --#define sqisign_secure_clear SQISIGN_NAMESPACE_GENERIC(sqisign_secure_clear) --#define sqisign_secure_free SQISIGN_NAMESPACE_GENERIC(sqisign_secure_free) -+#define sqisign_secure_clear SQISIGN_NAMESPACE(sqisign_secure_clear) -+#define sqisign_secure_free SQISIGN_NAMESPACE(sqisign_secure_free) - - // Namespacing symbols exported from mp.c: - #undef MUL -@@ -854,25 +856,25 @@ - #undef select_ct - #undef swap_ct - --#define MUL SQISIGN_NAMESPACE_GENERIC(MUL) --#define mp_add SQISIGN_NAMESPACE_GENERIC(mp_add) --#define mp_compare SQISIGN_NAMESPACE_GENERIC(mp_compare) --#define mp_copy SQISIGN_NAMESPACE_GENERIC(mp_copy) --#define mp_inv_2e SQISIGN_NAMESPACE_GENERIC(mp_inv_2e) --#define mp_invert_matrix SQISIGN_NAMESPACE_GENERIC(mp_invert_matrix) --#define mp_is_one SQISIGN_NAMESPACE_GENERIC(mp_is_one) --#define mp_is_zero SQISIGN_NAMESPACE_GENERIC(mp_is_zero) --#define mp_mod_2exp SQISIGN_NAMESPACE_GENERIC(mp_mod_2exp) --#define mp_mul SQISIGN_NAMESPACE_GENERIC(mp_mul) --#define mp_mul2 SQISIGN_NAMESPACE_GENERIC(mp_mul2) --#define mp_neg SQISIGN_NAMESPACE_GENERIC(mp_neg) --#define mp_print SQISIGN_NAMESPACE_GENERIC(mp_print) --#define mp_shiftl SQISIGN_NAMESPACE_GENERIC(mp_shiftl) --#define mp_shiftr SQISIGN_NAMESPACE_GENERIC(mp_shiftr) --#define mp_sub SQISIGN_NAMESPACE_GENERIC(mp_sub) --#define multiple_mp_shiftl SQISIGN_NAMESPACE_GENERIC(multiple_mp_shiftl) --#define select_ct SQISIGN_NAMESPACE_GENERIC(select_ct) --#define swap_ct SQISIGN_NAMESPACE_GENERIC(swap_ct) -+#define MUL SQISIGN_NAMESPACE(MUL) -+#define mp_add SQISIGN_NAMESPACE(mp_add) -+#define mp_compare SQISIGN_NAMESPACE(mp_compare) -+#define mp_copy SQISIGN_NAMESPACE(mp_copy) -+#define mp_inv_2e SQISIGN_NAMESPACE(mp_inv_2e) -+#define mp_invert_matrix SQISIGN_NAMESPACE(mp_invert_matrix) -+#define mp_is_one SQISIGN_NAMESPACE(mp_is_one) -+#define mp_is_zero SQISIGN_NAMESPACE(mp_is_zero) -+#define mp_mod_2exp SQISIGN_NAMESPACE(mp_mod_2exp) -+#define mp_mul SQISIGN_NAMESPACE(mp_mul) -+#define mp_mul2 SQISIGN_NAMESPACE(mp_mul2) -+#define mp_neg SQISIGN_NAMESPACE(mp_neg) -+#define mp_print SQISIGN_NAMESPACE(mp_print) -+#define mp_shiftl SQISIGN_NAMESPACE(mp_shiftl) 
-+#define mp_shiftr SQISIGN_NAMESPACE(mp_shiftr) -+#define mp_sub SQISIGN_NAMESPACE(mp_sub) -+#define multiple_mp_shiftl SQISIGN_NAMESPACE(multiple_mp_shiftl) -+#define select_ct SQISIGN_NAMESPACE(select_ct) -+#define swap_ct SQISIGN_NAMESPACE(swap_ct) - - // Namespacing symbols exported from normeq.c: - #undef quat_change_to_O0_basis -@@ -882,12 +884,12 @@ - #undef quat_represent_integer - #undef quat_sampling_random_ideal_O0_given_norm - --#define quat_change_to_O0_basis SQISIGN_NAMESPACE_GENERIC(quat_change_to_O0_basis) --#define quat_lattice_O0_set SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set) --#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE_GENERIC(quat_lattice_O0_set_extremal) --#define quat_order_elem_create SQISIGN_NAMESPACE_GENERIC(quat_order_elem_create) --#define quat_represent_integer SQISIGN_NAMESPACE_GENERIC(quat_represent_integer) --#define quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE_GENERIC(quat_sampling_random_ideal_O0_given_norm) -+#define quat_change_to_O0_basis SQISIGN_NAMESPACE(quat_change_to_O0_basis) -+#define quat_lattice_O0_set SQISIGN_NAMESPACE(quat_lattice_O0_set) -+#define quat_lattice_O0_set_extremal SQISIGN_NAMESPACE(quat_lattice_O0_set_extremal) -+#define quat_order_elem_create SQISIGN_NAMESPACE(quat_order_elem_create) -+#define quat_represent_integer SQISIGN_NAMESPACE(quat_represent_integer) -+#define quat_sampling_random_ideal_O0_given_norm SQISIGN_NAMESPACE(quat_sampling_random_ideal_O0_given_norm) - - // Namespacing symbols exported from printer.c: - #undef ibz_mat_2x2_print -@@ -899,23 +901,23 @@ - #undef quat_lattice_print - #undef quat_left_ideal_print - --#define ibz_mat_2x2_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_2x2_print) --#define ibz_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibz_mat_4x4_print) --#define ibz_vec_2_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_2_print) --#define ibz_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibz_vec_4_print) --#define quat_alg_elem_print SQISIGN_NAMESPACE_GENERIC(quat_alg_elem_print) --#define quat_alg_print SQISIGN_NAMESPACE_GENERIC(quat_alg_print) --#define quat_lattice_print SQISIGN_NAMESPACE_GENERIC(quat_lattice_print) --#define quat_left_ideal_print SQISIGN_NAMESPACE_GENERIC(quat_left_ideal_print) -+#define ibz_mat_2x2_print SQISIGN_NAMESPACE(ibz_mat_2x2_print) -+#define ibz_mat_4x4_print SQISIGN_NAMESPACE(ibz_mat_4x4_print) -+#define ibz_vec_2_print SQISIGN_NAMESPACE(ibz_vec_2_print) -+#define ibz_vec_4_print SQISIGN_NAMESPACE(ibz_vec_4_print) -+#define quat_alg_elem_print SQISIGN_NAMESPACE(quat_alg_elem_print) -+#define quat_alg_print SQISIGN_NAMESPACE(quat_alg_print) -+#define quat_lattice_print SQISIGN_NAMESPACE(quat_lattice_print) -+#define quat_left_ideal_print SQISIGN_NAMESPACE(quat_left_ideal_print) - - // Namespacing symbols exported from random_input_generation.c: - #undef quat_test_input_random_ideal_generation - #undef quat_test_input_random_ideal_lattice_generation - #undef quat_test_input_random_lattice_generation - --#define quat_test_input_random_ideal_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_generation) --#define quat_test_input_random_ideal_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_ideal_lattice_generation) --#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE_GENERIC(quat_test_input_random_lattice_generation) -+#define quat_test_input_random_ideal_generation SQISIGN_NAMESPACE(quat_test_input_random_ideal_generation) -+#define quat_test_input_random_ideal_lattice_generation 
SQISIGN_NAMESPACE(quat_test_input_random_ideal_lattice_generation) -+#define quat_test_input_random_lattice_generation SQISIGN_NAMESPACE(quat_test_input_random_lattice_generation) - - // Namespacing symbols exported from rationals.c: - #undef ibq_abs -@@ -941,28 +943,28 @@ - #undef ibq_vec_4_init - #undef ibq_vec_4_print - --#define ibq_abs SQISIGN_NAMESPACE_GENERIC(ibq_abs) --#define ibq_add SQISIGN_NAMESPACE_GENERIC(ibq_add) --#define ibq_cmp SQISIGN_NAMESPACE_GENERIC(ibq_cmp) --#define ibq_copy SQISIGN_NAMESPACE_GENERIC(ibq_copy) --#define ibq_finalize SQISIGN_NAMESPACE_GENERIC(ibq_finalize) --#define ibq_init SQISIGN_NAMESPACE_GENERIC(ibq_init) --#define ibq_inv SQISIGN_NAMESPACE_GENERIC(ibq_inv) --#define ibq_is_ibz SQISIGN_NAMESPACE_GENERIC(ibq_is_ibz) --#define ibq_is_one SQISIGN_NAMESPACE_GENERIC(ibq_is_one) --#define ibq_is_zero SQISIGN_NAMESPACE_GENERIC(ibq_is_zero) --#define ibq_mat_4x4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_finalize) --#define ibq_mat_4x4_init SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_init) --#define ibq_mat_4x4_print SQISIGN_NAMESPACE_GENERIC(ibq_mat_4x4_print) --#define ibq_mul SQISIGN_NAMESPACE_GENERIC(ibq_mul) --#define ibq_neg SQISIGN_NAMESPACE_GENERIC(ibq_neg) --#define ibq_reduce SQISIGN_NAMESPACE_GENERIC(ibq_reduce) --#define ibq_set SQISIGN_NAMESPACE_GENERIC(ibq_set) --#define ibq_sub SQISIGN_NAMESPACE_GENERIC(ibq_sub) --#define ibq_to_ibz SQISIGN_NAMESPACE_GENERIC(ibq_to_ibz) --#define ibq_vec_4_finalize SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_finalize) --#define ibq_vec_4_init SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_init) --#define ibq_vec_4_print SQISIGN_NAMESPACE_GENERIC(ibq_vec_4_print) -+#define ibq_abs SQISIGN_NAMESPACE(ibq_abs) -+#define ibq_add SQISIGN_NAMESPACE(ibq_add) -+#define ibq_cmp SQISIGN_NAMESPACE(ibq_cmp) -+#define ibq_copy SQISIGN_NAMESPACE(ibq_copy) -+#define ibq_finalize SQISIGN_NAMESPACE(ibq_finalize) -+#define ibq_init SQISIGN_NAMESPACE(ibq_init) -+#define ibq_inv SQISIGN_NAMESPACE(ibq_inv) -+#define ibq_is_ibz SQISIGN_NAMESPACE(ibq_is_ibz) -+#define ibq_is_one SQISIGN_NAMESPACE(ibq_is_one) -+#define ibq_is_zero SQISIGN_NAMESPACE(ibq_is_zero) -+#define ibq_mat_4x4_finalize SQISIGN_NAMESPACE(ibq_mat_4x4_finalize) -+#define ibq_mat_4x4_init SQISIGN_NAMESPACE(ibq_mat_4x4_init) -+#define ibq_mat_4x4_print SQISIGN_NAMESPACE(ibq_mat_4x4_print) -+#define ibq_mul SQISIGN_NAMESPACE(ibq_mul) -+#define ibq_neg SQISIGN_NAMESPACE(ibq_neg) -+#define ibq_reduce SQISIGN_NAMESPACE(ibq_reduce) -+#define ibq_set SQISIGN_NAMESPACE(ibq_set) -+#define ibq_sub SQISIGN_NAMESPACE(ibq_sub) -+#define ibq_to_ibz SQISIGN_NAMESPACE(ibq_to_ibz) -+#define ibq_vec_4_finalize SQISIGN_NAMESPACE(ibq_vec_4_finalize) -+#define ibq_vec_4_init SQISIGN_NAMESPACE(ibq_vec_4_init) -+#define ibq_vec_4_print SQISIGN_NAMESPACE(ibq_vec_4_print) - - // Namespacing symbols exported from sign.c: - #undef protocols_sign
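Note on the sqisign_namespace.patch removed above: it consisted almost entirely of switching symbols from the shared SQISIGN_NAMESPACE_GENERIC() macro to the variant-specific SQISIGN_NAMESPACE() one, so that each parameter set and implementation gets uniquely prefixed symbol names and the levels can coexist in one library without collisions; it is presumably unused now because that rewrite was folded into the upstream branch referenced earlier. A toy sketch of the token-pasting pattern (the prefix, macro names, and demo function are illustrative, not the library's exact scheme):

#include <stdio.h>

/* Two-step paste so macro arguments are expanded before ## is applied. */
#define NS_JOIN_(a, b) a##_##b
#define NS_JOIN(a, b) NS_JOIN_(a, b)

/* In the real header the prefix is built from the build type and the
 * SQISIGN_VARIANT (e.g. ref/broadwell plus lvl1/lvl3/lvl5); here it is fixed. */
#define SQISIGN_PREFIX sqisign_lvl1_ref
#define SQISIGN_NAMESPACE(s) NS_JOIN(SQISIGN_PREFIX, s)

/* Every exported symbol is redirected through the macro... */
#define ibz_add SQISIGN_NAMESPACE(ibz_add)

/* ...so this definition actually defines sqisign_lvl1_ref_ibz_add. */
static int
ibz_add(int a, int b)
{
    return a + b;
}

#define STR_(x) #x
#define STR(x) STR_(x)

int
main(void)
{
    /* Callers keep writing ibz_add(); the linker sees the prefixed name. */
    printf("%s(2, 3) = %d\n", STR(ibz_add), ibz_add(2, 3));
    return 0;
}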